drm/GPU: Add support for Imagination PowerVR GPU driver v1.17
author     shanlong.li <shanlong.li@starfivetech.com>
           Wed, 5 Jul 2023 02:25:22 +0000 (11:25 +0900)
committer  Jaehoon Chung <jh80.chung@samsung.com>
           Mon, 24 Jul 2023 23:25:21 +0000 (08:25 +0900)
Add support for the Imagination PowerVR GPU driver on the StarFive JH7110. The
driver is taken from [1]; the list of imported patches is given in [2]. We
modified the driver compatible string and clocks in the device tree to adapt
them to kernel v6.1 (a minimal device-tree sketch follows the patch list below).

[1] https://github.com/starfive-tech/linux

[2] patch list from v5.15.y
  b9049f928d30 gpu: drm: img: disable PDUMP
  092482e4811e driver:GPU: adjust interface sequence 1. add axi disable interface 2. adjust the interface order between clk and
  b320e5501342 gpu:driver: fix up hibernation resume problem
  0dea95655224 driver:gpu: add gpu runtime pm
  0381e221bc0d driver:gpu: gpu driver change to release
  6ccc2d8209d9 driver:GPU:set the gpu frequency to 594Mhz
  d81e5fcbcccc driver:gpu: gpu driver upgrade to 1.17
  18a68c90c061 Add Wayland with GPU support.
  e4d3672cccbe driver:GPU: use pm runtime interface
  4c34123a3d81 driver:GPU: Using the GPU driver release version
  1ed4d6d30d92 driver:GPU: Disable apm to resolve pvrdebug -dd suspension  error
  13e336d5b5ae driver:GPU: adjust ClockSpeed to 409.6MHz
  c2a828350549 driver:GPU: fix compile warnings
  2ba6ee6d00df driver:GPU: add gpu driver
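
For reference, a minimal sketch of what the device-tree wiring amounts to,
using only the node name, compatible string and register ranges visible in
the hunks at the end of this page. The full clock/clock-names list is left
to the jh7110.dtsi hunk below and is not reproduced here; the default
"disabled" status on the SoC node is an assumption.

    /* SoC dtsi (jh7110.dtsi): declare the GPU node. Clocks omitted here;
     * see the jh7110.dtsi hunk below for the complete clock wiring. */
    gpu: gpu@18000000 {
            compatible = "img-gpu";
            reg = <0x0 0x18000000 0x0 0x100000>,
                  <0x0 0x130C000 0x0 0x10000>;
            /* assumed disabled by default; the board dtsi enables it */
            status = "disabled";
    };

    /* Board dtsi (jh7110-starfive-visionfive-2.dtsi): enable the node. */
    &gpu {
            status = "okay";
    };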

Signed-off-by: shanlong.li <shanlong.li@starfivetech.com>
Change-Id: I8ac4ba0038e8ba48c48d615f8700c49c7ff706b9
Signed-off-by: Hoegeun Kwon <hoegeun.kwon@samsung.com>
598 files changed:
arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
arch/riscv/boot/dts/starfive/jh7110.dtsi
drivers/gpu/drm/Kconfig
drivers/gpu/drm/Makefile
drivers/gpu/drm/img/Kconfig [new file with mode: 0644]
drivers/gpu/drm/img/Makefile [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/Kconfig [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/Makefile [new file with mode: 0755]
drivers/gpu/drm/img/img-rogue/config_kernel.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/config_kernel.mk [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/cache_bridge/client_cache_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/cache_bridge/client_cache_direct_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/cache_bridge/common_cache_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/cache_bridge/server_cache_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/cmm_bridge/common_cmm_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/cmm_bridge/server_cmm_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/devicememhistory_bridge/client_devicememhistory_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/devicememhistory_bridge/client_devicememhistory_direct_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/devicememhistory_bridge/common_devicememhistory_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/devicememhistory_bridge/server_devicememhistory_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/di_bridge/common_di_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/di_bridge/server_di_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/dma_bridge/common_dma_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/dma_bridge/server_dma_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/dmabuf_bridge/common_dmabuf_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/dmabuf_bridge/server_dmabuf_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/htbuffer_bridge/client_htbuffer_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/htbuffer_bridge/client_htbuffer_direct_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/htbuffer_bridge/common_htbuffer_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/htbuffer_bridge/server_htbuffer_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/mm_bridge/client_mm_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/mm_bridge/client_mm_direct_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/mm_bridge/common_mm_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/mm_bridge/server_mm_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/pdump_bridge/client_pdump_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/pdump_bridge/client_pdump_direct_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/pdump_bridge/common_pdump_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/pdump_bridge/server_pdump_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/pdumpctrl_bridge/client_pdumpctrl_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/pdumpctrl_bridge/client_pdumpctrl_direct_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/pdumpctrl_bridge/common_pdumpctrl_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/pdumpctrl_bridge/server_pdumpctrl_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/pdumpmm_bridge/client_pdumpmm_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/pdumpmm_bridge/client_pdumpmm_direct_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/pdumpmm_bridge/common_pdumpmm_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/pdumpmm_bridge/server_pdumpmm_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/pvrtl_bridge/client_pvrtl_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/pvrtl_bridge/client_pvrtl_direct_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/pvrtl_bridge/common_pvrtl_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/pvrtl_bridge/server_pvrtl_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/rgxbreakpoint_bridge/common_rgxbreakpoint_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/rgxbreakpoint_bridge/server_rgxbreakpoint_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/rgxcmp_bridge/common_rgxcmp_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/rgxcmp_bridge/server_rgxcmp_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/rgxfwdbg_bridge/common_rgxfwdbg_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/rgxfwdbg_bridge/server_rgxfwdbg_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/rgxhwperf_bridge/common_rgxhwperf_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/rgxhwperf_bridge/server_rgxhwperf_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/rgxkicksync_bridge/common_rgxkicksync_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/rgxkicksync_bridge/server_rgxkicksync_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/rgxpdump_bridge/client_rgxpdump_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/rgxpdump_bridge/client_rgxpdump_direct_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/rgxpdump_bridge/common_rgxpdump_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/rgxpdump_bridge/server_rgxpdump_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/rgxregconfig_bridge/common_rgxregconfig_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/rgxregconfig_bridge/server_rgxregconfig_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/rgxta3d_bridge/common_rgxta3d_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/rgxta3d_bridge/server_rgxta3d_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/rgxtimerquery_bridge/common_rgxtimerquery_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/rgxtimerquery_bridge/server_rgxtimerquery_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/rgxtq2_bridge/common_rgxtq2_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/rgxtq2_bridge/server_rgxtq2_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/rgxtq_bridge/common_rgxtq_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/rgxtq_bridge/server_rgxtq_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/ri_bridge/client_ri_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/ri_bridge/client_ri_direct_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/ri_bridge/common_ri_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/ri_bridge/server_ri_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/srvcore_bridge/common_srvcore_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/srvcore_bridge/server_srvcore_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/sync_bridge/client_sync_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/sync_bridge/client_sync_direct_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/sync_bridge/common_sync_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/sync_bridge/server_sync_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/syncfallback_bridge/common_syncfallback_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/syncfallback_bridge/server_syncfallback_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/synctracking_bridge/client_synctracking_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/synctracking_bridge/client_synctracking_direct_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/synctracking_bridge/common_synctracking_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/generated/rogue/synctracking_bridge/server_synctracking_bridge.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_1.V.2.30.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_1.V.4.19.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_1.V.4.5.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_15.V.1.64.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_22.V.104.18.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_22.V.104.218.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_22.V.208.318.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_22.V.21.16.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.25.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.30.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.330.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.38.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_24.V.104.504.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_24.V.208.504.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_24.V.208.505.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_24.V.54.204.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_29.V.108.208.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_29.V.52.202.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_33.V.11.3.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_33.V.22.1.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_36.V.104.182.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_36.V.104.183.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_36.V.104.796.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_36.V.54.103.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_36.V.54.182.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_36.V.54.183.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_4.V.2.51.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_4.V.2.58.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_4.V.4.55.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_4.V.6.62.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_5.V.1.46.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_6.V.4.35.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_1.39.4.19.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_1.75.2.30.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_1.82.4.5.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_15.5.1.64.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.102.54.38.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.104.208.318.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.105.208.318.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.30.54.25.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.40.54.30.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.46.54.330.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.49.21.16.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.67.54.30.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.68.54.30.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.86.104.218.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.87.104.18.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_24.50.208.504.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_24.56.208.505.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_24.66.54.204.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_24.67.104.504.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_29.14.108.208.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_29.19.52.202.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_33.15.11.3.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_33.8.22.1.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_36.50.54.182.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_36.52.104.182.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_36.53.104.796.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_36.54.54.183.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_36.55.54.103.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_36.56.104.183.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_4.31.4.55.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_4.40.2.51.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_4.43.6.62.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_4.45.2.58.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_4.46.6.62.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_5.9.1.46.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_6.34.4.35.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/rgx_bvnc_defs_km.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/rgx_bvnc_table_km.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/rgx_cr_defs_km.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/rgxdefs_km.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/rgxmhdefs_km.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/rgxmmudefs_km.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/cache_ops.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/devicemem_typedefs.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/dllist.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/drm/netlink.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/drm/nulldisp_drm.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/drm/pdp_drm.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/drm/pvr_drm.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/img_3dtypes.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/img_defs.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/img_drm_fourcc_internal.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/img_elf.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/img_types.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/kernel_types.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/linux_sw_sync.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/lock_types.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/log2.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/multicore_defs.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/osfunc_common.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/pdumpdefs.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/pdumpdesc.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/public/powervr/buffer_attribs.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/public/powervr/img_drm_fourcc.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/public/powervr/mem_types.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/public/powervr/pvrsrv_sync_ext.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/pvr_buffer_sync_shared.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/pvr_debug.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/pvr_fd_sync_kernel.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/pvr_intrinsics.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/pvrmodule.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/pvrsrv_device_types.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/pvrsrv_devvar.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/pvrsrv_error.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/pvrsrv_errors.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/pvrsrv_memalloc_physheap.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/pvrsrv_memallocflags.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/pvrsrv_memallocflags_internal.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/pvrsrv_sync_km.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/pvrsrv_tlcommon.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/pvrsrv_tlstreams.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/pvrversion.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/rgx_common.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/rgx_common_asserts.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/rgx_compat_bvnc.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/rgx_fwif_resetframework.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/rgx_fwif_sf.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/rgx_heap_firmware.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/rgx_hwperf_common.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/rgx_meta.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/rgx_mips.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/rgx_riscv.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/rgxfw_log_helper.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/ri_typedefs.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/rogue/rgx_fwif_alignchecks.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/rogue/rgx_fwif_hwperf.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/rogue/rgx_fwif_km.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/rogue/rgx_fwif_shared.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/rogue/rgx_heaps.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/rogue/rgx_hwperf.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/rogue/rgx_options.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/rogue/rgxheapconfig.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/rogue/rgxheapconfig_65273.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/rogue/system/rgx_tc/tc_clocks.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/services_km.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/servicesext.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/sync_checkpoint_external.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/sync_prim_internal.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/apollo_regs.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/bonnie_tcf.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/odin_defs.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/odin_pdp_regs.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/odin_regs.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/orion_defs.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/orion_regs.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/pdp_regs.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/pfim_defs.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/pfim_regs.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/tcf_clk_ctrl.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/tcf_pll.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/tcf_rgbpdp_regs.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/include/virt_validation_defs.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/include/dma_flags.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/include/htbuffer_sf.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/include/htbuffer_types.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/include/img_types_check.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/include/info_page_client.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/include/info_page_defs.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/include/km_apphint_defs_common.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/include/os_cpu_cache.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/include/pdump.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/include/physheap.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/include/physheap_config.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/include/pvr_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/include/pvr_dicommon.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/include/pvr_ricommon.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/include/rgx_bridge.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/include/rgx_fw_info.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/include/rgx_memallocflags.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/include/rgx_pdump_panics.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/include/rgx_tq_shared.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/include/rgxtransfer_shader.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/include/rogue/km_apphint_defs.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/include/rogue/rgxapi_km.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/include/sync_checkpoint_internal.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/cache_km.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/connection_server.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/debug_common.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/devicemem_heapcfg.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/devicemem_history_server.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/devicemem_server.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/di_impl_brg.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/di_impl_brg.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/di_impl_brg_intern.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/di_server.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/dma_km.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/handle.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/htb_debug.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/htb_debug.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/htbserver.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/info_page_km.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/lists.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/mmu_common.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/pdump_mmu.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/pdump_physmem.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/pdump_server.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/physheap.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/physmem.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/physmem_hostmem.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/physmem_lma.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/pmr.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/power.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/process_stats.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/pvr_notifier.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/pvrsrv.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/pvrsrv_bridge_init.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/pvrsrv_pool.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/ri_server.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/srvcore.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/sync_checkpoint.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/sync_fallback_server.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/sync_server.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/tlintern.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/tlserver.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/tlstream.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/vmm_pvz_client.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/vmm_pvz_server.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/vz_vmm_pvz.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/common/vz_vmm_vm.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgx_bridge_init.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgx_bridge_init.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxbreakpoint.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxbreakpoint.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxbvnc.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxbvnc.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxccb.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxccb.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxfwdbg.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxfwdbg.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxfwimageutils.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxfwimageutils.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxfwtrace_strings.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxhwperf_common.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxhwperf_common.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxkicksync.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxkicksync.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxmem.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxmem.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxmulticore.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxpdump.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxpdvfs.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxpdvfs.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxregconfig.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxregconfig.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxshader.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxshader.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxstartstop.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxsyncutils.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxsyncutils.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxtdmtransfer.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxtimecorr.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxtimecorr.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxtimerquery.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxtimerquery.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxutils.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxutils.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxworkest.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rgxworkest.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxcompute.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxcompute.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxdebug.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxdebug.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxdevice.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxfwutils.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxfwutils.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxhwperf.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxhwperf.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxinit.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxinit.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxlayer.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxlayer_impl.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxlayer_impl.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxmipsmmuinit.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxmipsmmuinit.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxmmuinit.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxmmuinit.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxmulticore.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxpdump.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxpower.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxpower.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxsrvinit.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxstartstop.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxta3d.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxta3d.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxtdmtransfer.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxtransfer.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxtransfer.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxcompute.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxcompute.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxdebug.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxdebug.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxdevice.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxfwutils.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxfwutils.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxhwperf.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxhwperf.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxinit.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxinit.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxlayer.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxlayer_impl.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxlayer_impl.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxmmuinit.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxmmuinit.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxmulticore.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxpdump.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxpower.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxpower.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxray.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxray.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxsrvinit.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxstartstop.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxta3d.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxta3d.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxtdmtransfer.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/Kbuild.mk [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/Linux.mk [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/allocmem.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/env_connection.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/event.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/event.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/fwload.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/handle_idr.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/km_apphint.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/km_apphint.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/linkage.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/module_common.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/module_common.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/osconnection_server.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/osfunc.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/osfunc_arm.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/osfunc_arm64.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/osfunc_riscv.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/osfunc_x86.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/osmmap_stub.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/ossecure_export.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/physmem_dmabuf.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/physmem_osmem_linux.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/physmem_osmem_linux.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/physmem_test.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/physmem_test.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pmr_os.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/private_data.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_bridge_k.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_bridge_k.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_buffer_sync.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_buffer_sync.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_counting_timeline.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_counting_timeline.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_debug.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_debugfs.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_debugfs.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_drm.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_drv.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_dvfs_device.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_dvfs_device.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_fence.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_fence.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_fence_trace.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_gputrace.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_ion_stats.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_platform_drv.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_procfs.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_procfs.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sw_fence.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sw_fence.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sync.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sync2.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sync_api.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sync_file.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sync_ioctl_common.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sync_ioctl_common.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sync_ioctl_drm.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sync_ioctl_drm.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_uaccess.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/rogue_trace_events.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/services_kernel_client.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/trace_events.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/env/linux/trace_events.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/cache_km.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/connection_server.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/debug_common.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/device.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/devicemem_heapcfg.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/devicemem_history_server.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/devicemem_server.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/devicemem_server_utils.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/di_common.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/di_server.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/dma_km.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/fwload.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/fwtrace_string.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/handle.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/handle_impl.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/handle_types.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/htbserver.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/info_page.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/lists.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/mmu_common.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/opaque_types.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/os_srvinit_param.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/osconnection_server.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/osdi_impl.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/osfunc.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/oskm_apphint.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/ospvr_gputrace.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/ossecure_export.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/pdump_km.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/pdump_mmu.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/pdump_physmem.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/pdump_symbolicaddr.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/physmem.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/physmem_dmabuf.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/physmem_hostmem.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/physmem_lma.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/physmem_osmem.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/pmr.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/pmr_impl.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/pmr_os.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/power.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/process_stats.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/pvr_dvfs.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/pvr_notifier.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/pvrsrv.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/pvrsrv_apphint.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/pvrsrv_bridge_init.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/pvrsrv_cleanup.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/pvrsrv_device.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/pvrsrv_firmware_boot.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/pvrsrv_pool.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/pvrsrv_sync_server.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/ri_server.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/sofunc_pvr.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/sofunc_rgx.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/srvcore.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/srvinit.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/srvkm.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/sync_checkpoint.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/sync_checkpoint_init.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/sync_fallback_server.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/sync_server.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/tlintern.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/tlserver.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/tlstream.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/tutils_km.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/vmm_impl.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/vmm_pvz_client.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/vmm_pvz_common.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/vmm_pvz_server.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/vz_vm.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/server/include/vz_vmm_pvz.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/common/devicemem.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/common/devicemem_pdump.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/common/devicemem_utils.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/common/devicememx_pdump.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/common/hash.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/common/htbuffer.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/common/mem_utils.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/common/pvrsrv_error.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/common/ra.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/common/sync.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/common/tlclient.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/common/uniq_key_splay_tree.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/common/uniq_key_splay_tree.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/devices/rogue/rgx_hwperf_table.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/devices/rogue/rgx_hwperf_table.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/include/allocmem.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/include/device_connection.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/include/devicemem.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/include/devicemem_pdump.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/include/devicemem_utils.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/include/devicememx.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/include/devicememx_pdump.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/include/hash.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/include/htbuffer.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/include/htbuffer_init.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/include/lock.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/include/osmmap.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/include/proc_stats.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/include/ra.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/include/sync.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/include/sync_internal.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/include/tlclient.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/shared/include/tutilsdefs.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/system/common/env/linux/interrupt_support.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/system/common/env/linux/pci_support.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/system/common/sysconfig_cmn.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/system/include/interrupt_support.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/system/include/pci_support.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/system/include/syscommon.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/system/include/sysvalidation.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/system/rogue/common/env/linux/dma_support.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/system/rogue/common/vmm_type_stub.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/system/rogue/include/dma_support.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/system/rogue/sf_7110/Kbuild.mk [new file with mode: 0755]
drivers/gpu/drm/img/img-rogue/services/system/rogue/sf_7110/sysconfig.c [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/system/rogue/sf_7110/sysconfig.h [new file with mode: 0644]
drivers/gpu/drm/img/img-rogue/services/system/rogue/sf_7110/sysinfo.h [new file with mode: 0644]
drivers/gpu/drm/img/kernel_compatibility.h [new file with mode: 0644]
drivers/gpu/drm/img/kernel_config_compatibility.h [new file with mode: 0644]
drivers/gpu/drm/img/kernel_nospec.h [new file with mode: 0644]
drivers/gpu/drm/img/pvr_dma_resv.h [new file with mode: 0644]
drivers/gpu/drm/img/pvr_linux_fence.h [new file with mode: 0644]
drivers/gpu/drm/img/pvr_vmap.h [new file with mode: 0644]
drivers/gpu/drm/img/pvrversion.h [new file with mode: 0644]
drivers/gpu/drm/img/riscv_vmap.c [new file with mode: 0644]
drivers/gpu/drm/img/riscv_vmap.h [new file with mode: 0644]

diff --git a/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi b/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
index 18e87e0..d81366a 100644 (file)
        };
 };
 
+&gpu {
+       status = "okay";
+};
+
 &i2c0 {
        clock-frequency = <100000>;
        i2c-sda-hold-time-ns = <300>;
diff --git a/arch/riscv/boot/dts/starfive/jh7110.dtsi b/arch/riscv/boot/dts/starfive/jh7110.dtsi
index 2848fb8..b3fa383 100644 (file)
                        status = "disabled";
                };
 
+               gpu: gpu@18000000 {
+                       compatible = "img-gpu";
+                       reg = <0x0 0x18000000 0x0 0x100000>,
+                               <0x0 0x130C000 0x0 0x10000>;
+                       clocks = <&syscrg JH7110_SYSCLK_GPU_CORE>,
+                                <&syscrg JH7110_SYSCLK_GPU_APB>,
+                                <&syscrg JH7110_SYSCLK_GPU_RTC_TOGGLE>,
+                                <&syscrg JH7110_SYSCLK_GPU_CORE_CLK>,
+                                <&syscrg JH7110_SYSCLK_GPU_SYS_CLK>,
+                                <&syscrg JH7110_SYSCLK_NOC_BUS_GPU_AXI>;
+                       clock-names = "clk_bv", "clk_apb", "clk_rtc",
+                                       "clk_core", "clk_sys", "clk_axi";
+                       resets = <&syscrg JH7110_SYSRST_GPU_APB>,
+                                <&syscrg JH7110_SYSRST_GPU_DOMA>;
+                       reset-names = "rst_apb", "rst_doma";
+                       power-domains = <&pwrc JH7110_PD_GPUA>;
+                       interrupts = <82>;
+                       current-clock = <8000000>;
+                       status = "disabled";
+               };
+
                dma: dma-controller@16050000 {
                        compatible = "starfive,jh7110-axi-dma";
                        reg = <0x0 0x16050000 0x0 0x10000>;
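For readers unfamiliar with the binding, the gpu@18000000 node above declares its clocks, resets and interrupt by name, and a consumer driver resolves them with the usual platform helpers. The sketch below is illustrative only: it is not part of this patch or of the img-rogue sources, the function name is hypothetical, and only the clock-names, reset-names and interrupt number are taken from the node.

/* Illustrative consumer sketch; not part of the patch. */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

static int sketch_gpu_get_resources(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct clk *core_clk;
	struct reset_control *apb_rst;
	int irq;

	/* "clk_core" is one of the clock-names entries in the gpu node */
	core_clk = devm_clk_get(dev, "clk_core");
	if (IS_ERR(core_clk))
		return PTR_ERR(core_clk);

	/* "rst_apb" is the first reset-names entry */
	apb_rst = devm_reset_control_get_exclusive(dev, "rst_apb");
	if (IS_ERR(apb_rst))
		return PTR_ERR(apb_rst);

	/* interrupts = <82> becomes the first platform IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	return 0;
}
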
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 742481f..eb10466 100644 (file)
@@ -419,6 +419,8 @@ source "drivers/gpu/drm/sprd/Kconfig"
 
 source "drivers/gpu/drm/verisilicon/Kconfig"
 
+source "drivers/gpu/drm/img/Kconfig"
+
 config DRM_HYPERV
        tristate "DRM Support for Hyper-V synthetic video device"
        depends on DRM && PCI && MMU && HYPERV
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index c5fff6d..0c3182a 100644 (file)
@@ -149,3 +149,4 @@ obj-$(CONFIG_DRM_HYPERV) += hyperv/
 obj-y                  += solomon/
 obj-$(CONFIG_DRM_SPRD) += sprd/
 obj-$(CONFIG_DRM_VERISILICON) += verisilicon/
+obj-$(CONFIG_DRM_IMG) += img/
diff --git a/drivers/gpu/drm/img/Kconfig b/drivers/gpu/drm/img/Kconfig
new file mode 100644 (file)
index 0000000..71d66af
--- /dev/null
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config DRM_IMG
+       bool "Imagination PowerVR GPU"
+        select DRM
+        default n
+
+source "drivers/gpu/drm/img/img-rogue/Kconfig"
+
diff --git a/drivers/gpu/drm/img/Makefile b/drivers/gpu/drm/img/Makefile
new file mode 100644 (file)
index 0000000..1b64843
--- /dev/null
@@ -0,0 +1,6 @@
+#drm_img-y += riscv_vmap.o
+
+obj-$(CONFIG_DRM_IMG) += riscv_vmap.o
+obj-$(CONFIG_DRM_IMG_ROGUE) += img-rogue/
+
+ccflags-y+= -I$(srctree)/mm
diff --git a/drivers/gpu/drm/img/img-rogue/Kconfig b/drivers/gpu/drm/img/img-rogue/Kconfig
new file mode 100644 (file)
index 0000000..5b793bb
--- /dev/null
@@ -0,0 +1,6 @@
+config DRM_IMG_ROGUE
+       bool "DRM support for PowerVR GPU"
+       select DRM_IMG
+       default n
+       help
+         Enable the Imagination PowerVR Rogue GPU driver.
diff --git a/drivers/gpu/drm/img/img-rogue/Makefile b/drivers/gpu/drm/img/img-rogue/Makefile
new file mode 100755 (executable)
index 0000000..ee37ef8
--- /dev/null
@@ -0,0 +1,129 @@
+########################################################################### ###
+#@Title         Root kernel makefile
+#@Copyright     Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+#@License       Dual MIT/GPLv2
+#
+# The contents of this file are subject to the MIT license as set out below.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# Alternatively, the contents of this file may be used under the terms of
+# the GNU General Public License Version 2 ("GPL") in which case the provisions
+# of GPL are applicable instead of those above.
+#
+# If you wish to allow use of your version of this file only under the terms of
+# GPL, and not to allow others to use your version of this file under the terms
+# of the MIT license, indicate your decision by deleting the provisions above
+# and replace them with the notice and other provisions required by GPL as set
+# out in the file called "GPL-COPYING" included in this distribution. If you do
+# not delete the provisions above, a recipient may use your version of this file
+# under the terms of either the MIT license or GPL.
+#
+# This License is also included in this distribution in the file called
+# "MIT-COPYING".
+#
+# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+### ###########################################################################
+
+# This top-level kbuild makefile builds all the Linux kernel modules in the
+# DDK. To run kbuild, this makefile is copied to $(TARGET_PRIMARY_OUT)/kbuild/Makefile
+# and make is invoked in $(TARGET_PRIMARY_OUT)/kbuild.
+
+# This makefile doesn't define any kbuild special variables apart from
+# ccflags-y and obj-m. The variables for objects are picked up by including
+# the kbuild makefile fragments named in $(INTERNAL_KBUILD_MAKEFILES). The
+# list of objects that these fragments make is collected in
+# $(INTERNAL_KBUILD_OBJECTS) and $(INTERNAL_EXTRA_KBUILD_OBJECTS). These
+# variables are set according to the build's $(KERNEL_COMPONENTS) and
+# $(EXTRA_PVRSRVKM_COMPONENTS). To add a new kernel module to the build, edit
+# these variables in the per-build Makefile.
+
+INTERNAL_KBUILD_OBJECTS=pvrsrvkm.o
+INTERNAL_EXTRA_KBUILD_OBJECTS=""
+TOP:=$(dir $(realpath $(lastword $(MAKEFILE_LIST))))
+TOP:=$(TOP:/=)
+BRIDGE_SOURCE_ROOT=$(TOP)/generated/rogue
+TARGET_PRIMARY_ARCH=target_riscv64
+PVR_ARCH=rogue
+PVR_ARCH_DEFS=rogue
+PVR_SYSTEM := sf_7110
+#PDUMP ?= 1
+BUILD ?= release
+RGX_BVNC ?= 36.50.54.182
+RGX_BNC ?= 36.50.54.182
+PVRSRV_NEED_PVR_DPF=1
+PVRSRV_NEED_PVR_ASSERT=1
+PVR_SERVICES_DEBUG=1
+WINDOW_SYSTEM=nulldrmws
+
+#include $(OUT)/config_kernel.mk
+include $(srctree)/$(src)/config_kernel.mk
+
+.SECONDARY:
+
+define symlink-source-file
+@if [ ! -e $(dir $@) ]; then mkdir -p $(dir $@); fi
+@if [ ! -h $@ ]; then ln -sf $< $@; fi
+endef
+
+bridge_base := $(BRIDGE_SOURCE_ROOT)
+
+#$(OUT)/$(TARGET_PRIMARY_ARCH)/kbuild/%.c: $(TOP)/%.c
+#      $(symlink-source-file)
+
+#$(OUT)/$(TARGET_PRIMARY_ARCH)/kbuild/generated/$(PVR_ARCH)/%.c: $(bridge_base)/%.c
+#      $(symlink-source-file)
+
+#$(OUT)/$(TARGET_PRIMARY_ARCH)/kbuild/external/%.c: $(abspath $(srctree))/%.c
+#      $(symlink-source-file)
+
+#ccflags-y += -D__linux__ -include $(OUT)/config_kernel.h \
+# -include kernel_config_compatibility.h \
+# -I$(OUT)/include -I$(TOP)/kernel/drivers/staging/imgtec
+ccflags-y += -D__linux__ -include $(srctree)/$(src)/config_kernel.h \
+ -include $(srctree)/drivers/gpu/drm/img/kernel_config_compatibility.h \
+ -I$(OUT)/include \
+ -I$(srctree)/drivers/gpu/drm/img \
+ -I$(srctree)/$(src)/hwdefs/rogue \
+ -I$(srctree)/$(src)/hwdefs/rogue/km
+
+include $(srctree)/$(src)/services/server/env/linux/Kbuild.mk
+include $(srctree)/$(src)/services/system/rogue/sf_7110/Kbuild.mk
+
+#include $(INTERNAL_KBUILD_MAKEFILES)
+
+define add-file-cflags
+CFLAGS_$(notdir $(1)) := $(CFLAGS_$(1))
+endef
+
+# Define old style CFLAG_ variables for kernels older than 5.4
+$(foreach _m,$(INTERNAL_KBUILD_OBJECTS:.o=-y), \
+ $(foreach _f,$($(_m)), \
+  $(if $(CFLAGS_$(_f)), \
+   $(eval $(call add-file-cflags,$(_f))))))
+
+#ifneq ($(KERNEL_DRIVER_DIR),)
+# ccflags-y += \
+#   -I$(abspath $(srctree))/$(KERNEL_DRIVER_DIR)/$(PVR_SYSTEM) \
+#   -I$(abspath $(srctree))/$(KERNEL_DRIVER_DIR)
+#endif
+
+$(if $($(PVRSRVKM_NAME)-y),,$(warning $(PVRSRVKM_NAME)-y was empty, which could mean that srvkm is missing from $$(KERNEL_COMPONENTS)))
+$(PVRSRVKM_NAME)-y += $(foreach _m,$(INTERNAL_EXTRA_KBUILD_OBJECTS:.o=),$($(_m)-y))
+
+#obj-m += $(INTERNAL_KBUILD_OBJECTS)
+obj-$(CONFIG_DRM_IMG_ROGUE) += $(INTERNAL_KBUILD_OBJECTS)
diff --git a/drivers/gpu/drm/img/img-rogue/config_kernel.h b/drivers/gpu/drm/img/img-rogue/config_kernel.h
new file mode 100644 (file)
index 0000000..4d96b6b
--- /dev/null
@@ -0,0 +1,162 @@
+#define PVRSRV_APPHINT_FIRMWARE_HEAP_POLICY 5
+#define PVRSRV_ENABLE_CCCB_GROW
+#define RGX_FW_FILENAME "rgx.fw"
+#define RGX_SH_FILENAME "rgx.sh"
+#define PVR_BUILD_DIR "sf_7110"
+#define PVR_BUILD_TYPE "release"
+#define PVRSRV_MODNAME "pvrsrvkm"
+#define PVRSYNC_MODNAME "pvr_sync"
+#define SUPPORT_RGX 1
+#define DISPLAY_CONTROLLER drm_starfive
+#define PVRSRV_HWPERF_COUNTERS_PERBLK 12
+#define RELEASE
+#define RGX_BVNC_CORE_KM_HEADER "cores/rgxcore_km_36.50.54.182.h"
+#define RGX_BNC_CONFIG_KM_HEADER "configs/rgxconfig_km_36.V.54.182.h"
+//#define PDUMP
+#define PVRSRV_NEED_PVR_DPF
+#define PVRSRV_NEED_PVR_ASSERT
+#define SUPPORT_RGXTQ_BRIDGE
+#define PVRSRV_POISON_ON_ALLOC_VALUE 0xd9
+#define PVRSRV_POISON_ON_FREE_VALUE 0x63
+#define RGX_NUM_OS_SUPPORTED 1
+#define RGX_OSID_0_DEFAULT_PRIORITY (1 - 0)
+#define RGX_OSID_1_DEFAULT_PRIORITY (1 - 1)
+#define RGX_OSID_2_DEFAULT_PRIORITY (1 - 2)
+#define RGX_OSID_3_DEFAULT_PRIORITY (1 - 3)
+#define RGX_OSID_4_DEFAULT_PRIORITY (1 - 4)
+#define RGX_OSID_5_DEFAULT_PRIORITY (1 - 5)
+#define RGX_OSID_6_DEFAULT_PRIORITY (1 - 6)
+#define RGX_OSID_7_DEFAULT_PRIORITY (1 - 7)
+#define RGX_HCS_DEFAULT_DEADLINE_MS 0xFFFFFFFFU
+#define PVRSRV_APPHINT_DRIVERMODE 0x7FFFFFFF
+#define RGX_FW_HEAP_SHIFT 25
+#define SUPPORT_POWMON_COMPONENT
+#define PVR_POWER_ACTOR_MEASUREMENT_PERIOD_MS 10U
+#define PVR_POWER_MONITOR_HWPERF
+#define PVR_LDM_DRIVER_REGISTRATION_NAME "pvrsrvkm"
+#define PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN 256
+#define ION_DEFAULT_HEAP_NAME "ion_system_heap"
+#define ION_DEFAULT_HEAP_ID_MASK (1 << ION_HEAP_TYPE_SYSTEM)
+#define PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT APPHNT_BLDVAR_DBGDUMPLIMIT
+#define PVRSRV_APPHINT_ENABLETRUSTEDDEVICEACECONFIG IMG_FALSE
+#define PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE 0x4000
+#define PVRSRV_APPHINT_HWPERFCLIENTBUFFERSIZE 786432
+#define PVRSRV_APPHINT_ENABLESIGNATURECHECKS APPHNT_BLDVAR_ENABLESIGNATURECHECKS
+#define PVRSRV_APPHINT_SIGNATURECHECKSBUFSIZE RGXFW_SIG_BUFFER_SIZE_MIN
+#define PVRSRV_APPHINT_ENABLEFULLSYNCTRACKING IMG_FALSE
+#define PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG IMG_TRUE
+#define PVRSRV_APPHINT_VALIDATEIRQ 0
+#define PVRSRV_APPHINT_DISABLECLOCKGATING 0
+#define PVRSRV_APPHINT_DISABLEDMOVERLAP 0
+#define PVRSRV_APPHINT_ENABLECDMKILLINGRANDMODE 0
+#define PVRSRV_APPHINT_ENABLERANDOMCONTEXTSWITCH 0
+#define PVRSRV_APPHINT_ENABLESOFTRESETCNTEXTSWITCH 0
+#define PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL
+#define PVRSRV_APPHINT_ENABLERDPOWERISLAND RGX_RD_POWER_ISLAND_DEFAULT
+#define PVRSRV_APPHINT_FIRMWAREPERF FW_PERF_CONF_NONE
+#define PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN
+#define PVRSRV_APPHINT_HWPERFDISABLECUSTOMCOUNTERFILTER 0
+#define PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB 2048
+#define PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB 2048
+#define PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS 50
+#define PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLGROUP 0
+#define PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLSCHEME 0
+#define PVRSRV_APPHINT_JONESDISABLEMASK 0
+#define PVRSRV_APPHINT_NEWFILTERINGMODE 1
+#define PVRSRV_APPHINT_TRUNCATEMODE 0
+#define PVRSRV_APPHINT_EMUMAXFREQ 0
+#define PVRSRV_APPHINT_GPIOVALIDATIONMODE 0
+#define PVRSRV_APPHINT_RGXBVNC ""
+#define PVRSRV_APPHINT_CLEANUPTHREADPRIORITY 5
+#define PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY 0
+#define PVRSRV_APPHINT_CACHEOPTHREADPRIORITY 1
+#define PVRSRV_APPHINT_ASSERTONHWRTRIGGER IMG_FALSE
+#define PVRSRV_APPHINT_ASSERTOUTOFMEMORY IMG_FALSE
+#define PVRSRV_APPHINT_CHECKMLIST APPHNT_BLDVAR_DEBUG
+#define PVRSRV_APPHINT_DISABLEFEDLOGGING IMG_FALSE
+#define PVRSRV_APPHINT_KCCB_SIZE_LOG2 7
+#define PVRSRV_APPHINT_ENABLEAPM RGX_ACTIVEPM_DEFAULT
+#define PVRSRV_APPHINT_ENABLEHTBLOGGROUP 0
+#define PVRSRV_APPHINT_ENABLELOGGROUP RGXFWIF_LOG_TYPE_NONE
+#define PVRSRV_APPHINT_FIRMWARELOGTYPE 0
+#define PVRSRV_APPHINT_FWTRACEBUFSIZEINDWORDS RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS
+#define PVRSRV_APPHINT_FBCDCVERSIONOVERRIDE 0
+#define PVRSRV_APPHINT_HTBOPERATIONMODE HTB_OPMODE_DROPOLDEST
+#define PVRSRV_APPHINT_HTBUFFERSIZE 64
+#define PVRSRV_APPHINT_ENABLEFTRACEGPU IMG_FALSE
+#define PVRSRV_APPHINT_HWPERFFWFILTER 0
+#define PVRSRV_APPHINT_HWPERFHOSTFILTER 0
+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_SERVICES 0
+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_EGL 0
+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGLES 0
+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENCL 0
+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_VULKAN 0
+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGL 0
+#define PVRSRV_APPHINT_TIMECORRCLOCK 0
+#define PVRSRV_APPHINT_ENABLEFWPOISONONFREE IMG_FALSE
+#define PVRSRV_APPHINT_FWPOISONONFREEVALUE 0xBD
+#define PVRSRV_APPHINT_ZEROFREELIST IMG_FALSE
+#define PVRSRV_APPHINT_GPUUNITSPOWERCHANGE IMG_FALSE
+#define PVRSRV_APPHINT_DISABLEPDUMPPANIC IMG_FALSE
+#define PVRSRV_APPHINT_CACHEOPCONFIG 0
+#define PVRSRV_APPHINT_CACHEOPUMKMHRESHOLDSIZE 0
+#define PVRSRV_APPHINT_IGNOREHWREPORTEDBVNC IMG_FALSE
+#define PVRSRV_APPHINT_PHYSMEMTESTPASSES APPHNT_PHYSMEMTEST_ENABLE
+#define PVRSRV_APPHINT_TESTSLRINTERVAL 0
+#define PVRSRV_APPHINT_RISCVDMITEST 0
+#define PVRSRV_APPHINT_VALIDATESOCUSCTIMERS 0
+#define SOC_TIMER_FREQ 20
+#define PDVFS_COM_HOST 1
+#define PDVFS_COM_AP 2
+#define PDVFS_COM_PMC 3
+#define PDVFS_COM_IMG_CLKDIV 4
+#define PDVFS_COM PDVFS_COM_HOST
+#define PVR_GPIO_MODE_GENERAL 1
+#define PVR_GPIO_MODE_POWMON_PIN 2
+#define PVR_GPIO_MODE PVR_GPIO_MODE_GENERAL
+#define PVRSRV_ENABLE_PROCESS_STATS
+#define SUPPORT_USC_BREAKPOINT
+//#define SUPPORT_TBI_INTERFACE
+#define SUPPORT_AGP
+#define RGXFW_SAFETY_WATCHDOG_PERIOD_IN_US 1000000
+#define PVR_ANNOTATION_MAX_LEN 96
+#define PVRSRV_DEVICE_INIT_MODE PVRSRV_LINUX_DEV_INIT_ON_PROBE
+#define SUPPORT_DI_BRG_IMPL
+#define PVR_LINUX_PHYSMEM_MAX_POOL_PAGES 10240
+#define PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES 20480
+#define PVR_DIRTY_BYTES_FLUSH_THRESHOLD 524288
+#define PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD 256
+#define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 2
+#define PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD 16384
+#define PDUMP_PARAM_INIT_STREAM_SIZE 0x200000
+#define PDUMP_PARAM_MAIN_STREAM_SIZE 0x1000000
+#define PDUMP_PARAM_DEINIT_STREAM_SIZE 0x10000
+#define PDUMP_PARAM_BLOCK_STREAM_SIZE 0x0
+#define PDUMP_SCRIPT_INIT_STREAM_SIZE 0x80000
+#define PDUMP_SCRIPT_MAIN_STREAM_SIZE 0x800000
+#define PDUMP_SCRIPT_DEINIT_STREAM_SIZE 0x10000
+#define PDUMP_SCRIPT_BLOCK_STREAM_SIZE 0x800000
+#define PDUMP_SPLIT_64BIT_REGISTER_ACCESS
+#define SUPPORT_NATIVE_FENCE_SYNC
+#define PVRSRV_STALLED_CCB_ACTION
+#define UPDATE_FENCE_CHECKPOINT_COUNT 1
+#define PVR_DRM_NAME "pvr"
+#define DEVICE_MEMSETCPY_ALIGN_IN_BYTES 16
+#define RGX_INITIAL_SLR_HOLDOFF_PERIOD_MS 0
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ3D 14
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D 14
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM 13
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA 15
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D 16
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC 13
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TDM 14
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_RDM 13
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ3D 17
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ2D 17
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_CDM 15
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TA 16
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_3D 17
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_KICKSYNC 13
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TDM 17
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_RDM 15
+#define SUPPORT_BUFFER_SYNC 1
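Two groups of values above are easier to read with the arithmetic spelled out: taking the PVRSRV_RGX_LOG2_CLIENT_CCB_* entries as log2 sizes, TA = 15 corresponds to a 32 KiB client CCB with a 64 KiB maximum (MAX = 16), and 3D = 16 to 64 KiB with a 128 KiB maximum; the RGX_OSID_n_DEFAULT_PRIORITY (1 - n) pattern simply ranks OSID 0 highest. A small stand-alone check, illustrative only and not driver code:

/* Illustrative only: expand a few of the configuration values above. */
#include <stdio.h>

#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA      15
#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TA  16
#define RGX_OSID_0_DEFAULT_PRIORITY (1 - 0)
#define RGX_OSID_7_DEFAULT_PRIORITY (1 - 7)

int main(void)
{
	/* 1 << 15 = 32768 bytes, 1 << 16 = 65536 bytes */
	printf("TA client CCB: %u bytes, max %u bytes\n",
	       1u << PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA,
	       1u << PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TA);

	/* OSID 0 -> priority 1, OSID 7 -> priority -6 */
	printf("OSID default priorities range from %d down to %d\n",
	       RGX_OSID_0_DEFAULT_PRIORITY, RGX_OSID_7_DEFAULT_PRIORITY);
	return 0;
}
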
diff --git a/drivers/gpu/drm/img/img-rogue/config_kernel.mk b/drivers/gpu/drm/img/img-rogue/config_kernel.mk
new file mode 100644 (file)
index 0000000..5038474
--- /dev/null
@@ -0,0 +1,52 @@
+override PVRSRV_DIR := services
+override HOST_PRIMARY_ARCH := host_x86_64
+override HOST_32BIT_ARCH := host_i386
+override HOST_FORCE_32BIT := -m32
+override HOST_ALL_ARCH := host_x86_64 host_i386
+override TARGET_PRIMARY_ARCH := target_riscv64
+override TARGET_SECONDARY_ARCH :=
+override TARGET_ALL_ARCH := target_riscv64
+override TARGET_FORCE_32BIT :=
+override PVR_ARCH := rogue
+override METAG_VERSION_NEEDED := 2.8.1.0.3
+override MIPS_VERSION_NEEDED := 2014.07-1
+override RISCV_VERSION_NEEDED := 1.0.1
+override KERNELDIR := /home/lisl/freelight-u-sdk/soft_3rdpart/IMG_GPU/linux/../../../work/linux
+override KERNEL_ID := 5.15.0-00006-g35a89f4886a9-dirty
+override PVRSRV_MODULE_BASEDIR := /lib/modules/5.15.0-00006-g35a89f4886a9-dirty/extra/
+override KERNEL_COMPONENTS := srvkm drm_starfive
+override KERNEL_CROSS_COMPILE := riscv64-linux-
+override WINDOW_SYSTEM := nulldrmws
+override PVRSRV_MODNAME := pvrsrvkm
+override PVR_BUILD_DIR := sf_7110
+override PVR_BUILD_TYPE := release
+override SUPPORT_RGX := 1
+override DISPLAY_CONTROLLER := drm_starfive
+override PVR_SYSTEM := sf_7110
+override PVR_LOADER :=
+override BUILD := release
+override SORT_BRIDGE_STRUCTS := 1
+override DEBUGLINK := 1
+override RGX_BNC := 36.V.54.182
+override SUPPORT_MIPS_64K_PAGE_SIZE :=
+override RGX_NUM_OS_SUPPORTED := 1
+override VMM_TYPE := stub
+override SUPPORT_POWMON_COMPONENT := 1
+#override PDUMP := 1
+override RGX_TIMECORR_CLOCK := mono
+override PDVFS_COM_HOST := 1
+override PDVFS_COM_AP := 2
+override PDVFS_COM_PMC := 3
+override PDVFS_COM_IMG_CLKDIV := 4
+override PDVFS_COM := PDVFS_COM_HOST
+override PVR_GPIO_MODE_GENERAL := 1
+override PVR_GPIO_MODE_POWMON_PIN := 2
+override PVR_GPIO_MODE := PVR_GPIO_MODE_GENERAL
+override PVR_HANDLE_BACKEND := idr
+override SUPPORT_DMABUF_BRIDGE := 1
+override SUPPORT_USC_BREAKPOINT := 1
+override SUPPORT_DI_BRG_IMPL := 1
+override SUPPORT_NATIVE_FENCE_SYNC := 1
+override SUPPORT_DMA_FENCE := 1
+override SUPPORT_BUFFER_SYNC := 1
+override DEFINE_X86_FEATURE_LA57 := 1
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/cache_bridge/client_cache_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/cache_bridge/client_cache_bridge.h
new file mode 100644 (file)
index 0000000..1dec13f
--- /dev/null
@@ -0,0 +1,80 @@
+/*******************************************************************************
+@File
+@Title          Client bridge header for cache
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for cache
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_CACHE_BRIDGE_H
+#define CLIENT_CACHE_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_cache_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpQueue(IMG_HANDLE hBridge,
+                                            IMG_UINT32 ui32NumCacheOps,
+                                            IMG_HANDLE * phPMR,
+                                            IMG_UINT64 * pui64Address,
+                                            IMG_DEVMEM_OFFSET_T * puiOffset,
+                                            IMG_DEVMEM_SIZE_T * puiSize,
+                                            PVRSRV_CACHE_OP * piuCacheOp,
+                                            IMG_UINT32 ui32OpTimeline);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpExec(IMG_HANDLE hBridge,
+                                           IMG_HANDLE hPMR,
+                                           IMG_UINT64 ui64Address,
+                                           IMG_DEVMEM_OFFSET_T uiOffset,
+                                           IMG_DEVMEM_SIZE_T uiSize, PVRSRV_CACHE_OP iuCacheOp);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpLog(IMG_HANDLE hBridge,
+                                          IMG_HANDLE hPMR,
+                                          IMG_UINT64 ui64Address,
+                                          IMG_DEVMEM_OFFSET_T uiOffset,
+                                          IMG_DEVMEM_SIZE_T uiSize,
+                                          IMG_INT64 i64StartTime,
+                                          IMG_INT64 i64EndTime, PVRSRV_CACHE_OP iuCacheOp);
+
+#endif /* CLIENT_CACHE_BRIDGE_H */
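As a usage note: these prototypes are called from server-context services code, and the direct-bridge implementation that follows forwards them straight to the cache_km.h entry points. A minimal, hypothetical call site, sketched here only to show which argument is which; the handles are assumed to have been obtained elsewhere.

/* Hypothetical call site for the direct cache bridge; sketch only. */
#include "client_cache_bridge.h"

static PVRSRV_ERROR sketch_cache_maintain_range(IMG_HANDLE hBridge,
						IMG_HANDLE hPMR,
						IMG_UINT64 ui64CpuVirtAddr,
						IMG_DEVMEM_OFFSET_T uiOffset,
						IMG_DEVMEM_SIZE_T uiSize,
						PVRSRV_CACHE_OP iuCacheOp)
{
	/* One synchronous cache maintenance operation on a PMR range. */
	return BridgeCacheOpExec(hBridge, hPMR, ui64CpuVirtAddr,
				 uiOffset, uiSize, iuCacheOp);
}
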
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/cache_bridge/client_cache_direct_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/cache_bridge/client_cache_direct_bridge.c
new file mode 100644 (file)
index 0000000..9691bae
--- /dev/null
@@ -0,0 +1,112 @@
+/*******************************************************************************
+@File
+@Title          Direct client bridge for cache
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the client side of the bridge for cache
+                which is used in calls from Server context.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_cache_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "cache_ops.h"
+
+#include "cache_km.h"
+
+IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpQueue(IMG_HANDLE hBridge,
+                                            IMG_UINT32 ui32NumCacheOps,
+                                            IMG_HANDLE * phPMR,
+                                            IMG_UINT64 * pui64Address,
+                                            IMG_DEVMEM_OFFSET_T * puiOffset,
+                                            IMG_DEVMEM_SIZE_T * puiSize,
+                                            PVRSRV_CACHE_OP * piuCacheOp,
+                                            IMG_UINT32 ui32OpTimeline)
+{
+       PVRSRV_ERROR eError;
+       PMR **psPMRInt;
+
+       psPMRInt = (PMR **) phPMR;
+
+       eError =
+           CacheOpQueue(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                        ui32NumCacheOps,
+                        psPMRInt, pui64Address, puiOffset, puiSize, piuCacheOp, ui32OpTimeline);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpExec(IMG_HANDLE hBridge,
+                                           IMG_HANDLE hPMR,
+                                           IMG_UINT64 ui64Address,
+                                           IMG_DEVMEM_OFFSET_T uiOffset,
+                                           IMG_DEVMEM_SIZE_T uiSize, PVRSRV_CACHE_OP iuCacheOp)
+{
+       PVRSRV_ERROR eError;
+       PMR *psPMRInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psPMRInt = (PMR *) hPMR;
+
+       eError = CacheOpValExec(psPMRInt, ui64Address, uiOffset, uiSize, iuCacheOp);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpLog(IMG_HANDLE hBridge,
+                                          IMG_HANDLE hPMR,
+                                          IMG_UINT64 ui64Address,
+                                          IMG_DEVMEM_OFFSET_T uiOffset,
+                                          IMG_DEVMEM_SIZE_T uiSize,
+                                          IMG_INT64 i64StartTime,
+                                          IMG_INT64 i64EndTime, PVRSRV_CACHE_OP iuCacheOp)
+{
+       PVRSRV_ERROR eError;
+       PMR *psPMRInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psPMRInt = (PMR *) hPMR;
+
+       eError =
+           CacheOpLog(psPMRInt,
+                      ui64Address, uiOffset, uiSize, i64StartTime, i64EndTime, iuCacheOp);
+
+       return eError;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/cache_bridge/common_cache_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/cache_bridge/common_cache_bridge.h
new file mode 100644 (file)
index 0000000..cc84875
--- /dev/null
@@ -0,0 +1,126 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for cache
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for cache
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_CACHE_BRIDGE_H
+#define COMMON_CACHE_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "cache_ops.h"
+
+#define PVRSRV_BRIDGE_CACHE_CMD_FIRST                  0
+#define PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE                       PVRSRV_BRIDGE_CACHE_CMD_FIRST+0
+#define PVRSRV_BRIDGE_CACHE_CACHEOPEXEC                        PVRSRV_BRIDGE_CACHE_CMD_FIRST+1
+#define PVRSRV_BRIDGE_CACHE_CACHEOPLOG                 PVRSRV_BRIDGE_CACHE_CMD_FIRST+2
+#define PVRSRV_BRIDGE_CACHE_CMD_LAST                   (PVRSRV_BRIDGE_CACHE_CMD_FIRST+2)
+
+/*******************************************
+            CacheOpQueue
+ *******************************************/
+
+/* Bridge in structure for CacheOpQueue */
+typedef struct PVRSRV_BRIDGE_IN_CACHEOPQUEUE_TAG
+{
+       PVRSRV_CACHE_OP *piuCacheOp;
+       IMG_UINT64 *pui64Address;
+       IMG_DEVMEM_OFFSET_T *puiOffset;
+       IMG_DEVMEM_SIZE_T *puiSize;
+       IMG_HANDLE *phPMR;
+       IMG_UINT32 ui32NumCacheOps;
+       IMG_UINT32 ui32OpTimeline;
+} __packed PVRSRV_BRIDGE_IN_CACHEOPQUEUE;
+
+/* Bridge out structure for CacheOpQueue */
+typedef struct PVRSRV_BRIDGE_OUT_CACHEOPQUEUE_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_CACHEOPQUEUE;
+
+/*******************************************
+            CacheOpExec
+ *******************************************/
+
+/* Bridge in structure for CacheOpExec */
+typedef struct PVRSRV_BRIDGE_IN_CACHEOPEXEC_TAG
+{
+       IMG_UINT64 ui64Address;
+       IMG_DEVMEM_OFFSET_T uiOffset;
+       IMG_DEVMEM_SIZE_T uiSize;
+       IMG_HANDLE hPMR;
+       PVRSRV_CACHE_OP iuCacheOp;
+} __packed PVRSRV_BRIDGE_IN_CACHEOPEXEC;
+
+/* Bridge out structure for CacheOpExec */
+typedef struct PVRSRV_BRIDGE_OUT_CACHEOPEXEC_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_CACHEOPEXEC;
+
+/*******************************************
+            CacheOpLog
+ *******************************************/
+
+/* Bridge in structure for CacheOpLog */
+typedef struct PVRSRV_BRIDGE_IN_CACHEOPLOG_TAG
+{
+       IMG_INT64 i64EndTime;
+       IMG_INT64 i64StartTime;
+       IMG_UINT64 ui64Address;
+       IMG_DEVMEM_OFFSET_T uiOffset;
+       IMG_DEVMEM_SIZE_T uiSize;
+       IMG_HANDLE hPMR;
+       PVRSRV_CACHE_OP iuCacheOp;
+} __packed PVRSRV_BRIDGE_IN_CACHEOPLOG;
+
+/* Bridge out structure for CacheOpLog */
+typedef struct PVRSRV_BRIDGE_OUT_CACHEOPLOG_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_CACHEOPLOG;
+
+#endif /* COMMON_CACHE_BRIDGE_H */
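For orientation, the packed structures above are the wire format between the client library and the server dispatcher: the caller fills an IN structure, the call is identified by PVRSRV_BRIDGE_CACHE_CMD_FIRST plus an offset, and the result comes back in the OUT structure's eError field. The sketch below is illustrative only; submit_bridge_call() is a hypothetical stand-in for the real ioctl transport, and the real dispatcher also keys on the bridge group (PVRSRV_BRIDGE_CACHE), as server_cache_bridge.c below shows.

/* Illustrative sketch: filling the CacheOpExec bridge structures. */
#include <string.h>
#include "common_cache_bridge.h"

/* Hypothetical transport helper; not part of the DDK. */
extern void submit_bridge_call(unsigned int uiCmd,
			       void *pvIn, size_t uiInSize,
			       void *pvOut, size_t uiOutSize);

static PVRSRV_ERROR sketch_cache_op_exec(IMG_HANDLE hPMR,
					 IMG_UINT64 ui64Address,
					 IMG_DEVMEM_OFFSET_T uiOffset,
					 IMG_DEVMEM_SIZE_T uiSize,
					 PVRSRV_CACHE_OP iuCacheOp)
{
	PVRSRV_BRIDGE_IN_CACHEOPEXEC sIn;
	PVRSRV_BRIDGE_OUT_CACHEOPEXEC sOut;

	memset(&sIn, 0, sizeof(sIn));
	sIn.hPMR = hPMR;
	sIn.ui64Address = ui64Address;
	sIn.uiOffset = uiOffset;
	sIn.uiSize = uiSize;
	sIn.iuCacheOp = iuCacheOp;

	/* PVRSRV_BRIDGE_CACHE_CACHEOPEXEC == PVRSRV_BRIDGE_CACHE_CMD_FIRST + 1 */
	submit_bridge_call(PVRSRV_BRIDGE_CACHE_CACHEOPEXEC,
			   &sIn, sizeof(sIn), &sOut, sizeof(sOut));

	return sOut.eError;
}
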
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/cache_bridge/server_cache_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/cache_bridge/server_cache_bridge.c
new file mode 100644 (file)
index 0000000..18509ba
--- /dev/null
@@ -0,0 +1,457 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for cache
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for cache
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "cache_km.h"
+
+#include "common_cache_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static_assert(CACHE_BATCH_MAX <= IMG_UINT32_MAX,
+             "CACHE_BATCH_MAX must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeCacheOpQueue(IMG_UINT32 ui32DispatchTableEntry,
+                        IMG_UINT8 * psCacheOpQueueIN_UI8,
+                        IMG_UINT8 * psCacheOpQueueOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_CACHEOPQUEUE *psCacheOpQueueIN =
+           (PVRSRV_BRIDGE_IN_CACHEOPQUEUE *) IMG_OFFSET_ADDR(psCacheOpQueueIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_CACHEOPQUEUE *psCacheOpQueueOUT =
+           (PVRSRV_BRIDGE_OUT_CACHEOPQUEUE *) IMG_OFFSET_ADDR(psCacheOpQueueOUT_UI8, 0);
+
+       PMR **psPMRInt = NULL;
+       IMG_HANDLE *hPMRInt2 = NULL;
+       IMG_UINT64 *ui64AddressInt = NULL;
+       IMG_DEVMEM_OFFSET_T *uiOffsetInt = NULL;
+       IMG_DEVMEM_SIZE_T *uiSizeInt = NULL;
+       PVRSRV_CACHE_OP *iuCacheOpInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psCacheOpQueueIN->ui32NumCacheOps * sizeof(PMR *)) +
+           ((IMG_UINT64) psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE)) +
+           ((IMG_UINT64) psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64)) +
+           ((IMG_UINT64) psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T)) +
+           ((IMG_UINT64) psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T)) +
+           ((IMG_UINT64) psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP)) + 0;
+
+       if (unlikely(psCacheOpQueueIN->ui32NumCacheOps > CACHE_BATCH_MAX))
+       {
+               psCacheOpQueueOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto CacheOpQueue_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psCacheOpQueueOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto CacheOpQueue_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psCacheOpQueueIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psCacheOpQueueIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psCacheOpQueueOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto CacheOpQueue_exit;
+                       }
+               }
+       }
+
+       if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+       {
+               psPMRInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               OSCachedMemSet(psPMRInt, 0, psCacheOpQueueIN->ui32NumCacheOps * sizeof(PMR *));
+               ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(PMR *);
+               hPMRInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE);
+       }
+
+       /* Copy the data over */
+       if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, hPMRInt2, (const void __user *)psCacheOpQueueIN->phPMR,
+                    psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+               {
+                       psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto CacheOpQueue_exit;
+               }
+       }
+       if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+       {
+               ui64AddressInt = (IMG_UINT64 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64);
+       }
+
+       /* Copy the data over */
+       if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui64AddressInt, (const void __user *)psCacheOpQueueIN->pui64Address,
+                    psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64)) != PVRSRV_OK)
+               {
+                       psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto CacheOpQueue_exit;
+               }
+       }
+       if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+       {
+               uiOffsetInt =
+                   (IMG_DEVMEM_OFFSET_T *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T);
+       }
+
+       /* Copy the data over */
+       if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiOffsetInt, (const void __user *)psCacheOpQueueIN->puiOffset,
+                    psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T)) != PVRSRV_OK)
+               {
+                       psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto CacheOpQueue_exit;
+               }
+       }
+       if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+       {
+               uiSizeInt = (IMG_DEVMEM_SIZE_T *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T);
+       }
+
+       /* Copy the data over */
+       if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiSizeInt, (const void __user *)psCacheOpQueueIN->puiSize,
+                    psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T)) != PVRSRV_OK)
+               {
+                       psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto CacheOpQueue_exit;
+               }
+       }
+       if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+       {
+               iuCacheOpInt =
+                   (PVRSRV_CACHE_OP *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP);
+       }
+
+       /* Copy the data over */
+       if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, iuCacheOpInt, (const void __user *)psCacheOpQueueIN->piuCacheOp,
+                    psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP)) != PVRSRV_OK)
+               {
+                       psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto CacheOpQueue_exit;
+               }
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < psCacheOpQueueIN->ui32NumCacheOps; i++)
+               {
+                       /* Look up the address from the handle */
+                       psCacheOpQueueOUT->eError =
+                           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                                      (void **)&psPMRInt[i],
+                                                      hPMRInt2[i],
+                                                      PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+                       if (unlikely(psCacheOpQueueOUT->eError != PVRSRV_OK))
+                       {
+                               UnlockHandle(psConnection->psHandleBase);
+                               goto CacheOpQueue_exit;
+                       }
+               }
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psCacheOpQueueOUT->eError =
+           CacheOpQueue(psConnection, OSGetDevNode(psConnection),
+                        psCacheOpQueueIN->ui32NumCacheOps,
+                        psPMRInt,
+                        ui64AddressInt,
+                        uiOffsetInt, uiSizeInt, iuCacheOpInt, psCacheOpQueueIN->ui32OpTimeline);
+
+CacheOpQueue_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       if (hPMRInt2)
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < psCacheOpQueueIN->ui32NumCacheOps; i++)
+               {
+
+                       /* Unreference the previously looked up handle */
+                       if (psPMRInt[i])
+                       {
+                               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                                           hPMRInt2[i],
+                                                           PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+                       }
+               }
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psCacheOpQueueOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeCacheOpExec(IMG_UINT32 ui32DispatchTableEntry,
+                       IMG_UINT8 * psCacheOpExecIN_UI8,
+                       IMG_UINT8 * psCacheOpExecOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_CACHEOPEXEC *psCacheOpExecIN =
+           (PVRSRV_BRIDGE_IN_CACHEOPEXEC *) IMG_OFFSET_ADDR(psCacheOpExecIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_CACHEOPEXEC *psCacheOpExecOUT =
+           (PVRSRV_BRIDGE_OUT_CACHEOPEXEC *) IMG_OFFSET_ADDR(psCacheOpExecOUT_UI8, 0);
+
+       IMG_HANDLE hPMR = psCacheOpExecIN->hPMR;
+       PMR *psPMRInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psCacheOpExecOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRInt,
+                                      hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psCacheOpExecOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto CacheOpExec_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psCacheOpExecOUT->eError =
+           CacheOpValExec(psPMRInt,
+                          psCacheOpExecIN->ui64Address,
+                          psCacheOpExecIN->uiOffset,
+                          psCacheOpExecIN->uiSize, psCacheOpExecIN->iuCacheOp);
+
+CacheOpExec_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeCacheOpLog(IMG_UINT32 ui32DispatchTableEntry,
+                      IMG_UINT8 * psCacheOpLogIN_UI8,
+                      IMG_UINT8 * psCacheOpLogOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_CACHEOPLOG *psCacheOpLogIN =
+           (PVRSRV_BRIDGE_IN_CACHEOPLOG *) IMG_OFFSET_ADDR(psCacheOpLogIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_CACHEOPLOG *psCacheOpLogOUT =
+           (PVRSRV_BRIDGE_OUT_CACHEOPLOG *) IMG_OFFSET_ADDR(psCacheOpLogOUT_UI8, 0);
+
+       IMG_HANDLE hPMR = psCacheOpLogIN->hPMR;
+       PMR *psPMRInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psCacheOpLogOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRInt,
+                                      hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psCacheOpLogOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto CacheOpLog_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psCacheOpLogOUT->eError =
+           CacheOpLog(psPMRInt,
+                      psCacheOpLogIN->ui64Address,
+                      psCacheOpLogIN->uiOffset,
+                      psCacheOpLogIN->uiSize,
+                      psCacheOpLogIN->i64StartTime,
+                      psCacheOpLogIN->i64EndTime, psCacheOpLogIN->iuCacheOp);
+
+CacheOpLog_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitCACHEBridge(void);
+void DeinitCACHEBridge(void);
+
+/*
+ * Register all CACHE functions with services
+ */
+PVRSRV_ERROR InitCACHEBridge(void)
+{
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE,
+                             PVRSRVBridgeCacheOpQueue, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPEXEC,
+                             PVRSRVBridgeCacheOpExec, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPLOG,
+                             PVRSRVBridgeCacheOpLog, NULL);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all cache functions with services
+ */
+void DeinitCACHEBridge(void)
+{
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPEXEC);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPLOG);
+
+}
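The CacheOpQueue handler above illustrates a pattern used throughout these generated server bridges: the total array size is computed in 64 bits, rejected if the batch exceeds CACHE_BATCH_MAX or the size exceeds IMG_UINT32_MAX, and the copy-in buffers reuse the word-aligned tail of the fixed bridge input buffer when it is large enough, falling back to a heap allocation otherwise. Below is a simplified, self-contained sketch of that sizing-and-fallback logic in plain C; the names and limits are hypothetical stand-ins, not the DDK's own.

#include <stdint.h>
#include <stdlib.h>

#define SKETCH_BRIDGE_IN_SIZE 0x800u  /* stand-in for PVRSRV_MAX_BRIDGE_IN_SIZE */

/*
 * Return a scratch buffer of 'needed' bytes: either the unused, word-aligned
 * tail of the bridge input buffer or a fresh heap allocation. *out_allocated
 * tells the caller whether the buffer must be freed afterwards.
 */
static void *sketch_get_scratch(uint8_t *bridge_in, size_t header_size,
				uint64_t needed, int *out_allocated)
{
	size_t offset = (header_size + sizeof(unsigned long) - 1) &
			~(sizeof(unsigned long) - 1);   /* align up to a word */
	size_t spare = offset >= SKETCH_BRIDGE_IN_SIZE ?
			0 : SKETCH_BRIDGE_IN_SIZE - offset;

	if (needed > UINT32_MAX)        /* mirrors the ui64BufferSize check */
		return NULL;

	if (needed <= spare) {          /* reuse the input buffer's tail */
		*out_allocated = 0;
		return bridge_in + offset;
	}

	*out_allocated = 1;             /* otherwise fall back to the heap */
	return malloc((size_t)needed);
}
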
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/cmm_bridge/common_cmm_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/cmm_bridge/common_cmm_bridge.h
new file mode 100644 (file)
index 0000000..da48de3
--- /dev/null
@@ -0,0 +1,114 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for cmm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for cmm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_CMM_BRIDGE_H
+#define COMMON_CMM_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+
+#define PVRSRV_BRIDGE_CMM_CMD_FIRST                    0
+#define PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX                   PVRSRV_BRIDGE_CMM_CMD_FIRST+0
+#define PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX                 PVRSRV_BRIDGE_CMM_CMD_FIRST+1
+#define PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX                    PVRSRV_BRIDGE_CMM_CMD_FIRST+2
+#define PVRSRV_BRIDGE_CMM_CMD_LAST                     (PVRSRV_BRIDGE_CMM_CMD_FIRST+2)
+
+/*******************************************
+            DevmemIntExportCtx
+ *******************************************/
+
+/* Bridge in structure for DevmemIntExportCtx */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX_TAG
+{
+       IMG_HANDLE hContext;
+       IMG_HANDLE hPMR;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX;
+
+/* Bridge out structure for DevmemIntExportCtx */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX_TAG
+{
+       IMG_HANDLE hContextExport;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX;
+
+/*******************************************
+            DevmemIntUnexportCtx
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnexportCtx */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX_TAG
+{
+       IMG_HANDLE hContextExport;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX;
+
+/* Bridge out structure for DevmemIntUnexportCtx */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX;
+
+/*******************************************
+            DevmemIntAcquireRemoteCtx
+ *******************************************/
+
+/* Bridge in structure for DevmemIntAcquireRemoteCtx */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX_TAG
+{
+       IMG_HANDLE hPMR;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX;
+
+/* Bridge out structure for DevmemIntAcquireRemoteCtx */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX_TAG
+{
+       IMG_HANDLE hContext;
+       IMG_HANDLE hPrivData;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX;
+
+#endif /* COMMON_CMM_BRIDGE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/cmm_bridge/server_cmm_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/cmm_bridge/server_cmm_bridge.c
new file mode 100644 (file)
index 0000000..b95f858
--- /dev/null
@@ -0,0 +1,409 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for cmm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for cmm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "pmr.h"
+#include "devicemem_server.h"
+
+#include "common_cmm_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#if !defined(EXCLUDE_CMM_BRIDGE)
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
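+/* Handle release callback: unexports the DEVMEMINT_CTX_EXPORT when the
+ * corresponding export handle is destroyed. */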
+static PVRSRV_ERROR _DevmemIntExportCtxpsContextExportIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = DevmemIntUnexportCtx((DEVMEMINT_CTX_EXPORT *) pvData);
+       return eError;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntExportCtx(IMG_UINT32 ui32DispatchTableEntry,
+                              IMG_UINT8 * psDevmemIntExportCtxIN_UI8,
+                              IMG_UINT8 * psDevmemIntExportCtxOUT_UI8,
+                              CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX *psDevmemIntExportCtxIN =
+           (PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX *) IMG_OFFSET_ADDR(psDevmemIntExportCtxIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX *psDevmemIntExportCtxOUT =
+           (PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX *) IMG_OFFSET_ADDR(psDevmemIntExportCtxOUT_UI8,
+                                                                    0);
+
+       IMG_HANDLE hContext = psDevmemIntExportCtxIN->hContext;
+       DEVMEMINT_CTX *psContextInt = NULL;
+       IMG_HANDLE hPMR = psDevmemIntExportCtxIN->hPMR;
+       PMR *psPMRInt = NULL;
+       DEVMEMINT_CTX_EXPORT *psContextExportInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psDevmemIntExportCtxOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psContextInt,
+                                      hContext, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE);
+       if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntExportCtx_exit;
+       }
+
+       /* Look up the address from the handle */
+       psDevmemIntExportCtxOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRInt,
+                                      hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntExportCtx_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psDevmemIntExportCtxOUT->eError =
+           DevmemIntExportCtx(psContextInt, psPMRInt, &psContextExportInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK))
+       {
+               goto DevmemIntExportCtx_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psDevmemIntExportCtxOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                                                   &psDevmemIntExportCtxOUT->
+                                                                   hContextExport,
+                                                                   (void *)psContextExportInt,
+                                                                   PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT,
+                                                                   PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+                                                                   (PFN_HANDLE_RELEASE) &
+                                                                   _DevmemIntExportCtxpsContextExportIntRelease);
+       if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntExportCtx_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+DevmemIntExportCtx_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psContextInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hContext, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+       }
+
+       /* Unreference the previously looked up handle */
+       if (psPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release the lock now that the looked-up handles have been cleaned up. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       if (psDevmemIntExportCtxOUT->eError != PVRSRV_OK)
+       {
+               if (psContextExportInt)
+               {
+                       LockHandle(KERNEL_HANDLE_BASE);
+                       DevmemIntUnexportCtx(psContextExportInt);
+                       UnlockHandle(KERNEL_HANDLE_BASE);
+               }
+       }
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnexportCtx(IMG_UINT32 ui32DispatchTableEntry,
+                                IMG_UINT8 * psDevmemIntUnexportCtxIN_UI8,
+                                IMG_UINT8 * psDevmemIntUnexportCtxOUT_UI8,
+                                CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX *psDevmemIntUnexportCtxIN =
+           (PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX *) IMG_OFFSET_ADDR(psDevmemIntUnexportCtxIN_UI8,
+                                                                     0);
+       PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX *psDevmemIntUnexportCtxOUT =
+           (PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX *)
+           IMG_OFFSET_ADDR(psDevmemIntUnexportCtxOUT_UI8, 0);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psHandleBase);
+
+       psDevmemIntUnexportCtxOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+                                             (IMG_HANDLE) psDevmemIntUnexportCtxIN->hContextExport,
+                                             PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT);
+       if (unlikely((psDevmemIntUnexportCtxOUT->eError != PVRSRV_OK) &&
+                    (psDevmemIntUnexportCtxOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+                    (psDevmemIntUnexportCtxOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s",
+                        __func__, PVRSRVGetErrorString(psDevmemIntUnexportCtxOUT->eError)));
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntUnexportCtx_exit;
+       }
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+DevmemIntUnexportCtx_exit:
+
+       return 0;
+}
+
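+/* Handle release callback: destroys the device memory context when the
+ * handle allocated below is released. */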
+static PVRSRV_ERROR _DevmemIntAcquireRemoteCtxpsContextIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = DevmemIntCtxDestroy((DEVMEMINT_CTX *) pvData);
+       return eError;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntAcquireRemoteCtx(IMG_UINT32 ui32DispatchTableEntry,
+                                     IMG_UINT8 * psDevmemIntAcquireRemoteCtxIN_UI8,
+                                     IMG_UINT8 * psDevmemIntAcquireRemoteCtxOUT_UI8,
+                                     CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX *psDevmemIntAcquireRemoteCtxIN =
+           (PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX *)
+           IMG_OFFSET_ADDR(psDevmemIntAcquireRemoteCtxIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX *psDevmemIntAcquireRemoteCtxOUT =
+           (PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX *)
+           IMG_OFFSET_ADDR(psDevmemIntAcquireRemoteCtxOUT_UI8, 0);
+
+       IMG_HANDLE hPMR = psDevmemIntAcquireRemoteCtxIN->hPMR;
+       PMR *psPMRInt = NULL;
+       DEVMEMINT_CTX *psContextInt = NULL;
+       IMG_HANDLE hPrivDataInt = NULL;
+
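+       /* Initialise the output handle so the error path below can tell
+        * whether handle creation succeeded before trying to destroy it. */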
+       psDevmemIntAcquireRemoteCtxOUT->hContext = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psDevmemIntAcquireRemoteCtxOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRInt,
+                                      hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntAcquireRemoteCtx_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psDevmemIntAcquireRemoteCtxOUT->eError =
+           DevmemIntAcquireRemoteCtx(psPMRInt, &psContextInt, &hPrivDataInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK))
+       {
+               goto DevmemIntAcquireRemoteCtx_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psDevmemIntAcquireRemoteCtxOUT->eError =
+           PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                     &psDevmemIntAcquireRemoteCtxOUT->hContext,
+                                     (void *)psContextInt, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+                                     PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+                                     (PFN_HANDLE_RELEASE) &
+                                     _DevmemIntAcquireRemoteCtxpsContextIntRelease);
+       if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntAcquireRemoteCtx_exit;
+       }
+
+       psDevmemIntAcquireRemoteCtxOUT->eError =
+           PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+                                        &psDevmemIntAcquireRemoteCtxOUT->hPrivData,
+                                        (void *)hPrivDataInt, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+                                        PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+                                        psDevmemIntAcquireRemoteCtxOUT->hContext);
+       if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntAcquireRemoteCtx_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+DevmemIntAcquireRemoteCtx_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release the lock now that the looked-up handles have been cleaned up. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       if (psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)
+       {
+               if (psDevmemIntAcquireRemoteCtxOUT->hContext)
+               {
+                       PVRSRV_ERROR eError;
+
+                       /* Lock over handle creation cleanup. */
+                       LockHandle(psConnection->psHandleBase);
+
+                       eError = PVRSRVDestroyHandleUnlocked(psConnection->psHandleBase,
+                                                            (IMG_HANDLE)
+                                                            psDevmemIntAcquireRemoteCtxOUT->
+                                                            hContext,
+                                                            PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+                       if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)))
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: %s", __func__, PVRSRVGetErrorString(eError)));
+                       }
+                       /* Releasing the handle should free/destroy/release the resource.
+                        * This should never fail... */
+                       PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+                       /* Avoid freeing/destroying/releasing the resource a second time below */
+                       psContextInt = NULL;
+                       /* Release now we have cleaned up creation handles. */
+                       UnlockHandle(psConnection->psHandleBase);
+
+               }
+
+               if (psContextInt)
+               {
+                       DevmemIntCtxDestroy(psContextInt);
+               }
+       }
+
+       return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+#endif /* EXCLUDE_CMM_BRIDGE */
+
+#if !defined(EXCLUDE_CMM_BRIDGE)
+PVRSRV_ERROR InitCMMBridge(void);
+void DeinitCMMBridge(void);
+
+/*
+ * Register all CMM functions with services
+ */
+PVRSRV_ERROR InitCMMBridge(void)
+{
+
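+       /* Register each CMM bridge function in the dispatch table. */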
+       SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX,
+                             PVRSRVBridgeDevmemIntExportCtx, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX,
+                             PVRSRVBridgeDevmemIntUnexportCtx, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX,
+                             PVRSRVBridgeDevmemIntAcquireRemoteCtx, NULL);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all CMM functions from services
+ */
+void DeinitCMMBridge(void)
+{
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX);
+
+}
+#else /* EXCLUDE_CMM_BRIDGE */
+/* This bridge is conditional on EXCLUDE_CMM_BRIDGE - when defined,
+ * do not populate the dispatch table with its functions
+ */
+#define InitCMMBridge() \
+       PVRSRV_OK
+
+#define DeinitCMMBridge()
+
+#endif /* EXCLUDE_CMM_BRIDGE */
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/devicememhistory_bridge/client_devicememhistory_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/devicememhistory_bridge/client_devicememhistory_bridge.h
new file mode 100644 (file)
index 0000000..bfa6bfb
--- /dev/null
@@ -0,0 +1,111 @@
+/*******************************************************************************
+@File
+@Title          Client bridge header for devicememhistory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for devicememhistory
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_DEVICEMEMHISTORY_BRIDGE_H
+#define CLIENT_DEVICEMEMHISTORY_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_devicememhistory_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryMap(IMG_HANDLE hBridge,
+                                                   IMG_HANDLE hPMR,
+                                                   IMG_DEVMEM_SIZE_T uiOffset,
+                                                   IMG_DEV_VIRTADDR sDevVAddr,
+                                                   IMG_DEVMEM_SIZE_T uiSize,
+                                                   const IMG_CHAR * puiText,
+                                                   IMG_UINT32 ui32Log2PageSize,
+                                                   IMG_UINT32 ui32AllocationIndex,
+                                                   IMG_UINT32 * pui32AllocationIndexOut);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryUnmap(IMG_HANDLE hBridge,
+                                                     IMG_HANDLE hPMR,
+                                                     IMG_DEVMEM_SIZE_T uiOffset,
+                                                     IMG_DEV_VIRTADDR sDevVAddr,
+                                                     IMG_DEVMEM_SIZE_T uiSize,
+                                                     const IMG_CHAR * puiText,
+                                                     IMG_UINT32 ui32Log2PageSize,
+                                                     IMG_UINT32 ui32AllocationIndex,
+                                                     IMG_UINT32 * pui32AllocationIndexOut);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryMapVRange(IMG_HANDLE hBridge,
+                                                         IMG_DEV_VIRTADDR sBaseDevVAddr,
+                                                         IMG_UINT32 ui32ui32StartPage,
+                                                         IMG_UINT32 ui32NumPages,
+                                                         IMG_DEVMEM_SIZE_T uiAllocSize,
+                                                         const IMG_CHAR * puiText,
+                                                         IMG_UINT32 ui32Log2PageSize,
+                                                         IMG_UINT32 ui32AllocationIndex,
+                                                         IMG_UINT32 * pui32AllocationIndexOut);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryUnmapVRange(IMG_HANDLE hBridge,
+                                                           IMG_DEV_VIRTADDR sBaseDevVAddr,
+                                                           IMG_UINT32 ui32ui32StartPage,
+                                                           IMG_UINT32 ui32NumPages,
+                                                           IMG_DEVMEM_SIZE_T uiAllocSize,
+                                                           const IMG_CHAR * puiText,
+                                                           IMG_UINT32 ui32Log2PageSize,
+                                                           IMG_UINT32 ui32AllocationIndex,
+                                                           IMG_UINT32 * pui32AllocationIndexOut);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistorySparseChange(IMG_HANDLE hBridge,
+                                                            IMG_HANDLE hPMR,
+                                                            IMG_DEVMEM_SIZE_T uiOffset,
+                                                            IMG_DEV_VIRTADDR sDevVAddr,
+                                                            IMG_DEVMEM_SIZE_T uiSize,
+                                                            const IMG_CHAR * puiText,
+                                                            IMG_UINT32 ui32Log2PageSize,
+                                                            IMG_UINT32 ui32AllocPageCount,
+                                                            IMG_UINT32 * pui32AllocPageIndices,
+                                                            IMG_UINT32 ui32FreePageCount,
+                                                            IMG_UINT32 * pui32FreePageIndices,
+                                                            IMG_UINT32 ui32AllocationIndex,
+                                                            IMG_UINT32 * pui32AllocationIndexOut);
+
+#endif /* CLIENT_DEVICEMEMHISTORY_BRIDGE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/devicememhistory_bridge/client_devicememhistory_direct_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/devicememhistory_bridge/client_devicememhistory_direct_bridge.c
new file mode 100644 (file)
index 0000000..acbb464
--- /dev/null
@@ -0,0 +1,194 @@
+/*******************************************************************************
+@File
+@Title          Direct client bridge for devicememhistory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the client side of the bridge for devicememhistory
+                which is used in calls from Server context.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_devicememhistory_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "img_types.h"
+#include "img_defs.h"
+#include "devicemem_typedefs.h"
+
+#include "devicemem_history_server.h"
+
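+/* Direct bridge: caller and implementation share the kernel address space,
+ * so handles are plain pointers and the KM functions are invoked directly
+ * with no argument marshalling. */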
+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryMap(IMG_HANDLE hBridge,
+                                                   IMG_HANDLE hPMR,
+                                                   IMG_DEVMEM_SIZE_T uiOffset,
+                                                   IMG_DEV_VIRTADDR sDevVAddr,
+                                                   IMG_DEVMEM_SIZE_T uiSize,
+                                                   const IMG_CHAR * puiText,
+                                                   IMG_UINT32 ui32Log2PageSize,
+                                                   IMG_UINT32 ui32AllocationIndex,
+                                                   IMG_UINT32 * pui32AllocationIndexOut)
+{
+       PVRSRV_ERROR eError;
+       PMR *psPMRInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psPMRInt = (PMR *) hPMR;
+
+       eError =
+           DevicememHistoryMapKM(psPMRInt,
+                                 uiOffset,
+                                 sDevVAddr,
+                                 uiSize,
+                                 puiText,
+                                 ui32Log2PageSize, ui32AllocationIndex, pui32AllocationIndexOut);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryUnmap(IMG_HANDLE hBridge,
+                                                     IMG_HANDLE hPMR,
+                                                     IMG_DEVMEM_SIZE_T uiOffset,
+                                                     IMG_DEV_VIRTADDR sDevVAddr,
+                                                     IMG_DEVMEM_SIZE_T uiSize,
+                                                     const IMG_CHAR * puiText,
+                                                     IMG_UINT32 ui32Log2PageSize,
+                                                     IMG_UINT32 ui32AllocationIndex,
+                                                     IMG_UINT32 * pui32AllocationIndexOut)
+{
+       PVRSRV_ERROR eError;
+       PMR *psPMRInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psPMRInt = (PMR *) hPMR;
+
+       eError =
+           DevicememHistoryUnmapKM(psPMRInt,
+                                   uiOffset,
+                                   sDevVAddr,
+                                   uiSize,
+                                   puiText,
+                                   ui32Log2PageSize, ui32AllocationIndex, pui32AllocationIndexOut);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryMapVRange(IMG_HANDLE hBridge,
+                                                         IMG_DEV_VIRTADDR sBaseDevVAddr,
+                                                         IMG_UINT32 ui32ui32StartPage,
+                                                         IMG_UINT32 ui32NumPages,
+                                                         IMG_DEVMEM_SIZE_T uiAllocSize,
+                                                         const IMG_CHAR * puiText,
+                                                         IMG_UINT32 ui32Log2PageSize,
+                                                         IMG_UINT32 ui32AllocationIndex,
+                                                         IMG_UINT32 * pui32AllocationIndexOut)
+{
+       PVRSRV_ERROR eError;
+
+       eError =
+           DevicememHistoryMapVRangeKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                                       sBaseDevVAddr,
+                                       ui32ui32StartPage,
+                                       ui32NumPages,
+                                       uiAllocSize,
+                                       puiText,
+                                       ui32Log2PageSize,
+                                       ui32AllocationIndex, pui32AllocationIndexOut);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryUnmapVRange(IMG_HANDLE hBridge,
+                                                           IMG_DEV_VIRTADDR sBaseDevVAddr,
+                                                           IMG_UINT32 ui32ui32StartPage,
+                                                           IMG_UINT32 ui32NumPages,
+                                                           IMG_DEVMEM_SIZE_T uiAllocSize,
+                                                           const IMG_CHAR * puiText,
+                                                           IMG_UINT32 ui32Log2PageSize,
+                                                           IMG_UINT32 ui32AllocationIndex,
+                                                           IMG_UINT32 * pui32AllocationIndexOut)
+{
+       PVRSRV_ERROR eError;
+
+       eError =
+           DevicememHistoryUnmapVRangeKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                                         sBaseDevVAddr,
+                                         ui32ui32StartPage,
+                                         ui32NumPages,
+                                         uiAllocSize,
+                                         puiText,
+                                         ui32Log2PageSize,
+                                         ui32AllocationIndex, pui32AllocationIndexOut);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistorySparseChange(IMG_HANDLE hBridge,
+                                                            IMG_HANDLE hPMR,
+                                                            IMG_DEVMEM_SIZE_T uiOffset,
+                                                            IMG_DEV_VIRTADDR sDevVAddr,
+                                                            IMG_DEVMEM_SIZE_T uiSize,
+                                                            const IMG_CHAR * puiText,
+                                                            IMG_UINT32 ui32Log2PageSize,
+                                                            IMG_UINT32 ui32AllocPageCount,
+                                                            IMG_UINT32 * pui32AllocPageIndices,
+                                                            IMG_UINT32 ui32FreePageCount,
+                                                            IMG_UINT32 * pui32FreePageIndices,
+                                                            IMG_UINT32 ui32AllocationIndex,
+                                                            IMG_UINT32 * pui32AllocationIndexOut)
+{
+       PVRSRV_ERROR eError;
+       PMR *psPMRInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psPMRInt = (PMR *) hPMR;
+
+       eError =
+           DevicememHistorySparseChangeKM(psPMRInt,
+                                          uiOffset,
+                                          sDevVAddr,
+                                          uiSize,
+                                          puiText,
+                                          ui32Log2PageSize,
+                                          ui32AllocPageCount,
+                                          pui32AllocPageIndices,
+                                          ui32FreePageCount,
+                                          pui32FreePageIndices,
+                                          ui32AllocationIndex, pui32AllocationIndexOut);
+
+       return eError;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/devicememhistory_bridge/common_devicememhistory_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/devicememhistory_bridge/common_devicememhistory_bridge.h
new file mode 100644 (file)
index 0000000..800f98d
--- /dev/null
@@ -0,0 +1,185 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for devicememhistory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for devicememhistory
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_DEVICEMEMHISTORY_BRIDGE_H
+#define COMMON_DEVICEMEMHISTORY_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "devicemem_typedefs.h"
+
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST                       0
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP                     PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+0
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP                   PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+1
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE                       PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+2
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE                     PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+3
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE                    PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+4
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_LAST                        (PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+4)
+
+/*******************************************
+            DevicememHistoryMap
+ *******************************************/
+
+/* Bridge in structure for DevicememHistoryMap */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP_TAG
+{
+       IMG_DEV_VIRTADDR sDevVAddr;
+       IMG_DEVMEM_SIZE_T uiOffset;
+       IMG_DEVMEM_SIZE_T uiSize;
+       IMG_HANDLE hPMR;
+       const IMG_CHAR *puiText;
+       IMG_UINT32 ui32AllocationIndex;
+       IMG_UINT32 ui32Log2PageSize;
+} __packed PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP;
+
+/* Bridge out structure for DevicememHistoryMap */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP_TAG
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32AllocationIndexOut;
+} __packed PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP;
+
+/*******************************************
+            DevicememHistoryUnmap
+ *******************************************/
+
+/* Bridge in structure for DevicememHistoryUnmap */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP_TAG
+{
+       IMG_DEV_VIRTADDR sDevVAddr;
+       IMG_DEVMEM_SIZE_T uiOffset;
+       IMG_DEVMEM_SIZE_T uiSize;
+       IMG_HANDLE hPMR;
+       const IMG_CHAR *puiText;
+       IMG_UINT32 ui32AllocationIndex;
+       IMG_UINT32 ui32Log2PageSize;
+} __packed PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP;
+
+/* Bridge out structure for DevicememHistoryUnmap */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP_TAG
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32AllocationIndexOut;
+} __packed PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP;
+
+/*******************************************
+            DevicememHistoryMapVRange
+ *******************************************/
+
+/* Bridge in structure for DevicememHistoryMapVRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE_TAG
+{
+       IMG_DEV_VIRTADDR sBaseDevVAddr;
+       IMG_DEVMEM_SIZE_T uiAllocSize;
+       const IMG_CHAR *puiText;
+       IMG_UINT32 ui32AllocationIndex;
+       IMG_UINT32 ui32Log2PageSize;
+       IMG_UINT32 ui32NumPages;
+       IMG_UINT32 ui32ui32StartPage;
+} __packed PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE;
+
+/* Bridge out structure for DevicememHistoryMapVRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE_TAG
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32AllocationIndexOut;
+} __packed PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE;
+
+/*******************************************
+            DevicememHistoryUnmapVRange
+ *******************************************/
+
+/* Bridge in structure for DevicememHistoryUnmapVRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE_TAG
+{
+       IMG_DEV_VIRTADDR sBaseDevVAddr;
+       IMG_DEVMEM_SIZE_T uiAllocSize;
+       const IMG_CHAR *puiText;
+       IMG_UINT32 ui32AllocationIndex;
+       IMG_UINT32 ui32Log2PageSize;
+       IMG_UINT32 ui32NumPages;
+       IMG_UINT32 ui32ui32StartPage;
+} __packed PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE;
+
+/* Bridge out structure for DevicememHistoryUnmapVRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE_TAG
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32AllocationIndexOut;
+} __packed PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE;
+
+/*******************************************
+            DevicememHistorySparseChange
+ *******************************************/
+
+/* Bridge in structure for DevicememHistorySparseChange */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE_TAG
+{
+       IMG_DEV_VIRTADDR sDevVAddr;
+       IMG_DEVMEM_SIZE_T uiOffset;
+       IMG_DEVMEM_SIZE_T uiSize;
+       IMG_HANDLE hPMR;
+       IMG_UINT32 *pui32AllocPageIndices;
+       IMG_UINT32 *pui32FreePageIndices;
+       const IMG_CHAR *puiText;
+       IMG_UINT32 ui32AllocPageCount;
+       IMG_UINT32 ui32AllocationIndex;
+       IMG_UINT32 ui32FreePageCount;
+       IMG_UINT32 ui32Log2PageSize;
+} __packed PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE;
+
+/* Bridge out structure for DevicememHistorySparseChange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE_TAG
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32AllocationIndexOut;
+} __packed PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE;
+
+#endif /* COMMON_DEVICEMEMHISTORY_BRIDGE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/devicememhistory_bridge/server_devicememhistory_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/devicememhistory_bridge/server_devicememhistory_bridge.c
new file mode 100644 (file)
index 0000000..db440d0
--- /dev/null
@@ -0,0 +1,846 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for devicememhistory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for devicememhistory
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem_history_server.h"
+
+#include "common_devicememhistory_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#include "lock.h"
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
+             "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeDevicememHistoryMap(IMG_UINT32 ui32DispatchTableEntry,
+                               IMG_UINT8 * psDevicememHistoryMapIN_UI8,
+                               IMG_UINT8 * psDevicememHistoryMapOUT_UI8,
+                               CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP *psDevicememHistoryMapIN =
+           (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP *) IMG_OFFSET_ADDR(psDevicememHistoryMapIN_UI8,
+                                                                    0);
+       PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP *psDevicememHistoryMapOUT =
+           (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP *) IMG_OFFSET_ADDR(psDevicememHistoryMapOUT_UI8,
+                                                                     0);
+
+       IMG_HANDLE hPMR = psDevicememHistoryMapIN->hPMR;
+       PMR *psPMRInt = NULL;
+       IMG_CHAR *uiTextInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0;
+
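+       /* The required copy-buffer size is computed in 64 bits first so that
+        * overflow of the 32-bit size can be detected and rejected. */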
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psDevicememHistoryMapOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto DevicememHistoryMap_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psDevicememHistoryMapIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDevicememHistoryMapIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psDevicememHistoryMapOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto DevicememHistoryMap_exit;
+                       }
+               }
+       }
+
+       {
+               uiTextInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiTextInt, (const void __user *)psDevicememHistoryMapIN->puiText,
+                    DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psDevicememHistoryMapOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto DevicememHistoryMap_exit;
+               }
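+               /* Force NUL-termination of the user-supplied annotation string. */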
+               ((IMG_CHAR *) uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - 1] = '\0';
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psDevicememHistoryMapOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRInt,
+                                      hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psDevicememHistoryMapOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevicememHistoryMap_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psDevicememHistoryMapOUT->eError =
+           DevicememHistoryMapKM(psPMRInt,
+                                 psDevicememHistoryMapIN->uiOffset,
+                                 psDevicememHistoryMapIN->sDevVAddr,
+                                 psDevicememHistoryMapIN->uiSize,
+                                 uiTextInt,
+                                 psDevicememHistoryMapIN->ui32Log2PageSize,
+                                 psDevicememHistoryMapIN->ui32AllocationIndex,
+                                 &psDevicememHistoryMapOUT->ui32AllocationIndexOut);
+
+DevicememHistoryMap_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release the lock now that the looked-up handles have been cleaned up. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psDevicememHistoryMapOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
+             "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeDevicememHistoryUnmap(IMG_UINT32 ui32DispatchTableEntry,
+                                 IMG_UINT8 * psDevicememHistoryUnmapIN_UI8,
+                                 IMG_UINT8 * psDevicememHistoryUnmapOUT_UI8,
+                                 CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP *psDevicememHistoryUnmapIN =
+           (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP *)
+           IMG_OFFSET_ADDR(psDevicememHistoryUnmapIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP *psDevicememHistoryUnmapOUT =
+           (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP *)
+           IMG_OFFSET_ADDR(psDevicememHistoryUnmapOUT_UI8, 0);
+
+       IMG_HANDLE hPMR = psDevicememHistoryUnmapIN->hPMR;
+       PMR *psPMRInt = NULL;
+       IMG_CHAR *uiTextInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0;
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psDevicememHistoryUnmapOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto DevicememHistoryUnmap_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psDevicememHistoryUnmapIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDevicememHistoryUnmapIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psDevicememHistoryUnmapOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto DevicememHistoryUnmap_exit;
+                       }
+               }
+       }
+
+       {
+               uiTextInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiTextInt, (const void __user *)psDevicememHistoryUnmapIN->puiText,
+                    DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psDevicememHistoryUnmapOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto DevicememHistoryUnmap_exit;
+               }
+               ((IMG_CHAR *) uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - 1] = '\0';
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psDevicememHistoryUnmapOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRInt,
+                                      hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psDevicememHistoryUnmapOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevicememHistoryUnmap_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psDevicememHistoryUnmapOUT->eError =
+           DevicememHistoryUnmapKM(psPMRInt,
+                                   psDevicememHistoryUnmapIN->uiOffset,
+                                   psDevicememHistoryUnmapIN->sDevVAddr,
+                                   psDevicememHistoryUnmapIN->uiSize,
+                                   uiTextInt,
+                                   psDevicememHistoryUnmapIN->ui32Log2PageSize,
+                                   psDevicememHistoryUnmapIN->ui32AllocationIndex,
+                                   &psDevicememHistoryUnmapOUT->ui32AllocationIndexOut);
+
+DevicememHistoryUnmap_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up the looked-up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psDevicememHistoryUnmapOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
+             "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeDevicememHistoryMapVRange(IMG_UINT32 ui32DispatchTableEntry,
+                                     IMG_UINT8 * psDevicememHistoryMapVRangeIN_UI8,
+                                     IMG_UINT8 * psDevicememHistoryMapVRangeOUT_UI8,
+                                     CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE *psDevicememHistoryMapVRangeIN =
+           (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE *)
+           IMG_OFFSET_ADDR(psDevicememHistoryMapVRangeIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE *psDevicememHistoryMapVRangeOUT =
+           (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE *)
+           IMG_OFFSET_ADDR(psDevicememHistoryMapVRangeOUT_UI8, 0);
+
+       IMG_CHAR *uiTextInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0;
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psDevicememHistoryMapVRangeOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto DevicememHistoryMapVRange_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psDevicememHistoryMapVRangeIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDevicememHistoryMapVRangeIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psDevicememHistoryMapVRangeOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto DevicememHistoryMapVRange_exit;
+                       }
+               }
+       }
+
+       {
+               uiTextInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiTextInt, (const void __user *)psDevicememHistoryMapVRangeIN->puiText,
+                    DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psDevicememHistoryMapVRangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto DevicememHistoryMapVRange_exit;
+               }
+               ((IMG_CHAR *) uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - 1] = '\0';
+       }
+
+       psDevicememHistoryMapVRangeOUT->eError =
+           DevicememHistoryMapVRangeKM(psConnection, OSGetDevNode(psConnection),
+                                       psDevicememHistoryMapVRangeIN->sBaseDevVAddr,
+                                       psDevicememHistoryMapVRangeIN->ui32ui32StartPage,
+                                       psDevicememHistoryMapVRangeIN->ui32NumPages,
+                                       psDevicememHistoryMapVRangeIN->uiAllocSize,
+                                       uiTextInt,
+                                       psDevicememHistoryMapVRangeIN->ui32Log2PageSize,
+                                       psDevicememHistoryMapVRangeIN->ui32AllocationIndex,
+                                       &psDevicememHistoryMapVRangeOUT->ui32AllocationIndexOut);
+
+DevicememHistoryMapVRange_exit:
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psDevicememHistoryMapVRangeOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
+             "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeDevicememHistoryUnmapVRange(IMG_UINT32 ui32DispatchTableEntry,
+                                       IMG_UINT8 * psDevicememHistoryUnmapVRangeIN_UI8,
+                                       IMG_UINT8 * psDevicememHistoryUnmapVRangeOUT_UI8,
+                                       CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE *psDevicememHistoryUnmapVRangeIN =
+           (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE *)
+           IMG_OFFSET_ADDR(psDevicememHistoryUnmapVRangeIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE *psDevicememHistoryUnmapVRangeOUT =
+           (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE *)
+           IMG_OFFSET_ADDR(psDevicememHistoryUnmapVRangeOUT_UI8, 0);
+
+       IMG_CHAR *uiTextInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0;
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psDevicememHistoryUnmapVRangeOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto DevicememHistoryUnmapVRange_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psDevicememHistoryUnmapVRangeIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer =
+                           (IMG_BYTE *) (void *)psDevicememHistoryUnmapVRangeIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psDevicememHistoryUnmapVRangeOUT->eError =
+                                   PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto DevicememHistoryUnmapVRange_exit;
+                       }
+               }
+       }
+
+       {
+               uiTextInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiTextInt, (const void __user *)psDevicememHistoryUnmapVRangeIN->puiText,
+                    DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psDevicememHistoryUnmapVRangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto DevicememHistoryUnmapVRange_exit;
+               }
+               ((IMG_CHAR *) uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - 1] = '\0';
+       }
+
+       psDevicememHistoryUnmapVRangeOUT->eError =
+           DevicememHistoryUnmapVRangeKM(psConnection, OSGetDevNode(psConnection),
+                                         psDevicememHistoryUnmapVRangeIN->sBaseDevVAddr,
+                                         psDevicememHistoryUnmapVRangeIN->ui32ui32StartPage,
+                                         psDevicememHistoryUnmapVRangeIN->ui32NumPages,
+                                         psDevicememHistoryUnmapVRangeIN->uiAllocSize,
+                                         uiTextInt,
+                                         psDevicememHistoryUnmapVRangeIN->ui32Log2PageSize,
+                                         psDevicememHistoryUnmapVRangeIN->ui32AllocationIndex,
+                                         &psDevicememHistoryUnmapVRangeOUT->
+                                         ui32AllocationIndexOut);
+
+DevicememHistoryUnmapVRange_exit:
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psDevicememHistoryUnmapVRangeOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
+             "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeDevicememHistorySparseChange(IMG_UINT32 ui32DispatchTableEntry,
+                                        IMG_UINT8 * psDevicememHistorySparseChangeIN_UI8,
+                                        IMG_UINT8 * psDevicememHistorySparseChangeOUT_UI8,
+                                        CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE *psDevicememHistorySparseChangeIN =
+           (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE *)
+           IMG_OFFSET_ADDR(psDevicememHistorySparseChangeIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE *psDevicememHistorySparseChangeOUT =
+           (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE *)
+           IMG_OFFSET_ADDR(psDevicememHistorySparseChangeOUT_UI8, 0);
+
+       IMG_HANDLE hPMR = psDevicememHistorySparseChangeIN->hPMR;
+       PMR *psPMRInt = NULL;
+       IMG_CHAR *uiTextInt = NULL;
+       IMG_UINT32 *ui32AllocPageIndicesInt = NULL;
+       IMG_UINT32 *ui32FreePageIndicesInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) +
+           ((IMG_UINT64) psDevicememHistorySparseChangeIN->ui32AllocPageCount *
+            sizeof(IMG_UINT32)) +
+           ((IMG_UINT64) psDevicememHistorySparseChangeIN->ui32FreePageCount *
+            sizeof(IMG_UINT32)) + 0;
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto DevicememHistorySparseChange_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psDevicememHistorySparseChangeIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer =
+                           (IMG_BYTE *) (void *)psDevicememHistorySparseChangeIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psDevicememHistorySparseChangeOUT->eError =
+                                   PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto DevicememHistorySparseChange_exit;
+                       }
+               }
+       }
+
+       {
+               uiTextInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiTextInt,
+                    (const void __user *)psDevicememHistorySparseChangeIN->puiText,
+                    DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto DevicememHistorySparseChange_exit;
+               }
+               ((IMG_CHAR *) uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - 1] = '\0';
+       }
+       if (psDevicememHistorySparseChangeIN->ui32AllocPageCount != 0)
+       {
+               ui32AllocPageIndicesInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32AllocPageIndicesInt,
+                    (const void __user *)psDevicememHistorySparseChangeIN->pui32AllocPageIndices,
+                    psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32)) !=
+                   PVRSRV_OK)
+               {
+                       psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto DevicememHistorySparseChange_exit;
+               }
+       }
+       if (psDevicememHistorySparseChangeIN->ui32FreePageCount != 0)
+       {
+               ui32FreePageIndicesInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32FreePageIndicesInt,
+                    (const void __user *)psDevicememHistorySparseChangeIN->pui32FreePageIndices,
+                    psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32)) !=
+                   PVRSRV_OK)
+               {
+                       psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto DevicememHistorySparseChange_exit;
+               }
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psDevicememHistorySparseChangeOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRInt,
+                                      hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psDevicememHistorySparseChangeOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevicememHistorySparseChange_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psDevicememHistorySparseChangeOUT->eError =
+           DevicememHistorySparseChangeKM(psPMRInt,
+                                          psDevicememHistorySparseChangeIN->uiOffset,
+                                          psDevicememHistorySparseChangeIN->sDevVAddr,
+                                          psDevicememHistorySparseChangeIN->uiSize,
+                                          uiTextInt,
+                                          psDevicememHistorySparseChangeIN->ui32Log2PageSize,
+                                          psDevicememHistorySparseChangeIN->ui32AllocPageCount,
+                                          ui32AllocPageIndicesInt,
+                                          psDevicememHistorySparseChangeIN->ui32FreePageCount,
+                                          ui32FreePageIndicesInt,
+                                          psDevicememHistorySparseChangeIN->ui32AllocationIndex,
+                                          &psDevicememHistorySparseChangeOUT->
+                                          ui32AllocationIndexOut);
+
+DevicememHistorySparseChange_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up the looked-up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psDevicememHistorySparseChangeOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
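+/*
+ * Single lock shared by every DEVICEMEMHISTORY bridge entry point: it is
+ * created in InitDEVICEMEMHISTORYBridge(), passed to each
+ * SetDispatchTableEntry() call below, and destroyed again in
+ * DeinitDEVICEMEMHISTORYBridge().
+ */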
+static POS_LOCK pDEVICEMEMHISTORYBridgeLock;
+
+PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void);
+void DeinitDEVICEMEMHISTORYBridge(void);
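+
+/*
+ * The forward declarations above exist so that the services bridge
+ * registration code outside this generated file can call Init/Deinit; the
+ * caller itself is not part of this hunk.
+ */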
+
+/*
+ * Register all DEVICEMEMHISTORY functions with services
+ */
+PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void)
+{
+       PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&pDEVICEMEMHISTORYBridgeLock), "OSLockCreate");
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+                             PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP,
+                             PVRSRVBridgeDevicememHistoryMap, pDEVICEMEMHISTORYBridgeLock);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+                             PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP,
+                             PVRSRVBridgeDevicememHistoryUnmap, pDEVICEMEMHISTORYBridgeLock);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+                             PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE,
+                             PVRSRVBridgeDevicememHistoryMapVRange, pDEVICEMEMHISTORYBridgeLock);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+                             PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE,
+                             PVRSRVBridgeDevicememHistoryUnmapVRange, pDEVICEMEMHISTORYBridgeLock);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+                             PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE,
+                             PVRSRVBridgeDevicememHistorySparseChange,
+                             pDEVICEMEMHISTORYBridgeLock);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all DEVICEMEMHISTORY functions from services
+ */
+void DeinitDEVICEMEMHISTORYBridge(void)
+{
+       OSLockDestroy(pDEVICEMEMHISTORYBridgeLock);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+                               PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+                               PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+                               PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+                               PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+                               PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE);
+
+}
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/di_bridge/common_di_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/di_bridge/common_di_bridge.h
new file mode 100644 (file)
index 0000000..8591006
--- /dev/null
@@ -0,0 +1,153 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for di
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for di
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_DI_BRIDGE_H
+#define COMMON_DI_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pvrsrv_tlcommon.h"
+#include "pvr_dicommon.h"
+
+#define PVRSRV_BRIDGE_DI_CMD_FIRST                     0
+#define PVRSRV_BRIDGE_DI_DICREATECONTEXT                       PVRSRV_BRIDGE_DI_CMD_FIRST+0
+#define PVRSRV_BRIDGE_DI_DIDESTROYCONTEXT                      PVRSRV_BRIDGE_DI_CMD_FIRST+1
+#define PVRSRV_BRIDGE_DI_DIREADENTRY                   PVRSRV_BRIDGE_DI_CMD_FIRST+2
+#define PVRSRV_BRIDGE_DI_DIWRITEENTRY                  PVRSRV_BRIDGE_DI_CMD_FIRST+3
+#define PVRSRV_BRIDGE_DI_DILISTALLENTRIES                      PVRSRV_BRIDGE_DI_CMD_FIRST+4
+#define PVRSRV_BRIDGE_DI_CMD_LAST                      (PVRSRV_BRIDGE_DI_CMD_FIRST+4)
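+
+/*
+ * These command indices are the ones server_di_bridge.c registers in
+ * InitDIBridge(), one SetDispatchTableEntry() call per define, with
+ * PVRSRV_BRIDGE_DI_CMD_LAST marking the last index in this bridge group.
+ */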
+
+/*******************************************
+            DICreateContext
+ *******************************************/
+
+/* Bridge in structure for DICreateContext */
+typedef struct PVRSRV_BRIDGE_IN_DICREATECONTEXT_TAG
+{
+       IMG_CHAR *puiStreamName;
+} __packed PVRSRV_BRIDGE_IN_DICREATECONTEXT;
+
+/* Bridge out structure for DICreateContext */
+typedef struct PVRSRV_BRIDGE_OUT_DICREATECONTEXT_TAG
+{
+       IMG_HANDLE hContext;
+       IMG_CHAR *puiStreamName;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DICREATECONTEXT;
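+
+/*
+ * puiStreamName appears in both the IN and OUT structures: the server copies
+ * the caller's pointer from IN to OUT and then writes the resolved stream
+ * name back through it (see PVRSRVBridgeDICreateContext in
+ * server_di_bridge.c).
+ */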
+
+/*******************************************
+            DIDestroyContext
+ *******************************************/
+
+/* Bridge in structure for DIDestroyContext */
+typedef struct PVRSRV_BRIDGE_IN_DIDESTROYCONTEXT_TAG
+{
+       IMG_HANDLE hContext;
+} __packed PVRSRV_BRIDGE_IN_DIDESTROYCONTEXT;
+
+/* Bridge out structure for DIDestroyContext */
+typedef struct PVRSRV_BRIDGE_OUT_DIDESTROYCONTEXT_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DIDESTROYCONTEXT;
+
+/*******************************************
+            DIReadEntry
+ *******************************************/
+
+/* Bridge in structure for DIReadEntry */
+typedef struct PVRSRV_BRIDGE_IN_DIREADENTRY_TAG
+{
+       IMG_UINT64 ui64Offset;
+       IMG_UINT64 ui64Size;
+       IMG_HANDLE hContext;
+       const IMG_CHAR *puiEntryPath;
+} __packed PVRSRV_BRIDGE_IN_DIREADENTRY;
+
+/* Bridge out structure for DIReadEntry */
+typedef struct PVRSRV_BRIDGE_OUT_DIREADENTRY_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DIREADENTRY;
+
+/*******************************************
+            DIWriteEntry
+ *******************************************/
+
+/* Bridge in structure for DIWriteEntry */
+typedef struct PVRSRV_BRIDGE_IN_DIWRITEENTRY_TAG
+{
+       IMG_HANDLE hContext;
+       const IMG_CHAR *puiEntryPath;
+       const IMG_CHAR *puiValue;
+       IMG_UINT32 ui32ValueSize;
+} __packed PVRSRV_BRIDGE_IN_DIWRITEENTRY;
+
+/* Bridge out structure for DIWriteEntry */
+typedef struct PVRSRV_BRIDGE_OUT_DIWRITEENTRY_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DIWRITEENTRY;
+
+/*******************************************
+            DIListAllEntries
+ *******************************************/
+
+/* Bridge in structure for DIListAllEntries */
+typedef struct PVRSRV_BRIDGE_IN_DILISTALLENTRIES_TAG
+{
+       IMG_HANDLE hContext;
+} __packed PVRSRV_BRIDGE_IN_DILISTALLENTRIES;
+
+/* Bridge out structure for DIListAllEntries */
+typedef struct PVRSRV_BRIDGE_OUT_DILISTALLENTRIES_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DILISTALLENTRIES;
+
+#endif /* COMMON_DI_BRIDGE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/di_bridge/server_di_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/di_bridge/server_di_bridge.c
new file mode 100644 (file)
index 0000000..49a97a0
--- /dev/null
@@ -0,0 +1,639 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for di
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for di
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "di_impl_brg.h"
+
+#include "common_di_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static PVRSRV_ERROR _DICreateContextpsContextIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = DIDestroyContextKM((DI_CONTEXT *) pvData);
+       return eError;
+}
+
+static IMG_INT
+PVRSRVBridgeDICreateContext(IMG_UINT32 ui32DispatchTableEntry,
+                           IMG_UINT8 * psDICreateContextIN_UI8,
+                           IMG_UINT8 * psDICreateContextOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DICREATECONTEXT *psDICreateContextIN =
+           (PVRSRV_BRIDGE_IN_DICREATECONTEXT *) IMG_OFFSET_ADDR(psDICreateContextIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DICREATECONTEXT *psDICreateContextOUT =
+           (PVRSRV_BRIDGE_OUT_DICREATECONTEXT *) IMG_OFFSET_ADDR(psDICreateContextOUT_UI8, 0);
+
+       IMG_CHAR *puiStreamNameInt = NULL;
+       DI_CONTEXT *psContextInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) + 0;
+
+       psDICreateContextOUT->puiStreamName = psDICreateContextIN->puiStreamName;
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psDICreateContextOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto DICreateContext_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psDICreateContextIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDICreateContextIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psDICreateContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto DICreateContext_exit;
+                       }
+               }
+       }
+
+       if (IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset) != NULL)
+       {
+               puiStreamNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR);
+       }
+
+       psDICreateContextOUT->eError = DICreateContextKM(puiStreamNameInt, &psContextInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psDICreateContextOUT->eError != PVRSRV_OK))
+       {
+               goto DICreateContext_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psDICreateContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                                                &psDICreateContextOUT->hContext,
+                                                                (void *)psContextInt,
+                                                                PVRSRV_HANDLE_TYPE_DI_CONTEXT,
+                                                                PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+                                                                (PFN_HANDLE_RELEASE) &
+                                                                _DICreateContextpsContextIntRelease);
+       if (unlikely(psDICreateContextOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DICreateContext_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       /* If dest ptr is non-null and we have data to copy */
+       if ((puiStreamNameInt) && ((PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) > 0))
+       {
+               if (unlikely
+                   (OSCopyToUser
+                    (NULL, (void __user *)psDICreateContextOUT->puiStreamName, puiStreamNameInt,
+                     (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR))) != PVRSRV_OK))
+               {
+                       psDICreateContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto DICreateContext_exit;
+               }
+       }
+
+DICreateContext_exit:
+
+       if (psDICreateContextOUT->eError != PVRSRV_OK)
+       {
+               if (psContextInt)
+               {
+                       DIDestroyContextKM(psContextInt);
+               }
+       }
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psDICreateContextOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
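+
+/*
+ * Note that the PFN_HANDLE_RELEASE callback registered above
+ * (_DICreateContextpsContextIntRelease) invokes DIDestroyContextKM() when the
+ * hContext handle is eventually freed; the explicit DIDestroyContextKM() call
+ * in the error path only covers the case where no handle was created.
+ */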
+
+static IMG_INT
+PVRSRVBridgeDIDestroyContext(IMG_UINT32 ui32DispatchTableEntry,
+                            IMG_UINT8 * psDIDestroyContextIN_UI8,
+                            IMG_UINT8 * psDIDestroyContextOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DIDESTROYCONTEXT *psDIDestroyContextIN =
+           (PVRSRV_BRIDGE_IN_DIDESTROYCONTEXT *) IMG_OFFSET_ADDR(psDIDestroyContextIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DIDESTROYCONTEXT *psDIDestroyContextOUT =
+           (PVRSRV_BRIDGE_OUT_DIDESTROYCONTEXT *) IMG_OFFSET_ADDR(psDIDestroyContextOUT_UI8, 0);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psHandleBase);
+
+       psDIDestroyContextOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+                                             (IMG_HANDLE) psDIDestroyContextIN->hContext,
+                                             PVRSRV_HANDLE_TYPE_DI_CONTEXT);
+       if (unlikely((psDIDestroyContextOUT->eError != PVRSRV_OK) &&
+                    (psDIDestroyContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+                    (psDIDestroyContextOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s", __func__, PVRSRVGetErrorString(psDIDestroyContextOUT->eError)));
+               UnlockHandle(psConnection->psHandleBase);
+               goto DIDestroyContext_exit;
+       }
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+DIDestroyContext_exit:
+
+       return 0;
+}
+
+static_assert(DI_IMPL_BRG_PATH_LEN <= IMG_UINT32_MAX,
+             "DI_IMPL_BRG_PATH_LEN must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeDIReadEntry(IMG_UINT32 ui32DispatchTableEntry,
+                       IMG_UINT8 * psDIReadEntryIN_UI8,
+                       IMG_UINT8 * psDIReadEntryOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DIREADENTRY *psDIReadEntryIN =
+           (PVRSRV_BRIDGE_IN_DIREADENTRY *) IMG_OFFSET_ADDR(psDIReadEntryIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DIREADENTRY *psDIReadEntryOUT =
+           (PVRSRV_BRIDGE_OUT_DIREADENTRY *) IMG_OFFSET_ADDR(psDIReadEntryOUT_UI8, 0);
+
+       IMG_HANDLE hContext = psDIReadEntryIN->hContext;
+       DI_CONTEXT *psContextInt = NULL;
+       IMG_CHAR *uiEntryPathInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) + 0;
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psDIReadEntryOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto DIReadEntry_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psDIReadEntryIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDIReadEntryIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psDIReadEntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto DIReadEntry_exit;
+                       }
+               }
+       }
+
+       {
+               uiEntryPathInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiEntryPathInt, (const void __user *)psDIReadEntryIN->puiEntryPath,
+                    DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psDIReadEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto DIReadEntry_exit;
+               }
+               ((IMG_CHAR *) uiEntryPathInt)[(DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) - 1] = '\0';
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psDIReadEntryOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psContextInt,
+                                      hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT, IMG_TRUE);
+       if (unlikely(psDIReadEntryOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DIReadEntry_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psDIReadEntryOUT->eError =
+           DIReadEntryKM(psContextInt,
+                         uiEntryPathInt, psDIReadEntryIN->ui64Offset, psDIReadEntryIN->ui64Size);
+
+DIReadEntry_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psContextInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT);
+       }
+       /* Release now we have cleaned up the looked-up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psDIReadEntryOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static_assert(DI_IMPL_BRG_PATH_LEN <= IMG_UINT32_MAX,
+             "DI_IMPL_BRG_PATH_LEN must not be larger than IMG_UINT32_MAX");
+static_assert(DI_IMPL_BRG_PATH_LEN <= IMG_UINT32_MAX,
+             "DI_IMPL_BRG_PATH_LEN must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeDIWriteEntry(IMG_UINT32 ui32DispatchTableEntry,
+                        IMG_UINT8 * psDIWriteEntryIN_UI8,
+                        IMG_UINT8 * psDIWriteEntryOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DIWRITEENTRY *psDIWriteEntryIN =
+           (PVRSRV_BRIDGE_IN_DIWRITEENTRY *) IMG_OFFSET_ADDR(psDIWriteEntryIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DIWRITEENTRY *psDIWriteEntryOUT =
+           (PVRSRV_BRIDGE_OUT_DIWRITEENTRY *) IMG_OFFSET_ADDR(psDIWriteEntryOUT_UI8, 0);
+
+       IMG_HANDLE hContext = psDIWriteEntryIN->hContext;
+       DI_CONTEXT *psContextInt = NULL;
+       IMG_CHAR *uiEntryPathInt = NULL;
+       IMG_CHAR *uiValueInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) +
+           ((IMG_UINT64) psDIWriteEntryIN->ui32ValueSize * sizeof(IMG_CHAR)) + 0;
+
+       if (unlikely(psDIWriteEntryIN->ui32ValueSize > DI_IMPL_BRG_PATH_LEN))
+       {
+               psDIWriteEntryOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto DIWriteEntry_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psDIWriteEntryOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto DIWriteEntry_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psDIWriteEntryIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDIWriteEntryIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psDIWriteEntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto DIWriteEntry_exit;
+                       }
+               }
+       }
+
+       {
+               uiEntryPathInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiEntryPathInt, (const void __user *)psDIWriteEntryIN->puiEntryPath,
+                    DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psDIWriteEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto DIWriteEntry_exit;
+               }
+               ((IMG_CHAR *) uiEntryPathInt)[(DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) - 1] = '\0';
+       }
+       if (psDIWriteEntryIN->ui32ValueSize != 0)
+       {
+               uiValueInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psDIWriteEntryIN->ui32ValueSize * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (psDIWriteEntryIN->ui32ValueSize * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiValueInt, (const void __user *)psDIWriteEntryIN->puiValue,
+                    psDIWriteEntryIN->ui32ValueSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psDIWriteEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto DIWriteEntry_exit;
+               }
+               ((IMG_CHAR *) uiValueInt)[(psDIWriteEntryIN->ui32ValueSize * sizeof(IMG_CHAR)) -
+                                         1] = '\0';
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psDIWriteEntryOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psContextInt,
+                                      hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT, IMG_TRUE);
+       if (unlikely(psDIWriteEntryOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DIWriteEntry_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psDIWriteEntryOUT->eError =
+           DIWriteEntryKM(psContextInt,
+                          uiEntryPathInt, psDIWriteEntryIN->ui32ValueSize, uiValueInt);
+
+DIWriteEntry_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psContextInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT);
+       }
+       /* Release now we have cleaned up the looked-up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psDIWriteEntryOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDIListAllEntries(IMG_UINT32 ui32DispatchTableEntry,
+                            IMG_UINT8 * psDIListAllEntriesIN_UI8,
+                            IMG_UINT8 * psDIListAllEntriesOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DILISTALLENTRIES *psDIListAllEntriesIN =
+           (PVRSRV_BRIDGE_IN_DILISTALLENTRIES *) IMG_OFFSET_ADDR(psDIListAllEntriesIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DILISTALLENTRIES *psDIListAllEntriesOUT =
+           (PVRSRV_BRIDGE_OUT_DILISTALLENTRIES *) IMG_OFFSET_ADDR(psDIListAllEntriesOUT_UI8, 0);
+
+       IMG_HANDLE hContext = psDIListAllEntriesIN->hContext;
+       DI_CONTEXT *psContextInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psDIListAllEntriesOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psContextInt,
+                                      hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT, IMG_TRUE);
+       if (unlikely(psDIListAllEntriesOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DIListAllEntries_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psDIListAllEntriesOUT->eError = DIListAllEntriesKM(psContextInt);
+
+DIListAllEntries_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psContextInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT);
+       }
+       /* Release now we have cleaned up the looked-up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitDIBridge(void);
+void DeinitDIBridge(void);
+
+/*
+ * Register all DI functions with services
+ */
+PVRSRV_ERROR InitDIBridge(void)
+{
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DICREATECONTEXT,
+                             PVRSRVBridgeDICreateContext, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIDESTROYCONTEXT,
+                             PVRSRVBridgeDIDestroyContext, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIREADENTRY,
+                             PVRSRVBridgeDIReadEntry, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIWRITEENTRY,
+                             PVRSRVBridgeDIWriteEntry, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DILISTALLENTRIES,
+                             PVRSRVBridgeDIListAllEntries, NULL);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all DI functions with services
+ */
+void DeinitDIBridge(void)
+{
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DICREATECONTEXT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIDESTROYCONTEXT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIREADENTRY);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIWRITEENTRY);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DILISTALLENTRIES);
+
+}
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/dma_bridge/common_dma_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/dma_bridge/common_dma_bridge.h
new file mode 100644 (file)
index 0000000..dafa475
--- /dev/null
@@ -0,0 +1,123 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for dma
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for dma
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_DMA_BRIDGE_H
+#define COMMON_DMA_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "pvrsrv_sync_km.h"
+
+#define PVRSRV_BRIDGE_DMA_CMD_FIRST                    0
+#define PVRSRV_BRIDGE_DMA_DMATRANSFER                  PVRSRV_BRIDGE_DMA_CMD_FIRST+0
+#define PVRSRV_BRIDGE_DMA_DMASPARSEMAPPINGTABLE                        PVRSRV_BRIDGE_DMA_CMD_FIRST+1
+#define PVRSRV_BRIDGE_DMA_DMADEVICEPARAMS                      PVRSRV_BRIDGE_DMA_CMD_FIRST+2
+#define PVRSRV_BRIDGE_DMA_CMD_LAST                     (PVRSRV_BRIDGE_DMA_CMD_FIRST+2)
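+
+/* The command indices above are offsets within the DMA bridge group; the
+ * server side (server_dma_bridge.c) registers one handler per index against
+ * the PVRSRV_BRIDGE_DMA dispatch table in InitDMABridge().
+ */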
+
+/*******************************************
+            DmaTransfer
+ *******************************************/
+
+/* Bridge in structure for DmaTransfer */
+typedef struct PVRSRV_BRIDGE_IN_DMATRANSFER_TAG
+{
+       IMG_UINT64 *pui64Address;
+       IMG_DEVMEM_OFFSET_T *puiOffset;
+       IMG_DEVMEM_SIZE_T *puiSize;
+       IMG_HANDLE *phPMR;
+       PVRSRV_TIMELINE hUpdateTimeline;
+       IMG_UINT32 ui32NumDMAs;
+       IMG_UINT32 ui32uiFlags;
+} __packed PVRSRV_BRIDGE_IN_DMATRANSFER;
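+
+/* The pointer members above refer to user-space arrays of ui32NumDMAs
+ * elements each (PMR handles, 64-bit addresses, offsets and sizes); the
+ * server copies them into kernel memory with OSCopyFromUser() before calling
+ * DmaTransfer().
+ */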
+
+/* Bridge out structure for DmaTransfer */
+typedef struct PVRSRV_BRIDGE_OUT_DMATRANSFER_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DMATRANSFER;
+
+/*******************************************
+            DmaSparseMappingTable
+ *******************************************/
+
+/* Bridge in structure for DmaSparseMappingTable */
+typedef struct PVRSRV_BRIDGE_IN_DMASPARSEMAPPINGTABLE_TAG
+{
+       IMG_DEVMEM_OFFSET_T uiOffset;
+       IMG_HANDLE hPMR;
+       IMG_BOOL *pbTable;
+       IMG_UINT32 ui32SizeInPages;
+} __packed PVRSRV_BRIDGE_IN_DMASPARSEMAPPINGTABLE;
+
+/* Bridge out structure for DmaSparseMappingTable */
+typedef struct PVRSRV_BRIDGE_OUT_DMASPARSEMAPPINGTABLE_TAG
+{
+       IMG_BOOL *pbTable;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DMASPARSEMAPPINGTABLE;
+
+/*******************************************
+            DmaDeviceParams
+ *******************************************/
+
+/* Bridge in structure for DmaDeviceParams */
+typedef struct PVRSRV_BRIDGE_IN_DMADEVICEPARAMS_TAG
+{
+       IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_DMADEVICEPARAMS;
+
+/* Bridge out structure for DmaDeviceParams */
+typedef struct PVRSRV_BRIDGE_OUT_DMADEVICEPARAMS_TAG
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32DmaBuffAlign;
+       IMG_UINT32 ui32DmaTransferMult;
+} __packed PVRSRV_BRIDGE_OUT_DMADEVICEPARAMS;
+
+#endif /* COMMON_DMA_BRIDGE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/dma_bridge/server_dma_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/dma_bridge/server_dma_bridge.c
new file mode 100644 (file)
index 0000000..570be45
--- /dev/null
@@ -0,0 +1,500 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for dma
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for dma
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "dma_km.h"
+
+#include "common_dma_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static_assert(MAX_DMA_OPS <= IMG_UINT32_MAX, "MAX_DMA_OPS must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeDmaTransfer(IMG_UINT32 ui32DispatchTableEntry,
+                       IMG_UINT8 * psDmaTransferIN_UI8,
+                       IMG_UINT8 * psDmaTransferOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DMATRANSFER *psDmaTransferIN =
+           (PVRSRV_BRIDGE_IN_DMATRANSFER *) IMG_OFFSET_ADDR(psDmaTransferIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DMATRANSFER *psDmaTransferOUT =
+           (PVRSRV_BRIDGE_OUT_DMATRANSFER *) IMG_OFFSET_ADDR(psDmaTransferOUT_UI8, 0);
+
+       PMR **psPMRInt = NULL;
+       IMG_HANDLE *hPMRInt2 = NULL;
+       IMG_UINT64 *ui64AddressInt = NULL;
+       IMG_DEVMEM_OFFSET_T *uiOffsetInt = NULL;
+       IMG_DEVMEM_SIZE_T *uiSizeInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psDmaTransferIN->ui32NumDMAs * sizeof(PMR *)) +
+           ((IMG_UINT64) psDmaTransferIN->ui32NumDMAs * sizeof(IMG_HANDLE)) +
+           ((IMG_UINT64) psDmaTransferIN->ui32NumDMAs * sizeof(IMG_UINT64)) +
+           ((IMG_UINT64) psDmaTransferIN->ui32NumDMAs * sizeof(IMG_DEVMEM_OFFSET_T)) +
+           ((IMG_UINT64) psDmaTransferIN->ui32NumDMAs * sizeof(IMG_DEVMEM_SIZE_T)) + 0;
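+
+       /* The scratch buffer size is accumulated in 64 bits so a large
+        * ui32NumDMAs cannot overflow the sum; it is only narrowed to 32 bits
+        * once the MAX_DMA_OPS and IMG_UINT32_MAX checks below have passed.
+        */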
+
+       if (unlikely(psDmaTransferIN->ui32NumDMAs > MAX_DMA_OPS))
+       {
+               psDmaTransferOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto DmaTransfer_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psDmaTransferOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto DmaTransfer_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psDmaTransferIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDmaTransferIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psDmaTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto DmaTransfer_exit;
+                       }
+               }
+       }
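+
+       /* pArrayArgsBuffer is carved up below into one sub-array per bridge
+        * parameter, with ui32NextOffset tracking the running offset; the
+        * PVR_ASSERT in the exit path verifies that the carved total matches
+        * ui32BufferSize exactly.
+        */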
+
+       if (psDmaTransferIN->ui32NumDMAs != 0)
+       {
+               psPMRInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               OSCachedMemSet(psPMRInt, 0, psDmaTransferIN->ui32NumDMAs * sizeof(PMR *));
+               ui32NextOffset += psDmaTransferIN->ui32NumDMAs * sizeof(PMR *);
+               hPMRInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psDmaTransferIN->ui32NumDMAs * sizeof(IMG_HANDLE);
+       }
+
+       /* Copy the data over */
+       if (psDmaTransferIN->ui32NumDMAs * sizeof(IMG_HANDLE) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, hPMRInt2, (const void __user *)psDmaTransferIN->phPMR,
+                    psDmaTransferIN->ui32NumDMAs * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+               {
+                       psDmaTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto DmaTransfer_exit;
+               }
+       }
+       if (psDmaTransferIN->ui32NumDMAs != 0)
+       {
+               ui64AddressInt = (IMG_UINT64 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psDmaTransferIN->ui32NumDMAs * sizeof(IMG_UINT64);
+       }
+
+       /* Copy the data over */
+       if (psDmaTransferIN->ui32NumDMAs * sizeof(IMG_UINT64) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui64AddressInt, (const void __user *)psDmaTransferIN->pui64Address,
+                    psDmaTransferIN->ui32NumDMAs * sizeof(IMG_UINT64)) != PVRSRV_OK)
+               {
+                       psDmaTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto DmaTransfer_exit;
+               }
+       }
+       if (psDmaTransferIN->ui32NumDMAs != 0)
+       {
+               uiOffsetInt =
+                   (IMG_DEVMEM_OFFSET_T *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psDmaTransferIN->ui32NumDMAs * sizeof(IMG_DEVMEM_OFFSET_T);
+       }
+
+       /* Copy the data over */
+       if (psDmaTransferIN->ui32NumDMAs * sizeof(IMG_DEVMEM_OFFSET_T) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiOffsetInt, (const void __user *)psDmaTransferIN->puiOffset,
+                    psDmaTransferIN->ui32NumDMAs * sizeof(IMG_DEVMEM_OFFSET_T)) != PVRSRV_OK)
+               {
+                       psDmaTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto DmaTransfer_exit;
+               }
+       }
+       if (psDmaTransferIN->ui32NumDMAs != 0)
+       {
+               uiSizeInt = (IMG_DEVMEM_SIZE_T *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psDmaTransferIN->ui32NumDMAs * sizeof(IMG_DEVMEM_SIZE_T);
+       }
+
+       /* Copy the data over */
+       if (psDmaTransferIN->ui32NumDMAs * sizeof(IMG_DEVMEM_SIZE_T) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiSizeInt, (const void __user *)psDmaTransferIN->puiSize,
+                    psDmaTransferIN->ui32NumDMAs * sizeof(IMG_DEVMEM_SIZE_T)) != PVRSRV_OK)
+               {
+                       psDmaTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto DmaTransfer_exit;
+               }
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < psDmaTransferIN->ui32NumDMAs; i++)
+               {
+                       /* Look up the address from the handle */
+                       psDmaTransferOUT->eError =
+                           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                                      (void **)&psPMRInt[i],
+                                                      hPMRInt2[i],
+                                                      PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+                       if (unlikely(psDmaTransferOUT->eError != PVRSRV_OK))
+                       {
+                               UnlockHandle(psConnection->psHandleBase);
+                               goto DmaTransfer_exit;
+                       }
+               }
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psDmaTransferOUT->eError =
+           DmaTransfer(psConnection, OSGetDevNode(psConnection),
+                       psDmaTransferIN->ui32NumDMAs,
+                       psPMRInt,
+                       ui64AddressInt,
+                       uiOffsetInt,
+                       uiSizeInt, psDmaTransferIN->ui32uiFlags, psDmaTransferIN->hUpdateTimeline);
+
+DmaTransfer_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       if (hPMRInt2)
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < psDmaTransferIN->ui32NumDMAs; i++)
+               {
+
+                       /* Unreference the previously looked up handle */
+                       if (psPMRInt[i])
+                       {
+                               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                                           hPMRInt2[i],
+                                                           PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+                       }
+               }
+       }
+       /* Release now we have cleaned up the looked-up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psDmaTransferOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDmaSparseMappingTable(IMG_UINT32 ui32DispatchTableEntry,
+                                 IMG_UINT8 * psDmaSparseMappingTableIN_UI8,
+                                 IMG_UINT8 * psDmaSparseMappingTableOUT_UI8,
+                                 CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DMASPARSEMAPPINGTABLE *psDmaSparseMappingTableIN =
+           (PVRSRV_BRIDGE_IN_DMASPARSEMAPPINGTABLE *)
+           IMG_OFFSET_ADDR(psDmaSparseMappingTableIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DMASPARSEMAPPINGTABLE *psDmaSparseMappingTableOUT =
+           (PVRSRV_BRIDGE_OUT_DMASPARSEMAPPINGTABLE *)
+           IMG_OFFSET_ADDR(psDmaSparseMappingTableOUT_UI8, 0);
+
+       IMG_HANDLE hPMR = psDmaSparseMappingTableIN->hPMR;
+       PMR *psPMRInt = NULL;
+       IMG_BOOL *pbTableInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psDmaSparseMappingTableIN->ui32SizeInPages * sizeof(IMG_BOOL)) + 0;
+
+       if (psDmaSparseMappingTableIN->ui32SizeInPages > 32)
+       {
+               psDmaSparseMappingTableOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto DmaSparseMappingTable_exit;
+       }
+
+       psDmaSparseMappingTableOUT->pbTable = psDmaSparseMappingTableIN->pbTable;
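+
+       /* The OUT structure echoes the caller's pbTable pointer so that the
+        * table produced by DmaSparseMappingTable() can be copied back to user
+        * space with OSCopyToUser() further down.
+        */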
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psDmaSparseMappingTableOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto DmaSparseMappingTable_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psDmaSparseMappingTableIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDmaSparseMappingTableIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psDmaSparseMappingTableOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto DmaSparseMappingTable_exit;
+                       }
+               }
+       }
+
+       if (psDmaSparseMappingTableIN->ui32SizeInPages != 0)
+       {
+               pbTableInt = (IMG_BOOL *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psDmaSparseMappingTableIN->ui32SizeInPages * sizeof(IMG_BOOL);
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psDmaSparseMappingTableOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRInt,
+                                      hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psDmaSparseMappingTableOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DmaSparseMappingTable_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psDmaSparseMappingTableOUT->eError =
+           DmaSparseMappingTable(psPMRInt,
+                                 psDmaSparseMappingTableIN->uiOffset,
+                                 psDmaSparseMappingTableIN->ui32SizeInPages, pbTableInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psDmaSparseMappingTableOUT->eError != PVRSRV_OK))
+       {
+               goto DmaSparseMappingTable_exit;
+       }
+
+       /* If dest ptr is non-null and we have data to copy */
+       if ((pbTableInt) && ((psDmaSparseMappingTableIN->ui32SizeInPages * sizeof(IMG_BOOL)) > 0))
+       {
+               if (unlikely
+                   (OSCopyToUser
+                    (NULL, (void __user *)psDmaSparseMappingTableOUT->pbTable, pbTableInt,
+                     (psDmaSparseMappingTableIN->ui32SizeInPages * sizeof(IMG_BOOL))) !=
+                    PVRSRV_OK))
+               {
+                       psDmaSparseMappingTableOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto DmaSparseMappingTable_exit;
+               }
+       }
+
+DmaSparseMappingTable_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up the looked-up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psDmaSparseMappingTableOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDmaDeviceParams(IMG_UINT32 ui32DispatchTableEntry,
+                           IMG_UINT8 * psDmaDeviceParamsIN_UI8,
+                           IMG_UINT8 * psDmaDeviceParamsOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DMADEVICEPARAMS *psDmaDeviceParamsIN =
+           (PVRSRV_BRIDGE_IN_DMADEVICEPARAMS *) IMG_OFFSET_ADDR(psDmaDeviceParamsIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DMADEVICEPARAMS *psDmaDeviceParamsOUT =
+           (PVRSRV_BRIDGE_OUT_DMADEVICEPARAMS *) IMG_OFFSET_ADDR(psDmaDeviceParamsOUT_UI8, 0);
+
+       PVR_UNREFERENCED_PARAMETER(psDmaDeviceParamsIN);
+
+       psDmaDeviceParamsOUT->eError =
+           DmaDeviceParams(psConnection, OSGetDevNode(psConnection),
+                           &psDmaDeviceParamsOUT->ui32DmaBuffAlign,
+                           &psDmaDeviceParamsOUT->ui32DmaTransferMult);
+
+       return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitDMABridge(void);
+void DeinitDMABridge(void);
+
+/*
+ * Register all DMA functions with services
+ */
+PVRSRV_ERROR InitDMABridge(void)
+{
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_DMA, PVRSRV_BRIDGE_DMA_DMATRANSFER,
+                             PVRSRVBridgeDmaTransfer, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_DMA, PVRSRV_BRIDGE_DMA_DMASPARSEMAPPINGTABLE,
+                             PVRSRVBridgeDmaSparseMappingTable, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_DMA, PVRSRV_BRIDGE_DMA_DMADEVICEPARAMS,
+                             PVRSRVBridgeDmaDeviceParams, NULL);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all DMA functions with services
+ */
+void DeinitDMABridge(void)
+{
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMA, PVRSRV_BRIDGE_DMA_DMATRANSFER);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMA, PVRSRV_BRIDGE_DMA_DMASPARSEMAPPINGTABLE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMA, PVRSRV_BRIDGE_DMA_DMADEVICEPARAMS);
+
+}
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/dmabuf_bridge/common_dmabuf_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/dmabuf_bridge/common_dmabuf_bridge.h
new file mode 100644 (file)
index 0000000..7547d9f
--- /dev/null
@@ -0,0 +1,150 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for dmabuf
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for dmabuf
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_DMABUF_BRIDGE_H
+#define COMMON_DMABUF_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pvrsrv_memallocflags.h"
+
+#define PVRSRV_BRIDGE_DMABUF_CMD_FIRST                 0
+#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF                       PVRSRV_BRIDGE_DMABUF_CMD_FIRST+0
+#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUFLOCKED                 PVRSRV_BRIDGE_DMABUF_CMD_FIRST+1
+#define PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF                       PVRSRV_BRIDGE_DMABUF_CMD_FIRST+2
+#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF                 PVRSRV_BRIDGE_DMABUF_CMD_FIRST+3
+#define PVRSRV_BRIDGE_DMABUF_CMD_LAST                  (PVRSRV_BRIDGE_DMABUF_CMD_FIRST+3)
+
+/*******************************************
+            PhysmemImportDmaBuf
+ *******************************************/
+
+/* Bridge in structure for PhysmemImportDmaBuf */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF_TAG
+{
+       const IMG_CHAR *puiName;
+       IMG_INT ifd;
+       IMG_UINT32 ui32NameSize;
+       PVRSRV_MEMALLOCFLAGS_T uiFlags;
+} __packed PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF;
+
+/* Bridge out structure for PhysmemImportDmaBuf */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF_TAG
+{
+       IMG_DEVMEM_ALIGN_T uiAlign;
+       IMG_DEVMEM_SIZE_T uiSize;
+       IMG_HANDLE hPMRPtr;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF;
+
+/*******************************************
+            PhysmemImportDmaBufLocked
+ *******************************************/
+
+/* Bridge in structure for PhysmemImportDmaBufLocked */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUFLOCKED_TAG
+{
+       const IMG_CHAR *puiName;
+       IMG_INT ifd;
+       IMG_UINT32 ui32NameSize;
+       PVRSRV_MEMALLOCFLAGS_T uiFlags;
+} __packed PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUFLOCKED;
+
+/* Bridge out structure for PhysmemImportDmaBufLocked */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUFLOCKED_TAG
+{
+       IMG_DEVMEM_ALIGN_T uiAlign;
+       IMG_DEVMEM_SIZE_T uiSize;
+       IMG_HANDLE hPMRPtr;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUFLOCKED;
+
+/*******************************************
+            PhysmemExportDmaBuf
+ *******************************************/
+
+/* Bridge in structure for PhysmemExportDmaBuf */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF_TAG
+{
+       IMG_HANDLE hPMR;
+} __packed PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF;
+
+/* Bridge out structure for PhysmemExportDmaBuf */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF_TAG
+{
+       PVRSRV_ERROR eError;
+       IMG_INT iFd;
+} __packed PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF;
+
+/*******************************************
+            PhysmemImportSparseDmaBuf
+ *******************************************/
+
+/* Bridge in structure for PhysmemImportSparseDmaBuf */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF_TAG
+{
+       IMG_DEVMEM_SIZE_T uiChunkSize;
+       IMG_UINT32 *pui32MappingTable;
+       const IMG_CHAR *puiName;
+       IMG_INT ifd;
+       IMG_UINT32 ui32NameSize;
+       IMG_UINT32 ui32NumPhysChunks;
+       IMG_UINT32 ui32NumVirtChunks;
+       PVRSRV_MEMALLOCFLAGS_T uiFlags;
+} __packed PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF;
+
+/* Bridge out structure for PhysmemImportSparseDmaBuf */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF_TAG
+{
+       IMG_DEVMEM_ALIGN_T uiAlign;
+       IMG_DEVMEM_SIZE_T uiSize;
+       IMG_HANDLE hPMRPtr;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF;
+
+#endif /* COMMON_DMABUF_BRIDGE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/dmabuf_bridge/server_dmabuf_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/dmabuf_bridge/server_dmabuf_bridge.c
new file mode 100644 (file)
index 0000000..07851de
--- /dev/null
@@ -0,0 +1,694 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for dmabuf
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for dmabuf
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "physmem_dmabuf.h"
+#include "pmr.h"
+
+#include "common_dmabuf_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
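+/* Handle-release callback registered with PVRSRVAllocHandleUnlocked() below;
+ * it unrefs the PMR when the hPMRPtr handle created for the import is
+ * destroyed.
+ */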
+static PVRSRV_ERROR _PhysmemImportDmaBufpsPMRPtrIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = PMRUnrefPMR((PMR *) pvData);
+       return eError;
+}
+
+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
+             "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgePhysmemImportDmaBuf(IMG_UINT32 ui32DispatchTableEntry,
+                               IMG_UINT8 * psPhysmemImportDmaBufIN_UI8,
+                               IMG_UINT8 * psPhysmemImportDmaBufOUT_UI8,
+                               CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF *psPhysmemImportDmaBufIN =
+           (PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF *) IMG_OFFSET_ADDR(psPhysmemImportDmaBufIN_UI8,
+                                                                    0);
+       PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF *psPhysmemImportDmaBufOUT =
+           (PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF *) IMG_OFFSET_ADDR(psPhysmemImportDmaBufOUT_UI8,
+                                                                     0);
+
+       IMG_CHAR *uiNameInt = NULL;
+       PMR *psPMRPtrInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) + 0;
+
+       if (unlikely(psPhysmemImportDmaBufIN->ui32NameSize > DEVMEM_ANNOTATION_MAX_LEN))
+       {
+               psPhysmemImportDmaBufOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto PhysmemImportDmaBuf_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psPhysmemImportDmaBufOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto PhysmemImportDmaBuf_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psPhysmemImportDmaBufIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysmemImportDmaBufIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psPhysmemImportDmaBufOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto PhysmemImportDmaBuf_exit;
+                       }
+               }
+       }
+
+       if (psPhysmemImportDmaBufIN->ui32NameSize != 0)
+       {
+               uiNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiNameInt, (const void __user *)psPhysmemImportDmaBufIN->puiName,
+                    psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psPhysmemImportDmaBufOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto PhysmemImportDmaBuf_exit;
+               }
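+               /* Force NUL termination of the copied name, regardless of
+                * what the caller supplied.
+                */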
+               ((IMG_CHAR *) uiNameInt)[(psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR))
+                                        - 1] = '\0';
+       }
+
+       psPhysmemImportDmaBufOUT->eError =
+           PhysmemImportDmaBuf(psConnection, OSGetDevNode(psConnection),
+                               psPhysmemImportDmaBufIN->ifd,
+                               psPhysmemImportDmaBufIN->uiFlags,
+                               psPhysmemImportDmaBufIN->ui32NameSize,
+                               uiNameInt,
+                               &psPMRPtrInt,
+                               &psPhysmemImportDmaBufOUT->uiSize,
+                               &psPhysmemImportDmaBufOUT->uiAlign);
+       /* Exit early if bridged call fails */
+       if (unlikely(psPhysmemImportDmaBufOUT->eError != PVRSRV_OK))
+       {
+               goto PhysmemImportDmaBuf_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psPhysmemImportDmaBufOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                                                    &psPhysmemImportDmaBufOUT->
+                                                                    hPMRPtr, (void *)psPMRPtrInt,
+                                                                    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+                                                                    PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                                                    (PFN_HANDLE_RELEASE) &
+                                                                    _PhysmemImportDmaBufpsPMRPtrIntRelease);
+       if (unlikely(psPhysmemImportDmaBufOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto PhysmemImportDmaBuf_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+PhysmemImportDmaBuf_exit:
+
+       if (psPhysmemImportDmaBufOUT->eError != PVRSRV_OK)
+       {
+               if (psPMRPtrInt)
+               {
+                       LockHandle(KERNEL_HANDLE_BASE);
+                       PMRUnrefPMR(psPMRPtrInt);
+                       UnlockHandle(KERNEL_HANDLE_BASE);
+               }
+       }
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psPhysmemImportDmaBufOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static PVRSRV_ERROR _PhysmemImportDmaBufLockedpsPMRPtrIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = PMRUnrefUnlockPMR((PMR *) pvData);
+       return eError;
+}
+
+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
+             "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgePhysmemImportDmaBufLocked(IMG_UINT32 ui32DispatchTableEntry,
+                                     IMG_UINT8 * psPhysmemImportDmaBufLockedIN_UI8,
+                                     IMG_UINT8 * psPhysmemImportDmaBufLockedOUT_UI8,
+                                     CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUFLOCKED *psPhysmemImportDmaBufLockedIN =
+           (PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUFLOCKED *)
+           IMG_OFFSET_ADDR(psPhysmemImportDmaBufLockedIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUFLOCKED *psPhysmemImportDmaBufLockedOUT =
+           (PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUFLOCKED *)
+           IMG_OFFSET_ADDR(psPhysmemImportDmaBufLockedOUT_UI8, 0);
+
+       IMG_CHAR *uiNameInt = NULL;
+       PMR *psPMRPtrInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR)) + 0;
+
+       if (unlikely(psPhysmemImportDmaBufLockedIN->ui32NameSize > DEVMEM_ANNOTATION_MAX_LEN))
+       {
+               psPhysmemImportDmaBufLockedOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto PhysmemImportDmaBufLocked_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psPhysmemImportDmaBufLockedOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto PhysmemImportDmaBufLocked_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psPhysmemImportDmaBufLockedIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysmemImportDmaBufLockedIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psPhysmemImportDmaBufLockedOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto PhysmemImportDmaBufLocked_exit;
+                       }
+               }
+       }
+
+       if (psPhysmemImportDmaBufLockedIN->ui32NameSize != 0)
+       {
+               uiNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiNameInt, (const void __user *)psPhysmemImportDmaBufLockedIN->puiName,
+                    psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psPhysmemImportDmaBufLockedOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto PhysmemImportDmaBufLocked_exit;
+               }
+               ((IMG_CHAR *)
+                uiNameInt)[(psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR)) - 1] =
+       '\0';
+       }
+
+       psPhysmemImportDmaBufLockedOUT->eError =
+           PhysmemImportDmaBufLocked(psConnection, OSGetDevNode(psConnection),
+                                     psPhysmemImportDmaBufLockedIN->ifd,
+                                     psPhysmemImportDmaBufLockedIN->uiFlags,
+                                     psPhysmemImportDmaBufLockedIN->ui32NameSize,
+                                     uiNameInt,
+                                     &psPMRPtrInt,
+                                     &psPhysmemImportDmaBufLockedOUT->uiSize,
+                                     &psPhysmemImportDmaBufLockedOUT->uiAlign);
+       /* Exit early if bridged call fails */
+       if (unlikely(psPhysmemImportDmaBufLockedOUT->eError != PVRSRV_OK))
+       {
+               goto PhysmemImportDmaBufLocked_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psPhysmemImportDmaBufLockedOUT->eError =
+           PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                     &psPhysmemImportDmaBufLockedOUT->hPMRPtr, (void *)psPMRPtrInt,
+                                     PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+                                     PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                     (PFN_HANDLE_RELEASE) &
+                                     _PhysmemImportDmaBufLockedpsPMRPtrIntRelease);
+       if (unlikely(psPhysmemImportDmaBufLockedOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto PhysmemImportDmaBufLocked_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+PhysmemImportDmaBufLocked_exit:
+
+       if (psPhysmemImportDmaBufLockedOUT->eError != PVRSRV_OK)
+       {
+               if (psPMRPtrInt)
+               {
+                       LockHandle(KERNEL_HANDLE_BASE);
+                       PMRUnrefUnlockPMR(psPMRPtrInt);
+                       UnlockHandle(KERNEL_HANDLE_BASE);
+               }
+       }
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psPhysmemImportDmaBufLockedOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePhysmemExportDmaBuf(IMG_UINT32 ui32DispatchTableEntry,
+                               IMG_UINT8 * psPhysmemExportDmaBufIN_UI8,
+                               IMG_UINT8 * psPhysmemExportDmaBufOUT_UI8,
+                               CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF *psPhysmemExportDmaBufIN =
+           (PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF *) IMG_OFFSET_ADDR(psPhysmemExportDmaBufIN_UI8,
+                                                                    0);
+       PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF *psPhysmemExportDmaBufOUT =
+           (PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF *) IMG_OFFSET_ADDR(psPhysmemExportDmaBufOUT_UI8,
+                                                                     0);
+
+       IMG_HANDLE hPMR = psPhysmemExportDmaBufIN->hPMR;
+       PMR *psPMRInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psPhysmemExportDmaBufOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRInt,
+                                      hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psPhysmemExportDmaBufOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto PhysmemExportDmaBuf_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psPhysmemExportDmaBufOUT->eError =
+           PhysmemExportDmaBuf(psConnection, OSGetDevNode(psConnection),
+                               psPMRInt, &psPhysmemExportDmaBufOUT->iFd);
+
+PhysmemExportDmaBuf_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up the looked-up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static PVRSRV_ERROR _PhysmemImportSparseDmaBufpsPMRPtrIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = PMRUnrefPMR((PMR *) pvData);
+       return eError;
+}
+
+static_assert(PMR_MAX_SUPPORTED_PAGE_COUNT <= IMG_UINT32_MAX,
+             "PMR_MAX_SUPPORTED_PAGE_COUNT must not be larger than IMG_UINT32_MAX");
+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
+             "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgePhysmemImportSparseDmaBuf(IMG_UINT32 ui32DispatchTableEntry,
+                                     IMG_UINT8 * psPhysmemImportSparseDmaBufIN_UI8,
+                                     IMG_UINT8 * psPhysmemImportSparseDmaBufOUT_UI8,
+                                     CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF *psPhysmemImportSparseDmaBufIN =
+           (PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF *)
+           IMG_OFFSET_ADDR(psPhysmemImportSparseDmaBufIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF *psPhysmemImportSparseDmaBufOUT =
+           (PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF *)
+           IMG_OFFSET_ADDR(psPhysmemImportSparseDmaBufOUT_UI8, 0);
+
+       IMG_UINT32 *ui32MappingTableInt = NULL;
+       IMG_CHAR *uiNameInt = NULL;
+       PMR *psPMRPtrInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) +
+           ((IMG_UINT64) psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) + 0;
+
+       if (unlikely
+           (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks > PMR_MAX_SUPPORTED_PAGE_COUNT))
+       {
+               psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto PhysmemImportSparseDmaBuf_exit;
+       }
+
+       if (unlikely(psPhysmemImportSparseDmaBufIN->ui32NameSize > DEVMEM_ANNOTATION_MAX_LEN))
+       {
+               psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto PhysmemImportSparseDmaBuf_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto PhysmemImportSparseDmaBuf_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psPhysmemImportSparseDmaBufIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysmemImportSparseDmaBufIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto PhysmemImportSparseDmaBuf_exit;
+                       }
+               }
+       }
+
+       if (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks != 0)
+       {
+               ui32MappingTableInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32MappingTableInt,
+                    (const void __user *)psPhysmemImportSparseDmaBufIN->pui32MappingTable,
+                    psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) !=
+                   PVRSRV_OK)
+               {
+                       psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto PhysmemImportSparseDmaBuf_exit;
+               }
+       }
+       if (psPhysmemImportSparseDmaBufIN->ui32NameSize != 0)
+       {
+               uiNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiNameInt, (const void __user *)psPhysmemImportSparseDmaBufIN->puiName,
+                    psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto PhysmemImportSparseDmaBuf_exit;
+               }
+               /* Guarantee NUL-termination of the user-supplied name string */
+               uiNameInt[(psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) - 1] =
+                   '\0';
+       }
+
+       psPhysmemImportSparseDmaBufOUT->eError =
+           PhysmemImportSparseDmaBuf(psConnection, OSGetDevNode(psConnection),
+                                     psPhysmemImportSparseDmaBufIN->ifd,
+                                     psPhysmemImportSparseDmaBufIN->uiFlags,
+                                     psPhysmemImportSparseDmaBufIN->uiChunkSize,
+                                     psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks,
+                                     psPhysmemImportSparseDmaBufIN->ui32NumVirtChunks,
+                                     ui32MappingTableInt,
+                                     psPhysmemImportSparseDmaBufIN->ui32NameSize,
+                                     uiNameInt,
+                                     &psPMRPtrInt,
+                                     &psPhysmemImportSparseDmaBufOUT->uiSize,
+                                     &psPhysmemImportSparseDmaBufOUT->uiAlign);
+       /* Exit early if bridged call fails */
+       if (unlikely(psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK))
+       {
+               goto PhysmemImportSparseDmaBuf_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psPhysmemImportSparseDmaBufOUT->eError =
+           PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                     &psPhysmemImportSparseDmaBufOUT->hPMRPtr, (void *)psPMRPtrInt,
+                                     PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+                                     PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                     (PFN_HANDLE_RELEASE) &
+                                     _PhysmemImportSparseDmaBufpsPMRPtrIntRelease);
+       if (unlikely(psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto PhysmemImportSparseDmaBuf_exit;
+       }
+
+       /* Release the lock now that the handles have been created. */
+       UnlockHandle(psConnection->psHandleBase);
+
+PhysmemImportSparseDmaBuf_exit:
+
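+       /* If anything failed after the import succeeded, drop the PMR
+        * reference returned by PhysmemImportSparseDmaBuf() so it is not
+        * leaked.
+        */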
+       if (psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK)
+       {
+               if (psPMRPtrInt)
+               {
+                       LockHandle(KERNEL_HANDLE_BASE);
+                       PMRUnrefPMR(psPMRPtrInt);
+                       UnlockHandle(KERNEL_HANDLE_BASE);
+               }
+       }
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psPhysmemImportSparseDmaBufOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitDMABUFBridge(void);
+void DeinitDMABUFBridge(void);
+
+/*
+ * Register all DMABUF functions with services
+ */
+PVRSRV_ERROR InitDMABUFBridge(void)
+{
+
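+       /* Note that the DMABUF entries are registered without a per-bridge
+        * lock (the final SetDispatchTableEntry() argument is NULL), unlike,
+        * for example, the HTBUFFER bridge, which passes pHTBUFFERBridgeLock.
+        */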
+       SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF,
+                             PVRSRVBridgePhysmemImportDmaBuf, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUFLOCKED,
+                             PVRSRVBridgePhysmemImportDmaBufLocked, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF,
+                             PVRSRVBridgePhysmemExportDmaBuf, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF,
+                             PVRSRVBridgePhysmemImportSparseDmaBuf, NULL);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all DMABUF functions from services
+ */
+void DeinitDMABUFBridge(void)
+{
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF,
+                               PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUFLOCKED);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF,
+                               PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF);
+
+}
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/htbuffer_bridge/client_htbuffer_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/htbuffer_bridge/client_htbuffer_bridge.h
new file mode 100644
index 0000000..b3514ea
--- /dev/null
@@ -0,0 +1,71 @@
+/*******************************************************************************
+@File
+@Title          Client bridge header for htbuffer
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for htbuffer
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_HTBUFFER_BRIDGE_H
+#define CLIENT_HTBUFFER_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_htbuffer_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR BridgeHTBControl(IMG_HANDLE hBridge,
+                                          IMG_UINT32 ui32NumGroups,
+                                          IMG_UINT32 * pui32GroupEnable,
+                                          IMG_UINT32 ui32LogLevel,
+                                          IMG_UINT32 ui32EnablePID,
+                                          IMG_UINT32 ui32LogMode, IMG_UINT32 ui32OpMode);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeHTBLog(IMG_HANDLE hBridge,
+                                      IMG_UINT32 ui32PID,
+                                      IMG_UINT32 ui32TID,
+                                      IMG_UINT64 ui64TimeStamp,
+                                      IMG_UINT32 ui32SF,
+                                      IMG_UINT32 ui32NumArgs, IMG_UINT32 * pui32Args);
+
+#endif /* CLIENT_HTBUFFER_BRIDGE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/htbuffer_bridge/client_htbuffer_direct_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/htbuffer_bridge/client_htbuffer_direct_bridge.c
new file mode 100644
index 0000000..9c58331
--- /dev/null
@@ -0,0 +1,85 @@
+/*******************************************************************************
+@File
+@Title          Direct client bridge for htbuffer
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the client side of the bridge for htbuffer
+                which is used in calls from Server context.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_htbuffer_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "devicemem_typedefs.h"
+#include "htbuffer_types.h"
+
+#include "htbserver.h"
+
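+/* In the direct-bridge build these client entry points call the kernel-mode
+ * HTB functions in the same address space; the hBridge handle is unused.
+ */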
+IMG_INTERNAL PVRSRV_ERROR BridgeHTBControl(IMG_HANDLE hBridge,
+                                          IMG_UINT32 ui32NumGroups,
+                                          IMG_UINT32 * pui32GroupEnable,
+                                          IMG_UINT32 ui32LogLevel,
+                                          IMG_UINT32 ui32EnablePID,
+                                          IMG_UINT32 ui32LogMode, IMG_UINT32 ui32OpMode)
+{
+       PVRSRV_ERROR eError;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       eError =
+           HTBControlKM(ui32NumGroups,
+                        pui32GroupEnable, ui32LogLevel, ui32EnablePID, ui32LogMode, ui32OpMode);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeHTBLog(IMG_HANDLE hBridge,
+                                      IMG_UINT32 ui32PID,
+                                      IMG_UINT32 ui32TID,
+                                      IMG_UINT64 ui64TimeStamp,
+                                      IMG_UINT32 ui32SF,
+                                      IMG_UINT32 ui32NumArgs, IMG_UINT32 * pui32Args)
+{
+       PVRSRV_ERROR eError;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       eError = HTBLogKM(ui32PID, ui32TID, ui64TimeStamp, ui32SF, ui32NumArgs, pui32Args);
+
+       return eError;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/htbuffer_bridge/common_htbuffer_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/htbuffer_bridge/common_htbuffer_bridge.h
new file mode 100644
index 0000000..69a406b
--- /dev/null
@@ -0,0 +1,104 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for htbuffer
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for htbuffer
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_HTBUFFER_BRIDGE_H
+#define COMMON_HTBUFFER_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+#include "htbuffer_types.h"
+
+#define PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST                       0
+#define PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL                      PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+0
+#define PVRSRV_BRIDGE_HTBUFFER_HTBLOG                  PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+1
+#define PVRSRV_BRIDGE_HTBUFFER_CMD_LAST                        (PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+1)
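+
+/* Command identifiers are expressed as offsets from
+ * PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST; CMD_LAST names the final command in this
+ * bridge.
+ */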
+
+/*******************************************
+            HTBControl
+ *******************************************/
+
+/* Bridge in structure for HTBControl */
+typedef struct PVRSRV_BRIDGE_IN_HTBCONTROL_TAG
+{
+       IMG_UINT32 *pui32GroupEnable;
+       IMG_UINT32 ui32EnablePID;
+       IMG_UINT32 ui32LogLevel;
+       IMG_UINT32 ui32LogMode;
+       IMG_UINT32 ui32NumGroups;
+       IMG_UINT32 ui32OpMode;
+} __packed PVRSRV_BRIDGE_IN_HTBCONTROL;
+
+/* Bridge out structure for HTBControl */
+typedef struct PVRSRV_BRIDGE_OUT_HTBCONTROL_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_HTBCONTROL;
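+
+/* The bridge in/out structures are __packed, keeping the user-mode and
+ * kernel-mode views of each structure free of implicit padding.
+ */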
+
+/*******************************************
+            HTBLog
+ *******************************************/
+
+/* Bridge in structure for HTBLog */
+typedef struct PVRSRV_BRIDGE_IN_HTBLOG_TAG
+{
+       IMG_UINT64 ui64TimeStamp;
+       IMG_UINT32 *pui32Args;
+       IMG_UINT32 ui32NumArgs;
+       IMG_UINT32 ui32PID;
+       IMG_UINT32 ui32SF;
+       IMG_UINT32 ui32TID;
+} __packed PVRSRV_BRIDGE_IN_HTBLOG;
+
+/* Bridge out structure for HTBLog */
+typedef struct PVRSRV_BRIDGE_OUT_HTBLOG_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_HTBLOG;
+
+#endif /* COMMON_HTBUFFER_BRIDGE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/htbuffer_bridge/server_htbuffer_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/htbuffer_bridge/server_htbuffer_bridge.c
new file mode 100644
index 0000000..dd81d91
--- /dev/null
@@ -0,0 +1,351 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for htbuffer
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for htbuffer
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "htbserver.h"
+
+#include "common_htbuffer_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#include "lock.h"
+
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+
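+/* The entire HTBUFFER bridge can be compiled out by defining
+ * EXCLUDE_HTBUFFER_BRIDGE; in that case InitHTBUFFERBridge() and
+ * DeinitHTBUFFERBridge() collapse to the no-op macros at the end of this
+ * file.
+ */
+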
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static_assert(HTB_FLAG_NUM_EL <= IMG_UINT32_MAX,
+             "HTB_FLAG_NUM_EL must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeHTBControl(IMG_UINT32 ui32DispatchTableEntry,
+                      IMG_UINT8 * psHTBControlIN_UI8,
+                      IMG_UINT8 * psHTBControlOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_HTBCONTROL *psHTBControlIN =
+           (PVRSRV_BRIDGE_IN_HTBCONTROL *) IMG_OFFSET_ADDR(psHTBControlIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_HTBCONTROL *psHTBControlOUT =
+           (PVRSRV_BRIDGE_OUT_HTBCONTROL *) IMG_OFFSET_ADDR(psHTBControlOUT_UI8, 0);
+
+       IMG_UINT32 *ui32GroupEnableInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32)) + 0;
+
+       if (unlikely(psHTBControlIN->ui32NumGroups > HTB_FLAG_NUM_EL))
+       {
+               psHTBControlOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto HTBControl_exit;
+       }
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psHTBControlOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto HTBControl_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psHTBControlIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psHTBControlIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psHTBControlOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto HTBControl_exit;
+                       }
+               }
+       }
+
+       if (psHTBControlIN->ui32NumGroups != 0)
+       {
+               ui32GroupEnableInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32GroupEnableInt,
+                    (const void __user *)psHTBControlIN->pui32GroupEnable,
+                    psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32)) != PVRSRV_OK)
+               {
+                       psHTBControlOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto HTBControl_exit;
+               }
+       }
+
+       psHTBControlOUT->eError =
+           HTBControlKM(psHTBControlIN->ui32NumGroups,
+                        ui32GroupEnableInt,
+                        psHTBControlIN->ui32LogLevel,
+                        psHTBControlIN->ui32EnablePID,
+                        psHTBControlIN->ui32LogMode, psHTBControlIN->ui32OpMode);
+
+HTBControl_exit:
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psHTBControlOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static_assert(HTB_LOG_MAX_PARAMS <= IMG_UINT32_MAX,
+             "HTB_LOG_MAX_PARAMS must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeHTBLog(IMG_UINT32 ui32DispatchTableEntry,
+                  IMG_UINT8 * psHTBLogIN_UI8,
+                  IMG_UINT8 * psHTBLogOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_HTBLOG *psHTBLogIN =
+           (PVRSRV_BRIDGE_IN_HTBLOG *) IMG_OFFSET_ADDR(psHTBLogIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_HTBLOG *psHTBLogOUT =
+           (PVRSRV_BRIDGE_OUT_HTBLOG *) IMG_OFFSET_ADDR(psHTBLogOUT_UI8, 0);
+
+       IMG_UINT32 *ui32ArgsInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize = ((IMG_UINT64) psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32)) + 0;
+
+       if (unlikely(psHTBLogIN->ui32NumArgs > HTB_LOG_MAX_PARAMS))
+       {
+               psHTBLogOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto HTBLog_exit;
+       }
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psHTBLogOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto HTBLog_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psHTBLogIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psHTBLogIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psHTBLogOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto HTBLog_exit;
+                       }
+               }
+       }
+
+       if (psHTBLogIN->ui32NumArgs != 0)
+       {
+               ui32ArgsInt = (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32ArgsInt, (const void __user *)psHTBLogIN->pui32Args,
+                    psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32)) != PVRSRV_OK)
+               {
+                       psHTBLogOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto HTBLog_exit;
+               }
+       }
+
+       psHTBLogOUT->eError =
+           HTBLogKM(psHTBLogIN->ui32PID,
+                    psHTBLogIN->ui32TID,
+                    psHTBLogIN->ui64TimeStamp,
+                    psHTBLogIN->ui32SF, psHTBLogIN->ui32NumArgs, ui32ArgsInt);
+
+HTBLog_exit:
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psHTBLogOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
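+/* Bridge-level lock handed to SetDispatchTableEntry() for both HTBUFFER
+ * commands; created in InitHTBUFFERBridge() and destroyed in
+ * DeinitHTBUFFERBridge().
+ */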
+static POS_LOCK pHTBUFFERBridgeLock;
+
+#endif /* EXCLUDE_HTBUFFER_BRIDGE */
+
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+PVRSRV_ERROR InitHTBUFFERBridge(void);
+void DeinitHTBUFFERBridge(void);
+
+/*
+ * Register all HTBUFFER functions with services
+ */
+PVRSRV_ERROR InitHTBUFFERBridge(void)
+{
+       PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&pHTBUFFERBridgeLock), "OSLockCreate");
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL,
+                             PVRSRVBridgeHTBControl, pHTBUFFERBridgeLock);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBLOG,
+                             PVRSRVBridgeHTBLog, pHTBUFFERBridgeLock);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all HTBUFFER functions from services
+ */
+void DeinitHTBUFFERBridge(void)
+{
+       OSLockDestroy(pHTBUFFERBridgeLock);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBLOG);
+
+}
+#else /* EXCLUDE_HTBUFFER_BRIDGE */
+/* This bridge is conditional on EXCLUDE_HTBUFFER_BRIDGE: when it is defined,
+ * the dispatch table is not populated with this bridge's functions.
+ */
+#define InitHTBUFFERBridge() \
+       PVRSRV_OK
+
+#define DeinitHTBUFFERBridge()
+
+#endif /* EXCLUDE_HTBUFFER_BRIDGE */
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/mm_bridge/client_mm_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/mm_bridge/client_mm_bridge.h
new file mode 100644
index 0000000..ce172ea
--- /dev/null
@@ -0,0 +1,265 @@
+/*******************************************************************************
+@File
+@Title          Client bridge header for mm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for mm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_MM_BRIDGE_H
+#define CLIENT_MM_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_mm_bridge.h"
+
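+/* Client-side entry points for the mm bridge: PMR export/import and
+ * reference management, physically backed memory allocation, device memory
+ * context/heap management, sparse mappings, and heap configuration and
+ * physheap queries.
+ */
+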
+IMG_INTERNAL PVRSRV_ERROR BridgePMRExportPMR(IMG_HANDLE hBridge,
+                                            IMG_HANDLE hPMR,
+                                            IMG_HANDLE * phPMRExport,
+                                            IMG_UINT64 * pui64Size,
+                                            IMG_UINT32 * pui32Log2Contig,
+                                            IMG_UINT64 * pui64Password);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRUnexportPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMRExport);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRGetUID(IMG_HANDLE hBridge,
+                                         IMG_HANDLE hPMR, IMG_UINT64 * pui64UID);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRMakeLocalImportHandle(IMG_HANDLE hBridge,
+                                                        IMG_HANDLE hBuffer, IMG_HANDLE * phExtMem);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRUnmakeLocalImportHandle(IMG_HANDLE hBridge, IMG_HANDLE hExtMem);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRImportPMR(IMG_HANDLE hBridge,
+                                            IMG_HANDLE hPMRExport,
+                                            IMG_UINT64 ui64uiPassword,
+                                            IMG_UINT64 ui64uiSize,
+                                            IMG_UINT32 ui32uiLog2Contig, IMG_HANDLE * phPMR);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRLocalImportPMR(IMG_HANDLE hBridge,
+                                                 IMG_HANDLE hExtHandle,
+                                                 IMG_HANDLE * phPMR,
+                                                 IMG_DEVMEM_SIZE_T * puiSize,
+                                                 IMG_DEVMEM_ALIGN_T * puiAlign);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRUnrefPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRUnrefUnlockPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge,
+                                                      IMG_DEVMEM_SIZE_T uiSize,
+                                                      IMG_DEVMEM_SIZE_T uiChunkSize,
+                                                      IMG_UINT32 ui32NumPhysChunks,
+                                                      IMG_UINT32 ui32NumVirtChunks,
+                                                      IMG_UINT32 * pui32MappingTable,
+                                                      IMG_UINT32 ui32Log2PageSize,
+                                                      PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                                      IMG_UINT32 ui32AnnotationLength,
+                                                      const IMG_CHAR * puiAnnotation,
+                                                      IMG_PID ui32PID,
+                                                      IMG_HANDLE * phPMRPtr,
+                                                      IMG_UINT32 ui32PDumpFlags,
+                                                      PVRSRV_MEMALLOCFLAGS_T * puiOutFlags);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedLockedPMR(IMG_HANDLE hBridge,
+                                                            IMG_DEVMEM_SIZE_T uiSize,
+                                                            IMG_DEVMEM_SIZE_T uiChunkSize,
+                                                            IMG_UINT32 ui32NumPhysChunks,
+                                                            IMG_UINT32 ui32NumVirtChunks,
+                                                            IMG_UINT32 * pui32MappingTable,
+                                                            IMG_UINT32 ui32Log2PageSize,
+                                                            PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                                            IMG_UINT32 ui32AnnotationLength,
+                                                            const IMG_CHAR * puiAnnotation,
+                                                            IMG_PID ui32PID,
+                                                            IMG_HANDLE * phPMRPtr,
+                                                            IMG_UINT32 ui32PDumpFlags,
+                                                            PVRSRV_MEMALLOCFLAGS_T * puiOutFlags);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntPin(IMG_HANDLE hBridge, IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnpin(IMG_HANDLE hBridge, IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntPinValidate(IMG_HANDLE hBridge,
+                                                    IMG_HANDLE hMapping, IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnpinInvalidate(IMG_HANDLE hBridge,
+                                                        IMG_HANDLE hMapping, IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntCtxCreate(IMG_HANDLE hBridge,
+                                                  IMG_BOOL bbKernelMemoryCtx,
+                                                  IMG_HANDLE * phDevMemServerContext,
+                                                  IMG_HANDLE * phPrivData,
+                                                  IMG_UINT32 * pui32CPUCacheLineSize);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntCtxDestroy(IMG_HANDLE hBridge,
+                                                   IMG_HANDLE hDevmemServerContext);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapCreate(IMG_HANDLE hBridge,
+                                                   IMG_HANDLE hDevmemCtx,
+                                                   IMG_DEV_VIRTADDR sHeapBaseAddr,
+                                                   IMG_DEVMEM_SIZE_T uiHeapLength,
+                                                   IMG_UINT32 ui32Log2DataPageSize,
+                                                   IMG_HANDLE * phDevmemHeapPtr);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapDestroy(IMG_HANDLE hBridge, IMG_HANDLE hDevmemHeap);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntMapPMR(IMG_HANDLE hBridge,
+                                               IMG_HANDLE hDevmemServerHeap,
+                                               IMG_HANDLE hReservation,
+                                               IMG_HANDLE hPMR,
+                                               PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+                                               IMG_HANDLE * phMapping);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPMR(IMG_HANDLE hBridge, IMG_HANDLE hMapping);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntReserveRange(IMG_HANDLE hBridge,
+                                                     IMG_HANDLE hDevmemServerHeap,
+                                                     IMG_DEV_VIRTADDR sAddress,
+                                                     IMG_DEVMEM_SIZE_T uiLength,
+                                                     IMG_HANDLE * phReservation);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnreserveRange(IMG_HANDLE hBridge,
+                                                       IMG_HANDLE hReservation);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeChangeSparseMem(IMG_HANDLE hBridge,
+                                               IMG_HANDLE hSrvDevMemHeap,
+                                               IMG_HANDLE hPMR,
+                                               IMG_UINT32 ui32AllocPageCount,
+                                               IMG_UINT32 * pui32AllocPageIndices,
+                                               IMG_UINT32 ui32FreePageCount,
+                                               IMG_UINT32 * pui32FreePageIndices,
+                                               IMG_UINT32 ui32SparseFlags,
+                                               PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                               IMG_DEV_VIRTADDR sDevVAddr,
+                                               IMG_UINT64 ui64CPUVAddr);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntMapPages(IMG_HANDLE hBridge,
+                                                 IMG_HANDLE hReservation,
+                                                 IMG_HANDLE hPMR,
+                                                 IMG_UINT32 ui32PageCount,
+                                                 IMG_UINT32 ui32PhysicalPgOffset,
+                                                 PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                                 IMG_DEV_VIRTADDR sDevVAddr);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPages(IMG_HANDLE hBridge,
+                                                   IMG_HANDLE hReservation,
+                                                   IMG_DEV_VIRTADDR sDevVAddr,
+                                                   IMG_UINT32 ui32PageCount);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIsVDevAddrValid(IMG_HANDLE hBridge,
+                                                     IMG_HANDLE hDevmemCtx,
+                                                     IMG_DEV_VIRTADDR sAddress);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemFlushDevSLCRange(IMG_HANDLE hBridge,
+                                                      IMG_HANDLE hDevmemCtx,
+                                                      IMG_DEV_VIRTADDR sAddress,
+                                                      IMG_DEVMEM_SIZE_T uiSize,
+                                                      IMG_BOOL bInvalidate);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemInvalidateFBSCTable(IMG_HANDLE hBridge,
+                                                         IMG_HANDLE hDevmemCtx,
+                                                         IMG_UINT64 ui64FBSCEntries);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapConfigCount(IMG_HANDLE hBridge,
+                                                      IMG_UINT32 * pui32NumHeapConfigs);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapCount(IMG_HANDLE hBridge,
+                                                IMG_UINT32 ui32HeapConfigIndex,
+                                                IMG_UINT32 * pui32NumHeaps);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapConfigName(IMG_HANDLE hBridge,
+                                                     IMG_UINT32 ui32HeapConfigIndex,
+                                                     IMG_UINT32 ui32HeapConfigNameBufSz,
+                                                     IMG_CHAR * puiHeapConfigName);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapDetails(IMG_HANDLE hBridge,
+                                                  IMG_UINT32 ui32HeapConfigIndex,
+                                                  IMG_UINT32 ui32HeapIndex,
+                                                  IMG_UINT32 ui32HeapNameBufSz,
+                                                  IMG_CHAR * puiHeapNameOut,
+                                                  IMG_DEV_VIRTADDR * psDevVAddrBase,
+                                                  IMG_DEVMEM_SIZE_T * puiHeapLength,
+                                                  IMG_DEVMEM_SIZE_T * puiReservedRegionLength,
+                                                  IMG_UINT32 * pui32Log2DataPageSizeOut,
+                                                  IMG_UINT32 * pui32Log2ImportAlignmentOut);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge,
+                                                           IMG_HANDLE hDevm,
+                                                           IMG_UINT32 ui32PID, IMG_BOOL bRegister);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeGetMaxPhysHeapCount(IMG_HANDLE hBridge,
+                                                   IMG_UINT32 * pui32PhysHeapCount);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePhysHeapGetMemInfo(IMG_HANDLE hBridge,
+                                                  IMG_UINT32 ui32PhysHeapCount,
+                                                  PVRSRV_PHYS_HEAP * peaPhysHeapID,
+                                                  PHYS_HEAP_MEM_STATS * pasapPhysHeapMemStats);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeGetDefaultPhysicalHeap(IMG_HANDLE hBridge,
+                                                      PVRSRV_PHYS_HEAP * peHeap);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeGetHeapPhysMemUsage(IMG_HANDLE hBridge,
+                                                   IMG_UINT32 ui32PhysHeapCount,
+                                                   PHYS_HEAP_MEM_STATS * pasapPhysHeapMemStats);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemGetFaultAddress(IMG_HANDLE hBridge,
+                                                     IMG_HANDLE hDevmemCtx,
+                                                     IMG_DEV_VIRTADDR * psFaultAddress);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVUpdateOOMStats(IMG_HANDLE hBridge,
+                                                    IMG_UINT32 ui32ui32StatType, IMG_PID ui32pid);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePhysHeapGetMemInfoPkd(IMG_HANDLE hBridge,
+                                                     IMG_UINT32 ui32PhysHeapCount,
+                                                     PVRSRV_PHYS_HEAP * peaPhysHeapID,
+                                                     PHYS_HEAP_MEM_STATS_PKD *
+                                                     psapPhysHeapMemStats);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeGetHeapPhysMemUsagePkd(IMG_HANDLE hBridge,
+                                                      IMG_UINT32 ui32PhysHeapCount,
+                                                      PHYS_HEAP_MEM_STATS_PKD *
+                                                      psapPhysHeapMemStats);
+
+#endif /* CLIENT_MM_BRIDGE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/mm_bridge/client_mm_direct_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/mm_bridge/client_mm_direct_bridge.c
new file mode 100644
index 0000000..958706b
--- /dev/null
@@ -0,0 +1,804 @@
+/*******************************************************************************
+@File
+@Title          Direct client bridge for mm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the client side of the bridge for mm
+                which is used in calls from Server context.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_mm_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "pvrsrv_memallocflags.h"
+#include "pvrsrv_memalloc_physheap.h"
+#include "devicemem_typedefs.h"
+
+#include "pvrsrv_memalloc_physheap.h"
+#include "devicemem.h"
+#include "devicemem_server.h"
+#include "pmr.h"
+#include "devicemem_heapcfg.h"
+#include "physmem.h"
+#include "devicemem_utils.h"
+#include "process_stats.h"
+
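+/* Direct client bridge: each wrapper calls the corresponding server mm
+ * function in the same address space. Where a device is required (for
+ * example BridgePMRImportPMR and BridgePhysmemNewRamBackedPMR) the hBridge
+ * handle carries the PVRSRV_DEVICE_NODE pointer; elsewhere it is unused.
+ */
+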
+IMG_INTERNAL PVRSRV_ERROR BridgePMRExportPMR(IMG_HANDLE hBridge,
+                                            IMG_HANDLE hPMR,
+                                            IMG_HANDLE * phPMRExport,
+                                            IMG_UINT64 * pui64Size,
+                                            IMG_UINT32 * pui32Log2Contig,
+                                            IMG_UINT64 * pui64Password)
+{
+       PVRSRV_ERROR eError;
+       PMR *psPMRInt;
+       PMR_EXPORT *psPMRExportInt = NULL;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psPMRInt = (PMR *) hPMR;
+
+       eError = PMRExportPMR(psPMRInt, &psPMRExportInt, pui64Size, pui32Log2Contig, pui64Password);
+
+       *phPMRExport = psPMRExportInt;
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRUnexportPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMRExport)
+{
+       PVRSRV_ERROR eError;
+       PMR_EXPORT *psPMRExportInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psPMRExportInt = (PMR_EXPORT *) hPMRExport;
+
+       eError = PMRUnexportPMR(psPMRExportInt);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRGetUID(IMG_HANDLE hBridge,
+                                         IMG_HANDLE hPMR, IMG_UINT64 * pui64UID)
+{
+       PVRSRV_ERROR eError;
+       PMR *psPMRInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psPMRInt = (PMR *) hPMR;
+
+       eError = PMRGetUID(psPMRInt, pui64UID);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRMakeLocalImportHandle(IMG_HANDLE hBridge,
+                                                        IMG_HANDLE hBuffer, IMG_HANDLE * phExtMem)
+{
+       PVRSRV_ERROR eError;
+       PMR *psBufferInt;
+       PMR *psExtMemInt = NULL;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psBufferInt = (PMR *) hBuffer;
+
+       eError = PMRMakeLocalImportHandle(psBufferInt, &psExtMemInt);
+
+       *phExtMem = psExtMemInt;
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRUnmakeLocalImportHandle(IMG_HANDLE hBridge, IMG_HANDLE hExtMem)
+{
+       PVRSRV_ERROR eError;
+       PMR *psExtMemInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psExtMemInt = (PMR *) hExtMem;
+
+       eError = PMRUnmakeLocalImportHandle(psExtMemInt);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRImportPMR(IMG_HANDLE hBridge,
+                                            IMG_HANDLE hPMRExport,
+                                            IMG_UINT64 ui64uiPassword,
+                                            IMG_UINT64 ui64uiSize,
+                                            IMG_UINT32 ui32uiLog2Contig, IMG_HANDLE * phPMR)
+{
+       PVRSRV_ERROR eError;
+       PMR_EXPORT *psPMRExportInt;
+       PMR *psPMRInt = NULL;
+
+       psPMRExportInt = (PMR_EXPORT *) hPMRExport;
+
+       eError =
+           PhysmemImportPMR(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                            psPMRExportInt,
+                            ui64uiPassword, ui64uiSize, ui32uiLog2Contig, &psPMRInt);
+
+       *phPMR = psPMRInt;
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRLocalImportPMR(IMG_HANDLE hBridge,
+                                                 IMG_HANDLE hExtHandle,
+                                                 IMG_HANDLE * phPMR,
+                                                 IMG_DEVMEM_SIZE_T * puiSize,
+                                                 IMG_DEVMEM_ALIGN_T * puiAlign)
+{
+       PVRSRV_ERROR eError;
+       PMR *psExtHandleInt;
+       PMR *psPMRInt = NULL;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psExtHandleInt = (PMR *) hExtHandle;
+
+       eError = PMRLocalImportPMR(psExtHandleInt, &psPMRInt, puiSize, puiAlign);
+
+       *phPMR = psPMRInt;
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRUnrefPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR)
+{
+       PVRSRV_ERROR eError;
+       PMR *psPMRInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psPMRInt = (PMR *) hPMR;
+
+       eError = PMRUnrefPMR(psPMRInt);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRUnrefUnlockPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR)
+{
+       PVRSRV_ERROR eError;
+       PMR *psPMRInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psPMRInt = (PMR *) hPMR;
+
+       eError = PMRUnrefUnlockPMR(psPMRInt);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge,
+                                                      IMG_DEVMEM_SIZE_T uiSize,
+                                                      IMG_DEVMEM_SIZE_T uiChunkSize,
+                                                      IMG_UINT32 ui32NumPhysChunks,
+                                                      IMG_UINT32 ui32NumVirtChunks,
+                                                      IMG_UINT32 * pui32MappingTable,
+                                                      IMG_UINT32 ui32Log2PageSize,
+                                                      PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                                      IMG_UINT32 ui32AnnotationLength,
+                                                      const IMG_CHAR * puiAnnotation,
+                                                      IMG_PID ui32PID,
+                                                      IMG_HANDLE * phPMRPtr,
+                                                      IMG_UINT32 ui32PDumpFlags,
+                                                      PVRSRV_MEMALLOCFLAGS_T * puiOutFlags)
+{
+       PVRSRV_ERROR eError;
+       PMR *psPMRPtrInt = NULL;
+
+       eError =
+           PhysmemNewRamBackedPMR_direct(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                                         uiSize,
+                                         uiChunkSize,
+                                         ui32NumPhysChunks,
+                                         ui32NumVirtChunks,
+                                         pui32MappingTable,
+                                         ui32Log2PageSize,
+                                         uiFlags,
+                                         ui32AnnotationLength,
+                                         puiAnnotation,
+                                         ui32PID, &psPMRPtrInt, ui32PDumpFlags, puiOutFlags);
+
+       *phPMRPtr = psPMRPtrInt;
+       return eError;
+}
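+
+/* Illustrative call to BridgePhysmemNewRamBackedPMR above for a plain
+ * (non-sparse) allocation.  This is only a sketch: uiSize, uiFlags, ui32PID,
+ * hBridge and eError are assumed to be in scope, and the flag mask and
+ * annotation are placeholders rather than values taken from the driver:
+ *
+ *   IMG_UINT32 ui32MappingTable = 0;           // one chunk, mapped at index 0
+ *   IMG_HANDLE hPMR = NULL;
+ *   PVRSRV_MEMALLOCFLAGS_T uiOutFlags = 0;
+ *
+ *   eError = BridgePhysmemNewRamBackedPMR(hBridge,
+ *                                         uiSize, uiSize,    // chunk size == total size
+ *                                         1, 1,              // one physical, one virtual chunk
+ *                                         &ui32MappingTable,
+ *                                         12,                // log2 of 4KiB data pages
+ *                                         uiFlags,           // PVRSRV_MEMALLOCFLAG_* mask
+ *                                         sizeof("Example"), "Example",
+ *                                         ui32PID,
+ *                                         &hPMR,
+ *                                         0,                 // no PDump flags
+ *                                         &uiOutFlags);
+ */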
+
+IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedLockedPMR(IMG_HANDLE hBridge,
+                                                            IMG_DEVMEM_SIZE_T uiSize,
+                                                            IMG_DEVMEM_SIZE_T uiChunkSize,
+                                                            IMG_UINT32 ui32NumPhysChunks,
+                                                            IMG_UINT32 ui32NumVirtChunks,
+                                                            IMG_UINT32 * pui32MappingTable,
+                                                            IMG_UINT32 ui32Log2PageSize,
+                                                            PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                                            IMG_UINT32 ui32AnnotationLength,
+                                                            const IMG_CHAR * puiAnnotation,
+                                                            IMG_PID ui32PID,
+                                                            IMG_HANDLE * phPMRPtr,
+                                                            IMG_UINT32 ui32PDumpFlags,
+                                                            PVRSRV_MEMALLOCFLAGS_T * puiOutFlags)
+{
+       PVRSRV_ERROR eError;
+       PMR *psPMRPtrInt = NULL;
+
+       eError =
+           PhysmemNewRamBackedLockedPMR(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                                        uiSize,
+                                        uiChunkSize,
+                                        ui32NumPhysChunks,
+                                        ui32NumVirtChunks,
+                                        pui32MappingTable,
+                                        ui32Log2PageSize,
+                                        uiFlags,
+                                        ui32AnnotationLength,
+                                        puiAnnotation,
+                                        ui32PID, &psPMRPtrInt, ui32PDumpFlags, puiOutFlags);
+
+       *phPMRPtr = psPMRPtrInt;
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntPin(IMG_HANDLE hBridge, IMG_HANDLE hPMR)
+{
+       PVRSRV_ERROR eError;
+       PMR *psPMRInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psPMRInt = (PMR *) hPMR;
+
+       eError = DevmemIntPin(psPMRInt);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnpin(IMG_HANDLE hBridge, IMG_HANDLE hPMR)
+{
+       PVRSRV_ERROR eError;
+       PMR *psPMRInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psPMRInt = (PMR *) hPMR;
+
+       eError = DevmemIntUnpin(psPMRInt);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntPinValidate(IMG_HANDLE hBridge,
+                                                    IMG_HANDLE hMapping, IMG_HANDLE hPMR)
+{
+       PVRSRV_ERROR eError;
+       DEVMEMINT_MAPPING *psMappingInt;
+       PMR *psPMRInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psMappingInt = (DEVMEMINT_MAPPING *) hMapping;
+       psPMRInt = (PMR *) hPMR;
+
+       eError = DevmemIntPinValidate(psMappingInt, psPMRInt);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnpinInvalidate(IMG_HANDLE hBridge,
+                                                        IMG_HANDLE hMapping, IMG_HANDLE hPMR)
+{
+       PVRSRV_ERROR eError;
+       DEVMEMINT_MAPPING *psMappingInt;
+       PMR *psPMRInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psMappingInt = (DEVMEMINT_MAPPING *) hMapping;
+       psPMRInt = (PMR *) hPMR;
+
+       eError = DevmemIntUnpinInvalidate(psMappingInt, psPMRInt);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntCtxCreate(IMG_HANDLE hBridge,
+                                                  IMG_BOOL bbKernelMemoryCtx,
+                                                  IMG_HANDLE * phDevMemServerContext,
+                                                  IMG_HANDLE * phPrivData,
+                                                  IMG_UINT32 * pui32CPUCacheLineSize)
+{
+       PVRSRV_ERROR eError;
+       DEVMEMINT_CTX *psDevMemServerContextInt = NULL;
+       IMG_HANDLE hPrivDataInt = NULL;
+
+       eError =
+           DevmemIntCtxCreate(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                              bbKernelMemoryCtx,
+                              &psDevMemServerContextInt, &hPrivDataInt, pui32CPUCacheLineSize);
+
+       *phDevMemServerContext = psDevMemServerContextInt;
+       *phPrivData = hPrivDataInt;
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntCtxDestroy(IMG_HANDLE hBridge,
+                                                   IMG_HANDLE hDevmemServerContext)
+{
+       PVRSRV_ERROR eError;
+       DEVMEMINT_CTX *psDevmemServerContextInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psDevmemServerContextInt = (DEVMEMINT_CTX *) hDevmemServerContext;
+
+       eError = DevmemIntCtxDestroy(psDevmemServerContextInt);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapCreate(IMG_HANDLE hBridge,
+                                                   IMG_HANDLE hDevmemCtx,
+                                                   IMG_DEV_VIRTADDR sHeapBaseAddr,
+                                                   IMG_DEVMEM_SIZE_T uiHeapLength,
+                                                   IMG_UINT32 ui32Log2DataPageSize,
+                                                   IMG_HANDLE * phDevmemHeapPtr)
+{
+       PVRSRV_ERROR eError;
+       DEVMEMINT_CTX *psDevmemCtxInt;
+       DEVMEMINT_HEAP *psDevmemHeapPtrInt = NULL;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+       eError =
+           DevmemIntHeapCreate(psDevmemCtxInt,
+                               sHeapBaseAddr,
+                               uiHeapLength, ui32Log2DataPageSize, &psDevmemHeapPtrInt);
+
+       *phDevmemHeapPtr = psDevmemHeapPtrInt;
+       return eError;
+}
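+
+/* The usual setup order for device memory goes through this file as: create a
+ * memory context (DevmemIntCtxCreate above), create a heap in it (above),
+ * reserve a device-virtual range on that heap (DevmemIntReserveRange below),
+ * then map a PMR into the reservation (DevmemIntMapPMR below). */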
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapDestroy(IMG_HANDLE hBridge, IMG_HANDLE hDevmemHeap)
+{
+       PVRSRV_ERROR eError;
+       DEVMEMINT_HEAP *psDevmemHeapInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psDevmemHeapInt = (DEVMEMINT_HEAP *) hDevmemHeap;
+
+       eError = DevmemIntHeapDestroy(psDevmemHeapInt);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntMapPMR(IMG_HANDLE hBridge,
+                                               IMG_HANDLE hDevmemServerHeap,
+                                               IMG_HANDLE hReservation,
+                                               IMG_HANDLE hPMR,
+                                               PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+                                               IMG_HANDLE * phMapping)
+{
+       PVRSRV_ERROR eError;
+       DEVMEMINT_HEAP *psDevmemServerHeapInt;
+       DEVMEMINT_RESERVATION *psReservationInt;
+       PMR *psPMRInt;
+       DEVMEMINT_MAPPING *psMappingInt = NULL;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap;
+       psReservationInt = (DEVMEMINT_RESERVATION *) hReservation;
+       psPMRInt = (PMR *) hPMR;
+
+       eError =
+           DevmemIntMapPMR(psDevmemServerHeapInt,
+                           psReservationInt, psPMRInt, uiMapFlags, &psMappingInt);
+
+       *phMapping = psMappingInt;
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPMR(IMG_HANDLE hBridge, IMG_HANDLE hMapping)
+{
+       PVRSRV_ERROR eError;
+       DEVMEMINT_MAPPING *psMappingInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psMappingInt = (DEVMEMINT_MAPPING *) hMapping;
+
+       eError = DevmemIntUnmapPMR(psMappingInt);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntReserveRange(IMG_HANDLE hBridge,
+                                                     IMG_HANDLE hDevmemServerHeap,
+                                                     IMG_DEV_VIRTADDR sAddress,
+                                                     IMG_DEVMEM_SIZE_T uiLength,
+                                                     IMG_HANDLE * phReservation)
+{
+       PVRSRV_ERROR eError;
+       DEVMEMINT_HEAP *psDevmemServerHeapInt;
+       DEVMEMINT_RESERVATION *psReservationInt = NULL;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap;
+
+       eError =
+           DevmemIntReserveRange(psDevmemServerHeapInt, sAddress, uiLength, &psReservationInt);
+
+       *phReservation = psReservationInt;
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnreserveRange(IMG_HANDLE hBridge, IMG_HANDLE hReservation)
+{
+       PVRSRV_ERROR eError;
+       DEVMEMINT_RESERVATION *psReservationInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psReservationInt = (DEVMEMINT_RESERVATION *) hReservation;
+
+       eError = DevmemIntUnreserveRange(psReservationInt);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeChangeSparseMem(IMG_HANDLE hBridge,
+                                               IMG_HANDLE hSrvDevMemHeap,
+                                               IMG_HANDLE hPMR,
+                                               IMG_UINT32 ui32AllocPageCount,
+                                               IMG_UINT32 * pui32AllocPageIndices,
+                                               IMG_UINT32 ui32FreePageCount,
+                                               IMG_UINT32 * pui32FreePageIndices,
+                                               IMG_UINT32 ui32SparseFlags,
+                                               PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                               IMG_DEV_VIRTADDR sDevVAddr, IMG_UINT64 ui64CPUVAddr)
+{
+       PVRSRV_ERROR eError;
+       DEVMEMINT_HEAP *psSrvDevMemHeapInt;
+       PMR *psPMRInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psSrvDevMemHeapInt = (DEVMEMINT_HEAP *) hSrvDevMemHeap;
+       psPMRInt = (PMR *) hPMR;
+
+       eError =
+           DevmemIntChangeSparse(psSrvDevMemHeapInt,
+                                 psPMRInt,
+                                 ui32AllocPageCount,
+                                 pui32AllocPageIndices,
+                                 ui32FreePageCount,
+                                 pui32FreePageIndices,
+                                 ui32SparseFlags, uiFlags, sDevVAddr, ui64CPUVAddr);
+
+       return eError;
+}
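+
+/* ChangeSparseMem passes two page-index lists to DevmemIntChangeSparse: pages
+ * to gain physical backing (alloc) and pages to lose it (free).
+ * ui32SparseFlags selects which of the two operations to apply and whether
+ * the CPU mapping at ui64CPUVAddr should be updated to match. */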
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntMapPages(IMG_HANDLE hBridge,
+                                                 IMG_HANDLE hReservation,
+                                                 IMG_HANDLE hPMR,
+                                                 IMG_UINT32 ui32PageCount,
+                                                 IMG_UINT32 ui32PhysicalPgOffset,
+                                                 PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                                 IMG_DEV_VIRTADDR sDevVAddr)
+{
+       PVRSRV_ERROR eError;
+       DEVMEMINT_RESERVATION *psReservationInt;
+       PMR *psPMRInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psReservationInt = (DEVMEMINT_RESERVATION *) hReservation;
+       psPMRInt = (PMR *) hPMR;
+
+       eError =
+           DevmemIntMapPages(psReservationInt,
+                             psPMRInt, ui32PageCount, ui32PhysicalPgOffset, uiFlags, sDevVAddr);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPages(IMG_HANDLE hBridge,
+                                                   IMG_HANDLE hReservation,
+                                                   IMG_DEV_VIRTADDR sDevVAddr,
+                                                   IMG_UINT32 ui32PageCount)
+{
+       PVRSRV_ERROR eError;
+       DEVMEMINT_RESERVATION *psReservationInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psReservationInt = (DEVMEMINT_RESERVATION *) hReservation;
+
+       eError = DevmemIntUnmapPages(psReservationInt, sDevVAddr, ui32PageCount);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIsVDevAddrValid(IMG_HANDLE hBridge,
+                                                     IMG_HANDLE hDevmemCtx,
+                                                     IMG_DEV_VIRTADDR sAddress)
+{
+       PVRSRV_ERROR eError;
+       DEVMEMINT_CTX *psDevmemCtxInt;
+
+       psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+       eError =
+           DevmemIntIsVDevAddrValid(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                                    psDevmemCtxInt, sAddress);
+
+       return eError;
+}
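+
+/* The next two wrappers are feature-gated: without
+ * RGX_SRV_SLC_RANGEBASED_CFI_SUPPORTED (range-based SLC flush) or
+ * RGX_FEATURE_FBCDC (FBSC invalidation) they compile to stubs that return
+ * PVRSRV_ERROR_NOT_IMPLEMENTED. */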
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemFlushDevSLCRange(IMG_HANDLE hBridge,
+                                                      IMG_HANDLE hDevmemCtx,
+                                                      IMG_DEV_VIRTADDR sAddress,
+                                                      IMG_DEVMEM_SIZE_T uiSize,
+                                                      IMG_BOOL bInvalidate)
+{
+#if defined(RGX_SRV_SLC_RANGEBASED_CFI_SUPPORTED)
+       PVRSRV_ERROR eError;
+       DEVMEMINT_CTX *psDevmemCtxInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+       eError = DevmemIntFlushDevSLCRange(psDevmemCtxInt, sAddress, uiSize, bInvalidate);
+
+       return eError;
+#else
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+       PVR_UNREFERENCED_PARAMETER(hDevmemCtx);
+       PVR_UNREFERENCED_PARAMETER(sAddress);
+       PVR_UNREFERENCED_PARAMETER(uiSize);
+       PVR_UNREFERENCED_PARAMETER(bInvalidate);
+
+       return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemInvalidateFBSCTable(IMG_HANDLE hBridge,
+                                                         IMG_HANDLE hDevmemCtx,
+                                                         IMG_UINT64 ui64FBSCEntries)
+{
+#if defined(RGX_FEATURE_FBCDC)
+       PVRSRV_ERROR eError;
+       DEVMEMINT_CTX *psDevmemCtxInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+       eError = DevmemIntInvalidateFBSCTable(psDevmemCtxInt, ui64FBSCEntries);
+
+       return eError;
+#else
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+       PVR_UNREFERENCED_PARAMETER(hDevmemCtx);
+       PVR_UNREFERENCED_PARAMETER(ui64FBSCEntries);
+
+       return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapConfigCount(IMG_HANDLE hBridge,
+                                                      IMG_UINT32 * pui32NumHeapConfigs)
+{
+       PVRSRV_ERROR eError;
+
+       eError =
+           HeapCfgHeapConfigCount(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                                  pui32NumHeapConfigs);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapCount(IMG_HANDLE hBridge,
+                                                IMG_UINT32 ui32HeapConfigIndex,
+                                                IMG_UINT32 * pui32NumHeaps)
+{
+       PVRSRV_ERROR eError;
+
+       eError =
+           HeapCfgHeapCount(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                            ui32HeapConfigIndex, pui32NumHeaps);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapConfigName(IMG_HANDLE hBridge,
+                                                     IMG_UINT32 ui32HeapConfigIndex,
+                                                     IMG_UINT32 ui32HeapConfigNameBufSz,
+                                                     IMG_CHAR * puiHeapConfigName)
+{
+       PVRSRV_ERROR eError;
+
+       eError =
+           HeapCfgHeapConfigName(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                                 ui32HeapConfigIndex, ui32HeapConfigNameBufSz, puiHeapConfigName);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapDetails(IMG_HANDLE hBridge,
+                                                  IMG_UINT32 ui32HeapConfigIndex,
+                                                  IMG_UINT32 ui32HeapIndex,
+                                                  IMG_UINT32 ui32HeapNameBufSz,
+                                                  IMG_CHAR * puiHeapNameOut,
+                                                  IMG_DEV_VIRTADDR * psDevVAddrBase,
+                                                  IMG_DEVMEM_SIZE_T * puiHeapLength,
+                                                  IMG_DEVMEM_SIZE_T * puiReservedRegionLength,
+                                                  IMG_UINT32 * pui32Log2DataPageSizeOut,
+                                                  IMG_UINT32 * pui32Log2ImportAlignmentOut)
+{
+       PVRSRV_ERROR eError;
+
+       eError =
+           HeapCfgHeapDetails(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                              ui32HeapConfigIndex,
+                              ui32HeapIndex,
+                              ui32HeapNameBufSz,
+                              puiHeapNameOut,
+                              psDevVAddrBase,
+                              puiHeapLength,
+                              puiReservedRegionLength,
+                              pui32Log2DataPageSizeOut, pui32Log2ImportAlignmentOut);
+
+       return eError;
+}
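+
+/* The HeapCfg queries are typically used together: enumerate the heap
+ * configs, count the heaps in a chosen config, then fetch each heap's name
+ * and details (base address, length, page size, alignment) by index. */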
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge,
+                                                           IMG_HANDLE hDevm,
+                                                           IMG_UINT32 ui32PID, IMG_BOOL bRegister)
+{
+       PVRSRV_ERROR eError;
+       DEVMEMINT_CTX *psDevmInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psDevmInt = (DEVMEMINT_CTX *) hDevm;
+
+       eError = DevmemIntRegisterPFNotifyKM(psDevmInt, ui32PID, bRegister);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeGetMaxPhysHeapCount(IMG_HANDLE hBridge,
+                                                   IMG_UINT32 * pui32PhysHeapCount)
+{
+       PVRSRV_ERROR eError;
+
+       eError =
+           PVRSRVGetMaxPhysHeapCountKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                                       pui32PhysHeapCount);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePhysHeapGetMemInfo(IMG_HANDLE hBridge,
+                                                  IMG_UINT32 ui32PhysHeapCount,
+                                                  PVRSRV_PHYS_HEAP * peaPhysHeapID,
+                                                  PHYS_HEAP_MEM_STATS * pasapPhysHeapMemStats)
+{
+       PVRSRV_ERROR eError;
+
+       eError =
+           PVRSRVPhysHeapGetMemInfoKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                                      ui32PhysHeapCount, peaPhysHeapID, pasapPhysHeapMemStats);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeGetDefaultPhysicalHeap(IMG_HANDLE hBridge,
+                                                      PVRSRV_PHYS_HEAP * peHeap)
+{
+       PVRSRV_ERROR eError;
+
+       eError =
+           PVRSRVGetDefaultPhysicalHeapKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), peHeap);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeGetHeapPhysMemUsage(IMG_HANDLE hBridge,
+                                                   IMG_UINT32 ui32PhysHeapCount,
+                                                   PHYS_HEAP_MEM_STATS * pasapPhysHeapMemStats)
+{
+       PVRSRV_ERROR eError;
+
+       eError =
+           PVRSRVGetHeapPhysMemUsageKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                                       ui32PhysHeapCount, pasapPhysHeapMemStats);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemGetFaultAddress(IMG_HANDLE hBridge,
+                                                     IMG_HANDLE hDevmemCtx,
+                                                     IMG_DEV_VIRTADDR * psFaultAddress)
+{
+       PVRSRV_ERROR eError;
+       DEVMEMINT_CTX *psDevmemCtxInt;
+
+       psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+       eError =
+           DevmemIntGetFaultAddress(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                                    psDevmemCtxInt, psFaultAddress);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVUpdateOOMStats(IMG_HANDLE hBridge,
+                                                    IMG_UINT32 ui32ui32StatType, IMG_PID ui32pid)
+{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+       PVRSRV_ERROR eError;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       eError = PVRSRVServerUpdateOOMStats(ui32ui32StatType, ui32pid);
+
+       return eError;
+#else
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+       PVR_UNREFERENCED_PARAMETER(ui32ui32StatType);
+       PVR_UNREFERENCED_PARAMETER(ui32pid);
+
+       return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePhysHeapGetMemInfoPkd(IMG_HANDLE hBridge,
+                                                     IMG_UINT32 ui32PhysHeapCount,
+                                                     PVRSRV_PHYS_HEAP * peaPhysHeapID,
+                                                     PHYS_HEAP_MEM_STATS_PKD *
+                                                     psapPhysHeapMemStats)
+{
+       PVRSRV_ERROR eError;
+
+       eError =
+           PVRSRVPhysHeapGetMemInfoPkdKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                                         ui32PhysHeapCount, peaPhysHeapID, psapPhysHeapMemStats);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeGetHeapPhysMemUsagePkd(IMG_HANDLE hBridge,
+                                                      IMG_UINT32 ui32PhysHeapCount,
+                                                      PHYS_HEAP_MEM_STATS_PKD *
+                                                      psapPhysHeapMemStats)
+{
+       PVRSRV_ERROR eError;
+
+       eError =
+           PVRSRVGetHeapPhysMemUsagePkdKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                                          ui32PhysHeapCount, psapPhysHeapMemStats);
+
+       return eError;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/mm_bridge/common_mm_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/mm_bridge/common_mm_bridge.h
new file mode 100644 (file)
index 0000000..bbb419b
--- /dev/null
@@ -0,0 +1,879 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for mm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for mm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_MM_BRIDGE_H
+#define COMMON_MM_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pvrsrv_memallocflags.h"
+#include "pvrsrv_memalloc_physheap.h"
+#include "devicemem_typedefs.h"
+
+#define PVRSRV_BRIDGE_MM_CMD_FIRST                     0
+#define PVRSRV_BRIDGE_MM_PMREXPORTPMR                  PVRSRV_BRIDGE_MM_CMD_FIRST+0
+#define PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR                        PVRSRV_BRIDGE_MM_CMD_FIRST+1
+#define PVRSRV_BRIDGE_MM_PMRGETUID                     PVRSRV_BRIDGE_MM_CMD_FIRST+2
+#define PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE                      PVRSRV_BRIDGE_MM_CMD_FIRST+3
+#define PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE                    PVRSRV_BRIDGE_MM_CMD_FIRST+4
+#define PVRSRV_BRIDGE_MM_PMRIMPORTPMR                  PVRSRV_BRIDGE_MM_CMD_FIRST+5
+#define PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR                     PVRSRV_BRIDGE_MM_CMD_FIRST+6
+#define PVRSRV_BRIDGE_MM_PMRUNREFPMR                   PVRSRV_BRIDGE_MM_CMD_FIRST+7
+#define PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR                     PVRSRV_BRIDGE_MM_CMD_FIRST+8
+#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR                        PVRSRV_BRIDGE_MM_CMD_FIRST+9
+#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR                  PVRSRV_BRIDGE_MM_CMD_FIRST+10
+#define PVRSRV_BRIDGE_MM_DEVMEMINTPIN                  PVRSRV_BRIDGE_MM_CMD_FIRST+11
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN                        PVRSRV_BRIDGE_MM_CMD_FIRST+12
+#define PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE                  PVRSRV_BRIDGE_MM_CMD_FIRST+13
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE                      PVRSRV_BRIDGE_MM_CMD_FIRST+14
+#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE                    PVRSRV_BRIDGE_MM_CMD_FIRST+15
+#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY                   PVRSRV_BRIDGE_MM_CMD_FIRST+16
+#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE                   PVRSRV_BRIDGE_MM_CMD_FIRST+17
+#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY                  PVRSRV_BRIDGE_MM_CMD_FIRST+18
+#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR                       PVRSRV_BRIDGE_MM_CMD_FIRST+19
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR                     PVRSRV_BRIDGE_MM_CMD_FIRST+20
+#define PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE                 PVRSRV_BRIDGE_MM_CMD_FIRST+21
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE                       PVRSRV_BRIDGE_MM_CMD_FIRST+22
+#define PVRSRV_BRIDGE_MM_CHANGESPARSEMEM                       PVRSRV_BRIDGE_MM_CMD_FIRST+23
+#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES                     PVRSRV_BRIDGE_MM_CMD_FIRST+24
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES                   PVRSRV_BRIDGE_MM_CMD_FIRST+25
+#define PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID                 PVRSRV_BRIDGE_MM_CMD_FIRST+26
+#define PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE                        PVRSRV_BRIDGE_MM_CMD_FIRST+27
+#define PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE                     PVRSRV_BRIDGE_MM_CMD_FIRST+28
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT                        PVRSRV_BRIDGE_MM_CMD_FIRST+29
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT                      PVRSRV_BRIDGE_MM_CMD_FIRST+30
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME                 PVRSRV_BRIDGE_MM_CMD_FIRST+31
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS                    PVRSRV_BRIDGE_MM_CMD_FIRST+32
+#define PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM                   PVRSRV_BRIDGE_MM_CMD_FIRST+33
+#define PVRSRV_BRIDGE_MM_GETMAXPHYSHEAPCOUNT                   PVRSRV_BRIDGE_MM_CMD_FIRST+34
+#define PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO                    PVRSRV_BRIDGE_MM_CMD_FIRST+35
+#define PVRSRV_BRIDGE_MM_GETDEFAULTPHYSICALHEAP                        PVRSRV_BRIDGE_MM_CMD_FIRST+36
+#define PVRSRV_BRIDGE_MM_GETHEAPPHYSMEMUSAGE                   PVRSRV_BRIDGE_MM_CMD_FIRST+37
+#define PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS                 PVRSRV_BRIDGE_MM_CMD_FIRST+38
+#define PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS                  PVRSRV_BRIDGE_MM_CMD_FIRST+39
+#define PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFOPKD                 PVRSRV_BRIDGE_MM_CMD_FIRST+40
+#define PVRSRV_BRIDGE_MM_GETHEAPPHYSMEMUSAGEPKD                        PVRSRV_BRIDGE_MM_CMD_FIRST+41
+#define PVRSRV_BRIDGE_MM_CMD_LAST                      (PVRSRV_BRIDGE_MM_CMD_FIRST+41)
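+
+/* Every command ID above is an offset from PVRSRV_BRIDGE_MM_CMD_FIRST into
+ * the MM bridge dispatch table, and each has a matching IN/OUT structure pair
+ * below.  The structures are __packed, with members ordered roughly largest
+ * first, presumably so that the user/kernel layout does not depend on
+ * compiler padding. */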
+
+/*******************************************
+            PMRExportPMR
+ *******************************************/
+
+/* Bridge in structure for PMRExportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMREXPORTPMR_TAG
+{
+       IMG_HANDLE hPMR;
+} __packed PVRSRV_BRIDGE_IN_PMREXPORTPMR;
+
+/* Bridge out structure for PMRExportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMREXPORTPMR_TAG
+{
+       IMG_UINT64 ui64Password;
+       IMG_UINT64 ui64Size;
+       IMG_HANDLE hPMRExport;
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32Log2Contig;
+} __packed PVRSRV_BRIDGE_OUT_PMREXPORTPMR;
+
+/*******************************************
+            PMRUnexportPMR
+ *******************************************/
+
+/* Bridge in structure for PMRUnexportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR_TAG
+{
+       IMG_HANDLE hPMRExport;
+} __packed PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR;
+
+/* Bridge out structure for PMRUnexportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR;
+
+/*******************************************
+            PMRGetUID
+ *******************************************/
+
+/* Bridge in structure for PMRGetUID */
+typedef struct PVRSRV_BRIDGE_IN_PMRGETUID_TAG
+{
+       IMG_HANDLE hPMR;
+} __packed PVRSRV_BRIDGE_IN_PMRGETUID;
+
+/* Bridge out structure for PMRGetUID */
+typedef struct PVRSRV_BRIDGE_OUT_PMRGETUID_TAG
+{
+       IMG_UINT64 ui64UID;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRGETUID;
+
+/*******************************************
+            PMRMakeLocalImportHandle
+ *******************************************/
+
+/* Bridge in structure for PMRMakeLocalImportHandle */
+typedef struct PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE_TAG
+{
+       IMG_HANDLE hBuffer;
+} __packed PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE;
+
+/* Bridge out structure for PMRMakeLocalImportHandle */
+typedef struct PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE_TAG
+{
+       IMG_HANDLE hExtMem;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE;
+
+/*******************************************
+            PMRUnmakeLocalImportHandle
+ *******************************************/
+
+/* Bridge in structure for PMRUnmakeLocalImportHandle */
+typedef struct PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE_TAG
+{
+       IMG_HANDLE hExtMem;
+} __packed PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE;
+
+/* Bridge out structure for PMRUnmakeLocalImportHandle */
+typedef struct PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE;
+
+/*******************************************
+            PMRImportPMR
+ *******************************************/
+
+/* Bridge in structure for PMRImportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRIMPORTPMR_TAG
+{
+       IMG_UINT64 ui64uiPassword;
+       IMG_UINT64 ui64uiSize;
+       IMG_HANDLE hPMRExport;
+       IMG_UINT32 ui32uiLog2Contig;
+} __packed PVRSRV_BRIDGE_IN_PMRIMPORTPMR;
+
+/* Bridge out structure for PMRImportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRIMPORTPMR_TAG
+{
+       IMG_HANDLE hPMR;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRIMPORTPMR;
+
+/*******************************************
+            PMRLocalImportPMR
+ *******************************************/
+
+/* Bridge in structure for PMRLocalImportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR_TAG
+{
+       IMG_HANDLE hExtHandle;
+} __packed PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR;
+
+/* Bridge out structure for PMRLocalImportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR_TAG
+{
+       IMG_DEVMEM_ALIGN_T uiAlign;
+       IMG_DEVMEM_SIZE_T uiSize;
+       IMG_HANDLE hPMR;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR;
+
+/*******************************************
+            PMRUnrefPMR
+ *******************************************/
+
+/* Bridge in structure for PMRUnrefPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRUNREFPMR_TAG
+{
+       IMG_HANDLE hPMR;
+} __packed PVRSRV_BRIDGE_IN_PMRUNREFPMR;
+
+/* Bridge out structure for PMRUnrefPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRUNREFPMR_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRUNREFPMR;
+
+/*******************************************
+            PMRUnrefUnlockPMR
+ *******************************************/
+
+/* Bridge in structure for PMRUnrefUnlockPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR_TAG
+{
+       IMG_HANDLE hPMR;
+} __packed PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR;
+
+/* Bridge out structure for PMRUnrefUnlockPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR;
+
+/*******************************************
+            PhysmemNewRamBackedPMR
+ *******************************************/
+
+/* Bridge in structure for PhysmemNewRamBackedPMR */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR_TAG
+{
+       IMG_DEVMEM_SIZE_T uiChunkSize;
+       IMG_DEVMEM_SIZE_T uiSize;
+       IMG_UINT32 *pui32MappingTable;
+       const IMG_CHAR *puiAnnotation;
+       IMG_UINT32 ui32AnnotationLength;
+       IMG_UINT32 ui32Log2PageSize;
+       IMG_UINT32 ui32NumPhysChunks;
+       IMG_UINT32 ui32NumVirtChunks;
+       IMG_UINT32 ui32PDumpFlags;
+       IMG_PID ui32PID;
+       PVRSRV_MEMALLOCFLAGS_T uiFlags;
+} __packed PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR;
+
+/* Bridge out structure for PhysmemNewRamBackedPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR_TAG
+{
+       IMG_HANDLE hPMRPtr;
+       PVRSRV_ERROR eError;
+       PVRSRV_MEMALLOCFLAGS_T uiOutFlags;
+} __packed PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR;
+
+/*******************************************
+            PhysmemNewRamBackedLockedPMR
+ *******************************************/
+
+/* Bridge in structure for PhysmemNewRamBackedLockedPMR */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR_TAG
+{
+       IMG_DEVMEM_SIZE_T uiChunkSize;
+       IMG_DEVMEM_SIZE_T uiSize;
+       IMG_UINT32 *pui32MappingTable;
+       const IMG_CHAR *puiAnnotation;
+       IMG_UINT32 ui32AnnotationLength;
+       IMG_UINT32 ui32Log2PageSize;
+       IMG_UINT32 ui32NumPhysChunks;
+       IMG_UINT32 ui32NumVirtChunks;
+       IMG_UINT32 ui32PDumpFlags;
+       IMG_PID ui32PID;
+       PVRSRV_MEMALLOCFLAGS_T uiFlags;
+} __packed PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR;
+
+/* Bridge out structure for PhysmemNewRamBackedLockedPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR_TAG
+{
+       IMG_HANDLE hPMRPtr;
+       PVRSRV_ERROR eError;
+       PVRSRV_MEMALLOCFLAGS_T uiOutFlags;
+} __packed PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR;
+
+/*******************************************
+            DevmemIntPin
+ *******************************************/
+
+/* Bridge in structure for DevmemIntPin */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPIN_TAG
+{
+       IMG_HANDLE hPMR;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTPIN;
+
+/* Bridge out structure for DevmemIntPin */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPIN_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTPIN;
+
+/*******************************************
+            DevmemIntUnpin
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnpin */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN_TAG
+{
+       IMG_HANDLE hPMR;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN;
+
+/* Bridge out structure for DevmemIntUnpin */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN;
+
+/*******************************************
+            DevmemIntPinValidate
+ *******************************************/
+
+/* Bridge in structure for DevmemIntPinValidate */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE_TAG
+{
+       IMG_HANDLE hMapping;
+       IMG_HANDLE hPMR;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE;
+
+/* Bridge out structure for DevmemIntPinValidate */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE;
+
+/*******************************************
+            DevmemIntUnpinInvalidate
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnpinInvalidate */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE_TAG
+{
+       IMG_HANDLE hMapping;
+       IMG_HANDLE hPMR;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE;
+
+/* Bridge out structure for DevmemIntUnpinInvalidate */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE;
+
+/*******************************************
+            DevmemIntCtxCreate
+ *******************************************/
+
+/* Bridge in structure for DevmemIntCtxCreate */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE_TAG
+{
+       IMG_BOOL bbKernelMemoryCtx;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE;
+
+/* Bridge out structure for DevmemIntCtxCreate */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE_TAG
+{
+       IMG_HANDLE hDevMemServerContext;
+       IMG_HANDLE hPrivData;
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32CPUCacheLineSize;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE;
+
+/*******************************************
+            DevmemIntCtxDestroy
+ *******************************************/
+
+/* Bridge in structure for DevmemIntCtxDestroy */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY_TAG
+{
+       IMG_HANDLE hDevmemServerContext;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY;
+
+/* Bridge out structure for DevmemIntCtxDestroy */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY;
+
+/*******************************************
+            DevmemIntHeapCreate
+ *******************************************/
+
+/* Bridge in structure for DevmemIntHeapCreate */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE_TAG
+{
+       IMG_DEV_VIRTADDR sHeapBaseAddr;
+       IMG_DEVMEM_SIZE_T uiHeapLength;
+       IMG_HANDLE hDevmemCtx;
+       IMG_UINT32 ui32Log2DataPageSize;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE;
+
+/* Bridge out structure for DevmemIntHeapCreate */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE_TAG
+{
+       IMG_HANDLE hDevmemHeapPtr;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE;
+
+/*******************************************
+            DevmemIntHeapDestroy
+ *******************************************/
+
+/* Bridge in structure for DevmemIntHeapDestroy */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY_TAG
+{
+       IMG_HANDLE hDevmemHeap;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY;
+
+/* Bridge out structure for DevmemIntHeapDestroy */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY;
+
+/*******************************************
+            DevmemIntMapPMR
+ *******************************************/
+
+/* Bridge in structure for DevmemIntMapPMR */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR_TAG
+{
+       IMG_HANDLE hDevmemServerHeap;
+       IMG_HANDLE hPMR;
+       IMG_HANDLE hReservation;
+       PVRSRV_MEMALLOCFLAGS_T uiMapFlags;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR;
+
+/* Bridge out structure for DevmemIntMapPMR */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR_TAG
+{
+       IMG_HANDLE hMapping;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR;
+
+/*******************************************
+            DevmemIntUnmapPMR
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnmapPMR */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR_TAG
+{
+       IMG_HANDLE hMapping;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR;
+
+/* Bridge out structure for DevmemIntUnmapPMR */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR;
+
+/*******************************************
+            DevmemIntReserveRange
+ *******************************************/
+
+/* Bridge in structure for DevmemIntReserveRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE_TAG
+{
+       IMG_DEV_VIRTADDR sAddress;
+       IMG_DEVMEM_SIZE_T uiLength;
+       IMG_HANDLE hDevmemServerHeap;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE;
+
+/* Bridge out structure for DevmemIntReserveRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE_TAG
+{
+       IMG_HANDLE hReservation;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE;
+
+/*******************************************
+            DevmemIntUnreserveRange
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnreserveRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE_TAG
+{
+       IMG_HANDLE hReservation;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE;
+
+/* Bridge out structure for DevmemIntUnreserveRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE;
+
+/*******************************************
+            ChangeSparseMem
+ *******************************************/
+
+/* Bridge in structure for ChangeSparseMem */
+typedef struct PVRSRV_BRIDGE_IN_CHANGESPARSEMEM_TAG
+{
+       IMG_DEV_VIRTADDR sDevVAddr;
+       IMG_UINT64 ui64CPUVAddr;
+       IMG_HANDLE hPMR;
+       IMG_HANDLE hSrvDevMemHeap;
+       IMG_UINT32 *pui32AllocPageIndices;
+       IMG_UINT32 *pui32FreePageIndices;
+       IMG_UINT32 ui32AllocPageCount;
+       IMG_UINT32 ui32FreePageCount;
+       IMG_UINT32 ui32SparseFlags;
+       PVRSRV_MEMALLOCFLAGS_T uiFlags;
+} __packed PVRSRV_BRIDGE_IN_CHANGESPARSEMEM;
+
+/* Bridge out structure for ChangeSparseMem */
+typedef struct PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM;
+
+/*******************************************
+            DevmemIntMapPages
+ *******************************************/
+
+/* Bridge in structure for DevmemIntMapPages */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES_TAG
+{
+       IMG_DEV_VIRTADDR sDevVAddr;
+       IMG_HANDLE hPMR;
+       IMG_HANDLE hReservation;
+       IMG_UINT32 ui32PageCount;
+       IMG_UINT32 ui32PhysicalPgOffset;
+       PVRSRV_MEMALLOCFLAGS_T uiFlags;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES;
+
+/* Bridge out structure for DevmemIntMapPages */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES;
+
+/*******************************************
+            DevmemIntUnmapPages
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnmapPages */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES_TAG
+{
+       IMG_DEV_VIRTADDR sDevVAddr;
+       IMG_HANDLE hReservation;
+       IMG_UINT32 ui32PageCount;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES;
+
+/* Bridge out structure for DevmemIntUnmapPages */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES;
+
+/*******************************************
+            DevmemIsVDevAddrValid
+ *******************************************/
+
+/* Bridge in structure for DevmemIsVDevAddrValid */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID_TAG
+{
+       IMG_DEV_VIRTADDR sAddress;
+       IMG_HANDLE hDevmemCtx;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID;
+
+/* Bridge out structure for DevmemIsVDevAddrValid */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID;
+
+/*******************************************
+            DevmemFlushDevSLCRange
+ *******************************************/
+
+/* Bridge in structure for DevmemFlushDevSLCRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE_TAG
+{
+       IMG_DEV_VIRTADDR sAddress;
+       IMG_DEVMEM_SIZE_T uiSize;
+       IMG_HANDLE hDevmemCtx;
+       IMG_BOOL bInvalidate;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE;
+
+/* Bridge out structure for DevmemFlushDevSLCRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE;
+
+/*******************************************
+            DevmemInvalidateFBSCTable
+ *******************************************/
+
+/* Bridge in structure for DevmemInvalidateFBSCTable */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE_TAG
+{
+       IMG_UINT64 ui64FBSCEntries;
+       IMG_HANDLE hDevmemCtx;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE;
+
+/* Bridge out structure for DevmemInvalidateFBSCTable */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE;
+
+/*******************************************
+            HeapCfgHeapConfigCount
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapConfigCount */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT_TAG
+{
+       IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT;
+
+/* Bridge out structure for HeapCfgHeapConfigCount */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT_TAG
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32NumHeapConfigs;
+} __packed PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT;
+
+/*******************************************
+            HeapCfgHeapCount
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapCount */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT_TAG
+{
+       IMG_UINT32 ui32HeapConfigIndex;
+} __packed PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT;
+
+/* Bridge out structure for HeapCfgHeapCount */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT_TAG
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32NumHeaps;
+} __packed PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT;
+
+/*******************************************
+            HeapCfgHeapConfigName
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapConfigName */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME_TAG
+{
+       IMG_CHAR *puiHeapConfigName;
+       IMG_UINT32 ui32HeapConfigIndex;
+       IMG_UINT32 ui32HeapConfigNameBufSz;
+} __packed PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME;
+
+/* Bridge out structure for HeapCfgHeapConfigName */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME_TAG
+{
+       IMG_CHAR *puiHeapConfigName;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME;
+
+/*******************************************
+            HeapCfgHeapDetails
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapDetails */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS_TAG
+{
+       IMG_CHAR *puiHeapNameOut;
+       IMG_UINT32 ui32HeapConfigIndex;
+       IMG_UINT32 ui32HeapIndex;
+       IMG_UINT32 ui32HeapNameBufSz;
+} __packed PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS;
+
+/* Bridge out structure for HeapCfgHeapDetails */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS_TAG
+{
+       IMG_DEV_VIRTADDR sDevVAddrBase;
+       IMG_DEVMEM_SIZE_T uiHeapLength;
+       IMG_DEVMEM_SIZE_T uiReservedRegionLength;
+       IMG_CHAR *puiHeapNameOut;
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32Log2DataPageSizeOut;
+       IMG_UINT32 ui32Log2ImportAlignmentOut;
+} __packed PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS;
+
+/*******************************************
+            DevmemIntRegisterPFNotifyKM
+ *******************************************/
+
+/* Bridge in structure for DevmemIntRegisterPFNotifyKM */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM_TAG
+{
+       IMG_HANDLE hDevm;
+       IMG_BOOL bRegister;
+       IMG_UINT32 ui32PID;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM;
+
+/* Bridge out structure for DevmemIntRegisterPFNotifyKM */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM;
+
+/*******************************************
+            GetMaxPhysHeapCount
+ *******************************************/
+
+/* Bridge in structure for GetMaxPhysHeapCount */
+typedef struct PVRSRV_BRIDGE_IN_GETMAXPHYSHEAPCOUNT_TAG
+{
+       IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_GETMAXPHYSHEAPCOUNT;
+
+/* Bridge out structure for GetMaxPhysHeapCount */
+typedef struct PVRSRV_BRIDGE_OUT_GETMAXPHYSHEAPCOUNT_TAG
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32PhysHeapCount;
+} __packed PVRSRV_BRIDGE_OUT_GETMAXPHYSHEAPCOUNT;
+
+/*******************************************
+            PhysHeapGetMemInfo
+ *******************************************/
+
+/* Bridge in structure for PhysHeapGetMemInfo */
+typedef struct PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO_TAG
+{
+       PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStats;
+       PVRSRV_PHYS_HEAP *peaPhysHeapID;
+       IMG_UINT32 ui32PhysHeapCount;
+} __packed PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO;
+
+/* Bridge out structure for PhysHeapGetMemInfo */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO_TAG
+{
+       PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStats;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO;
+
+/*******************************************
+            GetDefaultPhysicalHeap
+ *******************************************/
+
+/* Bridge in structure for GetDefaultPhysicalHeap */
+typedef struct PVRSRV_BRIDGE_IN_GETDEFAULTPHYSICALHEAP_TAG
+{
+       IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_GETDEFAULTPHYSICALHEAP;
+
+/* Bridge out structure for GetDefaultPhysicalHeap */
+typedef struct PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP_TAG
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_PHYS_HEAP eHeap;
+} __packed PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP;
+
+/*******************************************
+            GetHeapPhysMemUsage
+ *******************************************/
+
+/* Bridge in structure for GetHeapPhysMemUsage */
+typedef struct PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGE_TAG
+{
+       PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStats;
+       IMG_UINT32 ui32PhysHeapCount;
+} __packed PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGE;
+
+/* Bridge out structure for GetHeapPhysMemUsage */
+typedef struct PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGE_TAG
+{
+       PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStats;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGE;
+
+/*******************************************
+            DevmemGetFaultAddress
+ *******************************************/
+
+/* Bridge in structure for DevmemGetFaultAddress */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS_TAG
+{
+       IMG_HANDLE hDevmemCtx;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS;
+
+/* Bridge out structure for DevmemGetFaultAddress */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS_TAG
+{
+       IMG_DEV_VIRTADDR sFaultAddress;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS;
+
+/*******************************************
+            PVRSRVUpdateOOMStats
+ *******************************************/
+
+/* Bridge in structure for PVRSRVUpdateOOMStats */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS_TAG
+{
+       IMG_PID ui32pid;
+       IMG_UINT32 ui32ui32StatType;
+} __packed PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS;
+
+/* Bridge out structure for PVRSRVUpdateOOMStats */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS;
+
+/*******************************************
+            PhysHeapGetMemInfoPkd
+ *******************************************/
+
+/* Bridge in structure for PhysHeapGetMemInfoPkd */
+typedef struct PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFOPKD_TAG
+{
+       PHYS_HEAP_MEM_STATS_PKD *psapPhysHeapMemStats;
+       PVRSRV_PHYS_HEAP *peaPhysHeapID;
+       IMG_UINT32 ui32PhysHeapCount;
+} __packed PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFOPKD;
+
+/* Bridge out structure for PhysHeapGetMemInfoPkd */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFOPKD_TAG
+{
+       PHYS_HEAP_MEM_STATS_PKD *psapPhysHeapMemStats;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFOPKD;
+
+/*******************************************
+            GetHeapPhysMemUsagePkd
+ *******************************************/
+
+/* Bridge in structure for GetHeapPhysMemUsagePkd */
+typedef struct PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGEPKD_TAG
+{
+       PHYS_HEAP_MEM_STATS_PKD *psapPhysHeapMemStats;
+       IMG_UINT32 ui32PhysHeapCount;
+} __packed PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGEPKD;
+
+/* Bridge out structure for GetHeapPhysMemUsagePkd */
+typedef struct PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGEPKD_TAG
+{
+       PHYS_HEAP_MEM_STATS_PKD *psapPhysHeapMemStats;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGEPKD;
+
+#endif /* COMMON_MM_BRIDGE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/mm_bridge/server_mm_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/mm_bridge/server_mm_bridge.c
new file mode 100644 (file)
index 0000000..7375eb6
--- /dev/null
@@ -0,0 +1,3802 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for mm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for mm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "pvrsrv_memalloc_physheap.h"
+#include "devicemem.h"
+#include "devicemem_server.h"
+#include "pmr.h"
+#include "devicemem_heapcfg.h"
+#include "physmem.h"
+#include "devicemem_utils.h"
+#include "process_stats.h"
+
+#include "common_mm_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+static PVRSRV_ERROR ReleasePMRExport(void *pvData)
+{
+       PVR_UNREFERENCED_PARAMETER(pvData);
+
+       return PVRSRV_OK;
+}
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static PVRSRV_ERROR _PMRExportPMRpsPMRExportIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = PMRUnexportPMR((PMR_EXPORT *) pvData);
+       return eError;
+}
+
+static IMG_INT
+PVRSRVBridgePMRExportPMR(IMG_UINT32 ui32DispatchTableEntry,
+                        IMG_UINT8 * psPMRExportPMRIN_UI8,
+                        IMG_UINT8 * psPMRExportPMROUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PMREXPORTPMR *psPMRExportPMRIN =
+           (PVRSRV_BRIDGE_IN_PMREXPORTPMR *) IMG_OFFSET_ADDR(psPMRExportPMRIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PMREXPORTPMR *psPMRExportPMROUT =
+           (PVRSRV_BRIDGE_OUT_PMREXPORTPMR *) IMG_OFFSET_ADDR(psPMRExportPMROUT_UI8, 0);
+
+       IMG_HANDLE hPMR = psPMRExportPMRIN->hPMR;
+       PMR *psPMRInt = NULL;
+       PMR_EXPORT *psPMRExportInt = NULL;
+       IMG_HANDLE hPMRExportInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psPMRExportPMROUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRInt,
+                                      hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto PMRExportPMR_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psPMRExportPMROUT->eError =
+           PMRExportPMR(psPMRInt,
+                        &psPMRExportInt,
+                        &psPMRExportPMROUT->ui64Size,
+                        &psPMRExportPMROUT->ui32Log2Contig, &psPMRExportPMROUT->ui64Password);
+       /* Exit early if bridged call fails */
+       if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK))
+       {
+               goto PMRExportPMR_exit;
+       }
+
+       /*
+        * For cases where we need a cross process handle we actually allocate two.
+        *
+        * The first one is a connection specific handle and it gets given the real
+        * release function. This handle does *NOT* get returned to the caller. Its
+        * purpose is to release any leaked resources when we either have a bad or
+        * abnormally terminated client. If we didn't do this then the resource
+        * wouldn't be freed until driver unload. If the resource is freed normally,
+        * this handle can be looked up via the cross process handle and then
+        * released accordingly.
+        *
+        * The second one is a cross process handle and it gets given a noop release
+        * function. This handle does get returned to the caller.
+        */
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       psPMRExportPMROUT->eError =
+           PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                     &hPMRExportInt, (void *)psPMRExportInt,
+                                     PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+                                     PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                     (PFN_HANDLE_RELEASE) & _PMRExportPMRpsPMRExportIntRelease);
+       if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+               goto PMRExportPMR_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       /* Lock over handle creation. */
+       LockHandle(KERNEL_HANDLE_BASE);
+       psPMRExportPMROUT->eError = PVRSRVAllocHandleUnlocked(KERNEL_HANDLE_BASE,
+                                                             &psPMRExportPMROUT->hPMRExport,
+                                                             (void *)psPMRExportInt,
+                                                             PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+                                                             PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                                             (PFN_HANDLE_RELEASE) &
+                                                             ReleasePMRExport);
+       if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(KERNEL_HANDLE_BASE);
+               goto PMRExportPMR_exit;
+       }
+       /* Release now we have created handles. */
+       UnlockHandle(KERNEL_HANDLE_BASE);
+
+PMRExportPMR_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       if (psPMRExportPMROUT->eError != PVRSRV_OK)
+       {
+               if (psPMRExportPMROUT->hPMRExport)
+               {
+                       PVRSRV_ERROR eError;
+
+                       /* Lock over handle creation cleanup. */
+                       LockHandle(KERNEL_HANDLE_BASE);
+
+                       eError = PVRSRVDestroyHandleUnlocked(KERNEL_HANDLE_BASE,
+                                                            (IMG_HANDLE) psPMRExportPMROUT->
+                                                            hPMRExport,
+                                                            PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+                       if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)))
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: %s", __func__, PVRSRVGetErrorString(eError)));
+                       }
+                       /* Releasing the handle should free/destroy/release the resource.
+                        * This should never fail... */
+                       PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+                       /* Release now we have cleaned up creation handles. */
+                       UnlockHandle(KERNEL_HANDLE_BASE);
+
+               }
+
+               if (hPMRExportInt)
+               {
+                       PVRSRV_ERROR eError;
+                       /* Lock over handle creation cleanup. */
+                       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+                       eError =
+                           PVRSRVDestroyHandleUnlocked(psConnection->psProcessHandleBase->
+                                                       psHandleBase, hPMRExportInt,
+                                                       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+                       if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: %s", __func__, PVRSRVGetErrorString(eError)));
+                       }
+                       /* Releasing the handle should free/destroy/release the resource.
+                        * This should never fail... */
+                       PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+                       /* Avoid freeing/destroying/releasing the resource a second time below */
+                       psPMRExportInt = NULL;
+                       /* Release now we have cleaned up creation handles. */
+                       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+               }
+
+               if (psPMRExportInt)
+               {
+                       LockHandle(KERNEL_HANDLE_BASE);
+                       PMRUnexportPMR(psPMRExportInt);
+                       UnlockHandle(KERNEL_HANDLE_BASE);
+               }
+       }
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRUnexportPMR(IMG_UINT32 ui32DispatchTableEntry,
+                          IMG_UINT8 * psPMRUnexportPMRIN_UI8,
+                          IMG_UINT8 * psPMRUnexportPMROUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR *psPMRUnexportPMRIN =
+           (PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR *) IMG_OFFSET_ADDR(psPMRUnexportPMRIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR *psPMRUnexportPMROUT =
+           (PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR *) IMG_OFFSET_ADDR(psPMRUnexportPMROUT_UI8, 0);
+
+       PMR_EXPORT *psPMRExportInt = NULL;
+       IMG_HANDLE hPMRExportInt = NULL;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       /* Lock over handle destruction. */
+       LockHandle(KERNEL_HANDLE_BASE);
+       psPMRUnexportPMROUT->eError =
+           PVRSRVLookupHandleUnlocked(KERNEL_HANDLE_BASE,
+                                      (void **)&psPMRExportInt,
+                                      (IMG_HANDLE) psPMRUnexportPMRIN->hPMRExport,
+                                      PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT, IMG_FALSE);
+       if (unlikely(psPMRUnexportPMROUT->eError != PVRSRV_OK))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnexportPMROUT->eError)));
+       }
+       PVR_ASSERT(psPMRUnexportPMROUT->eError == PVRSRV_OK);
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(KERNEL_HANDLE_BASE);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+       /*
+        * Find the connection specific handle that represents the same data
+        * as the cross process handle as releasing it will actually call the
+        * data's real release function (see the function where the cross
+        * process handle is allocated for more details).
+        */
+       psPMRUnexportPMROUT->eError =
+           PVRSRVFindHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                    &hPMRExportInt,
+                                    psPMRExportInt, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+       if (unlikely(psPMRUnexportPMROUT->eError != PVRSRV_OK))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnexportPMROUT->eError)));
+       }
+       PVR_ASSERT(psPMRUnexportPMROUT->eError == PVRSRV_OK);
+
+       psPMRUnexportPMROUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                             hPMRExportInt, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+       if (unlikely((psPMRUnexportPMROUT->eError != PVRSRV_OK) &&
+                    (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+                    (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnexportPMROUT->eError)));
+       }
+       PVR_ASSERT((psPMRUnexportPMROUT->eError == PVRSRV_OK) ||
+                  (psPMRUnexportPMROUT->eError == PVRSRV_ERROR_RETRY));
+       /* Release now we have destroyed handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       /* Lock over handle destruction. */
+       LockHandle(KERNEL_HANDLE_BASE);
+
+       psPMRUnexportPMROUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(KERNEL_HANDLE_BASE,
+                                             (IMG_HANDLE) psPMRUnexportPMRIN->hPMRExport,
+                                             PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+       if (unlikely((psPMRUnexportPMROUT->eError != PVRSRV_OK) &&
+                    (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+                    (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnexportPMROUT->eError)));
+               UnlockHandle(KERNEL_HANDLE_BASE);
+               goto PMRUnexportPMR_exit;
+       }
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(KERNEL_HANDLE_BASE);
+
+PMRUnexportPMR_exit:
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRGetUID(IMG_UINT32 ui32DispatchTableEntry,
+                     IMG_UINT8 * psPMRGetUIDIN_UI8,
+                     IMG_UINT8 * psPMRGetUIDOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PMRGETUID *psPMRGetUIDIN =
+           (PVRSRV_BRIDGE_IN_PMRGETUID *) IMG_OFFSET_ADDR(psPMRGetUIDIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PMRGETUID *psPMRGetUIDOUT =
+           (PVRSRV_BRIDGE_OUT_PMRGETUID *) IMG_OFFSET_ADDR(psPMRGetUIDOUT_UI8, 0);
+
+       IMG_HANDLE hPMR = psPMRGetUIDIN->hPMR;
+       PMR *psPMRInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psPMRGetUIDOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRInt,
+                                      hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psPMRGetUIDOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto PMRGetUID_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psPMRGetUIDOUT->eError = PMRGetUID(psPMRInt, &psPMRGetUIDOUT->ui64UID);
+
+PMRGetUID_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static PVRSRV_ERROR _PMRMakeLocalImportHandlepsExtMemIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = PMRUnmakeLocalImportHandle((PMR *) pvData);
+       return eError;
+}
+
+static IMG_INT
+PVRSRVBridgePMRMakeLocalImportHandle(IMG_UINT32 ui32DispatchTableEntry,
+                                    IMG_UINT8 * psPMRMakeLocalImportHandleIN_UI8,
+                                    IMG_UINT8 * psPMRMakeLocalImportHandleOUT_UI8,
+                                    CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE *psPMRMakeLocalImportHandleIN =
+           (PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE *)
+           IMG_OFFSET_ADDR(psPMRMakeLocalImportHandleIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE *psPMRMakeLocalImportHandleOUT =
+           (PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE *)
+           IMG_OFFSET_ADDR(psPMRMakeLocalImportHandleOUT_UI8, 0);
+
+       IMG_HANDLE hBuffer = psPMRMakeLocalImportHandleIN->hBuffer;
+       PMR *psBufferInt = NULL;
+       PMR *psExtMemInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psPMRMakeLocalImportHandleOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psBufferInt,
+                                      hBuffer,
+                                      PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, IMG_TRUE);
+       if (unlikely(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto PMRMakeLocalImportHandle_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psPMRMakeLocalImportHandleOUT->eError = PMRMakeLocalImportHandle(psBufferInt, &psExtMemInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK))
+       {
+               goto PMRMakeLocalImportHandle_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       psPMRMakeLocalImportHandleOUT->eError =
+           PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                     &psPMRMakeLocalImportHandleOUT->hExtMem, (void *)psExtMemInt,
+                                     PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT,
+                                     PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                     (PFN_HANDLE_RELEASE) &
+                                     _PMRMakeLocalImportHandlepsExtMemIntRelease);
+       if (unlikely(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+               goto PMRMakeLocalImportHandle_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+PMRMakeLocalImportHandle_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psBufferInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hBuffer, PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       if (psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)
+       {
+               if (psExtMemInt)
+               {
+                       LockHandle(KERNEL_HANDLE_BASE);
+                       PMRUnmakeLocalImportHandle(psExtMemInt);
+                       UnlockHandle(KERNEL_HANDLE_BASE);
+               }
+       }
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRUnmakeLocalImportHandle(IMG_UINT32 ui32DispatchTableEntry,
+                                      IMG_UINT8 * psPMRUnmakeLocalImportHandleIN_UI8,
+                                      IMG_UINT8 * psPMRUnmakeLocalImportHandleOUT_UI8,
+                                      CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE *psPMRUnmakeLocalImportHandleIN =
+           (PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE *)
+           IMG_OFFSET_ADDR(psPMRUnmakeLocalImportHandleIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE *psPMRUnmakeLocalImportHandleOUT =
+           (PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE *)
+           IMG_OFFSET_ADDR(psPMRUnmakeLocalImportHandleOUT_UI8, 0);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       psPMRUnmakeLocalImportHandleOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                             (IMG_HANDLE) psPMRUnmakeLocalImportHandleIN->hExtMem,
+                                             PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT);
+       if (unlikely((psPMRUnmakeLocalImportHandleOUT->eError != PVRSRV_OK) &&
+                    (psPMRUnmakeLocalImportHandleOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+                    (psPMRUnmakeLocalImportHandleOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s",
+                        __func__, PVRSRVGetErrorString(psPMRUnmakeLocalImportHandleOUT->eError)));
+               UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+               goto PMRUnmakeLocalImportHandle_exit;
+       }
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+PMRUnmakeLocalImportHandle_exit:
+
+       return 0;
+}
+
+static PVRSRV_ERROR _PMRImportPMRpsPMRIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = PMRUnrefPMR((PMR *) pvData);
+       return eError;
+}
+
+static IMG_INT
+PVRSRVBridgePMRImportPMR(IMG_UINT32 ui32DispatchTableEntry,
+                        IMG_UINT8 * psPMRImportPMRIN_UI8,
+                        IMG_UINT8 * psPMRImportPMROUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PMRIMPORTPMR *psPMRImportPMRIN =
+           (PVRSRV_BRIDGE_IN_PMRIMPORTPMR *) IMG_OFFSET_ADDR(psPMRImportPMRIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PMRIMPORTPMR *psPMRImportPMROUT =
+           (PVRSRV_BRIDGE_OUT_PMRIMPORTPMR *) IMG_OFFSET_ADDR(psPMRImportPMROUT_UI8, 0);
+
+       IMG_HANDLE hPMRExport = psPMRImportPMRIN->hPMRExport;
+       PMR_EXPORT *psPMRExportInt = NULL;
+       PMR *psPMRInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(KERNEL_HANDLE_BASE);
+
+       /* Look up the address from the handle */
+       psPMRImportPMROUT->eError =
+           PVRSRVLookupHandleUnlocked(KERNEL_HANDLE_BASE,
+                                      (void **)&psPMRExportInt,
+                                      hPMRExport, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT, IMG_TRUE);
+       if (unlikely(psPMRImportPMROUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(KERNEL_HANDLE_BASE);
+               goto PMRImportPMR_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(KERNEL_HANDLE_BASE);
+
+       psPMRImportPMROUT->eError =
+           PhysmemImportPMR(psConnection, OSGetDevNode(psConnection),
+                            psPMRExportInt,
+                            psPMRImportPMRIN->ui64uiPassword,
+                            psPMRImportPMRIN->ui64uiSize,
+                            psPMRImportPMRIN->ui32uiLog2Contig, &psPMRInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psPMRImportPMROUT->eError != PVRSRV_OK))
+       {
+               goto PMRImportPMR_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psPMRImportPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                                             &psPMRImportPMROUT->hPMR,
+                                                             (void *)psPMRInt,
+                                                             PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+                                                             PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                                             (PFN_HANDLE_RELEASE) &
+                                                             _PMRImportPMRpsPMRIntRelease);
+       if (unlikely(psPMRImportPMROUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto PMRImportPMR_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+PMRImportPMR_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(KERNEL_HANDLE_BASE);
+
+       /* Unreference the previously looked up handle */
+       if (psPMRExportInt)
+       {
+               PVRSRVReleaseHandleUnlocked(KERNEL_HANDLE_BASE,
+                                           hPMRExport, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(KERNEL_HANDLE_BASE);
+
+       if (psPMRImportPMROUT->eError != PVRSRV_OK)
+       {
+               if (psPMRInt)
+               {
+                       LockHandle(KERNEL_HANDLE_BASE);
+                       PMRUnrefPMR(psPMRInt);
+                       UnlockHandle(KERNEL_HANDLE_BASE);
+               }
+       }
+
+       return 0;
+}
+
+static PVRSRV_ERROR _PMRLocalImportPMRpsPMRIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = PMRUnrefPMR((PMR *) pvData);
+       return eError;
+}
+
+static IMG_INT
+PVRSRVBridgePMRLocalImportPMR(IMG_UINT32 ui32DispatchTableEntry,
+                             IMG_UINT8 * psPMRLocalImportPMRIN_UI8,
+                             IMG_UINT8 * psPMRLocalImportPMROUT_UI8,
+                             CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR *psPMRLocalImportPMRIN =
+           (PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR *) IMG_OFFSET_ADDR(psPMRLocalImportPMRIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR *psPMRLocalImportPMROUT =
+           (PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR *) IMG_OFFSET_ADDR(psPMRLocalImportPMROUT_UI8, 0);
+
+       IMG_HANDLE hExtHandle = psPMRLocalImportPMRIN->hExtHandle;
+       PMR *psExtHandleInt = NULL;
+       PMR *psPMRInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       /* Look up the address from the handle */
+       psPMRLocalImportPMROUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                      (void **)&psExtHandleInt,
+                                      hExtHandle, PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT, IMG_TRUE);
+       if (unlikely(psPMRLocalImportPMROUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+               goto PMRLocalImportPMR_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       psPMRLocalImportPMROUT->eError =
+           PMRLocalImportPMR(psExtHandleInt,
+                             &psPMRInt,
+                             &psPMRLocalImportPMROUT->uiSize, &psPMRLocalImportPMROUT->uiAlign);
+       /* Exit early if bridged call fails */
+       if (unlikely(psPMRLocalImportPMROUT->eError != PVRSRV_OK))
+       {
+               goto PMRLocalImportPMR_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psPMRLocalImportPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                                                  &psPMRLocalImportPMROUT->hPMR,
+                                                                  (void *)psPMRInt,
+                                                                  PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+                                                                  PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                                                  (PFN_HANDLE_RELEASE) &
+                                                                  _PMRLocalImportPMRpsPMRIntRelease);
+       if (unlikely(psPMRLocalImportPMROUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto PMRLocalImportPMR_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+PMRLocalImportPMR_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psExtHandleInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                           hExtHandle, PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       if (psPMRLocalImportPMROUT->eError != PVRSRV_OK)
+       {
+               if (psPMRInt)
+               {
+                       LockHandle(KERNEL_HANDLE_BASE);
+                       PMRUnrefPMR(psPMRInt);
+                       UnlockHandle(KERNEL_HANDLE_BASE);
+               }
+       }
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRUnrefPMR(IMG_UINT32 ui32DispatchTableEntry,
+                       IMG_UINT8 * psPMRUnrefPMRIN_UI8,
+                       IMG_UINT8 * psPMRUnrefPMROUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PMRUNREFPMR *psPMRUnrefPMRIN =
+           (PVRSRV_BRIDGE_IN_PMRUNREFPMR *) IMG_OFFSET_ADDR(psPMRUnrefPMRIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PMRUNREFPMR *psPMRUnrefPMROUT =
+           (PVRSRV_BRIDGE_OUT_PMRUNREFPMR *) IMG_OFFSET_ADDR(psPMRUnrefPMROUT_UI8, 0);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psHandleBase);
+
+       psPMRUnrefPMROUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+                                             (IMG_HANDLE) psPMRUnrefPMRIN->hPMR,
+                                             PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       if (unlikely((psPMRUnrefPMROUT->eError != PVRSRV_OK) &&
+                    (psPMRUnrefPMROUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+                    (psPMRUnrefPMROUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnrefPMROUT->eError)));
+               UnlockHandle(psConnection->psHandleBase);
+               goto PMRUnrefPMR_exit;
+       }
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+PMRUnrefPMR_exit:
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRUnrefUnlockPMR(IMG_UINT32 ui32DispatchTableEntry,
+                             IMG_UINT8 * psPMRUnrefUnlockPMRIN_UI8,
+                             IMG_UINT8 * psPMRUnrefUnlockPMROUT_UI8,
+                             CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR *psPMRUnrefUnlockPMRIN =
+           (PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR *) IMG_OFFSET_ADDR(psPMRUnrefUnlockPMRIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR *psPMRUnrefUnlockPMROUT =
+           (PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR *) IMG_OFFSET_ADDR(psPMRUnrefUnlockPMROUT_UI8, 0);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psHandleBase);
+
+       psPMRUnrefUnlockPMROUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+                                             (IMG_HANDLE) psPMRUnrefUnlockPMRIN->hPMR,
+                                             PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       if (unlikely((psPMRUnrefUnlockPMROUT->eError != PVRSRV_OK) &&
+                    (psPMRUnrefUnlockPMROUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+                    (psPMRUnrefUnlockPMROUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnrefUnlockPMROUT->eError)));
+               UnlockHandle(psConnection->psHandleBase);
+               goto PMRUnrefUnlockPMR_exit;
+       }
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+PMRUnrefUnlockPMR_exit:
+
+       return 0;
+}
+
+static PVRSRV_ERROR _PhysmemNewRamBackedPMRpsPMRPtrIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = PMRUnrefPMR((PMR *) pvData);
+       return eError;
+}
+
+static_assert(PMR_MAX_SUPPORTED_PAGE_COUNT <= IMG_UINT32_MAX,
+             "PMR_MAX_SUPPORTED_PAGE_COUNT must not be larger than IMG_UINT32_MAX");
+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
+             "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgePhysmemNewRamBackedPMR(IMG_UINT32 ui32DispatchTableEntry,
+                                  IMG_UINT8 * psPhysmemNewRamBackedPMRIN_UI8,
+                                  IMG_UINT8 * psPhysmemNewRamBackedPMROUT_UI8,
+                                  CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR *psPhysmemNewRamBackedPMRIN =
+           (PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR *)
+           IMG_OFFSET_ADDR(psPhysmemNewRamBackedPMRIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR *psPhysmemNewRamBackedPMROUT =
+           (PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR *)
+           IMG_OFFSET_ADDR(psPhysmemNewRamBackedPMROUT_UI8, 0);
+
+       IMG_UINT32 *ui32MappingTableInt = NULL;
+       IMG_CHAR *uiAnnotationInt = NULL;
+       PMR *psPMRPtrInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) +
+           ((IMG_UINT64) psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) + 0;
+
+       if (unlikely(psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks > PMR_MAX_SUPPORTED_PAGE_COUNT))
+       {
+               psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto PhysmemNewRamBackedPMR_exit;
+       }
+
+       if (unlikely(psPhysmemNewRamBackedPMRIN->ui32AnnotationLength > DEVMEM_ANNOTATION_MAX_LEN))
+       {
+               psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto PhysmemNewRamBackedPMR_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto PhysmemNewRamBackedPMR_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psPhysmemNewRamBackedPMRIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysmemNewRamBackedPMRIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto PhysmemNewRamBackedPMR_exit;
+                       }
+               }
+       }
+
+       if (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks != 0)
+       {
+               ui32MappingTableInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32MappingTableInt,
+                    (const void __user *)psPhysmemNewRamBackedPMRIN->pui32MappingTable,
+                    psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) !=
+                   PVRSRV_OK)
+               {
+                       psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto PhysmemNewRamBackedPMR_exit;
+               }
+       }
+       if (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength != 0)
+       {
+               uiAnnotationInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiAnnotationInt,
+                    (const void __user *)psPhysmemNewRamBackedPMRIN->puiAnnotation,
+                    psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) !=
+                   PVRSRV_OK)
+               {
+                       psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto PhysmemNewRamBackedPMR_exit;
+               }
+               ((IMG_CHAR *)
+                uiAnnotationInt)[(psPhysmemNewRamBackedPMRIN->ui32AnnotationLength *
+                                  sizeof(IMG_CHAR)) - 1] = '\0';
+       }
+
+       psPhysmemNewRamBackedPMROUT->eError =
+           PhysmemNewRamBackedPMR(psConnection, OSGetDevNode(psConnection),
+                                  psPhysmemNewRamBackedPMRIN->uiSize,
+                                  psPhysmemNewRamBackedPMRIN->uiChunkSize,
+                                  psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks,
+                                  psPhysmemNewRamBackedPMRIN->ui32NumVirtChunks,
+                                  ui32MappingTableInt,
+                                  psPhysmemNewRamBackedPMRIN->ui32Log2PageSize,
+                                  psPhysmemNewRamBackedPMRIN->uiFlags,
+                                  psPhysmemNewRamBackedPMRIN->ui32AnnotationLength,
+                                  uiAnnotationInt,
+                                  psPhysmemNewRamBackedPMRIN->ui32PID,
+                                  &psPMRPtrInt,
+                                  psPhysmemNewRamBackedPMRIN->ui32PDumpFlags,
+                                  &psPhysmemNewRamBackedPMROUT->uiOutFlags);
+       /* Exit early if bridged call fails */
+       if (unlikely(psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK))
+       {
+               goto PhysmemNewRamBackedPMR_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psPhysmemNewRamBackedPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                                                       &psPhysmemNewRamBackedPMROUT->
+                                                                       hPMRPtr,
+                                                                       (void *)psPMRPtrInt,
+                                                                       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+                                                                       PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                                                       (PFN_HANDLE_RELEASE) &
+                                                                       _PhysmemNewRamBackedPMRpsPMRPtrIntRelease);
+       if (unlikely(psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto PhysmemNewRamBackedPMR_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+PhysmemNewRamBackedPMR_exit:
+
+       if (psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK)
+       {
+               if (psPMRPtrInt)
+               {
+                       LockHandle(KERNEL_HANDLE_BASE);
+                       PMRUnrefPMR(psPMRPtrInt);
+                       UnlockHandle(KERNEL_HANDLE_BASE);
+               }
+       }
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psPhysmemNewRamBackedPMROUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static PVRSRV_ERROR _PhysmemNewRamBackedLockedPMRpsPMRPtrIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = PMRUnrefUnlockPMR((PMR *) pvData);
+       return eError;
+}
+
+static_assert(PMR_MAX_SUPPORTED_PAGE_COUNT <= IMG_UINT32_MAX,
+             "PMR_MAX_SUPPORTED_PAGE_COUNT must not be larger than IMG_UINT32_MAX");
+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
+             "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgePhysmemNewRamBackedLockedPMR(IMG_UINT32 ui32DispatchTableEntry,
+                                        IMG_UINT8 * psPhysmemNewRamBackedLockedPMRIN_UI8,
+                                        IMG_UINT8 * psPhysmemNewRamBackedLockedPMROUT_UI8,
+                                        CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR *psPhysmemNewRamBackedLockedPMRIN =
+           (PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR *)
+           IMG_OFFSET_ADDR(psPhysmemNewRamBackedLockedPMRIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR *psPhysmemNewRamBackedLockedPMROUT =
+           (PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR *)
+           IMG_OFFSET_ADDR(psPhysmemNewRamBackedLockedPMROUT_UI8, 0);
+
+       IMG_UINT32 *ui32MappingTableInt = NULL;
+       IMG_CHAR *uiAnnotationInt = NULL;
+       PMR *psPMRPtrInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks *
+            sizeof(IMG_UINT32)) +
+           ((IMG_UINT64) psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength *
+            sizeof(IMG_CHAR)) + 0;
+
+       if (unlikely
+           (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks > PMR_MAX_SUPPORTED_PAGE_COUNT))
+       {
+               psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto PhysmemNewRamBackedLockedPMR_exit;
+       }
+
+       if (unlikely
+           (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength > DEVMEM_ANNOTATION_MAX_LEN))
+       {
+               psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto PhysmemNewRamBackedLockedPMR_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto PhysmemNewRamBackedLockedPMR_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psPhysmemNewRamBackedLockedPMRIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer =
+                           (IMG_BYTE *) (void *)psPhysmemNewRamBackedLockedPMRIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psPhysmemNewRamBackedLockedPMROUT->eError =
+                                   PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto PhysmemNewRamBackedLockedPMR_exit;
+                       }
+               }
+       }
+
+       if (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks != 0)
+       {
+               ui32MappingTableInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32MappingTableInt,
+                    (const void __user *)psPhysmemNewRamBackedLockedPMRIN->pui32MappingTable,
+                    psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32)) !=
+                   PVRSRV_OK)
+               {
+                       psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto PhysmemNewRamBackedLockedPMR_exit;
+               }
+       }
+       if (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength != 0)
+       {
+               uiAnnotationInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiAnnotationInt,
+                    (const void __user *)psPhysmemNewRamBackedLockedPMRIN->puiAnnotation,
+                    psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) !=
+                   PVRSRV_OK)
+               {
+                       psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto PhysmemNewRamBackedLockedPMR_exit;
+               }
+               ((IMG_CHAR *)
+                uiAnnotationInt)[(psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength *
+                                  sizeof(IMG_CHAR)) - 1] = '\0';
+       }
+
+       psPhysmemNewRamBackedLockedPMROUT->eError =
+           PhysmemNewRamBackedLockedPMR(psConnection, OSGetDevNode(psConnection),
+                                        psPhysmemNewRamBackedLockedPMRIN->uiSize,
+                                        psPhysmemNewRamBackedLockedPMRIN->uiChunkSize,
+                                        psPhysmemNewRamBackedLockedPMRIN->ui32NumPhysChunks,
+                                        psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks,
+                                        ui32MappingTableInt,
+                                        psPhysmemNewRamBackedLockedPMRIN->ui32Log2PageSize,
+                                        psPhysmemNewRamBackedLockedPMRIN->uiFlags,
+                                        psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength,
+                                        uiAnnotationInt,
+                                        psPhysmemNewRamBackedLockedPMRIN->ui32PID,
+                                        &psPMRPtrInt,
+                                        psPhysmemNewRamBackedLockedPMRIN->ui32PDumpFlags,
+                                        &psPhysmemNewRamBackedLockedPMROUT->uiOutFlags);
+       /* Exit early if bridged call fails */
+       if (unlikely(psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK))
+       {
+               goto PhysmemNewRamBackedLockedPMR_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psPhysmemNewRamBackedLockedPMROUT->eError =
+           PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                     &psPhysmemNewRamBackedLockedPMROUT->hPMRPtr,
+                                     (void *)psPMRPtrInt, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+                                     PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                     (PFN_HANDLE_RELEASE) &
+                                     _PhysmemNewRamBackedLockedPMRpsPMRPtrIntRelease);
+       if (unlikely(psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto PhysmemNewRamBackedLockedPMR_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+PhysmemNewRamBackedLockedPMR_exit:
+
+       if (psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK)
+       {
+               if (psPMRPtrInt)
+               {
+                       LockHandle(KERNEL_HANDLE_BASE);
+                       PMRUnrefUnlockPMR(psPMRPtrInt);
+                       UnlockHandle(KERNEL_HANDLE_BASE);
+               }
+       }
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psPhysmemNewRamBackedLockedPMROUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntPin(IMG_UINT32 ui32DispatchTableEntry,
+                        IMG_UINT8 * psDevmemIntPinIN_UI8,
+                        IMG_UINT8 * psDevmemIntPinOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVMEMINTPIN *psDevmemIntPinIN =
+           (PVRSRV_BRIDGE_IN_DEVMEMINTPIN *) IMG_OFFSET_ADDR(psDevmemIntPinIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DEVMEMINTPIN *psDevmemIntPinOUT =
+           (PVRSRV_BRIDGE_OUT_DEVMEMINTPIN *) IMG_OFFSET_ADDR(psDevmemIntPinOUT_UI8, 0);
+
+       IMG_HANDLE hPMR = psDevmemIntPinIN->hPMR;
+       PMR *psPMRInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psDevmemIntPinOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRInt,
+                                      hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psDevmemIntPinOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntPin_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psDevmemIntPinOUT->eError = DevmemIntPin(psPMRInt);
+
+DevmemIntPin_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnpin(IMG_UINT32 ui32DispatchTableEntry,
+                          IMG_UINT8 * psDevmemIntUnpinIN_UI8,
+                          IMG_UINT8 * psDevmemIntUnpinOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN *psDevmemIntUnpinIN =
+           (PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN *) IMG_OFFSET_ADDR(psDevmemIntUnpinIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN *psDevmemIntUnpinOUT =
+           (PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN *) IMG_OFFSET_ADDR(psDevmemIntUnpinOUT_UI8, 0);
+
+       IMG_HANDLE hPMR = psDevmemIntUnpinIN->hPMR;
+       PMR *psPMRInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psDevmemIntUnpinOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRInt,
+                                      hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psDevmemIntUnpinOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntUnpin_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psDevmemIntUnpinOUT->eError = DevmemIntUnpin(psPMRInt);
+
+DevmemIntUnpin_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntPinValidate(IMG_UINT32 ui32DispatchTableEntry,
+                                IMG_UINT8 * psDevmemIntPinValidateIN_UI8,
+                                IMG_UINT8 * psDevmemIntPinValidateOUT_UI8,
+                                CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE *psDevmemIntPinValidateIN =
+           (PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE *) IMG_OFFSET_ADDR(psDevmemIntPinValidateIN_UI8,
+                                                                     0);
+       PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE *psDevmemIntPinValidateOUT =
+           (PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE *)
+           IMG_OFFSET_ADDR(psDevmemIntPinValidateOUT_UI8, 0);
+
+       IMG_HANDLE hMapping = psDevmemIntPinValidateIN->hMapping;
+       DEVMEMINT_MAPPING *psMappingInt = NULL;
+       IMG_HANDLE hPMR = psDevmemIntPinValidateIN->hPMR;
+       PMR *psPMRInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psDevmemIntPinValidateOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psMappingInt,
+                                      hMapping, PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING, IMG_TRUE);
+       if (unlikely(psDevmemIntPinValidateOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntPinValidate_exit;
+       }
+
+       /* Look up the address from the handle */
+       psDevmemIntPinValidateOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRInt,
+                                      hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psDevmemIntPinValidateOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntPinValidate_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psDevmemIntPinValidateOUT->eError = DevmemIntPinValidate(psMappingInt, psPMRInt);
+
+DevmemIntPinValidate_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psMappingInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hMapping, PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING);
+       }
+
+       /* Unreference the previously looked up handle */
+       if (psPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnpinInvalidate(IMG_UINT32 ui32DispatchTableEntry,
+                                    IMG_UINT8 * psDevmemIntUnpinInvalidateIN_UI8,
+                                    IMG_UINT8 * psDevmemIntUnpinInvalidateOUT_UI8,
+                                    CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE *psDevmemIntUnpinInvalidateIN =
+           (PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE *)
+           IMG_OFFSET_ADDR(psDevmemIntUnpinInvalidateIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE *psDevmemIntUnpinInvalidateOUT =
+           (PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE *)
+           IMG_OFFSET_ADDR(psDevmemIntUnpinInvalidateOUT_UI8, 0);
+
+       IMG_HANDLE hMapping = psDevmemIntUnpinInvalidateIN->hMapping;
+       DEVMEMINT_MAPPING *psMappingInt = NULL;
+       IMG_HANDLE hPMR = psDevmemIntUnpinInvalidateIN->hPMR;
+       PMR *psPMRInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psDevmemIntUnpinInvalidateOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psMappingInt,
+                                      hMapping, PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING, IMG_TRUE);
+       if (unlikely(psDevmemIntUnpinInvalidateOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntUnpinInvalidate_exit;
+       }
+
+       /* Look up the address from the handle */
+       psDevmemIntUnpinInvalidateOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRInt,
+                                      hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psDevmemIntUnpinInvalidateOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntUnpinInvalidate_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psDevmemIntUnpinInvalidateOUT->eError = DevmemIntUnpinInvalidate(psMappingInt, psPMRInt);
+
+DevmemIntUnpinInvalidate_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psMappingInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hMapping, PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING);
+       }
+
+       /* Unreference the previously looked up handle */
+       if (psPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static PVRSRV_ERROR _DevmemIntCtxCreatepsDevMemServerContextIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = DevmemIntCtxDestroy((DEVMEMINT_CTX *) pvData);
+       return eError;
+}
+
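+/* Create-type calls allocate a handle for the newly created server object and
+ * register a release callback so that destroying the handle also destroys the
+ * object. If handle allocation fails, the exit path below destroys the
+ * partially created handle, or frees the object directly when no handle was
+ * created. */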
+static IMG_INT
+PVRSRVBridgeDevmemIntCtxCreate(IMG_UINT32 ui32DispatchTableEntry,
+                              IMG_UINT8 * psDevmemIntCtxCreateIN_UI8,
+                              IMG_UINT8 * psDevmemIntCtxCreateOUT_UI8,
+                              CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateIN =
+           (PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE *) IMG_OFFSET_ADDR(psDevmemIntCtxCreateIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateOUT =
+           (PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE *) IMG_OFFSET_ADDR(psDevmemIntCtxCreateOUT_UI8,
+                                                                    0);
+
+       DEVMEMINT_CTX *psDevMemServerContextInt = NULL;
+       IMG_HANDLE hPrivDataInt = NULL;
+
+       psDevmemIntCtxCreateOUT->hDevMemServerContext = NULL;
+
+       psDevmemIntCtxCreateOUT->eError =
+           DevmemIntCtxCreate(psConnection, OSGetDevNode(psConnection),
+                              psDevmemIntCtxCreateIN->bbKernelMemoryCtx,
+                              &psDevMemServerContextInt,
+                              &hPrivDataInt, &psDevmemIntCtxCreateOUT->ui32CPUCacheLineSize);
+       /* Exit early if bridged call fails */
+       if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK))
+       {
+               goto DevmemIntCtxCreate_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psDevmemIntCtxCreateOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                                                   &psDevmemIntCtxCreateOUT->
+                                                                   hDevMemServerContext,
+                                                                   (void *)
+                                                                   psDevMemServerContextInt,
+                                                                   PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+                                                                   PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                                                   (PFN_HANDLE_RELEASE) &
+                                                                   _DevmemIntCtxCreatepsDevMemServerContextIntRelease);
+       if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntCtxCreate_exit;
+       }
+
+       psDevmemIntCtxCreateOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+                                                                      &psDevmemIntCtxCreateOUT->
+                                                                      hPrivData,
+                                                                      (void *)hPrivDataInt,
+                                                                      PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+                                                                      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                                                      psDevmemIntCtxCreateOUT->
+                                                                      hDevMemServerContext);
+       if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntCtxCreate_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+DevmemIntCtxCreate_exit:
+
+       if (psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)
+       {
+               if (psDevmemIntCtxCreateOUT->hDevMemServerContext)
+               {
+                       PVRSRV_ERROR eError;
+
+                       /* Lock over handle creation cleanup. */
+                       LockHandle(psConnection->psHandleBase);
+
+                       eError = PVRSRVDestroyHandleUnlocked(psConnection->psHandleBase,
+                                                            (IMG_HANDLE) psDevmemIntCtxCreateOUT->
+                                                            hDevMemServerContext,
+                                                            PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+                       if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)))
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: %s", __func__, PVRSRVGetErrorString(eError)));
+                       }
+                       /* Releasing the handle should free/destroy/release the resource.
+                        * This should never fail... */
+                       PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+                       /* Avoid freeing/destroying/releasing the resource a second time below */
+                       psDevMemServerContextInt = NULL;
+                       /* Release now we have cleaned up creation handles. */
+                       UnlockHandle(psConnection->psHandleBase);
+
+               }
+
+               if (psDevMemServerContextInt)
+               {
+                       DevmemIntCtxDestroy(psDevMemServerContextInt);
+               }
+       }
+
+       return 0;
+}
+
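+/* Destroy-type calls release the client's handle with
+ * PVRSRVDestroyHandleStagedUnlocked. PVRSRV_ERROR_RETRY and
+ * PVRSRV_ERROR_KERNEL_CCB_FULL are not logged as errors here; whatever error
+ * is raised is still returned to the caller in eError. */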
+static IMG_INT
+PVRSRVBridgeDevmemIntCtxDestroy(IMG_UINT32 ui32DispatchTableEntry,
+                               IMG_UINT8 * psDevmemIntCtxDestroyIN_UI8,
+                               IMG_UINT8 * psDevmemIntCtxDestroyOUT_UI8,
+                               CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyIN =
+           (PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY *) IMG_OFFSET_ADDR(psDevmemIntCtxDestroyIN_UI8,
+                                                                    0);
+       PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyOUT =
+           (PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY *) IMG_OFFSET_ADDR(psDevmemIntCtxDestroyOUT_UI8,
+                                                                     0);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psHandleBase);
+
+       psDevmemIntCtxDestroyOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+                                             (IMG_HANDLE) psDevmemIntCtxDestroyIN->
+                                             hDevmemServerContext,
+                                             PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+       if (unlikely
+           ((psDevmemIntCtxDestroyOUT->eError != PVRSRV_OK)
+            && (psDevmemIntCtxDestroyOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL)
+            && (psDevmemIntCtxDestroyOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s",
+                        __func__, PVRSRVGetErrorString(psDevmemIntCtxDestroyOUT->eError)));
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntCtxDestroy_exit;
+       }
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+DevmemIntCtxDestroy_exit:
+
+       return 0;
+}
+
+static PVRSRV_ERROR _DevmemIntHeapCreatepsDevmemHeapPtrIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = DevmemIntHeapDestroy((DEVMEMINT_HEAP *) pvData);
+       return eError;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntHeapCreate(IMG_UINT32 ui32DispatchTableEntry,
+                               IMG_UINT8 * psDevmemIntHeapCreateIN_UI8,
+                               IMG_UINT8 * psDevmemIntHeapCreateOUT_UI8,
+                               CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateIN =
+           (PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE *) IMG_OFFSET_ADDR(psDevmemIntHeapCreateIN_UI8,
+                                                                    0);
+       PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateOUT =
+           (PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE *) IMG_OFFSET_ADDR(psDevmemIntHeapCreateOUT_UI8,
+                                                                     0);
+
+       IMG_HANDLE hDevmemCtx = psDevmemIntHeapCreateIN->hDevmemCtx;
+       DEVMEMINT_CTX *psDevmemCtxInt = NULL;
+       DEVMEMINT_HEAP *psDevmemHeapPtrInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psDevmemIntHeapCreateOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psDevmemCtxInt,
+                                      hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE);
+       if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntHeapCreate_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psDevmemIntHeapCreateOUT->eError =
+           DevmemIntHeapCreate(psDevmemCtxInt,
+                               psDevmemIntHeapCreateIN->sHeapBaseAddr,
+                               psDevmemIntHeapCreateIN->uiHeapLength,
+                               psDevmemIntHeapCreateIN->ui32Log2DataPageSize, &psDevmemHeapPtrInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK))
+       {
+               goto DevmemIntHeapCreate_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psDevmemIntHeapCreateOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                                                    &psDevmemIntHeapCreateOUT->
+                                                                    hDevmemHeapPtr,
+                                                                    (void *)psDevmemHeapPtrInt,
+                                                                    PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+                                                                    PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                                                    (PFN_HANDLE_RELEASE) &
+                                                                    _DevmemIntHeapCreatepsDevmemHeapPtrIntRelease);
+       if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntHeapCreate_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+DevmemIntHeapCreate_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psDevmemCtxInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       if (psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)
+       {
+               if (psDevmemHeapPtrInt)
+               {
+                       DevmemIntHeapDestroy(psDevmemHeapPtrInt);
+               }
+       }
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntHeapDestroy(IMG_UINT32 ui32DispatchTableEntry,
+                                IMG_UINT8 * psDevmemIntHeapDestroyIN_UI8,
+                                IMG_UINT8 * psDevmemIntHeapDestroyOUT_UI8,
+                                CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyIN =
+           (PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY *) IMG_OFFSET_ADDR(psDevmemIntHeapDestroyIN_UI8,
+                                                                     0);
+       PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyOUT =
+           (PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY *)
+           IMG_OFFSET_ADDR(psDevmemIntHeapDestroyOUT_UI8, 0);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psHandleBase);
+
+       psDevmemIntHeapDestroyOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+                                             (IMG_HANDLE) psDevmemIntHeapDestroyIN->hDevmemHeap,
+                                             PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+       if (unlikely((psDevmemIntHeapDestroyOUT->eError != PVRSRV_OK) &&
+                    (psDevmemIntHeapDestroyOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+                    (psDevmemIntHeapDestroyOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s",
+                        __func__, PVRSRVGetErrorString(psDevmemIntHeapDestroyOUT->eError)));
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntHeapDestroy_exit;
+       }
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+DevmemIntHeapDestroy_exit:
+
+       return 0;
+}
+
+static PVRSRV_ERROR _DevmemIntMapPMRpsMappingIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = DevmemIntUnmapPMR((DEVMEMINT_MAPPING *) pvData);
+       return eError;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntMapPMR(IMG_UINT32 ui32DispatchTableEntry,
+                           IMG_UINT8 * psDevmemIntMapPMRIN_UI8,
+                           IMG_UINT8 * psDevmemIntMapPMROUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR *psDevmemIntMapPMRIN =
+           (PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntMapPMRIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR *psDevmemIntMapPMROUT =
+           (PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntMapPMROUT_UI8, 0);
+
+       IMG_HANDLE hDevmemServerHeap = psDevmemIntMapPMRIN->hDevmemServerHeap;
+       DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL;
+       IMG_HANDLE hReservation = psDevmemIntMapPMRIN->hReservation;
+       DEVMEMINT_RESERVATION *psReservationInt = NULL;
+       IMG_HANDLE hPMR = psDevmemIntMapPMRIN->hPMR;
+       PMR *psPMRInt = NULL;
+       DEVMEMINT_MAPPING *psMappingInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psDevmemIntMapPMROUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psDevmemServerHeapInt,
+                                      hDevmemServerHeap,
+                                      PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE);
+       if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntMapPMR_exit;
+       }
+
+       /* Look up the address from the handle */
+       psDevmemIntMapPMROUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psReservationInt,
+                                      hReservation,
+                                      PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE);
+       if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntMapPMR_exit;
+       }
+
+       /* Look up the address from the handle */
+       psDevmemIntMapPMROUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRInt,
+                                      hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntMapPMR_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psDevmemIntMapPMROUT->eError =
+           DevmemIntMapPMR(psDevmemServerHeapInt,
+                           psReservationInt,
+                           psPMRInt, psDevmemIntMapPMRIN->uiMapFlags, &psMappingInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK))
+       {
+               goto DevmemIntMapPMR_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psDevmemIntMapPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                                                &psDevmemIntMapPMROUT->hMapping,
+                                                                (void *)psMappingInt,
+                                                                PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING,
+                                                                PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                                                (PFN_HANDLE_RELEASE) &
+                                                                _DevmemIntMapPMRpsMappingIntRelease);
+       if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntMapPMR_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+DevmemIntMapPMR_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psDevmemServerHeapInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hDevmemServerHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+       }
+
+       /* Unreference the previously looked up handle */
+       if (psReservationInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+       }
+
+       /* Unreference the previously looked up handle */
+       if (psPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       if (psDevmemIntMapPMROUT->eError != PVRSRV_OK)
+       {
+               if (psMappingInt)
+               {
+                       DevmemIntUnmapPMR(psMappingInt);
+               }
+       }
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnmapPMR(IMG_UINT32 ui32DispatchTableEntry,
+                             IMG_UINT8 * psDevmemIntUnmapPMRIN_UI8,
+                             IMG_UINT8 * psDevmemIntUnmapPMROUT_UI8,
+                             CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMRIN =
+           (PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntUnmapPMRIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMROUT =
+           (PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntUnmapPMROUT_UI8, 0);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psHandleBase);
+
+       psDevmemIntUnmapPMROUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+                                             (IMG_HANDLE) psDevmemIntUnmapPMRIN->hMapping,
+                                             PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING);
+       if (unlikely((psDevmemIntUnmapPMROUT->eError != PVRSRV_OK) &&
+                    (psDevmemIntUnmapPMROUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+                    (psDevmemIntUnmapPMROUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s", __func__, PVRSRVGetErrorString(psDevmemIntUnmapPMROUT->eError)));
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntUnmapPMR_exit;
+       }
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+DevmemIntUnmapPMR_exit:
+
+       return 0;
+}
+
+static PVRSRV_ERROR _DevmemIntReserveRangepsReservationIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = DevmemIntUnreserveRange((DEVMEMINT_RESERVATION *) pvData);
+       return eError;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntReserveRange(IMG_UINT32 ui32DispatchTableEntry,
+                                 IMG_UINT8 * psDevmemIntReserveRangeIN_UI8,
+                                 IMG_UINT8 * psDevmemIntReserveRangeOUT_UI8,
+                                 CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeIN =
+           (PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE *)
+           IMG_OFFSET_ADDR(psDevmemIntReserveRangeIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeOUT =
+           (PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE *)
+           IMG_OFFSET_ADDR(psDevmemIntReserveRangeOUT_UI8, 0);
+
+       IMG_HANDLE hDevmemServerHeap = psDevmemIntReserveRangeIN->hDevmemServerHeap;
+       DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL;
+       DEVMEMINT_RESERVATION *psReservationInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psDevmemIntReserveRangeOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psDevmemServerHeapInt,
+                                      hDevmemServerHeap,
+                                      PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE);
+       if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntReserveRange_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psDevmemIntReserveRangeOUT->eError =
+           DevmemIntReserveRange(psDevmemServerHeapInt,
+                                 psDevmemIntReserveRangeIN->sAddress,
+                                 psDevmemIntReserveRangeIN->uiLength, &psReservationInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK))
+       {
+               goto DevmemIntReserveRange_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psDevmemIntReserveRangeOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                                                      &psDevmemIntReserveRangeOUT->
+                                                                      hReservation,
+                                                                      (void *)psReservationInt,
+                                                                      PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+                                                                      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                                                      (PFN_HANDLE_RELEASE) &
+                                                                      _DevmemIntReserveRangepsReservationIntRelease);
+       if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntReserveRange_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+DevmemIntReserveRange_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psDevmemServerHeapInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hDevmemServerHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       if (psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)
+       {
+               if (psReservationInt)
+               {
+                       DevmemIntUnreserveRange(psReservationInt);
+               }
+       }
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnreserveRange(IMG_UINT32 ui32DispatchTableEntry,
+                                   IMG_UINT8 * psDevmemIntUnreserveRangeIN_UI8,
+                                   IMG_UINT8 * psDevmemIntUnreserveRangeOUT_UI8,
+                                   CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeIN =
+           (PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE *)
+           IMG_OFFSET_ADDR(psDevmemIntUnreserveRangeIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeOUT =
+           (PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE *)
+           IMG_OFFSET_ADDR(psDevmemIntUnreserveRangeOUT_UI8, 0);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psHandleBase);
+
+       psDevmemIntUnreserveRangeOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+                                             (IMG_HANDLE) psDevmemIntUnreserveRangeIN->
+                                             hReservation,
+                                             PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+       if (unlikely
+           ((psDevmemIntUnreserveRangeOUT->eError != PVRSRV_OK)
+            && (psDevmemIntUnreserveRangeOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL)
+            && (psDevmemIntUnreserveRangeOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s",
+                        __func__, PVRSRVGetErrorString(psDevmemIntUnreserveRangeOUT->eError)));
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntUnreserveRange_exit;
+       }
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+DevmemIntUnreserveRange_exit:
+
+       return 0;
+}
+
+static_assert(PMR_MAX_SUPPORTED_PAGE_COUNT <= IMG_UINT32_MAX,
+             "PMR_MAX_SUPPORTED_PAGE_COUNT must not be larger than IMG_UINT32_MAX");
+static_assert(PMR_MAX_SUPPORTED_PAGE_COUNT <= IMG_UINT32_MAX,
+             "PMR_MAX_SUPPORTED_PAGE_COUNT must not be larger than IMG_UINT32_MAX");
+
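+/* ChangeSparseMem carries variable-length arrays of page indices. The array
+ * sizes are validated against PMR_MAX_SUPPORTED_PAGE_COUNT, then the arrays
+ * are copied from user space either into the unused tail of the fixed bridge
+ * input buffer (when they fit, avoiding an allocation) or into a temporary
+ * buffer from OSAllocMemNoStats that is freed on exit. */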
+static IMG_INT
+PVRSRVBridgeChangeSparseMem(IMG_UINT32 ui32DispatchTableEntry,
+                           IMG_UINT8 * psChangeSparseMemIN_UI8,
+                           IMG_UINT8 * psChangeSparseMemOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_CHANGESPARSEMEM *psChangeSparseMemIN =
+           (PVRSRV_BRIDGE_IN_CHANGESPARSEMEM *) IMG_OFFSET_ADDR(psChangeSparseMemIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM *psChangeSparseMemOUT =
+           (PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM *) IMG_OFFSET_ADDR(psChangeSparseMemOUT_UI8, 0);
+
+       IMG_HANDLE hSrvDevMemHeap = psChangeSparseMemIN->hSrvDevMemHeap;
+       DEVMEMINT_HEAP *psSrvDevMemHeapInt = NULL;
+       IMG_HANDLE hPMR = psChangeSparseMemIN->hPMR;
+       PMR *psPMRInt = NULL;
+       IMG_UINT32 *ui32AllocPageIndicesInt = NULL;
+       IMG_UINT32 *ui32FreePageIndicesInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32)) +
+           ((IMG_UINT64) psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32)) + 0;
+
+       if (unlikely(psChangeSparseMemIN->ui32AllocPageCount > PMR_MAX_SUPPORTED_PAGE_COUNT))
+       {
+               psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto ChangeSparseMem_exit;
+       }
+
+       if (unlikely(psChangeSparseMemIN->ui32FreePageCount > PMR_MAX_SUPPORTED_PAGE_COUNT))
+       {
+               psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto ChangeSparseMem_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto ChangeSparseMem_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psChangeSparseMemIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psChangeSparseMemIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psChangeSparseMemOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto ChangeSparseMem_exit;
+                       }
+               }
+       }
+
+       if (psChangeSparseMemIN->ui32AllocPageCount != 0)
+       {
+               ui32AllocPageIndicesInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32AllocPageIndicesInt,
+                    (const void __user *)psChangeSparseMemIN->pui32AllocPageIndices,
+                    psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+               {
+                       psChangeSparseMemOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto ChangeSparseMem_exit;
+               }
+       }
+       if (psChangeSparseMemIN->ui32FreePageCount != 0)
+       {
+               ui32FreePageIndicesInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32FreePageIndicesInt,
+                    (const void __user *)psChangeSparseMemIN->pui32FreePageIndices,
+                    psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+               {
+                       psChangeSparseMemOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto ChangeSparseMem_exit;
+               }
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psChangeSparseMemOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psSrvDevMemHeapInt,
+                                      hSrvDevMemHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE);
+       if (unlikely(psChangeSparseMemOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto ChangeSparseMem_exit;
+       }
+
+       /* Look up the address from the handle */
+       psChangeSparseMemOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRInt,
+                                      hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psChangeSparseMemOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto ChangeSparseMem_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psChangeSparseMemOUT->eError =
+           DevmemIntChangeSparse(psSrvDevMemHeapInt,
+                                 psPMRInt,
+                                 psChangeSparseMemIN->ui32AllocPageCount,
+                                 ui32AllocPageIndicesInt,
+                                 psChangeSparseMemIN->ui32FreePageCount,
+                                 ui32FreePageIndicesInt,
+                                 psChangeSparseMemIN->ui32SparseFlags,
+                                 psChangeSparseMemIN->uiFlags,
+                                 psChangeSparseMemIN->sDevVAddr,
+                                 psChangeSparseMemIN->ui64CPUVAddr);
+
+ChangeSparseMem_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psSrvDevMemHeapInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hSrvDevMemHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+       }
+
+       /* Unreference the previously looked up handle */
+       if (psPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psChangeSparseMemOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntMapPages(IMG_UINT32 ui32DispatchTableEntry,
+                             IMG_UINT8 * psDevmemIntMapPagesIN_UI8,
+                             IMG_UINT8 * psDevmemIntMapPagesOUT_UI8,
+                             CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES *psDevmemIntMapPagesIN =
+           (PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES *) IMG_OFFSET_ADDR(psDevmemIntMapPagesIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES *psDevmemIntMapPagesOUT =
+           (PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES *) IMG_OFFSET_ADDR(psDevmemIntMapPagesOUT_UI8, 0);
+
+       IMG_HANDLE hReservation = psDevmemIntMapPagesIN->hReservation;
+       DEVMEMINT_RESERVATION *psReservationInt = NULL;
+       IMG_HANDLE hPMR = psDevmemIntMapPagesIN->hPMR;
+       PMR *psPMRInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psDevmemIntMapPagesOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psReservationInt,
+                                      hReservation,
+                                      PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE);
+       if (unlikely(psDevmemIntMapPagesOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntMapPages_exit;
+       }
+
+       /* Look up the address from the handle */
+       psDevmemIntMapPagesOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRInt,
+                                      hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psDevmemIntMapPagesOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntMapPages_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psDevmemIntMapPagesOUT->eError =
+           DevmemIntMapPages(psReservationInt,
+                             psPMRInt,
+                             psDevmemIntMapPagesIN->ui32PageCount,
+                             psDevmemIntMapPagesIN->ui32PhysicalPgOffset,
+                             psDevmemIntMapPagesIN->uiFlags, psDevmemIntMapPagesIN->sDevVAddr);
+
+DevmemIntMapPages_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psReservationInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+       }
+
+       /* Unreference the previously looked up handle */
+       if (psPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnmapPages(IMG_UINT32 ui32DispatchTableEntry,
+                               IMG_UINT8 * psDevmemIntUnmapPagesIN_UI8,
+                               IMG_UINT8 * psDevmemIntUnmapPagesOUT_UI8,
+                               CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES *psDevmemIntUnmapPagesIN =
+           (PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES *) IMG_OFFSET_ADDR(psDevmemIntUnmapPagesIN_UI8,
+                                                                    0);
+       PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES *psDevmemIntUnmapPagesOUT =
+           (PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES *) IMG_OFFSET_ADDR(psDevmemIntUnmapPagesOUT_UI8,
+                                                                     0);
+
+       IMG_HANDLE hReservation = psDevmemIntUnmapPagesIN->hReservation;
+       DEVMEMINT_RESERVATION *psReservationInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psDevmemIntUnmapPagesOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psReservationInt,
+                                      hReservation,
+                                      PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE);
+       if (unlikely(psDevmemIntUnmapPagesOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntUnmapPages_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psDevmemIntUnmapPagesOUT->eError =
+           DevmemIntUnmapPages(psReservationInt,
+                               psDevmemIntUnmapPagesIN->sDevVAddr,
+                               psDevmemIntUnmapPagesIN->ui32PageCount);
+
+DevmemIntUnmapPages_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psReservationInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIsVDevAddrValid(IMG_UINT32 ui32DispatchTableEntry,
+                                 IMG_UINT8 * psDevmemIsVDevAddrValidIN_UI8,
+                                 IMG_UINT8 * psDevmemIsVDevAddrValidOUT_UI8,
+                                 CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID *psDevmemIsVDevAddrValidIN =
+           (PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID *)
+           IMG_OFFSET_ADDR(psDevmemIsVDevAddrValidIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID *psDevmemIsVDevAddrValidOUT =
+           (PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID *)
+           IMG_OFFSET_ADDR(psDevmemIsVDevAddrValidOUT_UI8, 0);
+
+       IMG_HANDLE hDevmemCtx = psDevmemIsVDevAddrValidIN->hDevmemCtx;
+       DEVMEMINT_CTX *psDevmemCtxInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psDevmemIsVDevAddrValidOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psDevmemCtxInt,
+                                      hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE);
+       if (unlikely(psDevmemIsVDevAddrValidOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIsVDevAddrValid_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psDevmemIsVDevAddrValidOUT->eError =
+           DevmemIntIsVDevAddrValid(psConnection, OSGetDevNode(psConnection),
+                                    psDevmemCtxInt, psDevmemIsVDevAddrValidIN->sAddress);
+
+DevmemIsVDevAddrValid_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psDevmemCtxInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
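+/* Feature-dependent bridge functions are only compiled when the corresponding
+ * feature macro is defined; otherwise the name is defined as NULL so that no
+ * real handler is wired up for the call. */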
+#if defined(RGX_SRV_SLC_RANGEBASED_CFI_SUPPORTED)
+
+static IMG_INT
+PVRSRVBridgeDevmemFlushDevSLCRange(IMG_UINT32 ui32DispatchTableEntry,
+                                  IMG_UINT8 * psDevmemFlushDevSLCRangeIN_UI8,
+                                  IMG_UINT8 * psDevmemFlushDevSLCRangeOUT_UI8,
+                                  CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE *psDevmemFlushDevSLCRangeIN =
+           (PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE *)
+           IMG_OFFSET_ADDR(psDevmemFlushDevSLCRangeIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE *psDevmemFlushDevSLCRangeOUT =
+           (PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE *)
+           IMG_OFFSET_ADDR(psDevmemFlushDevSLCRangeOUT_UI8, 0);
+
+       IMG_HANDLE hDevmemCtx = psDevmemFlushDevSLCRangeIN->hDevmemCtx;
+       DEVMEMINT_CTX *psDevmemCtxInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psDevmemFlushDevSLCRangeOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psDevmemCtxInt,
+                                      hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE);
+       if (unlikely(psDevmemFlushDevSLCRangeOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemFlushDevSLCRange_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psDevmemFlushDevSLCRangeOUT->eError =
+           DevmemIntFlushDevSLCRange(psDevmemCtxInt,
+                                     psDevmemFlushDevSLCRangeIN->sAddress,
+                                     psDevmemFlushDevSLCRangeIN->uiSize,
+                                     psDevmemFlushDevSLCRangeIN->bInvalidate);
+
+DevmemFlushDevSLCRange_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psDevmemCtxInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+#else
+#define PVRSRVBridgeDevmemFlushDevSLCRange NULL
+#endif
+
+#if defined(RGX_FEATURE_FBCDC)
+
+static IMG_INT
+PVRSRVBridgeDevmemInvalidateFBSCTable(IMG_UINT32 ui32DispatchTableEntry,
+                                     IMG_UINT8 * psDevmemInvalidateFBSCTableIN_UI8,
+                                     IMG_UINT8 * psDevmemInvalidateFBSCTableOUT_UI8,
+                                     CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE *psDevmemInvalidateFBSCTableIN =
+           (PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE *)
+           IMG_OFFSET_ADDR(psDevmemInvalidateFBSCTableIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE *psDevmemInvalidateFBSCTableOUT =
+           (PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE *)
+           IMG_OFFSET_ADDR(psDevmemInvalidateFBSCTableOUT_UI8, 0);
+
+       IMG_HANDLE hDevmemCtx = psDevmemInvalidateFBSCTableIN->hDevmemCtx;
+       DEVMEMINT_CTX *psDevmemCtxInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psDevmemInvalidateFBSCTableOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psDevmemCtxInt,
+                                      hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE);
+       if (unlikely(psDevmemInvalidateFBSCTableOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemInvalidateFBSCTable_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psDevmemInvalidateFBSCTableOUT->eError =
+           DevmemIntInvalidateFBSCTable(psDevmemCtxInt,
+                                        psDevmemInvalidateFBSCTableIN->ui64FBSCEntries);
+
+DevmemInvalidateFBSCTable_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psDevmemCtxInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+#else
+#define PVRSRVBridgeDevmemInvalidateFBSCTable NULL
+#endif
+
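+/* The HeapCfg* queries below do not look up any object handles; they call
+ * straight into the heap configuration functions for the connection's device
+ * node. */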
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapConfigCount(IMG_UINT32 ui32DispatchTableEntry,
+                                  IMG_UINT8 * psHeapCfgHeapConfigCountIN_UI8,
+                                  IMG_UINT8 * psHeapCfgHeapConfigCountOUT_UI8,
+                                  CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT *psHeapCfgHeapConfigCountIN =
+           (PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT *)
+           IMG_OFFSET_ADDR(psHeapCfgHeapConfigCountIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT *psHeapCfgHeapConfigCountOUT =
+           (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT *)
+           IMG_OFFSET_ADDR(psHeapCfgHeapConfigCountOUT_UI8, 0);
+
+       PVR_UNREFERENCED_PARAMETER(psHeapCfgHeapConfigCountIN);
+
+       psHeapCfgHeapConfigCountOUT->eError =
+           HeapCfgHeapConfigCount(psConnection, OSGetDevNode(psConnection),
+                                  &psHeapCfgHeapConfigCountOUT->ui32NumHeapConfigs);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapCount(IMG_UINT32 ui32DispatchTableEntry,
+                            IMG_UINT8 * psHeapCfgHeapCountIN_UI8,
+                            IMG_UINT8 * psHeapCfgHeapCountOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT *psHeapCfgHeapCountIN =
+           (PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT *) IMG_OFFSET_ADDR(psHeapCfgHeapCountIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT *psHeapCfgHeapCountOUT =
+           (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT *) IMG_OFFSET_ADDR(psHeapCfgHeapCountOUT_UI8, 0);
+
+       psHeapCfgHeapCountOUT->eError =
+           HeapCfgHeapCount(psConnection, OSGetDevNode(psConnection),
+                            psHeapCfgHeapCountIN->ui32HeapConfigIndex,
+                            &psHeapCfgHeapCountOUT->ui32NumHeaps);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapConfigName(IMG_UINT32 ui32DispatchTableEntry,
+                                 IMG_UINT8 * psHeapCfgHeapConfigNameIN_UI8,
+                                 IMG_UINT8 * psHeapCfgHeapConfigNameOUT_UI8,
+                                 CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME *psHeapCfgHeapConfigNameIN =
+           (PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME *)
+           IMG_OFFSET_ADDR(psHeapCfgHeapConfigNameIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME *psHeapCfgHeapConfigNameOUT =
+           (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME *)
+           IMG_OFFSET_ADDR(psHeapCfgHeapConfigNameOUT_UI8, 0);
+
+       IMG_CHAR *puiHeapConfigNameInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR)) +
+           0;
+
+       if (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz > DEVMEM_HEAPNAME_MAXLENGTH)
+       {
+               psHeapCfgHeapConfigNameOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto HeapCfgHeapConfigName_exit;
+       }
+
+       psHeapCfgHeapConfigNameOUT->puiHeapConfigName =
+           psHeapCfgHeapConfigNameIN->puiHeapConfigName;
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psHeapCfgHeapConfigNameOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto HeapCfgHeapConfigName_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psHeapCfgHeapConfigNameIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psHeapCfgHeapConfigNameIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psHeapCfgHeapConfigNameOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto HeapCfgHeapConfigName_exit;
+                       }
+               }
+       }
+
+       if (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz != 0)
+       {
+               puiHeapConfigNameInt =
+                   (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR);
+       }
+
+       psHeapCfgHeapConfigNameOUT->eError =
+           HeapCfgHeapConfigName(psConnection, OSGetDevNode(psConnection),
+                                 psHeapCfgHeapConfigNameIN->ui32HeapConfigIndex,
+                                 psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz,
+                                 puiHeapConfigNameInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psHeapCfgHeapConfigNameOUT->eError != PVRSRV_OK))
+       {
+               goto HeapCfgHeapConfigName_exit;
+       }
+
+       /* If dest ptr is non-null and we have data to copy */
+       if ((puiHeapConfigNameInt) &&
+           ((psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR)) > 0))
+       {
+               if (unlikely
+                   (OSCopyToUser
+                    (NULL, (void __user *)psHeapCfgHeapConfigNameOUT->puiHeapConfigName,
+                     puiHeapConfigNameInt,
+                     (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR))) !=
+                    PVRSRV_OK))
+               {
+                       psHeapCfgHeapConfigNameOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto HeapCfgHeapConfigName_exit;
+               }
+       }
+
+HeapCfgHeapConfigName_exit:
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psHeapCfgHeapConfigNameOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapDetails(IMG_UINT32 ui32DispatchTableEntry,
+                              IMG_UINT8 * psHeapCfgHeapDetailsIN_UI8,
+                              IMG_UINT8 * psHeapCfgHeapDetailsOUT_UI8,
+                              CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS *psHeapCfgHeapDetailsIN =
+           (PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS *) IMG_OFFSET_ADDR(psHeapCfgHeapDetailsIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS *psHeapCfgHeapDetailsOUT =
+           (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS *) IMG_OFFSET_ADDR(psHeapCfgHeapDetailsOUT_UI8,
+                                                                    0);
+
+       IMG_CHAR *puiHeapNameOutInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR)) + 0;
+
+       if (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz > DEVMEM_HEAPNAME_MAXLENGTH)
+       {
+               psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto HeapCfgHeapDetails_exit;
+       }
+
+       psHeapCfgHeapDetailsOUT->puiHeapNameOut = psHeapCfgHeapDetailsIN->puiHeapNameOut;
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto HeapCfgHeapDetails_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psHeapCfgHeapDetailsIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psHeapCfgHeapDetailsIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto HeapCfgHeapDetails_exit;
+                       }
+               }
+       }
+
+       if (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz != 0)
+       {
+               puiHeapNameOutInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR);
+       }
+
+       psHeapCfgHeapDetailsOUT->eError =
+           HeapCfgHeapDetails(psConnection, OSGetDevNode(psConnection),
+                              psHeapCfgHeapDetailsIN->ui32HeapConfigIndex,
+                              psHeapCfgHeapDetailsIN->ui32HeapIndex,
+                              psHeapCfgHeapDetailsIN->ui32HeapNameBufSz,
+                              puiHeapNameOutInt,
+                              &psHeapCfgHeapDetailsOUT->sDevVAddrBase,
+                              &psHeapCfgHeapDetailsOUT->uiHeapLength,
+                              &psHeapCfgHeapDetailsOUT->uiReservedRegionLength,
+                              &psHeapCfgHeapDetailsOUT->ui32Log2DataPageSizeOut,
+                              &psHeapCfgHeapDetailsOUT->ui32Log2ImportAlignmentOut);
+       /* Exit early if bridged call fails */
+       if (unlikely(psHeapCfgHeapDetailsOUT->eError != PVRSRV_OK))
+       {
+               goto HeapCfgHeapDetails_exit;
+       }
+
+       /* If dest ptr is non-null and we have data to copy */
+       if ((puiHeapNameOutInt) &&
+           ((psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR)) > 0))
+       {
+               if (unlikely
+                   (OSCopyToUser
+                    (NULL, (void __user *)psHeapCfgHeapDetailsOUT->puiHeapNameOut,
+                     puiHeapNameOutInt,
+                     (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR))) != PVRSRV_OK))
+               {
+                       psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto HeapCfgHeapDetails_exit;
+               }
+       }
+
+HeapCfgHeapDetails_exit:
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psHeapCfgHeapDetailsOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
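+/*
+ * Handle-based bridges follow a common locking pattern: the device memory
+ * context handle is resolved to a kernel pointer under the connection's handle
+ * lock, the lock is dropped for the actual server call, and the reference taken
+ * by the lookup is released again (under the lock) in the exit path.
+ */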
+static IMG_INT
+PVRSRVBridgeDevmemIntRegisterPFNotifyKM(IMG_UINT32 ui32DispatchTableEntry,
+                                       IMG_UINT8 * psDevmemIntRegisterPFNotifyKMIN_UI8,
+                                       IMG_UINT8 * psDevmemIntRegisterPFNotifyKMOUT_UI8,
+                                       CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM *psDevmemIntRegisterPFNotifyKMIN =
+           (PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM *)
+           IMG_OFFSET_ADDR(psDevmemIntRegisterPFNotifyKMIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM *psDevmemIntRegisterPFNotifyKMOUT =
+           (PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM *)
+           IMG_OFFSET_ADDR(psDevmemIntRegisterPFNotifyKMOUT_UI8, 0);
+
+       IMG_HANDLE hDevm = psDevmemIntRegisterPFNotifyKMIN->hDevm;
+       DEVMEMINT_CTX *psDevmInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psDevmemIntRegisterPFNotifyKMOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psDevmInt,
+                                      hDevm, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE);
+       if (unlikely(psDevmemIntRegisterPFNotifyKMOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntRegisterPFNotifyKM_exit;
+       }
+       /* Release the lock now that the handle lookup is complete. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psDevmemIntRegisterPFNotifyKMOUT->eError =
+           DevmemIntRegisterPFNotifyKM(psDevmInt,
+                                       psDevmemIntRegisterPFNotifyKMIN->ui32PID,
+                                       psDevmemIntRegisterPFNotifyKMIN->bRegister);
+
+DevmemIntRegisterPFNotifyKM_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psDevmInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hDevm, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+       }
+       /* Release the lock now that the looked-up handles have been cleaned up. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeGetMaxPhysHeapCount(IMG_UINT32 ui32DispatchTableEntry,
+                               IMG_UINT8 * psGetMaxPhysHeapCountIN_UI8,
+                               IMG_UINT8 * psGetMaxPhysHeapCountOUT_UI8,
+                               CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_GETMAXPHYSHEAPCOUNT *psGetMaxPhysHeapCountIN =
+           (PVRSRV_BRIDGE_IN_GETMAXPHYSHEAPCOUNT *) IMG_OFFSET_ADDR(psGetMaxPhysHeapCountIN_UI8,
+                                                                    0);
+       PVRSRV_BRIDGE_OUT_GETMAXPHYSHEAPCOUNT *psGetMaxPhysHeapCountOUT =
+           (PVRSRV_BRIDGE_OUT_GETMAXPHYSHEAPCOUNT *) IMG_OFFSET_ADDR(psGetMaxPhysHeapCountOUT_UI8,
+                                                                     0);
+
+       PVR_UNREFERENCED_PARAMETER(psGetMaxPhysHeapCountIN);
+
+       psGetMaxPhysHeapCountOUT->eError =
+           PVRSRVGetMaxPhysHeapCountKM(psConnection, OSGetDevNode(psConnection),
+                                       &psGetMaxPhysHeapCountOUT->ui32PhysHeapCount);
+
+       return 0;
+}
+
+static_assert(PVRSRV_PHYS_HEAP_LAST <= IMG_UINT32_MAX,
+             "PVRSRV_PHYS_HEAP_LAST must not be larger than IMG_UINT32_MAX");
+
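+/*
+ * PhysHeapGetMemInfo marshals data in both directions: the array of physical
+ * heap IDs is copied in from user space, and the resulting PHYS_HEAP_MEM_STATS
+ * array is copied back out. Both arrays are carved out of a single staging
+ * buffer via IMG_OFFSET_ADDR/ui32NextOffset, sized up front from the
+ * caller-supplied ui32PhysHeapCount (capped at PVRSRV_PHYS_HEAP_LAST).
+ */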
+static IMG_INT
+PVRSRVBridgePhysHeapGetMemInfo(IMG_UINT32 ui32DispatchTableEntry,
+                              IMG_UINT8 * psPhysHeapGetMemInfoIN_UI8,
+                              IMG_UINT8 * psPhysHeapGetMemInfoOUT_UI8,
+                              CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO *psPhysHeapGetMemInfoIN =
+           (PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO *) IMG_OFFSET_ADDR(psPhysHeapGetMemInfoIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO *psPhysHeapGetMemInfoOUT =
+           (PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO *) IMG_OFFSET_ADDR(psPhysHeapGetMemInfoOUT_UI8,
+                                                                    0);
+
+       PVRSRV_PHYS_HEAP *eaPhysHeapIDInt = NULL;
+       PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStatsInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP)) +
+           ((IMG_UINT64) psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS)) +
+           0;
+
+       if (unlikely(psPhysHeapGetMemInfoIN->ui32PhysHeapCount > PVRSRV_PHYS_HEAP_LAST))
+       {
+               psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto PhysHeapGetMemInfo_exit;
+       }
+
+       psPhysHeapGetMemInfoOUT->pasapPhysHeapMemStats =
+           psPhysHeapGetMemInfoIN->pasapPhysHeapMemStats;
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto PhysHeapGetMemInfo_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psPhysHeapGetMemInfoIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysHeapGetMemInfoIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto PhysHeapGetMemInfo_exit;
+                       }
+               }
+       }
+
+       if (psPhysHeapGetMemInfoIN->ui32PhysHeapCount != 0)
+       {
+               eaPhysHeapIDInt =
+                   (PVRSRV_PHYS_HEAP *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP);
+       }
+
+       /* Copy the data over */
+       if (psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, eaPhysHeapIDInt,
+                    (const void __user *)psPhysHeapGetMemInfoIN->peaPhysHeapID,
+                    psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP)) !=
+                   PVRSRV_OK)
+               {
+                       psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto PhysHeapGetMemInfo_exit;
+               }
+       }
+       if (psPhysHeapGetMemInfoIN->ui32PhysHeapCount != 0)
+       {
+               pasapPhysHeapMemStatsInt =
+                   (PHYS_HEAP_MEM_STATS *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS);
+       }
+
+       psPhysHeapGetMemInfoOUT->eError =
+           PVRSRVPhysHeapGetMemInfoKM(psConnection, OSGetDevNode(psConnection),
+                                      psPhysHeapGetMemInfoIN->ui32PhysHeapCount,
+                                      eaPhysHeapIDInt, pasapPhysHeapMemStatsInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psPhysHeapGetMemInfoOUT->eError != PVRSRV_OK))
+       {
+               goto PhysHeapGetMemInfo_exit;
+       }
+
+       /* If dest ptr is non-null and we have data to copy */
+       if ((pasapPhysHeapMemStatsInt) &&
+           ((psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS)) > 0))
+       {
+               if (unlikely
+                   (OSCopyToUser
+                    (NULL, (void __user *)psPhysHeapGetMemInfoOUT->pasapPhysHeapMemStats,
+                     pasapPhysHeapMemStatsInt,
+                     (psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS))) !=
+                    PVRSRV_OK))
+               {
+                       psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto PhysHeapGetMemInfo_exit;
+               }
+       }
+
+PhysHeapGetMemInfo_exit:
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psPhysHeapGetMemInfoOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeGetDefaultPhysicalHeap(IMG_UINT32 ui32DispatchTableEntry,
+                                  IMG_UINT8 * psGetDefaultPhysicalHeapIN_UI8,
+                                  IMG_UINT8 * psGetDefaultPhysicalHeapOUT_UI8,
+                                  CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_GETDEFAULTPHYSICALHEAP *psGetDefaultPhysicalHeapIN =
+           (PVRSRV_BRIDGE_IN_GETDEFAULTPHYSICALHEAP *)
+           IMG_OFFSET_ADDR(psGetDefaultPhysicalHeapIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP *psGetDefaultPhysicalHeapOUT =
+           (PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP *)
+           IMG_OFFSET_ADDR(psGetDefaultPhysicalHeapOUT_UI8, 0);
+
+       PVR_UNREFERENCED_PARAMETER(psGetDefaultPhysicalHeapIN);
+
+       psGetDefaultPhysicalHeapOUT->eError =
+           PVRSRVGetDefaultPhysicalHeapKM(psConnection, OSGetDevNode(psConnection),
+                                          &psGetDefaultPhysicalHeapOUT->eHeap);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeGetHeapPhysMemUsage(IMG_UINT32 ui32DispatchTableEntry,
+                               IMG_UINT8 * psGetHeapPhysMemUsageIN_UI8,
+                               IMG_UINT8 * psGetHeapPhysMemUsageOUT_UI8,
+                               CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGE *psGetHeapPhysMemUsageIN =
+           (PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGE *) IMG_OFFSET_ADDR(psGetHeapPhysMemUsageIN_UI8,
+                                                                    0);
+       PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGE *psGetHeapPhysMemUsageOUT =
+           (PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGE *) IMG_OFFSET_ADDR(psGetHeapPhysMemUsageOUT_UI8,
+                                                                     0);
+
+       PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStatsInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psGetHeapPhysMemUsageIN->ui32PhysHeapCount *
+            sizeof(PHYS_HEAP_MEM_STATS)) + 0;
+
+       if (psGetHeapPhysMemUsageIN->ui32PhysHeapCount > PVRSRV_PHYS_HEAP_LAST)
+       {
+               psGetHeapPhysMemUsageOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto GetHeapPhysMemUsage_exit;
+       }
+
+       psGetHeapPhysMemUsageOUT->pasapPhysHeapMemStats =
+           psGetHeapPhysMemUsageIN->pasapPhysHeapMemStats;
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psGetHeapPhysMemUsageOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto GetHeapPhysMemUsage_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psGetHeapPhysMemUsageIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psGetHeapPhysMemUsageIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psGetHeapPhysMemUsageOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto GetHeapPhysMemUsage_exit;
+                       }
+               }
+       }
+
+       if (psGetHeapPhysMemUsageIN->ui32PhysHeapCount != 0)
+       {
+               pasapPhysHeapMemStatsInt =
+                   (PHYS_HEAP_MEM_STATS *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psGetHeapPhysMemUsageIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS);
+       }
+
+       psGetHeapPhysMemUsageOUT->eError =
+           PVRSRVGetHeapPhysMemUsageKM(psConnection, OSGetDevNode(psConnection),
+                                       psGetHeapPhysMemUsageIN->ui32PhysHeapCount,
+                                       pasapPhysHeapMemStatsInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psGetHeapPhysMemUsageOUT->eError != PVRSRV_OK))
+       {
+               goto GetHeapPhysMemUsage_exit;
+       }
+
+       /* If dest ptr is non-null and we have data to copy */
+       if ((pasapPhysHeapMemStatsInt) &&
+           ((psGetHeapPhysMemUsageIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS)) > 0))
+       {
+               if (unlikely
+                   (OSCopyToUser
+                    (NULL, (void __user *)psGetHeapPhysMemUsageOUT->pasapPhysHeapMemStats,
+                     pasapPhysHeapMemStatsInt,
+                     (psGetHeapPhysMemUsageIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS))) !=
+                    PVRSRV_OK))
+               {
+                       psGetHeapPhysMemUsageOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto GetHeapPhysMemUsage_exit;
+               }
+       }
+
+GetHeapPhysMemUsage_exit:
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psGetHeapPhysMemUsageOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemGetFaultAddress(IMG_UINT32 ui32DispatchTableEntry,
+                                 IMG_UINT8 * psDevmemGetFaultAddressIN_UI8,
+                                 IMG_UINT8 * psDevmemGetFaultAddressOUT_UI8,
+                                 CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS *psDevmemGetFaultAddressIN =
+           (PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS *)
+           IMG_OFFSET_ADDR(psDevmemGetFaultAddressIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS *psDevmemGetFaultAddressOUT =
+           (PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS *)
+           IMG_OFFSET_ADDR(psDevmemGetFaultAddressOUT_UI8, 0);
+
+       IMG_HANDLE hDevmemCtx = psDevmemGetFaultAddressIN->hDevmemCtx;
+       DEVMEMINT_CTX *psDevmemCtxInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psDevmemGetFaultAddressOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psDevmemCtxInt,
+                                      hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE);
+       if (unlikely(psDevmemGetFaultAddressOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemGetFaultAddress_exit;
+       }
+       /* Release the lock now that the handle lookup is complete. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psDevmemGetFaultAddressOUT->eError =
+           DevmemIntGetFaultAddress(psConnection, OSGetDevNode(psConnection),
+                                    psDevmemCtxInt, &psDevmemGetFaultAddressOUT->sFaultAddress);
+
+DevmemGetFaultAddress_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psDevmemCtxInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+       }
+       /* Release the lock now that the looked-up handles have been cleaned up. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
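+/*
+ * The OOM statistics bridge is only built when PVRSRV_ENABLE_PROCESS_STATS is
+ * defined; otherwise the handler macro falls back to NULL, so the dispatch
+ * table entry registered below carries no implementation.
+ */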
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+
+static IMG_INT
+PVRSRVBridgePVRSRVUpdateOOMStats(IMG_UINT32 ui32DispatchTableEntry,
+                                IMG_UINT8 * psPVRSRVUpdateOOMStatsIN_UI8,
+                                IMG_UINT8 * psPVRSRVUpdateOOMStatsOUT_UI8,
+                                CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS *psPVRSRVUpdateOOMStatsIN =
+           (PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS *) IMG_OFFSET_ADDR(psPVRSRVUpdateOOMStatsIN_UI8,
+                                                                     0);
+       PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS *psPVRSRVUpdateOOMStatsOUT =
+           (PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS *)
+           IMG_OFFSET_ADDR(psPVRSRVUpdateOOMStatsOUT_UI8, 0);
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       psPVRSRVUpdateOOMStatsOUT->eError =
+           PVRSRVServerUpdateOOMStats(psPVRSRVUpdateOOMStatsIN->ui32ui32StatType,
+                                      psPVRSRVUpdateOOMStatsIN->ui32pid);
+
+       return 0;
+}
+
+#else
+#define PVRSRVBridgePVRSRVUpdateOOMStats NULL
+#endif
+
+static_assert(PVRSRV_PHYS_HEAP_LAST <= IMG_UINT32_MAX,
+             "PVRSRV_PHYS_HEAP_LAST must not be larger than IMG_UINT32_MAX");
+
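+/*
+ * The "Pkd" handlers below mirror PhysHeapGetMemInfo and GetHeapPhysMemUsage
+ * but return packed PHYS_HEAP_MEM_STATS_PKD records instead of
+ * PHYS_HEAP_MEM_STATS; the marshalling logic is otherwise identical.
+ */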
+static IMG_INT
+PVRSRVBridgePhysHeapGetMemInfoPkd(IMG_UINT32 ui32DispatchTableEntry,
+                                 IMG_UINT8 * psPhysHeapGetMemInfoPkdIN_UI8,
+                                 IMG_UINT8 * psPhysHeapGetMemInfoPkdOUT_UI8,
+                                 CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFOPKD *psPhysHeapGetMemInfoPkdIN =
+           (PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFOPKD *)
+           IMG_OFFSET_ADDR(psPhysHeapGetMemInfoPkdIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFOPKD *psPhysHeapGetMemInfoPkdOUT =
+           (PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFOPKD *)
+           IMG_OFFSET_ADDR(psPhysHeapGetMemInfoPkdOUT_UI8, 0);
+
+       PVRSRV_PHYS_HEAP *eaPhysHeapIDInt = NULL;
+       PHYS_HEAP_MEM_STATS_PKD *psapPhysHeapMemStatsInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP)) +
+           ((IMG_UINT64) psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount *
+            sizeof(PHYS_HEAP_MEM_STATS_PKD)) + 0;
+
+       if (unlikely(psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount > PVRSRV_PHYS_HEAP_LAST))
+       {
+               psPhysHeapGetMemInfoPkdOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto PhysHeapGetMemInfoPkd_exit;
+       }
+
+       psPhysHeapGetMemInfoPkdOUT->psapPhysHeapMemStats =
+           psPhysHeapGetMemInfoPkdIN->psapPhysHeapMemStats;
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psPhysHeapGetMemInfoPkdOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto PhysHeapGetMemInfoPkd_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psPhysHeapGetMemInfoPkdIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysHeapGetMemInfoPkdIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psPhysHeapGetMemInfoPkdOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto PhysHeapGetMemInfoPkd_exit;
+                       }
+               }
+       }
+
+       if (psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount != 0)
+       {
+               eaPhysHeapIDInt =
+                   (PVRSRV_PHYS_HEAP *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP);
+       }
+
+       /* Copy the data over */
+       if (psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, eaPhysHeapIDInt,
+                    (const void __user *)psPhysHeapGetMemInfoPkdIN->peaPhysHeapID,
+                    psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP)) !=
+                   PVRSRV_OK)
+               {
+                       psPhysHeapGetMemInfoPkdOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto PhysHeapGetMemInfoPkd_exit;
+               }
+       }
+       if (psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount != 0)
+       {
+               psapPhysHeapMemStatsInt =
+                   (PHYS_HEAP_MEM_STATS_PKD *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS_PKD);
+       }
+
+       psPhysHeapGetMemInfoPkdOUT->eError =
+           PVRSRVPhysHeapGetMemInfoPkdKM(psConnection, OSGetDevNode(psConnection),
+                                         psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount,
+                                         eaPhysHeapIDInt, psapPhysHeapMemStatsInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psPhysHeapGetMemInfoPkdOUT->eError != PVRSRV_OK))
+       {
+               goto PhysHeapGetMemInfoPkd_exit;
+       }
+
+       /* If dest ptr is non-null and we have data to copy */
+       if ((psapPhysHeapMemStatsInt) &&
+           ((psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS_PKD)) > 0))
+       {
+               if (unlikely
+                   (OSCopyToUser
+                    (NULL, (void __user *)psPhysHeapGetMemInfoPkdOUT->psapPhysHeapMemStats,
+                     psapPhysHeapMemStatsInt,
+                     (psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount *
+                      sizeof(PHYS_HEAP_MEM_STATS_PKD))) != PVRSRV_OK))
+               {
+                       psPhysHeapGetMemInfoPkdOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto PhysHeapGetMemInfoPkd_exit;
+               }
+       }
+
+PhysHeapGetMemInfoPkd_exit:
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psPhysHeapGetMemInfoPkdOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeGetHeapPhysMemUsagePkd(IMG_UINT32 ui32DispatchTableEntry,
+                                  IMG_UINT8 * psGetHeapPhysMemUsagePkdIN_UI8,
+                                  IMG_UINT8 * psGetHeapPhysMemUsagePkdOUT_UI8,
+                                  CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGEPKD *psGetHeapPhysMemUsagePkdIN =
+           (PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGEPKD *)
+           IMG_OFFSET_ADDR(psGetHeapPhysMemUsagePkdIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGEPKD *psGetHeapPhysMemUsagePkdOUT =
+           (PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGEPKD *)
+           IMG_OFFSET_ADDR(psGetHeapPhysMemUsagePkdOUT_UI8, 0);
+
+       PHYS_HEAP_MEM_STATS_PKD *psapPhysHeapMemStatsInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psGetHeapPhysMemUsagePkdIN->ui32PhysHeapCount *
+            sizeof(PHYS_HEAP_MEM_STATS_PKD)) + 0;
+
+       if (psGetHeapPhysMemUsagePkdIN->ui32PhysHeapCount > PVRSRV_PHYS_HEAP_LAST)
+       {
+               psGetHeapPhysMemUsagePkdOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto GetHeapPhysMemUsagePkd_exit;
+       }
+
+       psGetHeapPhysMemUsagePkdOUT->psapPhysHeapMemStats =
+           psGetHeapPhysMemUsagePkdIN->psapPhysHeapMemStats;
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psGetHeapPhysMemUsagePkdOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto GetHeapPhysMemUsagePkd_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psGetHeapPhysMemUsagePkdIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psGetHeapPhysMemUsagePkdIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psGetHeapPhysMemUsagePkdOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto GetHeapPhysMemUsagePkd_exit;
+                       }
+               }
+       }
+
+       if (psGetHeapPhysMemUsagePkdIN->ui32PhysHeapCount != 0)
+       {
+               psapPhysHeapMemStatsInt =
+                   (PHYS_HEAP_MEM_STATS_PKD *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psGetHeapPhysMemUsagePkdIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS_PKD);
+       }
+
+       psGetHeapPhysMemUsagePkdOUT->eError =
+           PVRSRVGetHeapPhysMemUsagePkdKM(psConnection, OSGetDevNode(psConnection),
+                                          psGetHeapPhysMemUsagePkdIN->ui32PhysHeapCount,
+                                          psapPhysHeapMemStatsInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psGetHeapPhysMemUsagePkdOUT->eError != PVRSRV_OK))
+       {
+               goto GetHeapPhysMemUsagePkd_exit;
+       }
+
+       /* If dest ptr is non-null and we have data to copy */
+       if ((psapPhysHeapMemStatsInt) &&
+           ((psGetHeapPhysMemUsagePkdIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS_PKD)) > 0))
+       {
+               if (unlikely
+                   (OSCopyToUser
+                    (NULL, (void __user *)psGetHeapPhysMemUsagePkdOUT->psapPhysHeapMemStats,
+                     psapPhysHeapMemStatsInt,
+                     (psGetHeapPhysMemUsagePkdIN->ui32PhysHeapCount *
+                      sizeof(PHYS_HEAP_MEM_STATS_PKD))) != PVRSRV_OK))
+               {
+                       psGetHeapPhysMemUsagePkdOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto GetHeapPhysMemUsagePkd_exit;
+               }
+       }
+
+GetHeapPhysMemUsagePkd_exit:
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psGetHeapPhysMemUsagePkdOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
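+/*
+ * InitMMBridge registers each handler above against its function ID in the
+ * PVRSRV_BRIDGE_MM group of the services dispatch table; handlers whose feature
+ * is not compiled in (e.g. DevmemInvalidateFBSCTable, PVRSRVUpdateOOMStats)
+ * register a NULL entry. DeinitMMBridge removes the same set of entries.
+ */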
+PVRSRV_ERROR InitMMBridge(void);
+void DeinitMMBridge(void);
+
+/*
+ * Register all MM functions with services
+ */
+PVRSRV_ERROR InitMMBridge(void)
+{
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMREXPORTPMR,
+                             PVRSRVBridgePMRExportPMR, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR,
+                             PVRSRVBridgePMRUnexportPMR, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRGETUID, PVRSRVBridgePMRGetUID,
+                             NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE,
+                             PVRSRVBridgePMRMakeLocalImportHandle, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE,
+                             PVRSRVBridgePMRUnmakeLocalImportHandle, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRIMPORTPMR,
+                             PVRSRVBridgePMRImportPMR, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR,
+                             PVRSRVBridgePMRLocalImportPMR, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFPMR,
+                             PVRSRVBridgePMRUnrefPMR, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR,
+                             PVRSRVBridgePMRUnrefUnlockPMR, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR,
+                             PVRSRVBridgePhysmemNewRamBackedPMR, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR,
+                             PVRSRVBridgePhysmemNewRamBackedLockedPMR, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTPIN,
+                             PVRSRVBridgeDevmemIntPin, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN,
+                             PVRSRVBridgeDevmemIntUnpin, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE,
+                             PVRSRVBridgeDevmemIntPinValidate, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE,
+                             PVRSRVBridgeDevmemIntUnpinInvalidate, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE,
+                             PVRSRVBridgeDevmemIntCtxCreate, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY,
+                             PVRSRVBridgeDevmemIntCtxDestroy, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE,
+                             PVRSRVBridgeDevmemIntHeapCreate, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY,
+                             PVRSRVBridgeDevmemIntHeapDestroy, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR,
+                             PVRSRVBridgeDevmemIntMapPMR, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR,
+                             PVRSRVBridgeDevmemIntUnmapPMR, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE,
+                             PVRSRVBridgeDevmemIntReserveRange, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE,
+                             PVRSRVBridgeDevmemIntUnreserveRange, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_CHANGESPARSEMEM,
+                             PVRSRVBridgeChangeSparseMem, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES,
+                             PVRSRVBridgeDevmemIntMapPages, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES,
+                             PVRSRVBridgeDevmemIntUnmapPages, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID,
+                             PVRSRVBridgeDevmemIsVDevAddrValid, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE,
+                             PVRSRVBridgeDevmemFlushDevSLCRange, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE,
+                             PVRSRVBridgeDevmemInvalidateFBSCTable, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT,
+                             PVRSRVBridgeHeapCfgHeapConfigCount, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT,
+                             PVRSRVBridgeHeapCfgHeapCount, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME,
+                             PVRSRVBridgeHeapCfgHeapConfigName, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS,
+                             PVRSRVBridgeHeapCfgHeapDetails, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM,
+                             PVRSRVBridgeDevmemIntRegisterPFNotifyKM, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETMAXPHYSHEAPCOUNT,
+                             PVRSRVBridgeGetMaxPhysHeapCount, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO,
+                             PVRSRVBridgePhysHeapGetMemInfo, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETDEFAULTPHYSICALHEAP,
+                             PVRSRVBridgeGetDefaultPhysicalHeap, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETHEAPPHYSMEMUSAGE,
+                             PVRSRVBridgeGetHeapPhysMemUsage, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS,
+                             PVRSRVBridgeDevmemGetFaultAddress, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS,
+                             PVRSRVBridgePVRSRVUpdateOOMStats, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFOPKD,
+                             PVRSRVBridgePhysHeapGetMemInfoPkd, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETHEAPPHYSMEMUSAGEPKD,
+                             PVRSRVBridgeGetHeapPhysMemUsagePkd, NULL);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all MM functions with services
+ */
+void DeinitMMBridge(void)
+{
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMREXPORTPMR);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRGETUID);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRIMPORTPMR);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFPMR);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTPIN);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_CHANGESPARSEMEM);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETMAXPHYSHEAPCOUNT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETDEFAULTPHYSICALHEAP);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETHEAPPHYSMEMUSAGE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFOPKD);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETHEAPPHYSMEMUSAGEPKD);
+
+}
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/pdump_bridge/client_pdump_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/pdump_bridge/client_pdump_bridge.h
new file mode 100644 (file)
index 0000000..2bf967b
--- /dev/null
@@ -0,0 +1,93 @@
+/*******************************************************************************
+@File
+@Title          Client bridge header for pdump
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for pdump
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_PDUMP_BRIDGE_H
+#define CLIENT_PDUMP_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_pdump_bridge.h"
+
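+/*
+ * Client-side entry points for the pdump bridge. The direct implementation in
+ * client_pdump_direct_bridge.c (added below) provides these for calls made
+ * from server context.
+ */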
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpImageDescriptor(IMG_HANDLE hBridge,
+                                                    IMG_HANDLE hDevmemCtx,
+                                                    IMG_UINT32 ui32StringSize,
+                                                    const IMG_CHAR * puiFileName,
+                                                    IMG_DEV_VIRTADDR sDataDevAddr,
+                                                    IMG_UINT32 ui32DataSize,
+                                                    IMG_UINT32 ui32LogicalWidth,
+                                                    IMG_UINT32 ui32LogicalHeight,
+                                                    IMG_UINT32 ui32PhysicalWidth,
+                                                    IMG_UINT32 ui32PhysicalHeight,
+                                                    PDUMP_PIXEL_FORMAT ePixelFormat,
+                                                    IMG_MEMLAYOUT eMemLayout,
+                                                    IMG_FB_COMPRESSION eFBCompression,
+                                                    const IMG_UINT32 * pui32FBCClearColour,
+                                                    PDUMP_FBC_SWIZZLE eeFBCSwizzle,
+                                                    IMG_DEV_VIRTADDR sHeaderDevAddr,
+                                                    IMG_UINT32 ui32HeaderSize,
+                                                    IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpComment(IMG_HANDLE hBridge,
+                                                  IMG_UINT32 ui32CommentSize,
+                                                  IMG_CHAR * puiComment, IMG_UINT32 ui32Flags);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpSetFrame(IMG_HANDLE hBridge, IMG_UINT32 ui32Frame);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpDataDescriptor(IMG_HANDLE hBridge,
+                                                   IMG_HANDLE hDevmemCtx,
+                                                   IMG_UINT32 ui32StringSize,
+                                                   const IMG_CHAR * puiFileName,
+                                                   IMG_DEV_VIRTADDR sDataDevAddr,
+                                                   IMG_UINT32 ui32DataSize,
+                                                   IMG_UINT32 ui32HeaderType,
+                                                   IMG_UINT32 ui32ElementType,
+                                                   IMG_UINT32 ui32ElementCount,
+                                                   IMG_UINT32 ui32PDumpFlags);
+
+#endif /* CLIENT_PDUMP_BRIDGE_H */
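
Both client bridge headers in this group compile against one of two transports: when PVR_INDIRECT_BRIDGE_CLIENTS is defined the Bridge* calls go through the marshalling client (pvr_bridge_client.h), otherwise the direct implementations that follow are linked in. The standalone sketch below illustrates only that compile-time selection pattern; the names EXAMPLE_INDIRECT, bridge_ping and ping_impl are hypothetical stand-ins, not services symbols.

#include <stdio.h>

/* Build with -DEXAMPLE_INDIRECT to take the marshalling path, or without
 * it to take the direct path, mirroring how PVR_INDIRECT_BRIDGE_CLIENTS
 * selects the transport behind an unchanged API. */
static int ping_impl(void) { return 0; }     /* stand-in for the server-side implementation */

#if defined(EXAMPLE_INDIRECT)
static int bridge_ping(void)
{
	/* Stand-in for packing arguments and crossing the client/server boundary. */
	printf("indirect: marshalling call\n");
	return ping_impl();
}
#else
static int bridge_ping(void)
{
	/* Direct variant: same API surface, plain function call. */
	return ping_impl();
}
#endif

int main(void)
{
	return bridge_ping();
}
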
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/pdump_bridge/client_pdump_direct_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/pdump_bridge/client_pdump_direct_bridge.c
new file mode 100644 (file)
index 0000000..7c012db
--- /dev/null
@@ -0,0 +1,151 @@
+/*******************************************************************************
+@File
+@Title          Direct client bridge for pdump
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the client side of the bridge for pdump
+                which is used in calls from Server context.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_pdump_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "devicemem_typedefs.h"
+#include "pdumpdefs.h"
+#include <powervr/buffer_attribs.h>
+
+#include "devicemem_server.h"
+#include "pdump_km.h"
+
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpImageDescriptor(IMG_HANDLE hBridge,
+                                                    IMG_HANDLE hDevmemCtx,
+                                                    IMG_UINT32 ui32StringSize,
+                                                    const IMG_CHAR * puiFileName,
+                                                    IMG_DEV_VIRTADDR sDataDevAddr,
+                                                    IMG_UINT32 ui32DataSize,
+                                                    IMG_UINT32 ui32LogicalWidth,
+                                                    IMG_UINT32 ui32LogicalHeight,
+                                                    IMG_UINT32 ui32PhysicalWidth,
+                                                    IMG_UINT32 ui32PhysicalHeight,
+                                                    PDUMP_PIXEL_FORMAT ePixelFormat,
+                                                    IMG_MEMLAYOUT eMemLayout,
+                                                    IMG_FB_COMPRESSION eFBCompression,
+                                                    const IMG_UINT32 * pui32FBCClearColour,
+                                                    PDUMP_FBC_SWIZZLE eeFBCSwizzle,
+                                                    IMG_DEV_VIRTADDR sHeaderDevAddr,
+                                                    IMG_UINT32 ui32HeaderSize,
+                                                    IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError;
+       DEVMEMINT_CTX *psDevmemCtxInt;
+
+       psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+       eError =
+           DevmemIntPDumpImageDescriptor(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                                         psDevmemCtxInt,
+                                         ui32StringSize,
+                                         puiFileName,
+                                         sDataDevAddr,
+                                         ui32DataSize,
+                                         ui32LogicalWidth,
+                                         ui32LogicalHeight,
+                                         ui32PhysicalWidth,
+                                         ui32PhysicalHeight,
+                                         ePixelFormat,
+                                         eMemLayout,
+                                         eFBCompression,
+                                         pui32FBCClearColour,
+                                         eeFBCSwizzle,
+                                         sHeaderDevAddr, ui32HeaderSize, ui32PDumpFlags);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpComment(IMG_HANDLE hBridge,
+                                                  IMG_UINT32 ui32CommentSize,
+                                                  IMG_CHAR * puiComment, IMG_UINT32 ui32Flags)
+{
+       PVRSRV_ERROR eError;
+
+       eError =
+           PDumpCommentKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                          ui32CommentSize, puiComment, ui32Flags);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpSetFrame(IMG_HANDLE hBridge, IMG_UINT32 ui32Frame)
+{
+       PVRSRV_ERROR eError;
+
+       eError = PDumpSetFrameKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ui32Frame);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpDataDescriptor(IMG_HANDLE hBridge,
+                                                   IMG_HANDLE hDevmemCtx,
+                                                   IMG_UINT32 ui32StringSize,
+                                                   const IMG_CHAR * puiFileName,
+                                                   IMG_DEV_VIRTADDR sDataDevAddr,
+                                                   IMG_UINT32 ui32DataSize,
+                                                   IMG_UINT32 ui32HeaderType,
+                                                   IMG_UINT32 ui32ElementType,
+                                                   IMG_UINT32 ui32ElementCount,
+                                                   IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError;
+       DEVMEMINT_CTX *psDevmemCtxInt;
+
+       psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+       eError =
+           DevmemIntPDumpDataDescriptor(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                                        psDevmemCtxInt,
+                                        ui32StringSize,
+                                        puiFileName,
+                                        sDataDevAddr,
+                                        ui32DataSize,
+                                        ui32HeaderType,
+                                        ui32ElementType, ui32ElementCount, ui32PDumpFlags);
+
+       return eError;
+}
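
The direct client bridge above is a set of thin pass-throughs: each Bridge* function recovers the device node from the opaque bridge handle and calls the kernel-mode implementation directly, with no copying or validation, because caller and callee share the kernel address space. The sketch below shows that pattern in a self-contained form; DEVICE_NODE, BRIDGE_HANDLE, SetFrameImpl and BridgeSetFrame are hypothetical stand-ins for the PVR services types and for PDumpSetFrameKM / BridgePVRSRVPDumpSetFrame.

#include <stdint.h>
#include <stdio.h>

typedef struct DEVICE_NODE_TAG { uint32_t ui32CurrentFrame; } DEVICE_NODE;
typedef void *BRIDGE_HANDLE;

/* Server-side implementation (the role PDumpSetFrameKM plays above). */
static int SetFrameImpl(DEVICE_NODE *psDevNode, uint32_t ui32Frame)
{
	psDevNode->ui32CurrentFrame = ui32Frame;
	return 0;
}

/* Direct client bridge: a thin pass-through that just recasts the handle. */
static int BridgeSetFrame(BRIDGE_HANDLE hBridge, uint32_t ui32Frame)
{
	return SetFrameImpl((DEVICE_NODE *)hBridge, ui32Frame);
}

int main(void)
{
	DEVICE_NODE sNode = { 0 };

	/* The bridge handle is simply the device node in disguise. */
	int err = BridgeSetFrame((BRIDGE_HANDLE)&sNode, 42);

	printf("err=%d frame=%u\n", err, (unsigned)sNode.ui32CurrentFrame);
	return err;
}
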
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/pdump_bridge/common_pdump_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/pdump_bridge/common_pdump_bridge.h
new file mode 100644 (file)
index 0000000..eb6ab94
--- /dev/null
@@ -0,0 +1,155 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for pdump
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for pdump
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_PDUMP_BRIDGE_H
+#define COMMON_PDUMP_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+#include "pdumpdefs.h"
+#include <powervr/buffer_attribs.h>
+
+#define PVRSRV_BRIDGE_PDUMP_CMD_FIRST                  0
+#define PVRSRV_BRIDGE_PDUMP_PDUMPIMAGEDESCRIPTOR                       PVRSRV_BRIDGE_PDUMP_CMD_FIRST+0
+#define PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPCOMMENT                 PVRSRV_BRIDGE_PDUMP_CMD_FIRST+1
+#define PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPSETFRAME                        PVRSRV_BRIDGE_PDUMP_CMD_FIRST+2
+#define PVRSRV_BRIDGE_PDUMP_PDUMPDATADESCRIPTOR                        PVRSRV_BRIDGE_PDUMP_CMD_FIRST+3
+#define PVRSRV_BRIDGE_PDUMP_CMD_LAST                   (PVRSRV_BRIDGE_PDUMP_CMD_FIRST+3)
+
+/*******************************************
+            PDumpImageDescriptor
+ *******************************************/
+
+/* Bridge in structure for PDumpImageDescriptor */
+typedef struct PVRSRV_BRIDGE_IN_PDUMPIMAGEDESCRIPTOR_TAG
+{
+       IMG_DEV_VIRTADDR sDataDevAddr;
+       IMG_DEV_VIRTADDR sHeaderDevAddr;
+       IMG_HANDLE hDevmemCtx;
+       const IMG_UINT32 *pui32FBCClearColour;
+       const IMG_CHAR *puiFileName;
+       IMG_FB_COMPRESSION eFBCompression;
+       IMG_MEMLAYOUT eMemLayout;
+       PDUMP_PIXEL_FORMAT ePixelFormat;
+       PDUMP_FBC_SWIZZLE eeFBCSwizzle;
+       IMG_UINT32 ui32DataSize;
+       IMG_UINT32 ui32HeaderSize;
+       IMG_UINT32 ui32LogicalHeight;
+       IMG_UINT32 ui32LogicalWidth;
+       IMG_UINT32 ui32PDumpFlags;
+       IMG_UINT32 ui32PhysicalHeight;
+       IMG_UINT32 ui32PhysicalWidth;
+       IMG_UINT32 ui32StringSize;
+} __packed PVRSRV_BRIDGE_IN_PDUMPIMAGEDESCRIPTOR;
+
+/* Bridge out structure for PDumpImageDescriptor */
+typedef struct PVRSRV_BRIDGE_OUT_PDUMPIMAGEDESCRIPTOR_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PDUMPIMAGEDESCRIPTOR;
+
+/*******************************************
+            PVRSRVPDumpComment
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpComment */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT_TAG
+{
+       IMG_CHAR *puiComment;
+       IMG_UINT32 ui32CommentSize;
+       IMG_UINT32 ui32Flags;
+} __packed PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT;
+
+/* Bridge out structure for PVRSRVPDumpComment */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT;
+
+/*******************************************
+            PVRSRVPDumpSetFrame
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpSetFrame */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME_TAG
+{
+       IMG_UINT32 ui32Frame;
+} __packed PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME;
+
+/* Bridge out structure for PVRSRVPDumpSetFrame */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME;
+
+/*******************************************
+            PDumpDataDescriptor
+ *******************************************/
+
+/* Bridge in structure for PDumpDataDescriptor */
+typedef struct PVRSRV_BRIDGE_IN_PDUMPDATADESCRIPTOR_TAG
+{
+       IMG_DEV_VIRTADDR sDataDevAddr;
+       IMG_HANDLE hDevmemCtx;
+       const IMG_CHAR *puiFileName;
+       IMG_UINT32 ui32DataSize;
+       IMG_UINT32 ui32ElementCount;
+       IMG_UINT32 ui32ElementType;
+       IMG_UINT32 ui32HeaderType;
+       IMG_UINT32 ui32PDumpFlags;
+       IMG_UINT32 ui32StringSize;
+} __packed PVRSRV_BRIDGE_IN_PDUMPDATADESCRIPTOR;
+
+/* Bridge out structure for PDumpDataDescriptor */
+typedef struct PVRSRV_BRIDGE_OUT_PDUMPDATADESCRIPTOR_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PDUMPDATADESCRIPTOR;
+
+#endif /* COMMON_PDUMP_BRIDGE_H */
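
The common header above pairs each bridge call with a __packed IN structure (inputs plus user pointers that the server copies in) and a __packed OUT structure that always carries at least an eError field; packing keeps member offsets explicit so both sides of the bridge agree on the layout without relying on compiler padding. The sketch below is a minimal, self-contained illustration of that convention; the EXAMPLE_BRIDGE_IN/OUT types and their members are hypothetical, not the real PVRSRV structures.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct EXAMPLE_BRIDGE_IN_TAG
{
	uint64_t ui64DevAddr;     /* widest member first, as in the structures above */
	const char *puiFileName;  /* user pointer; the server copies the array in */
	uint32_t ui32DataSize;
	uint32_t ui32StringSize;
} __attribute__((packed)) EXAMPLE_BRIDGE_IN;

typedef struct EXAMPLE_BRIDGE_OUT_TAG
{
	int32_t eError;           /* every OUT structure carries an error code */
} __attribute__((packed)) EXAMPLE_BRIDGE_OUT;

int main(void)
{
	/* With packing there is no compiler-inserted padding between members. */
	printf("sizeof(IN)=%zu, ui32StringSize at offset %zu, sizeof(OUT)=%zu\n",
	       sizeof(EXAMPLE_BRIDGE_IN),
	       offsetof(EXAMPLE_BRIDGE_IN, ui32StringSize),
	       sizeof(EXAMPLE_BRIDGE_OUT));
	return 0;
}
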
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/pdump_bridge/server_pdump_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/pdump_bridge/server_pdump_bridge.c
new file mode 100644 (file)
index 0000000..862343f
--- /dev/null
@@ -0,0 +1,587 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for pdump
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for pdump
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem_server.h"
+#include "pdump_km.h"
+
+#include "common_pdump_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static_assert(PVRSRV_PDUMP_MAX_FILENAME_SIZE <= IMG_UINT32_MAX,
+             "PVRSRV_PDUMP_MAX_FILENAME_SIZE must not be larger than IMG_UINT32_MAX");
+static_assert(4 <= IMG_UINT32_MAX, "4 must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgePDumpImageDescriptor(IMG_UINT32 ui32DispatchTableEntry,
+                                IMG_UINT8 * psPDumpImageDescriptorIN_UI8,
+                                IMG_UINT8 * psPDumpImageDescriptorOUT_UI8,
+                                CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PDUMPIMAGEDESCRIPTOR *psPDumpImageDescriptorIN =
+           (PVRSRV_BRIDGE_IN_PDUMPIMAGEDESCRIPTOR *) IMG_OFFSET_ADDR(psPDumpImageDescriptorIN_UI8,
+                                                                     0);
+       PVRSRV_BRIDGE_OUT_PDUMPIMAGEDESCRIPTOR *psPDumpImageDescriptorOUT =
+           (PVRSRV_BRIDGE_OUT_PDUMPIMAGEDESCRIPTOR *)
+           IMG_OFFSET_ADDR(psPDumpImageDescriptorOUT_UI8, 0);
+
+       IMG_HANDLE hDevmemCtx = psPDumpImageDescriptorIN->hDevmemCtx;
+       DEVMEMINT_CTX *psDevmemCtxInt = NULL;
+       IMG_CHAR *uiFileNameInt = NULL;
+       IMG_UINT32 *ui32FBCClearColourInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psPDumpImageDescriptorIN->ui32StringSize * sizeof(IMG_CHAR)) +
+           ((IMG_UINT64) 4 * sizeof(IMG_UINT32)) + 0;
+
+       if (unlikely(psPDumpImageDescriptorIN->ui32StringSize > PVRSRV_PDUMP_MAX_FILENAME_SIZE))
+       {
+               psPDumpImageDescriptorOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto PDumpImageDescriptor_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psPDumpImageDescriptorOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto PDumpImageDescriptor_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psPDumpImageDescriptorIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPDumpImageDescriptorIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psPDumpImageDescriptorOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto PDumpImageDescriptor_exit;
+                       }
+               }
+       }
+
+       if (psPDumpImageDescriptorIN->ui32StringSize != 0)
+       {
+               uiFileNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psPDumpImageDescriptorIN->ui32StringSize * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (psPDumpImageDescriptorIN->ui32StringSize * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiFileNameInt,
+                    (const void __user *)psPDumpImageDescriptorIN->puiFileName,
+                    psPDumpImageDescriptorIN->ui32StringSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psPDumpImageDescriptorOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto PDumpImageDescriptor_exit;
+               }
+               ((IMG_CHAR *)
+                uiFileNameInt)[(psPDumpImageDescriptorIN->ui32StringSize * sizeof(IMG_CHAR)) - 1] =
+       '\0';
+       }
+
+       {
+               ui32FBCClearColourInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += 4 * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (4 * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32FBCClearColourInt,
+                    (const void __user *)psPDumpImageDescriptorIN->pui32FBCClearColour,
+                    4 * sizeof(IMG_UINT32)) != PVRSRV_OK)
+               {
+                       psPDumpImageDescriptorOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto PDumpImageDescriptor_exit;
+               }
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psPDumpImageDescriptorOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psDevmemCtxInt,
+                                      hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE);
+       if (unlikely(psPDumpImageDescriptorOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto PDumpImageDescriptor_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psPDumpImageDescriptorOUT->eError =
+           DevmemIntPDumpImageDescriptor(psConnection, OSGetDevNode(psConnection),
+                                         psDevmemCtxInt,
+                                         psPDumpImageDescriptorIN->ui32StringSize,
+                                         uiFileNameInt,
+                                         psPDumpImageDescriptorIN->sDataDevAddr,
+                                         psPDumpImageDescriptorIN->ui32DataSize,
+                                         psPDumpImageDescriptorIN->ui32LogicalWidth,
+                                         psPDumpImageDescriptorIN->ui32LogicalHeight,
+                                         psPDumpImageDescriptorIN->ui32PhysicalWidth,
+                                         psPDumpImageDescriptorIN->ui32PhysicalHeight,
+                                         psPDumpImageDescriptorIN->ePixelFormat,
+                                         psPDumpImageDescriptorIN->eMemLayout,
+                                         psPDumpImageDescriptorIN->eFBCompression,
+                                         ui32FBCClearColourInt,
+                                         psPDumpImageDescriptorIN->eeFBCSwizzle,
+                                         psPDumpImageDescriptorIN->sHeaderDevAddr,
+                                         psPDumpImageDescriptorIN->ui32HeaderSize,
+                                         psPDumpImageDescriptorIN->ui32PDumpFlags);
+
+PDumpImageDescriptor_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psDevmemCtxInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psPDumpImageDescriptorOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static_assert(PVRSRV_PDUMP_MAX_COMMENT_SIZE <= IMG_UINT32_MAX,
+             "PVRSRV_PDUMP_MAX_COMMENT_SIZE must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpComment(IMG_UINT32 ui32DispatchTableEntry,
+                              IMG_UINT8 * psPVRSRVPDumpCommentIN_UI8,
+                              IMG_UINT8 * psPVRSRVPDumpCommentOUT_UI8,
+                              CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT *psPVRSRVPDumpCommentIN =
+           (PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT *) IMG_OFFSET_ADDR(psPVRSRVPDumpCommentIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT *psPVRSRVPDumpCommentOUT =
+           (PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT *) IMG_OFFSET_ADDR(psPVRSRVPDumpCommentOUT_UI8,
+                                                                    0);
+
+       IMG_CHAR *uiCommentInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psPVRSRVPDumpCommentIN->ui32CommentSize * sizeof(IMG_CHAR)) + 0;
+
+       if (unlikely(psPVRSRVPDumpCommentIN->ui32CommentSize > PVRSRV_PDUMP_MAX_COMMENT_SIZE))
+       {
+               psPVRSRVPDumpCommentOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto PVRSRVPDumpComment_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psPVRSRVPDumpCommentOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto PVRSRVPDumpComment_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psPVRSRVPDumpCommentIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPVRSRVPDumpCommentIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psPVRSRVPDumpCommentOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto PVRSRVPDumpComment_exit;
+                       }
+               }
+       }
+
+       if (psPVRSRVPDumpCommentIN->ui32CommentSize != 0)
+       {
+               uiCommentInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psPVRSRVPDumpCommentIN->ui32CommentSize * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (psPVRSRVPDumpCommentIN->ui32CommentSize * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiCommentInt, (const void __user *)psPVRSRVPDumpCommentIN->puiComment,
+                    psPVRSRVPDumpCommentIN->ui32CommentSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psPVRSRVPDumpCommentOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto PVRSRVPDumpComment_exit;
+               }
+               ((IMG_CHAR *)
+                uiCommentInt)[(psPVRSRVPDumpCommentIN->ui32CommentSize * sizeof(IMG_CHAR)) - 1] =
+       '\0';
+       }
+
+       psPVRSRVPDumpCommentOUT->eError =
+           PDumpCommentKM(psConnection, OSGetDevNode(psConnection),
+                          psPVRSRVPDumpCommentIN->ui32CommentSize,
+                          uiCommentInt, psPVRSRVPDumpCommentIN->ui32Flags);
+
+PVRSRVPDumpComment_exit:
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psPVRSRVPDumpCommentOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpSetFrame(IMG_UINT32 ui32DispatchTableEntry,
+                               IMG_UINT8 * psPVRSRVPDumpSetFrameIN_UI8,
+                               IMG_UINT8 * psPVRSRVPDumpSetFrameOUT_UI8,
+                               CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME *psPVRSRVPDumpSetFrameIN =
+           (PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME *) IMG_OFFSET_ADDR(psPVRSRVPDumpSetFrameIN_UI8,
+                                                                    0);
+       PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME *psPVRSRVPDumpSetFrameOUT =
+           (PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME *) IMG_OFFSET_ADDR(psPVRSRVPDumpSetFrameOUT_UI8,
+                                                                     0);
+
+       psPVRSRVPDumpSetFrameOUT->eError =
+           PDumpSetFrameKM(psConnection, OSGetDevNode(psConnection),
+                           psPVRSRVPDumpSetFrameIN->ui32Frame);
+
+       return 0;
+}
+
+static_assert(PVRSRV_PDUMP_MAX_FILENAME_SIZE <= IMG_UINT32_MAX,
+             "PVRSRV_PDUMP_MAX_FILENAME_SIZE must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgePDumpDataDescriptor(IMG_UINT32 ui32DispatchTableEntry,
+                               IMG_UINT8 * psPDumpDataDescriptorIN_UI8,
+                               IMG_UINT8 * psPDumpDataDescriptorOUT_UI8,
+                               CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PDUMPDATADESCRIPTOR *psPDumpDataDescriptorIN =
+           (PVRSRV_BRIDGE_IN_PDUMPDATADESCRIPTOR *) IMG_OFFSET_ADDR(psPDumpDataDescriptorIN_UI8,
+                                                                    0);
+       PVRSRV_BRIDGE_OUT_PDUMPDATADESCRIPTOR *psPDumpDataDescriptorOUT =
+           (PVRSRV_BRIDGE_OUT_PDUMPDATADESCRIPTOR *) IMG_OFFSET_ADDR(psPDumpDataDescriptorOUT_UI8,
+                                                                     0);
+
+       IMG_HANDLE hDevmemCtx = psPDumpDataDescriptorIN->hDevmemCtx;
+       DEVMEMINT_CTX *psDevmemCtxInt = NULL;
+       IMG_CHAR *uiFileNameInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psPDumpDataDescriptorIN->ui32StringSize * sizeof(IMG_CHAR)) + 0;
+
+       if (unlikely(psPDumpDataDescriptorIN->ui32StringSize > PVRSRV_PDUMP_MAX_FILENAME_SIZE))
+       {
+               psPDumpDataDescriptorOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto PDumpDataDescriptor_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psPDumpDataDescriptorOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto PDumpDataDescriptor_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psPDumpDataDescriptorIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPDumpDataDescriptorIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psPDumpDataDescriptorOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto PDumpDataDescriptor_exit;
+                       }
+               }
+       }
+
+       if (psPDumpDataDescriptorIN->ui32StringSize != 0)
+       {
+               uiFileNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psPDumpDataDescriptorIN->ui32StringSize * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (psPDumpDataDescriptorIN->ui32StringSize * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiFileNameInt, (const void __user *)psPDumpDataDescriptorIN->puiFileName,
+                    psPDumpDataDescriptorIN->ui32StringSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psPDumpDataDescriptorOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto PDumpDataDescriptor_exit;
+               }
+               ((IMG_CHAR *)
+                uiFileNameInt)[(psPDumpDataDescriptorIN->ui32StringSize * sizeof(IMG_CHAR)) - 1] =
+       '\0';
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psPDumpDataDescriptorOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psDevmemCtxInt,
+                                      hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE);
+       if (unlikely(psPDumpDataDescriptorOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto PDumpDataDescriptor_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psPDumpDataDescriptorOUT->eError =
+           DevmemIntPDumpDataDescriptor(psConnection, OSGetDevNode(psConnection),
+                                        psDevmemCtxInt,
+                                        psPDumpDataDescriptorIN->ui32StringSize,
+                                        uiFileNameInt,
+                                        psPDumpDataDescriptorIN->sDataDevAddr,
+                                        psPDumpDataDescriptorIN->ui32DataSize,
+                                        psPDumpDataDescriptorIN->ui32HeaderType,
+                                        psPDumpDataDescriptorIN->ui32ElementType,
+                                        psPDumpDataDescriptorIN->ui32ElementCount,
+                                        psPDumpDataDescriptorIN->ui32PDumpFlags);
+
+PDumpDataDescriptor_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psDevmemCtxInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psPDumpDataDescriptorOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitPDUMPBridge(void);
+void DeinitPDUMPBridge(void);
+
+/*
+ * Register all PDUMP functions with services
+ */
+PVRSRV_ERROR InitPDUMPBridge(void)
+{
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PDUMPIMAGEDESCRIPTOR,
+                             PVRSRVBridgePDumpImageDescriptor, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPCOMMENT,
+                             PVRSRVBridgePVRSRVPDumpComment, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPSETFRAME,
+                             PVRSRVBridgePVRSRVPDumpSetFrame, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PDUMPDATADESCRIPTOR,
+                             PVRSRVBridgePDumpDataDescriptor, NULL);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all pdump functions with services
+ */
+void DeinitPDUMPBridge(void)
+{
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PDUMPIMAGEDESCRIPTOR);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPCOMMENT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPSETFRAME);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PDUMPDATADESCRIPTOR);
+
+}
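
Each server entry point above follows the same shape: reject array lengths beyond the documented maximum, accumulate the scratch-buffer size in 64 bits so a hostile 32-bit length cannot overflow, reuse the word-aligned tail of the bridge input buffer when it is large enough (heap-allocating otherwise), copy the user arrays in and NUL-terminate strings, then look up handles under the handle lock before calling the kernel-mode implementation. The sketch below isolates just the sizing decision in standalone form; MAX_IN_SIZE, MAX_NAME, ALIGN_UP and size_scratch are made-up stand-ins for PVRSRV_MAX_BRIDGE_IN_SIZE, PVRSRV_PDUMP_MAX_FILENAME_SIZE, PVR_ALIGN and the inline logic in the handlers.

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#define MAX_IN_SIZE 512u   /* stand-in for PVRSRV_MAX_BRIDGE_IN_SIZE */
#define MAX_NAME    256u   /* stand-in for PVRSRV_PDUMP_MAX_FILENAME_SIZE */
#define ALIGN_UP(x, a) (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

static int size_scratch(uint32_t string_size, size_t in_struct_size,
			uint32_t *out_size, int *reuse_input)
{
	uint64_t need;

	if (string_size > MAX_NAME)
		return -1;                      /* array too big for the bridge */

	/* Filename characters plus the 4-element FBC clear colour array,
	 * summed in 64 bits before narrowing. */
	need = (uint64_t)string_size * sizeof(char) + 4u * sizeof(uint32_t);
	if (need > UINT32_MAX)
		return -1;                      /* would overflow a 32-bit size */

	*out_size = (uint32_t)need;

	/* Reuse the word-aligned tail of the input buffer when it fits. */
	{
		uint32_t off = (uint32_t)ALIGN_UP(in_struct_size, sizeof(unsigned long));
		uint32_t excess = off >= MAX_IN_SIZE ? 0 : MAX_IN_SIZE - off;

		*reuse_input = (*out_size <= excess);
	}
	return 0;
}

int main(void)
{
	uint32_t size;
	int reuse;

	if (size_scratch(64, 48, &size, &reuse) == 0)
		printf("scratch=%u bytes, reuse-input=%d\n", (unsigned)size, reuse);
	return 0;
}
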
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/pdumpctrl_bridge/client_pdumpctrl_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/pdumpctrl_bridge/client_pdumpctrl_bridge.h
new file mode 100644 (file)
index 0000000..8bda83a
--- /dev/null
@@ -0,0 +1,73 @@
+/*******************************************************************************
+@File
+@Title          Client bridge header for pdumpctrl
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for pdumpctrl
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_PDUMPCTRL_BRIDGE_H
+#define CLIENT_PDUMPCTRL_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_pdumpctrl_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpGetState(IMG_HANDLE hBridge, IMG_UINT64 * pui64State);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpGetFrame(IMG_HANDLE hBridge, IMG_UINT32 * pui32Frame);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpSetDefaultCaptureParams(IMG_HANDLE hBridge,
+                                                                  IMG_UINT32 ui32Mode,
+                                                                  IMG_UINT32 ui32Start,
+                                                                  IMG_UINT32 ui32End,
+                                                                  IMG_UINT32 ui32Interval,
+                                                                  IMG_UINT32 ui32MaxParamFileSize);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpIsLastCaptureFrame(IMG_HANDLE hBridge,
+                                                             IMG_BOOL * pbpbIsLastCaptureFrame);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpForceCaptureStop(IMG_HANDLE hBridge);
+
+#endif /* CLIENT_PDUMPCTRL_BRIDGE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/pdumpctrl_bridge/client_pdumpctrl_direct_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/pdumpctrl_bridge/client_pdumpctrl_direct_bridge.c
new file mode 100644 (file)
index 0000000..7ba9298
--- /dev/null
@@ -0,0 +1,106 @@
+/*******************************************************************************
+@File
+@Title          Direct client bridge for pdumpctrl
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the client side of the bridge for pdumpctrl
+                which is used in calls from Server context.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_pdumpctrl_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+
+#include "pdump_km.h"
+
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpGetState(IMG_HANDLE hBridge, IMG_UINT64 * pui64State)
+{
+       PVRSRV_ERROR eError;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       eError = PDumpGetStateKM(pui64State);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpGetFrame(IMG_HANDLE hBridge, IMG_UINT32 * pui32Frame)
+{
+       PVRSRV_ERROR eError;
+
+       eError = PDumpGetFrameKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), pui32Frame);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpSetDefaultCaptureParams(IMG_HANDLE hBridge,
+                                                                  IMG_UINT32 ui32Mode,
+                                                                  IMG_UINT32 ui32Start,
+                                                                  IMG_UINT32 ui32End,
+                                                                  IMG_UINT32 ui32Interval,
+                                                                  IMG_UINT32 ui32MaxParamFileSize)
+{
+       PVRSRV_ERROR eError;
+
+       eError =
+           PDumpSetDefaultCaptureParamsKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                                          ui32Mode,
+                                          ui32Start, ui32End, ui32Interval, ui32MaxParamFileSize);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpIsLastCaptureFrame(IMG_HANDLE hBridge,
+                                                             IMG_BOOL * pbpbIsLastCaptureFrame)
+{
+       PVRSRV_ERROR eError;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       eError = PDumpIsLastCaptureFrameKM(pbpbIsLastCaptureFrame);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpForceCaptureStop(IMG_HANDLE hBridge)
+{
+       PVRSRV_ERROR eError;
+
+       eError = PDumpForceCaptureStopKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge));
+       return eError;
+}
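
BridgePVRSRVPDumpSetDefaultCaptureParams above hands a mode/start/end/interval window to PDumpSetDefaultCaptureParamsKM; the precise capture semantics live in pdump_km.h, not in this bridge code. Purely as a rough illustration of how such a triple can drive an "is this frame captured?" decision, the standalone helper below assumes frames in [start, end] are captured, thinned to every Nth frame by the interval. The in_capture_range function and that interpretation are assumptions for illustration only, not the driver's implementation.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: NOT the driver's logic. Assumes frames inside
 * [start, end] are captured, keeping every 'interval'-th frame. */
static int in_capture_range(uint32_t frame, uint32_t start, uint32_t end,
			    uint32_t interval)
{
	if (frame < start || frame > end)
		return 0;
	if (interval <= 1)
		return 1;
	return ((frame - start) % interval) == 0;
}

int main(void)
{
	uint32_t f;

	/* Window 10..20, every 2nd frame. */
	for (f = 8; f <= 22; f++)
		printf("frame %u: %s\n", (unsigned)f,
		       in_capture_range(f, 10, 20, 2) ? "capture" : "skip");
	return 0;
}
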
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/pdumpctrl_bridge/common_pdumpctrl_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/pdumpctrl_bridge/common_pdumpctrl_bridge.h
new file mode 100644 (file)
index 0000000..4c32781
--- /dev/null
@@ -0,0 +1,149 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for pdumpctrl
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for pdumpctrl
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_PDUMPCTRL_BRIDGE_H
+#define COMMON_PDUMPCTRL_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#define PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST                      0
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETSTATE                    PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+0
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETFRAME                    PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+1
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS                     PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+2
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISLASTCAPTUREFRAME                  PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+3
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPFORCECAPTURESTOP                    PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+4
+#define PVRSRV_BRIDGE_PDUMPCTRL_CMD_LAST                       (PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+4)
+
+/*******************************************
+            PVRSRVPDumpGetState
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpGetState */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETSTATE_TAG
+{
+       IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETSTATE;
+
+/* Bridge out structure for PVRSRVPDumpGetState */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETSTATE_TAG
+{
+       IMG_UINT64 ui64State;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETSTATE;
+
+/*******************************************
+            PVRSRVPDumpGetFrame
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpGetFrame */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME_TAG
+{
+       IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME;
+
+/* Bridge out structure for PVRSRVPDumpGetFrame */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME_TAG
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32Frame;
+} __packed PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME;
+
+/*******************************************
+            PVRSRVPDumpSetDefaultCaptureParams
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpSetDefaultCaptureParams */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS_TAG
+{
+       IMG_UINT32 ui32End;
+       IMG_UINT32 ui32Interval;
+       IMG_UINT32 ui32MaxParamFileSize;
+       IMG_UINT32 ui32Mode;
+       IMG_UINT32 ui32Start;
+} __packed PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS;
+
+/* Bridge out structure for PVRSRVPDumpSetDefaultCaptureParams */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS;
+
+/*******************************************
+            PVRSRVPDumpIsLastCaptureFrame
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpIsLastCaptureFrame */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME_TAG
+{
+       IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME;
+
+/* Bridge out structure for PVRSRVPDumpIsLastCaptureFrame */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME_TAG
+{
+       IMG_BOOL bpbIsLastCaptureFrame;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME;
+
+/*******************************************
+            PVRSRVPDumpForceCaptureStop
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpForceCaptureStop */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPFORCECAPTURESTOP_TAG
+{
+       IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_PVRSRVPDUMPFORCECAPTURESTOP;
+
+/* Bridge out structure for PVRSRVPDumpForceCaptureStop */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPFORCECAPTURESTOP_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PVRSRVPDUMPFORCECAPTURESTOP;
+
+#endif /* COMMON_PDUMPCTRL_BRIDGE_H */
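
The PVRSRV_BRIDGE_PDUMPCTRL_* command IDs above are consecutive offsets from CMD_FIRST, and the matching server file registers one entry point per ID, just as InitPDUMPBridge does for the pdump group earlier in this patch. The sketch below shows that dispatch-table pattern in a self-contained form; dispatch_table, set_entry, dispatch and the CMD_* names are hypothetical stand-ins for the services' SetDispatchTableEntry machinery.

#include <stdint.h>
#include <stdio.h>

#define CMD_FIRST     0
#define CMD_GETSTATE  (CMD_FIRST + 0)
#define CMD_GETFRAME  (CMD_FIRST + 1)
#define CMD_LAST      (CMD_FIRST + 1)

typedef int (*bridge_fn)(uint8_t *in, uint8_t *out);

/* One slot per command ID; NULL means "not implemented". */
static bridge_fn dispatch_table[CMD_LAST + 1];

static int get_state(uint8_t *in, uint8_t *out) { (void)in; out[0] = 1; return 0; }
static int get_frame(uint8_t *in, uint8_t *out) { (void)in; out[0] = 7; return 0; }

static void set_entry(uint32_t cmd, bridge_fn fn)
{
	if (cmd <= CMD_LAST)
		dispatch_table[cmd] = fn;
}

static int dispatch(uint32_t cmd, uint8_t *in, uint8_t *out)
{
	if (cmd > CMD_LAST || !dispatch_table[cmd])
		return -1;              /* unknown bridge command */
	return dispatch_table[cmd](in, out);
}

int main(void)
{
	uint8_t in[8] = { 0 }, out[8] = { 0 };

	/* Registration step, mirroring InitPDUMPBridge(). */
	set_entry(CMD_GETSTATE, get_state);
	set_entry(CMD_GETFRAME, get_frame);

	printf("getframe rc=%d out=%u\n", dispatch(CMD_GETFRAME, in, out), (unsigned)out[0]);
	return 0;
}
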
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/pdumpctrl_bridge/server_pdumpctrl_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/pdumpctrl_bridge/server_pdumpctrl_bridge.c
new file mode 100644 (file)
index 0000000..4edc0fc
--- /dev/null
@@ -0,0 +1,244 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for pdumpctrl
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for pdumpctrl
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "pdump_km.h"
+
+#include "common_pdumpctrl_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#include "lock.h"
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpGetState(IMG_UINT32 ui32DispatchTableEntry,
+                               IMG_UINT8 * psPVRSRVPDumpGetStateIN_UI8,
+                               IMG_UINT8 * psPVRSRVPDumpGetStateOUT_UI8,
+                               CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETSTATE *psPVRSRVPDumpGetStateIN =
+           (PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETSTATE *) IMG_OFFSET_ADDR(psPVRSRVPDumpGetStateIN_UI8,
+                                                                    0);
+       PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETSTATE *psPVRSRVPDumpGetStateOUT =
+           (PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETSTATE *) IMG_OFFSET_ADDR(psPVRSRVPDumpGetStateOUT_UI8,
+                                                                     0);
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpGetStateIN);
+
+       psPVRSRVPDumpGetStateOUT->eError = PDumpGetStateKM(&psPVRSRVPDumpGetStateOUT->ui64State);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpGetFrame(IMG_UINT32 ui32DispatchTableEntry,
+                               IMG_UINT8 * psPVRSRVPDumpGetFrameIN_UI8,
+                               IMG_UINT8 * psPVRSRVPDumpGetFrameOUT_UI8,
+                               CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME *psPVRSRVPDumpGetFrameIN =
+           (PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME *) IMG_OFFSET_ADDR(psPVRSRVPDumpGetFrameIN_UI8,
+                                                                    0);
+       PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME *psPVRSRVPDumpGetFrameOUT =
+           (PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME *) IMG_OFFSET_ADDR(psPVRSRVPDumpGetFrameOUT_UI8,
+                                                                     0);
+
+       PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpGetFrameIN);
+
+       psPVRSRVPDumpGetFrameOUT->eError =
+           PDumpGetFrameKM(psConnection, OSGetDevNode(psConnection),
+                           &psPVRSRVPDumpGetFrameOUT->ui32Frame);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpSetDefaultCaptureParams(IMG_UINT32 ui32DispatchTableEntry,
+                                              IMG_UINT8 *
+                                              psPVRSRVPDumpSetDefaultCaptureParamsIN_UI8,
+                                              IMG_UINT8 *
+                                              psPVRSRVPDumpSetDefaultCaptureParamsOUT_UI8,
+                                              CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS *psPVRSRVPDumpSetDefaultCaptureParamsIN
+           =
+           (PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS *)
+           IMG_OFFSET_ADDR(psPVRSRVPDumpSetDefaultCaptureParamsIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS
+           *psPVRSRVPDumpSetDefaultCaptureParamsOUT =
+           (PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS *)
+           IMG_OFFSET_ADDR(psPVRSRVPDumpSetDefaultCaptureParamsOUT_UI8, 0);
+
+       psPVRSRVPDumpSetDefaultCaptureParamsOUT->eError =
+           PDumpSetDefaultCaptureParamsKM(psConnection, OSGetDevNode(psConnection),
+                                          psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32Mode,
+                                          psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32Start,
+                                          psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32End,
+                                          psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32Interval,
+                                          psPVRSRVPDumpSetDefaultCaptureParamsIN->
+                                          ui32MaxParamFileSize);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpIsLastCaptureFrame(IMG_UINT32 ui32DispatchTableEntry,
+                                         IMG_UINT8 * psPVRSRVPDumpIsLastCaptureFrameIN_UI8,
+                                         IMG_UINT8 * psPVRSRVPDumpIsLastCaptureFrameOUT_UI8,
+                                         CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME *psPVRSRVPDumpIsLastCaptureFrameIN =
+           (PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME *)
+           IMG_OFFSET_ADDR(psPVRSRVPDumpIsLastCaptureFrameIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME *psPVRSRVPDumpIsLastCaptureFrameOUT =
+           (PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME *)
+           IMG_OFFSET_ADDR(psPVRSRVPDumpIsLastCaptureFrameOUT_UI8, 0);
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpIsLastCaptureFrameIN);
+
+       psPVRSRVPDumpIsLastCaptureFrameOUT->eError =
+           PDumpIsLastCaptureFrameKM(&psPVRSRVPDumpIsLastCaptureFrameOUT->bpbIsLastCaptureFrame);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpForceCaptureStop(IMG_UINT32 ui32DispatchTableEntry,
+                                       IMG_UINT8 * psPVRSRVPDumpForceCaptureStopIN_UI8,
+                                       IMG_UINT8 * psPVRSRVPDumpForceCaptureStopOUT_UI8,
+                                       CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PVRSRVPDUMPFORCECAPTURESTOP *psPVRSRVPDumpForceCaptureStopIN =
+           (PVRSRV_BRIDGE_IN_PVRSRVPDUMPFORCECAPTURESTOP *)
+           IMG_OFFSET_ADDR(psPVRSRVPDumpForceCaptureStopIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PVRSRVPDUMPFORCECAPTURESTOP *psPVRSRVPDumpForceCaptureStopOUT =
+           (PVRSRV_BRIDGE_OUT_PVRSRVPDUMPFORCECAPTURESTOP *)
+           IMG_OFFSET_ADDR(psPVRSRVPDumpForceCaptureStopOUT_UI8, 0);
+
+       PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpForceCaptureStopIN);
+
+       psPVRSRVPDumpForceCaptureStopOUT->eError =
+           PDumpForceCaptureStopKM(psConnection, OSGetDevNode(psConnection));
+
+       return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static POS_LOCK pPDUMPCTRLBridgeLock;
+
+PVRSRV_ERROR InitPDUMPCTRLBridge(void);
+void DeinitPDUMPCTRLBridge(void);
+
+/*
+ * Register all PDUMPCTRL functions with services
+ */
+PVRSRV_ERROR InitPDUMPCTRLBridge(void)
+{
+       PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&pPDUMPCTRLBridgeLock), "OSLockCreate");
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETSTATE,
+                             PVRSRVBridgePVRSRVPDumpGetState, pPDUMPCTRLBridgeLock);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETFRAME,
+                             PVRSRVBridgePVRSRVPDumpGetFrame, pPDUMPCTRLBridgeLock);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL,
+                             PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS,
+                             PVRSRVBridgePVRSRVPDumpSetDefaultCaptureParams, pPDUMPCTRLBridgeLock);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL,
+                             PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISLASTCAPTUREFRAME,
+                             PVRSRVBridgePVRSRVPDumpIsLastCaptureFrame, pPDUMPCTRLBridgeLock);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL,
+                             PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPFORCECAPTURESTOP,
+                             PVRSRVBridgePVRSRVPDumpForceCaptureStop, pPDUMPCTRLBridgeLock);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all PDUMPCTRL functions from services
+ */
+void DeinitPDUMPCTRLBridge(void)
+{
+       OSLockDestroy(pPDUMPCTRLBridgeLock);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL,
+                               PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETSTATE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL,
+                               PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETFRAME);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL,
+                               PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL,
+                               PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISLASTCAPTUREFRAME);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL,
+                               PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPFORCECAPTURESTOP);
+
+}
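
The Init/Deinit pair above is the only public surface of this generated file: each bridge group registers its handlers in a services-wide dispatch table keyed by (bridge group, function ID), with an optional per-group lock created in Init and destroyed in Deinit. A minimal, self-contained sketch of that dispatch-table pattern (illustrative names only, not the PVR services implementation):

#include <stddef.h>

/* Handler signature mirroring the bridge entry points: raw IN/OUT buffers plus a connection. */
typedef int (*bridge_fn)(unsigned int entry, void *in, void *out, void *conn);

#define BRIDGE_GROUPS 8
#define BRIDGE_FUNCS  16

static bridge_fn dispatch_table[BRIDGE_GROUPS][BRIDGE_FUNCS];

/* Register one handler, analogous to one SetDispatchTableEntry call above (lock omitted). */
static void set_dispatch_entry(unsigned int group, unsigned int func, bridge_fn fn)
{
	if (group < BRIDGE_GROUPS && func < BRIDGE_FUNCS)
		dispatch_table[group][func] = fn;
}

/* Route an incoming call; unknown (group, func) pairs are rejected. */
static int call_dispatch_entry(unsigned int group, unsigned int func,
			       void *in, void *out, void *conn)
{
	bridge_fn fn = (group < BRIDGE_GROUPS && func < BRIDGE_FUNCS)
			   ? dispatch_table[group][func] : NULL;

	return fn ? fn(func, in, out, conn) : -1;
}
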
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/pdumpmm_bridge/client_pdumpmm_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/pdumpmm_bridge/client_pdumpmm_bridge.h
new file mode 100644 (file)
index 0000000..a436847
--- /dev/null
@@ -0,0 +1,125 @@
+/*******************************************************************************
+@File
+@Title          Client bridge header for pdumpmm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for pdumpmm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_PDUMPMM_BRIDGE_H
+#define CLIENT_PDUMPMM_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_pdumpmm_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpLoadMem(IMG_HANDLE hBridge,
+                                               IMG_HANDLE hPMR,
+                                               IMG_DEVMEM_OFFSET_T uiOffset,
+                                               IMG_DEVMEM_SIZE_T uiSize,
+                                               IMG_UINT32 ui32PDumpFlags, IMG_BOOL bbZero);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpLoadMemValue32(IMG_HANDLE hBridge,
+                                                      IMG_HANDLE hPMR,
+                                                      IMG_DEVMEM_OFFSET_T uiOffset,
+                                                      IMG_UINT32 ui32Value,
+                                                      IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpLoadMemValue64(IMG_HANDLE hBridge,
+                                                      IMG_HANDLE hPMR,
+                                                      IMG_DEVMEM_OFFSET_T uiOffset,
+                                                      IMG_UINT64 ui64Value,
+                                                      IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpSaveToFile(IMG_HANDLE hBridge,
+                                                  IMG_HANDLE hPMR,
+                                                  IMG_DEVMEM_OFFSET_T uiOffset,
+                                                  IMG_DEVMEM_SIZE_T uiSize,
+                                                  IMG_UINT32 ui32ArraySize,
+                                                  const IMG_CHAR * puiFileName,
+                                                  IMG_UINT32 ui32uiFileOffset);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpSymbolicAddr(IMG_HANDLE hBridge,
+                                                    IMG_HANDLE hPMR,
+                                                    IMG_DEVMEM_OFFSET_T uiOffset,
+                                                    IMG_UINT32 ui32MemspaceNameLen,
+                                                    IMG_CHAR * puiMemspaceName,
+                                                    IMG_UINT32 ui32SymbolicAddrLen,
+                                                    IMG_CHAR * puiSymbolicAddr,
+                                                    IMG_DEVMEM_OFFSET_T * puiNewOffset,
+                                                    IMG_DEVMEM_OFFSET_T * puiNextSymName);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpPol32(IMG_HANDLE hBridge,
+                                             IMG_HANDLE hPMR,
+                                             IMG_DEVMEM_OFFSET_T uiOffset,
+                                             IMG_UINT32 ui32Value,
+                                             IMG_UINT32 ui32Mask,
+                                             PDUMP_POLL_OPERATOR eOperator,
+                                             IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpCheck32(IMG_HANDLE hBridge,
+                                               IMG_HANDLE hPMR,
+                                               IMG_DEVMEM_OFFSET_T uiOffset,
+                                               IMG_UINT32 ui32Value,
+                                               IMG_UINT32 ui32Mask,
+                                               PDUMP_POLL_OPERATOR eOperator,
+                                               IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpCBP(IMG_HANDLE hBridge,
+                                           IMG_HANDLE hPMR,
+                                           IMG_DEVMEM_OFFSET_T uiReadOffset,
+                                           IMG_DEVMEM_OFFSET_T uiWriteOffset,
+                                           IMG_DEVMEM_SIZE_T uiPacketSize,
+                                           IMG_DEVMEM_SIZE_T uiBufferSize);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntPDumpSaveToFileVirtual(IMG_HANDLE hBridge,
+                                                               IMG_HANDLE hDevmemServerContext,
+                                                               IMG_DEV_VIRTADDR sAddress,
+                                                               IMG_DEVMEM_SIZE_T uiSize,
+                                                               IMG_UINT32 ui32ArraySize,
+                                                               const IMG_CHAR * puiFileName,
+                                                               IMG_UINT32 ui32FileOffset,
+                                                               IMG_UINT32 ui32PDumpFlags);
+
+#endif /* CLIENT_PDUMPMM_BRIDGE_H */
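
These prototypes are the kernel-internal client view of the pdumpmm bridge; the direct implementations follow in the next file. A hedged usage sketch, assuming the headers above and a valid PMR handle obtained elsewhere:

/* Illustrative only: hPMR is assumed to be a valid PMR handle obtained elsewhere. */
static PVRSRV_ERROR ExamplePDumpPokeValue(IMG_HANDLE hPMR)
{
	/* The direct bridge ignores hBridge (see client_pdumpmm_direct_bridge.c below),
	 * so NULL is passed here purely for illustration. */
	return BridgePMRPDumpLoadMemValue32(NULL,
					    hPMR,
					    0,          /* uiOffset */
					    0xCAFEF00D, /* ui32Value */
					    0);         /* ui32PDumpFlags */
}
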
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/pdumpmm_bridge/client_pdumpmm_direct_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/pdumpmm_bridge/client_pdumpmm_direct_bridge.c
new file mode 100644 (file)
index 0000000..b1906b3
--- /dev/null
@@ -0,0 +1,239 @@
+/*******************************************************************************
+@File
+@Title          Direct client bridge for pdumpmm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the client side of the bridge for pdumpmm
+                which is used in calls from Server context.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_pdumpmm_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "pdump.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h"
+
+#include "devicemem_server.h"
+#include "pmr.h"
+#include "physmem.h"
+#include "pdump_physmem.h"
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpLoadMem(IMG_HANDLE hBridge,
+                                               IMG_HANDLE hPMR,
+                                               IMG_DEVMEM_OFFSET_T uiOffset,
+                                               IMG_DEVMEM_SIZE_T uiSize,
+                                               IMG_UINT32 ui32PDumpFlags, IMG_BOOL bbZero)
+{
+       PVRSRV_ERROR eError;
+       PMR *psPMRInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psPMRInt = (PMR *) hPMR;
+
+       eError = PMRPDumpLoadMem(psPMRInt, uiOffset, uiSize, ui32PDumpFlags, bbZero);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpLoadMemValue32(IMG_HANDLE hBridge,
+                                                      IMG_HANDLE hPMR,
+                                                      IMG_DEVMEM_OFFSET_T uiOffset,
+                                                      IMG_UINT32 ui32Value,
+                                                      IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError;
+       PMR *psPMRInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psPMRInt = (PMR *) hPMR;
+
+       eError = PMRPDumpLoadMemValue32(psPMRInt, uiOffset, ui32Value, ui32PDumpFlags);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpLoadMemValue64(IMG_HANDLE hBridge,
+                                                      IMG_HANDLE hPMR,
+                                                      IMG_DEVMEM_OFFSET_T uiOffset,
+                                                      IMG_UINT64 ui64Value,
+                                                      IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError;
+       PMR *psPMRInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psPMRInt = (PMR *) hPMR;
+
+       eError = PMRPDumpLoadMemValue64(psPMRInt, uiOffset, ui64Value, ui32PDumpFlags);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpSaveToFile(IMG_HANDLE hBridge,
+                                                  IMG_HANDLE hPMR,
+                                                  IMG_DEVMEM_OFFSET_T uiOffset,
+                                                  IMG_DEVMEM_SIZE_T uiSize,
+                                                  IMG_UINT32 ui32ArraySize,
+                                                  const IMG_CHAR * puiFileName,
+                                                  IMG_UINT32 ui32uiFileOffset)
+{
+       PVRSRV_ERROR eError;
+       PMR *psPMRInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psPMRInt = (PMR *) hPMR;
+
+       eError =
+           PMRPDumpSaveToFile(psPMRInt,
+                              uiOffset, uiSize, ui32ArraySize, puiFileName, ui32uiFileOffset);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpSymbolicAddr(IMG_HANDLE hBridge,
+                                                    IMG_HANDLE hPMR,
+                                                    IMG_DEVMEM_OFFSET_T uiOffset,
+                                                    IMG_UINT32 ui32MemspaceNameLen,
+                                                    IMG_CHAR * puiMemspaceName,
+                                                    IMG_UINT32 ui32SymbolicAddrLen,
+                                                    IMG_CHAR * puiSymbolicAddr,
+                                                    IMG_DEVMEM_OFFSET_T * puiNewOffset,
+                                                    IMG_DEVMEM_OFFSET_T * puiNextSymName)
+{
+       PVRSRV_ERROR eError;
+       PMR *psPMRInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psPMRInt = (PMR *) hPMR;
+
+       eError =
+           PMR_PDumpSymbolicAddr(psPMRInt,
+                                 uiOffset,
+                                 ui32MemspaceNameLen,
+                                 puiMemspaceName,
+                                 ui32SymbolicAddrLen,
+                                 puiSymbolicAddr, puiNewOffset, puiNextSymName);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpPol32(IMG_HANDLE hBridge,
+                                             IMG_HANDLE hPMR,
+                                             IMG_DEVMEM_OFFSET_T uiOffset,
+                                             IMG_UINT32 ui32Value,
+                                             IMG_UINT32 ui32Mask,
+                                             PDUMP_POLL_OPERATOR eOperator,
+                                             IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError;
+       PMR *psPMRInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psPMRInt = (PMR *) hPMR;
+
+       eError = PMRPDumpPol32(psPMRInt, uiOffset, ui32Value, ui32Mask, eOperator, ui32PDumpFlags);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpCheck32(IMG_HANDLE hBridge,
+                                               IMG_HANDLE hPMR,
+                                               IMG_DEVMEM_OFFSET_T uiOffset,
+                                               IMG_UINT32 ui32Value,
+                                               IMG_UINT32 ui32Mask,
+                                               PDUMP_POLL_OPERATOR eOperator,
+                                               IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError;
+       PMR *psPMRInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psPMRInt = (PMR *) hPMR;
+
+       eError =
+           PMRPDumpCheck32(psPMRInt, uiOffset, ui32Value, ui32Mask, eOperator, ui32PDumpFlags);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpCBP(IMG_HANDLE hBridge,
+                                           IMG_HANDLE hPMR,
+                                           IMG_DEVMEM_OFFSET_T uiReadOffset,
+                                           IMG_DEVMEM_OFFSET_T uiWriteOffset,
+                                           IMG_DEVMEM_SIZE_T uiPacketSize,
+                                           IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+       PVRSRV_ERROR eError;
+       PMR *psPMRInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psPMRInt = (PMR *) hPMR;
+
+       eError = PMRPDumpCBP(psPMRInt, uiReadOffset, uiWriteOffset, uiPacketSize, uiBufferSize);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntPDumpSaveToFileVirtual(IMG_HANDLE hBridge,
+                                                               IMG_HANDLE hDevmemServerContext,
+                                                               IMG_DEV_VIRTADDR sAddress,
+                                                               IMG_DEVMEM_SIZE_T uiSize,
+                                                               IMG_UINT32 ui32ArraySize,
+                                                               const IMG_CHAR * puiFileName,
+                                                               IMG_UINT32 ui32FileOffset,
+                                                               IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError;
+       DEVMEMINT_CTX *psDevmemServerContextInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psDevmemServerContextInt = (DEVMEMINT_CTX *) hDevmemServerContext;
+
+       eError =
+           DevmemIntPDumpSaveToFileVirtual(psDevmemServerContextInt,
+                                           sAddress,
+                                           uiSize,
+                                           ui32ArraySize,
+                                           puiFileName, ui32FileOffset, ui32PDumpFlags);
+
+       return eError;
+}
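
Every wrapper in this direct bridge has the same three-step shape: ignore hBridge, cast the opaque IMG_HANDLE back to its server-internal type, and forward the call unchanged. A generic sketch of the pattern with placeholder names (FOO and FooDoThing are not PVR APIs):

typedef struct FOO_TAG FOO;          /* placeholder server-internal object type */
typedef void *EXAMPLE_HANDLE;        /* placeholder opaque handle type */

extern int FooDoThing(FOO *psFoo, unsigned int uiArg);  /* placeholder implementation */

/* Direct-bridge wrapper: in the same-address-space case the handle is simply the pointer. */
static int BridgeFooDoThing(EXAMPLE_HANDLE hBridge, EXAMPLE_HANDLE hFoo, unsigned int uiArg)
{
	FOO *psFooInt = (FOO *)hFoo;

	(void)hBridge;               /* unused, as with PVR_UNREFERENCED_PARAMETER(hBridge) above */

	return FooDoThing(psFooInt, uiArg);
}
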
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/pdumpmm_bridge/common_pdumpmm_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/pdumpmm_bridge/common_pdumpmm_bridge.h
new file mode 100644 (file)
index 0000000..6554132
--- /dev/null
@@ -0,0 +1,259 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for pdumpmm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for pdumpmm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_PDUMPMM_BRIDGE_H
+#define COMMON_PDUMPMM_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pdump.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h"
+
+#define PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST                        0
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEM                  PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+0
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE32                   PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+1
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE64                   PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+2
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSAVETOFILE                       PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+3
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSYMBOLICADDR                     PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+4
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPPOL32                    PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+5
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCHECK32                  PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+6
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCBP                      PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+7
+#define PVRSRV_BRIDGE_PDUMPMM_DEVMEMINTPDUMPSAVETOFILEVIRTUAL                  PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+8
+#define PVRSRV_BRIDGE_PDUMPMM_CMD_LAST                 (PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+8)
+
+/*******************************************
+            PMRPDumpLoadMem
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpLoadMem */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM_TAG
+{
+       IMG_DEVMEM_OFFSET_T uiOffset;
+       IMG_DEVMEM_SIZE_T uiSize;
+       IMG_HANDLE hPMR;
+       IMG_BOOL bbZero;
+       IMG_UINT32 ui32PDumpFlags;
+} __packed PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM;
+
+/* Bridge out structure for PMRPDumpLoadMem */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM;
+
+/*******************************************
+            PMRPDumpLoadMemValue32
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpLoadMemValue32 */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32_TAG
+{
+       IMG_DEVMEM_OFFSET_T uiOffset;
+       IMG_HANDLE hPMR;
+       IMG_UINT32 ui32PDumpFlags;
+       IMG_UINT32 ui32Value;
+} __packed PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32;
+
+/* Bridge out structure for PMRPDumpLoadMemValue32 */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32;
+
+/*******************************************
+            PMRPDumpLoadMemValue64
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpLoadMemValue64 */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64_TAG
+{
+       IMG_UINT64 ui64Value;
+       IMG_DEVMEM_OFFSET_T uiOffset;
+       IMG_HANDLE hPMR;
+       IMG_UINT32 ui32PDumpFlags;
+} __packed PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64;
+
+/* Bridge out structure for PMRPDumpLoadMemValue64 */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64;
+
+/*******************************************
+            PMRPDumpSaveToFile
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpSaveToFile */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE_TAG
+{
+       IMG_DEVMEM_OFFSET_T uiOffset;
+       IMG_DEVMEM_SIZE_T uiSize;
+       IMG_HANDLE hPMR;
+       const IMG_CHAR *puiFileName;
+       IMG_UINT32 ui32ArraySize;
+       IMG_UINT32 ui32uiFileOffset;
+} __packed PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE;
+
+/* Bridge out structure for PMRPDumpSaveToFile */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE;
+
+/*******************************************
+            PMRPDumpSymbolicAddr
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpSymbolicAddr */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR_TAG
+{
+       IMG_DEVMEM_OFFSET_T uiOffset;
+       IMG_HANDLE hPMR;
+       IMG_CHAR *puiMemspaceName;
+       IMG_CHAR *puiSymbolicAddr;
+       IMG_UINT32 ui32MemspaceNameLen;
+       IMG_UINT32 ui32SymbolicAddrLen;
+} __packed PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR;
+
+/* Bridge out structure for PMRPDumpSymbolicAddr */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR_TAG
+{
+       IMG_DEVMEM_OFFSET_T uiNewOffset;
+       IMG_DEVMEM_OFFSET_T uiNextSymName;
+       IMG_CHAR *puiMemspaceName;
+       IMG_CHAR *puiSymbolicAddr;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR;
+
+/*******************************************
+            PMRPDumpPol32
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpPol32 */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPPOL32_TAG
+{
+       IMG_DEVMEM_OFFSET_T uiOffset;
+       IMG_HANDLE hPMR;
+       PDUMP_POLL_OPERATOR eOperator;
+       IMG_UINT32 ui32Mask;
+       IMG_UINT32 ui32PDumpFlags;
+       IMG_UINT32 ui32Value;
+} __packed PVRSRV_BRIDGE_IN_PMRPDUMPPOL32;
+
+/* Bridge out structure for PMRPDumpPol32 */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32;
+
+/*******************************************
+            PMRPDumpCheck32
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpCheck32 */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPCHECK32_TAG
+{
+       IMG_DEVMEM_OFFSET_T uiOffset;
+       IMG_HANDLE hPMR;
+       PDUMP_POLL_OPERATOR eOperator;
+       IMG_UINT32 ui32Mask;
+       IMG_UINT32 ui32PDumpFlags;
+       IMG_UINT32 ui32Value;
+} __packed PVRSRV_BRIDGE_IN_PMRPDUMPCHECK32;
+
+/* Bridge out structure for PMRPDumpCheck32 */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPCHECK32_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRPDUMPCHECK32;
+
+/*******************************************
+            PMRPDumpCBP
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpCBP */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPCBP_TAG
+{
+       IMG_DEVMEM_SIZE_T uiBufferSize;
+       IMG_DEVMEM_SIZE_T uiPacketSize;
+       IMG_DEVMEM_OFFSET_T uiReadOffset;
+       IMG_DEVMEM_OFFSET_T uiWriteOffset;
+       IMG_HANDLE hPMR;
+} __packed PVRSRV_BRIDGE_IN_PMRPDUMPCBP;
+
+/* Bridge out structure for PMRPDumpCBP */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPCBP_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRPDUMPCBP;
+
+/*******************************************
+            DevmemIntPDumpSaveToFileVirtual
+ *******************************************/
+
+/* Bridge in structure for DevmemIntPDumpSaveToFileVirtual */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL_TAG
+{
+       IMG_DEV_VIRTADDR sAddress;
+       IMG_DEVMEM_SIZE_T uiSize;
+       IMG_HANDLE hDevmemServerContext;
+       const IMG_CHAR *puiFileName;
+       IMG_UINT32 ui32ArraySize;
+       IMG_UINT32 ui32FileOffset;
+       IMG_UINT32 ui32PDumpFlags;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL;
+
+/* Bridge out structure for DevmemIntPDumpSaveToFileVirtual */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL;
+
+#endif /* COMMON_PDUMPMM_BRIDGE_H */
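
The __packed IN/OUT structures above are the wire format shared between the client side and the server bridge that follows: the caller fills an IN structure, issues the bridge call with the group and function IDs defined at the top of this header, and reads eError (plus any outputs) from the OUT structure. A hedged marshalling sketch, in which DoBridgeCall and the group ID PVRSRV_BRIDGE_PDUMPMM are assumptions rather than definitions from this header:

/* Hypothetical user-side marshalling; DoBridgeCall(group, func, in, insz, out, outsz) is assumed
 * to send the packed IN buffer and fill the OUT buffer. */
static PVRSRV_ERROR ExampleLoadMemValue32(IMG_HANDLE hPMR, IMG_DEVMEM_OFFSET_T uiOffset,
					  IMG_UINT32 ui32Value, IMG_UINT32 ui32PDumpFlags)
{
	PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32 sIn;
	PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32 sOut;

	sIn.hPMR = hPMR;
	sIn.uiOffset = uiOffset;
	sIn.ui32Value = ui32Value;
	sIn.ui32PDumpFlags = ui32PDumpFlags;

	DoBridgeCall(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE32,
		     &sIn, sizeof(sIn), &sOut, sizeof(sOut));

	return sOut.eError;
}
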
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/pdumpmm_bridge/server_pdumpmm_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/pdumpmm_bridge/server_pdumpmm_bridge.c
new file mode 100644 (file)
index 0000000..5feeab7
--- /dev/null
@@ -0,0 +1,959 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for pdumpmm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for pdumpmm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem_server.h"
+#include "pmr.h"
+#include "physmem.h"
+#include "pdump_physmem.h"
+
+#include "common_pdumpmm_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgePMRPDumpLoadMem(IMG_UINT32 ui32DispatchTableEntry,
+                           IMG_UINT8 * psPMRPDumpLoadMemIN_UI8,
+                           IMG_UINT8 * psPMRPDumpLoadMemOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM *psPMRPDumpLoadMemIN =
+           (PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM *) IMG_OFFSET_ADDR(psPMRPDumpLoadMemIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM *psPMRPDumpLoadMemOUT =
+           (PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM *) IMG_OFFSET_ADDR(psPMRPDumpLoadMemOUT_UI8, 0);
+
+       IMG_HANDLE hPMR = psPMRPDumpLoadMemIN->hPMR;
+       PMR *psPMRInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psPMRPDumpLoadMemOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRInt,
+                                      hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psPMRPDumpLoadMemOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto PMRPDumpLoadMem_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psPMRPDumpLoadMemOUT->eError =
+           PMRPDumpLoadMem(psPMRInt,
+                           psPMRPDumpLoadMemIN->uiOffset,
+                           psPMRPDumpLoadMemIN->uiSize,
+                           psPMRPDumpLoadMemIN->ui32PDumpFlags, psPMRPDumpLoadMemIN->bbZero);
+
+PMRPDumpLoadMem_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRPDumpLoadMemValue32(IMG_UINT32 ui32DispatchTableEntry,
+                                  IMG_UINT8 * psPMRPDumpLoadMemValue32IN_UI8,
+                                  IMG_UINT8 * psPMRPDumpLoadMemValue32OUT_UI8,
+                                  CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32 *psPMRPDumpLoadMemValue32IN =
+           (PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32 *)
+           IMG_OFFSET_ADDR(psPMRPDumpLoadMemValue32IN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32 *psPMRPDumpLoadMemValue32OUT =
+           (PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32 *)
+           IMG_OFFSET_ADDR(psPMRPDumpLoadMemValue32OUT_UI8, 0);
+
+       IMG_HANDLE hPMR = psPMRPDumpLoadMemValue32IN->hPMR;
+       PMR *psPMRInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psPMRPDumpLoadMemValue32OUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRInt,
+                                      hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psPMRPDumpLoadMemValue32OUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto PMRPDumpLoadMemValue32_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psPMRPDumpLoadMemValue32OUT->eError =
+           PMRPDumpLoadMemValue32(psPMRInt,
+                                  psPMRPDumpLoadMemValue32IN->uiOffset,
+                                  psPMRPDumpLoadMemValue32IN->ui32Value,
+                                  psPMRPDumpLoadMemValue32IN->ui32PDumpFlags);
+
+PMRPDumpLoadMemValue32_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRPDumpLoadMemValue64(IMG_UINT32 ui32DispatchTableEntry,
+                                  IMG_UINT8 * psPMRPDumpLoadMemValue64IN_UI8,
+                                  IMG_UINT8 * psPMRPDumpLoadMemValue64OUT_UI8,
+                                  CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64 *psPMRPDumpLoadMemValue64IN =
+           (PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64 *)
+           IMG_OFFSET_ADDR(psPMRPDumpLoadMemValue64IN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64 *psPMRPDumpLoadMemValue64OUT =
+           (PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64 *)
+           IMG_OFFSET_ADDR(psPMRPDumpLoadMemValue64OUT_UI8, 0);
+
+       IMG_HANDLE hPMR = psPMRPDumpLoadMemValue64IN->hPMR;
+       PMR *psPMRInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psPMRPDumpLoadMemValue64OUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRInt,
+                                      hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psPMRPDumpLoadMemValue64OUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto PMRPDumpLoadMemValue64_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psPMRPDumpLoadMemValue64OUT->eError =
+           PMRPDumpLoadMemValue64(psPMRInt,
+                                  psPMRPDumpLoadMemValue64IN->uiOffset,
+                                  psPMRPDumpLoadMemValue64IN->ui64Value,
+                                  psPMRPDumpLoadMemValue64IN->ui32PDumpFlags);
+
+PMRPDumpLoadMemValue64_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static_assert(PVRSRV_PDUMP_MAX_FILENAME_SIZE <= IMG_UINT32_MAX,
+             "PVRSRV_PDUMP_MAX_FILENAME_SIZE must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgePMRPDumpSaveToFile(IMG_UINT32 ui32DispatchTableEntry,
+                              IMG_UINT8 * psPMRPDumpSaveToFileIN_UI8,
+                              IMG_UINT8 * psPMRPDumpSaveToFileOUT_UI8,
+                              CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE *psPMRPDumpSaveToFileIN =
+           (PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE *) IMG_OFFSET_ADDR(psPMRPDumpSaveToFileIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE *psPMRPDumpSaveToFileOUT =
+           (PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE *) IMG_OFFSET_ADDR(psPMRPDumpSaveToFileOUT_UI8,
+                                                                    0);
+
+       IMG_HANDLE hPMR = psPMRPDumpSaveToFileIN->hPMR;
+       PMR *psPMRInt = NULL;
+       IMG_CHAR *uiFileNameInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR)) + 0;
+
+       if (unlikely(psPMRPDumpSaveToFileIN->ui32ArraySize > PVRSRV_PDUMP_MAX_FILENAME_SIZE))
+       {
+               psPMRPDumpSaveToFileOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto PMRPDumpSaveToFile_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psPMRPDumpSaveToFileOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto PMRPDumpSaveToFile_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psPMRPDumpSaveToFileIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPMRPDumpSaveToFileIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psPMRPDumpSaveToFileOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto PMRPDumpSaveToFile_exit;
+                       }
+               }
+       }
+
+       if (psPMRPDumpSaveToFileIN->ui32ArraySize != 0)
+       {
+               uiFileNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiFileNameInt, (const void __user *)psPMRPDumpSaveToFileIN->puiFileName,
+                    psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psPMRPDumpSaveToFileOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto PMRPDumpSaveToFile_exit;
+               }
+               ((IMG_CHAR *)
+                uiFileNameInt)[(psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR)) - 1] =
+                   '\0';
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psPMRPDumpSaveToFileOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRInt,
+                                      hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psPMRPDumpSaveToFileOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto PMRPDumpSaveToFile_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psPMRPDumpSaveToFileOUT->eError =
+           PMRPDumpSaveToFile(psPMRInt,
+                              psPMRPDumpSaveToFileIN->uiOffset,
+                              psPMRPDumpSaveToFileIN->uiSize,
+                              psPMRPDumpSaveToFileIN->ui32ArraySize,
+                              uiFileNameInt, psPMRPDumpSaveToFileIN->ui32uiFileOffset);
+
+PMRPDumpSaveToFile_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psPMRPDumpSaveToFileOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRPDumpSymbolicAddr(IMG_UINT32 ui32DispatchTableEntry,
+                                IMG_UINT8 * psPMRPDumpSymbolicAddrIN_UI8,
+                                IMG_UINT8 * psPMRPDumpSymbolicAddrOUT_UI8,
+                                CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR *psPMRPDumpSymbolicAddrIN =
+           (PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR *) IMG_OFFSET_ADDR(psPMRPDumpSymbolicAddrIN_UI8,
+                                                                     0);
+       PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR *psPMRPDumpSymbolicAddrOUT =
+           (PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR *)
+           IMG_OFFSET_ADDR(psPMRPDumpSymbolicAddrOUT_UI8, 0);
+
+       IMG_HANDLE hPMR = psPMRPDumpSymbolicAddrIN->hPMR;
+       PMR *psPMRInt = NULL;
+       IMG_CHAR *puiMemspaceNameInt = NULL;
+       IMG_CHAR *puiSymbolicAddrInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * sizeof(IMG_CHAR)) +
+           ((IMG_UINT64) psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * sizeof(IMG_CHAR)) + 0;
+
+       if (psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen > PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH)
+       {
+               psPMRPDumpSymbolicAddrOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto PMRPDumpSymbolicAddr_exit;
+       }
+
+       if (psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen > PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH)
+       {
+               psPMRPDumpSymbolicAddrOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto PMRPDumpSymbolicAddr_exit;
+       }
+
+       psPMRPDumpSymbolicAddrOUT->puiMemspaceName = psPMRPDumpSymbolicAddrIN->puiMemspaceName;
+       psPMRPDumpSymbolicAddrOUT->puiSymbolicAddr = psPMRPDumpSymbolicAddrIN->puiSymbolicAddr;
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psPMRPDumpSymbolicAddrOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto PMRPDumpSymbolicAddr_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psPMRPDumpSymbolicAddrIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPMRPDumpSymbolicAddrIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psPMRPDumpSymbolicAddrOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto PMRPDumpSymbolicAddr_exit;
+                       }
+               }
+       }
+
+       if (psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen != 0)
+       {
+               puiMemspaceNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * sizeof(IMG_CHAR);
+       }
+
+       if (psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen != 0)
+       {
+               puiSymbolicAddrInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * sizeof(IMG_CHAR);
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psPMRPDumpSymbolicAddrOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRInt,
+                                      hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psPMRPDumpSymbolicAddrOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto PMRPDumpSymbolicAddr_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psPMRPDumpSymbolicAddrOUT->eError =
+           PMR_PDumpSymbolicAddr(psPMRInt,
+                                 psPMRPDumpSymbolicAddrIN->uiOffset,
+                                 psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen,
+                                 puiMemspaceNameInt,
+                                 psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen,
+                                 puiSymbolicAddrInt,
+                                 &psPMRPDumpSymbolicAddrOUT->uiNewOffset,
+                                 &psPMRPDumpSymbolicAddrOUT->uiNextSymName);
+       /* Exit early if bridged call fails */
+       if (unlikely(psPMRPDumpSymbolicAddrOUT->eError != PVRSRV_OK))
+       {
+               goto PMRPDumpSymbolicAddr_exit;
+       }
+
+       /* If dest ptr is non-null and we have data to copy */
+       if ((puiMemspaceNameInt) &&
+           ((psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * sizeof(IMG_CHAR)) > 0))
+       {
+               if (unlikely
+                   (OSCopyToUser
+                    (NULL, (void __user *)psPMRPDumpSymbolicAddrOUT->puiMemspaceName,
+                     puiMemspaceNameInt,
+                     (psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * sizeof(IMG_CHAR))) !=
+                    PVRSRV_OK))
+               {
+                       psPMRPDumpSymbolicAddrOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto PMRPDumpSymbolicAddr_exit;
+               }
+       }
+
+       /* If dest ptr is non-null and we have data to copy */
+       if ((puiSymbolicAddrInt) &&
+           ((psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * sizeof(IMG_CHAR)) > 0))
+       {
+               if (unlikely
+                   (OSCopyToUser
+                    (NULL, (void __user *)psPMRPDumpSymbolicAddrOUT->puiSymbolicAddr,
+                     puiSymbolicAddrInt,
+                     (psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * sizeof(IMG_CHAR))) !=
+                    PVRSRV_OK))
+               {
+                       psPMRPDumpSymbolicAddrOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto PMRPDumpSymbolicAddr_exit;
+               }
+       }
+
+PMRPDumpSymbolicAddr_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up lookup handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psPMRPDumpSymbolicAddrOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
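+/* The PMRPDumpPol32, PMRPDumpCheck32 and PMRPDumpCBP handlers below take only
+ * fixed-size scalar arguments, so no array-argument staging buffer is needed:
+ * each resolves the PMR from its handle under the handle lock, calls the
+ * corresponding PDump routine, and then drops the handle reference.
+ */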
+static IMG_INT
+PVRSRVBridgePMRPDumpPol32(IMG_UINT32 ui32DispatchTableEntry,
+                         IMG_UINT8 * psPMRPDumpPol32IN_UI8,
+                         IMG_UINT8 * psPMRPDumpPol32OUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PMRPDUMPPOL32 *psPMRPDumpPol32IN =
+           (PVRSRV_BRIDGE_IN_PMRPDUMPPOL32 *) IMG_OFFSET_ADDR(psPMRPDumpPol32IN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32 *psPMRPDumpPol32OUT =
+           (PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32 *) IMG_OFFSET_ADDR(psPMRPDumpPol32OUT_UI8, 0);
+
+       IMG_HANDLE hPMR = psPMRPDumpPol32IN->hPMR;
+       PMR *psPMRInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psPMRPDumpPol32OUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRInt,
+                                      hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psPMRPDumpPol32OUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto PMRPDumpPol32_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psPMRPDumpPol32OUT->eError =
+           PMRPDumpPol32(psPMRInt,
+                         psPMRPDumpPol32IN->uiOffset,
+                         psPMRPDumpPol32IN->ui32Value,
+                         psPMRPDumpPol32IN->ui32Mask,
+                         psPMRPDumpPol32IN->eOperator, psPMRPDumpPol32IN->ui32PDumpFlags);
+
+PMRPDumpPol32_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up lookup handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRPDumpCheck32(IMG_UINT32 ui32DispatchTableEntry,
+                           IMG_UINT8 * psPMRPDumpCheck32IN_UI8,
+                           IMG_UINT8 * psPMRPDumpCheck32OUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PMRPDUMPCHECK32 *psPMRPDumpCheck32IN =
+           (PVRSRV_BRIDGE_IN_PMRPDUMPCHECK32 *) IMG_OFFSET_ADDR(psPMRPDumpCheck32IN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PMRPDUMPCHECK32 *psPMRPDumpCheck32OUT =
+           (PVRSRV_BRIDGE_OUT_PMRPDUMPCHECK32 *) IMG_OFFSET_ADDR(psPMRPDumpCheck32OUT_UI8, 0);
+
+       IMG_HANDLE hPMR = psPMRPDumpCheck32IN->hPMR;
+       PMR *psPMRInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psPMRPDumpCheck32OUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRInt,
+                                      hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psPMRPDumpCheck32OUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto PMRPDumpCheck32_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psPMRPDumpCheck32OUT->eError =
+           PMRPDumpCheck32(psPMRInt,
+                           psPMRPDumpCheck32IN->uiOffset,
+                           psPMRPDumpCheck32IN->ui32Value,
+                           psPMRPDumpCheck32IN->ui32Mask,
+                           psPMRPDumpCheck32IN->eOperator, psPMRPDumpCheck32IN->ui32PDumpFlags);
+
+PMRPDumpCheck32_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up lookup handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRPDumpCBP(IMG_UINT32 ui32DispatchTableEntry,
+                       IMG_UINT8 * psPMRPDumpCBPIN_UI8,
+                       IMG_UINT8 * psPMRPDumpCBPOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PMRPDUMPCBP *psPMRPDumpCBPIN =
+           (PVRSRV_BRIDGE_IN_PMRPDUMPCBP *) IMG_OFFSET_ADDR(psPMRPDumpCBPIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PMRPDUMPCBP *psPMRPDumpCBPOUT =
+           (PVRSRV_BRIDGE_OUT_PMRPDUMPCBP *) IMG_OFFSET_ADDR(psPMRPDumpCBPOUT_UI8, 0);
+
+       IMG_HANDLE hPMR = psPMRPDumpCBPIN->hPMR;
+       PMR *psPMRInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psPMRPDumpCBPOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRInt,
+                                      hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psPMRPDumpCBPOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto PMRPDumpCBP_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psPMRPDumpCBPOUT->eError =
+           PMRPDumpCBP(psPMRInt,
+                       psPMRPDumpCBPIN->uiReadOffset,
+                       psPMRPDumpCBPIN->uiWriteOffset,
+                       psPMRPDumpCBPIN->uiPacketSize, psPMRPDumpCBPIN->uiBufferSize);
+
+PMRPDumpCBP_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up lookup handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static_assert(PVRSRV_PDUMP_MAX_FILENAME_SIZE <= IMG_UINT32_MAX,
+             "PVRSRV_PDUMP_MAX_FILENAME_SIZE must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual(IMG_UINT32 ui32DispatchTableEntry,
+                                           IMG_UINT8 * psDevmemIntPDumpSaveToFileVirtualIN_UI8,
+                                           IMG_UINT8 * psDevmemIntPDumpSaveToFileVirtualOUT_UI8,
+                                           CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL *psDevmemIntPDumpSaveToFileVirtualIN =
+           (PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL *)
+           IMG_OFFSET_ADDR(psDevmemIntPDumpSaveToFileVirtualIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL *psDevmemIntPDumpSaveToFileVirtualOUT =
+           (PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL *)
+           IMG_OFFSET_ADDR(psDevmemIntPDumpSaveToFileVirtualOUT_UI8, 0);
+
+       IMG_HANDLE hDevmemServerContext = psDevmemIntPDumpSaveToFileVirtualIN->hDevmemServerContext;
+       DEVMEMINT_CTX *psDevmemServerContextInt = NULL;
+       IMG_CHAR *uiFileNameInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * sizeof(IMG_CHAR)) +
+           0;
+
+       if (unlikely
+           (psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize > PVRSRV_PDUMP_MAX_FILENAME_SIZE))
+       {
+               psDevmemIntPDumpSaveToFileVirtualOUT->eError =
+                   PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto DevmemIntPDumpSaveToFileVirtual_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psDevmemIntPDumpSaveToFileVirtualOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto DevmemIntPDumpSaveToFileVirtual_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psDevmemIntPDumpSaveToFileVirtualIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer =
+                           (IMG_BYTE *) (void *)psDevmemIntPDumpSaveToFileVirtualIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psDevmemIntPDumpSaveToFileVirtualOUT->eError =
+                                   PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto DevmemIntPDumpSaveToFileVirtual_exit;
+                       }
+               }
+       }
+
+       if (psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize != 0)
+       {
+               uiFileNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiFileNameInt,
+                    (const void __user *)psDevmemIntPDumpSaveToFileVirtualIN->puiFileName,
+                    psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * sizeof(IMG_CHAR)) !=
+                   PVRSRV_OK)
+               {
+                       psDevmemIntPDumpSaveToFileVirtualOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto DevmemIntPDumpSaveToFileVirtual_exit;
+               }
+               ((IMG_CHAR *)
+                uiFileNameInt)[(psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize *
+                                sizeof(IMG_CHAR)) - 1] = '\0';
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psDevmemIntPDumpSaveToFileVirtualOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psDevmemServerContextInt,
+                                      hDevmemServerContext,
+                                      PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE);
+       if (unlikely(psDevmemIntPDumpSaveToFileVirtualOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto DevmemIntPDumpSaveToFileVirtual_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psDevmemIntPDumpSaveToFileVirtualOUT->eError =
+           DevmemIntPDumpSaveToFileVirtual(psDevmemServerContextInt,
+                                           psDevmemIntPDumpSaveToFileVirtualIN->sAddress,
+                                           psDevmemIntPDumpSaveToFileVirtualIN->uiSize,
+                                           psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize,
+                                           uiFileNameInt,
+                                           psDevmemIntPDumpSaveToFileVirtualIN->ui32FileOffset,
+                                           psDevmemIntPDumpSaveToFileVirtualIN->ui32PDumpFlags);
+
+DevmemIntPDumpSaveToFileVirtual_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psDevmemServerContextInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hDevmemServerContext, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+       }
+       /* Release now we have cleaned up lookup handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psDevmemIntPDumpSaveToFileVirtualOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
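+/* InitPDUMPMMBridge() registers each handler above against its
+ * PVRSRV_BRIDGE_PDUMPMM_* command ID via SetDispatchTableEntry(), and
+ * DeinitPDUMPMMBridge() removes the same set of entries again via
+ * UnsetDispatchTableEntry().
+ */
+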
+PVRSRV_ERROR InitPDUMPMMBridge(void);
+void DeinitPDUMPMMBridge(void);
+
+/*
+ * Register all PDUMPMM functions with services
+ */
+PVRSRV_ERROR InitPDUMPMMBridge(void)
+{
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEM,
+                             PVRSRVBridgePMRPDumpLoadMem, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE32,
+                             PVRSRVBridgePMRPDumpLoadMemValue32, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE64,
+                             PVRSRVBridgePMRPDumpLoadMemValue64, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSAVETOFILE,
+                             PVRSRVBridgePMRPDumpSaveToFile, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSYMBOLICADDR,
+                             PVRSRVBridgePMRPDumpSymbolicAddr, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPPOL32,
+                             PVRSRVBridgePMRPDumpPol32, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCHECK32,
+                             PVRSRVBridgePMRPDumpCheck32, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCBP,
+                             PVRSRVBridgePMRPDumpCBP, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM,
+                             PVRSRV_BRIDGE_PDUMPMM_DEVMEMINTPDUMPSAVETOFILEVIRTUAL,
+                             PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual, NULL);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all PDUMPMM functions from services
+ */
+void DeinitPDUMPMMBridge(void)
+{
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEM);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM,
+                               PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE32);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM,
+                               PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE64);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSAVETOFILE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSYMBOLICADDR);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPPOL32);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCHECK32);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCBP);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM,
+                               PVRSRV_BRIDGE_PDUMPMM_DEVMEMINTPDUMPSAVETOFILEVIRTUAL);
+
+}
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/pvrtl_bridge/client_pvrtl_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/pvrtl_bridge/client_pvrtl_bridge.h
new file mode 100644 (file)
index 0000000..2cfafd5
--- /dev/null
@@ -0,0 +1,93 @@
+/*******************************************************************************
+@File
+@Title          Client bridge header for pvrtl
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for pvrtl
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_PVRTL_BRIDGE_H
+#define CLIENT_PVRTL_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_pvrtl_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR BridgeTLOpenStream(IMG_HANDLE hBridge,
+                                            const IMG_CHAR * puiName,
+                                            IMG_UINT32 ui32Mode,
+                                            IMG_HANDLE * phSD, IMG_HANDLE * phTLPMR);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeTLCloseStream(IMG_HANDLE hBridge, IMG_HANDLE hSD);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeTLAcquireData(IMG_HANDLE hBridge,
+                                             IMG_HANDLE hSD,
+                                             IMG_UINT32 * pui32ReadOffset,
+                                             IMG_UINT32 * pui32ReadLen);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeTLReleaseData(IMG_HANDLE hBridge,
+                                             IMG_HANDLE hSD,
+                                             IMG_UINT32 ui32ReadOffset, IMG_UINT32 ui32ReadLen);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeTLDiscoverStreams(IMG_HANDLE hBridge,
+                                                 const IMG_CHAR * puiNamePattern,
+                                                 IMG_UINT32 ui32Size,
+                                                 IMG_CHAR * puiStreams,
+                                                 IMG_UINT32 * pui32NumFound);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeTLReserveStream(IMG_HANDLE hBridge,
+                                               IMG_HANDLE hSD,
+                                               IMG_UINT32 * pui32BufferOffset,
+                                               IMG_UINT32 ui32Size,
+                                               IMG_UINT32 ui32SizeMin,
+                                               IMG_UINT32 * pui32Available);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeTLCommitStream(IMG_HANDLE hBridge,
+                                              IMG_HANDLE hSD, IMG_UINT32 ui32ReqSize);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeTLWriteData(IMG_HANDLE hBridge,
+                                           IMG_HANDLE hSD,
+                                           IMG_UINT32 ui32Size, IMG_BYTE * pui8Data);
+
+#endif /* CLIENT_PVRTL_BRIDGE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/pvrtl_bridge/client_pvrtl_direct_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/pvrtl_bridge/client_pvrtl_direct_bridge.c
new file mode 100644 (file)
index 0000000..fa2fbed
--- /dev/null
@@ -0,0 +1,175 @@
+/*******************************************************************************
+@File
+@Title          Direct client bridge for pvrtl
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the client side of the bridge for pvrtl
+                which is used in calls from Server context.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_pvrtl_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "devicemem_typedefs.h"
+#include "pvrsrv_tlcommon.h"
+
+#include "tlserver.h"
+
+IMG_INTERNAL PVRSRV_ERROR BridgeTLOpenStream(IMG_HANDLE hBridge,
+                                            const IMG_CHAR * puiName,
+                                            IMG_UINT32 ui32Mode,
+                                            IMG_HANDLE * phSD, IMG_HANDLE * phTLPMR)
+{
+       PVRSRV_ERROR eError;
+       TL_STREAM_DESC *psSDInt = NULL;
+       PMR *psTLPMRInt = NULL;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       eError = TLServerOpenStreamKM(puiName, ui32Mode, &psSDInt, &psTLPMRInt);
+
+       *phSD = psSDInt;
+       *phTLPMR = psTLPMRInt;
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeTLCloseStream(IMG_HANDLE hBridge, IMG_HANDLE hSD)
+{
+       PVRSRV_ERROR eError;
+       TL_STREAM_DESC *psSDInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psSDInt = (TL_STREAM_DESC *) hSD;
+
+       eError = TLServerCloseStreamKM(psSDInt);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeTLAcquireData(IMG_HANDLE hBridge,
+                                             IMG_HANDLE hSD,
+                                             IMG_UINT32 * pui32ReadOffset,
+                                             IMG_UINT32 * pui32ReadLen)
+{
+       PVRSRV_ERROR eError;
+       TL_STREAM_DESC *psSDInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psSDInt = (TL_STREAM_DESC *) hSD;
+
+       eError = TLServerAcquireDataKM(psSDInt, pui32ReadOffset, pui32ReadLen);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeTLReleaseData(IMG_HANDLE hBridge,
+                                             IMG_HANDLE hSD,
+                                             IMG_UINT32 ui32ReadOffset, IMG_UINT32 ui32ReadLen)
+{
+       PVRSRV_ERROR eError;
+       TL_STREAM_DESC *psSDInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psSDInt = (TL_STREAM_DESC *) hSD;
+
+       eError = TLServerReleaseDataKM(psSDInt, ui32ReadOffset, ui32ReadLen);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeTLDiscoverStreams(IMG_HANDLE hBridge,
+                                                 const IMG_CHAR * puiNamePattern,
+                                                 IMG_UINT32 ui32Size,
+                                                 IMG_CHAR * puiStreams, IMG_UINT32 * pui32NumFound)
+{
+       PVRSRV_ERROR eError;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       eError = TLServerDiscoverStreamsKM(puiNamePattern, ui32Size, puiStreams, pui32NumFound);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeTLReserveStream(IMG_HANDLE hBridge,
+                                               IMG_HANDLE hSD,
+                                               IMG_UINT32 * pui32BufferOffset,
+                                               IMG_UINT32 ui32Size,
+                                               IMG_UINT32 ui32SizeMin, IMG_UINT32 * pui32Available)
+{
+       PVRSRV_ERROR eError;
+       TL_STREAM_DESC *psSDInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psSDInt = (TL_STREAM_DESC *) hSD;
+
+       eError =
+           TLServerReserveStreamKM(psSDInt,
+                                   pui32BufferOffset, ui32Size, ui32SizeMin, pui32Available);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeTLCommitStream(IMG_HANDLE hBridge,
+                                              IMG_HANDLE hSD, IMG_UINT32 ui32ReqSize)
+{
+       PVRSRV_ERROR eError;
+       TL_STREAM_DESC *psSDInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psSDInt = (TL_STREAM_DESC *) hSD;
+
+       eError = TLServerCommitStreamKM(psSDInt, ui32ReqSize);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeTLWriteData(IMG_HANDLE hBridge,
+                                           IMG_HANDLE hSD,
+                                           IMG_UINT32 ui32Size, IMG_BYTE * pui8Data)
+{
+       PVRSRV_ERROR eError;
+       TL_STREAM_DESC *psSDInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psSDInt = (TL_STREAM_DESC *) hSD;
+
+       eError = TLServerWriteDataKM(psSDInt, ui32Size, pui8Data);
+
+       return eError;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/pvrtl_bridge/common_pvrtl_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/pvrtl_bridge/common_pvrtl_bridge.h
new file mode 100644 (file)
index 0000000..edc8223
--- /dev/null
@@ -0,0 +1,214 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for pvrtl
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for pvrtl
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_PVRTL_BRIDGE_H
+#define COMMON_PVRTL_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+#include "pvrsrv_tlcommon.h"
+
+#define PVRSRV_BRIDGE_PVRTL_CMD_FIRST                  0
+#define PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM                       PVRSRV_BRIDGE_PVRTL_CMD_FIRST+0
+#define PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM                      PVRSRV_BRIDGE_PVRTL_CMD_FIRST+1
+#define PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA                      PVRSRV_BRIDGE_PVRTL_CMD_FIRST+2
+#define PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA                      PVRSRV_BRIDGE_PVRTL_CMD_FIRST+3
+#define PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS                  PVRSRV_BRIDGE_PVRTL_CMD_FIRST+4
+#define PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM                    PVRSRV_BRIDGE_PVRTL_CMD_FIRST+5
+#define PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM                     PVRSRV_BRIDGE_PVRTL_CMD_FIRST+6
+#define PVRSRV_BRIDGE_PVRTL_TLWRITEDATA                        PVRSRV_BRIDGE_PVRTL_CMD_FIRST+7
+#define PVRSRV_BRIDGE_PVRTL_CMD_LAST                   (PVRSRV_BRIDGE_PVRTL_CMD_FIRST+7)
+
+/*******************************************
+            TLOpenStream
+ *******************************************/
+
+/* Bridge in structure for TLOpenStream */
+typedef struct PVRSRV_BRIDGE_IN_TLOPENSTREAM_TAG
+{
+       const IMG_CHAR *puiName;
+       IMG_UINT32 ui32Mode;
+} __packed PVRSRV_BRIDGE_IN_TLOPENSTREAM;
+
+/* Bridge out structure for TLOpenStream */
+typedef struct PVRSRV_BRIDGE_OUT_TLOPENSTREAM_TAG
+{
+       IMG_HANDLE hSD;
+       IMG_HANDLE hTLPMR;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_TLOPENSTREAM;
+
+/*******************************************
+            TLCloseStream
+ *******************************************/
+
+/* Bridge in structure for TLCloseStream */
+typedef struct PVRSRV_BRIDGE_IN_TLCLOSESTREAM_TAG
+{
+       IMG_HANDLE hSD;
+} __packed PVRSRV_BRIDGE_IN_TLCLOSESTREAM;
+
+/* Bridge out structure for TLCloseStream */
+typedef struct PVRSRV_BRIDGE_OUT_TLCLOSESTREAM_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_TLCLOSESTREAM;
+
+/*******************************************
+            TLAcquireData
+ *******************************************/
+
+/* Bridge in structure for TLAcquireData */
+typedef struct PVRSRV_BRIDGE_IN_TLACQUIREDATA_TAG
+{
+       IMG_HANDLE hSD;
+} __packed PVRSRV_BRIDGE_IN_TLACQUIREDATA;
+
+/* Bridge out structure for TLAcquireData */
+typedef struct PVRSRV_BRIDGE_OUT_TLACQUIREDATA_TAG
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32ReadLen;
+       IMG_UINT32 ui32ReadOffset;
+} __packed PVRSRV_BRIDGE_OUT_TLACQUIREDATA;
+
+/*******************************************
+            TLReleaseData
+ *******************************************/
+
+/* Bridge in structure for TLReleaseData */
+typedef struct PVRSRV_BRIDGE_IN_TLRELEASEDATA_TAG
+{
+       IMG_HANDLE hSD;
+       IMG_UINT32 ui32ReadLen;
+       IMG_UINT32 ui32ReadOffset;
+} __packed PVRSRV_BRIDGE_IN_TLRELEASEDATA;
+
+/* Bridge out structure for TLReleaseData */
+typedef struct PVRSRV_BRIDGE_OUT_TLRELEASEDATA_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_TLRELEASEDATA;
+
+/*******************************************
+            TLDiscoverStreams
+ *******************************************/
+
+/* Bridge in structure for TLDiscoverStreams */
+typedef struct PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS_TAG
+{
+       const IMG_CHAR *puiNamePattern;
+       IMG_CHAR *puiStreams;
+       IMG_UINT32 ui32Size;
+} __packed PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS;
+
+/* Bridge out structure for TLDiscoverStreams */
+typedef struct PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS_TAG
+{
+       IMG_CHAR *puiStreams;
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32NumFound;
+} __packed PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS;
+
+/*******************************************
+            TLReserveStream
+ *******************************************/
+
+/* Bridge in structure for TLReserveStream */
+typedef struct PVRSRV_BRIDGE_IN_TLRESERVESTREAM_TAG
+{
+       IMG_HANDLE hSD;
+       IMG_UINT32 ui32Size;
+       IMG_UINT32 ui32SizeMin;
+} __packed PVRSRV_BRIDGE_IN_TLRESERVESTREAM;
+
+/* Bridge out structure for TLReserveStream */
+typedef struct PVRSRV_BRIDGE_OUT_TLRESERVESTREAM_TAG
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32Available;
+       IMG_UINT32 ui32BufferOffset;
+} __packed PVRSRV_BRIDGE_OUT_TLRESERVESTREAM;
+
+/*******************************************
+            TLCommitStream
+ *******************************************/
+
+/* Bridge in structure for TLCommitStream */
+typedef struct PVRSRV_BRIDGE_IN_TLCOMMITSTREAM_TAG
+{
+       IMG_HANDLE hSD;
+       IMG_UINT32 ui32ReqSize;
+} __packed PVRSRV_BRIDGE_IN_TLCOMMITSTREAM;
+
+/* Bridge out structure for TLCommitStream */
+typedef struct PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM;
+
+/*******************************************
+            TLWriteData
+ *******************************************/
+
+/* Bridge in structure for TLWriteData */
+typedef struct PVRSRV_BRIDGE_IN_TLWRITEDATA_TAG
+{
+       IMG_HANDLE hSD;
+       IMG_BYTE *pui8Data;
+       IMG_UINT32 ui32Size;
+} __packed PVRSRV_BRIDGE_IN_TLWRITEDATA;
+
+/* Bridge out structure for TLWriteData */
+typedef struct PVRSRV_BRIDGE_OUT_TLWRITEDATA_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_TLWRITEDATA;
+
+#endif /* COMMON_PVRTL_BRIDGE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/pvrtl_bridge/server_pvrtl_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/pvrtl_bridge/server_pvrtl_bridge.c
new file mode 100644 (file)
index 0000000..e25137c
--- /dev/null
@@ -0,0 +1,836 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for pvrtl
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for pvrtl
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "tlserver.h"
+
+#include "common_pvrtl_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static PVRSRV_ERROR _TLOpenStreampsSDIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = TLServerCloseStreamKM((TL_STREAM_DESC *) pvData);
+       return eError;
+}
+
+static_assert(PRVSRVTL_MAX_STREAM_NAME_SIZE <= IMG_UINT32_MAX,
+             "PRVSRVTL_MAX_STREAM_NAME_SIZE must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeTLOpenStream(IMG_UINT32 ui32DispatchTableEntry,
+                        IMG_UINT8 * psTLOpenStreamIN_UI8,
+                        IMG_UINT8 * psTLOpenStreamOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_TLOPENSTREAM *psTLOpenStreamIN =
+           (PVRSRV_BRIDGE_IN_TLOPENSTREAM *) IMG_OFFSET_ADDR(psTLOpenStreamIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_TLOPENSTREAM *psTLOpenStreamOUT =
+           (PVRSRV_BRIDGE_OUT_TLOPENSTREAM *) IMG_OFFSET_ADDR(psTLOpenStreamOUT_UI8, 0);
+
+       IMG_CHAR *uiNameInt = NULL;
+       TL_STREAM_DESC *psSDInt = NULL;
+       PMR *psTLPMRInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) + 0;
+
+       psTLOpenStreamOUT->hSD = NULL;
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psTLOpenStreamOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto TLOpenStream_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psTLOpenStreamIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psTLOpenStreamIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psTLOpenStreamOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto TLOpenStream_exit;
+                       }
+               }
+       }
+
+       {
+               uiNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiNameInt, (const void __user *)psTLOpenStreamIN->puiName,
+                    PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psTLOpenStreamOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto TLOpenStream_exit;
+               }
+               ((IMG_CHAR *) uiNameInt)[(PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) - 1] =
+                   '\0';
+       }
+
+       psTLOpenStreamOUT->eError =
+           TLServerOpenStreamKM(uiNameInt, psTLOpenStreamIN->ui32Mode, &psSDInt, &psTLPMRInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psTLOpenStreamOUT->eError != PVRSRV_OK))
+       {
+               goto TLOpenStream_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psTLOpenStreamOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                                             &psTLOpenStreamOUT->hSD,
+                                                             (void *)psSDInt,
+                                                             PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+                                                             PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                                             (PFN_HANDLE_RELEASE) &
+                                                             _TLOpenStreampsSDIntRelease);
+       if (unlikely(psTLOpenStreamOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto TLOpenStream_exit;
+       }
+
+       psTLOpenStreamOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+                                                                &psTLOpenStreamOUT->hTLPMR,
+                                                                (void *)psTLPMRInt,
+                                                                PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+                                                                PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                                                psTLOpenStreamOUT->hSD);
+       if (unlikely(psTLOpenStreamOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto TLOpenStream_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+TLOpenStream_exit:
+
+       if (psTLOpenStreamOUT->eError != PVRSRV_OK)
+       {
+               if (psTLOpenStreamOUT->hSD)
+               {
+                       PVRSRV_ERROR eError;
+
+                       /* Lock over handle creation cleanup. */
+                       LockHandle(psConnection->psHandleBase);
+
+                       eError = PVRSRVDestroyHandleUnlocked(psConnection->psHandleBase,
+                                                            (IMG_HANDLE) psTLOpenStreamOUT->hSD,
+                                                            PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+                       if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)))
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: %s", __func__, PVRSRVGetErrorString(eError)));
+                       }
+                       /* Releasing the handle should free/destroy/release the resource.
+                        * This should never fail... */
+                       PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+                       /* Avoid freeing/destroying/releasing the resource a second time below */
+                       psSDInt = NULL;
+                       /* Release now we have cleaned up creation handles. */
+                       UnlockHandle(psConnection->psHandleBase);
+
+               }
+
+               if (psSDInt)
+               {
+                       TLServerCloseStreamKM(psSDInt);
+               }
+       }
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psTLOpenStreamOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeTLCloseStream(IMG_UINT32 ui32DispatchTableEntry,
+                         IMG_UINT8 * psTLCloseStreamIN_UI8,
+                         IMG_UINT8 * psTLCloseStreamOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_TLCLOSESTREAM *psTLCloseStreamIN =
+           (PVRSRV_BRIDGE_IN_TLCLOSESTREAM *) IMG_OFFSET_ADDR(psTLCloseStreamIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_TLCLOSESTREAM *psTLCloseStreamOUT =
+           (PVRSRV_BRIDGE_OUT_TLCLOSESTREAM *) IMG_OFFSET_ADDR(psTLCloseStreamOUT_UI8, 0);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psHandleBase);
+
+       psTLCloseStreamOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+                                             (IMG_HANDLE) psTLCloseStreamIN->hSD,
+                                             PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+       if (unlikely((psTLCloseStreamOUT->eError != PVRSRV_OK) &&
+                    (psTLCloseStreamOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+                    (psTLCloseStreamOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s", __func__, PVRSRVGetErrorString(psTLCloseStreamOUT->eError)));
+               UnlockHandle(psConnection->psHandleBase);
+               goto TLCloseStream_exit;
+       }
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+TLCloseStream_exit:
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeTLAcquireData(IMG_UINT32 ui32DispatchTableEntry,
+                         IMG_UINT8 * psTLAcquireDataIN_UI8,
+                         IMG_UINT8 * psTLAcquireDataOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_TLACQUIREDATA *psTLAcquireDataIN =
+           (PVRSRV_BRIDGE_IN_TLACQUIREDATA *) IMG_OFFSET_ADDR(psTLAcquireDataIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_TLACQUIREDATA *psTLAcquireDataOUT =
+           (PVRSRV_BRIDGE_OUT_TLACQUIREDATA *) IMG_OFFSET_ADDR(psTLAcquireDataOUT_UI8, 0);
+
+       IMG_HANDLE hSD = psTLAcquireDataIN->hSD;
+       TL_STREAM_DESC *psSDInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psTLAcquireDataOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psSDInt,
+                                      hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE);
+       if (unlikely(psTLAcquireDataOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto TLAcquireData_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psTLAcquireDataOUT->eError =
+           TLServerAcquireDataKM(psSDInt,
+                                 &psTLAcquireDataOUT->ui32ReadOffset,
+                                 &psTLAcquireDataOUT->ui32ReadLen);
+
+TLAcquireData_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psSDInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeTLReleaseData(IMG_UINT32 ui32DispatchTableEntry,
+                         IMG_UINT8 * psTLReleaseDataIN_UI8,
+                         IMG_UINT8 * psTLReleaseDataOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_TLRELEASEDATA *psTLReleaseDataIN =
+           (PVRSRV_BRIDGE_IN_TLRELEASEDATA *) IMG_OFFSET_ADDR(psTLReleaseDataIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_TLRELEASEDATA *psTLReleaseDataOUT =
+           (PVRSRV_BRIDGE_OUT_TLRELEASEDATA *) IMG_OFFSET_ADDR(psTLReleaseDataOUT_UI8, 0);
+
+       IMG_HANDLE hSD = psTLReleaseDataIN->hSD;
+       TL_STREAM_DESC *psSDInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psTLReleaseDataOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psSDInt,
+                                      hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE);
+       if (unlikely(psTLReleaseDataOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto TLReleaseData_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psTLReleaseDataOUT->eError =
+           TLServerReleaseDataKM(psSDInt,
+                                 psTLReleaseDataIN->ui32ReadOffset,
+                                 psTLReleaseDataIN->ui32ReadLen);
+
+TLReleaseData_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psSDInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static_assert(PRVSRVTL_MAX_STREAM_NAME_SIZE <= IMG_UINT32_MAX,
+             "PRVSRVTL_MAX_STREAM_NAME_SIZE must not be larger than IMG_UINT32_MAX");
+
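+/*
+ * Array arguments are marshalled through a single scratch buffer: the total
+ * size is computed in 64 bits, rejected if it exceeds IMG_UINT32_MAX, and
+ * the buffer is either carved out of the unused tail of the fixed bridge
+ * input buffer (PVRSRV_MAX_BRIDGE_IN_SIZE) or, failing that, allocated with
+ * OSAllocMemNoStats and freed on exit.
+ */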
+static IMG_INT
+PVRSRVBridgeTLDiscoverStreams(IMG_UINT32 ui32DispatchTableEntry,
+                             IMG_UINT8 * psTLDiscoverStreamsIN_UI8,
+                             IMG_UINT8 * psTLDiscoverStreamsOUT_UI8,
+                             CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS *psTLDiscoverStreamsIN =
+           (PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS *) IMG_OFFSET_ADDR(psTLDiscoverStreamsIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS *psTLDiscoverStreamsOUT =
+           (PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS *) IMG_OFFSET_ADDR(psTLDiscoverStreamsOUT_UI8, 0);
+
+       IMG_CHAR *uiNamePatternInt = NULL;
+       IMG_CHAR *puiStreamsInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) +
+           ((IMG_UINT64) psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR)) + 0;
+
+       if (psTLDiscoverStreamsIN->ui32Size > PVRSRVTL_MAX_DISCOVERABLE_STREAMS_BUFFER)
+       {
+               psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto TLDiscoverStreams_exit;
+       }
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       psTLDiscoverStreamsOUT->puiStreams = psTLDiscoverStreamsIN->puiStreams;
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto TLDiscoverStreams_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psTLDiscoverStreamsIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psTLDiscoverStreamsIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto TLDiscoverStreams_exit;
+                       }
+               }
+       }
+
+       {
+               uiNamePatternInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiNamePatternInt,
+                    (const void __user *)psTLDiscoverStreamsIN->puiNamePattern,
+                    PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto TLDiscoverStreams_exit;
+               }
+               ((IMG_CHAR *) uiNamePatternInt)[(PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) -
+                                               1] = '\0';
+       }
+       if (psTLDiscoverStreamsIN->ui32Size != 0)
+       {
+               puiStreamsInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR);
+       }
+
+       psTLDiscoverStreamsOUT->eError =
+           TLServerDiscoverStreamsKM(uiNamePatternInt,
+                                     psTLDiscoverStreamsIN->ui32Size,
+                                     puiStreamsInt, &psTLDiscoverStreamsOUT->ui32NumFound);
+       /* Exit early if bridged call fails */
+       if (unlikely(psTLDiscoverStreamsOUT->eError != PVRSRV_OK))
+       {
+               goto TLDiscoverStreams_exit;
+       }
+
+       /* If dest ptr is non-null and we have data to copy */
+       if ((puiStreamsInt) && ((psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR)) > 0))
+       {
+               if (unlikely
+                   (OSCopyToUser
+                    (NULL, (void __user *)psTLDiscoverStreamsOUT->puiStreams, puiStreamsInt,
+                     (psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR))) != PVRSRV_OK))
+               {
+                       psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto TLDiscoverStreams_exit;
+               }
+       }
+
+TLDiscoverStreams_exit:
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psTLDiscoverStreamsOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
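+/*
+ * Reserve/Commit form a two-step write path: TLServerReserveStreamKM hands
+ * back a buffer offset and the space available in the stream, and
+ * TLServerCommitStreamKM later publishes ui32ReqSize bytes against that
+ * reservation.
+ */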
+static IMG_INT
+PVRSRVBridgeTLReserveStream(IMG_UINT32 ui32DispatchTableEntry,
+                           IMG_UINT8 * psTLReserveStreamIN_UI8,
+                           IMG_UINT8 * psTLReserveStreamOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_TLRESERVESTREAM *psTLReserveStreamIN =
+           (PVRSRV_BRIDGE_IN_TLRESERVESTREAM *) IMG_OFFSET_ADDR(psTLReserveStreamIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_TLRESERVESTREAM *psTLReserveStreamOUT =
+           (PVRSRV_BRIDGE_OUT_TLRESERVESTREAM *) IMG_OFFSET_ADDR(psTLReserveStreamOUT_UI8, 0);
+
+       IMG_HANDLE hSD = psTLReserveStreamIN->hSD;
+       TL_STREAM_DESC *psSDInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psTLReserveStreamOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psSDInt,
+                                      hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE);
+       if (unlikely(psTLReserveStreamOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto TLReserveStream_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psTLReserveStreamOUT->eError =
+           TLServerReserveStreamKM(psSDInt,
+                                   &psTLReserveStreamOUT->ui32BufferOffset,
+                                   psTLReserveStreamIN->ui32Size,
+                                   psTLReserveStreamIN->ui32SizeMin,
+                                   &psTLReserveStreamOUT->ui32Available);
+
+TLReserveStream_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psSDInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeTLCommitStream(IMG_UINT32 ui32DispatchTableEntry,
+                          IMG_UINT8 * psTLCommitStreamIN_UI8,
+                          IMG_UINT8 * psTLCommitStreamOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_TLCOMMITSTREAM *psTLCommitStreamIN =
+           (PVRSRV_BRIDGE_IN_TLCOMMITSTREAM *) IMG_OFFSET_ADDR(psTLCommitStreamIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM *psTLCommitStreamOUT =
+           (PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM *) IMG_OFFSET_ADDR(psTLCommitStreamOUT_UI8, 0);
+
+       IMG_HANDLE hSD = psTLCommitStreamIN->hSD;
+       TL_STREAM_DESC *psSDInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psTLCommitStreamOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psSDInt,
+                                      hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE);
+       if (unlikely(psTLCommitStreamOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto TLCommitStream_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psTLCommitStreamOUT->eError =
+           TLServerCommitStreamKM(psSDInt, psTLCommitStreamIN->ui32ReqSize);
+
+TLCommitStream_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psSDInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static_assert(PVRSRVTL_MAX_PACKET_SIZE <= IMG_UINT32_MAX,
+             "PVRSRVTL_MAX_PACKET_SIZE must not be larger than IMG_UINT32_MAX");
+
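+/*
+ * TLWriteData is the copying write path: the payload is capped at
+ * PVRSRVTL_MAX_PACKET_SIZE, copied from user space into the scratch buffer,
+ * and then passed to TLServerWriteDataKM.
+ */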
+static IMG_INT
+PVRSRVBridgeTLWriteData(IMG_UINT32 ui32DispatchTableEntry,
+                       IMG_UINT8 * psTLWriteDataIN_UI8,
+                       IMG_UINT8 * psTLWriteDataOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_TLWRITEDATA *psTLWriteDataIN =
+           (PVRSRV_BRIDGE_IN_TLWRITEDATA *) IMG_OFFSET_ADDR(psTLWriteDataIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_TLWRITEDATA *psTLWriteDataOUT =
+           (PVRSRV_BRIDGE_OUT_TLWRITEDATA *) IMG_OFFSET_ADDR(psTLWriteDataOUT_UI8, 0);
+
+       IMG_HANDLE hSD = psTLWriteDataIN->hSD;
+       TL_STREAM_DESC *psSDInt = NULL;
+       IMG_BYTE *ui8DataInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize = ((IMG_UINT64) psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE)) + 0;
+
+       if (unlikely(psTLWriteDataIN->ui32Size > PVRSRVTL_MAX_PACKET_SIZE))
+       {
+               psTLWriteDataOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto TLWriteData_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psTLWriteDataOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto TLWriteData_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psTLWriteDataIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psTLWriteDataIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psTLWriteDataOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto TLWriteData_exit;
+                       }
+               }
+       }
+
+       if (psTLWriteDataIN->ui32Size != 0)
+       {
+               ui8DataInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE);
+       }
+
+       /* Copy the data over */
+       if (psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui8DataInt, (const void __user *)psTLWriteDataIN->pui8Data,
+                    psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE)) != PVRSRV_OK)
+               {
+                       psTLWriteDataOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto TLWriteData_exit;
+               }
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psTLWriteDataOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psSDInt,
+                                      hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE);
+       if (unlikely(psTLWriteDataOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto TLWriteData_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psTLWriteDataOUT->eError =
+           TLServerWriteDataKM(psSDInt, psTLWriteDataIN->ui32Size, ui8DataInt);
+
+TLWriteData_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psSDInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psTLWriteDataOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
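+/*
+ * Each entry point above is registered with the bridge dispatcher against
+ * its (PVRSRV_BRIDGE_PVRTL, function id) pair so incoming bridge calls can
+ * be routed to it; DeinitPVRTLBridge removes the same entries.
+ */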
+PVRSRV_ERROR InitPVRTLBridge(void);
+void DeinitPVRTLBridge(void);
+
+/*
+ * Register all PVRTL functions with services
+ */
+PVRSRV_ERROR InitPVRTLBridge(void)
+{
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM,
+                             PVRSRVBridgeTLOpenStream, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM,
+                             PVRSRVBridgeTLCloseStream, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA,
+                             PVRSRVBridgeTLAcquireData, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA,
+                             PVRSRVBridgeTLReleaseData, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS,
+                             PVRSRVBridgeTLDiscoverStreams, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM,
+                             PVRSRVBridgeTLReserveStream, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM,
+                             PVRSRVBridgeTLCommitStream, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLWRITEDATA,
+                             PVRSRVBridgeTLWriteData, NULL);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all pvrtl functions with services
+ */
+void DeinitPVRTLBridge(void)
+{
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLWRITEDATA);
+
+}
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxbreakpoint_bridge/common_rgxbreakpoint_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxbreakpoint_bridge/common_rgxbreakpoint_bridge.h
new file mode 100644
index 0000000..7b83d9a
--- /dev/null
@@ -0,0 +1,149 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for rgxbreakpoint
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxbreakpoint
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXBREAKPOINT_BRIDGE_H
+#define COMMON_RGXBREAKPOINT_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+#define PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST                  0
+#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXSETBREAKPOINT                   PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXCLEARBREAKPOINT                 PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXENABLEBREAKPOINT                        PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXDISABLEBREAKPOINT                       PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXOVERALLOCATEBPREGISTERS                 PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_LAST                   (PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+4)
+
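+/*
+ * The bridge packets below are shared between the client and server sides,
+ * so they are declared __packed and use fixed-width IMG_* types to keep the
+ * in/out layouts identical on both ends.
+ */
+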
+/*******************************************
+            RGXSetBreakpoint
+ *******************************************/
+
+/* Bridge in structure for RGXSetBreakpoint */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT_TAG
+{
+       IMG_HANDLE hPrivData;
+       IMG_UINT32 eFWDataMaster;
+       IMG_UINT32 ui32BreakpointAddr;
+       IMG_UINT32 ui32DM;
+       IMG_UINT32 ui32HandlerAddr;
+} __packed PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT;
+
+/* Bridge out structure for RGXSetBreakpoint */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT;
+
+/*******************************************
+            RGXClearBreakpoint
+ *******************************************/
+
+/* Bridge in structure for RGXClearBreakpoint */
+typedef struct PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT_TAG
+{
+       IMG_HANDLE hPrivData;
+} __packed PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT;
+
+/* Bridge out structure for RGXClearBreakpoint */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT;
+
+/*******************************************
+            RGXEnableBreakpoint
+ *******************************************/
+
+/* Bridge in structure for RGXEnableBreakpoint */
+typedef struct PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT_TAG
+{
+       IMG_HANDLE hPrivData;
+} __packed PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT;
+
+/* Bridge out structure for RGXEnableBreakpoint */
+typedef struct PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT;
+
+/*******************************************
+            RGXDisableBreakpoint
+ *******************************************/
+
+/* Bridge in structure for RGXDisableBreakpoint */
+typedef struct PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT_TAG
+{
+       IMG_HANDLE hPrivData;
+} __packed PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT;
+
+/* Bridge out structure for RGXDisableBreakpoint */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT;
+
+/*******************************************
+            RGXOverallocateBPRegisters
+ *******************************************/
+
+/* Bridge in structure for RGXOverallocateBPRegisters */
+typedef struct PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS_TAG
+{
+       IMG_UINT32 ui32SharedRegs;
+       IMG_UINT32 ui32TempRegs;
+} __packed PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS;
+
+/* Bridge out structure for RGXOverallocateBPRegisters */
+typedef struct PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS;
+
+#endif /* COMMON_RGXBREAKPOINT_BRIDGE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxbreakpoint_bridge/server_rgxbreakpoint_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxbreakpoint_bridge/server_rgxbreakpoint_bridge.c
new file mode 100644
index 0000000..bb7d012
--- /dev/null
@@ -0,0 +1,370 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for rgxbreakpoint
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxbreakpoint
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxbreakpoint.h"
+
+#include "common_rgxbreakpoint_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#if !defined(EXCLUDE_RGXBREAKPOINT_BRIDGE)
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
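+/*
+ * The set/clear/enable/disable entry points follow the same shape: resolve
+ * the hPrivData handle (PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA) under the handle
+ * lock, call the matching PVRSRVRGX*KM function with the connection's
+ * device node, and drop the handle reference on exit.
+ * RGXOverallocateBPRegisters takes no handle and calls straight through.
+ */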
+static IMG_INT
+PVRSRVBridgeRGXSetBreakpoint(IMG_UINT32 ui32DispatchTableEntry,
+                            IMG_UINT8 * psRGXSetBreakpointIN_UI8,
+                            IMG_UINT8 * psRGXSetBreakpointOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT *psRGXSetBreakpointIN =
+           (PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT *) IMG_OFFSET_ADDR(psRGXSetBreakpointIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT *psRGXSetBreakpointOUT =
+           (PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT *) IMG_OFFSET_ADDR(psRGXSetBreakpointOUT_UI8, 0);
+
+       IMG_HANDLE hPrivData = psRGXSetBreakpointIN->hPrivData;
+       IMG_HANDLE hPrivDataInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXSetBreakpointOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&hPrivDataInt,
+                                      hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE);
+       if (unlikely(psRGXSetBreakpointOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXSetBreakpoint_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXSetBreakpointOUT->eError =
+           PVRSRVRGXSetBreakpointKM(psConnection, OSGetDevNode(psConnection),
+                                    hPrivDataInt,
+                                    psRGXSetBreakpointIN->eFWDataMaster,
+                                    psRGXSetBreakpointIN->ui32BreakpointAddr,
+                                    psRGXSetBreakpointIN->ui32HandlerAddr,
+                                    psRGXSetBreakpointIN->ui32DM);
+
+RGXSetBreakpoint_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (hPrivDataInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXClearBreakpoint(IMG_UINT32 ui32DispatchTableEntry,
+                              IMG_UINT8 * psRGXClearBreakpointIN_UI8,
+                              IMG_UINT8 * psRGXClearBreakpointOUT_UI8,
+                              CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT *psRGXClearBreakpointIN =
+           (PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT *) IMG_OFFSET_ADDR(psRGXClearBreakpointIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT *psRGXClearBreakpointOUT =
+           (PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT *) IMG_OFFSET_ADDR(psRGXClearBreakpointOUT_UI8,
+                                                                    0);
+
+       IMG_HANDLE hPrivData = psRGXClearBreakpointIN->hPrivData;
+       IMG_HANDLE hPrivDataInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXClearBreakpointOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&hPrivDataInt,
+                                      hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE);
+       if (unlikely(psRGXClearBreakpointOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXClearBreakpoint_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXClearBreakpointOUT->eError =
+           PVRSRVRGXClearBreakpointKM(psConnection, OSGetDevNode(psConnection), hPrivDataInt);
+
+RGXClearBreakpoint_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (hPrivDataInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXEnableBreakpoint(IMG_UINT32 ui32DispatchTableEntry,
+                               IMG_UINT8 * psRGXEnableBreakpointIN_UI8,
+                               IMG_UINT8 * psRGXEnableBreakpointOUT_UI8,
+                               CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT *psRGXEnableBreakpointIN =
+           (PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT *) IMG_OFFSET_ADDR(psRGXEnableBreakpointIN_UI8,
+                                                                    0);
+       PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT *psRGXEnableBreakpointOUT =
+           (PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT *) IMG_OFFSET_ADDR(psRGXEnableBreakpointOUT_UI8,
+                                                                     0);
+
+       IMG_HANDLE hPrivData = psRGXEnableBreakpointIN->hPrivData;
+       IMG_HANDLE hPrivDataInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXEnableBreakpointOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&hPrivDataInt,
+                                      hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE);
+       if (unlikely(psRGXEnableBreakpointOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXEnableBreakpoint_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXEnableBreakpointOUT->eError =
+           PVRSRVRGXEnableBreakpointKM(psConnection, OSGetDevNode(psConnection), hPrivDataInt);
+
+RGXEnableBreakpoint_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (hPrivDataInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDisableBreakpoint(IMG_UINT32 ui32DispatchTableEntry,
+                                IMG_UINT8 * psRGXDisableBreakpointIN_UI8,
+                                IMG_UINT8 * psRGXDisableBreakpointOUT_UI8,
+                                CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT *psRGXDisableBreakpointIN =
+           (PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT *) IMG_OFFSET_ADDR(psRGXDisableBreakpointIN_UI8,
+                                                                     0);
+       PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT *psRGXDisableBreakpointOUT =
+           (PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT *)
+           IMG_OFFSET_ADDR(psRGXDisableBreakpointOUT_UI8, 0);
+
+       IMG_HANDLE hPrivData = psRGXDisableBreakpointIN->hPrivData;
+       IMG_HANDLE hPrivDataInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXDisableBreakpointOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&hPrivDataInt,
+                                      hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE);
+       if (unlikely(psRGXDisableBreakpointOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXDisableBreakpoint_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXDisableBreakpointOUT->eError =
+           PVRSRVRGXDisableBreakpointKM(psConnection, OSGetDevNode(psConnection), hPrivDataInt);
+
+RGXDisableBreakpoint_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (hPrivDataInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXOverallocateBPRegisters(IMG_UINT32 ui32DispatchTableEntry,
+                                      IMG_UINT8 * psRGXOverallocateBPRegistersIN_UI8,
+                                      IMG_UINT8 * psRGXOverallocateBPRegistersOUT_UI8,
+                                      CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS *psRGXOverallocateBPRegistersIN =
+           (PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS *)
+           IMG_OFFSET_ADDR(psRGXOverallocateBPRegistersIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS *psRGXOverallocateBPRegistersOUT =
+           (PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS *)
+           IMG_OFFSET_ADDR(psRGXOverallocateBPRegistersOUT_UI8, 0);
+
+       psRGXOverallocateBPRegistersOUT->eError =
+           PVRSRVRGXOverallocateBPRegistersKM(psConnection, OSGetDevNode(psConnection),
+                                              psRGXOverallocateBPRegistersIN->ui32TempRegs,
+                                              psRGXOverallocateBPRegistersIN->ui32SharedRegs);
+
+       return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+#endif /* EXCLUDE_RGXBREAKPOINT_BRIDGE */
+
+#if !defined(EXCLUDE_RGXBREAKPOINT_BRIDGE)
+PVRSRV_ERROR InitRGXBREAKPOINTBridge(void);
+void DeinitRGXBREAKPOINTBridge(void);
+
+/*
+ * Register all RGXBREAKPOINT functions with services
+ */
+PVRSRV_ERROR InitRGXBREAKPOINTBridge(void)
+{
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT,
+                             PVRSRV_BRIDGE_RGXBREAKPOINT_RGXSETBREAKPOINT,
+                             PVRSRVBridgeRGXSetBreakpoint, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT,
+                             PVRSRV_BRIDGE_RGXBREAKPOINT_RGXCLEARBREAKPOINT,
+                             PVRSRVBridgeRGXClearBreakpoint, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT,
+                             PVRSRV_BRIDGE_RGXBREAKPOINT_RGXENABLEBREAKPOINT,
+                             PVRSRVBridgeRGXEnableBreakpoint, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT,
+                             PVRSRV_BRIDGE_RGXBREAKPOINT_RGXDISABLEBREAKPOINT,
+                             PVRSRVBridgeRGXDisableBreakpoint, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT,
+                             PVRSRV_BRIDGE_RGXBREAKPOINT_RGXOVERALLOCATEBPREGISTERS,
+                             PVRSRVBridgeRGXOverallocateBPRegisters, NULL);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all rgxbreakpoint functions with services
+ */
+void DeinitRGXBREAKPOINTBridge(void)
+{
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT,
+                               PVRSRV_BRIDGE_RGXBREAKPOINT_RGXSETBREAKPOINT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT,
+                               PVRSRV_BRIDGE_RGXBREAKPOINT_RGXCLEARBREAKPOINT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT,
+                               PVRSRV_BRIDGE_RGXBREAKPOINT_RGXENABLEBREAKPOINT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT,
+                               PVRSRV_BRIDGE_RGXBREAKPOINT_RGXDISABLEBREAKPOINT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT,
+                               PVRSRV_BRIDGE_RGXBREAKPOINT_RGXOVERALLOCATEBPREGISTERS);
+
+}
+#else /* EXCLUDE_RGXBREAKPOINT_BRIDGE */
+/* This bridge is conditional on EXCLUDE_RGXBREAKPOINT_BRIDGE - when defined,
+ * do not populate the dispatch table with its functions
+ */
+#define InitRGXBREAKPOINTBridge() \
+       PVRSRV_OK
+
+#define DeinitRGXBREAKPOINTBridge()
+
+#endif /* EXCLUDE_RGXBREAKPOINT_BRIDGE */
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxcmp_bridge/common_rgxcmp_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxcmp_bridge/common_rgxcmp_bridge.h
new file mode 100644
index 0000000..396bd3f
--- /dev/null
@@ -0,0 +1,229 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for rgxcmp
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxcmp
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXCMP_BRIDGE_H
+#define COMMON_RGXCMP_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "pvrsrv_sync_km.h"
+
+#define PVRSRV_BRIDGE_RGXCMP_CMD_FIRST                 0
+#define PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT                   PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT                  PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA                       PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY                      PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE                 PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2                       PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY                      PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RGXCMP_RGXGETLASTDEVICEERROR                     PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+7
+#define PVRSRV_BRIDGE_RGXCMP_CMD_LAST                  (PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+7)
+
+/*******************************************
+            RGXCreateComputeContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateComputeContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT_TAG
+{
+       IMG_UINT64 ui64RobustnessAddress;
+       IMG_HANDLE hPrivData;
+       IMG_BYTE *pui8FrameworkCmd;
+       IMG_BYTE *pui8StaticComputeContextState;
+       IMG_UINT32 ui32ContextFlags;
+       IMG_UINT32 ui32FrameworkCmdize;
+       IMG_UINT32 ui32MaxDeadlineMS;
+       IMG_UINT32 ui32PackedCCBSizeU88;
+       IMG_UINT32 ui32Priority;
+       IMG_UINT32 ui32StaticComputeContextStateSize;
+} __packed PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT;
+
+/* Bridge out structure for RGXCreateComputeContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT_TAG
+{
+       IMG_HANDLE hComputeContext;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT;
+
+/*******************************************
+            RGXDestroyComputeContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyComputeContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT_TAG
+{
+       IMG_HANDLE hComputeContext;
+} __packed PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT;
+
+/* Bridge out structure for RGXDestroyComputeContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT;
+
+/*******************************************
+            RGXFlushComputeData
+ *******************************************/
+
+/* Bridge in structure for RGXFlushComputeData */
+typedef struct PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA_TAG
+{
+       IMG_HANDLE hComputeContext;
+} __packed PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA;
+
+/* Bridge out structure for RGXFlushComputeData */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA;
+
+/*******************************************
+            RGXSetComputeContextPriority
+ *******************************************/
+
+/* Bridge in structure for RGXSetComputeContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY_TAG
+{
+       IMG_HANDLE hComputeContext;
+       IMG_UINT32 ui32Priority;
+} __packed PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY;
+
+/* Bridge out structure for RGXSetComputeContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY;
+
+/*******************************************
+            RGXNotifyComputeWriteOffsetUpdate
+ *******************************************/
+
+/* Bridge in structure for RGXNotifyComputeWriteOffsetUpdate */
+typedef struct PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE_TAG
+{
+       IMG_HANDLE hComputeContext;
+} __packed PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE;
+
+/* Bridge out structure for RGXNotifyComputeWriteOffsetUpdate */
+typedef struct PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE;
+
+/*******************************************
+            RGXKickCDM2
+ *******************************************/
+
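+/*
+ * The pointer members below carry user-mode addresses of arrays sized by the
+ * matching counts (e.g. ui32ClientUpdateCount, ui32SyncPMRCount); the
+ * server-side bridge is expected to copy them in with OSCopyFromUser, as the
+ * other entry points in this patch do.
+ */
+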
+/* Bridge in structure for RGXKickCDM2 */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKCDM2_TAG
+{
+       IMG_UINT64 ui64DeadlineInus;
+       IMG_HANDLE hComputeContext;
+       IMG_UINT32 *pui32ClientUpdateOffset;
+       IMG_UINT32 *pui32ClientUpdateValue;
+       IMG_UINT32 *pui32SyncPMRFlags;
+       IMG_BYTE *pui8DMCmd;
+       IMG_CHAR *puiUpdateFenceName;
+       IMG_HANDLE *phClientUpdateUFOSyncPrimBlock;
+       IMG_HANDLE *phSyncPMRs;
+       PVRSRV_FENCE hCheckFenceFd;
+       PVRSRV_TIMELINE hUpdateTimeline;
+       IMG_UINT32 ui32ClientUpdateCount;
+       IMG_UINT32 ui32CmdSize;
+       IMG_UINT32 ui32ExtJobRef;
+       IMG_UINT32 ui32NumOfWorkgroups;
+       IMG_UINT32 ui32NumOfWorkitems;
+       IMG_UINT32 ui32PDumpFlags;
+       IMG_UINT32 ui32SyncPMRCount;
+} __packed PVRSRV_BRIDGE_IN_RGXKICKCDM2;
+
+/* Bridge out structure for RGXKickCDM2 */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKCDM2_TAG
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_FENCE hUpdateFence;
+} __packed PVRSRV_BRIDGE_OUT_RGXKICKCDM2;
+
+/*******************************************
+            RGXSetComputeContextProperty
+ *******************************************/
+
+/* Bridge in structure for RGXSetComputeContextProperty */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY_TAG
+{
+       IMG_UINT64 ui64Input;
+       IMG_HANDLE hComputeContext;
+       IMG_UINT32 ui32Property;
+} __packed PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY;
+
+/* Bridge out structure for RGXSetComputeContextProperty */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY_TAG
+{
+       IMG_UINT64 ui64Output;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY;
+
+/*******************************************
+            RGXGetLastDeviceError
+ *******************************************/
+
+/* Bridge in structure for RGXGetLastDeviceError */
+typedef struct PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR_TAG
+{
+       IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR;
+
+/* Bridge out structure for RGXGetLastDeviceError */
+typedef struct PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR_TAG
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32Error;
+} __packed PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR;
+
+#endif /* COMMON_RGXCMP_BRIDGE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxcmp_bridge/server_rgxcmp_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxcmp_bridge/server_rgxcmp_bridge.c
new file mode 100644
index 0000000..9b97e78
--- /dev/null
@@ -0,0 +1,1171 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for rgxcmp
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxcmp
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxcompute.h"
+
+#include "common_rgxcmp_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#include "rgx_bvnc_defs_km.h"
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
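+/*
+ * Handle-release helper: tearing down the compute-context handle is expected
+ * to funnel into PVRSRVRGXDestroyComputeContextKM via this callback.
+ */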
+static PVRSRV_ERROR _RGXCreateComputeContextpsComputeContextIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = PVRSRVRGXDestroyComputeContextKM((RGX_SERVER_COMPUTE_CONTEXT *) pvData);
+       return eError;
+}
+
+static_assert(RGXFWIF_RF_CMD_SIZE <= IMG_UINT32_MAX,
+             "RGXFWIF_RF_CMD_SIZE must not be larger than IMG_UINT32_MAX");
+static_assert(RGXFWIF_STATIC_COMPUTECONTEXT_SIZE <= IMG_UINT32_MAX,
+             "RGXFWIF_STATIC_COMPUTECONTEXT_SIZE must not be larger than IMG_UINT32_MAX");
+
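+/*
+ * Create validates ui32FrameworkCmdize and ui32StaticComputeContextStateSize
+ * against the RGXFWIF_* limits asserted above, checks that the device
+ * exposes RGX_FEATURE_COMPUTE_BIT_MASK, and then marshals both byte arrays
+ * through the shared scratch-buffer scheme.
+ */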
+static IMG_INT
+PVRSRVBridgeRGXCreateComputeContext(IMG_UINT32 ui32DispatchTableEntry,
+                                   IMG_UINT8 * psRGXCreateComputeContextIN_UI8,
+                                   IMG_UINT8 * psRGXCreateComputeContextOUT_UI8,
+                                   CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT *psRGXCreateComputeContextIN =
+           (PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT *)
+           IMG_OFFSET_ADDR(psRGXCreateComputeContextIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT *psRGXCreateComputeContextOUT =
+           (PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT *)
+           IMG_OFFSET_ADDR(psRGXCreateComputeContextOUT_UI8, 0);
+
+       IMG_BYTE *ui8FrameworkCmdInt = NULL;
+       IMG_HANDLE hPrivData = psRGXCreateComputeContextIN->hPrivData;
+       IMG_HANDLE hPrivDataInt = NULL;
+       IMG_BYTE *ui8StaticComputeContextStateInt = NULL;
+       RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) +
+           ((IMG_UINT64) psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize *
+            sizeof(IMG_BYTE)) + 0;
+
+       if (unlikely(psRGXCreateComputeContextIN->ui32FrameworkCmdize > RGXFWIF_RF_CMD_SIZE))
+       {
+               psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto RGXCreateComputeContext_exit;
+       }
+
+       if (unlikely
+           (psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize >
+            RGXFWIF_STATIC_COMPUTECONTEXT_SIZE))
+       {
+               psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto RGXCreateComputeContext_exit;
+       }
+
+       {
+               PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+               /* Check that device supports the required feature */
+               if ((psDeviceNode->pfnCheckDeviceFeature) &&
+                   !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+                                                        RGX_FEATURE_COMPUTE_BIT_MASK))
+               {
+                       psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+                       goto RGXCreateComputeContext_exit;
+               }
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto RGXCreateComputeContext_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psRGXCreateComputeContextIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateComputeContextIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto RGXCreateComputeContext_exit;
+                       }
+               }
+       }
+
+       if (psRGXCreateComputeContextIN->ui32FrameworkCmdize != 0)
+       {
+               ui8FrameworkCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE);
+       }
+
+       /* Copy the data over */
+       if (psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui8FrameworkCmdInt,
+                    (const void __user *)psRGXCreateComputeContextIN->pui8FrameworkCmd,
+                    psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) !=
+                   PVRSRV_OK)
+               {
+                       psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXCreateComputeContext_exit;
+               }
+       }
+       if (psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize != 0)
+       {
+               ui8StaticComputeContextStateInt =
+                   (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize *
+                   sizeof(IMG_BYTE);
+       }
+
+       /* Copy the data over */
+       if (psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize * sizeof(IMG_BYTE) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui8StaticComputeContextStateInt,
+                    (const void __user *)psRGXCreateComputeContextIN->
+                    pui8StaticComputeContextState,
+                    psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize *
+                    sizeof(IMG_BYTE)) != PVRSRV_OK)
+               {
+                       psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXCreateComputeContext_exit;
+               }
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXCreateComputeContextOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&hPrivDataInt,
+                                      hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE);
+       if (unlikely(psRGXCreateComputeContextOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXCreateComputeContext_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXCreateComputeContextOUT->eError =
+           PVRSRVRGXCreateComputeContextKM(psConnection, OSGetDevNode(psConnection),
+                                           psRGXCreateComputeContextIN->ui32Priority,
+                                           psRGXCreateComputeContextIN->ui32FrameworkCmdize,
+                                           ui8FrameworkCmdInt,
+                                           hPrivDataInt,
+                                           psRGXCreateComputeContextIN->
+                                           ui32StaticComputeContextStateSize,
+                                           ui8StaticComputeContextStateInt,
+                                           psRGXCreateComputeContextIN->ui32PackedCCBSizeU88,
+                                           psRGXCreateComputeContextIN->ui32ContextFlags,
+                                           psRGXCreateComputeContextIN->ui64RobustnessAddress,
+                                           psRGXCreateComputeContextIN->ui32MaxDeadlineMS,
+                                           &psComputeContextInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psRGXCreateComputeContextOUT->eError != PVRSRV_OK))
+       {
+               goto RGXCreateComputeContext_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psRGXCreateComputeContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                                                        &psRGXCreateComputeContextOUT->
+                                                                        hComputeContext,
+                                                                        (void *)
+                                                                        psComputeContextInt,
+                                                                        PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+                                                                        PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                                                        (PFN_HANDLE_RELEASE) &
+                                                                        _RGXCreateComputeContextpsComputeContextIntRelease);
+       if (unlikely(psRGXCreateComputeContextOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXCreateComputeContext_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+RGXCreateComputeContext_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (hPrivDataInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+       }
+       /* Release now we have cleaned up the looked-up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       if (psRGXCreateComputeContextOUT->eError != PVRSRV_OK)
+       {
+               if (psComputeContextInt)
+               {
+                       PVRSRVRGXDestroyComputeContextKM(psComputeContextInt);
+               }
+       }
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psRGXCreateComputeContextOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
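+/* Bridge handler for RGXDestroyComputeContext: destroys the compute context
+ * handle under the connection's handle lock. PVRSRV_ERROR_RETRY and
+ * PVRSRV_ERROR_KERNEL_CCB_FULL are returned to the caller without logging.
+ */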
+static IMG_INT
+PVRSRVBridgeRGXDestroyComputeContext(IMG_UINT32 ui32DispatchTableEntry,
+                                    IMG_UINT8 * psRGXDestroyComputeContextIN_UI8,
+                                    IMG_UINT8 * psRGXDestroyComputeContextOUT_UI8,
+                                    CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT *psRGXDestroyComputeContextIN =
+           (PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT *)
+           IMG_OFFSET_ADDR(psRGXDestroyComputeContextIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT *psRGXDestroyComputeContextOUT =
+           (PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT *)
+           IMG_OFFSET_ADDR(psRGXDestroyComputeContextOUT_UI8, 0);
+
+       {
+               PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+               /* Check that device supports the required feature */
+               if ((psDeviceNode->pfnCheckDeviceFeature) &&
+                   !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+                                                        RGX_FEATURE_COMPUTE_BIT_MASK))
+               {
+                       psRGXDestroyComputeContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+                       goto RGXDestroyComputeContext_exit;
+               }
+       }
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psHandleBase);
+
+       psRGXDestroyComputeContextOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+                                             (IMG_HANDLE) psRGXDestroyComputeContextIN->
+                                             hComputeContext,
+                                             PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+       if (unlikely
+           ((psRGXDestroyComputeContextOUT->eError != PVRSRV_OK)
+            && (psRGXDestroyComputeContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL)
+            && (psRGXDestroyComputeContextOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s",
+                        __func__, PVRSRVGetErrorString(psRGXDestroyComputeContextOUT->eError)));
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXDestroyComputeContext_exit;
+       }
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+RGXDestroyComputeContext_exit:
+
+       return 0;
+}
+
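+/* Bridge handler for RGXFlushComputeData: looks up the compute context handle
+ * and forwards the flush request to PVRSRVRGXFlushComputeDataKM.
+ */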
+static IMG_INT
+PVRSRVBridgeRGXFlushComputeData(IMG_UINT32 ui32DispatchTableEntry,
+                               IMG_UINT8 * psRGXFlushComputeDataIN_UI8,
+                               IMG_UINT8 * psRGXFlushComputeDataOUT_UI8,
+                               CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA *psRGXFlushComputeDataIN =
+           (PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA *) IMG_OFFSET_ADDR(psRGXFlushComputeDataIN_UI8,
+                                                                    0);
+       PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA *psRGXFlushComputeDataOUT =
+           (PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA *) IMG_OFFSET_ADDR(psRGXFlushComputeDataOUT_UI8,
+                                                                     0);
+
+       IMG_HANDLE hComputeContext = psRGXFlushComputeDataIN->hComputeContext;
+       RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL;
+
+       {
+               PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+               /* Check that device supports the required feature */
+               if ((psDeviceNode->pfnCheckDeviceFeature) &&
+                   !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+                                                        RGX_FEATURE_COMPUTE_BIT_MASK))
+               {
+                       psRGXFlushComputeDataOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+                       goto RGXFlushComputeData_exit;
+               }
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXFlushComputeDataOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psComputeContextInt,
+                                      hComputeContext,
+                                      PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE);
+       if (unlikely(psRGXFlushComputeDataOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXFlushComputeData_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXFlushComputeDataOUT->eError = PVRSRVRGXFlushComputeDataKM(psComputeContextInt);
+
+RGXFlushComputeData_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psComputeContextInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hComputeContext,
+                                           PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+       }
+       /* Release now we have cleaned up the looked-up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
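+/* Bridge handler for RGXSetComputeContextPriority: looks up the compute
+ * context handle and applies the requested priority via
+ * PVRSRVRGXSetComputeContextPriorityKM.
+ */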
+static IMG_INT
+PVRSRVBridgeRGXSetComputeContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+                                        IMG_UINT8 * psRGXSetComputeContextPriorityIN_UI8,
+                                        IMG_UINT8 * psRGXSetComputeContextPriorityOUT_UI8,
+                                        CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY *psRGXSetComputeContextPriorityIN =
+           (PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY *)
+           IMG_OFFSET_ADDR(psRGXSetComputeContextPriorityIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY *psRGXSetComputeContextPriorityOUT =
+           (PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY *)
+           IMG_OFFSET_ADDR(psRGXSetComputeContextPriorityOUT_UI8, 0);
+
+       IMG_HANDLE hComputeContext = psRGXSetComputeContextPriorityIN->hComputeContext;
+       RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL;
+
+       {
+               PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+               /* Check that device supports the required feature */
+               if ((psDeviceNode->pfnCheckDeviceFeature) &&
+                   !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+                                                        RGX_FEATURE_COMPUTE_BIT_MASK))
+               {
+                       psRGXSetComputeContextPriorityOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+                       goto RGXSetComputeContextPriority_exit;
+               }
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXSetComputeContextPriorityOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psComputeContextInt,
+                                      hComputeContext,
+                                      PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE);
+       if (unlikely(psRGXSetComputeContextPriorityOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXSetComputeContextPriority_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXSetComputeContextPriorityOUT->eError =
+           PVRSRVRGXSetComputeContextPriorityKM(psConnection, OSGetDevNode(psConnection),
+                                                psComputeContextInt,
+                                                psRGXSetComputeContextPriorityIN->ui32Priority);
+
+RGXSetComputeContextPriority_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psComputeContextInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hComputeContext,
+                                           PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+       }
+       /* Release now we have cleaned up the looked-up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
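+/* Bridge handler for RGXNotifyComputeWriteOffsetUpdate: looks up the compute
+ * context handle and forwards the notification to
+ * PVRSRVRGXNotifyComputeWriteOffsetUpdateKM.
+ */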
+static IMG_INT
+PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate(IMG_UINT32 ui32DispatchTableEntry,
+                                             IMG_UINT8 * psRGXNotifyComputeWriteOffsetUpdateIN_UI8,
+                                             IMG_UINT8 *
+                                             psRGXNotifyComputeWriteOffsetUpdateOUT_UI8,
+                                             CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *psRGXNotifyComputeWriteOffsetUpdateIN =
+           (PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *)
+           IMG_OFFSET_ADDR(psRGXNotifyComputeWriteOffsetUpdateIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *psRGXNotifyComputeWriteOffsetUpdateOUT
+           =
+           (PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *)
+           IMG_OFFSET_ADDR(psRGXNotifyComputeWriteOffsetUpdateOUT_UI8, 0);
+
+       IMG_HANDLE hComputeContext = psRGXNotifyComputeWriteOffsetUpdateIN->hComputeContext;
+       RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL;
+
+       {
+               PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+               /* Check that device supports the required feature */
+               if ((psDeviceNode->pfnCheckDeviceFeature) &&
+                   !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+                                                        RGX_FEATURE_COMPUTE_BIT_MASK))
+               {
+                       psRGXNotifyComputeWriteOffsetUpdateOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+                       goto RGXNotifyComputeWriteOffsetUpdate_exit;
+               }
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXNotifyComputeWriteOffsetUpdateOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psComputeContextInt,
+                                      hComputeContext,
+                                      PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE);
+       if (unlikely(psRGXNotifyComputeWriteOffsetUpdateOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXNotifyComputeWriteOffsetUpdate_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXNotifyComputeWriteOffsetUpdateOUT->eError =
+           PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(psComputeContextInt);
+
+RGXNotifyComputeWriteOffsetUpdate_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psComputeContextInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hComputeContext,
+                                           PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+       }
+       /* Release now we have cleaned up the looked-up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX,
+             "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX");
+static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX,
+             "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX");
+static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX,
+             "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX");
+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX,
+             "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX");
+
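+/* Bridge handler for RGXKickCDM2: bounds-checks the client update, command
+ * and sync PMR array sizes, stages all user-supplied arrays in a single
+ * kernel buffer, resolves the compute context, sync primitive block and PMR
+ * handles, and submits the compute kick via PVRSRVRGXKickCDMKM.
+ */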
+static IMG_INT
+PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry,
+                       IMG_UINT8 * psRGXKickCDM2IN_UI8,
+                       IMG_UINT8 * psRGXKickCDM2OUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXKICKCDM2 *psRGXKickCDM2IN =
+           (PVRSRV_BRIDGE_IN_RGXKICKCDM2 *) IMG_OFFSET_ADDR(psRGXKickCDM2IN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXKICKCDM2 *psRGXKickCDM2OUT =
+           (PVRSRV_BRIDGE_OUT_RGXKICKCDM2 *) IMG_OFFSET_ADDR(psRGXKickCDM2OUT_UI8, 0);
+
+       IMG_HANDLE hComputeContext = psRGXKickCDM2IN->hComputeContext;
+       RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL;
+       SYNC_PRIMITIVE_BLOCK **psClientUpdateUFOSyncPrimBlockInt = NULL;
+       IMG_HANDLE *hClientUpdateUFOSyncPrimBlockInt2 = NULL;
+       IMG_UINT32 *ui32ClientUpdateOffsetInt = NULL;
+       IMG_UINT32 *ui32ClientUpdateValueInt = NULL;
+       IMG_CHAR *uiUpdateFenceNameInt = NULL;
+       IMG_BYTE *ui8DMCmdInt = NULL;
+       IMG_UINT32 *ui32SyncPMRFlagsInt = NULL;
+       PMR **psSyncPMRsInt = NULL;
+       IMG_HANDLE *hSyncPMRsInt2 = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+           ((IMG_UINT64) psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) +
+           ((IMG_UINT64) psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+           ((IMG_UINT64) psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+           ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) +
+           ((IMG_UINT64) psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE)) +
+           ((IMG_UINT64) psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) +
+           ((IMG_UINT64) psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(PMR *)) +
+           ((IMG_UINT64) psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0;
+
+       if (unlikely(psRGXKickCDM2IN->ui32ClientUpdateCount > PVRSRV_MAX_SYNCS))
+       {
+               psRGXKickCDM2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto RGXKickCDM2_exit;
+       }
+
+       if (unlikely(psRGXKickCDM2IN->ui32CmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE))
+       {
+               psRGXKickCDM2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto RGXKickCDM2_exit;
+       }
+
+       if (unlikely(psRGXKickCDM2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS))
+       {
+               psRGXKickCDM2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto RGXKickCDM2_exit;
+       }
+
+       {
+               PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+               /* Check that device supports the required feature */
+               if ((psDeviceNode->pfnCheckDeviceFeature) &&
+                   !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+                                                        RGX_FEATURE_COMPUTE_BIT_MASK))
+               {
+                       psRGXKickCDM2OUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+                       goto RGXKickCDM2_exit;
+               }
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psRGXKickCDM2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto RGXKickCDM2_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psRGXKickCDM2IN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickCDM2IN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psRGXKickCDM2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto RGXKickCDM2_exit;
+                       }
+               }
+       }
+
+       if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0)
+       {
+               psClientUpdateUFOSyncPrimBlockInt =
+                   (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               OSCachedMemSet(psClientUpdateUFOSyncPrimBlockInt, 0,
+                              psRGXKickCDM2IN->ui32ClientUpdateCount *
+                              sizeof(SYNC_PRIMITIVE_BLOCK *));
+               ui32NextOffset +=
+                   psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+               hClientUpdateUFOSyncPrimBlockInt2 =
+                   (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE);
+       }
+
+       /* Copy the data over */
+       if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, hClientUpdateUFOSyncPrimBlockInt2,
+                    (const void __user *)psRGXKickCDM2IN->phClientUpdateUFOSyncPrimBlock,
+                    psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+               {
+                       psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXKickCDM2_exit;
+               }
+       }
+       if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0)
+       {
+               ui32ClientUpdateOffsetInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32ClientUpdateOffsetInt,
+                    (const void __user *)psRGXKickCDM2IN->pui32ClientUpdateOffset,
+                    psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+               {
+                       psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXKickCDM2_exit;
+               }
+       }
+       if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0)
+       {
+               ui32ClientUpdateValueInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32ClientUpdateValueInt,
+                    (const void __user *)psRGXKickCDM2IN->pui32ClientUpdateValue,
+                    psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+               {
+                       psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXKickCDM2_exit;
+               }
+       }
+
+       {
+               uiUpdateFenceNameInt =
+                   (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiUpdateFenceNameInt,
+                    (const void __user *)psRGXKickCDM2IN->puiUpdateFenceName,
+                    PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXKickCDM2_exit;
+               }
+               ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) -
+                                                   1] = '\0';
+       }
+       if (psRGXKickCDM2IN->ui32CmdSize != 0)
+       {
+               ui8DMCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE);
+       }
+
+       /* Copy the data over */
+       if (psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui8DMCmdInt, (const void __user *)psRGXKickCDM2IN->pui8DMCmd,
+                    psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK)
+               {
+                       psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXKickCDM2_exit;
+               }
+       }
+       if (psRGXKickCDM2IN->ui32SyncPMRCount != 0)
+       {
+               ui32SyncPMRFlagsInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32SyncPMRFlagsInt,
+                    (const void __user *)psRGXKickCDM2IN->pui32SyncPMRFlags,
+                    psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+               {
+                       psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXKickCDM2_exit;
+               }
+       }
+       if (psRGXKickCDM2IN->ui32SyncPMRCount != 0)
+       {
+               psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               OSCachedMemSet(psSyncPMRsInt, 0, psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(PMR *));
+               ui32NextOffset += psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(PMR *);
+               hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE);
+       }
+
+       /* Copy the data over */
+       if (psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, hSyncPMRsInt2, (const void __user *)psRGXKickCDM2IN->phSyncPMRs,
+                    psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+               {
+                       psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXKickCDM2_exit;
+               }
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXKickCDM2OUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psComputeContextInt,
+                                      hComputeContext,
+                                      PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE);
+       if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXKickCDM2_exit;
+       }
+
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < psRGXKickCDM2IN->ui32ClientUpdateCount; i++)
+               {
+                       /* Look up the address from the handle */
+                       psRGXKickCDM2OUT->eError =
+                           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                                      (void **)
+                                                      &psClientUpdateUFOSyncPrimBlockInt[i],
+                                                      hClientUpdateUFOSyncPrimBlockInt2[i],
+                                                      PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+                                                      IMG_TRUE);
+                       if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK))
+                       {
+                               UnlockHandle(psConnection->psHandleBase);
+                               goto RGXKickCDM2_exit;
+                       }
+               }
+       }
+
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < psRGXKickCDM2IN->ui32SyncPMRCount; i++)
+               {
+                       /* Look up the address from the handle */
+                       psRGXKickCDM2OUT->eError =
+                           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                                      (void **)&psSyncPMRsInt[i],
+                                                      hSyncPMRsInt2[i],
+                                                      PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+                       if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK))
+                       {
+                               UnlockHandle(psConnection->psHandleBase);
+                               goto RGXKickCDM2_exit;
+                       }
+               }
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXKickCDM2OUT->eError =
+           PVRSRVRGXKickCDMKM(psComputeContextInt,
+                              psRGXKickCDM2IN->ui32ClientUpdateCount,
+                              psClientUpdateUFOSyncPrimBlockInt,
+                              ui32ClientUpdateOffsetInt,
+                              ui32ClientUpdateValueInt,
+                              psRGXKickCDM2IN->hCheckFenceFd,
+                              psRGXKickCDM2IN->hUpdateTimeline,
+                              &psRGXKickCDM2OUT->hUpdateFence,
+                              uiUpdateFenceNameInt,
+                              psRGXKickCDM2IN->ui32CmdSize,
+                              ui8DMCmdInt,
+                              psRGXKickCDM2IN->ui32PDumpFlags,
+                              psRGXKickCDM2IN->ui32ExtJobRef,
+                              psRGXKickCDM2IN->ui32SyncPMRCount,
+                              ui32SyncPMRFlagsInt,
+                              psSyncPMRsInt,
+                              psRGXKickCDM2IN->ui32NumOfWorkgroups,
+                              psRGXKickCDM2IN->ui32NumOfWorkitems,
+                              psRGXKickCDM2IN->ui64DeadlineInus);
+
+RGXKickCDM2_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psComputeContextInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hComputeContext,
+                                           PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+       }
+
+       if (hClientUpdateUFOSyncPrimBlockInt2)
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < psRGXKickCDM2IN->ui32ClientUpdateCount; i++)
+               {
+
+                       /* Unreference the previously looked up handle */
+                       if (psClientUpdateUFOSyncPrimBlockInt[i])
+                       {
+                               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                                           hClientUpdateUFOSyncPrimBlockInt2[i],
+                                                           PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+                       }
+               }
+       }
+
+       if (hSyncPMRsInt2)
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < psRGXKickCDM2IN->ui32SyncPMRCount; i++)
+               {
+
+                       /* Unreference the previously looked up handle */
+                       if (psSyncPMRsInt[i])
+                       {
+                               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                                           hSyncPMRsInt2[i],
+                                                           PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+                       }
+               }
+       }
+       /* Release now we have cleaned up the looked-up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psRGXKickCDM2OUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
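+/* Bridge handler for RGXSetComputeContextProperty: looks up the compute
+ * context handle and forwards the property and input value to
+ * PVRSRVRGXSetComputeContextPropertyKM, returning the 64-bit output value.
+ */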
+static IMG_INT
+PVRSRVBridgeRGXSetComputeContextProperty(IMG_UINT32 ui32DispatchTableEntry,
+                                        IMG_UINT8 * psRGXSetComputeContextPropertyIN_UI8,
+                                        IMG_UINT8 * psRGXSetComputeContextPropertyOUT_UI8,
+                                        CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY *psRGXSetComputeContextPropertyIN =
+           (PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY *)
+           IMG_OFFSET_ADDR(psRGXSetComputeContextPropertyIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY *psRGXSetComputeContextPropertyOUT =
+           (PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY *)
+           IMG_OFFSET_ADDR(psRGXSetComputeContextPropertyOUT_UI8, 0);
+
+       IMG_HANDLE hComputeContext = psRGXSetComputeContextPropertyIN->hComputeContext;
+       RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL;
+
+       {
+               PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+               /* Check that device supports the required feature */
+               if ((psDeviceNode->pfnCheckDeviceFeature) &&
+                   !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+                                                        RGX_FEATURE_COMPUTE_BIT_MASK))
+               {
+                       psRGXSetComputeContextPropertyOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+                       goto RGXSetComputeContextProperty_exit;
+               }
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXSetComputeContextPropertyOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psComputeContextInt,
+                                      hComputeContext,
+                                      PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE);
+       if (unlikely(psRGXSetComputeContextPropertyOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXSetComputeContextProperty_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXSetComputeContextPropertyOUT->eError =
+           PVRSRVRGXSetComputeContextPropertyKM(psComputeContextInt,
+                                                psRGXSetComputeContextPropertyIN->ui32Property,
+                                                psRGXSetComputeContextPropertyIN->ui64Input,
+                                                &psRGXSetComputeContextPropertyOUT->ui64Output);
+
+RGXSetComputeContextProperty_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psComputeContextInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hComputeContext,
+                                           PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+       }
+       /* Release now we have cleaned up the looked-up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
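+/* Bridge handler for RGXGetLastDeviceError: takes no input and returns the
+ * last device error recorded for the connection's device node.
+ */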
+static IMG_INT
+PVRSRVBridgeRGXGetLastDeviceError(IMG_UINT32 ui32DispatchTableEntry,
+                                 IMG_UINT8 * psRGXGetLastDeviceErrorIN_UI8,
+                                 IMG_UINT8 * psRGXGetLastDeviceErrorOUT_UI8,
+                                 CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR *psRGXGetLastDeviceErrorIN =
+           (PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR *)
+           IMG_OFFSET_ADDR(psRGXGetLastDeviceErrorIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR *psRGXGetLastDeviceErrorOUT =
+           (PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR *)
+           IMG_OFFSET_ADDR(psRGXGetLastDeviceErrorOUT_UI8, 0);
+
+       {
+               PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+               /* Check that device supports the required feature */
+               if ((psDeviceNode->pfnCheckDeviceFeature) &&
+                   !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+                                                        RGX_FEATURE_COMPUTE_BIT_MASK))
+               {
+                       psRGXGetLastDeviceErrorOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+                       goto RGXGetLastDeviceError_exit;
+               }
+       }
+
+       PVR_UNREFERENCED_PARAMETER(psRGXGetLastDeviceErrorIN);
+
+       psRGXGetLastDeviceErrorOUT->eError =
+           PVRSRVRGXGetLastDeviceErrorKM(psConnection, OSGetDevNode(psConnection),
+                                         &psRGXGetLastDeviceErrorOUT->ui32Error);
+
+RGXGetLastDeviceError_exit:
+
+       return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
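+/* InitRGXCMPBridge and DeinitRGXCMPBridge populate and clear the RGXCMP
+ * entries in the services bridge dispatch table.
+ */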
+PVRSRV_ERROR InitRGXCMPBridge(void);
+void DeinitRGXCMPBridge(void);
+
+/*
+ * Register all RGXCMP functions with services
+ */
+PVRSRV_ERROR InitRGXCMPBridge(void)
+{
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT,
+                             PVRSRVBridgeRGXCreateComputeContext, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT,
+                             PVRSRVBridgeRGXDestroyComputeContext, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA,
+                             PVRSRVBridgeRGXFlushComputeData, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+                             PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY,
+                             PVRSRVBridgeRGXSetComputeContextPriority, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+                             PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE,
+                             PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2,
+                             PVRSRVBridgeRGXKickCDM2, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+                             PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY,
+                             PVRSRVBridgeRGXSetComputeContextProperty, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXGETLASTDEVICEERROR,
+                             PVRSRVBridgeRGXGetLastDeviceError, NULL);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXCMP functions with services
+ */
+void DeinitRGXCMPBridge(void)
+{
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+                               PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+                               PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+                               PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+                               PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXGETLASTDEVICEERROR);
+
+}
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxfwdbg_bridge/common_rgxfwdbg_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxfwdbg_bridge/common_rgxfwdbg_bridge.h
new file mode 100644 (file)
index 0000000..68c79e5
--- /dev/null
@@ -0,0 +1,200 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for rgxfwdbg
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxfwdbg
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXFWDBG_BRIDGE_H
+#define COMMON_RGXFWDBG_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+#include "rgx_bridge.h"
+#include "pvrsrv_memallocflags.h"
+
+#define PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST                       0
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG                      PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST                  PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE                        PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY                       PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE                   PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE                  PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGWDGCONFIGURE                  PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME                  PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+7
+#define PVRSRV_BRIDGE_RGXFWDBG_CMD_LAST                        (PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+7)
+
+/*******************************************
+            RGXFWDebugSetFWLog
+ *******************************************/
+
+/* Bridge in structure for RGXFWDebugSetFWLog */
+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG_TAG
+{
+       IMG_UINT32 ui32RGXFWLogType;
+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG;
+
+/* Bridge out structure for RGXFWDebugSetFWLog */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG;
+
+/*******************************************
+            RGXFWDebugDumpFreelistPageList
+ *******************************************/
+
+/* Bridge in structure for RGXFWDebugDumpFreelistPageList */
+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST_TAG
+{
+       IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST;
+
+/* Bridge out structure for RGXFWDebugDumpFreelistPageList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST;
+
+/*******************************************
+            RGXFWDebugSetHCSDeadline
+ *******************************************/
+
+/* Bridge in structure for RGXFWDebugSetHCSDeadline */
+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE_TAG
+{
+       IMG_UINT32 ui32RGXHCSDeadline;
+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE;
+
+/* Bridge out structure for RGXFWDebugSetHCSDeadline */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE;
+
+/*******************************************
+            RGXFWDebugSetOSidPriority
+ *******************************************/
+
+/* Bridge in structure for RGXFWDebugSetOSidPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY_TAG
+{
+       IMG_UINT32 ui32OSid;
+       IMG_UINT32 ui32Priority;
+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY;
+
+/* Bridge out structure for RGXFWDebugSetOSidPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY;
+
+/*******************************************
+            RGXFWDebugSetOSNewOnlineState
+ *******************************************/
+
+/* Bridge in structure for RGXFWDebugSetOSNewOnlineState */
+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE_TAG
+{
+       IMG_UINT32 ui32OSNewState;
+       IMG_UINT32 ui32OSid;
+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE;
+
+/* Bridge out structure for RGXFWDebugSetOSNewOnlineState */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE;
+
+/*******************************************
+            RGXFWDebugPHRConfigure
+ *******************************************/
+
+/* Bridge in structure for RGXFWDebugPHRConfigure */
+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE_TAG
+{
+       IMG_UINT32 ui32ui32PHRMode;
+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE;
+
+/* Bridge out structure for RGXFWDebugPHRConfigure */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE;
+
+/*******************************************
+            RGXFWDebugWdgConfigure
+ *******************************************/
+
+/* Bridge in structure for RGXFWDebugWdgConfigure */
+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGWDGCONFIGURE_TAG
+{
+       IMG_UINT32 ui32ui32WdgPeriodUs;
+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGWDGCONFIGURE;
+
+/* Bridge out structure for RGXFWDebugWdgConfigure */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGWDGCONFIGURE_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGWDGCONFIGURE;
+
+/*******************************************
+            RGXCurrentTime
+ *******************************************/
+
+/* Bridge in structure for RGXCurrentTime */
+typedef struct PVRSRV_BRIDGE_IN_RGXCURRENTTIME_TAG
+{
+       IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_RGXCURRENTTIME;
+
+/* Bridge out structure for RGXCurrentTime */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCURRENTTIME_TAG
+{
+       IMG_UINT64 ui64Time;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXCURRENTTIME;
+
+#endif /* COMMON_RGXFWDBG_BRIDGE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxfwdbg_bridge/server_rgxfwdbg_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxfwdbg_bridge/server_rgxfwdbg_bridge.c
new file mode 100644 (file)
index 0000000..e66ce89
--- /dev/null
@@ -0,0 +1,305 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for rgxfwdbg
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxfwdbg
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem_server.h"
+#include "rgxfwdbg.h"
+#include "pmr.h"
+#include "rgxtimecorr.h"
+
+#include "common_rgxfwdbg_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
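+/*
+ * Each handler below follows the same fixed-size marshalling pattern: the raw
+ * IN/OUT buffers passed in by the bridge layer are cast to the packed bridge
+ * structures declared in common_rgxfwdbg_bridge.h, the matching PVRSRV...KM()
+ * implementation is called with the device node for this connection, and the
+ * handler returns 0 with the status reported through the eError field.
+ */
+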
+static IMG_INT
+PVRSRVBridgeRGXFWDebugSetFWLog(IMG_UINT32 ui32DispatchTableEntry,
+                              IMG_UINT8 * psRGXFWDebugSetFWLogIN_UI8,
+                              IMG_UINT8 * psRGXFWDebugSetFWLogOUT_UI8,
+                              CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG *psRGXFWDebugSetFWLogIN =
+           (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG *) IMG_OFFSET_ADDR(psRGXFWDebugSetFWLogIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG *psRGXFWDebugSetFWLogOUT =
+           (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG *) IMG_OFFSET_ADDR(psRGXFWDebugSetFWLogOUT_UI8,
+                                                                    0);
+
+       psRGXFWDebugSetFWLogOUT->eError =
+           PVRSRVRGXFWDebugSetFWLogKM(psConnection, OSGetDevNode(psConnection),
+                                      psRGXFWDebugSetFWLogIN->ui32RGXFWLogType);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXFWDebugDumpFreelistPageList(IMG_UINT32 ui32DispatchTableEntry,
+                                          IMG_UINT8 * psRGXFWDebugDumpFreelistPageListIN_UI8,
+                                          IMG_UINT8 * psRGXFWDebugDumpFreelistPageListOUT_UI8,
+                                          CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST *psRGXFWDebugDumpFreelistPageListIN =
+           (PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST *)
+           IMG_OFFSET_ADDR(psRGXFWDebugDumpFreelistPageListIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST *psRGXFWDebugDumpFreelistPageListOUT =
+           (PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST *)
+           IMG_OFFSET_ADDR(psRGXFWDebugDumpFreelistPageListOUT_UI8, 0);
+
+       PVR_UNREFERENCED_PARAMETER(psRGXFWDebugDumpFreelistPageListIN);
+
+       psRGXFWDebugDumpFreelistPageListOUT->eError =
+           PVRSRVRGXFWDebugDumpFreelistPageListKM(psConnection, OSGetDevNode(psConnection));
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXFWDebugSetHCSDeadline(IMG_UINT32 ui32DispatchTableEntry,
+                                    IMG_UINT8 * psRGXFWDebugSetHCSDeadlineIN_UI8,
+                                    IMG_UINT8 * psRGXFWDebugSetHCSDeadlineOUT_UI8,
+                                    CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE *psRGXFWDebugSetHCSDeadlineIN =
+           (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE *)
+           IMG_OFFSET_ADDR(psRGXFWDebugSetHCSDeadlineIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE *psRGXFWDebugSetHCSDeadlineOUT =
+           (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE *)
+           IMG_OFFSET_ADDR(psRGXFWDebugSetHCSDeadlineOUT_UI8, 0);
+
+       psRGXFWDebugSetHCSDeadlineOUT->eError =
+           PVRSRVRGXFWDebugSetHCSDeadlineKM(psConnection, OSGetDevNode(psConnection),
+                                            psRGXFWDebugSetHCSDeadlineIN->ui32RGXHCSDeadline);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXFWDebugSetOSidPriority(IMG_UINT32 ui32DispatchTableEntry,
+                                     IMG_UINT8 * psRGXFWDebugSetOSidPriorityIN_UI8,
+                                     IMG_UINT8 * psRGXFWDebugSetOSidPriorityOUT_UI8,
+                                     CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY *psRGXFWDebugSetOSidPriorityIN =
+           (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY *)
+           IMG_OFFSET_ADDR(psRGXFWDebugSetOSidPriorityIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY *psRGXFWDebugSetOSidPriorityOUT =
+           (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY *)
+           IMG_OFFSET_ADDR(psRGXFWDebugSetOSidPriorityOUT_UI8, 0);
+
+       psRGXFWDebugSetOSidPriorityOUT->eError =
+           PVRSRVRGXFWDebugSetOSidPriorityKM(psConnection, OSGetDevNode(psConnection),
+                                             psRGXFWDebugSetOSidPriorityIN->ui32OSid,
+                                             psRGXFWDebugSetOSidPriorityIN->ui32Priority);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXFWDebugSetOSNewOnlineState(IMG_UINT32 ui32DispatchTableEntry,
+                                         IMG_UINT8 * psRGXFWDebugSetOSNewOnlineStateIN_UI8,
+                                         IMG_UINT8 * psRGXFWDebugSetOSNewOnlineStateOUT_UI8,
+                                         CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE *psRGXFWDebugSetOSNewOnlineStateIN =
+           (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE *)
+           IMG_OFFSET_ADDR(psRGXFWDebugSetOSNewOnlineStateIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE *psRGXFWDebugSetOSNewOnlineStateOUT =
+           (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE *)
+           IMG_OFFSET_ADDR(psRGXFWDebugSetOSNewOnlineStateOUT_UI8, 0);
+
+       psRGXFWDebugSetOSNewOnlineStateOUT->eError =
+           PVRSRVRGXFWDebugSetOSNewOnlineStateKM(psConnection, OSGetDevNode(psConnection),
+                                                 psRGXFWDebugSetOSNewOnlineStateIN->ui32OSid,
+                                                 psRGXFWDebugSetOSNewOnlineStateIN->
+                                                 ui32OSNewState);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXFWDebugPHRConfigure(IMG_UINT32 ui32DispatchTableEntry,
+                                  IMG_UINT8 * psRGXFWDebugPHRConfigureIN_UI8,
+                                  IMG_UINT8 * psRGXFWDebugPHRConfigureOUT_UI8,
+                                  CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE *psRGXFWDebugPHRConfigureIN =
+           (PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE *)
+           IMG_OFFSET_ADDR(psRGXFWDebugPHRConfigureIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE *psRGXFWDebugPHRConfigureOUT =
+           (PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE *)
+           IMG_OFFSET_ADDR(psRGXFWDebugPHRConfigureOUT_UI8, 0);
+
+       psRGXFWDebugPHRConfigureOUT->eError =
+           PVRSRVRGXFWDebugPHRConfigureKM(psConnection, OSGetDevNode(psConnection),
+                                          psRGXFWDebugPHRConfigureIN->ui32ui32PHRMode);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXFWDebugWdgConfigure(IMG_UINT32 ui32DispatchTableEntry,
+                                  IMG_UINT8 * psRGXFWDebugWdgConfigureIN_UI8,
+                                  IMG_UINT8 * psRGXFWDebugWdgConfigureOUT_UI8,
+                                  CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXFWDEBUGWDGCONFIGURE *psRGXFWDebugWdgConfigureIN =
+           (PVRSRV_BRIDGE_IN_RGXFWDEBUGWDGCONFIGURE *)
+           IMG_OFFSET_ADDR(psRGXFWDebugWdgConfigureIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXFWDEBUGWDGCONFIGURE *psRGXFWDebugWdgConfigureOUT =
+           (PVRSRV_BRIDGE_OUT_RGXFWDEBUGWDGCONFIGURE *)
+           IMG_OFFSET_ADDR(psRGXFWDebugWdgConfigureOUT_UI8, 0);
+
+       psRGXFWDebugWdgConfigureOUT->eError =
+           PVRSRVRGXFWDebugWdgConfigureKM(psConnection, OSGetDevNode(psConnection),
+                                          psRGXFWDebugWdgConfigureIN->ui32ui32WdgPeriodUs);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXCurrentTime(IMG_UINT32 ui32DispatchTableEntry,
+                          IMG_UINT8 * psRGXCurrentTimeIN_UI8,
+                          IMG_UINT8 * psRGXCurrentTimeOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXCURRENTTIME *psRGXCurrentTimeIN =
+           (PVRSRV_BRIDGE_IN_RGXCURRENTTIME *) IMG_OFFSET_ADDR(psRGXCurrentTimeIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXCURRENTTIME *psRGXCurrentTimeOUT =
+           (PVRSRV_BRIDGE_OUT_RGXCURRENTTIME *) IMG_OFFSET_ADDR(psRGXCurrentTimeOUT_UI8, 0);
+
+       PVR_UNREFERENCED_PARAMETER(psRGXCurrentTimeIN);
+
+       psRGXCurrentTimeOUT->eError =
+           PVRSRVRGXCurrentTime(psConnection, OSGetDevNode(psConnection),
+                                &psRGXCurrentTimeOUT->ui64Time);
+
+       return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
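+/*
+ * InitRGXFWDBGBridge() registers every RGXFWDBG command with the Services
+ * bridge dispatch table via SetDispatchTableEntry(), pairing the bridge group
+ * and command ID with the handler implemented above; DeinitRGXFWDBGBridge()
+ * removes those entries again on teardown.
+ */
+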
+PVRSRV_ERROR InitRGXFWDBGBridge(void);
+void DeinitRGXFWDBGBridge(void);
+
+/*
+ * Register all RGXFWDBG functions with services
+ */
+PVRSRV_ERROR InitRGXFWDBGBridge(void)
+{
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG,
+                             PVRSRVBridgeRGXFWDebugSetFWLog, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+                             PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST,
+                             PVRSRVBridgeRGXFWDebugDumpFreelistPageList, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+                             PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE,
+                             PVRSRVBridgeRGXFWDebugSetHCSDeadline, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+                             PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY,
+                             PVRSRVBridgeRGXFWDebugSetOSidPriority, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+                             PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE,
+                             PVRSRVBridgeRGXFWDebugSetOSNewOnlineState, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE,
+                             PVRSRVBridgeRGXFWDebugPHRConfigure, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGWDGCONFIGURE,
+                             PVRSRVBridgeRGXFWDebugWdgConfigure, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME,
+                             PVRSRVBridgeRGXCurrentTime, NULL);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXFWDBG functions from services
+ */
+void DeinitRGXFWDBGBridge(void)
+{
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+                               PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+                               PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+                               PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+                               PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+                               PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+                               PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGWDGCONFIGURE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME);
+
+}
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxhwperf_bridge/common_rgxhwperf_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxhwperf_bridge/common_rgxhwperf_bridge.h
new file mode 100644 (file)
index 0000000..08e80bb
--- /dev/null
@@ -0,0 +1,172 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for rgxhwperf
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxhwperf
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXHWPERF_BRIDGE_H
+#define COMMON_RGXHWPERF_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "rgx_hwperf.h"
+
+#define PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST                      0
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF                  PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS                   PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGMUXHWPERFCOUNTERS                     PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS                 PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS                        PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS                       PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST                       (PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+5)
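+
+/* The command IDs above are offsets from PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST
+ * within the RGXHWPERF bridge group; PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST must be
+ * kept equal to the highest offset in use. */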
+
+/*******************************************
+            RGXCtrlHWPerf
+ *******************************************/
+
+/* Bridge in structure for RGXCtrlHWPerf */
+typedef struct PVRSRV_BRIDGE_IN_RGXCTRLHWPERF_TAG
+{
+       IMG_UINT64 ui64Mask;
+       IMG_BOOL bToggle;
+       IMG_UINT32 ui32StreamId;
+} __packed PVRSRV_BRIDGE_IN_RGXCTRLHWPERF;
+
+/* Bridge out structure for RGXCtrlHWPerf */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF;
+
+/*******************************************
+            RGXGetHWPerfBvncFeatureFlags
+ *******************************************/
+
+/* Bridge in structure for RGXGetHWPerfBvncFeatureFlags */
+typedef struct PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS_TAG
+{
+       IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS;
+
+/* Bridge out structure for RGXGetHWPerfBvncFeatureFlags */
+typedef struct PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS_TAG
+{
+       RGX_HWPERF_BVNC sBVNC;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS;
+
+/*******************************************
+            RGXConfigMuxHWPerfCounters
+ *******************************************/
+
+/* Bridge in structure for RGXConfigMuxHWPerfCounters */
+typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGMUXHWPERFCOUNTERS_TAG
+{
+       RGX_HWPERF_CONFIG_MUX_CNTBLK *psBlockConfigs;
+       IMG_UINT32 ui32ArrayLen;
+} __packed PVRSRV_BRIDGE_IN_RGXCONFIGMUXHWPERFCOUNTERS;
+
+/* Bridge out structure for RGXConfigMuxHWPerfCounters */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGMUXHWPERFCOUNTERS_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXCONFIGMUXHWPERFCOUNTERS;
+
+/*******************************************
+            RGXControlHWPerfBlocks
+ *******************************************/
+
+/* Bridge in structure for RGXControlHWPerfBlocks */
+typedef struct PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS_TAG
+{
+       IMG_UINT16 *pui16BlockIDs;
+       IMG_BOOL bEnable;
+       IMG_UINT32 ui32ArrayLen;
+} __packed PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS;
+
+/* Bridge out structure for RGXControlHWPerfBlocks */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS;
+
+/*******************************************
+            RGXConfigCustomCounters
+ *******************************************/
+
+/* Bridge in structure for RGXConfigCustomCounters */
+typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS_TAG
+{
+       IMG_UINT32 *pui32CustomCounterIDs;
+       IMG_UINT16 ui16CustomBlockID;
+       IMG_UINT16 ui16NumCustomCounters;
+} __packed PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS;
+
+/* Bridge out structure for RGXConfigCustomCounters */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS;
+
+/*******************************************
+            RGXConfigureHWPerfBlocks
+ *******************************************/
+
+/* Bridge in structure for RGXConfigureHWPerfBlocks */
+typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS_TAG
+{
+       RGX_HWPERF_CONFIG_CNTBLK *psBlockConfigs;
+       IMG_UINT32 ui32ArrayLen;
+       IMG_UINT32 ui32CtrlWord;
+} __packed PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS;
+
+/* Bridge out structure for RGXConfigureHWPerfBlocks */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS;
+
+#endif /* COMMON_RGXHWPERF_BRIDGE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxhwperf_bridge/server_rgxhwperf_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxhwperf_bridge/server_rgxhwperf_bridge.c
new file mode 100644 (file)
index 0000000..cc22ee3
--- /dev/null
@@ -0,0 +1,651 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for rgxhwperf
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxhwperf
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxhwperf.h"
+#include "rgx_fwif_km.h"
+
+#include "common_rgxhwperf_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXCtrlHWPerf(IMG_UINT32 ui32DispatchTableEntry,
+                         IMG_UINT8 * psRGXCtrlHWPerfIN_UI8,
+                         IMG_UINT8 * psRGXCtrlHWPerfOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXCTRLHWPERF *psRGXCtrlHWPerfIN =
+           (PVRSRV_BRIDGE_IN_RGXCTRLHWPERF *) IMG_OFFSET_ADDR(psRGXCtrlHWPerfIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF *psRGXCtrlHWPerfOUT =
+           (PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF *) IMG_OFFSET_ADDR(psRGXCtrlHWPerfOUT_UI8, 0);
+
+       psRGXCtrlHWPerfOUT->eError =
+           PVRSRVRGXCtrlHWPerfKM(psConnection, OSGetDevNode(psConnection),
+                                 psRGXCtrlHWPerfIN->ui32StreamId,
+                                 psRGXCtrlHWPerfIN->bToggle, psRGXCtrlHWPerfIN->ui64Mask);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXGetHWPerfBvncFeatureFlags(IMG_UINT32 ui32DispatchTableEntry,
+                                        IMG_UINT8 * psRGXGetHWPerfBvncFeatureFlagsIN_UI8,
+                                        IMG_UINT8 * psRGXGetHWPerfBvncFeatureFlagsOUT_UI8,
+                                        CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS *psRGXGetHWPerfBvncFeatureFlagsIN =
+           (PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS *)
+           IMG_OFFSET_ADDR(psRGXGetHWPerfBvncFeatureFlagsIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS *psRGXGetHWPerfBvncFeatureFlagsOUT =
+           (PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS *)
+           IMG_OFFSET_ADDR(psRGXGetHWPerfBvncFeatureFlagsOUT_UI8, 0);
+
+       PVR_UNREFERENCED_PARAMETER(psRGXGetHWPerfBvncFeatureFlagsIN);
+
+       psRGXGetHWPerfBvncFeatureFlagsOUT->eError =
+           PVRSRVRGXGetHWPerfBvncFeatureFlagsKM(psConnection, OSGetDevNode(psConnection),
+                                                &psRGXGetHWPerfBvncFeatureFlagsOUT->sBVNC);
+
+       return 0;
+}
+
+static_assert(RGXFWIF_HWPERF_CTRL_BLKS_MAX <= IMG_UINT32_MAX,
+             "RGXFWIF_HWPERF_CTRL_BLKS_MAX must not be larger than IMG_UINT32_MAX");
+
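+/*
+ * The handlers below marshal variable-length arrays from user space: the
+ * required buffer size is computed in 64 bits and checked against the
+ * per-command limit (the static_asserts confirm each limit fits in 32 bits)
+ * and against IMG_UINT32_MAX; spare space at the end of the bridge input
+ * buffer is reused when it is large enough (except on INTEGRITY OS),
+ * otherwise a temporary buffer is allocated with OSAllocMemNoStats(); the
+ * array is then copied in with OSCopyFromUser() before calling the KM
+ * implementation, and any allocation is released on the common exit path.
+ */
+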
+static IMG_INT
+PVRSRVBridgeRGXConfigMuxHWPerfCounters(IMG_UINT32 ui32DispatchTableEntry,
+                                      IMG_UINT8 * psRGXConfigMuxHWPerfCountersIN_UI8,
+                                      IMG_UINT8 * psRGXConfigMuxHWPerfCountersOUT_UI8,
+                                      CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXCONFIGMUXHWPERFCOUNTERS *psRGXConfigMuxHWPerfCountersIN =
+           (PVRSRV_BRIDGE_IN_RGXCONFIGMUXHWPERFCOUNTERS *)
+           IMG_OFFSET_ADDR(psRGXConfigMuxHWPerfCountersIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXCONFIGMUXHWPERFCOUNTERS *psRGXConfigMuxHWPerfCountersOUT =
+           (PVRSRV_BRIDGE_OUT_RGXCONFIGMUXHWPERFCOUNTERS *)
+           IMG_OFFSET_ADDR(psRGXConfigMuxHWPerfCountersOUT_UI8, 0);
+
+       RGX_HWPERF_CONFIG_MUX_CNTBLK *psBlockConfigsInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen *
+            sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK)) + 0;
+
+       if (unlikely(psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen > RGXFWIF_HWPERF_CTRL_BLKS_MAX))
+       {
+               psRGXConfigMuxHWPerfCountersOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto RGXConfigMuxHWPerfCounters_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psRGXConfigMuxHWPerfCountersOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto RGXConfigMuxHWPerfCounters_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psRGXConfigMuxHWPerfCountersIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer =
+                           (IMG_BYTE *) (void *)psRGXConfigMuxHWPerfCountersIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psRGXConfigMuxHWPerfCountersOUT->eError =
+                                   PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto RGXConfigMuxHWPerfCounters_exit;
+                       }
+               }
+       }
+
+       if (psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen != 0)
+       {
+               psBlockConfigsInt =
+                   (RGX_HWPERF_CONFIG_MUX_CNTBLK *) IMG_OFFSET_ADDR(pArrayArgsBuffer,
+                                                                    ui32NextOffset);
+               ui32NextOffset +=
+                   psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen *
+                   sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK);
+       }
+
+       /* Copy the data over */
+       if (psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, psBlockConfigsInt,
+                    (const void __user *)psRGXConfigMuxHWPerfCountersIN->psBlockConfigs,
+                    psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen *
+                    sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK)) != PVRSRV_OK)
+               {
+                       psRGXConfigMuxHWPerfCountersOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXConfigMuxHWPerfCounters_exit;
+               }
+       }
+
+       psRGXConfigMuxHWPerfCountersOUT->eError =
+           PVRSRVRGXConfigMuxHWPerfCountersKM(psConnection, OSGetDevNode(psConnection),
+                                              psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen,
+                                              psBlockConfigsInt);
+
+RGXConfigMuxHWPerfCounters_exit:
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psRGXConfigMuxHWPerfCountersOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static_assert(RGXFWIF_HWPERF_CTRL_BLKS_MAX <= IMG_UINT32_MAX,
+             "RGXFWIF_HWPERF_CTRL_BLKS_MAX must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeRGXControlHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry,
+                                  IMG_UINT8 * psRGXControlHWPerfBlocksIN_UI8,
+                                  IMG_UINT8 * psRGXControlHWPerfBlocksOUT_UI8,
+                                  CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS *psRGXControlHWPerfBlocksIN =
+           (PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS *)
+           IMG_OFFSET_ADDR(psRGXControlHWPerfBlocksIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS *psRGXControlHWPerfBlocksOUT =
+           (PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS *)
+           IMG_OFFSET_ADDR(psRGXControlHWPerfBlocksOUT_UI8, 0);
+
+       IMG_UINT16 *ui16BlockIDsInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16)) + 0;
+
+       if (unlikely(psRGXControlHWPerfBlocksIN->ui32ArrayLen > RGXFWIF_HWPERF_CTRL_BLKS_MAX))
+       {
+               psRGXControlHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto RGXControlHWPerfBlocks_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psRGXControlHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto RGXControlHWPerfBlocks_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psRGXControlHWPerfBlocksIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXControlHWPerfBlocksIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psRGXControlHWPerfBlocksOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto RGXControlHWPerfBlocks_exit;
+                       }
+               }
+       }
+
+       if (psRGXControlHWPerfBlocksIN->ui32ArrayLen != 0)
+       {
+               ui16BlockIDsInt = (IMG_UINT16 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16);
+       }
+
+       /* Copy the data over */
+       if (psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui16BlockIDsInt,
+                    (const void __user *)psRGXControlHWPerfBlocksIN->pui16BlockIDs,
+                    psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16)) != PVRSRV_OK)
+               {
+                       psRGXControlHWPerfBlocksOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXControlHWPerfBlocks_exit;
+               }
+       }
+
+       psRGXControlHWPerfBlocksOUT->eError =
+           PVRSRVRGXControlHWPerfBlocksKM(psConnection, OSGetDevNode(psConnection),
+                                          psRGXControlHWPerfBlocksIN->bEnable,
+                                          psRGXControlHWPerfBlocksIN->ui32ArrayLen,
+                                          ui16BlockIDsInt);
+
+RGXControlHWPerfBlocks_exit:
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psRGXControlHWPerfBlocksOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static_assert(RGX_HWPERF_MAX_CUSTOM_CNTRS <= IMG_UINT32_MAX,
+             "RGX_HWPERF_MAX_CUSTOM_CNTRS must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeRGXConfigCustomCounters(IMG_UINT32 ui32DispatchTableEntry,
+                                   IMG_UINT8 * psRGXConfigCustomCountersIN_UI8,
+                                   IMG_UINT8 * psRGXConfigCustomCountersOUT_UI8,
+                                   CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS *psRGXConfigCustomCountersIN =
+           (PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS *)
+           IMG_OFFSET_ADDR(psRGXConfigCustomCountersIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS *psRGXConfigCustomCountersOUT =
+           (PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS *)
+           IMG_OFFSET_ADDR(psRGXConfigCustomCountersOUT_UI8, 0);
+
+       IMG_UINT32 *ui32CustomCounterIDsInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psRGXConfigCustomCountersIN->ui16NumCustomCounters * sizeof(IMG_UINT32)) +
+           0;
+
+       if (unlikely
+           (psRGXConfigCustomCountersIN->ui16NumCustomCounters > RGX_HWPERF_MAX_CUSTOM_CNTRS))
+       {
+               psRGXConfigCustomCountersOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto RGXConfigCustomCounters_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psRGXConfigCustomCountersOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto RGXConfigCustomCounters_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psRGXConfigCustomCountersIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXConfigCustomCountersIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psRGXConfigCustomCountersOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto RGXConfigCustomCounters_exit;
+                       }
+               }
+       }
+
+       if (psRGXConfigCustomCountersIN->ui16NumCustomCounters != 0)
+       {
+               ui32CustomCounterIDsInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psRGXConfigCustomCountersIN->ui16NumCustomCounters * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psRGXConfigCustomCountersIN->ui16NumCustomCounters * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32CustomCounterIDsInt,
+                    (const void __user *)psRGXConfigCustomCountersIN->pui32CustomCounterIDs,
+                    psRGXConfigCustomCountersIN->ui16NumCustomCounters * sizeof(IMG_UINT32)) !=
+                   PVRSRV_OK)
+               {
+                       psRGXConfigCustomCountersOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXConfigCustomCounters_exit;
+               }
+       }
+
+       psRGXConfigCustomCountersOUT->eError =
+           PVRSRVRGXConfigCustomCountersKM(psConnection, OSGetDevNode(psConnection),
+                                           psRGXConfigCustomCountersIN->ui16CustomBlockID,
+                                           psRGXConfigCustomCountersIN->ui16NumCustomCounters,
+                                           ui32CustomCounterIDsInt);
+
+RGXConfigCustomCounters_exit:
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psRGXConfigCustomCountersOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static_assert(RGXFWIF_HWPERF_CTRL_BLKS_MAX <= IMG_UINT32_MAX,
+             "RGXFWIF_HWPERF_CTRL_BLKS_MAX must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeRGXConfigureHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry,
+                                    IMG_UINT8 * psRGXConfigureHWPerfBlocksIN_UI8,
+                                    IMG_UINT8 * psRGXConfigureHWPerfBlocksOUT_UI8,
+                                    CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS *psRGXConfigureHWPerfBlocksIN =
+           (PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS *)
+           IMG_OFFSET_ADDR(psRGXConfigureHWPerfBlocksIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS *psRGXConfigureHWPerfBlocksOUT =
+           (PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS *)
+           IMG_OFFSET_ADDR(psRGXConfigureHWPerfBlocksOUT_UI8, 0);
+
+       RGX_HWPERF_CONFIG_CNTBLK *psBlockConfigsInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psRGXConfigureHWPerfBlocksIN->ui32ArrayLen *
+            sizeof(RGX_HWPERF_CONFIG_CNTBLK)) + 0;
+
+       if (unlikely(psRGXConfigureHWPerfBlocksIN->ui32ArrayLen > RGXFWIF_HWPERF_CTRL_BLKS_MAX))
+       {
+               psRGXConfigureHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto RGXConfigureHWPerfBlocks_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psRGXConfigureHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto RGXConfigureHWPerfBlocks_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psRGXConfigureHWPerfBlocksIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXConfigureHWPerfBlocksIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psRGXConfigureHWPerfBlocksOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto RGXConfigureHWPerfBlocks_exit;
+                       }
+               }
+       }
+
+       if (psRGXConfigureHWPerfBlocksIN->ui32ArrayLen != 0)
+       {
+               psBlockConfigsInt =
+                   (RGX_HWPERF_CONFIG_CNTBLK *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psRGXConfigureHWPerfBlocksIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK);
+       }
+
+       /* Copy the data over */
+       if (psRGXConfigureHWPerfBlocksIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, psBlockConfigsInt,
+                    (const void __user *)psRGXConfigureHWPerfBlocksIN->psBlockConfigs,
+                    psRGXConfigureHWPerfBlocksIN->ui32ArrayLen *
+                    sizeof(RGX_HWPERF_CONFIG_CNTBLK)) != PVRSRV_OK)
+               {
+                       psRGXConfigureHWPerfBlocksOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXConfigureHWPerfBlocks_exit;
+               }
+       }
+
+       psRGXConfigureHWPerfBlocksOUT->eError =
+           PVRSRVRGXConfigureHWPerfBlocksKM(psConnection, OSGetDevNode(psConnection),
+                                            psRGXConfigureHWPerfBlocksIN->ui32CtrlWord,
+                                            psRGXConfigureHWPerfBlocksIN->ui32ArrayLen,
+                                            psBlockConfigsInt);
+
+RGXConfigureHWPerfBlocks_exit:
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psRGXConfigureHWPerfBlocksOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitRGXHWPERFBridge(void);
+void DeinitRGXHWPERFBridge(void);
+
+/*
+ * Register all RGXHWPERF functions with services
+ */
+PVRSRV_ERROR InitRGXHWPERFBridge(void)
+{
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF,
+                             PVRSRVBridgeRGXCtrlHWPerf, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+                             PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS,
+                             PVRSRVBridgeRGXGetHWPerfBvncFeatureFlags, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+                             PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGMUXHWPERFCOUNTERS,
+                             PVRSRVBridgeRGXConfigMuxHWPerfCounters, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+                             PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS,
+                             PVRSRVBridgeRGXControlHWPerfBlocks, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+                             PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS,
+                             PVRSRVBridgeRGXConfigCustomCounters, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+                             PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS,
+                             PVRSRVBridgeRGXConfigureHWPerfBlocks, NULL);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXHWPERF functions from services
+ */
+void DeinitRGXHWPERFBridge(void)
+{
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+                               PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+                               PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGMUXHWPERFCOUNTERS);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+                               PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+                               PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+                               PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS);
+
+}
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxkicksync_bridge/common_rgxkicksync_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxkicksync_bridge/common_rgxkicksync_bridge.h
new file mode 100644 (file)
index 0000000..afd882c
--- /dev/null
@@ -0,0 +1,143 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for rgxkicksync
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxkicksync
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXKICKSYNC_BRIDGE_H
+#define COMMON_RGXKICKSYNC_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "pvrsrv_sync_km.h"
+
+#define PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST                    0
+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT                     PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT                    PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2                 PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY                        PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXKICKSYNC_CMD_LAST                     (PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+3)
+
+/*******************************************
+            RGXCreateKickSyncContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateKickSyncContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT_TAG
+{
+       IMG_HANDLE hPrivData;
+       IMG_UINT32 ui32ContextFlags;
+       IMG_UINT32 ui32PackedCCBSizeU88;
+} __packed PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT;
+
+/* Bridge out structure for RGXCreateKickSyncContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT_TAG
+{
+       IMG_HANDLE hKickSyncContext;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT;
+
+/*******************************************
+            RGXDestroyKickSyncContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyKickSyncContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT_TAG
+{
+       IMG_HANDLE hKickSyncContext;
+} __packed PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT;
+
+/* Bridge out structure for RGXDestroyKickSyncContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT;
+
+/*******************************************
+            RGXKickSync2
+ *******************************************/
+
+/* Bridge in structure for RGXKickSync2 */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKSYNC2_TAG
+{
+       IMG_HANDLE hKickSyncContext;
+       IMG_UINT32 *pui32UpdateDevVarOffset;
+       IMG_UINT32 *pui32UpdateValue;
+       IMG_CHAR *puiUpdateFenceName;
+       IMG_HANDLE *phUpdateUFODevVarBlock;
+       PVRSRV_FENCE hCheckFenceFD;
+       PVRSRV_TIMELINE hTimelineFenceFD;
+       IMG_UINT32 ui32ClientUpdateCount;
+       IMG_UINT32 ui32ExtJobRef;
+} __packed PVRSRV_BRIDGE_IN_RGXKICKSYNC2;
+
+/* Bridge out structure for RGXKickSync2 */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKSYNC2_TAG
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_FENCE hUpdateFenceFD;
+} __packed PVRSRV_BRIDGE_OUT_RGXKICKSYNC2;
+
+/*******************************************
+            RGXSetKickSyncContextProperty
+ *******************************************/
+
+/* Bridge in structure for RGXSetKickSyncContextProperty */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY_TAG
+{
+       IMG_UINT64 ui64Input;
+       IMG_HANDLE hKickSyncContext;
+       IMG_UINT32 ui32Property;
+} __packed PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY;
+
+/* Bridge out structure for RGXSetKickSyncContextProperty */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY_TAG
+{
+       IMG_UINT64 ui64Output;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY;
+
+#endif /* COMMON_RGXKICKSYNC_BRIDGE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxkicksync_bridge/server_rgxkicksync_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxkicksync_bridge/server_rgxkicksync_bridge.c
new file mode 100644 (file)
index 0000000..25f68f3
--- /dev/null
@@ -0,0 +1,579 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for rgxkicksync
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxkicksync
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxkicksync.h"
+
+#include "common_rgxkicksync_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static PVRSRV_ERROR _RGXCreateKickSyncContextpsKickSyncContextIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = PVRSRVRGXDestroyKickSyncContextKM((RGX_SERVER_KICKSYNC_CONTEXT *) pvData);
+       return eError;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXCreateKickSyncContext(IMG_UINT32 ui32DispatchTableEntry,
+                                    IMG_UINT8 * psRGXCreateKickSyncContextIN_UI8,
+                                    IMG_UINT8 * psRGXCreateKickSyncContextOUT_UI8,
+                                    CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT *psRGXCreateKickSyncContextIN =
+           (PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT *)
+           IMG_OFFSET_ADDR(psRGXCreateKickSyncContextIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT *psRGXCreateKickSyncContextOUT =
+           (PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT *)
+           IMG_OFFSET_ADDR(psRGXCreateKickSyncContextOUT_UI8, 0);
+
+       IMG_HANDLE hPrivData = psRGXCreateKickSyncContextIN->hPrivData;
+       IMG_HANDLE hPrivDataInt = NULL;
+       RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContextInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXCreateKickSyncContextOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&hPrivDataInt,
+                                      hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE);
+       if (unlikely(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXCreateKickSyncContext_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXCreateKickSyncContextOUT->eError =
+           PVRSRVRGXCreateKickSyncContextKM(psConnection, OSGetDevNode(psConnection),
+                                            hPrivDataInt,
+                                            psRGXCreateKickSyncContextIN->ui32PackedCCBSizeU88,
+                                            psRGXCreateKickSyncContextIN->ui32ContextFlags,
+                                            &psKickSyncContextInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK))
+       {
+               goto RGXCreateKickSyncContext_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psRGXCreateKickSyncContextOUT->eError =
+           PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                     &psRGXCreateKickSyncContextOUT->hKickSyncContext,
+                                     (void *)psKickSyncContextInt,
+                                     PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT,
+                                     PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                     (PFN_HANDLE_RELEASE) &
+                                     _RGXCreateKickSyncContextpsKickSyncContextIntRelease);
+       if (unlikely(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXCreateKickSyncContext_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+RGXCreateKickSyncContext_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (hPrivDataInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       if (psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)
+       {
+               if (psKickSyncContextInt)
+               {
+                       PVRSRVRGXDestroyKickSyncContextKM(psKickSyncContextInt);
+               }
+       }
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyKickSyncContext(IMG_UINT32 ui32DispatchTableEntry,
+                                     IMG_UINT8 * psRGXDestroyKickSyncContextIN_UI8,
+                                     IMG_UINT8 * psRGXDestroyKickSyncContextOUT_UI8,
+                                     CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT *psRGXDestroyKickSyncContextIN =
+           (PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT *)
+           IMG_OFFSET_ADDR(psRGXDestroyKickSyncContextIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT *psRGXDestroyKickSyncContextOUT =
+           (PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT *)
+           IMG_OFFSET_ADDR(psRGXDestroyKickSyncContextOUT_UI8, 0);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psHandleBase);
+
+       psRGXDestroyKickSyncContextOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+                                             (IMG_HANDLE) psRGXDestroyKickSyncContextIN->
+                                             hKickSyncContext,
+                                             PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT);
+       if (unlikely
+           ((psRGXDestroyKickSyncContextOUT->eError != PVRSRV_OK)
+            && (psRGXDestroyKickSyncContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL)
+            && (psRGXDestroyKickSyncContextOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s",
+                        __func__, PVRSRVGetErrorString(psRGXDestroyKickSyncContextOUT->eError)));
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXDestroyKickSyncContext_exit;
+       }
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+RGXDestroyKickSyncContext_exit:
+
+       return 0;
+}
+
+static_assert(PVRSRV_MAX_DEV_VARS <= IMG_UINT32_MAX,
+             "PVRSRV_MAX_DEV_VARS must not be larger than IMG_UINT32_MAX");
+static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX,
+             "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeRGXKickSync2(IMG_UINT32 ui32DispatchTableEntry,
+                        IMG_UINT8 * psRGXKickSync2IN_UI8,
+                        IMG_UINT8 * psRGXKickSync2OUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXKICKSYNC2 *psRGXKickSync2IN =
+           (PVRSRV_BRIDGE_IN_RGXKICKSYNC2 *) IMG_OFFSET_ADDR(psRGXKickSync2IN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXKICKSYNC2 *psRGXKickSync2OUT =
+           (PVRSRV_BRIDGE_OUT_RGXKICKSYNC2 *) IMG_OFFSET_ADDR(psRGXKickSync2OUT_UI8, 0);
+
+       IMG_HANDLE hKickSyncContext = psRGXKickSync2IN->hKickSyncContext;
+       RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContextInt = NULL;
+       SYNC_PRIMITIVE_BLOCK **psUpdateUFODevVarBlockInt = NULL;
+       IMG_HANDLE *hUpdateUFODevVarBlockInt2 = NULL;
+       IMG_UINT32 *ui32UpdateDevVarOffsetInt = NULL;
+       IMG_UINT32 *ui32UpdateValueInt = NULL;
+       IMG_CHAR *uiUpdateFenceNameInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psRGXKickSync2IN->ui32ClientUpdateCount *
+            sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+           ((IMG_UINT64) psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) +
+           ((IMG_UINT64) psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+           ((IMG_UINT64) psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+           ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + 0;
+
+       if (unlikely(psRGXKickSync2IN->ui32ClientUpdateCount > PVRSRV_MAX_DEV_VARS))
+       {
+               psRGXKickSync2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto RGXKickSync2_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psRGXKickSync2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto RGXKickSync2_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psRGXKickSync2IN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickSync2IN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psRGXKickSync2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto RGXKickSync2_exit;
+                       }
+               }
+       }
+
+       if (psRGXKickSync2IN->ui32ClientUpdateCount != 0)
+       {
+               psUpdateUFODevVarBlockInt =
+                   (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               OSCachedMemSet(psUpdateUFODevVarBlockInt, 0,
+                              psRGXKickSync2IN->ui32ClientUpdateCount *
+                              sizeof(SYNC_PRIMITIVE_BLOCK *));
+               ui32NextOffset +=
+                   psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+               hUpdateUFODevVarBlockInt2 =
+                   (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE);
+       }
+
+       /* Copy the data over */
+       if (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, hUpdateUFODevVarBlockInt2,
+                    (const void __user *)psRGXKickSync2IN->phUpdateUFODevVarBlock,
+                    psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+               {
+                       psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXKickSync2_exit;
+               }
+       }
+       if (psRGXKickSync2IN->ui32ClientUpdateCount != 0)
+       {
+               ui32UpdateDevVarOffsetInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32UpdateDevVarOffsetInt,
+                    (const void __user *)psRGXKickSync2IN->pui32UpdateDevVarOffset,
+                    psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+               {
+                       psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXKickSync2_exit;
+               }
+       }
+       if (psRGXKickSync2IN->ui32ClientUpdateCount != 0)
+       {
+               ui32UpdateValueInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32UpdateValueInt,
+                    (const void __user *)psRGXKickSync2IN->pui32UpdateValue,
+                    psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+               {
+                       psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXKickSync2_exit;
+               }
+       }
+
+       {
+               uiUpdateFenceNameInt =
+                   (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiUpdateFenceNameInt,
+                    (const void __user *)psRGXKickSync2IN->puiUpdateFenceName,
+                    PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXKickSync2_exit;
+               }
+               ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) -
+                                                   1] = '\0';
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXKickSync2OUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psKickSyncContextInt,
+                                      hKickSyncContext,
+                                      PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT, IMG_TRUE);
+       if (unlikely(psRGXKickSync2OUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXKickSync2_exit;
+       }
+
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < psRGXKickSync2IN->ui32ClientUpdateCount; i++)
+               {
+                       /* Look up the address from the handle */
+                       psRGXKickSync2OUT->eError =
+                           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                                      (void **)&psUpdateUFODevVarBlockInt[i],
+                                                      hUpdateUFODevVarBlockInt2[i],
+                                                      PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+                                                      IMG_TRUE);
+                       if (unlikely(psRGXKickSync2OUT->eError != PVRSRV_OK))
+                       {
+                               UnlockHandle(psConnection->psHandleBase);
+                               goto RGXKickSync2_exit;
+                       }
+               }
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXKickSync2OUT->eError =
+           PVRSRVRGXKickSyncKM(psKickSyncContextInt,
+                               psRGXKickSync2IN->ui32ClientUpdateCount,
+                               psUpdateUFODevVarBlockInt,
+                               ui32UpdateDevVarOffsetInt,
+                               ui32UpdateValueInt,
+                               psRGXKickSync2IN->hCheckFenceFD,
+                               psRGXKickSync2IN->hTimelineFenceFD,
+                               &psRGXKickSync2OUT->hUpdateFenceFD,
+                               uiUpdateFenceNameInt, psRGXKickSync2IN->ui32ExtJobRef);
+
+RGXKickSync2_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psKickSyncContextInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hKickSyncContext,
+                                           PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT);
+       }
+
+       if (hUpdateUFODevVarBlockInt2)
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < psRGXKickSync2IN->ui32ClientUpdateCount; i++)
+               {
+
+                       /* Unreference the previously looked up handle */
+                       if (psUpdateUFODevVarBlockInt[i])
+                       {
+                               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                                           hUpdateUFODevVarBlockInt2[i],
+                                                           PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+                       }
+               }
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psRGXKickSync2OUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXSetKickSyncContextProperty(IMG_UINT32 ui32DispatchTableEntry,
+                                         IMG_UINT8 * psRGXSetKickSyncContextPropertyIN_UI8,
+                                         IMG_UINT8 * psRGXSetKickSyncContextPropertyOUT_UI8,
+                                         CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY *psRGXSetKickSyncContextPropertyIN =
+           (PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY *)
+           IMG_OFFSET_ADDR(psRGXSetKickSyncContextPropertyIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY *psRGXSetKickSyncContextPropertyOUT =
+           (PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY *)
+           IMG_OFFSET_ADDR(psRGXSetKickSyncContextPropertyOUT_UI8, 0);
+
+       IMG_HANDLE hKickSyncContext = psRGXSetKickSyncContextPropertyIN->hKickSyncContext;
+       RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContextInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXSetKickSyncContextPropertyOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psKickSyncContextInt,
+                                      hKickSyncContext,
+                                      PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT, IMG_TRUE);
+       if (unlikely(psRGXSetKickSyncContextPropertyOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXSetKickSyncContextProperty_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXSetKickSyncContextPropertyOUT->eError =
+           PVRSRVRGXSetKickSyncContextPropertyKM(psKickSyncContextInt,
+                                                 psRGXSetKickSyncContextPropertyIN->ui32Property,
+                                                 psRGXSetKickSyncContextPropertyIN->ui64Input,
+                                                 &psRGXSetKickSyncContextPropertyOUT->ui64Output);
+
+RGXSetKickSyncContextProperty_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psKickSyncContextInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hKickSyncContext,
+                                           PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitRGXKICKSYNCBridge(void);
+void DeinitRGXKICKSYNCBridge(void);
+
+/*
+ * Register all RGXKICKSYNC functions with services
+ */
+PVRSRV_ERROR InitRGXKICKSYNCBridge(void)
+{
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC,
+                             PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT,
+                             PVRSRVBridgeRGXCreateKickSyncContext, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC,
+                             PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT,
+                             PVRSRVBridgeRGXDestroyKickSyncContext, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2,
+                             PVRSRVBridgeRGXKickSync2, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC,
+                             PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY,
+                             PVRSRVBridgeRGXSetKickSyncContextProperty, NULL);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all rgxkicksync functions with services
+ */
+void DeinitRGXKICKSYNCBridge(void)
+{
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC,
+                               PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC,
+                               PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC,
+                               PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY);
+
+}
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxpdump_bridge/client_rgxpdump_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxpdump_bridge/client_rgxpdump_bridge.h
new file mode 100644 (file)
index 0000000..ed5a664
--- /dev/null
@@ -0,0 +1,73 @@
+/*******************************************************************************
+@File
+@Title          Client bridge header for rgxpdump
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for rgxpdump
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_RGXPDUMP_BRIDGE_H
+#define CLIENT_RGXPDUMP_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_rgxpdump_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpTraceBuffer(IMG_HANDLE hBridge, IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpSignatureBuffer(IMG_HANDLE hBridge, IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpComputeCRCSignatureCheck(IMG_HANDLE hBridge,
+                                                             IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpCRCSignatureCheck(IMG_HANDLE hBridge,
+                                                      IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpValCheckPreCommand(IMG_HANDLE hBridge,
+                                                       IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpValCheckPostCommand(IMG_HANDLE hBridge,
+                                                        IMG_UINT32 ui32PDumpFlags);
+
+#endif /* CLIENT_RGXPDUMP_BRIDGE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxpdump_bridge/client_rgxpdump_direct_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxpdump_bridge/client_rgxpdump_direct_bridge.c
new file mode 100644 (file)
index 0000000..ed34b21
--- /dev/null
@@ -0,0 +1,128 @@
+/*******************************************************************************
+@File
+@Title          Direct client bridge for rgxpdump
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the client side of the bridge for rgxpdump
+                which is used in calls from Server context.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_rgxpdump_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "rgx_bridge.h"
+
+#include "rgxpdump.h"
+
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpTraceBuffer(IMG_HANDLE hBridge, IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError;
+
+       eError =
+           PVRSRVPDumpTraceBufferKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                                    ui32PDumpFlags);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpSignatureBuffer(IMG_HANDLE hBridge, IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError;
+
+       eError =
+           PVRSRVPDumpSignatureBufferKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                                        ui32PDumpFlags);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpComputeCRCSignatureCheck(IMG_HANDLE hBridge,
+                                                             IMG_UINT32 ui32PDumpFlags)
+{
+#if defined(SUPPORT_VALIDATION)
+       PVRSRV_ERROR eError;
+
+       eError =
+           PVRSRVPDumpComputeCRCSignatureCheckKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                                                 ui32PDumpFlags);
+
+       return eError;
+#else
+       PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+
+       return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpCRCSignatureCheck(IMG_HANDLE hBridge,
+                                                      IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError;
+
+       eError =
+           PVRSRVPDumpCRCSignatureCheckKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                                          ui32PDumpFlags);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpValCheckPreCommand(IMG_HANDLE hBridge,
+                                                       IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError;
+
+       eError =
+           PVRSRVPDumpValCheckPreCommandKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                                           ui32PDumpFlags);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpValCheckPostCommand(IMG_HANDLE hBridge,
+                                                        IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError;
+
+       eError =
+           PVRSRVPDumpValCheckPostCommandKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                                            ui32PDumpFlags);
+
+       return eError;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxpdump_bridge/common_rgxpdump_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxpdump_bridge/common_rgxpdump_bridge.h
new file mode 100644 (file)
index 0000000..37317ac
--- /dev/null
@@ -0,0 +1,161 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for rgxpdump
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxpdump
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXPDUMP_BRIDGE_H
+#define COMMON_RGXPDUMP_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+#define PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST                       0
+#define PVRSRV_BRIDGE_RGXPDUMP_PDUMPTRACEBUFFER                        PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXPDUMP_PDUMPSIGNATUREBUFFER                    PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXPDUMP_PDUMPCOMPUTECRCSIGNATURECHECK                   PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXPDUMP_PDUMPCRCSIGNATURECHECK                  PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXPDUMP_PDUMPVALCHECKPRECOMMAND                 PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXPDUMP_PDUMPVALCHECKPOSTCOMMAND                        PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXPDUMP_CMD_LAST                        (PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+5)
+
+/*******************************************
+            PDumpTraceBuffer
+ *******************************************/
+
+/* Bridge in structure for PDumpTraceBuffer */
+typedef struct PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER_TAG
+{
+       IMG_UINT32 ui32PDumpFlags;
+} __packed PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER;
+
+/* Bridge out structure for PDumpTraceBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER;
+
+/*******************************************
+            PDumpSignatureBuffer
+ *******************************************/
+
+/* Bridge in structure for PDumpSignatureBuffer */
+typedef struct PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER_TAG
+{
+       IMG_UINT32 ui32PDumpFlags;
+} __packed PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER;
+
+/* Bridge out structure for PDumpSignatureBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER;
+
+/*******************************************
+            PDumpComputeCRCSignatureCheck
+ *******************************************/
+
+/* Bridge in structure for PDumpComputeCRCSignatureCheck */
+typedef struct PVRSRV_BRIDGE_IN_PDUMPCOMPUTECRCSIGNATURECHECK_TAG
+{
+       IMG_UINT32 ui32PDumpFlags;
+} __packed PVRSRV_BRIDGE_IN_PDUMPCOMPUTECRCSIGNATURECHECK;
+
+/* Bridge out structure for PDumpComputeCRCSignatureCheck */
+typedef struct PVRSRV_BRIDGE_OUT_PDUMPCOMPUTECRCSIGNATURECHECK_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PDUMPCOMPUTECRCSIGNATURECHECK;
+
+/*******************************************
+            PDumpCRCSignatureCheck
+ *******************************************/
+
+/* Bridge in structure for PDumpCRCSignatureCheck */
+typedef struct PVRSRV_BRIDGE_IN_PDUMPCRCSIGNATURECHECK_TAG
+{
+       IMG_UINT32 ui32PDumpFlags;
+} __packed PVRSRV_BRIDGE_IN_PDUMPCRCSIGNATURECHECK;
+
+/* Bridge out structure for PDumpCRCSignatureCheck */
+typedef struct PVRSRV_BRIDGE_OUT_PDUMPCRCSIGNATURECHECK_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PDUMPCRCSIGNATURECHECK;
+
+/*******************************************
+            PDumpValCheckPreCommand
+ *******************************************/
+
+/* Bridge in structure for PDumpValCheckPreCommand */
+typedef struct PVRSRV_BRIDGE_IN_PDUMPVALCHECKPRECOMMAND_TAG
+{
+       IMG_UINT32 ui32PDumpFlags;
+} __packed PVRSRV_BRIDGE_IN_PDUMPVALCHECKPRECOMMAND;
+
+/* Bridge out structure for PDumpValCheckPreCommand */
+typedef struct PVRSRV_BRIDGE_OUT_PDUMPVALCHECKPRECOMMAND_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PDUMPVALCHECKPRECOMMAND;
+
+/*******************************************
+            PDumpValCheckPostCommand
+ *******************************************/
+
+/* Bridge in structure for PDumpValCheckPostCommand */
+typedef struct PVRSRV_BRIDGE_IN_PDUMPVALCHECKPOSTCOMMAND_TAG
+{
+       IMG_UINT32 ui32PDumpFlags;
+} __packed PVRSRV_BRIDGE_IN_PDUMPVALCHECKPOSTCOMMAND;
+
+/* Bridge out structure for PDumpValCheckPostCommand */
+typedef struct PVRSRV_BRIDGE_OUT_PDUMPVALCHECKPOSTCOMMAND_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PDUMPVALCHECKPOSTCOMMAND;
+
+#endif /* COMMON_RGXPDUMP_BRIDGE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxpdump_bridge/server_rgxpdump_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxpdump_bridge/server_rgxpdump_bridge.c
new file mode 100644 (file)
index 0000000..e9a5d42
--- /dev/null
@@ -0,0 +1,252 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for rgxpdump
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxpdump
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxpdump.h"
+
+#include "common_rgxpdump_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgePDumpTraceBuffer(IMG_UINT32 ui32DispatchTableEntry,
+                            IMG_UINT8 * psPDumpTraceBufferIN_UI8,
+                            IMG_UINT8 * psPDumpTraceBufferOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER *psPDumpTraceBufferIN =
+           (PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER *) IMG_OFFSET_ADDR(psPDumpTraceBufferIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER *psPDumpTraceBufferOUT =
+           (PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER *) IMG_OFFSET_ADDR(psPDumpTraceBufferOUT_UI8, 0);
+
+       psPDumpTraceBufferOUT->eError =
+           PVRSRVPDumpTraceBufferKM(psConnection, OSGetDevNode(psConnection),
+                                    psPDumpTraceBufferIN->ui32PDumpFlags);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePDumpSignatureBuffer(IMG_UINT32 ui32DispatchTableEntry,
+                                IMG_UINT8 * psPDumpSignatureBufferIN_UI8,
+                                IMG_UINT8 * psPDumpSignatureBufferOUT_UI8,
+                                CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER *psPDumpSignatureBufferIN =
+           (PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER *) IMG_OFFSET_ADDR(psPDumpSignatureBufferIN_UI8,
+                                                                     0);
+       PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER *psPDumpSignatureBufferOUT =
+           (PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER *)
+           IMG_OFFSET_ADDR(psPDumpSignatureBufferOUT_UI8, 0);
+
+       psPDumpSignatureBufferOUT->eError =
+           PVRSRVPDumpSignatureBufferKM(psConnection, OSGetDevNode(psConnection),
+                                        psPDumpSignatureBufferIN->ui32PDumpFlags);
+
+       return 0;
+}
+
+#if defined(SUPPORT_VALIDATION)
+
+static IMG_INT
+PVRSRVBridgePDumpComputeCRCSignatureCheck(IMG_UINT32 ui32DispatchTableEntry,
+                                         IMG_UINT8 * psPDumpComputeCRCSignatureCheckIN_UI8,
+                                         IMG_UINT8 * psPDumpComputeCRCSignatureCheckOUT_UI8,
+                                         CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PDUMPCOMPUTECRCSIGNATURECHECK *psPDumpComputeCRCSignatureCheckIN =
+           (PVRSRV_BRIDGE_IN_PDUMPCOMPUTECRCSIGNATURECHECK *)
+           IMG_OFFSET_ADDR(psPDumpComputeCRCSignatureCheckIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PDUMPCOMPUTECRCSIGNATURECHECK *psPDumpComputeCRCSignatureCheckOUT =
+           (PVRSRV_BRIDGE_OUT_PDUMPCOMPUTECRCSIGNATURECHECK *)
+           IMG_OFFSET_ADDR(psPDumpComputeCRCSignatureCheckOUT_UI8, 0);
+
+       psPDumpComputeCRCSignatureCheckOUT->eError =
+           PVRSRVPDumpComputeCRCSignatureCheckKM(psConnection, OSGetDevNode(psConnection),
+                                                 psPDumpComputeCRCSignatureCheckIN->
+                                                 ui32PDumpFlags);
+
+       return 0;
+}
+
+#else
+#define PVRSRVBridgePDumpComputeCRCSignatureCheck NULL
+#endif
+
+static IMG_INT
+PVRSRVBridgePDumpCRCSignatureCheck(IMG_UINT32 ui32DispatchTableEntry,
+                                  IMG_UINT8 * psPDumpCRCSignatureCheckIN_UI8,
+                                  IMG_UINT8 * psPDumpCRCSignatureCheckOUT_UI8,
+                                  CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PDUMPCRCSIGNATURECHECK *psPDumpCRCSignatureCheckIN =
+           (PVRSRV_BRIDGE_IN_PDUMPCRCSIGNATURECHECK *)
+           IMG_OFFSET_ADDR(psPDumpCRCSignatureCheckIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PDUMPCRCSIGNATURECHECK *psPDumpCRCSignatureCheckOUT =
+           (PVRSRV_BRIDGE_OUT_PDUMPCRCSIGNATURECHECK *)
+           IMG_OFFSET_ADDR(psPDumpCRCSignatureCheckOUT_UI8, 0);
+
+       psPDumpCRCSignatureCheckOUT->eError =
+           PVRSRVPDumpCRCSignatureCheckKM(psConnection, OSGetDevNode(psConnection),
+                                          psPDumpCRCSignatureCheckIN->ui32PDumpFlags);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePDumpValCheckPreCommand(IMG_UINT32 ui32DispatchTableEntry,
+                                   IMG_UINT8 * psPDumpValCheckPreCommandIN_UI8,
+                                   IMG_UINT8 * psPDumpValCheckPreCommandOUT_UI8,
+                                   CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PDUMPVALCHECKPRECOMMAND *psPDumpValCheckPreCommandIN =
+           (PVRSRV_BRIDGE_IN_PDUMPVALCHECKPRECOMMAND *)
+           IMG_OFFSET_ADDR(psPDumpValCheckPreCommandIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PDUMPVALCHECKPRECOMMAND *psPDumpValCheckPreCommandOUT =
+           (PVRSRV_BRIDGE_OUT_PDUMPVALCHECKPRECOMMAND *)
+           IMG_OFFSET_ADDR(psPDumpValCheckPreCommandOUT_UI8, 0);
+
+       psPDumpValCheckPreCommandOUT->eError =
+           PVRSRVPDumpValCheckPreCommandKM(psConnection, OSGetDevNode(psConnection),
+                                           psPDumpValCheckPreCommandIN->ui32PDumpFlags);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePDumpValCheckPostCommand(IMG_UINT32 ui32DispatchTableEntry,
+                                    IMG_UINT8 * psPDumpValCheckPostCommandIN_UI8,
+                                    IMG_UINT8 * psPDumpValCheckPostCommandOUT_UI8,
+                                    CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_PDUMPVALCHECKPOSTCOMMAND *psPDumpValCheckPostCommandIN =
+           (PVRSRV_BRIDGE_IN_PDUMPVALCHECKPOSTCOMMAND *)
+           IMG_OFFSET_ADDR(psPDumpValCheckPostCommandIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_PDUMPVALCHECKPOSTCOMMAND *psPDumpValCheckPostCommandOUT =
+           (PVRSRV_BRIDGE_OUT_PDUMPVALCHECKPOSTCOMMAND *)
+           IMG_OFFSET_ADDR(psPDumpValCheckPostCommandOUT_UI8, 0);
+
+       psPDumpValCheckPostCommandOUT->eError =
+           PVRSRVPDumpValCheckPostCommandKM(psConnection, OSGetDevNode(psConnection),
+                                            psPDumpValCheckPostCommandIN->ui32PDumpFlags);
+
+       return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitRGXPDUMPBridge(void);
+void DeinitRGXPDUMPBridge(void);
+
+/*
+ * Register all RGXPDUMP functions with services
+ */
+PVRSRV_ERROR InitRGXPDUMPBridge(void)
+{
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPTRACEBUFFER,
+                             PVRSRVBridgePDumpTraceBuffer, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPSIGNATUREBUFFER,
+                             PVRSRVBridgePDumpSignatureBuffer, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP,
+                             PVRSRV_BRIDGE_RGXPDUMP_PDUMPCOMPUTECRCSIGNATURECHECK,
+                             PVRSRVBridgePDumpComputeCRCSignatureCheck, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPCRCSIGNATURECHECK,
+                             PVRSRVBridgePDumpCRCSignatureCheck, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP,
+                             PVRSRV_BRIDGE_RGXPDUMP_PDUMPVALCHECKPRECOMMAND,
+                             PVRSRVBridgePDumpValCheckPreCommand, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP,
+                             PVRSRV_BRIDGE_RGXPDUMP_PDUMPVALCHECKPOSTCOMMAND,
+                             PVRSRVBridgePDumpValCheckPostCommand, NULL);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all rgxpdump functions with services
+ */
+void DeinitRGXPDUMPBridge(void)
+{
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPTRACEBUFFER);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP,
+                               PVRSRV_BRIDGE_RGXPDUMP_PDUMPSIGNATUREBUFFER);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP,
+                               PVRSRV_BRIDGE_RGXPDUMP_PDUMPCOMPUTECRCSIGNATURECHECK);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP,
+                               PVRSRV_BRIDGE_RGXPDUMP_PDUMPCRCSIGNATURECHECK);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP,
+                               PVRSRV_BRIDGE_RGXPDUMP_PDUMPVALCHECKPRECOMMAND);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP,
+                               PVRSRV_BRIDGE_RGXPDUMP_PDUMPVALCHECKPOSTCOMMAND);
+
+}
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxregconfig_bridge/common_rgxregconfig_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxregconfig_bridge/common_rgxregconfig_bridge.h
new file mode 100644 (file)
index 0000000..942b7e4
--- /dev/null
@@ -0,0 +1,146 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for rgxregconfig
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxregconfig
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXREGCONFIG_BRIDGE_H
+#define COMMON_RGXREGCONFIG_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+#define PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST                   0
+#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE                 PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG                     PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG                   PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG                  PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG                 PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXREGCONFIG_CMD_LAST                    (PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+4)
+
+/*******************************************
+            RGXSetRegConfigType
+ *******************************************/
+
+/* Bridge in structure for RGXSetRegConfigType */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE_TAG
+{
+       IMG_UINT8 ui8RegPowerIsland;
+} __packed PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE;
+
+/* Bridge out structure for RGXSetRegConfigType */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE;
+
+/*******************************************
+            RGXAddRegconfig
+ *******************************************/
+
+/* Bridge in structure for RGXAddRegconfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXADDREGCONFIG_TAG
+{
+       IMG_UINT64 ui64RegMask;
+       IMG_UINT64 ui64RegValue;
+       IMG_UINT32 ui32RegAddr;
+} __packed PVRSRV_BRIDGE_IN_RGXADDREGCONFIG;
+
+/* Bridge out structure for RGXAddRegconfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG;
+
+/*******************************************
+            RGXClearRegConfig
+ *******************************************/
+
+/* Bridge in structure for RGXClearRegConfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG_TAG
+{
+       IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG;
+
+/* Bridge out structure for RGXClearRegConfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG;
+
+/*******************************************
+            RGXEnableRegConfig
+ *******************************************/
+
+/* Bridge in structure for RGXEnableRegConfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG_TAG
+{
+       IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG;
+
+/* Bridge out structure for RGXEnableRegConfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG;
+
+/*******************************************
+            RGXDisableRegConfig
+ *******************************************/
+
+/* Bridge in structure for RGXDisableRegConfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG_TAG
+{
+       IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG;
+
+/* Bridge out structure for RGXDisableRegConfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG;
+
+#endif /* COMMON_RGXREGCONFIG_BRIDGE_H */
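
The header above assigns each rgxregconfig call a command offset relative to
PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST and declares the packed IN/OUT structures
that cross the user/kernel boundary for that call. As a minimal illustration
(not part of the patch), the sketch below fills the RGXAddRegconfig input
structure using only the types declared above; the helper name is hypothetical
and the bridge transport itself is omitted.

/* Hypothetical helper, for illustration only: populate the packed input
 * structure declared in common_rgxregconfig_bridge.h before a bridge call. */
#include "common_rgxregconfig_bridge.h"

static PVRSRV_BRIDGE_IN_RGXADDREGCONFIG
BuildAddRegConfigIn(IMG_UINT32 ui32RegAddr, IMG_UINT64 ui64RegValue, IMG_UINT64 ui64RegMask)
{
	PVRSRV_BRIDGE_IN_RGXADDREGCONFIG sIn;

	sIn.ui64RegMask = ui64RegMask;		/* bits of the register to change */
	sIn.ui64RegValue = ui64RegValue;	/* value applied under the mask */
	sIn.ui32RegAddr = ui32RegAddr;		/* RGX register offset to configure */

	return sIn;
}
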
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxregconfig_bridge/server_rgxregconfig_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxregconfig_bridge/server_rgxregconfig_bridge.c
new file mode 100644 (file)
index 0000000..4cdcb12
--- /dev/null
@@ -0,0 +1,239 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for rgxregconfig
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxregconfig
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxregconfig.h"
+
+#include "common_rgxregconfig_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE)
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXSetRegConfigType(IMG_UINT32 ui32DispatchTableEntry,
+                               IMG_UINT8 * psRGXSetRegConfigTypeIN_UI8,
+                               IMG_UINT8 * psRGXSetRegConfigTypeOUT_UI8,
+                               CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE *psRGXSetRegConfigTypeIN =
+           (PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE *) IMG_OFFSET_ADDR(psRGXSetRegConfigTypeIN_UI8,
+                                                                    0);
+       PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE *psRGXSetRegConfigTypeOUT =
+           (PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE *) IMG_OFFSET_ADDR(psRGXSetRegConfigTypeOUT_UI8,
+                                                                     0);
+
+       psRGXSetRegConfigTypeOUT->eError =
+           PVRSRVRGXSetRegConfigTypeKM(psConnection, OSGetDevNode(psConnection),
+                                       psRGXSetRegConfigTypeIN->ui8RegPowerIsland);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXAddRegconfig(IMG_UINT32 ui32DispatchTableEntry,
+                           IMG_UINT8 * psRGXAddRegconfigIN_UI8,
+                           IMG_UINT8 * psRGXAddRegconfigOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXADDREGCONFIG *psRGXAddRegconfigIN =
+           (PVRSRV_BRIDGE_IN_RGXADDREGCONFIG *) IMG_OFFSET_ADDR(psRGXAddRegconfigIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG *psRGXAddRegconfigOUT =
+           (PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG *) IMG_OFFSET_ADDR(psRGXAddRegconfigOUT_UI8, 0);
+
+       psRGXAddRegconfigOUT->eError =
+           PVRSRVRGXAddRegConfigKM(psConnection, OSGetDevNode(psConnection),
+                                   psRGXAddRegconfigIN->ui32RegAddr,
+                                   psRGXAddRegconfigIN->ui64RegValue,
+                                   psRGXAddRegconfigIN->ui64RegMask);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXClearRegConfig(IMG_UINT32 ui32DispatchTableEntry,
+                             IMG_UINT8 * psRGXClearRegConfigIN_UI8,
+                             IMG_UINT8 * psRGXClearRegConfigOUT_UI8,
+                             CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG *psRGXClearRegConfigIN =
+           (PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG *) IMG_OFFSET_ADDR(psRGXClearRegConfigIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG *psRGXClearRegConfigOUT =
+           (PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG *) IMG_OFFSET_ADDR(psRGXClearRegConfigOUT_UI8, 0);
+
+       PVR_UNREFERENCED_PARAMETER(psRGXClearRegConfigIN);
+
+       psRGXClearRegConfigOUT->eError =
+           PVRSRVRGXClearRegConfigKM(psConnection, OSGetDevNode(psConnection));
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXEnableRegConfig(IMG_UINT32 ui32DispatchTableEntry,
+                              IMG_UINT8 * psRGXEnableRegConfigIN_UI8,
+                              IMG_UINT8 * psRGXEnableRegConfigOUT_UI8,
+                              CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG *psRGXEnableRegConfigIN =
+           (PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG *) IMG_OFFSET_ADDR(psRGXEnableRegConfigIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG *psRGXEnableRegConfigOUT =
+           (PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG *) IMG_OFFSET_ADDR(psRGXEnableRegConfigOUT_UI8,
+                                                                    0);
+
+       PVR_UNREFERENCED_PARAMETER(psRGXEnableRegConfigIN);
+
+       psRGXEnableRegConfigOUT->eError =
+           PVRSRVRGXEnableRegConfigKM(psConnection, OSGetDevNode(psConnection));
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDisableRegConfig(IMG_UINT32 ui32DispatchTableEntry,
+                               IMG_UINT8 * psRGXDisableRegConfigIN_UI8,
+                               IMG_UINT8 * psRGXDisableRegConfigOUT_UI8,
+                               CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG *psRGXDisableRegConfigIN =
+           (PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG *) IMG_OFFSET_ADDR(psRGXDisableRegConfigIN_UI8,
+                                                                    0);
+       PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG *psRGXDisableRegConfigOUT =
+           (PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG *) IMG_OFFSET_ADDR(psRGXDisableRegConfigOUT_UI8,
+                                                                     0);
+
+       PVR_UNREFERENCED_PARAMETER(psRGXDisableRegConfigIN);
+
+       psRGXDisableRegConfigOUT->eError =
+           PVRSRVRGXDisableRegConfigKM(psConnection, OSGetDevNode(psConnection));
+
+       return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+#endif /* EXCLUDE_RGXREGCONFIG_BRIDGE */
+
+#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE)
+PVRSRV_ERROR InitRGXREGCONFIGBridge(void);
+void DeinitRGXREGCONFIGBridge(void);
+
+/*
+ * Register all RGXREGCONFIG functions with services
+ */
+PVRSRV_ERROR InitRGXREGCONFIGBridge(void)
+{
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+                             PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE,
+                             PVRSRVBridgeRGXSetRegConfigType, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+                             PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG,
+                             PVRSRVBridgeRGXAddRegconfig, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+                             PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG,
+                             PVRSRVBridgeRGXClearRegConfig, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+                             PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG,
+                             PVRSRVBridgeRGXEnableRegConfig, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+                             PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG,
+                             PVRSRVBridgeRGXDisableRegConfig, NULL);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all rgxregconfig functions with services
+ */
+void DeinitRGXREGCONFIGBridge(void)
+{
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+                               PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+                               PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+                               PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+                               PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+                               PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG);
+
+}
+#else /* EXCLUDE_RGXREGCONFIG_BRIDGE */
+/* This bridge is conditional on EXCLUDE_RGXREGCONFIG_BRIDGE - when defined,
+ * do not populate the dispatch table with its functions
+ */
+#define InitRGXREGCONFIGBridge() \
+       PVRSRV_OK
+
+#define DeinitRGXREGCONFIGBridge()
+
+#endif /* EXCLUDE_RGXREGCONFIG_BRIDGE */
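
InitRGXREGCONFIGBridge() and DeinitRGXREGCONFIGBridge() above form the usual
registration pair: one SetDispatchTableEntry()/UnsetDispatchTableEntry() call
per command defined in the common header, all under the
PVRSRV_BRIDGE_RGXREGCONFIG group, with the whole module compiled out when
EXCLUDE_RGXREGCONFIG_BRIDGE is defined. The sketch below shows, purely for
illustration, how a caller might pair them at start-up and shutdown;
RegisterExampleBridges() and UnregisterExampleBridges() are hypothetical names
and not part of the patch.

/* Hypothetical callers, for illustration only. */
static PVRSRV_ERROR RegisterExampleBridges(void)
{
	PVRSRV_ERROR eError;

	/* Populates the PVRSRV_BRIDGE_RGXREGCONFIG dispatch entries; when
	 * EXCLUDE_RGXREGCONFIG_BRIDGE is defined this expands to PVRSRV_OK. */
	eError = InitRGXREGCONFIGBridge();
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/* ... other bridge modules would be registered here ... */

	return PVRSRV_OK;
}

static void UnregisterExampleBridges(void)
{
	/* Removes the same dispatch-table entries again. */
	DeinitRGXREGCONFIGBridge();
}
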
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxta3d_bridge/common_rgxta3d_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxta3d_bridge/common_rgxta3d_bridge.h
new file mode 100644 (file)
index 0000000..b5fedd7
--- /dev/null
@@ -0,0 +1,404 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for rgxta3d
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxta3d
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXTA3D_BRIDGE_H
+#define COMMON_RGXTA3D_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "rgx_fwif_shared.h"
+#include "devicemem_typedefs.h"
+#include "pvrsrv_sync_km.h"
+
+#define PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST                        0
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET                     PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET                    PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER                        PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER                       PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER                      PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER                    PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST                        PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST                       PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+7
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT                   PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+8
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT                  PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+9
+#define PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY                      PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+10
+#define PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED                  PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+11
+#define PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2                     PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+12
+#define PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY                      PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+13
+#define PVRSRV_BRIDGE_RGXTA3D_CMD_LAST                 (PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+13)
+
+/*******************************************
+            RGXCreateHWRTDataSet
+ *******************************************/
+
+/* Bridge in structure for RGXCreateHWRTDataSet */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET_TAG
+{
+       IMG_UINT64 ui64FlippedMultiSampleCtl;
+       IMG_UINT64 ui64MultiSampleCtl;
+       IMG_DEV_VIRTADDR *psMacrotileArrayDevVAddr;
+       IMG_DEV_VIRTADDR *psPMMlistDevVAddr;
+       IMG_DEV_VIRTADDR *psRTCDevVAddr;
+       IMG_DEV_VIRTADDR *psRgnHeaderDevVAddr;
+       IMG_DEV_VIRTADDR *psTailPtrsDevVAddr;
+       IMG_DEV_VIRTADDR *psVHeapTableDevVAddr;
+       IMG_HANDLE *phKmHwRTDataSet;
+       IMG_HANDLE *phapsFreeLists;
+       IMG_UINT32 ui32ISPMergeLowerX;
+       IMG_UINT32 ui32ISPMergeLowerY;
+       IMG_UINT32 ui32ISPMergeScaleX;
+       IMG_UINT32 ui32ISPMergeScaleY;
+       IMG_UINT32 ui32ISPMergeUpperX;
+       IMG_UINT32 ui32ISPMergeUpperY;
+       IMG_UINT32 ui32ISPMtileSize;
+       IMG_UINT32 ui32MTileStride;
+       IMG_UINT32 ui32PPPScreen;
+       IMG_UINT32 ui32RgnHeaderSize;
+       IMG_UINT32 ui32TEAA;
+       IMG_UINT32 ui32TEMTILE1;
+       IMG_UINT32 ui32TEMTILE2;
+       IMG_UINT32 ui32TEScreen;
+       IMG_UINT32 ui32TPCSize;
+       IMG_UINT32 ui32TPCStride;
+       IMG_UINT16 ui16MaxRTs;
+} __packed PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET;
+
+/* Bridge out structure for RGXCreateHWRTDataSet */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET_TAG
+{
+       IMG_HANDLE *phKmHwRTDataSet;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET;
+
+/*******************************************
+            RGXDestroyHWRTDataSet
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyHWRTDataSet */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET_TAG
+{
+       IMG_HANDLE hKmHwRTDataSet;
+} __packed PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET;
+
+/* Bridge out structure for RGXDestroyHWRTDataSet */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET;
+
+/*******************************************
+            RGXCreateZSBuffer
+ *******************************************/
+
+/* Bridge in structure for RGXCreateZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER_TAG
+{
+       IMG_HANDLE hPMR;
+       IMG_HANDLE hReservation;
+       PVRSRV_MEMALLOCFLAGS_T uiMapFlags;
+} __packed PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER;
+
+/* Bridge out structure for RGXCreateZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER_TAG
+{
+       IMG_HANDLE hsZSBufferKM;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER;
+
+/*******************************************
+            RGXDestroyZSBuffer
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER_TAG
+{
+       IMG_HANDLE hsZSBufferMemDesc;
+} __packed PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER;
+
+/* Bridge out structure for RGXDestroyZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER;
+
+/*******************************************
+            RGXPopulateZSBuffer
+ *******************************************/
+
+/* Bridge in structure for RGXPopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER_TAG
+{
+       IMG_HANDLE hsZSBufferKM;
+} __packed PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER;
+
+/* Bridge out structure for RGXPopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER_TAG
+{
+       IMG_HANDLE hsPopulation;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER;
+
+/*******************************************
+            RGXUnpopulateZSBuffer
+ *******************************************/
+
+/* Bridge in structure for RGXUnpopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER_TAG
+{
+       IMG_HANDLE hsPopulation;
+} __packed PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER;
+
+/* Bridge out structure for RGXUnpopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER;
+
+/*******************************************
+            RGXCreateFreeList
+ *******************************************/
+
+/* Bridge in structure for RGXCreateFreeList */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEFREELIST_TAG
+{
+       IMG_DEV_VIRTADDR spsFreeListDevVAddr;
+       IMG_DEVMEM_OFFSET_T uiPMROffset;
+       IMG_HANDLE hMemCtxPrivData;
+       IMG_HANDLE hsFreeListPMR;
+       IMG_HANDLE hsGlobalFreeList;
+       IMG_BOOL bbFreeListCheck;
+       IMG_UINT32 ui32GrowFLPages;
+       IMG_UINT32 ui32GrowParamThreshold;
+       IMG_UINT32 ui32InitFLPages;
+       IMG_UINT32 ui32MaxFLPages;
+} __packed PVRSRV_BRIDGE_IN_RGXCREATEFREELIST;
+
+/* Bridge out structure for RGXCreateFreeList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST_TAG
+{
+       IMG_HANDLE hCleanupCookie;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST;
+
+/*******************************************
+            RGXDestroyFreeList
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyFreeList */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST_TAG
+{
+       IMG_HANDLE hCleanupCookie;
+} __packed PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST;
+
+/* Bridge out structure for RGXDestroyFreeList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST;
+
+/*******************************************
+            RGXCreateRenderContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateRenderContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT_TAG
+{
+       IMG_DEV_VIRTADDR sVDMCallStackAddr;
+       IMG_UINT64 ui64RobustnessAddress;
+       IMG_HANDLE hPrivData;
+       IMG_BYTE *pui8FrameworkCmd;
+       IMG_BYTE *pui8StaticRenderContextState;
+       IMG_UINT32 ui32ContextFlags;
+       IMG_UINT32 ui32FrameworkCmdSize;
+       IMG_UINT32 ui32Max3DDeadlineMS;
+       IMG_UINT32 ui32MaxTADeadlineMS;
+       IMG_UINT32 ui32PackedCCBSizeU8888;
+       IMG_UINT32 ui32Priority;
+       IMG_UINT32 ui32StaticRenderContextStateSize;
+       IMG_UINT32 ui32ui32CallStackDepth;
+} __packed PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT;
+
+/* Bridge out structure for RGXCreateRenderContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT_TAG
+{
+       IMG_HANDLE hRenderContext;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT;
+
+/*******************************************
+            RGXDestroyRenderContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyRenderContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT_TAG
+{
+       IMG_HANDLE hCleanupCookie;
+} __packed PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT;
+
+/* Bridge out structure for RGXDestroyRenderContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT;
+
+/*******************************************
+            RGXSetRenderContextPriority
+ *******************************************/
+
+/* Bridge in structure for RGXSetRenderContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY_TAG
+{
+       IMG_HANDLE hRenderContext;
+       IMG_UINT32 ui32Priority;
+} __packed PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY;
+
+/* Bridge out structure for RGXSetRenderContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY;
+
+/*******************************************
+            RGXRenderContextStalled
+ *******************************************/
+
+/* Bridge in structure for RGXRenderContextStalled */
+typedef struct PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED_TAG
+{
+       IMG_HANDLE hRenderContext;
+} __packed PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED;
+
+/* Bridge out structure for RGXRenderContextStalled */
+typedef struct PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED;
+
+/*******************************************
+            RGXKickTA3D2
+ *******************************************/
+
+/* Bridge in structure for RGXKickTA3D2 */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKTA3D2_TAG
+{
+       IMG_UINT64 ui64Deadline;
+       IMG_HANDLE hKMHWRTDataSet;
+       IMG_HANDLE hMSAAScratchBuffer;
+       IMG_HANDLE hPRFenceUFOSyncPrimBlock;
+       IMG_HANDLE hRenderContext;
+       IMG_HANDLE hZSBuffer;
+       IMG_UINT32 *pui32Client3DUpdateSyncOffset;
+       IMG_UINT32 *pui32Client3DUpdateValue;
+       IMG_UINT32 *pui32ClientTAFenceSyncOffset;
+       IMG_UINT32 *pui32ClientTAFenceValue;
+       IMG_UINT32 *pui32ClientTAUpdateSyncOffset;
+       IMG_UINT32 *pui32ClientTAUpdateValue;
+       IMG_UINT32 *pui32SyncPMRFlags;
+       IMG_BYTE *pui83DCmd;
+       IMG_BYTE *pui83DPRCmd;
+       IMG_BYTE *pui8TACmd;
+       IMG_CHAR *puiUpdateFenceName;
+       IMG_CHAR *puiUpdateFenceName3D;
+       IMG_HANDLE *phClient3DUpdateSyncPrimBlock;
+       IMG_HANDLE *phClientTAFenceSyncPrimBlock;
+       IMG_HANDLE *phClientTAUpdateSyncPrimBlock;
+       IMG_HANDLE *phSyncPMRs;
+       IMG_BOOL bbAbort;
+       IMG_BOOL bbKick3D;
+       IMG_BOOL bbKickPR;
+       IMG_BOOL bbKickTA;
+       PVRSRV_FENCE hCheckFence;
+       PVRSRV_FENCE hCheckFence3D;
+       PVRSRV_TIMELINE hUpdateTimeline;
+       PVRSRV_TIMELINE hUpdateTimeline3D;
+       IMG_UINT32 ui323DCmdSize;
+       IMG_UINT32 ui323DPRCmdSize;
+       IMG_UINT32 ui32Client3DUpdateCount;
+       IMG_UINT32 ui32ClientTAFenceCount;
+       IMG_UINT32 ui32ClientTAUpdateCount;
+       IMG_UINT32 ui32ExtJobRef;
+       IMG_UINT32 ui32NumberOfDrawCalls;
+       IMG_UINT32 ui32NumberOfIndices;
+       IMG_UINT32 ui32NumberOfMRTs;
+       IMG_UINT32 ui32PDumpFlags;
+       IMG_UINT32 ui32PRFenceUFOSyncOffset;
+       IMG_UINT32 ui32PRFenceValue;
+       IMG_UINT32 ui32RenderTargetSize;
+       IMG_UINT32 ui32SyncPMRCount;
+       IMG_UINT32 ui32TACmdSize;
+} __packed PVRSRV_BRIDGE_IN_RGXKICKTA3D2;
+
+/* Bridge out structure for RGXKickTA3D2 */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKTA3D2_TAG
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_FENCE hUpdateFence;
+       PVRSRV_FENCE hUpdateFence3D;
+} __packed PVRSRV_BRIDGE_OUT_RGXKICKTA3D2;
+
+/*******************************************
+            RGXSetRenderContextProperty
+ *******************************************/
+
+/* Bridge in structure for RGXSetRenderContextProperty */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY_TAG
+{
+       IMG_UINT64 ui64Input;
+       IMG_HANDLE hRenderContext;
+       IMG_UINT32 ui32Property;
+} __packed PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY;
+
+/* Bridge out structure for RGXSetRenderContextProperty */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY_TAG
+{
+       IMG_UINT64 ui64Output;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY;
+
+#endif /* COMMON_RGXTA3D_BRIDGE_H */
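
Most of the rgxta3d input structures above carry user-space pointers (for
example the fence/update arrays and command buffers in RGXKickTA3D2) together
with explicit element counts, so the server side has to size a staging buffer
in 64-bit arithmetic and validate it before copying anything in, as the
generated server file that follows does with ui64BufferSize. The sketch below
restates that guard in isolation, purely for illustration; the helper name and
its two count parameters are hypothetical.

/* Hypothetical helper, for illustration only: mirror the overflow guard used
 * by the generated server code when sizing its argument staging buffer. */
static PVRSRV_ERROR
ComputeStagingSize(IMG_UINT32 ui32FenceCount, IMG_UINT32 ui32UpdateCount,
		   IMG_UINT32 * pui32SizeOut)
{
	IMG_UINT64 ui64BufferSize =
	    ((IMG_UINT64) ui32FenceCount * sizeof(IMG_UINT32)) +
	    ((IMG_UINT64) ui32UpdateCount * sizeof(IMG_UINT32));

	/* Reject anything that cannot be represented in the 32-bit size that
	 * is actually allocated for the staging buffer. */
	if (ui64BufferSize > IMG_UINT32_MAX)
	{
		return PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
	}

	*pui32SizeOut = (IMG_UINT32) ui64BufferSize;
	return PVRSRV_OK;
}
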
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxta3d_bridge/server_rgxta3d_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxta3d_bridge/server_rgxta3d_bridge.c
new file mode 100644 (file)
index 0000000..44300ec
--- /dev/null
@@ -0,0 +1,2406 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for rgxta3d
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxta3d
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxta3d.h"
+
+#include "common_rgxta3d_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static PVRSRV_ERROR _RGXCreateHWRTDataSetpsKmHwRTDataSetIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = RGXDestroyHWRTDataSet((RGX_KM_HW_RT_DATASET *) pvData);
+       return eError;
+}
+
+static_assert(RGXMKIF_NUM_GEOMDATAS <= IMG_UINT32_MAX,
+             "RGXMKIF_NUM_GEOMDATAS must not be larger than IMG_UINT32_MAX");
+static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX,
+             "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX");
+static_assert(RGXMKIF_NUM_RTDATA_FREELISTS <= IMG_UINT32_MAX,
+             "RGXMKIF_NUM_RTDATA_FREELISTS must not be larger than IMG_UINT32_MAX");
+static_assert(RGXMKIF_NUM_GEOMDATAS <= IMG_UINT32_MAX,
+             "RGXMKIF_NUM_GEOMDATAS must not be larger than IMG_UINT32_MAX");
+static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX,
+             "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX");
+static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX,
+             "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX");
+static_assert(RGXMKIF_NUM_GEOMDATAS <= IMG_UINT32_MAX,
+             "RGXMKIF_NUM_GEOMDATAS must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeRGXCreateHWRTDataSet(IMG_UINT32 ui32DispatchTableEntry,
+                                IMG_UINT8 * psRGXCreateHWRTDataSetIN_UI8,
+                                IMG_UINT8 * psRGXCreateHWRTDataSetOUT_UI8,
+                                CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET *psRGXCreateHWRTDataSetIN =
+           (PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET *) IMG_OFFSET_ADDR(psRGXCreateHWRTDataSetIN_UI8,
+                                                                     0);
+       PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET *psRGXCreateHWRTDataSetOUT =
+           (PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET *)
+           IMG_OFFSET_ADDR(psRGXCreateHWRTDataSetOUT_UI8, 0);
+
+       IMG_DEV_VIRTADDR *sVHeapTableDevVAddrInt = NULL;
+       IMG_DEV_VIRTADDR *sPMMlistDevVAddrInt = NULL;
+       RGX_FREELIST **psapsFreeListsInt = NULL;
+       IMG_HANDLE *hapsFreeListsInt2 = NULL;
+       IMG_DEV_VIRTADDR *sTailPtrsDevVAddrInt = NULL;
+       IMG_DEV_VIRTADDR *sMacrotileArrayDevVAddrInt = NULL;
+       IMG_DEV_VIRTADDR *sRgnHeaderDevVAddrInt = NULL;
+       IMG_DEV_VIRTADDR *sRTCDevVAddrInt = NULL;
+       RGX_KM_HW_RT_DATASET **psKmHwRTDataSetInt = NULL;
+       IMG_HANDLE *hKmHwRTDataSetInt2 = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) +
+           ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) +
+           ((IMG_UINT64) RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *)) +
+           ((IMG_UINT64) RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE)) +
+           ((IMG_UINT64) RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) +
+           ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) +
+           ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) +
+           ((IMG_UINT64) RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) +
+           ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)) +
+           ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_HANDLE)) + 0;
+
+       psRGXCreateHWRTDataSetOUT->phKmHwRTDataSet = psRGXCreateHWRTDataSetIN->phKmHwRTDataSet;
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto RGXCreateHWRTDataSet_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psRGXCreateHWRTDataSetIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateHWRTDataSetIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto RGXCreateHWRTDataSet_exit;
+                       }
+               }
+       }
+
+       {
+               sVHeapTableDevVAddrInt =
+                   (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR);
+       }
+
+       /* Copy the data over */
+       if (RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, sVHeapTableDevVAddrInt,
+                    (const void __user *)psRGXCreateHWRTDataSetIN->psVHeapTableDevVAddr,
+                    RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK)
+               {
+                       psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXCreateHWRTDataSet_exit;
+               }
+       }
+
+       {
+               sPMMlistDevVAddrInt =
+                   (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR);
+       }
+
+       /* Copy the data over */
+       if (RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, sPMMlistDevVAddrInt,
+                    (const void __user *)psRGXCreateHWRTDataSetIN->psPMMlistDevVAddr,
+                    RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK)
+               {
+                       psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXCreateHWRTDataSet_exit;
+               }
+       }
+
+       {
+               psapsFreeListsInt =
+                   (RGX_FREELIST **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               OSCachedMemSet(psapsFreeListsInt, 0,
+                              RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *));
+               ui32NextOffset += RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *);
+               hapsFreeListsInt2 =
+                   (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE);
+       }
+
+       /* Copy the data over */
+       if (RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, hapsFreeListsInt2,
+                    (const void __user *)psRGXCreateHWRTDataSetIN->phapsFreeLists,
+                    RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+               {
+                       psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXCreateHWRTDataSet_exit;
+               }
+       }
+
+       {
+               sTailPtrsDevVAddrInt =
+                   (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR);
+       }
+
+       /* Copy the data over */
+       if (RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, sTailPtrsDevVAddrInt,
+                    (const void __user *)psRGXCreateHWRTDataSetIN->psTailPtrsDevVAddr,
+                    RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK)
+               {
+                       psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXCreateHWRTDataSet_exit;
+               }
+       }
+
+       {
+               sMacrotileArrayDevVAddrInt =
+                   (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR);
+       }
+
+       /* Copy the data over */
+       if (RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, sMacrotileArrayDevVAddrInt,
+                    (const void __user *)psRGXCreateHWRTDataSetIN->psMacrotileArrayDevVAddr,
+                    RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK)
+               {
+                       psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXCreateHWRTDataSet_exit;
+               }
+       }
+
+       {
+               sRgnHeaderDevVAddrInt =
+                   (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR);
+       }
+
+       /* Copy the data over */
+       if (RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, sRgnHeaderDevVAddrInt,
+                    (const void __user *)psRGXCreateHWRTDataSetIN->psRgnHeaderDevVAddr,
+                    RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK)
+               {
+                       psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXCreateHWRTDataSet_exit;
+               }
+       }
+
+       {
+               sRTCDevVAddrInt =
+                   (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR);
+       }
+
+       /* Copy the data over */
+       if (RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, sRTCDevVAddrInt,
+                    (const void __user *)psRGXCreateHWRTDataSetIN->psRTCDevVAddr,
+                    RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK)
+               {
+                       psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXCreateHWRTDataSet_exit;
+               }
+       }
+       if (IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset) != NULL)
+       {
+               psKmHwRTDataSetInt =
+                   (RGX_KM_HW_RT_DATASET **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               OSCachedMemSet(psKmHwRTDataSetInt, 0,
+                              RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *));
+               ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *);
+               hKmHwRTDataSetInt2 =
+                   (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_HANDLE);
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < RGXMKIF_NUM_RTDATA_FREELISTS; i++)
+               {
+                       /* Look up the address from the handle */
+                       psRGXCreateHWRTDataSetOUT->eError =
+                           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                                      (void **)&psapsFreeListsInt[i],
+                                                      hapsFreeListsInt2[i],
+                                                      PVRSRV_HANDLE_TYPE_RGX_FREELIST, IMG_TRUE);
+                       if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK))
+                       {
+                               UnlockHandle(psConnection->psHandleBase);
+                               goto RGXCreateHWRTDataSet_exit;
+                       }
+               }
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXCreateHWRTDataSetOUT->eError =
+           RGXCreateHWRTDataSet(psConnection, OSGetDevNode(psConnection),
+                                sVHeapTableDevVAddrInt,
+                                sPMMlistDevVAddrInt,
+                                psapsFreeListsInt,
+                                psRGXCreateHWRTDataSetIN->ui32PPPScreen,
+                                psRGXCreateHWRTDataSetIN->ui64MultiSampleCtl,
+                                psRGXCreateHWRTDataSetIN->ui64FlippedMultiSampleCtl,
+                                psRGXCreateHWRTDataSetIN->ui32TPCStride,
+                                sTailPtrsDevVAddrInt,
+                                psRGXCreateHWRTDataSetIN->ui32TPCSize,
+                                psRGXCreateHWRTDataSetIN->ui32TEScreen,
+                                psRGXCreateHWRTDataSetIN->ui32TEAA,
+                                psRGXCreateHWRTDataSetIN->ui32TEMTILE1,
+                                psRGXCreateHWRTDataSetIN->ui32TEMTILE2,
+                                psRGXCreateHWRTDataSetIN->ui32MTileStride,
+                                psRGXCreateHWRTDataSetIN->ui32ISPMergeLowerX,
+                                psRGXCreateHWRTDataSetIN->ui32ISPMergeLowerY,
+                                psRGXCreateHWRTDataSetIN->ui32ISPMergeUpperX,
+                                psRGXCreateHWRTDataSetIN->ui32ISPMergeUpperY,
+                                psRGXCreateHWRTDataSetIN->ui32ISPMergeScaleX,
+                                psRGXCreateHWRTDataSetIN->ui32ISPMergeScaleY,
+                                sMacrotileArrayDevVAddrInt,
+                                sRgnHeaderDevVAddrInt,
+                                sRTCDevVAddrInt,
+                                psRGXCreateHWRTDataSetIN->ui32RgnHeaderSize,
+                                psRGXCreateHWRTDataSetIN->ui32ISPMtileSize,
+                                psRGXCreateHWRTDataSetIN->ui16MaxRTs, psKmHwRTDataSetInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK))
+       {
+               goto RGXCreateHWRTDataSet_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+       if (hKmHwRTDataSetInt2)
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < RGXMKIF_NUM_RTDATAS; i++)
+               {
+
+                       psRGXCreateHWRTDataSetOUT->eError =
+                           PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                                     &hKmHwRTDataSetInt2[i],
+                                                     (void *)psKmHwRTDataSetInt[i],
+                                                     PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET,
+                                                     PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                                     (PFN_HANDLE_RELEASE) &
+                                                     _RGXCreateHWRTDataSetpsKmHwRTDataSetIntRelease);
+                       if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK))
+                       {
+                               UnlockHandle(psConnection->psHandleBase);
+                               goto RGXCreateHWRTDataSet_exit;
+                       }
+
+               }
+       }
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       /* If dest ptr is non-null and we have data to copy */
+       if ((hKmHwRTDataSetInt2) && ((RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)) > 0))
+       {
+               if (unlikely
+                   (OSCopyToUser
+                    (NULL, (void __user *)psRGXCreateHWRTDataSetOUT->phKmHwRTDataSet,
+                     hKmHwRTDataSetInt2,
+                     (RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *))) != PVRSRV_OK))
+               {
+                       psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXCreateHWRTDataSet_exit;
+               }
+       }
+
+RGXCreateHWRTDataSet_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       if (hapsFreeListsInt2)
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < RGXMKIF_NUM_RTDATA_FREELISTS; i++)
+               {
+
+                       /* Unreference the previously looked up handle */
+                       if (psapsFreeListsInt[i])
+                       {
+                               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                                           hapsFreeListsInt2[i],
+                                                           PVRSRV_HANDLE_TYPE_RGX_FREELIST);
+                       }
+               }
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       if (psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)
+       {
+               {
+                       IMG_UINT32 i;
+
+                       if (hKmHwRTDataSetInt2)
+                       {
+                               for (i = 0; i < RGXMKIF_NUM_RTDATAS; i++)
+                               {
+                                       if (hKmHwRTDataSetInt2[i])
+                                       {
+                                               RGXDestroyHWRTDataSet(hKmHwRTDataSetInt2[i]);
+                                       }
+                               }
+                       }
+               }
+       }
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psRGXCreateHWRTDataSetOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyHWRTDataSet(IMG_UINT32 ui32DispatchTableEntry,
+                                 IMG_UINT8 * psRGXDestroyHWRTDataSetIN_UI8,
+                                 IMG_UINT8 * psRGXDestroyHWRTDataSetOUT_UI8,
+                                 CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET *psRGXDestroyHWRTDataSetIN =
+           (PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET *)
+           IMG_OFFSET_ADDR(psRGXDestroyHWRTDataSetIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET *psRGXDestroyHWRTDataSetOUT =
+           (PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET *)
+           IMG_OFFSET_ADDR(psRGXDestroyHWRTDataSetOUT_UI8, 0);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psHandleBase);
+
+       psRGXDestroyHWRTDataSetOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+                                             (IMG_HANDLE) psRGXDestroyHWRTDataSetIN->
+                                             hKmHwRTDataSet,
+                                             PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET);
+       if (unlikely
+           ((psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_OK)
+            && (psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL)
+            && (psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s",
+                        __func__, PVRSRVGetErrorString(psRGXDestroyHWRTDataSetOUT->eError)));
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXDestroyHWRTDataSet_exit;
+       }
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+RGXDestroyHWRTDataSet_exit:
+
+       return 0;
+}
+
+static PVRSRV_ERROR _RGXCreateZSBufferpssZSBufferKMIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = RGXDestroyZSBufferKM((RGX_ZSBUFFER_DATA *) pvData);
+       return eError;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXCreateZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+                             IMG_UINT8 * psRGXCreateZSBufferIN_UI8,
+                             IMG_UINT8 * psRGXCreateZSBufferOUT_UI8,
+                             CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER *psRGXCreateZSBufferIN =
+           (PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXCreateZSBufferIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER *psRGXCreateZSBufferOUT =
+           (PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXCreateZSBufferOUT_UI8, 0);
+
+       IMG_HANDLE hReservation = psRGXCreateZSBufferIN->hReservation;
+       DEVMEMINT_RESERVATION *psReservationInt = NULL;
+       IMG_HANDLE hPMR = psRGXCreateZSBufferIN->hPMR;
+       PMR *psPMRInt = NULL;
+       RGX_ZSBUFFER_DATA *pssZSBufferKMInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXCreateZSBufferOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psReservationInt,
+                                      hReservation,
+                                      PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE);
+       if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXCreateZSBuffer_exit;
+       }
+
+       /* Look up the address from the handle */
+       psRGXCreateZSBufferOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRInt,
+                                      hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXCreateZSBuffer_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXCreateZSBufferOUT->eError =
+           RGXCreateZSBufferKM(psConnection, OSGetDevNode(psConnection),
+                               psReservationInt,
+                               psPMRInt, psRGXCreateZSBufferIN->uiMapFlags, &pssZSBufferKMInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK))
+       {
+               goto RGXCreateZSBuffer_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psRGXCreateZSBufferOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                                                  &psRGXCreateZSBufferOUT->
+                                                                  hsZSBufferKM,
+                                                                  (void *)pssZSBufferKMInt,
+                                                                  PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+                                                                  PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                                                  (PFN_HANDLE_RELEASE) &
+                                                                  _RGXCreateZSBufferpssZSBufferKMIntRelease);
+       if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXCreateZSBuffer_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+RGXCreateZSBuffer_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psReservationInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+       }
+
+       /* Unreference the previously looked up handle */
+       if (psPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       if (psRGXCreateZSBufferOUT->eError != PVRSRV_OK)
+       {
+               if (pssZSBufferKMInt)
+               {
+                       RGXDestroyZSBufferKM(pssZSBufferKMInt);
+               }
+       }
+
+       return 0;
+}
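+
+/*
+ * The bridge entry points in this file share a common shape: resolve client
+ * handles to kernel pointers under the connection's handle lock, drop the
+ * lock, call the server-side KM function and, for create-style calls,
+ * allocate a handle for the returned object with a release callback so the
+ * exit path can release looked-up handles and undo a partially completed
+ * call on error.
+ */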
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+                              IMG_UINT8 * psRGXDestroyZSBufferIN_UI8,
+                              IMG_UINT8 * psRGXDestroyZSBufferOUT_UI8,
+                              CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferIN =
+           (PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER *) IMG_OFFSET_ADDR(psRGXDestroyZSBufferIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferOUT =
+           (PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER *) IMG_OFFSET_ADDR(psRGXDestroyZSBufferOUT_UI8,
+                                                                    0);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psHandleBase);
+
+       psRGXDestroyZSBufferOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+                                             (IMG_HANDLE) psRGXDestroyZSBufferIN->
+                                             hsZSBufferMemDesc,
+                                             PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+       if (unlikely
+           ((psRGXDestroyZSBufferOUT->eError != PVRSRV_OK)
+            && (psRGXDestroyZSBufferOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL)
+            && (psRGXDestroyZSBufferOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s",
+                        __func__, PVRSRVGetErrorString(psRGXDestroyZSBufferOUT->eError)));
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXDestroyZSBuffer_exit;
+       }
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+RGXDestroyZSBuffer_exit:
+
+       return 0;
+}
+
+static PVRSRV_ERROR _RGXPopulateZSBufferpssPopulationIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = RGXUnpopulateZSBufferKM((RGX_POPULATION *) pvData);
+       return eError;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXPopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+                               IMG_UINT8 * psRGXPopulateZSBufferIN_UI8,
+                               IMG_UINT8 * psRGXPopulateZSBufferOUT_UI8,
+                               CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferIN =
+           (PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXPopulateZSBufferIN_UI8,
+                                                                    0);
+       PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferOUT =
+           (PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXPopulateZSBufferOUT_UI8,
+                                                                     0);
+
+       IMG_HANDLE hsZSBufferKM = psRGXPopulateZSBufferIN->hsZSBufferKM;
+       RGX_ZSBUFFER_DATA *pssZSBufferKMInt = NULL;
+       RGX_POPULATION *pssPopulationInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXPopulateZSBufferOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&pssZSBufferKMInt,
+                                      hsZSBufferKM,
+                                      PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, IMG_TRUE);
+       if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXPopulateZSBuffer_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXPopulateZSBufferOUT->eError =
+           RGXPopulateZSBufferKM(pssZSBufferKMInt, &pssPopulationInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK))
+       {
+               goto RGXPopulateZSBuffer_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psRGXPopulateZSBufferOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                                                    &psRGXPopulateZSBufferOUT->
+                                                                    hsPopulation,
+                                                                    (void *)pssPopulationInt,
+                                                                    PVRSRV_HANDLE_TYPE_RGX_POPULATION,
+                                                                    PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                                                    (PFN_HANDLE_RELEASE) &
+                                                                    _RGXPopulateZSBufferpssPopulationIntRelease);
+       if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXPopulateZSBuffer_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+RGXPopulateZSBuffer_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (pssZSBufferKMInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hsZSBufferKM, PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       if (psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)
+       {
+               if (pssPopulationInt)
+               {
+                       RGXUnpopulateZSBufferKM(pssPopulationInt);
+               }
+       }
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXUnpopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+                                 IMG_UINT8 * psRGXUnpopulateZSBufferIN_UI8,
+                                 IMG_UINT8 * psRGXUnpopulateZSBufferOUT_UI8,
+                                 CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferIN =
+           (PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER *)
+           IMG_OFFSET_ADDR(psRGXUnpopulateZSBufferIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferOUT =
+           (PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER *)
+           IMG_OFFSET_ADDR(psRGXUnpopulateZSBufferOUT_UI8, 0);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psHandleBase);
+
+       psRGXUnpopulateZSBufferOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+                                             (IMG_HANDLE) psRGXUnpopulateZSBufferIN->hsPopulation,
+                                             PVRSRV_HANDLE_TYPE_RGX_POPULATION);
+       if (unlikely((psRGXUnpopulateZSBufferOUT->eError != PVRSRV_OK) &&
+                    (psRGXUnpopulateZSBufferOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+                    (psRGXUnpopulateZSBufferOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s",
+                        __func__, PVRSRVGetErrorString(psRGXUnpopulateZSBufferOUT->eError)));
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXUnpopulateZSBuffer_exit;
+       }
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+RGXUnpopulateZSBuffer_exit:
+
+       return 0;
+}
+
+static PVRSRV_ERROR _RGXCreateFreeListpsCleanupCookieIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = RGXDestroyFreeList((RGX_FREELIST *) pvData);
+       return eError;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXCreateFreeList(IMG_UINT32 ui32DispatchTableEntry,
+                             IMG_UINT8 * psRGXCreateFreeListIN_UI8,
+                             IMG_UINT8 * psRGXCreateFreeListOUT_UI8,
+                             CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXCREATEFREELIST *psRGXCreateFreeListIN =
+           (PVRSRV_BRIDGE_IN_RGXCREATEFREELIST *) IMG_OFFSET_ADDR(psRGXCreateFreeListIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST *psRGXCreateFreeListOUT =
+           (PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST *) IMG_OFFSET_ADDR(psRGXCreateFreeListOUT_UI8, 0);
+
+       IMG_HANDLE hMemCtxPrivData = psRGXCreateFreeListIN->hMemCtxPrivData;
+       IMG_HANDLE hMemCtxPrivDataInt = NULL;
+       IMG_HANDLE hsGlobalFreeList = psRGXCreateFreeListIN->hsGlobalFreeList;
+       RGX_FREELIST *pssGlobalFreeListInt = NULL;
+       IMG_HANDLE hsFreeListPMR = psRGXCreateFreeListIN->hsFreeListPMR;
+       PMR *pssFreeListPMRInt = NULL;
+       RGX_FREELIST *psCleanupCookieInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXCreateFreeListOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&hMemCtxPrivDataInt,
+                                      hMemCtxPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE);
+       if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXCreateFreeList_exit;
+       }
+
+       if (psRGXCreateFreeListIN->hsGlobalFreeList)
+       {
+               /* Look up the address from the handle */
+               psRGXCreateFreeListOUT->eError =
+                   PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                              (void **)&pssGlobalFreeListInt,
+                                              hsGlobalFreeList,
+                                              PVRSRV_HANDLE_TYPE_RGX_FREELIST, IMG_TRUE);
+               if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK))
+               {
+                       UnlockHandle(psConnection->psHandleBase);
+                       goto RGXCreateFreeList_exit;
+               }
+       }
+
+       /* Look up the address from the handle */
+       psRGXCreateFreeListOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&pssFreeListPMRInt,
+                                      hsFreeListPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXCreateFreeList_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXCreateFreeListOUT->eError =
+           RGXCreateFreeList(psConnection, OSGetDevNode(psConnection),
+                             hMemCtxPrivDataInt,
+                             psRGXCreateFreeListIN->ui32MaxFLPages,
+                             psRGXCreateFreeListIN->ui32InitFLPages,
+                             psRGXCreateFreeListIN->ui32GrowFLPages,
+                             psRGXCreateFreeListIN->ui32GrowParamThreshold,
+                             pssGlobalFreeListInt,
+                             psRGXCreateFreeListIN->bbFreeListCheck,
+                             psRGXCreateFreeListIN->spsFreeListDevVAddr,
+                             pssFreeListPMRInt,
+                             psRGXCreateFreeListIN->uiPMROffset, &psCleanupCookieInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK))
+       {
+               goto RGXCreateFreeList_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psRGXCreateFreeListOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                                                  &psRGXCreateFreeListOUT->
+                                                                  hCleanupCookie,
+                                                                  (void *)psCleanupCookieInt,
+                                                                  PVRSRV_HANDLE_TYPE_RGX_FREELIST,
+                                                                  PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                                                  (PFN_HANDLE_RELEASE) &
+                                                                  _RGXCreateFreeListpsCleanupCookieIntRelease);
+       if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXCreateFreeList_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+RGXCreateFreeList_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (hMemCtxPrivDataInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hMemCtxPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+       }
+
+       if (psRGXCreateFreeListIN->hsGlobalFreeList)
+       {
+
+               /* Unreference the previously looked up handle */
+               if (pssGlobalFreeListInt)
+               {
+                       PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                                   hsGlobalFreeList,
+                                                   PVRSRV_HANDLE_TYPE_RGX_FREELIST);
+               }
+       }
+
+       /* Unreference the previously looked up handle */
+       if (pssFreeListPMRInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hsFreeListPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       if (psRGXCreateFreeListOUT->eError != PVRSRV_OK)
+       {
+               if (psCleanupCookieInt)
+               {
+                       RGXDestroyFreeList(psCleanupCookieInt);
+               }
+       }
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyFreeList(IMG_UINT32 ui32DispatchTableEntry,
+                              IMG_UINT8 * psRGXDestroyFreeListIN_UI8,
+                              IMG_UINT8 * psRGXDestroyFreeListOUT_UI8,
+                              CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST *psRGXDestroyFreeListIN =
+           (PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST *) IMG_OFFSET_ADDR(psRGXDestroyFreeListIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST *psRGXDestroyFreeListOUT =
+           (PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST *) IMG_OFFSET_ADDR(psRGXDestroyFreeListOUT_UI8,
+                                                                    0);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psHandleBase);
+
+       psRGXDestroyFreeListOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+                                             (IMG_HANDLE) psRGXDestroyFreeListIN->hCleanupCookie,
+                                             PVRSRV_HANDLE_TYPE_RGX_FREELIST);
+       if (unlikely((psRGXDestroyFreeListOUT->eError != PVRSRV_OK) &&
+                    (psRGXDestroyFreeListOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+                    (psRGXDestroyFreeListOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s",
+                        __func__, PVRSRVGetErrorString(psRGXDestroyFreeListOUT->eError)));
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXDestroyFreeList_exit;
+       }
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+RGXDestroyFreeList_exit:
+
+       return 0;
+}
+
+static PVRSRV_ERROR _RGXCreateRenderContextpsRenderContextIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = PVRSRVRGXDestroyRenderContextKM((RGX_SERVER_RENDER_CONTEXT *) pvData);
+       return eError;
+}
+
+static_assert(RGXFWIF_RF_CMD_SIZE <= IMG_UINT32_MAX,
+             "RGXFWIF_RF_CMD_SIZE must not be larger than IMG_UINT32_MAX");
+static_assert(RGXFWIF_STATIC_RENDERCONTEXT_SIZE <= IMG_UINT32_MAX,
+             "RGXFWIF_STATIC_RENDERCONTEXT_SIZE must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeRGXCreateRenderContext(IMG_UINT32 ui32DispatchTableEntry,
+                                  IMG_UINT8 * psRGXCreateRenderContextIN_UI8,
+                                  IMG_UINT8 * psRGXCreateRenderContextOUT_UI8,
+                                  CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextIN =
+           (PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT *)
+           IMG_OFFSET_ADDR(psRGXCreateRenderContextIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextOUT =
+           (PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT *)
+           IMG_OFFSET_ADDR(psRGXCreateRenderContextOUT_UI8, 0);
+
+       IMG_BYTE *ui8FrameworkCmdInt = NULL;
+       IMG_HANDLE hPrivData = psRGXCreateRenderContextIN->hPrivData;
+       IMG_HANDLE hPrivDataInt = NULL;
+       IMG_BYTE *ui8StaticRenderContextStateInt = NULL;
+       RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) +
+           ((IMG_UINT64) psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize *
+            sizeof(IMG_BYTE)) + 0;
+
+       if (unlikely(psRGXCreateRenderContextIN->ui32FrameworkCmdSize > RGXFWIF_RF_CMD_SIZE))
+       {
+               psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto RGXCreateRenderContext_exit;
+       }
+
+       if (unlikely
+           (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize >
+            RGXFWIF_STATIC_RENDERCONTEXT_SIZE))
+       {
+               psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto RGXCreateRenderContext_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto RGXCreateRenderContext_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psRGXCreateRenderContextIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateRenderContextIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto RGXCreateRenderContext_exit;
+                       }
+               }
+       }
+
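+       /* Carve the per-array staging areas out of pArrayArgsBuffer at
+        * ui32NextOffset and copy each user-supplied array into its slot. */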
+       if (psRGXCreateRenderContextIN->ui32FrameworkCmdSize != 0)
+       {
+               ui8FrameworkCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE);
+       }
+
+       /* Copy the data over */
+       if (psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui8FrameworkCmdInt,
+                    (const void __user *)psRGXCreateRenderContextIN->pui8FrameworkCmd,
+                    psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) !=
+                   PVRSRV_OK)
+               {
+                       psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXCreateRenderContext_exit;
+               }
+       }
+       if (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize != 0)
+       {
+               ui8StaticRenderContextStateInt =
+                   (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * sizeof(IMG_BYTE);
+       }
+
+       /* Copy the data over */
+       if (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * sizeof(IMG_BYTE) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui8StaticRenderContextStateInt,
+                    (const void __user *)psRGXCreateRenderContextIN->pui8StaticRenderContextState,
+                    psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize *
+                    sizeof(IMG_BYTE)) != PVRSRV_OK)
+               {
+                       psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXCreateRenderContext_exit;
+               }
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXCreateRenderContextOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&hPrivDataInt,
+                                      hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE);
+       if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXCreateRenderContext_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXCreateRenderContextOUT->eError =
+           PVRSRVRGXCreateRenderContextKM(psConnection, OSGetDevNode(psConnection),
+                                          psRGXCreateRenderContextIN->ui32Priority,
+                                          psRGXCreateRenderContextIN->sVDMCallStackAddr,
+                                          psRGXCreateRenderContextIN->ui32ui32CallStackDepth,
+                                          psRGXCreateRenderContextIN->ui32FrameworkCmdSize,
+                                          ui8FrameworkCmdInt,
+                                          hPrivDataInt,
+                                          psRGXCreateRenderContextIN->
+                                          ui32StaticRenderContextStateSize,
+                                          ui8StaticRenderContextStateInt,
+                                          psRGXCreateRenderContextIN->ui32PackedCCBSizeU8888,
+                                          psRGXCreateRenderContextIN->ui32ContextFlags,
+                                          psRGXCreateRenderContextIN->ui64RobustnessAddress,
+                                          psRGXCreateRenderContextIN->ui32MaxTADeadlineMS,
+                                          psRGXCreateRenderContextIN->ui32Max3DDeadlineMS,
+                                          &psRenderContextInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK))
+       {
+               goto RGXCreateRenderContext_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psRGXCreateRenderContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                                                       &psRGXCreateRenderContextOUT->
+                                                                       hRenderContext,
+                                                                       (void *)psRenderContextInt,
+                                                                       PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+                                                                       PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                                                       (PFN_HANDLE_RELEASE) &
+                                                                       _RGXCreateRenderContextpsRenderContextIntRelease);
+       if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXCreateRenderContext_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+RGXCreateRenderContext_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (hPrivDataInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       if (psRGXCreateRenderContextOUT->eError != PVRSRV_OK)
+       {
+               if (psRenderContextInt)
+               {
+                       PVRSRVRGXDestroyRenderContextKM(psRenderContextInt);
+               }
+       }
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psRGXCreateRenderContextOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyRenderContext(IMG_UINT32 ui32DispatchTableEntry,
+                                   IMG_UINT8 * psRGXDestroyRenderContextIN_UI8,
+                                   IMG_UINT8 * psRGXDestroyRenderContextOUT_UI8,
+                                   CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextIN =
+           (PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT *)
+           IMG_OFFSET_ADDR(psRGXDestroyRenderContextIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextOUT =
+           (PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT *)
+           IMG_OFFSET_ADDR(psRGXDestroyRenderContextOUT_UI8, 0);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psHandleBase);
+
+       psRGXDestroyRenderContextOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+                                             (IMG_HANDLE) psRGXDestroyRenderContextIN->
+                                             hCleanupCookie,
+                                             PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+       if (unlikely
+           ((psRGXDestroyRenderContextOUT->eError != PVRSRV_OK)
+            && (psRGXDestroyRenderContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL)
+            && (psRGXDestroyRenderContextOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s",
+                        __func__, PVRSRVGetErrorString(psRGXDestroyRenderContextOUT->eError)));
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXDestroyRenderContext_exit;
+       }
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+RGXDestroyRenderContext_exit:
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXSetRenderContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+                                       IMG_UINT8 * psRGXSetRenderContextPriorityIN_UI8,
+                                       IMG_UINT8 * psRGXSetRenderContextPriorityOUT_UI8,
+                                       CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY *psRGXSetRenderContextPriorityIN =
+           (PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY *)
+           IMG_OFFSET_ADDR(psRGXSetRenderContextPriorityIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY *psRGXSetRenderContextPriorityOUT =
+           (PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY *)
+           IMG_OFFSET_ADDR(psRGXSetRenderContextPriorityOUT_UI8, 0);
+
+       IMG_HANDLE hRenderContext = psRGXSetRenderContextPriorityIN->hRenderContext;
+       RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXSetRenderContextPriorityOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psRenderContextInt,
+                                      hRenderContext,
+                                      PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE);
+       if (unlikely(psRGXSetRenderContextPriorityOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXSetRenderContextPriority_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXSetRenderContextPriorityOUT->eError =
+           PVRSRVRGXSetRenderContextPriorityKM(psConnection, OSGetDevNode(psConnection),
+                                               psRenderContextInt,
+                                               psRGXSetRenderContextPriorityIN->ui32Priority);
+
+RGXSetRenderContextPriority_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psRenderContextInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hRenderContext,
+                                           PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXRenderContextStalled(IMG_UINT32 ui32DispatchTableEntry,
+                                   IMG_UINT8 * psRGXRenderContextStalledIN_UI8,
+                                   IMG_UINT8 * psRGXRenderContextStalledOUT_UI8,
+                                   CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED *psRGXRenderContextStalledIN =
+           (PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED *)
+           IMG_OFFSET_ADDR(psRGXRenderContextStalledIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED *psRGXRenderContextStalledOUT =
+           (PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED *)
+           IMG_OFFSET_ADDR(psRGXRenderContextStalledOUT_UI8, 0);
+
+       IMG_HANDLE hRenderContext = psRGXRenderContextStalledIN->hRenderContext;
+       RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXRenderContextStalledOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psRenderContextInt,
+                                      hRenderContext,
+                                      PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE);
+       if (unlikely(psRGXRenderContextStalledOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXRenderContextStalled_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXRenderContextStalledOUT->eError = RGXRenderContextStalledKM(psRenderContextInt);
+
+RGXRenderContextStalled_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psRenderContextInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hRenderContext,
+                                           PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX,
+             "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX");
+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX,
+             "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX");
+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX,
+             "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX");
+static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX,
+             "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX");
+static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX,
+             "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX");
+static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX,
+             "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX");
+static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX,
+             "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX");
+static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX,
+             "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX");
+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX,
+             "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeRGXKickTA3D2(IMG_UINT32 ui32DispatchTableEntry,
+                        IMG_UINT8 * psRGXKickTA3D2IN_UI8,
+                        IMG_UINT8 * psRGXKickTA3D2OUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXKICKTA3D2 *psRGXKickTA3D2IN =
+           (PVRSRV_BRIDGE_IN_RGXKICKTA3D2 *) IMG_OFFSET_ADDR(psRGXKickTA3D2IN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXKICKTA3D2 *psRGXKickTA3D2OUT =
+           (PVRSRV_BRIDGE_OUT_RGXKICKTA3D2 *) IMG_OFFSET_ADDR(psRGXKickTA3D2OUT_UI8, 0);
+
+       IMG_HANDLE hRenderContext = psRGXKickTA3D2IN->hRenderContext;
+       RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL;
+       SYNC_PRIMITIVE_BLOCK **psClientTAFenceSyncPrimBlockInt = NULL;
+       IMG_HANDLE *hClientTAFenceSyncPrimBlockInt2 = NULL;
+       IMG_UINT32 *ui32ClientTAFenceSyncOffsetInt = NULL;
+       IMG_UINT32 *ui32ClientTAFenceValueInt = NULL;
+       SYNC_PRIMITIVE_BLOCK **psClientTAUpdateSyncPrimBlockInt = NULL;
+       IMG_HANDLE *hClientTAUpdateSyncPrimBlockInt2 = NULL;
+       IMG_UINT32 *ui32ClientTAUpdateSyncOffsetInt = NULL;
+       IMG_UINT32 *ui32ClientTAUpdateValueInt = NULL;
+       SYNC_PRIMITIVE_BLOCK **psClient3DUpdateSyncPrimBlockInt = NULL;
+       IMG_HANDLE *hClient3DUpdateSyncPrimBlockInt2 = NULL;
+       IMG_UINT32 *ui32Client3DUpdateSyncOffsetInt = NULL;
+       IMG_UINT32 *ui32Client3DUpdateValueInt = NULL;
+       IMG_HANDLE hPRFenceUFOSyncPrimBlock = psRGXKickTA3D2IN->hPRFenceUFOSyncPrimBlock;
+       SYNC_PRIMITIVE_BLOCK *psPRFenceUFOSyncPrimBlockInt = NULL;
+       IMG_CHAR *uiUpdateFenceNameInt = NULL;
+       IMG_CHAR *uiUpdateFenceName3DInt = NULL;
+       IMG_BYTE *ui8TACmdInt = NULL;
+       IMG_BYTE *ui83DPRCmdInt = NULL;
+       IMG_BYTE *ui83DCmdInt = NULL;
+       IMG_HANDLE hKMHWRTDataSet = psRGXKickTA3D2IN->hKMHWRTDataSet;
+       RGX_KM_HW_RT_DATASET *psKMHWRTDataSetInt = NULL;
+       IMG_HANDLE hZSBuffer = psRGXKickTA3D2IN->hZSBuffer;
+       RGX_ZSBUFFER_DATA *psZSBufferInt = NULL;
+       IMG_HANDLE hMSAAScratchBuffer = psRGXKickTA3D2IN->hMSAAScratchBuffer;
+       RGX_ZSBUFFER_DATA *psMSAAScratchBufferInt = NULL;
+       IMG_UINT32 *ui32SyncPMRFlagsInt = NULL;
+       PMR **psSyncPMRsInt = NULL;
+       IMG_HANDLE *hSyncPMRsInt2 = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount *
+            sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+           ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) +
+           ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) +
+           ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) +
+           ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount *
+            sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+           ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) +
+           ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) +
+           ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) +
+           ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount *
+            sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+           ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) +
+           ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) +
+           ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) +
+           ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) +
+           ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) +
+           ((IMG_UINT64) psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE)) +
+           ((IMG_UINT64) psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE)) +
+           ((IMG_UINT64) psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE)) +
+           ((IMG_UINT64) psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) +
+           ((IMG_UINT64) psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *)) +
+           ((IMG_UINT64) psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0;
+
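+       /* Validate each user-supplied count against its compile-time maximum,
+        * and the 64-bit total against IMG_UINT32_MAX, before truncating the
+        * size to 32 bits for the staging allocation below. */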
+       if (unlikely(psRGXKickTA3D2IN->ui32ClientTAFenceCount > PVRSRV_MAX_SYNCS))
+       {
+               psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto RGXKickTA3D2_exit;
+       }
+
+       if (unlikely(psRGXKickTA3D2IN->ui32ClientTAUpdateCount > PVRSRV_MAX_SYNCS))
+       {
+               psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto RGXKickTA3D2_exit;
+       }
+
+       if (unlikely(psRGXKickTA3D2IN->ui32Client3DUpdateCount > PVRSRV_MAX_SYNCS))
+       {
+               psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto RGXKickTA3D2_exit;
+       }
+
+       if (unlikely(psRGXKickTA3D2IN->ui32TACmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE))
+       {
+               psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto RGXKickTA3D2_exit;
+       }
+
+       if (unlikely(psRGXKickTA3D2IN->ui323DPRCmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE))
+       {
+               psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto RGXKickTA3D2_exit;
+       }
+
+       if (unlikely(psRGXKickTA3D2IN->ui323DCmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE))
+       {
+               psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto RGXKickTA3D2_exit;
+       }
+
+       if (unlikely(psRGXKickTA3D2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS))
+       {
+               psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto RGXKickTA3D2_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto RGXKickTA3D2_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psRGXKickTA3D2IN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickTA3D2IN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto RGXKickTA3D2_exit;
+                       }
+               }
+       }
+
+       if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0)
+       {
+               psClientTAFenceSyncPrimBlockInt =
+                   (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               OSCachedMemSet(psClientTAFenceSyncPrimBlockInt, 0,
+                              psRGXKickTA3D2IN->ui32ClientTAFenceCount *
+                              sizeof(SYNC_PRIMITIVE_BLOCK *));
+               ui32NextOffset +=
+                   psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+               hClientTAFenceSyncPrimBlockInt2 =
+                   (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE);
+       }
+
+       /* Copy the data over */
+       if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, hClientTAFenceSyncPrimBlockInt2,
+                    (const void __user *)psRGXKickTA3D2IN->phClientTAFenceSyncPrimBlock,
+                    psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+               {
+                       psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXKickTA3D2_exit;
+               }
+       }
+       if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0)
+       {
+               ui32ClientTAFenceSyncOffsetInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32ClientTAFenceSyncOffsetInt,
+                    (const void __user *)psRGXKickTA3D2IN->pui32ClientTAFenceSyncOffset,
+                    psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+               {
+                       psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXKickTA3D2_exit;
+               }
+       }
+       if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0)
+       {
+               ui32ClientTAFenceValueInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32ClientTAFenceValueInt,
+                    (const void __user *)psRGXKickTA3D2IN->pui32ClientTAFenceValue,
+                    psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+               {
+                       psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXKickTA3D2_exit;
+               }
+       }
+       if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0)
+       {
+               psClientTAUpdateSyncPrimBlockInt =
+                   (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               OSCachedMemSet(psClientTAUpdateSyncPrimBlockInt, 0,
+                              psRGXKickTA3D2IN->ui32ClientTAUpdateCount *
+                              sizeof(SYNC_PRIMITIVE_BLOCK *));
+               ui32NextOffset +=
+                   psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+               hClientTAUpdateSyncPrimBlockInt2 =
+                   (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE);
+       }
+
+       /* Copy the data over */
+       if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, hClientTAUpdateSyncPrimBlockInt2,
+                    (const void __user *)psRGXKickTA3D2IN->phClientTAUpdateSyncPrimBlock,
+                    psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+               {
+                       psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXKickTA3D2_exit;
+               }
+       }
+       if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0)
+       {
+               ui32ClientTAUpdateSyncOffsetInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32ClientTAUpdateSyncOffsetInt,
+                    (const void __user *)psRGXKickTA3D2IN->pui32ClientTAUpdateSyncOffset,
+                    psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+               {
+                       psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXKickTA3D2_exit;
+               }
+       }
+       if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0)
+       {
+               ui32ClientTAUpdateValueInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32ClientTAUpdateValueInt,
+                    (const void __user *)psRGXKickTA3D2IN->pui32ClientTAUpdateValue,
+                    psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+               {
+                       psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXKickTA3D2_exit;
+               }
+       }
+       if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0)
+       {
+               psClient3DUpdateSyncPrimBlockInt =
+                   (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               OSCachedMemSet(psClient3DUpdateSyncPrimBlockInt, 0,
+                              psRGXKickTA3D2IN->ui32Client3DUpdateCount *
+                              sizeof(SYNC_PRIMITIVE_BLOCK *));
+               ui32NextOffset +=
+                   psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+               hClient3DUpdateSyncPrimBlockInt2 =
+                   (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE);
+       }
+
+       /* Copy the data over */
+       if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, hClient3DUpdateSyncPrimBlockInt2,
+                    (const void __user *)psRGXKickTA3D2IN->phClient3DUpdateSyncPrimBlock,
+                    psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+               {
+                       psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXKickTA3D2_exit;
+               }
+       }
+       if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0)
+       {
+               ui32Client3DUpdateSyncOffsetInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32Client3DUpdateSyncOffsetInt,
+                    (const void __user *)psRGXKickTA3D2IN->pui32Client3DUpdateSyncOffset,
+                    psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+               {
+                       psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXKickTA3D2_exit;
+               }
+       }
+       if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0)
+       {
+               ui32Client3DUpdateValueInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32Client3DUpdateValueInt,
+                    (const void __user *)psRGXKickTA3D2IN->pui32Client3DUpdateValue,
+                    psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+               {
+                       psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXKickTA3D2_exit;
+               }
+       }
+
+       {
+               uiUpdateFenceNameInt =
+                   (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiUpdateFenceNameInt,
+                    (const void __user *)psRGXKickTA3D2IN->puiUpdateFenceName,
+                    PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXKickTA3D2_exit;
+               }
+               ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) -
+                                                   1] = '\0';
+       }
+
+       {
+               uiUpdateFenceName3DInt =
+                   (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiUpdateFenceName3DInt,
+                    (const void __user *)psRGXKickTA3D2IN->puiUpdateFenceName3D,
+                    PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXKickTA3D2_exit;
+               }
+               ((IMG_CHAR *) uiUpdateFenceName3DInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) -
+                                                     1] = '\0';
+       }
+       if (psRGXKickTA3D2IN->ui32TACmdSize != 0)
+       {
+               ui8TACmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE);
+       }
+
+       /* Copy the data over */
+       if (psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui8TACmdInt, (const void __user *)psRGXKickTA3D2IN->pui8TACmd,
+                    psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK)
+               {
+                       psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXKickTA3D2_exit;
+               }
+       }
+       if (psRGXKickTA3D2IN->ui323DPRCmdSize != 0)
+       {
+               ui83DPRCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE);
+       }
+
+       /* Copy the data over */
+       if (psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui83DPRCmdInt, (const void __user *)psRGXKickTA3D2IN->pui83DPRCmd,
+                    psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK)
+               {
+                       psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXKickTA3D2_exit;
+               }
+       }
+       if (psRGXKickTA3D2IN->ui323DCmdSize != 0)
+       {
+               ui83DCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE);
+       }
+
+       /* Copy the data over */
+       if (psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui83DCmdInt, (const void __user *)psRGXKickTA3D2IN->pui83DCmd,
+                    psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK)
+               {
+                       psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXKickTA3D2_exit;
+               }
+       }
+       if (psRGXKickTA3D2IN->ui32SyncPMRCount != 0)
+       {
+               ui32SyncPMRFlagsInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32SyncPMRFlagsInt,
+                    (const void __user *)psRGXKickTA3D2IN->pui32SyncPMRFlags,
+                    psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+               {
+                       psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXKickTA3D2_exit;
+               }
+       }
+       if (psRGXKickTA3D2IN->ui32SyncPMRCount != 0)
+       {
+               psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               OSCachedMemSet(psSyncPMRsInt, 0,
+                              psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *));
+               ui32NextOffset += psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *);
+               hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE);
+       }
+
+       /* Copy the data over */
+       if (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, hSyncPMRsInt2, (const void __user *)psRGXKickTA3D2IN->phSyncPMRs,
+                    psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+               {
+                       psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXKickTA3D2_exit;
+               }
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXKickTA3D2OUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psRenderContextInt,
+                                      hRenderContext,
+                                      PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE);
+       if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXKickTA3D2_exit;
+       }
+
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAFenceCount; i++)
+               {
+                       /* Look up the address from the handle */
+                       psRGXKickTA3D2OUT->eError =
+                           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                                      (void **)&psClientTAFenceSyncPrimBlockInt[i],
+                                                      hClientTAFenceSyncPrimBlockInt2[i],
+                                                      PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+                                                      IMG_TRUE);
+                       if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+                       {
+                               UnlockHandle(psConnection->psHandleBase);
+                               goto RGXKickTA3D2_exit;
+                       }
+               }
+       }
+
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAUpdateCount; i++)
+               {
+                       /* Look up the address from the handle */
+                       psRGXKickTA3D2OUT->eError =
+                           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                                      (void **)
+                                                      &psClientTAUpdateSyncPrimBlockInt[i],
+                                                      hClientTAUpdateSyncPrimBlockInt2[i],
+                                                      PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+                                                      IMG_TRUE);
+                       if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+                       {
+                               UnlockHandle(psConnection->psHandleBase);
+                               goto RGXKickTA3D2_exit;
+                       }
+               }
+       }
+
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < psRGXKickTA3D2IN->ui32Client3DUpdateCount; i++)
+               {
+                       /* Look up the address from the handle */
+                       psRGXKickTA3D2OUT->eError =
+                           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                                      (void **)
+                                                      &psClient3DUpdateSyncPrimBlockInt[i],
+                                                      hClient3DUpdateSyncPrimBlockInt2[i],
+                                                      PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+                                                      IMG_TRUE);
+                       if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+                       {
+                               UnlockHandle(psConnection->psHandleBase);
+                               goto RGXKickTA3D2_exit;
+                       }
+               }
+       }
+
+       /* Look up the address from the handle */
+       psRGXKickTA3D2OUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPRFenceUFOSyncPrimBlockInt,
+                                      hPRFenceUFOSyncPrimBlock,
+                                      PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE);
+       if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXKickTA3D2_exit;
+       }
+
+       if (psRGXKickTA3D2IN->hKMHWRTDataSet)
+       {
+               /* Look up the address from the handle */
+               psRGXKickTA3D2OUT->eError =
+                   PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                              (void **)&psKMHWRTDataSetInt,
+                                              hKMHWRTDataSet,
+                                              PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET, IMG_TRUE);
+               if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+               {
+                       UnlockHandle(psConnection->psHandleBase);
+                       goto RGXKickTA3D2_exit;
+               }
+       }
+
+       if (psRGXKickTA3D2IN->hZSBuffer)
+       {
+               /* Look up the address from the handle */
+               psRGXKickTA3D2OUT->eError =
+                   PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                              (void **)&psZSBufferInt,
+                                              hZSBuffer,
+                                              PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, IMG_TRUE);
+               if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+               {
+                       UnlockHandle(psConnection->psHandleBase);
+                       goto RGXKickTA3D2_exit;
+               }
+       }
+
+       if (psRGXKickTA3D2IN->hMSAAScratchBuffer)
+       {
+               /* Look up the address from the handle */
+               psRGXKickTA3D2OUT->eError =
+                   PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                              (void **)&psMSAAScratchBufferInt,
+                                              hMSAAScratchBuffer,
+                                              PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, IMG_TRUE);
+               if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+               {
+                       UnlockHandle(psConnection->psHandleBase);
+                       goto RGXKickTA3D2_exit;
+               }
+       }
+
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < psRGXKickTA3D2IN->ui32SyncPMRCount; i++)
+               {
+                       /* Look up the address from the handle */
+                       psRGXKickTA3D2OUT->eError =
+                           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                                      (void **)&psSyncPMRsInt[i],
+                                                      hSyncPMRsInt2[i],
+                                                      PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+                       if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+                       {
+                               UnlockHandle(psConnection->psHandleBase);
+                               goto RGXKickTA3D2_exit;
+                       }
+               }
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXKickTA3D2OUT->eError =
+           PVRSRVRGXKickTA3DKM(psRenderContextInt,
+                               psRGXKickTA3D2IN->ui32ClientTAFenceCount,
+                               psClientTAFenceSyncPrimBlockInt,
+                               ui32ClientTAFenceSyncOffsetInt,
+                               ui32ClientTAFenceValueInt,
+                               psRGXKickTA3D2IN->ui32ClientTAUpdateCount,
+                               psClientTAUpdateSyncPrimBlockInt,
+                               ui32ClientTAUpdateSyncOffsetInt,
+                               ui32ClientTAUpdateValueInt,
+                               psRGXKickTA3D2IN->ui32Client3DUpdateCount,
+                               psClient3DUpdateSyncPrimBlockInt,
+                               ui32Client3DUpdateSyncOffsetInt,
+                               ui32Client3DUpdateValueInt,
+                               psPRFenceUFOSyncPrimBlockInt,
+                               psRGXKickTA3D2IN->ui32PRFenceUFOSyncOffset,
+                               psRGXKickTA3D2IN->ui32PRFenceValue,
+                               psRGXKickTA3D2IN->hCheckFence,
+                               psRGXKickTA3D2IN->hUpdateTimeline,
+                               &psRGXKickTA3D2OUT->hUpdateFence,
+                               uiUpdateFenceNameInt,
+                               psRGXKickTA3D2IN->hCheckFence3D,
+                               psRGXKickTA3D2IN->hUpdateTimeline3D,
+                               &psRGXKickTA3D2OUT->hUpdateFence3D,
+                               uiUpdateFenceName3DInt,
+                               psRGXKickTA3D2IN->ui32TACmdSize,
+                               ui8TACmdInt,
+                               psRGXKickTA3D2IN->ui323DPRCmdSize,
+                               ui83DPRCmdInt,
+                               psRGXKickTA3D2IN->ui323DCmdSize,
+                               ui83DCmdInt,
+                               psRGXKickTA3D2IN->ui32ExtJobRef,
+                               psRGXKickTA3D2IN->bbKickTA,
+                               psRGXKickTA3D2IN->bbKickPR,
+                               psRGXKickTA3D2IN->bbKick3D,
+                               psRGXKickTA3D2IN->bbAbort,
+                               psRGXKickTA3D2IN->ui32PDumpFlags,
+                               psKMHWRTDataSetInt,
+                               psZSBufferInt,
+                               psMSAAScratchBufferInt,
+                               psRGXKickTA3D2IN->ui32SyncPMRCount,
+                               ui32SyncPMRFlagsInt,
+                               psSyncPMRsInt,
+                               psRGXKickTA3D2IN->ui32RenderTargetSize,
+                               psRGXKickTA3D2IN->ui32NumberOfDrawCalls,
+                               psRGXKickTA3D2IN->ui32NumberOfIndices,
+                               psRGXKickTA3D2IN->ui32NumberOfMRTs, psRGXKickTA3D2IN->ui64Deadline);
+
+RGXKickTA3D2_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psRenderContextInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hRenderContext,
+                                           PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+       }
+
+       if (hClientTAFenceSyncPrimBlockInt2)
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAFenceCount; i++)
+               {
+
+                       /* Unreference the previously looked up handle */
+                       if (psClientTAFenceSyncPrimBlockInt[i])
+                       {
+                               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                                           hClientTAFenceSyncPrimBlockInt2[i],
+                                                           PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+                       }
+               }
+       }
+
+       if (hClientTAUpdateSyncPrimBlockInt2)
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAUpdateCount; i++)
+               {
+
+                       /* Unreference the previously looked up handle */
+                       if (psClientTAUpdateSyncPrimBlockInt[i])
+                       {
+                               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                                           hClientTAUpdateSyncPrimBlockInt2[i],
+                                                           PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+                       }
+               }
+       }
+
+       if (hClient3DUpdateSyncPrimBlockInt2)
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < psRGXKickTA3D2IN->ui32Client3DUpdateCount; i++)
+               {
+
+                       /* Unreference the previously looked up handle */
+                       if (psClient3DUpdateSyncPrimBlockInt[i])
+                       {
+                               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                                           hClient3DUpdateSyncPrimBlockInt2[i],
+                                                           PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+                       }
+               }
+       }
+
+       /* Unreference the previously looked up handle */
+       if (psPRFenceUFOSyncPrimBlockInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPRFenceUFOSyncPrimBlock,
+                                           PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+       }
+
+       if (psRGXKickTA3D2IN->hKMHWRTDataSet)
+       {
+
+               /* Unreference the previously looked up handle */
+               if (psKMHWRTDataSetInt)
+               {
+                       PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                                   hKMHWRTDataSet,
+                                                   PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET);
+               }
+       }
+
+       if (psRGXKickTA3D2IN->hZSBuffer)
+       {
+
+               /* Unreference the previously looked up handle */
+               if (psZSBufferInt)
+               {
+                       PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                                   hZSBuffer,
+                                                   PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+               }
+       }
+
+       if (psRGXKickTA3D2IN->hMSAAScratchBuffer)
+       {
+
+               /* Unreference the previously looked up handle */
+               if (psMSAAScratchBufferInt)
+               {
+                       PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                                   hMSAAScratchBuffer,
+                                                   PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+               }
+       }
+
+       if (hSyncPMRsInt2)
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < psRGXKickTA3D2IN->ui32SyncPMRCount; i++)
+               {
+
+                       /* Unreference the previously looked up handle */
+                       if (psSyncPMRsInt[i])
+                       {
+                               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                                           hSyncPMRsInt2[i],
+                                                           PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+                       }
+               }
+       }
+       /* Release now we have cleaned up the looked-up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psRGXKickTA3D2OUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXSetRenderContextProperty(IMG_UINT32 ui32DispatchTableEntry,
+                                       IMG_UINT8 * psRGXSetRenderContextPropertyIN_UI8,
+                                       IMG_UINT8 * psRGXSetRenderContextPropertyOUT_UI8,
+                                       CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY *psRGXSetRenderContextPropertyIN =
+           (PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY *)
+           IMG_OFFSET_ADDR(psRGXSetRenderContextPropertyIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY *psRGXSetRenderContextPropertyOUT =
+           (PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY *)
+           IMG_OFFSET_ADDR(psRGXSetRenderContextPropertyOUT_UI8, 0);
+
+       IMG_HANDLE hRenderContext = psRGXSetRenderContextPropertyIN->hRenderContext;
+       RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXSetRenderContextPropertyOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psRenderContextInt,
+                                      hRenderContext,
+                                      PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE);
+       if (unlikely(psRGXSetRenderContextPropertyOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXSetRenderContextProperty_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXSetRenderContextPropertyOUT->eError =
+           PVRSRVRGXSetRenderContextPropertyKM(psRenderContextInt,
+                                               psRGXSetRenderContextPropertyIN->ui32Property,
+                                               psRGXSetRenderContextPropertyIN->ui64Input,
+                                               &psRGXSetRenderContextPropertyOUT->ui64Output);
+
+RGXSetRenderContextProperty_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psRenderContextInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hRenderContext,
+                                           PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+       }
+       /* Release now we have cleaned up the looked-up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitRGXTA3DBridge(void);
+void DeinitRGXTA3DBridge(void);
+
+/*
+ * Register all RGXTA3D functions with services
+ */
+PVRSRV_ERROR InitRGXTA3DBridge(void)
+{
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET,
+                             PVRSRVBridgeRGXCreateHWRTDataSet, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET,
+                             PVRSRVBridgeRGXDestroyHWRTDataSet, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER,
+                             PVRSRVBridgeRGXCreateZSBuffer, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER,
+                             PVRSRVBridgeRGXDestroyZSBuffer, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER,
+                             PVRSRVBridgeRGXPopulateZSBuffer, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER,
+                             PVRSRVBridgeRGXUnpopulateZSBuffer, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST,
+                             PVRSRVBridgeRGXCreateFreeList, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST,
+                             PVRSRVBridgeRGXDestroyFreeList, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT,
+                             PVRSRVBridgeRGXCreateRenderContext, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT,
+                             PVRSRVBridgeRGXDestroyRenderContext, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+                             PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY,
+                             PVRSRVBridgeRGXSetRenderContextPriority, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED,
+                             PVRSRVBridgeRGXRenderContextStalled, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2,
+                             PVRSRVBridgeRGXKickTA3D2, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+                             PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY,
+                             PVRSRVBridgeRGXSetRenderContextProperty, NULL);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXTA3D functions from services
+ */
+void DeinitRGXTA3DBridge(void)
+{
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+                               PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+                               PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+                               PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+                               PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+                               PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY);
+
+}
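
The RGXTA3D entry points above all share one shape: stage any variable-length user arrays into a scratch buffer, resolve each handle to a kernel object under the connection's handle lock, call the matching KM function, then drop the references taken by the lookups in the _exit path. The following is a minimal sketch of that shape, assuming the PVR services headers this file already includes (handle.h, connection_server.h); BridgeCallSkeleton and its single-handle signature are illustrative only, not part of the driver.

static PVRSRV_ERROR BridgeCallSkeleton(CONNECTION_DATA *psConnection,
                                       IMG_HANDLE hRenderContext)
{
        RGX_SERVER_RENDER_CONTEXT *psContextInt = NULL;
        PVRSRV_ERROR eError;

        /* Resolve the handle under the per-connection handle lock,
         * mirroring the handlers above. */
        LockHandle(psConnection->psHandleBase);
        eError = PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
                                            (void **)&psContextInt,
                                            hRenderContext,
                                            PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
                                            IMG_TRUE);
        UnlockHandle(psConnection->psHandleBase);
        if (eError != PVRSRV_OK)
                return eError;

        /* ... the real handlers call the corresponding KM function here ... */

        /* Release the handle looked up above, again under the lock. */
        LockHandle(psConnection->psHandleBase);
        PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
                                    hRenderContext,
                                    PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
        UnlockHandle(psConnection->psHandleBase);

        return eError;
}
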
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxtimerquery_bridge/common_rgxtimerquery_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxtimerquery_bridge/common_rgxtimerquery_bridge.h
new file mode 100644 (file)
index 0000000..34d7c2c
--- /dev/null
@@ -0,0 +1,112 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for rgxtimerquery
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxtimerquery
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXTIMERQUERY_BRIDGE_H
+#define COMMON_RGXTIMERQUERY_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+#define PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_FIRST                  0
+#define PVRSRV_BRIDGE_RGXTIMERQUERY_RGXBEGINTIMERQUERY                 PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXTIMERQUERY_RGXENDTIMERQUERY                   PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXTIMERQUERY_RGXQUERYTIMER                      PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_LAST                   (PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_FIRST+2)
+
+/*******************************************
+            RGXBeginTimerQuery
+ *******************************************/
+
+/* Bridge in structure for RGXBeginTimerQuery */
+typedef struct PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY_TAG
+{
+       IMG_UINT32 ui32QueryId;
+} __packed PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY;
+
+/* Bridge out structure for RGXBeginTimerQuery */
+typedef struct PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY;
+
+/*******************************************
+            RGXEndTimerQuery
+ *******************************************/
+
+/* Bridge in structure for RGXEndTimerQuery */
+typedef struct PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY_TAG
+{
+       IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY;
+
+/* Bridge out structure for RGXEndTimerQuery */
+typedef struct PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY;
+
+/*******************************************
+            RGXQueryTimer
+ *******************************************/
+
+/* Bridge in structure for RGXQueryTimer */
+typedef struct PVRSRV_BRIDGE_IN_RGXQUERYTIMER_TAG
+{
+       IMG_UINT32 ui32QueryId;
+} __packed PVRSRV_BRIDGE_IN_RGXQUERYTIMER;
+
+/* Bridge out structure for RGXQueryTimer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXQUERYTIMER_TAG
+{
+       IMG_UINT64 ui64EndTime;
+       IMG_UINT64 ui64StartTime;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXQUERYTIMER;
+
+#endif /* COMMON_RGXTIMERQUERY_BRIDGE_H */
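
The three in/out structures above are the whole wire format for the timer-query bridge: the client packs an IN structure and the server fills the matching OUT structure. As a small consumer-side illustration, here is a hypothetical helper (not part of the driver) that turns PVRSRV_BRIDGE_OUT_RGXQUERYTIMER into a duration; the timestamp units are whatever the firmware timer reports and are not asserted here.

static PVRSRV_ERROR GetQueryDuration(const PVRSRV_BRIDGE_OUT_RGXQUERYTIMER *psOut,
                                     IMG_UINT64 *pui64Duration)
{
        /* Propagate any failure reported by the server side first. */
        if (psOut->eError != PVRSRV_OK)
                return psOut->eError;

        /* A query whose end precedes its start has not completed sensibly. */
        if (psOut->ui64EndTime < psOut->ui64StartTime)
                return PVRSRV_ERROR_INVALID_PARAMS;

        *pui64Duration = psOut->ui64EndTime - psOut->ui64StartTime;
        return PVRSRV_OK;
}
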
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxtimerquery_bridge/server_rgxtimerquery_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxtimerquery_bridge/server_rgxtimerquery_bridge.c
new file mode 100644 (file)
index 0000000..99e6239
--- /dev/null
@@ -0,0 +1,167 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for rgxtimerquery
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxtimerquery
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxtimerquery.h"
+
+#include "common_rgxtimerquery_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXBeginTimerQuery(IMG_UINT32 ui32DispatchTableEntry,
+                              IMG_UINT8 * psRGXBeginTimerQueryIN_UI8,
+                              IMG_UINT8 * psRGXBeginTimerQueryOUT_UI8,
+                              CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY *psRGXBeginTimerQueryIN =
+           (PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY *) IMG_OFFSET_ADDR(psRGXBeginTimerQueryIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY *psRGXBeginTimerQueryOUT =
+           (PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY *) IMG_OFFSET_ADDR(psRGXBeginTimerQueryOUT_UI8,
+                                                                    0);
+
+       psRGXBeginTimerQueryOUT->eError =
+           PVRSRVRGXBeginTimerQueryKM(psConnection, OSGetDevNode(psConnection),
+                                      psRGXBeginTimerQueryIN->ui32QueryId);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXEndTimerQuery(IMG_UINT32 ui32DispatchTableEntry,
+                            IMG_UINT8 * psRGXEndTimerQueryIN_UI8,
+                            IMG_UINT8 * psRGXEndTimerQueryOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY *psRGXEndTimerQueryIN =
+           (PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY *) IMG_OFFSET_ADDR(psRGXEndTimerQueryIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY *psRGXEndTimerQueryOUT =
+           (PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY *) IMG_OFFSET_ADDR(psRGXEndTimerQueryOUT_UI8, 0);
+
+       PVR_UNREFERENCED_PARAMETER(psRGXEndTimerQueryIN);
+
+       psRGXEndTimerQueryOUT->eError =
+           PVRSRVRGXEndTimerQueryKM(psConnection, OSGetDevNode(psConnection));
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXQueryTimer(IMG_UINT32 ui32DispatchTableEntry,
+                         IMG_UINT8 * psRGXQueryTimerIN_UI8,
+                         IMG_UINT8 * psRGXQueryTimerOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXQUERYTIMER *psRGXQueryTimerIN =
+           (PVRSRV_BRIDGE_IN_RGXQUERYTIMER *) IMG_OFFSET_ADDR(psRGXQueryTimerIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXQUERYTIMER *psRGXQueryTimerOUT =
+           (PVRSRV_BRIDGE_OUT_RGXQUERYTIMER *) IMG_OFFSET_ADDR(psRGXQueryTimerOUT_UI8, 0);
+
+       psRGXQueryTimerOUT->eError =
+           PVRSRVRGXQueryTimerKM(psConnection, OSGetDevNode(psConnection),
+                                 psRGXQueryTimerIN->ui32QueryId,
+                                 &psRGXQueryTimerOUT->ui64StartTime,
+                                 &psRGXQueryTimerOUT->ui64EndTime);
+
+       return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitRGXTIMERQUERYBridge(void);
+void DeinitRGXTIMERQUERYBridge(void);
+
+/*
+ * Register all RGXTIMERQUERY functions with services
+ */
+PVRSRV_ERROR InitRGXTIMERQUERYBridge(void)
+{
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY,
+                             PVRSRV_BRIDGE_RGXTIMERQUERY_RGXBEGINTIMERQUERY,
+                             PVRSRVBridgeRGXBeginTimerQuery, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY,
+                             PVRSRV_BRIDGE_RGXTIMERQUERY_RGXENDTIMERQUERY,
+                             PVRSRVBridgeRGXEndTimerQuery, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY,
+                             PVRSRV_BRIDGE_RGXTIMERQUERY_RGXQUERYTIMER, PVRSRVBridgeRGXQueryTimer,
+                             NULL);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXTIMERQUERY functions from services
+ */
+void DeinitRGXTIMERQUERYBridge(void)
+{
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY,
+                               PVRSRV_BRIDGE_RGXTIMERQUERY_RGXBEGINTIMERQUERY);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY,
+                               PVRSRV_BRIDGE_RGXTIMERQUERY_RGXENDTIMERQUERY);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY,
+                               PVRSRV_BRIDGE_RGXTIMERQUERY_RGXQUERYTIMER);
+
+}
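
The Init/Deinit pair above registers and unregisters the three RGXTIMERQUERY commands by (bridge group, command id). Because the command ids come from common_rgxtimerquery_bridge.h, a compile-time bound check is cheap to keep next to the registrations; the assertion below is only a sketch of such a check, not something the generated file contains.

/* The highest command registered above must stay within the id range that
 * the common header declares for this bridge group. */
static_assert(PVRSRV_BRIDGE_RGXTIMERQUERY_RGXQUERYTIMER <=
              PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_LAST,
              "RGXQUERYTIMER command id exceeds the RGXTIMERQUERY id range");
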
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxtq2_bridge/common_rgxtq2_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxtq2_bridge/common_rgxtq2_bridge.h
new file mode 100644 (file)
index 0000000..9489dda
--- /dev/null
@@ -0,0 +1,228 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for rgxtq2
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxtq2
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXTQ2_BRIDGE_H
+#define COMMON_RGXTQ2_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "pvrsrv_sync_km.h"
+
+#define PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST                 0
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT                       PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT                      PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY                  PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE                     PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2                     PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY                     PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY                 PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY                  PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+7
+#define PVRSRV_BRIDGE_RGXTQ2_CMD_LAST                  (PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+7)
+
+/*******************************************
+            RGXTDMCreateTransferContext
+ *******************************************/
+
+/* Bridge in structure for RGXTDMCreateTransferContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT_TAG
+{
+       IMG_UINT64 ui64RobustnessAddress;
+       IMG_HANDLE hPrivData;
+       IMG_BYTE *pui8FrameworkCmd;
+       IMG_UINT32 ui32ContextFlags;
+       IMG_UINT32 ui32FrameworkCmdSize;
+       IMG_UINT32 ui32PackedCCBSizeU88;
+       IMG_UINT32 ui32Priority;
+} __packed PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT;
+
+/* Bridge out structure for RGXTDMCreateTransferContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT_TAG
+{
+       IMG_HANDLE hTransferContext;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT;
+
+/*******************************************
+            RGXTDMDestroyTransferContext
+ *******************************************/
+
+/* Bridge in structure for RGXTDMDestroyTransferContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT_TAG
+{
+       IMG_HANDLE hTransferContext;
+} __packed PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT;
+
+/* Bridge out structure for RGXTDMDestroyTransferContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT;
+
+/*******************************************
+            RGXTDMSetTransferContextPriority
+ *******************************************/
+
+/* Bridge in structure for RGXTDMSetTransferContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY_TAG
+{
+       IMG_HANDLE hTransferContext;
+       IMG_UINT32 ui32Priority;
+} __packed PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY;
+
+/* Bridge out structure for RGXTDMSetTransferContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY;
+
+/*******************************************
+            RGXTDMNotifyWriteOffsetUpdate
+ *******************************************/
+
+/* Bridge in structure for RGXTDMNotifyWriteOffsetUpdate */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE_TAG
+{
+       IMG_HANDLE hTransferContext;
+       IMG_UINT32 ui32PDumpFlags;
+} __packed PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE;
+
+/* Bridge out structure for RGXTDMNotifyWriteOffsetUpdate */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE;
+
+/*******************************************
+            RGXTDMSubmitTransfer2
+ *******************************************/
+
+/* Bridge in structure for RGXTDMSubmitTransfer2 */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2_TAG
+{
+       IMG_UINT64 ui64DeadlineInus;
+       IMG_HANDLE hTransferContext;
+       IMG_UINT32 *pui32SyncPMRFlags;
+       IMG_UINT32 *pui32UpdateSyncOffset;
+       IMG_UINT32 *pui32UpdateValue;
+       IMG_UINT8 *pui8FWCommand;
+       IMG_CHAR *puiUpdateFenceName;
+       IMG_HANDLE *phSyncPMRs;
+       IMG_HANDLE *phUpdateUFOSyncPrimBlock;
+       PVRSRV_FENCE hCheckFenceFD;
+       PVRSRV_TIMELINE hUpdateTimeline;
+       IMG_UINT32 ui32Characteristic1;
+       IMG_UINT32 ui32Characteristic2;
+       IMG_UINT32 ui32ClientUpdateCount;
+       IMG_UINT32 ui32CommandSize;
+       IMG_UINT32 ui32ExternalJobReference;
+       IMG_UINT32 ui32PDumpFlags;
+       IMG_UINT32 ui32SyncPMRCount;
+} __packed PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2;
+
+/* Bridge out structure for RGXTDMSubmitTransfer2 */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2_TAG
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_FENCE hUpdateFence;
+} __packed PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2;
+
+/*******************************************
+            RGXTDMGetSharedMemory
+ *******************************************/
+
+/* Bridge in structure for RGXTDMGetSharedMemory */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY_TAG
+{
+       IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY;
+
+/* Bridge out structure for RGXTDMGetSharedMemory */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY_TAG
+{
+       IMG_HANDLE hCLIPMRMem;
+       IMG_HANDLE hUSCPMRMem;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY;
+
+/*******************************************
+            RGXTDMReleaseSharedMemory
+ *******************************************/
+
+/* Bridge in structure for RGXTDMReleaseSharedMemory */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY_TAG
+{
+       IMG_HANDLE hPMRMem;
+} __packed PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY;
+
+/* Bridge out structure for RGXTDMReleaseSharedMemory */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY;
+
+/*******************************************
+            RGXTDMSetTransferContextProperty
+ *******************************************/
+
+/* Bridge in structure for RGXTDMSetTransferContextProperty */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY_TAG
+{
+       IMG_UINT64 ui64Input;
+       IMG_HANDLE hTransferContext;
+       IMG_UINT32 ui32Property;
+} __packed PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY;
+
+/* Bridge out structure for RGXTDMSetTransferContextProperty */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY_TAG
+{
+       IMG_UINT64 ui64Output;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY;
+
+#endif /* COMMON_RGXTQ2_BRIDGE_H */
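
Several fields of PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2 are user-space pointers whose lengths are governed by the count and size fields beside them, and the server stages them through OSCopyFromUser in the same way the RGXKickTA3D2 handler earlier in this patch does. The sketch below only illustrates that sizing step under the same 64-bit overflow guard; the pairing of counts to pointers is inferred from the handler code rather than stated by this header, so treat it as an assumption.

static PVRSRV_ERROR SizeTDMSubmitArrays(const PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2 *psIn,
                                        IMG_UINT32 *pui32Total)
{
        /* Accumulate in 64 bits so no single count can wrap the total. */
        IMG_UINT64 ui64Total =
            ((IMG_UINT64) psIn->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) +  /* phUpdateUFOSyncPrimBlock */
            ((IMG_UINT64) psIn->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +  /* pui32UpdateSyncOffset */
            ((IMG_UINT64) psIn->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +  /* pui32UpdateValue */
            ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) +        /* puiUpdateFenceName */
            ((IMG_UINT64) psIn->ui32CommandSize * sizeof(IMG_UINT8)) +         /* pui8FWCommand */
            ((IMG_UINT64) psIn->ui32SyncPMRCount * sizeof(IMG_UINT32)) +       /* pui32SyncPMRFlags */
            ((IMG_UINT64) psIn->ui32SyncPMRCount * sizeof(IMG_HANDLE));        /* phSyncPMRs */

        if (ui64Total > IMG_UINT32_MAX)
                return PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;

        *pui32Total = (IMG_UINT32) ui64Total;
        return PVRSRV_OK;
}
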
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxtq2_bridge/server_rgxtq2_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxtq2_bridge/server_rgxtq2_bridge.c
new file mode 100644 (file)
index 0000000..f73bb90
--- /dev/null
@@ -0,0 +1,1210 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for rgxtq2
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxtq2
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxtdmtransfer.h"
+
+#include "common_rgxtq2_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#include "rgx_bvnc_defs_km.h"
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static PVRSRV_ERROR _RGXTDMCreateTransferContextpsTransferContextIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = PVRSRVRGXTDMDestroyTransferContextKM((RGX_SERVER_TQ_TDM_CONTEXT *) pvData);
+       return eError;
+}
+
+static_assert(RGXFWIF_RF_CMD_SIZE <= IMG_UINT32_MAX,
+             "RGXFWIF_RF_CMD_SIZE must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeRGXTDMCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry,
+                                       IMG_UINT8 * psRGXTDMCreateTransferContextIN_UI8,
+                                       IMG_UINT8 * psRGXTDMCreateTransferContextOUT_UI8,
+                                       CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT *psRGXTDMCreateTransferContextIN =
+           (PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT *)
+           IMG_OFFSET_ADDR(psRGXTDMCreateTransferContextIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT *psRGXTDMCreateTransferContextOUT =
+           (PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT *)
+           IMG_OFFSET_ADDR(psRGXTDMCreateTransferContextOUT_UI8, 0);
+
+       IMG_BYTE *ui8FrameworkCmdInt = NULL;
+       IMG_HANDLE hPrivData = psRGXTDMCreateTransferContextIN->hPrivData;
+       IMG_HANDLE hPrivDataInt = NULL;
+       RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize *
+            sizeof(IMG_BYTE)) + 0;
+
+       if (unlikely(psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize > RGXFWIF_RF_CMD_SIZE))
+       {
+               psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto RGXTDMCreateTransferContext_exit;
+       }
+
+       {
+               PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+               /* Check that device supports the required feature */
+               if ((psDeviceNode->pfnCheckDeviceFeature) &&
+                   !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+                                                        RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+               {
+                       psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+                       goto RGXTDMCreateTransferContext_exit;
+               }
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto RGXTDMCreateTransferContext_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psRGXTDMCreateTransferContextIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer =
+                           (IMG_BYTE *) (void *)psRGXTDMCreateTransferContextIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psRGXTDMCreateTransferContextOUT->eError =
+                                   PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto RGXTDMCreateTransferContext_exit;
+                       }
+               }
+       }
+
+       if (psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize != 0)
+       {
+               ui8FrameworkCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE);
+       }
+
+       /* Copy the data over */
+       if (psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui8FrameworkCmdInt,
+                    (const void __user *)psRGXTDMCreateTransferContextIN->pui8FrameworkCmd,
+                    psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) !=
+                   PVRSRV_OK)
+               {
+                       psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXTDMCreateTransferContext_exit;
+               }
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXTDMCreateTransferContextOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&hPrivDataInt,
+                                      hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE);
+       if (unlikely(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXTDMCreateTransferContext_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXTDMCreateTransferContextOUT->eError =
+           PVRSRVRGXTDMCreateTransferContextKM(psConnection, OSGetDevNode(psConnection),
+                                               psRGXTDMCreateTransferContextIN->ui32Priority,
+                                               psRGXTDMCreateTransferContextIN->
+                                               ui32FrameworkCmdSize, ui8FrameworkCmdInt,
+                                               hPrivDataInt,
+                                               psRGXTDMCreateTransferContextIN->
+                                               ui32PackedCCBSizeU88,
+                                               psRGXTDMCreateTransferContextIN->ui32ContextFlags,
+                                               psRGXTDMCreateTransferContextIN->
+                                               ui64RobustnessAddress, &psTransferContextInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK))
+       {
+               goto RGXTDMCreateTransferContext_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psRGXTDMCreateTransferContextOUT->eError =
+           PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                     &psRGXTDMCreateTransferContextOUT->hTransferContext,
+                                     (void *)psTransferContextInt,
+                                     PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT,
+                                     PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                     (PFN_HANDLE_RELEASE) &
+                                     _RGXTDMCreateTransferContextpsTransferContextIntRelease);
+       if (unlikely(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXTDMCreateTransferContext_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+RGXTDMCreateTransferContext_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (hPrivDataInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+       }
+       /* Release now that the looked-up handles have been cleaned up. */
+       UnlockHandle(psConnection->psHandleBase);
+
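+       /* If an error occurred after the transfer context was created, destroy it
+        * here so the object is not leaked without a handle. */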
+       if (psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)
+       {
+               if (psTransferContextInt)
+               {
+                       PVRSRVRGXTDMDestroyTransferContextKM(psTransferContextInt);
+               }
+       }
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psRGXTDMCreateTransferContextOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
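+       /* Only free the scratch buffer when it was heap-allocated; the spare space
+        * reused from the bridge input buffer must not be freed. */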
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXTDMDestroyTransferContext(IMG_UINT32 ui32DispatchTableEntry,
+                                        IMG_UINT8 * psRGXTDMDestroyTransferContextIN_UI8,
+                                        IMG_UINT8 * psRGXTDMDestroyTransferContextOUT_UI8,
+                                        CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT *psRGXTDMDestroyTransferContextIN =
+           (PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT *)
+           IMG_OFFSET_ADDR(psRGXTDMDestroyTransferContextIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT *psRGXTDMDestroyTransferContextOUT =
+           (PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT *)
+           IMG_OFFSET_ADDR(psRGXTDMDestroyTransferContextOUT_UI8, 0);
+
+       {
+               PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+               /* Check that device supports the required feature */
+               if ((psDeviceNode->pfnCheckDeviceFeature) &&
+                   !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+                                                        RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+               {
+                       psRGXTDMDestroyTransferContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+                       goto RGXTDMDestroyTransferContext_exit;
+               }
+       }
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psHandleBase);
+
+       psRGXTDMDestroyTransferContextOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+                                             (IMG_HANDLE) psRGXTDMDestroyTransferContextIN->
+                                             hTransferContext,
+                                             PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
+       if (unlikely
+           ((psRGXTDMDestroyTransferContextOUT->eError != PVRSRV_OK)
+            && (psRGXTDMDestroyTransferContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL)
+            && (psRGXTDMDestroyTransferContextOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s",
+                        __func__,
+                        PVRSRVGetErrorString(psRGXTDMDestroyTransferContextOUT->eError)));
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXTDMDestroyTransferContext_exit;
+       }
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+RGXTDMDestroyTransferContext_exit:
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXTDMSetTransferContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+                                            IMG_UINT8 * psRGXTDMSetTransferContextPriorityIN_UI8,
+                                            IMG_UINT8 * psRGXTDMSetTransferContextPriorityOUT_UI8,
+                                            CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY *psRGXTDMSetTransferContextPriorityIN =
+           (PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY *)
+           IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPriorityIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY *psRGXTDMSetTransferContextPriorityOUT =
+           (PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY *)
+           IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPriorityOUT_UI8, 0);
+
+       IMG_HANDLE hTransferContext = psRGXTDMSetTransferContextPriorityIN->hTransferContext;
+       RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL;
+
+       {
+               PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+               /* Check that device supports the required feature */
+               if ((psDeviceNode->pfnCheckDeviceFeature) &&
+                   !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+                                                        RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+               {
+                       psRGXTDMSetTransferContextPriorityOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+                       goto RGXTDMSetTransferContextPriority_exit;
+               }
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXTDMSetTransferContextPriorityOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psTransferContextInt,
+                                      hTransferContext,
+                                      PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, IMG_TRUE);
+       if (unlikely(psRGXTDMSetTransferContextPriorityOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXTDMSetTransferContextPriority_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXTDMSetTransferContextPriorityOUT->eError =
+           PVRSRVRGXTDMSetTransferContextPriorityKM(psConnection, OSGetDevNode(psConnection),
+                                                    psTransferContextInt,
+                                                    psRGXTDMSetTransferContextPriorityIN->
+                                                    ui32Priority);
+
+RGXTDMSetTransferContextPriority_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psTransferContextInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hTransferContext,
+                                           PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
+       }
+       /* Release now that the looked-up handles have been cleaned up. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXTDMNotifyWriteOffsetUpdate(IMG_UINT32 ui32DispatchTableEntry,
+                                         IMG_UINT8 * psRGXTDMNotifyWriteOffsetUpdateIN_UI8,
+                                         IMG_UINT8 * psRGXTDMNotifyWriteOffsetUpdateOUT_UI8,
+                                         CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE *psRGXTDMNotifyWriteOffsetUpdateIN =
+           (PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE *)
+           IMG_OFFSET_ADDR(psRGXTDMNotifyWriteOffsetUpdateIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE *psRGXTDMNotifyWriteOffsetUpdateOUT =
+           (PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE *)
+           IMG_OFFSET_ADDR(psRGXTDMNotifyWriteOffsetUpdateOUT_UI8, 0);
+
+       IMG_HANDLE hTransferContext = psRGXTDMNotifyWriteOffsetUpdateIN->hTransferContext;
+       RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL;
+
+       {
+               PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+               /* Check that device supports the required feature */
+               if ((psDeviceNode->pfnCheckDeviceFeature) &&
+                   !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+                                                        RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+               {
+                       psRGXTDMNotifyWriteOffsetUpdateOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+                       goto RGXTDMNotifyWriteOffsetUpdate_exit;
+               }
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXTDMNotifyWriteOffsetUpdateOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psTransferContextInt,
+                                      hTransferContext,
+                                      PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, IMG_TRUE);
+       if (unlikely(psRGXTDMNotifyWriteOffsetUpdateOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXTDMNotifyWriteOffsetUpdate_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXTDMNotifyWriteOffsetUpdateOUT->eError =
+           PVRSRVRGXTDMNotifyWriteOffsetUpdateKM(psTransferContextInt,
+                                                 psRGXTDMNotifyWriteOffsetUpdateIN->
+                                                 ui32PDumpFlags);
+
+RGXTDMNotifyWriteOffsetUpdate_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psTransferContextInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hTransferContext,
+                                           PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
+       }
+       /* Release now that the looked-up handles have been cleaned up. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX,
+             "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX");
+static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX,
+             "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX");
+static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX,
+             "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX");
+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX,
+             "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX");
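+/* PVRSRV_MAX_SYNCS is asserted twice because both the client-update and
+ * sync-PMR arrays below are bounded by it. */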
+
+static IMG_INT
+PVRSRVBridgeRGXTDMSubmitTransfer2(IMG_UINT32 ui32DispatchTableEntry,
+                                 IMG_UINT8 * psRGXTDMSubmitTransfer2IN_UI8,
+                                 IMG_UINT8 * psRGXTDMSubmitTransfer2OUT_UI8,
+                                 CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2 *psRGXTDMSubmitTransfer2IN =
+           (PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2 *)
+           IMG_OFFSET_ADDR(psRGXTDMSubmitTransfer2IN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2 *psRGXTDMSubmitTransfer2OUT =
+           (PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2 *)
+           IMG_OFFSET_ADDR(psRGXTDMSubmitTransfer2OUT_UI8, 0);
+
+       IMG_HANDLE hTransferContext = psRGXTDMSubmitTransfer2IN->hTransferContext;
+       RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL;
+       SYNC_PRIMITIVE_BLOCK **psUpdateUFOSyncPrimBlockInt = NULL;
+       IMG_HANDLE *hUpdateUFOSyncPrimBlockInt2 = NULL;
+       IMG_UINT32 *ui32UpdateSyncOffsetInt = NULL;
+       IMG_UINT32 *ui32UpdateValueInt = NULL;
+       IMG_CHAR *uiUpdateFenceNameInt = NULL;
+       IMG_UINT8 *ui8FWCommandInt = NULL;
+       IMG_UINT32 *ui32SyncPMRFlagsInt = NULL;
+       PMR **psSyncPMRsInt = NULL;
+       IMG_HANDLE *hSyncPMRsInt2 = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount *
+            sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+           ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) +
+           ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+           ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+           ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) +
+           ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32CommandSize * sizeof(IMG_UINT8)) +
+           ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) +
+           ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *)) +
+           ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0;
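+       /* The scratch-buffer size is accumulated in 64 bits so user-supplied counts
+        * cannot wrap a 32-bit sum; it is checked against IMG_UINT32_MAX below
+        * before being narrowed for the allocation. */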
+
+       if (unlikely(psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount > PVRSRV_MAX_SYNCS))
+       {
+               psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto RGXTDMSubmitTransfer2_exit;
+       }
+
+       if (unlikely
+           (psRGXTDMSubmitTransfer2IN->ui32CommandSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE))
+       {
+               psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto RGXTDMSubmitTransfer2_exit;
+       }
+
+       if (unlikely(psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS))
+       {
+               psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto RGXTDMSubmitTransfer2_exit;
+       }
+
+       {
+               PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+               /* Check that device supports the required feature */
+               if ((psDeviceNode->pfnCheckDeviceFeature) &&
+                   !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+                                                        RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+               {
+                       psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+                       goto RGXTDMSubmitTransfer2_exit;
+               }
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto RGXTDMSubmitTransfer2_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psRGXTDMSubmitTransfer2IN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXTDMSubmitTransfer2IN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto RGXTDMSubmitTransfer2_exit;
+                       }
+               }
+       }
+
+       if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount != 0)
+       {
+               psUpdateUFOSyncPrimBlockInt =
+                   (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
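+               /* Zero the pointer array so the exit path can tell which handles
+                * were actually looked up and need releasing. */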
+               OSCachedMemSet(psUpdateUFOSyncPrimBlockInt, 0,
+                              psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount *
+                              sizeof(SYNC_PRIMITIVE_BLOCK *));
+               ui32NextOffset +=
+                   psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount *
+                   sizeof(SYNC_PRIMITIVE_BLOCK *);
+               hUpdateUFOSyncPrimBlockInt2 =
+                   (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE);
+       }
+
+       /* Copy the data over */
+       if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, hUpdateUFOSyncPrimBlockInt2,
+                    (const void __user *)psRGXTDMSubmitTransfer2IN->phUpdateUFOSyncPrimBlock,
+                    psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) !=
+                   PVRSRV_OK)
+               {
+                       psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXTDMSubmitTransfer2_exit;
+               }
+       }
+       if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount != 0)
+       {
+               ui32UpdateSyncOffsetInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32UpdateSyncOffsetInt,
+                    (const void __user *)psRGXTDMSubmitTransfer2IN->pui32UpdateSyncOffset,
+                    psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) !=
+                   PVRSRV_OK)
+               {
+                       psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXTDMSubmitTransfer2_exit;
+               }
+       }
+       if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount != 0)
+       {
+               ui32UpdateValueInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32UpdateValueInt,
+                    (const void __user *)psRGXTDMSubmitTransfer2IN->pui32UpdateValue,
+                    psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) !=
+                   PVRSRV_OK)
+               {
+                       psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXTDMSubmitTransfer2_exit;
+               }
+       }
+
+       {
+               uiUpdateFenceNameInt =
+                   (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiUpdateFenceNameInt,
+                    (const void __user *)psRGXTDMSubmitTransfer2IN->puiUpdateFenceName,
+                    PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXTDMSubmitTransfer2_exit;
+               }
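+               /* The fence name is copied from user space and may not be
+                * NUL-terminated, so force termination at the last byte. */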
+               ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) -
+                                                   1] = '\0';
+       }
+       if (psRGXTDMSubmitTransfer2IN->ui32CommandSize != 0)
+       {
+               ui8FWCommandInt = (IMG_UINT8 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXTDMSubmitTransfer2IN->ui32CommandSize * sizeof(IMG_UINT8);
+       }
+
+       /* Copy the data over */
+       if (psRGXTDMSubmitTransfer2IN->ui32CommandSize * sizeof(IMG_UINT8) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui8FWCommandInt,
+                    (const void __user *)psRGXTDMSubmitTransfer2IN->pui8FWCommand,
+                    psRGXTDMSubmitTransfer2IN->ui32CommandSize * sizeof(IMG_UINT8)) != PVRSRV_OK)
+               {
+                       psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXTDMSubmitTransfer2_exit;
+               }
+       }
+       if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount != 0)
+       {
+               ui32SyncPMRFlagsInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32SyncPMRFlagsInt,
+                    (const void __user *)psRGXTDMSubmitTransfer2IN->pui32SyncPMRFlags,
+                    psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+               {
+                       psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXTDMSubmitTransfer2_exit;
+               }
+       }
+       if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount != 0)
+       {
+               psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               OSCachedMemSet(psSyncPMRsInt, 0,
+                              psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *));
+               ui32NextOffset += psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *);
+               hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE);
+       }
+
+       /* Copy the data over */
+       if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, hSyncPMRsInt2,
+                    (const void __user *)psRGXTDMSubmitTransfer2IN->phSyncPMRs,
+                    psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+               {
+                       psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXTDMSubmitTransfer2_exit;
+               }
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXTDMSubmitTransfer2OUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psTransferContextInt,
+                                      hTransferContext,
+                                      PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, IMG_TRUE);
+       if (unlikely(psRGXTDMSubmitTransfer2OUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXTDMSubmitTransfer2_exit;
+       }
+
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount; i++)
+               {
+                       /* Look up the address from the handle */
+                       psRGXTDMSubmitTransfer2OUT->eError =
+                           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                                      (void **)&psUpdateUFOSyncPrimBlockInt[i],
+                                                      hUpdateUFOSyncPrimBlockInt2[i],
+                                                      PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+                                                      IMG_TRUE);
+                       if (unlikely(psRGXTDMSubmitTransfer2OUT->eError != PVRSRV_OK))
+                       {
+                               UnlockHandle(psConnection->psHandleBase);
+                               goto RGXTDMSubmitTransfer2_exit;
+                       }
+               }
+       }
+
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount; i++)
+               {
+                       /* Look up the address from the handle */
+                       psRGXTDMSubmitTransfer2OUT->eError =
+                           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                                      (void **)&psSyncPMRsInt[i],
+                                                      hSyncPMRsInt2[i],
+                                                      PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+                       if (unlikely(psRGXTDMSubmitTransfer2OUT->eError != PVRSRV_OK))
+                       {
+                               UnlockHandle(psConnection->psHandleBase);
+                               goto RGXTDMSubmitTransfer2_exit;
+                       }
+               }
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXTDMSubmitTransfer2OUT->eError =
+           PVRSRVRGXTDMSubmitTransferKM(psTransferContextInt,
+                                        psRGXTDMSubmitTransfer2IN->ui32PDumpFlags,
+                                        psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount,
+                                        psUpdateUFOSyncPrimBlockInt,
+                                        ui32UpdateSyncOffsetInt,
+                                        ui32UpdateValueInt,
+                                        psRGXTDMSubmitTransfer2IN->hCheckFenceFD,
+                                        psRGXTDMSubmitTransfer2IN->hUpdateTimeline,
+                                        &psRGXTDMSubmitTransfer2OUT->hUpdateFence,
+                                        uiUpdateFenceNameInt,
+                                        psRGXTDMSubmitTransfer2IN->ui32CommandSize,
+                                        ui8FWCommandInt,
+                                        psRGXTDMSubmitTransfer2IN->ui32ExternalJobReference,
+                                        psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount,
+                                        ui32SyncPMRFlagsInt,
+                                        psSyncPMRsInt,
+                                        psRGXTDMSubmitTransfer2IN->ui32Characteristic1,
+                                        psRGXTDMSubmitTransfer2IN->ui32Characteristic2,
+                                        psRGXTDMSubmitTransfer2IN->ui64DeadlineInus);
+
+RGXTDMSubmitTransfer2_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psTransferContextInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hTransferContext,
+                                           PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
+       }
+
+       if (hUpdateUFOSyncPrimBlockInt2)
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount; i++)
+               {
+
+                       /* Unreference the previously looked up handle */
+                       if (psUpdateUFOSyncPrimBlockInt[i])
+                       {
+                               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                                           hUpdateUFOSyncPrimBlockInt2[i],
+                                                           PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+                       }
+               }
+       }
+
+       if (hSyncPMRsInt2)
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount; i++)
+               {
+
+                       /* Unreference the previously looked up handle */
+                       if (psSyncPMRsInt[i])
+                       {
+                               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                                           hSyncPMRsInt2[i],
+                                                           PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+                       }
+               }
+       }
+       /* Release now that the looked-up handles have been cleaned up. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psRGXTDMSubmitTransfer2OUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
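+/* Both release callbacks release the shared PMR through the same KM call; one
+ * wrapper exists per output handle so each registers its own PFN_HANDLE_RELEASE
+ * below. */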
+static PVRSRV_ERROR _RGXTDMGetSharedMemorypsCLIPMRMemIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = PVRSRVRGXTDMReleaseSharedMemoryKM((PMR *) pvData);
+       return eError;
+}
+
+static PVRSRV_ERROR _RGXTDMGetSharedMemorypsUSCPMRMemIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = PVRSRVRGXTDMReleaseSharedMemoryKM((PMR *) pvData);
+       return eError;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXTDMGetSharedMemory(IMG_UINT32 ui32DispatchTableEntry,
+                                 IMG_UINT8 * psRGXTDMGetSharedMemoryIN_UI8,
+                                 IMG_UINT8 * psRGXTDMGetSharedMemoryOUT_UI8,
+                                 CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY *psRGXTDMGetSharedMemoryIN =
+           (PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY *)
+           IMG_OFFSET_ADDR(psRGXTDMGetSharedMemoryIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY *psRGXTDMGetSharedMemoryOUT =
+           (PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY *)
+           IMG_OFFSET_ADDR(psRGXTDMGetSharedMemoryOUT_UI8, 0);
+
+       PMR *psCLIPMRMemInt = NULL;
+       PMR *psUSCPMRMemInt = NULL;
+
+       {
+               PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+               /* Check that device supports the required feature */
+               if ((psDeviceNode->pfnCheckDeviceFeature) &&
+                   !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+                                                        RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+               {
+                       psRGXTDMGetSharedMemoryOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+                       goto RGXTDMGetSharedMemory_exit;
+               }
+       }
+
+       PVR_UNREFERENCED_PARAMETER(psRGXTDMGetSharedMemoryIN);
+
+       psRGXTDMGetSharedMemoryOUT->eError =
+           PVRSRVRGXTDMGetSharedMemoryKM(psConnection, OSGetDevNode(psConnection),
+                                         &psCLIPMRMemInt, &psUSCPMRMemInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK))
+       {
+               goto RGXTDMGetSharedMemory_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psRGXTDMGetSharedMemoryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                                                      &psRGXTDMGetSharedMemoryOUT->
+                                                                      hCLIPMRMem,
+                                                                      (void *)psCLIPMRMemInt,
+                                                                      PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+                                                                      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                                                      (PFN_HANDLE_RELEASE) &
+                                                                      _RGXTDMGetSharedMemorypsCLIPMRMemIntRelease);
+       if (unlikely(psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXTDMGetSharedMemory_exit;
+       }
+
+       psRGXTDMGetSharedMemoryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                                                      &psRGXTDMGetSharedMemoryOUT->
+                                                                      hUSCPMRMem,
+                                                                      (void *)psUSCPMRMemInt,
+                                                                      PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+                                                                      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                                                      (PFN_HANDLE_RELEASE) &
+                                                                      _RGXTDMGetSharedMemorypsUSCPMRMemIntRelease);
+       if (unlikely(psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXTDMGetSharedMemory_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+RGXTDMGetSharedMemory_exit:
+
+       if (psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK)
+       {
+               if (psCLIPMRMemInt)
+               {
+                       PVRSRVRGXTDMReleaseSharedMemoryKM(psCLIPMRMemInt);
+               }
+               if (psUSCPMRMemInt)
+               {
+                       PVRSRVRGXTDMReleaseSharedMemoryKM(psUSCPMRMemInt);
+               }
+       }
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXTDMReleaseSharedMemory(IMG_UINT32 ui32DispatchTableEntry,
+                                     IMG_UINT8 * psRGXTDMReleaseSharedMemoryIN_UI8,
+                                     IMG_UINT8 * psRGXTDMReleaseSharedMemoryOUT_UI8,
+                                     CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY *psRGXTDMReleaseSharedMemoryIN =
+           (PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY *)
+           IMG_OFFSET_ADDR(psRGXTDMReleaseSharedMemoryIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY *psRGXTDMReleaseSharedMemoryOUT =
+           (PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY *)
+           IMG_OFFSET_ADDR(psRGXTDMReleaseSharedMemoryOUT_UI8, 0);
+
+       {
+               PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+               /* Check that device supports the required feature */
+               if ((psDeviceNode->pfnCheckDeviceFeature) &&
+                   !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+                                                        RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+               {
+                       psRGXTDMReleaseSharedMemoryOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+                       goto RGXTDMReleaseSharedMemory_exit;
+               }
+       }
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psHandleBase);
+
+       psRGXTDMReleaseSharedMemoryOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+                                             (IMG_HANDLE) psRGXTDMReleaseSharedMemoryIN->hPMRMem,
+                                             PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE);
+       if (unlikely((psRGXTDMReleaseSharedMemoryOUT->eError != PVRSRV_OK) &&
+                    (psRGXTDMReleaseSharedMemoryOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+                    (psRGXTDMReleaseSharedMemoryOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s",
+                        __func__, PVRSRVGetErrorString(psRGXTDMReleaseSharedMemoryOUT->eError)));
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXTDMReleaseSharedMemory_exit;
+       }
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+RGXTDMReleaseSharedMemory_exit:
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXTDMSetTransferContextProperty(IMG_UINT32 ui32DispatchTableEntry,
+                                            IMG_UINT8 * psRGXTDMSetTransferContextPropertyIN_UI8,
+                                            IMG_UINT8 * psRGXTDMSetTransferContextPropertyOUT_UI8,
+                                            CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY *psRGXTDMSetTransferContextPropertyIN =
+           (PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY *)
+           IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPropertyIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY *psRGXTDMSetTransferContextPropertyOUT =
+           (PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY *)
+           IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPropertyOUT_UI8, 0);
+
+       IMG_HANDLE hTransferContext = psRGXTDMSetTransferContextPropertyIN->hTransferContext;
+       RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL;
+
+       {
+               PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+               /* Check that device supports the required feature */
+               if ((psDeviceNode->pfnCheckDeviceFeature) &&
+                   !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+                                                        RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+               {
+                       psRGXTDMSetTransferContextPropertyOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+                       goto RGXTDMSetTransferContextProperty_exit;
+               }
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXTDMSetTransferContextPropertyOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psTransferContextInt,
+                                      hTransferContext,
+                                      PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, IMG_TRUE);
+       if (unlikely(psRGXTDMSetTransferContextPropertyOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXTDMSetTransferContextProperty_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXTDMSetTransferContextPropertyOUT->eError =
+           PVRSRVRGXTDMSetTransferContextPropertyKM(psTransferContextInt,
+                                                    psRGXTDMSetTransferContextPropertyIN->
+                                                    ui32Property,
+                                                    psRGXTDMSetTransferContextPropertyIN->
+                                                    ui64Input,
+                                                    &psRGXTDMSetTransferContextPropertyOUT->
+                                                    ui64Output);
+
+RGXTDMSetTransferContextProperty_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psTransferContextInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hTransferContext,
+                                           PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
+       }
+       /* Release now that the looked-up handles have been cleaned up. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitRGXTQ2Bridge(void);
+void DeinitRGXTQ2Bridge(void);
+
+/*
+ * Register all RGXTQ2 functions with services
+ */
+PVRSRV_ERROR InitRGXTQ2Bridge(void)
+{
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+                             PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT,
+                             PVRSRVBridgeRGXTDMCreateTransferContext, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+                             PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT,
+                             PVRSRVBridgeRGXTDMDestroyTransferContext, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+                             PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY,
+                             PVRSRVBridgeRGXTDMSetTransferContextPriority, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+                             PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE,
+                             PVRSRVBridgeRGXTDMNotifyWriteOffsetUpdate, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2,
+                             PVRSRVBridgeRGXTDMSubmitTransfer2, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY,
+                             PVRSRVBridgeRGXTDMGetSharedMemory, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY,
+                             PVRSRVBridgeRGXTDMReleaseSharedMemory, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+                             PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY,
+                             PVRSRVBridgeRGXTDMSetTransferContextProperty, NULL);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all rgxtq2 functions with services
+ */
+void DeinitRGXTQ2Bridge(void)
+{
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+                               PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+                               PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+                               PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+                               PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+                               PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+                               PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY);
+
+}
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxtq_bridge/common_rgxtq_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxtq_bridge/common_rgxtq_bridge.h
new file mode 100644
index 0000000..b864284
--- /dev/null
@@ -0,0 +1,176 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for rgxtq
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for rgxtq
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXTQ_BRIDGE_H
+#define COMMON_RGXTQ_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "pvrsrv_sync_km.h"
+
+#define PVRSRV_BRIDGE_RGXTQ_CMD_FIRST                  0
+#define PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT                   PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT                  PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY                      PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2                 PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY                      PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXTQ_CMD_LAST                   (PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+4)
+
+/*******************************************
+            RGXCreateTransferContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateTransferContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT_TAG
+{
+       IMG_UINT64 ui64RobustnessAddress;
+       IMG_HANDLE hPrivData;
+       IMG_BYTE *pui8FrameworkCmd;
+       IMG_UINT32 ui32ContextFlags;
+       IMG_UINT32 ui32FrameworkCmdize;
+       IMG_UINT32 ui32PackedCCBSizeU8888;
+       IMG_UINT32 ui32Priority;
+} __packed PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT;
+
+/* Bridge out structure for RGXCreateTransferContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT_TAG
+{
+       IMG_HANDLE hCLIPMRMem;
+       IMG_HANDLE hTransferContext;
+       IMG_HANDLE hUSCPMRMem;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT;
+
+/*******************************************
+            RGXDestroyTransferContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyTransferContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT_TAG
+{
+       IMG_HANDLE hTransferContext;
+} __packed PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT;
+
+/* Bridge out structure for RGXDestroyTransferContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT;
+
+/*******************************************
+            RGXSetTransferContextPriority
+ *******************************************/
+
+/* Bridge in structure for RGXSetTransferContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY_TAG
+{
+       IMG_HANDLE hTransferContext;
+       IMG_UINT32 ui32Priority;
+} __packed PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY;
+
+/* Bridge out structure for RGXSetTransferContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY;
+
+/*******************************************
+            RGXSubmitTransfer2
+ *******************************************/
+
+/* Bridge in structure for RGXSubmitTransfer2 */
+typedef struct PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER2_TAG
+{
+       IMG_HANDLE hTransferContext;
+       IMG_UINT32 *pui32ClientUpdateCount;
+       IMG_UINT32 *pui32CommandSize;
+       IMG_UINT32 *pui32SyncPMRFlags;
+       IMG_UINT32 *pui32TQPrepareFlags;
+       IMG_UINT32 **pui32UpdateSyncOffset;
+       IMG_UINT32 **pui32UpdateValue;
+       IMG_UINT8 **pui8FWCommand;
+       IMG_CHAR *puiUpdateFenceName;
+       IMG_HANDLE *phSyncPMRs;
+       IMG_HANDLE **phUpdateUFOSyncPrimBlock;
+       PVRSRV_TIMELINE h2DUpdateTimeline;
+       PVRSRV_TIMELINE h3DUpdateTimeline;
+       PVRSRV_FENCE hCheckFenceFD;
+       IMG_UINT32 ui32ExtJobRef;
+       IMG_UINT32 ui32PrepareCount;
+       IMG_UINT32 ui32SyncPMRCount;
+} __packed PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER2;
+
+/* Bridge out structure for RGXSubmitTransfer2 */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER2_TAG
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_FENCE h2DUpdateFence;
+       PVRSRV_FENCE h3DUpdateFence;
+} __packed PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER2;
+
+/*******************************************
+            RGXSetTransferContextProperty
+ *******************************************/
+
+/* Bridge in structure for RGXSetTransferContextProperty */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPROPERTY_TAG
+{
+       IMG_UINT64 ui64Input;
+       IMG_HANDLE hTransferContext;
+       IMG_UINT32 ui32Property;
+} __packed PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPROPERTY;
+
+/* Bridge out structure for RGXSetTransferContextProperty */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPROPERTY_TAG
+{
+       IMG_UINT64 ui64Output;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPROPERTY;
+
+#endif /* COMMON_RGXTQ_BRIDGE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxtq_bridge/server_rgxtq_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/rgxtq_bridge/server_rgxtq_bridge.c
new file mode 100644
index 0000000..70415f5
--- /dev/null
@@ -0,0 +1,1212 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for rgxtq
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxtq
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxtransfer.h"
+#include "rgx_tq_shared.h"
+
+#include "common_rgxtq_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#if defined(SUPPORT_RGXTQ_BRIDGE)
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static PVRSRV_ERROR _RGXCreateTransferContextpsTransferContextIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = PVRSRVRGXDestroyTransferContextKM((RGX_SERVER_TQ_CONTEXT *) pvData);
+       return eError;
+}
+
+static_assert(RGXFWIF_RF_CMD_SIZE <= IMG_UINT32_MAX,
+             "RGXFWIF_RF_CMD_SIZE must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeRGXCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry,
+                                    IMG_UINT8 * psRGXCreateTransferContextIN_UI8,
+                                    IMG_UINT8 * psRGXCreateTransferContextOUT_UI8,
+                                    CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT *psRGXCreateTransferContextIN =
+           (PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT *)
+           IMG_OFFSET_ADDR(psRGXCreateTransferContextIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT *psRGXCreateTransferContextOUT =
+           (PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT *)
+           IMG_OFFSET_ADDR(psRGXCreateTransferContextOUT_UI8, 0);
+
+       IMG_BYTE *ui8FrameworkCmdInt = NULL;
+       IMG_HANDLE hPrivData = psRGXCreateTransferContextIN->hPrivData;
+       IMG_HANDLE hPrivDataInt = NULL;
+       RGX_SERVER_TQ_CONTEXT *psTransferContextInt = NULL;
+       PMR *psCLIPMRMemInt = NULL;
+       PMR *psUSCPMRMemInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) + 0;
+
+       if (unlikely(psRGXCreateTransferContextIN->ui32FrameworkCmdize > RGXFWIF_RF_CMD_SIZE))
+       {
+               psRGXCreateTransferContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto RGXCreateTransferContext_exit;
+       }
+
+       psRGXCreateTransferContextOUT->hTransferContext = NULL;
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psRGXCreateTransferContextOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto RGXCreateTransferContext_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psRGXCreateTransferContextIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateTransferContextIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psRGXCreateTransferContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto RGXCreateTransferContext_exit;
+                       }
+               }
+       }
+
+       if (psRGXCreateTransferContextIN->ui32FrameworkCmdize != 0)
+       {
+               ui8FrameworkCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE);
+       }
+
+       /* Copy the data over */
+       if (psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui8FrameworkCmdInt,
+                    (const void __user *)psRGXCreateTransferContextIN->pui8FrameworkCmd,
+                    psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) !=
+                   PVRSRV_OK)
+               {
+                       psRGXCreateTransferContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXCreateTransferContext_exit;
+               }
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXCreateTransferContextOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&hPrivDataInt,
+                                      hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE);
+       if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXCreateTransferContext_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXCreateTransferContextOUT->eError =
+           PVRSRVRGXCreateTransferContextKM(psConnection, OSGetDevNode(psConnection),
+                                            psRGXCreateTransferContextIN->ui32Priority,
+                                            psRGXCreateTransferContextIN->ui32FrameworkCmdize,
+                                            ui8FrameworkCmdInt,
+                                            hPrivDataInt,
+                                            psRGXCreateTransferContextIN->ui32PackedCCBSizeU8888,
+                                            psRGXCreateTransferContextIN->ui32ContextFlags,
+                                            psRGXCreateTransferContextIN->ui64RobustnessAddress,
+                                            &psTransferContextInt,
+                                            &psCLIPMRMemInt, &psUSCPMRMemInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK))
+       {
+               goto RGXCreateTransferContext_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psRGXCreateTransferContextOUT->eError =
+           PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                     &psRGXCreateTransferContextOUT->hTransferContext,
+                                     (void *)psTransferContextInt,
+                                     PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT,
+                                     PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                     (PFN_HANDLE_RELEASE) &
+                                     _RGXCreateTransferContextpsTransferContextIntRelease);
+       if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXCreateTransferContext_exit;
+       }
+
+       psRGXCreateTransferContextOUT->eError =
+           PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+                                        &psRGXCreateTransferContextOUT->hCLIPMRMem,
+                                        (void *)psCLIPMRMemInt,
+                                        PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+                                        PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                        psRGXCreateTransferContextOUT->hTransferContext);
+       if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXCreateTransferContext_exit;
+       }
+
+       psRGXCreateTransferContextOUT->eError =
+           PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+                                        &psRGXCreateTransferContextOUT->hUSCPMRMem,
+                                        (void *)psUSCPMRMemInt,
+                                        PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+                                        PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                        psRGXCreateTransferContextOUT->hTransferContext);
+       if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXCreateTransferContext_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+RGXCreateTransferContext_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (hPrivDataInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+       }
+       /* Release now we have cleaned up the looked-up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       if (psRGXCreateTransferContextOUT->eError != PVRSRV_OK)
+       {
+               if (psRGXCreateTransferContextOUT->hTransferContext)
+               {
+                       PVRSRV_ERROR eError;
+
+                       /* Lock over handle creation cleanup. */
+                       LockHandle(psConnection->psHandleBase);
+
+                       eError = PVRSRVDestroyHandleUnlocked(psConnection->psHandleBase,
+                                                            (IMG_HANDLE)
+                                                            psRGXCreateTransferContextOUT->
+                                                            hTransferContext,
+                                                            PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT);
+                       if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)))
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: %s", __func__, PVRSRVGetErrorString(eError)));
+                       }
+                       /* Releasing the handle should free/destroy/release the resource.
+                        * This should never fail... */
+                       PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+                       /* Avoid freeing/destroying/releasing the resource a second time below */
+                       psTransferContextInt = NULL;
+                       /* Release now we have cleaned up creation handles. */
+                       UnlockHandle(psConnection->psHandleBase);
+
+               }
+
+               if (psTransferContextInt)
+               {
+                       PVRSRVRGXDestroyTransferContextKM(psTransferContextInt);
+               }
+       }
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psRGXCreateTransferContextOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyTransferContext(IMG_UINT32 ui32DispatchTableEntry,
+                                     IMG_UINT8 * psRGXDestroyTransferContextIN_UI8,
+                                     IMG_UINT8 * psRGXDestroyTransferContextOUT_UI8,
+                                     CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT *psRGXDestroyTransferContextIN =
+           (PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT *)
+           IMG_OFFSET_ADDR(psRGXDestroyTransferContextIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT *psRGXDestroyTransferContextOUT =
+           (PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT *)
+           IMG_OFFSET_ADDR(psRGXDestroyTransferContextOUT_UI8, 0);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psHandleBase);
+
+       psRGXDestroyTransferContextOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+                                             (IMG_HANDLE) psRGXDestroyTransferContextIN->
+                                             hTransferContext,
+                                             PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT);
+       if (unlikely
+           ((psRGXDestroyTransferContextOUT->eError != PVRSRV_OK)
+            && (psRGXDestroyTransferContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL)
+            && (psRGXDestroyTransferContextOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s",
+                        __func__, PVRSRVGetErrorString(psRGXDestroyTransferContextOUT->eError)));
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXDestroyTransferContext_exit;
+       }
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+RGXDestroyTransferContext_exit:
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXSetTransferContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+                                         IMG_UINT8 * psRGXSetTransferContextPriorityIN_UI8,
+                                         IMG_UINT8 * psRGXSetTransferContextPriorityOUT_UI8,
+                                         CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY *psRGXSetTransferContextPriorityIN =
+           (PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY *)
+           IMG_OFFSET_ADDR(psRGXSetTransferContextPriorityIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY *psRGXSetTransferContextPriorityOUT =
+           (PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY *)
+           IMG_OFFSET_ADDR(psRGXSetTransferContextPriorityOUT_UI8, 0);
+
+       IMG_HANDLE hTransferContext = psRGXSetTransferContextPriorityIN->hTransferContext;
+       RGX_SERVER_TQ_CONTEXT *psTransferContextInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXSetTransferContextPriorityOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psTransferContextInt,
+                                      hTransferContext,
+                                      PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT, IMG_TRUE);
+       if (unlikely(psRGXSetTransferContextPriorityOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXSetTransferContextPriority_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXSetTransferContextPriorityOUT->eError =
+           PVRSRVRGXSetTransferContextPriorityKM(psConnection, OSGetDevNode(psConnection),
+                                                 psTransferContextInt,
+                                                 psRGXSetTransferContextPriorityIN->ui32Priority);
+
+RGXSetTransferContextPriority_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psTransferContextInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hTransferContext,
+                                           PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT);
+       }
+       /* Release now we have cleaned up the looked-up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX,
+             "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX");
+static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX,
+             "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX");
+static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX,
+             "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX");
+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX,
+             "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeRGXSubmitTransfer2(IMG_UINT32 ui32DispatchTableEntry,
+                              IMG_UINT8 * psRGXSubmitTransfer2IN_UI8,
+                              IMG_UINT8 * psRGXSubmitTransfer2OUT_UI8,
+                              CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER2 *psRGXSubmitTransfer2IN =
+           (PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER2 *) IMG_OFFSET_ADDR(psRGXSubmitTransfer2IN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER2 *psRGXSubmitTransfer2OUT =
+           (PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER2 *) IMG_OFFSET_ADDR(psRGXSubmitTransfer2OUT_UI8,
+                                                                    0);
+
+       IMG_HANDLE hTransferContext = psRGXSubmitTransfer2IN->hTransferContext;
+       RGX_SERVER_TQ_CONTEXT *psTransferContextInt = NULL;
+       IMG_UINT32 *ui32ClientUpdateCountInt = NULL;
+       SYNC_PRIMITIVE_BLOCK ***psUpdateUFOSyncPrimBlockInt = NULL;
+       IMG_HANDLE **hUpdateUFOSyncPrimBlockInt2 = NULL;
+       IMG_UINT32 **ui32UpdateSyncOffsetInt = NULL;
+       IMG_UINT32 **ui32UpdateValueInt = NULL;
+       IMG_CHAR *uiUpdateFenceNameInt = NULL;
+       IMG_UINT32 *ui32CommandSizeInt = NULL;
+       IMG_UINT8 **ui8FWCommandInt = NULL;
+       IMG_UINT32 *ui32TQPrepareFlagsInt = NULL;
+       IMG_UINT32 *ui32SyncPMRFlagsInt = NULL;
+       PMR **psSyncPMRsInt = NULL;
+       IMG_HANDLE *hSyncPMRsInt2 = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+       IMG_BYTE *pArrayArgsBuffer2 = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) +
+           ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) +
+           ((IMG_UINT64) psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) +
+           ((IMG_UINT64) psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) +
+           ((IMG_UINT64) psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) +
+           ((IMG_UINT64) psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *)) +
+           ((IMG_UINT64) psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0;
+       IMG_UINT32 ui32BufferSize2 = 0;
+       IMG_UINT32 ui32NextOffset2 = 0;
+
+       if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+       {
+
+               ui64BufferSize +=
+                   psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(SYNC_PRIMITIVE_BLOCK **);
+               ui64BufferSize += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_HANDLE **);
+               ui64BufferSize += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32 *);
+               ui64BufferSize += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32 *);
+               ui64BufferSize += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT8 *);
+       }
+
+       if (unlikely(psRGXSubmitTransfer2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS))
+       {
+               psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto RGXSubmitTransfer2_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto RGXSubmitTransfer2_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psRGXSubmitTransfer2IN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXSubmitTransfer2IN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto RGXSubmitTransfer2_exit;
+                       }
+               }
+       }
+
+       if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+       {
+               ui32ClientUpdateCountInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32ClientUpdateCountInt,
+                    (const void __user *)psRGXSubmitTransfer2IN->pui32ClientUpdateCount,
+                    psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+               {
+                       psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXSubmitTransfer2_exit;
+               }
+       }
+       if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+       {
+               /* Assigning psUpdateUFOSyncPrimBlockInt to the right offset in the pool buffer for first dimension */
+               psUpdateUFOSyncPrimBlockInt =
+                   (SYNC_PRIMITIVE_BLOCK ***) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(SYNC_PRIMITIVE_BLOCK **);
+               /* Assigning hUpdateUFOSyncPrimBlockInt2 to the right offset in the pool buffer for first dimension */
+               hUpdateUFOSyncPrimBlockInt2 =
+                   (IMG_HANDLE **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_HANDLE);
+       }
+
+       if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+       {
+               /* Assigning ui32UpdateSyncOffsetInt to the right offset in the pool buffer for first dimension */
+               ui32UpdateSyncOffsetInt =
+                   (IMG_UINT32 **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32 *);
+       }
+
+       if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+       {
+               /* Assigning ui32UpdateValueInt to the right offset in the pool buffer for first dimension */
+               ui32UpdateValueInt =
+                   (IMG_UINT32 **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32 *);
+       }
+
+       {
+               uiUpdateFenceNameInt =
+                   (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiUpdateFenceNameInt,
+                    (const void __user *)psRGXSubmitTransfer2IN->puiUpdateFenceName,
+                    PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXSubmitTransfer2_exit;
+               }
+               ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) -
+                                                   1] = '\0';
+       }
+       if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+       {
+               ui32CommandSizeInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32CommandSizeInt,
+                    (const void __user *)psRGXSubmitTransfer2IN->pui32CommandSize,
+                    psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+               {
+                       psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXSubmitTransfer2_exit;
+               }
+       }
+       if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+       {
+               /* Assigning ui8FWCommandInt to the right offset in the pool buffer for first dimension */
+               ui8FWCommandInt = (IMG_UINT8 **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT8 *);
+       }
+
+       if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+       {
+               ui32TQPrepareFlagsInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32TQPrepareFlagsInt,
+                    (const void __user *)psRGXSubmitTransfer2IN->pui32TQPrepareFlags,
+                    psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+               {
+                       psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXSubmitTransfer2_exit;
+               }
+       }
+       if (psRGXSubmitTransfer2IN->ui32SyncPMRCount != 0)
+       {
+               ui32SyncPMRFlagsInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32SyncPMRFlagsInt,
+                    (const void __user *)psRGXSubmitTransfer2IN->pui32SyncPMRFlags,
+                    psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+               {
+                       psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXSubmitTransfer2_exit;
+               }
+       }
+       if (psRGXSubmitTransfer2IN->ui32SyncPMRCount != 0)
+       {
+               psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               OSCachedMemSet(psSyncPMRsInt, 0,
+                              psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *));
+               ui32NextOffset += psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *);
+               hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE);
+       }
+
+       /* Copy the data over */
+       if (psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, hSyncPMRsInt2, (const void __user *)psRGXSubmitTransfer2IN->phSyncPMRs,
+                    psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+               {
+                       psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RGXSubmitTransfer2_exit;
+               }
+       }
+
+       if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+       {
+               IMG_UINT32 i;
+               ui64BufferSize = 0;
+               for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+               {
+                       ui64BufferSize +=
+                           ui32ClientUpdateCountInt[i] * sizeof(SYNC_PRIMITIVE_BLOCK *);
+                       ui64BufferSize += ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE *);
+                       ui64BufferSize += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32);
+                       ui64BufferSize += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32);
+                       ui64BufferSize += ui32CommandSizeInt[i] * sizeof(IMG_UINT8);
+               }
+               if (ui64BufferSize > IMG_UINT32_MAX)
+               {
+                       psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+                       goto RGXSubmitTransfer2_exit;
+               }
+               ui32BufferSize2 = (IMG_UINT32) ui64BufferSize;
+       }
+
+       if (ui32BufferSize2 != 0)
+       {
+               pArrayArgsBuffer2 = OSAllocMemNoStats(ui32BufferSize2);
+
+               if (!pArrayArgsBuffer2)
+               {
+                       psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                       goto RGXSubmitTransfer2_exit;
+               }
+       }
+
+       if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+       {
+               IMG_UINT32 i;
+               for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+               {
+                       if (ui32ClientUpdateCountInt[i] > PVRSRV_MAX_SYNCS)
+                       {
+                               psRGXSubmitTransfer2OUT->eError =
+                                   PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+                               goto RGXSubmitTransfer2_exit;
+                       }
+
+                       /* Assigning each psUpdateUFOSyncPrimBlockInt to the right offset in the pool buffer (this is the second dimension) */
+                       psUpdateUFOSyncPrimBlockInt[i] =
+                           (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer2,
+                                                                     ui32NextOffset2);
+                       OSCachedMemSet(psUpdateUFOSyncPrimBlockInt[i], 0,
+                                      ui32ClientUpdateCountInt[i] *
+                                      sizeof(SYNC_PRIMITIVE_BLOCK *));
+                       ui32NextOffset2 +=
+                           ui32ClientUpdateCountInt[i] * sizeof(SYNC_PRIMITIVE_BLOCK *);
+                       /* Assigning each hUpdateUFOSyncPrimBlockInt2 to the right offset in the pool buffer (this is the second dimension) */
+                       hUpdateUFOSyncPrimBlockInt2[i] =
+                           (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer2, ui32NextOffset2);
+                       ui32NextOffset2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE);
+               }
+       }
+       if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+       {
+               IMG_UINT32 i;
+               for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+               {
+                       /* Assigning each ui32UpdateSyncOffsetInt to the right offset in the pool buffer (this is the second dimension) */
+                       ui32UpdateSyncOffsetInt[i] =
+                           (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer2, ui32NextOffset2);
+                       ui32NextOffset2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32);
+               }
+       }
+       if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+       {
+               IMG_UINT32 i;
+               for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+               {
+                       /* Assigning each ui32UpdateValueInt to the right offset in the pool buffer (this is the second dimension) */
+                       ui32UpdateValueInt[i] =
+                           (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer2, ui32NextOffset2);
+                       ui32NextOffset2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32);
+               }
+       }
+       if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0)
+       {
+               IMG_UINT32 i;
+               for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+               {
+                       if (ui32CommandSizeInt[i] > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)
+                       {
+                               psRGXSubmitTransfer2OUT->eError =
+                                   PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+                               goto RGXSubmitTransfer2_exit;
+                       }
+
+                       /* Assigning each ui8FWCommandInt to the right offset in the pool buffer (this is the second dimension) */
+                       ui8FWCommandInt[i] =
+                           (IMG_UINT8 *) IMG_OFFSET_ADDR(pArrayArgsBuffer2, ui32NextOffset2);
+                       ui32NextOffset2 += ui32CommandSizeInt[i] * sizeof(IMG_UINT8);
+               }
+       }
+
+       {
+               IMG_UINT32 i;
+               IMG_HANDLE **psPtr;
+
+               /* Loop over all the pointers in the array copying the data into the kernel */
+               for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+               {
+                       /* Copy the pointer over from the client side */
+                       if (OSCopyFromUser
+                           (NULL, &psPtr,
+                            (const void __user *)&psRGXSubmitTransfer2IN->
+                            phUpdateUFOSyncPrimBlock[i], sizeof(IMG_HANDLE **)) != PVRSRV_OK)
+                       {
+                               psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                               goto RGXSubmitTransfer2_exit;
+                       }
+
+                       /* Copy the data over */
+                       if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE)) > 0)
+                       {
+                               if (OSCopyFromUser
+                                   (NULL, (hUpdateUFOSyncPrimBlockInt2[i]),
+                                    (const void __user *)psPtr,
+                                    (ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE))) !=
+                                   PVRSRV_OK)
+                               {
+                                       psRGXSubmitTransfer2OUT->eError =
+                                           PVRSRV_ERROR_INVALID_PARAMS;
+
+                                       goto RGXSubmitTransfer2_exit;
+                               }
+                       }
+               }
+       }
+
+       {
+               IMG_UINT32 i;
+               IMG_UINT32 **psPtr;
+
+               /* Loop over all the pointers in the array copying the data into the kernel */
+               for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+               {
+                       /* Copy the pointer over from the client side */
+                       if (OSCopyFromUser
+                           (NULL, &psPtr,
+                            (const void __user *)&psRGXSubmitTransfer2IN->pui32UpdateSyncOffset[i],
+                            sizeof(IMG_UINT32 **)) != PVRSRV_OK)
+                       {
+                               psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                               goto RGXSubmitTransfer2_exit;
+                       }
+
+                       /* Copy the data over */
+                       if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32)) > 0)
+                       {
+                               if (OSCopyFromUser
+                                   (NULL, (ui32UpdateSyncOffsetInt[i]), (const void __user *)psPtr,
+                                    (ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32))) !=
+                                   PVRSRV_OK)
+                               {
+                                       psRGXSubmitTransfer2OUT->eError =
+                                           PVRSRV_ERROR_INVALID_PARAMS;
+
+                                       goto RGXSubmitTransfer2_exit;
+                               }
+                       }
+               }
+       }
+
+       {
+               IMG_UINT32 i;
+               IMG_UINT32 **psPtr;
+
+               /* Loop over all the pointers in the array copying the data into the kernel */
+               for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+               {
+                       /* Copy the pointer over from the client side */
+                       if (OSCopyFromUser
+                           (NULL, &psPtr,
+                            (const void __user *)&psRGXSubmitTransfer2IN->pui32UpdateValue[i],
+                            sizeof(IMG_UINT32 **)) != PVRSRV_OK)
+                       {
+                               psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                               goto RGXSubmitTransfer2_exit;
+                       }
+
+                       /* Copy the data over */
+                       if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32)) > 0)
+                       {
+                               if (OSCopyFromUser
+                                   (NULL, (ui32UpdateValueInt[i]), (const void __user *)psPtr,
+                                    (ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32))) !=
+                                   PVRSRV_OK)
+                               {
+                                       psRGXSubmitTransfer2OUT->eError =
+                                           PVRSRV_ERROR_INVALID_PARAMS;
+
+                                       goto RGXSubmitTransfer2_exit;
+                               }
+                       }
+               }
+       }
+
+       {
+               IMG_UINT32 i;
+               IMG_UINT8 **psPtr;
+
+               /* Loop over all the pointers in the array copying the data into the kernel */
+               for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+               {
+                       /* Copy the pointer over from the client side */
+                       if (OSCopyFromUser
+                           (NULL, &psPtr,
+                            (const void __user *)&psRGXSubmitTransfer2IN->pui8FWCommand[i],
+                            sizeof(IMG_UINT8 **)) != PVRSRV_OK)
+                       {
+                               psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                               goto RGXSubmitTransfer2_exit;
+                       }
+
+                       /* Copy the data over */
+                       if ((ui32CommandSizeInt[i] * sizeof(IMG_UINT8)) > 0)
+                       {
+                               if (OSCopyFromUser
+                                   (NULL, (ui8FWCommandInt[i]), (const void __user *)psPtr,
+                                    (ui32CommandSizeInt[i] * sizeof(IMG_UINT8))) != PVRSRV_OK)
+                               {
+                                       psRGXSubmitTransfer2OUT->eError =
+                                           PVRSRV_ERROR_INVALID_PARAMS;
+
+                                       goto RGXSubmitTransfer2_exit;
+                               }
+                       }
+               }
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXSubmitTransfer2OUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psTransferContextInt,
+                                      hTransferContext,
+                                      PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT, IMG_TRUE);
+       if (unlikely(psRGXSubmitTransfer2OUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXSubmitTransfer2_exit;
+       }
+
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+               {
+                       IMG_UINT32 j;
+                       for (j = 0; j < ui32ClientUpdateCountInt[i]; j++)
+                       {
+                               /* Look up the address from the handle */
+                               psRGXSubmitTransfer2OUT->eError =
+                                   PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                                              (void **)
+                                                              &psUpdateUFOSyncPrimBlockInt[i][j],
+                                                              hUpdateUFOSyncPrimBlockInt2[i][j],
+                                                              PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+                                                              IMG_TRUE);
+                               if (unlikely(psRGXSubmitTransfer2OUT->eError != PVRSRV_OK))
+                               {
+                                       UnlockHandle(psConnection->psHandleBase);
+                                       goto RGXSubmitTransfer2_exit;
+                               }
+                       }
+               }
+       }
+
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < psRGXSubmitTransfer2IN->ui32SyncPMRCount; i++)
+               {
+                       /* Look up the address from the handle */
+                       psRGXSubmitTransfer2OUT->eError =
+                           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                                      (void **)&psSyncPMRsInt[i],
+                                                      hSyncPMRsInt2[i],
+                                                      PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+                       if (unlikely(psRGXSubmitTransfer2OUT->eError != PVRSRV_OK))
+                       {
+                               UnlockHandle(psConnection->psHandleBase);
+                               goto RGXSubmitTransfer2_exit;
+                       }
+               }
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXSubmitTransfer2OUT->eError =
+           PVRSRVRGXSubmitTransferKM(psTransferContextInt,
+                                     psRGXSubmitTransfer2IN->ui32PrepareCount,
+                                     ui32ClientUpdateCountInt,
+                                     psUpdateUFOSyncPrimBlockInt,
+                                     ui32UpdateSyncOffsetInt,
+                                     ui32UpdateValueInt,
+                                     psRGXSubmitTransfer2IN->hCheckFenceFD,
+                                     psRGXSubmitTransfer2IN->h2DUpdateTimeline,
+                                     &psRGXSubmitTransfer2OUT->h2DUpdateFence,
+                                     psRGXSubmitTransfer2IN->h3DUpdateTimeline,
+                                     &psRGXSubmitTransfer2OUT->h3DUpdateFence,
+                                     uiUpdateFenceNameInt,
+                                     ui32CommandSizeInt,
+                                     ui8FWCommandInt,
+                                     ui32TQPrepareFlagsInt,
+                                     psRGXSubmitTransfer2IN->ui32ExtJobRef,
+                                     psRGXSubmitTransfer2IN->ui32SyncPMRCount,
+                                     ui32SyncPMRFlagsInt, psSyncPMRsInt);
+
+RGXSubmitTransfer2_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psTransferContextInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hTransferContext,
+                                           PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT);
+       }
+
+       if (hUpdateUFOSyncPrimBlockInt2)
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
+               {
+                       IMG_UINT32 j;
+                       for (j = 0; j < ui32ClientUpdateCountInt[i]; j++)
+                       {
+
+                               /* Unreference the previously looked up handle */
+                               if (psUpdateUFOSyncPrimBlockInt[i][j])
+                               {
+                                       PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                                                   hUpdateUFOSyncPrimBlockInt2[i]
+                                                                   [j],
+                                                                   PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+                               }
+                       }
+               }
+       }
+
+       if (hSyncPMRsInt2)
+       {
+               IMG_UINT32 i;
+
+               for (i = 0; i < psRGXSubmitTransfer2IN->ui32SyncPMRCount; i++)
+               {
+
+                       /* Unreference the previously looked up handle */
+                       if (psSyncPMRsInt[i])
+                       {
+                               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                                           hSyncPMRsInt2[i],
+                                                           PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+                       }
+               }
+       }
+       /* Release now we have cleaned up the looked-up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psRGXSubmitTransfer2OUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psRGXSubmitTransfer2OUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize2 == ui32NextOffset2);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+       if (pArrayArgsBuffer2)
+               OSFreeMemNoStats(pArrayArgsBuffer2);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXSetTransferContextProperty(IMG_UINT32 ui32DispatchTableEntry,
+                                         IMG_UINT8 * psRGXSetTransferContextPropertyIN_UI8,
+                                         IMG_UINT8 * psRGXSetTransferContextPropertyOUT_UI8,
+                                         CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPROPERTY *psRGXSetTransferContextPropertyIN =
+           (PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPROPERTY *)
+           IMG_OFFSET_ADDR(psRGXSetTransferContextPropertyIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPROPERTY *psRGXSetTransferContextPropertyOUT =
+           (PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPROPERTY *)
+           IMG_OFFSET_ADDR(psRGXSetTransferContextPropertyOUT_UI8, 0);
+
+       IMG_HANDLE hTransferContext = psRGXSetTransferContextPropertyIN->hTransferContext;
+       RGX_SERVER_TQ_CONTEXT *psTransferContextInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRGXSetTransferContextPropertyOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psTransferContextInt,
+                                      hTransferContext,
+                                      PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT, IMG_TRUE);
+       if (unlikely(psRGXSetTransferContextPropertyOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RGXSetTransferContextProperty_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRGXSetTransferContextPropertyOUT->eError =
+           PVRSRVRGXSetTransferContextPropertyKM(psTransferContextInt,
+                                                 psRGXSetTransferContextPropertyIN->ui32Property,
+                                                 psRGXSetTransferContextPropertyIN->ui64Input,
+                                                 &psRGXSetTransferContextPropertyOUT->ui64Output);
+
+RGXSetTransferContextProperty_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psTransferContextInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hTransferContext,
+                                           PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT);
+       }
+       /* Release now we have cleaned up the looked-up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+#endif /* SUPPORT_RGXTQ_BRIDGE */
+
+#if defined(SUPPORT_RGXTQ_BRIDGE)
+PVRSRV_ERROR InitRGXTQBridge(void);
+void DeinitRGXTQBridge(void);
+
+/*
+ * Register all RGXTQ functions with services
+ */
+PVRSRV_ERROR InitRGXTQBridge(void)
+{
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT,
+                             PVRSRVBridgeRGXCreateTransferContext, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT,
+                             PVRSRVBridgeRGXDestroyTransferContext, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ,
+                             PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY,
+                             PVRSRVBridgeRGXSetTransferContextPriority, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2,
+                             PVRSRVBridgeRGXSubmitTransfer2, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ,
+                             PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY,
+                             PVRSRVBridgeRGXSetTransferContextProperty, NULL);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXTQ functions with services
+ */
+void DeinitRGXTQBridge(void)
+{
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ,
+                               PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ,
+                               PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY);
+
+}
+#else /* SUPPORT_RGXTQ_BRIDGE */
+/* This bridge is conditional on SUPPORT_RGXTQ_BRIDGE - when not defined,
+ * do not populate the dispatch table with its functions
+ */
+#define InitRGXTQBridge() \
+       PVRSRV_OK
+
+#define DeinitRGXTQBridge()
+
+#endif /* SUPPORT_RGXTQ_BRIDGE */
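
Every generated entry point in this file follows the same marshalling pattern: bound-check the user-supplied array sizes against fixed firmware limits, stage the array arguments in a kernel buffer (reusing the spare tail of the bridge input buffer when it is large enough, otherwise allocating one), copy the payload in from user space, translate handles to kernel objects under the connection's handle lock, and only then call the underlying KM function. The sketch below illustrates only the staging step, with hypothetical names, plain C types, and memcpy standing in for OSCopyFromUser; it is an illustration of the pattern, not code taken from the driver.

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define MAX_CMD_SIZE 1024u   /* stand-in for a limit such as RGXFWIF_RF_CMD_SIZE */

    /* Hypothetical bridge "IN" structure: an element count plus a user pointer. */
    struct bridge_in {
        uint32_t cmd_size;
        const uint8_t *user_cmd;   /* a __user pointer in the real driver */
    };

    /*
     * Stage the variable-length argument of a bridge call in kernel memory.
     * Returns 0 on success; *allocated_out tells the caller whether the staging
     * buffer must be freed afterwards (mirrors the bHaveEnoughSpace bookkeeping).
     */
    int stage_bridge_args(const struct bridge_in *in,
                          uint8_t *in_buffer_tail, size_t tail_space,
                          uint8_t **staged_out, int *allocated_out)
    {
        uint8_t *staged;

        /* 1. Reject oversized requests before doing any work. */
        if (in->cmd_size > MAX_CMD_SIZE)
            return -1;                      /* ..._ARRAY_SIZE_TOO_BIG */

        /* 2. Reuse the unused tail of the input buffer when it fits,
         *    otherwise fall back to a dedicated allocation. */
        if (in->cmd_size <= tail_space) {
            staged = in_buffer_tail;
            *allocated_out = 0;
        } else {
            staged = malloc(in->cmd_size);  /* OSAllocMemNoStats() in the driver */
            if (staged == NULL)
                return -1;                  /* ..._OUT_OF_MEMORY */
            *allocated_out = 1;
        }

        /* 3. Copy the payload in from user space (OSCopyFromUser in the driver). */
        if (in->cmd_size != 0)
            memcpy(staged, in->user_cmd, in->cmd_size);

        *staged_out = staged;
        return 0;
    }
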
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/ri_bridge/client_ri_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/ri_bridge/client_ri_bridge.h
new file mode 100644
index 0000000..b3c42e6
--- /dev/null
@@ -0,0 +1,89 @@
+/*******************************************************************************
+@File
+@Title          Client bridge header for ri
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for ri
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_RI_BRIDGE_H
+#define CLIENT_RI_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_ri_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR BridgeRIWritePMREntry(IMG_HANDLE hBridge, IMG_HANDLE hPMRHandle);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeRIWriteMEMDESCEntry(IMG_HANDLE hBridge,
+                                                   IMG_HANDLE hPMRHandle,
+                                                   IMG_UINT32 ui32TextBSize,
+                                                   const IMG_CHAR * puiTextB,
+                                                   IMG_UINT64 ui64Offset,
+                                                   IMG_UINT64 ui64Size,
+                                                   IMG_BOOL bIsImport,
+                                                   IMG_BOOL bIsSuballoc, IMG_HANDLE * phRIHandle);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeRIWriteProcListEntry(IMG_HANDLE hBridge,
+                                                    IMG_UINT32 ui32TextBSize,
+                                                    const IMG_CHAR * puiTextB,
+                                                    IMG_UINT64 ui64Size,
+                                                    IMG_UINT64 ui64DevVAddr,
+                                                    IMG_HANDLE * phRIHandle);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeRIUpdateMEMDESCAddr(IMG_HANDLE hBridge,
+                                                   IMG_HANDLE hRIHandle, IMG_DEV_VIRTADDR sAddr);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeRIDeleteMEMDESCEntry(IMG_HANDLE hBridge, IMG_HANDLE hRIHandle);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpList(IMG_HANDLE hBridge, IMG_HANDLE hPMRHandle);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpAll(IMG_HANDLE hBridge);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpProcess(IMG_HANDLE hBridge, IMG_PID ui32Pid);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeRIWritePMREntryWithOwner(IMG_HANDLE hBridge,
+                                                        IMG_HANDLE hPMRHandle, IMG_PID ui32Owner);
+
+#endif /* CLIENT_RI_BRIDGE_H */
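The PVR_INDIRECT_BRIDGE_CLIENTS guard above selects which implementation backs these prototypes: a direct build, where each Bridge* call is an ordinary function call into the server code (client_ri_direct_bridge.c below), or an indirect build that marshals the arguments through the generic bridge dispatcher. A minimal compile-time sketch of that split in plain C; every name here (ri_write_entry, bridge_call, RI_CMD_WRITE_ENTRY and the simplified handle/error types) is a hypothetical stand-in, not a driver symbol.

#include <stddef.h>

typedef void *handle_t;   /* stand-in for IMG_HANDLE */
typedef int   err_t;      /* stand-in for PVRSRV_ERROR */

/* Server-side implementation (the role the *KM functions play). */
static err_t ri_write_entry_km(handle_t pmr) { (void)pmr; return 0; }

#if defined(INDIRECT_CLIENTS)
/* Indirect build: pack the arguments and hand them to a dispatcher. */
#define RI_CMD_WRITE_ENTRY 0
struct ri_write_entry_in { handle_t pmr; };

static err_t bridge_call(unsigned cmd, const void *in, size_t in_size)
{
    (void)cmd; (void)in; (void)in_size;
    return 0;  /* would normally trap into the kernel-side dispatcher */
}

static err_t ri_write_entry(handle_t bridge, handle_t pmr)
{
    struct ri_write_entry_in in = { pmr };
    (void)bridge;
    return bridge_call(RI_CMD_WRITE_ENTRY, &in, sizeof(in));
}
#else
/* Direct build: the client wrapper simply forwards the call. */
static err_t ri_write_entry(handle_t bridge, handle_t pmr)
{
    (void)bridge;            /* kept only for a uniform prototype */
    return ri_write_entry_km(pmr);
}
#endif

int main(void)
{
    return (int)ri_write_entry(NULL, NULL);
}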
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/ri_bridge/client_ri_direct_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/ri_bridge/client_ri_direct_bridge.c
new file mode 100644
index 0000000..2a9934c
--- /dev/null
@@ -0,0 +1,182 @@
+/*******************************************************************************
+@File
+@Title          Direct client bridge for ri
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the client side of the bridge for ri
+                which is used in calls from Server context.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_ri_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "ri_typedefs.h"
+
+#include "ri_server.h"
+
+IMG_INTERNAL PVRSRV_ERROR BridgeRIWritePMREntry(IMG_HANDLE hBridge, IMG_HANDLE hPMRHandle)
+{
+       PVRSRV_ERROR eError;
+       PMR *psPMRHandleInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psPMRHandleInt = (PMR *) hPMRHandle;
+
+       eError = RIWritePMREntryKM(psPMRHandleInt);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeRIWriteMEMDESCEntry(IMG_HANDLE hBridge,
+                                                   IMG_HANDLE hPMRHandle,
+                                                   IMG_UINT32 ui32TextBSize,
+                                                   const IMG_CHAR * puiTextB,
+                                                   IMG_UINT64 ui64Offset,
+                                                   IMG_UINT64 ui64Size,
+                                                   IMG_BOOL bIsImport,
+                                                   IMG_BOOL bIsSuballoc, IMG_HANDLE * phRIHandle)
+{
+       PVRSRV_ERROR eError;
+       PMR *psPMRHandleInt;
+       RI_HANDLE psRIHandleInt = NULL;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psPMRHandleInt = (PMR *) hPMRHandle;
+
+       eError =
+           RIWriteMEMDESCEntryKM(psPMRHandleInt,
+                                 ui32TextBSize,
+                                 puiTextB,
+                                 ui64Offset, ui64Size, bIsImport, bIsSuballoc, &psRIHandleInt);
+
+       *phRIHandle = psRIHandleInt;
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeRIWriteProcListEntry(IMG_HANDLE hBridge,
+                                                    IMG_UINT32 ui32TextBSize,
+                                                    const IMG_CHAR * puiTextB,
+                                                    IMG_UINT64 ui64Size,
+                                                    IMG_UINT64 ui64DevVAddr,
+                                                    IMG_HANDLE * phRIHandle)
+{
+       PVRSRV_ERROR eError;
+       RI_HANDLE psRIHandleInt = NULL;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       eError =
+           RIWriteProcListEntryKM(ui32TextBSize, puiTextB, ui64Size, ui64DevVAddr, &psRIHandleInt);
+
+       *phRIHandle = psRIHandleInt;
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeRIUpdateMEMDESCAddr(IMG_HANDLE hBridge,
+                                                   IMG_HANDLE hRIHandle, IMG_DEV_VIRTADDR sAddr)
+{
+       PVRSRV_ERROR eError;
+       RI_HANDLE psRIHandleInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psRIHandleInt = (RI_HANDLE) hRIHandle;
+
+       eError = RIUpdateMEMDESCAddrKM(psRIHandleInt, sAddr);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeRIDeleteMEMDESCEntry(IMG_HANDLE hBridge, IMG_HANDLE hRIHandle)
+{
+       PVRSRV_ERROR eError;
+       RI_HANDLE psRIHandleInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psRIHandleInt = (RI_HANDLE) hRIHandle;
+
+       eError = RIDeleteMEMDESCEntryKM(psRIHandleInt);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpList(IMG_HANDLE hBridge, IMG_HANDLE hPMRHandle)
+{
+       PVRSRV_ERROR eError;
+       PMR *psPMRHandleInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psPMRHandleInt = (PMR *) hPMRHandle;
+
+       eError = RIDumpListKM(psPMRHandleInt);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpAll(IMG_HANDLE hBridge)
+{
+       PVRSRV_ERROR eError;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       eError = RIDumpAllKM();
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpProcess(IMG_HANDLE hBridge, IMG_PID ui32Pid)
+{
+       PVRSRV_ERROR eError;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       eError = RIDumpProcessKM(ui32Pid);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeRIWritePMREntryWithOwner(IMG_HANDLE hBridge,
+                                                        IMG_HANDLE hPMRHandle, IMG_PID ui32Owner)
+{
+       PVRSRV_ERROR eError;
+       PMR *psPMRHandleInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psPMRHandleInt = (PMR *) hPMRHandle;
+
+       eError = RIWritePMREntryWithOwnerKM(psPMRHandleInt, ui32Owner);
+
+       return eError;
+}
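Every wrapper in this file has the same shape: drop the unused hBridge, cast the opaque IMG_HANDLE to the server's internal type, forward to the matching *KM function, and pass any newly created object back through an out parameter while the PVRSRV_ERROR travels in the return value. A compact, self-contained sketch of that convention; create_entry_km, client_create_entry and the simplified types are hypothetical names used only for illustration.

#include <stdio.h>
#include <stdlib.h>

typedef void *handle_t;   /* stand-in for IMG_HANDLE */
typedef int   err_t;      /* stand-in for PVRSRV_ERROR */
enum { OK = 0, ERR_NOMEM = 1 };

/* Internal server object; callers only ever see it as an opaque handle. */
struct ri_entry { unsigned long size; };

/* Server-side creator (the role RIWriteMEMDESCEntryKM plays above). */
static err_t create_entry_km(unsigned long size, struct ri_entry **out)
{
    struct ri_entry *e = malloc(sizeof(*e));
    if (!e)
        return ERR_NOMEM;
    e->size = size;
    *out = e;
    return OK;
}

/* Direct client wrapper: cast, forward, return the new object as a handle. */
static err_t client_create_entry(handle_t bridge, unsigned long size,
                                 handle_t *out_handle)
{
    struct ri_entry *e = NULL;
    err_t err;

    (void)bridge;                /* unused in the direct build */
    err = create_entry_km(size, &e);
    *out_handle = e;             /* stays NULL on failure, as above */
    return err;
}

int main(void)
{
    handle_t h = NULL;
    err_t err = client_create_entry(NULL, 4096, &h);
    printf("err=%d handle=%p\n", err, h);
    free(h);
    return err;
}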
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/ri_bridge/common_ri_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/ri_bridge/common_ri_bridge.h
new file mode 100644
index 0000000..ca9b687
--- /dev/null
@@ -0,0 +1,225 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for ri
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for ri
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RI_BRIDGE_H
+#define COMMON_RI_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "ri_typedefs.h"
+
+#define PVRSRV_BRIDGE_RI_CMD_FIRST                     0
+#define PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY                       PVRSRV_BRIDGE_RI_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY                   PVRSRV_BRIDGE_RI_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY                  PVRSRV_BRIDGE_RI_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR                   PVRSRV_BRIDGE_RI_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY                  PVRSRV_BRIDGE_RI_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RI_RIDUMPLIST                    PVRSRV_BRIDGE_RI_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RI_RIDUMPALL                     PVRSRV_BRIDGE_RI_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RI_RIDUMPPROCESS                 PVRSRV_BRIDGE_RI_CMD_FIRST+7
+#define PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER                      PVRSRV_BRIDGE_RI_CMD_FIRST+8
+#define PVRSRV_BRIDGE_RI_CMD_LAST                      (PVRSRV_BRIDGE_RI_CMD_FIRST+8)
+
+/*******************************************
+            RIWritePMREntry
+ *******************************************/
+
+/* Bridge in structure for RIWritePMREntry */
+typedef struct PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY_TAG
+{
+       IMG_HANDLE hPMRHandle;
+} __packed PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY;
+
+/* Bridge out structure for RIWritePMREntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY;
+
+/*******************************************
+            RIWriteMEMDESCEntry
+ *******************************************/
+
+/* Bridge in structure for RIWriteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY_TAG
+{
+       IMG_UINT64 ui64Offset;
+       IMG_UINT64 ui64Size;
+       IMG_HANDLE hPMRHandle;
+       const IMG_CHAR *puiTextB;
+       IMG_BOOL bIsImport;
+       IMG_BOOL bIsSuballoc;
+       IMG_UINT32 ui32TextBSize;
+} __packed PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY;
+
+/* Bridge out structure for RIWriteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY_TAG
+{
+       IMG_HANDLE hRIHandle;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY;
+
+/*******************************************
+            RIWriteProcListEntry
+ *******************************************/
+
+/* Bridge in structure for RIWriteProcListEntry */
+typedef struct PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY_TAG
+{
+       IMG_UINT64 ui64DevVAddr;
+       IMG_UINT64 ui64Size;
+       const IMG_CHAR *puiTextB;
+       IMG_UINT32 ui32TextBSize;
+} __packed PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY;
+
+/* Bridge out structure for RIWriteProcListEntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY_TAG
+{
+       IMG_HANDLE hRIHandle;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY;
+
+/*******************************************
+            RIUpdateMEMDESCAddr
+ *******************************************/
+
+/* Bridge in structure for RIUpdateMEMDESCAddr */
+typedef struct PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR_TAG
+{
+       IMG_DEV_VIRTADDR sAddr;
+       IMG_HANDLE hRIHandle;
+} __packed PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR;
+
+/* Bridge out structure for RIUpdateMEMDESCAddr */
+typedef struct PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR;
+
+/*******************************************
+            RIDeleteMEMDESCEntry
+ *******************************************/
+
+/* Bridge in structure for RIDeleteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY_TAG
+{
+       IMG_HANDLE hRIHandle;
+} __packed PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY;
+
+/* Bridge out structure for RIDeleteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY;
+
+/*******************************************
+            RIDumpList
+ *******************************************/
+
+/* Bridge in structure for RIDumpList */
+typedef struct PVRSRV_BRIDGE_IN_RIDUMPLIST_TAG
+{
+       IMG_HANDLE hPMRHandle;
+} __packed PVRSRV_BRIDGE_IN_RIDUMPLIST;
+
+/* Bridge out structure for RIDumpList */
+typedef struct PVRSRV_BRIDGE_OUT_RIDUMPLIST_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RIDUMPLIST;
+
+/*******************************************
+            RIDumpAll
+ *******************************************/
+
+/* Bridge in structure for RIDumpAll */
+typedef struct PVRSRV_BRIDGE_IN_RIDUMPALL_TAG
+{
+       IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_RIDUMPALL;
+
+/* Bridge out structure for RIDumpAll */
+typedef struct PVRSRV_BRIDGE_OUT_RIDUMPALL_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RIDUMPALL;
+
+/*******************************************
+            RIDumpProcess
+ *******************************************/
+
+/* Bridge in structure for RIDumpProcess */
+typedef struct PVRSRV_BRIDGE_IN_RIDUMPPROCESS_TAG
+{
+       IMG_PID ui32Pid;
+} __packed PVRSRV_BRIDGE_IN_RIDUMPPROCESS;
+
+/* Bridge out structure for RIDumpProcess */
+typedef struct PVRSRV_BRIDGE_OUT_RIDUMPPROCESS_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RIDUMPPROCESS;
+
+/*******************************************
+            RIWritePMREntryWithOwner
+ *******************************************/
+
+/* Bridge in structure for RIWritePMREntryWithOwner */
+typedef struct PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER_TAG
+{
+       IMG_HANDLE hPMRHandle;
+       IMG_PID ui32Owner;
+} __packed PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER;
+
+/* Bridge out structure for RIWritePMREntryWithOwner */
+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER;
+
+#endif /* COMMON_RI_BRIDGE_H */
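The common header follows one convention throughout: each bridge call gets a command number expressed as an offset from the module's CMD_FIRST, plus a packed IN/OUT structure pair in which the OUT side always carries the PVRSRV_ERROR, and calls with no inputs still get a ui32EmptyStructPlaceholder so the structure is never empty. A short sketch of the same layout with hypothetical names (CMD_FIRST, in_write_entry, and so on):

#include <stdint.h>
#include <stdio.h>

typedef int32_t err_t;    /* stand-in for PVRSRV_ERROR */

/* Commands are offsets from a per-module base, mirroring FIRST+n above. */
#define CMD_FIRST        0
#define CMD_WRITE_ENTRY  (CMD_FIRST + 0)
#define CMD_DUMP_ALL     (CMD_FIRST + 1)
#define CMD_LAST         (CMD_FIRST + 1)

/* One packed IN/OUT pair per command; OUT always carries the error code. */
struct __attribute__((packed)) in_write_entry {
    uint64_t offset;
    uint64_t size;
    uint32_t text_size;
};

struct __attribute__((packed)) out_write_entry {
    err_t error;
};

/* Calls with no inputs keep a placeholder so the structure is non-empty. */
struct __attribute__((packed)) in_dump_all {
    uint32_t empty_struct_placeholder;
};

int main(void)
{
    printf("commands %d..%d, in_write_entry packs to %zu bytes\n",
           CMD_FIRST, CMD_LAST, sizeof(struct in_write_entry));
    return 0;
}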
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/ri_bridge/server_ri_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/ri_bridge/server_ri_bridge.c
new file mode 100644
index 0000000..80f246c
--- /dev/null
@@ -0,0 +1,760 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for ri
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for ri
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "ri_server.h"
+
+#include "common_ri_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRIWritePMREntry(IMG_UINT32 ui32DispatchTableEntry,
+                           IMG_UINT8 * psRIWritePMREntryIN_UI8,
+                           IMG_UINT8 * psRIWritePMREntryOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY *psRIWritePMREntryIN =
+           (PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY *) IMG_OFFSET_ADDR(psRIWritePMREntryIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY *psRIWritePMREntryOUT =
+           (PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY *) IMG_OFFSET_ADDR(psRIWritePMREntryOUT_UI8, 0);
+
+       IMG_HANDLE hPMRHandle = psRIWritePMREntryIN->hPMRHandle;
+       PMR *psPMRHandleInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRIWritePMREntryOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRHandleInt,
+                                      hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psRIWritePMREntryOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RIWritePMREntry_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRIWritePMREntryOUT->eError = RIWritePMREntryKM(psPMRHandleInt);
+
+RIWritePMREntry_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psPMRHandleInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static PVRSRV_ERROR _RIWriteMEMDESCEntrypsRIHandleIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = RIDeleteMEMDESCEntryKM((RI_HANDLE) pvData);
+       return eError;
+}
+
+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
+             "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeRIWriteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry,
+                               IMG_UINT8 * psRIWriteMEMDESCEntryIN_UI8,
+                               IMG_UINT8 * psRIWriteMEMDESCEntryOUT_UI8,
+                               CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY *psRIWriteMEMDESCEntryIN =
+           (PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY *) IMG_OFFSET_ADDR(psRIWriteMEMDESCEntryIN_UI8,
+                                                                    0);
+       PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY *psRIWriteMEMDESCEntryOUT =
+           (PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY *) IMG_OFFSET_ADDR(psRIWriteMEMDESCEntryOUT_UI8,
+                                                                     0);
+
+       IMG_HANDLE hPMRHandle = psRIWriteMEMDESCEntryIN->hPMRHandle;
+       PMR *psPMRHandleInt = NULL;
+       IMG_CHAR *uiTextBInt = NULL;
+       RI_HANDLE psRIHandleInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) + 0;
+
+       if (unlikely(psRIWriteMEMDESCEntryIN->ui32TextBSize > DEVMEM_ANNOTATION_MAX_LEN))
+       {
+               psRIWriteMEMDESCEntryOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto RIWriteMEMDESCEntry_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psRIWriteMEMDESCEntryOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto RIWriteMEMDESCEntry_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psRIWriteMEMDESCEntryIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRIWriteMEMDESCEntryIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psRIWriteMEMDESCEntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto RIWriteMEMDESCEntry_exit;
+                       }
+               }
+       }
+
+       if (psRIWriteMEMDESCEntryIN->ui32TextBSize != 0)
+       {
+               uiTextBInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiTextBInt, (const void __user *)psRIWriteMEMDESCEntryIN->puiTextB,
+                    psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psRIWriteMEMDESCEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RIWriteMEMDESCEntry_exit;
+               }
+               ((IMG_CHAR *)
+                uiTextBInt)[(psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) - 1] =
+       '\0';
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRIWriteMEMDESCEntryOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRHandleInt,
+                                      hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RIWriteMEMDESCEntry_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRIWriteMEMDESCEntryOUT->eError =
+           RIWriteMEMDESCEntryKM(psPMRHandleInt,
+                                 psRIWriteMEMDESCEntryIN->ui32TextBSize,
+                                 uiTextBInt,
+                                 psRIWriteMEMDESCEntryIN->ui64Offset,
+                                 psRIWriteMEMDESCEntryIN->ui64Size,
+                                 psRIWriteMEMDESCEntryIN->bIsImport,
+                                 psRIWriteMEMDESCEntryIN->bIsSuballoc, &psRIHandleInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK))
+       {
+               goto RIWriteMEMDESCEntry_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psRIWriteMEMDESCEntryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                                                    &psRIWriteMEMDESCEntryOUT->
+                                                                    hRIHandle,
+                                                                    (void *)psRIHandleInt,
+                                                                    PVRSRV_HANDLE_TYPE_RI_HANDLE,
+                                                                    PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                                                    (PFN_HANDLE_RELEASE) &
+                                                                    _RIWriteMEMDESCEntrypsRIHandleIntRelease);
+       if (unlikely(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RIWriteMEMDESCEntry_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+RIWriteMEMDESCEntry_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psPMRHandleInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       if (psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)
+       {
+               if (psRIHandleInt)
+               {
+                       RIDeleteMEMDESCEntryKM(psRIHandleInt);
+               }
+       }
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psRIWriteMEMDESCEntryOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
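The MEMDESC path above shows the bridge's scratch-buffer strategy for variable-length arguments: bound the annotation length, reuse the word-aligned tail of the fixed per-call input buffer when it is large enough, fall back to a heap allocation otherwise, copy the string in from user space, force NUL termination, and free the scratch space only if it was heap-allocated. A standalone sketch of that flow; MAX_BRIDGE_IN_SIZE, copy_annotation and the plain memcpy standing in for OSCopyFromUser are illustrative assumptions, not driver symbols.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define MAX_BRIDGE_IN_SIZE  512   /* fixed per-call input buffer (assumed size) */
#define ANNOTATION_MAX_LEN  128   /* assumed upper bound on the annotation */

/* Align up to the machine word, as the bridge does before reusing the tail. */
#define ALIGN_UP(x, a)  (((x) + ((a) - 1)) & ~((size_t)(a) - 1))

/* Copy a caller-supplied annotation of text_size bytes into scratch space,
 * preferring the unused tail of the fixed input buffer over the heap. */
static int copy_annotation(uint8_t *in_buf, size_t in_struct_size,
                           const char *user_text, uint32_t text_size,
                           char **out_text, int *heap_allocated)
{
    size_t offset = ALIGN_UP(in_struct_size, sizeof(unsigned long));
    size_t spare  = offset >= MAX_BRIDGE_IN_SIZE ? 0 : MAX_BRIDGE_IN_SIZE - offset;
    char *dst;

    /* Reject empty or oversized annotations in this simplified sketch. */
    if (text_size == 0 || text_size > ANNOTATION_MAX_LEN)
        return -1;

    if (text_size <= spare) {             /* reuse the input buffer's tail */
        dst = (char *)&in_buf[offset];
        *heap_allocated = 0;
    } else {                              /* fall back to a heap allocation */
        dst = malloc(text_size);
        if (!dst)
            return -1;
        *heap_allocated = 1;
    }

    memcpy(dst, user_text, text_size);    /* stands in for OSCopyFromUser() */
    dst[text_size - 1] = '\0';            /* force NUL termination */
    *out_text = dst;
    return 0;
}

int main(void)
{
    uint8_t in_buf[MAX_BRIDGE_IN_SIZE] = {0};
    char *text = NULL;
    int heap = 0;

    if (copy_annotation(in_buf, 32, "example annotation", 19, &text, &heap) == 0) {
        printf("annotation=\"%s\" heap=%d\n", text, heap);
        if (heap)
            free(text);
    }
    return 0;
}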
+
+static PVRSRV_ERROR _RIWriteProcListEntrypsRIHandleIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = RIDeleteMEMDESCEntryKM((RI_HANDLE) pvData);
+       return eError;
+}
+
+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
+             "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeRIWriteProcListEntry(IMG_UINT32 ui32DispatchTableEntry,
+                                IMG_UINT8 * psRIWriteProcListEntryIN_UI8,
+                                IMG_UINT8 * psRIWriteProcListEntryOUT_UI8,
+                                CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY *psRIWriteProcListEntryIN =
+           (PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY *) IMG_OFFSET_ADDR(psRIWriteProcListEntryIN_UI8,
+                                                                     0);
+       PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY *psRIWriteProcListEntryOUT =
+           (PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY *)
+           IMG_OFFSET_ADDR(psRIWriteProcListEntryOUT_UI8, 0);
+
+       IMG_CHAR *uiTextBInt = NULL;
+       RI_HANDLE psRIHandleInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) + 0;
+
+       if (unlikely(psRIWriteProcListEntryIN->ui32TextBSize > DEVMEM_ANNOTATION_MAX_LEN))
+       {
+               psRIWriteProcListEntryOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto RIWriteProcListEntry_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psRIWriteProcListEntryOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto RIWriteProcListEntry_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psRIWriteProcListEntryIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRIWriteProcListEntryIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psRIWriteProcListEntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto RIWriteProcListEntry_exit;
+                       }
+               }
+       }
+
+       if (psRIWriteProcListEntryIN->ui32TextBSize != 0)
+       {
+               uiTextBInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiTextBInt, (const void __user *)psRIWriteProcListEntryIN->puiTextB,
+                    psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psRIWriteProcListEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto RIWriteProcListEntry_exit;
+               }
+               ((IMG_CHAR *)
+                uiTextBInt)[(psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) - 1] =
+       '\0';
+       }
+
+       psRIWriteProcListEntryOUT->eError =
+           RIWriteProcListEntryKM(psRIWriteProcListEntryIN->ui32TextBSize,
+                                  uiTextBInt,
+                                  psRIWriteProcListEntryIN->ui64Size,
+                                  psRIWriteProcListEntryIN->ui64DevVAddr, &psRIHandleInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psRIWriteProcListEntryOUT->eError != PVRSRV_OK))
+       {
+               goto RIWriteProcListEntry_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psRIWriteProcListEntryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                                                     &psRIWriteProcListEntryOUT->
+                                                                     hRIHandle,
+                                                                     (void *)psRIHandleInt,
+                                                                     PVRSRV_HANDLE_TYPE_RI_HANDLE,
+                                                                     PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                                                     (PFN_HANDLE_RELEASE) &
+                                                                     _RIWriteProcListEntrypsRIHandleIntRelease);
+       if (unlikely(psRIWriteProcListEntryOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RIWriteProcListEntry_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+RIWriteProcListEntry_exit:
+
+       if (psRIWriteProcListEntryOUT->eError != PVRSRV_OK)
+       {
+               if (psRIHandleInt)
+               {
+                       RIDeleteMEMDESCEntryKM(psRIHandleInt);
+               }
+       }
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psRIWriteProcListEntryOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRIUpdateMEMDESCAddr(IMG_UINT32 ui32DispatchTableEntry,
+                               IMG_UINT8 * psRIUpdateMEMDESCAddrIN_UI8,
+                               IMG_UINT8 * psRIUpdateMEMDESCAddrOUT_UI8,
+                               CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR *psRIUpdateMEMDESCAddrIN =
+           (PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR *) IMG_OFFSET_ADDR(psRIUpdateMEMDESCAddrIN_UI8,
+                                                                    0);
+       PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR *psRIUpdateMEMDESCAddrOUT =
+           (PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR *) IMG_OFFSET_ADDR(psRIUpdateMEMDESCAddrOUT_UI8,
+                                                                     0);
+
+       IMG_HANDLE hRIHandle = psRIUpdateMEMDESCAddrIN->hRIHandle;
+       RI_HANDLE psRIHandleInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRIUpdateMEMDESCAddrOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psRIHandleInt,
+                                      hRIHandle, PVRSRV_HANDLE_TYPE_RI_HANDLE, IMG_TRUE);
+       if (unlikely(psRIUpdateMEMDESCAddrOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RIUpdateMEMDESCAddr_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRIUpdateMEMDESCAddrOUT->eError =
+           RIUpdateMEMDESCAddrKM(psRIHandleInt, psRIUpdateMEMDESCAddrIN->sAddr);
+
+RIUpdateMEMDESCAddr_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psRIHandleInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hRIHandle, PVRSRV_HANDLE_TYPE_RI_HANDLE);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRIDeleteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry,
+                                IMG_UINT8 * psRIDeleteMEMDESCEntryIN_UI8,
+                                IMG_UINT8 * psRIDeleteMEMDESCEntryOUT_UI8,
+                                CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY *psRIDeleteMEMDESCEntryIN =
+           (PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY *) IMG_OFFSET_ADDR(psRIDeleteMEMDESCEntryIN_UI8,
+                                                                     0);
+       PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY *psRIDeleteMEMDESCEntryOUT =
+           (PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY *)
+           IMG_OFFSET_ADDR(psRIDeleteMEMDESCEntryOUT_UI8, 0);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psHandleBase);
+
+       psRIDeleteMEMDESCEntryOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+                                             (IMG_HANDLE) psRIDeleteMEMDESCEntryIN->hRIHandle,
+                                             PVRSRV_HANDLE_TYPE_RI_HANDLE);
+       if (unlikely((psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_OK) &&
+                    (psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+                    (psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s",
+                        __func__, PVRSRVGetErrorString(psRIDeleteMEMDESCEntryOUT->eError)));
+               UnlockHandle(psConnection->psHandleBase);
+               goto RIDeleteMEMDESCEntry_exit;
+       }
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+RIDeleteMEMDESCEntry_exit:
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRIDumpList(IMG_UINT32 ui32DispatchTableEntry,
+                      IMG_UINT8 * psRIDumpListIN_UI8,
+                      IMG_UINT8 * psRIDumpListOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RIDUMPLIST *psRIDumpListIN =
+           (PVRSRV_BRIDGE_IN_RIDUMPLIST *) IMG_OFFSET_ADDR(psRIDumpListIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RIDUMPLIST *psRIDumpListOUT =
+           (PVRSRV_BRIDGE_OUT_RIDUMPLIST *) IMG_OFFSET_ADDR(psRIDumpListOUT_UI8, 0);
+
+       IMG_HANDLE hPMRHandle = psRIDumpListIN->hPMRHandle;
+       PMR *psPMRHandleInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRIDumpListOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRHandleInt,
+                                      hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psRIDumpListOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RIDumpList_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRIDumpListOUT->eError = RIDumpListKM(psPMRHandleInt);
+
+RIDumpList_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psPMRHandleInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRIDumpAll(IMG_UINT32 ui32DispatchTableEntry,
+                     IMG_UINT8 * psRIDumpAllIN_UI8,
+                     IMG_UINT8 * psRIDumpAllOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RIDUMPALL *psRIDumpAllIN =
+           (PVRSRV_BRIDGE_IN_RIDUMPALL *) IMG_OFFSET_ADDR(psRIDumpAllIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RIDUMPALL *psRIDumpAllOUT =
+           (PVRSRV_BRIDGE_OUT_RIDUMPALL *) IMG_OFFSET_ADDR(psRIDumpAllOUT_UI8, 0);
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVR_UNREFERENCED_PARAMETER(psRIDumpAllIN);
+
+       psRIDumpAllOUT->eError = RIDumpAllKM();
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRIDumpProcess(IMG_UINT32 ui32DispatchTableEntry,
+                         IMG_UINT8 * psRIDumpProcessIN_UI8,
+                         IMG_UINT8 * psRIDumpProcessOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RIDUMPPROCESS *psRIDumpProcessIN =
+           (PVRSRV_BRIDGE_IN_RIDUMPPROCESS *) IMG_OFFSET_ADDR(psRIDumpProcessIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RIDUMPPROCESS *psRIDumpProcessOUT =
+           (PVRSRV_BRIDGE_OUT_RIDUMPPROCESS *) IMG_OFFSET_ADDR(psRIDumpProcessOUT_UI8, 0);
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       psRIDumpProcessOUT->eError = RIDumpProcessKM(psRIDumpProcessIN->ui32Pid);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRIWritePMREntryWithOwner(IMG_UINT32 ui32DispatchTableEntry,
+                                    IMG_UINT8 * psRIWritePMREntryWithOwnerIN_UI8,
+                                    IMG_UINT8 * psRIWritePMREntryWithOwnerOUT_UI8,
+                                    CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER *psRIWritePMREntryWithOwnerIN =
+           (PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER *)
+           IMG_OFFSET_ADDR(psRIWritePMREntryWithOwnerIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER *psRIWritePMREntryWithOwnerOUT =
+           (PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER *)
+           IMG_OFFSET_ADDR(psRIWritePMREntryWithOwnerOUT_UI8, 0);
+
+       IMG_HANDLE hPMRHandle = psRIWritePMREntryWithOwnerIN->hPMRHandle;
+       PMR *psPMRHandleInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psRIWritePMREntryWithOwnerOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psPMRHandleInt,
+                                      hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+       if (unlikely(psRIWritePMREntryWithOwnerOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto RIWritePMREntryWithOwner_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psRIWritePMREntryWithOwnerOUT->eError =
+           RIWritePMREntryWithOwnerKM(psPMRHandleInt, psRIWritePMREntryWithOwnerIN->ui32Owner);
+
+RIWritePMREntryWithOwner_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psPMRHandleInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitRIBridge(void);
+void DeinitRIBridge(void);
+
+/*
+ * Register all RI functions with services
+ */
+PVRSRV_ERROR InitRIBridge(void)
+{
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY,
+                             PVRSRVBridgeRIWritePMREntry, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY,
+                             PVRSRVBridgeRIWriteMEMDESCEntry, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY,
+                             PVRSRVBridgeRIWriteProcListEntry, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR,
+                             PVRSRVBridgeRIUpdateMEMDESCAddr, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY,
+                             PVRSRVBridgeRIDeleteMEMDESCEntry, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPLIST, PVRSRVBridgeRIDumpList,
+                             NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPALL, PVRSRVBridgeRIDumpAll,
+                             NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPPROCESS,
+                             PVRSRVBridgeRIDumpProcess, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER,
+                             PVRSRVBridgeRIWritePMREntryWithOwner, NULL);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RI functions with services
+ */
+void DeinitRIBridge(void)
+{
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPLIST);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPALL);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPPROCESS);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER);
+
+}
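InitRIBridge() and DeinitRIBridge() above populate and clear the Services dispatch table, associating each (bridge group, command) pair with a handler that takes raw IN/OUT byte buffers. A reduced sketch of that register-dispatch-unregister scheme; dispatch_table, set_entry and the other names here are hypothetical.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define CMD_COUNT      2
#define CMD_DUMP_ALL   0
#define CMD_DUMP_PROC  1

/* Uniform handler signature: raw IN and OUT buffers, cast by each handler. */
typedef int32_t (*bridge_fn)(uint8_t *in, uint8_t *out);

static bridge_fn dispatch_table[CMD_COUNT];

static void set_entry(unsigned cmd, bridge_fn fn)   { dispatch_table[cmd] = fn; }
static void unset_entry(unsigned cmd)               { dispatch_table[cmd] = NULL; }

static int32_t handle_dump_all(uint8_t *in, uint8_t *out)
{
    (void)in; (void)out;
    puts("dump all");
    return 0;
}

static int32_t dispatch(unsigned cmd, uint8_t *in, uint8_t *out)
{
    if (cmd >= CMD_COUNT || !dispatch_table[cmd])
        return -1;                        /* unknown or unregistered command */
    return dispatch_table[cmd](in, out);
}

int main(void)
{
    set_entry(CMD_DUMP_ALL, handle_dump_all);   /* Init*Bridge() analogue */
    dispatch(CMD_DUMP_ALL, NULL, NULL);
    unset_entry(CMD_DUMP_ALL);                  /* Deinit*Bridge() analogue */
    return dispatch(CMD_DUMP_ALL, NULL, NULL) == -1 ? 0 : 1;
}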
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/srvcore_bridge/common_srvcore_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/srvcore_bridge/common_srvcore_bridge.h
new file mode 100644
index 0000000..7e9ac6e
--- /dev/null
@@ -0,0 +1,369 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for srvcore
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for srvcore
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_SRVCORE_BRIDGE_H
+#define COMMON_SRVCORE_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pvrsrv_device_types.h"
+#include "cache_ops.h"
+
+#define PVRSRV_BRIDGE_SRVCORE_CMD_FIRST                        0
+#define PVRSRV_BRIDGE_SRVCORE_CONNECT                  PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+0
+#define PVRSRV_BRIDGE_SRVCORE_DISCONNECT                       PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+1
+#define PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT                 PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+2
+#define PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT                 PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+3
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN                  PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+4
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT                  PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+5
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE                 PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+6
+#define PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO                    PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+7
+#define PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED                 PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+8
+#define PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT                      PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+9
+#define PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK                   PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+10
+#define PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS                  PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+11
+#define PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO                 PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+12
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT                   PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+13
+#define PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS                      PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+14
+#define PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE                  PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+15
+#define PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE                  PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+16
+#define PVRSRV_BRIDGE_SRVCORE_CMD_LAST                 (PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+16)
+
+/*******************************************
+            Connect
+ *******************************************/
+
+/* Bridge in structure for Connect */
+typedef struct PVRSRV_BRIDGE_IN_CONNECT_TAG
+{
+       IMG_UINT32 ui32ClientBuildOptions;
+       IMG_UINT32 ui32ClientDDKBuild;
+       IMG_UINT32 ui32ClientDDKVersion;
+       IMG_UINT32 ui32Flags;
+} __packed PVRSRV_BRIDGE_IN_CONNECT;
+
+/* Bridge out structure for Connect */
+typedef struct PVRSRV_BRIDGE_OUT_CONNECT_TAG
+{
+       IMG_UINT64 ui64PackedBvnc;
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32CapabilityFlags;
+       IMG_UINT8 ui8KernelArch;
+} __packed PVRSRV_BRIDGE_OUT_CONNECT;
+
+/*******************************************
+            Disconnect
+ *******************************************/
+
+/* Bridge in structure for Disconnect */
+typedef struct PVRSRV_BRIDGE_IN_DISCONNECT_TAG
+{
+       IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_DISCONNECT;
+
+/* Bridge out structure for Disconnect */
+typedef struct PVRSRV_BRIDGE_OUT_DISCONNECT_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DISCONNECT;
+
+/*******************************************
+            AcquireGlobalEventObject
+ *******************************************/
+
+/* Bridge in structure for AcquireGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT_TAG
+{
+       IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT;
+
+/* Bridge out structure for AcquireGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT_TAG
+{
+       IMG_HANDLE hGlobalEventObject;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT;
+
+/*******************************************
+            ReleaseGlobalEventObject
+ *******************************************/
+
+/* Bridge in structure for ReleaseGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT_TAG
+{
+       IMG_HANDLE hGlobalEventObject;
+} __packed PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT;
+
+/* Bridge out structure for ReleaseGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT;
+
+/*******************************************
+            EventObjectOpen
+ *******************************************/
+
+/* Bridge in structure for EventObjectOpen */
+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN_TAG
+{
+       IMG_HANDLE hEventObject;
+} __packed PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN;
+
+/* Bridge out structure for EventObjectOpen */
+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN_TAG
+{
+       IMG_HANDLE hOSEvent;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN;
+
+/*******************************************
+            EventObjectWait
+ *******************************************/
+
+/* Bridge in structure for EventObjectWait */
+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT_TAG
+{
+       IMG_HANDLE hOSEventKM;
+} __packed PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT;
+
+/* Bridge out structure for EventObjectWait */
+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT;
+
+/*******************************************
+            EventObjectClose
+ *******************************************/
+
+/* Bridge in structure for EventObjectClose */
+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE_TAG
+{
+       IMG_HANDLE hOSEventKM;
+} __packed PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE;
+
+/* Bridge out structure for EventObjectClose */
+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE;
+
+/*******************************************
+            DumpDebugInfo
+ *******************************************/
+
+/* Bridge in structure for DumpDebugInfo */
+typedef struct PVRSRV_BRIDGE_IN_DUMPDEBUGINFO_TAG
+{
+       IMG_UINT32 ui32VerbLevel;
+} __packed PVRSRV_BRIDGE_IN_DUMPDEBUGINFO;
+
+/* Bridge out structure for DumpDebugInfo */
+typedef struct PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO;
+
+/*******************************************
+            GetDevClockSpeed
+ *******************************************/
+
+/* Bridge in structure for GetDevClockSpeed */
+typedef struct PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED_TAG
+{
+       IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED;
+
+/* Bridge out structure for GetDevClockSpeed */
+typedef struct PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED_TAG
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32ClockSpeed;
+} __packed PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED;
+
+/*******************************************
+            HWOpTimeout
+ *******************************************/
+
+/* Bridge in structure for HWOpTimeout */
+typedef struct PVRSRV_BRIDGE_IN_HWOPTIMEOUT_TAG
+{
+       IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_HWOPTIMEOUT;
+
+/* Bridge out structure for HWOpTimeout */
+typedef struct PVRSRV_BRIDGE_OUT_HWOPTIMEOUT_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_HWOPTIMEOUT;
+
+/*******************************************
+            AlignmentCheck
+ *******************************************/
+
+/* Bridge in structure for AlignmentCheck */
+typedef struct PVRSRV_BRIDGE_IN_ALIGNMENTCHECK_TAG
+{
+       IMG_UINT32 *pui32AlignChecks;
+       IMG_UINT32 ui32AlignChecksSize;
+} __packed PVRSRV_BRIDGE_IN_ALIGNMENTCHECK;
+
+/* Bridge out structure for AlignmentCheck */
+typedef struct PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK;
+
+/*******************************************
+            GetDeviceStatus
+ *******************************************/
+
+/* Bridge in structure for GetDeviceStatus */
+typedef struct PVRSRV_BRIDGE_IN_GETDEVICESTATUS_TAG
+{
+       IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_GETDEVICESTATUS;
+
+/* Bridge out structure for GetDeviceStatus */
+typedef struct PVRSRV_BRIDGE_OUT_GETDEVICESTATUS_TAG
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32DeviceSatus;
+} __packed PVRSRV_BRIDGE_OUT_GETDEVICESTATUS;
+
+/*******************************************
+            GetMultiCoreInfo
+ *******************************************/
+
+/* Bridge in structure for GetMultiCoreInfo */
+typedef struct PVRSRV_BRIDGE_IN_GETMULTICOREINFO_TAG
+{
+       IMG_UINT64 *pui64Caps;
+       IMG_UINT32 ui32CapsSize;
+} __packed PVRSRV_BRIDGE_IN_GETMULTICOREINFO;
+
+/* Bridge out structure for GetMultiCoreInfo */
+typedef struct PVRSRV_BRIDGE_OUT_GETMULTICOREINFO_TAG
+{
+       IMG_UINT64 *pui64Caps;
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32NumCores;
+} __packed PVRSRV_BRIDGE_OUT_GETMULTICOREINFO;
+
+/*******************************************
+            EventObjectWaitTimeout
+ *******************************************/
+
+/* Bridge in structure for EventObjectWaitTimeout */
+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT_TAG
+{
+       IMG_UINT64 ui64uiTimeoutus;
+       IMG_HANDLE hOSEventKM;
+} __packed PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT;
+
+/* Bridge out structure for EventObjectWaitTimeout */
+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT;
+
+/*******************************************
+            FindProcessMemStats
+ *******************************************/
+
+/* Bridge in structure for FindProcessMemStats */
+typedef struct PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS_TAG
+{
+       IMG_UINT32 *pui32MemStatsArray;
+       IMG_BOOL bbAllProcessStats;
+       IMG_UINT32 ui32ArrSize;
+       IMG_UINT32 ui32PID;
+} __packed PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS;
+
+/* Bridge out structure for FindProcessMemStats */
+typedef struct PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS_TAG
+{
+       IMG_UINT32 *pui32MemStatsArray;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS;
+
+/*******************************************
+            AcquireInfoPage
+ *******************************************/
+
+/* Bridge in structure for AcquireInfoPage */
+typedef struct PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE_TAG
+{
+       IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE;
+
+/* Bridge out structure for AcquireInfoPage */
+typedef struct PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE_TAG
+{
+       IMG_HANDLE hPMR;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE;
+
+/*******************************************
+            ReleaseInfoPage
+ *******************************************/
+
+/* Bridge in structure for ReleaseInfoPage */
+typedef struct PVRSRV_BRIDGE_IN_RELEASEINFOPAGE_TAG
+{
+       IMG_HANDLE hPMR;
+} __packed PVRSRV_BRIDGE_IN_RELEASEINFOPAGE;
+
+/* Bridge out structure for ReleaseInfoPage */
+typedef struct PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE;
+
+#endif /* COMMON_SRVCORE_BRIDGE_H */
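
The generated header above follows a fixed marshalling convention: each bridge call has a packed IN structure copied in from user space and a packed OUT structure copied back, the PVRSRV_ERROR status always travels in the OUT structure, and calls with no inputs still carry a ui32EmptyStructPlaceholder so the IN structure is never zero-sized. The following minimal, self-contained sketch models that convention; every name in it (example_bridge_in_t, example_dispatch and so on) is an illustrative stand-in, not a driver symbol.

/*
 * Minimal model of the bridge IN/OUT marshalling convention.
 * Illustrative stand-in names only; not driver code.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef int32_t example_error_t;                /* stands in for PVRSRV_ERROR */
#define EXAMPLE_OK 0

typedef struct {                                /* mirrors a PVRSRV_BRIDGE_IN_* struct */
	uint32_t ui32Flags;
} __attribute__((packed)) example_bridge_in_t;

typedef struct {                                /* mirrors a PVRSRV_BRIDGE_OUT_* struct */
	example_error_t eError;                 /* status always travels in the OUT struct */
	uint32_t ui32Result;
} __attribute__((packed)) example_bridge_out_t;

/* Server-side entry point: cast the raw byte buffers, do the work, fill OUT. */
static int example_dispatch(uint8_t *in_ui8, uint8_t *out_ui8)
{
	example_bridge_in_t *in = (example_bridge_in_t *)in_ui8;
	example_bridge_out_t *out = (example_bridge_out_t *)out_ui8;

	out->ui32Result = in->ui32Flags * 2u;   /* placeholder for the real KM call */
	out->eError = EXAMPLE_OK;
	return 0;                               /* the dispatcher itself returns 0 */
}

int main(void)
{
	example_bridge_in_t in = { .ui32Flags = 21 };
	example_bridge_out_t out;
	uint8_t in_buf[sizeof(in)], out_buf[sizeof(out)];

	memcpy(in_buf, &in, sizeof(in));        /* stands in for the copy from user space */
	example_dispatch(in_buf, out_buf);
	memcpy(&out, out_buf, sizeof(out));     /* stands in for the copy back to user space */

	printf("eError=%d result=%u\n", (int)out.eError, (unsigned)out.ui32Result);
	return 0;
}

The real server entry points in the next file have the same shape: cast the raw byte buffers to the typed IN/OUT structures, call the kernel-mode implementation, record the result in eError and return 0 from the dispatcher itself.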
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/srvcore_bridge/server_srvcore_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/srvcore_bridge/server_srvcore_bridge.c
new file mode 100644 (file)
index 0000000..48abd12
--- /dev/null
@@ -0,0 +1,1072 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for srvcore
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for srvcore
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "srvcore.h"
+#include "info_page.h"
+#include "proc_stats.h"
+#include "rgx_fwif_alignchecks.h"
+
+#include "common_srvcore_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeConnect(IMG_UINT32 ui32DispatchTableEntry,
+                   IMG_UINT8 * psConnectIN_UI8,
+                   IMG_UINT8 * psConnectOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_CONNECT *psConnectIN =
+           (PVRSRV_BRIDGE_IN_CONNECT *) IMG_OFFSET_ADDR(psConnectIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_CONNECT *psConnectOUT =
+           (PVRSRV_BRIDGE_OUT_CONNECT *) IMG_OFFSET_ADDR(psConnectOUT_UI8, 0);
+
+       psConnectOUT->eError =
+           PVRSRVConnectKM(psConnection, OSGetDevNode(psConnection),
+                           psConnectIN->ui32Flags,
+                           psConnectIN->ui32ClientBuildOptions,
+                           psConnectIN->ui32ClientDDKVersion,
+                           psConnectIN->ui32ClientDDKBuild,
+                           &psConnectOUT->ui8KernelArch,
+                           &psConnectOUT->ui32CapabilityFlags, &psConnectOUT->ui64PackedBvnc);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDisconnect(IMG_UINT32 ui32DispatchTableEntry,
+                      IMG_UINT8 * psDisconnectIN_UI8,
+                      IMG_UINT8 * psDisconnectOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DISCONNECT *psDisconnectIN =
+           (PVRSRV_BRIDGE_IN_DISCONNECT *) IMG_OFFSET_ADDR(psDisconnectIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DISCONNECT *psDisconnectOUT =
+           (PVRSRV_BRIDGE_OUT_DISCONNECT *) IMG_OFFSET_ADDR(psDisconnectOUT_UI8, 0);
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVR_UNREFERENCED_PARAMETER(psDisconnectIN);
+
+       psDisconnectOUT->eError = PVRSRVDisconnectKM();
+
+       return 0;
+}
+
+static PVRSRV_ERROR _AcquireGlobalEventObjecthGlobalEventObjectIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = PVRSRVReleaseGlobalEventObjectKM((IMG_HANDLE) pvData);
+       return eError;
+}
+
+static IMG_INT
+PVRSRVBridgeAcquireGlobalEventObject(IMG_UINT32 ui32DispatchTableEntry,
+                                    IMG_UINT8 * psAcquireGlobalEventObjectIN_UI8,
+                                    IMG_UINT8 * psAcquireGlobalEventObjectOUT_UI8,
+                                    CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT *psAcquireGlobalEventObjectIN =
+           (PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT *)
+           IMG_OFFSET_ADDR(psAcquireGlobalEventObjectIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT *psAcquireGlobalEventObjectOUT =
+           (PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT *)
+           IMG_OFFSET_ADDR(psAcquireGlobalEventObjectOUT_UI8, 0);
+
+       IMG_HANDLE hGlobalEventObjectInt = NULL;
+
+       PVR_UNREFERENCED_PARAMETER(psAcquireGlobalEventObjectIN);
+
+       psAcquireGlobalEventObjectOUT->eError =
+           PVRSRVAcquireGlobalEventObjectKM(&hGlobalEventObjectInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK))
+       {
+               goto AcquireGlobalEventObject_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psAcquireGlobalEventObjectOUT->eError =
+           PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                     &psAcquireGlobalEventObjectOUT->hGlobalEventObject,
+                                     (void *)hGlobalEventObjectInt,
+                                     PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
+                                     PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                     (PFN_HANDLE_RELEASE) &
+                                     _AcquireGlobalEventObjecthGlobalEventObjectIntRelease);
+       if (unlikely(psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto AcquireGlobalEventObject_exit;
+       }
+
+       /* Release now that we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+AcquireGlobalEventObject_exit:
+
+       if (psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK)
+       {
+               if (hGlobalEventObjectInt)
+               {
+                       PVRSRVReleaseGlobalEventObjectKM(hGlobalEventObjectInt);
+               }
+       }
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeReleaseGlobalEventObject(IMG_UINT32 ui32DispatchTableEntry,
+                                    IMG_UINT8 * psReleaseGlobalEventObjectIN_UI8,
+                                    IMG_UINT8 * psReleaseGlobalEventObjectOUT_UI8,
+                                    CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT *psReleaseGlobalEventObjectIN =
+           (PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT *)
+           IMG_OFFSET_ADDR(psReleaseGlobalEventObjectIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT *psReleaseGlobalEventObjectOUT =
+           (PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT *)
+           IMG_OFFSET_ADDR(psReleaseGlobalEventObjectOUT_UI8, 0);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psHandleBase);
+
+       psReleaseGlobalEventObjectOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+                                             (IMG_HANDLE) psReleaseGlobalEventObjectIN->
+                                             hGlobalEventObject,
+                                             PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
+       if (unlikely
+           ((psReleaseGlobalEventObjectOUT->eError != PVRSRV_OK)
+            && (psReleaseGlobalEventObjectOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL)
+            && (psReleaseGlobalEventObjectOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s",
+                        __func__, PVRSRVGetErrorString(psReleaseGlobalEventObjectOUT->eError)));
+               UnlockHandle(psConnection->psHandleBase);
+               goto ReleaseGlobalEventObject_exit;
+       }
+
+       /* Release now that we have destroyed handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+ReleaseGlobalEventObject_exit:
+
+       return 0;
+}
+
+static PVRSRV_ERROR _EventObjectOpenhOSEventIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = OSEventObjectClose((IMG_HANDLE) pvData);
+       return eError;
+}
+
+static IMG_INT
+PVRSRVBridgeEventObjectOpen(IMG_UINT32 ui32DispatchTableEntry,
+                           IMG_UINT8 * psEventObjectOpenIN_UI8,
+                           IMG_UINT8 * psEventObjectOpenOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN *psEventObjectOpenIN =
+           (PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN *) IMG_OFFSET_ADDR(psEventObjectOpenIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN *psEventObjectOpenOUT =
+           (PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN *) IMG_OFFSET_ADDR(psEventObjectOpenOUT_UI8, 0);
+
+       IMG_HANDLE hEventObject = psEventObjectOpenIN->hEventObject;
+       IMG_HANDLE hEventObjectInt = NULL;
+       IMG_HANDLE hOSEventInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psEventObjectOpenOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&hEventObjectInt,
+                                      hEventObject,
+                                      PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT, IMG_TRUE);
+       if (unlikely(psEventObjectOpenOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto EventObjectOpen_exit;
+       }
+       /* Release now that we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psEventObjectOpenOUT->eError = OSEventObjectOpen(hEventObjectInt, &hOSEventInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psEventObjectOpenOUT->eError != PVRSRV_OK))
+       {
+               goto EventObjectOpen_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psEventObjectOpenOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                                                &psEventObjectOpenOUT->hOSEvent,
+                                                                (void *)hOSEventInt,
+                                                                PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+                                                                PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                                                (PFN_HANDLE_RELEASE) &
+                                                                _EventObjectOpenhOSEventIntRelease);
+       if (unlikely(psEventObjectOpenOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto EventObjectOpen_exit;
+       }
+
+       /* Release now that we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+EventObjectOpen_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (hEventObjectInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hEventObject, PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
+       }
+       /* Release now that we have cleaned up the looked-up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       if (psEventObjectOpenOUT->eError != PVRSRV_OK)
+       {
+               if (hOSEventInt)
+               {
+                       OSEventObjectClose(hOSEventInt);
+               }
+       }
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeEventObjectWait(IMG_UINT32 ui32DispatchTableEntry,
+                           IMG_UINT8 * psEventObjectWaitIN_UI8,
+                           IMG_UINT8 * psEventObjectWaitOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT *psEventObjectWaitIN =
+           (PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT *) IMG_OFFSET_ADDR(psEventObjectWaitIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT *psEventObjectWaitOUT =
+           (PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT *) IMG_OFFSET_ADDR(psEventObjectWaitOUT_UI8, 0);
+
+       IMG_HANDLE hOSEventKM = psEventObjectWaitIN->hOSEventKM;
+       IMG_HANDLE hOSEventKMInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psEventObjectWaitOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&hOSEventKMInt,
+                                      hOSEventKM,
+                                      PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT, IMG_TRUE);
+       if (unlikely(psEventObjectWaitOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto EventObjectWait_exit;
+       }
+       /* Release now that we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psEventObjectWaitOUT->eError = OSEventObjectWait(hOSEventKMInt);
+
+EventObjectWait_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (hOSEventKMInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hOSEventKM, PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
+       }
+       /* Release now that we have cleaned up the looked-up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeEventObjectClose(IMG_UINT32 ui32DispatchTableEntry,
+                            IMG_UINT8 * psEventObjectCloseIN_UI8,
+                            IMG_UINT8 * psEventObjectCloseOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE *psEventObjectCloseIN =
+           (PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE *) IMG_OFFSET_ADDR(psEventObjectCloseIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE *psEventObjectCloseOUT =
+           (PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE *) IMG_OFFSET_ADDR(psEventObjectCloseOUT_UI8, 0);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psHandleBase);
+
+       psEventObjectCloseOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+                                             (IMG_HANDLE) psEventObjectCloseIN->hOSEventKM,
+                                             PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
+       if (unlikely((psEventObjectCloseOUT->eError != PVRSRV_OK) &&
+                    (psEventObjectCloseOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+                    (psEventObjectCloseOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s", __func__, PVRSRVGetErrorString(psEventObjectCloseOUT->eError)));
+               UnlockHandle(psConnection->psHandleBase);
+               goto EventObjectClose_exit;
+       }
+
+       /* Release now that we have destroyed handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+EventObjectClose_exit:
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDumpDebugInfo(IMG_UINT32 ui32DispatchTableEntry,
+                         IMG_UINT8 * psDumpDebugInfoIN_UI8,
+                         IMG_UINT8 * psDumpDebugInfoOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_DUMPDEBUGINFO *psDumpDebugInfoIN =
+           (PVRSRV_BRIDGE_IN_DUMPDEBUGINFO *) IMG_OFFSET_ADDR(psDumpDebugInfoIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO *psDumpDebugInfoOUT =
+           (PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO *) IMG_OFFSET_ADDR(psDumpDebugInfoOUT_UI8, 0);
+
+       psDumpDebugInfoOUT->eError =
+           PVRSRVDumpDebugInfoKM(psConnection, OSGetDevNode(psConnection),
+                                 psDumpDebugInfoIN->ui32VerbLevel);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeGetDevClockSpeed(IMG_UINT32 ui32DispatchTableEntry,
+                            IMG_UINT8 * psGetDevClockSpeedIN_UI8,
+                            IMG_UINT8 * psGetDevClockSpeedOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED *psGetDevClockSpeedIN =
+           (PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED *) IMG_OFFSET_ADDR(psGetDevClockSpeedIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED *psGetDevClockSpeedOUT =
+           (PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED *) IMG_OFFSET_ADDR(psGetDevClockSpeedOUT_UI8, 0);
+
+       PVR_UNREFERENCED_PARAMETER(psGetDevClockSpeedIN);
+
+       psGetDevClockSpeedOUT->eError =
+           PVRSRVGetDevClockSpeedKM(psConnection, OSGetDevNode(psConnection),
+                                    &psGetDevClockSpeedOUT->ui32ClockSpeed);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeHWOpTimeout(IMG_UINT32 ui32DispatchTableEntry,
+                       IMG_UINT8 * psHWOpTimeoutIN_UI8,
+                       IMG_UINT8 * psHWOpTimeoutOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_HWOPTIMEOUT *psHWOpTimeoutIN =
+           (PVRSRV_BRIDGE_IN_HWOPTIMEOUT *) IMG_OFFSET_ADDR(psHWOpTimeoutIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_HWOPTIMEOUT *psHWOpTimeoutOUT =
+           (PVRSRV_BRIDGE_OUT_HWOPTIMEOUT *) IMG_OFFSET_ADDR(psHWOpTimeoutOUT_UI8, 0);
+
+       PVR_UNREFERENCED_PARAMETER(psHWOpTimeoutIN);
+
+       psHWOpTimeoutOUT->eError = PVRSRVHWOpTimeoutKM(psConnection, OSGetDevNode(psConnection));
+
+       return 0;
+}
+
+static_assert(RGXFW_ALIGN_CHECKS_UM_MAX <= IMG_UINT32_MAX,
+             "RGXFW_ALIGN_CHECKS_UM_MAX must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeAlignmentCheck(IMG_UINT32 ui32DispatchTableEntry,
+                          IMG_UINT8 * psAlignmentCheckIN_UI8,
+                          IMG_UINT8 * psAlignmentCheckOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_ALIGNMENTCHECK *psAlignmentCheckIN =
+           (PVRSRV_BRIDGE_IN_ALIGNMENTCHECK *) IMG_OFFSET_ADDR(psAlignmentCheckIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK *psAlignmentCheckOUT =
+           (PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK *) IMG_OFFSET_ADDR(psAlignmentCheckOUT_UI8, 0);
+
+       IMG_UINT32 *ui32AlignChecksInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32)) + 0;
+
+       if (unlikely(psAlignmentCheckIN->ui32AlignChecksSize > RGXFW_ALIGN_CHECKS_UM_MAX))
+       {
+               psAlignmentCheckOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto AlignmentCheck_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psAlignmentCheckOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto AlignmentCheck_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psAlignmentCheckIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psAlignmentCheckIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psAlignmentCheckOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto AlignmentCheck_exit;
+                       }
+               }
+       }
+
+       if (psAlignmentCheckIN->ui32AlignChecksSize != 0)
+       {
+               ui32AlignChecksInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32);
+       }
+
+       /* Copy the data over */
+       if (psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, ui32AlignChecksInt,
+                    (const void __user *)psAlignmentCheckIN->pui32AlignChecks,
+                    psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32)) != PVRSRV_OK)
+               {
+                       psAlignmentCheckOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto AlignmentCheck_exit;
+               }
+       }
+
+       psAlignmentCheckOUT->eError =
+           PVRSRVAlignmentCheckKM(psConnection, OSGetDevNode(psConnection),
+                                  psAlignmentCheckIN->ui32AlignChecksSize, ui32AlignChecksInt);
+
+AlignmentCheck_exit:
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psAlignmentCheckOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeGetDeviceStatus(IMG_UINT32 ui32DispatchTableEntry,
+                           IMG_UINT8 * psGetDeviceStatusIN_UI8,
+                           IMG_UINT8 * psGetDeviceStatusOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_GETDEVICESTATUS *psGetDeviceStatusIN =
+           (PVRSRV_BRIDGE_IN_GETDEVICESTATUS *) IMG_OFFSET_ADDR(psGetDeviceStatusIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_GETDEVICESTATUS *psGetDeviceStatusOUT =
+           (PVRSRV_BRIDGE_OUT_GETDEVICESTATUS *) IMG_OFFSET_ADDR(psGetDeviceStatusOUT_UI8, 0);
+
+       PVR_UNREFERENCED_PARAMETER(psGetDeviceStatusIN);
+
+       psGetDeviceStatusOUT->eError =
+           PVRSRVGetDeviceStatusKM(psConnection, OSGetDevNode(psConnection),
+                                   &psGetDeviceStatusOUT->ui32DeviceSatus);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeGetMultiCoreInfo(IMG_UINT32 ui32DispatchTableEntry,
+                            IMG_UINT8 * psGetMultiCoreInfoIN_UI8,
+                            IMG_UINT8 * psGetMultiCoreInfoOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_GETMULTICOREINFO *psGetMultiCoreInfoIN =
+           (PVRSRV_BRIDGE_IN_GETMULTICOREINFO *) IMG_OFFSET_ADDR(psGetMultiCoreInfoIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_GETMULTICOREINFO *psGetMultiCoreInfoOUT =
+           (PVRSRV_BRIDGE_OUT_GETMULTICOREINFO *) IMG_OFFSET_ADDR(psGetMultiCoreInfoOUT_UI8, 0);
+
+       IMG_UINT64 *pui64CapsInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psGetMultiCoreInfoIN->ui32CapsSize * sizeof(IMG_UINT64)) + 0;
+
+       if (psGetMultiCoreInfoIN->ui32CapsSize > 8)
+       {
+               psGetMultiCoreInfoOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto GetMultiCoreInfo_exit;
+       }
+
+       psGetMultiCoreInfoOUT->pui64Caps = psGetMultiCoreInfoIN->pui64Caps;
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psGetMultiCoreInfoOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto GetMultiCoreInfo_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psGetMultiCoreInfoIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psGetMultiCoreInfoIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psGetMultiCoreInfoOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto GetMultiCoreInfo_exit;
+                       }
+               }
+       }
+
+       if (psGetMultiCoreInfoIN->ui32CapsSize != 0)
+       {
+               pui64CapsInt = (IMG_UINT64 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psGetMultiCoreInfoIN->ui32CapsSize * sizeof(IMG_UINT64);
+       }
+
+       psGetMultiCoreInfoOUT->eError =
+           PVRSRVGetMultiCoreInfoKM(psConnection, OSGetDevNode(psConnection),
+                                    psGetMultiCoreInfoIN->ui32CapsSize,
+                                    &psGetMultiCoreInfoOUT->ui32NumCores, pui64CapsInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psGetMultiCoreInfoOUT->eError != PVRSRV_OK))
+       {
+               goto GetMultiCoreInfo_exit;
+       }
+
+       /* If dest ptr is non-null and we have data to copy */
+       if ((pui64CapsInt) && ((psGetMultiCoreInfoIN->ui32CapsSize * sizeof(IMG_UINT64)) > 0))
+       {
+               if (unlikely
+                   (OSCopyToUser
+                    (NULL, (void __user *)psGetMultiCoreInfoOUT->pui64Caps, pui64CapsInt,
+                     (psGetMultiCoreInfoIN->ui32CapsSize * sizeof(IMG_UINT64))) != PVRSRV_OK))
+               {
+                       psGetMultiCoreInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto GetMultiCoreInfo_exit;
+               }
+       }
+
+GetMultiCoreInfo_exit:
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psGetMultiCoreInfoOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeEventObjectWaitTimeout(IMG_UINT32 ui32DispatchTableEntry,
+                                  IMG_UINT8 * psEventObjectWaitTimeoutIN_UI8,
+                                  IMG_UINT8 * psEventObjectWaitTimeoutOUT_UI8,
+                                  CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT *psEventObjectWaitTimeoutIN =
+           (PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT *)
+           IMG_OFFSET_ADDR(psEventObjectWaitTimeoutIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT *psEventObjectWaitTimeoutOUT =
+           (PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT *)
+           IMG_OFFSET_ADDR(psEventObjectWaitTimeoutOUT_UI8, 0);
+
+       IMG_HANDLE hOSEventKM = psEventObjectWaitTimeoutIN->hOSEventKM;
+       IMG_HANDLE hOSEventKMInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psEventObjectWaitTimeoutOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&hOSEventKMInt,
+                                      hOSEventKM,
+                                      PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT, IMG_TRUE);
+       if (unlikely(psEventObjectWaitTimeoutOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto EventObjectWaitTimeout_exit;
+       }
+       /* Release now that we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psEventObjectWaitTimeoutOUT->eError =
+           OSEventObjectWaitTimeout(hOSEventKMInt, psEventObjectWaitTimeoutIN->ui64uiTimeoutus);
+
+EventObjectWaitTimeout_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (hOSEventKMInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hOSEventKM, PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
+       }
+       /* Release now that we have cleaned up the looked-up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeFindProcessMemStats(IMG_UINT32 ui32DispatchTableEntry,
+                               IMG_UINT8 * psFindProcessMemStatsIN_UI8,
+                               IMG_UINT8 * psFindProcessMemStatsOUT_UI8,
+                               CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS *psFindProcessMemStatsIN =
+           (PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS *) IMG_OFFSET_ADDR(psFindProcessMemStatsIN_UI8,
+                                                                    0);
+       PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS *psFindProcessMemStatsOUT =
+           (PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS *) IMG_OFFSET_ADDR(psFindProcessMemStatsOUT_UI8,
+                                                                     0);
+
+       IMG_UINT32 *pui32MemStatsArrayInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32)) + 0;
+
+       if (psFindProcessMemStatsIN->ui32ArrSize > PVRSRV_PROCESS_STAT_TYPE_COUNT)
+       {
+               psFindProcessMemStatsOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto FindProcessMemStats_exit;
+       }
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       psFindProcessMemStatsOUT->pui32MemStatsArray = psFindProcessMemStatsIN->pui32MemStatsArray;
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psFindProcessMemStatsOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto FindProcessMemStats_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psFindProcessMemStatsIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psFindProcessMemStatsIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psFindProcessMemStatsOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto FindProcessMemStats_exit;
+                       }
+               }
+       }
+
+       if (psFindProcessMemStatsIN->ui32ArrSize != 0)
+       {
+               pui32MemStatsArrayInt =
+                   (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32);
+       }
+
+       psFindProcessMemStatsOUT->eError =
+           PVRSRVFindProcessMemStatsKM(psFindProcessMemStatsIN->ui32PID,
+                                       psFindProcessMemStatsIN->ui32ArrSize,
+                                       psFindProcessMemStatsIN->bbAllProcessStats,
+                                       pui32MemStatsArrayInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psFindProcessMemStatsOUT->eError != PVRSRV_OK))
+       {
+               goto FindProcessMemStats_exit;
+       }
+
+       /* If dest ptr is non-null and we have data to copy */
+       if ((pui32MemStatsArrayInt) &&
+           ((psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32)) > 0))
+       {
+               if (unlikely
+                   (OSCopyToUser
+                    (NULL, (void __user *)psFindProcessMemStatsOUT->pui32MemStatsArray,
+                     pui32MemStatsArrayInt,
+                     (psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32))) != PVRSRV_OK))
+               {
+                       psFindProcessMemStatsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto FindProcessMemStats_exit;
+               }
+       }
+
+FindProcessMemStats_exit:
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psFindProcessMemStatsOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static PVRSRV_ERROR _AcquireInfoPagepsPMRIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = PVRSRVReleaseInfoPageKM((PMR *) pvData);
+       return eError;
+}
+
+static IMG_INT
+PVRSRVBridgeAcquireInfoPage(IMG_UINT32 ui32DispatchTableEntry,
+                           IMG_UINT8 * psAcquireInfoPageIN_UI8,
+                           IMG_UINT8 * psAcquireInfoPageOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE *psAcquireInfoPageIN =
+           (PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE *) IMG_OFFSET_ADDR(psAcquireInfoPageIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE *psAcquireInfoPageOUT =
+           (PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE *) IMG_OFFSET_ADDR(psAcquireInfoPageOUT_UI8, 0);
+
+       PMR *psPMRInt = NULL;
+
+       PVR_UNREFERENCED_PARAMETER(psAcquireInfoPageIN);
+
+       psAcquireInfoPageOUT->eError = PVRSRVAcquireInfoPageKM(&psPMRInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psAcquireInfoPageOUT->eError != PVRSRV_OK))
+       {
+               goto AcquireInfoPage_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       psAcquireInfoPageOUT->eError =
+           PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                     &psAcquireInfoPageOUT->hPMR, (void *)psPMRInt,
+                                     PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT,
+                                     PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                     (PFN_HANDLE_RELEASE) & _AcquireInfoPagepsPMRIntRelease);
+       if (unlikely(psAcquireInfoPageOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+               goto AcquireInfoPage_exit;
+       }
+
+       /* Release now that we have created handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+AcquireInfoPage_exit:
+
+       if (psAcquireInfoPageOUT->eError != PVRSRV_OK)
+       {
+               if (psPMRInt)
+               {
+                       PVRSRVReleaseInfoPageKM(psPMRInt);
+               }
+       }
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeReleaseInfoPage(IMG_UINT32 ui32DispatchTableEntry,
+                           IMG_UINT8 * psReleaseInfoPageIN_UI8,
+                           IMG_UINT8 * psReleaseInfoPageOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_RELEASEINFOPAGE *psReleaseInfoPageIN =
+           (PVRSRV_BRIDGE_IN_RELEASEINFOPAGE *) IMG_OFFSET_ADDR(psReleaseInfoPageIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE *psReleaseInfoPageOUT =
+           (PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE *) IMG_OFFSET_ADDR(psReleaseInfoPageOUT_UI8, 0);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       psReleaseInfoPageOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                             (IMG_HANDLE) psReleaseInfoPageIN->hPMR,
+                                             PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT);
+       if (unlikely((psReleaseInfoPageOUT->eError != PVRSRV_OK) &&
+                    (psReleaseInfoPageOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+                    (psReleaseInfoPageOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s", __func__, PVRSRVGetErrorString(psReleaseInfoPageOUT->eError)));
+               UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+               goto ReleaseInfoPage_exit;
+       }
+
+       /* Release now that we have destroyed handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ReleaseInfoPage_exit:
+
+       return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitSRVCOREBridge(void);
+void DeinitSRVCOREBridge(void);
+
+/*
+ * Register all SRVCORE functions with services
+ */
+PVRSRV_ERROR InitSRVCOREBridge(void)
+{
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_CONNECT,
+                             PVRSRVBridgeConnect, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DISCONNECT,
+                             PVRSRVBridgeDisconnect, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT,
+                             PVRSRVBridgeAcquireGlobalEventObject, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT,
+                             PVRSRVBridgeReleaseGlobalEventObject, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN,
+                             PVRSRVBridgeEventObjectOpen, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT,
+                             PVRSRVBridgeEventObjectWait, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE,
+                             PVRSRVBridgeEventObjectClose, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO,
+                             PVRSRVBridgeDumpDebugInfo, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED,
+                             PVRSRVBridgeGetDevClockSpeed, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT,
+                             PVRSRVBridgeHWOpTimeout, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK,
+                             PVRSRVBridgeAlignmentCheck, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS,
+                             PVRSRVBridgeGetDeviceStatus, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO,
+                             PVRSRVBridgeGetMultiCoreInfo, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT,
+                             PVRSRVBridgeEventObjectWaitTimeout, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS,
+                             PVRSRVBridgeFindProcessMemStats, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE,
+                             PVRSRVBridgeAcquireInfoPage, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE,
+                             PVRSRVBridgeReleaseInfoPage, NULL);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all srvcore functions with services
+ */
+void DeinitSRVCOREBridge(void)
+{
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_CONNECT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DISCONNECT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+                               PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+                               PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+                               PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE);
+
+}
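
Several of the entry points above (AlignmentCheck, GetMultiCoreInfo, FindProcessMemStats) stage their variable-length array arguments the same way: the required size is computed in 64 bits so an oversized request is rejected rather than silently wrapped, the word-aligned spare space after the fixed IN structure in the bridge input buffer is reused when it is large enough, and a heap allocation is made only as a fallback and freed only in that case. The sketch below illustrates that pattern in isolation; EXAMPLE_MAX_BRIDGE_IN_SIZE stands in for PVRSRV_MAX_BRIDGE_IN_SIZE and malloc/free stand in for OSAllocMemNoStats/OSFreeMemNoStats, so it is a model of the technique rather than the driver code.

/*
 * Model of the array-argument staging pattern used by the bridge handlers
 * above.  Names and constants are illustrative stand-ins.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define EXAMPLE_MAX_BRIDGE_IN_SIZE 0x2000u      /* stands in for PVRSRV_MAX_BRIDGE_IN_SIZE */

/*
 * Return a scratch buffer of 'needed' bytes, preferring the word-aligned spare
 * space after the fixed-size IN structure inside the bridge input buffer.
 * *allocated tells the caller whether free() is required afterwards.
 */
static void *example_stage_array(uint8_t *in_buf, size_t in_struct_size,
				 uint64_t needed, int *allocated)
{
	size_t offset = (in_struct_size + sizeof(unsigned long) - 1) &
			~(sizeof(unsigned long) - 1);
	size_t spare = (offset >= EXAMPLE_MAX_BRIDGE_IN_SIZE) ?
			0 : EXAMPLE_MAX_BRIDGE_IN_SIZE - offset;

	*allocated = 0;
	if (needed == 0 || needed > UINT32_MAX)
		return NULL;                    /* nothing to stage, or size overflow */

	if (needed <= spare)
		return in_buf + offset;         /* reuse the bridge input buffer */

	*allocated = 1;
	return malloc((size_t)needed);          /* fallback heap allocation */
}

int main(void)
{
	static uint8_t bridge_in[EXAMPLE_MAX_BRIDGE_IN_SIZE];
	int allocated = 0;
	uint32_t *array = example_stage_array(bridge_in, 64,
					      128 * sizeof(uint32_t), &allocated);

	if (array)
		memset(array, 0, 128 * sizeof(uint32_t));
	if (allocated)
		free(array);                    /* free only the heap-allocated case */
	return 0;
}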
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/sync_bridge/client_sync_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/sync_bridge/client_sync_bridge.h
new file mode 100644 (file)
index 0000000..19f1b0e
--- /dev/null
@@ -0,0 +1,102 @@
+/*******************************************************************************
+@File
+@Title          Client bridge header for sync
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for sync
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_SYNC_BRIDGE_H
+#define CLIENT_SYNC_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_sync_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR BridgeAllocSyncPrimitiveBlock(IMG_HANDLE hBridge,
+                                                       IMG_HANDLE * phSyncHandle,
+                                                       IMG_UINT32 * pui32SyncPrimVAddr,
+                                                       IMG_UINT32 * pui32SyncPrimBlockSize,
+                                                       IMG_HANDLE * phhSyncPMR);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeFreeSyncPrimitiveBlock(IMG_HANDLE hBridge, IMG_HANDLE hSyncHandle);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimSet(IMG_HANDLE hBridge,
+                                           IMG_HANDLE hSyncHandle,
+                                           IMG_UINT32 ui32Index, IMG_UINT32 ui32Value);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDump(IMG_HANDLE hBridge,
+                                             IMG_HANDLE hSyncHandle, IMG_UINT32 ui32Offset);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpValue(IMG_HANDLE hBridge,
+                                                  IMG_HANDLE hSyncHandle,
+                                                  IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpPol(IMG_HANDLE hBridge,
+                                                IMG_HANDLE hSyncHandle,
+                                                IMG_UINT32 ui32Offset,
+                                                IMG_UINT32 ui32Value,
+                                                IMG_UINT32 ui32Mask,
+                                                PDUMP_POLL_OPERATOR eOperator,
+                                                PDUMP_FLAGS_T uiPDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpCBP(IMG_HANDLE hBridge,
+                                                IMG_HANDLE hSyncHandle,
+                                                IMG_UINT32 ui32Offset,
+                                                IMG_DEVMEM_OFFSET_T uiWriteOffset,
+                                                IMG_DEVMEM_SIZE_T uiPacketSize,
+                                                IMG_DEVMEM_SIZE_T uiBufferSize);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncAllocEvent(IMG_HANDLE hBridge,
+                                              IMG_BOOL bServerSync,
+                                              IMG_UINT32 ui32FWAddr,
+                                              IMG_UINT32 ui32ClassNameSize,
+                                              const IMG_CHAR * puiClassName);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncFreeEvent(IMG_HANDLE hBridge, IMG_UINT32 ui32FWAddr);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncCheckpointSignalledPDumpPol(IMG_HANDLE hBridge,
+                                                               PVRSRV_FENCE hFence);
+
+#endif /* CLIENT_SYNC_BRIDGE_H */
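
For orientation, the fragment below shows how a caller might exercise the client API declared above: allocate a sync primitive block, initialise one primitive and free the block. It uses only the signatures declared in this header and assumes a valid hBridge handle is obtained elsewhere; it builds only inside the driver tree and is a hedged illustration, not a supported usage example taken from the driver.

#include "client_sync_bridge.h"

/*
 * Illustrative only: allocate a sync primitive block, initialise slot 0 and
 * free the block again.  hBridge is assumed to be a valid connection/device
 * handle obtained elsewhere in the driver.
 */
static PVRSRV_ERROR ExampleSyncBlockRoundTrip(IMG_HANDLE hBridge)
{
	IMG_HANDLE hSyncHandle = NULL;
	IMG_HANDLE hSyncPMR = NULL;
	IMG_UINT32 ui32SyncPrimVAddr = 0;
	IMG_UINT32 ui32SyncPrimBlockSize = 0;
	PVRSRV_ERROR eError;

	eError = BridgeAllocSyncPrimitiveBlock(hBridge, &hSyncHandle,
					       &ui32SyncPrimVAddr,
					       &ui32SyncPrimBlockSize, &hSyncPMR);
	if (eError != PVRSRV_OK)
		return eError;

	/* A real caller would go on to map the PMR and use the block; here we
	 * only write an initial value into the first primitive. */
	eError = BridgeSyncPrimSet(hBridge, hSyncHandle, 0, 0);

	(void)BridgeFreeSyncPrimitiveBlock(hBridge, hSyncHandle);

	return eError;
}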
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/sync_bridge/client_sync_direct_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/sync_bridge/client_sync_direct_bridge.c
new file mode 100644 (file)
index 0000000..d631aea
--- /dev/null
@@ -0,0 +1,262 @@
+/*******************************************************************************
+@File
+@Title          Direct client bridge for sync
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the client side of the bridge for sync
+                which is used in calls from Server context.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_sync_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "pdump.h"
+#include "pdumpdefs.h"
+#include "devicemem_typedefs.h"
+#include "pvrsrv_sync_km.h"
+#include <powervr/pvrsrv_sync_ext.h>
+
+#include "sync.h"
+#include "sync_server.h"
+#include "pdump.h"
+#include "pvrsrv_sync_km.h"
+#include "sync_fallback_server.h"
+#include "sync_checkpoint.h"
+
+IMG_INTERNAL PVRSRV_ERROR BridgeAllocSyncPrimitiveBlock(IMG_HANDLE hBridge,
+                                                       IMG_HANDLE * phSyncHandle,
+                                                       IMG_UINT32 * pui32SyncPrimVAddr,
+                                                       IMG_UINT32 * pui32SyncPrimBlockSize,
+                                                       IMG_HANDLE * phhSyncPMR)
+{
+       PVRSRV_ERROR eError;
+       SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL;
+       PMR *pshSyncPMRInt = NULL;
+
+       eError =
+           PVRSRVAllocSyncPrimitiveBlockKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                                           &psSyncHandleInt,
+                                           pui32SyncPrimVAddr,
+                                           pui32SyncPrimBlockSize, &pshSyncPMRInt);
+
+       *phSyncHandle = psSyncHandleInt;
+       *phhSyncPMR = pshSyncPMRInt;
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeFreeSyncPrimitiveBlock(IMG_HANDLE hBridge, IMG_HANDLE hSyncHandle)
+{
+       PVRSRV_ERROR eError;
+       SYNC_PRIMITIVE_BLOCK *psSyncHandleInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+       eError = PVRSRVFreeSyncPrimitiveBlockKM(psSyncHandleInt);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimSet(IMG_HANDLE hBridge,
+                                           IMG_HANDLE hSyncHandle,
+                                           IMG_UINT32 ui32Index, IMG_UINT32 ui32Value)
+{
+       PVRSRV_ERROR eError;
+       SYNC_PRIMITIVE_BLOCK *psSyncHandleInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+       eError = PVRSRVSyncPrimSetKM(psSyncHandleInt, ui32Index, ui32Value);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDump(IMG_HANDLE hBridge,
+                                             IMG_HANDLE hSyncHandle, IMG_UINT32 ui32Offset)
+{
+#if defined(PDUMP)
+       PVRSRV_ERROR eError;
+       SYNC_PRIMITIVE_BLOCK *psSyncHandleInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+       eError = PVRSRVSyncPrimPDumpKM(psSyncHandleInt, ui32Offset);
+
+       return eError;
+#else
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+       PVR_UNREFERENCED_PARAMETER(hSyncHandle);
+       PVR_UNREFERENCED_PARAMETER(ui32Offset);
+
+       return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpValue(IMG_HANDLE hBridge,
+                                                  IMG_HANDLE hSyncHandle,
+                                                  IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
+{
+#if defined(PDUMP)
+       PVRSRV_ERROR eError;
+       SYNC_PRIMITIVE_BLOCK *psSyncHandleInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+       eError = PVRSRVSyncPrimPDumpValueKM(psSyncHandleInt, ui32Offset, ui32Value);
+
+       return eError;
+#else
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+       PVR_UNREFERENCED_PARAMETER(hSyncHandle);
+       PVR_UNREFERENCED_PARAMETER(ui32Offset);
+       PVR_UNREFERENCED_PARAMETER(ui32Value);
+
+       return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpPol(IMG_HANDLE hBridge,
+                                                IMG_HANDLE hSyncHandle,
+                                                IMG_UINT32 ui32Offset,
+                                                IMG_UINT32 ui32Value,
+                                                IMG_UINT32 ui32Mask,
+                                                PDUMP_POLL_OPERATOR eOperator,
+                                                PDUMP_FLAGS_T uiPDumpFlags)
+{
+#if defined(PDUMP)
+       PVRSRV_ERROR eError;
+       SYNC_PRIMITIVE_BLOCK *psSyncHandleInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+       eError =
+           PVRSRVSyncPrimPDumpPolKM(psSyncHandleInt,
+                                    ui32Offset, ui32Value, ui32Mask, eOperator, uiPDumpFlags);
+
+       return eError;
+#else
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+       PVR_UNREFERENCED_PARAMETER(hSyncHandle);
+       PVR_UNREFERENCED_PARAMETER(ui32Offset);
+       PVR_UNREFERENCED_PARAMETER(ui32Value);
+       PVR_UNREFERENCED_PARAMETER(ui32Mask);
+       PVR_UNREFERENCED_PARAMETER(eOperator);
+       PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+
+       return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpCBP(IMG_HANDLE hBridge,
+                                                IMG_HANDLE hSyncHandle,
+                                                IMG_UINT32 ui32Offset,
+                                                IMG_DEVMEM_OFFSET_T uiWriteOffset,
+                                                IMG_DEVMEM_SIZE_T uiPacketSize,
+                                                IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+#if defined(PDUMP)
+       PVRSRV_ERROR eError;
+       SYNC_PRIMITIVE_BLOCK *psSyncHandleInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+       eError =
+           PVRSRVSyncPrimPDumpCBPKM(psSyncHandleInt,
+                                    ui32Offset, uiWriteOffset, uiPacketSize, uiBufferSize);
+
+       return eError;
+#else
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+       PVR_UNREFERENCED_PARAMETER(hSyncHandle);
+       PVR_UNREFERENCED_PARAMETER(ui32Offset);
+       PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+       PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+       PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+
+       return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncAllocEvent(IMG_HANDLE hBridge,
+                                              IMG_BOOL bServerSync,
+                                              IMG_UINT32 ui32FWAddr,
+                                              IMG_UINT32 ui32ClassNameSize,
+                                              const IMG_CHAR * puiClassName)
+{
+       PVRSRV_ERROR eError;
+
+       eError =
+           PVRSRVSyncAllocEventKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                                  bServerSync, ui32FWAddr, ui32ClassNameSize, puiClassName);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncFreeEvent(IMG_HANDLE hBridge, IMG_UINT32 ui32FWAddr)
+{
+       PVRSRV_ERROR eError;
+
+       eError = PVRSRVSyncFreeEventKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ui32FWAddr);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncCheckpointSignalledPDumpPol(IMG_HANDLE hBridge,
+                                                               PVRSRV_FENCE hFence)
+{
+#if defined(PDUMP)
+       PVRSRV_ERROR eError;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       eError = PVRSRVSyncCheckpointSignalledPDumpPolKM(hFence);
+
+       return eError;
+#else
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+       PVR_UNREFERENCED_PARAMETER(hFence);
+
+       return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
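These direct wrappers treat hBridge as the device node and forward straight to the kernel-mode entry points; when PDUMP is not defined, the PDump variants compile to stubs that return PVRSRV_ERROR_NOT_IMPLEMENTED. The small helper below is an illustrative sketch of a caller that wants PDump capture to be best-effort; whether to swallow the error this way is a caller policy choice, not something this file mandates, and the helper name is hypothetical.

#include "client_sync_bridge.h"

static PVRSRV_ERROR ExamplePDumpInitialValue(IMG_HANDLE hBridge, IMG_HANDLE hSyncBlock)
{
	/* Record the primitive at offset 0 in the PDump capture stream. */
	PVRSRV_ERROR eError = BridgeSyncPrimPDump(hBridge, hSyncBlock, 0);

	/* In a build without PDUMP the wrapper above is a stub; treat
	 * "nothing to capture" as success rather than as a failure. */
	if (eError == PVRSRV_ERROR_NOT_IMPLEMENTED)
		eError = PVRSRV_OK;

	return eError;
}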
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/sync_bridge/common_sync_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/sync_bridge/common_sync_bridge.h
new file mode 100644 (file)
index 0000000..db48d2e
--- /dev/null
@@ -0,0 +1,254 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for sync
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for sync
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_SYNC_BRIDGE_H
+#define COMMON_SYNC_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pdump.h"
+#include "pdumpdefs.h"
+#include "devicemem_typedefs.h"
+#include "pvrsrv_sync_km.h"
+#include <powervr/pvrsrv_sync_ext.h>
+
+#define PVRSRV_BRIDGE_SYNC_CMD_FIRST                   0
+#define PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK                     PVRSRV_BRIDGE_SYNC_CMD_FIRST+0
+#define PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK                      PVRSRV_BRIDGE_SYNC_CMD_FIRST+1
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMSET                 PVRSRV_BRIDGE_SYNC_CMD_FIRST+2
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP                       PVRSRV_BRIDGE_SYNC_CMD_FIRST+3
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE                  PVRSRV_BRIDGE_SYNC_CMD_FIRST+4
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL                    PVRSRV_BRIDGE_SYNC_CMD_FIRST+5
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP                    PVRSRV_BRIDGE_SYNC_CMD_FIRST+6
+#define PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT                      PVRSRV_BRIDGE_SYNC_CMD_FIRST+7
+#define PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT                       PVRSRV_BRIDGE_SYNC_CMD_FIRST+8
+#define PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL                     PVRSRV_BRIDGE_SYNC_CMD_FIRST+9
+#define PVRSRV_BRIDGE_SYNC_CMD_LAST                    (PVRSRV_BRIDGE_SYNC_CMD_FIRST+9)
+
+/*******************************************
+            AllocSyncPrimitiveBlock
+ *******************************************/
+
+/* Bridge in structure for AllocSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK_TAG
+{
+       IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK;
+
+/* Bridge out structure for AllocSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK_TAG
+{
+       IMG_HANDLE hSyncHandle;
+       IMG_HANDLE hhSyncPMR;
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32SyncPrimBlockSize;
+       IMG_UINT32 ui32SyncPrimVAddr;
+} __packed PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK;
+
+/*******************************************
+            FreeSyncPrimitiveBlock
+ *******************************************/
+
+/* Bridge in structure for FreeSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK_TAG
+{
+       IMG_HANDLE hSyncHandle;
+} __packed PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK;
+
+/* Bridge out structure for FreeSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK;
+
+/*******************************************
+            SyncPrimSet
+ *******************************************/
+
+/* Bridge in structure for SyncPrimSet */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMSET_TAG
+{
+       IMG_HANDLE hSyncHandle;
+       IMG_UINT32 ui32Index;
+       IMG_UINT32 ui32Value;
+} __packed PVRSRV_BRIDGE_IN_SYNCPRIMSET;
+
+/* Bridge out structure for SyncPrimSet */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMSET_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCPRIMSET;
+
+/*******************************************
+            SyncPrimPDump
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDump */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP_TAG
+{
+       IMG_HANDLE hSyncHandle;
+       IMG_UINT32 ui32Offset;
+} __packed PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP;
+
+/* Bridge out structure for SyncPrimPDump */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP;
+
+/*******************************************
+            SyncPrimPDumpValue
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDumpValue */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE_TAG
+{
+       IMG_HANDLE hSyncHandle;
+       IMG_UINT32 ui32Offset;
+       IMG_UINT32 ui32Value;
+} __packed PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE;
+
+/* Bridge out structure for SyncPrimPDumpValue */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE;
+
+/*******************************************
+            SyncPrimPDumpPol
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDumpPol */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL_TAG
+{
+       IMG_HANDLE hSyncHandle;
+       PDUMP_POLL_OPERATOR eOperator;
+       IMG_UINT32 ui32Mask;
+       IMG_UINT32 ui32Offset;
+       IMG_UINT32 ui32Value;
+       PDUMP_FLAGS_T uiPDumpFlags;
+} __packed PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL;
+
+/* Bridge out structure for SyncPrimPDumpPol */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL;
+
+/*******************************************
+            SyncPrimPDumpCBP
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDumpCBP */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP_TAG
+{
+       IMG_DEVMEM_SIZE_T uiBufferSize;
+       IMG_DEVMEM_SIZE_T uiPacketSize;
+       IMG_DEVMEM_OFFSET_T uiWriteOffset;
+       IMG_HANDLE hSyncHandle;
+       IMG_UINT32 ui32Offset;
+} __packed PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP;
+
+/* Bridge out structure for SyncPrimPDumpCBP */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP;
+
+/*******************************************
+            SyncAllocEvent
+ *******************************************/
+
+/* Bridge in structure for SyncAllocEvent */
+typedef struct PVRSRV_BRIDGE_IN_SYNCALLOCEVENT_TAG
+{
+       const IMG_CHAR *puiClassName;
+       IMG_BOOL bServerSync;
+       IMG_UINT32 ui32ClassNameSize;
+       IMG_UINT32 ui32FWAddr;
+} __packed PVRSRV_BRIDGE_IN_SYNCALLOCEVENT;
+
+/* Bridge out structure for SyncAllocEvent */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT;
+
+/*******************************************
+            SyncFreeEvent
+ *******************************************/
+
+/* Bridge in structure for SyncFreeEvent */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFREEEVENT_TAG
+{
+       IMG_UINT32 ui32FWAddr;
+} __packed PVRSRV_BRIDGE_IN_SYNCFREEEVENT;
+
+/* Bridge out structure for SyncFreeEvent */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFREEEVENT_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFREEEVENT;
+
+/*******************************************
+            SyncCheckpointSignalledPDumpPol
+ *******************************************/
+
+/* Bridge in structure for SyncCheckpointSignalledPDumpPol */
+typedef struct PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL_TAG
+{
+       PVRSRV_FENCE hFence;
+} __packed PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL;
+
+/* Bridge out structure for SyncCheckpointSignalledPDumpPol */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL;
+
+#endif /* COMMON_SYNC_BRIDGE_H */
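The command offsets defined above are what the server dispatch table in server_sync_bridge.c is indexed by, so the FIRST/LAST pair doubles as the table bounds. As an illustrative guard (not part of the generated header), the same static_assert helper the generated server code pulls in via img_defs.h could be used to catch a command being added on one side of the bridge but not the other:

/* Ten commands: ALLOCSYNCPRIMITIVEBLOCK (offset 0) through
 * SYNCCHECKPOINTSIGNALLEDPDUMPPOL (offset 9). */
static_assert((PVRSRV_BRIDGE_SYNC_CMD_LAST - PVRSRV_BRIDGE_SYNC_CMD_FIRST + 1) == 10,
	      "sync bridge command count does not match the dispatch table");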
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/sync_bridge/server_sync_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/sync_bridge/server_sync_bridge.c
new file mode 100644 (file)
index 0000000..4788fc1
--- /dev/null
@@ -0,0 +1,746 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for sync
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for sync
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "sync.h"
+#include "sync_server.h"
+#include "pdump.h"
+#include "pvrsrv_sync_km.h"
+#include "sync_fallback_server.h"
+#include "sync_checkpoint.h"
+
+#include "common_sync_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static PVRSRV_ERROR _AllocSyncPrimitiveBlockpsSyncHandleIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = PVRSRVFreeSyncPrimitiveBlockKM((SYNC_PRIMITIVE_BLOCK *) pvData);
+       return eError;
+}
+
+static IMG_INT
+PVRSRVBridgeAllocSyncPrimitiveBlock(IMG_UINT32 ui32DispatchTableEntry,
+                                   IMG_UINT8 * psAllocSyncPrimitiveBlockIN_UI8,
+                                   IMG_UINT8 * psAllocSyncPrimitiveBlockOUT_UI8,
+                                   CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK *psAllocSyncPrimitiveBlockIN =
+           (PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK *)
+           IMG_OFFSET_ADDR(psAllocSyncPrimitiveBlockIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK *psAllocSyncPrimitiveBlockOUT =
+           (PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK *)
+           IMG_OFFSET_ADDR(psAllocSyncPrimitiveBlockOUT_UI8, 0);
+
+       SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL;
+       PMR *pshSyncPMRInt = NULL;
+
+       PVR_UNREFERENCED_PARAMETER(psAllocSyncPrimitiveBlockIN);
+
+       psAllocSyncPrimitiveBlockOUT->hSyncHandle = NULL;
+
+       psAllocSyncPrimitiveBlockOUT->eError =
+           PVRSRVAllocSyncPrimitiveBlockKM(psConnection, OSGetDevNode(psConnection),
+                                           &psSyncHandleInt,
+                                           &psAllocSyncPrimitiveBlockOUT->ui32SyncPrimVAddr,
+                                           &psAllocSyncPrimitiveBlockOUT->ui32SyncPrimBlockSize,
+                                           &pshSyncPMRInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK))
+       {
+               goto AllocSyncPrimitiveBlock_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
+       psAllocSyncPrimitiveBlockOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                                                        &psAllocSyncPrimitiveBlockOUT->
+                                                                        hSyncHandle,
+                                                                        (void *)psSyncHandleInt,
+                                                                        PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+                                                                        PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                                                        (PFN_HANDLE_RELEASE) &
+                                                                        _AllocSyncPrimitiveBlockpsSyncHandleIntRelease);
+       if (unlikely(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto AllocSyncPrimitiveBlock_exit;
+       }
+
+       psAllocSyncPrimitiveBlockOUT->eError =
+           PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+                                        &psAllocSyncPrimitiveBlockOUT->hhSyncPMR,
+                                        (void *)pshSyncPMRInt,
+                                        PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+                                        PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+                                        psAllocSyncPrimitiveBlockOUT->hSyncHandle);
+       if (unlikely(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto AllocSyncPrimitiveBlock_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+AllocSyncPrimitiveBlock_exit:
+
+       if (psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)
+       {
+               if (psAllocSyncPrimitiveBlockOUT->hSyncHandle)
+               {
+                       PVRSRV_ERROR eError;
+
+                       /* Lock over handle creation cleanup. */
+                       LockHandle(psConnection->psHandleBase);
+
+                       eError = PVRSRVDestroyHandleUnlocked(psConnection->psHandleBase,
+                                                            (IMG_HANDLE)
+                                                            psAllocSyncPrimitiveBlockOUT->
+                                                            hSyncHandle,
+                                                            PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+                       if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)))
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: %s", __func__, PVRSRVGetErrorString(eError)));
+                       }
+                       /* Releasing the handle should free/destroy/release the resource.
+                        * This should never fail... */
+                       PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+                       /* Avoid freeing/destroying/releasing the resource a second time below */
+                       psSyncHandleInt = NULL;
+                       /* Release now we have cleaned up creation handles. */
+                       UnlockHandle(psConnection->psHandleBase);
+
+               }
+
+               if (psSyncHandleInt)
+               {
+                       PVRSRVFreeSyncPrimitiveBlockKM(psSyncHandleInt);
+               }
+       }
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeFreeSyncPrimitiveBlock(IMG_UINT32 ui32DispatchTableEntry,
+                                  IMG_UINT8 * psFreeSyncPrimitiveBlockIN_UI8,
+                                  IMG_UINT8 * psFreeSyncPrimitiveBlockOUT_UI8,
+                                  CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK *psFreeSyncPrimitiveBlockIN =
+           (PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK *)
+           IMG_OFFSET_ADDR(psFreeSyncPrimitiveBlockIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK *psFreeSyncPrimitiveBlockOUT =
+           (PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK *)
+           IMG_OFFSET_ADDR(psFreeSyncPrimitiveBlockOUT_UI8, 0);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psHandleBase);
+
+       psFreeSyncPrimitiveBlockOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+                                             (IMG_HANDLE) psFreeSyncPrimitiveBlockIN->hSyncHandle,
+                                             PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+       if (unlikely((psFreeSyncPrimitiveBlockOUT->eError != PVRSRV_OK) &&
+                    (psFreeSyncPrimitiveBlockOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+                    (psFreeSyncPrimitiveBlockOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s",
+                        __func__, PVRSRVGetErrorString(psFreeSyncPrimitiveBlockOUT->eError)));
+               UnlockHandle(psConnection->psHandleBase);
+               goto FreeSyncPrimitiveBlock_exit;
+       }
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+FreeSyncPrimitiveBlock_exit:
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncPrimSet(IMG_UINT32 ui32DispatchTableEntry,
+                       IMG_UINT8 * psSyncPrimSetIN_UI8,
+                       IMG_UINT8 * psSyncPrimSetOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_SYNCPRIMSET *psSyncPrimSetIN =
+           (PVRSRV_BRIDGE_IN_SYNCPRIMSET *) IMG_OFFSET_ADDR(psSyncPrimSetIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_SYNCPRIMSET *psSyncPrimSetOUT =
+           (PVRSRV_BRIDGE_OUT_SYNCPRIMSET *) IMG_OFFSET_ADDR(psSyncPrimSetOUT_UI8, 0);
+
+       IMG_HANDLE hSyncHandle = psSyncPrimSetIN->hSyncHandle;
+       SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psSyncPrimSetOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psSyncHandleInt,
+                                      hSyncHandle,
+                                      PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE);
+       if (unlikely(psSyncPrimSetOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto SyncPrimSet_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psSyncPrimSetOUT->eError =
+           PVRSRVSyncPrimSetKM(psSyncHandleInt,
+                               psSyncPrimSetIN->ui32Index, psSyncPrimSetIN->ui32Value);
+
+SyncPrimSet_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psSyncHandleInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hSyncHandle, PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+#if defined(PDUMP)
+
+static IMG_INT
+PVRSRVBridgeSyncPrimPDump(IMG_UINT32 ui32DispatchTableEntry,
+                         IMG_UINT8 * psSyncPrimPDumpIN_UI8,
+                         IMG_UINT8 * psSyncPrimPDumpOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP *psSyncPrimPDumpIN =
+           (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP *) IMG_OFFSET_ADDR(psSyncPrimPDumpIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP *psSyncPrimPDumpOUT =
+           (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP *) IMG_OFFSET_ADDR(psSyncPrimPDumpOUT_UI8, 0);
+
+       IMG_HANDLE hSyncHandle = psSyncPrimPDumpIN->hSyncHandle;
+       SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psSyncPrimPDumpOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psSyncHandleInt,
+                                      hSyncHandle,
+                                      PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE);
+       if (unlikely(psSyncPrimPDumpOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto SyncPrimPDump_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psSyncPrimPDumpOUT->eError =
+           PVRSRVSyncPrimPDumpKM(psSyncHandleInt, psSyncPrimPDumpIN->ui32Offset);
+
+SyncPrimPDump_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psSyncHandleInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hSyncHandle, PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimPDump NULL
+#endif
+
+#if defined(PDUMP)
+
+static IMG_INT
+PVRSRVBridgeSyncPrimPDumpValue(IMG_UINT32 ui32DispatchTableEntry,
+                              IMG_UINT8 * psSyncPrimPDumpValueIN_UI8,
+                              IMG_UINT8 * psSyncPrimPDumpValueOUT_UI8,
+                              CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE *psSyncPrimPDumpValueIN =
+           (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE *) IMG_OFFSET_ADDR(psSyncPrimPDumpValueIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE *psSyncPrimPDumpValueOUT =
+           (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE *) IMG_OFFSET_ADDR(psSyncPrimPDumpValueOUT_UI8,
+                                                                    0);
+
+       IMG_HANDLE hSyncHandle = psSyncPrimPDumpValueIN->hSyncHandle;
+       SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psSyncPrimPDumpValueOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psSyncHandleInt,
+                                      hSyncHandle,
+                                      PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE);
+       if (unlikely(psSyncPrimPDumpValueOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto SyncPrimPDumpValue_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psSyncPrimPDumpValueOUT->eError =
+           PVRSRVSyncPrimPDumpValueKM(psSyncHandleInt,
+                                      psSyncPrimPDumpValueIN->ui32Offset,
+                                      psSyncPrimPDumpValueIN->ui32Value);
+
+SyncPrimPDumpValue_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psSyncHandleInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hSyncHandle, PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimPDumpValue NULL
+#endif
+
+#if defined(PDUMP)
+
+static IMG_INT
+PVRSRVBridgeSyncPrimPDumpPol(IMG_UINT32 ui32DispatchTableEntry,
+                            IMG_UINT8 * psSyncPrimPDumpPolIN_UI8,
+                            IMG_UINT8 * psSyncPrimPDumpPolOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL *psSyncPrimPDumpPolIN =
+           (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL *) IMG_OFFSET_ADDR(psSyncPrimPDumpPolIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL *psSyncPrimPDumpPolOUT =
+           (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL *) IMG_OFFSET_ADDR(psSyncPrimPDumpPolOUT_UI8, 0);
+
+       IMG_HANDLE hSyncHandle = psSyncPrimPDumpPolIN->hSyncHandle;
+       SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psSyncPrimPDumpPolOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psSyncHandleInt,
+                                      hSyncHandle,
+                                      PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE);
+       if (unlikely(psSyncPrimPDumpPolOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto SyncPrimPDumpPol_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psSyncPrimPDumpPolOUT->eError =
+           PVRSRVSyncPrimPDumpPolKM(psSyncHandleInt,
+                                    psSyncPrimPDumpPolIN->ui32Offset,
+                                    psSyncPrimPDumpPolIN->ui32Value,
+                                    psSyncPrimPDumpPolIN->ui32Mask,
+                                    psSyncPrimPDumpPolIN->eOperator,
+                                    psSyncPrimPDumpPolIN->uiPDumpFlags);
+
+SyncPrimPDumpPol_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psSyncHandleInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hSyncHandle, PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimPDumpPol NULL
+#endif
+
+#if defined(PDUMP)
+
+static IMG_INT
+PVRSRVBridgeSyncPrimPDumpCBP(IMG_UINT32 ui32DispatchTableEntry,
+                            IMG_UINT8 * psSyncPrimPDumpCBPIN_UI8,
+                            IMG_UINT8 * psSyncPrimPDumpCBPOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP *psSyncPrimPDumpCBPIN =
+           (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP *) IMG_OFFSET_ADDR(psSyncPrimPDumpCBPIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP *psSyncPrimPDumpCBPOUT =
+           (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP *) IMG_OFFSET_ADDR(psSyncPrimPDumpCBPOUT_UI8, 0);
+
+       IMG_HANDLE hSyncHandle = psSyncPrimPDumpCBPIN->hSyncHandle;
+       SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psSyncPrimPDumpCBPOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&psSyncHandleInt,
+                                      hSyncHandle,
+                                      PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE);
+       if (unlikely(psSyncPrimPDumpCBPOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto SyncPrimPDumpCBP_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psSyncPrimPDumpCBPOUT->eError =
+           PVRSRVSyncPrimPDumpCBPKM(psSyncHandleInt,
+                                    psSyncPrimPDumpCBPIN->ui32Offset,
+                                    psSyncPrimPDumpCBPIN->uiWriteOffset,
+                                    psSyncPrimPDumpCBPIN->uiPacketSize,
+                                    psSyncPrimPDumpCBPIN->uiBufferSize);
+
+SyncPrimPDumpCBP_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psSyncHandleInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hSyncHandle, PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimPDumpCBP NULL
+#endif
+
+static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX,
+             "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeSyncAllocEvent(IMG_UINT32 ui32DispatchTableEntry,
+                          IMG_UINT8 * psSyncAllocEventIN_UI8,
+                          IMG_UINT8 * psSyncAllocEventOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_SYNCALLOCEVENT *psSyncAllocEventIN =
+           (PVRSRV_BRIDGE_IN_SYNCALLOCEVENT *) IMG_OFFSET_ADDR(psSyncAllocEventIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT *psSyncAllocEventOUT =
+           (PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT *) IMG_OFFSET_ADDR(psSyncAllocEventOUT_UI8, 0);
+
+       IMG_CHAR *uiClassNameInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR)) + 0;
+
+       if (unlikely(psSyncAllocEventIN->ui32ClassNameSize > PVRSRV_SYNC_NAME_LENGTH))
+       {
+               psSyncAllocEventOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto SyncAllocEvent_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psSyncAllocEventOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto SyncAllocEvent_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psSyncAllocEventIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psSyncAllocEventIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psSyncAllocEventOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto SyncAllocEvent_exit;
+                       }
+               }
+       }
+
+       if (psSyncAllocEventIN->ui32ClassNameSize != 0)
+       {
+               uiClassNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiClassNameInt, (const void __user *)psSyncAllocEventIN->puiClassName,
+                    psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psSyncAllocEventOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto SyncAllocEvent_exit;
+               }
+               ((IMG_CHAR *)
+                uiClassNameInt)[(psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR)) - 1] =
+       '\0';
+       }
+
+       psSyncAllocEventOUT->eError =
+           PVRSRVSyncAllocEventKM(psConnection, OSGetDevNode(psConnection),
+                                  psSyncAllocEventIN->bServerSync,
+                                  psSyncAllocEventIN->ui32FWAddr,
+                                  psSyncAllocEventIN->ui32ClassNameSize, uiClassNameInt);
+
+SyncAllocEvent_exit:
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psSyncAllocEventOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncFreeEvent(IMG_UINT32 ui32DispatchTableEntry,
+                         IMG_UINT8 * psSyncFreeEventIN_UI8,
+                         IMG_UINT8 * psSyncFreeEventOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_SYNCFREEEVENT *psSyncFreeEventIN =
+           (PVRSRV_BRIDGE_IN_SYNCFREEEVENT *) IMG_OFFSET_ADDR(psSyncFreeEventIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_SYNCFREEEVENT *psSyncFreeEventOUT =
+           (PVRSRV_BRIDGE_OUT_SYNCFREEEVENT *) IMG_OFFSET_ADDR(psSyncFreeEventOUT_UI8, 0);
+
+       psSyncFreeEventOUT->eError =
+           PVRSRVSyncFreeEventKM(psConnection, OSGetDevNode(psConnection),
+                                 psSyncFreeEventIN->ui32FWAddr);
+
+       return 0;
+}
+
+#if defined(PDUMP)
+
+static IMG_INT
+PVRSRVBridgeSyncCheckpointSignalledPDumpPol(IMG_UINT32 ui32DispatchTableEntry,
+                                           IMG_UINT8 * psSyncCheckpointSignalledPDumpPolIN_UI8,
+                                           IMG_UINT8 * psSyncCheckpointSignalledPDumpPolOUT_UI8,
+                                           CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL *psSyncCheckpointSignalledPDumpPolIN =
+           (PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL *)
+           IMG_OFFSET_ADDR(psSyncCheckpointSignalledPDumpPolIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL *psSyncCheckpointSignalledPDumpPolOUT =
+           (PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL *)
+           IMG_OFFSET_ADDR(psSyncCheckpointSignalledPDumpPolOUT_UI8, 0);
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       psSyncCheckpointSignalledPDumpPolOUT->eError =
+           PVRSRVSyncCheckpointSignalledPDumpPolKM(psSyncCheckpointSignalledPDumpPolIN->hFence);
+
+       return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncCheckpointSignalledPDumpPol NULL
+#endif
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitSYNCBridge(void);
+void DeinitSYNCBridge(void);
+
+/*
+ * Register all SYNC functions with services
+ */
+PVRSRV_ERROR InitSYNCBridge(void)
+{
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK,
+                             PVRSRVBridgeAllocSyncPrimitiveBlock, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK,
+                             PVRSRVBridgeFreeSyncPrimitiveBlock, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMSET,
+                             PVRSRVBridgeSyncPrimSet, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP,
+                             PVRSRVBridgeSyncPrimPDump, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE,
+                             PVRSRVBridgeSyncPrimPDumpValue, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL,
+                             PVRSRVBridgeSyncPrimPDumpPol, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP,
+                             PVRSRVBridgeSyncPrimPDumpCBP, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT,
+                             PVRSRVBridgeSyncAllocEvent, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT,
+                             PVRSRVBridgeSyncFreeEvent, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+                             PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL,
+                             PVRSRVBridgeSyncCheckpointSignalledPDumpPol, NULL);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all sync functions with services
+ */
+void DeinitSYNCBridge(void)
+{
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMSET);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+                               PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL);
+
+}
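To make the dispatch glue above easier to follow: SetDispatchTableEntry records one handler per (bridge group, command offset) pair and UnsetDispatchTableEntry clears it again, with NULL standing in for commands compiled out of this build (the PDump entries when PDUMP is undefined). The fragment below is a deliberately simplified, self-contained model of that routing in plain C; none of these names exist in the driver, and the real table, its locking and its argument marshalling are richer than this sketch.

#include <stddef.h>

typedef int (*MODEL_BRIDGE_FN)(unsigned int uiEntry, unsigned char *pui8In,
			       unsigned char *pui8Out, void *pvConnection);

/* One slot per sync command offset, 0..9 as defined in common_sync_bridge.h. */
static MODEL_BRIDGE_FN gapfnModelSyncTable[10];

static void ModelSetEntry(unsigned int uiCmd, MODEL_BRIDGE_FN pfnHandler)
{
	if (uiCmd < sizeof(gapfnModelSyncTable) / sizeof(gapfnModelSyncTable[0]))
		gapfnModelSyncTable[uiCmd] = pfnHandler; /* NULL == not supported */
}

static int ModelDispatch(unsigned int uiCmd, unsigned char *pui8In,
			 unsigned char *pui8Out, void *pvConnection)
{
	if (uiCmd >= sizeof(gapfnModelSyncTable) / sizeof(gapfnModelSyncTable[0]) ||
	    gapfnModelSyncTable[uiCmd] == NULL)
		return -1; /* unknown command, or compiled out in this build */

	return gapfnModelSyncTable[uiCmd](uiCmd, pui8In, pui8Out, pvConnection);
}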
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/syncfallback_bridge/common_syncfallback_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/syncfallback_bridge/common_syncfallback_bridge.h
new file mode 100644 (file)
index 0000000..87016c4
--- /dev/null
@@ -0,0 +1,357 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for syncfallback
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for syncfallback
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_SYNCFALLBACK_BRIDGE_H
+#define COMMON_SYNCFALLBACK_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pvrsrv_sync_km.h"
+
+#define PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST                   0
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINECREATEPVR                     PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+0
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINERELEASE                       PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+1
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEDUP                      PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+2
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEMERGE                    PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+3
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCERELEASE                  PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+4
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEWAIT                     PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+5
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEDUMP                     PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+6
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINECREATESW                      PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+7
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCECREATESW                 PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+8
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINEADVANCESW                     PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+9
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTINSECURE                   PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+10
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTDESTROYINSECURE                    PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+11
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEIMPORTINSECURE                   PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+12
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTSECURE                     PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+13
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTDESTROYSECURE                      PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+14
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEIMPORTSECURE                     PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+15
+#define PVRSRV_BRIDGE_SYNCFALLBACK_CMD_LAST                    (PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+15)
+
+/*******************************************
+            SyncFbTimelineCreatePVR
+ *******************************************/
+
+/* Bridge in structure for SyncFbTimelineCreatePVR */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBTIMELINECREATEPVR_TAG
+{
+       const IMG_CHAR *puiTimelineName;
+       IMG_UINT32 ui32TimelineNameSize;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBTIMELINECREATEPVR;
+
+/* Bridge out structure for SyncFbTimelineCreatePVR */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBTIMELINECREATEPVR_TAG
+{
+       IMG_HANDLE hTimeline;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBTIMELINECREATEPVR;
+
+/*******************************************
+            SyncFbTimelineRelease
+ *******************************************/
+
+/* Bridge in structure for SyncFbTimelineRelease */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBTIMELINERELEASE_TAG
+{
+       IMG_HANDLE hTimeline;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBTIMELINERELEASE;
+
+/* Bridge out structure for SyncFbTimelineRelease */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBTIMELINERELEASE_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBTIMELINERELEASE;
+
+/*******************************************
+            SyncFbFenceDup
+ *******************************************/
+
+/* Bridge in structure for SyncFbFenceDup */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBFENCEDUP_TAG
+{
+       IMG_HANDLE hInFence;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBFENCEDUP;
+
+/* Bridge out structure for SyncFbFenceDup */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBFENCEDUP_TAG
+{
+       IMG_HANDLE hOutFence;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBFENCEDUP;
+
+/*******************************************
+            SyncFbFenceMerge
+ *******************************************/
+
+/* Bridge in structure for SyncFbFenceMerge */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBFENCEMERGE_TAG
+{
+       IMG_HANDLE hInFence1;
+       IMG_HANDLE hInFence2;
+       const IMG_CHAR *puiFenceName;
+       IMG_UINT32 ui32FenceNameSize;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBFENCEMERGE;
+
+/* Bridge out structure for SyncFbFenceMerge */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBFENCEMERGE_TAG
+{
+       IMG_HANDLE hOutFence;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBFENCEMERGE;
+
+/*******************************************
+            SyncFbFenceRelease
+ *******************************************/
+
+/* Bridge in structure for SyncFbFenceRelease */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBFENCERELEASE_TAG
+{
+       IMG_HANDLE hFence;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBFENCERELEASE;
+
+/* Bridge out structure for SyncFbFenceRelease */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBFENCERELEASE_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBFENCERELEASE;
+
+/*******************************************
+            SyncFbFenceWait
+ *******************************************/
+
+/* Bridge in structure for SyncFbFenceWait */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBFENCEWAIT_TAG
+{
+       IMG_HANDLE hFence;
+       IMG_UINT32 ui32Timeout;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBFENCEWAIT;
+
+/* Bridge out structure for SyncFbFenceWait */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBFENCEWAIT_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBFENCEWAIT;
+
+/*******************************************
+            SyncFbFenceDump
+ *******************************************/
+
+/* Bridge in structure for SyncFbFenceDump */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBFENCEDUMP_TAG
+{
+       IMG_HANDLE hFence;
+       const IMG_CHAR *puiDescStr;
+       const IMG_CHAR *puiFileStr;
+       const IMG_CHAR *puiModuleStr;
+       IMG_UINT32 ui32DescStrLength;
+       IMG_UINT32 ui32FileStrLength;
+       IMG_UINT32 ui32Line;
+       IMG_UINT32 ui32ModuleStrLength;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBFENCEDUMP;
+
+/* Bridge out structure for SyncFbFenceDump */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBFENCEDUMP_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBFENCEDUMP;
+
+/*******************************************
+            SyncFbTimelineCreateSW
+ *******************************************/
+
+/* Bridge in structure for SyncFbTimelineCreateSW */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBTIMELINECREATESW_TAG
+{
+       const IMG_CHAR *puiTimelineName;
+       IMG_UINT32 ui32TimelineNameSize;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBTIMELINECREATESW;
+
+/* Bridge out structure for SyncFbTimelineCreateSW */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBTIMELINECREATESW_TAG
+{
+       IMG_HANDLE hTimeline;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBTIMELINECREATESW;
+
+/*******************************************
+            SyncFbFenceCreateSW
+ *******************************************/
+
+/* Bridge in structure for SyncFbFenceCreateSW */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBFENCECREATESW_TAG
+{
+       IMG_HANDLE hTimeline;
+       const IMG_CHAR *puiFenceName;
+       IMG_UINT32 ui32FenceNameSize;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBFENCECREATESW;
+
+/* Bridge out structure for SyncFbFenceCreateSW */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBFENCECREATESW_TAG
+{
+       IMG_UINT64 ui64SyncPtIdx;
+       IMG_HANDLE hOutFence;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBFENCECREATESW;
+
+/*******************************************
+            SyncFbTimelineAdvanceSW
+ *******************************************/
+
+/* Bridge in structure for SyncFbTimelineAdvanceSW */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBTIMELINEADVANCESW_TAG
+{
+       IMG_HANDLE hTimeline;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBTIMELINEADVANCESW;
+
+/* Bridge out structure for SyncFbTimelineAdvanceSW */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBTIMELINEADVANCESW_TAG
+{
+       IMG_UINT64 ui64SyncPtIdx;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBTIMELINEADVANCESW;
+
+/*******************************************
+            SyncFbFenceExportInsecure
+ *******************************************/
+
+/* Bridge in structure for SyncFbFenceExportInsecure */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTINSECURE_TAG
+{
+       IMG_HANDLE hFence;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTINSECURE;
+
+/* Bridge out structure for SyncFbFenceExportInsecure */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTINSECURE_TAG
+{
+       IMG_HANDLE hExport;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTINSECURE;
+
+/*******************************************
+            SyncFbFenceExportDestroyInsecure
+ *******************************************/
+
+/* Bridge in structure for SyncFbFenceExportDestroyInsecure */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTDESTROYINSECURE_TAG
+{
+       IMG_HANDLE hExport;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTDESTROYINSECURE;
+
+/* Bridge out structure for SyncFbFenceExportDestroyInsecure */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTDESTROYINSECURE_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTDESTROYINSECURE;
+
+/*******************************************
+            SyncFbFenceImportInsecure
+ *******************************************/
+
+/* Bridge in structure for SyncFbFenceImportInsecure */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBFENCEIMPORTINSECURE_TAG
+{
+       IMG_HANDLE hImport;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBFENCEIMPORTINSECURE;
+
+/* Bridge out structure for SyncFbFenceImportInsecure */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBFENCEIMPORTINSECURE_TAG
+{
+       IMG_HANDLE hSyncHandle;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBFENCEIMPORTINSECURE;
+
+/*******************************************
+            SyncFbFenceExportSecure
+ *******************************************/
+
+/* Bridge in structure for SyncFbFenceExportSecure */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTSECURE_TAG
+{
+       IMG_HANDLE hFence;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTSECURE;
+
+/* Bridge out structure for SyncFbFenceExportSecure */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTSECURE_TAG
+{
+       IMG_SECURE_TYPE Export;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTSECURE;
+
+/*******************************************
+            SyncFbFenceExportDestroySecure
+ *******************************************/
+
+/* Bridge in structure for SyncFbFenceExportDestroySecure */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTDESTROYSECURE_TAG
+{
+       IMG_HANDLE hExport;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTDESTROYSECURE;
+
+/* Bridge out structure for SyncFbFenceExportDestroySecure */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTDESTROYSECURE_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTDESTROYSECURE;
+
+/*******************************************
+            SyncFbFenceImportSecure
+ *******************************************/
+
+/* Bridge in structure for SyncFbFenceImportSecure */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBFENCEIMPORTSECURE_TAG
+{
+       IMG_SECURE_TYPE Import;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBFENCEIMPORTSECURE;
+
+/* Bridge out structure for SyncFbFenceImportSecure */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBFENCEIMPORTSECURE_TAG
+{
+       IMG_HANDLE hSyncHandle;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBFENCEIMPORTSECURE;
+
+#endif /* COMMON_SYNCFALLBACK_BRIDGE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/syncfallback_bridge/server_syncfallback_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/syncfallback_bridge/server_syncfallback_bridge.c
new file mode 100644 (file)
index 0000000..800a9dd
--- /dev/null
@@ -0,0 +1,1967 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for syncfallback
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for syncfallback
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "sync_fallback_server.h"
+#include "pvrsrv_sync_server.h"
+
+#include "common_syncfallback_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+static PVRSRV_ERROR ReleaseExport(void *pvData)
+{
+       PVR_UNREFERENCED_PARAMETER(pvData);
+
+       return PVRSRV_OK;
+}
+#endif
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
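+/*
+ * Each entry point below follows the same generated pattern: validate any
+ * caller-supplied array sizes against their maximum lengths, stage
+ * variable-length arguments either in the spare space of the bridge input
+ * buffer or in a temporary allocation, copy them in from user space and
+ * NUL-terminate the strings, look up server objects from the supplied
+ * handles under the process handle lock, call the corresponding SyncFb*
+ * server function, then allocate any output handles and release the
+ * looked-up handles before returning.
+ */
+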
+static PVRSRV_ERROR _SyncFbTimelineCreatePVRpsTimelineIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = SyncFbTimelineRelease((PVRSRV_TIMELINE_SERVER *) pvData);
+       return eError;
+}
+
+static_assert(SYNC_FB_TIMELINE_MAX_LENGTH <= IMG_UINT32_MAX,
+             "SYNC_FB_TIMELINE_MAX_LENGTH must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeSyncFbTimelineCreatePVR(IMG_UINT32 ui32DispatchTableEntry,
+                                   IMG_UINT8 * psSyncFbTimelineCreatePVRIN_UI8,
+                                   IMG_UINT8 * psSyncFbTimelineCreatePVROUT_UI8,
+                                   CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_SYNCFBTIMELINECREATEPVR *psSyncFbTimelineCreatePVRIN =
+           (PVRSRV_BRIDGE_IN_SYNCFBTIMELINECREATEPVR *)
+           IMG_OFFSET_ADDR(psSyncFbTimelineCreatePVRIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_SYNCFBTIMELINECREATEPVR *psSyncFbTimelineCreatePVROUT =
+           (PVRSRV_BRIDGE_OUT_SYNCFBTIMELINECREATEPVR *)
+           IMG_OFFSET_ADDR(psSyncFbTimelineCreatePVROUT_UI8, 0);
+
+       IMG_CHAR *uiTimelineNameInt = NULL;
+       PVRSRV_TIMELINE_SERVER *psTimelineInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psSyncFbTimelineCreatePVRIN->ui32TimelineNameSize * sizeof(IMG_CHAR)) + 0;
+
+       if (unlikely
+           (psSyncFbTimelineCreatePVRIN->ui32TimelineNameSize > SYNC_FB_TIMELINE_MAX_LENGTH))
+       {
+               psSyncFbTimelineCreatePVROUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto SyncFbTimelineCreatePVR_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psSyncFbTimelineCreatePVROUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto SyncFbTimelineCreatePVR_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psSyncFbTimelineCreatePVRIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psSyncFbTimelineCreatePVRIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psSyncFbTimelineCreatePVROUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto SyncFbTimelineCreatePVR_exit;
+                       }
+               }
+       }
+
+       if (psSyncFbTimelineCreatePVRIN->ui32TimelineNameSize != 0)
+       {
+               uiTimelineNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psSyncFbTimelineCreatePVRIN->ui32TimelineNameSize * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (psSyncFbTimelineCreatePVRIN->ui32TimelineNameSize * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiTimelineNameInt,
+                    (const void __user *)psSyncFbTimelineCreatePVRIN->puiTimelineName,
+                    psSyncFbTimelineCreatePVRIN->ui32TimelineNameSize * sizeof(IMG_CHAR)) !=
+                   PVRSRV_OK)
+               {
+                       psSyncFbTimelineCreatePVROUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto SyncFbTimelineCreatePVR_exit;
+               }
+               ((IMG_CHAR *)
+                uiTimelineNameInt)[(psSyncFbTimelineCreatePVRIN->ui32TimelineNameSize *
+                                    sizeof(IMG_CHAR)) - 1] = '\0';
+       }
+
+       psSyncFbTimelineCreatePVROUT->eError =
+           SyncFbTimelineCreatePVR(psSyncFbTimelineCreatePVRIN->ui32TimelineNameSize,
+                                   uiTimelineNameInt, &psTimelineInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psSyncFbTimelineCreatePVROUT->eError != PVRSRV_OK))
+       {
+               goto SyncFbTimelineCreatePVR_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       psSyncFbTimelineCreatePVROUT->eError =
+           PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                     &psSyncFbTimelineCreatePVROUT->hTimeline,
+                                     (void *)psTimelineInt,
+                                     PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER,
+                                     PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+                                     (PFN_HANDLE_RELEASE) &
+                                     _SyncFbTimelineCreatePVRpsTimelineIntRelease);
+       if (unlikely(psSyncFbTimelineCreatePVROUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+               goto SyncFbTimelineCreatePVR_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+SyncFbTimelineCreatePVR_exit:
+
+       if (psSyncFbTimelineCreatePVROUT->eError != PVRSRV_OK)
+       {
+               if (psTimelineInt)
+               {
+                       SyncFbTimelineRelease(psTimelineInt);
+               }
+       }
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psSyncFbTimelineCreatePVROUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncFbTimelineRelease(IMG_UINT32 ui32DispatchTableEntry,
+                                 IMG_UINT8 * psSyncFbTimelineReleaseIN_UI8,
+                                 IMG_UINT8 * psSyncFbTimelineReleaseOUT_UI8,
+                                 CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_SYNCFBTIMELINERELEASE *psSyncFbTimelineReleaseIN =
+           (PVRSRV_BRIDGE_IN_SYNCFBTIMELINERELEASE *)
+           IMG_OFFSET_ADDR(psSyncFbTimelineReleaseIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_SYNCFBTIMELINERELEASE *psSyncFbTimelineReleaseOUT =
+           (PVRSRV_BRIDGE_OUT_SYNCFBTIMELINERELEASE *)
+           IMG_OFFSET_ADDR(psSyncFbTimelineReleaseOUT_UI8, 0);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       psSyncFbTimelineReleaseOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                             (IMG_HANDLE) psSyncFbTimelineReleaseIN->hTimeline,
+                                             PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER);
+       if (unlikely((psSyncFbTimelineReleaseOUT->eError != PVRSRV_OK) &&
+                    (psSyncFbTimelineReleaseOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+                    (psSyncFbTimelineReleaseOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s",
+                        __func__, PVRSRVGetErrorString(psSyncFbTimelineReleaseOUT->eError)));
+               UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+               goto SyncFbTimelineRelease_exit;
+       }
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+SyncFbTimelineRelease_exit:
+
+       return 0;
+}
+
+static PVRSRV_ERROR _SyncFbFenceDuppsOutFenceIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = SyncFbFenceRelease((PVRSRV_FENCE_SERVER *) pvData);
+       return eError;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncFbFenceDup(IMG_UINT32 ui32DispatchTableEntry,
+                          IMG_UINT8 * psSyncFbFenceDupIN_UI8,
+                          IMG_UINT8 * psSyncFbFenceDupOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_SYNCFBFENCEDUP *psSyncFbFenceDupIN =
+           (PVRSRV_BRIDGE_IN_SYNCFBFENCEDUP *) IMG_OFFSET_ADDR(psSyncFbFenceDupIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_SYNCFBFENCEDUP *psSyncFbFenceDupOUT =
+           (PVRSRV_BRIDGE_OUT_SYNCFBFENCEDUP *) IMG_OFFSET_ADDR(psSyncFbFenceDupOUT_UI8, 0);
+
+       IMG_HANDLE hInFence = psSyncFbFenceDupIN->hInFence;
+       PVRSRV_FENCE_SERVER *psInFenceInt = NULL;
+       PVRSRV_FENCE_SERVER *psOutFenceInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       /* Look up the address from the handle */
+       psSyncFbFenceDupOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                      (void **)&psInFenceInt,
+                                      hInFence, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER, IMG_TRUE);
+       if (unlikely(psSyncFbFenceDupOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+               goto SyncFbFenceDup_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       psSyncFbFenceDupOUT->eError = SyncFbFenceDup(psInFenceInt, &psOutFenceInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psSyncFbFenceDupOUT->eError != PVRSRV_OK))
+       {
+               goto SyncFbFenceDup_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       psSyncFbFenceDupOUT->eError =
+           PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                     &psSyncFbFenceDupOUT->hOutFence, (void *)psOutFenceInt,
+                                     PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER,
+                                     PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                     (PFN_HANDLE_RELEASE) & _SyncFbFenceDuppsOutFenceIntRelease);
+       if (unlikely(psSyncFbFenceDupOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+               goto SyncFbFenceDup_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+SyncFbFenceDup_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psInFenceInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                           hInFence, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       if (psSyncFbFenceDupOUT->eError != PVRSRV_OK)
+       {
+               if (psOutFenceInt)
+               {
+                       SyncFbFenceRelease(psOutFenceInt);
+               }
+       }
+
+       return 0;
+}
+
+static PVRSRV_ERROR _SyncFbFenceMergepsOutFenceIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = SyncFbFenceRelease((PVRSRV_FENCE_SERVER *) pvData);
+       return eError;
+}
+
+static_assert(SYNC_FB_FENCE_MAX_LENGTH <= IMG_UINT32_MAX,
+             "SYNC_FB_FENCE_MAX_LENGTH must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeSyncFbFenceMerge(IMG_UINT32 ui32DispatchTableEntry,
+                            IMG_UINT8 * psSyncFbFenceMergeIN_UI8,
+                            IMG_UINT8 * psSyncFbFenceMergeOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_SYNCFBFENCEMERGE *psSyncFbFenceMergeIN =
+           (PVRSRV_BRIDGE_IN_SYNCFBFENCEMERGE *) IMG_OFFSET_ADDR(psSyncFbFenceMergeIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_SYNCFBFENCEMERGE *psSyncFbFenceMergeOUT =
+           (PVRSRV_BRIDGE_OUT_SYNCFBFENCEMERGE *) IMG_OFFSET_ADDR(psSyncFbFenceMergeOUT_UI8, 0);
+
+       IMG_HANDLE hInFence1 = psSyncFbFenceMergeIN->hInFence1;
+       PVRSRV_FENCE_SERVER *psInFence1Int = NULL;
+       IMG_HANDLE hInFence2 = psSyncFbFenceMergeIN->hInFence2;
+       PVRSRV_FENCE_SERVER *psInFence2Int = NULL;
+       IMG_CHAR *uiFenceNameInt = NULL;
+       PVRSRV_FENCE_SERVER *psOutFenceInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psSyncFbFenceMergeIN->ui32FenceNameSize * sizeof(IMG_CHAR)) + 0;
+
+       if (unlikely(psSyncFbFenceMergeIN->ui32FenceNameSize > SYNC_FB_FENCE_MAX_LENGTH))
+       {
+               psSyncFbFenceMergeOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto SyncFbFenceMerge_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psSyncFbFenceMergeOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto SyncFbFenceMerge_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psSyncFbFenceMergeIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psSyncFbFenceMergeIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psSyncFbFenceMergeOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto SyncFbFenceMerge_exit;
+                       }
+               }
+       }
+
+       if (psSyncFbFenceMergeIN->ui32FenceNameSize != 0)
+       {
+               uiFenceNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psSyncFbFenceMergeIN->ui32FenceNameSize * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (psSyncFbFenceMergeIN->ui32FenceNameSize * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiFenceNameInt, (const void __user *)psSyncFbFenceMergeIN->puiFenceName,
+                    psSyncFbFenceMergeIN->ui32FenceNameSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psSyncFbFenceMergeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto SyncFbFenceMerge_exit;
+               }
+               ((IMG_CHAR *)
+                uiFenceNameInt)[(psSyncFbFenceMergeIN->ui32FenceNameSize * sizeof(IMG_CHAR)) -
+                                1] = '\0';
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       /* Look up the address from the handle */
+       psSyncFbFenceMergeOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                      (void **)&psInFence1Int,
+                                      hInFence1, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER, IMG_TRUE);
+       if (unlikely(psSyncFbFenceMergeOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+               goto SyncFbFenceMerge_exit;
+       }
+
+       /* Look up the address from the handle */
+       psSyncFbFenceMergeOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                      (void **)&psInFence2Int,
+                                      hInFence2, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER, IMG_TRUE);
+       if (unlikely(psSyncFbFenceMergeOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+               goto SyncFbFenceMerge_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       psSyncFbFenceMergeOUT->eError =
+           SyncFbFenceMerge(psInFence1Int,
+                            psInFence2Int,
+                            psSyncFbFenceMergeIN->ui32FenceNameSize,
+                            uiFenceNameInt, &psOutFenceInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psSyncFbFenceMergeOUT->eError != PVRSRV_OK))
+       {
+               goto SyncFbFenceMerge_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       psSyncFbFenceMergeOUT->eError =
+           PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                     &psSyncFbFenceMergeOUT->hOutFence, (void *)psOutFenceInt,
+                                     PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER,
+                                     PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                     (PFN_HANDLE_RELEASE) & _SyncFbFenceMergepsOutFenceIntRelease);
+       if (unlikely(psSyncFbFenceMergeOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+               goto SyncFbFenceMerge_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+SyncFbFenceMerge_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psInFence1Int)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                           hInFence1, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER);
+       }
+
+       /* Unreference the previously looked up handle */
+       if (psInFence2Int)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                           hInFence2, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       if (psSyncFbFenceMergeOUT->eError != PVRSRV_OK)
+       {
+               if (psOutFenceInt)
+               {
+                       SyncFbFenceRelease(psOutFenceInt);
+               }
+       }
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psSyncFbFenceMergeOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncFbFenceRelease(IMG_UINT32 ui32DispatchTableEntry,
+                              IMG_UINT8 * psSyncFbFenceReleaseIN_UI8,
+                              IMG_UINT8 * psSyncFbFenceReleaseOUT_UI8,
+                              CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_SYNCFBFENCERELEASE *psSyncFbFenceReleaseIN =
+           (PVRSRV_BRIDGE_IN_SYNCFBFENCERELEASE *) IMG_OFFSET_ADDR(psSyncFbFenceReleaseIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_SYNCFBFENCERELEASE *psSyncFbFenceReleaseOUT =
+           (PVRSRV_BRIDGE_OUT_SYNCFBFENCERELEASE *) IMG_OFFSET_ADDR(psSyncFbFenceReleaseOUT_UI8,
+                                                                    0);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       psSyncFbFenceReleaseOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                             (IMG_HANDLE) psSyncFbFenceReleaseIN->hFence,
+                                             PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER);
+       if (unlikely((psSyncFbFenceReleaseOUT->eError != PVRSRV_OK) &&
+                    (psSyncFbFenceReleaseOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+                    (psSyncFbFenceReleaseOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s",
+                        __func__, PVRSRVGetErrorString(psSyncFbFenceReleaseOUT->eError)));
+               UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+               goto SyncFbFenceRelease_exit;
+       }
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+SyncFbFenceRelease_exit:
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncFbFenceWait(IMG_UINT32 ui32DispatchTableEntry,
+                           IMG_UINT8 * psSyncFbFenceWaitIN_UI8,
+                           IMG_UINT8 * psSyncFbFenceWaitOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_SYNCFBFENCEWAIT *psSyncFbFenceWaitIN =
+           (PVRSRV_BRIDGE_IN_SYNCFBFENCEWAIT *) IMG_OFFSET_ADDR(psSyncFbFenceWaitIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_SYNCFBFENCEWAIT *psSyncFbFenceWaitOUT =
+           (PVRSRV_BRIDGE_OUT_SYNCFBFENCEWAIT *) IMG_OFFSET_ADDR(psSyncFbFenceWaitOUT_UI8, 0);
+
+       IMG_HANDLE hFence = psSyncFbFenceWaitIN->hFence;
+       PVRSRV_FENCE_SERVER *psFenceInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       /* Look up the address from the handle */
+       psSyncFbFenceWaitOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                      (void **)&psFenceInt,
+                                      hFence, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER, IMG_TRUE);
+       if (unlikely(psSyncFbFenceWaitOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+               goto SyncFbFenceWait_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       psSyncFbFenceWaitOUT->eError =
+           SyncFbFenceWait(psFenceInt, psSyncFbFenceWaitIN->ui32Timeout);
+
+SyncFbFenceWait_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psFenceInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                           hFence, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       return 0;
+}
+
+static_assert((SYNC_FB_FILE_STRING_MAX + 1) <= IMG_UINT32_MAX,
+             "(SYNC_FB_FILE_STRING_MAX+1) must not be larger than IMG_UINT32_MAX");
+static_assert((SYNC_FB_MODULE_STRING_LEN_MAX + 1) <= IMG_UINT32_MAX,
+             "(SYNC_FB_MODULE_STRING_LEN_MAX+1) must not be larger than IMG_UINT32_MAX");
+static_assert((SYNC_FB_DESC_STRING_LEN_MAX + 1) <= IMG_UINT32_MAX,
+             "(SYNC_FB_DESC_STRING_LEN_MAX+1) must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeSyncFbFenceDump(IMG_UINT32 ui32DispatchTableEntry,
+                           IMG_UINT8 * psSyncFbFenceDumpIN_UI8,
+                           IMG_UINT8 * psSyncFbFenceDumpOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_SYNCFBFENCEDUMP *psSyncFbFenceDumpIN =
+           (PVRSRV_BRIDGE_IN_SYNCFBFENCEDUMP *) IMG_OFFSET_ADDR(psSyncFbFenceDumpIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_SYNCFBFENCEDUMP *psSyncFbFenceDumpOUT =
+           (PVRSRV_BRIDGE_OUT_SYNCFBFENCEDUMP *) IMG_OFFSET_ADDR(psSyncFbFenceDumpOUT_UI8, 0);
+
+       IMG_HANDLE hFence = psSyncFbFenceDumpIN->hFence;
+       PVRSRV_FENCE_SERVER *psFenceInt = NULL;
+       IMG_CHAR *uiFileStrInt = NULL;
+       IMG_CHAR *uiModuleStrInt = NULL;
+       IMG_CHAR *uiDescStrInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psSyncFbFenceDumpIN->ui32FileStrLength * sizeof(IMG_CHAR)) +
+           ((IMG_UINT64) psSyncFbFenceDumpIN->ui32ModuleStrLength * sizeof(IMG_CHAR)) +
+           ((IMG_UINT64) psSyncFbFenceDumpIN->ui32DescStrLength * sizeof(IMG_CHAR)) + 0;
+
+       if (unlikely(psSyncFbFenceDumpIN->ui32FileStrLength > (SYNC_FB_FILE_STRING_MAX + 1)))
+       {
+               psSyncFbFenceDumpOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto SyncFbFenceDump_exit;
+       }
+
+       if (unlikely
+           (psSyncFbFenceDumpIN->ui32ModuleStrLength > (SYNC_FB_MODULE_STRING_LEN_MAX + 1)))
+       {
+               psSyncFbFenceDumpOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto SyncFbFenceDump_exit;
+       }
+
+       if (unlikely(psSyncFbFenceDumpIN->ui32DescStrLength > (SYNC_FB_DESC_STRING_LEN_MAX + 1)))
+       {
+               psSyncFbFenceDumpOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto SyncFbFenceDump_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psSyncFbFenceDumpOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto SyncFbFenceDump_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psSyncFbFenceDumpIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psSyncFbFenceDumpIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psSyncFbFenceDumpOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto SyncFbFenceDump_exit;
+                       }
+               }
+       }
+
+       if (psSyncFbFenceDumpIN->ui32FileStrLength != 0)
+       {
+               uiFileStrInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psSyncFbFenceDumpIN->ui32FileStrLength * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (psSyncFbFenceDumpIN->ui32FileStrLength * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiFileStrInt, (const void __user *)psSyncFbFenceDumpIN->puiFileStr,
+                    psSyncFbFenceDumpIN->ui32FileStrLength * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psSyncFbFenceDumpOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto SyncFbFenceDump_exit;
+               }
+               ((IMG_CHAR *)
+                uiFileStrInt)[(psSyncFbFenceDumpIN->ui32FileStrLength * sizeof(IMG_CHAR)) -
+                              1] = '\0';
+       }
+       if (psSyncFbFenceDumpIN->ui32ModuleStrLength != 0)
+       {
+               uiModuleStrInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psSyncFbFenceDumpIN->ui32ModuleStrLength * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (psSyncFbFenceDumpIN->ui32ModuleStrLength * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiModuleStrInt, (const void __user *)psSyncFbFenceDumpIN->puiModuleStr,
+                    psSyncFbFenceDumpIN->ui32ModuleStrLength * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psSyncFbFenceDumpOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto SyncFbFenceDump_exit;
+               }
+               ((IMG_CHAR *)
+                uiModuleStrInt)[(psSyncFbFenceDumpIN->ui32ModuleStrLength * sizeof(IMG_CHAR)) -
+                                1] = '\0';
+       }
+       if (psSyncFbFenceDumpIN->ui32DescStrLength != 0)
+       {
+               uiDescStrInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psSyncFbFenceDumpIN->ui32DescStrLength * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (psSyncFbFenceDumpIN->ui32DescStrLength * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiDescStrInt, (const void __user *)psSyncFbFenceDumpIN->puiDescStr,
+                    psSyncFbFenceDumpIN->ui32DescStrLength * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psSyncFbFenceDumpOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto SyncFbFenceDump_exit;
+               }
+               ((IMG_CHAR *)
+                uiDescStrInt)[(psSyncFbFenceDumpIN->ui32DescStrLength * sizeof(IMG_CHAR)) -
+                              1] = '\0';
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       /* Look up the address from the handle */
+       psSyncFbFenceDumpOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                      (void **)&psFenceInt,
+                                      hFence, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER, IMG_TRUE);
+       if (unlikely(psSyncFbFenceDumpOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+               goto SyncFbFenceDump_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       psSyncFbFenceDumpOUT->eError =
+           SyncFbFenceDump(psFenceInt,
+                           psSyncFbFenceDumpIN->ui32Line,
+                           psSyncFbFenceDumpIN->ui32FileStrLength,
+                           uiFileStrInt,
+                           psSyncFbFenceDumpIN->ui32ModuleStrLength,
+                           uiModuleStrInt, psSyncFbFenceDumpIN->ui32DescStrLength, uiDescStrInt);
+
+SyncFbFenceDump_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psFenceInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                           hFence, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psSyncFbFenceDumpOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static PVRSRV_ERROR _SyncFbTimelineCreateSWpsTimelineIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = SyncFbTimelineRelease((PVRSRV_TIMELINE_SERVER *) pvData);
+       return eError;
+}
+
+static_assert(SYNC_FB_FENCE_MAX_LENGTH <= IMG_UINT32_MAX,
+             "SYNC_FB_FENCE_MAX_LENGTH must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeSyncFbTimelineCreateSW(IMG_UINT32 ui32DispatchTableEntry,
+                                  IMG_UINT8 * psSyncFbTimelineCreateSWIN_UI8,
+                                  IMG_UINT8 * psSyncFbTimelineCreateSWOUT_UI8,
+                                  CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_SYNCFBTIMELINECREATESW *psSyncFbTimelineCreateSWIN =
+           (PVRSRV_BRIDGE_IN_SYNCFBTIMELINECREATESW *)
+           IMG_OFFSET_ADDR(psSyncFbTimelineCreateSWIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_SYNCFBTIMELINECREATESW *psSyncFbTimelineCreateSWOUT =
+           (PVRSRV_BRIDGE_OUT_SYNCFBTIMELINECREATESW *)
+           IMG_OFFSET_ADDR(psSyncFbTimelineCreateSWOUT_UI8, 0);
+
+       IMG_CHAR *uiTimelineNameInt = NULL;
+       PVRSRV_TIMELINE_SERVER *psTimelineInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psSyncFbTimelineCreateSWIN->ui32TimelineNameSize * sizeof(IMG_CHAR)) + 0;
+
+       if (unlikely(psSyncFbTimelineCreateSWIN->ui32TimelineNameSize > SYNC_FB_FENCE_MAX_LENGTH))
+       {
+               psSyncFbTimelineCreateSWOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto SyncFbTimelineCreateSW_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psSyncFbTimelineCreateSWOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto SyncFbTimelineCreateSW_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psSyncFbTimelineCreateSWIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psSyncFbTimelineCreateSWIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psSyncFbTimelineCreateSWOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto SyncFbTimelineCreateSW_exit;
+                       }
+               }
+       }
+
+       if (psSyncFbTimelineCreateSWIN->ui32TimelineNameSize != 0)
+       {
+               uiTimelineNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset +=
+                   psSyncFbTimelineCreateSWIN->ui32TimelineNameSize * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (psSyncFbTimelineCreateSWIN->ui32TimelineNameSize * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiTimelineNameInt,
+                    (const void __user *)psSyncFbTimelineCreateSWIN->puiTimelineName,
+                    psSyncFbTimelineCreateSWIN->ui32TimelineNameSize * sizeof(IMG_CHAR)) !=
+                   PVRSRV_OK)
+               {
+                       psSyncFbTimelineCreateSWOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto SyncFbTimelineCreateSW_exit;
+               }
+               ((IMG_CHAR *)
+                uiTimelineNameInt)[(psSyncFbTimelineCreateSWIN->ui32TimelineNameSize *
+                                    sizeof(IMG_CHAR)) - 1] = '\0';
+       }
+
+       psSyncFbTimelineCreateSWOUT->eError =
+           SyncFbTimelineCreateSW(psSyncFbTimelineCreateSWIN->ui32TimelineNameSize,
+                                  uiTimelineNameInt, &psTimelineInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psSyncFbTimelineCreateSWOUT->eError != PVRSRV_OK))
+       {
+               goto SyncFbTimelineCreateSW_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       psSyncFbTimelineCreateSWOUT->eError =
+           PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                     &psSyncFbTimelineCreateSWOUT->hTimeline,
+                                     (void *)psTimelineInt,
+                                     PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER,
+                                     PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+                                     (PFN_HANDLE_RELEASE) &
+                                     _SyncFbTimelineCreateSWpsTimelineIntRelease);
+       if (unlikely(psSyncFbTimelineCreateSWOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+               goto SyncFbTimelineCreateSW_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+SyncFbTimelineCreateSW_exit:
+
+       if (psSyncFbTimelineCreateSWOUT->eError != PVRSRV_OK)
+       {
+               if (psTimelineInt)
+               {
+                       SyncFbTimelineRelease(psTimelineInt);
+               }
+       }
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psSyncFbTimelineCreateSWOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+static PVRSRV_ERROR _SyncFbFenceCreateSWpsOutFenceIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = SyncFbFenceRelease((PVRSRV_FENCE_SERVER *) pvData);
+       return eError;
+}
+
+static_assert(SYNC_FB_FENCE_MAX_LENGTH <= IMG_UINT32_MAX,
+             "SYNC_FB_FENCE_MAX_LENGTH must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeSyncFbFenceCreateSW(IMG_UINT32 ui32DispatchTableEntry,
+                               IMG_UINT8 * psSyncFbFenceCreateSWIN_UI8,
+                               IMG_UINT8 * psSyncFbFenceCreateSWOUT_UI8,
+                               CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_SYNCFBFENCECREATESW *psSyncFbFenceCreateSWIN =
+           (PVRSRV_BRIDGE_IN_SYNCFBFENCECREATESW *) IMG_OFFSET_ADDR(psSyncFbFenceCreateSWIN_UI8,
+                                                                    0);
+       PVRSRV_BRIDGE_OUT_SYNCFBFENCECREATESW *psSyncFbFenceCreateSWOUT =
+           (PVRSRV_BRIDGE_OUT_SYNCFBFENCECREATESW *) IMG_OFFSET_ADDR(psSyncFbFenceCreateSWOUT_UI8,
+                                                                     0);
+
+       IMG_HANDLE hTimeline = psSyncFbFenceCreateSWIN->hTimeline;
+       PVRSRV_TIMELINE_SERVER *psTimelineInt = NULL;
+       IMG_CHAR *uiFenceNameInt = NULL;
+       PVRSRV_FENCE_SERVER *psOutFenceInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psSyncFbFenceCreateSWIN->ui32FenceNameSize * sizeof(IMG_CHAR)) + 0;
+
+       if (unlikely(psSyncFbFenceCreateSWIN->ui32FenceNameSize > SYNC_FB_FENCE_MAX_LENGTH))
+       {
+               psSyncFbFenceCreateSWOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto SyncFbFenceCreateSW_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psSyncFbFenceCreateSWOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto SyncFbFenceCreateSW_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psSyncFbFenceCreateSWIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psSyncFbFenceCreateSWIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psSyncFbFenceCreateSWOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto SyncFbFenceCreateSW_exit;
+                       }
+               }
+       }
+
+       if (psSyncFbFenceCreateSWIN->ui32FenceNameSize != 0)
+       {
+               uiFenceNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psSyncFbFenceCreateSWIN->ui32FenceNameSize * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (psSyncFbFenceCreateSWIN->ui32FenceNameSize * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiFenceNameInt,
+                    (const void __user *)psSyncFbFenceCreateSWIN->puiFenceName,
+                    psSyncFbFenceCreateSWIN->ui32FenceNameSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psSyncFbFenceCreateSWOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto SyncFbFenceCreateSW_exit;
+               }
+               ((IMG_CHAR *)
+                uiFenceNameInt)[(psSyncFbFenceCreateSWIN->ui32FenceNameSize * sizeof(IMG_CHAR)) -
+                                1] = '\0';
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       /* Look up the address from the handle */
+       psSyncFbFenceCreateSWOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                      (void **)&psTimelineInt,
+                                      hTimeline,
+                                      PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER, IMG_TRUE);
+       if (unlikely(psSyncFbFenceCreateSWOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+               goto SyncFbFenceCreateSW_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       psSyncFbFenceCreateSWOUT->eError =
+           SyncFbFenceCreateSW(psConnection, OSGetDevNode(psConnection),
+                               psTimelineInt,
+                               psSyncFbFenceCreateSWIN->ui32FenceNameSize,
+                               uiFenceNameInt,
+                               &psOutFenceInt, &psSyncFbFenceCreateSWOUT->ui64SyncPtIdx);
+       /* Exit early if bridged call fails */
+       if (unlikely(psSyncFbFenceCreateSWOUT->eError != PVRSRV_OK))
+       {
+               goto SyncFbFenceCreateSW_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       psSyncFbFenceCreateSWOUT->eError =
+           PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                     &psSyncFbFenceCreateSWOUT->hOutFence, (void *)psOutFenceInt,
+                                     PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER,
+                                     PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                     (PFN_HANDLE_RELEASE) &
+                                     _SyncFbFenceCreateSWpsOutFenceIntRelease);
+       if (unlikely(psSyncFbFenceCreateSWOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+               goto SyncFbFenceCreateSW_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+SyncFbFenceCreateSW_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psTimelineInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                           hTimeline, PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       if (psSyncFbFenceCreateSWOUT->eError != PVRSRV_OK)
+       {
+               if (psOutFenceInt)
+               {
+                       SyncFbFenceRelease(psOutFenceInt);
+               }
+       }
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psSyncFbFenceCreateSWOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
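
The CreateSW entry point above is a useful template for reading the rest of the generated bridge code: it bound-checks the user-supplied array size against a compile-time maximum (SYNC_FB_FENCE_MAX_LENGTH), reuses slack space at the end of the fixed-size bridge input packet for the variable-length payload, falls back to OSAllocMemNoStats() only when the payload does not fit, copies the data in with OSCopyFromUser(), and forces NUL termination of the name string. The sketch below is a minimal standalone model of that recipe under simplified, stand-in types and sizes (BRIDGE_IN_SIZE, NAME_MAX_LEN, struct bridge_in); it is not the driver's API.

/* Standalone sketch of the bridge marshalling pattern above. BRIDGE_IN_SIZE,
 * NAME_MAX_LEN and struct bridge_in are illustrative stand-ins for
 * PVRSRV_MAX_BRIDGE_IN_SIZE, SYNC_FB_FENCE_MAX_LENGTH and the generated IN
 * packet; memcpy() models OSCopyFromUser(). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BRIDGE_IN_SIZE 512u
#define NAME_MAX_LEN   32u
#define ALIGN_UP(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

struct bridge_in {
        uint32_t    name_size;   /* number of chars, including the NUL slot */
        const char *user_name;   /* a user-space pointer in the real bridge */
};

static int marshal_name(struct bridge_in *in, char **out_name, int *heap_alloc)
{
        size_t need = (size_t)in->name_size;
        size_t off  = ALIGN_UP(sizeof(*in), sizeof(unsigned long));
        char *buf;

        *out_name = NULL;
        *heap_alloc = 0;

        if (in->name_size > NAME_MAX_LEN)
                return -1;                      /* ARRAY_SIZE_TOO_BIG      */
        if (need == 0)
                return 0;                       /* nothing to copy         */

        if (need <= BRIDGE_IN_SIZE - off) {
                buf = (char *)in + off;         /* reuse IN-buffer slack   */
        } else {
                buf = malloc(need);             /* heap fallback           */
                if (!buf)
                        return -2;              /* OUT_OF_MEMORY           */
                *heap_alloc = 1;
        }

        memcpy(buf, in->user_name, need);       /* models OSCopyFromUser() */
        buf[need - 1] = '\0';                   /* always terminate name   */
        *out_name = buf;
        return 0;
}

int main(void)
{
        /* The IN packet arrives at the start of a fixed-size buffer. */
        union { struct bridge_in in; unsigned char bytes[BRIDGE_IN_SIZE]; } pkt = { 0 };
        char *name = NULL;
        int heap = 0;

        pkt.in.name_size = sizeof("swfence");
        pkt.in.user_name = "swfence";

        if (marshal_name(&pkt.in, &name, &heap) == 0)
                printf("fence name \"%s\" (heap=%d)\n", name, heap);
        if (heap)
                free(name);
        return 0;
}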
+
+static IMG_INT
+PVRSRVBridgeSyncFbTimelineAdvanceSW(IMG_UINT32 ui32DispatchTableEntry,
+                                   IMG_UINT8 * psSyncFbTimelineAdvanceSWIN_UI8,
+                                   IMG_UINT8 * psSyncFbTimelineAdvanceSWOUT_UI8,
+                                   CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_SYNCFBTIMELINEADVANCESW *psSyncFbTimelineAdvanceSWIN =
+           (PVRSRV_BRIDGE_IN_SYNCFBTIMELINEADVANCESW *)
+           IMG_OFFSET_ADDR(psSyncFbTimelineAdvanceSWIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_SYNCFBTIMELINEADVANCESW *psSyncFbTimelineAdvanceSWOUT =
+           (PVRSRV_BRIDGE_OUT_SYNCFBTIMELINEADVANCESW *)
+           IMG_OFFSET_ADDR(psSyncFbTimelineAdvanceSWOUT_UI8, 0);
+
+       IMG_HANDLE hTimeline = psSyncFbTimelineAdvanceSWIN->hTimeline;
+       PVRSRV_TIMELINE_SERVER *psTimelineInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       /* Look up the address from the handle */
+       psSyncFbTimelineAdvanceSWOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                      (void **)&psTimelineInt,
+                                      hTimeline,
+                                      PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER, IMG_TRUE);
+       if (unlikely(psSyncFbTimelineAdvanceSWOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+               goto SyncFbTimelineAdvanceSW_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       psSyncFbTimelineAdvanceSWOUT->eError =
+           SyncFbTimelineAdvanceSW(psTimelineInt, &psSyncFbTimelineAdvanceSWOUT->ui64SyncPtIdx);
+
+SyncFbTimelineAdvanceSW_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psTimelineInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                           hTimeline, PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       return 0;
+}
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+static PVRSRV_ERROR _SyncFbFenceExportInsecurepsExportIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = SyncFbFenceExportDestroyInsecure((PVRSRV_FENCE_EXPORT *) pvData);
+       return eError;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncFbFenceExportInsecure(IMG_UINT32 ui32DispatchTableEntry,
+                                     IMG_UINT8 * psSyncFbFenceExportInsecureIN_UI8,
+                                     IMG_UINT8 * psSyncFbFenceExportInsecureOUT_UI8,
+                                     CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTINSECURE *psSyncFbFenceExportInsecureIN =
+           (PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTINSECURE *)
+           IMG_OFFSET_ADDR(psSyncFbFenceExportInsecureIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTINSECURE *psSyncFbFenceExportInsecureOUT =
+           (PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTINSECURE *)
+           IMG_OFFSET_ADDR(psSyncFbFenceExportInsecureOUT_UI8, 0);
+
+       IMG_HANDLE hFence = psSyncFbFenceExportInsecureIN->hFence;
+       PVRSRV_FENCE_SERVER *psFenceInt = NULL;
+       PVRSRV_FENCE_EXPORT *psExportInt = NULL;
+       IMG_HANDLE hExportInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       /* Look up the address from the handle */
+       psSyncFbFenceExportInsecureOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                      (void **)&psFenceInt,
+                                      hFence, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER, IMG_TRUE);
+       if (unlikely(psSyncFbFenceExportInsecureOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+               goto SyncFbFenceExportInsecure_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       psSyncFbFenceExportInsecureOUT->eError =
+           SyncFbFenceExportInsecure(psFenceInt, &psExportInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psSyncFbFenceExportInsecureOUT->eError != PVRSRV_OK))
+       {
+               goto SyncFbFenceExportInsecure_exit;
+       }
+
+       /*
+        * For cases where we need a cross process handle we actually allocate two.
+        *
+        * The first one is a connection specific handle and it gets given the real
+        * release function. This handle does *NOT* get returned to the caller. Its
+        * purpose is to release any leaked resources when we either have a bad or
+        * abnormally terminated client. If we didn't do this then the resource
+        * wouldn't be freed until driver unload. If the resource is freed normally,
+        * this handle can be looked up via the cross process handle and then
+        * released accordingly.
+        *
+        * The second one is a cross process handle and it gets given a noop release
+        * function. This handle does get returned to the caller.
+        */
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       psSyncFbFenceExportInsecureOUT->eError =
+           PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase, &hExportInt,
+                                     (void *)psExportInt, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT,
+                                     PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+                                     (PFN_HANDLE_RELEASE) &
+                                     _SyncFbFenceExportInsecurepsExportIntRelease);
+       if (unlikely(psSyncFbFenceExportInsecureOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+               goto SyncFbFenceExportInsecure_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       /* Lock over handle creation. */
+       LockHandle(KERNEL_HANDLE_BASE);
+       psSyncFbFenceExportInsecureOUT->eError = PVRSRVAllocHandleUnlocked(KERNEL_HANDLE_BASE,
+                                                                          &psSyncFbFenceExportInsecureOUT->
+                                                                          hExport,
+                                                                          (void *)psExportInt,
+                                                                          PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT,
+                                                                          PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+                                                                          (PFN_HANDLE_RELEASE) &
+                                                                          ReleaseExport);
+       if (unlikely(psSyncFbFenceExportInsecureOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(KERNEL_HANDLE_BASE);
+               goto SyncFbFenceExportInsecure_exit;
+       }
+       /* Release now we have created handles. */
+       UnlockHandle(KERNEL_HANDLE_BASE);
+
+SyncFbFenceExportInsecure_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psFenceInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                           hFence, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       if (psSyncFbFenceExportInsecureOUT->eError != PVRSRV_OK)
+       {
+               if (psSyncFbFenceExportInsecureOUT->hExport)
+               {
+                       PVRSRV_ERROR eError;
+
+                       /* Lock over handle creation cleanup. */
+                       LockHandle(KERNEL_HANDLE_BASE);
+
+                       eError = PVRSRVDestroyHandleUnlocked(KERNEL_HANDLE_BASE,
+                                                            (IMG_HANDLE)
+                                                            psSyncFbFenceExportInsecureOUT->
+                                                            hExport,
+                                                            PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT);
+                       if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)))
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: %s", __func__, PVRSRVGetErrorString(eError)));
+                       }
+                       /* Releasing the handle should free/destroy/release the resource.
+                        * This should never fail... */
+                       PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+                       /* Release now we have cleaned up creation handles. */
+                       UnlockHandle(KERNEL_HANDLE_BASE);
+
+               }
+
+               if (hExportInt)
+               {
+                       PVRSRV_ERROR eError;
+                       /* Lock over handle creation cleanup. */
+                       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+                       eError =
+                           PVRSRVDestroyHandleUnlocked(psConnection->psProcessHandleBase->
+                                                       psHandleBase, hExportInt,
+                                                       PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT);
+                       if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: %s", __func__, PVRSRVGetErrorString(eError)));
+                       }
+                       /* Releasing the handle should free/destroy/release the resource.
+                        * This should never fail... */
+                       PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+                       /* Avoid freeing/destroying/releasing the resource a second time below */
+                       psExportInt = NULL;
+                       /* Release now we have cleaned up creation handles. */
+                       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+               }
+
+               if (psExportInt)
+               {
+                       SyncFbFenceExportDestroyInsecure(psExportInt);
+               }
+       }
+
+       return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncFbFenceExportInsecure NULL
+#endif
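
The comment in PVRSRVBridgeSyncFbFenceExportInsecure above describes the dual-handle scheme used for insecure cross-process exports: a per-connection handle that owns the export (given the real release function and never returned to the caller) plus a kernel-wide handle with a no-op release that is what the caller receives; the destroy path later locates the owning connection handle through the shared data pointer. The sketch below models only that ownership split with hypothetical hand-rolled handle tables; it is not the driver's handle API.

/* Minimal model of the dual-handle export scheme described above. Handle
 * tables, indices and release callbacks are simplified stand-ins; only the
 * ownership idea (connection handle owns, cross-process handle does not)
 * is taken from the driver. */
#include <stdio.h>

typedef void (*release_fn)(void *data);

struct handle { void *data; release_fn release; int used; };
struct handle_base { struct handle slots[8]; };

static int alloc_handle(struct handle_base *b, void *data, release_fn rel)
{
        for (int i = 0; i < 8; i++)
                if (!b->slots[i].used) {
                        b->slots[i] = (struct handle){ data, rel, 1 };
                        return i;
                }
        return -1;
}

static int find_handle(struct handle_base *b, void *data)
{
        for (int i = 0; i < 8; i++)
                if (b->slots[i].used && b->slots[i].data == data)
                        return i;
        return -1;
}

static void destroy_handle(struct handle_base *b, int h)
{
        if (h >= 0 && b->slots[h].used) {
                if (b->slots[h].release)
                        b->slots[h].release(b->slots[h].data);
                b->slots[h].used = 0;
        }
}

static void real_release(void *data) { printf("export %p destroyed\n", data); }
static void noop_release(void *data) { (void)data; /* handed to the caller */ }

int main(void)
{
        static struct handle_base conn;   /* per-connection handle base      */
        static struct handle_base kern;   /* stand-in for KERNEL_HANDLE_BASE */
        int export_payload = 42;

        /* Export: the connection handle owns the payload, the kernel-wide
         * (cross-process) handle gets a no-op release and is handed out.    */
        int h_conn  = alloc_handle(&conn, &export_payload, real_release);
        int h_cross = alloc_handle(&kern, &export_payload, noop_release);
        (void)h_conn;

        /* Destroy path: find the owning connection handle through the shared
         * data pointer, release it, then drop the cross-process handle.     */
        destroy_handle(&conn, find_handle(&conn, &export_payload));
        destroy_handle(&kern, h_cross);
        return 0;
}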
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+
+static IMG_INT
+PVRSRVBridgeSyncFbFenceExportDestroyInsecure(IMG_UINT32 ui32DispatchTableEntry,
+                                            IMG_UINT8 * psSyncFbFenceExportDestroyInsecureIN_UI8,
+                                            IMG_UINT8 * psSyncFbFenceExportDestroyInsecureOUT_UI8,
+                                            CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTDESTROYINSECURE *psSyncFbFenceExportDestroyInsecureIN =
+           (PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTDESTROYINSECURE *)
+           IMG_OFFSET_ADDR(psSyncFbFenceExportDestroyInsecureIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTDESTROYINSECURE *psSyncFbFenceExportDestroyInsecureOUT =
+           (PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTDESTROYINSECURE *)
+           IMG_OFFSET_ADDR(psSyncFbFenceExportDestroyInsecureOUT_UI8, 0);
+
+       PVRSRV_FENCE_EXPORT *psExportInt = NULL;
+       IMG_HANDLE hExportInt = NULL;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       /* Lock over handle destruction. */
+       LockHandle(KERNEL_HANDLE_BASE);
+       psSyncFbFenceExportDestroyInsecureOUT->eError =
+           PVRSRVLookupHandleUnlocked(KERNEL_HANDLE_BASE,
+                                      (void **)&psExportInt,
+                                      (IMG_HANDLE) psSyncFbFenceExportDestroyInsecureIN->hExport,
+                                      PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT, IMG_FALSE);
+       if (unlikely(psSyncFbFenceExportDestroyInsecureOUT->eError != PVRSRV_OK))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s",
+                        __func__,
+                        PVRSRVGetErrorString(psSyncFbFenceExportDestroyInsecureOUT->eError)));
+       }
+       PVR_ASSERT(psSyncFbFenceExportDestroyInsecureOUT->eError == PVRSRV_OK);
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(KERNEL_HANDLE_BASE);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+       /*
+        * Find the connection specific handle that represents the same data
+        * as the cross process handle, since releasing it will actually call the
+        * data's real release function (see the function where the cross
+        * process handle is allocated for more details).
+        */
+       psSyncFbFenceExportDestroyInsecureOUT->eError =
+           PVRSRVFindHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                    &hExportInt,
+                                    psExportInt, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT);
+       if (unlikely(psSyncFbFenceExportDestroyInsecureOUT->eError != PVRSRV_OK))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s",
+                        __func__,
+                        PVRSRVGetErrorString(psSyncFbFenceExportDestroyInsecureOUT->eError)));
+       }
+       PVR_ASSERT(psSyncFbFenceExportDestroyInsecureOUT->eError == PVRSRV_OK);
+
+       psSyncFbFenceExportDestroyInsecureOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                             hExportInt, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT);
+       if (unlikely((psSyncFbFenceExportDestroyInsecureOUT->eError != PVRSRV_OK) &&
+                    (psSyncFbFenceExportDestroyInsecureOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL)
+                    && (psSyncFbFenceExportDestroyInsecureOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s",
+                        __func__,
+                        PVRSRVGetErrorString(psSyncFbFenceExportDestroyInsecureOUT->eError)));
+       }
+       PVR_ASSERT((psSyncFbFenceExportDestroyInsecureOUT->eError == PVRSRV_OK) ||
+                  (psSyncFbFenceExportDestroyInsecureOUT->eError == PVRSRV_ERROR_RETRY));
+       /* Release now we have destroyed handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       /* Lock over handle destruction. */
+       LockHandle(KERNEL_HANDLE_BASE);
+
+       psSyncFbFenceExportDestroyInsecureOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(KERNEL_HANDLE_BASE,
+                                             (IMG_HANDLE) psSyncFbFenceExportDestroyInsecureIN->
+                                             hExport, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT);
+       if (unlikely
+           ((psSyncFbFenceExportDestroyInsecureOUT->eError != PVRSRV_OK)
+            && (psSyncFbFenceExportDestroyInsecureOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL)
+            && (psSyncFbFenceExportDestroyInsecureOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s",
+                        __func__,
+                        PVRSRVGetErrorString(psSyncFbFenceExportDestroyInsecureOUT->eError)));
+               UnlockHandle(KERNEL_HANDLE_BASE);
+               goto SyncFbFenceExportDestroyInsecure_exit;
+       }
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(KERNEL_HANDLE_BASE);
+
+SyncFbFenceExportDestroyInsecure_exit:
+
+       return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncFbFenceExportDestroyInsecure NULL
+#endif
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+static PVRSRV_ERROR _SyncFbFenceImportInsecurepsSyncHandleIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = SyncFbFenceRelease((PVRSRV_FENCE_SERVER *) pvData);
+       return eError;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncFbFenceImportInsecure(IMG_UINT32 ui32DispatchTableEntry,
+                                     IMG_UINT8 * psSyncFbFenceImportInsecureIN_UI8,
+                                     IMG_UINT8 * psSyncFbFenceImportInsecureOUT_UI8,
+                                     CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_SYNCFBFENCEIMPORTINSECURE *psSyncFbFenceImportInsecureIN =
+           (PVRSRV_BRIDGE_IN_SYNCFBFENCEIMPORTINSECURE *)
+           IMG_OFFSET_ADDR(psSyncFbFenceImportInsecureIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_SYNCFBFENCEIMPORTINSECURE *psSyncFbFenceImportInsecureOUT =
+           (PVRSRV_BRIDGE_OUT_SYNCFBFENCEIMPORTINSECURE *)
+           IMG_OFFSET_ADDR(psSyncFbFenceImportInsecureOUT_UI8, 0);
+
+       IMG_HANDLE hImport = psSyncFbFenceImportInsecureIN->hImport;
+       PVRSRV_FENCE_EXPORT *psImportInt = NULL;
+       PVRSRV_FENCE_SERVER *psSyncHandleInt = NULL;
+
+       /* Lock over handle lookup. */
+       LockHandle(KERNEL_HANDLE_BASE);
+
+       /* Look up the address from the handle */
+       psSyncFbFenceImportInsecureOUT->eError =
+           PVRSRVLookupHandleUnlocked(KERNEL_HANDLE_BASE,
+                                      (void **)&psImportInt,
+                                      hImport, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT, IMG_TRUE);
+       if (unlikely(psSyncFbFenceImportInsecureOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(KERNEL_HANDLE_BASE);
+               goto SyncFbFenceImportInsecure_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(KERNEL_HANDLE_BASE);
+
+       psSyncFbFenceImportInsecureOUT->eError =
+           SyncFbFenceImportInsecure(psConnection, OSGetDevNode(psConnection),
+                                     psImportInt, &psSyncHandleInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psSyncFbFenceImportInsecureOUT->eError != PVRSRV_OK))
+       {
+               goto SyncFbFenceImportInsecure_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       psSyncFbFenceImportInsecureOUT->eError =
+           PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                     &psSyncFbFenceImportInsecureOUT->hSyncHandle,
+                                     (void *)psSyncHandleInt,
+                                     PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER,
+                                     PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                     (PFN_HANDLE_RELEASE) &
+                                     _SyncFbFenceImportInsecurepsSyncHandleIntRelease);
+       if (unlikely(psSyncFbFenceImportInsecureOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+               goto SyncFbFenceImportInsecure_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+SyncFbFenceImportInsecure_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(KERNEL_HANDLE_BASE);
+
+       /* Unreference the previously looked up handle */
+       if (psImportInt)
+       {
+               PVRSRVReleaseHandleUnlocked(KERNEL_HANDLE_BASE,
+                                           hImport, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(KERNEL_HANDLE_BASE);
+
+       if (psSyncFbFenceImportInsecureOUT->eError != PVRSRV_OK)
+       {
+               if (psSyncHandleInt)
+               {
+                       SyncFbFenceRelease(psSyncHandleInt);
+               }
+       }
+
+       return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncFbFenceImportInsecure NULL
+#endif
+
+static IMG_INT
+PVRSRVBridgeSyncFbFenceExportSecure(IMG_UINT32 ui32DispatchTableEntry,
+                                   IMG_UINT8 * psSyncFbFenceExportSecureIN_UI8,
+                                   IMG_UINT8 * psSyncFbFenceExportSecureOUT_UI8,
+                                   CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTSECURE *psSyncFbFenceExportSecureIN =
+           (PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTSECURE *)
+           IMG_OFFSET_ADDR(psSyncFbFenceExportSecureIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTSECURE *psSyncFbFenceExportSecureOUT =
+           (PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTSECURE *)
+           IMG_OFFSET_ADDR(psSyncFbFenceExportSecureOUT_UI8, 0);
+
+       IMG_HANDLE hFence = psSyncFbFenceExportSecureIN->hFence;
+       PVRSRV_FENCE_SERVER *psFenceInt = NULL;
+       PVRSRV_FENCE_EXPORT *psExportInt = NULL;
+       CONNECTION_DATA *psSecureConnection;
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       /* Look up the address from the handle */
+       psSyncFbFenceExportSecureOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                      (void **)&psFenceInt,
+                                      hFence, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER, IMG_TRUE);
+       if (unlikely(psSyncFbFenceExportSecureOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+               goto SyncFbFenceExportSecure_exit;
+       }
+       /* Release now we have looked up handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       psSyncFbFenceExportSecureOUT->eError =
+           SyncFbFenceExportSecure(psConnection, OSGetDevNode(psConnection),
+                                   psFenceInt,
+                                   &psSyncFbFenceExportSecureOUT->Export,
+                                   &psExportInt, &psSecureConnection);
+       /* Exit early if bridged call fails */
+       if (unlikely(psSyncFbFenceExportSecureOUT->eError != PVRSRV_OK))
+       {
+               goto SyncFbFenceExportSecure_exit;
+       }
+
+SyncFbFenceExportSecure_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (psFenceInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                           hFence, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER);
+       }
+       /* Release now we have cleaned up look up handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       if (psSyncFbFenceExportSecureOUT->eError != PVRSRV_OK)
+       {
+               if (psExportInt)
+               {
+                       SyncFbFenceExportDestroySecure(psExportInt);
+               }
+       }
+
+       return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncFbFenceExportDestroySecure(IMG_UINT32 ui32DispatchTableEntry,
+                                          IMG_UINT8 * psSyncFbFenceExportDestroySecureIN_UI8,
+                                          IMG_UINT8 * psSyncFbFenceExportDestroySecureOUT_UI8,
+                                          CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTDESTROYSECURE *psSyncFbFenceExportDestroySecureIN =
+           (PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTDESTROYSECURE *)
+           IMG_OFFSET_ADDR(psSyncFbFenceExportDestroySecureIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTDESTROYSECURE *psSyncFbFenceExportDestroySecureOUT =
+           (PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTDESTROYSECURE *)
+           IMG_OFFSET_ADDR(psSyncFbFenceExportDestroySecureOUT_UI8, 0);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psHandleBase);
+
+       psSyncFbFenceExportDestroySecureOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+                                             (IMG_HANDLE) psSyncFbFenceExportDestroySecureIN->
+                                             hExport, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT);
+       if (unlikely
+           ((psSyncFbFenceExportDestroySecureOUT->eError != PVRSRV_OK)
+            && (psSyncFbFenceExportDestroySecureOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL)
+            && (psSyncFbFenceExportDestroySecureOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s",
+                        __func__,
+                        PVRSRVGetErrorString(psSyncFbFenceExportDestroySecureOUT->eError)));
+               UnlockHandle(psConnection->psHandleBase);
+               goto SyncFbFenceExportDestroySecure_exit;
+       }
+
+       /* Release now we have destroyed handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+SyncFbFenceExportDestroySecure_exit:
+
+       return 0;
+}
+
+static PVRSRV_ERROR _SyncFbFenceImportSecurepsSyncHandleIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = SyncFbFenceRelease((PVRSRV_FENCE_SERVER *) pvData);
+       return eError;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncFbFenceImportSecure(IMG_UINT32 ui32DispatchTableEntry,
+                                   IMG_UINT8 * psSyncFbFenceImportSecureIN_UI8,
+                                   IMG_UINT8 * psSyncFbFenceImportSecureOUT_UI8,
+                                   CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_SYNCFBFENCEIMPORTSECURE *psSyncFbFenceImportSecureIN =
+           (PVRSRV_BRIDGE_IN_SYNCFBFENCEIMPORTSECURE *)
+           IMG_OFFSET_ADDR(psSyncFbFenceImportSecureIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_SYNCFBFENCEIMPORTSECURE *psSyncFbFenceImportSecureOUT =
+           (PVRSRV_BRIDGE_OUT_SYNCFBFENCEIMPORTSECURE *)
+           IMG_OFFSET_ADDR(psSyncFbFenceImportSecureOUT_UI8, 0);
+
+       PVRSRV_FENCE_SERVER *psSyncHandleInt = NULL;
+
+       psSyncFbFenceImportSecureOUT->eError =
+           SyncFbFenceImportSecure(psConnection, OSGetDevNode(psConnection),
+                                   psSyncFbFenceImportSecureIN->Import, &psSyncHandleInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psSyncFbFenceImportSecureOUT->eError != PVRSRV_OK))
+       {
+               goto SyncFbFenceImportSecure_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+       psSyncFbFenceImportSecureOUT->eError =
+           PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+                                     &psSyncFbFenceImportSecureOUT->hSyncHandle,
+                                     (void *)psSyncHandleInt,
+                                     PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER,
+                                     PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                     (PFN_HANDLE_RELEASE) &
+                                     _SyncFbFenceImportSecurepsSyncHandleIntRelease);
+       if (unlikely(psSyncFbFenceImportSecureOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+               goto SyncFbFenceImportSecure_exit;
+       }
+
+       /* Release now we have created handles. */
+       UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+SyncFbFenceImportSecure_exit:
+
+       if (psSyncFbFenceImportSecureOUT->eError != PVRSRV_OK)
+       {
+               if (psSyncHandleInt)
+               {
+                       SyncFbFenceRelease(psSyncHandleInt);
+               }
+       }
+
+       return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitSYNCFALLBACKBridge(void);
+void DeinitSYNCFALLBACKBridge(void);
+
+/*
+ * Register all SYNCFALLBACK functions with services
+ */
+PVRSRV_ERROR InitSYNCFALLBACKBridge(void)
+{
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                             PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINECREATEPVR,
+                             PVRSRVBridgeSyncFbTimelineCreatePVR, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                             PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINERELEASE,
+                             PVRSRVBridgeSyncFbTimelineRelease, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEDUP,
+                             PVRSRVBridgeSyncFbFenceDup, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                             PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEMERGE,
+                             PVRSRVBridgeSyncFbFenceMerge, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                             PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCERELEASE,
+                             PVRSRVBridgeSyncFbFenceRelease, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                             PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEWAIT,
+                             PVRSRVBridgeSyncFbFenceWait, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                             PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEDUMP,
+                             PVRSRVBridgeSyncFbFenceDump, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                             PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINECREATESW,
+                             PVRSRVBridgeSyncFbTimelineCreateSW, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                             PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCECREATESW,
+                             PVRSRVBridgeSyncFbFenceCreateSW, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                             PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINEADVANCESW,
+                             PVRSRVBridgeSyncFbTimelineAdvanceSW, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                             PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTINSECURE,
+                             PVRSRVBridgeSyncFbFenceExportInsecure, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                             PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTDESTROYINSECURE,
+                             PVRSRVBridgeSyncFbFenceExportDestroyInsecure, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                             PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEIMPORTINSECURE,
+                             PVRSRVBridgeSyncFbFenceImportInsecure, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                             PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTSECURE,
+                             PVRSRVBridgeSyncFbFenceExportSecure, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                             PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTDESTROYSECURE,
+                             PVRSRVBridgeSyncFbFenceExportDestroySecure, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                             PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEIMPORTSECURE,
+                             PVRSRVBridgeSyncFbFenceImportSecure, NULL);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all syncfallback functions with services
+ */
+void DeinitSYNCFALLBACKBridge(void)
+{
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                               PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINECREATEPVR);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                               PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINERELEASE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                               PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEDUP);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                               PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEMERGE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                               PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCERELEASE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                               PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEWAIT);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                               PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEDUMP);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                               PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINECREATESW);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                               PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCECREATESW);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                               PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINEADVANCESW);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                               PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTINSECURE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                               PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTDESTROYINSECURE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                               PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEIMPORTINSECURE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                               PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTSECURE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                               PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTDESTROYSECURE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+                               PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEIMPORTSECURE);
+
+}
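
InitSYNCFALLBACKBridge() and DeinitSYNCFALLBACKBridge() above register and unregister every SYNCFALLBACK command with the services dispatch table; entry points compiled out of the build (such as the insecure export handlers when SUPPORT_INSECURE_EXPORT is undefined) are registered as NULL. The sketch below is a simplified model of that pairing with a hypothetical two-dimensional table; the fourth argument of the real SetDispatchTableEntry() calls (NULL above) is omitted here.

/* Simplified model of the Init/Deinit dispatch-table pairing above. Group
 * and command IDs, table sizes and the handler body are all illustrative. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_GROUPS     4
#define CMDS_PER_GROUP 32

typedef int (*bridge_fn)(uint32_t entry, uint8_t *in, uint8_t *out, void *conn);

static bridge_fn dispatch_table[NUM_GROUPS][CMDS_PER_GROUP];

static void set_entry(unsigned group, unsigned cmd, bridge_fn fn)
{
        dispatch_table[group][cmd] = fn;
}

static void unset_entry(unsigned group, unsigned cmd)
{
        dispatch_table[group][cmd] = NULL;
}

/* Example handler in the shape of the PVRSRVBridge* entry points. */
static int bridge_fence_dup(uint32_t entry, uint8_t *in, uint8_t *out, void *conn)
{
        (void)entry; (void)in; (void)out; (void)conn;
        printf("fence dup handler called\n");
        return 0;
}

int main(void)
{
        enum { GROUP_SYNCFALLBACK = 1, CMD_FENCEDUP = 2 }; /* illustrative IDs */

        /* Init*Bridge(): register each command handler for the group.        */
        set_entry(GROUP_SYNCFALLBACK, CMD_FENCEDUP, bridge_fence_dup);

        /* ioctl path: look up the handler; a NULL entry is treated here as
         * "command not supported in this build".                             */
        bridge_fn fn = dispatch_table[GROUP_SYNCFALLBACK][CMD_FENCEDUP];
        if (fn)
                fn(0, NULL, NULL, NULL);

        /* Deinit*Bridge(): unregister the same commands on teardown.         */
        unset_entry(GROUP_SYNCFALLBACK, CMD_FENCEDUP);
        return 0;
}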
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/synctracking_bridge/client_synctracking_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/synctracking_bridge/client_synctracking_bridge.h
new file mode 100644 (file)
index 0000000..544efd9
--- /dev/null
@@ -0,0 +1,68 @@
+/*******************************************************************************
+@File
+@Title          Client bridge header for synctracking
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for synctracking
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_SYNCTRACKING_BRIDGE_H
+#define CLIENT_SYNCTRACKING_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_synctracking_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncRecordRemoveByHandle(IMG_HANDLE hBridge, IMG_HANDLE hhRecord);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncRecordAdd(IMG_HANDLE hBridge,
+                                             IMG_HANDLE * phhRecord,
+                                             IMG_HANDLE hhServerSyncPrimBlock,
+                                             IMG_UINT32 ui32ui32FwBlockAddr,
+                                             IMG_UINT32 ui32ui32SyncOffset,
+                                             IMG_BOOL bbServerSync,
+                                             IMG_UINT32 ui32ClassNameSize,
+                                             const IMG_CHAR * puiClassName);
+
+#endif /* CLIENT_SYNCTRACKING_BRIDGE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/synctracking_bridge/client_synctracking_direct_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/synctracking_bridge/client_synctracking_direct_bridge.c
new file mode 100644 (file)
index 0000000..baeb89a
--- /dev/null
@@ -0,0 +1,92 @@
+/*******************************************************************************
+@File
+@Title          Direct client bridge for synctracking
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the client side of the bridge for synctracking
+                which is used in calls from Server context.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_synctracking_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+
+#include "sync.h"
+#include "sync_server.h"
+
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncRecordRemoveByHandle(IMG_HANDLE hBridge, IMG_HANDLE hhRecord)
+{
+       PVRSRV_ERROR eError;
+       SYNC_RECORD_HANDLE pshRecordInt;
+       PVR_UNREFERENCED_PARAMETER(hBridge);
+
+       pshRecordInt = (SYNC_RECORD_HANDLE) hhRecord;
+
+       eError = PVRSRVSyncRecordRemoveByHandleKM(pshRecordInt);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncRecordAdd(IMG_HANDLE hBridge,
+                                             IMG_HANDLE * phhRecord,
+                                             IMG_HANDLE hhServerSyncPrimBlock,
+                                             IMG_UINT32 ui32ui32FwBlockAddr,
+                                             IMG_UINT32 ui32ui32SyncOffset,
+                                             IMG_BOOL bbServerSync,
+                                             IMG_UINT32 ui32ClassNameSize,
+                                             const IMG_CHAR * puiClassName)
+{
+       PVRSRV_ERROR eError;
+       SYNC_RECORD_HANDLE pshRecordInt = NULL;
+       SYNC_PRIMITIVE_BLOCK *pshServerSyncPrimBlockInt;
+
+       pshServerSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK *) hhServerSyncPrimBlock;
+
+       eError =
+           PVRSRVSyncRecordAddKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+                                 &pshRecordInt,
+                                 pshServerSyncPrimBlockInt,
+                                 ui32ui32FwBlockAddr,
+                                 ui32ui32SyncOffset,
+                                 bbServerSync, ui32ClassNameSize, puiClassName);
+
+       *phhRecord = pshRecordInt;
+       return eError;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/synctracking_bridge/common_synctracking_bridge.h b/drivers/gpu/drm/img/img-rogue/generated/rogue/synctracking_bridge/common_synctracking_bridge.h
new file mode 100644 (file)
index 0000000..036c7dc
--- /dev/null
@@ -0,0 +1,97 @@
+/*******************************************************************************
+@File
+@Title          Common bridge header for synctracking
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures used by both the client
+                and server side of the bridge for synctracking
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_SYNCTRACKING_BRIDGE_H
+#define COMMON_SYNCTRACKING_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#define PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST                   0
+#define PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE                    PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+0
+#define PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD                       PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+1
+#define PVRSRV_BRIDGE_SYNCTRACKING_CMD_LAST                    (PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+1)
+
+/*******************************************
+            SyncRecordRemoveByHandle
+ *******************************************/
+
+/* Bridge in structure for SyncRecordRemoveByHandle */
+typedef struct PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE_TAG
+{
+       IMG_HANDLE hhRecord;
+} __packed PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE;
+
+/* Bridge out structure for SyncRecordRemoveByHandle */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE_TAG
+{
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE;
+
+/*******************************************
+            SyncRecordAdd
+ *******************************************/
+
+/* Bridge in structure for SyncRecordAdd */
+typedef struct PVRSRV_BRIDGE_IN_SYNCRECORDADD_TAG
+{
+       IMG_HANDLE hhServerSyncPrimBlock;
+       const IMG_CHAR *puiClassName;
+       IMG_BOOL bbServerSync;
+       IMG_UINT32 ui32ClassNameSize;
+       IMG_UINT32 ui32ui32FwBlockAddr;
+       IMG_UINT32 ui32ui32SyncOffset;
+} __packed PVRSRV_BRIDGE_IN_SYNCRECORDADD;
+
+/* Bridge out structure for SyncRecordAdd */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCRECORDADD_TAG
+{
+       IMG_HANDLE hhRecord;
+       PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCRECORDADD;
+
+#endif /* COMMON_SYNCTRACKING_BRIDGE_H */
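
The __packed IN/OUT structures above define the packet layout shared by the client and server sides of the synctracking bridge; the class-name size carried in the IN packet is bound-checked server-side against PVRSRV_SYNC_NAME_LENGTH (see PVRSRVBridgeSyncRecordAdd below). As a purely hypothetical illustration, the sketch below fills a SyncRecordAdd-style IN packet and reads back an OUT packet using plain C stand-ins for the IMG_* typedefs; the field names are simplified and the ioctl transport is omitted.

/* Hypothetical client-side view of the SyncRecordAdd packets declared above.
 * Plain C types stand in for the IMG_* typedefs; only the packed-layout idea
 * is taken from the header. */
#include <stdint.h>
#include <stdio.h>

struct in_syncrecordadd {                 /* cf. PVRSRV_BRIDGE_IN_SYNCRECORDADD  */
        void       *server_sync_prim_block;
        const char *class_name;
        int32_t     server_sync;
        uint32_t    class_name_size;      /* size in chars sent over the bridge  */
        uint32_t    fw_block_addr;
        uint32_t    sync_offset;
} __attribute__((packed));

struct out_syncrecordadd {                /* cf. PVRSRV_BRIDGE_OUT_SYNCRECORDADD */
        void *record;
        int   error;
} __attribute__((packed));

int main(void)
{
        static const char class_name[] = "app cleanup sync";
        struct in_syncrecordadd in = { 0 };
        struct out_syncrecordadd out = { 0 };

        /* Client fills the IN packet before crossing the bridge. */
        in.class_name = class_name;
        in.class_name_size = (uint32_t)sizeof(class_name);
        in.fw_block_addr = 0x1000;
        in.sync_offset = 0;

        /* ...the ioctl would run the server handler here and fill OUT... */
        out.error = 0;

        printf("class \"%s\" (%u chars), error %d\n",
               in.class_name, (unsigned)in.class_name_size, out.error);
        return 0;
}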
diff --git a/drivers/gpu/drm/img/img-rogue/generated/rogue/synctracking_bridge/server_synctracking_bridge.c b/drivers/gpu/drm/img/img-rogue/generated/rogue/synctracking_bridge/server_synctracking_bridge.c
new file mode 100644 (file)
index 0000000..adc8ab4
--- /dev/null
@@ -0,0 +1,333 @@
+/*******************************************************************************
+@File
+@Title          Server bridge for synctracking
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for synctracking
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "sync.h"
+#include "sync_server.h"
+
+#include "common_synctracking_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeSyncRecordRemoveByHandle(IMG_UINT32 ui32DispatchTableEntry,
+                                    IMG_UINT8 * psSyncRecordRemoveByHandleIN_UI8,
+                                    IMG_UINT8 * psSyncRecordRemoveByHandleOUT_UI8,
+                                    CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE *psSyncRecordRemoveByHandleIN =
+           (PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE *)
+           IMG_OFFSET_ADDR(psSyncRecordRemoveByHandleIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE *psSyncRecordRemoveByHandleOUT =
+           (PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE *)
+           IMG_OFFSET_ADDR(psSyncRecordRemoveByHandleOUT_UI8, 0);
+
+       /* Lock over handle destruction. */
+       LockHandle(psConnection->psHandleBase);
+
+       psSyncRecordRemoveByHandleOUT->eError =
+           PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+                                             (IMG_HANDLE) psSyncRecordRemoveByHandleIN->hhRecord,
+                                             PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE);
+       if (unlikely((psSyncRecordRemoveByHandleOUT->eError != PVRSRV_OK) &&
+                    (psSyncRecordRemoveByHandleOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+                    (psSyncRecordRemoveByHandleOUT->eError != PVRSRV_ERROR_RETRY)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s",
+                        __func__, PVRSRVGetErrorString(psSyncRecordRemoveByHandleOUT->eError)));
+               UnlockHandle(psConnection->psHandleBase);
+               goto SyncRecordRemoveByHandle_exit;
+       }
+
+       /* Release now that we have destroyed handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+SyncRecordRemoveByHandle_exit:
+
+       return 0;
+}
+
+static PVRSRV_ERROR _SyncRecordAddpshRecordIntRelease(void *pvData)
+{
+       PVRSRV_ERROR eError;
+       eError = PVRSRVSyncRecordRemoveByHandleKM((SYNC_RECORD_HANDLE) pvData);
+       return eError;
+}
+
+static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX,
+             "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeSyncRecordAdd(IMG_UINT32 ui32DispatchTableEntry,
+                         IMG_UINT8 * psSyncRecordAddIN_UI8,
+                         IMG_UINT8 * psSyncRecordAddOUT_UI8, CONNECTION_DATA * psConnection)
+{
+       PVRSRV_BRIDGE_IN_SYNCRECORDADD *psSyncRecordAddIN =
+           (PVRSRV_BRIDGE_IN_SYNCRECORDADD *) IMG_OFFSET_ADDR(psSyncRecordAddIN_UI8, 0);
+       PVRSRV_BRIDGE_OUT_SYNCRECORDADD *psSyncRecordAddOUT =
+           (PVRSRV_BRIDGE_OUT_SYNCRECORDADD *) IMG_OFFSET_ADDR(psSyncRecordAddOUT_UI8, 0);
+
+       SYNC_RECORD_HANDLE pshRecordInt = NULL;
+       IMG_HANDLE hhServerSyncPrimBlock = psSyncRecordAddIN->hhServerSyncPrimBlock;
+       SYNC_PRIMITIVE_BLOCK *pshServerSyncPrimBlockInt = NULL;
+       IMG_CHAR *uiClassNameInt = NULL;
+
+       IMG_UINT32 ui32NextOffset = 0;
+       IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+       IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
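+       /* The required argument-buffer size is accumulated in 64-bit arithmetic
+        * so it cannot wrap before the IMG_UINT32_MAX range check below.
+        */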
+       IMG_UINT32 ui32BufferSize = 0;
+       IMG_UINT64 ui64BufferSize =
+           ((IMG_UINT64) psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) + 0;
+
+       if (unlikely(psSyncRecordAddIN->ui32ClassNameSize > PVRSRV_SYNC_NAME_LENGTH))
+       {
+               psSyncRecordAddOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+               goto SyncRecordAdd_exit;
+       }
+
+       if (ui64BufferSize > IMG_UINT32_MAX)
+       {
+               psSyncRecordAddOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+               goto SyncRecordAdd_exit;
+       }
+
+       ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+       if (ui32BufferSize != 0)
+       {
+#if !defined(INTEGRITY_OS)
+               /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+               IMG_UINT32 ui32InBufferOffset =
+                   PVR_ALIGN(sizeof(*psSyncRecordAddIN), sizeof(unsigned long));
+               IMG_UINT32 ui32InBufferExcessSize =
+                   ui32InBufferOffset >=
+                   PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+               bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+               if (bHaveEnoughSpace)
+               {
+                       IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psSyncRecordAddIN;
+
+                       pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+               }
+               else
+#endif
+               {
+                       pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+                       if (!pArrayArgsBuffer)
+                       {
+                               psSyncRecordAddOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto SyncRecordAdd_exit;
+                       }
+               }
+       }
+
+       if (psSyncRecordAddIN->ui32ClassNameSize != 0)
+       {
+               uiClassNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+               ui32NextOffset += psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR);
+       }
+
+       /* Copy the data over */
+       if (psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR) > 0)
+       {
+               if (OSCopyFromUser
+                   (NULL, uiClassNameInt, (const void __user *)psSyncRecordAddIN->puiClassName,
+                    psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+               {
+                       psSyncRecordAddOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+                       goto SyncRecordAdd_exit;
+               }
+               ((IMG_CHAR *)
+                uiClassNameInt)[(psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) - 1] =
+       '\0';
+       }
+
+       /* Lock over handle lookup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Look up the address from the handle */
+       psSyncRecordAddOUT->eError =
+           PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+                                      (void **)&pshServerSyncPrimBlockInt,
+                                      hhServerSyncPrimBlock,
+                                      PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE);
+       if (unlikely(psSyncRecordAddOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto SyncRecordAdd_exit;
+       }
+       /* Release now that we have looked up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       psSyncRecordAddOUT->eError =
+           PVRSRVSyncRecordAddKM(psConnection, OSGetDevNode(psConnection),
+                                 &pshRecordInt,
+                                 pshServerSyncPrimBlockInt,
+                                 psSyncRecordAddIN->ui32ui32FwBlockAddr,
+                                 psSyncRecordAddIN->ui32ui32SyncOffset,
+                                 psSyncRecordAddIN->bbServerSync,
+                                 psSyncRecordAddIN->ui32ClassNameSize, uiClassNameInt);
+       /* Exit early if bridged call fails */
+       if (unlikely(psSyncRecordAddOUT->eError != PVRSRV_OK))
+       {
+               goto SyncRecordAdd_exit;
+       }
+
+       /* Lock over handle creation. */
+       LockHandle(psConnection->psHandleBase);
+
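+       /* Wrap the new sync record in a handle for the caller; the registered
+        * release callback removes the record when the handle is destroyed.
+        */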
+       psSyncRecordAddOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+                                                              &psSyncRecordAddOUT->hhRecord,
+                                                              (void *)pshRecordInt,
+                                                              PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE,
+                                                              PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+                                                              (PFN_HANDLE_RELEASE) &
+                                                              _SyncRecordAddpshRecordIntRelease);
+       if (unlikely(psSyncRecordAddOUT->eError != PVRSRV_OK))
+       {
+               UnlockHandle(psConnection->psHandleBase);
+               goto SyncRecordAdd_exit;
+       }
+
+       /* Release now that we have created handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+SyncRecordAdd_exit:
+
+       /* Lock over handle lookup cleanup. */
+       LockHandle(psConnection->psHandleBase);
+
+       /* Unreference the previously looked up handle */
+       if (pshServerSyncPrimBlockInt)
+       {
+               PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+                                           hhServerSyncPrimBlock,
+                                           PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+       }
+       /* Release now that we have cleaned up the looked-up handles. */
+       UnlockHandle(psConnection->psHandleBase);
+
+       if (psSyncRecordAddOUT->eError != PVRSRV_OK)
+       {
+               if (pshRecordInt)
+               {
+                       PVRSRVSyncRecordRemoveByHandleKM(pshRecordInt);
+               }
+       }
+
+       /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       if (psSyncRecordAddOUT->eError == PVRSRV_OK)
+               PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+#if defined(INTEGRITY_OS)
+       if (pArrayArgsBuffer)
+#else
+       if (!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+               OSFreeMemNoStats(pArrayArgsBuffer);
+
+       return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitSYNCTRACKINGBridge(void);
+void DeinitSYNCTRACKINGBridge(void);
+
+/*
+ * Register all SYNCTRACKING functions with services
+ */
+PVRSRV_ERROR InitSYNCTRACKINGBridge(void)
+{
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING,
+                             PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE,
+                             PVRSRVBridgeSyncRecordRemoveByHandle, NULL);
+
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD,
+                             PVRSRVBridgeSyncRecordAdd, NULL);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Unregister all SYNCTRACKING functions from services
+ */
+void DeinitSYNCTRACKINGBridge(void)
+{
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING,
+                               PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE);
+
+       UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING,
+                               PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD);
+
+}
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_1.V.2.30.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_1.V.2.30.h
new file mode 100644 (file)
index 0000000..88b1231
--- /dev/null
@@ -0,0 +1,81 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 1.V.2.30 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_1_V_2_30_H
+#define RGXCONFIG_KM_1_V_2_30_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 1
+#define RGX_BNC_KM_N 2
+#define RGX_BNC_KM_C 30
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_FBCDC_ALGORITHM (1U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_META (MTP218)
+#define RGX_FEATURE_META_COREMEM_SIZE (0U)
+#define RGX_FEATURE_NUM_CLUSTERS (2U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128U)
+#define RGX_FEATURE_TILE_SIZE_X (32U)
+#define RGX_FEATURE_TILE_SIZE_Y (32U)
+#define RGX_FEATURE_TLA
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+
+#endif /* RGXCONFIG_KM_1_V_2_30_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_1.V.4.19.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_1.V.4.19.h
new file mode 100644 (file)
index 0000000..ec1bbd6
--- /dev/null
@@ -0,0 +1,81 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 1.V.4.19 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_1_V_4_19_H
+#define RGXCONFIG_KM_1_V_4_19_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 1
+#define RGX_BNC_KM_N 4
+#define RGX_BNC_KM_C 19
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_FBCDC_ALGORITHM (1U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_META (MTP218)
+#define RGX_FEATURE_META_COREMEM_SIZE (0U)
+#define RGX_FEATURE_NUM_CLUSTERS (4U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128U)
+#define RGX_FEATURE_TILE_SIZE_X (32U)
+#define RGX_FEATURE_TILE_SIZE_Y (32U)
+#define RGX_FEATURE_TLA
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+
+#endif /* RGXCONFIG_KM_1_V_4_19_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_1.V.4.5.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_1.V.4.5.h
new file mode 100644 (file)
index 0000000..b29afcf
--- /dev/null
@@ -0,0 +1,80 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 1.V.4.5 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_1_V_4_5_H
+#define RGXCONFIG_KM_1_V_4_5_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 1
+#define RGX_BNC_KM_N 4
+#define RGX_BNC_KM_C 5
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_FBCDC_ALGORITHM (1U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_META (MTP218)
+#define RGX_FEATURE_META_COREMEM_SIZE (0U)
+#define RGX_FEATURE_NUM_CLUSTERS (4U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128U)
+#define RGX_FEATURE_TILE_SIZE_X (32U)
+#define RGX_FEATURE_TILE_SIZE_Y (32U)
+#define RGX_FEATURE_TLA
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+
+#endif /* RGXCONFIG_KM_1_V_4_5_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_15.V.1.64.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_15.V.1.64.h
new file mode 100644 (file)
index 0000000..a23aebf
--- /dev/null
@@ -0,0 +1,86 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 15.V.1.64 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_15_V_1_64_H
+#define RGXCONFIG_KM_15_V_1_64_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 15
+#define RGX_BNC_KM_N 1
+#define RGX_BNC_KM_C 64
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_DYNAMIC_DUST_POWER
+#define RGX_FEATURE_FBCDC_ALGORITHM (2U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_META (LTP217)
+#define RGX_FEATURE_META_COREMEM_SIZE (0U)
+#define RGX_FEATURE_NUM_CLUSTERS (1U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (2U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_ROGUEXE
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (64U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_TILE_SIZE_X (32U)
+#define RGX_FEATURE_TILE_SIZE_Y (32U)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XE_ARCHITECTURE (1U)
+
+#endif /* RGXCONFIG_KM_15_V_1_64_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_22.V.104.18.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_22.V.104.18.h
new file mode 100644 (file)
index 0000000..4654391
--- /dev/null
@@ -0,0 +1,88 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 22.V.104.18 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_22_V_104_18_H
+#define RGXCONFIG_KM_22_V_104_18_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 22
+#define RGX_BNC_KM_N 104
+#define RGX_BNC_KM_C 18
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_NUM_CLUSTERS (1U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (7U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (36U)
+#define RGX_FEATURE_ROGUEXE
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (1U)
+#define RGX_FEATURE_SINGLE_BIF
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (64U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TILE_SIZE_X (16U)
+#define RGX_FEATURE_TILE_SIZE_Y (16U)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XE_ARCHITECTURE (1U)
+
+#endif /* RGXCONFIG_KM_22_V_104_18_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_22.V.104.218.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_22.V.104.218.h
new file mode 100644 (file)
index 0000000..fd5a4d5
--- /dev/null
@@ -0,0 +1,93 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 22.V.104.218 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_22_V_104_218_H
+#define RGXCONFIG_KM_22_V_104_218_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 22
+#define RGX_BNC_KM_N 104
+#define RGX_BNC_KM_C 218
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_FBCDC (3U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (3U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_NUM_CLUSTERS (1U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (7U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (36U)
+#define RGX_FEATURE_ROGUEXE
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (1U)
+#define RGX_FEATURE_SINGLE_BIF
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (64U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TILE_SIZE_X (16U)
+#define RGX_FEATURE_TILE_SIZE_Y (16U)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XE_ARCHITECTURE (1U)
+
+#endif /* RGXCONFIG_KM_22_V_104_218_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_22.V.208.318.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_22.V.208.318.h
new file mode 100644 (file)
index 0000000..f98ba97
--- /dev/null
@@ -0,0 +1,93 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 22.V.208.318 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_22_V_208_318_H
+#define RGXCONFIG_KM_22_V_208_318_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 22
+#define RGX_BNC_KM_N 208
+#define RGX_BNC_KM_C 318
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_FBCDC (3U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (3U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_NUM_CLUSTERS (2U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (12U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (2U)
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (36U)
+#define RGX_FEATURE_ROGUEXE
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (1U)
+#define RGX_FEATURE_SINGLE_BIF
+#define RGX_FEATURE_SLC_BANKS (2U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (64U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TILE_SIZE_X (16U)
+#define RGX_FEATURE_TILE_SIZE_Y (16U)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XE_ARCHITECTURE (1U)
+
+#endif /* RGXCONFIG_KM_22_V_208_318_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_22.V.21.16.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_22.V.21.16.h
new file mode 100644 (file)
index 0000000..0563b1b
--- /dev/null
@@ -0,0 +1,88 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 22.V.21.16 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_22_V_21_16_H
+#define RGXCONFIG_KM_22_V_21_16_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 22
+#define RGX_BNC_KM_N 21
+#define RGX_BNC_KM_C 16
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_NUM_CLUSTERS (1U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (2U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (32U)
+#define RGX_FEATURE_ROGUEXE
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (1U)
+#define RGX_FEATURE_SINGLE_BIF
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (8U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TILE_SIZE_X (16U)
+#define RGX_FEATURE_TILE_SIZE_Y (16U)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XE_ARCHITECTURE (1U)
+
+#endif /* RGXCONFIG_KM_22_V_21_16_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.25.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.25.h
new file mode 100644 (file)
index 0000000..05d0a79
--- /dev/null
@@ -0,0 +1,88 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 22.V.54.25 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_22_V_54_25_H
+#define RGXCONFIG_KM_22_V_54_25_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 22
+#define RGX_BNC_KM_N 54
+#define RGX_BNC_KM_C 25
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_NUM_CLUSTERS (1U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (32U)
+#define RGX_FEATURE_ROGUEXE
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (1U)
+#define RGX_FEATURE_SINGLE_BIF
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (64U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TILE_SIZE_X (16U)
+#define RGX_FEATURE_TILE_SIZE_Y (16U)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XE_ARCHITECTURE (1U)
+
+#endif /* RGXCONFIG_KM_22_V_54_25_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.30.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.30.h
new file mode 100644 (file)
index 0000000..a16f199
--- /dev/null
@@ -0,0 +1,88 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 22.V.54.30 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_22_V_54_30_H
+#define RGXCONFIG_KM_22_V_54_30_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 22
+#define RGX_BNC_KM_N 54
+#define RGX_BNC_KM_C 30
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_NUM_CLUSTERS (1U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (32U)
+#define RGX_FEATURE_ROGUEXE
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (1U)
+#define RGX_FEATURE_SINGLE_BIF
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (64U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TILE_SIZE_X (16U)
+#define RGX_FEATURE_TILE_SIZE_Y (16U)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XE_ARCHITECTURE (1U)
+
+#endif /* RGXCONFIG_KM_22_V_54_30_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.330.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.330.h
new file mode 100644 (file)
index 0000000..810b58b
--- /dev/null
@@ -0,0 +1,93 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 22.V.54.330 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_22_V_54_330_H
+#define RGXCONFIG_KM_22_V_54_330_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 22
+#define RGX_BNC_KM_N 54
+#define RGX_BNC_KM_C 330
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_FBCDC (3U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (3U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_NUM_CLUSTERS (1U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (32U)
+#define RGX_FEATURE_ROGUEXE
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (1U)
+#define RGX_FEATURE_SINGLE_BIF
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (64U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TILE_SIZE_X (16U)
+#define RGX_FEATURE_TILE_SIZE_Y (16U)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XE_ARCHITECTURE (1U)
+
+#endif /* RGXCONFIG_KM_22_V_54_330_H */
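
[Editor's note] Each of these generated headers carries "B.V.N.C Validation defines" (RGX_BNC_KM_B/N/C) alongside the feature defines. The minimal standalone sketch below only illustrates the idea of checking a core's reported B.N.C against the compiled-in values; the constants are copied from the 22.V.54.330 header above, while the helper name and the way the runtime values are obtained are assumptions, not the driver's actual API.

#include <stdbool.h>
#include <stdio.h>

/* In-tree these come from the selected rgxconfig_km_*.h header. */
#define RGX_BNC_KM_B 22
#define RGX_BNC_KM_N 54
#define RGX_BNC_KM_C 330

/* Returns true when the core's B.N.C matches the kernel-side config. */
static bool rgx_bnc_matches(unsigned int b, unsigned int n, unsigned int c)
{
	return b == RGX_BNC_KM_B && n == RGX_BNC_KM_N && c == RGX_BNC_KM_C;
}

int main(void)
{
	/* Stand-ins for values that would be read from the core ID registers. */
	unsigned int b = 22, n = 54, c = 330;

	printf("BVNC %u.V.%u.%u %s the compiled-in config\n", b, n, c,
	       rgx_bnc_matches(b, n, c) ? "matches" : "does not match");
	return 0;
}
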
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.38.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_22.V.54.38.h
new file mode 100644 (file)
index 0000000..84f3454
--- /dev/null
@@ -0,0 +1,88 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 22.V.54.38 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_22_V_54_38_H
+#define RGXCONFIG_KM_22_V_54_38_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 22
+#define RGX_BNC_KM_N 54
+#define RGX_BNC_KM_C 38
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_NUM_CLUSTERS (1U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (36U)
+#define RGX_FEATURE_ROGUEXE
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (1U)
+#define RGX_FEATURE_SINGLE_BIF
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (64U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TILE_SIZE_X (16U)
+#define RGX_FEATURE_TILE_SIZE_Y (16U)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XE_ARCHITECTURE (1U)
+
+#endif /* RGXCONFIG_KM_22_V_54_38_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_24.V.104.504.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_24.V.104.504.h
new file mode 100644 (file)
index 0000000..bd1d4de
--- /dev/null
@@ -0,0 +1,97 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 24.V.104.504 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_24_V_104_504_H
+#define RGXCONFIG_KM_24_V_104_504_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 24
+#define RGX_BNC_KM_N 104
+#define RGX_BNC_KM_C 504
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_FASTRENDER_DM
+#define RGX_FEATURE_FBCDC (3U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (3U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_IRQ_PER_OS
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_NUM_CLUSTERS (1U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (6U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_PBE_CHECKSUM_2D
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (36U)
+#define RGX_FEATURE_ROGUEXE
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U)
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (64U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TDM_PDS_CHECKSUM
+#define RGX_FEATURE_TILE_SIZE_X (16U)
+#define RGX_FEATURE_TILE_SIZE_Y (16U)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XE_ARCHITECTURE (1U)
+#define RGX_FEATURE_XE_MEMORY_HIERARCHY
+
+#endif /* RGXCONFIG_KM_24_V_104_504_H */
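
[Editor's note] The file names and include guards encode the full B.V.N.C (e.g. 24.V.104.504). A common way to carry such an ID as a single value is to pack the four fields into a 64-bit integer; the exact packing used by the DDK is not shown in this hunk, so the 16-bit field widths and macro names below are purely illustrative.

#include <stdint.h>
#include <stdio.h>

/* Assumed 16-bits-per-field packing: B | V | N | C, most significant first. */
#define BVNC_SHIFT_B 48
#define BVNC_SHIFT_V 32
#define BVNC_SHIFT_N 16
#define BVNC_SHIFT_C 0

static uint64_t bvnc_pack(uint64_t b, uint64_t v, uint64_t n, uint64_t c)
{
	return (b << BVNC_SHIFT_B) | (v << BVNC_SHIFT_V) |
	       (n << BVNC_SHIFT_N) | (c << BVNC_SHIFT_C);
}

int main(void)
{
	uint64_t id = bvnc_pack(24, 0, 104, 504); /* 24.V.104.504, V left as 0 */

	printf("packed BVNC: 0x%016llx\n", (unsigned long long)id);
	printf("N field:     %llu\n",
	       (unsigned long long)((id >> BVNC_SHIFT_N) & 0xffffULL));
	return 0;
}
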
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_24.V.208.504.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_24.V.208.504.h
new file mode 100644 (file)
index 0000000..ee4a2a5
--- /dev/null
@@ -0,0 +1,97 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 24.V.208.504 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_24_V_208_504_H
+#define RGXCONFIG_KM_24_V_208_504_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 24
+#define RGX_BNC_KM_N 208
+#define RGX_BNC_KM_C 504
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_FASTRENDER_DM
+#define RGX_FEATURE_FBCDC (3U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (3U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_IRQ_PER_OS
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_NUM_CLUSTERS (2U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (12U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (2U)
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_PBE_CHECKSUM_2D
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (36U)
+#define RGX_FEATURE_ROGUEXE
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U)
+#define RGX_FEATURE_SLC_BANKS (2U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TDM_PDS_CHECKSUM
+#define RGX_FEATURE_TILE_SIZE_X (16U)
+#define RGX_FEATURE_TILE_SIZE_Y (16U)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XE_ARCHITECTURE (1U)
+#define RGX_FEATURE_XE_MEMORY_HIERARCHY
+
+#endif /* RGXCONFIG_KM_24_V_208_504_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_24.V.208.505.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_24.V.208.505.h
new file mode 100644 (file)
index 0000000..9d82a54
--- /dev/null
@@ -0,0 +1,97 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 24.V.208.505 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_24_V_208_505_H
+#define RGXCONFIG_KM_24_V_208_505_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 24
+#define RGX_BNC_KM_N 208
+#define RGX_BNC_KM_C 505
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_FASTRENDER_DM
+#define RGX_FEATURE_FBCDC (3U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (3U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_IRQ_PER_OS
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_NUM_CLUSTERS (2U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (12U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (2U)
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_PBE_CHECKSUM_2D
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (36U)
+#define RGX_FEATURE_ROGUEXE
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U)
+#define RGX_FEATURE_SLC_BANKS (2U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TDM_PDS_CHECKSUM
+#define RGX_FEATURE_TILE_SIZE_X (16U)
+#define RGX_FEATURE_TILE_SIZE_Y (16U)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XE_ARCHITECTURE (1U)
+#define RGX_FEATURE_XE_MEMORY_HIERARCHY
+
+#endif /* RGXCONFIG_KM_24_V_208_505_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_24.V.54.204.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_24.V.54.204.h
new file mode 100644 (file)
index 0000000..35d2676
--- /dev/null
@@ -0,0 +1,97 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 24.V.54.204 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_24_V_54_204_H
+#define RGXCONFIG_KM_24_V_54_204_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 24
+#define RGX_BNC_KM_N 54
+#define RGX_BNC_KM_C 204
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_FASTRENDER_DM
+#define RGX_FEATURE_FBCDC (3U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (3U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_IRQ_PER_OS
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_NUM_CLUSTERS (1U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_PBE_CHECKSUM_2D
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (36U)
+#define RGX_FEATURE_ROGUEXE
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U)
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (64U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TDM_PDS_CHECKSUM
+#define RGX_FEATURE_TILE_SIZE_X (16U)
+#define RGX_FEATURE_TILE_SIZE_Y (16U)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XE_ARCHITECTURE (1U)
+#define RGX_FEATURE_XE_MEMORY_HIERARCHY
+
+#endif /* RGXCONFIG_KM_24_V_54_204_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_29.V.108.208.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_29.V.108.208.h
new file mode 100644 (file)
index 0000000..f4c1bb5
--- /dev/null
@@ -0,0 +1,100 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 29.V.108.208 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_29_V_108_208_H
+#define RGXCONFIG_KM_29_V_108_208_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 29
+#define RGX_BNC_KM_N 108
+#define RGX_BNC_KM_C 208
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_FASTRENDER_DM
+#define RGX_FEATURE_FBCDC (4U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (4U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_IRQ_PER_OS
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_NUM_CLUSTERS (2U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (2U)
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_PBE_CHECKSUM_2D
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (36U)
+#define RGX_FEATURE_ROGUEXE
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U)
+#define RGX_FEATURE_SLC_BANKS (2U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TDM_PDS_CHECKSUM
+#define RGX_FEATURE_TILE_SIZE_X (16U)
+#define RGX_FEATURE_TILE_SIZE_Y (16U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XE_ARCHITECTURE (1U)
+#define RGX_FEATURE_XE_MEMORY_HIERARCHY
+
+#endif /* RGXCONFIG_KM_29_V_108_208_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_29.V.52.202.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_29.V.52.202.h
new file mode 100644 (file)
index 0000000..a7ee725
--- /dev/null
@@ -0,0 +1,100 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 29.V.52.202 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_29_V_52_202_H
+#define RGXCONFIG_KM_29_V_52_202_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 29
+#define RGX_BNC_KM_N 52
+#define RGX_BNC_KM_C 202
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_FASTRENDER_DM
+#define RGX_FEATURE_FBCDC (4U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (4U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_IRQ_PER_OS
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_NUM_CLUSTERS (1U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (2U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_PBE_CHECKSUM_2D
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (36U)
+#define RGX_FEATURE_ROGUEXE
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U)
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (16U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TDM_PDS_CHECKSUM
+#define RGX_FEATURE_TILE_SIZE_X (16U)
+#define RGX_FEATURE_TILE_SIZE_Y (16U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XE_ARCHITECTURE (1U)
+#define RGX_FEATURE_XE_MEMORY_HIERARCHY
+
+#endif /* RGXCONFIG_KM_29_V_52_202_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_33.V.11.3.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_33.V.11.3.h
new file mode 100644 (file)
index 0000000..acc75a0
--- /dev/null
@@ -0,0 +1,91 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 33.V.11.3 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_33_V_11_3_H
+#define RGXCONFIG_KM_33_V_11_3_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 33
+#define RGX_BNC_KM_N 11
+#define RGX_BNC_KM_C 3
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_IRQ_PER_OS
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_NUM_CLUSTERS (1U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (1U)
+#define RGX_FEATURE_NUM_OSIDS (2U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (36U)
+#define RGX_FEATURE_ROGUEXE
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U)
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_CONFIGURABLE /* Specifies the SLC is */
+                                          /* customer-configurable. True SLC */
+                                          /* size must be sourced from */
+                                          /* register. */
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (2U)
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TILE_SIZE_X (16U)
+#define RGX_FEATURE_TILE_SIZE_Y (16U)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XE_ARCHITECTURE (1U)
+#define RGX_FEATURE_XE_MEMORY_HIERARCHY
+
+#endif /* RGXCONFIG_KM_33_V_11_3_H */
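
[Editor's note] The 33.V.11.3 header above defines RGX_FEATURE_SLC_SIZE_CONFIGURABLE with a comment stating that the true SLC size must be sourced from a register rather than the compile-time RGX_FEATURE_SLC_SIZE_IN_KILOBYTES value. The sketch below shows that fallback pattern in isolation; the parameter standing in for the register-decoded size, and the helper name, are assumptions since the register layout is not part of this hunk.

#include <stdio.h>

#define RGX_FEATURE_SLC_SIZE_CONFIGURABLE      /* bare define, as in the header */
#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (2U)

/*
 * reg_slc_size_kb stands in for a value decoded from whatever register
 * reports the real SLC size; 0 means "not available".
 */
static unsigned int effective_slc_size_kb(unsigned int reg_slc_size_kb)
{
#ifdef RGX_FEATURE_SLC_SIZE_CONFIGURABLE
	return reg_slc_size_kb ? reg_slc_size_kb
			       : RGX_FEATURE_SLC_SIZE_IN_KILOBYTES;
#else
	return RGX_FEATURE_SLC_SIZE_IN_KILOBYTES;
#endif
}

int main(void)
{
	printf("effective SLC size: %u KB\n", effective_slc_size_kb(128));
	return 0;
}
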
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_33.V.22.1.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_33.V.22.1.h
new file mode 100644 (file)
index 0000000..80a5d3c
--- /dev/null
@@ -0,0 +1,93 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 33.V.22.1 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_33_V_22_1_H
+#define RGXCONFIG_KM_33_V_22_1_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 33
+#define RGX_BNC_KM_N 22
+#define RGX_BNC_KM_C 1
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_IRQ_PER_OS
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_NUM_CLUSTERS (1U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (2U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (36U)
+#define RGX_FEATURE_PM_MMU_VFP
+#define RGX_FEATURE_ROGUEXE
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U)
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (16U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TILE_SIZE_X (16U)
+#define RGX_FEATURE_TILE_SIZE_Y (16U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XE_ARCHITECTURE (1U)
+#define RGX_FEATURE_XE_MEMORY_HIERARCHY
+
+#endif /* RGXCONFIG_KM_33_V_22_1_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_36.V.104.182.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_36.V.104.182.h
new file mode 100644 (file)
index 0000000..0a6cdb0
--- /dev/null
@@ -0,0 +1,105 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 36.V.104.182 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_36_V_104_182_H
+#define RGXCONFIG_KM_36_V_104_182_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 36
+#define RGX_BNC_KM_N 104
+#define RGX_BNC_KM_C 182
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_FBCDC (50U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (50U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (7U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GPU_MULTICORE_SUPPORT
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_IRQ_PER_OS
+#define RGX_FEATURE_LAYOUT_MARS (1U)
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_NUM_CLUSTERS (1U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (6U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (36U)
+#define RGX_FEATURE_ROGUEXE
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U)
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_CONFIGURABLE /* Specifies the SLC is */
+                                          /* customer-configurable. True SLC */
+                                          /* size must be sourced from */
+                                          /* register. */
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (16U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TILE_SIZE_X (16U)
+#define RGX_FEATURE_TILE_SIZE_Y (16U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XE_ARCHITECTURE (1U)
+#define RGX_FEATURE_XE_MEMORY_HIERARCHY
+#define RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH (19U)
+#define RGX_FEATURE_XPU_MAX_SLAVES (3U)
+#define RGX_FEATURE_XPU_REGISTER_BROADCAST (1U)
+
+#endif /* RGXCONFIG_KM_36_V_104_182_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_36.V.104.183.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_36.V.104.183.h
new file mode 100644 (file)
index 0000000..6de1c1b
--- /dev/null
@@ -0,0 +1,105 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 36.V.104.183 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_36_V_104_183_H
+#define RGXCONFIG_KM_36_V_104_183_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 36
+#define RGX_BNC_KM_N 104
+#define RGX_BNC_KM_C 183
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_FBCDC (50U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (50U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (7U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GPU_MULTICORE_SUPPORT
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_IRQ_PER_OS
+#define RGX_FEATURE_LAYOUT_MARS (1U)
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_NUM_CLUSTERS (1U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (6U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (36U)
+#define RGX_FEATURE_ROGUEXE
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U)
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_CONFIGURABLE /* Specifies the SLC is */
+                                          /* customer-configurable. True SLC */
+                                          /* size must be sourced from */
+                                          /* register. */
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (16U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TILE_SIZE_X (16U)
+#define RGX_FEATURE_TILE_SIZE_Y (16U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XE_ARCHITECTURE (1U)
+#define RGX_FEATURE_XE_MEMORY_HIERARCHY
+#define RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH (19U)
+#define RGX_FEATURE_XPU_MAX_SLAVES (3U)
+#define RGX_FEATURE_XPU_REGISTER_BROADCAST (1U)
+
+#endif /* RGXCONFIG_KM_36_V_104_183_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_36.V.104.796.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_36.V.104.796.h
new file mode 100644 (file)
index 0000000..9fe1913
--- /dev/null
@@ -0,0 +1,109 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 36.V.104.796 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_36_V_104_796_H
+#define RGXCONFIG_KM_36_V_104_796_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 36
+#define RGX_BNC_KM_N 104
+#define RGX_BNC_KM_C 796
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_ECC_RAMS (2U)
+#define RGX_FEATURE_FBCDC (50U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (50U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (7U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GPU_MULTICORE_SUPPORT
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_IRQ_PER_OS
+#define RGX_FEATURE_LAYOUT_MARS (1U)
+#define RGX_FEATURE_NUM_CLUSTERS (1U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (6U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (36U)
+#define RGX_FEATURE_RISCV_FW_PROCESSOR
+#define RGX_FEATURE_ROGUEXE
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U)
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_CONFIGURABLE /* Specifies the SLC is */
+                                          /* customer-configurable. True SLC */
+                                          /* size must be sourced from */
+                                          /* register. */
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (16U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TILE_REGION_PROTECTION
+#define RGX_FEATURE_TILE_SIZE_X (16U)
+#define RGX_FEATURE_TILE_SIZE_Y (16U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_WATCHDOG_TIMER
+#define RGX_FEATURE_WORKGROUP_PROTECTION
+#define RGX_FEATURE_XE_ARCHITECTURE (1U)
+#define RGX_FEATURE_XE_MEMORY_HIERARCHY
+#define RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH (19U)
+#define RGX_FEATURE_XPU_MAX_SLAVES (3U)
+#define RGX_FEATURE_XPU_REGISTER_BROADCAST (1U)
+
+#endif /* RGXCONFIG_KM_36_V_104_796_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_36.V.54.103.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_36.V.54.103.h
new file mode 100644
index 0000000..bb77353
--- /dev/null
@@ -0,0 +1,101 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 36.V.54.103 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_36_V_54_103_H
+#define RGXCONFIG_KM_36_V_54_103_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 36
+#define RGX_BNC_KM_N 54
+#define RGX_BNC_KM_C 103
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_FBCDC (50U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (50U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (7U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GPU_MULTICORE_SUPPORT
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_NUM_CLUSTERS (1U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (36U)
+#define RGX_FEATURE_ROGUEXE
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U)
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_CONFIGURABLE /* Specifies the SLC is */
+                                          /* customer-configurable. True SLC */
+                                          /* size must be sourced from */
+                                          /* register. */
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (16U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TILE_SIZE_X (16U)
+#define RGX_FEATURE_TILE_SIZE_Y (16U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XE_ARCHITECTURE (1U)
+#define RGX_FEATURE_XE_MEMORY_HIERARCHY
+
+#endif /* RGXCONFIG_KM_36_V_54_103_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_36.V.54.182.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_36.V.54.182.h
new file mode 100644
index 0000000..cfbf51f
--- /dev/null
@@ -0,0 +1,105 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 36.V.54.182 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_36_V_54_182_H
+#define RGXCONFIG_KM_36_V_54_182_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 36
+#define RGX_BNC_KM_N 54
+#define RGX_BNC_KM_C 182
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_FBCDC (50U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (50U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (7U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GPU_MULTICORE_SUPPORT
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_IRQ_PER_OS
+#define RGX_FEATURE_LAYOUT_MARS (1U)
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_NUM_CLUSTERS (1U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (36U)
+#define RGX_FEATURE_ROGUEXE
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U)
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_CONFIGURABLE /* Specifies the SLC is */
+                                          /* customer-configurable. True SLC */
+                                          /* size must be sourced from */
+                                          /* register. */
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (16U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TILE_SIZE_X (16U)
+#define RGX_FEATURE_TILE_SIZE_Y (16U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XE_ARCHITECTURE (1U)
+#define RGX_FEATURE_XE_MEMORY_HIERARCHY
+#define RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH (19U)
+#define RGX_FEATURE_XPU_MAX_SLAVES (3U)
+#define RGX_FEATURE_XPU_REGISTER_BROADCAST (1U)
+
+#endif /* RGXCONFIG_KM_36_V_54_182_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_36.V.54.183.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_36.V.54.183.h
new file mode 100644
index 0000000..244eac0
--- /dev/null
@@ -0,0 +1,105 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 36.V.54.183 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_36_V_54_183_H
+#define RGXCONFIG_KM_36_V_54_183_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 36
+#define RGX_BNC_KM_N 54
+#define RGX_BNC_KM_C 183
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_FBCDC (50U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (50U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (7U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GPU_MULTICORE_SUPPORT
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_IRQ_PER_OS
+#define RGX_FEATURE_LAYOUT_MARS (1U)
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_NUM_CLUSTERS (1U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (36U)
+#define RGX_FEATURE_ROGUEXE
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U)
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_CONFIGURABLE /* Specifies the SLC is */
+                                          /* customer-configurable. True SLC */
+                                          /* size must be sourced from */
+                                          /* register. */
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (16U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TILE_SIZE_X (16U)
+#define RGX_FEATURE_TILE_SIZE_Y (16U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XE_ARCHITECTURE (1U)
+#define RGX_FEATURE_XE_MEMORY_HIERARCHY
+#define RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH (19U)
+#define RGX_FEATURE_XPU_MAX_SLAVES (3U)
+#define RGX_FEATURE_XPU_REGISTER_BROADCAST (1U)
+
+#endif /* RGXCONFIG_KM_36_V_54_183_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_4.V.2.51.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_4.V.2.51.h
new file mode 100644
index 0000000..b12f3c2
--- /dev/null
@@ -0,0 +1,89 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 4.V.2.51 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_4_V_2_51_H
+#define RGXCONFIG_KM_4_V_2_51_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 4
+#define RGX_BNC_KM_N 2
+#define RGX_BNC_KM_C 51
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_DYNAMIC_DUST_POWER
+#define RGX_FEATURE_FBCDC_ALGORITHM (2U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (2U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_META_COREMEM_SIZE (32U)
+#define RGX_FEATURE_NUM_CLUSTERS (2U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PERF_COUNTER_BATCH
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_TILE_SIZE_X (32U)
+#define RGX_FEATURE_TILE_SIZE_Y (32U)
+#define RGX_FEATURE_TLA
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE
+
+#endif /* RGXCONFIG_KM_4_V_2_51_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_4.V.2.58.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_4.V.2.58.h
new file mode 100644
index 0000000..8a67d49
--- /dev/null
@@ -0,0 +1,91 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 4.V.2.58 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_4_V_2_58_H
+#define RGXCONFIG_KM_4_V_2_58_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 4
+#define RGX_BNC_KM_N 2
+#define RGX_BNC_KM_C 58
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_DYNAMIC_DUST_POWER
+#define RGX_FEATURE_FBCDC_ALGORITHM (2U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (2U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_META_COREMEM_SIZE (32U)
+#define RGX_FEATURE_NUM_CLUSTERS (2U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PERF_COUNTER_BATCH
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_TILE_SIZE_X (32U)
+#define RGX_FEATURE_TILE_SIZE_Y (32U)
+#define RGX_FEATURE_TLA
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE
+
+#endif /* RGXCONFIG_KM_4_V_2_58_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_4.V.4.55.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_4.V.4.55.h
new file mode 100644
index 0000000..61c165e
--- /dev/null
@@ -0,0 +1,88 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 4.V.4.55 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_4_V_4_55_H
+#define RGXCONFIG_KM_4_V_4_55_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 4
+#define RGX_BNC_KM_N 4
+#define RGX_BNC_KM_C 55
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_DYNAMIC_DUST_POWER
+#define RGX_FEATURE_FBCDC_ALGORITHM (2U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (2U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_META_COREMEM_SIZE (32U)
+#define RGX_FEATURE_NUM_CLUSTERS (4U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PERF_COUNTER_BATCH
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_TILE_SIZE_X (32U)
+#define RGX_FEATURE_TILE_SIZE_Y (32U)
+#define RGX_FEATURE_TLA
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE
+
+#endif /* RGXCONFIG_KM_4_V_4_55_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_4.V.6.62.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_4.V.6.62.h
new file mode 100644
index 0000000..74aeef7
--- /dev/null
@@ -0,0 +1,92 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 4.V.6.62 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_4_V_6_62_H
+#define RGXCONFIG_KM_4_V_6_62_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 4
+#define RGX_BNC_KM_N 6
+#define RGX_BNC_KM_C 62
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_DYNAMIC_DUST_POWER
+#define RGX_FEATURE_FBCDC_ALGORITHM (2U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (2U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_META_COREMEM_SIZE (32U)
+#define RGX_FEATURE_NUM_CLUSTERS (6U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (2U)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PERF_COUNTER_BATCH
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_SLC_BANKS (4U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_TILE_SIZE_X (32U)
+#define RGX_FEATURE_TILE_SIZE_Y (32U)
+#define RGX_FEATURE_TLA
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE
+
+#endif /* RGXCONFIG_KM_4_V_6_62_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_5.V.1.46.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_5.V.1.46.h
new file mode 100644
index 0000000..c2698fe
--- /dev/null
@@ -0,0 +1,84 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 5.V.1.46 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_5_V_1_46_H
+#define RGXCONFIG_KM_5_V_1_46_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 5
+#define RGX_BNC_KM_N 1
+#define RGX_BNC_KM_C 46
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_DYNAMIC_DUST_POWER
+#define RGX_FEATURE_FBCDC_ALGORITHM (2U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_META (LTP217)
+#define RGX_FEATURE_META_COREMEM_SIZE (0U)
+#define RGX_FEATURE_NUM_CLUSTERS (1U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (1U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_ROGUEXE
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (16U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_TILE_SIZE_X (32U)
+#define RGX_FEATURE_TILE_SIZE_Y (32U)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XE_ARCHITECTURE (1U)
+
+#endif /* RGXCONFIG_KM_5_V_1_46_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_6.V.4.35.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/configs/rgxconfig_km_6.V.4.35.h
new file mode 100644
index 0000000..2bd20b2
--- /dev/null
@@ -0,0 +1,89 @@
+/*************************************************************************/ /*!
+@Title          RGX Configuration for BVNC 6.V.4.35 (kernel defines)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_6_V_4_35_H
+#define RGXCONFIG_KM_6_V_4_35_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 6
+#define RGX_BNC_KM_N 4
+#define RGX_BNC_KM_C 35
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_DYNAMIC_DUST_POWER
+#define RGX_FEATURE_FBCDC_ALGORITHM (2U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (2U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_META_COREMEM_SIZE (32U)
+#define RGX_FEATURE_NUM_CLUSTERS (4U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PERF_COUNTER_BATCH
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_TILE_SIZE_X (32U)
+#define RGX_FEATURE_TILE_SIZE_Y (32U)
+#define RGX_FEATURE_TLA
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE
+
+#endif /* RGXCONFIG_KM_6_V_4_35_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_1.39.4.19.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_1.39.4.19.h
new file mode 100644
index 0000000..edd7ec5
--- /dev/null
@@ -0,0 +1,71 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 1.39.4.19
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_1_39_4_19_H
+#define RGXCORE_KM_1_39_4_19_H
+
+/* Automatically generated file (04/10/2021 09:01:49): Do not edit manually */
+/* CS: @2784771 */
+
+/******************************************************************************
+ * BVNC = 1.39.4.19
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 39
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 19
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_38344
+#define FIX_HW_BRN_42321
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+
+
+
+#endif /* RGXCORE_KM_1_39_4_19_H */
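Unlike the rgxconfig_km_* feature headers, the rgxcore_km_<B.V.N.C>.h headers pin one exact BVNC and record which hardware errata (FIX_HW_BRN_*) and enhancements (HW_ERN_*) apply to that revision, so code can gate workarounds at compile time. A small illustrative program follows; it is not driver code, assumes the header is on the include path, and uses only the RGX_BVNC_KM_* and FIX_HW_BRN_* macros defined above.

/* Illustrative only: report which erratum workarounds the 1.39.4.19 core
 * header requests, based on its FIX_HW_BRN_* defines. */
#include <stdio.h>
#include "rgxcore_km_1.39.4.19.h"

int main(void)
{
#if defined(FIX_HW_BRN_38344)
	printf("core %d.%d.%d.%d: apply BRN_38344 workaround\n",
	       RGX_BVNC_KM_B, RGX_BVNC_KM_V, RGX_BVNC_KM_N, RGX_BVNC_KM_C);
#endif
#if defined(FIX_HW_BRN_42321)
	printf("core %d.%d.%d.%d: apply BRN_42321 workaround\n",
	       RGX_BVNC_KM_B, RGX_BVNC_KM_V, RGX_BVNC_KM_N, RGX_BVNC_KM_C);
#endif
	return 0;
}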
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_1.75.2.30.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_1.75.2.30.h
new file mode 100644
index 0000000..ab60c2a
--- /dev/null
@@ -0,0 +1,70 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 1.75.2.30
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_1_75_2_30_H
+#define RGXCORE_KM_1_75_2_30_H
+
+/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* CS: @2309075 */
+
+/******************************************************************************
+ * BVNC = 1.75.2.30
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 75
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 30
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_42321
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+
+
+
+#endif /* RGXCORE_KM_1_75_2_30_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_1.82.4.5.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_1.82.4.5.h
new file mode 100644
index 0000000..7629672
--- /dev/null
@@ -0,0 +1,69 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 1.82.4.5
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_1_82_4_5_H
+#define RGXCORE_KM_1_82_4_5_H
+
+/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* CS: @2503111 */
+
+/******************************************************************************
+ * BVNC = 1.82.4.5
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 82
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 5
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+
+
+
+#endif /* RGXCORE_KM_1_82_4_5_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_15.5.1.64.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_15.5.1.64.h
new file mode 100644 (file)
index 0000000..b96f1e1
--- /dev/null
@@ -0,0 +1,71 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 15.5.1.64
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_15_5_1_64_H
+#define RGXCORE_KM_15_5_1_64_H
+
+/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* CS: @3846532 */
+
+/******************************************************************************
+ * BVNC = 15.5.1.64
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 15
+#define RGX_BVNC_KM_V 5
+#define RGX_BVNC_KM_N 1
+#define RGX_BVNC_KM_C 64
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_64502
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42606
+
+
+
+#endif /* RGXCORE_KM_15_5_1_64_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.102.54.38.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.102.54.38.h
new file mode 100644 (file)
index 0000000..7cd595a
--- /dev/null
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 22.102.54.38
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_22_102_54_38_H
+#define RGXCORE_KM_22_102_54_38_H
+
+/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* CS: @4831550 */
+
+/******************************************************************************
+ * BVNC = 22.102.54.38
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 102
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 38
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_63553
+#define FIX_HW_BRN_64502
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42606
+#define HW_ERN_57596
+#define HW_ERN_61389
+
+
+
+#endif /* RGXCORE_KM_22_102_54_38_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.104.208.318.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.104.208.318.h
new file mode 100644 (file)
index 0000000..a3f5e2d
--- /dev/null
@@ -0,0 +1,75 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 22.104.208.318
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_22_104_208_318_H
+#define RGXCORE_KM_22_104_208_318_H
+
+/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* CS: @5124208 */
+
+/******************************************************************************
+ * BVNC = 22.104.208.318
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 104
+#define RGX_BVNC_KM_N 208
+#define RGX_BVNC_KM_C 318
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_63553
+#define FIX_HW_BRN_64502
+#define FIX_HW_BRN_65101
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42290
+#define HW_ERN_42606
+#define HW_ERN_61389
+
+
+
+#endif /* RGXCORE_KM_22_104_208_318_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.105.208.318.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.105.208.318.h
new file mode 100644 (file)
index 0000000..52de9fc
--- /dev/null
@@ -0,0 +1,75 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 22.105.208.318
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_22_105_208_318_H
+#define RGXCORE_KM_22_105_208_318_H
+
+/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* CS: @5324554 */
+
+/******************************************************************************
+ * BVNC = 22.105.208.318
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 105
+#define RGX_BVNC_KM_N 208
+#define RGX_BVNC_KM_C 318
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_63553
+#define FIX_HW_BRN_64502
+#define FIX_HW_BRN_65101
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42290
+#define HW_ERN_42606
+#define HW_ERN_61389
+
+
+
+#endif /* RGXCORE_KM_22_105_208_318_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.30.54.25.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.30.54.25.h
new file mode 100644 (file)
index 0000000..aaf0ef7
--- /dev/null
@@ -0,0 +1,76 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 22.30.54.25
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_22_30_54_25_H
+#define RGXCORE_KM_22_30_54_25_H
+
+/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* CS: @4086500 */
+
+/******************************************************************************
+ * BVNC = 22.30.54.25
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 30
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 25
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_61450
+#define FIX_HW_BRN_63553
+#define FIX_HW_BRN_64502
+#define FIX_HW_BRN_65273
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42606
+#define HW_ERN_57596
+
+
+
+#endif /* RGXCORE_KM_22_30_54_25_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.40.54.30.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.40.54.30.h
new file mode 100644 (file)
index 0000000..fe9974c
--- /dev/null
@@ -0,0 +1,76 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 22.40.54.30
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_22_40_54_30_H
+#define RGXCORE_KM_22_40_54_30_H
+
+/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* CS: @4094817 */
+
+/******************************************************************************
+ * BVNC = 22.40.54.30
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 40
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 30
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_61450
+#define FIX_HW_BRN_63553
+#define FIX_HW_BRN_64502
+#define FIX_HW_BRN_65273
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42606
+#define HW_ERN_57596
+
+
+
+#endif /* RGXCORE_KM_22_40_54_30_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.46.54.330.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.46.54.330.h
new file mode 100644 (file)
index 0000000..fd40adc
--- /dev/null
@@ -0,0 +1,77 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 22.46.54.330
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_22_46_54_330_H
+#define RGXCORE_KM_22_46_54_330_H
+
+/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* CS: @4136505 */
+
+/******************************************************************************
+ * BVNC = 22.46.54.330
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 46
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 330
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_61450
+#define FIX_HW_BRN_63553
+#define FIX_HW_BRN_64502
+#define FIX_HW_BRN_65101
+#define FIX_HW_BRN_65273
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42290
+#define HW_ERN_42606
+
+
+
+#endif /* RGXCORE_KM_22_46_54_330_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.49.21.16.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.49.21.16.h
new file mode 100644 (file)
index 0000000..8183d38
--- /dev/null
@@ -0,0 +1,76 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 22.49.21.16
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_22_49_21_16_H
+#define RGXCORE_KM_22_49_21_16_H
+
+/* Automatically generated file (04/10/2021 09:01:49): Do not edit manually */
+/* CS: @4158766 */
+
+/******************************************************************************
+ * BVNC = 22.49.21.16
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 49
+#define RGX_BVNC_KM_N 21
+#define RGX_BVNC_KM_C 16
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_61450
+#define FIX_HW_BRN_63553
+#define FIX_HW_BRN_64502
+#define FIX_HW_BRN_65273
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42606
+#define HW_ERN_57596
+
+
+
+#endif /* RGXCORE_KM_22_49_21_16_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.67.54.30.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.67.54.30.h
new file mode 100644 (file)
index 0000000..e9253c4
--- /dev/null
@@ -0,0 +1,76 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 22.67.54.30
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_22_67_54_30_H
+#define RGXCORE_KM_22_67_54_30_H
+
+/* Automatically generated file (04/10/2021 09:01:49): Do not edit manually */
+/* CS: @4339986 */
+
+/******************************************************************************
+ * BVNC = 22.67.54.30
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 67
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 30
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_60084
+#define FIX_HW_BRN_63553
+#define FIX_HW_BRN_64502
+#define FIX_HW_BRN_65273
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42606
+#define HW_ERN_57596
+#define HW_ERN_61389
+
+
+
+#endif /* RGXCORE_KM_22_67_54_30_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.68.54.30.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.68.54.30.h
new file mode 100644 (file)
index 0000000..060261f
--- /dev/null
@@ -0,0 +1,75 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 22.68.54.30
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_22_68_54_30_H
+#define RGXCORE_KM_22_68_54_30_H
+
+/* Automatically generated file (04/10/2021 09:01:49): Do not edit manually */
+/* CS: @4339984 */
+
+/******************************************************************************
+ * BVNC = 22.68.54.30
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 68
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 30
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_63553
+#define FIX_HW_BRN_64502
+#define FIX_HW_BRN_65273
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42606
+#define HW_ERN_57596
+#define HW_ERN_61389
+
+
+
+#endif /* RGXCORE_KM_22_68_54_30_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.86.104.218.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.86.104.218.h
new file mode 100644 (file)
index 0000000..42aed9e
--- /dev/null
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 22.86.104.218
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_22_86_104_218_H
+#define RGXCORE_KM_22_86_104_218_H
+
+/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* CS: @4665024 */
+
+/******************************************************************************
+ * BVNC = 22.86.104.218
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 86
+#define RGX_BVNC_KM_N 104
+#define RGX_BVNC_KM_C 218
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_63553
+#define FIX_HW_BRN_64502
+#define FIX_HW_BRN_65101
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42606
+#define HW_ERN_61389
+
+
+
+#endif /* RGXCORE_KM_22_86_104_218_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.87.104.18.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_22.87.104.18.h
new file mode 100644 (file)
index 0000000..d3ed1e9
--- /dev/null
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 22.87.104.18
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_22_87_104_18_H
+#define RGXCORE_KM_22_87_104_18_H
+
+/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* CS: @4658768 */
+
+/******************************************************************************
+ * BVNC = 22.87.104.18
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 22
+#define RGX_BVNC_KM_V 87
+#define RGX_BVNC_KM_N 104
+#define RGX_BVNC_KM_C 18
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_63553
+#define FIX_HW_BRN_64502
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42606
+#define HW_ERN_57596
+#define HW_ERN_61389
+
+
+
+#endif /* RGXCORE_KM_22_87_104_18_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_24.50.208.504.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_24.50.208.504.h
new file mode 100644 (file)
index 0000000..c367ce7
--- /dev/null
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 24.50.208.504
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_24_50_208_504_H
+#define RGXCORE_KM_24_50_208_504_H
+
+/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* CS: @5086680 */
+
+/******************************************************************************
+ * BVNC = 24.50.208.504
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 24
+#define RGX_BVNC_KM_V 50
+#define RGX_BVNC_KM_N 208
+#define RGX_BVNC_KM_C 504
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_63553
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42290
+#define HW_ERN_42606
+#define HW_ERN_57596
+#define HW_ERN_66622
+
+
+
+#endif /* RGXCORE_KM_24_50_208_504_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_24.56.208.505.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_24.56.208.505.h
new file mode 100644 (file)
index 0000000..69704f1
--- /dev/null
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 24.56.208.505
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_24_56_208_505_H
+#define RGXCORE_KM_24_56_208_505_H
+
+/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* CS: @5203837 */
+
+/******************************************************************************
+ * BVNC = 24.56.208.505
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 24
+#define RGX_BVNC_KM_V 56
+#define RGX_BVNC_KM_N 208
+#define RGX_BVNC_KM_C 505
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_63553
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42290
+#define HW_ERN_42606
+#define HW_ERN_57596
+#define HW_ERN_66622
+
+
+
+#endif /* RGXCORE_KM_24_56_208_505_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_24.66.54.204.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_24.66.54.204.h
new file mode 100644 (file)
index 0000000..a580cb2
--- /dev/null
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 24.66.54.204
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_24_66_54_204_H
+#define RGXCORE_KM_24_66_54_204_H
+
+/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* CS: @5200207 */
+
+/******************************************************************************
+ * BVNC = 24.66.54.204
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 24
+#define RGX_BVNC_KM_V 66
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 204
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_63553
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42290
+#define HW_ERN_42606
+#define HW_ERN_57596
+#define HW_ERN_66622
+
+
+
+#endif /* RGXCORE_KM_24_66_54_204_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_24.67.104.504.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_24.67.104.504.h
new file mode 100644 (file)
index 0000000..743fd09
--- /dev/null
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 24.67.104.504
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_24_67_104_504_H
+#define RGXCORE_KM_24_67_104_504_H
+
+/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* CS: @5203838 */
+
+/******************************************************************************
+ * BVNC = 24.67.104.504
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 24
+#define RGX_BVNC_KM_V 67
+#define RGX_BVNC_KM_N 104
+#define RGX_BVNC_KM_C 504
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_63553
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42290
+#define HW_ERN_42606
+#define HW_ERN_57596
+#define HW_ERN_66622
+
+
+
+#endif /* RGXCORE_KM_24_67_104_504_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_29.14.108.208.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_29.14.108.208.h
new file mode 100644
index 0000000..b04ced1
--- /dev/null
@@ -0,0 +1,76 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 29.14.108.208
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_29_14_108_208_H
+#define RGXCORE_KM_29_14_108_208_H
+
+/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* CS: @5682134 */
+
+/******************************************************************************
+ * BVNC = 29.14.108.208
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 29
+#define RGX_BVNC_KM_V 14
+#define RGX_BVNC_KM_N 108
+#define RGX_BVNC_KM_C 208
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_63553
+#define FIX_HW_BRN_68186
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42290
+#define HW_ERN_42606
+#define HW_ERN_47025
+#define HW_ERN_57596
+#define HW_ERN_66622
+
+
+
+#endif /* RGXCORE_KM_29_14_108_208_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_29.19.52.202.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_29.19.52.202.h
new file mode 100644
index 0000000..42de65b
--- /dev/null
@@ -0,0 +1,76 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 29.19.52.202
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_29_19_52_202_H
+#define RGXCORE_KM_29_19_52_202_H
+
+/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* CS: @5691248 */
+
+/******************************************************************************
+ * BVNC = 29.19.52.202
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 29
+#define RGX_BVNC_KM_V 19
+#define RGX_BVNC_KM_N 52
+#define RGX_BVNC_KM_C 202
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_63553
+#define FIX_HW_BRN_68186
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42290
+#define HW_ERN_42606
+#define HW_ERN_47025
+#define HW_ERN_57596
+#define HW_ERN_66622
+
+
+
+#endif /* RGXCORE_KM_29_19_52_202_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_33.15.11.3.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_33.15.11.3.h
new file mode 100644
index 0000000..2ad607d
--- /dev/null
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 33.15.11.3
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_33_15_11_3_H
+#define RGXCORE_KM_33_15_11_3_H
+
+/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* CS: @5820045 */
+
+/******************************************************************************
+ * BVNC = 33.15.11.3
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 33
+#define RGX_BVNC_KM_V 15
+#define RGX_BVNC_KM_N 11
+#define RGX_BVNC_KM_C 3
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_63553
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42290
+#define HW_ERN_42606
+#define HW_ERN_47025
+#define HW_ERN_57596
+
+
+
+#endif /* RGXCORE_KM_33_15_11_3_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_33.8.22.1.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_33.8.22.1.h
new file mode 100644
index 0000000..367a0c1
--- /dev/null
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 33.8.22.1
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_33_8_22_1_H
+#define RGXCORE_KM_33_8_22_1_H
+
+/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* CS: @5544349 */
+
+/******************************************************************************
+ * BVNC = 33.8.22.1
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 33
+#define RGX_BVNC_KM_V 8
+#define RGX_BVNC_KM_N 22
+#define RGX_BVNC_KM_C 1
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_63553
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42290
+#define HW_ERN_42606
+#define HW_ERN_47025
+#define HW_ERN_57596
+
+
+
+#endif /* RGXCORE_KM_33_8_22_1_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_36.50.54.182.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_36.50.54.182.h
new file mode 100644
index 0000000..a55cd70
--- /dev/null
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 36.50.54.182
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_36_50_54_182_H
+#define RGXCORE_KM_36_50_54_182_H
+
+/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* CS: @5849603 */
+
+/******************************************************************************
+ * BVNC = 36.50.54.182
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 36
+#define RGX_BVNC_KM_V 50
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 182
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_63553
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42290
+#define HW_ERN_42606
+#define HW_ERN_47025
+#define HW_ERN_57596
+
+
+
+#endif /* RGXCORE_KM_36_50_54_182_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_36.52.104.182.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_36.52.104.182.h
new file mode 100644
index 0000000..a3f0919
--- /dev/null
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 36.52.104.182
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_36_52_104_182_H
+#define RGXCORE_KM_36_52_104_182_H
+
+/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* CS: @5849605 */
+
+/******************************************************************************
+ * BVNC = 36.52.104.182
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 36
+#define RGX_BVNC_KM_V 52
+#define RGX_BVNC_KM_N 104
+#define RGX_BVNC_KM_C 182
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_63553
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42290
+#define HW_ERN_42606
+#define HW_ERN_47025
+#define HW_ERN_57596
+
+
+
+#endif /* RGXCORE_KM_36_52_104_182_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_36.53.104.796.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_36.53.104.796.h
new file mode 100644
index 0000000..80e7050
--- /dev/null
@@ -0,0 +1,73 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 36.53.104.796
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_36_53_104_796_H
+#define RGXCORE_KM_36_53_104_796_H
+
+/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* CS: @5896094 */
+
+/******************************************************************************
+ * BVNC = 36.53.104.796
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 36
+#define RGX_BVNC_KM_V 53
+#define RGX_BVNC_KM_N 104
+#define RGX_BVNC_KM_C 796
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42290
+#define HW_ERN_42606
+#define HW_ERN_47025
+#define HW_ERN_57596
+
+
+
+#endif /* RGXCORE_KM_36_53_104_796_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_36.54.54.183.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_36.54.54.183.h
new file mode 100644
index 0000000..d694d1d
--- /dev/null
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 36.54.54.183
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_36_54_54_183_H
+#define RGXCORE_KM_36_54_54_183_H
+
+/* Automatically generated file (04/10/2021 09:01:51): Do not edit manually */
+/* CS: @5908021 */
+
+/******************************************************************************
+ * BVNC = 36.54.54.183
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 36
+#define RGX_BVNC_KM_V 54
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 183
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_63553
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42290
+#define HW_ERN_42606
+#define HW_ERN_47025
+#define HW_ERN_57596
+
+
+
+#endif /* RGXCORE_KM_36_54_54_183_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_36.55.54.103.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_36.55.54.103.h
new file mode 100644
index 0000000..1c47c48
--- /dev/null
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 36.55.54.103
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_36_55_54_103_H
+#define RGXCORE_KM_36_55_54_103_H
+
+/* Automatically generated file (04/10/2021 09:01:51): Do not edit manually */
+/* CS: @5908021 */
+
+/******************************************************************************
+ * BVNC = 36.55.54.103
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 36
+#define RGX_BVNC_KM_V 55
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 103
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_63553
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42290
+#define HW_ERN_42606
+#define HW_ERN_47025
+#define HW_ERN_57596
+
+
+
+#endif /* RGXCORE_KM_36_55_54_103_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_36.56.104.183.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_36.56.104.183.h
new file mode 100644
index 0000000..32c2c2e
--- /dev/null
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 36.56.104.183
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_36_56_104_183_H
+#define RGXCORE_KM_36_56_104_183_H
+
+/* Automatically generated file (04/10/2021 09:01:51): Do not edit manually */
+/* CS: @5942195 */
+
+/******************************************************************************
+ * BVNC = 36.56.104.183
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 36
+#define RGX_BVNC_KM_V 56
+#define RGX_BVNC_KM_N 104
+#define RGX_BVNC_KM_C 183
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_63553
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42290
+#define HW_ERN_42606
+#define HW_ERN_47025
+#define HW_ERN_57596
+
+
+
+#endif /* RGXCORE_KM_36_56_104_183_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_4.31.4.55.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_4.31.4.55.h
new file mode 100644
index 0000000..846af82
--- /dev/null
@@ -0,0 +1,73 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 4.31.4.55
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_4_31_4_55_H
+#define RGXCORE_KM_4_31_4_55_H
+
+/* Automatically generated file (04/10/2021 09:01:49): Do not edit manually */
+/* CS: @2919104 */
+
+/******************************************************************************
+ * BVNC = 4.31.4.55
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 4
+#define RGX_BVNC_KM_V 31
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 55
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_50767
+#define FIX_HW_BRN_63142
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42290
+#define HW_ERN_42606
+
+
+
+#endif /* RGXCORE_KM_4_31_4_55_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_4.40.2.51.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_4.40.2.51.h
new file mode 100644
index 0000000..c00a3e6
--- /dev/null
@@ -0,0 +1,73 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 4.40.2.51
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_4_40_2_51_H
+#define RGXCORE_KM_4_40_2_51_H
+
+/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* CS: @3254374 */
+
+/******************************************************************************
+ * BVNC = 4.40.2.51
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 4
+#define RGX_BVNC_KM_V 40
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 51
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_50767
+#define FIX_HW_BRN_63142
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42290
+#define HW_ERN_42606
+
+
+
+#endif /* RGXCORE_KM_4_40_2_51_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_4.43.6.62.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_4.43.6.62.h
new file mode 100644
index 0000000..2e24ca5
--- /dev/null
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 4.43.6.62
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_4_43_6_62_H
+#define RGXCORE_KM_4_43_6_62_H
+
+/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* CS: @3253129 */
+
+/******************************************************************************
+ * BVNC = 4.43.6.62
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 4
+#define RGX_BVNC_KM_V 43
+#define RGX_BVNC_KM_N 6
+#define RGX_BVNC_KM_C 62
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_50767
+#define FIX_HW_BRN_63142
+#define FIX_HW_BRN_64502
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42290
+#define HW_ERN_42606
+
+
+
+#endif /* RGXCORE_KM_4_43_6_62_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_4.45.2.58.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_4.45.2.58.h
new file mode 100644
index 0000000..47b619d
--- /dev/null
@@ -0,0 +1,73 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 4.45.2.58
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_4_45_2_58_H
+#define RGXCORE_KM_4_45_2_58_H
+
+/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* CS: @3547765 */
+
+/******************************************************************************
+ * BVNC = 4.45.2.58
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 4
+#define RGX_BVNC_KM_V 45
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 58
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_63142
+#define FIX_HW_BRN_64502
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42290
+#define HW_ERN_42606
+
+
+
+#endif /* RGXCORE_KM_4_45_2_58_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_4.46.6.62.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_4.46.6.62.h
new file mode 100644
index 0000000..7dd20c5
--- /dev/null
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 4.46.6.62
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_4_46_6_62_H
+#define RGXCORE_KM_4_46_6_62_H
+
+/* Automatically generated file (04/10/2021 09:01:49): Do not edit manually */
+/* CS: @4015666 */
+
+/******************************************************************************
+ * BVNC = 4.46.6.62
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 4
+#define RGX_BVNC_KM_V 46
+#define RGX_BVNC_KM_N 6
+#define RGX_BVNC_KM_C 62
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_50767
+#define FIX_HW_BRN_63142
+#define FIX_HW_BRN_64502
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42290
+#define HW_ERN_42606
+
+
+
+#endif /* RGXCORE_KM_4_46_6_62_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_5.9.1.46.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_5.9.1.46.h
new file mode 100644
index 0000000..6a028bc
--- /dev/null
@@ -0,0 +1,70 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 5.9.1.46
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_5_9_1_46_H
+#define RGXCORE_KM_5_9_1_46_H
+
+/* Automatically generated file (04/10/2021 09:01:49): Do not edit manually */
+/* CS: @2967148 */
+
+/******************************************************************************
+ * BVNC = 5.9.1.46
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 5
+#define RGX_BVNC_KM_V 9
+#define RGX_BVNC_KM_N 1
+#define RGX_BVNC_KM_C 46
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_38344
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+
+
+
+#endif /* RGXCORE_KM_5_9_1_46_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_6.34.4.35.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/cores/rgxcore_km_6.34.4.35.h
new file mode 100644 (file)
index 0000000..e7fb13f
--- /dev/null
@@ -0,0 +1,72 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 6.34.4.35
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_6_34_4_35_H
+#define RGXCORE_KM_6_34_4_35_H
+
+/* Automatically generated file (04/10/2021 09:01:49): Do not edit manually */
+/* CS: @3533654 */
+
+/******************************************************************************
+ * BVNC = 6.34.4.35
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 6
+#define RGX_BVNC_KM_V 34
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 35
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_63142
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42290
+#define HW_ERN_42606
+
+
+
+#endif /* RGXCORE_KM_6_34_4_35_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/rgx_bvnc_defs_km.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/rgx_bvnc_defs_km.h
new file mode 100644 (file)
index 0000000..0aa00be
--- /dev/null
@@ -0,0 +1,377 @@
+/*************************************************************************/ /*!
+@Title          Hardware definition file rgx_bvnc_defs_km.h
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/******************************************************************************
+ *                 Auto generated file by rgxbvnc_tablegen.py                 *
+ *                  This file should not be edited manually                   *
+ *****************************************************************************/
+
+#ifndef RGX_BVNC_DEFS_KM_H
+#define RGX_BVNC_DEFS_KM_H
+
+#include "img_types.h"
+#include "img_defs.h"
+
+#if defined(RGX_BVNC_DEFS_UM_H)
+#error "This file should not be included in conjunction with rgx_bvnc_defs_um.h"
+#endif
+
+#define BVNC_FIELD_WIDTH (16U)
+
+#define PVR_ARCH_NAME "rogue"
+
+
+/******************************************************************************
+ * Mask and bit-position macros for features without values
+ *****************************************************************************/
+
+#define        RGX_FEATURE_AXI_ACELITE_POS                                     (0U)
+#define        RGX_FEATURE_AXI_ACELITE_BIT_MASK                                (IMG_UINT64_C(0x0000000000000001))
+
+#define        RGX_FEATURE_CLUSTER_GROUPING_POS                                (1U)
+#define        RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK                           (IMG_UINT64_C(0x0000000000000002))
+
+#define        RGX_FEATURE_COMPUTE_POS                                         (2U)
+#define        RGX_FEATURE_COMPUTE_BIT_MASK                                    (IMG_UINT64_C(0x0000000000000004))
+
+#define        RGX_FEATURE_COMPUTE_MORTON_CAPABLE_POS                          (3U)
+#define        RGX_FEATURE_COMPUTE_MORTON_CAPABLE_BIT_MASK                     (IMG_UINT64_C(0x0000000000000008))
+
+#define        RGX_FEATURE_COMPUTE_ONLY_POS                                    (4U)
+#define        RGX_FEATURE_COMPUTE_ONLY_BIT_MASK                               (IMG_UINT64_C(0x0000000000000010))
+
+#define        RGX_FEATURE_COMPUTE_OVERLAP_POS                                 (5U)
+#define        RGX_FEATURE_COMPUTE_OVERLAP_BIT_MASK                            (IMG_UINT64_C(0x0000000000000020))
+
+#define        RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_POS                   (6U)
+#define        RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_BIT_MASK              (IMG_UINT64_C(0x0000000000000040))
+
+#define        RGX_FEATURE_COREID_PER_OS_POS                                   (7U)
+#define        RGX_FEATURE_COREID_PER_OS_BIT_MASK                              (IMG_UINT64_C(0x0000000000000080))
+
+#define        RGX_FEATURE_DUST_POWER_ISLAND_S7_POS                            (8U)
+#define        RGX_FEATURE_DUST_POWER_ISLAND_S7_BIT_MASK                       (IMG_UINT64_C(0x0000000000000100))
+
+#define        RGX_FEATURE_DYNAMIC_DUST_POWER_POS                              (9U)
+#define        RGX_FEATURE_DYNAMIC_DUST_POWER_BIT_MASK                         (IMG_UINT64_C(0x0000000000000200))
+
+#define        RGX_FEATURE_FASTRENDER_DM_POS                                   (10U)
+#define        RGX_FEATURE_FASTRENDER_DM_BIT_MASK                              (IMG_UINT64_C(0x0000000000000400))
+
+#define        RGX_FEATURE_GPU_MULTICORE_SUPPORT_POS                           (11U)
+#define        RGX_FEATURE_GPU_MULTICORE_SUPPORT_BIT_MASK                      (IMG_UINT64_C(0x0000000000000800))
+
+#define        RGX_FEATURE_GPU_VIRTUALISATION_POS                              (12U)
+#define        RGX_FEATURE_GPU_VIRTUALISATION_BIT_MASK                         (IMG_UINT64_C(0x0000000000001000))
+
+#define        RGX_FEATURE_GS_RTA_SUPPORT_POS                                  (13U)
+#define        RGX_FEATURE_GS_RTA_SUPPORT_BIT_MASK                             (IMG_UINT64_C(0x0000000000002000))
+
+#define        RGX_FEATURE_IRQ_PER_OS_POS                                      (14U)
+#define        RGX_FEATURE_IRQ_PER_OS_BIT_MASK                                 (IMG_UINT64_C(0x0000000000004000))
+
+#define        RGX_FEATURE_META_DMA_POS                                        (15U)
+#define        RGX_FEATURE_META_DMA_BIT_MASK                                   (IMG_UINT64_C(0x0000000000008000))
+
+#define        RGX_FEATURE_MIPS_POS                                            (16U)
+#define        RGX_FEATURE_MIPS_BIT_MASK                                       (IMG_UINT64_C(0x0000000000010000))
+
+#define        RGX_FEATURE_PBE2_IN_XE_POS                                      (17U)
+#define        RGX_FEATURE_PBE2_IN_XE_BIT_MASK                                 (IMG_UINT64_C(0x0000000000020000))
+
+#define        RGX_FEATURE_PBE_CHECKSUM_2D_POS                                 (18U)
+#define        RGX_FEATURE_PBE_CHECKSUM_2D_BIT_MASK                            (IMG_UINT64_C(0x0000000000040000))
+
+#define        RGX_FEATURE_PBVNC_COREID_REG_POS                                (19U)
+#define        RGX_FEATURE_PBVNC_COREID_REG_BIT_MASK                           (IMG_UINT64_C(0x0000000000080000))
+
+#define        RGX_FEATURE_PDS_PER_DUST_POS                                    (20U)
+#define        RGX_FEATURE_PDS_PER_DUST_BIT_MASK                               (IMG_UINT64_C(0x0000000000100000))
+
+#define        RGX_FEATURE_PDS_TEMPSIZE8_POS                                   (21U)
+#define        RGX_FEATURE_PDS_TEMPSIZE8_BIT_MASK                              (IMG_UINT64_C(0x0000000000200000))
+
+#define        RGX_FEATURE_PERFBUS_POS                                         (22U)
+#define        RGX_FEATURE_PERFBUS_BIT_MASK                                    (IMG_UINT64_C(0x0000000000400000))
+
+#define        RGX_FEATURE_PERF_COUNTER_BATCH_POS                              (23U)
+#define        RGX_FEATURE_PERF_COUNTER_BATCH_BIT_MASK                         (IMG_UINT64_C(0x0000000000800000))
+
+#define        RGX_FEATURE_PM_MMU_VFP_POS                                      (24U)
+#define        RGX_FEATURE_PM_MMU_VFP_BIT_MASK                                 (IMG_UINT64_C(0x0000000001000000))
+
+#define        RGX_FEATURE_RISCV_FW_PROCESSOR_POS                              (25U)
+#define        RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK                         (IMG_UINT64_C(0x0000000002000000))
+
+#define        RGX_FEATURE_ROGUEXE_POS                                         (26U)
+#define        RGX_FEATURE_ROGUEXE_BIT_MASK                                    (IMG_UINT64_C(0x0000000004000000))
+
+#define        RGX_FEATURE_S7_CACHE_HIERARCHY_POS                              (27U)
+#define        RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK                         (IMG_UINT64_C(0x0000000008000000))
+
+#define        RGX_FEATURE_S7_TOP_INFRASTRUCTURE_POS                           (28U)
+#define        RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK                      (IMG_UINT64_C(0x0000000010000000))
+
+#define        RGX_FEATURE_SCALABLE_VDM_GPP_POS                                (29U)
+#define        RGX_FEATURE_SCALABLE_VDM_GPP_BIT_MASK                           (IMG_UINT64_C(0x0000000020000000))
+
+#define        RGX_FEATURE_SIGNAL_SNOOPING_POS                                 (30U)
+#define        RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK                            (IMG_UINT64_C(0x0000000040000000))
+
+#define        RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_POS                (31U)
+#define        RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_BIT_MASK           (IMG_UINT64_C(0x0000000080000000))
+
+#define        RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1_POS             (32U)
+#define        RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1_BIT_MASK        (IMG_UINT64_C(0x0000000100000000))
+
+#define        RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2_POS             (33U)
+#define        RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2_BIT_MASK        (IMG_UINT64_C(0x0000000200000000))
+
+#define        RGX_FEATURE_SINGLE_BIF_POS                                      (34U)
+#define        RGX_FEATURE_SINGLE_BIF_BIT_MASK                                 (IMG_UINT64_C(0x0000000400000000))
+
+#define        RGX_FEATURE_SLC_HYBRID_CACHELINE_64_128_POS                     (35U)
+#define        RGX_FEATURE_SLC_HYBRID_CACHELINE_64_128_BIT_MASK                (IMG_UINT64_C(0x0000000800000000))
+
+#define        RGX_FEATURE_SLC_SIZE_CONFIGURABLE_POS                           (36U)
+#define        RGX_FEATURE_SLC_SIZE_CONFIGURABLE_BIT_MASK                      (IMG_UINT64_C(0x0000001000000000))
+
+#define        RGX_FEATURE_SLC_VIVT_POS                                        (37U)
+#define        RGX_FEATURE_SLC_VIVT_BIT_MASK                                   (IMG_UINT64_C(0x0000002000000000))
+
+#define        RGX_FEATURE_SOC_TIMER_POS                                       (38U)
+#define        RGX_FEATURE_SOC_TIMER_BIT_MASK                                  (IMG_UINT64_C(0x0000004000000000))
+
+#define        RGX_FEATURE_SYS_BUS_SECURE_RESET_POS                            (39U)
+#define        RGX_FEATURE_SYS_BUS_SECURE_RESET_BIT_MASK                       (IMG_UINT64_C(0x0000008000000000))
+
+#define        RGX_FEATURE_TDM_PDS_CHECKSUM_POS                                (40U)
+#define        RGX_FEATURE_TDM_PDS_CHECKSUM_BIT_MASK                           (IMG_UINT64_C(0x0000010000000000))
+
+#define        RGX_FEATURE_TESSELLATION_POS                                    (41U)
+#define        RGX_FEATURE_TESSELLATION_BIT_MASK                               (IMG_UINT64_C(0x0000020000000000))
+
+#define        RGX_FEATURE_TFBC_DELTA_CORRELATION_POS                          (42U)
+#define        RGX_FEATURE_TFBC_DELTA_CORRELATION_BIT_MASK                     (IMG_UINT64_C(0x0000040000000000))
+
+#define        RGX_FEATURE_TFBC_LOSSY_37_PERCENT_POS                           (43U)
+#define        RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK                      (IMG_UINT64_C(0x0000080000000000))
+
+#define        RGX_FEATURE_TFBC_NATIVE_YUV10_POS                               (44U)
+#define        RGX_FEATURE_TFBC_NATIVE_YUV10_BIT_MASK                          (IMG_UINT64_C(0x0000100000000000))
+
+#define        RGX_FEATURE_TILE_REGION_PROTECTION_POS                          (45U)
+#define        RGX_FEATURE_TILE_REGION_PROTECTION_BIT_MASK                     (IMG_UINT64_C(0x0000200000000000))
+
+#define        RGX_FEATURE_TLA_POS                                             (46U)
+#define        RGX_FEATURE_TLA_BIT_MASK                                        (IMG_UINT64_C(0x0000400000000000))
+
+#define        RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_POS             (47U)
+#define        RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_BIT_MASK        (IMG_UINT64_C(0x0000800000000000))
+
+#define        RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_POS                         (48U)
+#define        RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_BIT_MASK                    (IMG_UINT64_C(0x0001000000000000))
+
+#define        RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_POS                      (49U)
+#define        RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_BIT_MASK                 (IMG_UINT64_C(0x0002000000000000))
+
+#define        RGX_FEATURE_VDM_DRAWINDIRECT_POS                                (50U)
+#define        RGX_FEATURE_VDM_DRAWINDIRECT_BIT_MASK                           (IMG_UINT64_C(0x0004000000000000))
+
+#define        RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_POS                            (51U)
+#define        RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_BIT_MASK                       (IMG_UINT64_C(0x0008000000000000))
+
+#define        RGX_FEATURE_WATCHDOG_TIMER_POS                                  (52U)
+#define        RGX_FEATURE_WATCHDOG_TIMER_BIT_MASK                             (IMG_UINT64_C(0x0010000000000000))
+
+#define        RGX_FEATURE_WORKGROUP_PROTECTION_POS                            (53U)
+#define        RGX_FEATURE_WORKGROUP_PROTECTION_BIT_MASK                       (IMG_UINT64_C(0x0020000000000000))
+
+#define        RGX_FEATURE_XE_MEMORY_HIERARCHY_POS                             (54U)
+#define        RGX_FEATURE_XE_MEMORY_HIERARCHY_BIT_MASK                        (IMG_UINT64_C(0x0040000000000000))
+
+#define        RGX_FEATURE_XT_TOP_INFRASTRUCTURE_POS                           (55U)
+#define        RGX_FEATURE_XT_TOP_INFRASTRUCTURE_BIT_MASK                      (IMG_UINT64_C(0x0080000000000000))
+
+
+/******************************************************************************
+ * Defines for each feature with values, used
+ * for handling the corresponding values
+ *****************************************************************************/
+
+#define        RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX     (2)
+#define        RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX      (2)
+#define        RGX_FEATURE_FBCDC_MAX_VALUE_IDX (4)
+#define        RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX       (6)
+#define        RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX    (4)
+#define        RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_MAX_VALUE_IDX   (2)
+#define        RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_MAX_VALUE_IDX     (2)
+#define        RGX_FEATURE_LAYOUT_MARS_MAX_VALUE_IDX   (3)
+#define        RGX_FEATURE_META_MAX_VALUE_IDX  (4)
+#define        RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX    (1)
+#define        RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX     (3)
+#define        RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX        (1)
+#define        RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX  (5)
+#define        RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX     (9)
+#define        RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX     (3)
+#define        RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX      (3)
+#define        RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX        (4)
+#define        RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX      (1)
+#define        RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX  (1)
+#define        RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_MAX_VALUE_IDX       (3)
+#define        RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX     (4)
+#define        RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX      (2)
+#define        RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX (6)
+#define        RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX   (3)
+#define        RGX_FEATURE_TILE_SIZE_Y_MAX_VALUE_IDX   (3)
+#define        RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX    (2)
+#define        RGX_FEATURE_XE_ARCHITECTURE_MAX_VALUE_IDX       (2)
+#define        RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_MAX_VALUE_IDX   (2)
+#define        RGX_FEATURE_XPU_MAX_SLAVES_MAX_VALUE_IDX        (2)
+#define        RGX_FEATURE_XPU_REGISTER_BROADCAST_MAX_VALUE_IDX        (2)
+
+/******************************************************************************
+ * Features with values indexes
+ *****************************************************************************/
+
+typedef enum _RGX_FEATURE_WITH_VALUE_INDEX_ {
+       RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_IDX,
+       RGX_FEATURE_ECC_RAMS_IDX,
+       RGX_FEATURE_FBCDC_IDX,
+       RGX_FEATURE_FBCDC_ALGORITHM_IDX,
+       RGX_FEATURE_FBCDC_ARCHITECTURE_IDX,
+       RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_IDX,
+       RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_IDX,
+       RGX_FEATURE_LAYOUT_MARS_IDX,
+       RGX_FEATURE_META_IDX,
+       RGX_FEATURE_META_COREMEM_BANKS_IDX,
+       RGX_FEATURE_META_COREMEM_SIZE_IDX,
+       RGX_FEATURE_META_DMA_CHANNEL_COUNT_IDX,
+       RGX_FEATURE_NUM_CLUSTERS_IDX,
+       RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX,
+       RGX_FEATURE_NUM_OSIDS_IDX,
+       RGX_FEATURE_NUM_RASTER_PIPES_IDX,
+       RGX_FEATURE_PHYS_BUS_WIDTH_IDX,
+       RGX_FEATURE_SCALABLE_TE_ARCH_IDX,
+       RGX_FEATURE_SCALABLE_VCE_IDX,
+       RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_IDX,
+       RGX_FEATURE_SLC_BANKS_IDX,
+       RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_IDX,
+       RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_IDX,
+       RGX_FEATURE_TILE_SIZE_X_IDX,
+       RGX_FEATURE_TILE_SIZE_Y_IDX,
+       RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_IDX,
+       RGX_FEATURE_XE_ARCHITECTURE_IDX,
+       RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_IDX,
+       RGX_FEATURE_XPU_MAX_SLAVES_IDX,
+       RGX_FEATURE_XPU_REGISTER_BROADCAST_IDX,
+       RGX_FEATURE_WITH_VALUES_MAX_IDX,
+} RGX_FEATURE_WITH_VALUE_INDEX;
+
+
+/******************************************************************************
+ * Mask and bit-position macros for ERNs and BRNs
+ *****************************************************************************/
+
+#define        FIX_HW_BRN_38344_POS                                            (0U)
+#define        FIX_HW_BRN_38344_BIT_MASK                                       (IMG_UINT64_C(0x0000000000000001))
+
+#define        HW_ERN_42290_POS                                                (1U)
+#define        HW_ERN_42290_BIT_MASK                                           (IMG_UINT64_C(0x0000000000000002))
+
+#define        FIX_HW_BRN_42321_POS                                            (2U)
+#define        FIX_HW_BRN_42321_BIT_MASK                                       (IMG_UINT64_C(0x0000000000000004))
+
+#define        HW_ERN_42606_POS                                                (3U)
+#define        HW_ERN_42606_BIT_MASK                                           (IMG_UINT64_C(0x0000000000000008))
+
+#define        HW_ERN_46066_POS                                                (4U)
+#define        HW_ERN_46066_BIT_MASK                                           (IMG_UINT64_C(0x0000000000000010))
+
+#define        HW_ERN_47025_POS                                                (5U)
+#define        HW_ERN_47025_BIT_MASK                                           (IMG_UINT64_C(0x0000000000000020))
+
+#define        HW_ERN_50539_POS                                                (6U)
+#define        HW_ERN_50539_BIT_MASK                                           (IMG_UINT64_C(0x0000000000000040))
+
+#define        FIX_HW_BRN_50767_POS                                            (7U)
+#define        FIX_HW_BRN_50767_BIT_MASK                                       (IMG_UINT64_C(0x0000000000000080))
+
+#define        HW_ERN_57596_POS                                                (8U)
+#define        HW_ERN_57596_BIT_MASK                                           (IMG_UINT64_C(0x0000000000000100))
+
+#define        FIX_HW_BRN_60084_POS                                            (9U)
+#define        FIX_HW_BRN_60084_BIT_MASK                                       (IMG_UINT64_C(0x0000000000000200))
+
+#define        HW_ERN_61389_POS                                                (10U)
+#define        HW_ERN_61389_BIT_MASK                                           (IMG_UINT64_C(0x0000000000000400))
+
+#define        FIX_HW_BRN_61450_POS                                            (11U)
+#define        FIX_HW_BRN_61450_BIT_MASK                                       (IMG_UINT64_C(0x0000000000000800))
+
+#define        FIX_HW_BRN_63142_POS                                            (12U)
+#define        FIX_HW_BRN_63142_BIT_MASK                                       (IMG_UINT64_C(0x0000000000001000))
+
+#define        FIX_HW_BRN_63553_POS                                            (13U)
+#define        FIX_HW_BRN_63553_BIT_MASK                                       (IMG_UINT64_C(0x0000000000002000))
+
+#define        FIX_HW_BRN_64502_POS                                            (14U)
+#define        FIX_HW_BRN_64502_BIT_MASK                                       (IMG_UINT64_C(0x0000000000004000))
+
+#define        FIX_HW_BRN_65101_POS                                            (15U)
+#define        FIX_HW_BRN_65101_BIT_MASK                                       (IMG_UINT64_C(0x0000000000008000))
+
+#define        FIX_HW_BRN_65273_POS                                            (16U)
+#define        FIX_HW_BRN_65273_BIT_MASK                                       (IMG_UINT64_C(0x0000000000010000))
+
+#define        HW_ERN_66622_POS                                                (17U)
+#define        HW_ERN_66622_BIT_MASK                                           (IMG_UINT64_C(0x0000000000020000))
+
+#define        FIX_HW_BRN_68186_POS                                            (18U)
+#define        FIX_HW_BRN_68186_BIT_MASK                                       (IMG_UINT64_C(0x0000000000040000))
+
+/* Macro used for padding the unavailable values for features with values */
+#define RGX_FEATURE_VALUE_INVALID      (0xFFFFFFFEU)
+
+/* Macro used for marking a feature with a value as disabled for a specific BVNC */
+#define RGX_FEATURE_VALUE_DISABLED     (0xFFFFFFFFU)
+
+#endif /* RGX_BVNC_DEFS_KM_H */
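The boolean feature masks above are consumed by testing a packed 64-bit feature word against the relevant _BIT_MASK. A minimal sketch of that idiom using plain stdint types follows; the helper name and the feature-word parameter are illustrative, not driver API:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: test one boolean feature bit in a packed 64-bit
 * feature word, e.g. the COMPUTE bit at position 2 / mask 0x4 above. */
static bool example_feature_supported(uint64_t features_word, uint64_t feature_mask)
{
        return (features_word & feature_mask) != 0;
}

Features with values are handled separately: their packed words carry an index into a per-feature value array rather than a single bit, as laid out in rgx_bvnc_table_km.h below.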
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/rgx_bvnc_table_km.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/rgx_bvnc_table_km.h
new file mode 100644 (file)
index 0000000..4044507
--- /dev/null
@@ -0,0 +1,462 @@
+/*************************************************************************/ /*!
+@Title          Hardware definition file rgx_bvnc_table_km.h
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/******************************************************************************
+ *                 Auto generated file by rgxbvnc_tablegen.py                 *
+ *                  This file should not be edited manually                   *
+ *****************************************************************************/
+
+#ifndef RGX_BVNC_TABLE_KM_H
+#define RGX_BVNC_TABLE_KM_H
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "rgxdefs_km.h"
+#include "rgx_bvnc_defs_km.h"
+
+#ifndef RGXBVNC_C
+#error "This file should only be included from rgxbvnc.c"
+#endif
+
+#if defined(RGX_BVNC_TABLE_UM_H)
+#error "This file should not be included in conjunction with rgx_bvnc_table_um.h"
+#endif
+
+
+/******************************************************************************
+ * Arrays for each feature with values, used
+ * for handling the corresponding values
+ *****************************************************************************/
+
+static const IMG_UINT16 aui16_RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_values[RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_ECC_RAMS_values[RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 2, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_values[RGX_FEATURE_FBCDC_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 3, 4, 50, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_ALGORITHM_values[RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, 50, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_ARCHITECTURE_values[RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 7, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_values[RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_values[RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_LAYOUT_MARS_values[RGX_FEATURE_LAYOUT_MARS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 1, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_META_values[RGX_FEATURE_META_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, LTP217, LTP218, MTP218, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_META_COREMEM_BANKS_values[RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_META_COREMEM_SIZE_values[RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 32, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_META_DMA_CHANNEL_COUNT_values[RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_NUM_CLUSTERS_values[RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, 6, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_NUM_ISP_IPP_PIPES_values[RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, 6, 7, 8, 12, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_NUM_OSIDS_values[RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 2, 8, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_NUM_RASTER_PIPES_values[RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_PHYS_BUS_WIDTH_values[RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 32, 36, 40, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_TE_ARCH_values[RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_VCE_values[RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_values[RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_SLC_BANKS_values[RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_values[RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 512, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_values[RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 2, 8, 16, 64, 128, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_TILE_SIZE_X_values[RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 16, 32, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_TILE_SIZE_Y_values[RGX_FEATURE_TILE_SIZE_Y_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 16, 32, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_values[RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 40, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_XE_ARCHITECTURE_values[RGX_FEATURE_XE_ARCHITECTURE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_values[RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 19, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_XPU_MAX_SLAVES_values[RGX_FEATURE_XPU_MAX_SLAVES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 3, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_XPU_REGISTER_BROADCAST_values[RGX_FEATURE_XPU_REGISTER_BROADCAST_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, };
+
+
+/******************************************************************************
+ * Table containing pointers to each feature value array for features that have
+ * values.
+ * Indexed using enum RGX_FEATURE_WITH_VALUE_INDEX from rgx_bvnc_defs_km.h
+ *****************************************************************************/
+
+static const IMG_UINT16 * const gaFeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX] = {
+       aui16_RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_values,
+       aui16_RGX_FEATURE_ECC_RAMS_values,
+       aui16_RGX_FEATURE_FBCDC_values,
+       aui16_RGX_FEATURE_FBCDC_ALGORITHM_values,
+       aui16_RGX_FEATURE_FBCDC_ARCHITECTURE_values,
+       aui16_RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_values,
+       aui16_RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_values,
+       aui16_RGX_FEATURE_LAYOUT_MARS_values,
+       aui16_RGX_FEATURE_META_values,
+       aui16_RGX_FEATURE_META_COREMEM_BANKS_values,
+       aui16_RGX_FEATURE_META_COREMEM_SIZE_values,
+       aui16_RGX_FEATURE_META_DMA_CHANNEL_COUNT_values,
+       aui16_RGX_FEATURE_NUM_CLUSTERS_values,
+       aui16_RGX_FEATURE_NUM_ISP_IPP_PIPES_values,
+       aui16_RGX_FEATURE_NUM_OSIDS_values,
+       aui16_RGX_FEATURE_NUM_RASTER_PIPES_values,
+       aui16_RGX_FEATURE_PHYS_BUS_WIDTH_values,
+       aui16_RGX_FEATURE_SCALABLE_TE_ARCH_values,
+       aui16_RGX_FEATURE_SCALABLE_VCE_values,
+       aui16_RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_values,
+       aui16_RGX_FEATURE_SLC_BANKS_values,
+       aui16_RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_values,
+       aui16_RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_values,
+       aui16_RGX_FEATURE_TILE_SIZE_X_values,
+       aui16_RGX_FEATURE_TILE_SIZE_Y_values,
+       aui16_RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_values,
+       aui16_RGX_FEATURE_XE_ARCHITECTURE_values,
+       aui16_RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_values,
+       aui16_RGX_FEATURE_XPU_MAX_SLAVES_values,
+       aui16_RGX_FEATURE_XPU_REGISTER_BROADCAST_values,
+};
+
+
+/******************************************************************************
+ * Array containing the lengths of the value arrays defined above.
+ * Used when indexing the aui16_<FEATURE>_values arrays
+ *****************************************************************************/
+
+
+static const IMG_UINT16 gaFeaturesValuesMaxIndexes[] = {
+       RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX,
+       RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX,
+       RGX_FEATURE_FBCDC_MAX_VALUE_IDX,
+       RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX,
+       RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX,
+       RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_MAX_VALUE_IDX,
+       RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_MAX_VALUE_IDX,
+       RGX_FEATURE_LAYOUT_MARS_MAX_VALUE_IDX,
+       RGX_FEATURE_META_MAX_VALUE_IDX,
+       RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX,
+       RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX,
+       RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX,
+       RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX,
+       RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX,
+       RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX,
+       RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX,
+       RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX,
+       RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX,
+       RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX,
+       RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_MAX_VALUE_IDX,
+       RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX,
+       RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX,
+       RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX,
+       RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX,
+       RGX_FEATURE_TILE_SIZE_Y_MAX_VALUE_IDX,
+       RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX,
+       RGX_FEATURE_XE_ARCHITECTURE_MAX_VALUE_IDX,
+       RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_MAX_VALUE_IDX,
+       RGX_FEATURE_XPU_MAX_SLAVES_MAX_VALUE_IDX,
+       RGX_FEATURE_XPU_REGISTER_BROADCAST_MAX_VALUE_IDX,
+};
+
+
+/******************************************************************************
+ * Bit-positions for features with values
+ *****************************************************************************/
+
+static const IMG_UINT16 aui16FeaturesWithValuesBitPositions[] = {
+       (0U), /* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_POS */
+       (2U), /* RGX_FEATURE_ECC_RAMS_POS */
+       (4U), /* RGX_FEATURE_FBCDC_POS */
+       (7U), /* RGX_FEATURE_FBCDC_ALGORITHM_POS */
+       (10U), /* RGX_FEATURE_FBCDC_ARCHITECTURE_POS */
+       (13U), /* RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_POS */
+       (15U), /* RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_POS */
+       (17U), /* RGX_FEATURE_LAYOUT_MARS_POS */
+       (19U), /* RGX_FEATURE_META_POS */
+       (22U), /* RGX_FEATURE_META_COREMEM_BANKS_POS */
+       (23U), /* RGX_FEATURE_META_COREMEM_SIZE_POS */
+       (25U), /* RGX_FEATURE_META_DMA_CHANNEL_COUNT_POS */
+       (26U), /* RGX_FEATURE_NUM_CLUSTERS_POS */
+       (29U), /* RGX_FEATURE_NUM_ISP_IPP_PIPES_POS */
+       (33U), /* RGX_FEATURE_NUM_OSIDS_POS */
+       (35U), /* RGX_FEATURE_NUM_RASTER_PIPES_POS */
+       (37U), /* RGX_FEATURE_PHYS_BUS_WIDTH_POS */
+       (40U), /* RGX_FEATURE_SCALABLE_TE_ARCH_POS */
+       (41U), /* RGX_FEATURE_SCALABLE_VCE_POS */
+       (42U), /* RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_POS */
+       (44U), /* RGX_FEATURE_SLC_BANKS_POS */
+       (47U), /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_POS */
+       (49U), /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_POS */
+       (52U), /* RGX_FEATURE_TILE_SIZE_X_POS */
+       (54U), /* RGX_FEATURE_TILE_SIZE_Y_POS */
+       (56U), /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_POS */
+       (58U), /* RGX_FEATURE_XE_ARCHITECTURE_POS */
+       (60U), /* RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_POS */
+       (62U), /* RGX_FEATURE_XPU_MAX_SLAVES_POS */
+       (64U), /* RGX_FEATURE_XPU_REGISTER_BROADCAST_POS */
+};
+
+
+/******************************************************************************
+ * Bit-masks for features with values
+ *****************************************************************************/
+
+static const IMG_UINT64 aui64FeaturesWithValuesBitMasks[] = {
+       (IMG_UINT64_C(0x0000000000000003)), /* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_BIT_MASK */
+       (IMG_UINT64_C(0x000000000000000C)), /* RGX_FEATURE_ECC_RAMS_BIT_MASK */
+       (IMG_UINT64_C(0x0000000000000070)), /* RGX_FEATURE_FBCDC_BIT_MASK */
+       (IMG_UINT64_C(0x0000000000000380)), /* RGX_FEATURE_FBCDC_ALGORITHM_BIT_MASK */
+       (IMG_UINT64_C(0x0000000000001C00)), /* RGX_FEATURE_FBCDC_ARCHITECTURE_BIT_MASK */
+       (IMG_UINT64_C(0x0000000000006000)), /* RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_BIT_MASK */
+       (IMG_UINT64_C(0x0000000000018000)), /* RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_BIT_MASK */
+       (IMG_UINT64_C(0x0000000000060000)), /* RGX_FEATURE_LAYOUT_MARS_BIT_MASK */
+       (IMG_UINT64_C(0x0000000000380000)), /* RGX_FEATURE_META_BIT_MASK */
+       (IMG_UINT64_C(0x0000000000400000)), /* RGX_FEATURE_META_COREMEM_BANKS_BIT_MASK */
+       (IMG_UINT64_C(0x0000000001800000)), /* RGX_FEATURE_META_COREMEM_SIZE_BIT_MASK */
+       (IMG_UINT64_C(0x0000000002000000)), /* RGX_FEATURE_META_DMA_CHANNEL_COUNT_BIT_MASK */
+       (IMG_UINT64_C(0x000000001C000000)), /* RGX_FEATURE_NUM_CLUSTERS_BIT_MASK */
+       (IMG_UINT64_C(0x00000001E0000000)), /* RGX_FEATURE_NUM_ISP_IPP_PIPES_BIT_MASK */
+       (IMG_UINT64_C(0x0000000600000000)), /* RGX_FEATURE_NUM_OSIDS_BIT_MASK */
+       (IMG_UINT64_C(0x0000001800000000)), /* RGX_FEATURE_NUM_RASTER_PIPES_BIT_MASK */
+       (IMG_UINT64_C(0x000000E000000000)), /* RGX_FEATURE_PHYS_BUS_WIDTH_BIT_MASK */
+       (IMG_UINT64_C(0x0000010000000000)), /* RGX_FEATURE_SCALABLE_TE_ARCH_BIT_MASK */
+       (IMG_UINT64_C(0x0000020000000000)), /* RGX_FEATURE_SCALABLE_VCE_BIT_MASK */
+       (IMG_UINT64_C(0x00000C0000000000)), /* RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_BIT_MASK */
+       (IMG_UINT64_C(0x0000700000000000)), /* RGX_FEATURE_SLC_BANKS_BIT_MASK */
+       (IMG_UINT64_C(0x0001800000000000)), /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_BIT_MASK */
+       (IMG_UINT64_C(0x000E000000000000)), /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_BIT_MASK */
+       (IMG_UINT64_C(0x0030000000000000)), /* RGX_FEATURE_TILE_SIZE_X_BIT_MASK */
+       (IMG_UINT64_C(0x00C0000000000000)), /* RGX_FEATURE_TILE_SIZE_Y_BIT_MASK */
+       (IMG_UINT64_C(0x0300000000000000)), /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_BIT_MASK */
+       (IMG_UINT64_C(0x0C00000000000000)), /* RGX_FEATURE_XE_ARCHITECTURE_BIT_MASK */
+       (IMG_UINT64_C(0x3000000000000000)), /* RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_BIT_MASK */
+       (IMG_UINT64_C(0xC000000000000000)), /* RGX_FEATURE_XPU_MAX_SLAVES_BIT_MASK */
+       (IMG_UINT64_C(0x0000000000000003)), /* RGX_FEATURE_XPU_REGISTER_BROADCAST_BIT_MASK */
+};
+
+
+/******************************************************************************
+ * Table mapping bitmasks for features and features with values
+ *****************************************************************************/
+
+
+static const IMG_UINT64 gaFeatures[][4]=
+{
+       { IMG_UINT64_C(0x000100000002001e), IMG_UINT64_C(0x0000400000402025), IMG_UINT64_C(0x01aa8068689aa481), IMG_UINT64_C(0x0000000000000000) },     /* 1.0.2.30 */
+       { IMG_UINT64_C(0x0001000000040005), IMG_UINT64_C(0x0000400000402024), IMG_UINT64_C(0x01aa80686c9aa481), IMG_UINT64_C(0x0000000000000000) },     /* 1.0.4.5 */
+       { IMG_UINT64_C(0x0001000000040013), IMG_UINT64_C(0x0000400000402025), IMG_UINT64_C(0x01aa80686c9aa481), IMG_UINT64_C(0x0000000000000000) },     /* 1.0.4.19 */
+       { IMG_UINT64_C(0x0004000000020033), IMG_UINT64_C(0x0082c04000c0222f), IMG_UINT64_C(0x01aa8068e912a901), IMG_UINT64_C(0x0000000000000000) },     /* 4.0.2.51 */
+       { IMG_UINT64_C(0x000400000002003a), IMG_UINT64_C(0x0082c04000c0322f), IMG_UINT64_C(0x01aa806ce912a901), IMG_UINT64_C(0x0000000000000000) },     /* 4.0.2.58 */
+       { IMG_UINT64_C(0x0004000000040037), IMG_UINT64_C(0x0082c04000c0222e), IMG_UINT64_C(0x01aa8068ed12a901), IMG_UINT64_C(0x0000000000000000) },     /* 4.0.4.55 */
+       { IMG_UINT64_C(0x000400000006003e), IMG_UINT64_C(0x0082c04000c0322f), IMG_UINT64_C(0x01aab074f112a901), IMG_UINT64_C(0x0000000000000000) },     /* 4.0.6.62 */
+       { IMG_UINT64_C(0x000500000001002e), IMG_UINT64_C(0x0000004004402205), IMG_UINT64_C(0x05a69068248aa501), IMG_UINT64_C(0x0000000000000000) },     /* 5.0.1.46 */
+       { IMG_UINT64_C(0x0006000000040023), IMG_UINT64_C(0x0082c04000c0222f), IMG_UINT64_C(0x01aa8068ed12a901), IMG_UINT64_C(0x0000000000000000) },     /* 6.0.4.35 */
+       { IMG_UINT64_C(0x000f000000010040), IMG_UINT64_C(0x0000004004403205), IMG_UINT64_C(0x05a8906c448aa501), IMG_UINT64_C(0x0000000000000000) },     /* 15.0.1.64 */
+       { IMG_UINT64_C(0x0016000000150010), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0554942c44020001), IMG_UINT64_C(0x0000000000000000) },     /* 22.0.21.16 */
+       { IMG_UINT64_C(0x0016000000360019), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558942c64020001), IMG_UINT64_C(0x0000000000000000) },     /* 22.0.54.25 */
+       { IMG_UINT64_C(0x001600000036001e), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558942c84020001), IMG_UINT64_C(0x0000000000000000) },     /* 22.0.54.30 */
+       { IMG_UINT64_C(0x0016000000360026), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558944c84020001), IMG_UINT64_C(0x0000000000000000) },     /* 22.0.54.38 */
+       { IMG_UINT64_C(0x001600000036014a), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558942c8402a591), IMG_UINT64_C(0x0000000000000000) },     /* 22.0.54.330 */
+       { IMG_UINT64_C(0x0016000000680012), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558944cc4020001), IMG_UINT64_C(0x0000000000000000) },     /* 22.0.104.18 */
+       { IMG_UINT64_C(0x00160000006800da), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558944cc402a591), IMG_UINT64_C(0x0000000000000000) },     /* 22.0.104.218 */
+       { IMG_UINT64_C(0x0016000000d0013e), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558a4550802a591), IMG_UINT64_C(0x0000000000000000) },     /* 22.0.208.318 */
+       { IMG_UINT64_C(0x00180000003600cc), IMG_UINT64_C(0x004001c2844f7425), IMG_UINT64_C(0x0558984c8402a591), IMG_UINT64_C(0x0000000000000000) },     /* 24.0.54.204 */
+       { IMG_UINT64_C(0x00180000006801f8), IMG_UINT64_C(0x004001c2844f7425), IMG_UINT64_C(0x0558984ca402a591), IMG_UINT64_C(0x0000000000000000) },     /* 24.0.104.504 */
+       { IMG_UINT64_C(0x0018000000d001f8), IMG_UINT64_C(0x004001c2844f7425), IMG_UINT64_C(0x055aa8550802a591), IMG_UINT64_C(0x0000000000000000) },     /* 24.0.208.504 */
+       { IMG_UINT64_C(0x0018000000d001f9), IMG_UINT64_C(0x004001c2844f7425), IMG_UINT64_C(0x055aa8550802a591), IMG_UINT64_C(0x0000000000000000) },     /* 24.0.208.505 */
+       { IMG_UINT64_C(0x001d0000003400ca), IMG_UINT64_C(0x004181c2844f74a5), IMG_UINT64_C(0x0556984c4402a621), IMG_UINT64_C(0x0000000000000000) },     /* 29.0.52.202 */
+       { IMG_UINT64_C(0x001d0000006c00d0), IMG_UINT64_C(0x004181c2844f74a5), IMG_UINT64_C(0x055aa854e802a621), IMG_UINT64_C(0x0000000000000000) },     /* 29.0.108.208 */
+       { IMG_UINT64_C(0x00210000000b0003), IMG_UINT64_C(0x00400092844b5085), IMG_UINT64_C(0x0552984a24020001), IMG_UINT64_C(0x0000000000000000) },     /* 33.0.11.3 */
+       { IMG_UINT64_C(0x0021000000160001), IMG_UINT64_C(0x004180c2854b70a5), IMG_UINT64_C(0x0556984c44020001), IMG_UINT64_C(0x0000000000000000) },     /* 33.0.22.1 */
+       { IMG_UINT64_C(0x0024000000360067), IMG_UINT64_C(0x004180d2844b38a5), IMG_UINT64_C(0x0556984c8402aeb1), IMG_UINT64_C(0x0000000000000000) },     /* 36.0.54.103 */
+       { IMG_UINT64_C(0x00240000003600b6), IMG_UINT64_C(0x004180d2844b78a5), IMG_UINT64_C(0x5556984c8404aeb1), IMG_UINT64_C(0x0000000000000001) },     /* 36.0.54.182 */
+       { IMG_UINT64_C(0x00240000003600b7), IMG_UINT64_C(0x004180d2844b78a5), IMG_UINT64_C(0x5556984c8404aeb1), IMG_UINT64_C(0x0000000000000001) },     /* 36.0.54.183 */
+       { IMG_UINT64_C(0x00240000006800b6), IMG_UINT64_C(0x004180d2844b78a5), IMG_UINT64_C(0x5556984ca404aeb1), IMG_UINT64_C(0x0000000000000001) },     /* 36.0.104.182 */
+       { IMG_UINT64_C(0x00240000006800b7), IMG_UINT64_C(0x004180d2844b78a5), IMG_UINT64_C(0x5556984ca404aeb1), IMG_UINT64_C(0x0000000000000001) },     /* 36.0.104.183 */
+       { IMG_UINT64_C(0x002400000068031c), IMG_UINT64_C(0x0071a0d2864a78a5), IMG_UINT64_C(0x5556984ca404aeb5), IMG_UINT64_C(0x0000000000000001) },     /* 36.0.104.796 */
+};
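A hedged sketch of how a gaFeatures row is presumably decoded: columns [2] and [3] hold, for each feature with a value, a small index into the matching gaFeaturesValues array, located with the bit-position and bit-mask tables above (masks for positions of 64 and above are expressed relative to the second word). The helper below is illustrative only, not the driver's rgxbvnc.c code:

#include <stdint.h>

/* Illustrative only: extract the value-array index for one feature with a
 * value. 'value_words' points at columns [2]..[3] of a gaFeatures row,
 * 'pos' is the aui16FeaturesWithValuesBitPositions entry and 'mask' the
 * matching aui64FeaturesWithValuesBitMasks entry. */
static uint32_t example_feature_value_index(const uint64_t value_words[2],
                                            uint16_t pos, uint64_t mask)
{
        return (uint32_t)((value_words[pos / 64] & mask) >> (pos % 64));
}

The resulting index would then select an entry such as aui16_RGX_FEATURE_NUM_CLUSTERS_values[idx], where index 0 (RGX_FEATURE_VALUE_DISABLED) marks the feature as absent for that BVNC.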
+
+/******************************************************************************
+ * Table mapping bitmasks for ERNs/BRNs
+ *****************************************************************************/
+
+
+static const IMG_UINT64 gaErnsBrns[][2]=
+{
+       { IMG_UINT64_C(0x0001002700040013), IMG_UINT64_C(0x0000000000000005) }, /* 1.39.4.19 */
+       { IMG_UINT64_C(0x0001004b0002001e), IMG_UINT64_C(0x0000000000000004) }, /* 1.75.2.30 */
+       { IMG_UINT64_C(0x0001005200040005), IMG_UINT64_C(0x0000000000000000) }, /* 1.82.4.5 */
+       { IMG_UINT64_C(0x0004001f00040037), IMG_UINT64_C(0x000000000000108a) }, /* 4.31.4.55 */
+       { IMG_UINT64_C(0x0004002800020033), IMG_UINT64_C(0x000000000000108a) }, /* 4.40.2.51 */
+       { IMG_UINT64_C(0x0004002b0006003e), IMG_UINT64_C(0x000000000000508a) }, /* 4.43.6.62 */
+       { IMG_UINT64_C(0x0004002d0002003a), IMG_UINT64_C(0x000000000000500a) }, /* 4.45.2.58 */
+       { IMG_UINT64_C(0x0004002e0006003e), IMG_UINT64_C(0x000000000000508a) }, /* 4.46.6.62 */
+       { IMG_UINT64_C(0x000500090001002e), IMG_UINT64_C(0x0000000000000001) }, /* 5.9.1.46 */
+       { IMG_UINT64_C(0x0006002200040023), IMG_UINT64_C(0x000000000000100a) }, /* 6.34.4.35 */
+       { IMG_UINT64_C(0x000f000500010040), IMG_UINT64_C(0x0000000000004008) }, /* 15.5.1.64 */
+       { IMG_UINT64_C(0x0016001e00360019), IMG_UINT64_C(0x0000000000016b08) }, /* 22.30.54.25 */
+       { IMG_UINT64_C(0x001600280036001e), IMG_UINT64_C(0x0000000000016b08) }, /* 22.40.54.30 */
+       { IMG_UINT64_C(0x0016002e0036014a), IMG_UINT64_C(0x000000000001ea0a) }, /* 22.46.54.330 */
+       { IMG_UINT64_C(0x0016003100150010), IMG_UINT64_C(0x0000000000016b08) }, /* 22.49.21.16 */
+       { IMG_UINT64_C(0x001600430036001e), IMG_UINT64_C(0x0000000000016708) }, /* 22.67.54.30 */
+       { IMG_UINT64_C(0x001600440036001e), IMG_UINT64_C(0x0000000000016508) }, /* 22.68.54.30 */
+       { IMG_UINT64_C(0x00160056006800da), IMG_UINT64_C(0x000000000000e408) }, /* 22.86.104.218 */
+       { IMG_UINT64_C(0x0016005700680012), IMG_UINT64_C(0x0000000000006508) }, /* 22.87.104.18 */
+       { IMG_UINT64_C(0x0016006600360026), IMG_UINT64_C(0x0000000000006508) }, /* 22.102.54.38 */
+       { IMG_UINT64_C(0x0016006800d0013e), IMG_UINT64_C(0x000000000000e40a) }, /* 22.104.208.318 */
+       { IMG_UINT64_C(0x0016006900d0013e), IMG_UINT64_C(0x000000000000e40a) }, /* 22.105.208.318 */
+       { IMG_UINT64_C(0x0018003200d001f8), IMG_UINT64_C(0x000000000002210a) }, /* 24.50.208.504 */
+       { IMG_UINT64_C(0x0018003800d001f9), IMG_UINT64_C(0x000000000002210a) }, /* 24.56.208.505 */
+       { IMG_UINT64_C(0x00180042003600cc), IMG_UINT64_C(0x000000000002210a) }, /* 24.66.54.204 */
+       { IMG_UINT64_C(0x00180043006801f8), IMG_UINT64_C(0x000000000002210a) }, /* 24.67.104.504 */
+       { IMG_UINT64_C(0x001d000e006c00d0), IMG_UINT64_C(0x000000000006212a) }, /* 29.14.108.208 */
+       { IMG_UINT64_C(0x001d0013003400ca), IMG_UINT64_C(0x000000000006212a) }, /* 29.19.52.202 */
+       { IMG_UINT64_C(0x0021000800160001), IMG_UINT64_C(0x000000000000212a) }, /* 33.8.22.1 */
+       { IMG_UINT64_C(0x0021000f000b0003), IMG_UINT64_C(0x000000000000212a) }, /* 33.15.11.3 */
+       { IMG_UINT64_C(0x00240032003600b6), IMG_UINT64_C(0x000000000000212a) }, /* 36.50.54.182 */
+       { IMG_UINT64_C(0x00240034006800b6), IMG_UINT64_C(0x000000000000212a) }, /* 36.52.104.182 */
+       { IMG_UINT64_C(0x002400350068031c), IMG_UINT64_C(0x000000000000012a) }, /* 36.53.104.796 */
+       { IMG_UINT64_C(0x00240036003600b7), IMG_UINT64_C(0x000000000000212a) }, /* 36.54.54.183 */
+       { IMG_UINT64_C(0x0024003700360067), IMG_UINT64_C(0x000000000000212a) }, /* 36.55.54.103 */
+       { IMG_UINT64_C(0x00240038006800b7), IMG_UINT64_C(0x000000000000212a) }, /* 36.56.104.183 */
+};
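The first column of both gaFeatures and gaErnsBrns is the B.V.N.C packed into a single 64-bit key, BVNC_FIELD_WIDTH (16) bits per component; for example the key 0x0001002700040013 above corresponds to 1.39.4.19. A small sketch of that packing (names are illustrative):

#include <stdint.h>

#define EXAMPLE_BVNC_FIELD_WIDTH 16U    /* mirrors BVNC_FIELD_WIDTH */

/* Illustrative only: pack B.V.N.C into the 64-bit table-key layout,
 * e.g. example_pack_bvnc(1, 39, 4, 19) == 0x0001002700040013. */
static uint64_t example_pack_bvnc(uint16_t b, uint16_t v, uint16_t n, uint16_t c)
{
        return ((uint64_t)b << (3U * EXAMPLE_BVNC_FIELD_WIDTH)) |
               ((uint64_t)v << (2U * EXAMPLE_BVNC_FIELD_WIDTH)) |
               ((uint64_t)n << (1U * EXAMPLE_BVNC_FIELD_WIDTH)) |
                (uint64_t)c;
}

The detected core's key is presumably compared against these tables at runtime to pick up its feature and ERN/BRN words.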
+
+#if defined(DEBUG)
+
+#define        FEATURE_NO_VALUES_NAMES_MAX_IDX (56)
+
+static const IMG_CHAR * const gaszFeaturesNoValuesNames[FEATURE_NO_VALUES_NAMES_MAX_IDX] =
+{
+       "AXI_ACELITE",
+       "CLUSTER_GROUPING",
+       "COMPUTE",
+       "COMPUTE_MORTON_CAPABLE",
+       "COMPUTE_ONLY",
+       "COMPUTE_OVERLAP",
+       "COMPUTE_OVERLAP_WITH_BARRIERS",
+       "COREID_PER_OS",
+       "DUST_POWER_ISLAND_S7",
+       "DYNAMIC_DUST_POWER",
+       "FASTRENDER_DM",
+       "GPU_MULTICORE_SUPPORT",
+       "GPU_VIRTUALISATION",
+       "GS_RTA_SUPPORT",
+       "IRQ_PER_OS",
+       "META_DMA",
+       "MIPS",
+       "PBE2_IN_XE",
+       "PBE_CHECKSUM_2D",
+       "PBVNC_COREID_REG",
+       "PDS_PER_DUST",
+       "PDS_TEMPSIZE8",
+       "PERFBUS",
+       "PERF_COUNTER_BATCH",
+       "PM_MMU_VFP",
+       "RISCV_FW_PROCESSOR",
+       "ROGUEXE",
+       "S7_CACHE_HIERARCHY",
+       "S7_TOP_INFRASTRUCTURE",
+       "SCALABLE_VDM_GPP",
+       "SIGNAL_SNOOPING",
+       "SIMPLE_INTERNAL_PARAMETER_FORMAT",
+       "SIMPLE_INTERNAL_PARAMETER_FORMAT_V1",
+       "SIMPLE_INTERNAL_PARAMETER_FORMAT_V2",
+       "SINGLE_BIF",
+       "SLC_HYBRID_CACHELINE_64_128",
+       "SLC_SIZE_CONFIGURABLE",
+       "SLC_VIVT",
+       "SOC_TIMER",
+       "SYS_BUS_SECURE_RESET",
+       "TDM_PDS_CHECKSUM",
+       "TESSELLATION",
+       "TFBC_DELTA_CORRELATION",
+       "TFBC_LOSSY_37_PERCENT",
+       "TFBC_NATIVE_YUV10",
+       "TILE_REGION_PROTECTION",
+       "TLA",
+       "TPU_CEM_DATAMASTER_GLOBAL_REGISTERS",
+       "TPU_DM_GLOBAL_REGISTERS",
+       "TPU_FILTERING_MODE_CONTROL",
+       "VDM_DRAWINDIRECT",
+       "VDM_OBJECT_LEVEL_LLS",
+       "WATCHDOG_TIMER",
+       "WORKGROUP_PROTECTION",
+       "XE_MEMORY_HIERARCHY",
+       "XT_TOP_INFRASTRUCTURE",
+};
+
+#define        ERNSBRNS_IDS_MAX_IDX    (19)
+
+static const IMG_UINT32 gaui64ErnsBrnsIDs[ERNSBRNS_IDS_MAX_IDX] =
+{
+       38344,
+       42290,
+       42321,
+       42606,
+       46066,
+       47025,
+       50539,
+       50767,
+       57596,
+       60084,
+       61389,
+       61450,
+       63142,
+       63553,
+       64502,
+       65101,
+       65273,
+       66622,
+       68186,
+};
+
+#endif /* defined(DEBUG) */
+#endif /* RGX_BVNC_TABLE_KM_H */
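Under DEBUG, gaui64ErnsBrnsIDs pairs by position with the ERN/BRN bit definitions in rgx_bvnc_defs_km.h (38344 at bit 0 through 68186 at bit 18). A hedged sketch, assuming that pairing, of how the workaround word from a matched gaErnsBrns row could be reported; the helper is illustrative and not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: 'erns_brns_word' is column [1] of a matched gaErnsBrns
 * row; 'ids' is the gaui64ErnsBrnsIDs table with 'num_ids' entries. */
static void example_print_erns_brns(uint64_t erns_brns_word,
                                    const uint32_t *ids, unsigned int num_ids)
{
        unsigned int i;

        for (i = 0; i < num_ids; i++) {
                if (erns_brns_word & (UINT64_C(1) << i))
                        printf("ERN/BRN %u applies\n", ids[i]);
        }
}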
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/rgx_cr_defs_km.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/rgx_cr_defs_km.h
new file mode 100644 (file)
index 0000000..2464d91
--- /dev/null
@@ -0,0 +1,8077 @@
+/*************************************************************************/ /*!
+@Title          Hardware definition file rgx_cr_defs_km.h
+@Brief          The file contains auto-generated hardware definitions without
+                BVNC-specific compile time conditionals.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/*               ****   Autogenerated C -- do not edit    ****               */
+
+/*
+ */
+
+
+#ifndef RGX_CR_DEFS_KM_H
+#define RGX_CR_DEFS_KM_H
+
+#if !defined(IMG_EXPLICIT_INCLUDE_HWDEFS)
+#error This file may only be included if explicitly defined
+#endif
+
+#include "img_types.h"
+#include "img_defs.h"
+
+
+#define RGX_CR_DEFS_KM_REVISION 1
+
+/*
+    Register RGX_CR_RASTERISATION_INDIRECT
+*/
+#define RGX_CR_RASTERISATION_INDIRECT                     (0x8238U)
+#define RGX_CR_RASTERISATION_INDIRECT_MASKFULL            (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_RASTERISATION_INDIRECT_ADDRESS_SHIFT       (0U)
+#define RGX_CR_RASTERISATION_INDIRECT_ADDRESS_CLRMSK      (0xFFFFFFF0U)
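+
+/*
+    Usage sketch (illustrative only; the variable names are hypothetical):
+    each field's _SHIFT/_CLRMSK pair is typically combined as
+
+        ui32Reg = (ui32Reg & RGX_CR_RASTERISATION_INDIRECT_ADDRESS_CLRMSK) |
+                  (ui32Unit << RGX_CR_RASTERISATION_INDIRECT_ADDRESS_SHIFT);
+
+    and a field is read back with
+
+        ui32Unit = (ui32Reg & ~RGX_CR_RASTERISATION_INDIRECT_ADDRESS_CLRMSK) >>
+                   RGX_CR_RASTERISATION_INDIRECT_ADDRESS_SHIFT;
+*/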
+
+
+/*
+    Register RGX_CR_USC_INDIRECT
+*/
+#define RGX_CR_USC_INDIRECT                               (0x8000U)
+#define RGX_CR_USC_INDIRECT_MASKFULL                      (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_USC_INDIRECT_ADDRESS_SHIFT                 (0U)
+#define RGX_CR_USC_INDIRECT_ADDRESS_CLRMSK                (0xFFFFFFF0U)
+
+
+/*
+    Register RGX_CR_PBE_INDIRECT
+*/
+#define RGX_CR_PBE_INDIRECT                               (0x83E0U)
+#define RGX_CR_PBE_INDIRECT_MASKFULL                      (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_PBE_INDIRECT_ADDRESS_SHIFT                 (0U)
+#define RGX_CR_PBE_INDIRECT_ADDRESS_CLRMSK                (0xFFFFFFF0U)
+
+
+/*
+    Register RGX_CR_PBE_PERF_INDIRECT
+*/
+#define RGX_CR_PBE_PERF_INDIRECT                          (0x83D8U)
+#define RGX_CR_PBE_PERF_INDIRECT_MASKFULL                 (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_PBE_PERF_INDIRECT_ADDRESS_SHIFT            (0U)
+#define RGX_CR_PBE_PERF_INDIRECT_ADDRESS_CLRMSK           (0xFFFFFFF0U)
+
+
+/*
+    Register RGX_CR_TPU_PERF_INDIRECT
+*/
+#define RGX_CR_TPU_PERF_INDIRECT                          (0x83F0U)
+#define RGX_CR_TPU_PERF_INDIRECT_MASKFULL                 (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_TPU_PERF_INDIRECT_ADDRESS_SHIFT            (0U)
+#define RGX_CR_TPU_PERF_INDIRECT_ADDRESS_CLRMSK           (0xFFFFFFF8U)
+
+
+/*
+    Register RGX_CR_RASTERISATION_PERF_INDIRECT
+*/
+#define RGX_CR_RASTERISATION_PERF_INDIRECT                (0x8318U)
+#define RGX_CR_RASTERISATION_PERF_INDIRECT_MASKFULL       (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_SHIFT  (0U)
+#define RGX_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF0U)
+
+
+/*
+    Register RGX_CR_TPU_MCU_L0_PERF_INDIRECT
+*/
+#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT                   (0x8028U)
+#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_MASKFULL          (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_SHIFT     (0U)
+#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_CLRMSK    (0xFFFFFFF8U)
+
+
+/*
+    Register RGX_CR_USC_PERF_INDIRECT
+*/
+#define RGX_CR_USC_PERF_INDIRECT                          (0x8030U)
+#define RGX_CR_USC_PERF_INDIRECT_MASKFULL                 (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_USC_PERF_INDIRECT_ADDRESS_SHIFT            (0U)
+#define RGX_CR_USC_PERF_INDIRECT_ADDRESS_CLRMSK           (0xFFFFFFF0U)
+
+
+/*
+    Register RGX_CR_BLACKPEARL_INDIRECT
+*/
+#define RGX_CR_BLACKPEARL_INDIRECT                        (0x8388U)
+#define RGX_CR_BLACKPEARL_INDIRECT_MASKFULL               (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_BLACKPEARL_INDIRECT_ADDRESS_SHIFT          (0U)
+#define RGX_CR_BLACKPEARL_INDIRECT_ADDRESS_CLRMSK         (0xFFFFFFFCU)
+
+
+/*
+    Register RGX_CR_BLACKPEARL_PERF_INDIRECT
+*/
+#define RGX_CR_BLACKPEARL_PERF_INDIRECT                   (0x83F8U)
+#define RGX_CR_BLACKPEARL_PERF_INDIRECT_MASKFULL          (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_SHIFT     (0U)
+#define RGX_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_CLRMSK    (0xFFFFFFFCU)
+
+
+/*
+    Register RGX_CR_TEXAS3_PERF_INDIRECT
+*/
+#define RGX_CR_TEXAS3_PERF_INDIRECT                       (0x83D0U)
+#define RGX_CR_TEXAS3_PERF_INDIRECT_MASKFULL              (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_TEXAS3_PERF_INDIRECT_ADDRESS_SHIFT         (0U)
+#define RGX_CR_TEXAS3_PERF_INDIRECT_ADDRESS_CLRMSK        (0xFFFFFFF8U)
+
+
+/*
+    Register RGX_CR_TEXAS_PERF_INDIRECT
+*/
+#define RGX_CR_TEXAS_PERF_INDIRECT                        (0x8288U)
+#define RGX_CR_TEXAS_PERF_INDIRECT_MASKFULL               (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_TEXAS_PERF_INDIRECT_ADDRESS_SHIFT          (0U)
+#define RGX_CR_TEXAS_PERF_INDIRECT_ADDRESS_CLRMSK         (0xFFFFFFFCU)
+
+
+/*
+    Register RGX_CR_BX_TU_PERF_INDIRECT
+*/
+#define RGX_CR_BX_TU_PERF_INDIRECT                        (0xC900U)
+#define RGX_CR_BX_TU_PERF_INDIRECT_MASKFULL               (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_BX_TU_PERF_INDIRECT_ADDRESS_SHIFT          (0U)
+#define RGX_CR_BX_TU_PERF_INDIRECT_ADDRESS_CLRMSK         (0xFFFFFFFCU)
+
+
+/*
+    Register RGX_CR_CLK_CTRL
+*/
+#define RGX_CR_CLK_CTRL                                   (0x0000U)
+#define RGX_CR_CLK_CTRL__PBE2_XE__MASKFULL                (IMG_UINT64_C(0xFFFFFF003F3FFFFF))
+#define RGX_CR_CLK_CTRL__S7_TOP__MASKFULL                 (IMG_UINT64_C(0xCFCF03000F3F3F0F))
+#define RGX_CR_CLK_CTRL_MASKFULL                          (IMG_UINT64_C(0xFFFFFF003F3FFFFF))
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_SHIFT                   (62U)
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_CLRMSK                  (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_OFF                     (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_ON                      (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_AUTO                    (IMG_UINT64_C(0x8000000000000000))
+#define RGX_CR_CLK_CTRL_IPP_SHIFT                         (60U)
+#define RGX_CR_CLK_CTRL_IPP_CLRMSK                        (IMG_UINT64_C(0xCFFFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_IPP_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_IPP_ON                            (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_CLK_CTRL_IPP_AUTO                          (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_CLK_CTRL_FBC_SHIFT                         (58U)
+#define RGX_CR_CLK_CTRL_FBC_CLRMSK                        (IMG_UINT64_C(0xF3FFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_FBC_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_FBC_ON                            (IMG_UINT64_C(0x0400000000000000))
+#define RGX_CR_CLK_CTRL_FBC_AUTO                          (IMG_UINT64_C(0x0800000000000000))
+#define RGX_CR_CLK_CTRL_FBDC_SHIFT                        (56U)
+#define RGX_CR_CLK_CTRL_FBDC_CLRMSK                       (IMG_UINT64_C(0xFCFFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_FBDC_OFF                          (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_FBDC_ON                           (IMG_UINT64_C(0x0100000000000000))
+#define RGX_CR_CLK_CTRL_FBDC_AUTO                         (IMG_UINT64_C(0x0200000000000000))
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_SHIFT                  (54U)
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_CLRMSK                 (IMG_UINT64_C(0xFF3FFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_OFF                    (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_ON                     (IMG_UINT64_C(0x0040000000000000))
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_AUTO                   (IMG_UINT64_C(0x0080000000000000))
+#define RGX_CR_CLK_CTRL_USCS_SHIFT                        (52U)
+#define RGX_CR_CLK_CTRL_USCS_CLRMSK                       (IMG_UINT64_C(0xFFCFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_USCS_OFF                          (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_USCS_ON                           (IMG_UINT64_C(0x0010000000000000))
+#define RGX_CR_CLK_CTRL_USCS_AUTO                         (IMG_UINT64_C(0x0020000000000000))
+#define RGX_CR_CLK_CTRL_PBE_SHIFT                         (50U)
+#define RGX_CR_CLK_CTRL_PBE_CLRMSK                        (IMG_UINT64_C(0xFFF3FFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_PBE_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_PBE_ON                            (IMG_UINT64_C(0x0004000000000000))
+#define RGX_CR_CLK_CTRL_PBE_AUTO                          (IMG_UINT64_C(0x0008000000000000))
+#define RGX_CR_CLK_CTRL_MCU_L1_SHIFT                      (48U)
+#define RGX_CR_CLK_CTRL_MCU_L1_CLRMSK                     (IMG_UINT64_C(0xFFFCFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_MCU_L1_OFF                        (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_MCU_L1_ON                         (IMG_UINT64_C(0x0001000000000000))
+#define RGX_CR_CLK_CTRL_MCU_L1_AUTO                       (IMG_UINT64_C(0x0002000000000000))
+#define RGX_CR_CLK_CTRL_CDM_SHIFT                         (46U)
+#define RGX_CR_CLK_CTRL_CDM_CLRMSK                        (IMG_UINT64_C(0xFFFF3FFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_CDM_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_CDM_ON                            (IMG_UINT64_C(0x0000400000000000))
+#define RGX_CR_CLK_CTRL_CDM_AUTO                          (IMG_UINT64_C(0x0000800000000000))
+#define RGX_CR_CLK_CTRL_SIDEKICK_SHIFT                    (44U)
+#define RGX_CR_CLK_CTRL_SIDEKICK_CLRMSK                   (IMG_UINT64_C(0xFFFFCFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_SIDEKICK_OFF                      (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_SIDEKICK_ON                       (IMG_UINT64_C(0x0000100000000000))
+#define RGX_CR_CLK_CTRL_SIDEKICK_AUTO                     (IMG_UINT64_C(0x0000200000000000))
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_SHIFT                (42U)
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_CLRMSK               (IMG_UINT64_C(0xFFFFF3FFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_OFF                  (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_ON                   (IMG_UINT64_C(0x0000040000000000))
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_AUTO                 (IMG_UINT64_C(0x0000080000000000))
+#define RGX_CR_CLK_CTRL_BIF_SHIFT                         (40U)
+#define RGX_CR_CLK_CTRL_BIF_CLRMSK                        (IMG_UINT64_C(0xFFFFFCFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_BIF_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_BIF_ON                            (IMG_UINT64_C(0x0000010000000000))
+#define RGX_CR_CLK_CTRL_BIF_AUTO                          (IMG_UINT64_C(0x0000020000000000))
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_SHIFT               (28U)
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFCFFFFFFF))
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_OFF                 (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_ON                  (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_AUTO                (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_CLK_CTRL_MCU_L0_SHIFT                      (26U)
+#define RGX_CR_CLK_CTRL_MCU_L0_CLRMSK                     (IMG_UINT64_C(0xFFFFFFFFF3FFFFFF))
+#define RGX_CR_CLK_CTRL_MCU_L0_OFF                        (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_MCU_L0_ON                         (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_CLK_CTRL_MCU_L0_AUTO                       (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_CLK_CTRL_TPU_SHIFT                         (24U)
+#define RGX_CR_CLK_CTRL_TPU_CLRMSK                        (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF))
+#define RGX_CR_CLK_CTRL_TPU_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_TPU_ON                            (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_CLK_CTRL_TPU_AUTO                          (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_CLK_CTRL_USC_SHIFT                         (20U)
+#define RGX_CR_CLK_CTRL_USC_CLRMSK                        (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF))
+#define RGX_CR_CLK_CTRL_USC_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_USC_ON                            (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_CLK_CTRL_USC_AUTO                          (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_CLK_CTRL_TLA_SHIFT                         (18U)
+#define RGX_CR_CLK_CTRL_TLA_CLRMSK                        (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF))
+#define RGX_CR_CLK_CTRL_TLA_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_TLA_ON                            (IMG_UINT64_C(0x0000000000040000))
+#define RGX_CR_CLK_CTRL_TLA_AUTO                          (IMG_UINT64_C(0x0000000000080000))
+#define RGX_CR_CLK_CTRL_SLC_SHIFT                         (16U)
+#define RGX_CR_CLK_CTRL_SLC_CLRMSK                        (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF))
+#define RGX_CR_CLK_CTRL_SLC_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_SLC_ON                            (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_CLK_CTRL_SLC_AUTO                          (IMG_UINT64_C(0x0000000000020000))
+#define RGX_CR_CLK_CTRL_UVS_SHIFT                         (14U)
+#define RGX_CR_CLK_CTRL_UVS_CLRMSK                        (IMG_UINT64_C(0xFFFFFFFFFFFF3FFF))
+#define RGX_CR_CLK_CTRL_UVS_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_UVS_ON                            (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_CLK_CTRL_UVS_AUTO                          (IMG_UINT64_C(0x0000000000008000))
+#define RGX_CR_CLK_CTRL_PDS_SHIFT                         (12U)
+#define RGX_CR_CLK_CTRL_PDS_CLRMSK                        (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF))
+#define RGX_CR_CLK_CTRL_PDS_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_PDS_ON                            (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_CLK_CTRL_PDS_AUTO                          (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_CLK_CTRL_VDM_SHIFT                         (10U)
+#define RGX_CR_CLK_CTRL_VDM_CLRMSK                        (IMG_UINT64_C(0xFFFFFFFFFFFFF3FF))
+#define RGX_CR_CLK_CTRL_VDM_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_VDM_ON                            (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_CTRL_VDM_AUTO                          (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_CLK_CTRL_PM_SHIFT                          (8U)
+#define RGX_CR_CLK_CTRL_PM_CLRMSK                         (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF))
+#define RGX_CR_CLK_CTRL_PM_OFF                            (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_PM_ON                             (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_CTRL_PM_AUTO                           (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_CTRL_GPP_SHIFT                         (6U)
+#define RGX_CR_CLK_CTRL_GPP_CLRMSK                        (IMG_UINT64_C(0xFFFFFFFFFFFFFF3F))
+#define RGX_CR_CLK_CTRL_GPP_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_GPP_ON                            (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_CLK_CTRL_GPP_AUTO                          (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_CLK_CTRL_TE_SHIFT                          (4U)
+#define RGX_CR_CLK_CTRL_TE_CLRMSK                         (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF))
+#define RGX_CR_CLK_CTRL_TE_OFF                            (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_TE_ON                             (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_CTRL_TE_AUTO                           (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_CLK_CTRL_TSP_SHIFT                         (2U)
+#define RGX_CR_CLK_CTRL_TSP_CLRMSK                        (IMG_UINT64_C(0xFFFFFFFFFFFFFFF3))
+#define RGX_CR_CLK_CTRL_TSP_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_TSP_ON                            (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_CLK_CTRL_TSP_AUTO                          (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_CLK_CTRL_ISP_SHIFT                         (0U)
+#define RGX_CR_CLK_CTRL_ISP_CLRMSK                        (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
+#define RGX_CR_CLK_CTRL_ISP_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL_ISP_ON                            (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_CLK_CTRL_ISP_AUTO                          (IMG_UINT64_C(0x0000000000000002))
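+
+/*
+    Usage sketch (illustrative only; ui64ClkCtrl is a hypothetical variable):
+    every clock domain above is a two-bit field with pre-shifted OFF/ON/AUTO
+    values, so forcing e.g. the ISP clock on while leaving the other domains
+    unchanged can be written as
+
+        ui64ClkCtrl = (ui64ClkCtrl & RGX_CR_CLK_CTRL_ISP_CLRMSK) |
+                      RGX_CR_CLK_CTRL_ISP_ON;
+*/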
+
+
+/*
+    Register RGX_CR_CLK_STATUS
+*/
+#define RGX_CR_CLK_STATUS                                 (0x0008U)
+#define RGX_CR_CLK_STATUS__PBE2_XE__MASKFULL              (IMG_UINT64_C(0x00000001FFF077FF))
+#define RGX_CR_CLK_STATUS__S7_TOP__MASKFULL               (IMG_UINT64_C(0x00000001B3101773))
+#define RGX_CR_CLK_STATUS_MASKFULL                        (IMG_UINT64_C(0x00000001FFF077FF))
+#define RGX_CR_CLK_STATUS_MCU_FBTC_SHIFT                  (32U)
+#define RGX_CR_CLK_STATUS_MCU_FBTC_CLRMSK                 (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_CLK_STATUS_MCU_FBTC_GATED                  (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_MCU_FBTC_RUNNING                (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_SHIFT                 (31U)
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_CLRMSK                (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF))
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_GATED                 (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_RUNNING               (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_CLK_STATUS_IPP_SHIFT                       (30U)
+#define RGX_CR_CLK_STATUS_IPP_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF))
+#define RGX_CR_CLK_STATUS_IPP_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_IPP_RUNNING                     (IMG_UINT64_C(0x0000000040000000))
+#define RGX_CR_CLK_STATUS_FBC_SHIFT                       (29U)
+#define RGX_CR_CLK_STATUS_FBC_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF))
+#define RGX_CR_CLK_STATUS_FBC_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_FBC_RUNNING                     (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_CLK_STATUS_FBDC_SHIFT                      (28U)
+#define RGX_CR_CLK_STATUS_FBDC_CLRMSK                     (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF))
+#define RGX_CR_CLK_STATUS_FBDC_GATED                      (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_FBDC_RUNNING                    (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_SHIFT                (27U)
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_CLRMSK               (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF))
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_GATED                (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_RUNNING              (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_CLK_STATUS_USCS_SHIFT                      (26U)
+#define RGX_CR_CLK_STATUS_USCS_CLRMSK                     (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF))
+#define RGX_CR_CLK_STATUS_USCS_GATED                      (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_USCS_RUNNING                    (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_CLK_STATUS_PBE_SHIFT                       (25U)
+#define RGX_CR_CLK_STATUS_PBE_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF))
+#define RGX_CR_CLK_STATUS_PBE_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_PBE_RUNNING                     (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_CLK_STATUS_MCU_L1_SHIFT                    (24U)
+#define RGX_CR_CLK_STATUS_MCU_L1_CLRMSK                   (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF))
+#define RGX_CR_CLK_STATUS_MCU_L1_GATED                    (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_MCU_L1_RUNNING                  (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_CLK_STATUS_CDM_SHIFT                       (23U)
+#define RGX_CR_CLK_STATUS_CDM_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF))
+#define RGX_CR_CLK_STATUS_CDM_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_CDM_RUNNING                     (IMG_UINT64_C(0x0000000000800000))
+#define RGX_CR_CLK_STATUS_SIDEKICK_SHIFT                  (22U)
+#define RGX_CR_CLK_STATUS_SIDEKICK_CLRMSK                 (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF))
+#define RGX_CR_CLK_STATUS_SIDEKICK_GATED                  (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_SIDEKICK_RUNNING                (IMG_UINT64_C(0x0000000000400000))
+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_SHIFT              (21U)
+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_GATED              (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_RUNNING            (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_CLK_STATUS_BIF_SHIFT                       (20U)
+#define RGX_CR_CLK_STATUS_BIF_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF))
+#define RGX_CR_CLK_STATUS_BIF_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_BIF_RUNNING                     (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_SHIFT             (14U)
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF))
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_GATED             (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_RUNNING           (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_CLK_STATUS_MCU_L0_SHIFT                    (13U)
+#define RGX_CR_CLK_STATUS_MCU_L0_CLRMSK                   (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF))
+#define RGX_CR_CLK_STATUS_MCU_L0_GATED                    (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_MCU_L0_RUNNING                  (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_CLK_STATUS_TPU_SHIFT                       (12U)
+#define RGX_CR_CLK_STATUS_TPU_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF))
+#define RGX_CR_CLK_STATUS_TPU_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_TPU_RUNNING                     (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_CLK_STATUS_USC_SHIFT                       (10U)
+#define RGX_CR_CLK_STATUS_USC_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF))
+#define RGX_CR_CLK_STATUS_USC_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_USC_RUNNING                     (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_STATUS_TLA_SHIFT                       (9U)
+#define RGX_CR_CLK_STATUS_TLA_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF))
+#define RGX_CR_CLK_STATUS_TLA_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_TLA_RUNNING                     (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_STATUS_SLC_SHIFT                       (8U)
+#define RGX_CR_CLK_STATUS_SLC_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF))
+#define RGX_CR_CLK_STATUS_SLC_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_SLC_RUNNING                     (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_STATUS_UVS_SHIFT                       (7U)
+#define RGX_CR_CLK_STATUS_UVS_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F))
+#define RGX_CR_CLK_STATUS_UVS_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_UVS_RUNNING                     (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_CLK_STATUS_PDS_SHIFT                       (6U)
+#define RGX_CR_CLK_STATUS_PDS_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF))
+#define RGX_CR_CLK_STATUS_PDS_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_PDS_RUNNING                     (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_CLK_STATUS_VDM_SHIFT                       (5U)
+#define RGX_CR_CLK_STATUS_VDM_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_CLK_STATUS_VDM_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_VDM_RUNNING                     (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_CLK_STATUS_PM_SHIFT                        (4U)
+#define RGX_CR_CLK_STATUS_PM_CLRMSK                       (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
+#define RGX_CR_CLK_STATUS_PM_GATED                        (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_PM_RUNNING                      (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_STATUS_GPP_SHIFT                       (3U)
+#define RGX_CR_CLK_STATUS_GPP_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_CR_CLK_STATUS_GPP_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_GPP_RUNNING                     (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_CLK_STATUS_TE_SHIFT                        (2U)
+#define RGX_CR_CLK_STATUS_TE_CLRMSK                       (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_CR_CLK_STATUS_TE_GATED                        (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_TE_RUNNING                      (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_CLK_STATUS_TSP_SHIFT                       (1U)
+#define RGX_CR_CLK_STATUS_TSP_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_CLK_STATUS_TSP_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_TSP_RUNNING                     (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_CLK_STATUS_ISP_SHIFT                       (0U)
+#define RGX_CR_CLK_STATUS_ISP_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_CLK_STATUS_ISP_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS_ISP_RUNNING                     (IMG_UINT64_C(0x0000000000000001))
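+
+/*
+    Usage sketch (illustrative only; ui64ClkStatus is a hypothetical variable):
+    each domain above is a one-bit field with pre-shifted GATED/RUNNING values,
+    so a domain can be tested with
+
+        if ((ui64ClkStatus & ~RGX_CR_CLK_STATUS_ISP_CLRMSK) ==
+            RGX_CR_CLK_STATUS_ISP_RUNNING)
+*/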
+
+
+/*
+    Register RGX_CR_CORE_ID
+*/
+#define RGX_CR_CORE_ID__PBVNC                             (0x0020U)
+#define RGX_CR_CORE_ID__PBVNC__MASKFULL                   (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_CORE_ID__PBVNC__BRANCH_ID_SHIFT            (48U)
+#define RGX_CR_CORE_ID__PBVNC__BRANCH_ID_CLRMSK           (IMG_UINT64_C(0x0000FFFFFFFFFFFF))
+#define RGX_CR_CORE_ID__PBVNC__VERSION_ID_SHIFT           (32U)
+#define RGX_CR_CORE_ID__PBVNC__VERSION_ID_CLRMSK          (IMG_UINT64_C(0xFFFF0000FFFFFFFF))
+#define RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_SHIFT (16U)
+#define RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000FFFF))
+#define RGX_CR_CORE_ID__PBVNC__CONFIG_ID_SHIFT            (0U)
+#define RGX_CR_CORE_ID__PBVNC__CONFIG_ID_CLRMSK           (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_CORE_ID
+*/
+#define RGX_CR_CORE_ID                                    (0x0018U)
+#define RGX_CR_CORE_ID_MASKFULL                           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_CORE_ID_ID_SHIFT                           (16U)
+#define RGX_CR_CORE_ID_ID_CLRMSK                          (0x0000FFFFU)
+#define RGX_CR_CORE_ID_CONFIG_SHIFT                       (0U)
+#define RGX_CR_CORE_ID_CONFIG_CLRMSK                      (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_CORE_REVISION
+*/
+#define RGX_CR_CORE_REVISION                              (0x0020U)
+#define RGX_CR_CORE_REVISION_MASKFULL                     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_CORE_REVISION_DESIGNER_SHIFT               (24U)
+#define RGX_CR_CORE_REVISION_DESIGNER_CLRMSK              (0x00FFFFFFU)
+#define RGX_CR_CORE_REVISION_MAJOR_SHIFT                  (16U)
+#define RGX_CR_CORE_REVISION_MAJOR_CLRMSK                 (0xFF00FFFFU)
+#define RGX_CR_CORE_REVISION_MINOR_SHIFT                  (8U)
+#define RGX_CR_CORE_REVISION_MINOR_CLRMSK                 (0xFFFF00FFU)
+#define RGX_CR_CORE_REVISION_MAINTENANCE_SHIFT            (0U)
+#define RGX_CR_CORE_REVISION_MAINTENANCE_CLRMSK           (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_DESIGNER_REV_FIELD1
+*/
+#define RGX_CR_DESIGNER_REV_FIELD1                        (0x0028U)
+#define RGX_CR_DESIGNER_REV_FIELD1_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT (0U)
+#define RGX_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_CLRMSK (0x00000000U)
+
+
+/*
+    Register RGX_CR_DESIGNER_REV_FIELD2
+*/
+#define RGX_CR_DESIGNER_REV_FIELD2                        (0x0030U)
+#define RGX_CR_DESIGNER_REV_FIELD2_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT (0U)
+#define RGX_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_CLRMSK (0x00000000U)
+
+
+/*
+    Register RGX_CR_CHANGESET_NUMBER
+*/
+#define RGX_CR_CHANGESET_NUMBER                           (0x0040U)
+#define RGX_CR_CHANGESET_NUMBER_MASKFULL                  (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_SHIFT    (0U)
+#define RGX_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_CLRMSK   (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_SOC_TIMER_GRAY
+*/
+#define RGX_CR_SOC_TIMER_GRAY                             (0x00E0U)
+#define RGX_CR_SOC_TIMER_GRAY_MASKFULL                    (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SOC_TIMER_GRAY_VALUE_SHIFT                 (0U)
+#define RGX_CR_SOC_TIMER_GRAY_VALUE_CLRMSK                (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_SOC_TIMER_BINARY
+*/
+#define RGX_CR_SOC_TIMER_BINARY                           (0x00E8U)
+#define RGX_CR_SOC_TIMER_BINARY_MASKFULL                  (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SOC_TIMER_BINARY_VALUE_SHIFT               (0U)
+#define RGX_CR_SOC_TIMER_BINARY_VALUE_CLRMSK              (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_CLK_XTPLUS_CTRL
+*/
+#define RGX_CR_CLK_XTPLUS_CTRL                            (0x0080U)
+#define RGX_CR_CLK_XTPLUS_CTRL_MASKFULL                   (IMG_UINT64_C(0x0000003FFFFF0000))
+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_SHIFT                  (36U)
+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_CLRMSK                 (IMG_UINT64_C(0xFFFFFFCFFFFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_OFF                    (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_ON                     (IMG_UINT64_C(0x0000001000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_AUTO                   (IMG_UINT64_C(0x0000002000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_SHIFT                 (34U)
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_CLRMSK                (IMG_UINT64_C(0xFFFFFFF3FFFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_OFF                   (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_ON                    (IMG_UINT64_C(0x0000000400000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_AUTO                  (IMG_UINT64_C(0x0000000800000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_SHIFT                  (32U)
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_CLRMSK                 (IMG_UINT64_C(0xFFFFFFFCFFFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_OFF                    (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_ON                     (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_AUTO                   (IMG_UINT64_C(0x0000000200000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_SHIFT              (30U)
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_CLRMSK             (IMG_UINT64_C(0xFFFFFFFF3FFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_OFF                (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_ON                 (IMG_UINT64_C(0x0000000040000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_AUTO               (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_SHIFT                (28U)
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_CLRMSK               (IMG_UINT64_C(0xFFFFFFFFCFFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_OFF                  (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_ON                   (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_AUTO                 (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_SHIFT               (26U)
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFF3FFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_OFF                 (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_ON                  (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_AUTO                (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_SHIFT                (24U)
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_CLRMSK               (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_OFF                  (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_ON                   (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_AUTO                 (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_SHIFT           (22U)
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFF3FFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_OFF             (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_ON              (IMG_UINT64_C(0x0000000000400000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_AUTO            (IMG_UINT64_C(0x0000000000800000))
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_SHIFT       (20U)
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_CLRMSK      (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_OFF         (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_ON          (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_AUTO        (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_SHIFT           (18U)
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_OFF             (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_ON              (IMG_UINT64_C(0x0000000000040000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_AUTO            (IMG_UINT64_C(0x0000000000080000))
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_SHIFT             (16U)
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_OFF               (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_ON                (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_AUTO              (IMG_UINT64_C(0x0000000000020000))
+
+
+/*
+    Register RGX_CR_CLK_XTPLUS_STATUS
+*/
+#define RGX_CR_CLK_XTPLUS_STATUS                          (0x0088U)
+#define RGX_CR_CLK_XTPLUS_STATUS_MASKFULL                 (IMG_UINT64_C(0x00000000000007FF))
+#define RGX_CR_CLK_XTPLUS_STATUS_TDM_SHIFT                (10U)
+#define RGX_CR_CLK_XTPLUS_STATUS_TDM_CLRMSK               (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF))
+#define RGX_CR_CLK_XTPLUS_STATUS_TDM_GATED                (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_TDM_RUNNING              (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_XTPLUS_STATUS_IPF_SHIFT                (9U)
+#define RGX_CR_CLK_XTPLUS_STATUS_IPF_CLRMSK               (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF))
+#define RGX_CR_CLK_XTPLUS_STATUS_IPF_GATED                (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_IPF_RUNNING              (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_SHIFT            (8U)
+#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_CLRMSK           (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF))
+#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_GATED            (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_RUNNING          (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_SHIFT               (7U)
+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F))
+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_GATED               (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_RUNNING             (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_SHIFT              (6U)
+#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF))
+#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_GATED              (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_RUNNING            (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_SHIFT             (5U)
+#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_GATED             (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_RUNNING           (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_SHIFT              (4U)
+#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
+#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_GATED              (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_RUNNING            (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_SHIFT         (3U)
+#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_GATED         (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_RUNNING       (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_SHIFT     (2U)
+#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_CLRMSK    (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_GATED     (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_RUNNING   (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_SHIFT         (1U)
+#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_GATED         (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_RUNNING       (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_SHIFT           (0U)
+#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_GATED           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_RUNNING         (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_SOFT_RESET
+*/
+#define RGX_CR_SOFT_RESET                                 (0x0100U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__MASKFULL              (IMG_UINT64_C(0xFFEFFFFFFFFFFC3D))
+#define RGX_CR_SOFT_RESET_MASKFULL                        (IMG_UINT64_C(0x00E7FFFFFFFFFC3D))
+#define RGX_CR_SOFT_RESET_PHANTOM3_CORE_SHIFT             (63U)
+#define RGX_CR_SOFT_RESET_PHANTOM3_CORE_CLRMSK            (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_PHANTOM3_CORE_EN                (IMG_UINT64_C(0x8000000000000000))
+#define RGX_CR_SOFT_RESET_PHANTOM2_CORE_SHIFT             (62U)
+#define RGX_CR_SOFT_RESET_PHANTOM2_CORE_CLRMSK            (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_PHANTOM2_CORE_EN                (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_SOFT_RESET_BERNADO2_CORE_SHIFT             (61U)
+#define RGX_CR_SOFT_RESET_BERNADO2_CORE_CLRMSK            (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BERNADO2_CORE_EN                (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_SOFT_RESET_JONES_CORE_SHIFT                (60U)
+#define RGX_CR_SOFT_RESET_JONES_CORE_CLRMSK               (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_JONES_CORE_EN                   (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_SOFT_RESET_TILING_CORE_SHIFT               (59U)
+#define RGX_CR_SOFT_RESET_TILING_CORE_CLRMSK              (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_TILING_CORE_EN                  (IMG_UINT64_C(0x0800000000000000))
+#define RGX_CR_SOFT_RESET_TE3_SHIFT                       (58U)
+#define RGX_CR_SOFT_RESET_TE3_CLRMSK                      (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_TE3_EN                          (IMG_UINT64_C(0x0400000000000000))
+#define RGX_CR_SOFT_RESET_VCE_SHIFT                       (57U)
+#define RGX_CR_SOFT_RESET_VCE_CLRMSK                      (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_VCE_EN                          (IMG_UINT64_C(0x0200000000000000))
+#define RGX_CR_SOFT_RESET_VBS_SHIFT                       (56U)
+#define RGX_CR_SOFT_RESET_VBS_CLRMSK                      (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_VBS_EN                          (IMG_UINT64_C(0x0100000000000000))
+#define RGX_CR_SOFT_RESET_DPX1_CORE_SHIFT                 (55U)
+#define RGX_CR_SOFT_RESET_DPX1_CORE_CLRMSK                (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DPX1_CORE_EN                    (IMG_UINT64_C(0x0080000000000000))
+#define RGX_CR_SOFT_RESET_DPX0_CORE_SHIFT                 (54U)
+#define RGX_CR_SOFT_RESET_DPX0_CORE_CLRMSK                (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DPX0_CORE_EN                    (IMG_UINT64_C(0x0040000000000000))
+#define RGX_CR_SOFT_RESET_FBA_SHIFT                       (53U)
+#define RGX_CR_SOFT_RESET_FBA_CLRMSK                      (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_FBA_EN                          (IMG_UINT64_C(0x0020000000000000))
+#define RGX_CR_SOFT_RESET_FB_CDC_SHIFT                    (51U)
+#define RGX_CR_SOFT_RESET_FB_CDC_CLRMSK                   (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_FB_CDC_EN                       (IMG_UINT64_C(0x0008000000000000))
+#define RGX_CR_SOFT_RESET_SH_SHIFT                        (50U)
+#define RGX_CR_SOFT_RESET_SH_CLRMSK                       (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_SH_EN                           (IMG_UINT64_C(0x0004000000000000))
+#define RGX_CR_SOFT_RESET_VRDM_SHIFT                      (49U)
+#define RGX_CR_SOFT_RESET_VRDM_CLRMSK                     (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_VRDM_EN                         (IMG_UINT64_C(0x0002000000000000))
+#define RGX_CR_SOFT_RESET_MCU_FBTC_SHIFT                  (48U)
+#define RGX_CR_SOFT_RESET_MCU_FBTC_CLRMSK                 (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_MCU_FBTC_EN                     (IMG_UINT64_C(0x0001000000000000))
+#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_SHIFT             (47U)
+#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_CLRMSK            (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_EN                (IMG_UINT64_C(0x0000800000000000))
+#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_SHIFT             (46U)
+#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_CLRMSK            (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_EN                (IMG_UINT64_C(0x0000400000000000))
+#define RGX_CR_SOFT_RESET_BERNADO1_CORE_SHIFT             (45U)
+#define RGX_CR_SOFT_RESET_BERNADO1_CORE_CLRMSK            (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BERNADO1_CORE_EN                (IMG_UINT64_C(0x0000200000000000))
+#define RGX_CR_SOFT_RESET_BERNADO0_CORE_SHIFT             (44U)
+#define RGX_CR_SOFT_RESET_BERNADO0_CORE_CLRMSK            (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BERNADO0_CORE_EN                (IMG_UINT64_C(0x0000100000000000))
+#define RGX_CR_SOFT_RESET_IPP_SHIFT                       (43U)
+#define RGX_CR_SOFT_RESET_IPP_CLRMSK                      (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_IPP_EN                          (IMG_UINT64_C(0x0000080000000000))
+#define RGX_CR_SOFT_RESET_BIF_TEXAS_SHIFT                 (42U)
+#define RGX_CR_SOFT_RESET_BIF_TEXAS_CLRMSK                (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BIF_TEXAS_EN                    (IMG_UINT64_C(0x0000040000000000))
+#define RGX_CR_SOFT_RESET_TORNADO_CORE_SHIFT              (41U)
+#define RGX_CR_SOFT_RESET_TORNADO_CORE_CLRMSK             (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_TORNADO_CORE_EN                 (IMG_UINT64_C(0x0000020000000000))
+#define RGX_CR_SOFT_RESET_DUST_H_CORE_SHIFT               (40U)
+#define RGX_CR_SOFT_RESET_DUST_H_CORE_CLRMSK              (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_H_CORE_EN                  (IMG_UINT64_C(0x0000010000000000))
+#define RGX_CR_SOFT_RESET_DUST_G_CORE_SHIFT               (39U)
+#define RGX_CR_SOFT_RESET_DUST_G_CORE_CLRMSK              (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_G_CORE_EN                  (IMG_UINT64_C(0x0000008000000000))
+#define RGX_CR_SOFT_RESET_DUST_F_CORE_SHIFT               (38U)
+#define RGX_CR_SOFT_RESET_DUST_F_CORE_CLRMSK              (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_F_CORE_EN                  (IMG_UINT64_C(0x0000004000000000))
+#define RGX_CR_SOFT_RESET_DUST_E_CORE_SHIFT               (37U)
+#define RGX_CR_SOFT_RESET_DUST_E_CORE_CLRMSK              (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_E_CORE_EN                  (IMG_UINT64_C(0x0000002000000000))
+#define RGX_CR_SOFT_RESET_DUST_D_CORE_SHIFT               (36U)
+#define RGX_CR_SOFT_RESET_DUST_D_CORE_CLRMSK              (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_D_CORE_EN                  (IMG_UINT64_C(0x0000001000000000))
+#define RGX_CR_SOFT_RESET_DUST_C_CORE_SHIFT               (35U)
+#define RGX_CR_SOFT_RESET_DUST_C_CORE_CLRMSK              (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_C_CORE_EN                  (IMG_UINT64_C(0x0000000800000000))
+#define RGX_CR_SOFT_RESET_MMU_SHIFT                       (34U)
+#define RGX_CR_SOFT_RESET_MMU_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF))
+#define RGX_CR_SOFT_RESET_MMU_EN                          (IMG_UINT64_C(0x0000000400000000))
+#define RGX_CR_SOFT_RESET_BIF1_SHIFT                      (33U)
+#define RGX_CR_SOFT_RESET_BIF1_CLRMSK                     (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BIF1_EN                         (IMG_UINT64_C(0x0000000200000000))
+#define RGX_CR_SOFT_RESET_GARTEN_SHIFT                    (32U)
+#define RGX_CR_SOFT_RESET_GARTEN_CLRMSK                   (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_SOFT_RESET_GARTEN_EN                       (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_SOFT_RESET_CPU_SHIFT                       (32U)
+#define RGX_CR_SOFT_RESET_CPU_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_SOFT_RESET_CPU_EN                          (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_SOFT_RESET_RASCAL_CORE_SHIFT               (31U)
+#define RGX_CR_SOFT_RESET_RASCAL_CORE_CLRMSK              (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF))
+#define RGX_CR_SOFT_RESET_RASCAL_CORE_EN                  (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_SOFT_RESET_DUST_B_CORE_SHIFT               (30U)
+#define RGX_CR_SOFT_RESET_DUST_B_CORE_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_B_CORE_EN                  (IMG_UINT64_C(0x0000000040000000))
+#define RGX_CR_SOFT_RESET_DUST_A_CORE_SHIFT               (29U)
+#define RGX_CR_SOFT_RESET_DUST_A_CORE_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_A_CORE_EN                  (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_SOFT_RESET_FB_TLCACHE_SHIFT                (28U)
+#define RGX_CR_SOFT_RESET_FB_TLCACHE_CLRMSK               (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF))
+#define RGX_CR_SOFT_RESET_FB_TLCACHE_EN                   (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_SOFT_RESET_SLC_SHIFT                       (27U)
+#define RGX_CR_SOFT_RESET_SLC_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF))
+#define RGX_CR_SOFT_RESET_SLC_EN                          (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_SOFT_RESET_TLA_SHIFT                       (26U)
+#define RGX_CR_SOFT_RESET_TLA_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF))
+#define RGX_CR_SOFT_RESET_TLA_EN                          (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_SOFT_RESET_UVS_SHIFT                       (25U)
+#define RGX_CR_SOFT_RESET_UVS_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF))
+#define RGX_CR_SOFT_RESET_UVS_EN                          (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_SOFT_RESET_TE_SHIFT                        (24U)
+#define RGX_CR_SOFT_RESET_TE_CLRMSK                       (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF))
+#define RGX_CR_SOFT_RESET_TE_EN                           (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_SOFT_RESET_GPP_SHIFT                       (23U)
+#define RGX_CR_SOFT_RESET_GPP_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF))
+#define RGX_CR_SOFT_RESET_GPP_EN                          (IMG_UINT64_C(0x0000000000800000))
+#define RGX_CR_SOFT_RESET_FBDC_SHIFT                      (22U)
+#define RGX_CR_SOFT_RESET_FBDC_CLRMSK                     (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF))
+#define RGX_CR_SOFT_RESET_FBDC_EN                         (IMG_UINT64_C(0x0000000000400000))
+#define RGX_CR_SOFT_RESET_FBC_SHIFT                       (21U)
+#define RGX_CR_SOFT_RESET_FBC_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_SOFT_RESET_FBC_EN                          (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_SOFT_RESET_PM_SHIFT                        (20U)
+#define RGX_CR_SOFT_RESET_PM_CLRMSK                       (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF))
+#define RGX_CR_SOFT_RESET_PM_EN                           (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_SOFT_RESET_PBE_SHIFT                       (19U)
+#define RGX_CR_SOFT_RESET_PBE_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF))
+#define RGX_CR_SOFT_RESET_PBE_EN                          (IMG_UINT64_C(0x0000000000080000))
+#define RGX_CR_SOFT_RESET_USC_SHARED_SHIFT                (18U)
+#define RGX_CR_SOFT_RESET_USC_SHARED_CLRMSK               (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF))
+#define RGX_CR_SOFT_RESET_USC_SHARED_EN                   (IMG_UINT64_C(0x0000000000040000))
+#define RGX_CR_SOFT_RESET_MCU_L1_SHIFT                    (17U)
+#define RGX_CR_SOFT_RESET_MCU_L1_CLRMSK                   (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF))
+#define RGX_CR_SOFT_RESET_MCU_L1_EN                       (IMG_UINT64_C(0x0000000000020000))
+#define RGX_CR_SOFT_RESET_BIF_SHIFT                       (16U)
+#define RGX_CR_SOFT_RESET_BIF_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF))
+#define RGX_CR_SOFT_RESET_BIF_EN                          (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_SOFT_RESET_CDM_SHIFT                       (15U)
+#define RGX_CR_SOFT_RESET_CDM_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF))
+#define RGX_CR_SOFT_RESET_CDM_EN                          (IMG_UINT64_C(0x0000000000008000))
+#define RGX_CR_SOFT_RESET_VDM_SHIFT                       (14U)
+#define RGX_CR_SOFT_RESET_VDM_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF))
+#define RGX_CR_SOFT_RESET_VDM_EN                          (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_SOFT_RESET_TESS_SHIFT                      (13U)
+#define RGX_CR_SOFT_RESET_TESS_CLRMSK                     (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF))
+#define RGX_CR_SOFT_RESET_TESS_EN                         (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_SOFT_RESET_PDS_SHIFT                       (12U)
+#define RGX_CR_SOFT_RESET_PDS_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF))
+#define RGX_CR_SOFT_RESET_PDS_EN                          (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_SOFT_RESET_ISP_SHIFT                       (11U)
+#define RGX_CR_SOFT_RESET_ISP_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF))
+#define RGX_CR_SOFT_RESET_ISP_EN                          (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_SOFT_RESET_TSP_SHIFT                       (10U)
+#define RGX_CR_SOFT_RESET_TSP_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF))
+#define RGX_CR_SOFT_RESET_TSP_EN                          (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_SOFT_RESET_SYSARB_SHIFT                    (5U)
+#define RGX_CR_SOFT_RESET_SYSARB_CLRMSK                   (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_SOFT_RESET_SYSARB_EN                       (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_SHIFT             (4U)
+#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
+#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_EN                (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_SOFT_RESET_MCU_L0_SHIFT                    (3U)
+#define RGX_CR_SOFT_RESET_MCU_L0_CLRMSK                   (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_CR_SOFT_RESET_MCU_L0_EN                       (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_SOFT_RESET_TPU_SHIFT                       (2U)
+#define RGX_CR_SOFT_RESET_TPU_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_CR_SOFT_RESET_TPU_EN                          (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_SOFT_RESET_USC_SHIFT                       (0U)
+#define RGX_CR_SOFT_RESET_USC_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_SOFT_RESET_USC_EN                          (IMG_UINT64_C(0x0000000000000001))
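+
+/*
+    Usage sketch (illustrative only): the _EN constants are pre-shifted
+    single-bit masks, so several units can be selected for soft reset by
+    or-ing them together, e.g.
+
+        ui64Reset = RGX_CR_SOFT_RESET_TPU_EN | RGX_CR_SOFT_RESET_USC_EN;
+
+    The variable name and any write/release sequence implied here are
+    assumptions, not a description of the driver's actual reset handling.
+*/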
+
+
+/*
+    Register RGX_CR_SOFT_RESET2
+*/
+#define RGX_CR_SOFT_RESET2                                (0x0108U)
+#define RGX_CR_SOFT_RESET2_MASKFULL                       (IMG_UINT64_C(0x00000000001FFFFF))
+#define RGX_CR_SOFT_RESET2_SPFILTER_SHIFT                 (12U)
+#define RGX_CR_SOFT_RESET2_SPFILTER_CLRMSK                (0xFFE00FFFU)
+#define RGX_CR_SOFT_RESET2_TDM_SHIFT                      (11U)
+#define RGX_CR_SOFT_RESET2_TDM_CLRMSK                     (0xFFFFF7FFU)
+#define RGX_CR_SOFT_RESET2_TDM_EN                         (0x00000800U)
+#define RGX_CR_SOFT_RESET2_ASTC_SHIFT                     (10U)
+#define RGX_CR_SOFT_RESET2_ASTC_CLRMSK                    (0xFFFFFBFFU)
+#define RGX_CR_SOFT_RESET2_ASTC_EN                        (0x00000400U)
+#define RGX_CR_SOFT_RESET2_BLACKPEARL_SHIFT               (9U)
+#define RGX_CR_SOFT_RESET2_BLACKPEARL_CLRMSK              (0xFFFFFDFFU)
+#define RGX_CR_SOFT_RESET2_BLACKPEARL_EN                  (0x00000200U)
+#define RGX_CR_SOFT_RESET2_USCPS_SHIFT                    (8U)
+#define RGX_CR_SOFT_RESET2_USCPS_CLRMSK                   (0xFFFFFEFFU)
+#define RGX_CR_SOFT_RESET2_USCPS_EN                       (0x00000100U)
+#define RGX_CR_SOFT_RESET2_IPF_SHIFT                      (7U)
+#define RGX_CR_SOFT_RESET2_IPF_CLRMSK                     (0xFFFFFF7FU)
+#define RGX_CR_SOFT_RESET2_IPF_EN                         (0x00000080U)
+#define RGX_CR_SOFT_RESET2_GEOMETRY_SHIFT                 (6U)
+#define RGX_CR_SOFT_RESET2_GEOMETRY_CLRMSK                (0xFFFFFFBFU)
+#define RGX_CR_SOFT_RESET2_GEOMETRY_EN                    (0x00000040U)
+#define RGX_CR_SOFT_RESET2_USC_SHARED_SHIFT               (5U)
+#define RGX_CR_SOFT_RESET2_USC_SHARED_CLRMSK              (0xFFFFFFDFU)
+#define RGX_CR_SOFT_RESET2_USC_SHARED_EN                  (0x00000020U)
+#define RGX_CR_SOFT_RESET2_PDS_SHARED_SHIFT               (4U)
+#define RGX_CR_SOFT_RESET2_PDS_SHARED_CLRMSK              (0xFFFFFFEFU)
+#define RGX_CR_SOFT_RESET2_PDS_SHARED_EN                  (0x00000010U)
+#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_SHIFT           (3U)
+#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_CLRMSK          (0xFFFFFFF7U)
+#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_EN              (0x00000008U)
+#define RGX_CR_SOFT_RESET2_PIXEL_SHIFT                    (2U)
+#define RGX_CR_SOFT_RESET2_PIXEL_CLRMSK                   (0xFFFFFFFBU)
+#define RGX_CR_SOFT_RESET2_PIXEL_EN                       (0x00000004U)
+#define RGX_CR_SOFT_RESET2_CDM_SHIFT                      (1U)
+#define RGX_CR_SOFT_RESET2_CDM_CLRMSK                     (0xFFFFFFFDU)
+#define RGX_CR_SOFT_RESET2_CDM_EN                         (0x00000002U)
+#define RGX_CR_SOFT_RESET2_VERTEX_SHIFT                   (0U)
+#define RGX_CR_SOFT_RESET2_VERTEX_CLRMSK                  (0xFFFFFFFEU)
+#define RGX_CR_SOFT_RESET2_VERTEX_EN                      (0x00000001U)
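
Each field above follows the header's usual _SHIFT/_CLRMSK/_EN convention: _CLRMSK clears the field in a read-modify-write sequence and _EN sets a single-bit field. A minimal sketch of pulsing one soft-reset bit, assuming a hypothetical local register image ui32Val that the caller reads from and later writes back to RGX_CR_SOFT_RESET2:

    /* Illustrative sketch only, not part of the generated header. */
    IMG_UINT32 ui32Val = 0;  /* hypothetical copy of RGX_CR_SOFT_RESET2 */

    /* Assert the CDM reset: clear the field, then OR in the enable bit. */
    ui32Val = (ui32Val & RGX_CR_SOFT_RESET2_CDM_CLRMSK) | RGX_CR_SOFT_RESET2_CDM_EN;

    /* ... write ui32Val back and wait for the unit to reset ... */

    /* De-assert the reset by clearing the field again. */
    ui32Val &= RGX_CR_SOFT_RESET2_CDM_CLRMSK;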
+
+
+/*
+    Register RGX_CR_EVENT_STATUS
+*/
+#define RGX_CR_EVENT_STATUS                               (0x0130U)
+#define RGX_CR_EVENT_STATUS__ROGUEXE__MASKFULL            (IMG_UINT64_C(0x00000000E01DFFFF))
+#define RGX_CR_EVENT_STATUS__SIGNALS__MASKFULL            (IMG_UINT64_C(0x00000000E007FFFF))
+#define RGX_CR_EVENT_STATUS_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_SHIFT      (31U)
+#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_CLRMSK     (0x7FFFFFFFU)
+#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_EN         (0x80000000U)
+#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_SHIFT        (30U)
+#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_CLRMSK       (0xBFFFFFFFU)
+#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_EN           (0x40000000U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_SHIFT  (29U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU)
+#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_EN     (0x20000000U)
+#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_SHIFT       (28U)
+#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_CLRMSK      (0xEFFFFFFFU)
+#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_EN          (0x10000000U)
+#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_SHIFT      (27U)
+#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_CLRMSK     (0xF7FFFFFFU)
+#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_EN         (0x08000000U)
+#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_SHIFT       (26U)
+#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_CLRMSK      (0xFBFFFFFFU)
+#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_EN          (0x04000000U)
+#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_SHIFT        (25U)
+#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_CLRMSK       (0xFDFFFFFFU)
+#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_EN           (0x02000000U)
+#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_SHIFT        (24U)
+#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_CLRMSK       (0xFEFFFFFFU)
+#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_EN           (0x01000000U)
+#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_SHIFT        (23U)
+#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_CLRMSK       (0xFF7FFFFFU)
+#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_EN           (0x00800000U)
+#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_SHIFT        (22U)
+#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_CLRMSK       (0xFFBFFFFFU)
+#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_EN           (0x00400000U)
+#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_SHIFT        (21U)
+#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_CLRMSK       (0xFFDFFFFFU)
+#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_EN           (0x00200000U)
+#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_SHIFT        (20U)
+#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_CLRMSK       (0xFFEFFFFFU)
+#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_EN           (0x00100000U)
+#define RGX_CR_EVENT_STATUS_SAFETY_SHIFT                  (20U)
+#define RGX_CR_EVENT_STATUS_SAFETY_CLRMSK                 (0xFFEFFFFFU)
+#define RGX_CR_EVENT_STATUS_SAFETY_EN                     (0x00100000U)
+#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_SHIFT        (19U)
+#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_CLRMSK       (0xFFF7FFFFU)
+#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_EN           (0x00080000U)
+#define RGX_CR_EVENT_STATUS_SLAVE_REQ_SHIFT               (19U)
+#define RGX_CR_EVENT_STATUS_SLAVE_REQ_CLRMSK              (0xFFF7FFFFU)
+#define RGX_CR_EVENT_STATUS_SLAVE_REQ_EN                  (0x00080000U)
+#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_SHIFT        (18U)
+#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_CLRMSK       (0xFFFBFFFFU)
+#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_EN           (0x00040000U)
+#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_SHIFT (18U)
+#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU)
+#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U)
+#define RGX_CR_EVENT_STATUS_SHG_FINISHED_SHIFT            (17U)
+#define RGX_CR_EVENT_STATUS_SHG_FINISHED_CLRMSK           (0xFFFDFFFFU)
+#define RGX_CR_EVENT_STATUS_SHG_FINISHED_EN               (0x00020000U)
+#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_SHIFT  (17U)
+#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU)
+#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_EN     (0x00020000U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_SHIFT    (16U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_CLRMSK   (0xFFFEFFFFU)
+#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_EN       (0x00010000U)
+#define RGX_CR_EVENT_STATUS_USC_TRIGGER_SHIFT             (15U)
+#define RGX_CR_EVENT_STATUS_USC_TRIGGER_CLRMSK            (0xFFFF7FFFU)
+#define RGX_CR_EVENT_STATUS_USC_TRIGGER_EN                (0x00008000U)
+#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_SHIFT            (14U)
+#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_CLRMSK           (0xFFFFBFFFU)
+#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_EN               (0x00004000U)
+#define RGX_CR_EVENT_STATUS_GPIO_ACK_SHIFT                (13U)
+#define RGX_CR_EVENT_STATUS_GPIO_ACK_CLRMSK               (0xFFFFDFFFU)
+#define RGX_CR_EVENT_STATUS_GPIO_ACK_EN                   (0x00002000U)
+#define RGX_CR_EVENT_STATUS_GPIO_REQ_SHIFT                (12U)
+#define RGX_CR_EVENT_STATUS_GPIO_REQ_CLRMSK               (0xFFFFEFFFU)
+#define RGX_CR_EVENT_STATUS_GPIO_REQ_EN                   (0x00001000U)
+#define RGX_CR_EVENT_STATUS_POWER_ABORT_SHIFT             (11U)
+#define RGX_CR_EVENT_STATUS_POWER_ABORT_CLRMSK            (0xFFFFF7FFU)
+#define RGX_CR_EVENT_STATUS_POWER_ABORT_EN                (0x00000800U)
+#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_SHIFT          (10U)
+#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_CLRMSK         (0xFFFFFBFFU)
+#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_EN             (0x00000400U)
+#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_SHIFT          (9U)
+#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_CLRMSK         (0xFFFFFDFFU)
+#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_EN             (0x00000200U)
+#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_SHIFT          (8U)
+#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_CLRMSK         (0xFFFFFEFFU)
+#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_EN             (0x00000100U)
+#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_SHIFT        (7U)
+#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_CLRMSK       (0xFFFFFF7FU)
+#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_EN           (0x00000080U)
+#define RGX_CR_EVENT_STATUS_TA_TERMINATE_SHIFT            (6U)
+#define RGX_CR_EVENT_STATUS_TA_TERMINATE_CLRMSK           (0xFFFFFFBFU)
+#define RGX_CR_EVENT_STATUS_TA_TERMINATE_EN               (0x00000040U)
+#define RGX_CR_EVENT_STATUS_TA_FINISHED_SHIFT             (5U)
+#define RGX_CR_EVENT_STATUS_TA_FINISHED_CLRMSK            (0xFFFFFFDFU)
+#define RGX_CR_EVENT_STATUS_TA_FINISHED_EN                (0x00000020U)
+#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_SHIFT       (4U)
+#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_CLRMSK      (0xFFFFFFEFU)
+#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_EN          (0x00000010U)
+#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT      (3U)
+#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_CLRMSK     (0xFFFFFFF7U)
+#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_EN         (0x00000008U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_SHIFT        (2U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_CLRMSK       (0xFFFFFFFBU)
+#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_EN           (0x00000004U)
+#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_SHIFT         (1U)
+#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_CLRMSK        (0xFFFFFFFDU)
+#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_EN            (0x00000002U)
+#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_SHIFT            (0U)
+#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_CLRMSK           (0xFFFFFFFEU)
+#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_EN               (0x00000001U)
+
+
+/*
+    Register RGX_CR_EVENT_CLEAR
+*/
+#define RGX_CR_EVENT_CLEAR                                (0x0138U)
+#define RGX_CR_EVENT_CLEAR__ROGUEXE__MASKFULL             (IMG_UINT64_C(0x00000000E01DFFFF))
+#define RGX_CR_EVENT_CLEAR__SIGNALS__MASKFULL             (IMG_UINT64_C(0x00000000E007FFFF))
+#define RGX_CR_EVENT_CLEAR_MASKFULL                       (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_SHIFT       (31U)
+#define RGX_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_CLRMSK      (0x7FFFFFFFU)
+#define RGX_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_EN          (0x80000000U)
+#define RGX_CR_EVENT_CLEAR_TDM_BUFFER_STALL_SHIFT         (30U)
+#define RGX_CR_EVENT_CLEAR_TDM_BUFFER_STALL_CLRMSK        (0xBFFFFFFFU)
+#define RGX_CR_EVENT_CLEAR_TDM_BUFFER_STALL_EN            (0x40000000U)
+#define RGX_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_SHIFT   (29U)
+#define RGX_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_CLRMSK  (0xDFFFFFFFU)
+#define RGX_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_EN      (0x20000000U)
+#define RGX_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_SHIFT        (28U)
+#define RGX_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_CLRMSK       (0xEFFFFFFFU)
+#define RGX_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_EN           (0x10000000U)
+#define RGX_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_SHIFT       (27U)
+#define RGX_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_CLRMSK      (0xF7FFFFFFU)
+#define RGX_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_EN          (0x08000000U)
+#define RGX_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_SHIFT        (26U)
+#define RGX_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_CLRMSK       (0xFBFFFFFFU)
+#define RGX_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_EN           (0x04000000U)
+#define RGX_CR_EVENT_CLEAR_FBA_FC3_FINISHED_SHIFT         (25U)
+#define RGX_CR_EVENT_CLEAR_FBA_FC3_FINISHED_CLRMSK        (0xFDFFFFFFU)
+#define RGX_CR_EVENT_CLEAR_FBA_FC3_FINISHED_EN            (0x02000000U)
+#define RGX_CR_EVENT_CLEAR_FBA_FC2_FINISHED_SHIFT         (24U)
+#define RGX_CR_EVENT_CLEAR_FBA_FC2_FINISHED_CLRMSK        (0xFEFFFFFFU)
+#define RGX_CR_EVENT_CLEAR_FBA_FC2_FINISHED_EN            (0x01000000U)
+#define RGX_CR_EVENT_CLEAR_FBA_FC1_FINISHED_SHIFT         (23U)
+#define RGX_CR_EVENT_CLEAR_FBA_FC1_FINISHED_CLRMSK        (0xFF7FFFFFU)
+#define RGX_CR_EVENT_CLEAR_FBA_FC1_FINISHED_EN            (0x00800000U)
+#define RGX_CR_EVENT_CLEAR_FBA_FC0_FINISHED_SHIFT         (22U)
+#define RGX_CR_EVENT_CLEAR_FBA_FC0_FINISHED_CLRMSK        (0xFFBFFFFFU)
+#define RGX_CR_EVENT_CLEAR_FBA_FC0_FINISHED_EN            (0x00400000U)
+#define RGX_CR_EVENT_CLEAR_RDM_FC3_FINISHED_SHIFT         (21U)
+#define RGX_CR_EVENT_CLEAR_RDM_FC3_FINISHED_CLRMSK        (0xFFDFFFFFU)
+#define RGX_CR_EVENT_CLEAR_RDM_FC3_FINISHED_EN            (0x00200000U)
+#define RGX_CR_EVENT_CLEAR_RDM_FC2_FINISHED_SHIFT         (20U)
+#define RGX_CR_EVENT_CLEAR_RDM_FC2_FINISHED_CLRMSK        (0xFFEFFFFFU)
+#define RGX_CR_EVENT_CLEAR_RDM_FC2_FINISHED_EN            (0x00100000U)
+#define RGX_CR_EVENT_CLEAR_SAFETY_SHIFT                   (20U)
+#define RGX_CR_EVENT_CLEAR_SAFETY_CLRMSK                  (0xFFEFFFFFU)
+#define RGX_CR_EVENT_CLEAR_SAFETY_EN                      (0x00100000U)
+#define RGX_CR_EVENT_CLEAR_RDM_FC1_FINISHED_SHIFT         (19U)
+#define RGX_CR_EVENT_CLEAR_RDM_FC1_FINISHED_CLRMSK        (0xFFF7FFFFU)
+#define RGX_CR_EVENT_CLEAR_RDM_FC1_FINISHED_EN            (0x00080000U)
+#define RGX_CR_EVENT_CLEAR_SLAVE_REQ_SHIFT                (19U)
+#define RGX_CR_EVENT_CLEAR_SLAVE_REQ_CLRMSK               (0xFFF7FFFFU)
+#define RGX_CR_EVENT_CLEAR_SLAVE_REQ_EN                   (0x00080000U)
+#define RGX_CR_EVENT_CLEAR_RDM_FC0_FINISHED_SHIFT         (18U)
+#define RGX_CR_EVENT_CLEAR_RDM_FC0_FINISHED_CLRMSK        (0xFFFBFFFFU)
+#define RGX_CR_EVENT_CLEAR_RDM_FC0_FINISHED_EN            (0x00040000U)
+#define RGX_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_SHIFT (18U)
+#define RGX_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU)
+#define RGX_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_EN  (0x00040000U)
+#define RGX_CR_EVENT_CLEAR_SHG_FINISHED_SHIFT             (17U)
+#define RGX_CR_EVENT_CLEAR_SHG_FINISHED_CLRMSK            (0xFFFDFFFFU)
+#define RGX_CR_EVENT_CLEAR_SHG_FINISHED_EN                (0x00020000U)
+#define RGX_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_SHIFT   (17U)
+#define RGX_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_CLRMSK  (0xFFFDFFFFU)
+#define RGX_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_EN      (0x00020000U)
+#define RGX_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_SHIFT     (16U)
+#define RGX_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_CLRMSK    (0xFFFEFFFFU)
+#define RGX_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_EN        (0x00010000U)
+#define RGX_CR_EVENT_CLEAR_USC_TRIGGER_SHIFT              (15U)
+#define RGX_CR_EVENT_CLEAR_USC_TRIGGER_CLRMSK             (0xFFFF7FFFU)
+#define RGX_CR_EVENT_CLEAR_USC_TRIGGER_EN                 (0x00008000U)
+#define RGX_CR_EVENT_CLEAR_ZLS_FINISHED_SHIFT             (14U)
+#define RGX_CR_EVENT_CLEAR_ZLS_FINISHED_CLRMSK            (0xFFFFBFFFU)
+#define RGX_CR_EVENT_CLEAR_ZLS_FINISHED_EN                (0x00004000U)
+#define RGX_CR_EVENT_CLEAR_GPIO_ACK_SHIFT                 (13U)
+#define RGX_CR_EVENT_CLEAR_GPIO_ACK_CLRMSK                (0xFFFFDFFFU)
+#define RGX_CR_EVENT_CLEAR_GPIO_ACK_EN                    (0x00002000U)
+#define RGX_CR_EVENT_CLEAR_GPIO_REQ_SHIFT                 (12U)
+#define RGX_CR_EVENT_CLEAR_GPIO_REQ_CLRMSK                (0xFFFFEFFFU)
+#define RGX_CR_EVENT_CLEAR_GPIO_REQ_EN                    (0x00001000U)
+#define RGX_CR_EVENT_CLEAR_POWER_ABORT_SHIFT              (11U)
+#define RGX_CR_EVENT_CLEAR_POWER_ABORT_CLRMSK             (0xFFFFF7FFU)
+#define RGX_CR_EVENT_CLEAR_POWER_ABORT_EN                 (0x00000800U)
+#define RGX_CR_EVENT_CLEAR_POWER_COMPLETE_SHIFT           (10U)
+#define RGX_CR_EVENT_CLEAR_POWER_COMPLETE_CLRMSK          (0xFFFFFBFFU)
+#define RGX_CR_EVENT_CLEAR_POWER_COMPLETE_EN              (0x00000400U)
+#define RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_SHIFT           (9U)
+#define RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_CLRMSK          (0xFFFFFDFFU)
+#define RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_EN              (0x00000200U)
+#define RGX_CR_EVENT_CLEAR_PM_3D_MEM_FREE_SHIFT           (8U)
+#define RGX_CR_EVENT_CLEAR_PM_3D_MEM_FREE_CLRMSK          (0xFFFFFEFFU)
+#define RGX_CR_EVENT_CLEAR_PM_3D_MEM_FREE_EN              (0x00000100U)
+#define RGX_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_SHIFT         (7U)
+#define RGX_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_CLRMSK        (0xFFFFFF7FU)
+#define RGX_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_EN            (0x00000080U)
+#define RGX_CR_EVENT_CLEAR_TA_TERMINATE_SHIFT             (6U)
+#define RGX_CR_EVENT_CLEAR_TA_TERMINATE_CLRMSK            (0xFFFFFFBFU)
+#define RGX_CR_EVENT_CLEAR_TA_TERMINATE_EN                (0x00000040U)
+#define RGX_CR_EVENT_CLEAR_TA_FINISHED_SHIFT              (5U)
+#define RGX_CR_EVENT_CLEAR_TA_FINISHED_CLRMSK             (0xFFFFFFDFU)
+#define RGX_CR_EVENT_CLEAR_TA_FINISHED_EN                 (0x00000020U)
+#define RGX_CR_EVENT_CLEAR_ISP_END_MACROTILE_SHIFT        (4U)
+#define RGX_CR_EVENT_CLEAR_ISP_END_MACROTILE_CLRMSK       (0xFFFFFFEFU)
+#define RGX_CR_EVENT_CLEAR_ISP_END_MACROTILE_EN           (0x00000010U)
+#define RGX_CR_EVENT_CLEAR_PIXELBE_END_RENDER_SHIFT       (3U)
+#define RGX_CR_EVENT_CLEAR_PIXELBE_END_RENDER_CLRMSK      (0xFFFFFFF7U)
+#define RGX_CR_EVENT_CLEAR_PIXELBE_END_RENDER_EN          (0x00000008U)
+#define RGX_CR_EVENT_CLEAR_COMPUTE_FINISHED_SHIFT         (2U)
+#define RGX_CR_EVENT_CLEAR_COMPUTE_FINISHED_CLRMSK        (0xFFFFFFFBU)
+#define RGX_CR_EVENT_CLEAR_COMPUTE_FINISHED_EN            (0x00000004U)
+#define RGX_CR_EVENT_CLEAR_KERNEL_FINISHED_SHIFT          (1U)
+#define RGX_CR_EVENT_CLEAR_KERNEL_FINISHED_CLRMSK         (0xFFFFFFFDU)
+#define RGX_CR_EVENT_CLEAR_KERNEL_FINISHED_EN             (0x00000002U)
+#define RGX_CR_EVENT_CLEAR_TLA_COMPLETE_SHIFT             (0U)
+#define RGX_CR_EVENT_CLEAR_TLA_COMPLETE_CLRMSK            (0xFFFFFFFEU)
+#define RGX_CR_EVENT_CLEAR_TLA_COMPLETE_EN                (0x00000001U)
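
RGX_CR_EVENT_CLEAR mirrors RGX_CR_EVENT_STATUS bit for bit, which suggests the usual status/acknowledge pairing: read the status register, then write the matching bits to the clear register. A minimal sketch under that assumption, with ui32Status standing in for a hypothetical value read from the device:

    /* Illustrative sketch only; ui32Status is a hypothetical status snapshot. */
    IMG_UINT32 ui32Status = RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_EN;
    IMG_UINT32 ui32Ack    = 0;

    if (ui32Status & RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_EN)
    {
        /* Acknowledge the event by setting the matching bit in a value
         * destined for RGX_CR_EVENT_CLEAR (offset 0x0138). */
        ui32Ack |= RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_EN;
    }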
+
+
+/*
+    Register RGX_CR_TIMER
+*/
+#define RGX_CR_TIMER                                      (0x0160U)
+#define RGX_CR_TIMER_MASKFULL                             (IMG_UINT64_C(0x8000FFFFFFFFFFFF))
+#define RGX_CR_TIMER_BIT31_SHIFT                          (63U)
+#define RGX_CR_TIMER_BIT31_CLRMSK                         (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF))
+#define RGX_CR_TIMER_BIT31_EN                             (IMG_UINT64_C(0x8000000000000000))
+#define RGX_CR_TIMER_VALUE_SHIFT                          (0U)
+#define RGX_CR_TIMER_VALUE_CLRMSK                         (IMG_UINT64_C(0xFFFF000000000000))
+
+
+/*
+    Register RGX_CR_TLA_STATUS
+*/
+#define RGX_CR_TLA_STATUS                                 (0x0178U)
+#define RGX_CR_TLA_STATUS_MASKFULL                        (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_TLA_STATUS_BLIT_COUNT_SHIFT                (39U)
+#define RGX_CR_TLA_STATUS_BLIT_COUNT_CLRMSK               (IMG_UINT64_C(0x0000007FFFFFFFFF))
+#define RGX_CR_TLA_STATUS_REQUEST_SHIFT                   (7U)
+#define RGX_CR_TLA_STATUS_REQUEST_CLRMSK                  (IMG_UINT64_C(0xFFFFFF800000007F))
+#define RGX_CR_TLA_STATUS_FIFO_FULLNESS_SHIFT             (1U)
+#define RGX_CR_TLA_STATUS_FIFO_FULLNESS_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFFFFFFF81))
+#define RGX_CR_TLA_STATUS_BUSY_SHIFT                      (0U)
+#define RGX_CR_TLA_STATUS_BUSY_CLRMSK                     (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_TLA_STATUS_BUSY_EN                         (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_PM_PARTIAL_RENDER_ENABLE
+*/
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE                   (0x0338U)
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_MASKFULL          (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_SHIFT          (0U)
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_CLRMSK         (0xFFFFFFFEU)
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_EN             (0x00000001U)
+
+
+/*
+    Register RGX_CR_SIDEKICK_IDLE
+*/
+#define RGX_CR_SIDEKICK_IDLE                              (0x03C8U)
+#define RGX_CR_SIDEKICK_IDLE_MASKFULL                     (IMG_UINT64_C(0x000000000000007F))
+#define RGX_CR_SIDEKICK_IDLE_FB_CDC_SHIFT                 (6U)
+#define RGX_CR_SIDEKICK_IDLE_FB_CDC_CLRMSK                (0xFFFFFFBFU)
+#define RGX_CR_SIDEKICK_IDLE_FB_CDC_EN                    (0x00000040U)
+#define RGX_CR_SIDEKICK_IDLE_MMU_SHIFT                    (5U)
+#define RGX_CR_SIDEKICK_IDLE_MMU_CLRMSK                   (0xFFFFFFDFU)
+#define RGX_CR_SIDEKICK_IDLE_MMU_EN                       (0x00000020U)
+#define RGX_CR_SIDEKICK_IDLE_BIF128_SHIFT                 (4U)
+#define RGX_CR_SIDEKICK_IDLE_BIF128_CLRMSK                (0xFFFFFFEFU)
+#define RGX_CR_SIDEKICK_IDLE_BIF128_EN                    (0x00000010U)
+#define RGX_CR_SIDEKICK_IDLE_TLA_SHIFT                    (3U)
+#define RGX_CR_SIDEKICK_IDLE_TLA_CLRMSK                   (0xFFFFFFF7U)
+#define RGX_CR_SIDEKICK_IDLE_TLA_EN                       (0x00000008U)
+#define RGX_CR_SIDEKICK_IDLE_GARTEN_SHIFT                 (2U)
+#define RGX_CR_SIDEKICK_IDLE_GARTEN_CLRMSK                (0xFFFFFFFBU)
+#define RGX_CR_SIDEKICK_IDLE_GARTEN_EN                    (0x00000004U)
+#define RGX_CR_SIDEKICK_IDLE_HOSTIF_SHIFT                 (1U)
+#define RGX_CR_SIDEKICK_IDLE_HOSTIF_CLRMSK                (0xFFFFFFFDU)
+#define RGX_CR_SIDEKICK_IDLE_HOSTIF_EN                    (0x00000002U)
+#define RGX_CR_SIDEKICK_IDLE_SOCIF_SHIFT                  (0U)
+#define RGX_CR_SIDEKICK_IDLE_SOCIF_CLRMSK                 (0xFFFFFFFEU)
+#define RGX_CR_SIDEKICK_IDLE_SOCIF_EN                     (0x00000001U)
+
+
+/*
+    Register RGX_CR_MARS_IDLE
+*/
+#define RGX_CR_MARS_IDLE                                  (0x08F8U)
+#define RGX_CR_MARS_IDLE_MASKFULL                         (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_MARS_IDLE_MH_SYSARB0_SHIFT                 (2U)
+#define RGX_CR_MARS_IDLE_MH_SYSARB0_CLRMSK                (0xFFFFFFFBU)
+#define RGX_CR_MARS_IDLE_MH_SYSARB0_EN                    (0x00000004U)
+#define RGX_CR_MARS_IDLE_CPU_SHIFT                        (1U)
+#define RGX_CR_MARS_IDLE_CPU_CLRMSK                       (0xFFFFFFFDU)
+#define RGX_CR_MARS_IDLE_CPU_EN                           (0x00000002U)
+#define RGX_CR_MARS_IDLE_SOCIF_SHIFT                      (0U)
+#define RGX_CR_MARS_IDLE_SOCIF_CLRMSK                     (0xFFFFFFFEU)
+#define RGX_CR_MARS_IDLE_SOCIF_EN                         (0x00000001U)
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_STORE_STATUS
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS                   (0x0430U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_MASKFULL          (IMG_UINT64_C(0x00000000000000F3))
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_SHIFT   (4U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_CLRMSK  (0xFFFFFF0FU)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT (1U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN    (0x00000002U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT    (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK   (0xFFFFFFFEU)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_EN       (0x00000001U)
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_STORE_TASK0
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0                    (0x0438U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_MASKFULL           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_SHIFT   (32U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_CLRMSK  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_SHIFT   (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_CLRMSK  (IMG_UINT64_C(0xFFFFFFFF00000000))
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_STORE_TASK1
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_TASK1                    (0x0440U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK1_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_SHIFT   (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_CLRMSK  (0x00000000U)
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_STORE_TASK2
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2                    (0x0448U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_MASKFULL           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_SHIFT  (32U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_SHIFT  (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000))
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_RESUME_TASK0
+*/
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0                   (0x0450U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_MASKFULL          (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_SHIFT  (32U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_SHIFT  (0U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000))
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_RESUME_TASK1
+*/
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1                   (0x0458U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_SHIFT  (0U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_CLRMSK (0x00000000U)
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_RESUME_TASK2
+*/
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2                   (0x0460U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_MASKFULL          (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_SHIFT (32U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_SHIFT (0U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000))
+
+
+/*
+    Register RGX_CR_CDM_CONTEXT_STORE_STATUS
+*/
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS                   (0x04A0U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_MASKFULL          (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT (1U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN    (0x00000002U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT    (0U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK   (0xFFFFFFFEU)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_EN       (0x00000001U)
+
+
+/*
+    Register RGX_CR_CDM_CONTEXT_PDS0
+*/
+#define RGX_CR_CDM_CONTEXT_PDS0                           (0x04A8U)
+#define RGX_CR_CDM_CONTEXT_PDS0_MASKFULL                  (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0))
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_SHIFT           (36U)
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_CLRMSK          (IMG_UINT64_C(0x0000000FFFFFFFFF))
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSHIFT      (4U)
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSIZE       (16U)
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_SHIFT           (4U)
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_CLRMSK          (IMG_UINT64_C(0xFFFFFFFF0000000F))
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSHIFT      (4U)
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSIZE       (16U)
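
The _ALIGNSHIFT/_ALIGNSIZE pairs indicate that the address fields store their value with the low alignment bits dropped (ALIGNSIZE is 16 bytes here). A minimal sketch of packing the two addresses under that reading, with ui64DataAddr and ui64CodeAddr as hypothetical, 16-byte-aligned device-virtual addresses:

    /* Illustrative sketch only; both addresses are hypothetical and must be
     * ALIGNSIZE (16-byte) aligned. */
    IMG_UINT64 ui64DataAddr = 0x0000000000012340ULL;
    IMG_UINT64 ui64CodeAddr = 0x0000000000056780ULL;

    IMG_UINT64 ui64Reg =
        ((ui64DataAddr >> RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSHIFT)
            << RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_SHIFT) |
        ((ui64CodeAddr >> RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSHIFT)
            << RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_SHIFT);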
+
+
+/*
+    Register RGX_CR_CDM_CONTEXT_PDS1
+*/
+#define RGX_CR_CDM_CONTEXT_PDS1                           (0x04B0U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__MASKFULL      (IMG_UINT64_C(0x000000007FFFFFFF))
+#define RGX_CR_CDM_CONTEXT_PDS1_MASKFULL                  (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0xBFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0x40000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_SHIFT         (29U)
+#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_CLRMSK        (0xDFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_EN            (0x20000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0xDFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0x20000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_SHIFT         (28U)
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_CLRMSK        (0xEFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_EN            (0x10000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_SHIFT  (28U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_CLRMSK (0xEFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_EN     (0x10000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_SHIFT              (27U)
+#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_CLRMSK             (0xF7FFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_EN                 (0x08000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0xF03FFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_SHIFT        (21U)
+#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_CLRMSK       (0xF81FFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0xFFDFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0x00200000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_SHIFT       (20U)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_CLRMSK      (0xFFEFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_EN          (0x00100000U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0xFFE00FFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_SHIFT         (11U)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_CLRMSK        (0xFFF007FFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_SHIFT           (7U)
+#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_CLRMSK          (0xFFFFF87FU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0xFFFFF07FU)
+#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_SHIFT           (1U)
+#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_CLRMSK          (0xFFFFFF81U)
+#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_SHIFT               (0U)
+#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_CLRMSK              (0xFFFFFFFEU)
+#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_EN                  (0x00000001U)
+
+
+/*
+    Register RGX_CR_CDM_TERMINATE_PDS
+*/
+#define RGX_CR_CDM_TERMINATE_PDS                          (0x04B8U)
+#define RGX_CR_CDM_TERMINATE_PDS_MASKFULL                 (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0))
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_SHIFT          (36U)
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_CLRMSK         (IMG_UINT64_C(0x0000000FFFFFFFFF))
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSHIFT     (4U)
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSIZE      (16U)
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_SHIFT          (4U)
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_CLRMSK         (IMG_UINT64_C(0xFFFFFFFF0000000F))
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSHIFT     (4U)
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSIZE      (16U)
+
+
+/*
+    Register RGX_CR_CDM_TERMINATE_PDS1
+*/
+#define RGX_CR_CDM_TERMINATE_PDS1                         (0x04C0U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__MASKFULL    (IMG_UINT64_C(0x000000007FFFFFFF))
+#define RGX_CR_CDM_TERMINATE_PDS1_MASKFULL                (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0xBFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0x40000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_SHIFT       (29U)
+#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_CLRMSK      (0xDFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_EN          (0x20000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0xDFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0x20000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_SHIFT       (28U)
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_CLRMSK      (0xEFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_EN          (0x10000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_SHIFT (28U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_CLRMSK (0xEFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_EN   (0x10000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_SHIFT            (27U)
+#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_CLRMSK           (0xF7FFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_EN               (0x08000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0xF03FFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_SHIFT      (21U)
+#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_CLRMSK     (0xF81FFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0xFFDFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0x00200000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_SHIFT     (20U)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_CLRMSK    (0xFFEFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_EN        (0x00100000U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0xFFE00FFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_SHIFT       (11U)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_CLRMSK      (0xFFF007FFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_SHIFT         (7U)
+#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_CLRMSK        (0xFFFFF87FU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0xFFFFF07FU)
+#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_SHIFT         (1U)
+#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_CLRMSK        (0xFFFFFF81U)
+#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_SHIFT             (0U)
+#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_CLRMSK            (0xFFFFFFFEU)
+#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_EN                (0x00000001U)
+
+
+/*
+    Register RGX_CR_CDM_CONTEXT_LOAD_PDS0
+*/
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0                      (0x04D8U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_MASKFULL             (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_SHIFT      (36U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_CLRMSK     (IMG_UINT64_C(0x0000000FFFFFFFFF))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSHIFT (4U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSIZE  (16U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_SHIFT      (4U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_CLRMSK     (IMG_UINT64_C(0xFFFFFFFF0000000F))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSHIFT (4U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSIZE  (16U)
+
+
+/*
+    Register RGX_CR_CDM_CONTEXT_LOAD_PDS1
+*/
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1                      (0x04E0U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__MASKFULL (IMG_UINT64_C(0x000000007FFFFFFF))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_MASKFULL             (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0xBFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0x40000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_SHIFT    (29U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_CLRMSK   (0xDFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_EN       (0x20000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0xDFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0x20000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_SHIFT    (28U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_CLRMSK   (0xEFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_EN       (0x10000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_SHIFT (28U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_CLRMSK (0xEFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_EN (0x10000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_SHIFT         (27U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_CLRMSK        (0xF7FFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_EN            (0x08000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0xF03FFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_SHIFT   (21U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_CLRMSK  (0xF81FFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0xFFDFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0x00200000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_SHIFT  (20U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_CLRMSK (0xFFEFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_EN     (0x00100000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0xFFE00FFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_SHIFT    (11U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_CLRMSK   (0xFFF007FFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_SHIFT      (7U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_CLRMSK     (0xFFFFF87FU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0xFFFFF07FU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_SHIFT      (1U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_CLRMSK     (0xFFFFFF81U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_SHIFT          (0U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_CLRMSK         (0xFFFFFFFEU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_EN             (0x00000001U)
+
+
+/*
+    Register RGX_CR_MIPS_WRAPPER_CONFIG
+*/
+#define RGX_CR_MIPS_WRAPPER_CONFIG                        (0x0810U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_MASKFULL               (IMG_UINT64_C(0x000001030F01FFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_SHIFT   (40U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_CLRMSK  (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_EN      (IMG_UINT64_C(0x0000010000000000))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_SHIFT     (33U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_CLRMSK    (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_EN        (IMG_UINT64_C(0x0000000200000000))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_SHIFT     (32U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_CLRMSK    (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_EN        (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_OS_ID_SHIFT            (25U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_OS_ID_CLRMSK           (IMG_UINT64_C(0xFFFFFFFFF1FFFFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_SHIFT          (24U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_EN             (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_SHIFT    (16U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_CLRMSK   (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MIPS32   (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_SHIFT (0U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP1_CONFIG1
+*/
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1                   (0x0818U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MASKFULL          (IMG_UINT64_C(0x00000000FFFFF001))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN    (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP1_CONFIG2
+*/
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2                   (0x0820U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_MASKFULL          (IMG_UINT64_C(0x000000FFFFFFF1FF))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_SHIFT    (12U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK   (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT       (6U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK      (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_SHIFT     (5U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK    (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_EN        (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP2_CONFIG1
+*/
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1                   (0x0828U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MASKFULL          (IMG_UINT64_C(0x00000000FFFFF001))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_EN    (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP2_CONFIG2
+*/
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2                   (0x0830U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_MASKFULL          (IMG_UINT64_C(0x000000FFFFFFF1FF))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_SHIFT    (12U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_CLRMSK   (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_OS_ID_SHIFT       (6U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_OS_ID_CLRMSK      (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_SHIFT     (5U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_CLRMSK    (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_EN        (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP3_CONFIG1
+*/
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1                   (0x0838U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MASKFULL          (IMG_UINT64_C(0x00000000FFFFF001))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_EN    (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP3_CONFIG2
+*/
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2                   (0x0840U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_MASKFULL          (IMG_UINT64_C(0x000000FFFFFFF1FF))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_SHIFT    (12U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_CLRMSK   (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_SHIFT       (6U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_CLRMSK      (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_SHIFT     (5U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_CLRMSK    (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_EN        (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP4_CONFIG1
+*/
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1                   (0x0848U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MASKFULL          (IMG_UINT64_C(0x00000000FFFFF001))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN    (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP4_CONFIG2
+*/
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2                   (0x0850U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_MASKFULL          (IMG_UINT64_C(0x000000FFFFFFF1FF))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_SHIFT    (12U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_CLRMSK   (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_SHIFT       (6U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_CLRMSK      (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_SHIFT     (5U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_CLRMSK    (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_EN        (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP5_CONFIG1
+*/
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1                   (0x0858U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MASKFULL          (IMG_UINT64_C(0x00000000FFFFF001))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN    (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP5_CONFIG2
+*/
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2                   (0x0860U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_MASKFULL          (IMG_UINT64_C(0x000000FFFFFFF1FF))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_SHIFT    (12U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_CLRMSK   (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_SHIFT       (6U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_CLRMSK      (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_SHIFT     (5U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_CLRMSK    (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_EN        (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS
+*/
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS            (0x0868U)
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_MASKFULL   (IMG_UINT64_C(0x00000001FFFFFFFF))
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_SHIFT (32U)
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_EN   (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_ADDRESS_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000))
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR
+*/
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR             (0x0870U)
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_MASKFULL    (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_EN    (0x00000001U)
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG
+*/
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG               (0x0878U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MASKFULL      (IMG_UINT64_C(0xFFFFFFF7FFFFFFBF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_SHIFT (36U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_OS_ID_SHIFT   (32U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_OS_ID_CLRMSK  (IMG_UINT64_C(0xFFFFFFF8FFFFFFFF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_SHIFT (11U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_EN    (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_SHIFT (7U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF87F))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4KB (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_16KB (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64KB (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256KB (IMG_UINT64_C(0x0000000000000180))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_1MB (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4MB (IMG_UINT64_C(0x0000000000000280))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_16MB (IMG_UINT64_C(0x0000000000000300))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64MB (IMG_UINT64_C(0x0000000000000380))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256MB (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_SHIFT   (1U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_CLRMSK  (IMG_UINT64_C(0xFFFFFFFFFFFFFFC1))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001))
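
Multi-bit fields such as ENTRY and OS_ID carry no _EN constant; a value is simply shifted into place, and the REGION_SIZE encodings are the pre-shifted enumerated constants listed above. A minimal sketch of composing the non-address part of a remap descriptor, with the entry index and OS id chosen purely for illustration (the ADDR_OUT/BASE_ADDR_IN encodings are left out, since the macros alone do not state them):

    /* Illustrative sketch only; entry index and OS id are hypothetical. */
    IMG_UINT64 ui64Cfg = 0;

    ui64Cfg |= RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256KB;            /* size    */
    ui64Cfg |= (IMG_UINT64)2 << RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_SHIFT;  /* entry 2 */
    ui64Cfg |= (IMG_UINT64)1 << RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_OS_ID_SHIFT;  /* OS id 1 */
    ui64Cfg |= RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_EN;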
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP_RANGE_READ
+*/
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ                 (0x0880U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_MASKFULL        (IMG_UINT64_C(0x000000000000003F))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_ENTRY_SHIFT     (1U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_ENTRY_CLRMSK    (0xFFFFFFC1U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_SHIFT   (0U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_CLRMSK  (0xFFFFFFFEU)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_EN      (0x00000001U)
+
+
+/*
+    Register RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA
+*/
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA                 (0x0888U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MASKFULL        (IMG_UINT64_C(0xFFFFFFF7FFFFFF81))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_SHIFT  (36U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_OS_ID_SHIFT     (32U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_OS_ID_CLRMSK    (IMG_UINT64_C(0xFFFFFFF8FFFFFFFF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_SHIFT   (11U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_CLRMSK  (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_EN      (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_SHIFT (7U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF87F))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_EN  (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_MIPS_WRAPPER_IRQ_ENABLE
+*/
+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE                    (0x08A0U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_MASKFULL           (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_SHIFT        (0U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_CLRMSK       (0xFFFFFFFEU)
+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_EN           (0x00000001U)
+
+
+/*
+    Register RGX_CR_MIPS_WRAPPER_IRQ_STATUS
+*/
+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS                    (0x08A8U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_MASKFULL           (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_SHIFT        (0U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_CLRMSK       (0xFFFFFFFEU)
+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN           (0x00000001U)
+
+
+/*
+    Register RGX_CR_MIPS_WRAPPER_IRQ_CLEAR
+*/
+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR                     (0x08B0U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_MASKFULL            (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_SHIFT         (0U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_CLRMSK        (0xFFFFFFFEU)
+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN            (0x00000001U)
+
+
+/*
+    Register RGX_CR_MIPS_WRAPPER_NMI_ENABLE
+*/
+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE                    (0x08B8U)
+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_MASKFULL           (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_SHIFT        (0U)
+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_CLRMSK       (0xFFFFFFFEU)
+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_EN           (0x00000001U)
+
+
+/*
+    Register RGX_CR_MIPS_WRAPPER_NMI_EVENT
+*/
+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT                     (0x08C0U)
+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_MASKFULL            (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_SHIFT       (0U)
+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_CLRMSK      (0xFFFFFFFEU)
+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_EN          (0x00000001U)
+
+
+/*
+    Register RGX_CR_MIPS_DEBUG_CONFIG
+*/
+#define RGX_CR_MIPS_DEBUG_CONFIG                          (0x08C8U)
+#define RGX_CR_MIPS_DEBUG_CONFIG_MASKFULL                 (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_SHIFT (0U)
+#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_EN   (0x00000001U)
+
+
+/*
+    Register RGX_CR_MIPS_EXCEPTION_STATUS
+*/
+#define RGX_CR_MIPS_EXCEPTION_STATUS                      (0x08D0U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_MASKFULL             (IMG_UINT64_C(0x000000000000003F))
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_SHIFT       (5U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_CLRMSK      (0xFFFFFFDFU)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_EN          (0x00000020U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_SHIFT   (4U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_CLRMSK  (0xFFFFFFEFU)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN      (0x00000010U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_SHIFT    (3U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_CLRMSK   (0xFFFFFFF7U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_EN       (0x00000008U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_SHIFT    (2U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_CLRMSK   (0xFFFFFFFBU)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_EN       (0x00000004U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_SHIFT         (1U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_CLRMSK        (0xFFFFFFFDU)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_EN            (0x00000002U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_SHIFT         (0U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_CLRMSK        (0xFFFFFFFEU)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN            (0x00000001U)
+
+
+/*
+    Register RGX_CR_MIPS_WRAPPER_STATUS
+*/
+#define RGX_CR_MIPS_WRAPPER_STATUS                        (0x08E8U)
+#define RGX_CR_MIPS_WRAPPER_STATUS_MASKFULL               (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_MIPS_WRAPPER_STATUS_OUTSTANDING_REQUESTS_SHIFT (0U)
+#define RGX_CR_MIPS_WRAPPER_STATUS_OUTSTANDING_REQUESTS_CLRMSK (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_XPU_BROADCAST
+*/
+#define RGX_CR_XPU_BROADCAST                              (0x0890U)
+#define RGX_CR_XPU_BROADCAST_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_XPU_BROADCAST_MASK_SHIFT                   (0U)
+#define RGX_CR_XPU_BROADCAST_MASK_CLRMSK                  (0xFFFFFE00U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVDATAX
+*/
+#define RGX_CR_META_SP_MSLVDATAX                          (0x0A00U)
+#define RGX_CR_META_SP_MSLVDATAX_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_SHIFT          (0U)
+#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_CLRMSK         (0x00000000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVDATAT
+*/
+#define RGX_CR_META_SP_MSLVDATAT                          (0x0A08U)
+#define RGX_CR_META_SP_MSLVDATAT_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_SHIFT          (0U)
+#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_CLRMSK         (0x00000000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVCTRL0
+*/
+#define RGX_CR_META_SP_MSLVCTRL0                          (0x0A10U)
+#define RGX_CR_META_SP_MSLVCTRL0_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVCTRL0_ADDR_SHIFT               (2U)
+#define RGX_CR_META_SP_MSLVCTRL0_ADDR_CLRMSK              (0x00000003U)
+#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_SHIFT           (1U)
+#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_CLRMSK          (0xFFFFFFFDU)
+#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_EN              (0x00000002U)
+#define RGX_CR_META_SP_MSLVCTRL0_RD_SHIFT                 (0U)
+#define RGX_CR_META_SP_MSLVCTRL0_RD_CLRMSK                (0xFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVCTRL0_RD_EN                    (0x00000001U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVCTRL1
+*/
+#define RGX_CR_META_SP_MSLVCTRL1                          (0x0A18U)
+#define RGX_CR_META_SP_MSLVCTRL1_MASKFULL                 (IMG_UINT64_C(0x00000000F7F4003F))
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_SHIFT       (30U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_CLRMSK      (0x3FFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_SHIFT    (29U)
+#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_CLRMSK   (0xDFFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_EN       (0x20000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_SHIFT   (28U)
+#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_CLRMSK  (0xEFFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_EN      (0x10000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_SHIFT       (26U)
+#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_CLRMSK      (0xFBFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN          (0x04000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_SHIFT       (25U)
+#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_CLRMSK      (0xFDFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_EN          (0x02000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_READY_SHIFT              (24U)
+#define RGX_CR_META_SP_MSLVCTRL1_READY_CLRMSK             (0xFEFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_READY_EN                 (0x01000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_SHIFT           (21U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_CLRMSK          (0xFF1FFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_SHIFT             (20U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_CLRMSK            (0xFFEFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_EN                (0x00100000U)
+#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_SHIFT          (18U)
+#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_CLRMSK         (0xFFFBFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_EN             (0x00040000U)
+#define RGX_CR_META_SP_MSLVCTRL1_THREAD_SHIFT             (4U)
+#define RGX_CR_META_SP_MSLVCTRL1_THREAD_CLRMSK            (0xFFFFFFCFU)
+#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_SHIFT         (2U)
+#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_CLRMSK        (0xFFFFFFF3U)
+#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_SHIFT         (0U)
+#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_CLRMSK        (0xFFFFFFFCU)
+
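+/*
+ * Illustrative sketch (not part of the generated register map): the
+ * MSLVDATAX/MSLVCTRL0/MSLVCTRL1 registers above make up the META slave
+ * port. One plausible single-word read, inferred from the field names;
+ * the driver's actual access routine may differ, and reg_read32()/
+ * reg_write32() are hypothetical MMIO helpers:
+ *
+ *   reg_write32(RGX_CR_META_SP_MSLVCTRL0,
+ *               (meta_addr & ~RGX_CR_META_SP_MSLVCTRL0_ADDR_CLRMSK) |
+ *               RGX_CR_META_SP_MSLVCTRL0_RD_EN);
+ *   while (!(reg_read32(RGX_CR_META_SP_MSLVCTRL1) &
+ *            RGX_CR_META_SP_MSLVCTRL1_READY_EN))
+ *       ;
+ *   data = reg_read32(RGX_CR_META_SP_MSLVDATAX);
+ */
+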
+
+/*
+    Register RGX_CR_META_SP_MSLVHANDSHKE
+*/
+#define RGX_CR_META_SP_MSLVHANDSHKE                       (0x0A50U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_MASKFULL              (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_SHIFT           (2U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_CLRMSK          (0xFFFFFFF3U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_SHIFT          (0U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_CLRMSK         (0xFFFFFFFCU)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT0KICK
+*/
+#define RGX_CR_META_SP_MSLVT0KICK                         (0x0A80U)
+#define RGX_CR_META_SP_MSLVT0KICK_MASKFULL                (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_SHIFT        (0U)
+#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_CLRMSK       (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT0KICKI
+*/
+#define RGX_CR_META_SP_MSLVT0KICKI                        (0x0A88U)
+#define RGX_CR_META_SP_MSLVT0KICKI_MASKFULL               (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_SHIFT      (0U)
+#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_CLRMSK     (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT1KICK
+*/
+#define RGX_CR_META_SP_MSLVT1KICK                         (0x0A90U)
+#define RGX_CR_META_SP_MSLVT1KICK_MASKFULL                (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_SHIFT        (0U)
+#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_CLRMSK       (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT1KICKI
+*/
+#define RGX_CR_META_SP_MSLVT1KICKI                        (0x0A98U)
+#define RGX_CR_META_SP_MSLVT1KICKI_MASKFULL               (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_SHIFT      (0U)
+#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_CLRMSK     (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT2KICK
+*/
+#define RGX_CR_META_SP_MSLVT2KICK                         (0x0AA0U)
+#define RGX_CR_META_SP_MSLVT2KICK_MASKFULL                (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_SHIFT        (0U)
+#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_CLRMSK       (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT2KICKI
+*/
+#define RGX_CR_META_SP_MSLVT2KICKI                        (0x0AA8U)
+#define RGX_CR_META_SP_MSLVT2KICKI_MASKFULL               (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_SHIFT      (0U)
+#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_CLRMSK     (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT3KICK
+*/
+#define RGX_CR_META_SP_MSLVT3KICK                         (0x0AB0U)
+#define RGX_CR_META_SP_MSLVT3KICK_MASKFULL                (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_SHIFT        (0U)
+#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_CLRMSK       (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT3KICKI
+*/
+#define RGX_CR_META_SP_MSLVT3KICKI                        (0x0AB8U)
+#define RGX_CR_META_SP_MSLVT3KICKI_MASKFULL               (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_SHIFT      (0U)
+#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_CLRMSK     (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVRST
+*/
+#define RGX_CR_META_SP_MSLVRST                            (0x0AC0U)
+#define RGX_CR_META_SP_MSLVRST_MASKFULL                   (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_META_SP_MSLVRST_SOFTRESET_SHIFT            (0U)
+#define RGX_CR_META_SP_MSLVRST_SOFTRESET_CLRMSK           (0xFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVRST_SOFTRESET_EN               (0x00000001U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVIRQSTATUS
+*/
+#define RGX_CR_META_SP_MSLVIRQSTATUS                      (0x0AC8U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_MASKFULL             (IMG_UINT64_C(0x000000000000000C))
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_SHIFT      (3U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_CLRMSK     (0xFFFFFFF7U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_EN         (0x00000008U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_SHIFT      (2U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK     (0xFFFFFFFBU)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN         (0x00000004U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVIRQENABLE
+*/
+#define RGX_CR_META_SP_MSLVIRQENABLE                      (0x0AD0U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_MASKFULL             (IMG_UINT64_C(0x000000000000000C))
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_SHIFT         (3U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_CLRMSK        (0xFFFFFFF7U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_EN            (0x00000008U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_SHIFT         (2U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_CLRMSK        (0xFFFFFFFBU)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_EN            (0x00000004U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVIRQLEVEL
+*/
+#define RGX_CR_META_SP_MSLVIRQLEVEL                       (0x0AD8U)
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MASKFULL              (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_SHIFT            (0U)
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_CLRMSK           (0xFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_EN               (0x00000001U)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE
+*/
+#define RGX_CR_MTS_SCHEDULE                               (0x0B00U)
+#define RGX_CR_MTS_SCHEDULE_MASKFULL                      (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE_HOST_SHIFT                    (8U)
+#define RGX_CR_MTS_SCHEDULE_HOST_CLRMSK                   (0xFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE_HOST_BG_TIMER                 (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE_HOST_HOST                     (0x00000100U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_SHIFT                (6U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_CLRMSK               (0xFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT0                 (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT1                 (0x00000040U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT2                 (0x00000080U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT3                 (0x000000C0U)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_SHIFT                 (5U)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_CLRMSK                (0xFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_BGCTX                 (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_INTCTX                (0x00000020U)
+#define RGX_CR_MTS_SCHEDULE_TASK_SHIFT                    (4U)
+#define RGX_CR_MTS_SCHEDULE_TASK_CLRMSK                   (0xFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED              (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE_TASK_COUNTED                  (0x00000010U)
+#define RGX_CR_MTS_SCHEDULE_DM_SHIFT                      (0U)
+#define RGX_CR_MTS_SCHEDULE_DM_CLRMSK                     (0xFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM0                        (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM1                        (0x00000001U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM2                        (0x00000002U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM3                        (0x00000003U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM4                        (0x00000004U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM5                        (0x00000005U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM6                        (0x00000006U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM7                        (0x00000007U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM_ALL                     (0x0000000FU)
+
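+/*
+ * Illustrative sketch (not part of the generated register map): a kick
+ * written to RGX_CR_MTS_SCHEDULE is composed from the field values above,
+ * e.g. a counted, interrupt-context, priority-0 host schedule of data
+ * master 0 (reg_write32() is a hypothetical MMIO helper):
+ *
+ *   reg_write32(RGX_CR_MTS_SCHEDULE,
+ *               RGX_CR_MTS_SCHEDULE_HOST_HOST      |
+ *               RGX_CR_MTS_SCHEDULE_PRIORITY_PRT0  |
+ *               RGX_CR_MTS_SCHEDULE_CONTEXT_INTCTX |
+ *               RGX_CR_MTS_SCHEDULE_TASK_COUNTED   |
+ *               RGX_CR_MTS_SCHEDULE_DM_DM0);
+ */
+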
+
+/*
+    Register RGX_CR_MTS_SCHEDULE1
+*/
+#define RGX_CR_MTS_SCHEDULE1                              (0x10B00U)
+#define RGX_CR_MTS_SCHEDULE1_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE1_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE1_HOST_CLRMSK                  (0xFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE1_HOST_BG_TIMER                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE1_HOST_HOST                    (0x00000100U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_CLRMSK              (0xFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT0                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT1                (0x00000040U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT2                (0x00000080U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT3                (0x000000C0U)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_CLRMSK               (0xFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_BGCTX                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_INTCTX               (0x00000020U)
+#define RGX_CR_MTS_SCHEDULE1_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE1_TASK_CLRMSK                  (0xFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE1_TASK_NON_COUNTED             (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE1_TASK_COUNTED                 (0x00000010U)
+#define RGX_CR_MTS_SCHEDULE1_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE1_DM_CLRMSK                    (0xFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM0                       (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM1                       (0x00000001U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM2                       (0x00000002U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM3                       (0x00000003U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM4                       (0x00000004U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM5                       (0x00000005U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM6                       (0x00000006U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM7                       (0x00000007U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM_ALL                    (0x0000000FU)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE2
+*/
+#define RGX_CR_MTS_SCHEDULE2                              (0x20B00U)
+#define RGX_CR_MTS_SCHEDULE2_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE2_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE2_HOST_CLRMSK                  (0xFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE2_HOST_BG_TIMER                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE2_HOST_HOST                    (0x00000100U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_CLRMSK              (0xFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT0                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT1                (0x00000040U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT2                (0x00000080U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT3                (0x000000C0U)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_CLRMSK               (0xFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_BGCTX                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_INTCTX               (0x00000020U)
+#define RGX_CR_MTS_SCHEDULE2_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE2_TASK_CLRMSK                  (0xFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE2_TASK_NON_COUNTED             (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE2_TASK_COUNTED                 (0x00000010U)
+#define RGX_CR_MTS_SCHEDULE2_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE2_DM_CLRMSK                    (0xFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM0                       (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM1                       (0x00000001U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM2                       (0x00000002U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM3                       (0x00000003U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM4                       (0x00000004U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM5                       (0x00000005U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM6                       (0x00000006U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM7                       (0x00000007U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM_ALL                    (0x0000000FU)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE3
+*/
+#define RGX_CR_MTS_SCHEDULE3                              (0x30B00U)
+#define RGX_CR_MTS_SCHEDULE3_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE3_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE3_HOST_CLRMSK                  (0xFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE3_HOST_BG_TIMER                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE3_HOST_HOST                    (0x00000100U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_CLRMSK              (0xFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT0                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT1                (0x00000040U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT2                (0x00000080U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT3                (0x000000C0U)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_CLRMSK               (0xFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_BGCTX                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_INTCTX               (0x00000020U)
+#define RGX_CR_MTS_SCHEDULE3_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE3_TASK_CLRMSK                  (0xFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE3_TASK_NON_COUNTED             (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE3_TASK_COUNTED                 (0x00000010U)
+#define RGX_CR_MTS_SCHEDULE3_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE3_DM_CLRMSK                    (0xFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM0                       (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM1                       (0x00000001U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM2                       (0x00000002U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM3                       (0x00000003U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM4                       (0x00000004U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM5                       (0x00000005U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM6                       (0x00000006U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM7                       (0x00000007U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM_ALL                    (0x0000000FU)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE4
+*/
+#define RGX_CR_MTS_SCHEDULE4                              (0x40B00U)
+#define RGX_CR_MTS_SCHEDULE4_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE4_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE4_HOST_CLRMSK                  (0xFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE4_HOST_BG_TIMER                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE4_HOST_HOST                    (0x00000100U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_CLRMSK              (0xFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT0                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT1                (0x00000040U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT2                (0x00000080U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT3                (0x000000C0U)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_CLRMSK               (0xFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_BGCTX                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_INTCTX               (0x00000020U)
+#define RGX_CR_MTS_SCHEDULE4_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE4_TASK_CLRMSK                  (0xFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE4_TASK_NON_COUNTED             (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE4_TASK_COUNTED                 (0x00000010U)
+#define RGX_CR_MTS_SCHEDULE4_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE4_DM_CLRMSK                    (0xFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM0                       (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM1                       (0x00000001U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM2                       (0x00000002U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM3                       (0x00000003U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM4                       (0x00000004U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM5                       (0x00000005U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM6                       (0x00000006U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM7                       (0x00000007U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM_ALL                    (0x0000000FU)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE5
+*/
+#define RGX_CR_MTS_SCHEDULE5                              (0x50B00U)
+#define RGX_CR_MTS_SCHEDULE5_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE5_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE5_HOST_CLRMSK                  (0xFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE5_HOST_BG_TIMER                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE5_HOST_HOST                    (0x00000100U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_CLRMSK              (0xFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT0                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT1                (0x00000040U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT2                (0x00000080U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT3                (0x000000C0U)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_CLRMSK               (0xFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_BGCTX                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_INTCTX               (0x00000020U)
+#define RGX_CR_MTS_SCHEDULE5_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE5_TASK_CLRMSK                  (0xFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE5_TASK_NON_COUNTED             (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE5_TASK_COUNTED                 (0x00000010U)
+#define RGX_CR_MTS_SCHEDULE5_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE5_DM_CLRMSK                    (0xFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM0                       (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM1                       (0x00000001U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM2                       (0x00000002U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM3                       (0x00000003U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM4                       (0x00000004U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM5                       (0x00000005U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM6                       (0x00000006U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM7                       (0x00000007U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM_ALL                    (0x0000000FU)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE6
+*/
+#define RGX_CR_MTS_SCHEDULE6                              (0x60B00U)
+#define RGX_CR_MTS_SCHEDULE6_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE6_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE6_HOST_CLRMSK                  (0xFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE6_HOST_BG_TIMER                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE6_HOST_HOST                    (0x00000100U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_CLRMSK              (0xFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT0                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT1                (0x00000040U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT2                (0x00000080U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT3                (0x000000C0U)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_CLRMSK               (0xFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_BGCTX                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_INTCTX               (0x00000020U)
+#define RGX_CR_MTS_SCHEDULE6_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE6_TASK_CLRMSK                  (0xFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE6_TASK_NON_COUNTED             (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE6_TASK_COUNTED                 (0x00000010U)
+#define RGX_CR_MTS_SCHEDULE6_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE6_DM_CLRMSK                    (0xFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM0                       (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM1                       (0x00000001U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM2                       (0x00000002U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM3                       (0x00000003U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM4                       (0x00000004U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM5                       (0x00000005U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM6                       (0x00000006U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM7                       (0x00000007U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM_ALL                    (0x0000000FU)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE7
+*/
+#define RGX_CR_MTS_SCHEDULE7                              (0x70B00U)
+#define RGX_CR_MTS_SCHEDULE7_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE7_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE7_HOST_CLRMSK                  (0xFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE7_HOST_BG_TIMER                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE7_HOST_HOST                    (0x00000100U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_CLRMSK              (0xFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT0                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT1                (0x00000040U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT2                (0x00000080U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT3                (0x000000C0U)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_CLRMSK               (0xFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_BGCTX                (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_INTCTX               (0x00000020U)
+#define RGX_CR_MTS_SCHEDULE7_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE7_TASK_CLRMSK                  (0xFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE7_TASK_NON_COUNTED             (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE7_TASK_COUNTED                 (0x00000010U)
+#define RGX_CR_MTS_SCHEDULE7_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE7_DM_CLRMSK                    (0xFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM0                       (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM1                       (0x00000001U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM2                       (0x00000002U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM3                       (0x00000003U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM4                       (0x00000004U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM5                       (0x00000005U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM6                       (0x00000006U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM7                       (0x00000007U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM_ALL                    (0x0000000FU)
+
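+/*
+ * Note (illustrative, not part of the generated register map):
+ * RGX_CR_MTS_SCHEDULE1..RGX_CR_MTS_SCHEDULE7 are per-instance copies of
+ * RGX_CR_MTS_SCHEDULE spaced 0x10000 bytes apart (0x0B00, 0x10B00, ...,
+ * 0x70B00), matching the per-OS banking seen in the RGX_CR_IRQ_OSn
+ * registers further below. Instance n could equivalently be addressed by
+ * a hypothetical helper such as:
+ *
+ *   #define MTS_SCHEDULE_FOR_INSTANCE(n) (RGX_CR_MTS_SCHEDULE + ((n) * 0x10000U))
+ */
+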
+
+/*
+    Register RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC
+*/
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC                 (0x0B30U)
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL        (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT  (0U)
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC
+*/
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC                 (0x0B38U)
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL        (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT  (0U)
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC
+*/
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC                (0x0B40U)
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL       (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U)
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC
+*/
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC                (0x0B48U)
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL       (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U)
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_MTS_GARTEN_WRAPPER_CONFIG
+*/
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG                  (0x0B50U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__MASKFULL (IMG_UINT64_C(0x000FF0FFFFFFF701))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_MASKFULL         (IMG_UINT64_C(0x0000FFFFFFFFF001))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT (44U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK (IMG_UINT64_C(0xFFFF0FFFFFFFFFFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_SHIFT (44U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_CLRMSK (IMG_UINT64_C(0xFFF00FFFFFFFFFFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT   (40U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_CLRMSK  (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_SHIFT (12U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PERSISTENCE_SHIFT (9U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PERSISTENCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF9FF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_SHIFT (8U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_EN (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_SHIFT  (0U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META   (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_MTS    (IMG_UINT64_C(0x0000000000000001))
+
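+/*
+ * Illustrative sketch (not part of the generated register map): 64-bit
+ * fields such as those of RGX_CR_MTS_GARTEN_WRAPPER_CONFIG pack with the
+ * same _SHIFT/_CLRMSK pairs, e.g. placing a fence address field and
+ * selecting MTS idle control (fence_addr_field is an arbitrary
+ * illustrative value, not necessarily what the driver programs):
+ *
+ *   IMG_UINT64 cfg = 0;
+ *   cfg |= ((IMG_UINT64)fence_addr_field
+ *            << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_SHIFT)
+ *          & ~RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_CLRMSK;
+ *   cfg |= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_MTS;
+ */
+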
+
+/*
+    Register RGX_CR_MTS_DM0_INTERRUPT_ENABLE
+*/
+#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE                   (0x0B58U)
+#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE_INT_ENABLE_SHIFT  (0U)
+#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U)
+
+
+/*
+    Register RGX_CR_MTS_DM1_INTERRUPT_ENABLE
+*/
+#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE                   (0x0B60U)
+#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE_INT_ENABLE_SHIFT  (0U)
+#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U)
+
+
+/*
+    Register RGX_CR_MTS_DM2_INTERRUPT_ENABLE
+*/
+#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE                   (0x0B68U)
+#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE_INT_ENABLE_SHIFT  (0U)
+#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U)
+
+
+/*
+    Register RGX_CR_MTS_DM3_INTERRUPT_ENABLE
+*/
+#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE                   (0x0B70U)
+#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE_INT_ENABLE_SHIFT  (0U)
+#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U)
+
+
+/*
+    Register RGX_CR_MTS_DM4_INTERRUPT_ENABLE
+*/
+#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE                   (0x0B78U)
+#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE_INT_ENABLE_SHIFT  (0U)
+#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U)
+
+
+/*
+    Register RGX_CR_MTS_DM5_INTERRUPT_ENABLE
+*/
+#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE                   (0x0B80U)
+#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE_INT_ENABLE_SHIFT  (0U)
+#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U)
+
+
+/*
+    Register RGX_CR_MTS_INTCTX
+*/
+#define RGX_CR_MTS_INTCTX                                 (0x0B98U)
+#define RGX_CR_MTS_INTCTX_MASKFULL                        (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_SHIFT          (22U)
+#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_CLRMSK         (0xC03FFFFFU)
+#define RGX_CR_MTS_INTCTX_DM_PTR_SHIFT                    (18U)
+#define RGX_CR_MTS_INTCTX_DM_PTR_CLRMSK                   (0xFFC3FFFFU)
+#define RGX_CR_MTS_INTCTX_THREAD_ACTIVE_SHIFT             (16U)
+#define RGX_CR_MTS_INTCTX_THREAD_ACTIVE_CLRMSK            (0xFFFCFFFFU)
+#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_SHIFT         (8U)
+#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_CLRMSK        (0xFFFF00FFU)
+#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_SHIFT     (0U)
+#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_CLRMSK    (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_MTS_BGCTX
+*/
+#define RGX_CR_MTS_BGCTX                                  (0x0BA0U)
+#define RGX_CR_MTS_BGCTX_MASKFULL                         (IMG_UINT64_C(0x0000000000003FFF))
+#define RGX_CR_MTS_BGCTX_DM_PTR_SHIFT                     (10U)
+#define RGX_CR_MTS_BGCTX_DM_PTR_CLRMSK                    (0xFFFFC3FFU)
+#define RGX_CR_MTS_BGCTX_THREAD_ACTIVE_SHIFT              (8U)
+#define RGX_CR_MTS_BGCTX_THREAD_ACTIVE_CLRMSK             (0xFFFFFCFFU)
+#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_SHIFT     (0U)
+#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_CLRMSK    (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE
+*/
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE                 (0x0BA8U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_MASKFULL        (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_SHIFT       (56U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_CLRMSK      (IMG_UINT64_C(0x00FFFFFFFFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_SHIFT       (48U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_CLRMSK      (IMG_UINT64_C(0xFF00FFFFFFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_SHIFT       (40U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_CLRMSK      (IMG_UINT64_C(0xFFFF00FFFFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_SHIFT       (32U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_CLRMSK      (IMG_UINT64_C(0xFFFFFF00FFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_SHIFT       (24U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_CLRMSK      (IMG_UINT64_C(0xFFFFFFFF00FFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_SHIFT       (16U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_CLRMSK      (IMG_UINT64_C(0xFFFFFFFFFF00FFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_SHIFT       (8U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_CLRMSK      (IMG_UINT64_C(0xFFFFFFFFFFFF00FF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_SHIFT       (0U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_CLRMSK      (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+    Register RGX_CR_MTS_GPU_INT_STATUS
+*/
+#define RGX_CR_MTS_GPU_INT_STATUS                         (0x0BB0U)
+#define RGX_CR_MTS_GPU_INT_STATUS_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_SHIFT            (0U)
+#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_CLRMSK           (0x00000000U)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE_ENABLE
+*/
+#define RGX_CR_MTS_SCHEDULE_ENABLE                        (0x0BC8U)
+#define RGX_CR_MTS_SCHEDULE_ENABLE_MASKFULL               (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_MTS_SCHEDULE_ENABLE_MASK_SHIFT             (0U)
+#define RGX_CR_MTS_SCHEDULE_ENABLE_MASK_CLRMSK            (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_IRQ_OS0_EVENT_STATUS
+*/
+#define RGX_CR_IRQ_OS0_EVENT_STATUS                       (0x0BD8U)
+#define RGX_CR_IRQ_OS0_EVENT_STATUS_MASKFULL              (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_SHIFT          (0U)
+#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_CLRMSK         (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_EN             (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS0_EVENT_CLEAR
+*/
+#define RGX_CR_IRQ_OS0_EVENT_CLEAR                        (0x0BE8U)
+#define RGX_CR_IRQ_OS0_EVENT_CLEAR_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_SHIFT           (0U)
+#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_CLRMSK          (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_EN              (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS1_EVENT_STATUS
+*/
+#define RGX_CR_IRQ_OS1_EVENT_STATUS                       (0x10BD8U)
+#define RGX_CR_IRQ_OS1_EVENT_STATUS_MASKFULL              (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_SHIFT          (0U)
+#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_CLRMSK         (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_EN             (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS1_EVENT_CLEAR
+*/
+#define RGX_CR_IRQ_OS1_EVENT_CLEAR                        (0x10BE8U)
+#define RGX_CR_IRQ_OS1_EVENT_CLEAR_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_SHIFT           (0U)
+#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_CLRMSK          (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_EN              (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS2_EVENT_STATUS
+*/
+#define RGX_CR_IRQ_OS2_EVENT_STATUS                       (0x20BD8U)
+#define RGX_CR_IRQ_OS2_EVENT_STATUS_MASKFULL              (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_SHIFT          (0U)
+#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_CLRMSK         (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_EN             (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS2_EVENT_CLEAR
+*/
+#define RGX_CR_IRQ_OS2_EVENT_CLEAR                        (0x20BE8U)
+#define RGX_CR_IRQ_OS2_EVENT_CLEAR_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_SHIFT           (0U)
+#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_CLRMSK          (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_EN              (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS3_EVENT_STATUS
+*/
+#define RGX_CR_IRQ_OS3_EVENT_STATUS                       (0x30BD8U)
+#define RGX_CR_IRQ_OS3_EVENT_STATUS_MASKFULL              (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_SHIFT          (0U)
+#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_CLRMSK         (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_EN             (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS3_EVENT_CLEAR
+*/
+#define RGX_CR_IRQ_OS3_EVENT_CLEAR                        (0x30BE8U)
+#define RGX_CR_IRQ_OS3_EVENT_CLEAR_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_SHIFT           (0U)
+#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_CLRMSK          (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_EN              (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS4_EVENT_STATUS
+*/
+#define RGX_CR_IRQ_OS4_EVENT_STATUS                       (0x40BD8U)
+#define RGX_CR_IRQ_OS4_EVENT_STATUS_MASKFULL              (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_SHIFT          (0U)
+#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_CLRMSK         (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_EN             (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS4_EVENT_CLEAR
+*/
+#define RGX_CR_IRQ_OS4_EVENT_CLEAR                        (0x40BE8U)
+#define RGX_CR_IRQ_OS4_EVENT_CLEAR_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_SHIFT           (0U)
+#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_CLRMSK          (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_EN              (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS5_EVENT_STATUS
+*/
+#define RGX_CR_IRQ_OS5_EVENT_STATUS                       (0x50BD8U)
+#define RGX_CR_IRQ_OS5_EVENT_STATUS_MASKFULL              (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_SHIFT          (0U)
+#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_CLRMSK         (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_EN             (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS5_EVENT_CLEAR
+*/
+#define RGX_CR_IRQ_OS5_EVENT_CLEAR                        (0x50BE8U)
+#define RGX_CR_IRQ_OS5_EVENT_CLEAR_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_SHIFT           (0U)
+#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_CLRMSK          (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_EN              (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS6_EVENT_STATUS
+*/
+#define RGX_CR_IRQ_OS6_EVENT_STATUS                       (0x60BD8U)
+#define RGX_CR_IRQ_OS6_EVENT_STATUS_MASKFULL              (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_SHIFT          (0U)
+#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_CLRMSK         (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_EN             (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS6_EVENT_CLEAR
+*/
+#define RGX_CR_IRQ_OS6_EVENT_CLEAR                        (0x60BE8U)
+#define RGX_CR_IRQ_OS6_EVENT_CLEAR_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_SHIFT           (0U)
+#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_CLRMSK          (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_EN              (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS7_EVENT_STATUS
+*/
+#define RGX_CR_IRQ_OS7_EVENT_STATUS                       (0x70BD8U)
+#define RGX_CR_IRQ_OS7_EVENT_STATUS_MASKFULL              (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_SHIFT          (0U)
+#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_CLRMSK         (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_EN             (0x00000001U)
+
+
+/*
+    Register RGX_CR_IRQ_OS7_EVENT_CLEAR
+*/
+#define RGX_CR_IRQ_OS7_EVENT_CLEAR                        (0x70BE8U)
+#define RGX_CR_IRQ_OS7_EVENT_CLEAR_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_SHIFT           (0U)
+#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_CLRMSK          (0xFFFFFFFEU)
+#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_EN              (0x00000001U)
+
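+/*
+ * Illustrative sketch (not part of the generated register map): each
+ * RGX_CR_IRQ_OSn_EVENT_STATUS/_CLEAR pair above sits in a per-OS register
+ * bank spaced 0x10000 bytes apart (0x0BD8/0x0BE8, 0x10BD8/0x10BE8, ...),
+ * so a generic handler can be written against the OS0 offsets plus a bank
+ * stride (reg_read32()/reg_write32() are hypothetical MMIO helpers):
+ *
+ *   IMG_UINT32 bank = os_id * 0x10000U;
+ *   if (reg_read32(RGX_CR_IRQ_OS0_EVENT_STATUS + bank) &
+ *       RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_EN)
+ *   {
+ *       reg_write32(RGX_CR_IRQ_OS0_EVENT_CLEAR + bank,
+ *                   RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_EN);
+ *   }
+ */
+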
+
+/*
+    Register RGX_CR_META_BOOT
+*/
+#define RGX_CR_META_BOOT                                  (0x0BF8U)
+#define RGX_CR_META_BOOT_MASKFULL                         (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_META_BOOT_MODE_SHIFT                       (0U)
+#define RGX_CR_META_BOOT_MODE_CLRMSK                      (0xFFFFFFFEU)
+#define RGX_CR_META_BOOT_MODE_EN                          (0x00000001U)
+
+
+/*
+    Register RGX_CR_GARTEN_SLC
+*/
+#define RGX_CR_GARTEN_SLC                                 (0x0BB8U)
+#define RGX_CR_GARTEN_SLC_MASKFULL                        (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_SHIFT           (0U)
+#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_CLRMSK          (0xFFFFFFFEU)
+#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_EN              (0x00000001U)
+
+
+/*
+    Register RGX_CR_PPP
+*/
+#define RGX_CR_PPP                                        (0x0CD0U)
+#define RGX_CR_PPP_MASKFULL                               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PPP_CHECKSUM_SHIFT                         (0U)
+#define RGX_CR_PPP_CHECKSUM_CLRMSK                        (0x00000000U)
+
+
+#define RGX_CR_ISP_RENDER_DIR_TYPE_MASK                   (0x00000003U)
+/* Top-left to bottom-right */
+#define RGX_CR_ISP_RENDER_DIR_TYPE_TL2BR                  (0x00000000U)
+/* Top-right to bottom-left */
+#define RGX_CR_ISP_RENDER_DIR_TYPE_TR2BL                  (0x00000001U)
+/* Bottom-left to top-right */
+#define RGX_CR_ISP_RENDER_DIR_TYPE_BL2TR                  (0x00000002U)
+/* Bottom-right to top-left */
+#define RGX_CR_ISP_RENDER_DIR_TYPE_BR2TL                  (0x00000003U)
+
+
+#define RGX_CR_ISP_RENDER_MODE_TYPE_MASK                  (0x00000003U)
+/* Normal render */
+#define RGX_CR_ISP_RENDER_MODE_TYPE_NORM                  (0x00000000U)
+/* Fast 2D render */
+#define RGX_CR_ISP_RENDER_MODE_TYPE_FAST_2D               (0x00000002U)
+/* Fast scale render */
+#define RGX_CR_ISP_RENDER_MODE_TYPE_FAST_SCALE            (0x00000003U)
+
+
+/*
+    Register RGX_CR_ISP_RENDER
+*/
+#define RGX_CR_ISP_RENDER                                 (0x0F08U)
+#define RGX_CR_ISP_RENDER_MASKFULL                        (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_ISP_RENDER_FAST_RENDER_FORCE_PROTECT_SHIFT (8U)
+#define RGX_CR_ISP_RENDER_FAST_RENDER_FORCE_PROTECT_CLRMSK (0xFFFFFEFFU)
+#define RGX_CR_ISP_RENDER_FAST_RENDER_FORCE_PROTECT_EN    (0x00000100U)
+#define RGX_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_SHIFT   (7U)
+#define RGX_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_CLRMSK  (0xFFFFFF7FU)
+#define RGX_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_EN      (0x00000080U)
+#define RGX_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_SHIFT (6U)
+#define RGX_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_CLRMSK (0xFFFFFFBFU)
+#define RGX_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_EN    (0x00000040U)
+#define RGX_CR_ISP_RENDER_DISABLE_EOMT_SHIFT              (5U)
+#define RGX_CR_ISP_RENDER_DISABLE_EOMT_CLRMSK             (0xFFFFFFDFU)
+#define RGX_CR_ISP_RENDER_DISABLE_EOMT_EN                 (0x00000020U)
+#define RGX_CR_ISP_RENDER_RESUME_SHIFT                    (4U)
+#define RGX_CR_ISP_RENDER_RESUME_CLRMSK                   (0xFFFFFFEFU)
+#define RGX_CR_ISP_RENDER_RESUME_EN                       (0x00000010U)
+#define RGX_CR_ISP_RENDER_DIR_SHIFT                       (2U)
+#define RGX_CR_ISP_RENDER_DIR_CLRMSK                      (0xFFFFFFF3U)
+#define RGX_CR_ISP_RENDER_DIR_TL2BR                       (0x00000000U)
+#define RGX_CR_ISP_RENDER_DIR_TR2BL                       (0x00000004U)
+#define RGX_CR_ISP_RENDER_DIR_BL2TR                       (0x00000008U)
+#define RGX_CR_ISP_RENDER_DIR_BR2TL                       (0x0000000CU)
+#define RGX_CR_ISP_RENDER_MODE_SHIFT                      (0U)
+#define RGX_CR_ISP_RENDER_MODE_CLRMSK                     (0xFFFFFFFCU)
+#define RGX_CR_ISP_RENDER_MODE_NORM                       (0x00000000U)
+#define RGX_CR_ISP_RENDER_MODE_FAST_2D                    (0x00000002U)
+#define RGX_CR_ISP_RENDER_MODE_FAST_SCALE                 (0x00000003U)
+
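+/*
+ * Illustrative sketch (not part of the generated register map): an ISP
+ * render kick combines a render mode and a scan direction from the
+ * fields above, e.g. a normal render swept top-left to bottom-right
+ * (reg_write32() is a hypothetical MMIO helper):
+ *
+ *   reg_write32(RGX_CR_ISP_RENDER,
+ *               RGX_CR_ISP_RENDER_MODE_NORM |
+ *               RGX_CR_ISP_RENDER_DIR_TL2BR);
+ */
+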
+
+/*
+    Register RGX_CR_ISP_CTL
+*/
+#define RGX_CR_ISP_CTL                                    (0x0F38U)
+#define RGX_CR_ISP_CTL_MASKFULL                           (IMG_UINT64_C(0x00000000FFFFF3FF))
+#define RGX_CR_ISP_CTL_SKIP_INIT_HDRS_SHIFT               (31U)
+#define RGX_CR_ISP_CTL_SKIP_INIT_HDRS_CLRMSK              (0x7FFFFFFFU)
+#define RGX_CR_ISP_CTL_SKIP_INIT_HDRS_EN                  (0x80000000U)
+#define RGX_CR_ISP_CTL_LINE_STYLE_SHIFT                   (30U)
+#define RGX_CR_ISP_CTL_LINE_STYLE_CLRMSK                  (0xBFFFFFFFU)
+#define RGX_CR_ISP_CTL_LINE_STYLE_EN                      (0x40000000U)
+#define RGX_CR_ISP_CTL_LINE_STYLE_PIX_SHIFT               (29U)
+#define RGX_CR_ISP_CTL_LINE_STYLE_PIX_CLRMSK              (0xDFFFFFFFU)
+#define RGX_CR_ISP_CTL_LINE_STYLE_PIX_EN                  (0x20000000U)
+#define RGX_CR_ISP_CTL_PAIR_TILES_VERT_SHIFT              (28U)
+#define RGX_CR_ISP_CTL_PAIR_TILES_VERT_CLRMSK             (0xEFFFFFFFU)
+#define RGX_CR_ISP_CTL_PAIR_TILES_VERT_EN                 (0x10000000U)
+#define RGX_CR_ISP_CTL_PAIR_TILES_SHIFT                   (27U)
+#define RGX_CR_ISP_CTL_PAIR_TILES_CLRMSK                  (0xF7FFFFFFU)
+#define RGX_CR_ISP_CTL_PAIR_TILES_EN                      (0x08000000U)
+#define RGX_CR_ISP_CTL_CREQ_BUF_EN_SHIFT                  (26U)
+#define RGX_CR_ISP_CTL_CREQ_BUF_EN_CLRMSK                 (0xFBFFFFFFU)
+#define RGX_CR_ISP_CTL_CREQ_BUF_EN_EN                     (0x04000000U)
+#define RGX_CR_ISP_CTL_TILE_AGE_EN_SHIFT                  (25U)
+#define RGX_CR_ISP_CTL_TILE_AGE_EN_CLRMSK                 (0xFDFFFFFFU)
+#define RGX_CR_ISP_CTL_TILE_AGE_EN_EN                     (0x02000000U)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_SHIFT          (23U)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_CLRMSK         (0xFE7FFFFFU)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX9            (0x00000000U)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX10           (0x00800000U)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_OGL            (0x01000000U)
+#define RGX_CR_ISP_CTL_NUM_TILES_PER_USC_SHIFT            (21U)
+#define RGX_CR_ISP_CTL_NUM_TILES_PER_USC_CLRMSK           (0xFF9FFFFFU)
+#define RGX_CR_ISP_CTL_DBIAS_IS_INT_SHIFT                 (20U)
+#define RGX_CR_ISP_CTL_DBIAS_IS_INT_CLRMSK                (0xFFEFFFFFU)
+#define RGX_CR_ISP_CTL_DBIAS_IS_INT_EN                    (0x00100000U)
+#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_SHIFT           (19U)
+#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_CLRMSK          (0xFFF7FFFFU)
+#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_EN              (0x00080000U)
+#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_SHIFT     (18U)
+#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_CLRMSK    (0xFFFBFFFFU)
+#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_EN        (0x00040000U)
+#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_SHIFT          (17U)
+#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_CLRMSK         (0xFFFDFFFFU)
+#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_EN             (0x00020000U)
+#define RGX_CR_ISP_CTL_SAMPLE_POS_SHIFT                   (16U)
+#define RGX_CR_ISP_CTL_SAMPLE_POS_CLRMSK                  (0xFFFEFFFFU)
+#define RGX_CR_ISP_CTL_SAMPLE_POS_EN                      (0x00010000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_SHIFT                  (12U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_CLRMSK                 (0xFFFF0FFFU)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_ONE               (0x00000000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWO               (0x00001000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_THREE             (0x00002000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOUR              (0x00003000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIVE              (0x00004000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIX               (0x00005000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SEVEN             (0x00006000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_EIGHT             (0x00007000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_NINE              (0x00008000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TEN               (0x00009000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_ELEVEN            (0x0000A000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWELVE            (0x0000B000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_THIRTEEN          (0x0000C000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOURTEEN          (0x0000D000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIFTEEN           (0x0000E000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIXTEEN           (0x0000F000U)
+#define RGX_CR_ISP_CTL_VALID_ID_SHIFT                     (4U)
+#define RGX_CR_ISP_CTL_VALID_ID_CLRMSK                    (0xFFFFFC0FU)
+#define RGX_CR_ISP_CTL_UPASS_START_SHIFT                  (0U)
+#define RGX_CR_ISP_CTL_UPASS_START_CLRMSK                 (0xFFFFFFF0U)
+
+
+/*
+    Register RGX_CR_ISP_STATUS
+*/
+#define RGX_CR_ISP_STATUS                                 (0x1038U)
+#define RGX_CR_ISP_STATUS_MASKFULL                        (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_ISP_STATUS_SPLIT_MAX_SHIFT                 (2U)
+#define RGX_CR_ISP_STATUS_SPLIT_MAX_CLRMSK                (0xFFFFFFFBU)
+#define RGX_CR_ISP_STATUS_SPLIT_MAX_EN                    (0x00000004U)
+#define RGX_CR_ISP_STATUS_ACTIVE_SHIFT                    (1U)
+#define RGX_CR_ISP_STATUS_ACTIVE_CLRMSK                   (0xFFFFFFFDU)
+#define RGX_CR_ISP_STATUS_ACTIVE_EN                       (0x00000002U)
+#define RGX_CR_ISP_STATUS_EOR_SHIFT                       (0U)
+#define RGX_CR_ISP_STATUS_EOR_CLRMSK                      (0xFFFFFFFEU)
+#define RGX_CR_ISP_STATUS_EOR_EN                          (0x00000001U)
+
+
+/*
+    Register group: RGX_CR_ISP_XTP_RESUME, with 64 repeats
+*/
+#define RGX_CR_ISP_XTP_RESUME_REPEATCOUNT                 (64U)
+/*
+    Register RGX_CR_ISP_XTP_RESUME0
+*/
+#define RGX_CR_ISP_XTP_RESUME0                            (0x3A00U)
+#define RGX_CR_ISP_XTP_RESUME0_MASKFULL                   (IMG_UINT64_C(0x00000000003FF3FF))
+#define RGX_CR_ISP_XTP_RESUME0_TILE_X_SHIFT               (12U)
+#define RGX_CR_ISP_XTP_RESUME0_TILE_X_CLRMSK              (0xFFC00FFFU)
+#define RGX_CR_ISP_XTP_RESUME0_TILE_Y_SHIFT               (0U)
+#define RGX_CR_ISP_XTP_RESUME0_TILE_Y_CLRMSK              (0xFFFFFC00U)
+
+
+/*
+    Register group: RGX_CR_ISP_XTP_STORE, with 32 repeats
+*/
+#define RGX_CR_ISP_XTP_STORE_REPEATCOUNT                  (32U)
+/*
+    Register RGX_CR_ISP_XTP_STORE0
+*/
+#define RGX_CR_ISP_XTP_STORE0                             (0x3C00U)
+#define RGX_CR_ISP_XTP_STORE0_MASKFULL                    (IMG_UINT64_C(0x000000007F3FF3FF))
+#define RGX_CR_ISP_XTP_STORE0_ACTIVE_SHIFT                (30U)
+#define RGX_CR_ISP_XTP_STORE0_ACTIVE_CLRMSK               (0xBFFFFFFFU)
+#define RGX_CR_ISP_XTP_STORE0_ACTIVE_EN                   (0x40000000U)
+#define RGX_CR_ISP_XTP_STORE0_EOR_SHIFT                   (29U)
+#define RGX_CR_ISP_XTP_STORE0_EOR_CLRMSK                  (0xDFFFFFFFU)
+#define RGX_CR_ISP_XTP_STORE0_EOR_EN                      (0x20000000U)
+#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_SHIFT             (28U)
+#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_CLRMSK            (0xEFFFFFFFU)
+#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_EN                (0x10000000U)
+#define RGX_CR_ISP_XTP_STORE0_MT_SHIFT                    (24U)
+#define RGX_CR_ISP_XTP_STORE0_MT_CLRMSK                   (0xF0FFFFFFU)
+#define RGX_CR_ISP_XTP_STORE0_TILE_X_SHIFT                (12U)
+#define RGX_CR_ISP_XTP_STORE0_TILE_X_CLRMSK               (0xFFC00FFFU)
+#define RGX_CR_ISP_XTP_STORE0_TILE_Y_SHIFT                (0U)
+#define RGX_CR_ISP_XTP_STORE0_TILE_Y_CLRMSK               (0xFFFFFC00U)
+
+
+/*
+    Register group: RGX_CR_BIF_CAT_BASE, with 8 repeats
+*/
+#define RGX_CR_BIF_CAT_BASE_REPEATCOUNT                   (8U)
+/*
+    Register RGX_CR_BIF_CAT_BASE0
+*/
+#define RGX_CR_BIF_CAT_BASE0                              (0x1200U)
+#define RGX_CR_BIF_CAT_BASE0_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK                  (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSIZE               (4096U)
+
+
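For illustration only (not from the imported header): a minimal sketch of how a multi-bit address field such as RGX_CR_BIF_CAT_BASE0 ADDR is typically packed, assuming a 4 KiB-aligned physical address. The function name and parameter are hypothetical.

    /*
     * Hypothetical sketch: build the RGX_CR_BIF_CAT_BASE0 value for a
     * 4 KiB-aligned page-catalogue address.  ~ADDR_CLRMSK is the field
     * mask; ADDR_ALIGNSHIFT removes the page offset before the field is
     * positioned at ADDR_SHIFT.
     */
    #include <stdint.h>

    static inline uint64_t rgx_bif_cat_base0_value(uint64_t cat_base_pa)
    {
    	return ((cat_base_pa >> RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT)
    		<< RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT)
    	       & ~RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK;
    }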
+/*
+    Register RGX_CR_BIF_CAT_BASE1
+*/
+#define RGX_CR_BIF_CAT_BASE1                              (0x1208U)
+#define RGX_CR_BIF_CAT_BASE1_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE1_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE1_ADDR_CLRMSK                  (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE1_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE1_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE2
+*/
+#define RGX_CR_BIF_CAT_BASE2                              (0x1210U)
+#define RGX_CR_BIF_CAT_BASE2_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE2_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE2_ADDR_CLRMSK                  (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE2_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE2_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE3
+*/
+#define RGX_CR_BIF_CAT_BASE3                              (0x1218U)
+#define RGX_CR_BIF_CAT_BASE3_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE3_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE3_ADDR_CLRMSK                  (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE3_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE3_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE4
+*/
+#define RGX_CR_BIF_CAT_BASE4                              (0x1220U)
+#define RGX_CR_BIF_CAT_BASE4_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE4_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE4_ADDR_CLRMSK                  (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE4_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE4_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE5
+*/
+#define RGX_CR_BIF_CAT_BASE5                              (0x1228U)
+#define RGX_CR_BIF_CAT_BASE5_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE5_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE5_ADDR_CLRMSK                  (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE5_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE5_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE6
+*/
+#define RGX_CR_BIF_CAT_BASE6                              (0x1230U)
+#define RGX_CR_BIF_CAT_BASE6_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE6_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE6_ADDR_CLRMSK                  (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE6_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE6_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE7
+*/
+#define RGX_CR_BIF_CAT_BASE7                              (0x1238U)
+#define RGX_CR_BIF_CAT_BASE7_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE7_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE7_ADDR_CLRMSK                  (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE7_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE7_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE_INDEX
+*/
+#define RGX_CR_BIF_CAT_BASE_INDEX                         (0x1240U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_MASKFULL                (IMG_UINT64_C(0x00070707073F0707))
+#define RGX_CR_BIF_CAT_BASE_INDEX_RVTX_SHIFT              (48U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_RVTX_CLRMSK             (IMG_UINT64_C(0xFFF8FFFFFFFFFFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_RAY_SHIFT               (40U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_RAY_CLRMSK              (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_HOST_SHIFT              (32U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_HOST_CLRMSK             (IMG_UINT64_C(0xFFFFFFF8FFFFFFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_TLA_SHIFT               (24U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_TLA_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFF8FFFFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_TDM_SHIFT               (19U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_TDM_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFC7FFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_CDM_SHIFT               (16U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_CDM_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFF8FFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_PIXEL_SHIFT             (8U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_PIXEL_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFFFFFF8FF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_TA_SHIFT                (0U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_TA_CLRMSK               (IMG_UINT64_C(0xFFFFFFFFFFFFFFF8))
+
+
+/*
+    Register RGX_CR_BIF_PM_CAT_BASE_VCE0
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0                       (0x1248U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_MASKFULL              (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_SHIFT       (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_CLRMSK      (IMG_UINT64_C(0xF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_ADDR_SHIFT            (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_ADDR_CLRMSK           (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_SHIFT            (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_CLRMSK           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_EN               (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_SHIFT           (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_EN              (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_BIF_PM_CAT_BASE_TE0
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_TE0                        (0x1250U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_MASKFULL               (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_SHIFT        (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_CLRMSK       (IMG_UINT64_C(0xF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_ADDR_SHIFT             (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_ADDR_CLRMSK            (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_SHIFT             (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_EN                (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_SHIFT            (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_CLRMSK           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_EN               (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_BIF_PM_CAT_BASE_ALIST0
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0                     (0x1260U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_MASKFULL            (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_SHIFT     (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_CLRMSK    (IMG_UINT64_C(0xF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_SHIFT          (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_CLRMSK         (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_SHIFT          (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_EN             (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_SHIFT         (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_EN            (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_BIF_PM_CAT_BASE_VCE1
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1                       (0x1268U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_MASKFULL              (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_SHIFT       (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_CLRMSK      (IMG_UINT64_C(0xF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_ADDR_SHIFT            (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_ADDR_CLRMSK           (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_SHIFT            (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_CLRMSK           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_EN               (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_SHIFT           (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_EN              (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_BIF_PM_CAT_BASE_TE1
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_TE1                        (0x1270U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_MASKFULL               (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_SHIFT        (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_CLRMSK       (IMG_UINT64_C(0xF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_ADDR_SHIFT             (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_ADDR_CLRMSK            (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_SHIFT             (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_EN                (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_SHIFT            (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_CLRMSK           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_EN               (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_BIF_PM_CAT_BASE_ALIST1
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1                     (0x1280U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_MASKFULL            (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_SHIFT     (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_CLRMSK    (IMG_UINT64_C(0xF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_SHIFT          (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_CLRMSK         (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_SHIFT          (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_EN             (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_SHIFT         (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_EN            (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_BIF_MMU_ENTRY_STATUS
+*/
+#define RGX_CR_BIF_MMU_ENTRY_STATUS                       (0x1288U)
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFF0F3))
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_SHIFT         (12U)
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_CLRMSK        (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_SHIFT        (4U)
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_CLRMSK       (IMG_UINT64_C(0xFFFFFFFFFFFFFF0F))
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_SHIFT       (0U)
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_CLRMSK      (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
+
+
+/*
+    Register RGX_CR_BIF_MMU_ENTRY
+*/
+#define RGX_CR_BIF_MMU_ENTRY                              (0x1290U)
+#define RGX_CR_BIF_MMU_ENTRY_MASKFULL                     (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_BIF_MMU_ENTRY_ENABLE_SHIFT                 (1U)
+#define RGX_CR_BIF_MMU_ENTRY_ENABLE_CLRMSK                (0xFFFFFFFDU)
+#define RGX_CR_BIF_MMU_ENTRY_ENABLE_EN                    (0x00000002U)
+#define RGX_CR_BIF_MMU_ENTRY_PENDING_SHIFT                (0U)
+#define RGX_CR_BIF_MMU_ENTRY_PENDING_CLRMSK               (0xFFFFFFFEU)
+#define RGX_CR_BIF_MMU_ENTRY_PENDING_EN                   (0x00000001U)
+
+
+/*
+    Register RGX_CR_BIF_CTRL_INVAL
+*/
+#define RGX_CR_BIF_CTRL_INVAL                             (0x12A0U)
+#define RGX_CR_BIF_CTRL_INVAL_MASKFULL                    (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_BIF_CTRL_INVAL_TLB1_SHIFT                  (3U)
+#define RGX_CR_BIF_CTRL_INVAL_TLB1_CLRMSK                 (0xFFFFFFF7U)
+#define RGX_CR_BIF_CTRL_INVAL_TLB1_EN                     (0x00000008U)
+#define RGX_CR_BIF_CTRL_INVAL_PC_SHIFT                    (2U)
+#define RGX_CR_BIF_CTRL_INVAL_PC_CLRMSK                   (0xFFFFFFFBU)
+#define RGX_CR_BIF_CTRL_INVAL_PC_EN                       (0x00000004U)
+#define RGX_CR_BIF_CTRL_INVAL_PD_SHIFT                    (1U)
+#define RGX_CR_BIF_CTRL_INVAL_PD_CLRMSK                   (0xFFFFFFFDU)
+#define RGX_CR_BIF_CTRL_INVAL_PD_EN                       (0x00000002U)
+#define RGX_CR_BIF_CTRL_INVAL_PT_SHIFT                    (0U)
+#define RGX_CR_BIF_CTRL_INVAL_PT_CLRMSK                   (0xFFFFFFFEU)
+#define RGX_CR_BIF_CTRL_INVAL_PT_EN                       (0x00000001U)
+
+
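For illustration only (not from the imported header): a minimal sketch of composing an invalidation request from the RGX_CR_BIF_CTRL_INVAL _EN bits above. The `regs` base pointer is an assumed ioremap()'d register window; only the standard kernel writel() accessor is used.

    /*
     * Hypothetical sketch: request invalidation of the page-catalogue,
     * page-directory and page-table caches with a single write.
     */
    #include <linux/io.h>

    static void rgx_bif_invalidate_all(void __iomem *regs)
    {
    	u32 val = RGX_CR_BIF_CTRL_INVAL_PC_EN |
    		  RGX_CR_BIF_CTRL_INVAL_PD_EN |
    		  RGX_CR_BIF_CTRL_INVAL_PT_EN;

    	writel(val, regs + RGX_CR_BIF_CTRL_INVAL);
    }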
+/*
+    Register RGX_CR_BIF_CTRL
+*/
+#define RGX_CR_BIF_CTRL                                   (0x12A8U)
+#define RGX_CR_BIF_CTRL__XE_MEM__MASKFULL                 (IMG_UINT64_C(0x000000000000033F))
+#define RGX_CR_BIF_CTRL_MASKFULL                          (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_CPU_SHIFT               (9U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_CPU_CLRMSK              (0xFFFFFDFFU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_CPU_EN                  (0x00000200U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF4_SHIFT              (8U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF4_CLRMSK             (0xFFFFFEFFU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF4_EN                 (0x00000100U)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_SHIFT     (7U)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_CLRMSK    (0xFFFFFF7FU)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_EN        (0x00000080U)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_SHIFT    (6U)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_CLRMSK   (0xFFFFFFBFU)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_EN       (0x00000040U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_SHIFT              (5U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_CLRMSK             (0xFFFFFFDFU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_EN                 (0x00000020U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_SHIFT              (4U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_CLRMSK             (0xFFFFFFEFU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_EN                 (0x00000010U)
+#define RGX_CR_BIF_CTRL_PAUSE_BIF1_SHIFT                  (3U)
+#define RGX_CR_BIF_CTRL_PAUSE_BIF1_CLRMSK                 (0xFFFFFFF7U)
+#define RGX_CR_BIF_CTRL_PAUSE_BIF1_EN                     (0x00000008U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_SHIFT                (2U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_CLRMSK               (0xFFFFFFFBU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_EN                   (0x00000004U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_SHIFT              (1U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_CLRMSK             (0xFFFFFFFDU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_EN                 (0x00000002U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_SHIFT              (0U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_CLRMSK             (0xFFFFFFFEU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_EN                 (0x00000001U)
+
+
+/*
+    Register RGX_CR_BIF_FAULT_BANK0_MMU_STATUS
+*/
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS                 (0x12B0U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL        (IMG_UINT64_C(0x000000000000F775))
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT  (12U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT (8U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT (5U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT  (4U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN     (0x00000010U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN (0x00000004U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT     (0U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK    (0xFFFFFFFEU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN        (0x00000001U)
+
+
+/*
+    Register RGX_CR_BIF_FAULT_BANK0_REQ_STATUS
+*/
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS                 (0x12B8U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__MASKFULL (IMG_UINT64_C(0x001FFFFFFFFFFFF0))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL        (IMG_UINT64_C(0x0007FFFFFFFFFFF0))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_SHIFT (52U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_EN (IMG_UINT64_C(0x0010000000000000))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT       (50U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK      (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN          (IMG_UINT64_C(0x0004000000000000))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_SHIFT (46U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_CLRMSK (IMG_UINT64_C(0xFFF03FFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT    (44U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK   (IMG_UINT64_C(0xFFFC0FFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT    (40U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK   (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_SHIFT (40U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFC0FFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT   (4U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK  (IMG_UINT64_C(0xFFFFFF000000000F))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
+
+
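For illustration only (not from the imported header): a minimal sketch of recovering the faulting address from a raw RGX_CR_BIF_FAULT_BANK0_REQ_STATUS read, using the ADDRESS field's CLRMSK/SHIFT/ALIGNSHIFT constants. The helper name is hypothetical.

    /*
     * Hypothetical sketch: extract the ADDRESS field and re-apply its
     * 16-byte alignment granule to obtain the faulting byte address.
     */
    #include <stdint.h>

    static inline uint64_t rgx_fault_bank0_address(uint64_t raw)
    {
    	uint64_t field =
    		(raw & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK)
    		>> RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT;

    	return field << RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT;
    }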
+/*
+    Register RGX_CR_BIF_FAULT_BANK1_MMU_STATUS
+*/
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS                 (0x12C0U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_MASKFULL        (IMG_UINT64_C(0x000000000000F775))
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_SHIFT  (12U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_SHIFT (8U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_SHIFT (5U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_SHIFT  (4U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_EN     (0x00000010U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_EN (0x00000004U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_SHIFT     (0U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_CLRMSK    (0xFFFFFFFEU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_EN        (0x00000001U)
+
+
+/*
+    Register RGX_CR_BIF_FAULT_BANK1_REQ_STATUS
+*/
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS                 (0x12C8U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_MASKFULL        (IMG_UINT64_C(0x0007FFFFFFFFFFF0))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_SHIFT       (50U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_CLRMSK      (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_EN          (IMG_UINT64_C(0x0004000000000000))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_SHIFT    (44U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_CLRMSK   (IMG_UINT64_C(0xFFFC0FFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_SHIFT    (40U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_CLRMSK   (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_SHIFT   (4U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_CLRMSK  (IMG_UINT64_C(0xFFFFFF000000000F))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
+
+
+/*
+    Register RGX_CR_BIF_MMU_STATUS
+*/
+#define RGX_CR_BIF_MMU_STATUS                             (0x12D0U)
+#define RGX_CR_BIF_MMU_STATUS__XE_MEM__MASKFULL           (IMG_UINT64_C(0x000000001FFFFFF7))
+#define RGX_CR_BIF_MMU_STATUS_MASKFULL                    (IMG_UINT64_C(0x000000001FFFFFF7))
+#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_SHIFT              (28U)
+#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_CLRMSK             (0xEFFFFFFFU)
+#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_EN                 (0x10000000U)
+#define RGX_CR_BIF_MMU_STATUS_PC_DATA_SHIFT               (20U)
+#define RGX_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK              (0xF00FFFFFU)
+#define RGX_CR_BIF_MMU_STATUS_PD_DATA_SHIFT               (12U)
+#define RGX_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK              (0xFFF00FFFU)
+#define RGX_CR_BIF_MMU_STATUS_PT_DATA_SHIFT               (4U)
+#define RGX_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK              (0xFFFFF00FU)
+#define RGX_CR_BIF_MMU_STATUS_STALLED_SHIFT               (2U)
+#define RGX_CR_BIF_MMU_STATUS_STALLED_CLRMSK              (0xFFFFFFFBU)
+#define RGX_CR_BIF_MMU_STATUS_STALLED_EN                  (0x00000004U)
+#define RGX_CR_BIF_MMU_STATUS_PAUSED_SHIFT                (1U)
+#define RGX_CR_BIF_MMU_STATUS_PAUSED_CLRMSK               (0xFFFFFFFDU)
+#define RGX_CR_BIF_MMU_STATUS_PAUSED_EN                   (0x00000002U)
+#define RGX_CR_BIF_MMU_STATUS_BUSY_SHIFT                  (0U)
+#define RGX_CR_BIF_MMU_STATUS_BUSY_CLRMSK                 (0xFFFFFFFEU)
+#define RGX_CR_BIF_MMU_STATUS_BUSY_EN                     (0x00000001U)
+
+
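For illustration only (not from the imported header): a minimal sketch of testing the single-bit state flags of RGX_CR_BIF_MMU_STATUS; the helper name is hypothetical and the raw value is assumed to come from an MMIO read.

    /*
     * Hypothetical sketch: the MMU is considered idle when it is neither
     * busy, paused nor stalled.
     */
    #include <stdbool.h>
    #include <stdint.h>

    static inline bool rgx_bif_mmu_idle(uint32_t status)
    {
    	return (status & (RGX_CR_BIF_MMU_STATUS_BUSY_EN |
    			  RGX_CR_BIF_MMU_STATUS_PAUSED_EN |
    			  RGX_CR_BIF_MMU_STATUS_STALLED_EN)) == 0;
    }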
+/*
+    Register group: RGX_CR_BIF_TILING_CFG, with 8 repeats
+*/
+#define RGX_CR_BIF_TILING_CFG_REPEATCOUNT                 (8U)
+/*
+    Register RGX_CR_BIF_TILING_CFG0
+*/
+#define RGX_CR_BIF_TILING_CFG0                            (0x12D8U)
+#define RGX_CR_BIF_TILING_CFG0_MASKFULL                   (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF))
+#define RGX_CR_BIF_TILING_CFG0_XSTRIDE_SHIFT              (61U)
+#define RGX_CR_BIF_TILING_CFG0_XSTRIDE_CLRMSK             (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF))
+#define RGX_CR_BIF_TILING_CFG0_ENABLE_SHIFT               (60U)
+#define RGX_CR_BIF_TILING_CFG0_ENABLE_CLRMSK              (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_BIF_TILING_CFG0_ENABLE_EN                  (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_BIF_TILING_CFG0_MAX_ADDRESS_SHIFT          (32U)
+#define RGX_CR_BIF_TILING_CFG0_MAX_ADDRESS_CLRMSK         (IMG_UINT64_C(0xF0000000FFFFFFFF))
+#define RGX_CR_BIF_TILING_CFG0_MAX_ADDRESS_ALIGNSHIFT     (12U)
+#define RGX_CR_BIF_TILING_CFG0_MAX_ADDRESS_ALIGNSIZE      (4096U)
+#define RGX_CR_BIF_TILING_CFG0_MIN_ADDRESS_SHIFT          (0U)
+#define RGX_CR_BIF_TILING_CFG0_MIN_ADDRESS_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFF0000000))
+#define RGX_CR_BIF_TILING_CFG0_MIN_ADDRESS_ALIGNSHIFT     (12U)
+#define RGX_CR_BIF_TILING_CFG0_MIN_ADDRESS_ALIGNSIZE      (4096U)
+
+
+/*
+    Register RGX_CR_BIF_TILING_CFG1
+*/
+#define RGX_CR_BIF_TILING_CFG1                            (0x12E0U)
+#define RGX_CR_BIF_TILING_CFG1_MASKFULL                   (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF))
+#define RGX_CR_BIF_TILING_CFG1_XSTRIDE_SHIFT              (61U)
+#define RGX_CR_BIF_TILING_CFG1_XSTRIDE_CLRMSK             (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF))
+#define RGX_CR_BIF_TILING_CFG1_ENABLE_SHIFT               (60U)
+#define RGX_CR_BIF_TILING_CFG1_ENABLE_CLRMSK              (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_BIF_TILING_CFG1_ENABLE_EN                  (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_BIF_TILING_CFG1_MAX_ADDRESS_SHIFT          (32U)
+#define RGX_CR_BIF_TILING_CFG1_MAX_ADDRESS_CLRMSK         (IMG_UINT64_C(0xF0000000FFFFFFFF))
+#define RGX_CR_BIF_TILING_CFG1_MAX_ADDRESS_ALIGNSHIFT     (12U)
+#define RGX_CR_BIF_TILING_CFG1_MAX_ADDRESS_ALIGNSIZE      (4096U)
+#define RGX_CR_BIF_TILING_CFG1_MIN_ADDRESS_SHIFT          (0U)
+#define RGX_CR_BIF_TILING_CFG1_MIN_ADDRESS_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFF0000000))
+#define RGX_CR_BIF_TILING_CFG1_MIN_ADDRESS_ALIGNSHIFT     (12U)
+#define RGX_CR_BIF_TILING_CFG1_MIN_ADDRESS_ALIGNSIZE      (4096U)
+
+
+/*
+    Register RGX_CR_BIF_TILING_CFG2
+*/
+#define RGX_CR_BIF_TILING_CFG2                            (0x12E8U)
+#define RGX_CR_BIF_TILING_CFG2_MASKFULL                   (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF))
+#define RGX_CR_BIF_TILING_CFG2_XSTRIDE_SHIFT              (61U)
+#define RGX_CR_BIF_TILING_CFG2_XSTRIDE_CLRMSK             (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF))
+#define RGX_CR_BIF_TILING_CFG2_ENABLE_SHIFT               (60U)
+#define RGX_CR_BIF_TILING_CFG2_ENABLE_CLRMSK              (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_BIF_TILING_CFG2_ENABLE_EN                  (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_BIF_TILING_CFG2_MAX_ADDRESS_SHIFT          (32U)
+#define RGX_CR_BIF_TILING_CFG2_MAX_ADDRESS_CLRMSK         (IMG_UINT64_C(0xF0000000FFFFFFFF))
+#define RGX_CR_BIF_TILING_CFG2_MAX_ADDRESS_ALIGNSHIFT     (12U)
+#define RGX_CR_BIF_TILING_CFG2_MAX_ADDRESS_ALIGNSIZE      (4096U)
+#define RGX_CR_BIF_TILING_CFG2_MIN_ADDRESS_SHIFT          (0U)
+#define RGX_CR_BIF_TILING_CFG2_MIN_ADDRESS_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFF0000000))
+#define RGX_CR_BIF_TILING_CFG2_MIN_ADDRESS_ALIGNSHIFT     (12U)
+#define RGX_CR_BIF_TILING_CFG2_MIN_ADDRESS_ALIGNSIZE      (4096U)
+
+
+/*
+    Register RGX_CR_BIF_TILING_CFG3
+*/
+#define RGX_CR_BIF_TILING_CFG3                            (0x12F0U)
+#define RGX_CR_BIF_TILING_CFG3_MASKFULL                   (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF))
+#define RGX_CR_BIF_TILING_CFG3_XSTRIDE_SHIFT              (61U)
+#define RGX_CR_BIF_TILING_CFG3_XSTRIDE_CLRMSK             (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF))
+#define RGX_CR_BIF_TILING_CFG3_ENABLE_SHIFT               (60U)
+#define RGX_CR_BIF_TILING_CFG3_ENABLE_CLRMSK              (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_BIF_TILING_CFG3_ENABLE_EN                  (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_BIF_TILING_CFG3_MAX_ADDRESS_SHIFT          (32U)
+#define RGX_CR_BIF_TILING_CFG3_MAX_ADDRESS_CLRMSK         (IMG_UINT64_C(0xF0000000FFFFFFFF))
+#define RGX_CR_BIF_TILING_CFG3_MAX_ADDRESS_ALIGNSHIFT     (12U)
+#define RGX_CR_BIF_TILING_CFG3_MAX_ADDRESS_ALIGNSIZE      (4096U)
+#define RGX_CR_BIF_TILING_CFG3_MIN_ADDRESS_SHIFT          (0U)
+#define RGX_CR_BIF_TILING_CFG3_MIN_ADDRESS_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFF0000000))
+#define RGX_CR_BIF_TILING_CFG3_MIN_ADDRESS_ALIGNSHIFT     (12U)
+#define RGX_CR_BIF_TILING_CFG3_MIN_ADDRESS_ALIGNSIZE      (4096U)
+
+
+/*
+    Register RGX_CR_BIF_TILING_CFG4
+*/
+#define RGX_CR_BIF_TILING_CFG4                            (0x12F8U)
+#define RGX_CR_BIF_TILING_CFG4_MASKFULL                   (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF))
+#define RGX_CR_BIF_TILING_CFG4_XSTRIDE_SHIFT              (61U)
+#define RGX_CR_BIF_TILING_CFG4_XSTRIDE_CLRMSK             (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF))
+#define RGX_CR_BIF_TILING_CFG4_ENABLE_SHIFT               (60U)
+#define RGX_CR_BIF_TILING_CFG4_ENABLE_CLRMSK              (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_BIF_TILING_CFG4_ENABLE_EN                  (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_BIF_TILING_CFG4_MAX_ADDRESS_SHIFT          (32U)
+#define RGX_CR_BIF_TILING_CFG4_MAX_ADDRESS_CLRMSK         (IMG_UINT64_C(0xF0000000FFFFFFFF))
+#define RGX_CR_BIF_TILING_CFG4_MAX_ADDRESS_ALIGNSHIFT     (12U)
+#define RGX_CR_BIF_TILING_CFG4_MAX_ADDRESS_ALIGNSIZE      (4096U)
+#define RGX_CR_BIF_TILING_CFG4_MIN_ADDRESS_SHIFT          (0U)
+#define RGX_CR_BIF_TILING_CFG4_MIN_ADDRESS_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFF0000000))
+#define RGX_CR_BIF_TILING_CFG4_MIN_ADDRESS_ALIGNSHIFT     (12U)
+#define RGX_CR_BIF_TILING_CFG4_MIN_ADDRESS_ALIGNSIZE      (4096U)
+
+
+/*
+    Register RGX_CR_BIF_TILING_CFG5
+*/
+#define RGX_CR_BIF_TILING_CFG5                            (0x1300U)
+#define RGX_CR_BIF_TILING_CFG5_MASKFULL                   (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF))
+#define RGX_CR_BIF_TILING_CFG5_XSTRIDE_SHIFT              (61U)
+#define RGX_CR_BIF_TILING_CFG5_XSTRIDE_CLRMSK             (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF))
+#define RGX_CR_BIF_TILING_CFG5_ENABLE_SHIFT               (60U)
+#define RGX_CR_BIF_TILING_CFG5_ENABLE_CLRMSK              (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_BIF_TILING_CFG5_ENABLE_EN                  (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_BIF_TILING_CFG5_MAX_ADDRESS_SHIFT          (32U)
+#define RGX_CR_BIF_TILING_CFG5_MAX_ADDRESS_CLRMSK         (IMG_UINT64_C(0xF0000000FFFFFFFF))
+#define RGX_CR_BIF_TILING_CFG5_MAX_ADDRESS_ALIGNSHIFT     (12U)
+#define RGX_CR_BIF_TILING_CFG5_MAX_ADDRESS_ALIGNSIZE      (4096U)
+#define RGX_CR_BIF_TILING_CFG5_MIN_ADDRESS_SHIFT          (0U)
+#define RGX_CR_BIF_TILING_CFG5_MIN_ADDRESS_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFF0000000))
+#define RGX_CR_BIF_TILING_CFG5_MIN_ADDRESS_ALIGNSHIFT     (12U)
+#define RGX_CR_BIF_TILING_CFG5_MIN_ADDRESS_ALIGNSIZE      (4096U)
+
+
+/*
+    Register RGX_CR_BIF_TILING_CFG6
+*/
+#define RGX_CR_BIF_TILING_CFG6                            (0x1308U)
+#define RGX_CR_BIF_TILING_CFG6_MASKFULL                   (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF))
+#define RGX_CR_BIF_TILING_CFG6_XSTRIDE_SHIFT              (61U)
+#define RGX_CR_BIF_TILING_CFG6_XSTRIDE_CLRMSK             (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF))
+#define RGX_CR_BIF_TILING_CFG6_ENABLE_SHIFT               (60U)
+#define RGX_CR_BIF_TILING_CFG6_ENABLE_CLRMSK              (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_BIF_TILING_CFG6_ENABLE_EN                  (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_BIF_TILING_CFG6_MAX_ADDRESS_SHIFT          (32U)
+#define RGX_CR_BIF_TILING_CFG6_MAX_ADDRESS_CLRMSK         (IMG_UINT64_C(0xF0000000FFFFFFFF))
+#define RGX_CR_BIF_TILING_CFG6_MAX_ADDRESS_ALIGNSHIFT     (12U)
+#define RGX_CR_BIF_TILING_CFG6_MAX_ADDRESS_ALIGNSIZE      (4096U)
+#define RGX_CR_BIF_TILING_CFG6_MIN_ADDRESS_SHIFT          (0U)
+#define RGX_CR_BIF_TILING_CFG6_MIN_ADDRESS_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFF0000000))
+#define RGX_CR_BIF_TILING_CFG6_MIN_ADDRESS_ALIGNSHIFT     (12U)
+#define RGX_CR_BIF_TILING_CFG6_MIN_ADDRESS_ALIGNSIZE      (4096U)
+
+
+/*
+    Register RGX_CR_BIF_TILING_CFG7
+*/
+#define RGX_CR_BIF_TILING_CFG7                            (0x1310U)
+#define RGX_CR_BIF_TILING_CFG7_MASKFULL                   (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF))
+#define RGX_CR_BIF_TILING_CFG7_XSTRIDE_SHIFT              (61U)
+#define RGX_CR_BIF_TILING_CFG7_XSTRIDE_CLRMSK             (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF))
+#define RGX_CR_BIF_TILING_CFG7_ENABLE_SHIFT               (60U)
+#define RGX_CR_BIF_TILING_CFG7_ENABLE_CLRMSK              (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_BIF_TILING_CFG7_ENABLE_EN                  (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_BIF_TILING_CFG7_MAX_ADDRESS_SHIFT          (32U)
+#define RGX_CR_BIF_TILING_CFG7_MAX_ADDRESS_CLRMSK         (IMG_UINT64_C(0xF0000000FFFFFFFF))
+#define RGX_CR_BIF_TILING_CFG7_MAX_ADDRESS_ALIGNSHIFT     (12U)
+#define RGX_CR_BIF_TILING_CFG7_MAX_ADDRESS_ALIGNSIZE      (4096U)
+#define RGX_CR_BIF_TILING_CFG7_MIN_ADDRESS_SHIFT          (0U)
+#define RGX_CR_BIF_TILING_CFG7_MIN_ADDRESS_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFF0000000))
+#define RGX_CR_BIF_TILING_CFG7_MIN_ADDRESS_ALIGNSHIFT     (12U)
+#define RGX_CR_BIF_TILING_CFG7_MIN_ADDRESS_ALIGNSIZE      (4096U)
+
+
+/*
+    Register RGX_CR_BIF_READS_EXT_STATUS
+*/
+#define RGX_CR_BIF_READS_EXT_STATUS                       (0x1320U)
+#define RGX_CR_BIF_READS_EXT_STATUS_MASKFULL              (IMG_UINT64_C(0x000000000FFFFFFF))
+#define RGX_CR_BIF_READS_EXT_STATUS_MMU_SHIFT             (16U)
+#define RGX_CR_BIF_READS_EXT_STATUS_MMU_CLRMSK            (0xF000FFFFU)
+#define RGX_CR_BIF_READS_EXT_STATUS_BANK1_SHIFT           (0U)
+#define RGX_CR_BIF_READS_EXT_STATUS_BANK1_CLRMSK          (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_BIF_READS_INT_STATUS
+*/
+#define RGX_CR_BIF_READS_INT_STATUS                       (0x1328U)
+#define RGX_CR_BIF_READS_INT_STATUS_MASKFULL              (IMG_UINT64_C(0x0000000007FFFFFF))
+#define RGX_CR_BIF_READS_INT_STATUS_MMU_SHIFT             (16U)
+#define RGX_CR_BIF_READS_INT_STATUS_MMU_CLRMSK            (0xF800FFFFU)
+#define RGX_CR_BIF_READS_INT_STATUS_BANK1_SHIFT           (0U)
+#define RGX_CR_BIF_READS_INT_STATUS_BANK1_CLRMSK          (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_BIFPM_READS_INT_STATUS
+*/
+#define RGX_CR_BIFPM_READS_INT_STATUS                     (0x1330U)
+#define RGX_CR_BIFPM_READS_INT_STATUS_MASKFULL            (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIFPM_READS_INT_STATUS_BANK0_SHIFT         (0U)
+#define RGX_CR_BIFPM_READS_INT_STATUS_BANK0_CLRMSK        (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_BIFPM_READS_EXT_STATUS
+*/
+#define RGX_CR_BIFPM_READS_EXT_STATUS                     (0x1338U)
+#define RGX_CR_BIFPM_READS_EXT_STATUS_MASKFULL            (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIFPM_READS_EXT_STATUS_BANK0_SHIFT         (0U)
+#define RGX_CR_BIFPM_READS_EXT_STATUS_BANK0_CLRMSK        (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_BIFPM_STATUS_MMU
+*/
+#define RGX_CR_BIFPM_STATUS_MMU                           (0x1350U)
+#define RGX_CR_BIFPM_STATUS_MMU_MASKFULL                  (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_BIFPM_STATUS_MMU_REQUESTS_SHIFT            (0U)
+#define RGX_CR_BIFPM_STATUS_MMU_REQUESTS_CLRMSK           (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_BIF_STATUS_MMU
+*/
+#define RGX_CR_BIF_STATUS_MMU                             (0x1358U)
+#define RGX_CR_BIF_STATUS_MMU_MASKFULL                    (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_BIF_STATUS_MMU_REQUESTS_SHIFT              (0U)
+#define RGX_CR_BIF_STATUS_MMU_REQUESTS_CLRMSK             (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_BIF_FAULT_READ
+*/
+#define RGX_CR_BIF_FAULT_READ                             (0x13E0U)
+#define RGX_CR_BIF_FAULT_READ_MASKFULL                    (IMG_UINT64_C(0x000000FFFFFFFFF0))
+#define RGX_CR_BIF_FAULT_READ_ADDRESS_SHIFT               (4U)
+#define RGX_CR_BIF_FAULT_READ_ADDRESS_CLRMSK              (IMG_UINT64_C(0xFFFFFF000000000F))
+#define RGX_CR_BIF_FAULT_READ_ADDRESS_ALIGNSHIFT          (4U)
+#define RGX_CR_BIF_FAULT_READ_ADDRESS_ALIGNSIZE           (16U)
+
+
+/*
+    Register RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS
+*/
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS           (0x1430U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL  (IMG_UINT64_C(0x000000000000F775))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT (12U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT (8U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT (5U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT (4U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN (0x00000010U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN (0x00000004U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT (0U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN  (0x00000001U)
+
+
+/*
+    Register RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS
+*/
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS           (0x1438U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL  (IMG_UINT64_C(0x0007FFFFFFFFFFF0))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT (50U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN    (IMG_UINT64_C(0x0004000000000000))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT (44U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFFC0FFFFFFFFFFF))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT (40U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT (4U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
+
+
+/*
+    Register RGX_CR_TFBC_COMPRESSION_CONTROL
+*/
+#define RGX_CR_TFBC_COMPRESSION_CONTROL                   (0x14A0U)
+#define RGX_CR_TFBC_COMPRESSION_CONTROL_MASKFULL          (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_SHIFT (7U)
+#define RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_CLRMSK (0xFFFFFF7FU)
+#define RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_EN (0x00000080U)
+#define RGX_CR_TFBC_COMPRESSION_CONTROL_QUALITY_SHIFT_SHIFT (4U)
+#define RGX_CR_TFBC_COMPRESSION_CONTROL_QUALITY_SHIFT_CLRMSK (0xFFFFFF8FU)
+#define RGX_CR_TFBC_COMPRESSION_CONTROL_QUALITY_ENABLE_SHIFT (3U)
+#define RGX_CR_TFBC_COMPRESSION_CONTROL_QUALITY_ENABLE_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_TFBC_COMPRESSION_CONTROL_QUALITY_ENABLE_EN (0x00000008U)
+#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_SHIFT      (1U)
+#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_CLRMSK     (0xFFFFFFF9U)
+#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_DEFAULT    (0x00000000U)
+#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_TFBC_DELTA_STANDARD_AND_CORRELATION (0x00000002U)
+#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_TFBC_DELTA_STANDARD (0x00000004U)
+#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_RESERVED   (0x00000006U)
+#define RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_SHIFT (0U)
+#define RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_GROUP_0 (0x00000000U)
+#define RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_GROUP_1 (0x00000001U)
+
+
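For illustration only (not from the imported header): a minimal sketch of updating an enumerated field, here the TFBC compression SCHEME, by clearing it with its CLRMSK and OR-ing in one of the enumerated values defined above. The helper name is hypothetical.

    /*
     * Hypothetical sketch: replace the SCHEME field of a
     * RGX_CR_TFBC_COMPRESSION_CONTROL value, e.g. with
     * RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_TFBC_DELTA_STANDARD.
     */
    #include <stdint.h>

    static inline uint32_t rgx_tfbc_set_scheme(uint32_t reg, uint32_t scheme)
    {
    	return (reg & RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_CLRMSK) | scheme;
    }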
+/*
+    Register RGX_CR_MCU_FENCE
+*/
+#define RGX_CR_MCU_FENCE                                  (0x1740U)
+#define RGX_CR_MCU_FENCE_MASKFULL                         (IMG_UINT64_C(0x000007FFFFFFFFE0))
+#define RGX_CR_MCU_FENCE_DM_SHIFT                         (40U)
+#define RGX_CR_MCU_FENCE_DM_CLRMSK                        (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
+#define RGX_CR_MCU_FENCE_DM_VERTEX                        (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_MCU_FENCE_DM_PIXEL                         (IMG_UINT64_C(0x0000010000000000))
+#define RGX_CR_MCU_FENCE_DM_COMPUTE                       (IMG_UINT64_C(0x0000020000000000))
+#define RGX_CR_MCU_FENCE_DM_RAY_VERTEX                    (IMG_UINT64_C(0x0000030000000000))
+#define RGX_CR_MCU_FENCE_DM_RAY                           (IMG_UINT64_C(0x0000040000000000))
+#define RGX_CR_MCU_FENCE_DM_FASTRENDER                    (IMG_UINT64_C(0x0000050000000000))
+#define RGX_CR_MCU_FENCE_ADDR_SHIFT                       (5U)
+#define RGX_CR_MCU_FENCE_ADDR_CLRMSK                      (IMG_UINT64_C(0xFFFFFF000000001F))
+#define RGX_CR_MCU_FENCE_ADDR_ALIGNSHIFT                  (5U)
+#define RGX_CR_MCU_FENCE_ADDR_ALIGNSIZE                   (32U)
+
+
+/*
+    Register group: RGX_CR_SCRATCH, with 16 repeats
+*/
+#define RGX_CR_SCRATCH_REPEATCOUNT                        (16U)
+/*
+    Register RGX_CR_SCRATCH0
+*/
+#define RGX_CR_SCRATCH0                                   (0x1A00U)
+#define RGX_CR_SCRATCH0_MASKFULL                          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SCRATCH0_DATA_SHIFT                        (0U)
+#define RGX_CR_SCRATCH0_DATA_CLRMSK                       (0x00000000U)
+
+
+/*
+    Register RGX_CR_SCRATCH1
+*/
+#define RGX_CR_SCRATCH1                                   (0x1A08U)
+#define RGX_CR_SCRATCH1_MASKFULL                          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SCRATCH1_DATA_SHIFT                        (0U)
+#define RGX_CR_SCRATCH1_DATA_CLRMSK                       (0x00000000U)
+
+
+/*
+    Register RGX_CR_SCRATCH2
+*/
+#define RGX_CR_SCRATCH2                                   (0x1A10U)
+#define RGX_CR_SCRATCH2_MASKFULL                          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SCRATCH2_DATA_SHIFT                        (0U)
+#define RGX_CR_SCRATCH2_DATA_CLRMSK                       (0x00000000U)
+
+
+/*
+    Register RGX_CR_SCRATCH3
+*/
+#define RGX_CR_SCRATCH3                                   (0x1A18U)
+#define RGX_CR_SCRATCH3_MASKFULL                          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SCRATCH3_DATA_SHIFT                        (0U)
+#define RGX_CR_SCRATCH3_DATA_CLRMSK                       (0x00000000U)
+
+
+/*
+    Register RGX_CR_SCRATCH4
+*/
+#define RGX_CR_SCRATCH4                                   (0x1A20U)
+#define RGX_CR_SCRATCH4_MASKFULL                          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SCRATCH4_DATA_SHIFT                        (0U)
+#define RGX_CR_SCRATCH4_DATA_CLRMSK                       (0x00000000U)
+
+
+/*
+    Register RGX_CR_SCRATCH5
+*/
+#define RGX_CR_SCRATCH5                                   (0x1A28U)
+#define RGX_CR_SCRATCH5_MASKFULL                          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SCRATCH5_DATA_SHIFT                        (0U)
+#define RGX_CR_SCRATCH5_DATA_CLRMSK                       (0x00000000U)
+
+
+/*
+    Register RGX_CR_SCRATCH6
+*/
+#define RGX_CR_SCRATCH6                                   (0x1A30U)
+#define RGX_CR_SCRATCH6_MASKFULL                          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SCRATCH6_DATA_SHIFT                        (0U)
+#define RGX_CR_SCRATCH6_DATA_CLRMSK                       (0x00000000U)
+
+
+/*
+    Register RGX_CR_SCRATCH7
+*/
+#define RGX_CR_SCRATCH7                                   (0x1A38U)
+#define RGX_CR_SCRATCH7_MASKFULL                          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SCRATCH7_DATA_SHIFT                        (0U)
+#define RGX_CR_SCRATCH7_DATA_CLRMSK                       (0x00000000U)
+
+
+/*
+    Register RGX_CR_SCRATCH8
+*/
+#define RGX_CR_SCRATCH8                                   (0x1A40U)
+#define RGX_CR_SCRATCH8_MASKFULL                          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SCRATCH8_DATA_SHIFT                        (0U)
+#define RGX_CR_SCRATCH8_DATA_CLRMSK                       (0x00000000U)
+
+
+/*
+    Register RGX_CR_SCRATCH9
+*/
+#define RGX_CR_SCRATCH9                                   (0x1A48U)
+#define RGX_CR_SCRATCH9_MASKFULL                          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SCRATCH9_DATA_SHIFT                        (0U)
+#define RGX_CR_SCRATCH9_DATA_CLRMSK                       (0x00000000U)
+
+
+/*
+    Register RGX_CR_SCRATCH10
+*/
+#define RGX_CR_SCRATCH10                                  (0x1A50U)
+#define RGX_CR_SCRATCH10_MASKFULL                         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SCRATCH10_DATA_SHIFT                       (0U)
+#define RGX_CR_SCRATCH10_DATA_CLRMSK                      (0x00000000U)
+
+
+/*
+    Register RGX_CR_SCRATCH11
+*/
+#define RGX_CR_SCRATCH11                                  (0x1A58U)
+#define RGX_CR_SCRATCH11_MASKFULL                         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SCRATCH11_DATA_SHIFT                       (0U)
+#define RGX_CR_SCRATCH11_DATA_CLRMSK                      (0x00000000U)
+
+
+/*
+    Register RGX_CR_SCRATCH12
+*/
+#define RGX_CR_SCRATCH12                                  (0x1A60U)
+#define RGX_CR_SCRATCH12_MASKFULL                         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SCRATCH12_DATA_SHIFT                       (0U)
+#define RGX_CR_SCRATCH12_DATA_CLRMSK                      (0x00000000U)
+
+
+/*
+    Register RGX_CR_SCRATCH13
+*/
+#define RGX_CR_SCRATCH13                                  (0x1A68U)
+#define RGX_CR_SCRATCH13_MASKFULL                         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SCRATCH13_DATA_SHIFT                       (0U)
+#define RGX_CR_SCRATCH13_DATA_CLRMSK                      (0x00000000U)
+
+
+/*
+    Register RGX_CR_SCRATCH14
+*/
+#define RGX_CR_SCRATCH14                                  (0x1A70U)
+#define RGX_CR_SCRATCH14_MASKFULL                         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SCRATCH14_DATA_SHIFT                       (0U)
+#define RGX_CR_SCRATCH14_DATA_CLRMSK                      (0x00000000U)
+
+
+/*
+    Register RGX_CR_SCRATCH15
+*/
+#define RGX_CR_SCRATCH15                                  (0x1A78U)
+#define RGX_CR_SCRATCH15_MASKFULL                         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SCRATCH15_DATA_SHIFT                       (0U)
+#define RGX_CR_SCRATCH15_DATA_CLRMSK                      (0x00000000U)
+
+
+/*
+    Register group: RGX_CR_OS0_SCRATCH, with 2 repeats
+*/
+#define RGX_CR_OS0_SCRATCH_REPEATCOUNT                    (2U)
+/*
+    Register RGX_CR_OS0_SCRATCH0
+*/
+#define RGX_CR_OS0_SCRATCH0                               (0x1A80U)
+#define RGX_CR_OS0_SCRATCH0_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS0_SCRATCH0_DATA_SHIFT                    (0U)
+#define RGX_CR_OS0_SCRATCH0_DATA_CLRMSK                   (0x00000000U)
+
+
+/*
+    Register RGX_CR_OS0_SCRATCH1
+*/
+#define RGX_CR_OS0_SCRATCH1                               (0x1A88U)
+#define RGX_CR_OS0_SCRATCH1_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS0_SCRATCH1_DATA_SHIFT                    (0U)
+#define RGX_CR_OS0_SCRATCH1_DATA_CLRMSK                   (0x00000000U)
+
+
+/*
+    Register RGX_CR_OS0_SCRATCH2
+*/
+#define RGX_CR_OS0_SCRATCH2                               (0x1A90U)
+#define RGX_CR_OS0_SCRATCH2_MASKFULL                      (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_OS0_SCRATCH2_DATA_SHIFT                    (0U)
+#define RGX_CR_OS0_SCRATCH2_DATA_CLRMSK                   (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_OS0_SCRATCH3
+*/
+#define RGX_CR_OS0_SCRATCH3                               (0x1A98U)
+#define RGX_CR_OS0_SCRATCH3_MASKFULL                      (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_OS0_SCRATCH3_DATA_SHIFT                    (0U)
+#define RGX_CR_OS0_SCRATCH3_DATA_CLRMSK                   (0xFFFFFF00U)
+
+
+/*
+    Register group: RGX_CR_OS1_SCRATCH, with 2 repeats
+*/
+#define RGX_CR_OS1_SCRATCH_REPEATCOUNT                    (2U)
+/*
+    Register RGX_CR_OS1_SCRATCH0
+*/
+#define RGX_CR_OS1_SCRATCH0                               (0x11A80U)
+#define RGX_CR_OS1_SCRATCH0_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS1_SCRATCH0_DATA_SHIFT                    (0U)
+#define RGX_CR_OS1_SCRATCH0_DATA_CLRMSK                   (0x00000000U)
+
+
+/*
+    Register RGX_CR_OS1_SCRATCH1
+*/
+#define RGX_CR_OS1_SCRATCH1                               (0x11A88U)
+#define RGX_CR_OS1_SCRATCH1_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS1_SCRATCH1_DATA_SHIFT                    (0U)
+#define RGX_CR_OS1_SCRATCH1_DATA_CLRMSK                   (0x00000000U)
+
+
+/*
+    Register RGX_CR_OS1_SCRATCH2
+*/
+#define RGX_CR_OS1_SCRATCH2                               (0x11A90U)
+#define RGX_CR_OS1_SCRATCH2_MASKFULL                      (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_OS1_SCRATCH2_DATA_SHIFT                    (0U)
+#define RGX_CR_OS1_SCRATCH2_DATA_CLRMSK                   (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_OS1_SCRATCH3
+*/
+#define RGX_CR_OS1_SCRATCH3                               (0x11A98U)
+#define RGX_CR_OS1_SCRATCH3_MASKFULL                      (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_OS1_SCRATCH3_DATA_SHIFT                    (0U)
+#define RGX_CR_OS1_SCRATCH3_DATA_CLRMSK                   (0xFFFFFF00U)
+
+
+/*
+    Register group: RGX_CR_OS2_SCRATCH, with 2 repeats
+*/
+#define RGX_CR_OS2_SCRATCH_REPEATCOUNT                    (2U)
+/*
+    Register RGX_CR_OS2_SCRATCH0
+*/
+#define RGX_CR_OS2_SCRATCH0                               (0x21A80U)
+#define RGX_CR_OS2_SCRATCH0_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS2_SCRATCH0_DATA_SHIFT                    (0U)
+#define RGX_CR_OS2_SCRATCH0_DATA_CLRMSK                   (0x00000000U)
+
+
+/*
+    Register RGX_CR_OS2_SCRATCH1
+*/
+#define RGX_CR_OS2_SCRATCH1                               (0x21A88U)
+#define RGX_CR_OS2_SCRATCH1_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS2_SCRATCH1_DATA_SHIFT                    (0U)
+#define RGX_CR_OS2_SCRATCH1_DATA_CLRMSK                   (0x00000000U)
+
+
+/*
+    Register RGX_CR_OS2_SCRATCH2
+*/
+#define RGX_CR_OS2_SCRATCH2                               (0x21A90U)
+#define RGX_CR_OS2_SCRATCH2_MASKFULL                      (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_OS2_SCRATCH2_DATA_SHIFT                    (0U)
+#define RGX_CR_OS2_SCRATCH2_DATA_CLRMSK                   (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_OS2_SCRATCH3
+*/
+#define RGX_CR_OS2_SCRATCH3                               (0x21A98U)
+#define RGX_CR_OS2_SCRATCH3_MASKFULL                      (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_OS2_SCRATCH3_DATA_SHIFT                    (0U)
+#define RGX_CR_OS2_SCRATCH3_DATA_CLRMSK                   (0xFFFFFF00U)
+
+
+/*
+    Register group: RGX_CR_OS3_SCRATCH, with 2 repeats
+*/
+#define RGX_CR_OS3_SCRATCH_REPEATCOUNT                    (2U)
+/*
+    Register RGX_CR_OS3_SCRATCH0
+*/
+#define RGX_CR_OS3_SCRATCH0                               (0x31A80U)
+#define RGX_CR_OS3_SCRATCH0_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS3_SCRATCH0_DATA_SHIFT                    (0U)
+#define RGX_CR_OS3_SCRATCH0_DATA_CLRMSK                   (0x00000000U)
+
+
+/*
+    Register RGX_CR_OS3_SCRATCH1
+*/
+#define RGX_CR_OS3_SCRATCH1                               (0x31A88U)
+#define RGX_CR_OS3_SCRATCH1_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS3_SCRATCH1_DATA_SHIFT                    (0U)
+#define RGX_CR_OS3_SCRATCH1_DATA_CLRMSK                   (0x00000000U)
+
+
+/*
+    Register RGX_CR_OS3_SCRATCH2
+*/
+#define RGX_CR_OS3_SCRATCH2                               (0x31A90U)
+#define RGX_CR_OS3_SCRATCH2_MASKFULL                      (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_OS3_SCRATCH2_DATA_SHIFT                    (0U)
+#define RGX_CR_OS3_SCRATCH2_DATA_CLRMSK                   (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_OS3_SCRATCH3
+*/
+#define RGX_CR_OS3_SCRATCH3                               (0x31A98U)
+#define RGX_CR_OS3_SCRATCH3_MASKFULL                      (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_OS3_SCRATCH3_DATA_SHIFT                    (0U)
+#define RGX_CR_OS3_SCRATCH3_DATA_CLRMSK                   (0xFFFFFF00U)
+
+
+/*
+    Register group: RGX_CR_OS4_SCRATCH, with 2 repeats
+*/
+#define RGX_CR_OS4_SCRATCH_REPEATCOUNT                    (2U)
+/*
+    Register RGX_CR_OS4_SCRATCH0
+*/
+#define RGX_CR_OS4_SCRATCH0                               (0x41A80U)
+#define RGX_CR_OS4_SCRATCH0_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS4_SCRATCH0_DATA_SHIFT                    (0U)
+#define RGX_CR_OS4_SCRATCH0_DATA_CLRMSK                   (0x00000000U)
+
+
+/*
+    Register RGX_CR_OS4_SCRATCH1
+*/
+#define RGX_CR_OS4_SCRATCH1                               (0x41A88U)
+#define RGX_CR_OS4_SCRATCH1_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS4_SCRATCH1_DATA_SHIFT                    (0U)
+#define RGX_CR_OS4_SCRATCH1_DATA_CLRMSK                   (0x00000000U)
+
+
+/*
+    Register RGX_CR_OS4_SCRATCH2
+*/
+#define RGX_CR_OS4_SCRATCH2                               (0x41A90U)
+#define RGX_CR_OS4_SCRATCH2_MASKFULL                      (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_OS4_SCRATCH2_DATA_SHIFT                    (0U)
+#define RGX_CR_OS4_SCRATCH2_DATA_CLRMSK                   (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_OS4_SCRATCH3
+*/
+#define RGX_CR_OS4_SCRATCH3                               (0x41A98U)
+#define RGX_CR_OS4_SCRATCH3_MASKFULL                      (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_OS4_SCRATCH3_DATA_SHIFT                    (0U)
+#define RGX_CR_OS4_SCRATCH3_DATA_CLRMSK                   (0xFFFFFF00U)
+
+
+/*
+    Register group: RGX_CR_OS5_SCRATCH, with 2 repeats
+*/
+#define RGX_CR_OS5_SCRATCH_REPEATCOUNT                    (2U)
+/*
+    Register RGX_CR_OS5_SCRATCH0
+*/
+#define RGX_CR_OS5_SCRATCH0                               (0x51A80U)
+#define RGX_CR_OS5_SCRATCH0_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS5_SCRATCH0_DATA_SHIFT                    (0U)
+#define RGX_CR_OS5_SCRATCH0_DATA_CLRMSK                   (0x00000000U)
+
+
+/*
+    Register RGX_CR_OS5_SCRATCH1
+*/
+#define RGX_CR_OS5_SCRATCH1                               (0x51A88U)
+#define RGX_CR_OS5_SCRATCH1_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS5_SCRATCH1_DATA_SHIFT                    (0U)
+#define RGX_CR_OS5_SCRATCH1_DATA_CLRMSK                   (0x00000000U)
+
+
+/*
+    Register RGX_CR_OS5_SCRATCH2
+*/
+#define RGX_CR_OS5_SCRATCH2                               (0x51A90U)
+#define RGX_CR_OS5_SCRATCH2_MASKFULL                      (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_OS5_SCRATCH2_DATA_SHIFT                    (0U)
+#define RGX_CR_OS5_SCRATCH2_DATA_CLRMSK                   (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_OS5_SCRATCH3
+*/
+#define RGX_CR_OS5_SCRATCH3                               (0x51A98U)
+#define RGX_CR_OS5_SCRATCH3_MASKFULL                      (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_OS5_SCRATCH3_DATA_SHIFT                    (0U)
+#define RGX_CR_OS5_SCRATCH3_DATA_CLRMSK                   (0xFFFFFF00U)
+
+
+/*
+    Register group: RGX_CR_OS6_SCRATCH, with 2 repeats
+*/
+#define RGX_CR_OS6_SCRATCH_REPEATCOUNT                    (2U)
+/*
+    Register RGX_CR_OS6_SCRATCH0
+*/
+#define RGX_CR_OS6_SCRATCH0                               (0x61A80U)
+#define RGX_CR_OS6_SCRATCH0_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS6_SCRATCH0_DATA_SHIFT                    (0U)
+#define RGX_CR_OS6_SCRATCH0_DATA_CLRMSK                   (0x00000000U)
+
+
+/*
+    Register RGX_CR_OS6_SCRATCH1
+*/
+#define RGX_CR_OS6_SCRATCH1                               (0x61A88U)
+#define RGX_CR_OS6_SCRATCH1_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS6_SCRATCH1_DATA_SHIFT                    (0U)
+#define RGX_CR_OS6_SCRATCH1_DATA_CLRMSK                   (0x00000000U)
+
+
+/*
+    Register RGX_CR_OS6_SCRATCH2
+*/
+#define RGX_CR_OS6_SCRATCH2                               (0x61A90U)
+#define RGX_CR_OS6_SCRATCH2_MASKFULL                      (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_OS6_SCRATCH2_DATA_SHIFT                    (0U)
+#define RGX_CR_OS6_SCRATCH2_DATA_CLRMSK                   (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_OS6_SCRATCH3
+*/
+#define RGX_CR_OS6_SCRATCH3                               (0x61A98U)
+#define RGX_CR_OS6_SCRATCH3_MASKFULL                      (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_OS6_SCRATCH3_DATA_SHIFT                    (0U)
+#define RGX_CR_OS6_SCRATCH3_DATA_CLRMSK                   (0xFFFFFF00U)
+
+
+/*
+    Register group: RGX_CR_OS7_SCRATCH, with 2 repeats
+*/
+#define RGX_CR_OS7_SCRATCH_REPEATCOUNT                    (2U)
+/*
+    Register RGX_CR_OS7_SCRATCH0
+*/
+#define RGX_CR_OS7_SCRATCH0                               (0x71A80U)
+#define RGX_CR_OS7_SCRATCH0_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS7_SCRATCH0_DATA_SHIFT                    (0U)
+#define RGX_CR_OS7_SCRATCH0_DATA_CLRMSK                   (0x00000000U)
+
+
+/*
+    Register RGX_CR_OS7_SCRATCH1
+*/
+#define RGX_CR_OS7_SCRATCH1                               (0x71A88U)
+#define RGX_CR_OS7_SCRATCH1_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS7_SCRATCH1_DATA_SHIFT                    (0U)
+#define RGX_CR_OS7_SCRATCH1_DATA_CLRMSK                   (0x00000000U)
+
+
+/*
+    Register RGX_CR_OS7_SCRATCH2
+*/
+#define RGX_CR_OS7_SCRATCH2                               (0x71A90U)
+#define RGX_CR_OS7_SCRATCH2_MASKFULL                      (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_OS7_SCRATCH2_DATA_SHIFT                    (0U)
+#define RGX_CR_OS7_SCRATCH2_DATA_CLRMSK                   (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_OS7_SCRATCH3
+*/
+#define RGX_CR_OS7_SCRATCH3                               (0x71A98U)
+#define RGX_CR_OS7_SCRATCH3_MASKFULL                      (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_OS7_SCRATCH3_DATA_SHIFT                    (0U)
+#define RGX_CR_OS7_SCRATCH3_DATA_CLRMSK                   (0xFFFFFF00U)
+
+
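+/*
+    Editorial note: an illustrative sketch, not part of the generated header.
+    Each *_DATA_SHIFT / *_DATA_CLRMSK pair above follows the convention that
+    CLRMSK clears the field and ~CLRMSK selects it. A hypothetical read
+    helper (assuming a Linux readl() accessor, an ioremap'd register base
+    and the IMG_UINT32 type from the driver headers) could look like:
+
+    static inline IMG_UINT32 ReadOS3Scratch2(const void __iomem *pvRegBase)
+    {
+        IMG_UINT32 ui32Reg = readl(pvRegBase + RGX_CR_OS3_SCRATCH2);
+
+        // Select the 8-bit DATA field and move it down to bit 0.
+        return (ui32Reg & ~RGX_CR_OS3_SCRATCH2_DATA_CLRMSK)
+                >> RGX_CR_OS3_SCRATCH2_DATA_SHIFT;
+    }
+*/
+
+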
+/*
+    Register RGX_CR_SPFILTER_SIGNAL_DESCR
+*/
+#define RGX_CR_SPFILTER_SIGNAL_DESCR                      (0x2700U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MASKFULL             (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_SHIFT           (0U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_CLRMSK          (0xFFFF0000U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSHIFT      (4U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSIZE       (16U)
+
+
+/*
+    Register RGX_CR_SPFILTER_SIGNAL_DESCR_MIN
+*/
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN                  (0x2708U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_MASKFULL         (IMG_UINT64_C(0x000000FFFFFFFFF0))
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_SHIFT       (4U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_CLRMSK      (IMG_UINT64_C(0xFFFFFF000000000F))
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSHIFT  (4U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSIZE   (16U)
+
+
+/*
+    Register group: RGX_CR_FWCORE_ADDR_REMAP_CONFIG, with 16 repeats
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG_REPEATCOUNT       (16U)
+/*
+    Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG0
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0                  (0x3000U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_MASKFULL         (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_SHIFT    (62U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_CLRMSK   (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_EN       (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_SHIFT   (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_CLRMSK  (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN      (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT       (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_CLRMSK      (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_SHIFT      (40U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_CLRMSK     (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_SHIFT   (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_CLRMSK  (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_ALIGNSHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_ALIGNSIZE (4096U)
+
+
+/*
+    Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG1
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1                  (0x3008U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_MASKFULL         (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_TRUSTED_SHIFT    (62U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_TRUSTED_CLRMSK   (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_TRUSTED_EN       (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_SHIFT   (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_CLRMSK  (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_EN      (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_SIZE_SHIFT       (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_SIZE_CLRMSK      (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_CBASE_SHIFT      (40U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_CBASE_CLRMSK     (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_SHIFT   (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_CLRMSK  (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_ALIGNSHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_ALIGNSIZE (4096U)
+
+
+/*
+    Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG2
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2                  (0x3010U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_MASKFULL         (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_TRUSTED_SHIFT    (62U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_TRUSTED_CLRMSK   (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_TRUSTED_EN       (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_SHIFT   (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_CLRMSK  (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_EN      (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_SIZE_SHIFT       (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_SIZE_CLRMSK      (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_CBASE_SHIFT      (40U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_CBASE_CLRMSK     (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_SHIFT   (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_CLRMSK  (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_ALIGNSHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_ALIGNSIZE (4096U)
+
+
+/*
+    Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG3
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3                  (0x3018U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_MASKFULL         (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_TRUSTED_SHIFT    (62U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_TRUSTED_CLRMSK   (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_TRUSTED_EN       (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_SHIFT   (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_CLRMSK  (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_EN      (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_SIZE_SHIFT       (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_SIZE_CLRMSK      (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_CBASE_SHIFT      (40U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_CBASE_CLRMSK     (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_SHIFT   (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_CLRMSK  (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_ALIGNSHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_ALIGNSIZE (4096U)
+
+
+/*
+    Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG4
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4                  (0x3020U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_MASKFULL         (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_TRUSTED_SHIFT    (62U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_TRUSTED_CLRMSK   (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_TRUSTED_EN       (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_SHIFT   (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_CLRMSK  (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_EN      (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_SIZE_SHIFT       (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_SIZE_CLRMSK      (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_CBASE_SHIFT      (40U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_CBASE_CLRMSK     (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_SHIFT   (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_CLRMSK  (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_ALIGNSHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_ALIGNSIZE (4096U)
+
+
+/*
+    Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG5
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5                  (0x3028U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_MASKFULL         (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_TRUSTED_SHIFT    (62U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_TRUSTED_CLRMSK   (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_TRUSTED_EN       (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_SHIFT   (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_CLRMSK  (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_EN      (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_SIZE_SHIFT       (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_SIZE_CLRMSK      (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_CBASE_SHIFT      (40U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_CBASE_CLRMSK     (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_SHIFT   (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_CLRMSK  (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_ALIGNSHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_ALIGNSIZE (4096U)
+
+
+/*
+    Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG6
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6                  (0x3030U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_MASKFULL         (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_TRUSTED_SHIFT    (62U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_TRUSTED_CLRMSK   (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_TRUSTED_EN       (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_SHIFT   (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_CLRMSK  (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_EN      (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_SIZE_SHIFT       (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_SIZE_CLRMSK      (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_CBASE_SHIFT      (40U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_CBASE_CLRMSK     (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_SHIFT   (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_CLRMSK  (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_ALIGNSHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_ALIGNSIZE (4096U)
+
+
+/*
+    Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG7
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7                  (0x3038U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_MASKFULL         (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_TRUSTED_SHIFT    (62U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_TRUSTED_CLRMSK   (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_TRUSTED_EN       (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_SHIFT   (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_CLRMSK  (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_EN      (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_SIZE_SHIFT       (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_SIZE_CLRMSK      (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_CBASE_SHIFT      (40U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_CBASE_CLRMSK     (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_SHIFT   (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_CLRMSK  (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_ALIGNSHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_ALIGNSIZE (4096U)
+
+
+/*
+    Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG8
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8                  (0x3040U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_MASKFULL         (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_TRUSTED_SHIFT    (62U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_TRUSTED_CLRMSK   (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_TRUSTED_EN       (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_SHIFT   (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_CLRMSK  (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_EN      (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_SIZE_SHIFT       (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_SIZE_CLRMSK      (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_CBASE_SHIFT      (40U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_CBASE_CLRMSK     (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_SHIFT   (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_CLRMSK  (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_ALIGNSHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_ALIGNSIZE (4096U)
+
+
+/*
+    Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG9
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9                  (0x3048U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_MASKFULL         (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_TRUSTED_SHIFT    (62U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_TRUSTED_CLRMSK   (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_TRUSTED_EN       (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_SHIFT   (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_CLRMSK  (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_EN      (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_SIZE_SHIFT       (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_SIZE_CLRMSK      (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_CBASE_SHIFT      (40U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_CBASE_CLRMSK     (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_SHIFT   (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_CLRMSK  (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_ALIGNSHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_ALIGNSIZE (4096U)
+
+
+/*
+    Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG10
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10                 (0x3050U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_MASKFULL        (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_TRUSTED_SHIFT   (62U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_TRUSTED_CLRMSK  (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_TRUSTED_EN      (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_SHIFT  (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_EN     (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_SIZE_SHIFT      (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_SIZE_CLRMSK     (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_CBASE_SHIFT     (40U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_CBASE_CLRMSK    (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_SHIFT  (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_ALIGNSHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_ALIGNSIZE (4096U)
+
+
+/*
+    Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG11
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11                 (0x3058U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_MASKFULL        (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_TRUSTED_SHIFT   (62U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_TRUSTED_CLRMSK  (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_TRUSTED_EN      (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_SHIFT  (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_EN     (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_SIZE_SHIFT      (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_SIZE_CLRMSK     (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_CBASE_SHIFT     (40U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_CBASE_CLRMSK    (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_SHIFT  (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_ALIGNSHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_ALIGNSIZE (4096U)
+
+
+/*
+    Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG12
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12                 (0x3060U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_MASKFULL        (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_TRUSTED_SHIFT   (62U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_TRUSTED_CLRMSK  (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_TRUSTED_EN      (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_SHIFT  (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_EN     (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_SIZE_SHIFT      (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_SIZE_CLRMSK     (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_CBASE_SHIFT     (40U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_CBASE_CLRMSK    (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_SHIFT  (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_ALIGNSHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_ALIGNSIZE (4096U)
+
+
+/*
+    Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG13
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13                 (0x3068U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_MASKFULL        (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_TRUSTED_SHIFT   (62U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_TRUSTED_CLRMSK  (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_TRUSTED_EN      (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_SHIFT  (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_EN     (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_SIZE_SHIFT      (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_SIZE_CLRMSK     (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_CBASE_SHIFT     (40U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_CBASE_CLRMSK    (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_SHIFT  (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_ALIGNSHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_ALIGNSIZE (4096U)
+
+
+/*
+    Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG14
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14                 (0x3070U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_MASKFULL        (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_TRUSTED_SHIFT   (62U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_TRUSTED_CLRMSK  (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_TRUSTED_EN      (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_SHIFT  (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_EN     (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_SIZE_SHIFT      (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_SIZE_CLRMSK     (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_CBASE_SHIFT     (40U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_CBASE_CLRMSK    (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_SHIFT  (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_ALIGNSHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_ALIGNSIZE (4096U)
+
+
+/*
+    Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG15
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15                 (0x3078U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_MASKFULL        (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_TRUSTED_SHIFT   (62U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_TRUSTED_CLRMSK  (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_TRUSTED_EN      (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_SHIFT  (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_EN     (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_SIZE_SHIFT      (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_SIZE_CLRMSK     (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_CBASE_SHIFT     (40U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_CBASE_CLRMSK    (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_SHIFT  (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_ALIGNSHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_ALIGNSIZE (4096U)
+
+
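+/*
+    Editorial note: an illustrative sketch, not taken from the driver
+    sources. The SHIFT / CLRMSK / EN macros in the remap-config group above
+    are meant to be combined field by field; the helper name and parameters
+    below are hypothetical, only the register macros are real.
+
+    static inline IMG_UINT64 PackFWCoreRemapConfig0(IMG_UINT64 ui64DevVAddr,
+                                                    IMG_UINT64 ui64Size)
+    {
+        IMG_UINT64 ui64Val = 0;
+
+        // DEVVADDR: SHIFT equals ALIGNSHIFT (12), so a 4KB-aligned device
+        // virtual address drops straight into bits [39:12] once masked.
+        ui64Val |= ui64DevVAddr & ~RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_CLRMSK;
+
+        // SIZE occupies bits [59:44]; shift the value up, then trim it.
+        ui64Val |= (ui64Size << RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT)
+                   & ~RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_CLRMSK;
+
+        // Allow instruction fetches through this remap window.
+        ui64Val |= RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN;
+
+        // Never set bits outside the documented register mask.
+        return ui64Val & RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_MASKFULL;
+    }
+*/
+
+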
+/*
+    Register RGX_CR_FWCORE_BOOT
+*/
+#define RGX_CR_FWCORE_BOOT                                (0x3090U)
+#define RGX_CR_FWCORE_BOOT_MASKFULL                       (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_FWCORE_BOOT_ENABLE_SHIFT                   (0U)
+#define RGX_CR_FWCORE_BOOT_ENABLE_CLRMSK                  (0xFFFFFFFEU)
+#define RGX_CR_FWCORE_BOOT_ENABLE_EN                      (0x00000001U)
+
+
+/*
+    Register RGX_CR_FWCORE_RESET_ADDR
+*/
+#define RGX_CR_FWCORE_RESET_ADDR                          (0x3098U)
+#define RGX_CR_FWCORE_RESET_ADDR_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFE))
+#define RGX_CR_FWCORE_RESET_ADDR_ADDR_SHIFT               (1U)
+#define RGX_CR_FWCORE_RESET_ADDR_ADDR_CLRMSK              (0x00000001U)
+#define RGX_CR_FWCORE_RESET_ADDR_ADDR_ALIGNSHIFT          (1U)
+#define RGX_CR_FWCORE_RESET_ADDR_ADDR_ALIGNSIZE           (2U)
+
+
+/*
+    Register RGX_CR_FWCORE_WRAPPER_NMI_ADDR
+*/
+#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR                    (0x30A0U)
+#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFE))
+#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_SHIFT         (1U)
+#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_CLRMSK        (0x00000001U)
+#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_ALIGNSHIFT    (1U)
+#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_ALIGNSIZE     (2U)
+
+
+/*
+    Register RGX_CR_FWCORE_WRAPPER_NMI_EVENT
+*/
+#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT                   (0x30A8U)
+#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_MASKFULL          (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_SHIFT  (0U)
+#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_EN     (0x00000001U)
+
+
+/*
+    Register RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS
+*/
+#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS                (0x30B0U)
+#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_MASKFULL       (IMG_UINT64_C(0x000000000000F771))
+#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_CAT_BASE_SHIFT (12U)
+#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU)
+#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_SHIFT (8U)
+#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU)
+#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_DATA_TYPE_SHIFT (5U)
+#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU)
+#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_RO_SHIFT (4U)
+#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_RO_EN    (0x00000010U)
+#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_SHIFT    (0U)
+#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_CLRMSK   (0xFFFFFFFEU)
+#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_EN       (0x00000001U)
+
+
+/*
+    Register RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS
+*/
+#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS                (0x30B8U)
+#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_MASKFULL       (IMG_UINT64_C(0x001FFFFFFFFFFFF0))
+#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_SHIFT      (52U)
+#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_CLRMSK     (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_EN         (IMG_UINT64_C(0x0010000000000000))
+#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_SHIFT   (46U)
+#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_CLRMSK  (IMG_UINT64_C(0xFFF03FFFFFFFFFFF))
+#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_SHIFT   (40U)
+#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_CLRMSK  (IMG_UINT64_C(0xFFFFC0FFFFFFFFFF))
+#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_SHIFT  (4U)
+#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F))
+#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
+#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
+
+
+/*
+    Register RGX_CR_FWCORE_MEM_CTRL_INVAL
+*/
+#define RGX_CR_FWCORE_MEM_CTRL_INVAL                      (0x30C0U)
+#define RGX_CR_FWCORE_MEM_CTRL_INVAL_MASKFULL             (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_FWCORE_MEM_CTRL_INVAL_TLB_SHIFT            (3U)
+#define RGX_CR_FWCORE_MEM_CTRL_INVAL_TLB_CLRMSK           (0xFFFFFFF7U)
+#define RGX_CR_FWCORE_MEM_CTRL_INVAL_TLB_EN               (0x00000008U)
+#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PC_SHIFT             (2U)
+#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PC_CLRMSK            (0xFFFFFFFBU)
+#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PC_EN                (0x00000004U)
+#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PD_SHIFT             (1U)
+#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PD_CLRMSK            (0xFFFFFFFDU)
+#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PD_EN                (0x00000002U)
+#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PT_SHIFT             (0U)
+#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PT_CLRMSK            (0xFFFFFFFEU)
+#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PT_EN                (0x00000001U)
+
+
+/*
+    Register RGX_CR_FWCORE_MEM_MMU_STATUS
+*/
+#define RGX_CR_FWCORE_MEM_MMU_STATUS                      (0x30C8U)
+#define RGX_CR_FWCORE_MEM_MMU_STATUS_MASKFULL             (IMG_UINT64_C(0x000000000FFFFFF7))
+#define RGX_CR_FWCORE_MEM_MMU_STATUS_PC_DATA_SHIFT        (20U)
+#define RGX_CR_FWCORE_MEM_MMU_STATUS_PC_DATA_CLRMSK       (0xF00FFFFFU)
+#define RGX_CR_FWCORE_MEM_MMU_STATUS_PD_DATA_SHIFT        (12U)
+#define RGX_CR_FWCORE_MEM_MMU_STATUS_PD_DATA_CLRMSK       (0xFFF00FFFU)
+#define RGX_CR_FWCORE_MEM_MMU_STATUS_PT_DATA_SHIFT        (4U)
+#define RGX_CR_FWCORE_MEM_MMU_STATUS_PT_DATA_CLRMSK       (0xFFFFF00FU)
+#define RGX_CR_FWCORE_MEM_MMU_STATUS_STALLED_SHIFT        (2U)
+#define RGX_CR_FWCORE_MEM_MMU_STATUS_STALLED_CLRMSK       (0xFFFFFFFBU)
+#define RGX_CR_FWCORE_MEM_MMU_STATUS_STALLED_EN           (0x00000004U)
+#define RGX_CR_FWCORE_MEM_MMU_STATUS_PAUSED_SHIFT         (1U)
+#define RGX_CR_FWCORE_MEM_MMU_STATUS_PAUSED_CLRMSK        (0xFFFFFFFDU)
+#define RGX_CR_FWCORE_MEM_MMU_STATUS_PAUSED_EN            (0x00000002U)
+#define RGX_CR_FWCORE_MEM_MMU_STATUS_BUSY_SHIFT           (0U)
+#define RGX_CR_FWCORE_MEM_MMU_STATUS_BUSY_CLRMSK          (0xFFFFFFFEU)
+#define RGX_CR_FWCORE_MEM_MMU_STATUS_BUSY_EN              (0x00000001U)
+
+
+/*
+    Register RGX_CR_FWCORE_MEM_READS_EXT_STATUS
+*/
+#define RGX_CR_FWCORE_MEM_READS_EXT_STATUS                (0x30D8U)
+#define RGX_CR_FWCORE_MEM_READS_EXT_STATUS_MASKFULL       (IMG_UINT64_C(0x0000000000000FFF))
+#define RGX_CR_FWCORE_MEM_READS_EXT_STATUS_MMU_SHIFT      (0U)
+#define RGX_CR_FWCORE_MEM_READS_EXT_STATUS_MMU_CLRMSK     (0xFFFFF000U)
+
+
+/*
+    Register RGX_CR_FWCORE_MEM_READS_INT_STATUS
+*/
+#define RGX_CR_FWCORE_MEM_READS_INT_STATUS                (0x30E0U)
+#define RGX_CR_FWCORE_MEM_READS_INT_STATUS_MASKFULL       (IMG_UINT64_C(0x00000000000007FF))
+#define RGX_CR_FWCORE_MEM_READS_INT_STATUS_MMU_SHIFT      (0U)
+#define RGX_CR_FWCORE_MEM_READS_INT_STATUS_MMU_CLRMSK     (0xFFFFF800U)
+
+
+/*
+    Register RGX_CR_FWCORE_WRAPPER_FENCE
+*/
+#define RGX_CR_FWCORE_WRAPPER_FENCE                       (0x30E8U)
+#define RGX_CR_FWCORE_WRAPPER_FENCE_MASKFULL              (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_FWCORE_WRAPPER_FENCE_ID_SHIFT              (0U)
+#define RGX_CR_FWCORE_WRAPPER_FENCE_ID_CLRMSK             (0xFFFFFFFEU)
+#define RGX_CR_FWCORE_WRAPPER_FENCE_ID_EN                 (0x00000001U)
+
+
+/*
+    Register group: RGX_CR_FWCORE_MEM_CAT_BASE, with 8 repeats
+*/
+#define RGX_CR_FWCORE_MEM_CAT_BASE_REPEATCOUNT            (8U)
+/*
+    Register RGX_CR_FWCORE_MEM_CAT_BASE0
+*/
+#define RGX_CR_FWCORE_MEM_CAT_BASE0                       (0x30F0U)
+#define RGX_CR_FWCORE_MEM_CAT_BASE0_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT            (12U)
+#define RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_CLRMSK           (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT       (12U)
+#define RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSIZE        (4096U)
+
+
+/*
+    Register RGX_CR_FWCORE_MEM_CAT_BASE1
+*/
+#define RGX_CR_FWCORE_MEM_CAT_BASE1                       (0x30F8U)
+#define RGX_CR_FWCORE_MEM_CAT_BASE1_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_FWCORE_MEM_CAT_BASE1_ADDR_SHIFT            (12U)
+#define RGX_CR_FWCORE_MEM_CAT_BASE1_ADDR_CLRMSK           (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_MEM_CAT_BASE1_ADDR_ALIGNSHIFT       (12U)
+#define RGX_CR_FWCORE_MEM_CAT_BASE1_ADDR_ALIGNSIZE        (4096U)
+
+
+/*
+    Register RGX_CR_FWCORE_MEM_CAT_BASE2
+*/
+#define RGX_CR_FWCORE_MEM_CAT_BASE2                       (0x3100U)
+#define RGX_CR_FWCORE_MEM_CAT_BASE2_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_FWCORE_MEM_CAT_BASE2_ADDR_SHIFT            (12U)
+#define RGX_CR_FWCORE_MEM_CAT_BASE2_ADDR_CLRMSK           (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_MEM_CAT_BASE2_ADDR_ALIGNSHIFT       (12U)
+#define RGX_CR_FWCORE_MEM_CAT_BASE2_ADDR_ALIGNSIZE        (4096U)
+
+
+/*
+    Register RGX_CR_FWCORE_MEM_CAT_BASE3
+*/
+#define RGX_CR_FWCORE_MEM_CAT_BASE3                       (0x3108U)
+#define RGX_CR_FWCORE_MEM_CAT_BASE3_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_FWCORE_MEM_CAT_BASE3_ADDR_SHIFT            (12U)
+#define RGX_CR_FWCORE_MEM_CAT_BASE3_ADDR_CLRMSK           (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_MEM_CAT_BASE3_ADDR_ALIGNSHIFT       (12U)
+#define RGX_CR_FWCORE_MEM_CAT_BASE3_ADDR_ALIGNSIZE        (4096U)
+
+
+/*
+    Register RGX_CR_FWCORE_MEM_CAT_BASE4
+*/
+#define RGX_CR_FWCORE_MEM_CAT_BASE4                       (0x3110U)
+#define RGX_CR_FWCORE_MEM_CAT_BASE4_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_FWCORE_MEM_CAT_BASE4_ADDR_SHIFT            (12U)
+#define RGX_CR_FWCORE_MEM_CAT_BASE4_ADDR_CLRMSK           (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_MEM_CAT_BASE4_ADDR_ALIGNSHIFT       (12U)
+#define RGX_CR_FWCORE_MEM_CAT_BASE4_ADDR_ALIGNSIZE        (4096U)
+
+
+/*
+    Register RGX_CR_FWCORE_MEM_CAT_BASE5
+*/
+#define RGX_CR_FWCORE_MEM_CAT_BASE5                       (0x3118U)
+#define RGX_CR_FWCORE_MEM_CAT_BASE5_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_FWCORE_MEM_CAT_BASE5_ADDR_SHIFT            (12U)
+#define RGX_CR_FWCORE_MEM_CAT_BASE5_ADDR_CLRMSK           (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_MEM_CAT_BASE5_ADDR_ALIGNSHIFT       (12U)
+#define RGX_CR_FWCORE_MEM_CAT_BASE5_ADDR_ALIGNSIZE        (4096U)
+
+
+/*
+    Register RGX_CR_FWCORE_MEM_CAT_BASE6
+*/
+#define RGX_CR_FWCORE_MEM_CAT_BASE6                       (0x3120U)
+#define RGX_CR_FWCORE_MEM_CAT_BASE6_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_FWCORE_MEM_CAT_BASE6_ADDR_SHIFT            (12U)
+#define RGX_CR_FWCORE_MEM_CAT_BASE6_ADDR_CLRMSK           (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_MEM_CAT_BASE6_ADDR_ALIGNSHIFT       (12U)
+#define RGX_CR_FWCORE_MEM_CAT_BASE6_ADDR_ALIGNSIZE        (4096U)
+
+
+/*
+    Register RGX_CR_FWCORE_MEM_CAT_BASE7
+*/
+#define RGX_CR_FWCORE_MEM_CAT_BASE7                       (0x3128U)
+#define RGX_CR_FWCORE_MEM_CAT_BASE7_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_FWCORE_MEM_CAT_BASE7_ADDR_SHIFT            (12U)
+#define RGX_CR_FWCORE_MEM_CAT_BASE7_ADDR_CLRMSK           (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_MEM_CAT_BASE7_ADDR_ALIGNSHIFT       (12U)
+#define RGX_CR_FWCORE_MEM_CAT_BASE7_ADDR_ALIGNSIZE        (4096U)
+
+
+/*
+    Register RGX_CR_FWCORE_WDT_RESET
+*/
+#define RGX_CR_FWCORE_WDT_RESET                           (0x3130U)
+#define RGX_CR_FWCORE_WDT_RESET_MASKFULL                  (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_FWCORE_WDT_RESET_EN_SHIFT                  (0U)
+#define RGX_CR_FWCORE_WDT_RESET_EN_CLRMSK                 (0xFFFFFFFEU)
+#define RGX_CR_FWCORE_WDT_RESET_EN_EN                     (0x00000001U)
+
+
+/*
+    Register RGX_CR_FWCORE_WDT_CTRL
+*/
+#define RGX_CR_FWCORE_WDT_CTRL                            (0x3138U)
+#define RGX_CR_FWCORE_WDT_CTRL_MASKFULL                   (IMG_UINT64_C(0x00000000FFFF1F01))
+#define RGX_CR_FWCORE_WDT_CTRL_PROT_SHIFT                 (16U)
+#define RGX_CR_FWCORE_WDT_CTRL_PROT_CLRMSK                (0x0000FFFFU)
+#define RGX_CR_FWCORE_WDT_CTRL_THRESHOLD_SHIFT            (8U)
+#define RGX_CR_FWCORE_WDT_CTRL_THRESHOLD_CLRMSK           (0xFFFFE0FFU)
+#define RGX_CR_FWCORE_WDT_CTRL_ENABLE_SHIFT               (0U)
+#define RGX_CR_FWCORE_WDT_CTRL_ENABLE_CLRMSK              (0xFFFFFFFEU)
+#define RGX_CR_FWCORE_WDT_CTRL_ENABLE_EN                  (0x00000001U)
+
+
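+/*
+    Editorial note: an illustrative sketch, not from the driver sources. A
+    32-bit control register such as WDT_CTRL is typically assembled from
+    its fields in one expression; the protection key and threshold values
+    below are placeholders, not documented defaults.
+
+    IMG_UINT32 ui32WdtCtrl =
+        ((0x1234U << RGX_CR_FWCORE_WDT_CTRL_PROT_SHIFT)
+            & ~RGX_CR_FWCORE_WDT_CTRL_PROT_CLRMSK) |
+        ((0x10U << RGX_CR_FWCORE_WDT_CTRL_THRESHOLD_SHIFT)
+            & ~RGX_CR_FWCORE_WDT_CTRL_THRESHOLD_CLRMSK) |
+        RGX_CR_FWCORE_WDT_CTRL_ENABLE_EN;
+*/
+
+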
+/*
+    Register RGX_CR_FWCORE_WDT_COUNT
+*/
+#define RGX_CR_FWCORE_WDT_COUNT                           (0x3140U)
+#define RGX_CR_FWCORE_WDT_COUNT_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FWCORE_WDT_COUNT_VALUE_SHIFT               (0U)
+#define RGX_CR_FWCORE_WDT_COUNT_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register group: RGX_CR_FWCORE_DMI_RESERVED0, with 4 repeats
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED0_REPEATCOUNT           (4U)
+/*
+    Register RGX_CR_FWCORE_DMI_RESERVED00
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED00                      (0x3400U)
+#define RGX_CR_FWCORE_DMI_RESERVED00_MASKFULL             (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_FWCORE_DMI_RESERVED01
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED01                      (0x3408U)
+#define RGX_CR_FWCORE_DMI_RESERVED01_MASKFULL             (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_FWCORE_DMI_RESERVED02
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED02                      (0x3410U)
+#define RGX_CR_FWCORE_DMI_RESERVED02_MASKFULL             (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_FWCORE_DMI_RESERVED03
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED03                      (0x3418U)
+#define RGX_CR_FWCORE_DMI_RESERVED03_MASKFULL             (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_FWCORE_DMI_DATA0
+*/
+#define RGX_CR_FWCORE_DMI_DATA0                           (0x3420U)
+#define RGX_CR_FWCORE_DMI_DATA0_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FWCORE_DMI_DATA0_VAL_SHIFT                 (0U)
+#define RGX_CR_FWCORE_DMI_DATA0_VAL_CLRMSK                (0x00000000U)
+
+
+/*
+    Register RGX_CR_FWCORE_DMI_DATA1
+*/
+#define RGX_CR_FWCORE_DMI_DATA1                           (0x3428U)
+#define RGX_CR_FWCORE_DMI_DATA1_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FWCORE_DMI_DATA1_VAL_SHIFT                 (0U)
+#define RGX_CR_FWCORE_DMI_DATA1_VAL_CLRMSK                (0x00000000U)
+
+
+/*
+    Register group: RGX_CR_FWCORE_DMI_RESERVED1, with 10 repeats
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED1_REPEATCOUNT           (10U)
+/*
+    Register RGX_CR_FWCORE_DMI_RESERVED10
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED10                      (0x3430U)
+#define RGX_CR_FWCORE_DMI_RESERVED10_MASKFULL             (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_FWCORE_DMI_RESERVED11
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED11                      (0x3438U)
+#define RGX_CR_FWCORE_DMI_RESERVED11_MASKFULL             (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_FWCORE_DMI_RESERVED12
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED12                      (0x3440U)
+#define RGX_CR_FWCORE_DMI_RESERVED12_MASKFULL             (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_FWCORE_DMI_RESERVED13
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED13                      (0x3448U)
+#define RGX_CR_FWCORE_DMI_RESERVED13_MASKFULL             (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_FWCORE_DMI_RESERVED14
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED14                      (0x3450U)
+#define RGX_CR_FWCORE_DMI_RESERVED14_MASKFULL             (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_FWCORE_DMI_DMCONTROL
+*/
+#define RGX_CR_FWCORE_DMI_DMCONTROL                       (0x3480U)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_MASKFULL              (IMG_UINT64_C(0x00000000D0000003))
+#define RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_SHIFT         (31U)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_CLRMSK        (0x7FFFFFFFU)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_EN            (0x80000000U)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_SHIFT       (30U)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_CLRMSK      (0xBFFFFFFFU)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_EN          (0x40000000U)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_ACKHAVERESET_SHIFT    (28U)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_ACKHAVERESET_CLRMSK   (0xEFFFFFFFU)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_ACKHAVERESET_EN       (0x10000000U)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_NDMRESET_SHIFT        (1U)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_NDMRESET_CLRMSK       (0xFFFFFFFDU)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_NDMRESET_EN           (0x00000002U)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_SHIFT        (0U)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_CLRMSK       (0xFFFFFFFEU)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN           (0x00000001U)
+
+
+/*
+    Register RGX_CR_FWCORE_DMI_DMSTATUS
+*/
+#define RGX_CR_FWCORE_DMI_DMSTATUS                        (0x3488U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_MASKFULL               (IMG_UINT64_C(0x00000000004FFFFF))
+#define RGX_CR_FWCORE_DMI_DMSTATUS_IMPEBREAK_SHIFT        (22U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_IMPEBREAK_CLRMSK       (0xFFBFFFFFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_IMPEBREAK_EN           (0x00400000U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHAVERESET_SHIFT     (19U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHAVERESET_CLRMSK    (0xFFF7FFFFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHAVERESET_EN        (0x00080000U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHAVERESET_SHIFT     (18U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHAVERESET_CLRMSK    (0xFFFBFFFFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHAVERESET_EN        (0x00040000U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_SHIFT     (17U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_CLRMSK    (0xFFFDFFFFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN        (0x00020000U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRESUMEACK_SHIFT     (16U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRESUMEACK_CLRMSK    (0xFFFEFFFFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRESUMEACK_EN        (0x00010000U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLNONEXISTENT_SHIFT   (15U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLNONEXISTENT_CLRMSK  (0xFFFF7FFFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLNONEXISTENT_EN      (0x00008000U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYNONEXISTENT_SHIFT   (14U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYNONEXISTENT_CLRMSK  (0xFFFFBFFFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYNONEXISTENT_EN      (0x00004000U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLUNAVAIL_SHIFT       (13U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLUNAVAIL_CLRMSK      (0xFFFFDFFFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLUNAVAIL_EN          (0x00002000U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYUNAVAIL_SHIFT       (12U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYUNAVAIL_CLRMSK      (0xFFFFEFFFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYUNAVAIL_EN          (0x00001000U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRUNNING_SHIFT       (11U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRUNNING_CLRMSK      (0xFFFFF7FFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRUNNING_EN          (0x00000800U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRUNNING_SHIFT       (10U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRUNNING_CLRMSK      (0xFFFFFBFFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRUNNING_EN          (0x00000400U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_SHIFT        (9U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_CLRMSK       (0xFFFFFDFFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN           (0x00000200U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHALTED_SHIFT        (8U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHALTED_CLRMSK       (0xFFFFFEFFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHALTED_EN           (0x00000100U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_SHIFT    (7U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_CLRMSK   (0xFFFFFF7FU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_EN       (0x00000080U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHBUSY_SHIFT         (6U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHBUSY_CLRMSK        (0xFFFFFFBFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHBUSY_EN            (0x00000040U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_HASRESETHALTREQ_SHIFT  (5U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_HASRESETHALTREQ_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_HASRESETHALTREQ_EN     (0x00000020U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_CONFSTRPTRVALID_SHIFT  (4U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_CONFSTRPTRVALID_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_CONFSTRPTRVALID_EN     (0x00000010U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_VERSION_SHIFT          (0U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_VERSION_CLRMSK         (0xFFFFFFF0U)
+
+
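+/*
+    Editorial note: an illustrative sketch, not from the driver sources.
+    The single-bit *_EN masks above are plain bit tests against the 32-bit
+    DMSTATUS value; the variables below are hypothetical.
+
+    IMG_UINT32 ui32DMStatus = readl(pvRegBase + RGX_CR_FWCORE_DMI_DMSTATUS);
+
+    // All harts halted and the debug module authenticated?
+    if ((ui32DMStatus & RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN) &&
+        (ui32DMStatus & RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_EN))
+    {
+        // The debug spec version field sits in bits [3:0].
+        IMG_UINT32 ui32Version =
+            ui32DMStatus & ~RGX_CR_FWCORE_DMI_DMSTATUS_VERSION_CLRMSK;
+    }
+*/
+
+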
+/*
+    Register group: RGX_CR_FWCORE_DMI_RESERVED2, with 4 repeats
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED2_REPEATCOUNT           (4U)
+/*
+    Register RGX_CR_FWCORE_DMI_RESERVED20
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED20                      (0x3490U)
+#define RGX_CR_FWCORE_DMI_RESERVED20_MASKFULL             (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_FWCORE_DMI_RESERVED21
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED21                      (0x3498U)
+#define RGX_CR_FWCORE_DMI_RESERVED21_MASKFULL             (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_FWCORE_DMI_RESERVED22
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED22                      (0x34A0U)
+#define RGX_CR_FWCORE_DMI_RESERVED22_MASKFULL             (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_FWCORE_DMI_RESERVED23
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED23                      (0x34A8U)
+#define RGX_CR_FWCORE_DMI_RESERVED23_MASKFULL             (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_FWCORE_DMI_ABSTRACTCS
+*/
+#define RGX_CR_FWCORE_DMI_ABSTRACTCS                      (0x34B0U)
+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_MASKFULL             (IMG_UINT64_C(0x000000001F00170F))
+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_PROGBUFSIZE_SHIFT    (24U)
+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_PROGBUFSIZE_CLRMSK   (0xE0FFFFFFU)
+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_SHIFT           (12U)
+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_CLRMSK          (0xFFFFEFFFU)
+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN              (0x00001000U)
+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_SHIFT         (8U)
+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK        (0xFFFFF8FFU)
+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_DATACOUNT_SHIFT      (0U)
+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_DATACOUNT_CLRMSK     (0xFFFFFFF0U)
+
+
+/*
+    Register RGX_CR_FWCORE_DMI_COMMAND
+*/
+#define RGX_CR_FWCORE_DMI_COMMAND                         (0x34B8U)
+#define RGX_CR_FWCORE_DMI_COMMAND_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT           (24U)
+#define RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_CLRMSK          (0x00FFFFFFU)
+#define RGX_CR_FWCORE_DMI_COMMAND_CONTROL_SHIFT           (0U)
+#define RGX_CR_FWCORE_DMI_COMMAND_CONTROL_CLRMSK          (0xFF000000U)
+
+
+/*
+    Register group: RGX_CR_FWCORE_DMI_RESERVED3, with 32 repeats
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED3_REPEATCOUNT           (32U)
+/*
+    Register RGX_CR_FWCORE_DMI_RESERVED30
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED30                      (0x34C0U)
+#define RGX_CR_FWCORE_DMI_RESERVED30_MASKFULL             (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_FWCORE_DMI_RESERVED31
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED31                      (0x34C8U)
+#define RGX_CR_FWCORE_DMI_RESERVED31_MASKFULL             (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_FWCORE_DMI_SBCS
+*/
+#define RGX_CR_FWCORE_DMI_SBCS                            (0x35C0U)
+#define RGX_CR_FWCORE_DMI_SBCS_MASKFULL                   (IMG_UINT64_C(0x00000000E07FFFFF))
+#define RGX_CR_FWCORE_DMI_SBCS_SBVERSION_SHIFT            (29U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBVERSION_CLRMSK           (0x1FFFFFFFU)
+#define RGX_CR_FWCORE_DMI_SBCS_SBBUSYERROR_SHIFT          (22U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBBUSYERROR_CLRMSK         (0xFFBFFFFFU)
+#define RGX_CR_FWCORE_DMI_SBCS_SBBUSYERROR_EN             (0x00400000U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBBUSY_SHIFT               (21U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBBUSY_CLRMSK              (0xFFDFFFFFU)
+#define RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN                  (0x00200000U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_SHIFT         (20U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_CLRMSK        (0xFFEFFFFFU)
+#define RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_EN            (0x00100000U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT             (17U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS_CLRMSK            (0xFFF1FFFFU)
+#define RGX_CR_FWCORE_DMI_SBCS_SBAUTOINCREMENT_SHIFT      (16U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBAUTOINCREMENT_CLRMSK     (0xFFFEFFFFU)
+#define RGX_CR_FWCORE_DMI_SBCS_SBAUTOINCREMENT_EN         (0x00010000U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBREADONDATA_SHIFT         (15U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBREADONDATA_CLRMSK        (0xFFFF7FFFU)
+#define RGX_CR_FWCORE_DMI_SBCS_SBREADONDATA_EN            (0x00008000U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBERROR_SHIFT              (12U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK             (0xFFFF8FFFU)
+#define RGX_CR_FWCORE_DMI_SBCS_SBASIZE_SHIFT              (5U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBASIZE_CLRMSK             (0xFFFFF01FU)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS128_SHIFT          (4U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS128_CLRMSK         (0xFFFFFFEFU)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS128_EN             (0x00000010U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS64_SHIFT           (3U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS64_CLRMSK          (0xFFFFFFF7U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS64_EN              (0x00000008U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS32_SHIFT           (2U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS32_CLRMSK          (0xFFFFFFFBU)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS32_EN              (0x00000004U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS16_SHIFT           (1U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS16_CLRMSK          (0xFFFFFFFDU)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS16_EN              (0x00000002U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS8_SHIFT            (0U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS8_CLRMSK           (0xFFFFFFFEU)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS8_EN               (0x00000001U)
+
+
+/*
+    Register RGX_CR_FWCORE_DMI_SBADDRESS0
+*/
+#define RGX_CR_FWCORE_DMI_SBADDRESS0                      (0x35C8U)
+#define RGX_CR_FWCORE_DMI_SBADDRESS0_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FWCORE_DMI_SBADDRESS0_ADDRESS_SHIFT        (0U)
+#define RGX_CR_FWCORE_DMI_SBADDRESS0_ADDRESS_CLRMSK       (0x00000000U)
+
+
+/*
+    Register group: RGX_CR_FWCORE_DMI_SBDATA, with 4 repeats
+*/
+#define RGX_CR_FWCORE_DMI_SBDATA_REPEATCOUNT              (4U)
+/*
+    Register RGX_CR_FWCORE_DMI_SBDATA0
+*/
+#define RGX_CR_FWCORE_DMI_SBDATA0                         (0x35E0U)
+#define RGX_CR_FWCORE_DMI_SBDATA0_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FWCORE_DMI_SBDATA0_DATA_SHIFT              (0U)
+#define RGX_CR_FWCORE_DMI_SBDATA0_DATA_CLRMSK             (0x00000000U)
+
+
+/*
+    Register RGX_CR_FWCORE_DMI_SBDATA1
+*/
+#define RGX_CR_FWCORE_DMI_SBDATA1                         (0x35E8U)
+#define RGX_CR_FWCORE_DMI_SBDATA1_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FWCORE_DMI_SBDATA1_DATA_SHIFT              (0U)
+#define RGX_CR_FWCORE_DMI_SBDATA1_DATA_CLRMSK             (0x00000000U)
+
+
+/*
+    Register RGX_CR_FWCORE_DMI_SBDATA2
+*/
+#define RGX_CR_FWCORE_DMI_SBDATA2                         (0x35F0U)
+#define RGX_CR_FWCORE_DMI_SBDATA2_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FWCORE_DMI_SBDATA2_DATA_SHIFT              (0U)
+#define RGX_CR_FWCORE_DMI_SBDATA2_DATA_CLRMSK             (0x00000000U)
+
+
+/*
+    Register RGX_CR_FWCORE_DMI_SBDATA3
+*/
+#define RGX_CR_FWCORE_DMI_SBDATA3                         (0x35F8U)
+#define RGX_CR_FWCORE_DMI_SBDATA3_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FWCORE_DMI_SBDATA3_DATA_SHIFT              (0U)
+#define RGX_CR_FWCORE_DMI_SBDATA3_DATA_CLRMSK             (0x00000000U)
+
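+/*
+    Usage note (illustrative sketch, not part of the register description):
+    the RGX_CR_FWCORE_DMI_SBDATA group has 4 repeats at a stride of 8 bytes
+    (0x35E0, 0x35E8, 0x35F0, 0x35F8 above), so repeat i can be addressed as
+    RGX_CR_FWCORE_DMI_SBDATA0 + (i * 8). Assuming a register base pointer
+    pvRegsBase and an OSWriteHWReg32(base, offset, value) style accessor
+    (both assumptions, not defined in this header):
+
+        for (i = 0; i < RGX_CR_FWCORE_DMI_SBDATA_REPEATCOUNT; i++)
+        {
+            OSWriteHWReg32(pvRegsBase,
+                           RGX_CR_FWCORE_DMI_SBDATA0 + (i * 8U),
+                           aui32Data[i]);
+        }
+*/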
+
+/*
+    Register RGX_CR_FWCORE_DMI_HALTSUM0
+*/
+#define RGX_CR_FWCORE_DMI_HALTSUM0                        (0x3600U)
+#define RGX_CR_FWCORE_DMI_HALTSUM0_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FWCORE_DMI_HALTSUM0_VAL_SHIFT              (0U)
+#define RGX_CR_FWCORE_DMI_HALTSUM0_VAL_CLRMSK             (0x00000000U)
+
+
+/*
+    Register RGX_CR_SLC_CTRL_MISC
+*/
+#define RGX_CR_SLC_CTRL_MISC                              (0x3800U)
+#define RGX_CR_SLC_CTRL_MISC_MASKFULL                     (IMG_UINT64_C(0xFFFFFFFF03FF010F))
+#define RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_SHIFT          (32U)
+#define RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_CLRMSK         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SLC_CTRL_MISC_TAG_ID_LIMIT_CONTROL_SHIFT   (25U)
+#define RGX_CR_SLC_CTRL_MISC_TAG_ID_LIMIT_CONTROL_CLRMSK  (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF))
+#define RGX_CR_SLC_CTRL_MISC_TAG_ID_LIMIT_CONTROL_EN      (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_SHIFT        (24U)
+#define RGX_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_CLRMSK       (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF))
+#define RGX_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_EN           (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SHIFT       (16U)
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK      (IMG_UINT64_C(0xFFFFFFFFFF00FFFF))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_64_BYTE (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_128_BYTE (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH1 (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH2 (IMG_UINT64_C(0x0000000000110000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1   (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH2_SCRAMBLE (IMG_UINT64_C(0x0000000000210000))
+#define RGX_CR_SLC_CTRL_MISC_PAUSE_SHIFT                  (8U)
+#define RGX_CR_SLC_CTRL_MISC_PAUSE_CLRMSK                 (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF))
+#define RGX_CR_SLC_CTRL_MISC_PAUSE_EN                     (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_SLC_CTRL_MISC_RESP_PRIORITY_SHIFT          (3U)
+#define RGX_CR_SLC_CTRL_MISC_RESP_PRIORITY_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_CR_SLC_CTRL_MISC_RESP_PRIORITY_EN             (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_SHIFT  (2U)
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_EN     (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_SHIFT (1U)
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN   (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_SHIFT  (0U)
+#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN     (IMG_UINT64_C(0x0000000000000001))
+
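+/*
+    Usage note (illustrative sketch): the ADDR_DECODE_MODE_* values above are
+    already shifted into the field cleared by
+    RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK, so a mode is selected with a
+    clear-then-OR read-modify-write. Assuming hypothetical
+    OSReadHWReg64()/OSWriteHWReg64() accessors and a base pointer pvRegsBase:
+
+        ui64Misc  = OSReadHWReg64(pvRegsBase, RGX_CR_SLC_CTRL_MISC);
+        ui64Misc &= RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK;
+        ui64Misc |= RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1;
+        OSWriteHWReg64(pvRegsBase, RGX_CR_SLC_CTRL_MISC, ui64Misc);
+*/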
+
+/*
+    Register RGX_CR_SLC_CTRL_FLUSH_INVAL
+*/
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL                       (0x3818U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_MASKFULL              (IMG_UINT64_C(0x0000000080000FFF))
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_SHIFT            (31U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_CLRMSK           (0x7FFFFFFFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_EN               (0x80000000U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_SHIFT   (11U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_CLRMSK  (0xFFFFF7FFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_EN      (0x00000800U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_SHIFT   (10U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_CLRMSK  (0xFFFFFBFFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_EN      (0x00000400U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_SHIFT          (9U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_CLRMSK         (0xFFFFFDFFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_EN             (0x00000200U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_SHIFT          (8U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_CLRMSK         (0xFFFFFEFFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_EN             (0x00000100U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_SHIFT          (7U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_CLRMSK         (0xFFFFFF7FU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_EN             (0x00000080U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_SHIFT          (6U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_CLRMSK         (0xFFFFFFBFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_EN             (0x00000040U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_SHIFT    (5U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_CLRMSK   (0xFFFFFFDFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_EN       (0x00000020U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_SHIFT          (4U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_CLRMSK         (0xFFFFFFEFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_EN             (0x00000010U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_SHIFT      (3U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_CLRMSK     (0xFFFFFFF7U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_EN         (0x00000008U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_SHIFT        (2U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_CLRMSK       (0xFFFFFFFBU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_EN           (0x00000004U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_SHIFT           (1U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_CLRMSK          (0xFFFFFFFDU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_EN              (0x00000002U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_SHIFT             (0U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_CLRMSK            (0xFFFFFFFEU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN                (0x00000001U)
+
+
+/*
+    Register RGX_CR_SLC_STATUS0
+*/
+#define RGX_CR_SLC_STATUS0                                (0x3820U)
+#define RGX_CR_SLC_STATUS0_MASKFULL                       (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_SHIFT      (2U)
+#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_CLRMSK     (0xFFFFFFFBU)
+#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_EN         (0x00000004U)
+#define RGX_CR_SLC_STATUS0_INVAL_PENDING_SHIFT            (1U)
+#define RGX_CR_SLC_STATUS0_INVAL_PENDING_CLRMSK           (0xFFFFFFFDU)
+#define RGX_CR_SLC_STATUS0_INVAL_PENDING_EN               (0x00000002U)
+#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_SHIFT            (0U)
+#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_CLRMSK           (0xFFFFFFFEU)
+#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_EN               (0x00000001U)
+
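+/*
+    Usage note (illustrative sketch): a full SLC flush/invalidate would be
+    requested by writing RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN to
+    RGX_CR_SLC_CTRL_FLUSH_INVAL and polling RGX_CR_SLC_STATUS0 until
+    FLUSH_INVAL_PENDING clears. Assuming hypothetical
+    OSWriteHWReg32()/OSReadHWReg32() accessors and a base pointer pvRegsBase;
+    a real driver would also bound the poll with a timeout:
+
+        OSWriteHWReg32(pvRegsBase, RGX_CR_SLC_CTRL_FLUSH_INVAL,
+                       RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN);
+
+        while (OSReadHWReg32(pvRegsBase, RGX_CR_SLC_STATUS0) &
+               RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_EN)
+            ;
+*/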
+
+/*
+    Register RGX_CR_SLC_CTRL_BYPASS
+*/
+#define RGX_CR_SLC_CTRL_BYPASS                            (0x3828U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__MASKFULL          (IMG_UINT64_C(0x0FFFFFFFFFFF7FFF))
+#define RGX_CR_SLC_CTRL_BYPASS_MASKFULL                   (IMG_UINT64_C(0x000000000FFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_ZLS_SHIFT    (59U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_ZLS_CLRMSK   (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_ZLS_EN       (IMG_UINT64_C(0x0800000000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_HEADER_SHIFT (58U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_HEADER_CLRMSK (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_HEADER_EN (IMG_UINT64_C(0x0400000000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_HEADER_SHIFT (57U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_HEADER_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_HEADER_EN (IMG_UINT64_C(0x0200000000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_DATA_SHIFT (56U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_DATA_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_DATA_EN (IMG_UINT64_C(0x0100000000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_DATA_SHIFT (55U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_DATA_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_DATA_EN (IMG_UINT64_C(0x0080000000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_PBE_SHIFT    (54U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_PBE_CLRMSK   (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_PBE_EN       (IMG_UINT64_C(0x0040000000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_DM_COMPUTE_SHIFT   (53U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_DM_COMPUTE_CLRMSK  (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_DM_COMPUTE_EN      (IMG_UINT64_C(0x0020000000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_PDSRW_NOLINEFILL_SHIFT     (52U)
+#define RGX_CR_SLC_CTRL_BYPASS_PDSRW_NOLINEFILL_CLRMSK    (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_PDSRW_NOLINEFILL_EN        (IMG_UINT64_C(0x0010000000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_PBE_NOLINEFILL_SHIFT       (51U)
+#define RGX_CR_SLC_CTRL_BYPASS_PBE_NOLINEFILL_CLRMSK      (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_PBE_NOLINEFILL_EN          (IMG_UINT64_C(0x0008000000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBC_SHIFT              (50U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBC_CLRMSK             (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBC_EN                 (IMG_UINT64_C(0x0004000000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_RREQ_SHIFT         (49U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_RREQ_CLRMSK        (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_RREQ_EN            (IMG_UINT64_C(0x0002000000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CREQ_SHIFT         (48U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CREQ_CLRMSK        (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CREQ_EN            (IMG_UINT64_C(0x0001000000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_PREQ_SHIFT         (47U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_PREQ_CLRMSK        (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_PREQ_EN            (IMG_UINT64_C(0x0000800000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_DBSC_SHIFT         (46U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_DBSC_CLRMSK        (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_DBSC_EN            (IMG_UINT64_C(0x0000400000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_SHIFT              (45U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_CLRMSK             (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_EN                 (IMG_UINT64_C(0x0000200000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PBE_SHIFT              (44U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PBE_CLRMSK             (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PBE_EN                 (IMG_UINT64_C(0x0000100000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_ISP_SHIFT              (43U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_ISP_CLRMSK             (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_ISP_EN                 (IMG_UINT64_C(0x0000080000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PM_SHIFT               (42U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PM_CLRMSK              (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PM_EN                  (IMG_UINT64_C(0x0000040000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TDM_SHIFT              (41U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TDM_CLRMSK             (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TDM_EN                 (IMG_UINT64_C(0x0000020000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_CDM_SHIFT              (40U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_CDM_CLRMSK             (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_CDM_EN                 (IMG_UINT64_C(0x0000010000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_PDS_STATE_SHIFT   (39U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_PDS_STATE_CLRMSK  (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_PDS_STATE_EN      (IMG_UINT64_C(0x0000008000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_DB_SHIFT          (38U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_DB_CLRMSK         (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_DB_EN             (IMG_UINT64_C(0x0000004000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_VTX_VAR_SHIFT     (37U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_VTX_VAR_CLRMSK    (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_VTX_VAR_EN        (IMG_UINT64_C(0x0000002000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_VDM_SHIFT              (36U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_VDM_CLRMSK             (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_VDM_EN                 (IMG_UINT64_C(0x0000001000000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_STREAM_SHIFT    (35U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_STREAM_CLRMSK   (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_STREAM_EN       (IMG_UINT64_C(0x0000000800000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_REGION_SHIFT    (34U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_REGION_CLRMSK   (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_REGION_EN       (IMG_UINT64_C(0x0000000400000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_VCE_SHIFT           (33U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_VCE_CLRMSK          (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_VCE_EN              (IMG_UINT64_C(0x0000000200000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PPP_SHIFT           (32U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PPP_CLRMSK          (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PPP_EN              (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_FASTRENDER_SHIFT        (31U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_FASTRENDER_CLRMSK       (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_FASTRENDER_EN           (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PM_ALIST_SHIFT          (30U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PM_ALIST_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PM_ALIST_EN             (IMG_UINT64_C(0x0000000040000000))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_TE_SHIFT             (29U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_TE_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_TE_EN                (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_VCE_SHIFT            (28U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_VCE_CLRMSK           (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_VCE_EN               (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_SHIFT        (27U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_CLRMSK       (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_EN           (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_SHIFT               (26U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_EN                  (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_SHIFT          (25U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_EN             (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_SHIFT              (24U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_EN                 (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_SHIFT             (23U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_EN                (IMG_UINT64_C(0x0000000000800000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_SHIFT              (22U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_EN                 (IMG_UINT64_C(0x0000000000400000))
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_SHIFT             (21U)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_EN                (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_SHIFT               (20U)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_EN                  (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_SHIFT              (19U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_EN                 (IMG_UINT64_C(0x0000000000080000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_SHIFT              (18U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_EN                 (IMG_UINT64_C(0x0000000000040000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_SHIFT              (17U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_EN                 (IMG_UINT64_C(0x0000000000020000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_SHIFT           (16U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_EN              (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_SHIFT          (15U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_EN             (IMG_UINT64_C(0x0000000000008000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_SHIFT              (14U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_EN                 (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_SHIFT             (13U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_EN                (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_SHIFT             (12U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_EN                (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_SHIFT           (11U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_EN              (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_SHIFT           (10U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_EN              (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_SHIFT           (9U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF))
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_EN              (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_SHIFT               (8U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_EN                  (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_SHIFT               (7U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_EN                  (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_SHIFT               (6U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_EN                  (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_SHIFT         (5U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_EN            (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_SHIFT               (4U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_EN                  (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_SHIFT           (3U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_EN              (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_SHIFT             (2U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_EN                (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_SHIFT                (1U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_CLRMSK               (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_EN                   (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_SLC_CTRL_BYPASS_ALL_SHIFT                  (0U)
+#define RGX_CR_SLC_CTRL_BYPASS_ALL_CLRMSK                 (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_SLC_CTRL_BYPASS_ALL_EN                     (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_SLC_STATUS1
+*/
+#define RGX_CR_SLC_STATUS1                                (0x3870U)
+#define RGX_CR_SLC_STATUS1_MASKFULL                       (IMG_UINT64_C(0x800003FF03FFFFFF))
+#define RGX_CR_SLC_STATUS1_PAUSED_SHIFT                   (63U)
+#define RGX_CR_SLC_STATUS1_PAUSED_CLRMSK                  (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF))
+#define RGX_CR_SLC_STATUS1_PAUSED_EN                      (IMG_UINT64_C(0x8000000000000000))
+#define RGX_CR_SLC_STATUS1_READS1_SHIFT                   (32U)
+#define RGX_CR_SLC_STATUS1_READS1_CLRMSK                  (IMG_UINT64_C(0xFFFFFC00FFFFFFFF))
+#define RGX_CR_SLC_STATUS1_READS0_SHIFT                   (16U)
+#define RGX_CR_SLC_STATUS1_READS0_CLRMSK                  (IMG_UINT64_C(0xFFFFFFFFFC00FFFF))
+#define RGX_CR_SLC_STATUS1_READS1_EXT_SHIFT               (8U)
+#define RGX_CR_SLC_STATUS1_READS1_EXT_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFFF00FF))
+#define RGX_CR_SLC_STATUS1_READS0_EXT_SHIFT               (0U)
+#define RGX_CR_SLC_STATUS1_READS0_EXT_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+    Register RGX_CR_SLC_IDLE
+*/
+#define RGX_CR_SLC_IDLE                                   (0x3898U)
+#define RGX_CR_SLC_IDLE__XE_MEM__MASKFULL                 (IMG_UINT64_C(0x00000000000003FF))
+#define RGX_CR_SLC_IDLE_MASKFULL                          (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_SLC_IDLE_MH_SYSARB1_SHIFT                  (9U)
+#define RGX_CR_SLC_IDLE_MH_SYSARB1_CLRMSK                 (0xFFFFFDFFU)
+#define RGX_CR_SLC_IDLE_MH_SYSARB1_EN                     (0x00000200U)
+#define RGX_CR_SLC_IDLE_MH_SYSARB0_SHIFT                  (8U)
+#define RGX_CR_SLC_IDLE_MH_SYSARB0_CLRMSK                 (0xFFFFFEFFU)
+#define RGX_CR_SLC_IDLE_MH_SYSARB0_EN                     (0x00000100U)
+#define RGX_CR_SLC_IDLE_IMGBV4_SHIFT                      (7U)
+#define RGX_CR_SLC_IDLE_IMGBV4_CLRMSK                     (0xFFFFFF7FU)
+#define RGX_CR_SLC_IDLE_IMGBV4_EN                         (0x00000080U)
+#define RGX_CR_SLC_IDLE_CACHE_BANKS_SHIFT                 (6U)
+#define RGX_CR_SLC_IDLE_CACHE_BANKS_CLRMSK                (0xFFFFFFBFU)
+#define RGX_CR_SLC_IDLE_CACHE_BANKS_EN                    (0x00000040U)
+#define RGX_CR_SLC_IDLE_RBOFIFO_SHIFT                     (5U)
+#define RGX_CR_SLC_IDLE_RBOFIFO_CLRMSK                    (0xFFFFFFDFU)
+#define RGX_CR_SLC_IDLE_RBOFIFO_EN                        (0x00000020U)
+#define RGX_CR_SLC_IDLE_FRC_CONV_SHIFT                    (4U)
+#define RGX_CR_SLC_IDLE_FRC_CONV_CLRMSK                   (0xFFFFFFEFU)
+#define RGX_CR_SLC_IDLE_FRC_CONV_EN                       (0x00000010U)
+#define RGX_CR_SLC_IDLE_VXE_CONV_SHIFT                    (3U)
+#define RGX_CR_SLC_IDLE_VXE_CONV_CLRMSK                   (0xFFFFFFF7U)
+#define RGX_CR_SLC_IDLE_VXE_CONV_EN                       (0x00000008U)
+#define RGX_CR_SLC_IDLE_VXD_CONV_SHIFT                    (2U)
+#define RGX_CR_SLC_IDLE_VXD_CONV_CLRMSK                   (0xFFFFFFFBU)
+#define RGX_CR_SLC_IDLE_VXD_CONV_EN                       (0x00000004U)
+#define RGX_CR_SLC_IDLE_BIF1_CONV_SHIFT                   (1U)
+#define RGX_CR_SLC_IDLE_BIF1_CONV_CLRMSK                  (0xFFFFFFFDU)
+#define RGX_CR_SLC_IDLE_BIF1_CONV_EN                      (0x00000002U)
+#define RGX_CR_SLC_IDLE_CBAR_SHIFT                        (0U)
+#define RGX_CR_SLC_IDLE_CBAR_CLRMSK                       (0xFFFFFFFEU)
+#define RGX_CR_SLC_IDLE_CBAR_EN                           (0x00000001U)
+
+
+/*
+    Register RGX_CR_SLC_STATUS2
+*/
+#define RGX_CR_SLC_STATUS2                                (0x3908U)
+#define RGX_CR_SLC_STATUS2_MASKFULL                       (IMG_UINT64_C(0x000003FF03FFFFFF))
+#define RGX_CR_SLC_STATUS2_READS3_SHIFT                   (32U)
+#define RGX_CR_SLC_STATUS2_READS3_CLRMSK                  (IMG_UINT64_C(0xFFFFFC00FFFFFFFF))
+#define RGX_CR_SLC_STATUS2_READS2_SHIFT                   (16U)
+#define RGX_CR_SLC_STATUS2_READS2_CLRMSK                  (IMG_UINT64_C(0xFFFFFFFFFC00FFFF))
+#define RGX_CR_SLC_STATUS2_READS3_EXT_SHIFT               (8U)
+#define RGX_CR_SLC_STATUS2_READS3_EXT_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFFF00FF))
+#define RGX_CR_SLC_STATUS2_READS2_EXT_SHIFT               (0U)
+#define RGX_CR_SLC_STATUS2_READS2_EXT_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+    Register RGX_CR_SLC_CTRL_MISC2
+*/
+#define RGX_CR_SLC_CTRL_MISC2                             (0x3930U)
+#define RGX_CR_SLC_CTRL_MISC2_MASKFULL                    (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_SHIFT         (0U)
+#define RGX_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_CLRMSK        (0x00000000U)
+
+
+/*
+    Register RGX_CR_SLC_CROSSBAR_LOAD_BALANCE
+*/
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE                  (0x3938U)
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_MASKFULL         (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_SHIFT     (0U)
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_CLRMSK    (0xFFFFFFFEU)
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_EN        (0x00000001U)
+
+
+/*
+    Register RGX_CR_SLC_SIZE_IN_KB
+*/
+#define RGX_CR_SLC_SIZE_IN_KB                             (0x3970U)
+#define RGX_CR_SLC_SIZE_IN_KB_MASKFULL                    (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_SLC_SIZE_IN_KB_SIZE_SHIFT                  (0U)
+#define RGX_CR_SLC_SIZE_IN_KB_SIZE_CLRMSK                 (0xFFFF0000U)
+
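+/*
+    Usage note (illustrative sketch): a field is extracted by masking with the
+    complement of its _CLRMSK and shifting down by its _SHIFT. For example,
+    reading the cache size in KB, assuming a hypothetical OSReadHWReg32()
+    accessor and a base pointer pvRegsBase:
+
+        ui32SizeInKB = (OSReadHWReg32(pvRegsBase, RGX_CR_SLC_SIZE_IN_KB) &
+                        ~RGX_CR_SLC_SIZE_IN_KB_SIZE_CLRMSK) >>
+                        RGX_CR_SLC_SIZE_IN_KB_SIZE_SHIFT;
+*/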
+
+/*
+    Register RGX_CR_USC_TIMER
+*/
+#define RGX_CR_USC_TIMER                                  (0x46C8U)
+#define RGX_CR_USC_TIMER_MASKFULL                         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_TIMER_CNT_SHIFT                        (0U)
+#define RGX_CR_USC_TIMER_CNT_CLRMSK                       (0x00000000U)
+
+
+/*
+    Register RGX_CR_USC_TIMER_CNT
+*/
+#define RGX_CR_USC_TIMER_CNT                              (0x46D0U)
+#define RGX_CR_USC_TIMER_CNT_MASKFULL                     (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_USC_TIMER_CNT_RESET_SHIFT                  (0U)
+#define RGX_CR_USC_TIMER_CNT_RESET_CLRMSK                 (0xFFFFFFFEU)
+#define RGX_CR_USC_TIMER_CNT_RESET_EN                     (0x00000001U)
+
+
+/*
+    Register RGX_CR_USC_UVS0_CHECKSUM
+*/
+#define RGX_CR_USC_UVS0_CHECKSUM                          (0x5000U)
+#define RGX_CR_USC_UVS0_CHECKSUM_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS0_CHECKSUM_VALUE_SHIFT              (0U)
+#define RGX_CR_USC_UVS0_CHECKSUM_VALUE_CLRMSK             (0x00000000U)
+
+
+/*
+    Register RGX_CR_USC_UVS1_CHECKSUM
+*/
+#define RGX_CR_USC_UVS1_CHECKSUM                          (0x5008U)
+#define RGX_CR_USC_UVS1_CHECKSUM_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS1_CHECKSUM_VALUE_SHIFT              (0U)
+#define RGX_CR_USC_UVS1_CHECKSUM_VALUE_CLRMSK             (0x00000000U)
+
+
+/*
+    Register RGX_CR_USC_UVS2_CHECKSUM
+*/
+#define RGX_CR_USC_UVS2_CHECKSUM                          (0x5010U)
+#define RGX_CR_USC_UVS2_CHECKSUM_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS2_CHECKSUM_VALUE_SHIFT              (0U)
+#define RGX_CR_USC_UVS2_CHECKSUM_VALUE_CLRMSK             (0x00000000U)
+
+
+/*
+    Register RGX_CR_USC_UVS3_CHECKSUM
+*/
+#define RGX_CR_USC_UVS3_CHECKSUM                          (0x5018U)
+#define RGX_CR_USC_UVS3_CHECKSUM_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS3_CHECKSUM_VALUE_SHIFT              (0U)
+#define RGX_CR_USC_UVS3_CHECKSUM_VALUE_CLRMSK             (0x00000000U)
+
+
+/*
+    Register RGX_CR_PPP_SIGNATURE
+*/
+#define RGX_CR_PPP_SIGNATURE                              (0x5020U)
+#define RGX_CR_PPP_SIGNATURE_MASKFULL                     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PPP_SIGNATURE_VALUE_SHIFT                  (0U)
+#define RGX_CR_PPP_SIGNATURE_VALUE_CLRMSK                 (0x00000000U)
+
+
+/*
+    Register RGX_CR_TE_SIGNATURE
+*/
+#define RGX_CR_TE_SIGNATURE                               (0x5028U)
+#define RGX_CR_TE_SIGNATURE_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TE_SIGNATURE_VALUE_SHIFT                   (0U)
+#define RGX_CR_TE_SIGNATURE_VALUE_CLRMSK                  (0x00000000U)
+
+
+/*
+    Register RGX_CR_TE_CHECKSUM
+*/
+#define RGX_CR_TE_CHECKSUM                                (0x5110U)
+#define RGX_CR_TE_CHECKSUM_MASKFULL                       (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TE_CHECKSUM_VALUE_SHIFT                    (0U)
+#define RGX_CR_TE_CHECKSUM_VALUE_CLRMSK                   (0x00000000U)
+
+
+/*
+    Register RGX_CR_USC_UVB_CHECKSUM
+*/
+#define RGX_CR_USC_UVB_CHECKSUM                           (0x5118U)
+#define RGX_CR_USC_UVB_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVB_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_USC_UVB_CHECKSUM_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_VCE_CHECKSUM
+*/
+#define RGX_CR_VCE_CHECKSUM                               (0x5030U)
+#define RGX_CR_VCE_CHECKSUM_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_VCE_CHECKSUM_VALUE_SHIFT                   (0U)
+#define RGX_CR_VCE_CHECKSUM_VALUE_CLRMSK                  (0x00000000U)
+
+
+/*
+    Register RGX_CR_ISP_PDS_CHECKSUM
+*/
+#define RGX_CR_ISP_PDS_CHECKSUM                           (0x5038U)
+#define RGX_CR_ISP_PDS_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_ISP_TPF_CHECKSUM
+*/
+#define RGX_CR_ISP_TPF_CHECKSUM                           (0x5040U)
+#define RGX_CR_ISP_TPF_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_TFPU_PLANE0_CHECKSUM
+*/
+#define RGX_CR_TFPU_PLANE0_CHECKSUM                       (0x5048U)
+#define RGX_CR_TFPU_PLANE0_CHECKSUM_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TFPU_PLANE0_CHECKSUM_VALUE_SHIFT           (0U)
+#define RGX_CR_TFPU_PLANE0_CHECKSUM_VALUE_CLRMSK          (0x00000000U)
+
+
+/*
+    Register RGX_CR_TFPU_PLANE1_CHECKSUM
+*/
+#define RGX_CR_TFPU_PLANE1_CHECKSUM                       (0x5050U)
+#define RGX_CR_TFPU_PLANE1_CHECKSUM_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TFPU_PLANE1_CHECKSUM_VALUE_SHIFT           (0U)
+#define RGX_CR_TFPU_PLANE1_CHECKSUM_VALUE_CLRMSK          (0x00000000U)
+
+
+/*
+    Register RGX_CR_PBE_CHECKSUM
+*/
+#define RGX_CR_PBE_CHECKSUM                               (0x5058U)
+#define RGX_CR_PBE_CHECKSUM_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PBE_CHECKSUM_VALUE_SHIFT                   (0U)
+#define RGX_CR_PBE_CHECKSUM_VALUE_CLRMSK                  (0x00000000U)
+
+
+/*
+    Register RGX_CR_PDS_DOUTM_STM_SIGNATURE
+*/
+#define RGX_CR_PDS_DOUTM_STM_SIGNATURE                    (0x5060U)
+#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_SHIFT        (0U)
+#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_CLRMSK       (0x00000000U)
+
+
+/*
+    Register RGX_CR_IFPU_ISP_CHECKSUM
+*/
+#define RGX_CR_IFPU_ISP_CHECKSUM                          (0x5068U)
+#define RGX_CR_IFPU_ISP_CHECKSUM_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_SHIFT              (0U)
+#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_CLRMSK             (0x00000000U)
+
+
+/*
+    Register RGX_CR_USC_UVS4_CHECKSUM
+*/
+#define RGX_CR_USC_UVS4_CHECKSUM                          (0x5100U)
+#define RGX_CR_USC_UVS4_CHECKSUM_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS4_CHECKSUM_VALUE_SHIFT              (0U)
+#define RGX_CR_USC_UVS4_CHECKSUM_VALUE_CLRMSK             (0x00000000U)
+
+
+/*
+    Register RGX_CR_USC_UVS5_CHECKSUM
+*/
+#define RGX_CR_USC_UVS5_CHECKSUM                          (0x5108U)
+#define RGX_CR_USC_UVS5_CHECKSUM_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS5_CHECKSUM_VALUE_SHIFT              (0U)
+#define RGX_CR_USC_UVS5_CHECKSUM_VALUE_CLRMSK             (0x00000000U)
+
+
+/*
+    Register RGX_CR_PPP_CLIP_CHECKSUM
+*/
+#define RGX_CR_PPP_CLIP_CHECKSUM                          (0x5120U)
+#define RGX_CR_PPP_CLIP_CHECKSUM_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_SHIFT              (0U)
+#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_CLRMSK             (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_TA_PHASE
+*/
+#define RGX_CR_PERF_TA_PHASE                              (0x6008U)
+#define RGX_CR_PERF_TA_PHASE_MASKFULL                     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_TA_PHASE_COUNT_SHIFT                  (0U)
+#define RGX_CR_PERF_TA_PHASE_COUNT_CLRMSK                 (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_3D_PHASE
+*/
+#define RGX_CR_PERF_3D_PHASE                              (0x6010U)
+#define RGX_CR_PERF_3D_PHASE_MASKFULL                     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_3D_PHASE_COUNT_SHIFT                  (0U)
+#define RGX_CR_PERF_3D_PHASE_COUNT_CLRMSK                 (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_COMPUTE_PHASE
+*/
+#define RGX_CR_PERF_COMPUTE_PHASE                         (0x6018U)
+#define RGX_CR_PERF_COMPUTE_PHASE_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_COMPUTE_PHASE_COUNT_SHIFT             (0U)
+#define RGX_CR_PERF_COMPUTE_PHASE_COUNT_CLRMSK            (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_TA_CYCLE
+*/
+#define RGX_CR_PERF_TA_CYCLE                              (0x6020U)
+#define RGX_CR_PERF_TA_CYCLE_MASKFULL                     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_TA_CYCLE_COUNT_SHIFT                  (0U)
+#define RGX_CR_PERF_TA_CYCLE_COUNT_CLRMSK                 (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_3D_CYCLE
+*/
+#define RGX_CR_PERF_3D_CYCLE                              (0x6028U)
+#define RGX_CR_PERF_3D_CYCLE_MASKFULL                     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_3D_CYCLE_COUNT_SHIFT                  (0U)
+#define RGX_CR_PERF_3D_CYCLE_COUNT_CLRMSK                 (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_COMPUTE_CYCLE
+*/
+#define RGX_CR_PERF_COMPUTE_CYCLE                         (0x6030U)
+#define RGX_CR_PERF_COMPUTE_CYCLE_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_COMPUTE_CYCLE_COUNT_SHIFT             (0U)
+#define RGX_CR_PERF_COMPUTE_CYCLE_COUNT_CLRMSK            (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_TA_OR_3D_CYCLE
+*/
+#define RGX_CR_PERF_TA_OR_3D_CYCLE                        (0x6038U)
+#define RGX_CR_PERF_TA_OR_3D_CYCLE_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_TA_OR_3D_CYCLE_COUNT_SHIFT            (0U)
+#define RGX_CR_PERF_TA_OR_3D_CYCLE_COUNT_CLRMSK           (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_INITIAL_TA_CYCLE
+*/
+#define RGX_CR_PERF_INITIAL_TA_CYCLE                      (0x6040U)
+#define RGX_CR_PERF_INITIAL_TA_CYCLE_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_INITIAL_TA_CYCLE_COUNT_SHIFT          (0U)
+#define RGX_CR_PERF_INITIAL_TA_CYCLE_COUNT_CLRMSK         (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC0_READ_STALL
+*/
+#define RGX_CR_PERF_SLC0_READ_STALL                       (0x60B8U)
+#define RGX_CR_PERF_SLC0_READ_STALL_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_SHIFT           (0U)
+#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_CLRMSK          (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC0_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC0_WRITE_STALL                      (0x60C0U)
+#define RGX_CR_PERF_SLC0_WRITE_STALL_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_SHIFT          (0U)
+#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_CLRMSK         (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC1_READ_STALL
+*/
+#define RGX_CR_PERF_SLC1_READ_STALL                       (0x60E0U)
+#define RGX_CR_PERF_SLC1_READ_STALL_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_SHIFT           (0U)
+#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_CLRMSK          (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC1_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC1_WRITE_STALL                      (0x60E8U)
+#define RGX_CR_PERF_SLC1_WRITE_STALL_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_SHIFT          (0U)
+#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_CLRMSK         (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC2_READ_STALL
+*/
+#define RGX_CR_PERF_SLC2_READ_STALL                       (0x6158U)
+#define RGX_CR_PERF_SLC2_READ_STALL_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_SHIFT           (0U)
+#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_CLRMSK          (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC2_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC2_WRITE_STALL                      (0x6160U)
+#define RGX_CR_PERF_SLC2_WRITE_STALL_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_SHIFT          (0U)
+#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_CLRMSK         (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC3_READ_STALL
+*/
+#define RGX_CR_PERF_SLC3_READ_STALL                       (0x6180U)
+#define RGX_CR_PERF_SLC3_READ_STALL_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_SHIFT           (0U)
+#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_CLRMSK          (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC3_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC3_WRITE_STALL                      (0x6188U)
+#define RGX_CR_PERF_SLC3_WRITE_STALL_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_SHIFT          (0U)
+#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_CLRMSK         (0x00000000U)
+
+
+/*
+    Register RGX_CR_PERF_3D_SPINUP
+*/
+#define RGX_CR_PERF_3D_SPINUP                             (0x6220U)
+#define RGX_CR_PERF_3D_SPINUP_MASKFULL                    (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_3D_SPINUP_CYCLES_SHIFT                (0U)
+#define RGX_CR_PERF_3D_SPINUP_CYCLES_CLRMSK               (0x00000000U)
+
+
+/*
+    Register RGX_CR_AXI_ACE_LITE_CONFIGURATION
+*/
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION                 (0x38C0U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_MASKFULL        (IMG_UINT64_C(0x00003FFFFFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_SHIFT (45U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_EN (IMG_UINT64_C(0x0000200000000000))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_SHIFT (37U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_CLRMSK (IMG_UINT64_C(0xFFFFE01FFFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_SHIFT (36U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_EN (IMG_UINT64_C(0x0000001000000000))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_SHIFT (35U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_EN (IMG_UINT64_C(0x0000000800000000))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_SHIFT (34U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_EN (IMG_UINT64_C(0x0000000400000000))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT (30U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFC3FFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT (26U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC3FFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT (22U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFC3FFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_SHIFT (20U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_SHIFT (18U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT (16U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT (14U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF3FFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT (12U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT (10U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF3FF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT (8U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_SHIFT (4U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF0F))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_SHIFT (0U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF0))
+
+
+/*
+    Register RGX_CR_POWER_ESTIMATE_RESULT
+*/
+#define RGX_CR_POWER_ESTIMATE_RESULT                      (0x6328U)
+#define RGX_CR_POWER_ESTIMATE_RESULT_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_POWER_ESTIMATE_RESULT_VALUE_SHIFT          (0U)
+#define RGX_CR_POWER_ESTIMATE_RESULT_VALUE_CLRMSK         (0x00000000U)
+
+
+/*
+    Register RGX_CR_TA_PERF
+*/
+#define RGX_CR_TA_PERF                                    (0x7600U)
+#define RGX_CR_TA_PERF_MASKFULL                           (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_TA_PERF_CLR_3_SHIFT                        (4U)
+#define RGX_CR_TA_PERF_CLR_3_CLRMSK                       (0xFFFFFFEFU)
+#define RGX_CR_TA_PERF_CLR_3_EN                           (0x00000010U)
+#define RGX_CR_TA_PERF_CLR_2_SHIFT                        (3U)
+#define RGX_CR_TA_PERF_CLR_2_CLRMSK                       (0xFFFFFFF7U)
+#define RGX_CR_TA_PERF_CLR_2_EN                           (0x00000008U)
+#define RGX_CR_TA_PERF_CLR_1_SHIFT                        (2U)
+#define RGX_CR_TA_PERF_CLR_1_CLRMSK                       (0xFFFFFFFBU)
+#define RGX_CR_TA_PERF_CLR_1_EN                           (0x00000004U)
+#define RGX_CR_TA_PERF_CLR_0_SHIFT                        (1U)
+#define RGX_CR_TA_PERF_CLR_0_CLRMSK                       (0xFFFFFFFDU)
+#define RGX_CR_TA_PERF_CLR_0_EN                           (0x00000002U)
+#define RGX_CR_TA_PERF_CTRL_ENABLE_SHIFT                  (0U)
+#define RGX_CR_TA_PERF_CTRL_ENABLE_CLRMSK                 (0xFFFFFFFEU)
+#define RGX_CR_TA_PERF_CTRL_ENABLE_EN                     (0x00000001U)
+
+
+/*
+    Register RGX_CR_TA_PERF_SELECT0
+*/
+#define RGX_CR_TA_PERF_SELECT0                            (0x7608U)
+#define RGX_CR_TA_PERF_SELECT0_MASKFULL                   (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TA_PERF_SELECT0_BATCH_MAX_SHIFT            (48U)
+#define RGX_CR_TA_PERF_SELECT0_BATCH_MAX_CLRMSK           (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT0_BATCH_MIN_SHIFT            (32U)
+#define RGX_CR_TA_PERF_SELECT0_BATCH_MIN_CLRMSK           (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT0_MODE_SHIFT                 (21U)
+#define RGX_CR_TA_PERF_SELECT0_MODE_CLRMSK                (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_TA_PERF_SELECT0_MODE_EN                    (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_SHIFT         (16U)
+#define RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define RGX_CR_TA_PERF_SELECT0_BIT_SELECT_SHIFT           (0U)
+#define RGX_CR_TA_PERF_SELECT0_BIT_SELECT_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_TA_PERF_SELECT1
+*/
+#define RGX_CR_TA_PERF_SELECT1                            (0x7610U)
+#define RGX_CR_TA_PERF_SELECT1_MASKFULL                   (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TA_PERF_SELECT1_BATCH_MAX_SHIFT            (48U)
+#define RGX_CR_TA_PERF_SELECT1_BATCH_MAX_CLRMSK           (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT1_BATCH_MIN_SHIFT            (32U)
+#define RGX_CR_TA_PERF_SELECT1_BATCH_MIN_CLRMSK           (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT1_MODE_SHIFT                 (21U)
+#define RGX_CR_TA_PERF_SELECT1_MODE_CLRMSK                (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_TA_PERF_SELECT1_MODE_EN                    (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_TA_PERF_SELECT1_GROUP_SELECT_SHIFT         (16U)
+#define RGX_CR_TA_PERF_SELECT1_GROUP_SELECT_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define RGX_CR_TA_PERF_SELECT1_BIT_SELECT_SHIFT           (0U)
+#define RGX_CR_TA_PERF_SELECT1_BIT_SELECT_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_TA_PERF_SELECT2
+*/
+#define RGX_CR_TA_PERF_SELECT2                            (0x7618U)
+#define RGX_CR_TA_PERF_SELECT2_MASKFULL                   (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TA_PERF_SELECT2_BATCH_MAX_SHIFT            (48U)
+#define RGX_CR_TA_PERF_SELECT2_BATCH_MAX_CLRMSK           (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT2_BATCH_MIN_SHIFT            (32U)
+#define RGX_CR_TA_PERF_SELECT2_BATCH_MIN_CLRMSK           (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT2_MODE_SHIFT                 (21U)
+#define RGX_CR_TA_PERF_SELECT2_MODE_CLRMSK                (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_TA_PERF_SELECT2_MODE_EN                    (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_TA_PERF_SELECT2_GROUP_SELECT_SHIFT         (16U)
+#define RGX_CR_TA_PERF_SELECT2_GROUP_SELECT_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define RGX_CR_TA_PERF_SELECT2_BIT_SELECT_SHIFT           (0U)
+#define RGX_CR_TA_PERF_SELECT2_BIT_SELECT_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_TA_PERF_SELECT3
+*/
+#define RGX_CR_TA_PERF_SELECT3                            (0x7620U)
+#define RGX_CR_TA_PERF_SELECT3_MASKFULL                   (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TA_PERF_SELECT3_BATCH_MAX_SHIFT            (48U)
+#define RGX_CR_TA_PERF_SELECT3_BATCH_MAX_CLRMSK           (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT3_BATCH_MIN_SHIFT            (32U)
+#define RGX_CR_TA_PERF_SELECT3_BATCH_MIN_CLRMSK           (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT3_MODE_SHIFT                 (21U)
+#define RGX_CR_TA_PERF_SELECT3_MODE_CLRMSK                (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_TA_PERF_SELECT3_MODE_EN                    (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_TA_PERF_SELECT3_GROUP_SELECT_SHIFT         (16U)
+#define RGX_CR_TA_PERF_SELECT3_GROUP_SELECT_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define RGX_CR_TA_PERF_SELECT3_BIT_SELECT_SHIFT           (0U)
+#define RGX_CR_TA_PERF_SELECT3_BIT_SELECT_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_TA_PERF_SELECTED_BITS
+*/
+#define RGX_CR_TA_PERF_SELECTED_BITS                      (0x7648U)
+#define RGX_CR_TA_PERF_SELECTED_BITS_MASKFULL             (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG3_SHIFT           (48U)
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG3_CLRMSK          (IMG_UINT64_C(0x0000FFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG2_SHIFT           (32U)
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG2_CLRMSK          (IMG_UINT64_C(0xFFFF0000FFFFFFFF))
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG1_SHIFT           (16U)
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG1_CLRMSK          (IMG_UINT64_C(0xFFFFFFFF0000FFFF))
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG0_SHIFT           (0U)
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG0_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_TA_PERF_COUNTER_0
+*/
+#define RGX_CR_TA_PERF_COUNTER_0                          (0x7650U)
+#define RGX_CR_TA_PERF_COUNTER_0_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TA_PERF_COUNTER_0_REG_SHIFT                (0U)
+#define RGX_CR_TA_PERF_COUNTER_0_REG_CLRMSK               (0x00000000U)
+
+
+/*
+    Register RGX_CR_TA_PERF_COUNTER_1
+*/
+#define RGX_CR_TA_PERF_COUNTER_1                          (0x7658U)
+#define RGX_CR_TA_PERF_COUNTER_1_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TA_PERF_COUNTER_1_REG_SHIFT                (0U)
+#define RGX_CR_TA_PERF_COUNTER_1_REG_CLRMSK               (0x00000000U)
+
+
+/*
+    Register RGX_CR_TA_PERF_COUNTER_2
+*/
+#define RGX_CR_TA_PERF_COUNTER_2                          (0x7660U)
+#define RGX_CR_TA_PERF_COUNTER_2_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TA_PERF_COUNTER_2_REG_SHIFT                (0U)
+#define RGX_CR_TA_PERF_COUNTER_2_REG_CLRMSK               (0x00000000U)
+
+
+/*
+    Register RGX_CR_TA_PERF_COUNTER_3
+*/
+#define RGX_CR_TA_PERF_COUNTER_3                          (0x7668U)
+#define RGX_CR_TA_PERF_COUNTER_3_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TA_PERF_COUNTER_3_REG_SHIFT                (0U)
+#define RGX_CR_TA_PERF_COUNTER_3_REG_CLRMSK               (0x00000000U)
+
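+/*
+    Usage note (illustrative sketch): a SELECTn value is composed by shifting
+    each field into place and masking with the complement of its _CLRMSK; the
+    counter block is enabled via RGX_CR_TA_PERF and the result read back from
+    RGX_CR_TA_PERF_COUNTER_n. The exact programming sequence is hardware
+    specific; assuming hypothetical OSWriteHWReg32()/OSWriteHWReg64()/
+    OSReadHWReg32() accessors, a base pointer pvRegsBase and arbitrary
+    ui32Group/ui32Bits selections:
+
+        ui64Select  = ((IMG_UINT64)ui32Group << RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_SHIFT) &
+                      ~RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_CLRMSK;
+        ui64Select |= ((IMG_UINT64)ui32Bits << RGX_CR_TA_PERF_SELECT0_BIT_SELECT_SHIFT) &
+                      ~RGX_CR_TA_PERF_SELECT0_BIT_SELECT_CLRMSK;
+        OSWriteHWReg64(pvRegsBase, RGX_CR_TA_PERF_SELECT0, ui64Select);
+        OSWriteHWReg32(pvRegsBase, RGX_CR_TA_PERF, RGX_CR_TA_PERF_CTRL_ENABLE_EN);
+        ui32Count = OSReadHWReg32(pvRegsBase, RGX_CR_TA_PERF_COUNTER_0);
+*/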
+
+/*
+    Register RGX_CR_RASTERISATION_PERF
+*/
+#define RGX_CR_RASTERISATION_PERF                         (0x7700U)
+#define RGX_CR_RASTERISATION_PERF_MASKFULL                (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_RASTERISATION_PERF_CLR_3_SHIFT             (4U)
+#define RGX_CR_RASTERISATION_PERF_CLR_3_CLRMSK            (0xFFFFFFEFU)
+#define RGX_CR_RASTERISATION_PERF_CLR_3_EN                (0x00000010U)
+#define RGX_CR_RASTERISATION_PERF_CLR_2_SHIFT             (3U)
+#define RGX_CR_RASTERISATION_PERF_CLR_2_CLRMSK            (0xFFFFFFF7U)
+#define RGX_CR_RASTERISATION_PERF_CLR_2_EN                (0x00000008U)
+#define RGX_CR_RASTERISATION_PERF_CLR_1_SHIFT             (2U)
+#define RGX_CR_RASTERISATION_PERF_CLR_1_CLRMSK            (0xFFFFFFFBU)
+#define RGX_CR_RASTERISATION_PERF_CLR_1_EN                (0x00000004U)
+#define RGX_CR_RASTERISATION_PERF_CLR_0_SHIFT             (1U)
+#define RGX_CR_RASTERISATION_PERF_CLR_0_CLRMSK            (0xFFFFFFFDU)
+#define RGX_CR_RASTERISATION_PERF_CLR_0_EN                (0x00000002U)
+#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_SHIFT       (0U)
+#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_CLRMSK      (0xFFFFFFFEU)
+#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_EN          (0x00000001U)
+
+
+/*
+    Register RGX_CR_RASTERISATION_PERF_SELECT0
+*/
+#define RGX_CR_RASTERISATION_PERF_SELECT0                 (0x7708U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_MASKFULL        (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_SHIFT      (21U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_EN         (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_RASTERISATION_PERF_COUNTER_0
+*/
+#define RGX_CR_RASTERISATION_PERF_COUNTER_0               (0x7750U)
+#define RGX_CR_RASTERISATION_PERF_COUNTER_0_MASKFULL      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_RASTERISATION_PERF_COUNTER_0_REG_SHIFT     (0U)
+#define RGX_CR_RASTERISATION_PERF_COUNTER_0_REG_CLRMSK    (0x00000000U)
+
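+/*
+    Note on the naming convention used by the generated definitions in this
+    block (a descriptive comment only, not additional generated content):
+    the bare register name is its byte offset, <REG>_MASKFULL is the mask of
+    all implemented bits, and each field FOO provides FOO_SHIFT (bit
+    position), FOO_CLRMSK (AND mask that clears the field) and, for
+    single-bit fields, FOO_EN (the value with that bit set).  An illustrative
+    read-modify-write sketch using the definitions above (variable names are
+    assumptions, not part of this file):
+
+        val32 = (val32 & RGX_CR_RASTERISATION_PERF_CLR_0_CLRMSK)
+                | RGX_CR_RASTERISATION_PERF_CLR_0_EN;
+
+        val64 = (val64 & RGX_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_CLRMSK)
+                | ((IMG_UINT64)group
+                   << RGX_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_SHIFT);
+*/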
+
+/*
+    Register RGX_CR_HUB_BIFPMCACHE_PERF
+*/
+#define RGX_CR_HUB_BIFPMCACHE_PERF                        (0x7800U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_MASKFULL               (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_SHIFT            (4U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_CLRMSK           (0xFFFFFFEFU)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_EN               (0x00000010U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_SHIFT            (3U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_CLRMSK           (0xFFFFFFF7U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_EN               (0x00000008U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_SHIFT            (2U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_CLRMSK           (0xFFFFFFFBU)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_EN               (0x00000004U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_SHIFT            (1U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_CLRMSK           (0xFFFFFFFDU)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_EN               (0x00000002U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_SHIFT      (0U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_CLRMSK     (0xFFFFFFFEU)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_EN         (0x00000001U)
+
+
+/*
+    Register RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0
+*/
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0                (0x7808U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MASKFULL       (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_SHIFT     (21U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_CLRMSK    (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_EN        (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0
+*/
+#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0              (0x7850U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_MASKFULL     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_SHIFT    (0U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_CLRMSK   (0x00000000U)
+
+
+/*
+    Register RGX_CR_TPU_MCU_L0_PERF
+*/
+#define RGX_CR_TPU_MCU_L0_PERF                            (0x7900U)
+#define RGX_CR_TPU_MCU_L0_PERF_MASKFULL                   (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_SHIFT                (4U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_CLRMSK               (0xFFFFFFEFU)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_EN                   (0x00000010U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_SHIFT                (3U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_CLRMSK               (0xFFFFFFF7U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_EN                   (0x00000008U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_SHIFT                (2U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_CLRMSK               (0xFFFFFFFBU)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_EN                   (0x00000004U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_SHIFT                (1U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_CLRMSK               (0xFFFFFFFDU)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_EN                   (0x00000002U)
+#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_SHIFT          (0U)
+#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_CLRMSK         (0xFFFFFFFEU)
+#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_EN             (0x00000001U)
+
+
+/*
+    Register RGX_CR_TPU_MCU_L0_PERF_SELECT0
+*/
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0                    (0x7908U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MASKFULL           (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_SHIFT    (48U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_CLRMSK   (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_SHIFT    (32U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_CLRMSK   (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_SHIFT         (21U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_EN            (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_SHIFT   (0U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_CLRMSK  (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_TPU_MCU_L0_PERF_COUNTER_0
+*/
+#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0                  (0x7950U)
+#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_MASKFULL         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_SHIFT        (0U)
+#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_CLRMSK       (0x00000000U)
+
+
+/*
+    Register RGX_CR_USC_PERF
+*/
+#define RGX_CR_USC_PERF                                   (0x8100U)
+#define RGX_CR_USC_PERF_MASKFULL                          (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_USC_PERF_CLR_3_SHIFT                       (4U)
+#define RGX_CR_USC_PERF_CLR_3_CLRMSK                      (0xFFFFFFEFU)
+#define RGX_CR_USC_PERF_CLR_3_EN                          (0x00000010U)
+#define RGX_CR_USC_PERF_CLR_2_SHIFT                       (3U)
+#define RGX_CR_USC_PERF_CLR_2_CLRMSK                      (0xFFFFFFF7U)
+#define RGX_CR_USC_PERF_CLR_2_EN                          (0x00000008U)
+#define RGX_CR_USC_PERF_CLR_1_SHIFT                       (2U)
+#define RGX_CR_USC_PERF_CLR_1_CLRMSK                      (0xFFFFFFFBU)
+#define RGX_CR_USC_PERF_CLR_1_EN                          (0x00000004U)
+#define RGX_CR_USC_PERF_CLR_0_SHIFT                       (1U)
+#define RGX_CR_USC_PERF_CLR_0_CLRMSK                      (0xFFFFFFFDU)
+#define RGX_CR_USC_PERF_CLR_0_EN                          (0x00000002U)
+#define RGX_CR_USC_PERF_CTRL_ENABLE_SHIFT                 (0U)
+#define RGX_CR_USC_PERF_CTRL_ENABLE_CLRMSK                (0xFFFFFFFEU)
+#define RGX_CR_USC_PERF_CTRL_ENABLE_EN                    (0x00000001U)
+
+
+/*
+    Register RGX_CR_USC_PERF_SELECT0
+*/
+#define RGX_CR_USC_PERF_SELECT0                           (0x8108U)
+#define RGX_CR_USC_PERF_SELECT0_MASKFULL                  (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_USC_PERF_SELECT0_BATCH_MAX_SHIFT           (48U)
+#define RGX_CR_USC_PERF_SELECT0_BATCH_MAX_CLRMSK          (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_USC_PERF_SELECT0_BATCH_MIN_SHIFT           (32U)
+#define RGX_CR_USC_PERF_SELECT0_BATCH_MIN_CLRMSK          (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_USC_PERF_SELECT0_MODE_SHIFT                (21U)
+#define RGX_CR_USC_PERF_SELECT0_MODE_CLRMSK               (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_USC_PERF_SELECT0_MODE_EN                   (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_USC_PERF_SELECT0_GROUP_SELECT_SHIFT        (16U)
+#define RGX_CR_USC_PERF_SELECT0_GROUP_SELECT_CLRMSK       (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define RGX_CR_USC_PERF_SELECT0_BIT_SELECT_SHIFT          (0U)
+#define RGX_CR_USC_PERF_SELECT0_BIT_SELECT_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_USC_PERF_COUNTER_0
+*/
+#define RGX_CR_USC_PERF_COUNTER_0                         (0x8150U)
+#define RGX_CR_USC_PERF_COUNTER_0_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_PERF_COUNTER_0_REG_SHIFT               (0U)
+#define RGX_CR_USC_PERF_COUNTER_0_REG_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_JONES_IDLE
+*/
+#define RGX_CR_JONES_IDLE                                 (0x8328U)
+#define RGX_CR_JONES_IDLE_MASKFULL                        (IMG_UINT64_C(0x0000000000007FFF))
+#define RGX_CR_JONES_IDLE_TDM_SHIFT                       (14U)
+#define RGX_CR_JONES_IDLE_TDM_CLRMSK                      (0xFFFFBFFFU)
+#define RGX_CR_JONES_IDLE_TDM_EN                          (0x00004000U)
+#define RGX_CR_JONES_IDLE_FB_CDC_TLA_SHIFT                (13U)
+#define RGX_CR_JONES_IDLE_FB_CDC_TLA_CLRMSK               (0xFFFFDFFFU)
+#define RGX_CR_JONES_IDLE_FB_CDC_TLA_EN                   (0x00002000U)
+#define RGX_CR_JONES_IDLE_FB_CDC_SHIFT                    (12U)
+#define RGX_CR_JONES_IDLE_FB_CDC_CLRMSK                   (0xFFFFEFFFU)
+#define RGX_CR_JONES_IDLE_FB_CDC_EN                       (0x00001000U)
+#define RGX_CR_JONES_IDLE_MMU_SHIFT                       (11U)
+#define RGX_CR_JONES_IDLE_MMU_CLRMSK                      (0xFFFFF7FFU)
+#define RGX_CR_JONES_IDLE_MMU_EN                          (0x00000800U)
+#define RGX_CR_JONES_IDLE_TLA_SHIFT                       (10U)
+#define RGX_CR_JONES_IDLE_TLA_CLRMSK                      (0xFFFFFBFFU)
+#define RGX_CR_JONES_IDLE_TLA_EN                          (0x00000400U)
+#define RGX_CR_JONES_IDLE_GARTEN_SHIFT                    (9U)
+#define RGX_CR_JONES_IDLE_GARTEN_CLRMSK                   (0xFFFFFDFFU)
+#define RGX_CR_JONES_IDLE_GARTEN_EN                       (0x00000200U)
+#define RGX_CR_JONES_IDLE_HOSTIF_SHIFT                    (8U)
+#define RGX_CR_JONES_IDLE_HOSTIF_CLRMSK                   (0xFFFFFEFFU)
+#define RGX_CR_JONES_IDLE_HOSTIF_EN                       (0x00000100U)
+#define RGX_CR_JONES_IDLE_SOCIF_SHIFT                     (7U)
+#define RGX_CR_JONES_IDLE_SOCIF_CLRMSK                    (0xFFFFFF7FU)
+#define RGX_CR_JONES_IDLE_SOCIF_EN                        (0x00000080U)
+#define RGX_CR_JONES_IDLE_TILING_SHIFT                    (6U)
+#define RGX_CR_JONES_IDLE_TILING_CLRMSK                   (0xFFFFFFBFU)
+#define RGX_CR_JONES_IDLE_TILING_EN                       (0x00000040U)
+#define RGX_CR_JONES_IDLE_IPP_SHIFT                       (5U)
+#define RGX_CR_JONES_IDLE_IPP_CLRMSK                      (0xFFFFFFDFU)
+#define RGX_CR_JONES_IDLE_IPP_EN                          (0x00000020U)
+#define RGX_CR_JONES_IDLE_USCS_SHIFT                      (4U)
+#define RGX_CR_JONES_IDLE_USCS_CLRMSK                     (0xFFFFFFEFU)
+#define RGX_CR_JONES_IDLE_USCS_EN                         (0x00000010U)
+#define RGX_CR_JONES_IDLE_PM_SHIFT                        (3U)
+#define RGX_CR_JONES_IDLE_PM_CLRMSK                       (0xFFFFFFF7U)
+#define RGX_CR_JONES_IDLE_PM_EN                           (0x00000008U)
+#define RGX_CR_JONES_IDLE_CDM_SHIFT                       (2U)
+#define RGX_CR_JONES_IDLE_CDM_CLRMSK                      (0xFFFFFFFBU)
+#define RGX_CR_JONES_IDLE_CDM_EN                          (0x00000004U)
+#define RGX_CR_JONES_IDLE_VDM_SHIFT                       (1U)
+#define RGX_CR_JONES_IDLE_VDM_CLRMSK                      (0xFFFFFFFDU)
+#define RGX_CR_JONES_IDLE_VDM_EN                          (0x00000002U)
+#define RGX_CR_JONES_IDLE_BIF_SHIFT                       (0U)
+#define RGX_CR_JONES_IDLE_BIF_CLRMSK                      (0xFFFFFFFEU)
+#define RGX_CR_JONES_IDLE_BIF_EN                          (0x00000001U)
+
+
+/*
+    Register RGX_CR_TORNADO_PERF
+*/
+#define RGX_CR_TORNADO_PERF                               (0x8228U)
+#define RGX_CR_TORNADO_PERF_MASKFULL                      (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_TORNADO_PERF_CLR_3_SHIFT                   (4U)
+#define RGX_CR_TORNADO_PERF_CLR_3_CLRMSK                  (0xFFFFFFEFU)
+#define RGX_CR_TORNADO_PERF_CLR_3_EN                      (0x00000010U)
+#define RGX_CR_TORNADO_PERF_CLR_2_SHIFT                   (3U)
+#define RGX_CR_TORNADO_PERF_CLR_2_CLRMSK                  (0xFFFFFFF7U)
+#define RGX_CR_TORNADO_PERF_CLR_2_EN                      (0x00000008U)
+#define RGX_CR_TORNADO_PERF_CLR_1_SHIFT                   (2U)
+#define RGX_CR_TORNADO_PERF_CLR_1_CLRMSK                  (0xFFFFFFFBU)
+#define RGX_CR_TORNADO_PERF_CLR_1_EN                      (0x00000004U)
+#define RGX_CR_TORNADO_PERF_CLR_0_SHIFT                   (1U)
+#define RGX_CR_TORNADO_PERF_CLR_0_CLRMSK                  (0xFFFFFFFDU)
+#define RGX_CR_TORNADO_PERF_CLR_0_EN                      (0x00000002U)
+#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_SHIFT             (0U)
+#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_CLRMSK            (0xFFFFFFFEU)
+#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_EN                (0x00000001U)
+
+
+/*
+    Register RGX_CR_TORNADO_PERF_SELECT0
+*/
+#define RGX_CR_TORNADO_PERF_SELECT0                       (0x8230U)
+#define RGX_CR_TORNADO_PERF_SELECT0_MASKFULL              (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MAX_SHIFT       (48U)
+#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MAX_CLRMSK      (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MIN_SHIFT       (32U)
+#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MIN_CLRMSK      (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_TORNADO_PERF_SELECT0_MODE_SHIFT            (21U)
+#define RGX_CR_TORNADO_PERF_SELECT0_MODE_CLRMSK           (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_TORNADO_PERF_SELECT0_MODE_EN               (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_SHIFT    (16U)
+#define RGX_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_CLRMSK   (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define RGX_CR_TORNADO_PERF_SELECT0_BIT_SELECT_SHIFT      (0U)
+#define RGX_CR_TORNADO_PERF_SELECT0_BIT_SELECT_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_TORNADO_PERF_COUNTER_0
+*/
+#define RGX_CR_TORNADO_PERF_COUNTER_0                     (0x8268U)
+#define RGX_CR_TORNADO_PERF_COUNTER_0_MASKFULL            (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TORNADO_PERF_COUNTER_0_REG_SHIFT           (0U)
+#define RGX_CR_TORNADO_PERF_COUNTER_0_REG_CLRMSK          (0x00000000U)
+
+
+/*
+    Register RGX_CR_TEXAS_PERF
+*/
+#define RGX_CR_TEXAS_PERF                                 (0x8290U)
+#define RGX_CR_TEXAS_PERF_MASKFULL                        (IMG_UINT64_C(0x000000000000007F))
+#define RGX_CR_TEXAS_PERF_CLR_5_SHIFT                     (6U)
+#define RGX_CR_TEXAS_PERF_CLR_5_CLRMSK                    (0xFFFFFFBFU)
+#define RGX_CR_TEXAS_PERF_CLR_5_EN                        (0x00000040U)
+#define RGX_CR_TEXAS_PERF_CLR_4_SHIFT                     (5U)
+#define RGX_CR_TEXAS_PERF_CLR_4_CLRMSK                    (0xFFFFFFDFU)
+#define RGX_CR_TEXAS_PERF_CLR_4_EN                        (0x00000020U)
+#define RGX_CR_TEXAS_PERF_CLR_3_SHIFT                     (4U)
+#define RGX_CR_TEXAS_PERF_CLR_3_CLRMSK                    (0xFFFFFFEFU)
+#define RGX_CR_TEXAS_PERF_CLR_3_EN                        (0x00000010U)
+#define RGX_CR_TEXAS_PERF_CLR_2_SHIFT                     (3U)
+#define RGX_CR_TEXAS_PERF_CLR_2_CLRMSK                    (0xFFFFFFF7U)
+#define RGX_CR_TEXAS_PERF_CLR_2_EN                        (0x00000008U)
+#define RGX_CR_TEXAS_PERF_CLR_1_SHIFT                     (2U)
+#define RGX_CR_TEXAS_PERF_CLR_1_CLRMSK                    (0xFFFFFFFBU)
+#define RGX_CR_TEXAS_PERF_CLR_1_EN                        (0x00000004U)
+#define RGX_CR_TEXAS_PERF_CLR_0_SHIFT                     (1U)
+#define RGX_CR_TEXAS_PERF_CLR_0_CLRMSK                    (0xFFFFFFFDU)
+#define RGX_CR_TEXAS_PERF_CLR_0_EN                        (0x00000002U)
+#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_SHIFT               (0U)
+#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_CLRMSK              (0xFFFFFFFEU)
+#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_EN                  (0x00000001U)
+
+
+/*
+    Register RGX_CR_TEXAS_PERF_SELECT0
+*/
+#define RGX_CR_TEXAS_PERF_SELECT0                         (0x8298U)
+#define RGX_CR_TEXAS_PERF_SELECT0_MASKFULL                (IMG_UINT64_C(0x3FFF3FFF803FFFFF))
+#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MAX_SHIFT         (48U)
+#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MAX_CLRMSK        (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MIN_SHIFT         (32U)
+#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MIN_CLRMSK        (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_TEXAS_PERF_SELECT0_MODE_SHIFT              (31U)
+#define RGX_CR_TEXAS_PERF_SELECT0_MODE_CLRMSK             (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF))
+#define RGX_CR_TEXAS_PERF_SELECT0_MODE_EN                 (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_SHIFT      (16U)
+#define RGX_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFC0FFFF))
+#define RGX_CR_TEXAS_PERF_SELECT0_BIT_SELECT_SHIFT        (0U)
+#define RGX_CR_TEXAS_PERF_SELECT0_BIT_SELECT_CLRMSK       (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_TEXAS_PERF_COUNTER_0
+*/
+#define RGX_CR_TEXAS_PERF_COUNTER_0                       (0x82D8U)
+#define RGX_CR_TEXAS_PERF_COUNTER_0_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TEXAS_PERF_COUNTER_0_REG_SHIFT             (0U)
+#define RGX_CR_TEXAS_PERF_COUNTER_0_REG_CLRMSK            (0x00000000U)
+
+
+/*
+    Register RGX_CR_JONES_PERF
+*/
+#define RGX_CR_JONES_PERF                                 (0x8330U)
+#define RGX_CR_JONES_PERF_MASKFULL                        (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_JONES_PERF_CLR_3_SHIFT                     (4U)
+#define RGX_CR_JONES_PERF_CLR_3_CLRMSK                    (0xFFFFFFEFU)
+#define RGX_CR_JONES_PERF_CLR_3_EN                        (0x00000010U)
+#define RGX_CR_JONES_PERF_CLR_2_SHIFT                     (3U)
+#define RGX_CR_JONES_PERF_CLR_2_CLRMSK                    (0xFFFFFFF7U)
+#define RGX_CR_JONES_PERF_CLR_2_EN                        (0x00000008U)
+#define RGX_CR_JONES_PERF_CLR_1_SHIFT                     (2U)
+#define RGX_CR_JONES_PERF_CLR_1_CLRMSK                    (0xFFFFFFFBU)
+#define RGX_CR_JONES_PERF_CLR_1_EN                        (0x00000004U)
+#define RGX_CR_JONES_PERF_CLR_0_SHIFT                     (1U)
+#define RGX_CR_JONES_PERF_CLR_0_CLRMSK                    (0xFFFFFFFDU)
+#define RGX_CR_JONES_PERF_CLR_0_EN                        (0x00000002U)
+#define RGX_CR_JONES_PERF_CTRL_ENABLE_SHIFT               (0U)
+#define RGX_CR_JONES_PERF_CTRL_ENABLE_CLRMSK              (0xFFFFFFFEU)
+#define RGX_CR_JONES_PERF_CTRL_ENABLE_EN                  (0x00000001U)
+
+
+/*
+    Register RGX_CR_JONES_PERF_SELECT0
+*/
+#define RGX_CR_JONES_PERF_SELECT0                         (0x8338U)
+#define RGX_CR_JONES_PERF_SELECT0_MASKFULL                (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_JONES_PERF_SELECT0_BATCH_MAX_SHIFT         (48U)
+#define RGX_CR_JONES_PERF_SELECT0_BATCH_MAX_CLRMSK        (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_JONES_PERF_SELECT0_BATCH_MIN_SHIFT         (32U)
+#define RGX_CR_JONES_PERF_SELECT0_BATCH_MIN_CLRMSK        (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_JONES_PERF_SELECT0_MODE_SHIFT              (21U)
+#define RGX_CR_JONES_PERF_SELECT0_MODE_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_JONES_PERF_SELECT0_MODE_EN                 (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_JONES_PERF_SELECT0_GROUP_SELECT_SHIFT      (16U)
+#define RGX_CR_JONES_PERF_SELECT0_GROUP_SELECT_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define RGX_CR_JONES_PERF_SELECT0_BIT_SELECT_SHIFT        (0U)
+#define RGX_CR_JONES_PERF_SELECT0_BIT_SELECT_CLRMSK       (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_JONES_PERF_COUNTER_0
+*/
+#define RGX_CR_JONES_PERF_COUNTER_0                       (0x8368U)
+#define RGX_CR_JONES_PERF_COUNTER_0_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_JONES_PERF_COUNTER_0_REG_SHIFT             (0U)
+#define RGX_CR_JONES_PERF_COUNTER_0_REG_CLRMSK            (0x00000000U)
+
+
+/*
+    Register RGX_CR_BLACKPEARL_PERF
+*/
+#define RGX_CR_BLACKPEARL_PERF                            (0x8400U)
+#define RGX_CR_BLACKPEARL_PERF_MASKFULL                   (IMG_UINT64_C(0x000000000000007F))
+#define RGX_CR_BLACKPEARL_PERF_CLR_5_SHIFT                (6U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_5_CLRMSK               (0xFFFFFFBFU)
+#define RGX_CR_BLACKPEARL_PERF_CLR_5_EN                   (0x00000040U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_4_SHIFT                (5U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_4_CLRMSK               (0xFFFFFFDFU)
+#define RGX_CR_BLACKPEARL_PERF_CLR_4_EN                   (0x00000020U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_3_SHIFT                (4U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_3_CLRMSK               (0xFFFFFFEFU)
+#define RGX_CR_BLACKPEARL_PERF_CLR_3_EN                   (0x00000010U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_2_SHIFT                (3U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_2_CLRMSK               (0xFFFFFFF7U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_2_EN                   (0x00000008U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_1_SHIFT                (2U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_1_CLRMSK               (0xFFFFFFFBU)
+#define RGX_CR_BLACKPEARL_PERF_CLR_1_EN                   (0x00000004U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_0_SHIFT                (1U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_0_CLRMSK               (0xFFFFFFFDU)
+#define RGX_CR_BLACKPEARL_PERF_CLR_0_EN                   (0x00000002U)
+#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_SHIFT          (0U)
+#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_CLRMSK         (0xFFFFFFFEU)
+#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_EN             (0x00000001U)
+
+
+/*
+    Register RGX_CR_BLACKPEARL_PERF_SELECT0
+*/
+#define RGX_CR_BLACKPEARL_PERF_SELECT0                    (0x8408U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_MASKFULL           (IMG_UINT64_C(0x3FFF3FFF803FFFFF))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_SHIFT    (48U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_CLRMSK   (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_SHIFT    (32U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_CLRMSK   (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_SHIFT         (31U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_CLRMSK        (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_EN            (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC0FFFF))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_SHIFT   (0U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_CLRMSK  (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_BLACKPEARL_PERF_COUNTER_0
+*/
+#define RGX_CR_BLACKPEARL_PERF_COUNTER_0                  (0x8448U)
+#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_MASKFULL         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_REG_SHIFT        (0U)
+#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_REG_CLRMSK       (0x00000000U)
+
+
+/*
+    Register RGX_CR_PBE_PERF
+*/
+#define RGX_CR_PBE_PERF                                   (0x8478U)
+#define RGX_CR_PBE_PERF_MASKFULL                          (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_PBE_PERF_CLR_3_SHIFT                       (4U)
+#define RGX_CR_PBE_PERF_CLR_3_CLRMSK                      (0xFFFFFFEFU)
+#define RGX_CR_PBE_PERF_CLR_3_EN                          (0x00000010U)
+#define RGX_CR_PBE_PERF_CLR_2_SHIFT                       (3U)
+#define RGX_CR_PBE_PERF_CLR_2_CLRMSK                      (0xFFFFFFF7U)
+#define RGX_CR_PBE_PERF_CLR_2_EN                          (0x00000008U)
+#define RGX_CR_PBE_PERF_CLR_1_SHIFT                       (2U)
+#define RGX_CR_PBE_PERF_CLR_1_CLRMSK                      (0xFFFFFFFBU)
+#define RGX_CR_PBE_PERF_CLR_1_EN                          (0x00000004U)
+#define RGX_CR_PBE_PERF_CLR_0_SHIFT                       (1U)
+#define RGX_CR_PBE_PERF_CLR_0_CLRMSK                      (0xFFFFFFFDU)
+#define RGX_CR_PBE_PERF_CLR_0_EN                          (0x00000002U)
+#define RGX_CR_PBE_PERF_CTRL_ENABLE_SHIFT                 (0U)
+#define RGX_CR_PBE_PERF_CTRL_ENABLE_CLRMSK                (0xFFFFFFFEU)
+#define RGX_CR_PBE_PERF_CTRL_ENABLE_EN                    (0x00000001U)
+
+
+/*
+    Register RGX_CR_PBE_PERF_SELECT0
+*/
+#define RGX_CR_PBE_PERF_SELECT0                           (0x8480U)
+#define RGX_CR_PBE_PERF_SELECT0_MASKFULL                  (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_PBE_PERF_SELECT0_BATCH_MAX_SHIFT           (48U)
+#define RGX_CR_PBE_PERF_SELECT0_BATCH_MAX_CLRMSK          (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_PBE_PERF_SELECT0_BATCH_MIN_SHIFT           (32U)
+#define RGX_CR_PBE_PERF_SELECT0_BATCH_MIN_CLRMSK          (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_PBE_PERF_SELECT0_MODE_SHIFT                (21U)
+#define RGX_CR_PBE_PERF_SELECT0_MODE_CLRMSK               (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_PBE_PERF_SELECT0_MODE_EN                   (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_PBE_PERF_SELECT0_GROUP_SELECT_SHIFT        (16U)
+#define RGX_CR_PBE_PERF_SELECT0_GROUP_SELECT_CLRMSK       (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define RGX_CR_PBE_PERF_SELECT0_BIT_SELECT_SHIFT          (0U)
+#define RGX_CR_PBE_PERF_SELECT0_BIT_SELECT_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_PBE_PERF_COUNTER_0
+*/
+#define RGX_CR_PBE_PERF_COUNTER_0                         (0x84B0U)
+#define RGX_CR_PBE_PERF_COUNTER_0_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PBE_PERF_COUNTER_0_REG_SHIFT               (0U)
+#define RGX_CR_PBE_PERF_COUNTER_0_REG_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_OCP_REVINFO
+*/
+#define RGX_CR_OCP_REVINFO                                (0x9000U)
+#define RGX_CR_OCP_REVINFO_MASKFULL                       (IMG_UINT64_C(0x00000007FFFFFFFF))
+#define RGX_CR_OCP_REVINFO_HWINFO_SYSBUS_SHIFT            (33U)
+#define RGX_CR_OCP_REVINFO_HWINFO_SYSBUS_CLRMSK           (IMG_UINT64_C(0xFFFFFFF9FFFFFFFF))
+#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_SHIFT            (32U)
+#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_CLRMSK           (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_EN               (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_OCP_REVINFO_REVISION_SHIFT                 (0U)
+#define RGX_CR_OCP_REVINFO_REVISION_CLRMSK                (IMG_UINT64_C(0xFFFFFFFF00000000))
+
+
+/*
+    Register RGX_CR_OCP_SYSCONFIG
+*/
+#define RGX_CR_OCP_SYSCONFIG                              (0x9010U)
+#define RGX_CR_OCP_SYSCONFIG_MASKFULL                     (IMG_UINT64_C(0x0000000000000FFF))
+#define RGX_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_SHIFT     (10U)
+#define RGX_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_CLRMSK    (0xFFFFF3FFU)
+#define RGX_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_SHIFT     (8U)
+#define RGX_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_CLRMSK    (0xFFFFFCFFU)
+#define RGX_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_SHIFT     (6U)
+#define RGX_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_CLRMSK    (0xFFFFFF3FU)
+#define RGX_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_SHIFT     (4U)
+#define RGX_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_CLRMSK    (0xFFFFFFCFU)
+#define RGX_CR_OCP_SYSCONFIG_STANDBY_MODE_SHIFT           (2U)
+#define RGX_CR_OCP_SYSCONFIG_STANDBY_MODE_CLRMSK          (0xFFFFFFF3U)
+#define RGX_CR_OCP_SYSCONFIG_IDLE_MODE_SHIFT              (0U)
+#define RGX_CR_OCP_SYSCONFIG_IDLE_MODE_CLRMSK             (0xFFFFFFFCU)
+
+
+/*
+    Register RGX_CR_OCP_IRQSTATUS_RAW_0
+*/
+#define RGX_CR_OCP_IRQSTATUS_RAW_0                        (0x9020U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_0_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_EN (0x00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQSTATUS_RAW_1
+*/
+#define RGX_CR_OCP_IRQSTATUS_RAW_1                        (0x9028U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_1_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_EN (0x00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQSTATUS_RAW_2
+*/
+#define RGX_CR_OCP_IRQSTATUS_RAW_2                        (0x9030U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_2_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_SHIFT      (0U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_CLRMSK     (0xFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_EN         (0x00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQSTATUS_0
+*/
+#define RGX_CR_OCP_IRQSTATUS_0                            (0x9038U)
+#define RGX_CR_OCP_IRQSTATUS_0_MASKFULL                   (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_EN  (0x00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQSTATUS_1
+*/
+#define RGX_CR_OCP_IRQSTATUS_1                            (0x9040U)
+#define RGX_CR_OCP_IRQSTATUS_1_MASKFULL                   (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_EN (0x00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQSTATUS_2
+*/
+#define RGX_CR_OCP_IRQSTATUS_2                            (0x9048U)
+#define RGX_CR_OCP_IRQSTATUS_2_MASKFULL                   (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_SHIFT       (0U)
+#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_CLRMSK      (0xFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_EN          (0x00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQENABLE_SET_0
+*/
+#define RGX_CR_OCP_IRQENABLE_SET_0                        (0x9050U)
+#define RGX_CR_OCP_IRQENABLE_SET_0_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_EN (0x00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQENABLE_SET_1
+*/
+#define RGX_CR_OCP_IRQENABLE_SET_1                        (0x9058U)
+#define RGX_CR_OCP_IRQENABLE_SET_1_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_EN (0x00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQENABLE_SET_2
+*/
+#define RGX_CR_OCP_IRQENABLE_SET_2                        (0x9060U)
+#define RGX_CR_OCP_IRQENABLE_SET_2_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_SHIFT   (0U)
+#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_CLRMSK  (0xFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_EN      (0x00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQENABLE_CLR_0
+*/
+#define RGX_CR_OCP_IRQENABLE_CLR_0                        (0x9068U)
+#define RGX_CR_OCP_IRQENABLE_CLR_0_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_EN (0x00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQENABLE_CLR_1
+*/
+#define RGX_CR_OCP_IRQENABLE_CLR_1                        (0x9070U)
+#define RGX_CR_OCP_IRQENABLE_CLR_1_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_EN (0x00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQENABLE_CLR_2
+*/
+#define RGX_CR_OCP_IRQENABLE_CLR_2                        (0x9078U)
+#define RGX_CR_OCP_IRQENABLE_CLR_2_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_SHIFT  (0U)
+#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_EN     (0x00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQ_EVENT
+*/
+#define RGX_CR_OCP_IRQ_EVENT                              (0x9080U)
+#define RGX_CR_OCP_IRQ_EVENT_MASKFULL                     (IMG_UINT64_C(0x00000000000FFFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_SHIFT (19U)
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_EN (IMG_UINT64_C(0x0000000000080000))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_SHIFT (18U)
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_EN (IMG_UINT64_C(0x0000000000040000))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_SHIFT (17U)
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_EN (IMG_UINT64_C(0x0000000000020000))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_SHIFT (16U)
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_EN (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_SHIFT (15U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000008000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_SHIFT (14U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_EN  (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_SHIFT (13U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_EN   (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_SHIFT (12U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_SHIFT (11U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_SHIFT (10U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_EN  (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_SHIFT (9U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_EN   (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_SHIFT (8U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_SHIFT (7U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_SHIFT (6U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_EN  (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_SHIFT (5U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_EN   (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_SHIFT (4U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_SHIFT (3U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_SHIFT (2U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_EN  (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_SHIFT (1U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_EN   (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_SHIFT (0U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_OCP_DEBUG_CONFIG
+*/
+#define RGX_CR_OCP_DEBUG_CONFIG                           (0x9088U)
+#define RGX_CR_OCP_DEBUG_CONFIG_MASKFULL                  (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_DEBUG_CONFIG_REG_SHIFT                 (0U)
+#define RGX_CR_OCP_DEBUG_CONFIG_REG_CLRMSK                (0xFFFFFFFEU)
+#define RGX_CR_OCP_DEBUG_CONFIG_REG_EN                    (0x00000001U)
+
+
+/*
+    Register RGX_CR_OCP_DEBUG_STATUS
+*/
+#define RGX_CR_OCP_DEBUG_STATUS                           (0x9090U)
+#define RGX_CR_OCP_DEBUG_STATUS_MASKFULL                  (IMG_UINT64_C(0x001F1F77FFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_SHIFT    (51U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_CLRMSK   (IMG_UINT64_C(0xFFE7FFFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_SHIFT    (50U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_CLRMSK   (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_EN       (IMG_UINT64_C(0x0004000000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_SHIFT    (48U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_CLRMSK   (IMG_UINT64_C(0xFFFCFFFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_SHIFT    (43U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_CLRMSK   (IMG_UINT64_C(0xFFFFE7FFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_SHIFT    (42U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_CLRMSK   (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_EN       (IMG_UINT64_C(0x0000040000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_SHIFT    (40U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_CLRMSK   (IMG_UINT64_C(0xFFFFFCFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_SHIFT        (38U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_CLRMSK       (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_EN           (IMG_UINT64_C(0x0000004000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_SHIFT (37U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_EN  (IMG_UINT64_C(0x0000002000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_SHIFT (36U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_EN    (IMG_UINT64_C(0x0000001000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_SHIFT        (34U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_CLRMSK       (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_EN           (IMG_UINT64_C(0x0000000400000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_SHIFT (33U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_EN  (IMG_UINT64_C(0x0000000200000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_SHIFT (32U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_EN    (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_SHIFT      (31U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_CLRMSK     (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_EN         (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_SHIFT         (30U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_EN            (IMG_UINT64_C(0x0000000040000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_SHIFT      (29U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_EN         (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_SHIFT      (27U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFE7FFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_SHIFT      (26U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_EN         (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_SHIFT      (24U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_SHIFT      (23U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_EN         (IMG_UINT64_C(0x0000000000800000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_SHIFT         (22U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_EN            (IMG_UINT64_C(0x0000000000400000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_SHIFT      (21U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_EN         (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_SHIFT      (19U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFE7FFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_SHIFT      (18U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_EN         (IMG_UINT64_C(0x0000000000040000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_SHIFT      (16U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_SHIFT      (15U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_EN         (IMG_UINT64_C(0x0000000000008000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_SHIFT         (14U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_EN            (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_SHIFT      (13U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_EN         (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_SHIFT      (11U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFFFE7FF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_SHIFT      (10U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_EN         (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_SHIFT      (8U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_SHIFT      (7U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_EN         (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_SHIFT         (6U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_EN            (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_SHIFT      (5U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_EN         (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_SHIFT      (3U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFFFFFE7))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_SHIFT      (2U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_EN         (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_SHIFT      (0U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
+
+
+#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_SHIFT           (6U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_CLRMSK          (0xFFFFFFBFU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_EN              (0x00000040U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_SHIFT               (5U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_CLRMSK              (0xFFFFFFDFU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_EN                  (0x00000020U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_META_SHIFT               (4U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_META_CLRMSK              (0xFFFFFFEFU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_META_EN                  (0x00000010U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_SHIFT             (3U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_CLRMSK            (0xFFFFFFF7U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_EN                (0x00000008U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_SHIFT              (2U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_CLRMSK             (0xFFFFFFFBU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_EN                 (0x00000004U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_SHIFT             (1U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_CLRMSK            (0xFFFFFFFDU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_EN                (0x00000002U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_SHIFT                (0U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_CLRMSK               (0xFFFFFFFEU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_EN                   (0x00000001U)
+
+
+#define RGX_CR_BIF_TRUST_DM_MASK                          (0x0000007FU)
+
+
+/*
+    Register RGX_CR_BIF_TRUST
+*/
+#define RGX_CR_BIF_TRUST                                  (0xA000U)
+#define RGX_CR_BIF_TRUST_MASKFULL                         (IMG_UINT64_C(0x00000000001FFFFF))
+#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_SHIFT (20U)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_CLRMSK (0xFFEFFFFFU)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_EN   (0x00100000U)
+#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_SHIFT  (19U)
+#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_CLRMSK (0xFFF7FFFFU)
+#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_EN     (0x00080000U)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_SHIFT       (18U)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_CLRMSK      (0xFFFBFFFFU)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_EN          (0x00040000U)
+#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_SHIFT         (17U)
+#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_CLRMSK        (0xFFFDFFFFU)
+#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_EN            (0x00020000U)
+#define RGX_CR_BIF_TRUST_ENABLE_SHIFT                     (16U)
+#define RGX_CR_BIF_TRUST_ENABLE_CLRMSK                    (0xFFFEFFFFU)
+#define RGX_CR_BIF_TRUST_ENABLE_EN                        (0x00010000U)
+#define RGX_CR_BIF_TRUST_DM_TRUSTED_SHIFT                 (9U)
+#define RGX_CR_BIF_TRUST_DM_TRUSTED_CLRMSK                (0xFFFF01FFU)
+#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_SHIFT   (8U)
+#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_CLRMSK  (0xFFFFFEFFU)
+#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_EN      (0x00000100U)
+#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_SHIFT     (7U)
+#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_CLRMSK    (0xFFFFFF7FU)
+#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_EN        (0x00000080U)
+#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_SHIFT     (6U)
+#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_CLRMSK    (0xFFFFFFBFU)
+#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_EN        (0x00000040U)
+#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_SHIFT     (5U)
+#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_CLRMSK    (0xFFFFFFDFU)
+#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_EN        (0x00000020U)
+#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_SHIFT       (4U)
+#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_CLRMSK      (0xFFFFFFEFU)
+#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_EN          (0x00000010U)
+#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_SHIFT       (3U)
+#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_CLRMSK      (0xFFFFFFF7U)
+#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_EN          (0x00000008U)
+#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_SHIFT    (2U)
+#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_CLRMSK   (0xFFFFFFFBU)
+#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_EN       (0x00000004U)
+#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_SHIFT      (1U)
+#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_CLRMSK     (0xFFFFFFFDU)
+#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_EN         (0x00000002U)
+#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_SHIFT      (0U)
+#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_CLRMSK     (0xFFFFFFFEU)
+#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_EN         (0x00000001U)
+
+
+/*
+    Register RGX_CR_SYS_BUS_SECURE
+*/
+#define RGX_CR_SYS_BUS_SECURE                             (0xA100U)
+#define RGX_CR_SYS_BUS_SECURE__SECR__MASKFULL             (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_SYS_BUS_SECURE_MASKFULL                    (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_SYS_BUS_SECURE_ENABLE_SHIFT                (0U)
+#define RGX_CR_SYS_BUS_SECURE_ENABLE_CLRMSK               (0xFFFFFFFEU)
+#define RGX_CR_SYS_BUS_SECURE_ENABLE_EN                   (0x00000001U)
+
+
+/*
+    Register RGX_CR_FBA_FC0_CHECKSUM
+*/
+#define RGX_CR_FBA_FC0_CHECKSUM                           (0xD170U)
+#define RGX_CR_FBA_FC0_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FBA_FC0_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_FBA_FC0_CHECKSUM_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_FBA_FC1_CHECKSUM
+*/
+#define RGX_CR_FBA_FC1_CHECKSUM                           (0xD178U)
+#define RGX_CR_FBA_FC1_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FBA_FC1_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_FBA_FC1_CHECKSUM_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_FBA_FC2_CHECKSUM
+*/
+#define RGX_CR_FBA_FC2_CHECKSUM                           (0xD180U)
+#define RGX_CR_FBA_FC2_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FBA_FC2_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_FBA_FC2_CHECKSUM_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_FBA_FC3_CHECKSUM
+*/
+#define RGX_CR_FBA_FC3_CHECKSUM                           (0xD188U)
+#define RGX_CR_FBA_FC3_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FBA_FC3_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_FBA_FC3_CHECKSUM_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_CLK_CTRL2
+*/
+#define RGX_CR_CLK_CTRL2                                  (0xD200U)
+#define RGX_CR_CLK_CTRL2_MASKFULL                         (IMG_UINT64_C(0x0000000000000F33))
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_SHIFT                   (10U)
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_CLRMSK                  (IMG_UINT64_C(0xFFFFFFFFFFFFF3FF))
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_OFF                     (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_ON                      (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_AUTO                    (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_CLK_CTRL2_VRDM_SHIFT                       (8U)
+#define RGX_CR_CLK_CTRL2_VRDM_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF))
+#define RGX_CR_CLK_CTRL2_VRDM_OFF                         (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL2_VRDM_ON                          (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_CTRL2_VRDM_AUTO                        (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_CTRL2_SH_SHIFT                         (4U)
+#define RGX_CR_CLK_CTRL2_SH_CLRMSK                        (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF))
+#define RGX_CR_CLK_CTRL2_SH_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL2_SH_ON                            (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_CTRL2_SH_AUTO                          (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_CLK_CTRL2_FBA_SHIFT                        (0U)
+#define RGX_CR_CLK_CTRL2_FBA_CLRMSK                       (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
+#define RGX_CR_CLK_CTRL2_FBA_OFF                          (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL2_FBA_ON                           (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_CLK_CTRL2_FBA_AUTO                         (IMG_UINT64_C(0x0000000000000002))
+
+
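+/* Illustrative sketch (assumption, not generated content): the clock control
+ * fields take one of the OFF/ON/AUTO encodings defined above. To put the SH
+ * clock domain under automatic gating, for example:
+ *
+ *   uint64_t ui64Clk = reg_read64(RGX_CR_CLK_CTRL2);    hypothetical accessor
+ *   ui64Clk &= RGX_CR_CLK_CTRL2_SH_CLRMSK;              clear the SH field
+ *   ui64Clk |= RGX_CR_CLK_CTRL2_SH_AUTO;                request auto gating
+ *   reg_write64(RGX_CR_CLK_CTRL2, ui64Clk);
+ *
+ * RGX_CR_CLK_STATUS2 below reports whether each domain is currently GATED or
+ * RUNNING.
+ */
+
+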
+/*
+    Register RGX_CR_CLK_STATUS2
+*/
+#define RGX_CR_CLK_STATUS2                                (0xD208U)
+#define RGX_CR_CLK_STATUS2_MASKFULL                       (IMG_UINT64_C(0x0000000000000015))
+#define RGX_CR_CLK_STATUS2_VRDM_SHIFT                     (4U)
+#define RGX_CR_CLK_STATUS2_VRDM_CLRMSK                    (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
+#define RGX_CR_CLK_STATUS2_VRDM_GATED                     (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS2_VRDM_RUNNING                   (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_STATUS2_SH_SHIFT                       (2U)
+#define RGX_CR_CLK_STATUS2_SH_CLRMSK                      (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_CR_CLK_STATUS2_SH_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS2_SH_RUNNING                     (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_CLK_STATUS2_FBA_SHIFT                      (0U)
+#define RGX_CR_CLK_STATUS2_FBA_CLRMSK                     (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_CLK_STATUS2_FBA_GATED                      (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS2_FBA_RUNNING                    (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_RPM_SHF_FPL
+*/
+#define RGX_CR_RPM_SHF_FPL                                (0xD520U)
+#define RGX_CR_RPM_SHF_FPL_MASKFULL                       (IMG_UINT64_C(0x3FFFFFFFFFFFFFFC))
+#define RGX_CR_RPM_SHF_FPL_SIZE_SHIFT                     (40U)
+#define RGX_CR_RPM_SHF_FPL_SIZE_CLRMSK                    (IMG_UINT64_C(0xC00000FFFFFFFFFF))
+#define RGX_CR_RPM_SHF_FPL_BASE_SHIFT                     (2U)
+#define RGX_CR_RPM_SHF_FPL_BASE_CLRMSK                    (IMG_UINT64_C(0xFFFFFF0000000003))
+#define RGX_CR_RPM_SHF_FPL_BASE_ALIGNSHIFT                (2U)
+#define RGX_CR_RPM_SHF_FPL_BASE_ALIGNSIZE                 (4U)
+
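+/* Illustrative sketch (assumption, not generated content): fields that carry
+ * an _ALIGNSHIFT/_ALIGNSIZE pair hold an address that must be aligned to
+ * _ALIGNSIZE bytes; the programmed value is presumably the address divided by
+ * _ALIGNSIZE, placed at the field's _SHIFT. ui64DevAddr and reg_read64()/
+ * reg_write64() are placeholders.
+ *
+ *   uint64_t ui64Fpl = reg_read64(RGX_CR_RPM_SHF_FPL);
+ *   ui64Fpl &= RGX_CR_RPM_SHF_FPL_BASE_CLRMSK;
+ *   ui64Fpl |= (ui64DevAddr >> RGX_CR_RPM_SHF_FPL_BASE_ALIGNSHIFT)
+ *                  << RGX_CR_RPM_SHF_FPL_BASE_SHIFT;
+ *   reg_write64(RGX_CR_RPM_SHF_FPL, ui64Fpl);
+ */
+
+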
+
+/*
+    Register RGX_CR_RPM_SHF_FPL_READ
+*/
+#define RGX_CR_RPM_SHF_FPL_READ                           (0xD528U)
+#define RGX_CR_RPM_SHF_FPL_READ_MASKFULL                  (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_SHIFT              (22U)
+#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_CLRMSK             (0xFFBFFFFFU)
+#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_EN                 (0x00400000U)
+#define RGX_CR_RPM_SHF_FPL_READ_OFFSET_SHIFT              (0U)
+#define RGX_CR_RPM_SHF_FPL_READ_OFFSET_CLRMSK             (0xFFC00000U)
+
+
+/*
+    Register RGX_CR_RPM_SHF_FPL_WRITE
+*/
+#define RGX_CR_RPM_SHF_FPL_WRITE                          (0xD530U)
+#define RGX_CR_RPM_SHF_FPL_WRITE_MASKFULL                 (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_SHIFT             (22U)
+#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_CLRMSK            (0xFFBFFFFFU)
+#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_EN                (0x00400000U)
+#define RGX_CR_RPM_SHF_FPL_WRITE_OFFSET_SHIFT             (0U)
+#define RGX_CR_RPM_SHF_FPL_WRITE_OFFSET_CLRMSK            (0xFFC00000U)
+
+
+/*
+    Register RGX_CR_RPM_SHG_FPL
+*/
+#define RGX_CR_RPM_SHG_FPL                                (0xD538U)
+#define RGX_CR_RPM_SHG_FPL_MASKFULL                       (IMG_UINT64_C(0x3FFFFFFFFFFFFFFC))
+#define RGX_CR_RPM_SHG_FPL_SIZE_SHIFT                     (40U)
+#define RGX_CR_RPM_SHG_FPL_SIZE_CLRMSK                    (IMG_UINT64_C(0xC00000FFFFFFFFFF))
+#define RGX_CR_RPM_SHG_FPL_BASE_SHIFT                     (2U)
+#define RGX_CR_RPM_SHG_FPL_BASE_CLRMSK                    (IMG_UINT64_C(0xFFFFFF0000000003))
+#define RGX_CR_RPM_SHG_FPL_BASE_ALIGNSHIFT                (2U)
+#define RGX_CR_RPM_SHG_FPL_BASE_ALIGNSIZE                 (4U)
+
+
+/*
+    Register RGX_CR_RPM_SHG_FPL_READ
+*/
+#define RGX_CR_RPM_SHG_FPL_READ                           (0xD540U)
+#define RGX_CR_RPM_SHG_FPL_READ_MASKFULL                  (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_SHIFT              (22U)
+#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_CLRMSK             (0xFFBFFFFFU)
+#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_EN                 (0x00400000U)
+#define RGX_CR_RPM_SHG_FPL_READ_OFFSET_SHIFT              (0U)
+#define RGX_CR_RPM_SHG_FPL_READ_OFFSET_CLRMSK             (0xFFC00000U)
+
+
+/*
+    Register RGX_CR_RPM_SHG_FPL_WRITE
+*/
+#define RGX_CR_RPM_SHG_FPL_WRITE                          (0xD548U)
+#define RGX_CR_RPM_SHG_FPL_WRITE_MASKFULL                 (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_SHIFT             (22U)
+#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_CLRMSK            (0xFFBFFFFFU)
+#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_EN                (0x00400000U)
+#define RGX_CR_RPM_SHG_FPL_WRITE_OFFSET_SHIFT             (0U)
+#define RGX_CR_RPM_SHG_FPL_WRITE_OFFSET_CLRMSK            (0xFFC00000U)
+
+
+/*
+    Register RGX_CR_SH_PERF
+*/
+#define RGX_CR_SH_PERF                                    (0xD5F8U)
+#define RGX_CR_SH_PERF_MASKFULL                           (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_SH_PERF_CLR_3_SHIFT                        (4U)
+#define RGX_CR_SH_PERF_CLR_3_CLRMSK                       (0xFFFFFFEFU)
+#define RGX_CR_SH_PERF_CLR_3_EN                           (0x00000010U)
+#define RGX_CR_SH_PERF_CLR_2_SHIFT                        (3U)
+#define RGX_CR_SH_PERF_CLR_2_CLRMSK                       (0xFFFFFFF7U)
+#define RGX_CR_SH_PERF_CLR_2_EN                           (0x00000008U)
+#define RGX_CR_SH_PERF_CLR_1_SHIFT                        (2U)
+#define RGX_CR_SH_PERF_CLR_1_CLRMSK                       (0xFFFFFFFBU)
+#define RGX_CR_SH_PERF_CLR_1_EN                           (0x00000004U)
+#define RGX_CR_SH_PERF_CLR_0_SHIFT                        (1U)
+#define RGX_CR_SH_PERF_CLR_0_CLRMSK                       (0xFFFFFFFDU)
+#define RGX_CR_SH_PERF_CLR_0_EN                           (0x00000002U)
+#define RGX_CR_SH_PERF_CTRL_ENABLE_SHIFT                  (0U)
+#define RGX_CR_SH_PERF_CTRL_ENABLE_CLRMSK                 (0xFFFFFFFEU)
+#define RGX_CR_SH_PERF_CTRL_ENABLE_EN                     (0x00000001U)
+
+
+/*
+    Register RGX_CR_SH_PERF_SELECT0
+*/
+#define RGX_CR_SH_PERF_SELECT0                            (0xD600U)
+#define RGX_CR_SH_PERF_SELECT0_MASKFULL                   (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_SH_PERF_SELECT0_BATCH_MAX_SHIFT            (48U)
+#define RGX_CR_SH_PERF_SELECT0_BATCH_MAX_CLRMSK           (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_CR_SH_PERF_SELECT0_BATCH_MIN_SHIFT            (32U)
+#define RGX_CR_SH_PERF_SELECT0_BATCH_MIN_CLRMSK           (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_CR_SH_PERF_SELECT0_MODE_SHIFT                 (21U)
+#define RGX_CR_SH_PERF_SELECT0_MODE_CLRMSK                (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_SH_PERF_SELECT0_MODE_EN                    (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_SH_PERF_SELECT0_GROUP_SELECT_SHIFT         (16U)
+#define RGX_CR_SH_PERF_SELECT0_GROUP_SELECT_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define RGX_CR_SH_PERF_SELECT0_BIT_SELECT_SHIFT           (0U)
+#define RGX_CR_SH_PERF_SELECT0_BIT_SELECT_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_SH_PERF_COUNTER_0
+*/
+#define RGX_CR_SH_PERF_COUNTER_0                          (0xD628U)
+#define RGX_CR_SH_PERF_COUNTER_0_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SH_PERF_COUNTER_0_REG_SHIFT                (0U)
+#define RGX_CR_SH_PERF_COUNTER_0_REG_CLRMSK               (0x00000000U)
+
+
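+/* Illustrative sketch (assumption, not generated content): the *_PERF blocks
+ * in this file (SH, BF, BT, RT, BX_TU) share one layout: a control register
+ * with per-counter CLR bits and a CTRL_ENABLE bit, a SELECT register choosing
+ * the event group/bit, and a COUNTER register to read back. uiGroup/uiBit are
+ * placeholder event selectors; reg_read32()/reg_write*() are hypothetical
+ * accessors. A minimal setup might look like:
+ *
+ *   reg_write64(RGX_CR_SH_PERF_SELECT0,
+ *       ((uint64_t)uiGroup << RGX_CR_SH_PERF_SELECT0_GROUP_SELECT_SHIFT) |
+ *       ((uint64_t)uiBit   << RGX_CR_SH_PERF_SELECT0_BIT_SELECT_SHIFT));
+ *   reg_write32(RGX_CR_SH_PERF, RGX_CR_SH_PERF_CTRL_ENABLE_EN);
+ *   ...
+ *   uint32_t ui32Count = reg_read32(RGX_CR_SH_PERF_COUNTER_0);
+ */
+
+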
+/*
+    Register RGX_CR_SHF_SHG_CHECKSUM
+*/
+#define RGX_CR_SHF_SHG_CHECKSUM                           (0xD1C0U)
+#define RGX_CR_SHF_SHG_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SHF_SHG_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_SHF_SHG_CHECKSUM_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_SHF_VERTEX_BIF_CHECKSUM
+*/
+#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM                    (0xD1C8U)
+#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_SHIFT        (0U)
+#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_CLRMSK       (0x00000000U)
+
+
+/*
+    Register RGX_CR_SHF_VARY_BIF_CHECKSUM
+*/
+#define RGX_CR_SHF_VARY_BIF_CHECKSUM                      (0xD1D0U)
+#define RGX_CR_SHF_VARY_BIF_CHECKSUM_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SHF_VARY_BIF_CHECKSUM_VALUE_SHIFT          (0U)
+#define RGX_CR_SHF_VARY_BIF_CHECKSUM_VALUE_CLRMSK         (0x00000000U)
+
+
+/*
+    Register RGX_CR_RPM_BIF_CHECKSUM
+*/
+#define RGX_CR_RPM_BIF_CHECKSUM                           (0xD1D8U)
+#define RGX_CR_RPM_BIF_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_RPM_BIF_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_RPM_BIF_CHECKSUM_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_SHG_BIF_CHECKSUM
+*/
+#define RGX_CR_SHG_BIF_CHECKSUM                           (0xD1E0U)
+#define RGX_CR_SHG_BIF_CHECKSUM_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SHG_BIF_CHECKSUM_VALUE_SHIFT               (0U)
+#define RGX_CR_SHG_BIF_CHECKSUM_VALUE_CLRMSK              (0x00000000U)
+
+
+/*
+    Register RGX_CR_SHG_FE_BE_CHECKSUM
+*/
+#define RGX_CR_SHG_FE_BE_CHECKSUM                         (0xD1E8U)
+#define RGX_CR_SHG_FE_BE_CHECKSUM_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SHG_FE_BE_CHECKSUM_VALUE_SHIFT             (0U)
+#define RGX_CR_SHG_FE_BE_CHECKSUM_VALUE_CLRMSK            (0x00000000U)
+
+
+/*
+    Register DPX_CR_BF_PERF
+*/
+#define DPX_CR_BF_PERF                                    (0xC458U)
+#define DPX_CR_BF_PERF_MASKFULL                           (IMG_UINT64_C(0x000000000000001F))
+#define DPX_CR_BF_PERF_CLR_3_SHIFT                        (4U)
+#define DPX_CR_BF_PERF_CLR_3_CLRMSK                       (0xFFFFFFEFU)
+#define DPX_CR_BF_PERF_CLR_3_EN                           (0x00000010U)
+#define DPX_CR_BF_PERF_CLR_2_SHIFT                        (3U)
+#define DPX_CR_BF_PERF_CLR_2_CLRMSK                       (0xFFFFFFF7U)
+#define DPX_CR_BF_PERF_CLR_2_EN                           (0x00000008U)
+#define DPX_CR_BF_PERF_CLR_1_SHIFT                        (2U)
+#define DPX_CR_BF_PERF_CLR_1_CLRMSK                       (0xFFFFFFFBU)
+#define DPX_CR_BF_PERF_CLR_1_EN                           (0x00000004U)
+#define DPX_CR_BF_PERF_CLR_0_SHIFT                        (1U)
+#define DPX_CR_BF_PERF_CLR_0_CLRMSK                       (0xFFFFFFFDU)
+#define DPX_CR_BF_PERF_CLR_0_EN                           (0x00000002U)
+#define DPX_CR_BF_PERF_CTRL_ENABLE_SHIFT                  (0U)
+#define DPX_CR_BF_PERF_CTRL_ENABLE_CLRMSK                 (0xFFFFFFFEU)
+#define DPX_CR_BF_PERF_CTRL_ENABLE_EN                     (0x00000001U)
+
+
+/*
+    Register DPX_CR_BF_PERF_SELECT0
+*/
+#define DPX_CR_BF_PERF_SELECT0                            (0xC460U)
+#define DPX_CR_BF_PERF_SELECT0_MASKFULL                   (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_SHIFT            (48U)
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_CLRMSK           (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_SHIFT            (32U)
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_CLRMSK           (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define DPX_CR_BF_PERF_SELECT0_MODE_SHIFT                 (21U)
+#define DPX_CR_BF_PERF_SELECT0_MODE_CLRMSK                (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define DPX_CR_BF_PERF_SELECT0_MODE_EN                    (IMG_UINT64_C(0x0000000000200000))
+#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_SHIFT         (16U)
+#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define DPX_CR_BF_PERF_SELECT0_BIT_SELECT_SHIFT           (0U)
+#define DPX_CR_BF_PERF_SELECT0_BIT_SELECT_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register DPX_CR_BF_PERF_COUNTER_0
+*/
+#define DPX_CR_BF_PERF_COUNTER_0                          (0xC488U)
+#define DPX_CR_BF_PERF_COUNTER_0_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_BF_PERF_COUNTER_0_REG_SHIFT                (0U)
+#define DPX_CR_BF_PERF_COUNTER_0_REG_CLRMSK               (0x00000000U)
+
+
+/*
+    Register DPX_CR_BT_PERF
+*/
+#define DPX_CR_BT_PERF                                    (0xC3D0U)
+#define DPX_CR_BT_PERF_MASKFULL                           (IMG_UINT64_C(0x000000000000001F))
+#define DPX_CR_BT_PERF_CLR_3_SHIFT                        (4U)
+#define DPX_CR_BT_PERF_CLR_3_CLRMSK                       (0xFFFFFFEFU)
+#define DPX_CR_BT_PERF_CLR_3_EN                           (0x00000010U)
+#define DPX_CR_BT_PERF_CLR_2_SHIFT                        (3U)
+#define DPX_CR_BT_PERF_CLR_2_CLRMSK                       (0xFFFFFFF7U)
+#define DPX_CR_BT_PERF_CLR_2_EN                           (0x00000008U)
+#define DPX_CR_BT_PERF_CLR_1_SHIFT                        (2U)
+#define DPX_CR_BT_PERF_CLR_1_CLRMSK                       (0xFFFFFFFBU)
+#define DPX_CR_BT_PERF_CLR_1_EN                           (0x00000004U)
+#define DPX_CR_BT_PERF_CLR_0_SHIFT                        (1U)
+#define DPX_CR_BT_PERF_CLR_0_CLRMSK                       (0xFFFFFFFDU)
+#define DPX_CR_BT_PERF_CLR_0_EN                           (0x00000002U)
+#define DPX_CR_BT_PERF_CTRL_ENABLE_SHIFT                  (0U)
+#define DPX_CR_BT_PERF_CTRL_ENABLE_CLRMSK                 (0xFFFFFFFEU)
+#define DPX_CR_BT_PERF_CTRL_ENABLE_EN                     (0x00000001U)
+
+
+/*
+    Register DPX_CR_BT_PERF_SELECT0
+*/
+#define DPX_CR_BT_PERF_SELECT0                            (0xC3D8U)
+#define DPX_CR_BT_PERF_SELECT0_MASKFULL                   (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_SHIFT            (48U)
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_CLRMSK           (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_SHIFT            (32U)
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_CLRMSK           (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define DPX_CR_BT_PERF_SELECT0_MODE_SHIFT                 (21U)
+#define DPX_CR_BT_PERF_SELECT0_MODE_CLRMSK                (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define DPX_CR_BT_PERF_SELECT0_MODE_EN                    (IMG_UINT64_C(0x0000000000200000))
+#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_SHIFT         (16U)
+#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_SHIFT           (0U)
+#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register DPX_CR_BT_PERF_COUNTER_0
+*/
+#define DPX_CR_BT_PERF_COUNTER_0                          (0xC420U)
+#define DPX_CR_BT_PERF_COUNTER_0_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_BT_PERF_COUNTER_0_REG_SHIFT                (0U)
+#define DPX_CR_BT_PERF_COUNTER_0_REG_CLRMSK               (0x00000000U)
+
+
+/*
+    Register DPX_CR_RQ_USC_DEBUG
+*/
+#define DPX_CR_RQ_USC_DEBUG                               (0xC110U)
+#define DPX_CR_RQ_USC_DEBUG_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_SHIFT                (0U)
+#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_CLRMSK               (IMG_UINT64_C(0xFFFFFFFF00000000))
+
+
+/*
+    Register DPX_CR_BIF_FAULT_BANK_MMU_STATUS
+*/
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS                  (0xC5C8U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_MASKFULL         (IMG_UINT64_C(0x000000000000F775))
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_SHIFT   (12U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_CLRMSK  (0xFFFF0FFFU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_SHIFT  (8U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_SHIFT  (5U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_SHIFT   (4U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_CLRMSK  (0xFFFFFFEFU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_EN      (0x00000010U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_EN (0x00000004U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_SHIFT      (0U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_CLRMSK     (0xFFFFFFFEU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_EN         (0x00000001U)
+
+
+/*
+    Register DPX_CR_BIF_FAULT_BANK_REQ_STATUS
+*/
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS                  (0xC5D0U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_MASKFULL         (IMG_UINT64_C(0x03FFFFFFFFFFFFF0))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_SHIFT        (57U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_CLRMSK       (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_EN           (IMG_UINT64_C(0x0200000000000000))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_SHIFT     (44U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_CLRMSK    (IMG_UINT64_C(0xFE000FFFFFFFFFFF))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_SHIFT     (40U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_CLRMSK    (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_SHIFT    (4U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_CLRMSK   (IMG_UINT64_C(0xFFFFFF000000000F))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
+
+
+/*
+    Register DPX_CR_BIF_MMU_STATUS
+*/
+#define DPX_CR_BIF_MMU_STATUS                             (0xC5D8U)
+#define DPX_CR_BIF_MMU_STATUS_MASKFULL                    (IMG_UINT64_C(0x000000000FFFFFF7))
+#define DPX_CR_BIF_MMU_STATUS_PC_DATA_SHIFT               (20U)
+#define DPX_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK              (0xF00FFFFFU)
+#define DPX_CR_BIF_MMU_STATUS_PD_DATA_SHIFT               (12U)
+#define DPX_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK              (0xFFF00FFFU)
+#define DPX_CR_BIF_MMU_STATUS_PT_DATA_SHIFT               (4U)
+#define DPX_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK              (0xFFFFF00FU)
+#define DPX_CR_BIF_MMU_STATUS_STALLED_SHIFT               (2U)
+#define DPX_CR_BIF_MMU_STATUS_STALLED_CLRMSK              (0xFFFFFFFBU)
+#define DPX_CR_BIF_MMU_STATUS_STALLED_EN                  (0x00000004U)
+#define DPX_CR_BIF_MMU_STATUS_PAUSED_SHIFT                (1U)
+#define DPX_CR_BIF_MMU_STATUS_PAUSED_CLRMSK               (0xFFFFFFFDU)
+#define DPX_CR_BIF_MMU_STATUS_PAUSED_EN                   (0x00000002U)
+#define DPX_CR_BIF_MMU_STATUS_BUSY_SHIFT                  (0U)
+#define DPX_CR_BIF_MMU_STATUS_BUSY_CLRMSK                 (0xFFFFFFFEU)
+#define DPX_CR_BIF_MMU_STATUS_BUSY_EN                     (0x00000001U)
+
+
+/*
+    Register DPX_CR_RT_PERF
+*/
+#define DPX_CR_RT_PERF                                    (0xC700U)
+#define DPX_CR_RT_PERF_MASKFULL                           (IMG_UINT64_C(0x000000000000001F))
+#define DPX_CR_RT_PERF_CLR_3_SHIFT                        (4U)
+#define DPX_CR_RT_PERF_CLR_3_CLRMSK                       (0xFFFFFFEFU)
+#define DPX_CR_RT_PERF_CLR_3_EN                           (0x00000010U)
+#define DPX_CR_RT_PERF_CLR_2_SHIFT                        (3U)
+#define DPX_CR_RT_PERF_CLR_2_CLRMSK                       (0xFFFFFFF7U)
+#define DPX_CR_RT_PERF_CLR_2_EN                           (0x00000008U)
+#define DPX_CR_RT_PERF_CLR_1_SHIFT                        (2U)
+#define DPX_CR_RT_PERF_CLR_1_CLRMSK                       (0xFFFFFFFBU)
+#define DPX_CR_RT_PERF_CLR_1_EN                           (0x00000004U)
+#define DPX_CR_RT_PERF_CLR_0_SHIFT                        (1U)
+#define DPX_CR_RT_PERF_CLR_0_CLRMSK                       (0xFFFFFFFDU)
+#define DPX_CR_RT_PERF_CLR_0_EN                           (0x00000002U)
+#define DPX_CR_RT_PERF_CTRL_ENABLE_SHIFT                  (0U)
+#define DPX_CR_RT_PERF_CTRL_ENABLE_CLRMSK                 (0xFFFFFFFEU)
+#define DPX_CR_RT_PERF_CTRL_ENABLE_EN                     (0x00000001U)
+
+
+/*
+    Register DPX_CR_RT_PERF_SELECT0
+*/
+#define DPX_CR_RT_PERF_SELECT0                            (0xC708U)
+#define DPX_CR_RT_PERF_SELECT0_MASKFULL                   (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_SHIFT            (48U)
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_CLRMSK           (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_SHIFT            (32U)
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_CLRMSK           (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define DPX_CR_RT_PERF_SELECT0_MODE_SHIFT                 (21U)
+#define DPX_CR_RT_PERF_SELECT0_MODE_CLRMSK                (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define DPX_CR_RT_PERF_SELECT0_MODE_EN                    (IMG_UINT64_C(0x0000000000200000))
+#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_SHIFT         (16U)
+#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_SHIFT           (0U)
+#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register DPX_CR_RT_PERF_COUNTER_0
+*/
+#define DPX_CR_RT_PERF_COUNTER_0                          (0xC730U)
+#define DPX_CR_RT_PERF_COUNTER_0_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_RT_PERF_COUNTER_0_REG_SHIFT                (0U)
+#define DPX_CR_RT_PERF_COUNTER_0_REG_CLRMSK               (0x00000000U)
+
+
+/*
+    Register DPX_CR_BX_TU_PERF
+*/
+#define DPX_CR_BX_TU_PERF                                 (0xC908U)
+#define DPX_CR_BX_TU_PERF_MASKFULL                        (IMG_UINT64_C(0x000000000000001F))
+#define DPX_CR_BX_TU_PERF_CLR_3_SHIFT                     (4U)
+#define DPX_CR_BX_TU_PERF_CLR_3_CLRMSK                    (0xFFFFFFEFU)
+#define DPX_CR_BX_TU_PERF_CLR_3_EN                        (0x00000010U)
+#define DPX_CR_BX_TU_PERF_CLR_2_SHIFT                     (3U)
+#define DPX_CR_BX_TU_PERF_CLR_2_CLRMSK                    (0xFFFFFFF7U)
+#define DPX_CR_BX_TU_PERF_CLR_2_EN                        (0x00000008U)
+#define DPX_CR_BX_TU_PERF_CLR_1_SHIFT                     (2U)
+#define DPX_CR_BX_TU_PERF_CLR_1_CLRMSK                    (0xFFFFFFFBU)
+#define DPX_CR_BX_TU_PERF_CLR_1_EN                        (0x00000004U)
+#define DPX_CR_BX_TU_PERF_CLR_0_SHIFT                     (1U)
+#define DPX_CR_BX_TU_PERF_CLR_0_CLRMSK                    (0xFFFFFFFDU)
+#define DPX_CR_BX_TU_PERF_CLR_0_EN                        (0x00000002U)
+#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_SHIFT               (0U)
+#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_CLRMSK              (0xFFFFFFFEU)
+#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_EN                  (0x00000001U)
+
+
+/*
+    Register DPX_CR_BX_TU_PERF_SELECT0
+*/
+#define DPX_CR_BX_TU_PERF_SELECT0                         (0xC910U)
+#define DPX_CR_BX_TU_PERF_SELECT0_MASKFULL                (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_SHIFT         (48U)
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_CLRMSK        (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_SHIFT         (32U)
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_CLRMSK        (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define DPX_CR_BX_TU_PERF_SELECT0_MODE_SHIFT              (21U)
+#define DPX_CR_BX_TU_PERF_SELECT0_MODE_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define DPX_CR_BX_TU_PERF_SELECT0_MODE_EN                 (IMG_UINT64_C(0x0000000000200000))
+#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_SHIFT      (16U)
+#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_CLRMSK     (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
+#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_SHIFT        (0U)
+#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_CLRMSK       (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register DPX_CR_BX_TU_PERF_COUNTER_0
+*/
+#define DPX_CR_BX_TU_PERF_COUNTER_0                       (0xC938U)
+#define DPX_CR_BX_TU_PERF_COUNTER_0_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_SHIFT             (0U)
+#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_CLRMSK            (0x00000000U)
+
+
+/*
+    Register DPX_CR_RS_PDS_RR_CHECKSUM
+*/
+#define DPX_CR_RS_PDS_RR_CHECKSUM                         (0xC0F0U)
+#define DPX_CR_RS_PDS_RR_CHECKSUM_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_SHIFT             (0U)
+#define DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_CLRMSK            (IMG_UINT64_C(0xFFFFFFFF00000000))
+
+
+/*
+    Register RGX_CR_MMU_CBASE_MAPPING_CONTEXT
+*/
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT                  (0xE140U)
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_MASKFULL         (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_SHIFT         (0U)
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK        (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_MMU_CBASE_MAPPING
+*/
+#define RGX_CR_MMU_CBASE_MAPPING                          (0xE148U)
+#define RGX_CR_MMU_CBASE_MAPPING_MASKFULL                 (IMG_UINT64_C(0x000000000FFFFFFF))
+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT          (0U)
+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK         (0xF0000000U)
+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT     (12U)
+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSIZE      (4096U)
+
+
+/*
+    Register RGX_CR_MMU_FAULT_STATUS
+*/
+#define RGX_CR_MMU_FAULT_STATUS                           (0xE150U)
+#define RGX_CR_MMU_FAULT_STATUS_MASKFULL                  (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT             (28U)
+#define RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK            (IMG_UINT64_C(0x000000000FFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT             (20U)
+#define RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK            (IMG_UINT64_C(0xFFFFFFFFF00FFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT              (12U)
+#define RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFFF00FFF))
+#define RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT              (6U)
+#define RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFFFFF03F))
+#define RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT               (4U)
+#define RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF))
+#define RGX_CR_MMU_FAULT_STATUS_RNW_SHIFT                 (3U)
+#define RGX_CR_MMU_FAULT_STATUS_RNW_CLRMSK                (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_CR_MMU_FAULT_STATUS_RNW_EN                    (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT                (1U)
+#define RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK               (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9))
+#define RGX_CR_MMU_FAULT_STATUS_FAULT_SHIFT               (0U)
+#define RGX_CR_MMU_FAULT_STATUS_FAULT_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_MMU_FAULT_STATUS_FAULT_EN                  (IMG_UINT64_C(0x0000000000000001))
+
+
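+/* Illustrative sketch (assumption, not generated content): decoding a fault
+ * uses the complement of _CLRMSK to isolate a field before shifting it down.
+ * For example, to test for a fault and extract the faulting address bits
+ * (reg_read64() is a hypothetical accessor):
+ *
+ *   uint64_t ui64Status = reg_read64(RGX_CR_MMU_FAULT_STATUS);
+ *   if (ui64Status & RGX_CR_MMU_FAULT_STATUS_FAULT_EN)
+ *   {
+ *       uint64_t ui64Addr =
+ *           (ui64Status & ~RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK)
+ *               >> RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT;
+ *       ...
+ *   }
+ */
+
+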
+/*
+    Register RGX_CR_MMU_FAULT_STATUS_META
+*/
+#define RGX_CR_MMU_FAULT_STATUS_META                      (0xE158U)
+#define RGX_CR_MMU_FAULT_STATUS_META_MASKFULL             (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT        (28U)
+#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK       (IMG_UINT64_C(0x000000000FFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT        (20U)
+#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK       (IMG_UINT64_C(0xFFFFFFFFF00FFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_SHIFT         (12U)
+#define RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFF00FFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT         (6U)
+#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFFFF03F))
+#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT          (4U)
+#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF))
+#define RGX_CR_MMU_FAULT_STATUS_META_RNW_SHIFT            (3U)
+#define RGX_CR_MMU_FAULT_STATUS_META_RNW_CLRMSK           (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_CR_MMU_FAULT_STATUS_META_RNW_EN               (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT           (1U)
+#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9))
+#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_SHIFT          (0U)
+#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN             (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_SLC3_CTRL_MISC
+*/
+#define RGX_CR_SLC3_CTRL_MISC                             (0xE200U)
+#define RGX_CR_SLC3_CTRL_MISC_MASKFULL                    (IMG_UINT64_C(0x0000000000000107))
+#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_SHIFT        (8U)
+#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_CLRMSK       (0xFFFFFEFFU)
+#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_EN           (0x00000100U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SHIFT      (0U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK     (0xFFFFFFF8U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_LINEAR     (0x00000000U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_IN_PAGE_HASH (0x00000001U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_FIXED_PVR_HASH (0x00000002U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SCRAMBLE_PVR_HASH (0x00000003U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_WEAVED_HASH (0x00000004U)
+
+
+/*
+    Register RGX_CR_SLC3_SCRAMBLE
+*/
+#define RGX_CR_SLC3_SCRAMBLE                              (0xE208U)
+#define RGX_CR_SLC3_SCRAMBLE_MASKFULL                     (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_SCRAMBLE_BITS_SHIFT                   (0U)
+#define RGX_CR_SLC3_SCRAMBLE_BITS_CLRMSK                  (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_SLC3_SCRAMBLE2
+*/
+#define RGX_CR_SLC3_SCRAMBLE2                             (0xE210U)
+#define RGX_CR_SLC3_SCRAMBLE2_MASKFULL                    (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_SCRAMBLE2_BITS_SHIFT                  (0U)
+#define RGX_CR_SLC3_SCRAMBLE2_BITS_CLRMSK                 (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_SLC3_SCRAMBLE3
+*/
+#define RGX_CR_SLC3_SCRAMBLE3                             (0xE218U)
+#define RGX_CR_SLC3_SCRAMBLE3_MASKFULL                    (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_SCRAMBLE3_BITS_SHIFT                  (0U)
+#define RGX_CR_SLC3_SCRAMBLE3_BITS_CLRMSK                 (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_SLC3_SCRAMBLE4
+*/
+#define RGX_CR_SLC3_SCRAMBLE4                             (0xE260U)
+#define RGX_CR_SLC3_SCRAMBLE4_MASKFULL                    (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_SCRAMBLE4_BITS_SHIFT                  (0U)
+#define RGX_CR_SLC3_SCRAMBLE4_BITS_CLRMSK                 (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+    Register RGX_CR_SLC3_STATUS
+*/
+#define RGX_CR_SLC3_STATUS                                (0xE220U)
+#define RGX_CR_SLC3_STATUS_MASKFULL                       (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_STATUS_WRITES1_SHIFT                  (48U)
+#define RGX_CR_SLC3_STATUS_WRITES1_CLRMSK                 (IMG_UINT64_C(0x0000FFFFFFFFFFFF))
+#define RGX_CR_SLC3_STATUS_WRITES0_SHIFT                  (32U)
+#define RGX_CR_SLC3_STATUS_WRITES0_CLRMSK                 (IMG_UINT64_C(0xFFFF0000FFFFFFFF))
+#define RGX_CR_SLC3_STATUS_READS1_SHIFT                   (16U)
+#define RGX_CR_SLC3_STATUS_READS1_CLRMSK                  (IMG_UINT64_C(0xFFFFFFFF0000FFFF))
+#define RGX_CR_SLC3_STATUS_READS0_SHIFT                   (0U)
+#define RGX_CR_SLC3_STATUS_READS0_CLRMSK                  (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+    Register RGX_CR_SLC3_IDLE
+*/
+#define RGX_CR_SLC3_IDLE                                  (0xE228U)
+#define RGX_CR_SLC3_IDLE_MASKFULL                         (IMG_UINT64_C(0x00000000000FFFFF))
+#define RGX_CR_SLC3_IDLE_ORDERQ_DUST2_SHIFT               (18U)
+#define RGX_CR_SLC3_IDLE_ORDERQ_DUST2_CLRMSK              (0xFFF3FFFFU)
+#define RGX_CR_SLC3_IDLE_MMU_SHIFT                        (17U)
+#define RGX_CR_SLC3_IDLE_MMU_CLRMSK                       (0xFFFDFFFFU)
+#define RGX_CR_SLC3_IDLE_MMU_EN                           (0x00020000U)
+#define RGX_CR_SLC3_IDLE_RDI_SHIFT                        (16U)
+#define RGX_CR_SLC3_IDLE_RDI_CLRMSK                       (0xFFFEFFFFU)
+#define RGX_CR_SLC3_IDLE_RDI_EN                           (0x00010000U)
+#define RGX_CR_SLC3_IDLE_IMGBV4_SHIFT                     (12U)
+#define RGX_CR_SLC3_IDLE_IMGBV4_CLRMSK                    (0xFFFF0FFFU)
+#define RGX_CR_SLC3_IDLE_CACHE_BANKS_SHIFT                (4U)
+#define RGX_CR_SLC3_IDLE_CACHE_BANKS_CLRMSK               (0xFFFFF00FU)
+#define RGX_CR_SLC3_IDLE_ORDERQ_DUST_SHIFT                (2U)
+#define RGX_CR_SLC3_IDLE_ORDERQ_DUST_CLRMSK               (0xFFFFFFF3U)
+#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_SHIFT               (1U)
+#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_CLRMSK              (0xFFFFFFFDU)
+#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_EN                  (0x00000002U)
+#define RGX_CR_SLC3_IDLE_XBAR_SHIFT                       (0U)
+#define RGX_CR_SLC3_IDLE_XBAR_CLRMSK                      (0xFFFFFFFEU)
+#define RGX_CR_SLC3_IDLE_XBAR_EN                          (0x00000001U)
+
+
+/*
+    Register RGX_CR_SLC3_FAULT_STOP_STATUS
+*/
+#define RGX_CR_SLC3_FAULT_STOP_STATUS                     (0xE248U)
+#define RGX_CR_SLC3_FAULT_STOP_STATUS_MASKFULL            (IMG_UINT64_C(0x0000000000001FFF))
+#define RGX_CR_SLC3_FAULT_STOP_STATUS_BIF_SHIFT           (0U)
+#define RGX_CR_SLC3_FAULT_STOP_STATUS_BIF_CLRMSK          (0xFFFFE000U)
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_STORE_MODE
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_MODE                     (0xF048U)
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MASKFULL            (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_SHIFT          (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_CLRMSK         (0xFFFFFFFCU)
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INDEX          (0x00000000U)
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INSTANCE       (0x00000001U)
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_LIST           (0x00000002U)
+
+
+/*
+    Register RGX_CR_CONTEXT_MAPPING0
+*/
+#define RGX_CR_CONTEXT_MAPPING0                           (0xF078U)
+#define RGX_CR_CONTEXT_MAPPING0_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_CONTEXT_MAPPING0_2D_SHIFT                  (24U)
+#define RGX_CR_CONTEXT_MAPPING0_2D_CLRMSK                 (0x00FFFFFFU)
+#define RGX_CR_CONTEXT_MAPPING0_CDM_SHIFT                 (16U)
+#define RGX_CR_CONTEXT_MAPPING0_CDM_CLRMSK                (0xFF00FFFFU)
+#define RGX_CR_CONTEXT_MAPPING0_3D_SHIFT                  (8U)
+#define RGX_CR_CONTEXT_MAPPING0_3D_CLRMSK                 (0xFFFF00FFU)
+#define RGX_CR_CONTEXT_MAPPING0_TA_SHIFT                  (0U)
+#define RGX_CR_CONTEXT_MAPPING0_TA_CLRMSK                 (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_CONTEXT_MAPPING1
+*/
+#define RGX_CR_CONTEXT_MAPPING1                           (0xF080U)
+#define RGX_CR_CONTEXT_MAPPING1_MASKFULL                  (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_CONTEXT_MAPPING1_HOST_SHIFT                (8U)
+#define RGX_CR_CONTEXT_MAPPING1_HOST_CLRMSK               (0xFFFF00FFU)
+#define RGX_CR_CONTEXT_MAPPING1_TLA_SHIFT                 (0U)
+#define RGX_CR_CONTEXT_MAPPING1_TLA_CLRMSK                (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_CONTEXT_MAPPING2
+*/
+#define RGX_CR_CONTEXT_MAPPING2                           (0xF088U)
+#define RGX_CR_CONTEXT_MAPPING2_MASKFULL                  (IMG_UINT64_C(0x0000000000FFFFFF))
+#define RGX_CR_CONTEXT_MAPPING2_ALIST0_SHIFT              (16U)
+#define RGX_CR_CONTEXT_MAPPING2_ALIST0_CLRMSK             (0xFF00FFFFU)
+#define RGX_CR_CONTEXT_MAPPING2_TE0_SHIFT                 (8U)
+#define RGX_CR_CONTEXT_MAPPING2_TE0_CLRMSK                (0xFFFF00FFU)
+#define RGX_CR_CONTEXT_MAPPING2_VCE0_SHIFT                (0U)
+#define RGX_CR_CONTEXT_MAPPING2_VCE0_CLRMSK               (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_CONTEXT_MAPPING3
+*/
+#define RGX_CR_CONTEXT_MAPPING3                           (0xF090U)
+#define RGX_CR_CONTEXT_MAPPING3_MASKFULL                  (IMG_UINT64_C(0x0000000000FFFFFF))
+#define RGX_CR_CONTEXT_MAPPING3_ALIST1_SHIFT              (16U)
+#define RGX_CR_CONTEXT_MAPPING3_ALIST1_CLRMSK             (0xFF00FFFFU)
+#define RGX_CR_CONTEXT_MAPPING3_TE1_SHIFT                 (8U)
+#define RGX_CR_CONTEXT_MAPPING3_TE1_CLRMSK                (0xFFFF00FFU)
+#define RGX_CR_CONTEXT_MAPPING3_VCE1_SHIFT                (0U)
+#define RGX_CR_CONTEXT_MAPPING3_VCE1_CLRMSK               (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_BIF_JONES_OUTSTANDING_READ
+*/
+#define RGX_CR_BIF_JONES_OUTSTANDING_READ                 (0xF098U)
+#define RGX_CR_BIF_JONES_OUTSTANDING_READ_MASKFULL        (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_SHIFT   (0U)
+#define RGX_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_CLRMSK  (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ
+*/
+#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ            (0xF0A0U)
+#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_MASKFULL   (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_SHIFT (0U)
+#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_BIF_DUST_OUTSTANDING_READ
+*/
+#define RGX_CR_BIF_DUST_OUTSTANDING_READ                  (0xF0A8U)
+#define RGX_CR_BIF_DUST_OUTSTANDING_READ_MASKFULL         (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_SHIFT    (0U)
+#define RGX_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_CLRMSK   (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_JONES_FIX
+*/
+#define RGX_CR_JONES_FIX                                  (0xF0C0U)
+#define RGX_CR_JONES_FIX__ROGUE3__MASKFULL                (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_JONES_FIX_MASKFULL                         (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_JONES_FIX_DISABLE_SHIFT                    (0U)
+#define RGX_CR_JONES_FIX_DISABLE_CLRMSK                   (0xFFFF0000U)
+
+
+/*
+    Register RGX_CR_CONTEXT_MAPPING4
+*/
+#define RGX_CR_CONTEXT_MAPPING4                           (0xF210U)
+#define RGX_CR_CONTEXT_MAPPING4_MASKFULL                  (IMG_UINT64_C(0x0000FFFFFFFFFFFF))
+#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_SHIFT        (40U)
+#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_CLRMSK       (IMG_UINT64_C(0xFFFF00FFFFFFFFFF))
+#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_SHIFT          (32U)
+#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_CLRMSK         (IMG_UINT64_C(0xFFFFFF00FFFFFFFF))
+#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_SHIFT           (24U)
+#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_CLRMSK          (IMG_UINT64_C(0xFFFFFFFF00FFFFFF))
+#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_SHIFT        (16U)
+#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_CLRMSK       (IMG_UINT64_C(0xFFFFFFFFFF00FFFF))
+#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_SHIFT          (8U)
+#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_CLRMSK         (IMG_UINT64_C(0xFFFFFFFFFFFF00FF))
+#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_SHIFT           (0U)
+#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+    Register RGX_CR_MULTICORE_GPU
+*/
+#define RGX_CR_MULTICORE_GPU                              (0xF300U)
+#define RGX_CR_MULTICORE_GPU_MASKFULL                     (IMG_UINT64_C(0x000000000000007F))
+#define RGX_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_SHIFT    (6U)
+#define RGX_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_CLRMSK   (0xFFFFFFBFU)
+#define RGX_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_EN       (0x00000040U)
+#define RGX_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_SHIFT    (5U)
+#define RGX_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_CLRMSK   (0xFFFFFFDFU)
+#define RGX_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_EN       (0x00000020U)
+#define RGX_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_SHIFT     (4U)
+#define RGX_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_CLRMSK    (0xFFFFFFEFU)
+#define RGX_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_EN        (0x00000010U)
+#define RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_SHIFT     (3U)
+#define RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_CLRMSK    (0xFFFFFFF7U)
+#define RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_EN        (0x00000008U)
+#define RGX_CR_MULTICORE_GPU_ID_SHIFT                     (0U)
+#define RGX_CR_MULTICORE_GPU_ID_CLRMSK                    (0xFFFFFFF8U)
+
+
+/*
+    Register RGX_CR_MULTICORE_SYSTEM
+*/
+#define RGX_CR_MULTICORE_SYSTEM                           (0xF308U)
+#define RGX_CR_MULTICORE_SYSTEM_MASKFULL                  (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_SHIFT           (0U)
+#define RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_CLRMSK          (0xFFFFFFF0U)
+
+
+/*
+    Register RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON
+*/
+#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON             (0xF310U)
+#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_MASKFULL    (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_TYPE_SHIFT (30U)
+#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK (0x3FFFFFFFU)
+#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT (8U)
+#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK (0xC00000FFU)
+#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_GPU_ENABLE_SHIFT (0U)
+#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_GPU_ENABLE_CLRMSK (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON
+*/
+#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON             (0xF320U)
+#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_MASKFULL    (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_TYPE_SHIFT (30U)
+#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK (0x3FFFFFFFU)
+#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT (8U)
+#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK (0xC00000FFU)
+#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_GPU_ENABLE_SHIFT (0U)
+#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_GPU_ENABLE_CLRMSK (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON
+*/
+#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON              (0xF330U)
+#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_MASKFULL     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_TYPE_SHIFT (30U)
+#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK (0x3FFFFFFFU)
+#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT (8U)
+#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK (0xC00000FFU)
+#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_SHIFT (0U)
+#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_CLRMSK (0xFFFFFF00U)
+
+
+/*
+    Register RGX_CR_ECC_RAM_ERR_INJ
+*/
+#define RGX_CR_ECC_RAM_ERR_INJ                            (0xF340U)
+#define RGX_CR_ECC_RAM_ERR_INJ_MASKFULL                   (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_SHIFT         (4U)
+#define RGX_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_CLRMSK        (0xFFFFFFEFU)
+#define RGX_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_EN            (0x00000010U)
+#define RGX_CR_ECC_RAM_ERR_INJ_USC_SHIFT                  (3U)
+#define RGX_CR_ECC_RAM_ERR_INJ_USC_CLRMSK                 (0xFFFFFFF7U)
+#define RGX_CR_ECC_RAM_ERR_INJ_USC_EN                     (0x00000008U)
+#define RGX_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_SHIFT           (2U)
+#define RGX_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_CLRMSK          (0xFFFFFFFBU)
+#define RGX_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_EN              (0x00000004U)
+#define RGX_CR_ECC_RAM_ERR_INJ_RASCAL_SHIFT               (1U)
+#define RGX_CR_ECC_RAM_ERR_INJ_RASCAL_CLRMSK              (0xFFFFFFFDU)
+#define RGX_CR_ECC_RAM_ERR_INJ_RASCAL_EN                  (0x00000002U)
+#define RGX_CR_ECC_RAM_ERR_INJ_MARS_SHIFT                 (0U)
+#define RGX_CR_ECC_RAM_ERR_INJ_MARS_CLRMSK                (0xFFFFFFFEU)
+#define RGX_CR_ECC_RAM_ERR_INJ_MARS_EN                    (0x00000001U)
+
+
+/*
+    Register RGX_CR_ECC_RAM_INIT_KICK
+*/
+#define RGX_CR_ECC_RAM_INIT_KICK                          (0xF348U)
+#define RGX_CR_ECC_RAM_INIT_KICK_MASKFULL                 (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_ECC_RAM_INIT_KICK_SLC_SIDEKICK_SHIFT       (4U)
+#define RGX_CR_ECC_RAM_INIT_KICK_SLC_SIDEKICK_CLRMSK      (0xFFFFFFEFU)
+#define RGX_CR_ECC_RAM_INIT_KICK_SLC_SIDEKICK_EN          (0x00000010U)
+#define RGX_CR_ECC_RAM_INIT_KICK_USC_SHIFT                (3U)
+#define RGX_CR_ECC_RAM_INIT_KICK_USC_CLRMSK               (0xFFFFFFF7U)
+#define RGX_CR_ECC_RAM_INIT_KICK_USC_EN                   (0x00000008U)
+#define RGX_CR_ECC_RAM_INIT_KICK_TPU_MCU_L0_SHIFT         (2U)
+#define RGX_CR_ECC_RAM_INIT_KICK_TPU_MCU_L0_CLRMSK        (0xFFFFFFFBU)
+#define RGX_CR_ECC_RAM_INIT_KICK_TPU_MCU_L0_EN            (0x00000004U)
+#define RGX_CR_ECC_RAM_INIT_KICK_RASCAL_SHIFT             (1U)
+#define RGX_CR_ECC_RAM_INIT_KICK_RASCAL_CLRMSK            (0xFFFFFFFDU)
+#define RGX_CR_ECC_RAM_INIT_KICK_RASCAL_EN                (0x00000002U)
+#define RGX_CR_ECC_RAM_INIT_KICK_MARS_SHIFT               (0U)
+#define RGX_CR_ECC_RAM_INIT_KICK_MARS_CLRMSK              (0xFFFFFFFEU)
+#define RGX_CR_ECC_RAM_INIT_KICK_MARS_EN                  (0x00000001U)
+
+
+/*
+    Register RGX_CR_ECC_RAM_INIT_DONE
+*/
+#define RGX_CR_ECC_RAM_INIT_DONE                          (0xF350U)
+#define RGX_CR_ECC_RAM_INIT_DONE_MASKFULL                 (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_ECC_RAM_INIT_DONE_SLC_SIDEKICK_SHIFT       (4U)
+#define RGX_CR_ECC_RAM_INIT_DONE_SLC_SIDEKICK_CLRMSK      (0xFFFFFFEFU)
+#define RGX_CR_ECC_RAM_INIT_DONE_SLC_SIDEKICK_EN          (0x00000010U)
+#define RGX_CR_ECC_RAM_INIT_DONE_USC_SHIFT                (3U)
+#define RGX_CR_ECC_RAM_INIT_DONE_USC_CLRMSK               (0xFFFFFFF7U)
+#define RGX_CR_ECC_RAM_INIT_DONE_USC_EN                   (0x00000008U)
+#define RGX_CR_ECC_RAM_INIT_DONE_TPU_MCU_L0_SHIFT         (2U)
+#define RGX_CR_ECC_RAM_INIT_DONE_TPU_MCU_L0_CLRMSK        (0xFFFFFFFBU)
+#define RGX_CR_ECC_RAM_INIT_DONE_TPU_MCU_L0_EN            (0x00000004U)
+#define RGX_CR_ECC_RAM_INIT_DONE_RASCAL_SHIFT             (1U)
+#define RGX_CR_ECC_RAM_INIT_DONE_RASCAL_CLRMSK            (0xFFFFFFFDU)
+#define RGX_CR_ECC_RAM_INIT_DONE_RASCAL_EN                (0x00000002U)
+#define RGX_CR_ECC_RAM_INIT_DONE_MARS_SHIFT               (0U)
+#define RGX_CR_ECC_RAM_INIT_DONE_MARS_CLRMSK              (0xFFFFFFFEU)
+#define RGX_CR_ECC_RAM_INIT_DONE_MARS_EN                  (0x00000001U)
+
+
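+/* Illustrative sketch (assumption, not generated content): the register names
+ * suggest ECC RAM initialisation is requested via INIT_KICK and completion is
+ * reported via INIT_DONE, e.g. (reg_read32()/reg_write32() hypothetical):
+ *
+ *   reg_write32(RGX_CR_ECC_RAM_INIT_KICK, RGX_CR_ECC_RAM_INIT_KICK_MASKFULL);
+ *   while ((reg_read32(RGX_CR_ECC_RAM_INIT_DONE) &
+ *           RGX_CR_ECC_RAM_INIT_DONE_MASKFULL)
+ *              != RGX_CR_ECC_RAM_INIT_DONE_MASKFULL)
+ *       ;
+ *
+ * A real implementation would bound the poll with a timeout; MASKFULL is used
+ * above only as "all units", individual units can be kicked with the _EN bits.
+ */
+
+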
+/*
+    Register RGX_CR_SAFETY_EVENT_ENABLE
+*/
+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE               (0xF390U)
+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL     (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_SHIFT (7U)
+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_CLRMSK (0xFFFFFF7FU)
+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_EN (0x00000080U)
+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_SHIFT (6U)
+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_CLRMSK (0xFFFFFFBFU)
+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_EN (0x00000040U)
+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT (5U)
+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_EN (0x00000020U)
+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT (4U)
+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_EN (0x00000010U)
+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_SHIFT (3U)
+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_EN  (0x00000008U)
+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_SHIFT (2U)
+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_EN  (0x00000004U)
+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_SHIFT (1U)
+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_EN (0x00000002U)
+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_SHIFT (0U)
+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_EN (0x00000001U)
+
+
+/*
+    Register RGX_CR_SAFETY_EVENT_STATUS
+*/
+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE               (0xF398U)
+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__MASKFULL     (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_LOCKUP_SHIFT (7U)
+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_LOCKUP_CLRMSK (0xFFFFFF7FU)
+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_LOCKUP_EN (0x00000080U)
+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__CPU_PAGE_FAULT_SHIFT (6U)
+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__CPU_PAGE_FAULT_CLRMSK (0xFFFFFFBFU)
+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__CPU_PAGE_FAULT_EN (0x00000040U)
+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT (5U)
+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__SAFE_COMPUTE_FAIL_EN (0x00000020U)
+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT (4U)
+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN (0x00000010U)
+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__TRP_FAIL_SHIFT (3U)
+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__TRP_FAIL_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__TRP_FAIL_EN  (0x00000008U)
+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_SHIFT (2U)
+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN  (0x00000004U)
+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_GPU_SHIFT (1U)
+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_GPU_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_GPU_EN (0x00000002U)
+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_PAGE_FAULT_SHIFT (0U)
+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_PAGE_FAULT_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_PAGE_FAULT_EN (0x00000001U)
+
+
+/*
+    Register RGX_CR_SAFETY_EVENT_CLEAR
+*/
+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE                (0xF3A0U)
+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__MASKFULL      (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_LOCKUP_SHIFT (7U)
+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_LOCKUP_CLRMSK (0xFFFFFF7FU)
+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_LOCKUP_EN (0x00000080U)
+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__CPU_PAGE_FAULT_SHIFT (6U)
+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__CPU_PAGE_FAULT_CLRMSK (0xFFFFFFBFU)
+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__CPU_PAGE_FAULT_EN (0x00000040U)
+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT (5U)
+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__SAFE_COMPUTE_FAIL_EN (0x00000020U)
+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT (4U)
+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__WATCHDOG_TIMEOUT_EN (0x00000010U)
+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__TRP_FAIL_SHIFT (3U)
+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__TRP_FAIL_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__TRP_FAIL_EN   (0x00000008U)
+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_FW_SHIFT (2U)
+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_FW_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_FW_EN   (0x00000004U)
+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_GPU_SHIFT (1U)
+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_GPU_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_GPU_EN  (0x00000002U)
+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_SHIFT (0U)
+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_EN (0x00000001U)
+
+
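+/* Illustrative sketch (assumption, not generated content): the __ROGUEXE
+ * STATUS and CLEAR registers above share one bit layout, which suggests
+ * events are acknowledged by writing the observed status bits back to the
+ * CLEAR register (reg_read32()/reg_write32() hypothetical):
+ *
+ *   uint32_t ui32Events = reg_read32(RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE);
+ *   if (ui32Events & RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN)
+ *   {
+ *       ... handle the firmware fault ...
+ *   }
+ *   reg_write32(RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE, ui32Events);
+ */
+
+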
+/*
+    Register RGX_CR_FAULT_FW_STATUS
+*/
+#define RGX_CR_FAULT_FW_STATUS                            (0xF3B0U)
+#define RGX_CR_FAULT_FW_STATUS_MASKFULL                   (IMG_UINT64_C(0x0000000000010001))
+#define RGX_CR_FAULT_FW_STATUS_CPU_CORRECT_SHIFT          (16U)
+#define RGX_CR_FAULT_FW_STATUS_CPU_CORRECT_CLRMSK         (0xFFFEFFFFU)
+#define RGX_CR_FAULT_FW_STATUS_CPU_CORRECT_EN             (0x00010000U)
+#define RGX_CR_FAULT_FW_STATUS_CPU_DETECT_SHIFT           (0U)
+#define RGX_CR_FAULT_FW_STATUS_CPU_DETECT_CLRMSK          (0xFFFFFFFEU)
+#define RGX_CR_FAULT_FW_STATUS_CPU_DETECT_EN              (0x00000001U)
+
+
+/*
+    Register RGX_CR_FAULT_FW_CLEAR
+*/
+#define RGX_CR_FAULT_FW_CLEAR                             (0xF3B8U)
+#define RGX_CR_FAULT_FW_CLEAR_MASKFULL                    (IMG_UINT64_C(0x0000000000010001))
+#define RGX_CR_FAULT_FW_CLEAR_CPU_CORRECT_SHIFT           (16U)
+#define RGX_CR_FAULT_FW_CLEAR_CPU_CORRECT_CLRMSK          (0xFFFEFFFFU)
+#define RGX_CR_FAULT_FW_CLEAR_CPU_CORRECT_EN              (0x00010000U)
+#define RGX_CR_FAULT_FW_CLEAR_CPU_DETECT_SHIFT            (0U)
+#define RGX_CR_FAULT_FW_CLEAR_CPU_DETECT_CLRMSK           (0xFFFFFFFEU)
+#define RGX_CR_FAULT_FW_CLEAR_CPU_DETECT_EN               (0x00000001U)
+
+
+/*
+    Register RGX_CR_MTS_SAFETY_EVENT_ENABLE
+*/
+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE           (0xF3D8U)
+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_SHIFT (7U)
+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_CLRMSK (0xFFFFFF7FU)
+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_EN (0x00000080U)
+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_SHIFT (6U)
+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_CLRMSK (0xFFFFFFBFU)
+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_EN (0x00000040U)
+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT (5U)
+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_EN (0x00000020U)
+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT (4U)
+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_EN (0x00000010U)
+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_SHIFT (3U)
+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_EN (0x00000008U)
+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_SHIFT (2U)
+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_EN (0x00000004U)
+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_SHIFT (1U)
+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_EN (0x00000002U)
+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_SHIFT (0U)
+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_EN (0x00000001U)
+
+
+#endif /* RGX_CR_DEFS_KM_H */
+/*****************************************************************************
+ End of file (rgx_cr_defs_km.h)
+*****************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/rgxdefs_km.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/rgxdefs_km.h
new file mode 100644 (file)
index 0000000..64f4b36
--- /dev/null
@@ -0,0 +1,338 @@
+/*************************************************************************/ /*!
+@Title          Rogue hw definitions (kernel mode)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXDEFS_KM_H
+#define RGXDEFS_KM_H
+
+#if defined(RGX_BVNC_CORE_KM_HEADER) && defined(RGX_BNC_CONFIG_KM_HEADER)
+#include RGX_BVNC_CORE_KM_HEADER
+#include RGX_BNC_CONFIG_KM_HEADER
+#endif
+
+#define IMG_EXPLICIT_INCLUDE_HWDEFS
+#if defined(__KERNEL__)
+#include "rgx_cr_defs_km.h"
+#endif
+#undef IMG_EXPLICIT_INCLUDE_HWDEFS
+
+#include "rgx_heap_firmware.h"
+
+/* The following macros are picked up through the BVNC headers for NO_HARDWARE
+ * builds, to stay compatible with the old build infrastructure.
+ */
+#if defined(NO_HARDWARE)
+/******************************************************************************
+ * Check for valid B.X.N.C
+ *****************************************************************************/
+#if !defined(RGX_BVNC_KM_B) || !defined(RGX_BVNC_KM_V) || !defined(RGX_BVNC_KM_N) || !defined(RGX_BVNC_KM_C)
+#error "Need to specify BVNC (RGX_BVNC_KM_B, RGX_BVNC_KM_V, RGX_BVNC_KM_N and RGX_BVNC_C)"
+#endif
+
+/* Check core/config compatibility */
+#if (RGX_BVNC_KM_B != RGX_BNC_KM_B) || (RGX_BVNC_KM_N != RGX_BNC_KM_N) || (RGX_BVNC_KM_C != RGX_BNC_KM_C)
+#error "BVNC headers are mismatching (KM core/config)"
+#endif
+#endif
+
+/******************************************************************************
+ * RGX Version name
+ *****************************************************************************/
+#define RGX_BVNC_KM_ST2(S)     #S
+#define RGX_BVNC_KM_ST(S)      RGX_BVNC_KM_ST2(S)
+#define RGX_BVNC_KM                    RGX_BVNC_KM_ST(RGX_BVNC_KM_B) "." RGX_BVNC_KM_ST(RGX_BVNC_KM_V) "." RGX_BVNC_KM_ST(RGX_BVNC_KM_N) "." RGX_BVNC_KM_ST(RGX_BVNC_KM_C)
+#define RGX_BVNC_KM_V_ST       RGX_BVNC_KM_ST(RGX_BVNC_KM_V)
+
+/* Maximum string size is [bb.vvvp.nnnn.cccc\0], includes null char */
+#define RGX_BVNC_STR_SIZE_MAX (2U+1U+4U+1U+4U+1U+4U+1U)
+#define RGX_BVNC_STR_FMTSPEC  "%u.%u.%u.%u"
+#define RGX_BVNC_STRP_FMTSPEC "%u.%up.%u.%u"
+
+
+/******************************************************************************
+ * RGX Defines
+ *****************************************************************************/
+
+#define BVNC_FIELD_MASK     ((1UL << BVNC_FIELD_WIDTH) - 1U)
+#define C_POSITION          (0U)
+#define N_POSITION          ((C_POSITION) + (BVNC_FIELD_WIDTH))
+#define V_POSITION          ((N_POSITION) + (BVNC_FIELD_WIDTH))
+#define B_POSITION          ((V_POSITION) + (BVNC_FIELD_WIDTH))
+
+#define B_POSTION_MASK      (((IMG_UINT64)(BVNC_FIELD_MASK) << (B_POSITION)))
+#define V_POSTION_MASK      (((IMG_UINT64)(BVNC_FIELD_MASK) << (V_POSITION)))
+#define N_POSTION_MASK      (((IMG_UINT64)(BVNC_FIELD_MASK) << (N_POSITION)))
+#define C_POSTION_MASK      (((IMG_UINT64)(BVNC_FIELD_MASK) << (C_POSITION)))
+
+#define GET_B(x)            (((x) & (B_POSTION_MASK)) >> (B_POSITION))
+#define GET_V(x)            (((x) & (V_POSTION_MASK)) >> (V_POSITION))
+#define GET_N(x)            (((x) & (N_POSTION_MASK)) >> (N_POSITION))
+#define GET_C(x)            (((x) & (C_POSTION_MASK)) >> (C_POSITION))
+
+#define BVNC_PACK(B,V,N,C)  ((((IMG_UINT64)(B))) << (B_POSITION) | \
+                             (((IMG_UINT64)(V))) << (V_POSITION) | \
+                             (((IMG_UINT64)(N))) << (N_POSITION) | \
+                             (((IMG_UINT64)(C))) << (C_POSITION) \
+                            )
+
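
For illustration, a minimal standalone sketch of how the packing scheme above is meant to be used. The DEMO_* names, the 16-bit field width and the plain stdint types are stand-ins introduced here so the fragment compiles on its own; in the driver the width comes from the BVNC headers and the accessors are the GET_B/GET_V/GET_N/GET_C macros above.

/* bvnc_pack_demo.c - standalone sketch; mirrors the macros above with an
 * assumed field width of 16 bits so it builds outside the driver tree. */
#include <stdio.h>
#include <stdint.h>

#define DEMO_FIELD_WIDTH  (16U)
#define DEMO_FIELD_MASK   ((1ULL << DEMO_FIELD_WIDTH) - 1U)
#define DEMO_C_POSITION   (0U)
#define DEMO_N_POSITION   (DEMO_C_POSITION + DEMO_FIELD_WIDTH)
#define DEMO_V_POSITION   (DEMO_N_POSITION + DEMO_FIELD_WIDTH)
#define DEMO_B_POSITION   (DEMO_V_POSITION + DEMO_FIELD_WIDTH)

/* Same idea as BVNC_PACK: shift each field into its lane and OR them together.
 * Values must already fit in the field width; nothing is masked here. */
#define DEMO_BVNC_PACK(B, V, N, C)  (((uint64_t)(B) << DEMO_B_POSITION) | \
                                     ((uint64_t)(V) << DEMO_V_POSITION) | \
                                     ((uint64_t)(N) << DEMO_N_POSITION) | \
                                     ((uint64_t)(C) << DEMO_C_POSITION))

/* Equivalent to GET_B/GET_V/GET_N/GET_C: isolate the lane, shift it back down. */
#define DEMO_GET(x, pos)            (((x) >> (pos)) & DEMO_FIELD_MASK)

int main(void)
{
    /* Arbitrary example values, not a claim about any particular core. */
    uint64_t ui64BVNC = DEMO_BVNC_PACK(4U, 46U, 6U, 62U);

    printf("B.V.N.C = %u.%u.%u.%u\n",
           (unsigned)DEMO_GET(ui64BVNC, DEMO_B_POSITION),
           (unsigned)DEMO_GET(ui64BVNC, DEMO_V_POSITION),
           (unsigned)DEMO_GET(ui64BVNC, DEMO_N_POSITION),
           (unsigned)DEMO_GET(ui64BVNC, DEMO_C_POSITION));  /* prints 4.46.6.62 */
    return 0;
}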
+#define RGX_CR_CORE_ID_CONFIG_N_SHIFT    (8U)
+#define RGX_CR_CORE_ID_CONFIG_C_SHIFT    (0U)
+
+#define RGX_CR_CORE_ID_CONFIG_N_CLRMSK   (0XFFFF00FFU)
+#define RGX_CR_CORE_ID_CONFIG_C_CLRMSK   (0XFFFFFF00U)
+
+#define RGXFW_MAX_NUM_OS                                  (8U)
+#define RGXFW_HOST_OS                                     (0U)
+#define RGXFW_GUEST_OSID_START                            (1U)
+
+#define RGXFW_THREAD_0                                    (0U)
+#define RGXFW_THREAD_1                                    (1U)
+
+/* META cores (required for the RGX_FEATURE_META) */
+#define MTP218   (1U)
+#define MTP219   (2U)
+#define LTP218   (3U)
+#define LTP217   (4U)
+
+/* META Core memory feature depending on META variants */
+#define RGX_META_COREMEM_32K      (32*1024)
+#define RGX_META_COREMEM_48K      (48*1024)
+#define RGX_META_COREMEM_64K      (64*1024)
+#define RGX_META_COREMEM_96K      (96*1024)
+#define RGX_META_COREMEM_128K     (128*1024)
+#define RGX_META_COREMEM_256K     (256*1024)
+
+#if !defined(__KERNEL__)
+#if (!defined(SUPPORT_TRUSTED_DEVICE) || defined(RGX_FEATURE_META_DMA)) && \
+    (defined(RGX_FEATURE_META_COREMEM_SIZE) && RGX_FEATURE_META_COREMEM_SIZE != 0)
+#define RGX_META_COREMEM_SIZE     (RGX_FEATURE_META_COREMEM_SIZE*1024U)
+#define RGX_META_COREMEM          (1)
+#define RGX_META_COREMEM_CODE     (1)
+#if !defined(FIX_HW_BRN_50767) && defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1)
+#define RGX_META_COREMEM_DATA     (1)
+#endif
+#else
+#undef SUPPORT_META_COREMEM
+#undef RGX_FEATURE_META_COREMEM_SIZE
+#undef RGX_FEATURE_META_DMA
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_META_COREMEM_SIZE         (0)
+#endif
+#endif
+
+#define GET_ROGUE_CACHE_LINE_SIZE(x)    ((((IMG_UINT32)(x)) > 0U) ? ((IMG_UINT32)(x)/8U) : (0U))
+
+
+#if defined(SUPPORT_AGP)
+#define MAX_HW_TA3DCONTEXTS    3U
+#else
+#define MAX_HW_TA3DCONTEXTS    2U
+#endif
+
+#define RGX_CR_CLK_CTRL_ALL_ON          (IMG_UINT64_C(0x5555555555555555)&RGX_CR_CLK_CTRL_MASKFULL)
+#define RGX_CR_CLK_CTRL_ALL_AUTO        (IMG_UINT64_C(0xaaaaaaaaaaaaaaaa)&RGX_CR_CLK_CTRL_MASKFULL)
+#define RGX_CR_CLK_CTRL2_ALL_ON         (IMG_UINT64_C(0x5555555555555555)&RGX_CR_CLK_CTRL2_MASKFULL)
+#define RGX_CR_CLK_CTRL2_ALL_AUTO       (IMG_UINT64_C(0xaaaaaaaaaaaaaaaa)&RGX_CR_CLK_CTRL2_MASKFULL)
+#define RGX_CR_CLK_XTPLUS_CTRL_ALL_ON   (IMG_UINT64_C(0x5555555555555555)&RGX_CR_CLK_XTPLUS_CTRL_MASKFULL)
+#define RGX_CR_CLK_XTPLUS_CTRL_ALL_AUTO (IMG_UINT64_C(0xaaaaaaaaaaaaaaaa)&RGX_CR_CLK_XTPLUS_CTRL_MASKFULL)
+#define DPX_CR_DPX_CLK_CTRL_ALL_ON      (IMG_UINT64_C(0x5555555555555555)&DPX_CR_DPX_CLK_CTRL_MASKFULL)
+#define DPX_CR_DPX_CLK_CTRL_ALL_AUTO    (IMG_UINT64_C(0xaaaaaaaaaaaaaaaa)&DPX_CR_DPX_CLK_CTRL_MASKFULL)
+
+#define RGX_CR_SOFT_RESET_DUST_n_CORE_EN       (RGX_CR_SOFT_RESET_DUST_A_CORE_EN | \
+                                                                                        RGX_CR_SOFT_RESET_DUST_B_CORE_EN | \
+                                                                                        RGX_CR_SOFT_RESET_DUST_C_CORE_EN | \
+                                                                                        RGX_CR_SOFT_RESET_DUST_D_CORE_EN | \
+                                                                                        RGX_CR_SOFT_RESET_DUST_E_CORE_EN | \
+                                                                                        RGX_CR_SOFT_RESET_DUST_F_CORE_EN | \
+                                                                                        RGX_CR_SOFT_RESET_DUST_G_CORE_EN | \
+                                                                                        RGX_CR_SOFT_RESET_DUST_H_CORE_EN)
+
+/* SOFT_RESET Rascal and DUSTs bits */
+#define RGX_CR_SOFT_RESET_RASCALDUSTS_EN       (RGX_CR_SOFT_RESET_RASCAL_CORE_EN | \
+                                                                                        RGX_CR_SOFT_RESET_DUST_n_CORE_EN)
+
+
+
+
+/* SOFT_RESET steps as defined in the TRM */
+#define RGX_S7_SOFT_RESET_DUSTS (RGX_CR_SOFT_RESET_DUST_n_CORE_EN)
+
+#define RGX_S7_SOFT_RESET_JONES (RGX_CR_SOFT_RESET_PM_EN  | \
+                                 RGX_CR_SOFT_RESET_VDM_EN | \
+                                 RGX_CR_SOFT_RESET_ISP_EN)
+
+#define RGX_S7_SOFT_RESET_JONES_ALL (RGX_S7_SOFT_RESET_JONES  | \
+                                     RGX_CR_SOFT_RESET_BIF_EN | \
+                                     RGX_CR_SOFT_RESET_SLC_EN | \
+                                     RGX_CR_SOFT_RESET_GARTEN_EN)
+
+#define RGX_S7_SOFT_RESET2 (RGX_CR_SOFT_RESET2_BLACKPEARL_EN | \
+                            RGX_CR_SOFT_RESET2_PIXEL_EN | \
+                            RGX_CR_SOFT_RESET2_CDM_EN | \
+                            RGX_CR_SOFT_RESET2_VERTEX_EN)
+
+
+
+#define RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT            (12U)
+#define RGX_BIF_PM_PHYSICAL_PAGE_SIZE                  (1UL << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT)
+
+#define RGX_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT             (14U)
+#define RGX_BIF_PM_VIRTUAL_PAGE_SIZE                   (1UL << RGX_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT)
+
+#define RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE        (16U)
+
+/* To get the number of required Dusts, divide the number of
+ * clusters by 2 and round up
+ */
+#define RGX_REQ_NUM_DUSTS(CLUSTERS)    (((CLUSTERS) + 1U) / 2U)
+
+/* To get the number of required Bernado/Phantom(s), divide
+ * the number of clusters by 4 and round up
+ */
+#define RGX_REQ_NUM_PHANTOMS(CLUSTERS) (((CLUSTERS) + 3U) / 4U)
+#define RGX_REQ_NUM_BERNADOS(CLUSTERS) (((CLUSTERS) + 3U) / 4U)
+#define RGX_REQ_NUM_BLACKPEARLS(CLUSTERS) (((CLUSTERS) + 3U) / 4U)
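
As a worked example of the round-up arithmetic above: with 6 clusters, RGX_REQ_NUM_DUSTS(6) = (6 + 1) / 2 = 3 and RGX_REQ_NUM_PHANTOMS(6) = (6 + 3) / 4 = 2; with 5 clusters the results are the same (3 Dusts, 2 Phantoms), because the +1 and +3 biases implement the round-up before the integer division truncates.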
+
+#if !defined(__KERNEL__)
+# define RGX_NUM_PHANTOMS (RGX_REQ_NUM_PHANTOMS(RGX_FEATURE_NUM_CLUSTERS))
+#endif
+
+
+/* META second thread feature depending on META variants and
+ * available CoreMem
+ */
+#if defined(RGX_FEATURE_META) && (RGX_FEATURE_META == MTP218 || RGX_FEATURE_META == MTP219) && defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && (RGX_FEATURE_META_COREMEM_SIZE == 256)
+#define RGXFW_META_SUPPORT_2ND_THREAD
+#endif
+
+
+/*
+ * FW MMU contexts
+ */
+#if defined(SUPPORT_TRUSTED_DEVICE) && defined(RGX_FEATURE_META)
+#define MMU_CONTEXT_MAPPING_FWPRIV (0x0U) /* FW code/private data */
+#define MMU_CONTEXT_MAPPING_FWIF   (0x7U) /* Host/FW data */
+#else
+#define MMU_CONTEXT_MAPPING_FWPRIV (0x0U)
+#define MMU_CONTEXT_MAPPING_FWIF   (0x0U)
+#endif
+
+
+/*
+ * Utility macros to calculate CAT_BASE register addresses
+ */
+#define BIF_CAT_BASEx(n) \
+       (RGX_CR_BIF_CAT_BASE0 + ((n) * (RGX_CR_BIF_CAT_BASE1 - RGX_CR_BIF_CAT_BASE0)))
+
+#define FWCORE_MEM_CAT_BASEx(n) \
+       (RGX_CR_FWCORE_MEM_CAT_BASE0 + ((n) * (RGX_CR_FWCORE_MEM_CAT_BASE1 - RGX_CR_FWCORE_MEM_CAT_BASE0)))
+
+/*
+ * FWCORE wrapper register defines
+ */
+#define FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT   RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_SHIFT
+#define FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_CLRMSK  RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_CLRMSK
+#define FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT     (12U)
+
+
+/******************************************************************************
+ * WA HWBRNs
+ *****************************************************************************/
+
+#if defined(RGX_CR_JONES_IDLE_MASKFULL)
+/* Workaround for HW BRN 57289 */
+#if (RGX_CR_JONES_IDLE_MASKFULL != 0x0000000000007FFF)
+#error This WA must be updated if RGX_CR_JONES_IDLE is expanded!!!
+#endif
+#undef RGX_CR_JONES_IDLE_MASKFULL
+#undef RGX_CR_JONES_IDLE_TDM_SHIFT
+#undef RGX_CR_JONES_IDLE_TDM_CLRMSK
+#undef RGX_CR_JONES_IDLE_TDM_EN
+#define RGX_CR_JONES_IDLE_MASKFULL                        (IMG_UINT64_C(0x0000000000003FFF))
+#endif
+
+#if !defined(__KERNEL__)
+
+#if defined(RGX_FEATURE_ROGUEXE)
+#define RGX_NUM_RASTERISATION_MODULES  RGX_FEATURE_NUM_CLUSTERS
+#else
+#define RGX_NUM_RASTERISATION_MODULES  RGX_NUM_PHANTOMS
+#endif
+
+#endif /* !defined(__KERNEL__) */
+
+/* GPU CR timer tick in GPU cycles */
+#define RGX_CRTIME_TICK_IN_CYCLES (256U)
+
+/* For no-hardware (NO_HARDWARE) multicore builds, return the maximum possible number of cores to the client */
+#define RGX_MULTICORE_MAX_NOHW_CORES               (4U)
+
+/*
+ * If the size of the SLC is less than this value then the TPU bypasses the SLC.
+ */
+#define RGX_TPU_CACHED_SLC_SIZE_THRESHOLD_KB                   (128U)
+
+/*
+ * If the size of the SLC is bigger than this value then the TCU must not bypass the SLC.
+ * In XE_MEMORY_HIERARCHY cores, the TCU bypasses the SLC by default.
+ */
+#define RGX_TCU_CACHED_SLC_SIZE_THRESHOLD_KB                   (32U)
+
+/*
+ * Register used by the FW to track the current boot stage (not used in MIPS)
+ */
+#define RGX_FW_BOOT_STAGE_REGISTER     (RGX_CR_POWER_ESTIMATE_RESULT)
+
+/*
+ * Virtualisation definitions
+ */
+#define RGX_VIRTUALISATION_REG_SIZE_PER_OS (RGX_CR_MTS_SCHEDULE1 - RGX_CR_MTS_SCHEDULE)
+
+/*
+ * Macro used to indicate which version of HWPerf is active
+ */
+#define RGX_FEATURE_HWPERF_ROGUE
+
+/*
+ * Maximum number of cores supported by TRP
+ */
+#define RGX_TRP_MAX_NUM_CORES                           (4U)
+
+#endif /* RGXDEFS_KM_H */
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/rgxmhdefs_km.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/rgxmhdefs_km.h
new file mode 100644 (file)
index 0000000..fe8272b
--- /dev/null
@@ -0,0 +1,286 @@
+/*************************************************************************/ /*!
+@Title          Hardware definition file rgxmhdefs_km.h
+@Brief          The file contains auto-generated hardware definitions without
+                BVNC-specific compile time conditionals.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/*               ****   Autogenerated C -- do not edit    ****               */
+
+/*
+ *      rogue_mh.def
+ */
+
+
+#ifndef RGXMHDEFS_KM_H
+#define RGXMHDEFS_KM_H
+
+#include "img_types.h"
+#include "img_defs.h"
+
+
+#define RGXMHDEFS_KM_REVISION 0
+
+#define RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_FENCE  (0x00000000U)
+#define RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_CONTEXT (0x00000001U)
+#define RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_QUEUE  (0x00000002U)
+
+
+#define RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTL_STREAM (0x00000000U)
+#define RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTX_BUFFER (0x00000001U)
+#define RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_QUEUE_CTL (0x00000002U)
+
+
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAFSTACK    (0x00000008U)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMLIST     (0x00000009U)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DFSTACK    (0x0000000aU)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMLIST     (0x0000000bU)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX0      (0x0000000cU)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX1      (0x0000002dU)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_UFSTACK     (0x0000000fU)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMMUSTACK  (0x00000012U)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMMUSTACK  (0x00000013U)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAUFSTACK   (0x00000016U)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DUFSTACK   (0x00000017U)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DVFP       (0x00000019U)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAVFP       (0x0000001aU)
+
+
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAFSTACK    (0x00000000U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMLIST     (0x00000001U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DFSTACK    (0x00000002U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMLIST     (0x00000003U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX0      (0x00000004U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX1      (0x00000025U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_MAVP        (0x00000006U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_UFSTACK     (0x00000007U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMMUSTACK  (0x00000008U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMMUSTACK  (0x00000009U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAUFSTACK   (0x00000014U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DUFSTACK   (0x00000015U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAVFP       (0x00000018U)
+
+
+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PPP              (0x00000008U)
+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_VCERTC           (0x00000007U)
+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_TEACRTC          (0x00000006U)
+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PSGRTC           (0x00000005U)
+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PSGR             (0x00000004U)
+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PSGS             (0x00000003U)
+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_TPC              (0x00000002U)
+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_VCE              (0x00000001U)
+
+
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_CREQ00 (0x00000000U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_CREQ01 (0x00000001U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_PREQ00 (0x00000002U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_PREQ01 (0x00000003U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_RREQ (0x00000004U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_DBSC (0x00000005U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_CPF (0x00000006U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_DELTA (0x00000007U)
+
+
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ00 (0x00000000U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ01 (0x00000001U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ02 (0x00000002U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ03 (0x00000003U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ00 (0x00000004U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ01 (0x00000005U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ02 (0x00000006U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ03 (0x00000007U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_RREQ (0x00000008U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_DBSC (0x00000009U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CPF (0x0000000aU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_DELTA (0x0000000bU)
+
+
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ00 (0x00000000U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ01 (0x00000001U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ02 (0x00000002U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ03 (0x00000003U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ04 (0x00000004U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ05 (0x00000005U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ06 (0x00000006U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ00 (0x00000007U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ01 (0x00000008U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ02 (0x00000009U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ03 (0x0000000aU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ04 (0x0000000bU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ05 (0x0000000cU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ06 (0x0000000dU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_RREQ (0x0000000eU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_DBSC (0x0000000fU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CPF (0x00000010U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_DELTA (0x00000011U)
+
+
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ00 (0x00000000U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ01 (0x00000001U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ02 (0x00000002U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ03 (0x00000003U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ04 (0x00000004U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ05 (0x00000005U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ06 (0x00000006U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ07 (0x00000007U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ08 (0x00000008U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ09 (0x00000009U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ10 (0x0000000aU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ11 (0x0000000bU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ12 (0x0000000cU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ13 (0x0000000dU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ00 (0x0000000eU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ01 (0x0000000fU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ02 (0x00000010U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ03 (0x00000011U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ04 (0x00000012U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ05 (0x00000013U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ06 (0x00000014U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ07 (0x00000015U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ08 (0x00000016U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ09 (0x00000017U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ10 (0x00000018U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ11 (0x00000019U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ12 (0x0000001aU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ13 (0x0000001bU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_RREQ (0x0000001cU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_DBSC (0x0000001dU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CPF (0x0000001eU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_DELTA (0x0000001fU)
+
+
+#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_PDS_STATE      (0x00000000U)
+#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DEPTH_BIAS     (0x00000001U)
+#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_FLOOR_OFFSET_DATA (0x00000002U)
+#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DELTA_DATA     (0x00000003U)
+
+
+#define RGX_MH_TAG_SB_ISP_ENCODING_ISP_TAG_ZLS            (0x00000000U)
+#define RGX_MH_TAG_SB_ISP_ENCODING_ISP_TAG_DS             (0x00000001U)
+
+
+#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTROL        (0x00000000U)
+#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STATE          (0x00000001U)
+#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_INDEX          (0x00000002U)
+#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STACK          (0x00000004U)
+#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTEXT        (0x00000008U)
+
+
+#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTROL_STREAM (0x00000000U)
+#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_INDIRECT_DATA  (0x00000001U)
+#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_EVENT_DATA     (0x00000002U)
+#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTEXT_STATE  (0x00000003U)
+
+
+#define RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_OPCODE_FETCH (0x00000002U)
+#define RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_DATA_ACCESS  (0x00000003U)
+
+
+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PT_REQUEST     (0x00000000U)
+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PD_REQUEST     (0x00000001U)
+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PC_REQUEST     (0x00000002U)
+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PT_REQUEST  (0x00000003U)
+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_REQUEST  (0x00000004U)
+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_REQUEST  (0x00000005U)
+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_WREQUEST (0x00000006U)
+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_WREQUEST (0x00000007U)
+
+
+#define RGX_MH_TAG_ENCODING_MH_TAG_MMU                    (0x00000000U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_CPU_MMU                (0x00000001U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_CPU_IFU                (0x00000002U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_CPU_LSU                (0x00000003U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MIPS                   (0x00000004U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG0               (0x00000005U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG1               (0x00000006U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG2               (0x00000007U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG3               (0x00000008U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG0               (0x00000009U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG1               (0x0000000aU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG2               (0x0000000bU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG3               (0x0000000cU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG4               (0x0000000dU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_PDS_0                  (0x0000000eU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_PDS_1                  (0x0000000fU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCA               (0x00000010U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCB               (0x00000011U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCC               (0x00000012U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCD               (0x00000013U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCA           (0x00000014U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCB           (0x00000015U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCC           (0x00000016U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCD           (0x00000017U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDSRW              (0x00000018U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TCU_0                  (0x00000019U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TCU_1                  (0x0000001aU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_0                (0x0000001bU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_1                (0x0000001cU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_2                (0x0000001dU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_3                (0x0000001eU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_USC                    (0x0000001fU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_ISP_ZLS                (0x00000020U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_ISP_DS                 (0x00000021U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TPF                    (0x00000022U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TPF_PBCDBIAS           (0x00000023U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TPF_SPF                (0x00000024U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_IPF_CREQ               (0x00000025U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_IPF_OTHERS             (0x00000026U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG5               (0x00000027U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PPP                 (0x00000028U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TPWRTC              (0x00000029U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TEACRTC             (0x0000002aU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGRTC              (0x0000002bU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGREGION           (0x0000002cU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGSTREAM           (0x0000002dU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TPW                 (0x0000002eU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TPC                 (0x0000002fU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_PM_ALLOC               (0x00000030U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_PM_DEALLOC             (0x00000031U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TDM_DMA                (0x00000032U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TDM_CTL                (0x00000033U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_PBE0                   (0x00000034U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_PBE1                   (0x00000035U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_PBE2                   (0x00000036U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_PBE3                   (0x00000037U)
+
+
+#endif /* RGXMHDEFS_KM_H */
+/*****************************************************************************
+ End of file (rgxmhdefs_km.h)
+*****************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/rgxmmudefs_km.h b/drivers/gpu/drm/img/img-rogue/hwdefs/rogue/km/rgxmmudefs_km.h
new file mode 100644 (file)
index 0000000..6518664
--- /dev/null
@@ -0,0 +1,216 @@
+/*************************************************************************/ /*!
+@Title          Hardware definition file rgxmmudefs_km.h
+@Brief          The file contains auto-generated hardware definitions without
+                BVNC-specific compile time conditionals.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/*               ****   Autogenerated C -- do not edit    ****               */
+
+/*
+ *      rogue_bif.def
+ */
+
+
+#ifndef RGXMMUDEFS_KM_H
+#define RGXMMUDEFS_KM_H
+
+#include "img_types.h"
+#include "img_defs.h"
+
+
+#define RGXMMUDEFS_KM_REVISION 0
+
+#define RGX_BIF_DM_ENCODING_VERTEX                        (0x00000000U)
+#define RGX_BIF_DM_ENCODING_PIXEL                         (0x00000001U)
+#define RGX_BIF_DM_ENCODING_COMPUTE                       (0x00000002U)
+#define RGX_BIF_DM_ENCODING_TLA                           (0x00000003U)
+#define RGX_BIF_DM_ENCODING_PB_VCE                        (0x00000004U)
+#define RGX_BIF_DM_ENCODING_PB_TE                         (0x00000005U)
+#define RGX_BIF_DM_ENCODING_META                          (0x00000007U)
+#define RGX_BIF_DM_ENCODING_HOST                          (0x00000008U)
+#define RGX_BIF_DM_ENCODING_PM_ALIST                      (0x00000009U)
+
+
+#define RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT                  (30U)
+#define RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK                 (IMG_UINT64_C(0xFFFFFF003FFFFFFF))
+#define RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT                  (21U)
+#define RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK                 (IMG_UINT64_C(0xFFFFFFFFC01FFFFF))
+#define RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT                  (12U)
+#define RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK                 (IMG_UINT64_C(0xFFFFFFFFFFE00FFF))
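
For illustration, a hedged sketch of how these SHIFT/CLRMSK pairs are typically used: a CLRMSK clears a field, so ANDing with its complement isolates the field before shifting it down. The helper names below are hypothetical and assume this header (and img_types.h) is already included.

/* Hypothetical helpers that split a 40-bit device virtual address into its
 * page-catalogue, page-directory and page-table indices at 4 KB granularity. */
static inline IMG_UINT32 DevVAddrToPCIndex(IMG_UINT64 ui64DevVAddr)
{
    return (IMG_UINT32)((ui64DevVAddr & ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK)
                        >> RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT);  /* bits 39:30 */
}

static inline IMG_UINT32 DevVAddrToPDIndex(IMG_UINT64 ui64DevVAddr)
{
    return (IMG_UINT32)((ui64DevVAddr & ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK)
                        >> RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT);  /* bits 29:21 */
}

static inline IMG_UINT32 DevVAddrToPTIndex(IMG_UINT64 ui64DevVAddr)
{
    return (IMG_UINT32)((ui64DevVAddr & ~RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK)
                        >> RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT);  /* bits 20:12 */
}

/* e.g. 0x0040123456 -> PC index 1, PD index 0, PT index 0x123, page offset 0x456 */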
+
+
+#define RGX_MMUCTRL_ENTRIES_PC_VALUE                      (0x00000400U)
+
+
+#define RGX_MMUCTRL_ENTRIES_PD_VALUE                      (0x00000200U)
+
+
+#define RGX_MMUCTRL_ENTRIES_PT_VALUE                      (0x00000200U)
+
+
+#define RGX_MMUCTRL_ENTRY_SIZE_PC_VALUE                   (0x00000020U)
+
+
+#define RGX_MMUCTRL_ENTRY_SIZE_PD_VALUE                   (0x00000040U)
+
+
+#define RGX_MMUCTRL_ENTRY_SIZE_PT_VALUE                   (0x00000040U)
+
+
+#define RGX_MMUCTRL_PAGE_SIZE_MASK                        (0x00000007U)
+#define RGX_MMUCTRL_PAGE_SIZE_4KB                         (0x00000000U)
+#define RGX_MMUCTRL_PAGE_SIZE_16KB                        (0x00000001U)
+#define RGX_MMUCTRL_PAGE_SIZE_64KB                        (0x00000002U)
+#define RGX_MMUCTRL_PAGE_SIZE_256KB                       (0x00000003U)
+#define RGX_MMUCTRL_PAGE_SIZE_1MB                         (0x00000004U)
+#define RGX_MMUCTRL_PAGE_SIZE_2MB                         (0x00000005U)
+
+
+#define RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT                  (12U)
+#define RGX_MMUCTRL_PAGE_4KB_RANGE_CLRMSK                 (IMG_UINT64_C(0xFFFFFF0000000FFF))
+
+
+#define RGX_MMUCTRL_PAGE_16KB_RANGE_SHIFT                 (14U)
+#define RGX_MMUCTRL_PAGE_16KB_RANGE_CLRMSK                (IMG_UINT64_C(0xFFFFFF0000003FFF))
+
+
+#define RGX_MMUCTRL_PAGE_64KB_RANGE_SHIFT                 (16U)
+#define RGX_MMUCTRL_PAGE_64KB_RANGE_CLRMSK                (IMG_UINT64_C(0xFFFFFF000000FFFF))
+
+
+#define RGX_MMUCTRL_PAGE_256KB_RANGE_SHIFT                (18U)
+#define RGX_MMUCTRL_PAGE_256KB_RANGE_CLRMSK               (IMG_UINT64_C(0xFFFFFF000003FFFF))
+
+
+#define RGX_MMUCTRL_PAGE_1MB_RANGE_SHIFT                  (20U)
+#define RGX_MMUCTRL_PAGE_1MB_RANGE_CLRMSK                 (IMG_UINT64_C(0xFFFFFF00000FFFFF))
+
+
+#define RGX_MMUCTRL_PAGE_2MB_RANGE_SHIFT                  (21U)
+#define RGX_MMUCTRL_PAGE_2MB_RANGE_CLRMSK                 (IMG_UINT64_C(0xFFFFFF00001FFFFF))
+
+
+#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_SHIFT               (12U)
+#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_CLRMSK              (IMG_UINT64_C(0xFFFFFF0000000FFF))
+
+
+#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_SHIFT              (10U)
+#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_CLRMSK             (IMG_UINT64_C(0xFFFFFF00000003FF))
+
+
+#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_SHIFT              (8U)
+#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_CLRMSK             (IMG_UINT64_C(0xFFFFFF00000000FF))
+
+
+#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_SHIFT             (6U)
+#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_CLRMSK            (IMG_UINT64_C(0xFFFFFF000000003F))
+
+
+#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_SHIFT               (5U)
+#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_CLRMSK              (IMG_UINT64_C(0xFFFFFF000000001F))
+
+
+#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_SHIFT               (5U)
+#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_CLRMSK              (IMG_UINT64_C(0xFFFFFF000000001F))
+
+
+#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_SHIFT         (62U)
+#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_CLRMSK        (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
+#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN            (IMG_UINT64_C(0x4000000000000000))
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_SHIFT              (40U)
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_CLRMSK             (IMG_UINT64_C(0xC00000FFFFFFFFFF))
+#define RGX_MMUCTRL_PT_DATA_PAGE_SHIFT                    (12U)
+#define RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK                   (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_SHIFT              (6U)
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_CLRMSK             (IMG_UINT64_C(0xFFFFFFFFFFFFF03F))
+#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_SHIFT           (5U)
+#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_CLRMSK          (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN              (IMG_UINT64_C(0x0000000000000020))
+#define RGX_MMUCTRL_PT_DATA_PM_SRC_SHIFT                  (4U)
+#define RGX_MMUCTRL_PT_DATA_PM_SRC_CLRMSK                 (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
+#define RGX_MMUCTRL_PT_DATA_PM_SRC_EN                     (IMG_UINT64_C(0x0000000000000010))
+#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_SHIFT         (3U)
+#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_CLRMSK        (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN            (IMG_UINT64_C(0x0000000000000008))
+#define RGX_MMUCTRL_PT_DATA_CC_SHIFT                      (2U)
+#define RGX_MMUCTRL_PT_DATA_CC_CLRMSK                     (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_MMUCTRL_PT_DATA_CC_EN                         (IMG_UINT64_C(0x0000000000000004))
+#define RGX_MMUCTRL_PT_DATA_READ_ONLY_SHIFT               (1U)
+#define RGX_MMUCTRL_PT_DATA_READ_ONLY_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_MMUCTRL_PT_DATA_READ_ONLY_EN                  (IMG_UINT64_C(0x0000000000000002))
+#define RGX_MMUCTRL_PT_DATA_VALID_SHIFT                   (0U)
+#define RGX_MMUCTRL_PT_DATA_VALID_CLRMSK                  (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_MMUCTRL_PT_DATA_VALID_EN                      (IMG_UINT64_C(0x0000000000000001))
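
As a hedged sketch only (a hypothetical helper, not the driver's own MMU code), a minimal valid page-table entry can be composed from the fields above by keeping the page-aligned physical address bits and OR-ing in the flag bits that apply; the real driver sets further fields as needed.

/* Hypothetical helper, assuming this header is included. The PAGE field
 * occupies bits 39:12, so masking with ~CLRMSK keeps exactly the page-aligned
 * part of a 40-bit physical address. */
static inline IMG_UINT64 MakeMinimalPTEntry(IMG_UINT64 ui64PhysAddr, IMG_BOOL bReadOnly)
{
    IMG_UINT64 ui64Entry = ui64PhysAddr & ~RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK;

    if (bReadOnly)
    {
        ui64Entry |= RGX_MMUCTRL_PT_DATA_READ_ONLY_EN;
    }

    return ui64Entry | RGX_MMUCTRL_PT_DATA_VALID_EN;
}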
+
+
+#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_SHIFT           (40U)
+#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_CLRMSK          (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF))
+#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN              (IMG_UINT64_C(0x0000010000000000))
+#define RGX_MMUCTRL_PD_DATA_PT_BASE_SHIFT                 (5U)
+#define RGX_MMUCTRL_PD_DATA_PT_BASE_CLRMSK                (IMG_UINT64_C(0xFFFFFF000000001F))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_SHIFT               (1U)
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK              (IMG_UINT64_C(0xFFFFFFFFFFFFFFF1))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB                 (IMG_UINT64_C(0x0000000000000000))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB                (IMG_UINT64_C(0x0000000000000002))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB                (IMG_UINT64_C(0x0000000000000004))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB               (IMG_UINT64_C(0x0000000000000006))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB                 (IMG_UINT64_C(0x0000000000000008))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB                 (IMG_UINT64_C(0x000000000000000a))
+#define RGX_MMUCTRL_PD_DATA_VALID_SHIFT                   (0U)
+#define RGX_MMUCTRL_PD_DATA_VALID_CLRMSK                  (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_MMUCTRL_PD_DATA_VALID_EN                      (IMG_UINT64_C(0x0000000000000001))
+
+
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_SHIFT                 (4U)
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_CLRMSK                (0x0000000FU)
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT            (12U)
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSIZE             (4096U)
+#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_SHIFT           (1U)
+#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_CLRMSK          (0xFFFFFFFDU)
+#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN              (0x00000002U)
+#define RGX_MMUCTRL_PC_DATA_VALID_SHIFT                   (0U)
+#define RGX_MMUCTRL_PC_DATA_VALID_CLRMSK                  (0xFFFFFFFEU)
+#define RGX_MMUCTRL_PC_DATA_VALID_EN                      (0x00000001U)
+
+
+#endif /* RGXMMUDEFS_KM_H */
+/*****************************************************************************
+ End of file (rgxmmudefs_km.h)
+*****************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/include/cache_ops.h b/drivers/gpu/drm/img/img-rogue/include/cache_ops.h
new file mode 100644 (file)
index 0000000..a1d7145
--- /dev/null
@@ -0,0 +1,61 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services cache management header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines for cache management which are visible internally
+                and externally
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CACHE_OPS_H
+#define CACHE_OPS_H
+#include "img_types.h"
+/*!
+* @Defgroup CPUCacheAPIs
+* @{
+*/
+#define CACHE_BATCH_MAX (8U)
+#define MAX_DMA_OPS (34)
+typedef IMG_UINT32 PVRSRV_CACHE_OP;                            /*!< Type represents cache maintenance operation */
+#define PVRSRV_CACHE_OP_NONE                           0x0U    /*!< No operation */
+#define PVRSRV_CACHE_OP_CLEAN                          0x1U    /*!< Flush w/o invalidate */
+#define PVRSRV_CACHE_OP_INVALIDATE                     0x2U    /*!< Invalidate w/o flush */
+#define PVRSRV_CACHE_OP_FLUSH                          0x3U    /*!< Flush w/ invalidate */
+/*! @} End of Defgroup CPUCacheAPIs */
+
+#endif /* CACHE_OPS_H */
diff --git a/drivers/gpu/drm/img/img-rogue/include/devicemem_typedefs.h b/drivers/gpu/drm/img/img-rogue/include/devicemem_typedefs.h
new file mode 100644 (file)
index 0000000..dd66fcc
--- /dev/null
@@ -0,0 +1,142 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Client side part of device memory management -- this file
+                is forked from new_devmem_allocation.h as this one has to
+                reside in the top level include so that client code is able
+                to make use of the typedefs.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef DEVICEMEM_TYPEDEFS_H
+#define DEVICEMEM_TYPEDEFS_H
+
+#include <powervr/mem_types.h>
+#include "img_types.h"
+#include "pvrsrv_memallocflags.h"
+
+typedef struct DEVMEM_CONTEXT_TAG DEVMEM_CONTEXT;     /*!< Convenience typedef for struct DEVMEM_CONTEXT_TAG */
+typedef struct DEVMEM_HEAP_TAG DEVMEM_HEAP;           /*!< Convenience typedef for struct DEVMEM_HEAP_TAG */
+typedef struct DEVMEM_MEMDESC_TAG DEVMEM_MEMDESC;     /*!< Convenience typedef for struct DEVMEM_MEMDESC_TAG */
+typedef struct DEVMEM_PAGELIST_TAG DEVMEM_PAGELIST;   /*!< Convenience typedef for struct DEVMEM_PAGELIST_TAG */
+
+typedef IMG_HANDLE DEVMEM_EXPORTHANDLE;             /*!< Typedef for DeviceMem Export Handle */
+typedef IMG_UINT64 DEVMEM_EXPORTKEY;                /*!< Typedef for DeviceMem Export Key */
+typedef IMG_DEVMEM_SIZE_T DEVMEM_SIZE_T;            /*!< Typedef for DeviceMem SIZE_T */
+typedef IMG_DEVMEM_LOG2ALIGN_T DEVMEM_LOG2ALIGN_T;  /*!< Typedef for DeviceMem LOG2 Alignment */
+
+typedef struct DEVMEMX_PHYS_MEMDESC_TAG DEVMEMX_PHYSDESC;    /*!< Convenience typedef for DevmemX physical */
+typedef struct DEVMEMX_VIRT_MEMDESC_TAG DEVMEMX_VIRTDESC;    /*!< Convenience typedef for DevmemX virtual */
+
+/*! Calling code needs all the information in this struct to be able to pass it around. */
+typedef struct
+{
+    /*! A handle to the PMR. */
+    IMG_HANDLE hPMRExportHandle;
+    /*! The "key" to prove we have authorisation to use this PMR */
+    IMG_UINT64 uiPMRExportPassword;
+    /*! Size and alignment properties for this PMR.  Note, these
+       numbers are not trusted in kernel, but we need to cache them
+       client-side in order to allocate from the VM arena.  The kernel
+       will know the actual alignment and size of the PMR and thus
+       would prevent client code from breaching security here.  Ditto
+       for physmem granularity (aka page size) if this is different
+       from alignment */
+    IMG_DEVMEM_SIZE_T uiSize;
+    /*! We call this "contiguity guarantee" to be more precise than
+       calling it "alignment" or "page size", terms which may seem
+       similar but have different emphasis.  The number reported here
+       is the minimum contiguity guarantee from the creator of the
+       PMR.  Now, there is no requirement to allocate that coarsely
+       from the RA.  The alignment given to the RA simply needs to be
+       at least as coarse as the device page size for the heap we
+       ultimately intend to map into.  What is important is that the
+       device MMU data page size is not greater than the minimum
+       contiguity guarantee from the PMR.  This value is reported to
+       the client in order that it can choose to make early checks and
+       perhaps decide which heap (in a variable page size scenario) it
+       would be safe to map this PMR into.  For convenience, the
+       client may choose to use this argument as the alignment of the
+       virtual range it chooses to allocate, but this is _not_
+       necessary and in many cases it can get away with a finer
+       alignment, should the heap into which this PMR will be
+       mapped support it. */
+    IMG_DEVMEM_LOG2ALIGN_T uiLog2ContiguityGuarantee;
+} DEVMEM_EXPORTCOOKIE;
+
+/* Enum that describes the operation associated with changing sparse memory */
+typedef IMG_UINT32 SPARSE_MEM_RESIZE_FLAGS;
+#define SPARSE_RESIZE_NONE 0U
+
+       /* This should be set to indicate the change needs allocation */
+#define SPARSE_RESIZE_ALLOC 1U
+
+       /* This should be set to indicate the change needs free */
+#define SPARSE_RESIZE_FREE 2U
+
+#define SPARSE_RESIZE_BOTH (SPARSE_RESIZE_ALLOC | SPARSE_RESIZE_FREE)
+
+       /* This should be set to silently swap underlying physical memory
+        * without disturbing its device or cpu virtual maps.
+        * This flag is not supported in the case of PDUMP and could lead to
+        * PDUMP panic when used.
+        */
+#define SPARSE_REMAP_MEM 4U
+
+       /* This should be set to make the sparse changes appear in the CPU virtual map */
+#define SPARSE_MAP_CPU_ADDR 8U
+
+
+/* To be used with all the sparse allocations that get mapped to CPU virtual
+ * space. The sparse allocation CPU mapping is torn down and re-mapped every
+ * time the sparse allocation layout changes.
+ */
+#define PVRSRV_UNMAP_ON_SPARSE_CHANGE 1
+
+/* To use with DevmemSubAllocate() as the default factor if no over-allocation
+ * is desired.
+ */
+#define DEVMEM_NO_PRE_ALLOCATE_MULTIPLIER (1U)
+
+/* Defines the max length for PMR, MemDesc, Device memory History and RI debug
+ * annotations stored in memory, including the null terminator.
+ */
+#define DEVMEM_ANNOTATION_MAX_LEN ((IMG_UINT32)PVR_ANNOTATION_MAX_LEN + 1U)
+
+#endif /* #ifndef DEVICEMEM_TYPEDEFS_H */
diff --git a/drivers/gpu/drm/img/img-rogue/include/dllist.h b/drivers/gpu/drm/img/img-rogue/include/dllist.h
new file mode 100644 (file)
index 0000000..fa73dff
--- /dev/null
@@ -0,0 +1,408 @@
+/*************************************************************************/ /*!
+@File
+@Title          Double linked list header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Double linked list interface
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef DLLIST_H
+#define DLLIST_H
+
+#include "img_types.h"
+#include "img_defs.h"
+
+/*!
+       Pointer to a linked list node
+*/
+typedef struct DLLIST_NODE_    *PDLLIST_NODE;
+
+
+/*!
+       Node in a linked list
+*/
+/*
+ * Note: the following structure's size is architecture-dependent and clients
+ * may need to create a mirror of the structure definition if it needs to be
+ * used in a structure shared between host and device.
+ * Consider such clients if any changes are made to this structure.
+ */
+typedef struct DLLIST_NODE_
+{
+       struct DLLIST_NODE_     *psPrevNode;
+       struct DLLIST_NODE_     *psNextNode;
+} DLLIST_NODE;
+
+
+/*!
+       Static initialiser
+*/
+#define DECLARE_DLLIST(n) \
+DLLIST_NODE (n) = {&(n), &(n)}
+
+/*************************************************************************/ /*!
+@Function       dllist_foreach_node
+
+@Description    Walk through all the nodes on the list.
+                Safe against removal of (node).
+
+@Input          list_head              List node to start the operation
+@Input          node                   Current list node
+@Input          next                   Node after the current one
+
+*/
+/*****************************************************************************/
+#define dllist_foreach_node(list_head, node, next)                                             \
+       for ((node) = (list_head)->psNextNode, (next) = (node)->psNextNode;             \
+                (node) != (list_head);                                                                                 \
+                (node) = (next), (next) = (node)->psNextNode)
+
+#define dllist_foreach_node_backwards(list_head, node, prev)                   \
+       for ((node) = (list_head)->psPrevNode, (prev) = (node)->psPrevNode;             \
+                (node) != (list_head);                                                                                 \
+                (node) = (prev), (prev) = (node)->psPrevNode)
+
+
+/*************************************************************************/ /*!
+@Function       dllist_foreach
+
+@Description    Simplification of dllist_foreach_node.
+                Walk through all the nodes on the list.
+                Safe against removal of currently-iterated node.
+
+                The utility macro dllist_cur() can be used inside the loop to typecast the current node.
+
+@Input          list_head              List node to start the operation
+
+*/
+/*****************************************************************************/
+#define dllist_foreach(list_head)      \
+       for (DLLIST_NODE *_DllNode = (list_head).psNextNode, *_DllNext = _DllNode->psNextNode;          \
+                _DllNode != &(list_head);                                                                                                                              \
+                _DllNode = _DllNext, _DllNext = _DllNode->psNextNode)
+
+#define dllist_foreach_backwards(list_head)    \
+       for (DLLIST_NODE *_DllNode = (list_head).psPrevNode, *_DllPrev = _DllNode->psPrevNode;          \
+                _DllNode != &(list_head);                                                                                                                              \
+                _DllNode = _DllPrev, _DllPrev = _DllNode->psPrevNode)
+
+#define dllist_cur(type, member)       IMG_CONTAINER_OF(_DllNode, type, member)
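A minimal iteration sketch, assuming a hypothetical EXAMPLE_ITEM wrapper that
embeds a DLLIST_NODE and an already initialised, populated list head sHead:

        typedef struct _EXAMPLE_ITEM_
        {
                DLLIST_NODE sNode;      /* embedded list linkage */
                IMG_UINT32  ui32Key;    /* example payload */
        } EXAMPLE_ITEM;

        dllist_foreach(sHead)
        {
                EXAMPLE_ITEM *psCur = dllist_cur(EXAMPLE_ITEM, sNode);
                /* psCur->ui32Key is the payload of the current node */
        }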
+
+/*************************************************************************/ /*!
+@Function       dllist_init
+
+@Description    Initialize a new double linked list
+
+@Input          psListHead             List head Node
+
+*/
+/*****************************************************************************/
+static INLINE
+void dllist_init(PDLLIST_NODE psListHead)
+{
+       psListHead->psPrevNode = psListHead;
+       psListHead->psNextNode = psListHead;
+}
+
+/*************************************************************************/ /*!
+@Function       dllist_is_empty
+
+@Description    Returns whether the list is empty
+
+@Input          psListHead             List head Node
+
+*/
+/*****************************************************************************/
+static INLINE
+bool dllist_is_empty(PDLLIST_NODE psListHead)
+{
+       return ((psListHead->psPrevNode == psListHead)
+                       && (psListHead->psNextNode == psListHead));
+}
+
+/*************************************************************************/ /*!
+@Function       dllist_add_to_head
+
+@Description    Add psNewNode to head of list psListHead
+
+@Input          psListHead             Head Node
+@Input          psNewNode              New Node
+
+*/
+/*****************************************************************************/
+static INLINE
+void dllist_add_to_head(PDLLIST_NODE psListHead, PDLLIST_NODE psNewNode)
+{
+       PDLLIST_NODE psTmp;
+
+       psTmp = psListHead->psNextNode;
+
+       psListHead->psNextNode = psNewNode;
+       psNewNode->psNextNode = psTmp;
+
+       psTmp->psPrevNode = psNewNode;
+       psNewNode->psPrevNode = psListHead;
+}
+
+
+/*************************************************************************/ /*!
+@Function       dllist_add_to_tail
+
+@Description    Add psNewNode to tail of list psListHead
+
+@Input          psListHead             Head Node
+@Input          psNewNode              New Node
+
+*/
+/*****************************************************************************/
+static INLINE
+void dllist_add_to_tail(PDLLIST_NODE psListHead, PDLLIST_NODE psNewNode)
+{
+       PDLLIST_NODE psTmp;
+
+       psTmp = psListHead->psPrevNode;
+
+       psListHead->psPrevNode = psNewNode;
+       psNewNode->psPrevNode = psTmp;
+
+       psTmp->psNextNode = psNewNode;
+       psNewNode->psNextNode = psListHead;
+}
+
+/*************************************************************************/ /*!
+@Function       dllist_node_is_in_list
+
+@Description    Returns true if psNode is in a list
+
+@Input          psNode                 List node
+
+*/
+/*****************************************************************************/
+static INLINE
+bool dllist_node_is_in_list(PDLLIST_NODE psNode)
+{
+       return (psNode->psNextNode != NULL);
+}
+
+/*************************************************************************/ /*!
+@Function       dllist_get_next_node
+
+@Description    Returns the list node after psListHead, or NULL if psListHead
+                is the only element in the list.
+
+@Input          psListHead             List node to start the operation
+
+*/
+/*****************************************************************************/
+static INLINE
+PDLLIST_NODE dllist_get_next_node(PDLLIST_NODE psListHead)
+{
+       if (psListHead->psNextNode == psListHead)
+       {
+               return NULL;
+       }
+       else
+       {
+               return psListHead->psNextNode;
+       }
+}
+
+/*************************************************************************/ /*!
+@Function       dllist_get_prev_node
+
+@Description    Returns the list node preceding psListHead or NULL if
+                psListHead is the only element in the list.
+
+@Input          psListHead             List node to start the operation
+
+*/
+/*****************************************************************************/
+static INLINE
+PDLLIST_NODE dllist_get_prev_node(PDLLIST_NODE psListHead)
+{
+       if (psListHead->psPrevNode == psListHead)
+       {
+               return NULL;
+       }
+       else
+       {
+               return psListHead->psPrevNode;
+       }
+}
+
+/*************************************************************************/ /*!
+@Function       dllist_remove_node
+
+@Description    Removes psListNode from the list where it currently belongs
+
+@Input          psListNode             List node to be removed
+
+*/
+/*****************************************************************************/
+static INLINE
+void dllist_remove_node(PDLLIST_NODE psListNode)
+{
+       psListNode->psNextNode->psPrevNode = psListNode->psPrevNode;
+       psListNode->psPrevNode->psNextNode = psListNode->psNextNode;
+
+       /* Clear the node to show it's not in a list */
+       psListNode->psPrevNode = NULL;
+       psListNode->psNextNode = NULL;
+}
+
+/*************************************************************************/ /*!
+@Function       dllist_replace_head
+
+@Description    Moves the list from psOldHead to psNewHead
+
+@Input          psOldHead              List node to be replaced. Will become a
+                                       head node of an empty list.
+@Input          psNewHead              List node to be inserted. Must be an
+                                       empty list head.
+
+*/
+/*****************************************************************************/
+static INLINE
+void dllist_replace_head(PDLLIST_NODE psOldHead, PDLLIST_NODE psNewHead)
+{
+       if (dllist_is_empty(psOldHead))
+       {
+               psNewHead->psNextNode = psNewHead;
+               psNewHead->psPrevNode = psNewHead;
+       }
+       else
+       {
+               /* Change the neighbouring nodes */
+               psOldHead->psNextNode->psPrevNode = psNewHead;
+               psOldHead->psPrevNode->psNextNode = psNewHead;
+
+               /* Copy the old data to the new node */
+               psNewHead->psNextNode = psOldHead->psNextNode;
+               psNewHead->psPrevNode = psOldHead->psPrevNode;
+
+               /* Remove links to the previous list */
+               psOldHead->psNextNode = psOldHead;
+               psOldHead->psPrevNode = psOldHead;
+       }
+}
+
+/**************************************************************************/ /*!
+@Function       dllist_insert_list_at_head
+
+@Description    Inserts psInHead list into the head of the psOutHead list.
+                After this operation psOutHead will contain psInHead at the
+                head of the list and the remaining elements that were
+                already in psOutHead will be placed after the psInHead list
+                (i.e. at the tail of the original list).
+
+@Input          psOutHead       List node psInHead will be inserted into.
+@Input          psInHead        List node to be inserted into psOutHead.
+                                After this operation this becomes an empty list.
+*/ /***************************************************************************/
+static INLINE
+void dllist_insert_list_at_head(PDLLIST_NODE psOutHead, PDLLIST_NODE psInHead)
+{
+       PDLLIST_NODE psInHeadNextNode = psInHead->psNextNode;
+       PDLLIST_NODE psOutHeadNextNode = psOutHead->psNextNode;
+
+       if (!dllist_is_empty(psInHead))
+       {
+               psOutHead->psNextNode = psInHeadNextNode;
+               psInHeadNextNode->psPrevNode = psOutHead;
+
+               psInHead->psPrevNode->psNextNode = psOutHeadNextNode;
+               psOutHeadNextNode->psPrevNode = psInHead->psPrevNode;
+
+               dllist_init(psInHead);
+       }
+}
+
+/*************************************************************************/ /*!
+@Description    Pointer to a dllist comparison callback function.
+@Input          psNode  Pointer to a node in a dllist.
+@Input          psNext  Pointer to psNode's next neighbour.
+*/ /**************************************************************************/
+typedef bool (*DLLIST_CMP_CB)(const DLLIST_NODE *psNode, const DLLIST_NODE *psNext);
+
+/*************************************************************************/ /*!
+@Function       dllist_sort
+
+@Description    Sorts the list in place.
+                The cmpr callback is passed the current node and the next
+                node; the user-supplied function decides whether their order
+                should be swapped, and must return true if a swap is
+                required.
+
+@Input          psListHead              List Head to be sorted.
+
+@Input          cmpr                    Function pointer to use for sorting
+
+*/
+/*****************************************************************************/
+static INLINE void dllist_sort(PDLLIST_NODE psListHead,
+                        DLLIST_CMP_CB cmpr)
+{
+       DLLIST_NODE *node, *next;
+       DLLIST_NODE sTempHead;
+
+       dllist_init(&sTempHead);
+
+       dllist_foreach_node(psListHead, node, next)
+       {
+               dllist_remove_node(node);
+               dllist_add_to_head(&sTempHead, node);
+       }
+
+       while (!dllist_is_empty(&sTempHead))
+       {
+               DLLIST_NODE *psSmallestNode = NULL;
+
+               dllist_foreach_node(&sTempHead, node, next)
+               {
+                       if (!psSmallestNode || cmpr(psSmallestNode, node))
+                       {
+                               psSmallestNode = node;
+                       }
+               }
+
+               dllist_remove_node(psSmallestNode);
+               dllist_add_to_tail(psListHead, psSmallestNode);
+       }
+}
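A sketch of a comparison callback for dllist_sort(), reusing the hypothetical
EXAMPLE_ITEM wrapper above and assuming IMG_CONTAINER_OF from img_defs.h
behaves like the kernel's container_of; returning true when the second node's
key is smaller gives an ascending sort:

        static bool ExampleCmp(const DLLIST_NODE *psNode, const DLLIST_NODE *psNext)
        {
                EXAMPLE_ITEM *psA = IMG_CONTAINER_OF(psNode, EXAMPLE_ITEM, sNode);
                EXAMPLE_ITEM *psB = IMG_CONTAINER_OF(psNext, EXAMPLE_ITEM, sNode);

                /* True means the nodes should swap, i.e. psNext comes first */
                return psB->ui32Key < psA->ui32Key;
        }

        /* ... */
        dllist_sort(&sListHead, ExampleCmp);    /* sListHead: a populated DLLIST_NODE head */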
+
+#endif /* DLLIST_H */
diff --git a/drivers/gpu/drm/img/img-rogue/include/drm/netlink.h b/drivers/gpu/drm/img/img-rogue/include/drm/netlink.h
new file mode 100644 (file)
index 0000000..7b0a71f
--- /dev/null
@@ -0,0 +1,147 @@
+/*
+ * @File
+ * @Title       Nulldisp/Netlink interface definition
+ * @Codingstyle LinuxKernel
+ * @Copyright   Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+ * @License     Dual MIT/GPLv2
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License Version 2 ("GPL") in which case the provisions
+ * of GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms of
+ * GPL, and not to allow others to use your version of this file under the terms
+ * of the MIT license, indicate your decision by deleting the provisions above
+ * and replace them with the notice and other provisions required by GPL as set
+ * out in the file called "GPL-COPYING" included in this distribution. If you do
+ * not delete the provisions above, a recipient may use your version of this file
+ * under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT-COPYING".
+ *
+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef __NETLINK_H__
+#define __NETLINK_H__
+
+/* For multi-plane pixel formats */
+#define NLPVRDPY_MAX_NUM_PLANES 3
+
+enum nlpvrdpy_cmd {
+       __NLPVRDPY_CMD_INVALID,
+       NLPVRDPY_CMD_CONNECT,
+       NLPVRDPY_CMD_CONNECTED,
+       NLPVRDPY_CMD_DISCONNECT,
+       NLPVRDPY_CMD_FLIP,
+       NLPVRDPY_CMD_FLIPPED,
+       NLPVRDPY_CMD_COPY,
+       NLPVRDPY_CMD_COPIED,
+       __NLPVRDPY_CMD_MAX
+};
+#define NLPVRDPY_CMD_MAX (__NLPVRDPY_CMD_MAX - 1)
+
+enum nlpvrdpy_attr {
+       __NLPVRDPY_ATTR_INVALID,
+       NLPVRDPY_ATTR_NAME,
+       NLPVRDPY_ATTR_MINOR,
+       NLPVRDPY_ATTR_NUM_PLANES,
+       NLPVRDPY_ATTR_WIDTH,
+       NLPVRDPY_ATTR_HEIGHT,
+       NLPVRDPY_ATTR_PIXFMT,
+       NLPVRDPY_ATTR_YUV_CSC,
+       NLPVRDPY_ATTR_YUV_BPP,
+       NLPVRDPY_ATTR_PLANE0_ADDR,
+       NLPVRDPY_ATTR_PLANE0_SIZE,
+       NLPVRDPY_ATTR_PLANE0_OFFSET,
+       NLPVRDPY_ATTR_PLANE0_PITCH,
+       NLPVRDPY_ATTR_PLANE0_GEM_OBJ_NAME,
+       NLPVRDPY_ATTR_PLANE1_ADDR,
+       NLPVRDPY_ATTR_PLANE1_SIZE,
+       NLPVRDPY_ATTR_PLANE1_OFFSET,
+       NLPVRDPY_ATTR_PLANE1_PITCH,
+       NLPVRDPY_ATTR_PLANE1_GEM_OBJ_NAME,
+       NLPVRDPY_ATTR_PLANE2_ADDR,
+       NLPVRDPY_ATTR_PLANE2_SIZE,
+       NLPVRDPY_ATTR_PLANE2_OFFSET,
+       NLPVRDPY_ATTR_PLANE2_PITCH,
+       NLPVRDPY_ATTR_PLANE2_GEM_OBJ_NAME,
+       NLPVRDPY_ATTR_FB_MODIFIER,
+       NLPVRDPY_ATTR_NAMING_REQUIRED,
+       NLPVRDPY_ATTR_PAD,
+       __NLPVRDPY_ATTR_MAX
+};
+#define NLPVRDPY_ATTR_MAX  (__NLPVRDPY_ATTR_MAX - 1)
+
+static struct nla_policy __attribute__((unused))
+nlpvrdpy_policy[NLPVRDPY_ATTR_MAX + 1] = {
+       [NLPVRDPY_ATTR_NAME]                = { .type = NLA_STRING },
+       [NLPVRDPY_ATTR_MINOR]               = { .type = NLA_U32 },
+       [NLPVRDPY_ATTR_NUM_PLANES]          = { .type = NLA_U8  },
+       [NLPVRDPY_ATTR_WIDTH]               = { .type = NLA_U32 },
+       [NLPVRDPY_ATTR_HEIGHT]              = { .type = NLA_U32 },
+       [NLPVRDPY_ATTR_PIXFMT]              = { .type = NLA_U32 },
+       [NLPVRDPY_ATTR_YUV_CSC]             = { .type = NLA_U8  },
+       [NLPVRDPY_ATTR_YUV_BPP]             = { .type = NLA_U8  },
+       [NLPVRDPY_ATTR_PLANE0_ADDR]         = { .type = NLA_U64 },
+       [NLPVRDPY_ATTR_PLANE0_SIZE]         = { .type = NLA_U64 },
+       [NLPVRDPY_ATTR_PLANE0_OFFSET]       = { .type = NLA_U64 },
+       [NLPVRDPY_ATTR_PLANE0_PITCH]        = { .type = NLA_U64 },
+       [NLPVRDPY_ATTR_PLANE0_GEM_OBJ_NAME] = { .type = NLA_U32 },
+       [NLPVRDPY_ATTR_PLANE1_ADDR]         = { .type = NLA_U64 },
+       [NLPVRDPY_ATTR_PLANE1_SIZE]         = { .type = NLA_U64 },
+       [NLPVRDPY_ATTR_PLANE1_OFFSET]       = { .type = NLA_U64 },
+       [NLPVRDPY_ATTR_PLANE1_PITCH]        = { .type = NLA_U64 },
+       [NLPVRDPY_ATTR_PLANE1_GEM_OBJ_NAME] = { .type = NLA_U32 },
+       [NLPVRDPY_ATTR_PLANE2_ADDR]         = { .type = NLA_U64 },
+       [NLPVRDPY_ATTR_PLANE2_SIZE]         = { .type = NLA_U64 },
+       [NLPVRDPY_ATTR_PLANE2_OFFSET]       = { .type = NLA_U64 },
+       [NLPVRDPY_ATTR_PLANE2_PITCH]        = { .type = NLA_U64 },
+       [NLPVRDPY_ATTR_PLANE2_GEM_OBJ_NAME] = { .type = NLA_U32 },
+       [NLPVRDPY_ATTR_FB_MODIFIER]         = { .type = NLA_U64 },
+       [NLPVRDPY_ATTR_NAMING_REQUIRED]     = { .type = NLA_FLAG },
+};
+
+#define NLPVRDPY_ATTR_PLANE(index, type)                               \
+       ({                                                              \
+               enum nlpvrdpy_attr __retval;                            \
+                                                                       \
+               switch (index) {                                        \
+               case 0:                                                 \
+                       __retval = NLPVRDPY_ATTR_PLANE0_ ## type;       \
+                       break;                                          \
+               case 1:                                                 \
+                       __retval = NLPVRDPY_ATTR_PLANE1_ ## type;       \
+                       break;                                          \
+               case 2:                                                 \
+                       __retval = NLPVRDPY_ATTR_PLANE2_ ## type;       \
+                       break;                                          \
+               default:                                                \
+                       __retval = __NLPVRDPY_ATTR_INVALID;             \
+                       break;                                          \
+               };                                                      \
+                                                                       \
+               __retval;                                               \
+       })
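A sketch of how the per-plane selector might be used when filling a netlink
message, assuming the standard kernel helper nla_put_u64_64bit() and
hypothetical msg, num_planes and plane_addr variables:

        int i;

        for (i = 0; i < num_planes; i++) {
                if (nla_put_u64_64bit(msg, NLPVRDPY_ATTR_PLANE(i, ADDR),
                                      plane_addr[i], NLPVRDPY_ATTR_PAD))
                        goto err;
        }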
+
+#endif /* __NETLINK_H__ */
diff --git a/drivers/gpu/drm/img/img-rogue/include/drm/nulldisp_drm.h b/drivers/gpu/drm/img/img-rogue/include/drm/nulldisp_drm.h
new file mode 100644 (file)
index 0000000..8403fb5
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * @File
+ * @Title       Nulldisp DRM definitions shared between kernel and user space.
+ * @Codingstyle LinuxKernel
+ * @Copyright   Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+ * @License     Dual MIT/GPLv2
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License Version 2 ("GPL") in which case the provisions
+ * of GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms of
+ * GPL, and not to allow others to use your version of this file under the terms
+ * of the MIT license, indicate your decision by deleting the provisions above
+ * and replace them with the notice and other provisions required by GPL as set
+ * out in the file called "GPL-COPYING" included in this distribution. If you do
+ * not delete the provisions above, a recipient may use your version of this file
+ * under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT-COPYING".
+ *
+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#if !defined(__NULLDISP_DRM_H__)
+#define __NULLDISP_DRM_H__
+
+#if defined(__KERNEL__)
+#include <drm/drm.h>
+#else
+#include <drm.h>
+#endif
+
+struct drm_nulldisp_gem_create {
+       __u64 size;   /* in */
+       __u32 flags;  /* in */
+       __u32 handle; /* out */
+};
+
+struct drm_nulldisp_gem_mmap {
+       __u32 handle; /* in */
+       __u32 pad;
+       __u64 offset; /* out */
+};
+
+#define NULLDISP_GEM_CPU_PREP_READ   (1 << 0)
+#define NULLDISP_GEM_CPU_PREP_WRITE  (1 << 1)
+#define NULLDISP_GEM_CPU_PREP_NOWAIT (1 << 2)
+
+struct drm_nulldisp_gem_cpu_prep {
+       __u32 handle; /* in */
+       __u32 flags;  /* in */
+};
+
+struct drm_nulldisp_gem_cpu_fini {
+       __u32 handle; /* in */
+       __u32 pad;
+};
+
+/*
+ * DRM command numbers, relative to DRM_COMMAND_BASE.
+ * These defines must be prefixed with "DRM_".
+ */
+#define DRM_NULLDISP_GEM_CREATE   0x00
+#define DRM_NULLDISP_GEM_MMAP     0x01
+#define DRM_NULLDISP_GEM_CPU_PREP 0x02
+#define DRM_NULLDISP_GEM_CPU_FINI 0x03
+
+/* These defines must be prefixed with "DRM_IOCTL_". */
+#define DRM_IOCTL_NULLDISP_GEM_CREATE \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_NULLDISP_GEM_CREATE, \
+                struct drm_nulldisp_gem_create)
+
+#define DRM_IOCTL_NULLDISP_GEM_MMAP \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_NULLDISP_GEM_MMAP, \
+                struct drm_nulldisp_gem_mmap)
+
+#define DRM_IOCTL_NULLDISP_GEM_CPU_PREP \
+       DRM_IOW(DRM_COMMAND_BASE + DRM_NULLDISP_GEM_CPU_PREP, \
+               struct drm_nulldisp_gem_cpu_prep)
+
+#define DRM_IOCTL_NULLDISP_GEM_CPU_FINI \
+       DRM_IOW(DRM_COMMAND_BASE + DRM_NULLDISP_GEM_CPU_FINI, \
+               struct drm_nulldisp_gem_cpu_fini)
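From user space these resolve through the standard libdrm wrapper; a minimal
sketch, assuming an already opened DRM file descriptor fd:

        struct drm_nulldisp_gem_create create = { .size = 4096, .flags = 0 };

        if (drmIoctl(fd, DRM_IOCTL_NULLDISP_GEM_CREATE, &create) == 0) {
                /* create.handle now names the new GEM object */
        }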
+
+#endif /* defined(__NULLDISP_DRM_H__) */
diff --git a/drivers/gpu/drm/img/img-rogue/include/drm/pdp_drm.h b/drivers/gpu/drm/img/img-rogue/include/drm/pdp_drm.h
new file mode 100644 (file)
index 0000000..f5d747d
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * @File
+ * @Title       PDP DRM definitions shared between kernel and user space.
+ * @Codingstyle LinuxKernel
+ * @Copyright   Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+ * @License     Dual MIT/GPLv2
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License Version 2 ("GPL") in which case the provisions
+ * of GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms of
+ * GPL, and not to allow others to use your version of this file under the terms
+ * of the MIT license, indicate your decision by deleting the provisions above
+ * and replace them with the notice and other provisions required by GPL as set
+ * out in the file called "GPL-COPYING" included in this distribution. If you do
+ * not delete the provisions above, a recipient may use your version of this file
+ * under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT-COPYING".
+ *
+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#if !defined(__PDP_DRM_H__)
+#define __PDP_DRM_H__
+
+#if defined(__KERNEL__)
+#include <drm/drm.h>
+#else
+#include <drm.h>
+#endif
+
+struct drm_pdp_gem_create {
+       __u64 size;     /* in */
+       __u32 flags;    /* in */
+       __u32 handle;   /* out */
+};
+
+struct drm_pdp_gem_mmap {
+       __u32 handle;   /* in */
+       __u32 pad;
+       __u64 offset;   /* out */
+};
+
+#define PDP_GEM_CPU_PREP_READ  (1 << 0)
+#define PDP_GEM_CPU_PREP_WRITE (1 << 1)
+#define PDP_GEM_CPU_PREP_NOWAIT        (1 << 2)
+
+struct drm_pdp_gem_cpu_prep {
+       __u32 handle;   /* in */
+       __u32 flags;    /* in */
+};
+
+struct drm_pdp_gem_cpu_fini {
+       __u32 handle;   /* in */
+       __u32 pad;
+};
+
+/*
+ * DRM command numbers, relative to DRM_COMMAND_BASE.
+ * These defines must be prefixed with "DRM_".
+ */
+#define DRM_PDP_GEM_CREATE             0x00
+#define DRM_PDP_GEM_MMAP               0x01
+#define DRM_PDP_GEM_CPU_PREP           0x02
+#define DRM_PDP_GEM_CPU_FINI           0x03
+
+/* These defines must be prefixed with "DRM_IOCTL_". */
+#define DRM_IOCTL_PDP_GEM_CREATE \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_PDP_GEM_CREATE, \
+                struct drm_pdp_gem_create)
+
+#define DRM_IOCTL_PDP_GEM_MMAP\
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_PDP_GEM_MMAP, \
+                struct drm_pdp_gem_mmap)
+
+#define DRM_IOCTL_PDP_GEM_CPU_PREP \
+       DRM_IOW(DRM_COMMAND_BASE + DRM_PDP_GEM_CPU_PREP, \
+               struct drm_pdp_gem_cpu_prep)
+
+#define DRM_IOCTL_PDP_GEM_CPU_FINI \
+       DRM_IOW(DRM_COMMAND_BASE + DRM_PDP_GEM_CPU_FINI, \
+               struct drm_pdp_gem_cpu_fini)
+
+#endif /* defined(__PDP_DRM_H__) */
diff --git a/drivers/gpu/drm/img/img-rogue/include/drm/pvr_drm.h b/drivers/gpu/drm/img/img-rogue/include/drm/pvr_drm.h
new file mode 100644 (file)
index 0000000..c0d00c9
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+ * @File
+ * @Title       PVR DRM definitions shared between kernel and user space.
+ * @Codingstyle LinuxKernel
+ * @Copyright   Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+ * @License     Dual MIT/GPLv2
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License Version 2 ("GPL") in which case the provisions
+ * of GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms of
+ * GPL, and not to allow others to use your version of this file under the terms
+ * of the MIT license, indicate your decision by deleting the provisions above
+ * and replace them with the notice and other provisions required by GPL as set
+ * out in the file called "GPL-COPYING" included in this distribution. If you do
+ * not delete the provisions above, a recipient may use your version of this file
+ * under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT-COPYING".
+ *
+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#if !defined(__PVR_DRM_H__)
+#define __PVR_DRM_H__
+
+#include <linux/types.h>
+
+#if defined(__KERNEL__)
+#include <drm/drm.h>
+#else
+#include <drm.h>
+#endif
+
+/*
+ * IMPORTANT:
+ * All structures below are designed to be the same size when compiled for 32
+ * and/or 64 bit architectures, i.e. there should be no compiler inserted
+ * padding. This is achieved by sticking to the following rules:
+ * 1) only use fixed width types
+ * 2) always naturally align fields by arranging them appropriately and by using
+ *    padding fields when necessary
+ *
+ * These rules should _always_ be followed when modifying or adding new
+ * structures to this file.
+ */
+
+struct drm_pvr_srvkm_cmd {
+       __u32 bridge_id;
+       __u32 bridge_func_id;
+       __u64 in_data_ptr;
+       __u64 out_data_ptr;
+       __u32 in_data_size;
+       __u32 out_data_size;
+};
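As an illustration of the layout rules above, this structure packs to 32 bytes
(two __u32, two __u64, two __u32) with no implicit padding on both 32-bit and
64-bit builds; a compile-time check of that property could look like:

        _Static_assert(sizeof(struct drm_pvr_srvkm_cmd) == 32,
                       "drm_pvr_srvkm_cmd must not contain implicit padding");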
+
+struct pvr_sync_rename_ioctl_data {
+       char szName[32];
+};
+
+struct pvr_sw_sync_create_fence_data {
+       char name[32];
+       __s32 fence;
+       __u32 pad;
+       __u64 sync_pt_idx;
+};
+
+struct pvr_sw_timeline_advance_data {
+       __u64 sync_pt_idx;
+};
+
+#define PVR_SRVKM_SERVICES_INIT  1
+#define PVR_SRVKM_SYNC_INIT 2
+struct drm_pvr_srvkm_init_data {
+       __u32 init_module;
+};
+
+/* Values used to configure the PVRSRV_DEVICE_INIT_MODE tunable (Linux-only) */
+#define PVRSRV_LINUX_DEV_INIT_ON_PROBE   1
+#define PVRSRV_LINUX_DEV_INIT_ON_OPEN    2
+#define PVRSRV_LINUX_DEV_INIT_ON_CONNECT 3
+
+/*
+ * DRM command numbers, relative to DRM_COMMAND_BASE.
+ * These defines must be prefixed with "DRM_".
+ */
+
+/* PVR Services command */
+#define DRM_PVR_SRVKM_CMD                      0
+
+/* PVR Sync commands */
+#define DRM_PVR_SYNC_RENAME_CMD                        1
+#define DRM_PVR_SYNC_FORCE_SW_ONLY_CMD         2
+
+/* PVR Software Sync commands */
+#define DRM_PVR_SW_SYNC_CREATE_FENCE_CMD       3
+#define DRM_PVR_SW_SYNC_INC_CMD                        4
+
+/* PVR Services Render Device Init command */
+#define DRM_PVR_SRVKM_INIT             5
+
+/* These defines must be prefixed with "DRM_IOCTL_". */
+#define        DRM_IOCTL_PVR_SRVKM_CMD \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR_SRVKM_CMD, \
+                struct drm_pvr_srvkm_cmd)
+
+#define DRM_IOCTL_PVR_SYNC_RENAME_CMD \
+       DRM_IOW(DRM_COMMAND_BASE + DRM_PVR_SYNC_RENAME_CMD, \
+               struct pvr_sync_rename_ioctl_data)
+
+#define DRM_IOCTL_PVR_SYNC_FORCE_SW_ONLY_CMD \
+       DRM_IO(DRM_COMMAND_BASE + DRM_PVR_SYNC_FORCE_SW_ONLY_CMD)
+
+#define        DRM_IOCTL_PVR_SW_SYNC_CREATE_FENCE_CMD \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR_SW_SYNC_CREATE_FENCE_CMD, \
+               struct pvr_sw_sync_create_fence_data)
+
+#define DRM_IOCTL_PVR_SW_SYNC_INC_CMD \
+       DRM_IOR(DRM_COMMAND_BASE + DRM_PVR_SW_SYNC_INC_CMD, \
+               struct pvr_sw_timeline_advance_data)
+
+#define DRM_IOCTL_PVR_SRVKM_INIT \
+       DRM_IOW(DRM_COMMAND_BASE + DRM_PVR_SRVKM_INIT, \
+               struct drm_pvr_srvkm_init_data)
+
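A minimal user-space sketch of issuing the services init command, again
assuming an already opened DRM file descriptor fd:

        struct drm_pvr_srvkm_init_data init_data = {
                .init_module = PVR_SRVKM_SERVICES_INIT,
        };

        int ret = drmIoctl(fd, DRM_IOCTL_PVR_SRVKM_INIT, &init_data);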
+#endif /* defined(__PVR_DRM_H__) */
diff --git a/drivers/gpu/drm/img/img-rogue/include/img_3dtypes.h b/drivers/gpu/drm/img/img-rogue/include/img_3dtypes.h
new file mode 100644 (file)
index 0000000..916e3a1
--- /dev/null
@@ -0,0 +1,248 @@
+/*************************************************************************/ /*!
+@File
+@Title          Global 3D types for use by IMG APIs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines 3D types for use by IMG APIs
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef IMG_3DTYPES_H
+#define IMG_3DTYPES_H
+
+#include <powervr/buffer_attribs.h>
+#include "img_types.h"
+#include "img_defs.h"
+
+/**
+ * Comparison functions
+ * This comparison function is defined as:
+ * A {CmpFunc} B
+ * A is a reference value, e.g., incoming depth etc.
+ * B is the sample value, e.g., value in depth buffer.
+ */
+typedef enum _IMG_COMPFUNC_
+{
+       IMG_COMPFUNC_NEVER,                     /**< The comparison never succeeds */
+       IMG_COMPFUNC_LESS,                      /**< The comparison is a less-than operation */
+       IMG_COMPFUNC_EQUAL,                     /**< The comparison is an equal-to operation */
+       IMG_COMPFUNC_LESS_EQUAL,        /**< The comparison is a less-than or equal-to
+                                                                        operation */
+       IMG_COMPFUNC_GREATER,           /**< The comparison is a greater-than operation
+                                                               */
+       IMG_COMPFUNC_NOT_EQUAL,         /**< The comparison is a not-equal-to operation
+                                                               */
+       IMG_COMPFUNC_GREATER_EQUAL,     /**< The comparison is a greater-than or
+                                                                        equal-to operation */
+       IMG_COMPFUNC_ALWAYS,            /**< The comparison always succeeds */
+} IMG_COMPFUNC;
+
+/**
+ * Stencil op functions
+ */
+typedef enum _IMG_STENCILOP_
+{
+       IMG_STENCILOP_KEEP,             /**< Keep original value */
+       IMG_STENCILOP_ZERO,             /**< Set stencil to 0 */
+       IMG_STENCILOP_REPLACE,  /**< Replace stencil entry */
+       IMG_STENCILOP_INCR_SAT, /**< Increment stencil entry, clamping to max */
+       IMG_STENCILOP_DECR_SAT, /**< Decrement stencil entry, clamping to zero */
+       IMG_STENCILOP_INVERT,   /**< Invert bits in stencil entry */
+       IMG_STENCILOP_INCR,             /**< Increment stencil entry,
+                                                                wrapping if necessary */
+       IMG_STENCILOP_DECR,             /**< Decrement stencil entry,
+                                                                wrapping if necessary */
+} IMG_STENCILOP;
+
+/**
+ * Alpha blending allows colours and textures on one surface
+ * to be blended with transparency onto another surface.
+ * These definitions apply to both source and destination blending
+ * states
+ */
+typedef enum _IMG_BLEND_
+{
+       IMG_BLEND_ZERO = 0,        /**< Blend factor is (0,0,0,0) */
+       IMG_BLEND_ONE,             /**< Blend factor is (1,1,1,1) */
+       IMG_BLEND_SRC_COLOUR,      /**< Blend factor is the source colour */
+       IMG_BLEND_INV_SRC_COLOUR,  /**< Blend factor is the inverted source colour
+                                                                       (i.e. 1-src_col) */
+       IMG_BLEND_SRC_ALPHA,       /**< Blend factor is the source alpha */
+       IMG_BLEND_INV_SRC_ALPHA,   /**< Blend factor is the inverted source alpha
+                                                                       (i.e. 1-src_alpha) */
+       IMG_BLEND_DEST_ALPHA,      /**< Blend factor is the destination alpha */
+       IMG_BLEND_INV_DEST_ALPHA,  /**< Blend factor is the inverted destination
+                                                                       alpha */
+       IMG_BLEND_DEST_COLOUR,     /**< Blend factor is the destination colour */
+       IMG_BLEND_INV_DEST_COLOUR, /**< Blend factor is the inverted destination
+                                                                       colour */
+       IMG_BLEND_SRC_ALPHASAT,    /**< Blend factor is the alpha saturation (the
+                                                                       minimum of (Src alpha,
+                                                                       1 - destination alpha)) */
+       IMG_BLEND_BLEND_FACTOR,    /**< Blend factor is a constant */
+       IMG_BLEND_INVBLEND_FACTOR, /**< Blend factor is a constant (inverted)*/
+       IMG_BLEND_SRC1_COLOUR,     /**< Blend factor is the colour outputted from
+                                                                       the pixel shader */
+       IMG_BLEND_INV_SRC1_COLOUR, /**< Blend factor is the inverted colour
+                                                                       outputted from the pixel shader */
+       IMG_BLEND_SRC1_ALPHA,      /**< Blend factor is the alpha outputted from
+                                                                       the pixel shader */
+       IMG_BLEND_INV_SRC1_ALPHA   /**< Blend factor is the inverted alpha
+                                                                       outputted from the pixel shader */
+} IMG_BLEND;
+
+/**
+ * The arithmetic operation to perform when blending
+ */
+typedef enum _IMG_BLENDOP_
+{
+       IMG_BLENDOP_ADD = 0,      /**< Result = (Source + Destination) */
+       IMG_BLENDOP_SUBTRACT,     /**< Result = (Source - Destination) */
+       IMG_BLENDOP_REV_SUBTRACT, /**< Result = (Destination - Source) */
+       IMG_BLENDOP_MIN,          /**< Result = min (Source, Destination) */
+       IMG_BLENDOP_MAX           /**< Result = max (Source, Destination) */
+} IMG_BLENDOP;
+
+/**
+ * Logical operation to perform when logic ops are enabled
+ */
+typedef enum _IMG_LOGICOP_
+{
+       IMG_LOGICOP_CLEAR = 0,     /**< Result = 0 */
+       IMG_LOGICOP_SET,           /**< Result = -1 */
+       IMG_LOGICOP_COPY,          /**< Result = Source */
+       IMG_LOGICOP_COPY_INVERTED, /**< Result = ~Source */
+       IMG_LOGICOP_NOOP,          /**< Result = Destination */
+       IMG_LOGICOP_INVERT,        /**< Result = ~Destination */
+       IMG_LOGICOP_AND,           /**< Result = Source & Destination */
+       IMG_LOGICOP_NAND,          /**< Result = ~(Source & Destination) */
+       IMG_LOGICOP_OR,            /**< Result = Source | Destination */
+       IMG_LOGICOP_NOR,           /**< Result = ~(Source | Destination) */
+       IMG_LOGICOP_XOR,           /**< Result = Source ^ Destination */
+       IMG_LOGICOP_EQUIV,         /**< Result = ~(Source ^ Destination) */
+       IMG_LOGICOP_AND_REVERSE,   /**< Result = Source & ~Destination */
+       IMG_LOGICOP_AND_INVERTED,  /**< Result = ~Source & Destination */
+       IMG_LOGICOP_OR_REVERSE,    /**< Result = Source | ~Destination */
+       IMG_LOGICOP_OR_INVERTED    /**< Result = ~Source | Destination */
+} IMG_LOGICOP;
+
+/**
+ * Type of fog blending supported
+ */
+typedef enum _IMG_FOGMODE_
+{
+       IMG_FOGMODE_NONE, /**< No fog blending - fog calculations are
+                                          *   based on the value output from the vertex phase */
+       IMG_FOGMODE_LINEAR, /**< Linear interpolation */
+       IMG_FOGMODE_EXP, /**< Exponential */
+       IMG_FOGMODE_EXP2, /**< Exponential squaring */
+} IMG_FOGMODE;
+
+/**
+ * Types of filtering
+ */
+typedef enum _IMG_FILTER_
+{
+       IMG_FILTER_DONTCARE,    /**< Any filtering mode is acceptable */
+       IMG_FILTER_POINT,               /**< Point filtering */
+       IMG_FILTER_LINEAR,              /**< Bi-linear filtering */
+       IMG_FILTER_BICUBIC,             /**< Bi-cubic filtering */
+} IMG_FILTER;
+
+/**
+ * Addressing modes for textures
+ */
+typedef enum _IMG_ADDRESSMODE_
+{
+       IMG_ADDRESSMODE_REPEAT, /**< Texture repeats continuously */
+       IMG_ADDRESSMODE_FLIP, /**< Texture flips on odd integer part */
+       IMG_ADDRESSMODE_CLAMP, /**< Texture clamped at 0 or 1 */
+       IMG_ADDRESSMODE_FLIPCLAMP, /**< Flipped once, then clamp */
+       IMG_ADDRESSMODE_CLAMPBORDER,
+       IMG_ADDRESSMODE_OGL_CLAMP,
+       IMG_ADDRESSMODE_OVG_TILEFILL,
+       IMG_ADDRESSMODE_DONTCARE,
+} IMG_ADDRESSMODE;
+
+/**
+ * Culling based on winding order of triangle.
+ */
+typedef enum _IMG_CULLMODE_
+{
+       IMG_CULLMODE_NONE,                      /**< Don't cull */
+       IMG_CULLMODE_FRONTFACING,       /**< Front facing triangles */
+       IMG_CULLMODE_BACKFACING,        /**< Back facing triangles */
+} IMG_CULLMODE;
+
+/**
+ * Colour for clearing surfaces.
+ *  The four elements of the 4 x 32 bit array will map to colour
+ *  R,G,B,A components, in order.
+ *  For YUV colour space the order is Y,U,V.
+ *  For Depth and Stencil formats D maps to R and S maps to G.
+ */
+typedef union IMG_CLEAR_COLOUR_TAG {
+       IMG_UINT32        aui32[4];
+       IMG_INT32         ai32[4];
+       IMG_FLOAT         af32[4];
+} IMG_CLEAR_COLOUR;
+
+static_assert(sizeof(IMG_FLOAT) == sizeof(IMG_INT32), "Size of IMG_FLOAT is not 32 bits.");
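For example, a sketch of an opaque red clear colour expressed through the
float view of the union:

        IMG_CLEAR_COLOUR sClear;

        sClear.af32[0] = 1.0f;  /* R */
        sClear.af32[1] = 0.0f;  /* G */
        sClear.af32[2] = 0.0f;  /* B */
        sClear.af32[3] = 1.0f;  /* A */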
+
+/*! ************************************************************************//**
+@brief          Specifies the MSAA resolve operation.
+*/ /**************************************************************************/
+typedef enum _IMG_RESOLVE_OP_
+{
+       IMG_RESOLVE_BLEND   = 0,          /*!< box filter on the samples */
+       IMG_RESOLVE_MIN     = 1,          /*!< minimum of the samples */
+       IMG_RESOLVE_MAX     = 2,          /*!< maximum of the samples */
+       IMG_RESOLVE_SAMPLE0 = 3,          /*!< choose sample 0 */
+       IMG_RESOLVE_SAMPLE1 = 4,          /*!< choose sample 1 */
+       IMG_RESOLVE_SAMPLE2 = 5,          /*!< choose sample 2 */
+       IMG_RESOLVE_SAMPLE3 = 6,          /*!< choose sample 3 */
+       IMG_RESOLVE_SAMPLE4 = 7,          /*!< choose sample 4 */
+       IMG_RESOLVE_SAMPLE5 = 8,          /*!< choose sample 5 */
+       IMG_RESOLVE_SAMPLE6 = 9,          /*!< choose sample 6 */
+       IMG_RESOLVE_SAMPLE7 = 10,         /*!< choose sample 7 */
+} IMG_RESOLVE_OP;
+
+
+#endif /* IMG_3DTYPES_H */
+/******************************************************************************
+ End of file (img_3dtypes.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/include/img_defs.h b/drivers/gpu/drm/img/img-rogue/include/img_defs.h
new file mode 100644 (file)
index 0000000..a79e8a6
--- /dev/null
@@ -0,0 +1,567 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common header containing type definitions for portability
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Contains variable and structure definitions. Any platform
+                specific types should be defined in this file.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef IMG_DEFS_H
+#define IMG_DEFS_H
+
+#if defined(__linux__) && defined(__KERNEL__)
+#include <linux/types.h>
+#else
+#include <stddef.h>
+#endif
+#if !(defined(__linux__) && defined(__KERNEL__))
+#include <assert.h>
+#endif
+
+#include "img_types.h"
+
+#if defined(NO_INLINE_FUNCS)
+       #define INLINE
+       #define FORCE_INLINE
+#else
+#if defined(__cplusplus) || defined(INTEGRITY_OS)
+       #if     !defined(INLINE)
+               #define INLINE                          inline
+       #endif
+       #define FORCE_INLINE                    static inline
+#else
+#if    !defined(INLINE)
+       #define INLINE                                  __inline
+#endif
+#if (defined(UNDER_WDDM) || defined(WINDOWS_WDF)) && defined(_X86_)
+       #define FORCE_INLINE                    __forceinline
+#else
+       #define FORCE_INLINE                    static __inline
+#endif
+#endif
+#endif
+
+/* True if the GCC version is at least the given version. False for older
+ * versions of GCC, or other compilers.
+ */
+#if defined(__GNUC__)
+#define GCC_VERSION_AT_LEAST(major, minor) \
+       (__GNUC__ > (major) || \
+       (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
+#else
+#define GCC_VERSION_AT_LEAST(major, minor) 0
+#endif
+
+/* Use Clang's __has_extension and __has_builtin macros if available. */
+#if defined(__has_extension)
+#define has_clang_extension(e) __has_extension(e)
+#else
+#define has_clang_extension(e) 0
+#endif
+
+#if defined(__has_builtin)
+#define has_clang_builtin(e) __has_builtin(e)
+#else
+#define has_clang_builtin(e) 0
+#endif
+
+/* Use this in any file, or use attributes under GCC - see below */
+#ifndef PVR_UNREFERENCED_PARAMETER
+#define        PVR_UNREFERENCED_PARAMETER(param) ((void)(param))
+#endif
+
+/* static_assert(condition, "message to print if it fails");
+ *
+ * Assert something at compile time. If the assertion fails, try to print
+ * the message, otherwise do nothing. static_assert is available if:
+ *
+ * - It's already defined as a macro (e.g. by <assert.h> in C11)
+ * - We're using MSVC which exposes static_assert unconditionally
+ * - We're using a C++ compiler that supports C++11
+ * - We're using GCC 4.6 and up in C mode (in which case it's available as
+ *   _Static_assert)
+ *
+ * In all other cases, fall back to an equivalent that makes an invalid
+ * declaration.
+ */
+#if !defined(static_assert) && !defined(_MSC_VER) && \
+               (!defined(__cplusplus) || __cplusplus < 201103L) || defined(__KLOCWORK__)
+       /* static_assert isn't already available */
+       #if !defined(__cplusplus) && (GCC_VERSION_AT_LEAST(4, 6) || \
+                                                                 (defined(__clang__) && has_clang_extension(c_static_assert)))
+               #define static_assert _Static_assert
+       #else
+               #define static_assert(expr, message) \
+                       extern int static_assert_failed[(expr) ? 1 : -1] __attribute__((unused))
+       #endif
+#endif
+
+/*
+ * unreachable("explanation") can be used to indicate to the compiler that
+ * some parts of the code can never be reached, like the default branch
+ * of a switch that covers all real-world possibilities, even though other
+ * values of the switched type exist in principle.
+ *
+ * The message will be printed as an assert() when debugging.
+ *
+ * Note: there is no need to add a 'return' or any error handling after
+ * calling unreachable(), as this call will never return.
+ */
+#if defined(__linux__) && defined(__KERNEL__)
+/* Kernel has its own unreachable(), which is a simple infinite loop */
+#elif GCC_VERSION_AT_LEAST(4, 5) || has_clang_builtin(__builtin_unreachable)
+       #define unreachable(msg) \
+               do { \
+                       assert(!(msg)); \
+                       __builtin_unreachable(); \
+               } while (false)
+#elif defined(_MSC_VER)
+       #define unreachable(msg) \
+               do { \
+                       assert(!(msg)); \
+                       __assume(0); \
+               } while (false)
+#else
+       #define unreachable(msg) \
+               do { \
+                       assert(!(msg)); \
+                       while (1); \
+               } while (false)
+#endif
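A typical use, sketched for a hypothetical helper that is only ever passed the
two resolve modes it handles:

        switch (eOp)
        {
                case IMG_RESOLVE_MIN:
                        /* handle minimum resolve */
                        break;
                case IMG_RESOLVE_MAX:
                        /* handle maximum resolve */
                        break;
                default:
                        unreachable("Invalid resolve operation");
        }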
+
+/*
+ * assume(x > 2 && x <= 7) works like an assert(), except it hints to the
+ * compiler what it can assume to optimise the code, like a limited range
+ * of parameter values.
+ */
+#if has_clang_builtin(__builtin_assume)
+       #define assume(expr) \
+               do { \
+                       assert(expr); \
+                       __builtin_assume(expr); \
+               } while (false)
+#elif defined(_MSC_VER)
+       #define assume(expr) \
+               do { \
+                       assert(expr); \
+                       __assume(expr); \
+               } while (false)
+#elif defined(__linux__) && defined(__KERNEL__)
+       #define assume(expr) ((void)(expr))
+#elif GCC_VERSION_AT_LEAST(4, 5) || has_clang_builtin(__builtin_unreachable)
+       #define assume(expr) \
+               do { \
+                       if (unlikely(!(expr))) \
+                               unreachable("Assumption isn't true: " # expr); \
+               } while (false)
+#else
+       #define assume(expr) assert(expr)
+#endif
+
+/*! Macro to calculate the n-byte aligned value from that supplied rounding up.
+ * n must be a power of two.
+ *
+ * Both arguments should be of a type with the same size, otherwise the macro may
+ * cut off digits, e.g. imagine a 64 bit address in _x and a 32 bit value in _n.
+ */
+#define PVR_ALIGN(_x, _n)      (((_x)+((_n)-1U)) & ~((_n)-1U))
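For instance, rounding an offset up to a 4 KiB boundary:

        IMG_UINT32 ui32Aligned = PVR_ALIGN(0x1001U, 0x1000U);  /* == 0x2000U */
        IMG_UINT32 ui32Same    = PVR_ALIGN(0x2000U, 0x1000U);  /* already aligned, stays 0x2000U */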
+
+#if defined(_WIN32)
+
+#if defined(WINDOWS_WDF)
+
+       /*
+        * For WINDOWS_WDF drivers we don't want these defines to overwrite calling conventions propagated through the build system.
+        * This 'empty' choice helps to resolve all the calling conv issues.
+        *
+        */
+       #define IMG_CALLCONV
+       #define C_CALLCONV
+
+       #define IMG_INTERNAL
+       #define IMG_RESTRICT __restrict
+
+       /*
+        * The proper way of DLL linking under MS compilers involves two things:
+        * - decorate the implementation with __declspec(dllexport); this
+        *   decoration helps the compiler build the so-called 'export library'
+        * - decorate the forward declaration (in a source dependent on a DLL)
+        *   with __declspec(dllimport); this decoration helps the compiler
+        *   generate faster and smaller code when calling DLL-imported functions
+        *
+        * Usually these decorations are applied through a single macro
+        * definition that expands to the proper __declspec() depending on the
+        * translation unit: dllexport inside the DLL source and dllimport
+        * outside it. Having IMG_EXPORT and IMG_IMPORT resolve to the same
+        * __declspec() makes no sense, but at least works.
+        */
+       #define IMG_IMPORT __declspec(dllexport)
+       #define IMG_EXPORT __declspec(dllexport)
+
+#else
+
+       #define IMG_CALLCONV __stdcall
+       #define IMG_INTERNAL
+       #define IMG_EXPORT      __declspec(dllexport)
+       #define IMG_RESTRICT __restrict
+       #define C_CALLCONV      __cdecl
+
+       /*
+        * IMG_IMPORT is defined as IMG_EXPORT so that headers and implementations
+        * match. Some compilers require the header to be declared IMPORT, while
+        * the implementation is declared EXPORT.
+        */
+       #define IMG_IMPORT      IMG_EXPORT
+
+#endif
+
+#if defined(UNDER_WDDM)
+       #ifndef _INC_STDLIB
+               #if defined(__mips)
+                       /* do nothing */
+               #elif defined(UNDER_MSBUILD)
+                       /* do nothing */
+               #else
+                       _CRTIMP void __cdecl abort(void);
+               #endif
+       #endif
+#endif /* UNDER_WDDM */
+#else
+       #if (defined(__linux__) || defined(__QNXNTO__)) && defined(__KERNEL__)
+               #define IMG_INTERNAL
+               #define IMG_EXPORT
+               #define IMG_CALLCONV
+       #elif defined(__linux__) || defined(__METAG) || defined(__mips) || defined(__QNXNTO__) || defined(__riscv)
+               #define IMG_CALLCONV
+               #define C_CALLCONV
+
+               #if defined(__METAG)
+                       #define IMG_INTERNAL
+               #else
+                       #define IMG_INTERNAL    __attribute__((visibility("hidden")))
+               #endif
+
+               #define IMG_EXPORT      __attribute__((visibility("default")))
+               #define IMG_RESTRICT    __restrict__
+       #elif defined(INTEGRITY_OS)
+               #define IMG_CALLCONV
+               #define IMG_INTERNAL
+               #define IMG_EXPORT
+               #define IMG_RESTRICT
+               #define C_CALLCONV
+               #define __cdecl
+
+               #ifndef USE_CODE
+                       #define IMG_ABORT() printf("IMG_ABORT was called.\n")
+               #endif
+       #else
+               #error("define an OS")
+       #endif
+
+#endif
+
+/* Use default definition if not overridden */
+#ifndef IMG_ABORT
+       #if defined(EXIT_ON_ABORT)
+               #define IMG_ABORT()     exit(1)
+       #else
+               #define IMG_ABORT()     abort()
+       #endif
+#endif
+
+/* The best way to suppress unused parameter warnings using GCC is to use a
+ * variable attribute. Place the __maybe_unused between the type and name of an
+ * unused parameter in a function parameter list e.g. 'int __maybe_unused var'.
+ * This should only be used in GCC build environments, for example, in files
+ * that compile only on Linux.
+ * Other files should use PVR_UNREFERENCED_PARAMETER instead.
+ */
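+
+/* Illustrative example (Foo and iDebugOnlyArg are hypothetical names):
+ *
+ *     static void Foo(int __maybe_unused iDebugOnlyArg) { ... }
+ *
+ * compiles without an unused-parameter warning when the argument is only
+ * referenced in debug builds.
+ */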
+
+/* Kernel macros for compiler attributes */
+/* Note: param positions start at 1 */
+#if defined(__linux__) && defined(__KERNEL__)
+       #include <linux/compiler.h>
+
+       #if !defined(__fallthrough)
+               #if GCC_VERSION_AT_LEAST(7, 0)
+                       #define __fallthrough __attribute__((__fallthrough__))
+               #else
+                       #define __fallthrough
+               #endif
+       #endif
+#elif defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES)
+       #define __must_check       __attribute__((warn_unused_result))
+       #define __maybe_unused     __attribute__((unused))
+       #define __malloc           __attribute__((malloc))
+
+       /* Bionic's <sys/cdefs.h> might have defined these already */
+       /* See https://android.googlesource.com/platform/bionic.git/+/master/libc/include/sys/cdefs.h */
+       #if !defined(__packed)
+               #define __packed           __attribute__((packed))
+       #endif
+       #if !defined(__aligned)
+               #define __aligned(n)       __attribute__((aligned(n)))
+       #endif
+       #if !defined(__noreturn)
+               #define __noreturn         __attribute__((noreturn))
+       #endif
+
+       /* That one compiler that supports attributes but doesn't support
+        * the printf attribute... */
+       #if defined(__GNUC__)
+               #define __printf(fmt, va)  __attribute__((format(printf, (fmt), (va))))
+       #else
+               #define __printf(fmt, va)
+       #endif /* defined(__GNUC__) */
+
+       #if defined(__cplusplus) && (__cplusplus >= 201703L)
+               #define __fallthrough [[fallthrough]]
+       #elif GCC_VERSION_AT_LEAST(7, 0)
+               #define __fallthrough __attribute__((__fallthrough__))
+       #else
+               #define __fallthrough
+       #endif
+
+       #define __user
+       #define __force
+       #define __iomem
+#else
+       /* Silently ignore those attributes */
+       #define __printf(fmt, va)
+       #define __packed
+       #define __aligned(n)
+       #define __must_check
+       #define __maybe_unused
+       #define __malloc
+
+       #if defined(_MSC_VER) || defined(CC_ARM)
+               #define __noreturn __declspec(noreturn)
+       #else
+               #define __noreturn
+       #endif
+
+       /* This may already have been defined, e.g. by SAL (Source Annotation Language) */
+       #if !defined(__fallthrough)
+               #define __fallthrough
+       #endif
+
+       #define __user
+       #define __force
+       #define __iomem
+#endif
+
+
+/* Other attributes, following the same style */
+#if defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES)
+       #define __const_function      __attribute__((const))
+#else
+       #define __const_function
+#endif
+
+
+/* GCC builtins */
+#if defined(__linux__) && defined(__KERNEL__)
+       #include <linux/compiler.h>
+#elif defined(__GNUC__) || defined(INTEGRITY_OS)
+
+/* Klocwork does not support __builtin_expect, which hides the actual condition
+ * expressions during analysis and negatively affects the results. */
+#if !defined(__KLOCWORK__) && !defined(INTEGRITY_OS) && !defined(DEBUG)
+       #define likely(x)   __builtin_expect(!!(x), 1)
+       #define unlikely(x) __builtin_expect(!!(x), 0)
+#endif
+
+       /* Compiler memory barrier to prevent reordering */
+       #define barrier() __asm__ __volatile__("": : :"memory")
+#else
+       #define barrier() static_assert(0, "barrier() isn't supported by your compiler");
+#endif
+
+/* That one OS that defines one but not the other... */
+#ifndef likely
+       #define likely(x)   (x)
+#endif
+#ifndef unlikely
+       #define unlikely(x) (x)
+#endif
+
+/* These two macros are also provided by the kernel */
+#ifndef BIT
+#define BIT(b) (1UL << (b))
+#endif
+
+#ifndef BIT_ULL
+#define BIT_ULL(b) (1ULL << (b))
+#endif
+
+#define BIT_SET(f, b)     BITMASK_SET((f),    BIT(b))
+#define BIT_UNSET(f, b)   BITMASK_UNSET((f),  BIT(b))
+#define BIT_TOGGLE(f, b)  BITMASK_TOGGLE((f), BIT(b))
+#define BIT_ISSET(f, b)   BITMASK_HAS((f),    BIT(b))
+
+#define BITMASK_SET(f, m)     do { ((f) |= (m)); } while (false)
+#define BITMASK_UNSET(f, m)   do { ((f) &= ~(m)); } while (false)
+#define BITMASK_TOGGLE(f, m)  do { ((f) ^= (m)); } while (false)
+#define BITMASK_HAS(f, m)     (((f) & (m)) == (m)) /* the bits from the mask are all set */
+#define BITMASK_ANY(f, m)     (((f) & (m)) != 0U)  /* any bit from the mask is set */
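+
+/* Illustrative usage only (ui32Flags is a hypothetical variable):
+ *
+ *     IMG_UINT32 ui32Flags = 0U;
+ *     BIT_SET(ui32Flags, 3);               // ui32Flags == 0x8
+ *     if (BIT_ISSET(ui32Flags, 3)) { ... } // true
+ *     BITMASK_UNSET(ui32Flags, BIT(3));    // ui32Flags == 0x0
+ */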
+
+#ifndef MAX
+#define MAX(a, b)      (((a) > (b)) ? (a) : (b))
+#endif
+
+#ifndef MIN
+#define MIN(a, b)      (((a) < (b)) ? (a) : (b))
+#endif
+
+#ifndef CLAMP
+#define CLAMP(min, max, n)  ((n) < (min) ? (min) : ((n) > (max) ? (max) : (n)))
+#endif
+
+#define SWAP(X, Y) do { (X) ^= (Y); (Y) ^= (X); (X) ^= (Y); } while (false)
+
+
+#if defined(__linux__) && defined(__KERNEL__)
+       #include <linux/kernel.h>
+       #include <linux/bug.h>
+#endif
+
+/* Get a structure's address from the address of a member */
+#define IMG_CONTAINER_OF(ptr, type, member) \
+       (type *) ((uintptr_t) (ptr) - offsetof(type, member))
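+
+/* Illustrative usage only (DEVICE_NODE and sListEntry are hypothetical names):
+ * given psEntry, a pointer to the sListEntry member embedded in a
+ * struct DEVICE_NODE, the enclosing structure can be recovered with
+ *
+ *     DEVICE_NODE *psNode = IMG_CONTAINER_OF(psEntry, DEVICE_NODE, sListEntry);
+ */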
+
+/* Get a new pointer with an offset (in bytes) from a base address, useful
+ * when traversing byte buffers and accessing data in buffers through struct
+ * pointers.
+ * Note: this macro is neither equivalent to nor a replacement for offsetof() */
+#define IMG_OFFSET_ADDR(addr, offset_in_bytes) \
+       (void*)&(((IMG_UINT8*)(void*)(addr))[offset_in_bytes])
+
+/* Get a new pointer with an offset (in dwords) from a base address, useful
+ * when traversing byte buffers and accessing data in buffers through struct
+ * pointers.
+ * Note: this macro is neither equivalent to nor a replacement for offsetof() */
+#define IMG_OFFSET_ADDR_DW(addr, offset_in_dwords) \
+       (void*)(((IMG_UINT32*)(void*)(addr)) + (offset_in_dwords))
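+
+/* Illustrative usage only (pvBuffer is a hypothetical byte buffer):
+ *
+ *     void *pvHeader  = IMG_OFFSET_ADDR(pvBuffer, 16);    // +16 bytes
+ *     void *pvPayload = IMG_OFFSET_ADDR_DW(pvBuffer, 4);  // +4 dwords = +16 bytes
+ */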
+
+/* The number of elements in a fixed-sized array */
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(ARR) (sizeof(ARR) / sizeof((ARR)[0]))
+#endif
+
+/* To guarantee that __func__ can be used, define it as a macro here if it
+   isn't already provided by the compiler. */
+#if defined(_MSC_VER) || (defined(__cplusplus) && __cplusplus < 201103L)
+#define __func__ __FUNCTION__
+#endif
+
+#if defined(__cplusplus)
+/* C++ Specific:
+ * Disallow use of copy and assignment operator within a class.
+ * Should be placed under private. */
+#define IMG_DISALLOW_COPY_AND_ASSIGN(C) \
+       C(const C&); \
+       void operator=(const C&)
+#endif
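+
+/* Illustrative usage only (CDevice is a hypothetical C++ class):
+ *
+ *     class CDevice
+ *     {
+ *     private:
+ *             IMG_DISALLOW_COPY_AND_ASSIGN(CDevice);
+ *     };
+ */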
+
+#if defined(SUPPORT_PVR_VALGRIND) && !defined(__METAG) && !defined(__mips) && !defined(__riscv)
+       #include "/usr/include/valgrind/memcheck.h"
+
+       #define VG_MARK_INITIALIZED(pvData,ui32Size) VALGRIND_MAKE_MEM_DEFINED(pvData,ui32Size)
+       #define VG_MARK_NOACCESS(pvData,ui32Size) VALGRIND_MAKE_MEM_NOACCESS(pvData,ui32Size)
+       #define VG_MARK_ACCESS(pvData,ui32Size) VALGRIND_MAKE_MEM_UNDEFINED(pvData,ui32Size)
+       #define VG_ASSERT_DEFINED(pvData,ui32Size) VALGRIND_CHECK_MEM_IS_DEFINED(pvData,ui32Size)
+#else
+       #if defined(_MSC_VER)
+       #       define PVR_MSC_SUPPRESS_4127 __pragma(warning(suppress:4127))
+       #else
+       #       define PVR_MSC_SUPPRESS_4127
+       #endif
+
+       #define VG_MARK_INITIALIZED(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while (false)
+       #define VG_MARK_NOACCESS(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while (false)
+       #define VG_MARK_ACCESS(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while (false)
+       #define VG_ASSERT_DEFINED(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while (false)
+#endif
+
+#define IMG_STRINGIFY_IMPL(x) # x
+#define IMG_STRINGIFY(x) IMG_STRINGIFY_IMPL(x)
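+
+/* Example (illustrative): with '#define VERSION_MAJOR 1',
+ * IMG_STRINGIFY(VERSION_MAJOR) expands to "1", whereas
+ * IMG_STRINGIFY_IMPL(VERSION_MAJOR) would expand to "VERSION_MAJOR".
+ */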
+
+#if defined(INTEGRITY_OS)
+       /* Definitions not present in INTEGRITY. */
+       #define PATH_MAX        200
+#endif
+
+#if defined(__clang__) || defined(__GNUC__)
+       /* __SIZEOF_POINTER__ is defined already by these compilers */
+#elif defined(INTEGRITY_OS)
+       #if defined(__Ptr_Is_64)
+               #define __SIZEOF_POINTER__ 8
+       #else
+               #define __SIZEOF_POINTER__ 4
+       #endif
+#elif defined(_WIN32)
+       #define __SIZEOF_POINTER__ sizeof(char *)
+#else
+       #warning Unknown OS - using default method to determine whether CPU arch is 64-bit.
+       #define __SIZEOF_POINTER__ sizeof(char *)
+#endif
+
+/* RDI8567: gcc/clang/llvm load/store optimisations may cause issues with
+ * uncached device memory allocations. Some pointers are made 'volatile'
+ * to prevent those optimisations being applied to writes through those
+ * pointers.
+ */
+#if (GCC_VERSION_AT_LEAST(7, 0) || defined(__clang__)) && (defined(__arm64__) || defined(__aarch64__))
+#define NOLDSTOPT volatile
+/* after applying 'volatile' to a pointer, we may need to cast it to 'void *'
+ * to keep it compatible with its existing uses.
+ */
+#define NOLDSTOPT_VOID (void *)
+
+#define NOLDSTOPT_REQUIRED 1
+#else
+#define NOLDSTOPT
+#define NOLDSTOPT_VOID
+#endif
+
+#endif /* IMG_DEFS_H */
+/*****************************************************************************
+ End of file (img_defs.h)
+*****************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/include/img_drm_fourcc_internal.h b/drivers/gpu/drm/img/img-rogue/include/img_drm_fourcc_internal.h
new file mode 100644 (file)
index 0000000..ee88e90
--- /dev/null
@@ -0,0 +1,94 @@
+/*************************************************************************/ /*!
+@File
+@Title          Wrapper around drm_fourcc.h
+@Description    FourCCs and the DRM framebuffer modifiers should be added here
+                unless they are used by kernel code or a known user outside of
+                the DDK. If FourCCs or DRM framebuffer modifiers are required
+                outside of the DDK, they shall be moved to the corresponding
+                public header.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef IMG_DRM_FOURCC_INTERNAL_H
+#define IMG_DRM_FOURCC_INTERNAL_H
+
+#include <powervr/img_drm_fourcc.h>
+
+/*
+ * Modifier names are structured using the following convention,
+ * with underscores (_) between items:
+ * - prefix: DRM_FORMAT_MOD
+ * - identifier for our driver: PVR
+ * - category: FBCDC
+ *   - compression tile dimension: 8x8, 16x4, 32x2
+ *   - FBDC version: V0, V1, V2, V3, V7, V8, V10, V12
+ */
+#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V0      fourcc_mod_code(PVR, 1)
+#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V0_FIX  fourcc_mod_code(PVR, 2) /* Fix for HW_BRN_37464 */
+/* DRM_FORMAT_MOD_PVR_FBCDC_8x8_V1 - moved to the public header */
+#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V2      fourcc_mod_code(PVR, 4)
+#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V3      fourcc_mod_code(PVR, 5)
+/* DRM_FORMAT_MOD_PVR_FBCDC_8x8_V7 - moved to the public header */
+#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V8      fourcc_mod_code(PVR, 18)
+/* DRM_FORMAT_MOD_PVR_FBCDC_8x8_V10 - moved to the public header */
+/* DRM_FORMAT_MOD_PVR_FBCDC_8x8_V12 - moved to the public header */
+/* DRM_FORMAT_MOD_PVR_FBCDC_8x8_V13 - moved to the public header */
+/* DRM_FORMAT_MOD_PVR_FBCDC_8x8_LOSSY25_V13 - moved to the public header */
+/* DRM_FORMAT_MOD_PVR_FBCDC_8x8_LOSSY50_V13 - moved to the public header */
+/* DRM_FORMAT_MOD_PVR_FBCDC_8x8_LOSSY75_V13 - moved to the public header */
+#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V0     fourcc_mod_code(PVR, 7)
+#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V0_FIX fourcc_mod_code(PVR, 8) /* Fix for HW_BRN_37464 */
+/* DRM_FORMAT_MOD_PVR_FBCDC_16x4_V1 - moved to the public header */
+#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V2     fourcc_mod_code(PVR, 10)
+#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V3     fourcc_mod_code(PVR, 11)
+/* DRM_FORMAT_MOD_PVR_FBCDC_16x4_V7 - moved to the public header */
+#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V8     fourcc_mod_code(PVR, 19)
+/* DRM_FORMAT_MOD_PVR_FBCDC_16x4_V10 - moved to the public header */
+/* DRM_FORMAT_MOD_PVR_FBCDC_16x4_V12 - moved to the public header */
+/* DRM_FORMAT_MOD_PVR_FBCDC_16x4_V13 - moved to the public header */
+/* DRM_FORMAT_MOD_PVR_FBCDC_16x4_LOSSY25_V13 - moved to the public header */
+/* DRM_FORMAT_MOD_PVR_FBCDC_16x4_LOSSY50_V13 - moved to the public header */
+/* DRM_FORMAT_MOD_PVR_FBCDC_16x4_LOSSY75_V13 - moved to the public header */
+#define DRM_FORMAT_MOD_PVR_FBCDC_32x2_V1     fourcc_mod_code(PVR, 13)
+#define DRM_FORMAT_MOD_PVR_FBCDC_32x2_V3     fourcc_mod_code(PVR, 14)
+#define DRM_FORMAT_MOD_PVR_FBCDC_32x2_V8     fourcc_mod_code(PVR, 20)
+/* DRM_FORMAT_MOD_PVR_FBCDC_32x2_V10 - moved to the public header */
+#define DRM_FORMAT_MOD_PVR_FBCDC_32x2_V12    fourcc_mod_code(PVR, 17)
+
+#endif /* IMG_DRM_FOURCC_INTERNAL_H */
diff --git a/drivers/gpu/drm/img/img-rogue/include/img_elf.h b/drivers/gpu/drm/img/img-rogue/include/img_elf.h
new file mode 100644 (file)
index 0000000..8837d95
--- /dev/null
@@ -0,0 +1,111 @@
+/*************************************************************************/ /*!
+@File           img_elf.h
+@Title          IMG ELF file definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Platform       RGX
+@Description    Definitions for ELF file structures used in the DDK.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(IMG_ELF_H)
+#define IMG_ELF_H
+
+#include "img_types.h"
+
+/* ELF format defines */
+#define ELF_PT_LOAD     (0x1U)   /* Program header identifier as Load */
+#define ELF_SHT_SYMTAB  (0x2U)   /* Section identifier as Symbol Table */
+#define ELF_SHT_STRTAB  (0x3U)   /* Section identifier as String Table */
+#define MAX_STRTAB_NUM  (0x8U)   /* Maximum number of string table in the ELF file */
+
+/* Redefined structs of ELF format */
+typedef struct
+{
+       IMG_UINT8    ui32Eident[16];
+       IMG_UINT16   ui32Etype;
+       IMG_UINT16   ui32Emachine;
+       IMG_UINT32   ui32Eversion;
+       IMG_UINT32   ui32Eentry;
+       IMG_UINT32   ui32Ephoff;
+       IMG_UINT32   ui32Eshoff;
+       IMG_UINT32   ui32Eflags;
+       IMG_UINT16   ui32Eehsize;
+       IMG_UINT16   ui32Ephentsize;
+       IMG_UINT16   ui32Ephnum;
+       IMG_UINT16   ui32Eshentsize;
+       IMG_UINT16   ui32Eshnum;
+       IMG_UINT16   ui32Eshtrndx;
+} IMG_ELF_HDR;
+
+typedef struct
+{
+       IMG_UINT32   ui32Stname;
+       IMG_UINT32   ui32Stvalue;
+       IMG_UINT32   ui32Stsize;
+       IMG_UINT8    ui32Stinfo;
+       IMG_UINT8    ui32Stother;
+       IMG_UINT16   ui32Stshndx;
+} IMG_ELF_SYM;
+
+typedef struct
+{
+       IMG_UINT32   ui32Shname;
+       IMG_UINT32   ui32Shtype;
+       IMG_UINT32   ui32Shflags;
+       IMG_UINT32   ui32Shaddr;
+       IMG_UINT32   ui32Shoffset;
+       IMG_UINT32   ui32Shsize;
+       IMG_UINT32   ui32Shlink;
+       IMG_UINT32   ui32Shinfo;
+       IMG_UINT32   ui32Shaddralign;
+       IMG_UINT32   ui32Shentsize;
+} IMG_ELF_SHDR;
+
+typedef struct
+{
+       IMG_UINT32   ui32Ptype;
+       IMG_UINT32   ui32Poffset;
+       IMG_UINT32   ui32Pvaddr;
+       IMG_UINT32   ui32Ppaddr;
+       IMG_UINT32   ui32Pfilesz;
+       IMG_UINT32   ui32Pmemsz;
+       IMG_UINT32   ui32Pflags;
+       IMG_UINT32   ui32Palign;
+} IMG_ELF_PROGRAM_HDR;
+
+#endif /* IMG_ELF_H */
diff --git a/drivers/gpu/drm/img/img-rogue/include/img_types.h b/drivers/gpu/drm/img/img-rogue/include/img_types.h
new file mode 100644 (file)
index 0000000..c2654d2
--- /dev/null
@@ -0,0 +1,324 @@
+/*************************************************************************/ /*!
+@File
+@Title          Global types for use by IMG APIs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines type aliases for use by IMG APIs.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef IMG_TYPES_H
+#define IMG_TYPES_H
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* To use C99 types and definitions, there are two special cases we need to
+ * cater for:
+ *
+ * - Visual Studio: in VS2010 or later, some standard headers are available,
+ *   and MSVC has its own built-in sized types. We can define the C99 types
+ *   in terms of these.
+ *
+ * - Linux kernel code: C99 sized types are defined in <linux/types.h>, but
+ *   some other features (like macros for constants or printf format
+ *   strings) are missing, so we need to fill in the gaps ourselves.
+ *
+ * For other cases (userspace code under Linux, Android or Neutrino, or
+ * firmware code), we can include the standard headers.
+ */
+#if defined(_MSC_VER)
+       #include <stdbool.h>            /* bool */
+       #include "msvc_types.h"
+#elif defined(__linux__) && defined(__KERNEL__)
+       #include <linux/version.h>
+       #include <linux/types.h>
+       #include "kernel_types.h"
+#elif defined(__linux__) || defined(__METAG) || defined(__MINGW32__) || \
+       defined(__QNXNTO__) || defined(INTEGRITY_OS) || defined(__riscv)
+       #include <stddef.h>                     /* NULL */
+       #include <stdint.h>
+       #include <inttypes.h>           /* intX_t/uintX_t, format specifiers */
+       #include <limits.h>                     /* INT_MIN, etc */
+       #include <stdbool.h>            /* bool */
+#elif defined(__mips)
+       #include <stddef.h>                     /* NULL */
+       #include <inttypes.h>           /* intX_t/uintX_t, format specifiers */
+       #include <stdbool.h>            /* bool */
+#else
+       #error C99 support not set up for this build
+#endif
+
+/*
+ * Due to a Klocwork bug, 'true'/'false' constants are not recognized to be of
+ * boolean type. This results in a large number of false positives being reported
+ * (MISRA.ETYPE.ASSIGN.2012: "An expression value of essential type 'signed char'
+ * is assigned to an object of essential type 'bool'"). Work around this by
+ * redefining those constants with cast to bool added.
+ */
+#if defined(__KLOCWORK__) && !defined(__cplusplus)
+#undef true
+#undef false
+#define true ((bool) 1)
+#define false ((bool) 0)
+#endif
+
+typedef unsigned int   IMG_UINT;
+typedef int                            IMG_INT;
+
+typedef uint8_t                        IMG_UINT8,      *IMG_PUINT8;
+typedef uint8_t                        IMG_BYTE,       *IMG_PBYTE;
+typedef int8_t                 IMG_INT8;
+typedef char                   IMG_CHAR,       *IMG_PCHAR;
+
+typedef uint16_t               IMG_UINT16,     *IMG_PUINT16;
+typedef int16_t                        IMG_INT16;
+typedef uint32_t               IMG_UINT32,     *IMG_PUINT32;
+typedef int32_t                        IMG_INT32,      *IMG_PINT32;
+#if defined(INTEGRITY_OS)
+#if __INT_BIT >= 32U
+#define IMG_UINT32_C(n) ((IMG_UINT32)(n ## U))
+#elif __LONG_BIT >= 32U
+#define IMG_UINT32_C(n) ((IMG_UINT32)(n ## UL))
+#elif defined(__LLONG_BIT) && __LLONG_BIT >= 32U
+#define IMG_UINT32_C(n) ((IMG_UINT32)(n ## ULL))
+#endif
+#else /* defined(INTEGRITY_OS) */
+#define IMG_UINT32_C(c) ((IMG_UINT32)UINT32_C(c))
+#endif /* defined(INTEGRITY_OS) */
+
+typedef uint64_t               IMG_UINT64,     *IMG_PUINT64;
+typedef int64_t                        IMG_INT64;
+#define IMG_INT64_C(c) INT64_C(c)
+#if defined(INTEGRITY_OS)
+#if __INT_BIT >= 64U
+#define IMG_UINT64_C(n)        (n ## U)
+#elif defined(__LONG_BIT) && __LONG_BIT >= 64U
+#define IMG_UINT64_C(n)        (n ## UL)
+#elif defined(__LLONG_BIT) && __LLONG_BIT >= 64U
+#define IMG_UINT64_C(n)        (n ## ULL)
+#endif
+#else /* defined(INTEGRITY_OS) */
+#define IMG_UINT64_C(c)        UINT64_C(c)
+#endif /* defined(INTEGRITY_OS) */
+#define IMG_UINT16_C(c)        UINT16_C(c)
+#define IMG_UINT64_FMTSPEC PRIu64
+#define IMG_UINT64_FMTSPECX PRIX64
+#define IMG_UINT64_FMTSPECx PRIx64
+#define IMG_UINT64_FMTSPECo PRIo64
+#define IMG_INT64_FMTSPECd PRId64
+
+#define IMG_UINT16_MAX UINT16_MAX
+#define IMG_UINT32_MAX UINT32_MAX
+#define IMG_UINT64_MAX UINT64_MAX
+
+#define IMG_INT16_MAX  INT16_MAX
+#define IMG_INT32_MAX  INT32_MAX
+#define IMG_INT64_MAX  INT64_MAX
+
+/* Linux kernel mode does not use floating point */
+typedef float                  IMG_FLOAT,      *IMG_PFLOAT;
+typedef double                 IMG_DOUBLE;
+
+typedef union
+{
+       IMG_UINT32 ui32;
+       IMG_FLOAT f;
+} IMG_UINT32_FLOAT;
+
+typedef int                            IMG_SECURE_TYPE;
+
+typedef        enum tag_img_bool
+{
+       IMG_FALSE               = 0,
+       IMG_TRUE                = 1,
+       IMG_FORCE_ALIGN = 0x7FFFFFFF
+} IMG_BOOL, *IMG_PBOOL;
+
+#if defined(UNDER_WDDM) || defined(WINDOWS_WDF)
+typedef IMG_CHAR const* IMG_PCCHAR;
+#endif
+
+/* Format specifiers for 'size_t' type */
+#if defined(_MSC_VER) || defined(__MINGW32__)
+#define IMG_SIZE_FMTSPEC  "%Iu"
+#define IMG_SIZE_FMTSPECX "%Ix"
+#else
+#define IMG_SIZE_FMTSPEC  "%zu"
+#define IMG_SIZE_FMTSPECX "%zx"
+#endif
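+
+/* Illustrative usage only (not from the original header):
+ *
+ *     printf("size = " IMG_SIZE_FMTSPEC "\n", sizeof(IMG_UINT64));
+ *
+ * picks the correct size_t conversion for the active toolchain.
+ */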
+
+#if defined(__linux__) && defined(__KERNEL__)
+/* prints the function name when used with printk */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0))
+#define IMG_PFN_FMTSPEC "%ps"
+#else
+#define IMG_PFN_FMTSPEC "%pf"
+#endif
+#else
+#define IMG_PFN_FMTSPEC "%p"
+#endif
+
+typedef void           *IMG_HANDLE;
+
+/* Process IDs */
+typedef IMG_UINT32      IMG_PID;
+
+/* OS connection type */
+typedef int             IMG_OS_CONNECTION;
+
+
+/*
+ * Address types.
+ * All types used to refer to a block of memory are wrapped in structures
+ * to enforce some degree of type safety, i.e. a IMG_DEV_VIRTADDR cannot
+ * be assigned to a variable of type IMG_DEV_PHYADDR because they are not the
+ * same thing.
+ *
+ * There is an assumption that the system contains at most one non-CPU MMU,
+ * and that a memory block is only mapped by that MMU once.
+ *
+ * Different devices could have offset views of the physical address space.
+ *
+ */
+
+
+/*
+ *
+ * +------------+    +------------+      +------------+        +------------+
+ * |    CPU     |    |    DEV     |      |    DEV     |        |    DEV     |
+ * +------------+    +------------+      +------------+        +------------+
+ *       |                 |                   |                     |
+ *       | void *          |IMG_DEV_VIRTADDR   |IMG_DEV_VIRTADDR     |
+ *       |                 \-------------------/                     |
+ *       |                          |                                |
+ * +------------+             +------------+                         |
+ * |    MMU     |             |    MMU     |                         |
+ * +------------+             +------------+                         |
+ *       |                          |                                |
+ *       |                          |                                |
+ *       |                          |                                |
+ *   +--------+                +---------+                      +--------+
+ *   | Offset |                | (Offset)|                      | Offset |
+ *   +--------+                +---------+                      +--------+
+ *       |                          |                IMG_DEV_PHYADDR |
+ *       |                          |                                |
+ *       |                          | IMG_DEV_PHYADDR                |
+ * +---------------------------------------------------------------------+
+ * |                         System Address bus                          |
+ * +---------------------------------------------------------------------+
+ *
+ */
+
+#define IMG_DEV_VIRTADDR_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX
+#define IMG_DEVMEM_SIZE_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX
+#define IMG_DEVMEM_ALIGN_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX
+#define IMG_DEVMEM_OFFSET_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX
+
+/* cpu physical address */
+typedef struct
+{
+#if defined(UNDER_WDDM) || defined(WINDOWS_WDF)
+       uintptr_t uiAddr;
+#define IMG_CAST_TO_CPUPHYADDR_UINT(var)       (uintptr_t)(var)
+#define CPUPHYADDR_FMTARG(var)                         (IMG_UINT64)(var)
+#define CPUPHYADDR_UINT_FMTSPEC "0x%016" IMG_UINT64_FMTSPECx
+#elif defined(__linux__) && defined(__KERNEL__)
+       phys_addr_t uiAddr;
+#define IMG_CAST_TO_CPUPHYADDR_UINT(var)       (phys_addr_t)(var)
+#define CPUPHYADDR_FMTARG(var)                         (&var)
+#define CPUPHYADDR_UINT_FMTSPEC "%pa"
+#else
+       IMG_UINT64 uiAddr;
+#define IMG_CAST_TO_CPUPHYADDR_UINT(var)       (IMG_UINT64)(var)
+#define CPUPHYADDR_FMTARG(var)                         (var)
+#define CPUPHYADDR_UINT_FMTSPEC "0x%016" IMG_UINT64_FMTSPECx
+#endif
+} IMG_CPU_PHYADDR;
+
+/* device physical address */
+typedef struct
+{
+       IMG_UINT64 uiAddr;
+} IMG_DEV_PHYADDR;
+
+/* dma address */
+typedef struct
+{
+       IMG_UINT64 uiAddr;
+} IMG_DMA_ADDR;
+
+/*
+       rectangle structure
+*/
+typedef struct
+{
+       IMG_INT32       x0;
+       IMG_INT32       y0;
+       IMG_INT32       x1;
+       IMG_INT32       y1;
+} IMG_RECT;
+
+typedef struct
+{
+       IMG_INT16       x0;
+       IMG_INT16       y0;
+       IMG_INT16       x1;
+       IMG_INT16       y1;
+} IMG_RECT_16;
+
+/*
+ * box structure
+ */
+typedef struct
+{
+       IMG_INT32       x0;
+       IMG_INT32       y0;
+       IMG_INT32       z0;
+       IMG_INT32       x1;
+       IMG_INT32       y1;
+       IMG_INT32       z1;
+} IMG_BOX;
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* IMG_TYPES_H */
+/******************************************************************************
+ End of file (img_types.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/include/kernel_types.h b/drivers/gpu/drm/img/img-rogue/include/kernel_types.h
new file mode 100644 (file)
index 0000000..c330510
--- /dev/null
@@ -0,0 +1,137 @@
+/*************************************************************************/ /*!
+@Title          C99-compatible types and definitions for Linux kernel code
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/kernel.h>
+
+/* Limits of specified-width integer types */
+
+/* S8_MIN, etc. were added in kernel version 3.14. The fallback values below
+ * are for earlier kernels; they can be removed once older kernels no longer
+ * need to be supported.
+ */
+#ifdef S8_MIN
+       #define INT8_MIN        S8_MIN
+#else
+       #define INT8_MIN        (-128)
+#endif
+
+#ifdef S8_MAX
+       #define INT8_MAX        S8_MAX
+#else
+       #define INT8_MAX        127
+#endif
+
+#ifdef U8_MAX
+       #define UINT8_MAX       U8_MAX
+#else
+       #define UINT8_MAX       0xFF
+#endif
+
+#ifdef S16_MIN
+       #define INT16_MIN       S16_MIN
+#else
+       #define INT16_MIN       (-32768)
+#endif
+
+#ifdef S16_MAX
+       #define INT16_MAX       S16_MAX
+#else
+       #define INT16_MAX       32767
+#endif
+
+#ifdef U16_MAX
+       #define UINT16_MAX      U16_MAX
+#else
+       #define UINT16_MAX      0xFFFF
+#endif
+
+#ifdef S32_MIN
+       #define INT32_MIN       S32_MIN
+#else
+       #define INT32_MIN       (-2147483647 - 1)
+#endif
+
+#ifdef S32_MAX
+       #define INT32_MAX       S32_MAX
+#else
+       #define INT32_MAX       2147483647
+#endif
+
+#ifdef U32_MAX
+       #define UINT32_MAX      U32_MAX
+#else
+       #define UINT32_MAX      0xFFFFFFFF
+#endif
+
+#ifdef S64_MIN
+       #define INT64_MIN       S64_MIN
+#else
+       #define INT64_MIN       (-9223372036854775807LL)
+#endif
+
+#ifdef S64_MAX
+       #define INT64_MAX       S64_MAX
+#else
+       #define INT64_MAX       9223372036854775807LL
+#endif
+
+#ifdef U64_MAX
+       #define UINT64_MAX      U64_MAX
+#else
+       #define UINT64_MAX      0xFFFFFFFFFFFFFFFFULL
+#endif
+
+/* Macros for integer constants */
+#define INT8_C                 S8_C
+#define UINT8_C                        U8_C
+#define INT16_C                        S16_C
+#define UINT16_C               U16_C
+#define INT32_C                        S32_C
+#define UINT32_C               U32_C
+#define INT64_C                        S64_C
+#define UINT64_C               U64_C
+
+/* Format conversion of integer types <inttypes.h> */
+
+#define PRIX64         "llX"
+#define PRIx64         "llx"
+#define PRIu64         "llu"
+#define PRId64         "lld"
diff --git a/drivers/gpu/drm/img/img-rogue/include/linux_sw_sync.h b/drivers/gpu/drm/img/img-rogue/include/linux_sw_sync.h
new file mode 100644 (file)
index 0000000..c12c650
--- /dev/null
@@ -0,0 +1,52 @@
+/*************************************************************************/ /*!
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _UAPI_LINUX_PVR_SW_SYNC_H
+#define _UAPI_LINUX_PVR_SW_SYNC_H
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+
+#include <linux/types.h>
+
+#include "pvrsrv_sync_km.h"
+#include "pvr_drm.h"
+
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) */
+#endif
diff --git a/drivers/gpu/drm/img/img-rogue/include/lock_types.h b/drivers/gpu/drm/img/img-rogue/include/lock_types.h
new file mode 100644 (file)
index 0000000..370ffc0
--- /dev/null
@@ -0,0 +1,92 @@
+/*************************************************************************/ /*!
+@File           lock_types.h
+@Title          Locking types
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Locking specific enums, defines and structures
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef LOCK_TYPES_H
+#define LOCK_TYPES_H
+
+/* In Linux kernel mode we are using the kernel mutex implementation directly
+ * with macros. This allows us to use the kernel lockdep feature for lock
+ * debugging. */
+#if defined(__linux__) && defined(__KERNEL__)
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+/* The mutex is defined as a pointer to be compatible with the other code. This
+ * isn't ideal and usually you wouldn't do that in kernel code. */
+typedef struct mutex *POS_LOCK;
+typedef struct rw_semaphore *POSWR_LOCK;
+typedef spinlock_t *POS_SPINLOCK;
+typedef atomic_t ATOMIC_T;
+
+#else /* defined(__linux__) && defined(__KERNEL__) */
+#include "img_types.h" /* needed for IMG_INT */
+typedef struct OS_LOCK_TAG *POS_LOCK;
+
+#if defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS)
+typedef struct OSWR_LOCK_TAG *POSWR_LOCK;
+#else /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */
+typedef struct OSWR_LOCK_TAG {
+       IMG_UINT32 ui32Dummy;
+} *POSWR_LOCK;
+#endif /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */
+
+#if defined(__linux__)
+       typedef struct OS_ATOMIC_TAG {IMG_INT32 counter;} ATOMIC_T;
+#elif defined(__QNXNTO__)
+       typedef struct OS_ATOMIC_TAG {IMG_INT32 counter;} ATOMIC_T;
+#elif defined(_WIN32)
+       /*
+        * Dummy definition. WDDM doesn't use Services, but some headers
+        * still have to be shared. This is one such case.
+        */
+       typedef struct OS_ATOMIC_TAG {IMG_INT32 counter;} ATOMIC_T;
+#elif defined(INTEGRITY_OS)
+       /* Only the lower 32 bits are used in the OS atomic APIs, for consistent behaviour across all OSes */
+       typedef struct OS_ATOMIC_TAG {IMG_INT64 counter;} ATOMIC_T;
+#else
+       #error "Please type-define an atomic lock for this environment"
+#endif
+
+#endif /* defined(__linux__) && defined(__KERNEL__) */
+
+#endif /* LOCK_TYPES_H */
diff --git a/drivers/gpu/drm/img/img-rogue/include/log2.h b/drivers/gpu/drm/img/img-rogue/include/log2.h
new file mode 100644 (file)
index 0000000..2182a02
--- /dev/null
@@ -0,0 +1,417 @@
+/*************************************************************************/ /*!
+@Title          Integer log2 and related functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef LOG2_H
+#define LOG2_H
+
+#include "img_defs.h"
+
+/*************************************************************************/ /*!
+@Description    Determine if a number is a power of two.
+@Input          n
+@Return         True if n is a power of 2, false otherwise. True if n == 0.
+*/ /**************************************************************************/
+static INLINE IMG_BOOL __const_function IsPower2(uint32_t n)
+{
+       /* C++ needs this cast. */
+       return (IMG_BOOL)((n & (n - 1U)) == 0U);
+}
+
+/*************************************************************************/ /*!
+@Description    Determine if a number is a power of two.
+@Input          n
+@Return         True if n is a power of 2, false otherwise. True if n == 0.
+*/ /**************************************************************************/
+static INLINE IMG_BOOL __const_function IsPower2_64(uint64_t n)
+{
+       /* C++ needs this cast. */
+       return (IMG_BOOL)((n & (n - 1U)) == 0U);
+}
+
+/* Code using GNU GCC intrinsics */
+#if (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__) || defined(__INTEL_COMPILER))
+
+/* CHAR_BIT is typically found in <limits.h>. For platforms where CHAR_BIT is
+ * not available, define it here, assuming 8 bits per byte. */
+#ifndef CHAR_BIT
+#define CHAR_BIT 8U
+#endif
+
+/*************************************************************************/ /*!
+@Description    Compute floor(log2(n))
+@Input          n
+@Return         log2(n) rounded down to the nearest integer. Returns 0 if n == 0
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function FloorLog2(uint32_t n)
+{
+       if (unlikely(n == 0U))
+       {
+               return 0;
+       }
+       else
+       {
+               uint32_t uNumBits = (uint32_t)CHAR_BIT * (uint32_t)sizeof(n);
+               return uNumBits - (uint32_t)__builtin_clz(n) - 1U;
+       }
+}
+
+/*************************************************************************/ /*!
+@Description    Compute floor(log2(n))
+@Input          n
+@Return         log2(n) rounded down to the nearest integer. Returns 0 if n == 0
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function FloorLog2_64(uint64_t n)
+{
+       if (unlikely(n == 0U))
+       {
+               return 0;
+       }
+       else
+       {
+               uint32_t uNumBits = (uint32_t)CHAR_BIT * (uint32_t)sizeof(n);
+               return uNumBits - (uint32_t)__builtin_clzll(n) - 1U;
+       }
+}
+
+/*************************************************************************/ /*!
+@Description    Compute ceil(log2(n))
+@Input          n
+@Return         log2(n) rounded up to the nearest integer. Returns 0 if n == 0
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function CeilLog2(uint32_t n)
+{
+       if (unlikely(n == 0U || n == 1U))
+       {
+               return 0;
+       }
+       else
+       {
+               uint32_t uNumBits = (uint32_t)CHAR_BIT * (uint32_t)sizeof(n);
+
+               n--; /* Handle powers of 2 */
+               return uNumBits - (uint32_t)__builtin_clz(n);
+       }
+}
+
+/*************************************************************************/ /*!
+@Description    Compute ceil(log2(n))
+@Input          n
+@Return         log2(n) rounded up to the nearest integer. Returns 0 if n == 0
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function CeilLog2_64(uint64_t n)
+{
+       if (unlikely(n == 0U || n == 1U))
+       {
+               return 0;
+       }
+       else
+       {
+               uint32_t uNumBits = (uint32_t)CHAR_BIT * (uint32_t)sizeof(n);
+
+               n--; /* Handle powers of 2 */
+               return uNumBits - (uint32_t)__builtin_clzll(n);
+       }
+}
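+
+/* Illustrative values (not part of the original header):
+ * FloorLog2(1000U) == 9 and CeilLog2(1000U) == 10, while both return 10 for
+ * n == 1024U since 1024 is an exact power of two.
+ */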
+
+/*************************************************************************/ /*!
+@Description    Compute log2(n) for exact powers of two only
+@Input          n                   Must be a power of two
+@Return         log2(n)
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function ExactLog2(uint32_t n)
+{
+       return (uint32_t)CHAR_BIT * (uint32_t)sizeof(n) - (uint32_t)__builtin_clz(n) - 1U;
+}
+
+/*************************************************************************/ /*!
+@Description    Compute log2(n) for exact powers of two only
+@Input          n                   Must be a power of two
+@Return         log2(n)
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function ExactLog2_64(uint64_t n)
+{
+       return (uint32_t)CHAR_BIT * (uint32_t)sizeof(n) - (uint32_t)__builtin_clzll(n) - 1U;
+}
+
+/*************************************************************************/ /*!
+@Description    Round a non-power-of-two number up to the next power of two.
+@Input          n
+@Return         n rounded up to the next power of two. If n is zero or
+                already a power of two, return n unmodified.
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function RoundUpToNextPowerOfTwo(uint32_t n)
+{
+       /* Cases with n greater than 2^31 need separate handling,
+        * as the result of (1<<32) is undefined. */
+       if (unlikely( n == 0U || n > (uint32_t)1 << ((uint32_t)CHAR_BIT * sizeof(n) - 1U)))
+       {
+               return 0;
+       }
+
+       /* Return n if it is already a power of 2 */
+       if ((IMG_BOOL)((n & (n - 1U)) == 0U))
+       {
+               return n;
+       }
+
+       return (uint32_t)1 << ((uint32_t)CHAR_BIT * sizeof(n) - (uint32_t)__builtin_clz(n));
+}
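+
+/* Illustrative values (not part of the original header):
+ * RoundUpToNextPowerOfTwo(17U) == 32U, RoundUpToNextPowerOfTwo(64U) == 64U,
+ * and values above 2^31, which cannot be rounded up within 32 bits, yield 0.
+ */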
+
+/*************************************************************************/ /*!
+@Description    Round a non-power-of-two number up to the next power of two.
+@Input          n
+@Return         n rounded up to the next power of two. If n is zero or
+                already a power of two, return n unmodified.
+*/ /**************************************************************************/
+static INLINE uint64_t __const_function RoundUpToNextPowerOfTwo_64(uint64_t n)
+{
+       /* Cases with n greater than 2^63 need separate handling,
+        * as the result of (1<<64) is undefined. */
+       if (unlikely( n == 0U || n > (uint64_t)1 << ((uint32_t)CHAR_BIT * sizeof(n) - 1U)))
+       {
+               return 0;
+       }
+
+       /* Return n if it is already a power of 2 */
+       if ((IMG_BOOL)((n & (n - 1U)) == 0U))
+       {
+               return n;
+       }
+
+       return (uint64_t)1 << ((uint64_t)CHAR_BIT * sizeof(n) - (uint64_t)__builtin_clzll(n));
+}
+
+#else /* #if (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__) || defined(__INTEL_COMPILER)) */
+
+/*************************************************************************/ /*!
+@Description    Round a non-power-of-two number up to the next power of two.
+@Input          n
+@Return         n rounded up to the next power of two. If n is zero or
+                already a power of two, return n unmodified.
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function RoundUpToNextPowerOfTwo(uint32_t n)
+{
+       n--;
+       n |= n >> 1;  /* handle  2 bit numbers */
+       n |= n >> 2;  /* handle  4 bit numbers */
+       n |= n >> 4;  /* handle  8 bit numbers */
+       n |= n >> 8;  /* handle 16 bit numbers */
+       n |= n >> 16; /* handle 32 bit numbers */
+       n++;
+
+       return n;
+}
+
+/*************************************************************************/ /*!
+@Description    Round a non-power-of-two number up to the next power of two.
+@Input          n
+@Return         n rounded up to the next power of two. If n is zero or
+                already a power of two, return n unmodified.
+*/ /**************************************************************************/
+static INLINE uint64_t __const_function RoundUpToNextPowerOfTwo_64(uint64_t n)
+{
+       n--;
+       n |= n >> 1;  /* handle  2 bit numbers */
+       n |= n >> 2;  /* handle  4 bit numbers */
+       n |= n >> 4;  /* handle  8 bit numbers */
+       n |= n >> 8;  /* handle 16 bit numbers */
+       n |= n >> 16; /* handle 32 bit numbers */
+       n |= n >> 32; /* handle 64 bit numbers */
+       n++;
+
+       return n;
+}
+
+/*************************************************************************/ /*!
+@Description    Compute floor(log2(n))
+@Input          n
+@Return         log2(n) rounded down to the nearest integer. Returns 0 if n == 0
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function FloorLog2(uint32_t n)
+{
+       uint32_t ui32log2 = 0;
+
+       while ((n >>= 1) != 0U)
+       {
+               ui32log2++;
+       }
+
+       return ui32log2;
+}
+
+/*************************************************************************/ /*!
+@Description    Compute floor(log2(n))
+@Input          n
+@Return         log2(n) rounded down to the nearest integer. Returns 0 if n == 0
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function FloorLog2_64(uint64_t n)
+{
+       uint32_t ui32log2 = 0;
+
+       while ((n >>= 1) != 0U)
+       {
+               ui32log2++;
+       }
+
+       return ui32log2;
+}
+
+/*************************************************************************/ /*!
+@Description    Compute ceil(log2(n))
+@Input          n
+@Return         log2(n) rounded up to the nearest integer. Returns 0 if n == 0
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function CeilLog2(uint32_t n)
+{
+       uint32_t ui32log2 = 0;
+
+       if (n == 0U)
+       {
+               return 0;
+       }
+
+       n--; /* Handle powers of 2 */
+
+       while (n != 0U)
+       {
+               ui32log2++;
+               n >>= 1;
+       }
+
+       return ui32log2;
+}
+
+/*************************************************************************/ /*!
+@Description    Compute ceil(log2(n))
+@Input          n
+@Return         log2(n) rounded up to the nearest integer. Returns 0 if n == 0
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function CeilLog2_64(uint64_t n)
+{
+       uint32_t ui32log2 = 0;
+
+       if (n == 0U)
+       {
+               return 0;
+       }
+
+       n--; /* Handle powers of 2 */
+
+       while (n != 0U)
+       {
+               ui32log2++;
+               n >>= 1;
+       }
+
+       return ui32log2;
+}
+
+/*************************************************************************/ /*!
+@Description    Compute log2(n) for exact powers of two only
+@Input          n                   Must be a power of two
+@Return         log2(n)
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function ExactLog2(uint32_t n)
+{
+       static const uint32_t b[] =
+               {0xAAAAAAAAU, 0xCCCCCCCCU, 0xF0F0F0F0U, 0xFF00FF00U, 0xFFFF0000U};
+       uint32_t r = (n & b[0]) != 0U;
+
+       r |= (uint32_t) ((n & b[4]) != 0U) << 4;
+       r |= (uint32_t) ((n & b[3]) != 0U) << 3;
+       r |= (uint32_t) ((n & b[2]) != 0U) << 2;
+       r |= (uint32_t) ((n & b[1]) != 0U) << 1;
+
+       return r;
+}
+
+/*************************************************************************/ /*!
+@Description    Compute log2(n) for exact powers of two only
+@Input          n                   Must be a power of two
+@Return         log2(n)
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function ExactLog2_64(uint64_t n)
+{
+       static const uint64_t b[] =
+               {0xAAAAAAAAAAAAAAAAULL, 0xCCCCCCCCCCCCCCCCULL,
+                0xF0F0F0F0F0F0F0F0ULL, 0xFF00FF00FF00FF00ULL,
+                0xFFFF0000FFFF0000ULL, 0xFFFFFFFF00000000ULL};
+       uint32_t r = (n & b[0]) != 0U;
+
+       r |= (uint32_t) ((n & b[5]) != 0U) << 5;
+       r |= (uint32_t) ((n & b[4]) != 0U) << 4;
+       r |= (uint32_t) ((n & b[3]) != 0U) << 3;
+       r |= (uint32_t) ((n & b[2]) != 0U) << 2;
+       r |= (uint32_t) ((n & b[1]) != 0U) << 1;
+
+       return r;
+}
+
+#endif /* #if (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__) || defined(__INTEL_COMPILER)) */
+
+/*************************************************************************/ /*!
+@Description    Compute floor(log2(size)) + 1, where size is the max of the
+                three dimensions. This is almost always the only valid use of
+                FloorLog2(); usually CeilLog2() should be used instead.
+                For a 5x5x1 texture, the 3 miplevels are:
+                    0: 5x5x1
+                    1: 2x2x1
+                    2: 1x1x1
+
+                For an 8x8x1 texture, the 4 miplevels are:
+                    0: 8x8x1
+                    1: 4x4x1
+                    2: 2x2x1
+                    3: 1x1x1
+
+@Input          sizeX, sizeY, sizeZ
+@Return         Count of mipmap levels for given dimensions
+*/ /**************************************************************************/
+static INLINE uint32_t __const_function NumMipLevels(uint32_t sizeX, uint32_t sizeY, uint32_t sizeZ)
+{
+
+       uint32_t maxSize = MAX(MAX(sizeX, sizeY), sizeZ);
+       return FloorLog2(maxSize) + 1U;
+}
+
+#endif /* LOG2_H */
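
The arithmetic above is easy to check by hand. The following self-contained sketch (illustrative only, not part of the patch; the lower-case names are hypothetical) mirrors the portable fallbacks to trace the bit-smearing trick behind RoundUpToNextPowerOfTwo() and to confirm the mip-level count for the 5x5x1 example:

#include <stdint.h>
#include <stdio.h>

static uint32_t round_up_pow2(uint32_t n)
{
	n--;               /* 17 -> 16 (0b10000)               */
	n |= n >> 1;       /* smear the top set bit downwards  */
	n |= n >> 2;
	n |= n >> 4;
	n |= n >> 8;
	n |= n >> 16;      /* ... 16 -> 31 (0b11111)           */
	return n + 1U;     /* 31 -> 32, the next power of two  */
}

static uint32_t floor_log2(uint32_t n)
{
	uint32_t l = 0;
	while ((n >>= 1) != 0U)
		l++;
	return l;
}

int main(void)
{
	printf("%u\n", (unsigned)round_up_pow2(17U));     /* 32                          */
	printf("%u\n", (unsigned)floor_log2(17U));        /* 4                           */
	printf("%u\n", (unsigned)(floor_log2(5U) + 1U));  /* 3, as NumMipLevels(5, 5, 1) */
	return 0;
}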
diff --git a/drivers/gpu/drm/img/img-rogue/include/multicore_defs.h b/drivers/gpu/drm/img/img-rogue/include/multicore_defs.h
new file mode 100644 (file)
index 0000000..2ca4e06
--- /dev/null
@@ -0,0 +1,53 @@
+/**************************************************************************/ /*!
+@File
+@Title          RGX Multicore Information flags
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGX_MULTICORE_DEFS_H
+#define RGX_MULTICORE_DEFS_H
+
+/* Capability bits returned to client in RGXGetMultiCoreInfo */
+#define RGX_MULTICORE_CAPABILITY_FRAGMENT_EN    (0x00000040U)
+#define RGX_MULTICORE_CAPABILITY_GEOMETRY_EN    (0x00000020U)
+#define RGX_MULTICORE_CAPABILITY_COMPUTE_EN     (0x00000010U)
+#define RGX_MULTICORE_CAPABILITY_PRIMARY_EN     (0x00000008U)
+#define RGX_MULTICORE_ID_CLRMSK                 (0xFFFFFFF8U)
+
+#endif  /* RGX_MULTICORE_DEFS_H */
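
As a hedged usage sketch, the capability flags above are simple mask tests, and the _ID_CLRMSK name suggests the low bits of the same word carry the core index. The helper names below are hypothetical, and the assumption that RGXGetMultiCoreInfo hands back one such word per core is inferred from the comment above rather than shown here:

#include <stdint.h>
#include <stdbool.h>
#include "multicore_defs.h"   /* the header added above */

/* True if this core can process fragment work. */
static bool core_can_run_fragment(uint32_t ui32Caps)
{
	return (ui32Caps & RGX_MULTICORE_CAPABILITY_FRAGMENT_EN) != 0U;
}

/* Recover the core index: keep only the bits that the clear mask would drop. */
static uint32_t core_id(uint32_t ui32Caps)
{
	return ui32Caps & ~RGX_MULTICORE_ID_CLRMSK;
}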
diff --git a/drivers/gpu/drm/img/img-rogue/include/osfunc_common.h b/drivers/gpu/drm/img/img-rogue/include/osfunc_common.h
new file mode 100644 (file)
index 0000000..539ef2c
--- /dev/null
@@ -0,0 +1,300 @@
+/*************************************************************************/ /*!
+@File
+@Title          OS functions header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS specific API definitions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef OSFUNC_COMMON_H
+/*! @cond Doxygen_Suppress */
+#define OSFUNC_COMMON_H
+/*! @endcond */
+
+#if defined(__KERNEL__) && defined(__linux__)
+#include <linux/string.h>
+#else
+#include <string.h>
+#endif
+
+#include "img_types.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**************************************************************************/ /*!
+@Function       DeviceMemSet
+@Description    Set memory, whose mapping may be uncached, to a given value.
+                Safe implementation for uncached mappings on all architectures,
+                optimised for speed where supported by the tool chain.
+                In such cases, OSDeviceMemSet() is defined as a call to this
+                function.
+@Input          pvDest     void pointer to the memory to be set
+@Input          ui8Value   byte containing the value to be set
+@Input          ui32Size   the number of bytes to be set to the given value
+@Return         None
+ */ /**************************************************************************/
+void DeviceMemSet(void *pvDest, IMG_UINT8 ui8Value, size_t ui32Size);
+
+/**************************************************************************/ /*!
+@Function       DeviceMemCopy
+@Description    Copy values from one area of memory to another. Safe
+                implementation for uncached mappings, of either the source or
+                destination, on all architectures, optimised for speed where
+                supported by the tool chain. In such cases, OSDeviceMemCopy()
+                is defined as a call to this function.
+@Input          pvDst      void pointer to the destination memory
+@Input          pvSrc      void pointer to the source memory
+@Input          ui32Size   the number of bytes to be copied
+@Return         None
+ */ /**************************************************************************/
+void DeviceMemCopy(void *pvDst, const void *pvSrc, size_t ui32Size);
+
+/**************************************************************************/ /*!
+@Function       DeviceMemSetBytes
+@Description    Potentially very slow (but safe) memset fallback for non-GNU C
+                compilers on arm64/aarch64
+@Input          pvDest     void pointer to the memory to be set
+@Input          ui8Value   byte containing the value to be set
+@Input          ui32Size   the number of bytes to be set to the given value
+@Return         None
+ */ /**************************************************************************/
+void DeviceMemSetBytes(void *pvDest, IMG_UINT8 ui8Value, size_t ui32Size);
+
+/**************************************************************************/ /*!
+@Function       DeviceMemCopyBytes
+@Description    Potentially very slow (but safe) memcpy fallback for non-GNU C
+                compilers on arm64/aarch64
+@Input          pvDst      void pointer to the destination memory
+@Input          pvSrc      void pointer to the source memory
+@Input          ui32Size   the number of bytes to be copied
+@Return         None
+ */ /**************************************************************************/
+void DeviceMemCopyBytes(void *pvDst, const void *pvSrc, size_t ui32Size);
+
+/**************************************************************************/ /*!
+@Function       StringLCopy
+@Description    Copy at most uDataSize-1 bytes from pszSrc to pszDest.
+                If no null byte ('\0') is contained within the first uDataSize-1
+                characters of the source string, the destination string will be
+                truncated. If the length of the source string is less than
+                uDataSize, an additional NUL byte will be copied to the
+                destination string to ensure that the string is NUL-terminated.
+@Input          pszDest       char pointer to the destination string
+@Input          pszSrc        const char pointer to the source string
+@Input          uDataSize     the maximum number of bytes to be copied
+@Return         Size of the source string
+ */ /**************************************************************************/
+size_t StringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize);
+
+#if defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)
+#if defined(__GNUC__)
+/* Workarounds for code that assumes memory will never be mapped uncached
+ * in kernel or user address spaces on arm64 platforms (or when
+ * PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY is defined for testing).
+ */
+
+#define OSDeviceMemSet(a,b,c)  DeviceMemSet((a), (b), (c))
+#define OSDeviceMemCopy(a,b,c) DeviceMemCopy((a), (b), (c))
+
+#else /* defined __GNUC__ */
+
+#define OSDeviceMemSet(a,b,c)  DeviceMemSetBytes((a), (b), (c))
+#define OSDeviceMemCopy(a,b,c) DeviceMemCopyBytes((a), (b), (c))
+
+#endif /* defined __GNUC__ */
+
+#else /* (defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)) */
+
+/* Everything else */
+
+/**************************************************************************/ /*!
+@Function       OSDeviceMemSet
+@Description    Set memory, whose mapping may be uncached, to a given value.
+                On some architectures, additional processing may be needed
+                if the mapping is uncached.
+@Input          a     void pointer to the memory to be set
+@Input          b     byte containing the value to be set
+@Input          c     the number of bytes to be set to the given value
+@Return         None
+ */ /**************************************************************************/
+#define OSDeviceMemSet(a,b,c) \
+       do { \
+               if ((c) != 0) \
+               { \
+                       (void) memset((a), (b), (c)); \
+                       (void) *(volatile IMG_UINT32*)((void*)(a)); \
+               } \
+       } while (false)
+
+/**************************************************************************/ /*!
+@Function       OSDeviceMemCopy
+@Description    Copy values from one area of memory to another, when one
+                or both mappings may be uncached.
+                On some architectures, additional processing may be needed
+                if mappings are uncached.
+@Input          a     void pointer to the destination memory
+@Input          b     void pointer to the source memory
+@Input          c     the number of bytes to be copied
+@Return         None
+ */ /**************************************************************************/
+#define OSDeviceMemCopy(a,b,c) \
+       do { \
+               if ((c) != 0) \
+               { \
+                       memcpy((a), (b), (c)); \
+                       (void) *(volatile IMG_UINT32*)((void*)(a)); \
+               } \
+       } while (false)
+
+#endif /* (defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)) */
+
+/**************************************************************************/ /*!
+@Function       OSCachedMemSet
+@Description    Set memory, where the mapping is known to be cached, to a
+                given value. This function exists to allow an optimal memset
+                to be performed when memory is known to be cached.
+@Input          a     void pointer to the memory to be set
+@Input          b     byte containing the value to be set
+@Input          c     the number of bytes to be set to the given value
+@Return         None
+ */ /**************************************************************************/
+#define OSCachedMemSet(a,b,c)  (void) memset((a), (b), (c))
+
+/**************************************************************************/ /*!
+@Function       OSCachedMemCopy
+@Description    Copy values from one area of memory to another, when both
+                mappings are known to be cached.
+                This function exists to allow an optimal memcpy to be
+                performed when memory is known to be cached.
+@Input          a     void pointer to the destination memory
+@Input          b     void pointer to the source memory
+@Input          c     the number of bytes to be copied
+@Return         Pointer to the destination memory.
+ */ /**************************************************************************/
+#define OSCachedMemCopy(a,b,c) memcpy((a), (b), (c))
+
+#if defined(__KERNEL__)
+
+/**************************************************************************/ /*!
+@Function       OSCachedMemSetWMB
+@Description    Set memory, where the mapping is known to be cached or
+                write-combine, to a given value, and issue a write memory
+                barrier afterwards. This function exists to allow an optimal
+                memset to be performed when memory is known to be cached or
+                write-combine.
+@Input          a     void pointer to the memory to be set
+@Input          b     byte containing the value to be set
+@Input          c     the number of bytes to be set to the given value
+@Return         None
+ */ /**************************************************************************/
+#if !defined(SERVICES_SC)
+#define OSCachedMemSetWMB(a,b,c) \
+       do { \
+               if ((c) != 0) \
+               { \
+                       (void) memset((a), (b), (c)); \
+                       OSWriteMemoryBarrier(a); \
+               } \
+       } while (false)
+#else
+#define OSCachedMemSetWMB(a,b,c) \
+       do { \
+               (void) memset((a), (b), (c)); \
+               OSWriteMemoryBarrier(); \
+       } while (false)
+#endif /* !defined(SERVICES_SC) */
+/**************************************************************************/ /*!
+@Function       OSCachedMemCopyWMB
+@Description    Copy values from one area of memory to another, when both
+                mappings are known to be cached or write-combine, and issue
+                a write memory barrier afterwards.
+                This function exists to allow an optimal memcpy to be
+                performed when memory is known to be cached or write-combine.
+@Input          a     void pointer to the destination memory
+@Input          b     void pointer to the source memory
+@Input          c     the number of bytes to be copied
+@Return         None
+ */ /**************************************************************************/
+#if !defined(SERVICES_SC)
+#define OSCachedMemCopyWMB(a,b,c) \
+       do { \
+               if ((c) != 0) \
+               { \
+                       (void) memcpy((a), (b), (c)); \
+                       OSWriteMemoryBarrier(a); \
+               } \
+       } while (false)
+#else
+#define OSCachedMemCopyWMB(a,b,c) \
+       do { \
+               (void) memcpy((a), (b), (c)); \
+               OSWriteMemoryBarrier(); \
+       } while (false)
+#endif /* !defined(SERVICES_SC) */
+#endif /* defined(__KERNEL__) */
+
+/**************************************************************************/ /*!
+@Function       OSStringLCopy
+@Description    Copy at most uDataSize-1 bytes from pszSrc to pszDest.
+                If no null byte ('\0') is contained within the first uDataSize-1
+                characters of the source string, the destination string will be
+                truncated. If the length of the source string is less than
+                uDataSize, an additional NUL byte will be copied to the
+                destination string to ensure that the string is NUL-terminated.
+@Input          a     char pointer to the destination string
+@Input          b     const char pointer to the source string
+@Input          c     the maximum number of bytes to be copied
+@Return         Size of the source string
+ */ /**************************************************************************/
+#if defined(__QNXNTO__) || (defined(__linux__) && defined(__KERNEL__) && !defined(DEBUG))
+#define OSStringLCopy(a,b,c) strlcpy((a), (b), (c))
+#else /* defined(__QNXNTO__) ... */
+#define OSStringLCopy(a,b,c) StringLCopy((a), (b), (c))
+#endif /* defined(__QNXNTO__) ... */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* OSFUNC_COMMON_H */
+
+/******************************************************************************
+ End of file (osfunc_common.h)
+******************************************************************************/
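
A hedged usage sketch of the three families declared above: the OSCachedMem* macros for mappings known to be cached, the OSDeviceMem* macros when the mapping may be uncached, and the *WMB variants (kernel side only) when a write memory barrier must follow the write. The buffer names and the enclosing function are hypothetical; OSWriteMemoryBarrier() is assumed to be provided by another header in the patch:

#include <stddef.h>
#include "osfunc_common.h"   /* the header added above */

static void example_fill_buffers(void *pvCachedScratch,
                                 void *pvMaybeUncached,
                                 void *pvWriteCombined,
                                 size_t uiSize)
{
	/* Mapping known to be cached: the optimal memset is used directly. */
	OSCachedMemSet(pvCachedScratch, 0, uiSize);

	/* Mapping may be uncached: use the safe variant, which expands to
	 * DeviceMemSet()/DeviceMemSetBytes() on arm64 and to memset() plus a
	 * read-back elsewhere. */
	OSDeviceMemSet(pvMaybeUncached, 0, uiSize);

#if defined(__KERNEL__)
	/* Cached or write-combine memory another agent will read next: set it
	 * and issue a write memory barrier afterwards. */
	OSCachedMemSetWMB(pvWriteCombined, 0xFF, uiSize);
#endif
}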
diff --git a/drivers/gpu/drm/img/img-rogue/include/pdumpdefs.h b/drivers/gpu/drm/img/img-rogue/include/pdumpdefs.h
new file mode 100644 (file)
index 0000000..3f8ccca
--- /dev/null
@@ -0,0 +1,249 @@
+/*************************************************************************/ /*!
+@File
+@Title          PDUMP definitions header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    PDUMP definitions header
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PDUMPDEFS_H
+#define PDUMPDEFS_H
+
+/*! PDump Pixel Format Enumeration */
+typedef enum _PDUMP_PIXEL_FORMAT_
+{
+       PVRSRV_PDUMP_PIXEL_FORMAT_UNSUPPORTED = 0,
+       PVRSRV_PDUMP_PIXEL_FORMAT_RGB8 = 1,
+       PVRSRV_PDUMP_PIXEL_FORMAT_RGB332 = 2,
+       PVRSRV_PDUMP_PIXEL_FORMAT_KRGB555 = 3,
+       PVRSRV_PDUMP_PIXEL_FORMAT_RGB565 = 4,
+       PVRSRV_PDUMP_PIXEL_FORMAT_ARGB4444 = 5,
+       PVRSRV_PDUMP_PIXEL_FORMAT_ARGB1555 = 6,
+       PVRSRV_PDUMP_PIXEL_FORMAT_RGB888 = 7,
+       PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8888 = 8,
+       PVRSRV_PDUMP_PIXEL_FORMAT_YUV8 = 9,
+/*     PVRSRV_PDUMP_PIXEL_FORMAT_AYUV4444 = 10, */
+       PVRSRV_PDUMP_PIXEL_FORMAT_VY0UY1_8888 = 11,
+       PVRSRV_PDUMP_PIXEL_FORMAT_UY0VY1_8888 = 12,
+       PVRSRV_PDUMP_PIXEL_FORMAT_Y0UY1V_8888 = 13,
+       PVRSRV_PDUMP_PIXEL_FORMAT_Y0VY1U_8888 = 14,
+       PVRSRV_PDUMP_PIXEL_FORMAT_YUV888 = 15,
+       PVRSRV_PDUMP_PIXEL_FORMAT_UYVY10101010 = 16,
+       PVRSRV_PDUMP_PIXEL_FORMAT_VYAUYA8888 = 17,
+       PVRSRV_PDUMP_PIXEL_FORMAT_AYUV8888 = 18,
+       PVRSRV_PDUMP_PIXEL_FORMAT_AYUV2101010 = 19,
+       PVRSRV_PDUMP_PIXEL_FORMAT_YUV101010 = 20,
+       PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y8 = 21,
+       PVRSRV_PDUMP_PIXEL_FORMAT_YUV_IMC2 = 22,
+       PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12 = 23,
+       PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL8 = 24,
+       PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL12 = 25,
+       PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV8 = 26,
+       PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8 = 27,
+       PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y10 = 28,
+       PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV10 = 29,
+       PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV10 = 30,
+       PVRSRV_PDUMP_PIXEL_FORMAT_ABGR8888 = 31,
+       PVRSRV_PDUMP_PIXEL_FORMAT_BGRA8888 = 32,
+       PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8332 = 33,
+       PVRSRV_PDUMP_PIXEL_FORMAT_RGB555 = 34,
+       PVRSRV_PDUMP_PIXEL_FORMAT_F16 = 35,
+       PVRSRV_PDUMP_PIXEL_FORMAT_F32 = 36,
+       PVRSRV_PDUMP_PIXEL_FORMAT_L16 = 37,
+       PVRSRV_PDUMP_PIXEL_FORMAT_L32 = 38,
+       PVRSRV_PDUMP_PIXEL_FORMAT_RGBA8888 = 39,
+       PVRSRV_PDUMP_PIXEL_FORMAT_ABGR4444 = 40,
+       PVRSRV_PDUMP_PIXEL_FORMAT_RGBA4444 = 41,
+       PVRSRV_PDUMP_PIXEL_FORMAT_BGRA4444 = 42,
+       PVRSRV_PDUMP_PIXEL_FORMAT_ABGR1555 = 43,
+       PVRSRV_PDUMP_PIXEL_FORMAT_RGBA5551 = 44,
+       PVRSRV_PDUMP_PIXEL_FORMAT_BGRA5551 = 45,
+       PVRSRV_PDUMP_PIXEL_FORMAT_BGR565 = 46,
+       PVRSRV_PDUMP_PIXEL_FORMAT_A8 = 47,
+       PVRSRV_PDUMP_PIXEL_FORMAT_F16F16F16F16 = 49,
+       PVRSRV_PDUMP_PIXEL_FORMAT_A4 = 50,
+       PVRSRV_PDUMP_PIXEL_FORMAT_ARGB2101010 = 51,
+       PVRSRV_PDUMP_PIXEL_FORMAT_RSGSBS888 = 52,
+       PVRSRV_PDUMP_PIXEL_FORMAT_F32F32F32F32 = 53,
+       PVRSRV_PDUMP_PIXEL_FORMAT_F16F16 = 54,
+       PVRSRV_PDUMP_PIXEL_FORMAT_F32F32 = 55,
+       PVRSRV_PDUMP_PIXEL_FORMAT_F16F16F16 = 56,
+       PVRSRV_PDUMP_PIXEL_FORMAT_F32F32F32 = 57,
+       PVRSRV_PDUMP_PIXEL_FORMAT_U8 = 58,
+       PVRSRV_PDUMP_PIXEL_FORMAT_U8U8 = 59,
+       PVRSRV_PDUMP_PIXEL_FORMAT_U16 = 60,
+       PVRSRV_PDUMP_PIXEL_FORMAT_U16U16 = 61,
+       PVRSRV_PDUMP_PIXEL_FORMAT_U16U16U16U16 = 62,
+       PVRSRV_PDUMP_PIXEL_FORMAT_U32 = 63,
+       PVRSRV_PDUMP_PIXEL_FORMAT_U32U32 = 64,
+       PVRSRV_PDUMP_PIXEL_FORMAT_U32U32U32U32 = 65,
+       PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV32 = 66,
+
+       PVRSRV_PDUMP_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff
+
+} PDUMP_PIXEL_FORMAT;
+
+typedef enum _PDUMP_FBC_SWIZZLE_
+{
+       PVRSRV_PDUMP_FBC_SWIZZLE_ARGB = 0x0,
+       PVRSRV_PDUMP_FBC_SWIZZLE_ARBG = 0x1,
+       PVRSRV_PDUMP_FBC_SWIZZLE_AGRB = 0x2,
+       PVRSRV_PDUMP_FBC_SWIZZLE_AGBR = 0x3,
+       PVRSRV_PDUMP_FBC_SWIZZLE_ABGR = 0x4,
+       PVRSRV_PDUMP_FBC_SWIZZLE_ABRG = 0x5,
+       PVRSRV_PDUMP_FBC_SWIZZLE_RGBA = 0x8,
+       PVRSRV_PDUMP_FBC_SWIZZLE_RBGA = 0x9,
+       PVRSRV_PDUMP_FBC_SWIZZLE_GRBA = 0xA,
+       PVRSRV_PDUMP_FBC_SWIZZLE_GBRA = 0xB,
+       PVRSRV_PDUMP_FBC_SWIZZLE_BGRA = 0xC,
+       PVRSRV_PDUMP_FBC_SWIZZLE_BRGA = 0xD,
+} PDUMP_FBC_SWIZZLE;
+
+/*! PDump addrmode */
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT                  0
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_MASK                   0x000000FF
+
+#define PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_SHIFT                        8
+#define PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_NEGATIVE             (1U << PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_SHIFT)
+
+#define PVRSRV_PDUMP_ADDRMODE_BIFTILE_MODE_SHIFT               12
+#define PVRSRV_PDUMP_ADDRMODE_BIFTILE_MODE_MASK                        0x000FF000
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT                            20
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_MASK                             0x00F00000
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCDECOR_SHIFT                   24
+#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_SHIFT                   25
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT                  28
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_MASK                   0xF0000000
+
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_STRIDE                 (0U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE1 (1U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE2 (2U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE3 (3U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE4 (4U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE5 (5U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE6 (6U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE7 (7U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_TWIDDLED               (9U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_PAGETILED              (11U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_ZTWIDDLED              (12U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_NONE                             (0U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_DIRECT               (1U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_DIRECT              (2U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_32X2_DIRECT              (3U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_INDIRECT             (4U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_INDIRECT            (5U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_INDIRECT_4TILE       (6U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_INDIRECT_4TILE      (7U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+
+#define PVRSRV_PDUMP_ADDRMODE_FBC_DECOR                                        (1U << PVRSRV_PDUMP_ADDRMODE_FBCDECOR_SHIFT)
+
+#define PVRSRV_PDUMP_ADDRMODE_FBC_LOSSY                                        (1U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_SHIFT)
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_BASE                   (1U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_ENHANCED               (2U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V2                             (3U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_SURFACE             (4U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_RESOURCE            (5U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_1_SURFACE   (6U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_1_RESOURCE  (7U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V4                             (8U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V4PLUS                 (9U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_TFBCDC                 (10U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+
+/*! PDump Poll Operator */
+typedef enum _PDUMP_POLL_OPERATOR
+{
+       PDUMP_POLL_OPERATOR_EQUAL = 0,
+       PDUMP_POLL_OPERATOR_LESS = 1,
+       PDUMP_POLL_OPERATOR_LESSEQUAL = 2,
+       PDUMP_POLL_OPERATOR_GREATER = 3,
+       PDUMP_POLL_OPERATOR_GREATEREQUAL = 4,
+       PDUMP_POLL_OPERATOR_NOTEQUAL = 5,
+} PDUMP_POLL_OPERATOR;
+
+
+#define PVRSRV_PDUMP_MAX_FILENAME_SIZE                 75  /*!< Max length of a pdump log file name */
+#define PVRSRV_PDUMP_MAX_COMMENT_SIZE                  350 /*!< Max length of a pdump comment */
+
+/*!
+       PDump MMU type
+       (Maps to values listed in "PowerVR Tools.Pdump2 Script Functions.doc" Sec 2.13)
+*/
+typedef enum
+{
+       PDUMP_MMU_TYPE_4KPAGE_32BIT_STDTILE  = 1,
+       PDUMP_MMU_TYPE_VARPAGE_32BIT_STDTILE = 2,
+       PDUMP_MMU_TYPE_4KPAGE_36BIT_EXTTILE  = 3,
+       PDUMP_MMU_TYPE_4KPAGE_32BIT_EXTTILE  = 4,
+       PDUMP_MMU_TYPE_4KPAGE_36BIT_STDTILE  = 5,
+       PDUMP_MMU_TYPE_VARPAGE_40BIT         = 6,
+       PDUMP_MMU_TYPE_VIDEO_40BIT_STDTILE   = 7,
+       PDUMP_MMU_TYPE_VIDEO_40BIT_EXTTILE   = 8,
+       PDUMP_MMU_TYPE_MIPS_MICROAPTIV       = 9,
+       PDUMP_MMU_TYPE_LAST
+} PDUMP_MMU_TYPE;
+
+/*!
+       PDump states
+       These values are used by the bridge call PVRSRVPDumpGetState
+*/
+#define PDUMP_STATE_CAPTURE_FRAME              (1U)            /*!< Flag represents the PDump being in capture range or not*/
+#define PDUMP_STATE_CONNECTED                  (2U)            /*!< Flag represents the PDump Client App being connected on not */
+#define PDUMP_STATE_SUSPENDED                  (4U)            /*!< Flag represents the PDump being suspended or not */
+#define PDUMP_STATE_CAPTURE_IN_INTERVAL        (8U)            /*!< Flag represents the PDump being in a capture range interval */
+
+/*!
+       PDump Capture modes
+       Values used with calls to PVRSRVPDumpSetDefaultCaptureParams
+*/
+#define PDUMP_CAPMODE_UNSET                     0x00000000UL
+#define PDUMP_CAPMODE_FRAMED                    0x00000001UL
+#define PDUMP_CAPMODE_CONTINUOUS                0x00000002UL
+#define PDUMP_CAPMODE_BLOCKED                   0x00000003UL
+
+#define PDUMP_CAPMODE_MAX                       PDUMP_CAPMODE_BLOCKED
+
+#endif /* PDUMPDEFS_H */
+
+/*****************************************************************************
+ End of file (pdumpdefs.h)
+*****************************************************************************/
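
A hedged sketch of how the address-mode fields and state flags above compose. The field packing follows the *_SHIFT definitions; the idea that the word returned by PVRSRVPDumpGetState carries the PDUMP_STATE_* flags is taken from the comment above, and the function names are hypothetical:

#include <stdint.h>
#include <stdbool.h>
#include "pdumpdefs.h"   /* the header added above */

static uint32_t example_addrmode(void)
{
	uint32_t ui32AddrMode = 0;

	ui32AddrMode |= PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_TWIDDLED;  /* memory format field */
	ui32AddrMode |= PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_DIRECT;  /* FBC mode field      */
	ui32AddrMode |= PVRSRV_PDUMP_ADDRMODE_FBC_LOSSY;           /* lossy flag          */
	ui32AddrMode |= PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V4;        /* FBC compat field    */

	return ui32AddrMode;
}

static bool example_capturing(uint32_t ui32State)
{
	return (ui32State & PDUMP_STATE_CONNECTED) != 0U &&
	       (ui32State & PDUMP_STATE_CAPTURE_FRAME) != 0U;
}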
diff --git a/drivers/gpu/drm/img/img-rogue/include/pdumpdesc.h b/drivers/gpu/drm/img/img-rogue/include/pdumpdesc.h
new file mode 100644 (file)
index 0000000..d159bf4
--- /dev/null
@@ -0,0 +1,226 @@
+/*************************************************************************/ /*!
+@File           pdumpdesc.h
+@Title          PDump Descriptor format
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Describes PDump descriptors that may be passed to the
+                extraction routines (SAB).
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(PDUMPDESC_H)
+#define PDUMPDESC_H
+
+#include "pdumpdefs.h"
+
+/*
+ * Common fields
+ */
+#define HEADER_WORD0_TYPE_SHIFT                                (0)
+#define HEADER_WORD0_TYPE_CLRMSK                       (0xFFFFFFFFU)
+
+#define HEADER_WORD1_SIZE_SHIFT                                (0)
+#define HEADER_WORD1_SIZE_CLRMSK                       (0x0000FFFFU)
+#define HEADER_WORD1_VERSION_SHIFT                     (16)
+#define HEADER_WORD1_VERSION_CLRMSK                    (0xFFFF0000U)
+
+#define HEADER_WORD2_DATA_SIZE_SHIFT           (0)
+#define HEADER_WORD2_DATA_SIZE_CLRMSK          (0xFFFFFFFFU)
+
+
+/*
+ * The image type descriptor
+ */
+
+/*
+ * Header type (IMGBv2) - 'IMGB' in hex + VERSION 2
+ * Header size - 64 bytes
+ */
+#define IMAGE_HEADER_TYPE                                      (0x42474D49)
+#define IMAGE_HEADER_SIZE                                      (64)
+#define IMAGE_HEADER_VERSION                           (2)
+
+/*
+ * Image type-specific fields
+ */
+#define IMAGE_HEADER_WORD3_LOGICAL_WIDTH_SHIFT         (0)
+#define IMAGE_HEADER_WORD3_LOGICAL_WIDTH_CLRMSK                (0xFFFFFFFFU)
+
+#define IMAGE_HEADER_WORD4_LOGICAL_HEIGHT_SHIFT                (0)
+#define IMAGE_HEADER_WORD4_LOGICAL_HEIGHT_CLRMSK       (0xFFFFFFFFU)
+
+#define IMAGE_HEADER_WORD5_FORMAT_SHIFT                                (0)
+#define IMAGE_HEADER_WORD5_FORMAT_CLRMSK                       (0xFFFFFFFFU)
+
+#define IMAGE_HEADER_WORD6_PHYSICAL_WIDTH_SHIFT                (0)
+#define IMAGE_HEADER_WORD6_PHYSICAL_WIDTH_CLRMSK       (0xFFFFFFFFU)
+
+#define IMAGE_HEADER_WORD7_PHYSICAL_HEIGHT_SHIFT       (0)
+#define IMAGE_HEADER_WORD7_PHYSICAL_HEIGHT_CLRMSK      (0xFFFFFFFFU)
+
+#define IMAGE_HEADER_WORD8_TWIDDLING_SHIFT                     (0)
+#define IMAGE_HEADER_WORD8_TWIDDLING_CLRMSK                    (0x000000FFU)
+#define IMAGE_HEADER_WORD8_TWIDDLING_STRIDED           (0 << IMAGE_HEADER_WORD8_TWIDDLING_SHIFT)
+#define IMAGE_HEADER_WORD8_TWIDDLING_NTWIDDLE          (9 << IMAGE_HEADER_WORD8_TWIDDLING_SHIFT)
+#define IMAGE_HEADER_WORD8_TWIDDLING_ZTWIDDLE          (12 << IMAGE_HEADER_WORD8_TWIDDLING_SHIFT)
+
+
+#define IMAGE_HEADER_WORD8_STRIDE_SHIFT                                (8)
+#define IMAGE_HEADER_WORD8_STRIDE_CLRMSK                       (0x0000FF00U)
+#define IMAGE_HEADER_WORD8_STRIDE_POSITIVE                     (0 << IMAGE_HEADER_WORD8_STRIDE_SHIFT)
+#define IMAGE_HEADER_WORD8_STRIDE_NEGATIVE                     (1 << IMAGE_HEADER_WORD8_STRIDE_SHIFT)
+
+#define IMAGE_HEADER_WORD8_BIFTYPE_SHIFT                       (16)
+#define IMAGE_HEADER_WORD8_BIFTYPE_CLRMSK                      (0x00FF0000U)
+#define IMAGE_HEADER_WORD8_BIFTYPE_NONE                                (0 << IMAGE_HEADER_WORD8_BIFTYPE_SHIFT)
+
+#define IMAGE_HEADER_WORD8_FBCTYPE_SHIFT                       (24)
+#define IMAGE_HEADER_WORD8_FBCTYPE_CLRMSK                      (0xFF000000U)
+#define IMAGE_HEADER_WORD8_FBCTYPE_8X8                         (1 << IMAGE_HEADER_WORD8_FBCTYPE_SHIFT)
+#define IMAGE_HEADER_WORD8_FBCTYPE_16x4                                (2 << IMAGE_HEADER_WORD8_FBCTYPE_SHIFT)
+#define IMAGE_HEADER_WORD8_FBCTYPE_32x2                                (3 << IMAGE_HEADER_WORD8_FBCTYPE_SHIFT)
+
+#define IMAGE_HEADER_WORD9_FBCDECOR_SHIFT                      (0)
+#define IMAGE_HEADER_WORD9_FBCDECOR_CLRMSK                     (0x000000FFU)
+#define IMAGE_HEADER_WORD9_FBCDECOR_ENABLE                     (1 << IMAGE_HEADER_WORD9_FBCDECOR_SHIFT)
+
+/* Align with fbcomp_export_c.h in pdump_tools branch */
+#define IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT                     (8)
+#define IMAGE_HEADER_WORD9_FBCCOMPAT_CLRMSK                    (0x0000FF00U)
+#define IMAGE_HEADER_WORD9_FBCCOMPAT_SAME_AS_GPU       (0 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT)
+#define IMAGE_HEADER_WORD9_FBCCOMPAT_BASE                      (1 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT)
+#define IMAGE_HEADER_WORD9_FBCCOMPAT_TWIDDLED_EN       (2 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) /* TWIDDLED_ENHANCED */
+#define IMAGE_HEADER_WORD9_FBCCOMPAT_V2                                (3 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT)
+#define IMAGE_HEADER_WORD9_FBCCOMPAT_V3_0_LAYOUT1   (4 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT)
+#define IMAGE_HEADER_WORD9_FBCCOMPAT_V3_0_LAYOUT2   (5 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) /* V30_WITH_HEADER_REMAP */
+#define IMAGE_HEADER_WORD9_FBCCOMPAT_V3_1_LAYOUT1   (6 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT)
+#define IMAGE_HEADER_WORD9_FBCCOMPAT_V3_1_LAYOUT2   (7 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) /* V31_WITH_HEADER_REMAP */
+#define IMAGE_HEADER_WORD9_FBCCOMPAT_V4                                (8 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT)
+#define IMAGE_HEADER_WORD9_FBCCOMPAT_V4_PLUS           (9 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT)
+#define IMAGE_HEADER_WORD9_FBCCOMPAT_TFBC                      (10 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT)
+
+#define IMAGE_HEADER_WORD9_LOSSY_SHIFT                         (16)
+#define IMAGE_HEADER_WORD9_LOSSY_CLRMSK                                (0x00FF0000U)
+/* Non-TFBC */
+#define IMAGE_HEADER_WORD9_LOSSY_ON                                    (1 << IMAGE_HEADER_WORD9_LOSSY_SHIFT)
+
+/* TFBC */
+#define IMAGE_HEADER_WORD9_LOSSY_75                                    (1 << IMAGE_HEADER_WORD9_LOSSY_SHIFT)
+#define IMAGE_HEADER_WORD9_LOSSY_37                                    (1 << IMAGE_HEADER_WORD9_LOSSY_SHIFT)
+#define IMAGE_HEADER_WORD9_LOSSY_50                                    (2 << IMAGE_HEADER_WORD9_LOSSY_SHIFT)
+#define IMAGE_HEADER_WORD9_LOSSY_25                                    (3 << IMAGE_HEADER_WORD9_LOSSY_SHIFT)
+#define IMAGE_HEADER_WORD9_LOSSY_OFF                           (0 << IMAGE_HEADER_WORD9_LOSSY_SHIFT)
+
+#define IMAGE_HEADER_WORD9_SWIZZLE_SHIFT                       (24)
+#define IMAGE_HEADER_WORD9_SWIZZLE_CLRMSK                      (0xFF000000U)
+#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_ARGB           (0x0 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT)
+#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_ARBG           (0x1 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT)
+#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_AGRB           (0x2 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT)
+#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_AGBR           (0x3 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT)
+#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_ABGR           (0x4 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT)
+#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_ABRG           (0x5 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT)
+#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_RGBA           (0x8 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT)
+#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_RBGA           (0x9 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT)
+#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_GRBA           (0xA << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT)
+#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_GBRA           (0xB << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT)
+#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_BGRA           (0xC << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT)
+#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_BRGA           (0xD << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT)
+
+#define IMAGE_HEADER_WORD10_FBCCLEAR_CH0_SHIFT         (0)
+#define IMAGE_HEADER_WORD10_FBCCLEAR_CH0_CLRMSK                (0xFFFFFFFFU)
+
+#define IMAGE_HEADER_WORD11_FBCCLEAR_CH1_SHIFT         (0)
+#define IMAGE_HEADER_WORD11_FBCCLEAR_CH1_CLRMSK                (0xFFFFFFFFU)
+
+#define IMAGE_HEADER_WORD12_FBCCLEAR_CH2_SHIFT         (0)
+#define IMAGE_HEADER_WORD12_FBCCLEAR_CH2_CLRMSK                (0xFFFFFFFFU)
+
+#define IMAGE_HEADER_WORD13_FBCCLEAR_CH3_SHIFT         (0)
+#define IMAGE_HEADER_WORD13_FBCCLEAR_CH3_CLRMSK                (0xFFFFFFFFU)
+
+#define IMAGE_HEADER_WORD14_TFBC_GROUP_SHIFT           (0)
+#define IMAGE_HEADER_WORD14_TFBC_GROUP_CLRMSK          (0x000000FFU)
+#define IMAGE_HEADER_WORD14_TFBC_GROUP_25_50_75                (0 << IMAGE_HEADER_WORD14_TFBC_GROUP_SHIFT)
+#define IMAGE_HEADER_WORD14_TFBC_GROUP_25_37_50                (1 << IMAGE_HEADER_WORD14_TFBC_GROUP_SHIFT)
+
+#define IMAGE_HEADER_WORD14_COMP_SCHEME_SHIFT          (8)
+#define IMAGE_HEADER_WORD14_COMP_SCHEME_CLRMSK         (0x0000FF00U)
+#define IMAGE_HEADER_WORD14_COMP_SCHEME_ALL                    (0 << IMAGE_HEADER_WORD14_COMP_SCHEME_SHIFT)
+#define IMAGE_HEADER_WORD14_COMP_SCHEME_D_STD_CORR     (1 << IMAGE_HEADER_WORD14_COMP_SCHEME_SHIFT)
+#define IMAGE_HEADER_WORD14_COMP_SCHEME_D_STD_ONLY     (2 << IMAGE_HEADER_WORD14_COMP_SCHEME_SHIFT)
+#define IMAGE_HEADER_WORD14_COMP_SCHEME_PTC_ONLY       (3 << IMAGE_HEADER_WORD14_COMP_SCHEME_SHIFT)
+
+#define IMAGE_HEADER_WORD14_YUV10_OPTIMAL_FMT_8_SHIFT  (16)
+#define IMAGE_HEADER_WORD14_YUV10_OPTIMAL_FMT_8_CLRMSK (0x00FF0000U)
+#define IMAGE_HEADER_WORD14_YUV10_OPTIMAL_FMT_8_EN             (1 << IMAGE_HEADER_WORD14_YUV10_OPTIMAL_FMT_8_SHIFT) /* Treat YUV10 optimal formats as 8 bits */
+
+/* IMAGE_HEADER_WORD15_RESERVED2 */
+
+/*
+ * The data type descriptor
+ */
+
+/*
+ * Header type (IMGCv1) - 'IMGC' in hex + VERSION 0
+ * Header size - 20 bytes (5 x 32 bit WORDS)
+ */
+#define DATA_HEADER_TYPE                    (0x43474D49)
+#define DATA_HEADER_SIZE                    (20)
+#define DATA_HEADER_VERSION                 (0)
+
+/*
+ * The IBIN type descriptor
+ */
+
+/*
+ * Header type (IBIN) - 'IBIN' in hex + VERSION 0
+ * Header size - 12 bytes (3 x 32 bit WORDS)
+ */
+#define IBIN_HEADER_TYPE                    (0x4e494249)
+#define IBIN_HEADER_SIZE                    (12)
+#define IBIN_HEADER_VERSION                 (0)
+
+/*
+ * Data type-specific fields
+ */
+#define DATA_HEADER_WORD3_ELEMENT_TYPE_SHIFT        (0)
+#define DATA_HEADER_WORD3_ELEMENT_TYPE_CLRMSK       (0xFFFFFFFFU)
+
+#define DATA_HEADER_WORD4_ELEMENT_COUNT_SHIFT       (0)
+#define DATA_HEADER_WORD4_ELEMENT_COUNT_CLRMSK      (0xFFFFFFFFU)
+
+#endif /* PDUMPDESC_H */
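
A hedged sketch of filling the first words of an IMGBv2 image descriptor from the constants above. Only the field names and shifts are taken from the header; treating the descriptor as a plain array of 16 32-bit words (64 bytes) and the helper itself are assumptions:

#include <stdint.h>
#include "pdumpdesc.h"   /* the header added above; pulls in pdumpdefs.h */

static void example_image_header(uint32_t aui32Hdr[16],
                                 uint32_t ui32Width, uint32_t ui32Height,
                                 uint32_t ui32DataSize)
{
	aui32Hdr[0] = IMAGE_HEADER_TYPE;                      /* 'IMGB'                 */
	aui32Hdr[1] = IMAGE_HEADER_SIZE |
	              (IMAGE_HEADER_VERSION << HEADER_WORD1_VERSION_SHIFT);
	aui32Hdr[2] = ui32DataSize;                           /* payload size in bytes  */
	aui32Hdr[3] = ui32Width;                              /* logical width          */
	aui32Hdr[4] = ui32Height;                             /* logical height         */
	aui32Hdr[5] = PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8888;     /* pixel format           */
	aui32Hdr[6] = ui32Width;                              /* physical width         */
	aui32Hdr[7] = ui32Height;                             /* physical height        */
	aui32Hdr[8] = IMAGE_HEADER_WORD8_TWIDDLING_STRIDED |
	              IMAGE_HEADER_WORD8_STRIDE_POSITIVE |
	              IMAGE_HEADER_WORD8_BIFTYPE_NONE;        /* simple strided surface */
}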
diff --git a/drivers/gpu/drm/img/img-rogue/include/public/powervr/buffer_attribs.h b/drivers/gpu/drm/img/img-rogue/include/public/powervr/buffer_attribs.h
new file mode 100644 (file)
index 0000000..41eaaae
--- /dev/null
@@ -0,0 +1,193 @@
+/*************************************************************************/ /*!
+@File
+@Title          3D types for use by IMG APIs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        MIT
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef POWERVR_BUFFER_ATTRIBS_H
+#define POWERVR_BUFFER_ATTRIBS_H
+
+/*!
+ * Memory layouts
+ * Defines how pixels are laid out within a surface.
+ */
+typedef enum
+{
+       IMG_MEMLAYOUT_STRIDED,       /**< Resource is strided, one row at a time */
+       IMG_MEMLAYOUT_TWIDDLED,      /**< Resource is 2D twiddled to match HW */
+       IMG_MEMLAYOUT_3DTWIDDLED,    /**< Resource is 3D twiddled, classic style */
+       IMG_MEMLAYOUT_TILED,         /**< Resource is tiled, tiling config specified elsewhere. */
+       IMG_MEMLAYOUT_PAGETILED,     /**< Resource is pagetiled */
+       IMG_MEMLAYOUT_INVNTWIDDLED,  /**< Resource is 2D twiddled !N style */
+} IMG_MEMLAYOUT;
+
+/*!
+ * Rotation types
+ */
+typedef enum
+{
+       IMG_ROTATION_0DEG = 0,
+       IMG_ROTATION_90DEG = 1,
+       IMG_ROTATION_180DEG = 2,
+       IMG_ROTATION_270DEG = 3,
+       IMG_ROTATION_FLIP_Y = 4,
+
+       IMG_ROTATION_BAD = 255,
+} IMG_ROTATION;
+
+/*!
+ * Alpha types.
+ */
+typedef enum
+{
+       IMG_COLOURSPACE_FORMAT_UNKNOWN                 =  0x0UL << 16,
+       IMG_COLOURSPACE_FORMAT_LINEAR                  =  0x1UL << 16,
+       IMG_COLOURSPACE_FORMAT_SRGB                    =  0x2UL << 16,
+       IMG_COLOURSPACE_FORMAT_SCRGB                   =  0x3UL << 16,
+       IMG_COLOURSPACE_FORMAT_SCRGB_LINEAR            =  0x4UL << 16,
+       IMG_COLOURSPACE_FORMAT_DISPLAY_P3_LINEAR       =  0x5UL << 16,
+       IMG_COLOURSPACE_FORMAT_DISPLAY_P3              =  0x6UL << 16,
+       IMG_COLOURSPACE_FORMAT_BT2020_PQ               =  0x7UL << 16,
+       IMG_COLOURSPACE_FORMAT_BT2020_LINEAR           =  0x8UL << 16,
+       IMG_COLOURSPACE_FORMAT_DISPLAY_P3_PASSTHROUGH  =  0x9UL << 16,
+       IMG_COLOURSPACE_FORMAT_MASK                    =  0xFUL << 16,
+} IMG_COLOURSPACE_FORMAT;
+
+/*!
+ * Determines if FB Compression is Lossy
+ */
+#define IS_FBCDC_LOSSY(mode)                   ((mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8) ? IMG_TRUE : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4) ? IMG_TRUE : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2) ? IMG_TRUE : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8)  ? IMG_TRUE : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4) ? IMG_TRUE : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2) ? IMG_TRUE : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_8x8)  ? IMG_TRUE : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_16x4) ? IMG_TRUE : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_32x2) ? IMG_TRUE : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8)  ? IMG_TRUE : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4) ? IMG_TRUE : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2) ? IMG_TRUE : IMG_FALSE)
+
+/*!
+ * Determines if FB Compression is Packed
+ */
+#define IS_FBCDC_PACKED(mode)                  ((mode == IMG_FB_COMPRESSION_DIRECT_PACKED_8x8) ? IMG_TRUE : IMG_FALSE)
+
+/*!
+ * Returns type of FB Compression
+ */
+#define GET_FBCDC_BLOCK_TYPE(mode)             ((mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8  : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_PACKED_8x8)   ? IMG_FB_COMPRESSION_DIRECT_8x8  : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8)  ? IMG_FB_COMPRESSION_DIRECT_8x8  : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_8x8)  ? IMG_FB_COMPRESSION_DIRECT_8x8  : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8)  ? IMG_FB_COMPRESSION_DIRECT_8x8  : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : mode)
+
+/*!
+ * Adds Packing compression setting to mode if viable
+ */
+#define FBCDC_MODE_ADD_PACKING(mode)   ((mode == IMG_FB_COMPRESSION_DIRECT_8x8) ? IMG_FB_COMPRESSION_DIRECT_PACKED_8x8 : mode)
+
+/*!
+ * Removes Packing compression setting from mode
+ */
+#define FBCDC_MODE_REMOVE_PACKING(mode)        ((mode == IMG_FB_COMPRESSION_DIRECT_PACKED_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : mode)
+
+/*!
+ * Adds Lossy25 compression setting to mode if viable
+ */
+#define FBCDC_MODE_ADD_LOSSY25(mode)   ((mode == IMG_FB_COMPRESSION_DIRECT_8x8) ? IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8 : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_16x4) ? IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4 : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_32x2) ? IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2 : mode)
+
+/*!
+ * Adds Lossy37 compression setting to mode if viable
+ */
+#define FBCDC_MODE_ADD_LOSSY37(mode)   ((mode == IMG_FB_COMPRESSION_DIRECT_8x8) ? IMG_FB_COMPRESSION_DIRECT_LOSSY37_8x8 : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_16x4) ? IMG_FB_COMPRESSION_DIRECT_LOSSY37_16x4 : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_32x2) ? IMG_FB_COMPRESSION_DIRECT_LOSSY37_32x2 : mode)
+
+/*!
+ * Adds Lossy50 compression setting to mode if viable
+ */
+#define FBCDC_MODE_ADD_LOSSY50(mode)   ((mode == IMG_FB_COMPRESSION_DIRECT_8x8) ? IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8 : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_16x4) ? IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4 : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_32x2) ? IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2 : mode)
+
+/*!
+ * Adds Lossy75 compression setting to mode if viable
+ */
+#define FBCDC_MODE_ADD_LOSSY75(mode)   ((mode == IMG_FB_COMPRESSION_DIRECT_8x8) ? IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8 : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_16x4) ? IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4 : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_32x2) ? IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2 : mode)
+
+/*!
+ * Removes Lossy compression setting from mode
+ */
+#define FBCDC_MODE_REMOVE_LOSSY(mode)  ((mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8  : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8)  ? IMG_FB_COMPRESSION_DIRECT_8x8  : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_8x8)  ? IMG_FB_COMPRESSION_DIRECT_8x8  : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8)  ? IMG_FB_COMPRESSION_DIRECT_8x8  : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \
+                                                                               (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : mode)
+
+/*!
+ * Types of framebuffer compression
+ */
+typedef enum
+{
+       IMG_FB_COMPRESSION_NONE,
+       IMG_FB_COMPRESSION_DIRECT_8x8,
+       IMG_FB_COMPRESSION_DIRECT_16x4,
+       IMG_FB_COMPRESSION_DIRECT_32x2,
+       IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8,
+       IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4,
+       IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2,
+       IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8,
+       IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8,
+       IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4,
+       IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2,
+       IMG_FB_COMPRESSION_DIRECT_PACKED_8x8,
+       IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4,
+       IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2,
+       IMG_FB_COMPRESSION_DIRECT_LOSSY37_8x8,
+       IMG_FB_COMPRESSION_DIRECT_LOSSY37_16x4,
+       IMG_FB_COMPRESSION_DIRECT_LOSSY37_32x2,
+} IMG_FB_COMPRESSION;
+
+
+#endif /* POWERVR_BUFFER_ATTRIBS_H */
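For orientation only (the following sketch is not part of this patch): the FBCDC helper macros above are pure value mappings over IMG_FB_COMPRESSION. A minimal usage sketch, assuming the enum and macros from this header plus the driver's IMG_TRUE/IMG_FALSE definitions are in scope:

/* Illustrative sketch only, not part of this patch. */
static IMG_FB_COMPRESSION select_fbcdc_mode(IMG_FB_COMPRESSION eBase, int bWantLossy50)
{
	IMG_FB_COMPRESSION eMode = eBase;

	if (bWantLossy50)
	{
		/* Downgrades a lossless direct mode to its LOSSY50 variant;
		 * any other input value passes through unchanged. */
		eMode = FBCDC_MODE_ADD_LOSSY50(eMode);
	}

	/* GET_FBCDC_BLOCK_TYPE() strips the lossy/packed qualifier again, so
	 * e.g. LOSSY50_8x8 maps back to the plain 8x8 block layout. */
	if (IS_FBCDC_LOSSY(eMode) &&
	    GET_FBCDC_BLOCK_TYPE(eMode) == IMG_FB_COMPRESSION_DIRECT_8x8)
	{
		/* lossy 8x8 path */
	}

	return eMode;
}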
diff --git a/drivers/gpu/drm/img/img-rogue/include/public/powervr/img_drm_fourcc.h b/drivers/gpu/drm/img/img-rogue/include/public/powervr/img_drm_fourcc.h
new file mode 100644 (file)
index 0000000..5fd79a6
--- /dev/null
@@ -0,0 +1,140 @@
+/*************************************************************************/ /*!
+@File
+@Title          Wrapper around drm_fourcc.h
+@Description    FourCCs and DRM framebuffer modifiers that are not in the
+                Kernel's and libdrm's drm_fourcc.h can be added here.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        MIT
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef IMG_DRM_FOURCC_H
+#define IMG_DRM_FOURCC_H
+
+#if defined(__KERNEL__)
+#include <drm/drm_fourcc.h>
+#else
+/*
+ * Include types.h to work around versions of libdrm older than 2.4.68
+ * not including the correct headers.
+ */
+#include <linux/types.h>
+
+#include <drm_fourcc.h>
+#endif
+
+/*
+ * Don't get too inspired by this example :)
+ * ADF doesn't support DRM modifiers, so the memory layout had to be
+ * included in the fourcc name, but the proper way to specify information
+ * additional to pixel formats is to use DRM modifiers.
+ *
+ * See upstream drm_fourcc.h for the proper naming convention.
+ */
+#ifndef DRM_FORMAT_BGRA8888_DIRECT_16x4
+#define DRM_FORMAT_BGRA8888_DIRECT_16x4 fourcc_code('I', 'M', 'G', '0')
+#endif
+
+#if !defined(__KERNEL__)
+/*
+ * A definition for the same format was added in Linux kernel 5.2 in commit
+ * 88ab9c76d191ad8645b483f31e2b394b0f3e280e. As such, this definition has been
+ * deprecated and the DRM_FORMAT_ABGR16161616F kernel define should be used
+ * instead of this one.
+ */
+#define DRM_FORMAT_ABGR16_IMG_DEPRECATED fourcc_code('I', 'M', 'G', '1')
+#endif
+
+/*
+ * Upstream does not have a packed 10 Bits Per Channel YVU format yet,
+ * so let's make one up.
+ * Note: at the moment this format is not intended to be used with
+ *       a framebuffer, so the kernel's core DRM doesn't need to know
+ *       about this format. This means that the kernel doesn't need
+ *       to be patched.
+ */
+#if !defined(__KERNEL__)
+#define DRM_FORMAT_YVU444_PACK10_IMG fourcc_code('I', 'M', 'G', '2')
+#define DRM_FORMAT_YUV422_2PLANE_PACK10_IMG fourcc_code('I', 'M', 'G', '3')
+#define DRM_FORMAT_YUV420_2PLANE_PACK10_IMG fourcc_code('I', 'M', 'G', '4')
+#endif
+
+/*
+ * Value chosen in the middle of the 255-value pool to minimise the chance of
+ * hitting the same value potentially defined by other vendors in drm_fourcc.h.
+ */
+#define DRM_FORMAT_MOD_VENDOR_PVR 0x92
+
+#ifndef DRM_FORMAT_MOD_VENDOR_NONE
+#define DRM_FORMAT_MOD_VENDOR_NONE 0
+#endif
+
+#ifndef DRM_FORMAT_RESERVED
+#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
+#endif
+
+#define img_fourcc_mod_combine(uiModHi, uiModLo) \
+       ((__u64) ((__u32) (uiModHi)) << 32 | (__u64) ((__u32) (uiModLo)))
+
+#define img_fourcc_mod_hi(ui64Mod) \
+       ((__u32) ((__u64) (ui64Mod) >> 32))
+
+#define img_fourcc_mod_lo(ui64Mod) \
+       ((__u32) ((__u64) (ui64Mod)) & 0xffffffff)
+
+#ifndef fourcc_mod_code
+#define fourcc_mod_code(vendor, val) \
+       ((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL))
+#endif
+
+#ifndef DRM_FORMAT_MOD_INVALID
+#define DRM_FORMAT_MOD_INVALID fourcc_mod_code(NONE, DRM_FORMAT_RESERVED)
+#endif
+
+#ifndef DRM_FORMAT_MOD_LINEAR
+#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0)
+#endif
+
+#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V1      fourcc_mod_code(PVR, 3)
+#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V1     fourcc_mod_code(PVR, 9)
+
+#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V7      fourcc_mod_code(PVR, 6)
+#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V7     fourcc_mod_code(PVR, 12)
+
+#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V10     fourcc_mod_code(PVR, 21)
+#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V10    fourcc_mod_code(PVR, 22)
+#define DRM_FORMAT_MOD_PVR_FBCDC_32x2_V10    fourcc_mod_code(PVR, 23)
+
+#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V12     fourcc_mod_code(PVR, 15)
+#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V12    fourcc_mod_code(PVR, 16)
+
+#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V13             fourcc_mod_code(PVR, 24)
+#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY25_8x8_V13     fourcc_mod_code(PVR, 25)
+#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY50_8x8_V13     fourcc_mod_code(PVR, 26)
+#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY75_8x8_V13     fourcc_mod_code(PVR, 27)
+
+#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V13            fourcc_mod_code(PVR, 28)
+#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY25_16x4_V13    fourcc_mod_code(PVR, 29)
+#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY50_16x4_V13    fourcc_mod_code(PVR, 30)
+#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY75_16x4_V13    fourcc_mod_code(PVR, 31)
+
+#endif /* IMG_DRM_FOURCC_H */
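For orientation only (not part of this patch): the modifier helper macros above pack an 8-bit vendor ID into bits 56..63 of the 64-bit modifier and split/recombine that value. A small sketch, assuming this header is included:

/* Illustrative sketch only, not part of this patch. */
static int pvr_modifier_roundtrip(void)
{
	__u64 ui64Mod = DRM_FORMAT_MOD_PVR_FBCDC_8x8_V13; /* fourcc_mod_code(PVR, 24) */
	__u32 ui32Hi  = img_fourcc_mod_hi(ui64Mod);       /* vendor 0x92 sits in the top byte */
	__u32 ui32Lo  = img_fourcc_mod_lo(ui64Mod);       /* low word carries the value 24 */

	/* img_fourcc_mod_combine() reassembles the original 64-bit modifier. */
	return img_fourcc_mod_combine(ui32Hi, ui32Lo) == ui64Mod;
}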
diff --git a/drivers/gpu/drm/img/img-rogue/include/public/powervr/mem_types.h b/drivers/gpu/drm/img/img-rogue/include/public/powervr/mem_types.h
new file mode 100644 (file)
index 0000000..a6dce8f
--- /dev/null
@@ -0,0 +1,64 @@
+/*************************************************************************/ /*!
+@File
+@Title          Public types
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        MIT
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef POWERVR_TYPES_H
+#define POWERVR_TYPES_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#if defined(_MSC_VER)
+       #include "msvc_types.h"
+#elif defined(__linux__) && defined(__KERNEL__)
+       #include <linux/types.h>
+       #include <linux/compiler.h>
+#else
+       #include <stdint.h>
+       #define __iomem
+#endif
+
+typedef void *IMG_CPU_VIRTADDR;
+
+/* device virtual address */
+typedef struct
+{
+       uint64_t uiAddr;
+#define IMG_CAST_TO_DEVVADDR_UINT(var)         (uint64_t)(var)
+
+} IMG_DEV_VIRTADDR;
+
+typedef uint64_t IMG_DEVMEM_SIZE_T;
+typedef uint64_t IMG_DEVMEM_ALIGN_T;
+typedef uint64_t IMG_DEVMEM_OFFSET_T;
+typedef uint32_t IMG_DEVMEM_LOG2ALIGN_T;
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
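A trivial sketch (not part of this patch) of how the device virtual address wrapper above is meant to be used, assuming this header is included:

/* Illustrative sketch only, not part of this patch. */
static IMG_DEV_VIRTADDR make_dev_vaddr(uint64_t ui64Raw)
{
	IMG_DEV_VIRTADDR sDevVAddr;

	/* Wrapping the raw value in a struct keeps device virtual addresses
	 * from being mixed up with CPU pointers by accident. */
	sDevVAddr.uiAddr = IMG_CAST_TO_DEVVADDR_UINT(ui64Raw);

	return sDevVAddr;
}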
diff --git a/drivers/gpu/drm/img/img-rogue/include/public/powervr/pvrsrv_sync_ext.h b/drivers/gpu/drm/img/img-rogue/include/public/powervr/pvrsrv_sync_ext.h
new file mode 100644 (file)
index 0000000..30f7972
--- /dev/null
@@ -0,0 +1,72 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services external synchronisation interface header
+@Description    Defines synchronisation structures that are visible internally
+                and externally
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        MIT
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef POWERVR_SYNC_EXT_H
+#define POWERVR_SYNC_EXT_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*!
+ * Number of sync prims still used internally in operations
+ */
+#define PVRSRV_MAX_SYNC_PRIMS 4U
+
+/*!
+ * Maximum number of dev var updates passed in a kick call
+ */
+#define PVRSRV_MAX_DEV_VARS 13U
+
+/*!
+ * Number of UFOs in operations
+ */
+#define        PVRSRV_MAX_SYNCS (PVRSRV_MAX_SYNC_PRIMS + PVRSRV_MAX_DEV_VARS)
+
+/*! Implementation independent types for passing fence/timeline to Services.
+ */
+typedef int32_t PVRSRV_FENCE;
+typedef int32_t PVRSRV_TIMELINE;
+
+/*! Maximum length for an annotation name string for fence sync model objects.
+ */
+#define PVRSRV_SYNC_NAME_LENGTH 32U
+
+/* Macros for API callers using the fence sync model
+ */
+#define PVRSRV_NO_TIMELINE      ((PVRSRV_TIMELINE) -1)
+#define PVRSRV_NO_FENCE         ((PVRSRV_FENCE)    -1)
+#define PVRSRV_NO_FENCE_PTR     NULL
+#define PVRSRV_NO_TIMELINE_PTR  NULL
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
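A short sketch (not part of this patch) of how a caller of the fence sync model passes the "not used" sentinels above, assuming this header is included; the function is hypothetical and only shows the constants:

/* Illustrative sketch only, not part of this patch. */
static void kick_without_sync(void)
{
	PVRSRV_FENCE    iCheckFence = PVRSRV_NO_FENCE;    /* nothing to wait for */
	PVRSRV_TIMELINE hTimeline   = PVRSRV_NO_TIMELINE; /* no update timeline */

	/* A real kick call would hand these to Services; -1 means "not used"
	 * rather than a valid fd-backed fence/timeline. */
	(void)iCheckFence;
	(void)hTimeline;
}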
diff --git a/drivers/gpu/drm/img/img-rogue/include/pvr_buffer_sync_shared.h b/drivers/gpu/drm/img/img-rogue/include/pvr_buffer_sync_shared.h
new file mode 100644 (file)
index 0000000..7a11091
--- /dev/null
@@ -0,0 +1,57 @@
+/*************************************************************************/ /*!
+@File
+@Title          PVR buffer sync shared
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Shared definitions between client and server
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVR_BUFFER_SYNC_SHARED_H
+#define PVR_BUFFER_SYNC_SHARED_H
+
+#define PVR_BUFFER_FLAG_READ           (1U << 0)
+#define PVR_BUFFER_FLAG_WRITE          (1U << 1)
+#define PVR_BUFFER_FLAG_MASK           (PVR_BUFFER_FLAG_READ | \
+                                                                        PVR_BUFFER_FLAG_WRITE)
+
+/* Maximum number of PMRs passed
+ * in a kick when using buffer sync
+ */
+#define PVRSRV_MAX_BUFFERSYNC_PMRS 32
+
+#endif /* PVR_BUFFER_SYNC_SHARED_H */
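A minimal sketch (not part of this patch) of validating buffer usage flags against the mask above, assuming this header is included:

/* Illustrative sketch only, not part of this patch. */
static int pvr_buffer_flags_valid(unsigned int uiFlags)
{
	/* A buffer used for both read and write simply sets both bits:
	 * PVR_BUFFER_FLAG_READ | PVR_BUFFER_FLAG_WRITE. Any bit outside
	 * PVR_BUFFER_FLAG_MASK is rejected. */
	return (uiFlags != 0) && ((uiFlags & ~PVR_BUFFER_FLAG_MASK) == 0);
}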
diff --git a/drivers/gpu/drm/img/img-rogue/include/pvr_debug.h b/drivers/gpu/drm/img/img-rogue/include/pvr_debug.h
new file mode 100644 (file)
index 0000000..56bbb13
--- /dev/null
@@ -0,0 +1,898 @@
+/*************************************************************************/ /*!
+@File
+@Title          PVR Debug Declarations
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provides debug functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVR_DEBUG_H
+#define PVR_DEBUG_H
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+/*! @cond Doxygen_Suppress */
+#if defined(_MSC_VER)
+#      define MSC_SUPPRESS_4127 __pragma(warning(suppress:4127))
+#else
+#      define MSC_SUPPRESS_4127
+#endif
+/*! @endcond */
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define PVR_MAX_DEBUG_MESSAGE_LEN      (512)   /*!< Max length of a Debug Message */
+
+/* These are privately used by pvr_debug; use the PVR_DBG_ defines instead */
+#define DBGPRIV_FATAL     0x001UL  /*!< Debug-Fatal. Privately used by pvr_debug. */
+#define DBGPRIV_ERROR     0x002UL  /*!< Debug-Error. Privately used by pvr_debug. */
+#define DBGPRIV_WARNING   0x004UL  /*!< Debug-Warning. Privately used by pvr_debug. */
+#define DBGPRIV_MESSAGE   0x008UL  /*!< Debug-Message. Privately used by pvr_debug. */
+#define DBGPRIV_VERBOSE   0x010UL  /*!< Debug-Verbose. Privately used by pvr_debug. */
+#define DBGPRIV_CALLTRACE 0x020UL  /*!< Debug-CallTrace. Privately used by pvr_debug. */
+#define DBGPRIV_ALLOC     0x040UL  /*!< Debug-Alloc. Privately used by pvr_debug. */
+#define DBGPRIV_BUFFERED  0x080UL  /*!< Debug-Buffered. Privately used by pvr_debug. */
+#define DBGPRIV_DEBUG     0x100UL  /*!< Debug-AdHoc-Debug. Never submitted. Privately used by pvr_debug. */
+#define DBGPRIV_LAST      0x100UL  /*!< Always set to highest mask value. Privately used by pvr_debug. */
+
+/* Enable DPF logging locally from some make targets */
+#if defined(PVRSRV_NEED_PVR_DPF_LOCAL)
+#undef PVRSRV_NEED_PVR_DPF
+#define PVRSRV_NEED_PVR_DPF
+#endif
+
+#if !defined(PVRSRV_NEED_PVR_ASSERT) && defined(DEBUG)
+#define PVRSRV_NEED_PVR_ASSERT
+#endif
+
+#if defined(PVRSRV_NEED_PVR_ASSERT) && !defined(PVRSRV_NEED_PVR_DPF)
+#define PVRSRV_NEED_PVR_DPF
+#endif
+
+#if !defined(PVRSRV_NEED_PVR_TRACE) && (defined(DEBUG) || defined(TIMING))
+#define PVRSRV_NEED_PVR_TRACE
+#endif
+
+#if !defined(DOXYGEN)
+/*************************************************************************/ /*
+PVRSRVGetErrorString
+Returns a string describing the provided PVRSRV_ERROR code
+NB No doxygen comments provided as this function does not require porting
+   for other operating systems
+*/ /**************************************************************************/
+const IMG_CHAR *PVRSRVGetErrorString(PVRSRV_ERROR eError);
+#define PVRSRVGETERRORSTRING PVRSRVGetErrorString
+#endif
+
+/* PVR_ASSERT() and PVR_DBG_BREAK handling */
+
+#if defined(__KLOCWORK__)
+/* A dummy no-return function to be used under Klocwork to mark unreachable
+   paths instead of abort() in order to avoid MISRA.STDLIB.ABORT issues. */
+__noreturn void klocwork_abort(void);
+#endif
+
+#if defined(PVRSRV_NEED_PVR_ASSERT) || defined(DOXYGEN)
+
+/* Unfortunately the Klocwork static analysis checker doesn't understand our
+ * ASSERT macros. Thus it reports lots of false positives. Defining our Assert
+ * macros in a special way when the code is analysed by Klocwork avoids
+ * them.
+ */
+#if defined(__KLOCWORK__)
+#define PVR_ASSERT(x) do { if (!(x)) {klocwork_abort();} } while (false)
+#else /* ! __KLOCWORK__ */
+
+#if defined(_WIN32)
+#define PVR_ASSERT(expr) do                                                                            \
+       {                                                                                                                       \
+               MSC_SUPPRESS_4127                                                                               \
+               if (unlikely(!(expr)))                                                          \
+               {                                                                                                               \
+                       PVRSRVDebugPrintf(DBGPRIV_FATAL, __FILE__, __LINE__,\
+                                         "*** Debug assertion failed!");                       \
+                       __debugbreak();                                                                         \
+               }                                                                                                               \
+       MSC_SUPPRESS_4127                                                                                       \
+       } while (false)
+
+#else
+
+#if defined(__linux__) && defined(__KERNEL__)
+#include <linux/kernel.h>
+#include <linux/bug.h>
+
+/* In Linux kernel mode, use WARN_ON() directly. This produces the
+ * correct filename and line number in the warning message.
+ */
+#define PVR_ASSERT(EXPR) do                                                                                    \
+       {                                                                                                                               \
+               if (unlikely(!(EXPR)))                                                                          \
+               {                                                                                                                       \
+                       PVRSRVDebugPrintf(DBGPRIV_FATAL, __FILE__, __LINE__,    \
+                                                         "Debug assertion failed!");                   \
+                       WARN_ON(1);                                                                                             \
+               }                                                                                                                       \
+       } while (false)
+
+#else /* defined(__linux__) && defined(__KERNEL__) */
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDebugAssertFail
+@Description    Indicate to the user that a debug assertion has failed and
+                prevent the program from continuing.
+                Invoked from the macro PVR_ASSERT().
+@Input          pszFile       The name of the source file where the assertion failed
+@Input          ui32Line      The line number of the failed assertion
+@Input          pszAssertion  String describing the assertion
+@Return         NEVER!
+*/ /**************************************************************************/
+IMG_EXPORT void IMG_CALLCONV __noreturn
+PVRSRVDebugAssertFail(const IMG_CHAR *pszFile,
+                      IMG_UINT32 ui32Line,
+                      const IMG_CHAR *pszAssertion);
+
+#define PVR_ASSERT(EXPR) do                                                                            \
+       {                                                                                                                       \
+               if (unlikely(!(EXPR)))                                                                  \
+               {                                                                                                               \
+                       PVRSRVDebugAssertFail(__FILE__, __LINE__, #EXPR);       \
+               }                                                                                                               \
+       } while (false)
+
+#endif /* defined(__linux__) && defined(__KERNEL__) */
+#endif /* defined(_WIN32) */
+#endif /* defined(__KLOCWORK__) */
+
+#if defined(__KLOCWORK__)
+       #define PVR_DBG_BREAK do { klocwork_abort(); } while (false)
+#else
+       #if defined(WIN32)
+               #define PVR_DBG_BREAK __debugbreak()   /*!< Implementation of PVR_DBG_BREAK for (non-WinCE) Win32 */
+       #else
+               #if defined(PVR_DBG_BREAK_ASSERT_FAIL)
+               /*!< Implementation of PVR_DBG_BREAK that maps onto PVRSRVDebugAssertFail */
+                       #if defined(_WIN32)
+                               #define PVR_DBG_BREAK   DBG_BREAK
+                       #else
+                               #if defined(__linux__) && defined(__KERNEL__)
+                                       #define PVR_DBG_BREAK BUG()
+                               #else
+                                       #define PVR_DBG_BREAK   PVRSRVDebugAssertFail(__FILE__, __LINE__, "PVR_DBG_BREAK")
+                               #endif
+                       #endif
+               #else
+                       /*!< Null Implementation of PVR_DBG_BREAK (does nothing) */
+                       #define PVR_DBG_BREAK
+               #endif
+       #endif
+#endif
+
+
+#else /* defined(PVRSRV_NEED_PVR_ASSERT) */
+       /* Unfortunately the Klocwork static analysis checker doesn't understand our
+       * ASSERT macros. Thus it reports lots of false positives. Defining our Assert
+       * macros in a special way when the code is analysed by Klocwork avoids
+       * them.
+       */
+       #if defined(__KLOCWORK__) && !defined(SERVICES_SC)
+               #define PVR_ASSERT(EXPR) do { if (!(EXPR)) {klocwork_abort();} } while (false)
+       #else
+               #define PVR_ASSERT(EXPR) (void)(EXPR) /*!< Null Implementation of PVR_ASSERT (does nothing) */
+       #endif
+
+       #define PVR_DBG_BREAK    /*!< Null Implementation of PVR_DBG_BREAK (does nothing) */
+
+#endif /* defined(PVRSRV_NEED_PVR_ASSERT) */
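A brief usage sketch (not part of this patch) for PVR_ASSERT(), assuming pvr_debug.h is included: with PVRSRV_NEED_PVR_ASSERT a failure is reported (WARN_ON() in Linux kernel builds); without it the macro degrades to (void)(EXPR), so the expression is still evaluated but nothing is reported.

/* Illustrative sketch only, not part of this patch. */
static IMG_UINT32 read_entry(const IMG_UINT32 *pui32Table,
                             IMG_UINT32 ui32Index,
                             IMG_UINT32 ui32Count)
{
	PVR_ASSERT(pui32Table != NULL);
	PVR_ASSERT(ui32Index < ui32Count);

	return pui32Table[ui32Index];
}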
+
+
+/* PVR_DPF() handling */
+
+#if defined(PVRSRV_NEED_PVR_DPF) || defined(DOXYGEN)
+
+       /* New logging mechanism */
+       #define PVR_DBG_FATAL     DBGPRIV_FATAL     /*!< Debug level passed to PVRSRVDebugPrintf() for fatal errors. */
+       #define PVR_DBG_ERROR     DBGPRIV_ERROR     /*!< Debug level passed to PVRSRVDebugPrintf() for non-fatal errors. */
+       #define PVR_DBG_WARNING   DBGPRIV_WARNING   /*!< Debug level passed to PVRSRVDebugPrintf() for warnings. */
+       #define PVR_DBG_MESSAGE   DBGPRIV_MESSAGE   /*!< Debug level passed to PVRSRVDebugPrintf() for information only. */
+       #define PVR_DBG_VERBOSE   DBGPRIV_VERBOSE   /*!< Debug level passed to PVRSRVDebugPrintf() for very low-priority debug. */
+       #define PVR_DBG_CALLTRACE DBGPRIV_CALLTRACE /*!< Debug level passed to PVRSRVDebugPrintf() for function tracing purposes. */
+       #define PVR_DBG_ALLOC     DBGPRIV_ALLOC     /*!< Debug level passed to PVRSRVDebugPrintf() for tracking some of drivers memory operations. */
+       #define PVR_DBG_BUFFERED  DBGPRIV_BUFFERED  /*!< Debug level passed to PVRSRVDebugPrintf() when debug should be written to the debug circular buffer. */
+       #define PVR_DBG_DEBUG     DBGPRIV_DEBUG     /*!< Debug level passed to PVRSRVDebugPrintf() for debug messages. */
+
+       /* These levels are always on with PVRSRV_NEED_PVR_DPF */
+       /*! @cond Doxygen_Suppress */
+       #define PVR_DPF_0x001UL(...) PVRSRVDebugPrintf(DBGPRIV_FATAL, __VA_ARGS__)
+       #define PVR_DPF_0x002UL(...) PVRSRVDebugPrintf(DBGPRIV_ERROR, __VA_ARGS__)
+       #define PVR_DPF_0x080UL(...) PVRSRVDebugPrintf(DBGPRIV_BUFFERED, __VA_ARGS__)
+
+       /*
+        * The AdHoc-Debug level is only supported when enabled in the local
+        * build environment and may need to be used in both debug and release
+        * builds. An error is generated in the formal build if it is checked in.
+        */
+#if defined(PVR_DPF_ADHOC_DEBUG_ON)
+       #define PVR_DPF_0x100UL(...) PVRSRVDebugPrintf(DBGPRIV_DEBUG, __VA_ARGS__)
+#else
+       /* Use an undefined token here to stop compilation dead in the offending module */
+       #define PVR_DPF_0x100UL(...) __ERROR__PVR_DBG_DEBUG_is_in_use_but_has_not_been_enabled__Note_Debug_DPF_must_not_be_checked_in__Define_PVR_DPF_ADHOC_DEBUG_ON_for_testing
+#endif
+
+       /* Some are compiled out completely in release builds */
+#if defined(DEBUG) || defined(DOXYGEN)
+       #define PVR_DPF_0x004UL(...) PVRSRVDebugPrintf(DBGPRIV_WARNING, __VA_ARGS__)
+       #define PVR_DPF_0x008UL(...) PVRSRVDebugPrintf(DBGPRIV_MESSAGE, __VA_ARGS__)
+       #define PVR_DPF_0x010UL(...) PVRSRVDebugPrintf(DBGPRIV_VERBOSE, __VA_ARGS__)
+       #define PVR_DPF_0x020UL(...) PVRSRVDebugPrintf(DBGPRIV_CALLTRACE, __VA_ARGS__)
+       #define PVR_DPF_0x040UL(...) PVRSRVDebugPrintf(DBGPRIV_ALLOC, __VA_ARGS__)
+#else
+       #define PVR_DPF_0x004UL(...)
+       #define PVR_DPF_0x008UL(...)
+       #define PVR_DPF_0x010UL(...)
+       #define PVR_DPF_0x020UL(...)
+       #define PVR_DPF_0x040UL(...)
+#endif
+
+       /* Translate the different log levels to separate macros
+        * so they can each be compiled out.
+        */
+#if defined(DEBUG)
+       #define PVR_DPF_EX(lvl, ...) PVR_DPF_ ## lvl (__FILE__, __LINE__, __VA_ARGS__)
+#else
+       #define PVR_DPF_EX(lvl, ...) PVR_DPF_ ## lvl ("", __LINE__, __VA_ARGS__)
+#endif
+       /*! @endcond */
+
+       /* Get rid of the double bracketing */
+       #define PVR_DPF(x) PVR_DPF_EX x
+
+       #define PVR_LOG_ERROR(_rc, _call) \
+               PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__))
+
+       #define PVR_LOG_IF_ERROR(_rc, _call) do \
+               { if (unlikely(_rc != PVRSRV_OK)) { \
+                       PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+                 } \
+               MSC_SUPPRESS_4127\
+               } while (false)
+
+       #define PVR_WARN_IF_ERROR(_rc, _call) do \
+               { if (unlikely(_rc != PVRSRV_OK)) { \
+                       PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+                 } \
+               MSC_SUPPRESS_4127\
+               } while (false)
+
+       #define PVR_LOG_RETURN_IF_NOMEM(_expr, _call) do \
+               { if (unlikely(_expr == NULL)) { \
+                       PVR_DPF((PVR_DBG_ERROR, "%s failed (PVRSRV_ERROR_OUT_OF_MEMORY) in %s()", _call, __func__)); \
+                       return PVRSRV_ERROR_OUT_OF_MEMORY; } \
+               MSC_SUPPRESS_4127\
+               } while (false)
+
+       #define PVR_LOG_GOTO_IF_NOMEM(_expr, _err, _go) do \
+               { if (unlikely(_expr == NULL)) { \
+                       PVR_DPF((PVR_DBG_ERROR, "%s failed (PVRSRV_ERROR_OUT_OF_MEMORY) in %s()", #_expr, __func__)); \
+                       _err = PVRSRV_ERROR_OUT_OF_MEMORY; \
+                       goto _go; } \
+               MSC_SUPPRESS_4127\
+               } while (false)
+
+       #define PVR_LOG_RETURN_IF_ERROR(_rc, _call) do \
+               { if (unlikely(_rc != PVRSRV_OK)) { \
+                       PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+                       return _rc; } \
+               MSC_SUPPRESS_4127\
+               } while (false)
+
+       #define PVR_LOG_RETURN_VOID_IF_ERROR(_rc, _call) do \
+               { if (unlikely(_rc != PVRSRV_OK)) { \
+                       PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+                       return; } \
+               MSC_SUPPRESS_4127\
+               } while (false)
+
+       #define PVR_LOG_GOTO_IF_ERROR(_rc, _call, _go) do \
+               { if (unlikely(_rc != PVRSRV_OK)) { \
+                       PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+                       goto _go; } \
+               MSC_SUPPRESS_4127\
+               } while (false)
+
+       #define PVR_LOG_GOTO_WITH_ERROR(_call, _err, _rc, _go) do \
+               { PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+                       _err = _rc; \
+                       goto _go; \
+               MSC_SUPPRESS_4127\
+               } while (false)
+
+       #define PVR_LOG_IF_FALSE(_expr, _msg) do \
+               { if (unlikely(!(_expr))) { \
+                       PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \
+                 } \
+               MSC_SUPPRESS_4127\
+               } while (false)
+
+       #define PVR_LOG_RETURN_IF_FALSE(_expr, _msg, _rc) do \
+               { if (unlikely(!(_expr))) { \
+                       PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \
+                       return _rc; } \
+               MSC_SUPPRESS_4127\
+               } while (false)
+
+       #define PVR_LOG_RETURN_VOID_IF_FALSE(_expr, _msg) do \
+               { if (unlikely(!(_expr))) { \
+                       PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \
+                       return; } \
+               MSC_SUPPRESS_4127\
+               } while (false)
+
+       #define PVR_LOG_GOTO_IF_FALSE(_expr, _msg, _go) do \
+               { if (unlikely(!(_expr))) { \
+                       PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \
+                       goto _go; } \
+               MSC_SUPPRESS_4127\
+               } while (false)
+
+       #define PVR_LOG_RETURN_IF_INVALID_PARAM(_expr, _param) do \
+               { if (unlikely(!(_expr))) { \
+                       PVR_DPF((PVR_DBG_ERROR, "%s invalid in %s()", _param, __func__)); \
+                       return PVRSRV_ERROR_INVALID_PARAMS; } \
+               MSC_SUPPRESS_4127\
+               } while (false)
+
+       #define PVR_LOG_GOTO_IF_INVALID_PARAM(_expr, _err, _go) do \
+               { if (unlikely(!(_expr))) { \
+                       PVR_DPF((PVR_DBG_ERROR, "%s invalid in %s()", #_expr, __func__)); \
+                       _err = PVRSRV_ERROR_INVALID_PARAMS; \
+                       goto _go; } \
+               MSC_SUPPRESS_4127\
+               } while (false)
+
+       #define PVR_LOG_MSG(_lvl, _msg) \
+               PVR_DPF((_lvl, ("In %s() "_msg), __func__))
+
+       #define PVR_LOG_VA(_lvl, _msg, ...) \
+               PVR_DPF((_lvl, ("In %s() "_msg), __func__, __VA_ARGS__))
+
+       #define PVR_LOG_IF_ERROR_VA(_lvl, _rc, _msg, ...) do \
+               { if (unlikely(_rc != PVRSRV_OK)) { \
+                       PVR_DPF((_lvl, ("In %s() "_msg), __func__, __VA_ARGS__)); \
+               } \
+               MSC_SUPPRESS_4127\
+               } while (false)
+
+       #define PVR_LOG_IF_FALSE_VA(_lvl, _expr, _msg, ...) do \
+               { if (unlikely(!(_expr))) { \
+                       PVR_DPF((_lvl, ("In %s() "_msg), __func__, __VA_ARGS__)); \
+               } \
+               MSC_SUPPRESS_4127\
+               } while (false)
+
+       #define PVR_LOG_RETURN_IF_ERROR_VA(_rc, _msg, ...) do \
+               { if (unlikely(_rc != PVRSRV_OK)) { \
+                       PVR_DPF((PVR_DBG_ERROR, ("In %s() "_msg), __func__, __VA_ARGS__)); \
+                       return _rc; \
+               } MSC_SUPPRESS_4127\
+               } while (false)
+
+       #define PVR_LOG_GOTO_IF_ERROR_VA(_rc, _go, _msg, ...) do \
+               { if (unlikely(_rc != PVRSRV_OK)) { \
+                       PVR_DPF((PVR_DBG_ERROR, ("In %s() "_msg), __func__, __VA_ARGS__)); \
+                       goto _go; \
+               } MSC_SUPPRESS_4127\
+               } while (false)
+
+       #define PVR_LOG_RETURN_IF_FALSE_VA(_expr, _rc, _msg, ...) do \
+               { if (unlikely(!(_expr))) { \
+                       PVR_DPF((PVR_DBG_ERROR, ("At %s: "_msg), __func__, __VA_ARGS__)); \
+                       return _rc; \
+               } MSC_SUPPRESS_4127\
+               } while (false)
+
+       #define PVR_LOG_GOTO_IF_FALSE_VA(_expr, _go, _msg, ...) do \
+               { if (unlikely(!(_expr))) { \
+                       PVR_DPF((PVR_DBG_ERROR, ("In %s() "_msg), __func__, __VA_ARGS__)); \
+                       goto _go; \
+               } MSC_SUPPRESS_4127\
+               } while (false)
+
+#else /* defined(PVRSRV_NEED_PVR_DPF) */
+
+       #define PVR_DPF(X)  /*!< Null Implementation of PowerVR Debug Printf (does nothing) */
+
+       #define PVR_LOG_MSG(_lvl, _msg)
+       #define PVR_LOG_VA(_lvl, _msg, ...)
+       #define PVR_LOG_ERROR(_rc, _call) (void)(_rc)
+       #define PVR_LOG_IF_ERROR(_rc, _call) (void)(_rc)
+       #define PVR_WARN_IF_ERROR(_rc, _call) (void)(_rc)
+
+       #define PVR_LOG_IF_ERROR_VA(_lvl, _rc, _msg, ...) (void)(_rc)
+       #define PVR_LOG_IF_FALSE_VA(_lvl, _expr, _msg, ...) (void)(_expr)
+
+       #define PVR_LOG_RETURN_IF_NOMEM(_expr, _call) do { if (unlikely(_expr == NULL)) { return PVRSRV_ERROR_OUT_OF_MEMORY; } MSC_SUPPRESS_4127 } while (false)
+       #define PVR_LOG_GOTO_IF_NOMEM(_expr, _err, _go) do { if (unlikely(_expr == NULL)) { _err = PVRSRV_ERROR_OUT_OF_MEMORY; goto _go; } MSC_SUPPRESS_4127    } while (false)
+
+       #define PVR_LOG_RETURN_IF_ERROR(_rc, _call) do { if (unlikely(_rc != PVRSRV_OK)) { return (_rc); } MSC_SUPPRESS_4127 } while (false)
+       #define PVR_LOG_RETURN_IF_ERROR_VA(_rc, _msg, ...) do { if (unlikely(_rc != PVRSRV_OK)) { return (_rc); } MSC_SUPPRESS_4127 } while (false)
+       #define PVR_LOG_RETURN_VOID_IF_ERROR(_rc, _call) do { if (unlikely(_rc != PVRSRV_OK)) { return; } MSC_SUPPRESS_4127 } while (false)
+
+       #define PVR_LOG_GOTO_IF_ERROR(_rc, _call, _go) do { if (unlikely(_rc != PVRSRV_OK)) { goto _go; } MSC_SUPPRESS_4127 } while (false)
+       #define PVR_LOG_GOTO_IF_ERROR_VA(_rc, _go, _msg, ...) do { if (unlikely(_rc != PVRSRV_OK)) { goto _go; } MSC_SUPPRESS_4127 } while (false)
+       #define PVR_LOG_GOTO_WITH_ERROR(_call, _err, _rc, _go) do { _err = _rc; goto _go; MSC_SUPPRESS_4127 } while (false)
+
+       #define PVR_LOG_IF_FALSE(_expr, _msg) (void)(_expr)
+       #define PVR_LOG_RETURN_IF_FALSE(_expr, _msg, _rc) do { if (unlikely(!(_expr))) { return (_rc); } MSC_SUPPRESS_4127 } while (false)
+       #define PVR_LOG_RETURN_IF_FALSE_VA(_expr, _rc, _msg, ...) do { if (unlikely(!(_expr))) { return (_rc); } MSC_SUPPRESS_4127 } while (false)
+
+       #define PVR_LOG_RETURN_VOID_IF_FALSE(_expr, _msg) do { if (unlikely(!(_expr))) { return; } MSC_SUPPRESS_4127 } while (false)
+       #define PVR_LOG_GOTO_IF_FALSE(_expr, _msg, _go) do { if (unlikely(!(_expr))) { goto _go; } MSC_SUPPRESS_4127 } while (false)
+       #define PVR_LOG_GOTO_IF_FALSE_VA(_expr, _go, _msg, ...) do { if (unlikely(!(_expr))) { goto _go; } MSC_SUPPRESS_4127 } while (false)
+
+       #define PVR_LOG_RETURN_IF_INVALID_PARAM(_expr, _param) do { if (unlikely(!(_expr))) { return PVRSRV_ERROR_INVALID_PARAMS; } MSC_SUPPRESS_4127 } while (false)
+       #define PVR_LOG_GOTO_IF_INVALID_PARAM(_expr, _err, _go) do { if (unlikely(!(_expr))) { _err = PVRSRV_ERROR_INVALID_PARAMS; goto _go; } MSC_SUPPRESS_4127 } while (false)
+
+       #undef PVR_DPF_FUNCTION_TRACE_ON
+
+#endif /* defined(PVRSRV_NEED_PVR_DPF) */
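A short sketch (not part of this patch) showing the double-bracket PVR_DPF() calling convention and one of the error-path helpers above, assuming pvr_debug.h is included; DoSomething() is a hypothetical PVRSRV_ERROR-returning function used only for illustration.

/* Illustrative sketch only, not part of this patch. */
static PVRSRV_ERROR example_init(IMG_UINT32 ui32Size)
{
	PVRSRV_ERROR eError;

	/* The double brackets are required: PVR_DPF(x) expands x as a whole
	 * argument list, so the level and format string travel together. */
	PVR_DPF((PVR_DBG_MESSAGE, "%s: initialising with size %u", __func__, ui32Size));

	eError = DoSomething(ui32Size); /* hypothetical callee */

	/* Logs "DoSomething() failed (<error string>) in example_init()" and
	 * returns eError when eError != PVRSRV_OK. */
	PVR_LOG_RETURN_IF_ERROR(eError, "DoSomething");

	return PVRSRV_OK;
}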
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDebugPrintf
+@Description    Output a debug message to the user, using an OS-specific
+                method, to a log or console which can be read by developers
+                Invoked from the macro PVR_DPF().
+@Input          ui32DebugLevel   The debug level of the message. This can
+                                 be used to restrict the output of debug
+                                 messages based on their severity.
+                                 If this is PVR_DBG_BUFFERED, the message
+                                 should be written into a debug circular
+                                 buffer instead of being output immediately
+                                 (useful when performance would otherwise
+                                 be adversely affected).
+                                 The debug circular buffer shall only be
+                                 output when PVRSRVDebugPrintfDumpCCB() is
+                                 called.
+@Input          pszFileName      The source file containing the code that is
+                                 generating the message
+@Input          ui32Line         The line number in the source file
+@Input          pszFormat        The formatted message string
+@Input          ...              Zero or more arguments for use by the
+                                 formatted string
+@Return         None
+*/ /**************************************************************************/
+IMG_EXPORT void IMG_CALLCONV PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel,
+                                               const IMG_CHAR *pszFileName,
+                                               IMG_UINT32 ui32Line,
+                                               const IMG_CHAR *pszFormat,
+                                               ...) __printf(4, 5);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDebugPrintfDumpCCB
+@Description    When PVRSRVDebugPrintf() is called with the ui32DebugLevel
+                specified as DBGPRIV_BUFFERED, the debug shall be written to
+                the debug circular buffer instead of being output immediately.
+                (This could be used to obtain debug without incurring a
+                performance hit by printing it at that moment).
+                This function shall dump the contents of that debug circular
+                buffer to be output in an OS-specific method to a log or
+                console which can be read by developers.
+@Return         None
+*/ /**************************************************************************/
+IMG_EXPORT void IMG_CALLCONV PVRSRVDebugPrintfDumpCCB(void);
+
+#if !defined(DOXYGEN)
+#define PVR_DPF_FUNC__(lvl, message, ...) PVR_DPF((lvl, "%s: " message, __func__, ##__VA_ARGS__))
+#define PVR_DPF_FUNC(x) PVR_DPF_FUNC__ x
+#endif /*!defined(DOXYGEN) */
+
+/* Note: Use only when a log message due to the error absolutely should not
+ *       be printed. Otherwise use PVR_LOG_RETURN_IF_ERROR macro.
+ */
+#define PVR_RETURN_IF_ERROR(_rc) do \
+       { if (unlikely(_rc != PVRSRV_OK)) { \
+               return _rc; } \
+       MSC_SUPPRESS_4127 \
+       } while (false)
+
+/* Note: Use only when a log message due to the error absolutely should not
+ *       be printed. Otherwise use PVR_LOG_RETURN_IF_FALSE macro.
+ */
+#define PVR_RETURN_IF_FALSE(_expr, _rc) do \
+       { if (unlikely(!(_expr))) { \
+               return _rc; } \
+       MSC_SUPPRESS_4127 \
+       } while (false)
+
+/* Note: Use only when a log message due to the error absolutely should not
+ *       be printed. Otherwise use PVR_LOG_RETURN_IF_INVALID_PARAM macro.
+ */
+#define PVR_RETURN_IF_INVALID_PARAM(_expr) do \
+       { if (unlikely(!(_expr))) { \
+               return PVRSRV_ERROR_INVALID_PARAMS; } \
+       MSC_SUPPRESS_4127 \
+       } while (false)
+
+/* Note: Use only when a log message due to the error absolutely should not
+ *       be printed. Otherwise use PVR_LOG_RETURN_IF_NOMEM macro.
+ */
+#define PVR_RETURN_IF_NOMEM(_expr) do \
+       { if (unlikely(!(_expr))) { \
+               return PVRSRV_ERROR_OUT_OF_MEMORY; } \
+       MSC_SUPPRESS_4127 \
+       } while (false)
+
+/* Note: Use only when a log message due to the error absolutely should not
+ *       be printed. Otherwise use PVR_LOG_GOTO_IF_NOMEM macro.
+ */
+#define PVR_GOTO_IF_NOMEM(_expr, _err, _go) do \
+       { if (unlikely(_expr == NULL)) { \
+               _err = PVRSRV_ERROR_OUT_OF_MEMORY; \
+               goto _go; } \
+       MSC_SUPPRESS_4127 \
+       } while (false)
+
+/* Note: Use only when a log message due to the error absolutely should not
+ *       be printed. Otherwise use PVR_LOG_GOTO_IF_INVALID_PARAM macro.
+ */
+#define PVR_GOTO_IF_INVALID_PARAM(_expr, _err, _go) do \
+       { if (unlikely(!(_expr))) { \
+               _err = PVRSRV_ERROR_INVALID_PARAMS; \
+               goto _go; } \
+       MSC_SUPPRESS_4127 \
+       } while (false)
+
+/* Note: Use only when a log message due to the error absolutely should not
+ *       be printed. Otherwise use PVR_LOG_GOTO_IF_FALSE macro.
+ */
+#define PVR_GOTO_IF_FALSE(_expr, _go) do \
+       { if (unlikely(!(_expr))) { \
+               goto _go; } \
+       MSC_SUPPRESS_4127 \
+       } while (false)
+
+/* Note: Use only when a log message due to the error absolutely should not
+ *       be printed. Otherwise use PVR_LOG_GOTO_IF_ERROR macro.
+ */
+#define PVR_GOTO_IF_ERROR(_rc, _go) do \
+       { if (unlikely(_rc != PVRSRV_OK)) { \
+               goto _go; } \
+       MSC_SUPPRESS_4127\
+       } while (false)
+
+/* Note: Use only when a log message due to the error absolutely should not
+ *       be printed. Otherwise use PVR_LOG_GOTO_WITH_ERROR macro.
+ */
+#define PVR_GOTO_WITH_ERROR(_err, _rc, _go) do \
+       { _err = _rc; goto _go; \
+       MSC_SUPPRESS_4127 \
+       } while (false)
+
+/*! @cond Doxygen_Suppress */
+#if defined(PVR_DPF_FUNCTION_TRACE_ON)
+
+       #define PVR_DPF_ENTERED \
+       PVR_DPF((PVR_DBG_CALLTRACE, "|-> %s:%d entered", __func__, __LINE__))
+
+       #define PVR_DPF_ENTERED1(p1) \
+               PVR_DPF((PVR_DBG_CALLTRACE, "|-> %s:%d entered (0x%lx)", __func__, __LINE__, ((unsigned long)p1)))
+
+       #define PVR_DPF_RETURN_RC(a) \
+       do { int _r = (a); PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned %d", __func__, __LINE__, (_r))); return (_r); MSC_SUPPRESS_4127 } while (false)
+
+       #define PVR_DPF_RETURN_RC1(a,p1) \
+               do { int _r = (a); PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned %d (0x%lx)", __func__, __LINE__, (_r), ((unsigned long)p1))); return (_r); MSC_SUPPRESS_4127 } while (false)
+
+       #define PVR_DPF_RETURN_VAL(a) \
+               do { PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned with value", __func__, __LINE__)); return (a); MSC_SUPPRESS_4127 } while (false)
+
+       #define PVR_DPF_RETURN_OK \
+               do { PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned ok", __func__, __LINE__)); return PVRSRV_OK; MSC_SUPPRESS_4127 } while (false)
+
+       #define PVR_DPF_RETURN \
+               do { PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned", __func__, __LINE__)); return; MSC_SUPPRESS_4127 } while (false)
+
+       #if !defined(DEBUG)
+       #error PVR DPF Function trace enabled in release build, rectify
+       #endif
+
+#else /* defined(PVR_DPF_FUNCTION_TRACE_ON) */
+
+       #define PVR_DPF_ENTERED
+       #define PVR_DPF_ENTERED1(p1)
+       #define PVR_DPF_RETURN_RC(a)     return (a)
+       #define PVR_DPF_RETURN_RC1(a,p1) return (a)
+       #define PVR_DPF_RETURN_VAL(a)    return (a)
+       #define PVR_DPF_RETURN_OK        return PVRSRV_OK
+       #define PVR_DPF_RETURN           return
+
+#endif /* defined(PVR_DPF_FUNCTION_TRACE_ON) */
+/*! @endcond */
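A small sketch (not part of this patch) of the call-trace macros above, assuming pvr_debug.h is included: with PVR_DPF_FUNCTION_TRACE_ON (debug builds only) entry and exit are logged at PVR_DBG_CALLTRACE level, otherwise the macros reduce to plain return statements.

/* Illustrative sketch only, not part of this patch. */
static PVRSRV_ERROR traced_operation(IMG_UINT32 ui32Arg)
{
	PVR_DPF_ENTERED1(ui32Arg);

	if (ui32Arg == 0)
	{
		/* Logs "<-| ... returned <rc>" when tracing is on, then returns. */
		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
	}

	PVR_DPF_RETURN_OK;
}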
+
+#if defined(__KERNEL__) || defined(DOXYGEN) || defined(__QNXNTO__)
+/* Use PVR_DPF() unless the message is necessary in release builds */
+#define PVR_LOG(X) PVRSRVReleasePrintf X
+
+/*************************************************************************/ /*!
+@Function       PVRSRVReleasePrintf
+@Description    Output an important message, using an OS-specific method,
+                to the Server log or console which will always be output in
+                both release and debug builds.
+                Invoked from the macro PVR_LOG(). Used in Services Server only.
+@Input          pszFormat   The message format string
+@Input          ...         Zero or more arguments for use by the format string
+@Return         None
+*/ /**************************************************************************/
+void IMG_CALLCONV PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...) __printf(1, 2);
+#endif
+
+/* PVR_TRACE() handling */
+
+#if defined(PVRSRV_NEED_PVR_TRACE) || defined(DOXYGEN)
+
+       #define PVR_TRACE(X)    PVRSRVTrace X    /*!< PowerVR Debug Trace Macro */
+       /* Empty string implementation that is -O0 build friendly */
+       #define PVR_TRACE_EMPTY_LINE()  PVR_TRACE(("%s", ""))
+
+/*************************************************************************/ /*!
+@Function       PVRSRVTrace
+@Description    Output a debug message to the user
+                Invoked from the macro PVR_TRACE().
+@Input          pszFormat   The message format string
+@Input          ...         Zero or more arguments for use by the format string
+*/ /**************************************************************************/
+IMG_EXPORT void IMG_CALLCONV PVRSRVTrace(const IMG_CHAR* pszFormat, ... )
+       __printf(1, 2);
+
+#else /* defined(PVRSRV_NEED_PVR_TRACE) */
+       /*! Null Implementation of PowerVR Debug Trace Macro (does nothing) */
+       #define PVR_TRACE(X)
+
+#endif /* defined(PVRSRV_NEED_PVR_TRACE) */
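PVR_TRACE() follows the same double-bracket convention and compiles out entirely unless PVRSRV_NEED_PVR_TRACE is defined; a one-line sketch (not part of this patch), assuming pvr_debug.h is included:

/* Illustrative sketch only, not part of this patch. */
static void report_frame_time(IMG_UINT32 ui32Milliseconds)
{
	PVR_TRACE(("frame completed in %u ms", ui32Milliseconds));
}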
+
+
+#if defined(PVRSRV_NEED_PVR_ASSERT)
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(TRUNCATE_64BITS_TO_32BITS)
+#endif
+       INLINE static IMG_UINT32 TRUNCATE_64BITS_TO_32BITS(IMG_UINT64 uiInput)
+       {
+               IMG_UINT32 uiTruncated;
+
+               uiTruncated = (IMG_UINT32)uiInput;
+               PVR_ASSERT(uiInput == uiTruncated);
+               return uiTruncated;
+       }
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(TRUNCATE_64BITS_TO_SIZE_T)
+#endif
+       INLINE static size_t TRUNCATE_64BITS_TO_SIZE_T(IMG_UINT64 uiInput)
+       {
+               size_t uiTruncated;
+
+               uiTruncated = (size_t)uiInput;
+               PVR_ASSERT(uiInput == uiTruncated);
+               return uiTruncated;
+       }
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(TRUNCATE_SIZE_T_TO_32BITS)
+#endif
+       INLINE static IMG_UINT32 TRUNCATE_SIZE_T_TO_32BITS(size_t uiInput)
+       {
+               IMG_UINT32 uiTruncated;
+
+               uiTruncated = (IMG_UINT32)uiInput;
+               PVR_ASSERT(uiInput == uiTruncated);
+               return uiTruncated;
+       }
+
+
+#else /* defined(PVRSRV_NEED_PVR_ASSERT) */
+       #define TRUNCATE_64BITS_TO_32BITS(expr) ((IMG_UINT32)(expr))
+       #define TRUNCATE_64BITS_TO_SIZE_T(expr) ((size_t)(expr))
+       #define TRUNCATE_SIZE_T_TO_32BITS(expr) ((IMG_UINT32)(expr))
+#endif /* defined(PVRSRV_NEED_PVR_ASSERT) */
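A minimal sketch (not part of this patch) of the truncation helpers above, assuming pvr_debug.h is included: in PVRSRV_NEED_PVR_ASSERT builds they assert that no bits are lost when narrowing, otherwise they are plain casts.

/* Illustrative sketch only, not part of this patch. */
static IMG_UINT32 byte_count_to_u32(IMG_UINT64 ui64Bytes)
{
	/* Asserts (in assert-enabled builds) if ui64Bytes does not fit in 32 bits. */
	return TRUNCATE_64BITS_TO_32BITS(ui64Bytes);
}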
+
+/*! @cond Doxygen_Suppress */
+/* Macros used to trace calls */
+#if defined(DEBUG)
+       #define PVR_DBG_FILELINE , (__FILE__), (__LINE__)
+       #define PVR_DBG_FILELINE_PARAM , const IMG_CHAR *pszaFile, IMG_UINT32 ui32Line
+       #define PVR_DBG_FILELINE_ARG , pszaFile, ui32Line
+       #define PVR_DBG_FILELINE_FMT " %s:%u"
+       #define PVR_DBG_FILELINE_UNREF() do { PVR_UNREFERENCED_PARAMETER(pszaFile); \
+                               PVR_UNREFERENCED_PARAMETER(ui32Line); } while (false)
+#else
+       #define PVR_DBG_FILELINE
+       #define PVR_DBG_FILELINE_PARAM
+       #define PVR_DBG_FILELINE_ARG
+       #define PVR_DBG_FILELINE_FMT
+       #define PVR_DBG_FILELINE_UNREF()
+#endif
+/*! @endcond */
+
+#if defined(__cplusplus)
+}
+#endif
+
+/*!
+    @def PVR_ASSERT
+    @brief Aborts the program if assertion fails.
+
+    The macro will be defined only when PVRSRV_NEED_PVR_ASSERT macro is
+    enabled. It's ignored otherwise.
+
+    @def PVR_DPF
+    @brief PowerVR Debug Printf logging macro used throughout the driver.
+
+    The macro allows logging messages to be printed to the appropriate log.
+    The destination log depends on the component (user space / kernel space)
+    and the operating system (Linux, Android, etc.).
+
+    The macro also supports severity levels that allow messages to be turned
+    on or off based on their importance.
+
+    This macro will print messages with a severity level higher than error
+    only if the PVRSRV_NEED_PVR_DPF macro is defined.
+
+    @def PVR_LOG_ERROR
+    @brief Logs error.
+
+    @def PVR_LOG_IF_ERROR
+    @brief Logs error if not PVRSRV_OK.
+
+    @def PVR_WARN_IF_ERROR
+    @brief Logs warning if not PVRSRV_OK.
+
+    @def PVR_LOG_RETURN_IF_NOMEM
+    @brief Logs error if expression is NULL and returns PVRSRV_ERROR_OUT_OF_MEMORY.
+
+    @def PVR_LOG_GOTO_IF_NOMEM
+    @brief Logs error if expression is NULL and jumps to given label.
+
+    @def PVR_LOG_RETURN_IF_ERROR
+    @brief Logs error if not PVRSRV_OK and returns the error.
+
+    @def PVR_LOG_RETURN_VOID_IF_ERROR
+    @brief Logs error if not PVRSRV_OK and returns (used in functions that return void).
+
+    @def PVR_LOG_GOTO_IF_ERROR
+    @brief Logs error if not PVRSRV_OK and jumps to label.
+
+    @def PVR_LOG_GOTO_WITH_ERROR
+    @brief Logs error, goes to a label and sets the error code.
+
+    @def PVR_LOG_IF_FALSE
+    @brief Prints error message if expression is false.
+
+    @def PVR_LOG_RETURN_IF_FALSE
+    @brief Prints error message if expression is false and returns given error.
+
+    @def PVR_LOG_RETURN_VOID_IF_FALSE
+    @brief Prints error message if expression is false and returns (used in functions that return void).
+
+    @def PVR_LOG_GOTO_IF_FALSE
+    @brief Prints error message if expression is false and jumps to label.
+
+    @def PVR_LOG_RETURN_IF_INVALID_PARAM
+    @brief Prints error message if expression is false and returns PVRSRV_ERROR_INVALID_PARAMS.
+
+    @def PVR_LOG_GOTO_IF_INVALID_PARAM
+    @brief Prints error message if expression is false and jumps to label.
+
+    @def PVR_RETURN_IF_ERROR
+    @brief Returns passed error code if it is different from PVRSRV_OK.
+
+    @def PVR_RETURN_IF_FALSE
+    @brief Returns passed error code if expression is false.
+
+    @def PVR_RETURN_IF_INVALID_PARAM
+    @brief Returns PVRSRV_ERROR_INVALID_PARAMS if expression is false.
+
+    @def PVR_RETURN_IF_NOMEM
+    @brief Returns PVRSRV_ERROR_OUT_OF_MEMORY if expression is NULL.
+
+    @def PVR_GOTO_IF_NOMEM
+    @brief Goes to a label if expression is NULL.
+
+    @def PVR_GOTO_IF_INVALID_PARAM
+    @brief Goes to a label if expression is false.
+
+    @def PVR_GOTO_IF_FALSE
+    @brief Goes to a label if expression is false.
+
+    @def PVR_GOTO_IF_ERROR
+    @brief Goes to a label if the error code is different from PVRSRV_OK.
+
+    @def PVR_GOTO_WITH_ERROR
+    @brief Goes to a label and sets the error code.
+
+    @def PVR_LOG
+    @brief Prints message to a log unconditionally.
+
+    This macro will print messages only if the PVRSRV_NEED_PVR_LOG macro is defined.
+
+    @def PVR_LOG_MSG
+    @brief Prints message to a log with the given log-level.
+
+    @def PVR_LOG_VA
+    @brief Prints message with var-args to a log with the given log-level.
+
+    @def PVR_LOG_IF_ERROR_VA
+    @brief Prints message with var-args to a log if the error code is different than PVRSRV_OK.
+
+    @def PVR_LOG_IF_FALSE_VA
+    @brief Prints message with var-args if expression is false.
+
+    @def PVR_LOG_RETURN_IF_ERROR_VA
+    @brief Prints message with var-args to a log and returns the error code.
+
+    @def PVR_LOG_GOTO_IF_ERROR_VA
+    @brief Prints message with var-args to a log and goes to a label if the error code is different than PVRSRV_OK.
+
+    @def PVR_LOG_RETURN_IF_FALSE_VA
+    @brief Logs the error message with var-args if the expression is false and returns the error code.
+
+    @def PVR_LOG_GOTO_IF_FALSE_VA
+    @brief Logs the error message with var-args and goes to a label if the expression is false.
+
+    @def PVR_TRACE_EMPTY_LINE
+    @brief Prints an empty line to a log (PVRSRV_NEED_PVR_TRACE must be defined).
+
+    @def TRUNCATE_64BITS_TO_32BITS
+    @brief Truncates 64 bit value to 32 bit value (with possible precision loss).
+
+    @def TRUNCATE_64BITS_TO_SIZE_T
+    @brief Truncates 64 bit value to size_t value (with possible precision loss).
+
+    @def TRUNCATE_SIZE_T_TO_32BITS
+    @brief Truncates size_t value to 32 bit value (with possible precision loss).
+ */
+
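+/* Illustrative usage sketch of the error-handling helpers documented above.
+ * The authoritative parameter lists are the macro definitions earlier in this
+ * header; the argument order shown here (value, message, optional label) is
+ * an assumption for illustration, and psNode, AllocNode(), SetupNode(),
+ * FreeNode() and err_free are hypothetical.
+ *
+ *     PVRSRV_ERROR eError;
+ *
+ *     psNode = AllocNode();
+ *     PVR_LOG_RETURN_IF_NOMEM(psNode, "AllocNode");
+ *
+ *     eError = SetupNode(psNode);
+ *     PVR_LOG_GOTO_IF_ERROR(eError, "SetupNode", err_free);
+ *
+ *     return PVRSRV_OK;
+ *
+ * err_free:
+ *     FreeNode(psNode);
+ *     return eError;
+ */
+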
+#endif /* PVR_DEBUG_H */
+
+/******************************************************************************
+ End of file (pvr_debug.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/include/pvr_fd_sync_kernel.h b/drivers/gpu/drm/img/img-rogue/include/pvr_fd_sync_kernel.h
new file mode 100644 (file)
index 0000000..3645e29
--- /dev/null
@@ -0,0 +1,64 @@
+/*************************************************************************/ /*!
+@File           pvr_fd_sync_kernel.h
+@Title          Kernel/userspace interface definitions to use the kernel sync
+                driver
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+
+#ifndef _PVR_FD_SYNC_KERNEL_H_
+#define _PVR_FD_SYNC_KERNEL_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#include "pvr_drm.h"
+
+#define PVR_SYNC_MAX_QUERY_FENCE_POINTS 14
+
+struct pvr_sync_pt_info {
+       /* Output */
+       __u32 id;
+       __u32 ui32FWAddr;
+       __u32 ui32CurrOp;
+       __u32 ui32NextOp;
+       __u32 ui32TlTaken;
+} __attribute__((packed, aligned(8)));
+
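+/* Illustrative note: with GCC attribute semantics, packed plus aligned(8)
+ * means the five __u32 members occupy 20 bytes and sizeof(struct
+ * pvr_sync_pt_info) rounds up to 24. A caller querying fence point state
+ * would typically reserve PVR_SYNC_MAX_QUERY_FENCE_POINTS entries; the array
+ * name below is hypothetical.
+ *
+ *     struct pvr_sync_pt_info asPtInfo[PVR_SYNC_MAX_QUERY_FENCE_POINTS];
+ */
+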
+#endif /* _PVR_FD_SYNC_KERNEL_H_ */
diff --git a/drivers/gpu/drm/img/img-rogue/include/pvr_intrinsics.h b/drivers/gpu/drm/img/img-rogue/include/pvr_intrinsics.h
new file mode 100644 (file)
index 0000000..410a2f5
--- /dev/null
@@ -0,0 +1,70 @@
+/*************************************************************************/ /*!
+@File
+@Title          Intrinsics definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVR_INTRINSICS_H
+#define PVR_INTRINSICS_H
+
+/* PVR_CTZLL:
+ * Count the number of trailing zeroes in a long long integer
+ */
+
+#if defined(__GNUC__)
+#if defined(__x86_64__)
+
+       #define PVR_CTZLL __builtin_ctzll
+#endif
+#endif
+
+/* PVR_CLZLL:
+ * Count the number of leading zeroes in a long long integer
+ */
+
+#if defined(__GNUC__)
+#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__) || \
+                                       defined(__arm__) || defined(__mips)
+
+#define PVR_CLZLL __builtin_clzll
+
+#endif
+#endif
+
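+/* Illustrative usage sketch: when available, these wrappers map directly onto
+ * the GCC builtins, so PVR_CTZLL(x) yields the bit index of the lowest set
+ * bit and PVR_CLZLL(x) the number of leading zero bits of a non-zero value
+ * (both are undefined for x == 0). The bitmap variable below is hypothetical.
+ *
+ *     #if defined(PVR_CTZLL)
+ *     unsigned int uiFirstFreeSlot = PVR_CTZLL(~ui64AllocBitmap);
+ *     #endif
+ */
+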
+#endif /* PVR_INTRINSICS_H */
diff --git a/drivers/gpu/drm/img/img-rogue/include/pvrmodule.h b/drivers/gpu/drm/img/img-rogue/include/pvrmodule.h
new file mode 100644 (file)
index 0000000..267c7b6
--- /dev/null
@@ -0,0 +1,48 @@
+/*************************************************************************/ /*!
+@Title          Module Author and License.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef        _PVRMODULE_H_
+#define        _PVRMODULE_H_
+
+MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
+MODULE_LICENSE("Dual MIT/GPL");
+
+#endif /* _PVRMODULE_H_ */
diff --git a/drivers/gpu/drm/img/img-rogue/include/pvrsrv_device_types.h b/drivers/gpu/drm/img/img-rogue/include/pvrsrv_device_types.h
new file mode 100644 (file)
index 0000000..662e3bc
--- /dev/null
@@ -0,0 +1,55 @@
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR device type definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(PVRSRV_DEVICE_TYPES_H)
+#define PVRSRV_DEVICE_TYPES_H
+
+#include "img_types.h"
+
+#define PVRSRV_MAX_DEVICES             16U     /*!< Largest supported number of devices on the system */
+
+#if defined(__KERNEL__) && defined(__linux__) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#endif /* PVRSRV_DEVICE_TYPES_H */
diff --git a/drivers/gpu/drm/img/img-rogue/include/pvrsrv_devvar.h b/drivers/gpu/drm/img/img-rogue/include/pvrsrv_devvar.h
new file mode 100644 (file)
index 0000000..a8c64e3
--- /dev/null
@@ -0,0 +1,291 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services Device Variable interface header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines the client side interface for device variables
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVRSRV_DEVVAR_H
+#define PVRSRV_DEVVAR_H
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include <powervr/pvrsrv_sync_ext.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define DEVVAR_MAX_NAME_LEN 32
+
+typedef struct SYNC_PRIM_CONTEXT_TAG *PDEVVARCTX;
+typedef struct PVRSRV_CLIENT_SYNC_PRIM_TAG *PDEVVAR;
+
+typedef struct PVRSRV_DEV_VAR_UPDATE_TAG
+{
+       PDEVVAR                                 psDevVar;                       /*!< Pointer to the dev var */
+       IMG_UINT32                              ui32UpdateValue;        /*!< the update value */
+} PVRSRV_DEV_VAR_UPDATE;
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDevVarContextCreate
+
+@Description    Create a new device variable context
+
+@Input          psDevConnection         Device to create the device
+                                        variable context on
+
+@Output         phDevVarContext         Handle to the created device
+                                        variable context
+
+@Return         PVRSRV_OK if the device variable context was successfully
+                created
+*/
+/*****************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVDevVarContextCreate(const PVRSRV_DEV_CONNECTION *psDevConnection,
+                          PDEVVARCTX                  *phDevVarContext);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDevVarContextDestroy
+
+@Description    Destroy a device variable context
+
+@Input          hDevVarContext          Handle to the device variable
+                                        context to destroy
+
+@Return         None
+*/
+/*****************************************************************************/
+IMG_EXPORT void
+PVRSRVDevVarContextDestroy(PDEVVARCTX hDevVarContext);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDevVarAlloc
+
+@Description    Allocate a new device variable on the specified device
+                variable context. The device variable's value is initialised
+                with the value passed in ui32InitialValue.
+
+@Input          hDevVarContext          Handle to the device variable
+                                        context
+@Input          ui32InitialValue        Value to initially assign to the
+                                        new variable
+@Input          pszDevVarName           Name assigned to the device variable
+                                        (for debug purposes)
+
+@Output         ppsDevVar               Created device variable
+
+@Return         PVRSRV_OK if the device variable was successfully created
+*/
+/*****************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVDevVarAllocI(PDEVVARCTX hDevVarContext,
+                   PDEVVAR *ppsDevVar,
+                   IMG_UINT32 ui32InitialValue,
+                   const IMG_CHAR *pszDevVarName
+                   PVR_DBG_FILELINE_PARAM);
+#define PVRSRVDevVarAlloc(hDevVarContext, ppsDevVar, ui32InitialValue, pszDevVarName) \
+       PVRSRVDevVarAllocI( (hDevVarContext), (ppsDevVar), (ui32InitialValue), (pszDevVarName) \
+                           PVR_DBG_FILELINE )
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDevVarFree
+
+@Description    Free a device variable
+
+@Input          psDevVar                The device variable to free
+
+@Return         None
+*/
+/*****************************************************************************/
+IMG_EXPORT void
+PVRSRVDevVarFree(PDEVVAR psDevVar);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDevVarSet
+
+@Description    Set the device variable to a value
+
+@Input          psDevVar                The device variable to set
+
+@Input          ui32Value               Value to set it to
+
+@Return         None
+*/
+/*****************************************************************************/
+IMG_EXPORT void
+PVRSRVDevVarSet(PDEVVAR                psDevVar,
+                IMG_UINT32     ui32Value);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDevVarGet
+
+@Description    Get the current value of the device variable
+
+@Input          psDevVar                The device variable to get the
+                                        value of
+
+@Return         Value of the variable
+*/
+/*****************************************************************************/
+IMG_EXPORT IMG_UINT32
+PVRSRVDevVarGet(PDEVVAR                psDevVar);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDevVarGetFirmwareAddr
+
+@Description    Returns the address of the associated firmware value for a
+                specified device integer (not exposed to client)
+
+@Input          psDevVar               The device variable to resolve
+
+@Return         The firmware address of the device variable
+*/
+/*****************************************************************************/
+IMG_EXPORT IMG_UINT32
+PVRSRVDevVarGetFirmwareAddr(PDEVVAR    psDevVar);
+
+#if defined(PDUMP)
+/*************************************************************************/ /*!
+@Function       PVRSRVDevVarPDump
+
+@Description    PDump the current value of the device variable
+
+@Input          psDevVar                The device variable to PDump
+
+@Return         None
+*/
+/*****************************************************************************/
+IMG_EXPORT void
+PVRSRVDevVarPDump(PDEVVAR psDevVar);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDevVarPDumpPol
+
+@Description    Do a PDump poll of the device variable
+
+@Input          psDevVar                The device variable to PDump
+
+@Input          ui32Value               Value to Poll for
+
+@Input          ui32Mask                PDump mask operator
+
+@Input          ui32PDumpFlags          PDump flags
+
+@Return         None
+*/
+/*****************************************************************************/
+IMG_EXPORT void
+PVRSRVDevVarPDumpPol(PDEVVAR psDevVar,
+                     IMG_UINT32 ui32Value,
+                     IMG_UINT32 ui32Mask,
+                     PDUMP_POLL_OPERATOR eOperator,
+                     IMG_UINT32 ui32PDumpFlags);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDevVarPDumpCBP
+
+@Description    Do a PDump CB poll using the device variable
+
+@Input          psDevVar                The device variable to PDump
+
+@Input          uiWriteOffset           Current write offset of buffer
+
+@Input          uiPacketSize            Size of the packet to write into CB
+
+@Input          uiBufferSize            Size of the CB
+
+@Return         None
+*/
+/*****************************************************************************/
+IMG_EXPORT void
+PVRSRVDevVarPDumpCBP(PDEVVAR psDevVar,
+                     IMG_UINT64 uiWriteOffset,
+                     IMG_UINT64 uiPacketSize,
+                     IMG_UINT64 uiBufferSize);
+#else /* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVDevVarPDump)
+#endif
+static INLINE void
+PVRSRVDevVarPDump(PDEVVAR psDevVar)
+{
+       PVR_UNREFERENCED_PARAMETER(psDevVar);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVDevVarPDumpPol)
+#endif
+static INLINE void
+PVRSRVDevVarPDumpPol(PDEVVAR psDevVar,
+                     IMG_UINT32 ui32Value,
+                     IMG_UINT32 ui32Mask,
+                     PDUMP_POLL_OPERATOR eOperator,
+                     IMG_UINT32 ui32PDumpFlags)
+{
+        PVR_UNREFERENCED_PARAMETER(psDevVar);
+        PVR_UNREFERENCED_PARAMETER(ui32Value);
+        PVR_UNREFERENCED_PARAMETER(ui32Mask);
+        PVR_UNREFERENCED_PARAMETER(eOperator);
+        PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVDevVarPDumpCBP)
+#endif
+static INLINE void
+PVRSRVDevVarPDumpCBP(PDEVVAR psDevVar,
+                     IMG_UINT64 uiWriteOffset,
+                     IMG_UINT64 uiPacketSize,
+                     IMG_UINT64 uiBufferSize)
+{
+       PVR_UNREFERENCED_PARAMETER(psDevVar);
+       PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+       PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+       PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+}
+#endif /* PDUMP */
+
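+/* Illustrative usage sketch of the device variable interface declared above.
+ * Obtaining the PVRSRV_DEV_CONNECTION is outside the scope of this header,
+ * error checking is elided, and the variable names are hypothetical.
+ *
+ *     PDEVVARCTX hDevVarCtx;
+ *     PDEVVAR psDevVar;
+ *     IMG_UINT32 ui32Value;
+ *
+ *     PVRSRVDevVarContextCreate(psConnection, &hDevVarCtx);
+ *     PVRSRVDevVarAlloc(hDevVarCtx, &psDevVar, 0, "example-var");
+ *
+ *     PVRSRVDevVarSet(psDevVar, 42);
+ *     ui32Value = PVRSRVDevVarGet(psDevVar);
+ *
+ *     PVRSRVDevVarFree(psDevVar);
+ *     PVRSRVDevVarContextDestroy(hDevVarCtx);
+ */
+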
+#if defined(__cplusplus)
+}
+#endif
+#endif /* PVRSRV_DEVVAR_H */
diff --git a/drivers/gpu/drm/img/img-rogue/include/pvrsrv_error.h b/drivers/gpu/drm/img/img-rogue/include/pvrsrv_error.h
new file mode 100644 (file)
index 0000000..0bbf843
--- /dev/null
@@ -0,0 +1,61 @@
+/*************************************************************************/ /*!
+@File           pvrsrv_error.h
+@Title          Services error enumeration
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines error codes used by any/all services modules
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(PVRSRV_ERROR_H)
+#define PVRSRV_ERROR_H
+
+/*!
+ *****************************************************************************
+ * Error values
+ *****************************************************************************/
+typedef enum PVRSRV_ERROR_TAG
+{
+       PVRSRV_OK,
+#define PVRE(x) x,
+#include "pvrsrv_errors.h"
+#undef PVRE
+       PVRSRV_ERROR_FORCE_I32 = 0x7fffffff
+
+} PVRSRV_ERROR;
+
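+/* Illustrative note on the X-macro pattern used above: pvrsrv_errors.h holds
+ * one PVRE(...) entry per error and deliberately has no include guard, so the
+ * same list can be expanded more than once with different definitions of
+ * PVRE. For example, a hypothetical error-to-string helper could be built the
+ * same way the enum is:
+ *
+ *     static const char *ExampleErrorToString(PVRSRV_ERROR eError)
+ *     {
+ *         switch (eError)
+ *         {
+ *             case PVRSRV_OK: return "PVRSRV_OK";
+ *     #define PVRE(x) case x: return #x;
+ *     #include "pvrsrv_errors.h"
+ *     #undef PVRE
+ *             default: return "Unknown PVRSRV error";
+ *         }
+ *     }
+ */
+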
+#endif /* !defined(PVRSRV_ERROR_H) */
diff --git a/drivers/gpu/drm/img/img-rogue/include/pvrsrv_errors.h b/drivers/gpu/drm/img/img-rogue/include/pvrsrv_errors.h
new file mode 100644 (file)
index 0000000..59b9cfe
--- /dev/null
@@ -0,0 +1,410 @@
+/*************************************************************************/ /*!
+@File           pvrsrv_errors.h
+@Title          Services error codes
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines error codes used by any/all services modules
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* Don't add include guards to this file! */
+
+PVRE(PVRSRV_ERROR_OUT_OF_MEMORY)
+PVRE(PVRSRV_ERROR_TOO_FEW_BUFFERS)
+PVRE(PVRSRV_ERROR_INVALID_PARAMS)
+PVRE(PVRSRV_ERROR_INIT_FAILURE)
+PVRE(PVRSRV_ERROR_CANT_REGISTER_CALLBACK)
+PVRE(PVRSRV_ERROR_INVALID_DEVICE)
+PVRE(PVRSRV_ERROR_NOT_OWNER)
+PVRE(PVRSRV_ERROR_BAD_MAPPING)
+PVRE(PVRSRV_ERROR_TIMEOUT)
+PVRE(PVRSRV_ERROR_NOT_IMPLEMENTED)
+PVRE(PVRSRV_ERROR_FLIP_CHAIN_EXISTS)
+PVRE(PVRSRV_ERROR_INVALID_SWAPINTERVAL)
+PVRE(PVRSRV_ERROR_SCENE_INVALID)
+PVRE(PVRSRV_ERROR_STREAM_ERROR)
+PVRE(PVRSRV_ERROR_FAILED_DEPENDENCIES)
+PVRE(PVRSRV_ERROR_CMD_NOT_PROCESSED)
+PVRE(PVRSRV_ERROR_CMD_TOO_BIG)
+PVRE(PVRSRV_ERROR_DEVICE_REGISTER_FAILED)
+PVRE(PVRSRV_ERROR_TOOMANYBUFFERS)
+PVRE(PVRSRV_ERROR_NOT_SUPPORTED)
+PVRE(PVRSRV_ERROR_PROCESSING_BLOCKED)
+PVRE(PVRSRV_ERROR_CANNOT_FLUSH_QUEUE)
+PVRE(PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE)
+PVRE(PVRSRV_ERROR_CANNOT_GET_RENDERDETAILS)
+PVRE(PVRSRV_ERROR_RETRY)
+PVRE(PVRSRV_ERROR_DDK_VERSION_MISMATCH)
+PVRE(PVRSRV_ERROR_DDK_BUILD_MISMATCH)
+PVRE(PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH)
+PVRE(PVRSRV_ERROR_BVNC_MISMATCH)
+PVRE(PVRSRV_ERROR_FWPROCESSOR_MISMATCH)
+PVRE(PVRSRV_ERROR_UPLOAD_TOO_BIG)
+PVRE(PVRSRV_ERROR_INVALID_FLAGS)
+PVRE(PVRSRV_ERROR_FAILED_TO_REGISTER_PROCESS)
+PVRE(PVRSRV_ERROR_UNABLE_TO_LOAD_LIBRARY)
+PVRE(PVRSRV_ERROR_UNABLE_GET_FUNC_ADDR)
+PVRE(PVRSRV_ERROR_UNLOAD_LIBRARY_FAILED)
+PVRE(PVRSRV_ERROR_BRIDGE_CALL_FAILED)
+PVRE(PVRSRV_ERROR_IOCTL_CALL_FAILED)
+PVRE(PVRSRV_ERROR_MMU_API_PROTOCOL_ERROR)
+PVRE(PVRSRV_ERROR_MMU_CONFIG_IS_WRONG)
+PVRE(PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND)
+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_ALLOCATE_PAGETABLES)
+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_CREATE_HEAP)
+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_MAP_PAGE_TABLE)
+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_UNMAP_PAGE_TABLE)
+PVRE(PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE)
+PVRE(PVRSRV_ERROR_MMU_LIVE_ALLOCATIONS_IN_HEAP)
+PVRE(PVRSRV_ERROR_MMU_RESERVATION_NOT_INSIDE_HEAP)
+PVRE(PVRSRV_ERROR_PMR_NEW_MEMORY)
+PVRE(PVRSRV_ERROR_PMR_STILL_REFERENCED)
+PVRE(PVRSRV_ERROR_PMR_CLIENT_NOT_TRUSTED)
+PVRE(PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES)
+PVRE(PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY)
+PVRE(PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES)
+PVRE(PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE)
+PVRE(PVRSRV_ERROR_PMR_NOT_PERMITTED)
+PVRE(PVRSRV_ERROR_PMR_ALREADY_OCCUPIED)
+PVRE(PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR)
+PVRE(PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR)
+PVRE(PVRSRV_ERROR_PMR_WRONG_PMR_TYPE)
+PVRE(PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS)
+PVRE(PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE)
+PVRE(PVRSRV_ERROR_PMR_BAD_CHUNK_SIZE)
+PVRE(PVRSRV_ERROR_PMR_MAPPINGTABLE_MISMATCH)
+PVRE(PVRSRV_ERROR_PMR_INVALID_CHUNK)
+PVRE(PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING)
+PVRE(PVRSRV_ERROR_PMR_EMPTY)
+PVRE(PVRSRV_ERROR_PMR_NO_CPU_MAP_FOUND)
+PVRE(PVRSRV_ERROR_PMR_CPU_PAGE_UNMAP_FAILED)
+PVRE(PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED)
+PVRE(PVRSRV_ERROR_PMR_PAGE_POISONING_FAILED)
+PVRE(PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY)
+PVRE(PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP)
+PVRE(PVRSRV_ERROR_DEVICEMEM_BAD_IMPORT_SIZE)
+PVRE(PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION)
+PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX)
+PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX)
+PVRE(PVRSRV_ERROR_DEVICEMEM_MAP_FAILED)
+PVRE(PVRSRV_ERROR_DEVICEMEM_NON_ZERO_USAGE_COUNT)
+PVRE(PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE)
+PVRE(PVRSRV_ERROR_DEVICEMEM_VA_ALLOC_FAILED)
+PVRE(PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA)
+PVRE(PVRSRV_ERROR_DEVICEMEM_OUT_OF_DEVICE_VM)
+PVRE(PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED)
+PVRE(PVRSRV_ERROR_DEVICEMEM_NO_MAPPING)
+PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_PMR_FLAGS)
+PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_LMA_HEAP)
+PVRE(PVRSRV_ERROR_INVALID_MMU_TYPE)
+PVRE(PVRSRV_ERROR_BUFFER_DEVICE_NOT_FOUND)
+PVRE(PVRSRV_ERROR_BUFFER_DEVICE_ALREADY_PRESENT)
+PVRE(PVRSRV_ERROR_PCI_DEVICE_NOT_FOUND)
+PVRE(PVRSRV_ERROR_PCI_CALL_FAILED)
+PVRE(PVRSRV_ERROR_PCI_REGION_TOO_SMALL)
+PVRE(PVRSRV_ERROR_PCI_REGION_UNAVAILABLE)
+PVRE(PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH)
+PVRE(PVRSRV_ERROR_REGISTER_BASE_NOT_SET)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_USER_MEM)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_VP_MEMORY)
+PVRE(PVRSRV_ERROR_FAILED_TO_MAP_SHARED_PBDESC)
+PVRE(PVRSRV_ERROR_FAILED_TO_MAP_KERNELVIRTUAL)
+PVRE(PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY)
+PVRE(PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES)
+PVRE(PVRSRV_ERROR_FAILED_TO_FREE_PAGES)
+PVRE(PVRSRV_ERROR_FAILED_TO_COPY_PAGES)
+PVRE(PVRSRV_ERROR_UNABLE_TO_LOCK_PAGES)
+PVRE(PVRSRV_ERROR_UNABLE_TO_UNLOCK_PAGES)
+PVRE(PVRSRV_ERROR_STILL_MAPPED)
+PVRE(PVRSRV_ERROR_MAPPING_NOT_FOUND)
+PVRE(PVRSRV_ERROR_PHYS_ADDRESS_EXCEEDS_32BIT)
+PVRE(PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE)
+PVRE(PVRSRV_ERROR_INVALID_SEGMENT_BLOCK)
+PVRE(PVRSRV_ERROR_INVALID_GFXDEVDEVDATA)
+PVRE(PVRSRV_ERROR_INVALID_DEVINFO)
+PVRE(PVRSRV_ERROR_INVALID_MEMINFO)
+PVRE(PVRSRV_ERROR_INVALID_MISCINFO)
+PVRE(PVRSRV_ERROR_UNKNOWN_IOCTL)
+PVRE(PVRSRV_ERROR_INVALID_CONTEXT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT)
+PVRE(PVRSRV_ERROR_INVALID_HEAP)
+PVRE(PVRSRV_ERROR_INVALID_KERNELINFO)
+PVRE(PVRSRV_ERROR_UNKNOWN_POWER_STATE)
+PVRE(PVRSRV_ERROR_INVALID_HANDLE_TYPE)
+PVRE(PVRSRV_ERROR_INVALID_WRAP_TYPE)
+PVRE(PVRSRV_ERROR_INVALID_PHYS_ADDR)
+PVRE(PVRSRV_ERROR_INVALID_CPU_ADDR)
+PVRE(PVRSRV_ERROR_INVALID_HEAPINFO)
+PVRE(PVRSRV_ERROR_INVALID_PERPROC)
+PVRE(PVRSRV_ERROR_FAILED_TO_RETRIEVE_HEAPINFO)
+PVRE(PVRSRV_ERROR_INVALID_MAP_REQUEST)
+PVRE(PVRSRV_ERROR_INVALID_UNMAP_REQUEST)
+PVRE(PVRSRV_ERROR_UNABLE_TO_FIND_MAPPING_HEAP)
+PVRE(PVRSRV_ERROR_MAPPING_STILL_IN_USE)
+PVRE(PVRSRV_ERROR_EXCEEDED_HW_LIMITS)
+PVRE(PVRSRV_ERROR_NO_STAGING_BUFFER_ALLOCATED)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_PERPROC_AREA)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_EVENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_EVENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CLOSE_THREAD)
+PVRE(PVRSRV_ERROR_THREAD_READ_ERROR)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR)
+PVRE(PVRSRV_ERROR_UNABLE_TO_UNINSTALL_ISR)
+PVRE(PVRSRV_ERROR_ISR_ALREADY_INSTALLED)
+PVRE(PVRSRV_ERROR_ISR_NOT_INSTALLED)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INITIALISE_INTERRUPT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_RETRIEVE_INFO)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DO_BACKWARDS_BLIT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CLOSE_SERVICES)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_CONTEXT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE)
+PVRE(PVRSRV_ERROR_INVALID_CCB_COMMAND)
+PVRE(PVRSRV_ERROR_KERNEL_CCB_FULL)
+PVRE(PVRSRV_ERROR_FLIP_FAILED)
+PVRE(PVRSRV_ERROR_UNBLANK_DISPLAY_FAILED)
+PVRE(PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE)
+PVRE(PVRSRV_ERROR_TIMEOUT_WAITING_FOR_CLIENT_CCB)
+PVRE(PVRSRV_ERROR_CREATE_RENDER_CONTEXT_FAILED)
+PVRE(PVRSRV_ERROR_UNKNOWN_PRIMARY_FRAG)
+PVRE(PVRSRV_ERROR_UNEXPECTED_SECONDARY_FRAG)
+PVRE(PVRSRV_ERROR_UNEXPECTED_PRIMARY_FRAG)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INSERT_FENCE_ID)
+PVRE(PVRSRV_ERROR_BLIT_SETUP_FAILED)
+PVRE(PVRSRV_ERROR_SUBMIT_NEEDED)
+PVRE(PVRSRV_ERROR_PDUMP_NOT_AVAILABLE)
+PVRE(PVRSRV_ERROR_PDUMP_BUFFER_FULL)
+PVRE(PVRSRV_ERROR_PDUMP_BUF_OVERFLOW)
+PVRE(PVRSRV_ERROR_PDUMP_NOT_ACTIVE)
+PVRE(PVRSRV_ERROR_INCOMPLETE_LINE_OVERLAPS_PAGES)
+PVRE(PVRSRV_ERROR_MUTEX_DESTROY_FAILED)
+PVRE(PVRSRV_ERROR_MUTEX_INTERRUPTIBLE_ERROR)
+PVRE(PVRSRV_ERROR_INSUFFICIENT_SPACE_FOR_COMMAND)
+PVRE(PVRSRV_ERROR_PROCESS_NOT_INITIALISED)
+PVRE(PVRSRV_ERROR_PROCESS_NOT_FOUND)
+PVRE(PVRSRV_ERROR_SRV_CONNECT_FAILED)
+PVRE(PVRSRV_ERROR_SRV_DISCONNECT_FAILED)
+PVRE(PVRSRV_ERROR_DEINT_PHASE_FAILED)
+PVRE(PVRSRV_ERROR_INIT2_PHASE_FAILED)
+PVRE(PVRSRV_ERROR_UNABLE_TO_FIND_RESOURCE)
+PVRE(PVRSRV_ERROR_NO_DC_DEVICES_FOUND)
+PVRE(PVRSRV_ERROR_DC_DEVICE_INACCESSIBLE)
+PVRE(PVRSRV_ERROR_DC_INVALID_MAXDEPTH)
+PVRE(PVRSRV_ERROR_UNABLE_TO_OPEN_DC_DEVICE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_UNREGISTER_DEVICE)
+PVRE(PVRSRV_ERROR_NO_DEVICEDATA_FOUND)
+PVRE(PVRSRV_ERROR_NO_DEVICENODE_FOUND)
+PVRE(PVRSRV_ERROR_NO_CLIENTNODE_FOUND)
+PVRE(PVRSRV_ERROR_FAILED_TO_PROCESS_QUEUE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INIT_TASK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_SCHEDULE_TASK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_KILL_TASK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_TIMER)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DISABLE_TIMER)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REMOVE_TIMER)
+PVRE(PVRSRV_ERROR_UNKNOWN_PIXEL_FORMAT)
+PVRE(PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE)
+PVRE(PVRSRV_ERROR_HANDLE_NOT_ALLOCATED)
+PVRE(PVRSRV_ERROR_HANDLE_TYPE_MISMATCH)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE)
+PVRE(PVRSRV_ERROR_HANDLE_NOT_SHAREABLE)
+PVRE(PVRSRV_ERROR_HANDLE_NOT_FOUND)
+PVRE(PVRSRV_ERROR_INVALID_SUBHANDLE)
+PVRE(PVRSRV_ERROR_HANDLE_BATCH_IN_USE)
+PVRE(PVRSRV_ERROR_HANDLE_BATCH_COMMIT_FAILURE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_RETRIEVE_HASH_VALUE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REMOVE_HASH_VALUE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INSERT_HASH_VALUE)
+PVRE(PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_BACKING_STORE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_BM_HEAP)
+PVRE(PVRSRV_ERROR_UNKNOWN_INIT_SERVER_STATE)
+PVRE(PVRSRV_ERROR_NO_FREE_DEVICEIDS_AVAILABLE)
+PVRE(PVRSRV_ERROR_INVALID_DEVICEID)
+PVRE(PVRSRV_ERROR_DEVICEID_NOT_FOUND)
+PVRE(PVRSRV_ERROR_MEMORY_TEST_FAILED)
+PVRE(PVRSRV_ERROR_CPUPADDR_TEST_FAILED)
+PVRE(PVRSRV_ERROR_COPY_TEST_FAILED)
+PVRE(PVRSRV_ERROR_SEMAPHORE_NOT_INITIALISED)
+PVRE(PVRSRV_ERROR_UNABLE_TO_RELEASE_CLOCK)
+PVRE(PVRSRV_ERROR_CLOCK_REQUEST_FAILED)
+PVRE(PVRSRV_ERROR_DISABLE_CLOCK_FAILURE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_SET_CLOCK_RATE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_GET_CLOCK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_GET_PARENT_CLOCK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_GET_SYSTEM_CLOCK)
+PVRE(PVRSRV_ERROR_UNKNOWN_SGL_ERROR)
+PVRE(PVRSRV_ERROR_SYSTEM_POWER_CHANGE_FAILURE)
+PVRE(PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE)
+PVRE(PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED)
+PVRE(PVRSRV_ERROR_BAD_SYNC_STATE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_MMUCONTEXT_ID)
+PVRE(PVRSRV_ERROR_PARAMETER_BUFFER_INVALID_ALIGNMENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ACQUIRE_CONNECTION)
+PVRE(PVRSRV_ERROR_UNABLE_TO_RELEASE_CONNECTION)
+PVRE(PVRSRV_ERROR_PHYSHEAP_ID_IN_USE)
+PVRE(PVRSRV_ERROR_PHYSHEAP_ID_INVALID)
+PVRE(PVRSRV_ERROR_PHYSHEAP_CONFIG)
+PVRE(PVRSRV_ERROR_HP_REQUEST_TOO_LONG)
+PVRE(PVRSRV_ERROR_INVALID_SYNC_PRIM)
+PVRE(PVRSRV_ERROR_INVALID_SYNC_PRIM_OP)
+PVRE(PVRSRV_ERROR_INVALID_SYNC_CONTEXT)
+PVRE(PVRSRV_ERROR_BP_NOT_SET)
+PVRE(PVRSRV_ERROR_BP_ALREADY_SET)
+PVRE(PVRSRV_ERROR_FEATURE_DISABLED)
+PVRE(PVRSRV_ERROR_REG_CONFIG_ENABLED)
+PVRE(PVRSRV_ERROR_REG_CONFIG_FULL)
+PVRE(PVRSRV_ERROR_REG_CONFIG_INVALID_TYPE)
+PVRE(PVRSRV_ERROR_MEMORY_ACCESS)
+PVRE(PVRSRV_ERROR_NO_SYSTEM_BUFFER)
+PVRE(PVRSRV_ERROR_DC_INVALID_CONFIG)
+PVRE(PVRSRV_ERROR_DC_INVALID_CROP_RECT)
+PVRE(PVRSRV_ERROR_DC_INVALID_DISPLAY_RECT)
+PVRE(PVRSRV_ERROR_DC_INVALID_BUFFER_DIMS)
+PVRE(PVRSRV_ERROR_DC_INVALID_TRANSFORM)
+PVRE(PVRSRV_ERROR_DC_INVALID_SCALE)
+PVRE(PVRSRV_ERROR_DC_INVALID_CUSTOM)
+PVRE(PVRSRV_ERROR_DC_TOO_MANY_PIPES)
+PVRE(PVRSRV_ERROR_DC_INVALID_PLANE_ALPHA)
+PVRE(PVRSRV_ERROR_NOT_READY)
+PVRE(PVRSRV_ERROR_RESOURCE_UNAVAILABLE)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_PIXEL_FORMAT)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_MEMORY_LAYOUT)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_FB_COMPRESSION_MODE)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_DIMS)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ADD_TIMER)
+PVRE(PVRSRV_ERROR_NOT_FOUND)
+PVRE(PVRSRV_ERROR_ALREADY_OPEN)
+PVRE(PVRSRV_ERROR_STREAM_MISUSE)
+PVRE(PVRSRV_ERROR_STREAM_FULL)
+PVRE(PVRSRV_ERROR_STREAM_READLIMIT_REACHED)
+PVRE(PVRSRV_ERROR_STREAM_NOT_ENOUGH_SPACE)
+PVRE(PVRSRV_ERROR_PHYSMEM_NOT_ALLOCATED)
+PVRE(PVRSRV_ERROR_PBSIZE_ALREADY_MAX)
+PVRE(PVRSRV_ERROR_PBSIZE_ALREADY_MIN)
+PVRE(PVRSRV_ERROR_INVALID_PB_CONFIG)
+PVRE(PVRSRV_ERROR_META_THREAD0_NOT_ENABLED)
+PVRE(PVRSRV_ERROR_NOT_AUTHENTICATED)
+PVRE(PVRSRV_ERROR_REQUEST_TDFWMEM_PAGES_FAIL)
+PVRE(PVRSRV_ERROR_INIT_TDFWMEM_PAGES_FAIL)
+PVRE(PVRSRV_ERROR_REQUEST_TDSECUREBUF_PAGES_FAIL)
+PVRE(PVRSRV_ERROR_INIT_TDSECUREBUF_PAGES_FAIL)
+PVRE(PVRSRV_ERROR_MUTEX_ALREADY_CREATED)
+PVRE(PVRSRV_ERROR_DBGTABLE_ALREADY_REGISTERED)
+PVRE(PVRSRV_ERROR_ALREADY_EXISTS)
+PVRE(PVRSRV_ERROR_UNABLE_TO_SEND_PULSE)
+PVRE(PVRSRV_ERROR_TASK_FAILED)
+PVRE(PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+PVRE(PVRSRV_ERROR_INVALID_GPU_ADDR)
+PVRE(PVRSRV_ERROR_INVALID_OFFSET)
+PVRE(PVRSRV_ERROR_CCCB_STALLED)
+PVRE(PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE)
+PVRE(PVRSRV_ERROR_NOT_ENABLED)
+PVRE(PVRSRV_ERROR_SYSTEM_LOCAL_MEMORY_INIT_FAIL)
+PVRE(PVRSRV_ERROR_FW_IMAGE_MISMATCH)
+PVRE(PVRSRV_ERROR_PDUMP_NOT_ALLOWED)
+PVRE(PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL)
+PVRE(PVRSRV_ERROR_RPM_PBSIZE_ALREADY_MAX)
+PVRE(PVRSRV_ERROR_NONZERO_REFCOUNT)
+PVRE(PVRSRV_ERROR_SETAFFINITY_FAILED)
+PVRE(PVRSRV_ERROR_UNABLE_TO_COMPILE_PDS)
+PVRE(PVRSRV_ERROR_INTERNAL_ERROR)
+PVRE(PVRSRV_ERROR_BRIDGE_EFAULT)
+PVRE(PVRSRV_ERROR_BRIDGE_EINVAL)
+PVRE(PVRSRV_ERROR_BRIDGE_ENOMEM)
+PVRE(PVRSRV_ERROR_BRIDGE_ERANGE)
+PVRE(PVRSRV_ERROR_BRIDGE_EPERM)
+PVRE(PVRSRV_ERROR_BRIDGE_ENOTTY)
+PVRE(PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)
+PVRE(PVRSRV_ERROR_PROBE_DEFER)
+PVRE(PVRSRV_ERROR_INVALID_ALIGNMENT)
+PVRE(PVRSRV_ERROR_CLOSE_FAILED)
+PVRE(PVRSRV_ERROR_NOT_INITIALISED)
+PVRE(PVRSRV_ERROR_CONVERSION_FAILED)
+PVRE(PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL)
+PVRE(PVRSRV_ERROR_RA_REQUEST_VIRT_ADDR_FAIL)
+PVRE(PVRSRV_ERROR_RA_INSERT_RESOURCE_SPAN_FAILED)
+PVRE(PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED)
+PVRE(PVRSRV_ERROR_OBJECT_STILL_REFERENCED)
+PVRE(PVRSRV_ERROR_BVNC_UNSUPPORTED)
+PVRE(PVRSRV_ERROR_INVALID_BVNC_PARAMS)
+PVRE(PVRSRV_ERROR_ALIGNMENT_ARRAY_NOT_AVAILABLE)
+PVRE(PVRSRV_ERROR_DEVICEMEM_ADDITIONAL_HEAPS_IN_CONTEXT)
+PVRE(PVRSRV_ERROR_PID_ALREADY_REGISTERED)
+PVRE(PVRSRV_ERROR_PID_NOT_REGISTERED)
+PVRE(PVRSRV_ERROR_SIGNAL_FAILED)
+PVRE(PVRSRV_ERROR_INVALID_NOTIF_STREAM)
+PVRE(PVRSRV_ERROR_INVALID_SPU_MASK)
+PVRE(PVRSRV_ERROR_FREELIST_RECONSTRUCTION_FAILED)
+PVRE(PVRSRV_ERROR_INVALID_PVZ_CONFIG)
+PVRE(PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED)
+PVRE(PVRSRV_ERROR_NOT_SW_TIMELINE)
+PVRE(PVRSRV_ERROR_SW_TIMELINE_AT_LATEST_POINT)
+PVRE(PVRSRV_ERROR_INVALID_PVZ_OSID)
+PVRE(PVRSRV_ERROR_PVZ_OSID_IS_OFFLINE)
+PVRE(PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG)
+PVRE(PVRSRV_ERROR_INTERRUPTED)
+PVRE(PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED)
+PVRE(PVRSRV_ERROR_PDUMP_INVALID_BLOCKLEN)
+PVRE(PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF)
+PVRE(PVRSRV_ERROR_MULTIPLE_SECURITY_PDUMPS)
+PVRE(PVRSRV_ERROR_BAD_PARAM_SIZE)
+PVRE(PVRSRV_ERROR_INVALID_REQUEST)
+PVRE(PVRSRV_ERROR_FAILED_TO_ACQUIRE_PAGES)
+PVRE(PVRSRV_ERROR_TEST_FAILED)
+PVRE(PVRSRV_ERROR_SYNC_PRIM_OP_NOT_SUPPORTED)
+PVRE(PVRSRV_ERROR_FAILED_TO_GET_VIRT_ADDR)
+PVRE(PVRSRV_ERROR_UNABLE_TO_FREE_RESOURCE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_SEMAPHORE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_SEMAPHORE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_SEMAPHORE)
+PVRE(PVRSRV_ERROR_TOO_MANY_SYNCS)
+PVRE(PVRSRV_ERROR_ION_NO_CLIENT)
+PVRE(PVRSRV_ERROR_ION_FAILED_TO_ALLOC)
+PVRE(PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE)
+PVRE(PVRSRV_ERROR_REFCOUNT_OVERFLOW)
+PVRE(PVRSRV_ERROR_OUT_OF_RANGE)
diff --git a/drivers/gpu/drm/img/img-rogue/include/pvrsrv_memalloc_physheap.h b/drivers/gpu/drm/img/img-rogue/include/pvrsrv_memalloc_physheap.h
new file mode 100644 (file)
index 0000000..1072ba8
--- /dev/null
@@ -0,0 +1,170 @@
+/*************************************************************************/ /*!
+@File           pvrsrv_memalloc_physheap.h
+@Title          Services Phys Heap types
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Used in creating and allocating from Physical Heaps.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef PVRSRV_MEMALLOC_PHYSHEAP_H
+#define PVRSRV_MEMALLOC_PHYSHEAP_H
+
+#include "img_defs.h"
+
+/*
+ * These IDs are replicated in the Device Memory allocation flags to allow
+ * allocations to be made in terms of their locality/use to ensure the correct
+ * physical heap is accessed for the given system/platform configuration.
+ * A system Phys Heap Config is linked to one or more Phys Heaps. When a heap
+ * is not present in the system configuration, the allocation will fall back
+ * to the default GPU_LOCAL physical heap, which all systems must define.
+ * See PVRSRV_MEMALLOCFLAGS_*_MAPPABLE_MASK.
+ *
+ * NOTE: Enum order is important; the table in physheap.c must change if the
+ * order changes.
+ */
+typedef IMG_UINT32 PVRSRV_PHYS_HEAP;
+/* Services client accessible heaps */
+#define PVRSRV_PHYS_HEAP_DEFAULT      0U  /* default phys heap for device memory allocations */
+#define PVRSRV_PHYS_HEAP_GPU_LOCAL    1U  /* used for buffers with more GPU access than CPU */
+#define PVRSRV_PHYS_HEAP_CPU_LOCAL    2U  /* used for buffers with more CPU access than GPU */
+#define PVRSRV_PHYS_HEAP_GPU_PRIVATE  3U  /* used for buffers that only require GPU read/write access and are not visible to the CPU */
+
+#define HEAPSTR(x) #x
+static inline const IMG_CHAR *PVRSRVGetClientPhysHeapName(PVRSRV_PHYS_HEAP ePhysHeapID)
+{
+       switch (ePhysHeapID)
+       {
+               case PVRSRV_PHYS_HEAP_DEFAULT:
+                       return HEAPSTR(PVRSRV_PHYS_HEAP_DEFAULT);
+               case PVRSRV_PHYS_HEAP_GPU_LOCAL:
+                       return HEAPSTR(PVRSRV_PHYS_HEAP_GPU_LOCAL);
+               case PVRSRV_PHYS_HEAP_CPU_LOCAL:
+                       return HEAPSTR(PVRSRV_PHYS_HEAP_CPU_LOCAL);
+               case PVRSRV_PHYS_HEAP_GPU_PRIVATE:
+                       return HEAPSTR(PVRSRV_PHYS_HEAP_GPU_PRIVATE);
+               default:
+                       return "Unknown Heap";
+       }
+}
+
+/* Services internal heaps */
+#define PVRSRV_PHYS_HEAP_FW_MAIN      4U  /* runtime data, e.g. CCBs, sync objects */
+#define PVRSRV_PHYS_HEAP_EXTERNAL     5U  /* used by some PMR import/export factories where the physical memory heap is not managed by the pvrsrv driver */
+#define PVRSRV_PHYS_HEAP_GPU_COHERENT 6U  /* used for a cache coherent region */
+#define PVRSRV_PHYS_HEAP_GPU_SECURE   7U  /* used by security validation */
+#define PVRSRV_PHYS_HEAP_FW_CONFIG    8U  /* subheap of FW_MAIN, configuration data for FW init */
+#define PVRSRV_PHYS_HEAP_FW_CODE      9U  /* used by security validation or dedicated fw */
+#define PVRSRV_PHYS_HEAP_FW_PRIV_DATA 10U /* internal FW data (like the stack, FW control data structures, etc.) */
+#define PVRSRV_PHYS_HEAP_FW_PREMAP0   11U /* Host OS premap fw heap */
+#define PVRSRV_PHYS_HEAP_FW_PREMAP1   12U /* Guest OS 1 premap fw heap */
+#define PVRSRV_PHYS_HEAP_FW_PREMAP2   13U /* Guest OS 2 premap fw heap */
+#define PVRSRV_PHYS_HEAP_FW_PREMAP3   14U /* Guest OS 3 premap fw heap */
+#define PVRSRV_PHYS_HEAP_FW_PREMAP4   15U /* Guest OS 4 premap fw heap */
+#define PVRSRV_PHYS_HEAP_FW_PREMAP5   16U /* Guest OS 5 premap fw heap */
+#define PVRSRV_PHYS_HEAP_FW_PREMAP6   17U /* Guest OS 6 premap fw heap */
+#define PVRSRV_PHYS_HEAP_FW_PREMAP7   18U /* Guest OS 7 premap fw heap */
+#define PVRSRV_PHYS_HEAP_LAST         19U
+
+
+static_assert(PVRSRV_PHYS_HEAP_LAST <= (0x1FU + 1U), "Ensure enum fits in memalloc flags bitfield.");
+
+/*! Type conveys the class of physical heap to instantiate within Services
+ * for the physical pool of memory. */
+typedef enum _PHYS_HEAP_TYPE_
+{
+       PHYS_HEAP_TYPE_UNKNOWN = 0,     /*!< Not a valid value for any config */
+       PHYS_HEAP_TYPE_UMA,             /*!< Heap represents OS managed physical memory heap
+                                            i.e. system RAM. Unified Memory Architecture
+                                            physmem_osmem PMR factory */
+       PHYS_HEAP_TYPE_LMA,             /*!< Heap represents physical memory pool managed by
+                                            Services i.e. carve out from system RAM or local
+                                            card memory. Local Memory Architecture
+                                            physmem_lma PMR factory */
+#if defined(__KERNEL__)
+       PHYS_HEAP_TYPE_DMA,             /*!< Heap represents a physical memory pool managed by
+                                            Services, alias of LMA and is only used on
+                                            VZ non-native system configurations for
+                                            a heap used for PHYS_HEAP_USAGE_FW_MAIN tagged
+                                            buffers */
+#if defined(SUPPORT_WRAP_EXTMEMOBJECT)
+       PHYS_HEAP_TYPE_WRAP,            /*!< Heap used to group UM buffers given
+                                            to Services. Integrity OS port only. */
+#endif
+#endif
+} PHYS_HEAP_TYPE;
+
+/* Defines used when interpreting the ui32PhysHeapFlags in PHYS_HEAP_MEM_STATS:
+     bits 0-2 = heap type (000 = PHYS_HEAP_TYPE_UNKNOWN,
+                           001 = PHYS_HEAP_TYPE_UMA,
+                           010 = PHYS_HEAP_TYPE_LMA,
+                           011 = PHYS_HEAP_TYPE_DMA)
+     bit 7    = is this the default heap? (1=yes, 0=no)
+*/
+#define PVRSRV_PHYS_HEAP_FLAGS_TYPE_MASK  (0x7U << 0)
+#define PVRSRV_PHYS_HEAP_FLAGS_IS_DEFAULT (0x1U << 7)
+
+typedef struct PHYS_HEAP_MEM_STATS_TAG
+{
+       IMG_UINT64      ui64TotalSize;
+       IMG_UINT64      ui64FreeSize;
+       IMG_UINT32      ui32PhysHeapFlags;
+}PHYS_HEAP_MEM_STATS, *PHYS_HEAP_MEM_STATS_PTR;
+
+typedef struct PHYS_HEAP_MEM_STATS_PKD_TAG
+{
+       IMG_UINT64      ui64TotalSize;
+       IMG_UINT64      ui64FreeSize;
+       IMG_UINT32      ui32PhysHeapFlags;
+       IMG_UINT32      ui32Dummy;
+}PHYS_HEAP_MEM_STATS_PKD, *PHYS_HEAP_MEM_STATS_PKD_PTR;
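+
+/* Usage sketch (illustrative only, not part of the Services API): decoding
+ * the ui32PhysHeapFlags word of a PHYS_HEAP_MEM_STATS entry with the masks
+ * above. The sStats variable is hypothetical and assumed to have been
+ * filled in by a stats query.
+ *
+ *   PHYS_HEAP_MEM_STATS sStats;
+ *   PHYS_HEAP_TYPE      eType;
+ *   IMG_BOOL            bIsDefault;
+ *
+ *   eType      = (PHYS_HEAP_TYPE)(sStats.ui32PhysHeapFlags &
+ *                                 PVRSRV_PHYS_HEAP_FLAGS_TYPE_MASK);
+ *   bIsDefault = (sStats.ui32PhysHeapFlags &
+ *                 PVRSRV_PHYS_HEAP_FLAGS_IS_DEFAULT) != 0U;
+ */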
+
+static inline const IMG_CHAR *PVRSRVGetClientPhysHeapTypeName(PHYS_HEAP_TYPE ePhysHeapType)
+{
+       switch (ePhysHeapType)
+       {
+               case PHYS_HEAP_TYPE_UMA:
+                       return HEAPSTR(PHYS_HEAP_TYPE_UMA);
+               case PHYS_HEAP_TYPE_LMA:
+                       return HEAPSTR(PHYS_HEAP_TYPE_LMA);
+               default:
+                       return "Unknown Heap Type";
+       }
+}
+#undef HEAPSTR
+
+#endif /* PVRSRV_MEMALLOC_PHYSHEAP_H */
diff --git a/drivers/gpu/drm/img/img-rogue/include/pvrsrv_memallocflags.h b/drivers/gpu/drm/img/img-rogue/include/pvrsrv_memallocflags.h
new file mode 100644 (file)
index 0000000..3b87dbf
--- /dev/null
@@ -0,0 +1,969 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This file defines flags used on memory allocations and mappings
+                These flags are relevant throughout the memory management
+                software stack and are specified by users of services and
+                understood by all levels of the memory management in both
+                client and server.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVRSRV_MEMALLOCFLAGS_H
+#define PVRSRV_MEMALLOCFLAGS_H
+
+#include "img_types.h"
+#include "pvrsrv_memalloc_physheap.h"
+
+/*!
+  Type for specifying memory allocation flags.
+ */
+
+typedef IMG_UINT64 PVRSRV_MEMALLOCFLAGS_T;
+#define PVRSRV_MEMALLOCFLAGS_FMTSPEC IMG_UINT64_FMTSPECx
+
+#if defined(__KERNEL__)
+#include "pvrsrv_memallocflags_internal.h"
+#endif /* __KERNEL__ */
+
+/*
+ * --- MAPPING FLAGS      0..14 (15-bits) ---
+ * | 0-3    | 4-7    | 8-10        | 11-13       | 14          |
+ * | GPU-RW | CPU-RW | GPU-Caching | CPU-Caching | KM-Mappable |
+ *
+ * --- MISC FLAGS         15..23 (9-bits) ---
+ * | 15    | 17  | 18                | 19              | 20               |
+ * | Defer | SVM | Sparse-Dummy-Page | CPU-Cache-Clean | Sparse-Zero-Page |
+ *
+ * --- DEV CONTROL FLAGS  26..27 (2-bits) ---
+ * | 26-27        |
+ * | Device-Flags |
+ *
+ * --- MISC FLAGS         28..31 (4-bits) ---
+ * | 28             | 29             | 30          | 31            |
+ * | No-Cache-Align | Poison-On-Free | P.-On-Alloc | Zero-On-Alloc |
+ *
+ * --- VALIDATION FLAGS ---
+ * | 35             |
+ * | Shared-buffer  |
+ *
+ * --- PHYS HEAP HINTS ---
+ * | 59-63          |
+ * | PhysHeap Hints |
+ *
+ */
+
+/*
+ *  **********************************************************
+ *  *                                                        *
+ *  *                      MAPPING FLAGS                     *
+ *  *                                                        *
+ *  **********************************************************
+ */
+
+/*!
+ * This flag affects the device MMU protection flags, and specifies
+ * that the memory may be read by the GPU.
+ *
+ * Typically all device memory allocations would specify this flag.
+ *
+ * At the moment, memory allocations without this flag are not supported.
+ *
+ * This flag will live with the PMR, thus subsequent mappings would
+ * honour this flag.
+ *
+ * This is a dual purpose flag.  It specifies that memory is permitted
+ * to be read by the GPU, and also requests that the allocation is
+ * mapped into the GPU as a readable mapping
+ *
+ * To be clear:
+ * - When used as an argument on PMR creation; it specifies
+ *       that GPU readable mappings will be _permitted_
+ * - When used as an argument to a "map" function: it specifies
+ *       that a GPU readable mapping is _desired_
+ * - When used as an argument to "AllocDeviceMem": it specifies
+ *       that the PMR will be created with permission to be mapped
+ *       with a GPU readable mapping, _and_ that this PMR will be
+ *       mapped with a GPU readable mapping.
+ * This distinction becomes important when (a) we export allocations;
+ * and (b) when we separate the creation of the PMR from the mapping.
+ */
+#define PVRSRV_MEMALLOCFLAG_GPU_READABLE               (1ULL<<0)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_READABLE flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_GPU_READABLE(uiFlags)             (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_READABLE) != 0U)
+
+/*!
+ * This flag affects the device MMU protection flags, and specifies
+ * that the memory may be written by the GPU
+ *
+ * Using this flag on an allocation signifies that the allocation is
+ * intended to be written by the GPU.
+ *
+ * Omitting this flag causes a read-only mapping.
+ *
+ * This flag will live with the PMR, thus subsequent mappings would
+ * honour this flag.
+ *
+ * This is a dual purpose flag.  It specifies that memory is permitted
+ * to be written by the GPU, and also requests that the allocation is
+ * mapped into the GPU as a writable mapping (see note above about
+ * permission vs. mapping mode, and why this flag causes permissions
+ * to be inferred from mapping mode on first allocation)
+ *
+ * N.B.  This flag has no relevance to the CPU's MMU mapping, if any,
+ * and would therefore not enforce read-only mapping on CPU.
+ */
+#define PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE       (1ULL<<1)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_GPU_WRITEABLE(uiFlags)                            (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE) != 0U)
+
+/*!
+  The flag indicates whether an allocation can be mapped as GPU readable in another GPU memory context.
+ */
+#define PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED  (1ULL<<2)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_GPU_READ_PERMITTED(uiFlags)               (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED) != 0U)
+
+/*!
+  The flag indicates whether an allocation can be mapped as GPU writable in another GPU memory context.
+ */
+#define PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED (1ULL<<3)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_GPU_WRITE_PERMITTED(uiFlags)              (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED) != 0U)
+
+/*!
+  The flag indicates that an allocation is mapped as readable to the CPU.
+ */
+#define PVRSRV_MEMALLOCFLAG_CPU_READABLE        (1ULL<<4)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_READABLE flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_CPU_READABLE(uiFlags)                             (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_READABLE) != 0U)
+
+/*!
+  The flag indicates that an allocation is mapped as writable to the CPU.
+ */
+#define PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE       (1ULL<<5)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_CPU_WRITEABLE(uiFlags)                            (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE) != 0U)
+
+/*!
+  The flag indicates whether an allocation can be mapped as CPU readable in another CPU memory context.
+ */
+#define PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED  (1ULL<<6)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_CPU_READ_PERMITTED(uiFlags)               (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED) != 0U)
+
+/*!
+  The flag indicates whether an allocation can be mapped as CPU writable in another CPU memory context.
+ */
+#define PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED (1ULL<<7)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_CPU_WRITE_PERMITTED(uiFlags)              (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED) != 0U)
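+
+/* Usage sketch (illustrative only): composing mapping flags for a buffer
+ * that the calling context maps GPU and CPU read/write, and that another
+ * GPU memory context is additionally permitted to map as GPU readable
+ * (e.g. when the allocation is exported). uiFlags is a hypothetical local.
+ *
+ *   PVRSRV_MEMALLOCFLAGS_T uiFlags = PVRSRV_MEMALLOCFLAG_GPU_READABLE  |
+ *                                    PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ *                                    PVRSRV_MEMALLOCFLAG_CPU_READABLE  |
+ *                                    PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ *                                    PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED;
+ *
+ *   PVRSRV_CHECK_GPU_READABLE(uiFlags)         -> true
+ *   PVRSRV_CHECK_GPU_READ_PERMITTED(uiFlags)   -> true
+ *   PVRSRV_CHECK_GPU_WRITE_PERMITTED(uiFlags)  -> false
+ */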
+
+
+/*
+ *  **********************************************************
+ *  *                                                        *
+ *  *                   CACHE CONTROL FLAGS                  *
+ *  *                                                        *
+ *  **********************************************************
+ */
+
+/*
+       GPU domain
+       ==========
+
+       The following defines are used to control the GPU cache bit field.
+       The defines are mutually exclusive.
+
+       A helper macro, PVRSRV_GPU_CACHE_MODE, is provided to obtain just the GPU
+       cache bit field from the flags. This should be used whenever the GPU cache
+       mode needs to be determined.
+*/
+
+/*!
+  GPU domain. Flag indicating uncached memory. This means that any writes to memory
+  allocated with this flag are written straight to memory and thus are
+  coherent for any device in the system.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_UNCACHED                               (1ULL<<8)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_UNCACHED mode is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the mode is set, false otherwise
+ */
+#define PVRSRV_CHECK_GPU_UNCACHED(uiFlags)                             (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_UNCACHED)
+
+/*!
+   GPU domain. Use write combiner (if supported) to combine sequential writes
+   together to reduce memory access by doing burst writes.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC                    (0ULL<<8)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC mode is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the mode is set, false otherwise
+ */
+#define PVRSRV_CHECK_GPU_WRITE_COMBINE(uiFlags)                        (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC)
+
+/*!
+    GPU domain. This flag affects the GPU MMU protection flags.
+    The allocation will be cached.
+    Services will try to set the coherent bit in the GPU MMU tables so the
+    GPU cache snoops the CPU cache. If coherency is not supported, the
+    caller is responsible for ensuring the caches are up to date.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT                 (2ULL<<8)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT mode is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the mode is set, false otherwise
+ */
+#define PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags)               (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT)
+
+/*!
+   GPU domain. Request cached memory, but not coherent (i.e. no cache
+   snooping). Services will flush the GPU internal caches after every GPU
+   task so no cache maintenance requests from the users are necessary.
+
+    Note: We reserve 3 bits in the CPU/GPU cache mode to allow for future
+    expansion.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT               (3ULL<<8)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT mode is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the mode is set, false otherwise
+ */
+#define PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags)             (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT)
+
+/*!
+    GPU domain. This flag is for internal use only and is used to indicate
+    that the underlying allocation should be cached on the GPU after all
+    the snooping and coherent checks have been done
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHED                                 (7ULL<<8)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_CACHED mode is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the mode is set, false otherwise
+ */
+#define PVRSRV_CHECK_GPU_CACHED(uiFlags)                               (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHED)
+
+/*!
+    GPU domain. GPU cache mode mask.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK                        (7ULL<<8)
+
+/*!
+  @Description    A helper macro to obtain just the GPU cache bit field from the flags.
+                  This should be used whenever the GPU cache mode needs to be determined.
+  @Input  uiFlags Allocation flags.
+  @Return         Value of the GPU cache bit field.
+ */
+#define PVRSRV_GPU_CACHE_MODE(uiFlags)                                 ((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK)
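+
+/* Usage sketch (illustrative only): the GPU cache mode values above are
+ * mutually exclusive, so the masked mode is compared as a whole rather
+ * than tested bit-by-bit. uiFlags is a hypothetical flags word.
+ *
+ *   PVRSRV_MEMALLOCFLAGS_T uiFlags = PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ *                                    PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT;
+ *
+ *   PVRSRV_GPU_CACHE_MODE(uiFlags)              -> PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT
+ *   PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags)  -> true
+ *   PVRSRV_CHECK_GPU_WRITE_COMBINE(uiFlags)     -> false
+ *
+ * Note that PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC is the zero value, so a
+ * flags word with no GPU cache mode bits set is treated as write-combined.
+ */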
+
+
+/*
+       CPU domain
+       ==========
+
+       The following defines are used to control the CPU cache bit field.
+       The defines are mutually exclusive.
+
+       A helper macro, PVRSRV_CPU_CACHE_MODE, is provided to obtain just the CPU
+       cache bit field from the flags. This should be used whenever the CPU cache
+       mode needs to be determined.
+*/
+
+/*!
+   CPU domain. Use write combiner (if supported) to combine sequential writes
+   together to reduce memory access by doing burst writes.
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC                    (0ULL<<11)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC mode is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the mode is set, false otherwise
+ */
+#define PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags)                        (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC)
+
+/*!
+    CPU domain. This flag affects the CPU MMU protection flags.
+    The allocation will be cached.
+    Services will try to set the coherent bit in the CPU MMU tables so the
+    CPU cache snoops the GPU cache. If coherency is not supported, the
+    caller is responsible for ensuring the caches are up to date.
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT                 (2ULL<<11)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT mode is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the mode is set, false otherwise
+ */
+#define PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags)               (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT)
+
+/*!
+    CPU domain. Request cached memory, but not coherent (i.e. no cache
+    snooping). This means that if the allocation needs to transition from
+    one device to another, Services has to be informed so it can
+    flush/invalidate the appropriate caches.
+
+    Note: We reserve 3 bits in the CPU/GPU cache mode to allow for future
+    expansion.
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT               (3ULL<<11)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT mode is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the mode is set, false otherwise
+ */
+#define PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags)             (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT)
+
+/*!
+    CPU domain. This flag is for internal use only and is used to indicate
+    that the underlying allocation should be cached on the CPU
+    after all the snooping and coherent checks have been done
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHED                                 (7ULL<<11)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHED mode is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the mode is set, false otherwise
+ */
+#define PVRSRV_CHECK_CPU_CACHED(uiFlags)                               (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHED)
+
+/*!
+       CPU domain. CPU cache mode mask
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK                        (7ULL<<11)
+
+/*!
+  @Description    A helper macro to obtain just the CPU cache bit field from the flags.
+                  This should be used whenever the CPU cache mode needs to be determined.
+  @Input  uiFlags Allocation flags.
+  @Return         Value of the CPU cache bit field.
+ */
+#define PVRSRV_CPU_CACHE_MODE(uiFlags)                                 ((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK)
+
+/* Helper flags for usual cases */
+
+/*!
+ * Memory will be write-combined on CPU and GPU
+ */
+#define PVRSRV_MEMALLOCFLAG_UNCACHED_WC                (PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_UNCACHED_WC mode is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the mode is set, false otherwise
+ */
+#define PVRSRV_CHECK_WRITE_COMBINE(uiFlags)                            (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_UNCACHED_WC)
+
+/*!
+ * Memory will be cached.
+ * Services will try to set the correct flags in the MMU tables.
+ * If there is no coherency support, the caller has to ensure the caches are up to date. */
+#define PVRSRV_MEMALLOCFLAG_CACHE_COHERENT                             (PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT | PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_CACHE_COHERENT mode is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the mode is set, false otherwise
+ */
+#define PVRSRV_CHECK_CACHE_COHERENT(uiFlags)                   (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CACHE_COHERENT)
+
+/*!
+ * Memory will be cache-incoherent on CPU and GPU
+ */
+#define PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT                   (PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT mode is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the mode is set, false otherwise
+ */
+#define PVRSRV_CHECK_CACHE_INCOHERENT(uiFlags)                 (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT)
+
+/*!
+       Cache mode mask
+*/
+#define PVRSRV_CACHE_MODE(uiFlags)                                             (PVRSRV_GPU_CACHE_MODE(uiFlags) | PVRSRV_CPU_CACHE_MODE(uiFlags))
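+
+/* Usage sketch (illustrative only): requesting cached, coherent memory on
+ * both the CPU and the GPU with the combined helper flag above. uiFlags is
+ * a hypothetical flags word.
+ *
+ *   PVRSRV_MEMALLOCFLAGS_T uiFlags = PVRSRV_MEMALLOCFLAG_GPU_READABLE  |
+ *                                    PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ *                                    PVRSRV_MEMALLOCFLAG_CPU_READABLE  |
+ *                                    PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ *                                    PVRSRV_MEMALLOCFLAG_CACHE_COHERENT;
+ *
+ *   PVRSRV_CHECK_CACHE_COHERENT(uiFlags)      -> true
+ *   PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags)  -> true
+ *   PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags)  -> true
+ */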
+
+
+/*!
+   CPU MMU Flags mask -- intended for use internal to services only
+ */
+#define PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK  (PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+                                                                                               PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+                                                                                               PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK)
+
+/*!
+   MMU Flags mask -- intended for use internal to services only - used for
+   partitioning the flags bits and determining which flags to pass down to
+   mmu_common.c
+ */
+#define PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK  (PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
+                                                PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
+                                                PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK)
+
+/*!
+    Indicates that the PMR created due to this allocation will support
+    in-kernel CPU mappings.  Only privileged processes may use this flag as
+    it may cause wastage of precious kernel virtual memory on some platforms.
+ */
+#define PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE                        (1ULL<<14)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_KERNEL_CPU_MAPPABLE(uiFlags)              (((uiFlags) & PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE) != 0U)
+
+
+
+/*
+ *
+ *  **********************************************************
+ *  *                                                        *
+ *  *                   ALLOC MEMORY FLAGS                   *
+ *  *                                                        *
+ *  **********************************************************
+ *
+ * (Bits 15)
+ *
+ */
+#define PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC                        (1ULL<<15)
+#define PVRSRV_CHECK_ON_DEMAND(uiFlags)                                        (((uiFlags) & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) != 0U)
+
+/*!
+    Indicates that the allocation will be accessed by the CPU and GPU using
+    the same virtual address, i.e. for all SVM allocs,
+    IMG_CPU_VIRTADDR == IMG_DEV_VIRTADDR
+ */
+#define PVRSRV_MEMALLOCFLAG_SVM_ALLOC                                  (1ULL<<17)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_SVM_ALLOC flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_SVM_ALLOC(uiFlags)                                        (((uiFlags) & PVRSRV_MEMALLOCFLAG_SVM_ALLOC) != 0U)
+
+/*!
+    Indicates that the memory being allocated is sparse and that the
+    sparse regions should not be backed by the dummy page.
+ */
+#define PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING            (1ULL << 18)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiFlags)               (((uiFlags) & PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING) == 0U)
+
+/*!
+  Used to force Services to carry out at least one CPU cache invalidate on a
+  CPU cached buffer during allocation of the memory. Applicable to incoherent
+  systems, it must be used for buffers which are CPU cached and which will not
+  be fully written by the CPU before the GPU accesses them. For performance
+  reasons, avoid this flag if the whole allocated buffer is written by the
+  CPU before the next GPU kick anyway, or if the system is coherent.
+ */
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN                            (1ULL<<19)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags)                  (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN) != 0U)
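+
+/* Usage sketch (illustrative only): a CPU-cached buffer on an incoherent
+ * system that the CPU will only partially write before the GPU reads it,
+ * so at least one CPU cache invalidate is requested at allocation time.
+ * uiFlags is a hypothetical flags word.
+ *
+ *   PVRSRV_MEMALLOCFLAGS_T uiFlags = PVRSRV_MEMALLOCFLAG_GPU_READABLE         |
+ *                                    PVRSRV_MEMALLOCFLAG_CPU_READABLE         |
+ *                                    PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE        |
+ *                                    PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT |
+ *                                    PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN;
+ *
+ *   PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags)       -> true
+ *   PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags)  -> true
+ */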
+
+/*! PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING
+
+    Indicates that the memory being allocated is sparse and that the
+    sparse regions should be backed by the zero page. This differs from the
+    zero-on-alloc flag in that only physically unbacked pages are backed
+    by the zero page at the time of mapping.
+    The zero-backed page is always mapped read-only, irrespective of the
+    original attributes.
+ */
+#define PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING                        (1ULL << 20)
+#define PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiFlags)                (((uiFlags) & \
+                       PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING) == PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING)
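+
+/* Usage sketch (illustrative only): the two flags above select how the
+ * unbacked regions of a sparse allocation are treated. uiFlags is a
+ * hypothetical flags word.
+ *
+ *   No backing for sparse holes:
+ *     uiFlags |= PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING;
+ *     PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiFlags)  -> false
+ *
+ *   Read-only zero page backing for sparse holes:
+ *     uiFlags |= PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING;
+ *     PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiFlags)   -> true
+ */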
+
+/*!
+  @Description    Macro extracting the OS id from a variable containing memalloc flags
+  @Input uiFlags  Allocation flags
+  @Return         returns the value of the FW_ALLOC_OSID bitfield
+ */
+#define PVRSRV_FW_RAW_ALLOC_OSID(uiFlags)                              (((uiFlags) & PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_MASK) \
+                                                                                                                       >> PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_SHIFT)
+
+/*!
+  @Description    Macro converting an OS id value into a memalloc bitfield
+  @Input uiFlags  OS id
+  @Return         returns a shifted bitfield with the OS id value
+ */
+#define PVRSRV_MEMALLOCFLAG_FW_RAW_ALLOC_OSID(osid)            (((osid) << PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_SHIFT)  \
+                                                                                                                       & PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_MASK)
+
+/*
+ *
+ *  **********************************************************
+ *  *                                                        *
+ *  *           MEMORY ZEROING AND POISONING FLAGS           *
+ *  *                                                        *
+ *  **********************************************************
+ *
+ * Zero / Poison, on alloc/free
+ *
+ * We think the following usecases are required:
+ *
+ *  don't poison or zero on alloc or free
+ *     (normal operation, also most efficient)
+ *  poison on alloc
+ *     (for helping to highlight bugs)
+ *  poison on alloc and free
+ *     (for helping to highlight bugs)
+ *  zero on alloc
+ *     (avoid highlighting security issues in other uses of memory)
+ *  zero on alloc and poison on free
+ *     (avoid highlighting security issues in other uses of memory, while
+ *      helping to highlight a subset of bugs e.g. memory freed prematurely)
+ *
+ * Since there are more than 4, we can't encode this in just two bits,
+ * so we might as well have a separate flag for each of the three
+ * actions.
+ */
+
+/*!
+    Ensures that the memory allocated is initialised with zeroes.
+ */
+#define PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC                              (1ULL<<31)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags)                            (((uiFlags) & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) != 0U)
+
+/*!
+    Scribbles over the allocated memory with a poison value
+
+    Not compatible with ZERO_ON_ALLOC
+
+    Poisoning is very deliberately _not_ reflected in PDump as we want
+    a simulation to cry loudly if the initialised data propagates to a
+    result.
+ */
+#define PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC                            (1ULL<<30)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags)                  (((uiFlags) & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC) != 0U)
+
+#if defined(DEBUG) || defined(SERVICES_SC)
+/*!
+    Causes memory to be trashed when freed. For debugging only; not to be
+    used as a security measure.
+ */
+#define PVRSRV_MEMALLOCFLAG_POISON_ON_FREE                             (1ULL<<29)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_POISON_ON_FREE flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_POISON_ON_FREE(uiFlags)                   (((uiFlags) & PVRSRV_MEMALLOCFLAG_POISON_ON_FREE) != 0U)
+#endif /* defined(DEBUG) || defined(SERVICES_SC) */
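+
+/* Usage sketch (illustrative only): the "zero on alloc and poison on free"
+ * use case described above. Note that ZERO_ON_ALLOC and POISON_ON_ALLOC
+ * must not be combined, and POISON_ON_FREE only exists in DEBUG or
+ * SERVICES_SC builds. uiFlags is a hypothetical flags word.
+ *
+ *   PVRSRV_MEMALLOCFLAGS_T uiFlags = PVRSRV_MEMALLOCFLAG_GPU_READABLE  |
+ *                                    PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ *                                    PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+ *
+ *   In DEBUG or SERVICES_SC builds, free-time poisoning can then be added:
+ *     uiFlags |= PVRSRV_MEMALLOCFLAG_POISON_ON_FREE;
+ */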
+
+/*!
+    Avoid address alignment to a CPU or GPU cache line size.
+ */
+#define PVRSRV_MEMALLOCFLAG_NO_CACHE_LINE_ALIGN                        (1ULL<<28)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_CHECK_NO_CACHE_LINE_ALIGN flag is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_NO_CACHE_LINE_ALIGN(uiFlags)              (((uiFlags) & PVRSRV_MEMALLOCFLAG_NO_CACHE_LINE_ALIGN) != 0U)
+
+
+/*
+ *
+ *  **********************************************************
+ *  *                                                        *
+ *  *                Device specific MMU flags               *
+ *  *                                                        *
+ *  **********************************************************
+ *
+ * (Bits 26 to 27)
+ *
+ * Some Services-controlled devices have device specific control bits in
+ * their page table entries; these flags need to be passed down through the
+ * memory management layers so the user can control those bits.
+ * For example, the RGX device defines them in rgx_memallocflags.h.
+ */
+
+/*!
+ * Offset of device specific MMU flags.
+ */
+#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET                26
+
+/*!
+ * Mask for retrieving device specific MMU flags.
+ */
+#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK          (0x3ULL << PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET)
+
+/*!
+  @Description    Helper macro for setting device specific MMU flags.
+  @Input    n     Flag index.
+  @Return         Flag vector with the specified bit set.
+ */
+#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(n)     \
+                       (((PVRSRV_MEMALLOCFLAGS_T)(n) << PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET) & \
+                       PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK)
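+
+/* Usage sketch (illustrative only): passing a device specific MMU bit down
+ * through the memalloc flags. The meaning of each bit index is defined by
+ * the device (e.g. rgx_memallocflags.h for RGX); index 1 here is purely
+ * hypothetical, as is uiFlags.
+ *
+ *   PVRSRV_MEMALLOCFLAGS_T uiFlags = PVRSRV_MEMALLOCFLAG_GPU_READABLE;
+ *
+ *   uiFlags |= PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(1);
+ *
+ *   ((uiFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK)
+ *       >> PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET)  -> 1
+ */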
+
+/*
+ *
+ *  **********************************************************
+ *  *                                                        *
+ *  *                 Secure validation flags                *
+ *  *                                                        *
+ *  **********************************************************
+ *
+ * (Bit 35)
+ *
+ */
+
+/*!
+    PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER
+ */
+
+#define PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER           (1ULL<<35)
+#define PVRSRV_CHECK_SHARED_BUFFER(uiFlags)             (((uiFlags) & PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER) != 0U)
+
+/*
+ *
+ *  **********************************************************
+ *  *                                                        *
+ *  *                 Phys Heap Hints                        *
+ *  *                                                        *
+ *  **********************************************************
+ *
+ * (Bits 59 to 63)
+ *
+ */
+
+/*!
+ * Value of enum PVRSRV_PHYS_HEAP stored in the memalloc flags. If not set,
+ * i.e. PVRSRV_PHYS_HEAP_DEFAULT (value 0), the system-layer-defined default physical heap is used.
+ */
+#define PVRSRV_PHYS_HEAP_HINT_SHIFT        (59)
+#define PVRSRV_PHYS_HEAP_HINT_MASK         (0x1FULL << PVRSRV_PHYS_HEAP_HINT_SHIFT)
+
+
+/*!
+  @Description    Macro extracting the Phys Heap hint from memalloc flag value.
+  @Input uiFlags  Allocation flags
+  @Return         returns the value of the PHYS_HEAP_HINT bitfield
+ */
+#define PVRSRV_GET_PHYS_HEAP_HINT(uiFlags)      (((uiFlags) & PVRSRV_PHYS_HEAP_HINT_MASK) \
+                                                 >> PVRSRV_PHYS_HEAP_HINT_SHIFT)
+
+/*!
+  @Description    Macro converting a Phys Heap value into a memalloc bitfield
+  @Input uiFlags  Device Phys Heap
+  @Return         returns a shifted bitfield with the Device Phys Heap value
+ */
+#define PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(PhysHeap)      ((((PVRSRV_MEMALLOCFLAGS_T)PVRSRV_PHYS_HEAP_ ## PhysHeap) << \
+                                                            PVRSRV_PHYS_HEAP_HINT_SHIFT) \
+                                                           & PVRSRV_PHYS_HEAP_HINT_MASK)
+/*!
+  @Description    Macro to replace an existing phys heap hint value in flags.
+  @Input PhysHeap Phys Heap Macro
+  @Input uiFlags  Allocation flags
+  @Return         N/A
+ */
+#define PVRSRV_SET_PHYS_HEAP_HINT(PhysHeap, uiFlags)     (uiFlags) = ((uiFlags) & ~PVRSRV_PHYS_HEAP_HINT_MASK) | \
+                                                           PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(PhysHeap)
+
+/*!
+  @Description    Macro to replace an existing phys heap hint value using Phys Heap value.
+  @Input PhysHeap Phys Heap Value
+  @Input uiFlags  Allocation flags
+  @Return         N/A
+ */
+#define PVRSRV_CHANGE_PHYS_HEAP_HINT(Physheap, uiFlags)          (uiFlags) = ((uiFlags) & ~PVRSRV_PHYS_HEAP_HINT_MASK) | \
+                                                           (((PVRSRV_MEMALLOCFLAGS_T)(Physheap) << \
+                                                            PVRSRV_PHYS_HEAP_HINT_SHIFT) \
+                                                           & PVRSRV_PHYS_HEAP_HINT_MASK)
+
+/*!
+  @Description    Macros checking if a Phys Heap hint is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the hint is set, false otherwise
+ */
+#define PVRSRV_CHECK_PHYS_HEAP(PhysHeap, uiFlags) (PVRSRV_PHYS_HEAP_ ## PhysHeap == PVRSRV_GET_PHYS_HEAP_HINT(uiFlags))
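+
+/* Usage sketch (illustrative only): steering an allocation to a specific
+ * physical heap via the hint bits. uiFlags is a hypothetical flags word.
+ *
+ *   PVRSRV_MEMALLOCFLAGS_T uiFlags = PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ *                                    PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE;
+ *
+ *   PVRSRV_SET_PHYS_HEAP_HINT(GPU_LOCAL, uiFlags);
+ *
+ *   PVRSRV_GET_PHYS_HEAP_HINT(uiFlags)          -> PVRSRV_PHYS_HEAP_GPU_LOCAL
+ *   PVRSRV_CHECK_PHYS_HEAP(GPU_LOCAL, uiFlags)  -> true
+ *
+ * Leaving the hint at PVRSRV_PHYS_HEAP_DEFAULT (0) selects the system
+ * layer's default physical heap.
+ */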
+
+#define PVRSRV_CHECK_FW_MAIN(uiFlags)            (PVRSRV_CHECK_PHYS_HEAP(FW_MAIN, uiFlags) || \
+                                                  PVRSRV_CHECK_PHYS_HEAP(FW_CONFIG, uiFlags) || \
+                                                  PVRSRV_CHECK_PHYS_HEAP(FW_CODE, uiFlags) || \
+                                                  PVRSRV_CHECK_PHYS_HEAP(FW_PRIV_DATA, uiFlags) || \
+                                                  PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP0, uiFlags) || \
+                                                  PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP1, uiFlags) || \
+                                                  PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP2, uiFlags) || \
+                                                  PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP3, uiFlags) || \
+                                                  PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP4, uiFlags) || \
+                                                  PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP5, uiFlags) || \
+                                                  PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP6, uiFlags) || \
+                                                  PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP7, uiFlags))
+
+/*!
+ * Secure buffer mask -- Flags in the mask are allowed for secure buffers
+ * because they are not related to CPU mappings.
+ */
+#define PVRSRV_MEMALLOCFLAGS_SECBUFMASK  ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+                                           PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \
+                                           PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED)
+
+/*!
+ * Trusted device mask -- Flags in the mask are allowed for trusted device
+ * because the driver cannot access the memory
+ */
+#if defined(DEBUG) || defined(SERVICES_SC)
+#define PVRSRV_MEMALLOCFLAGS_TDFWMASK    ~(PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+                                           PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
+                                           PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
+                                           PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \
+                                           PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
+                                           PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING)
+#else
+#define PVRSRV_MEMALLOCFLAGS_TDFWMASK    ~(PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+                                           PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
+                                           PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
+                                           PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
+                                           PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING)
+#endif
+
+/*!
+  PMR flags mask -- for internal services use only.  This is the set of flags
+  that will be passed down and stored with the PMR, this also includes the
+  MMU flags which the PMR has to pass down to mmu_common.c at PMRMap time.
+*/
+#if defined(DEBUG) || defined(SERVICES_SC)
+#define PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK  (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \
+                                            PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \
+                                            PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+                                            PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
+                                            PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \
+                                            PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
+                                            PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \
+                                            PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \
+                                            PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
+                                            PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \
+                                            PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING | \
+                                            PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING | \
+                                            PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER | \
+                                            PVRSRV_PHYS_HEAP_HINT_MASK)
+#else
+#define PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK  (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \
+                                            PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \
+                                            PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+                                            PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
+                                            PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \
+                                            PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
+                                            PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \
+                                            PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
+                                            PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \
+                                            PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING | \
+                                            PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING | \
+                                            PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER | \
+                                            PVRSRV_PHYS_HEAP_HINT_MASK)
+#endif
+
+/*!
+ * CPU mappable mask -- Any flag set in the mask requires memory to be CPU mappable
+ */
+#define PVRSRV_MEMALLOCFLAGS_CPU_MAPPABLE_MASK (PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+                                                PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+                                                PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
+                                                PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
+                                                PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE)
+/*!
+  RA differentiation mask
+
+  for use internal to services
+
+  This is the set of flag bits that determines whether a pair of
+  allocations is permitted to live in the same page table. Allocations
+  whose flags differ in any of these bits are allocated from separate
+  RA Imports and therefore never coexist in the same page.
+  Zeroing and poisoning of memory are special cases: the caller is
+  responsible for setting the sub-allocations to the required values, and
+  differentiating between zeroed and poisoned RA Imports makes no sense
+  because the memory might be reused.
+
+*/
+#if defined(DEBUG) || defined(SERVICES_SC)
+#define PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK (PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK \
+                                                      & \
+                                                      ~(PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC   | \
+                                                        PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
+                                                        PVRSRV_MEMALLOCFLAG_POISON_ON_FREE))
+#else
+#define PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK (PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK \
+                                                      & \
+                                                      ~(PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC   | \
+                                                        PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC))
+#endif
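+
+/* Usage sketch (illustrative only): two hypothetical flag words that differ
+ * only in PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC compare equal under the RA
+ * differentiation mask, so their sub-allocations may share an RA Import.
+ *
+ *   PVRSRV_MEMALLOCFLAGS_T uiFlagsA = PVRSRV_MEMALLOCFLAG_GPU_READABLE;
+ *   PVRSRV_MEMALLOCFLAGS_T uiFlagsB = PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ *                                     PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+ *
+ *   (uiFlagsA & PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK) ==
+ *   (uiFlagsB & PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK)   -> true
+ */
+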
+/*!
+  Flags that affect _allocation_
+*/
+#define PVRSRV_MEMALLOCFLAGS_PERALLOCFLAGSMASK (0xFFFFFFFFU)
+
+/*!
+  Flags that affect _mapping_
+*/
+#define PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK   (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \
+                                                    PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \
+                                                    PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
+                                                    PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \
+                                                    PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \
+                                                    PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING | \
+                                                    PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING)
+
+#if ((~(PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK) & PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK) != 0U)
+#error PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK is not a subset of PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK
+#endif
+
+
+/*!
+  Flags that affect _physical allocations_ in the DevMemX API
+ */
+#if defined(DEBUG) || defined(SERVICES_SC)
+#define PVRSRV_MEMALLOCFLAGS_DEVMEMX_PHYSICAL_MASK (PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
+                                                    PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK | \
+                                                    PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED | \
+                                                    PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED | \
+                                                    PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \
+                                                    PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
+                                                    PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
+                                                    PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \
+                                                    PVRSRV_PHYS_HEAP_HINT_MASK)
+#else
+#define PVRSRV_MEMALLOCFLAGS_DEVMEMX_PHYSICAL_MASK (PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
+                                                    PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK | \
+                                                    PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED | \
+                                                    PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED | \
+                                                    PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \
+                                                    PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
+                                                    PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
+                                                    PVRSRV_PHYS_HEAP_HINT_MASK)
+#endif
+
+/*!
+  Flags that affect _virtual allocations_ in the DevMemX API
+ */
+#define PVRSRV_MEMALLOCFLAGS_DEVMEMX_VIRTUAL_MASK  (PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \
+                                                    PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED | \
+                                                    PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED)
+
+#endif /* PVRSRV_MEMALLOCFLAGS_H */
diff --git a/drivers/gpu/drm/img/img-rogue/include/pvrsrv_memallocflags_internal.h b/drivers/gpu/drm/img/img-rogue/include/pvrsrv_memallocflags_internal.h
new file mode 100644 (file)
index 0000000..4fee3d4
--- /dev/null
@@ -0,0 +1,78 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management allocation flags for internal Services
+                use only
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This file defines flags used on memory allocations and mappings
+                These flags are relevant throughout the memory management
+                software stack and are specified by users of services and
+                understood by all levels of the memory management in the server
+                and in special cases in the client.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVRSRV_MEMALLOCFLAGS_INTERNAL_H
+#define PVRSRV_MEMALLOCFLAGS_INTERNAL_H
+
+/*!
+   CPU domain. Request uncached memory. This means that any writes to memory
+   allocated with this flag are written straight to memory and thus are
+   coherent for any device in the system.
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_UNCACHED (1ULL<<11)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_UNCACHED mode is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the mode is set, false otherwise
+ */
+#define PVRSRV_CHECK_CPU_UNCACHED(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_UNCACHED)
+
+/*!
+ * Memory will be uncached on CPU and GPU
+ */
+#define PVRSRV_MEMALLOCFLAG_UNCACHED (PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED)
+
+/*!
+  @Description    Macro checking whether the PVRSRV_MEMALLOCFLAG_UNCACHED mode is set.
+  @Input  uiFlags Allocation flags.
+  @Return         True if the mode is set, false otherwise
+ */
+#define PVRSRV_CHECK_UNCACHED(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_UNCACHED)
+
+#endif /* PVRSRV_MEMALLOCFLAGS_INTERNAL_H */
diff --git a/drivers/gpu/drm/img/img-rogue/include/pvrsrv_sync_km.h b/drivers/gpu/drm/img/img-rogue/include/pvrsrv_sync_km.h
new file mode 100644 (file)
index 0000000..04611f9
--- /dev/null
@@ -0,0 +1,65 @@
+/*************************************************************************/ /*!
+@File
+@Title         PVR synchronisation interface
+@Copyright     Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description   Types for server side code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef PVRSRV_SYNC_KM_H
+#define PVRSRV_SYNC_KM_H
+
+#include <powervr/pvrsrv_sync_ext.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define SYNC_FB_FILE_STRING_MAX                        256
+#define SYNC_FB_MODULE_STRING_LEN_MAX  (32)
+#define SYNC_FB_DESC_STRING_LEN_MAX    (32)
+
+/* By default, the fence-sync module emits into HWPerf (if enabled) and
+ * assumes a process (sleepable) context */
+#define PVRSRV_FENCE_FLAG_NONE             (0U)
+#define PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT (1U << 0)
+#define PVRSRV_FENCE_FLAG_CTX_ATOMIC       (1U << 1)
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* PVRSRV_SYNC_KM_H */
diff --git a/drivers/gpu/drm/img/img-rogue/include/pvrsrv_tlcommon.h b/drivers/gpu/drm/img/img-rogue/include/pvrsrv_tlcommon.h
new file mode 100644 (file)
index 0000000..28999e5
--- /dev/null
@@ -0,0 +1,260 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services Transport Layer common types and definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Transport layer common types and definitions included into
+                both user mode and kernel mode source.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef PVR_TLCOMMON_H
+#define PVR_TLCOMMON_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+
+
+/*! Handle type for stream descriptor objects as created by this API */
+typedef IMG_HANDLE PVRSRVTL_SD;
+
+/*! Maximum stream name length including the null byte */
+#define PRVSRVTL_MAX_STREAM_NAME_SIZE  40U
+
+/*! Maximum number of streams expected to exist */
+#define PVRSRVTL_MAX_DISCOVERABLE_STREAMS_BUFFER (32*PRVSRVTL_MAX_STREAM_NAME_SIZE)
+
+/*! Packet lengths are always rounded up to a multiple of 8 bytes */
+#define PVRSRVTL_PACKET_ALIGNMENT              8U
+#define PVRSRVTL_ALIGN(x)                              (((x)+PVRSRVTL_PACKET_ALIGNMENT-1U) & ~(PVRSRVTL_PACKET_ALIGNMENT-1U))
+
+
+/*! A packet is made up of a header structure followed by the data bytes.
+ * There are 3 types of packet: normal (has data), data lost and padding,
+ * see packet flags. Header kept small to reduce data overhead.
+ *
+ * If the ORDER of the structure members is changed, please UPDATE the
+ * PVRSRVTL_PACKET_FLAG_OFFSET macro.
+ *
+ * Layout of uiTypeSize member is :
+ *
+ * |<---------------------------32-bits------------------------------>|
+ * |<----8---->|<-----1----->|<----7--->|<------------16------------->|
+ * |    Type   | Drop-Oldest |  UNUSED  |             Size            |
+ *
+ */
+typedef struct
+{
+       IMG_UINT32 uiTypeSize;  /*!< Type, Drop-Oldest flag & number of bytes following header */
+	IMG_UINT32 uiReserved;	/*!< Reserved; packets and data must be 8-byte aligned */
+
+       /* First bytes of TL packet data follow header ... */
+} PVRSRVTL_PACKETHDR, *PVRSRVTL_PPACKETHDR;
+
+/* The structure size must always be a multiple of 8, as the stream buffer is
+ * still an array of IMG_UINT32s.
+ */
+static_assert((sizeof(PVRSRVTL_PACKETHDR) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+                         "sizeof(PVRSRVTL_PACKETHDR) must be a multiple of 8");
+
+/*! Packet header reserved word fingerprint "TLP1" */
+#define PVRSRVTL_PACKETHDR_RESERVED 0x31504C54U
+
+/*! Packet header mask used to extract the size from the uiTypeSize member.
+ * Do not use directly, see GET macros.
+ */
+#define PVRSRVTL_PACKETHDR_SIZE_MASK    0x0000FFFFU
+#define PVRSRVTL_MAX_PACKET_SIZE        (PVRSRVTL_PACKETHDR_SIZE_MASK & ~0xFU)
+
+
+/*! Packet header mask used to extract the type from the uiTypeSize member.
+ * Do not use directly, see GET macros.
+ */
+#define PVRSRVTL_PACKETHDR_TYPE_MASK    0xFF000000U
+#define PVRSRVTL_PACKETHDR_TYPE_OFFSET  24U
+
+/*! Packet header mask used to check if packets before this one were dropped
+ * or not. Do not use directly, see GET macros.
+ */
+#define PVRSRVTL_PACKETHDR_OLDEST_DROPPED_MASK    0x00800000U
+#define PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET    23U
+
+/*! Packet type enumeration.
+ */
+typedef IMG_UINT32 PVRSRVTL_PACKETTYPE;
+
+/*! Undefined packet */
+#define PVRSRVTL_PACKETTYPE_UNDEF 0U
+
+/*! Normal packet type. Indicates data follows the header.
+ */
+#define PVRSRVTL_PACKETTYPE_DATA 1U
+
+/*! When seen, this packet type indicates that at this point in the stream
+ * packet(s) could not be accepted due to space constraints, and that recent
+ * data may have been lost depending on how the producer handled the error.
+ * Such packets carry no data; the data length is 0.
+ */
+#define PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED 2U
+
+/*! Packets with this type set are padding packets that contain undefined
+ * data and must be ignored/skipped by the client. They are used when the
+ * circular stream buffer wraps around and there is not enough space for
+ * the data at the end of the buffer. Such packets have a length of 0 or
+ * more.
+ */
+#define PVRSRVTL_PACKETTYPE_PADDING 3U
+
+/*! This packet type conveys to the stream consumer that the stream
+ * producer has reached the end of data for that data sequence. The
+ * TLDaemon has several options for processing these packets that can
+ * be selected on a per stream basis.
+ */
+#define PVRSRVTL_PACKETTYPE_MARKER_EOS 4U
+
+/*! This is the same as PVRSRVTL_PACKETTYPE_MARKER_EOS but additionally removes
+ * the old data record output file before opening the new/next one.
+ */
+#define PVRSRVTL_PACKETTYPE_MARKER_EOS_REMOVEOLD 5U
+
+/*! Packet emitted on the first stream opened by the writer. The packet carries
+ * the name of the opened stream as a null-terminated string.
+ */
+#define PVRSRVTL_PACKETTYPE_STREAM_OPEN_FOR_WRITE 6U
+
+/*! Packet emitted on the last stream closed by the writer. The packet carries
+ * the name of the closed stream as a null-terminated string.
+ */
+#define PVRSRVTL_PACKETTYPE_STREAM_CLOSE_FOR_WRITE 7U
+
+#define PVRSRVTL_PACKETTYPE_LAST 8U
+
+/* The SET_PACKET_* macros rely on the uiTypeSize layout described above:
+ * the packet type occupies the top 8 bits and the size the bottom 16 bits
+ * of a single 32-bit word.
+ */
+#define PVRSRVTL_SET_PACKET_DATA(len)       (len) | (PVRSRVTL_PACKETTYPE_DATA                     << PVRSRVTL_PACKETHDR_TYPE_OFFSET)
+#define PVRSRVTL_SET_PACKET_PADDING(len)    (len) | (PVRSRVTL_PACKETTYPE_PADDING                  << PVRSRVTL_PACKETHDR_TYPE_OFFSET)
+#define PVRSRVTL_SET_PACKET_WRITE_FAILED    (0U)   | (PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED << PVRSRVTL_PACKETHDR_TYPE_OFFSET)
+#define PVRSRVTL_SET_PACKET_HDR(len, type)  (len) | ((type)                                       << PVRSRVTL_PACKETHDR_TYPE_OFFSET)
+
+/*! Returns the number of bytes of data in the packet.
+ * p may be any address type.
+ */
+#define GET_PACKET_DATA_LEN(p) \
+       ((IMG_UINT32) ((PVRSRVTL_PPACKETHDR) (void *) (p))->uiTypeSize & PVRSRVTL_PACKETHDR_SIZE_MASK)
+
+
+/*! Returns an IMG_BYTE* pointer to the first byte of data in the packet */
+#define GET_PACKET_DATA_PTR(p) \
+       (((IMG_UINT8 *) (void *) (p)) + sizeof(PVRSRVTL_PACKETHDR))
+
+/*! Turns the packet address p into a PVRSRVTL_PPACKETHDR pointer type.
+ */
+#define GET_PACKET_HDR(p)              ((PVRSRVTL_PPACKETHDR) ((void *) (p)))
+
+/*! Given a PVRSRVTL_PPACKETHDR address, return the address of the next packet.
+ *  It is up to the caller to determine if the new address is within the
+ *  packet buffer.
+ */
+#define GET_NEXT_PACKET_ADDR(p) \
+       GET_PACKET_HDR( \
+               GET_PACKET_DATA_PTR(p) + \
+               ( \
+                       (GET_PACKET_DATA_LEN(p) + (PVRSRVTL_PACKET_ALIGNMENT-1U)) & \
+                       (~(PVRSRVTL_PACKET_ALIGNMENT-1U)) \
+               ) \
+       )
+
+/*! Get the type of the packet. p is of type PVRSRVTL_PPACKETHDR.
+ */
+#define GET_PACKET_TYPE(p)             (((p)->uiTypeSize & PVRSRVTL_PACKETHDR_TYPE_MASK)>>PVRSRVTL_PACKETHDR_TYPE_OFFSET)
+
+/*! Set PACKETS_DROPPED flag in packet header as a part of uiTypeSize.
+ * p is of type PVRSRVTL_PPACKETHDR.
+ */
+#define SET_PACKETS_DROPPED(p)         (((p)->uiTypeSize) | (1UL << PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET))
+
+/*! Check if packets were dropped before this packet.
+ * p is of type PVRSRVTL_PPACKETHDR.
+ */
+#define CHECK_PACKETS_DROPPED(p)       (((p)->uiTypeSize & PVRSRVTL_PACKETHDR_OLDEST_DROPPED_MASK)>>PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET)
+
+/*! Flags for use with PVRSRVTLOpenStream
+ * 0x01 - Do not block in PVRSRVTLAcquireData() when no bytes are available
+ * 0x02 - When the stream does not exist wait for a bit (2s) in
+ *        PVRSRVTLOpenStream() and then exit with a timeout error if it still
+ *        does not exist.
+ * 0x04 - Open the stream for write-only operations.
+ *        If this flag is not used the stream is opened read-only. This flag
+ *        is required if one wants to call the reserve/commit/write functions
+ *        on the stream descriptor. Reads from a stream descriptor opened
+ *        with this flag will fail.
+ * 0x08 - Disable Producer Callback.
+ *        If this flag is set and the stream becomes empty, do not call any
+ *        associated producer callback to generate more data from the reader
+ *        context.
+ * 0x10 - Reset stream on open.
+ *        When this flag is used the stream will drop all of the stored data.
+ * 0x20 - Limit read position to the write position at time the stream
+ *        was opened. Hence this flag will freeze the content read to that
+ *        produced before the stream was opened for reading.
+ * 0x40 - Ignore Open Callback.
+ *        When this flag is set ignore any OnReaderOpenCallback setting for
+ *        the stream. This allows access to the stream to be made without
+ *        generating any extra packets into the stream.
+ */
+
+#define PVRSRV_STREAM_FLAG_NONE                        (0U)
+#define PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING         (1U<<0)
+#define PVRSRV_STREAM_FLAG_OPEN_WAIT                   (1U<<1)
+#define PVRSRV_STREAM_FLAG_OPEN_WO                     (1U<<2)
+#define PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK   (1U<<3)
+#define PVRSRV_STREAM_FLAG_RESET_ON_OPEN               (1U<<4)
+#define PVRSRV_STREAM_FLAG_READ_LIMIT                  (1U<<5)
+#define PVRSRV_STREAM_FLAG_IGNORE_OPEN_CALLBACK        (1U<<6)
+
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* PVR_TLCOMMON_H */
+/******************************************************************************
+ End of file (pvrsrv_tlcommon.h)
+******************************************************************************/
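
A self-contained sketch (not taken from the driver) of how a Transport Layer
consumer walks the packet stream using the header layout and the
GET_NEXT_PACKET_ADDR arithmetic defined above in pvrsrv_tlcommon.h. The IMG_*
types are replaced with <stdint.h> equivalents and the constants are
re-derived locally, so treat the names here as illustrative stand-ins rather
than the DDK API.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TL_ALIGN        8U            /* PVRSRVTL_PACKET_ALIGNMENT       */
#define TL_SIZE_MASK    0x0000FFFFU   /* PVRSRVTL_PACKETHDR_SIZE_MASK    */
#define TL_TYPE_OFFSET  24U           /* PVRSRVTL_PACKETHDR_TYPE_OFFSET  */
#define TL_TYPE_DATA    1U            /* PVRSRVTL_PACKETTYPE_DATA        */
#define TL_TYPE_PADDING 3U            /* PVRSRVTL_PACKETTYPE_PADDING     */

typedef struct
{
	uint32_t uiTypeSize;   /* type in the top 8 bits, size in the bottom 16 */
	uint32_t uiReserved;   /* keeps the header 8-byte aligned */
} tl_hdr_t;

/* Walk the packets held in buf[0..len) and report what was found. */
static void tl_walk(const uint8_t *buf, size_t len)
{
	const uint8_t *cur = buf;

	while ((size_t)(cur - buf) + sizeof(tl_hdr_t) <= len)
	{
		const tl_hdr_t *hdr  = (const tl_hdr_t *)(const void *)cur;
		uint32_t        size = hdr->uiTypeSize & TL_SIZE_MASK;
		uint32_t        type = hdr->uiTypeSize >> TL_TYPE_OFFSET;

		if (type == TL_TYPE_DATA)
			printf("data packet, %u payload bytes\n", (unsigned)size);
		else if (type == TL_TYPE_PADDING)
			printf("padding packet, skipping %u bytes\n", (unsigned)size);

		/* Same arithmetic as GET_NEXT_PACKET_ADDR: header plus the payload
		 * rounded up to the 8-byte packet alignment. */
		cur += sizeof(tl_hdr_t) + ((size + (TL_ALIGN - 1U)) & ~(TL_ALIGN - 1U));
	}
}
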
diff --git a/drivers/gpu/drm/img/img-rogue/include/pvrsrv_tlstreams.h b/drivers/gpu/drm/img/img-rogue/include/pvrsrv_tlstreams.h
new file mode 100644 (file)
index 0000000..9064075
--- /dev/null
@@ -0,0 +1,61 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services Transport Layer stream names
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Transport layer common types and definitions included into
+                both user mode and kernel mode source.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVRSRV_TLSTREAMS_H
+#define PVRSRV_TLSTREAMS_H
+
+#define PVRSRV_TL_CTLR_STREAM "tlctrl"
+
+#define PVRSRV_TL_HWPERF_RGX_FW_STREAM      "hwperf_fw_"
+#define PVRSRV_TL_HWPERF_HOST_SERVER_STREAM "hwperf_host_"
+
+/* Host HWPerf client stream names are of the form 'hwperf_client_<pid>' */
+#define PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM         "hwperf_client_"
+#define PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM_FMTSPEC "hwperf_client_%u_%u"
+
+#endif /* PVRSRV_TLSTREAMS_H */
+
+/******************************************************************************
+ End of file (pvrsrv_tlstreams.h)
+******************************************************************************/
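
A standalone illustration of building a per-process HWPerf client stream name
from the format specifier above. The header only documents the '<pid>' part
of the name, so treating the second %u as a per-process instance index is an
assumption made for this example; the constants themselves are copied from
the headers in this patch.

#include <stdio.h>

#define PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM_FMTSPEC "hwperf_client_%u_%u"
#define PRVSRVTL_MAX_STREAM_NAME_SIZE 40U   /* from pvrsrv_tlcommon.h */

/* Format a client stream name; prints e.g. "hwperf_client_1234_0". */
static void print_client_stream_name(unsigned int pid, unsigned int instance)
{
	char acName[PRVSRVTL_MAX_STREAM_NAME_SIZE];

	snprintf(acName, sizeof(acName),
	         PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM_FMTSPEC, pid, instance);
	printf("%s\n", acName);
}
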
diff --git a/drivers/gpu/drm/img/img-rogue/include/pvrversion.h b/drivers/gpu/drm/img/img-rogue/include/pvrversion.h
new file mode 100644 (file)
index 0000000..c62b3f7
--- /dev/null
@@ -0,0 +1,68 @@
+/*************************************************************************/ /*!
+@File           pvrversion.h
+@Title          PowerVR version numbers and strings.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Version numbers and strings for PowerVR components.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVRVERSION_H
+#define PVRVERSION_H
+
+#define PVRVERSION_MAJ               1U
+#define PVRVERSION_MIN               17U
+
+#define PVRVERSION_FAMILY           "rogueddk"
+#define PVRVERSION_BRANCHNAME       "1.17"
+#define PVRVERSION_BUILD             6210866
+#define PVRVERSION_BSCONTROL        "Rogue_DDK_Linux_WS"
+
+#define PVRVERSION_STRING           "Rogue_DDK_Linux_WS rogueddk 1.17@6210866"
+#define PVRVERSION_STRING_SHORT     "1.17@6210866"
+
+#define COPYRIGHT_TXT               "Copyright (c) Imagination Technologies Ltd. All Rights Reserved."
+
+#define PVRVERSION_BUILD_HI          621
+#define PVRVERSION_BUILD_LO          866
+#define PVRVERSION_STRING_NUMERIC   "1.17.621.866"
+
+#define PVRVERSION_PACK(MAJOR,MINOR) (((IMG_UINT32)((IMG_UINT32)(MAJOR) & 0xFFFFU) << 16U) | (((MINOR) & 0xFFFFU) << 0U))
+#define PVRVERSION_UNPACK_MAJ(VERSION) (((VERSION) >> 16U) & 0xFFFFU)
+#define PVRVERSION_UNPACK_MIN(VERSION) (((VERSION) >> 0U) & 0xFFFFU)
+
+#endif /* PVRVERSION_H */
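
A small standalone check of the PVRVERSION_PACK/UNPACK macros above, with
IMG_UINT32 mapped onto uint32_t. Nothing here is part of the patch; it only
demonstrates the bit layout (major in the top 16 bits, minor in the bottom 16).

#include <stdint.h>
#include <stdio.h>

#define IMG_UINT32 uint32_t   /* stand-in for the DDK typedef */

#define PVRVERSION_PACK(MAJOR,MINOR) (((IMG_UINT32)((IMG_UINT32)(MAJOR) & 0xFFFFU) << 16U) | (((MINOR) & 0xFFFFU) << 0U))
#define PVRVERSION_UNPACK_MAJ(VERSION) (((VERSION) >> 16U) & 0xFFFFU)
#define PVRVERSION_UNPACK_MIN(VERSION) (((VERSION) >> 0U) & 0xFFFFU)

int main(void)
{
	uint32_t ui32Packed = PVRVERSION_PACK(1U, 17U);   /* DDK 1.17 */

	/* Prints: packed=0x00010011 major=1 minor=17 */
	printf("packed=0x%08x major=%u minor=%u\n",
	       (unsigned)ui32Packed,
	       (unsigned)PVRVERSION_UNPACK_MAJ(ui32Packed),
	       (unsigned)PVRVERSION_UNPACK_MIN(ui32Packed));
	return 0;
}
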
diff --git a/drivers/gpu/drm/img/img-rogue/include/rgx_common.h b/drivers/gpu/drm/img/img-rogue/include/rgx_common.h
new file mode 100644 (file)
index 0000000..b6ae150
--- /dev/null
@@ -0,0 +1,235 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Common Types and Defines Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Common types and definitions for RGX software
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGX_COMMON_H
+#define RGX_COMMON_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+
+/* Included to get the BVNC_KM_N defined and other feature defs */
+#include "km/rgxdefs_km.h"
+
+#include "rgx_common_asserts.h"
+
+
+/* Virtualisation validation builds are meant to test the VZ-related hardware without a fully virtualised platform.
+ * As such a driver can support either the vz-validation code or real virtualisation.
+ * Note: PVRSRV_VZ_NUM_OSID is the external build option, while RGX_NUM_OS_SUPPORTED is the internal symbol used in the DDK */
+#if defined(SUPPORT_GPUVIRT_VALIDATION) && (defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1))
+#error "Invalid build configuration: Virtualisation support (PVRSRV_VZ_NUM_OSID > 1) and virtualisation validation code (SUPPORT_GPUVIRT_VALIDATION) are mutually exclusive."
+#endif
+
+/* The RGXFWIF_DM defines assume only one of RGX_FEATURE_TLA or
+ * RGX_FEATURE_FASTRENDER_DM is present. Ensure this with a compile-time check.
+ */
+#if defined(RGX_FEATURE_TLA) && defined(RGX_FEATURE_FASTRENDER_DM)
+#error "Both RGX_FEATURE_TLA and RGX_FEATURE_FASTRENDER_DM defined. Fix code to handle this!"
+#endif
+
+/*! The master definition for data masters known to the RGX firmware.
+ * When a new DM is added to this list, a corresponding entry should be added
+ * to the RGX_HWPERF_DM enum list.
+ * The DM in a V1 HWPerf packet uses this definition. */
+
+typedef IMG_UINT32 RGXFWIF_DM;
+
+#define        RGXFWIF_DM_GP                   IMG_UINT32_C(0)
+/* Either the TDM or the 2D DM is present; the compile-time check above verifies this */
+#define        RGXFWIF_DM_2D                   IMG_UINT32_C(1) /* when RGX_FEATURE_TLA defined */
+#define        RGXFWIF_DM_TDM                  IMG_UINT32_C(1) /* when RGX_FEATURE_FASTRENDER_DM defined */
+
+#define        RGXFWIF_DM_GEOM                 IMG_UINT32_C(2)
+#define        RGXFWIF_DM_3D                   IMG_UINT32_C(3)
+#define        RGXFWIF_DM_CDM                  IMG_UINT32_C(4)
+#define        RGXFWIF_DM_RAY                  IMG_UINT32_C(5)
+#define        RGXFWIF_DM_GEOM2                IMG_UINT32_C(6)
+#define        RGXFWIF_DM_GEOM3                IMG_UINT32_C(7)
+#define        RGXFWIF_DM_GEOM4                IMG_UINT32_C(8)
+
+#define        RGXFWIF_DM_LAST                 RGXFWIF_DM_GEOM4
+
+typedef IMG_UINT32 RGX_KICK_TYPE_DM;
+#define RGX_KICK_TYPE_DM_GP            IMG_UINT32_C(0x001)
+#define RGX_KICK_TYPE_DM_TDM_2D        IMG_UINT32_C(0x002)
+#define RGX_KICK_TYPE_DM_TA            IMG_UINT32_C(0x004)
+#define RGX_KICK_TYPE_DM_3D            IMG_UINT32_C(0x008)
+#define RGX_KICK_TYPE_DM_CDM   IMG_UINT32_C(0x010)
+#define RGX_KICK_TYPE_DM_RTU   IMG_UINT32_C(0x020)
+#define RGX_KICK_TYPE_DM_SHG   IMG_UINT32_C(0x040)
+#define RGX_KICK_TYPE_DM_TQ2D  IMG_UINT32_C(0x080)
+#define RGX_KICK_TYPE_DM_TQ3D  IMG_UINT32_C(0x100)
+#define RGX_KICK_TYPE_DM_RAY   IMG_UINT32_C(0x200)
+#define RGX_KICK_TYPE_DM_LAST  IMG_UINT32_C(0x400)
+
+/* Maximum number of DMs in use: GP, 2D/TDM, GEOM, 3D, CDM, RDM, GEOM2, GEOM3, GEOM4 */
+#define RGXFWIF_DM_MAX                 (RGXFWIF_DM_LAST + 1U)
+
+/*
+ * Data Master Tags to be appended to resources created on behalf of each RGX
+ * Context.
+ */
+#define RGX_RI_DM_TAG_KS   'K'
+#define RGX_RI_DM_TAG_CDM  'C'
+#define RGX_RI_DM_TAG_RC   'R' /* To be removed once TA/3D Timelines are split */
+#define RGX_RI_DM_TAG_TA   'V'
+#define RGX_RI_DM_TAG_GEOM 'V'
+#define RGX_RI_DM_TAG_3D   'P'
+#define RGX_RI_DM_TAG_TDM  'T'
+#define RGX_RI_DM_TAG_TQ2D '2'
+#define RGX_RI_DM_TAG_TQ3D 'Q'
+#define RGX_RI_DM_TAG_RAY  'r'
+
+/*
+ * Client API Tags to be appended to resources created on behalf of each
+ * Client API.
+ */
+#define RGX_RI_CLIENT_API_GLES1    '1'
+#define RGX_RI_CLIENT_API_GLES3    '3'
+#define RGX_RI_CLIENT_API_VULKAN   'V'
+#define RGX_RI_CLIENT_API_EGL      'E'
+#define RGX_RI_CLIENT_API_OPENCL   'C'
+#define RGX_RI_CLIENT_API_OPENGL   'G'
+#define RGX_RI_CLIENT_API_SERVICES 'S'
+#define RGX_RI_CLIENT_API_WSEGL    'W'
+#define RGX_RI_CLIENT_API_ANDROID  'A'
+#define RGX_RI_CLIENT_API_LWS      'L'
+
+/*
+ * Format a RI annotation for a given RGX Data Master context
+ */
+#define RGX_RI_FORMAT_DM_ANNOTATION(annotation, dmTag, clientAPI) do         \
+       {                                                                        \
+               (annotation)[0] = (dmTag);                                           \
+               (annotation)[1] = (clientAPI);                                       \
+               (annotation)[2] = '\0';                                              \
+       } while (false)
+
+/*!
+ ******************************************************************************
+ * RGXFW Compiler alignment definitions
+ *****************************************************************************/
+#if defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES) || defined(INTEGRITY_OS)
+#define RGXFW_ALIGN                    __attribute__ ((aligned (8)))
+#define        RGXFW_ALIGN_DCACHEL             __attribute__((aligned (64)))
+#elif defined(_MSC_VER)
+#define RGXFW_ALIGN                    __declspec(align(8))
+#define        RGXFW_ALIGN_DCACHEL             __declspec(align(64))
+#pragma warning (disable : 4324)
+#else
+#error "Align MACROS need to be defined for this compiler"
+#endif
+
+/*!
+ ******************************************************************************
+ * Force 8-byte alignment for structures allocated uncached.
+ *****************************************************************************/
+#define UNCACHED_ALIGN      RGXFW_ALIGN
+
+
+/*!
+ ******************************************************************************
+ * GPU Utilisation states
+ *****************************************************************************/
+#define RGXFWIF_GPU_UTIL_STATE_IDLE      (0U)
+#define RGXFWIF_GPU_UTIL_STATE_ACTIVE    (1U)
+#define RGXFWIF_GPU_UTIL_STATE_BLOCKED   (2U)
+#define RGXFWIF_GPU_UTIL_STATE_NUM       (3U)
+#define RGXFWIF_GPU_UTIL_STATE_MASK      IMG_UINT64_C(0x0000000000000003)
+
+
+/*
+ * Maximum amount of register writes that can be done by the register
+ * programmer (FW or META DMA). This is not a HW limitation, it is only
+ * a protection against malformed inputs to the register programmer.
+ */
+#define RGX_MAX_NUM_REGISTER_PROGRAMMER_WRITES  (128U)
+
+/* FW common context priority. */
+/*!
+ * @AddToGroup WorkloadContexts
+ * @{
+ */
+#define RGX_CTX_PRIORITY_REALTIME  (INT32_MAX)
+#define RGX_CTX_PRIORITY_HIGH      (2U) /*!< HIGH priority */
+#define RGX_CTX_PRIORITY_MEDIUM    (1U) /*!< MEDIUM priority */
+#define RGX_CTX_PRIORITY_LOW       (0) /*!< LOW priority */
+/*!
+ * @} End of AddToGroup WorkloadContexts
+ */
+
+
+/*
+ *   Use of the 32-bit context property flags mask
+ *   ( X = taken/in use, - = available/unused )
+ *
+ *                                   0
+ *                                   |
+ *    -------------------------------x
+ */
+/*
+ * Context creation flags
+ * (specify a context's properties at creation time)
+ */
+#define RGX_CONTEXT_FLAG_DISABLESLR                                    (1UL << 0) /*!< Disable SLR */
+
+/* Bitmask of context flags allowed to be modified after context create. */
+#define RGX_CONTEXT_FLAGS_WRITEABLE_MASK            (RGX_CONTEXT_FLAG_DISABLESLR)
+
+/* List of attributes that may be set for a context */
+typedef enum _RGX_CONTEXT_PROPERTY_
+{
+       RGX_CONTEXT_PROPERTY_FLAGS  = 0, /*!< Context flags */
+} RGX_CONTEXT_PROPERTY;
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* RGX_COMMON_H */
+
+/******************************************************************************
+ End of file
+******************************************************************************/
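
An illustration-only harness around the RGX_RI_FORMAT_DM_ANNOTATION helper
above, showing how a two-character resource annotation is built from a Data
Master tag and a client API tag. The macro and the tag values are copied from
rgx_common.h; the rest is scaffolding invented for the example.

#include <stdbool.h>
#include <stdio.h>

#define RGX_RI_DM_TAG_CDM        'C'
#define RGX_RI_CLIENT_API_OPENCL 'C'

#define RGX_RI_FORMAT_DM_ANNOTATION(annotation, dmTag, clientAPI) do \
	{                                                                \
		(annotation)[0] = (dmTag);                                   \
		(annotation)[1] = (clientAPI);                               \
		(annotation)[2] = '\0';                                      \
	} while (false)

int main(void)
{
	char acAnnotation[3];

	/* Tag a resource as belonging to the compute DM, created via OpenCL. */
	RGX_RI_FORMAT_DM_ANNOTATION(acAnnotation, RGX_RI_DM_TAG_CDM,
	                            RGX_RI_CLIENT_API_OPENCL);
	printf("annotation: %s\n", acAnnotation);   /* prints "CC" */
	return 0;
}
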
diff --git a/drivers/gpu/drm/img/img-rogue/include/rgx_common_asserts.h b/drivers/gpu/drm/img/img-rogue/include/rgx_common_asserts.h
new file mode 100644 (file)
index 0000000..c571cc6
--- /dev/null
@@ -0,0 +1,73 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Common Types and Defines Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Common types and definitions for RGX software
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGX_COMMON_ASSERTS_H
+#define RGX_COMMON_ASSERTS_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*! This macro represents a mask of LSBs that must be zero on data structure
+ * sizes and offsets to ensure they are 8-byte granular on types shared between
+ * the FW and host driver */
+#define RGX_FW_ALIGNMENT_LSB (7U)
+
+/*! Macro to test structure size alignment */
+#define RGX_FW_STRUCT_SIZE_ASSERT(_a)  \
+       static_assert((sizeof(_a) & RGX_FW_ALIGNMENT_LSB) == 0U,        \
+                                 "Size of " #_a " is not properly aligned")
+
+/*! Macro to test structure member alignment */
+#define RGX_FW_STRUCT_OFFSET_ASSERT(_a, _b)    \
+       static_assert((offsetof(_a, _b) & RGX_FW_ALIGNMENT_LSB) == 0U,  \
+                                 "Offset of " #_a "." #_b " is not properly aligned")
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* RGX_COMMON_ASSERTS_H */
+
+/******************************************************************************
+ End of file
+******************************************************************************/
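
A minimal sketch of how the alignment asserts above are used: a structure
shared between host and firmware is explicitly padded so its size and the
offsets of shared members stay 8-byte granular, and the checks fail at compile
time if that is broken. The example structure is invented for illustration;
the macros are copied from rgx_common_asserts.h and static_assert comes from
C11's <assert.h>.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define RGX_FW_ALIGNMENT_LSB (7U)
#define RGX_FW_STRUCT_SIZE_ASSERT(_a) \
	static_assert((sizeof(_a) & RGX_FW_ALIGNMENT_LSB) == 0U, \
	              "Size of " #_a " is not properly aligned")
#define RGX_FW_STRUCT_OFFSET_ASSERT(_a, _b) \
	static_assert((offsetof(_a, _b) & RGX_FW_ALIGNMENT_LSB) == 0U, \
	              "Offset of " #_a "." #_b " is not properly aligned")

typedef struct
{
	uint64_t ui64Timestamp;   /* offset 0 */
	uint32_t ui32Flags;       /* offset 8 */
	uint32_t ui32Padding;     /* explicit pad keeps the size a multiple of 8 */
} EXAMPLE_SHARED_STRUCT;

RGX_FW_STRUCT_SIZE_ASSERT(EXAMPLE_SHARED_STRUCT);
RGX_FW_STRUCT_OFFSET_ASSERT(EXAMPLE_SHARED_STRUCT, ui32Flags);
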
diff --git a/drivers/gpu/drm/img/img-rogue/include/rgx_compat_bvnc.h b/drivers/gpu/drm/img/img-rogue/include/rgx_compat_bvnc.h
new file mode 100644 (file)
index 0000000..c3e1333
--- /dev/null
@@ -0,0 +1,140 @@
+/*************************************************************************/ /*!
+@File           rgx_compat_bvnc.h
+@Title          BVNC compatibility check utilities
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Utility functions used for packing BNC and V.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGX_COMPAT_BVNC_H)
+#define RGX_COMPAT_BVNC_H
+
+#include "img_types.h"
+
+#if defined(RGX_FIRMWARE)               /* Services firmware */
+# include "rgxfw_utils.h"
+# define PVR_COMPAT_ASSERT RGXFW_ASSERT
+#elif !defined(RGX_BUILD_BINARY)        /* Services host driver code */
+# include "pvr_debug.h"
+# define PVR_COMPAT_ASSERT PVR_ASSERT
+#else                                   /* FW user-mode tools */
+# include <assert.h>
+# define PVR_COMPAT_ASSERT assert
+#endif
+
+/* 64bit endian conversion macros */
+#if defined(__BIG_ENDIAN__)
+#define RGX_INT64_TO_BE(N) (N)
+#define RGX_INT64_FROM_BE(N) (N)
+#define RGX_INT32_TO_BE(N) (N)
+#define RGX_INT32_FROM_BE(N) (N)
+#else
+#define RGX_INT64_TO_BE(N)        \
+       ((((N) >> 56)   & 0xff)       \
+        | (((N) >> 40) & 0xff00)     \
+        | (((N) >> 24) & 0xff0000)   \
+        | (((N) >> 8)  & 0xff000000U) \
+        | ((N)                << 56) \
+        | (((N) & 0xff00)     << 40) \
+        | (((N) & 0xff0000)   << 24) \
+        | (((N) & 0xff000000U) << 8))
+#define RGX_INT64_FROM_BE(N) RGX_INT64_TO_BE(N)
+
+#define RGX_INT32_TO_BE(N)   \
+       ((((N) >> 24)  & 0xff)   \
+        | (((N) >> 8) & 0xff00) \
+        | ((N)           << 24) \
+        | ((((N) & 0xff00) << 8)))
+#define RGX_INT32_FROM_BE(N) RGX_INT32_TO_BE(N)
+#endif
+
+/******************************************************************************
+ * RGX Version packed into 64-bit (BVNC) to be used by Compatibility Check
+ *****************************************************************************/
+
+#define RGX_BVNC_PACK_SHIFT_B 48
+#define RGX_BVNC_PACK_SHIFT_V 32
+#define RGX_BVNC_PACK_SHIFT_N 16
+#define RGX_BVNC_PACK_SHIFT_C 0
+
+#define RGX_BVNC_PACK_MASK_B (IMG_UINT64_C(0xFFFF000000000000))
+#define RGX_BVNC_PACK_MASK_V (IMG_UINT64_C(0x0000FFFF00000000))
+#define RGX_BVNC_PACK_MASK_N (IMG_UINT64_C(0x00000000FFFF0000))
+#define RGX_BVNC_PACK_MASK_C (IMG_UINT64_C(0x000000000000FFFF))
+
+#define RGX_BVNC_PACKED_EXTR_B(BVNC) ((IMG_UINT32)(((BVNC) & RGX_BVNC_PACK_MASK_B) >> RGX_BVNC_PACK_SHIFT_B))
+#define RGX_BVNC_PACKED_EXTR_V(BVNC) ((IMG_UINT32)(((BVNC) & RGX_BVNC_PACK_MASK_V) >> RGX_BVNC_PACK_SHIFT_V))
+#define RGX_BVNC_PACKED_EXTR_N(BVNC) ((IMG_UINT32)(((BVNC) & RGX_BVNC_PACK_MASK_N) >> RGX_BVNC_PACK_SHIFT_N))
+#define RGX_BVNC_PACKED_EXTR_C(BVNC) ((IMG_UINT32)(((BVNC) & RGX_BVNC_PACK_MASK_C) >> RGX_BVNC_PACK_SHIFT_C))
+
+#define RGX_BVNC_EQUAL(L,R,all,version,bvnc) do {                                                                                                                      \
+                                                                               (bvnc) = IMG_FALSE;                                                                                                     \
+                                                                               (version) = ((L).ui32LayoutVersion == (R).ui32LayoutVersion);           \
+                                                                               if (version)                                                                                                            \
+                                                                               {                                                                                                                                       \
+                                                                                       (bvnc) = ((L).ui64BVNC == (R).ui64BVNC);                                                \
+                                                                               }                                                                                                                                       \
+                                                                               (all) = (version) && (bvnc);                                                                            \
+                                                                       } while (false)
+
+
+/**************************************************************************//**
+ * Utility function for packing BVNC
+ *****************************************************************************/
+static inline IMG_UINT64 rgx_bvnc_pack(IMG_UINT32 ui32B, IMG_UINT32 ui32V, IMG_UINT32 ui32N, IMG_UINT32 ui32C)
+{
+       /*
+        * Test for input B, V, N and C exceeding max bit width.
+        */
+       PVR_COMPAT_ASSERT((ui32B & (~(RGX_BVNC_PACK_MASK_B >> RGX_BVNC_PACK_SHIFT_B))) == 0U);
+       PVR_COMPAT_ASSERT((ui32V & (~(RGX_BVNC_PACK_MASK_V >> RGX_BVNC_PACK_SHIFT_V))) == 0U);
+       PVR_COMPAT_ASSERT((ui32N & (~(RGX_BVNC_PACK_MASK_N >> RGX_BVNC_PACK_SHIFT_N))) == 0U);
+       PVR_COMPAT_ASSERT((ui32C & (~(RGX_BVNC_PACK_MASK_C >> RGX_BVNC_PACK_SHIFT_C))) == 0U);
+
+       return (((IMG_UINT64)ui32B << RGX_BVNC_PACK_SHIFT_B) |
+                       ((IMG_UINT64)ui32V << RGX_BVNC_PACK_SHIFT_V) |
+                       ((IMG_UINT64)ui32N << RGX_BVNC_PACK_SHIFT_N) |
+                       ((IMG_UINT64)ui32C << RGX_BVNC_PACK_SHIFT_C));
+}
+
+
+#endif /* RGX_COMPAT_BVNC_H */
+
+/******************************************************************************
+ End of file (rgx_compat_bvnc.h)
+******************************************************************************/
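
A standalone illustration of the BVNC packing performed by rgx_bvnc_pack()
above, with the IMG_* types and PVR_COMPAT_ASSERT replaced by
<stdint.h>/<assert.h> equivalents. The B.V.N.C values used are arbitrary
example numbers, not a statement about any particular GPU.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RGX_BVNC_PACK_SHIFT_B 48
#define RGX_BVNC_PACK_SHIFT_V 32
#define RGX_BVNC_PACK_SHIFT_N 16
#define RGX_BVNC_PACK_SHIFT_C 0
#define RGX_BVNC_PACK_MASK_B UINT64_C(0xFFFF000000000000)
#define RGX_BVNC_PACK_MASK_C UINT64_C(0x000000000000FFFF)

static uint64_t bvnc_pack(uint32_t b, uint32_t v, uint32_t n, uint32_t c)
{
	/* Mirrors rgx_bvnc_pack(): each field must fit in 16 bits. */
	assert(b <= 0xFFFFU && v <= 0xFFFFU && n <= 0xFFFFU && c <= 0xFFFFU);
	return ((uint64_t)b << RGX_BVNC_PACK_SHIFT_B) |
	       ((uint64_t)v << RGX_BVNC_PACK_SHIFT_V) |
	       ((uint64_t)n << RGX_BVNC_PACK_SHIFT_N) |
	       ((uint64_t)c << RGX_BVNC_PACK_SHIFT_C);
}

int main(void)
{
	uint64_t ui64BVNC = bvnc_pack(4U, 31U, 6U, 3U);   /* arbitrary example */

	printf("B=%u C=%u packed=0x%016llx\n",
	       (unsigned)((ui64BVNC & RGX_BVNC_PACK_MASK_B) >> RGX_BVNC_PACK_SHIFT_B),
	       (unsigned)(ui64BVNC & RGX_BVNC_PACK_MASK_C),
	       (unsigned long long)ui64BVNC);
	return 0;
}
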
diff --git a/drivers/gpu/drm/img/img-rogue/include/rgx_fwif_resetframework.h b/drivers/gpu/drm/img/img-rogue/include/rgx_fwif_resetframework.h
new file mode 100644 (file)
index 0000000..e60bafd
--- /dev/null
@@ -0,0 +1,70 @@
+/*************************************************************************/ /*!
+@File           rgx_fwif_resetframework.h
+@Title          Post-reset work-around framework FW interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGX_FWIF_RESETFRAMEWORK_H)
+#define RGX_FWIF_RESETFRAMEWORK_H
+
+#include "img_types.h"
+#include "rgx_fwif_shared.h"
+
+typedef struct
+{
+       union
+       {
+               IMG_UINT64      uCDMReg_CDM_CB_BASE;                    //  defined(RGX_FEATURE_CDM_USER_MODE_QUEUE)
+               IMG_UINT64      uCDMReg_CDM_CTRL_STREAM_BASE;   // !defined(RGX_FEATURE_CDM_USER_MODE_QUEUE)
+       };
+       IMG_UINT64      uCDMReg_CDM_CB_QUEUE;                           // !defined(RGX_FEATURE_CDM_USER_MODE_QUEUE)
+       IMG_UINT64      uCDMReg_CDM_CB;                                         // !defined(RGX_FEATURE_CDM_USER_MODE_QUEUE)
+} RGXFWIF_RF_REGISTERS;
+
+typedef struct
+{
+       /* THIS MUST BE THE LAST MEMBER OF THE CONTAINING STRUCTURE */
+       RGXFWIF_RF_REGISTERS RGXFW_ALIGN sFWRegisters;
+
+} RGXFWIF_RF_CMD;
+
+/* to opaquely allocate and copy in the kernel */
+#define RGXFWIF_RF_CMD_SIZE  sizeof(RGXFWIF_RF_CMD)
+
+#endif /* RGX_FWIF_RESETFRAMEWORK_H */
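
A hedged sketch of the "opaquely allocate and copy" note above: a caller that
only knows RGXFWIF_RF_CMD_SIZE can stage the reset-framework command as raw
bytes without depending on the structure layout. The libc malloc/memcpy calls
are stand-ins for whatever kernel allocation and copy routines the driver
actually uses.

#include <stdlib.h>
#include <string.h>

/* Copy cmd_size bytes of an opaque reset-framework command into freshly
 * allocated storage; the caller would pass RGXFWIF_RF_CMD_SIZE for cmd_size. */
static void *stage_reset_framework_cmd(const void *pvSrc, size_t cmd_size)
{
	void *pvDst = malloc(cmd_size);      /* stand-in for kernel allocation */

	if (pvDst != NULL)
		memcpy(pvDst, pvSrc, cmd_size);  /* layout-agnostic copy */

	return pvDst;
}
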
diff --git a/drivers/gpu/drm/img/img-rogue/include/rgx_fwif_sf.h b/drivers/gpu/drm/img/img-rogue/include/rgx_fwif_sf.h
new file mode 100644 (file)
index 0000000..9238cf8
--- /dev/null
@@ -0,0 +1,931 @@
+/*************************************************************************/ /*!
+@File           rgx_fwif_sf.h
+@Title          RGX firmware interface string format specifiers
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX firmware logging messages. The following
+                list contains the messages the firmware prints. Changing
+                anything other than the first column or spelling mistakes in
+                the strings will break compatibility with log files created
+                with older/newer firmware versions.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGX_FWIF_SF_H
+#define RGX_FWIF_SF_H
+
+/******************************************************************************
+ * *DO*NOT* rearrange or delete lines in SFIDLIST or SFGROUPLIST or you
+ *           WILL BREAK fw tracing message compatibility with previous
+ *           fw versions. Only add new ones, if so required.
+ *****************************************************************************/
+/* Available log groups */
+#define RGXFW_LOG_SFGROUPLIST       \
+       X(RGXFW_GROUP_NULL,NULL)        \
+       X(RGXFW_GROUP_MAIN,MAIN)        \
+       X(RGXFW_GROUP_CLEANUP,CLEANUP)  \
+       X(RGXFW_GROUP_CSW,CSW)          \
+       X(RGXFW_GROUP_PM, PM)           \
+       X(RGXFW_GROUP_RTD,RTD)          \
+       X(RGXFW_GROUP_SPM,SPM)          \
+       X(RGXFW_GROUP_MTS,MTS)          \
+       X(RGXFW_GROUP_BIF,BIF)          \
+       X(RGXFW_GROUP_MISC,MISC)        \
+       X(RGXFW_GROUP_POW,POW)          \
+       X(RGXFW_GROUP_HWR,HWR)          \
+       X(RGXFW_GROUP_HWP,HWP)          \
+       X(RGXFW_GROUP_RPM,RPM)          \
+       X(RGXFW_GROUP_DMA,DMA)          \
+       X(RGXFW_GROUP_DBG,DBG)
+
+/*!
+ * @InGroup SRVAndFWTracing
+ * @Brief FW Trace log groups(GID) list
+ */
+enum RGXFW_LOG_SFGROUPS {
+#define X(A,B) A,
+       RGXFW_LOG_SFGROUPLIST
+#undef X
+};
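
The enum above is generated from RGXFW_LOG_SFGROUPLIST with an X-macro. As an
illustration of the same pattern (this table is not part of the patch), a
trimmed, renamed copy of the list can be expanded a second time to produce a
printable name for each group id:

/* Trimmed, renamed copy of the group list, purely for illustration. */
#define EXAMPLE_SFGROUPLIST \
	X(EX_GROUP_NULL, NULL)  \
	X(EX_GROUP_MAIN, MAIN)  \
	X(EX_GROUP_HWR,  HWR)

enum example_groups {
#define X(A,B) A,
	EXAMPLE_SFGROUPLIST
#undef X
};

static const char *const example_group_names[] = {
#define X(A,B) #B,
	EXAMPLE_SFGROUPLIST
#undef X
};
/* example_group_names[EX_GROUP_MAIN] == "MAIN" */
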
+
+#define IMG_SF_STRING_MAX_SIZE 256U
+
+typedef struct {
+       IMG_UINT32 ui32Id;
+       IMG_CHAR sName[IMG_SF_STRING_MAX_SIZE];
+} RGXFW_STID_FMT; /*  pair of string format id and string formats */
+
+typedef struct {
+       IMG_UINT32 ui32Id;
+       const IMG_CHAR *psName;
+} RGXKM_STID_FMT; /*  pair of string format id and string formats */
+
+/* Table of String Format specifiers, the group they belong and the number of
+ * arguments each expects. Xmacro styled macros are used to generate what is
+ * needed without requiring hand editing.
+ *
+ * id          : id within a group
+ * gid         : group id
+ * Sym name    : name of enumerations used to identify message strings
+ * String      : Actual string
+ * #args       : number of arguments the string format requires
+ */
+#define RGXFW_LOG_SFIDLIST \
+/*id, gid,              id name,        string,                           # arguments */ \
+X(  0, RGXFW_GROUP_NULL, RGXFW_SF_FIRST, "You should not use this string", 0) \
+\
+X(  1, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D_DEPRECATED, "Kick 3D: FWCtx 0x%08.8x @ %d, RTD 0x%08x. Partial render:%d, CSW resume:%d, prio:%d", 6) \
+X(  2, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_FINISHED, "3D finished, HWRTData0State=%x, HWRTData1State=%x", 2) \
+X(  3, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK3D_TQ_DEPRECATED, "Kick 3D TQ: FWCtx 0x%08.8x @ %d, CSW resume:%d, prio: %d", 4) \
+X(  4, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_TQ_FINISHED, "3D Transfer finished", 0) \
+X(  5, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE_DEPRECATED, "Kick Compute: FWCtx 0x%08.8x @ %d, prio: %d", 3) \
+X(  6, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_FINISHED, "Compute finished", 0) \
+X(  7, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA_DEPRECATED, "Kick TA: FWCtx 0x%08.8x @ %d, RTD 0x%08x. First kick:%d, Last kick:%d, CSW resume:%d, prio:%d", 7) \
+X(  8, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_FINISHED, "TA finished", 0) \
+X(  9, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_RESTART_AFTER_PRENDER, "Restart TA after partial render", 0) \
+X( 10, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_RESUME_WOUT_PRENDER, "Resume TA without partial render", 0) \
+X( 11, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OOM, "Out of memory! Context 0x%08x, HWRTData 0x%x", 2) \
+X( 12, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA_DEPRECATED, "Kick TLA: FWCtx 0x%08.8x @ %d, prio:%d", 3) \
+X( 13, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TLA_FINISHED, "TLA finished", 0) \
+X( 14, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CCCB_WOFF_UPDATE, "cCCB Woff update = %d, DM = %d, FWCtx = 0x%08.8x", 3) \
+X( 16, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_START, "UFO Checks for FWCtx 0x%08.8x @ %d", 2) \
+X( 17, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK, "UFO Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 3) \
+X( 18, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_SUCCEEDED, "UFO Checks succeeded", 0) \
+X( 19, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_PR_CHECK, "UFO PR-Check: [0x%08.8x] is 0x%08.8x requires >= 0x%08.8x", 3) \
+X( 20, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK_START, "UFO SPM PR-Checks for FWCtx 0x%08.8x", 1) \
+X( 21, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK_DEPRECATED, "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires >= ????????, [0x%08.8x] is ???????? requires 0x%08.8x", 4) \
+X( 22, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_UPDATE_START, "UFO Updates for FWCtx 0x%08.8x @ %d", 2) \
+X( 23, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_UPDATE, "UFO Update: [0x%08.8x] = 0x%08.8x", 2) \
+X( 24, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ASSERT_FAILED, "ASSERT Failed: line %d of:", 1) \
+X( 25, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_LOCKUP_DEPRECATED, "HWR: Lockup detected on DM%d, FWCtx: 0x%08.8x", 2) \
+X( 26, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_RESET_FW_DEPRECATED, "HWR: Reset fw state for DM%d, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x", 3) \
+X( 27, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_RESET_HW_DEPRECATED, "HWR: Reset HW", 0) \
+X( 28, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_TERMINATED_DEPRECATED, "HWR: Lockup recovered.", 0) \
+X( 29, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_FALSE_LOCKUP_DEPRECATED, "HWR: False lockup detected for DM%u", 1) \
+X( 30, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ALIGN_FAILED, "Alignment check %d failed: host = 0x%x, fw = 0x%x", 3) \
+X( 31, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GP_USC_TRIGGERED, "GP USC triggered", 0) \
+X( 32, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_OVERALLOC_REGS, "Overallocating %u temporary registers and %u shared registers for breakpoint handler", 2) \
+X( 33, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET_DEPRECATED, "Setting breakpoint: Addr 0x%08.8x", 1) \
+X( 34, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_STORE, "Store breakpoint state", 0) \
+X( 35, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_UNSET, "Unsetting BP Registers", 0) \
+X( 36, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NONZERO_RT, "Active RTs expected to be zero, actually %u", 1) \
+X( 37, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTC_PRESENT, "RTC present, %u active render targets", 1) \
+X( 38, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_EST_POWER_DEPRECATED, "Estimated Power 0x%x", 1) \
+X( 39, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTA_TARGET, "RTA render target %u", 1) \
+X( 40, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTA_KICK_RENDER, "Kick RTA render %u of %u", 2) \
+X( 41, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_SIZES_CHECK_DEPRECATED, "HWR sizes check %d failed: addresses = %d, sizes = %d", 3) \
+X( 42, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_ENABLE_DEPRECATED, "Pow: DUSTS_ENABLE = 0x%x", 1) \
+X( 43, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_HWREQ_DEPRECATED, "Pow: On(1)/Off(0): %d, Units: 0x%08.8x", 2) \
+X( 44, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_DEPRECATED, "Pow: Changing number of dusts from %d to %d", 2) \
+X( 45, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_SIDEKICK_IDLE_DEPRECATED, "Pow: Sidekick ready to be powered down", 0) \
+X( 46, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_REQ_DEPRECATED, "Pow: Request to change num of dusts to %d (bPowRascalDust=%d)", 2) \
+X( 47, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PARTIALRENDER_WITHOUT_ZSBUFFER_STORE, "No ZS Buffer used for partial render (store)", 0) \
+X( 48, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PARTIALRENDER_WITHOUT_ZSBUFFER_LOAD, "No Depth/Stencil Buffer used for partial render (load)", 0) \
+X( 49, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_SET_LOCKUP_DEPRECATED, "HWR: Lock-up DM%d FWCtx: 0x%08.8x", 2) \
+X( 50, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE_DEPRECATED, "MLIST%d checker: CatBase TE=0x%08x (%d Pages), VCE=0x%08x (%d Pages), ALIST=0x%08x, IsTA=%d", 7) \
+X( 51, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_MLIST_VALUE, "MLIST%d checker: MList[%d] = 0x%08x", 3) \
+X( 52, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_OK, "MLIST%d OK", 1) \
+X( 53, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_EMPTY, "MLIST%d is empty", 1) \
+X( 54, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE, "MLIST%d checker: CatBase TE=0x%08x%08x, VCE=0x%08x%08x, ALIST=0x%08x%08x, IsTA=%d", 8) \
+X( 55, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_40480KICK, "3D OQ flush kick", 0) \
+X( 56, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWP_UNSUPPORTED_BLOCK, "HWPerf block ID (0x%x) unsupported by device", 1) \
+X( 57, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET_DEPRECATED2, "Setting breakpoint: Addr 0x%08.8x DM%u", 2) \
+X( 58, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED, "Kick RTU: FWCtx 0x%08.8x @ %d, prio: %d", 3) \
+X( 59, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTU_FINISHED_DEPRECATED, "RDM finished on context %u", 1) \
+X( 60, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG_DEPRECATED, "Kick SHG: FWCtx 0x%08.8x @ %d, prio: %d", 3) \
+X( 61, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SHG_FINISHED_DEPRECATED, "SHG finished", 0) \
+X( 62, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FBA_FINISHED_DEPRECATED, "FBA finished on context %u", 1) \
+X( 63, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_FAILED, "UFO Checks failed", 0) \
+X( 64, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_START, "Kill DM%d start", 1) \
+X( 65, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_COMPLETE, "Kill DM%d complete", 1) \
+X( 66, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FC_CCB_UPDATE_DEPRECATED, "FC%u cCCB Woff update = %u", 2) \
+X( 67, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED2, "Kick RTU: FWCtx 0x%08.8x @ %d, prio: %d, Frame Context: %d", 4) \
+X( 68, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_INIT, "GPU init", 0) \
+X( 69, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNITS_INIT, "GPU Units init (# mask: 0x%x)", 1) \
+X( 70, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGTIMES, "Register access cycles: read: %d cycles, write: %d cycles, iterations: %d", 3) \
+X( 71, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGCONFIG_ADD, "Register configuration added. Address: 0x%x Value: 0x%x%x", 3) \
+X( 72, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGCONFIG_SET, "Register configuration applied to type %d. (0:pow on, 1:Rascal/dust init, 2-5: TA,3D,CDM,TLA, 6:All)", 1) \
+X( 73, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TPC_FLUSH, "Perform TPC flush.", 0) \
+X( 74, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_LOCKUP_DEPRECATED, "GPU has locked up (see HWR logs for more info)", 0) \
+X( 75, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_OUTOFTIME, "HWR has been triggered - GPU has overrun its deadline (see HWR logs)", 0) \
+X( 76, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_POLLFAILURE, "HWR has been triggered - GPU has failed a poll (see HWR logs)", 0) \
+X( 77, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DOPPLER_OOM_DEPRECATED, "Doppler out of memory event for FC %u", 1) \
+X( 78, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK1, "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires >= 0x%08.8x", 3) \
+X( 79, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK2, "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 3) \
+X( 80, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TIMESTAMP, "TIMESTAMP -> [0x%08.8x]", 1) \
+X( 81, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_RMW_UPDATE_START, "UFO RMW Updates for FWCtx 0x%08.8x @ %d", 2) \
+X( 82, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_RMW_UPDATE, "UFO Update: [0x%08.8x] = 0x%08.8x", 2) \
+X( 83, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NULLCMD, "Kick Null cmd: FWCtx 0x%08.8x @ %d", 2) \
+X( 84, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RPM_OOM_DEPRECATED, "RPM Out of memory! Context 0x%08x, SH requestor %d", 2) \
+X( 85, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTU_ABORT_DISCARD_DEPRECATED, "Discard RTU due to RPM abort: FWCtx 0x%08.8x @ %d, prio: %d, Frame Context: %d", 4) \
+X( 86, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED, "Deferring DM%u from running context 0x%08x @ %d (deferred DMs = 0x%08x)", 4) \
+X( 87, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_WAITING_TURN_DEPRECATED, "Deferring DM%u from running context 0x%08x @ %d to let other deferred DMs run (deferred DMs = 0x%08x)", 4) \
+X( 88, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_NO_LONGER, "No longer deferring DM%u from running context = 0x%08x @ %d (deferred DMs = 0x%08x)", 4) \
+X( 89, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_FWCCB_DEPRECATED, "FWCCB for DM%u is full, we will have to wait for space! (Roff = %u, Woff = %u)", 3) \
+X( 90, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_FWCCB, "FWCCB for OSid %u is full, we will have to wait for space! (Roff = %u, Woff = %u)", 3) \
+X( 91, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART, "Host Sync Partition marker: %d", 1) \
+X( 92, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART_RPT, "Host Sync Partition repeat: %d", 1) \
+X( 93, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CLOCK_SPEED_CHANGE, "Core clock set to %d Hz", 1) \
+X( 94, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_OFFSETS, "Compute Queue: FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 7) \
+X( 95, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_DEPRECATED, "Signal check failed, Required Data: 0x%x, Address: 0x%08x%08x", 3) \
+X( 96, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE_DEPRECATED, "Signal update, Snoop Filter: %u, MMU Ctx: %u, Signal Id: %u, Signals Base: 0x%08x%08x", 5) \
+X( 97, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWCONTEXT_SIGNALED, "Signalled the previously waiting FWCtx: 0x%08.8x, OSId: %u, Signal Address: 0x%08x%08x", 4) \
+X( 98, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_STALLED_DEPRECATED, "Compute stalled", 0) \
+X( 99, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_STALLED, "Compute stalled (Roff = %u, Woff = %u, Size = %u)", 3) \
+X(100, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_RESUMED_FROM_STALL, "Compute resumed (Roff = %u, Woff = %u, Size = %u)", 3) \
+X(101, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NOTIFY_SIGNAL_UPDATE, "Signal update notification from the host, PC Physical Address: 0x%08x%08x, Signal Virtual Address: 0x%08x%08x", 4) \
+X(102, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE_OSID_DM_DEPRECATED, "Signal update from DM: %u, OSId: %u, PC Physical Address: 0x%08x%08x", 4) \
+X(103, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_DM_DEPRECATED, "DM: %u signal check failed", 1) \
+X(104, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_DEPRECATED, "Kick TDM: FWCtx 0x%08.8x @ %d, prio:%d", 3) \
+X(105, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_FINISHED, "TDM finished", 0) \
+X(106, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TE_PIPE_STATUS_DEPRECATED, "MMU_PM_CAT_BASE_TE[%d]_PIPE[%d]:  0x%08x 0x%08x", 4) \
+X(107, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_HIT_DEPRECATED, "BRN 54141 HIT", 0) \
+X(108, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_APPLYING_DUMMY_TA_DEPRECATED, "BRN 54141 Dummy TA kicked", 0) \
+X(109, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_RESUME_TA_DEPRECATED, "BRN 54141 resume TA", 0) \
+X(110, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_DOUBLE_HIT_DEPRECATED, "BRN 54141 double hit after applying WA", 0) \
+X(111, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_DUMMY_TA_VDM_BASE_DEPRECATED, "BRN 54141 Dummy TA VDM base address: 0x%08x%08x", 2) \
+X(112, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_WITH_CURRENT, "Signal check failed, Required Data: 0x%x, Current Data: 0x%x, Address: 0x%08x%08x", 4) \
+X(113, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BUFFER_STALL_DEPRECATED, "TDM stalled (Roff = %u, Woff = %u)", 2) \
+X(114, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NOTIFY_WRITE_OFFSET_UPDATE, "Write Offset update notification for stalled FWCtx 0x%08.8x", 1) \
+X(115, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_PRIORITY_CHANGE_DEPRECATED, "Changing OSid %d's priority from %u to %u", 3) \
+X(116, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_RESUMED, "Compute resumed", 0) \
+X(117, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA, "Kick TLA: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7) \
+X(118, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM, "Kick TDM: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7) \
+X(119, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA, "Kick TA: FWCtx 0x%08.8x @ %d, RTD 0x%08x, First kick:%d, Last kick:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 11) \
+X(120, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D, "Kick 3D: FWCtx 0x%08.8x @ %d, RTD 0x%08x, Partial render:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 10) \
+X(121, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3DTQ, "Kick 3D TQ: FWCtx 0x%08.8x @ %d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8) \
+X(122, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE, "Kick Compute: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, ext:0x%08x, int:0x%08x)", 6) \
+X(123, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED3, "Kick RTU: FWCtx 0x%08.8x @ %d, Frame Context:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8) \
+X(124, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG_DEPRECATED2, "Kick SHG: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7) \
+X(125, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CSRM_RECONFIG, "Reconfigure CSRM: special coeff support enable %d.", 1) \
+X(127, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_REQ_MAX_COEFFS, "TA requires max coeff mode, deferring: %d.", 1) \
+X(128, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_REQ_MAX_COEFFS, "3D requires max coeff mode, deferring: %d.", 1) \
+X(129, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_FAILED, "Kill DM%d failed", 1) \
+X(130, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_QUEUE, "Thread Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)", 2) \
+X(131, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_QUEUE_FENCE, "Thread Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)", 3) \
+X(132, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_HCS_TRIGGERED, "DM %d failed to Context Switch on time. Triggered HCS (see HWR logs).", 1) \
+X(133, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HCS_SET_DEPRECATED, "HCS changed to %d ms", 1) \
+X(134, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UPDATE_TILES_IN_FLIGHT_DEPRECATED, "Updating Tiles In Flight (Dusts=%d, PartitionMask=0x%08x, ISPCtl=0x%08x%08x)", 4) \
+X(135, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_TILES_IN_FLIGHT, "  Phantom %d: USCTiles=%d", 2) \
+X(136, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ISOLATION_CONF_OFF_DEPRECATED, "Isolation grouping is disabled", 0) \
+X(137, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ISOLATION_CONF_DEPRECATED, "Isolation group configured with a priority threshold of %d", 1) \
+X(138, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_ONLINE_DEPRECATED, "OS %d has come online", 1) \
+X(139, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_OFFLINE_DEPRECATED, "OS %d has gone offline", 1) \
+X(140, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWCONTEXT_SIGNAL_REKICK, "Signalled the previously stalled FWCtx: 0x%08.8x, OSId: %u, Signal Address: 0x%08x%08x", 4) \
+X(141, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSETS_DEPRECATED, "TDM Queue: FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 7) \
+X(142, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSET_READ_RESET, "Reset TDM Queue Read Offset: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u becomes 0, Woff = %u, Size = %u)", 6) \
+X(143, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UMQ_MISMATCHED_READ_OFFSET, "User Mode Queue mismatched stream start: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u, StreamStartOffset = %u)", 5) \
+X(144, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_DEINIT, "GPU deinit", 0) \
+X(145, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNITS_DEINIT, "GPU units deinit", 0) \
+X(146, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CONFIG, "Initialised OS %d with config flags 0x%08x", 2) \
+X(147, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_LIMIT, "UFO limit exceeded %d/%d", 2) \
+X(148, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_62850KICK, "3D Dummy stencil store", 0) \
+X(149, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CONFIG_DEPRECATED, "Initialised OS %d with config flags 0x%08x and extended config flags 0x%08x", 3) \
+X(150, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_COMMAND_DEPRECATED, "Unknown Command (eCmdType=0x%08x)", 1) \
+X(151, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_FORCED_UPDATE, "UFO forced update: FWCtx 0x%08.8x @ %d [0x%08.8x] = 0x%08.8x", 4) \
+X(152, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_FORCED_UPDATE_NOP, "UFO forced update NOP: FWCtx 0x%08.8x @ %d [0x%08.8x] = 0x%08.8x, reason %d", 5) \
+X(153, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BRN66075_CHECK, "TDM context switch check: Roff %u points to 0x%08x, Match=%u", 3) \
+X(154, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CCBS, "OSid %d CCB init status: %d (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x", 6) \
+X(155, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWIRQ, "FW IRQ # %u @ %u", 2) \
+X(156, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET, "Setting breakpoint: Addr 0x%08.8x DM%u usc_breakpoint_ctrl_dm = %u", 3) \
+X(157, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KERNEL_CCB_DEPRECATED, "Invalid KCCB setup for OSid %u: KCCB 0x%08x, KCCB Ctrl 0x%08x", 3) \
+X(158, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KERNEL_CCB_CMD, "Invalid KCCB cmd (%u) for OSid %u @ KCCB 0x%08x", 3) \
+X(159, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FW_FAULT, "FW FAULT: At line %d in file 0x%08x%08x, additional data=0x%08x", 4) \
+X(160, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_INVALID, "Invalid breakpoint: MemCtx 0x%08x Addr 0x%08.8x DM%u usc_breakpoint_ctrl_dm = %u", 4) \
+X(161, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FLUSHINVAL_CMD_INVALID_DEPRECATED, "Discarding invalid SLC flushinval command for OSid %u: DM %u, FWCtx 0x%08x", 3) \
+X(162, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_NOTIFY_WRITE_OFFSET_UPDATE_DEPRECATED, "Invalid Write Offset update notification from OSid %u to DM %u: FWCtx 0x%08x, MemCtx 0x%08x", 4) \
+X(163, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KCCB_KICK_CMD_DEPRECATED, "Null FWCtx in KCCB kick cmd for OSid %u: KCCB 0x%08x, ROff %u, WOff %u", 4) \
+X(164, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FULL_CHPTCCB, "Checkpoint CCB for OSid %u is full, signalling host for full check state (Roff = %u, Woff = %u)", 3) \
+X(165, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CCBS_DEPRECATED, "OSid %d CCB init status: %d (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x chptCCBCtl@0x%x chptCCB@0x%x", 8) \
+X(166, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_STATE_CHANGE, "OSid %d fw state transition request: from %d to %d (0-offline 1-ready 2-active 3-offloading). Status %d (1-ok 0-fail)", 4) \
+X(167, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_STALE_KCCB_CMDS, "OSid %u has %u stale commands in its KCCB", 2) \
+X(168, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_VCE_PAUSE, "Applying VCE pause", 0) \
+X(169, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KCCB_UPDATE_RTN_SLOT_DEPRECATED, "OSid %u KCCB slot %u value updated to %u", 3) \
+X(170, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_KCCB_COMMAND, "Unknown KCCB Command: KCCBCtl=0x%08x, KCCB=0x%08x, Roff=%u, Woff=%u, Wrap=%u, Cmd=0x%08x, CmdType=0x%08x", 7) \
+X(171, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_CCB_COMMAND1, "Unknown Client CCB Command processing fences: FWCtx=0x%08x, CCBCtl=0x%08x, CCB=0x%08x, Roff=%u, Doff=%u, Woff=%u, Wrap=%u, CmdHdr=0x%08x, CmdType=0x%08x, CmdSize=%u", 10) \
+X(172, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_CCB_COMMAND2, "Unknown Client CCB Command executing kick: FWCtx=0x%08x, CCBCtl=0x%08x, CCB=0x%08x, Roff=%u, Doff=%u, Woff=%u, Wrap=%u, CmdHdr=0x%08x, CmdType=0x%08x, CmdSize=%u", 10) \
+X(173, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KCCB_KICK_CMD, "Null FWCtx in KCCB kick cmd for OSid %u with WOff %u", 2) \
+X(174, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FLUSHINVAL_CMD_INVALID, "Discarding invalid SLC flushinval command for OSid %u, FWCtx 0x%08x", 2) \
+X(175, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_NOTIFY_WRITE_OFFSET_UPDATE, "Invalid Write Offset update notification from OSid %u: FWCtx 0x%08x, MemCtx 0x%08x", 3) \
+X(176, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FW_INIT_CONFIG, "Initialised Firmware with config flags 0x%08x and extended config flags 0x%08x", 2) \
+X(177, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_CONFIG, "Set Periodic Hardware Reset Mode: %d", 1) \
+X(179, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_TRIG, "PHR mode %d, FW state: 0x%08x, HWR flags: 0x%08x", 3) \
+X(180, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_RESET_DEPRECATED, "PHR mode %d triggered a reset", 1) \
+X(181, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE, "Signal update, Snoop Filter: %u, Signal Id: %u", 2) \
+X(182, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEV_SERIES8_DEPRECATED, "WARNING: Skipping FW KCCB Cmd type %d which is not yet supported on Series8.", 1) \
+X(183, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INCONSISTENT_MMU_FLAGS, "MMU context cache data NULL, but cache flags=0x%x (sync counter=%u, update value=%u) OSId=%u", 4) \
+X(184, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SLC_FLUSH, "SLC range based flush: Context=%u VAddr=0x%02x%08x, Size=0x%08x, Invalidate=%d", 5) \
+X(185, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FBSC_INVAL, "FBSC invalidate for Context Set [0x%08x]: Entry mask 0x%08x%08x.", 3) \
+X(186, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BRN66284_UPDATE, "TDM context switch check: Roff %u was not valid for kick starting at %u, moving back to %u", 3) \
+X(187, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SPFILTER_UPDATES, "Signal updates: FIFO: %u, Signals: 0x%08x", 2) \
+X(188, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_FBSC_CMD, "Invalid FBSC cmd: FWCtx 0x%08x, MemCtx 0x%08x", 2) \
+X(189, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BRN68497_BLIT, "Insert BRN68497 WA blit after TDM Context store.", 0) \
+X(190, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PENDING_UFO_UPDATE_START, "UFO Updates for previously finished FWCtx 0x%08.8x", 1) \
+X(191, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTC_RTA_PRESENT, "RTC with RTA present, %u active render targets", 1) \
+X(192, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NULL_RTAS, "Invalid RTA Set-up. The ValidRenderTargets array in RTACtl is Null!", 0) \
+X(193, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_COUNTER, "Block 0x%x / Counter 0x%x INVALID and ignored", 2) \
+X(194, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ECC_FAULT_DEPRECATED, "ECC fault GPU=0x%08x FW=0x%08x", 2) \
+X(195, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PROCESS_XPU_EVENT, "Processing XPU event on DM = %d", 1) \
+X(196, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_VZ_WDG_TRIGGER, "OSid %u failed to respond to the virtualisation watchdog in time. Timestamp of its last input = %u", 2) \
+X(197, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_LOCKUP, "GPU-%u has locked up (see HWR logs for more info)", 1) \
+X(198, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UPDATE_TILES_IN_FLIGHT, "Updating Tiles In Flight (Dusts=%d, PartitionMask=0x%08x, ISPCtl=0x%08x)", 3) \
+X(199, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_LOCKUP_DM, "GPU has locked up (see HWR logs for more info)", 0) \
+X(200, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REPROCESS_XPU_EVENTS, "Reprocessing outstanding XPU events from cores 0x%02x", 1) \
+X(201, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SECONDARY_XPU_EVENT, "Secondary XPU event on DM=%d, CoreMask=0x%02x, Raised=0x%02x", 3) \
+X(202, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSETS, "TDM Queue: Core %u, FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 8) \
+X(203, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BUFFER_STALL, "TDM stalled Core %u (Roff = %u, Woff = %u)", 3) \
+X(204, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_CORE_OFFSETS, "Compute Queue: Core %u, FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 8) \
+X(205, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_CORE_STALLED, "Compute stalled core %u (Roff = %u, Woff = %u, Size = %u)", 4) \
+X(206, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UMQ_MISMATCHED_CORE_READ_OFFSET, "User Mode Queue mismatched stream start: Core %u, FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u, StreamStartOffset = %u)", 6) \
+X(207, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_RESUMED_FROM_STALL, "TDM resumed core %u (Roff = %u, Woff = %u)", 3) \
+X(208, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_CORE_RESUMED_FROM_STALL, "Compute resumed core %u (Roff = %u, Woff = %u, Size = %u)", 4) \
+X(209, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_MTS_PERMISSION_CHANGED, " Updated permission for OSid %u to perform MTS kicks: %u (1 = allowed, 0 = not allowed)", 2) \
+X(210, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TEST1, "Mask = 0x%X, mask2 = 0x%X", 2) \
+X(211, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TEST2, "  core %u, reg = %u, mask = 0x%X", 3) \
+X(212, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ECC_FAULT_SAFETY_BUS, "ECC fault received from safety bus: 0x%08x", 1) \
+X(213, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SAFETY_WDG_CONFIG, "Safety Watchdog threshold period set to 0x%x clock cycles", 1) \
+X(214, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SAFETY_WDG_TRIGGER, "MTS Safety Event triggered by the safety watchdog.", 0) \
+X(215, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_USC_TASKS_RANGE, "DM%d USC tasks range limit 0 - %d, stride %d", 3) \
+X(216, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_ECC_FAULT, "ECC fault GPU=0x%08x", 1) \
+X(217, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_SAFETY_RESET, "GPU Hardware units reset to prevent transient faults.", 0) \
+X(218, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ABORTCMD, "Kick Abort cmd: FWCtx 0x%08.8x @ %d", 2) \
+X(219, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RAY_DEPRECATED, "Kick Ray: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7)\
+X(220, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RAY_FINISHED_DEPRECATED, "Ray finished", 0) \
+X(221, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWDATA_INIT_STATUS, "State of firmware's private data at boot time: %d (0 = uninitialised, 1 = initialised); Fw State Flags = 0x%08X", 2) \
+X(222, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CFI_TIMEOUT, "CFI Timeout detected (%d increasing to %d)", 2) \
+X(223, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CFI_TIMEOUT_FBM, "CFI Timeout detected for FBM (%d increasing to %d)", 2) \
+X(224, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GEOM_OOM_DISALLOWED, "Geom OOM event not allowed", 0) \
+X(225, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_PRIORITY_CHANGE, "Changing OSid %d's priority from %u to %u; Isolation = %u (0 = off; 1 = on)", 4) \
+X(226, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SKIP_ALREADY_RUN_GEOM, "Skipping already executed TA FWCtx 0x%08.8x @ %d", 2) \
+X(227, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ATTEMPT_TO_RUN_AHEAD_GEOM, "Attempt to execute TA FWCtx 0x%08.8x @ %d ahead of time on other GEOM", 2) \
+X(228, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_DEPRECATED2, "Kick TDM: Kick ID %u FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8) \
+X(229, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA_PIPELINE, "Kick TA: Kick ID %u FWCtx 0x%08.8x @ %d, RTD 0x%08x, First kick:%d, Last kick:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 12) \
+X(230, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D_PIPELINE, "Kick 3D: Kick ID %u FWCtx 0x%08.8x @ %d, RTD 0x%08x, Partial render:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 11) \
+X(231, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE_PIPELINE, "Kick Compute: Kick ID %u FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, ext:0x%08x, int:0x%08x)", 7) \
+X(232, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_FINISHED_PIPELINE, "TDM finished: Kick ID %u ", 1) \
+X(233, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_FINISHED_PIPELINE, "TA finished: Kick ID %u ", 1) \
+X(234, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_FINISHED_PIPELINE, "3D finished: Kick ID %u , HWRTData0State=%x, HWRTData1State=%x", 3) \
+X(235, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_FINISHED_PIPELINE, "Compute finished: Kick ID %u ", 1) \
+X(236, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_PIPELINE, "Kick TDM: Kick ID %u FWCtx 0x%08.8x @ %d, Base 0x%08x%08x. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 10) \
+X(237, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RAY_PIPELINE, "Kick Ray: Kick ID %u FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8)\
+X(238, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RAY_FINISHED_PIPELINE, "Ray finished: Kick ID %u ", 1) \
+\
+X(  1, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK_DEPRECATED, "Bg Task DM = %u, counted = %d", 2) \
+X(  2, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_COMPLETE_DEPRECATED, "Bg Task complete DM = %u", 1) \
+X(  3, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_KICK_DEPRECATED, "Irq Task DM = %u, Breq = %d, SBIrq = 0x%x", 3) \
+X(  4, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_COMPLETE_DEPRECATED, "Irq Task complete DM = %u", 1) \
+X(  5, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KICK_MTS_BG_ALL_DEPRECATED, "Kick MTS Bg task DM=All", 0) \
+X(  6, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KICK_MTS_IRQ, "Kick MTS Irq task DM=%d", 1) \
+X(  7, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE_DEPRECATED, "Ready queue debug DM = %u, celltype = %d", 2) \
+X(  8, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYTORUN_DEPRECATED, "Ready-to-run debug DM = %u, item = 0x%x", 2) \
+X(  9, RGXFW_GROUP_MTS, RGXFW_SF_MTS_CMDHEADER, "Client command header DM = %u, client CCB = 0x%x, cmd = 0x%x", 3) \
+X( 10, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYTORUN, "Ready-to-run debug OSid = %u, DM = %u, item = 0x%x", 3) \
+X( 11, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE_DEPRECATED2, "Ready queue debug DM = %u, celltype = %d, OSid = %u", 3) \
+X( 12, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK_DEPRECATED2, "Bg Task DM = %u, counted = %d, OSid = %u", 3) \
+X( 13, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_COMPLETE, "Bg Task complete DM Bitfield: %u", 1) \
+X( 14, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_COMPLETE, "Irq Task complete.", 0) \
+X( 15, RGXFW_GROUP_MTS, RGXFW_SF_MTS_CMD_DISCARD, "Discarded Command Type: %d OS ID = %d PID = %d context = 0x%08x cccb ROff = 0x%x, due to USC breakpoint hit by OS ID = %d PID = %d.", 7) \
+X( 16, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KCCBCMD_EXEC_DEPRECATED, "KCCB Slot %u: DM=%u, Cmd=0x%08x, OSid=%u", 4) \
+X( 17, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KCCBCMD_RTN_VALUE, "KCCB Slot %u: Return value %u", 2) \
+X( 18, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK, "Bg Task OSid = %u", 1) \
+X( 19, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KCCBCMD_EXEC, "KCCB Slot %u: Cmd=0x%08x, OSid=%u", 3) \
+X( 20, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_KICK, "Irq Task (EVENT_STATUS=0x%08x)", 1) \
+X( 21, RGXFW_GROUP_MTS, RGXFW_SF_MTS_VZ_SIDEBAND, "VZ sideband test, kicked with OSid=%u from MTS, OSid for test=%u", 2) \
+\
+X(  1, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FWCTX_CLEANUP, "FwCommonContext [0x%08x] cleaned", 1) \
+X(  2, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FWCTX_BUSY, "FwCommonContext [0x%08x] is busy: ReadOffset = %d, WriteOffset = %d", 3) \
+X(  3, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANUP_DEPRECATED, "HWRTData [0x%08x] for DM=%d, received cleanup request", 2) \
+X(  4, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED_FOR_DM_DEPRECATED, "HWRTData [0x%08x] HW Context cleaned for DM%u, executed commands = %d", 3) \
+X(  5, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY_DEPRECATED, "HWRTData [0x%08x] HW Context for DM%u is busy", 2) \
+X(  6, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED_DEPRECATED, "HWRTData [0x%08x] HW Context %u cleaned", 2) \
+X(  7, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FL_CLEANED, "Freelist [0x%08x] cleaned", 1) \
+X(  8, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_ZSBUFFER_CLEANED, "ZSBuffer [0x%08x] cleaned", 1) \
+X(  9, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_ZSBUFFER_BUSY, "ZSBuffer [0x%08x] is busy: submitted = %d, executed = %d", 3) \
+X( 10, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY_DEPRECATED2, "HWRTData [0x%08x] HW Context for DM%u is busy: submitted = %d, executed = %d", 4) \
+X( 11, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANUP_DEPRECATED, "HW Ray Frame data [0x%08x] for DM=%d, received cleanup request", 2) \
+X( 12, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANED_FOR_DM_DEPRECATED, "HW Ray Frame Data [0x%08x] cleaned for DM%u, executed commands = %d", 3) \
+X( 13, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_BUSY_DEPRECATED, "HW Ray Frame Data [0x%08x] for DM%u is busy: submitted = %d, executed = %d", 4) \
+X( 14, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANED_DEPRECATED, "HW Ray Frame Data [0x%08x] HW Context %u cleaned", 2) \
+X( 15, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_INVALID_REQUEST, "Discarding invalid cleanup request of type 0x%x", 1) \
+X( 16, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANUP, "Received cleanup request for HWRTData [0x%08x]", 1) \
+X( 17, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY, "HWRTData [0x%08x] HW Context is busy: submitted = %d, executed = %d", 3) \
+X( 18, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED, "HWRTData [0x%08x] HW Context %u cleaned, executed commands = %d", 3) \
+\
+X(  1, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_NEEDS_RESUME, "CDM FWCtx 0x%08.8x needs resume", 1) \
+X(  2, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_RESUME_DEPRECATED, "*** CDM FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x", 3) \
+X(  3, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_SHARED, "CDM FWCtx shared alloc size load 0x%x", 1) \
+X(  4, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_COMPLETE, "*** CDM FWCtx store complete", 0) \
+X(  5, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_START, "*** CDM FWCtx store start", 0) \
+X(  6, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_SOFT_RESET, "CDM Soft Reset", 0) \
+X(  7, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_NEEDS_RESUME, "3D FWCtx 0x%08.8x needs resume", 1) \
+X(  8, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME, "*** 3D FWCtx 0x%08.8x resume", 1) \
+X(  9, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_COMPLETE, "*** 3D context store complete", 0) \
+X( 10, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_DEPRECATED, "3D context store pipe state: 0x%08.8x 0x%08.8x 0x%08.8x", 3) \
+X( 11, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_START, "*** 3D context store start", 0) \
+X( 12, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_TQ_RESUME, "*** 3D TQ FWCtx 0x%08.8x resume", 1) \
+X( 13, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_NEEDS_RESUME, "TA FWCtx 0x%08.8x needs resume", 1) \
+X( 14, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_RESUME, "*** TA FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x", 3) \
+X( 15, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_SHARED, "TA context shared alloc size store 0x%x, load 0x%x", 2) \
+X( 16, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_COMPLETE, "*** TA context store complete", 0) \
+X( 17, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_START, "*** TA context store start", 0) \
+X( 18, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED_DEPRECATED, "Higher priority context scheduled for DM %u, old prio:%d, new prio:%d", 3) \
+X( 19, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SET_CONTEXT_PRIORITY, "Set FWCtx 0x%x priority to %u", 2) \
+X( 20, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_DEPRECATED2, "3D context store pipe%d state: 0x%08.8x", 2) \
+X( 21, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE_DEPRECATED, "3D context resume pipe%d state: 0x%08.8x", 2) \
+X( 22, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_NEEDS_RESUME_DEPRECATED, "SHG FWCtx 0x%08.8x needs resume", 1) \
+X( 23, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_RESUME_DEPRECATED, "*** SHG FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x", 3) \
+X( 24, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_SHARED_DEPRECATED, "SHG context shared alloc size store 0x%x, load 0x%x", 2) \
+X( 25, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_STORE_COMPLETE_DEPRECATED, "*** SHG context store complete", 0) \
+X( 26, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_STORE_START_DEPRECATED, "*** SHG context store start", 0) \
+X( 27, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_PIPE_INDIRECT, "Performing TA indirection, last used pipe %d", 1) \
+X( 28, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_CTRL_STREAM_TERMINATE, "CDM context store hit ctrl stream terminate. Skip resume.", 0) \
+X( 29, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_RESUME_AB_BUFFER, "*** CDM FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x, shader state %u", 4) \
+X( 30, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STATE_BUFFER_FLIP, "TA PDS/USC state buffer flip (%d->%d)", 2) \
+X( 31, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_52563_HIT_DEPRECATED, "TA context store hit BRN 52563: vertex store tasks outstanding", 0) \
+X( 32, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_USC_POLL_FAILED, "TA USC poll failed (USC vertex task count: %d)", 1) \
+X( 33, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_DEFERRED_DEPRECATED, "TA context store deferred due to BRN 54141.", 0) \
+X( 34, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED_DEPRECATED2, "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u", 7) \
+X( 35, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_START, "*** TDM context store start", 0) \
+X( 36, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_COMPLETE, "*** TDM context store complete", 0) \
+X( 37, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_NEEDS_RESUME_DEPRECATED, "TDM context needs resume, header [0x%08.8x, 0x%08.8x]", 2) \
+X( 38, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED, "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u. Hard Context Switching: %u", 8) \
+X( 39, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE, "3D context store pipe %2d (%2d) state: 0x%08.8x", 3) \
+X( 40, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE, "3D context resume pipe %2d (%2d) state: 0x%08.8x", 3) \
+X( 41, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_START_VOLCANIC, "*** 3D context store start version %d (1=IPP_TILE, 2=ISP_TILE)", 1) \
+X( 42, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_VOLCANIC, "3D context store pipe%d state: 0x%08.8x%08x", 3) \
+X( 43, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE_VOLCANIC, "3D context resume pipe%d state: 0x%08.8x%08x", 3) \
+X( 44, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_IPP_STATE,  "3D context resume IPP state: 0x%08.8x%08x", 2) \
+X( 45, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_PIPES_EMPTY, "All 3D pipes empty after ISP tile mode store! IPP_status: 0x%08x", 1) \
+X( 46, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_RESUME_PIPE_STATE_DEPRECATED, "TDM context resume pipe%d state: 0x%08.8x%08x", 3) \
+X( 47, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_LEVEL4_STORE_START, "*** 3D context store start version 4", 0) \
+X( 48, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RESUME_MULTICORE, "Multicore context resume on DM%d active core mask 0x%04.4x", 2) \
+X( 49, RGXFW_GROUP_CSW, RGXFW_SF_CSW_STORE_MULTICORE, "Multicore context store on DM%d active core mask 0x%04.4x", 2) \
+X( 50, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_RESUME_PIPE_STATE, "TDM context resume Core %d, pipe%d state: 0x%08.8x%08x%08x", 5) \
+X( 51, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_STORE_COMPLETE, "*** RDM FWCtx store complete", 0) \
+X( 52, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_STORE_START, "*** RDM FWCtx store start", 0) \
+X( 53, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_NEEDS_RESUME, "RDM FWCtx 0x%08.8x needs resume", 1) \
+X( 54, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_RESUME, "RDM FWCtx 0x%08.8x resume", 1) \
+\
+X(  1, RGXFW_GROUP_BIF, RGXFW_SF_BIF_ACTIVATE_BIFREQ_DEPRECATED, "Activate MemCtx=0x%08x BIFreq=%d secure=%d", 3) \
+X(  2, RGXFW_GROUP_BIF, RGXFW_SF_BIF_DEACTIVATE, "Deactivate MemCtx=0x%08x", 1) \
+X(  3, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCREG_ALLOC_DEPRECATED, "Alloc PC reg %d", 1) \
+X(  4, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCSET_GRAB, "Grab reg set %d refcount now %d", 2) \
+X(  5, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCSET_UNGRAB, "Ungrab reg set %d refcount now %d", 2) \
+X(  6, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_BIFREQ_DEPRECATED, "Setup reg=%d BIFreq=%d, expect=0x%08x%08x, actual=0x%08x%08x", 6) \
+X(  7, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TRUST_DEPRECATED, "Trust enabled:%d, for BIFreq=%d", 2) \
+X(  8, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TILECFG_DEPRECATED, "BIF Tiling Cfg %d base 0x%08x%08x len 0x%08x%08x enable %d stride %d --> 0x%08x%08x", 9) \
+X(  9, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSID0, "Wrote the Value %d to OSID0, Cat Base %d, Register's contents are now 0x%08x 0x%08x", 4) \
+X( 10, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSID1, "Wrote the Value %d to OSID1, Context  %d, Register's contents are now 0x%04x", 3) \
+X( 11, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSIDx, "ui32OSid = %u, Catbase = %u, Reg Address = 0x%x, Reg index = %u, Bitshift index = %u, Val = 0x%08x%08x", 7) \
+X( 12, RGXFW_GROUP_BIF, RGXFW_SF_BIF_MAP_GPU_MEMORY_BIFREQ_DEPRECATED, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u, BIFREQ %u", 5) \
+X( 13, RGXFW_GROUP_BIF, RGXFW_SF_BIF_UNMAP_GPU_MEMORY, "Unmap GPU memory (event status 0x%x)", 1) \
+X( 14, RGXFW_GROUP_BIF, RGXFW_SF_BIF_ACTIVATE_DM, "Activate MemCtx=0x%08x DM=%d secure=%d", 3) \
+X( 15, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_DM_DEPRECATED, "Setup reg=%d DM=%d, expect=0x%08x%08x, actual=0x%08x%08x", 6) \
+X( 16, RGXFW_GROUP_BIF, RGXFW_SF_BIF_MAP_GPU_MEMORY, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u", 4) \
+X( 17, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TRUST_DM, "Trust enabled:%d, for DM=%d", 2) \
+X( 18, RGXFW_GROUP_BIF, RGXFW_SF_BIF_MAP_GPU_MEMORY_DM, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u, DM %u", 5) \
+X( 19, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_DM, "Setup register set=%d DM=%d, PC address=0x%08x%08x, OSid=%u, NewPCRegRequired=%d", 6) \
+X( 20, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCSET_ALLOC, "Alloc PC set %d as register range [%u - %u]", 3) \
+\
+X(  1, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_WRITE, "GPIO write 0x%02x", 1) \
+X(  2, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_READ, "GPIO read 0x%02x", 1) \
+X(  3, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_ENABLED, "GPIO enabled", 0) \
+X(  4, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_DISABLED, "GPIO disabled", 0) \
+X(  5, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_STATUS, "GPIO status=%d (0=OK, 1=Disabled)", 1) \
+X(  6, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_READ, "GPIO_AP: Read address=0x%02x (%d byte(s))", 2) \
+X(  7, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_WRITE, "GPIO_AP: Write address=0x%02x (%d byte(s))", 2) \
+X(  8, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_TIMEOUT, "GPIO_AP timeout!", 0) \
+X(  9, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_ERROR, "GPIO_AP error. GPIO status=%d (0=OK, 1=Disabled)", 1) \
+X( 10, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_ALREADY_READ, "GPIO already read 0x%02x", 1) \
+X( 11, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_CHECK_BUFFER_AVAILABLE, "SR: Check buffer %d available returned %d", 2) \
+X( 12, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_WAITING_BUFFER_AVAILABLE, "SR: Waiting for buffer %d", 1) \
+X( 13, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_WAIT_BUFFER_TIMEOUT, "SR: Timeout waiting for buffer %d (after %d ticks)", 2) \
+X( 14, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_FRAME_CHECK, "SR: Skip frame check for strip %d returned %d (0=No skip, 1=Skip frame)", 2) \
+X( 15, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_REMAINING_STRIPS, "SR: Skip remaining strip %d in frame", 1) \
+X( 16, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_FRAME_SKIP_NEW_FRAME, "SR: Inform HW that strip %d is a new frame", 1) \
+X( 17, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_FRAME_TIMEOUT, "SR: Timeout waiting for INTERRUPT_FRAME_SKIP (after %d ticks)", 1) \
+X( 18, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_STRIP_MODE, "SR: Strip mode is %d", 1) \
+X( 19, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_STRIP_INDEX, "SR: Strip Render start (strip %d)", 1) \
+X( 20, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_BUFFER_RENDERED, "SR: Strip Render complete (buffer %d)", 1) \
+X( 21, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_BUFFER_FAULT, "SR: Strip Render fault (buffer %d)", 1) \
+X( 22, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_STATE, "TRP state: %d", 1) \
+X( 23, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_FAILURE, "TRP failure: %d", 1) \
+X( 24, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SW_TRP_STATE, "SW TRP State: %d", 1) \
+X( 25, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SW_TRP_FAILURE, "SW TRP failure: %d", 1) \
+X( 26, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HW_KICK, "HW kick event (%u)", 1) \
+X( 27, RGXFW_GROUP_MISC, RGXFW_SF_MISC_WGP_CHECKSUMS, "GPU core (%u/%u): checksum 0x%08x vs. 0x%08x", 4) \
+X( 28, RGXFW_GROUP_MISC, RGXFW_SF_MISC_WGP_UNIT_CHECKSUMS, "GPU core (%u/%u), unit (%u,%u): checksum 0x%08x vs. 0x%08x", 6) \
+X( 29, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HWR_CHECK_REG, "HWR: Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x", 6) \
+X( 30, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HWR_USC_SLOTS_CHECK, "HWR: USC Core%u, ui32TotalSlotsUsedByDM=0x%08x, psDMHWCtl->ui32USCSlotsUsedByDM=0x%08x, bHWRNeeded=%u", 4) \
+X( 31, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HWR_USC_REG_CHECK, "HWR: USC Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x", 6) \
+\
+X(  1, RGXFW_GROUP_PM, RGXFW_SF_PM_AMLIST, "ALIST%d SP = %u, MLIST%d SP = %u (VCE 0x%08x%08x, TE 0x%08x%08x, ALIST 0x%08x%08x)", 10) \
+X(  2, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED_DEPRECATED, "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). FL different between TA/3D: global:%d, local:%d, mmu:%d", 8) \
+X(  3, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE_DEPRECATED, "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-3D-Base: 0x%08x%08x (SP = %u, 4PT = %u)", 14) \
+X(  4, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE_DEPRECATED, "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-TA-Base: 0x%08x%08x (SP = %u, 4PT = %u)", 14) \
+X(  5, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_GROW_COMPLETE_DEPRECATED, "Freelist grow completed [0x%08x]: added pages 0x%08x, total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 5) \
+X(  6, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_GROW_DENIED_DEPRECATED, "Grow for freelist ID=0x%08x denied by host", 1) \
+X(  7, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_UPDATE_COMPLETE, "Freelist update completed [0x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 5) \
+X(  8, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_RECONSTRUCTION_FAILED_DEPRECATED, "Reconstruction of freelist ID=0x%08x failed", 1) \
+X(  9, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_WARNING, "Ignored attempt to pause or unpause the DM while there is no relevant operation in progress (0-TA,1-3D): %d, operation(0-unpause, 1-pause): %d", 2) \
+X( 10, RGXFW_GROUP_PM, RGXFW_SF_PM_3D_TIMEOUT_STATUS, "Force free 3D Context memory, FWCtx: 0x%08x, status(1:success, 0:fail): %d", 2)\
+X( 11, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_ALLOC, "PM pause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \
+X( 12, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_UNPAUSE_ALLOC, "PM unpause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \
+X( 13, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_DALLOC, "PM pause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \
+X( 14, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_UNPAUSE_DALLOC, "PM unpause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \
+X( 15, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_FAILED, "PM ALLOC/DALLOC change was not actioned: PM_PAGE_MANAGEOP_STATUS=0x%x", 1) \
+X( 16, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED, "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). FL different between TA/3D: global:%d, local:%d", 7) \
+X( 17, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE, "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \
+X( 18, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE, "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \
+X( 19, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_UPDATE_COMPLETE_VOLCANIC, "Freelist update completed [0x%08x / FL State 0x%08x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 7) \
+X( 20, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_UPDATE_FAILED, "Freelist update failed [0x%08x / FL State 0x%08x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 7) \
+X( 21, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE_VOLCANIC, "UFL-3D-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \
+X( 22, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE_VOLCANIC, "UFL-TA-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \
+X( 23, RGXFW_GROUP_PM, RGXFW_SF_PM_CHECK_FL_BASEADDR, "Freelist 0x%08x base address from HW: 0x%02x%08x (expected value: 0x%02x%08x)", 5) \
+X( 24, RGXFW_GROUP_PM, RGXFW_SF_PM_ANALYSE_FL_GROW, "Analysis of FL grow: Pause=(%u,%u) Paused+Valid(%u,%u) PMStateBuffer=0x%x", 5) \
+X( 25, RGXFW_GROUP_PM, RGXFW_SF_PM_ATTEMPT_FL_GROW, "Attempt FL grow for FL: 0x%08x, new dev address: 0x%02x%08x, new page count: %u, new ready count: %u", 5) \
+X( 26, RGXFW_GROUP_PM, RGXFW_SF_PM_DEFER_FL_GROW, "Deferring FL grow for non-loaded FL: 0x%08x, new dev address: 0x%02x%08x, new page count: %u, new ready count: %u", 5) \
+X( 27, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED_ALBIORIX, "Is GEOM: %d, finished: %d (HWRTData = 0x%08x, MemCtx = 0x%08x)", 4) \
+X( 28, RGXFW_GROUP_PM, RGXFW_SF_PM_3D_TIMEOUT, "3D Timeout Now for FWCtx 0x%08.8x", 1) \
+X( 29, RGXFW_GROUP_PM, RGXFW_SF_PM_RECYCLE, "GEOM PM Recycle for FWCtx 0x%08.8x", 1) \
+X( 30, RGXFW_GROUP_PM, RGXFW_SF_PM_PRIMARY_CONFIG, "PM running primary config (Core %d)", 1) \
+X( 31, RGXFW_GROUP_PM, RGXFW_SF_PM_SECONDARY_CONFIG, "PM running secondary config (Core %d)", 1) \
+X( 32, RGXFW_GROUP_PM, RGXFW_SF_PM_TERTIARY_CONFIG, "PM running tertiary config (Core %d)", 1) \
+X( 33, RGXFW_GROUP_PM, RGXFW_SF_PM_QUATERNARY_CONFIG, "PM running quaternary config (Core %d)", 1) \
+\
+X(  1, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GLL_DYNAMIC_STATUS_DEPRECATED, "Global link list dynamic page count: vertex 0x%x, varying 0x%x, node 0x%x", 3) \
+X(  2, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GLL_STATIC_STATUS_DEPRECATED, "Global link list static page count: vertex 0x%x, varying 0x%x, node 0x%x", 3) \
+X(  3, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_WAIT_FOR_GROW_DEPRECATED, "RPM request failed. Waiting for freelist grow.", 0) \
+X(  4, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_ABORT_DEPRECATED, "RPM request failed. Aborting the current frame.", 0) \
+X(  5, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_WAIT_FOR_PENDING_GROW_DEPRECATED, "RPM waiting for pending grow on freelist 0x%08x", 1) \
+X(  6, RGXFW_GROUP_RPM, RGXFW_SF_RPM_REQUEST_HOST_GROW_DEPRECATED, "Request freelist grow [0x%08x] current pages %d, grow size %d", 3) \
+X(  7, RGXFW_GROUP_RPM, RGXFW_SF_RPM_FREELIST_LOAD_DEPRECATED, "Freelist load: SHF = 0x%08x, SHG = 0x%08x", 2) \
+X(  8, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_DEPRECATED, "SHF FPL register: 0x%08x.0x%08x", 2) \
+X(  9, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_DEPRECATED, "SHG FPL register: 0x%08x.0x%08x", 2) \
+X( 10, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_FREELIST_DEPRECATED, "Kernel requested RPM grow on freelist (type %d) at 0x%08x from current size %d to new size %d, RPM restart: %d (1=Yes)", 5) \
+X( 11, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_RESTART_DEPRECATED, "Restarting SHG", 0) \
+X( 12, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_ABORTED_DEPRECATED, "Grow failed, aborting the current frame.", 0) \
+X( 13, RGXFW_GROUP_RPM, RGXFW_SF_RPM_ABORT_COMPLETE_DEPRECATED, "RPM abort complete on HWFrameData [0x%08x].", 1) \
+X( 14, RGXFW_GROUP_RPM, RGXFW_SF_RPM_CLEANUP_NEEDS_ABORT_DEPRECATED, "RPM freelist cleanup [0x%08x] requires abort to proceed.", 1) \
+X( 15, RGXFW_GROUP_RPM, RGXFW_SF_RPM_RPM_PT_DEPRECATED, "RPM page table base register: 0x%08x.0x%08x", 2) \
+X( 16, RGXFW_GROUP_RPM, RGXFW_SF_RPM_OOM_ABORT_DEPRECATED, "Issuing RPM abort.", 0) \
+X( 17, RGXFW_GROUP_RPM, RGXFW_SF_RPM_OOM_TOGGLE_CHECK_FULL_DEPRECATED, "RPM OOM received but toggle bits indicate free pages available", 0) \
+X( 18, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_HW_TIMEOUT_DEPRECATED, "RPM hardware timeout. Unable to process OOM event.", 0) \
+X( 19, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_LOAD_DEPRECATED_DEPRECATED, "SHF FL (0x%08x) load, FPL: 0x%08x.0x%08x, roff: 0x%08x, woff: 0x%08x", 5) \
+X( 20, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_LOAD_DEPRECATED, "SHG FL (0x%08x) load, FPL: 0x%08x.0x%08x, roff: 0x%08x, woff: 0x%08x", 5) \
+X( 21, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_STORE_DEPRECATED, "SHF FL (0x%08x) store, roff: 0x%08x, woff: 0x%08x", 3) \
+X( 22, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_STORE_DEPRECATED, "SHG FL (0x%08x) store, roff: 0x%08x, woff: 0x%08x", 3) \
+\
+X(  1, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_FINISHED, "3D RTData 0x%08x finished on HW context %u", 2) \
+X(  2, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_READY, "3D RTData 0x%08x ready on HW context %u", 2) \
+X(  3, RGXFW_GROUP_RTD, RGXFW_SF_RTD_PB_SET_TO_DEPRECATED, "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %d, global: %d, mmu: %d", 4) \
+X(  4, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOADVFP_3D_DEPRECATED, "Loading VFP table 0x%08x%08x for 3D", 2) \
+X(  5, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOADVFP_TA_DEPRECATED, "Loading VFP table 0x%08x%08x for TA", 2) \
+X(  6, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_DEPRECATED, "Load Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 10) \
+X(  7, RGXFW_GROUP_RTD, RGXFW_SF_RTD_VHEAP_STORE, "Perform VHEAP table store", 0) \
+X(  8, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_MATCH_FOUND, "RTData 0x%08x: found match in Context=%d: Load=No, Store=No", 2) \
+X(  9, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_NULL_FOUND, "RTData 0x%08x: found NULL in Context=%d: Load=Yes, Store=No", 2) \
+X( 10, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_3D_FINISHED, "RTData 0x%08x: found state 3D finished (0x%08x) in Context=%d: Load=Yes, Store=Yes", 3) \
+X( 11, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_TA_FINISHED, "RTData 0x%08x: found state TA finished (0x%08x) in Context=%d: Load=Yes, Store=Yes", 3) \
+X( 12, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_STACK_POINTERS, "Loading stack-pointers for %d (0:MidTA,1:3D) on context %d, MLIST = 0x%08x, ALIST = 0x%08x%08x", 5) \
+X( 13, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB_DEPRECATED, "Store Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 10) \
+X( 14, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_FINISHED, "TA RTData 0x%08x finished on HW context %u", 2) \
+X( 15, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_LOADED, "TA RTData 0x%08x loaded on HW context %u", 2) \
+X( 16, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB_DEPRECATED2, "Store Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \
+X( 17, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_DEPRECATED2, "Load  Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \
+X( 18, RGXFW_GROUP_RTD, RGXFW_SF_RTD_DEBUG_DEPRECATED, "Freelist 0x%x RESET!!!!!!!!", 1) \
+X( 19, RGXFW_GROUP_RTD, RGXFW_SF_RTD_DEBUG2_DEPRECATED, "Freelist 0x%x stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 5) \
+X( 20, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_DEPRECATED, "Request reconstruction of Freelist 0x%x type: %d (0:local,1:global,2:mmu) on HW context %u", 3) \
+X( 21, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_ACK_DEPRECATED, "Freelist reconstruction ACK from host (HWR state :%u)", 1) \
+X( 22, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_ACK_DEPRECATED2, "Freelist reconstruction completed", 0) \
+X( 23, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_LOADED_DEPRECATED, "TA RTData 0x%08x loaded on HW context %u HWRTDataNeedsLoading=%d", 3) \
+X( 24, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TE_RGNHDR_INFO, "TE Region headers base 0x%08x%08x (RGNHDR Init: %d)", 3) \
+X( 25, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_BUFFER_ADDRS_DEPRECATED, "TA Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x, VHeap 0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)", 8) \
+X( 26, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_LOADED_DEPRECATED, "3D RTData 0x%08x loaded on HW context %u", 2) \
+X( 27, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS_DEPRECATED, "3D Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x (MemCtx 0x%08x)", 4) \
+X( 28, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RESTART_AFTER_PR_EXECUTED, "Restarting TA after partial render, HWRTData0State=0x%x, HWRTData1State=0x%x", 2) \
+X( 29, RGXFW_GROUP_RTD, RGXFW_SF_RTD_PB_SET_TO, "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %d, global: %d", 3) \
+X( 30, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_FL, "Store Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \
+X( 31, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL, "Load  Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \
+X( 32, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS_DEPRECATED2, "3D Buffers: FWCtx 0x%08x, parent RT 0x%08x, RTData 0x%08x on ctx %d, (MemCtx 0x%08x)", 5) \
+X( 33, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_BUFFER_ADDRS, "TA Buffers: FWCtx 0x%08x, RTData 0x%08x, VHeap 0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)", 7) \
+X( 34, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS, "3D Buffers: FWCtx 0x%08x, RTData 0x%08x on ctx %d, (MemCtx 0x%08x)", 4) \
+X( 35, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_V2, "Load  Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u)", 6) \
+X( 36, RGXFW_GROUP_RTD, RGXFW_SF_RTD_KILLED_TA, "TA RTData 0x%08x marked as killed.", 1) \
+X( 37, RGXFW_GROUP_RTD, RGXFW_SF_RTD_KILLED_3D, "3D RTData 0x%08x marked as killed.", 1) \
+X( 38, RGXFW_GROUP_RTD, RGXFW_SF_RTD_KILL_TA_AFTER_RESTART, "RTData 0x%08x will be killed after TA restart.", 1) \
+X( 39, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RENDERSTATE_RESET, "RTData 0x%08x Render State Buffer 0x%02x%08x will be reset.", 3) \
+X( 40, RGXFW_GROUP_RTD, RGXFW_SF_RTD_GEOM_RENDERSTATE, "GEOM RTData 0x%08x using Render State Buffer 0x%02x%08x.", 3) \
+X( 41, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FRAG_RENDERSTATE, "FRAG RTData 0x%08x using Render State Buffer 0x%02x%08x.", 3) \
+\
+X(  1, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZLOAD_DEPRECATED, "Force Z-Load for partial render", 0) \
+X(  2, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSTORE_DEPRECATED, "Force Z-Store for partial render", 0) \
+X(  3, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_LOCAL_DEPRECATED, "3D MemFree: Local FL 0x%08x", 1) \
+X(  4, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_MMU_DEPRECATED, "3D MemFree: MMU FL 0x%08x", 1) \
+X(  5, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_GLOBAL_DEPRECATED, "3D MemFree: Global FL 0x%08x", 1) \
+X(  6, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_DEPRECATED, "OOM TA/3D PR Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x, HardwareSync Fence [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 6) \
+X(  7, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_UN_FL, "OOM TA_cmd=0x%08x, U-FL 0x%08x, N-FL 0x%08x", 3) \
+X(  8, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_UN_MMU_FL_DEPRECATED, "OOM TA_cmd=0x%08x, OOM MMU:%d, U-FL 0x%08x, N-FL 0x%08x, MMU-FL 0x%08x", 5) \
+X(  9, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_AVOIDED_DEPRECATED, "Partial render avoided", 0) \
+X( 10, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_DISCARDED_DEPRECATED, "Partial render discarded", 0) \
+X( 11, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_FINISHED, "Partial Render finished", 0) \
+X( 12, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_3DBG_DEPRECATED, "SPM Owner = 3D-BG", 0) \
+X( 13, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_3DIRQ_DEPRECATED, "SPM Owner = 3D-IRQ", 0) \
+X( 14, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_NONE_DEPRECATED, "SPM Owner = NONE", 0) \
+X( 15, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_TABG_DEPRECATED, "SPM Owner = TA-BG", 0) \
+X( 16, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_TAIRQ_DEPRECATED, "SPM Owner = TA-IRQ", 0) \
+X( 17, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSTORE_ADDRESS, "ZStore address 0x%08x%08x", 2) \
+X( 18, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SSTORE_ADDRESS, "SStore address 0x%08x%08x", 2) \
+X( 19, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZLOAD_ADDRESS, "ZLoad address 0x%08x%08x", 2) \
+X( 20, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SLOAD_ADDRESS, "SLoad address 0x%08x%08x", 2) \
+X( 21, RGXFW_GROUP_SPM, RGXFW_SF_SPM_NO_DEFERRED_ZSBUFFER_DEPRECATED, "No deferred ZS Buffer provided", 0) \
+X( 22, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_POPULATED, "ZS Buffer successfully populated (ID=0x%08x)", 1) \
+X( 23, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_POP_UNNEEDED_DEPRECATED, "No need to populate ZS Buffer (ID=0x%08x)", 1) \
+X( 24, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNPOPULATED, "ZS Buffer successfully unpopulated (ID=0x%08x)", 1) \
+X( 25, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNPOP_UNNEEDED_DEPRECATED, "No need to unpopulate ZS Buffer (ID=0x%08x)", 1) \
+X( 26, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_DEPRECATED, "Send ZS-Buffer backing request to host (ID=0x%08x)", 1) \
+X( 27, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNBACKING_REQUEST_DEPRECATED, "Send ZS-Buffer unbacking request to host (ID=0x%08x)", 1) \
+X( 28, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_PENDING_DEPRECATED, "Don't send ZS-Buffer backing request. Previous request still pending (ID=0x%08x)", 1) \
+X( 29, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNBACKING_REQUEST_PENDING_DEPRECATED, "Don't send ZS-Buffer unbacking request. Previous request still pending (ID=0x%08x)", 1) \
+X( 30, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZBUFFER_NOT_READY_DEPRECATED, "Partial Render waiting for ZBuffer to be backed (ID=0x%08x)", 1) \
+X( 31, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SBUFFER_NOT_READY_DEPRECATED, "Partial Render waiting for SBuffer to be backed (ID=0x%08x)", 1) \
+X( 32, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_NONE, "SPM State = none", 0) \
+X( 33, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_BLOCKED, "SPM State = PR blocked", 0) \
+X( 34, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_GROW, "SPM State = wait for grow", 0) \
+X( 35, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_HW, "SPM State = wait for HW", 0) \
+X( 36, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_RUNNING, "SPM State = PR running", 0) \
+X( 37, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_AVOIDED, "SPM State = PR avoided", 0) \
+X( 38, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_EXECUTED, "SPM State = PR executed", 0) \
+X( 39, RGXFW_GROUP_SPM, RGXFW_SF_SPM_FREELIST_MATCH, "3DMemFree matches freelist 0x%08x (FL type = %u)", 2) \
+X( 40, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_FLAG_SET, "Raise the 3DMemFreeDetected flag", 0) \
+X( 41, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_PENDING_GROW, "Wait for pending grow on Freelist 0x%08x", 1) \
+X( 42, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_FAILED, "ZS Buffer failed to be populated (ID=0x%08x)", 1) \
+X( 43, RGXFW_GROUP_SPM, RGXFW_SF_SPM_FL_GROW_DEBUG, "Grow update inconsistency: FL addr: 0x%02x%08x, curr pages: %u, ready: %u, new: %u", 5) \
+X( 44, RGXFW_GROUP_SPM, RGXFW_SF_SPM_RESUMED_TA_WITH_SP, "OOM: Resumed TA with ready pages, FL addr: 0x%02x%08x, current pages: %u, SP : %u", 4) \
+X( 45, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ACK_GROW_UPDATE_DEPRECATED, "Received grow update, FL addr: 0x%02x%08x, current pages: %u, ready pages: %u, threshold: %u", 5) \
+X( 46, RGXFW_GROUP_SPM, RGXFW_SF_SPM_NO_DEFERRED_PRBUFFER, "No deferred partial render FW (Type=%d) Buffer provided", 1) \
+X( 47, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_POP_UNNEEDED, "No need to populate PR Buffer (ID=0x%08x)", 1) \
+X( 48, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNPOP_UNNEEDED, "No need to unpopulate PR Buffer (ID=0x%08x)", 1) \
+X( 49, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_BACKING_REQUEST, "Send PR Buffer backing request to host (ID=0x%08x)", 1) \
+X( 50, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNBACKING_REQUEST, "Send PR Buffer unbacking request to host (ID=0x%08x)", 1) \
+X( 51, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_BACKING_REQUEST_PENDING, "Don't send PR Buffer backing request. Previous request still pending (ID=0x%08x)", 1) \
+X( 52, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNBACKING_REQUEST_PENDING, "Don't send PR Buffer unbacking request. Previous request still pending (ID=0x%08x)", 1) \
+X( 53, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_NOT_READY, "Partial Render waiting for Buffer %d type to be backed (ID=0x%08x)", 2) \
+X( 54, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ACK_GROW_UPDATE, "Received grow update, FL addr: 0x%02x%08x, new pages: %u, ready pages: %u", 4) \
+X( 66, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD, "OOM TA/3D PR Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 3) \
+X( 67, RGXFW_GROUP_SPM, RGXFW_SF_SPM_RESUMED_TA, "OOM: Resumed TA with ready pages, FL addr: 0x%02x%08x, current pages: %u", 3) \
+X( 68, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PR_DEADLOCK_UNBLOCKED, "OOM TA/3D PR deadlock unblocked reordering DM%d runlist head from Context 0x%08x to 0x%08x", 3) \
+X( 69, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_FORCEFREE, "SPM State = PR force free", 0) \
+\
+X(  1, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK_DEPRECATED, "Check Pow state DM%d int: 0x%x, ext: 0x%x, pow flags: 0x%x", 4) \
+X(  2, RGXFW_GROUP_POW, RGXFW_SF_POW_GPU_IDLE, "GPU idle (might be powered down). Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \
+X(  3, RGXFW_GROUP_POW, RGXFW_SF_POW_OSREQ_DEPRECATED, "OS requested pow off (forced = %d), DM%d, pow flags: 0x%x", 3) \
+X(  4, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_DEPRECATED, "Initiate powoff query. Inactive DMs: %d %d %d %d", 4) \
+X(  5, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECKOFF_DEPRECATED, "Any RD-DM pending? %d, Any RD-DM Active? %d", 2) \
+X(  6, RGXFW_GROUP_POW, RGXFW_SF_POW_GPU_OFF, "GPU ready to be powered down. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \
+X(  7, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ, "HW Request On(1)/Off(0): %d, Units: 0x%08.8x", 2) \
+X(  8, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_REQ, "Request to change num of dusts to %d (Power flags=%d)", 2) \
+X(  9, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE, "Changing number of dusts from %d to %d", 2) \
+X( 11, RGXFW_GROUP_POW, RGXFW_SF_POW_SIDEKICK_INIT_DEPRECATED, "Sidekick init", 0) \
+X( 12, RGXFW_GROUP_POW, RGXFW_SF_POW_RD_INIT_DEPRECATED, "Rascal+Dusts init (# dusts mask: 0x%x)", 1) \
+X( 13, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_RD, "Initiate powoff query for RD-DMs.", 0) \
+X( 14, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_TLA, "Initiate powoff query for TLA-DM.", 0) \
+X( 15, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_RD, "Any RD-DM pending? %d, Any RD-DM Active? %d", 2) \
+X( 16, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_TLA, "TLA-DM pending? %d, TLA-DM Active? %d", 2) \
+X( 17, RGXFW_GROUP_POW, RGXFW_SF_POW_BRN37270_DEPRECATED, "Request power up due to BRN37270. Pow stat int: 0x%x", 1) \
+X( 18, RGXFW_GROUP_POW, RGXFW_SF_POW_REQ_CANCEL, "Cancel power off request int: 0x%x, ext: 0x%x, pow flags: 0x%x", 3) \
+X( 19, RGXFW_GROUP_POW, RGXFW_SF_POW_FORCED_IDLE, "OS requested forced IDLE, pow flags: 0x%x", 1) \
+X( 20, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_FORCED_IDLE, "OS cancelled forced IDLE, pow flags: 0x%x", 1) \
+X( 21, RGXFW_GROUP_POW, RGXFW_SF_POW_IDLE_TIMER, "Idle timer start. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \
+X( 22, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_IDLE_TIMER, "Cancel idle timer. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \
+X( 23, RGXFW_GROUP_POW, RGXFW_SF_POW_APM_LATENCY_CHANGE, "Active PM latency set to %dms. Core clock: %d Hz", 2) \
+X( 24, RGXFW_GROUP_POW, RGXFW_SF_POW_CDM_CLUSTERS, "Compute cluster mask change to 0x%x, %d dusts powered.", 2) \
+X( 25, RGXFW_GROUP_POW, RGXFW_SF_POW_NULL_CMD_INIOFF_RD, "Null command executed, repeating initiate powoff query for RD-DMs.", 0) \
+X( 26, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_ENERGY, "Power monitor: Estimate of dynamic energy %u", 1) \
+X( 27, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK_DEPRECATED2, "Check Pow state: Int: 0x%x, Ext: 0x%x, Pow flags: 0x%x", 3) \
+X( 28, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_NEW_DEADLINE, "Proactive DVFS: New deadline, time = 0x%08x%08x", 2) \
+X( 29, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_NEW_WORKLOAD, "Proactive DVFS: New workload, cycles = 0x%08x%08x", 2) \
+X( 30, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_CALCULATE, "Proactive DVFS: Proactive frequency calculated = %u", 1) \
+X( 31, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UTILISATION, "Proactive DVFS: Reactive utilisation = %u percent", 1) \
+X( 32, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_REACT, "Proactive DVFS: Reactive frequency calculated = %u.%u", 2) \
+X( 33, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_SEND_DEPRECATED, "Proactive DVFS: OPP Point Sent = 0x%x", 1) \
+X( 34, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_DEADLINE_REMOVED, "Proactive DVFS: Deadline removed = 0x%08x%08x", 2) \
+X( 35, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_WORKLOAD_REMOVED, "Proactive DVFS: Workload removed = 0x%08x%08x", 2) \
+X( 36, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_THROTTLE, "Proactive DVFS: Throttle to a maximum = 0x%x", 1) \
+X( 37, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_FAILURE, "Proactive DVFS: Failed to pass OPP point via GPIO.", 0) \
+X( 38, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_INVALID_NODE_DEPRECATED, "Proactive DVFS: Invalid node passed to function.", 0) \
+X( 39, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GUEST_BAD_ACCESS_DEPRECATED, "Proactive DVFS: Guest OS attempted to do a privileged action. OSid = %u", 1) \
+X( 40, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UNPROFILED_STARTED, "Proactive DVFS: Unprofiled work started. Total unprofiled work present: %u", 1) \
+X( 41, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UNPROFILED_FINISHED, "Proactive DVFS: Unprofiled work finished. Total unprofiled work present: %u", 1) \
+X( 42, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_DISABLED, "Proactive DVFS: Disabled: Not enabled by host.", 0) \
+X( 43, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ_RESULT, "HW Request Completed(1)/Aborted(0): %d, Ticks: %d", 2) \
+X( 44, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_FIX_59042_DEPRECATED, "Allowed number of dusts is %d due to BRN59042.", 1) \
+X( 45, RGXFW_GROUP_POW, RGXFW_SF_POW_HOST_TIMEOUT_NOTIFICATION, "Host timed out while waiting for a forced idle state. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \
+X( 46, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK, "Check Pow state: Int: 0x%x, Ext: 0x%x, Pow flags: 0x%x, Fence Counters: Check: %u - Update: %u", 5) \
+X( 47, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_SEND, "Proactive DVFS: OPP Point Sent = 0x%x, Success = 0x%x", 2) \
+X( 48, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_TO_IDLE, "Proactive DVFS: GPU transitioned to idle", 0) \
+X( 49, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_TO_ACTIVE, "Proactive DVFS: GPU transitioned to active", 0) \
+X( 50, RGXFW_GROUP_POW, RGXFW_SF_POW_POWDUMP_BUFFER_SIZE, "Power counter dumping: Data truncated writing register %u. Buffer too small.", 1) \
+X( 51, RGXFW_GROUP_POW, RGXFW_SF_POW_POWCTRL_ABORT, "Power controller returned ABORT for last request so retrying.", 0) \
+X( 52, RGXFW_GROUP_POW, RGXFW_SF_POW_INVALID_POWER_REQUEST_DEPRECATED, "Discarding invalid power request: type 0x%x, DM %u", 2) \
+X( 53, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_FORCED_IDLE_NOT_IDLE, "Detected attempt to cancel forced idle while not forced idle (pow state 0x%x, pow flags 0x%x)", 2) \
+X( 54, RGXFW_GROUP_POW, RGXFW_SF_POW_FORCED_POW_OFF_NOT_IDLE, "Detected attempt to force power off while not forced idle (pow state 0x%x, pow flags 0x%x)", 2) \
+X( 55, RGXFW_GROUP_POW, RGXFW_SF_POW_NUMDUST_CHANGE_NOT_IDLE, "Detected attempt to change dust count while not forced idle (pow state 0x%x)", 1) \
+X( 56, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_RESULT, "Power monitor: Type = %d (0 = power, 1 = energy), Estimate result = 0x%08x%08x", 3) \
+X( 57, RGXFW_GROUP_POW, RGXFW_SF_POW_MINMAX_CONFLICT, "Conflicting clock frequency range: OPP min = %u, max = %u", 2) \
+X( 58, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_FLOOR, "Proactive DVFS: Set floor to a minimum = 0x%x", 1) \
+X( 59, RGXFW_GROUP_POW, RGXFW_SF_POW_OSREQ, "OS requested pow off (forced = %d), pow flags: 0x%x", 2) \
+X( 60, RGXFW_GROUP_POW, RGXFW_SF_POW_INVALID_POWER_REQUEST, "Discarding invalid power request: type 0x%x", 1) \
+X( 61, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_POW_STATE_CHANGE_REQ, "Request to change SPU power state mask from 0x%x to 0x%x. Pow flags: 0x%x", 3) \
+X( 62, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_POW_STATE_CHANGE, "Changing SPU power state mask from 0x%x to 0x%x", 2) \
+X( 63, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_POW_CHANGE_NOT_IDLE, "Detected attempt to change SPU power state mask while not forced idle (pow state 0x%x)", 1) \
+X( 64, RGXFW_GROUP_POW, RGXFW_SF_POW_INVALID_SPU_POWER_MASK, "Invalid SPU power mask 0x%x! Changing to 1", 1) \
+X( 65, RGXFW_GROUP_POW, RGXFW_SF_POW_CLKDIV_UPDATE, "Proactive DVFS: Send OPP %u with clock divider value %u", 2) \
+X( 66, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_PERF_MODE, "PPA block started in perf validation mode.", 0) \
+X( 67, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_RESET, "Reset PPA block state %u (1=reset, 0=recalculate).", 1) \
+X( 68, RGXFW_GROUP_POW, RGXFW_SF_POW_POWCTRL_ABORT_WITH_CORE, "Power controller returned ABORT for Core-%d last request so retrying.", 1) \
+X( 69, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ64BIT, "HW Request On(1)/Off(0): %d, Units: 0x%08x%08x", 3) \
+X( 70, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_RAC_POW_STATE_CHANGE_REQ, "Request to change SPU power state mask from 0x%x to 0x%x and RAC from 0x%x to 0x%x. Pow flags: 0x%x", 5) \
+X( 71, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_RAC_POW_STATE_CHANGE, "Changing SPU power state mask from 0x%x to 0x%x and RAC from 0x%x to 0x%x", 4) \
+X( 72, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_RAC, "RAC pending? %d, RAC Active? %d", 2) \
+X( 73, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_RAC, "Initiate powoff query for RAC.", 0) \
+\
+X(  1, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DEPRECATED, "Lockup detected on DM%d, FWCtx: 0x%08.8x", 2) \
+X(  2, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_FW_DEPRECATED, "Reset fw state for DM%d, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x", 3) \
+X(  3, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED, "Reset HW", 0) \
+X(  4, RGXFW_GROUP_HWR, RGXFW_SF_HWR_TERMINATED_DEPRECATED, "Lockup recovered.", 0) \
+X(  5, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DEPRECATED, "Lock-up DM%d FWCtx: 0x%08.8x", 2) \
+X(  6, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DETECTED_DEPRECATED, "Lockup detected: GLB(%d->%d), PER-DM(0x%08x->0x%08x)", 4) \
+X(  7, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EARLY_FAULT_DETECTION_DEPRECATED, "Early fault detection: GLB(%d->%d), PER-DM(0x%08x)", 3) \
+X(  8, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_LOCKUP_DEPRECATED, "Hold scheduling due to lockup: GLB(%d), PER-DM(0x%08x->0x%08x)", 3) \
+X(  9, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FALSE_LOCKUP_DEPRECATED, "False lockup detected: GLB(%d->%d), PER-DM(0x%08x->0x%08x)", 4) \
+X( 10, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BRN37729_DEPRECATED, "BRN37729: GLB(%d->%d), PER-DM(0x%08x->0x%08x)", 4) \
+X( 11, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FREELISTS_RECONSTRUCTED_DEPRECATED, "Freelists reconstructed: GLB(%d->%d), PER-DM(0x%08x)", 3) \
+X( 12, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RECONSTRUCTING_FREELISTS_DEPRECATED, "Reconstructing freelists: %u (0-No, 1-Yes): GLB(%d->%d), PER-DM(0x%08x)", 4) \
+X( 13, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FAILED_HW_POLL, "HW poll %u (0-Unset 1-Set) failed (reg:0x%08x val:0x%08x)", 3) \
+X( 14, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_DISCARDED_DEPRECATED, "Discarded cmd on DM%u FWCtx=0x%08x", 2) \
+X( 15, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_DISCARDED, "Discarded cmd on DM%u (reason=%u) HWRTData=0x%08x (st: %d), FWCtx 0x%08x @ %d", 6) \
+X( 16, RGXFW_GROUP_HWR, RGXFW_SF_HWR_PM_FENCE_DEPRECATED, "PM fence WA could not be applied, Valid TA Setup: %d, RD powered off: %d", 2) \
+X( 17, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_SNAPSHOT, "FL snapshot RTD 0x%08.8x - local (0x%08.8x): %d, global (0x%08.8x): %d", 5) \
+X( 18, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_CHECK, "FL check RTD 0x%08.8x, discard: %d - local (0x%08.8x): s%d?=c%d, global (0x%08.8x): s%d?=c%d", 8) \
+X( 19, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_DEPRECATED, "FL reconstruction 0x%08.8x c%d", 2) \
+X( 20, RGXFW_GROUP_HWR, RGXFW_SF_HWR_3D_CHECK, "3D check: missing TA FWCtx 0x%08.8x @ %d, RTD 0x%08x.", 3) \
+X( 21, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED2, "Reset HW (mmu:%d, extmem: %d)", 2) \
+X( 22, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ZERO_TA_CACHES, "Zero TA caches for FWCtx: 0x%08.8x (TPC addr: 0x%08x%08x, size: %d bytes)", 4) \
+X( 23, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FREELISTS_RECONSTRUCTED_DEPRECATED2, "Recovery DM%u: Freelists reconstructed. New R-Flags=0x%08x", 2) \
+X( 24, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SKIPPED_CMD, "Recovery DM%u: FWCtx 0x%08x skipped to command @ %u. PR=%u. New R-Flags=0x%08x", 5) \
+X( 25, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_RECOVERED, "Recovery DM%u: DM fully recovered", 1) \
+X( 26, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_LOCKUP, "DM%u: Hold scheduling due to R-Flag = 0x%08x", 2) \
+X( 27, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_RECONSTRUCTION, "Analysis: Need freelist reconstruction", 0) \
+X( 28, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_SKIP, "Analysis DM%u: Lockup FWCtx: 0x%08.8x. Need to skip to next command", 2) \
+X( 29, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_SKIP_OOM_TA, "Analysis DM%u: Lockup while TA is OOM FWCtx: 0x%08.8x. Need to skip to next command", 2) \
+X( 30, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_PR_CLEANUP, "Analysis DM%u: Lockup while partial render FWCtx: 0x%08.8x. Need PR cleanup", 2) \
+X( 31, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DEPRECATED2, "GPU has locked up", 0) \
+X( 32, RGXFW_GROUP_HWR, RGXFW_SF_HWR_READY, "DM%u ready for HWR", 1) \
+X( 33, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_UPDATE_RECOVERY, "Recovery DM%u: Updated Recovery counter. New R-Flags=0x%08x", 2) \
+X( 34, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BRN37729_DEPRECATED2, "Analysis: BRN37729 detected, reset TA and re-kicked 0x%08x)", 1) \
+X( 35, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_TIMED_OUT, "DM%u timed out", 1) \
+X( 36, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EVENT_STATUS_REG, "RGX_CR_EVENT_STATUS=0x%08x", 1) \
+X( 37, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_FALSE_LOCKUP, "DM%u lockup falsely detected, R-Flags=0x%08x", 2) \
+X( 38, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_OUTOFTIME, "GPU has overrun its deadline", 0) \
+X( 39, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_POLLFAILURE, "GPU has failed a poll", 0) \
+X( 40, RGXFW_GROUP_HWR, RGXFW_SF_HWR_PERF_PHASE_REG, "RGX DM%u phase count=0x%08x", 2) \
+X( 41, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED3, "Reset HW (loop:%d, poll failures: 0x%08x)", 2) \
+X( 42, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_FAULT_EVENT, "MMU fault event: 0x%08x", 1) \
+X( 43, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BIF1_FAULT, "BIF1 page fault detected (Bank1 MMU Status: 0x%08x)", 1) \
+X( 44, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK_TRUE_DEPRECATED, "Fast CRC Failed. Proceeding to full register checking (DM: %u).", 1) \
+X( 45, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_META_FAULT, "Meta MMU page fault detected (Meta MMU Status: 0x%08x%08x)", 2) \
+X( 46, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK_DEPRECATED, "Fast CRC Check result for DM%u is HWRNeeded=%u", 2) \
+X( 47, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FULL_CHECK_DEPRECATED, "Full Signature Check result for DM%u is HWRNeeded=%u", 2) \
+X( 48, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FINAL_RESULT, "Final result for DM%u is HWRNeeded=%u with HWRChecksToGo=%u", 3) \
+X( 49, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_CHECK_DEPRECATED, "USC Slots result for DM%u is HWRNeeded=%u USCSlotsUsedByDM=%d", 3) \
+X( 50, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DEADLINE_CHECK_DEPRECATED, "Deadline counter for DM%u is HWRDeadline=%u", 2) \
+X( 51, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST_DEPRECATED, "Holding Scheduling on OSid %u due to pending freelist reconstruction", 1) \
+X( 52, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_REQUEST, "Requesting reconstruction for freelist 0x%x (ID=%d)", 2) \
+X( 53, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_PASSED, "Reconstruction of freelist ID=%d complete", 1) \
+X( 54, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED, "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global,2:mmu) on HW context %u", 4) \
+X( 55, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_FAILED, "Reconstruction of freelist ID=%d failed", 1) \
+X( 56, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESTRICTING_PDS_TASKS, "Restricting PDS Tasks to help other stalling DMs (RunningMask=0x%02x, StallingMask=0x%02x, PDS_CTRL=0x%08x%08x)", 4) \
+X( 57, RGXFW_GROUP_HWR, RGXFW_SF_HWR_UNRESTRICTING_PDS_TASKS, "Unrestricting PDS Tasks again (RunningMask=0x%02x, StallingMask=0x%02x, PDS_CTRL=0x%08x%08x)", 4) \
+X( 58, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_USED, "USC slots: %u used by DM%u", 2) \
+X( 59, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_EMPTY, "USC slots: %u empty", 1) \
+X( 60, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HCS_FIRE, "HCS DM%d's Context Switch failed to meet deadline. Current time: 0x%08x%08x, deadline: 0x%08x%08x", 5) \
+X( 61, RGXFW_GROUP_HWR, RGXFW_SF_HWR_START_HW_RESET, "Begin hardware reset (HWR Counter=%d)", 1) \
+X( 62, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FINISH_HW_RESET, "Finished hardware reset (HWR Counter=%d)", 1) \
+X( 63, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST, "Holding Scheduling on DM %u for OSid %u due to pending freelist reconstruction", 2) \
+X( 64, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_UMQ_READ_OFFSET, "User Mode Queue ROff reset: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u becomes StreamStartOffset = %u)", 5) \
+X( 65, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED2, "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global) on HW context %u", 4) \
+X( 66, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MIPS_FAULT, "Mips page fault detected (BadVAddr: 0x%08x, EntryLo0: 0x%08x, EntryLo1: 0x%08x)", 3) \
+X( 67, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ANOTHER_CHANCE, "At least one other DM is running okay so DM%u will get another chance", 1) \
+X( 68, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_FW, "Reconstructing in FW, FL: 0x%x (ID=%d)", 2) \
+X( 69, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ZERO_RTC, "Zero RTC for FWCtx: 0x%08.8x (RTC addr: 0x%08x%08x, size: %d bytes)", 4) \
+X( 70, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED3, "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global) phase: %d (0:TA, 1:3D) on HW context %u", 5) \
+X( 71, RGXFW_GROUP_HWR, RGXFW_SF_HWR_START_LONG_HW_POLL, "Start long HW poll %u (0-Unset 1-Set) for (reg:0x%08x val:0x%08x)", 3) \
+X( 72, RGXFW_GROUP_HWR, RGXFW_SF_HWR_END_LONG_HW_POLL, "End long HW poll (result=%d)", 1) \
+X( 73, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DEADLINE_CHECK, "DM%u has taken %d ticks and deadline is %d ticks", 3) \
+X( 74, RGXFW_GROUP_HWR, RGXFW_SF_HWR_WATCHDOG_CHECK_DEPRECATED, "USC Watchdog result for DM%u is HWRNeeded=%u Status=%u USCs={0x%x} with HWRChecksToGo=%u", 5) \
+X( 75, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED, "Reconstruction needed for freelist 0x%x (ID=%d) OSid: %d type: %d (0:local,1:global) phase: %d (0:TA, 1:3D) on HW context %u", 6) \
+X( 76, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP, "GPU-%u has locked up", 1) \
+X( 77, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DM, "DM%u has locked up", 1) \
+X( 78, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_EVENT_STATUS_REG, "Core %d RGX_CR_EVENT_STATUS=0x%08x", 2) \
+X( 79, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MULTICORE_EVENT_STATUS_REG, "RGX_CR_MULTICORE_EVENT_STATUS%u=0x%08x", 2) \
+X( 80, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_BIF0_FAULT, "BIF0 page fault detected (Core %d MMU Status: 0x%08x%08x Req Status: 0x%08x%08x)", 5) \
+X( 81, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_MMU_FAULT_S7, "MMU page fault detected (Core %d MMU Status: 0x%08x%08x)", 3) \
+X( 82, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_MMU_FAULT, "MMU page fault detected (Core %d MMU Status: 0x%08x%08x 0x%08x)", 4) \
+X( 83, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW, "Reset HW (core:%d of %d, loop:%d, poll failures: 0x%08x)", 4) \
+X( 84, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK, "Fast CRC Check result for Core%u, DM%u is HWRNeeded=%u", 3) \
+X( 85, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FULL_CHECK, "Full Signature Check result for Core%u, DM%u is HWRNeeded=%u", 3) \
+X( 86, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_CHECK, "USC Slots result for Core%u, DM%u is HWRNeeded=%u USCSlotsUsedByDM=%d", 4) \
+X( 87, RGXFW_GROUP_HWR, RGXFW_SF_HWR_WATCHDOG_CHECK, "USC Watchdog result for Core%u DM%u is HWRNeeded=%u Status=%u USCs={0x%x} with HWRChecksToGo=%u", 6) \
+X( 88, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_RISCV_FAULT, "RISC-V MMU page fault detected (FWCORE MMU Status 0x%08x Req Status 0x%08x%08x)", 3) \
+X( 89, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_BIF_TEXAS1_PFS_DEPRECATED, "TEXAS1_PFS poll failed on core %d with value 0x%08x", 2) \
+X( 90, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_BIF_PFS, "BIF_PFS poll failed on core %d with value 0x%08x", 2) \
+X( 91, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_SET_ABORT_PM_STATUS, "MMU_ABORT_PM_STATUS set poll failed on core %d with value 0x%08x", 2) \
+X( 92, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_UNSET_ABORT_PM_STATUS, "MMU_ABORT_PM_STATUS unset poll failed on core %d with value 0x%08x", 2) \
+X( 93, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_SLC_INVAL, "MMU_CTRL_INVAL poll (all but fw) failed on core %d with value 0x%08x", 2) \
+X( 94, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_SLCMMU_INVAL, "MMU_CTRL_INVAL poll (all) failed on core %d with value 0x%08x", 2) \
+X( 95, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_BIF_TEXAS_PFS, "TEXAS%d_PFS poll failed on core %d with value 0x%08x", 3) \
+X( 96, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EXTRA_CHECK, "Extra Registers Check result for Core%u, DM%u is HWRNeeded=%u", 3) \
+X( 97, RGXFW_GROUP_HWR, RGXFW_SF_HWR_WRITE_TO_GPU_READONLY_ADDR, "FW attempted to write to read-only GPU address 0x%08x", 1) \
+\
+X(  1, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CFGBLK, "Block 0x%x mapped to Config Idx %u", 2) \
+X(  2, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_OMTBLK, "Block 0x%x omitted from event - not enabled in HW", 1) \
+X(  3, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INCBLK, "Block 0x%x included in event - enabled in HW", 1) \
+X(  4, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_SELREG, "Select register state hi_0x%x lo_0x%x", 2) \
+X(  5, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CSBHDR, "Counter stream block header word 0x%x", 1) \
+X(  6, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CTROFF, "Counter register offset 0x%x", 1) \
+X(  7, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CFGSKP, "Block 0x%x config unset, skipping", 1) \
+X(  8, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INDBLK, "Accessing Indirect block 0x%x", 1) \
+X(  9, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DIRBLK, "Accessing Direct block 0x%x", 1) \
+X( 10, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CNTPRG, "Programmed counter select register at offset 0x%x", 1) \
+X( 11, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKPRG, "Block register offset 0x%x and value 0x%x", 2) \
+X( 12, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKCG, "Reading config block from driver 0x%x", 1) \
+X( 13, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKRG, "Reading block range 0x%x to 0x%x", 2) \
+X( 14, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKREC, "Recording block 0x%x config from driver", 1) \
+X( 15, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKED, "Finished reading config block from driver", 0) \
+X( 16, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CUSTOM_COUNTER, "Custom Counter offset: 0x%x  value: 0x%x", 2) \
+X( 17, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_SELECT_CNTR, "Select counter n:%u  ID:0x%x", 2) \
+X( 18, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_SELECT_PACK, "The counter ID 0x%x is not allowed. The package [b:%u, n:%u] will be discarded", 3) \
+X( 19, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHANGE_FILTER_STATUS_CUSTOM, "Custom Counters filter status %d", 1) \
+X( 20, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_WRONG_BLOCK, "The Custom block %d is not allowed. Use only blocks lower than %d. The package will be discarded", 2) \
+X( 21, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_TOO_MANY_ID, "The package will be discarded because it contains %d counters IDs while the upper limit is %d", 2) \
+X( 22, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHECK_FILTER, "Check Filter 0x%x is 0x%x ?", 2) \
+X( 23, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_RESET_CUSTOM_BLOCK, "The custom block %u is reset", 1) \
+X( 24, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INVALID_CMD_DEPRECATED, "Encountered an invalid command (%d)", 1) \
+X( 25, RGXFW_GROUP_HWP, RGXFW_SF_HWP_WAITING_FOR_QUEUE_DEPRECATED, "HWPerf Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)", 2) \
+X( 26, RGXFW_GROUP_HWP, RGXFW_SF_HWP_WAITING_FOR_QUEUE_FENCE_DEPRECATED, "HWPerf Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)", 3) \
+X( 27, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CUSTOM_BLOCK, "Custom Counter block: %d", 1) \
+X( 28, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKENA, "Block 0x%x ENABLED", 1) \
+X( 29, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKDIS, "Block 0x%x DISABLED", 1) \
+X( 30, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INDBLK_INSTANCE, "Accessing Indirect block 0x%x, instance %u", 2) \
+X( 31, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CTRVAL, "Counter register 0x%x, Value 0x%x", 2) \
+X( 32, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHANGE_FILTER_STATUS, "Counters filter status %d", 1) \
+X( 33, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CTLBLK, "Block 0x%x mapped to Ctl Idx %u", 2) \
+X( 34, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_WORKEST_EN, "Block(s) in use for workload estimation.", 0) \
+X( 35, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CYCCTR, "GPU %u Cycle counter 0x%x, Value 0x%x", 3) \
+X( 36, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CYCMAX, "GPU Mask 0x%x Cycle counter 0x%x, Value 0x%x", 3) \
+X( 37, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_IGNORE_BLOCKS, "Blocks IGNORED for GPU %u", 1) \
+\
+X(  1, RGXFW_GROUP_DMA, RGXFW_SF_DMA_TRANSFER_REQUEST_DEPRECATED, "Transfer 0x%02x request: 0x%02x%08x -> 0x%08x, size %u", 5) \
+X(  2, RGXFW_GROUP_DMA, RGXFW_SF_DMA_TRANSFER_COMPLETE, "Transfer of type 0x%02x expected on channel %u, 0x%02x found, status %u", 4) \
+X(  3, RGXFW_GROUP_DMA, RGXFW_SF_DMA_INT_REG, "DMA Interrupt register 0x%08x", 1) \
+X(  4, RGXFW_GROUP_DMA, RGXFW_SF_DMA_WAIT, "Waiting for transfer of type 0x%02x completion...", 1) \
+X(  5, RGXFW_GROUP_DMA, RGXFW_SF_DMA_CCB_LOADING_FAILED, "Loading of cCCB data from FW common context 0x%08x (offset: %u, size: %u) failed", 3) \
+X(  6, RGXFW_GROUP_DMA, RGXFW_SF_DMA_CCB_LOAD_INVALID, "Invalid load of cCCB data from FW common context 0x%08x (offset: %u, size: %u)", 3) \
+X(  7, RGXFW_GROUP_DMA, RGXFW_SF_DMA_POLL_FAILED, "Transfer 0x%02x request poll failure", 1) \
+X(  8, RGXFW_GROUP_DMA, RGXFW_SF_DMA_BOOT_TRANSFER_FAILED, "Boot transfer(s) failed (code? %u, data? %u), used slower memcpy instead", 2) \
+X(  9, RGXFW_GROUP_DMA, RGXFW_SF_DMA_TRANSFER_REQUEST, "Transfer 0x%02x request on ch. %u: system 0x%02x%08x, coremem 0x%08x, flags 0x%x, size %u", 7) \
+\
+X(  1, RGXFW_GROUP_DBG, RGXFW_SF_DBG_INTPAIR, "0x%08x 0x%08x", 2) \
+X(  2, RGXFW_GROUP_DBG, RGXFW_SF_DBG_1HEX, "0x%08x", 1) \
+X(  3, RGXFW_GROUP_DBG, RGXFW_SF_DBG_2HEX, "0x%08x 0x%08x", 2) \
+X(  4, RGXFW_GROUP_DBG, RGXFW_SF_DBG_3HEX, "0x%08x 0x%08x 0x%08x", 3) \
+X(  5, RGXFW_GROUP_DBG, RGXFW_SF_DBG_4HEX, "0x%08x 0x%08x 0x%08x 0x%08x", 4) \
+X(  6, RGXFW_GROUP_DBG, RGXFW_SF_DBG_5HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 5) \
+X(  7, RGXFW_GROUP_DBG, RGXFW_SF_DBG_6HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 6) \
+X(  8, RGXFW_GROUP_DBG, RGXFW_SF_DBG_7HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 7) \
+X(  9, RGXFW_GROUP_DBG, RGXFW_SF_DBG_8HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 8) \
+X( 10, RGXFW_GROUP_DBG, RGXFW_SF_DBG_1SIGNED, "%d", 1) \
+X( 11, RGXFW_GROUP_DBG, RGXFW_SF_DBG_2SIGNED, "%d %d", 2) \
+X( 12, RGXFW_GROUP_DBG, RGXFW_SF_DBG_3SIGNED, "%d %d %d", 3) \
+X( 13, RGXFW_GROUP_DBG, RGXFW_SF_DBG_4SIGNED, "%d %d %d %d", 4) \
+X( 14, RGXFW_GROUP_DBG, RGXFW_SF_DBG_5SIGNED, "%d %d %d %d %d", 5) \
+X( 15, RGXFW_GROUP_DBG, RGXFW_SF_DBG_6SIGNED, "%d %d %d %d %d %d", 6) \
+X( 16, RGXFW_GROUP_DBG, RGXFW_SF_DBG_7SIGNED, "%d %d %d %d %d %d %d", 7) \
+X( 17, RGXFW_GROUP_DBG, RGXFW_SF_DBG_8SIGNED, "%d %d %d %d %d %d %d %d", 8) \
+X( 18, RGXFW_GROUP_DBG, RGXFW_SF_DBG_1UNSIGNED, "%u", 1) \
+X( 19, RGXFW_GROUP_DBG, RGXFW_SF_DBG_2UNSIGNED, "%u %u", 2) \
+X( 20, RGXFW_GROUP_DBG, RGXFW_SF_DBG_3UNSIGNED, "%u %u %u", 3) \
+X( 21, RGXFW_GROUP_DBG, RGXFW_SF_DBG_4UNSIGNED, "%u %u %u %u", 4) \
+X( 22, RGXFW_GROUP_DBG, RGXFW_SF_DBG_5UNSIGNED, "%u %u %u %u %u", 5) \
+X( 23, RGXFW_GROUP_DBG, RGXFW_SF_DBG_6UNSIGNED, "%u %u %u %u %u %u", 6) \
+X( 24, RGXFW_GROUP_DBG, RGXFW_SF_DBG_7UNSIGNED, "%u %u %u %u %u %u %u", 7) \
+X( 25, RGXFW_GROUP_DBG, RGXFW_SF_DBG_8UNSIGNED, "%u %u %u %u %u %u %u %u", 8) \
+\
+X(65535, RGXFW_GROUP_NULL, RGXFW_SF_LAST, "You should not use this string", 15)
+
+
+/*  The symbolic names found in the table above are assigned a ui32 value with
+ *  the following bit layout:
+ *
+ *   31 | 30..28 | 27..20 | 19..16 | 15..12 | 11..0    bits
+ *
+ *     0-11: id number
+ *    12-15: group id number
+ *    16-19: number of parameters
+ *    20-27: unused
+ *    28-30: active: identifies an SF packet, otherwise a regular int32
+ *       31: reserved for signed/unsigned compatibility
+ *
+ *   The following macro assigns those values to the enum-generated SF id list.
+ */
+#define RGXFW_LOG_IDMARKER                     (0x70000000U)
+#define RGXFW_LOG_CREATESFID(a,b,e) ((IMG_UINT32)(a) | ((IMG_UINT32)(b)<<12U) | ((IMG_UINT32)(e)<<16U)) | RGXFW_LOG_IDMARKER
+
+#define RGXFW_LOG_IDMASK                       (0xFFF00000U)
+#define RGXFW_LOG_VALIDID(I)           (((I) & RGXFW_LOG_IDMASK) == RGXFW_LOG_IDMARKER)
+
+typedef enum {
+#define X(a, b, c, d, e) c = RGXFW_LOG_CREATESFID(a,b,e),
+       RGXFW_LOG_SFIDLIST
+#undef X
+} RGXFW_LOG_SFids;
+
+/* Return the group id that the given (enum generated) id belongs to */
+#define RGXFW_SF_GID(x) (((IMG_UINT32)(x)>>12) & 0xfU)
+/* Returns how many arguments the SF (string format) for the given (enum-generated) id requires */
+#define RGXFW_SF_PARAMNUM(x) (((IMG_UINT32)(x)>>16) & 0xfU)
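+
+/* Illustrative sketch (not part of the original Imagination sources): pulling
+ * the fields back out of an enum-generated SF id with the helpers above; the
+ * variable names are hypothetical.
+ *
+ *   IMG_UINT32 ui32SFId = (IMG_UINT32)RGXFW_SF_POW_GPU_IDLE;
+ *   if (RGXFW_LOG_VALIDID(ui32SFId))
+ *   {
+ *       IMG_UINT32 ui32Group = RGXFW_SF_GID(ui32SFId);
+ *       IMG_UINT32 ui32NArgs = RGXFW_SF_PARAMNUM(ui32SFId);
+ *   }
+ *
+ * For this id the table above declares 3 parameters, so ui32NArgs is 3 and
+ * ui32Group is the numeric value of RGXFW_GROUP_POW.
+ */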
+
+#endif /* RGX_FWIF_SF_H */
diff --git a/drivers/gpu/drm/img/img-rogue/include/rgx_heap_firmware.h b/drivers/gpu/drm/img/img-rogue/include/rgx_heap_firmware.h
new file mode 100644 (file)
index 0000000..db2b90b
--- /dev/null
@@ -0,0 +1,120 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX FW heap definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGX_HEAP_FIRMWARE_H)
+#define RGX_HEAP_FIRMWARE_H
+
+/* Start at 903GiB. Size of 32MB per OSID (see rgxheapconfig.h)
+ * NOTE:
+ *      The firmware heaps bases and sizes are defined here to
+ *      simplify #include dependencies, see rgxheapconfig.h
+ *      for the full RGX virtual address space layout.
+ */
+
+/*
+ * The Config heap holds initialisation data shared between
+ * the driver and firmware (e.g. pointers to the KCCB and FWCCB).
+ * The Main Firmware heap size is adjusted accordingly but most
+ * of the map / unmap functions must take into consideration
+ * the entire range (i.e. main and config heap).
+ */
+#define RGX_FIRMWARE_NUMBER_OF_FW_HEAPS              (IMG_UINT32_C(2))
+#define RGX_FIRMWARE_HEAP_SHIFT                      RGX_FW_HEAP_SHIFT
+#define RGX_FIRMWARE_RAW_HEAP_BASE                   (0xE1C0000000ULL)
+#define RGX_FIRMWARE_RAW_HEAP_SIZE                   (IMG_UINT32_C(1) << RGX_FIRMWARE_HEAP_SHIFT)
+
+/* To enable the firmware to compute the exact address of structures allocated by the KM
+ * in the Fw Config subheap, regardless of the KM's page size (and PMR granularity),
+ * objects allocated consecutively but from different PMRs (due to differing memalloc flags)
+ * are allocated with a 64kb offset. This way, all structures will be located at the same base
+ * addresses when the KM is running with a page size of 4k, 16k or 64k.  */
+#define RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY    (IMG_UINT32_C(0x10000))
+
+/* Ensure the heap can hold 3 PMRs of maximum supported granularity (192KB):
+ * 1st PMR: RGXFWIF_CONNECTION_CTL
+ * 2nd PMR: RGXFWIF_OSINIT
+ * 3rd PMR: RGXFWIF_SYSINIT */
+#define RGX_FIRMWARE_CONFIG_HEAP_SIZE                (IMG_UINT32_C(3)*RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY)
+
+#define RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE          (RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE)
+/*
+ * MIPS FW needs space in the Main heap to map GPU memory.
+ * This space is taken from the MAIN heap, to avoid creating a new heap.
+ */
+#define RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_NORMAL       (IMG_UINT32_C(0x100000)) /* 1MB */
+#define RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_BRN65101     (IMG_UINT32_C(0x400000)) /* 4MB */
+
+#define RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_NORMAL      (RGX_FIRMWARE_RAW_HEAP_SIZE -  RGX_FIRMWARE_CONFIG_HEAP_SIZE - \
+                                                           RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_NORMAL)
+
+#define RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_BRN65101    (RGX_FIRMWARE_RAW_HEAP_SIZE -  RGX_FIRMWARE_CONFIG_HEAP_SIZE - \
+                                                           RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_BRN65101)
+
+#if !defined(__KERNEL__)
+#if defined(FIX_HW_BRN_65101)
+#define RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE      RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_BRN65101
+#define RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE        RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_BRN65101
+
+#include "img_defs.h"
+static_assert((RGX_FIRMWARE_RAW_HEAP_SIZE) >= IMG_UINT32_C(0x800000), "MIPS GPU map size cannot be increased due to BRN65101 with a small FW heap");
+
+#else
+#define RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE      RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_NORMAL
+#define RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE        RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_NORMAL
+#endif
+#endif /* !defined(__KERNEL__) */
+
+#define RGX_FIRMWARE_MAIN_HEAP_BASE             RGX_FIRMWARE_RAW_HEAP_BASE
+#define RGX_FIRMWARE_CONFIG_HEAP_BASE           (RGX_FIRMWARE_MAIN_HEAP_BASE + \
+                                                 RGX_FIRMWARE_RAW_HEAP_SIZE - \
+                                                 RGX_FIRMWARE_CONFIG_HEAP_SIZE)
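+
+/* Worked example (illustrative, not from the original sources), assuming
+ * RGX_FW_HEAP_SHIFT is 25, i.e. the maximum 32MiB raw heap:
+ *
+ *   RGX_FIRMWARE_RAW_HEAP_SIZE          = 1 << 25              = 0x02000000
+ *   RGX_FIRMWARE_CONFIG_HEAP_SIZE       = 3 * 0x10000          = 0x00030000
+ *   RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE = 0x02000000 - 0x30000 = 0x01FD0000
+ *   RGX_FIRMWARE_MAIN_HEAP_BASE         = 0xE1C0000000
+ *   RGX_FIRMWARE_CONFIG_HEAP_BASE       = 0xE1C0000000 + 0x02000000 - 0x30000
+ *                                       = 0xE1C1FD0000
+ */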
+
+/*
+ * The maximum size configurable via RGX_FW_HEAP_SHIFT is 32MiB (1<<25) and
+ * the minimum is 4MiB (1<<22); the default firmware heap size is the
+ * maximum of 32MiB.
+ */
+#if defined(RGX_FW_HEAP_SHIFT) && (RGX_FW_HEAP_SHIFT < 22 || RGX_FW_HEAP_SHIFT > 25)
+#error "RGX_FW_HEAP_SHIFT is outside valid range [22, 25]"
+#endif
+
+#endif /* RGX_HEAP_FIRMWARE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/include/rgx_hwperf_common.h b/drivers/gpu/drm/img/img-rogue/include/rgx_hwperf_common.h
new file mode 100644 (file)
index 0000000..0635a51
--- /dev/null
@@ -0,0 +1,482 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX HWPerf and Debug Types and Defines Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Common data types definitions for hardware performance API
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGX_HWPERF_COMMON_H_
+#define RGX_HWPERF_COMMON_H_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* These structures are used on both GPU and CPU and must have a size that is a
+ * multiple of 64 bits (8 bytes), to allow the FW to write 8-byte quantities at
+ * 8-byte-aligned addresses. RGX_FW_STRUCT_*_ASSERT() is used to check this.
+ */
+
+/******************************************************************************
+ * Includes and Defines
+ *****************************************************************************/
+
+#include "img_types.h"
+#include "img_defs.h"
+
+#include "rgx_common_asserts.h"
+#include "pvrsrv_tlcommon.h"
+
+
+/******************************************************************************
+ * Packet Event Type Enumerations
+ *****************************************************************************/
+
+/*! Type used to encode the event that generated the packet.
+ * NOTE: When this type is updated the corresponding hwperfbin2json tool
+ * source needs to be updated as well. The RGX_HWPERF_EVENT_MASK_* macros will
+ * also need updating when adding new types.
+ *
+ * @par
+ * The event type values are incrementing integers for use as a shift ordinal
+ * in the event filtering process at the point events are generated.
+ * This scheme thus implies a limit of 63 event types.
+ */
+
+typedef IMG_UINT32 RGX_HWPERF_EVENT_TYPE;
+
+#define RGX_HWPERF_INVALID                             0x00U /*!< Invalid. Reserved value. */
+
+/*! FW types 0x01..0x06 */
+#define RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE   0x01U
+
+#define RGX_HWPERF_FW_BGSTART                  0x01U /*!< Background task processing start */
+#define RGX_HWPERF_FW_BGEND                            0x02U /*!< Background task end */
+#define RGX_HWPERF_FW_IRQSTART                 0x03U /*!< IRQ task processing start */
+
+#define RGX_HWPERF_FW_IRQEND                   0x04U /*!< IRQ task end */
+#define RGX_HWPERF_FW_DBGSTART                 0x05U /*!< Debug event start */
+#define RGX_HWPERF_FW_DBGEND                   0x06U /*!< Debug event end */
+
+#define RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE            0x06U
+
+/*! HW types 0x07..0x19 */
+#define RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE  0x07U
+
+#define RGX_HWPERF_HW_PMOOM_TAPAUSE            0x07U /*!< TA Pause at PM Out of Memory */
+
+#define RGX_HWPERF_HW_TAKICK                   0x08U /*!< TA task started */
+#define RGX_HWPERF_HW_TAFINISHED               0x09U /*!< TA task finished */
+#define RGX_HWPERF_HW_3DTQKICK                 0x0AU /*!< 3D TQ started */
+#define RGX_HWPERF_HW_3DKICK                   0x0BU /*!< 3D task started */
+#define RGX_HWPERF_HW_3DFINISHED               0x0CU /*!< 3D task finished */
+#define RGX_HWPERF_HW_CDMKICK                  0x0DU /*!< CDM task started */
+#define RGX_HWPERF_HW_CDMFINISHED              0x0EU /*!< CDM task finished */
+#define RGX_HWPERF_HW_TLAKICK                  0x0FU /*!< TLA task started */
+#define RGX_HWPERF_HW_TLAFINISHED              0x10U /*!< TLA task finished */
+#define RGX_HWPERF_HW_3DSPMKICK                        0x11U /*!< 3D SPM task started */
+#define RGX_HWPERF_HW_PERIODIC                 0x12U /*!< Periodic event with updated HW counters */
+#define RGX_HWPERF_HW_RTUKICK                  0x13U /*!< Reserved, future use */
+#define RGX_HWPERF_HW_RTUFINISHED              0x14U /*!< Reserved, future use */
+#define RGX_HWPERF_HW_SHGKICK                  0x15U /*!< Reserved, future use */
+#define RGX_HWPERF_HW_SHGFINISHED              0x16U /*!< Reserved, future use */
+#define RGX_HWPERF_HW_3DTQFINISHED             0x17U /*!< 3D TQ finished */
+#define RGX_HWPERF_HW_3DSPMFINISHED            0x18U /*!< 3D SPM task finished */
+
+#define RGX_HWPERF_HW_PMOOM_TARESUME   0x19U /*!< TA Resume after PM Out of Memory */
+
+/*! HW_EVENT_RANGE0 used up. Use next empty range below to add new hardware events */
+#define RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE   0x19U
+
+/*! other types 0x1A..0x1F */
+#define RGX_HWPERF_CLKS_CHG                            0x1AU /*!< Clock speed change in GPU */
+#define RGX_HWPERF_GPU_STATE_CHG               0x1BU /*!< GPU work state change */
+
+/*! power types 0x20..0x27 */
+#define RGX_HWPERF_PWR_EST_RANGE_FIRST_TYPE    0x20U
+#define RGX_HWPERF_PWR_EST_REQUEST             0x20U /*!< Power estimate requested (via GPIO) */
+#define RGX_HWPERF_PWR_EST_READY               0x21U /*!< Power estimate inputs ready */
+#define RGX_HWPERF_PWR_EST_RESULT              0x22U /*!< Power estimate result calculated */
+#define RGX_HWPERF_PWR_EST_RANGE_LAST_TYPE     0x22U
+
+#define RGX_HWPERF_PWR_CHG                             0x23U /*!< Power state change */
+
+/*! HW_EVENT_RANGE1 0x28..0x2F, for accommodating new hardware events */
+#define RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE  0x28U
+
+#define RGX_HWPERF_HW_TDMKICK                  0x28U /*!< TDM task started */
+#define RGX_HWPERF_HW_TDMFINISHED              0x29U /*!< TDM task finished */
+#define RGX_HWPERF_HW_NULLKICK                 0x2AU /*!< NULL event */
+
+#define RGX_HWPERF_HW_EVENT_RANGE1_LAST_TYPE 0x2AU
+
+/*! context switch types 0x30..0x31 */
+#define RGX_HWPERF_CSW_START                   0x30U /*!< HW context store started */
+#define RGX_HWPERF_CSW_FINISHED                        0x31U /*!< HW context store finished */
+
+/*! DVFS events */
+#define RGX_HWPERF_DVFS                                        0x32U /*!< Dynamic voltage/frequency scaling events */
+
+/*! firmware misc 0x38..0x39 */
+#define RGX_HWPERF_UFO                                 0x38U /*!< FW UFO Check / Update */
+#define RGX_HWPERF_FWACT                               0x39U /*!< FW Activity notification */
+
+/*! last */
+#define RGX_HWPERF_LAST_TYPE                   0x3BU
+
+/*! This enumeration must have a value that is a power of two as it is
+ * used in masks and a filter bit field (currently 64 bits long).
+ */
+#define RGX_HWPERF_MAX_TYPE                            0x40U
+
+static_assert(RGX_HWPERF_LAST_TYPE < RGX_HWPERF_MAX_TYPE, "Too many HWPerf event types");
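+
+/* Illustrative sketch (not part of the original sources) of the "shift
+ * ordinal" filtering scheme described for RGX_HWPERF_EVENT_TYPE above: each
+ * event type selects one bit of a 64-bit filter word. The variable name is
+ * hypothetical.
+ *
+ *   IMG_UINT64 ui64Filter = 0;
+ *   ui64Filter |= (IMG_UINT64)1 << RGX_HWPERF_HW_TAKICK;
+ *   ui64Filter |= (IMG_UINT64)1 << RGX_HWPERF_HW_TAFINISHED;
+ *
+ * An event of type eType would pass such a filter only when
+ * (ui64Filter & ((IMG_UINT64)1 << eType)) is non-zero, which is why the
+ * assert above keeps RGX_HWPERF_LAST_TYPE below RGX_HWPERF_MAX_TYPE (64).
+ */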
+
+/*! Macro used to check if an event type ID is present in the known set of hardware type events */
+#define HWPERF_PACKET_IS_HW_TYPE(_etype)       (((_etype) >= RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE && (_etype) <= RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE) || \
+                                                                                        ((_etype) >= RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE && (_etype) <= RGX_HWPERF_HW_EVENT_RANGE1_LAST_TYPE))
+
+/*! Macro used to check if an event type ID is present in the known set of firmware type events */
+#define HWPERF_PACKET_IS_FW_TYPE(_etype)                                       \
+       ((_etype) >= RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE &&    \
+        (_etype) <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE)
+
+
+typedef enum {
+       RGX_HWPERF_HOST_INVALID   = 0x00,           /*!< Invalid, do not use. */
+       RGX_HWPERF_HOST_ENQ       = 0x01,           /*!< ``0x01`` Kernel driver has queued GPU work.
+                                                    See RGX_HWPERF_HOST_ENQ_DATA */
+       RGX_HWPERF_HOST_UFO       = 0x02,           /*!< ``0x02`` UFO updated by the driver.
+                                                    See RGX_HWPERF_HOST_UFO_DATA */
+       RGX_HWPERF_HOST_ALLOC     = 0x03,           /*!< ``0x03`` Resource allocated.
+                                                    See RGX_HWPERF_HOST_ALLOC_DATA */
+       RGX_HWPERF_HOST_CLK_SYNC  = 0x04,           /*!< ``0x04`` GPU / Host clocks correlation data.
+                                                    See RGX_HWPERF_HOST_CLK_SYNC_DATA */
+       RGX_HWPERF_HOST_FREE      = 0x05,           /*!< ``0x05`` Resource freed,
+                                                    See RGX_HWPERF_HOST_FREE_DATA */
+       RGX_HWPERF_HOST_MODIFY    = 0x06,           /*!< ``0x06`` Resource modified / updated.
+                                                    See RGX_HWPERF_HOST_MODIFY_DATA */
+       RGX_HWPERF_HOST_DEV_INFO  = 0x07,           /*!< ``0x07`` Device Health status.
+                                                    See RGX_HWPERF_HOST_DEV_INFO_DATA */
+       RGX_HWPERF_HOST_INFO      = 0x08,           /*!< ``0x08`` Device memory usage information.
+                                                    See RGX_HWPERF_HOST_INFO_DATA */
+       RGX_HWPERF_HOST_SYNC_FENCE_WAIT = 0x09,     /*!< ``0x09`` Wait for sync event.
+                                                    See RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA */
+       RGX_HWPERF_HOST_SYNC_SW_TL_ADVANCE  = 0x0A, /*!< ``0x0A`` Software timeline advanced.
+                                                    See RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA */
+       RGX_HWPERF_HOST_CLIENT_INFO = 0x0B,                     /*!< ``0x0B`` Additional client info.
+                                                    See RGX_HWPERF_HOST_CLIENT_INFO_DATA */
+
+       /*! last */
+       RGX_HWPERF_HOST_LAST_TYPE,
+
+       /*! This enumeration must have a value that is a power of two as it is
+        * used in masks and a filter bit field (currently 32 bits long).
+        */
+       RGX_HWPERF_HOST_MAX_TYPE  = 0x20
+} RGX_HWPERF_HOST_EVENT_TYPE;
+
+/*!< The event type values are incrementing integers for use as a shift ordinal
+ * in the event filtering process at the point events are generated.
+ * This scheme thus implies a limit of 31 event types.
+ */
+static_assert(RGX_HWPERF_HOST_LAST_TYPE < RGX_HWPERF_HOST_MAX_TYPE, "Too many HWPerf host event types");
+
+
+/******************************************************************************
+ * Packet Header Format Version 2 Types
+ *****************************************************************************/
+
+/*! Major version number of the protocol in operation
+ */
+#define RGX_HWPERF_V2_FORMAT 2
+
+/*! Signature ASCII pattern 'HWP2' found in the first word of a HWPerfV2 packet
+ */
+#define HWPERF_PACKET_V2_SIG           0x48575032
+
+/*! Signature ASCII pattern 'HWPA' found in the first word of a HWPerfV2a packet
+ */
+#define HWPERF_PACKET_V2A_SIG          0x48575041
+
+/*! Signature ASCII pattern 'HWPB' found in the first word of a HWPerfV2b packet
+ */
+#define HWPERF_PACKET_V2B_SIG          0x48575042
+
+/*! Signature ASCII pattern 'HWPC' found in the first word of a HWPerfV2c packet
+ */
+#define HWPERF_PACKET_V2C_SIG          0x48575043
+
+#define HWPERF_PACKET_ISVALID(_val) (((_val) == HWPERF_PACKET_V2_SIG) || ((_val) == HWPERF_PACKET_V2A_SIG) || ((_val) == HWPERF_PACKET_V2B_SIG) || ((_val) == HWPERF_PACKET_V2C_SIG))
+/*!< Checks that the packet signature is one of the supported versions */
+
+/*! Type defines the HWPerf packet header common to all events. */
+typedef struct
+{
+       IMG_UINT32  ui32Sig;        /*!< Always the value HWPERF_PACKET_SIG */
+       IMG_UINT32  ui32Size;       /*!< Overall packet size in bytes */
+       IMG_UINT32  eTypeId;        /*!< Event type information field */
+       IMG_UINT32  ui32Ordinal;    /*!< Sequential number of the packet */
+       IMG_UINT64  ui64Timestamp;  /*!< Event timestamp */
+} RGX_HWPERF_V2_PACKET_HDR, *RGX_PHWPERF_V2_PACKET_HDR;
+
+RGX_FW_STRUCT_OFFSET_ASSERT(RGX_HWPERF_V2_PACKET_HDR, ui64Timestamp);
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_V2_PACKET_HDR);
+
+
+/*! Mask for use with the IMG_UINT32 ui32Size header field */
+#define RGX_HWPERF_SIZE_MASK         0xFFFFU
+
+/*! This macro defines the upper limit on the size of the largest variable
+ * length HWPerf packet, currently 3KB. This constant may be
+ * used to allocate a buffer to hold one packet.
+ * This upper limit is policed by packet-producing code.
+ */
+#define RGX_HWPERF_MAX_PACKET_SIZE   0xC00U
+
+/*! Defines an upper limit to the size of a variable length packet payload.
+ */
+#define RGX_HWPERF_MAX_PAYLOAD_SIZE     ((IMG_UINT32)(RGX_HWPERF_MAX_PACKET_SIZE-\
+       sizeof(RGX_HWPERF_V2_PACKET_HDR)))
+
+/*! Macro which takes a structure name and provides the packet size for
+ * a fixed size payload packet, rounded up to 8 bytes to align packets
+ * for 64 bit architectures. */
+#define RGX_HWPERF_MAKE_SIZE_FIXED(_struct)       ((IMG_UINT32)(RGX_HWPERF_SIZE_MASK&(sizeof(RGX_HWPERF_V2_PACKET_HDR)+PVR_ALIGN(sizeof(_struct), PVRSRVTL_PACKET_ALIGNMENT))))
+
+/*! Macro which takes the number of bytes written in the data payload of a
+ * packet for a variable size payload packet, rounded up to 8 bytes to
+ * align packets for 64 bit architectures. */
+#define RGX_HWPERF_MAKE_SIZE_VARIABLE(_size)      ((IMG_UINT32)(RGX_HWPERF_SIZE_MASK&((IMG_UINT32)sizeof(RGX_HWPERF_V2_PACKET_HDR)+PVR_ALIGN((_size), PVRSRVTL_PACKET_ALIGNMENT))))
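+
+/* Worked example (illustrative, not from the original sources), assuming
+ * PVRSRVTL_PACKET_ALIGNMENT is the 8-byte alignment mentioned above and a
+ * 24-byte RGX_HWPERF_V2_PACKET_HDR (four IMG_UINT32 fields plus one
+ * IMG_UINT64): for a hypothetical 20-byte payload structure,
+ *
+ *   RGX_HWPERF_MAKE_SIZE_FIXED(payload) = 24 + PVR_ALIGN(20, 8)
+ *                                       = 24 + 24 = 48 bytes
+ *
+ * i.e. the payload is padded so every packet ends on an 8-byte boundary.
+ */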
+
+/*! Macro to obtain the size of the packet */
+#define RGX_HWPERF_GET_SIZE(_packet_addr)         ((IMG_UINT16)(((_packet_addr)->ui32Size) & RGX_HWPERF_SIZE_MASK))
+
+/*! Macro to obtain the size of the packet data */
+#define RGX_HWPERF_GET_DATA_SIZE(_packet_addr)    (RGX_HWPERF_GET_SIZE(_packet_addr) - sizeof(RGX_HWPERF_V2_PACKET_HDR))
+
+/*! Masks for use with the IMG_UINT32 eTypeId header field */
+#define RGX_HWPERF_TYPEID_MASK                 0x0007FFFFU
+#define RGX_HWPERF_TYPEID_EVENT_MASK   0x00007FFFU
+#define RGX_HWPERF_TYPEID_THREAD_MASK  0x00008000U
+#define RGX_HWPERF_TYPEID_STREAM_MASK  0x00070000U
+#define RGX_HWPERF_TYPEID_META_DMA_MASK        0x00080000U
+#define RGX_HWPERF_TYPEID_M_CORE_MASK  0x00100000U
+#define RGX_HWPERF_TYPEID_OSID_MASK            0x07000000U
+
+/*! Meta thread macros for encoding the ID into the type field of a packet */
+#define RGX_HWPERF_META_THREAD_SHIFT   15U
+#define RGX_HWPERF_META_THREAD_ID0             0x0U  /*!< Meta Thread 0 ID */
+#define RGX_HWPERF_META_THREAD_ID1             0x1U  /*!< Meta Thread 1 ID */
+/*! Obsolete, kept for source compatibility */
+#define RGX_HWPERF_META_THREAD_MASK            0x1U
+/*! Stream ID macros for encoding the ID into the type field of a packet */
+#define RGX_HWPERF_STREAM_SHIFT                        16U
+/*! Meta DMA macro for encoding how the packet was generated into the type field of a packet */
+#define RGX_HWPERF_META_DMA_SHIFT              19U
+/*! Bit-shift macro used for encoding multi-core data into the type field of a packet */
+#define RGX_HWPERF_M_CORE_SHIFT                        20U
+/*! OSID bit-shift macro used for encoding OSID into type field of a packet */
+#define RGX_HWPERF_OSID_SHIFT                  24U
+typedef enum {
+       RGX_HWPERF_STREAM_ID0_FW,     /*!< Events from the Firmware/GPU */
+       RGX_HWPERF_STREAM_ID1_HOST,   /*!< Events from the Server host driver component */
+       RGX_HWPERF_STREAM_ID2_CLIENT, /*!< Events from the Client host driver component */
+       RGX_HWPERF_STREAM_ID_LAST,
+} RGX_HWPERF_STREAM_ID;
+
+/* Checks if all stream IDs can fit under RGX_HWPERF_TYPEID_STREAM_MASK. */
+static_assert(((IMG_UINT32)RGX_HWPERF_STREAM_ID_LAST - 1U) < (RGX_HWPERF_TYPEID_STREAM_MASK >> RGX_HWPERF_STREAM_SHIFT),
+               "Too many HWPerf stream IDs.");
+
+/*! Compile-time value used to seed the Multi-Core (MC) bit in the typeID field.
+ *  Only set by RGX_FIRMWARE builds.
+ */
+#if defined(RGX_FIRMWARE)
+# if defined(RGX_FEATURE_GPU_MULTICORE_SUPPORT)
+#define RGX_HWPERF_M_CORE_VALUE 1U  /*!< 1 => Multi-core supported */
+# else
+#define RGX_HWPERF_M_CORE_VALUE 0U  /*!< 0 => Multi-core not supported */
+# endif
+#else
+#define RGX_HWPERF_M_CORE_VALUE 0U  /*!< 0 => Multi-core not supported */
+#endif
+
+/*! Macros used to set the packet type and encode meta thread ID (0|1),
+ * HWPerf stream ID, multi-core capability and OSID within the typeID */
+#define RGX_HWPERF_MAKE_TYPEID(_stream, _type, _thread, _metadma, _osid)\
+               ((IMG_UINT32) ((RGX_HWPERF_TYPEID_STREAM_MASK&((IMG_UINT32)(_stream) << RGX_HWPERF_STREAM_SHIFT)) | \
+               (RGX_HWPERF_TYPEID_THREAD_MASK & ((IMG_UINT32)(_thread) << RGX_HWPERF_META_THREAD_SHIFT)) | \
+               (RGX_HWPERF_TYPEID_EVENT_MASK & (IMG_UINT32)(_type)) | \
+               (RGX_HWPERF_TYPEID_META_DMA_MASK & ((IMG_UINT32)(_metadma) << RGX_HWPERF_META_DMA_SHIFT)) | \
+               (RGX_HWPERF_TYPEID_OSID_MASK & ((IMG_UINT32)(_osid) << RGX_HWPERF_OSID_SHIFT)) | \
+               (RGX_HWPERF_TYPEID_M_CORE_MASK & ((IMG_UINT32)(RGX_HWPERF_M_CORE_VALUE) << RGX_HWPERF_M_CORE_SHIFT))))
+
+/*! Obtains the event type that generated the packet */
+#define RGX_HWPERF_GET_TYPE(_packet_addr)            (((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_EVENT_MASK)
+
+/*! Obtains the META Thread number that generated the packet */
+#define RGX_HWPERF_GET_THREAD_ID(_packet_addr)       (((((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_THREAD_MASK) >> RGX_HWPERF_META_THREAD_SHIFT))
+
+/*! Determines if the packet generated contains multi-core data */
+#define RGX_HWPERF_GET_M_CORE(_packet_addr)          (((_packet_addr)->eTypeId & RGX_HWPERF_TYPEID_M_CORE_MASK) >> RGX_HWPERF_M_CORE_SHIFT)
+
+/*! Obtains the guest OSID which resulted in packet generation */
+#define RGX_HWPERF_GET_OSID(_packet_addr)            (((_packet_addr)->eTypeId & RGX_HWPERF_TYPEID_OSID_MASK) >> RGX_HWPERF_OSID_SHIFT)
+
+/*! Obtain stream id */
+#define RGX_HWPERF_GET_STREAM_ID(_packet_addr)       (((((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_STREAM_MASK) >> RGX_HWPERF_STREAM_SHIFT))
+
+/*! Obtain information about how the packet was generated, which might affect payload total size */
+#define RGX_HWPERF_GET_META_DMA_INFO(_packet_addr)   (((((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_META_DMA_MASK) >> RGX_HWPERF_META_DMA_SHIFT))
+
+/*! Obtains a typed pointer to a packet given a buffer address */
+#define RGX_HWPERF_GET_PACKET(_buffer_addr)            ((RGX_HWPERF_V2_PACKET_HDR *)(void *)  (_buffer_addr))
+/*! Obtains a typed pointer to a data structure given a packet address */
+#define RGX_HWPERF_GET_PACKET_DATA_BYTES(_packet_addr) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR)))
+/*! Obtains a typed pointer to the next packet given a packet address */
+#define RGX_HWPERF_GET_NEXT_PACKET(_packet_addr)       ((RGX_HWPERF_V2_PACKET_HDR *)  (IMG_OFFSET_ADDR((_packet_addr), RGX_HWPERF_SIZE_MASK&((_packet_addr)->ui32Size))))
+
+/*! Obtains a typed pointer to a packet header given the packet data address */
+#define RGX_HWPERF_GET_PACKET_HEADER(_packet_addr)     ((RGX_HWPERF_V2_PACKET_HDR *)  (IMG_OFFSET_ADDR((_packet_addr), -(IMG_INT32)sizeof(RGX_HWPERF_V2_PACKET_HDR))))
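+
+/* Illustrative sketch (not part of the original sources): walking a buffer of
+ * HWPerf packets with the accessors above. pui8Buf and ui32Bytes are
+ * hypothetical names for data already read out of a transport layer stream.
+ *
+ *   RGX_HWPERF_V2_PACKET_HDR *psHdr = RGX_HWPERF_GET_PACKET(pui8Buf);
+ *   while ((IMG_UINT8 *)psHdr < pui8Buf + ui32Bytes &&
+ *          HWPERF_PACKET_ISVALID(psHdr->ui32Sig))
+ *   {
+ *       IMG_UINT32 ui32Type   = RGX_HWPERF_GET_TYPE(psHdr);
+ *       IMG_UINT32 ui32Stream = RGX_HWPERF_GET_STREAM_ID(psHdr);
+ *       void      *pvPayload  = RGX_HWPERF_GET_PACKET_DATA_BYTES(psHdr);
+ *
+ *       (decode pvPayload according to ui32Type and ui32Stream)
+ *
+ *       psHdr = RGX_HWPERF_GET_NEXT_PACKET(psHdr);
+ *   }
+ */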
+
+
+/******************************************************************************
+ * Other Common Defines
+ *****************************************************************************/
+
+/*! This macro is not a real array size, but indicates the array has a variable
+ * length only known at run-time but always contains at least 1 element. The
+ * final size of the array is deduced from the size field of a packet header.
+ */
+#define RGX_HWPERF_ONE_OR_MORE_ELEMENTS  1U
+
+/*! This macro is not a real array size, but indicates the array is optional
+ * and if present has a variable length only known at run-time. The final
+ * size of the array is deduced from the size field of a packet header. */
+#define RGX_HWPERF_ZERO_OR_MORE_ELEMENTS 1U
+
+
+/*! Masks for use with the IMG_UINT32 ui32BlkInfo field */
+#define RGX_HWPERF_BLKINFO_BLKCOUNT_MASK       0xFFFF0000U
+#define RGX_HWPERF_BLKINFO_BLKOFFSET_MASK      0x0000FFFFU
+
+/*! Shift for the NumBlocks and counter block offset field in ui32BlkInfo */
+#define RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT      16U
+#define RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT 0U
+
+/*! Macro used to set the block info word as a combination of two 16-bit integers */
+#define RGX_HWPERF_MAKE_BLKINFO(_numblks, _blkoffset) ((IMG_UINT32) ((RGX_HWPERF_BLKINFO_BLKCOUNT_MASK&((_numblks) << RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT)) | (RGX_HWPERF_BLKINFO_BLKOFFSET_MASK&((_blkoffset) << RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT))))
+
+/*! Macro used to obtain the number of counter blocks present in the packet */
+#define RGX_HWPERF_GET_BLKCOUNT(_blkinfo)            (((_blkinfo) & RGX_HWPERF_BLKINFO_BLKCOUNT_MASK) >> RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT)
+
+/*! Obtains the offset of the counter block stream in the packet */
+#define RGX_HWPERF_GET_BLKOFFSET(_blkinfo)           (((_blkinfo) & RGX_HWPERF_BLKINFO_BLKOFFSET_MASK) >> RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT)
+
+/*! This macro gets the number of blocks depending on the packet version */
+#define RGX_HWPERF_GET_NUMBLKS(_sig, _packet_data, _numblocks) \
+       do { \
+               if (HWPERF_PACKET_V2B_SIG == (_sig) || HWPERF_PACKET_V2C_SIG == (_sig)) \
+               { \
+                       (_numblocks) = RGX_HWPERF_GET_BLKCOUNT((_packet_data)->ui32BlkInfo);\
+               } \
+               else \
+               { \
+                       IMG_UINT32 ui32VersionOffset = (((_sig) == HWPERF_PACKET_V2_SIG) ? 1 : 3);\
+                       (_numblocks) = *(IMG_UINT16 *)(IMG_OFFSET_ADDR(&(_packet_data)->ui32WorkTarget, ui32VersionOffset)); \
+               } \
+       } while (0)
+
+/*! This macro gets the counter stream pointer depending on the packet version */
+#define RGX_HWPERF_GET_CNTSTRM(_sig, _hw_packet_data, _cntstream_ptr) \
+{ \
+       if (HWPERF_PACKET_V2B_SIG == (_sig) || HWPERF_PACKET_V2C_SIG == (_sig)) \
+       { \
+               (_cntstream_ptr) = (IMG_UINT32 *)(IMG_OFFSET_ADDR((_hw_packet_data), RGX_HWPERF_GET_BLKOFFSET((_hw_packet_data)->ui32BlkInfo))); \
+       } \
+       else \
+       { \
+               IMG_UINT32 ui32BlkStreamOffsetInWords = (((_sig) == HWPERF_PACKET_V2_SIG) ? 6 : 8); \
+               (_cntstream_ptr) = (IMG_UINT32 *)(IMG_OFFSET_ADDR_DW((_hw_packet_data), ui32BlkStreamOffsetInWords)); \
+       } \
+}
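+
+/* Example (illustrative sketch): a consumer extracting the counter block
+ * count and the counter stream from HW packet data, independent of the
+ * packet signature version. ui32Sig and psHWData are placeholder names;
+ * the packet-data type is assumed to carry the ui32BlkInfo/ui32WorkTarget
+ * fields referenced by the macros above.
+ *
+ *   IMG_UINT32  ui32NumBlocks;
+ *   IMG_UINT32 *pui32CntStream;
+ *
+ *   RGX_HWPERF_GET_NUMBLKS(ui32Sig, psHWData, ui32NumBlocks);
+ *   RGX_HWPERF_GET_CNTSTRM(ui32Sig, psHWData, pui32CntStream);
+ */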
+
+/*! Masks for use with the IMG_UINT32 ui32KickInfo field */
+#define RGX_HWPERF_KICKINFO_KICKID_MASK        0x000000FFU
+
+/*! Shift for the Kick ID field in ui32KickInfo */
+#define RGX_HWPERF_KICKINFO_KICKID_SHIFT 0U
+
+/*! Macro used to set the kick info field. */
+#define RGX_HWPERF_MAKE_KICKINFO(_kickid) ((IMG_UINT32) (RGX_HWPERF_KICKINFO_KICKID_MASK&((_kickid) << RGX_HWPERF_KICKINFO_KICKID_SHIFT)))
+
+/*! Macro used to obtain the Kick ID if present in the packet */
+#define RGX_HWPERF_GET_KICKID(_kickinfo)            (((_kickinfo) & RGX_HWPERF_KICKINFO_KICKID_MASK) >> RGX_HWPERF_KICKINFO_KICKID_SHIFT)
+
+/*! Masks for use with the RGX_HWPERF_UFO_EV eEvType field */
+#define RGX_HWPERF_UFO_STREAMSIZE_MASK 0xFFFF0000U
+#define RGX_HWPERF_UFO_STREAMOFFSET_MASK 0x0000FFFFU
+
+/*! Shift for the UFO count and data stream fields */
+#define RGX_HWPERF_UFO_STREAMSIZE_SHIFT 16U
+#define RGX_HWPERF_UFO_STREAMOFFSET_SHIFT 0U
+
+/*! Macro used to set UFO stream info word as a combination of two 16-bit integers */
+#define RGX_HWPERF_MAKE_UFOPKTINFO(_ssize, _soff) \
+        ((IMG_UINT32) ((RGX_HWPERF_UFO_STREAMSIZE_MASK&((_ssize) << RGX_HWPERF_UFO_STREAMSIZE_SHIFT)) | \
+        (RGX_HWPERF_UFO_STREAMOFFSET_MASK&((_soff) << RGX_HWPERF_UFO_STREAMOFFSET_SHIFT))))
+
+/*! Macro used to obtain the UFO count */
+#define RGX_HWPERF_GET_UFO_STREAMSIZE(_streaminfo) \
+        (((_streaminfo) & RGX_HWPERF_UFO_STREAMSIZE_MASK) >> RGX_HWPERF_UFO_STREAMSIZE_SHIFT)
+
+/*! Obtains the offset of the UFO stream in the packet */
+#define RGX_HWPERF_GET_UFO_STREAMOFFSET(_streaminfo) \
+        (((_streaminfo) & RGX_HWPERF_UFO_STREAMOFFSET_MASK) >> RGX_HWPERF_UFO_STREAMOFFSET_SHIFT)
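+
+/* Example (illustrative sketch): packing and unpacking the UFO stream info
+ * word. The values 4 and 16 below are arbitrary sample inputs.
+ *
+ *   IMG_UINT32 ui32Info = RGX_HWPERF_MAKE_UFOPKTINFO(4U, 16U);
+ *   IMG_UINT32 ui32Size = RGX_HWPERF_GET_UFO_STREAMSIZE(ui32Info);   // 4
+ *   IMG_UINT32 ui32Off  = RGX_HWPERF_GET_UFO_STREAMOFFSET(ui32Info); // 16
+ */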
+
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* RGX_HWPERF_COMMON_H_ */
+
+/******************************************************************************
+ End of file
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/include/rgx_meta.h b/drivers/gpu/drm/img/img-rogue/include/rgx_meta.h
new file mode 100644 (file)
index 0000000..bdff11f
--- /dev/null
@@ -0,0 +1,385 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX META definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX META helper definitions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGX_META_H)
+#define RGX_META_H
+
+
+/***** The META HW register definitions in the file are updated manually *****/
+
+
+#include "img_defs.h"
+#include "km/rgxdefs_km.h"
+
+
+/******************************************************************************
+* META registers and MACROS
+******************************************************************************/
+#define META_CR_CTRLREG_BASE(T)                                        (0x04800000U + (0x1000U*(T)))
+
+#define META_CR_TXPRIVEXT                                              (0x048000E8)
+#define META_CR_TXPRIVEXT_MINIM_EN                             (IMG_UINT32_C(0x1) << 7)
+
+#define META_CR_SYSC_JTAG_THREAD                               (0x04830030)
+#define META_CR_SYSC_JTAG_THREAD_PRIV_EN               (0x00000004)
+
+#define META_CR_PERF_COUNT0                                            (0x0480FFE0)
+#define META_CR_PERF_COUNT1                                            (0x0480FFE8)
+#define META_CR_PERF_COUNT_CTRL_SHIFT                  (28)
+#define META_CR_PERF_COUNT_CTRL_MASK                   (0xF0000000)
+#define META_CR_PERF_COUNT_CTRL_DCACHEHITS             (IMG_UINT32_C(0x8) << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_CTRL_ICACHEHITS             (IMG_UINT32_C(0x9) << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_CTRL_ICACHEMISS             (IMG_UINT32_C(0xA) << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_CTRL_ICORE                  (IMG_UINT32_C(0xD) << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_THR_SHIFT                   (24)
+#define META_CR_PERF_COUNT_THR_MASK                            (0x0F000000)
+#define META_CR_PERF_COUNT_THR_0                               (IMG_UINT32_C(0x1) << META_CR_PERF_COUNT_THR_SHIFT)
+#define META_CR_PERF_COUNT_THR_1                               (IMG_UINT32_C(0x2) << META_CR_PERF_COUNT_THR_SHIFT)
+
+#define META_CR_TxVECINT_BHALT                                 (0x04820500)
+#define META_CR_PERF_ICORE0                                            (0x0480FFD0)
+#define META_CR_PERF_ICORE1                                            (0x0480FFD8)
+#define META_CR_PERF_ICORE_DCACHEMISS                  (0x8)
+
+#define META_CR_PERF_COUNT(CTRL, THR)                  ((META_CR_PERF_COUNT_CTRL_##CTRL << META_CR_PERF_COUNT_CTRL_SHIFT) | \
+                                                                                                (THR << META_CR_PERF_COUNT_THR_SHIFT))
+
+#define META_CR_TXUXXRXDT_OFFSET                               (META_CR_CTRLREG_BASE(0U) + 0x0000FFF0U)
+#define META_CR_TXUXXRXRQ_OFFSET                               (META_CR_CTRLREG_BASE(0U) + 0x0000FFF8U)
+
+#define META_CR_TXUXXRXRQ_DREADY_BIT                   (0x80000000U)   /* Poll for done */
+#define META_CR_TXUXXRXRQ_RDnWR_BIT                            (0x00010000U)   /* Set for read  */
+#define META_CR_TXUXXRXRQ_TX_S                                 (12)
+#define META_CR_TXUXXRXRQ_RX_S                                 (4)
+#define META_CR_TXUXXRXRQ_UXX_S                                        (0)
+
+#define META_CR_TXUIN_ID                                               (0x0)                   /* Internal ctrl regs */
+#define META_CR_TXUD0_ID                                               (0x1)                   /* Data unit regs */
+#define META_CR_TXUD1_ID                                               (0x2)                   /* Data unit regs */
+#define META_CR_TXUA0_ID                                               (0x3)                   /* Address unit regs */
+#define META_CR_TXUA1_ID                                               (0x4)                   /* Address unit regs */
+#define META_CR_TXUPC_ID                                               (0x5)                   /* PC registers */
+
+/* Macros to calculate register access values */
+#define META_CR_CORE_REG(Thr, RegNum, Unit)    (((IMG_UINT32)(Thr)             << META_CR_TXUXXRXRQ_TX_S) | \
+                                                                                        ((IMG_UINT32)(RegNum)  << META_CR_TXUXXRXRQ_RX_S) | \
+                                                                                        ((IMG_UINT32)(Unit)    << META_CR_TXUXXRXRQ_UXX_S))
+
+#define META_CR_THR0_PC                META_CR_CORE_REG(0, 0, META_CR_TXUPC_ID)
+#define META_CR_THR0_PCX       META_CR_CORE_REG(0, 1, META_CR_TXUPC_ID)
+#define META_CR_THR0_SP                META_CR_CORE_REG(0, 0, META_CR_TXUA0_ID)
+
+#define META_CR_THR1_PC                META_CR_CORE_REG(1, 0, META_CR_TXUPC_ID)
+#define META_CR_THR1_PCX       META_CR_CORE_REG(1, 1, META_CR_TXUPC_ID)
+#define META_CR_THR1_SP                META_CR_CORE_REG(1, 0, META_CR_TXUA0_ID)
+
+#define SP_ACCESS(Thread)      META_CR_CORE_REG(Thread, 0, META_CR_TXUA0_ID)
+#define PC_ACCESS(Thread)      META_CR_CORE_REG(Thread, 0, META_CR_TXUPC_ID)
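+
+/* Example (illustrative sketch): composing the request word written to
+ * META_CR_TXUXXRXRQ_OFFSET in order to read thread 0's program counter.
+ * The read flag and poll bit come from the defines above; the register
+ * write/poll/read sequence is a sketch of the expected usage, not a
+ * definitive driver routine.
+ *
+ *   IMG_UINT32 ui32Req = META_CR_THR0_PC | META_CR_TXUXXRXRQ_RDnWR_BIT;
+ *   // write ui32Req to META_CR_TXUXXRXRQ_OFFSET, poll for
+ *   // META_CR_TXUXXRXRQ_DREADY_BIT, then read the value back from
+ *   // META_CR_TXUXXRXDT_OFFSET.
+ */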
+
+#define META_CR_COREREG_ENABLE                 (0x0000000U)
+#define META_CR_COREREG_STATUS                 (0x0000010U)
+#define META_CR_COREREG_DEFR                   (0x00000A0U)
+#define META_CR_COREREG_PRIVEXT                        (0x00000E8U)
+
+#define META_CR_T0ENABLE_OFFSET                        (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_ENABLE)
+#define META_CR_T0STATUS_OFFSET                        (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_STATUS)
+#define META_CR_T0DEFR_OFFSET                  (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_DEFR)
+#define META_CR_T0PRIVEXT_OFFSET               (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_PRIVEXT)
+
+#define META_CR_T1ENABLE_OFFSET                        (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_ENABLE)
+#define META_CR_T1STATUS_OFFSET                        (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_STATUS)
+#define META_CR_T1DEFR_OFFSET                  (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_DEFR)
+#define META_CR_T1PRIVEXT_OFFSET               (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_PRIVEXT)
+
+#define META_CR_TXENABLE_ENABLE_BIT            (0x00000001U)   /* Set if running */
+#define META_CR_TXSTATUS_PRIV                  (0x00020000U)
+#define META_CR_TXPRIVEXT_MINIM                        (0x00000080U)
+
+#define META_MEM_GLOBAL_RANGE_BIT              (0x80000000U)
+
+#define META_CR_TXCLKCTRL          (0x048000B0)
+#define META_CR_TXCLKCTRL_ALL_ON   (0x55111111)
+#define META_CR_TXCLKCTRL_ALL_AUTO (0xAA222222)
+
+
+/******************************************************************************
+* META LDR Format
+******************************************************************************/
+/* Block header structure */
+typedef struct
+{
+       IMG_UINT32      ui32DevID;
+       IMG_UINT32      ui32SLCode;
+       IMG_UINT32      ui32SLData;
+       IMG_UINT16      ui16PLCtrl;
+       IMG_UINT16      ui16CRC;
+
+} RGX_META_LDR_BLOCK_HDR;
+
+/* High level data stream block structure */
+typedef struct
+{
+       IMG_UINT16      ui16Cmd;
+       IMG_UINT16      ui16Length;
+       IMG_UINT32      ui32Next;
+       IMG_UINT32      aui32CmdData[4];
+
+} RGX_META_LDR_L1_DATA_BLK;
+
+/* High level data stream block structure */
+typedef struct
+{
+       IMG_UINT16      ui16Tag;
+       IMG_UINT16      ui16Length;
+       IMG_UINT32      aui32BlockData[4];
+
+} RGX_META_LDR_L2_DATA_BLK;
+
+/* Config command structure */
+typedef struct
+{
+       IMG_UINT32      ui32Type;
+       IMG_UINT32      aui32BlockData[4];
+
+} RGX_META_LDR_CFG_BLK;
+
+/* Block type definitions */
+#define RGX_META_LDR_COMMENT_TYPE_MASK                 (0x0010U)
+#define RGX_META_LDR_BLK_IS_COMMENT(X)                 ((X & RGX_META_LDR_COMMENT_TYPE_MASK) != 0U)
+
+/* Command definitions
+ *  Value   Name            Description
+ *  0       LoadMem         Load memory with binary data.
+ *  1       LoadCore        Load a set of core registers.
+ *  2       LoadMMReg       Load a set of memory mapped registers.
+ *  3       StartThreads    Set each thread PC and SP, then enable threads.
+ *  4       ZeroMem         Zeros a memory region.
+ *  5       Config          Perform a configuration command.
+ */
+#define RGX_META_LDR_CMD_MASK                          (0x000FU)
+
+#define RGX_META_LDR_CMD_LOADMEM                       (0x0000U)
+#define RGX_META_LDR_CMD_LOADCORE                      (0x0001U)
+#define RGX_META_LDR_CMD_LOADMMREG                     (0x0002U)
+#define RGX_META_LDR_CMD_START_THREADS         (0x0003U)
+#define RGX_META_LDR_CMD_ZEROMEM                       (0x0004U)
+#define RGX_META_LDR_CMD_CONFIG                        (0x0005U)
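+
+/* Example (illustrative sketch): decoding an LDR L1 data block header.
+ * psL1Blk is a placeholder pointer to an RGX_META_LDR_L1_DATA_BLK.
+ *
+ *   IMG_UINT16 ui16Cmd = psL1Blk->ui16Cmd;
+ *
+ *   if (RGX_META_LDR_BLK_IS_COMMENT(ui16Cmd))
+ *   {
+ *       // comment blocks carry no loadable payload and can be skipped
+ *   }
+ *   else if ((ui16Cmd & RGX_META_LDR_CMD_MASK) == RGX_META_LDR_CMD_LOADMEM)
+ *   {
+ *       // aui32CmdData[] is assumed here to carry the load address and
+ *       // length for illustration only.
+ *   }
+ */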
+
+/* Config Command definitions
+ *  Value   Name        Description
+ *  0       Pause       Pause for x times 100 instructions
+ *  1       Read        Read a value from register - No value return needed.
+ *                      Utilises effects of issuing reads to certain registers
+ *  2       Write       Write to mem location
+ *  3       MemSet      Set mem to value
+ *  4       MemCheck    Check mem for a specific value.
+ */
+#define RGX_META_LDR_CFG_PAUSE                 (0x0000)
+#define RGX_META_LDR_CFG_READ                  (0x0001)
+#define RGX_META_LDR_CFG_WRITE                 (0x0002)
+#define RGX_META_LDR_CFG_MEMSET                        (0x0003)
+#define RGX_META_LDR_CFG_MEMCHECK              (0x0004)
+
+
+/******************************************************************************
+* RGX FW segmented MMU definitions
+******************************************************************************/
+/* All threads can access the segment */
+#define RGXFW_SEGMMU_ALLTHRS   (IMG_UINT32_C(0xf) << 8U)
+/* Writable */
+#define RGXFW_SEGMMU_WRITEABLE (0x1U << 1U)
+/* All threads can access and writable */
+#define RGXFW_SEGMMU_ALLTHRS_WRITEABLE (RGXFW_SEGMMU_ALLTHRS | RGXFW_SEGMMU_WRITEABLE)
+
+/* Direct map region 10 used for mapping GPU memory - max 8MB */
+#define RGXFW_SEGMMU_DMAP_GPU_ID                       (10U)
+#define RGXFW_SEGMMU_DMAP_GPU_ADDR_START       (0x07000000U)
+#define RGXFW_SEGMMU_DMAP_GPU_MAX_SIZE         (0x00800000U)
+
+/* Segment IDs */
+#define RGXFW_SEGMMU_DATA_ID                   (1U)
+#define RGXFW_SEGMMU_BOOTLDR_ID                        (2U)
+#define RGXFW_SEGMMU_TEXT_ID                   (RGXFW_SEGMMU_BOOTLDR_ID)
+
+/*
+ * SLC caching strategy in S7 and volcanic is emitted through the segment MMU.
+ * All the segments configured through the macro RGXFW_SEGMMU_OUTADDR_TOP are
+ * CACHED in the SLC.
+ * The interface has been kept the same to simplify the code changes.
+ * The bifdm argument is ignored (no longer relevant) in S7 and volcanic.
+ */
+#define RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC(pers, slc_policy, mmu_ctx)      ((((IMG_UINT64) ((pers)    & 0x3U))  << 52) | \
+                                                                           (((IMG_UINT64) ((mmu_ctx) & 0xFFU)) << 44) | \
+                                                                           (((IMG_UINT64) ((slc_policy) & 0x1U))  << 40))
+#define RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED(mmu_ctx)      RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC(0x3U, 0x0U, mmu_ctx)
+#define RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_UNCACHED(mmu_ctx)    RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC(0x0U, 0x1U, mmu_ctx)
+
+/* To configure the Page Catalog and BIF-DM fed into the BIF for Garten
+ * accesses through this segment
+ */
+#define RGXFW_SEGMMU_OUTADDR_TOP_SLC(pc, bifdm)              (((IMG_UINT64)((IMG_UINT64)(pc)    & 0xFU) << 44U) | \
+                                                              ((IMG_UINT64)((IMG_UINT64)(bifdm) & 0xFU) << 40U))
+
+#define RGXFW_SEGMMU_META_BIFDM_ID   (0x7U)
+#if !defined(__KERNEL__) && defined(RGX_FEATURE_META)
+#if defined(RGX_FEATURE_SLC_VIVT)
+#define RGXFW_SEGMMU_OUTADDR_TOP_SLC_CACHED    RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED
+#define RGXFW_SEGMMU_OUTADDR_TOP_SLC_UNCACHED  RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_UNCACHED
+#define RGXFW_SEGMMU_OUTADDR_TOP_META          RGXFW_SEGMMU_OUTADDR_TOP_SLC_CACHED
+#else
+#define RGXFW_SEGMMU_OUTADDR_TOP_SLC_CACHED    RGXFW_SEGMMU_OUTADDR_TOP_SLC
+#define RGXFW_SEGMMU_OUTADDR_TOP_SLC_UNCACHED  RGXFW_SEGMMU_OUTADDR_TOP_SLC
+#define RGXFW_SEGMMU_OUTADDR_TOP_META(pc)      RGXFW_SEGMMU_OUTADDR_TOP_SLC(pc, RGXFW_SEGMMU_META_BIFDM_ID)
+#endif
+#endif
+
+/* META segments have 4kB minimum size */
+#define RGXFW_SEGMMU_ALIGN                     (0x1000U)
+
+/* Segmented MMU registers (n = segment id) */
+#define META_CR_MMCU_SEGMENTn_BASE(n)                  (0x04850000U + ((n)*0x10U))
+#define META_CR_MMCU_SEGMENTn_LIMIT(n)                 (0x04850004U + ((n)*0x10U))
+#define META_CR_MMCU_SEGMENTn_OUTA0(n)                 (0x04850008U + ((n)*0x10U))
+#define META_CR_MMCU_SEGMENTn_OUTA1(n)                 (0x0485000CU + ((n)*0x10U))
+
+/* The following defines must be recalculated if the Meta MMU segments used
+ * to access Host-FW data are changed.
+ * Current combinations are:
+ * - SLC uncached, META cached,   FW base address 0x70000000
+ * - SLC uncached, META uncached, FW base address 0xF0000000
+ * - SLC cached,   META cached,   FW base address 0x10000000
+ * - SLC cached,   META uncached, FW base address 0x90000000
+ */
+#define RGXFW_SEGMMU_DATA_BASE_ADDRESS        (0x10000000U)
+#define RGXFW_SEGMMU_DATA_META_CACHED         (0x0U)
+#define RGXFW_SEGMMU_DATA_META_UNCACHED       (META_MEM_GLOBAL_RANGE_BIT) // 0x80000000
+#define RGXFW_SEGMMU_DATA_META_CACHE_MASK     (META_MEM_GLOBAL_RANGE_BIT)
+/* For non-VIVT SLCs the cacheability of the FW data in the SLC is selected in
+ * the PTEs for the FW data, not in the Meta Segment MMU, which means these
+ * defines have no real effect in those cases
+ */
+#define RGXFW_SEGMMU_DATA_VIVT_SLC_CACHED     (0x0U)
+#define RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED   (0x60000000U)
+#define RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK (0x60000000U)
+
+
+#if defined(SECURE_FW_CODE_OSID) && defined(RGX_FEATURE_META)
+#error "SECURE_FW_CODE_OSID is not supported on META cores"
+#endif
+
+
+/******************************************************************************
+* RGX FW Bootloader defaults
+******************************************************************************/
+#define RGXFW_BOOTLDR_META_ADDR                (0x40000000U)
+#define RGXFW_BOOTLDR_DEVV_ADDR_0      (0xC0000000U)
+#define RGXFW_BOOTLDR_DEVV_ADDR_1      (0x000000E1)
+#define RGXFW_BOOTLDR_DEVV_ADDR                ((((IMG_UINT64) RGXFW_BOOTLDR_DEVV_ADDR_1) << 32) | RGXFW_BOOTLDR_DEVV_ADDR_0)
+#define RGXFW_BOOTLDR_LIMIT            (0x1FFFF000)
+#define RGXFW_MAX_BOOTLDR_OFFSET       (0x1000)
+
+/* Bootloader configuration offset is in dwords (512 bytes) */
+#define RGXFW_BOOTLDR_CONF_OFFSET      (0x80)
+
+
+/******************************************************************************
+* RGX META Stack
+******************************************************************************/
+#define RGX_META_STACK_SIZE  (0x1000U)
+
+/******************************************************************************
+ RGX META Core memory
+******************************************************************************/
+/* code and data both map to the same physical memory */
+#define RGX_META_COREMEM_CODE_ADDR   (0x80000000U)
+#define RGX_META_COREMEM_DATA_ADDR   (0x82000000U)
+#define RGX_META_COREMEM_OFFSET_MASK (0x01ffffffU)
+
+#if defined(__KERNEL__)
+#define RGX_META_IS_COREMEM_CODE(A, B)  (((A) >= RGX_META_COREMEM_CODE_ADDR) && ((A) < (RGX_META_COREMEM_CODE_ADDR + (B))))
+#define RGX_META_IS_COREMEM_DATA(A, B)  (((A) >= RGX_META_COREMEM_DATA_ADDR) && ((A) < (RGX_META_COREMEM_DATA_ADDR + (B))))
+#endif
+
+/******************************************************************************
+* 2nd thread
+******************************************************************************/
+#define RGXFW_THR1_PC          (0x18930000)
+#define RGXFW_THR1_SP          (0x78890000)
+
+/******************************************************************************
+* META compatibility
+******************************************************************************/
+
+#define META_CR_CORE_ID                        (0x04831000)
+#define META_CR_CORE_ID_VER_SHIFT      (16U)
+#define META_CR_CORE_ID_VER_CLRMSK     (0XFF00FFFFU)
+
+#if !defined(__KERNEL__) && defined(RGX_FEATURE_META)
+
+       #if (RGX_FEATURE_META == MTP218)
+       #define RGX_CR_META_CORE_ID_VALUE 0x19
+       #elif (RGX_FEATURE_META == MTP219)
+       #define RGX_CR_META_CORE_ID_VALUE 0x1E
+       #elif (RGX_FEATURE_META == LTP218)
+       #define RGX_CR_META_CORE_ID_VALUE 0x1C
+       #elif (RGX_FEATURE_META == LTP217)
+       #define RGX_CR_META_CORE_ID_VALUE 0x1F
+       #else
+       #error "Unknown META ID"
+       #endif
+#else
+
+       #define RGX_CR_META_MTP218_CORE_ID_VALUE 0x19
+       #define RGX_CR_META_MTP219_CORE_ID_VALUE 0x1E
+       #define RGX_CR_META_LTP218_CORE_ID_VALUE 0x1C
+       #define RGX_CR_META_LTP217_CORE_ID_VALUE 0x1F
+
+#endif
+#define RGXFW_PROCESSOR_META        "META"
+
+
+#endif /* RGX_META_H */
+
+/******************************************************************************
+ End of file (rgx_meta.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/include/rgx_mips.h b/drivers/gpu/drm/img/img-rogue/include/rgx_mips.h
new file mode 100644 (file)
index 0000000..c2f3818
--- /dev/null
@@ -0,0 +1,374 @@
+/*************************************************************************/ /*!
+@File           rgx_mips.h
+@Title
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Platform       RGX
+@Description    RGX MIPS definitions, kernel/user space
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGX_MIPS_H)
+#define RGX_MIPS_H
+
+/*
+ * Utility defines for memory management
+ */
+#define RGXMIPSFW_LOG2_PAGE_SIZE_4K              (12)
+#define RGXMIPSFW_PAGE_SIZE_4K                   (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_4K)
+#define RGXMIPSFW_PAGE_MASK_4K                   (RGXMIPSFW_PAGE_SIZE_4K - 1)
+#define RGXMIPSFW_LOG2_PAGE_SIZE_64K             (16)
+#define RGXMIPSFW_PAGE_SIZE_64K                  (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_64K)
+#define RGXMIPSFW_PAGE_MASK_64K                  (RGXMIPSFW_PAGE_SIZE_64K - 1)
+#define RGXMIPSFW_LOG2_PAGE_SIZE_256K            (18)
+#define RGXMIPSFW_PAGE_SIZE_256K                 (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_256K)
+#define RGXMIPSFW_PAGE_MASK_256K                 (RGXMIPSFW_PAGE_SIZE_256K - 1)
+#define RGXMIPSFW_LOG2_PAGE_SIZE_1MB             (20)
+#define RGXMIPSFW_PAGE_SIZE_1MB                  (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_1MB)
+#define RGXMIPSFW_PAGE_MASK_1MB                  (RGXMIPSFW_PAGE_SIZE_1MB - 1)
+#define RGXMIPSFW_LOG2_PAGE_SIZE_4MB             (22)
+#define RGXMIPSFW_PAGE_SIZE_4MB                  (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_4MB)
+#define RGXMIPSFW_PAGE_MASK_4MB                  (RGXMIPSFW_PAGE_SIZE_4MB - 1)
+#define RGXMIPSFW_LOG2_PTE_ENTRY_SIZE            (2)
+/* log2 page table sizes dependent on FW heap size and page size (for each OS) */
+#define RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K         (RGX_FIRMWARE_HEAP_SHIFT - RGXMIPSFW_LOG2_PAGE_SIZE_4K + RGXMIPSFW_LOG2_PTE_ENTRY_SIZE)
+#define RGXMIPSFW_LOG2_PAGETABLE_SIZE_64K        (RGX_FIRMWARE_HEAP_SHIFT - RGXMIPSFW_LOG2_PAGE_SIZE_64K + RGXMIPSFW_LOG2_PTE_ENTRY_SIZE)
+/* Maximum number of page table pages (both Host and MIPS pages) */
+#define RGXMIPSFW_MAX_NUM_PAGETABLE_PAGES        (4)
+/* Total number of TLB entries */
+#define RGXMIPSFW_NUMBER_OF_TLB_ENTRIES          (16)
+/* "Uncached" caching policy */
+#define RGXMIPSFW_UNCACHED_CACHE_POLICY          (0X00000002U)
+/* "Write-back write-allocate" caching policy */
+#define RGXMIPSFW_WRITEBACK_CACHE_POLICY         (0X00000003)
+/* "Write-through no write-allocate" caching policy */
+#define RGXMIPSFW_WRITETHROUGH_CACHE_POLICY      (0X00000001)
+/* Cached policy used by MIPS in case of physical bus on 32 bit */
+#define RGXMIPSFW_CACHED_POLICY                  (RGXMIPSFW_WRITEBACK_CACHE_POLICY)
+/* Cached policy used by MIPS in case of physical bus on more than 32 bit */
+#define RGXMIPSFW_CACHED_POLICY_ABOVE_32BIT      (RGXMIPSFW_WRITETHROUGH_CACHE_POLICY)
+/* Total number of Remap entries */
+#define RGXMIPSFW_NUMBER_OF_REMAP_ENTRIES        (2 * RGXMIPSFW_NUMBER_OF_TLB_ENTRIES)
+
+
+/*
+ * MIPS EntryLo/PTE format
+ */
+
+#define RGXMIPSFW_ENTRYLO_READ_INHIBIT_SHIFT     (31U)
+#define RGXMIPSFW_ENTRYLO_READ_INHIBIT_CLRMSK    (0X7FFFFFFF)
+#define RGXMIPSFW_ENTRYLO_READ_INHIBIT_EN        (0X80000000U)
+
+#define RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_SHIFT     (30U)
+#define RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_CLRMSK    (0XBFFFFFFF)
+#define RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_EN        (0X40000000U)
+
+/* Page Frame Number */
+#define RGXMIPSFW_ENTRYLO_PFN_SHIFT              (6)
+#define RGXMIPSFW_ENTRYLO_PFN_ALIGNSHIFT         (12)
+/* Mask used for the MIPS Page Table in case of physical bus on 32 bit */
+#define RGXMIPSFW_ENTRYLO_PFN_MASK               (0x03FFFFC0)
+#define RGXMIPSFW_ENTRYLO_PFN_SIZE               (20)
+/* Mask used for the MIPS Page Table in case of physical bus on more than 32 bit */
+#define RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT   (0x3FFFFFC0U)
+#define RGXMIPSFW_ENTRYLO_PFN_SIZE_ABOVE_32BIT   (24)
+#define RGXMIPSFW_ADDR_TO_ENTRYLO_PFN_RSHIFT     (RGXMIPSFW_ENTRYLO_PFN_ALIGNSHIFT - \
+                                                  RGXMIPSFW_ENTRYLO_PFN_SHIFT)
+
+#define RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT     (3U)
+#define RGXMIPSFW_ENTRYLO_CACHE_POLICY_CLRMSK    (0XFFFFFFC7U)
+
+#define RGXMIPSFW_ENTRYLO_DIRTY_SHIFT            (2U)
+#define RGXMIPSFW_ENTRYLO_DIRTY_CLRMSK           (0XFFFFFFFB)
+#define RGXMIPSFW_ENTRYLO_DIRTY_EN               (0X00000004U)
+
+#define RGXMIPSFW_ENTRYLO_VALID_SHIFT            (1U)
+#define RGXMIPSFW_ENTRYLO_VALID_CLRMSK           (0XFFFFFFFD)
+#define RGXMIPSFW_ENTRYLO_VALID_EN               (0X00000002U)
+
+#define RGXMIPSFW_ENTRYLO_GLOBAL_SHIFT           (0U)
+#define RGXMIPSFW_ENTRYLO_GLOBAL_CLRMSK          (0XFFFFFFFE)
+#define RGXMIPSFW_ENTRYLO_GLOBAL_EN              (0X00000001U)
+
+#define RGXMIPSFW_ENTRYLO_DVG                    (RGXMIPSFW_ENTRYLO_DIRTY_EN | \
+                                                  RGXMIPSFW_ENTRYLO_VALID_EN | \
+                                                  RGXMIPSFW_ENTRYLO_GLOBAL_EN)
+#define RGXMIPSFW_ENTRYLO_UNCACHED               (RGXMIPSFW_UNCACHED_CACHE_POLICY << \
+                                                  RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT)
+#define RGXMIPSFW_ENTRYLO_DVG_UNCACHED           (RGXMIPSFW_ENTRYLO_DVG | RGXMIPSFW_ENTRYLO_UNCACHED)
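+
+/* Example (illustrative sketch): building an uncached, dirty, valid, global
+ * EntryLo/PTE value for a 4k-aligned physical address. ui64PhysAddr is a
+ * placeholder name.
+ *
+ *   IMG_UINT32 ui32EntryLo =
+ *       (IMG_UINT32)(ui64PhysAddr >> RGXMIPSFW_ADDR_TO_ENTRYLO_PFN_RSHIFT) |
+ *       RGXMIPSFW_ENTRYLO_DVG_UNCACHED;
+ */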
+
+
+/* Remap Range Config Addr Out */
+/* These defines refer to the upper half of the Remap Range Config register */
+#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_MASK      (0x0FFFFFF0)
+#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT     (4)  /* wrt upper half of the register */
+#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT (12)
+#define RGXMIPSFW_ADDR_TO_RR_ADDR_OUT_RSHIFT     (RGXMIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT - \
+                                                  RGXMIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT)
+
+#if defined(SECURE_FW_CODE_OSID) && (SECURE_FW_CODE_OSID + 1 > 2)
+#define MIPS_FW_CODE_OSID                        (SECURE_FW_CODE_OSID)
+#elif defined(SECURE_FW_CODE_OSID)
+#define MIPS_FW_CODE_OSID                        (1U)
+#endif
+
+
+/*
+ * Pages to trampoline problematic physical addresses:
+ *   - RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN : 0x1FC0_0000
+ *   - RGXMIPSFW_DATA_REMAP_PHYS_ADDR_IN : 0x1FC0_1000
+ *   - RGXMIPSFW_CODE_REMAP_PHYS_ADDR_IN : 0x1FC0_2000
+ *   - (benign trampoline)               : 0x1FC0_3000
+ * that would otherwise be erroneously remapped by the MIPS wrapper
+ * (see "Firmware virtual layout and remap configuration" section below)
+ */
+
+#define RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES       (2)
+#define RGXMIPSFW_TRAMPOLINE_NUMPAGES            (1U << RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES)
+#define RGXMIPSFW_TRAMPOLINE_SIZE                (RGXMIPSFW_TRAMPOLINE_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE_4K)
+#define RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE   (RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES + RGXMIPSFW_LOG2_PAGE_SIZE_4K)
+
+#define RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR    (RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN)
+#define RGXMIPSFW_TRAMPOLINE_OFFSET(a)           (a - RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN)
+
+#define RGXMIPSFW_SENSITIVE_ADDR(a)              (RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN == (~((1UL << RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE)-1U) & a))
+
+/*
+ * Firmware virtual layout and remap configuration
+ */
+/*
+ * For each remap region we define:
+ * - the virtual base used by the Firmware to access code/data through that region
+ * - the microAptivAP physical address corresponding to the virtual base address,
+ *   used as the input address and remapped to the actual physical address
+ * - log2 of size of the region remapped by the MIPS wrapper, i.e. number of bits from
+ *   the bottom of the base input address that survive onto the output address
+ *   (this defines both the alignment and the maximum size of the remapped region)
+ * - one or more code/data segments within the remapped region
+ */
+
+/* Boot remap setup */
+#define RGXMIPSFW_BOOT_REMAP_VIRTUAL_BASE        (0xBFC00000)
+#define RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN        (0x1FC00000U)
+#define RGXMIPSFW_BOOT_REMAP_LOG2_SEGMENT_SIZE   (12)
+#define RGXMIPSFW_BOOT_NMI_CODE_VIRTUAL_BASE     (RGXMIPSFW_BOOT_REMAP_VIRTUAL_BASE)
+
+/* Data remap setup */
+#define RGXMIPSFW_DATA_REMAP_VIRTUAL_BASE        (0xBFC01000)
+#define RGXMIPSFW_DATA_CACHED_REMAP_VIRTUAL_BASE (0x9FC01000)
+#define RGXMIPSFW_DATA_REMAP_PHYS_ADDR_IN        (0x1FC01000U)
+#define RGXMIPSFW_DATA_REMAP_LOG2_SEGMENT_SIZE   (12)
+#define RGXMIPSFW_BOOT_NMI_DATA_VIRTUAL_BASE     (RGXMIPSFW_DATA_REMAP_VIRTUAL_BASE)
+
+/* Code remap setup */
+#define RGXMIPSFW_CODE_REMAP_VIRTUAL_BASE        (0x9FC02000)
+#define RGXMIPSFW_CODE_REMAP_PHYS_ADDR_IN        (0x1FC02000U)
+#define RGXMIPSFW_CODE_REMAP_LOG2_SEGMENT_SIZE   (12)
+#define RGXMIPSFW_EXCEPTIONS_VIRTUAL_BASE        (RGXMIPSFW_CODE_REMAP_VIRTUAL_BASE)
+
+/* Permanent mappings setup */
+#define RGXMIPSFW_PT_VIRTUAL_BASE                (0xCF000000)
+#define RGXMIPSFW_REGISTERS_VIRTUAL_BASE         (0xCF800000)
+#define RGXMIPSFW_STACK_VIRTUAL_BASE             (0xCF600000)
+
+
+/*
+ * Bootloader configuration data
+ */
+/* Bootloader configuration offset (where RGXMIPSFW_BOOT_DATA lives)
+ * within the bootloader/NMI data page */
+#define RGXMIPSFW_BOOTLDR_CONF_OFFSET                         (0x0U)
+
+
+/*
+ * NMI shared data
+ */
+/* Base address of the shared data within the bootloader/NMI data page */
+#define RGXMIPSFW_NMI_SHARED_DATA_BASE                        (0x100)
+/* Size used by Debug dump data */
+#define RGXMIPSFW_NMI_SHARED_SIZE                             (0x2B0)
+/* Offsets in the NMI shared area in 32-bit words */
+#define RGXMIPSFW_NMI_SYNC_FLAG_OFFSET                        (0x0)
+#define RGXMIPSFW_NMI_STATE_OFFSET                            (0x1)
+#define RGXMIPSFW_NMI_ERROR_STATE_SET                         (0x1)
+
+/*
+ * MIPS boot stage
+ */
+#define RGXMIPSFW_BOOT_STAGE_OFFSET                           (0x400)
+
+/*
+ * MIPS private data in the bootloader data page.
+ * Memory below this offset is used by the FW only, no interface data allowed.
+ */
+#define RGXMIPSFW_PRIVATE_DATA_OFFSET                         (0x800)
+
+
+/* The things that follow are excluded when compiling assembly sources */
+#if !defined(RGXMIPSFW_ASSEMBLY_CODE)
+#include "img_types.h"
+#include "km/rgxdefs_km.h"
+
+typedef struct
+{
+       IMG_UINT64 ui64StackPhyAddr;
+       IMG_UINT64 ui64RegBase;
+       IMG_UINT64 aui64PTPhyAddr[RGXMIPSFW_MAX_NUM_PAGETABLE_PAGES];
+       IMG_UINT32 ui32PTLog2PageSize;
+       IMG_UINT32 ui32PTNumPages;
+       IMG_UINT32 ui32Reserved1;
+       IMG_UINT32 ui32Reserved2;
+} RGXMIPSFW_BOOT_DATA;
+
+#define RGXMIPSFW_GET_OFFSET_IN_DWORDS(offset)                (offset / sizeof(IMG_UINT32))
+#define RGXMIPSFW_GET_OFFSET_IN_QWORDS(offset)                (offset / sizeof(IMG_UINT64))
+
+/* Used for compatibility checks */
+#define RGXMIPSFW_ARCHTYPE_VER_CLRMSK                         (0xFFFFE3FFU)
+#define RGXMIPSFW_ARCHTYPE_VER_SHIFT                          (10U)
+#define RGXMIPSFW_CORE_ID_VALUE                               (0x001U)
+#define RGXFW_PROCESSOR_MIPS                                  "MIPS"
+
+/* microAptivAP cache line size */
+#define RGXMIPSFW_MICROAPTIVEAP_CACHELINE_SIZE                (16U)
+
+/* The SOCIF transactions are identified with the top 16 bits of the physical address emitted by the MIPS */
+#define RGXMIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN           (16U)
+
+/* Values to put in the MIPS selectors for performance counters */
+#define RGXMIPSFW_PERF_COUNT_CTRL_ICACHE_ACCESSES_C0          (9U)   /* Icache accesses in COUNTER0 */
+#define RGXMIPSFW_PERF_COUNT_CTRL_ICACHE_MISSES_C1            (9U)   /* Icache misses in COUNTER1 */
+
+#define RGXMIPSFW_PERF_COUNT_CTRL_DCACHE_ACCESSES_C0          (10U)  /* Dcache accesses in COUNTER0 */
+#define RGXMIPSFW_PERF_COUNT_CTRL_DCACHE_MISSES_C1            (11U) /* Dcache misses in COUNTER1 */
+
+#define RGXMIPSFW_PERF_COUNT_CTRL_ITLB_INSTR_ACCESSES_C0      (5U)  /* ITLB instruction accesses in COUNTER0 */
+#define RGXMIPSFW_PERF_COUNT_CTRL_JTLB_INSTR_MISSES_C1        (7U)  /* JTLB instruction access misses in COUNTER1 */
+
+#define RGXMIPSFW_PERF_COUNT_CTRL_INSTR_COMPLETED_C0          (1U)  /* Instructions completed in COUNTER0 */
+#define RGXMIPSFW_PERF_COUNT_CTRL_JTLB_DATA_MISSES_C1         (8U)  /* JTLB data misses in COUNTER1 */
+
+#define RGXMIPSFW_PERF_COUNT_CTRL_EVENT_SHIFT                 (5U)  /* Shift for the Event field in the MIPS perf ctrl registers */
+/* Additional flags for performance counters. See MIPS manual for further reference */
+#define RGXMIPSFW_PERF_COUNT_CTRL_COUNT_USER_MODE             (8U)
+#define RGXMIPSFW_PERF_COUNT_CTRL_COUNT_KERNEL_MODE           (2U)
+#define RGXMIPSFW_PERF_COUNT_CTRL_COUNT_EXL                   (1U)
+
+
+#define RGXMIPSFW_C0_NBHWIRQ   8
+
+/* Macros to decode C0_Cause register */
+#define RGXMIPSFW_C0_CAUSE_EXCCODE(CAUSE)       (((CAUSE) & 0x7cU) >> 2U)
+#define RGXMIPSFW_C0_CAUSE_EXCCODE_FWERROR      9
+/* Use only when Coprocessor Unusable exception */
+#define RGXMIPSFW_C0_CAUSE_UNUSABLE_UNIT(CAUSE) (((CAUSE) >> 28U) & 0x3U)
+#define RGXMIPSFW_C0_CAUSE_PENDING_HWIRQ(CAUSE) (((CAUSE) & 0x3fc00) >> 10)
+#define RGXMIPSFW_C0_CAUSE_FDCIPENDING          (1UL << 21)
+#define RGXMIPSFW_C0_CAUSE_IV                   (1UL << 23)
+#define RGXMIPSFW_C0_CAUSE_IC                   (1UL << 25)
+#define RGXMIPSFW_C0_CAUSE_PCIPENDING           (1UL << 26)
+#define RGXMIPSFW_C0_CAUSE_TIPENDING            (1UL << 30)
+#define RGXMIPSFW_C0_CAUSE_BRANCH_DELAY         (1UL << 31)
+
+/* Macros to decode C0_Debug register */
+#define RGXMIPSFW_C0_DEBUG_EXCCODE(DEBUG) (((DEBUG) >> 10U) & 0x1fU)
+#define RGXMIPSFW_C0_DEBUG_DSS            (1UL << 0)
+#define RGXMIPSFW_C0_DEBUG_DBP            (1UL << 1)
+#define RGXMIPSFW_C0_DEBUG_DDBL           (1UL << 2)
+#define RGXMIPSFW_C0_DEBUG_DDBS           (1UL << 3)
+#define RGXMIPSFW_C0_DEBUG_DIB            (1UL << 4)
+#define RGXMIPSFW_C0_DEBUG_DINT           (1UL << 5)
+#define RGXMIPSFW_C0_DEBUG_DIBIMPR        (1UL << 6)
+#define RGXMIPSFW_C0_DEBUG_DDBLIMPR       (1UL << 18)
+#define RGXMIPSFW_C0_DEBUG_DDBSIMPR       (1UL << 19)
+#define RGXMIPSFW_C0_DEBUG_IEXI           (1UL << 20)
+#define RGXMIPSFW_C0_DEBUG_DBUSEP         (1UL << 21)
+#define RGXMIPSFW_C0_DEBUG_CACHEEP        (1UL << 22)
+#define RGXMIPSFW_C0_DEBUG_MCHECKP        (1UL << 23)
+#define RGXMIPSFW_C0_DEBUG_IBUSEP         (1UL << 24)
+#define RGXMIPSFW_C0_DEBUG_DM             (1UL << 30)
+#define RGXMIPSFW_C0_DEBUG_DBD            (1UL << 31)
+
+/* Macros to decode TLB entries */
+#define RGXMIPSFW_TLB_GET_MASK(PAGE_MASK)       (((PAGE_MASK) >> 13) & 0XFFFFU)
+#define RGXMIPSFW_TLB_GET_PAGE_SIZE(PAGE_MASK)  ((((PAGE_MASK) | 0x1FFFU) + 1U) >> 11U) /* page size in KB */
+#define RGXMIPSFW_TLB_GET_PAGE_MASK(PAGE_SIZE)  ((((PAGE_SIZE) << 11) - 1) & ~0x7FF) /* page size in KB */
+#define RGXMIPSFW_TLB_GET_VPN2(ENTRY_HI)        ((ENTRY_HI) >> 13)
+#define RGXMIPSFW_TLB_GET_COHERENCY(ENTRY_LO)   (((ENTRY_LO) >> 3) & 0x7U)
+#define RGXMIPSFW_TLB_GET_PFN(ENTRY_LO)         (((ENTRY_LO) >> 6) & 0XFFFFFU)
+/* GET_PA uses a non-standard PFN mask for 36 bit addresses */
+#define RGXMIPSFW_TLB_GET_PA(ENTRY_LO)          (((IMG_UINT64)(ENTRY_LO) & RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT) << 6)
+#define RGXMIPSFW_TLB_GET_INHIBIT(ENTRY_LO)     (((ENTRY_LO) >> 30) & 0x3U)
+#define RGXMIPSFW_TLB_GET_DGV(ENTRY_LO)         ((ENTRY_LO) & 0x7U)
+#define RGXMIPSFW_TLB_GLOBAL                    (1U)
+#define RGXMIPSFW_TLB_VALID                     (1U << 1)
+#define RGXMIPSFW_TLB_DIRTY                     (1U << 2)
+#define RGXMIPSFW_TLB_XI                        (1U << 30)
+#define RGXMIPSFW_TLB_RI                        (1U << 31)
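+
+/* Example (illustrative sketch): decoding one TLB entry captured in an
+ * RGX_MIPS_TLB_ENTRY (see structure below). psEntry is a placeholder.
+ *
+ *   IMG_UINT32 ui32PageKB = RGXMIPSFW_TLB_GET_PAGE_SIZE(psEntry->ui32TLBPageMask);
+ *   IMG_UINT32 ui32VPN2   = RGXMIPSFW_TLB_GET_VPN2(psEntry->ui32TLBHi);
+ *   IMG_UINT64 ui64PA0    = RGXMIPSFW_TLB_GET_PA(psEntry->ui32TLBLo0);
+ *   IMG_BOOL   bValid     = (psEntry->ui32TLBLo0 & RGXMIPSFW_TLB_VALID) != 0U;
+ */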
+
+typedef struct {
+       IMG_UINT32 ui32TLBPageMask;
+       IMG_UINT32 ui32TLBHi;
+       IMG_UINT32 ui32TLBLo0;
+       IMG_UINT32 ui32TLBLo1;
+} RGX_MIPS_TLB_ENTRY;
+
+typedef struct {
+       IMG_UINT32 ui32RemapAddrIn;     /* always 4k aligned */
+       IMG_UINT32 ui32RemapAddrOut;    /* always 4k aligned */
+       IMG_UINT32 ui32RemapRegionSize;
+} RGX_MIPS_REMAP_ENTRY;
+
+typedef struct {
+       IMG_UINT32 ui32ErrorState; /* This must come first in the structure */
+       IMG_UINT32 ui32ErrorEPC;
+       IMG_UINT32 ui32StatusRegister;
+       IMG_UINT32 ui32CauseRegister;
+       IMG_UINT32 ui32BadRegister;
+       IMG_UINT32 ui32EPC;
+       IMG_UINT32 ui32SP;
+       IMG_UINT32 ui32Debug;
+       IMG_UINT32 ui32DEPC;
+       IMG_UINT32 ui32BadInstr;
+       IMG_UINT32 ui32UnmappedAddress;
+       RGX_MIPS_TLB_ENTRY asTLB[RGXMIPSFW_NUMBER_OF_TLB_ENTRIES];
+       RGX_MIPS_REMAP_ENTRY asRemap[RGXMIPSFW_NUMBER_OF_REMAP_ENTRIES];
+} RGX_MIPS_STATE;
+
+#endif /* RGXMIPSFW_ASSEMBLY_CODE */
+
+#endif /* RGX_MIPS_H */
diff --git a/drivers/gpu/drm/img/img-rogue/include/rgx_riscv.h b/drivers/gpu/drm/img/img-rogue/include/rgx_riscv.h
new file mode 100644 (file)
index 0000000..e5be2a5
--- /dev/null
@@ -0,0 +1,250 @@
+/*************************************************************************/ /*!
+@File           rgx_riscv.h
+@Title
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Platform       RGX
+@Description    RGX RISCV definitions, kernel/user space
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGX_RISCV_H)
+#define RGX_RISCV_H
+
+#include "km/rgxdefs_km.h"
+
+
+/* Utility defines to convert regions to virtual addresses and remaps */
+#define RGXRISCVFW_GET_REGION_BASE(r)           IMG_UINT32_C((r) << 28)
+#define RGXRISCVFW_GET_REGION(a)                IMG_UINT32_C((a) >> 28)
+#define RGXRISCVFW_MAX_REGION_SIZE              IMG_UINT32_C(1 << 28)
+#define RGXRISCVFW_GET_REMAP(r)                 (RGX_CR_FWCORE_ADDR_REMAP_CONFIG0 + ((r) * 8U))
+
+/* RISCV remap output is aligned to 4K */
+#define RGXRISCVFW_REMAP_CONFIG_DEVVADDR_ALIGN  (0x1000U)
+
+/*
+ * FW bootloader defines
+ */
+#define RGXRISCVFW_BOOTLDR_CODE_REGION          IMG_UINT32_C(0xC)
+#define RGXRISCVFW_BOOTLDR_DATA_REGION          IMG_UINT32_C(0x5)
+#define RGXRISCVFW_BOOTLDR_CODE_BASE            (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_BOOTLDR_CODE_REGION))
+#define RGXRISCVFW_BOOTLDR_DATA_BASE            (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_BOOTLDR_DATA_REGION))
+#define RGXRISCVFW_BOOTLDR_CODE_REMAP           (RGXRISCVFW_GET_REMAP(RGXRISCVFW_BOOTLDR_CODE_REGION))
+#define RGXRISCVFW_BOOTLDR_DATA_REMAP           (RGXRISCVFW_GET_REMAP(RGXRISCVFW_BOOTLDR_DATA_REGION))
+
+/* Bootloader data offset in dwords from the beginning of the FW data allocation */
+#define RGXRISCVFW_BOOTLDR_CONF_OFFSET          (0x0)
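+
+/* Example (illustrative sketch): the region helpers above turn a 4-bit
+ * region index into its 256MB-aligned virtual base and the matching remap
+ * config register, e.g. for the bootloader code region:
+ *
+ *   IMG_UINT32 ui32Base  = RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_BOOTLDR_CODE_REGION);
+ *   // == 0xC0000000
+ *   IMG_UINT32 ui32Remap = RGXRISCVFW_GET_REMAP(RGXRISCVFW_BOOTLDR_CODE_REGION);
+ *   // == RGX_CR_FWCORE_ADDR_REMAP_CONFIG0 + 0xC * 8
+ */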
+
+/*
+ * FW coremem region defines
+ */
+#define RGXRISCVFW_COREMEM_REGION               IMG_UINT32_C(0x8)
+#define RGXRISCVFW_COREMEM_MAX_SIZE             IMG_UINT32_C(0x10000000) /* 256 MB */
+#define RGXRISCVFW_COREMEM_BASE                 (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_COREMEM_REGION))
+#define RGXRISCVFW_COREMEM_END                  (RGXRISCVFW_COREMEM_BASE + RGXRISCVFW_COREMEM_MAX_SIZE - 1U)
+
+
+/*
+ * Host-FW shared data defines
+ */
+#define RGXRISCVFW_SHARED_CACHED_DATA_REGION    (0x6UL)
+#define RGXRISCVFW_SHARED_UNCACHED_DATA_REGION  (0xDUL)
+#define RGXRISCVFW_SHARED_CACHED_DATA_BASE      (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_SHARED_CACHED_DATA_REGION))
+#define RGXRISCVFW_SHARED_UNCACHED_DATA_BASE    (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_SHARED_UNCACHED_DATA_REGION))
+#define RGXRISCVFW_SHARED_CACHED_DATA_REMAP     (RGXRISCVFW_GET_REMAP(RGXRISCVFW_SHARED_CACHED_DATA_REGION))
+#define RGXRISCVFW_SHARED_UNCACHED_DATA_REMAP   (RGXRISCVFW_GET_REMAP(RGXRISCVFW_SHARED_UNCACHED_DATA_REGION))
+
+
+/*
+ * GPU SOCIF access defines
+ */
+#define RGXRISCVFW_SOCIF_REGION                 (0x2U)
+#define RGXRISCVFW_SOCIF_BASE                   (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_SOCIF_REGION))
+
+
+/* The things that follow are excluded when compiling assembly sources */
+#if !defined(RGXRISCVFW_ASSEMBLY_CODE)
+#include "img_types.h"
+
+#define RGXFW_PROCESSOR_RISCV       "RISCV"
+#define RGXRISCVFW_CORE_ID_VALUE    (0x00450B02U)
+#define RGXRISCVFW_MISA_ADDR        (0x301U)
+#define RGXRISCVFW_MISA_VALUE       (0x40001104U)
+#define RGXRISCVFW_MSCRATCH_ADDR    (0x340U)
+
+typedef struct
+{
+       IMG_UINT64 ui64CorememCodeDevVAddr;
+       IMG_UINT64 ui64CorememDataDevVAddr;
+       IMG_UINT32 ui32CorememCodeFWAddr;
+       IMG_UINT32 ui32CorememDataFWAddr;
+       IMG_UINT32 ui32CorememCodeSize;
+       IMG_UINT32 ui32CorememDataSize;
+       IMG_UINT32 ui32Flags;
+       IMG_UINT32 ui32Reserved;
+} RGXRISCVFW_BOOT_DATA;
+
+/*
+ * List of registers to be printed in debug dump.
+ * First column:  register names (general purpose or control/status registers)
+ * Second column: register number to be used in abstract access register command
+ * (see RISC-V debug spec v0.13)
+ */
+#define RGXRISCVFW_DEBUG_DUMP_REGISTERS \
+       X(pc,        0x7b1) /* dpc */ \
+       X(ra,       0x1001) \
+       X(sp,       0x1002) \
+       X(mepc,      0x341) \
+       X(mcause,    0x342) \
+       X(mdseac,    0xfc0) \
+       X(mstatus,   0x300) \
+       X(mie,       0x304) \
+       X(mip,       0x344) \
+       X(mscratch,  0x340) \
+       X(mbvnc0,    0xffe) \
+       X(mbvnc1,    0xfff) \
+       X(micect,    0x7f0) \
+       X(mdcect,    0x7f3) \
+       X(mdcrfct,   0x7f4) \
+
+typedef struct
+{
+#define X(name, address) \
+       IMG_UINT32 name;
+
+       RGXRISCVFW_DEBUG_DUMP_REGISTERS
+#undef X
+} RGXRISCVFW_STATE;
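+
+/* Example (illustrative sketch): the same X-macro list can be re-expanded to
+ * walk the captured state, e.g. for a debug dump. PrintReg and psState are
+ * placeholder names.
+ *
+ *   #define X(name, address) \
+ *       PrintReg(#name, (address), psState->name);
+ *
+ *       RGXRISCVFW_DEBUG_DUMP_REGISTERS
+ *   #undef X
+ */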
+
+
+#define RGXRISCVFW_MCAUSE_INTERRUPT  (1U << 31)
+
+#define RGXRISCVFW_MCAUSE_TABLE \
+       X(0x00000000U, IMG_FALSE, "NMI pin assertion") /* Also reset value */ \
+       X(0x00000001U, IMG_TRUE,  "Instruction access fault") \
+       X(0x00000002U, IMG_TRUE,  "Illegal instruction") \
+       X(0x00000003U, IMG_TRUE,  "Breakpoint") \
+       X(0x00000004U, IMG_TRUE,  "Load address misaligned") \
+       X(0x00000005U, IMG_TRUE,  "Load access fault") \
+       X(0x00000006U, IMG_TRUE,  "Store/AMO address misaligned") \
+       X(0x00000007U, IMG_TRUE,  "Store/AMO access fault") \
+       X(0x0000000BU, IMG_TRUE,  "Environment call from M-mode (FW assert)") \
+       X(0x80000007U, IMG_FALSE, "Machine timer interrupt") \
+       X(0x8000000BU, IMG_FALSE, "Machine external interrupt") \
+       X(0x8000001EU, IMG_FALSE, "Machine correctable error local interrupt") \
+       X(0xF0000000U, IMG_TRUE,  "Machine D-bus store error NMI") \
+       X(0xF0000001U, IMG_TRUE,  "Machine D-bus non-blocking load error NMI") \
+       X(0xF0000002U, IMG_TRUE,  "dCache unrecoverable NMI")
+
+
+/* Debug module HW defines */
+#define RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER (0U)
+#define RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY   (2U)
+#define RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT   (2UL << 20)
+#define RGXRISCVFW_DMI_COMMAND_WRITE           (1UL << 16)
+#define RGXRISCVFW_DMI_COMMAND_READ            (0UL << 16)
+#define RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT     (2U)
+
+/* Abstract command error codes (descriptions from RISC-V debug spec v0.13) */
+typedef enum
+{
+       /* No error. */
+       RISCV_ABSTRACT_CMD_NO_ERROR = 0,
+
+       /*
+        * An abstract command was executing while command, abstractcs, or abstractauto
+        * was written, or when one of the data or progbuf registers was read or
+        * written. This status is only written if cmderr contains 0.
+        */
+       RISCV_ABSTRACT_CMD_BUSY = 1,
+
+       /*
+        * The requested command is not supported, regardless of whether
+        * the hart is running or not.
+        */
+       RISCV_ABSTRACT_CMD_NOT_SUPPORTED = 2,
+
+       /*
+        * An exception occurred while executing the command
+        * (e.g. while executing the Program Buffer).
+        */
+       RISCV_ABSTRACT_CMD_EXCEPTION = 3,
+
+       /*
+        * The abstract command couldn't execute because the hart wasn't in the required
+        * state (running/halted), or unavailable.
+        */
+       RISCV_ABSTRACT_CMD_HALT_RESUME = 4,
+
+       /*
+        * The abstract command failed due to a bus error
+        * (e.g. alignment, access size, or timeout).
+        */
+       RISCV_ABSTRACT_CMD_BUS_ERROR = 5,
+
+       /* The command failed for another reason. */
+       RISCV_ABSTRACT_CMD_OTHER_ERROR = 7
+
+} RGXRISCVFW_ABSTRACT_CMD_ERR;
+
+/* System Bus error codes (descriptions from RISC-V debug spec v0.13) */
+typedef enum
+{
+       /* There was no bus error. */
+       RISCV_SYSBUS_NO_ERROR = 0,
+
+       /* There was a timeout. */
+       RISCV_SYSBUS_TIMEOUT = 1,
+
+       /* A bad address was accessed. */
+       RISCV_SYSBUS_BAD_ADDRESS = 2,
+
+       /* There was an alignment error. */
+       RISCV_SYSBUS_BAD_ALIGNMENT = 3,
+
+       /* An access of unsupported size was requested. */
+       RISCV_SYSBUS_UNSUPPORTED_SIZE = 4,
+
+       /* Other. */
+       RISCV_SYSBUS_OTHER_ERROR = 7
+
+} RGXRISCVFW_SYSBUS_ERR;
+
+#endif /* RGXRISCVFW_ASSEMBLY_CODE */
+
+#endif /* RGX_RISCV_H */
diff --git a/drivers/gpu/drm/img/img-rogue/include/rgxfw_log_helper.h b/drivers/gpu/drm/img/img-rogue/include/rgxfw_log_helper.h
new file mode 100644 (file)
index 0000000..275b63a
--- /dev/null
@@ -0,0 +1,79 @@
+/*************************************************************************/ /*!
+@File           rgxfw_log_helper.h
+@Title          Firmware TBI logging helper function
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Platform       Generic
+@Description    This file contains some helper code to make TBI logging possible.
+                Specifically, it uses the SFIDLIST xmacro to trace ids back to
+                the original strings.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGXFW_LOG_HELPER_H
+#define RGXFW_LOG_HELPER_H
+
+#include "rgx_fwif_sf.h"
+
+static const IMG_CHAR *const groups[]= {
+#define X(A,B) #B,
+       RGXFW_LOG_SFGROUPLIST
+#undef X
+};
+
+/*  idToStringID : Search the SFs tuples {id, string} for a matching id.
+ *   Returns the index into the array if found, or RGXFW_SF_LAST if not.
+ *   bsearch could be used as the ids are in increasing order. */
+#if defined(RGX_FIRMWARE)
+static IMG_UINT32 idToStringID(IMG_UINT32 ui32CheckData, const RGXFW_STID_FMT *const psSFs)
+#else
+static IMG_UINT32 idToStringID(IMG_UINT32 ui32CheckData, const RGXKM_STID_FMT *const psSFs)
+#endif
+{
+       IMG_UINT32 i = 0, ui32Id = (IMG_UINT32)RGXFW_SF_LAST;
+
+       for ( i = 0 ; psSFs[i].ui32Id != (IMG_UINT32)RGXFW_SF_LAST ; i++)
+       {
+               if ( ui32CheckData == psSFs[i].ui32Id )
+               {
+                       ui32Id = i;
+                       break;
+               }
+       }
+       return ui32Id;
+}
+
+#endif /* RGXFW_LOG_HELPER_H */
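
The linear scan in idToStringID above stops at the RGXFW_SF_LAST sentinel; as its comment notes, a binary search is also possible because the IDs are stored in increasing order. Below is a minimal, self-contained sketch of that variant. The STID_FMT struct, the SF_LAST sentinel and the explicit element count are simplified stand-ins for the driver's RGXKM_STID_FMT/RGXFW_SF_LAST definitions, not the real types.

#include <stdint.h>

/* Simplified stand-in for the driver's {id, format string} tuple type. */
typedef struct
{
	uint32_t    ui32Id;
	const char *pszFmt;
} STID_FMT;

#define SF_LAST 0xFFFFFFFFu   /* stand-in for RGXFW_SF_LAST */

/* Binary-search variant of idToStringID: the caller passes the number of
 * tuples (e.g. derived once from the sentinel at start-up). */
static uint32_t idToStringIDBinary(uint32_t ui32CheckData,
                                   const STID_FMT *psSFs,
                                   uint32_t ui32Count)
{
	uint32_t lo = 0, hi = ui32Count;            /* search range [lo, hi) */

	while (lo < hi)
	{
		uint32_t mid = lo + ((hi - lo) >> 1);

		if (psSFs[mid].ui32Id == ui32CheckData)
		{
			return mid;                         /* index of the match */
		}
		if (psSFs[mid].ui32Id < ui32CheckData)
		{
			lo = mid + 1;
		}
		else
		{
			hi = mid;
		}
	}
	return SF_LAST;                             /* no matching id */
}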
diff --git a/drivers/gpu/drm/img/img-rogue/include/ri_typedefs.h b/drivers/gpu/drm/img/img-rogue/include/ri_typedefs.h
new file mode 100644 (file)
index 0000000..77be10e
--- /dev/null
@@ -0,0 +1,52 @@
+/*************************************************************************/ /*!
+@File
+@Title          Resource Information (RI) Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Client side part of RI management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RI_TYPEDEFS_H
+#define RI_TYPEDEFS_H
+
+#include "img_types.h"
+
+typedef struct RI_SUBLIST_ENTRY RI_ENTRY;
+typedef RI_ENTRY* RI_HANDLE;
+
+#endif /* #ifndef RI_TYPEDEFS_H */
diff --git a/drivers/gpu/drm/img/img-rogue/include/rogue/rgx_fwif_alignchecks.h b/drivers/gpu/drm/img/img-rogue/include/rogue/rgx_fwif_alignchecks.h
new file mode 100644 (file)
index 0000000..4f82b23
--- /dev/null
@@ -0,0 +1,192 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX fw interface alignment checks
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Checks to avoid misalignment in RGX fw data structures
+                shared with the host
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGX_FWIF_ALIGNCHECKS_H)
+#define RGX_FWIF_ALIGNCHECKS_H
+
+/* for the offsetof macro */
+#if defined(__KERNEL__) && defined(__linux__)
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+/*!
+ ******************************************************************************
+ * Alignment UM/FW checks array
+ *****************************************************************************/
+
+#define RGXFW_ALIGN_CHECKS_UM_MAX 128U
+
+#define RGXFW_ALIGN_CHECKS_INIT0                                                               \
+               sizeof(RGXFWIF_TRACEBUF),                                                               \
+               offsetof(RGXFWIF_TRACEBUF, ui32LogType),                                \
+               offsetof(RGXFWIF_TRACEBUF, sTraceBuf),                                  \
+               offsetof(RGXFWIF_TRACEBUF, ui32TraceBufSizeInDWords),   \
+               offsetof(RGXFWIF_TRACEBUF, ui32TracebufFlags),                  \
+                                                                                                                               \
+               sizeof(RGXFWIF_SYSDATA),                                                                \
+               offsetof(RGXFWIF_SYSDATA, ePowState),                                   \
+               offsetof(RGXFWIF_SYSDATA, ui32HWPerfDropCount),                 \
+               offsetof(RGXFWIF_SYSDATA, ui32LastDropOrdinal),                 \
+               offsetof(RGXFWIF_SYSDATA, ui32FWFaults),                                \
+               offsetof(RGXFWIF_SYSDATA, ui32HWRStateFlags),                   \
+                                                                                                                               \
+               sizeof(RGXFWIF_OSDATA),                                                                 \
+               offsetof(RGXFWIF_OSDATA, ui32HostSyncCheckMark),                \
+               offsetof(RGXFWIF_OSDATA, ui32KCCBCmdsExecuted),                 \
+                                                                                                                                       \
+               sizeof(RGXFWIF_HWRINFOBUF),                                                                     \
+               offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmLockedUpCount),          \
+               offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmOverranCount),           \
+               offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmRecoveredCount),         \
+               offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmFalseDetectCount),       \
+                                                                                                                                       \
+               /* RGXFWIF_CMDTA checks */                                              \
+               sizeof(RGXFWIF_CMDTA),                                                  \
+               offsetof(RGXFWIF_CMDTA, sGeomRegs),                             \
+                                                                                                               \
+               /* RGXFWIF_CMD3D checks */                                              \
+               sizeof(RGXFWIF_CMD3D),                                                  \
+               offsetof(RGXFWIF_CMD3D, s3DRegs),                               \
+                                                                                                               \
+               /* RGXFWIF_CMDTRANSFER checks */                                \
+               sizeof(RGXFWIF_CMDTRANSFER),                                    \
+               offsetof(RGXFWIF_CMDTRANSFER, sTransRegs),              \
+                                                                                                               \
+                                                                                                               \
+               /* RGXFWIF_CMD_COMPUTE checks */                                \
+               sizeof(RGXFWIF_CMD_COMPUTE),                                    \
+               offsetof(RGXFWIF_CMD_COMPUTE, sCDMRegs),                \
+                                                                                                               \
+               /* RGXFWIF_FREELIST checks */                                   \
+               sizeof(RGXFWIF_FREELIST),                                               \
+               offsetof(RGXFWIF_FREELIST, psFreeListDevVAddr), \
+               offsetof(RGXFWIF_FREELIST, ui32MaxPages),               \
+               offsetof(RGXFWIF_FREELIST, ui32CurrentPages),   \
+                                                                                                               \
+               /* RGXFWIF_HWRTDATA checks */                                           \
+               sizeof(RGXFWIF_HWRTDATA),                                                       \
+               offsetof(RGXFWIF_HWRTDATA, psVHeapTableDevVAddr),       \
+               offsetof(RGXFWIF_HWRTDATA, psPMMListDevVAddr),          \
+               offsetof(RGXFWIF_HWRTDATA, apsFreeLists),                       \
+               offsetof(RGXFWIF_HWRTDATA, ui64VCECatBase),                     \
+               offsetof(RGXFWIF_HWRTDATA, eState),                                     \
+                                                                                                                       \
+               /* RGXFWIF_HWRTDATA_COMMON checks */                                    \
+               sizeof(RGXFWIF_HWRTDATA_COMMON),                                                \
+               offsetof(RGXFWIF_HWRTDATA_COMMON, bTACachesNeedZeroing),\
+                                                                                                                               \
+               /* RGXFWIF_HWPERF_CTL_BLK checks */                                     \
+               sizeof(RGXFWIF_HWPERF_CTL_BLK),                                         \
+               offsetof(RGXFWIF_HWPERF_CTL_BLK, aui64CounterCfg),      \
+                                                                                                                       \
+               /* RGXFWIF_HWPERF_CTL checks */                 \
+               sizeof(RGXFWIF_HWPERF_CTL),                             \
+               offsetof(RGXFWIF_HWPERF_CTL, SelCntr)
+
+#if defined(RGX_FEATURE_TLA)
+#define RGXFW_ALIGN_CHECKS_INIT1                   \
+               RGXFW_ALIGN_CHECKS_INIT0,                  \
+               /* RGXFWIF_CMD2D checks */                 \
+               sizeof(RGXFWIF_CMD2D),                     \
+               offsetof(RGXFWIF_CMD2D, s2DRegs)
+#else
+#define RGXFW_ALIGN_CHECKS_INIT1               RGXFW_ALIGN_CHECKS_INIT0
+#endif /* RGX_FEATURE_TLA */
+
+
+#if defined(RGX_FEATURE_FASTRENDER_DM)
+#define RGXFW_ALIGN_CHECKS_INIT                    \
+               RGXFW_ALIGN_CHECKS_INIT1,                  \
+               /* RGXFWIF_CMDTDM checks */                \
+               sizeof(RGXFWIF_CMDTDM),                    \
+               offsetof(RGXFWIF_CMDTDM, sTDMRegs)
+#else
+#define RGXFW_ALIGN_CHECKS_INIT                RGXFW_ALIGN_CHECKS_INIT1
+#endif /* ! RGX_FEATURE_FASTRENDER_DM */
+
+
+
+/*!
+ ******************************************************************************
+ * Alignment KM checks array
+ *****************************************************************************/
+
+#define RGXFW_ALIGN_CHECKS_INIT_KM                                   \
+               sizeof(RGXFWIF_SYSINIT),                                     \
+               offsetof(RGXFWIF_SYSINIT, sFaultPhysAddr),                   \
+               offsetof(RGXFWIF_SYSINIT, sPDSExecBase),                     \
+               offsetof(RGXFWIF_SYSINIT, sUSCExecBase),                     \
+               offsetof(RGXFWIF_SYSINIT, asSigBufCtl),                      \
+               offsetof(RGXFWIF_SYSINIT, sTraceBufCtl),                     \
+               offsetof(RGXFWIF_SYSINIT, sFwSysData),                       \
+               sizeof(RGXFWIF_OSINIT),                                      \
+               offsetof(RGXFWIF_OSINIT, psKernelCCBCtl),                    \
+               offsetof(RGXFWIF_OSINIT, psKernelCCB),                       \
+               offsetof(RGXFWIF_OSINIT, psFirmwareCCBCtl),                  \
+               offsetof(RGXFWIF_OSINIT, psFirmwareCCB),                     \
+               offsetof(RGXFWIF_OSINIT, sFwOsData),                         \
+               offsetof(RGXFWIF_OSINIT, sRGXCompChecks),                    \
+                                                                            \
+               /* RGXFWIF_FWRENDERCONTEXT checks */                         \
+               sizeof(RGXFWIF_FWRENDERCONTEXT),                             \
+               offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext),               \
+               offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext),               \
+                                                                            \
+               sizeof(RGXFWIF_FWCOMMONCONTEXT),                             \
+               offsetof(RGXFWIF_FWCOMMONCONTEXT, psFWMemContext),           \
+               offsetof(RGXFWIF_FWCOMMONCONTEXT, sRunNode),                 \
+               offsetof(RGXFWIF_FWCOMMONCONTEXT, psCCB),                    \
+                                                                            \
+               sizeof(RGXFWIF_MMUCACHEDATA),                                \
+               offsetof(RGXFWIF_MMUCACHEDATA, ui32CacheFlags),              \
+               offsetof(RGXFWIF_MMUCACHEDATA, sMMUCacheSync),               \
+               offsetof(RGXFWIF_MMUCACHEDATA, ui32MMUCacheSyncUpdateValue)
+
+#endif /* RGX_FWIF_ALIGNCHECKS_H */
+
+/******************************************************************************
+ End of file (rgx_fwif_alignchecks.h)
+******************************************************************************/
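
The RGXFW_ALIGN_CHECKS_INIT* and RGXFW_ALIGN_CHECKS_INIT_KM lists above expand to comma-separated sizeof/offsetof values, so each side of the interface can capture the same layout description in an array and compare the two at initialisation time to catch structure-packing drift. The sketch below illustrates the idea with invented names and a trivial struct; it is not the driver's actual consumption of these macros.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Invented example struct shared between two sides of an interface. */
typedef struct
{
	uint32_t a;
	uint64_t b;
} EXAMPLE_SHARED;

/* Comma-separated layout description, same shape as the lists above. */
#define EXAMPLE_ALIGN_CHECKS_INIT    \
	sizeof(EXAMPLE_SHARED),          \
	offsetof(EXAMPLE_SHARED, a),     \
	offsetof(EXAMPLE_SHARED, b)

static const uint32_t aui32HostChecks[] = { EXAMPLE_ALIGN_CHECKS_INIT };

/* In the real scheme the second array would come from the other build
 * (e.g. the firmware image); it is duplicated here so the demo stays
 * self-contained. */
static const uint32_t aui32FwChecks[] = { EXAMPLE_ALIGN_CHECKS_INIT };

int main(void)
{
	size_t i;

	for (i = 0; i < sizeof(aui32HostChecks) / sizeof(aui32HostChecks[0]); i++)
	{
		if (aui32HostChecks[i] != aui32FwChecks[i])
		{
			printf("alignment check %zu mismatch\n", i);
			return 1;
		}
	}
	printf("all alignment checks match\n");
	return 0;
}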
diff --git a/drivers/gpu/drm/img/img-rogue/include/rogue/rgx_fwif_hwperf.h b/drivers/gpu/drm/img/img-rogue/include/rogue/rgx_fwif_hwperf.h
new file mode 100644 (file)
index 0000000..7001092
--- /dev/null
@@ -0,0 +1,252 @@
+/*************************************************************************/ /*!
+@File           rgx_fwif_hwperf.h
+@Title          RGX HWPerf support
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Shared header between RGX firmware and Init process
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGX_FWIF_HWPERF_H
+#define RGX_FWIF_HWPERF_H
+
+#include "rgx_fwif_shared.h"
+#include "rgx_hwperf.h"
+#include "rgxdefs_km.h"
+
+
+/*****************************************************************************/
+
+/* Structure to hold a block's parameters for passing between the BG context
+ * and the IRQ context when applying a configuration request. */
+typedef struct
+{
+       IMG_BOOL                bValid;
+       IMG_BOOL                bEnabled;
+       IMG_UINT32              eBlockID;
+       IMG_UINT32              uiCounterMask;
+       IMG_UINT64  RGXFW_ALIGN aui64CounterCfg[RGX_CNTBLK_MUX_COUNTERS_MAX];
+}  RGXFWIF_HWPERF_CTL_BLK;
+
+/* Structure used to hold the configuration of the non-mux counter blocks */
+typedef struct
+{
+       IMG_UINT32            ui32NumSelectedCounters;
+       IMG_UINT32            aui32SelectedCountersIDs[RGX_HWPERF_MAX_CUSTOM_CNTRS];
+} RGXFW_HWPERF_SELECT;
+
+/* Structure used to hold a Direct-Addressable block's parameters for passing
+ * between the BG context and the IRQ context when applying a configuration
+ * request. RGX_FEATURE_HWPERF_OCEANIC use only.
+ */
+typedef struct
+{
+       IMG_UINT32               uiEnabled;
+       IMG_UINT32               uiNumCounters;
+       IMG_UINT32               eBlockID;
+       RGXFWIF_DEV_VIRTADDR     psModel;
+       IMG_UINT32               aui32Counters[RGX_CNTBLK_COUNTERS_MAX];
+} RGXFWIF_HWPERF_DA_BLK;
+
+
+/* Structure to hold the whole configuration request details for all blocks.
+ * The block masks and counts are used to optimise reading of this data. */
+typedef struct
+{
+       IMG_UINT32                         ui32HWPerfCtlFlags;
+
+       IMG_UINT32                         ui32SelectedCountersBlockMask;
+       RGXFW_HWPERF_SELECT RGXFW_ALIGN    SelCntr[RGX_HWPERF_MAX_CUSTOM_BLKS];
+
+       IMG_UINT32                         ui32EnabledMUXBlksCount;
+       RGXFWIF_HWPERF_CTL_BLK RGXFW_ALIGN sBlkCfg[RGX_HWPERF_MAX_MUX_BLKS];
+} UNCACHED_ALIGN RGXFWIF_HWPERF_CTL;
+
+/* NOTE: The switch statement in this function must be kept in alignment with
+ * the enumeration RGX_HWPERF_CNTBLK_ID defined in rgx_hwperf.h; ASSERTs may
+ * result if the two fall out of sync.
+ * The function provides a hash lookup to get a handle on the global
+ * configuration store for a block from its block ID.
+ */
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(rgxfw_hwperf_get_block_ctl)
+#endif
+static INLINE RGXFWIF_HWPERF_CTL_BLK *rgxfw_hwperf_get_block_ctl(
+               RGX_HWPERF_CNTBLK_ID eBlockID, RGXFWIF_HWPERF_CTL *psHWPerfInitData)
+{
+       IMG_UINT32 ui32Idx;
+
+       /* Hash the block ID into a control configuration array index */
+       switch (eBlockID)
+       {
+               case RGX_CNTBLK_ID_TA:
+               case RGX_CNTBLK_ID_RASTER:
+               case RGX_CNTBLK_ID_HUB:
+               case RGX_CNTBLK_ID_TORNADO:
+               case RGX_CNTBLK_ID_JONES:
+               {
+                       ui32Idx = eBlockID;
+                       break;
+               }
+               case RGX_CNTBLK_ID_TPU_MCU0:
+               case RGX_CNTBLK_ID_TPU_MCU1:
+               case RGX_CNTBLK_ID_TPU_MCU2:
+               case RGX_CNTBLK_ID_TPU_MCU3:
+               case RGX_CNTBLK_ID_TPU_MCU4:
+               case RGX_CNTBLK_ID_TPU_MCU5:
+               case RGX_CNTBLK_ID_TPU_MCU6:
+               case RGX_CNTBLK_ID_TPU_MCU7:
+               {
+                       ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+                                               (eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+                       break;
+               }
+               case RGX_CNTBLK_ID_USC0:
+               case RGX_CNTBLK_ID_USC1:
+               case RGX_CNTBLK_ID_USC2:
+               case RGX_CNTBLK_ID_USC3:
+               case RGX_CNTBLK_ID_USC4:
+               case RGX_CNTBLK_ID_USC5:
+               case RGX_CNTBLK_ID_USC6:
+               case RGX_CNTBLK_ID_USC7:
+               case RGX_CNTBLK_ID_USC8:
+               case RGX_CNTBLK_ID_USC9:
+               case RGX_CNTBLK_ID_USC10:
+               case RGX_CNTBLK_ID_USC11:
+               case RGX_CNTBLK_ID_USC12:
+               case RGX_CNTBLK_ID_USC13:
+               case RGX_CNTBLK_ID_USC14:
+               case RGX_CNTBLK_ID_USC15:
+               {
+                       ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+                                               RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+                                               (eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+                       break;
+               }
+               case RGX_CNTBLK_ID_TEXAS0:
+               case RGX_CNTBLK_ID_TEXAS1:
+               case RGX_CNTBLK_ID_TEXAS2:
+               case RGX_CNTBLK_ID_TEXAS3:
+               case RGX_CNTBLK_ID_TEXAS4:
+               case RGX_CNTBLK_ID_TEXAS5:
+               case RGX_CNTBLK_ID_TEXAS6:
+               case RGX_CNTBLK_ID_TEXAS7:
+               {
+                       ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+                                               RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+                                               RGX_CNTBLK_INDIRECT_COUNT(USC, 15) +
+                                               (eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+                       break;
+               }
+               case RGX_CNTBLK_ID_RASTER0:
+               case RGX_CNTBLK_ID_RASTER1:
+               case RGX_CNTBLK_ID_RASTER2:
+               case RGX_CNTBLK_ID_RASTER3:
+               {
+                       ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+                                               RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+                                               RGX_CNTBLK_INDIRECT_COUNT(USC, 15) +
+                                               RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) +
+                                               (eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+                       break;
+               }
+               case RGX_CNTBLK_ID_BLACKPEARL0:
+               case RGX_CNTBLK_ID_BLACKPEARL1:
+               case RGX_CNTBLK_ID_BLACKPEARL2:
+               case RGX_CNTBLK_ID_BLACKPEARL3:
+               {
+                       ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+                                               RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+                                               RGX_CNTBLK_INDIRECT_COUNT(USC, 15) +
+                                               RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) +
+                                               RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3) +
+                                               (eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+                       break;
+               }
+               case RGX_CNTBLK_ID_PBE0:
+               case RGX_CNTBLK_ID_PBE1:
+               case RGX_CNTBLK_ID_PBE2:
+               case RGX_CNTBLK_ID_PBE3:
+               case RGX_CNTBLK_ID_PBE4:
+               case RGX_CNTBLK_ID_PBE5:
+               case RGX_CNTBLK_ID_PBE6:
+               case RGX_CNTBLK_ID_PBE7:
+               case RGX_CNTBLK_ID_PBE8:
+               case RGX_CNTBLK_ID_PBE9:
+               case RGX_CNTBLK_ID_PBE10:
+               case RGX_CNTBLK_ID_PBE11:
+               case RGX_CNTBLK_ID_PBE12:
+               case RGX_CNTBLK_ID_PBE13:
+               case RGX_CNTBLK_ID_PBE14:
+               case RGX_CNTBLK_ID_PBE15:
+               {
+                       ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+                                               RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+                                               RGX_CNTBLK_INDIRECT_COUNT(USC, 15) +
+                                               RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) +
+                                               RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3) +
+                                               RGX_CNTBLK_INDIRECT_COUNT(BLACKPEARL, 3) +
+                                               (eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+                       break;
+               }
+               default:
+               {
+                       ui32Idx = RGX_HWPERF_MAX_DEFINED_BLKS;
+                       break;
+               }
+       }
+       if (ui32Idx >= RGX_HWPERF_MAX_DEFINED_BLKS)
+       {
+               return NULL;
+       }
+       return &psHWPerfInitData->sBlkCfg[ui32Idx];
+}
+
+/* Stub routine for rgxfw_hwperf_get_da_block_ctl() on systems without
+ * RGX_FEATURE_HWPERF_OCEANIC. Just returns NULL.
+ */
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(rgxfw_hwperf_get_da_block_ctl)
+#endif
+static INLINE RGXFWIF_HWPERF_DA_BLK* rgxfw_hwperf_get_da_block_ctl(
+               RGX_HWPERF_CNTBLK_ID eBlockID, RGXFWIF_HWPERF_CTL *psHWPerfInitData)
+{
+       PVR_UNREFERENCED_PARAMETER(eBlockID);
+       PVR_UNREFERENCED_PARAMETER(psHWPerfInitData);
+
+       return NULL;
+}
+#endif
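
A hypothetical caller of rgxfw_hwperf_get_block_ctl() might look like the sketch below: it resolves a block ID to its RGXFWIF_HWPERF_CTL_BLK slot and, if the lookup succeeds, fills in the enable state and counter mask. The helper name and calling context are invented for illustration and assume this header plus img_types.h are in scope.

/* Hypothetical helper; none of this is taken from the driver itself. */
static IMG_BOOL ExampleEnableBlock(RGXFWIF_HWPERF_CTL *psCtl,
                                   RGX_HWPERF_CNTBLK_ID eBlockID,
                                   IMG_UINT32 uiCounterMask)
{
	RGXFWIF_HWPERF_CTL_BLK *psBlk =
	        rgxfw_hwperf_get_block_ctl(eBlockID, psCtl);

	if (psBlk == NULL)
	{
		/* The block ID hashed past the configuration table. */
		return IMG_FALSE;
	}

	psBlk->bValid        = IMG_TRUE;
	psBlk->bEnabled      = IMG_TRUE;
	psBlk->eBlockID      = (IMG_UINT32)eBlockID;
	psBlk->uiCounterMask = uiCounterMask;

	return IMG_TRUE;
}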
diff --git a/drivers/gpu/drm/img/img-rogue/include/rogue/rgx_fwif_km.h b/drivers/gpu/drm/img/img-rogue/include/rogue/rgx_fwif_km.h
new file mode 100644 (file)
index 0000000..9b665b6
--- /dev/null
@@ -0,0 +1,2341 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX firmware interface structures used by pvrsrvkm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX firmware interface structures used by pvrsrvkm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGX_FWIF_KM_H)
+#define RGX_FWIF_KM_H
+
+#include "img_types.h"
+#include "rgx_fwif_shared.h"
+#include "km/rgxdefs_km.h"
+#include "dllist.h"
+#include "rgx_hwperf.h"
+
+
+/*************************************************************************/ /*!
+ Logging type
+*/ /**************************************************************************/
+#define RGXFWIF_LOG_TYPE_NONE                  0x00000000U
+#define RGXFWIF_LOG_TYPE_TRACE                 0x00000001U
+#define RGXFWIF_LOG_TYPE_GROUP_MAIN            0x00000002U
+#define RGXFWIF_LOG_TYPE_GROUP_MTS             0x00000004U
+#define RGXFWIF_LOG_TYPE_GROUP_CLEANUP 0x00000008U
+#define RGXFWIF_LOG_TYPE_GROUP_CSW             0x00000010U
+#define RGXFWIF_LOG_TYPE_GROUP_BIF             0x00000020U
+#define RGXFWIF_LOG_TYPE_GROUP_PM              0x00000040U
+#define RGXFWIF_LOG_TYPE_GROUP_RTD             0x00000080U
+#define RGXFWIF_LOG_TYPE_GROUP_SPM             0x00000100U
+#define RGXFWIF_LOG_TYPE_GROUP_POW             0x00000200U
+#define RGXFWIF_LOG_TYPE_GROUP_HWR             0x00000400U
+#define RGXFWIF_LOG_TYPE_GROUP_HWP             0x00000800U
+#define RGXFWIF_LOG_TYPE_GROUP_RPM             0x00001000U
+#define RGXFWIF_LOG_TYPE_GROUP_DMA             0x00002000U
+#define RGXFWIF_LOG_TYPE_GROUP_MISC            0x00004000U
+#define RGXFWIF_LOG_TYPE_GROUP_DEBUG   0x80000000U
+#define RGXFWIF_LOG_TYPE_GROUP_MASK            0x80007FFEU
+#define RGXFWIF_LOG_TYPE_MASK                  0x80007FFFU
+
+/* String used in pvrdebug -h output */
+#define RGXFWIF_LOG_GROUPS_STRING_LIST   "main,mts,cleanup,csw,bif,pm,rtd,spm,pow,hwr,hwp,rpm,dma,misc,debug"
+
+/* Table entry to map log group strings to log type value */
+typedef struct {
+       const IMG_CHAR* pszLogGroupName;
+       IMG_UINT32      ui32LogGroupType;
+} RGXFWIF_LOG_GROUP_MAP_ENTRY;
+
+/*
+  Macro for use with the RGXFWIF_LOG_GROUP_MAP_ENTRY type to create a lookup
+  table where needed. Keep log group names short, no more than 20 chars.
+*/
+#define RGXFWIF_LOG_GROUP_NAME_VALUE_MAP { "none",    RGXFWIF_LOG_TYPE_NONE }, \
+                                         { "main",    RGXFWIF_LOG_TYPE_GROUP_MAIN }, \
+                                         { "mts",     RGXFWIF_LOG_TYPE_GROUP_MTS }, \
+                                         { "cleanup", RGXFWIF_LOG_TYPE_GROUP_CLEANUP }, \
+                                         { "csw",     RGXFWIF_LOG_TYPE_GROUP_CSW }, \
+                                         { "bif",     RGXFWIF_LOG_TYPE_GROUP_BIF }, \
+                                         { "pm",      RGXFWIF_LOG_TYPE_GROUP_PM }, \
+                                         { "rtd",     RGXFWIF_LOG_TYPE_GROUP_RTD }, \
+                                         { "spm",     RGXFWIF_LOG_TYPE_GROUP_SPM }, \
+                                         { "pow",     RGXFWIF_LOG_TYPE_GROUP_POW }, \
+                                         { "hwr",     RGXFWIF_LOG_TYPE_GROUP_HWR }, \
+                                         { "hwp",     RGXFWIF_LOG_TYPE_GROUP_HWP }, \
+                                         { "rpm",     RGXFWIF_LOG_TYPE_GROUP_RPM }, \
+                                         { "dma",     RGXFWIF_LOG_TYPE_GROUP_DMA }, \
+                                         { "misc",    RGXFWIF_LOG_TYPE_GROUP_MISC }, \
+                                         { "debug",   RGXFWIF_LOG_TYPE_GROUP_DEBUG }
+
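
As a hedged illustration of how RGXFWIF_LOG_GROUP_NAME_VALUE_MAP is meant to be consumed, the sketch below builds the lookup table and resolves a group name to its log-type value. The helper function is hypothetical and uses the C library strcmp for brevity; the real driver may use its own string routines, and definitions from this header are assumed to be in scope.

#include <string.h>   /* strcmp used purely for brevity */

/* Lookup table built from the macro above. */
static const RGXFWIF_LOG_GROUP_MAP_ENTRY asLogGroupMap[] = {
	RGXFWIF_LOG_GROUP_NAME_VALUE_MAP
};

/* Hypothetical helper: map a group name (e.g. from an AppHint string) to
 * its RGXFWIF_LOG_TYPE_* value, or RGXFWIF_LOG_TYPE_NONE if unknown. */
static IMG_UINT32 ExampleLogGroupFromName(const IMG_CHAR *pszName)
{
	size_t i;

	for (i = 0; i < sizeof(asLogGroupMap) / sizeof(asLogGroupMap[0]); i++)
	{
		if (strcmp(pszName, asLogGroupMap[i].pszLogGroupName) == 0)
		{
			return asLogGroupMap[i].ui32LogGroupType;
		}
	}
	return RGXFWIF_LOG_TYPE_NONE;
}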
+
+/* Used in print statements to display log group state, one %s per group defined */
+#define RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC  "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
+
+/* Used in a print statement to display log group state, one per group */
+#define RGXFWIF_LOG_ENABLED_GROUPS_LIST(types)  ((((types) & RGXFWIF_LOG_TYPE_GROUP_MAIN) != 0U)       ?("main ")              :("")),         \
+                                                ((((types) & RGXFWIF_LOG_TYPE_GROUP_MTS) != 0U)                ?("mts ")               :("")),         \
+                                                ((((types) & RGXFWIF_LOG_TYPE_GROUP_CLEANUP) != 0U)    ?("cleanup ")   :("")),         \
+                                                ((((types) & RGXFWIF_LOG_TYPE_GROUP_CSW) != 0U)                ?("csw ")               :("")),         \
+                                                ((((types) & RGXFWIF_LOG_TYPE_GROUP_BIF) != 0U)                ?("bif ")               :("")),         \
+                                                ((((types) & RGXFWIF_LOG_TYPE_GROUP_PM) != 0U)         ?("pm ")                :("")),         \
+                                                ((((types) & RGXFWIF_LOG_TYPE_GROUP_RTD) != 0U)                ?("rtd ")               :("")),         \
+                                                ((((types) & RGXFWIF_LOG_TYPE_GROUP_SPM) != 0U)                ?("spm ")               :("")),         \
+                                                ((((types) & RGXFWIF_LOG_TYPE_GROUP_POW) != 0U)                ?("pow ")               :("")),         \
+                                                ((((types) & RGXFWIF_LOG_TYPE_GROUP_HWR) != 0U)                ?("hwr ")               :("")),         \
+                                                ((((types) & RGXFWIF_LOG_TYPE_GROUP_HWP) != 0U)                ?("hwp ")               :("")),         \
+                                                ((((types) & RGXFWIF_LOG_TYPE_GROUP_RPM) != 0U)                ?("rpm ")               :("")),         \
+                                                ((((types) & RGXFWIF_LOG_TYPE_GROUP_DMA) != 0U)                ?("dma ")               :("")),         \
+                                                ((((types) & RGXFWIF_LOG_TYPE_GROUP_MISC) != 0U)       ?("misc ")              :("")),         \
+                                                ((((types) & RGXFWIF_LOG_TYPE_GROUP_DEBUG) != 0U)      ?("debug ")             :(""))
+
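
The two macros above are meant to be used as a pair: the _PFSPEC macro supplies one "%s" per defined group and the _LIST macro supplies the matching argument list. A hypothetical print helper, with printf standing in for the driver's own logging call, could look like this:

#include <stdio.h>    /* printf stands in for the driver's logging call */

static void ExamplePrintEnabledGroups(IMG_UINT32 ui32LogType)
{
	printf("FW log groups enabled: "
	       RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC "\n",
	       RGXFWIF_LOG_ENABLED_GROUPS_LIST(ui32LogType));
}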
+
+/************************************************************************
+* RGX FW signature checks
+************************************************************************/
+#define RGXFW_SIG_BUFFER_SIZE_MIN       (8192)
+
+#define RGXFWIF_TIMEDIFF_ID                    ((0x1UL << 28) | RGX_CR_TIMER)
+
+/*!
+ ******************************************************************************
+ * Trace Buffer
+ *****************************************************************************/
+
+/*! Default size of RGXFWIF_TRACEBUF_SPACE in DWords */
+#define RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS 12000U
+#define RGXFW_TRACE_BUFFER_ASSERT_SIZE 200U
+#if defined(RGXFW_META_SUPPORT_2ND_THREAD)
+#define RGXFW_THREAD_NUM 2U
+#else
+#define RGXFW_THREAD_NUM 1U
+#endif
+
+#define RGXFW_POLL_TYPE_SET 0x80000000U
+
+typedef struct
+{
+       IMG_CHAR        szPath[RGXFW_TRACE_BUFFER_ASSERT_SIZE];
+       IMG_CHAR        szInfo[RGXFW_TRACE_BUFFER_ASSERT_SIZE];
+       IMG_UINT32      ui32LineNum;
+} UNCACHED_ALIGN RGXFWIF_FILE_INFO_BUF;
+
+/*!
+ * @Defgroup SRVAndFWTracing Services and Firmware Tracing data interface
+ * @Brief Groups the data structures and interfaces related to Services and Firmware Tracing
+ * @{
+ */
+
+/*!
+ * @Brief Firmware trace buffer details
+ */
+typedef struct
+{
+       IMG_UINT32            ui32TracePointer;          /*!< Trace pointer (write index into Trace Buffer)*/
+
+#if defined(RGX_FIRMWARE)
+       IMG_UINT32            *pui32RGXFWIfTraceBuffer;  /*!< Trace buffer address (FW address), to be used by firmware for writing into trace buffer */
+#else
+       RGXFWIF_DEV_VIRTADDR  pui32RGXFWIfTraceBuffer;   /*!< Trace buffer address (FW address)*/
+#endif
+       IMG_PUINT32           pui32TraceBuffer;          /*!< Trace buffer address (Host address), to be used by host when reading from trace buffer */
+
+       RGXFWIF_FILE_INFO_BUF sAssertBuf;
+} UNCACHED_ALIGN RGXFWIF_TRACEBUF_SPACE;
+
+/*! @} End of Defgroup SRVAndFWTracing */
+
+#define RGXFWIF_FWFAULTINFO_MAX                (8U)                    /* Total number of FW fault logs stored */
+
+typedef struct
+{
+       IMG_UINT64 RGXFW_ALIGN  ui64CRTimer;
+       IMG_UINT64 RGXFW_ALIGN  ui64OSTimer;
+       IMG_UINT32 RGXFW_ALIGN  ui32Data;
+       IMG_UINT32 ui32Reserved;
+       RGXFWIF_FILE_INFO_BUF   sFaultBuf;
+} UNCACHED_ALIGN RGX_FWFAULTINFO;
+
+
+#define RGXFWIF_POW_STATES \
+  X(RGXFWIF_POW_OFF)                   /* idle and handshaked with the host (ready to full power down) */ \
+  X(RGXFWIF_POW_ON)                            /* running HW commands */ \
+  X(RGXFWIF_POW_FORCED_IDLE)   /* forced idle */ \
+  X(RGXFWIF_POW_IDLE)                  /* idle waiting for host handshake */
+
+typedef enum
+{
+#define X(NAME) NAME,
+       RGXFWIF_POW_STATES
+#undef X
+} RGXFWIF_POW_STATE;
+
+/* Firmware HWR states */
+#define RGXFWIF_HWR_HARDWARE_OK                        (IMG_UINT32_C(0x1) << 0U)       /*!< The HW state is ok or locked up */
+#define RGXFWIF_HWR_RESET_IN_PROGRESS  (IMG_UINT32_C(0x1) << 1U)       /*!< Tells if a HWR reset is in progress */
+#define RGXFWIF_HWR_GENERAL_LOCKUP             (IMG_UINT32_C(0x1) << 3U)       /*!< A DM unrelated lockup has been detected */
+#define RGXFWIF_HWR_DM_RUNNING_OK              (IMG_UINT32_C(0x1) << 4U)       /*!< At least one DM is running without being close to a lockup */
+#define RGXFWIF_HWR_DM_STALLING                        (IMG_UINT32_C(0x1) << 5U)       /*!< At least one DM is close to lockup */
+#define RGXFWIF_HWR_FW_FAULT                   (IMG_UINT32_C(0x1) << 6U)       /*!< The FW has faulted and needs to restart */
+#define RGXFWIF_HWR_RESTART_REQUESTED  (IMG_UINT32_C(0x1) << 7U)       /*!< The FW has requested the host to restart it */
+
+#define RGXFWIF_PHR_STATE_SHIFT                        (8U)
+#define RGXFWIF_PHR_RESTART_REQUESTED  (IMG_UINT32_C(1) << RGXFWIF_PHR_STATE_SHIFT)    /*!< The FW has requested the host to restart it, per PHR configuration */
+#define RGXFWIF_PHR_RESTART_FINISHED   (IMG_UINT32_C(2) << RGXFWIF_PHR_STATE_SHIFT)    /*!< A PHR triggered GPU reset has just finished */
+#define RGXFWIF_PHR_RESTART_MASK               (RGXFWIF_PHR_RESTART_REQUESTED | RGXFWIF_PHR_RESTART_FINISHED)
+
+#define RGXFWIF_PHR_MODE_OFF                   (0UL)
+#define RGXFWIF_PHR_MODE_RD_RESET              (1UL)
+#define RGXFWIF_PHR_MODE_FULL_RESET            (2UL)
+
+typedef IMG_UINT32 RGXFWIF_HWR_STATEFLAGS;
+
+/* Firmware per-DM HWR states */
+#define RGXFWIF_DM_STATE_WORKING                                       (0x00U)         /*!< DM is working if all flags are cleared */
+#define RGXFWIF_DM_STATE_READY_FOR_HWR                         (IMG_UINT32_C(0x1) << 0)        /*!< DM is idle and ready for HWR */
+#define RGXFWIF_DM_STATE_NEEDS_SKIP                                    (IMG_UINT32_C(0x1) << 2)        /*!< DM needs to skip to the next cmd before resuming processing */
+#define RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP                      (IMG_UINT32_C(0x1) << 3)        /*!< DM needs partial render cleanup before resuming processing */
+#define RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR                     (IMG_UINT32_C(0x1) << 4)        /*!< DM needs to increment the Recovery Count once fully recovered */
+#define RGXFWIF_DM_STATE_GUILTY_LOCKUP                         (IMG_UINT32_C(0x1) << 5)        /*!< DM was identified as locking up and causing HWR */
+#define RGXFWIF_DM_STATE_INNOCENT_LOCKUP                       (IMG_UINT32_C(0x1) << 6)        /*!< DM was innocently affected by another lockup which caused HWR */
+#define RGXFWIF_DM_STATE_GUILTY_OVERRUNING                     (IMG_UINT32_C(0x1) << 7)        /*!< DM was identified as over-running and causing HWR */
+#define RGXFWIF_DM_STATE_INNOCENT_OVERRUNING           (IMG_UINT32_C(0x1) << 8)        /*!< DM was innocently affected by another DM over-running which caused HWR */
+#define RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH           (IMG_UINT32_C(0x1) << 9)        /*!< DM was forced into HWR as it delayed more important workloads */
+#define RGXFWIF_DM_STATE_GPU_ECC_HWR                           (IMG_UINT32_C(0x1) << 10)       /*!< DM was forced into HWR due to an uncorrected GPU ECC error */
+
+/* Firmware's connection state */
+typedef IMG_UINT32 RGXFWIF_CONNECTION_FW_STATE;
+#define RGXFW_CONNECTION_FW_OFFLINE            0U      /*!< Firmware is offline */
+#define RGXFW_CONNECTION_FW_READY              1U      /*!< Firmware is initialised */
+#define RGXFW_CONNECTION_FW_ACTIVE             2U      /*!< Firmware connection is fully established */
+#define RGXFW_CONNECTION_FW_OFFLOADING 3U      /*!< Firmware is clearing up connection data */
+#define RGXFW_CONNECTION_FW_STATE_COUNT        4U
+
+/* OS' connection state */
+typedef enum
+{
+       RGXFW_CONNECTION_OS_OFFLINE = 0,        /*!< OS is offline */
+       RGXFW_CONNECTION_OS_READY,                      /*!< OS's KM driver is setup and waiting */
+       RGXFW_CONNECTION_OS_ACTIVE,                     /*!< OS connection is fully established */
+       RGXFW_CONNECTION_OS_STATE_COUNT
+} RGXFWIF_CONNECTION_OS_STATE;
+
+typedef struct
+{
+       IMG_UINT                        bfOsState               : 3;
+       IMG_UINT                        bfFLOk                  : 1;
+       IMG_UINT                        bfFLGrowPending : 1;
+       IMG_UINT                        bfIsolatedOS    : 1;
+       IMG_UINT                        bfReserved              : 26;
+} RGXFWIF_OS_RUNTIME_FLAGS;
+
+typedef IMG_UINT32 RGXFWIF_HWR_RECOVERYFLAGS;
+
+#if defined(PVRSRV_STALLED_CCB_ACTION)
+#define PVR_SLR_LOG_ENTRIES 10U
+#define PVR_SLR_LOG_STRLEN  30 /*!< MAX_CLIENT_CCB_NAME not visible to this header */
+
+typedef struct
+{
+       IMG_UINT64 RGXFW_ALIGN  ui64Timestamp;
+       IMG_UINT32                              ui32FWCtxAddr;
+       IMG_UINT32                              ui32NumUFOs;
+       IMG_CHAR                                aszCCBName[PVR_SLR_LOG_STRLEN];
+} UNCACHED_ALIGN RGXFWIF_SLR_ENTRY;
+#endif
+
+/*!
+ * @InGroup SRVAndFWTracing
+ * @Brief Firmware trace control data
+ */
+typedef struct
+{
+       IMG_UINT32              ui32LogType;                  /*!< FW trace log group configuration */
+       RGXFWIF_TRACEBUF_SPACE  sTraceBuf[RGXFW_THREAD_NUM];  /*!< FW Trace buffer */
+       IMG_UINT32              ui32TraceBufSizeInDWords;     /*!< FW Trace buffer size in dwords. Member is initialised only when sTraceBuf is actually allocated
+                                                                                                                       (in RGXTraceBufferInitOnDemandResources) */
+       IMG_UINT32              ui32TracebufFlags;            /*!< Compatibility and other flags */
+} UNCACHED_ALIGN RGXFWIF_TRACEBUF;
+
+/*! @Brief Firmware system data shared with the Host driver */
+typedef struct
+{
+       IMG_UINT32                 ui32ConfigFlags;                       /*!< Configuration flags from host */
+       IMG_UINT32                 ui32ConfigFlagsExt;                    /*!< Extended configuration flags from host */
+       volatile RGXFWIF_POW_STATE ePowState;
+       volatile IMG_UINT32        ui32HWPerfRIdx;
+       volatile IMG_UINT32        ui32HWPerfWIdx;
+       volatile IMG_UINT32        ui32HWPerfWrapCount;
+       IMG_UINT32                 ui32HWPerfSize;                        /*!< Constant after setup, needed in FW */
+       IMG_UINT32                 ui32HWPerfDropCount;                   /*!< The number of times the FW drops a packet due to buffer full */
+
+       /* ui32HWPerfUt, ui32FirstDropOrdinal, ui32LastDropOrdinal only valid when FW is built with
+        * RGX_HWPERF_UTILIZATION & RGX_HWPERF_DROP_TRACKING defined in rgxfw_hwperf.c */
+       IMG_UINT32                 ui32HWPerfUt;                          /*!< Buffer utilisation, high watermark of bytes in use */
+       IMG_UINT32                 ui32FirstDropOrdinal;                  /*!< The ordinal of the first packet the FW dropped */
+       IMG_UINT32                 ui32LastDropOrdinal;                   /*!< The ordinal of the last packet the FW dropped */
+       RGXFWIF_OS_RUNTIME_FLAGS   asOsRuntimeFlagsMirror[RGXFW_MAX_NUM_OS];/*!< State flags for each Operating System mirrored from Fw coremem */
+       RGX_FWFAULTINFO            sFaultInfo[RGXFWIF_FWFAULTINFO_MAX];   /*!< Firmware fault info */
+       IMG_UINT32                 ui32FWFaults;                          /*!< Firmware faults count */
+       IMG_UINT32                 aui32CrPollAddr[RGXFW_THREAD_NUM];     /*!< Failed poll address */
+       IMG_UINT32                 aui32CrPollMask[RGXFW_THREAD_NUM];     /*!< Failed poll mask */
+       IMG_UINT32                 aui32CrPollCount[RGXFW_THREAD_NUM];    /*!< Failed poll count */
+       IMG_UINT64 RGXFW_ALIGN     ui64StartIdleTime;
+#if defined(SUPPORT_POWMON_COMPONENT)
+#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS)
+       RGXFWIF_TRACEBUF_SPACE     sPowerMonBuf;
+       IMG_UINT32                 ui32PowerMonBufSizeInDWords;
+#endif
+#endif
+
+#if defined(SUPPORT_RGXFW_STATS_FRAMEWORK)
+#define RGXFWIF_STATS_FRAMEWORK_LINESIZE    (8)
+#define RGXFWIF_STATS_FRAMEWORK_MAX         (2048*RGXFWIF_STATS_FRAMEWORK_LINESIZE)
+       IMG_UINT32 RGXFW_ALIGN     aui32FWStatsBuf[RGXFWIF_STATS_FRAMEWORK_MAX];
+#endif
+       RGXFWIF_HWR_STATEFLAGS     ui32HWRStateFlags; /*!< Firmware's Current HWR state */
+       RGXFWIF_HWR_RECOVERYFLAGS  aui32HWRRecoveryFlags[RGXFWIF_DM_MAX]; /*!< Each DM's HWR state */
+       IMG_UINT32                 ui32FwSysDataFlags;                      /*!< Compatibility and other flags */
+       IMG_UINT32                 ui32McConfig;                            /*!< Identify whether MC config is P-P or P-S */
+} UNCACHED_ALIGN RGXFWIF_SYSDATA;
+
+/*!
+ * @InGroup ContextSwitching
+ * @Brief Firmware per-os data and configuration
+ */
+typedef struct
+{
+       IMG_UINT32                 ui32FwOsConfigFlags;                   /*!< Configuration flags from an OS */
+       IMG_UINT32                 ui32FWSyncCheckMark;                   /*!< Markers to signal that the host should perform a full sync check */
+       IMG_UINT32                 ui32HostSyncCheckMark;                  /*!< Markers to signal that the Firmware should perform a full sync check */
+#if defined(PVRSRV_STALLED_CCB_ACTION)
+       IMG_UINT32                 ui32ForcedUpdatesRequested;
+       IMG_UINT8                  ui8SLRLogWp;
+       RGXFWIF_SLR_ENTRY          sSLRLogFirst;
+       RGXFWIF_SLR_ENTRY          sSLRLog[PVR_SLR_LOG_ENTRIES];
+       IMG_UINT64 RGXFW_ALIGN     ui64LastForcedUpdateTime;
+#endif
+       volatile IMG_UINT32        aui32InterruptCount[RGXFW_THREAD_NUM]; /*!< Interrupt count from Threads */
+       IMG_UINT32                 ui32KCCBCmdsExecuted;                  /*!< Executed Kernel CCB command count */
+       RGXFWIF_DEV_VIRTADDR       sPowerSync;                            /*!< Sync prim used to signal the host the power off state */
+       IMG_UINT32                 ui32FwOsDataFlags;                       /*!< Compatibility and other flags */
+} UNCACHED_ALIGN RGXFWIF_OSDATA;
+
+/* Firmware trace time-stamp field breakup */
+
+/* RGX_CR_TIMER register read (48 bits) value*/
+#define RGXFWT_TIMESTAMP_TIME_SHIFT                   (0U)
+#define RGXFWT_TIMESTAMP_TIME_CLRMSK                  (IMG_UINT64_C(0xFFFF000000000000))
+
+/* Extra debug-info (16 bits) */
+#define RGXFWT_TIMESTAMP_DEBUG_INFO_SHIFT             (48U)
+#define RGXFWT_TIMESTAMP_DEBUG_INFO_CLRMSK            ~RGXFWT_TIMESTAMP_TIME_CLRMSK
+
+
+/* Debug-info sub-fields */
+/* Bit 0: RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT bit from RGX_CR_EVENT_STATUS register */
+#define RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT        (0U)
+#define RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SET          (1U << RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT)
+
+/* Bit 1: RGX_CR_BIF_MMU_ENTRY_PENDING bit from RGX_CR_BIF_MMU_ENTRY register */
+#define RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT     (1U)
+#define RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET       (1U << RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT)
+
+/* Bit 2: RGX_CR_SLAVE_EVENT register is non-zero */
+#define RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT          (2U)
+#define RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SET            (1U << RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT)
+
+/* Bit 3-15: Unused bits */
+
+#define RGXFWT_DEBUG_INFO_STR_MAXLEN                  64
+#define RGXFWT_DEBUG_INFO_STR_PREPEND                 " (debug info: "
+#define RGXFWT_DEBUG_INFO_STR_APPEND                  ")"
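
Given the field breakup above (48-bit RGX_CR_TIMER value in the low bits, 16-bit debug info in the high bits), a trace word can be unpacked as in the sketch below. The helper is illustrative only and assumes the definitions in this header are in scope.

/* ui64TraceWord is a raw 64-bit word read from the trace buffer. */
static void ExampleUnpackTraceTimestamp(IMG_UINT64 ui64TraceWord,
                                        IMG_UINT64 *pui64Time,
                                        IMG_UINT32 *pui32DebugInfo)
{
	/* Low 48 bits: RGX_CR_TIMER snapshot */
	*pui64Time = (ui64TraceWord & ~RGXFWT_TIMESTAMP_TIME_CLRMSK)
	             >> RGXFWT_TIMESTAMP_TIME_SHIFT;

	/* High 16 bits: debug-info flags (see the sub-field defines above) */
	*pui32DebugInfo = (IMG_UINT32)
	        ((ui64TraceWord & ~RGXFWT_TIMESTAMP_DEBUG_INFO_CLRMSK)
	         >> RGXFWT_TIMESTAMP_DEBUG_INFO_SHIFT);
}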
+
+/* Table of debug info sub-field's masks and corresponding message strings
+ * to be appended to firmware trace
+ *
+ * Mask     : 16 bit mask to be applied to debug-info field
+ * String   : debug info message string
+ */
+
+#define RGXFWT_DEBUG_INFO_MSKSTRLIST \
+/*Mask,                                           String*/ \
+X(RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SET,      "mmu pf") \
+X(RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET,   "mmu pending") \
+X(RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SET,        "slave events")
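
The mask/string list above is an X-macro in the same style as RGXFW_LOG_SFGROUPLIST: a consumer redefines X to emit whatever it needs per entry. The sketch below appends a short description for each debug-info bit that is set; the helper and its use of strcat are illustrative assumptions, not the driver's actual string handling.

#include <string.h>

/* Illustrative helper: pszBuf is assumed large enough for the prepend
 * string, all matching descriptions and the append string. */
static void ExampleAppendDebugInfo(IMG_CHAR *pszBuf, IMG_UINT32 ui32DebugInfo)
{
	if (ui32DebugInfo == 0U)
	{
		return;                /* nothing to report */
	}

	strcat(pszBuf, RGXFWT_DEBUG_INFO_STR_PREPEND);

#define X(mask, string)                         \
	if ((ui32DebugInfo & (mask)) != 0U)         \
	{                                           \
		strcat(pszBuf, string " ");             \
	}
	RGXFWT_DEBUG_INFO_MSKSTRLIST
#undef X

	strcat(pszBuf, RGXFWT_DEBUG_INFO_STR_APPEND);
}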
+
+/*!
+ ******************************************************************************
+ * HWR Data
+ *****************************************************************************/
+/*!
+ * @Defgroup HWRInfo FW HWR shared data interface
+ * @Brief Types grouping data structures and defines used in realising the HWR record.
+ * @{
+ */
+/*! @Brief HWR Lockup types */
+typedef enum
+{
+       RGX_HWRTYPE_UNKNOWNFAILURE = 0, /*!< Unknown failure */
+       RGX_HWRTYPE_OVERRUN        = 1, /*!< DM overrun */
+       RGX_HWRTYPE_POLLFAILURE    = 2, /*!< Poll failure */
+       RGX_HWRTYPE_BIF0FAULT      = 3, /*!< BIF0 fault */
+       RGX_HWRTYPE_BIF1FAULT      = 4, /*!< BIF1 fault */
+       RGX_HWRTYPE_TEXASBIF0FAULT = 5, /*!< TEXASBIF0 fault */
+       RGX_HWRTYPE_MMUFAULT       = 6, /*!< MMU fault */
+       RGX_HWRTYPE_MMUMETAFAULT   = 7, /*!< MMU META fault */
+       RGX_HWRTYPE_MIPSTLBFAULT   = 8, /*!< MIPS TLB fault */
+       RGX_HWRTYPE_ECCFAULT       = 9, /*!< ECC fault */
+       RGX_HWRTYPE_MMURISCVFAULT  = 10, /*!< MMU RISCV fault */
+} RGX_HWRTYPE;
+
+#define RGXFWIF_HWRTYPE_BIF_BANK_GET(eHWRType) (((eHWRType) == RGX_HWRTYPE_BIF0FAULT) ? 0 : 1)
+
+#define RGXFWIF_HWRTYPE_PAGE_FAULT_GET(eHWRType) ((((eHWRType) == RGX_HWRTYPE_BIF0FAULT)      ||       \
+                                                   ((eHWRType) == RGX_HWRTYPE_BIF1FAULT)      ||       \
+                                                   ((eHWRType) == RGX_HWRTYPE_TEXASBIF0FAULT) ||       \
+                                                   ((eHWRType) == RGX_HWRTYPE_MMUFAULT)       ||       \
+                                                   ((eHWRType) == RGX_HWRTYPE_MMUMETAFAULT)   ||       \
+                                                   ((eHWRType) == RGX_HWRTYPE_MIPSTLBFAULT)   ||       \
+                                                   ((eHWRType) == RGX_HWRTYPE_MMURISCVFAULT)) ? true : false)
+
+typedef struct
+{
+       IMG_UINT64      RGXFW_ALIGN             ui64BIFReqStatus; /*!< BIF request status */
+       IMG_UINT64      RGXFW_ALIGN             ui64BIFMMUStatus; /*!< MMU status */
+       IMG_UINT64      RGXFW_ALIGN             ui64PCAddress; /*!< phys address of the page catalogue */
+       IMG_UINT64      RGXFW_ALIGN             ui64Reserved;
+} RGX_BIFINFO;
+
+typedef struct
+{
+       IMG_UINT32 ui32FaultGPU; /*!< ECC fault in GPU */
+} RGX_ECCINFO;
+
+typedef struct
+{
+       IMG_UINT64      RGXFW_ALIGN             aui64MMUStatus[2]; /*!< MMU status */
+       IMG_UINT64      RGXFW_ALIGN             ui64PCAddress; /*!< phys address of the page catalogue */
+       IMG_UINT64      RGXFW_ALIGN             ui64Reserved;
+} RGX_MMUINFO;
+
+typedef struct
+{
+       IMG_UINT32      ui32ThreadNum; /*!< Thread ID performing poll operation */
+       IMG_UINT32      ui32CrPollAddr; /*!< CR Poll Address */
+       IMG_UINT32      ui32CrPollMask; /*!< CR Poll mask */
+       IMG_UINT32      ui32CrPollLastValue; /*!< CR Poll last value */
+       IMG_UINT64      RGXFW_ALIGN ui64Reserved;
+} UNCACHED_ALIGN RGX_POLLINFO;
+
+typedef struct
+{
+       IMG_UINT32 ui32BadVAddr; /*!< VA address */
+       IMG_UINT32 ui32EntryLo;
+} RGX_TLBINFO;
+
+/*! @Brief Structure to keep information specific to a lockup e.g. DM, timer, lockup type etc. */
+typedef struct
+{
+       union
+       {
+               RGX_BIFINFO  sBIFInfo; /*!< BIF failure details */
+               RGX_MMUINFO  sMMUInfo; /*!< MMU failure details */
+               RGX_POLLINFO sPollInfo; /*!< Poll failure details */
+               RGX_TLBINFO  sTLBInfo; /*!< TLB failure details */
+               RGX_ECCINFO  sECCInfo; /*!< ECC failure details */
+       } uHWRData;
+
+       IMG_UINT64 RGXFW_ALIGN ui64CRTimer; /*!< Timer value at the time of lockup */
+       IMG_UINT64 RGXFW_ALIGN ui64OSTimer; /*!< OS timer value at the time of lockup */
+       IMG_UINT32             ui32FrameNum; /*!< Frame number of the workload */
+       IMG_UINT32             ui32PID; /*!< PID belonging to the workload */
+       IMG_UINT32             ui32ActiveHWRTData; /*!< HWRT data of the workload */
+       IMG_UINT32             ui32HWRNumber; /*!< HWR number */
+       IMG_UINT32             ui32EventStatus; /*!< Core specific event status register at the time of lockup */
+       IMG_UINT32             ui32HWRRecoveryFlags; /*!< DM state flags */
+       RGX_HWRTYPE            eHWRType; /*!< Type of lockup */
+       RGXFWIF_DM             eDM; /*!< Recovery triggered for the DM */
+       IMG_UINT32             ui32CoreID; /*!< Core ID of the GPU */
+       IMG_UINT64 RGXFW_ALIGN ui64CRTimeOfKick; /*!< Workload kick time */
+       IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetStart; /*!< HW reset start time */
+       IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetFinish; /*!< HW reset stop time */
+       IMG_UINT64 RGXFW_ALIGN ui64CRTimeFreelistReady; /*!< freelist ready time on the last HWR */
+       IMG_UINT64 RGXFW_ALIGN ui64Reserved[2];
+} UNCACHED_ALIGN RGX_HWRINFO;
+
+#define RGXFWIF_HWINFO_MAX_FIRST 8U                                                    /* Number of first HWR logs recorded (never overwritten by newer logs) */
+#define RGXFWIF_HWINFO_MAX_LAST 8U                                                     /* Number of latest HWR logs (older logs are overwritten by newer logs) */
+#define RGXFWIF_HWINFO_MAX (RGXFWIF_HWINFO_MAX_FIRST + RGXFWIF_HWINFO_MAX_LAST)        /* Total number of HWR logs stored in a buffer */
+#define RGXFWIF_HWINFO_LAST_INDEX (RGXFWIF_HWINFO_MAX - 1U)    /* Index of the last log in the HWR log buffer */
+
+/*! @Brief Firmware HWR information structure allocated by the Services and used by the Firmware to update recovery information. */
+typedef struct
+{
+       RGX_HWRINFO sHWRInfo[RGXFWIF_HWINFO_MAX]; /*!< Max number of recovery record */
+       IMG_UINT32  ui32HwrCounter; /*!< HWR counter used in FL reconstruction */
+       IMG_UINT32  ui32WriteIndex; /*!< Index for updating recovery information in sHWRInfo */
+       IMG_UINT32  ui32DDReqCount; /*!< Count of DebugDump requested to the host after recovery */
+       IMG_UINT32  ui32HWRInfoBufFlags; /*!< Compatibility and other flags */
+       IMG_UINT32  aui32HwrDmLockedUpCount[RGXFWIF_DM_MAX]; /*!< Lockup count for each DM */
+       IMG_UINT32  aui32HwrDmOverranCount[RGXFWIF_DM_MAX]; /*!< Overrun count for each DM */
+       IMG_UINT32  aui32HwrDmRecoveredCount[RGXFWIF_DM_MAX]; /*!< Lockup + Overrun count for each DM */
+       IMG_UINT32  aui32HwrDmFalseDetectCount[RGXFWIF_DM_MAX]; /*!< False lockup detection count for each DM */
+} UNCACHED_ALIGN RGXFWIF_HWRINFOBUF;
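+
+/* Illustrative sketch (not part of the interface): one way ui32WriteIndex
+ * could be advanced so that the first RGXFWIF_HWINFO_MAX_FIRST records are
+ * preserved and later records rotate through the remaining slots, matching
+ * the intent of the defines above. The firmware's actual policy may differ.
+ */
+static inline IMG_UINT32 RGXExampleNextHWRWriteIndex(IMG_UINT32 ui32CurIndex)
+{
+       IMG_UINT32 ui32Next = ui32CurIndex + 1U;
+
+       if (ui32Next > RGXFWIF_HWINFO_LAST_INDEX)
+       {
+               /* Wrap into the "latest" half only; never overwrite the first logs */
+               ui32Next = RGXFWIF_HWINFO_MAX_FIRST;
+       }
+       return ui32Next;
+}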
+
+/*! @} End of HWRInfo */
+
+#define RGXFWIF_CTXSWITCH_PROFILE_FAST_EN              (IMG_UINT32_C(0x1))
+#define RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN            (IMG_UINT32_C(0x2))
+#define RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN              (IMG_UINT32_C(0x3))
+#define RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN   (IMG_UINT32_C(0x4))
+
+#define RGXFWIF_CDM_ARBITRATION_TASK_DEMAND_EN (IMG_UINT32_C(0x1))
+#define RGXFWIF_CDM_ARBITRATION_ROUND_ROBIN_EN (IMG_UINT32_C(0x2))
+
+#define RGXFWIF_ISP_SCHEDMODE_VER1_IPP (IMG_UINT32_C(0x1))
+#define RGXFWIF_ISP_SCHEDMODE_VER2_ISP (IMG_UINT32_C(0x2))
+/*!
+ ******************************************************************************
+ * RGX firmware Init Config Data
+ *****************************************************************************/
+
+/* Flag definitions affecting the firmware globally */
+#define RGXFWIF_INICFG_CTXSWITCH_MODE_RAND                             (IMG_UINT32_C(0x1) << 0)
+#define RGXFWIF_INICFG_CTXSWITCH_SRESET_EN                             (IMG_UINT32_C(0x1) << 1)
+#define RGXFWIF_INICFG_HWPERF_EN                                               (IMG_UINT32_C(0x1) << 2)
+#define RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN                            (IMG_UINT32_C(0x1) << 3)
+#define RGXFWIF_INICFG_POW_RASCALDUST                                  (IMG_UINT32_C(0x1) << 4)
+/* 5 unused */
+#define RGXFWIF_INICFG_FBCDC_V3_1_EN                                   (IMG_UINT32_C(0x1) << 6)
+#define RGXFWIF_INICFG_CHECK_MLIST_EN                                  (IMG_UINT32_C(0x1) << 7)
+#define RGXFWIF_INICFG_DISABLE_CLKGATING_EN                            (IMG_UINT32_C(0x1) << 8)
+/* 9 unused */
+/* 10 unused */
+/* 11 unused */
+#define RGXFWIF_INICFG_REGCONFIG_EN                                            (IMG_UINT32_C(0x1) << 12)
+#define RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY                   (IMG_UINT32_C(0x1) << 13)
+#define RGXFWIF_INICFG_HWP_DISABLE_FILTER                              (IMG_UINT32_C(0x1) << 14)
+/* 15 unused */
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT                 (16)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST                  (RGXFWIF_CTXSWITCH_PROFILE_FAST_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM                        (RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW                  (RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY               (RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK                  (IMG_UINT32_C(0x7) << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_DISABLE_DM_OVERLAP                              (IMG_UINT32_C(0x1) << 19)
+#define RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER                   (IMG_UINT32_C(0x1) << 20)
+#define RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED                        (IMG_UINT32_C(0x1) << 21)
+#define RGXFWIF_INICFG_VALIDATE_IRQ                                            (IMG_UINT32_C(0x1) << 22)
+#define RGXFWIF_INICFG_DISABLE_PDP_EN                                  (IMG_UINT32_C(0x1) << 23)
+#define RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN  (IMG_UINT32_C(0x1) << 24)
+#define RGXFWIF_INICFG_WORKEST                                                 (IMG_UINT32_C(0x1) << 25)
+#define RGXFWIF_INICFG_PDVFS                                                   (IMG_UINT32_C(0x1) << 26)
+#define RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT                   (27)
+#define RGXFWIF_INICFG_CDM_ARBITRATION_TASK_DEMAND             (RGXFWIF_CDM_ARBITRATION_TASK_DEMAND_EN << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT)
+#define RGXFWIF_INICFG_CDM_ARBITRATION_ROUND_ROBIN             (RGXFWIF_CDM_ARBITRATION_ROUND_ROBIN_EN << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT)
+#define RGXFWIF_INICFG_CDM_ARBITRATION_MASK                            (IMG_UINT32_C(0x3) << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT)
+#define RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT                              (29)
+#define RGXFWIF_INICFG_ISPSCHEDMODE_NONE                               (0)
+#define RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP                   (RGXFWIF_ISP_SCHEDMODE_VER1_IPP << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT)
+#define RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP                   (RGXFWIF_ISP_SCHEDMODE_VER2_ISP << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT)
+#define RGXFWIF_INICFG_ISPSCHEDMODE_MASK                               (RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP |\
+                                                         RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP)
+#define RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER                   (IMG_UINT32_C(0x1) << 31)
+
+#define RGXFWIF_INICFG_ALL                                                             (0xFFFFFFFFU)
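+
+/* Illustrative sketch: updating the 3-bit context-switch profile field in a
+ * config-flags word using the shift/mask defines above, e.g. passing
+ * RGXFW_CTXSWITCH_PROFILE_MEDIUM_EN as ui32Profile. The helper name is
+ * hypothetical and not part of the interface. */
+static inline IMG_UINT32 RGXExampleSetCtxSwitchProfile(IMG_UINT32 ui32ConfigFlags,
+                                                       IMG_UINT32 ui32Profile)
+{
+       ui32ConfigFlags &= ~RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK;
+       ui32ConfigFlags |= (ui32Profile << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) &
+                          RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK;
+       return ui32ConfigFlags;
+}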
+
+/* Extended Flag definitions affecting the firmware globally */
+#define RGXFWIF_INICFG_EXT_TFBC_CONTROL_SHIFT                  (0)
+/* [7]   YUV10 override
+ * [6:4] Quality
+ * [3]   Quality enable
+ * [2:1] Compression scheme
+ * [0]   Lossy group */
+#define RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK                   (IMG_UINT32_C(0xFF)) /* RGX_CR_TFBC_COMPRESSION_CONTROL_MASKFULL */
+#define RGXFWIF_INICFG_EXT_ALL                                                 (RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK)
+
+#define RGXFWIF_INICFG_SYS_CTXSWITCH_CLRMSK                            ~(RGXFWIF_INICFG_CTXSWITCH_MODE_RAND | \
+                                                                                                                 RGXFWIF_INICFG_CTXSWITCH_SRESET_EN)
+
+/* Flag definitions affecting only workloads submitted by a particular OS */
+
+/*!
+ * @AddToGroup ContextSwitching
+ * @{
+ * @Name Per-OS DM context switch configuration flags
+ * @{
+ */
+#define RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN                             (IMG_UINT32_C(0x1) << 0) /*!< Enables TDM context switch */
+#define RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN                            (IMG_UINT32_C(0x1) << 1) /*!< Enables GEOM DM context switch */
+#define RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN                              (IMG_UINT32_C(0x1) << 2) /*!< Enables FRAG DM context switch */
+#define RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN                             (IMG_UINT32_C(0x1) << 3) /*!< Enables CDM context switch */
+
+#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM                              (IMG_UINT32_C(0x1) << 4)
+#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_GEOM                             (IMG_UINT32_C(0x1) << 5)
+#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D                               (IMG_UINT32_C(0x1) << 6)
+#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM                              (IMG_UINT32_C(0x1) << 7)
+
+#define RGXFWIF_INICFG_OS_ALL                                                  (0xFFU)
+
+#define RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL                             (RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN | \
+                                                                                                                RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN | \
+                                                                                                                RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN | \
+                                                                                                                RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN)
+
+#define RGXFWIF_INICFG_OS_CTXSWITCH_CLRMSK                             ~(RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL)
+
+/*!
+ * @} End of Per-OS Context switch configuration flags
+ * @} End of AddToGroup ContextSwitching
+ */
+
+#define RGXFWIF_FILTCFG_TRUNCATE_HALF                                  (IMG_UINT32_C(0x1) << 3)
+#define RGXFWIF_FILTCFG_TRUNCATE_INT                                   (IMG_UINT32_C(0x1) << 2)
+#define RGXFWIF_FILTCFG_NEW_FILTER_MODE                                        (IMG_UINT32_C(0x1) << 1)
+
+typedef IMG_UINT32 RGX_ACTIVEPM_CONF;
+#define RGX_ACTIVEPM_FORCE_OFF 0U
+#define RGX_ACTIVEPM_FORCE_ON  1U
+#define RGX_ACTIVEPM_DEFAULT   2U
+
+typedef enum
+{
+       RGX_RD_POWER_ISLAND_FORCE_OFF = 0,
+       RGX_RD_POWER_ISLAND_FORCE_ON = 1,
+       RGX_RD_POWER_ISLAND_DEFAULT = 2
+} RGX_RD_POWER_ISLAND_CONF;
+
+#if defined(RGX_FW_IRQ_OS_COUNTERS)
+/* Unused registers re-purposed for storing counters of the Firmware's
+ * interrupts for each OS
+ */
+#define IRQ_COUNTER_STORAGE_REGS                        \
+               0x2028U, /* RGX_CR_PM_TA_MMU_FSTACK         */  \
+               0x2050U, /* RGX_CR_PM_3D_MMU_FSTACK         */  \
+               0x2030U, /* RGX_CR_PM_START_OF_MMU_TACONTEXT*/  \
+               0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/  \
+               0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/  \
+               0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/  \
+               0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/  \
+               0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/
+#endif
+
+typedef struct
+{
+       IMG_UINT16 ui16RegNum;                          /*!< Register number */
+       IMG_UINT16 ui16IndirectRegNum;          /*!< Indirect register number (or 0 if not used) */
+       IMG_UINT16 ui16IndirectStartVal;        /*!< Start value for indirect register */
+       IMG_UINT16 ui16IndirectEndVal;          /*!< End value for indirect register */
+} RGXFW_REGISTER_LIST;
+
+#if defined(RGX_FIRMWARE)
+typedef DLLIST_NODE                                                    RGXFWIF_DLLIST_NODE;
+#else
+typedef struct
+{
+       RGXFWIF_DEV_VIRTADDR p;
+       RGXFWIF_DEV_VIRTADDR n;
+} RGXFWIF_DLLIST_NODE;
+#endif
+
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_SIGBUFFER;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_TRACEBUF;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_SYSDATA;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_OSDATA;
+#if defined(SUPPORT_TBI_INTERFACE)
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_TBIBUF;
+#endif
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_HWPERFBUF;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_HWRINFOBUF;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_RUNTIME_CFG;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_GPU_UTIL_FWCB;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_REG_CFG;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_HWPERF_CTL;
+typedef RGXFWIF_DEV_VIRTADDR  PRGX_HWPERF_CONFIG_MUX_CNTBLK;
+typedef RGXFWIF_DEV_VIRTADDR  PRGX_HWPERF_CONFIG_CNTBLK;
+typedef RGXFWIF_DEV_VIRTADDR  PRGX_HWPERF_SELECT_CUSTOM_CNTRS;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_CCB_CTL;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_CCB;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_CCB_RTN_SLOTS;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_FWMEMCONTEXT;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_FWCOMMONCONTEXT;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_ZSBUFFER;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_COMMONCTX_STATE;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_CORE_CLK_RATE;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_COUNTERBUFFER;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_FIRMWAREGCOVBUFFER;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_CCCB;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_CCCB_CTL;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_FREELIST;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_HWRTDATA;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_TIMESTAMP_ADDR;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_RF_CMD;
+
+/*!
+ * This number is used to represent an invalid page catalogue physical address
+ */
+#define RGXFWIF_INVALID_PC_PHYADDR 0xFFFFFFFFFFFFFFFFLLU
+
+/*!
+ * This number is used to represent an unallocated page catalogue base register
+ */
+#define RGXFW_BIF_INVALID_PCSET 0xFFFFFFFFU
+
+/*!
+    Firmware memory context.
+*/
+typedef struct
+{
+       IMG_DEV_PHYADDR                 RGXFW_ALIGN sPCDevPAddr;        /*!< device physical address of context's page catalogue */
+       IMG_UINT32                              uiPageCatBaseRegSet;    /*!< associated page catalog base register (RGXFW_BIF_INVALID_PCSET == unallocated) */
+       IMG_UINT32                              uiBreakpointAddr; /*!< breakpoint address */
+       IMG_UINT32                              uiBPHandlerAddr; /*!< breakpoint handler address */
+       IMG_UINT32                              uiBreakpointCtl; /*!< DM and enable control for BP */
+       IMG_UINT32                              ui32FwMemCtxFlags; /*!< Compatibility and other flags */
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+       IMG_UINT32              ui32OSid;
+       IMG_BOOL                bOSidAxiProt;
+#endif
+
+} UNCACHED_ALIGN RGXFWIF_FWMEMCONTEXT;
+
+/*!
+ * FW context state flags
+ */
+#define RGXFWIF_CONTEXT_FLAGS_NEED_RESUME                              (0x00000001U)
+#define RGXFWIF_CONTEXT_FLAGS_MC_NEED_RESUME_MASKFULL  (0x000000FFU)
+#define RGXFWIF_CONTEXT_FLAGS_TDM_HEADER_STALE                 (0x00000100U)
+#define RGXFWIF_CONTEXT_FLAGS_LAST_KICK_SECURE                 (0x00000200U)
+
+/*!
+ * @InGroup ContextSwitching
+ * @Brief Firmware GEOM/TA context suspend state
+ */
+typedef struct
+{
+       /* FW-accessible TA state which must be written out to memory on context store */
+       IMG_UINT64      RGXFW_ALIGN uTAReg_VDM_CALL_STACK_POINTER;               /*!< VDM control stream stack pointer, to store in mid-TA */
+       IMG_UINT64      RGXFW_ALIGN uTAReg_VDM_CALL_STACK_POINTER_Init;  /*!< Initial value of the VDM control stream stack pointer (in case it is 'lost' due to a lock-up) */
+       IMG_UINT32      uTAReg_VBS_SO_PRIM[4];
+       IMG_UINT16      ui16TACurrentIdx;
+} UNCACHED_ALIGN RGXFWIF_TACTX_STATE_PER_GEOM;
+
+typedef struct
+{
+       /* FW-accessible TA state which must be written out to memory on context store */
+       RGXFWIF_TACTX_STATE_PER_GEOM asGeomCore[RGX_NUM_GEOM_CORES];
+} UNCACHED_ALIGN RGXFWIF_TACTX_STATE;
+
+/*!
+ * @InGroup ContextSwitching
+ * @Brief Firmware FRAG/3D context suspend state
+ */
+typedef struct
+{
+       /* FW-accessible ISP state which must be written out to memory on context store */
+       IMG_UINT32      u3DReg_PM_DEALLOCATED_MASK_STATUS; /*!< PM deallocation status */
+       IMG_UINT32      u3DReg_PM_PDS_MTILEFREE_STATUS; /*!< Macro-tiles (MTs) finished status */
+       IMG_UINT32      ui32CtxStateFlags;      /*!< Compatibility and other flags */
+       /* au3DReg_ISP_STORE should be the last element of the structure
+        * as this is an array whose size is determined at runtime
+        * after detecting the RGX core */
+       IMG_UINT32      au3DReg_ISP_STORE[]; /*!< ISP state (per-pipe) */
+} UNCACHED_ALIGN RGXFWIF_3DCTX_STATE;
+
+static_assert(sizeof(RGXFWIF_3DCTX_STATE) <= 16U,
+              "Size of structure RGXFWIF_3DCTX_STATE exceeds maximum expected size.");
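+
+/* Illustrative sketch: since au3DReg_ISP_STORE is a flexible array whose
+ * length depends on the detected RGX core, an allocation for this suspend
+ * state would be sized roughly as below. ui32NumIspStoreRegs is a
+ * hypothetical input; this is not necessarily how the driver sizes it. */
+static inline IMG_UINT32 RGXExample3DCtxStateSize(IMG_UINT32 ui32NumIspStoreRegs)
+{
+       return (IMG_UINT32)sizeof(RGXFWIF_3DCTX_STATE) +
+              (ui32NumIspStoreRegs * (IMG_UINT32)sizeof(IMG_UINT32));
+}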
+
+#define RGXFWIF_CTX_USING_BUFFER_A             (0)
+#define RGXFWIF_CTX_USING_BUFFER_B             (1U)
+
+typedef struct
+{
+       IMG_UINT32      ui32CtxStateFlags; /*!< Target buffer and other flags */
+} RGXFWIF_COMPUTECTX_STATE;
+
+/*!
+ * @InGroup WorkloadContexts
+ * @Brief Firmware Common Context (or FWCC)
+ */
+typedef struct RGXFWIF_FWCOMMONCONTEXT_
+{
+       /* CCB details for this firmware context */
+       PRGXFWIF_CCCB_CTL               psCCBCtl;                               /*!< CCB control */
+       PRGXFWIF_CCCB                   psCCB;                                  /*!< CCB base */
+       RGXFWIF_DMA_ADDR                sCCBMetaDMAAddr;
+
+       /* Context suspend state */
+       PRGXFWIF_COMMONCTX_STATE        RGXFW_ALIGN psContextState;             /*!< TA/3D context suspend state, read/written by FW */
+
+       /* Flags e.g. for context switching */
+       IMG_UINT32                              ui32FWComCtxFlags;
+       IMG_INT32                               i32Priority;  /*!< Priority level */
+       IMG_UINT32                              ui32PrioritySeqNum;
+
+       /* Framework state */
+       PRGXFWIF_RF_CMD                 RGXFW_ALIGN psRFCmd;            /*!< Register updates for Framework */
+
+       /* Statistic updates waiting to be passed back to the host... */
+       IMG_BOOL                                bStatsPending;                  /*!< True when some stats are pending */
+       IMG_INT32                               i32StatsNumStores;              /*!< Number of stores on this context since last update */
+       IMG_INT32                               i32StatsNumOutOfMemory;         /*!< Number of OOMs on this context since last update */
+       IMG_INT32                               i32StatsNumPartialRenders;      /*!< Number of PRs on this context since last update */
+       RGXFWIF_DM                              eDM;                            /*!< Data Master type */
+       IMG_UINT64                              RGXFW_ALIGN  ui64WaitSignalAddress;     /*!< Device Virtual Address of the signal the context is waiting on */
+       RGXFWIF_DLLIST_NODE             RGXFW_ALIGN  sWaitSignalNode;                   /*!< List entry for the wait-signal list */
+       RGXFWIF_DLLIST_NODE             RGXFW_ALIGN  sBufStalledNode;                   /*!< List entry for the buffer stalled list */
+       IMG_UINT64                              RGXFW_ALIGN  ui64CBufQueueCtrlAddr;     /*!< Address of the circular buffer queue pointers */
+
+       IMG_UINT64                              RGXFW_ALIGN  ui64RobustnessAddress;
+       IMG_UINT32                              ui32MaxDeadlineMS;                      /*!< Max HWR deadline limit in ms */
+       bool                                    bReadOffsetNeedsReset;                  /*!< Following a HWR, the circular buffer read offset needs resetting */
+
+       RGXFWIF_DLLIST_NODE             RGXFW_ALIGN sWaitingNode;               /*!< List entry for the waiting list */
+       RGXFWIF_DLLIST_NODE             RGXFW_ALIGN sRunNode;                   /*!< List entry for the run list */
+       RGXFWIF_UFO                             sLastFailedUFO;                 /*!< UFO that last failed (or NULL) */
+
+       PRGXFWIF_FWMEMCONTEXT   psFWMemContext;                                 /*!< Memory context */
+
+       /* References to the host side originators */
+       IMG_UINT32                              ui32ServerCommonContextID;      /*!< ID of the associated Server Common Context */
+       IMG_UINT32                              ui32PID;                        /*!< associated process ID */
+
+       IMG_BOOL                                bGeomOOMDisabled;               /*!< True when Geom DM OOM is not allowed */
+
+} UNCACHED_ALIGN RGXFWIF_FWCOMMONCONTEXT;
+
+static_assert(sizeof(RGXFWIF_FWCOMMONCONTEXT) <= 256U,
+              "Size of structure RGXFWIF_FWCOMMONCONTEXT exceeds maximum expected size.");
+
+typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_TQ[RGX_TRP_MAX_NUM_CORES][1];
+typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_2D[RGX_TRP_MAX_NUM_CORES][2];
+typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_3D[RGX_TRP_MAX_NUM_CORES][4];
+typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_GEOM[RGX_TRP_MAX_NUM_CORES][2];
+
+/*!
+ * @InGroup WorkloadContexts
+ * @Brief Firmware render context.
+ */
+typedef struct
+{
+       RGXFWIF_FWCOMMONCONTEXT sTAContext;                             /*!< Firmware context for the TA */
+       RGXFWIF_FWCOMMONCONTEXT s3DContext;                             /*!< Firmware context for the 3D */
+
+       RGXFWIF_STATIC_RENDERCONTEXT_STATE sStaticRenderContextState;
+
+       IMG_UINT32                      ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */
+
+       IMG_UINT32                      ui32FwRenderCtxFlags; /*!< Compatibility and other flags */
+
+#if defined(SUPPORT_TRP)
+       RGXFWIF_TRP_CHECKSUM_3D         aui64TRPChecksums3D;
+       RGXFWIF_TRP_CHECKSUM_GEOM       aui64TRPChecksumsGeom;
+#endif
+} UNCACHED_ALIGN RGXFWIF_FWRENDERCONTEXT;
+
+/*!
+       Firmware compute context.
+*/
+typedef struct
+{
+       RGXFWIF_FWCOMMONCONTEXT sCDMContext;                            /*!< Firmware context for the CDM */
+
+       RGXFWIF_STATIC_COMPUTECONTEXT_STATE sStaticComputeContextState;
+
+       IMG_UINT32                      ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */
+
+       IMG_UINT32 ui32ComputeCtxFlags; /*!< Compatibility and other flags */
+
+       IMG_UINT32              ui32WGPState;
+       IMG_UINT32              ui32WGPChecksum;
+       IMG_UINT32              ui32CoreMaskA;
+       IMG_UINT32              ui32CoreMaskB;
+} UNCACHED_ALIGN RGXFWIF_FWCOMPUTECONTEXT;
+
+/*!
+       Firmware TDM context.
+*/
+typedef struct
+{
+       RGXFWIF_FWCOMMONCONTEXT sTDMContext;                            /*!< Firmware context for the TDM */
+
+       IMG_UINT32                      ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */
+
+} UNCACHED_ALIGN RGXFWIF_FWTDMCONTEXT;
+
+/*!
+ * @InGroup WorkloadContexts
+ * @Brief Firmware transfer context.
+ */
+typedef struct
+{
+       RGXFWIF_FWCOMMONCONTEXT sTQContext;                      /*!< Firmware context for TQ3D */
+
+#if defined(SUPPORT_TRP)
+       IMG_UINT32                              ui32TRPState;
+       RGXFWIF_TRP_CHECKSUM_TQ aui64TRPChecksumsTQ;
+#endif
+} UNCACHED_ALIGN RGXFWIF_FWTRANSFERCONTEXT;
+
+/*!
+ ******************************************************************************
+ * Defines for CMD_TYPE corruption detection and forward compatibility check
+ *****************************************************************************/
+
+/* CMD_TYPE 32bit contains:
+ * 31:16       Reserved for magic value to detect corruption (16 bits)
+ * 15          Reserved for RGX_CCB_TYPE_TASK (1 bit)
+ * 14:0                Bits available for CMD_TYPEs (15 bits) */
+
+
+/* Magic value to detect corruption */
+#define RGX_CMD_MAGIC_DWORD                    IMG_UINT32_C(0x2ABC)
+#define RGX_CMD_MAGIC_DWORD_MASK       (0xFFFF0000U)
+#define RGX_CMD_MAGIC_DWORD_SHIFT      (16U)
+#define RGX_CMD_MAGIC_DWORD_SHIFTED    (RGX_CMD_MAGIC_DWORD << RGX_CMD_MAGIC_DWORD_SHIFT)
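+
+/* Illustrative sketch: validating a CMD_TYPE value against the magic field,
+ * per the 31:16 / 15 / 14:0 layout documented above. Hypothetical helper,
+ * not part of the interface. */
+static inline IMG_BOOL RGXExampleIsCmdTypeValid(IMG_UINT32 ui32CmdType)
+{
+       return (IMG_BOOL)((ui32CmdType & RGX_CMD_MAGIC_DWORD_MASK) ==
+                         RGX_CMD_MAGIC_DWORD_SHIFTED);
+}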
+
+/*!
+ * @InGroup KCCBTypes ClientCCBTypes
+ * @Brief Generic CCB control structure
+ */
+typedef struct
+{
+       volatile IMG_UINT32             ui32WriteOffset;                /*!< write offset into array of commands (MUST be aligned to 16 bytes!) */
+       volatile IMG_UINT32             ui32ReadOffset;                 /*!< read offset into array of commands */
+       IMG_UINT32                              ui32WrapMask;                   /*!< Offset wrapping mask (Total capacity of the CCB - 1) */
+       IMG_UINT32                              ui32CmdSize;                    /*!< size of each command in bytes */
+} UNCACHED_ALIGN RGXFWIF_CCB_CTL;
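+
+/* Illustrative sketch: advancing an offset in the circular command array
+ * using the wrap mask above (capacity - 1, so the capacity is assumed to be
+ * a power of two). Whether offsets count commands or bytes is up to the user
+ * of this structure; this sketch treats them as command indices. */
+static inline IMG_UINT32 RGXExampleCCBNextOffset(IMG_UINT32 ui32Offset,
+                                                 IMG_UINT32 ui32WrapMask)
+{
+       return (ui32Offset + 1U) & ui32WrapMask;
+}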
+
+/*!
+ * @Defgroup KCCBTypes Kernel CCB data interface
+ * @Brief Types grouping data structures and defines used in realising the KCCB functionality
+ * @{
+ */
+
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PT      (0x1U) /* MMU_CTRL_INVAL_PT_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PD      (0x2U) /* MMU_CTRL_INVAL_PD_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PC      (0x4U) /* MMU_CTRL_INVAL_PC_EN */
+
+#if !defined(__KERNEL)
+
+#if !defined(RGX_FEATURE_SLC_VIVT)
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB   (0x10U) /* can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT bit from BIF_CTRL reg */
+#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE < 2)
+#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB     (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8U) /* BIF_CTRL_INVAL_TLB1_EN */
+#else
+#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB     (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB)
+#endif
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x0U) /* not used */
+
+#else /* RGX_FEATURE_SLC_VIVT */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB   (0x0) /* not used */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB     (0x0) /* not used */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */
+#endif
+
+#else
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB   (0x10) /* can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT bit from BIF_CTRL reg */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB     (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8) /* BIF_CTRL_INVAL_TLB1_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */
+#endif
+
+#define RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT (0x4000000U) /* indicates FW should interrupt the host */
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_MMUCACHE type command
+ */
+typedef struct
+{
+       IMG_UINT32            ui32CacheFlags;
+       RGXFWIF_DEV_VIRTADDR  sMMUCacheSync;
+       IMG_UINT32            ui32MMUCacheSyncUpdateValue;
+} RGXFWIF_MMUCACHEDATA;
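+
+/* Illustrative sketch: a ui32CacheFlags value requesting invalidation of the
+ * whole PC/PD/PT hierarchy plus a host interrupt on completion, built from
+ * the flag bits defined above. The macro name is hypothetical. */
+#define RGXFWIF_EXAMPLE_MMUCACHE_FULL_INVAL (RGXFWIF_MMUCACHEDATA_FLAGS_PC | \
+                                             RGXFWIF_MMUCACHEDATA_FLAGS_PD | \
+                                             RGXFWIF_MMUCACHEDATA_FLAGS_PT | \
+                                             RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT)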
+
+#define RGXFWIF_BPDATA_FLAGS_ENABLE (1U << 0)
+#define RGXFWIF_BPDATA_FLAGS_WRITE  (1U << 1)
+#define RGXFWIF_BPDATA_FLAGS_CTL    (1U << 2)
+#define RGXFWIF_BPDATA_FLAGS_REGS   (1U << 3)
+
+typedef struct
+{
+       PRGXFWIF_FWMEMCONTEXT   psFWMemContext;                 /*!< Memory context */
+       IMG_UINT32              ui32BPAddr;                     /*!< Breakpoint address */
+       IMG_UINT32              ui32HandlerAddr;                /*!< Breakpoint handler */
+       IMG_UINT32              ui32BPDM;                       /*!< Breakpoint control */
+       IMG_UINT32              ui32BPDataFlags;
+       IMG_UINT32              ui32TempRegs;           /*!< Number of temporary registers to overallocate */
+       IMG_UINT32              ui32SharedRegs;         /*!< Number of shared registers to overallocate */
+       RGXFWIF_DM      eDM;                /*!< DM associated with the breakpoint */
+} RGXFWIF_BPDATA;
+
+#define RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS (RGXFWIF_PRBUFFER_MAXSUPPORTED + 1U) /* +1 is RTDATASET cleanup */
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_KICK type command
+ */
+typedef struct
+{
+       PRGXFWIF_FWCOMMONCONTEXT        psContext;                      /*!< address of the firmware context */
+       IMG_UINT32                                      ui32CWoffUpdate;        /*!< Client CCB woff update */
+       IMG_UINT32                                      ui32CWrapMaskUpdate; /*!< Client CCB wrap mask update after CCCB growth */
+       IMG_UINT32                                      ui32NumCleanupCtl;              /*!< number of CleanupCtl pointers attached */
+       PRGXFWIF_CLEANUP_CTL            apsCleanupCtl[RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS]; /*!< CleanupCtl structures associated with command */
+       IMG_UINT32                                      ui32WorkEstCmdHeaderOffset; /*!< offset to the CmdHeader which houses the workload estimation kick data. */
+} RGXFWIF_KCCB_CMD_KICK_DATA;
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK type command
+ */
+typedef struct
+{
+       RGXFWIF_KCCB_CMD_KICK_DATA      sTACmdKickData; /*!< GEOM DM kick command data */
+       RGXFWIF_KCCB_CMD_KICK_DATA      s3DCmdKickData; /*!< FRAG DM kick command data */
+} RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK_DATA;
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_FORCE_UPDATE type command
+ */
+typedef struct
+{
+       PRGXFWIF_FWCOMMONCONTEXT        psContext;                      /*!< address of the firmware context */
+       IMG_UINT32                                      ui32CCBFenceOffset;     /*!< Client CCB fence offset */
+} RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA;
+
+/*!
+ * @Brief Resource types supported by \ref RGXFWIF_KCCB_CMD_CLEANUP type command
+ */
+typedef enum
+{
+       RGXFWIF_CLEANUP_FWCOMMONCONTEXT,                /*!< FW common context cleanup */
+       RGXFWIF_CLEANUP_HWRTDATA,                               /*!< FW HW RT data cleanup */
+       RGXFWIF_CLEANUP_FREELIST,                               /*!< FW freelist cleanup */
+       RGXFWIF_CLEANUP_ZSBUFFER,                               /*!< FW ZS Buffer cleanup */
+} RGXFWIF_CLEANUP_TYPE;
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_CLEANUP type command
+ */
+typedef struct
+{
+       RGXFWIF_CLEANUP_TYPE                    eCleanupType;                   /*!< Cleanup type */
+       union {
+               PRGXFWIF_FWCOMMONCONTEXT        psContext;                              /*!< FW common context to cleanup */
+               PRGXFWIF_HWRTDATA                       psHWRTData;                             /*!< HW RT to cleanup */
+               PRGXFWIF_FREELIST                       psFreelist;                             /*!< Freelist to cleanup */
+               PRGXFWIF_ZSBUFFER                       psZSBuffer;                             /*!< ZS Buffer to cleanup */
+       } uCleanupData;
+} RGXFWIF_CLEANUP_REQUEST;
+
+/*!
+ * @Brief Type of power requests supported in \ref RGXFWIF_KCCB_CMD_POW type command
+ */
+typedef enum
+{
+       RGXFWIF_POW_OFF_REQ = 1,           /*!< GPU power-off request */
+       RGXFWIF_POW_FORCED_IDLE_REQ,       /*!< Force-idle related request */
+       RGXFWIF_POW_NUM_UNITS_CHANGE,      /*!< Request to change default powered scalable units */
+       RGXFWIF_POW_APM_LATENCY_CHANGE     /*!< Request to change the APM latency period */
+} RGXFWIF_POWER_TYPE;
+
+/*!
+ * @Brief Supported force-idle related requests with \ref RGXFWIF_POW_FORCED_IDLE_REQ type request
+ */
+typedef enum
+{
+       RGXFWIF_POWER_FORCE_IDLE = 1,      /*!< Request to force-idle GPU */
+       RGXFWIF_POWER_CANCEL_FORCED_IDLE,  /*!< Request to cancel a previously successful force-idle transition */
+       RGXFWIF_POWER_HOST_TIMEOUT,        /*!< Notification that host timed-out waiting for force-idle state */
+} RGXFWIF_POWER_FORCE_IDLE_TYPE;
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_POW type command
+ */
+typedef struct
+{
+       RGXFWIF_POWER_TYPE                                      ePowType;                                       /*!< Type of power request */
+       union
+       {
+               IMG_UINT32                                              ui32NumOfDusts;                 /*!< Number of active Dusts */
+               IMG_BOOL                                                bForced;                                /*!< If the operation is mandatory */
+               RGXFWIF_POWER_FORCE_IDLE_TYPE   ePowRequestType;                /*!< Type of request: force idle, cancel forced idle or host timeout */
+       } uPowerReqData;
+} RGXFWIF_POWER_REQUEST;
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_SLCFLUSHINVAL type command
+ */
+typedef struct
+{
+       PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< Context to fence on (only useful when bDMContext == TRUE) */
+       IMG_BOOL    bInval;                 /*!< Invalidate the cache as well as flushing */
+       IMG_BOOL    bDMContext;             /*!< The data to flush/invalidate belongs to a specific DM context */
+       IMG_UINT64      RGXFW_ALIGN ui64Address;        /*!< Optional address of range (only useful when bDMContext == FALSE) */
+       IMG_UINT64      RGXFW_ALIGN ui64Size;           /*!< Optional size of range (only useful when bDMContext == FALSE) */
+} RGXFWIF_SLCFLUSHINVALDATA;
+
+typedef enum
+{
+       RGXFWIF_HWPERF_CTRL_TOGGLE = 0,
+       RGXFWIF_HWPERF_CTRL_SET    = 1,
+       RGXFWIF_HWPERF_CTRL_EMIT_FEATURES_EV = 2
+} RGXFWIF_HWPERF_UPDATE_CONFIG;
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG type command
+ */
+typedef struct
+{
+       RGXFWIF_HWPERF_UPDATE_CONFIG eOpCode; /*!< Control operation code */
+       IMG_UINT64      RGXFW_ALIGN     ui64Mask;   /*!< Mask of events to toggle */
+} RGXFWIF_HWPERF_CTRL;
+
+typedef struct
+{
+       IMG_UINT32                ui32NumBlocks;    /*!< Number of RGX_HWPERF_CONFIG_MUX_CNTBLK in the array */
+       PRGX_HWPERF_CONFIG_MUX_CNTBLK sBlockConfigs;    /*!< Address of the RGX_HWPERF_CONFIG_MUX_CNTBLK array */
+} RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS;
+
+typedef struct
+{
+       IMG_UINT32                ui32NumBlocks;    /*!< Number of RGX_HWPERF_CONFIG_CNTBLK in the array */
+       PRGX_HWPERF_CONFIG_CNTBLK sBlockConfigs;    /*!< Address of the RGX_HWPERF_CONFIG_CNTBLK array */
+} RGXFWIF_HWPERF_CONFIG_DA_BLKS;
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE type command
+ */
+typedef struct
+{
+       IMG_UINT32      ui32NewClockSpeed;                      /*!< New clock speed */
+} RGXFWIF_CORECLKSPEEDCHANGE_DATA;
+
+#define RGXFWIF_HWPERF_CTRL_BLKS_MAX   16U
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS type command
+ */
+typedef struct
+{
+       bool            bEnable;
+       IMG_UINT32      ui32NumBlocks;                              /*!< Number of block IDs in the array */
+       IMG_UINT16      aeBlockIDs[RGXFWIF_HWPERF_CTRL_BLKS_MAX];   /*!< Array of RGX_HWPERF_CNTBLK_ID values */
+} RGXFWIF_HWPERF_CTRL_BLKS;
+
+
+typedef struct
+{
+       IMG_UINT16                      ui16CustomBlock;
+       IMG_UINT16                      ui16NumCounters;
+       PRGX_HWPERF_SELECT_CUSTOM_CNTRS sCustomCounterIDs;
+} RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS;
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE & \ref RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE type commands
+ */
+typedef struct
+{
+       RGXFWIF_DEV_VIRTADDR    sZSBufferFWDevVAddr;                            /*!< ZS-Buffer FW address */
+       IMG_BOOL                                bDone;                                                          /*!< Backing/unbacking action succeeded */
+} RGXFWIF_ZSBUFFER_BACKING_DATA;
+
+#if defined(SUPPORT_VALIDATION)
+typedef struct
+{
+       IMG_UINT32 ui32RegWidth;
+       IMG_BOOL   bWriteOp;
+       IMG_UINT32 ui32RegAddr;
+       IMG_UINT64 RGXFW_ALIGN ui64RegVal;
+} RGXFWIF_RGXREG_DATA;
+
+typedef struct
+{
+       IMG_UINT64 ui64BaseAddress;
+       PRGXFWIF_FWCOMMONCONTEXT psContext;
+       IMG_UINT32 ui32Size;
+} RGXFWIF_GPUMAP_DATA;
+#endif
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE type command
+ */
+typedef struct
+{
+       RGXFWIF_DEV_VIRTADDR    sFreeListFWDevVAddr;                            /*!< Freelist FW address */
+       IMG_UINT32                              ui32DeltaPages;                                         /*!< Number of pages by which the freelist changed */
+       IMG_UINT32                              ui32NewPages;                                           /*!< New amount of pages on the freelist (including ready pages) */
+       IMG_UINT32              ui32ReadyPages;                     /*!< Number of ready pages to be held in reserve until OOM */
+} RGXFWIF_FREELIST_GS_DATA;
+
+#define RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT         (MAX_HW_TA3DCONTEXTS * RGXFW_MAX_FREELISTS * 2U)
+#define RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG 0x80000000U
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE type command
+ */
+typedef struct
+{
+       IMG_UINT32                      ui32FreelistsCount;
+       IMG_UINT32                      aui32FreelistIDs[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT];
+} RGXFWIF_FREELISTS_RECONSTRUCTION_DATA;
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE type command
+ */
+typedef struct
+{
+       PRGXFWIF_FWCOMMONCONTEXT  psContext; /*!< Context that may need to be resumed following a write offset update */
+} UNCACHED_ALIGN RGXFWIF_WRITE_OFFSET_UPDATE_DATA;
+
+/*!
+ ******************************************************************************
+ * Proactive DVFS Structures
+ *****************************************************************************/
+#define NUM_OPP_VALUES 16
+
+typedef struct
+{
+       IMG_UINT32                      ui32Volt; /* V  */
+       IMG_UINT32                      ui32Freq; /* Hz */
+} UNCACHED_ALIGN PDVFS_OPP;
+
+typedef struct
+{
+       PDVFS_OPP               asOPPValues[NUM_OPP_VALUES];
+#if defined(DEBUG)
+       IMG_UINT32              ui32MinOPPPoint;
+#endif
+       IMG_UINT32              ui32MaxOPPPoint;
+} UNCACHED_ALIGN RGXFWIF_PDVFS_OPP;
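+
+/* Illustrative sketch: picking the highest OPP whose frequency does not
+ * exceed a requested value, assuming asOPPValues is ordered by ascending
+ * ui32Freq up to ui32MaxOPPPoint. Hypothetical helper, not part of the
+ * interface. */
+static inline IMG_UINT32 RGXExampleSelectOPP(const RGXFWIF_PDVFS_OPP *psOPPTable,
+                                             IMG_UINT32 ui32ReqFreqHz)
+{
+       IMG_UINT32 i;
+       IMG_UINT32 ui32Selected = 0U;
+
+       for (i = 0U; (i <= psOPPTable->ui32MaxOPPPoint) && (i < NUM_OPP_VALUES); i++)
+       {
+               if (psOPPTable->asOPPValues[i].ui32Freq <= ui32ReqFreqHz)
+               {
+                       ui32Selected = i;
+               }
+       }
+       return ui32Selected;
+}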
+
+typedef struct
+{
+       IMG_UINT32 ui32MaxOPPPoint;
+} UNCACHED_ALIGN RGXFWIF_PDVFS_MAX_FREQ_DATA;
+
+typedef struct
+{
+       IMG_UINT32 ui32MinOPPPoint;
+} UNCACHED_ALIGN RGXFWIF_PDVFS_MIN_FREQ_DATA;
+
+/*!
+ ******************************************************************************
+ * Register configuration structures
+ *****************************************************************************/
+
+#define RGXFWIF_REG_CFG_MAX_SIZE 512
+
+typedef enum
+{
+       RGXFWIF_REGCFG_CMD_ADD                          = 101,
+       RGXFWIF_REGCFG_CMD_CLEAR                        = 102,
+       RGXFWIF_REGCFG_CMD_ENABLE                       = 103,
+       RGXFWIF_REGCFG_CMD_DISABLE                      = 104
+} RGXFWIF_REGDATA_CMD_TYPE;
+
+typedef enum
+{
+       RGXFWIF_REG_CFG_TYPE_PWR_ON=0,      /* Sidekick power event */
+       RGXFWIF_REG_CFG_TYPE_DUST_CHANGE,   /* Rascal / dust power event */
+       RGXFWIF_REG_CFG_TYPE_TA,            /* TA kick */
+       RGXFWIF_REG_CFG_TYPE_3D,            /* 3D kick */
+       RGXFWIF_REG_CFG_TYPE_CDM,           /* Compute kick */
+       RGXFWIF_REG_CFG_TYPE_TLA,           /* TLA kick */
+       RGXFWIF_REG_CFG_TYPE_TDM,           /* TDM kick */
+       RGXFWIF_REG_CFG_TYPE_ALL            /* Applies to all types. Keep as last element */
+} RGXFWIF_REG_CFG_TYPE;
+
+typedef struct
+{
+       IMG_UINT64              ui64Addr;
+       IMG_UINT64              ui64Mask;
+       IMG_UINT64              ui64Value;
+} RGXFWIF_REG_CFG_REC;
+
+typedef struct
+{
+       RGXFWIF_REGDATA_CMD_TYPE         eCmdType;
+       RGXFWIF_REG_CFG_TYPE             eRegConfigType;
+       RGXFWIF_REG_CFG_REC RGXFW_ALIGN  sRegConfig;
+
+} RGXFWIF_REGCONFIG_DATA;
+
+typedef struct
+{
+       /**
+        * PDump WRW command write granularity is 32 bits.
+        * Add padding to ensure array size is 32 bit granular.
+        */
+       IMG_UINT8           RGXFW_ALIGN  aui8NumRegsType[PVR_ALIGN((IMG_UINT32)RGXFWIF_REG_CFG_TYPE_ALL,sizeof(IMG_UINT32))];
+       RGXFWIF_REG_CFG_REC RGXFW_ALIGN  asRegConfigs[RGXFWIF_REG_CFG_MAX_SIZE];
+} UNCACHED_ALIGN RGXFWIF_REG_CFG;
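+
+/* Worked example of the padding above, assuming PVR_ALIGN(x, a) rounds x up
+ * to a multiple of a: RGXFWIF_REG_CFG_TYPE_ALL is 7, so
+ * PVR_ALIGN(7, sizeof(IMG_UINT32)) is 8, keeping aui8NumRegsType 32-bit
+ * granular for PDump WRW writes. */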
+
+typedef enum
+{
+       RGXFWIF_OS_ONLINE = 1,
+       RGXFWIF_OS_OFFLINE
+} RGXFWIF_OS_STATE_CHANGE;
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE type command
+ */
+typedef struct
+{
+       IMG_UINT32 ui32OSid;
+       RGXFWIF_OS_STATE_CHANGE eNewOSState;
+} UNCACHED_ALIGN RGXFWIF_OS_STATE_CHANGE_DATA;
+
+typedef enum
+{
+       RGXFWIF_PWR_COUNTER_DUMP_START = 1,
+       RGXFWIF_PWR_COUNTER_DUMP_STOP,
+       RGXFWIF_PWR_COUNTER_DUMP_SAMPLE,
+} RGXFWIF_COUNTER_DUMP_REQUEST;
+
+typedef struct
+{
+       RGXFWIF_COUNTER_DUMP_REQUEST eCounterDumpRequest;
+}  RGXFW_ALIGN RGXFWIF_COUNTER_DUMP_DATA;
+
+/*!
+ * @Brief List of command types supported by the Kernel CCB
+ */
+typedef enum
+{
+       /* Common commands */
+       RGXFWIF_KCCB_CMD_KICK                                                           = 101U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< DM workload kick command */
+       RGXFWIF_KCCB_CMD_MMUCACHE                                                       = 102U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< MMU cache invalidation request */
+       RGXFWIF_KCCB_CMD_BP                                                                     = 103U | RGX_CMD_MAGIC_DWORD_SHIFTED,
+       RGXFWIF_KCCB_CMD_SLCFLUSHINVAL                                          = 105U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< SLC flush and invalidation request */
+       RGXFWIF_KCCB_CMD_CLEANUP                                                        = 106U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests cleanup of a FW resource (type specified in the command data) */
+       RGXFWIF_KCCB_CMD_POW                                                            = 107U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Power request */
+       RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE                        = 108U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Backing for on-demand ZS-Buffer done */
+       RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE                      = 109U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Unbacking for on-demand ZS-Buffer done */
+       RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE                           = 110U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelist Grow done */
+       RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE        = 112U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelists Reconstruction done */
+       /* RGXFWIF_KCCB_CMD_NOTIFY_SIGNAL_UPDATE */
+       RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE                     = 114U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the firmware that the host has added more data to a CDM2 Circular Buffer */
+       RGXFWIF_KCCB_CMD_HEALTH_CHECK                                           = 115U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Health check request */
+       RGXFWIF_KCCB_CMD_FORCE_UPDATE                                           = 116U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Forcing signalling of all unmet UFOs for a given CCB offset */
+
+       RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK                            = 117U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< There is a TA and a 3D command in this single kick */
+       RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE                      = 118U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the FW that a Guest OS has come online / offline. */
+
+       /* Commands only permitted to the native or host OS */
+       RGXFWIF_KCCB_CMD_REGCONFIG                                                      = 200U | RGX_CMD_MAGIC_DWORD_SHIFTED,
+       RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG                           = 201U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure HWPerf events (to be generated) and HWPerf buffer address (if required) */
+       /* RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS */
+       RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS                                       = 203U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Enable or disable multiple HWPerf blocks (reusing existing configuration) */
+       RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE                                     = 204U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Core clock speed change event */
+       /* RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT*/
+       RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE                                         = 206U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Ask the firmware to update its cached ui32LogType value from the (shared) tracebuf control structure */
+       RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ                           = 207U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set a maximum frequency/OPP point */
+       RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE                           = 208U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Changes the relative scheduling priority for a particular OSid. It can only be serviced for the Host DDK */
+       RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL                                        = 209U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set or clear firmware state flags */
+       /* RGXFWIF_KCCB_CMD_HCS_SET_DEADLINE */
+       /* RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE */
+       RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MIN_FREQ                           = 212U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set a minimum frequency/OPP point */
+       RGXFWIF_KCCB_CMD_PHR_CFG                                                        = 213U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Periodic Hardware Reset behaviour */
+#if defined(SUPPORT_VALIDATION)
+       RGXFWIF_KCCB_CMD_RGXREG                             = 214U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Read RGX Register from FW */
+#endif
+       RGXFWIF_KCCB_CMD_WDG_CFG                                                        = 215U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Safety Firmware Watchdog */
+       RGXFWIF_KCCB_CMD_COUNTER_DUMP                                           = 216U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Controls counter dumping in the FW */
+       RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS                      = 217U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure, clear and enable multiple HWPerf blocks */
+       RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS                     = 218U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure the custom counters for HWPerf */
+#if defined(SUPPORT_VALIDATION)
+       RGXFWIF_KCCB_CMD_GPUMAP                                                         = 219U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Request a FW GPU mapping which is written into by the FW with a pattern */
+#endif
+       RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS                 = 220U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure directly addressable counters for HWPerf */
+} RGXFWIF_KCCB_CMD_TYPE;
+
+#define RGXFWIF_LAST_ALLOWED_GUEST_KCCB_CMD (RGXFWIF_KCCB_CMD_REGCONFIG - 1)
+
+/*! @Brief Kernel CCB command packet */
+typedef struct
+{
+       RGXFWIF_KCCB_CMD_TYPE  eCmdType;      /*!< Command type */
+       IMG_UINT32             ui32KCCBFlags; /*!< Compatibility and other flags */
+
+       /* NOTE: uCmdData must remain the last member of this struct.
+        * It is used to calculate the actual command size for device
+        * memory copies (refer to RGXGetCmdMemCopySize()).
+        */
+       union
+       {
+               RGXFWIF_KCCB_CMD_KICK_DATA                      sCmdKickData;                   /*!< Data for Kick command */
+               RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK_DATA       sCombinedTA3DCmdKickData;       /*!< Data for combined TA/3D Kick command */
+               RGXFWIF_MMUCACHEDATA                            sMMUCacheData;                  /*!< Data for MMU cache command */
+               RGXFWIF_BPDATA                                          sBPData;                                /*!< Data for Breakpoint Commands */
+               RGXFWIF_SLCFLUSHINVALDATA                       sSLCFlushInvalData;             /*!< Data for SLC Flush/Inval commands */
+               RGXFWIF_CLEANUP_REQUEST                         sCleanupData;                   /*!< Data for cleanup commands */
+               RGXFWIF_POWER_REQUEST                           sPowData;                               /*!< Data for power request commands */
+               RGXFWIF_HWPERF_CTRL                                     sHWPerfCtrl;                    /*!< Data for HWPerf control command */
+               RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS       sHWPerfCfgEnableBlks;   /*!< Data for HWPerf configure, clear and enable performance counter block command */
+               RGXFWIF_HWPERF_CTRL_BLKS                        sHWPerfCtrlBlks;                /*!< Data for HWPerf enable or disable performance counter block commands */
+               RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS      sHWPerfSelectCstmCntrs; /*!< Data for HWPerf configure the custom counters to read */
+               RGXFWIF_HWPERF_CONFIG_DA_BLKS           sHWPerfCfgDABlks;               /*!< Data for HWPerf configure Directly Addressable blocks */
+               RGXFWIF_CORECLKSPEEDCHANGE_DATA         sCoreClkSpeedChangeData;/*!< Data for core clock speed change */
+               RGXFWIF_ZSBUFFER_BACKING_DATA           sZSBufferBackingData;   /*!< Feedback for Z/S Buffer backing/unbacking */
+               RGXFWIF_FREELIST_GS_DATA                        sFreeListGSData;                /*!< Feedback for Freelist grow/shrink */
+               RGXFWIF_FREELISTS_RECONSTRUCTION_DATA   sFreeListsReconstructionData;   /*!< Feedback for Freelists reconstruction */
+               RGXFWIF_REGCONFIG_DATA                          sRegConfigData;                 /*!< Data for custom register configuration */
+               RGXFWIF_WRITE_OFFSET_UPDATE_DATA    sWriteOffsetUpdateData; /*!< Data for informing the FW about the write offset update */
+               RGXFWIF_PDVFS_MAX_FREQ_DATA                     sPDVFSMaxFreqData;              /*!< Data for setting the max frequency/OPP */
+               RGXFWIF_PDVFS_MIN_FREQ_DATA                     sPDVFSMinFreqData;              /*!< Data for setting the min frequency/OPP */
+               RGXFWIF_OS_STATE_CHANGE_DATA        sCmdOSOnlineStateData;  /*!< Data for updating the Guest Online states */
+               RGXFWIF_DEV_VIRTADDR                sTBIBuffer;             /*!< Dev address for TBI buffer allocated on demand */
+               RGXFWIF_COUNTER_DUMP_DATA                       sCounterDumpConfigData; /*!< Data for dumping of register ranges */
+               RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA  sForceUpdateData;       /*!< Data for signalling all unmet fences for a given CCB */
+#if defined(SUPPORT_VALIDATION)
+               RGXFWIF_RGXREG_DATA                 sFwRgxData;             /*!< Data for reading off an RGX register */
+               RGXFWIF_GPUMAP_DATA                 sGPUMapData;            /*!< Data for requesting a FW GPU mapping which is written into by the FW with a pattern */
+#endif
+       } UNCACHED_ALIGN uCmdData;
+} UNCACHED_ALIGN RGXFWIF_KCCB_CMD;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_KCCB_CMD);
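+
+/* Illustrative sketch of the sizing idea behind the NOTE in the struct above:
+ * because uCmdData is the last member, only the header plus the payload of
+ * the specific command needs to be copied. This is not the DDK's
+ * RGXGetCmdMemCopySize(); it assumes offsetof() is available and shows a
+ * single example payload. */
+static inline IMG_UINT32 RGXExampleKCCBKickCmdCopySize(void)
+{
+       return (IMG_UINT32)(offsetof(RGXFWIF_KCCB_CMD, uCmdData) +
+                           sizeof(RGXFWIF_KCCB_CMD_KICK_DATA));
+}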
+
+/*! @} End of KCCBTypes */
+
+/*!
+ * @Defgroup FWCCBTypes Firmware CCB data interface
+ * @Brief Types grouping data structures and defines used in realising the Firmware CCB functionality
+ * @{
+ */
+
+/*!
+ ******************************************************************************
+ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING and the
+ * \ref RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING Firmware CCB commands
+ *****************************************************************************/
+typedef struct
+{
+       IMG_UINT32                              ui32ZSBufferID; /*!< ZS buffer ID */
+} RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA;
+
+/*!
+ ******************************************************************************
+ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_FREELIST_GROW Firmware CCB
+ * command
+ *****************************************************************************/
+typedef struct
+{
+       IMG_UINT32                              ui32FreelistID; /*!< Freelist ID */
+} RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA;
+
+/*!
+ ******************************************************************************
+ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION
+ * Firmware CCB command
+ *****************************************************************************/
+typedef struct
+{
+       IMG_UINT32                      ui32FreelistsCount;                                     /*!< Freelists count */
+       IMG_UINT32                      ui32HwrCounter;                                         /*!< HWR counter */
+       IMG_UINT32                      aui32FreelistIDs[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT]; /*!< Array of freelist IDs to reconstruct */
+} RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA;
+
+#define RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_PF                        (1U<<0) /*!< 1 if a page fault happened */
+#define RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS  (1U<<1) /*!< 1 if applicable to all contexts */
+
+/*!
+ ******************************************************************************
+ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION
+ * Firmware CCB command
+ *****************************************************************************/
+typedef struct
+{
+       IMG_UINT32                                              ui32ServerCommonContextID;      /*!< Context affected by the reset */
+       RGX_CONTEXT_RESET_REASON                eResetReason;                           /*!< Reason for reset */
+       RGXFWIF_DM                                              eDM;                                            /*!< Data Master affected by the reset */
+       IMG_UINT32                                              ui32ResetJobRef;                        /*!< Job ref running at the time of reset */
+       IMG_UINT32                                              ui32Flags;                                      /*!< RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG bitfield  */
+       IMG_UINT64 RGXFW_ALIGN                  ui64PCAddress;                          /*!< At what page catalog address */
+       IMG_DEV_VIRTADDR RGXFW_ALIGN    sFaultAddress;                          /*!< Page fault address (only when applicable) */
+} RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA;
+
+/*!
+ ******************************************************************************
+ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION
+ * Firmware CCB command
+ *****************************************************************************/
+typedef struct
+{
+       IMG_DEV_VIRTADDR sFWFaultAddr;  /*!< Page fault address */
+} RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA;
+
+/*!
+ ******************************************************************************
+ * List of command types supported by the Firmware CCB
+ *****************************************************************************/
+typedef enum
+{
+       RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING              = 101U | RGX_CMD_MAGIC_DWORD_SHIFTED,   /*!< Requests ZSBuffer to be backed with physical pages
+                                                                                                 \n Command data: RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA */
+       RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING            = 102U | RGX_CMD_MAGIC_DWORD_SHIFTED,   /*!< Requests ZSBuffer to be unbacked
+                                                                                                 \n Command data: RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA */
+       RGXFWIF_FWCCB_CMD_FREELIST_GROW                 = 103U | RGX_CMD_MAGIC_DWORD_SHIFTED,   /*!< Requests an on-demand freelist grow
+                                                                                                 \n Command data: RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA */
+       RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION      = 104U | RGX_CMD_MAGIC_DWORD_SHIFTED,   /*!< Requests freelists reconstruction
+                                                                                                 \n Command data: RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA */
+       RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION    = 105U | RGX_CMD_MAGIC_DWORD_SHIFTED,   /*!< Notifies host of a HWR event on a context
+                                                                                                 \n Command data: RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA */
+       RGXFWIF_FWCCB_CMD_DEBUG_DUMP                    = 106U | RGX_CMD_MAGIC_DWORD_SHIFTED,   /*!< Requests an on-demand debug dump
+                                                                                                 \n Command data: None */
+       RGXFWIF_FWCCB_CMD_UPDATE_STATS                  = 107U | RGX_CMD_MAGIC_DWORD_SHIFTED,   /*!< Requests an on-demand update on process stats
+                                                                                                 \n Command data: RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA */
+
+       RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE          = 108U | RGX_CMD_MAGIC_DWORD_SHIFTED,
+       RGXFWIF_FWCCB_CMD_REQUEST_GPU_RESTART           = 109U | RGX_CMD_MAGIC_DWORD_SHIFTED,   /*!< Requests GPU restart
+                                                                                                 \n Command data: None */
+#if defined(SUPPORT_VALIDATION)
+       RGXFWIF_FWCCB_CMD_REG_READ                      = 110U | RGX_CMD_MAGIC_DWORD_SHIFTED,
+#if defined(SUPPORT_SOC_TIMER)
+       RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS                 = 111U | RGX_CMD_MAGIC_DWORD_SHIFTED,
+#endif
+#endif
+       RGXFWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION    = 112U | RGX_CMD_MAGIC_DWORD_SHIFTED,   /*!< Notifies host of a FW pagefault
+                                                                                                 \n Command data: RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA */
+} RGXFWIF_FWCCB_CMD_TYPE;
+
+/*!
+ ******************************************************************************
+ * List of the various stats of the process to update/increment
+ *****************************************************************************/
+typedef enum
+{
+       RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS=1,         /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumPartialRenders stat */
+       RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY,                     /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumOutOfMemory stat */
+       RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES,                         /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumTAStores stat */
+       RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES,                         /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32Num3DStores stat */
+       RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES,                        /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumCDMStores stat */
+       RGXFWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES                         /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumTDMStores stat */
+} RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE;
+
+/*!
+ ******************************************************************************
+ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_UPDATE_STATS Firmware CCB
+ * command
+ *****************************************************************************/
+typedef struct
+{
+       RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE             eElementToUpdate;                       /*!< Element to update */
+       IMG_PID                                                                 pidOwner;                                       /*!< The pid of the process whose stats are being updated */
+       IMG_INT32                                                               i32AdjustmentValue;                     /*!< Adjustment to be made to the statistic */
+} RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA;
+
+typedef struct
+{
+       IMG_UINT32 ui32CoreClkRate;
+} UNCACHED_ALIGN RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA;
+
+#if defined(SUPPORT_VALIDATION)
+typedef struct
+{
+       IMG_UINT64 ui64RegValue;
+} RGXFWIF_FWCCB_CMD_RGXREG_READ_DATA;
+
+#if defined(SUPPORT_SOC_TIMER)
+typedef struct
+{
+       IMG_UINT64 ui64timerGray;
+       IMG_UINT64 ui64timerBinary;
+       IMG_UINT64 aui64uscTimers[RGX_FEATURE_NUM_CLUSTERS];
+}  RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS_DATA;
+#endif
+#endif
+
+/*!
+ ******************************************************************************
+ * @Brief Firmware CCB command structure
+ *****************************************************************************/
+typedef struct
+{
+       RGXFWIF_FWCCB_CMD_TYPE  eCmdType;       /*!< Command type */
+       IMG_UINT32              ui32FWCCBFlags; /*!< Compatibility and other flags */
+
+       union
+       {
+               RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA                         sCmdZSBufferBacking;                    /*!< Data for Z/S-Buffer on-demand (un)backing*/
+               RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA                                      sCmdFreeListGS;                                 /*!< Data for on-demand freelist grow/shrink */
+               RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA         sCmdFreeListsReconstruction;    /*!< Data for freelists reconstruction */
+               RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA                            sCmdContextResetNotification;   /*!< Data for context reset notification */
+               RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA                                     sCmdUpdateStatsData;                    /*!< Data for updating process stats */
+               RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA                     sCmdCoreClkRateChange;
+               RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA                                     sCmdFWPagefault;                                /*!< Data for FW page fault notification */
+#if defined(SUPPORT_VALIDATION)
+               RGXFWIF_FWCCB_CMD_RGXREG_READ_DATA                                      sCmdRgxRegReadData;
+#if defined(SUPPORT_SOC_TIMER)
+               RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS_DATA                            sCmdTimers;
+#endif
+#endif
+       } RGXFW_ALIGN uCmdData;
+} RGXFW_ALIGN RGXFWIF_FWCCB_CMD;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_FWCCB_CMD);
+
+/*! @} End of FWCCBTypes */
+
+/*!
+ ******************************************************************************
+ * Workload estimation Firmware CCB command structure for RGX
+ *****************************************************************************/
+typedef struct
+{
+       IMG_UINT16 ui16ReturnDataIndex; /*!< Index for return data array */
+       IMG_UINT32 ui32CyclesTaken;     /*!< The cycles the workload took on the hardware */
+} RGXFWIF_WORKEST_FWCCB_CMD;
+
+/*!
+ * @Defgroup ClientCCBTypes Client CCB data interface
+ * @Brief Types grouping data structures and defines used in realising Client CCB commands/functionality
+ * @{
+ */
+
+/* Required memory alignment for 64-bit variables accessible by Meta
+  (the Meta gcc aligns 64-bit variables to a 64-bit boundary; therefore, memory
+   shared between the host and Meta that contains 64-bit variables has to maintain
+   this alignment) */
+#define RGXFWIF_FWALLOC_ALIGN  sizeof(IMG_UINT64)
+
+#define RGX_CCB_TYPE_TASK                      (IMG_UINT32_C(1) << 15)
+#define RGX_CCB_FWALLOC_ALIGN(size)    (((size) + (RGXFWIF_FWALLOC_ALIGN-1U)) & ~(RGXFWIF_FWALLOC_ALIGN - 1U))
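+/* Worked example (illustrative only, not part of the original header): the
+ * macro above rounds a size up to the next multiple of RGXFWIF_FWALLOC_ALIGN
+ * (8 bytes, i.e. sizeof(IMG_UINT64)), e.g. for a hypothetical 13-byte payload:
+ *
+ *   RGX_CCB_FWALLOC_ALIGN(13)  ==  (13 + 7) & ~7  ==  16
+ *   RGX_CCB_FWALLOC_ALIGN(16)  ==  (16 + 7) & ~7  ==  16
+ */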
+
+typedef IMG_UINT32 RGXFWIF_CCB_CMD_TYPE;
+
+/*!
+ * @Name Client CCB command types
+ * @{
+ */
+#define RGXFWIF_CCB_CMD_TYPE_GEOM                      (201U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< TA DM command */
+#define RGXFWIF_CCB_CMD_TYPE_TQ_3D                     (202U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 3D DM command for TQ operation */
+#define RGXFWIF_CCB_CMD_TYPE_3D                                (203U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 3D DM command */
+#define RGXFWIF_CCB_CMD_TYPE_3D_PR                     (204U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 3D DM command for Partial render */
+#define RGXFWIF_CCB_CMD_TYPE_CDM                       (205U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< Compute DM command */
+#define RGXFWIF_CCB_CMD_TYPE_TQ_TDM                    (206U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< TDM command */
+#define RGXFWIF_CCB_CMD_TYPE_FBSC_INVALIDATE (207U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK)
+#define RGXFWIF_CCB_CMD_TYPE_TQ_2D                     (208U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 2D DM command for TQ operation */
+#define RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP     (209U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK)
+#define RGXFWIF_CCB_CMD_TYPE_NULL                      (210U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK)
+#define RGXFWIF_CCB_CMD_TYPE_ABORT                     (211U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK)
+
+/* Leave a gap between CCB specific commands and generic commands */
+#define RGXFWIF_CCB_CMD_TYPE_FENCE          (212U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence dependencies of a command */
+#define RGXFWIF_CCB_CMD_TYPE_UPDATE         (213U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence updates of a command */
+#define RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE     (214U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence updates related to workload resources */
+#define RGXFWIF_CCB_CMD_TYPE_FENCE_PR       (215U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence dependencies of a PR command */
+#define RGXFWIF_CCB_CMD_TYPE_PRIORITY       (216U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Context priority update command */
+/* Pre and Post timestamp commands are supposed to sandwich the DM cmd. The
+   padding code with the CCB wrap upsets the FW if we don't have the task type
+   bit cleared for POST_TIMESTAMPs. That's why we have 2 different cmd types.
+*/
+#define RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP (217U | RGX_CMD_MAGIC_DWORD_SHIFTED)
+#define RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE (218U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Unfenced fence updates of a command */
+#define RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE (219U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Unfenced fence updates related to workload resources */
+
+#if defined(SUPPORT_VALIDATION)
+#define RGXFWIF_CCB_CMD_TYPE_REG_READ (220U | RGX_CMD_MAGIC_DWORD_SHIFTED)
+#endif
+
+#define RGXFWIF_CCB_CMD_TYPE_PADDING   (221U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Skip without action type command */
+/*! @} End of Client CCB command types */
+
+typedef struct
+{
+       /* Index for the KM Workload estimation return data array */
+       IMG_UINT16 RGXFW_ALIGN         ui16ReturnDataIndex;
+       /* Predicted time taken to do the work in cycles */
+       IMG_UINT32 RGXFW_ALIGN         ui32CyclesPrediction;
+       /* Deadline for the workload (in usecs) */
+       IMG_UINT64 RGXFW_ALIGN         ui64Deadline;
+} RGXFWIF_WORKEST_KICK_DATA;
+
+/*! @Brief Command header of a command in the client CCB buffer.
+ *
+ *  Followed by this header is the command-data specific to the
+ *  command-type as specified in the header.
+ */
+typedef struct
+{
+       RGXFWIF_CCB_CMD_TYPE                                    eCmdType;      /*!< Command data type following this command header */
+       IMG_UINT32                                                              ui32CmdSize;   /*!< Size of the command following this header */
+       IMG_UINT32                                                              ui32ExtJobRef; /*!< external job reference - provided by client and used in debug for tracking submitted work */
+       IMG_UINT32                                                              ui32IntJobRef; /*!< internal job reference - generated by services and used in debug for tracking submitted work */
+       RGXFWIF_WORKEST_KICK_DATA RGXFW_ALIGN   sWorkEstKickData; /*!< Workload Estimation - Workload Estimation Data */
+} RGXFWIF_CCB_CMD_HEADER;
+
+/*
+ ******************************************************************************
+ * Client CCB commands which are only required by the kernel
+ *****************************************************************************/
+
+/*! @Brief Command data for \ref RGXFWIF_CCB_CMD_TYPE_PRIORITY type client CCB command */
+typedef struct
+{
+       IMG_INT32              i32Priority; /*!< Priority level */
+} RGXFWIF_CMD_PRIORITY;
+
+/*! @} End of ClientCCBTypes */
+
+/*!
+ ******************************************************************************
+ * Signature and Checksums Buffer
+ *****************************************************************************/
+typedef struct
+{
+       PRGXFWIF_SIGBUFFER              sBuffer;                        /*!< Ptr to Signature Buffer memory */
+       IMG_UINT32                              ui32LeftSizeInRegs;     /*!< Amount of space left for storing regs in the buffer */
+} UNCACHED_ALIGN RGXFWIF_SIGBUF_CTL;
+
+typedef struct
+{
+       PRGXFWIF_COUNTERBUFFER  sBuffer;                        /*!< Ptr to counter dump buffer */
+       IMG_UINT32                              ui32SizeInDwords;       /*!< Amount of space for storing in the buffer */
+} UNCACHED_ALIGN RGXFWIF_COUNTER_DUMP_CTL;
+
+typedef struct
+{
+       PRGXFWIF_FIRMWAREGCOVBUFFER     sBuffer;                /*!< Ptr to firmware gcov buffer */
+       IMG_UINT32                                      ui32Size;               /*!< Amount of space for storing in the buffer */
+} UNCACHED_ALIGN RGXFWIF_FIRMWARE_GCOV_CTL;
+
+/*!
+ *****************************************************************************
+ * RGX Compatibility checks
+ *****************************************************************************/
+
+/* WARNING: Whenever the layout of RGXFWIF_COMPCHECKS_BVNC changes, the
+       following define should be increased by 1 to indicate to the
+       compatibility logic that the layout has changed. */
+#define RGXFWIF_COMPCHECKS_LAYOUT_VERSION 3
+
+typedef struct
+{
+       IMG_UINT32      ui32LayoutVersion; /* WARNING: This field must be defined as first one in this structure */
+       IMG_UINT64      RGXFW_ALIGN ui64BVNC;
+} UNCACHED_ALIGN RGXFWIF_COMPCHECKS_BVNC;
+
+typedef struct
+{
+       IMG_UINT8       ui8OsCountSupport;
+} UNCACHED_ALIGN RGXFWIF_INIT_OPTIONS;
+
+#define RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(name) \
+       RGXFWIF_COMPCHECKS_BVNC (name) = { \
+               RGXFWIF_COMPCHECKS_LAYOUT_VERSION, \
+               0, \
+       }
+#define RGXFWIF_COMPCHECKS_BVNC_INIT(name) \
+       do { \
+               (name).ui32LayoutVersion = RGXFWIF_COMPCHECKS_LAYOUT_VERSION; \
+               (name).ui64BVNC = 0; \
+       } while (false)
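+/* Usage sketch (illustrative only, not part of the original header; variable
+ * names are hypothetical): the two macros above either declare and statically
+ * initialise a BVNC compatibility struct, or re-initialise an existing one:
+ *
+ *   RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sSWBVNC);   // declares and inits sSWBVNC
+ *   RGXFWIF_COMPCHECKS_BVNC sHWBVNC;
+ *   RGXFWIF_COMPCHECKS_BVNC_INIT(sHWBVNC);               // resets an existing struct
+ *
+ * Both leave ui32LayoutVersion == RGXFWIF_COMPCHECKS_LAYOUT_VERSION and
+ * ui64BVNC == 0; the actual BVNC value is filled in later by the caller.
+ */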
+
+typedef struct
+{
+       RGXFWIF_COMPCHECKS_BVNC         sHWBVNC;                                /*!< hardware BVNC (from the RGX registers) */
+       RGXFWIF_COMPCHECKS_BVNC         sFWBVNC;                                /*!< firmware BVNC */
+       IMG_UINT32                                      ui32FWProcessorVersion; /*!< identifier of the FW processor version */
+       IMG_UINT32                                      ui32DDKVersion;                 /*!< software DDK version */
+       IMG_UINT32                                      ui32DDKBuild;                   /*!< software DDK build no. */
+       IMG_UINT32                                      ui32BuildOptions;               /*!< build options bit-field */
+       RGXFWIF_INIT_OPTIONS            sInitOptions;                   /*!< initialisation options bit-field */
+       IMG_BOOL                                        bUpdated;                               /*!< Information is valid */
+} UNCACHED_ALIGN RGXFWIF_COMPCHECKS;
+
+/*!
+ ******************************************************************************
+ * Updated configuration post FW data init.
+ *****************************************************************************/
+typedef struct
+{
+       IMG_UINT32         ui32ActivePMLatencyms;               /* APM latency in ms before signalling IDLE to the host */
+       IMG_UINT32         ui32RuntimeCfgFlags;                 /* Compatibility and other flags */
+       IMG_BOOL           bActivePMLatencyPersistant;          /* If set, the APM latency is not reset to the system default on each GPU power transition */
+       IMG_UINT32         ui32CoreClockSpeed;                  /* Core clock speed, currently only used to calculate timer ticks */
+       IMG_UINT32         ui32DefaultDustsNumInit;             /* Last number of dusts change requested by the host */
+       IMG_UINT32         ui32PHRMode;                         /* Periodic Hardware Reset configuration values */
+       IMG_UINT32         ui32HCSDeadlineMS;                   /* New number of milliseconds C/S is allowed to last */
+       IMG_UINT32         ui32WdgPeriodUs;                     /* The watchdog period in microseconds */
+       IMG_UINT32         aui32OSidPriority[RGXFW_MAX_NUM_OS]; /*!< Array of priorities per OS */
+       PRGXFWIF_HWPERFBUF sHWPerfBuf;                          /* On-demand allocated HWPerf buffer address, to be passed to the FW */
+} RGXFWIF_RUNTIME_CFG;
+
+/*!
+ *****************************************************************************
+ * Control data for RGX
+ *****************************************************************************/
+
+#define RGXFWIF_HWR_DEBUG_DUMP_ALL (99999U)
+
+#if defined(PDUMP)
+
+#define RGXFWIF_PID_FILTER_MAX_NUM_PIDS 32U
+
+typedef enum
+{
+       RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT,
+       RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT
+} RGXFWIF_PID_FILTER_MODE;
+
+typedef struct
+{
+       IMG_PID uiPID;
+       IMG_UINT32 ui32OSID;
+} RGXFW_ALIGN RGXFWIF_PID_FILTER_ITEM;
+
+typedef struct
+{
+       RGXFWIF_PID_FILTER_MODE eMode;
+       /* Each process in the filter list is specified by a PID and OS ID pair.
+        * Each PID/OS ID pair is an item in the items array (asItems).
+        * If the array contains fewer than RGXFWIF_PID_FILTER_MAX_NUM_PIDS entries,
+        * it must be terminated by an item with a PID of zero.
+        */
+       RGXFWIF_PID_FILTER_ITEM asItems[RGXFWIF_PID_FILTER_MAX_NUM_PIDS];
+} RGXFW_ALIGN RGXFWIF_PID_FILTER;
+#endif
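+/* Illustrative example (not part of the original header; PIDs are
+ * hypothetical): a filter that excludes everything except two processes.
+ * Because fewer than RGXFWIF_PID_FILTER_MAX_NUM_PIDS entries are used, the
+ * list is terminated by an item with a PID of zero, as described above.
+ *
+ *   RGXFWIF_PID_FILTER sFilter = {
+ *       .eMode   = RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT,
+ *       .asItems = {
+ *           { .uiPID = 1234, .ui32OSID = 0 },
+ *           { .uiPID = 5678, .ui32OSID = 0 },
+ *           { .uiPID = 0 }                    // terminator
+ *       },
+ *   };
+ */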
+
+#if defined(SUPPORT_SECURITY_VALIDATION)
+#define RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_DATA  (0x1U << 0)
+#define RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_CODE  (0x1U << 1)
+#define RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_NONSECURE  (0x1U << 2)
+#define RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_SECURE     (0x1U << 3)
+#endif
+
+typedef enum
+{
+       RGXFWIF_TPU_DM_PDM = 0,
+       RGXFWIF_TPU_DM_VDM = 1,
+       RGXFWIF_TPU_DM_CDM = 2,
+       RGXFWIF_TPU_DM_TDM = 3,
+       RGXFWIF_TPU_DM_LAST
+} RGXFWIF_TPU_DM;
+
+typedef enum
+{
+       RGXFWIF_GPIO_VAL_OFF           = 0, /*!< No GPIO validation */
+       RGXFWIF_GPIO_VAL_GENERAL       = 1, /*!< Simple test case that starts
+                                                by sending data via the GPIO
+                                                and then echoes back any data
+                                                received over the GPIO */
+       RGXFWIF_GPIO_VAL_AP            = 2, /*!< More complex test case that writes
+                                                and reads data across the entire
+                                                GPIO AP address range.*/
+#if defined(SUPPORT_STRIP_RENDERING)
+       RGXFWIF_GPIO_VAL_SR_BASIC      = 3, /*!< Strip Rendering AP based basic test.*/
+       RGXFWIF_GPIO_VAL_SR_COMPLEX    = 4, /*!< Strip Rendering AP based complex test.*/
+#endif
+       RGXFWIF_GPIO_VAL_TESTBENCH     = 5, /*!< Validates the GPIO Testbench. */
+       RGXFWIF_GPIO_VAL_LOOPBACK      = 6, /*!< Send and then receive each byte
+                                                in the range 0-255. */
+       RGXFWIF_GPIO_VAL_LOOPBACK_LITE = 7, /*!< Send and then receive each power-of-2
+                                                byte in the range 0-255. */
+       RGXFWIF_GPIO_VAL_LAST
+} RGXFWIF_GPIO_VAL_MODE;
+
+typedef enum
+{
+       FW_PERF_CONF_NONE = 0,
+       FW_PERF_CONF_ICACHE = 1,
+       FW_PERF_CONF_DCACHE = 2,
+       FW_PERF_CONF_JTLB_INSTR = 5,
+       FW_PERF_CONF_INSTRUCTIONS = 6
+} FW_PERF_CONF;
+
+typedef enum
+{
+       FW_BOOT_STAGE_TLB_INIT_FAILURE = -2,
+       FW_BOOT_STAGE_NOT_AVAILABLE = -1,
+       FW_BOOT_NOT_STARTED = 0,
+       FW_BOOT_BLDR_STARTED = 1,
+       FW_BOOT_CACHE_DONE,
+       FW_BOOT_TLB_DONE,
+       FW_BOOT_MAIN_STARTED,
+       FW_BOOT_ALIGNCHECKS_DONE,
+       FW_BOOT_INIT_DONE,
+} FW_BOOT_STAGE;
+
+/*!
+ * @AddToGroup KCCBTypes
+ * @{
+ * @Name Kernel CCB return slot responses
+ * @{
+ * Bit-fields are used instead of bare integers so that the FW can potentially
+ * pack several responses into the return slot of a single kCCB command.
+ */
+
+#define RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED   (1U << 0) /*!< Command executed (return status from FW) */
+#define RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY   (1U << 1) /*!< A cleanup was requested but resource busy */
+#define RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE   (1U << 2) /*!< Poll failed in FW for a HW operation to complete */
+
+#define RGXFWIF_KCCB_RTN_SLOT_NO_RESPONSE            0x0U      /*!< Reset value of a kCCB return slot (set by host) */
+/*!
+ * @} End of Name Kernel CCB return slot responses
+ * @} End of AddToGroup KCCBTypes
+ */
+
+typedef struct
+{
+       /* Fw-Os connection states */
+       volatile RGXFWIF_CONNECTION_FW_STATE eConnectionFwState;
+       volatile RGXFWIF_CONNECTION_OS_STATE eConnectionOsState;
+       volatile IMG_UINT32                  ui32AliveFwToken;
+       volatile IMG_UINT32                  ui32AliveOsToken;
+} UNCACHED_ALIGN RGXFWIF_CONNECTION_CTL;
+
+/*! @Brief Firmware OS Initialization data \ref RGXFWIF_OSINIT
+ * allocated by services and used by the Firmware on boot
+ **/
+typedef struct
+{
+       /* Kernel CCB */
+       PRGXFWIF_CCB_CTL        psKernelCCBCtl; /*!< Kernel CCB Control */
+       PRGXFWIF_CCB            psKernelCCB; /*!<  Kernel CCB */
+       PRGXFWIF_CCB_RTN_SLOTS  psKernelCCBRtnSlots; /*!<  Kernel CCB return slots */
+
+       /* Firmware CCB */
+       PRGXFWIF_CCB_CTL        psFirmwareCCBCtl; /*!<  Firmware CCB control */
+       PRGXFWIF_CCB            psFirmwareCCB; /*!<  Firmware CCB */
+
+       /* Workload Estimation Firmware CCB */
+       PRGXFWIF_CCB_CTL        psWorkEstFirmwareCCBCtl; /*!<  Workload estimation control */
+       PRGXFWIF_CCB            psWorkEstFirmwareCCB; /*!<  Workload estimation buffer */
+
+       PRGXFWIF_HWRINFOBUF     sRGXFWIfHWRInfoBufCtl; /*!<  HWRecoveryInfo control */
+
+       IMG_UINT32              ui32HWRDebugDumpLimit; /*!< Firmware debug dump maximum limit */
+
+       PRGXFWIF_OSDATA         sFwOsData; /*!<  Firmware per-os shared data */
+
+       RGXFWIF_COMPCHECKS      sRGXCompChecks; /*!< Compatibility checks to be populated by the Firmware */
+
+} UNCACHED_ALIGN RGXFWIF_OSINIT;
+
+/*! @Brief Firmware System Initialization data \ref RGXFWIF_SYSINIT
+ * allocated by services and used by the Firmware on boot
+ **/
+typedef struct
+{
+       IMG_DEV_PHYADDR         RGXFW_ALIGN sFaultPhysAddr; /*!< Fault read address */
+
+       IMG_DEV_VIRTADDR        RGXFW_ALIGN sPDSExecBase; /*!< PDS execution base */
+       IMG_DEV_VIRTADDR        RGXFW_ALIGN sUSCExecBase; /*!< USC execution base */
+       IMG_DEV_VIRTADDR        RGXFW_ALIGN sFBCDCStateTableBase; /*!< FBCDC bindless texture state table base */
+       IMG_DEV_VIRTADDR        RGXFW_ALIGN sFBCDCLargeStateTableBase;
+       IMG_DEV_VIRTADDR        RGXFW_ALIGN sTextureHeapBase; /*!< Texture state base */
+
+       IMG_UINT64              RGXFW_ALIGN ui64HWPerfFilter; /*! Event filter for Firmware events */
+
+       IMG_DEV_VIRTADDR        RGXFW_ALIGN sSLC3FenceDevVAddr;
+
+       IMG_UINT32              RGXFW_ALIGN aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_LAST];
+
+       RGXFWIF_SIGBUF_CTL      asSigBufCtl[RGXFWIF_DM_MAX]; /*!< Signature and Checksum Buffers for DMs */
+
+       RGXFWIF_PDVFS_OPP       sPDVFSOPPInfo;
+
+       RGXFWIF_DMA_ADDR        sCorememDataStore; /*!< Firmware coremem data */
+
+       RGXFWIF_COUNTER_DUMP_CTL sCounterDumpCtl;
+
+#if defined(SUPPORT_FIRMWARE_GCOV)
+       RGXFWIF_FIRMWARE_GCOV_CTL sFirmwareGcovCtl; /*!< Firmware gcov buffer control */
+#endif
+
+       IMG_UINT32              ui32FilterFlags;
+
+       PRGXFWIF_RUNTIME_CFG    sRuntimeCfg; /*!<  Firmware Runtime configuration */
+
+       PRGXFWIF_TRACEBUF       sTraceBufCtl; /*!<  Firmware Trace buffer control */
+       PRGXFWIF_SYSDATA        sFwSysData; /*!< Firmware System shared data */
+#if defined(SUPPORT_TBI_INTERFACE)
+       PRGXFWIF_TBIBUF         sTBIBuf; /*!< Tbi log buffer */
+#endif
+
+       PRGXFWIF_GPU_UTIL_FWCB  sGpuUtilFWCbCtl; /*!< GPU utilization buffer */
+       PRGXFWIF_REG_CFG        sRegCfg; /*!< Firmware register user configuration */
+       PRGXFWIF_HWPERF_CTL     sHWPerfCtl; /*!< HWPerf counter block configuration.*/
+
+       RGXFWIF_DEV_VIRTADDR    sAlignChecks; /*!< Array holding Server structures alignment data */
+
+       IMG_UINT32              ui32InitialCoreClockSpeed; /*!< Core clock speed at FW boot time */
+
+       IMG_UINT32              ui32InitialActivePMLatencyms; /*!< APM latency in ms before signalling IDLE to the host */
+
+       IMG_BOOL                bFirmwareStarted; /*!< Flag to be set by the Firmware after successful start */
+
+       IMG_UINT32              ui32MarkerVal; /*!< Host/FW Trace synchronisation Partition Marker */
+
+       IMG_UINT32              ui32FirmwareStartedTimeStamp; /*!< Firmware initialization complete time */
+
+       IMG_UINT32              ui32JonesDisableMask;
+
+       FW_PERF_CONF            eFirmwarePerf; /*!< Firmware performance counter config */
+
+       /**
+        * FW pointer to memory containing the core clock rate in Hz.
+        * The Firmware (PDVFS) updates this memory when running on a non-primary
+        * FW thread to communicate the rate to the host driver.
+        */
+       PRGXFWIF_CORE_CLK_RATE  sCoreClockRate;
+
+#if defined(PDUMP)
+       RGXFWIF_PID_FILTER      sPIDFilter;
+#endif
+
+       RGXFWIF_GPIO_VAL_MODE   eGPIOValidationMode;
+
+       RGX_HWPERF_BVNC         sBvncKmFeatureFlags; /*!< Used in HWPerf for decoding BVNC Features*/
+
+#if defined(SUPPORT_SECURITY_VALIDATION)
+       IMG_UINT32              ui32SecurityTestFlags;
+       RGXFWIF_DEV_VIRTADDR    pbSecureBuffer;
+       RGXFWIF_DEV_VIRTADDR    pbNonSecureBuffer;
+#endif
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+       /*
+        * Used when validation is enabled to allow the host to check
+        * that MTS sent the correct sideband in response to a kick
+        * from a given OS's schedule register.
+        * Testing is enabled if RGXFWIF_KICK_TEST_ENABLED_BIT is set.
+        *
+        * Set by the host to:
+        * (osid << RGXFWIF_KICK_TEST_OSID_SHIFT) | RGXFWIF_KICK_TEST_ENABLED_BIT
+        * and reset to 0 by the FW when kicked by the given OSid.
+        */
+       IMG_UINT32              ui32OSKickTest;
+#endif
+
+       /* Value to write into RGX_CR_TFBC_COMPRESSION_CONTROL */
+       IMG_UINT32              ui32TFBCCompressionControl;
+
+#if defined(SUPPORT_AUTOVZ)
+       IMG_UINT32              ui32VzWdgPeriod;
+#endif
+
+} UNCACHED_ALIGN RGXFWIF_SYSINIT;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#define RGXFWIF_KICK_TEST_ENABLED_BIT  0x1
+#define RGXFWIF_KICK_TEST_OSID_SHIFT   0x1
+#endif
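+/* Illustrative sketch (not part of the original header; the OSID value is
+ * hypothetical): the value the host would write into ui32OSKickTest, as
+ * described in the RGXFWIF_SYSINIT comment above:
+ *
+ *   IMG_UINT32 ui32OsId = 3;                               // example OSID
+ *   IMG_UINT32 ui32Val  = (ui32OsId << RGXFWIF_KICK_TEST_OSID_SHIFT) |
+ *                         RGXFWIF_KICK_TEST_ENABLED_BIT;   // == 0x7 here
+ *
+ * The FW resets the field back to 0 once it has been kicked by that OSID.
+ */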
+
+/*!
+ *****************************************************************************
+ * Timer correlation shared data and defines
+ *****************************************************************************/
+
+typedef struct
+{
+       IMG_UINT64 RGXFW_ALIGN ui64OSTimeStamp;
+       IMG_UINT64 RGXFW_ALIGN ui64OSMonoTimeStamp;
+       IMG_UINT64 RGXFW_ALIGN ui64CRTimeStamp;
+
+       /* Utility variable used to convert CR timer deltas to OS timer deltas (nS),
+        * where the deltas are relative to the timestamps above:
+        * deltaOS = (deltaCR * K) >> decimal_shift, see full explanation below */
+       IMG_UINT64 RGXFW_ALIGN ui64CRDeltaToOSDeltaKNs;
+
+       IMG_UINT32             ui32CoreClockSpeed;
+       IMG_UINT32             ui32Reserved;
+} UNCACHED_ALIGN RGXFWIF_TIME_CORR;
+
+
+/* The following macros are used to help converting FW timestamps to the Host
+ * time domain. On the FW the RGX_CR_TIMER counter is used to keep track of
+ * time; it increments by 1 every 256 GPU clock ticks, so the general
+ * formula to perform the conversion is:
+ *
+ * [ GPU clock speed in Hz, if (scale == 10^9) then deltaOS is in nS,
+ *   otherwise if (scale == 10^6) then deltaOS is in uS ]
+ *
+ *             deltaCR * 256                                   256 * scale
+ *  deltaOS = --------------- * scale = deltaCR * K    [ K = --------------- ]
+ *             GPUclockspeed                                  GPUclockspeed
+ *
+ * The actual K is multiplied by 2^20 (and deltaCR * K is divided by 2^20)
+ * to get some better accuracy and to avoid returning 0 in the integer
+ * division 256000000/GPUfreq if GPUfreq is greater than 256MHz.
+ * This is the same as keeping K as a decimal number.
+ *
+ * The maximum deltaOS is slightly more than 5hrs for all GPU frequencies
+ * (deltaCR * K is more or less a constant), and it's relative to the base
+ * OS timestamp sampled as a part of the timer correlation data.
+ * This base is refreshed on GPU power-on, DVFS transition and periodic
+ * frequency calibration (executed every few seconds if the FW is doing
+ * some work), so as long as the GPU is doing something and one of these
+ * events is triggered then deltaCR * K will not overflow and deltaOS will be
+ * correct.
+ */
+
+#define RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT  (20)
+
+#define RGXFWIF_GET_DELTA_OSTIME_NS(deltaCR, K) \
+       (((deltaCR) * (K)) >> RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT)
+
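+/* Worked example (illustrative only; the clock rate is hypothetical): for a
+ * GPU clock of 512 MHz and scale = 10^9 (nanoseconds), the pre-shifted
+ * constant described above is
+ *
+ *   K = ((256 * 10^9) / 512000000) << RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT
+ *     = 500 << 20 = 524288000
+ *
+ * A deltaCR of 2,000,000 CR ticks (512,000,000 GPU cycles, i.e. one second
+ * at 512 MHz) then converts as
+ *
+ *   RGXFWIF_GET_DELTA_OSTIME_NS(2000000, K) = (2000000 * 524288000) >> 20
+ *                                           = 1000000000 nS
+ */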
+
+/*!
+ ******************************************************************************
+ * GPU Utilisation
+ *****************************************************************************/
+
+/* See rgx_common.h for a list of GPU states */
+#define RGXFWIF_GPU_UTIL_TIME_MASK       (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF) & ~RGXFWIF_GPU_UTIL_STATE_MASK)
+
+#define RGXFWIF_GPU_UTIL_GET_TIME(word)  ((word) & RGXFWIF_GPU_UTIL_TIME_MASK)
+#define RGXFWIF_GPU_UTIL_GET_STATE(word) ((word) & RGXFWIF_GPU_UTIL_STATE_MASK)
+
+/* The OS timestamps computed by the FW are approximations of the real time,
+ * which means they could be slightly behind or ahead of the real timer on the Host.
+ * In some cases we can perform subtractions between FW approximated
+ * timestamps and real OS timestamps, so we need a form of protection against
+ * negative results if for instance the FW one is a bit ahead of time.
+ */
+#define RGXFWIF_GPU_UTIL_GET_PERIOD(newtime,oldtime) \
+       (((newtime) > (oldtime)) ? ((newtime) - (oldtime)) : 0U)
+
+#define RGXFWIF_GPU_UTIL_MAKE_WORD(time,state) \
+       (RGXFWIF_GPU_UTIL_GET_TIME(time) | RGXFWIF_GPU_UTIL_GET_STATE(state))
+
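+/* Illustrative sketch (not part of the original header; variable names are
+ * hypothetical): computing the time spent in the last recorded state from two
+ * state words using the helpers above.
+ *
+ *   IMG_UINT64 ui64Period =
+ *       RGXFWIF_GPU_UTIL_GET_PERIOD(RGXFWIF_GPU_UTIL_GET_TIME(ui64CurrentWord),
+ *                                   RGXFWIF_GPU_UTIL_GET_TIME(ui64LastWord));
+ *
+ * The GET_PERIOD clamp returns 0 rather than a huge unsigned value whenever
+ * the FW-approximated "new" timestamp is slightly behind the "old" one.
+ */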
+
+/* The timer correlation array must be big enough to ensure old entries won't be
+ * overwritten before all the HWPerf events linked to those entries are processed
+ * by the MISR. The update frequency of this array depends on how fast the system
+ * can change state (basically how small the APM latency is) and perform DVFS transitions.
+ *
+ * The minimum size is 2 (not 1) to avoid race conditions between the FW reading
+ * an entry while the Host is updating it. With 2 entries, in the worst case the
+ * FW will read old data, which is still acceptable if the Host is updating the
+ * timer correlation at that time.
+ */
+#define RGXFWIF_TIME_CORR_ARRAY_SIZE            256U
+#define RGXFWIF_TIME_CORR_CURR_INDEX(seqcount)  ((seqcount) % RGXFWIF_TIME_CORR_ARRAY_SIZE)
+
+/* Make sure the timer correlation array size is a power of 2 */
+static_assert((RGXFWIF_TIME_CORR_ARRAY_SIZE & (RGXFWIF_TIME_CORR_ARRAY_SIZE - 1U)) == 0U,
+                         "RGXFWIF_TIME_CORR_ARRAY_SIZE must be a power of two");
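+/* Illustrative note (not part of the original header): because the array size
+ * is a power of two, the modulo in RGXFWIF_TIME_CORR_CURR_INDEX reduces to a
+ * simple mask, e.g. for a hypothetical sequence count of 300:
+ *
+ *   RGXFWIF_TIME_CORR_CURR_INDEX(300) == 300 % 256 == (300 & 255) == 44
+ */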
+
+typedef struct
+{
+       RGXFWIF_TIME_CORR      sTimeCorr[RGXFWIF_TIME_CORR_ARRAY_SIZE];
+       IMG_UINT32             ui32TimeCorrSeqCount;
+
+       /* Compatibility and other flags */
+       IMG_UINT32             ui32GpuUtilFlags;
+
+       /* Last GPU state + OS time of the last state update */
+       IMG_UINT64 RGXFW_ALIGN ui64LastWord;
+
+       /* Counters for the amount of time the GPU was active/idle/blocked */
+       IMG_UINT64 RGXFW_ALIGN aui64StatsCounters[RGXFWIF_GPU_UTIL_STATE_NUM];
+} UNCACHED_ALIGN RGXFWIF_GPU_UTIL_FWCB;
+
+typedef struct
+{
+       IMG_UINT32           ui32RenderTargetIndex;          /* Render number */
+       IMG_UINT32           ui32CurrentRenderTarget;        /* Index in RTA */
+       IMG_UINT32           ui32ActiveRenderTargets;        /* Total active RTs */
+       IMG_UINT32           ui32CumulActiveRenderTargets;   /* Total active RTs from the first TA kick, for OOM */
+       RGXFWIF_DEV_VIRTADDR sValidRenderTargets;            /* Array of valid RT indices */
+       RGXFWIF_DEV_VIRTADDR sRTANumPartialRenders;          /* Array of the number of partial renders that occurred per render target */
+       IMG_UINT32           ui32MaxRTs;                     /* Number of render targets in the array */
+       IMG_UINT32           ui32RTACtlFlags; /* Compatibility and other flags */
+} UNCACHED_ALIGN RGXFWIF_RTA_CTL;
+
+/*!
+ * @InGroup RenderTarget
+ * @Brief Firmware Freelist holding usage state of the Parameter Buffers
+ */
+typedef struct
+{
+       IMG_DEV_VIRTADDR        RGXFW_ALIGN psFreeListDevVAddr; /*!< Freelist page table base */
+       IMG_UINT64              RGXFW_ALIGN ui64CurrentDevVAddr;/*!< Freelist page table entry for current free page  */
+       IMG_UINT32              ui32CurrentStackTop;            /*!< Freelist current free page  */
+       IMG_UINT32              ui32MaxPages;                   /*!< Max no. of pages that can be added to the freelist */
+       IMG_UINT32              ui32GrowPages;                  /*!< No. of pages to add in each freelist grow */
+       IMG_UINT32              ui32CurrentPages;               /*!< Total no. of pages made available to the PM HW */
+       IMG_UINT32              ui32AllocatedPageCount;         /*!< No. of pages allocated by PM HW */
+       IMG_UINT32              ui32AllocatedMMUPageCount;      /*!< No. of pages allocated for GPU MMU for PM*/
+#if defined(SUPPORT_SHADOW_FREELISTS)
+       IMG_UINT32              ui32HWRCounter;
+       PRGXFWIF_FWMEMCONTEXT   psFWMemContext;
+#endif
+       IMG_UINT32              ui32FreeListID;                 /*!< Unique Freelist ID */
+       IMG_BOOL                bGrowPending;                   /*!< Freelist grow is pending */
+       IMG_UINT32              ui32ReadyPages;                 /*!< Reserved pages to be used only on PM OOM event */
+       IMG_UINT32              ui32FreelistFlags;              /*!< Compatibility and other flags */
+#if defined(SUPPORT_AGP)
+       IMG_UINT32              ui32PmGlobalPb;                 /*!< PM Global PB on which Freelist is loaded */
+#endif
+} UNCACHED_ALIGN RGXFWIF_FREELIST;
+
+/*!
+ ******************************************************************************
+ * HWRTData
+ *****************************************************************************/
+
+/* HWRTData flags */
+/* Deprecated flags 1:0 */
+#define HWRTDATA_HAS_LAST_TA              (1UL << 2)
+#define HWRTDATA_PARTIAL_RENDERED         (1UL << 3)
+#define HWRTDATA_DISABLE_TILE_REORDERING  (1UL << 4)
+#define HWRTDATA_NEED_BRN65101_BLIT       (1UL << 5)
+#define HWRTDATA_FIRST_BRN65101_STRIP     (1UL << 6)
+#define HWRTDATA_NEED_BRN67182_2ND_RENDER (1UL << 7)
+#if defined(SUPPORT_AGP)
+#define HWRTDATA_GLOBAL_PB_NUMBER_BIT0    (1UL << 8)
+#if defined(SUPPORT_AGP4)
+#define HWRTDATA_GLOBAL_PB_NUMBER_BIT1    (1UL << 9)
+#endif
+#define HWRTDATA_GEOM_NEEDS_RESUME        (1UL << 10)
+#endif
+
+typedef enum
+{
+       RGXFWIF_RTDATA_STATE_NONE = 0,
+       RGXFWIF_RTDATA_STATE_KICKTA,
+       RGXFWIF_RTDATA_STATE_KICKTAFIRST,
+       RGXFWIF_RTDATA_STATE_TAFINISHED,
+       RGXFWIF_RTDATA_STATE_KICK3D,
+       RGXFWIF_RTDATA_STATE_3DFINISHED,
+       RGXFWIF_RTDATA_STATE_3DCONTEXTSTORED,
+       RGXFWIF_RTDATA_STATE_TAOUTOFMEM,
+       RGXFWIF_RTDATA_STATE_PARTIALRENDERFINISHED,
+       /* In case of HWR, we can't set the RTDATA state to NONE,
+        * as this would cause any TA to become a first TA.
+        * To ensure all related TAs are skipped, we use the HWR state */
+       RGXFWIF_RTDATA_STATE_HWR,
+       RGXFWIF_RTDATA_STATE_UNKNOWN = 0x7FFFFFFFU
+} RGXFWIF_RTDATA_STATE;
+
+typedef struct
+{
+       IMG_BOOL                                                        bTACachesNeedZeroing;
+
+       IMG_UINT32                                                      ui32ScreenPixelMax;
+       IMG_UINT64                                                      RGXFW_ALIGN ui64MultiSampleCtl;
+       IMG_UINT64                                                      ui64FlippedMultiSampleCtl;
+       IMG_UINT32                                                      ui32TPCStride;
+       IMG_UINT32                                                      ui32TPCSize;
+       IMG_UINT32                                                      ui32TEScreen;
+       IMG_UINT32                                                      ui32MTileStride;
+       IMG_UINT32                                                      ui32TEAA;
+       IMG_UINT32                                                      ui32TEMTILE1;
+       IMG_UINT32                                                      ui32TEMTILE2;
+       IMG_UINT32                                                      ui32ISPMergeLowerX;
+       IMG_UINT32                                                      ui32ISPMergeLowerY;
+       IMG_UINT32                                                      ui32ISPMergeUpperX;
+       IMG_UINT32                                                      ui32ISPMergeUpperY;
+       IMG_UINT32                                                      ui32ISPMergeScaleX;
+       IMG_UINT32                                                      ui32ISPMergeScaleY;
+       IMG_UINT32                                                      uiRgnHeaderSize;
+       IMG_UINT32                                                      ui32ISPMtileSize;
+} UNCACHED_ALIGN RGXFWIF_HWRTDATA_COMMON;
+
+/*!
+ * @InGroup RenderTarget
+ * @Brief Firmware Render Target data i.e. HWRTDATA used to hold the PM context
+ */
+typedef struct
+{
+       IMG_DEV_VIRTADDR                RGXFW_ALIGN psPMMListDevVAddr;                  /*!< MList Data Store */
+
+       IMG_UINT64                      RGXFW_ALIGN ui64VCECatBase[4];                  /*!< VCE Page Catalogue base */
+       IMG_UINT64                      RGXFW_ALIGN ui64VCELastCatBase[4];
+       IMG_UINT64                      RGXFW_ALIGN ui64TECatBase[4];                   /*!< TE Page Catalogue base */
+       IMG_UINT64                      RGXFW_ALIGN ui64TELastCatBase[4];
+       IMG_UINT64                      RGXFW_ALIGN ui64AlistCatBase;                   /*!< Alist Page Catalogue base */
+       IMG_UINT64                      RGXFW_ALIGN ui64AlistLastCatBase;
+
+       IMG_UINT64                      RGXFW_ALIGN ui64PMAListStackPointer;            /*!< Freelist page table entry for current Mlist page  */
+       IMG_UINT32                      ui32PMMListStackPointer;                        /*!< Current Mlist page */
+
+       RGXFWIF_DEV_VIRTADDR            sHWRTDataCommonFwAddr;                          /*!< Render target dimension dependent data */
+
+       IMG_UINT32                      ui32HWRTDataFlags;
+       RGXFWIF_RTDATA_STATE            eState;                                         /*!< Current workload processing state of HWRTDATA */
+
+       PRGXFWIF_FREELIST               RGXFW_ALIGN apsFreeLists[RGXFW_MAX_FREELISTS];  /*!< Freelist to use */
+       IMG_UINT32                      aui32FreeListHWRSnapshot[RGXFW_MAX_FREELISTS];
+
+       IMG_DEV_VIRTADDR                RGXFW_ALIGN psVHeapTableDevVAddr;               /*!< VHeap table base */
+
+       RGXFWIF_CLEANUP_CTL             sCleanupState;                                  /*!< Render target clean up state */
+
+       RGXFWIF_RTA_CTL                 sRTACtl;                                        /*!< Render target array data */
+
+       IMG_DEV_VIRTADDR                RGXFW_ALIGN sTailPtrsDevVAddr;                  /*!< Tail pointers base */
+       IMG_DEV_VIRTADDR                RGXFW_ALIGN sMacrotileArrayDevVAddr;            /*!< Macrotiling array base */
+       IMG_DEV_VIRTADDR                RGXFW_ALIGN sRgnHeaderDevVAddr;                 /*!< Region headers base */
+       IMG_DEV_VIRTADDR                RGXFW_ALIGN sRTCDevVAddr;                       /*!< Render target cache base */
+#if defined(RGX_FIRMWARE)
+       struct RGXFWIF_FWCOMMONCONTEXT_* RGXFW_ALIGN psOwnerGeom;
+#else
+       RGXFWIF_DEV_VIRTADDR            RGXFW_ALIGN pui32OwnerGeomNotUsedByHost;
+#endif
+#if defined(SUPPORT_TRP)
+       IMG_UINT32                      ui32KickFlagsCopy;
+       IMG_UINT32                      ui32TRPState;
+       IMG_UINT32                      ui32TEPageCopy;
+       IMG_UINT32                      ui32VCEPageCopy;
+#endif
+#if defined(SUPPORT_AGP)
+       IMG_BOOL                        bTACachesNeedZeroing;
+#endif
+} UNCACHED_ALIGN RGXFWIF_HWRTDATA;
+
+/* Sync_checkpoint firmware object.
+ * This is the FW-addressable structure used to hold the sync checkpoint's
+ * state and other information which needs to be accessed by the firmware.
+ */
+typedef struct
+{
+       IMG_UINT32      ui32State;          /*!< Holds the current state of the sync checkpoint */
+       IMG_UINT32      ui32FwRefCount;     /*!< Holds the FW reference count (num of fences/updates processed) */
+} SYNC_CHECKPOINT_FW_OBJ;
+
+/* Bit mask Firmware can use to test if a checkpoint has signalled or errored */
+#define SYNC_CHECKPOINT_SIGNALLED_MASK (0x1 << 0)
+
+#endif /* RGX_FWIF_KM_H */
+
+/******************************************************************************
+ End of file (rgx_fwif_km.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/include/rogue/rgx_fwif_shared.h b/drivers/gpu/drm/img/img-rogue/include/rogue/rgx_fwif_shared.h
new file mode 100644 (file)
index 0000000..13844ad
--- /dev/null
@@ -0,0 +1,335 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX firmware interface structures
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX firmware interface structures shared by both host client
+                and host server
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGX_FWIF_SHARED_H)
+#define RGX_FWIF_SHARED_H
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "rgx_common.h"
+#include "powervr/mem_types.h"
+
+/* Indicates the number of RTDATAs per RTDATASET */
+#if defined(SUPPORT_AGP)
+#define RGXMKIF_NUM_RTDATAS           4U
+#define RGXMKIF_NUM_GEOMDATAS         4U
+#define RGXMKIF_NUM_RTDATA_FREELISTS  12U /* RGXMKIF_NUM_RTDATAS * RGXFW_MAX_FREELISTS */
+#define RGX_NUM_GEOM_CORES           (2U)
+#else
+#define RGXMKIF_NUM_RTDATAS           2U
+#define RGXMKIF_NUM_GEOMDATAS         1U
+#define RGXMKIF_NUM_RTDATA_FREELISTS  2U  /* RGXMKIF_NUM_RTDATAS * RGXFW_MAX_FREELISTS */
+#define RGX_NUM_GEOM_CORES           (1U)
+#endif
+
+/* Maximum number of UFOs in a CCB command.
+ * The number is based on having 32 sync prims (as originally), plus 32 sync
+ * checkpoints.
+ * Once the use of sync prims is no longer supported, we will retain the same
+ * total (64), because the number of sync checkpoints which may be supporting
+ * a fence is not visible to the client driver and has to allow for the number
+ * of different timelines involved in fence merges.
+ */
+#define RGXFWIF_CCB_CMD_MAX_UFOS                       (32U+32U)
+
+/*
+ * This is a generic limit imposed on any DM (TA,3D,CDM,TDM,2D,TRANSFER)
+ * command passed through the bridge.
+ * Just across the bridge in the server, any incoming kick command size is
+ * checked against this maximum limit.
+ * If the incoming command size is larger than the specified limit,
+ * the bridge call is rejected with an error.
+ */
+#define RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE   (1024U)
+
+typedef struct RGXFWIF_DEV_VIRTADDR_
+{
+       IMG_UINT32      ui32Addr;
+} RGXFWIF_DEV_VIRTADDR;
+
+typedef struct
+{
+       IMG_DEV_VIRTADDR        RGXFW_ALIGN psDevVirtAddr;
+       RGXFWIF_DEV_VIRTADDR    pbyFWAddr;
+} UNCACHED_ALIGN RGXFWIF_DMA_ADDR;
+
+typedef IMG_UINT8      RGXFWIF_CCCB;
+
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_UFO_ADDR;
+typedef RGXFWIF_DEV_VIRTADDR  PRGXFWIF_CLEANUP_CTL;
+
+
+/*!
+ * @InGroup ClientCCBTypes
+ * @Brief Command data for fence & update types Client CCB commands.
+ */
+typedef struct
+{
+       PRGXFWIF_UFO_ADDR       puiAddrUFO; /*!< Address to be checked/updated */
+       IMG_UINT32                      ui32Value;  /*!< Value to check-against/update-to */
+} RGXFWIF_UFO;
+
+typedef struct
+{
+       IMG_UINT32                      ui32SubmittedCommands;  /*!< Number of commands received by the FW */
+       IMG_UINT32                      ui32ExecutedCommands;   /*!< Number of commands executed by the FW */
+} UNCACHED_ALIGN RGXFWIF_CLEANUP_CTL;
+
+#define        RGXFWIF_PRBUFFER_START        IMG_UINT32_C(0)
+#define        RGXFWIF_PRBUFFER_ZSBUFFER     IMG_UINT32_C(0)
+#define        RGXFWIF_PRBUFFER_MSAABUFFER   IMG_UINT32_C(1)
+#define        RGXFWIF_PRBUFFER_MAXSUPPORTED IMG_UINT32_C(2)
+
+typedef IMG_UINT32 RGXFWIF_PRBUFFER_TYPE;
+
+typedef enum
+{
+       RGXFWIF_PRBUFFER_UNBACKED = 0,
+       RGXFWIF_PRBUFFER_BACKED,
+       RGXFWIF_PRBUFFER_BACKING_PENDING,
+       RGXFWIF_PRBUFFER_UNBACKING_PENDING,
+} RGXFWIF_PRBUFFER_STATE;
+
+/*!
+ * @InGroup RenderTarget
+ * @Brief OnDemand Z/S/MSAA Buffers
+ */
+typedef struct
+{
+       IMG_UINT32              ui32BufferID;           /*!< Buffer ID*/
+       IMG_BOOL                bOnDemand;              /*!< Needs On-demand Z/S/MSAA Buffer allocation */
+       RGXFWIF_PRBUFFER_STATE  eState;                 /*!< Z/S/MSAA -Buffer state */
+       RGXFWIF_CLEANUP_CTL     sCleanupState;          /*!< Cleanup state */
+       IMG_UINT32              ui32PRBufferFlags;      /*!< Compatibility and other flags */
+} UNCACHED_ALIGN RGXFWIF_PRBUFFER;
+
+/*
+ * Used to share frame numbers across UM-KM-FW.
+ * The frame number is set in UM and is required both in the KM (for HTB)
+ * and in the FW (for the FW trace).
+ *
+ * May be used to house Kick flags in the future.
+ */
+typedef struct
+{
+       IMG_UINT32 ui32FrameNum; /*!< associated frame number */
+} CMD_COMMON;
+
+/*
+ * TA and 3D commands require a set of firmware addresses that are stored in
+ * the Kernel. The Client has handle(s) to the Kernel containers storing these
+ * addresses, instead of the raw addresses. We have to patch/write these
+ * addresses in the KM to prevent UM from controlling FW addresses directly.
+ * Typedefs for TA and 3D commands are shared between Client and Firmware (both
+ * single-BVNC). The Kernel is implemented in a multi-BVNC manner, so it can't
+ * use TA|3D CMD type definitions directly. Therefore we have a SHARED block
+ * that is shared between UM-KM-FW across all BVNC configurations.
+ */
+typedef struct
+{
+       CMD_COMMON           sCmn;      /*!< Common command attributes */
+       RGXFWIF_DEV_VIRTADDR sHWRTData; /* RTData associated with this command;
+                                          used for context selection and for storing out the HW context
+                                          when the TA is switched out, so it can be continued later */
+
+       RGXFWIF_DEV_VIRTADDR asPRBuffer[RGXFWIF_PRBUFFER_MAXSUPPORTED]; /* Supported PR Buffers like Z/S/MSAA Scratch */
+
+} CMDTA3D_SHARED;
+
+/*!
+ * Client Circular Command Buffer (CCCB) control structure.
+ * This is shared between the Server and the Firmware and holds byte offsets
+ * into the CCCB as well as the wrapping mask to aid wrap around. A given
+ * snapshot of this queue with Cmd 1 running on the GPU might be:
+ *
+ *          Roff                           Doff                 Woff
+ * [..........|-1----------|=2===|=3===|=4===|~5~~~~|~6~~~~|~7~~~~|..........]
+ *            <      runnable commands       ><   !ready to run   >
+ *
+ * Cmd 1    : Currently executing on the GPU data master.
+ * Cmd 2,3,4: Fence dependencies met, commands runnable.
+ * Cmd 5... : Fence dependency not met yet.
+ */
+typedef struct
+{
+       IMG_UINT32  ui32WriteOffset;    /*!< Host write offset into CCB. This
+                                        *    must be aligned to 16 bytes. */
+       IMG_UINT32  ui32ReadOffset;     /*!< Firmware read offset into CCB.
+                                             Points to the command that is
+                                        *    runnable on GPU, if R!=W */
+       IMG_UINT32  ui32DepOffset;      /*!< Firmware fence dependency offset.
+                                        *    Points to commands not ready, i.e.
+                                        *    fence dependencies are not met. */
+       IMG_UINT32  ui32WrapMask;       /*!< Offset wrapping mask: the total
+                                             capacity of the CCB in bytes, minus 1 */
+#if defined(SUPPORT_AGP)
+       IMG_UINT32  ui32ReadOffset2;
+#if defined(SUPPORT_AGP4)
+       IMG_UINT32  ui32ReadOffset3;
+       IMG_UINT32  ui32ReadOffset4;
+#endif
+#endif
+
+} UNCACHED_ALIGN RGXFWIF_CCCB_CTL;
+
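+/* Illustrative sketch (not part of the original header; names below are
+ * hypothetical): advancing a CCCB offset with the wrap mask above. Because
+ * ui32WrapMask is the CCB capacity minus one (a power of two minus one),
+ * the wrap is a single AND:
+ *
+ *   static IMG_UINT32 CCCBAdvanceOffset(const RGXFWIF_CCCB_CTL *psCtl,
+ *                                       IMG_UINT32 ui32Offset,
+ *                                       IMG_UINT32 ui32CmdBytes)
+ *   {
+ *       return (ui32Offset + ui32CmdBytes) & psCtl->ui32WrapMask;
+ *   }
+ *
+ * e.g. with a 4 KB CCB (ui32WrapMask == 0xFFF), an offset of 0xFF0 plus a
+ * 0x20-byte command wraps to 0x010.
+ */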
+
+typedef IMG_UINT32 RGXFW_FREELIST_TYPE;
+
+#define RGXFW_LOCAL_FREELIST     IMG_UINT32_C(0)
+#define RGXFW_GLOBAL_FREELIST    IMG_UINT32_C(1)
+#if defined(SUPPORT_AGP)
+#define RGXFW_GLOBAL2_FREELIST   IMG_UINT32_C(2)
+#define RGXFW_MAX_FREELISTS      (RGXFW_GLOBAL2_FREELIST + 1U)
+#else
+#define RGXFW_MAX_FREELISTS      (RGXFW_GLOBAL_FREELIST + 1U)
+#endif
+#define RGXFW_MAX_HWFREELISTS    (2U)
+
+/*!
+ * @Defgroup ContextSwitching Context switching data interface
+ * @Brief Types grouping data structures and defines used in realising the Context Switching (CSW) functionality
+ * @{
+ */
+
+/*!
+ * @Brief GEOM DM or TA register controls for context switch
+ */
+typedef struct
+{
+       IMG_UINT64      uTAReg_VDM_CONTEXT_STATE_BASE_ADDR; /*!< The base address of the VDM's context state buffer */
+       IMG_UINT64      uTAReg_VDM_CONTEXT_STATE_RESUME_ADDR;
+       IMG_UINT64      uTAReg_TA_CONTEXT_STATE_BASE_ADDR; /*!< The base address of the TA's context state buffer */
+
+       struct
+       {
+               IMG_UINT64      uTAReg_VDM_CONTEXT_STORE_TASK0; /*!< VDM context store task 0 */
+               IMG_UINT64      uTAReg_VDM_CONTEXT_STORE_TASK1; /*!< VDM context store task 1 */
+               IMG_UINT64      uTAReg_VDM_CONTEXT_STORE_TASK2; /*!< VDM context store task 2 */
+
+               /* VDM resume state update controls */
+               IMG_UINT64      uTAReg_VDM_CONTEXT_RESUME_TASK0; /*!< VDM context resume task 0 */
+               IMG_UINT64      uTAReg_VDM_CONTEXT_RESUME_TASK1; /*!< VDM context resume task 1 */
+               IMG_UINT64      uTAReg_VDM_CONTEXT_RESUME_TASK2; /*!< VDM context resume task 2 */
+
+               IMG_UINT64      uTAReg_VDM_CONTEXT_STORE_TASK3;
+               IMG_UINT64      uTAReg_VDM_CONTEXT_STORE_TASK4;
+
+               IMG_UINT64      uTAReg_VDM_CONTEXT_RESUME_TASK3;
+               IMG_UINT64      uTAReg_VDM_CONTEXT_RESUME_TASK4;
+       } asTAState[2];
+
+} RGXFWIF_TAREGISTERS_CSWITCH;
+/*! @} End of Defgroup ContextSwitching */
+
+#define RGXFWIF_TAREGISTERS_CSWITCH_SIZE sizeof(RGXFWIF_TAREGISTERS_CSWITCH)
+
+typedef struct
+{
+       IMG_UINT64      uCDMReg_CDM_CONTEXT_PDS0;
+       IMG_UINT64      uCDMReg_CDM_CONTEXT_PDS1;
+       IMG_UINT64      uCDMReg_CDM_TERMINATE_PDS;
+       IMG_UINT64      uCDMReg_CDM_TERMINATE_PDS1;
+
+       /* CDM resume controls */
+       IMG_UINT64      uCDMReg_CDM_RESUME_PDS0;
+       IMG_UINT64      uCDMReg_CDM_CONTEXT_PDS0_B;
+       IMG_UINT64      uCDMReg_CDM_RESUME_PDS0_B;
+
+} RGXFWIF_CDM_REGISTERS_CSWITCH;
+
+/*!
+ * @InGroup ContextSwitching
+ * @Brief Render context static register controls for context switch
+ */
+typedef struct
+{
+       RGXFWIF_TAREGISTERS_CSWITCH     RGXFW_ALIGN asCtxSwitch_GeomRegs[RGX_NUM_GEOM_CORES];   /*!< Geom registers for ctx switch */
+} RGXFWIF_STATIC_RENDERCONTEXT_STATE;
+
+#define RGXFWIF_STATIC_RENDERCONTEXT_SIZE sizeof(RGXFWIF_STATIC_RENDERCONTEXT_STATE)
+
+typedef struct
+{
+       RGXFWIF_CDM_REGISTERS_CSWITCH   RGXFW_ALIGN sCtxSwitch_Regs;    /*!< CDM registers for ctx switch */
+} RGXFWIF_STATIC_COMPUTECONTEXT_STATE;
+
+#define RGXFWIF_STATIC_COMPUTECONTEXT_SIZE sizeof(RGXFWIF_STATIC_COMPUTECONTEXT_STATE)
+
+/*!
+       @Brief Context reset reason. Last reset reason for a reset context.
+*/
+typedef enum
+{
+       RGX_CONTEXT_RESET_REASON_NONE                = 0,       /*!< No reset reason recorded */
+       RGX_CONTEXT_RESET_REASON_GUILTY_LOCKUP       = 1,       /*!< Caused a reset due to locking up */
+       RGX_CONTEXT_RESET_REASON_INNOCENT_LOCKUP     = 2,       /*!< Affected by another context locking up */
+       RGX_CONTEXT_RESET_REASON_GUILTY_OVERRUNING   = 3,       /*!< Overran the global deadline */
+       RGX_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING = 4,       /*!< Affected by another context overrunning */
+       RGX_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH = 5,       /*!< Forced reset to ensure scheduling requirements */
+       RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM        = 6,       /*!< CDM Mission/safety checksum mismatch */
+       RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM        = 7,       /*!< TRP checksum mismatch */
+       RGX_CONTEXT_RESET_REASON_GPU_ECC_OK          = 8,       /*!< GPU ECC error (corrected, OK) */
+       RGX_CONTEXT_RESET_REASON_GPU_ECC_HWR         = 9,       /*!< GPU ECC error (uncorrected, HWR) */
+       RGX_CONTEXT_RESET_REASON_FW_ECC_OK           = 10,      /*!< FW ECC error (corrected, OK) */
+       RGX_CONTEXT_RESET_REASON_FW_ECC_ERR          = 11,      /*!< FW ECC error (uncorrected, ERR) */
+       RGX_CONTEXT_RESET_REASON_FW_WATCHDOG         = 12,      /*!< FW Safety watchdog triggered */
+       RGX_CONTEXT_RESET_REASON_FW_PAGEFAULT        = 13,      /*!< FW page fault (no HWR) */
+       RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR         = 14,      /*!< FW execution error (GPU reset requested) */
+       RGX_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR     = 15,      /*!< Host watchdog detected FW error */
+       RGX_CONTEXT_GEOM_OOM_DISABLED                = 16,      /*!< Geometry DM OOM event is not allowed */
+} RGX_CONTEXT_RESET_REASON;
+
+/*!
+       @Brief Context reset data shared with the host
+*/
+typedef struct
+{
+       RGX_CONTEXT_RESET_REASON eResetReason; /*!< Reset reason */
+       IMG_UINT32 ui32ResetExtJobRef;  /*!< External Job ID */
+} RGX_CONTEXT_RESET_REASON_DATA;
+#endif /*  RGX_FWIF_SHARED_H */
+
+/******************************************************************************
+ End of file (rgx_fwif_shared.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/include/rogue/rgx_heaps.h b/drivers/gpu/drm/img/img-rogue/include/rogue/rgx_heaps.h
new file mode 100644 (file)
index 0000000..e41e400
--- /dev/null
@@ -0,0 +1,68 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX heap definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGX_HEAPS_H)
+#define RGX_HEAPS_H
+
+/*
+  Identify heaps by their names
+*/
+#define RGX_GENERAL_SVM_HEAP_IDENT          "General SVM"               /*!< RGX General SVM (shared virtual memory) Heap Identifier */
+#define RGX_GENERAL_HEAP_IDENT              "General"                   /*!< RGX General Heap Identifier */
+#define RGX_GENERAL_NON4K_HEAP_IDENT        "General NON-4K"            /*!< RGX General non-4K Heap Identifier */
+#define RGX_PDSCODEDATA_HEAP_IDENT          "PDS Code and Data"         /*!< RGX PDS Code/Data Heap Identifier */
+#define RGX_USCCODE_HEAP_IDENT              "USC Code"                  /*!< RGX USC Code Heap Identifier */
+#define RGX_VK_CAPT_REPLAY_HEAP_IDENT       "Vulkan Capture Replay"     /*!< RGX Vulkan capture replay buffer Heap Identifier */
+#define RGX_SIGNALS_HEAP_IDENT              "Signals"                   /*!< Signals Heap Identifier */
+#define RGX_FBCDC_HEAP_IDENT                "FBCDC"                     /*!< RGX FBCDC State Table Heap Identifier */
+#define RGX_FBCDC_LARGE_HEAP_IDENT          "Large FBCDC"               /*!< RGX Large FBCDC State Table Heap Identifier */
+#define RGX_CMP_MISSION_RMW_HEAP_IDENT      "Compute Mission RMW"       /*!< Compute Mission RMW Heap Identifier */
+#define RGX_CMP_SAFETY_RMW_HEAP_IDENT       "Compute Safety RMW"        /*!< Compute Safety RMW Heap Identifier */
+#define RGX_TEXTURE_STATE_HEAP_IDENT        "Texture State"             /*!< Texture State Heap Identifier */
+#define RGX_VISIBILITY_TEST_HEAP_IDENT      "Visibility Test"           /*!< Visibility Test Heap Identifier */
+
+/* Services client internal heap identification */
+#define RGX_RGNHDR_BRN_63142_HEAP_IDENT     "RgnHdr BRN63142"           /*!< RGX RgnHdr BRN63142 Heap Identifier */
+#define RGX_TQ3DPARAMETERS_HEAP_IDENT       "TQ3DParameters"            /*!< RGX TQ 3D Parameters Heap Identifier */
+#define RGX_MMU_INIA_BRN_65273_HEAP_IDENT   "MMU INIA BRN65273"         /*!< MMU BRN65273 Heap A Identifier */
+#define RGX_MMU_INIB_BRN_65273_HEAP_IDENT   "MMU INIB BRN65273"         /*!< MMU BRN65273 Heap B Identifier */
+#endif /* RGX_HEAPS_H */
diff --git a/drivers/gpu/drm/img/img-rogue/include/rogue/rgx_hwperf.h b/drivers/gpu/drm/img/img-rogue/include/rogue/rgx_hwperf.h
new file mode 100644 (file)
index 0000000..fa711b0
--- /dev/null
@@ -0,0 +1,1607 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX HWPerf and Debug Types and Defines Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Common data types definitions for hardware performance API
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGX_HWPERF_H_
+#define RGX_HWPERF_H_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* These structures are used on both the GPU and the CPU and must have a size
+ * that is a multiple of 64 bits (8 bytes) so the FW can write 8-byte
+ * quantities to 8-byte-aligned addresses. RGX_FW_STRUCT_*_ASSERT() is used to
+ * check this.
+ */
+
+/******************************************************************************
+ * Includes and Defines
+ *****************************************************************************/
+
+#include "img_types.h"
+#include "img_defs.h"
+
+#include "rgx_common.h"
+#include "rgx_hwperf_common.h"
+#include "pvrsrv_tlcommon.h"
+#include "pvrsrv_sync_km.h"
+
+
+#if !defined(__KERNEL__)
+/* User-mode and Firmware definitions only */
+
+#if defined(RGX_BVNC_CORE_KM_HEADER) && defined(RGX_BNC_CONFIG_KM_HEADER)
+
+/* HWPerf interface assumption checks */
+static_assert(RGX_FEATURE_NUM_CLUSTERS <= 16U, "Cluster count too large for HWPerf protocol definition");
+
+/*! The number of indirectly addressable TPU_MSC blocks in the GPU */
+# define RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST MAX(((IMG_UINT32)RGX_FEATURE_NUM_CLUSTERS >> 1), 1U)
+
+/*! The number of indirectly addressable USC blocks in the GPU */
+# define RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER (RGX_FEATURE_NUM_CLUSTERS)
+
+# if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+
+ /*! Defines the number of performance counter blocks that are directly
+  * addressable in the RGX register map for S7 top infrastructure. */
+#  define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS      1 /* JONES */
+#  define RGX_HWPERF_INDIRECT_BY_PHANTOM       (RGX_NUM_PHANTOMS)
+#  define RGX_HWPERF_PHANTOM_NONDUST_BLKS      1 /* BLACKPEARL */
+#  define RGX_HWPERF_PHANTOM_DUST_BLKS         2 /* TPU, TEXAS */
+#  define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 2 /* USC, PBE */
+
+# elif defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE)
+
+ /*! Defines the number of performance counter blocks that are directly
+  * addressable in the RGX register map. */
+#  define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS      2 /* TORNADO, TA */
+
+#  define RGX_HWPERF_INDIRECT_BY_PHANTOM       (RGX_NUM_PHANTOMS)
+#  define RGX_HWPERF_PHANTOM_NONDUST_BLKS      2 /* RASTER, TEXAS */
+#  define RGX_HWPERF_PHANTOM_DUST_BLKS         1 /* TPU */
+#  define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 1 /* USC */
+
+# else /* !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && !defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) i.e. S6 */
+
+ /*! Defines the number of performance counter blocks that are
+  * addressable in the RGX register map for Series 6. */
+#  define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS      3 /* TA, RASTER, HUB */
+#  define RGX_HWPERF_INDIRECT_BY_PHANTOM       0 /* PHANTOM is not present in Rogue1; kept to match the naming of later series (Rogue XT and Rogue XT+) */
+#  define RGX_HWPERF_PHANTOM_NONDUST_BLKS      0
+#  define RGX_HWPERF_PHANTOM_DUST_BLKS         1 /* TPU */
+#  define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 1 /* USC */
+
+# endif
+
+/*! The number of performance counters in each layout block defined for UM/FW code */
+#if defined(RGX_FEATURE_CLUSTER_GROUPING)
+  #define RGX_HWPERF_CNTRS_IN_BLK 6
+#else
+  #define RGX_HWPERF_CNTRS_IN_BLK 4
+#endif
+
+#endif /* #if defined(RGX_BVNC_CORE_KM_HEADER) && defined(RGX_BNC_CONFIG_KM_HEADER) */
+#else /* defined(__KERNEL__) */
+/* Kernel/server definitions - not used, hence invalid definitions */
+
+# define RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC 0xFF
+
+# define RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST    RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC
+# define RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC
+
+# define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS      RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC
+# define RGX_HWPERF_INDIRECT_BY_PHANTOM       RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC
+# define RGX_HWPERF_PHANTOM_NONDUST_BLKS      RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC
+# define RGX_HWPERF_PHANTOM_DUST_BLKS         RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC
+# define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC
+
+#endif
+
+/*! The number of custom non-mux counter blocks supported */
+#define RGX_HWPERF_MAX_CUSTOM_BLKS 5U
+
+/*! The number of counters supported in each non-mux counter block */
+#define RGX_HWPERF_MAX_CUSTOM_CNTRS 8U
+
+/*! The number of directly-addressable counters allowed in non-mux counter blocks */
+#define RGX_CNTBLK_COUNTERS_MAX ((IMG_UINT32)PVRSRV_HWPERF_COUNTERS_PERBLK + 0U)
+
+
+/******************************************************************************
+ * Data Stream Common Types
+ *****************************************************************************/
+
+/*! All the Data Masters HWPerf is aware of. When a new DM is added to this
+ * list, it should be appended at the end to maintain backward compatibility
+ * of HWPerf data.
+ */
+typedef enum {
+
+       RGX_HWPERF_DM_GP,
+       RGX_HWPERF_DM_2D,
+       RGX_HWPERF_DM_TA,
+       RGX_HWPERF_DM_3D,
+       RGX_HWPERF_DM_CDM,
+       RGX_HWPERF_DM_RTU,
+       RGX_HWPERF_DM_SHG,
+       RGX_HWPERF_DM_TDM,
+
+       RGX_HWPERF_DM_LAST,
+
+       RGX_HWPERF_DM_INVALID = 0x1FFFFFFF
+} RGX_HWPERF_DM;
+
+/*! Defines giving the bit positions of the 32-bit feature flags used in HWPerf and the API */
+typedef IMG_UINT32 RGX_HWPERF_FEATURE_FLAGS;
+#define RGX_HWPERF_FEATURE_PERFBUS_FLAG                0x0001U
+#define RGX_HWPERF_FEATURE_S7_TOP_INFRASTRUCTURE_FLAG  0x0002U
+#define RGX_HWPERF_FEATURE_XT_TOP_INFRASTRUCTURE_FLAG  0x0004U
+#define RGX_HWPERF_FEATURE_PERF_COUNTER_BATCH_FLAG     0x0008U
+#define RGX_HWPERF_FEATURE_ROGUEXE_FLAG                0x0010U
+#define RGX_HWPERF_FEATURE_DUST_POWER_ISLAND_S7_FLAG   0x0020U
+#define RGX_HWPERF_FEATURE_PBE2_IN_XE_FLAG             0x0040U
+#define RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION         0x0080U
+#define RGX_HWPERF_FEATURE_MULTICORE_FLAG              0x0100U
+#define RGX_HWPERF_FEATURE_VOLCANIC_FLAG               0x0800U
+#define RGX_HWPERF_FEATURE_ROGUE_FLAG                  0x1000U
+#define RGX_HWPERF_FEATURE_OCEANIC_FLAG                0x2000U
+
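+/* Illustrative sketch (not part of the original header): the flags above are
+ * plain bit positions, so a feature test is a bitwise AND against a flags
+ * word such as ui32BvncKmFeatureFlags in RGX_HWPERF_BVNC further below, e.g.
+ *
+ *   if (ui32Flags & RGX_HWPERF_FEATURE_MULTICORE_FLAG)
+ *   {
+ *       // multicore GPU events may carry a ui32GPUIdMask
+ *   }
+ *
+ * ui32Flags is a hypothetical local variable holding such a flags word.
+ */
+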
+/*! This structure holds the data of a firmware packet. */
+typedef struct
+{
+       RGX_HWPERF_DM eDM;                              /*!< DataMaster identifier, see RGX_HWPERF_DM */
+       IMG_UINT32 ui32TxtActCyc;               /*!< Meta TXTACTCYC register value */
+       IMG_UINT32 ui32FWPerfCount0;    /*!< Meta/MIPS PERF_COUNT0 register */
+       IMG_UINT32 ui32FWPerfCount1;    /*!< Meta/MIPS PERF_COUNT1 register */
+       IMG_UINT32 ui32TimeCorrIndex;   /*!< Internal field */
+       IMG_UINT32 ui32Padding;                 /*!< Reserved */
+} RGX_HWPERF_FW_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FW_DATA);
+
+/*! This structure holds the data of a hardware packet, including counters. */
+typedef struct
+{
+       IMG_UINT32 ui32DMCyc;         /*!< DataMaster cycle count register, 0 if none */
+       IMG_UINT32 ui32FrameNum;      /*!< Frame number, undefined on some DataMasters */
+       IMG_UINT32 ui32PID;           /*!< Process identifier */
+       IMG_UINT32 ui32DMContext;     /*!< GPU Data Master (FW) Context */
+       IMG_UINT32 ui32WorkTarget;    /*!< Render Target for TA/3D; Frame context for RTU; 0x0 otherwise */
+       IMG_UINT32 ui32ExtJobRef;     /*!< Client driver context job reference used for tracking/debugging */
+       IMG_UINT32 ui32IntJobRef;     /*!< RGX Data master context job reference used for tracking/debugging */
+       IMG_UINT32 ui32TimeCorrIndex; /*!< Index to the time correlation at the time the packet was generated */
+       IMG_UINT32 ui32BlkInfo;       /*!< <31..16> NumBlocks <15..0> Counter block stream offset */
+       IMG_UINT32 ui32WorkCtx;       /*!< Work context: Render Context for TA/3D; RayTracing Context for RTU/SHG; 0x0 otherwise */
+       IMG_UINT32 ui32CtxPriority;   /*!< Context priority */
+       IMG_UINT32 ui32GPUIdMask;     /*!< GPU IDs active within this event */
+       IMG_UINT32 ui32KickInfo;      /*!< <31..8> Reserved <7..0> GPU Pipeline DM kick ID, 0 if not using Pipeline DMs */
+       IMG_UINT32 ui32Padding;       /*!< Reserved. To ensure correct alignment */
+       IMG_UINT32 aui32CountBlksStream[RGX_HWPERF_ZERO_OR_MORE_ELEMENTS]; /*!< Optional variable length Counter data */
+       IMG_UINT32 ui32Padding2;      /*!< Reserved. To ensure correct alignment (not written in the packet) */
+} RGX_HWPERF_HW_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_HW_DATA);
+RGX_FW_STRUCT_OFFSET_ASSERT(RGX_HWPERF_HW_DATA, aui32CountBlksStream);
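+
+/* Illustrative sketch (not part of the original header): splitting the two
+ * packed fields of RGX_HWPERF_HW_DATA documented above. psHWData is a
+ * hypothetical pointer to a decoded packet.
+ *
+ *   IMG_UINT32 ui32NumBlocks = psHWData->ui32BlkInfo >> 16;       // bits 31..16
+ *   IMG_UINT32 ui32BlkOffset = psHWData->ui32BlkInfo & 0xFFFFU;   // bits 15..0
+ *   IMG_UINT32 ui32KickId    = psHWData->ui32KickInfo & 0xFFU;    // bits 7..0
+ */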
+
+typedef struct
+{
+       IMG_UINT32 ui32DMCyc;         /*!< DataMaster cycle count register, 0 if none */
+       IMG_UINT32 ui32FrameNum;      /*!< Frame number, undefined on some DataMasters */
+       IMG_UINT32 ui32PID;           /*!< Process identifier */
+       IMG_UINT32 ui32DMContext;     /*!< GPU Data Master (FW) Context */
+       IMG_UINT32 ui32WorkTarget[4]; /*!< Render Target for TA/3D; Frame context for RTU; 0x0 otherwise */
+                                     /*!< V2A Block count / Client driver context job reference used for tracking/debugging */
+                                     /*!< RGX Data master context job reference used for tracking/debugging */
+                                     /*!< V2 Block count / Index to the time correlation at the time the packet was generated */
+} RGX_HWPERF_HW_DATA_V2;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_HW_DATA_V2);
+
+/*! Mask for use with the aui32CountBlksStream field when decoding the
+ * counter block ID and mask word. */
+#define RGX_HWPERF_CNTBLK_ID_MASK      0xFFFF0000U
+#define RGX_HWPERF_CNTBLK_ID_SHIFT     16U
+
+/*! Obtains the counter block ID word from an aui32CountBlksStream field.
+ * The word combines Control bits (15-12), GPU-Id (11-8), Group (7-4), Unit
+ * within group (3-0) */
+#define RGX_HWPERF_GET_CNTBLK_IDW(_word)           ((IMG_UINT16)(((_word)&RGX_HWPERF_CNTBLK_ID_MASK)>>RGX_HWPERF_CNTBLK_ID_SHIFT))
+
+/*! Obtains the counter block ID from the supplied RGX_HWPERF_HW_DATA address
+ * and stream index. May be used in decoding the counter block stream words of
+ * a RGX_HWPERF_HW_DATA structure. */
+#define RGX_HWPERF_GET_CNTBLK_ID(_data_addr, _idx) RGX_HWPERF_GET_CNTBLK_IDW((_data_addr)->aui32CountBlksStream[(_idx)])
+
+/*! Obtains the GPU ID from the supplied RGX_HWPERF_HW_DATA CNTBLK_IDW */
+#define RGX_HWPERF_GET_CNTBLK_GPUW(_word)          ((IMG_UINT16)(((_word)&RGX_CNTBLK_ID_MC_GPU_MASK)>>RGX_CNTBLK_ID_MC_GPU_SHIFT))
+
+#define RGX_HWPERF_GET_CNT_MASKW(_word)           ((IMG_UINT16)((_word)&(~RGX_HWPERF_CNTBLK_ID_MASK)))
+
+/*! Obtains the counter mask from the supplied RGX_HWPERF_HW_DATA address
+ * and stream index. May be used in decoding the counter block stream words
+ * of a RGX_HWPERF_HW_DATA structure. */
+#define RGX_HWPERF_GET_CNT_MASK(_data_addr, _idx) RGX_HWPERF_GET_CNT_MASKW((_data_addr)->aui32CountBlksStream[(_idx)])
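+
+/* Illustrative sketch (not part of the original header): decoding one
+ * aui32CountBlksStream entry with the macros above. psHWData and uiIdx are
+ * hypothetical.
+ *
+ *   IMG_UINT16 ui16BlkIdWord = RGX_HWPERF_GET_CNTBLK_ID(psHWData, uiIdx);
+ *   IMG_UINT16 ui16CntMask   = RGX_HWPERF_GET_CNT_MASK(psHWData, uiIdx);
+ *
+ * ui16BlkIdWord packs Control (15-12), GPU-Id (11-8), Group (7-4) and Unit
+ * (3-0); ui16CntMask is the lower 16 bits of the same stream word.
+ */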
+
+/*! Context switch packet event */
+typedef struct
+{
+       RGX_HWPERF_DM   eDM;                                    /*!< DataMaster identifier, see RGX_HWPERF_DM */
+       IMG_UINT32              ui32DMContext;                  /*!< GPU Data Master (FW) Context */
+       IMG_UINT32              ui32FrameNum;                   /*!< Client Frame number (TA, 3D only) */
+       IMG_UINT32              ui32TxtActCyc;                  /*!< Meta TXTACTCYC register value */
+       IMG_UINT32              ui32PerfCycle;                  /*!< Cycle count. Used to measure HW context store latency */
+       IMG_UINT32              ui32PerfPhase;                  /*!< Phase. Used to determine geometry content */
+       IMG_UINT32              ui32Padding[2];                 /*!< Padding to 8 DWords */
+} RGX_HWPERF_CSW_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CSW_DATA);
+
+/*! Enumeration of clocks supporting this event */
+typedef enum
+{
+       RGX_HWPERF_CLKS_CHG_INVALID = 0,
+
+       RGX_HWPERF_CLKS_CHG_NAME_CORE = 1,
+
+       RGX_HWPERF_CLKS_CHG_LAST,
+} RGX_HWPERF_CLKS_CHG_NAME;
+
+/*! This structure holds the data of a clocks change packet. */
+typedef struct
+{
+       IMG_UINT64                ui64NewClockSpeed;         /*!< New Clock Speed (in Hz) */
+       RGX_HWPERF_CLKS_CHG_NAME  eClockName;                /*!< Clock name */
+       IMG_UINT32                ui32CalibratedClockSpeed;  /*!< Calibrated new GPU clock speed (in Hz) */
+       IMG_UINT64                ui64OSTimeStamp;           /*!< OSTimeStamp sampled by the host */
+       IMG_UINT64                ui64CRTimeStamp;           /*!< CRTimeStamp sampled by the host and
+                                                                 correlated to OSTimeStamp */
+} RGX_HWPERF_CLKS_CHG_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CLKS_CHG_DATA);
+
+/*! Enumeration of GPU utilisation states supported by this event */
+typedef IMG_UINT32 RGX_HWPERF_GPU_STATE;
+
+/*! This structure holds the data of a GPU utilisation state change packet. */
+typedef struct
+{
+       RGX_HWPERF_GPU_STATE    eState;         /*!< New GPU utilisation state */
+       IMG_UINT32                              uiUnused1;      /*!< Padding */
+       IMG_UINT32                              uiUnused2;      /*!< Padding */
+       IMG_UINT32                              uiUnused3;      /*!< Padding */
+} RGX_HWPERF_GPU_STATE_CHG_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_GPU_STATE_CHG_DATA);
+
+
+/*! Signature pattern 'HPE1' found in the first word of a PWR_EST packet data */
+#define HWPERF_PWR_EST_V1_SIG  0x48504531
+
+/*! Macros to obtain a component field from a counter ID word */
+#define RGX_HWPERF_GET_PWR_EST_HIGH_FLAG(_word) (((_word)&0x80000000)>>31)
+#define RGX_HWPERF_GET_PWR_EST_GPUID(_word)     (((_word)&0x70000000)>>28)
+/*!< Obtains the GPU ID from a counter ID word */
+#define RGX_HWPERF_GET_PWR_EST_UNIT(_word)      (((_word)&0x0F000000)>>24)
+#define RGX_HWPERF_GET_PWR_EST_NUMBER(_word)    ((_word)&0x0000FFFF)
+
+#define RGX_HWPERF_PWR_EST_HIGH_OFFSET         (31)
+#define RGX_HWPERF_PWR_EST_GPUID_OFFSET                (28)
+#define RGX_HWPERF_PWR_EST_GPUID_MASK          (0x7U)
+#define RGX_HWPERF_PWR_EST_UNIT_OFFSET         (24)
+#define RGX_HWPERF_PWR_EST_UNIT_MASK           (0xFU)
+#define RGX_HWPERF_PWR_EST_VALUE_MASK          (0xFFFFU)
+
+/*! This macro constructs a counter ID for a power estimate data stream from
+ * the component parts of: high word flag, unit id, GPU id, counter number */
+#define RGX_HWPERF_MAKE_PWR_EST_COUNTERID(_high, _unit, _core, _number) \
+                       ((IMG_UINT32)(((IMG_UINT32)((IMG_UINT32)(_high)&0x1U)<<RGX_HWPERF_PWR_EST_HIGH_OFFSET) | \
+                       ((IMG_UINT32) ((IMG_UINT32)(_unit)&RGX_HWPERF_PWR_EST_UNIT_MASK)<<RGX_HWPERF_PWR_EST_UNIT_OFFSET) | \
+                       ((IMG_UINT32) ((IMG_UINT32)(_core)&RGX_HWPERF_PWR_EST_GPUID_MASK)<<RGX_HWPERF_PWR_EST_GPUID_OFFSET) | \
+                                                  ((_number)&RGX_HWPERF_PWR_EST_VALUE_MASK)))
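+
+/* Illustrative sketch (not part of the original header): composing and then
+ * decomposing a power estimate counter ID with the macros above. The field
+ * values are arbitrary examples.
+ *
+ *   IMG_UINT32 ui32CtrId = RGX_HWPERF_MAKE_PWR_EST_COUNTERID(1U, 2U, 0U, 7U);
+ *   // RGX_HWPERF_GET_PWR_EST_HIGH_FLAG(ui32CtrId) == 1
+ *   // RGX_HWPERF_GET_PWR_EST_UNIT(ui32CtrId)      == 2
+ *   // RGX_HWPERF_GET_PWR_EST_GPUID(ui32CtrId)     == 0
+ *   // RGX_HWPERF_GET_PWR_EST_NUMBER(ui32CtrId)    == 7
+ */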
+
+/*! This structure holds the data for a power estimate packet. */
+typedef struct
+{
+       IMG_UINT32  ui32StreamVersion;  /*!< Version word, HWPERF_PWR_EST_V1_SIG */
+       IMG_UINT32  ui32StreamSize;     /*!< Size of array in bytes of stream data
+                                            held in the aui32StreamData member */
+       IMG_UINT32  aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; /*!< Counter data */
+       IMG_UINT32  ui32Padding; /*!< Reserved. To ensure correct alignment */
+} RGX_HWPERF_PWR_EST_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_PWR_EST_DATA);
+
+/*! Enumeration of the kinds of power change events that can occur */
+typedef enum
+{
+       RGX_HWPERF_PWR_UNDEFINED    = 0,
+       RGX_HWPERF_PWR_ON           = 1, /*!< Whole device powered on */
+       RGX_HWPERF_PWR_OFF          = 2, /*!< Whole device powered off */
+       RGX_HWPERF_PWR_UP           = 3, /*!< Power turned on to a HW domain */
+       RGX_HWPERF_PWR_DOWN         = 4, /*!< Power turned off to a HW domain */
+       RGX_HWPERF_PWR_SAFETY_RESET = 5, /*!< Resetting the GPU HW units for safety reasons */
+       RGX_HWPERF_PWR_PHR_FULL     = 6, /*!< Periodic HW full GPU Reset */
+
+       RGX_HWPERF_PWR_LAST,
+} RGX_HWPERF_PWR;
+
+/*! This structure holds the data of a power packet. */
+typedef struct
+{
+       RGX_HWPERF_PWR eChange;                  /*!< Defines the type of power change */
+       IMG_UINT32     ui32Domains;              /*!< HW Domains affected */
+       IMG_UINT64     ui64OSTimeStamp;          /*!< OSTimeStamp sampled by the host */
+       IMG_UINT64     ui64CRTimeStamp;          /*!< CRTimeStamp sampled by the host and
+                                                     correlated to OSTimeStamp */
+       IMG_UINT32     ui32CalibratedClockSpeed; /*!< GPU clock speed (in Hz) at the time
+                                                     the two timers were correlated */
+       IMG_UINT32     ui32Unused1;              /*!< Padding */
+} RGX_HWPERF_PWR_CHG_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_PWR_CHG_DATA);
+
+
+/*
+ * PDVFS, GPU clock frequency changes and workload estimation profiling
+ * data.
+ */
+/*! DVFS and work estimation events. */
+typedef enum
+{
+       RGX_HWPERF_DVFS_EV_INVALID,                 /*!< Invalid value. */
+       RGX_HWPERF_DVFS_EV_PROACTIVE_EST_START,     /*!< Proactive DVFS estimate start */
+       RGX_HWPERF_DVFS_EV_PROACTIVE_EST_FINISHED,  /*!< Proactive DVFS estimate finished */
+       RGX_HWPERF_DVFS_EV_REACTIVE_EST_START,      /*!< Reactive DVFS estimate start */
+       RGX_HWPERF_DVFS_EV_REACTIVE_EST_FINISHED,   /*!< Reactive DVFS estimate finished */
+       /* workload estimation */
+       RGX_HWPERF_DVFS_EV_WORK_EST_START,          /*!< Workload estimation start */
+       RGX_HWPERF_DVFS_EV_WORK_EST_FINISHED,       /*!< Workload estimation finished */
+       RGX_HWPERF_DVFS_EV_FREQ_CHG,                /*!< DVFS OPP/clock frequency change */
+
+       RGX_HWPERF_DVFS_EV_LAST               /*!< Number of elements. */
+} RGX_HWPERF_DVFS_EV;
+
+/*! Enumeration of DVFS transitions that can occur */
+typedef enum
+{
+       RGX_HWPERF_DVFS_OPP_NONE        = 0x0,  /*!< No OPP change, already operating at required freq */
+#if defined(SUPPORT_PDVFS_IDLE)
+       RGX_HWPERF_DVFS_OPP_IDLE        = 0x1,  /*!< GPU is idle, defer the OPP change */
+#endif
+       /* 0x2 to 0xF reserved */
+       RGX_HWPERF_DVFS_OPP_UPDATE      = 0x10, /*!< OPP change, new point is encoded in bits [3:0] */
+       RGX_HWPERF_DVFS_OPP_LAST        = 0x20,
+} RGX_HWPERF_DVFS_OPP;
+
+typedef union
+{
+       /*! This structure holds the data of a proactive DVFS calculation packet. */
+       struct
+       {
+               IMG_UINT64     ui64DeadlineInus;         /*!< Next deadline in microseconds */
+               IMG_UINT32     ui32Frequency;            /*!< Required freq to meet deadline at 90% utilisation */
+               IMG_UINT32     ui32WorkloadCycles;       /*!< Current workload estimate in cycles */
+               IMG_UINT32     ui32TxtActCyc;            /*!< Meta TXTACTCYC register value */
+       } sProDVFSCalc;
+
+       /*! This structure holds the data of a reactive DVFS calculation packet. */
+       struct
+       {
+               IMG_UINT32     ui32Frequency;            /*!< Required freq to achieve average 90% utilisation */
+               IMG_UINT32     ui32Utilisation;          /*!< GPU utilisation since last update */
+               IMG_UINT32     ui32TxtActCyc;            /*!< Meta TXTACTCYC register value */
+       } sDVFSCalc;
+
+       /*! This structure holds the data of a work estimation packet. */
+       struct
+       {
+               IMG_UINT32     ui32CyclesPrediction;     /*!< Predicted cycle count for this workload */
+               IMG_UINT32     ui32CyclesTaken;          /*!< Actual cycle count for this workload */
+               RGXFWIF_DM     eDM;                      /*!< Target DM */
+               IMG_UINT32     ui32ReturnDataIndex;      /*!< Index into workload estimation table */
+               IMG_UINT32     ui32TxtActCyc;            /*!< Meta TXTACTCYC register value */
+       } sWorkEst;
+
+       /*! This structure holds the data of an OPP clock frequency transition packet. */
+       struct
+       {
+               IMG_UINT32     ui32OPPData;              /*!< OPP transition */
+       } sOPP;
+
+} RGX_HWPERF_DVFS_DETAIL;
+
+/*! DVFS sub-event data structure */
+typedef struct {
+       RGX_HWPERF_DVFS_EV      eEventType;          /*!< DVFS sub-event type */
+       RGX_HWPERF_DVFS_DETAIL  uData;               /*!< DVFS sub-event data */
+} RGX_HWPERF_DVFS_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_DVFS_DATA);
+
+
+/*! Firmware Activity event. */
+typedef enum
+{
+       RGX_HWPERF_FWACT_EV_INVALID,            /*!< Invalid value. */
+       RGX_HWPERF_FWACT_EV_REGS_SET,           /*!< Registers set. */
+       RGX_HWPERF_FWACT_EV_HWR_DETECTED,       /*!< HWR detected. */
+       RGX_HWPERF_FWACT_EV_HWR_RESET_REQUIRED, /*!< Reset required. */
+       RGX_HWPERF_FWACT_EV_HWR_RECOVERED,      /*!< HWR recovered. */
+       RGX_HWPERF_FWACT_EV_HWR_FREELIST_READY, /*!< Freelist ready. */
+       RGX_HWPERF_FWACT_EV_FEATURES,           /*!< Features present */
+       RGX_HWPERF_FWACT_EV_FILTER_SET,         /*!< Event filter set. */
+
+       RGX_HWPERF_FWACT_EV_LAST                /*!< Number of elements. */
+} RGX_HWPERF_FWACT_EV;
+
+/*! Cause of the HWR event. */
+typedef enum
+{
+       RGX_HWPERF_HWR_REASON_INVALID,              /*!< Invalid value. */
+       RGX_HWPERF_HWR_REASON_LOCKUP,               /*!< Lockup. */
+       RGX_HWPERF_HWR_REASON_PAGEFAULT,            /*!< Page fault. */
+       RGX_HWPERF_HWR_REASON_POLLFAIL,             /*!< Poll fail. */
+       RGX_HWPERF_HWR_REASON_DEADLINE_OVERRUN,     /*!< Deadline overrun. */
+       RGX_HWPERF_HWR_REASON_CSW_DEADLINE_OVERRUN, /*!< Hard Context Switch deadline overrun. */
+
+       RGX_HWPERF_HWR_REASON_LAST                  /*!< Number of elements. */
+} RGX_HWPERF_HWR_REASON;
+
+
+/* Fixed size for BVNC string so it does not alter packet data format
+ * Check it is large enough against official BVNC string length maximum
+ */
+#define RGX_HWPERF_MAX_BVNC_LEN (24U)
+static_assert((RGX_HWPERF_MAX_BVNC_LEN >= RGX_BVNC_STR_SIZE_MAX),
+                         "Space inside HWPerf packet data for BVNC string insufficient");
+
+#define RGX_HWPERF_MAX_BVNC_BLOCK_LEN (16U)
+
+/*! BVNC Features */
+typedef struct
+{
+       /*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */
+       IMG_UINT16 ui16BlockID;
+
+       /*! Number of counters in this block type */
+       IMG_UINT16 ui16NumCounters;
+
+       /*! Number of blocks of this type */
+       IMG_UINT16 ui16NumBlocks;
+
+       /*! Reserved for future use */
+       IMG_UINT16 ui16Reserved;
+} RGX_HWPERF_BVNC_BLOCK;
+
+/*! BVNC Features */
+typedef struct
+{
+       IMG_CHAR aszBvncString[RGX_HWPERF_MAX_BVNC_LEN]; /*!< BVNC string */
+       IMG_UINT32 ui32BvncKmFeatureFlags;               /*!< See RGX_HWPERF_FEATURE_FLAGS */
+       IMG_UINT16 ui16BvncBlocks;                       /*!< Number of blocks described in aBvncBlocks */
+       IMG_UINT16 ui16BvncGPUCores;                     /*!< Number of GPU cores present */
+       RGX_HWPERF_BVNC_BLOCK aBvncBlocks[RGX_HWPERF_MAX_BVNC_BLOCK_LEN]; /*!< Supported Performance Blocks for BVNC. See RGX_HWPERF_BVNC_BLOCK */
+} RGX_HWPERF_BVNC;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_BVNC);
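+
+/* Illustrative sketch (not part of the original header): only the first
+ * ui16BvncBlocks entries of aBvncBlocks carry data, so a reader would walk
+ * the array like this (psBvnc is a hypothetical pointer):
+ *
+ *   for (IMG_UINT16 i = 0; i < psBvnc->ui16BvncBlocks; i++)
+ *   {
+ *       // use psBvnc->aBvncBlocks[i].ui16BlockID / ui16NumCounters / ui16NumBlocks
+ *   }
+ */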
+
+/*! Performance Counter Configuration data element. */
+typedef struct
+{
+       IMG_UINT32 ui32BlockID;               /*!< Counter Block ID. See RGX_HWPERF_CNTBLK_ID */
+       IMG_UINT32 ui32NumCounters;           /*!< Number of counters configured */
+       IMG_UINT32 ui32CounterVals[RGX_CNTBLK_COUNTERS_MAX];  /*!< Counters configured (ui32NumCounters worth of entries) */
+} RGX_HWPERF_COUNTER_CFG_DATA_EL;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_COUNTER_CFG_DATA_EL);
+
+/*! Performance Counter Configuration data. */
+typedef struct
+{
+       IMG_UINT32 ui32EnabledBlocks;          /*!< Number of Enabled Blocks. */
+       RGX_HWPERF_COUNTER_CFG_DATA_EL uData;  /*!< Start of variable length data. See RGX_HWPERF_COUNTER_CFG_DATA_EL */
+       IMG_UINT32 ui32Padding;                /*!< reserved */
+} RGX_HWPERF_COUNTER_CFG;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_COUNTER_CFG);
+
+/*! Sub-event's data. */
+typedef union
+{
+       struct
+       {
+               RGX_HWPERF_DM eDM;                              /*!< Data Master ID. */
+               RGX_HWPERF_HWR_REASON eReason;  /*!< Reason of the HWR. */
+               IMG_UINT32 ui32DMContext;               /*!< FW render context */
+       } sHWR;                                                         /*!< HWR sub-event data. */
+
+       RGX_HWPERF_BVNC sBVNC;              /*!< BVNC Features. See RGX_HWPERF_BVNC */
+       struct
+       {
+               IMG_UINT32 ui32EvMaskLo;                /*!< Low order 32 bits of Filter Mask */
+               IMG_UINT32 ui32EvMaskHi;                /*!< High order 32 bits of Filter Mask */
+       } sEvMsk;                                                       /*!< HW Filter Mask */
+       RGX_HWPERF_COUNTER_CFG sPCC;        /*!< Performance Counter Config. See RGX_HWPERF_COUNTER_CFG */
+} RGX_HWPERF_FWACT_DETAIL;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DETAIL);
+
+/*! This structure holds the data of a FW activity event packet */
+typedef struct
+{
+       RGX_HWPERF_FWACT_EV eEvType;           /*!< Event type. */
+       RGX_HWPERF_FWACT_DETAIL uFwActDetail;  /*!< Data of the sub-event. */
+       IMG_UINT32 ui32Padding;                /*!< Reserved. */
+} RGX_HWPERF_FWACT_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DATA);
+
+
+typedef enum {
+       RGX_HWPERF_UFO_EV_UPDATE,          /*!< Update on the UFO objects. */
+       RGX_HWPERF_UFO_EV_CHECK_SUCCESS,   /*!< Successful check on UFO objects. */
+       RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS, /*!< Successful partial render check on UFO objects. */
+       RGX_HWPERF_UFO_EV_CHECK_FAIL,      /*!< Unsuccessful check on UFO objects. */
+       RGX_HWPERF_UFO_EV_PRCHECK_FAIL,    /*!< Unsuccessful partial render check on UFO objects. */
+       RGX_HWPERF_UFO_EV_FORCE_UPDATE,    /*!< Forced erroring of the UFO objects. */
+
+       RGX_HWPERF_UFO_EV_LAST             /*!< Reserved. Do not use. */
+} RGX_HWPERF_UFO_EV;
+
+/*! Data stream tuple. */
+typedef union
+{
+       struct
+       {
+               IMG_UINT32 ui32FWAddr;        /*!< UFO's unique address */
+               IMG_UINT32 ui32Value;         /*!< Value of the UFO object */
+       } sCheckSuccess;
+       struct
+       {
+               IMG_UINT32 ui32FWAddr;        /*!< UFO's unique address */
+               IMG_UINT32 ui32Value;         /*!< Value of the UFO object */
+               IMG_UINT32 ui32Required;      /*!< Value of the UFO object required by the fence */
+       } sCheckFail;
+       struct
+       {
+               IMG_UINT32 ui32FWAddr;        /*!< UFO's unique address */
+               IMG_UINT32 ui32OldValue;      /*!< Value of UFO object before update */
+               IMG_UINT32 ui32NewValue;      /*!< Value of UFO object after update */
+       } sUpdate;
+} RGX_HWPERF_UFO_DATA_ELEMENT;
+
+/*! This structure holds the packet payload data for UFO event. */
+typedef struct
+{
+       RGX_HWPERF_UFO_EV eEvType;     /*!< Subtype of the event. See RGX_HWPERF_UFO_EV */
+       IMG_UINT32 ui32TimeCorrIndex;  /*!< Index to the timer correlation data
+                                        at the time the packet was generated.
+                                        Used to approximate Host timestamps for
+                                        these events. */
+       IMG_UINT32 ui32PID;            /*!< Client process identifier */
+       IMG_UINT32 ui32ExtJobRef;      /*!< Reference used by callers of the RGX
+                                        API to track submitted work (for
+                                        debugging/trace purposes) */
+       IMG_UINT32 ui32IntJobRef;      /*!< Internal reference used to track
+                                        submitted work (for debugging / trace
+                                        purposes) */
+       IMG_UINT32 ui32DMContext;      /*!< GPU Data Master (FW) Context.
+                                        RenderContext for TA and 3D, Common
+                                        Context for other DMs */
+       IMG_UINT32 ui32StreamInfo;     /*!< Encoded number of elements in the
+                                        stream and stream data offset in the
+                                        payload */
+       RGX_HWPERF_DM eDM;             /*!< Data Master number, see RGX_HWPERF_DM */
+       IMG_UINT32 ui32Padding;        /*!< Unused, reserved */
+       IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS];  /*!< Series of tuples holding UFO objects data */
+} RGX_HWPERF_UFO_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_UFO_DATA);
+
+
+/*!
+ * RGX_HWPERF_KICK_TYPE describes the type of kick for events received / sent
+ * between KICK_START / KICK_END inclusively for all event types.
+ */
+typedef enum
+{
+       RGX_HWPERF_KICK_TYPE_TA3D, /*!< Replaced by separate TA and 3D types */
+       RGX_HWPERF_KICK_TYPE_TQ2D, /*!< 2D TQ Kick */
+       RGX_HWPERF_KICK_TYPE_TQ3D, /*!< 3D TQ Kick */
+       RGX_HWPERF_KICK_TYPE_CDM,  /*!< Compute Kick */
+       RGX_HWPERF_KICK_TYPE_RS,   /*!< Ray Store Kick */
+       RGX_HWPERF_KICK_TYPE_VRDM, /*!< Vertex Ray Data Master Kick */
+       RGX_HWPERF_KICK_TYPE_TQTDM, /*!< 2D Data Master TQ Kick */
+       RGX_HWPERF_KICK_TYPE_SYNC, /*!< Sync Kick */
+       RGX_HWPERF_KICK_TYPE_TA,   /*!< TA Kick */
+       RGX_HWPERF_KICK_TYPE_3D,   /*!< 3D Kick */
+       RGX_HWPERF_KICK_TYPE_LAST,
+
+       RGX_HWPERF_KICK_TYPE_FORCE_32BIT = 0x7fffffff
+} RGX_HWPERF_KICK_TYPE;
+
+typedef struct
+{
+       RGX_HWPERF_KICK_TYPE ui32EnqType; /*!< Workload type sent to FW for
+                                          scheduling on GPU hardware.
+                                          See RGX_HWPERF_KICK_TYPE */
+       IMG_UINT32 ui32PID;               /*!< Client process identifier */
+       IMG_UINT32 ui32ExtJobRef;         /*!< Reference used by callers of the RGX API
+                                          to track submitted work (for debugging /
+                                          trace purposes) */
+       IMG_UINT32 ui32IntJobRef;         /*!< internal reference used to track submitted
+                                          work (for debugging / trace purposes) */
+       IMG_UINT32 ui32DMContext;         /*!< GPU Data Master (FW) Context */
+       IMG_UINT32 ui32Padding;           /*!< Unused, reserved */
+       IMG_UINT64 ui64CheckFence_UID;    /*!< ID of fence gating work execution on GPU */
+       IMG_UINT64 ui64UpdateFence_UID;   /*!< ID of fence triggered after work completes on GPU */
+       IMG_UINT64 ui64DeadlineInus;      /*!< Workload deadline in system monotonic time */
+       IMG_UINT32 ui32CycleEstimate;     /*!< Estimated cycle time for the workload */
+       PVRSRV_FENCE hCheckFence;         /*!< Fence this enqueue task waits for, before starting */
+       PVRSRV_FENCE hUpdateFence;        /*!< Fence this enqueue task signals, on completion */
+       PVRSRV_TIMELINE hUpdateTimeline;  /*!< Timeline on which the above hUpdateFence is created */
+
+       /* Align structure size to 8 bytes */
+} RGX_HWPERF_HOST_ENQ_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_ENQ_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+                         "sizeof(RGX_HWPERF_HOST_ENQ_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef struct
+{
+       RGX_HWPERF_UFO_EV eEvType;   /*!< Subtype of the event */
+       IMG_UINT32 ui32StreamInfo;   /*!< Encoded number of elements in the stream and
+                                     stream data offset in the payload */
+#ifdef __CHECKER__
+       /* Since we're not conforming to the C99 standard by not using a flexible
+        * array member need to add a special case for Smatch static code analyser. */
+       IMG_UINT32 aui32StreamData[];
+#else
+       IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS];
+                                    /*!< Series of tuples holding UFO objects data */
+
+       IMG_UINT32 ui32Padding;      /*!< Reserved, align structure size to 8 bytes */
+#endif
+} RGX_HWPERF_HOST_UFO_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_UFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+                         "sizeof(RGX_HWPERF_HOST_UFO_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+/*!
+ * RGX_HWPERF_HOST_RESOURCE_TYPE describes the type of resource which has been
+ * Allocated, Freed or Modified. The values are used to determine which event
+ * data structure to use to decode the data from the event stream
+ */
+typedef enum
+{
+       RGX_HWPERF_HOST_RESOURCE_TYPE_INVALID,   /*!< Invalid */
+       RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC,      /*!< SyncPrim */
+       RGX_HWPERF_HOST_RESOURCE_TYPE_TIMELINE_DEPRECATED,
+                                                /*!< Timeline resource packets are
+                                                  now emitted in client hwperf buffer */
+       RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR, /*!< Fence for use on GPU (SYNC_CP backed) */
+       RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP,   /*!< Sync Checkpoint */
+       RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW,  /*!< Fence created on SW timeline */
+
+       RGX_HWPERF_HOST_RESOURCE_TYPE_LAST       /*!< End of enumeration */
+} RGX_HWPERF_HOST_RESOURCE_TYPE;
+
+typedef union
+{
+       /*! Data for TYPE_TIMELINE (*Deprecated*). This sub-event is no longer
+        * generated in the HOST stream. Timeline data is now provided in the
+        * CLIENT stream instead.
+        */
+       struct
+       {
+               IMG_UINT32 uiPid;             /*!< Identifier of owning process */
+               IMG_UINT64 ui64Timeline_UID1; /*!< Unique identifier for timeline resource */
+               IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH];
+                                             /*!< Label or name given to the sync resource */
+               IMG_UINT32 ui32Padding;       /*!< Reserved. Align structure size to 8 bytes */
+       } sTimelineAlloc;
+
+       /*! Data for TYPE_FENCE_PVR */
+       struct
+       {
+               IMG_PID uiPID;                /*!< Identifier of owning process */
+               PVRSRV_FENCE hFence;          /*!< Unique identifier for the fence resource */
+               IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier of the check point
+                                               backing this fence on the GPU */
+               IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH];
+                                             /*!< Label or name given to the sync resource */
+       } sFenceAlloc;
+
+       /*! Data for TYPE_SYNC_CP */
+       struct
+       {
+               IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier for the check point resource */
+               PVRSRV_TIMELINE hTimeline;     /*!< Unique identifier for the timeline resource */
+               IMG_PID uiPID;                 /*!< Identifier of owning process */
+               PVRSRV_FENCE hFence;           /*!< Unique identifier for the fence resource */
+               IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH];
+                                              /*!< Label or name given to the sync resource */
+       } sSyncCheckPointAlloc;
+
+       /*! Data for TYPE_FENCE_SW */
+       struct
+       {
+               IMG_PID uiPID;                 /*!< Identifier of owning process */
+               PVRSRV_FENCE hSWFence;         /*!< Unique identifier for the SWFence resource */
+               PVRSRV_TIMELINE hSWTimeline;   /*!< Unique identifier for the timeline resource */
+               IMG_UINT64 ui64SyncPtIndex;    /*!< Sync-pt index where this SW timeline has reached */
+               IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH];
+                                              /*!< Label or name given to the sync resource */
+       } sSWFenceAlloc;
+
+       /*! Data for TYPE_SYNC */
+       struct
+       {
+               IMG_UINT32 ui32FWAddr;         /*!< Identifier of sync resource */
+               IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH];
+                                              /*!< Label or name given to the sync resource */
+       } sSyncAlloc;
+} RGX_HWPERF_HOST_ALLOC_DETAIL;
+
+typedef struct
+{
+       RGX_HWPERF_HOST_RESOURCE_TYPE ui32AllocType;
+                                        /*!< This describes the type of the resource
+                                         allocated in the driver. See
+                                         RGX_HWPERF_HOST_RESOURCE_TYPE */
+       RGX_HWPERF_HOST_ALLOC_DETAIL RGXFW_ALIGN uAllocDetail;
+                                        /*!< Union of structures providing further
+                                         data regarding the resource allocated.
+                                         Size of data varies with union member that
+                                         is present, check ``ui32AllocType`` value
+                                         to decode */
+} RGX_HWPERF_HOST_ALLOC_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_ALLOC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+                         "sizeof(RGX_HWPERF_HOST_ALLOC_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef union
+{
+       /*! Data for TYPE_TIMELINE (*Deprecated*) */
+       struct
+       {
+               IMG_UINT32 uiPid;             /*!< Identifier of owning process */
+               IMG_UINT64 ui64Timeline_UID1; /*!< Unique identifier for the timeline resource */
+               IMG_UINT32 ui32Padding;       /*!< Reserved. Align structure size to 8 bytes */
+       } sTimelineDestroy;
+
+       /*! Data for TYPE_FENCE_PVR */
+       struct
+       {
+               IMG_UINT64 ui64Fence_UID;     /*!< Unique identifier for the fence resource */
+               IMG_UINT32 ui32Padding;       /*!< Reserved. */
+       } sFenceDestroy;
+
+       /*! Data for TYPE_SYNC_CP */
+       struct
+       {
+               IMG_UINT32 ui32CheckPt_FWAddr;  /*!< Unique identifier for the check point resource */
+       } sSyncCheckPointFree;
+
+       /*! Data for TYPE_SYNC */
+       struct
+       {
+               IMG_UINT32 ui32FWAddr;        /*!< Unique identifier for the sync resource */
+       } sSyncFree;
+} RGX_HWPERF_HOST_FREE_DETAIL;
+
+typedef struct
+{
+       RGX_HWPERF_HOST_RESOURCE_TYPE ui32FreeType;
+                                     /*!< This describes the type of the resource
+                                      freed or released by the driver. See
+                                      RGX_HWPERF_HOST_RESOURCE_TYPE */
+       RGX_HWPERF_HOST_FREE_DETAIL uFreeDetail;
+                                     /*!< Union of structures providing further data
+                                      regarding the resource freed. Size of data
+                                      varies with union member that is present,
+                                      check ``ui32FreeType`` value to decode */
+       IMG_UINT32 ui32Padding;       /*!< Reserved. Align structure size to 8 bytes */
+} RGX_HWPERF_HOST_FREE_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_FREE_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+                         "sizeof(RGX_HWPERF_HOST_FREE_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef struct
+{
+       IMG_UINT64 ui64CRTimestamp;   /*!< CR timer value from the latest entry of
+                                      the time domains correlation table */
+       IMG_UINT64 ui64OSTimestamp;   /*!< OS timestamp from the latest entry of the
+                                      time domains correlation table */
+       IMG_UINT32 ui32ClockSpeed;    /*!< GPU clock speed from the latest entry of
+                                      the time domains correlation table */
+       IMG_UINT32 ui32Padding;       /*!< Reserved, align structure size to 8 bytes */
+} RGX_HWPERF_HOST_CLK_SYNC_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+                         "sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef union
+{
+       /*! Data for TYPE_FENCE_PVR */
+       struct
+       {
+               IMG_UINT64 ui64NewFence_UID;  /*!< Unique identifier for the new merged fence
+                                              resource that has been created */
+               IMG_UINT64 ui64InFence1_UID;  /*!< Unique identifier for the fence resource */
+               IMG_UINT64 ui64InFence2_UID;  /*!< Unique identifier for the second
+                                              input fence resource */
+               IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH];
+                                             /*!< Label or name given to the sync resource */
+               IMG_UINT32 ui32Padding;       /*!< Reserved. Align structure size to 8 bytes */
+       } sFenceMerge;
+} RGX_HWPERF_HOST_MODIFY_DETAIL;
+
+typedef struct
+{
+       RGX_HWPERF_HOST_RESOURCE_TYPE ui32ModifyType;
+                                       /*!< Describes the type of the resource
+                                        modified by the driver. See
+                                        RGX_HWPERF_HOST_RESOURCE_TYPE */
+
+       RGX_HWPERF_HOST_MODIFY_DETAIL uModifyDetail;
+                                       /*!< Union of structures providing further
+                                        data regarding the resource modified.
+                                        Size of data varies with union member that
+                                        is present.
+                                        Check ``ui32ModifyType`` value to decode */
+} RGX_HWPERF_HOST_MODIFY_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_MODIFY_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+                         "sizeof(RGX_HWPERF_HOST_MODIFY_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef enum
+{
+       RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED = 0, /*!< Invalid */
+       RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_OK,            /*!< Device OK */
+       RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_RESPONDING,    /*!< Device responding to requests */
+       RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_DEAD,          /*!< Device not responding */
+       RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_FAULT,         /*!< Device has faulted */
+
+       RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_LAST
+} RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS;
+
+typedef enum
+{
+       RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_UNDEFINED = 0,     /*!< Invalid */
+       RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_NONE,              /*!< No underlying health reason. */
+       RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_ASSERTED,          /*!< Device has asserted. */
+       RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_POLL_FAILING,      /*!< Device poll has failed. */
+       RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_TIMEOUTS,          /*!< Device timeout has fired. */
+       RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_CORRUPT,     /*!< Queue has become corrupt. */
+       RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_STALLED,     /*!< Queue has stalled. */
+       RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_IDLING,            /*!< Device is idling. */
+       RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_RESTARTING,        /*!< Device restarting. */
+       RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS,/*!< Interrupts have been discarded. */
+
+       RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_LAST
+} RGX_HWPERF_HOST_DEVICE_HEALTH_REASON;
+
+/*! RGX_HWPERF_DEV_INFO_EV values */
+typedef enum
+{
+       RGX_HWPERF_DEV_INFO_EV_HEALTH,      /*!< Health sub-event */
+
+       RGX_HWPERF_DEV_INFO_EV_LAST         /*!< Last enumeration value */
+} RGX_HWPERF_DEV_INFO_EV;
+
+/*! RGX_HWPERF_HOST_DEV_INFO_DETAIL is a union of structures providing
+ *  further data regarding the device's status
+ */
+typedef union
+{
+       /*! Data for device status event */
+       struct
+       {
+               RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS eDeviceHealthStatus;
+                                                /*!< Device's health status */
+               RGX_HWPERF_HOST_DEVICE_HEALTH_REASON eDeviceHealthReason;
+                                                /*!< Reason for device's health status */
+       } sDeviceStatus;
+} RGX_HWPERF_HOST_DEV_INFO_DETAIL;
+
+/*! RGX_HWPERF_HOST_DEV_INFO_DATA contains device health status information */
+typedef struct
+{
+       IMG_UINT32                      ui32Padding;
+                                   /*!< Reserved. Align structure size to 8 bytes */
+       RGX_HWPERF_DEV_INFO_EV          eEvType;
+                                   /*!< Type of the sub-event. See
+                                     RGX_HWPERF_DEV_INFO_EV */
+       RGX_HWPERF_HOST_DEV_INFO_DETAIL uDevInfoDetail;
+                                   /*!< Union of structures providing further data
+                                     regarding the device's status. Size of data
+                                     varies with union member that is present,
+                                     check ``eEvType`` value to decode */
+} RGX_HWPERF_HOST_DEV_INFO_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_DEV_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+                        "sizeof(RGX_HWPERF_HOST_DEV_INFO_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+/*! RGX_HWPERF_INFO_EV event subtype for RGX_HWPERF_HOST_INFO_DATA events */
+typedef enum
+{
+       RGX_HWPERF_INFO_EV_MEM_USAGE,     /*!< Memory usage event */
+       RGX_HWPERF_INFO_EV_LAST           /*!< End of enumeration */
+} RGX_HWPERF_INFO_EV;
+
+/*! RGX_HWPERF_HOST_INFO_DETAIL contains the data payload for the
+ * RGX_HWPERF_HOST_INFO_DATA event.
+ */
+typedef union
+{
+       /*! Host Memory usage statistics */
+       struct
+       {
+               IMG_UINT32 ui32TotalMemoryUsage;   /*!< Total memory usage */
+               /*! Detailed memory usage */
+               struct
+               {
+                       IMG_UINT32 ui32Pid;              /*!< Process ID */
+                       IMG_UINT32 ui32KernelMemUsage;   /*!< Kernel memory usage */
+                       IMG_UINT32 ui32GraphicsMemUsage; /*!< GPU memory usage */
+               } sPerProcessUsage[RGX_HWPERF_ZERO_OR_MORE_ELEMENTS];
+       } sMemUsageStats;
+} RGX_HWPERF_HOST_INFO_DETAIL;
+
+/*! RGX_HWPERF_HOST_INFO_DATA. Host Info data event payload contains device
+ * memory usage information.
+ */
+typedef struct
+{
+       IMG_UINT32 ui32Padding;       /*!< Reserved. Align structure size to 8 bytes */
+       RGX_HWPERF_INFO_EV eEvType;   /*!< Type of subevent. See RGX_HWPERF_INFO_EV */
+       RGX_HWPERF_HOST_INFO_DETAIL uInfoDetail;
+                                     /*!< Union of structures providing further data
+                                      regarding memory usage. Size varies with union
+                                      member that is present, check ``eEvType``
+                                      value to decode */
+} RGX_HWPERF_HOST_INFO_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+                         "sizeof(RGX_HWPERF_HOST_INFO_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+/*! FENCE_WAIT_TYPE definitions */
+typedef enum
+{
+       RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN = 0,    /*!< Begin */
+       RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END,          /*!< End */
+
+       RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_LAST,         /*!< Do not use */
+} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE;
+
+/*! FENCE_WAIT_RESULT definitions */
+typedef enum
+{
+       RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_INVALID = 0, /*!< Invalid */
+       RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_TIMEOUT,     /*!< Timed Out */
+       RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_PASSED,      /*!< Passed */
+       RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_ERROR,       /*!< Errored */
+
+       RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_LAST,        /*!< Do not use */
+} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT;
+
+/*! FENCE_WAIT_DETAIL Event Payload */
+typedef union
+{
+       /*! Data for SYNC_FENCE_WAIT_TYPE_BEGIN */
+       struct
+       {
+               IMG_UINT32 ui32TimeoutInMs;                     /*!< Wait timeout (ms) */
+       } sBegin;
+
+       /*! Data for SYNC_FENCE_WAIT_TYPE_END */
+       struct
+       {
+               RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT eResult; /*!< Wait result */
+       } sEnd;
+} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DETAIL;
+
+/*! RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA Event Payload. This data structure
+ * is received whenever the host driver handles a wait for sync event request.
+ */
+typedef struct
+{
+       IMG_PID uiPID;          /*!< Identifier of the owning process */
+       PVRSRV_FENCE hFence;    /*!< Unique identifier for the fence resource */
+       RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eType;
+                               /*!< Type of the subevent, see
+                                RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE */
+       RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DETAIL uDetail;
+                               /*!< Union of structures providing further data
+                                regarding device's status. Size of data varies with
+                                union member that is present, check ``eType`` value
+                                to decode */
+
+} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA;
+
+static_assert((sizeof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+                         "sizeof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+/*! RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA.
+ * Software Timeline Advanced Event Payload. This data structure is received
+ * whenever the host driver processes a Software Timeline Advanced event.
+ */
+typedef struct
+{
+       IMG_PID uiPID;                /*!< Identifier of the owning process */
+       PVRSRV_TIMELINE hTimeline;    /*!< Unique identifier for the timeline resource */
+       IMG_UINT64 ui64SyncPtIndex;   /*!< Index of the sync point to which the
+                                      timeline has advanced */
+
+} RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA;
+
+static_assert((sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+                         "sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef enum
+{
+       RGX_HWPERF_HOST_CLIENT_INFO_TYPE_INVALID = 0,  /*!< Invalid */
+       RGX_HWPERF_HOST_CLIENT_INFO_TYPE_PROCESS_NAME, /*!< Process Name */
+
+       RGX_HWPERF_HOST_CLIENT_INFO_TYPE_LAST,         /*!< Do not use */
+} RGX_HWPERF_HOST_CLIENT_INFO_TYPE;
+
+typedef struct
+{
+       IMG_PID uiClientPID; /*!< Client process identifier */
+       IMG_UINT32 ui32Length;  /*!< Number of bytes present in ``acName`` */
+       IMG_CHAR acName[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; /*!< Process name string, null terminated */
+} RGX_HWPERF_HOST_CLIENT_PROC_NAME;
+
+#define RGX_HWPERF_HOST_CLIENT_PROC_NAME_SIZE(ui32NameLen) \
+       ((IMG_UINT32)(offsetof(RGX_HWPERF_HOST_CLIENT_PROC_NAME, acName) + (ui32NameLen)))
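+/* Illustrative sketch only (not part of the original header): computing the
+ * size of one variable-length client process name record. The values are
+ * hypothetical; "pvrsrvctl" is 9 characters plus the NUL terminator.
+ *
+ *     IMG_UINT32 ui32NameLen = 10U;
+ *     IMG_UINT32 ui32RecSize = RGX_HWPERF_HOST_CLIENT_PROC_NAME_SIZE(ui32NameLen);
+ *     // ui32RecSize == offsetof(RGX_HWPERF_HOST_CLIENT_PROC_NAME, acName) + 10
+ */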
+
+typedef union
+{
+       struct
+       {
+               IMG_UINT32 ui32Count; /*!< Number of elements in ``asProcNames`` */
+               RGX_HWPERF_HOST_CLIENT_PROC_NAME asProcNames[RGX_HWPERF_ONE_OR_MORE_ELEMENTS];
+       } sProcName;
+} RGX_HWPERF_HOST_CLIENT_INFO_DETAIL;
+
+typedef struct
+{
+       IMG_UINT32 uiReserved1; /*!< Reserved. Align structure size to 8 bytes */
+       RGX_HWPERF_HOST_CLIENT_INFO_TYPE eType;
+                               /*!< Type of the subevent, see
+                                RGX_HWPERF_HOST_CLIENT_INFO_TYPE */
+       RGX_HWPERF_HOST_CLIENT_INFO_DETAIL uDetail;
+                               /*!< Union of structures. Size of data
+                                varies with union member that is present,
+                                check ``eType`` value to decode */
+
+} RGX_HWPERF_HOST_CLIENT_INFO_DATA;
+
+static_assert((sizeof(RGX_HWPERF_HOST_CLIENT_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+                         "sizeof(RGX_HWPERF_HOST_CLIENT_INFO_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef enum
+{
+       RGX_HWPERF_RESOURCE_CAPTURE_TYPE_NONE,
+       RGX_HWPERF_RESOURCE_CAPTURE_TYPE_DEFAULT_FRAMEBUFFER,
+       RGX_HWPERF_RESOURCE_CAPTURE_TYPE_OFFSCREEN_FB_ATTACHMENTS,
+       RGX_HWPERF_RESOURCE_CAPTURE_TYPE_TILE_LIFETIME_DATA,
+
+       RGX_HWPERF_RESOURCE_TYPE_COUNT
+} RGX_HWPERF_RESOURCE_CAPTURE_TYPE;
+
+typedef struct
+{
+       IMG_UINT32 ui32Height;
+       IMG_UINT32 ui32Width;
+       IMG_UINT32 ui32BPP;
+       IMG_UINT32 ui32PixFormat;
+} RGX_RESOURCE_PER_SURFACE_INFO, *PRGX_RESOURCE_PER_SURFACE_INFO;
+
+typedef struct
+{
+       IMG_INT32  i32XOffset;        /*!< render surface X shift */
+       IMG_INT32  i32YOffset;        /*!< render surface Y shift */
+       IMG_UINT32 ui32WidthInTiles;  /*!< number of TLT data points in X */
+       IMG_UINT32 ui32HeightInTiles; /*!< number of TLT data points in Y */
+} RGX_RESOURCE_PER_TLT_BUFFER_INFO, *PRGX_RESOURCE_PER_TLT_BUFFER_INFO;
+
+typedef union
+{
+       struct RGX_RESOURCE_CAPTURE_RENDER_SURFACES
+       {
+               IMG_UINT32 ui32RenderSurfaceCount;
+               RGX_RESOURCE_PER_SURFACE_INFO sSurface[RGX_HWPERF_ONE_OR_MORE_ELEMENTS];
+       } sRenderSurfaces;
+
+       struct RGX_RESOURCE_CAPTURE_TILE_LIFETIME_BUFFERS
+       {
+               RGX_RESOURCE_PER_TLT_BUFFER_INFO sTLTBufInfo[RGX_HWPERF_ONE_OR_MORE_ELEMENTS];
+       } sTLTBuffers;
+} RGX_RESOURCE_CAPTURE_DETAIL;
+
+typedef struct
+{
+       RGX_HWPERF_RESOURCE_CAPTURE_TYPE eType;
+       IMG_PID uPID;
+       IMG_UINT32 ui32ContextID;
+       IMG_UINT32 ui32FrameNum;
+       IMG_UINT32 ui32CapturedTaskJobRef;      /* The job ref of the HW task that emitted the data */
+       IMG_INT32 eClientModule;                        /* RGX_HWPERF_CLIENT_API - ID that the capture is originating from. */
+       RGX_RESOURCE_CAPTURE_DETAIL uDetail; /* eType determines the value of the union */
+} RGX_RESOURCE_CAPTURE_INFO, *PRGX_RESOURCE_CAPTURE_INFO;
+
+#define RGX_RESOURCE_CAPTURE_INFO_BASE_SIZE() offsetof(RGX_RESOURCE_CAPTURE_INFO, uDetail)
+
+/*! Tile Lifetime Tracking header size. Only available if
+ * RGX_FEATURE_ISP_TILE_LIFETIME_TRACKING is present and enabled via
+ * SUPPORT_TLT_PERF
+ */
+#define RGX_TLT_HARDWARE_HDR_SIZE   (16U)
+
+/* PVRSRVGetHWPerfResourceCaptureResult */
+typedef enum
+{
+       RGX_HWPERF_RESOURCE_CAPTURE_RESULT_NONE = 0,
+       RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK,                                  /* We got data ok, expect more packets for this request. */
+       RGX_HWPERF_RESOURCE_CAPTURE_RESULT_NOT_READY,                   /* Signals a timeout on the connection - no data available yet. */
+       RGX_HWPERF_RESOURCE_CAPTURE_RESULT_COMPLETE_SUCCESS,    /* The request completed successfully, signals the end of packets for the request. */
+       RGX_HWPERF_RESOURCE_CAPTURE_RESULT_COMPLETE_FAILURE             /* The request failed, signals the end of packets for the request. */
+} RGX_HWPERF_RESOURCE_CAPTURE_RESULT_STATUS;
+
+typedef struct
+{
+       IMG_PID uPID;                                           /* In case of a failed request pass the caller the PID and context ID. */
+       IMG_UINT32 ui32CtxID;
+       RGX_RESOURCE_CAPTURE_INFO *psInfo;      /* Various metadata regarding the captured resource which aid the requester when
+                                                                                       unpacking the resource data; valid if RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK is returned. */
+       IMG_BYTE *pbData;                                       /* Buffer containing the captured resource data, valid if RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK is returned. */
+} RGX_RESOURCE_CAPTURE_RESULT;
+
+/*! This type is a union of packet payload data structures associated with
+ * various FW and Host events */
+typedef union
+{
+       RGX_HWPERF_FW_DATA             sFW;           /*!< Firmware event packet data,
+                                                      events ``0x01-0x06`` */
+       RGX_HWPERF_HW_DATA             sHW;           /*!< Hardware event packet data,
+                                                      events ``0x07-0x19``, ``0x28-0x29`` */
+       RGX_HWPERF_CLKS_CHG_DATA       sCLKSCHG;      /*!< Clock change event packet
+                                                      data, events ``0x1A`` */
+       RGX_HWPERF_GPU_STATE_CHG_DATA  sGPUSTATECHG;  /*!< GPU utilisation state
+                                                      change event packet data,
+                                                      events ``0x1B`` */
+       RGX_HWPERF_PWR_EST_DATA        sPWREST;       /*!< Power estimate event
+                                                      packet data,
+                                                      events ``0x20-0x22`` */
+       RGX_HWPERF_PWR_CHG_DATA        sPWR;          /*!< Power event packet data,
+                                                      events ``0x23`` */
+       RGX_HWPERF_CSW_DATA            sCSW;          /*!< Context switch packet data,
+                                                      events ``0x30-0x31`` */
+       RGX_HWPERF_DVFS_DATA           sDVFS;         /*!< DVFS activity data,
+                                                      events ``0x32`` */
+       RGX_HWPERF_UFO_DATA            sUFO;          /*!< UFO data, events ``0x38`` */
+       RGX_HWPERF_FWACT_DATA          sFWACT;        /*!< Firmware activity event
+                                                      packet data,
+                                                      events ``0x39`` */
+       /* */
+       RGX_HWPERF_HOST_ENQ_DATA       sENQ;          /*!< Host ENQ data,
+                                                      events ``0x01`` (Host) */
+       RGX_HWPERF_HOST_UFO_DATA       sHUFO;         /*!< Host UFO data,
+                                                      events ``0x02`` (Host) */
+       RGX_HWPERF_HOST_ALLOC_DATA     sHALLOC;       /*!< Host Alloc data,
+                                                      events ``0x03`` (Host) */
+       RGX_HWPERF_HOST_CLK_SYNC_DATA  sHCLKSYNC;     /*!< Host CLK_SYNC data,
+                                                      events ``0x04`` (Host) */
+       RGX_HWPERF_HOST_FREE_DATA      sHFREE;        /*!< Host Free data,
+                                                      events ``0x05`` (Host) */
+       RGX_HWPERF_HOST_MODIFY_DATA    sHMOD;         /*!< Host Modify data,
+                                                      events ``0x06`` (Host) */
+       RGX_HWPERF_HOST_DEV_INFO_DATA  sHDEVINFO;     /*!< Host device info data,
+                                                      events ``0x07`` (Host) */
+       RGX_HWPERF_HOST_INFO_DATA      sHINFO;        /*!< Host info data,
+                                                      events ``0x08`` (Host) */
+       RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA sWAIT;   /*!< Host fence-wait data,
+                                                      events ``0x09`` (Host) */
+       RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA sSWTLADV; /*!< Host SW-timeline advance
+                                                      data, events ``0x0A`` (Host) */
+       RGX_HWPERF_HOST_CLIENT_INFO_DATA sHClientInfo; /*!< Host client info,
+                                                      events ``0x0B`` (Host) */
+
+} RGX_HWPERF_V2_PACKET_DATA, *RGX_PHWPERF_V2_PACKET_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_V2_PACKET_DATA);
+
+#define RGX_HWPERF_GET_PACKET_DATA(_packet_addr) ((RGX_PHWPERF_V2_PACKET_DATA) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR))))
+
+#define RGX_HWPERF_GET_DVFS_EVENT_TYPE_PTR(_packet_addr)       \
+       ((RGX_HWPERF_DVFS_EV*) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR) + offsetof(RGX_HWPERF_DVFS_DATA,eEventType))))
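+/* Illustrative sketch only: stepping from a packet address to its payload
+ * using the accessor above. ``pvPacket`` and ``eType`` are hypothetical
+ * locals; the host event type is assumed to have been read from the packet
+ * header beforehand.
+ *
+ *     RGX_PHWPERF_V2_PACKET_DATA psData = RGX_HWPERF_GET_PACKET_DATA(pvPacket);
+ *     if (eType == RGX_HWPERF_HOST_UFO)        // Host event 0x02
+ *     {
+ *             (void) psData->sHUFO;            // host UFO payload
+ *     }
+ */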
+
+/******************************************************************************
+ * API Types
+ *****************************************************************************/
+
+/*! Counter block IDs for all the hardware blocks with counters.
+ * Directly addressable blocks must have a value between 0..15 [0..0xF].
+ * Indirect groups have the following encoding:
+ * the first hex digit (LSB) represents a unit number within the group
+ * and the second hex digit represents the group number.
+ * Group 0 is the direct group; all others are indirect groups.
+ * See the decoding sketch after the block ID mask definitions below.
+ */
+typedef IMG_UINT32 RGX_HWPERF_CNTBLK_ID;
+
+/*! Directly addressable counter blocks */
+#if defined(DOXYGEN)
+/*! _RGX_HWPERF_CNTBLK_ID */
+#endif
+#define        RGX_CNTBLK_ID_TA                         0x0000U
+#define        RGX_CNTBLK_ID_RASTER             0x0001U /*!< Non-cluster grouping cores */
+#define        RGX_CNTBLK_ID_HUB                        0x0002U /*!< Non-cluster grouping cores */
+#define        RGX_CNTBLK_ID_TORNADO            0x0003U /*!< XT cores */
+#define        RGX_CNTBLK_ID_JONES                      0x0004U /*!< S7 cores */
+#if defined(RGX_FEATURE_HWPERF_OCEANIC)
+#define        RGX_CNTBLK_ID_DIRECT_LAST        0x0003U /*!< Indirect blocks start from here */
+#else
+#define        RGX_CNTBLK_ID_DIRECT_LAST        0x0005U /*!< Indirect blocks start from here */
+#endif /* defined(RGX_FEATURE_HWPERF_OCEANIC) */
+
+#define        RGX_CNTBLK_ID_BF_DEPRECATED      0x0005U /*!< Doppler unit (DEPRECATED) */
+#define        RGX_CNTBLK_ID_BT_DEPRECATED      0x0006U /*!< Doppler unit (DEPRECATED) */
+#define        RGX_CNTBLK_ID_RT_DEPRECATED      0x0007U /*!< Doppler unit (DEPRECATED) */
+#define        RGX_CNTBLK_ID_SH_DEPRECATED      0x0008U /*!< Ray tracing unit (DEPRECATED) */
+
+
+/*! Indirectly addressable counter blocks. DA blocks indicate counter blocks
+ *  where the counter registers are directly accessible
+ */
+#define        RGX_CNTBLK_ID_TPU_MCU0           0x0010U /*!< Addressable by Dust */
+#define RGX_CNTBLK_ID_TPU_MCU0_DA       0x8010U
+#define        RGX_CNTBLK_ID_TPU_MCU1           0x0011U
+#define RGX_CNTBLK_ID_TPU_MCU1_DA       0x8011U
+#define        RGX_CNTBLK_ID_TPU_MCU2           0x0012U
+#define RGX_CNTBLK_ID_TPU_MCU2_DA       0x8012U
+#define        RGX_CNTBLK_ID_TPU_MCU3           0x0013U
+#define RGX_CNTBLK_ID_TPU_MCU3_DA       0x8013U
+#define        RGX_CNTBLK_ID_TPU_MCU4           0x0014U
+#define RGX_CNTBLK_ID_TPU_MCU4_DA       0x8014U
+#define        RGX_CNTBLK_ID_TPU_MCU5           0x0015U
+#define RGX_CNTBLK_ID_TPU_MCU5_DA       0x8015U
+#define        RGX_CNTBLK_ID_TPU_MCU6           0x0016U
+#define RGX_CNTBLK_ID_TPU_MCU6_DA       0x8016U
+#define        RGX_CNTBLK_ID_TPU_MCU7           0x0017U
+#define RGX_CNTBLK_ID_TPU_MCU7_DA       0x8017U
+#define        RGX_CNTBLK_ID_TPU_MCU_ALL        0x4010U
+#define        RGX_CNTBLK_ID_TPU_MCU_ALL_DA     0xC010U
+
+#define        RGX_CNTBLK_ID_USC0                       0x0020U /*!< Addressable by Cluster */
+#define RGX_CNTBLK_ID_USC0_DA           0x8020U
+#define        RGX_CNTBLK_ID_USC1                       0x0021U
+#define RGX_CNTBLK_ID_USC1_DA           0x8021U
+#define        RGX_CNTBLK_ID_USC2                       0x0022U
+#define RGX_CNTBLK_ID_USC2_DA           0x8022U
+#define        RGX_CNTBLK_ID_USC3                       0x0023U
+#define RGX_CNTBLK_ID_USC3_DA           0x8023U
+#define        RGX_CNTBLK_ID_USC4                       0x0024U
+#define RGX_CNTBLK_ID_USC4_DA           0x8024U
+#define        RGX_CNTBLK_ID_USC5                       0x0025U
+#define RGX_CNTBLK_ID_USC5_DA           0x8025U
+#define        RGX_CNTBLK_ID_USC6                       0x0026U
+#define RGX_CNTBLK_ID_USC6_DA           0x8026U
+#define        RGX_CNTBLK_ID_USC7                       0x0027U
+#define RGX_CNTBLK_ID_USC7_DA           0x8027U
+#define        RGX_CNTBLK_ID_USC8                       0x0028U
+#define RGX_CNTBLK_ID_USC8_DA           0x8028U
+#define        RGX_CNTBLK_ID_USC9                       0x0029U
+#define RGX_CNTBLK_ID_USC9_DA           0x8029U
+#define        RGX_CNTBLK_ID_USC10                      0x002AU
+#define RGX_CNTBLK_ID_USC10_DA          0x802AU
+#define        RGX_CNTBLK_ID_USC11                      0x002BU
+#define RGX_CNTBLK_ID_USC11_DA          0x802BU
+#define        RGX_CNTBLK_ID_USC12                      0x002CU
+#define RGX_CNTBLK_ID_USC12_DA          0x802CU
+#define        RGX_CNTBLK_ID_USC13                      0x002DU
+#define RGX_CNTBLK_ID_USC13_DA          0x802DU
+#define        RGX_CNTBLK_ID_USC14                      0x002EU
+#define RGX_CNTBLK_ID_USC14_DA          0x802EU
+#define        RGX_CNTBLK_ID_USC15                      0x002FU
+#define RGX_CNTBLK_ID_USC15_DA          0x802FU
+#define        RGX_CNTBLK_ID_USC_ALL            0x4020U
+#define        RGX_CNTBLK_ID_USC_ALL_DA         0xC020U
+
+#define        RGX_CNTBLK_ID_TEXAS0             0x0030U /*!< Addressable by Phantom in XT, Dust in S7 */
+#define        RGX_CNTBLK_ID_TEXAS1             0x0031U
+#define        RGX_CNTBLK_ID_TEXAS2             0x0032U
+#define        RGX_CNTBLK_ID_TEXAS3             0x0033U
+#define        RGX_CNTBLK_ID_TEXAS4             0x0034U
+#define        RGX_CNTBLK_ID_TEXAS5             0x0035U
+#define        RGX_CNTBLK_ID_TEXAS6             0x0036U
+#define        RGX_CNTBLK_ID_TEXAS7             0x0037U
+#define        RGX_CNTBLK_ID_TEXAS_ALL          0x4030U
+
+#define        RGX_CNTBLK_ID_RASTER0            0x0040U /*!< Addressable by Phantom, XT only */
+#define        RGX_CNTBLK_ID_RASTER1            0x0041U
+#define        RGX_CNTBLK_ID_RASTER2            0x0042U
+#define        RGX_CNTBLK_ID_RASTER3            0x0043U
+#define        RGX_CNTBLK_ID_RASTER_ALL         0x4040U
+
+#define        RGX_CNTBLK_ID_BLACKPEARL0        0x0050U /*!< Addressable by Phantom, S7, only */
+#define        RGX_CNTBLK_ID_BLACKPEARL1        0x0051U
+#define        RGX_CNTBLK_ID_BLACKPEARL2        0x0052U
+#define        RGX_CNTBLK_ID_BLACKPEARL3        0x0053U
+#define        RGX_CNTBLK_ID_BLACKPEARL_ALL 0x4050U
+
+#define        RGX_CNTBLK_ID_PBE0                       0x0060U /*!< Addressable by Cluster in S7 and PBE2_IN_XE */
+#define        RGX_CNTBLK_ID_PBE1                       0x0061U
+#define        RGX_CNTBLK_ID_PBE2                       0x0062U
+#define        RGX_CNTBLK_ID_PBE3                       0x0063U
+#define        RGX_CNTBLK_ID_PBE4                       0x0064U
+#define        RGX_CNTBLK_ID_PBE5                       0x0065U
+#define        RGX_CNTBLK_ID_PBE6                       0x0066U
+#define        RGX_CNTBLK_ID_PBE7                       0x0067U
+#define        RGX_CNTBLK_ID_PBE8                       0x0068U
+#define        RGX_CNTBLK_ID_PBE9                       0x0069U
+#define        RGX_CNTBLK_ID_PBE10                      0x006AU
+#define        RGX_CNTBLK_ID_PBE11                      0x006BU
+#define        RGX_CNTBLK_ID_PBE12                      0x006CU
+#define        RGX_CNTBLK_ID_PBE13                      0x006DU
+#define        RGX_CNTBLK_ID_PBE14                      0x006EU
+#define        RGX_CNTBLK_ID_PBE15                      0x006FU
+#define        RGX_CNTBLK_ID_PBE_ALL            0x4060U
+
+#define        RGX_CNTBLK_ID_LAST                       0x0070U /*!< End of PBE block */
+
+#define        RGX_CNTBLK_ID_BX_TU0_DEPRECATED          0x0070U /*!< Doppler unit, DEPRECATED */
+#define        RGX_CNTBLK_ID_BX_TU1_DEPRECATED          0x0071U
+#define        RGX_CNTBLK_ID_BX_TU2_DEPRECATED          0x0072U
+#define        RGX_CNTBLK_ID_BX_TU3_DEPRECATED          0x0073U
+#define        RGX_CNTBLK_ID_BX_TU_ALL_DEPRECATED       0x4070U
+
+#define        RGX_CNTBLK_ID_CUSTOM0            0x70F0U
+#define        RGX_CNTBLK_ID_CUSTOM1            0x70F1U
+#define        RGX_CNTBLK_ID_CUSTOM2            0x70F2U
+#define        RGX_CNTBLK_ID_CUSTOM3            0x70F3U
+#define        RGX_CNTBLK_ID_CUSTOM4_FW         0x70F4U /*!< Custom block used for getting statistics held in the FW */
+#define        RGX_CNTBLK_ID_CUSTOM_MASK        0x70FFU
+
+
+/* Masks for the counter block ID*/
+#define        RGX_CNTBLK_ID_UNIT_MASK      (0x000FU)
+#define        RGX_CNTBLK_ID_GROUP_MASK     (0x00F0U)
+#define        RGX_CNTBLK_ID_GROUP_SHIFT    (4U)
+#define        RGX_CNTBLK_ID_MC_GPU_MASK    (0x0F00U)
+#define        RGX_CNTBLK_ID_MC_GPU_SHIFT   (8U)
+#define        RGX_CNTBLK_ID_UNIT_ALL_MASK  (0x4000U)
+#define        RGX_CNTBLK_ID_DA_MASK        (0x8000U) /*!< Block with directly accessible counter registers */
+
+#define RGX_CNTBLK_INDIRECT_COUNT(_class, _n) ((IMG_UINT32)(RGX_CNTBLK_ID_ ## _class ## _n) - (IMG_UINT32)(RGX_CNTBLK_ID_ ## _class ## 0) + 1u)
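+/* Illustrative sketch only, expanding on the block ID encoding described
+ * above; the local variable names are hypothetical.
+ *
+ *     IMG_UINT32 ui32BlkID = RGX_CNTBLK_ID_USC2;                          // 0x0022
+ *     IMG_UINT32 ui32Group = (ui32BlkID & RGX_CNTBLK_ID_GROUP_MASK) >>
+ *                             RGX_CNTBLK_ID_GROUP_SHIFT;                  // 2 (USC group)
+ *     IMG_UINT32 ui32Unit  = ui32BlkID & RGX_CNTBLK_ID_UNIT_MASK;         // 2 (USC2)
+ *     IMG_BOOL   bDA       = (ui32BlkID & RGX_CNTBLK_ID_DA_MASK) != 0U;   // IMG_FALSE here
+ *
+ * Likewise RGX_CNTBLK_INDIRECT_COUNT(USC, 15) evaluates to
+ * (0x002F - 0x0020 + 1) == 16, i.e. the number of USC blocks listed above.
+ */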
+
+/*! The number of layout blocks defined with configurable multiplexed
+ * performance counters, hence excludes custom counter blocks.
+ */
+#if defined(RGX_FEATURE_HWPERF_OCEANIC)
+#define RGX_HWPERF_MAX_MUX_BLKS (\
+       (IMG_UINT32)RGX_CNTBLK_ID_DIRECT_LAST    +\
+       RGX_CNTBLK_INDIRECT_COUNT(PBE,         0) )
+
+#define RGX_HWPERF_MAX_DA_BLKS (\
+       (IMG_UINT32)RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU,     0)+\
+       RGX_CNTBLK_INDIRECT_COUNT(USC,                     0) )
+
+#define RGX_HWPERF_MAX_DEFINED_BLKS (\
+       (IMG_UINT32)RGX_HWPERF_MAX_MUX_BLKS     +\
+       RGX_HWPERF_MAX_DA_BLKS                   )
+#else
+#define RGX_HWPERF_MAX_DEFINED_BLKS  (\
+       (IMG_UINT32)RGX_CNTBLK_ID_DIRECT_LAST    +\
+       RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU,     7)+\
+       RGX_CNTBLK_INDIRECT_COUNT(USC,        15)+\
+       RGX_CNTBLK_INDIRECT_COUNT(TEXAS,       7)+\
+       RGX_CNTBLK_INDIRECT_COUNT(RASTER,      3)+\
+       RGX_CNTBLK_INDIRECT_COUNT(BLACKPEARL,  3)+\
+       RGX_CNTBLK_INDIRECT_COUNT(PBE,        15) )
+#define RGX_HWPERF_MAX_MUX_BLKS      (\
+    RGX_HWPERF_MAX_DEFINED_BLKS      )
+#endif
+
+static_assert(
+       ((RGX_CNTBLK_ID_DIRECT_LAST + ((RGX_CNTBLK_ID_LAST & RGX_CNTBLK_ID_GROUP_MASK) >> RGX_CNTBLK_ID_GROUP_SHIFT)) <= RGX_HWPERF_MAX_BVNC_BLOCK_LEN),
+       "RGX_HWPERF_MAX_BVNC_BLOCK_LEN insufficient");
+
+#define RGX_HWPERF_EVENT_MASK_VALUE(e)      (IMG_UINT64_C(1) << (IMG_UINT32)(e))
+
+#define RGX_CUSTOM_FW_CNTRS    \
+                X(TA_LOCAL_FL_SIZE,    0x0, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAKICK) |         \
+                                            RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TAPAUSE) |  \
+                                            RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TARESUME) | \
+                                            RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAFINISHED))      \
+                                                                                                        \
+                X(TA_GLOBAL_FL_SIZE,   0x1, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAKICK) |         \
+                                            RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TAPAUSE) |  \
+                                            RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TARESUME) | \
+                                            RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAFINISHED))      \
+                                                                                                        \
+                X(3D_LOCAL_FL_SIZE,    0x2, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) |         \
+                                            RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DFINISHED))      \
+                                                                                                        \
+                X(3D_GLOBAL_FL_SIZE,   0x3, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) |         \
+                                            RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DFINISHED))      \
+                                                                                                        \
+                X(ISP_TILES_IN_FLIGHT, 0x4, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) |         \
+                                            RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DSPMKICK))
+
+/*! Counter IDs for the firmware held statistics */
+typedef enum
+{
+#define X(ctr, id, allow_mask) RGX_CUSTOM_FW_CNTR_##ctr = id,
+       RGX_CUSTOM_FW_CNTRS
+#undef X
+
+       /* always the last entry in the list */
+       RGX_CUSTOM_FW_CNTR_LAST
+} RGX_HWPERF_CUSTOM_FW_CNTR_ID;
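+/* Illustrative note only: with the X() definition used above, the first list
+ * entry expands to the enumerator
+ *
+ *     RGX_CUSTOM_FW_CNTR_TA_LOCAL_FL_SIZE = 0x0,
+ *
+ * and so on for each entry; the allow_mask argument is unused in this
+ * particular expansion and RGX_CUSTOM_FW_CNTR_LAST closes the enum.
+ */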
+
+/*! Identifier for each counter in a performance counting module */
+typedef IMG_UINT32 RGX_HWPERF_CNTBLK_COUNTER_ID;
+
+#define        RGX_CNTBLK_COUNTER0_ID 0U
+#define        RGX_CNTBLK_COUNTER1_ID 1U
+#define        RGX_CNTBLK_COUNTER2_ID 2U
+#define        RGX_CNTBLK_COUNTER3_ID 3U
+#define        RGX_CNTBLK_COUNTER4_ID 4U
+#define        RGX_CNTBLK_COUNTER5_ID 5U
+       /* MAX value used in server handling of counter config arrays */
+#define        RGX_CNTBLK_MUX_COUNTERS_MAX 6U
+
+
+/* sets all the bits from bit _b1 to _b2, in a IMG_UINT64 type */
+#define MASK_RANGE_IMPL(b1, b2)        ((IMG_UINT64)((IMG_UINT64_C(1) << ((IMG_UINT32)(b2)-(IMG_UINT32)(b1) + 1U)) - 1U) << (IMG_UINT32)(b1))
+#define MASK_RANGE(R)                  MASK_RANGE_IMPL(R##_FIRST_TYPE, R##_LAST_TYPE)
+#define RGX_HWPERF_HOST_EVENT_MASK_VALUE(e) (IMG_UINT32_C(1) << (e))
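+/* Illustrative note only: MASK_RANGE_IMPL(4, 7) evaluates to
+ * ((1 << (7 - 4 + 1)) - 1) << 4 == 0xF0, i.e. bits 4..7 set.
+ */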
+
+/*! Mask macros for use with RGXCtrlHWPerf() API.
+ */
+#define RGX_HWPERF_EVENT_MASK_NONE          (IMG_UINT64_C(0x0000000000000000))
+#define RGX_HWPERF_EVENT_MASK_DEFAULT       (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_FWACT) | \
+                                             RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_PWR_CHG) | \
+                                             RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CLKS_CHG))
+#define RGX_HWPERF_EVENT_MASK_ALL           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+
+/*! HWPerf Firmware event masks
+ * @par
+ * All FW Start/End/Debug (SED) events. */
+#define RGX_HWPERF_EVENT_MASK_FW_SED    (MASK_RANGE(RGX_HWPERF_FW_EVENT_RANGE))
+
+#define RGX_HWPERF_EVENT_MASK_FW_UFO    (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO))
+#define RGX_HWPERF_EVENT_MASK_FW_CSW    (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CSW_START) |\
+                                          RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CSW_FINISHED))
+/*! All FW events. */
+#define RGX_HWPERF_EVENT_MASK_ALL_FW    (RGX_HWPERF_EVENT_MASK_FW_SED |\
+                                          RGX_HWPERF_EVENT_MASK_FW_UFO |\
+                                          RGX_HWPERF_EVENT_MASK_FW_CSW)
+
+/*! HW Periodic events (1ms interval). */
+#define RGX_HWPERF_EVENT_MASK_HW_PERIODIC   (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PERIODIC))
+/*! All HW Kick/Finish events. */
+#define RGX_HWPERF_EVENT_MASK_HW_KICKFINISH ((MASK_RANGE(RGX_HWPERF_HW_EVENT_RANGE0) |\
+                                               MASK_RANGE(RGX_HWPERF_HW_EVENT_RANGE1)) &\
+                                              ~(RGX_HWPERF_EVENT_MASK_HW_PERIODIC))
+
+#define RGX_HWPERF_EVENT_MASK_ALL_HW        (RGX_HWPERF_EVENT_MASK_HW_KICKFINISH |\
+                                              RGX_HWPERF_EVENT_MASK_HW_PERIODIC)
+
+#define RGX_HWPERF_EVENT_MASK_ALL_PWR_EST   (MASK_RANGE(RGX_HWPERF_PWR_EST_RANGE))
+
+#define RGX_HWPERF_EVENT_MASK_ALL_PWR       (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CLKS_CHG) |\
+                                              RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_GPU_STATE_CHG) |\
+                                              RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_PWR_CHG))
+
+/*! HWPerf Host event masks
+ */
+#define RGX_HWPERF_EVENT_MASK_HOST_WORK_ENQ  (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_ENQ))
+#define RGX_HWPERF_EVENT_MASK_HOST_ALL_UFO   (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_UFO))
+#define RGX_HWPERF_EVENT_MASK_HOST_ALL_PWR   (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_CLK_SYNC))
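+/* Illustrative sketch only: composing an event mask from the macros above,
+ * e.g. for the RGXCtrlHWPerf() API mentioned earlier. The variable name is
+ * hypothetical.
+ *
+ *     IMG_UINT64 ui64FwEventMask = RGX_HWPERF_EVENT_MASK_DEFAULT |
+ *                                  RGX_HWPERF_EVENT_MASK_ALL_HW;
+ */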
+
+
+/*! Type used in the RGX API RGXConfigMuxHWPerfCounters() */
+typedef struct
+{
+       /*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */
+       IMG_UINT16 ui16BlockID;
+
+       /*! 4 or 6 LSBs used to select counters to configure in this block. */
+       IMG_UINT8  ui8CounterSelect;
+
+       /*! 4 or 6 LSBs used as MODE bits for the counters in the group. */
+       IMG_UINT8  ui8Mode;
+
+       /*! 5 or 6 LSBs used as the GROUP_SELECT value for the counter. */
+       IMG_UINT8  aui8GroupSelect[RGX_CNTBLK_MUX_COUNTERS_MAX];
+
+       /*! 16 LSBs used as the BIT_SELECT value for the counter. */
+       IMG_UINT16 aui16BitSelect[RGX_CNTBLK_MUX_COUNTERS_MAX];
+
+       /*! 14 LSBs used as the BATCH_MAX value for the counter. */
+       IMG_UINT32 aui32BatchMax[RGX_CNTBLK_MUX_COUNTERS_MAX];
+
+       /*! 14 LSBs used as the BATCH_MIN value for the counter. */
+       IMG_UINT32 aui32BatchMin[RGX_CNTBLK_MUX_COUNTERS_MAX];
+} UNCACHED_ALIGN RGX_HWPERF_CONFIG_MUX_CNTBLK;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CONFIG_MUX_CNTBLK);
+
+/*! Type used in the RGX API RGXConfigHWPerfCounters() */
+typedef struct
+{
+       /*! Reserved for future use */
+       IMG_UINT32 ui32Reserved;
+
+       /*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */
+       IMG_UINT16 ui16BlockID;
+
+       /*! Number of configured counters within this block */
+       IMG_UINT16 ui16NumCounters;
+
+       /*! Counter register values */
+       IMG_UINT16 ui16Counters[RGX_CNTBLK_COUNTERS_MAX];
+} UNCACHED_ALIGN RGX_HWPERF_CONFIG_CNTBLK;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CONFIG_CNTBLK);
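+/* Illustrative sketch only: populating one counter block configuration entry.
+ * The block and counter register values below are arbitrary placeholders, not
+ * recommended settings.
+ *
+ *     RGX_HWPERF_CONFIG_CNTBLK sBlkCfg = {0};
+ *     sBlkCfg.ui16BlockID     = RGX_CNTBLK_ID_TA;
+ *     sBlkCfg.ui16NumCounters = 1;
+ *     sBlkCfg.ui16Counters[0] = 0;    // counter register value, device-specific
+ */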
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* RGX_HWPERF_H_ */
+
+/******************************************************************************
+ End of file
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/include/rogue/rgx_options.h b/drivers/gpu/drm/img/img-rogue/include/rogue/rgx_options.h
new file mode 100644 (file)
index 0000000..91fc652
--- /dev/null
@@ -0,0 +1,304 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX build options
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* Each build option listed here is packed into a dword which provides up to
+ *  log2(RGX_BUILD_OPTIONS_MASK_KM + 1) flags for KM and
+ *  (32 - log2(RGX_BUILD_OPTIONS_MASK_KM + 1)) flags for UM.
+ * The corresponding bit is set if the build option was enabled at compile
+ * time.
+ *
+ * In order to extract the enabled build flags the INTERNAL_TEST switch should
+ * be enabled in a client program which includes this header. Then the client
+ * can test specific build flags by reading the bit value at
+ *  ##OPTIONNAME##_SET_OFFSET
+ * in RGX_BUILD_OPTIONS_KM or RGX_BUILD_OPTIONS.
+ *
+ * IMPORTANT: add new options to unused bits or define a new dword
+ * (e.g. RGX_BUILD_OPTIONS_KM2 or RGX_BUILD_OPTIONS2) so that the bitfield
+ * remains backwards compatible.
+ */
+
+#ifndef RGX_OPTIONS_H
+#define RGX_OPTIONS_H
+
+#define RGX_BUILD_OPTIONS_MASK_KM 0x0000FFFFUL
+
+#define NO_HARDWARE_OPTION     "NO_HARDWARE  "
+#if defined(NO_HARDWARE) || defined(INTERNAL_TEST)
+       #define NO_HARDWARE_SET_OFFSET  OPTIONS_BIT0
+       #define OPTIONS_BIT0            (0x1UL << 0)
+       #if OPTIONS_BIT0 > RGX_BUILD_OPTIONS_MASK_KM
+       #error "Bit exceeds reserved range"
+       #endif
+#else
+       #define OPTIONS_BIT0            0x0UL
+#endif /* NO_HARDWARE */
+
+#define PDUMP_OPTION   "PDUMP  "
+#if defined(PDUMP) || defined(INTERNAL_TEST)
+       #define PDUMP_SET_OFFSET        OPTIONS_BIT1
+       #define OPTIONS_BIT1            (0x1UL << 1)
+       #if OPTIONS_BIT1 > RGX_BUILD_OPTIONS_MASK_KM
+       #error "Bit exceeds reserved range"
+       #endif
+#else
+       #define OPTIONS_BIT1            0x0UL
+#endif /* PDUMP */
+
+/* No longer used */
+#define INTERNAL_TEST_OPTION   "INTERNAL_TEST  "
+#if defined(INTERNAL_TEST)
+       #define UNUSED_SET_OFFSET       OPTIONS_BIT2
+       #define OPTIONS_BIT2            (0x1UL << 2)
+       #if OPTIONS_BIT2 > RGX_BUILD_OPTIONS_MASK_KM
+       #error "Bit exceeds reserved range"
+       #endif
+#else
+       #define OPTIONS_BIT2            0x0UL
+#endif
+
+/* No longer used */
+#define UNUSED_OPTION  " "
+#if defined(INTERNAL_TEST)
+       #define OPTIONS_BIT3            (0x1UL << 3)
+       #define INTERNAL_TEST_OPTION    "INTERNAL_TEST  "
+       #if OPTIONS_BIT3 > RGX_BUILD_OPTIONS_MASK_KM
+       #error "Bit exceeds reserved range"
+       #endif
+#else
+       #define OPTIONS_BIT3            0x0UL
+#endif
+
+#define SUPPORT_RGX_OPTION     " "
+#if defined(SUPPORT_RGX) || defined(INTERNAL_TEST)
+       #define SUPPORT_RGX_SET_OFFSET  OPTIONS_BIT4
+       #define OPTIONS_BIT4            (0x1UL << 4)
+       #if OPTIONS_BIT4 > RGX_BUILD_OPTIONS_MASK_KM
+       #error "Bit exceeds reserved range"
+       #endif
+#else
+       #define OPTIONS_BIT4            0x0UL
+#endif /* SUPPORT_RGX */
+
+#define SUPPORT_SECURE_EXPORT_OPTION   "SECURE_EXPORTS  "
+#if defined(SUPPORT_SECURE_EXPORT) || defined(INTERNAL_TEST)
+       #define SUPPORT_SECURE_EXPORT_SET_OFFSET        OPTIONS_BIT5
+       #define OPTIONS_BIT5            (0x1UL << 5)
+       #if OPTIONS_BIT5 > RGX_BUILD_OPTIONS_MASK_KM
+       #error "Bit exceeds reserved range"
+       #endif
+#else
+       #define OPTIONS_BIT5            0x0UL
+#endif /* SUPPORT_SECURE_EXPORT */
+
+#define SUPPORT_INSECURE_EXPORT_OPTION "INSECURE_EXPORTS  "
+#if defined(SUPPORT_INSECURE_EXPORT) || defined(INTERNAL_TEST)
+       #define SUPPORT_INSECURE_EXPORT_SET_OFFSET      OPTIONS_BIT6
+       #define OPTIONS_BIT6            (0x1UL << 6)
+       #if OPTIONS_BIT6 > RGX_BUILD_OPTIONS_MASK_KM
+       #error "Bit exceeds reserved range"
+       #endif
+#else
+       #define OPTIONS_BIT6            0x0UL
+#endif /* SUPPORT_INSECURE_EXPORT */
+
+#define SUPPORT_VFP_OPTION     "VFP  "
+#if defined(SUPPORT_VFP) || defined(INTERNAL_TEST)
+       #define SUPPORT_VFP_SET_OFFSET  OPTIONS_BIT7
+       #define OPTIONS_BIT7            (0x1UL << 7)
+       #if OPTIONS_BIT7 > RGX_BUILD_OPTIONS_MASK_KM
+       #error "Bit exceeds reserved range"
+       #endif
+#else
+       #define OPTIONS_BIT7            0x0UL
+#endif /* SUPPORT_VFP */
+
+#define SUPPORT_WORKLOAD_ESTIMATION_OPTION     "WORKLOAD_ESTIMATION  "
+#if defined(SUPPORT_WORKLOAD_ESTIMATION) || defined(INTERNAL_TEST)
+       #define SUPPORT_WORKLOAD_ESTIMATION_OFFSET      OPTIONS_BIT8
+       #define OPTIONS_BIT8            (0x1UL << 8)
+       #if OPTIONS_BIT8 > RGX_BUILD_OPTIONS_MASK_KM
+       #error "Bit exceeds reserved range"
+       #endif
+#else
+       #define OPTIONS_BIT8            0x0UL
+#endif /* SUPPORT_WORKLOAD_ESTIMATION */
+#define OPTIONS_WORKLOAD_ESTIMATION_MASK       (0x1UL << 8)
+
+#define SUPPORT_PDVFS_OPTION   "PDVFS  "
+#if defined(SUPPORT_PDVFS) || defined(INTERNAL_TEST)
+       #define SUPPORT_PDVFS_OFFSET    OPTIONS_BIT9
+       #define OPTIONS_BIT9            (0x1UL << 9)
+       #if OPTIONS_BIT9 > RGX_BUILD_OPTIONS_MASK_KM
+       #error "Bit exceeds reserved range"
+       #endif
+#else
+       #define OPTIONS_BIT9            0x0UL
+#endif /* SUPPORT_PDVFS */
+#define OPTIONS_PDVFS_MASK     (0x1UL << 9)
+
+#define DEBUG_OPTION   "DEBUG  "
+#if defined(DEBUG) || defined(INTERNAL_TEST)
+       #define DEBUG_SET_OFFSET        OPTIONS_BIT10
+       #define OPTIONS_BIT10           (0x1UL << 10)
+       #if OPTIONS_BIT10 > RGX_BUILD_OPTIONS_MASK_KM
+       #error "Bit exceeds reserved range"
+       #endif
+#else
+       #define OPTIONS_BIT10           0x0UL
+#endif /* DEBUG */
+/* The bit position of this should be the same as DEBUG_SET_OFFSET option
+ * when defined.
+ */
+#define OPTIONS_DEBUG_MASK     (0x1UL << 10)
+
+#define SUPPORT_BUFFER_SYNC_OPTION     "BUFFER_SYNC  "
+#if defined(SUPPORT_BUFFER_SYNC) || defined(INTERNAL_TEST)
+       #define SUPPORT_BUFFER_SYNC_SET_OFFSET  OPTIONS_BIT11
+       #define OPTIONS_BIT11           (0x1UL << 11)
+       #if OPTIONS_BIT11 > RGX_BUILD_OPTIONS_MASK_KM
+       #error "Bit exceeds reserved range"
+       #endif
+#else
+       #define OPTIONS_BIT11           0x0UL
+#endif /* SUPPORT_BUFFER_SYNC */
+
+#define SUPPORT_AUTOVZ_OPTION  "AUTOVZ  "
+#if defined(SUPPORT_AUTOVZ)
+       #define SUPPORT_AUTOVZ_OFFSET OPTIONS_BIT12
+       #define OPTIONS_BIT12     (0x1UL << 12)
+       #if OPTIONS_BIT12 > RGX_BUILD_OPTIONS_MASK_KM
+       #error "Bit exceeds reserved range"
+       #endif
+#else
+       #define OPTIONS_BIT12     0x0UL
+#endif /* SUPPORT_AUTOVZ */
+
+#define SUPPORT_AUTOVZ_HW_REGS_OPTION  "AUTOVZ_HW_REGS  "
+#if defined(SUPPORT_AUTOVZ_HW_REGS)
+       #define SUPPORT_AUTOVZ_HW_REGS_OFFSET OPTIONS_BIT13
+       #define OPTIONS_BIT13     (0x1UL << 13)
+       #if OPTIONS_BIT13 > RGX_BUILD_OPTIONS_MASK_KM
+       #error "Bit exceeds reserved range"
+       #endif
+#else
+       #define OPTIONS_BIT13     0x0UL
+#endif /* SUPPORT_AUTOVZ_HW_REGS */
+
+#define RGX_FW_IRQ_OS_COUNTERS_OPTION  "FW_IRQ_OS_COUNTERS  "
+#if defined(RGX_FW_IRQ_OS_COUNTERS) || defined(INTERNAL_TEST)
+       #define SUPPORT_FW_IRQ_REG_COUNTERS             OPTIONS_BIT14
+       #define OPTIONS_BIT14           (0x1UL << 14)
+       #if OPTIONS_BIT14 > RGX_BUILD_OPTIONS_MASK_KM
+       #error "Bit exceeds reserved range"
+       #endif
+#else
+       #define OPTIONS_BIT14           0x0UL
+#endif /* RGX_FW_IRQ_OS_COUNTERS */
+
+#define VALIDATION_EN_MASK     (0x1UL << 15)
+#define SUPPORT_VALIDATION_OPTION      "VALIDATION  "
+#if defined(SUPPORT_VALIDATION)
+       #define SUPPORT_VALIDATION_OFFSET               OPTIONS_BIT15
+       #define OPTIONS_BIT15           (0x1UL << 15)
+       #if OPTIONS_BIT15 > RGX_BUILD_OPTIONS_MASK_KM
+       #error "Bit exceeds reserved range"
+       #endif
+#else
+       #define OPTIONS_BIT15           0x0UL
+#endif /* SUPPORT_VALIDATION */
+
+#define RGX_BUILD_OPTIONS_KM   \
+       (OPTIONS_BIT0  |\
+        OPTIONS_BIT1  |\
+        OPTIONS_BIT2  |\
+        OPTIONS_BIT3  |\
+        OPTIONS_BIT4  |\
+        OPTIONS_BIT6  |\
+        OPTIONS_BIT7  |\
+        OPTIONS_BIT8  |\
+        OPTIONS_BIT9  |\
+        OPTIONS_BIT10 |\
+        OPTIONS_BIT11 |\
+        OPTIONS_BIT12 |\
+        OPTIONS_BIT13 |\
+        OPTIONS_BIT14 |\
+        OPTIONS_BIT15)
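+/* Illustrative sketch only, following the scheme described at the top of this
+ * file: a client built with INTERNAL_TEST (so the *_SET_OFFSET macros are
+ * defined) can test a specific flag in a build-options word, for example one
+ * reported by the KM driver. ``ui32KmOptions`` is a hypothetical variable.
+ *
+ *     if ((ui32KmOptions & DEBUG_SET_OFFSET) != 0UL)
+ *     {
+ *             // the KM components were built with DEBUG
+ *     }
+ */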
+
+#define RGX_BUILD_OPTIONS_LIST \
+       { \
+               NO_HARDWARE_OPTION, \
+               PDUMP_OPTION, \
+               INTERNAL_TEST_OPTION, \
+               UNUSED_OPTION, \
+               SUPPORT_RGX_OPTION, \
+               SUPPORT_SECURE_EXPORT_OPTION, \
+               SUPPORT_INSECURE_EXPORT_OPTION, \
+               SUPPORT_VFP_OPTION, \
+               SUPPORT_WORKLOAD_ESTIMATION_OPTION, \
+               SUPPORT_PDVFS_OPTION, \
+               DEBUG_OPTION, \
+               SUPPORT_BUFFER_SYNC_OPTION, \
+               SUPPORT_AUTOVZ_OPTION, \
+               SUPPORT_AUTOVZ_HW_REGS_OPTION, \
+               RGX_FW_IRQ_OS_COUNTERS_OPTION, \
+               SUPPORT_VALIDATION_OPTION \
+       }
+
+#define RGX_BUILD_OPTIONS_MASK_FW \
+       (RGX_BUILD_OPTIONS_MASK_KM & \
+        ~OPTIONS_BIT11)
+
+#define OPTIONS_BIT31          (0x1UL << 31)
+#if OPTIONS_BIT31 <= RGX_BUILD_OPTIONS_MASK_KM
+#error "Bit exceeds reserved range"
+#endif
+#define SUPPORT_PERCONTEXT_FREELIST_SET_OFFSET OPTIONS_BIT31
+
+#define RGX_BUILD_OPTIONS (RGX_BUILD_OPTIONS_KM | OPTIONS_BIT31)
+
+#define OPTIONS_STRICT (RGX_BUILD_OPTIONS &                  \
+                        ~(OPTIONS_DEBUG_MASK               | \
+                          OPTIONS_WORKLOAD_ESTIMATION_MASK | \
+                          OPTIONS_PDVFS_MASK))
+
+#endif /* RGX_OPTIONS_H */
diff --git a/drivers/gpu/drm/img/img-rogue/include/rogue/rgxheapconfig.h b/drivers/gpu/drm/img/img-rogue/include/rogue/rgxheapconfig.h
new file mode 100644 (file)
index 0000000..abb6308
--- /dev/null
@@ -0,0 +1,290 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Device virtual memory map
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Memory heaps device specific configuration
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXHEAPCONFIG_H
+#define RGXHEAPCONFIG_H
+
+#include "rgxdefs_km.h"
+
+
+#define RGX_HEAP_SIZE_4KiB       IMG_UINT64_C(0x0000001000)
+#define RGX_HEAP_SIZE_64KiB      IMG_UINT64_C(0x0000010000)
+#define RGX_HEAP_SIZE_256KiB     IMG_UINT64_C(0x0000040000)
+
+#define RGX_HEAP_SIZE_1MiB       IMG_UINT64_C(0x0000100000)
+#define RGX_HEAP_SIZE_2MiB       IMG_UINT64_C(0x0000200000)
+#define RGX_HEAP_SIZE_4MiB       IMG_UINT64_C(0x0000400000)
+#define RGX_HEAP_SIZE_16MiB      IMG_UINT64_C(0x0001000000)
+#define RGX_HEAP_SIZE_256MiB     IMG_UINT64_C(0x0010000000)
+
+#define RGX_HEAP_SIZE_1GiB       IMG_UINT64_C(0x0040000000)
+#define RGX_HEAP_SIZE_2GiB       IMG_UINT64_C(0x0080000000)
+#define RGX_HEAP_SIZE_4GiB       IMG_UINT64_C(0x0100000000)
+#define RGX_HEAP_SIZE_16GiB      IMG_UINT64_C(0x0400000000)
+#define RGX_HEAP_SIZE_32GiB      IMG_UINT64_C(0x0800000000)
+#define RGX_HEAP_SIZE_64GiB      IMG_UINT64_C(0x1000000000)
+#define RGX_HEAP_SIZE_128GiB     IMG_UINT64_C(0x2000000000)
+#define RGX_HEAP_SIZE_256GiB     IMG_UINT64_C(0x4000000000)
+#define RGX_HEAP_SIZE_512GiB     IMG_UINT64_C(0x8000000000)
+
+/*
+       RGX Device Virtual Address Space Definitions
+
+       This file defines the RGX virtual address heaps that are used in
+       application memory contexts. It also shows where the Firmware memory heap
+       fits into this, but the firmware heap is only ever created in the
+       Services KM/server component.
+
+       RGX_PDSCODEDATA_HEAP_BASE and RGX_USCCODE_HEAP_BASE will be programmed,
+       on a global basis, into RGX_CR_PDS_EXEC_BASE and RGX_CR_USC_CODE_BASE_*
+       respectively. Therefore if clients use multiple configs they must still
+       be consistent with their definitions for these heaps.
+
+       Shared virtual memory (GENERAL_SVM) support requires half of the address
+       space (512 GiB) be reserved for SVM allocations to mirror application CPU
+       addresses. However, if the BRN_65273 WA is active, the SVM heap is
+       disabled. This is reflected in the device connection capability bits
+       returned to user space.
+
+       The GENERAL non-SVM region is 512 GiB to 768 GiB and is shared between the
+       general (4KiB) heap and the general non-4K heap. The first 128 GiB is used
+       for the GENERAL_HEAP (4KiB) and the last 32 GiB is used for the
+       GENERAL_NON4K_HEAP. The non-4K heap has a default page-size of 16K.
+       AppHint PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE can be used to force it
+       to one of these values: 4K, 64K, 256K, 1M, 2M.
+
+       The heaps defined for BRN_65273 _replace_ the non-BRN equivalents below
+       when this BRN WA is active on affected cores. This is different to most
+       other BRNs and hence has been given its own header file for clarity,
+       see below. This is a special case; other BRNs that need one or two
+       additional heaps should be added to this file, like BRN_63142 below.
+       NOTE: all regular heaps below that are larger than 1 GiB require a BRN_65273 WA heap.
+
+       Base addresses have to be a multiple of 4 MiB.
+       Heaps must not start at 0x0000000000, as this address is reserved for
+       internal use within the device memory layer.
+       Range comments (those starting in column 0 below) act as section headings
+       and sit above the heaps in that range; often they give the reserved size
+       of the heap within the range.
+*/
+
+/* This BRN requires a different virtual memory map from the standard one
+ * defined in this file below. Hence the alternative heap definitions for this
+ * BRN are provided in a separate file for clarity. */
+#include "rgxheapconfig_65273.h"
+
+
+/* 0x00_0000_0000 ************************************************************/
+
+/* 0x00_0000_0000 - 0x00_0040_0000 **/
+       /* 0 MiB to 4 MiB, size of 4 MiB : RESERVED **/
+
+       /* BRN_65273 TQ3DPARAMETERS base 0x0000010000 */
+       /* BRN_65273 GENERAL base        0x65C0000000 */
+       /* BRN_65273 GENERAL_NON4K base  0x73C0000000 */
+
+/* 0x00_0040_0000 - 0x7F_FFC0_0000 **/
+       /* 4 MiB to 512 GiB, size of 512 GiB less 4 MiB : GENERAL_SVM_HEAP **/
+       #define RGX_GENERAL_SVM_HEAP_BASE           IMG_UINT64_C(0x0000400000)
+       #define RGX_GENERAL_SVM_HEAP_SIZE           (RGX_HEAP_SIZE_512GiB - RGX_HEAP_SIZE_4MiB)
+
+
+/* 0x80_0000_0000 ************************************************************/
+
+/* 0x80_0000_0000 - 0x9F_FFFF_FFFF **/
+       /* 512 GiB to 640 GiB, size of 128 GiB : GENERAL_HEAP **/
+       #define RGX_GENERAL_HEAP_BASE               IMG_UINT64_C(0x8000000000)
+       #define RGX_GENERAL_HEAP_SIZE               RGX_HEAP_SIZE_128GiB
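+       /* Illustrative note only: the range comment above is simply
+        * BASE .. BASE + SIZE - 1, i.e.
+        * 0x8000000000 + RGX_HEAP_SIZE_128GiB - 1 == 0x9FFFFFFFFF. */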
+
+       /* BRN_65273 PDSCODEDATA base    0xA800000000 */
+
+/* 0xA0_0000_0000 - 0xAF_FFFF_FFFF **/
+       /* 640 GiB to 704 GiB, size of 64 GiB : FREE **/
+
+/* B0_0000_0000 - 0xB7_FFFF_FFFF **/
+       /* 704 GiB to 736 GiB, size of 32 GiB : FREE **/
+
+       /* BRN_65273 USCCODE base        0xBA00000000 */
+
+/* 0xB8_0000_0000 - 0xBF_FFFF_FFFF **/
+       /* 736 GiB to 768 GiB, size of 32 GiB : GENERAL_NON4K_HEAP **/
+       #define RGX_GENERAL_NON4K_HEAP_BASE         IMG_UINT64_C(0xB800000000)
+       #define RGX_GENERAL_NON4K_HEAP_SIZE         RGX_HEAP_SIZE_32GiB
+
+
+/* 0xC0_0000_0000 ************************************************************/
+
+/* 0xC0_0000_0000 - 0xD9_FFFF_FFFF **/
+       /* 768 GiB to 872 GiB, size of 104 GiB : FREE **/
+
+/* 0xDA_0000_0000 - 0xDA_FFFF_FFFF **/
+       /* 872 GiB to 876 GiB, size of 4 GiB : PDSCODEDATA_HEAP **/
+       #define RGX_PDSCODEDATA_HEAP_BASE           IMG_UINT64_C(0xDA00000000)
+       #define RGX_PDSCODEDATA_HEAP_SIZE           RGX_HEAP_SIZE_4GiB
+
+/* 0xDB_0000_0000 - 0xDB_FFFF_FFFF **/
+       /* 876 GiB to 880 GiB, size of 256 MiB (reserved 4GiB) : BRN **/
+       /* HWBRN63142 workaround requires Region Header memory to be at the top
+          of a 16GiB-aligned range, so that when masked with 0x03FFFFFFFF the
+          address avoids aliasing PB addresses. Start at 879.75GiB. Size of 256MiB. */
+       #define RGX_RGNHDR_BRN_63142_HEAP_BASE      IMG_UINT64_C(0xDBF0000000)
+       #define RGX_RGNHDR_BRN_63142_HEAP_SIZE      RGX_HEAP_SIZE_256MiB
+
+/* 0xDC_0000_0000 - 0xDF_FFFF_FFFF **/
+       /* 880 GiB to 896 GiB, size of 16 GiB : FREE **/
+
+/* 0xE0_0000_0000 - 0xE0_FFFF_FFFF **/
+       /* 896 GiB to 900 GiB, size of 4 GiB : USCCODE_HEAP **/
+       #define RGX_USCCODE_HEAP_BASE               IMG_UINT64_C(0xE000000000)
+       #define RGX_USCCODE_HEAP_SIZE               RGX_HEAP_SIZE_4GiB
+
+/* 0xE1_0000_0000 - 0xE1_BFFF_FFFF **/
+       /* 900 GiB to 903 GiB, size of 3 GiB : RESERVED **/
+
+/* 0xE1_C000_0000 - 0xE1_FFFF_FFFF **/
+       /* 903 GiB to 904 GiB, reserved 1 GiB, : FIRMWARE_HEAP **/
+
+       /* Firmware heaps defined in rgx_heap_firmware.h as they are not present in
+          application memory contexts, see:
+           RGX_FIRMWARE_RAW_HEAP_BASE
+           RGX_FIRMWARE_RAW_HEAP_SIZE
+          See header for other sub-heaps details
+       */
+
+/* 0xE2_0000_0000 - 0xE3_FFFF_FFFF **/
+       /* 904 GiB to 912 GiB, size of 8 GiB : FREE **/
+
+       /* BRN_65273 VISIBILITY_TEST base 0xE400000000 */
+
+/* 0xE4_0000_0000 - 0xE7_FFFF_FFFF **/
+       /* 912 GiB to 928 GiB, size 16 GiB : TQ3DPARAMETERS_HEAP **/
+       /* Aligned to match RGX_CR_ISP_PIXEL_BASE at 16 GiB */
+       #define RGX_TQ3DPARAMETERS_HEAP_BASE        IMG_UINT64_C(0xE400000000)
+       #define RGX_TQ3DPARAMETERS_HEAP_SIZE        RGX_HEAP_SIZE_16GiB
+
+/* 0xE8_0000_0000 - 0xE8_FFFF_FFFF **/
+       /* 928 GiB to 932 GiB, size of 4 GiB : FREE **/
+
+/* 0xE9_0000_0000 - 0xE9_3FFF_FFFF **/
+       /* 932 GiB to 933 GiB, size of 1 GiB : VK_CAPT_REPLAY_HEAP **/
+       #define RGX_VK_CAPT_REPLAY_HEAP_BASE        IMG_UINT64_C(0xE900000000)
+       #define RGX_VK_CAPT_REPLAY_HEAP_SIZE        RGX_HEAP_SIZE_1GiB
+
+/* 0xE9_4000_0000 - 0xE9_FFFF_FFFF **/
+       /* 933 GiB to 936 GiB, size of 3 GiB : FREE **/
+
+/* 0xEA_0000_0000 - 0xEA_0000_0FFF **/
+       /* 936 GiB to 937 GiB, size of min heap size : SIGNALS_HEAP **/
+       /* CDM Signals heap (31 signals less one reserved for Services).
+        * Size 960B rounded up to minimum heap size */
+       #define RGX_SIGNALS_HEAP_BASE               IMG_UINT64_C(0xEA00000000)
+       #define RGX_SIGNALS_HEAP_SIZE               DEVMEM_HEAP_MINIMUM_SIZE
+
+/* 0xEA_4000_0000 - 0xEA_FFFF_FFFF **/
+       /* 937 GiB to 940 GiB, size of 3 GiB : FREE **/
+
+/* 0xEB_0000_0000 - 0xEB_FFFF_FFFF **/
+       /* 940 GiB to 944 GiB, size of 4 GiB : RESERVED VOLCANIC  **/
+
+/* 0xEC_0000_0000 - 0xEC_001F_FFFF **/
+       /* 944 GiB to 945 GiB, size 2 MiB : FBCDC_HEAP **/
+       #define RGX_FBCDC_HEAP_BASE                 IMG_UINT64_C(0xEC00000000)
+       #define RGX_FBCDC_HEAP_SIZE                 RGX_HEAP_SIZE_2MiB
+
+/* 0xEC_4000_0000 - 0xEC_401F_FFFF **/
+       /* 945 GiB to 946 GiB, size 2 MiB : FBCDC_LARGE_HEAP **/
+       #define RGX_FBCDC_LARGE_HEAP_BASE           IMG_UINT64_C(0xEC40000000)
+       #define RGX_FBCDC_LARGE_HEAP_SIZE           RGX_HEAP_SIZE_2MiB
+
+/* 0xEC_8000_0000 - 0xED_FFFF_FFFF **/
+       /* 946 GiB to 952 GiB, size of 6 GiB : RESERVED VOLCANIC  **/
+
+/* 0xEE_0000_0000 - 0xEE_3FFF_FFFF **/
+       /* 952 GiB to 953 GiB, size of 1 GiB : CMP_MISSION_RMW_HEAP **/
+       #define RGX_CMP_MISSION_RMW_HEAP_BASE       IMG_UINT64_C(0xEE00000000)
+       #define RGX_CMP_MISSION_RMW_HEAP_SIZE       RGX_HEAP_SIZE_1GiB
+
+/* 0xEE_4000_0000 - 0xEE_FFFF_FFFF **/
+       /* 953 GiB to 956 GiB, size of 3 GiB : RESERVED **/
+
+/* 0xEF_0000_0000 - 0xEF_3FFF_FFFF **/
+       /* 956 GiB to 957 GiB, size of 1 GiB : CMP_SAFETY_RMW_HEAP **/
+       #define RGX_CMP_SAFETY_RMW_HEAP_BASE        IMG_UINT64_C(0xEF00000000)
+       #define RGX_CMP_SAFETY_RMW_HEAP_SIZE        RGX_HEAP_SIZE_1GiB
+
+/* 0xEF_4000_0000 - 0xEF_FFFF_FFFF **/
+       /* 957 GiB to 960 GiB, size of 3 GiB : RESERVED **/
+
+/* 0xF0_0000_0000 - 0xF0_FFFF_FFFF **/
+       /* 960 GiB to 964 GiB, size of 4 GiB : TEXTURE_STATE_HEAP (36-bit aligned) */
+       #define RGX_TEXTURE_STATE_HEAP_BASE         IMG_UINT64_C(0xF000000000)
+       #define RGX_TEXTURE_STATE_HEAP_SIZE         RGX_HEAP_SIZE_4GiB
+
+/* 0xF1_0000_0000 - 0xF1_FFFF_FFFF **/
+       /* 964 GiB to 968 GiB, size of 4 GiB : FREE **/
+
+/* 0xF2_0000_0000 - 0xF2_001F_FFFF **/
+       /* 968 GiB to 969 GiB, size of 2 MiB : VISIBILITY_TEST_HEAP **/
+       #define RGX_VISIBILITY_TEST_HEAP_BASE       IMG_UINT64_C(0xF200000000)
+       #define RGX_VISIBILITY_TEST_HEAP_SIZE       RGX_HEAP_SIZE_2MiB
+
+/* 0xF2_4000_0000 - 0xF2_FFFF_FFFF **/
+       /* 969 GiB to 972 GiB, size of 3 GiB : FREE **/
+
+       /* BRN_65273 MMU_INIA base 0xF800000000 */
+       /* BRN_65273 MMU_INIB base 0xF900000000 */
+
+/* 0xF3_0000_0000 - 0xFF_FFFF_FFFF **/
+       /* 972 GiB to 1024 GiB, size of 52 GiB : FREE **/
+
+
+
+/* 0xFF_FFFF_FFFF ************************************************************/
+
+/*     End of RGX Device Virtual Address Space definitions */
+
+#endif /* RGXHEAPCONFIG_H */
+
+/******************************************************************************
+ End of file (rgxheapconfig.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/include/rogue/rgxheapconfig_65273.h b/drivers/gpu/drm/img/img-rogue/include/rogue/rgxheapconfig_65273.h
new file mode 100644 (file)
index 0000000..31f90fe
--- /dev/null
@@ -0,0 +1,124 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Device virtual memory map for BRN_65273.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Memory heaps device specific configuration
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXHEAPCONFIG_65273_H
+#define RGXHEAPCONFIG_65273_H
+
+/*
+       RGX Device Virtual Address Space Definitions
+
+       This file defines the RGX virtual address replacement heaps that are used
+       in application memory contexts for BRN_65273.
+
+       The heaps defined for BRN_65273 _replace_ the non-BRN equivalents when this
+       BRN WA is active on affected cores. This is different to most other BRNs
+       and hence has been given its own header file for clarity. The SVM_HEAP is
+       also disabled and unavailable when the WA is active. This is reflected
+       in the device connection capability bits returned to user space.
+       NOTE: All regular heaps in rgxheapconfig.h greater than 1GB require
+             a BRN_65273 WA heap.
+
+       Base addresses have to be a multiple of 4 MiB.
+       Heaps must not start at 0x0000000000, as this address is reserved for
+       internal use within the device memory layer.
+       Range comments (those starting in column 0 below) act as section headings
+       and sit above the heaps in that range.
+*/
+
+
+/* 0x00_0000_0000 ************************************************************/
+
+/* 0x00_0001_0000 - 0x00_3FFF_FFFF **/
+       /* HWBRN65273 workaround requires TQ memory to start at 64 KiB and use a
+        * unique single 0.99GiB PCE entry. */
+       #define RGX_TQ3DPARAMETERS_BRN_65273_HEAP_BASE  IMG_UINT64_C(0x0000010000)
+       #define RGX_TQ3DPARAMETERS_BRN_65273_HEAP_SIZE  (RGX_HEAP_SIZE_1GiB - RGX_HEAP_SIZE_64KiB)
+
+/* 0x65_C000_0000 - 0x66_3FFF_FFFF **/
+       /* HWBRN65273 workaround requires General Heap to use a unique PCE entry for each GiB in range */
+       #define RGX_GENERAL_BRN_65273_HEAP_BASE         IMG_UINT64_C(0x65C0000000)
+       #define RGX_GENERAL_BRN_65273_HEAP_SIZE         RGX_HEAP_SIZE_2GiB
+
+/* 0x73_C000_0000 - 0x74_3FFF_FFFF **/
+       /* HWBRN65273 workaround requires Non4K memory to use a unique PCE entry for each GiB in range */
+       #define RGX_GENERAL_NON4K_BRN_65273_HEAP_BASE   IMG_UINT64_C(0x73C0000000)
+       #define RGX_GENERAL_NON4K_BRN_65273_HEAP_SIZE   RGX_HEAP_SIZE_2GiB
+
+
+/* 0x80_0000_0000 ************************************************************/
+
+/* 0xA8_0000_0000 - 0xA8_3FFF_FFFF **/
+       /* HWBRN65273 workaround requires PDS memory to use a unique single 1GiB PCE entry. */
+       #define RGX_PDSCODEDATA_BRN_65273_HEAP_BASE     IMG_UINT64_C(0xA800000000)
+       #define RGX_PDSCODEDATA_BRN_65273_HEAP_SIZE     RGX_HEAP_SIZE_1GiB
+
+/* 0xBA_0000_0000 - 0xBA_3FFF_FFFF **/
+       /* HWBRN65273 workaround requires USC memory to use a unique single 1GiB PCE entry. */
+       #define RGX_USCCODE_BRN_65273_HEAP_BASE         IMG_UINT64_C(0xBA00000000)
+       #define RGX_USCCODE_BRN_65273_HEAP_SIZE         RGX_HEAP_SIZE_1GiB
+
+
+/* 0xC0_0000_0000 ************************************************************/
+
+/* 0xE4_0000_0000 - 0xE4_001F_FFFF **/
+	/* HWBRN65273 workaround requires Visibility Test memory to use a unique single 1GiB PCE entry. */
+       #define RGX_VISIBILITY_TEST_BRN_65273_HEAP_BASE IMG_UINT64_C(0xE400000000)
+       #define RGX_VISIBILITY_TEST_BRN_65273_HEAP_SIZE RGX_HEAP_SIZE_2MiB
+
+/* 0xF8_0000_0000 - 0xF9_FFFF_FFFF **/
+       /* HWBRN65273 workaround requires two Region Header buffers 4GiB apart. */
+       #define RGX_MMU_INIA_BRN_65273_HEAP_BASE        IMG_UINT64_C(0xF800000000)
+       #define RGX_MMU_INIA_BRN_65273_HEAP_SIZE        RGX_HEAP_SIZE_1GiB
+       #define RGX_MMU_INIB_BRN_65273_HEAP_BASE        IMG_UINT64_C(0xF900000000)
+       #define RGX_MMU_INIB_BRN_65273_HEAP_SIZE        RGX_HEAP_SIZE_1GiB
+
+
+/* 0xFF_FFFF_FFFF ************************************************************/
+
+/*     End of RGX Device Virtual Address Space definitions */
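A minimal sketch, not part of the patch, of how the 4MiB base-alignment rule stated above could be checked at build time; it assumes a C11 toolchain (_Static_assert) and the IMG_UINT64_C macro already used in this header.

/* Illustrative only: compile-time check of the 4MiB base-alignment rule. */
#define RGX_BRN65273_BASE_IS_4MIB_ALIGNED(base) \
	(((base) & (IMG_UINT64_C(0x400000) - 1)) == 0)

_Static_assert(RGX_BRN65273_BASE_IS_4MIB_ALIGNED(RGX_GENERAL_BRN_65273_HEAP_BASE),
               "BRN_65273 General heap base must be a multiple of 4MiB");
_Static_assert(RGX_BRN65273_BASE_IS_4MIB_ALIGNED(RGX_PDSCODEDATA_BRN_65273_HEAP_BASE),
               "BRN_65273 PDS heap base must be a multiple of 4MiB");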
+
+#endif /* RGXHEAPCONFIG_65273_H */
+
+/******************************************************************************
+ End of file (rgxheapconfig_65273.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/include/rogue/system/rgx_tc/tc_clocks.h b/drivers/gpu/drm/img/img-rogue/include/rogue/system/rgx_tc/tc_clocks.h
new file mode 100644 (file)
index 0000000..431273d
--- /dev/null
@@ -0,0 +1,158 @@
+/*************************************************************************/ /*!
+@File
+@Title          System Description Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides system-specific declarations and macros
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(TC_CLOCKS_H)
+#define TC_CLOCKS_H
+
+/*
+ * The core clock speed is passed through a multiplier depending on the TC
+ * version.
+ *
+ * On TC_ES1: Multiplier = x3, final speed = 270MHz
+ * On TC_ES2: Multiplier = x6, final speed = 540MHz
+ * On TCF5:   Multiplier = x1, final speed = 45MHz
+ *
+ *
+ * The base (unmultiplied speed) can be adjusted using a module parameter
+ * called "sys_core_clk_speed", a number in Hz.
+ * As an example:
+ *
+ * PVR_SRVKM_PARAMS="sys_core_clk_speed=60000000" /etc/init.d/rc.pvr start
+ *
+ * would result in a final core speed of 60MHz multiplied by the board's multiplier.
+ *
+ *
+ * The memory clock is unmultiplied and can be adjusted using a module
+ * parameter called "sys_mem_clk_speed"; this should be the memory clock
+ * speed in Hz.
+ * As an example:
+ *
+ * PVR_SRVKM_PARAMS="sys_mem_clk_speed=100000000" /etc/init.d/rc.pvr start
+ *
+ * would attempt to start the driver with the memory clock speed set to 100MHz.
+ *
+ *
+ * The same applies to the system interface clock speed, "sys_sysif_clk_speed".
+ * This is needed for TCF5 but not for TC_ES2/ES1.
+ * As an example:
+ *
+ * PVR_SRVKM_PARAMS="sys_sysif_clk_speed=45000000" /etc/init.d/rc.pvr start
+ *
+ * would attempt to start the driver with the system clock speed set to 45MHz.
+ *
+ *
+ * All parameters can be specified at once, e.g.,
+ * PVR_SRVKM_PARAMS="sys_mem_clk_speed=MEMORY_SPEED sys_core_clk_speed=CORE_SPEED sys_sysif_clk_speed=SYSIF_SPEED" /etc/init.d/rc.pvr start
+ */
+
+#define RGX_TC_SYS_CLOCK_SPEED         (25000000) /*!< At the moment only used for TCF5 */
+#define RGX_TC_CLOCK_MULTIPLEX    (1)
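As a concrete reading of the multiplier note above, the effective core clock is simply the base speed passed via sys_core_clk_speed scaled by the board's multiplier. A minimal sketch, illustrative only and not part of the patch, with a hypothetical helper name:

/* Illustrative only: effective core clock in Hz for a given base speed. */
static inline unsigned long rgx_tc_effective_core_clk_hz(unsigned long base_hz)
{
	/* RGX_TC_CLOCK_MULTIPLEX is 1 here; TC_ES1/TC_ES2 boards use x3/x6. */
	return base_hz * (unsigned long)RGX_TC_CLOCK_MULTIPLEX;
}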
+
+#if defined(TC_APOLLO_TCF5_22_46_54_330)
+ #undef RGX_TC_SYS_CLOCK_SPEED
+ #define RGX_TC_CORE_CLOCK_SPEED       (100000000)
+ #define RGX_TC_MEM_CLOCK_SPEED                (45000000)
+ #define RGX_TC_SYS_CLOCK_SPEED                (45000000)
+#elif defined(TC_APOLLO_TCF5_22_49_21_16) || \
+      defined(TC_APOLLO_TCF5_22_60_22_29) || \
+      defined(TC_APOLLO_TCF5_22_75_22_25)
+ #define RGX_TC_CORE_CLOCK_SPEED       (20000000)
+ #define RGX_TC_MEM_CLOCK_SPEED                (50000000)
+#elif defined(TC_APOLLO_TCF5_22_67_54_30)
+ #define RGX_TC_CORE_CLOCK_SPEED       (100000000)
+ #define RGX_TC_MEM_CLOCK_SPEED                (45000000)
+#elif defined(TC_APOLLO_TCF5_22_89_204_18)
+ #define RGX_TC_CORE_CLOCK_SPEED       (50000000)
+ #define RGX_TC_MEM_CLOCK_SPEED                (25000000)
+#elif defined(TC_APOLLO_TCF5_22_86_104_218)
+ #define RGX_TC_CORE_CLOCK_SPEED       (30000000)
+ #define RGX_TC_MEM_CLOCK_SPEED                (40000000)
+#elif defined(TC_APOLLO_TCF5_22_88_104_318)
+ #define RGX_TC_CORE_CLOCK_SPEED       (28000000)
+ #define RGX_TC_MEM_CLOCK_SPEED                (40000000)
+#elif defined(TC_APOLLO_TCF5_22_98_54_230)
+ #define RGX_TC_CORE_CLOCK_SPEED       (100000000)
+ #define RGX_TC_MEM_CLOCK_SPEED                (40000000)
+#elif defined(TC_APOLLO_TCF5_22_102_54_38)
+ #define RGX_TC_CORE_CLOCK_SPEED       (80000000)
+ #define RGX_TC_MEM_CLOCK_SPEED                (25000000)
+#elif defined(TC_APOLLO_TCF5_BVNC_NOT_SUPPORTED)
+ /* TC TCF5 (22.*) fallback frequencies */
+ #undef RGX_TC_SYS_CLOCK_SPEED
+ #define RGX_TC_CORE_CLOCK_SPEED       (20000000)
+ #define RGX_TC_MEM_CLOCK_SPEED                (50000000)
+ #define RGX_TC_SYS_CLOCK_SPEED                (25000000)
+#elif defined(TC_APOLLO_TCF5_33_8_22_1)
+ #define RGX_TC_CORE_CLOCK_SPEED       (25000000)
+ #define RGX_TC_MEM_CLOCK_SPEED                (45000000)
+#elif defined(TC_APOLLO_TCF5_REFERENCE)
+ /* TC TCF5 (Reference bitfile) */
+ #undef RGX_TC_SYS_CLOCK_SPEED
+ #define RGX_TC_CORE_CLOCK_SPEED       (50000000)
+ #define RGX_TC_MEM_CLOCK_SPEED                (50000000)
+ #define RGX_TC_SYS_CLOCK_SPEED                (45000000)
+#elif defined(TC_APOLLO_BONNIE)
+ /* TC Bonnie */
+ #define RGX_TC_CORE_CLOCK_SPEED       (18000000)
+ #define RGX_TC_MEM_CLOCK_SPEED                (65000000)
+#elif defined(TC_APOLLO_ES2)
+ /* TC ES2 */
+ #define RGX_TC_CORE_CLOCK_SPEED       (90000000)
+ #define RGX_TC_MEM_CLOCK_SPEED                (104000000)
+#elif defined(TC_ORION)
+ #define RGX_TC_CORE_CLOCK_SPEED       (40000000)
+ #define RGX_TC_MEM_CLOCK_SPEED                (100000000)
+ #define RGX_TC_SYS_CLOCK_SPEED                (25000000)
+#elif defined(TC_APOLLO_TCF5_29_19_52_202)
+ #define RGX_TC_CORE_CLOCK_SPEED       (25000000)
+ #define RGX_TC_MEM_CLOCK_SPEED                (40000000)
+#elif defined(TC_APOLLO_TCF5_29_18_204_508)
+ #define RGX_TC_CORE_CLOCK_SPEED       (15000000)
+ #define RGX_TC_MEM_CLOCK_SPEED                (35000000)
+#else
+ /* TC ES1 */
+ #define RGX_TC_CORE_CLOCK_SPEED       (90000000)
+ #define RGX_TC_MEM_CLOCK_SPEED                (65000000)
+#endif
+
+#endif /* if !defined(TC_CLOCKS_H) */
diff --git a/drivers/gpu/drm/img/img-rogue/include/services_km.h b/drivers/gpu/drm/img/img-rogue/include/services_km.h
new file mode 100644 (file)
index 0000000..91ee3b2
--- /dev/null
@@ -0,0 +1,180 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services API Kernel mode Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exported services API details
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef SERVICES_KM_H
+#define SERVICES_KM_H
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "virt_validation_defs.h"
+#endif
+
+/*! 4k page size definition */
+#define PVRSRV_4K_PAGE_SIZE                                    4096UL      /*!< Size of a 4K Page */
+#define PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT         12          /*!< Amount to shift an address by so that
+                                                          it is always page-aligned */
+/*! 16k page size definition */
+#define PVRSRV_16K_PAGE_SIZE                                   16384UL      /*!< Size of a 16K Page */
+#define PVRSRV_16K_PAGE_SIZE_ALIGNSHIFT                14          /*!< Amount to shift an address by so that
+                                                          it is always page-aligned */
+/*! 64k page size definition */
+#define PVRSRV_64K_PAGE_SIZE                                   65536UL      /*!< Size of a 64K Page */
+#define PVRSRV_64K_PAGE_SIZE_ALIGNSHIFT                16          /*!< Amount to shift an address by so that
+                                                          it is always page-aligned */
+/*! 256k page size definition */
+#define PVRSRV_256K_PAGE_SIZE                                  262144UL      /*!< Size of a 256K Page */
+#define PVRSRV_256K_PAGE_SIZE_ALIGNSHIFT               18          /*!< Amount to shift an address by so that
+                                                          it is always page-aligned */
+/*! 1MB page size definition */
+#define PVRSRV_1M_PAGE_SIZE                                    1048576UL      /*!< Size of a 1M Page */
+#define PVRSRV_1M_PAGE_SIZE_ALIGNSHIFT         20          /*!< Amount to shift an address by so that
+                                                          it is always page-aligned */
+/*! 2MB page size definition */
+#define PVRSRV_2M_PAGE_SIZE                                    2097152UL      /*!< Size of a 2M Page */
+#define PVRSRV_2M_PAGE_SIZE_ALIGNSHIFT         21          /*!< Amount to shift an address by so that
+                                                          it is always page-aligned */
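The *_ALIGNSHIFT values above are the log2 of the corresponding page sizes, so aligning an address to a page boundary is a shift (or mask). A minimal sketch, illustrative only; the helper names are hypothetical and the IMG_UINT64 type is assumed to be visible from img_types.h, as the IMG_UINT32_MAX use further down already implies.

/* Illustrative only: align a device virtual address to a 4K page boundary. */
static inline IMG_UINT64 ExampleAlignDown4K(IMG_UINT64 ui64Addr)
{
	return (ui64Addr >> PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT) << PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT;
}

static inline IMG_UINT64 ExampleAlignUp4K(IMG_UINT64 ui64Addr)
{
	return ExampleAlignDown4K(ui64Addr + PVRSRV_4K_PAGE_SIZE - 1);
}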
+
+/*!
+ * @AddToGroup SRVConnectInterfaces
+ * @{
+ */
+
+#ifndef PVRSRV_DEV_CONNECTION_TYPEDEF
+#define PVRSRV_DEV_CONNECTION_TYPEDEF
+/*!
+ * Forward declaration (look on connection.h)
+ */
+typedef struct PVRSRV_DEV_CONNECTION_TAG PVRSRV_DEV_CONNECTION;
+#endif
+
+/*!
+ * @Anchor SRV_FLAGS
+ * @Name SRV_FLAGS: Services connection flags
+ * Allows a per-client policy to be defined for Services.
+ * @{
+ */
+
+/*
+ *   Use of the 32-bit connection flags mask
+ *   ( X = taken/in use, - = available/unused )
+ *
+ *   31  27     20             6 4   0
+ *    |   |      |             | |   |
+ *    X---XXXXXXXX-------------XXX----
+ */
+
+#define SRV_NO_HWPERF_CLIENT_STREAM     (1UL << 4)  /*!< Don't create HWPerf for this connection */
+#define SRV_FLAGS_CLIENT_64BIT_COMPAT   (1UL << 5)  /*!< This flag gets set if the client is 64-bit compatible. */
+#define SRV_FLAGS_CLIENT_SLR_DISABLED   (1UL << 6)  /*!< This flag is set if the client does not want Sync Lockup Recovery (SLR) enabled. */
+#define SRV_FLAGS_PDUMPCTRL             (1UL << 31) /*!< PDump Ctrl client flag */
+
+/*! @} SRV_FLAGS */
+
+/*! @} End of SRVConnectInterfaces */
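Given the bit layout sketched above, a client's connection policy is a bitwise OR of the SRV_* flags, and individual policies are tested with a mask. The sketch below is illustrative only; the names are hypothetical and the IMG_UINT32 type is assumed to come from img_types.h.

/* Illustrative only: request no HWPerf stream and disable SLR for a connection. */
static inline IMG_UINT32 ExampleConnectionFlags(void)
{
	return SRV_NO_HWPERF_CLIENT_STREAM | SRV_FLAGS_CLIENT_SLR_DISABLED;
}

#define EXAMPLE_CONNECTION_HAS_SLR_DISABLED(ui32Flags) \
	(((ui32Flags) & SRV_FLAGS_CLIENT_SLR_DISABLED) != 0U)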
+
+/*
+ * Bits 20 - 27 are used to pass information needed for validation
+ * of the GPU Virtualisation Validation mechanism. In particular:
+ *
+ * Bits:
+ * [20 - 22]: OSid of the memory region that will be used for allocations
+ * [23 - 25]: OSid that will be emitted by the Firmware for all memory accesses
+ *            regarding that memory context.
+ *      [26]: If the AXI Protection register will be set to secure for that OSid
+ *      [27]: If the Emulator Wrapper Register checking for protection violation
+ *            will be set to secure for that OSid
+ */
+
+#define VIRTVAL_FLAG_OSID_SHIFT        (20)
+#define SRV_VIRTVAL_FLAG_OSID_MASK     (7U << VIRTVAL_FLAG_OSID_SHIFT)
+
+#define VIRTVAL_FLAG_OSIDREG_SHIFT     (23)
+#define SRV_VIRTVAL_FLAG_OSIDREG_MASK  (7U << VIRTVAL_FLAG_OSIDREG_SHIFT)
+
+#define VIRTVAL_FLAG_AXIPREG_SHIFT     (26)
+#define SRV_VIRTVAL_FLAG_AXIPREG_MASK  (1U << VIRTVAL_FLAG_AXIPREG_SHIFT)
+
+#define VIRTVAL_FLAG_AXIPTD_SHIFT      (27)
+#define SRV_VIRTVAL_FLAG_AXIPTD_MASK   (1U << VIRTVAL_FLAG_AXIPTD_SHIFT)
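A sketch of how bits 20-22 would be populated from the masks above when the GPU Virtualisation Validation mechanism is in use. Illustrative only and not part of the patch; the helper name is hypothetical.

/* Illustrative only: insert an allocation OSid into the connection flags. */
static inline IMG_UINT32 ExampleSetVirtValOSid(IMG_UINT32 ui32Flags, IMG_UINT32 ui32OSid)
{
	ui32Flags &= ~SRV_VIRTVAL_FLAG_OSID_MASK;
	ui32Flags |= (ui32OSid << VIRTVAL_FLAG_OSID_SHIFT) & SRV_VIRTVAL_FLAG_OSID_MASK;
	return ui32Flags;
}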
+
+
+/* Size of pointer on a 64 bit machine */
+#define        POINTER_SIZE_64BIT      (8U)
+
+
+/*
+    Pdump flags which are accessible to Services clients
+*/
+#define PDUMP_NONE          0x00000000U /*!< No flags */
+
+#define PDUMP_BLKDATA       0x10000000U /*!< This flag indicates block-mode PDump data to be recorded in
+                                             the Block script stream in addition to the Main script stream,
+                                             if capture mode is set to BLOCKED */
+
+#define PDUMP_CONT          0x40000000U /*!< Output this entry always, regardless of framed capture range;
+                                             used by client applications being dumped. */
+#define PDUMP_PERSIST       0x80000000U /*!< Output this entry always, regardless of app and range;
+                                             used by persistent resources created after
+                                             driver initialisation that must appear in
+                                             all PDump captures in that session. */
+
+/* Valid range of values for pdump block length in Block mode of PDump */
+#define PDUMP_BLOCKLEN_MIN          10
+#define PDUMP_BLOCKLEN_MAX          1000
+
+#define PDUMP_FRAME_MIN             0
+#define PDUMP_FRAME_MAX             (IMG_UINT32_MAX - 1)
+#define PDUMP_FRAME_UNSET           IMG_UINT32_MAX
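The block-length limits above lend themselves to a simple range check before a blocked capture is programmed. Illustrative only; the helper name is hypothetical and IMG_BOOL/IMG_TRUE/IMG_FALSE are assumed to come from img_types.h.

/* Illustrative only: validate a requested PDump block length. */
static inline IMG_BOOL ExamplePDumpBlockLenValid(IMG_UINT32 ui32BlockLen)
{
	return (ui32BlockLen >= PDUMP_BLOCKLEN_MIN && ui32BlockLen <= PDUMP_BLOCKLEN_MAX)
	       ? IMG_TRUE : IMG_FALSE;
}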
+
+/* Status of the device. */
+typedef enum
+{
+       PVRSRV_DEVICE_STATUS_UNKNOWN,        /* status of the device is unknown */
+       PVRSRV_DEVICE_STATUS_OK,             /* the device is operational */
+       PVRSRV_DEVICE_STATUS_NOT_RESPONDING, /* the device is not responding */
+       PVRSRV_DEVICE_STATUS_DEVICE_ERROR    /* the device is not operational */
+} PVRSRV_DEVICE_STATUS;
+
+#endif /* SERVICES_KM_H */
+/**************************************************************************//**
+End of file (services_km.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/include/servicesext.h b/drivers/gpu/drm/img/img-rogue/include/servicesext.h
new file mode 100644 (file)
index 0000000..5d685b2
--- /dev/null
@@ -0,0 +1,156 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services definitions required by external drivers
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provides services data structures, defines and prototypes
+                required by external drivers
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(SERVICESEXT_H)
+#define SERVICESEXT_H
+
+/* include/ */
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "img_3dtypes.h"
+#include "pvrsrv_device_types.h"
+
+/*
+ * Lock buffer read/write flags
+ */
+#define PVRSRV_LOCKFLG_READONLY                (1)             /*!< The locking process will only read the locked surface */
+
+/*!
+ *****************************************************************************
+ *     Services State
+ *****************************************************************************/
+typedef enum _PVRSRV_SERVICES_STATE_
+{
+       PVRSRV_SERVICES_STATE_UNDEFINED = 0,
+       PVRSRV_SERVICES_STATE_OK,
+       PVRSRV_SERVICES_STATE_BAD,
+} PVRSRV_SERVICES_STATE;
+
+
+/*!
+ *****************************************************************************
+ *     States for power management
+ *****************************************************************************/
+/*!
+  System Power State Enum
+ */
+typedef enum _PVRSRV_SYS_POWER_STATE_
+{
+       PVRSRV_SYS_POWER_STATE_Unspecified              = -1,   /*!< Unspecified : Uninitialised */
+       PVRSRV_SYS_POWER_STATE_OFF                              = 0,    /*!< Off */
+       PVRSRV_SYS_POWER_STATE_ON                               = 1,    /*!< On */
+
+       PVRSRV_SYS_POWER_STATE_FORCE_I32 = 0x7fffffff   /*!< Force enum to be at least 32-bits wide */
+
+} PVRSRV_SYS_POWER_STATE, *PPVRSRV_SYS_POWER_STATE; /*!< Typedef for ptr to PVRSRV_SYS_POWER_STATE */
+
+/*!
+  Device Power State Enum
+ */
+typedef IMG_INT32 PVRSRV_DEV_POWER_STATE;
+typedef IMG_INT32 *PPVRSRV_DEV_POWER_STATE;    /*!< Typedef for ptr to PVRSRV_DEV_POWER_STATE */ /* PRQA S 3205 */
+#define PVRSRV_DEV_POWER_STATE_DEFAULT -1      /*!< Default state for the device */
+#define PVRSRV_DEV_POWER_STATE_OFF              0      /*!< Unpowered */
+#define PVRSRV_DEV_POWER_STATE_ON               1      /*!< Running */
+
+/*!
+  Power Flags Enum
+ */
+typedef IMG_UINT32 PVRSRV_POWER_FLAGS;
+#define PVRSRV_POWER_FLAGS_NONE                0U                      /*!< No flags */
+#define PVRSRV_POWER_FLAGS_FORCED      (1U << 0)       /*!< Power transition should not fail */
+#define PVRSRV_POWER_FLAGS_SUSPEND     (1U << 1)       /*!< Power transition is due to an OS suspend request */
+
+/* Clock speed handler prototypes */
+
+/*!
+  Typedef for a pointer to a Function that will be called before a transition
+  from one clock speed to another. See also PFN_POST_CLOCKSPEED_CHANGE.
+ */
+typedef PVRSRV_ERROR (*PFN_PRE_CLOCKSPEED_CHANGE) (IMG_HANDLE                          hDevHandle,
+                                                                                                  PVRSRV_DEV_POWER_STATE       eCurrentPowerState);
+
+/*!
+  Typedef for a pointer to a Function that will be called after a transition
+  from one clock speed to another. See also PFN_PRE_CLOCKSPEED_CHANGE.
+ */
+typedef PVRSRV_ERROR (*PFN_POST_CLOCKSPEED_CHANGE) (IMG_HANDLE                         hDevHandle,
+                                                                                                       PVRSRV_DEV_POWER_STATE  eCurrentPowerState);
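The two typedefs above define the shape of the pre/post clock-speed callbacks a system layer would register. Below is a no-op skeleton matching the first signature, illustrative only and not part of the patch; PVRSRV_OK is assumed to come from pvrsrv_error.h, which this header already includes.

/* Illustrative only: a minimal PFN_PRE_CLOCKSPEED_CHANGE implementation. */
static PVRSRV_ERROR ExamplePreClockSpeedChange(IMG_HANDLE hDevHandle,
                                               PVRSRV_DEV_POWER_STATE eCurrentPowerState)
{
	(void)hDevHandle;
	(void)eCurrentPowerState;
	/* Quiesce any clock-derived state here before the rate changes. */
	return PVRSRV_OK;
}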
+
+/*!
+  Typedef for a pointer to a function that will be called to transition the
+  device to a forced idle state. Used in unison with (forced) power requests,
+  DVFS and cluster count changes.
+ */
+typedef PVRSRV_ERROR (*PFN_FORCED_IDLE_REQUEST) (IMG_HANDLE            hDevHandle,
+                                                                                                IMG_BOOL               bDeviceOffPermitted);
+
+/*!
+  Typedef for a pointer to a function that will be called to cancel a forced
+  idle state and return the firmware back to a state where the hardware can be
+  scheduled.
+ */
+typedef PVRSRV_ERROR (*PFN_FORCED_IDLE_CANCEL_REQUEST) (IMG_HANDLE     hDevHandle);
+
+typedef PVRSRV_ERROR (*PFN_GPU_UNITS_POWER_CHANGE) (IMG_HANDLE         hDevHandle,
+                                                                                                       IMG_UINT32              ui32SESPowerState);
+
+/*!
+ *****************************************************************************
+ * This structure is used for OS independent registry (profile) access
+ *****************************************************************************/
+
+typedef struct PVRSRV_REGISTRY_INFO_TAG
+{
+       IMG_UINT32      ui32DevCookie;
+       IMG_PCHAR       pszKey;
+       IMG_PCHAR       pszValue;
+       IMG_PCHAR       pszBuf;
+       IMG_UINT32      ui32BufSize;
+} PVRSRV_REGISTRY_INFO, *PPVRSRV_REGISTRY_INFO;
+
+#endif /* SERVICESEXT_H */
+/******************************************************************************
+ End of file (servicesext.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/include/sync_checkpoint_external.h b/drivers/gpu/drm/img/img-rogue/include/sync_checkpoint_external.h
new file mode 100644 (file)
index 0000000..19b5011
--- /dev/null
@@ -0,0 +1,83 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services external synchronisation checkpoint interface header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines synchronisation checkpoint structures that are visible
+                               internally and externally
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef SYNC_CHECKPOINT_EXTERNAL_H
+#define SYNC_CHECKPOINT_EXTERNAL_H
+
+#include "img_types.h"
+
+#ifndef CHECKPOINT_TYPES
+#define CHECKPOINT_TYPES
+typedef struct SYNC_CHECKPOINT_CONTEXT_TAG *PSYNC_CHECKPOINT_CONTEXT;
+
+typedef struct SYNC_CHECKPOINT_TAG *PSYNC_CHECKPOINT;
+#endif
+
+/* PVRSRV_SYNC_CHECKPOINT states.
+ * The OS native sync implementation should call pfnIsSignalled() (which returns an
+ * IMG_BOOL) to determine whether a PVRSRV_SYNC_CHECKPOINT has signalled, but it may
+ * set the state of a PVRSRV_SYNC_CHECKPOINT that has not yet signalled (i.e. is still
+ * ACTIVE) when that PVRSRV_SYNC_CHECKPOINT represents a foreign sync.
+ */
+typedef IMG_UINT32 PVRSRV_SYNC_CHECKPOINT_STATE;
+
+#define PVRSRV_SYNC_CHECKPOINT_UNDEF         0x000U
+#define PVRSRV_SYNC_CHECKPOINT_ACTIVE        0xac1U  /*!< checkpoint has not signalled */
+#define PVRSRV_SYNC_CHECKPOINT_SIGNALLED     0x519U  /*!< checkpoint has signalled */
+#define PVRSRV_SYNC_CHECKPOINT_ERRORED       0xeffU   /*!< checkpoint has been errored */
+
+
+#define PVRSRV_UFO_IS_SYNC_CHECKPOINT_FWADDR(fwaddr)   (((fwaddr) & 0x1U) != 0U)
+#define PVRSRV_UFO_IS_SYNC_CHECKPOINT(ufoptr)                  (PVRSRV_UFO_IS_SYNC_CHECKPOINT_FWADDR((ufoptr)->puiAddrUFO.ui32Addr))
+
+/* Maximum number of sync checkpoints the firmware supports in one fence */
+#define MAX_SYNC_CHECKPOINTS_PER_FENCE 32U
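A sketch of how the state values above are typically consumed: a checkpoint only blocks dependent work while it is still ACTIVE; both SIGNALLED and ERRORED allow it to proceed. Illustrative only; IMG_BOOL/IMG_TRUE/IMG_FALSE come from img_types.h, which this header includes.

/* Illustrative only: does this checkpoint state still block dependent work? */
static inline IMG_BOOL ExampleCheckpointBlocks(PVRSRV_SYNC_CHECKPOINT_STATE eState)
{
	return (eState == PVRSRV_SYNC_CHECKPOINT_ACTIVE) ? IMG_TRUE : IMG_FALSE;
}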
+
+/*!
+ * Define to be used with SyncCheckpointAlloc() to indicate a checkpoint which
+ * represents a foreign sync point or collection of foreign sync points.
+ */
+#define SYNC_CHECKPOINT_FOREIGN_CHECKPOINT ((PVRSRV_TIMELINE) - 2U)
+
+#endif /* SYNC_CHECKPOINT_EXTERNAL_H */
diff --git a/drivers/gpu/drm/img/img-rogue/include/sync_prim_internal.h b/drivers/gpu/drm/img/img-rogue/include/sync_prim_internal.h
new file mode 100644 (file)
index 0000000..77164c2
--- /dev/null
@@ -0,0 +1,84 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services internal synchronisation typedef header
+@Description    Defines synchronisation types that are used internally
+                only
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef SYNC_INTERNAL_H
+#define SYNC_INTERNAL_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <powervr/mem_types.h>
+
+/* These are included here as the typedefs are required
+ * internally.
+ */
+
+typedef struct SYNC_PRIM_CONTEXT_TAG *PSYNC_PRIM_CONTEXT;
+typedef struct PVRSRV_CLIENT_SYNC_PRIM_TAG
+{
+       volatile uint32_t __iomem *pui32LinAddr;        /*!< User pointer to the primitive */
+} PVRSRV_CLIENT_SYNC_PRIM;
+
+/*!
+ * Bundled information for a sync prim operation
+ *
+ *   Structure: #PVRSRV_CLIENT_SYNC_PRIM_OP
+ *   Typedef: ::PVRSRV_CLIENT_SYNC_PRIM_OP
+ */
+typedef struct PVRSRV_CLIENT_SYNC_PRIM_OP_TAG
+{
+       #define PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK        (1U << 0)
+       #define PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE       (1U << 1)
+       #define PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE (PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE | (1U<<2))
+       uint32_t                    ui32Flags;       /*!< Operation flags: PVRSRV_CLIENT_SYNC_PRIM_OP_XXX */
+       PVRSRV_CLIENT_SYNC_PRIM    *psSync;          /*!< Pointer to the client sync primitive */
+       uint32_t                    ui32FenceValue;  /*!< The Fence value (only used if PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK is set) */
+       uint32_t                    ui32UpdateValue; /*!< The Update value (only used if PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE is set) */
+} PVRSRV_CLIENT_SYNC_PRIM_OP;
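As a usage sketch of the structure above, a fence check against a client sync primitive sets the CHECK flag and the fence value and leaves the update value unused. Illustrative only and not part of the patch; the helper name is hypothetical.

/* Illustrative only: describe a "wait until the sync prim reaches fence value" op. */
static inline void ExampleInitSyncCheckOp(PVRSRV_CLIENT_SYNC_PRIM_OP *psOp,
                                          PVRSRV_CLIENT_SYNC_PRIM *psSync,
                                          uint32_t ui32FenceValue)
{
	psOp->ui32Flags       = PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK;
	psOp->psSync          = psSync;
	psOp->ui32FenceValue  = ui32FenceValue;
	psOp->ui32UpdateValue = 0;
}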
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* SYNC_INTERNAL_H */
diff --git a/drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/apollo_regs.h b/drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/apollo_regs.h
new file mode 100644 (file)
index 0000000..4081e21
--- /dev/null
@@ -0,0 +1,108 @@
+/*************************************************************************/ /*!
+@File
+@Title          System Description Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides system-specific declarations and macros
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(APOLLO_REGS_H)
+#define APOLLO_REGS_H
+
+#include "tc_clocks.h"
+
+/* TC TCF5 */
+#define TC5_SYS_APOLLO_REG_PCI_BASENUM (1)
+#define TC5_SYS_APOLLO_REG_PDP2_OFFSET (0x800000)
+#define TC5_SYS_APOLLO_REG_PDP2_SIZE   (0x7C4)
+
+#define TC5_SYS_APOLLO_REG_PDP2_FBDC_OFFSET (0xA00000)
+#define TC5_SYS_APOLLO_REG_PDP2_FBDC_SIZE   (0x14)
+
+#define TC5_SYS_APOLLO_REG_HDMI_OFFSET (0xC00000)
+#define TC5_SYS_APOLLO_REG_HDMI_SIZE   (0x1C)
+
+/* TC ES2 */
+#define TCF_TEMP_SENSOR_SPI_OFFSET     0xe
+#define TCF_TEMP_SENSOR_TO_C(raw)      (((raw) * 248 / 4096) - 54)
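The conversion macro above maps the 12-bit SPI reading linearly to degrees Celsius; for example, a raw value of 2048 gives (2048 * 248 / 4096) - 54 = 70 degC. A tiny wrapper, illustrative only, with a hypothetical name:

/* Illustrative only: convert a raw temperature-sensor reading to degrees C. */
static inline int ExampleApolloTempDegC(unsigned int uiRaw)
{
	return TCF_TEMP_SENSOR_TO_C(uiRaw);
}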
+
+/* Number of bytes that are broken */
+#define SYS_DEV_MEM_BROKEN_BYTES       (1024 * 1024)
+#define SYS_DEV_MEM_REGION_SIZE                (0x40000000 - SYS_DEV_MEM_BROKEN_BYTES)
+
+/* Apollo reg on base register 0 */
+#define SYS_APOLLO_REG_PCI_BASENUM     (0)
+#define SYS_APOLLO_REG_REGION_SIZE     (0x00010000)
+
+#define SYS_APOLLO_REG_SYS_OFFSET      (0x0000)
+#define SYS_APOLLO_REG_SYS_SIZE                (0x0400)
+
+#define SYS_APOLLO_REG_PLL_OFFSET      (0x1000)
+#define SYS_APOLLO_REG_PLL_SIZE                (0x0400)
+
+#define SYS_APOLLO_REG_HOST_OFFSET     (0x4050)
+#define SYS_APOLLO_REG_HOST_SIZE       (0x0014)
+
+#define SYS_APOLLO_REG_PDP1_OFFSET     (0xC000)
+#define SYS_APOLLO_REG_PDP1_SIZE       (0x2000)
+
+/* Offsets for flashing Apollo PROMs from base 0 */
+#define APOLLO_FLASH_STAT_OFFSET       (0x4058)
+#define APOLLO_FLASH_DATA_WRITE_OFFSET (0x4050)
+#define APOLLO_FLASH_RESET_OFFSET      (0x4060)
+
+#define APOLLO_FLASH_FIFO_STATUS_MASK   (0xF)
+#define APOLLO_FLASH_FIFO_STATUS_SHIFT  (0)
+#define APOLLO_FLASH_PROGRAM_STATUS_MASK (0xF)
+#define APOLLO_FLASH_PROGRAM_STATUS_SHIFT (16)
+
+#define APOLLO_FLASH_PROG_COMPLETE_BIT (0x1)
+#define APOLLO_FLASH_PROG_PROGRESS_BIT (0x2)
+#define APOLLO_FLASH_PROG_FAILED_BIT   (0x4)
+#define APOLLO_FLASH_INV_FILETYPE_BIT  (0x8)
+
+#define APOLLO_FLASH_FIFO_SIZE         (8)
+
+/* RGX reg on base register 1 */
+#define SYS_RGX_REG_PCI_BASENUM                (1)
+#define SYS_RGX_REG_REGION_SIZE                (0x7FFFF)
+
+/* Device memory (including HP mapping) on base register 2 */
+#define SYS_DEV_MEM_PCI_BASENUM                (2)
+
+#endif /* APOLLO_REGS_H */
diff --git a/drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/bonnie_tcf.h b/drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/bonnie_tcf.h
new file mode 100644 (file)
index 0000000..fc87ec7
--- /dev/null
@@ -0,0 +1,68 @@
+/*************************************************************************/ /*!
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* bonnie_tcf.h - Bonnie TCF register definitions */
+
+/* tab size 4 */
+
+#ifndef BONNIE_TCF_DEFS_H
+#define BONNIE_TCF_DEFS_H
+
+#define BONNIE_TCF_OFFSET_BONNIETC_REGBANK                                                     0x00000000
+#define BONNIE_TCF_OFFSET_TC_IFACE_COUNTERS                                                    0x00004000
+#define BONNIE_TCF_OFFSET_TC_TEST_MODULE_IMGV4_RTM_TOP                         0x00008000
+#define BONNIE_TCF_OFFSET_TC_TEST_MODULE_TCF_SCRATCH_PAD_SECN          0x0000C000
+#define BONNIE_TCF_OFFSET_TC_TEST_MODULE_TCF_SCRATCH_PAD_DBG           0x00010000
+#define BONNIE_TCF_OFFSET_MULTI_CLK_ALIGN                                                      0x00014000
+#define BONNIE_TCF_OFFSET_ALIGN_DATA_TX                                                                0x00018000
+#define BONNIE_TCF_OFFSET_SAI_RX_1                                                                     0x0001C000
+#define BONNIE_TCF_OFFSET_SAI_RX_SDR                                                           0x00040000
+#define BONNIE_TCF_OFFSET_SAI_TX_1                                                                     0x00044000
+#define BONNIE_TCF_OFFSET_SAI_TX_SDR                                                           0x00068000
+
+#define BONNIE_TCF_OFFSET_SAI_RX_DELTA                                                         0x00004000
+#define BONNIE_TCF_OFFSET_SAI_TX_DELTA                                                         0x00004000
+
+#define BONNIE_TCF_OFFSET_SAI_CLK_TAPS                                                         0x0000000C
+#define BONNIE_TCF_OFFSET_SAI_EYES                                                                     0x00000010
+#define BONNIE_TCF_OFFSET_SAI_TRAIN_ACK                                                                0x00000018
+
+
+#endif /* BONNIE_TCF_DEFS_H */
diff --git a/drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/odin_defs.h b/drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/odin_defs.h
new file mode 100644 (file)
index 0000000..6234887
--- /dev/null
@@ -0,0 +1,326 @@
+/****************************************************************************
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Odin Memory Map - View from PCIe
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+****************************************************************************/
+
+#ifndef _ODIN_DEFS_H_
+#define _ODIN_DEFS_H_
+
+/* These defines have not been autogenerated */
+
+#define PCI_VENDOR_ID_ODIN                  (0x1AEE)
+#define DEVICE_ID_ODIN                      (0x1010)
+#define DEVICE_ID_TBA                       (0x1CF2)
+
+/* PCI BAR 0 contains the PDP regs and the Odin system regs */
+#define ODN_SYS_BAR                         0
+#define ODN_SYS_REGION_SIZE                 0x000800000 /* 8MB */
+
+#define ODN_SYS_REGS_OFFSET                 0
+#define ODN_SYS_REGS_SIZE                   0x000400000 /* 4MB */
+
+#define ODN_PDP_REGS_OFFSET                 0x000440000
+#define ODN_PDP_REGS_SIZE                   0x000040000 /* 256k */
+
+#define ODN_PDP2_REGS_OFFSET                0x000480000
+#define ODN_PDP2_REGS_SIZE                  0x000040000 /* 256k */
+
+#define ODN_PDP2_PFIM_OFFSET                0x000500000
+#define ODN_PDP2_PFIM_SIZE                  0x000040000 /* 256k */
+
+#define ODIN_DMA_REGS_OFFSET                0x0004C0000
+#define ODIN_DMA_REGS_SIZE                  0x000040000 /* 256k */
+
+#define ODIN_DMA_CHAN_REGS_SIZE             0x000001000 /*   4k */
+
+/* PCI BAR 2 contains the Device Under Test SOCIF 64MB region */
+#define ODN_DUT_SOCIF_BAR                   2
+#define ODN_DUT_SOCIF_OFFSET                0x000000000
+#define ODN_DUT_SOCIF_SIZE                  0x004000000 /* 64MB */
+
+/* PCI BAR 4 contains the on-board 1GB DDR memory */
+#define ODN_DDR_BAR                         4
+#define ODN_DDR_MEM_OFFSET                  0x000000000
+#define ODN_DDR_MEM_SIZE                    0x040000000 /* 1GB */
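The BAR/offset constants above locate each block relative to the card's PCI resources; for example, the PDP registers live ODN_PDP_REGS_OFFSET into BAR 0. A sketch against the Linux PCI API, illustrative only and not part of the patch; the helper name is hypothetical.

#include <linux/pci.h>

/* Illustrative only: physical address of the Odin PDP register block. */
static inline resource_size_t example_odn_pdp_regs_phys(struct pci_dev *pdev)
{
	return pci_resource_start(pdev, ODN_SYS_BAR) + ODN_PDP_REGS_OFFSET;
}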
+
+/* Odin system register banks */
+#define ODN_REG_BANK_CORE                   0x00000
+#define ODN_REG_BANK_TCF_SPI_MASTER         0x02000
+#define ODN_REG_BANK_ODN_CLK_BLK            0x0A000
+#define ODN_REG_BANK_ODN_MCU_COMMUNICATOR   0x0C000
+#define ODN_REG_BANK_DB_TYPE_ID             0x0C200
+#define ODN_REG_BANK_DB_TYPE_ID_TYPE_TCFVUOCTA   0x000000C6U
+#define ODN_REG_BANK_DB_TYPE_ID_TYPE_MASK   0x000000C0U
+#define ODN_REG_BANK_DB_TYPE_ID_TYPE_SHIFT  0x6
+#define ODN_REG_BANK_ODN_I2C                0x0E000
+#define ODN_REG_BANK_MULTI_CLK_ALIGN        0x20000
+#define ODN_REG_BANK_ALIGN_DATA_TX          0x22000
+#define ODN_REG_BANK_SAI_RX_DDR_0           0x24000
+#define ODN_REG_BANK_SAI_RX_DDR(n)          (ODN_REG_BANK_SAI_RX_DDR_0 + (0x02000*n))
+#define ODN_REG_BANK_SAI_TX_DDR_0           0x3A000
+#define ODN_REG_BANK_SAI_TX_DDR(n)          (ODN_REG_BANK_SAI_TX_DDR_0 + (0x02000*n))
+#define ODN_REG_BANK_SAI_TX_SDR             0x4E000
+
+/* Odin SPI regs */
+#define ODN_SPI_MST_ADDR_RDNWR              0x0000
+#define ODN_SPI_MST_WDATA                   0x0004
+#define ODN_SPI_MST_RDATA                   0x0008
+#define ODN_SPI_MST_STATUS                  0x000C
+#define ODN_SPI_MST_GO                      0x0010
+
+
+/*
+   Odin CLK regs - the odn_clk_blk module defs are not auto generated
+ */
+#define ODN_PDP_P_CLK_OUT_DIVIDER_REG1           0x620
+#define ODN_PDP_PCLK_ODIV1_LO_TIME_MASK          0x0000003FU
+#define ODN_PDP_PCLK_ODIV1_LO_TIME_SHIFT         0
+#define ODN_PDP_PCLK_ODIV1_HI_TIME_MASK          0x00000FC0U
+#define ODN_PDP_PCLK_ODIV1_HI_TIME_SHIFT         6
+
+#define ODN_PDP_P_CLK_OUT_DIVIDER_REG2           0x624
+#define ODN_PDP_PCLK_ODIV2_NOCOUNT_MASK          0x00000040U
+#define ODN_PDP_PCLK_ODIV2_NOCOUNT_SHIFT         6
+#define ODN_PDP_PCLK_ODIV2_EDGE_MASK             0x00000080U
+#define ODN_PDP_PCLK_ODIV2_EDGE_SHIFT            7
+
+#define ODN_PDP_P_CLK_OUT_DIVIDER_REG3           0x61C
+
+#define ODN_PDP_M_CLK_OUT_DIVIDER_REG1           0x628
+#define ODN_PDP_MCLK_ODIV1_LO_TIME_MASK          0x0000003FU
+#define ODN_PDP_MCLK_ODIV1_LO_TIME_SHIFT         0
+#define ODN_PDP_MCLK_ODIV1_HI_TIME_MASK                 0x00000FC0U
+#define ODN_PDP_MCLK_ODIV1_HI_TIME_SHIFT         6
+
+#define ODN_PDP_M_CLK_OUT_DIVIDER_REG2           0x62C
+#define ODN_PDP_MCLK_ODIV2_NOCOUNT_MASK          0x00000040U
+#define ODN_PDP_MCLK_ODIV2_NOCOUNT_SHIFT         6
+#define ODN_PDP_MCLK_ODIV2_EDGE_MASK             0x00000080U
+#define ODN_PDP_MCLK_ODIV2_EDGE_SHIFT            7
+
+#define ODN_PDP_P_CLK_MULTIPLIER_REG1            0x650
+#define ODN_PDP_PCLK_MUL1_LO_TIME_MASK           0x0000003FU
+#define ODN_PDP_PCLK_MUL1_LO_TIME_SHIFT          0
+#define ODN_PDP_PCLK_MUL1_HI_TIME_MASK           0x00000FC0U
+#define ODN_PDP_PCLK_MUL1_HI_TIME_SHIFT          6
+
+#define ODN_PDP_P_CLK_MULTIPLIER_REG2            0x654
+#define ODN_PDP_PCLK_MUL2_NOCOUNT_MASK           0x00000040U
+#define ODN_PDP_PCLK_MUL2_NOCOUNT_SHIFT          6
+#define ODN_PDP_PCLK_MUL2_EDGE_MASK              0x00000080U
+#define ODN_PDP_PCLK_MUL2_EDGE_SHIFT             7
+
+#define ODN_PDP_P_CLK_MULTIPLIER_REG3            0x64C
+
+#define ODN_PDP_P_CLK_IN_DIVIDER_REG             0x658
+#define ODN_PDP_PCLK_IDIV_LO_TIME_MASK           0x0000003FU
+#define ODN_PDP_PCLK_IDIV_LO_TIME_SHIFT          0
+#define ODN_PDP_PCLK_IDIV_HI_TIME_MASK           0x00000FC0U
+#define ODN_PDP_PCLK_IDIV_HI_TIME_SHIFT          6
+#define ODN_PDP_PCLK_IDIV_NOCOUNT_MASK           0x00001000U
+#define ODN_PDP_PCLK_IDIV_NOCOUNT_SHIFT          12
+#define ODN_PDP_PCLK_IDIV_EDGE_MASK              0x00002000U
+#define ODN_PDP_PCLK_IDIV_EDGE_SHIFT             13
+
+/*
+ * DUT core clock input divider, multiplier and out divider.
+ */
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER1                (0x0028)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME_MASK   (0x00000FC0U)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME_SHIFT  (6)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME_MASK   (0x0000003FU)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME_SHIFT  (0)
+
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER2                (0x002C)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_EDGE_MASK      (0x00000080U)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_EDGE_SHIFT     (7)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT_MASK   (0x00000040U)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT  (6)
+
+#define ODN_DUT_CORE_CLK_MULTIPLIER1                 (0x0050)
+#define ODN_DUT_CORE_CLK_MULTIPLIER1_HI_TIME_MASK    (0x00000FC0U)
+#define ODN_DUT_CORE_CLK_MULTIPLIER1_HI_TIME_SHIFT   (6)
+#define ODN_DUT_CORE_CLK_MULTIPLIER1_LO_TIME_MASK    (0x0000003FU)
+#define ODN_DUT_CORE_CLK_MULTIPLIER1_LO_TIME_SHIFT   (0)
+
+#define ODN_DUT_CORE_CLK_MULTIPLIER2                 (0x0054)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_MASK       (0x00007000U)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_SHIFT      (12)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_EN_MASK    (0x00000800U)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_EN_SHIFT   (11)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_EDGE_MASK       (0x00000080U)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_EDGE_SHIFT      (7)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_NOCOUNT_MASK    (0x00000040U)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_NOCOUNT_SHIFT   (6)
+
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1                 (0x0058)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_EDGE_MASK       (0x00002000U)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_EDGE_SHIFT      (13)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_NOCOUNT_MASK    (0x00001000U)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_NOCOUNT_SHIFT   (12)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_HI_TIME_MASK    (0x00000FC0U)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_HI_TIME_SHIFT   (6)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_LO_TIME_MASK    (0x0000003FU)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_LO_TIME_SHIFT   (0)
+
+/*
+ * DUT interface clock input divider, multiplier and out divider.
+ */
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1               (0x0220)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_HI_TIME_MASK  (0x00000FC0U)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_HI_TIME_SHIFT (6)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_LO_TIME_MASK  (0x0000003FU)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_LO_TIME_SHIFT (0)
+
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2               (0x0224)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_EDGE_MASK     (0x00000080U)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_EDGE_SHIFT    (7)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_NOCOUNT_MASK  (0x00000040U)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT (6)
+
+#define ODN_DUT_IFACE_CLK_MULTIPLIER1                (0x0250)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER1_HI_TIME_MASK   (0x00000FC0U)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER1_HI_TIME_SHIFT  (6)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER1_LO_TIME_MASK   (0x0000003FU)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER1_LO_TIME_SHIFT  (0)
+
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2                (0x0254)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_MASK      (0x00007000U)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_SHIFT     (12)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_EN_MASK   (0x00000800U)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_EN_SHIFT  (11)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_EDGE_MASK      (0x00000080U)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_EDGE_SHIFT     (7)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_NOCOUNT_MASK   (0x00000040U)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_NOCOUNT_SHIFT  (6)
+
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1                (0x0258)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_EDGE_MASK      (0x00002000U)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_EDGE_SHIFT     (13)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_NOCOUNT_MASK   (0x00001000U)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_NOCOUNT_SHIFT  (12)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME_MASK   (0x00000FC0U)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME_SHIFT  (6)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_LO_TIME_MASK   (0x0000003FU)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_LO_TIME_SHIFT  (0)
+
+
+/*
+ * Min/max values from the Xilinx Virtex-7 data sheet DS183, for speed grade 2
+ * All in Hz
+ */
+#define ODN_INPUT_CLOCK_SPEED                        (100000000U)
+#define ODN_INPUT_CLOCK_SPEED_MIN                    (10000000U)
+#define ODN_INPUT_CLOCK_SPEED_MAX                    (933000000U)
+#define ODN_OUTPUT_CLOCK_SPEED_MIN                   (4690000U)
+#define ODN_OUTPUT_CLOCK_SPEED_MAX                   (933000000U)
+#define ODN_VCO_MIN                                  (600000000U)
+#define ODN_VCO_MAX                                  (1440000000U)
+#define ODN_PFD_MIN                                  (10000000U)
+#define ODN_PFD_MAX                                  (500000000U)
+
+/*
+ * Max values that can be set in DRP registers
+ */
+#define ODN_OREG_VALUE_MAX                            (126.875f)
+#define ODN_MREG_VALUE_MAX                            (126.875f)
+#define ODN_DREG_VALUE_MAX                            (126U)
+
+
+#define ODN_MMCM_LOCK_STATUS_DUT_CORE                (0x00000001U)
+#define ODN_MMCM_LOCK_STATUS_DUT_IF                  (0x00000002U)
+#define ODN_MMCM_LOCK_STATUS_PDPP                    (0x00000008U)
+
+/*
+    Odin interrupt flags
+*/
+#define ODN_INTERRUPT_ENABLE_PDP1           (1 << ODN_INTERRUPT_ENABLE_PDP1_SHIFT)
+#define ODN_INTERRUPT_ENABLE_PDP2           (1 << ODN_INTERRUPT_ENABLE_PDP2_SHIFT)
+#define ODN_INTERRUPT_ENABLE_DUT            (1 << ODN_INTERRUPT_ENABLE_DUT_SHIFT)
+#define ODN_INTERRUPT_STATUS_PDP1           (1 << ODN_INTERRUPT_STATUS_PDP1_SHIFT)
+#define ODN_INTERRUPT_STATUS_PDP2           (1 << ODN_INTERRUPT_STATUS_PDP2_SHIFT)
+#define ODN_INTERRUPT_STATUS_DUT            (1 << ODN_INTERRUPT_STATUS_DUT_SHIFT)
+#define ODN_INTERRUPT_CLEAR_PDP1            (1 << ODN_INTERRUPT_CLR_PDP1_SHIFT)
+#define ODN_INTERRUPT_CLEAR_PDP2            (1 << ODN_INTERRUPT_CLR_PDP2_SHIFT)
+#define ODN_INTERRUPT_CLEAR_DUT             (1 << ODN_INTERRUPT_CLR_DUT_SHIFT)
+
+#define ODN_INTERRUPT_ENABLE_CDMA           (1 << ODN_INTERRUPT_ENABLE_CDMA_SHIFT)
+#define ODN_INTERRUPT_STATUS_CDMA           (1 << ODN_INTERRUPT_STATUS_CDMA_SHIFT)
+#define ODN_INTERRUPT_CLEAR_CDMA            (1 << ODN_INTERRUPT_CLR_CDMA_SHIFT)
+
+#define ODN_INTERRUPT_ENABLE_CDMA2          (1 << (ODN_INTERRUPT_ENABLE_CDMA_SHIFT + 1))
+#define ODN_INTERRUPT_STATUS_CDMA2          (1 << (ODN_INTERRUPT_STATUS_CDMA_SHIFT + 1))
+#define ODN_INTERRUPT_CLEAR_CDMA2           (1 << (ODN_INTERRUPT_CLR_CDMA_SHIFT + 1))
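Each flag above is just the corresponding *_SHIFT constant (defined earlier in this header) turned into a single-bit mask. A hedged sketch of the intended enable/status/clear pattern follows; the actual interrupt register offsets are not part of this hunk, so the sketch takes them as parameters, and the write-to-clear semantics of the CLR register is an assumption:

/* Illustrative only; offsets supplied by the caller, write-to-clear assumed. */
#include <linux/io.h>
#include <linux/types.h>

static void odn_dut_irq_sketch(void __iomem *core_regs,
			       unsigned int enable_off,
			       unsigned int status_off,
			       unsigned int clr_off)
{
	u32 en;

	/* Unmask the DUT interrupt alongside whatever is already enabled. */
	en = readl(core_regs + enable_off);
	writel(en | ODN_INTERRUPT_ENABLE_DUT, core_regs + enable_off);

	/* Later, in the handler: check and acknowledge the DUT interrupt. */
	if (readl(core_regs + status_off) & ODN_INTERRUPT_STATUS_DUT)
		writel(ODN_INTERRUPT_CLEAR_DUT, core_regs + clr_off);
}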
+
+/*
+   Other defines
+*/
+#define ODN_STREAM_OFF                      0
+#define ODN_STREAM_ON                       1
+#define ODN_SYNC_GEN_DISABLE                0
+#define ODN_SYNC_GEN_ENABLE                 1
+#define ODN_INTERLACE_DISABLE               0
+#define ODN_INTERLACE_ENABLE                1
+#define ODN_PIXEL_CLOCK_INVERTED            1
+#define ODN_HSYNC_POLARITY_ACTIVE_HIGH      1
+
+#define ODN_PDP_INTCLR_ALL                  0x000FFFFFU
+#define ODN_PDP_INTSTAT_ALL_OURUN_MASK      0x000FFFF0U
+
+/*
+   DMA defs
+*/
+#define ODN_CDMA_ADDR_WIDTH                35
+#define ODN_DMA_HW_DESC_HEAP_SIZE          0x100000
+#define ODN_DMA_CHAN_RX                    0
+#define ODN_DMA_CHAN_TX                    1
+
+#define ODIN_DMA_TX_CHAN_NAME              "tx"
+#define ODIN_DMA_RX_CHAN_NAME              "rx"
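The two channel-name strings above follow the standard dmaengine naming convention. As a hedged sketch (the wrapper function is illustrative, not part of the patch), a consumer could request the Odin CDMA channels by these names like this:

/* Illustrative only; uses the standard dmaengine consumer API. */
#include <linux/dmaengine.h>
#include <linux/device.h>
#include <linux/err.h>

static int odn_request_cdma_channels(struct device *dev,
				     struct dma_chan **tx,
				     struct dma_chan **rx)
{
	*tx = dma_request_chan(dev, ODIN_DMA_TX_CHAN_NAME);
	if (IS_ERR(*tx))
		return PTR_ERR(*tx);

	*rx = dma_request_chan(dev, ODIN_DMA_RX_CHAN_NAME);
	if (IS_ERR(*rx)) {
		dma_release_channel(*tx);
		return PTR_ERR(*rx);
	}

	return 0;
}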
+
+/*
+   FBC defs
+*/
+#define ODIN_PFIM_RELNUM                   (005U)
+
+#endif /* _ODIN_DEFS_H_ */
+
+/*****************************************************************************
+ End of file (odn_defs.h)
+*****************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/odin_pdp_regs.h b/drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/odin_pdp_regs.h
new file mode 100644 (file)
index 0000000..da47a25
--- /dev/null
@@ -0,0 +1,8540 @@
+/*************************************************************************/ /*!
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* tab size 4 */
+
+#ifndef ODN_PDP_REGS_H
+#define ODN_PDP_REGS_H
+
+/* Odin-PDP hardware register definitions */
+
+
+#define ODN_PDP_GRPH1SURF_OFFSET                                       (0x0000)
+
+/* PDP, GRPH1SURF, GRPH1PIXFMT
+*/
+#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_MASK                     (0xF8000000)
+#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_LSBMASK          (0x0000001F)
+#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_SHIFT                    (27)
+#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_LENGTH           (5)
+#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, GRPH1SURF, GRPH1USEGAMMA
+*/
+#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_MASK           (0x04000000)
+#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_LSBMASK                (0x00000001)
+#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_SHIFT          (26)
+#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_LENGTH         (1)
+#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH1SURF, GRPH1USECSC
+*/
+#define ODN_PDP_GRPH1SURF_GRPH1USECSC_MASK                     (0x02000000)
+#define ODN_PDP_GRPH1SURF_GRPH1USECSC_LSBMASK          (0x00000001)
+#define ODN_PDP_GRPH1SURF_GRPH1USECSC_SHIFT                    (25)
+#define ODN_PDP_GRPH1SURF_GRPH1USECSC_LENGTH           (1)
+#define ODN_PDP_GRPH1SURF_GRPH1USECSC_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, GRPH1SURF, GRPH1LUTRWCHOICE
+*/
+#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_MASK                (0x01000000)
+#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_LSBMASK     (0x00000001)
+#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_SHIFT       (24)
+#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_LENGTH      (1)
+#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, GRPH1SURF, GRPH1USELUT
+*/
+#define ODN_PDP_GRPH1SURF_GRPH1USELUT_MASK                     (0x00800000)
+#define ODN_PDP_GRPH1SURF_GRPH1USELUT_LSBMASK          (0x00000001)
+#define ODN_PDP_GRPH1SURF_GRPH1USELUT_SHIFT                    (23)
+#define ODN_PDP_GRPH1SURF_GRPH1USELUT_LENGTH           (1)
+#define ODN_PDP_GRPH1SURF_GRPH1USELUT_SIGNED_FIELD     IMG_FALSE
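Every PDP field in this file follows the same convention: _MASK selects the field's bits within the 32-bit register, _SHIFT is its bit position, _LSBMASK is the mask right-justified (_MASK >> _SHIFT), and _LENGTH is the field width in bits. A hedged illustration using the GRPH1PIXFMT field defined above (the helpers are illustrative only and assume <linux/types.h>):

/* Illustrative only -- generic insert/extract for one field. */
static inline u32 odn_pdp_grph1surf_set_pixfmt(u32 reg, u32 pixfmt)
{
	reg &= ~ODN_PDP_GRPH1SURF_GRPH1PIXFMT_MASK;
	reg |= (pixfmt & ODN_PDP_GRPH1SURF_GRPH1PIXFMT_LSBMASK) <<
	       ODN_PDP_GRPH1SURF_GRPH1PIXFMT_SHIFT;
	return reg;
}

static inline u32 odn_pdp_grph1surf_get_pixfmt(u32 reg)
{
	return (reg & ODN_PDP_GRPH1SURF_GRPH1PIXFMT_MASK) >>
	       ODN_PDP_GRPH1SURF_GRPH1PIXFMT_SHIFT;
}

The remaining GRPHn/VIDn register fields below follow exactly the same pattern, so equivalent helpers could be derived mechanically for each of them.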
+
+#define ODN_PDP_GRPH2SURF_OFFSET                                       (0x0004)
+
+/* PDP, GRPH2SURF, GRPH2PIXFMT
+*/
+#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_MASK                     (0xF8000000)
+#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_LSBMASK          (0x0000001F)
+#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_SHIFT                    (27)
+#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_LENGTH           (5)
+#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, GRPH2SURF, GRPH2USEGAMMA
+*/
+#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_MASK           (0x04000000)
+#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_LSBMASK                (0x00000001)
+#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_SHIFT          (26)
+#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_LENGTH         (1)
+#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH2SURF, GRPH2USECSC
+*/
+#define ODN_PDP_GRPH2SURF_GRPH2USECSC_MASK                     (0x02000000)
+#define ODN_PDP_GRPH2SURF_GRPH2USECSC_LSBMASK          (0x00000001)
+#define ODN_PDP_GRPH2SURF_GRPH2USECSC_SHIFT                    (25)
+#define ODN_PDP_GRPH2SURF_GRPH2USECSC_LENGTH           (1)
+#define ODN_PDP_GRPH2SURF_GRPH2USECSC_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, GRPH2SURF, GRPH2LUTRWCHOICE
+*/
+#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_MASK                (0x01000000)
+#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_LSBMASK     (0x00000001)
+#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_SHIFT       (24)
+#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_LENGTH      (1)
+#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, GRPH2SURF, GRPH2USELUT
+*/
+#define ODN_PDP_GRPH2SURF_GRPH2USELUT_MASK                     (0x00800000)
+#define ODN_PDP_GRPH2SURF_GRPH2USELUT_LSBMASK          (0x00000001)
+#define ODN_PDP_GRPH2SURF_GRPH2USELUT_SHIFT                    (23)
+#define ODN_PDP_GRPH2SURF_GRPH2USELUT_LENGTH           (1)
+#define ODN_PDP_GRPH2SURF_GRPH2USELUT_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_GRPH3SURF_OFFSET                                       (0x0008)
+
+/* PDP, GRPH3SURF, GRPH3PIXFMT
+*/
+#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_MASK                     (0xF8000000)
+#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_LSBMASK          (0x0000001F)
+#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_SHIFT                    (27)
+#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_LENGTH           (5)
+#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, GRPH3SURF, GRPH3USEGAMMA
+*/
+#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_MASK           (0x04000000)
+#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_LSBMASK                (0x00000001)
+#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_SHIFT          (26)
+#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_LENGTH         (1)
+#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH3SURF, GRPH3USECSC
+*/
+#define ODN_PDP_GRPH3SURF_GRPH3USECSC_MASK                     (0x02000000)
+#define ODN_PDP_GRPH3SURF_GRPH3USECSC_LSBMASK          (0x00000001)
+#define ODN_PDP_GRPH3SURF_GRPH3USECSC_SHIFT                    (25)
+#define ODN_PDP_GRPH3SURF_GRPH3USECSC_LENGTH           (1)
+#define ODN_PDP_GRPH3SURF_GRPH3USECSC_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, GRPH3SURF, GRPH3LUTRWCHOICE
+*/
+#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_MASK                (0x01000000)
+#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_LSBMASK     (0x00000001)
+#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_SHIFT       (24)
+#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_LENGTH      (1)
+#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, GRPH3SURF, GRPH3USELUT
+*/
+#define ODN_PDP_GRPH3SURF_GRPH3USELUT_MASK                     (0x00800000)
+#define ODN_PDP_GRPH3SURF_GRPH3USELUT_LSBMASK          (0x00000001)
+#define ODN_PDP_GRPH3SURF_GRPH3USELUT_SHIFT                    (23)
+#define ODN_PDP_GRPH3SURF_GRPH3USELUT_LENGTH           (1)
+#define ODN_PDP_GRPH3SURF_GRPH3USELUT_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_GRPH4SURF_OFFSET                                       (0x000C)
+
+/* PDP, GRPH4SURF, GRPH4PIXFMT
+*/
+#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_MASK                     (0xF8000000)
+#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_LSBMASK          (0x0000001F)
+#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_SHIFT                    (27)
+#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_LENGTH           (5)
+#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, GRPH4SURF, GRPH4USEGAMMA
+*/
+#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_MASK           (0x04000000)
+#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_LSBMASK                (0x00000001)
+#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_SHIFT          (26)
+#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_LENGTH         (1)
+#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH4SURF, GRPH4USECSC
+*/
+#define ODN_PDP_GRPH4SURF_GRPH4USECSC_MASK                     (0x02000000)
+#define ODN_PDP_GRPH4SURF_GRPH4USECSC_LSBMASK          (0x00000001)
+#define ODN_PDP_GRPH4SURF_GRPH4USECSC_SHIFT                    (25)
+#define ODN_PDP_GRPH4SURF_GRPH4USECSC_LENGTH           (1)
+#define ODN_PDP_GRPH4SURF_GRPH4USECSC_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, GRPH4SURF, GRPH4LUTRWCHOICE
+*/
+#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_MASK                (0x01000000)
+#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_LSBMASK     (0x00000001)
+#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_SHIFT       (24)
+#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_LENGTH      (1)
+#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, GRPH4SURF, GRPH4USELUT
+*/
+#define ODN_PDP_GRPH4SURF_GRPH4USELUT_MASK                     (0x00800000)
+#define ODN_PDP_GRPH4SURF_GRPH4USELUT_LSBMASK          (0x00000001)
+#define ODN_PDP_GRPH4SURF_GRPH4USELUT_SHIFT                    (23)
+#define ODN_PDP_GRPH4SURF_GRPH4USELUT_LENGTH           (1)
+#define ODN_PDP_GRPH4SURF_GRPH4USELUT_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_VID1SURF_OFFSET                                                (0x0010)
+
+/* PDP, VID1SURF, VID1PIXFMT
+*/
+#define ODN_PDP_VID1SURF_VID1PIXFMT_MASK                       (0xF8000000)
+#define ODN_PDP_VID1SURF_VID1PIXFMT_LSBMASK                    (0x0000001F)
+#define ODN_PDP_VID1SURF_VID1PIXFMT_SHIFT                      (27)
+#define ODN_PDP_VID1SURF_VID1PIXFMT_LENGTH                     (5)
+#define ODN_PDP_VID1SURF_VID1PIXFMT_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, VID1SURF, VID1USEGAMMA
+*/
+#define ODN_PDP_VID1SURF_VID1USEGAMMA_MASK                     (0x04000000)
+#define ODN_PDP_VID1SURF_VID1USEGAMMA_LSBMASK          (0x00000001)
+#define ODN_PDP_VID1SURF_VID1USEGAMMA_SHIFT                    (26)
+#define ODN_PDP_VID1SURF_VID1USEGAMMA_LENGTH           (1)
+#define ODN_PDP_VID1SURF_VID1USEGAMMA_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID1SURF, VID1USECSC
+*/
+#define ODN_PDP_VID1SURF_VID1USECSC_MASK                       (0x02000000)
+#define ODN_PDP_VID1SURF_VID1USECSC_LSBMASK                    (0x00000001)
+#define ODN_PDP_VID1SURF_VID1USECSC_SHIFT                      (25)
+#define ODN_PDP_VID1SURF_VID1USECSC_LENGTH                     (1)
+#define ODN_PDP_VID1SURF_VID1USECSC_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, VID1SURF, VID1USEI2P
+*/
+#define ODN_PDP_VID1SURF_VID1USEI2P_MASK                       (0x01000000)
+#define ODN_PDP_VID1SURF_VID1USEI2P_LSBMASK                    (0x00000001)
+#define ODN_PDP_VID1SURF_VID1USEI2P_SHIFT                      (24)
+#define ODN_PDP_VID1SURF_VID1USEI2P_LENGTH                     (1)
+#define ODN_PDP_VID1SURF_VID1USEI2P_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, VID1SURF, VID1COSITED
+*/
+#define ODN_PDP_VID1SURF_VID1COSITED_MASK                      (0x00800000)
+#define ODN_PDP_VID1SURF_VID1COSITED_LSBMASK           (0x00000001)
+#define ODN_PDP_VID1SURF_VID1COSITED_SHIFT                     (23)
+#define ODN_PDP_VID1SURF_VID1COSITED_LENGTH                    (1)
+#define ODN_PDP_VID1SURF_VID1COSITED_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, VID1SURF, VID1USEHQCD
+*/
+#define ODN_PDP_VID1SURF_VID1USEHQCD_MASK                      (0x00400000)
+#define ODN_PDP_VID1SURF_VID1USEHQCD_LSBMASK           (0x00000001)
+#define ODN_PDP_VID1SURF_VID1USEHQCD_SHIFT                     (22)
+#define ODN_PDP_VID1SURF_VID1USEHQCD_LENGTH                    (1)
+#define ODN_PDP_VID1SURF_VID1USEHQCD_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, VID1SURF, VID1USEINSTREAM
+*/
+#define ODN_PDP_VID1SURF_VID1USEINSTREAM_MASK          (0x00200000)
+#define ODN_PDP_VID1SURF_VID1USEINSTREAM_LSBMASK       (0x00000001)
+#define ODN_PDP_VID1SURF_VID1USEINSTREAM_SHIFT         (21)
+#define ODN_PDP_VID1SURF_VID1USEINSTREAM_LENGTH                (1)
+#define ODN_PDP_VID1SURF_VID1USEINSTREAM_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID2SURF_OFFSET                                                (0x0014)
+
+/* PDP, VID2SURF, VID2PIXFMT
+*/
+#define ODN_PDP_VID2SURF_VID2PIXFMT_MASK                       (0xF8000000)
+#define ODN_PDP_VID2SURF_VID2PIXFMT_LSBMASK                    (0x0000001F)
+#define ODN_PDP_VID2SURF_VID2PIXFMT_SHIFT                      (27)
+#define ODN_PDP_VID2SURF_VID2PIXFMT_LENGTH                     (5)
+#define ODN_PDP_VID2SURF_VID2PIXFMT_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, VID2SURF, VID2COSITED
+*/
+#define ODN_PDP_VID2SURF_VID2COSITED_MASK                      (0x00800000)
+#define ODN_PDP_VID2SURF_VID2COSITED_LSBMASK           (0x00000001)
+#define ODN_PDP_VID2SURF_VID2COSITED_SHIFT                     (23)
+#define ODN_PDP_VID2SURF_VID2COSITED_LENGTH                    (1)
+#define ODN_PDP_VID2SURF_VID2COSITED_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, VID2SURF, VID2USEGAMMA
+*/
+#define ODN_PDP_VID2SURF_VID2USEGAMMA_MASK                     (0x04000000)
+#define ODN_PDP_VID2SURF_VID2USEGAMMA_LSBMASK          (0x00000001)
+#define ODN_PDP_VID2SURF_VID2USEGAMMA_SHIFT                    (26)
+#define ODN_PDP_VID2SURF_VID2USEGAMMA_LENGTH           (1)
+#define ODN_PDP_VID2SURF_VID2USEGAMMA_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID2SURF, VID2USECSC
+*/
+#define ODN_PDP_VID2SURF_VID2USECSC_MASK                       (0x02000000)
+#define ODN_PDP_VID2SURF_VID2USECSC_LSBMASK                    (0x00000001)
+#define ODN_PDP_VID2SURF_VID2USECSC_SHIFT                      (25)
+#define ODN_PDP_VID2SURF_VID2USECSC_LENGTH                     (1)
+#define ODN_PDP_VID2SURF_VID2USECSC_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_VID3SURF_OFFSET                                                (0x0018)
+
+/* PDP, VID3SURF, VID3PIXFMT
+*/
+#define ODN_PDP_VID3SURF_VID3PIXFMT_MASK                       (0xF8000000)
+#define ODN_PDP_VID3SURF_VID3PIXFMT_LSBMASK                    (0x0000001F)
+#define ODN_PDP_VID3SURF_VID3PIXFMT_SHIFT                      (27)
+#define ODN_PDP_VID3SURF_VID3PIXFMT_LENGTH                     (5)
+#define ODN_PDP_VID3SURF_VID3PIXFMT_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, VID3SURF, VID3COSITED
+*/
+#define ODN_PDP_VID3SURF_VID3COSITED_MASK                      (0x00800000)
+#define ODN_PDP_VID3SURF_VID3COSITED_LSBMASK           (0x00000001)
+#define ODN_PDP_VID3SURF_VID3COSITED_SHIFT                     (23)
+#define ODN_PDP_VID3SURF_VID3COSITED_LENGTH                    (1)
+#define ODN_PDP_VID3SURF_VID3COSITED_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, VID3SURF, VID3USEGAMMA
+*/
+#define ODN_PDP_VID3SURF_VID3USEGAMMA_MASK                     (0x04000000)
+#define ODN_PDP_VID3SURF_VID3USEGAMMA_LSBMASK          (0x00000001)
+#define ODN_PDP_VID3SURF_VID3USEGAMMA_SHIFT                    (26)
+#define ODN_PDP_VID3SURF_VID3USEGAMMA_LENGTH           (1)
+#define ODN_PDP_VID3SURF_VID3USEGAMMA_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID3SURF, VID3USECSC
+*/
+#define ODN_PDP_VID3SURF_VID3USECSC_MASK                       (0x02000000)
+#define ODN_PDP_VID3SURF_VID3USECSC_LSBMASK                    (0x00000001)
+#define ODN_PDP_VID3SURF_VID3USECSC_SHIFT                      (25)
+#define ODN_PDP_VID3SURF_VID3USECSC_LENGTH                     (1)
+#define ODN_PDP_VID3SURF_VID3USECSC_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_VID4SURF_OFFSET                                                (0x001C)
+
+/* PDP, VID4SURF, VID4PIXFMT
+*/
+#define ODN_PDP_VID4SURF_VID4PIXFMT_MASK                       (0xF8000000)
+#define ODN_PDP_VID4SURF_VID4PIXFMT_LSBMASK                    (0x0000001F)
+#define ODN_PDP_VID4SURF_VID4PIXFMT_SHIFT                      (27)
+#define ODN_PDP_VID4SURF_VID4PIXFMT_LENGTH                     (5)
+#define ODN_PDP_VID4SURF_VID4PIXFMT_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, VID4SURF, VID4COSITED
+*/
+#define ODN_PDP_VID4SURF_VID4COSITED_MASK                      (0x00800000)
+#define ODN_PDP_VID4SURF_VID4COSITED_LSBMASK           (0x00000001)
+#define ODN_PDP_VID4SURF_VID4COSITED_SHIFT                     (23)
+#define ODN_PDP_VID4SURF_VID4COSITED_LENGTH                    (1)
+#define ODN_PDP_VID4SURF_VID4COSITED_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, VID4SURF, VID4USEGAMMA
+*/
+#define ODN_PDP_VID4SURF_VID4USEGAMMA_MASK                     (0x04000000)
+#define ODN_PDP_VID4SURF_VID4USEGAMMA_LSBMASK          (0x00000001)
+#define ODN_PDP_VID4SURF_VID4USEGAMMA_SHIFT                    (26)
+#define ODN_PDP_VID4SURF_VID4USEGAMMA_LENGTH           (1)
+#define ODN_PDP_VID4SURF_VID4USEGAMMA_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID4SURF, VID4USECSC
+*/
+#define ODN_PDP_VID4SURF_VID4USECSC_MASK                       (0x02000000)
+#define ODN_PDP_VID4SURF_VID4USECSC_LSBMASK                    (0x00000001)
+#define ODN_PDP_VID4SURF_VID4USECSC_SHIFT                      (25)
+#define ODN_PDP_VID4SURF_VID4USECSC_LENGTH                     (1)
+#define ODN_PDP_VID4SURF_VID4USECSC_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GRPH1CTRL_OFFSET                                       (0x0020)
+
+/* PDP, GRPH1CTRL, GRPH1STREN
+*/
+#define ODN_PDP_GRPH1CTRL_GRPH1STREN_MASK                      (0x80000000)
+#define ODN_PDP_GRPH1CTRL_GRPH1STREN_LSBMASK           (0x00000001)
+#define ODN_PDP_GRPH1CTRL_GRPH1STREN_SHIFT                     (31)
+#define ODN_PDP_GRPH1CTRL_GRPH1STREN_LENGTH                    (1)
+#define ODN_PDP_GRPH1CTRL_GRPH1STREN_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GRPH1CTRL, GRPH1CKEYEN
+*/
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_MASK                     (0x40000000)
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_LSBMASK          (0x00000001)
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_SHIFT                    (30)
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_LENGTH           (1)
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, GRPH1CTRL, GRPH1CKEYSRC
+*/
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_MASK                    (0x20000000)
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_LSBMASK         (0x00000001)
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_SHIFT           (29)
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_LENGTH          (1)
+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, GRPH1CTRL, GRPH1BLEND
+*/
+#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_MASK                      (0x18000000)
+#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_LSBMASK           (0x00000003)
+#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_SHIFT                     (27)
+#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_LENGTH                    (2)
+#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GRPH1CTRL, GRPH1BLENDPOS
+*/
+#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_MASK           (0x07000000)
+#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_LSBMASK                (0x00000007)
+#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_SHIFT          (24)
+#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_LENGTH         (3)
+#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH1CTRL, GRPH1DITHEREN
+*/
+#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_MASK           (0x00800000)
+#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_LSBMASK                (0x00000001)
+#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_SHIFT          (23)
+#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_LENGTH         (1)
+#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH2CTRL_OFFSET                                       (0x0024)
+
+/* PDP, GRPH2CTRL, GRPH2STREN
+*/
+#define ODN_PDP_GRPH2CTRL_GRPH2STREN_MASK                      (0x80000000)
+#define ODN_PDP_GRPH2CTRL_GRPH2STREN_LSBMASK           (0x00000001)
+#define ODN_PDP_GRPH2CTRL_GRPH2STREN_SHIFT                     (31)
+#define ODN_PDP_GRPH2CTRL_GRPH2STREN_LENGTH                    (1)
+#define ODN_PDP_GRPH2CTRL_GRPH2STREN_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GRPH2CTRL, GRPH2CKEYEN
+*/
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_MASK                     (0x40000000)
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_LSBMASK          (0x00000001)
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_SHIFT                    (30)
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_LENGTH           (1)
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, GRPH2CTRL, GRPH2CKEYSRC
+*/
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_MASK                    (0x20000000)
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_LSBMASK         (0x00000001)
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_SHIFT           (29)
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_LENGTH          (1)
+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, GRPH2CTRL, GRPH2BLEND
+*/
+#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_MASK                      (0x18000000)
+#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_LSBMASK           (0x00000003)
+#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_SHIFT                     (27)
+#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_LENGTH                    (2)
+#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GRPH2CTRL, GRPH2BLENDPOS
+*/
+#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_MASK           (0x07000000)
+#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_LSBMASK                (0x00000007)
+#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_SHIFT          (24)
+#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_LENGTH         (3)
+#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH2CTRL, GRPH2DITHEREN
+*/
+#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_MASK           (0x00800000)
+#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_LSBMASK                (0x00000001)
+#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_SHIFT          (23)
+#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_LENGTH         (1)
+#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH3CTRL_OFFSET                                       (0x0028)
+
+/* PDP, GRPH3CTRL, GRPH3STREN
+*/
+#define ODN_PDP_GRPH3CTRL_GRPH3STREN_MASK                      (0x80000000)
+#define ODN_PDP_GRPH3CTRL_GRPH3STREN_LSBMASK           (0x00000001)
+#define ODN_PDP_GRPH3CTRL_GRPH3STREN_SHIFT                     (31)
+#define ODN_PDP_GRPH3CTRL_GRPH3STREN_LENGTH                    (1)
+#define ODN_PDP_GRPH3CTRL_GRPH3STREN_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GRPH3CTRL, GRPH3CKEYEN
+*/
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_MASK                     (0x40000000)
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_LSBMASK          (0x00000001)
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_SHIFT                    (30)
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_LENGTH           (1)
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, GRPH3CTRL, GRPH3CKEYSRC
+*/
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_MASK                    (0x20000000)
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_LSBMASK         (0x00000001)
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_SHIFT           (29)
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_LENGTH          (1)
+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, GRPH3CTRL, GRPH3BLEND
+*/
+#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_MASK                      (0x18000000)
+#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_LSBMASK           (0x00000003)
+#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_SHIFT                     (27)
+#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_LENGTH                    (2)
+#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GRPH3CTRL, GRPH3BLENDPOS
+*/
+#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_MASK           (0x07000000)
+#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_LSBMASK                (0x00000007)
+#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_SHIFT          (24)
+#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_LENGTH         (3)
+#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH3CTRL, GRPH3DITHEREN
+*/
+#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_MASK           (0x00800000)
+#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_LSBMASK                (0x00000001)
+#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_SHIFT          (23)
+#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_LENGTH         (1)
+#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH4CTRL_OFFSET                                       (0x002C)
+
+/* PDP, GRPH4CTRL, GRPH4STREN
+*/
+#define ODN_PDP_GRPH4CTRL_GRPH4STREN_MASK                      (0x80000000)
+#define ODN_PDP_GRPH4CTRL_GRPH4STREN_LSBMASK           (0x00000001)
+#define ODN_PDP_GRPH4CTRL_GRPH4STREN_SHIFT                     (31)
+#define ODN_PDP_GRPH4CTRL_GRPH4STREN_LENGTH                    (1)
+#define ODN_PDP_GRPH4CTRL_GRPH4STREN_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GRPH4CTRL, GRPH4CKEYEN
+*/
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_MASK                     (0x40000000)
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_LSBMASK          (0x00000001)
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_SHIFT                    (30)
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_LENGTH           (1)
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, GRPH4CTRL, GRPH4CKEYSRC
+*/
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_MASK                    (0x20000000)
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_LSBMASK         (0x00000001)
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_SHIFT           (29)
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_LENGTH          (1)
+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, GRPH4CTRL, GRPH4BLEND
+*/
+#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_MASK                      (0x18000000)
+#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_LSBMASK           (0x00000003)
+#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_SHIFT                     (27)
+#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_LENGTH                    (2)
+#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GRPH4CTRL, GRPH4BLENDPOS
+*/
+#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_MASK           (0x07000000)
+#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_LSBMASK                (0x00000007)
+#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_SHIFT          (24)
+#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_LENGTH         (3)
+#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH4CTRL, GRPH4DITHEREN
+*/
+#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_MASK           (0x00800000)
+#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_LSBMASK                (0x00000001)
+#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_SHIFT          (23)
+#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_LENGTH         (1)
+#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1CTRL_OFFSET                                                (0x0030)
+
+/* PDP, VID1CTRL, VID1STREN
+*/
+#define ODN_PDP_VID1CTRL_VID1STREN_MASK                                (0x80000000)
+#define ODN_PDP_VID1CTRL_VID1STREN_LSBMASK                     (0x00000001)
+#define ODN_PDP_VID1CTRL_VID1STREN_SHIFT                       (31)
+#define ODN_PDP_VID1CTRL_VID1STREN_LENGTH                      (1)
+#define ODN_PDP_VID1CTRL_VID1STREN_SIGNED_FIELD                IMG_FALSE
+
+/* PDP, VID1CTRL, VID1CKEYEN
+*/
+#define ODN_PDP_VID1CTRL_VID1CKEYEN_MASK                       (0x40000000)
+#define ODN_PDP_VID1CTRL_VID1CKEYEN_LSBMASK                    (0x00000001)
+#define ODN_PDP_VID1CTRL_VID1CKEYEN_SHIFT                      (30)
+#define ODN_PDP_VID1CTRL_VID1CKEYEN_LENGTH                     (1)
+#define ODN_PDP_VID1CTRL_VID1CKEYEN_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, VID1CTRL, VID1CKEYSRC
+*/
+#define ODN_PDP_VID1CTRL_VID1CKEYSRC_MASK                      (0x20000000)
+#define ODN_PDP_VID1CTRL_VID1CKEYSRC_LSBMASK           (0x00000001)
+#define ODN_PDP_VID1CTRL_VID1CKEYSRC_SHIFT                     (29)
+#define ODN_PDP_VID1CTRL_VID1CKEYSRC_LENGTH                    (1)
+#define ODN_PDP_VID1CTRL_VID1CKEYSRC_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, VID1CTRL, VID1BLEND
+*/
+#define ODN_PDP_VID1CTRL_VID1BLEND_MASK                                (0x18000000)
+#define ODN_PDP_VID1CTRL_VID1BLEND_LSBMASK                     (0x00000003)
+#define ODN_PDP_VID1CTRL_VID1BLEND_SHIFT                       (27)
+#define ODN_PDP_VID1CTRL_VID1BLEND_LENGTH                      (2)
+#define ODN_PDP_VID1CTRL_VID1BLEND_SIGNED_FIELD                IMG_FALSE
+
+/* PDP, VID1CTRL, VID1BLENDPOS
+*/
+#define ODN_PDP_VID1CTRL_VID1BLENDPOS_MASK                     (0x07000000)
+#define ODN_PDP_VID1CTRL_VID1BLENDPOS_LSBMASK          (0x00000007)
+#define ODN_PDP_VID1CTRL_VID1BLENDPOS_SHIFT                    (24)
+#define ODN_PDP_VID1CTRL_VID1BLENDPOS_LENGTH           (3)
+#define ODN_PDP_VID1CTRL_VID1BLENDPOS_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID1CTRL, VID1DITHEREN
+*/
+#define ODN_PDP_VID1CTRL_VID1DITHEREN_MASK                     (0x00800000)
+#define ODN_PDP_VID1CTRL_VID1DITHEREN_LSBMASK          (0x00000001)
+#define ODN_PDP_VID1CTRL_VID1DITHEREN_SHIFT                    (23)
+#define ODN_PDP_VID1CTRL_VID1DITHEREN_LENGTH           (1)
+#define ODN_PDP_VID1CTRL_VID1DITHEREN_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_VID2CTRL_OFFSET                                                (0x0034)
+
+/* PDP, VID2CTRL, VID2STREN
+*/
+#define ODN_PDP_VID2CTRL_VID2STREN_MASK                                (0x80000000)
+#define ODN_PDP_VID2CTRL_VID2STREN_LSBMASK                     (0x00000001)
+#define ODN_PDP_VID2CTRL_VID2STREN_SHIFT                       (31)
+#define ODN_PDP_VID2CTRL_VID2STREN_LENGTH                      (1)
+#define ODN_PDP_VID2CTRL_VID2STREN_SIGNED_FIELD                IMG_FALSE
+
+/* PDP, VID2CTRL, VID2CKEYEN
+*/
+#define ODN_PDP_VID2CTRL_VID2CKEYEN_MASK                       (0x40000000)
+#define ODN_PDP_VID2CTRL_VID2CKEYEN_LSBMASK                    (0x00000001)
+#define ODN_PDP_VID2CTRL_VID2CKEYEN_SHIFT                      (30)
+#define ODN_PDP_VID2CTRL_VID2CKEYEN_LENGTH                     (1)
+#define ODN_PDP_VID2CTRL_VID2CKEYEN_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, VID2CTRL, VID2CKEYSRC
+*/
+#define ODN_PDP_VID2CTRL_VID2CKEYSRC_MASK                      (0x20000000)
+#define ODN_PDP_VID2CTRL_VID2CKEYSRC_LSBMASK           (0x00000001)
+#define ODN_PDP_VID2CTRL_VID2CKEYSRC_SHIFT                     (29)
+#define ODN_PDP_VID2CTRL_VID2CKEYSRC_LENGTH                    (1)
+#define ODN_PDP_VID2CTRL_VID2CKEYSRC_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, VID2CTRL, VID2BLEND
+*/
+#define ODN_PDP_VID2CTRL_VID2BLEND_MASK                                (0x18000000)
+#define ODN_PDP_VID2CTRL_VID2BLEND_LSBMASK                     (0x00000003)
+#define ODN_PDP_VID2CTRL_VID2BLEND_SHIFT                       (27)
+#define ODN_PDP_VID2CTRL_VID2BLEND_LENGTH                      (2)
+#define ODN_PDP_VID2CTRL_VID2BLEND_SIGNED_FIELD                IMG_FALSE
+
+/* PDP, VID2CTRL, VID2BLENDPOS
+*/
+#define ODN_PDP_VID2CTRL_VID2BLENDPOS_MASK                     (0x07000000)
+#define ODN_PDP_VID2CTRL_VID2BLENDPOS_LSBMASK          (0x00000007)
+#define ODN_PDP_VID2CTRL_VID2BLENDPOS_SHIFT                    (24)
+#define ODN_PDP_VID2CTRL_VID2BLENDPOS_LENGTH           (3)
+#define ODN_PDP_VID2CTRL_VID2BLENDPOS_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID2CTRL, VID2DITHEREN
+*/
+#define ODN_PDP_VID2CTRL_VID2DITHEREN_MASK                     (0x00800000)
+#define ODN_PDP_VID2CTRL_VID2DITHEREN_LSBMASK          (0x00000001)
+#define ODN_PDP_VID2CTRL_VID2DITHEREN_SHIFT                    (23)
+#define ODN_PDP_VID2CTRL_VID2DITHEREN_LENGTH           (1)
+#define ODN_PDP_VID2CTRL_VID2DITHEREN_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_VID3CTRL_OFFSET                                                (0x0038)
+
+/* PDP, VID3CTRL, VID3STREN
+*/
+#define ODN_PDP_VID3CTRL_VID3STREN_MASK                                (0x80000000)
+#define ODN_PDP_VID3CTRL_VID3STREN_LSBMASK                     (0x00000001)
+#define ODN_PDP_VID3CTRL_VID3STREN_SHIFT                       (31)
+#define ODN_PDP_VID3CTRL_VID3STREN_LENGTH                      (1)
+#define ODN_PDP_VID3CTRL_VID3STREN_SIGNED_FIELD                IMG_FALSE
+
+/* PDP, VID3CTRL, VID3CKEYEN
+*/
+#define ODN_PDP_VID3CTRL_VID3CKEYEN_MASK                       (0x40000000)
+#define ODN_PDP_VID3CTRL_VID3CKEYEN_LSBMASK                    (0x00000001)
+#define ODN_PDP_VID3CTRL_VID3CKEYEN_SHIFT                      (30)
+#define ODN_PDP_VID3CTRL_VID3CKEYEN_LENGTH                     (1)
+#define ODN_PDP_VID3CTRL_VID3CKEYEN_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, VID3CTRL, VID3CKEYSRC
+*/
+#define ODN_PDP_VID3CTRL_VID3CKEYSRC_MASK                      (0x20000000)
+#define ODN_PDP_VID3CTRL_VID3CKEYSRC_LSBMASK           (0x00000001)
+#define ODN_PDP_VID3CTRL_VID3CKEYSRC_SHIFT                     (29)
+#define ODN_PDP_VID3CTRL_VID3CKEYSRC_LENGTH                    (1)
+#define ODN_PDP_VID3CTRL_VID3CKEYSRC_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, VID3CTRL, VID3BLEND
+*/
+#define ODN_PDP_VID3CTRL_VID3BLEND_MASK                                (0x18000000)
+#define ODN_PDP_VID3CTRL_VID3BLEND_LSBMASK                     (0x00000003)
+#define ODN_PDP_VID3CTRL_VID3BLEND_SHIFT                       (27)
+#define ODN_PDP_VID3CTRL_VID3BLEND_LENGTH                      (2)
+#define ODN_PDP_VID3CTRL_VID3BLEND_SIGNED_FIELD                IMG_FALSE
+
+/* PDP, VID3CTRL, VID3BLENDPOS
+*/
+#define ODN_PDP_VID3CTRL_VID3BLENDPOS_MASK                     (0x07000000)
+#define ODN_PDP_VID3CTRL_VID3BLENDPOS_LSBMASK          (0x00000007)
+#define ODN_PDP_VID3CTRL_VID3BLENDPOS_SHIFT                    (24)
+#define ODN_PDP_VID3CTRL_VID3BLENDPOS_LENGTH           (3)
+#define ODN_PDP_VID3CTRL_VID3BLENDPOS_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID3CTRL, VID3DITHEREN
+*/
+#define ODN_PDP_VID3CTRL_VID3DITHEREN_MASK                     (0x00800000)
+#define ODN_PDP_VID3CTRL_VID3DITHEREN_LSBMASK          (0x00000001)
+#define ODN_PDP_VID3CTRL_VID3DITHEREN_SHIFT                    (23)
+#define ODN_PDP_VID3CTRL_VID3DITHEREN_LENGTH           (1)
+#define ODN_PDP_VID3CTRL_VID3DITHEREN_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_VID4CTRL_OFFSET                                                (0x003C)
+
+/* PDP, VID4CTRL, VID4STREN
+*/
+#define ODN_PDP_VID4CTRL_VID4STREN_MASK                                (0x80000000)
+#define ODN_PDP_VID4CTRL_VID4STREN_LSBMASK                     (0x00000001)
+#define ODN_PDP_VID4CTRL_VID4STREN_SHIFT                       (31)
+#define ODN_PDP_VID4CTRL_VID4STREN_LENGTH                      (1)
+#define ODN_PDP_VID4CTRL_VID4STREN_SIGNED_FIELD                IMG_FALSE
+
+/* PDP, VID4CTRL, VID4CKEYEN
+*/
+#define ODN_PDP_VID4CTRL_VID4CKEYEN_MASK                       (0x40000000)
+#define ODN_PDP_VID4CTRL_VID4CKEYEN_LSBMASK                    (0x00000001)
+#define ODN_PDP_VID4CTRL_VID4CKEYEN_SHIFT                      (30)
+#define ODN_PDP_VID4CTRL_VID4CKEYEN_LENGTH                     (1)
+#define ODN_PDP_VID4CTRL_VID4CKEYEN_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, VID4CTRL, VID4CKEYSRC
+*/
+#define ODN_PDP_VID4CTRL_VID4CKEYSRC_MASK                      (0x20000000)
+#define ODN_PDP_VID4CTRL_VID4CKEYSRC_LSBMASK           (0x00000001)
+#define ODN_PDP_VID4CTRL_VID4CKEYSRC_SHIFT                     (29)
+#define ODN_PDP_VID4CTRL_VID4CKEYSRC_LENGTH                    (1)
+#define ODN_PDP_VID4CTRL_VID4CKEYSRC_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, VID4CTRL, VID4BLEND
+*/
+#define ODN_PDP_VID4CTRL_VID4BLEND_MASK                                (0x18000000)
+#define ODN_PDP_VID4CTRL_VID4BLEND_LSBMASK                     (0x00000003)
+#define ODN_PDP_VID4CTRL_VID4BLEND_SHIFT                       (27)
+#define ODN_PDP_VID4CTRL_VID4BLEND_LENGTH                      (2)
+#define ODN_PDP_VID4CTRL_VID4BLEND_SIGNED_FIELD                IMG_FALSE
+
+/* PDP, VID4CTRL, VID4BLENDPOS
+*/
+#define ODN_PDP_VID4CTRL_VID4BLENDPOS_MASK                     (0x07000000)
+#define ODN_PDP_VID4CTRL_VID4BLENDPOS_LSBMASK          (0x00000007)
+#define ODN_PDP_VID4CTRL_VID4BLENDPOS_SHIFT                    (24)
+#define ODN_PDP_VID4CTRL_VID4BLENDPOS_LENGTH           (3)
+#define ODN_PDP_VID4CTRL_VID4BLENDPOS_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID4CTRL, VID4DITHEREN
+*/
+#define ODN_PDP_VID4CTRL_VID4DITHEREN_MASK                     (0x00800000)
+#define ODN_PDP_VID4CTRL_VID4DITHEREN_LSBMASK          (0x00000001)
+#define ODN_PDP_VID4CTRL_VID4DITHEREN_SHIFT                    (23)
+#define ODN_PDP_VID4CTRL_VID4DITHEREN_LENGTH           (1)
+#define ODN_PDP_VID4CTRL_VID4DITHEREN_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_VID1UCTRL_OFFSET                                       (0x0050)
+
+/* PDP, VID1UCTRL, VID1UVHALFSTR
+*/
+#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_MASK           (0xC0000000)
+#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_LSBMASK                (0x00000003)
+#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_SHIFT          (30)
+#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_LENGTH         (2)
+#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID2UCTRL_OFFSET                                       (0x0054)
+
+/* PDP, VID2UCTRL, VID2UVHALFSTR
+*/
+#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_MASK           (0xC0000000)
+#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_LSBMASK                (0x00000003)
+#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_SHIFT          (30)
+#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_LENGTH         (2)
+#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID3UCTRL_OFFSET                                       (0x0058)
+
+/* PDP, VID3UCTRL, VID3UVHALFSTR
+*/
+#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_MASK           (0xC0000000)
+#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_LSBMASK                (0x00000003)
+#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_SHIFT          (30)
+#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_LENGTH         (2)
+#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID4UCTRL_OFFSET                                       (0x005C)
+
+/* PDP, VID4UCTRL, VID4UVHALFSTR
+*/
+#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_MASK           (0xC0000000)
+#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_LSBMASK                (0x00000003)
+#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_SHIFT          (30)
+#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_LENGTH         (2)
+#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH1STRIDE_OFFSET                                     (0x0060)
+
+/* PDP, GRPH1STRIDE, GRPH1STRIDE
+*/
+#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_MASK           (0xFFC00000)
+#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_LSBMASK                (0x000003FF)
+#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_SHIFT          (22)
+#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_LENGTH         (10)
+#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH2STRIDE_OFFSET                                     (0x0064)
+
+/* PDP, GRPH2STRIDE, GRPH2STRIDE
+*/
+#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_MASK           (0xFFC00000)
+#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_LSBMASK                (0x000003FF)
+#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_SHIFT          (22)
+#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_LENGTH         (10)
+#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH3STRIDE_OFFSET                                     (0x0068)
+
+/* PDP, GRPH3STRIDE, GRPH3STRIDE
+*/
+#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_MASK           (0xFFC00000)
+#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_LSBMASK                (0x000003FF)
+#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_SHIFT          (22)
+#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_LENGTH         (10)
+#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH4STRIDE_OFFSET                                     (0x006C)
+
+/* PDP, GRPH4STRIDE, GRPH4STRIDE
+*/
+#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_MASK           (0xFFC00000)
+#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_LSBMASK                (0x000003FF)
+#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_SHIFT          (22)
+#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_LENGTH         (10)
+#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1STRIDE_OFFSET                                      (0x0070)
+
+/* PDP, VID1STRIDE, VID1STRIDE
+*/
+#define ODN_PDP_VID1STRIDE_VID1STRIDE_MASK                     (0xFFC00000)
+#define ODN_PDP_VID1STRIDE_VID1STRIDE_LSBMASK          (0x000003FF)
+#define ODN_PDP_VID1STRIDE_VID1STRIDE_SHIFT                    (22)
+#define ODN_PDP_VID1STRIDE_VID1STRIDE_LENGTH           (10)
+#define ODN_PDP_VID1STRIDE_VID1STRIDE_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_VID2STRIDE_OFFSET                                      (0x0074)
+
+/* PDP, VID2STRIDE, VID2STRIDE
+*/
+#define ODN_PDP_VID2STRIDE_VID2STRIDE_MASK                     (0xFFC00000)
+#define ODN_PDP_VID2STRIDE_VID2STRIDE_LSBMASK          (0x000003FF)
+#define ODN_PDP_VID2STRIDE_VID2STRIDE_SHIFT                    (22)
+#define ODN_PDP_VID2STRIDE_VID2STRIDE_LENGTH           (10)
+#define ODN_PDP_VID2STRIDE_VID2STRIDE_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_VID3STRIDE_OFFSET                                      (0x0078)
+
+/* PDP, VID3STRIDE, VID3STRIDE
+*/
+#define ODN_PDP_VID3STRIDE_VID3STRIDE_MASK                     (0xFFC00000)
+#define ODN_PDP_VID3STRIDE_VID3STRIDE_LSBMASK          (0x000003FF)
+#define ODN_PDP_VID3STRIDE_VID3STRIDE_SHIFT                    (22)
+#define ODN_PDP_VID3STRIDE_VID3STRIDE_LENGTH           (10)
+#define ODN_PDP_VID3STRIDE_VID3STRIDE_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_VID4STRIDE_OFFSET                                      (0x007C)
+
+/* PDP, VID4STRIDE, VID4STRIDE
+*/
+#define ODN_PDP_VID4STRIDE_VID4STRIDE_MASK                     (0xFFC00000)
+#define ODN_PDP_VID4STRIDE_VID4STRIDE_LSBMASK          (0x000003FF)
+#define ODN_PDP_VID4STRIDE_VID4STRIDE_SHIFT                    (22)
+#define ODN_PDP_VID4STRIDE_VID4STRIDE_LENGTH           (10)
+#define ODN_PDP_VID4STRIDE_VID4STRIDE_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_GRPH1SIZE_OFFSET                                       (0x0080)
+
+/* PDP, GRPH1SIZE, GRPH1WIDTH
+*/
+#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_MASK                      (0x0FFF0000)
+#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_LSBMASK           (0x00000FFF)
+#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_SHIFT                     (16)
+#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_LENGTH                    (12)
+#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GRPH1SIZE, GRPH1HEIGHT
+*/
+#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_MASK                     (0x00000FFF)
+#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_LSBMASK          (0x00000FFF)
+#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_SHIFT                    (0)
+#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_LENGTH           (12)
+#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_GRPH2SIZE_OFFSET                                       (0x0084)
+
+/* PDP, GRPH2SIZE, GRPH2WIDTH
+*/
+#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_MASK                      (0x0FFF0000)
+#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_LSBMASK           (0x00000FFF)
+#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_SHIFT                     (16)
+#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_LENGTH                    (12)
+#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GRPH2SIZE, GRPH2HEIGHT
+*/
+#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_MASK                     (0x00000FFF)
+#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_LSBMASK          (0x00000FFF)
+#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_SHIFT                    (0)
+#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_LENGTH           (12)
+#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_GRPH3SIZE_OFFSET                                       (0x0088)
+
+/* PDP, GRPH3SIZE, GRPH3WIDTH
+*/
+#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_MASK                      (0x0FFF0000)
+#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_LSBMASK           (0x00000FFF)
+#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_SHIFT                     (16)
+#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_LENGTH                    (12)
+#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GRPH3SIZE, GRPH3HEIGHT
+*/
+#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_MASK                     (0x00000FFF)
+#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_LSBMASK          (0x00000FFF)
+#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_SHIFT                    (0)
+#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_LENGTH           (12)
+#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_GRPH4SIZE_OFFSET                                       (0x008C)
+
+/* PDP, GRPH4SIZE, GRPH4WIDTH
+*/
+#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_MASK                      (0x0FFF0000)
+#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_LSBMASK           (0x00000FFF)
+#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_SHIFT                     (16)
+#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_LENGTH                    (12)
+#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GRPH4SIZE, GRPH4HEIGHT
+*/
+#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_MASK                     (0x00000FFF)
+#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_LSBMASK          (0x00000FFF)
+#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_SHIFT                    (0)
+#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_LENGTH           (12)
+#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_VID1SIZE_OFFSET                                                (0x0090)
+
+/* PDP, VID1SIZE, VID1WIDTH
+*/
+#define ODN_PDP_VID1SIZE_VID1WIDTH_MASK                                (0x0FFF0000)
+#define ODN_PDP_VID1SIZE_VID1WIDTH_LSBMASK                     (0x00000FFF)
+#define ODN_PDP_VID1SIZE_VID1WIDTH_SHIFT                       (16)
+#define ODN_PDP_VID1SIZE_VID1WIDTH_LENGTH                      (12)
+#define ODN_PDP_VID1SIZE_VID1WIDTH_SIGNED_FIELD                IMG_FALSE
+
+/* PDP, VID1SIZE, VID1HEIGHT
+*/
+#define ODN_PDP_VID1SIZE_VID1HEIGHT_MASK                       (0x00000FFF)
+#define ODN_PDP_VID1SIZE_VID1HEIGHT_LSBMASK                    (0x00000FFF)
+#define ODN_PDP_VID1SIZE_VID1HEIGHT_SHIFT                      (0)
+#define ODN_PDP_VID1SIZE_VID1HEIGHT_LENGTH                     (12)
+#define ODN_PDP_VID1SIZE_VID1HEIGHT_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_VID2SIZE_OFFSET                                                (0x0094)
+
+/* PDP, VID2SIZE, VID2WIDTH
+*/
+#define ODN_PDP_VID2SIZE_VID2WIDTH_MASK                                (0x0FFF0000)
+#define ODN_PDP_VID2SIZE_VID2WIDTH_LSBMASK                     (0x00000FFF)
+#define ODN_PDP_VID2SIZE_VID2WIDTH_SHIFT                       (16)
+#define ODN_PDP_VID2SIZE_VID2WIDTH_LENGTH                      (12)
+#define ODN_PDP_VID2SIZE_VID2WIDTH_SIGNED_FIELD                IMG_FALSE
+
+/* PDP, VID2SIZE, VID2HEIGHT
+*/
+#define ODN_PDP_VID2SIZE_VID2HEIGHT_MASK                       (0x00000FFF)
+#define ODN_PDP_VID2SIZE_VID2HEIGHT_LSBMASK                    (0x00000FFF)
+#define ODN_PDP_VID2SIZE_VID2HEIGHT_SHIFT                      (0)
+#define ODN_PDP_VID2SIZE_VID2HEIGHT_LENGTH                     (12)
+#define ODN_PDP_VID2SIZE_VID2HEIGHT_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_VID3SIZE_OFFSET                                                (0x0098)
+
+/* PDP, VID3SIZE, VID3WIDTH
+*/
+#define ODN_PDP_VID3SIZE_VID3WIDTH_MASK                                (0x0FFF0000)
+#define ODN_PDP_VID3SIZE_VID3WIDTH_LSBMASK                     (0x00000FFF)
+#define ODN_PDP_VID3SIZE_VID3WIDTH_SHIFT                       (16)
+#define ODN_PDP_VID3SIZE_VID3WIDTH_LENGTH                      (12)
+#define ODN_PDP_VID3SIZE_VID3WIDTH_SIGNED_FIELD                IMG_FALSE
+
+/* PDP, VID3SIZE, VID3HEIGHT
+*/
+#define ODN_PDP_VID3SIZE_VID3HEIGHT_MASK                       (0x00000FFF)
+#define ODN_PDP_VID3SIZE_VID3HEIGHT_LSBMASK                    (0x00000FFF)
+#define ODN_PDP_VID3SIZE_VID3HEIGHT_SHIFT                      (0)
+#define ODN_PDP_VID3SIZE_VID3HEIGHT_LENGTH                     (12)
+#define ODN_PDP_VID3SIZE_VID3HEIGHT_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_VID4SIZE_OFFSET                                                (0x009C)
+
+/* PDP, VID4SIZE, VID4WIDTH
+*/
+#define ODN_PDP_VID4SIZE_VID4WIDTH_MASK                                (0x0FFF0000)
+#define ODN_PDP_VID4SIZE_VID4WIDTH_LSBMASK                     (0x00000FFF)
+#define ODN_PDP_VID4SIZE_VID4WIDTH_SHIFT                       (16)
+#define ODN_PDP_VID4SIZE_VID4WIDTH_LENGTH                      (12)
+#define ODN_PDP_VID4SIZE_VID4WIDTH_SIGNED_FIELD                IMG_FALSE
+
+/* PDP, VID4SIZE, VID4HEIGHT
+*/
+#define ODN_PDP_VID4SIZE_VID4HEIGHT_MASK                       (0x00000FFF)
+#define ODN_PDP_VID4SIZE_VID4HEIGHT_LSBMASK                    (0x00000FFF)
+#define ODN_PDP_VID4SIZE_VID4HEIGHT_SHIFT                      (0)
+#define ODN_PDP_VID4SIZE_VID4HEIGHT_LENGTH                     (12)
+#define ODN_PDP_VID4SIZE_VID4HEIGHT_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GRPH1POSN_OFFSET                                       (0x00A0)
+
+/* PDP, GRPH1POSN, GRPH1XSTART
+*/
+#define ODN_PDP_GRPH1POSN_GRPH1XSTART_MASK                     (0x0FFF0000)
+#define ODN_PDP_GRPH1POSN_GRPH1XSTART_LSBMASK          (0x00000FFF)
+#define ODN_PDP_GRPH1POSN_GRPH1XSTART_SHIFT                    (16)
+#define ODN_PDP_GRPH1POSN_GRPH1XSTART_LENGTH           (12)
+#define ODN_PDP_GRPH1POSN_GRPH1XSTART_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, GRPH1POSN, GRPH1YSTART
+*/
+#define ODN_PDP_GRPH1POSN_GRPH1YSTART_MASK                     (0x00000FFF)
+#define ODN_PDP_GRPH1POSN_GRPH1YSTART_LSBMASK          (0x00000FFF)
+#define ODN_PDP_GRPH1POSN_GRPH1YSTART_SHIFT                    (0)
+#define ODN_PDP_GRPH1POSN_GRPH1YSTART_LENGTH           (12)
+#define ODN_PDP_GRPH1POSN_GRPH1YSTART_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_GRPH2POSN_OFFSET                                       (0x00A4)
+
+/* PDP, GRPH2POSN, GRPH2XSTART
+*/
+#define ODN_PDP_GRPH2POSN_GRPH2XSTART_MASK                     (0x0FFF0000)
+#define ODN_PDP_GRPH2POSN_GRPH2XSTART_LSBMASK          (0x00000FFF)
+#define ODN_PDP_GRPH2POSN_GRPH2XSTART_SHIFT                    (16)
+#define ODN_PDP_GRPH2POSN_GRPH2XSTART_LENGTH           (12)
+#define ODN_PDP_GRPH2POSN_GRPH2XSTART_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, GRPH2POSN, GRPH2YSTART
+*/
+#define ODN_PDP_GRPH2POSN_GRPH2YSTART_MASK                     (0x00000FFF)
+#define ODN_PDP_GRPH2POSN_GRPH2YSTART_LSBMASK          (0x00000FFF)
+#define ODN_PDP_GRPH2POSN_GRPH2YSTART_SHIFT                    (0)
+#define ODN_PDP_GRPH2POSN_GRPH2YSTART_LENGTH           (12)
+#define ODN_PDP_GRPH2POSN_GRPH2YSTART_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_GRPH3POSN_OFFSET                                       (0x00A8)
+
+/* PDP, GRPH3POSN, GRPH3XSTART
+*/
+#define ODN_PDP_GRPH3POSN_GRPH3XSTART_MASK                     (0x0FFF0000)
+#define ODN_PDP_GRPH3POSN_GRPH3XSTART_LSBMASK          (0x00000FFF)
+#define ODN_PDP_GRPH3POSN_GRPH3XSTART_SHIFT                    (16)
+#define ODN_PDP_GRPH3POSN_GRPH3XSTART_LENGTH           (12)
+#define ODN_PDP_GRPH3POSN_GRPH3XSTART_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, GRPH3POSN, GRPH3YSTART
+*/
+#define ODN_PDP_GRPH3POSN_GRPH3YSTART_MASK                     (0x00000FFF)
+#define ODN_PDP_GRPH3POSN_GRPH3YSTART_LSBMASK          (0x00000FFF)
+#define ODN_PDP_GRPH3POSN_GRPH3YSTART_SHIFT                    (0)
+#define ODN_PDP_GRPH3POSN_GRPH3YSTART_LENGTH           (12)
+#define ODN_PDP_GRPH3POSN_GRPH3YSTART_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_GRPH4POSN_OFFSET                                       (0x00AC)
+
+/* PDP, GRPH4POSN, GRPH4XSTART
+*/
+#define ODN_PDP_GRPH4POSN_GRPH4XSTART_MASK                     (0x0FFF0000)
+#define ODN_PDP_GRPH4POSN_GRPH4XSTART_LSBMASK          (0x00000FFF)
+#define ODN_PDP_GRPH4POSN_GRPH4XSTART_SHIFT                    (16)
+#define ODN_PDP_GRPH4POSN_GRPH4XSTART_LENGTH           (12)
+#define ODN_PDP_GRPH4POSN_GRPH4XSTART_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, GRPH4POSN, GRPH4YSTART
+*/
+#define ODN_PDP_GRPH4POSN_GRPH4YSTART_MASK                     (0x00000FFF)
+#define ODN_PDP_GRPH4POSN_GRPH4YSTART_LSBMASK          (0x00000FFF)
+#define ODN_PDP_GRPH4POSN_GRPH4YSTART_SHIFT                    (0)
+#define ODN_PDP_GRPH4POSN_GRPH4YSTART_LENGTH           (12)
+#define ODN_PDP_GRPH4POSN_GRPH4YSTART_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_VID1POSN_OFFSET                                                (0x00B0)
+
+/* PDP, VID1POSN, VID1XSTART
+*/
+#define ODN_PDP_VID1POSN_VID1XSTART_MASK                       (0x0FFF0000)
+#define ODN_PDP_VID1POSN_VID1XSTART_LSBMASK                    (0x00000FFF)
+#define ODN_PDP_VID1POSN_VID1XSTART_SHIFT                      (16)
+#define ODN_PDP_VID1POSN_VID1XSTART_LENGTH                     (12)
+#define ODN_PDP_VID1POSN_VID1XSTART_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, VID1POSN, VID1YSTART
+*/
+#define ODN_PDP_VID1POSN_VID1YSTART_MASK                       (0x00000FFF)
+#define ODN_PDP_VID1POSN_VID1YSTART_LSBMASK                    (0x00000FFF)
+#define ODN_PDP_VID1POSN_VID1YSTART_SHIFT                      (0)
+#define ODN_PDP_VID1POSN_VID1YSTART_LENGTH                     (12)
+#define ODN_PDP_VID1POSN_VID1YSTART_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_VID2POSN_OFFSET                                                (0x00B4)
+
+/* PDP, VID2POSN, VID2XSTART
+*/
+#define ODN_PDP_VID2POSN_VID2XSTART_MASK                       (0x0FFF0000)
+#define ODN_PDP_VID2POSN_VID2XSTART_LSBMASK                    (0x00000FFF)
+#define ODN_PDP_VID2POSN_VID2XSTART_SHIFT                      (16)
+#define ODN_PDP_VID2POSN_VID2XSTART_LENGTH                     (12)
+#define ODN_PDP_VID2POSN_VID2XSTART_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, VID2POSN, VID2YSTART
+*/
+#define ODN_PDP_VID2POSN_VID2YSTART_MASK                       (0x00000FFF)
+#define ODN_PDP_VID2POSN_VID2YSTART_LSBMASK                    (0x00000FFF)
+#define ODN_PDP_VID2POSN_VID2YSTART_SHIFT                      (0)
+#define ODN_PDP_VID2POSN_VID2YSTART_LENGTH                     (12)
+#define ODN_PDP_VID2POSN_VID2YSTART_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_VID3POSN_OFFSET                                                (0x00B8)
+
+/* PDP, VID3POSN, VID3XSTART
+*/
+#define ODN_PDP_VID3POSN_VID3XSTART_MASK                       (0x0FFF0000)
+#define ODN_PDP_VID3POSN_VID3XSTART_LSBMASK                    (0x00000FFF)
+#define ODN_PDP_VID3POSN_VID3XSTART_SHIFT                      (16)
+#define ODN_PDP_VID3POSN_VID3XSTART_LENGTH                     (12)
+#define ODN_PDP_VID3POSN_VID3XSTART_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, VID3POSN, VID3YSTART
+*/
+#define ODN_PDP_VID3POSN_VID3YSTART_MASK                       (0x00000FFF)
+#define ODN_PDP_VID3POSN_VID3YSTART_LSBMASK                    (0x00000FFF)
+#define ODN_PDP_VID3POSN_VID3YSTART_SHIFT                      (0)
+#define ODN_PDP_VID3POSN_VID3YSTART_LENGTH                     (12)
+#define ODN_PDP_VID3POSN_VID3YSTART_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_VID4POSN_OFFSET                                                (0x00BC)
+
+/* PDP, VID4POSN, VID4XSTART
+*/
+#define ODN_PDP_VID4POSN_VID4XSTART_MASK                       (0x0FFF0000)
+#define ODN_PDP_VID4POSN_VID4XSTART_LSBMASK                    (0x00000FFF)
+#define ODN_PDP_VID4POSN_VID4XSTART_SHIFT                      (16)
+#define ODN_PDP_VID4POSN_VID4XSTART_LENGTH                     (12)
+#define ODN_PDP_VID4POSN_VID4XSTART_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, VID4POSN, VID4YSTART
+*/
+#define ODN_PDP_VID4POSN_VID4YSTART_MASK                       (0x00000FFF)
+#define ODN_PDP_VID4POSN_VID4YSTART_LSBMASK                    (0x00000FFF)
+#define ODN_PDP_VID4POSN_VID4YSTART_SHIFT                      (0)
+#define ODN_PDP_VID4POSN_VID4YSTART_LENGTH                     (12)
+#define ODN_PDP_VID4POSN_VID4YSTART_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GRPH1GALPHA_OFFSET                                     (0x00C0)
+
+/* PDP, GRPH1GALPHA, GRPH1GALPHA
+*/
+#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_MASK           (0x000003FF)
+#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_LSBMASK                (0x000003FF)
+#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_SHIFT          (0)
+#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_LENGTH         (10)
+#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH2GALPHA_OFFSET                                     (0x00C4)
+
+/* PDP, GRPH2GALPHA, GRPH2GALPHA
+*/
+#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_MASK           (0x000003FF)
+#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_LSBMASK                (0x000003FF)
+#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_SHIFT          (0)
+#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_LENGTH         (10)
+#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH3GALPHA_OFFSET                                     (0x00C8)
+
+/* PDP, GRPH3GALPHA, GRPH3GALPHA
+*/
+#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_MASK           (0x000003FF)
+#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_LSBMASK                (0x000003FF)
+#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_SHIFT          (0)
+#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_LENGTH         (10)
+#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH4GALPHA_OFFSET                                     (0x00CC)
+
+/* PDP, GRPH4GALPHA, GRPH4GALPHA
+*/
+#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_MASK           (0x000003FF)
+#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_LSBMASK                (0x000003FF)
+#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_SHIFT          (0)
+#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_LENGTH         (10)
+#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1GALPHA_OFFSET                                      (0x00D0)
+
+/* PDP, VID1GALPHA, VID1GALPHA
+*/
+#define ODN_PDP_VID1GALPHA_VID1GALPHA_MASK                     (0x000003FF)
+#define ODN_PDP_VID1GALPHA_VID1GALPHA_LSBMASK          (0x000003FF)
+#define ODN_PDP_VID1GALPHA_VID1GALPHA_SHIFT                    (0)
+#define ODN_PDP_VID1GALPHA_VID1GALPHA_LENGTH           (10)
+#define ODN_PDP_VID1GALPHA_VID1GALPHA_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_VID2GALPHA_OFFSET                                      (0x00D4)
+
+/* PDP, VID2GALPHA, VID2GALPHA
+*/
+#define ODN_PDP_VID2GALPHA_VID2GALPHA_MASK                     (0x000003FF)
+#define ODN_PDP_VID2GALPHA_VID2GALPHA_LSBMASK          (0x000003FF)
+#define ODN_PDP_VID2GALPHA_VID2GALPHA_SHIFT                    (0)
+#define ODN_PDP_VID2GALPHA_VID2GALPHA_LENGTH           (10)
+#define ODN_PDP_VID2GALPHA_VID2GALPHA_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_VID3GALPHA_OFFSET                                      (0x00D8)
+
+/* PDP, VID3GALPHA, VID3GALPHA
+*/
+#define ODN_PDP_VID3GALPHA_VID3GALPHA_MASK                     (0x000003FF)
+#define ODN_PDP_VID3GALPHA_VID3GALPHA_LSBMASK          (0x000003FF)
+#define ODN_PDP_VID3GALPHA_VID3GALPHA_SHIFT                    (0)
+#define ODN_PDP_VID3GALPHA_VID3GALPHA_LENGTH           (10)
+#define ODN_PDP_VID3GALPHA_VID3GALPHA_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_VID4GALPHA_OFFSET                                  (0x00DC)
+
+/* PDP, VID4GALPHA, VID4GALPHA
+*/
+#define ODN_PDP_VID4GALPHA_VID4GALPHA_MASK                     (0x000003FF)
+#define ODN_PDP_VID4GALPHA_VID4GALPHA_LSBMASK          (0x000003FF)
+#define ODN_PDP_VID4GALPHA_VID4GALPHA_SHIFT                    (0)
+#define ODN_PDP_VID4GALPHA_VID4GALPHA_LENGTH           (10)
+#define ODN_PDP_VID4GALPHA_VID4GALPHA_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_GRPH1CKEY_R_OFFSET                                     (0x00E0)
+
+/* PDP, GRPH1CKEY_R, GRPH1CKEY_R
+*/
+#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_MASK           (0x000003FF)
+#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_LSBMASK                (0x000003FF)
+#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_SHIFT          (0)
+#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_LENGTH         (10)
+#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH1CKEY_GB_OFFSET                            (0x00E4)
+
+/* PDP, GRPH1CKEY_GB, GRPH1CKEY_G
+*/
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_MASK          (0x03FF0000)
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_LSBMASK       (0x000003FF)
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_SHIFT         (16)
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_LENGTH                (10)
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH1CKEY_GB, GRPH1CKEY_B
+*/
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_MASK          (0x000003FF)
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_LSBMASK       (0x000003FF)
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_SHIFT         (0)
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_LENGTH                (10)
+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH2CKEY_R_OFFSET                                     (0x00E8)
+
+/* PDP, GRPH2CKEY_R, GRPH2CKEY_R
+*/
+#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_MASK           (0x000003FF)
+#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_LSBMASK                (0x000003FF)
+#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_SHIFT          (0)
+#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_LENGTH         (10)
+#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH2CKEY_GB_OFFSET                                    (0x00EC)
+
+/* PDP, GRPH2CKEY_GB, GRPH2CKEY_G
+*/
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_MASK          (0x03FF0000)
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_LSBMASK       (0x000003FF)
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_SHIFT         (16)
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_LENGTH                (10)
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH2CKEY_GB, GRPH2CKEY_B
+*/
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_MASK          (0x000003FF)
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_LSBMASK       (0x000003FF)
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_SHIFT         (0)
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_LENGTH                (10)
+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH3CKEY_R_OFFSET                                     (0x00F0)
+
+/* PDP, GRPH3CKEY_R, GRPH3CKEY_R
+*/
+#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_MASK           (0x000003FF)
+#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_LSBMASK                (0x000003FF)
+#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_SHIFT          (0)
+#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_LENGTH         (10)
+#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH3CKEY_GB_OFFSET                                    (0x00F4)
+
+/* PDP, GRPH3CKEY_GB, GRPH3CKEY_G
+*/
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_MASK          (0x03FF0000)
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_LSBMASK       (0x000003FF)
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_SHIFT         (16)
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_LENGTH                (10)
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH3CKEY_GB, GRPH3CKEY_B
+*/
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_MASK          (0x000003FF)
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_LSBMASK       (0x000003FF)
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_SHIFT         (0)
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_LENGTH                (10)
+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH4CKEY_R_OFFSET                                     (0x00F8)
+
+/* PDP, GRPH4CKEY_R, GRPH4CKEY_R
+*/
+#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_MASK           (0x000003FF)
+#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_LSBMASK                (0x000003FF)
+#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_SHIFT          (0)
+#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_LENGTH         (10)
+#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH4CKEY_GB_OFFSET                                    (0x00FC)
+
+/* PDP, GRPH4CKEY_GB, GRPH4CKEY_G
+*/
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_MASK          (0x03FF0000)
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_LSBMASK       (0x000003FF)
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_SHIFT         (16)
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_LENGTH                (10)
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH4CKEY_GB, GRPH4CKEY_B
+*/
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_MASK          (0x000003FF)
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_LSBMASK       (0x000003FF)
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_SHIFT         (0)
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_LENGTH                (10)
+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1CKEY_R_OFFSET                                      (0x0100)
+
+/* PDP, VID1CKEY_R, VID1CKEY_R
+*/
+#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_MASK                     (0x000003FF)
+#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_LSBMASK          (0x000003FF)
+#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_SHIFT                    (0)
+#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_LENGTH           (10)
+#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_VID1CKEY_GB_OFFSET                                     (0x0104)
+
+/* PDP, VID1CKEY_GB, VID1CKEY_G
+*/
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_MASK                    (0x03FF0000)
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_LSBMASK         (0x000003FF)
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_SHIFT           (16)
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_LENGTH          (10)
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, VID1CKEY_GB, VID1CKEY_B
+*/
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_MASK                    (0x000003FF)
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_LSBMASK         (0x000003FF)
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_SHIFT           (0)
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_LENGTH          (10)
+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_VID2CKEY_R_OFFSET                                      (0x0108)
+
+/* PDP, VID2CKEY_R, VID2CKEY_R
+*/
+#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_MASK                     (0x000003FF)
+#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_LSBMASK          (0x000003FF)
+#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_SHIFT                    (0)
+#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_LENGTH           (10)
+#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_VID2CKEY_GB_OFFSET                                     (0x010C)
+
+/* PDP, VID2CKEY_GB, VID2CKEY_G
+*/
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_MASK                    (0x03FF0000)
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_LSBMASK         (0x000003FF)
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_SHIFT           (16)
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_LENGTH          (10)
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, VID2CKEY_GB, VID2CKEY_B
+*/
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_MASK                    (0x000003FF)
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_LSBMASK         (0x000003FF)
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_SHIFT           (0)
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_LENGTH          (10)
+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_VID3CKEY_R_OFFSET                                      (0x0110)
+
+/* PDP, VID3CKEY_R, VID3CKEY_R
+*/
+#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_MASK                     (0x000003FF)
+#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_LSBMASK          (0x000003FF)
+#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_SHIFT                    (0)
+#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_LENGTH           (10)
+#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_VID3CKEY_GB_OFFSET                                     (0x0114)
+
+/* PDP, VID3CKEY_GB, VID3CKEY_G
+*/
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_MASK                    (0x03FF0000)
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_LSBMASK         (0x000003FF)
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_SHIFT           (16)
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_LENGTH          (10)
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, VID3CKEY_GB, VID3CKEY_B
+*/
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_MASK                    (0x000003FF)
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_LSBMASK         (0x000003FF)
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_SHIFT           (0)
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_LENGTH          (10)
+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_VID4CKEY_R_OFFSET                                      (0x0118)
+
+/* PDP, VID4CKEY_R, VID4CKEY_R
+*/
+#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_MASK                     (0x000003FF)
+#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_LSBMASK          (0x000003FF)
+#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_SHIFT                    (0)
+#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_LENGTH           (10)
+#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_VID4CKEY_GB_OFFSET                                     (0x011C)
+
+/* PDP, VID4CKEY_GB, VID4CKEY_G
+*/
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_MASK                    (0x03FF0000)
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_LSBMASK         (0x000003FF)
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_SHIFT           (16)
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_LENGTH          (10)
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, VID4CKEY_GB, VID4CKEY_B
+*/
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_MASK                    (0x000003FF)
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_LSBMASK         (0x000003FF)
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_SHIFT           (0)
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_LENGTH          (10)
+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_GRPH1BLND2_R_OFFSET                                    (0x0120)
+
+/* PDP, GRPH1BLND2_R, GRPH1PIXDBL
+*/
+#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_MASK          (0x80000000)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_LSBMASK       (0x00000001)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_SHIFT         (31)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_LENGTH                (1)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH1BLND2_R, GRPH1LINDBL
+*/
+#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_MASK          (0x20000000)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_LSBMASK       (0x00000001)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_SHIFT         (29)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_LENGTH                (1)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH1BLND2_R, GRPH1CKEYMASK_R
+*/
+#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_MASK      (0x000003FF)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_SHIFT     (0)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_LENGTH    (10)
+#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH1BLND2_GB_OFFSET                           (0x0124)
+
+/* PDP, GRPH1BLND2_GB, GRPH1CKEYMASK_G
+*/
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_MASK     (0x03FF0000)
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_SHIFT    (16)
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_LENGTH (10)
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH1BLND2_GB, GRPH1CKEYMASK_B
+*/
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_MASK     (0x000003FF)
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_SHIFT    (0)
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_LENGTH (10)
+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH2BLND2_R_OFFSET                                    (0x0128)
+
+/* PDP, GRPH2BLND2_R, GRPH2PIXDBL
+*/
+#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_MASK          (0x80000000)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_LSBMASK       (0x00000001)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_SHIFT         (31)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_LENGTH                (1)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH2BLND2_R, GRPH2LINDBL
+*/
+#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_MASK          (0x20000000)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_LSBMASK       (0x00000001)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_SHIFT         (29)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_LENGTH                (1)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH2BLND2_R, GRPH2CKEYMASK_R
+*/
+#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_MASK      (0x000003FF)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_SHIFT     (0)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_LENGTH    (10)
+#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH2BLND2_GB_OFFSET                           (0x012C)
+
+/* PDP, GRPH2BLND2_GB, GRPH2CKEYMASK_G
+*/
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_MASK     (0x03FF0000)
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_SHIFT    (16)
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_LENGTH (10)
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH2BLND2_GB, GRPH2CKEYMASK_B
+*/
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_MASK     (0x000003FF)
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_SHIFT    (0)
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_LENGTH (10)
+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH3BLND2_R_OFFSET                                    (0x0130)
+
+/* PDP, GRPH3BLND2_R, GRPH3PIXDBL
+*/
+#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_MASK          (0x80000000)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_LSBMASK       (0x00000001)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_SHIFT         (31)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_LENGTH                (1)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH3BLND2_R, GRPH3LINDBL
+*/
+#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_MASK          (0x20000000)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_LSBMASK       (0x00000001)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_SHIFT         (29)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_LENGTH                (1)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH3BLND2_R, GRPH3CKEYMASK_R
+*/
+#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_MASK      (0x000003FF)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_SHIFT     (0)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_LENGTH    (10)
+#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH3BLND2_GB_OFFSET                           (0x0134)
+
+/* PDP, GRPH3BLND2_GB, GRPH3CKEYMASK_G
+*/
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_MASK     (0x03FF0000)
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_SHIFT    (16)
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_LENGTH (10)
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH3BLND2_GB, GRPH3CKEYMASK_B
+*/
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_MASK     (0x000003FF)
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_SHIFT    (0)
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_LENGTH (10)
+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH4BLND2_R_OFFSET                                    (0x0138)
+
+/* PDP, GRPH4BLND2_R, GRPH4PIXDBL
+*/
+#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_MASK          (0x80000000)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_LSBMASK       (0x00000001)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_SHIFT         (31)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_LENGTH                (1)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH4BLND2_R, GRPH4LINDBL
+*/
+#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_MASK          (0x20000000)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_LSBMASK       (0x00000001)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_SHIFT         (29)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_LENGTH                (1)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH4BLND2_R, GRPH4CKEYMASK_R
+*/
+#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_MASK      (0x000003FF)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_SHIFT     (0)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_LENGTH    (10)
+#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH4BLND2_GB_OFFSET                           (0x013C)
+
+/* PDP, GRPH4BLND2_GB, GRPH4CKEYMASK_G
+*/
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_MASK     (0x03FF0000)
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_SHIFT    (16)
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_LENGTH (10)
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH4BLND2_GB, GRPH4CKEYMASK_B
+*/
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_MASK     (0x000003FF)
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_LSBMASK (0x000003FF)
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_SHIFT    (0)
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_LENGTH (10)
+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1BLND2_R_OFFSET                                     (0x0140)
+
+/* PDP, VID1BLND2_R, VID1CKEYMASK_R
+*/
+#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_MASK                (0x000003FF)
+#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_LSBMASK     (0x000003FF)
+#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_SHIFT       (0)
+#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_LENGTH      (10)
+#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_SIGNED_FIELD        IMG_FALSE
+
+#define ODN_PDP_VID1BLND2_GB_OFFSET                                    (0x0144)
+
+/* PDP, VID1BLND2_GB, VID1CKEYMASK_G
+*/
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_MASK       (0x03FF0000)
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_LSBMASK    (0x000003FF)
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_SHIFT      (16)
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_LENGTH     (10)
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID1BLND2_GB, VID1CKEYMASK_B
+*/
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_MASK       (0x000003FF)
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_LSBMASK    (0x000003FF)
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_SHIFT      (0)
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_LENGTH     (10)
+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID2BLND2_R_OFFSET                         (0x0148)
+
+/* PDP, VID2BLND2_R, VID2CKEYMASK_R
+*/
+#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_MASK                (0x000003FF)
+#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_LSBMASK     (0x000003FF)
+#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_SHIFT       (0)
+#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_LENGTH      (10)
+#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_SIGNED_FIELD        IMG_FALSE
+
+#define ODN_PDP_VID2BLND2_GB_OFFSET                        (0x014C)
+
+/* PDP, VID2BLND2_GB, VID2CKEYMASK_G
+*/
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_MASK       (0x03FF0000)
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_LSBMASK    (0x000003FF)
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_SHIFT      (16)
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_LENGTH     (10)
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID2BLND2_GB, VID2CKEYMASK_B
+*/
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_MASK       (0x000003FF)
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_LSBMASK    (0x000003FF)
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_SHIFT      (0)
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_LENGTH     (10)
+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID3BLND2_R_OFFSET                         (0x0150)
+
+/* PDP, VID3BLND2_R, VID3CKEYMASK_R
+*/
+#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_MASK                (0x000003FF)
+#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_LSBMASK     (0x000003FF)
+#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_SHIFT       (0)
+#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_LENGTH      (10)
+#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_SIGNED_FIELD        IMG_FALSE
+
+#define ODN_PDP_VID3BLND2_GB_OFFSET                        (0x0154)
+
+/* PDP, VID3BLND2_GB, VID3CKEYMASK_G
+*/
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_MASK       (0x03FF0000)
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_LSBMASK    (0x000003FF)
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_SHIFT      (16)
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_LENGTH     (10)
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID3BLND2_GB, VID3CKEYMASK_B
+*/
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_MASK       (0x000003FF)
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_LSBMASK    (0x000003FF)
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_SHIFT      (0)
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_LENGTH     (10)
+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID4BLND2_R_OFFSET                         (0x0158)
+
+/* PDP, VID4BLND2_R, VID4CKEYMASK_R
+*/
+#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_MASK                (0x000003FF)
+#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_LSBMASK     (0x000003FF)
+#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_SHIFT       (0)
+#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_LENGTH      (10)
+#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_SIGNED_FIELD        IMG_FALSE
+
+#define ODN_PDP_VID4BLND2_GB_OFFSET                        (0x015C)
+
+/* PDP, VID4BLND2_GB, VID4CKEYMASK_G
+*/
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_MASK       (0x03FF0000)
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_LSBMASK    (0x000003FF)
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_SHIFT      (16)
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_LENGTH     (10)
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID4BLND2_GB, VID4CKEYMASK_B
+*/
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_MASK       (0x000003FF)
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_LSBMASK    (0x000003FF)
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_SHIFT      (0)
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_LENGTH     (10)
+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH1INTERLEAVE_CTRL_OFFSET                (0x0160)
+
+/* PDP, GRPH1INTERLEAVE_CTRL, GRPH1INTFIELD
+*/
+#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_MASK        (0x00000001)
+#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_LSBMASK (0x00000001)
+#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SHIFT (0)
+#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_LENGTH (1)
+#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SIGNED_FIELD        IMG_FALSE
+
+#define ODN_PDP_GRPH2INTERLEAVE_CTRL_OFFSET                (0x0164)
+
+/* PDP, GRPH2INTERLEAVE_CTRL, GRPH2INTFIELD
+*/
+#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_MASK (0x00000001)
+#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_LSBMASK (0x00000001)
+#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SHIFT (0)
+#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_LENGTH (1)
+#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SIGNED_FIELD        IMG_FALSE
+
+#define ODN_PDP_GRPH3INTERLEAVE_CTRL_OFFSET                (0x0168)
+
+/* PDP, GRPH3INTERLEAVE_CTRL, GRPH3INTFIELD
+*/
+#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_MASK (0x00000001)
+#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_LSBMASK (0x00000001)
+#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_SHIFT (0)
+#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_LENGTH (1)
+#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_SIGNED_FIELD        IMG_FALSE
+
+#define ODN_PDP_GRPH4INTERLEAVE_CTRL_OFFSET                (0x016C)
+
+/* PDP, GRPH4INTERLEAVE_CTRL, GRPH4INTFIELD
+*/
+#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_MASK (0x00000001)
+#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_LSBMASK (0x00000001)
+#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SHIFT (0)
+#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_LENGTH (1)
+#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SIGNED_FIELD        IMG_FALSE
+
+#define ODN_PDP_VID1INTERLEAVE_CTRL_OFFSET                 (0x0170)
+
+/* PDP, VID1INTERLEAVE_CTRL, VID1INTFIELD
+*/
+#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_MASK (0x00000001)
+#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_LSBMASK (0x00000001)
+#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SHIFT (0)
+#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_LENGTH (1)
+#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID2INTERLEAVE_CTRL_OFFSET                 (0x0174)
+
+/* PDP, VID2INTERLEAVE_CTRL, VID2INTFIELD
+*/
+#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_MASK (0x00000001)
+#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_LSBMASK (0x00000001)
+#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_SHIFT (0)
+#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_LENGTH (1)
+#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID3INTERLEAVE_CTRL_OFFSET                 (0x0178)
+
+/* PDP, VID3INTERLEAVE_CTRL, VID3INTFIELD
+*/
+#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_MASK (0x00000001)
+#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_LSBMASK (0x00000001)
+#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_SHIFT (0)
+#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_LENGTH (1)
+#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID4INTERLEAVE_CTRL_OFFSET                 (0x017C)
+
+/* PDP, VID4INTERLEAVE_CTRL, VID4INTFIELD
+*/
+#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_MASK (0x00000001)
+#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_LSBMASK (0x00000001)
+#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_SHIFT (0)
+#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_LENGTH (1)
+#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH1BASEADDR_OFFSET                   (0x0180)
+
+/* PDP, GRPH1BASEADDR, GRPH1BASEADDR
+*/
+#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_MASK       (0xFFFFFFE0)
+#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_LSBMASK    (0x07FFFFFF)
+#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_SHIFT      (5)
+#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_LENGTH     (27)
+#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH2BASEADDR_OFFSET                   (0x0184)
+
+/* PDP, GRPH2BASEADDR, GRPH2BASEADDR
+*/
+#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_MASK       (0xFFFFFFE0)
+#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_LSBMASK    (0x07FFFFFF)
+#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_SHIFT      (5)
+#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_LENGTH     (27)
+#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH3BASEADDR_OFFSET                   (0x0188)
+
+/* PDP, GRPH3BASEADDR, GRPH3BASEADDR
+*/
+#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_MASK       (0xFFFFFFE0)
+#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_LSBMASK    (0x07FFFFFF)
+#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_SHIFT      (5)
+#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_LENGTH     (27)
+#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GRPH4BASEADDR_OFFSET                   (0x018C)
+
+/* PDP, GRPH4BASEADDR, GRPH4BASEADDR
+*/
+#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_MASK       (0xFFFFFFE0)
+#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_LSBMASK    (0x07FFFFFF)
+#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_SHIFT      (5)
+#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_LENGTH     (27)
+#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1BASEADDR_OFFSET                        (0x0190)
+
+/* PDP, VID1BASEADDR, VID1BASEADDR
+*/
+#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_MASK         (0xFFFFFFE0)
+#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_LSBMASK      (0x07FFFFFF)
+#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_SHIFT                (5)
+#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_LENGTH       (27)
+#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID2BASEADDR_OFFSET                        (0x0194)
+
+/* PDP, VID2BASEADDR, VID2BASEADDR
+*/
+#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_MASK         (0xFFFFFFE0)
+#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_LSBMASK      (0x07FFFFFF)
+#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_SHIFT                (5)
+#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_LENGTH       (27)
+#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID3BASEADDR_OFFSET                        (0x0198)
+
+/* PDP, VID3BASEADDR, VID3BASEADDR
+*/
+#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_MASK         (0xFFFFFFE0)
+#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_LSBMASK      (0x07FFFFFF)
+#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_SHIFT                (5)
+#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_LENGTH       (27)
+#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID4BASEADDR_OFFSET                        (0x019C)
+
+/* PDP, VID4BASEADDR, VID4BASEADDR
+*/
+#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_MASK         (0xFFFFFFE0)
+#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_LSBMASK      (0x07FFFFFF)
+#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_SHIFT                (5)
+#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_LENGTH       (27)
+#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1UBASEADDR_OFFSET                           (0x01B0)
+
+/* PDP, VID1UBASEADDR, VID1UBASEADDR
+*/
+#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_MASK       (0xFFFFFFE0)
+#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_LSBMASK    (0x07FFFFFF)
+#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_SHIFT      (5)
+#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_LENGTH     (27)
+#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID2UBASEADDR_OFFSET                   (0x01B4)
+
+/* PDP, VID2UBASEADDR, VID2UBASEADDR
+*/
+#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_MASK               (0xFFFFFFE0)
+#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_LSBMASK            (0x07FFFFFF)
+#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_SHIFT              (5)
+#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_LENGTH             (27)
+#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_VID3UBASEADDR_OFFSET                   (0x01B8)
+
+/* PDP, VID3UBASEADDR, VID3UBASEADDR
+*/
+#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_MASK               (0xFFFFFFE0)
+#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_LSBMASK            (0x07FFFFFF)
+#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_SHIFT              (5)
+#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_LENGTH             (27)
+#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_VID4UBASEADDR_OFFSET                   (0x01BC)
+
+/* PDP, VID4UBASEADDR, VID4UBASEADDR
+*/
+#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_MASK               (0xFFFFFFE0)
+#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_LSBMASK            (0x07FFFFFF)
+#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_SHIFT              (5)
+#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_LENGTH             (27)
+#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_VID1VBASEADDR_OFFSET                   (0x01D0)
+
+/* PDP, VID1VBASEADDR, VID1VBASEADDR
+*/
+#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_MASK               (0xFFFFFFE0)
+#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_LSBMASK            (0x07FFFFFF)
+#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_SHIFT              (5)
+#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_LENGTH             (27)
+#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_VID2VBASEADDR_OFFSET                   (0x01D4)
+
+/* PDP, VID2VBASEADDR, VID2VBASEADDR
+*/
+#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_MASK               (0xFFFFFFE0)
+#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_LSBMASK            (0x07FFFFFF)
+#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_SHIFT              (5)
+#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_LENGTH             (27)
+#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_VID3VBASEADDR_OFFSET                   (0x01D8)
+
+/* PDP, VID3VBASEADDR, VID3VBASEADDR
+*/
+#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_MASK               (0xFFFFFFE0)
+#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_LSBMASK            (0x07FFFFFF)
+#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_SHIFT              (5)
+#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_LENGTH             (27)
+#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_VID4VBASEADDR_OFFSET           (0x01DC)
+
+/* PDP, VID4VBASEADDR, VID4VBASEADDR
+*/
+#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_MASK               (0xFFFFFFE0)
+#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_LSBMASK            (0x07FFFFFF)
+#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_SHIFT              (5)
+#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_LENGTH             (27)
+#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_SIGNED_FIELD       IMG_FALSE
+
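[Editorial note, not part of the patch: for the *BASEADDR registers above, the field mask is 0xFFFFFFE0 with a shift of 5, i.e. only bits 31:5 of the buffer address are held, which implies the plane base addresses are 32-byte aligned. A hedged C sketch of how an address would typically be folded into such a register follows; the helper is hypothetical and only illustrates the masking implied by the macros.]

/* Illustrative only -- not part of the patch. The _MASK of 0xFFFFFFE0
 * keeps bits 31:5 of the address, so the low 5 bits are dropped before
 * the value is written to the register. */
#include <stdint.h>

static inline uint32_t odn_pdp_pack_vid1baseaddr(uint32_t dma_addr)
{
	/* Assumes dma_addr is already 32-byte aligned; the mask simply
	 * discards any low-order bits the register cannot hold. */
	return dma_addr & ODN_PDP_VID1BASEADDR_VID1BASEADDR_MASK;
}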
+#define ODN_PDP_VID1POSTSKIPCTRL_OFFSET                (0x0230)
+
+/* PDP, VID1POSTSKIPCTRL, VID1HPOSTCLIP
+*/
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_MASK            (0x007F0000)
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_LSBMASK         (0x0000007F)
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_SHIFT           (16)
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_LENGTH          (7)
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, VID1POSTSKIPCTRL, VID1VPOSTCLIP
+*/
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_MASK            (0x0000003F)
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_LSBMASK         (0x0000003F)
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_SHIFT           (0)
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_LENGTH          (6)
+#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_VID2POSTSKIPCTRL_OFFSET                (0x0234)
+
+/* PDP, VID2POSTSKIPCTRL, VID2HPOSTCLIP
+*/
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_MASK            (0x007F0000)
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_LSBMASK         (0x0000007F)
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_SHIFT           (16)
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_LENGTH          (7)
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, VID2POSTSKIPCTRL, VID2VPOSTCLIP
+*/
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_MASK            (0x0000003F)
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_LSBMASK         (0x0000003F)
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_SHIFT           (0)
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_LENGTH          (6)
+#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_VID3POSTSKIPCTRL_OFFSET                (0x0238)
+
+/* PDP, VID3POSTSKIPCTRL, VID3HPOSTCLIP
+*/
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_MASK            (0x007F0000)
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_LSBMASK         (0x0000007F)
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_SHIFT           (16)
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_LENGTH          (7)
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, VID3POSTSKIPCTRL, VID3VPOSTCLIP
+*/
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_MASK            (0x0000003F)
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_LSBMASK         (0x0000003F)
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_SHIFT           (0)
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_LENGTH          (6)
+#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_VID4POSTSKIPCTRL_OFFSET                (0x023C)
+
+/* PDP, VID4POSTSKIPCTRL, VID4HPOSTCLIP
+*/
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_MASK            (0x007F0000)
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_LSBMASK         (0x0000007F)
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_SHIFT           (16)
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_LENGTH          (7)
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, VID4POSTSKIPCTRL, VID4VPOSTCLIP
+*/
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_MASK            (0x0000003F)
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_LSBMASK         (0x0000003F)
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_SHIFT           (0)
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_LENGTH          (6)
+#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_GRPH1DECIMATE_CTRL_OFFSET              (0x0240)
+
+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_LINE_DISCARD_COUNT
+*/
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_MASK               (0x000000F0)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_LSBMASK            (0x0000000F)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_SHIFT              (4)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_LENGTH             (4)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_LINE_DISCARD_MODE
+*/
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_MASK                (0x00000008)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_LSBMASK             (0x00000001)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_SHIFT               (3)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_LENGTH              (1)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_PIXEL_HALVE
+*/
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_MASK              (0x00000004)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_LSBMASK           (0x00000001)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_SHIFT             (2)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_LENGTH            (1)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_EN
+*/
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_MASK               (0x00000001)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_LSBMASK            (0x00000001)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_SHIFT              (0)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_LENGTH             (1)
+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GRPH2DECIMATE_CTRL_OFFSET              (0x0244)
+
+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_LINE_DISCARD_COUNT
+*/
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_MASK               (0x000000F0)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_LSBMASK            (0x0000000F)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_SHIFT              (4)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_LENGTH             (4)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_LINE_DISCARD_MODE
+*/
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_MASK                (0x00000008)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_LSBMASK             (0x00000001)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_SHIFT               (3)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_LENGTH              (1)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_PIXEL_HALVE
+*/
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_MASK              (0x00000004)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_LSBMASK           (0x00000001)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_SHIFT             (2)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_LENGTH            (1)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_EN
+*/
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_MASK               (0x00000001)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_LSBMASK            (0x00000001)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_SHIFT              (0)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_LENGTH             (1)
+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GRPH3DECIMATE_CTRL_OFFSET              (0x0248)
+
+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_LINE_DISCARD_COUNT
+*/
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_MASK               (0x000000F0)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_LSBMASK            (0x0000000F)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_SHIFT              (4)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_LENGTH             (4)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_LINE_DISCARD_MODE
+*/
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_MASK                (0x00000008)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_LSBMASK             (0x00000001)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_SHIFT               (3)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_LENGTH              (1)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_PIXEL_HALVE
+*/
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_MASK              (0x00000004)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_LSBMASK           (0x00000001)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_SHIFT             (2)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_LENGTH            (1)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_EN
+*/
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_MASK               (0x00000001)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_LSBMASK            (0x00000001)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_SHIFT              (0)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_LENGTH             (1)
+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GRPH4DECIMATE_CTRL_OFFSET              (0x024C)
+
+/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_LINE_DISCARD_COUNT
+*/
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_MASK               (0x000000F0)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_LSBMASK            (0x0000000F)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_SHIFT              (4)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_LENGTH             (4)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_LINE_DISCARD_MODE
+*/
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_MASK                (0x00000008)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_LSBMASK             (0x00000001)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_SHIFT               (3)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_LENGTH              (1)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_PIXEL_HALVE
+*/
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_MASK              (0x00000004)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_LSBMASK           (0x00000001)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_SHIFT             (2)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_LENGTH            (1)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_EN
+*/
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_MASK               (0x00000001)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_LSBMASK            (0x00000001)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_SHIFT              (0)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_LENGTH             (1)
+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_VID1DECIMATE_CTRL_OFFSET               (0x0250)
+
+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_LINE_DISCARD_COUNT
+*/
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_MASK         (0x000000F0)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_LSBMASK              (0x0000000F)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_SHIFT                (4)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_LENGTH               (4)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_LINE_DISCARD_MODE
+*/
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_MASK          (0x00000008)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_LSBMASK               (0x00000001)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_SHIFT         (3)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_LENGTH                (1)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_PIXEL_HALVE
+*/
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_MASK                (0x00000004)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_LSBMASK             (0x00000001)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_SHIFT               (2)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_LENGTH              (1)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_EN
+*/
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_MASK         (0x00000001)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_LSBMASK              (0x00000001)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_SHIFT                (0)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_LENGTH               (1)
+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID2DECIMATE_CTRL_OFFSET               (0x0254)
+
+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_LINE_DISCARD_COUNT
+*/
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_MASK         (0x000000F0)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_LSBMASK              (0x0000000F)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_SHIFT                (4)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_LENGTH               (4)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_LINE_DISCARD_MODE
+*/
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_MASK          (0x00000008)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_LSBMASK               (0x00000001)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_SHIFT         (3)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_LENGTH                (1)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_PIXEL_HALVE
+*/
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_MASK                (0x00000004)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_LSBMASK             (0x00000001)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_SHIFT               (2)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_LENGTH              (1)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_EN
+*/
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_MASK         (0x00000001)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_LSBMASK              (0x00000001)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_SHIFT                (0)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_LENGTH               (1)
+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID3DECIMATE_CTRL_OFFSET               (0x0258)
+
+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_LINE_DISCARD_COUNT
+*/
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_MASK         (0x000000F0)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_LSBMASK              (0x0000000F)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_SHIFT                (4)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_LENGTH               (4)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_LINE_DISCARD_MODE
+*/
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_MASK          (0x00000008)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_LSBMASK               (0x00000001)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_SHIFT         (3)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_LENGTH                (1)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_PIXEL_HALVE
+*/
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_MASK                (0x00000004)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_LSBMASK             (0x00000001)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_SHIFT               (2)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_LENGTH              (1)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_EN
+*/
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_MASK         (0x00000001)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_LSBMASK              (0x00000001)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_SHIFT                (0)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_LENGTH               (1)
+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID4DECIMATE_CTRL_OFFSET               (0x025C)
+
+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_LINE_DISCARD_COUNT
+*/
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_MASK         (0x000000F0)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_LSBMASK              (0x0000000F)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_SHIFT                (4)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_LENGTH               (4)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_LINE_DISCARD_MODE
+*/
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_MASK          (0x00000008)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_LSBMASK               (0x00000001)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_SHIFT         (3)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_LENGTH                (1)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_PIXEL_HALVE
+*/
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_MASK                (0x00000004)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_LSBMASK             (0x00000001)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_SHIFT               (2)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_LENGTH              (1)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_EN
+*/
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_MASK         (0x00000001)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_LSBMASK              (0x00000001)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_SHIFT                (0)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_LENGTH               (1)
+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1SKIPCTRL_OFFSET            (0x0270)
+
+/* PDP, VID1SKIPCTRL, VID1HSKIP
+*/
+#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_MASK            (0x0FFF0000)
+#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_LSBMASK         (0x00000FFF)
+#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_SHIFT           (16)
+#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_LENGTH          (12)
+#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, VID1SKIPCTRL, VID1VSKIP
+*/
+#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_MASK            (0x00000FFF)
+#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_LSBMASK         (0x00000FFF)
+#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_SHIFT           (0)
+#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_LENGTH          (12)
+#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_VID2SKIPCTRL_OFFSET            (0x0274)
+
+/* PDP, VID2SKIPCTRL, VID2HSKIP
+*/
+#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_MASK            (0x0FFF0000)
+#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_LSBMASK         (0x00000FFF)
+#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_SHIFT           (16)
+#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_LENGTH          (12)
+#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, VID2SKIPCTRL, VID2VSKIP
+*/
+#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_MASK            (0x00000FFF)
+#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_LSBMASK         (0x00000FFF)
+#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_SHIFT           (0)
+#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_LENGTH          (12)
+#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_VID3SKIPCTRL_OFFSET            (0x0278)
+
+/* PDP, VID3SKIPCTRL, VID3HSKIP
+*/
+#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_MASK            (0x0FFF0000)
+#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_LSBMASK         (0x00000FFF)
+#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_SHIFT           (16)
+#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_LENGTH          (12)
+#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, VID3SKIPCTRL, VID3VSKIP
+*/
+#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_MASK            (0x00000FFF)
+#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_LSBMASK         (0x00000FFF)
+#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_SHIFT           (0)
+#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_LENGTH          (12)
+#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_VID4SKIPCTRL_OFFSET            (0x027C)
+
+/* PDP, VID4SKIPCTRL, VID4HSKIP
+*/
+#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_MASK            (0x0FFF0000)
+#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_LSBMASK         (0x00000FFF)
+#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_SHIFT           (16)
+#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_LENGTH          (12)
+#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, VID4SKIPCTRL, VID4VSKIP
+*/
+#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_MASK            (0x00000FFF)
+#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_LSBMASK         (0x00000FFF)
+#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_SHIFT           (0)
+#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_LENGTH          (12)
+#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_SIGNED_FIELD    IMG_FALSE
+
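(Editor's note, not part of the imported header: every field in this generated file carries the same _MASK/_LSBMASK/_SHIFT/_LENGTH quartet. A minimal sketch of how such mask/shift pairs are conventionally combined to pack and extract a field is shown below; the pdp_pack_vid1skip()/pdp_field_get() helpers, the sample values, and the standalone main() are illustrative assumptions only and do not appear in the driver.)

    #include <stdint.h>
    #include <stdio.h>

    /* Copied from the VID1SKIPCTRL defines above so the sketch stands alone. */
    #define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_MASK   (0x0FFF0000)
    #define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_SHIFT  (16)
    #define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_MASK   (0x00000FFF)
    #define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_SHIFT  (0)

    /* Pack horizontal and vertical skip counts into one VID1SKIPCTRL value. */
    static uint32_t pdp_pack_vid1skip(uint32_t hskip, uint32_t vskip)
    {
            return ((hskip << ODN_PDP_VID1SKIPCTRL_VID1HSKIP_SHIFT) &
                    ODN_PDP_VID1SKIPCTRL_VID1HSKIP_MASK) |
                   ((vskip << ODN_PDP_VID1SKIPCTRL_VID1VSKIP_SHIFT) &
                    ODN_PDP_VID1SKIPCTRL_VID1VSKIP_MASK);
    }

    /* Extract a field back out of a register value using its mask/shift pair. */
    static uint32_t pdp_field_get(uint32_t reg, uint32_t mask, uint32_t shift)
    {
            return (reg & mask) >> shift;
    }

    int main(void)
    {
            uint32_t reg = pdp_pack_vid1skip(0x040, 0x020);

            printf("VID1SKIPCTRL = 0x%08x, HSKIP = %u, VSKIP = %u\n",
                   (unsigned)reg,
                   (unsigned)pdp_field_get(reg, ODN_PDP_VID1SKIPCTRL_VID1HSKIP_MASK,
                                           ODN_PDP_VID1SKIPCTRL_VID1HSKIP_SHIFT),
                   (unsigned)pdp_field_get(reg, ODN_PDP_VID1SKIPCTRL_VID1VSKIP_MASK,
                                           ODN_PDP_VID1SKIPCTRL_VID1VSKIP_SHIFT));
            return 0;
    }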
+#define ODN_PDP_VID1SCALECTRL_OFFSET           (0x0460)
+
+/* PDP, VID1SCALECTRL, VID1HSCALEBP
+*/
+#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_MASK                (0x80000000)
+#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_LSBMASK             (0x00000001)
+#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_SHIFT               (31)
+#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_LENGTH              (1)
+#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1VSCALEBP
+*/
+#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_MASK                (0x40000000)
+#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_LSBMASK             (0x00000001)
+#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_SHIFT               (30)
+#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_LENGTH              (1)
+#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1HSBEFOREVS
+*/
+#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_MASK              (0x20000000)
+#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_LSBMASK           (0x00000001)
+#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_SHIFT             (29)
+#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_LENGTH            (1)
+#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1VSURUNCTRL
+*/
+#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_MASK              (0x08000000)
+#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_LSBMASK           (0x00000001)
+#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_SHIFT             (27)
+#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_LENGTH            (1)
+#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1PAN_EN
+*/
+#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_MASK          (0x00040000)
+#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_LSBMASK               (0x00000001)
+#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_SHIFT         (18)
+#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_LENGTH                (1)
+#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1VORDER
+*/
+#define ODN_PDP_VID1SCALECTRL_VID1VORDER_MASK          (0x00030000)
+#define ODN_PDP_VID1SCALECTRL_VID1VORDER_LSBMASK               (0x00000003)
+#define ODN_PDP_VID1SCALECTRL_VID1VORDER_SHIFT         (16)
+#define ODN_PDP_VID1SCALECTRL_VID1VORDER_LENGTH                (2)
+#define ODN_PDP_VID1SCALECTRL_VID1VORDER_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, VID1SCALECTRL, VID1VPITCH
+*/
+#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_MASK          (0x0000FFFF)
+#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_LSBMASK               (0x0000FFFF)
+#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_SHIFT         (0)
+#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_LENGTH                (16)
+#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_SIGNED_FIELD  IMG_FALSE
+
+#define ODN_PDP_VID1VSINIT_OFFSET              (0x0464)
+
+/* PDP, VID1VSINIT, VID1VINITIAL1
+*/
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_MASK          (0xFFFF0000)
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_LSBMASK               (0x0000FFFF)
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_SHIFT         (16)
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_LENGTH                (16)
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, VID1VSINIT, VID1VINITIAL0
+*/
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_MASK          (0x0000FFFF)
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_LSBMASK               (0x0000FFFF)
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_SHIFT         (0)
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_LENGTH                (16)
+#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_SIGNED_FIELD  IMG_FALSE
+
+#define ODN_PDP_VID1VCOEFF0_OFFSET             (0x0468)
+
+/* PDP, VID1VCOEFF0, VID1VCOEFF0
+*/
+#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_SHIFT          (0)
+#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_LENGTH         (32)
+#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID1VCOEFF1_OFFSET             (0x046C)
+
+/* PDP, VID1VCOEFF1, VID1VCOEFF1
+*/
+#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_SHIFT          (0)
+#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_LENGTH         (32)
+#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID1VCOEFF2_OFFSET             (0x0470)
+
+/* PDP, VID1VCOEFF2, VID1VCOEFF2
+*/
+#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_SHIFT          (0)
+#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_LENGTH         (32)
+#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID1VCOEFF3_OFFSET             (0x0474)
+
+/* PDP, VID1VCOEFF3, VID1VCOEFF3
+*/
+#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_SHIFT          (0)
+#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_LENGTH         (32)
+#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID1VCOEFF4_OFFSET             (0x0478)
+
+/* PDP, VID1VCOEFF4, VID1VCOEFF4
+*/
+#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_SHIFT          (0)
+#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_LENGTH         (32)
+#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID1VCOEFF5_OFFSET             (0x047C)
+
+/* PDP, VID1VCOEFF5, VID1VCOEFF5
+*/
+#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_SHIFT          (0)
+#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_LENGTH         (32)
+#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID1VCOEFF6_OFFSET             (0x0480)
+
+/* PDP, VID1VCOEFF6, VID1VCOEFF6
+*/
+#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_SHIFT          (0)
+#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_LENGTH         (32)
+#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID1VCOEFF7_OFFSET             (0x0484)
+
+/* PDP, VID1VCOEFF7, VID1VCOEFF7
+*/
+#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_SHIFT          (0)
+#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_LENGTH         (32)
+#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID1VCOEFF8_OFFSET             (0x0488)
+
+/* PDP, VID1VCOEFF8, VID1VCOEFF8
+*/
+#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_MASK           (0x000000FF)
+#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_LSBMASK                (0x000000FF)
+#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_SHIFT          (0)
+#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_LENGTH         (8)
+#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID1HSINIT_OFFSET              (0x048C)
+
+/* PDP, VID1HSINIT, VID1HINITIAL
+*/
+#define ODN_PDP_VID1HSINIT_VID1HINITIAL_MASK           (0xFFFF0000)
+#define ODN_PDP_VID1HSINIT_VID1HINITIAL_LSBMASK                (0x0000FFFF)
+#define ODN_PDP_VID1HSINIT_VID1HINITIAL_SHIFT          (16)
+#define ODN_PDP_VID1HSINIT_VID1HINITIAL_LENGTH         (16)
+#define ODN_PDP_VID1HSINIT_VID1HINITIAL_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, VID1HSINIT, VID1HPITCH
+*/
+#define ODN_PDP_VID1HSINIT_VID1HPITCH_MASK             (0x0000FFFF)
+#define ODN_PDP_VID1HSINIT_VID1HPITCH_LSBMASK          (0x0000FFFF)
+#define ODN_PDP_VID1HSINIT_VID1HPITCH_SHIFT            (0)
+#define ODN_PDP_VID1HSINIT_VID1HPITCH_LENGTH           (16)
+#define ODN_PDP_VID1HSINIT_VID1HPITCH_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF0_OFFSET             (0x0490)
+
+/* PDP, VID1HCOEFF0, VID1HCOEFF0
+*/
+#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_SHIFT          (0)
+#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_LENGTH         (32)
+#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF1_OFFSET             (0x0494)
+
+/* PDP, VID1HCOEFF1, VID1HCOEFF1
+*/
+#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_SHIFT          (0)
+#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_LENGTH         (32)
+#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF2_OFFSET             (0x0498)
+
+/* PDP, VID1HCOEFF2, VID1HCOEFF2
+*/
+#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_SHIFT          (0)
+#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_LENGTH         (32)
+#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF3_OFFSET             (0x049C)
+
+/* PDP, VID1HCOEFF3, VID1HCOEFF3
+*/
+#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_SHIFT          (0)
+#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_LENGTH         (32)
+#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF4_OFFSET             (0x04A0)
+
+/* PDP, VID1HCOEFF4, VID1HCOEFF4
+*/
+#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_SHIFT          (0)
+#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_LENGTH         (32)
+#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF5_OFFSET             (0x04A4)
+
+/* PDP, VID1HCOEFF5, VID1HCOEFF5
+*/
+#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_SHIFT          (0)
+#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_LENGTH         (32)
+#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF6_OFFSET             (0x04A8)
+
+/* PDP, VID1HCOEFF6, VID1HCOEFF6
+*/
+#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_SHIFT          (0)
+#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_LENGTH         (32)
+#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF7_OFFSET             (0x04AC)
+
+/* PDP, VID1HCOEFF7, VID1HCOEFF7
+*/
+#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_SHIFT          (0)
+#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_LENGTH         (32)
+#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF8_OFFSET             (0x04B0)
+
+/* PDP, VID1HCOEFF8, VID1HCOEFF8
+*/
+#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_SHIFT          (0)
+#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_LENGTH         (32)
+#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF9_OFFSET             (0x04B4)
+
+/* PDP, VID1HCOEFF9, VID1HCOEFF9
+*/
+#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_SHIFT          (0)
+#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_LENGTH         (32)
+#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF10_OFFSET            (0x04B8)
+
+/* PDP, VID1HCOEFF10, VID1HCOEFF10
+*/
+#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_MASK         (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_LSBMASK              (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_SHIFT                (0)
+#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_LENGTH               (32)
+#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF11_OFFSET            (0x04BC)
+
+/* PDP, VID1HCOEFF11, VID1HCOEFF11
+*/
+#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_MASK         (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_LSBMASK              (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_SHIFT                (0)
+#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_LENGTH               (32)
+#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF12_OFFSET            (0x04C0)
+
+/* PDP, VID1HCOEFF12, VID1HCOEFF12
+*/
+#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_MASK         (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_LSBMASK              (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_SHIFT                (0)
+#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_LENGTH               (32)
+#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF13_OFFSET            (0x04C4)
+
+/* PDP, VID1HCOEFF13, VID1HCOEFF13
+*/
+#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_MASK         (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_LSBMASK              (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_SHIFT                (0)
+#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_LENGTH               (32)
+#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF14_OFFSET            (0x04C8)
+
+/* PDP, VID1HCOEFF14, VID1HCOEFF14
+*/
+#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_MASK         (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_LSBMASK              (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_SHIFT                (0)
+#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_LENGTH               (32)
+#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF15_OFFSET            (0x04CC)
+
+/* PDP, VID1HCOEFF15, VID1HCOEFF15
+*/
+#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_MASK         (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_LSBMASK              (0xFFFFFFFF)
+#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_SHIFT                (0)
+#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_LENGTH               (32)
+#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1HCOEFF16_OFFSET            (0x04D0)
+
+/* PDP, VID1HCOEFF16, VID1HCOEFF16
+*/
+#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_MASK         (0x000000FF)
+#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_LSBMASK              (0x000000FF)
+#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_SHIFT                (0)
+#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_LENGTH               (8)
+#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID1SCALESIZE_OFFSET           (0x04D4)
+
+/* PDP, VID1SCALESIZE, VID1SCALEWIDTH
+*/
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_MASK              (0x0FFF0000)
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_LSBMASK           (0x00000FFF)
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_SHIFT             (16)
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_LENGTH            (12)
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, VID1SCALESIZE, VID1SCALEHEIGHT
+*/
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_MASK             (0x00000FFF)
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_LSBMASK          (0x00000FFF)
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_SHIFT            (0)
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_LENGTH           (12)
+#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_CORE_ID_OFFSET         (0x04E0)
+
+/* PDP, PVR_ODN_PDP_CORE_ID, GROUP_ID
+*/
+#define ODN_PDP_CORE_ID_GROUP_ID_MASK          (0xFF000000)
+#define ODN_PDP_CORE_ID_GROUP_ID_LSBMASK               (0x000000FF)
+#define ODN_PDP_CORE_ID_GROUP_ID_SHIFT         (24)
+#define ODN_PDP_CORE_ID_GROUP_ID_LENGTH                (8)
+#define ODN_PDP_CORE_ID_GROUP_ID_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, PVR_ODN_PDP_CORE_ID, CORE_ID
+*/
+#define ODN_PDP_CORE_ID_CORE_ID_MASK           (0x00FF0000)
+#define ODN_PDP_CORE_ID_CORE_ID_LSBMASK                (0x000000FF)
+#define ODN_PDP_CORE_ID_CORE_ID_SHIFT          (16)
+#define ODN_PDP_CORE_ID_CORE_ID_LENGTH         (8)
+#define ODN_PDP_CORE_ID_CORE_ID_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, PVR_ODN_PDP_CORE_ID, CONFIG_ID
+*/
+#define ODN_PDP_CORE_ID_CONFIG_ID_MASK         (0x0000FFFF)
+#define ODN_PDP_CORE_ID_CONFIG_ID_LSBMASK              (0x0000FFFF)
+#define ODN_PDP_CORE_ID_CONFIG_ID_SHIFT                (0)
+#define ODN_PDP_CORE_ID_CONFIG_ID_LENGTH               (16)
+#define ODN_PDP_CORE_ID_CONFIG_ID_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_CORE_REV_OFFSET                (0x04F0)
+
+/* PDP, PVR_ODN_PDP_CORE_REV, MAJOR_REV
+*/
+#define ODN_PDP_CORE_REV_MAJOR_REV_MASK                (0x00FF0000)
+#define ODN_PDP_CORE_REV_MAJOR_REV_LSBMASK             (0x000000FF)
+#define ODN_PDP_CORE_REV_MAJOR_REV_SHIFT               (16)
+#define ODN_PDP_CORE_REV_MAJOR_REV_LENGTH              (8)
+#define ODN_PDP_CORE_REV_MAJOR_REV_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, PVR_ODN_PDP_CORE_REV, MINOR_REV
+*/
+#define ODN_PDP_CORE_REV_MINOR_REV_MASK                (0x0000FF00)
+#define ODN_PDP_CORE_REV_MINOR_REV_LSBMASK             (0x000000FF)
+#define ODN_PDP_CORE_REV_MINOR_REV_SHIFT               (8)
+#define ODN_PDP_CORE_REV_MINOR_REV_LENGTH              (8)
+#define ODN_PDP_CORE_REV_MINOR_REV_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, PVR_ODN_PDP_CORE_REV, MAINT_REV
+*/
+#define ODN_PDP_CORE_REV_MAINT_REV_MASK                (0x000000FF)
+#define ODN_PDP_CORE_REV_MAINT_REV_LSBMASK             (0x000000FF)
+#define ODN_PDP_CORE_REV_MAINT_REV_SHIFT               (0)
+#define ODN_PDP_CORE_REV_MAINT_REV_LENGTH              (8)
+#define ODN_PDP_CORE_REV_MAINT_REV_SIGNED_FIELD        IMG_FALSE
+
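(Editor's note, not part of the imported header: the CORE_ID and CORE_REV blocks above describe identification registers split into byte- and halfword-sized sub-fields. The sketch below shows how those sub-fields could be decoded with the generated mask/shift macros; the PDP_FIELD_GET helper and the raw sample register values are hypothetical and serve only to illustrate the macro layout.)

    #include <stdint.h>
    #include <stdio.h>

    /* Copied from the CORE_ID / CORE_REV defines above so the sketch stands alone. */
    #define ODN_PDP_CORE_ID_GROUP_ID_MASK     (0xFF000000)
    #define ODN_PDP_CORE_ID_GROUP_ID_SHIFT    (24)
    #define ODN_PDP_CORE_ID_CORE_ID_MASK      (0x00FF0000)
    #define ODN_PDP_CORE_ID_CORE_ID_SHIFT     (16)
    #define ODN_PDP_CORE_ID_CONFIG_ID_MASK    (0x0000FFFF)
    #define ODN_PDP_CORE_ID_CONFIG_ID_SHIFT   (0)
    #define ODN_PDP_CORE_REV_MAJOR_REV_MASK   (0x00FF0000)
    #define ODN_PDP_CORE_REV_MAJOR_REV_SHIFT  (16)
    #define ODN_PDP_CORE_REV_MINOR_REV_MASK   (0x0000FF00)
    #define ODN_PDP_CORE_REV_MINOR_REV_SHIFT  (8)
    #define ODN_PDP_CORE_REV_MAINT_REV_MASK   (0x000000FF)
    #define ODN_PDP_CORE_REV_MAINT_REV_SHIFT  (0)

    /* Generic field extraction built from the name##_MASK / name##_SHIFT pairs. */
    #define PDP_FIELD_GET(reg, name) \
            (((reg) & name##_MASK) >> name##_SHIFT)

    int main(void)
    {
            /* Hypothetical raw values as they might be read back from the PDP. */
            uint32_t core_id  = 0x07010003;
            uint32_t core_rev = 0x00010203;

            printf("group %u, core %u, config %u\n",
                   (unsigned)PDP_FIELD_GET(core_id, ODN_PDP_CORE_ID_GROUP_ID),
                   (unsigned)PDP_FIELD_GET(core_id, ODN_PDP_CORE_ID_CORE_ID),
                   (unsigned)PDP_FIELD_GET(core_id, ODN_PDP_CORE_ID_CONFIG_ID));
            printf("revision %u.%u.%u\n",
                   (unsigned)PDP_FIELD_GET(core_rev, ODN_PDP_CORE_REV_MAJOR_REV),
                   (unsigned)PDP_FIELD_GET(core_rev, ODN_PDP_CORE_REV_MINOR_REV),
                   (unsigned)PDP_FIELD_GET(core_rev, ODN_PDP_CORE_REV_MAINT_REV));
            return 0;
    }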
+#define ODN_PDP_VID2SCALECTRL_OFFSET           (0x0500)
+
+/* PDP, VID2SCALECTRL, VID2HSCALEBP
+*/
+#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_MASK                (0x80000000)
+#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_LSBMASK             (0x00000001)
+#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_SHIFT               (31)
+#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_LENGTH              (1)
+#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2VSCALEBP
+*/
+#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_MASK                (0x40000000)
+#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_LSBMASK             (0x00000001)
+#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_SHIFT               (30)
+#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_LENGTH              (1)
+#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2HSBEFOREVS
+*/
+#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_MASK              (0x20000000)
+#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_LSBMASK           (0x00000001)
+#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_SHIFT             (29)
+#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_LENGTH            (1)
+#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2VSURUNCTRL
+*/
+#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_MASK              (0x08000000)
+#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_LSBMASK           (0x00000001)
+#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_SHIFT             (27)
+#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_LENGTH            (1)
+#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2PAN_EN
+*/
+#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_MASK          (0x00040000)
+#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_LSBMASK               (0x00000001)
+#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_SHIFT         (18)
+#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_LENGTH                (1)
+#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2VORDER
+*/
+#define ODN_PDP_VID2SCALECTRL_VID2VORDER_MASK          (0x00030000)
+#define ODN_PDP_VID2SCALECTRL_VID2VORDER_LSBMASK               (0x00000003)
+#define ODN_PDP_VID2SCALECTRL_VID2VORDER_SHIFT         (16)
+#define ODN_PDP_VID2SCALECTRL_VID2VORDER_LENGTH                (2)
+#define ODN_PDP_VID2SCALECTRL_VID2VORDER_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, VID2SCALECTRL, VID2VPITCH
+*/
+#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_MASK          (0x0000FFFF)
+#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_LSBMASK               (0x0000FFFF)
+#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_SHIFT         (0)
+#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_LENGTH                (16)
+#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_SIGNED_FIELD  IMG_FALSE
+
+#define ODN_PDP_VID2VSINIT_OFFSET              (0x0504)
+
+/* PDP, VID2VSINIT, VID2VINITIAL1
+*/
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_MASK          (0xFFFF0000)
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_LSBMASK               (0x0000FFFF)
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_SHIFT         (16)
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_LENGTH                (16)
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, VID2VSINIT, VID2VINITIAL0
+*/
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_MASK          (0x0000FFFF)
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_LSBMASK               (0x0000FFFF)
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_SHIFT         (0)
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_LENGTH                (16)
+#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_SIGNED_FIELD  IMG_FALSE
+
+#define ODN_PDP_VID2VCOEFF0_OFFSET             (0x0508)
+
+/* PDP, VID2VCOEFF0, VID2VCOEFF0
+*/
+#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_SHIFT          (0)
+#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_LENGTH         (32)
+#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID2VCOEFF1_OFFSET             (0x050C)
+
+/* PDP, VID2VCOEFF1, VID2VCOEFF1
+*/
+#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_SHIFT          (0)
+#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_LENGTH         (32)
+#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID2VCOEFF2_OFFSET             (0x0510)
+
+/* PDP, VID2VCOEFF2, VID2VCOEFF2
+*/
+#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_SHIFT          (0)
+#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_LENGTH         (32)
+#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID2VCOEFF3_OFFSET             (0x0514)
+
+/* PDP, VID2VCOEFF3, VID2VCOEFF3
+*/
+#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_SHIFT          (0)
+#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_LENGTH         (32)
+#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID2VCOEFF4_OFFSET             (0x0518)
+
+/* PDP, VID2VCOEFF4, VID2VCOEFF4
+*/
+#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_SHIFT          (0)
+#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_LENGTH         (32)
+#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID2VCOEFF5_OFFSET             (0x051C)
+
+/* PDP, VID2VCOEFF5, VID2VCOEFF5
+*/
+#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_SHIFT          (0)
+#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_LENGTH         (32)
+#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID2VCOEFF6_OFFSET             (0x0520)
+
+/* PDP, VID2VCOEFF6, VID2VCOEFF6
+*/
+#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_SHIFT          (0)
+#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_LENGTH         (32)
+#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID2VCOEFF7_OFFSET             (0x0524)
+
+/* PDP, VID2VCOEFF7, VID2VCOEFF7
+*/
+#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_SHIFT          (0)
+#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_LENGTH         (32)
+#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID2VCOEFF8_OFFSET             (0x0528)
+
+/* PDP, VID2VCOEFF8, VID2VCOEFF8
+*/
+#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_MASK           (0x000000FF)
+#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_LSBMASK                (0x000000FF)
+#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_SHIFT          (0)
+#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_LENGTH         (8)
+#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID2HSINIT_OFFSET              (0x052C)
+
+/* PDP, VID2HSINIT, VID2HINITIAL
+*/
+#define ODN_PDP_VID2HSINIT_VID2HINITIAL_MASK           (0xFFFF0000)
+#define ODN_PDP_VID2HSINIT_VID2HINITIAL_LSBMASK                (0x0000FFFF)
+#define ODN_PDP_VID2HSINIT_VID2HINITIAL_SHIFT          (16)
+#define ODN_PDP_VID2HSINIT_VID2HINITIAL_LENGTH         (16)
+#define ODN_PDP_VID2HSINIT_VID2HINITIAL_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, VID2HSINIT, VID2HPITCH
+*/
+#define ODN_PDP_VID2HSINIT_VID2HPITCH_MASK             (0x0000FFFF)
+#define ODN_PDP_VID2HSINIT_VID2HPITCH_LSBMASK          (0x0000FFFF)
+#define ODN_PDP_VID2HSINIT_VID2HPITCH_SHIFT            (0)
+#define ODN_PDP_VID2HSINIT_VID2HPITCH_LENGTH           (16)
+#define ODN_PDP_VID2HSINIT_VID2HPITCH_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF0_OFFSET             (0x0530)
+
+/* PDP, VID2HCOEFF0, VID2HCOEFF0
+*/
+#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_SHIFT          (0)
+#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_LENGTH         (32)
+#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF1_OFFSET             (0x0534)
+
+/* PDP, VID2HCOEFF1, VID2HCOEFF1
+*/
+#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_SHIFT          (0)
+#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_LENGTH         (32)
+#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF2_OFFSET             (0x0538)
+
+/* PDP, VID2HCOEFF2, VID2HCOEFF2
+*/
+#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_SHIFT          (0)
+#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_LENGTH         (32)
+#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF3_OFFSET             (0x053C)
+
+/* PDP, VID2HCOEFF3, VID2HCOEFF3
+*/
+#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_SHIFT          (0)
+#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_LENGTH         (32)
+#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF4_OFFSET             (0x0540)
+
+/* PDP, VID2HCOEFF4, VID2HCOEFF4
+*/
+#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_SHIFT          (0)
+#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_LENGTH         (32)
+#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF5_OFFSET             (0x0544)
+
+/* PDP, VID2HCOEFF5, VID2HCOEFF5
+*/
+#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_SHIFT          (0)
+#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_LENGTH         (32)
+#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF6_OFFSET             (0x0548)
+
+/* PDP, VID2HCOEFF6, VID2HCOEFF6
+*/
+#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_SHIFT          (0)
+#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_LENGTH         (32)
+#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF7_OFFSET             (0x054C)
+
+/* PDP, VID2HCOEFF7, VID2HCOEFF7
+*/
+#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_SHIFT          (0)
+#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_LENGTH         (32)
+#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF8_OFFSET             (0x0550)
+
+/* PDP, VID2HCOEFF8, VID2HCOEFF8
+*/
+#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_SHIFT          (0)
+#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_LENGTH         (32)
+#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF9_OFFSET             (0x0554)
+
+/* PDP, VID2HCOEFF9, VID2HCOEFF9
+*/
+#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_SHIFT          (0)
+#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_LENGTH         (32)
+#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF10_OFFSET            (0x0558)
+
+/* PDP, VID2HCOEFF10, VID2HCOEFF10
+*/
+#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_MASK         (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_LSBMASK              (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_SHIFT                (0)
+#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_LENGTH               (32)
+#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF11_OFFSET            (0x055C)
+
+/* PDP, VID2HCOEFF11, VID2HCOEFF11
+*/
+#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_MASK         (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_LSBMASK              (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_SHIFT                (0)
+#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_LENGTH               (32)
+#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF12_OFFSET            (0x0560)
+
+/* PDP, VID2HCOEFF12, VID2HCOEFF12
+*/
+#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_MASK         (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_LSBMASK              (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_SHIFT                (0)
+#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_LENGTH               (32)
+#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF13_OFFSET            (0x0564)
+
+/* PDP, VID2HCOEFF13, VID2HCOEFF13
+*/
+#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_MASK         (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_LSBMASK              (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_SHIFT                (0)
+#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_LENGTH               (32)
+#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF14_OFFSET            (0x0568)
+
+/* PDP, VID2HCOEFF14, VID2HCOEFF14
+*/
+#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_MASK         (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_LSBMASK              (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_SHIFT                (0)
+#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_LENGTH               (32)
+#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF15_OFFSET            (0x056C)
+
+/* PDP, VID2HCOEFF15, VID2HCOEFF15
+*/
+#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_MASK         (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_LSBMASK              (0xFFFFFFFF)
+#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_SHIFT                (0)
+#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_LENGTH               (32)
+#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID2HCOEFF16_OFFSET            (0x0570)
+
+/* PDP, VID2HCOEFF16, VID2HCOEFF16
+*/
+#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_MASK         (0x000000FF)
+#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_LSBMASK              (0x000000FF)
+#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_SHIFT                (0)
+#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_LENGTH               (8)
+#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID2SCALESIZE_OFFSET           (0x0574)
+
+/* PDP, VID2SCALESIZE, VID2SCALEWIDTH
+*/
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_MASK              (0x0FFF0000)
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_LSBMASK           (0x00000FFF)
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_SHIFT             (16)
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_LENGTH            (12)
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, VID2SCALESIZE, VID2SCALEHEIGHT
+*/
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_MASK             (0x00000FFF)
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_LSBMASK          (0x00000FFF)
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_SHIFT            (0)
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_LENGTH           (12)
+#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_VID3SCALECTRL_OFFSET           (0x0578)
+
+/* PDP, VID3SCALECTRL, VID3HSCALEBP
+*/
+#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_MASK                (0x80000000)
+#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_LSBMASK             (0x00000001)
+#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_SHIFT               (31)
+#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_LENGTH              (1)
+#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3VSCALEBP
+*/
+#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_MASK                (0x40000000)
+#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_LSBMASK             (0x00000001)
+#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_SHIFT               (30)
+#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_LENGTH              (1)
+#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3HSBEFOREVS
+*/
+#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_MASK              (0x20000000)
+#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_LSBMASK           (0x00000001)
+#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_SHIFT             (29)
+#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_LENGTH            (1)
+#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3VSURUNCTRL
+*/
+#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_MASK              (0x08000000)
+#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_LSBMASK           (0x00000001)
+#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_SHIFT             (27)
+#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_LENGTH            (1)
+#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3PAN_EN
+*/
+#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_MASK          (0x00040000)
+#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_LSBMASK               (0x00000001)
+#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_SHIFT         (18)
+#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_LENGTH                (1)
+#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3VORDER
+*/
+#define ODN_PDP_VID3SCALECTRL_VID3VORDER_MASK          (0x00030000)
+#define ODN_PDP_VID3SCALECTRL_VID3VORDER_LSBMASK               (0x00000003)
+#define ODN_PDP_VID3SCALECTRL_VID3VORDER_SHIFT         (16)
+#define ODN_PDP_VID3SCALECTRL_VID3VORDER_LENGTH                (2)
+#define ODN_PDP_VID3SCALECTRL_VID3VORDER_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, VID3SCALECTRL, VID3VPITCH
+*/
+#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_MASK          (0x0000FFFF)
+#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_LSBMASK               (0x0000FFFF)
+#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_SHIFT         (0)
+#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_LENGTH                (16)
+#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_SIGNED_FIELD  IMG_FALSE
+
+#define ODN_PDP_VID3VSINIT_OFFSET              (0x057C)
+
+/* PDP, VID3VSINIT, VID3VINITIAL1
+*/
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_MASK          (0xFFFF0000)
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_LSBMASK               (0x0000FFFF)
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_SHIFT         (16)
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_LENGTH                (16)
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, VID3VSINIT, VID3VINITIAL0
+*/
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_MASK          (0x0000FFFF)
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_LSBMASK               (0x0000FFFF)
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_SHIFT         (0)
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_LENGTH                (16)
+#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_SIGNED_FIELD  IMG_FALSE
+
+#define ODN_PDP_VID3VCOEFF0_OFFSET             (0x0580)
+
+/* PDP, VID3VCOEFF0, VID3VCOEFF0
+*/
+#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_SHIFT          (0)
+#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_LENGTH         (32)
+#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID3VCOEFF1_OFFSET             (0x0584)
+
+/* PDP, VID3VCOEFF1, VID3VCOEFF1
+*/
+#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_SHIFT          (0)
+#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_LENGTH         (32)
+#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID3VCOEFF2_OFFSET             (0x0588)
+
+/* PDP, VID3VCOEFF2, VID3VCOEFF2
+*/
+#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_SHIFT          (0)
+#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_LENGTH         (32)
+#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID3VCOEFF3_OFFSET             (0x058C)
+
+/* PDP, VID3VCOEFF3, VID3VCOEFF3
+*/
+#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_SHIFT          (0)
+#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_LENGTH         (32)
+#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID3VCOEFF4_OFFSET             (0x0590)
+
+/* PDP, VID3VCOEFF4, VID3VCOEFF4
+*/
+#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_SHIFT          (0)
+#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_LENGTH         (32)
+#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID3VCOEFF5_OFFSET             (0x0594)
+
+/* PDP, VID3VCOEFF5, VID3VCOEFF5
+*/
+#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_SHIFT          (0)
+#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_LENGTH         (32)
+#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID3VCOEFF6_OFFSET             (0x0598)
+
+/* PDP, VID3VCOEFF6, VID3VCOEFF6
+*/
+#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_SHIFT          (0)
+#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_LENGTH         (32)
+#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID3VCOEFF7_OFFSET             (0x059C)
+
+/* PDP, VID3VCOEFF7, VID3VCOEFF7
+*/
+#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_SHIFT          (0)
+#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_LENGTH         (32)
+#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID3VCOEFF8_OFFSET             (0x05A0)
+
+/* PDP, VID3VCOEFF8, VID3VCOEFF8
+*/
+#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_MASK           (0x000000FF)
+#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_LSBMASK                (0x000000FF)
+#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_SHIFT          (0)
+#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_LENGTH         (8)
+#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID3HSINIT_OFFSET              (0x05A4)
+
+/* PDP, VID3HSINIT, VID3HINITIAL
+*/
+#define ODN_PDP_VID3HSINIT_VID3HINITIAL_MASK           (0xFFFF0000)
+#define ODN_PDP_VID3HSINIT_VID3HINITIAL_LSBMASK                (0x0000FFFF)
+#define ODN_PDP_VID3HSINIT_VID3HINITIAL_SHIFT          (16)
+#define ODN_PDP_VID3HSINIT_VID3HINITIAL_LENGTH         (16)
+#define ODN_PDP_VID3HSINIT_VID3HINITIAL_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, VID3HSINIT, VID3HPITCH
+*/
+#define ODN_PDP_VID3HSINIT_VID3HPITCH_MASK             (0x0000FFFF)
+#define ODN_PDP_VID3HSINIT_VID3HPITCH_LSBMASK          (0x0000FFFF)
+#define ODN_PDP_VID3HSINIT_VID3HPITCH_SHIFT            (0)
+#define ODN_PDP_VID3HSINIT_VID3HPITCH_LENGTH           (16)
+#define ODN_PDP_VID3HSINIT_VID3HPITCH_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF0_OFFSET             (0x05A8)
+
+/* PDP, VID3HCOEFF0, VID3HCOEFF0
+*/
+#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_SHIFT          (0)
+#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_LENGTH         (32)
+#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF1_OFFSET             (0x05AC)
+
+/* PDP, VID3HCOEFF1, VID3HCOEFF1
+*/
+#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_SHIFT          (0)
+#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_LENGTH         (32)
+#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF2_OFFSET             (0x05B0)
+
+/* PDP, VID3HCOEFF2, VID3HCOEFF2
+*/
+#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_SHIFT          (0)
+#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_LENGTH         (32)
+#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF3_OFFSET             (0x05B4)
+
+/* PDP, VID3HCOEFF3, VID3HCOEFF3
+*/
+#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_SHIFT          (0)
+#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_LENGTH         (32)
+#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF4_OFFSET             (0x05B8)
+
+/* PDP, VID3HCOEFF4, VID3HCOEFF4
+*/
+#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_SHIFT          (0)
+#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_LENGTH         (32)
+#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF5_OFFSET             (0x05BC)
+
+/* PDP, VID3HCOEFF5, VID3HCOEFF5
+*/
+#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_SHIFT          (0)
+#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_LENGTH         (32)
+#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF6_OFFSET             (0x05C0)
+
+/* PDP, VID3HCOEFF6, VID3HCOEFF6
+*/
+#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_SHIFT          (0)
+#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_LENGTH         (32)
+#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF7_OFFSET             (0x05C4)
+
+/* PDP, VID3HCOEFF7, VID3HCOEFF7
+*/
+#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_SHIFT          (0)
+#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_LENGTH         (32)
+#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF8_OFFSET             (0x05C8)
+
+/* PDP, VID3HCOEFF8, VID3HCOEFF8
+*/
+#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_SHIFT          (0)
+#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_LENGTH         (32)
+#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF9_OFFSET             (0x05CC)
+
+/* PDP, VID3HCOEFF9, VID3HCOEFF9
+*/
+#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_SHIFT          (0)
+#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_LENGTH         (32)
+#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF10_OFFSET            (0x05D0)
+
+/* PDP, VID3HCOEFF10, VID3HCOEFF10
+*/
+#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_MASK         (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_LSBMASK      (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_SHIFT                (0)
+#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_LENGTH       (32)
+#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF11_OFFSET            (0x05D4)
+
+/* PDP, VID3HCOEFF11, VID3HCOEFF11
+*/
+#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_MASK         (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_LSBMASK      (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_SHIFT                (0)
+#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_LENGTH       (32)
+#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF12_OFFSET            (0x05D8)
+
+/* PDP, VID3HCOEFF12, VID3HCOEFF12
+*/
+#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_MASK         (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_LSBMASK              (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_SHIFT                (0)
+#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_LENGTH               (32)
+#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF13_OFFSET            (0x05DC)
+
+/* PDP, VID3HCOEFF13, VID3HCOEFF13
+*/
+#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_MASK         (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_LSBMASK              (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_SHIFT                (0)
+#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_LENGTH               (32)
+#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF14_OFFSET            (0x05E0)
+
+/* PDP, VID3HCOEFF14, VID3HCOEFF14
+*/
+#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_MASK         (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_LSBMASK              (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_SHIFT                (0)
+#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_LENGTH               (32)
+#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF15_OFFSET            (0x05E4)
+
+/* PDP, VID3HCOEFF15, VID3HCOEFF15
+*/
+#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_MASK         (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_LSBMASK              (0xFFFFFFFF)
+#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_SHIFT                (0)
+#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_LENGTH               (32)
+#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID3HCOEFF16_OFFSET            (0x05E8)
+
+/* PDP, VID3HCOEFF16, VID3HCOEFF16
+*/
+#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_MASK         (0x000000FF)
+#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_LSBMASK              (0x000000FF)
+#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_SHIFT                (0)
+#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_LENGTH               (8)
+#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID3SCALESIZE_OFFSET           (0x05EC)
+
+/* PDP, VID3SCALESIZE, VID3SCALEWIDTH
+*/
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_MASK              (0x0FFF0000)
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_LSBMASK           (0x00000FFF)
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_SHIFT             (16)
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_LENGTH            (12)
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, VID3SCALESIZE, VID3SCALEHEIGHT
+*/
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_MASK             (0x00000FFF)
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_LSBMASK          (0x00000FFF)
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_SHIFT            (0)
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_LENGTH           (12)
+#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_VID4SCALECTRL_OFFSET           (0x05F0)
+
+/* PDP, VID4SCALECTRL, VID4HSCALEBP
+*/
+#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_MASK                (0x80000000)
+#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_LSBMASK     (0x00000001)
+#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_SHIFT       (31)
+#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_LENGTH      (1)
+#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4VSCALEBP
+*/
+#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_MASK                (0x40000000)
+#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_LSBMASK     (0x00000001)
+#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_SHIFT       (30)
+#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_LENGTH      (1)
+#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4HSBEFOREVS
+*/
+#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_MASK      (0x20000000)
+#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_LSBMASK   (0x00000001)
+#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_SHIFT     (29)
+#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_LENGTH    (1)
+#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4VSURUNCTRL
+*/
+#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_MASK      (0x08000000)
+#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_LSBMASK   (0x00000001)
+#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_SHIFT     (27)
+#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_LENGTH    (1)
+#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4PAN_EN
+*/
+#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_MASK          (0x00040000)
+#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_LSBMASK       (0x00000001)
+#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_SHIFT         (18)
+#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_LENGTH                (1)
+#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4VORDER
+*/
+#define ODN_PDP_VID4SCALECTRL_VID4VORDER_MASK          (0x00030000)
+#define ODN_PDP_VID4SCALECTRL_VID4VORDER_LSBMASK       (0x00000003)
+#define ODN_PDP_VID4SCALECTRL_VID4VORDER_SHIFT         (16)
+#define ODN_PDP_VID4SCALECTRL_VID4VORDER_LENGTH                (2)
+#define ODN_PDP_VID4SCALECTRL_VID4VORDER_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, VID4SCALECTRL, VID4VPITCH
+*/
+#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_MASK          (0x0000FFFF)
+#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_LSBMASK       (0x0000FFFF)
+#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_SHIFT         (0)
+#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_LENGTH                (16)
+#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_SIGNED_FIELD  IMG_FALSE
+
+#define ODN_PDP_VID4VSINIT_OFFSET                      (0x05F4)
+
+/* PDP, VID4VSINIT, VID4VINITIAL1
+*/
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_MASK          (0xFFFF0000)
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_LSBMASK               (0x0000FFFF)
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_SHIFT         (16)
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_LENGTH                (16)
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, VID4VSINIT, VID4VINITIAL0
+*/
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_MASK          (0x0000FFFF)
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_LSBMASK               (0x0000FFFF)
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_SHIFT         (0)
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_LENGTH                (16)
+#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_SIGNED_FIELD  IMG_FALSE
+
+#define ODN_PDP_VID4VCOEFF0_OFFSET                     (0x05F8)
+
+/* PDP, VID4VCOEFF0, VID4VCOEFF0
+*/
+#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_SHIFT          (0)
+#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_LENGTH         (32)
+#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID4VCOEFF1_OFFSET                     (0x05FC)
+
+/* PDP, VID4VCOEFF1, VID4VCOEFF1
+*/
+#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_SHIFT          (0)
+#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_LENGTH         (32)
+#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID4VCOEFF2_OFFSET                     (0x0600)
+
+/* PDP, VID4VCOEFF2, VID4VCOEFF2
+*/
+#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_SHIFT          (0)
+#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_LENGTH         (32)
+#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID4VCOEFF3_OFFSET                     (0x0604)
+
+/* PDP, VID4VCOEFF3, VID4VCOEFF3
+*/
+#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_SHIFT          (0)
+#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_LENGTH         (32)
+#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID4VCOEFF4_OFFSET                     (0x0608)
+
+/* PDP, VID4VCOEFF4, VID4VCOEFF4
+*/
+#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_SHIFT          (0)
+#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_LENGTH         (32)
+#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID4VCOEFF5_OFFSET                     (0x060C)
+
+/* PDP, VID4VCOEFF5, VID4VCOEFF5
+*/
+#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_SHIFT          (0)
+#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_LENGTH         (32)
+#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID4VCOEFF6_OFFSET                     (0x0610)
+
+/* PDP, VID4VCOEFF6, VID4VCOEFF6
+*/
+#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_SHIFT          (0)
+#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_LENGTH         (32)
+#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID4VCOEFF7_OFFSET                     (0x0614)
+
+/* PDP, VID4VCOEFF7, VID4VCOEFF7
+*/
+#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_SHIFT          (0)
+#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_LENGTH         (32)
+#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID4VCOEFF8_OFFSET                     (0x0618)
+
+/* PDP, VID4VCOEFF8, VID4VCOEFF8
+*/
+#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_MASK           (0x000000FF)
+#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_LSBMASK                (0x000000FF)
+#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_SHIFT          (0)
+#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_LENGTH         (8)
+#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID4HSINIT_OFFSET                      (0x061C)
+
+/* PDP, VID4HSINIT, VID4HINITIAL
+*/
+#define ODN_PDP_VID4HSINIT_VID4HINITIAL_MASK           (0xFFFF0000)
+#define ODN_PDP_VID4HSINIT_VID4HINITIAL_LSBMASK                (0x0000FFFF)
+#define ODN_PDP_VID4HSINIT_VID4HINITIAL_SHIFT          (16)
+#define ODN_PDP_VID4HSINIT_VID4HINITIAL_LENGTH         (16)
+#define ODN_PDP_VID4HSINIT_VID4HINITIAL_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, VID4HSINIT, VID4HPITCH
+*/
+#define ODN_PDP_VID4HSINIT_VID4HPITCH_MASK             (0x0000FFFF)
+#define ODN_PDP_VID4HSINIT_VID4HPITCH_LSBMASK          (0x0000FFFF)
+#define ODN_PDP_VID4HSINIT_VID4HPITCH_SHIFT            (0)
+#define ODN_PDP_VID4HSINIT_VID4HPITCH_LENGTH           (16)
+#define ODN_PDP_VID4HSINIT_VID4HPITCH_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF0_OFFSET                     (0x0620)
+
+/* PDP, VID4HCOEFF0, VID4HCOEFF0
+*/
+#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_SHIFT          (0)
+#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_LENGTH         (32)
+#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF1_OFFSET                     (0x0624)
+
+/* PDP, VID4HCOEFF1, VID4HCOEFF1
+*/
+#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_SHIFT          (0)
+#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_LENGTH         (32)
+#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF2_OFFSET                     (0x0628)
+
+/* PDP, VID4HCOEFF2, VID4HCOEFF2
+*/
+#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_SHIFT          (0)
+#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_LENGTH         (32)
+#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF3_OFFSET                     (0x062C)
+
+/* PDP, VID4HCOEFF3, VID4HCOEFF3
+*/
+#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_SHIFT          (0)
+#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_LENGTH         (32)
+#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF4_OFFSET                     (0x0630)
+
+/* PDP, VID4HCOEFF4, VID4HCOEFF4
+*/
+#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_SHIFT          (0)
+#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_LENGTH         (32)
+#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF5_OFFSET                     (0x0634)
+
+/* PDP, VID4HCOEFF5, VID4HCOEFF5
+*/
+#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_SHIFT          (0)
+#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_LENGTH         (32)
+#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF6_OFFSET                     (0x0638)
+
+/* PDP, VID4HCOEFF6, VID4HCOEFF6
+*/
+#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_SHIFT          (0)
+#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_LENGTH         (32)
+#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF7_OFFSET                     (0x063C)
+
+/* PDP, VID4HCOEFF7, VID4HCOEFF7
+*/
+#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_SHIFT          (0)
+#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_LENGTH         (32)
+#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF8_OFFSET                     (0x0640)
+
+/* PDP, VID4HCOEFF8, VID4HCOEFF8
+*/
+#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_SHIFT          (0)
+#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_LENGTH         (32)
+#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF9_OFFSET                     (0x0644)
+
+/* PDP, VID4HCOEFF9, VID4HCOEFF9
+*/
+#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_MASK           (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_LSBMASK                (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_SHIFT          (0)
+#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_LENGTH         (32)
+#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF10_OFFSET                    (0x0648)
+
+/* PDP, VID4HCOEFF10, VID4HCOEFF10
+*/
+#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_MASK         (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_LSBMASK      (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_SHIFT                (0)
+#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_LENGTH       (32)
+#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF11_OFFSET                    (0x064C)
+
+/* PDP, VID4HCOEFF11, VID4HCOEFF11
+*/
+#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_MASK         (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_LSBMASK      (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_SHIFT                (0)
+#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_LENGTH       (32)
+#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF12_OFFSET                    (0x0650)
+
+/* PDP, VID4HCOEFF12, VID4HCOEFF12
+*/
+#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_MASK         (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_LSBMASK      (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_SHIFT                (0)
+#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_LENGTH       (32)
+#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF13_OFFSET                    (0x0654)
+
+/* PDP, VID4HCOEFF13, VID4HCOEFF13
+*/
+#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_MASK         (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_LSBMASK      (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_SHIFT                (0)
+#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_LENGTH       (32)
+#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF14_OFFSET                    (0x0658)
+
+/* PDP, VID4HCOEFF14, VID4HCOEFF14
+*/
+#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_MASK         (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_LSBMASK      (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_SHIFT                (0)
+#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_LENGTH       (32)
+#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF15_OFFSET                    (0x065C)
+
+/* PDP, VID4HCOEFF15, VID4HCOEFF15
+*/
+#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_MASK         (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_LSBMASK      (0xFFFFFFFF)
+#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_SHIFT                (0)
+#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_LENGTH       (32)
+#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID4HCOEFF16_OFFSET                    (0x0660)
+
+/* PDP, VID4HCOEFF16, VID4HCOEFF16
+*/
+#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_MASK         (0x000000FF)
+#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_LSBMASK      (0x000000FF)
+#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_SHIFT                (0)
+#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_LENGTH       (8)
+#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_VID4SCALESIZE_OFFSET                   (0x0664)
+
+/* PDP, VID4SCALESIZE, VID4SCALEWIDTH
+*/
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_MASK              (0x0FFF0000)
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_LSBMASK           (0x00000FFF)
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_SHIFT             (16)
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_LENGTH            (12)
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, VID4SCALESIZE, VID4SCALEHEIGHT
+*/
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_MASK             (0x00000FFF)
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_LSBMASK          (0x00000FFF)
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_SHIFT            (0)
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_LENGTH           (12)
+#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_PORTER_BLND0_OFFSET                            (0x0668)
+
+/* PDP, PORTER_BLND0, BLND0BLENDTYPE
+*/
+#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_MASK               (0x00000010)
+#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_LSBMASK            (0x00000001)
+#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_SHIFT              (4)
+#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_LENGTH             (1)
+#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, PORTER_BLND0, BLND0PORTERMODE
+*/
+#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_MASK              (0x0000000F)
+#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_LSBMASK           (0x0000000F)
+#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_SHIFT             (0)
+#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_LENGTH            (4)
+#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_PORTER_BLND1_OFFSET                            (0x066C)
+
+/* PDP, PORTER_BLND1, BLND1BLENDTYPE
+*/
+#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_MASK               (0x00000010)
+#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_LSBMASK            (0x00000001)
+#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_SHIFT              (4)
+#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_LENGTH             (1)
+#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, PORTER_BLND1, BLND1PORTERMODE
+*/
+#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_MASK              (0x0000000F)
+#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_LSBMASK           (0x0000000F)
+#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_SHIFT             (0)
+#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_LENGTH            (4)
+#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_PORTER_BLND2_OFFSET                            (0x0670)
+
+/* PDP, PORTER_BLND2, BLND2BLENDTYPE
+*/
+#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_MASK               (0x00000010)
+#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_LSBMASK            (0x00000001)
+#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_SHIFT              (4)
+#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_LENGTH             (1)
+#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, PORTER_BLND2, BLND2PORTERMODE
+*/
+#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_MASK              (0x0000000F)
+#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_LSBMASK           (0x0000000F)
+#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_SHIFT             (0)
+#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_LENGTH            (4)
+#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_PORTER_BLND3_OFFSET                            (0x0674)
+
+/* PDP, PORTER_BLND3, BLND3BLENDTYPE
+*/
+#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_MASK               (0x00000010)
+#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_LSBMASK            (0x00000001)
+#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_SHIFT              (4)
+#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_LENGTH             (1)
+#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, PORTER_BLND3, BLND3PORTERMODE
+*/
+#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_MASK              (0x0000000F)
+#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_LSBMASK           (0x0000000F)
+#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_SHIFT             (0)
+#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_LENGTH            (4)
+#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_PORTER_BLND4_OFFSET                            (0x0678)
+
+/* PDP, PORTER_BLND4, BLND4BLENDTYPE
+*/
+#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_MASK               (0x00000010)
+#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_LSBMASK            (0x00000001)
+#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_SHIFT              (4)
+#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_LENGTH             (1)
+#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, PORTER_BLND4, BLND4PORTERMODE
+*/
+#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_MASK              (0x0000000F)
+#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_LSBMASK           (0x0000000F)
+#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_SHIFT             (0)
+#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_LENGTH            (4)
+#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_PORTER_BLND5_OFFSET                            (0x067C)
+
+/* PDP, PORTER_BLND5, BLND5BLENDTYPE
+*/
+#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_MASK               (0x00000010)
+#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_LSBMASK            (0x00000001)
+#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_SHIFT              (4)
+#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_LENGTH             (1)
+#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, PORTER_BLND5, BLND5PORTERMODE
+*/
+#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_MASK              (0x0000000F)
+#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_LSBMASK           (0x0000000F)
+#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_SHIFT             (0)
+#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_LENGTH            (4)
+#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_PORTER_BLND6_OFFSET                            (0x0680)
+
+/* PDP, PORTER_BLND6, BLND6BLENDTYPE
+*/
+#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_MASK               (0x00000010)
+#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_LSBMASK            (0x00000001)
+#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_SHIFT              (4)
+#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_LENGTH             (1)
+#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, PORTER_BLND6, BLND6PORTERMODE
+*/
+#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_MASK              (0x0000000F)
+#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_LSBMASK           (0x0000000F)
+#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_SHIFT             (0)
+#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_LENGTH            (4)
+#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_PORTER_BLND7_OFFSET                            (0x0684)
+
+/* PDP, PORTER_BLND7, BLND7BLENDTYPE
+*/
+#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_MASK               (0x00000010)
+#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_LSBMASK            (0x00000001)
+#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_SHIFT              (4)
+#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_LENGTH             (1)
+#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, PORTER_BLND7, BLND7PORTERMODE
+*/
+#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_MASK              (0x0000000F)
+#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_LSBMASK           (0x0000000F)
+#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_SHIFT             (0)
+#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_LENGTH            (4)
+#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_SIGNED_FIELD      IMG_FALSE
+
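+/*
+ * Illustrative sketch only (not part of the generated register map): the
+ * _MASK, _LSBMASK and _SHIFT macros above are meant to be combined when a
+ * field is packed into, or extracted from, a 32-bit register word.  The
+ * helper names below are hypothetical and use the PORTER_BLND0 blend mode
+ * field as the example.
+ */
+static inline unsigned int
+odn_pdp_porter_blnd0_set_mode(unsigned int reg, unsigned int mode)
+{
+	/* Clear the 4-bit BLND0PORTERMODE field, then insert the new value. */
+	reg &= ~ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_MASK;
+	reg |= (mode & ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_LSBMASK)
+		<< ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_SHIFT;
+	return reg;
+}
+
+static inline unsigned int
+odn_pdp_porter_blnd0_get_mode(unsigned int reg)
+{
+	/* Return the BLND0PORTERMODE field aligned to bit 0. */
+	return (reg & ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_MASK)
+		>> ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_SHIFT;
+}
+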
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET          (0x06C8)
+
+/* PDP, VID1LUMAKEY_ALPHA_TRANS_OPAQUE, VID1LUMAKEYALPHA_TRANS
+*/
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_MASK             (0x03FF0000)
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_LSBMASK          (0x000003FF)
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_SHIFT            (16)
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_LENGTH           (10)
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID1LUMAKEY_ALPHA_TRANS_OPAQUE, VID1LUMAKEYALPHA_OPAQUE
+*/
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_MASK            (0x000003FF)
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_LSBMASK         (0x000003FF)
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_SHIFT           (0)
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_LENGTH          (10)
+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_OFFSET                (0x06CC)
+
+/* PDP, VID1LUMAKEY_LUMA_MAX_MIN, VID1LUMAKEYYMAX
+*/
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_MASK          (0x03FF0000)
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_LSBMASK       (0x000003FF)
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_SHIFT         (16)
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_LENGTH                (10)
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, VID1LUMAKEY_LUMA_MAX_MIN, VID1LUMAKEYYMIN
+*/
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_MASK          (0x000003FF)
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_LSBMASK       (0x000003FF)
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_SHIFT         (0)
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_LENGTH                (10)
+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_SIGNED_FIELD  IMG_FALSE
+
+#define ODN_PDP_VID1LUMAKEY_C_RG_OFFSET                (0x06D0)
+
+/* PDP, VID1LUMAKEY_C_RG, VID1LUMAKEYC_R
+*/
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_MASK           (0x0FFF0000)
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_LSBMASK                (0x00000FFF)
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_SHIFT          (16)
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_LENGTH         (12)
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, VID1LUMAKEY_C_RG, VID1LUMAKEYC_G
+*/
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_MASK           (0x00000FFF)
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_LSBMASK                (0x00000FFF)
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_SHIFT          (0)
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_LENGTH         (12)
+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID1LUMAKEY_C_B_OFFSET         (0x06D4)
+
+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYALPHAMULT
+*/
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_MASK              (0x20000000)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_LSBMASK           (0x00000001)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_SHIFT             (29)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_LENGTH            (1)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYEN
+*/
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_MASK             (0x10000000)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_LSBMASK          (0x00000001)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_SHIFT            (28)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_LENGTH           (1)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYOUTOFF
+*/
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_MASK         (0x03FF0000)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_LSBMASK      (0x000003FF)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_SHIFT                (16)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_LENGTH       (10)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYC_B
+*/
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_MASK            (0x00000FFF)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_LSBMASK         (0x00000FFF)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_SHIFT           (0)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_LENGTH          (12)
+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET          (0x06D8)
+
+/* PDP, VID2LUMAKEY_ALPHA_TRANS_OPAQUE, VID2LUMAKEYALPHA_TRANS
+*/
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_MASK             (0x03FF0000)
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_LSBMASK          (0x000003FF)
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_SHIFT            (16)
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_LENGTH           (10)
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID2LUMAKEY_ALPHA_TRANS_OPAQUE, VID2LUMAKEYALPHA_OPAQUE
+*/
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_MASK            (0x000003FF)
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_LSBMASK         (0x000003FF)
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_SHIFT           (0)
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_LENGTH          (10)
+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_OFFSET                                (0x06DC)
+
+/* PDP, VID2LUMAKEY_LUMA_MAX_MIN, VID2LUMAKEYYMAX
+*/
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_MASK          (0x03FF0000)
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_LSBMASK       (0x000003FF)
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_SHIFT         (16)
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_LENGTH                (10)
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, VID2LUMAKEY_LUMA_MAX_MIN, VID2LUMAKEYYMIN
+*/
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_MASK          (0x000003FF)
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_LSBMASK       (0x000003FF)
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_SHIFT         (0)
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_LENGTH                (10)
+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_SIGNED_FIELD  IMG_FALSE
+
+#define ODN_PDP_VID2LUMAKEY_C_RG_OFFSET                                (0x06E0)
+
+/* PDP, VID2LUMAKEY_C_RG, VID2LUMAKEYC_R
+*/
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_MASK           (0x0FFF0000)
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_LSBMASK                (0x00000FFF)
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_SHIFT          (16)
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_LENGTH         (12)
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, VID2LUMAKEY_C_RG, VID2LUMAKEYC_G
+*/
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_MASK           (0x00000FFF)
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_LSBMASK                (0x00000FFF)
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_SHIFT          (0)
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_LENGTH         (12)
+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID2LUMAKEY_C_B_OFFSET                         (0x06E4)
+
+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYALPHAMULT
+*/
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_MASK              (0x20000000)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_LSBMASK           (0x00000001)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_SHIFT             (29)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_LENGTH            (1)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYEN
+*/
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_MASK             (0x10000000)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_LSBMASK          (0x00000001)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_SHIFT            (28)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_LENGTH           (1)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYOUTOFF
+*/
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_MASK         (0x03FF0000)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_LSBMASK      (0x000003FF)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_SHIFT                (16)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_LENGTH       (10)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYC_B
+*/
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_MASK            (0x00000FFF)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_LSBMASK         (0x00000FFF)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_SHIFT           (0)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_LENGTH          (12)
+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET          (0x06E8)
+
+/* PDP, VID3LUMAKEY_ALPHA_TRANS_OPAQUE, VID3LUMAKEYALPHA_TRANS
+*/
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_MASK             (0x03FF0000)
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_LSBMASK          (0x000003FF)
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_SHIFT            (16)
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_LENGTH           (10)
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID3LUMAKEY_ALPHA_TRANS_OPAQUE, VID3LUMAKEYALPHA_OPAQUE
+*/
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_MASK            (0x000003FF)
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_LSBMASK         (0x000003FF)
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_SHIFT           (0)
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_LENGTH          (10)
+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_OFFSET                        (0x06EC)
+
+/* PDP, VID3LUMAKEY_LUMA_MAX_MIN, VID3LUMAKEYYMAX
+*/
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_MASK          (0x03FF0000)
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_LSBMASK       (0x000003FF)
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_SHIFT         (16)
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_LENGTH                (10)
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, VID3LUMAKEY_LUMA_MAX_MIN, VID3LUMAKEYYMIN
+*/
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_MASK          (0x000003FF)
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_LSBMASK       (0x000003FF)
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_SHIFT         (0)
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_LENGTH                (10)
+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_SIGNED_FIELD  IMG_FALSE
+
+#define ODN_PDP_VID3LUMAKEY_C_RG_OFFSET                (0x06F0)
+
+/* PDP, VID3LUMAKEY_C_RG, VID3LUMAKEYC_R
+*/
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_MASK           (0x0FFF0000)
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_LSBMASK                (0x00000FFF)
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_SHIFT          (16)
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_LENGTH         (12)
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, VID3LUMAKEY_C_RG, VID3LUMAKEYC_G
+*/
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_MASK           (0x00000FFF)
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_LSBMASK                (0x00000FFF)
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_SHIFT          (0)
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_LENGTH         (12)
+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID3LUMAKEY_C_B_OFFSET         (0x06F4)
+
+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYALPHAMULT
+*/
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_MASK              (0x20000000)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_LSBMASK           (0x00000001)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_SHIFT             (29)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_LENGTH            (1)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYEN
+*/
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_MASK             (0x10000000)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_LSBMASK          (0x00000001)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_SHIFT            (28)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_LENGTH           (1)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYOUTOFF
+*/
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_MASK         (0x03FF0000)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_LSBMASK      (0x000003FF)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_SHIFT                (16)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_LENGTH       (10)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYC_B
+*/
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_MASK            (0x00000FFF)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_LSBMASK         (0x00000FFF)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_SHIFT           (0)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_LENGTH          (12)
+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET          (0x06F8)
+
+/* PDP, VID4LUMAKEY_ALPHA_TRANS_OPAQUE, VID4LUMAKEYALPHA_TRANS
+*/
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_MASK             (0x03FF0000)
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_LSBMASK          (0x000003FF)
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_SHIFT            (16)
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_LENGTH           (10)
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID4LUMAKEY_ALPHA_TRANS_OPAQUE, VID4LUMAKEYALPHA_OPAQUE
+*/
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_MASK            (0x000003FF)
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_LSBMASK         (0x000003FF)
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_SHIFT           (0)
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_LENGTH          (10)
+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_OFFSET                (0x06FC)
+
+/* PDP, VID4LUMAKEY_LUMA_MAX_MIN, VID4LUMAKEYYMAX
+*/
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_MASK          (0x03FF0000)
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_LSBMASK       (0x000003FF)
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_SHIFT         (16)
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_LENGTH                (10)
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, VID4LUMAKEY_LUMA_MAX_MIN, VID4LUMAKEYYMIN
+*/
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_MASK          (0x000003FF)
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_LSBMASK       (0x000003FF)
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_SHIFT         (0)
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_LENGTH                (10)
+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_SIGNED_FIELD  IMG_FALSE
+
+#define ODN_PDP_VID4LUMAKEY_C_RG_OFFSET                        (0x0700)
+
+/* PDP, VID4LUMAKEY_C_RG, VID4LUMAKEYC_R
+*/
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_MASK           (0x0FFF0000)
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_LSBMASK                (0x00000FFF)
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_SHIFT          (16)
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_LENGTH         (12)
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, VID4LUMAKEY_C_RG, VID4LUMAKEYC_G
+*/
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_MASK           (0x00000FFF)
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_LSBMASK                (0x00000FFF)
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_SHIFT          (0)
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_LENGTH         (12)
+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_VID4LUMAKEY_C_B_OFFSET         (0x0704)
+
+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYALPHAMULT
+*/
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_MASK      (0x20000000)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_LSBMASK   (0x00000001)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_SHIFT     (29)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_LENGTH    (1)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYEN
+*/
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_MASK             (0x10000000)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_LSBMASK          (0x00000001)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_SHIFT            (28)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_LENGTH           (1)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYOUTOFF
+*/
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_MASK         (0x03FF0000)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_LSBMASK      (0x000003FF)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_SHIFT                (16)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_LENGTH       (10)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE
+
+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYC_B
+*/
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_MASK            (0x00000FFF)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_LSBMASK         (0x00000FFF)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_SHIFT           (0)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_LENGTH          (12)
+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_CSCCOEFF0_OFFSET                       (0x0708)
+
+/* PDP, CSCCOEFF0, CSCCOEFFRU
+*/
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_MASK              (0x003FF800)
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_LSBMASK           (0x000007FF)
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_SHIFT             (11)
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_LENGTH            (11)
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, CSCCOEFF0, CSCCOEFFRY
+*/
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_MASK              (0x000007FF)
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_LSBMASK           (0x000007FF)
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_SHIFT             (0)
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_LENGTH            (11)
+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_CSCCOEFF1_OFFSET                       (0x070C)
+
+/* PDP, CSCCOEFF1, CSCCOEFFGY
+*/
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_MASK              (0x003FF800)
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_LSBMASK           (0x000007FF)
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_SHIFT             (11)
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_LENGTH            (11)
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, CSCCOEFF1, CSCCOEFFRV
+*/
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_MASK              (0x000007FF)
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_LSBMASK           (0x000007FF)
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_SHIFT             (0)
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_LENGTH            (11)
+#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_CSCCOEFF2_OFFSET                       (0x0710)
+
+/* PDP, CSCCOEFF2, CSCCOEFFGV
+*/
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_MASK              (0x003FF800)
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_LSBMASK           (0x000007FF)
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_SHIFT             (11)
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_LENGTH            (11)
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, CSCCOEFF2, CSCCOEFFGU
+*/
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_MASK              (0x000007FF)
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_LSBMASK           (0x000007FF)
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_SHIFT             (0)
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_LENGTH            (11)
+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_CSCCOEFF3_OFFSET                       (0x0714)
+
+/* PDP, CSCCOEFF3, CSCCOEFFBU
+*/
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_MASK              (0x003FF800)
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_LSBMASK           (0x000007FF)
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_SHIFT             (11)
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_LENGTH            (11)
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, CSCCOEFF3, CSCCOEFFBY
+*/
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_MASK              (0x000007FF)
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_LSBMASK           (0x000007FF)
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_SHIFT             (0)
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_LENGTH            (11)
+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_CSCCOEFF4_OFFSET                       (0x0718)
+
+/* PDP, CSCCOEFF4, CSCCOEFFBV
+*/
+#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_MASK              (0x000007FF)
+#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_LSBMASK           (0x000007FF)
+#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_SHIFT             (0)
+#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_LENGTH            (11)
+#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_SIGNED_FIELD      IMG_FALSE
+
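+/*
+ * Illustrative sketch only (not part of the generated register map): each
+ * CSCCOEFF register packs two 11-bit coefficients into one 32-bit word, so
+ * a full word can be composed from the two field macros above.  The helper
+ * name below is hypothetical and uses CSCCOEFF0 (RY in bits 0..10, RU in
+ * bits 11..21) as the example.
+ */
+static inline unsigned int
+odn_pdp_csccoeff0_pack(unsigned int coeff_ru, unsigned int coeff_ry)
+{
+	/* Place each coefficient behind its LSB mask at its field offset. */
+	return ((coeff_ru & ODN_PDP_CSCCOEFF0_CSCCOEFFRU_LSBMASK)
+			<< ODN_PDP_CSCCOEFF0_CSCCOEFFRU_SHIFT) |
+	       ((coeff_ry & ODN_PDP_CSCCOEFF0_CSCCOEFFRY_LSBMASK)
+			<< ODN_PDP_CSCCOEFF0_CSCCOEFFRY_SHIFT);
+}
+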
+#define ODN_PDP_BGNDCOL_AR_OFFSET                      (0x071C)
+
+/* PDP, BGNDCOL_AR, BGNDCOL_A
+*/
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_MASK              (0x03FF0000)
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_LSBMASK           (0x000003FF)
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_SHIFT             (16)
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_LENGTH            (10)
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, BGNDCOL_AR, BGNDCOL_R
+*/
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_MASK              (0x000003FF)
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_LSBMASK           (0x000003FF)
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_SHIFT             (0)
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_LENGTH            (10)
+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_BGNDCOL_GB_OFFSET                      (0x0720)
+
+/* PDP, BGNDCOL_GB, BGNDCOL_G
+*/
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_MASK              (0x03FF0000)
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_LSBMASK           (0x000003FF)
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_SHIFT             (16)
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_LENGTH            (10)
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, BGNDCOL_GB, BGNDCOL_B
+*/
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_MASK              (0x000003FF)
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_LSBMASK           (0x000003FF)
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_SHIFT             (0)
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_LENGTH            (10)
+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_BORDCOL_R_OFFSET                       (0x0724)
+
+/* PDP, BORDCOL_R, BORDCOL_R
+*/
+#define ODN_PDP_BORDCOL_R_BORDCOL_R_MASK               (0x000003FF)
+#define ODN_PDP_BORDCOL_R_BORDCOL_R_LSBMASK            (0x000003FF)
+#define ODN_PDP_BORDCOL_R_BORDCOL_R_SHIFT              (0)
+#define ODN_PDP_BORDCOL_R_BORDCOL_R_LENGTH             (10)
+#define ODN_PDP_BORDCOL_R_BORDCOL_R_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_BORDCOL_GB_OFFSET                      (0x0728)
+
+/* PDP, BORDCOL_GB, BORDCOL_G
+*/
+#define ODN_PDP_BORDCOL_GB_BORDCOL_G_MASK              (0x03FF0000)
+#define ODN_PDP_BORDCOL_GB_BORDCOL_G_LSBMASK           (0x000003FF)
+#define ODN_PDP_BORDCOL_GB_BORDCOL_G_SHIFT             (16)
+#define ODN_PDP_BORDCOL_GB_BORDCOL_G_LENGTH            (10)
+#define ODN_PDP_BORDCOL_GB_BORDCOL_G_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, BORDCOL_GB, BORDCOL_B
+*/
+#define ODN_PDP_BORDCOL_GB_BORDCOL_B_MASK              (0x000003FF)
+#define ODN_PDP_BORDCOL_GB_BORDCOL_B_LSBMASK           (0x000003FF)
+#define ODN_PDP_BORDCOL_GB_BORDCOL_B_SHIFT             (0)
+#define ODN_PDP_BORDCOL_GB_BORDCOL_B_LENGTH            (10)
+#define ODN_PDP_BORDCOL_GB_BORDCOL_B_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_LINESTAT_OFFSET                                (0x0734)
+
+/* PDP, LINESTAT, LINENO
+*/
+#define ODN_PDP_LINESTAT_LINENO_MASK                   (0x00001FFF)
+#define ODN_PDP_LINESTAT_LINENO_LSBMASK                        (0x00001FFF)
+#define ODN_PDP_LINESTAT_LINENO_SHIFT                  (0)
+#define ODN_PDP_LINESTAT_LINENO_LENGTH                 (13)
+#define ODN_PDP_LINESTAT_LINENO_SIGNED_FIELD           IMG_FALSE
+
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_OFFSET       (0x0738)
+
+/* PDP, CR_ODN_PDP_PROCAMP_C11C12, CR_PROCAMP_C12
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_MASK          (0x3FFF0000)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_LSBMASK       (0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_SHIFT         (16)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_LENGTH                (14)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, CR_ODN_PDP_PROCAMP_C11C12, CR_PROCAMP_C11
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_MASK          (0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_LSBMASK       (0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_SHIFT         (0)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_LENGTH                (14)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_SIGNED_FIELD  IMG_FALSE
+
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_OFFSET               (0x073C)
+
+/* PDP, CR_ODN_PDP_PROCAMP_C13C21, CR_PROCAMP_C21
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_MASK          (0x3FFF0000)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_LSBMASK       (0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_SHIFT         (16)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_LENGTH                (14)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, CR_ODN_PDP_PROCAMP_C13C21, CR_PROCAMP_C13
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_MASK          (0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_LSBMASK       (0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_SHIFT         (0)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_LENGTH                (14)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_SIGNED_FIELD  IMG_FALSE
+
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_OFFSET               (0x0740)
+
+/* PDP, CR_ODN_PDP_PROCAMP_C22C23, CR_PROCAMP_C23
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_MASK          (0x3FFF0000)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_LSBMASK       (0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_SHIFT         (16)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_LENGTH                (14)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, CR_ODN_PDP_PROCAMP_C22C23, CR_PROCAMP_C22
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_MASK          (0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_LSBMASK       (0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_SHIFT         (0)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_LENGTH                (14)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_SIGNED_FIELD  IMG_FALSE
+
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_OFFSET               (0x0744)
+
+/* PDP, CR_ODN_PDP_PROCAMP_C31C32, CR_PROCAMP_C32
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_MASK          (0x3FFF0000)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_LSBMASK       (0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_SHIFT         (16)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_LENGTH                (14)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, CR_ODN_PDP_PROCAMP_C31C32, CR_PROCAMP_C31
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_MASK          (0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_LSBMASK       (0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_SHIFT         (0)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_LENGTH                (14)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_SIGNED_FIELD  IMG_FALSE
+
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_OFFSET          (0x0748)
+
+/* PDP, CR_ODN_PDP_PROCAMP_C33, CR_PROCAMP_C33
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_MASK             (0x3FFF0000)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_LSBMASK          (0x00003FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_SHIFT            (16)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_LENGTH           (14)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, CR_ODN_PDP_PROCAMP_C33, CR_PROCAMP_RANGE
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_MASK           (0x00000030)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_LSBMASK                (0x00000003)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_SHIFT          (4)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_LENGTH         (2)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, CR_ODN_PDP_PROCAMP_C33, CR_PROCAMP_EN
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_MASK              (0x00000001)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_LSBMASK           (0x00000001)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_SHIFT             (0)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_LENGTH            (1)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_OFFSET         (0x074C)
+
+/* PDP, CR_ODN_PDP_PROCAMP_OUTOFFSET_BG, CR_PROCAMP_OUTOFF_G
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_MASK               (0x0FFF0000)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_LSBMASK            (0x00000FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_SHIFT              (16)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_LENGTH             (12)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, CR_ODN_PDP_PROCAMP_OUTOFFSET_BG, CR_PROCAMP_OUTOFF_B
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_MASK               (0x00000FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_LSBMASK            (0x00000FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_SHIFT              (0)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_LENGTH             (12)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_OFFSET          (0x0750)
+
+/* PDP, CR_ODN_PDP_PROCAMP_OUTOFFSET_R, CR_PROCAMP_OUTOFF_R
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_MASK                (0x00000FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_LSBMASK             (0x00000FFF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_SHIFT               (0)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_LENGTH              (12)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_SIGNED_FIELD        IMG_FALSE
+
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_OFFSET          (0x0754)
+
+/* PDP, CR_ODN_PDP_PROCAMP_INOFFSET_BG, CR_PROCAMP_INOFF_G
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_MASK         (0x03FF0000)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_LSBMASK              (0x000003FF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_SHIFT                (16)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_LENGTH               (10)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_SIGNED_FIELD IMG_FALSE
+
+/* PDP, CR_ODN_PDP_PROCAMP_INOFFSET_BG, CR_PROCAMP_INOFF_B
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_MASK         (0x000003FF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_LSBMASK              (0x000003FF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_SHIFT                (0)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_LENGTH               (10)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_OFFSET           (0x0758)
+
+/* PDP, CR_ODN_PDP_PROCAMP_INOFFSET_R, CR_PROCAMP_INOFF_R
+*/
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_MASK          (0x000003FF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_LSBMASK               (0x000003FF)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_SHIFT         (0)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_LENGTH                (10)
+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_SIGNED_FIELD  IMG_FALSE
+
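[Illustrative aside, not part of the patch] The ProcAmp definitions above follow the header's uniform _MASK/_SHIFT/_LSBMASK/_LENGTH pattern, so any field can be read or updated with plain mask-and-shift arithmetic. The sketch below is a minimal example of that pattern only; the pdp_field_write()/pdp_procamp_enable() helpers and the ioremap()'d pdp_base pointer are assumptions invented for illustration, not functions from this driver.

/* Illustrative sketch: generic field update using the *_MASK/*_SHIFT macros
 * defined above. "pdp_base" is an assumed ioremap()'d PDP register base.
 */
#include <linux/io.h>
#include <linux/types.h>

static inline void pdp_field_write(void __iomem *pdp_base, u32 reg_offset,
				   u32 field_mask, u32 field_shift, u32 value)
{
	u32 reg = readl(pdp_base + reg_offset);

	reg &= ~field_mask;                         /* clear the field */
	reg |= (value << field_shift) & field_mask; /* insert the new value */
	writel(reg, pdp_base + reg_offset);
}

/* Hypothetical usage: enable the ProcAmp block via CR_PROCAMP_EN. */
static void pdp_procamp_enable(void __iomem *pdp_base)
{
	pdp_field_write(pdp_base,
			ODN_PDP_CR_ODN_PDP_PROCAMP_C33_OFFSET,
			ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_MASK,
			ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_SHIFT,
			1);
}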
+#define ODN_PDP_SIGNAT_R_OFFSET                (0x075C)
+
+/* PDP, SIGNAT_R, SIGNATURE_R
+*/
+#define ODN_PDP_SIGNAT_R_SIGNATURE_R_MASK              (0x000003FF)
+#define ODN_PDP_SIGNAT_R_SIGNATURE_R_LSBMASK           (0x000003FF)
+#define ODN_PDP_SIGNAT_R_SIGNATURE_R_SHIFT             (0)
+#define ODN_PDP_SIGNAT_R_SIGNATURE_R_LENGTH            (10)
+#define ODN_PDP_SIGNAT_R_SIGNATURE_R_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_SIGNAT_GB_OFFSET               (0x0760)
+
+/* PDP, SIGNAT_GB, SIGNATURE_G
+*/
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_MASK             (0x03FF0000)
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_LSBMASK          (0x000003FF)
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_SHIFT            (16)
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_LENGTH           (10)
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, SIGNAT_GB, SIGNATURE_B
+*/
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_MASK             (0x000003FF)
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_LSBMASK          (0x000003FF)
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_SHIFT            (0)
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_LENGTH           (10)
+#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_REGISTER_UPDATE_CTRL_OFFSET            (0x0764)
+
+/* PDP, REGISTER_UPDATE_CTRL, BYPASS_DOUBLE_BUFFERING
+*/
+#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_MASK              (0x00000004)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_LSBMASK           (0x00000001)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_SHIFT             (2)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_LENGTH            (1)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, REGISTER_UPDATE_CTRL, REGISTERS_VALID
+*/
+#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_MASK              (0x00000002)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_LSBMASK           (0x00000001)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SHIFT             (1)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_LENGTH            (1)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, REGISTER_UPDATE_CTRL, USE_VBLANK
+*/
+#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_MASK           (0x00000001)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_LSBMASK                (0x00000001)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SHIFT          (0)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_LENGTH         (1)
+#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_REGISTER_UPDATE_STATUS_OFFSET          (0x0768)
+
+/* PDP, REGISTER_UPDATE_STATUS, REGISTERS_UPDATED
+*/
+#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_MASK          (0x00000002)
+#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_LSBMASK               (0x00000001)
+#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_SHIFT         (1)
+#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_LENGTH                (1)
+#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_SIGNED_FIELD  IMG_FALSE
+
+#define ODN_PDP_DBGCTRL_OFFSET         (0x076C)
+
+/* PDP, DBGCTRL, DBG_READ
+*/
+#define ODN_PDP_DBGCTRL_DBG_READ_MASK          (0x00000002)
+#define ODN_PDP_DBGCTRL_DBG_READ_LSBMASK               (0x00000001)
+#define ODN_PDP_DBGCTRL_DBG_READ_SHIFT         (1)
+#define ODN_PDP_DBGCTRL_DBG_READ_LENGTH                (1)
+#define ODN_PDP_DBGCTRL_DBG_READ_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, DBGCTRL, DBG_ENAB
+*/
+#define ODN_PDP_DBGCTRL_DBG_ENAB_MASK          (0x00000001)
+#define ODN_PDP_DBGCTRL_DBG_ENAB_LSBMASK               (0x00000001)
+#define ODN_PDP_DBGCTRL_DBG_ENAB_SHIFT         (0)
+#define ODN_PDP_DBGCTRL_DBG_ENAB_LENGTH                (1)
+#define ODN_PDP_DBGCTRL_DBG_ENAB_SIGNED_FIELD  IMG_FALSE
+
+#define ODN_PDP_DBGDATA_R_OFFSET               (0x0770)
+
+/* PDP, DBGDATA_R, DBG_DATA_R
+*/
+#define ODN_PDP_DBGDATA_R_DBG_DATA_R_MASK              (0x000003FF)
+#define ODN_PDP_DBGDATA_R_DBG_DATA_R_LSBMASK           (0x000003FF)
+#define ODN_PDP_DBGDATA_R_DBG_DATA_R_SHIFT             (0)
+#define ODN_PDP_DBGDATA_R_DBG_DATA_R_LENGTH            (10)
+#define ODN_PDP_DBGDATA_R_DBG_DATA_R_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_DBGDATA_GB_OFFSET              (0x0774)
+
+/* PDP, DBGDATA_GB, DBG_DATA_G
+*/
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_MASK             (0x03FF0000)
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_LSBMASK          (0x000003FF)
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_SHIFT            (16)
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_LENGTH           (10)
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, DBGDATA_GB, DBG_DATA_B
+*/
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_MASK             (0x000003FF)
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_LSBMASK          (0x000003FF)
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_SHIFT            (0)
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_LENGTH           (10)
+#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_DBGSIDE_OFFSET                         (0x0778)
+
+/* PDP, DBGSIDE, DBG_VAL
+*/
+#define ODN_PDP_DBGSIDE_DBG_VAL_MASK                   (0x00000008)
+#define ODN_PDP_DBGSIDE_DBG_VAL_LSBMASK                        (0x00000001)
+#define ODN_PDP_DBGSIDE_DBG_VAL_SHIFT                  (3)
+#define ODN_PDP_DBGSIDE_DBG_VAL_LENGTH                 (1)
+#define ODN_PDP_DBGSIDE_DBG_VAL_SIGNED_FIELD           IMG_FALSE
+
+/* PDP, DBGSIDE, DBG_SIDE
+*/
+#define ODN_PDP_DBGSIDE_DBG_SIDE_MASK                  (0x00000007)
+#define ODN_PDP_DBGSIDE_DBG_SIDE_LSBMASK               (0x00000007)
+#define ODN_PDP_DBGSIDE_DBG_SIDE_SHIFT                 (0)
+#define ODN_PDP_DBGSIDE_DBG_SIDE_LENGTH                        (3)
+#define ODN_PDP_DBGSIDE_DBG_SIDE_SIGNED_FIELD          IMG_FALSE
+
+#define ODN_PDP_OUTPUT_OFFSET                          (0x077C)
+
+/* PDP, OUTPUT, EIGHT_BIT_OUTPUT
+*/
+#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_MASK           (0x00000002)
+#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_LSBMASK                (0x00000001)
+#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_SHIFT          (1)
+#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_LENGTH         (1)
+#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, OUTPUT, OUTPUT_CONFIG
+*/
+#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_MASK              (0x00000001)
+#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_LSBMASK           (0x00000001)
+#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_SHIFT             (0)
+#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_LENGTH            (1)
+#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_SYNCCTRL_OFFSET                                (0x0780)
+
+/* PDP, SYNCCTRL, SYNCACTIVE
+*/
+#define ODN_PDP_SYNCCTRL_SYNCACTIVE_MASK               (0x80000000)
+#define ODN_PDP_SYNCCTRL_SYNCACTIVE_LSBMASK            (0x00000001)
+#define ODN_PDP_SYNCCTRL_SYNCACTIVE_SHIFT              (31)
+#define ODN_PDP_SYNCCTRL_SYNCACTIVE_LENGTH             (1)
+#define ODN_PDP_SYNCCTRL_SYNCACTIVE_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, SYNCCTRL, ODN_PDP_RST
+*/
+#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_MASK              (0x20000000)
+#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_LSBMASK           (0x00000001)
+#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_SHIFT             (29)
+#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_LENGTH            (1)
+#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, SYNCCTRL, POWERDN
+*/
+#define ODN_PDP_SYNCCTRL_POWERDN_MASK                  (0x10000000)
+#define ODN_PDP_SYNCCTRL_POWERDN_LSBMASK               (0x00000001)
+#define ODN_PDP_SYNCCTRL_POWERDN_SHIFT                 (28)
+#define ODN_PDP_SYNCCTRL_POWERDN_LENGTH                        (1)
+#define ODN_PDP_SYNCCTRL_POWERDN_SIGNED_FIELD          IMG_FALSE
+
+/* PDP, SYNCCTRL, LOWPWRMODE
+*/
+#define ODN_PDP_SYNCCTRL_LOWPWRMODE_MASK               (0x08000000)
+#define ODN_PDP_SYNCCTRL_LOWPWRMODE_LSBMASK            (0x00000001)
+#define ODN_PDP_SYNCCTRL_LOWPWRMODE_SHIFT              (27)
+#define ODN_PDP_SYNCCTRL_LOWPWRMODE_LENGTH             (1)
+#define ODN_PDP_SYNCCTRL_LOWPWRMODE_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, SYNCCTRL, UPDSYNCTRL
+*/
+#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_MASK               (0x04000000)
+#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_LSBMASK            (0x00000001)
+#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_SHIFT              (26)
+#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_LENGTH             (1)
+#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, SYNCCTRL, UPDINTCTRL
+*/
+#define ODN_PDP_SYNCCTRL_UPDINTCTRL_MASK               (0x02000000)
+#define ODN_PDP_SYNCCTRL_UPDINTCTRL_LSBMASK            (0x00000001)
+#define ODN_PDP_SYNCCTRL_UPDINTCTRL_SHIFT              (25)
+#define ODN_PDP_SYNCCTRL_UPDINTCTRL_LENGTH             (1)
+#define ODN_PDP_SYNCCTRL_UPDINTCTRL_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, SYNCCTRL, UPDCTRL
+*/
+#define ODN_PDP_SYNCCTRL_UPDCTRL_MASK                  (0x01000000)
+#define ODN_PDP_SYNCCTRL_UPDCTRL_LSBMASK               (0x00000001)
+#define ODN_PDP_SYNCCTRL_UPDCTRL_SHIFT                 (24)
+#define ODN_PDP_SYNCCTRL_UPDCTRL_LENGTH                        (1)
+#define ODN_PDP_SYNCCTRL_UPDCTRL_SIGNED_FIELD          IMG_FALSE
+
+/* PDP, SYNCCTRL, UPDWAIT
+*/
+#define ODN_PDP_SYNCCTRL_UPDWAIT_MASK                  (0x000F0000)
+#define ODN_PDP_SYNCCTRL_UPDWAIT_LSBMASK               (0x0000000F)
+#define ODN_PDP_SYNCCTRL_UPDWAIT_SHIFT                 (16)
+#define ODN_PDP_SYNCCTRL_UPDWAIT_LENGTH                        (4)
+#define ODN_PDP_SYNCCTRL_UPDWAIT_SIGNED_FIELD          IMG_FALSE
+
+/* PDP, SYNCCTRL, FIELD_EN
+*/
+#define ODN_PDP_SYNCCTRL_FIELD_EN_MASK                 (0x00002000)
+#define ODN_PDP_SYNCCTRL_FIELD_EN_LSBMASK              (0x00000001)
+#define ODN_PDP_SYNCCTRL_FIELD_EN_SHIFT                        (13)
+#define ODN_PDP_SYNCCTRL_FIELD_EN_LENGTH               (1)
+#define ODN_PDP_SYNCCTRL_FIELD_EN_SIGNED_FIELD         IMG_FALSE
+
+/* PDP, SYNCCTRL, CSYNC_EN
+*/
+#define ODN_PDP_SYNCCTRL_CSYNC_EN_MASK                 (0x00001000)
+#define ODN_PDP_SYNCCTRL_CSYNC_EN_LSBMASK              (0x00000001)
+#define ODN_PDP_SYNCCTRL_CSYNC_EN_SHIFT                        (12)
+#define ODN_PDP_SYNCCTRL_CSYNC_EN_LENGTH               (1)
+#define ODN_PDP_SYNCCTRL_CSYNC_EN_SIGNED_FIELD         IMG_FALSE
+
+/* PDP, SYNCCTRL, CLKPOL
+*/
+#define ODN_PDP_SYNCCTRL_CLKPOL_MASK           (0x00000800)
+#define ODN_PDP_SYNCCTRL_CLKPOL_LSBMASK                (0x00000001)
+#define ODN_PDP_SYNCCTRL_CLKPOL_SHIFT          (11)
+#define ODN_PDP_SYNCCTRL_CLKPOL_LENGTH         (1)
+#define ODN_PDP_SYNCCTRL_CLKPOL_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, SYNCCTRL, VS_SLAVE
+*/
+#define ODN_PDP_SYNCCTRL_VS_SLAVE_MASK         (0x00000080)
+#define ODN_PDP_SYNCCTRL_VS_SLAVE_LSBMASK              (0x00000001)
+#define ODN_PDP_SYNCCTRL_VS_SLAVE_SHIFT                (7)
+#define ODN_PDP_SYNCCTRL_VS_SLAVE_LENGTH               (1)
+#define ODN_PDP_SYNCCTRL_VS_SLAVE_SIGNED_FIELD IMG_FALSE
+
+/* PDP, SYNCCTRL, HS_SLAVE
+*/
+#define ODN_PDP_SYNCCTRL_HS_SLAVE_MASK         (0x00000040)
+#define ODN_PDP_SYNCCTRL_HS_SLAVE_LSBMASK              (0x00000001)
+#define ODN_PDP_SYNCCTRL_HS_SLAVE_SHIFT                (6)
+#define ODN_PDP_SYNCCTRL_HS_SLAVE_LENGTH               (1)
+#define ODN_PDP_SYNCCTRL_HS_SLAVE_SIGNED_FIELD IMG_FALSE
+
+/* PDP, SYNCCTRL, BLNKPOL
+*/
+#define ODN_PDP_SYNCCTRL_BLNKPOL_MASK          (0x00000020)
+#define ODN_PDP_SYNCCTRL_BLNKPOL_LSBMASK               (0x00000001)
+#define ODN_PDP_SYNCCTRL_BLNKPOL_SHIFT         (5)
+#define ODN_PDP_SYNCCTRL_BLNKPOL_LENGTH                (1)
+#define ODN_PDP_SYNCCTRL_BLNKPOL_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, SYNCCTRL, BLNKDIS
+*/
+#define ODN_PDP_SYNCCTRL_BLNKDIS_MASK          (0x00000010)
+#define ODN_PDP_SYNCCTRL_BLNKDIS_LSBMASK               (0x00000001)
+#define ODN_PDP_SYNCCTRL_BLNKDIS_SHIFT         (4)
+#define ODN_PDP_SYNCCTRL_BLNKDIS_LENGTH                (1)
+#define ODN_PDP_SYNCCTRL_BLNKDIS_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, SYNCCTRL, VSPOL
+*/
+#define ODN_PDP_SYNCCTRL_VSPOL_MASK            (0x00000008)
+#define ODN_PDP_SYNCCTRL_VSPOL_LSBMASK         (0x00000001)
+#define ODN_PDP_SYNCCTRL_VSPOL_SHIFT           (3)
+#define ODN_PDP_SYNCCTRL_VSPOL_LENGTH          (1)
+#define ODN_PDP_SYNCCTRL_VSPOL_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, SYNCCTRL, VSDIS
+*/
+#define ODN_PDP_SYNCCTRL_VSDIS_MASK            (0x00000004)
+#define ODN_PDP_SYNCCTRL_VSDIS_LSBMASK         (0x00000001)
+#define ODN_PDP_SYNCCTRL_VSDIS_SHIFT           (2)
+#define ODN_PDP_SYNCCTRL_VSDIS_LENGTH          (1)
+#define ODN_PDP_SYNCCTRL_VSDIS_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, SYNCCTRL, HSPOL
+*/
+#define ODN_PDP_SYNCCTRL_HSPOL_MASK            (0x00000002)
+#define ODN_PDP_SYNCCTRL_HSPOL_LSBMASK         (0x00000001)
+#define ODN_PDP_SYNCCTRL_HSPOL_SHIFT           (1)
+#define ODN_PDP_SYNCCTRL_HSPOL_LENGTH          (1)
+#define ODN_PDP_SYNCCTRL_HSPOL_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, SYNCCTRL, HSDIS
+*/
+#define ODN_PDP_SYNCCTRL_HSDIS_MASK            (0x00000001)
+#define ODN_PDP_SYNCCTRL_HSDIS_LSBMASK         (0x00000001)
+#define ODN_PDP_SYNCCTRL_HSDIS_SHIFT           (0)
+#define ODN_PDP_SYNCCTRL_HSDIS_LENGTH          (1)
+#define ODN_PDP_SYNCCTRL_HSDIS_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_HSYNC1_OFFSET                  (0x0784)
+
+/* PDP, HSYNC1, HBPS
+*/
+#define ODN_PDP_HSYNC1_HBPS_MASK               (0x1FFF0000)
+#define ODN_PDP_HSYNC1_HBPS_LSBMASK            (0x00001FFF)
+#define ODN_PDP_HSYNC1_HBPS_SHIFT              (16)
+#define ODN_PDP_HSYNC1_HBPS_LENGTH             (13)
+#define ODN_PDP_HSYNC1_HBPS_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, HSYNC1, HT
+*/
+#define ODN_PDP_HSYNC1_HT_MASK                 (0x00001FFF)
+#define ODN_PDP_HSYNC1_HT_LSBMASK              (0x00001FFF)
+#define ODN_PDP_HSYNC1_HT_SHIFT                        (0)
+#define ODN_PDP_HSYNC1_HT_LENGTH               (13)
+#define ODN_PDP_HSYNC1_HT_SIGNED_FIELD         IMG_FALSE
+
+#define ODN_PDP_HSYNC2_OFFSET                  (0x0788)
+
+/* PDP, HSYNC2, HAS
+*/
+#define ODN_PDP_HSYNC2_HAS_MASK                        (0x1FFF0000)
+#define ODN_PDP_HSYNC2_HAS_LSBMASK             (0x00001FFF)
+#define ODN_PDP_HSYNC2_HAS_SHIFT               (16)
+#define ODN_PDP_HSYNC2_HAS_LENGTH              (13)
+#define ODN_PDP_HSYNC2_HAS_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, HSYNC2, HLBS
+*/
+#define ODN_PDP_HSYNC2_HLBS_MASK               (0x00001FFF)
+#define ODN_PDP_HSYNC2_HLBS_LSBMASK            (0x00001FFF)
+#define ODN_PDP_HSYNC2_HLBS_SHIFT              (0)
+#define ODN_PDP_HSYNC2_HLBS_LENGTH             (13)
+#define ODN_PDP_HSYNC2_HLBS_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_HSYNC3_OFFSET                  (0x078C)
+
+/* PDP, HSYNC3, HFPS
+*/
+#define ODN_PDP_HSYNC3_HFPS_MASK               (0x1FFF0000)
+#define ODN_PDP_HSYNC3_HFPS_LSBMASK            (0x00001FFF)
+#define ODN_PDP_HSYNC3_HFPS_SHIFT              (16)
+#define ODN_PDP_HSYNC3_HFPS_LENGTH             (13)
+#define ODN_PDP_HSYNC3_HFPS_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, HSYNC3, HRBS
+*/
+#define ODN_PDP_HSYNC3_HRBS_MASK               (0x00001FFF)
+#define ODN_PDP_HSYNC3_HRBS_LSBMASK            (0x00001FFF)
+#define ODN_PDP_HSYNC3_HRBS_SHIFT              (0)
+#define ODN_PDP_HSYNC3_HRBS_LENGTH             (13)
+#define ODN_PDP_HSYNC3_HRBS_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_VSYNC1_OFFSET                  (0x0790)
+
+/* PDP, VSYNC1, VBPS
+*/
+#define ODN_PDP_VSYNC1_VBPS_MASK               (0x1FFF0000)
+#define ODN_PDP_VSYNC1_VBPS_LSBMASK            (0x00001FFF)
+#define ODN_PDP_VSYNC1_VBPS_SHIFT              (16)
+#define ODN_PDP_VSYNC1_VBPS_LENGTH             (13)
+#define ODN_PDP_VSYNC1_VBPS_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, VSYNC1, VT
+*/
+#define ODN_PDP_VSYNC1_VT_MASK                 (0x00001FFF)
+#define ODN_PDP_VSYNC1_VT_LSBMASK              (0x00001FFF)
+#define ODN_PDP_VSYNC1_VT_SHIFT                        (0)
+#define ODN_PDP_VSYNC1_VT_LENGTH               (13)
+#define ODN_PDP_VSYNC1_VT_SIGNED_FIELD         IMG_FALSE
+
+#define ODN_PDP_VSYNC2_OFFSET                  (0x0794)
+
+/* PDP, VSYNC2, VAS
+*/
+#define ODN_PDP_VSYNC2_VAS_MASK                        (0x1FFF0000)
+#define ODN_PDP_VSYNC2_VAS_LSBMASK             (0x00001FFF)
+#define ODN_PDP_VSYNC2_VAS_SHIFT               (16)
+#define ODN_PDP_VSYNC2_VAS_LENGTH              (13)
+#define ODN_PDP_VSYNC2_VAS_SIGNED_FIELD                IMG_FALSE
+
+/* PDP, VSYNC2, VTBS
+*/
+#define ODN_PDP_VSYNC2_VTBS_MASK               (0x00001FFF)
+#define ODN_PDP_VSYNC2_VTBS_LSBMASK            (0x00001FFF)
+#define ODN_PDP_VSYNC2_VTBS_SHIFT              (0)
+#define ODN_PDP_VSYNC2_VTBS_LENGTH             (13)
+#define ODN_PDP_VSYNC2_VTBS_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_VSYNC3_OFFSET                  (0x0798)
+
+/* PDP, VSYNC3, VFPS
+*/
+#define ODN_PDP_VSYNC3_VFPS_MASK               (0x1FFF0000)
+#define ODN_PDP_VSYNC3_VFPS_LSBMASK            (0x00001FFF)
+#define ODN_PDP_VSYNC3_VFPS_SHIFT              (16)
+#define ODN_PDP_VSYNC3_VFPS_LENGTH             (13)
+#define ODN_PDP_VSYNC3_VFPS_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, VSYNC3, VBBS
+*/
+#define ODN_PDP_VSYNC3_VBBS_MASK               (0x00001FFF)
+#define ODN_PDP_VSYNC3_VBBS_LSBMASK            (0x00001FFF)
+#define ODN_PDP_VSYNC3_VBBS_SHIFT              (0)
+#define ODN_PDP_VSYNC3_VBBS_LENGTH             (13)
+#define ODN_PDP_VSYNC3_VBBS_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_INTSTAT_OFFSET                 (0x079C)
+
+/* PDP, INTSTAT, INTS_VID4ORUN
+*/
+#define ODN_PDP_INTSTAT_INTS_VID4ORUN_MASK             (0x00080000)
+#define ODN_PDP_INTSTAT_INTS_VID4ORUN_LSBMASK          (0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VID4ORUN_SHIFT            (19)
+#define ODN_PDP_INTSTAT_INTS_VID4ORUN_LENGTH           (1)
+#define ODN_PDP_INTSTAT_INTS_VID4ORUN_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID3ORUN
+*/
+#define ODN_PDP_INTSTAT_INTS_VID3ORUN_MASK             (0x00040000)
+#define ODN_PDP_INTSTAT_INTS_VID3ORUN_LSBMASK          (0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VID3ORUN_SHIFT            (18)
+#define ODN_PDP_INTSTAT_INTS_VID3ORUN_LENGTH           (1)
+#define ODN_PDP_INTSTAT_INTS_VID3ORUN_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID2ORUN
+*/
+#define ODN_PDP_INTSTAT_INTS_VID2ORUN_MASK             (0x00020000)
+#define ODN_PDP_INTSTAT_INTS_VID2ORUN_LSBMASK          (0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VID2ORUN_SHIFT            (17)
+#define ODN_PDP_INTSTAT_INTS_VID2ORUN_LENGTH           (1)
+#define ODN_PDP_INTSTAT_INTS_VID2ORUN_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID1ORUN
+*/
+#define ODN_PDP_INTSTAT_INTS_VID1ORUN_MASK             (0x00010000)
+#define ODN_PDP_INTSTAT_INTS_VID1ORUN_LSBMASK          (0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VID1ORUN_SHIFT            (16)
+#define ODN_PDP_INTSTAT_INTS_VID1ORUN_LENGTH           (1)
+#define ODN_PDP_INTSTAT_INTS_VID1ORUN_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH4ORUN
+*/
+#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_MASK            (0x00008000)
+#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_LSBMASK         (0x00000001)
+#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_SHIFT           (15)
+#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_LENGTH          (1)
+#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH3ORUN
+*/
+#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_MASK            (0x00004000)
+#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_LSBMASK         (0x00000001)
+#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_SHIFT           (14)
+#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_LENGTH          (1)
+#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH2ORUN
+*/
+#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_MASK            (0x00002000)
+#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_LSBMASK         (0x00000001)
+#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_SHIFT           (13)
+#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_LENGTH          (1)
+#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH1ORUN
+*/
+#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_MASK            (0x00001000)
+#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_LSBMASK         (0x00000001)
+#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_SHIFT           (12)
+#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_LENGTH          (1)
+#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID4URUN
+*/
+#define ODN_PDP_INTSTAT_INTS_VID4URUN_MASK             (0x00000800)
+#define ODN_PDP_INTSTAT_INTS_VID4URUN_LSBMASK          (0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VID4URUN_SHIFT            (11)
+#define ODN_PDP_INTSTAT_INTS_VID4URUN_LENGTH           (1)
+#define ODN_PDP_INTSTAT_INTS_VID4URUN_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID3URUN
+*/
+#define ODN_PDP_INTSTAT_INTS_VID3URUN_MASK             (0x00000400)
+#define ODN_PDP_INTSTAT_INTS_VID3URUN_LSBMASK          (0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VID3URUN_SHIFT            (10)
+#define ODN_PDP_INTSTAT_INTS_VID3URUN_LENGTH           (1)
+#define ODN_PDP_INTSTAT_INTS_VID3URUN_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID2URUN
+*/
+#define ODN_PDP_INTSTAT_INTS_VID2URUN_MASK             (0x00000200)
+#define ODN_PDP_INTSTAT_INTS_VID2URUN_LSBMASK          (0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VID2URUN_SHIFT            (9)
+#define ODN_PDP_INTSTAT_INTS_VID2URUN_LENGTH           (1)
+#define ODN_PDP_INTSTAT_INTS_VID2URUN_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VID1URUN
+*/
+#define ODN_PDP_INTSTAT_INTS_VID1URUN_MASK             (0x00000100)
+#define ODN_PDP_INTSTAT_INTS_VID1URUN_LSBMASK          (0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VID1URUN_SHIFT            (8)
+#define ODN_PDP_INTSTAT_INTS_VID1URUN_LENGTH           (1)
+#define ODN_PDP_INTSTAT_INTS_VID1URUN_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH4URUN
+*/
+#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_MASK            (0x00000080)
+#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_LSBMASK         (0x00000001)
+#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_SHIFT           (7)
+#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_LENGTH          (1)
+#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH3URUN
+*/
+#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_MASK            (0x00000040)
+#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_LSBMASK         (0x00000001)
+#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_SHIFT           (6)
+#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_LENGTH          (1)
+#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH2URUN
+*/
+#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_MASK            (0x00000020)
+#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_LSBMASK         (0x00000001)
+#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_SHIFT           (5)
+#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_LENGTH          (1)
+#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, INTSTAT, INTS_GRPH1URUN
+*/
+#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_MASK            (0x00000010)
+#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_LSBMASK         (0x00000001)
+#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_SHIFT           (4)
+#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_LENGTH          (1)
+#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VBLNK1
+*/
+#define ODN_PDP_INTSTAT_INTS_VBLNK1_MASK               (0x00000008)
+#define ODN_PDP_INTSTAT_INTS_VBLNK1_LSBMASK            (0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VBLNK1_SHIFT              (3)
+#define ODN_PDP_INTSTAT_INTS_VBLNK1_LENGTH             (1)
+#define ODN_PDP_INTSTAT_INTS_VBLNK1_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, INTSTAT, INTS_VBLNK0
+*/
+#define ODN_PDP_INTSTAT_INTS_VBLNK0_MASK               (0x00000004)
+#define ODN_PDP_INTSTAT_INTS_VBLNK0_LSBMASK            (0x00000001)
+#define ODN_PDP_INTSTAT_INTS_VBLNK0_SHIFT              (2)
+#define ODN_PDP_INTSTAT_INTS_VBLNK0_LENGTH             (1)
+#define ODN_PDP_INTSTAT_INTS_VBLNK0_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, INTSTAT, INTS_HBLNK1
+*/
+#define ODN_PDP_INTSTAT_INTS_HBLNK1_MASK               (0x00000002)
+#define ODN_PDP_INTSTAT_INTS_HBLNK1_LSBMASK            (0x00000001)
+#define ODN_PDP_INTSTAT_INTS_HBLNK1_SHIFT              (1)
+#define ODN_PDP_INTSTAT_INTS_HBLNK1_LENGTH             (1)
+#define ODN_PDP_INTSTAT_INTS_HBLNK1_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, INTSTAT, INTS_HBLNK0
+*/
+#define ODN_PDP_INTSTAT_INTS_HBLNK0_MASK               (0x00000001)
+#define ODN_PDP_INTSTAT_INTS_HBLNK0_LSBMASK            (0x00000001)
+#define ODN_PDP_INTSTAT_INTS_HBLNK0_SHIFT              (0)
+#define ODN_PDP_INTSTAT_INTS_HBLNK0_LENGTH             (1)
+#define ODN_PDP_INTSTAT_INTS_HBLNK0_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_INTENAB_OFFSET                         (0x07A0)
+
+/* PDP, INTENAB, INTEN_VID4ORUN
+*/
+#define ODN_PDP_INTENAB_INTEN_VID4ORUN_MASK            (0x00080000)
+#define ODN_PDP_INTENAB_INTEN_VID4ORUN_LSBMASK         (0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VID4ORUN_SHIFT           (19)
+#define ODN_PDP_INTENAB_INTEN_VID4ORUN_LENGTH          (1)
+#define ODN_PDP_INTENAB_INTEN_VID4ORUN_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID3ORUN
+*/
+#define ODN_PDP_INTENAB_INTEN_VID3ORUN_MASK            (0x00040000)
+#define ODN_PDP_INTENAB_INTEN_VID3ORUN_LSBMASK         (0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VID3ORUN_SHIFT           (18)
+#define ODN_PDP_INTENAB_INTEN_VID3ORUN_LENGTH          (1)
+#define ODN_PDP_INTENAB_INTEN_VID3ORUN_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID2ORUN
+*/
+#define ODN_PDP_INTENAB_INTEN_VID2ORUN_MASK            (0x00020000)
+#define ODN_PDP_INTENAB_INTEN_VID2ORUN_LSBMASK         (0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VID2ORUN_SHIFT           (17)
+#define ODN_PDP_INTENAB_INTEN_VID2ORUN_LENGTH          (1)
+#define ODN_PDP_INTENAB_INTEN_VID2ORUN_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID1ORUN
+*/
+#define ODN_PDP_INTENAB_INTEN_VID1ORUN_MASK            (0x00010000)
+#define ODN_PDP_INTENAB_INTEN_VID1ORUN_LSBMASK         (0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VID1ORUN_SHIFT           (16)
+#define ODN_PDP_INTENAB_INTEN_VID1ORUN_LENGTH          (1)
+#define ODN_PDP_INTENAB_INTEN_VID1ORUN_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH4ORUN
+*/
+#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_MASK           (0x00008000)
+#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_LSBMASK                (0x00000001)
+#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_SHIFT          (15)
+#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_LENGTH         (1)
+#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH3ORUN
+*/
+#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_MASK           (0x00004000)
+#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_LSBMASK                (0x00000001)
+#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_SHIFT          (14)
+#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_LENGTH         (1)
+#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH2ORUN
+*/
+#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_MASK           (0x00002000)
+#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_LSBMASK                (0x00000001)
+#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_SHIFT          (13)
+#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_LENGTH         (1)
+#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH1ORUN
+*/
+#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_MASK           (0x00001000)
+#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_LSBMASK                (0x00000001)
+#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_SHIFT          (12)
+#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_LENGTH         (1)
+#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID4URUN
+*/
+#define ODN_PDP_INTENAB_INTEN_VID4URUN_MASK            (0x00000800)
+#define ODN_PDP_INTENAB_INTEN_VID4URUN_LSBMASK         (0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VID4URUN_SHIFT           (11)
+#define ODN_PDP_INTENAB_INTEN_VID4URUN_LENGTH          (1)
+#define ODN_PDP_INTENAB_INTEN_VID4URUN_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID3URUN
+*/
+#define ODN_PDP_INTENAB_INTEN_VID3URUN_MASK            (0x00000400)
+#define ODN_PDP_INTENAB_INTEN_VID3URUN_LSBMASK         (0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VID3URUN_SHIFT           (10)
+#define ODN_PDP_INTENAB_INTEN_VID3URUN_LENGTH          (1)
+#define ODN_PDP_INTENAB_INTEN_VID3URUN_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID2URUN
+*/
+#define ODN_PDP_INTENAB_INTEN_VID2URUN_MASK            (0x00000200)
+#define ODN_PDP_INTENAB_INTEN_VID2URUN_LSBMASK         (0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VID2URUN_SHIFT           (9)
+#define ODN_PDP_INTENAB_INTEN_VID2URUN_LENGTH          (1)
+#define ODN_PDP_INTENAB_INTEN_VID2URUN_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VID1URUN
+*/
+#define ODN_PDP_INTENAB_INTEN_VID1URUN_MASK            (0x00000100)
+#define ODN_PDP_INTENAB_INTEN_VID1URUN_LSBMASK         (0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VID1URUN_SHIFT           (8)
+#define ODN_PDP_INTENAB_INTEN_VID1URUN_LENGTH          (1)
+#define ODN_PDP_INTENAB_INTEN_VID1URUN_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH4URUN
+*/
+#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_MASK           (0x00000080)
+#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_LSBMASK                (0x00000001)
+#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_SHIFT          (7)
+#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_LENGTH         (1)
+#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH3URUN
+*/
+#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_MASK           (0x00000040)
+#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_LSBMASK                (0x00000001)
+#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_SHIFT          (6)
+#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_LENGTH         (1)
+#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH2URUN
+*/
+#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_MASK           (0x00000020)
+#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_LSBMASK                (0x00000001)
+#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_SHIFT          (5)
+#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_LENGTH         (1)
+#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, INTENAB, INTEN_GRPH1URUN
+*/
+#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_MASK           (0x00000010)
+#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_LSBMASK                (0x00000001)
+#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_SHIFT          (4)
+#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_LENGTH         (1)
+#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VBLNK1
+*/
+#define ODN_PDP_INTENAB_INTEN_VBLNK1_MASK              (0x00000008)
+#define ODN_PDP_INTENAB_INTEN_VBLNK1_LSBMASK           (0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VBLNK1_SHIFT             (3)
+#define ODN_PDP_INTENAB_INTEN_VBLNK1_LENGTH            (1)
+#define ODN_PDP_INTENAB_INTEN_VBLNK1_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, INTENAB, INTEN_VBLNK0
+*/
+#define ODN_PDP_INTENAB_INTEN_VBLNK0_MASK              (0x00000004)
+#define ODN_PDP_INTENAB_INTEN_VBLNK0_LSBMASK           (0x00000001)
+#define ODN_PDP_INTENAB_INTEN_VBLNK0_SHIFT             (2)
+#define ODN_PDP_INTENAB_INTEN_VBLNK0_LENGTH            (1)
+#define ODN_PDP_INTENAB_INTEN_VBLNK0_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, INTENAB, INTEN_HBLNK1
+*/
+#define ODN_PDP_INTENAB_INTEN_HBLNK1_MASK              (0x00000002)
+#define ODN_PDP_INTENAB_INTEN_HBLNK1_LSBMASK           (0x00000001)
+#define ODN_PDP_INTENAB_INTEN_HBLNK1_SHIFT             (1)
+#define ODN_PDP_INTENAB_INTEN_HBLNK1_LENGTH            (1)
+#define ODN_PDP_INTENAB_INTEN_HBLNK1_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, INTENAB, INTEN_HBLNK0
+*/
+#define ODN_PDP_INTENAB_INTEN_HBLNK0_MASK              (0x00000001)
+#define ODN_PDP_INTENAB_INTEN_HBLNK0_LSBMASK           (0x00000001)
+#define ODN_PDP_INTENAB_INTEN_HBLNK0_SHIFT             (0)
+#define ODN_PDP_INTENAB_INTEN_HBLNK0_LENGTH            (1)
+#define ODN_PDP_INTENAB_INTEN_HBLNK0_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_INTCLR_OFFSET          (0x07A4)
+
+/* PDP, INTCLR, INTCLR_VID4ORUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_MASK            (0x00080000)
+#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_LSBMASK         (0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_SHIFT           (19)
+#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_LENGTH          (1)
+#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID3ORUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_MASK            (0x00040000)
+#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_LSBMASK         (0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_SHIFT           (18)
+#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_LENGTH          (1)
+#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID2ORUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_MASK            (0x00020000)
+#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_LSBMASK         (0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_SHIFT           (17)
+#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_LENGTH          (1)
+#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID1ORUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_MASK            (0x00010000)
+#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_LSBMASK         (0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_SHIFT           (16)
+#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_LENGTH          (1)
+#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH4ORUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_MASK           (0x00008000)
+#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_LSBMASK                (0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_SHIFT          (15)
+#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_LENGTH         (1)
+#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH3ORUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_MASK           (0x00004000)
+#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_LSBMASK                (0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_SHIFT          (14)
+#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_LENGTH         (1)
+#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH2ORUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_MASK           (0x00002000)
+#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_LSBMASK                (0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_SHIFT          (13)
+#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_LENGTH         (1)
+#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH1ORUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_MASK           (0x00001000)
+#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_LSBMASK                (0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_SHIFT          (12)
+#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_LENGTH         (1)
+#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID4URUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_VID4URUN_MASK            (0x00000800)
+#define ODN_PDP_INTCLR_INTCLR_VID4URUN_LSBMASK         (0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VID4URUN_SHIFT           (11)
+#define ODN_PDP_INTCLR_INTCLR_VID4URUN_LENGTH          (1)
+#define ODN_PDP_INTCLR_INTCLR_VID4URUN_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID3URUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_VID3URUN_MASK            (0x00000400)
+#define ODN_PDP_INTCLR_INTCLR_VID3URUN_LSBMASK         (0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VID3URUN_SHIFT           (10)
+#define ODN_PDP_INTCLR_INTCLR_VID3URUN_LENGTH          (1)
+#define ODN_PDP_INTCLR_INTCLR_VID3URUN_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID2URUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_VID2URUN_MASK            (0x00000200)
+#define ODN_PDP_INTCLR_INTCLR_VID2URUN_LSBMASK         (0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VID2URUN_SHIFT           (9)
+#define ODN_PDP_INTCLR_INTCLR_VID2URUN_LENGTH          (1)
+#define ODN_PDP_INTCLR_INTCLR_VID2URUN_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VID1URUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_VID1URUN_MASK            (0x00000100)
+#define ODN_PDP_INTCLR_INTCLR_VID1URUN_LSBMASK         (0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VID1URUN_SHIFT           (8)
+#define ODN_PDP_INTCLR_INTCLR_VID1URUN_LENGTH          (1)
+#define ODN_PDP_INTCLR_INTCLR_VID1URUN_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH4URUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_MASK           (0x00000080)
+#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_LSBMASK                (0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_SHIFT          (7)
+#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_LENGTH         (1)
+#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH3URUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_MASK           (0x00000040)
+#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_LSBMASK                (0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_SHIFT          (6)
+#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_LENGTH         (1)
+#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH2URUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_MASK           (0x00000020)
+#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_LSBMASK                (0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_SHIFT          (5)
+#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_LENGTH         (1)
+#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_GRPH1URUN
+*/
+#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_MASK           (0x00000010)
+#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_LSBMASK                (0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_SHIFT          (4)
+#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_LENGTH         (1)
+#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VBLNK1
+*/
+#define ODN_PDP_INTCLR_INTCLR_VBLNK1_MASK              (0x00000008)
+#define ODN_PDP_INTCLR_INTCLR_VBLNK1_LSBMASK           (0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VBLNK1_SHIFT             (3)
+#define ODN_PDP_INTCLR_INTCLR_VBLNK1_LENGTH            (1)
+#define ODN_PDP_INTCLR_INTCLR_VBLNK1_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_VBLNK0
+*/
+#define ODN_PDP_INTCLR_INTCLR_VBLNK0_MASK              (0x00000004)
+#define ODN_PDP_INTCLR_INTCLR_VBLNK0_LSBMASK           (0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_VBLNK0_SHIFT             (2)
+#define ODN_PDP_INTCLR_INTCLR_VBLNK0_LENGTH            (1)
+#define ODN_PDP_INTCLR_INTCLR_VBLNK0_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_HBLNK1
+*/
+#define ODN_PDP_INTCLR_INTCLR_HBLNK1_MASK              (0x00000002)
+#define ODN_PDP_INTCLR_INTCLR_HBLNK1_LSBMASK           (0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_HBLNK1_SHIFT             (1)
+#define ODN_PDP_INTCLR_INTCLR_HBLNK1_LENGTH            (1)
+#define ODN_PDP_INTCLR_INTCLR_HBLNK1_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, INTCLR, INTCLR_HBLNK0
+*/
+#define ODN_PDP_INTCLR_INTCLR_HBLNK0_MASK              (0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_HBLNK0_LSBMASK           (0x00000001)
+#define ODN_PDP_INTCLR_INTCLR_HBLNK0_SHIFT             (0)
+#define ODN_PDP_INTCLR_INTCLR_HBLNK0_LENGTH            (1)
+#define ODN_PDP_INTCLR_INTCLR_HBLNK0_SIGNED_FIELD      IMG_FALSE
+
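[Illustrative aside, not part of the patch] INTSTAT, INTENAB and INTCLR above form the usual status/enable/clear register triplet. As a hedged sketch of how such a triplet is typically consumed, a vertical-blank handler might test INTS_VBLNK0 in INTSTAT and acknowledge it through INTCLR. The handler name, the pdp_base pointer passed as dev_id, and the write-1-to-clear semantics assumed for INTCLR are all assumptions for this example, not taken from the driver code.

#include <linux/io.h>
#include <linux/interrupt.h>

/* Hypothetical handler: check and acknowledge the pipe-0 vblank interrupt. */
static irqreturn_t pdp_vblank_isr(int irq, void *data)
{
	void __iomem *pdp_base = data;  /* assumed ioremap()'d PDP registers */
	u32 status = readl(pdp_base + ODN_PDP_INTSTAT_OFFSET);

	if (!(status & ODN_PDP_INTSTAT_INTS_VBLNK0_MASK))
		return IRQ_NONE;

	/* Assumed write-1-to-clear: set the matching INTCLR bit to ack. */
	writel(ODN_PDP_INTCLR_INTCLR_VBLNK0_MASK,
	       pdp_base + ODN_PDP_INTCLR_OFFSET);

	return IRQ_HANDLED;
}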
+#define ODN_PDP_MEMCTRL_OFFSET         (0x07A8)
+
+/* PDP, MEMCTRL, MEMREFRESH
+*/
+#define ODN_PDP_MEMCTRL_MEMREFRESH_MASK                (0xC0000000)
+#define ODN_PDP_MEMCTRL_MEMREFRESH_LSBMASK             (0x00000003)
+#define ODN_PDP_MEMCTRL_MEMREFRESH_SHIFT               (30)
+#define ODN_PDP_MEMCTRL_MEMREFRESH_LENGTH              (2)
+#define ODN_PDP_MEMCTRL_MEMREFRESH_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, MEMCTRL, BURSTLEN
+*/
+#define ODN_PDP_MEMCTRL_BURSTLEN_MASK          (0x000000FF)
+#define ODN_PDP_MEMCTRL_BURSTLEN_LSBMASK               (0x000000FF)
+#define ODN_PDP_MEMCTRL_BURSTLEN_SHIFT         (0)
+#define ODN_PDP_MEMCTRL_BURSTLEN_LENGTH                (8)
+#define ODN_PDP_MEMCTRL_BURSTLEN_SIGNED_FIELD  IMG_FALSE
+
+#define ODN_PDP_MEM_THRESH_OFFSET              (0x07AC)
+
+/* PDP, MEM_THRESH, UVTHRESHOLD
+*/
+#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_MASK            (0xFF000000)
+#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_LSBMASK         (0x000000FF)
+#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_SHIFT           (24)
+#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_LENGTH          (8)
+#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, MEM_THRESH, YTHRESHOLD
+*/
+#define ODN_PDP_MEM_THRESH_YTHRESHOLD_MASK             (0x001FF000)
+#define ODN_PDP_MEM_THRESH_YTHRESHOLD_LSBMASK          (0x000001FF)
+#define ODN_PDP_MEM_THRESH_YTHRESHOLD_SHIFT            (12)
+#define ODN_PDP_MEM_THRESH_YTHRESHOLD_LENGTH           (9)
+#define ODN_PDP_MEM_THRESH_YTHRESHOLD_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, MEM_THRESH, THRESHOLD
+*/
+#define ODN_PDP_MEM_THRESH_THRESHOLD_MASK              (0x000001FF)
+#define ODN_PDP_MEM_THRESH_THRESHOLD_LSBMASK           (0x000001FF)
+#define ODN_PDP_MEM_THRESH_THRESHOLD_SHIFT             (0)
+#define ODN_PDP_MEM_THRESH_THRESHOLD_LENGTH            (9)
+#define ODN_PDP_MEM_THRESH_THRESHOLD_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_ALTERNATE_3D_CTRL_OFFSET               (0x07B0)
+
+/* PDP, ALTERNATE_3D_CTRL, ALT3D_ON
+*/
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_MASK                (0x00000010)
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_LSBMASK             (0x00000001)
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_SHIFT               (4)
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_LENGTH              (1)
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, ALTERNATE_3D_CTRL, ALT3D_BLENDSEL
+*/
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_MASK          (0x00000007)
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_LSBMASK               (0x00000007)
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_SHIFT         (0)
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_LENGTH                (3)
+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_SIGNED_FIELD  IMG_FALSE
+
+#define ODN_PDP_GAMMA0_R_OFFSET                (0x07B4)
+
+/* PDP, GAMMA0_R, GAMMA0_R
+*/
+#define ODN_PDP_GAMMA0_R_GAMMA0_R_MASK         (0x000003FF)
+#define ODN_PDP_GAMMA0_R_GAMMA0_R_LSBMASK              (0x000003FF)
+#define ODN_PDP_GAMMA0_R_GAMMA0_R_SHIFT                (0)
+#define ODN_PDP_GAMMA0_R_GAMMA0_R_LENGTH               (10)
+#define ODN_PDP_GAMMA0_R_GAMMA0_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GAMMA0_GB_OFFSET               (0x07B8)
+
+/* PDP, GAMMA0_GB, GAMMA0_G
+*/
+#define ODN_PDP_GAMMA0_GB_GAMMA0_G_MASK                (0x03FF0000)
+#define ODN_PDP_GAMMA0_GB_GAMMA0_G_LSBMASK             (0x000003FF)
+#define ODN_PDP_GAMMA0_GB_GAMMA0_G_SHIFT               (16)
+#define ODN_PDP_GAMMA0_GB_GAMMA0_G_LENGTH              (10)
+#define ODN_PDP_GAMMA0_GB_GAMMA0_G_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, GAMMA0_GB, GAMMA0_B
+*/
+#define ODN_PDP_GAMMA0_GB_GAMMA0_B_MASK                (0x000003FF)
+#define ODN_PDP_GAMMA0_GB_GAMMA0_B_LSBMASK             (0x000003FF)
+#define ODN_PDP_GAMMA0_GB_GAMMA0_B_SHIFT               (0)
+#define ODN_PDP_GAMMA0_GB_GAMMA0_B_LENGTH              (10)
+#define ODN_PDP_GAMMA0_GB_GAMMA0_B_SIGNED_FIELD        IMG_FALSE
+
+#define ODN_PDP_GAMMA1_R_OFFSET                (0x07BC)
+
+/* PDP, GAMMA1_R, GAMMA1_R
+*/
+#define ODN_PDP_GAMMA1_R_GAMMA1_R_MASK         (0x000003FF)
+#define ODN_PDP_GAMMA1_R_GAMMA1_R_LSBMASK              (0x000003FF)
+#define ODN_PDP_GAMMA1_R_GAMMA1_R_SHIFT                (0)
+#define ODN_PDP_GAMMA1_R_GAMMA1_R_LENGTH               (10)
+#define ODN_PDP_GAMMA1_R_GAMMA1_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GAMMA1_GB_OFFSET               (0x07C0)
+
+/* PDP, GAMMA1_GB, GAMMA1_G
+*/
+#define ODN_PDP_GAMMA1_GB_GAMMA1_G_MASK                (0x03FF0000)
+#define ODN_PDP_GAMMA1_GB_GAMMA1_G_LSBMASK             (0x000003FF)
+#define ODN_PDP_GAMMA1_GB_GAMMA1_G_SHIFT               (16)
+#define ODN_PDP_GAMMA1_GB_GAMMA1_G_LENGTH              (10)
+#define ODN_PDP_GAMMA1_GB_GAMMA1_G_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, GAMMA1_GB, GAMMA1_B
+*/
+#define ODN_PDP_GAMMA1_GB_GAMMA1_B_MASK                (0x000003FF)
+#define ODN_PDP_GAMMA1_GB_GAMMA1_B_LSBMASK             (0x000003FF)
+#define ODN_PDP_GAMMA1_GB_GAMMA1_B_SHIFT               (0)
+#define ODN_PDP_GAMMA1_GB_GAMMA1_B_LENGTH              (10)
+#define ODN_PDP_GAMMA1_GB_GAMMA1_B_SIGNED_FIELD        IMG_FALSE
+
+#define ODN_PDP_GAMMA2_R_OFFSET                (0x07C4)
+
+/* PDP, GAMMA2_R, GAMMA2_R
+*/
+#define ODN_PDP_GAMMA2_R_GAMMA2_R_MASK         (0x000003FF)
+#define ODN_PDP_GAMMA2_R_GAMMA2_R_LSBMASK              (0x000003FF)
+#define ODN_PDP_GAMMA2_R_GAMMA2_R_SHIFT                (0)
+#define ODN_PDP_GAMMA2_R_GAMMA2_R_LENGTH               (10)
+#define ODN_PDP_GAMMA2_R_GAMMA2_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GAMMA2_GB_OFFSET               (0x07C8)
+
+/* PDP, GAMMA2_GB, GAMMA2_G
+*/
+#define ODN_PDP_GAMMA2_GB_GAMMA2_G_MASK                (0x03FF0000)
+#define ODN_PDP_GAMMA2_GB_GAMMA2_G_LSBMASK             (0x000003FF)
+#define ODN_PDP_GAMMA2_GB_GAMMA2_G_SHIFT               (16)
+#define ODN_PDP_GAMMA2_GB_GAMMA2_G_LENGTH              (10)
+#define ODN_PDP_GAMMA2_GB_GAMMA2_G_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, GAMMA2_GB, GAMMA2_B
+*/
+#define ODN_PDP_GAMMA2_GB_GAMMA2_B_MASK                (0x000003FF)
+#define ODN_PDP_GAMMA2_GB_GAMMA2_B_LSBMASK             (0x000003FF)
+#define ODN_PDP_GAMMA2_GB_GAMMA2_B_SHIFT               (0)
+#define ODN_PDP_GAMMA2_GB_GAMMA2_B_LENGTH              (10)
+#define ODN_PDP_GAMMA2_GB_GAMMA2_B_SIGNED_FIELD        IMG_FALSE
+
+#define ODN_PDP_GAMMA3_R_OFFSET                (0x07CC)
+
+/* PDP, GAMMA3_R, GAMMA3_R
+*/
+#define ODN_PDP_GAMMA3_R_GAMMA3_R_MASK         (0x000003FF)
+#define ODN_PDP_GAMMA3_R_GAMMA3_R_LSBMASK              (0x000003FF)
+#define ODN_PDP_GAMMA3_R_GAMMA3_R_SHIFT                (0)
+#define ODN_PDP_GAMMA3_R_GAMMA3_R_LENGTH               (10)
+#define ODN_PDP_GAMMA3_R_GAMMA3_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GAMMA3_GB_OFFSET               (0x07D0)
+
+/* PDP, GAMMA3_GB, GAMMA3_G
+*/
+#define ODN_PDP_GAMMA3_GB_GAMMA3_G_MASK                (0x03FF0000)
+#define ODN_PDP_GAMMA3_GB_GAMMA3_G_LSBMASK             (0x000003FF)
+#define ODN_PDP_GAMMA3_GB_GAMMA3_G_SHIFT               (16)
+#define ODN_PDP_GAMMA3_GB_GAMMA3_G_LENGTH              (10)
+#define ODN_PDP_GAMMA3_GB_GAMMA3_G_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, GAMMA3_GB, GAMMA3_B
+*/
+#define ODN_PDP_GAMMA3_GB_GAMMA3_B_MASK                (0x000003FF)
+#define ODN_PDP_GAMMA3_GB_GAMMA3_B_LSBMASK             (0x000003FF)
+#define ODN_PDP_GAMMA3_GB_GAMMA3_B_SHIFT               (0)
+#define ODN_PDP_GAMMA3_GB_GAMMA3_B_LENGTH              (10)
+#define ODN_PDP_GAMMA3_GB_GAMMA3_B_SIGNED_FIELD        IMG_FALSE
+
+#define ODN_PDP_GAMMA4_R_OFFSET                (0x07D4)
+
+/* PDP, GAMMA4_R, GAMMA4_R
+*/
+#define ODN_PDP_GAMMA4_R_GAMMA4_R_MASK         (0x000003FF)
+#define ODN_PDP_GAMMA4_R_GAMMA4_R_LSBMASK              (0x000003FF)
+#define ODN_PDP_GAMMA4_R_GAMMA4_R_SHIFT                (0)
+#define ODN_PDP_GAMMA4_R_GAMMA4_R_LENGTH               (10)
+#define ODN_PDP_GAMMA4_R_GAMMA4_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GAMMA4_GB_OFFSET               (0x07D8)
+
+/* PDP, GAMMA4_GB, GAMMA4_G
+*/
+#define ODN_PDP_GAMMA4_GB_GAMMA4_G_MASK                (0x03FF0000)
+#define ODN_PDP_GAMMA4_GB_GAMMA4_G_LSBMASK             (0x000003FF)
+#define ODN_PDP_GAMMA4_GB_GAMMA4_G_SHIFT               (16)
+#define ODN_PDP_GAMMA4_GB_GAMMA4_G_LENGTH              (10)
+#define ODN_PDP_GAMMA4_GB_GAMMA4_G_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, GAMMA4_GB, GAMMA4_B
+*/
+#define ODN_PDP_GAMMA4_GB_GAMMA4_B_MASK                (0x000003FF)
+#define ODN_PDP_GAMMA4_GB_GAMMA4_B_LSBMASK             (0x000003FF)
+#define ODN_PDP_GAMMA4_GB_GAMMA4_B_SHIFT               (0)
+#define ODN_PDP_GAMMA4_GB_GAMMA4_B_LENGTH              (10)
+#define ODN_PDP_GAMMA4_GB_GAMMA4_B_SIGNED_FIELD        IMG_FALSE
+
+#define ODN_PDP_GAMMA5_R_OFFSET                (0x07DC)
+
+/* PDP, GAMMA5_R, GAMMA5_R
+*/
+#define ODN_PDP_GAMMA5_R_GAMMA5_R_MASK         (0x000003FF)
+#define ODN_PDP_GAMMA5_R_GAMMA5_R_LSBMASK              (0x000003FF)
+#define ODN_PDP_GAMMA5_R_GAMMA5_R_SHIFT                (0)
+#define ODN_PDP_GAMMA5_R_GAMMA5_R_LENGTH               (10)
+#define ODN_PDP_GAMMA5_R_GAMMA5_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GAMMA5_GB_OFFSET               (0x07E0)
+
+/* PDP, GAMMA5_GB, GAMMA5_G
+*/
+#define ODN_PDP_GAMMA5_GB_GAMMA5_G_MASK                (0x03FF0000)
+#define ODN_PDP_GAMMA5_GB_GAMMA5_G_LSBMASK             (0x000003FF)
+#define ODN_PDP_GAMMA5_GB_GAMMA5_G_SHIFT               (16)
+#define ODN_PDP_GAMMA5_GB_GAMMA5_G_LENGTH              (10)
+#define ODN_PDP_GAMMA5_GB_GAMMA5_G_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, GAMMA5_GB, GAMMA5_B
+*/
+#define ODN_PDP_GAMMA5_GB_GAMMA5_B_MASK                (0x000003FF)
+#define ODN_PDP_GAMMA5_GB_GAMMA5_B_LSBMASK             (0x000003FF)
+#define ODN_PDP_GAMMA5_GB_GAMMA5_B_SHIFT               (0)
+#define ODN_PDP_GAMMA5_GB_GAMMA5_B_LENGTH              (10)
+#define ODN_PDP_GAMMA5_GB_GAMMA5_B_SIGNED_FIELD        IMG_FALSE
+
+#define ODN_PDP_GAMMA6_R_OFFSET                (0x07E4)
+
+/* PDP, GAMMA6_R, GAMMA6_R
+*/
+#define ODN_PDP_GAMMA6_R_GAMMA6_R_MASK         (0x000003FF)
+#define ODN_PDP_GAMMA6_R_GAMMA6_R_LSBMASK              (0x000003FF)
+#define ODN_PDP_GAMMA6_R_GAMMA6_R_SHIFT                (0)
+#define ODN_PDP_GAMMA6_R_GAMMA6_R_LENGTH               (10)
+#define ODN_PDP_GAMMA6_R_GAMMA6_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GAMMA6_GB_OFFSET               (0x07E8)
+
+/* PDP, GAMMA6_GB, GAMMA6_G
+*/
+#define ODN_PDP_GAMMA6_GB_GAMMA6_G_MASK                (0x03FF0000)
+#define ODN_PDP_GAMMA6_GB_GAMMA6_G_LSBMASK             (0x000003FF)
+#define ODN_PDP_GAMMA6_GB_GAMMA6_G_SHIFT               (16)
+#define ODN_PDP_GAMMA6_GB_GAMMA6_G_LENGTH              (10)
+#define ODN_PDP_GAMMA6_GB_GAMMA6_G_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, GAMMA6_GB, GAMMA6_B
+*/
+#define ODN_PDP_GAMMA6_GB_GAMMA6_B_MASK                (0x000003FF)
+#define ODN_PDP_GAMMA6_GB_GAMMA6_B_LSBMASK             (0x000003FF)
+#define ODN_PDP_GAMMA6_GB_GAMMA6_B_SHIFT               (0)
+#define ODN_PDP_GAMMA6_GB_GAMMA6_B_LENGTH              (10)
+#define ODN_PDP_GAMMA6_GB_GAMMA6_B_SIGNED_FIELD        IMG_FALSE
+
+#define ODN_PDP_GAMMA7_R_OFFSET                (0x07EC)
+
+/* PDP, GAMMA7_R, GAMMA7_R
+*/
+#define ODN_PDP_GAMMA7_R_GAMMA7_R_MASK         (0x000003FF)
+#define ODN_PDP_GAMMA7_R_GAMMA7_R_LSBMASK              (0x000003FF)
+#define ODN_PDP_GAMMA7_R_GAMMA7_R_SHIFT                (0)
+#define ODN_PDP_GAMMA7_R_GAMMA7_R_LENGTH               (10)
+#define ODN_PDP_GAMMA7_R_GAMMA7_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GAMMA7_GB_OFFSET               (0x07F0)
+
+/* PDP, GAMMA7_GB, GAMMA7_G
+*/
+#define ODN_PDP_GAMMA7_GB_GAMMA7_G_MASK                (0x03FF0000)
+#define ODN_PDP_GAMMA7_GB_GAMMA7_G_LSBMASK             (0x000003FF)
+#define ODN_PDP_GAMMA7_GB_GAMMA7_G_SHIFT               (16)
+#define ODN_PDP_GAMMA7_GB_GAMMA7_G_LENGTH              (10)
+#define ODN_PDP_GAMMA7_GB_GAMMA7_G_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, GAMMA7_GB, GAMMA7_B
+*/
+#define ODN_PDP_GAMMA7_GB_GAMMA7_B_MASK                (0x000003FF)
+#define ODN_PDP_GAMMA7_GB_GAMMA7_B_LSBMASK             (0x000003FF)
+#define ODN_PDP_GAMMA7_GB_GAMMA7_B_SHIFT               (0)
+#define ODN_PDP_GAMMA7_GB_GAMMA7_B_LENGTH              (10)
+#define ODN_PDP_GAMMA7_GB_GAMMA7_B_SIGNED_FIELD        IMG_FALSE
+
+#define ODN_PDP_GAMMA8_R_OFFSET                (0x07F4)
+
+/* PDP, GAMMA8_R, GAMMA8_R
+*/
+#define ODN_PDP_GAMMA8_R_GAMMA8_R_MASK         (0x000003FF)
+#define ODN_PDP_GAMMA8_R_GAMMA8_R_LSBMASK              (0x000003FF)
+#define ODN_PDP_GAMMA8_R_GAMMA8_R_SHIFT                (0)
+#define ODN_PDP_GAMMA8_R_GAMMA8_R_LENGTH               (10)
+#define ODN_PDP_GAMMA8_R_GAMMA8_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GAMMA8_GB_OFFSET               (0x07F8)
+
+/* PDP, GAMMA8_GB, GAMMA8_G
+*/
+#define ODN_PDP_GAMMA8_GB_GAMMA8_G_MASK                (0x03FF0000)
+#define ODN_PDP_GAMMA8_GB_GAMMA8_G_LSBMASK             (0x000003FF)
+#define ODN_PDP_GAMMA8_GB_GAMMA8_G_SHIFT               (16)
+#define ODN_PDP_GAMMA8_GB_GAMMA8_G_LENGTH              (10)
+#define ODN_PDP_GAMMA8_GB_GAMMA8_G_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, GAMMA8_GB, GAMMA8_B
+*/
+#define ODN_PDP_GAMMA8_GB_GAMMA8_B_MASK                (0x000003FF)
+#define ODN_PDP_GAMMA8_GB_GAMMA8_B_LSBMASK             (0x000003FF)
+#define ODN_PDP_GAMMA8_GB_GAMMA8_B_SHIFT               (0)
+#define ODN_PDP_GAMMA8_GB_GAMMA8_B_LENGTH              (10)
+#define ODN_PDP_GAMMA8_GB_GAMMA8_B_SIGNED_FIELD        IMG_FALSE
+
+#define ODN_PDP_GAMMA9_R_OFFSET                (0x07FC)
+
+/* PDP, GAMMA9_R, GAMMA9_R
+*/
+#define ODN_PDP_GAMMA9_R_GAMMA9_R_MASK         (0x000003FF)
+#define ODN_PDP_GAMMA9_R_GAMMA9_R_LSBMASK              (0x000003FF)
+#define ODN_PDP_GAMMA9_R_GAMMA9_R_SHIFT                (0)
+#define ODN_PDP_GAMMA9_R_GAMMA9_R_LENGTH               (10)
+#define ODN_PDP_GAMMA9_R_GAMMA9_R_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_GAMMA9_GB_OFFSET               (0x0800)
+
+/* PDP, GAMMA9_GB, GAMMA9_G
+*/
+#define ODN_PDP_GAMMA9_GB_GAMMA9_G_MASK                (0x03FF0000)
+#define ODN_PDP_GAMMA9_GB_GAMMA9_G_LSBMASK             (0x000003FF)
+#define ODN_PDP_GAMMA9_GB_GAMMA9_G_SHIFT               (16)
+#define ODN_PDP_GAMMA9_GB_GAMMA9_G_LENGTH              (10)
+#define ODN_PDP_GAMMA9_GB_GAMMA9_G_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, GAMMA9_GB, GAMMA9_B
+*/
+#define ODN_PDP_GAMMA9_GB_GAMMA9_B_MASK                (0x000003FF)
+#define ODN_PDP_GAMMA9_GB_GAMMA9_B_LSBMASK             (0x000003FF)
+#define ODN_PDP_GAMMA9_GB_GAMMA9_B_SHIFT               (0)
+#define ODN_PDP_GAMMA9_GB_GAMMA9_B_LENGTH              (10)
+#define ODN_PDP_GAMMA9_GB_GAMMA9_B_SIGNED_FIELD        IMG_FALSE
+
+#define ODN_PDP_GAMMA10_R_OFFSET               (0x0804)
+
+/* PDP, GAMMA10_R, GAMMA10_R
+*/
+#define ODN_PDP_GAMMA10_R_GAMMA10_R_MASK               (0x000003FF)
+#define ODN_PDP_GAMMA10_R_GAMMA10_R_LSBMASK            (0x000003FF)
+#define ODN_PDP_GAMMA10_R_GAMMA10_R_SHIFT              (0)
+#define ODN_PDP_GAMMA10_R_GAMMA10_R_LENGTH             (10)
+#define ODN_PDP_GAMMA10_R_GAMMA10_R_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GAMMA10_GB_OFFSET              (0x0808)
+
+/* PDP, GAMMA10_GB, GAMMA10_G
+*/
+#define ODN_PDP_GAMMA10_GB_GAMMA10_G_MASK              (0x03FF0000)
+#define ODN_PDP_GAMMA10_GB_GAMMA10_G_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA10_GB_GAMMA10_G_SHIFT             (16)
+#define ODN_PDP_GAMMA10_GB_GAMMA10_G_LENGTH            (10)
+#define ODN_PDP_GAMMA10_GB_GAMMA10_G_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GAMMA10_GB, GAMMA10_B
+*/
+#define ODN_PDP_GAMMA10_GB_GAMMA10_B_MASK              (0x000003FF)
+#define ODN_PDP_GAMMA10_GB_GAMMA10_B_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA10_GB_GAMMA10_B_SHIFT             (0)
+#define ODN_PDP_GAMMA10_GB_GAMMA10_B_LENGTH            (10)
+#define ODN_PDP_GAMMA10_GB_GAMMA10_B_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_GAMMA11_R_OFFSET               (0x080C)
+
+/* PDP, GAMMA11_R, GAMMA11_R
+*/
+#define ODN_PDP_GAMMA11_R_GAMMA11_R_MASK               (0x000003FF)
+#define ODN_PDP_GAMMA11_R_GAMMA11_R_LSBMASK            (0x000003FF)
+#define ODN_PDP_GAMMA11_R_GAMMA11_R_SHIFT              (0)
+#define ODN_PDP_GAMMA11_R_GAMMA11_R_LENGTH             (10)
+#define ODN_PDP_GAMMA11_R_GAMMA11_R_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GAMMA11_GB_OFFSET              (0x0810)
+
+/* PDP, GAMMA11_GB, GAMMA11_G
+*/
+#define ODN_PDP_GAMMA11_GB_GAMMA11_G_MASK              (0x03FF0000)
+#define ODN_PDP_GAMMA11_GB_GAMMA11_G_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA11_GB_GAMMA11_G_SHIFT             (16)
+#define ODN_PDP_GAMMA11_GB_GAMMA11_G_LENGTH            (10)
+#define ODN_PDP_GAMMA11_GB_GAMMA11_G_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GAMMA11_GB, GAMMA11_B
+*/
+#define ODN_PDP_GAMMA11_GB_GAMMA11_B_MASK              (0x000003FF)
+#define ODN_PDP_GAMMA11_GB_GAMMA11_B_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA11_GB_GAMMA11_B_SHIFT             (0)
+#define ODN_PDP_GAMMA11_GB_GAMMA11_B_LENGTH            (10)
+#define ODN_PDP_GAMMA11_GB_GAMMA11_B_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_GAMMA12_R_OFFSET               (0x0814)
+
+/* PDP, GAMMA12_R, GAMMA12_R
+*/
+#define ODN_PDP_GAMMA12_R_GAMMA12_R_MASK               (0x000003FF)
+#define ODN_PDP_GAMMA12_R_GAMMA12_R_LSBMASK            (0x000003FF)
+#define ODN_PDP_GAMMA12_R_GAMMA12_R_SHIFT              (0)
+#define ODN_PDP_GAMMA12_R_GAMMA12_R_LENGTH             (10)
+#define ODN_PDP_GAMMA12_R_GAMMA12_R_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GAMMA12_GB_OFFSET              (0x0818)
+
+/* PDP, GAMMA12_GB, GAMMA12_G
+*/
+#define ODN_PDP_GAMMA12_GB_GAMMA12_G_MASK              (0x03FF0000)
+#define ODN_PDP_GAMMA12_GB_GAMMA12_G_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA12_GB_GAMMA12_G_SHIFT             (16)
+#define ODN_PDP_GAMMA12_GB_GAMMA12_G_LENGTH            (10)
+#define ODN_PDP_GAMMA12_GB_GAMMA12_G_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GAMMA12_GB, GAMMA12_B
+*/
+#define ODN_PDP_GAMMA12_GB_GAMMA12_B_MASK              (0x000003FF)
+#define ODN_PDP_GAMMA12_GB_GAMMA12_B_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA12_GB_GAMMA12_B_SHIFT             (0)
+#define ODN_PDP_GAMMA12_GB_GAMMA12_B_LENGTH            (10)
+#define ODN_PDP_GAMMA12_GB_GAMMA12_B_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_GAMMA13_R_OFFSET               (0x081C)
+
+/* PDP, GAMMA13_R, GAMMA13_R
+*/
+#define ODN_PDP_GAMMA13_R_GAMMA13_R_MASK               (0x000003FF)
+#define ODN_PDP_GAMMA13_R_GAMMA13_R_LSBMASK            (0x000003FF)
+#define ODN_PDP_GAMMA13_R_GAMMA13_R_SHIFT              (0)
+#define ODN_PDP_GAMMA13_R_GAMMA13_R_LENGTH             (10)
+#define ODN_PDP_GAMMA13_R_GAMMA13_R_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GAMMA13_GB_OFFSET              (0x0820)
+
+/* PDP, GAMMA13_GB, GAMMA13_G
+*/
+#define ODN_PDP_GAMMA13_GB_GAMMA13_G_MASK              (0x03FF0000)
+#define ODN_PDP_GAMMA13_GB_GAMMA13_G_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA13_GB_GAMMA13_G_SHIFT             (16)
+#define ODN_PDP_GAMMA13_GB_GAMMA13_G_LENGTH            (10)
+#define ODN_PDP_GAMMA13_GB_GAMMA13_G_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GAMMA13_GB, GAMMA13_B
+*/
+#define ODN_PDP_GAMMA13_GB_GAMMA13_B_MASK              (0x000003FF)
+#define ODN_PDP_GAMMA13_GB_GAMMA13_B_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA13_GB_GAMMA13_B_SHIFT             (0)
+#define ODN_PDP_GAMMA13_GB_GAMMA13_B_LENGTH            (10)
+#define ODN_PDP_GAMMA13_GB_GAMMA13_B_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_GAMMA14_R_OFFSET               (0x0824)
+
+/* PDP, GAMMA14_R, GAMMA14_R
+*/
+#define ODN_PDP_GAMMA14_R_GAMMA14_R_MASK               (0x000003FF)
+#define ODN_PDP_GAMMA14_R_GAMMA14_R_LSBMASK            (0x000003FF)
+#define ODN_PDP_GAMMA14_R_GAMMA14_R_SHIFT              (0)
+#define ODN_PDP_GAMMA14_R_GAMMA14_R_LENGTH             (10)
+#define ODN_PDP_GAMMA14_R_GAMMA14_R_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GAMMA14_GB_OFFSET              (0x0828)
+
+/* PDP, GAMMA14_GB, GAMMA14_G
+*/
+#define ODN_PDP_GAMMA14_GB_GAMMA14_G_MASK              (0x03FF0000)
+#define ODN_PDP_GAMMA14_GB_GAMMA14_G_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA14_GB_GAMMA14_G_SHIFT             (16)
+#define ODN_PDP_GAMMA14_GB_GAMMA14_G_LENGTH            (10)
+#define ODN_PDP_GAMMA14_GB_GAMMA14_G_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GAMMA14_GB, GAMMA14_B
+*/
+#define ODN_PDP_GAMMA14_GB_GAMMA14_B_MASK              (0x000003FF)
+#define ODN_PDP_GAMMA14_GB_GAMMA14_B_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA14_GB_GAMMA14_B_SHIFT             (0)
+#define ODN_PDP_GAMMA14_GB_GAMMA14_B_LENGTH            (10)
+#define ODN_PDP_GAMMA14_GB_GAMMA14_B_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_GAMMA15_R_OFFSET               (0x082C)
+
+/* PDP, GAMMA15_R, GAMMA15_R
+*/
+#define ODN_PDP_GAMMA15_R_GAMMA15_R_MASK               (0x000003FF)
+#define ODN_PDP_GAMMA15_R_GAMMA15_R_LSBMASK            (0x000003FF)
+#define ODN_PDP_GAMMA15_R_GAMMA15_R_SHIFT              (0)
+#define ODN_PDP_GAMMA15_R_GAMMA15_R_LENGTH             (10)
+#define ODN_PDP_GAMMA15_R_GAMMA15_R_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GAMMA15_GB_OFFSET              (0x0830)
+
+/* PDP, GAMMA15_GB, GAMMA15_G
+*/
+#define ODN_PDP_GAMMA15_GB_GAMMA15_G_MASK              (0x03FF0000)
+#define ODN_PDP_GAMMA15_GB_GAMMA15_G_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA15_GB_GAMMA15_G_SHIFT             (16)
+#define ODN_PDP_GAMMA15_GB_GAMMA15_G_LENGTH            (10)
+#define ODN_PDP_GAMMA15_GB_GAMMA15_G_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GAMMA15_GB, GAMMA15_B
+*/
+#define ODN_PDP_GAMMA15_GB_GAMMA15_B_MASK              (0x000003FF)
+#define ODN_PDP_GAMMA15_GB_GAMMA15_B_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA15_GB_GAMMA15_B_SHIFT             (0)
+#define ODN_PDP_GAMMA15_GB_GAMMA15_B_LENGTH            (10)
+#define ODN_PDP_GAMMA15_GB_GAMMA15_B_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_GAMMA16_R_OFFSET               (0x0834)
+
+/* PDP, GAMMA16_R, GAMMA16_R
+*/
+#define ODN_PDP_GAMMA16_R_GAMMA16_R_MASK               (0x000003FF)
+#define ODN_PDP_GAMMA16_R_GAMMA16_R_LSBMASK            (0x000003FF)
+#define ODN_PDP_GAMMA16_R_GAMMA16_R_SHIFT              (0)
+#define ODN_PDP_GAMMA16_R_GAMMA16_R_LENGTH             (10)
+#define ODN_PDP_GAMMA16_R_GAMMA16_R_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GAMMA16_GB_OFFSET              (0x0838)
+
+/* PDP, GAMMA16_GB, GAMMA16_G
+*/
+#define ODN_PDP_GAMMA16_GB_GAMMA16_G_MASK              (0x03FF0000)
+#define ODN_PDP_GAMMA16_GB_GAMMA16_G_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA16_GB_GAMMA16_G_SHIFT             (16)
+#define ODN_PDP_GAMMA16_GB_GAMMA16_G_LENGTH            (10)
+#define ODN_PDP_GAMMA16_GB_GAMMA16_G_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GAMMA16_GB, GAMMA16_B
+*/
+#define ODN_PDP_GAMMA16_GB_GAMMA16_B_MASK              (0x000003FF)
+#define ODN_PDP_GAMMA16_GB_GAMMA16_B_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA16_GB_GAMMA16_B_SHIFT             (0)
+#define ODN_PDP_GAMMA16_GB_GAMMA16_B_LENGTH            (10)
+#define ODN_PDP_GAMMA16_GB_GAMMA16_B_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_GAMMA17_R_OFFSET               (0x083C)
+
+/* PDP, GAMMA17_R, GAMMA17_R
+*/
+#define ODN_PDP_GAMMA17_R_GAMMA17_R_MASK               (0x000003FF)
+#define ODN_PDP_GAMMA17_R_GAMMA17_R_LSBMASK            (0x000003FF)
+#define ODN_PDP_GAMMA17_R_GAMMA17_R_SHIFT              (0)
+#define ODN_PDP_GAMMA17_R_GAMMA17_R_LENGTH             (10)
+#define ODN_PDP_GAMMA17_R_GAMMA17_R_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GAMMA17_GB_OFFSET              (0x0840)
+
+/* PDP, GAMMA17_GB, GAMMA17_G
+*/
+#define ODN_PDP_GAMMA17_GB_GAMMA17_G_MASK              (0x03FF0000)
+#define ODN_PDP_GAMMA17_GB_GAMMA17_G_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA17_GB_GAMMA17_G_SHIFT             (16)
+#define ODN_PDP_GAMMA17_GB_GAMMA17_G_LENGTH            (10)
+#define ODN_PDP_GAMMA17_GB_GAMMA17_G_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GAMMA17_GB, GAMMA17_B
+*/
+#define ODN_PDP_GAMMA17_GB_GAMMA17_B_MASK              (0x000003FF)
+#define ODN_PDP_GAMMA17_GB_GAMMA17_B_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA17_GB_GAMMA17_B_SHIFT             (0)
+#define ODN_PDP_GAMMA17_GB_GAMMA17_B_LENGTH            (10)
+#define ODN_PDP_GAMMA17_GB_GAMMA17_B_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_GAMMA18_R_OFFSET               (0x0844)
+
+/* PDP, GAMMA18_R, GAMMA18_R
+*/
+#define ODN_PDP_GAMMA18_R_GAMMA18_R_MASK               (0x000003FF)
+#define ODN_PDP_GAMMA18_R_GAMMA18_R_LSBMASK            (0x000003FF)
+#define ODN_PDP_GAMMA18_R_GAMMA18_R_SHIFT              (0)
+#define ODN_PDP_GAMMA18_R_GAMMA18_R_LENGTH             (10)
+#define ODN_PDP_GAMMA18_R_GAMMA18_R_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GAMMA18_GB_OFFSET              (0x0848)
+
+/* PDP, GAMMA18_GB, GAMMA18_G
+*/
+#define ODN_PDP_GAMMA18_GB_GAMMA18_G_MASK              (0x03FF0000)
+#define ODN_PDP_GAMMA18_GB_GAMMA18_G_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA18_GB_GAMMA18_G_SHIFT             (16)
+#define ODN_PDP_GAMMA18_GB_GAMMA18_G_LENGTH            (10)
+#define ODN_PDP_GAMMA18_GB_GAMMA18_G_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GAMMA18_GB, GAMMA18_B
+*/
+#define ODN_PDP_GAMMA18_GB_GAMMA18_B_MASK              (0x000003FF)
+#define ODN_PDP_GAMMA18_GB_GAMMA18_B_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA18_GB_GAMMA18_B_SHIFT             (0)
+#define ODN_PDP_GAMMA18_GB_GAMMA18_B_LENGTH            (10)
+#define ODN_PDP_GAMMA18_GB_GAMMA18_B_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_GAMMA19_R_OFFSET               (0x084C)
+
+/* PDP, GAMMA19_R, GAMMA19_R
+*/
+#define ODN_PDP_GAMMA19_R_GAMMA19_R_MASK               (0x000003FF)
+#define ODN_PDP_GAMMA19_R_GAMMA19_R_LSBMASK            (0x000003FF)
+#define ODN_PDP_GAMMA19_R_GAMMA19_R_SHIFT              (0)
+#define ODN_PDP_GAMMA19_R_GAMMA19_R_LENGTH             (10)
+#define ODN_PDP_GAMMA19_R_GAMMA19_R_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GAMMA19_GB_OFFSET              (0x0850)
+
+/* PDP, GAMMA19_GB, GAMMA19_G
+*/
+#define ODN_PDP_GAMMA19_GB_GAMMA19_G_MASK              (0x03FF0000)
+#define ODN_PDP_GAMMA19_GB_GAMMA19_G_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA19_GB_GAMMA19_G_SHIFT             (16)
+#define ODN_PDP_GAMMA19_GB_GAMMA19_G_LENGTH            (10)
+#define ODN_PDP_GAMMA19_GB_GAMMA19_G_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GAMMA19_GB, GAMMA19_B
+*/
+#define ODN_PDP_GAMMA19_GB_GAMMA19_B_MASK              (0x000003FF)
+#define ODN_PDP_GAMMA19_GB_GAMMA19_B_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA19_GB_GAMMA19_B_SHIFT             (0)
+#define ODN_PDP_GAMMA19_GB_GAMMA19_B_LENGTH            (10)
+#define ODN_PDP_GAMMA19_GB_GAMMA19_B_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_GAMMA20_R_OFFSET               (0x0854)
+
+/* PDP, GAMMA20_R, GAMMA20_R
+*/
+#define ODN_PDP_GAMMA20_R_GAMMA20_R_MASK               (0x000003FF)
+#define ODN_PDP_GAMMA20_R_GAMMA20_R_LSBMASK            (0x000003FF)
+#define ODN_PDP_GAMMA20_R_GAMMA20_R_SHIFT              (0)
+#define ODN_PDP_GAMMA20_R_GAMMA20_R_LENGTH             (10)
+#define ODN_PDP_GAMMA20_R_GAMMA20_R_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GAMMA20_GB_OFFSET              (0x0858)
+
+/* PDP, GAMMA20_GB, GAMMA20_G
+*/
+#define ODN_PDP_GAMMA20_GB_GAMMA20_G_MASK              (0x03FF0000)
+#define ODN_PDP_GAMMA20_GB_GAMMA20_G_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA20_GB_GAMMA20_G_SHIFT             (16)
+#define ODN_PDP_GAMMA20_GB_GAMMA20_G_LENGTH            (10)
+#define ODN_PDP_GAMMA20_GB_GAMMA20_G_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GAMMA20_GB, GAMMA20_B
+*/
+#define ODN_PDP_GAMMA20_GB_GAMMA20_B_MASK              (0x000003FF)
+#define ODN_PDP_GAMMA20_GB_GAMMA20_B_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA20_GB_GAMMA20_B_SHIFT             (0)
+#define ODN_PDP_GAMMA20_GB_GAMMA20_B_LENGTH            (10)
+#define ODN_PDP_GAMMA20_GB_GAMMA20_B_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_GAMMA21_R_OFFSET               (0x085C)
+
+/* PDP, GAMMA21_R, GAMMA21_R
+*/
+#define ODN_PDP_GAMMA21_R_GAMMA21_R_MASK               (0x000003FF)
+#define ODN_PDP_GAMMA21_R_GAMMA21_R_LSBMASK            (0x000003FF)
+#define ODN_PDP_GAMMA21_R_GAMMA21_R_SHIFT              (0)
+#define ODN_PDP_GAMMA21_R_GAMMA21_R_LENGTH             (10)
+#define ODN_PDP_GAMMA21_R_GAMMA21_R_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GAMMA21_GB_OFFSET              (0x0860)
+
+/* PDP, GAMMA21_GB, GAMMA21_G
+*/
+#define ODN_PDP_GAMMA21_GB_GAMMA21_G_MASK              (0x03FF0000)
+#define ODN_PDP_GAMMA21_GB_GAMMA21_G_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA21_GB_GAMMA21_G_SHIFT             (16)
+#define ODN_PDP_GAMMA21_GB_GAMMA21_G_LENGTH            (10)
+#define ODN_PDP_GAMMA21_GB_GAMMA21_G_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GAMMA21_GB, GAMMA21_B
+*/
+#define ODN_PDP_GAMMA21_GB_GAMMA21_B_MASK              (0x000003FF)
+#define ODN_PDP_GAMMA21_GB_GAMMA21_B_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA21_GB_GAMMA21_B_SHIFT             (0)
+#define ODN_PDP_GAMMA21_GB_GAMMA21_B_LENGTH            (10)
+#define ODN_PDP_GAMMA21_GB_GAMMA21_B_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_GAMMA22_R_OFFSET               (0x0864)
+
+/* PDP, GAMMA22_R, GAMMA22_R
+*/
+#define ODN_PDP_GAMMA22_R_GAMMA22_R_MASK               (0x000003FF)
+#define ODN_PDP_GAMMA22_R_GAMMA22_R_LSBMASK            (0x000003FF)
+#define ODN_PDP_GAMMA22_R_GAMMA22_R_SHIFT              (0)
+#define ODN_PDP_GAMMA22_R_GAMMA22_R_LENGTH             (10)
+#define ODN_PDP_GAMMA22_R_GAMMA22_R_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GAMMA22_GB_OFFSET              (0x0868)
+
+/* PDP, GAMMA22_GB, GAMMA22_G
+*/
+#define ODN_PDP_GAMMA22_GB_GAMMA22_G_MASK              (0x03FF0000)
+#define ODN_PDP_GAMMA22_GB_GAMMA22_G_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA22_GB_GAMMA22_G_SHIFT             (16)
+#define ODN_PDP_GAMMA22_GB_GAMMA22_G_LENGTH            (10)
+#define ODN_PDP_GAMMA22_GB_GAMMA22_G_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GAMMA22_GB, GAMMA22_B
+*/
+#define ODN_PDP_GAMMA22_GB_GAMMA22_B_MASK              (0x000003FF)
+#define ODN_PDP_GAMMA22_GB_GAMMA22_B_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA22_GB_GAMMA22_B_SHIFT             (0)
+#define ODN_PDP_GAMMA22_GB_GAMMA22_B_LENGTH            (10)
+#define ODN_PDP_GAMMA22_GB_GAMMA22_B_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_GAMMA23_R_OFFSET               (0x086C)
+
+/* PDP, GAMMA23_R, GAMMA23_R
+*/
+#define ODN_PDP_GAMMA23_R_GAMMA23_R_MASK               (0x000003FF)
+#define ODN_PDP_GAMMA23_R_GAMMA23_R_LSBMASK            (0x000003FF)
+#define ODN_PDP_GAMMA23_R_GAMMA23_R_SHIFT              (0)
+#define ODN_PDP_GAMMA23_R_GAMMA23_R_LENGTH             (10)
+#define ODN_PDP_GAMMA23_R_GAMMA23_R_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GAMMA23_GB_OFFSET              (0x0870)
+
+/* PDP, GAMMA23_GB, GAMMA23_G
+*/
+#define ODN_PDP_GAMMA23_GB_GAMMA23_G_MASK              (0x03FF0000)
+#define ODN_PDP_GAMMA23_GB_GAMMA23_G_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA23_GB_GAMMA23_G_SHIFT             (16)
+#define ODN_PDP_GAMMA23_GB_GAMMA23_G_LENGTH            (10)
+#define ODN_PDP_GAMMA23_GB_GAMMA23_G_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GAMMA23_GB, GAMMA23_B
+*/
+#define ODN_PDP_GAMMA23_GB_GAMMA23_B_MASK              (0x000003FF)
+#define ODN_PDP_GAMMA23_GB_GAMMA23_B_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA23_GB_GAMMA23_B_SHIFT             (0)
+#define ODN_PDP_GAMMA23_GB_GAMMA23_B_LENGTH            (10)
+#define ODN_PDP_GAMMA23_GB_GAMMA23_B_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_GAMMA24_R_OFFSET               (0x0874)
+
+/* PDP, GAMMA24_R, GAMMA24_R
+*/
+#define ODN_PDP_GAMMA24_R_GAMMA24_R_MASK               (0x000003FF)
+#define ODN_PDP_GAMMA24_R_GAMMA24_R_LSBMASK            (0x000003FF)
+#define ODN_PDP_GAMMA24_R_GAMMA24_R_SHIFT              (0)
+#define ODN_PDP_GAMMA24_R_GAMMA24_R_LENGTH             (10)
+#define ODN_PDP_GAMMA24_R_GAMMA24_R_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GAMMA24_GB_OFFSET              (0x0878)
+
+/* PDP, GAMMA24_GB, GAMMA24_G
+*/
+#define ODN_PDP_GAMMA24_GB_GAMMA24_G_MASK              (0x03FF0000)
+#define ODN_PDP_GAMMA24_GB_GAMMA24_G_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA24_GB_GAMMA24_G_SHIFT             (16)
+#define ODN_PDP_GAMMA24_GB_GAMMA24_G_LENGTH            (10)
+#define ODN_PDP_GAMMA24_GB_GAMMA24_G_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GAMMA24_GB, GAMMA24_B
+*/
+#define ODN_PDP_GAMMA24_GB_GAMMA24_B_MASK              (0x000003FF)
+#define ODN_PDP_GAMMA24_GB_GAMMA24_B_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA24_GB_GAMMA24_B_SHIFT             (0)
+#define ODN_PDP_GAMMA24_GB_GAMMA24_B_LENGTH            (10)
+#define ODN_PDP_GAMMA24_GB_GAMMA24_B_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_GAMMA25_R_OFFSET               (0x087C)
+
+/* PDP, GAMMA25_R, GAMMA25_R
+*/
+#define ODN_PDP_GAMMA25_R_GAMMA25_R_MASK               (0x000003FF)
+#define ODN_PDP_GAMMA25_R_GAMMA25_R_LSBMASK            (0x000003FF)
+#define ODN_PDP_GAMMA25_R_GAMMA25_R_SHIFT              (0)
+#define ODN_PDP_GAMMA25_R_GAMMA25_R_LENGTH             (10)
+#define ODN_PDP_GAMMA25_R_GAMMA25_R_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GAMMA25_GB_OFFSET              (0x0880)
+
+/* PDP, GAMMA25_GB, GAMMA25_G
+*/
+#define ODN_PDP_GAMMA25_GB_GAMMA25_G_MASK              (0x03FF0000)
+#define ODN_PDP_GAMMA25_GB_GAMMA25_G_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA25_GB_GAMMA25_G_SHIFT             (16)
+#define ODN_PDP_GAMMA25_GB_GAMMA25_G_LENGTH            (10)
+#define ODN_PDP_GAMMA25_GB_GAMMA25_G_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GAMMA25_GB, GAMMA25_B
+*/
+#define ODN_PDP_GAMMA25_GB_GAMMA25_B_MASK              (0x000003FF)
+#define ODN_PDP_GAMMA25_GB_GAMMA25_B_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA25_GB_GAMMA25_B_SHIFT             (0)
+#define ODN_PDP_GAMMA25_GB_GAMMA25_B_LENGTH            (10)
+#define ODN_PDP_GAMMA25_GB_GAMMA25_B_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_GAMMA26_R_OFFSET               (0x0884)
+
+/* PDP, GAMMA26_R, GAMMA26_R
+*/
+#define ODN_PDP_GAMMA26_R_GAMMA26_R_MASK               (0x000003FF)
+#define ODN_PDP_GAMMA26_R_GAMMA26_R_LSBMASK            (0x000003FF)
+#define ODN_PDP_GAMMA26_R_GAMMA26_R_SHIFT              (0)
+#define ODN_PDP_GAMMA26_R_GAMMA26_R_LENGTH             (10)
+#define ODN_PDP_GAMMA26_R_GAMMA26_R_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GAMMA26_GB_OFFSET              (0x0888)
+
+/* PDP, GAMMA26_GB, GAMMA26_G
+*/
+#define ODN_PDP_GAMMA26_GB_GAMMA26_G_MASK              (0x03FF0000)
+#define ODN_PDP_GAMMA26_GB_GAMMA26_G_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA26_GB_GAMMA26_G_SHIFT             (16)
+#define ODN_PDP_GAMMA26_GB_GAMMA26_G_LENGTH            (10)
+#define ODN_PDP_GAMMA26_GB_GAMMA26_G_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GAMMA26_GB, GAMMA26_B
+*/
+#define ODN_PDP_GAMMA26_GB_GAMMA26_B_MASK              (0x000003FF)
+#define ODN_PDP_GAMMA26_GB_GAMMA26_B_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA26_GB_GAMMA26_B_SHIFT             (0)
+#define ODN_PDP_GAMMA26_GB_GAMMA26_B_LENGTH            (10)
+#define ODN_PDP_GAMMA26_GB_GAMMA26_B_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_GAMMA27_R_OFFSET               (0x088C)
+
+/* PDP, GAMMA27_R, GAMMA27_R
+*/
+#define ODN_PDP_GAMMA27_R_GAMMA27_R_MASK               (0x000003FF)
+#define ODN_PDP_GAMMA27_R_GAMMA27_R_LSBMASK            (0x000003FF)
+#define ODN_PDP_GAMMA27_R_GAMMA27_R_SHIFT              (0)
+#define ODN_PDP_GAMMA27_R_GAMMA27_R_LENGTH             (10)
+#define ODN_PDP_GAMMA27_R_GAMMA27_R_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GAMMA27_GB_OFFSET              (0x0890)
+
+/* PDP, GAMMA27_GB, GAMMA27_G
+*/
+#define ODN_PDP_GAMMA27_GB_GAMMA27_G_MASK              (0x03FF0000)
+#define ODN_PDP_GAMMA27_GB_GAMMA27_G_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA27_GB_GAMMA27_G_SHIFT             (16)
+#define ODN_PDP_GAMMA27_GB_GAMMA27_G_LENGTH            (10)
+#define ODN_PDP_GAMMA27_GB_GAMMA27_G_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GAMMA27_GB, GAMMA27_B
+*/
+#define ODN_PDP_GAMMA27_GB_GAMMA27_B_MASK              (0x000003FF)
+#define ODN_PDP_GAMMA27_GB_GAMMA27_B_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA27_GB_GAMMA27_B_SHIFT             (0)
+#define ODN_PDP_GAMMA27_GB_GAMMA27_B_LENGTH            (10)
+#define ODN_PDP_GAMMA27_GB_GAMMA27_B_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_GAMMA28_R_OFFSET               (0x0894)
+
+/* PDP, GAMMA28_R, GAMMA28_R
+*/
+#define ODN_PDP_GAMMA28_R_GAMMA28_R_MASK               (0x000003FF)
+#define ODN_PDP_GAMMA28_R_GAMMA28_R_LSBMASK            (0x000003FF)
+#define ODN_PDP_GAMMA28_R_GAMMA28_R_SHIFT              (0)
+#define ODN_PDP_GAMMA28_R_GAMMA28_R_LENGTH             (10)
+#define ODN_PDP_GAMMA28_R_GAMMA28_R_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GAMMA28_GB_OFFSET              (0x0898)
+
+/* PDP, GAMMA28_GB, GAMMA28_G
+*/
+#define ODN_PDP_GAMMA28_GB_GAMMA28_G_MASK              (0x03FF0000)
+#define ODN_PDP_GAMMA28_GB_GAMMA28_G_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA28_GB_GAMMA28_G_SHIFT             (16)
+#define ODN_PDP_GAMMA28_GB_GAMMA28_G_LENGTH            (10)
+#define ODN_PDP_GAMMA28_GB_GAMMA28_G_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GAMMA28_GB, GAMMA28_B
+*/
+#define ODN_PDP_GAMMA28_GB_GAMMA28_B_MASK              (0x000003FF)
+#define ODN_PDP_GAMMA28_GB_GAMMA28_B_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA28_GB_GAMMA28_B_SHIFT             (0)
+#define ODN_PDP_GAMMA28_GB_GAMMA28_B_LENGTH            (10)
+#define ODN_PDP_GAMMA28_GB_GAMMA28_B_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_GAMMA29_R_OFFSET               (0x089C)
+
+/* PDP, GAMMA29_R, GAMMA29_R
+*/
+#define ODN_PDP_GAMMA29_R_GAMMA29_R_MASK               (0x000003FF)
+#define ODN_PDP_GAMMA29_R_GAMMA29_R_LSBMASK            (0x000003FF)
+#define ODN_PDP_GAMMA29_R_GAMMA29_R_SHIFT              (0)
+#define ODN_PDP_GAMMA29_R_GAMMA29_R_LENGTH             (10)
+#define ODN_PDP_GAMMA29_R_GAMMA29_R_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GAMMA29_GB_OFFSET              (0x08A0)
+
+/* PDP, GAMMA29_GB, GAMMA29_G
+*/
+#define ODN_PDP_GAMMA29_GB_GAMMA29_G_MASK              (0x03FF0000)
+#define ODN_PDP_GAMMA29_GB_GAMMA29_G_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA29_GB_GAMMA29_G_SHIFT             (16)
+#define ODN_PDP_GAMMA29_GB_GAMMA29_G_LENGTH            (10)
+#define ODN_PDP_GAMMA29_GB_GAMMA29_G_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GAMMA29_GB, GAMMA29_B
+*/
+#define ODN_PDP_GAMMA29_GB_GAMMA29_B_MASK              (0x000003FF)
+#define ODN_PDP_GAMMA29_GB_GAMMA29_B_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA29_GB_GAMMA29_B_SHIFT             (0)
+#define ODN_PDP_GAMMA29_GB_GAMMA29_B_LENGTH            (10)
+#define ODN_PDP_GAMMA29_GB_GAMMA29_B_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_GAMMA30_R_OFFSET               (0x08A4)
+
+/* PDP, GAMMA30_R, GAMMA30_R
+*/
+#define ODN_PDP_GAMMA30_R_GAMMA30_R_MASK               (0x000003FF)
+#define ODN_PDP_GAMMA30_R_GAMMA30_R_LSBMASK            (0x000003FF)
+#define ODN_PDP_GAMMA30_R_GAMMA30_R_SHIFT              (0)
+#define ODN_PDP_GAMMA30_R_GAMMA30_R_LENGTH             (10)
+#define ODN_PDP_GAMMA30_R_GAMMA30_R_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GAMMA30_GB_OFFSET              (0x08A8)
+
+/* PDP, GAMMA30_GB, GAMMA30_G
+*/
+#define ODN_PDP_GAMMA30_GB_GAMMA30_G_MASK              (0x03FF0000)
+#define ODN_PDP_GAMMA30_GB_GAMMA30_G_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA30_GB_GAMMA30_G_SHIFT             (16)
+#define ODN_PDP_GAMMA30_GB_GAMMA30_G_LENGTH            (10)
+#define ODN_PDP_GAMMA30_GB_GAMMA30_G_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GAMMA30_GB, GAMMA30_B
+*/
+#define ODN_PDP_GAMMA30_GB_GAMMA30_B_MASK              (0x000003FF)
+#define ODN_PDP_GAMMA30_GB_GAMMA30_B_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA30_GB_GAMMA30_B_SHIFT             (0)
+#define ODN_PDP_GAMMA30_GB_GAMMA30_B_LENGTH            (10)
+#define ODN_PDP_GAMMA30_GB_GAMMA30_B_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_GAMMA31_R_OFFSET               (0x08AC)
+
+/* PDP, GAMMA31_R, GAMMA31_R
+*/
+#define ODN_PDP_GAMMA31_R_GAMMA31_R_MASK               (0x000003FF)
+#define ODN_PDP_GAMMA31_R_GAMMA31_R_LSBMASK            (0x000003FF)
+#define ODN_PDP_GAMMA31_R_GAMMA31_R_SHIFT              (0)
+#define ODN_PDP_GAMMA31_R_GAMMA31_R_LENGTH             (10)
+#define ODN_PDP_GAMMA31_R_GAMMA31_R_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GAMMA31_GB_OFFSET              (0x08B0)
+
+/* PDP, GAMMA31_GB, GAMMA31_G
+*/
+#define ODN_PDP_GAMMA31_GB_GAMMA31_G_MASK              (0x03FF0000)
+#define ODN_PDP_GAMMA31_GB_GAMMA31_G_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA31_GB_GAMMA31_G_SHIFT             (16)
+#define ODN_PDP_GAMMA31_GB_GAMMA31_G_LENGTH            (10)
+#define ODN_PDP_GAMMA31_GB_GAMMA31_G_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GAMMA31_GB, GAMMA31_B
+*/
+#define ODN_PDP_GAMMA31_GB_GAMMA31_B_MASK              (0x000003FF)
+#define ODN_PDP_GAMMA31_GB_GAMMA31_B_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA31_GB_GAMMA31_B_SHIFT             (0)
+#define ODN_PDP_GAMMA31_GB_GAMMA31_B_LENGTH            (10)
+#define ODN_PDP_GAMMA31_GB_GAMMA31_B_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_GAMMA32_R_OFFSET                       (0x08B4)
+
+/* PDP, GAMMA32_R, GAMMA32_R
+*/
+#define ODN_PDP_GAMMA32_R_GAMMA32_R_MASK               (0x000003FF)
+#define ODN_PDP_GAMMA32_R_GAMMA32_R_LSBMASK            (0x000003FF)
+#define ODN_PDP_GAMMA32_R_GAMMA32_R_SHIFT              (0)
+#define ODN_PDP_GAMMA32_R_GAMMA32_R_LENGTH             (10)
+#define ODN_PDP_GAMMA32_R_GAMMA32_R_SIGNED_FIELD       IMG_FALSE
+
+#define ODN_PDP_GAMMA32_GB_OFFSET                      (0x08B8)
+
+/* PDP, GAMMA32_GB, GAMMA32_G
+*/
+#define ODN_PDP_GAMMA32_GB_GAMMA32_G_MASK              (0x03FF0000)
+#define ODN_PDP_GAMMA32_GB_GAMMA32_G_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA32_GB_GAMMA32_G_SHIFT             (16)
+#define ODN_PDP_GAMMA32_GB_GAMMA32_G_LENGTH            (10)
+#define ODN_PDP_GAMMA32_GB_GAMMA32_G_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, GAMMA32_GB, GAMMA32_B
+*/
+#define ODN_PDP_GAMMA32_GB_GAMMA32_B_MASK              (0x000003FF)
+#define ODN_PDP_GAMMA32_GB_GAMMA32_B_LSBMASK           (0x000003FF)
+#define ODN_PDP_GAMMA32_GB_GAMMA32_B_SHIFT             (0)
+#define ODN_PDP_GAMMA32_GB_GAMMA32_B_LENGTH            (10)
+#define ODN_PDP_GAMMA32_GB_GAMMA32_B_SIGNED_FIELD      IMG_FALSE
+
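+/*
+ * Illustrative sketch only (not part of the generated register map): it
+ * shows how the *_MASK, *_LSBMASK and *_SHIFT macros above are typically
+ * combined to update one gamma LUT entry. The helper name is hypothetical.
+ */
+static inline unsigned int
+odn_pdp_pack_gamma4_gb(unsigned int reg, unsigned int g, unsigned int b)
+{
+	/* Clear both 10-bit fields, then insert the new G and B values. */
+	reg &= ~(ODN_PDP_GAMMA4_GB_GAMMA4_G_MASK |
+		 ODN_PDP_GAMMA4_GB_GAMMA4_B_MASK);
+	reg |= (g & ODN_PDP_GAMMA4_GB_GAMMA4_G_LSBMASK)
+		<< ODN_PDP_GAMMA4_GB_GAMMA4_G_SHIFT;
+	reg |= (b & ODN_PDP_GAMMA4_GB_GAMMA4_B_LSBMASK)
+		<< ODN_PDP_GAMMA4_GB_GAMMA4_B_SHIFT;
+	return reg;
+}
+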
+#define ODN_PDP_VEVENT_OFFSET                          (0x08BC)
+
+/* PDP, VEVENT, VEVENT
+*/
+#define ODN_PDP_VEVENT_VEVENT_MASK                     (0x1FFF0000)
+#define ODN_PDP_VEVENT_VEVENT_LSBMASK                  (0x00001FFF)
+#define ODN_PDP_VEVENT_VEVENT_SHIFT                    (16)
+#define ODN_PDP_VEVENT_VEVENT_LENGTH                   (13)
+#define ODN_PDP_VEVENT_VEVENT_SIGNED_FIELD             IMG_FALSE
+
+/* PDP, VEVENT, VFETCH
+*/
+#define ODN_PDP_VEVENT_VFETCH_MASK                     (0x00001FFF)
+#define ODN_PDP_VEVENT_VFETCH_LSBMASK                  (0x00001FFF)
+#define ODN_PDP_VEVENT_VFETCH_SHIFT                    (0)
+#define ODN_PDP_VEVENT_VFETCH_LENGTH                   (13)
+#define ODN_PDP_VEVENT_VFETCH_SIGNED_FIELD             IMG_FALSE
+
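+/*
+ * Illustrative sketch only, not generated content: reading a field back uses
+ * the matching *_SHIFT and *_LSBMASK pair. The helper name is hypothetical.
+ */
+static inline unsigned int
+odn_pdp_vevent_get_vfetch(unsigned int reg)
+{
+	/* Shift the 13-bit VFETCH field down and mask off the other bits. */
+	return (reg >> ODN_PDP_VEVENT_VFETCH_SHIFT) &
+	       ODN_PDP_VEVENT_VFETCH_LSBMASK;
+}
+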
+#define ODN_PDP_HDECTRL_OFFSET                         (0x08C0)
+
+/* PDP, HDECTRL, HDES
+*/
+#define ODN_PDP_HDECTRL_HDES_MASK              (0x1FFF0000)
+#define ODN_PDP_HDECTRL_HDES_LSBMASK           (0x00001FFF)
+#define ODN_PDP_HDECTRL_HDES_SHIFT             (16)
+#define ODN_PDP_HDECTRL_HDES_LENGTH            (13)
+#define ODN_PDP_HDECTRL_HDES_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, HDECTRL, HDEF
+*/
+#define ODN_PDP_HDECTRL_HDEF_MASK              (0x00001FFF)
+#define ODN_PDP_HDECTRL_HDEF_LSBMASK           (0x00001FFF)
+#define ODN_PDP_HDECTRL_HDEF_SHIFT             (0)
+#define ODN_PDP_HDECTRL_HDEF_LENGTH            (13)
+#define ODN_PDP_HDECTRL_HDEF_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_VDECTRL_OFFSET                 (0x08C4)
+
+/* PDP, VDECTRL, VDES
+*/
+#define ODN_PDP_VDECTRL_VDES_MASK              (0x1FFF0000)
+#define ODN_PDP_VDECTRL_VDES_LSBMASK           (0x00001FFF)
+#define ODN_PDP_VDECTRL_VDES_SHIFT             (16)
+#define ODN_PDP_VDECTRL_VDES_LENGTH            (13)
+#define ODN_PDP_VDECTRL_VDES_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, VDECTRL, VDEF
+*/
+#define ODN_PDP_VDECTRL_VDEF_MASK              (0x00001FFF)
+#define ODN_PDP_VDECTRL_VDEF_LSBMASK           (0x00001FFF)
+#define ODN_PDP_VDECTRL_VDEF_SHIFT             (0)
+#define ODN_PDP_VDECTRL_VDEF_LENGTH            (13)
+#define ODN_PDP_VDECTRL_VDEF_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_OPMASK_R_OFFSET                        (0x08C8)
+
+/* PDP, OPMASK_R, MASKLEVEL
+*/
+#define ODN_PDP_OPMASK_R_MASKLEVEL_MASK                (0x80000000)
+#define ODN_PDP_OPMASK_R_MASKLEVEL_LSBMASK             (0x00000001)
+#define ODN_PDP_OPMASK_R_MASKLEVEL_SHIFT               (31)
+#define ODN_PDP_OPMASK_R_MASKLEVEL_LENGTH              (1)
+#define ODN_PDP_OPMASK_R_MASKLEVEL_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, OPMASK_R, BLANKLEVEL
+*/
+#define ODN_PDP_OPMASK_R_BLANKLEVEL_MASK               (0x40000000)
+#define ODN_PDP_OPMASK_R_BLANKLEVEL_LSBMASK            (0x00000001)
+#define ODN_PDP_OPMASK_R_BLANKLEVEL_SHIFT              (30)
+#define ODN_PDP_OPMASK_R_BLANKLEVEL_LENGTH             (1)
+#define ODN_PDP_OPMASK_R_BLANKLEVEL_SIGNED_FIELD       IMG_FALSE
+
+/* PDP, OPMASK_R, MASKR
+*/
+#define ODN_PDP_OPMASK_R_MASKR_MASK            (0x000003FF)
+#define ODN_PDP_OPMASK_R_MASKR_LSBMASK         (0x000003FF)
+#define ODN_PDP_OPMASK_R_MASKR_SHIFT           (0)
+#define ODN_PDP_OPMASK_R_MASKR_LENGTH          (10)
+#define ODN_PDP_OPMASK_R_MASKR_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_OPMASK_GB_OFFSET               (0x08CC)
+
+/* PDP, OPMASK_GB, MASKG
+*/
+#define ODN_PDP_OPMASK_GB_MASKG_MASK           (0x03FF0000)
+#define ODN_PDP_OPMASK_GB_MASKG_LSBMASK                (0x000003FF)
+#define ODN_PDP_OPMASK_GB_MASKG_SHIFT          (16)
+#define ODN_PDP_OPMASK_GB_MASKG_LENGTH         (10)
+#define ODN_PDP_OPMASK_GB_MASKG_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, OPMASK_GB, MASKB
+*/
+#define ODN_PDP_OPMASK_GB_MASKB_MASK           (0x000003FF)
+#define ODN_PDP_OPMASK_GB_MASKB_LSBMASK                (0x000003FF)
+#define ODN_PDP_OPMASK_GB_MASKB_SHIFT          (0)
+#define ODN_PDP_OPMASK_GB_MASKB_LENGTH         (10)
+#define ODN_PDP_OPMASK_GB_MASKB_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_REGLD_ADDR_CTRL_OFFSET         (0x08D0)
+
+/* PDP, REGLD_ADDR_CTRL, REGLD_ADDRIN
+*/
+#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_MASK              (0xFFFFFFF0)
+#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_LSBMASK           (0x0FFFFFFF)
+#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_SHIFT             (4)
+#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_LENGTH            (28)
+#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_REGLD_ADDR_STAT_OFFSET         (0x08D4)
+
+/* PDP, REGLD_ADDR_STAT, REGLD_ADDROUT
+*/
+#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_MASK             (0xFFFFFFF0)
+#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_LSBMASK          (0x0FFFFFFF)
+#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_SHIFT            (4)
+#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_LENGTH           (28)
+#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_REGLD_STAT_OFFSET              (0x08D8)
+
+/* PDP, REGLD_STAT, REGLD_ADDREN
+*/
+#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_MASK           (0x00800000)
+#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_LSBMASK                (0x00000001)
+#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_SHIFT          (23)
+#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_LENGTH         (1)
+#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_REGLD_CTRL_OFFSET              (0x08DC)
+
+/* PDP, REGLD_CTRL, REGLD_ADDRLEN
+*/
+#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_MASK          (0xFF000000)
+#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_LSBMASK       (0x000000FF)
+#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_SHIFT         (24)
+#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_LENGTH                (8)
+#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, REGLD_CTRL, REGLD_VAL
+*/
+#define ODN_PDP_REGLD_CTRL_REGLD_VAL_MASK              (0x00800000)
+#define ODN_PDP_REGLD_CTRL_REGLD_VAL_LSBMASK           (0x00000001)
+#define ODN_PDP_REGLD_CTRL_REGLD_VAL_SHIFT             (23)
+#define ODN_PDP_REGLD_CTRL_REGLD_VAL_LENGTH            (1)
+#define ODN_PDP_REGLD_CTRL_REGLD_VAL_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_UPDCTRL_OFFSET                 (0x08E0)
+
+/* PDP, UPDCTRL, UPDFIELD
+*/
+#define ODN_PDP_UPDCTRL_UPDFIELD_MASK          (0x00000001)
+#define ODN_PDP_UPDCTRL_UPDFIELD_LSBMASK       (0x00000001)
+#define ODN_PDP_UPDCTRL_UPDFIELD_SHIFT         (0)
+#define ODN_PDP_UPDCTRL_UPDFIELD_LENGTH                (1)
+#define ODN_PDP_UPDCTRL_UPDFIELD_SIGNED_FIELD  IMG_FALSE
+
+#define ODN_PDP_INTCTRL_OFFSET                 (0x08E4)
+
+/* PDP, INTCTRL, HBLNK_LINE
+*/
+#define ODN_PDP_INTCTRL_HBLNK_LINE_MASK                (0x00010000)
+#define ODN_PDP_INTCTRL_HBLNK_LINE_LSBMASK     (0x00000001)
+#define ODN_PDP_INTCTRL_HBLNK_LINE_SHIFT       (16)
+#define ODN_PDP_INTCTRL_HBLNK_LINE_LENGTH      (1)
+#define ODN_PDP_INTCTRL_HBLNK_LINE_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, INTCTRL, HBLNK_LINENO
+*/
+#define ODN_PDP_INTCTRL_HBLNK_LINENO_MASK      (0x00001FFF)
+#define ODN_PDP_INTCTRL_HBLNK_LINENO_LSBMASK   (0x00001FFF)
+#define ODN_PDP_INTCTRL_HBLNK_LINENO_SHIFT     (0)
+#define ODN_PDP_INTCTRL_HBLNK_LINENO_LENGTH    (13)
+#define ODN_PDP_INTCTRL_HBLNK_LINENO_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_PDISETUP_OFFSET                (0x0900)
+
+/* PDP, PDISETUP, PDI_BLNKLVL
+*/
+#define ODN_PDP_PDISETUP_PDI_BLNKLVL_MASK              (0x00000040)
+#define ODN_PDP_PDISETUP_PDI_BLNKLVL_LSBMASK           (0x00000001)
+#define ODN_PDP_PDISETUP_PDI_BLNKLVL_SHIFT             (6)
+#define ODN_PDP_PDISETUP_PDI_BLNKLVL_LENGTH            (1)
+#define ODN_PDP_PDISETUP_PDI_BLNKLVL_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, PDISETUP, PDI_BLNK
+*/
+#define ODN_PDP_PDISETUP_PDI_BLNK_MASK         (0x00000020)
+#define ODN_PDP_PDISETUP_PDI_BLNK_LSBMASK              (0x00000001)
+#define ODN_PDP_PDISETUP_PDI_BLNK_SHIFT                (5)
+#define ODN_PDP_PDISETUP_PDI_BLNK_LENGTH               (1)
+#define ODN_PDP_PDISETUP_PDI_BLNK_SIGNED_FIELD IMG_FALSE
+
+/* PDP, PDISETUP, PDI_PWR
+*/
+#define ODN_PDP_PDISETUP_PDI_PWR_MASK          (0x00000010)
+#define ODN_PDP_PDISETUP_PDI_PWR_LSBMASK               (0x00000001)
+#define ODN_PDP_PDISETUP_PDI_PWR_SHIFT         (4)
+#define ODN_PDP_PDISETUP_PDI_PWR_LENGTH                (1)
+#define ODN_PDP_PDISETUP_PDI_PWR_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, PDISETUP, PDI_EN
+*/
+#define ODN_PDP_PDISETUP_PDI_EN_MASK           (0x00000008)
+#define ODN_PDP_PDISETUP_PDI_EN_LSBMASK                (0x00000001)
+#define ODN_PDP_PDISETUP_PDI_EN_SHIFT          (3)
+#define ODN_PDP_PDISETUP_PDI_EN_LENGTH         (1)
+#define ODN_PDP_PDISETUP_PDI_EN_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, PDISETUP, PDI_GDEN
+*/
+#define ODN_PDP_PDISETUP_PDI_GDEN_MASK         (0x00000004)
+#define ODN_PDP_PDISETUP_PDI_GDEN_LSBMASK              (0x00000001)
+#define ODN_PDP_PDISETUP_PDI_GDEN_SHIFT                (2)
+#define ODN_PDP_PDISETUP_PDI_GDEN_LENGTH               (1)
+#define ODN_PDP_PDISETUP_PDI_GDEN_SIGNED_FIELD IMG_FALSE
+
+/* PDP, PDISETUP, PDI_NFEN
+*/
+#define ODN_PDP_PDISETUP_PDI_NFEN_MASK         (0x00000002)
+#define ODN_PDP_PDISETUP_PDI_NFEN_LSBMASK              (0x00000001)
+#define ODN_PDP_PDISETUP_PDI_NFEN_SHIFT                (1)
+#define ODN_PDP_PDISETUP_PDI_NFEN_LENGTH               (1)
+#define ODN_PDP_PDISETUP_PDI_NFEN_SIGNED_FIELD IMG_FALSE
+
+/* PDP, PDISETUP, PDI_CR
+*/
+#define ODN_PDP_PDISETUP_PDI_CR_MASK           (0x00000001)
+#define ODN_PDP_PDISETUP_PDI_CR_LSBMASK                (0x00000001)
+#define ODN_PDP_PDISETUP_PDI_CR_SHIFT          (0)
+#define ODN_PDP_PDISETUP_PDI_CR_LENGTH         (1)
+#define ODN_PDP_PDISETUP_PDI_CR_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_PDITIMING0_OFFSET              (0x0904)
+
+/* PDP, PDITIMING0, PDI_PWRSVGD
+*/
+#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_MASK            (0x0F000000)
+#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_LSBMASK         (0x0000000F)
+#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_SHIFT           (24)
+#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_LENGTH          (4)
+#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, PDITIMING0, PDI_LSDEL
+*/
+#define ODN_PDP_PDITIMING0_PDI_LSDEL_MASK              (0x007F0000)
+#define ODN_PDP_PDITIMING0_PDI_LSDEL_LSBMASK           (0x0000007F)
+#define ODN_PDP_PDITIMING0_PDI_LSDEL_SHIFT             (16)
+#define ODN_PDP_PDITIMING0_PDI_LSDEL_LENGTH            (7)
+#define ODN_PDP_PDITIMING0_PDI_LSDEL_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, PDITIMING0, PDI_PWRSV2GD2
+*/
+#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_MASK          (0x000003FF)
+#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_LSBMASK               (0x000003FF)
+#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_SHIFT         (0)
+#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_LENGTH                (10)
+#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_SIGNED_FIELD  IMG_FALSE
+
+#define ODN_PDP_PDITIMING1_OFFSET              (0x0908)
+
+/* PDP, PDITIMING1, PDI_NLDEL
+*/
+#define ODN_PDP_PDITIMING1_PDI_NLDEL_MASK              (0x000F0000)
+#define ODN_PDP_PDITIMING1_PDI_NLDEL_LSBMASK           (0x0000000F)
+#define ODN_PDP_PDITIMING1_PDI_NLDEL_SHIFT             (16)
+#define ODN_PDP_PDITIMING1_PDI_NLDEL_LENGTH            (4)
+#define ODN_PDP_PDITIMING1_PDI_NLDEL_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, PDITIMING1, PDI_ACBDEL
+*/
+#define ODN_PDP_PDITIMING1_PDI_ACBDEL_MASK             (0x000003FF)
+#define ODN_PDP_PDITIMING1_PDI_ACBDEL_LSBMASK          (0x000003FF)
+#define ODN_PDP_PDITIMING1_PDI_ACBDEL_SHIFT            (0)
+#define ODN_PDP_PDITIMING1_PDI_ACBDEL_LENGTH           (10)
+#define ODN_PDP_PDITIMING1_PDI_ACBDEL_SIGNED_FIELD     IMG_FALSE
+
+#define ODN_PDP_PDICOREID_OFFSET               (0x090C)
+
+/* PDP, PDICOREID, PDI_GROUP_ID
+*/
+#define ODN_PDP_PDICOREID_PDI_GROUP_ID_MASK            (0xFF000000)
+#define ODN_PDP_PDICOREID_PDI_GROUP_ID_LSBMASK         (0x000000FF)
+#define ODN_PDP_PDICOREID_PDI_GROUP_ID_SHIFT           (24)
+#define ODN_PDP_PDICOREID_PDI_GROUP_ID_LENGTH          (8)
+#define ODN_PDP_PDICOREID_PDI_GROUP_ID_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, PDICOREID, PDI_CORE_ID
+*/
+#define ODN_PDP_PDICOREID_PDI_CORE_ID_MASK             (0x00FF0000)
+#define ODN_PDP_PDICOREID_PDI_CORE_ID_LSBMASK          (0x000000FF)
+#define ODN_PDP_PDICOREID_PDI_CORE_ID_SHIFT            (16)
+#define ODN_PDP_PDICOREID_PDI_CORE_ID_LENGTH           (8)
+#define ODN_PDP_PDICOREID_PDI_CORE_ID_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, PDICOREID, PDI_CONFIG_ID
+*/
+#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_MASK           (0x0000FFFF)
+#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_LSBMASK                (0x0000FFFF)
+#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_SHIFT          (0)
+#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_LENGTH         (16)
+#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_PDICOREREV_OFFSET              (0x0910)
+
+/* PDP, PDICOREREV, PDI_MAJOR_REV
+*/
+#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_MASK          (0x00FF0000)
+#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_LSBMASK               (0x000000FF)
+#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_SHIFT         (16)
+#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_LENGTH                (8)
+#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, PDICOREREV, PDI_MINOR_REV
+*/
+#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_MASK          (0x0000FF00)
+#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_LSBMASK               (0x000000FF)
+#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_SHIFT         (8)
+#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_LENGTH                (8)
+#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, PDICOREREV, PDI_MAINT_REV
+*/
+#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_MASK          (0x000000FF)
+#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_LSBMASK               (0x000000FF)
+#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_SHIFT         (0)
+#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_LENGTH                (8)
+#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_SIGNED_FIELD  IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX2_OFFSET           (0x0920)
+
+/* PDP, DITHERMATRIX2, DITHERMATRIX2X1Y1
+*/
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_MASK           (0x000000C0)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_LSBMASK                (0x00000003)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_SHIFT          (6)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_LENGTH         (2)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, DITHERMATRIX2, DITHERMATRIX2X0Y1
+*/
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_MASK           (0x00000030)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_LSBMASK                (0x00000003)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_SHIFT          (4)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_LENGTH         (2)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, DITHERMATRIX2, DITHERMATRIX2X1Y0
+*/
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_MASK           (0x0000000C)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_LSBMASK                (0x00000003)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_SHIFT          (2)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_LENGTH         (2)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, DITHERMATRIX2, DITHERMATRIX2X0Y0
+*/
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_MASK           (0x00000003)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_LSBMASK                (0x00000003)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_SHIFT          (0)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_LENGTH         (2)
+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_SIGNED_FIELD   IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX4_0_OFFSET         (0x0924)
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X3Y1
+*/
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_MASK         (0xF0000000)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_LSBMASK              (0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_SHIFT                (28)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_LENGTH               (4)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X2Y1
+*/
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_MASK         (0x0F000000)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_LSBMASK              (0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_SHIFT                (24)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_LENGTH               (4)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X1Y1
+*/
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_MASK         (0x00F00000)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_LSBMASK              (0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_SHIFT                (20)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_LENGTH               (4)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X0Y1
+*/
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_MASK         (0x000F0000)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_LSBMASK              (0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_SHIFT                (16)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_LENGTH               (4)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X3Y0
+*/
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_MASK         (0x0000F000)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_LSBMASK              (0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_SHIFT                (12)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_LENGTH               (4)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X2Y0
+*/
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_MASK         (0x00000F00)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_LSBMASK              (0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_SHIFT                (8)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_LENGTH               (4)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X1Y0
+*/
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_MASK         (0x000000F0)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_LSBMASK              (0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_SHIFT                (4)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_LENGTH               (4)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X0Y0
+*/
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_MASK         (0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_LSBMASK              (0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_SHIFT                (0)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_LENGTH               (4)
+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX4_1_OFFSET         (0x0928)
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X3Y3
+*/
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_MASK         (0xF0000000)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_LSBMASK              (0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_SHIFT                (28)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_LENGTH               (4)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X2Y3
+*/
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_MASK         (0x0F000000)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_LSBMASK              (0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_SHIFT                (24)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_LENGTH               (4)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X1Y3
+*/
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_MASK         (0x00F00000)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_LSBMASK              (0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_SHIFT                (20)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_LENGTH               (4)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X0Y3
+*/
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_MASK         (0x000F0000)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_LSBMASK              (0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_SHIFT                (16)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_LENGTH               (4)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X3Y2
+*/
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_MASK         (0x0000F000)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_LSBMASK              (0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_SHIFT                (12)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_LENGTH               (4)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X2Y2
+*/
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_MASK         (0x00000F00)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_LSBMASK              (0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_SHIFT                (8)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_LENGTH               (4)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X1Y2
+*/
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_MASK         (0x000000F0)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_LSBMASK              (0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_SHIFT                (4)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_LENGTH               (4)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X0Y2
+*/
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_MASK         (0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_LSBMASK              (0x0000000F)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_SHIFT                (0)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_LENGTH               (4)
+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_0_OFFSET         (0x092C)
+
+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X4Y0
+*/
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_MASK         (0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_SHIFT                (24)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X3Y0
+*/
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_MASK         (0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_SHIFT                (18)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X2Y0
+*/
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_MASK         (0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_SHIFT                (12)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X1Y0
+*/
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_MASK         (0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_SHIFT                (6)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X0Y0
+*/
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_MASK         (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_SHIFT                (0)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_1_OFFSET         (0x0930)
+
+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X1Y1
+*/
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_MASK         (0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_SHIFT                (24)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X0Y1
+*/
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_MASK         (0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_SHIFT                (18)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X7Y0
+*/
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_MASK         (0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_SHIFT                (12)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X6Y0
+*/
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_MASK         (0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_SHIFT                (6)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X5Y0
+*/
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_MASK         (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_SHIFT                (0)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_2_OFFSET         (0x0934)
+
+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X6Y1
+*/
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_MASK         (0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_SHIFT                (24)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X5Y1
+*/
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_MASK         (0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_SHIFT                (18)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X4Y1
+*/
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_MASK         (0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_SHIFT                (12)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X3Y1
+*/
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_MASK         (0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_SHIFT                (6)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X2Y1
+*/
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_MASK         (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_SHIFT                (0)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_3_OFFSET         (0x0938)
+
+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X3Y2
+*/
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_MASK         (0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_SHIFT                (24)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X2Y2
+*/
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_MASK         (0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_SHIFT                (18)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X1Y2
+*/
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_MASK         (0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_SHIFT                (12)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X0Y2
+*/
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_MASK         (0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_SHIFT                (6)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X7Y1
+*/
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_MASK         (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_SHIFT                (0)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_4_OFFSET         (0x093C)
+
+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X0Y3
+*/
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_MASK         (0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_SHIFT                (24)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X7Y2
+*/
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_MASK         (0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_SHIFT                (18)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X6Y2
+*/
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_MASK         (0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_SHIFT                (12)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X5Y2
+*/
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_MASK         (0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_SHIFT                (6)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X4Y2
+*/
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_MASK         (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_SHIFT                (0)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_5_OFFSET         (0x0940)
+
+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X5Y3
+*/
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_MASK         (0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_SHIFT                (24)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X4Y3
+*/
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_MASK         (0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_SHIFT                (18)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X3Y3
+*/
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_MASK         (0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_SHIFT                (12)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X2Y3
+*/
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_MASK         (0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_SHIFT                (6)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X1Y3
+*/
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_MASK         (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_SHIFT                (0)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_6_OFFSET         (0x0944)
+
+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X2Y4
+*/
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_MASK         (0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_SHIFT                (24)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X1Y4
+*/
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_MASK         (0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_SHIFT                (18)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X0Y4
+*/
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_MASK         (0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_SHIFT                (12)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X7Y3
+*/
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_MASK         (0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_SHIFT                (6)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X6Y3
+*/
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_MASK         (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_SHIFT                (0)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_7_OFFSET         (0x0948)
+
+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X7Y4
+*/
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_MASK         (0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_SHIFT                (24)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X6Y4
+*/
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_MASK         (0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_SHIFT                (18)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X5Y4
+*/
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_MASK         (0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_SHIFT                (12)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X4Y4
+*/
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_MASK         (0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_SHIFT                (6)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X3Y4
+*/
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_MASK         (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_SHIFT                (0)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_8_OFFSET         (0x094C)
+
+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X4Y5
+*/
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_MASK         (0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_SHIFT                (24)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X3Y5
+*/
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_MASK         (0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_SHIFT                (18)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X2Y5
+*/
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_MASK         (0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_SHIFT                (12)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X1Y5
+*/
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_MASK         (0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_SHIFT                (6)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X0Y5
+*/
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_MASK         (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_SHIFT                (0)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_9_OFFSET         (0x0950)
+
+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X1Y6
+*/
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_MASK         (0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_SHIFT                (24)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X0Y6
+*/
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_MASK         (0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_SHIFT                (18)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X7Y5
+*/
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_MASK         (0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_SHIFT                (12)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X6Y5
+*/
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_MASK         (0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_SHIFT                (6)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_SIGNED_FIELD IMG_FALSE
+
+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X5Y5
+*/
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_MASK         (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_LSBMASK              (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_SHIFT                (0)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_LENGTH               (6)
+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_SIGNED_FIELD IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_10_OFFSET                (0x0954)
+
+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X6Y6
+*/
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_MASK                (0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_LSBMASK             (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_SHIFT               (24)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_LENGTH              (6)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X5Y6
+*/
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_MASK                (0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_LSBMASK             (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_SHIFT               (18)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_LENGTH              (6)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X4Y6
+*/
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_MASK                (0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_LSBMASK             (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_SHIFT               (12)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_LENGTH              (6)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X3Y6
+*/
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_MASK                (0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_LSBMASK             (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_SHIFT               (6)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_LENGTH              (6)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X2Y6
+*/
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_MASK                (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_LSBMASK             (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_SHIFT               (0)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_LENGTH              (6)
+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_SIGNED_FIELD        IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_11_OFFSET                (0x0958)
+
+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X3Y7
+*/
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_MASK                (0x3F000000)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_LSBMASK             (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_SHIFT               (24)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_LENGTH              (6)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X2Y7
+*/
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_MASK                (0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_LSBMASK             (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_SHIFT               (18)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_LENGTH              (6)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X1Y7
+*/
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_MASK                (0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_LSBMASK             (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_SHIFT               (12)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_LENGTH              (6)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X0Y7
+*/
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_MASK                (0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_LSBMASK             (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_SHIFT               (6)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_LENGTH              (6)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X7Y6
+*/
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_MASK                (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_LSBMASK             (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_SHIFT               (0)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_LENGTH              (6)
+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_SIGNED_FIELD        IMG_FALSE
+
+#define ODN_PDP_DITHERMATRIX8_12_OFFSET                (0x095C)
+
+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X7Y7
+*/
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_MASK                (0x00FC0000)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_LSBMASK             (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_SHIFT               (18)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_LENGTH              (6)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X6Y7
+*/
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_MASK                (0x0003F000)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_LSBMASK             (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_SHIFT               (12)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_LENGTH              (6)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X5Y7
+*/
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_MASK                (0x00000FC0)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_LSBMASK             (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_SHIFT               (6)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_LENGTH              (6)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X4Y7
+*/
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_MASK                (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_LSBMASK             (0x0000003F)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_SHIFT               (0)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_LENGTH              (6)
+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_SIGNED_FIELD        IMG_FALSE
+
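+/*
+ * Usage sketch (an assumption for illustration, not generated content):
+ * each field above is described by a _MASK/_SHIFT pair, which callers
+ * would typically combine to extract or update the field within a 32-bit
+ * PDP register value.  For a hypothetical value 'val' read from
+ * ODN_PDP_DITHERMATRIX8_12_OFFSET:
+ *
+ *   u32 field = (val & ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_MASK)
+ *               >> ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_SHIFT;
+ *
+ * and a read-modify-write update of the same field:
+ *
+ *   val &= ~ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_MASK;
+ *   val |= (field << ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_SHIFT)
+ *          & ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_MASK;
+ */
+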
+#define ODN_PDP_GRPH1_MEMCTRL_OFFSET           (0x0960)
+
+/* PDP, GRPH1_MEMCTRL, GRPH1_LOCAL_GLOBAL_MEMCTRL
+*/
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_MASK          (0x80000000)
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_LSBMASK               (0x00000001)
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_SHIFT         (31)
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_LENGTH                (1)
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, GRPH1_MEMCTRL, GRPH1_BURSTLEN
+*/
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_MASK              (0x000000FF)
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_LSBMASK           (0x000000FF)
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_SHIFT             (0)
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_LENGTH            (8)
+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_GRPH1_MEM_THRESH_OFFSET                (0x0964)
+
+/* PDP, GRPH1_MEM_THRESH, GRPH1_UVTHRESHOLD
+*/
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_MASK                (0xFF000000)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_LSBMASK             (0x000000FF)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_SHIFT               (24)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_LENGTH              (8)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, GRPH1_MEM_THRESH, GRPH1_YTHRESHOLD
+*/
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_MASK         (0x001FF000)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_LSBMASK              (0x000001FF)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_SHIFT                (12)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_LENGTH               (9)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH1_MEM_THRESH, GRPH1_THRESHOLD
+*/
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_MASK          (0x000001FF)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_LSBMASK               (0x000001FF)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_SHIFT         (0)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_LENGTH                (9)
+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_SIGNED_FIELD  IMG_FALSE
+
+#define ODN_PDP_GRPH2_MEMCTRL_OFFSET           (0x0968)
+
+/* PDP, GRPH2_MEMCTRL, GRPH2_LOCAL_GLOBAL_MEMCTRL
+*/
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_MASK          (0x80000000)
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_LSBMASK               (0x00000001)
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_SHIFT         (31)
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_LENGTH                (1)
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, GRPH2_MEMCTRL, GRPH2_BURSTLEN
+*/
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_MASK              (0x000000FF)
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_LSBMASK           (0x000000FF)
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_SHIFT             (0)
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_LENGTH            (8)
+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_GRPH2_MEM_THRESH_OFFSET                (0x096C)
+
+/* PDP, GRPH2_MEM_THRESH, GRPH2_UVTHRESHOLD
+*/
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_MASK                (0xFF000000)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_LSBMASK             (0x000000FF)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_SHIFT               (24)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_LENGTH              (8)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, GRPH2_MEM_THRESH, GRPH2_YTHRESHOLD
+*/
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_MASK         (0x001FF000)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_LSBMASK              (0x000001FF)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_SHIFT                (12)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_LENGTH               (9)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH2_MEM_THRESH, GRPH2_THRESHOLD
+*/
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_MASK          (0x000001FF)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_LSBMASK               (0x000001FF)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_SHIFT         (0)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_LENGTH                (9)
+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_SIGNED_FIELD  IMG_FALSE
+
+#define ODN_PDP_GRPH3_MEMCTRL_OFFSET           (0x0970)
+
+/* PDP, GRPH3_MEMCTRL, GRPH3_LOCAL_GLOBAL_MEMCTRL
+*/
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_MASK          (0x80000000)
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_LSBMASK               (0x00000001)
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_SHIFT         (31)
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_LENGTH                (1)
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, GRPH3_MEMCTRL, GRPH3_BURSTLEN
+*/
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_MASK              (0x000000FF)
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_LSBMASK           (0x000000FF)
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_SHIFT             (0)
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_LENGTH            (8)
+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_GRPH3_MEM_THRESH_OFFSET                (0x0974)
+
+/* PDP, GRPH3_MEM_THRESH, GRPH3_UVTHRESHOLD
+*/
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_MASK                (0xFF000000)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_LSBMASK             (0x000000FF)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_SHIFT               (24)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_LENGTH              (8)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, GRPH3_MEM_THRESH, GRPH3_YTHRESHOLD
+*/
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_MASK         (0x001FF000)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_LSBMASK              (0x000001FF)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_SHIFT                (12)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_LENGTH               (9)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH3_MEM_THRESH, GRPH3_THRESHOLD
+*/
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_MASK          (0x000001FF)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_LSBMASK               (0x000001FF)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_SHIFT         (0)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_LENGTH                (9)
+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_SIGNED_FIELD  IMG_FALSE
+
+#define ODN_PDP_GRPH4_MEMCTRL_OFFSET           (0x0978)
+
+/* PDP, GRPH4_MEMCTRL, GRPH4_LOCAL_GLOBAL_MEMCTRL
+*/
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_MASK          (0x80000000)
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_LSBMASK               (0x00000001)
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_SHIFT         (31)
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_LENGTH                (1)
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, GRPH4_MEMCTRL, GRPH4_BURSTLEN
+*/
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_MASK              (0x000000FF)
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_LSBMASK           (0x000000FF)
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_SHIFT             (0)
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_LENGTH            (8)
+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_GRPH4_MEM_THRESH_OFFSET                (0x097C)
+
+/* PDP, GRPH4_MEM_THRESH, GRPH4_UVTHRESHOLD
+*/
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_MASK                (0xFF000000)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_LSBMASK             (0x000000FF)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_SHIFT               (24)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_LENGTH              (8)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_SIGNED_FIELD        IMG_FALSE
+
+/* PDP, GRPH4_MEM_THRESH, GRPH4_YTHRESHOLD
+*/
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_MASK         (0x001FF000)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_LSBMASK              (0x000001FF)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_SHIFT                (12)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_LENGTH               (9)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_SIGNED_FIELD IMG_FALSE
+
+/* PDP, GRPH4_MEM_THRESH, GRPH4_THRESHOLD
+*/
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_MASK          (0x000001FF)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_LSBMASK               (0x000001FF)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_SHIFT         (0)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_LENGTH                (9)
+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_SIGNED_FIELD  IMG_FALSE
+
+#define ODN_PDP_VID1_MEMCTRL_OFFSET            (0x0980)
+
+/* PDP, VID1_MEMCTRL, VID1_LOCAL_GLOBAL_MEMCTRL
+*/
+#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_MASK            (0x80000000)
+#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_LSBMASK         (0x00000001)
+#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_SHIFT           (31)
+#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_LENGTH          (1)
+#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, VID1_MEMCTRL, VID1_BURSTLEN
+*/
+#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_MASK                (0x000000FF)
+#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_LSBMASK             (0x000000FF)
+#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_SHIFT               (0)
+#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_LENGTH              (8)
+#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_SIGNED_FIELD        IMG_FALSE
+
+#define ODN_PDP_VID1_MEM_THRESH_OFFSET         (0x0984)
+
+/* PDP, VID1_MEM_THRESH, VID1_UVTHRESHOLD
+*/
+#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_MASK          (0xFF000000)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_LSBMASK               (0x000000FF)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_SHIFT         (24)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_LENGTH                (8)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, VID1_MEM_THRESH, VID1_YTHRESHOLD
+*/
+#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_MASK           (0x001FF000)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_LSBMASK                (0x000001FF)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_SHIFT          (12)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_LENGTH         (9)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, VID1_MEM_THRESH, VID1_THRESHOLD
+*/
+#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_MASK            (0x000001FF)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_LSBMASK         (0x000001FF)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_SHIFT           (0)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_LENGTH          (9)
+#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_VID2_MEMCTRL_OFFSET            (0x0988)
+
+/* PDP, VID2_MEMCTRL, VID2_LOCAL_GLOBAL_MEMCTRL
+*/
+#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_MASK            (0x80000000)
+#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_LSBMASK         (0x00000001)
+#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_SHIFT           (31)
+#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_LENGTH          (1)
+#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, VID2_MEMCTRL, VID2_BURSTLEN
+*/
+#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_MASK                (0x000000FF)
+#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_LSBMASK             (0x000000FF)
+#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_SHIFT               (0)
+#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_LENGTH              (8)
+#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_SIGNED_FIELD        IMG_FALSE
+
+#define ODN_PDP_VID2_MEM_THRESH_OFFSET         (0x098C)
+
+/* PDP, VID2_MEM_THRESH, VID2_UVTHRESHOLD
+*/
+#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_MASK          (0xFF000000)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_LSBMASK               (0x000000FF)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_SHIFT         (24)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_LENGTH                (8)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, VID2_MEM_THRESH, VID2_YTHRESHOLD
+*/
+#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_MASK           (0x001FF000)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_LSBMASK                (0x000001FF)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_SHIFT          (12)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_LENGTH         (9)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, VID2_MEM_THRESH, VID2_THRESHOLD
+*/
+#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_MASK            (0x000001FF)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_LSBMASK         (0x000001FF)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_SHIFT           (0)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_LENGTH          (9)
+#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_VID3_MEMCTRL_OFFSET            (0x0990)
+
+/* PDP, VID3_MEMCTRL, VID3_LOCAL_GLOBAL_MEMCTRL
+*/
+#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_MASK            (0x80000000)
+#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_LSBMASK         (0x00000001)
+#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_SHIFT           (31)
+#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_LENGTH          (1)
+#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, VID3_MEMCTRL, VID3_BURSTLEN
+*/
+#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_MASK                (0x000000FF)
+#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_LSBMASK             (0x000000FF)
+#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_SHIFT               (0)
+#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_LENGTH              (8)
+#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_SIGNED_FIELD        IMG_FALSE
+
+#define ODN_PDP_VID3_MEM_THRESH_OFFSET         (0x0994)
+
+/* PDP, VID3_MEM_THRESH, VID3_UVTHRESHOLD
+*/
+#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_MASK          (0xFF000000)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_LSBMASK               (0x000000FF)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_SHIFT         (24)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_LENGTH                (8)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, VID3_MEM_THRESH, VID3_YTHRESHOLD
+*/
+#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_MASK           (0x001FF000)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_LSBMASK                (0x000001FF)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_SHIFT          (12)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_LENGTH         (9)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, VID3_MEM_THRESH, VID3_THRESHOLD
+*/
+#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_MASK            (0x000001FF)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_LSBMASK         (0x000001FF)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_SHIFT           (0)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_LENGTH          (9)
+#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_VID4_MEMCTRL_OFFSET            (0x0998)
+
+/* PDP, VID4_MEMCTRL, VID4_LOCAL_GLOBAL_MEMCTRL
+*/
+#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_MASK            (0x80000000)
+#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_LSBMASK         (0x00000001)
+#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_SHIFT           (31)
+#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_LENGTH          (1)
+#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, VID4_MEMCTRL, VID4_BURSTLEN
+*/
+#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_MASK                (0x000000FF)
+#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_LSBMASK             (0x000000FF)
+#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_SHIFT               (0)
+#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_LENGTH              (8)
+#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_SIGNED_FIELD        IMG_FALSE
+
+#define ODN_PDP_VID4_MEM_THRESH_OFFSET         (0x099C)
+
+/* PDP, VID4_MEM_THRESH, VID4_UVTHRESHOLD
+*/
+#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_MASK          (0xFF000000)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_LSBMASK               (0x000000FF)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_SHIFT         (24)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_LENGTH                (8)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, VID4_MEM_THRESH, VID4_YTHRESHOLD
+*/
+#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_MASK           (0x001FF000)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_LSBMASK                (0x000001FF)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_SHIFT          (12)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_LENGTH         (9)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, VID4_MEM_THRESH, VID4_THRESHOLD
+*/
+#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_MASK            (0x000001FF)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_LSBMASK         (0x000001FF)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_SHIFT           (0)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_LENGTH          (9)
+#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_GRPH1_PANIC_THRESH_OFFSET              (0x09A0)
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_ENABLE
+*/
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_MASK          (0x80000000)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_LSBMASK               (0x00000001)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_SHIFT         (31)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_LENGTH                (1)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_ENABLE
+*/
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_MASK           (0x40000000)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_LSBMASK                (0x00000001)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_SHIFT          (30)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_LENGTH         (1)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_THRESHOLD_MAX
+*/
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_MASK           (0x3F800000)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_LSBMASK                (0x0000007F)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_SHIFT          (23)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_LENGTH         (7)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_THRESHOLD_MIN
+*/
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_MASK           (0x007F0000)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_LSBMASK                (0x0000007F)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_SHIFT          (16)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_LENGTH         (7)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_THRESHOLD_MAX
+*/
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_MASK            (0x0000FF00)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_LSBMASK         (0x000000FF)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_SHIFT           (8)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_LENGTH          (8)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_THRESHOLD_MIN
+*/
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_MASK            (0x000000FF)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_LSBMASK         (0x000000FF)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_SHIFT           (0)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_LENGTH          (8)
+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_GRPH2_PANIC_THRESH_OFFSET              (0x09A4)
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_ENABLE
+*/
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_MASK          (0x80000000)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_LSBMASK               (0x00000001)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_SHIFT         (31)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_LENGTH                (1)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_ENABLE
+*/
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_MASK           (0x40000000)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_LSBMASK                (0x00000001)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_SHIFT          (30)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_LENGTH         (1)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_THRESHOLD_MAX
+*/
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_MASK           (0x3F800000)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_LSBMASK                (0x0000007F)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_SHIFT          (23)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_LENGTH         (7)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_THRESHOLD_MIN
+*/
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_MASK           (0x007F0000)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_LSBMASK                (0x0000007F)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_SHIFT          (16)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_LENGTH         (7)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_THRESHOLD_MAX
+*/
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_MASK            (0x0000FF00)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_LSBMASK         (0x000000FF)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_SHIFT           (8)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_LENGTH          (8)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_THRESHOLD_MIN
+*/
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_MASK            (0x000000FF)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_LSBMASK         (0x000000FF)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_SHIFT           (0)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_LENGTH          (8)
+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_GRPH3_PANIC_THRESH_OFFSET              (0x09A8)
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_ENABLE
+*/
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_MASK          (0x80000000)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_LSBMASK               (0x00000001)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_SHIFT         (31)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_LENGTH                (1)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_ENABLE
+*/
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_MASK           (0x40000000)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_LSBMASK                (0x00000001)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_SHIFT          (30)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_LENGTH         (1)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_THRESHOLD_MAX
+*/
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_MASK           (0x3F800000)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_LSBMASK                (0x0000007F)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_SHIFT          (23)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_LENGTH         (7)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_THRESHOLD_MIN
+*/
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_MASK           (0x007F0000)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_LSBMASK                (0x0000007F)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_SHIFT          (16)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_LENGTH         (7)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_THRESHOLD_MAX
+*/
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_MASK            (0x0000FF00)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_LSBMASK         (0x000000FF)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_SHIFT           (8)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_LENGTH          (8)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_THRESHOLD_MIN
+*/
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_MASK            (0x000000FF)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_LSBMASK         (0x000000FF)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_SHIFT           (0)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_LENGTH          (8)
+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_GRPH4_PANIC_THRESH_OFFSET              (0x09AC)
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_ENABLE
+*/
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_MASK          (0x80000000)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_LSBMASK               (0x00000001)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_SHIFT         (31)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_LENGTH                (1)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_SIGNED_FIELD  IMG_FALSE
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_ENABLE
+*/
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_MASK           (0x40000000)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_LSBMASK                (0x00000001)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_SHIFT          (30)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_LENGTH         (1)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_THRESHOLD_MAX
+*/
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_MASK           (0x3F800000)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_LSBMASK                (0x0000007F)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_SHIFT          (23)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_LENGTH         (7)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_THRESHOLD_MIN
+*/
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_MASK           (0x007F0000)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_LSBMASK                (0x0000007F)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_SHIFT          (16)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_LENGTH         (7)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD   IMG_FALSE
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_THRESHOLD_MAX
+*/
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_MASK            (0x0000FF00)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_LSBMASK         (0x000000FF)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_SHIFT           (8)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_LENGTH          (8)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_THRESHOLD_MIN
+*/
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_MASK            (0x000000FF)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_LSBMASK         (0x000000FF)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_SHIFT           (0)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_LENGTH          (8)
+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD    IMG_FALSE
+
+#define ODN_PDP_VID1_PANIC_THRESH_OFFSET               (0x09B0)
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_ENABLE
+*/
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_MASK            (0x80000000)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_LSBMASK         (0x00000001)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_SHIFT           (31)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_LENGTH          (1)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_ENABLE
+*/
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_MASK             (0x40000000)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_LSBMASK          (0x00000001)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_SHIFT            (30)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_LENGTH           (1)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_THRESHOLD_MAX
+*/
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_MASK             (0x3F800000)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_LSBMASK          (0x0000007F)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_SHIFT            (23)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_LENGTH           (7)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_THRESHOLD_MIN
+*/
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_MASK             (0x007F0000)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_LSBMASK          (0x0000007F)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_SHIFT            (16)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_LENGTH           (7)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_THRESHOLD_MAX
+*/
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_MASK              (0x0000FF00)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_LSBMASK           (0x000000FF)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_SHIFT             (8)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_LENGTH            (8)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_THRESHOLD_MIN
+*/
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_MASK              (0x000000FF)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_LSBMASK           (0x000000FF)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_SHIFT             (0)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_LENGTH            (8)
+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_VID2_PANIC_THRESH_OFFSET               (0x09B4)
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_ENABLE
+*/
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_MASK            (0x80000000)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_LSBMASK         (0x00000001)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_SHIFT           (31)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_LENGTH          (1)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_ENABLE
+*/
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_MASK             (0x40000000)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_LSBMASK          (0x00000001)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_SHIFT            (30)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_LENGTH           (1)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_THRESHOLD_MAX
+*/
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_MASK             (0x3F800000)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_LSBMASK          (0x0000007F)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_SHIFT            (23)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_LENGTH           (7)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_THRESHOLD_MIN
+*/
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_MASK             (0x007F0000)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_LSBMASK          (0x0000007F)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_SHIFT            (16)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_LENGTH           (7)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_THRESHOLD_MAX
+*/
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_MASK              (0x0000FF00)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_LSBMASK           (0x000000FF)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_SHIFT             (8)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_LENGTH            (8)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_THRESHOLD_MIN
+*/
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_MASK              (0x000000FF)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_LSBMASK           (0x000000FF)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_SHIFT             (0)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_LENGTH            (8)
+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_VID3_PANIC_THRESH_OFFSET               (0x09B8)
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_ENABLE
+*/
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_MASK            (0x80000000)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_LSBMASK         (0x00000001)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_SHIFT           (31)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_LENGTH          (1)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_ENABLE
+*/
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_MASK             (0x40000000)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_LSBMASK          (0x00000001)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_SHIFT            (30)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_LENGTH           (1)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_THRESHOLD_MAX
+*/
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_MASK             (0x3F800000)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_LSBMASK          (0x0000007F)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_SHIFT            (23)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_LENGTH           (7)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_THRESHOLD_MIN
+*/
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_MASK             (0x007F0000)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_LSBMASK          (0x0000007F)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_SHIFT            (16)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_LENGTH           (7)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_THRESHOLD_MAX
+*/
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_MASK              (0x0000FF00)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_LSBMASK           (0x000000FF)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_SHIFT             (8)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_LENGTH            (8)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_THRESHOLD_MIN
+*/
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_MASK              (0x000000FF)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_LSBMASK           (0x000000FF)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_SHIFT             (0)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_LENGTH            (8)
+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_VID4_PANIC_THRESH_OFFSET               (0x09BC)
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_ENABLE
+*/
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_MASK            (0x80000000)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_LSBMASK         (0x00000001)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_SHIFT           (31)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_LENGTH          (1)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_SIGNED_FIELD    IMG_FALSE
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_ENABLE
+*/
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_MASK             (0x40000000)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_LSBMASK          (0x00000001)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_SHIFT            (30)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_LENGTH           (1)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_THRESHOLD_MAX
+*/
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_MASK             (0x3F800000)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_LSBMASK          (0x0000007F)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_SHIFT            (23)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_LENGTH           (7)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_THRESHOLD_MIN
+*/
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_MASK             (0x007F0000)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_LSBMASK          (0x0000007F)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_SHIFT            (16)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_LENGTH           (7)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD     IMG_FALSE
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_THRESHOLD_MAX
+*/
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_MASK              (0x0000FF00)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_LSBMASK           (0x000000FF)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_SHIFT             (8)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_LENGTH            (8)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD      IMG_FALSE
+
+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_THRESHOLD_MIN
+*/
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_MASK              (0x000000FF)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_LSBMASK           (0x000000FF)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_SHIFT             (0)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_LENGTH            (8)
+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD      IMG_FALSE
+
+#define ODN_PDP_BURST_BOUNDARY_OFFSET          (0x09C0)
+
+/* PDP, BURST_BOUNDARY, BURST_BOUNDARY
+*/
+#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_MASK             (0x0000003F)
+#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_LSBMASK          (0x0000003F)
+#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_SHIFT            (0)
+#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_LENGTH           (6)
+#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_SIGNED_FIELD     IMG_FALSE
+
+
+/* ---------------------- End of register definitions ---------------------- */
+
+/* NUMREG defines the extent of register address space.
+*/
+
+#define                ODN_PDP_NUMREG     ((0x09C0 >> 2)+1)
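+
+/* Illustrative note (editor's example, not generated from the .def source):
+ * the highest offset defined above is ODN_PDP_BURST_BOUNDARY_OFFSET (0x09C0)
+ * and each register is 32 bits wide, so the count works out as
+ * (0x09C0 >> 2) + 1 = 0x271 = 625 word-sized registers.  A minimal helper
+ * for mapping a byte offset onto a word index, under that assumption:
+ */
+static inline unsigned int odn_pdp_reg_index(unsigned int byte_offset)
+{
+	/* Every ODN_PDP_*_OFFSET above is the byte offset of a 32-bit register */
+	return byte_offset >> 2;
+}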
+
+/* Info about video plane addresses */
+#define ODN_PDP_YADDR_BITS             (ODN_PDP_VID1BASEADDR_VID1BASEADDR_LENGTH)
+#define ODN_PDP_YADDR_ALIGN            5
+#define ODN_PDP_UADDR_BITS             (ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_LENGTH)
+#define ODN_PDP_UADDR_ALIGN            5
+#define ODN_PDP_VADDR_BITS             (ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_LENGTH)
+#define ODN_PDP_VADDR_ALIGN            5
+
+#define ODN_PDP_YSTRIDE_BITS   (ODN_PDP_VID1STRIDE_VID1STRIDE_LENGTH)
+#define ODN_PDP_YSTRIDE_ALIGN  5
+
+#define ODN_PDP_MAX_INPUT_WIDTH (ODN_PDP_VID1SIZE_VID1WIDTH_LSBMASK + 1)
+#define ODN_PDP_MAX_INPUT_HEIGHT (ODN_PDP_VID1SIZE_VID1HEIGHT_LSBMASK + 1)
+
+/* Maximum 6 bytes per pixel for RGB161616 */
+#define ODN_PDP_MAX_IMAGE_BYTES (ODN_PDP_MAX_INPUT_WIDTH * ODN_PDP_MAX_INPUT_HEIGHT * 6)
+
+/* Round up */
+#define ODN_PDP_MAX_IMAGE_PAGES ((ODN_PDP_MAX_IMAGE_BYTES+PAGE_SIZE-1)/PAGE_SIZE)
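+
+/* Illustrative note (editor's example): the expression above is the usual
+ * round-up integer division, e.g. 1000000 bytes with 4096-byte pages gives
+ * (1000000 + 4095) / 4096 = 245 pages rather than the truncated 244.
+ */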
+
+#define ODN_PDP_YADDR_MAX              (((1 << ODN_PDP_YADDR_BITS) - 1) << ODN_PDP_YADDR_ALIGN)
+#define ODN_PDP_UADDR_MAX              (((1 << ODN_PDP_UADDR_BITS) - 1) << ODN_PDP_UADDR_ALIGN)
+#define ODN_PDP_VADDR_MAX              (((1 << ODN_PDP_VADDR_BITS) - 1) << ODN_PDP_VADDR_ALIGN)
+#define ODN_PDP_YSTRIDE_MAX            ((1 << ODN_PDP_YSTRIDE_BITS) << ODN_PDP_YSTRIDE_ALIGN)
+#define ODN_PDP_YADDR_ALIGNMASK                ((1 << ODN_PDP_YADDR_ALIGN) - 1)
+#define ODN_PDP_UADDR_ALIGNMASK                ((1 << ODN_PDP_UADDR_ALIGN) - 1)
+#define ODN_PDP_VADDR_ALIGNMASK                ((1 << ODN_PDP_VADDR_ALIGN) - 1)
+#define ODN_PDP_YSTRIDE_ALIGNMASK      ((1 << ODN_PDP_YSTRIDE_ALIGN) - 1)
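+
+/* Illustrative sketch (editor's example, not part of the generated header):
+ * a caller would typically validate a Y plane base address against the field
+ * width and alignment encoded above before programming it, e.g.
+ */
+static inline int odn_pdp_yaddr_is_valid(unsigned int yaddr)
+{
+	/* Must sit on a 2^ODN_PDP_YADDR_ALIGN (32-byte) boundary and fit in
+	 * the base address field.
+	 */
+	return (yaddr & ODN_PDP_YADDR_ALIGNMASK) == 0 &&
+	       yaddr <= ODN_PDP_YADDR_MAX;
+}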
+
+/* Field Values (some are reserved for future use) */
+#define ODN_PDP_SURF_PIXFMT_RGB332                                     0x3
+#define ODN_PDP_SURF_PIXFMT_ARGB4444                           0x4
+#define ODN_PDP_SURF_PIXFMT_ARGB1555                           0x5
+#define ODN_PDP_SURF_PIXFMT_RGB888                                     0x6
+#define ODN_PDP_SURF_PIXFMT_RGB565                                     0x7
+#define ODN_PDP_SURF_PIXFMT_ARGB8888                           0x8
+#define ODN_PDP_SURF_PIXFMT_420_PL8                                    0x9
+#define ODN_PDP_SURF_PIXFMT_420_PL8IVU                         0xA
+#define ODN_PDP_SURF_PIXFMT_420_PL8IUV                         0xB
+#define ODN_PDP_SURF_PIXFMT_422_UY0VY1_8888                    0xC
+#define ODN_PDP_SURF_PIXFMT_422_VY0UY1_8888                    0xD
+#define ODN_PDP_SURF_PIXFMT_422_Y0UY1V_8888                    0xE
+#define ODN_PDP_SURF_PIXFMT_422_Y0VY1U_8888                    0xF
+#define ODN_PDP_SURF_PIXFMT_AYUV8888                           0x10
+#define ODN_PDP_SURF_PIXFMT_YUV101010                          0x15
+#define ODN_PDP_SURF_PIXFMT_RGB101010                          0x17
+#define ODN_PDP_SURF_PIXFMT_420_PL10IUV                                0x18
+#define ODN_PDP_SURF_PIXFMT_420_PL10IVU                                0x19
+#define ODN_PDP_SURF_PIXFMT_422_PL10IUV                                0x1A
+#define ODN_PDP_SURF_PIXFMT_422_PL10IVU                                0x1B
+#define ODN_PDP_SURF_PIXFMT_RGB121212                          0x1E
+#define ODN_PDP_SURF_PIXFMT_RGB161616                          0x1F
+
+#define ODN_PDP_CTRL_CKEYSRC_PREV                                      0x0
+#define ODN_PDP_CTRL_CKEYSRC_CUR                                       0x1
+
+#define ODN_PDP_MEMCTRL_MEMREFRESH_ALWAYS                      0x0
+#define ODN_PDP_MEMCTRL_MEMREFRESH_HBLNK                       0x1
+#define ODN_PDP_MEMCTRL_MEMREFRESH_VBLNK                       0x2
+#define ODN_PDP_MEMCTRL_MEMREFRESH_BOTH                                0x3
+
+#define ODN_PDP_3D_CTRL_BLENDSEL_BGND_WITH_POS0                0x0
+#define ODN_PDP_3D_CTRL_BLENDSEL_POS0_WITH_POS1                0x1
+#define ODN_PDP_3D_CTRL_BLENDSEL_POS1_WITH_POS2                0x2
+#define ODN_PDP_3D_CTRL_BLENDSEL_POS2_WITH_POS3                0x3
+#define ODN_PDP_3D_CTRL_BLENDSEL_POS3_WITH_POS4                0x4
+#define ODN_PDP_3D_CTRL_BLENDSEL_POS4_WITH_POS5                0x5
+#define ODN_PDP_3D_CTRL_BLENDSEL_POS5_WITH_POS6                0x6
+#define ODN_PDP_3D_CTRL_BLENDSEL_POS6_WITH_POS7                0x7
+
+#define ODN_PDP_UADDR_UV_STRIDE_EQUAL_TO_Y_STRIDE      0x0
+#define ODN_PDP_UADDR_UV_STRIDE_EQUAL_TO_DOUBLE_Y_STRIDE 0x1
+#define ODN_PDP_UADDR_UV_STRIDE_EQUAL_TO_HALF_Y_STRIDE 0x2
+
+#define ODN_PDP_PROCAMP_OUTPUT_OFFSET_FRACTIONAL_BITS 1
+#define ODN_PDP_PROCAMP_COEFFICIENT_FRACTIONAL_BITS    10
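+
+/* Illustrative note (editor's example, not from the .def source): these
+ * describe fixed-point encodings.  With 10 fractional bits a procamp
+ * coefficient of 1.0 is stored as 1 << 10 = 1024 (0.5 as 512), and with
+ * 1 fractional bit an output offset of 4.5 is stored as 9, i.e.
+ *
+ *	reg_value = real_value * (1 << FRACTIONAL_BITS)
+ */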
+
+/*---------------------------------------------------------------------------*/
+
+#endif /* ODN_PDP_REGS_H */
diff --git a/drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/odin_regs.h b/drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/odin_regs.h
new file mode 100644 (file)
index 0000000..5d58216
--- /dev/null
@@ -0,0 +1,1026 @@
+/******************************************************************************
+@Title          Odin system control register definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Odin FPGA register defs for IMG 3rd generation TCF
+
+       Auto generated headers, eg. odn_core.h:
+               regconv -d . -a 8 odn_core.def
+
+       Source files :
+               odn_core.def
+               mca_debug.def
+               sai_rx_debug.def
+               sai_tx_debug.def
+               ad_tx.def
+
+       Changes:
+               Removed obsolete copyright dates
+               Changed lower case to upper case
+                       (eg. odn_core changed to ODN_CORE)
+               Changed PVR5__ to ODN_
+               Merged multiple .def files into one header
+
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+******************************************************************************/
+
+/* tab size 4 */
+
+#ifndef _ODIN_REGS_H_
+#define _ODIN_REGS_H_
+
+/******************************
+  Generated from: odn_core.def
+*******************************/
+
+/*
+       Register ID
+*/
+#define ODN_CORE_ID                             0x0000
+#define ODN_ID_VARIANT_MASK                     0x0000FFFFU
+#define ODN_ID_VARIANT_SHIFT                    0
+#define ODN_ID_VARIANT_SIGNED                   0
+
+#define ODN_ID_ID_MASK                          0xFFFF0000U
+#define ODN_ID_ID_SHIFT                         16
+#define ODN_ID_ID_SIGNED                        0
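+
+/* Illustrative sketch (editor's example, not generated from odn_core.def):
+ * every field in this file follows the same MASK/SHIFT/SIGNED pattern, so a
+ * raw value read from ODN_CORE_ID would be decoded as:
+ *
+ *	id      = (reg & ODN_ID_ID_MASK)      >> ODN_ID_ID_SHIFT;
+ *	variant = (reg & ODN_ID_VARIANT_MASK) >> ODN_ID_VARIANT_SHIFT;
+ *
+ * The *_SIGNED macros record whether the extracted field should then be
+ * sign-extended (0 here means unsigned).
+ */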
+
+/*
+       Register REL
+*/
+#define ODN_CORE_REL                            0x0004
+#define ODN_REL_MINOR_MASK                      0x0000FFFFU
+#define ODN_REL_MINOR_SHIFT                     0
+#define ODN_REL_MINOR_SIGNED                    0
+
+#define ODN_REL_MAJOR_MASK                      0xFFFF0000U
+#define ODN_REL_MAJOR_SHIFT                     16
+#define ODN_REL_MAJOR_SIGNED                    0
+
+/*
+       Register CHANGE_SET
+*/
+#define ODN_CORE_CHANGE_SET                     0x0008
+#define ODN_CHANGE_SET_SET_MASK                 0xFFFFFFFFU
+#define ODN_CHANGE_SET_SET_SHIFT                0
+#define ODN_CHANGE_SET_SET_SIGNED               0
+
+/*
+       Register USER_ID
+*/
+#define ODN_CORE_USER_ID                        0x000C
+#define ODN_USER_ID_ID_MASK                     0x000000FFU
+#define ODN_USER_ID_ID_SHIFT                    0
+#define ODN_USER_ID_ID_SIGNED                   0
+
+/*
+       Register USER_BUILD
+*/
+#define ODN_CORE_USER_BUILD                     0x0010
+#define ODN_USER_BUILD_BUILD_MASK               0xFFFFFFFFU
+#define ODN_USER_BUILD_BUILD_SHIFT              0
+#define ODN_USER_BUILD_BUILD_SIGNED             0
+
+/*
+       Register SW_IF_VERSION
+*/
+#define ODN_CORE_SW_IF_VERSION                  0x0014
+#define ODN_SW_IF_VERSION_VERSION_MASK          0x0000FFFFU
+#define ODN_SW_IF_VERSION_VERSION_SHIFT         0
+#define ODN_SW_IF_VERSION_VERSION_SIGNED        0
+
+/*
+       Register INTERNAL_RESETN
+*/
+#define ODN_CORE_INTERNAL_RESETN                0x0080
+#define ODN_INTERNAL_RESETN_DDR_MASK            0x00000001U
+#define ODN_INTERNAL_RESETN_DDR_SHIFT           0
+#define ODN_INTERNAL_RESETN_DDR_SIGNED          0
+
+#define ODN_INTERNAL_RESETN_MIG0_MASK           0x00000002U
+#define ODN_INTERNAL_RESETN_MIG0_SHIFT          1
+#define ODN_INTERNAL_RESETN_MIG0_SIGNED         0
+
+#define ODN_INTERNAL_RESETN_MIG1_MASK           0x00000004U
+#define ODN_INTERNAL_RESETN_MIG1_SHIFT          2
+#define ODN_INTERNAL_RESETN_MIG1_SIGNED         0
+
+#define ODN_INTERNAL_RESETN_PDP1_MASK           0x00000008U
+#define ODN_INTERNAL_RESETN_PDP1_SHIFT          3
+#define ODN_INTERNAL_RESETN_PDP1_SIGNED         0
+
+#define ODN_INTERNAL_RESETN_PDP2_MASK           0x00000010U
+#define ODN_INTERNAL_RESETN_PDP2_SHIFT          4
+#define ODN_INTERNAL_RESETN_PDP2_SIGNED         0
+
+#define ODN_INTERNAL_RESETN_PERIP_MASK          0x00000020U
+#define ODN_INTERNAL_RESETN_PERIP_SHIFT         5
+#define ODN_INTERNAL_RESETN_PERIP_SIGNED        0
+
+#define ODN_INTERNAL_RESETN_GIST_MASK           0x00000040U
+#define ODN_INTERNAL_RESETN_GIST_SHIFT          6
+#define ODN_INTERNAL_RESETN_GIST_SIGNED         0
+
+#define ODN_INTERNAL_RESETN_PIKE_MASK           0x00000080U
+#define ODN_INTERNAL_RESETN_PIKE_SHIFT          7
+#define ODN_INTERNAL_RESETN_PIKE_SIGNED         0
+
+/*
+       Register EXTERNAL_RESETN
+*/
+#define ODN_CORE_EXTERNAL_RESETN                0x0084
+#define ODN_EXTERNAL_RESETN_DUT_MASK            0x00000001U
+#define ODN_EXTERNAL_RESETN_DUT_SHIFT           0
+#define ODN_EXTERNAL_RESETN_DUT_SIGNED          0
+
+#define ODN_EXTERNAL_RESETN_DUT_SPI_MASK        0x00000002U
+#define ODN_EXTERNAL_RESETN_DUT_SPI_SHIFT       1
+#define ODN_EXTERNAL_RESETN_DUT_SPI_SIGNED      0
+
+#define ODN_EXTERNAL_RESETN_DUT_PEP_DDR_MASK    0x00000004U
+#define ODN_EXTERNAL_RESETN_DUT_PEP_DDR_SHIFT   2
+#define ODN_EXTERNAL_RESETN_DUT_PEP_DDR_SIGNED  0
+
+#define ODN_EXTERNAL_RESETN_DUT_IF_MASK         0x00000008U
+#define ODN_EXTERNAL_RESETN_DUT_IF_SHIFT        3
+#define ODN_EXTERNAL_RESETN_DUT_IF_SIGNED       0
+
+#define ODN_EXTERNAL_RESETN_DUT1_MASK           0x00000010U
+#define ODN_EXTERNAL_RESETN_DUT1_SHIFT          4
+#define ODN_EXTERNAL_RESETN_DUT1_SIGNED         0
+
+#define ODN_EXTERNAL_RESETN_DUT2_MASK           0x00000020U
+#define ODN_EXTERNAL_RESETN_DUT2_SHIFT          5
+#define ODN_EXTERNAL_RESETN_DUT2_SIGNED         0
+
+/*
+       Register EXTERNAL_RESET
+*/
+#define ODN_CORE_EXTERNAL_RESET                 0x0088
+#define ODN_EXTERNAL_RESET_PVT_CAL_MASK         0x00000001U
+#define ODN_EXTERNAL_RESET_PVT_CAL_SHIFT        0
+#define ODN_EXTERNAL_RESET_PVT_CAL_SIGNED       0
+
+#define ODN_EXTERNAL_RESET_PLL_MASK             0x00000002U
+#define ODN_EXTERNAL_RESET_PLL_SHIFT            1
+#define ODN_EXTERNAL_RESET_PLL_SIGNED           0
+
+/*
+       Register INTERNAL_AUTO_RESETN
+*/
+#define ODN_CORE_INTERNAL_AUTO_RESETN           0x008C
+#define ODN_INTERNAL_AUTO_RESETN_AUX_MASK       0x00000001U
+#define ODN_INTERNAL_AUTO_RESETN_AUX_SHIFT      0
+#define ODN_INTERNAL_AUTO_RESETN_AUX_SIGNED     0
+
+/*
+       Register CLK_GEN_RESET
+*/
+#define ODN_CORE_CLK_GEN_RESET                  0x0090
+#define ODN_CLK_GEN_RESET_DUT_CORE_MMCM_MASK    0x00000001U
+#define ODN_CLK_GEN_RESET_DUT_CORE_MMCM_SHIFT   0
+#define ODN_CLK_GEN_RESET_DUT_CORE_MMCM_SIGNED  0
+
+#define ODN_CLK_GEN_RESET_DUT_IF_MMCM_MASK      0x00000002U
+#define ODN_CLK_GEN_RESET_DUT_IF_MMCM_SHIFT     1
+#define ODN_CLK_GEN_RESET_DUT_IF_MMCM_SIGNED    0
+
+#define ODN_CLK_GEN_RESET_MULTI_MMCM_MASK       0x00000004U
+#define ODN_CLK_GEN_RESET_MULTI_MMCM_SHIFT      2
+#define ODN_CLK_GEN_RESET_MULTI_MMCM_SIGNED     0
+
+#define ODN_CLK_GEN_RESET_PDP_MMCM_MASK         0x00000008U
+#define ODN_CLK_GEN_RESET_PDP_MMCM_SHIFT        3
+#define ODN_CLK_GEN_RESET_PDP_MMCM_SIGNED       0
+
+/*
+       Register INTERRUPT_STATUS
+*/
+#define ODN_CORE_INTERRUPT_STATUS               0x0100
+#define ODN_INTERRUPT_STATUS_DUT_MASK           0x00000001U
+#define ODN_INTERRUPT_STATUS_DUT_SHIFT          0
+#define ODN_INTERRUPT_STATUS_DUT_SIGNED         0
+
+#define ODN_INTERRUPT_STATUS_PDP1_MASK          0x00000002U
+#define ODN_INTERRUPT_STATUS_PDP1_SHIFT         1
+#define ODN_INTERRUPT_STATUS_PDP1_SIGNED        0
+
+#define ODN_INTERRUPT_STATUS_PDP2_MASK          0x00000004U
+#define ODN_INTERRUPT_STATUS_PDP2_SHIFT         2
+#define ODN_INTERRUPT_STATUS_PDP2_SIGNED        0
+
+#define ODN_INTERRUPT_STATUS_PERIP_MASK         0x00000008U
+#define ODN_INTERRUPT_STATUS_PERIP_SHIFT        3
+#define ODN_INTERRUPT_STATUS_PERIP_SIGNED       0
+
+#define ODN_INTERRUPT_STATUS_UART_MASK          0x00000010U
+#define ODN_INTERRUPT_STATUS_UART_SHIFT         4
+#define ODN_INTERRUPT_STATUS_UART_SIGNED        0
+
+#define ODN_INTERRUPT_STATUS_GIST_IN_LNK_ERR_MASK 0x00000020U
+#define ODN_INTERRUPT_STATUS_GIST_IN_LNK_ERR_SHIFT 5
+#define ODN_INTERRUPT_STATUS_GIST_IN_LNK_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_STATUS_GIST_IN_MB_ERR_MASK 0x00000040U
+#define ODN_INTERRUPT_STATUS_GIST_IN_MB_ERR_SHIFT 6
+#define ODN_INTERRUPT_STATUS_GIST_IN_MB_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_STATUS_GIST_OUT_LNK_ERR_MASK 0x00000080U
+#define ODN_INTERRUPT_STATUS_GIST_OUT_LNK_ERR_SHIFT 7
+#define ODN_INTERRUPT_STATUS_GIST_OUT_LNK_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_STATUS_GIST_OUT_MB_ERR_MASK 0x00000100U
+#define ODN_INTERRUPT_STATUS_GIST_OUT_MB_ERR_SHIFT 8
+#define ODN_INTERRUPT_STATUS_GIST_OUT_MB_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_STATUS_DUT2_MASK          0x00000200U
+#define ODN_INTERRUPT_STATUS_DUT2_SHIFT         9
+#define ODN_INTERRUPT_STATUS_DUT2_SIGNED        0
+
+#define ODN_INTERRUPT_STATUS_AXI_LOCKUP_PROTECTION_MASK 0x00000400U
+#define ODN_INTERRUPT_STATUS_AXI_LOCKUP_PROTECTION_SHIFT 10
+#define ODN_INTERRUPT_STATUS_AXI_LOCKUP_PROTECTION_SIGNED 0
+
+#define ODN_INTERRUPT_STATUS_CDMA_MASK          0x00001800U
+#define ODN_INTERRUPT_STATUS_CDMA_SHIFT         11
+#define ODN_INTERRUPT_STATUS_CDMA_SIGNED        0
+
+#define ODN_INTERRUPT_STATUS_OS_IRQ_MASK        0x001FE000U
+#define ODN_INTERRUPT_STATUS_OS_IRQ_SHIFT       13
+#define ODN_INTERRUPT_STATUS_OS_IRQ_SIGNED      0
+
+#define ODN_INTERRUPT_STATUS_IRQ_TEST_MASK      0x40000000U
+#define ODN_INTERRUPT_STATUS_IRQ_TEST_SHIFT     30
+#define ODN_INTERRUPT_STATUS_IRQ_TEST_SIGNED    0
+
+#define ODN_INTERRUPT_STATUS_MASTER_STATUS_MASK 0x80000000U
+#define ODN_INTERRUPT_STATUS_MASTER_STATUS_SHIFT 31
+#define ODN_INTERRUPT_STATUS_MASTER_STATUS_SIGNED 0
+
+/*
+       Register INTERRUPT_ENABLE
+*/
+#define ODN_CORE_INTERRUPT_ENABLE               0x0104
+#define ODN_INTERRUPT_ENABLE_DUT_MASK           0x00000001U
+#define ODN_INTERRUPT_ENABLE_DUT_SHIFT          0
+#define ODN_INTERRUPT_ENABLE_DUT_SIGNED         0
+
+#define ODN_INTERRUPT_ENABLE_PDP1_MASK          0x00000002U
+#define ODN_INTERRUPT_ENABLE_PDP1_SHIFT         1
+#define ODN_INTERRUPT_ENABLE_PDP1_SIGNED        0
+
+#define ODN_INTERRUPT_ENABLE_PDP2_MASK          0x00000004U
+#define ODN_INTERRUPT_ENABLE_PDP2_SHIFT         2
+#define ODN_INTERRUPT_ENABLE_PDP2_SIGNED        0
+
+#define ODN_INTERRUPT_ENABLE_PERIP_MASK         0x00000008U
+#define ODN_INTERRUPT_ENABLE_PERIP_SHIFT        3
+#define ODN_INTERRUPT_ENABLE_PERIP_SIGNED       0
+
+#define ODN_INTERRUPT_ENABLE_UART_MASK          0x00000010U
+#define ODN_INTERRUPT_ENABLE_UART_SHIFT         4
+#define ODN_INTERRUPT_ENABLE_UART_SIGNED        0
+
+#define ODN_INTERRUPT_ENABLE_GIST_IN_LNK_ERR_MASK 0x00000020U
+#define ODN_INTERRUPT_ENABLE_GIST_IN_LNK_ERR_SHIFT 5
+#define ODN_INTERRUPT_ENABLE_GIST_IN_LNK_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_ENABLE_GIST_IN_MB_ERR_MASK 0x00000040U
+#define ODN_INTERRUPT_ENABLE_GIST_IN_MB_ERR_SHIFT 6
+#define ODN_INTERRUPT_ENABLE_GIST_IN_MB_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_ENABLE_GIST_OUT_LNK_ERR_MASK 0x00000080U
+#define ODN_INTERRUPT_ENABLE_GIST_OUT_LNK_ERR_SHIFT 7
+#define ODN_INTERRUPT_ENABLE_GIST_OUT_LNK_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_ENABLE_GIST_OUT_MB_ERR_MASK 0x00000100U
+#define ODN_INTERRUPT_ENABLE_GIST_OUT_MB_ERR_SHIFT 8
+#define ODN_INTERRUPT_ENABLE_GIST_OUT_MB_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_ENABLE_DUT2_MASK          0x00000200U
+#define ODN_INTERRUPT_ENABLE_DUT2_SHIFT         9
+#define ODN_INTERRUPT_ENABLE_DUT2_SIGNED        0
+
+#define ODN_INTERRUPT_ENABLE_AXI_LOCKUP_PROTECTION_MASK 0x00000400U
+#define ODN_INTERRUPT_ENABLE_AXI_LOCKUP_PROTECTION_SHIFT 10
+#define ODN_INTERRUPT_ENABLE_AXI_LOCKUP_PROTECTION_SIGNED 0
+
+#define ODN_INTERRUPT_ENABLE_CDMA_MASK          0x00001800U
+#define ODN_INTERRUPT_ENABLE_CDMA_SHIFT         11
+#define ODN_INTERRUPT_ENABLE_CDMA_SIGNED        0
+
+#define ODN_INTERRUPT_ENABLE_OS_IRQ_MASK        0x001FE000U
+#define ODN_INTERRUPT_ENABLE_OS_IRQ_SHIFT       13
+#define ODN_INTERRUPT_ENABLE_OS_IRQ_SIGNED      0
+
+#define ODN_INTERRUPT_ENABLE_IRQ_TEST_MASK      0x40000000U
+#define ODN_INTERRUPT_ENABLE_IRQ_TEST_SHIFT     30
+#define ODN_INTERRUPT_ENABLE_IRQ_TEST_SIGNED    0
+
+#define ODN_INTERRUPT_ENABLE_MASTER_ENABLE_MASK 0x80000000U
+#define ODN_INTERRUPT_ENABLE_MASTER_ENABLE_SHIFT 31
+#define ODN_INTERRUPT_ENABLE_MASTER_ENABLE_SIGNED 0
+
+/*
+       Register INTERRUPT_CLR
+*/
+#define ODN_CORE_INTERRUPT_CLR                  0x010C
+#define ODN_INTERRUPT_CLR_DUT_MASK              0x00000001U
+#define ODN_INTERRUPT_CLR_DUT_SHIFT             0
+#define ODN_INTERRUPT_CLR_DUT_SIGNED            0
+
+#define ODN_INTERRUPT_CLR_PDP1_MASK             0x00000002U
+#define ODN_INTERRUPT_CLR_PDP1_SHIFT            1
+#define ODN_INTERRUPT_CLR_PDP1_SIGNED           0
+
+#define ODN_INTERRUPT_CLR_PDP2_MASK             0x00000004U
+#define ODN_INTERRUPT_CLR_PDP2_SHIFT            2
+#define ODN_INTERRUPT_CLR_PDP2_SIGNED           0
+
+#define ODN_INTERRUPT_CLR_PERIP_MASK            0x00000008U
+#define ODN_INTERRUPT_CLR_PERIP_SHIFT           3
+#define ODN_INTERRUPT_CLR_PERIP_SIGNED          0
+
+#define ODN_INTERRUPT_CLR_UART_MASK             0x00000010U
+#define ODN_INTERRUPT_CLR_UART_SHIFT            4
+#define ODN_INTERRUPT_CLR_UART_SIGNED           0
+
+#define ODN_INTERRUPT_CLR_GIST_IN_LNK_ERR_MASK  0x00000020U
+#define ODN_INTERRUPT_CLR_GIST_IN_LNK_ERR_SHIFT 5
+#define ODN_INTERRUPT_CLR_GIST_IN_LNK_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_CLR_GIST_IN_MB_ERR_MASK   0x00000040U
+#define ODN_INTERRUPT_CLR_GIST_IN_MB_ERR_SHIFT  6
+#define ODN_INTERRUPT_CLR_GIST_IN_MB_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_CLR_GIST_OUT_LNK_ERR_MASK 0x00000080U
+#define ODN_INTERRUPT_CLR_GIST_OUT_LNK_ERR_SHIFT 7
+#define ODN_INTERRUPT_CLR_GIST_OUT_LNK_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_CLR_GIST_OUT_MB_ERR_MASK  0x00000100U
+#define ODN_INTERRUPT_CLR_GIST_OUT_MB_ERR_SHIFT 8
+#define ODN_INTERRUPT_CLR_GIST_OUT_MB_ERR_SIGNED 0
+
+#define ODN_INTERRUPT_CLR_DUT2_MASK             0x00000200U
+#define ODN_INTERRUPT_CLR_DUT2_SHIFT            9
+#define ODN_INTERRUPT_CLR_DUT2_SIGNED           0
+
+#define ODN_INTERRUPT_CLR_AXI_LOCKUP_PROTECTION_MASK 0x00000400U
+#define ODN_INTERRUPT_CLR_AXI_LOCKUP_PROTECTION_SHIFT 10
+#define ODN_INTERRUPT_CLR_AXI_LOCKUP_PROTECTION_SIGNED 0
+
+#define ODN_INTERRUPT_CLR_CDMA_MASK             0x00001800U
+#define ODN_INTERRUPT_CLR_CDMA_SHIFT            11
+#define ODN_INTERRUPT_CLR_CDMA_SIGNED           0
+
+#define ODN_INTERRUPT_CLR_OS_IRQ_MASK           0x001FE000U
+#define ODN_INTERRUPT_CLR_OS_IRQ_SHIFT          13
+#define ODN_INTERRUPT_CLR_OS_IRQ_SIGNED         0
+
+#define ODN_INTERRUPT_CLR_IRQ_TEST_MASK         0x40000000U
+#define ODN_INTERRUPT_CLR_IRQ_TEST_SHIFT        30
+#define ODN_INTERRUPT_CLR_IRQ_TEST_SIGNED       0
+
+#define ODN_INTERRUPT_CLR_MASTER_CLEAR_MASK     0x80000000U
+#define ODN_INTERRUPT_CLR_MASTER_CLEAR_SHIFT    31
+#define ODN_INTERRUPT_CLR_MASTER_CLEAR_SIGNED   0
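+
+/* Illustrative sketch (editor's example, not generated from odn_core.def):
+ * a typical sequence is to enable a source together with the master enable
+ * bit, then acknowledge it again from the interrupt handler, e.g.
+ *
+ *	u32 val = ioread32(base + ODN_CORE_INTERRUPT_ENABLE);
+ *	val |= ODN_INTERRUPT_ENABLE_DUT_MASK |
+ *	       ODN_INTERRUPT_ENABLE_MASTER_ENABLE_MASK;
+ *	iowrite32(val, base + ODN_CORE_INTERRUPT_ENABLE);
+ *
+ * and, in the handler, after reading ODN_CORE_INTERRUPT_STATUS:
+ *
+ *	iowrite32(ODN_INTERRUPT_CLR_DUT_MASK, base + ODN_CORE_INTERRUPT_CLR);
+ *
+ * where "base" is assumed to be the ioremapped Odin system register bank.
+ */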
+
+/*
+       Register INTERRUPT_TEST
+*/
+#define ODN_CORE_INTERRUPT_TEST                 0x0110
+#define ODN_INTERRUPT_TEST_INTERRUPT_TEST_MASK  0x00000001U
+#define ODN_INTERRUPT_TEST_INTERRUPT_TEST_SHIFT 0
+#define ODN_INTERRUPT_TEST_INTERRUPT_TEST_SIGNED 0
+
+/*
+       Register INTERRUPT_TIMEOUT_CLR
+*/
+#define ODN_CORE_INTERRUPT_TIMEOUT_CLR      0x0114
+#define ODN_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_MASK 0x00000002U
+#define ODN_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_SHIFT 1
+#define ODN_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_SIGNED 0
+
+#define ODN_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_MASK 0x00000001U
+#define ODN_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_SHIFT 0
+#define ODN_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_SIGNED 0
+
+/*
+       Register INTERRUPT_TIMEOUT
+*/
+#define ODN_CORE_INTERRUPT_TIMEOUT          0x0118
+#define ODN_INTERRUPT_TIMEOUT_INTERRUPT_TIMEOUT_THRESHOLD_COUNTER_MASK 0xFFFFFFFFU
+#define ODN_INTERRUPT_TIMEOUT_INTERRUPT_TIMEOUT_THRESHOLD_COUNTER_SHIFT 0
+#define ODN_INTERRUPT_TIMEOUT_INTERRUPT_TIMEOUT_THRESHOLD_COUNTER_SIGNED 0
+
+/*
+       Register SYSTEM_ID
+*/
+#define ODN_CORE_SYSTEM_ID                      0x011C
+#define ODN_SYSTEM_ID_ID_MASK                   0x0000FFFFU
+#define ODN_SYSTEM_ID_ID_SHIFT                  0
+#define ODN_SYSTEM_ID_ID_SIGNED                 0
+
+/*
+       Register SUPPORTED_FEATURES
+*/
+#define ODN_CORE_SUPPORTED_FEATURES             0x0120
+#define ODN_SUPPORTED_FEATURES_UNIMPLEMENTED_FREATURES_MASK 0xFFFFFFFEU
+#define ODN_SUPPORTED_FEATURES_UNIMPLEMENTED_FREATURES_SHIFT 1
+#define ODN_SUPPORTED_FEATURES_UNIMPLEMENTED_FREATURES_SIGNED 0
+
+#define ODN_SUPPORTED_FEATURES_2X_CDMA_AND_IRQS_MASK 0x00000001U
+#define ODN_SUPPORTED_FEATURES_2X_CDMA_AND_IRQS_SHIFT 0
+#define ODN_SUPPORTED_FEATURES_2X_CDMA_AND_IRQS_SIGNED 0
+
+/*
+       Register NUM_GPIO
+*/
+#define ODN_CORE_NUM_GPIO                       0x0180
+#define ODN_NUM_GPIO_NUMBER_MASK                0x0000000FU
+#define ODN_NUM_GPIO_NUMBER_SHIFT               0
+#define ODN_NUM_GPIO_NUMBER_SIGNED              0
+
+/*
+       Register GPIO_EN
+*/
+#define ODN_CORE_GPIO_EN                        0x0184
+#define ODN_GPIO_EN_DIRECTION_MASK              0x000000FFU
+#define ODN_GPIO_EN_DIRECTION_SHIFT             0
+#define ODN_GPIO_EN_DIRECTION_SIGNED            0
+
+/*
+       Register GPIO
+*/
+#define ODN_CORE_GPIO                           0x0188
+#define ODN_GPIO_GPIO_MASK                      0x000000FFU
+#define ODN_GPIO_GPIO_SHIFT                     0
+#define ODN_GPIO_GPIO_SIGNED                    0
+
+/*
+       Register NUM_DUT_CTRL
+*/
+#define ODN_CORE_NUM_DUT_CTRL                   0x0190
+#define ODN_NUM_DUT_CTRL_NUM_PINS_MASK          0xFFFFFFFFU
+#define ODN_NUM_DUT_CTRL_NUM_PINS_SHIFT         0
+#define ODN_NUM_DUT_CTRL_NUM_PINS_SIGNED        0
+
+/*
+       Register DUT_CTRL1
+*/
+#define ODN_CORE_DUT_CTRL1                      0x0194
+#define ODN_DUT_CTRL1_CONTROL1_MASK             0x3FFFFFFFU
+#define ODN_DUT_CTRL1_CONTROL1_SHIFT            0
+#define ODN_DUT_CTRL1_CONTROL1_SIGNED           0
+
+#define ODN_DUT_CTRL1_FBDC_BYPASS_MASK          0x40000000U
+#define ODN_DUT_CTRL1_FBDC_BYPASS_SHIFT         30
+#define ODN_DUT_CTRL1_FBDC_BYPASS_SIGNED        0
+
+#define ODN_DUT_CTRL1_DUT_MST_OFFSET_MASK       0x80000000U
+#define ODN_DUT_CTRL1_DUT_MST_OFFSET_SHIFT      31
+#define ODN_DUT_CTRL1_DUT_MST_OFFSET_SIGNED     0
+
+/*
+       Register DUT_CTRL2
+*/
+#define ODN_CORE_DUT_CTRL2                      0x0198
+#define ODN_DUT_CTRL2_CONTROL2_MASK             0xFFFFFFFFU
+#define ODN_DUT_CTRL2_CONTROL2_SHIFT            0
+#define ODN_DUT_CTRL2_CONTROL2_SIGNED           0
+
+/*
+       Register NUM_DUT_STAT
+*/
+#define ODN_CORE_NUM_DUT_STAT                   0x019C
+#define ODN_NUM_DUT_STAT_NUM_PINS_MASK          0xFFFFFFFFU
+#define ODN_NUM_DUT_STAT_NUM_PINS_SHIFT         0
+#define ODN_NUM_DUT_STAT_NUM_PINS_SIGNED        0
+
+/*
+       Register DUT_STAT1
+*/
+#define ODN_CORE_DUT_STAT1                      0x01A0
+#define ODN_DUT_STAT1_STATUS1_MASK              0xFFFFFFFFU
+#define ODN_DUT_STAT1_STATUS1_SHIFT             0
+#define ODN_DUT_STAT1_STATUS1_SIGNED            0
+
+/*
+       Register DUT_STAT2
+*/
+#define ODN_CORE_DUT_STAT2                      0x01A4
+#define ODN_DUT_STAT2_STATUS2_MASK              0xFFFFFFFFU
+#define ODN_DUT_STAT2_STATUS2_SHIFT             0
+#define ODN_DUT_STAT2_STATUS2_SIGNED            0
+
+/*
+       Register DASH_LEDS
+*/
+#define ODN_CORE_DASH_LEDS                      0x01A8
+#define ODN_DASH_LEDS_REPA_MASK                 0xFFF00000U
+#define ODN_DASH_LEDS_REPA_SHIFT                20
+#define ODN_DASH_LEDS_REPA_SIGNED               0
+
+#define ODN_DASH_LEDS_PIKE_MASK                 0x00000FFFU
+#define ODN_DASH_LEDS_PIKE_SHIFT                0
+#define ODN_DASH_LEDS_PIKE_SIGNED               0
+
+/*
+       Register DUT_CLK_INFO
+*/
+#define ODN_CORE_DUT_CLK_INFO                   0x01B0
+#define ODN_DUT_CLK_INFO_CORE_MASK              0x0000FFFFU
+#define ODN_DUT_CLK_INFO_CORE_SHIFT             0
+#define ODN_DUT_CLK_INFO_CORE_SIGNED            0
+
+#define ODN_DUT_CLK_INFO_MEM_MASK               0xFFFF0000U
+#define ODN_DUT_CLK_INFO_MEM_SHIFT              16
+#define ODN_DUT_CLK_INFO_MEM_SIGNED             0
+
+/*
+       Register DUT_CLK_PHSE
+*/
+#define ODN_CORE_DUT_CLK_PHSE                   0x01B4
+#define ODN_DUT_CLK_PHSE_MEM_REQ_MASK           0x0000FFFFU
+#define ODN_DUT_CLK_PHSE_MEM_REQ_SHIFT          0
+#define ODN_DUT_CLK_PHSE_MEM_REQ_SIGNED         0
+
+#define ODN_DUT_CLK_PHSE_MEM_RD_MASK            0xFFFF0000U
+#define ODN_DUT_CLK_PHSE_MEM_RD_SHIFT           16
+#define ODN_DUT_CLK_PHSE_MEM_RD_SIGNED          0
+
+/*
+       Register CORE_STATUS
+*/
+#define ODN_CORE_CORE_STATUS                    0x0200
+#define ODN_CORE_STATUS_PCIE_USER_LNK_UP_MASK   0x00000001U
+#define ODN_CORE_STATUS_PCIE_USER_LNK_UP_SHIFT  0
+#define ODN_CORE_STATUS_PCIE_USER_LNK_UP_SIGNED 0
+
+#define ODN_CORE_STATUS_MIG_C0_MMCM_LOCKED_MASK 0x00000010U
+#define ODN_CORE_STATUS_MIG_C0_MMCM_LOCKED_SHIFT 4
+#define ODN_CORE_STATUS_MIG_C0_MMCM_LOCKED_SIGNED 0
+
+#define ODN_CORE_STATUS_MIG_C0_INIT_CALIB_COMPLETE_MASK 0x00000020U
+#define ODN_CORE_STATUS_MIG_C0_INIT_CALIB_COMPLETE_SHIFT 5
+#define ODN_CORE_STATUS_MIG_C0_INIT_CALIB_COMPLETE_SIGNED 0
+
+#define ODN_CORE_STATUS_MIG_C1_MMCM_LOCKED_MASK 0x00000040U
+#define ODN_CORE_STATUS_MIG_C1_MMCM_LOCKED_SHIFT 6
+#define ODN_CORE_STATUS_MIG_C1_MMCM_LOCKED_SIGNED 0
+
+#define ODN_CORE_STATUS_MIG_C1_INIT_CALIB_COMPLETE_MASK 0x00000080U
+#define ODN_CORE_STATUS_MIG_C1_INIT_CALIB_COMPLETE_SHIFT 7
+#define ODN_CORE_STATUS_MIG_C1_INIT_CALIB_COMPLETE_SIGNED 0
+
+#define ODN_CORE_STATUS_PERIP_IMG2AXI_IDLE_MASK 0x00000100U
+#define ODN_CORE_STATUS_PERIP_IMG2AXI_IDLE_SHIFT 8
+#define ODN_CORE_STATUS_PERIP_IMG2AXI_IDLE_SIGNED 0
+
+#define ODN_CORE_STATUS_PERIP_AXI2IMG_IDLE_MASK 0x00000200U
+#define ODN_CORE_STATUS_PERIP_AXI2IMG_IDLE_SHIFT 9
+#define ODN_CORE_STATUS_PERIP_AXI2IMG_IDLE_SIGNED 0
+
+#define ODN_CORE_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_MASK 0x00001000U
+#define ODN_CORE_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_SHIFT 12
+#define ODN_CORE_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_SIGNED 0
+
+#define ODN_CORE_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_MASK 0x00002000U
+#define ODN_CORE_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_SHIFT 13
+#define ODN_CORE_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_SIGNED 0
+
+/*
+       Register CORE_CONTROL
+*/
+#define ODN_CORE_CORE_CONTROL                   0x0204
+#define ODN_CORE_CONTROL_BAR4_OFFSET_MASK       0x0000001FU
+#define ODN_CORE_CONTROL_BAR4_OFFSET_SHIFT      0
+#define ODN_CORE_CONTROL_BAR4_OFFSET_SIGNED     0
+
+#define ODN_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_MASK 0x00000300U
+#define ODN_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_SHIFT 8
+#define ODN_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_SIGNED 0
+
+#define ODN_CORE_CONTROL_HDMI_MODULE_EN_MASK    0x00001C00U
+#define ODN_CORE_CONTROL_HDMI_MODULE_EN_SHIFT   10
+#define ODN_CORE_CONTROL_HDMI_MODULE_EN_SIGNED  0
+
+#define ODN_CORE_CONTROL_MCU_COMMUNICATOR_EN_MASK 0x00002000U
+#define ODN_CORE_CONTROL_MCU_COMMUNICATOR_EN_SHIFT 13
+#define ODN_CORE_CONTROL_MCU_COMMUNICATOR_EN_SIGNED 0
+
+#define ODN_CORE_CONTROL_PDP1_OFFSET_MASK       0x00070000U
+#define ODN_CORE_CONTROL_PDP1_OFFSET_SHIFT      16
+#define ODN_CORE_CONTROL_PDP1_OFFSET_SIGNED     0
+
+#define ODN_CORE_CONTROL_PDP2_OFFSET_MASK       0x00700000U
+#define ODN_CORE_CONTROL_PDP2_OFFSET_SHIFT      20
+#define ODN_CORE_CONTROL_PDP2_OFFSET_SIGNED     0
+
+#define ODN_CORE_CONTROL_DUT_OFFSET_MASK        0x07000000U
+#define ODN_CORE_CONTROL_DUT_OFFSET_SHIFT       24
+#define ODN_CORE_CONTROL_DUT_OFFSET_SIGNED      0
+
+/*
+       Register REG_BANK_STATUS
+*/
+#define ODN_CORE_REG_BANK_STATUS                0x0208
+#define ODN_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_MASK 0xFFFFFFFFU
+#define ODN_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_SHIFT 0
+#define ODN_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_SIGNED 0
+
+/*
+       Register MMCM_LOCK_STATUS
+*/
+#define ODN_CORE_MMCM_LOCK_STATUS               0x020C
+#define ODN_MMCM_LOCK_STATUS_DUT_CORE_MASK      0x00000001U
+#define ODN_MMCM_LOCK_STATUS_DUT_CORE_SHIFT     0
+#define ODN_MMCM_LOCK_STATUS_DUT_CORE_SIGNED    0
+
+#define ODN_MMCM_LOCK_STATUS_DUT_IF_MASK        0x00000002U
+#define ODN_MMCM_LOCK_STATUS_DUT_IF_SHIFT       1
+#define ODN_MMCM_LOCK_STATUS_DUT_IF_SIGNED      0
+
+#define ODN_MMCM_LOCK_STATUS_MULTI_MASK         0x00000004U
+#define ODN_MMCM_LOCK_STATUS_MULTI_SHIFT        2
+#define ODN_MMCM_LOCK_STATUS_MULTI_SIGNED       0
+
+#define ODN_MMCM_LOCK_STATUS_PDPP_MASK          0x00000008U
+#define ODN_MMCM_LOCK_STATUS_PDPP_SHIFT         3
+#define ODN_MMCM_LOCK_STATUS_PDPP_SIGNED        0
+
+/*
+       Register GIST_STATUS
+*/
+#define ODN_CORE_GIST_STATUS                    0x0210
+#define ODN_GIST_STATUS_MST_MASK                0x000001FFU
+#define ODN_GIST_STATUS_MST_SHIFT               0
+#define ODN_GIST_STATUS_MST_SIGNED              0
+
+#define ODN_GIST_STATUS_SLV_MASK                0x001FF000U
+#define ODN_GIST_STATUS_SLV_SHIFT               12
+#define ODN_GIST_STATUS_SLV_SIGNED              0
+
+#define ODN_GIST_STATUS_SLV_OUT_MASK            0x03000000U
+#define ODN_GIST_STATUS_SLV_OUT_SHIFT           24
+#define ODN_GIST_STATUS_SLV_OUT_SIGNED          0
+
+#define ODN_GIST_STATUS_MST_OUT_MASK            0x70000000U
+#define ODN_GIST_STATUS_MST_OUT_SHIFT           28
+#define ODN_GIST_STATUS_MST_OUT_SIGNED          0
+
+/*
+       Register DUT_MST_ADD
+*/
+#define ODN_CORE_DUT_MST_ADD                    0x0214
+#define ODN_DUT_MST_ADD_SLV_OUT_MASK            0x0000003FU
+#define ODN_DUT_MST_ADD_SLV_OUT_SHIFT           0
+#define ODN_DUT_MST_ADD_SLV_OUT_SIGNED          0
+
+/*
+       Register DUT_MULTIPLX_INFO
+*/
+#define ODN_CORE_DUT_MULTIPLX_INFO              0x0218
+#define ODN_DUT_MULTIPLX_INFO_MEM_MASK          0x000000FFU
+#define ODN_DUT_MULTIPLX_INFO_MEM_SHIFT         0
+#define ODN_DUT_MULTIPLX_INFO_MEM_SIGNED        0
+
+/****************************
+  Generated from: ad_tx.def
+*****************************/
+
+/*
+       Register ADT_CONTROL
+*/
+#define ODN_AD_TX_DEBUG_ADT_CONTROL             0x0000
+#define ODN_SET_ADTX_READY_MASK                 0x00000004U
+#define ODN_SET_ADTX_READY_SHIFT                2
+#define ODN_SET_ADTX_READY_SIGNED               0
+
+#define ODN_SEND_ALIGN_DATA_MASK                0x00000002U
+#define ODN_SEND_ALIGN_DATA_SHIFT               1
+#define ODN_SEND_ALIGN_DATA_SIGNED              0
+
+#define ODN_ENABLE_FLUSHING_MASK                0x00000001U
+#define ODN_ENABLE_FLUSHING_SHIFT               0
+#define ODN_ENABLE_FLUSHING_SIGNED              0
+
+/*
+       Register ADT_STATUS
+*/
+#define ODN_AD_TX_DEBUG_ADT_STATUS              0x0004
+#define ODN_REQUEST_COMPLETE_MASK               0x00000001U
+#define ODN_REQUEST_COMPLETE_SHIFT              0
+#define ODN_REQUEST_COMPLETE_SIGNED             0
+
+
+/******************************
+ Generated from: mca_debug.def
+*******************************/
+
+/*
+       Register MCA_CONTROL
+*/
+#define ODN_MCA_DEBUG_MCA_CONTROL               0x0000
+#define ODN_ALIGN_START_MASK                    0x00000001U
+#define ODN_ALIGN_START_SHIFT                   0
+#define ODN_ALIGN_START_SIGNED                  0
+
+/*
+       Register MCA_STATUS
+*/
+#define ODN_MCA_DEBUG_MCA_STATUS                0x0004
+#define ODN_TCHECK_SDEBUG_MASK                  0x40000000U
+#define ODN_TCHECK_SDEBUG_SHIFT                 30
+#define ODN_TCHECK_SDEBUG_SIGNED                0
+
+#define ODN_CHECK_SDEBUG_MASK                   0x20000000U
+#define ODN_CHECK_SDEBUG_SHIFT                  29
+#define ODN_CHECK_SDEBUG_SIGNED                 0
+
+#define ODN_ALIGN_SDEBUG_MASK                   0x10000000U
+#define ODN_ALIGN_SDEBUG_SHIFT                  28
+#define ODN_ALIGN_SDEBUG_SIGNED                 0
+
+#define ODN_FWAIT_SDEBUG_MASK                   0x08000000U
+#define ODN_FWAIT_SDEBUG_SHIFT                  27
+#define ODN_FWAIT_SDEBUG_SIGNED                 0
+
+#define ODN_IDLE_SDEBUG_MASK                    0x04000000U
+#define ODN_IDLE_SDEBUG_SHIFT                   26
+#define ODN_IDLE_SDEBUG_SIGNED                  0
+
+#define ODN_FIFO_FULL_MASK                      0x03FF0000U
+#define ODN_FIFO_FULL_SHIFT                     16
+#define ODN_FIFO_FULL_SIGNED                    0
+
+#define ODN_FIFO_EMPTY_MASK                     0x0000FFC0U
+#define ODN_FIFO_EMPTY_SHIFT                    6
+#define ODN_FIFO_EMPTY_SIGNED                   0
+
+#define ODN_TAG_CHECK_ERROR_MASK                0x00000020U
+#define ODN_TAG_CHECK_ERROR_SHIFT               5
+#define ODN_TAG_CHECK_ERROR_SIGNED              0
+
+#define ODN_ALIGN_CHECK_ERROR_MASK              0x00000010U
+#define ODN_ALIGN_CHECK_ERROR_SHIFT             4
+#define ODN_ALIGN_CHECK_ERROR_SIGNED            0
+
+#define ODN_ALIGN_ERROR_MASK                    0x00000008U
+#define ODN_ALIGN_ERROR_SHIFT                   3
+#define ODN_ALIGN_ERROR_SIGNED                  0
+
+#define ODN_TAG_CHECKING_OK_MASK                0x00000004U
+#define ODN_TAG_CHECKING_OK_SHIFT               2
+#define ODN_TAG_CHECKING_OK_SIGNED              0
+
+#define ODN_ALIGN_CHECK_OK_MASK                 0x00000002U
+#define ODN_ALIGN_CHECK_OK_SHIFT                1
+#define ODN_ALIGN_CHECK_OK_SIGNED               0
+
+#define ODN_ALIGNMENT_FOUND_MASK                0x00000001U
+#define ODN_ALIGNMENT_FOUND_SHIFT               0
+#define ODN_ALIGNMENT_FOUND_SIGNED              0
+
+
+/*********************************
+ Generated from: sai_rx_debug.def
+**********************************/
+
+/*
+       Register SIG_RESULT
+*/
+#define ODN_SAI_RX_DEBUG_SIG_RESULT             0x0000
+#define ODN_SIG_RESULT_VALUE_MASK               0xFFFFFFFFU
+#define ODN_SIG_RESULT_VALUE_SHIFT              0
+#define ODN_SIG_RESULT_VALUE_SIGNED             0
+
+/*
+       Register INIT_SIG
+*/
+#define ODN_SAI_RX_DEBUG_INIT_SIG               0x0004
+#define ODN_INIT_SIG_VALUE_MASK                 0x00000001U
+#define ODN_INIT_SIG_VALUE_SHIFT                0
+#define ODN_INIT_SIG_VALUE_SIGNED               0
+
+/*
+       Register SAI_BYPASS
+*/
+#define ODN_SAI_RX_DEBUG_SAI_BYPASS             0x0008
+#define ODN_BYPASS_CLK_TAPS_VALUE_MASK          0x000003FFU
+#define ODN_BYPASS_CLK_TAPS_VALUE_SHIFT         0
+#define ODN_BYPASS_CLK_TAPS_VALUE_SIGNED        0
+
+#define ODN_BYPASS_SET_MASK                     0x00010000U
+#define ODN_BYPASS_SET_SHIFT                    16
+#define ODN_BYPASS_SET_SIGNED                   0
+
+#define ODN_BYPASS_EN_MASK                      0x00100000U
+#define ODN_BYPASS_EN_SHIFT                     20
+#define ODN_BYPASS_EN_SIGNED                    0
+
+#define ODN_EN_STATUS_MASK                      0x01000000U
+#define ODN_EN_STATUS_SHIFT                     24
+#define ODN_EN_STATUS_SIGNED                    0
+
+/*
+       Register SAI_CLK_TAPS
+*/
+#define ODN_SAI_RX_DEBUG_SAI_CLK_TAPS           0x000C
+#define ODN_CLK_TAPS_VALUE_MASK                 0x000003FFU
+#define ODN_CLK_TAPS_VALUE_SHIFT                0
+#define ODN_CLK_TAPS_VALUE_SIGNED               0
+
+#define ODN_TRAINING_COMPLETE_MASK              0x00010000U
+#define ODN_TRAINING_COMPLETE_SHIFT             16
+#define ODN_TRAINING_COMPLETE_SIGNED            0
+
+/*
+       Register SAI_EYES
+*/
+#define ODN_SAI_RX_DEBUG_SAI_EYES               0x0010
+#define ODN_MIN_EYE_END_MASK                    0x0000FFFFU
+#define ODN_MIN_EYE_END_SHIFT                   0
+#define ODN_MIN_EYE_END_SIGNED                  0
+
+#define ODN_MAX_EYE_START_MASK                  0xFFFF0000U
+#define ODN_MAX_EYE_START_SHIFT                 16
+#define ODN_MAX_EYE_START_SIGNED                0
+
+/*
+       Register SAI_DDR_INVERT
+*/
+#define ODN_SAI_RX_DEBUG_SAI_DDR_INVERT         0x0014
+#define ODN_DDR_INVERT_MASK                     0x00000001U
+#define ODN_DDR_INVERT_SHIFT                    0
+#define ODN_DDR_INVERT_SIGNED                   0
+
+#define ODN_OVERIDE_VALUE_MASK                  0x00010000U
+#define ODN_OVERIDE_VALUE_SHIFT                 16
+#define ODN_OVERIDE_VALUE_SIGNED                0
+
+#define ODN_INVERT_OVERIDE_MASK                 0x00100000U
+#define ODN_INVERT_OVERIDE_SHIFT                20
+#define ODN_INVERT_OVERIDE_SIGNED               0
+
+/*
+       Register SAI_TRAIN_ACK
+*/
+#define ODN_SAI_RX_DEBUG_SAI_TRAIN_ACK          0x0018
+#define ODN_TRAIN_ACK_FAIL_MASK                 0x00000001U
+#define ODN_TRAIN_ACK_FAIL_SHIFT                0
+#define ODN_TRAIN_ACK_FAIL_SIGNED               0
+
+#define ODN_TRAIN_ACK_FAIL_COUNT_MASK           0x000000F0U
+#define ODN_TRAIN_ACK_FAIL_COUNT_SHIFT          4
+#define ODN_TRAIN_ACK_FAIL_COUNT_SIGNED         0
+
+#define ODN_TRAIN_ACK_COMPLETE_MASK             0x00000100U
+#define ODN_TRAIN_ACK_COMPLETE_SHIFT            8
+#define ODN_TRAIN_ACK_COMPLETE_SIGNED           0
+
+#define ODN_TRAIN_ACK_OVERIDE_MASK              0x00001000U
+#define ODN_TRAIN_ACK_OVERIDE_SHIFT             12
+#define ODN_TRAIN_ACK_OVERIDE_SIGNED            0
+
+/*
+       Register SAI_TRAIN_ACK_COUNT
+*/
+#define ODN_SAI_RX_DEBUG_SAI_TRAIN_ACK_COUNT    0x001C
+#define ODN_TRAIN_COUNT_MASK                    0xFFFFFFFFU
+#define ODN_TRAIN_COUNT_SHIFT                   0
+#define ODN_TRAIN_COUNT_SIGNED                  0
+
+/*
+       Register SAI_CHANNEL_NUMBER
+*/
+#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_NUMBER     0x0020
+#define ODN_CHANNEL_NUMBER_MASK                 0x0000FFFFU
+#define ODN_CHANNEL_NUMBER_SHIFT                0
+#define ODN_CHANNEL_NUMBER_SIGNED               0
+
+/*
+       Register SAI_CHANNEL_EYE_START
+*/
+#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_START  0x0024
+#define ODN_CHANNEL_EYE_START_MASK              0xFFFFFFFFU
+#define ODN_CHANNEL_EYE_START_SHIFT             0
+#define ODN_CHANNEL_EYE_START_SIGNED            0
+
+/*
+       Register SAI_CHANNEL_EYE_END
+*/
+#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_END    0x0028
+#define ODN_CHANNEL_EYE_END_MASK                0xFFFFFFFFU
+#define ODN_CHANNEL_EYE_END_SHIFT               0
+#define ODN_CHANNEL_EYE_END_SIGNED              0
+
+/*
+       Register SAI_CHANNEL_EYE_PATTERN
+*/
+#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_PATTERN 0x002C
+#define ODN_CHANNEL_EYE_PATTERN_MASK            0xFFFFFFFFU
+#define ODN_CHANNEL_EYE_PATTERN_SHIFT           0
+#define ODN_CHANNEL_EYE_PATTERN_SIGNED          0
+
+/*
+       Register SAI_CHANNEL_EYE_DEBUG
+*/
+#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_DEBUG  0x0030
+#define ODN_CHANNEL_EYE_SENSE_MASK              0x00000001U
+#define ODN_CHANNEL_EYE_SENSE_SHIFT             0
+#define ODN_CHANNEL_EYE_SENSE_SIGNED            0
+
+#define ODN_CHANNEL_EYE_COMPLETE_MASK           0x00000002U
+#define ODN_CHANNEL_EYE_COMPLETE_SHIFT          1
+#define ODN_CHANNEL_EYE_COMPLETE_SIGNED         0
+
+
+/*********************************
+ Generated from: sai_tx_debug.def
+**********************************/
+
+/*
+       Register SIG_RESULT
+*/
+#define ODN_SAI_TX_DEBUG_SIG_RESULT             0x0000
+#define ODN_TX_SIG_RESULT_VALUE_MASK            0xFFFFFFFFU
+#define ODN_TX_SIG_RESULT_VALUE_SHIFT           0
+#define ODN_TX_SIG_RESULT_VALUE_SIGNED          0
+
+/*
+       Register INIT_SIG
+*/
+#define ODN_SAI_TX_DEBUG_INIT_SIG               0x0004
+#define ODN_TX_INIT_SIG_VALUE_MASK              0x00000001U
+#define ODN_TX_INIT_SIG_VALUE_SHIFT             0
+#define ODN_TX_INIT_SIG_VALUE_SIGNED            0
+
+/*
+       Register SAI_BYPASS
+*/
+#define ODN_SAI_TX_DEBUG_SAI_BYPASS             0x0008
+#define ODN_TX_BYPASS_EN_MASK                   0x00000001U
+#define ODN_TX_BYPASS_EN_SHIFT                  0
+#define ODN_TX_BYPASS_EN_SIGNED                 0
+
+#define ODN_TX_ACK_RESEND_MASK                  0x00000002U
+#define ODN_TX_ACK_RESEND_SHIFT                 1
+#define ODN_TX_ACK_RESEND_SIGNED                0
+
+#define ODN_TX_DISABLE_ACK_SEND_MASK            0x00000004U
+#define ODN_TX_DISABLE_ACK_SEND_SHIFT           2
+#define ODN_TX_DISABLE_ACK_SEND_SIGNED          0
+
+/*
+       Register SAI_STATUS
+*/
+#define ODN_SAI_TX_DEBUG_SAI_STATUS             0x000C
+#define ODN_TX_TRAINING_COMPLETE_MASK           0x00000001U
+#define ODN_TX_TRAINING_COMPLETE_SHIFT          0
+#define ODN_TX_TRAINING_COMPLETE_SIGNED         0
+
+#define ODN_TX_TRAINING_ACK_COMPLETE_MASK       0x00000002U
+#define ODN_TX_TRAINING_ACK_COMPLETE_SHIFT      1
+#define ODN_TX_TRAINING_ACK_COMPLETE_SIGNED     0
+
+
+
+#endif /* _ODIN_REGS_H_ */
+
+/******************************************************************************
+ End of file (odin_regs.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/orion_defs.h b/drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/orion_defs.h
new file mode 100644 (file)
index 0000000..1691151
--- /dev/null
@@ -0,0 +1,183 @@
+/****************************************************************************
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Orion Memory Map - View from PCIe
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+****************************************************************************/
+
+#ifndef _ORION_DEFS_H_
+#define _ORION_DEFS_H_
+
+/*
+ * These defines have not been autogenerated.
+ * Only values that differ from Odin are included here.
+ */
+
+#define DEVICE_ID_ORION                          0x1020
+
+/* Odin system register banks */
+#define SRS_REG_BANK_ODN_CLK_BLK                 0x02000
+
+/*
+ * Orion CLK regs - the srs_clk_blk module defs are not auto generated
+ */
+#define SRS_PDP_P_CLK_OUT_DIVIDER_REG1           0x620
+#define SRS_PDP_PCLK_ODIV1_LO_TIME_MASK          0x0000003FU
+#define SRS_PDP_PCLK_ODIV1_LO_TIME_SHIFT         0
+#define SRS_PDP_PCLK_ODIV1_HI_TIME_MASK          0x00000FC0U
+#define SRS_PDP_PCLK_ODIV1_HI_TIME_SHIFT         6
+
+#define SRS_PDP_P_CLK_OUT_DIVIDER_REG2           0x624
+#define SRS_PDP_PCLK_ODIV2_NOCOUNT_MASK          0x00000040U
+#define SRS_PDP_PCLK_ODIV2_NOCOUNT_SHIFT         6
+#define SRS_PDP_PCLK_ODIV2_EDGE_MASK             0x00000080U
+#define SRS_PDP_PCLK_ODIV2_EDGE_SHIFT            7
+#define SRS_PDP_PCLK_ODIV2_FRAC_MASK             0x00007C00U
+#define SRS_PDP_PCLK_ODIV2_FRAC_SHIFT            10
+
+#define SRS_PDP_P_CLK_OUT_DIVIDER_REG3           0x61C
+
+#define SRS_PDP_M_CLK_OUT_DIVIDER_REG1           0x628
+#define SRS_PDP_MCLK_ODIV1_LO_TIME_MASK          0x0000003FU
+#define SRS_PDP_MCLK_ODIV1_LO_TIME_SHIFT         0
+#define SRS_PDP_MCLK_ODIV1_HI_TIME_MASK          0x00000FC0U
+#define SRS_PDP_MCLK_ODIV1_HI_TIME_SHIFT         6
+
+#define SRS_PDP_M_CLK_OUT_DIVIDER_REG2           0x62C
+#define SRS_PDP_MCLK_ODIV2_NOCOUNT_MASK          0x00000040U
+#define SRS_PDP_MCLK_ODIV2_NOCOUNT_SHIFT         6
+#define SRS_PDP_MCLK_ODIV2_EDGE_MASK             0x00000080U
+#define SRS_PDP_MCLK_ODIV2_EDGE_SHIFT            7
+
+#define SRS_PDP_P_CLK_MULTIPLIER_REG1            0x650
+#define SRS_PDP_PCLK_MUL1_LO_TIME_MASK           0x0000003FU
+#define SRS_PDP_PCLK_MUL1_LO_TIME_SHIFT          0
+#define SRS_PDP_PCLK_MUL1_HI_TIME_MASK           0x00000FC0U
+#define SRS_PDP_PCLK_MUL1_HI_TIME_SHIFT          6
+
+#define SRS_PDP_P_CLK_MULTIPLIER_REG2            0x654
+#define SRS_PDP_PCLK_MUL2_NOCOUNT_MASK           0x00000040U
+#define SRS_PDP_PCLK_MUL2_NOCOUNT_SHIFT          6
+#define SRS_PDP_PCLK_MUL2_EDGE_MASK              0x00000080U
+#define SRS_PDP_PCLK_MUL2_EDGE_SHIFT             7
+#define SRS_PDP_PCLK_MUL2_FRAC_MASK              0x00007C00U
+#define SRS_PDP_PCLK_MUL2_FRAC_SHIFT             10
+
+#define SRS_PDP_P_CLK_MULTIPLIER_REG3            0x64C
+
+#define SRS_PDP_P_CLK_IN_DIVIDER_REG             0x658
+#define SRS_PDP_PCLK_IDIV_LO_TIME_MASK           0x0000003FU
+#define SRS_PDP_PCLK_IDIV_LO_TIME_SHIFT          0
+#define SRS_PDP_PCLK_IDIV_HI_TIME_MASK           0x00000FC0U
+#define SRS_PDP_PCLK_IDIV_HI_TIME_SHIFT          6
+#define SRS_PDP_PCLK_IDIV_NOCOUNT_MASK           0x00001000U
+#define SRS_PDP_PCLK_IDIV_NOCOUNT_SHIFT          12
+#define SRS_PDP_PCLK_IDIV_EDGE_MASK              0x00002000U
+#define SRS_PDP_PCLK_IDIV_EDGE_SHIFT             13
+
+/*
+ * DUT core clock input divider, DUT reference clock input divider
+ */
+#define SRS_DUT_CORE_CLK_OUT_DIVIDER1                0x0020
+#define SRS_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME_MASK   0x00000FC0U
+#define SRS_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME_SHIFT  6
+#define SRS_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME_MASK   0x0000003FU
+#define SRS_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME_SHIFT  0
+
+#define SRS_DUT_CORE_CLK_OUT_DIVIDER2                0x0024
+#define SRS_DUT_CORE_CLK_OUT_DIVIDER2_EDGE_MASK      0x00000080U
+#define SRS_DUT_CORE_CLK_OUT_DIVIDER2_EDGE_SHIFT     7
+#define SRS_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT_MASK   0x00000040U
+#define SRS_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT  6
+
+#define SRS_DUT_REF_CLK_OUT_DIVIDER1                 0x0028
+#define SRS_DUT_REF_CLK_OUT_DIVIDER1_HI_TIME_MASK    0x00000FC0U
+#define SRS_DUT_REF_CLK_OUT_DIVIDER1_HI_TIME_SHIFT   6
+#define SRS_DUT_REF_CLK_OUT_DIVIDER1_LO_TIME_MASK    0x0000003FU
+#define SRS_DUT_REF_CLK_OUT_DIVIDER1_LO_TIME_SHIFT   0
+
+#define SRS_DUT_REF_CLK_OUT_DIVIDER2                 0x002C
+#define SRS_DUT_REF_CLK_OUT_DIVIDER2_EDGE_MASK       0x00000080U
+#define SRS_DUT_REF_CLK_OUT_DIVIDER2_EDGE_SHIFT      7
+#define SRS_DUT_REF_CLK_OUT_DIVIDER2_NOCOUNT_MASK    0x00000040U
+#define SRS_DUT_REF_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT   6
+
+/*
+ * DUT interface reference clock input divider
+ */
+
+#define SRS_DUT_MEM_CLK_OUT_DIVIDER1                0x0228
+#define SRS_DUT_MEM_CLK_OUT_DIVIDER1_HI_TIME_MASK   0x00000FC0U
+#define SRS_DUT_MEM_CLK_OUT_DIVIDER1_HI_TIME_SHIFT  6
+#define SRS_DUT_MEM_CLK_OUT_DIVIDER1_LO_TIME_MASK   0x0000003FU
+#define SRS_DUT_MEM_CLK_OUT_DIVIDER1_LO_TIME_SHIFT  0
+
+#define SRS_DUT_MEM_CLK_OUT_DIVIDER2                0x022C
+#define SRS_DUT_MEM_CLK_OUT_DIVIDER2_EDGE_MASK      0x00000080U
+#define SRS_DUT_MEM_CLK_OUT_DIVIDER2_EDGE_SHIFT     7
+#define SRS_DUT_MEM_CLK_OUT_DIVIDER2_NOCOUNT_MASK   0x00000040U
+#define SRS_DUT_MEM_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT  6
+
+/*
+ * Min max values from Xilinx Virtex Ultrascale data sheet DS893,
+ * for speed grade 1. All in Hz.
+ */
+#define SRS_INPUT_CLOCK_SPEED                       100000000U
+#define SRS_INPUT_CLOCK_SPEED_MIN                   10000000U
+#define SRS_INPUT_CLOCK_SPEED_MAX                   800000000U
+#define SRS_OUTPUT_CLOCK_SPEED_MIN                  4690000U
+#define SRS_OUTPUT_CLOCK_SPEED_MAX                  630000000U
+#define SRS_VCO_MIN                                 600000000U
+#define SRS_VCO_MAX                                 1200000000U
+#define SRS_PFD_MIN                                 10000000U
+#define SRS_PFD_MAX                                 450000000U
+
+/*
+ * Orion interrupt flags
+ */
+#define SRS_INTERRUPT_ENABLE_PDP1          (1 << SRS_INTERRUPT_ENABLE_PDP_SHIFT)
+#define SRS_INTERRUPT_ENABLE_DUT           (1 << SRS_INTERRUPT_ENABLE_DUT_SHIFT)
+#define SRS_INTERRUPT_STATUS_PDP1          (1 << SRS_INTERRUPT_STATUS_PDP_SHIFT)
+#define SRS_INTERRUPT_STATUS_DUT           (1 << SRS_INTERRUPT_STATUS_DUT_SHIFT)
+#define SRS_INTERRUPT_CLEAR_PDP1           (1 << SRS_INTERRUPT_CLR_PDP_SHIFT)
+#define SRS_INTERRUPT_CLEAR_DUT            (1 << SRS_INTERRUPT_CLR_DUT_SHIFT)
+
+#endif /* _ORION_DEFS_H_ */
+
+/*****************************************************************************
+ End of file (orion_defs.h)
+*****************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/orion_regs.h b/drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/orion_regs.h
new file mode 100644 (file)
index 0000000..2a626bd
--- /dev/null
@@ -0,0 +1,439 @@
+/******************************************************************************
+@Title          Orion system control register definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Orion FPGA register defs for Sirius RTL
+@Author         Autogenerated
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+******************************************************************************/
+
+#ifndef _OUT_DRV_H_
+#define _OUT_DRV_H_
+
+/*
+       Register ID
+*/
+#define SRS_CORE_ID                         0x0000
+#define SRS_ID_VARIANT_MASK                     0x0000FFFFU
+#define SRS_ID_VARIANT_SHIFT                    0
+#define SRS_ID_VARIANT_SIGNED                   0
+
+#define SRS_ID_ID_MASK                          0xFFFF0000U
+#define SRS_ID_ID_SHIFT                         16
+#define SRS_ID_ID_SIGNED                        0
+
+/*
+       Register REVISION
+*/
+#define SRS_CORE_REVISION                   0x0004
+#define SRS_REVISION_MINOR_MASK                 0x000000FFU
+#define SRS_REVISION_MINOR_SHIFT                0
+#define SRS_REVISION_MINOR_SIGNED               0
+
+#define SRS_REVISION_MAJOR_MASK                 0x00000F00U
+#define SRS_REVISION_MAJOR_SHIFT                8
+#define SRS_REVISION_MAJOR_SIGNED               0
+
+/*
+       Register CHANGE_SET
+*/
+#define SRS_CORE_CHANGE_SET                 0x0008
+#define SRS_CHANGE_SET_SET_MASK                 0xFFFFFFFFU
+#define SRS_CHANGE_SET_SET_SHIFT                0
+#define SRS_CHANGE_SET_SET_SIGNED               0
+
+/*
+       Register USER_ID
+*/
+#define SRS_CORE_USER_ID                    0x000C
+#define SRS_USER_ID_ID_MASK                     0x0000000FU
+#define SRS_USER_ID_ID_SHIFT                    0
+#define SRS_USER_ID_ID_SIGNED                   0
+
+/*
+       Register USER_BUILD
+*/
+#define SRS_CORE_USER_BUILD                 0x0010
+#define SRS_USER_BUILD_BUILD_MASK               0xFFFFFFFFU
+#define SRS_USER_BUILD_BUILD_SHIFT              0
+#define SRS_USER_BUILD_BUILD_SIGNED             0
+
+/*
+       Register SOFT_RESETN
+*/
+#define SRS_CORE_SOFT_RESETN                0x0080
+#define SRS_SOFT_RESETN_DDR_MASK                0x00000001U
+#define SRS_SOFT_RESETN_DDR_SHIFT               0
+#define SRS_SOFT_RESETN_DDR_SIGNED              0
+
+#define SRS_SOFT_RESETN_USB_MASK                0x00000002U
+#define SRS_SOFT_RESETN_USB_SHIFT               1
+#define SRS_SOFT_RESETN_USB_SIGNED              0
+
+#define SRS_SOFT_RESETN_PDP_MASK                0x00000004U
+#define SRS_SOFT_RESETN_PDP_SHIFT               2
+#define SRS_SOFT_RESETN_PDP_SIGNED              0
+
+#define SRS_SOFT_RESETN_GIST_MASK               0x00000008U
+#define SRS_SOFT_RESETN_GIST_SHIFT              3
+#define SRS_SOFT_RESETN_GIST_SIGNED             0
+
+/*
+       Register DUT_SOFT_RESETN
+*/
+#define SRS_CORE_DUT_SOFT_RESETN            0x0084
+#define SRS_DUT_SOFT_RESETN_EXTERNAL_MASK       0x00000001U
+#define SRS_DUT_SOFT_RESETN_EXTERNAL_SHIFT      0
+#define SRS_DUT_SOFT_RESETN_EXTERNAL_SIGNED     0
+
+/*
+       Register SOFT_AUTO_RESETN
+*/
+#define SRS_CORE_SOFT_AUTO_RESETN           0x0088
+#define SRS_SOFT_AUTO_RESETN_CFG_MASK           0x00000001U
+#define SRS_SOFT_AUTO_RESETN_CFG_SHIFT          0
+#define SRS_SOFT_AUTO_RESETN_CFG_SIGNED         0
+
+/*
+       Register CLK_GEN_RESET
+*/
+#define SRS_CORE_CLK_GEN_RESET              0x0090
+#define SRS_CLK_GEN_RESET_DUT_CORE_MMCM_MASK    0x00000001U
+#define SRS_CLK_GEN_RESET_DUT_CORE_MMCM_SHIFT   0
+#define SRS_CLK_GEN_RESET_DUT_CORE_MMCM_SIGNED  0
+
+#define SRS_CLK_GEN_RESET_DUT_IF_MMCM_MASK      0x00000002U
+#define SRS_CLK_GEN_RESET_DUT_IF_MMCM_SHIFT     1
+#define SRS_CLK_GEN_RESET_DUT_IF_MMCM_SIGNED    0
+
+#define SRS_CLK_GEN_RESET_MULTI_MMCM_MASK       0x00000004U
+#define SRS_CLK_GEN_RESET_MULTI_MMCM_SHIFT      2
+#define SRS_CLK_GEN_RESET_MULTI_MMCM_SIGNED     0
+
+#define SRS_CLK_GEN_RESET_PDP_MMCM_MASK         0x00000008U
+#define SRS_CLK_GEN_RESET_PDP_MMCM_SHIFT        3
+#define SRS_CLK_GEN_RESET_PDP_MMCM_SIGNED       0
+
+/*
+       Register DUT_MEM
+*/
+#define SRS_CORE_DUT_MEM                    0x0120
+#define SRS_DUT_MEM_READ_RESPONSE_LATENCY_MASK  0x0000FFFFU
+#define SRS_DUT_MEM_READ_RESPONSE_LATENCY_SHIFT 0
+#define SRS_DUT_MEM_READ_RESPONSE_LATENCY_SIGNED 0
+
+#define SRS_DUT_MEM_WRITE_RESPONSE_LATENCY_MASK 0xFFFF0000U
+#define SRS_DUT_MEM_WRITE_RESPONSE_LATENCY_SHIFT 16
+#define SRS_DUT_MEM_WRITE_RESPONSE_LATENCY_SIGNED 0
+
+/*
+       Register APM
+*/
+#define SRS_CORE_APM                        0x0150
+#define SRS_APM_RESET_EVENT_MASK                0x00000001U
+#define SRS_APM_RESET_EVENT_SHIFT               0
+#define SRS_APM_RESET_EVENT_SIGNED              0
+
+#define SRS_APM_CAPTURE_EVENT_MASK              0x00000002U
+#define SRS_APM_CAPTURE_EVENT_SHIFT             1
+#define SRS_APM_CAPTURE_EVENT_SIGNED            0
+
+/*
+       Register NUM_GPIO
+*/
+#define SRS_CORE_NUM_GPIO                   0x0180
+#define SRS_NUM_GPIO_NUMBER_MASK                0x0000000FU
+#define SRS_NUM_GPIO_NUMBER_SHIFT               0
+#define SRS_NUM_GPIO_NUMBER_SIGNED              0
+
+/*
+       Register GPIO_EN
+*/
+#define SRS_CORE_GPIO_EN                    0x0184
+#define SRS_GPIO_EN_DIRECTION_MASK              0x000000FFU
+#define SRS_GPIO_EN_DIRECTION_SHIFT             0
+#define SRS_GPIO_EN_DIRECTION_SIGNED            0
+
+/*
+       Register GPIO
+*/
+#define SRS_CORE_GPIO                       0x0188
+#define SRS_GPIO_GPIO_MASK                      0x000000FFU
+#define SRS_GPIO_GPIO_SHIFT                     0
+#define SRS_GPIO_GPIO_SIGNED                    0
+
+/*
+       Register SPI_MASTER_IFACE
+*/
+#define SRS_CORE_SPI_MASTER_IFACE           0x018C
+#define SRS_SPI_MASTER_IFACE_ENABLE_MASK        0x00000001U
+#define SRS_SPI_MASTER_IFACE_ENABLE_SHIFT       0
+#define SRS_SPI_MASTER_IFACE_ENABLE_SIGNED      0
+
+/*
+       Register SRS_IP_STATUS
+*/
+#define SRS_CORE_SRS_IP_STATUS              0x0200
+#define SRS_SRS_IP_STATUS_PCIE_USER_LNK_UP_MASK 0x00000001U
+#define SRS_SRS_IP_STATUS_PCIE_USER_LNK_UP_SHIFT 0
+#define SRS_SRS_IP_STATUS_PCIE_USER_LNK_UP_SIGNED 0
+
+#define SRS_SRS_IP_STATUS_MIG_INIT_CALIB_COMPLETE_MASK 0x00000002U
+#define SRS_SRS_IP_STATUS_MIG_INIT_CALIB_COMPLETE_SHIFT 1
+#define SRS_SRS_IP_STATUS_MIG_INIT_CALIB_COMPLETE_SIGNED 0
+
+#define SRS_SRS_IP_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_MASK 0x00000004U
+#define SRS_SRS_IP_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_SHIFT 2
+#define SRS_SRS_IP_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_SIGNED 0
+
+#define SRS_SRS_IP_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_MASK 0x00000008U
+#define SRS_SRS_IP_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_SHIFT 3
+#define SRS_SRS_IP_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_SIGNED 0
+
+/*
+       Register CORE_CONTROL
+*/
+#define SRS_CORE_CORE_CONTROL               0x0204
+#define SRS_CORE_CONTROL_BAR4_OFFSET_MASK       0x0000001FU
+#define SRS_CORE_CONTROL_BAR4_OFFSET_SHIFT      0
+#define SRS_CORE_CONTROL_BAR4_OFFSET_SIGNED     0
+
+#define SRS_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_MASK 0x00000300U
+#define SRS_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_SHIFT 8
+#define SRS_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_SIGNED 0
+
+#define SRS_CORE_CONTROL_HDMI_MODULE_EN_MASK    0x00001C00U
+#define SRS_CORE_CONTROL_HDMI_MODULE_EN_SHIFT   10
+#define SRS_CORE_CONTROL_HDMI_MODULE_EN_SIGNED  0
+
+/*
+       Register REG_BANK_STATUS
+*/
+#define SRS_CORE_REG_BANK_STATUS            0x0208
+#define SRS_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_MASK 0xFFFFFFFFU
+#define SRS_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_SHIFT 0
+#define SRS_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_SIGNED 0
+
+/*
+       Register MMCM_LOCK_STATUS
+*/
+#define SRS_CORE_MMCM_LOCK_STATUS           0x020C
+#define SRS_MMCM_LOCK_STATUS_DUT_CORE_MASK      0x00000001U
+#define SRS_MMCM_LOCK_STATUS_DUT_CORE_SHIFT     0
+#define SRS_MMCM_LOCK_STATUS_DUT_CORE_SIGNED    0
+
+#define SRS_MMCM_LOCK_STATUS_DUT_IF_MASK        0x00000002U
+#define SRS_MMCM_LOCK_STATUS_DUT_IF_SHIFT       1
+#define SRS_MMCM_LOCK_STATUS_DUT_IF_SIGNED      0
+
+#define SRS_MMCM_LOCK_STATUS_MULTI_MASK         0x00000004U
+#define SRS_MMCM_LOCK_STATUS_MULTI_SHIFT        2
+#define SRS_MMCM_LOCK_STATUS_MULTI_SIGNED       0
+
+#define SRS_MMCM_LOCK_STATUS_PDP_MASK           0x00000008U
+#define SRS_MMCM_LOCK_STATUS_PDP_SHIFT          3
+#define SRS_MMCM_LOCK_STATUS_PDP_SIGNED         0
+
+/*
+       Register GIST_STATUS
+*/
+#define SRS_CORE_GIST_STATUS                0x0210
+#define SRS_GIST_STATUS_MST_MASK                0x000001FFU
+#define SRS_GIST_STATUS_MST_SHIFT               0
+#define SRS_GIST_STATUS_MST_SIGNED              0
+
+#define SRS_GIST_STATUS_SLV_MASK                0x001FF000U
+#define SRS_GIST_STATUS_SLV_SHIFT               12
+#define SRS_GIST_STATUS_SLV_SIGNED              0
+
+#define SRS_GIST_STATUS_SLV_OUT_MASK            0x03000000U
+#define SRS_GIST_STATUS_SLV_OUT_SHIFT           24
+#define SRS_GIST_STATUS_SLV_OUT_SIGNED          0
+
+#define SRS_GIST_STATUS_MST_OUT_MASK            0x70000000U
+#define SRS_GIST_STATUS_MST_OUT_SHIFT           28
+#define SRS_GIST_STATUS_MST_OUT_SIGNED          0
+
+/*
+       Register SENSOR_BOARD
+*/
+#define SRS_CORE_SENSOR_BOARD               0x0214
+#define SRS_SENSOR_BOARD_ID_MASK                0x00000003U
+#define SRS_SENSOR_BOARD_ID_SHIFT               0
+#define SRS_SENSOR_BOARD_ID_SIGNED              0
+
+/*
+       Register INTERRUPT_STATUS
+*/
+#define SRS_CORE_INTERRUPT_STATUS           0x0218
+#define SRS_INTERRUPT_STATUS_DUT_MASK           0x00000001U
+#define SRS_INTERRUPT_STATUS_DUT_SHIFT          0
+#define SRS_INTERRUPT_STATUS_DUT_SIGNED         0
+
+#define SRS_INTERRUPT_STATUS_PDP_MASK           0x00000002U
+#define SRS_INTERRUPT_STATUS_PDP_SHIFT          1
+#define SRS_INTERRUPT_STATUS_PDP_SIGNED         0
+
+#define SRS_INTERRUPT_STATUS_I2C_MASK           0x00000004U
+#define SRS_INTERRUPT_STATUS_I2C_SHIFT          2
+#define SRS_INTERRUPT_STATUS_I2C_SIGNED         0
+
+#define SRS_INTERRUPT_STATUS_SPI_MASK           0x00000008U
+#define SRS_INTERRUPT_STATUS_SPI_SHIFT          3
+#define SRS_INTERRUPT_STATUS_SPI_SIGNED         0
+
+#define SRS_INTERRUPT_STATUS_APM_MASK           0x00000010U
+#define SRS_INTERRUPT_STATUS_APM_SHIFT          4
+#define SRS_INTERRUPT_STATUS_APM_SIGNED         0
+
+#define SRS_INTERRUPT_STATUS_OS_IRQ_MASK        0x00001FE0U
+#define SRS_INTERRUPT_STATUS_OS_IRQ_SHIFT       5
+#define SRS_INTERRUPT_STATUS_OS_IRQ_SIGNED      0
+
+#define SRS_INTERRUPT_STATUS_IRQ_TEST_MASK      0x40000000U
+#define SRS_INTERRUPT_STATUS_IRQ_TEST_SHIFT     30
+#define SRS_INTERRUPT_STATUS_IRQ_TEST_SIGNED    0
+
+#define SRS_INTERRUPT_STATUS_MASTER_STATUS_MASK 0x80000000U
+#define SRS_INTERRUPT_STATUS_MASTER_STATUS_SHIFT 31
+#define SRS_INTERRUPT_STATUS_MASTER_STATUS_SIGNED 0
+
+/*
+       Register INTERRUPT_ENABLE
+*/
+#define SRS_CORE_INTERRUPT_ENABLE           0x021C
+#define SRS_INTERRUPT_ENABLE_DUT_MASK           0x00000001U
+#define SRS_INTERRUPT_ENABLE_DUT_SHIFT          0
+#define SRS_INTERRUPT_ENABLE_DUT_SIGNED         0
+
+#define SRS_INTERRUPT_ENABLE_PDP_MASK           0x00000002U
+#define SRS_INTERRUPT_ENABLE_PDP_SHIFT          1
+#define SRS_INTERRUPT_ENABLE_PDP_SIGNED         0
+
+#define SRS_INTERRUPT_ENABLE_I2C_MASK           0x00000004U
+#define SRS_INTERRUPT_ENABLE_I2C_SHIFT          2
+#define SRS_INTERRUPT_ENABLE_I2C_SIGNED         0
+
+#define SRS_INTERRUPT_ENABLE_SPI_MASK           0x00000008U
+#define SRS_INTERRUPT_ENABLE_SPI_SHIFT          3
+#define SRS_INTERRUPT_ENABLE_SPI_SIGNED         0
+
+#define SRS_INTERRUPT_ENABLE_APM_MASK           0x00000010U
+#define SRS_INTERRUPT_ENABLE_APM_SHIFT          4
+#define SRS_INTERRUPT_ENABLE_APM_SIGNED         0
+
+#define SRS_INTERRUPT_ENABLE_OS_IRQ_MASK        0x00001FE0U
+#define SRS_INTERRUPT_ENABLE_OS_IRQ_SHIFT       5
+#define SRS_INTERRUPT_ENABLE_OS_IRQ_SIGNED      0
+
+#define SRS_INTERRUPT_ENABLE_IRQ_TEST_MASK      0x40000000U
+#define SRS_INTERRUPT_ENABLE_IRQ_TEST_SHIFT     30
+#define SRS_INTERRUPT_ENABLE_IRQ_TEST_SIGNED    0
+
+#define SRS_INTERRUPT_ENABLE_MASTER_ENABLE_MASK 0x80000000U
+#define SRS_INTERRUPT_ENABLE_MASTER_ENABLE_SHIFT 31
+#define SRS_INTERRUPT_ENABLE_MASTER_ENABLE_SIGNED 0
+
+/*
+       Register INTERRUPT_CLR
+*/
+#define SRS_CORE_INTERRUPT_CLR              0x0220
+#define SRS_INTERRUPT_CLR_DUT_MASK              0x00000001U
+#define SRS_INTERRUPT_CLR_DUT_SHIFT             0
+#define SRS_INTERRUPT_CLR_DUT_SIGNED            0
+
+#define SRS_INTERRUPT_CLR_PDP_MASK              0x00000002U
+#define SRS_INTERRUPT_CLR_PDP_SHIFT             1
+#define SRS_INTERRUPT_CLR_PDP_SIGNED            0
+
+#define SRS_INTERRUPT_CLR_I2C_MASK              0x00000004U
+#define SRS_INTERRUPT_CLR_I2C_SHIFT             2
+#define SRS_INTERRUPT_CLR_I2C_SIGNED            0
+
+#define SRS_INTERRUPT_CLR_SPI_MASK              0x00000008U
+#define SRS_INTERRUPT_CLR_SPI_SHIFT             3
+#define SRS_INTERRUPT_CLR_SPI_SIGNED            0
+
+#define SRS_INTERRUPT_CLR_APM_MASK              0x00000010U
+#define SRS_INTERRUPT_CLR_APM_SHIFT             4
+#define SRS_INTERRUPT_CLR_APM_SIGNED            0
+
+#define SRS_INTERRUPT_CLR_OS_IRQ_MASK           0x00001FE0U
+#define SRS_INTERRUPT_CLR_OS_IRQ_SHIFT          5
+#define SRS_INTERRUPT_CLR_OS_IRQ_SIGNED         0
+
+#define SRS_INTERRUPT_CLR_IRQ_TEST_MASK         0x40000000U
+#define SRS_INTERRUPT_CLR_IRQ_TEST_SHIFT        30
+#define SRS_INTERRUPT_CLR_IRQ_TEST_SIGNED       0
+
+#define SRS_INTERRUPT_CLR_MASTER_CLEAR_MASK     0x80000000U
+#define SRS_INTERRUPT_CLR_MASTER_CLEAR_SHIFT    31
+#define SRS_INTERRUPT_CLR_MASTER_CLEAR_SIGNED   0
+
+/*
+       Register INTERRUPT_TEST
+*/
+#define SRS_CORE_INTERRUPT_TEST             0x0224
+#define SRS_INTERRUPT_TEST_INTERRUPT_TEST_MASK  0x00000001U
+#define SRS_INTERRUPT_TEST_INTERRUPT_TEST_SHIFT 0
+#define SRS_INTERRUPT_TEST_INTERRUPT_TEST_SIGNED 0
+
+/*
+       Register INTERRUPT_TIMEOUT_CLR
+*/
+#define SRS_CORE_INTERRUPT_TIMEOUT_CLR      0x0228
+#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_MASK 0x00000002U
+#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_SHIFT 1
+#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_SIGNED 0
+
+#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_MASK 0x00000001U
+#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_SHIFT 0
+#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_SIGNED 0
+
+/*
+       Register INTERRUPT_TIMEOUT
+*/
+#define SRS_CORE_INTERRUPT_TIMEOUT          0x022C
+#define SRS_INTERRUPT_TIMEOUT_INTERRUPT_TIMEOUT_THRESHOLD_COUNTER_MASK 0xFFFFFFFFU
+#define SRS_INTERRUPT_TIMEOUT_INTERRUPT_TIMEOUT_THRESHOLD_COUNTER_SHIFT 0
+#define SRS_INTERRUPT_TIMEOUT_INTERRUPT_TIMEOUT_THRESHOLD_COUNTER_SIGNED 0
+
+#endif /* _OUT_DRV_H_ */
+
+/******************************************************************************
+ End of file (orion_regs.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/pdp_regs.h b/drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/pdp_regs.h
new file mode 100644 (file)
index 0000000..bd26b06
--- /dev/null
@@ -0,0 +1,75 @@
+/*************************************************************************/ /*!
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PDP_REGS_H__)
+#define __PDP_REGS_H__
+
+/*************************************************************************/ /*!
+ PCI Device Information
+*/ /**************************************************************************/
+
+#define DCPDP_VENDOR_ID_POWERVR                        (0x1010)
+
+#define DCPDP_DEVICE_ID_PCI_APOLLO_FPGA        (0x1CF1)
+#define DCPDP_DEVICE_ID_PCIE_APOLLO_FPGA       (0x1CF2)
+
+/*************************************************************************/ /*!
+ PCI Device Base Address Information
+*/ /**************************************************************************/
+
+/* PLL and PDP registers on base address register 0 */
+#define DCPDP_REG_PCI_BASENUM                  (0)
+
+#define DCPDP_PCI_PLL_REG_OFFSET               (0x1000)
+#define DCPDP_PCI_PLL_REG_SIZE                 (0x0400)
+
+#define DCPDP_PCI_PDP_REG_OFFSET               (0xC000)
+#define DCPDP_PCI_PDP_REG_SIZE                 (0x2000)
+
+/*************************************************************************/ /*!
+ Misc register information
+*/ /**************************************************************************/
+
+/* This information isn't captured in tcf_rgbpdp_regs.h so define it here */
+#define DCPDP_STR1SURF_FORMAT_ARGB8888         (0xE)
+#define DCPDP_STR1ADDRCTRL_BASE_ADDR_SHIFT     (4)
+#define DCPDP_STR1POSN_STRIDE_SHIFT            (4)
+
+#endif /* !defined(__PDP_REGS_H__) */
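To show how these constants fit together: the device is found by its PCI vendor/device ID, and the PDP block is mapped from BAR0 using the offsets above. This is an illustrative sketch only, not the driver's actual probe path; the function name is made up, and a real caller would also need pci_enable_device() plus matching pci_dev_put()/iounmap() calls.

#include <linux/pci.h>
#include <linux/io.h>
#include "pdp_regs.h"

/* Map the PDP register block of the first PCIe Apollo FPGA found. */
static void __iomem *dcpdp_map_pdp_regs(void)
{
	struct pci_dev *pdev;
	resource_size_t bar0;

	pdev = pci_get_device(DCPDP_VENDOR_ID_POWERVR,
			      DCPDP_DEVICE_ID_PCIE_APOLLO_FPGA, NULL);
	if (!pdev)
		return NULL;

	/* PLL and PDP registers both live in BAR0; add the PDP offset. */
	bar0 = pci_resource_start(pdev, DCPDP_REG_PCI_BASENUM);

	return ioremap(bar0 + DCPDP_PCI_PDP_REG_OFFSET, DCPDP_PCI_PDP_REG_SIZE);
}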
diff --git a/drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/pfim_defs.h b/drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/pfim_defs.h
new file mode 100644 (file)
index 0000000..d39c06f
--- /dev/null
@@ -0,0 +1,69 @@
+/******************************************************************************
+@Title          Odin PFIM definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Odin register defs for PDP-FBDC Interface Module
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+******************************************************************************/
+
+#ifndef _PFIM_DEFS_H_
+#define _PFIM_DEFS_H_
+
+/* Supported FBC modes */
+#define ODIN_PFIM_MOD_LINEAR       (0x00)
+#define ODIN_PFIM_FBCDC_8X8_V12    (0x01)
+#define ODIN_PFIM_FBCDC_16X4_V12   (0x02)
+#define ODIN_PFIM_FBCDC_MAX        (0x03)
+
+/* Supported pixel formats */
+#define ODN_PFIM_PIXFMT_NONE       (0x00)
+#define ODN_PFIM_PIXFMT_ARGB8888   (0x0C)
+#define ODN_PFIM_PIXFMT_RGB565     (0x05)
+
+/* Tile types */
+#define ODN_PFIM_TILETYPE_8X8      (0x01)
+#define ODN_PFIM_TILETYPE_16X4     (0x02)
+#define ODN_PFIM_TILETYPE_32x2     (0x03)
+
+#define PFIM_ROUNDUP(X, Y)         (((X) + ((Y) - 1U)) & ~((Y) - 1U))
+#define PFIM_RND_TAG               (0x10)
+
+#endif /* _PFIM_DEFS_H_ */
+
+/******************************************************************************
+ End of file (pfim_defs.h)
+******************************************************************************/
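PFIM_ROUNDUP() rounds X up to the next multiple of Y; the bit-mask form only works when Y is a power of two, which holds for the tile widths above. A small, purely illustrative usage example:

#include <stdio.h>
#include "pfim_defs.h"

int main(void)
{
	/* (1030 + 7) & ~7 = 1032, i.e. 129 eight-pixel tiles per line. */
	unsigned int width = 1030;
	unsigned int tiles_wide = PFIM_ROUNDUP(width, 8U) / 8U;

	printf("%u pixels -> %u tiles per line\n", width, tiles_wide);
	return 0;
}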
diff --git a/drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/pfim_regs.h b/drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/pfim_regs.h
new file mode 100644 (file)
index 0000000..4b8ff82
--- /dev/null
@@ -0,0 +1,265 @@
+/******************************************************************************
+@Title          Odin PFIM control register definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Odin register defs for PDP-FBDC Interface Module
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+******************************************************************************/
+#ifndef _PFIM_REGS_H_
+#define _PFIM_REGS_H_
+
+/*
+       Register CR_PFIM_NUM_TILES
+*/
+#define CR_PFIM_NUM_TILES 0x0000
+#define CR_PFIM_NUM_TILES_MASK 0x007FFFFFU
+#define CR_PFIM_NUM_TILES_SHIFT 0
+#define CR_PFIM_NUM_TILES_SIGNED 0
+
+/*
+       Register CR_PFIM_TILES_PER_LINE
+*/
+#define CR_PFIM_TILES_PER_LINE 0x0004
+#define CR_PFIM_TILES_PER_LINE_PFIM_TILES_PER_LINE_MASK 0x000000FFU
+#define CR_PFIM_TILES_PER_LINE_PFIM_TILES_PER_LINE_SHIFT 0
+#define CR_PFIM_TILES_PER_LINE_PFIM_TILES_PER_LINE_SIGNED 0
+
+/*
+       Register CR_PFIM_FBDC_YARGB_BASE_ADDR_LSB
+*/
+#define CR_PFIM_FBDC_YARGB_BASE_ADDR_LSB 0x0008
+#define CR_PFIM_FBDC_YARGB_BASE_ADDR_LSB_MASK 0xFFFFFFFFU
+#define CR_PFIM_FBDC_YARGB_BASE_ADDR_LSB_SHIFT 0
+#define CR_PFIM_FBDC_YARGB_BASE_ADDR_LSB_SIGNED 0
+
+/*
+       Register CR_PFIM_FBDC_YARGB_BASE_ADDR_MSB
+*/
+#define CR_PFIM_FBDC_YARGB_BASE_ADDR_MSB 0x000C
+#define CR_PFIM_FBDC_YARGB_BASE_ADDR_MSB_MASK 0x00000003U
+#define CR_PFIM_FBDC_YARGB_BASE_ADDR_MSB_SHIFT 0
+#define CR_PFIM_FBDC_YARGB_BASE_ADDR_MSB_SIGNED 0
+
+/*
+       Register CR_PFIM_FBDC_UV_BASE_ADDR_LSB
+*/
+#define CR_PFIM_FBDC_UV_BASE_ADDR_LSB 0x0010
+#define CR_PFIM_FBDC_UV_BASE_ADDR_LSB_MASK 0xFFFFFFFFU
+#define CR_PFIM_FBDC_UV_BASE_ADDR_LSB_SHIFT 0
+#define CR_PFIM_FBDC_UV_BASE_ADDR_LSB_SIGNED 0
+
+/*
+       Register CR_PFIM_FBDC_UV_BASE_ADDR_MSB
+*/
+#define CR_PFIM_FBDC_UV_BASE_ADDR_MSB 0x0014
+#define CR_PFIM_FBDC_UV_BASE_ADDR_MSB_MASK 0x00000003U
+#define CR_PFIM_FBDC_UV_BASE_ADDR_MSB_SHIFT 0
+#define CR_PFIM_FBDC_UV_BASE_ADDR_MSB_SIGNED 0
+
+/*
+       Register CR_PFIM_PDP_Y_BASE_ADDR
+*/
+#define CR_PFIM_PDP_Y_BASE_ADDR 0x0018
+#define CR_PFIM_PDP_Y_BASE_ADDR_MASK 0xFFFFFFFFU
+#define CR_PFIM_PDP_Y_BASE_ADDR_SHIFT 0
+#define CR_PFIM_PDP_Y_BASE_ADDR_SIGNED 0
+
+/*
+       Register CR_PFIM_PDP_UV_BASE_ADDR
+*/
+#define CR_PFIM_PDP_UV_BASE_ADDR 0x001C
+#define CR_PFIM_PDP_UV_BASE_ADDR_MASK 0xFFFFFFFFU
+#define CR_PFIM_PDP_UV_BASE_ADDR_SHIFT 0
+#define CR_PFIM_PDP_UV_BASE_ADDR_SIGNED 0
+
+/*
+       Register CR_PFIM_FBDC_REQ_CONTEXT
+*/
+#define CR_PFIM_FBDC_REQ_CONTEXT 0x0020
+#define CR_PFIM_FBDC_REQ_CONTEXT_MASK 0x00000007U
+#define CR_PFIM_FBDC_REQ_CONTEXT_SHIFT 0
+#define CR_PFIM_FBDC_REQ_CONTEXT_SIGNED 0
+
+/*
+       Register CR_PFIM_FBDC_REQ_TAG
+*/
+#define CR_PFIM_FBDC_REQ_TAG 0x0024
+#define CR_PFIM_FBDC_REQ_TAG_YARGB_MASK     0x00000003U
+#define CR_PFIM_FBDC_REQ_TAG_YARGB_SHIFT    0
+#define CR_PFIM_FBDC_REQ_TAG_YARGB_SIGNED   0
+
+#define CR_PFIM_FBDC_REQ_TAG_UV_MASK        0x00000030U
+#define CR_PFIM_FBDC_REQ_TAG_UV_SHIFT       4
+#define CR_PFIM_FBDC_REQ_TAG_UV_SIGNED      0
+
+/*
+       Register CR_PFIM_FBDC_REQ_SB_TAG
+*/
+#define CR_PFIM_FBDC_REQ_SB_TAG 0x0028
+#define CR_PFIM_FBDC_REQ_SB_TAG_YARGB_MASK  0x00000003U
+#define CR_PFIM_FBDC_REQ_SB_TAG_YARGB_SHIFT 0
+#define CR_PFIM_FBDC_REQ_SB_TAG_YARGB_SIGNED 0
+
+#define CR_PFIM_FBDC_REQ_SB_TAG_UV_MASK     0x00000030U
+#define CR_PFIM_FBDC_REQ_SB_TAG_UV_SHIFT    4
+#define CR_PFIM_FBDC_REQ_SB_TAG_UV_SIGNED   0
+
+/*
+       Register CR_PFIM_FBDC_HDR_INVAL_REQ
+*/
+#define CR_PFIM_FBDC_HDR_INVAL_REQ 0x002C
+#define CR_PFIM_FBDC_HDR_INVAL_REQ_MASK 0x00000001U
+#define CR_PFIM_FBDC_HDR_INVAL_REQ_SHIFT 0
+#define CR_PFIM_FBDC_HDR_INVAL_REQ_SIGNED 0
+
+/*
+       Register CR_PFIM_FBDC_PIX_FORMAT
+*/
+#define CR_PFIM_FBDC_PIX_FORMAT 0x0030
+#define CR_PFIM_FBDC_PIX_FORMAT_FBDC_PIX_FMT_MASK 0x0000007FU
+#define CR_PFIM_FBDC_PIX_FORMAT_FBDC_PIX_FMT_SHIFT 0
+#define CR_PFIM_FBDC_PIX_FORMAT_FBDC_PIX_FMT_SIGNED 0
+
+/*
+       Register CR_PFIM_FBDC_CR_CH0123_VAL0
+*/
+#define CR_PFIM_FBDC_CR_CH0123_VAL0 0x0034
+#define CR_PFIM_FBDC_CR_CH0123_VAL0_MASK 0xFFFFFFFFU
+#define CR_PFIM_FBDC_CR_CH0123_VAL0_SHIFT 0
+#define CR_PFIM_FBDC_CR_CH0123_VAL0_SIGNED 0
+
+/*
+       Register CR_PFIM_FBDC_CR_CH0123_VAL1
+*/
+#define CR_PFIM_FBDC_CR_CH0123_VAL1 0x0038
+#define CR_PFIM_FBDC_CR_CH0123_VAL1_MASK 0xFFFFFFFFU
+#define CR_PFIM_FBDC_CR_CH0123_VAL1_SHIFT 0
+#define CR_PFIM_FBDC_CR_CH0123_VAL1_SIGNED 0
+
+/*
+       Register CR_PFIM_FBDC_CR_Y_VAL0
+*/
+#define CR_PFIM_FBDC_CR_Y_VAL0 0x003C
+#define CR_PFIM_FBDC_CR_Y_VAL0_MASK 0x000003FFU
+#define CR_PFIM_FBDC_CR_Y_VAL0_SHIFT 0
+#define CR_PFIM_FBDC_CR_Y_VAL0_SIGNED 0
+
+/*
+       Register CR_PFIM_FBDC_CR_UV_VAL0
+*/
+#define CR_PFIM_FBDC_CR_UV_VAL0 0x0040
+#define CR_PFIM_FBDC_CR_UV_VAL0_MASK 0x000003FFU
+#define CR_PFIM_FBDC_CR_UV_VAL0_SHIFT 0
+#define CR_PFIM_FBDC_CR_UV_VAL0_SIGNED 0
+
+/*
+       Register CR_PFIM_FBDC_CR_Y_VAL1
+*/
+#define CR_PFIM_FBDC_CR_Y_VAL1 0x0044
+#define CR_PFIM_FBDC_CR_Y_VAL1_MASK 0x000003FFU
+#define CR_PFIM_FBDC_CR_Y_VAL1_SHIFT 0
+#define CR_PFIM_FBDC_CR_Y_VAL1_SIGNED 0
+
+/*
+       Register CR_PFIM_FBDC_CR_UV_VAL1
+*/
+#define CR_PFIM_FBDC_CR_UV_VAL1 0x0048
+#define CR_PFIM_FBDC_CR_UV_VAL1_MASK 0x000003FFU
+#define CR_PFIM_FBDC_CR_UV_VAL1_SHIFT 0
+#define CR_PFIM_FBDC_CR_UV_VAL1_SIGNED 0
+
+/*
+       Register CR_PFIM_FBDC_FILTER_ENABLE
+*/
+#define CR_PFIM_FBDC_FILTER_ENABLE 0x004C
+#define CR_PFIM_FBDC_FILTER_ENABLE_MASK 0x00000001U
+#define CR_PFIM_FBDC_FILTER_ENABLE_SHIFT 0
+#define CR_PFIM_FBDC_FILTER_ENABLE_SIGNED 0
+
+/*
+       Register CR_PFIM_FBDC_FILTER_STATUS
+*/
+#define CR_PFIM_FBDC_FILTER_STATUS 0x0050
+#define CR_PFIM_FBDC_FILTER_STATUS_MASK 0x0000000FU
+#define CR_PFIM_FBDC_FILTER_STATUS_SHIFT 0
+#define CR_PFIM_FBDC_FILTER_STATUS_SIGNED 0
+
+/*
+       Register CR_PFIM_FBDC_FILTER_CLEAR
+*/
+#define CR_PFIM_FBDC_FILTER_CLEAR 0x0054
+#define CR_PFIM_FBDC_FILTER_CLEAR_MASK 0x0000000FU
+#define CR_PFIM_FBDC_FILTER_CLEAR_SHIFT 0
+#define CR_PFIM_FBDC_FILTER_CLEAR_SIGNED 0
+
+/*
+       Register CR_PFIM_FBDC_TILE_TYPE
+*/
+#define CR_PFIM_FBDC_TILE_TYPE 0x0058
+#define CR_PFIM_FBDC_TILE_TYPE_MASK 0x00000003U
+#define CR_PFIM_FBDC_TILE_TYPE_SHIFT 0
+#define CR_PFIM_FBDC_TILE_TYPE_SIGNED 0
+
+/*
+       Register CR_PFIM_FBDC_CLEAR_COLOUR_LSB
+*/
+#define CR_PFIM_FBDC_CLEAR_COLOUR_LSB 0x005C
+#define CR_PFIM_FBDC_CLEAR_COLOUR_LSB_MASK 0xFFFFFFFFU
+#define CR_PFIM_FBDC_CLEAR_COLOUR_LSB_SHIFT 0
+#define CR_PFIM_FBDC_CLEAR_COLOUR_LSB_SIGNED 0
+
+/*
+       Register CR_PFIM_FBDC_CLEAR_COLOUR_MSB
+*/
+#define CR_PFIM_FBDC_CLEAR_COLOUR_MSB 0x0060
+#define CR_PFIM_FBDC_CLEAR_COLOUR_MSB_MASK 0xFFFFFFFFU
+#define CR_PFIM_FBDC_CLEAR_COLOUR_MSB_SHIFT 0
+#define CR_PFIM_FBDC_CLEAR_COLOUR_MSB_SIGNED 0
+
+/*
+       Register CR_PFIM_FBDC_REQ_LOSSY
+*/
+#define CR_PFIM_FBDC_REQ_LOSSY 0x0064
+#define CR_PFIM_FBDC_REQ_LOSSY_MASK 0x00000001U
+#define CR_PFIM_FBDC_REQ_LOSSY_SHIFT 0
+#define CR_PFIM_FBDC_REQ_LOSSY_SIGNED 0
+
+#endif /* _PFIM_REGS_H_ */
+
+/******************************************************************************
+ End of file (pfim_regs.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/tcf_clk_ctrl.h b/drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/tcf_clk_ctrl.h
new file mode 100644 (file)
index 0000000..cc7b10f
--- /dev/null
@@ -0,0 +1,1018 @@
+/*************************************************************************/ /*!
+@Title          Test Chip Framework system control register definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Autogenerated C -- do not edit
+                Generated from: tcf_clk_ctrl.def
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_TCF_CLK_CTRL_H_)
+#define _TCF_CLK_CTRL_H_
+
+/*
+ * The following register definitions are valid if register 0x28 has value 0.
+ */
+
+/*
+       Register FPGA_ID_REG
+*/
+#define TCF_CLK_CTRL_FPGA_ID_REG            0x0000
+#define FPGA_ID_REG_CORE_CFG_MASK           0x0000FFFFU
+#define FPGA_ID_REG_CORE_CFG_SHIFT          0
+#define FPGA_ID_REG_CORE_CFG_SIGNED         0
+
+#define FPGA_ID_REG_CORE_ID_MASK            0xFFFF0000U
+#define FPGA_ID_REG_CORE_ID_SHIFT           16
+#define FPGA_ID_REG_CORE_ID_SIGNED          0
+
+/*
+       Register FPGA_REV_REG
+*/
+#define TCF_CLK_CTRL_FPGA_REV_REG           0x0008
+#define FPGA_REV_REG_MAINT_MASK             0x000000FFU
+#define FPGA_REV_REG_MAINT_SHIFT            0
+#define FPGA_REV_REG_MAINT_SIGNED           0
+
+#define FPGA_REV_REG_MINOR_MASK             0x0000FF00U
+#define FPGA_REV_REG_MINOR_SHIFT            8
+#define FPGA_REV_REG_MINOR_SIGNED           0
+
+#define FPGA_REV_REG_MAJOR_MASK             0x00FF0000U
+#define FPGA_REV_REG_MAJOR_SHIFT            16
+#define FPGA_REV_REG_MAJOR_SIGNED           0
+
+#define FPGA_REV_REG_DESIGNER_MASK          0xFF000000U
+#define FPGA_REV_REG_DESIGNER_SHIFT         24
+#define FPGA_REV_REG_DESIGNER_SIGNED        0
+
+/*
+       Register FPGA_DES_REV_1
+*/
+#define TCF_CLK_CTRL_FPGA_DES_REV_1         0x0010
+#define FPGA_DES_REV_1_MASK                 0xFFFFFFFFU
+#define FPGA_DES_REV_1_SHIFT                0
+#define FPGA_DES_REV_1_SIGNED               0
+
+/*
+       Register FPGA_DES_REV_2
+*/
+#define TCF_CLK_CTRL_FPGA_DES_REV_2         0x0018
+#define FPGA_DES_REV_2_MASK                 0xFFFFFFFFU
+#define FPGA_DES_REV_2_SHIFT                0
+#define FPGA_DES_REV_2_SIGNED               0
+
+/*
+       Register TCF_CORE_ID_REG
+*/
+#define TCF_CLK_CTRL_TCF_CORE_ID_REG        0x0020
+#define TCF_CORE_ID_REG_CORE_CFG_MASK       0x0000FFFFU
+#define TCF_CORE_ID_REG_CORE_CFG_SHIFT      0
+#define TCF_CORE_ID_REG_CORE_CFG_SIGNED     0
+
+#define TCF_CORE_ID_REG_CORE_ID_MASK        0xFFFF0000U
+#define TCF_CORE_ID_REG_CORE_ID_SHIFT       16
+#define TCF_CORE_ID_REG_CORE_ID_SIGNED      0
+
+/*
+       Register TCF_CORE_REV_REG
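These limits bound the usual Xilinx MMCM clock chain. The header only provides the limits; the PFD = Fin/D, VCO = Fin*M/D, Fout = VCO/O relationship is standard MMCM behaviour and is assumed here. A purely illustrative checker with made-up parameter names:

#include <stdbool.h>
#include <stdint.h>
#include "orion_defs.h"

/* d = input divider, m = feedback multiplier, o = output divider (all > 0). */
static bool srs_clk_cfg_valid(uint64_t fin, uint32_t d, uint32_t m, uint32_t o)
{
	uint64_t pfd  = fin / d;        /* phase-frequency detector input */
	uint64_t vco  = (fin * m) / d;  /* internal VCO frequency         */
	uint64_t fout = vco / o;        /* resulting output clock         */

	return fin  >= SRS_INPUT_CLOCK_SPEED_MIN  && fin  <= SRS_INPUT_CLOCK_SPEED_MAX &&
	       pfd  >= SRS_PFD_MIN                && pfd  <= SRS_PFD_MAX &&
	       vco  >= SRS_VCO_MIN                && vco  <= SRS_VCO_MAX &&
	       fout >= SRS_OUTPUT_CLOCK_SPEED_MIN && fout <= SRS_OUTPUT_CLOCK_SPEED_MAX;
}

/* Example: the 100 MHz reference with d=1, m=10, o=2 gives a 1 GHz VCO and a
 * 500 MHz output, which sits inside every limit above.
 */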
+*/
+#define TCF_CLK_CTRL_TCF_CORE_REV_REG       0x0028
+#define TCF_CORE_REV_REG_MAINT_MASK         0x000000FFU
+#define TCF_CORE_REV_REG_MAINT_SHIFT        0
+#define TCF_CORE_REV_REG_MAINT_SIGNED       0
+
+#define TCF_CORE_REV_REG_MINOR_MASK         0x0000FF00U
+#define TCF_CORE_REV_REG_MINOR_SHIFT        8
+#define TCF_CORE_REV_REG_MINOR_SIGNED       0
+
+#define TCF_CORE_REV_REG_MAJOR_MASK         0x00FF0000U
+#define TCF_CORE_REV_REG_MAJOR_SHIFT        16
+#define TCF_CORE_REV_REG_MAJOR_SIGNED       0
+
+#define TCF_CORE_REV_REG_DESIGNER_MASK      0xFF000000U
+#define TCF_CORE_REV_REG_DESIGNER_SHIFT     24
+#define TCF_CORE_REV_REG_DESIGNER_SIGNED    0
+
+/*
+       Register TCF_CORE_DES_REV_1
+*/
+#define TCF_CLK_CTRL_TCF_CORE_DES_REV_1     0x0030
+#define TCF_CORE_DES_REV_1_MASK             0xFFFFFFFFU
+#define TCF_CORE_DES_REV_1_SHIFT            0
+#define TCF_CORE_DES_REV_1_SIGNED           0
+
+/*
+       Register TCF_CORE_DES_REV_2
+*/
+#define TCF_CLK_CTRL_TCF_CORE_DES_REV_2     0x0038
+#define TCF_CORE_DES_REV_2_MASK             0xFFFFFFFFU
+#define TCF_CORE_DES_REV_2_SHIFT            0
+#define TCF_CORE_DES_REV_2_SIGNED           0
+
+
+/*
+ * The following register definitions are valid if register 0x28 has value 1.
+ */
+
+/*
+       Register ID
+*/
+#define TCF_CLK_CTRL_ID                     0x0000
+#define VARIANT_MASK                        0x0000FFFFU
+#define VARIANT_SHIFT                       0
+#define VARIANT_SIGNED                      0
+
+#define ID_MASK                             0xFFFF0000U
+#define ID_SHIFT                            16
+#define ID_SIGNED                           0
+
+/*
+       Register REL
+*/
+#define TCF_CLK_CTRL_REL                    0x0008
+#define MINOR_MASK                          0x0000FFFFU
+#define MINOR_SHIFT                         0
+#define MINOR_SIGNED                        0
+
+#define MAJOR_MASK                          0xFFFF0000U
+#define MAJOR_SHIFT                         16
+#define MAJOR_SIGNED                        0
+
+/*
+       Register CHANGE_SET
+*/
+#define TCF_CLK_CTRL_CHANGE_SET             0x0010
+#define SET_MASK                            0xFFFFFFFFU
+#define SET_SHIFT                           0
+#define SET_SIGNED                          0
+
+/*
+       Register USER_ID
+*/
+#define TCF_CLK_CTRL_USER_ID                0x0018
+#define USER_ID_MASK                        0x0000000FU
+#define USER_ID_SHIFT                       0
+#define USER_ID_SIGNED                      0
+
+/*
+       Register USER_BUILD
+*/
+#define TCF_CLK_CTRL_USER_BUILD             0x0020
+#define BUILD_MASK                          0xFFFFFFFFU
+#define BUILD_SHIFT                         0
+#define BUILD_SIGNED                        0
+
+/*
+       Register SW_IF_VERSION
+*/
+#define TCF_CLK_CTRL_SW_IF_VERSION          0x0028
+#define VERSION_MASK                        0x0000FFFFU
+#define VERSION_SHIFT                       0
+#define VERSION_SIGNED                      0
+
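The two comments above ("valid if register 0x28 has value 0/1") describe a layout switch keyed off offset 0x28. The sketch below shows how a host driver might test for the newer layout; it is illustrative only, assumes tcf_regs is a hypothetical __iomem mapping of this bank, and reads the comments as meaning the VERSION field selects the map.

#include <linux/io.h>
#include <linux/types.h>
#include "tcf_clk_ctrl.h"

static bool tcf_uses_new_layout(void __iomem *tcf_regs)
{
	u32 ver = readl(tcf_regs + TCF_CLK_CTRL_SW_IF_VERSION);

	ver = (ver & VERSION_MASK) >> VERSION_SHIFT;

	/* 0: ID/revision live at FPGA_ID_REG/FPGA_REV_REG; 1: at ID/REL. */
	return ver == 1;
}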
+/*
+ * The following register definitions are valid for all Apollo builds,
+ * even if some of the registers are not available for certain cores.
+ */
+
+/*
+       Register SCB_GENERAL_CONTROL
+*/
+#define TCF_CLK_CTRL_SCB_GENERAL_CONTROL    0x0040
+#define SCB_GC_TRANS_HALT_MASK              0x00000200U
+#define SCB_GC_TRANS_HALT_SHIFT             9
+#define SCB_GC_TRANS_HALT_SIGNED            0
+
+#define SCB_GC_CKD_REGS_MASK                0x00000100U
+#define SCB_GC_CKD_REGS_SHIFT               8
+#define SCB_GC_CKD_REGS_SIGNED              0
+
+#define SCB_GC_CKD_SLAVE_MASK               0x00000080U
+#define SCB_GC_CKD_SLAVE_SHIFT              7
+#define SCB_GC_CKD_SLAVE_SIGNED             0
+
+#define SCB_GC_CKD_MASTER_MASK              0x00000040U
+#define SCB_GC_CKD_MASTER_SHIFT             6
+#define SCB_GC_CKD_MASTER_SIGNED            0
+
+#define SCB_GC_CKD_XDATA_MASK               0x00000020U
+#define SCB_GC_CKD_XDATA_SHIFT              5
+#define SCB_GC_CKD_XDATA_SIGNED             0
+
+#define SCB_GC_SFR_REG_MASK                 0x00000010U
+#define SCB_GC_SFR_REG_SHIFT                4
+#define SCB_GC_SFR_REG_SIGNED               0
+
+#define SCB_GC_SFR_SLAVE_MASK               0x00000008U
+#define SCB_GC_SFR_SLAVE_SHIFT              3
+#define SCB_GC_SFR_SLAVE_SIGNED             0
+
+#define SCB_GC_SFR_MASTER_MASK              0x00000004U
+#define SCB_GC_SFR_MASTER_SHIFT             2
+#define SCB_GC_SFR_MASTER_SIGNED            0
+
+#define SCB_GC_SFR_DET_DATA_MASK            0x00000002U
+#define SCB_GC_SFR_DET_DATA_SHIFT           1
+#define SCB_GC_SFR_DET_DATA_SIGNED          0
+
+#define SCB_GC_SFR_GEN_DATA_MASK            0x00000001U
+#define SCB_GC_SFR_GEN_DATA_SHIFT           0
+#define SCB_GC_SFR_GEN_DATA_SIGNED          0
+
+/*
+       Register SCB_MASTER_READ_COUNT
+*/
+#define TCF_CLK_CTRL_SCB_MASTER_READ_COUNT  0x0048
+#define MASTER_READ_COUNT_MASK              0x0000FFFFU
+#define MASTER_READ_COUNT_SHIFT             0
+#define MASTER_READ_COUNT_SIGNED            0
+
+/*
+       Register SCB_MASTER_READ_DATA
+*/
+#define TCF_CLK_CTRL_SCB_MASTER_READ_DATA   0x0050
+#define MASTER_READ_DATA_MASK               0x000000FFU
+#define MASTER_READ_DATA_SHIFT              0
+#define MASTER_READ_DATA_SIGNED             0
+
+/*
+       Register SCB_MASTER_ADDRESS
+*/
+#define TCF_CLK_CTRL_SCB_MASTER_ADDRESS     0x0058
+#define SCB_MASTER_ADDRESS_MASK             0x000003FFU
+#define SCB_MASTER_ADDRESS_SHIFT            0
+#define SCB_MASTER_ADDRESS_SIGNED           0
+
+/*
+       Register SCB_MASTER_WRITE_DATA
+*/
+#define TCF_CLK_CTRL_SCB_MASTER_WRITE_DATA  0x0060
+#define MASTER_WRITE_DATA_MASK              0x000000FFU
+#define MASTER_WRITE_DATA_SHIFT             0
+#define MASTER_WRITE_DATA_SIGNED            0
+
+/*
+       Register SCB_MASTER_WRITE_COUNT
+*/
+#define TCF_CLK_CTRL_SCB_MASTER_WRITE_COUNT 0x0068
+#define MASTER_WRITE_COUNT_MASK             0x0000FFFFU
+#define MASTER_WRITE_COUNT_SHIFT            0
+#define MASTER_WRITE_COUNT_SIGNED           0
+
+/*
+       Register SCB_BUS_SELECT
+*/
+#define TCF_CLK_CTRL_SCB_BUS_SELECT         0x0070
+#define BUS_SELECT_MASK                     0x00000003U
+#define BUS_SELECT_SHIFT                    0
+#define BUS_SELECT_SIGNED                   0
+
+/*
+       Register SCB_MASTER_FILL_STATUS
+*/
+#define TCF_CLK_CTRL_SCB_MASTER_FILL_STATUS 0x0078
+#define MASTER_WRITE_FIFO_EMPTY_MASK        0x00000008U
+#define MASTER_WRITE_FIFO_EMPTY_SHIFT       3
+#define MASTER_WRITE_FIFO_EMPTY_SIGNED      0
+
+#define MASTER_WRITE_FIFO_FULL_MASK         0x00000004U
+#define MASTER_WRITE_FIFO_FULL_SHIFT        2
+#define MASTER_WRITE_FIFO_FULL_SIGNED       0
+
+#define MASTER_READ_FIFO_EMPTY_MASK         0x00000002U
+#define MASTER_READ_FIFO_EMPTY_SHIFT        1
+#define MASTER_READ_FIFO_EMPTY_SIGNED       0
+
+#define MASTER_READ_FIFO_FULL_MASK          0x00000001U
+#define MASTER_READ_FIFO_FULL_SHIFT         0
+#define MASTER_READ_FIFO_FULL_SIGNED        0
+
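The SCB master registers above form a small FIFO-based interface. The exact handshaking is not documented in this header, so the following is only an illustrative guess at how the fill-status bits could gate writes into the master FIFO; tcf_regs is again a hypothetical __iomem mapping of this bank.

#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/types.h>
#include "tcf_clk_ctrl.h"

static int scb_master_push_byte(void __iomem *tcf_regs, u8 byte)
{
	u32 fill;
	int err;

	/* Wait up to 1 ms for space in the master write FIFO. */
	err = readl_poll_timeout(tcf_regs + TCF_CLK_CTRL_SCB_MASTER_FILL_STATUS,
				 fill, !(fill & MASTER_WRITE_FIFO_FULL_MASK),
				 10, 1000);
	if (err)
		return err;

	writel(byte, tcf_regs + TCF_CLK_CTRL_SCB_MASTER_WRITE_DATA);
	return 0;
}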
+/*
+       Register CLK_AND_RST_CTRL
+*/
+#define TCF_CLK_CTRL_CLK_AND_RST_CTRL       0x0080
+#define GLB_CLKG_EN_MASK                    0x00020000U
+#define GLB_CLKG_EN_SHIFT                   17
+#define GLB_CLKG_EN_SIGNED                  0
+
+#define CLK_GATE_CNTL_MASK                  0x00010000U
+#define CLK_GATE_CNTL_SHIFT                 16
+#define CLK_GATE_CNTL_SIGNED                0
+
+#define DUT_DCM_RESETN_MASK                 0x00000400U
+#define DUT_DCM_RESETN_SHIFT                10
+#define DUT_DCM_RESETN_SIGNED               0
+
+#define MEM_RESYNC_BYPASS_MASK              0x00000200U
+#define MEM_RESYNC_BYPASS_SHIFT             9
+#define MEM_RESYNC_BYPASS_SIGNED            0
+
+#define SYS_RESYNC_BYPASS_MASK              0x00000100U
+#define SYS_RESYNC_BYPASS_SHIFT             8
+#define SYS_RESYNC_BYPASS_SIGNED            0
+
+#define SCB_RESETN_MASK                     0x00000010U
+#define SCB_RESETN_SHIFT                    4
+#define SCB_RESETN_SIGNED                   0
+
+#define PDP2_RESETN_MASK                    0x00000008U
+#define PDP2_RESETN_SHIFT                   3
+#define PDP2_RESETN_SIGNED                  0
+
+#define PDP1_RESETN_MASK                    0x00000004U
+#define PDP1_RESETN_SHIFT                   2
+#define PDP1_RESETN_SIGNED                  0
+
+#define DDR_RESETN_MASK                     0x00000002U
+#define DDR_RESETN_SHIFT                    1
+#define DDR_RESETN_SIGNED                   0
+
+#define DUT_RESETN_MASK                     0x00000001U
+#define DUT_RESETN_SHIFT                    0
+#define DUT_RESETN_SIGNED                   0
+
+/*
+       Register TEST_REG_OUT
+*/
+#define TCF_CLK_CTRL_TEST_REG_OUT           0x0088
+#define TEST_REG_OUT_MASK                   0xFFFFFFFFU
+#define TEST_REG_OUT_SHIFT                  0
+#define TEST_REG_OUT_SIGNED                 0
+
+/*
+       Register TEST_REG_IN
+*/
+#define TCF_CLK_CTRL_TEST_REG_IN            0x0090
+#define TEST_REG_IN_MASK                    0xFFFFFFFFU
+#define TEST_REG_IN_SHIFT                   0
+#define TEST_REG_IN_SIGNED                  0
+
+/*
+       Register TEST_CTRL
+*/
+#define TCF_CLK_CTRL_TEST_CTRL              0x0098
+#define PCI_TEST_OFFSET_MASK                0xF8000000U
+#define PCI_TEST_OFFSET_SHIFT               27
+#define PCI_TEST_OFFSET_SIGNED              0
+
+#define PDP1_HOST_MEM_SELECT_MASK           0x00000200U
+#define PDP1_HOST_MEM_SELECT_SHIFT          9
+#define PDP1_HOST_MEM_SELECT_SIGNED         0
+
+#define HOST_PHY_MODE_MASK                  0x00000100U
+#define HOST_PHY_MODE_SHIFT                 8
+#define HOST_PHY_MODE_SIGNED                0
+
+#define HOST_ONLY_MODE_MASK                 0x00000080U
+#define HOST_ONLY_MODE_SHIFT                7
+#define HOST_ONLY_MODE_SIGNED               0
+
+#define PCI_TEST_MODE_MASK                  0x00000040U
+#define PCI_TEST_MODE_SHIFT                 6
+#define PCI_TEST_MODE_SIGNED                0
+
+#define TURN_OFF_DDR_MASK                   0x00000020U
+#define TURN_OFF_DDR_SHIFT                  5
+#define TURN_OFF_DDR_SIGNED                 0
+
+#define SYS_RD_CLK_INV_MASK                 0x00000010U
+#define SYS_RD_CLK_INV_SHIFT                4
+#define SYS_RD_CLK_INV_SIGNED               0
+
+#define MEM_REQ_CLK_INV_MASK                0x00000008U
+#define MEM_REQ_CLK_INV_SHIFT               3
+#define MEM_REQ_CLK_INV_SIGNED              0
+
+#define BURST_SPLIT_MASK                    0x00000004U
+#define BURST_SPLIT_SHIFT                   2
+#define BURST_SPLIT_SIGNED                  0
+
+#define CLK_INVERSION_MASK                  0x00000002U
+#define CLK_INVERSION_SHIFT                 1
+#define CLK_INVERSION_SIGNED                0
+
+#define ADDRESS_FORCE_MASK                  0x00000001U
+#define ADDRESS_FORCE_SHIFT                 0
+#define ADDRESS_FORCE_SIGNED                0
+
+/*
+       Register CLEAR_HOST_MEM_SIG
+*/
+#define TCF_CLK_CTRL_CLEAR_HOST_MEM_SIG     0x00A0
+#define SIGNATURE_TAG_ID_MASK               0x00000F00U
+#define SIGNATURE_TAG_ID_SHIFT              8
+#define SIGNATURE_TAG_ID_SIGNED             0
+
+#define CLEAR_HOST_MEM_SIGNATURE_MASK       0x00000001U
+#define CLEAR_HOST_MEM_SIGNATURE_SHIFT      0
+#define CLEAR_HOST_MEM_SIGNATURE_SIGNED     0
+
+/*
+       Register HOST_MEM_SIGNATURE
+*/
+#define TCF_CLK_CTRL_HOST_MEM_SIGNATURE     0x00A8
+#define HOST_MEM_SIGNATURE_MASK             0xFFFFFFFFU
+#define HOST_MEM_SIGNATURE_SHIFT            0
+#define HOST_MEM_SIGNATURE_SIGNED           0
+
+/*
+       Register INTERRUPT_STATUS
+*/
+#define TCF_CLK_CTRL_INTERRUPT_STATUS       0x00C8
+#define INTERRUPT_MASTER_STATUS_MASK        0x80000000U
+#define INTERRUPT_MASTER_STATUS_SHIFT       31
+#define INTERRUPT_MASTER_STATUS_SIGNED      0
+
+#define OTHER_INTS_MASK                     0x7FFE0000U
+#define OTHER_INTS_SHIFT                    17
+#define OTHER_INTS_SIGNED                   0
+
+#define HOST_MST_NORESPONSE_MASK            0x00010000U
+#define HOST_MST_NORESPONSE_SHIFT           16
+#define HOST_MST_NORESPONSE_SIGNED          0
+
+#define PDP2_INT_MASK                       0x00008000U
+#define PDP2_INT_SHIFT                      15
+#define PDP2_INT_SIGNED                     0
+
+#define PDP1_INT_MASK                       0x00004000U
+#define PDP1_INT_SHIFT                      14
+#define PDP1_INT_SIGNED                     0
+
+#define EXT_INT_MASK                        0x00002000U
+#define EXT_INT_SHIFT                       13
+#define EXT_INT_SIGNED                      0
+
+#define SCB_MST_HLT_BIT_MASK                0x00001000U
+#define SCB_MST_HLT_BIT_SHIFT               12
+#define SCB_MST_HLT_BIT_SIGNED              0
+
+#define SCB_SLV_EVENT_MASK                  0x00000800U
+#define SCB_SLV_EVENT_SHIFT                 11
+#define SCB_SLV_EVENT_SIGNED                0
+
+#define SCB_TDONE_RX_MASK                   0x00000400U
+#define SCB_TDONE_RX_SHIFT                  10
+#define SCB_TDONE_RX_SIGNED                 0
+
+#define SCB_SLV_WT_RD_DAT_MASK              0x00000200U
+#define SCB_SLV_WT_RD_DAT_SHIFT             9
+#define SCB_SLV_WT_RD_DAT_SIGNED            0
+
+#define SCB_SLV_WT_PRV_RD_MASK              0x00000100U
+#define SCB_SLV_WT_PRV_RD_SHIFT             8
+#define SCB_SLV_WT_PRV_RD_SIGNED            0
+
+#define SCB_SLV_WT_WR_DAT_MASK              0x00000080U
+#define SCB_SLV_WT_WR_DAT_SHIFT             7
+#define SCB_SLV_WT_WR_DAT_SIGNED            0
+
+#define SCB_MST_WT_RD_DAT_MASK              0x00000040U
+#define SCB_MST_WT_RD_DAT_SHIFT             6
+#define SCB_MST_WT_RD_DAT_SIGNED            0
+
+#define SCB_ADD_ACK_ERR_MASK                0x00000020U
+#define SCB_ADD_ACK_ERR_SHIFT               5
+#define SCB_ADD_ACK_ERR_SIGNED              0
+
+#define SCB_WR_ACK_ERR_MASK                 0x00000010U
+#define SCB_WR_ACK_ERR_SHIFT                4
+#define SCB_WR_ACK_ERR_SIGNED               0
+
+#define SCB_SDAT_LO_TIM_MASK                0x00000008U
+#define SCB_SDAT_LO_TIM_SHIFT               3
+#define SCB_SDAT_LO_TIM_SIGNED              0
+
+#define SCB_SCLK_LO_TIM_MASK                0x00000004U
+#define SCB_SCLK_LO_TIM_SHIFT               2
+#define SCB_SCLK_LO_TIM_SIGNED              0
+
+#define SCB_UNEX_START_BIT_MASK             0x00000002U
+#define SCB_UNEX_START_BIT_SHIFT            1
+#define SCB_UNEX_START_BIT_SIGNED           0
+
+#define SCB_BUS_INACTIVE_MASK               0x00000001U
+#define SCB_BUS_INACTIVE_SHIFT              0
+#define SCB_BUS_INACTIVE_SIGNED             0
+
+/*
+       Register INTERRUPT_OP_CFG
+*/
+#define TCF_CLK_CTRL_INTERRUPT_OP_CFG       0x00D0
+#define PULSE_NLEVEL_MASK                   0x80000000U
+#define PULSE_NLEVEL_SHIFT                  31
+#define PULSE_NLEVEL_SIGNED                 0
+
+#define INT_SENSE_MASK                      0x40000000U
+#define INT_SENSE_SHIFT                     30
+#define INT_SENSE_SIGNED                    0
+
+#define INTERRUPT_DEST_MASK                 0x0000000FU
+#define INTERRUPT_DEST_SHIFT                0
+#define INTERRUPT_DEST_SIGNED               0
+
+/*
+       Register INTERRUPT_ENABLE
+*/
+#define TCF_CLK_CTRL_INTERRUPT_ENABLE       0x00D8
+#define INTERRUPT_MASTER_ENABLE_MASK        0x80000000U
+#define INTERRUPT_MASTER_ENABLE_SHIFT       31
+#define INTERRUPT_MASTER_ENABLE_SIGNED      0
+
+#define INTERRUPT_ENABLE_MASK               0x7FFFFFFFU
+#define INTERRUPT_ENABLE_SHIFT              0
+#define INTERRUPT_ENABLE_SIGNED             0
+
+/*
+       Register INTERRUPT_CLEAR
+*/
+#define TCF_CLK_CTRL_INTERRUPT_CLEAR        0x00E0
+#define INTERRUPT_MASTER_CLEAR_MASK         0x80000000U
+#define INTERRUPT_MASTER_CLEAR_SHIFT        31
+#define INTERRUPT_MASTER_CLEAR_SIGNED       0
+
+#define INTERRUPT_CLEAR_MASK                0x7FFFFFFFU
+#define INTERRUPT_CLEAR_SHIFT               0
+#define INTERRUPT_CLEAR_SIGNED              0
+
+/*
+       Register YCC_RGB_CTRL
+*/
+#define TCF_CLK_CTRL_YCC_RGB_CTRL           0x00E8
+#define RGB_CTRL1_MASK                      0x000001FFU
+#define RGB_CTRL1_SHIFT                     0
+#define RGB_CTRL1_SIGNED                    0
+
+#define RGB_CTRL2_MASK                      0x01FF0000U
+#define RGB_CTRL2_SHIFT                     16
+#define RGB_CTRL2_SIGNED                    0
+
+/*
+       Register EXP_BRD_CTRL
+*/
+#define TCF_CLK_CTRL_EXP_BRD_CTRL           0x00F8
+#define PDP1_DATA_EN_MASK                   0x00000003U
+#define PDP1_DATA_EN_SHIFT                  0
+#define PDP1_DATA_EN_SIGNED                 0
+
+#define PDP2_DATA_EN_MASK                   0x00000030U
+#define PDP2_DATA_EN_SHIFT                  4
+#define PDP2_DATA_EN_SIGNED                 0
+
+#define EXP_BRD_OUTPUT_MASK                 0xFFFFFF00U
+#define EXP_BRD_OUTPUT_SHIFT                8
+#define EXP_BRD_OUTPUT_SIGNED               0
+
+/*
+       Register HOSTIF_CONTROL
+*/
+#define TCF_CLK_CTRL_HOSTIF_CONTROL         0x0100
+#define HOSTIF_CTRL_MASK                    0x000000FFU
+#define HOSTIF_CTRL_SHIFT                   0
+#define HOSTIF_CTRL_SIGNED                  0
+
+/*
+       Register DUT_CONTROL_1
+*/
+#define TCF_CLK_CTRL_DUT_CONTROL_1          0x0108
+#define DUT_CTRL_1_MASK                     0xFFFFFFFFU
+#define DUT_CTRL_1_SHIFT                    0
+#define DUT_CTRL_1_SIGNED                   0
+
+/* TC ES2 additionally needs these: */
+#define DUT_CTRL_TEST_MODE_SHIFT            0
+#define DUT_CTRL_TEST_MODE_MASK             0x3
+
+#define DUT_CTRL_VCC_0V9EN                  (1<<12)
+#define DUT_CTRL_VCC_1V8EN                  (1<<13)
+#define DUT_CTRL_VCC_IO_INH                 (1<<14)
+#define DUT_CTRL_VCC_CORE_INH               (1<<15)
+
+/*
+       Register DUT_STATUS_1
+*/
+#define TCF_CLK_CTRL_DUT_STATUS_1           0x0110
+#define DUT_STATUS_1_MASK                   0xFFFFFFFFU
+#define DUT_STATUS_1_SHIFT                  0
+#define DUT_STATUS_1_SIGNED                 0
+
+/*
+       Register DUT_CTRL_NOT_STAT_1
+*/
+#define TCF_CLK_CTRL_DUT_CTRL_NOT_STAT_1    0x0118
+#define DUT_STAT_NOT_CTRL_1_MASK            0xFFFFFFFFU
+#define DUT_STAT_NOT_CTRL_1_SHIFT           0
+#define DUT_STAT_NOT_CTRL_1_SIGNED          0
+
+/*
+       Register DUT_CONTROL_2
+*/
+#define TCF_CLK_CTRL_DUT_CONTROL_2          0x0120
+#define DUT_CTRL_2_MASK                     0xFFFFFFFFU
+#define DUT_CTRL_2_SHIFT                    0
+#define DUT_CTRL_2_SIGNED                   0
+
+/*
+       Register DUT_STATUS_2
+*/
+#define TCF_CLK_CTRL_DUT_STATUS_2           0x0128
+#define DUT_STATUS_2_MASK                   0xFFFFFFFFU
+#define DUT_STATUS_2_SHIFT                  0
+#define DUT_STATUS_2_SIGNED                 0
+
+/*
+       Register DUT_CTRL_NOT_STAT_2
+*/
+#define TCF_CLK_CTRL_DUT_CTRL_NOT_STAT_2    0x0130
+#define DUT_CTRL_NOT_STAT_2_MASK            0xFFFFFFFFU
+#define DUT_CTRL_NOT_STAT_2_SHIFT           0
+#define DUT_CTRL_NOT_STAT_2_SIGNED          0
+
+/*
+       Register BUS_CAP_BASE_ADDR
+*/
+#define TCF_CLK_CTRL_BUS_CAP_BASE_ADDR      0x0138
+#define BUS_CAP_BASE_ADDR_MASK              0xFFFFFFFFU
+#define BUS_CAP_BASE_ADDR_SHIFT             0
+#define BUS_CAP_BASE_ADDR_SIGNED            0
+
+/*
+       Register BUS_CAP_ENABLE
+*/
+#define TCF_CLK_CTRL_BUS_CAP_ENABLE         0x0140
+#define BUS_CAP_ENABLE_MASK                 0x00000001U
+#define BUS_CAP_ENABLE_SHIFT                0
+#define BUS_CAP_ENABLE_SIGNED               0
+
+/*
+       Register BUS_CAP_COUNT
+*/
+#define TCF_CLK_CTRL_BUS_CAP_COUNT          0x0148
+#define BUS_CAP_COUNT_MASK                  0xFFFFFFFFU
+#define BUS_CAP_COUNT_SHIFT                 0
+#define BUS_CAP_COUNT_SIGNED                0
+
+/*
+       Register DCM_LOCK_STATUS
+*/
+#define TCF_CLK_CTRL_DCM_LOCK_STATUS        0x0150
+#define DCM_LOCK_STATUS_MASK                0x00000007U
+#define DCM_LOCK_STATUS_SHIFT               0
+#define DCM_LOCK_STATUS_SIGNED              0
+
+/*
+       Register AUX_DUT_RESETNS
+*/
+#define TCF_CLK_CTRL_AUX_DUT_RESETNS        0x0158
+#define AUX_DUT_RESETNS_MASK                0x0000000FU
+#define AUX_DUT_RESETNS_SHIFT               0
+#define AUX_DUT_RESETNS_SIGNED              0
+
+/*
+       Register TCF_SPI_MST_ADDR_RDNWR
+*/
+#define TCF_CLK_CTRL_TCF_SPI_MST_ADDR_RDNWR 0x0160
+#define TCF_SPI_MST_ADDR_MASK               0x0003FFFFU
+#define TCF_SPI_MST_ADDR_SHIFT              0
+#define TCF_SPI_MST_ADDR_SIGNED             0
+
+#define TCF_SPI_MST_RDNWR_MASK              0x00040000U
+#define TCF_SPI_MST_RDNWR_SHIFT             18
+#define TCF_SPI_MST_RDNWR_SIGNED            0
+
+#define TCF_SPI_MST_SLAVE_ID_MASK           0x00080000U
+#define TCF_SPI_MST_SLAVE_ID_SHIFT          19
+#define TCF_SPI_MST_SLAVE_ID_SIGNED         0
+
+#define TCF_SPI_MST_MASTER_ID_MASK          0x00300000U
+#define TCF_SPI_MST_MASTER_ID_SHIFT         20
+#define TCF_SPI_MST_MASTER_ID_SIGNED        0
+
+/*
+       Register TCF_SPI_MST_WDATA
+*/
+#define TCF_CLK_CTRL_TCF_SPI_MST_WDATA      0x0168
+#define TCF_SPI_MST_WDATA_MASK              0xFFFFFFFFU
+#define TCF_SPI_MST_WDATA_SHIFT             0
+#define TCF_SPI_MST_WDATA_SIGNED            0
+
+/*
+       Register TCF_SPI_MST_RDATA
+*/
+#define TCF_CLK_CTRL_TCF_SPI_MST_RDATA      0x0170
+#define TCF_SPI_MST_RDATA_MASK              0xFFFFFFFFU
+#define TCF_SPI_MST_RDATA_SHIFT             0
+#define TCF_SPI_MST_RDATA_SIGNED            0
+
+/*
+       Register TCF_SPI_MST_STATUS
+*/
+#define TCF_CLK_CTRL_TCF_SPI_MST_STATUS     0x0178
+#define TCF_SPI_MST_STATUS_MASK             0x0000000FU
+#define TCF_SPI_MST_STATUS_SHIFT            0
+#define TCF_SPI_MST_STATUS_SIGNED           0
+
+/*
+       Register TCF_SPI_MST_GO
+*/
+#define TCF_CLK_CTRL_TCF_SPI_MST_GO         0x0180
+#define TCF_SPI_MST_GO_MASK                 0x00000001U
+#define TCF_SPI_MST_GO_SHIFT                0
+#define TCF_SPI_MST_GO_SIGNED               0
+
+/*
+       Register EXT_SIG_CTRL
+*/
+#define TCF_CLK_CTRL_EXT_SIG_CTRL           0x0188
+#define EXT_SYS_REQ_SIG_START_MASK          0x00000001U
+#define EXT_SYS_REQ_SIG_START_SHIFT         0
+#define EXT_SYS_REQ_SIG_START_SIGNED        0
+
+#define EXT_SYS_RD_SIG_START_MASK           0x00000002U
+#define EXT_SYS_RD_SIG_START_SHIFT          1
+#define EXT_SYS_RD_SIG_START_SIGNED         0
+
+#define EXT_MEM_REQ_SIG_START_MASK          0x00000004U
+#define EXT_MEM_REQ_SIG_START_SHIFT         2
+#define EXT_MEM_REQ_SIG_START_SIGNED        0
+
+#define EXT_MEM_RD_SIG_START_MASK           0x00000008U
+#define EXT_MEM_RD_SIG_START_SHIFT          3
+#define EXT_MEM_RD_SIG_START_SIGNED         0
+
+/*
+       Register EXT_SYS_REQ_SIG
+*/
+#define TCF_CLK_CTRL_EXT_SYS_REQ_SIG        0x0190
+#define EXT_SYS_REQ_SIG_MASK                0xFFFFFFFFU
+#define EXT_SYS_REQ_SIG_SHIFT               0
+#define EXT_SYS_REQ_SIG_SIGNED              0
+
+/*
+       Register EXT_SYS_RD_SIG
+*/
+#define TCF_CLK_CTRL_EXT_SYS_RD_SIG         0x0198
+#define EXT_SYS_RD_SIG_MASK                 0xFFFFFFFFU
+#define EXT_SYS_RD_SIG_SHIFT                0
+#define EXT_SYS_RD_SIG_SIGNED               0
+
+/*
+       Register EXT_MEM_REQ_SIG
+*/
+#define TCF_CLK_CTRL_EXT_MEM_REQ_SIG        0x01A0
+#define EXT_MEM_REQ_SIG_MASK                0xFFFFFFFFU
+#define EXT_MEM_REQ_SIG_SHIFT               0
+#define EXT_MEM_REQ_SIG_SIGNED              0
+
+/*
+       Register EXT_MEM_RD_SIG
+*/
+#define TCF_CLK_CTRL_EXT_MEM_RD_SIG         0x01A8
+#define EXT_MEM_RD_SIG_MASK                 0xFFFFFFFFU
+#define EXT_MEM_RD_SIG_SHIFT                0
+#define EXT_MEM_RD_SIG_SIGNED               0
+
+/*
+       Register EXT_SYS_REQ_WR_CNT
+*/
+#define TCF_CLK_CTRL_EXT_SYS_REQ_WR_CNT     0x01B0
+#define EXT_SYS_REQ_WR_CNT_MASK             0xFFFFFFFFU
+#define EXT_SYS_REQ_WR_CNT_SHIFT            0
+#define EXT_SYS_REQ_WR_CNT_SIGNED           0
+
+/*
+       Register EXT_SYS_REQ_RD_CNT
+*/
+#define TCF_CLK_CTRL_EXT_SYS_REQ_RD_CNT     0x01B8
+#define EXT_SYS_REQ_RD_CNT_MASK             0xFFFFFFFFU
+#define EXT_SYS_REQ_RD_CNT_SHIFT            0
+#define EXT_SYS_REQ_RD_CNT_SIGNED           0
+
+/*
+       Register EXT_SYS_RD_CNT
+*/
+#define TCF_CLK_CTRL_EXT_SYS_RD_CNT         0x01C0
+#define EXT_SYS_RD_CNT_MASK                 0xFFFFFFFFU
+#define EXT_SYS_RD_CNT_SHIFT                0
+#define EXT_SYS_RD_CNT_SIGNED               0
+
+/*
+       Register EXT_MEM_REQ_WR_CNT
+*/
+#define TCF_CLK_CTRL_EXT_MEM_REQ_WR_CNT     0x01C8
+#define EXT_MEM_REQ_WR_CNT_MASK             0xFFFFFFFFU
+#define EXT_MEM_REQ_WR_CNT_SHIFT            0
+#define EXT_MEM_REQ_WR_CNT_SIGNED           0
+
+/*
+       Register EXT_MEM_REQ_RD_CNT
+*/
+#define TCF_CLK_CTRL_EXT_MEM_REQ_RD_CNT     0x01D0
+#define EXT_MEM_REQ_RD_CNT_MASK             0xFFFFFFFFU
+#define EXT_MEM_REQ_RD_CNT_SHIFT            0
+#define EXT_MEM_REQ_RD_CNT_SIGNED           0
+
+/*
+       Register EXT_MEM_RD_CNT
+*/
+#define TCF_CLK_CTRL_EXT_MEM_RD_CNT         0x01D8
+#define EXT_MEM_RD_CNT_MASK                 0xFFFFFFFFU
+#define EXT_MEM_RD_CNT_SHIFT                0
+#define EXT_MEM_RD_CNT_SIGNED               0
+
+/*
+       Register TCF_CORE_TARGET_BUILD_CFG
+*/
+#define TCF_CLK_CTRL_TCF_CORE_TARGET_BUILD_CFG 0x01E0
+#define TCF_CORE_TARGET_BUILD_ID_MASK       0x000000FFU
+#define TCF_CORE_TARGET_BUILD_ID_SHIFT      0
+#define TCF_CORE_TARGET_BUILD_ID_SIGNED     0
+
+/*
+       Register MEM_THROUGH_SYS
+*/
+#define TCF_CLK_CTRL_MEM_THROUGH_SYS        0x01E8
+#define MEM_THROUGH_SYS_MASK                0x00000001U
+#define MEM_THROUGH_SYS_SHIFT               0
+#define MEM_THROUGH_SYS_SIGNED              0
+
+/*
+       Register HOST_PHY_OFFSET
+*/
+#define TCF_CLK_CTRL_HOST_PHY_OFFSET        0x01F0
+#define HOST_PHY_OFFSET_MASK                0xFFFFFFFFU
+#define HOST_PHY_OFFSET_SHIFT               0
+#define HOST_PHY_OFFSET_SIGNED              0
+
+/*
+       Register DEBUG_REG_SEL
+*/
+#define TCF_CLK_CTRL_DEBUG_REG_SEL          0x01F8
+#define DEBUG_REG_SELECT_MASK               0xFFFFFFFFU
+#define DEBUG_REG_SELECT_SHIFT              0
+#define DEBUG_REG_SELECT_SIGNED             0
+
+/*
+       Register DEBUG_REG
+*/
+#define TCF_CLK_CTRL_DEBUG_REG              0x0200
+#define DEBUG_REG_VALUE_MASK                0xFFFFFFFFU
+#define DEBUG_REG_VALUE_SHIFT               0
+#define DEBUG_REG_VALUE_SIGNED              0
+
+/*
+       Register JTAG_CTRL
+*/
+#define TCF_CLK_CTRL_JTAG_CTRL              0x0208
+#define JTAG_TRST_MASK                      0x00000001U
+#define JTAG_TRST_SHIFT                     0
+#define JTAG_TRST_SIGNED                    0
+
+#define JTAG_TMS_MASK                       0x00000002U
+#define JTAG_TMS_SHIFT                      1
+#define JTAG_TMS_SIGNED                     0
+
+#define JTAG_TCK_MASK                       0x00000004U
+#define JTAG_TCK_SHIFT                      2
+#define JTAG_TCK_SIGNED                     0
+
+#define JTAG_TDO_MASK                       0x00000008U
+#define JTAG_TDO_SHIFT                      3
+#define JTAG_TDO_SIGNED                     0
+
+#define JTAG_TDI_MASK                       0x00000010U
+#define JTAG_TDI_SHIFT                      4
+#define JTAG_TDI_SIGNED                     0
+
+#define JTAG_DASH_N_REG_MASK                0x40000000U
+#define JTAG_DASH_N_REG_SHIFT               30
+#define JTAG_DASH_N_REG_SIGNED              0
+
+#define JTAG_DISABLE_MASK                   0x80000000U
+#define JTAG_DISABLE_SHIFT                  31
+#define JTAG_DISABLE_SIGNED                 0
+
+/*
+       Register SAI_DEBUG_RDNWR
+*/
+#define TCF_CLK_CTRL_SAI_DEBUG_RDNWR        0x0300
+#define SAI_DEBUG_REG_ADDR_MASK             0x000001FFU
+#define SAI_DEBUG_REG_ADDR_SHIFT            0
+#define SAI_DEBUG_REG_ADDR_SIGNED           0
+
+#define SAI_DEBUG_REG_RDNWR_MASK            0x00000200U
+#define SAI_DEBUG_REG_RDNWR_SHIFT           9
+#define SAI_DEBUG_REG_RDNWR_SIGNED          0
+
+/*
+       Register SAI_DEBUG_WDATA
+*/
+#define TCF_CLK_CTRL_SAI_DEBUG_WDATA        0x0308
+#define SAI_DEBUG_REG_WDATA_MASK            0xFFFFFFFFU
+#define SAI_DEBUG_REG_WDATA_SHIFT           0
+#define SAI_DEBUG_REG_WDATA_SIGNED          0
+
+/*
+       Register SAI_DEBUG_RDATA
+*/
+#define TCF_CLK_CTRL_SAI_DEBUG_RDATA        0x0310
+#define SAI_DEBUG_REG_RDATA_MASK            0xFFFFFFFFU
+#define SAI_DEBUG_REG_RDATA_SHIFT           0
+#define SAI_DEBUG_REG_RDATA_SIGNED          0
+
+/*
+       Register SAI_DEBUG_GO
+*/
+#define TCF_CLK_CTRL_SAI_DEBUG_GO           0x0318
+#define SAI_DEBUG_REG_GO_MASK               0x00000001U
+#define SAI_DEBUG_REG_GO_SHIFT              0
+#define SAI_DEBUG_REG_GO_SIGNED             0
+
+/*
+       Register AUX_DUT_RESETS
+*/
+#define TCF_CLK_CTRL_AUX_DUT_RESETS         0x0320
+#define AUX_DUT_RESETS_MASK                 0x0000000FU
+#define AUX_DUT_RESETS_SHIFT                0
+#define AUX_DUT_RESETS_SIGNED               0
+
+/*
+       Register DUT_CLK_CTRL
+*/
+#define TCF_CLK_CTRL_DUT_CLK_CTRL           0x0328
+#define MEM_REQ_PHSE_MASK                   0x0000FFFFU
+#define MEM_REQ_PHSE_SHIFT                  0
+#define MEM_REQ_PHSE_SIGNED                 0
+
+/*
+       Register DUT_CLK_STATUS
+*/
+#define TCF_CLK_CTRL_DUT_CLK_STATUS         0x0330
+#define MEM_REQ_PHSE_SET_MASK               0x00000003U
+#define MEM_REQ_PHSE_SET_SHIFT              0
+#define MEM_REQ_PHSE_SET_SIGNED             0
+
+/*
+       Register DUT_CLK_INFO
+*/
+#define TCF_CLK_CTRL_DUT_CLK_INFO           0x0340
+#define CORE_MASK                           0x0000FFFFU
+#define CORE_SHIFT                          0
+#define CORE_SIGNED                         0
+
+#define MEM_MASK                            0xFFFF0000U
+#define MEM_SHIFT                           16
+#define MEM_SIGNED                          0
+
+/*
+       Register DUT_CLK_PHSE
+*/
+#define TCF_CLK_CTRL_DUT_CLK_PHSE           0x0348
+#define MEM_REQ_MASK                        0x0000FFFFU
+#define MEM_REQ_SHIFT                       0
+#define MEM_REQ_SIGNED                      0
+
+#define MEM_RD_MASK                         0xFFFF0000U
+#define MEM_RD_SHIFT                        16
+#define MEM_RD_SIGNED                       0
+
+#endif /* !defined(_TCF_CLK_CTRL_H_) */
+
+/*****************************************************************************
+ End of file (tcf_clk_ctrl.h)
+*****************************************************************************/
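
All of the generated <FIELD>_MASK/<FIELD>_SHIFT pairs above follow one pattern, so field access reduces to a pair of helpers. The C sketch below is illustrative only: tcf_regs is a hypothetical ioremap()'d base for the TCF clock-control bank, and readl()/writel() stand in for whichever I/O accessors the system layer actually uses.

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical ioremap()'d base of the TCF clock-control register bank. */
static void __iomem *tcf_regs;

/* Extract a field described by a generated <FIELD>_MASK/<FIELD>_SHIFT pair. */
static inline u32 tcf_field_get(u32 reg, u32 mask, u32 shift)
{
	return (reg & mask) >> shift;
}

/* Insert a field value without disturbing the other bits of the register. */
static inline u32 tcf_field_set(u32 reg, u32 mask, u32 shift, u32 val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

static bool tcf_dut_in_reset(void)
{
	u32 v = readl(tcf_regs + TCF_CLK_CTRL_CLK_AND_RST_CTRL);

	/* The _RESETN suffix conventionally denotes an active-low reset,
	 * so a zero field means the DUT is being held in reset. */
	return tcf_field_get(v, DUT_RESETN_MASK, DUT_RESETN_SHIFT) == 0;
}

tcf_field_set() would be used the same way on a value read back from a control register before writing it out again.
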
diff --git a/drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/tcf_pll.h b/drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/tcf_pll.h
new file mode 100644 (file)
index 0000000..71eaf92
--- /dev/null
@@ -0,0 +1,311 @@
+/*************************************************************************/ /*!
+@Title          Test Chip Framework PLL register definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Autogenerated C -- do not edit
+                Generated from tcf_pll.def
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_TCF_PLL_H_)
+#define _TCF_PLL_H_
+
+/*
+       Register PLL_DDR2_CLK0
+*/
+#define TCF_PLL_PLL_DDR2_CLK0               0x0000
+#define DDR2_PLL_CLK0_PHS_MASK              0x00300000U
+#define DDR2_PLL_CLK0_PHS_SHIFT             20
+#define DDR2_PLL_CLK0_PHS_SIGNED            0
+
+#define DDR2_PLL_CLK0_MS_MASK               0x00030000U
+#define DDR2_PLL_CLK0_MS_SHIFT              16
+#define DDR2_PLL_CLK0_MS_SIGNED             0
+
+#define DDR2_PLL_CLK0_FREQ_MASK             0x000001FFU
+#define DDR2_PLL_CLK0_FREQ_SHIFT            0
+#define DDR2_PLL_CLK0_FREQ_SIGNED           0
+
+/*
+       Register PLL_DDR2_CLK1TO5
+*/
+#define TCF_PLL_PLL_DDR2_CLK1TO5            0x0008
+#define DDR2_PLL_CLK1TO5_PHS_MASK           0x3FF00000U
+#define DDR2_PLL_CLK1TO5_PHS_SHIFT          20
+#define DDR2_PLL_CLK1TO5_PHS_SIGNED         0
+
+#define DDR2_PLL_CLK1TO5_MS_MASK            0x000FFC00U
+#define DDR2_PLL_CLK1TO5_MS_SHIFT           10
+#define DDR2_PLL_CLK1TO5_MS_SIGNED          0
+
+#define DDR2_PLL_CLK1TO5_FREQ_MASK          0x000003FFU
+#define DDR2_PLL_CLK1TO5_FREQ_SHIFT         0
+#define DDR2_PLL_CLK1TO5_FREQ_SIGNED        0
+
+/*
+       Register PLL_DDR2_DRP_GO
+*/
+#define TCF_PLL_PLL_DDR2_DRP_GO             0x0010
+#define PLL_DDR2_DRP_GO_MASK                0x00000001U
+#define PLL_DDR2_DRP_GO_SHIFT               0
+#define PLL_DDR2_DRP_GO_SIGNED              0
+
+/*
+       Register PLL_PDP_CLK0
+*/
+#define TCF_PLL_PLL_PDP_CLK0                0x0018
+#define PDP_PLL_CLK0_PHS_MASK               0x00300000U
+#define PDP_PLL_CLK0_PHS_SHIFT              20
+#define PDP_PLL_CLK0_PHS_SIGNED             0
+
+#define PDP_PLL_CLK0_MS_MASK                0x00030000U
+#define PDP_PLL_CLK0_MS_SHIFT               16
+#define PDP_PLL_CLK0_MS_SIGNED              0
+
+#define PDP_PLL_CLK0_FREQ_MASK              0x000001FFU
+#define PDP_PLL_CLK0_FREQ_SHIFT             0
+#define PDP_PLL_CLK0_FREQ_SIGNED            0
+
+/*
+       Register PLL_PDP_CLK1TO5
+*/
+#define TCF_PLL_PLL_PDP_CLK1TO5             0x0020
+#define PDP_PLL_CLK1TO5_PHS_MASK            0x3FF00000U
+#define PDP_PLL_CLK1TO5_PHS_SHIFT           20
+#define PDP_PLL_CLK1TO5_PHS_SIGNED          0
+
+#define PDP_PLL_CLK1TO5_MS_MASK             0x000FFC00U
+#define PDP_PLL_CLK1TO5_MS_SHIFT            10
+#define PDP_PLL_CLK1TO5_MS_SIGNED           0
+
+#define PDP_PLL_CLK1TO5_FREQ_MASK           0x000003FFU
+#define PDP_PLL_CLK1TO5_FREQ_SHIFT          0
+#define PDP_PLL_CLK1TO5_FREQ_SIGNED         0
+
+/*
+       Register PLL_PDP_DRP_GO
+*/
+#define TCF_PLL_PLL_PDP_DRP_GO              0x0028
+#define PLL_PDP_DRP_GO_MASK                 0x00000001U
+#define PLL_PDP_DRP_GO_SHIFT                0
+#define PLL_PDP_DRP_GO_SIGNED               0
+
+/*
+       Register PLL_PDP2_CLK0
+*/
+#define TCF_PLL_PLL_PDP2_CLK0               0x0030
+#define PDP2_PLL_CLK0_PHS_MASK              0x00300000U
+#define PDP2_PLL_CLK0_PHS_SHIFT             20
+#define PDP2_PLL_CLK0_PHS_SIGNED            0
+
+#define PDP2_PLL_CLK0_MS_MASK               0x00030000U
+#define PDP2_PLL_CLK0_MS_SHIFT              16
+#define PDP2_PLL_CLK0_MS_SIGNED             0
+
+#define PDP2_PLL_CLK0_FREQ_MASK             0x000001FFU
+#define PDP2_PLL_CLK0_FREQ_SHIFT            0
+#define PDP2_PLL_CLK0_FREQ_SIGNED           0
+
+/*
+       Register PLL_PDP2_CLK1TO5
+*/
+#define TCF_PLL_PLL_PDP2_CLK1TO5            0x0038
+#define PDP2_PLL_CLK1TO5_PHS_MASK           0x3FF00000U
+#define PDP2_PLL_CLK1TO5_PHS_SHIFT          20
+#define PDP2_PLL_CLK1TO5_PHS_SIGNED         0
+
+#define PDP2_PLL_CLK1TO5_MS_MASK            0x000FFC00U
+#define PDP2_PLL_CLK1TO5_MS_SHIFT           10
+#define PDP2_PLL_CLK1TO5_MS_SIGNED          0
+
+#define PDP2_PLL_CLK1TO5_FREQ_MASK          0x000003FFU
+#define PDP2_PLL_CLK1TO5_FREQ_SHIFT         0
+#define PDP2_PLL_CLK1TO5_FREQ_SIGNED        0
+
+/*
+       Register PLL_PDP2_DRP_GO
+*/
+#define TCF_PLL_PLL_PDP2_DRP_GO             0x0040
+#define PLL_PDP2_DRP_GO_MASK                0x00000001U
+#define PLL_PDP2_DRP_GO_SHIFT               0
+#define PLL_PDP2_DRP_GO_SIGNED              0
+
+/*
+       Register PLL_CORE_CLK0
+*/
+#define TCF_PLL_PLL_CORE_CLK0               0x0048
+#define CORE_PLL_CLK0_PHS_MASK              0x00300000U
+#define CORE_PLL_CLK0_PHS_SHIFT             20
+#define CORE_PLL_CLK0_PHS_SIGNED            0
+
+#define CORE_PLL_CLK0_MS_MASK               0x00030000U
+#define CORE_PLL_CLK0_MS_SHIFT              16
+#define CORE_PLL_CLK0_MS_SIGNED             0
+
+#define CORE_PLL_CLK0_FREQ_MASK             0x000001FFU
+#define CORE_PLL_CLK0_FREQ_SHIFT            0
+#define CORE_PLL_CLK0_FREQ_SIGNED           0
+
+/*
+       Register PLL_CORE_CLK1TO5
+*/
+#define TCF_PLL_PLL_CORE_CLK1TO5            0x0050
+#define CORE_PLL_CLK1TO5_PHS_MASK           0x3FF00000U
+#define CORE_PLL_CLK1TO5_PHS_SHIFT          20
+#define CORE_PLL_CLK1TO5_PHS_SIGNED         0
+
+#define CORE_PLL_CLK1TO5_MS_MASK            0x000FFC00U
+#define CORE_PLL_CLK1TO5_MS_SHIFT           10
+#define CORE_PLL_CLK1TO5_MS_SIGNED          0
+
+#define CORE_PLL_CLK1TO5_FREQ_MASK          0x000003FFU
+#define CORE_PLL_CLK1TO5_FREQ_SHIFT         0
+#define CORE_PLL_CLK1TO5_FREQ_SIGNED        0
+
+/*
+       Register PLL_CORE_DRP_GO
+*/
+#define TCF_PLL_PLL_CORE_DRP_GO             0x0058
+#define PLL_CORE_DRP_GO_MASK                0x00000001U
+#define PLL_CORE_DRP_GO_SHIFT               0
+#define PLL_CORE_DRP_GO_SIGNED              0
+
+/*
+       Register PLL_SYSIF_CLK0
+*/
+#define TCF_PLL_PLL_SYSIF_CLK0              0x0060
+#define SYSIF_PLL_CLK0_PHS_MASK             0x00300000U
+#define SYSIF_PLL_CLK0_PHS_SHIFT            20
+#define SYSIF_PLL_CLK0_PHS_SIGNED           0
+
+#define SYSIF_PLL_CLK0_MS_MASK              0x00030000U
+#define SYSIF_PLL_CLK0_MS_SHIFT             16
+#define SYSIF_PLL_CLK0_MS_SIGNED            0
+
+#define SYSIF_PLL_CLK0_FREQ_MASK            0x000001FFU
+#define SYSIF_PLL_CLK0_FREQ_SHIFT           0
+#define SYSIF_PLL_CLK0_FREQ_SIGNED          0
+
+/*
+       Register PLL_SYSIF_CLK1TO5
+*/
+#define TCF_PLL_PLL_SYSIF_CLK1TO5           0x0068
+#define SYSIF_PLL_CLK1TO5_PHS_MASK          0x3FF00000U
+#define SYSIF_PLL_CLK1TO5_PHS_SHIFT         20
+#define SYSIF_PLL_CLK1TO5_PHS_SIGNED        0
+
+#define SYSIF_PLL_CLK1TO5_MS_MASK           0x000FFC00U
+#define SYSIF_PLL_CLK1TO5_MS_SHIFT          10
+#define SYSIF_PLL_CLK1TO5_MS_SIGNED         0
+
+#define SYSIF_PLL_CLK1TO5_FREQ_MASK         0x000003FFU
+#define SYSIF_PLL_CLK1TO5_FREQ_SHIFT        0
+#define SYSIF_PLL_CLK1TO5_FREQ_SIGNED       0
+
+/*
+       Register PLL_SYS_DRP_GO
+*/
+#define TCF_PLL_PLL_SYS_DRP_GO              0x0070
+#define PLL_SYS_DRP_GO_MASK                 0x00000001U
+#define PLL_SYS_DRP_GO_SHIFT                0
+#define PLL_SYS_DRP_GO_SIGNED               0
+
+/*
+       Register PLL_MEMIF_CLK0
+*/
+#define TCF_PLL_PLL_MEMIF_CLK0              0x0078
+#define MEMIF_PLL_CLK0_PHS_MASK             0x00300000U
+#define MEMIF_PLL_CLK0_PHS_SHIFT            20
+#define MEMIF_PLL_CLK0_PHS_SIGNED           0
+
+#define MEMIF_PLL_CLK0_MS_MASK              0x00030000U
+#define MEMIF_PLL_CLK0_MS_SHIFT             16
+#define MEMIF_PLL_CLK0_MS_SIGNED            0
+
+#define MEMIF_PLL_CLK0_FREQ_MASK            0x000001FFU
+#define MEMIF_PLL_CLK0_FREQ_SHIFT           0
+#define MEMIF_PLL_CLK0_FREQ_SIGNED          0
+
+/*
+       Register PLL_MEMIF_CLK1TO5
+*/
+#define TCF_PLL_PLL_MEMIF_CLK1TO5           0x0080
+#define MEMIF_PLL_CLK1TO5_PHS_MASK          0x3FF00000U
+#define MEMIF_PLL_CLK1TO5_PHS_SHIFT         20
+#define MEMIF_PLL_CLK1TO5_PHS_SIGNED        0
+
+#define MEMIF_PLL_CLK1TO5_MS_MASK           0x000FFC00U
+#define MEMIF_PLL_CLK1TO5_MS_SHIFT          10
+#define MEMIF_PLL_CLK1TO5_MS_SIGNED         0
+
+#define MEMIF_PLL_CLK1TO5_FREQ_MASK         0x000003FFU
+#define MEMIF_PLL_CLK1TO5_FREQ_SHIFT        0
+#define MEMIF_PLL_CLK1TO5_FREQ_SIGNED       0
+
+/*
+       Register PLL_MEM_DRP_GO
+*/
+#define TCF_PLL_PLL_MEM_DRP_GO              0x0088
+#define PLL_MEM_DRP_GO_MASK                 0x00000001U
+#define PLL_MEM_DRP_GO_SHIFT                0
+#define PLL_MEM_DRP_GO_SIGNED               0
+
+/*
+       Register PLL_ALL_DRP_GO
+*/
+#define TCF_PLL_PLL_ALL_DRP_GO              0x0090
+#define PLL_ALL_DRP_GO_MASK                 0x00000001U
+#define PLL_ALL_DRP_GO_SHIFT                0
+#define PLL_ALL_DRP_GO_SIGNED               0
+
+/*
+       Register PLL_DRP_STATUS
+*/
+#define TCF_PLL_PLL_DRP_STATUS              0x0098
+#define PLL_LOCKS_MASK                      0x00003F00U
+#define PLL_LOCKS_SHIFT                     8
+#define PLL_LOCKS_SIGNED                    0
+
+#define PLL_DRP_GOOD_MASK                   0x0000003FU
+#define PLL_DRP_GOOD_SHIFT                  0
+#define PLL_DRP_GOOD_SIGNED                 0
+
+#endif /* !defined(_TCF_PLL_H_) */
+
+/*****************************************************************************
+ End of file (tcf_pll.h)
+*****************************************************************************/
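
Each PLL block pairs a *_DRP_GO trigger with lock reporting in PLL_DRP_STATUS. A minimal reconfiguration sequence might look like the sketch below; pll_regs, the 1000-iteration bound and the udelay() pacing are assumptions for illustration, and the header does not say which PLL_LOCKS bit belongs to which PLL, so this simply waits for any lock bit to rise.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

/* Hypothetical ioremap()'d base of the TCF PLL register block. */
static void __iomem *pll_regs;

static int tcf_pll_core_reconfigure(void)
{
	int i;

	/* Kick the core-clock DRP state machine. */
	writel(PLL_CORE_DRP_GO_MASK, pll_regs + TCF_PLL_PLL_CORE_DRP_GO);

	/* Poll PLL_DRP_STATUS until a lock bit is reported (bounds assumed). */
	for (i = 0; i < 1000; i++) {
		u32 status = readl(pll_regs + TCF_PLL_PLL_DRP_STATUS);

		if ((status & PLL_LOCKS_MASK) >> PLL_LOCKS_SHIFT)
			return 0;
		udelay(10);
	}

	return -ETIMEDOUT;
}
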
diff --git a/drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/tcf_rgbpdp_regs.h b/drivers/gpu/drm/img/img-rogue/include/system/rgx_tc/tcf_rgbpdp_regs.h
new file mode 100644 (file)
index 0000000..e87ba61
--- /dev/null
@@ -0,0 +1,559 @@
+/*************************************************************************/ /*!
+@Title          Test Chip Framework PDP register definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Autogenerated C -- do not edit
+                Generated from: tcf_rgbpdp_regs.def
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_TCF_RGBPDP_REGS_H_)
+#define _TCF_RGBPDP_REGS_H_
+
+/*
+       Register PVR_TCF_RGBPDP_STR1SURF
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_STR1SURF  0x0000
+#define STR1HEIGHT_MASK                     0x000007FFU
+#define STR1HEIGHT_SHIFT                    0
+#define STR1HEIGHT_SIGNED                   0
+
+#define STR1WIDTH_MASK                      0x003FF800U
+#define STR1WIDTH_SHIFT                     11
+#define STR1WIDTH_SIGNED                    0
+
+#define STR1PIXFMT_MASK                     0x0F000000U
+#define STR1PIXFMT_SHIFT                    24
+#define STR1PIXFMT_SIGNED                   0
+
+/*
+       Register PVR_TCF_RGBPDP_STR1ADDRCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL 0x0004
+#define STR1BASE_MASK                       0x03FFFFFFU
+#define STR1BASE_SHIFT                      0
+#define STR1BASE_SIGNED                     0
+
+#define STR1INTFIELD_MASK                   0x40000000U
+#define STR1INTFIELD_SHIFT                  30
+#define STR1INTFIELD_SIGNED                 0
+
+#define STR1STREN_MASK                      0x80000000U
+#define STR1STREN_SHIFT                     31
+#define STR1STREN_SIGNED                    0
+
+/*
+       Register PVR_PDP_STR1POSN
+*/
+#define TCF_RGBPDP_PVR_PDP_STR1POSN         0x0008
+#define STR1STRIDE_MASK                     0x000003FFU
+#define STR1STRIDE_SHIFT                    0
+#define STR1STRIDE_SIGNED                   0
+
+/*
+       Register PVR_TCF_RGBPDP_MEMCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_MEMCTRL   0x000C
+#define MEMREFRESH_MASK                     0xC0000000U
+#define MEMREFRESH_SHIFT                    30
+#define MEMREFRESH_SIGNED                   0
+
+/*
+       Register PVR_TCF_RGBPDP_STRCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_STRCTRL   0x0010
+#define BURSTLEN_GFX_MASK                   0x000000FFU
+#define BURSTLEN_GFX_SHIFT                  0
+#define BURSTLEN_GFX_SIGNED                 0
+
+#define THRESHOLD_GFX_MASK                  0x0000FF00U
+#define THRESHOLD_GFX_SHIFT                 8
+#define THRESHOLD_GFX_SIGNED                0
+
+/*
+       Register PVR_TCF_RGBPDP_SYNCCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL  0x0014
+#define HSDIS_MASK                          0x00000001U
+#define HSDIS_SHIFT                         0
+#define HSDIS_SIGNED                        0
+
+#define HSPOL_MASK                          0x00000002U
+#define HSPOL_SHIFT                         1
+#define HSPOL_SIGNED                        0
+
+#define VSDIS_MASK                          0x00000004U
+#define VSDIS_SHIFT                         2
+#define VSDIS_SIGNED                        0
+
+#define VSPOL_MASK                          0x00000008U
+#define VSPOL_SHIFT                         3
+#define VSPOL_SIGNED                        0
+
+#define BLNKDIS_MASK                        0x00000010U
+#define BLNKDIS_SHIFT                       4
+#define BLNKDIS_SIGNED                      0
+
+#define BLNKPOL_MASK                        0x00000020U
+#define BLNKPOL_SHIFT                       5
+#define BLNKPOL_SIGNED                      0
+
+#define HS_SLAVE_MASK                       0x00000040U
+#define HS_SLAVE_SHIFT                      6
+#define HS_SLAVE_SIGNED                     0
+
+#define VS_SLAVE_MASK                       0x00000080U
+#define VS_SLAVE_SHIFT                      7
+#define VS_SLAVE_SIGNED                     0
+
+#define INTERLACE_MASK                      0x00000100U
+#define INTERLACE_SHIFT                     8
+#define INTERLACE_SIGNED                    0
+
+#define FIELDPOL_MASK                       0x00000200U
+#define FIELDPOL_SHIFT                      9
+#define FIELDPOL_SIGNED                     0
+
+#define CLKPOL_MASK                         0x00000800U
+#define CLKPOL_SHIFT                        11
+#define CLKPOL_SIGNED                       0
+
+#define CSYNC_EN_MASK                       0x00001000U
+#define CSYNC_EN_SHIFT                      12
+#define CSYNC_EN_SIGNED                     0
+
+#define FIELD_EN_MASK                       0x00002000U
+#define FIELD_EN_SHIFT                      13
+#define FIELD_EN_SIGNED                     0
+
+#define UPDWAIT_MASK                        0x000F0000U
+#define UPDWAIT_SHIFT                       16
+#define UPDWAIT_SIGNED                      0
+
+#define UPDCTRL_MASK                        0x01000000U
+#define UPDCTRL_SHIFT                       24
+#define UPDCTRL_SIGNED                      0
+
+#define UPDINTCTRL_MASK                     0x02000000U
+#define UPDINTCTRL_SHIFT                    25
+#define UPDINTCTRL_SIGNED                   0
+
+#define UPDSYNCTRL_MASK                     0x04000000U
+#define UPDSYNCTRL_SHIFT                    26
+#define UPDSYNCTRL_SIGNED                   0
+
+#define POWERDN_MASK                        0x10000000U
+#define POWERDN_SHIFT                       28
+#define POWERDN_SIGNED                      0
+
+#define DISP_RST_MASK                       0x20000000U
+#define DISP_RST_SHIFT                      29
+#define DISP_RST_SIGNED                     0
+
+#define SYNCACTIVE_MASK                     0x80000000U
+#define SYNCACTIVE_SHIFT                    31
+#define SYNCACTIVE_SIGNED                   0
+
+/*
+       Register PVR_TCF_RGBPDP_BORDCOL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_BORDCOL   0x0018
+#define BORDCOL_MASK                        0x00FFFFFFU
+#define BORDCOL_SHIFT                       0
+#define BORDCOL_SIGNED                      0
+
+/*
+       Register PVR_TCF_RGBPDP_UPDCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_UPDCTRL   0x001C
+#define UPDFIELD_MASK                       0x00000001U
+#define UPDFIELD_SHIFT                      0
+#define UPDFIELD_SIGNED                     0
+
+/*
+       Register PVR_TCF_RGBPDP_HSYNC1
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC1    0x0020
+#define HT_MASK                             0x00000FFFU
+#define HT_SHIFT                            0
+#define HT_SIGNED                           0
+
+#define HBPS_MASK                           0x0FFF0000U
+#define HBPS_SHIFT                          16
+#define HBPS_SIGNED                         0
+
+/*
+       Register PVR_TCF_RGBPDP_HSYNC2
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC2    0x0024
+#define HLBS_MASK                           0x00000FFFU
+#define HLBS_SHIFT                          0
+#define HLBS_SIGNED                         0
+
+#define HAS_MASK                            0x0FFF0000U
+#define HAS_SHIFT                           16
+#define HAS_SIGNED                          0
+
+/*
+       Register PVR_TCF_RGBPDP_HSYNC3
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC3    0x0028
+#define HRBS_MASK                           0x00000FFFU
+#define HRBS_SHIFT                          0
+#define HRBS_SIGNED                         0
+
+#define HFPS_MASK                           0x0FFF0000U
+#define HFPS_SHIFT                          16
+#define HFPS_SIGNED                         0
+
+/*
+       Register PVR_TCF_RGBPDP_VSYNC1
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC1    0x002C
+#define VT_MASK                             0x00000FFFU
+#define VT_SHIFT                            0
+#define VT_SIGNED                           0
+
+#define VBPS_MASK                           0x0FFF0000U
+#define VBPS_SHIFT                          16
+#define VBPS_SIGNED                         0
+
+/*
+       Register PVR_TCF_RGBPDP_VSYNC2
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC2    0x0030
+#define VTBS_MASK                           0x00000FFFU
+#define VTBS_SHIFT                          0
+#define VTBS_SIGNED                         0
+
+#define VAS_MASK                            0x0FFF0000U
+#define VAS_SHIFT                           16
+#define VAS_SIGNED                          0
+
+/*
+       Register PVR_TCF_RGBPDP_VSYNC3
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC3    0x0034
+#define VBBS_MASK                           0x00000FFFU
+#define VBBS_SHIFT                          0
+#define VBBS_SIGNED                         0
+
+#define VFPS_MASK                           0x0FFF0000U
+#define VFPS_SHIFT                          16
+#define VFPS_SIGNED                         0
+
+/*
+       Register PVR_TCF_RGBPDP_HDECTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_HDECTRL   0x0038
+#define HDEF_MASK                           0x00000FFFU
+#define HDEF_SHIFT                          0
+#define HDEF_SIGNED                         0
+
+#define HDES_MASK                           0x0FFF0000U
+#define HDES_SHIFT                          16
+#define HDES_SIGNED                         0
+
+/*
+       Register PVR_TCF_RGBPDP_VDECTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_VDECTRL   0x003C
+#define VDEF_MASK                           0x00000FFFU
+#define VDEF_SHIFT                          0
+#define VDEF_SIGNED                         0
+
+#define VDES_MASK                           0x0FFF0000U
+#define VDES_SHIFT                          16
+#define VDES_SIGNED                         0
+
+/*
+       Register PVR_TCF_RGBPDP_VEVENT
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_VEVENT    0x0040
+#define VFETCH_MASK                         0x00000FFFU
+#define VFETCH_SHIFT                        0
+#define VFETCH_SIGNED                       0
+
+#define VEVENT_MASK                         0x0FFF0000U
+#define VEVENT_SHIFT                        16
+#define VEVENT_SIGNED                       0
+
+/*
+       Register PVR_TCF_RGBPDP_OPMASK
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_OPMASK    0x0044
+#define MASKR_MASK                          0x000000FFU
+#define MASKR_SHIFT                         0
+#define MASKR_SIGNED                        0
+
+#define MASKG_MASK                          0x0000FF00U
+#define MASKG_SHIFT                         8
+#define MASKG_SIGNED                        0
+
+#define MASKB_MASK                          0x00FF0000U
+#define MASKB_SHIFT                         16
+#define MASKB_SIGNED                        0
+
+#define BLANKLEVEL_MASK                     0x40000000U
+#define BLANKLEVEL_SHIFT                    30
+#define BLANKLEVEL_SIGNED                   0
+
+#define MASKLEVEL_MASK                      0x80000000U
+#define MASKLEVEL_SHIFT                     31
+#define MASKLEVEL_SIGNED                    0
+
+/*
+       Register PVR_TCF_RGBPDP_INTSTAT
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTSTAT   0x0048
+#define INTS_HBLNK0_MASK                    0x00000001U
+#define INTS_HBLNK0_SHIFT                   0
+#define INTS_HBLNK0_SIGNED                  0
+
+#define INTS_HBLNK1_MASK                    0x00000002U
+#define INTS_HBLNK1_SHIFT                   1
+#define INTS_HBLNK1_SIGNED                  0
+
+#define INTS_VBLNK0_MASK                    0x00000004U
+#define INTS_VBLNK0_SHIFT                   2
+#define INTS_VBLNK0_SIGNED                  0
+
+#define INTS_VBLNK1_MASK                    0x00000008U
+#define INTS_VBLNK1_SHIFT                   3
+#define INTS_VBLNK1_SIGNED                  0
+
+#define INTS_STR1URUN_MASK                  0x00000010U
+#define INTS_STR1URUN_SHIFT                 4
+#define INTS_STR1URUN_SIGNED                0
+
+#define INTS_STR1ORUN_MASK                  0x00000020U
+#define INTS_STR1ORUN_SHIFT                 5
+#define INTS_STR1ORUN_SIGNED                0
+
+#define INTS_DISPURUN_MASK                  0x00000040U
+#define INTS_DISPURUN_SHIFT                 6
+#define INTS_DISPURUN_SIGNED                0
+
+/*
+       Register PVR_TCF_RGBPDP_INTENAB
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTENAB   0x004C
+#define INTEN_HBLNK0_MASK                   0x00000001U
+#define INTEN_HBLNK0_SHIFT                  0
+#define INTEN_HBLNK0_SIGNED                 0
+
+#define INTEN_HBLNK1_MASK                   0x00000002U
+#define INTEN_HBLNK1_SHIFT                  1
+#define INTEN_HBLNK1_SIGNED                 0
+
+#define INTEN_VBLNK0_MASK                   0x00000004U
+#define INTEN_VBLNK0_SHIFT                  2
+#define INTEN_VBLNK0_SIGNED                 0
+
+#define INTEN_VBLNK1_MASK                   0x00000008U
+#define INTEN_VBLNK1_SHIFT                  3
+#define INTEN_VBLNK1_SIGNED                 0
+
+#define INTEN_STR1URUN_MASK                 0x00000010U
+#define INTEN_STR1URUN_SHIFT                4
+#define INTEN_STR1URUN_SIGNED               0
+
+#define INTEN_STR1ORUN_MASK                 0x00000020U
+#define INTEN_STR1ORUN_SHIFT                5
+#define INTEN_STR1ORUN_SIGNED               0
+
+#define INTEN_DISPURUN_MASK                 0x00000040U
+#define INTEN_DISPURUN_SHIFT                6
+#define INTEN_DISPURUN_SIGNED               0
+
+/*
+       Register PVR_TCF_RGBPDP_INTCLEAR
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTCLEAR  0x0050
+#define INTCLR_HBLNK0_MASK                  0x00000001U
+#define INTCLR_HBLNK0_SHIFT                 0
+#define INTCLR_HBLNK0_SIGNED                0
+
+#define INTCLR_HBLNK1_MASK                  0x00000002U
+#define INTCLR_HBLNK1_SHIFT                 1
+#define INTCLR_HBLNK1_SIGNED                0
+
+#define INTCLR_VBLNK0_MASK                  0x00000004U
+#define INTCLR_VBLNK0_SHIFT                 2
+#define INTCLR_VBLNK0_SIGNED                0
+
+#define INTCLR_VBLNK1_MASK                  0x00000008U
+#define INTCLR_VBLNK1_SHIFT                 3
+#define INTCLR_VBLNK1_SIGNED                0
+
+#define INTCLR_STR1URUN_MASK                0x00000010U
+#define INTCLR_STR1URUN_SHIFT               4
+#define INTCLR_STR1URUN_SIGNED              0
+
+#define INTCLR_STR1ORUN_MASK                0x00000020U
+#define INTCLR_STR1ORUN_SHIFT               5
+#define INTCLR_STR1ORUN_SIGNED              0
+
+#define INTCLR_DISPURUN_MASK                0x00000040U
+#define INTCLR_DISPURUN_SHIFT               6
+#define INTCLR_DISPURUN_SIGNED              0
+
+/*
+       Register PVR_TCF_RGBPDP_INTCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTCTRL   0x0054
+#define HBLNK_LINENO_MASK                   0x00000FFFU
+#define HBLNK_LINENO_SHIFT                  0
+#define HBLNK_LINENO_SIGNED                 0
+
+#define HBLNK_LINE_MASK                     0x00010000U
+#define HBLNK_LINE_SHIFT                    16
+#define HBLNK_LINE_SIGNED                   0
+
+/*
+       Register PVR_TCF_RGBPDP_SIGNAT
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_SIGNAT    0x0058
+#define SIGNATURE_MASK                      0xFFFFFFFFU
+#define SIGNATURE_SHIFT                     0
+#define SIGNATURE_SIGNED                    0
+
+/*
+       Register PVR_TCF_RGBPDP_LINESTAT
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_LINESTAT  0x005C
+#define LINENO_MASK                         0x00000FFFU
+#define LINENO_SHIFT                        0
+#define LINENO_SIGNED                       0
+
+/*
+       Register PVR_TCF_RGBPDP_DBGCTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_DBGCTRL   0x0060
+#define DBG_ENAB_MASK                       0x00000001U
+#define DBG_ENAB_SHIFT                      0
+#define DBG_ENAB_SIGNED                     0
+
+#define DBG_READ_MASK                       0x00000002U
+#define DBG_READ_SHIFT                      1
+#define DBG_READ_SIGNED                     0
+
+/*
+       Register PVR_TCF_RGBPDP_DBGDATA
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_DBGDATA   0x0064
+#define DBG_DATA_MASK                       0x00FFFFFFU
+#define DBG_DATA_SHIFT                      0
+#define DBG_DATA_SIGNED                     0
+
+/*
+       Register PVR_TCF_RGBPDP_DBGSIDE
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_DBGSIDE   0x0068
+#define DBG_SIDE_MASK                       0x00000007U
+#define DBG_SIDE_SHIFT                      0
+#define DBG_SIDE_SIGNED                     0
+
+#define DBG_VAL_MASK                        0x00000008U
+#define DBG_VAL_SHIFT                       3
+#define DBG_VAL_SIGNED                      0
+
+/*
+       Register PVR_TCF_RGBPDP_REGLD_STAT
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_REGLD_STAT 0x0070
+#define REGLD_ADDROUT_MASK                  0x00FFFFFFU
+#define REGLD_ADDROUT_SHIFT                 0
+#define REGLD_ADDROUT_SIGNED                0
+
+#define REGLD_ADDREN_MASK                   0x80000000U
+#define REGLD_ADDREN_SHIFT                  31
+#define REGLD_ADDREN_SIGNED                 0
+
+/*
+       Register PVR_TCF_RGBPDP_REGLD_CTRL
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_REGLD_CTRL 0x0074
+#define REGLD_ADDRIN_MASK                   0x00FFFFFFU
+#define REGLD_ADDRIN_SHIFT                  0
+#define REGLD_ADDRIN_SIGNED                 0
+
+#define REGLD_VAL_MASK                      0x01000000U
+#define REGLD_VAL_SHIFT                     24
+#define REGLD_VAL_SIGNED                    0
+
+#define REGLD_ADDRLEN_MASK                  0xFE000000U
+#define REGLD_ADDRLEN_SHIFT                 25
+#define REGLD_ADDRLEN_SIGNED                0
+
+/*
+       Register PVR_TCF_RGBPDP_CORE_ID
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_CORE_ID   0x0078
+#define CONFIG_ID_MASK                      0x0000FFFFU
+#define CONFIG_ID_SHIFT                     0
+#define CONFIG_ID_SIGNED                    0
+
+#define CORE_ID_MASK                        0x00FF0000U
+#define CORE_ID_SHIFT                       16
+#define CORE_ID_SIGNED                      0
+
+#define GROUP_ID_MASK                       0xFF000000U
+#define GROUP_ID_SHIFT                      24
+#define GROUP_ID_SIGNED                     0
+
+/*
+       Register PVR_TCF_RGBPDP_CORE_REV
+*/
+#define TCF_RGBPDP_PVR_TCF_RGBPDP_CORE_REV  0x007C
+#define MAINT_REV_MASK                      0x000000FFU
+#define MAINT_REV_SHIFT                     0
+#define MAINT_REV_SIGNED                    0
+
+#define MINOR_REV_MASK                      0x0000FF00U
+#define MINOR_REV_SHIFT                     8
+#define MINOR_REV_SIGNED                    0
+
+#define MAJOR_REV_MASK                      0x00FF0000U
+#define MAJOR_REV_SHIFT                     16
+#define MAJOR_REV_SIGNED                    0
+
+#endif /* !defined(_TCF_RGBPDP_REGS_H_) */
+
+/*****************************************************************************
+ End of file (tcf_rgbpdp_regs.h)
+*****************************************************************************/
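
The INTSTAT/INTENAB/INTCLEAR triplet above uses the same bit position for each event, so a handler reads the status, services the event and acknowledges it by writing the matching bit to INTCLEAR. The sketch below shows this for vertical blank on stream 0; pdp_regs and the handler wiring are assumptions, not the driver's actual code.

#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/types.h>

/* Hypothetical ioremap()'d base of the RGB PDP register block. */
static void __iomem *pdp_regs;

static irqreturn_t pdp_isr(int irq, void *dev_id)
{
	u32 stat = readl(pdp_regs + TCF_RGBPDP_PVR_TCF_RGBPDP_INTSTAT);

	if (!(stat & INTS_VBLNK0_MASK))
		return IRQ_NONE;

	/* ... signal the vblank event to the display pipeline here ... */

	/* Acknowledge only the event that was handled. */
	writel(INTCLR_VBLNK0_MASK,
	       pdp_regs + TCF_RGBPDP_PVR_TCF_RGBPDP_INTCLEAR);

	return IRQ_HANDLED;
}
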
diff --git a/drivers/gpu/drm/img/img-rogue/include/virt_validation_defs.h b/drivers/gpu/drm/img/img-rogue/include/virt_validation_defs.h
new file mode 100644 (file)
index 0000000..5b8908f
--- /dev/null
@@ -0,0 +1,63 @@
+/*************************************************************************/ /*!
+@File
+@Title          Definitions for virtualization
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Services shared header for virtualization definitions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef SRV_VIRT_DEFS_H
+#define SRV_VIRT_DEFS_H
+
+#if !defined(GPUVIRT_VALIDATION_MAX_STRING_LENGTH)
+       #define GPUVIRT_VALIDATION_MAX_STRING_LENGTH 512
+#endif
+
+#define GPUVIRT_VALIDATION_MAX_OS 8
+
+#define GPUVIRT_VALIDATION_NUM_REGIONS 2
+#define GPUVIRT_VAL_REGION_SECURE 0
+#define GPUVIRT_VAL_REGION_SHARED 1
+
+/* Shared region 1MB */
+#define GPUVIRT_SIZEOF_SHARED 0x100000
+
+/* Min region size 64MB */
+#define GPUVIRT_MIN_SIZE 0x4000000
+
+#endif /* SRV_VIRT_DEFS_H */
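
These constants only size the virtualization validation setup; how the regions are recorded is up to the services code. Purely as an illustration of how they combine, a per-OSID table indexed by the region constants might look like the following (the struct and field names are assumptions):

#include <linux/types.h>

/* Illustrative bookkeeping only; the struct layout is an assumption. */
struct gpuvirt_val_region {
	u64 base;
	u64 size;
};

static struct gpuvirt_val_region
	gpuvirt_regions[GPUVIRT_VALIDATION_MAX_OS][GPUVIRT_VALIDATION_NUM_REGIONS];

static void gpuvirt_set_shared_region(u32 osid, u64 base)
{
	/* The shared window has a fixed 1 MB size (GPUVIRT_SIZEOF_SHARED). */
	gpuvirt_regions[osid][GPUVIRT_VAL_REGION_SHARED].base = base;
	gpuvirt_regions[osid][GPUVIRT_VAL_REGION_SHARED].size = GPUVIRT_SIZEOF_SHARED;
}
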
diff --git a/drivers/gpu/drm/img/img-rogue/services/include/dma_flags.h b/drivers/gpu/drm/img/img-rogue/services/include/dma_flags.h
new file mode 100644 (file)
index 0000000..587e41b
--- /dev/null
@@ -0,0 +1,50 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef DMA_FLAGS_H
+#define DMA_FLAGS_H
+
+/* These should match the flags in pvrsrv_dma.h. */
+#define DMA_FLAG_MEM_TO_DEV  (1U<<0)
+#define DMA_FLAG_DEV_TO_MEM  (0U<<0)
+
+#define DMA_FLAG_SYNCHRONOUS (1U<<1)
+
+#endif /* DMA_FLAGS_H */
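
Worth noting: DMA_FLAG_DEV_TO_MEM is defined as (0U<<0), so the device-to-memory direction is simply the absence of bit 0 and must be tested by checking that the MEM_TO_DEV bit is clear. A minimal standalone sketch of that convention (the flag values are copied locally so the snippet compiles on its own; it is illustrative, not driver code):

    /* Illustrative only: build a flag word for a synchronous memory-to-device
     * transfer and decode it again. DMA_FLAG_DEV_TO_MEM contributes no bits,
     * so direction is recovered by testing the MEM_TO_DEV bit. */
    #include <stdint.h>
    #include <stdio.h>

    #define DMA_FLAG_MEM_TO_DEV  (1U<<0)
    #define DMA_FLAG_DEV_TO_MEM  (0U<<0)
    #define DMA_FLAG_SYNCHRONOUS (1U<<1)

    int main(void)
    {
        uint32_t uiFlags = DMA_FLAG_MEM_TO_DEV | DMA_FLAG_SYNCHRONOUS;

        printf("%s, %s\n",
               (uiFlags & DMA_FLAG_MEM_TO_DEV) ? "mem-to-dev" : "dev-to-mem",
               (uiFlags & DMA_FLAG_SYNCHRONOUS) ? "synchronous" : "asynchronous");
        return 0;
    }
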
diff --git a/drivers/gpu/drm/img/img-rogue/services/include/htbuffer_sf.h b/drivers/gpu/drm/img/img-rogue/services/include/htbuffer_sf.h
new file mode 100644 (file)
index 0000000..9042de2
--- /dev/null
@@ -0,0 +1,241 @@
+/*************************************************************************/ /*!
+@File           htbuffer_sf.h
+@Title          Host Trace Buffer interface string format specifiers
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the Host Trace Buffer logging messages. The following
+                list contains the messages the host driver prints. Changing anything
+                but the first column or spelling mistakes in the strings will
+                break compatibility with log files created with older/newer
+                driver versions.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef HTBUFFER_SF_H
+#define HTBUFFER_SF_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+
+/******************************************************************************
+ * *DO*NOT* rearrange or delete lines in SFIDLIST or SFGROUPLIST or you
+ *          WILL BREAK host tracing message compatibility with previous
+ *          driver versions. Only add new ones, if so required.
+ *****************************************************************************/
+
+
+/* String used in pvrdebug -h output */
+#define HTB_LOG_GROUPS_STRING_LIST   "ctrl,mmu,sync,main,brg"
+
+/* Used in print statements to display log group state, one %s per group defined */
+#define HTB_LOG_ENABLED_GROUPS_LIST_PFSPEC  "%s%s%s%s%s"
+
+/* Available log groups - Master template
+ *
+ * Group usage is as follows:
+ *    CTRL  - Internal Host Trace information and synchronisation data
+ *    MMU   - MMU page mapping information
+ *    SYNC  - Synchronisation debug
+ *    MAIN  - Data master kicks, etc. tying in with the MAIN group in FWTrace
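+ *    BRG   - Bridge call logging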
+ *    DBG   - Temporary debugging group, logs not to be left in the driver
+ *
+ */
+#define HTB_LOG_SFGROUPLIST                               \
+       X( HTB_GROUP_NONE,     NONE  )                        \
+/*     gid,                group flag / apphint name */   \
+       X( HTB_GROUP_CTRL,     CTRL  )                        \
+       X( HTB_GROUP_MMU,      MMU   )                        \
+       X( HTB_GROUP_SYNC,     SYNC  )                        \
+       X( HTB_GROUP_MAIN,     MAIN  )                        \
+       X( HTB_GROUP_BRG,      BRG  )                         \
+/* Debug group HTB_GROUP_DBG must always be last */       \
+       X( HTB_GROUP_DBG,      DBG   )
+
+
+/* Table of String Format specifiers, the group they belong and the number of
+ * arguments each expects. Xmacro styled macros are used to generate what is
+ * needed without requiring hand editing.
+ *
+ * id          : unique id within a group
+ * gid         : group id as defined above
+ * sym name    : symbolic name of enumerations used to identify message strings
+ * string      : Actual string
+ * #args       : number of arguments the string format requires
+ */
+#define HTB_LOG_SFIDLIST \
+/*id,  gid,             sym name,                       string,                           # arguments */ \
+X( 0,  HTB_GROUP_NONE,  HTB_SF_FIRST,                   "You should not use this string", 0) \
+\
+X( 1,  HTB_GROUP_CTRL,  HTB_SF_CTRL_LOGMODE,            "HTB log mode set to %d (1 - all PID, 2 - restricted PID)\n", 1) \
+X( 2,  HTB_GROUP_CTRL,  HTB_SF_CTRL_ENABLE_PID,         "HTB enable logging for PID %d\n", 1) \
+X( 3,  HTB_GROUP_CTRL,  HTB_SF_CTRL_ENABLE_GROUP,       "HTB enable logging groups 0x%08x\n", 1) \
+X( 4,  HTB_GROUP_CTRL,  HTB_SF_CTRL_LOG_LEVEL,          "HTB log level set to %d\n", 1) \
+X( 5,  HTB_GROUP_CTRL,  HTB_SF_CTRL_OPMODE,             "HTB operating mode set to %d (1 - drop latest, 2 - drop oldest, 3 - block)\n", 1) \
+X( 6,  HTB_GROUP_CTRL,  HTB_SF_CTRL_FWSYNC_SCALE,       "HTBFWSync OSTS=%08x%08x CRTS=%08x%08x CalcClkSpd=%d\n", 5) \
+X( 7,  HTB_GROUP_CTRL,  HTB_SF_CTRL_FWSYNC_SCALE_RPT,   "FW Sync scale info OSTS=%08x%08x CRTS=%08x%08x CalcClkSpd=%d\n", 5) \
+X( 8,  HTB_GROUP_CTRL,  HTB_SF_CTRL_FWSYNC_MARK,        "FW Sync Partition marker: %d\n", 1) \
+X( 9,  HTB_GROUP_CTRL,  HTB_SF_CTRL_FWSYNC_MARK_RPT,    "FW Sync Partition repeat: %d\n", 1) \
+X( 10, HTB_GROUP_CTRL,  HTB_SF_CTRL_FWSYNC_MARK_SCALE,  "Text not used", 6)\
+\
+X( 1,  HTB_GROUP_MMU,   HTB_SF_MMU_PAGE_OP_TABLE,       "MMU page op table entry page_id=%08x%08x index=%d level=%d val=%08x%08x map=%d\n", 7) \
+X( 2,  HTB_GROUP_MMU,   HTB_SF_MMU_PAGE_OP_ALLOC,       "MMU allocating DevVAddr from %08x%08x to %08x%08x\n", 4) \
+X( 3,  HTB_GROUP_MMU,   HTB_SF_MMU_PAGE_OP_FREE,        "MMU freeing DevVAddr from %08x%08x to %08x%08x\n", 4) \
+X( 4,  HTB_GROUP_MMU,   HTB_SF_MMU_PAGE_OP_MAP,         "MMU mapping DevVAddr %08x%08x to DevPAddr %08x%08x\n", 4) \
+X( 5,  HTB_GROUP_MMU,   HTB_SF_MMU_PAGE_OP_PMRMAP,      "MMU mapping PMR DevVAddr %08x%08x to DevPAddr %08x%08x\n", 4) \
+X( 6,  HTB_GROUP_MMU,   HTB_SF_MMU_PAGE_OP_UNMAP,       "MMU unmapping DevVAddr %08x%08x\n", 2) \
+\
+X( 1,  HTB_GROUP_SYNC,  HTB_SF_SYNC_SERVER_ALLOC,       "Server sync allocation [%08X]\n", 1) \
+X( 2,  HTB_GROUP_SYNC,  HTB_SF_SYNC_SERVER_UNREF,       "Server sync unreferenced [%08X]\n", 1) \
+X( 3,  HTB_GROUP_SYNC,  HTB_SF_SYNC_PRIM_OP_CREATE,     "Sync OP create 0x%08x, block count=%d, server syncs=%d, client syncs=%d\n", 4) \
+X( 4,  HTB_GROUP_SYNC,  HTB_SF_SYNC_PRIM_OP_TAKE,       "Sync OP take 0x%08x server syncs=%d, client syncs=%d\n", 3) \
+X( 5,  HTB_GROUP_SYNC,  HTB_SF_SYNC_PRIM_OP_COMPLETE,   "Sync OP complete 0x%08x\n", 1) \
+X( 6,  HTB_GROUP_SYNC,  HTB_SF_SYNC_PRIM_OP_DESTROY,    "Sync OP destroy 0x%08x\n", 1) \
+\
+X( 1,  HTB_GROUP_MAIN,  HTB_SF_MAIN_KICK_TA_DEPRECATED, "Kick TA: FWCtx %08X @ %d\n", 2) \
+X( 2,  HTB_GROUP_MAIN,  HTB_SF_MAIN_KICK_3D_DEPRECATED, "Kick 3D: FWCtx %08X @ %d\n", 2) \
+X( 3,  HTB_GROUP_MAIN,  HTB_SF_MAIN_KICK_CDM_DEPRECATED,"Kick CDM: FWCtx %08X @ %d\n", 2) \
+X( 4,  HTB_GROUP_MAIN,  HTB_SF_MAIN_KICK_RTU,           "Kick RTU: FWCtx %08X @ %d\n", 2) \
+X( 5,  HTB_GROUP_MAIN,  HTB_SF_MAIN_KICK_SHG,           "Kick SHG: FWCtx %08X @ %d\n", 2) \
+X( 6,  HTB_GROUP_MAIN,  HTB_SF_MAIN_KICK_2D_DEPRECATED, "Kick 2D: FWCtx %08X @ %d\n", 2) \
+X( 7,  HTB_GROUP_MAIN,  HTB_SF_MAIN_KICK_UNCOUNTED,     "Kick (uncounted) for all DMs\n", 0) \
+X( 8,  HTB_GROUP_MAIN,  HTB_SF_MAIN_FWCCB_CMD,          "FW CCB Cmd: %d\n", 1) \
+X( 9,  HTB_GROUP_MAIN,  HTB_SF_MAIN_PRE_POWER,          "Pre-power duration @ phase [%d] (0-shutdown,1-startup) RGX: %llu ns SYS: %llu ns\n", 3) \
+X(10,  HTB_GROUP_MAIN,  HTB_SF_MAIN_POST_POWER,         "Post-power duration @ phase [%d] (0-shutdown,1-startup) SYS: %llu ns RGX: %llu ns\n", 3) \
+X(11,  HTB_GROUP_MAIN,  HTB_SF_MAIN_KICK_TA,            "Kick TA: FWCtx %08x @ %d (frame:%d, ext:0x%08x, int:0x%08x)\n", 5) \
+X(12,  HTB_GROUP_MAIN,  HTB_SF_MAIN_KICK_3D,            "Kick 3D: FWCtx %08x @ %d (frame:%d, ext:0x%08x, int:0x%08x)\n", 5) \
+X(13,  HTB_GROUP_MAIN,  HTB_SF_MAIN_KICK_CDM,           "Kick CDM: FWCtx %08x @ %d (frame:%d, ext:0x%08x, int:0x%08x)\n", 5) \
+X(14,  HTB_GROUP_MAIN,  HTB_SF_MAIN_KICK_2D,            "Kick 2D: FWCtx %08x @ %d (frame:%d, ext:0x%08x, int:0x%08x)\n", 5) \
+\
+X( 1,  HTB_GROUP_BRG,   HTB_SF_BRG_BRIDGE_CALL,         "Bridge call: start: %010u: bid %03d fid %d\n", 3) \
+X( 2,  HTB_GROUP_BRG,   HTB_SF_BRG_BRIDGE_CALL_ERR,     "Bridge call: start: %010u: bid %03d fid %d error %d\n", 4) \
+\
+X( 1,  HTB_GROUP_DBG,   HTB_SF_DBG_INTPAIR,             "0x%8.8x 0x%8.8x\n", 2) \
+\
+X( 65535, HTB_GROUP_NONE, HTB_SF_LAST,                  "You should not use this string\n", 15)
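
A consumer of a list like HTB_LOG_SFIDLIST (for example, a log decoder) typically expands it several times with different X definitions, building parallel structures that stay in sync by construction. A standalone sketch of that idiom with a cut-down, hypothetical two-entry list (the DEMO_ names are invented and not part of the driver):

    /* Illustrative only: one X-macro list expanded twice - once into an enum
     * of ids, once into a table of argument counts indexed by id. */
    #define DEMO_SFIDLIST \
    X(1, DEMO_SF_KICK, "Kick: FWCtx %08x @ %d\n", 2) \
    X(2, DEMO_SF_CCB,  "FW CCB Cmd: %d\n",        1)

    typedef enum {
    #define X(id, sym, str, nargs) sym = (id),
        DEMO_SFIDLIST
    #undef X
    } DEMO_SF_ID;

    static const unsigned int g_auiDemoNumArgs[] = {
    #define X(id, sym, str, nargs) [id] = (nargs),
        DEMO_SFIDLIST
    #undef X
    };
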
+
+
+
+/* gid - Group numbers */
+typedef enum _HTB_LOG_SFGROUPS {
+#define X(A,B) A,
+       HTB_LOG_SFGROUPLIST
+#undef X
+} HTB_LOG_SFGROUPS;
+
+
+/* Group flags are stored in an array of elements, each of which has a
+ * certain number of bits.
+ */
+#define HTB_FLAG_EL_T                   IMG_UINT32
+#define HTB_FLAG_NUM_BITS_IN_EL         (sizeof(HTB_FLAG_EL_T) * 8)
+
+#define HTB_LOG_GROUP_FLAG_GROUP(gid)   ((gid-1) / HTB_FLAG_NUM_BITS_IN_EL)
+#define HTB_LOG_GROUP_FLAG(gid)         (gid ? (0x1 << ((gid-1)%HTB_FLAG_NUM_BITS_IN_EL)) : 0)
+#define HTB_LOG_GROUP_FLAG_NAME(gid)    HTB_LOG_TYPE_ ## gid
+
+/* Group enable flags */
+typedef enum _HTB_LOG_TYPE {
+#define X(a, b) HTB_LOG_GROUP_FLAG_NAME(b) = HTB_LOG_GROUP_FLAG(a),
+       HTB_LOG_SFGROUPLIST
+#undef X
+} HTB_LOG_TYPE;
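
The HTB_LOG_GROUP_FLAG_GROUP()/HTB_LOG_GROUP_FLAG() pair maps a 1-based group id onto an element index and a bit within that element, with gid 0 (NONE) mapping to an empty mask. A standalone sketch of the same arithmetic (constants reproduced locally so it compiles on its own):

    /* Illustrative only: compute which flag element and which bit represent a
     * given group id, mirroring HTB_LOG_GROUP_FLAG_GROUP()/HTB_LOG_GROUP_FLAG(). */
    #include <inttypes.h>
    #include <stdio.h>

    #define FLAG_NUM_BITS_IN_EL (sizeof(uint32_t) * 8)

    int main(void)
    {
        unsigned int gid = 4; /* HTB_GROUP_MAIN in the list above */
        unsigned int el  = (unsigned int)((gid - 1) / FLAG_NUM_BITS_IN_EL);
        uint32_t     bit = gid ? (UINT32_C(1) << ((gid - 1) % FLAG_NUM_BITS_IN_EL)) : 0;

        printf("group %u -> element %u, mask 0x%08" PRIx32 "\n", gid, el, bit);
        return 0;
    }
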
+
+
+
+/* The symbolic names found in the table above are assigned an ui32 value of
+ * the following format:
+ * 31 30 28 27       20   19  16    15  12      11            0   bits
+ * -   ---   ---- ----     ----      ----        ---- ---- ----
+ *    0-11: id number
+ *   12-15: group id number
+ *   16-19: number of parameters
+ *   20-27: unused
+ *   28-30: active: identify SF packet, otherwise regular int32
+ *      31: reserved for signed/unsigned compatibility
+ *
+ * The following macro assigns those values to the enum generated SF ids list.
+ */
+#define HTB_LOG_IDMARKER            (0x70000000)
+#define HTB_LOG_CREATESFID(a,b,e)   (((a) | (b << 12) | (e << 16)) | HTB_LOG_IDMARKER)
+
+#define HTB_LOG_IDMASK              (0xFFF00000)
+#define HTB_LOG_VALIDID(I)          ( ((I) & HTB_LOG_IDMASK) == HTB_LOG_IDMARKER )
+
+typedef enum HTB_LOG_SFids {
+#define X(a, b, c, d, e) c = HTB_LOG_CREATESFID(a,b,e),
+       HTB_LOG_SFIDLIST
+#undef X
+} HTB_LOG_SFids;
+
+/* Return the group id that the given (enum generated) id belongs to */
+#define HTB_SF_GID(x) (((x)>>12) & 0xf)
+/* Future improvement to support log levels */
+#define HTB_SF_LVL(x) (0)
+/* Returns how many arguments the SF(string format) for the given
+ * (enum generated) id requires.
+ */
+#define HTB_SF_PARAMNUM(x) (((x)>>16) & 0xf)
+/* Returns the id of given enum */
+#define HTB_SF_ID(x) (x & 0xfff)
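
HTB_LOG_CREATESFID() packs the per-group id, the group id and the argument count into one 32-bit value and stamps bits 28-30 with the 0x7 marker so HTB_LOG_VALIDID() can tell SF ids apart from plain integers. A standalone sketch reproducing the packing and the accessor arithmetic above (local macro names, same layout):

    /* Illustrative only: pack id/group/arg-count into one word and recover
     * them again using the same shifts and masks as the header. */
    #include <stdio.h>

    #define IDMARKER              0x70000000u
    #define IDMASK                0xFFF00000u
    #define CREATESFID(id, g, n)  (((id) | ((g) << 12) | ((n) << 16)) | IDMARKER)

    int main(void)
    {
        unsigned int sf = CREATESFID(2u, 1u, 1u); /* id 2, group 1 (CTRL), 1 argument */

        printf("id=%u gid=%u nargs=%u valid=%d\n",
               sf & 0xfffu,          /* HTB_SF_ID()       */
               (sf >> 12) & 0xfu,    /* HTB_SF_GID()      */
               (sf >> 16) & 0xfu,    /* HTB_SF_PARAMNUM() */
               (sf & IDMASK) == IDMARKER);
        return 0;
    }
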
+
+/* Format of messages is: SF:PID:TID:TIMEPT1:TIMEPT2:[PARn]*
+ */
+#define HTB_LOG_HEADER_SIZE         5
+#define HTB_LOG_MAX_PARAMS          15
+
+#if defined(__cplusplus)
+}
+#endif
+
+/* Defines for handling MARK_SCALE special case */
+#define HTB_GID_CTRL 1
+#define HTB_ID_MARK_SCALE 10
+#define HTB_MARK_SCALE_ARG_ARRAY_SIZE 6
+
+/* Defines for extracting args from array for special case MARK_SCALE */
+#define HTB_ARG_SYNCMARK 0
+#define HTB_ARG_OSTS_PT1 1
+#define HTB_ARG_OSTS_PT2 2
+#define HTB_ARG_CRTS_PT1 3
+#define HTB_ARG_CRTS_PT2 4
+#define HTB_ARG_CLKSPD   5
+
+#endif /* HTBUFFER_SF_H */
+/*****************************************************************************
+ End of file (htbuffer_sf.h)
+*****************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/include/htbuffer_types.h b/drivers/gpu/drm/img/img-rogue/services/include/htbuffer_types.h
new file mode 100644 (file)
index 0000000..a404bf8
--- /dev/null
@@ -0,0 +1,118 @@
+/*************************************************************************/ /*!
+@File           htbuffer_types.h
+@Title          Host Trace Buffer types.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Host Trace Buffer provides a mechanism to log Host events to a
+                buffer in a similar way to the Firmware Trace mechanism.
+                Host Trace Buffer logs data using a Transport Layer buffer.
+                The Transport Layer and pvrtld tool provides the mechanism to
+                retrieve the trace data.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef HTBUFFER_TYPES_H
+#define HTBUFFER_TYPES_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+#include "htbuffer_sf.h"
+
+/* The group flags array of ints large enough to store all the group flags */
+#define HTB_FLAG_NUM_EL (((HTB_GROUP_DBG-1) / HTB_FLAG_NUM_BITS_IN_EL) + 1)
+extern IMG_INTERNAL HTB_FLAG_EL_T g_auiHTBGroupEnable[HTB_FLAG_NUM_EL];
+
+#define HTB_GROUP_ENABLED(SF) (g_auiHTBGroupEnable[HTB_LOG_GROUP_FLAG_GROUP(HTB_SF_GID(SF))] & HTB_LOG_GROUP_FLAG(HTB_SF_GID(SF)))
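
HTB_GROUP_ENABLED() ties the two pieces together: the group field of an SF id selects an element of g_auiHTBGroupEnable[] and that group's bit is tested within it. A standalone model of the check with a single 32-bit element (local names, same arithmetic as the macros it mirrors):

    /* Illustrative, self-contained model of HTB_GROUP_ENABLED(): the group id
     * embedded in an SF id (bits 12-15) selects the enable bit to test. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t g_uiGroupEnable;  /* stands in for g_auiHTBGroupEnable[0] */

    #define SF_GID(sf)        (((sf) >> 12) & 0xfu)
    #define GROUP_FLAG(gid)   ((gid) ? (0x1u << (((gid) - 1) % 32u)) : 0u)
    #define GROUP_ENABLED(sf) (g_uiGroupEnable & GROUP_FLAG(SF_GID(sf)))

    int main(void)
    {
        uint32_t sf = 0x70014008u;          /* value of HTB_SF_MAIN_FWCCB_CMD: id 8, group 4 (MAIN), 1 arg */

        g_uiGroupEnable |= GROUP_FLAG(4u);  /* enable the MAIN group */
        printf("enabled: %s\n", GROUP_ENABLED(sf) ? "yes" : "no");
        return 0;
    }
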
+
+/*************************************************************************/ /*!
+ Host Trace Buffer operation mode
+ Care must be taken when changing this enum to ensure the MapFlags[] array
+ in htbserver.c is kept in step.
+*/ /**************************************************************************/
+typedef enum
+{
+       /*! Undefined operation mode */
+       HTB_OPMODE_UNDEF = 0,
+
+       /*! Drop latest, intended for continuous logging to a UM daemon.
+        *  If the daemon does not keep up, the most recent log data
+        *  will be dropped
+        */
+       HTB_OPMODE_DROPLATEST,
+
+       /*! Drop oldest, intended for crash logging.
+        *  Data will be continuously written to a circular buffer.
+        *  After a crash the buffer will contain events leading up to the crash
+        */
+       HTB_OPMODE_DROPOLDEST,
+
+       /*! Block write if buffer is full */
+       HTB_OPMODE_BLOCK,
+
+       HTB_OPMODE_LAST = HTB_OPMODE_BLOCK
+} HTB_OPMODE_CTRL;
+
+
+/*************************************************************************/ /*!
+ Host Trace Buffer log mode control
+*/ /**************************************************************************/
+typedef enum
+{
+       /*! Undefined log mode, used if update is not applied */
+       HTB_LOGMODE_UNDEF = 0,
+
+       /*! Log trace messages for all PIDs. */
+       HTB_LOGMODE_ALLPID,
+
+       /*! Log trace messages for specific PIDs only. */
+       HTB_LOGMODE_RESTRICTEDPID,
+
+       HTB_LOGMODE_LAST = HTB_LOGMODE_RESTRICTEDPID
+} HTB_LOGMODE_CTRL;
+
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* HTBUFFER_TYPES_H */
+
+/******************************************************************************
+ End of file (htbuffer_types.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/include/img_types_check.h b/drivers/gpu/drm/img/img-rogue/services/include/img_types_check.h
new file mode 100644 (file)
index 0000000..4708583
--- /dev/null
@@ -0,0 +1,58 @@
+/*************************************************************************/ /*!
+@File
+@Title          Global types for use by IMG APIs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Performs size checks on some of the IMG types.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef IMG_TYPES_CHECK_H
+#define IMG_TYPES_CHECK_H
+
+#ifndef __KERNEL__
+#include <assert.h>
+#endif /* __KERNEL__ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+static_assert(sizeof(IMG_BOOL) == 4, "invalid size of IMG_BOOL");
+static_assert(sizeof(IMG_INT) == 4, "invalid size of IMG_INT");
+static_assert(sizeof(IMG_UINT) == 4, "invalid size of IMG_UINT");
+static_assert(sizeof(PVRSRV_ERROR) == 4, "invalid size of PVRSRV_ERROR");
+
+#endif /* IMG_TYPES_CHECK_H */
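
The header relies on static_assert firing at compile time, so a size mismatch in any of the checked types breaks the build rather than misbehaving at runtime. A minimal sketch of the same idiom applied to a caller-defined type (the struct is hypothetical, not from the driver):

    /* Illustrative only: the same compile-time size-check idiom applied to a
     * hypothetical structure whose layout other components depend on. */
    #include <assert.h>   /* C11 static_assert */
    #include <stdint.h>

    struct demo_shared_record {
        uint32_t ui32Id;
        uint32_t ui32Flags;
    };

    static_assert(sizeof(struct demo_shared_record) == 8,
                  "invalid size of struct demo_shared_record");

    int main(void) { return 0; }
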
diff --git a/drivers/gpu/drm/img/img-rogue/services/include/info_page_client.h b/drivers/gpu/drm/img/img-rogue/services/include/info_page_client.h
new file mode 100644 (file)
index 0000000..9df2461
--- /dev/null
@@ -0,0 +1,89 @@
+/*************************************************************************/ /*!
+@File
+@Title          Kernel/User mode general purpose shared memory.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    General purpose shared memory (i.e. information page) mapped by
+                kernel space driver and user space clients. All info page
+                entries are sizeof(IMG_UINT32) on both 32/64-bit environments.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef INFO_PAGE_CLIENT_H
+#define INFO_PAGE_CLIENT_H
+
+#include "device_connection.h"
+#include "info_page_defs.h"
+#if defined(__KERNEL__)
+#include "pvrsrv.h"
+#endif
+
+/*************************************************************************/ /*!
+@Function      GetInfoPage
+
+@Description   Return Info Page address
+
+@Input         hDevConnection - Services device connection
+
+@Return        Info Page address
+*/
+/*****************************************************************************/
+static INLINE IMG_PUINT32 GetInfoPage(SHARED_DEV_CONNECTION hDevConnection)
+{
+#if defined(__KERNEL__)
+       return (PVRSRVGetPVRSRVData())->pui32InfoPage;
+#else
+    return hDevConnection->pui32InfoPage;
+#endif
+}
+
+/*************************************************************************/ /*!
+@Function      GetInfoPageDebugFlags
+
+@Description   Return Info Page debug flags
+
+@Input         hDevConnection - Services device connection
+
+@Return        Info Page debug flags
+*/
+/*****************************************************************************/
+static INLINE IMG_UINT32 GetInfoPageDebugFlags(SHARED_DEV_CONNECTION hDevConnection)
+{
+       return GetInfoPage(hDevConnection)[DEBUG_FEATURE_FLAGS];
+}
+
+#endif /* INFO_PAGE_CLIENT_H */
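
GetInfoPageDebugFlags() simply indexes the shared page at DEBUG_FEATURE_FLAGS, so callers test individual features by masking the returned word. A hedged caller-side fragment (hDevConnection is assumed to be a valid connection owned by the surrounding code; the branch body is elided):

    /* Illustrative fragment: act on a debug feature advertised via the info
     * page. hDevConnection is assumed to be a valid SHARED_DEV_CONNECTION. */
    if (GetInfoPageDebugFlags(hDevConnection) & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED)
    {
        /* ... record the extra sync-tracking information ... */
    }
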
diff --git a/drivers/gpu/drm/img/img-rogue/services/include/info_page_defs.h b/drivers/gpu/drm/img/img-rogue/services/include/info_page_defs.h
new file mode 100644 (file)
index 0000000..d3bc153
--- /dev/null
@@ -0,0 +1,91 @@
+/*************************************************************************/ /*!
+@File
+@Title          Kernel/User mode general purpose shared memory.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    General purpose shared memory (i.e. information page) mapped by
+                kernel space driver and user space clients. All information page
+                entries are sizeof(IMG_UINT32) on both 32/64-bit environments.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef INFO_PAGE_DEFS_H
+#define INFO_PAGE_DEFS_H
+
+
+/* CacheOp information page entries */
+#define CACHEOP_INFO_IDX_START     0x00
+#define CACHEOP_INFO_UMKMTHRESHLD  (CACHEOP_INFO_IDX_START + 1) /*!< UM=>KM routing threshold in bytes */
+#define CACHEOP_INFO_KMDFTHRESHLD  (CACHEOP_INFO_IDX_START + 2) /*!< KM/DF threshold in bytes */
+#define CACHEOP_INFO_LINESIZE      (CACHEOP_INFO_IDX_START + 3) /*!< CPU data cache line size */
+#define CACHEOP_INFO_PGSIZE        (CACHEOP_INFO_IDX_START + 4) /*!< CPU MMU page size */
+#define CACHEOP_INFO_IDX_END       (CACHEOP_INFO_IDX_START + 5)
+
+/* HWPerf information page entries */
+#define HWPERF_INFO_IDX_START      (CACHEOP_INFO_IDX_END)
+#define HWPERF_FILTER_SERVICES_IDX (HWPERF_INFO_IDX_START + 0)
+#define HWPERF_FILTER_EGL_IDX      (HWPERF_INFO_IDX_START + 1)
+#define HWPERF_FILTER_OPENGLES_IDX (HWPERF_INFO_IDX_START + 2)
+#define HWPERF_FILTER_OPENCL_IDX   (HWPERF_INFO_IDX_START + 3)
+#define HWPERF_FILTER_VULKAN_IDX   (HWPERF_INFO_IDX_START + 4)
+#define HWPERF_FILTER_OPENGL_IDX   (HWPERF_INFO_IDX_START + 5)
+#define HWPERF_INFO_IDX_END        (HWPERF_INFO_IDX_START + 6)
+
+/* timeout values */
+#define TIMEOUT_INFO_IDX_START                    (HWPERF_INFO_IDX_END)
+#define TIMEOUT_INFO_VALUE_RETRIES                (TIMEOUT_INFO_IDX_START + 0)
+#define TIMEOUT_INFO_VALUE_TIMEOUT_MS             (TIMEOUT_INFO_IDX_START + 1)
+#define TIMEOUT_INFO_CONDITION_RETRIES            (TIMEOUT_INFO_IDX_START + 2)
+#define TIMEOUT_INFO_CONDITION_TIMEOUT_MS         (TIMEOUT_INFO_IDX_START + 3)
+#define TIMEOUT_INFO_TASK_QUEUE_RETRIES           (TIMEOUT_INFO_IDX_START + 4)
+#define TIMEOUT_INFO_TASK_QUEUE_FLUSH_TIMEOUT_MS  (TIMEOUT_INFO_IDX_START + 5)
+#define TIMEOUT_INFO_IDX_END                      (TIMEOUT_INFO_IDX_START + 6)
+
+/* Bridge Info */
+#define BRIDGE_INFO_IDX_START                (TIMEOUT_INFO_IDX_END)
+#define BRIDGE_INFO_RGX_BRIDGES              (BRIDGE_INFO_IDX_START + 0)
+#define BRIDGE_INFO_PVR_BRIDGES              (BRIDGE_INFO_IDX_START + 1)
+#define BRIDGE_INFO_IDX_END                  (BRIDGE_INFO_IDX_START + 2)
+
+/* Debug features */
+#define DEBUG_FEATURE_FLAGS                  (BRIDGE_INFO_IDX_END)
+#define DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED       0x1
+#define DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED         0x2
+#define DEBUG_FEATURE_FLAGS_IDX_END          (DEBUG_FEATURE_FLAGS + 1)
+
+
+#endif /* INFO_PAGE_DEFS_H */
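
Each block of indices above starts at the previous block's *_IDX_END, so the page stays densely packed and new entries are only ever appended. A sketch of how a further, purely hypothetical block would continue the chain:

    /* Illustrative only: a hypothetical additional block would pick up where
     * the debug-feature block ends; the DEMO_ names are not part of the driver. */
    #define DEMO_INFO_IDX_START  (DEBUG_FEATURE_FLAGS_IDX_END)
    #define DEMO_INFO_SOMETHING  (DEMO_INFO_IDX_START + 0)
    #define DEMO_INFO_IDX_END    (DEMO_INFO_IDX_START + 1)
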
diff --git a/drivers/gpu/drm/img/img-rogue/services/include/km_apphint_defs_common.h b/drivers/gpu/drm/img/img-rogue/services/include/km_apphint_defs_common.h
new file mode 100644 (file)
index 0000000..987d37c
--- /dev/null
@@ -0,0 +1,280 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services AppHint definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+
+#ifndef KM_APPHINT_DEFS_COMMON_H
+#define KM_APPHINT_DEFS_COMMON_H
+
+/*
+*******************************************************************************
+ Build variables
+ All of these should be configurable only through the 'default' value
+******************************************************************************/
+#define APPHINT_LIST_BUILDVAR_COMMON \
+/* name,                            type,           class,       default,                                         helper,         */ \
+X(EnableTrustedDeviceAceConfig,     BOOL,           GPUVIRT_VAL, PVRSRV_APPHINT_ENABLETRUSTEDDEVICEACECONFIG,     NO_PARAM_TABLE   ) \
+X(CleanupThreadPriority,            UINT32,         NEVER,       PVRSRV_APPHINT_CLEANUPTHREADPRIORITY,            NO_PARAM_TABLE   ) \
+X(WatchdogThreadPriority,           UINT32,         NEVER,       PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY,           NO_PARAM_TABLE   ) \
+X(HWPerfClientBufferSize,           UINT32,         ALWAYS,      PVRSRV_APPHINT_HWPERFCLIENTBUFFERSIZE,           NO_PARAM_TABLE   ) \
+
+/*
+*******************************************************************************
+ Module parameters
+******************************************************************************/
+#define APPHINT_LIST_MODPARAM_COMMON \
+/* name,                            type,           class,       default,                                         helper,         */ \
+X(GeneralNon4KHeapPageSize,         UINT32,         ALWAYS,      PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE,         NO_PARAM_TABLE   ) \
+\
+X(EnableSignatureChecks,            BOOL,           PDUMP,       PVRSRV_APPHINT_ENABLESIGNATURECHECKS,            NO_PARAM_TABLE   ) \
+X(SignatureChecksBufSize,           UINT32,         PDUMP,       PVRSRV_APPHINT_SIGNATURECHECKSBUFSIZE,           NO_PARAM_TABLE   ) \
+\
+X(DisableClockGating,               BOOL,           ALWAYS,      PVRSRV_APPHINT_DISABLECLOCKGATING,               NO_PARAM_TABLE   ) \
+X(DisableDMOverlap,                 BOOL,           ALWAYS,      PVRSRV_APPHINT_DISABLEDMOVERLAP,                 NO_PARAM_TABLE   ) \
+\
+X(EnableRandomContextSwitch,        BOOL,           VALIDATION,  PVRSRV_APPHINT_ENABLERANDOMCONTEXTSWITCH,        NO_PARAM_TABLE   ) \
+X(EnableSoftResetContextSwitch,     BOOL,           ALWAYS,      PVRSRV_APPHINT_ENABLESOFTRESETCNTEXTSWITCH,      NO_PARAM_TABLE   ) \
+X(EnableFWContextSwitch,            UINT32,         ALWAYS,      PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH,            NO_PARAM_TABLE   ) \
+X(FWContextSwitchProfile,           UINT32,         VALIDATION,  PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE,           NO_PARAM_TABLE   ) \
+\
+X(EnableRDPowerIsland,              UINT32,         ALWAYS,      PVRSRV_APPHINT_ENABLERDPOWERISLAND,              NO_PARAM_TABLE   ) \
+\
+X(DriverMode,                       UINT32,         ALWAYS,      PVRSRV_APPHINT_DRIVERMODE,                       NO_PARAM_TABLE   ) \
+\
+X(FirmwarePerf,                     UINT32,         VALIDATION,  PVRSRV_APPHINT_FIRMWAREPERF,                     NO_PARAM_TABLE   ) \
+\
+X(HWPerfFWBufSizeInKB,              UINT32,         VALIDATION,  PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB,              NO_PARAM_TABLE   ) \
+X(HWPerfHostBufSizeInKB,            UINT32,         VALIDATION,  PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB,            NO_PARAM_TABLE   ) \
+X(HWPerfHostThreadTimeoutInMS,      UINT32,         VALIDATION,  PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS,      NO_PARAM_TABLE   ) \
+\
+X(JonesDisableMask,                 UINT32,         VALIDATION,  PVRSRV_APPHINT_JONESDISABLEMASK,                 NO_PARAM_TABLE   ) \
+X(NewFilteringMode,                 BOOL,           VALIDATION,  PVRSRV_APPHINT_NEWFILTERINGMODE,                 NO_PARAM_TABLE   ) \
+X(TruncateMode,                     UINT32,         VALIDATION,  PVRSRV_APPHINT_TRUNCATEMODE,                     NO_PARAM_TABLE   ) \
+X(EmuMaxFreq,                       UINT32,         ALWAYS,      PVRSRV_APPHINT_EMUMAXFREQ,                       NO_PARAM_TABLE   ) \
+X(GPIOValidationMode,               UINT32,         VALIDATION,  PVRSRV_APPHINT_GPIOVALIDATIONMODE,               NO_PARAM_TABLE   ) \
+X(RGXBVNC,                          STRING,         ALWAYS,      PVRSRV_APPHINT_RGXBVNC,                          NO_PARAM_TABLE   ) \
+\
+X(FWContextSwitchCrossDM,           UINT32,         ALWAYS,      0,                                               NO_PARAM_TABLE   ) \
+X(ValidateIrq,                      BOOL,           VALIDATION,  PVRSRV_APPHINT_VALIDATEIRQ,                      NO_PARAM_TABLE   ) \
+\
+X(TPUTrilinearFracMaskPDM,          UINT32,         VALIDATION,  0xF,                                             NO_PARAM_TABLE   ) \
+X(TPUTrilinearFracMaskVDM,          UINT32,         VALIDATION,  0xF,                                             NO_PARAM_TABLE   ) \
+X(TPUTrilinearFracMaskCDM,          UINT32,         VALIDATION,  0xF,                                             NO_PARAM_TABLE   ) \
+X(TPUTrilinearFracMaskTDM,          UINT32,         VALIDATION,  0xF,                                             NO_PARAM_TABLE   ) \
+X(HTBufferSizeInKB,                 UINT32,         ALWAYS,      PVRSRV_APPHINT_HTBUFFERSIZE,                     NO_PARAM_TABLE   ) \
+X(FWTraceBufSizeInDWords,           UINT32,         ALWAYS,      PVRSRV_APPHINT_FWTRACEBUFSIZEINDWORDS,           NO_PARAM_TABLE   ) \
+\
+X(EnablePageFaultDebug,             BOOL,           ALWAYS,      PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG,             NO_PARAM_TABLE   ) \
+X(EnableFullSyncTracking,           BOOL,           ALWAYS,      PVRSRV_APPHINT_ENABLEFULLSYNCTRACKING,           NO_PARAM_TABLE   ) \
+X(IgnoreHWReportedBVNC,             BOOL,           ALWAYS,      PVRSRV_APPHINT_IGNOREHWREPORTEDBVNC,             NO_PARAM_TABLE   ) \
+\
+X(PhysMemTestPasses,                UINT32,         ALWAYS,      PVRSRV_APPHINT_PHYSMEMTESTPASSES,                NO_PARAM_TABLE   ) \
+\
+X(FBCDCVersionOverride,             UINT32,         VALIDATION,  PVRSRV_APPHINT_FBCDCVERSIONOVERRIDE,             NO_PARAM_TABLE   ) \
+X(TestSLRInterval,                  UINT32,         VALIDATION,  PVRSRV_APPHINT_TESTSLRINTERVAL,                  NO_PARAM_TABLE   ) \
+X(EnablePollOnChecksumErrorStatus,  UINT32,         VALIDATION,  0,                                               NO_PARAM_TABLE   ) \
+X(RiscvDmiTest,                     BOOL,           VALIDATION,  PVRSRV_APPHINT_RISCVDMITEST,                     NO_PARAM_TABLE   ) \
+X(DevMemFWHeapPolicy,               UINT32,         ALWAYS,      PVRSRV_APPHINT_FIRMWARE_HEAP_POLICY,             NO_PARAM_TABLE   ) \
+\
+X(EnableAPMAll,                     UINT32,         VALIDATION,  PVRSRV_APPHINT_ENABLEAPM,                        NO_PARAM_TABLE   ) \
+X(KernelCCBSizeLog2,                UINT32,         VALIDATION,  PVRSRV_APPHINT_KCCB_SIZE_LOG2,                   NO_PARAM_TABLE   )
+
+/*
+*******************************************************************************
+ Debugfs parameters - driver configuration
+******************************************************************************/
+#define APPHINT_LIST_DEBUGINFO_COMMON \
+/* name,                            type,           class,       default,                                         helper,         */ \
+X(EnableHTBLogGroup,                UINT32Bitfield, ALWAYS,      PVRSRV_APPHINT_ENABLEHTBLOGGROUP,                htb_loggroup_tbl ) \
+X(HTBOperationMode,                 UINT32List,     ALWAYS,      PVRSRV_APPHINT_HTBOPERATIONMODE,                 htb_opmode_tbl   ) \
+X(EnableFTraceGPU,                  BOOL,           ALWAYS,      PVRSRV_APPHINT_ENABLEFTRACEGPU,                  NO_PARAM_TABLE   ) \
+X(HWPerfClientFilter_Services,      UINT32,         ALWAYS,      PVRSRV_APPHINT_HWPERFCLIENTFILTER_SERVICES,      NO_PARAM_TABLE   ) \
+X(HWPerfClientFilter_EGL,           UINT32,         ALWAYS,      PVRSRV_APPHINT_HWPERFCLIENTFILTER_EGL,           NO_PARAM_TABLE   ) \
+X(HWPerfClientFilter_OpenGLES,      UINT32,         ALWAYS,      PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGLES,      NO_PARAM_TABLE   ) \
+X(HWPerfClientFilter_OpenCL,        UINT32,         ALWAYS,      PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENCL,        NO_PARAM_TABLE   ) \
+X(HWPerfClientFilter_Vulkan,        UINT32,         ALWAYS,      PVRSRV_APPHINT_HWPERFCLIENTFILTER_VULKAN,        NO_PARAM_TABLE   ) \
+X(HWPerfClientFilter_OpenGL,        UINT32,         ALWAYS,      PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGL,        NO_PARAM_TABLE   ) \
+X(CacheOpConfig,                    UINT32,         ALWAYS,      PVRSRV_APPHINT_CACHEOPCONFIG,                    NO_PARAM_TABLE   ) \
+X(CacheOpUMKMThresholdSize,         UINT32,         ALWAYS,      PVRSRV_APPHINT_CACHEOPUMKMHRESHOLDSIZE,          NO_PARAM_TABLE   ) \
+
+/*
+*******************************************************************************
+ Debugfs parameters - device configuration
+******************************************************************************/
+#define APPHINT_LIST_DEBUGINFO_DEVICE_COMMON \
+/* name,                            type,           class,       default,                                         helper,         */ \
+/* Device Firmware config */\
+X(AssertOnHWRTrigger,               BOOL,           ALWAYS,      APPHNT_BLDVAR_ASSERTONHWRTRIGGER,                NO_PARAM_TABLE   ) \
+X(AssertOutOfMemory,                BOOL,           ALWAYS,      PVRSRV_APPHINT_ASSERTOUTOFMEMORY,                NO_PARAM_TABLE   ) \
+X(CheckMList,                       BOOL,           ALWAYS,      PVRSRV_APPHINT_CHECKMLIST,                       NO_PARAM_TABLE   ) \
+X(EnableLogGroup,                   UINT32Bitfield, ALWAYS,      PVRSRV_APPHINT_ENABLELOGGROUP,                   fwt_loggroup_tbl ) \
+X(FirmwareLogType,                  UINT32List,     ALWAYS,      PVRSRV_APPHINT_FIRMWARELOGTYPE,                  fwt_logtype_tbl  ) \
+X(HWRDebugDumpLimit,                UINT32,         ALWAYS,      PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT,                NO_PARAM_TABLE   ) \
+X(TimeCorrClock,                    UINT32List,     ALWAYS,      PVRSRV_APPHINT_TIMECORRCLOCK,                    timecorr_clk_tbl ) \
+X(HWPerfFWFilter,                   UINT64,         ALWAYS,      PVRSRV_APPHINT_HWPERFFWFILTER,                   NO_PARAM_TABLE   ) \
+/* Device host config */ \
+X(EnableAPM,                        UINT32,         ALWAYS,      PVRSRV_APPHINT_ENABLEAPM,                        NO_PARAM_TABLE   ) \
+X(DisableFEDLogging,                BOOL,           ALWAYS,      PVRSRV_APPHINT_DISABLEFEDLOGGING,                NO_PARAM_TABLE   ) \
+X(ZeroFreelist,                     BOOL,           ALWAYS,      PVRSRV_APPHINT_ZEROFREELIST,                     NO_PARAM_TABLE   ) \
+X(DisablePDumpPanic,                BOOL,           PDUMP,       PVRSRV_APPHINT_DISABLEPDUMPPANIC,                NO_PARAM_TABLE   ) \
+X(EnableFWPoisonOnFree,             BOOL,           DEBUG,       PVRSRV_APPHINT_ENABLEFWPOISONONFREE,             NO_PARAM_TABLE   ) \
+X(GPUUnitsPowerChange,              BOOL,           VALIDATION,  PVRSRV_APPHINT_GPUUNITSPOWERCHANGE,              NO_PARAM_TABLE   ) \
+X(HWPerfHostFilter,                 UINT32,         ALWAYS,      PVRSRV_APPHINT_HWPERFHOSTFILTER,                 NO_PARAM_TABLE   )
+
+/*
+*******************************************************************************
+ Mapping between debugfs parameters and module parameters.
+ This mapping is used to initialise device specific apphints from module
+ parameters.
+******************************************************************************/
+#define APPHINT_LIST_DEBUIGINFO_DEVICE_X_MODPARAM_INIT_COMMON \
+/* debuginfo device apphint name  modparam name */ \
+X(EnableAPM,                      EnableAPMAll)
+
+/*
+*******************************************************************************
+ * Types used in the APPHINT_LIST_<GROUP> lists must be defined here.
+ * New types require specific handling code to be added
+******************************************************************************/
+#define APPHINT_DATA_TYPE_LIST \
+X(BOOL) \
+X(UINT64) \
+X(UINT32) \
+X(UINT32Bitfield) \
+X(UINT32List) \
+X(STRING)
+
+#define APPHINT_CLASS_LIST \
+X(ALWAYS) \
+X(NEVER) \
+X(DEBUG) \
+X(PDUMP) \
+X(VALIDATION) \
+X(GPUVIRT_VAL)
+
+/*
+*******************************************************************************
+ Visibility control for module parameters
+ These bind build variables to AppHint Visibility Groups.
+******************************************************************************/
+#define APPHINT_ENABLED_CLASS_ALWAYS IMG_TRUE
+#define APPHINT_ENABLED_CLASS_NEVER IMG_FALSE
+#define apphint_modparam_class_ALWAYS(a, b, c) apphint_modparam_enable(a, b, c)
+#if defined(DEBUG)
+       #define APPHINT_ENABLED_CLASS_DEBUG IMG_TRUE
+       #define apphint_modparam_class_DEBUG(a, b, c) apphint_modparam_enable(a, b, c)
+#else
+       #define APPHINT_ENABLED_CLASS_DEBUG IMG_FALSE
+       #define apphint_modparam_class_DEBUG(a, b, c)
+#endif
+#if defined(PDUMP)
+       #define APPHINT_ENABLED_CLASS_PDUMP IMG_TRUE
+       #define apphint_modparam_class_PDUMP(a, b, c) apphint_modparam_enable(a, b, c)
+#else
+       #define APPHINT_ENABLED_CLASS_PDUMP IMG_FALSE
+       #define apphint_modparam_class_PDUMP(a, b, c)
+#endif
+#if defined(SUPPORT_VALIDATION)
+       #define APPHINT_ENABLED_CLASS_VALIDATION IMG_TRUE
+       #define apphint_modparam_class_VALIDATION(a, b, c) apphint_modparam_enable(a, b, c)
+#else
+       #define APPHINT_ENABLED_CLASS_VALIDATION IMG_FALSE
+       #define apphint_modparam_class_VALIDATION(a, b, c)
+#endif
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+       #define APPHINT_ENABLED_CLASS_GPUVIRT_VAL IMG_TRUE
+       #define apphint_modparam_class_GPUVIRT_VAL(a, b, c) apphint_modparam_enable(a, b, c)
+#else
+       #define APPHINT_ENABLED_CLASS_GPUVIRT_VAL IMG_FALSE
+       #define apphint_modparam_class_GPUVIRT_VAL(a, b, c)
+#endif
+
+/*
+*******************************************************************************
+ AppHint defaults based on other build parameters
+******************************************************************************/
+#if defined(ASSERTONHWRTRIGGER_DEFAULT_ENABLED)
+       #define APPHNT_BLDVAR_ASSERTONHWRTRIGGER      1
+#else
+       #define APPHNT_BLDVAR_ASSERTONHWRTRIGGER      0
+#endif
+#if defined(DEBUG)
+       #define APPHNT_BLDVAR_DEBUG             1
+       #define APPHNT_BLDVAR_DBGDUMPLIMIT      RGXFWIF_HWR_DEBUG_DUMP_ALL
+#else
+       #define APPHNT_BLDVAR_DEBUG             0
+       #define APPHNT_BLDVAR_DBGDUMPLIMIT      1
+#endif
+#if defined(PDUMP)
+#define APPHNT_BLDVAR_ENABLESIGNATURECHECKS     IMG_TRUE
+#else
+#define APPHNT_BLDVAR_ENABLESIGNATURECHECKS     IMG_FALSE
+#endif
+#if defined(DEBUG) || defined(SUPPORT_VALIDATION)
+#define APPHNT_BLDVAR_ENABLEPAGEFAULTDEBUG      IMG_TRUE
+#else
+#define APPHNT_BLDVAR_ENABLEPAGEFAULTDEBUG      IMG_FALSE
+#endif
+
+#if defined(DEBUG)
+       #define APPHNT_PHYSMEMTEST_ENABLE             1
+#else
+       #define APPHNT_PHYSMEMTEST_ENABLE             0
+#endif
+
+/* Data types and actions */
+typedef enum {
+       APPHINT_DATA_TYPE_INVALID = 0,
+#define X(a) APPHINT_DATA_TYPE_ ## a,
+       APPHINT_DATA_TYPE_LIST
+#undef X
+       APPHINT_DATA_TYPE_MAX
+} APPHINT_DATA_TYPE;
+
+typedef enum {
+#define X(a) APPHINT_CLASS_ ## a,
+       APPHINT_CLASS_LIST
+#undef X
+       APPHINT_CLASS_MAX
+} APPHINT_CLASS;
+
+#endif /* KM_APPHINT_DEFS_COMMON_H */
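
The apphint_modparam_class_* macros above make visibility a preprocessing decision: an entry whose class is not enabled in the current build expands to nothing, so it produces no module parameter at all. A standalone sketch of that gating pattern (the demo_* names and hints are invented; only the pattern mirrors the header):

    /* Illustrative only: entries whose class macro expands to nothing simply
     * disappear from the build; here DebugHint is dropped unless DEBUG is set. */
    #include <stdio.h>

    #define DEMO_HINT_LIST \
    X(AlwaysHint, ALWAYS)  \
    X(DebugHint,  DEBUG)

    #define demo_class_ALWAYS(name) printf("registered %s\n", #name);
    #if defined(DEBUG)
    #define demo_class_DEBUG(name)  printf("registered %s\n", #name);
    #else
    #define demo_class_DEBUG(name)  /* compiled out */
    #endif

    int main(void)
    {
    #define X(name, class) demo_class_##class(name)
        DEMO_HINT_LIST
    #undef X
        return 0;
    }
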
diff --git a/drivers/gpu/drm/img/img-rogue/services/include/os_cpu_cache.h b/drivers/gpu/drm/img/img-rogue/services/include/os_cpu_cache.h
new file mode 100644 (file)
index 0000000..56f9203
--- /dev/null
@@ -0,0 +1,69 @@
+/*************************************************************************/ /*!
+@File
+@Title          OS and CPU d-cache maintenance mechanisms
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines for cache management which are visible internally only
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef OS_CPU_CACHE_H
+#define OS_CPU_CACHE_H
+
+#include "info_page_defs.h"
+
+#define PVRSRV_CACHE_OP_TIMELINE                       0x8 /*!< Request SW_SYNC timeline notification when executed */
+#define PVRSRV_CACHE_OP_FORCE_SYNCHRONOUS      0x10 /*!< Force all batch members to be executed synchronously */
+
+#define CACHEFLUSH_ISA_X86                                     0x1     /*!< x86/x64 specific UM range-based cache flush */
+#define CACHEFLUSH_ISA_ARM64                           0x2     /*!< Aarch64 specific UM range-based cache flush */
+#define CACHEFLUSH_ISA_GENERIC                         0x3     /*!< Other ISA's without UM range-based cache flush */
+#ifndef CACHEFLUSH_ISA_TYPE
+       #if defined(__i386__) || defined(__x86_64__)
+               #define CACHEFLUSH_ISA_TYPE CACHEFLUSH_ISA_X86
+       #elif defined(__arm64__) || defined(__aarch64__)
+               #define CACHEFLUSH_ISA_TYPE CACHEFLUSH_ISA_ARM64
+       #else
+               #define CACHEFLUSH_ISA_TYPE CACHEFLUSH_ISA_GENERIC
+       #endif
+#endif
+
+#if (CACHEFLUSH_ISA_TYPE == CACHEFLUSH_ISA_X86) || (CACHEFLUSH_ISA_TYPE == CACHEFLUSH_ISA_ARM64)
+#define CACHEFLUSH_ISA_SUPPORTS_UM_FLUSH               /*!< x86/x86_64/ARM64 supports user-mode d-cache flush */
+#endif
+
+#endif /* OS_CPU_CACHE_H */
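
Consumers are expected to branch on the derived CACHEFLUSH_ISA_SUPPORTS_UM_FLUSH define rather than repeating the per-architecture checks. A hedged fragment (both helper names are hypothetical, not driver functions):

    /* Illustrative fragment: choose the flush path from the derived define.
     * um_range_flush() and km_range_flush() are hypothetical helpers. */
    #if defined(CACHEFLUSH_ISA_SUPPORTS_UM_FLUSH)
        um_range_flush(pvStart, pvEnd);   /* user-mode range-based flush */
    #else
        km_range_flush(pvStart, pvEnd);   /* fall back to a kernel request */
    #endif
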
diff --git a/drivers/gpu/drm/img/img-rogue/services/include/pdump.h b/drivers/gpu/drm/img/img-rogue/services/include/pdump.h
new file mode 100644 (file)
index 0000000..3ef7184
--- /dev/null
@@ -0,0 +1,238 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef SERVICES_PDUMP_H
+#define SERVICES_PDUMP_H
+
+#include "img_types.h"
+#include "services_km.h"
+
+
+/* A PDump out2.txt script is made up of 3 sections from three buffers:
+ *
+ *  - Init phase buffer   - holds PDump data written during driver
+ *                            initialisation, non-volatile.
+ *  - Main phase buffer   - holds PDump data written after driver init,
+ *                            volatile.
+ *  - Deinit phase buffer - holds PDump data  needed to shutdown HW/play back,
+ *                            written only during driver initialisation using
+ *                            the DEINIT flag.
+ *
+ * Volatile in this sense means that the buffer is drained and cleared when
+ * the pdump capture application connects and transfers the data to file.
+ *
+ * The PDump sub-system uses the driver state (init/post-init), whether
+ * the pdump capture application is connected or not (capture range set/unset)
+ * and, if pdump is connected, whether the frame is in the capture range, to
+ * decide which of the three buffers the PDump data is written to. Hence there
+ * are several key time periods in the lifetime of a kernel driver built
+ * with PDUMP=1 (flag usage is labelled against the time line below):
+ *
+ * Events:load              init        pdump       enter          exit         pdump
+ *       driver             done       connects     range          range     disconnects
+ *         |__________________|____________|__________|______________|____________|______ . . .
+ * State:  |   init phase     | no capture | <- capture client connected ->       | no capture
+ *         |                  |            |                                      |
+ *         |__________________|____________|______________________________________|_____ . . .
+ * Flag:   | CT,DI            | NONE,CT,PR | NONE,CT,PR                           | See no
+ *         | Never NONE or PR | Never DI   | Never DI                             |   capture
+ *         |__________________|____________|______________________________________|_____ . . .
+ * Write   | NONE -undef      | -No write  | -No write | -Main buf    | -No write | See no
+ * buffer  | CT -Init buf     | -Main buf  | -Main buf | -Main buf    | -Main buf |   capture
+ *         | PR -undef        | -Init buf  | -undef    | -Init & Main | -undef    |
+ *         | DI -Deinit buf   | -undef     | -undef    | -undef       | -undef    |
+ *         |__________________|____________|___________|______________|___________|_____ . . .
+ *
+ * Note: The time line could repeat if the pdump capture application is
+ * disconnected and reconnected without unloading the driver module.
+ *
+ * The DEINIT (DI) | CONTINUOUS (CT) | PERSISTENT (PR) flags must never
+ * be OR'd together and given to a PDump call since undefined behaviour may
+ * result and produce an invalid PDump which does not play back cleanly.
+ *
+ * The decision on which flag to use comes down to which time period the
+ * client or server driver makes the PDump write call AND the nature/purpose
+ * of the data.
+ *
+ * Note: This is a simplified time line, not all conditions represented.
+ *
+ */
+
+typedef IMG_UINT32 PDUMP_FLAGS_T;
+
+#define PDUMP_FLAGS_NONE            PDUMP_NONE     /*<! Output this entry with no special treatment i.e. output
+                                                          only if in frame range. */
+#define PDUMP_FLAGS_BLKDATA         PDUMP_BLKDATA  /*<! This flag indicates block-mode PDump data to be recorded
+                                                          in Block script stream in addition to Main script stream,
+                                                          if capture mode is set to BLOCKED */
+
+#define PDUMP_FLAGS_DEINIT          0x20000000U    /*<! Output this entry to the de-initialisation section, must
+                                                          only be used by the initialisation code in the Server. */
+
+#define PDUMP_FLAGS_POWER           0x08000000U    /*<! Output this entry even when a power transition is ongoing,
+                                                          as directed by other PDUMP flags. */
+
+#define PDUMP_FLAGS_CONTINUOUS      PDUMP_CONT     /*<! Output this entry always regardless of framed capture range,
+                                                          used by client applications being dumped.
+                                                          During init phase of driver such data carrying this flag
+                                                          will be recorded and present for all PDump client
+                                                          connections.
+                                                          Never combine with the PERSIST flag. */
+
+#define PDUMP_FLAGS_PERSISTENT      PDUMP_PERSIST  /*<! Output this entry always regardless of app and range,
+                                                          used by persistent resources created *after* driver
+                                                          initialisation that must appear in all PDump captures
+                                                          (i.e. current capture regardless of frame range (CONT)
+                                                          and all future PDump captures) for that driver
+                                                          instantiation/session.
+                                                          Effectively this is data that is not forgotten
+                                                          for the second and subsequent PDump client connections.
+                                                          Never combine with the CONTINUOUS flag. */
+
+#define PDUMP_FLAGS_INTERVAL        0x04000000U    /*<! Output this entry even when the capture is on a
+                                                          "no capture interval frame" (see pdump -sr option).
+                                                          Useful for commands that have a resource that was written
+                                                          out in a frame that was captured. For example,
+                                                          used by RGXScheduleCleanupCommand. */
+
+#define PDUMP_FLAGS_DEBUG           0x00010000U    /*<! For internal debugging use */
+
+#define PDUMP_FLAGS_NOHW            0x00000001U    /* For internal use: Skip sending instructions to the hardware
+                                                        when NO_HARDWARE=0 AND PDUMP=1 */
+
+#define PDUMP_FLAGS_FORCESPLIT      0x00000002U           /* Forces Main and Block script streams to split - Internal
+                                                        flag used in Block mode of PDump */
+
+#define PDUMP_FLAGS_PDUMP_LOCK_HELD 0x00000004U    /* This flag denotes that PDUMP_LOCK is already held, so
+                                                        further calls to PDUMP_LOCK with this flag set will not
+                                                        try to take the pdump lock. If PDUMP_LOCK is called without
+                                                        this flag by some other thread then it will try to take
+                                                        PDUMP_LOCK, which would make that thread sleep. This flag
+                                                        was introduced to enforce the order of pdumping after
+                                                        bridge lock removal */
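To make the flag rules above concrete, here is a minimal illustrative sketch, not part of the patch, of a check a caller could apply before issuing a PDump write; the helper name is invented and the rules are exactly those stated in the comment block above.

    /* Illustrative sketch only: reject the flag combinations the comment
     * above forbids before handing uiFlags to a PDump call. */
    static inline IMG_BOOL PDumpFlagsAreSane(PDUMP_FLAGS_T uiFlags)
    {
        /* CONTINUOUS and PERSISTENT must never be combined. */
        if ((uiFlags & PDUMP_FLAGS_CONTINUOUS) && (uiFlags & PDUMP_FLAGS_PERSISTENT))
        {
            return IMG_FALSE;
        }

        /* DEINIT is reserved for Server initialisation code and must not be
         * mixed with the other two special flags. */
        if ((uiFlags & PDUMP_FLAGS_DEINIT) &&
            (uiFlags & (PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_PERSISTENT)))
        {
            return IMG_FALSE;
        }

        return IMG_TRUE;
    }

During normal framed capture a client would typically pass PDUMP_FLAGS_NONE, and PDUMP_FLAGS_CONTINUOUS only for data that must be present in every capture, per the table above.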
+
+#define PDUMP_FILEOFFSET_FMTSPEC    "0x%08X"
+typedef IMG_UINT32 PDUMP_FILEOFFSET_T;
+
+/* PDump stream macros */
+
+/* Parameter stream */
+#define PDUMP_PARAM_INIT_STREAM_NAME       "paramInit"
+#define PDUMP_PARAM_MAIN_STREAM_NAME       "paramMain"
+#define PDUMP_PARAM_DEINIT_STREAM_NAME     "paramDeinit"
+#define PDUMP_PARAM_BLOCK_STREAM_NAME      "paramBlock"
+
+/*** Parameter stream sizes ***/
+
+/* Parameter Init Stream */
+
+/* The stream buffer sizes are tunable in core.mk, where the size of each buffer
+ * can be specified. If no size is specified in core.mk, the default size is used.
+ */
+#if defined(PDUMP_PARAM_INIT_STREAM_SIZE)
+       #if (PDUMP_PARAM_INIT_STREAM_SIZE < (700 * 1024))
+               #error PDUMP_PARAM_INIT_STREAM_SIZE must be at least 700 KB
+       #endif
+#endif
+
+/* Parameter Main Stream */
+#if defined(PDUMP_PARAM_MAIN_STREAM_SIZE)
+       #if (PDUMP_PARAM_MAIN_STREAM_SIZE < (2 * 1024 * 1024))
+               #error PDUMP_PARAM_MAIN_STREAM_SIZE must be at least 2 MB
+       #endif
+#endif
+
+/* Parameter Deinit Stream */
+#if defined(PDUMP_PARAM_DEINIT_STREAM_SIZE)
+       #if (PDUMP_PARAM_DEINIT_STREAM_SIZE < (64 * 1024))
+               #error PDUMP_PARAM_DEINIT_STREAM_SIZE must be at least 64 KB
+       #endif
+#endif
+
+/* Parameter Block Stream */
+/* There is no separate parameter Block stream as the Block script stream is
+ * just a filtered Main script stream. Hence it will refer to the Main stream
+ * parameters themselves.
+ */
+
+/* Script stream */
+#define PDUMP_SCRIPT_INIT_STREAM_NAME      "scriptInit"
+#define PDUMP_SCRIPT_MAIN_STREAM_NAME      "scriptMain"
+#define PDUMP_SCRIPT_DEINIT_STREAM_NAME    "scriptDeinit"
+#define PDUMP_SCRIPT_BLOCK_STREAM_NAME     "scriptBlock"
+
+/*** Script stream sizes ***/
+
+/* Script Init Stream */
+#if defined(PDUMP_SCRIPT_INIT_STREAM_SIZE)
+       #if (PDUMP_SCRIPT_INIT_STREAM_SIZE < (256 * 1024))
+               #error PDUMP_SCRIPT_INIT_STREAM_SIZE must be at least 256 KB
+       #endif
+#endif
+
+/* Script Main Stream */
+#if defined(PDUMP_SCRIPT_MAIN_STREAM_SIZE)
+       #if (PDUMP_SCRIPT_MAIN_STREAM_SIZE < (2 * 1024 * 1024))
+               #error PDUMP_SCRIPT_MAIN_STREAM_SIZE must be at least 2 MB
+       #endif
+#endif
+
+/* Script Deinit Stream */
+#if defined(PDUMP_SCRIPT_DEINIT_STREAM_SIZE)
+       #if (PDUMP_SCRIPT_DEINIT_STREAM_SIZE < (64 * 1024))
+               #error PDUMP_SCRIPT_DEINIT_STREAM_SIZE must be at least 64 KB
+       #endif
+#endif
+
+/* Script Block Stream */
+#if defined(PDUMP_SCRIPT_BLOCK_STREAM_SIZE)
+       #if (PDUMP_SCRIPT_BLOCK_STREAM_SIZE < (2 * 1024 * 1024))
+               #error PDUMP_SCRIPT_BLOCK_STREAM_SIZE must be at least 2 MB
+       #endif
+#endif
+
+
+#define PDUMP_PARAM_0_FILE_NAME     "%%0%%.prm"      /*!< Initial Param filename used in PDump capture */
+#define PDUMP_PARAM_N_FILE_NAME     "%%0%%_%02u.prm" /*!< Param filename used when PRM file split */
+#define PDUMP_PARAM_MAX_FILE_NAME   32               /*!< Max Size of parameter name used in out2.txt */
+
+#define PDUMP_IS_CONTINUOUS(flags) ((flags & PDUMP_FLAGS_CONTINUOUS) != 0)
+
+#endif /* SERVICES_PDUMP_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/include/physheap.h b/drivers/gpu/drm/img/img-rogue/services/include/physheap.h
new file mode 100644 (file)
index 0000000..060c5cd
--- /dev/null
@@ -0,0 +1,497 @@
+/*************************************************************************/ /*!
+@File
+@Title          Physical heap management header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines the interface for the physical heap management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h"
+#include "opaque_types.h"
+#include "pmr_impl.h"
+#include "physheap_config.h"
+
+#ifndef PHYSHEAP_H
+#define PHYSHEAP_H
+
+typedef struct _PHYS_HEAP_ PHYS_HEAP;
+#define INVALID_PHYS_HEAP 0xDEADDEAD
+
+struct _CONNECTION_DATA_;
+
+typedef struct _PG_HANDLE_
+{
+       union
+       {
+               void *pvHandle;
+               IMG_UINT64 ui64Handle;
+       }u;
+       /* The allocation order is the log2 of the number of pages to allocate,
+        * so it is a correspondingly small value. E.g. order 4 means a
+        * 2^4 * PAGE_SIZE contiguous allocation.
+        * The DevPxAlloc API does not need to support orders higher than 4.
+        */
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+       IMG_BYTE    uiOrder;    /* Order of the corresponding allocation */
+       IMG_BYTE    uiOSid;     /* OSid to use for allocation arena.
+                                * Connection-specific. */
+       IMG_BYTE    uiPad1,
+                   uiPad2;     /* Spare */
+#else
+       IMG_BYTE    uiOrder;    /* Order of the corresponding allocation */
+       IMG_BYTE    uiPad1,
+                   uiPad2,
+                   uiPad3;     /* Spare */
+#endif
+} PG_HANDLE;
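As a quick illustration of the uiOrder field documented above, the size behind a PG_HANDLE follows directly from the log2 order. This is a sketch only; the helper is not part of the driver and it assumes the OS PAGE_SIZE macro is in scope.

    /* Illustrative sketch only: bytes covered by a PG_HANDLE allocation. */
    static inline size_t PgHandleNumBytes(const PG_HANDLE *psHandle)
    {
        /* order 0 -> 1 page, order 4 -> 16 contiguous pages, and so on. */
        return (size_t)PAGE_SIZE << psHandle->uiOrder;
    }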
+
+/*! Pointer to private implementation specific data */
+typedef void *PHEAP_IMPL_DATA;
+
+/*************************************************************************/ /*!
+@Function       Callback function PFN_DESTROY_DATA
+@Description    Destroy private implementation specific data.
+@Input          PHEAP_IMPL_DATA    Pointer to implementation data.
+*/ /**************************************************************************/
+typedef void (*PFN_DESTROY_DATA)(PHEAP_IMPL_DATA);
+/*************************************************************************/ /*!
+@Function       Callback function PFN_GET_DEV_PADDR
+@Description    Get heap device physical address.
+@Input          PHEAP_IMPL_DATA    Pointer to implementation data.
+@Output         IMG_DEV_PHYADDR    Device physical address.
+@Return         PVRSRV_ERROR       PVRSRV_OK or error code
+*/ /**************************************************************************/
+typedef PVRSRV_ERROR (*PFN_GET_DEV_PADDR)(PHEAP_IMPL_DATA, IMG_DEV_PHYADDR*);
+/*************************************************************************/ /*!
+@Function       Callback function PFN_GET_CPU_PADDR
+@Description    Get heap CPU physical address.
+@Input          PHEAP_IMPL_DATA    Pointer to implementation data.
+@Output         IMG_CPU_PHYADDR    CPU physical address.
+@Return         PVRSRV_ERROR       PVRSRV_OK or error code
+*/ /**************************************************************************/
+typedef PVRSRV_ERROR (*PFN_GET_CPU_PADDR)(PHEAP_IMPL_DATA, IMG_CPU_PHYADDR*);
+/*************************************************************************/ /*!
+@Function       Callback function PFN_GET_SIZE
+@Description    Get size of heap.
+@Input          PHEAP_IMPL_DATA    Pointer to implementation data.
+@Output         IMG_UINT64         Size of heap.
+@Return         PVRSRV_ERROR       PVRSRV_OK or error code
+*/ /**************************************************************************/
+typedef PVRSRV_ERROR (*PFN_GET_SIZE)(PHEAP_IMPL_DATA, IMG_UINT64*);
+/*************************************************************************/ /*!
+@Function       Callback function PFN_GET_PAGE_SHIFT
+@Description    Get heap log2 page shift.
+@Return         IMG_UINT32         Log2 page shift
+*/ /**************************************************************************/
+typedef IMG_UINT32 (*PFN_GET_PAGE_SHIFT)(void);
+
+/*************************************************************************/ /*!
+@Function       Callback function PFN_GET_MEM_STATS
+@Description    Get total and free memory size of the physical heap managed by
+                the PMR Factory.
+@Input          PHEAP_IMPL_DATA    Pointer to implementation data.
+@Output         IMG_UINT64         Total size of the heap.
+@Output         IMG_UINT64         Free size available in the heap.
+@Return         none
+*/ /**************************************************************************/
+typedef void (*PFN_GET_MEM_STATS)(PHEAP_IMPL_DATA, IMG_UINT64 *, IMG_UINT64 *);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+typedef PVRSRV_ERROR (*PFN_PAGES_ALLOC_GPV)(PHYS_HEAP *psPhysHeap, size_t uiSize,
+                                            PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr,
+                                            IMG_UINT32 ui32OSid, IMG_PID uiPid);
+#endif
+typedef PVRSRV_ERROR (*PFN_PAGES_ALLOC)(PHYS_HEAP *psPhysHeap, size_t uiSize,
+                                        PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr,
+                                        IMG_PID uiPid);
+
+typedef void (*PFN_PAGES_FREE)(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle);
+
+typedef PVRSRV_ERROR (*PFN_PAGES_MAP)(PHYS_HEAP *psPhysHeap, PG_HANDLE *pshMemHandle,
+                                      size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+                                      void **pvPtr);
+
+typedef void (*PFN_PAGES_UNMAP)(PHYS_HEAP *psPhysHeap,
+                                PG_HANDLE *psMemHandle, void *pvPtr);
+
+typedef PVRSRV_ERROR (*PFN_PAGES_CLEAN)(PHYS_HEAP *psPhysHeap,
+                                        PG_HANDLE *pshMemHandle,
+                                        IMG_UINT32 uiOffset,
+                                        IMG_UINT32 uiLength);
+
+/*************************************************************************/ /*!
+@Function       Callback function PFN_CREATE_PMR
+@Description    Create a PMR physical allocation and back it with RAM on
+                creation, if required. The RAM pages come either directly from
+                the Phys Heap's associated pool of memory or from an OS API.
+@Input          psPhysHeap         Pointer to Phys Heap.
+@Input          psConnection       Pointer to device connection.
+@Input          uiSize             Allocation size.
+@Input          uiChunkSize        Chunk size.
+@Input          ui32NumPhysChunks  Physical chunk count.
+@Input          ui32NumVirtChunks  Virtual chunk count.
+@Input          pui32MappingTable  Mapping Table.
+@Input          uiLog2PageSize     Page size.
+@Input          uiFlags            Memalloc flags.
+@Input          pszAnnotation      Annotation.
+@Input          uiPid              Process ID.
+@Output         ppsPMRPtr          Pointer to PMR.
+@Input          ui32PDumpFlag      PDump flags.
+@Return         PVRSRV_ERROR       PVRSRV_OK or error code
+*/ /**************************************************************************/
+typedef PVRSRV_ERROR (*PFN_CREATE_PMR)(PHYS_HEAP *psPhysHeap,
+                                                                          struct _CONNECTION_DATA_ *psConnection,
+                                                                          IMG_DEVMEM_SIZE_T uiSize,
+                                                                          IMG_DEVMEM_SIZE_T uiChunkSize,
+                                                                          IMG_UINT32 ui32NumPhysChunks,
+                                                                          IMG_UINT32 ui32NumVirtChunks,
+                                                                          IMG_UINT32 *pui32MappingTable,
+                                                                          IMG_UINT32 uiLog2PageSize,
+                                                                          PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                                                          const IMG_CHAR *pszAnnotation,
+                                                                          IMG_PID uiPid,
+                                                                          PMR **ppsPMRPtr,
+                                                                          IMG_UINT32 ui32PDumpFlags);
+
+/*! Implementation specific function table */
+typedef struct PHEAP_IMPL_FUNCS_TAG
+{
+       PFN_DESTROY_DATA pfnDestroyData;
+       PFN_GET_DEV_PADDR pfnGetDevPAddr;
+       PFN_GET_CPU_PADDR pfnGetCPUPAddr;
+       PFN_GET_SIZE pfnGetSize;
+       PFN_GET_PAGE_SHIFT pfnGetPageShift;
+       PFN_GET_MEM_STATS pfnGetPMRFactoryMemStats;
+       PFN_CREATE_PMR pfnCreatePMR;
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+       PFN_PAGES_ALLOC_GPV pfnPagesAllocGPV;
+#endif
+       PFN_PAGES_ALLOC pfnPagesAlloc;
+       PFN_PAGES_FREE pfnPagesFree;
+       PFN_PAGES_MAP pfnPagesMap;
+       PFN_PAGES_UNMAP pfnPagesUnMap;
+       PFN_PAGES_CLEAN pfnPagesClean;
+} PHEAP_IMPL_FUNCS;
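A PMR factory registers itself by filling in this table and passing it to PhysHeapCreate (declared below). The following is an illustrative sketch only: the ExampleHeap* names are invented, the bodies are stubs, and a real factory would populate every callback it supports.

    /* Illustrative sketch only: a partially populated implementation table. */
    static void ExampleHeapDestroyData(PHEAP_IMPL_DATA pvImplData)
    {
        /* Free any private state allocated alongside the heap. */
    }

    static PVRSRV_ERROR ExampleHeapGetSize(PHEAP_IMPL_DATA pvImplData, IMG_UINT64 *puiSize)
    {
        *puiSize = 256 * 1024 * 1024; /* e.g. a fixed 256 MB carve-out */
        return PVRSRV_OK;
    }

    static IMG_UINT32 ExampleHeapGetPageShift(void)
    {
        return 12; /* 4 KB pages */
    }

    static PHEAP_IMPL_FUNCS gsExampleImplFuncs = {
        .pfnDestroyData  = ExampleHeapDestroyData,
        .pfnGetSize      = ExampleHeapGetSize,
        .pfnGetPageShift = ExampleHeapGetPageShift,
        /* pfnGetDevPAddr, pfnGetCPUPAddr, pfnCreatePMR, the page callbacks,
         * etc. would be wired up in the same way by a real factory. */
    };

PhysHeapCreate(psDevNode, psConfig, pvImplData, &gsExampleImplFuncs, &psPhysHeap) would then register the heap against the device node.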
+
+/*************************************************************************/ /*!
+@Function       PhysHeapCreateDeviceHeapsFromConfigs
+@Description    Create new heaps for a device from configs.
+@Input          psDevNode      Pointer to device node struct
+@Input          pasConfigs     Pointer to array of Heap configurations.
+@Input          ui32NumConfigs Number of configurations in array.
+@Return         PVRSRV_ERROR PVRSRV_OK or error code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PhysHeapCreateDeviceHeapsFromConfigs(PPVRSRV_DEVICE_NODE psDevNode,
+                                     PHYS_HEAP_CONFIG *pasConfigs,
+                                     IMG_UINT32 ui32NumConfigs);
+
+/*************************************************************************/ /*!
+@Function       PhysHeapCreateHeapFromConfig
+@Description    Create a new heap. Calls specific heap API depending
+                on heap type.
+@Input          psDevNode    Pointer to device node struct.
+@Input          psConfig     Heap configuration.
+@Output         ppsPhysHeap  Pointer to the created heap.
+@Return         PVRSRV_ERROR PVRSRV_OK or error code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PhysHeapCreateHeapFromConfig(PPVRSRV_DEVICE_NODE psDevNode,
+                                                        PHYS_HEAP_CONFIG *psConfig,
+                                                        PHYS_HEAP **ppsPhysHeap);
+
+/*************************************************************************/ /*!
+@Function       PhysHeapCreate
+@Description    Create a new heap. Allocated and stored internally.
+                Destroy with PhysHeapDestroy when no longer required.
+@Input          psDevNode    Pointer to device node struct
+@Input          psConfig     Heap configuration.
+@Input          pvImplData   Implementation specific data. Can be NULL.
+@Input          psImplFuncs  Implementation specific function table. Must be
+                             a valid pointer.
+@Output         ppsPhysHeap  Pointer to the created heap. Must be a valid
+                             pointer.
+@Return         PVRSRV_ERROR PVRSRV_OK or error code
+*/ /**************************************************************************/
+PVRSRV_ERROR PhysHeapCreate(PPVRSRV_DEVICE_NODE psDevNode,
+                                                       PHYS_HEAP_CONFIG *psConfig,
+                                                       PHEAP_IMPL_DATA pvImplData,
+                                                       PHEAP_IMPL_FUNCS *psImplFuncs,
+                                                       PHYS_HEAP **ppsPhysHeap);
+
+/*************************************************************************/ /*!
+@Function       PhysHeapDestroyDeviceHeaps
+@Description    Destroys all heaps referenced by a device.
+@Input          psDevNode Pointer to a device node struct.
+@Return         void
+*/ /**************************************************************************/
+void PhysHeapDestroyDeviceHeaps(PPVRSRV_DEVICE_NODE psDevNode);
+
+void PhysHeapDestroy(PHYS_HEAP *psPhysHeap);
+
+PVRSRV_ERROR PhysHeapAcquire(PHYS_HEAP *psPhysHeap);
+
+/*************************************************************************/ /*!
+@Function       PhysHeapAcquireByUsage
+@Description    Acquire PhysHeap by usage flag.
+@Input          ui32UsageFlag PhysHeap usage flag
+@Input          psDevNode     Pointer to device node struct
+@Output         ppsPhysHeap   PhysHeap if found.
+@Return         PVRSRV_ERROR PVRSRV_OK or error code
+*/ /**************************************************************************/
+PVRSRV_ERROR PhysHeapAcquireByUsage(PHYS_HEAP_USAGE_FLAGS ui32UsageFlag,
+                                                                       PPVRSRV_DEVICE_NODE psDevNode,
+                                                                       PHYS_HEAP **ppsPhysHeap);
+
+/*************************************************************************/ /*!
+@Function       PhysHeapAcquireByDevPhysHeap
+@Description    Acquire PhysHeap by DevPhysHeap.
+@Input          eDevPhysHeap Device Phys Heap.
+@Input          psDevNode    Pointer to device node struct
+@Output         ppsPhysHeap  PhysHeap if found.
+@Return         PVRSRV_ERROR PVRSRV_OK or error code
+*/ /**************************************************************************/
+PVRSRV_ERROR PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP eDevPhysHeap,
+                                                                                 PPVRSRV_DEVICE_NODE psDevNode,
+                                                                                 PHYS_HEAP **ppsPhysHeap);
+
+void PhysHeapRelease(PHYS_HEAP *psPhysHeap);
+
+/*************************************************************************/ /*!
+@Function       PhysHeapGetImplData
+@Description    Get physical heap implementation specific data.
+@Input          psPhysHeap   Pointer to physical heap.
+@Return         PHEAP_IMPL_DATA  Implementation specific data associated with
+                                 the heap. Can be NULL.
+*/ /**************************************************************************/
+PHEAP_IMPL_DATA PhysHeapGetImplData(PHYS_HEAP *psPhysHeap);
+
+PHYS_HEAP_TYPE PhysHeapGetType(PHYS_HEAP *psPhysHeap);
+
+/*************************************************************************/ /*!
+@Function       PhysHeapGetFlags
+@Description    Get phys heap usage flags.
+@Input          psPhysHeap   Pointer to physical heap.
+@Return         PHYS_HEAP_USAGE_FLAGS Phys heap usage flags.
+*/ /**************************************************************************/
+PHYS_HEAP_USAGE_FLAGS PhysHeapGetFlags(PHYS_HEAP *psPhysHeap);
+
+IMG_BOOL PhysHeapValidateDefaultHeapExists(PPVRSRV_DEVICE_NODE psDevNode);
+
+PVRSRV_ERROR PhysHeapGetCpuPAddr(PHYS_HEAP *psPhysHeap,
+                                                                          IMG_CPU_PHYADDR *psCpuPAddr);
+
+
+PVRSRV_ERROR PhysHeapGetSize(PHYS_HEAP *psPhysHeap,
+                                                                  IMG_UINT64 *puiSize);
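As a usage sketch of the acquire/release API above (illustrative only; the wrapper function is invented and error handling is kept minimal), a caller could look up a device's GPU-local heap and query its size:

    /* Illustrative sketch only: query the size of a device's GPU_LOCAL heap. */
    static PVRSRV_ERROR ExampleQueryGpuLocalHeapSize(PPVRSRV_DEVICE_NODE psDevNode,
                                                     IMG_UINT64 *puiSize)
    {
        PHYS_HEAP *psPhysHeap;
        PVRSRV_ERROR eError;

        eError = PhysHeapAcquireByUsage(PHYS_HEAP_USAGE_GPU_LOCAL, psDevNode, &psPhysHeap);
        if (eError != PVRSRV_OK)
        {
            return eError;
        }

        eError = PhysHeapGetSize(psPhysHeap, puiSize);

        PhysHeapRelease(psPhysHeap);
        return eError;
    }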
+
+/*************************************************************************/ /*!
+@Function       PVRSRVGetDevicePhysHeapCount
+@Description    Get the physical heap count supported by the device.
+@Input          psDevNode   Device node the heap count is requested for.
+@Output         pui32PhysHeapCount  Buffer that receives the heap count.
+@Return         None
+*/ /**************************************************************************/
+void PVRSRVGetDevicePhysHeapCount(PPVRSRV_DEVICE_NODE psDevNode,
+                                                                 IMG_UINT32 *pui32PhysHeapCount);
+
+/*************************************************************************/ /*!
+@Function       PhysHeapGetMemInfo
+@Description    Get phys heap memory statistics for a given physical heap ID.
+@Input          psDevNode          Pointer to device node struct
+@Input          ui32PhysHeapCount  Physical heap count
+@Input          paePhysHeapID      Physical heap ID
+@Output         paPhysHeapMemStats Buffer that holds the memory statistics
+@Return         PVRSRV_ERROR PVRSRV_OK or error code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PhysHeapGetMemInfo(PPVRSRV_DEVICE_NODE psDevNode,
+                                  IMG_UINT32 ui32PhysHeapCount,
+                                  PVRSRV_PHYS_HEAP *paePhysHeapID,
+                                  PHYS_HEAP_MEM_STATS_PTR paPhysHeapMemStats);
+
+/*************************************************************************/ /*!
+@Function       PhysHeapGetMemInfoPkd
+@Description    Get phys heap memory statistics for a given physical heap ID.
+@Input          psDevNode          Pointer to device node struct
+@Input          ui32PhysHeapCount  Physical heap count
+@Input          paePhysHeapID      Physical heap ID
+@Output         paPhysHeapMemStats Buffer that holds the memory statistics
+@Return         PVRSRV_ERROR PVRSRV_OK or error code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PhysHeapGetMemInfoPkd(PPVRSRV_DEVICE_NODE psDevNode,
+                                         IMG_UINT32 ui32PhysHeapCount,
+                                         PVRSRV_PHYS_HEAP *paePhysHeapID,
+                                         PHYS_HEAP_MEM_STATS_PKD_PTR paPhysHeapMemStats);
+
+/*************************************************************************/ /*!
+@Function       PhysheapGetPhysMemUsage
+@Description    Get memory statistics for a given physical heap.
+@Input          psPhysHeap      Physical heap
+@Output         pui64TotalSize  Buffer that holds the total memory size of the
+                                given physical heap.
+@Output         pui64FreeSize   Buffer that holds the free memory available in
+                                a given physical heap.
+@Return         none
+*/ /**************************************************************************/
+void PhysheapGetPhysMemUsage(PHYS_HEAP *psPhysHeap,
+                                                        IMG_UINT64 *pui64TotalSize,
+                                                        IMG_UINT64 *pui64FreeSize);
+
+PVRSRV_ERROR PhysHeapGetDevPAddr(PHYS_HEAP *psPhysHeap,
+                                                                IMG_DEV_PHYADDR *psDevPAddr);
+
+void PhysHeapCpuPAddrToDevPAddr(PHYS_HEAP *psPhysHeap,
+                                                               IMG_UINT32 ui32NumOfAddr,
+                                                               IMG_DEV_PHYADDR *psDevPAddr,
+                                                               IMG_CPU_PHYADDR *psCpuPAddr);
+
+void PhysHeapDevPAddrToCpuPAddr(PHYS_HEAP *psPhysHeap,
+                                                               IMG_UINT32 ui32NumOfAddr,
+                                                               IMG_CPU_PHYADDR *psCpuPAddr,
+                                                               IMG_DEV_PHYADDR *psDevPAddr);
+
+IMG_CHAR *PhysHeapPDumpMemspaceName(PHYS_HEAP *psPhysHeap);
+
+/*************************************************************************/ /*!
+@Function       PhysHeapCreatePMR
+@Description    Function calls an implementation-specific function pointer.
+                See function pointer for details.
+@Return         PVRSRV_ERROR       PVRSRV_OK or error code
+*/ /**************************************************************************/
+PVRSRV_ERROR PhysHeapCreatePMR(PHYS_HEAP *psPhysHeap,
+                                                          struct _CONNECTION_DATA_ *psConnection,
+                                                          IMG_DEVMEM_SIZE_T uiSize,
+                                                          IMG_DEVMEM_SIZE_T uiChunkSize,
+                                                          IMG_UINT32 ui32NumPhysChunks,
+                                                          IMG_UINT32 ui32NumVirtChunks,
+                                                          IMG_UINT32 *pui32MappingTable,
+                                                          IMG_UINT32 uiLog2PageSize,
+                                                          PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                                          const IMG_CHAR *pszAnnotation,
+                                                          IMG_PID uiPid,
+                                                          PMR **ppsPMRPtr,
+                                                          IMG_UINT32 ui32PDumpFlags);
+
+PVRSRV_ERROR PhysHeapInit(void);
+void PhysHeapDeinit(void);
+
+/*************************************************************************/ /*!
+@Function       PhysHeapDeviceNode
+@Description    Get pointer to the device node this heap belongs to.
+@Input          psPhysHeap          Pointer to physical heap.
+@Return         PPVRSRV_DEVICE_NODE Pointer to device node.
+*/ /**************************************************************************/
+PPVRSRV_DEVICE_NODE PhysHeapDeviceNode(PHYS_HEAP *psPhysHeap);
+
+/*************************************************************************/ /*!
+@Function       PhysHeapPVRLayerAcquire
+@Description    Should the phys heap be acquired in the PVR layer?
+@Input          ePhysHeap           Phys heap.
+@Return         IMG_BOOL            IMG_TRUE if yes.
+*/ /**************************************************************************/
+IMG_BOOL PhysHeapPVRLayerAcquire(PVRSRV_PHYS_HEAP ePhysHeap);
+
+/*************************************************************************/ /*!
+@Function       PhysHeapUserModeAlloc
+@Description    Is allocation from user mode (UM) allowed for this heap?
+@Input          ePhysHeap           Phys heap.
+@Return         IMG_BOOL            IMG_TRUE if yes.
+*/ /**************************************************************************/
+IMG_BOOL PhysHeapUserModeAlloc(PVRSRV_PHYS_HEAP ePhysHeap);
+
+/*************************************************************************/ /*!
+@Function       PhysHeapMMUPxSetup
+@Description    Setup MMU Px allocation function pointers.
+@Input          psDeviceNode Pointer to device node struct
+@Return         PVRSRV_ERROR PVRSRV_OK on success.
+*/ /**************************************************************************/
+PVRSRV_ERROR PhysHeapMMUPxSetup(PPVRSRV_DEVICE_NODE psDeviceNode);
+
+/*************************************************************************/ /*!
+@Function       PhysHeapMMUPxDeInit
+@Description    Deinit after PhysHeapMMUPxSetup.
+@Input          psDeviceNode Pointer to device node struct
+*/ /**************************************************************************/
+void PhysHeapMMUPxDeInit(PPVRSRV_DEVICE_NODE psDeviceNode);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+PVRSRV_ERROR PhysHeapPagesAllocGPV(PHYS_HEAP *psPhysHeap,
+                                   size_t uiSize,
+                                   PG_HANDLE *psMemHandle,
+                                   IMG_DEV_PHYADDR *psDevPAddr,
+                                   IMG_UINT32 ui32OSid, IMG_PID uiPid);
+#endif
+
+PVRSRV_ERROR PhysHeapPagesAlloc(PHYS_HEAP *psPhysHeap,
+                                size_t uiSize,
+                                PG_HANDLE *psMemHandle,
+                                IMG_DEV_PHYADDR *psDevPAddr,
+                                IMG_PID uiPid);
+
+void PhysHeapPagesFree(PHYS_HEAP *psPhysHeap,
+                       PG_HANDLE *psMemHandle);
+
+PVRSRV_ERROR PhysHeapPagesMap(PHYS_HEAP *psPhysHeap,
+                              PG_HANDLE *pshMemHandle,
+                              size_t uiSize,
+                              IMG_DEV_PHYADDR *psDevPAddr,
+                              void **pvPtr);
+
+void PhysHeapPagesUnMap(PHYS_HEAP *psPhysHeap,
+                        PG_HANDLE *psMemHandle,
+                        void *pvPtr);
+
+PVRSRV_ERROR PhysHeapPagesClean(PHYS_HEAP *psPhysHeap,
+                                PG_HANDLE *pshMemHandle,
+                                IMG_UINT32 uiOffset,
+                                IMG_UINT32 uiLength);
+
+/*************************************************************************/ /*!
+@Function       PhysHeapGetPageShift
+@Description    Get phys heap page shift.
+@Input          psPhysHeap   Pointer to physical heap.
+@Return         IMG_UINT32   Log2 page shift
+*/ /**************************************************************************/
+IMG_UINT32 PhysHeapGetPageShift(PHYS_HEAP *psPhysHeap);
+
+#endif /* PHYSHEAP_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/include/physheap_config.h b/drivers/gpu/drm/img/img-rogue/services/include/physheap_config.h
new file mode 100644 (file)
index 0000000..9d4d786
--- /dev/null
@@ -0,0 +1,119 @@
+/*************************************************************************/ /*!
+@File           physheap_config.h
+@Title          Physical heap Config API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Physical heap configs are created in the system layer and
+                stored against each device node for use in the Services Server
+                common layer.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PHYSHEAP_CONFIG_H
+#define PHYSHEAP_CONFIG_H
+
+#include "img_types.h"
+#include "pvrsrv_memallocflags.h"
+#include "pvrsrv_memalloc_physheap.h"
+
+typedef IMG_UINT32 PHYS_HEAP_USAGE_FLAGS;
+
+#define PHYS_HEAP_USAGE_GPU_LOCAL      (1<<PVRSRV_PHYS_HEAP_GPU_LOCAL)
+#define PHYS_HEAP_USAGE_CPU_LOCAL      (1<<PVRSRV_PHYS_HEAP_CPU_LOCAL)
+#define PHYS_HEAP_USAGE_FW_MAIN        (1<<PVRSRV_PHYS_HEAP_FW_MAIN)
+#define PHYS_HEAP_USAGE_FW_CONFIG      (1<<PVRSRV_PHYS_HEAP_FW_CONFIG)
+#define PHYS_HEAP_USAGE_EXTERNAL       (1<<PVRSRV_PHYS_HEAP_EXTERNAL)
+#define PHYS_HEAP_USAGE_GPU_PRIVATE    (1<<PVRSRV_PHYS_HEAP_GPU_PRIVATE)
+#define PHYS_HEAP_USAGE_GPU_COHERENT   (1<<PVRSRV_PHYS_HEAP_GPU_COHERENT)
+#define PHYS_HEAP_USAGE_GPU_SECURE     (1<<PVRSRV_PHYS_HEAP_GPU_SECURE)
+#define PHYS_HEAP_USAGE_FW_CODE        (1<<PVRSRV_PHYS_HEAP_FW_CODE)
+#define PHYS_HEAP_USAGE_FW_PRIV_DATA   (1<<PVRSRV_PHYS_HEAP_FW_PRIV_DATA)
+#define PHYS_HEAP_USAGE_WRAP           (1<<30)
+#define PHYS_HEAP_USAGE_DISPLAY        (1<<31)
+
+typedef void (*CpuPAddrToDevPAddr)(IMG_HANDLE hPrivData,
+                                   IMG_UINT32 ui32NumOfAddr,
+                                   IMG_DEV_PHYADDR *psDevPAddr,
+                                   IMG_CPU_PHYADDR *psCpuPAddr);
+
+typedef void (*DevPAddrToCpuPAddr)(IMG_HANDLE hPrivData,
+                                   IMG_UINT32 ui32NumOfAddr,
+                                   IMG_CPU_PHYADDR *psCpuPAddr,
+                                   IMG_DEV_PHYADDR *psDevPAddr);
+
+/*! Structure used to hold function pointers used for run-time physical address
+ * translation by Services. Gives flexibility to allow the CPU and GPU to see
+ * the same pool of physical RAM and different physical bus addresses.
+ * Both fields must be valid functions even if the conversion is simple.
+ */
+typedef struct _PHYS_HEAP_FUNCTIONS_
+{
+       /*! Translate CPU physical address to device physical address */
+       CpuPAddrToDevPAddr      pfnCpuPAddrToDevPAddr;
+       /*! Translate device physical address to CPU physical address */
+       DevPAddrToCpuPAddr      pfnDevPAddrToCpuPAddr;
+} PHYS_HEAP_FUNCTIONS;
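On a system where the CPU and GPU address the same physical RAM identically, both callbacks can be a simple element-wise copy. The sketch below is illustrative only: the function names are invented and the uiAddr member of the address types is assumed from img_types.h.

    /* Illustrative sketch only: identity CPU<->device translation for a
     * system where both agents use the same physical bus addresses. */
    static void ExampleCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
                                          IMG_UINT32 ui32NumOfAddr,
                                          IMG_DEV_PHYADDR *psDevPAddr,
                                          IMG_CPU_PHYADDR *psCpuPAddr)
    {
        IMG_UINT32 i;
        for (i = 0; i < ui32NumOfAddr; i++)
        {
            psDevPAddr[i].uiAddr = psCpuPAddr[i].uiAddr;
        }
    }

    static void ExampleDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
                                          IMG_UINT32 ui32NumOfAddr,
                                          IMG_CPU_PHYADDR *psCpuPAddr,
                                          IMG_DEV_PHYADDR *psDevPAddr)
    {
        IMG_UINT32 i;
        for (i = 0; i < ui32NumOfAddr; i++)
        {
            psCpuPAddr[i].uiAddr = psDevPAddr[i].uiAddr;
        }
    }

    static PHYS_HEAP_FUNCTIONS gsExampleMemFuncs = {
        .pfnCpuPAddrToDevPAddr = ExampleCpuPAddrToDevPAddr,
        .pfnDevPAddrToCpuPAddr = ExampleDevPAddrToCpuPAddr,
    };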
+
+/*! Structure used to describe a physical heap supported by a system. A
+ * system layer module can declare multiple physical heaps for different
+ * purposes. At a minimum, a system must provide one physical heap tagged for
+ * PHYS_HEAP_USAGE_GPU_LOCAL use.
+ * A heap represents a discrete pool of physical memory and how it is managed,
+ * as well as associating other properties and address translation logic.
+ * The structure fields sStartAddr, sCardBase and uiSize must be given valid
+ * values for the LMA and DMA physical heap types.
+ */
+typedef struct _PHYS_HEAP_CONFIG_
+{
+       PHYS_HEAP_TYPE        eType;                /*!< Class of heap and PMR factory used */
+       IMG_CHAR*             pszPDumpMemspaceName; /*!< Name given to the heap's symbolic memory
+                                                        space in a PDUMP enabled driver */
+       PHYS_HEAP_FUNCTIONS*  psMemFuncs;           /*!< Physical address translation functions */
+
+       IMG_CPU_PHYADDR       sStartAddr;           /*!< CPU Physical base address of memory region */
+       IMG_DEV_PHYADDR       sCardBase;            /*!< Device physical base address of memory
+                                                        region as seen from the PoV of the GPU */
+       IMG_UINT64            uiSize;               /*!< Size of memory region in bytes */
+
+       IMG_HANDLE            hPrivData;            /*!< System layer private data shared with
+                                                        psMemFuncs */
+
+       PHYS_HEAP_USAGE_FLAGS ui32UsageFlags;       /*!< Supported uses flags, conveys the type of
+                                                        buffers the physical heap can be used for */
+} PHYS_HEAP_CONFIG;
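A system layer could then describe a GPU-local carve-out with a config such as the sketch below. This is illustrative only: the PHYS_HEAP_TYPE_LMA enumerator is assumed to be provided by the included headers, the addresses and size are placeholders, and gsExampleMemFuncs refers to the translation sketch above.

    /* Illustrative sketch only: one local-memory heap tagged for GPU_LOCAL use. */
    static PHYS_HEAP_CONFIG gsExampleHeapConfig = {
        .eType                = PHYS_HEAP_TYPE_LMA,          /* assumed enumerator */
        .pszPDumpMemspaceName = "SYSMEM",
        .psMemFuncs           = &gsExampleMemFuncs,
        .sStartAddr           = { .uiAddr = 0x80000000ULL }, /* placeholder CPU base */
        .sCardBase            = { .uiAddr = 0x00000000ULL }, /* placeholder GPU view */
        .uiSize               = 256 * 1024 * 1024,           /* placeholder 256 MB */
        .hPrivData            = NULL,
        .ui32UsageFlags       = PHYS_HEAP_USAGE_GPU_LOCAL,
    };

An array of such configs is what PhysHeapCreateDeviceHeapsFromConfigs() (declared in physheap.h above) consumes when the device node is created.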
+
+#endif
diff --git a/drivers/gpu/drm/img/img-rogue/services/include/pvr_bridge.h b/drivers/gpu/drm/img/img-rogue/services/include/pvr_bridge.h
new file mode 100644 (file)
index 0000000..dc3cf76
--- /dev/null
@@ -0,0 +1,457 @@
+/*************************************************************************/ /*!
+@File
+@Title          PVR Bridge Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the PVR Bridge code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVR_BRIDGE_H
+#define PVR_BRIDGE_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include "pvrsrv_error.h"
+#if defined(SUPPORT_DISPLAY_CLASS)
+#include "common_dc_bridge.h"
+#if defined(SUPPORT_DCPLAT_BRIDGE)
+#include "common_dcplat_bridge.h"
+#endif
+#endif
+#include "common_mm_bridge.h"
+#if defined(SUPPORT_MMPLAT_BRIDGE)
+#include "common_mmplat_bridge.h"
+#endif
+#if defined(SUPPORT_WRAP_EXTMEM)
+#include "common_mmextmem_bridge.h"
+#endif
+#if !defined(EXCLUDE_CMM_BRIDGE)
+#include "common_cmm_bridge.h"
+#endif
+#if defined(__linux__)
+#include "common_dmabuf_bridge.h"
+#endif
+#if defined(PDUMP)
+#include "common_pdump_bridge.h"
+#include "common_pdumpctrl_bridge.h"
+#include "common_pdumpmm_bridge.h"
+#endif
+#include "common_cache_bridge.h"
+#if defined(SUPPORT_DMA_TRANSFER)
+#include "common_dma_bridge.h"
+#endif
+#include "common_srvcore_bridge.h"
+#include "common_sync_bridge.h"
+#if defined(SUPPORT_SECURE_EXPORT)
+#include "common_smm_bridge.h"
+#endif
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+#include "common_htbuffer_bridge.h"
+#endif
+#include "common_pvrtl_bridge.h"
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+#include "common_ri_bridge.h"
+#endif
+
+#if defined(SUPPORT_VALIDATION_BRIDGE)
+#include "common_validation_bridge.h"
+#endif
+
+#if defined(PVR_TESTING_UTILS)
+#include "common_tutils_bridge.h"
+#endif
+
+#include "common_devicememhistory_bridge.h"
+#include "common_synctracking_bridge.h"
+
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+#include "common_syncfallback_bridge.h"
+#endif
+
+#if defined(SUPPORT_DI_BRG_IMPL)
+#include "common_di_bridge.h"
+#endif
+
+/*
+ * Bridge Cmd Ids
+ */
+
+
+/* Note: The pattern
+ *   #define PVRSRV_BRIDGE_FEATURE (PVRSRV_BRIDGE_PREVFEATURE + 1)
+ *   #if defined(SUPPORT_FEATURE)
+ *   #define PVRSRV_BRIDGE_FEATURE_DISPATCH_FIRST (PVRSRV_BRIDGE_PREVFEATURE_DISPATCH_LAST + 1)
+ *   #define PVRSRV_BRIDGE_FEATURE_DISPATCH_LAST  (PVRSRV_BRIDGE_FEATURE_DISPATCH_FIRST + PVRSRV_BRIDGE_FEATURE_CMD_LAST)
+ *   #else
+ *   #define PVRSRV_BRIDGE_FEATURE_DISPATCH_FIRST 0
+ *   #define PVRSRV_BRIDGE_FEATURE_DISPATCH_LAST  (PVRSRV_BRIDGE_PREVFEATURE_DISPATCH_LAST)
+ *   #endif
+ * is used in the macro definitions below to make PVRSRV_BRIDGE_FEATURE_*
+ * take up no space in the dispatch table if SUPPORT_FEATURE is disabled
+ * (a worked instantiation for a hypothetical group is sketched after the
+ * DMA group at the end of this list).
+ *
+ * Note however that a bridge always defines PVRSRV_BRIDGE_FEATURE, even where
+ * the feature is not enabled (each bridge group retains its own ioctl number).
+ */
+
+#define PVRSRV_BRIDGE_FIRST                                    0UL
+
+/*   0: Default handler */
+#define PVRSRV_BRIDGE_DEFAULT                          0UL
+#define PVRSRV_BRIDGE_DEFAULT_DISPATCH_FIRST 0UL
+#define PVRSRV_BRIDGE_DEFAULT_DISPATCH_LAST  (PVRSRV_BRIDGE_DEFAULT_DISPATCH_FIRST)
+/*   1: CORE functions */
+#define PVRSRV_BRIDGE_SRVCORE                          1UL
+#define PVRSRV_BRIDGE_SRVCORE_DISPATCH_FIRST (PVRSRV_BRIDGE_DEFAULT_DISPATCH_LAST+1)
+#define PVRSRV_BRIDGE_SRVCORE_DISPATCH_LAST  (PVRSRV_BRIDGE_SRVCORE_DISPATCH_FIRST + PVRSRV_BRIDGE_SRVCORE_CMD_LAST)
+
+/*   2: SYNC functions */
+#define PVRSRV_BRIDGE_SYNC                                     2UL
+#define PVRSRV_BRIDGE_SYNC_DISPATCH_FIRST (PVRSRV_BRIDGE_SRVCORE_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SYNC_DISPATCH_LAST  (PVRSRV_BRIDGE_SYNC_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNC_CMD_LAST)
+
+/* 3,4: Reserved */
+#define PVRSRV_BRIDGE_RESERVED1                                3UL
+#define PVRSRV_BRIDGE_RESERVED1_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_RESERVED1_DISPATCH_LAST (PVRSRV_BRIDGE_SYNC_DISPATCH_LAST)
+
+#define PVRSRV_BRIDGE_RESERVED2                                4UL
+#define PVRSRV_BRIDGE_RESERVED2_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_RESERVED2_DISPATCH_LAST  (PVRSRV_BRIDGE_RESERVED1_DISPATCH_LAST)
+
+/*   5: PDUMP CTRL layer functions */
+#define PVRSRV_BRIDGE_PDUMPCTRL                                5UL
+#if defined(PDUMP)
+#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST (PVRSRV_BRIDGE_SYNC_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST  (PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST + PVRSRV_BRIDGE_PDUMPCTRL_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST  (PVRSRV_BRIDGE_SYNC_DISPATCH_LAST)
+#endif
+
+/*   6: Memory Management functions */
+#define PVRSRV_BRIDGE_MM                                       6UL
+#define PVRSRV_BRIDGE_MM_DISPATCH_FIRST (PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_MM_DISPATCH_LAST  (PVRSRV_BRIDGE_MM_DISPATCH_FIRST + PVRSRV_BRIDGE_MM_CMD_LAST)
+
+/*   7: Non-Linux Memory Management functions */
+#define PVRSRV_BRIDGE_MMPLAT                           7UL
+#if defined(SUPPORT_MMPLAT_BRIDGE)
+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST (PVRSRV_BRIDGE_MM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST  (PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST + PVRSRV_BRIDGE_MMPLAT_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST  (PVRSRV_BRIDGE_MM_DISPATCH_LAST)
+#endif
+
+/*   8: Context Memory Management functions */
+#define PVRSRV_BRIDGE_CMM                                      8UL
+#if !defined(EXCLUDE_CMM_BRIDGE)
+#define PVRSRV_BRIDGE_CMM_DISPATCH_FIRST (PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_CMM_DISPATCH_LAST  (PVRSRV_BRIDGE_CMM_DISPATCH_FIRST + PVRSRV_BRIDGE_CMM_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_CMM_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_CMM_DISPATCH_LAST  (PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST)
+#endif
+
+/*   9: PDUMP Memory Management functions */
+#define PVRSRV_BRIDGE_PDUMPMM                          9UL
+#if defined(PDUMP)
+#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST (PVRSRV_BRIDGE_CMM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST  (PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST + PVRSRV_BRIDGE_PDUMPMM_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST  (PVRSRV_BRIDGE_CMM_DISPATCH_LAST)
+#endif
+
+/*   10: PDUMP functions */
+#define PVRSRV_BRIDGE_PDUMP                                    10UL
+#if defined(PDUMP)
+#define PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST (PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST  (PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST + PVRSRV_BRIDGE_PDUMP_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST  (PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST)
+#endif
+
+/*  11: DMABUF functions */
+#define PVRSRV_BRIDGE_DMABUF                           11UL
+#if defined(__linux__)
+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST (PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST  (PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST + PVRSRV_BRIDGE_DMABUF_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST  (PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST)
+#endif
+
+/*  12: Display Class functions */
+#define PVRSRV_BRIDGE_DC                                       12UL
+#if defined(SUPPORT_DISPLAY_CLASS)
+#define PVRSRV_BRIDGE_DC_DISPATCH_FIRST (PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DC_DISPATCH_LAST  (PVRSRV_BRIDGE_DC_DISPATCH_FIRST + PVRSRV_BRIDGE_DC_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_DC_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_DC_DISPATCH_LAST  (PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST)
+#endif
+
+/*  13: Cache interface functions */
+#define PVRSRV_BRIDGE_CACHE                                    13UL
+#define PVRSRV_BRIDGE_CACHE_DISPATCH_FIRST (PVRSRV_BRIDGE_DC_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_CACHE_DISPATCH_LAST  (PVRSRV_BRIDGE_CACHE_DISPATCH_FIRST + PVRSRV_BRIDGE_CACHE_CMD_LAST)
+
+/*  14: Secure Memory Management functions */
+#define PVRSRV_BRIDGE_SMM                                      14UL
+#if defined(SUPPORT_SECURE_EXPORT)
+#define PVRSRV_BRIDGE_SMM_DISPATCH_FIRST (PVRSRV_BRIDGE_CACHE_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SMM_DISPATCH_LAST  (PVRSRV_BRIDGE_SMM_DISPATCH_FIRST + PVRSRV_BRIDGE_SMM_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_SMM_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_SMM_DISPATCH_LAST  (PVRSRV_BRIDGE_CACHE_DISPATCH_LAST)
+#endif
+
+/*  15: Transport Layer interface functions */
+#define PVRSRV_BRIDGE_PVRTL                                    15UL
+#define PVRSRV_BRIDGE_PVRTL_DISPATCH_FIRST (PVRSRV_BRIDGE_SMM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST  (PVRSRV_BRIDGE_PVRTL_DISPATCH_FIRST + PVRSRV_BRIDGE_PVRTL_CMD_LAST)
+
+/*  16: Resource Information (RI) interface functions */
+#define PVRSRV_BRIDGE_RI                                       16UL
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+#define PVRSRV_BRIDGE_RI_DISPATCH_FIRST (PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RI_DISPATCH_LAST  (PVRSRV_BRIDGE_RI_DISPATCH_FIRST + PVRSRV_BRIDGE_RI_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_RI_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_RI_DISPATCH_LAST  (PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST)
+#endif
+
+/*  17: Validation interface functions */
+#define PVRSRV_BRIDGE_VALIDATION                       17UL
+#if defined(SUPPORT_VALIDATION_BRIDGE)
+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST (PVRSRV_BRIDGE_RI_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST  (PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST + PVRSRV_BRIDGE_VALIDATION_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST  (PVRSRV_BRIDGE_RI_DISPATCH_LAST)
+#endif
+
+/*  18: TUTILS interface functions */
+#define PVRSRV_BRIDGE_TUTILS                           18UL
+#if defined(PVR_TESTING_UTILS)
+#define PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST (PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST  (PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST + PVRSRV_BRIDGE_TUTILS_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST  (PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST)
+#endif
+
+/*  19: DevMem history interface functions */
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY         19UL
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST (PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST  (PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST + PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_LAST)
+
+/*  20: Host Trace Buffer interface functions */
+#define PVRSRV_BRIDGE_HTBUFFER                         20UL
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST (PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST  (PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST + PVRSRV_BRIDGE_HTBUFFER_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST  (PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST)
+#endif
+
+/*  21: Non-Linux Display functions */
+#define PVRSRV_BRIDGE_DCPLAT                           21UL
+#if defined(SUPPORT_DISPLAY_CLASS) && defined(SUPPORT_DCPLAT_BRIDGE)
+#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST (PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST  (PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST + PVRSRV_BRIDGE_DCPLAT_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST  (PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST)
+#endif
+
+/*  22: Extmem functions */
+#define PVRSRV_BRIDGE_MMEXTMEM                         22UL
+#if defined(SUPPORT_WRAP_EXTMEM)
+#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST (PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST  (PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST + PVRSRV_BRIDGE_MMEXTMEM_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST (PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST)
+#endif
+
+/*  23: Sync tracking functions */
+#define PVRSRV_BRIDGE_SYNCTRACKING                     23UL
+#define PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_FIRST (PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST  (PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNCTRACKING_CMD_LAST)
+
+/*  24: Sync fallback functions */
+#define PVRSRV_BRIDGE_SYNCFALLBACK                     24UL
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_FIRST (PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST  (PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNCFALLBACK_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST  (PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST)
+#endif
+
+/*  25: Debug Information (DI) interface functions */
+#define PVRSRV_BRIDGE_DI                                       25UL
+#if defined(SUPPORT_DI_BRG_IMPL)
+#define PVRSRV_BRIDGE_DI_DISPATCH_FIRST (PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DI_DISPATCH_LAST  (PVRSRV_BRIDGE_DI_DISPATCH_FIRST + PVRSRV_BRIDGE_DI_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_DI_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_DI_DISPATCH_LAST  (PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST)
+#endif
+
+/*  26: DMA transfer functions */
+#define PVRSRV_BRIDGE_DMA                      26UL
+#if defined(SUPPORT_DMA_TRANSFER)
+#define PVRSRV_BRIDGE_DMA_DISPATCH_FIRST (PVRSRV_BRIDGE_DI_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DMA_DISPATCH_LAST  (PVRSRV_BRIDGE_DMA_DISPATCH_FIRST + PVRSRV_BRIDGE_DMA_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_DMA_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_DMA_DISPATCH_LAST  (PVRSRV_BRIDGE_DI_DISPATCH_LAST)
+#endif
+
+/* NB PVRSRV_BRIDGE_LAST below must be the last bridge group defined above (PVRSRV_BRIDGE_FEATURE) */
+#define PVRSRV_BRIDGE_LAST                                     (PVRSRV_BRIDGE_DMA)
+/* NB PVRSRV_BRIDGE_DISPATCH_LAST below must be the last dispatch entry defined above (PVRSRV_BRIDGE_FEATURE_DISPATCH_LAST) */
+#define PVRSRV_BRIDGE_DISPATCH_LAST                    (PVRSRV_BRIDGE_DMA_DISPATCH_LAST)
+
+/* bit mask representing the enabled PVR bridges */
+
+static const IMG_UINT32 gui32PVRBridges =
+         (1U << (PVRSRV_BRIDGE_DEFAULT - PVRSRV_BRIDGE_FIRST))
+       | (1U << (PVRSRV_BRIDGE_SRVCORE - PVRSRV_BRIDGE_FIRST))
+       | (1U << (PVRSRV_BRIDGE_SYNC - PVRSRV_BRIDGE_FIRST))
+
+#if defined(PDUMP)
+       | (1U << (PVRSRV_BRIDGE_PDUMPCTRL - PVRSRV_BRIDGE_FIRST))
+#endif
+       | (1U << (PVRSRV_BRIDGE_MM - PVRSRV_BRIDGE_FIRST))
+#if defined(SUPPORT_MMPLAT_BRIDGE)
+       | (1U << (PVRSRV_BRIDGE_MMPLAT - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_CMM)
+       | (1U << (PVRSRV_BRIDGE_CMM - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(PDUMP)
+       | (1U << (PVRSRV_BRIDGE_PDUMPMM - PVRSRV_BRIDGE_FIRST))
+       | (1U << (PVRSRV_BRIDGE_PDUMP - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(__linux__)
+       | (1U << (PVRSRV_BRIDGE_DMABUF - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_DISPLAY_CLASS)
+       | (1U << (PVRSRV_BRIDGE_DC - PVRSRV_BRIDGE_FIRST))
+#endif
+       | (1U << (PVRSRV_BRIDGE_CACHE - PVRSRV_BRIDGE_FIRST))
+#if defined(SUPPORT_SECURE_EXPORT)
+       | (1U << (PVRSRV_BRIDGE_SMM - PVRSRV_BRIDGE_FIRST))
+#endif
+       | (1U << (PVRSRV_BRIDGE_PVRTL - PVRSRV_BRIDGE_FIRST))
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+       | (1U << (PVRSRV_BRIDGE_RI - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_VALIDATION)
+       | (1U << (PVRSRV_BRIDGE_VALIDATION - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(PVR_TESTING_UTILS)
+       | (1U << (PVRSRV_BRIDGE_TUTILS - PVRSRV_BRIDGE_FIRST))
+#endif
+       | (1U << (PVRSRV_BRIDGE_DEVICEMEMHISTORY - PVRSRV_BRIDGE_FIRST))
+#if defined(SUPPORT_HTBUFFER)
+       | (1U << (PVRSRV_BRIDGE_HTBUFFER - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_DISPLAY_CLASS) && defined(SUPPORT_DCPLAT_BRIDGE)
+       | (1U << (PVRSRV_BRIDGE_DCPLAT - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_WRAP_EXTMEM)
+       | (1U << (PVRSRV_BRIDGE_MMEXTMEM - PVRSRV_BRIDGE_FIRST))
+#endif
+       | (1U << (PVRSRV_BRIDGE_SYNCTRACKING - PVRSRV_BRIDGE_FIRST))
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+       | (1U << (PVRSRV_BRIDGE_SYNCFALLBACK - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_DI_BRG_IMPL)
+       | (1U << (PVRSRV_BRIDGE_DI - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_DMA_TRANSFER)
+       | (1U << (PVRSRV_BRIDGE_DMA - PVRSRV_BRIDGE_FIRST))
+#endif
+       ;
+
+/* bit field representing which PVR bridge groups may optionally not
+ * be present in the server
+ */
+#define PVR_BRIDGES_OPTIONAL \
+       ( \
+               (1U << (PVRSRV_BRIDGE_RI - PVRSRV_BRIDGE_FIRST)) \
+       )
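+
+/* Minimal usage sketch: each enabled bridge group occupies one bit in the
+ * mask above, offset from PVRSRV_BRIDGE_FIRST. The macro below is purely
+ * illustrative (an example name, not part of the bridge API).
+ */
+#define PVR_BRIDGE_GROUP_ENABLED_EXAMPLE(group) \
+	((gui32PVRBridges & (1U << ((group) - PVRSRV_BRIDGE_FIRST))) != 0U)
+/* e.g. PVR_BRIDGE_GROUP_ENABLED_EXAMPLE(PVRSRV_BRIDGE_CACHE) is non-zero
+ * whenever the cache bridge is compiled in.
+ */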
+
+/******************************************************************************
+ * Generic bridge structures
+ *****************************************************************************/
+
+
+/******************************************************************************
+ * bridge packaging structure
+ *****************************************************************************/
+typedef struct PVRSRV_BRIDGE_PACKAGE_TAG
+{
+       IMG_UINT32              ui32BridgeID;                   /*!< ioctl bridge group */
+       IMG_UINT32              ui32FunctionID;                 /*!< ioctl function index */
+       IMG_UINT32              ui32Size;                               /*!< size of structure */
+       void __user             *pvParamIn;                             /*!< input data buffer */
+       IMG_UINT32              ui32InBufferSize;               /*!< size of input data buffer */
+       void __user             *pvParamOut;                    /*!< output data buffer */
+       IMG_UINT32              ui32OutBufferSize;              /*!< size of output data buffer */
+} PVRSRV_BRIDGE_PACKAGE;
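+
+/* Illustrative only (hypothetical caller variables, not part of the Services
+ * API): a bridge call is packaged roughly as below before being handed to
+ * the ioctl dispatcher. Kept under #if 0 so it is never compiled.
+ */
+#if 0
+	PVRSRV_BRIDGE_PACKAGE sPkg;
+
+	sPkg.ui32BridgeID      = PVRSRV_BRIDGE_SYNC;  /* bridge group */
+	sPkg.ui32FunctionID    = 0;                   /* function index within the group */
+	sPkg.ui32Size          = sizeof(PVRSRV_BRIDGE_PACKAGE);
+	sPkg.pvParamIn         = pvIn;                /* caller-supplied input buffer */
+	sPkg.ui32InBufferSize  = ui32InSize;
+	sPkg.pvParamOut        = pvOut;               /* caller-supplied output buffer */
+	sPkg.ui32OutBufferSize = ui32OutSize;
+#endif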
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* PVR_BRIDGE_H */
+
+/******************************************************************************
+ End of file (pvr_bridge.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/include/pvr_dicommon.h b/drivers/gpu/drm/img/img-rogue/services/include/pvr_dicommon.h
new file mode 100644 (file)
index 0000000..c729dde
--- /dev/null
@@ -0,0 +1,59 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services Debug Information (DI) common types and definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Debug Information (DI) common types and definitions included
+                in both user mode and kernel mode source.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVR_DICOMMON_H
+#define PVR_DICOMMON_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*! Maximum DI entry path length including the null byte. */
+#define DI_IMPL_BRG_PATH_LEN 64
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* PVR_DICOMMON_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/include/pvr_ricommon.h b/drivers/gpu/drm/img/img-rogue/services/include/pvr_ricommon.h
new file mode 100644 (file)
index 0000000..0521aa1
--- /dev/null
@@ -0,0 +1,68 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services Resource Information (RI) common types and definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Resource Information (RI) common types and definitions included
+                in both user mode and kernel mode source.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef PVR_RICOMMON_H
+#define PVR_RICOMMON_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+
+/*! Maximum text string length including the null byte */
+#define PRVSRVRI_MAX_TEXT_LENGTH       20U
+
+/* PID used to hold PMR allocations which are driver-wide (i.e. have a lifetime
+ * longer than an application process)
+ */
+#define PVR_SYS_ALLOC_PID 1
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* PVR_RICOMMON_H */
+/******************************************************************************
+ End of file (pvr_ricommon.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/include/rgx_bridge.h b/drivers/gpu/drm/img/img-rogue/services/include/rgx_bridge.h
new file mode 100644 (file)
index 0000000..fa4ca1f
--- /dev/null
@@ -0,0 +1,243 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Bridge Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the Rogue Bridge code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGX_BRIDGE_H
+#define RGX_BRIDGE_H
+
+#include "pvr_bridge.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include "rgx_fwif_km.h"
+
+#define RGXFWINITPARAMS_VERSION   1
+#define RGXFWINITPARAMS_EXTENSION 128
+
+#include "common_rgxta3d_bridge.h"
+#include "common_rgxcmp_bridge.h"
+#if defined(SUPPORT_FASTRENDER_DM)
+#include "common_rgxtq2_bridge.h"
+#endif
+#if defined(SUPPORT_RGXTQ_BRIDGE)
+#include "common_rgxtq_bridge.h"
+#endif
+#if defined(SUPPORT_USC_BREAKPOINT)
+#include "common_rgxbreakpoint_bridge.h"
+#endif
+#include "common_rgxfwdbg_bridge.h"
+#if defined(PDUMP)
+#include "common_rgxpdump_bridge.h"
+#endif
+#include "common_rgxhwperf_bridge.h"
+#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE)
+#include "common_rgxregconfig_bridge.h"
+#endif
+#include "common_rgxkicksync_bridge.h"
+#include "common_rgxtimerquery_bridge.h"
+#if defined(SUPPORT_RGXRAY_BRIDGE)
+#include "common_rgxray_bridge.h"
+#endif
+/*
+ * Bridge Cmd Ids
+ */
+
+/* *REMEMBER* to update PVRSRV_BRIDGE_RGX_LAST if you add/remove a bridge
+ * group!
+ * Also you need to ensure all PVRSRV_BRIDGE_RGX_xxx_DISPATCH_FIRST offsets
+ * follow on from the previous bridge group's commands!
+ *
+ * If a bridge group is optional, ensure you *ALWAYS* define its index
+ * (e.g. PVRSRV_BRIDGE_RGXCMP is always 129, even if the feature is not
+ * defined). If an optional bridge group is not defined you must still
+ * define PVRSRV_BRIDGE_RGX_xxx_DISPATCH_FIRST for it with an assigned
+ * value of 0.
+ */
+
+/* The RGX bridge groups start at 128 (PVRSRV_BRIDGE_RGX_FIRST) rather than
+ * follow-on from the other non-device bridge groups (meaning that they then
+ * won't be displaced if other non-device bridge groups are added).
+ */
+
+#define PVRSRV_BRIDGE_RGX_FIRST                  128UL
+
+/* 128: RGX TQ interface functions */
+#define PVRSRV_BRIDGE_RGXTQ                      128UL
+/* The RGXTQ bridge is conditional since the definitions in this header file
+ * support both the rogue and volcanic servers, but the RGXTQ bridge is not
+ * required at all on the Volcanic architecture.
+ */
+#if defined(SUPPORT_RGXTQ_BRIDGE)
+#define PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST       (PVRSRV_BRIDGE_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST        (PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTQ_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST       0
+#define PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST        (PVRSRV_BRIDGE_DISPATCH_LAST)
+#endif
+
+/* 129: RGX Compute interface functions */
+#define PVRSRV_BRIDGE_RGXCMP                     129UL
+#define PVRSRV_BRIDGE_RGXCMP_DISPATCH_FIRST      (PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST       (PVRSRV_BRIDGE_RGXCMP_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXCMP_CMD_LAST)
+
+/* 130: RGX TA/3D interface functions */
+#define PVRSRV_BRIDGE_RGXTA3D                    130UL
+#define PVRSRV_BRIDGE_RGXTA3D_DISPATCH_FIRST     (PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST      (PVRSRV_BRIDGE_RGXTA3D_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTA3D_CMD_LAST)
+
+/* 131: RGX Breakpoint interface functions */
+#define PVRSRV_BRIDGE_RGXBREAKPOINT                 131UL
+#if defined(SUPPORT_USC_BREAKPOINT)
+#define PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_FIRST  (PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST   (PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_FIRST  0
+#define PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST   (PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST)
+#endif
+
+/* 132: RGX Debug/Misc interface functions */
+#define PVRSRV_BRIDGE_RGXFWDBG                   132UL
+#define PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_FIRST    (PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST     (PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXFWDBG_CMD_LAST)
+
+/* 133: RGX PDump interface functions */
+#define PVRSRV_BRIDGE_RGXPDUMP                   133UL
+#if defined(PDUMP)
+#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST    (PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST     (PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXPDUMP_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST    0
+#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST     (PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST)
+#endif
+
+/* 134: RGX HWPerf interface functions */
+#define PVRSRV_BRIDGE_RGXHWPERF                  134UL
+#define PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_FIRST   (PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST    (PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST)
+
+/* 135: RGX Register Configuration interface functions */
+#define PVRSRV_BRIDGE_RGXREGCONFIG                  135UL
+#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE)
+#define PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_FIRST   (PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST    (PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXREGCONFIG_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_FIRST   0
+#define PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST    (PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST)
+#endif
+
+/* 136: RGX kicksync interface */
+#define PVRSRV_BRIDGE_RGXKICKSYNC                136UL
+#define PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST  (PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXKICKSYNC_CMD_LAST)
+
+/* 137: RGX TQ2 interface */
+#define PVRSRV_BRIDGE_RGXTQ2                     137UL
+#if defined(SUPPORT_FASTRENDER_DM)
+#define PVRSRV_BRIDGE_RGXTQ2_DISPATCH_FIRST      (PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST       (PVRSRV_BRIDGE_RGXTQ2_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTQ2_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_RGXTQ2_DISPATCH_FIRST      (0)
+#define PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST       (PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST)
+#endif
+
+/* 138: RGX timer query interface */
+#define PVRSRV_BRIDGE_RGXTIMERQUERY                 138UL
+#define PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_FIRST  (PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_LAST   (PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_LAST)
+
+/* 139: RGX Ray tracing interface */
+#define PVRSRV_BRIDGE_RGXRAY                 139UL
+#if defined(SUPPORT_RGXRAY_BRIDGE)
+#define PVRSRV_BRIDGE_RGXRAY_DISPATCH_FIRST  (PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST   (PVRSRV_BRIDGE_RGXRAY_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXRAY_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_RGXRAY_DISPATCH_FIRST  0
+#define PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST   (PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_LAST)
+#endif
+
+#define PVRSRV_BRIDGE_RGX_LAST                   (PVRSRV_BRIDGE_RGXRAY)
+#define PVRSRV_BRIDGE_RGX_DISPATCH_LAST          (PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST)
+
+/* bit mask representing the enabled RGX bridges */
+
+static const IMG_UINT32 gui32RGXBridges =
+         (1U << (PVRSRV_BRIDGE_RGXTQ - PVRSRV_BRIDGE_RGX_FIRST))
+#if defined(RGX_FEATURE_COMPUTE) || defined(__KERNEL__)
+       | (1U << (PVRSRV_BRIDGE_RGXCMP - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+       | (1U << (PVRSRV_BRIDGE_RGXTA3D - PVRSRV_BRIDGE_RGX_FIRST))
+#if defined(SUPPORT_USC_BREAKPOINT)
+	| (1U << (PVRSRV_BRIDGE_RGXBREAKPOINT - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+       | (1U << (PVRSRV_BRIDGE_RGXFWDBG - PVRSRV_BRIDGE_RGX_FIRST))
+#if defined(PDUMP)
+       | (1U << (PVRSRV_BRIDGE_RGXPDUMP - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+       | (1U << (PVRSRV_BRIDGE_RGXHWPERF - PVRSRV_BRIDGE_RGX_FIRST))
+#if defined(SUPPORT_REGCONFIG)
+       | (1U << (PVRSRV_BRIDGE_RGXREGCONFIG - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+       | (1U << (PVRSRV_BRIDGE_RGXKICKSYNC - PVRSRV_BRIDGE_RGX_FIRST))
+#if defined(SUPPORT_FASTRENDER_DM) || defined(__KERNEL__)
+       | (1U << (PVRSRV_BRIDGE_RGXTQ2 - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+#if defined(SUPPORT_TIMERQUERY)
+       | (1U << (PVRSRV_BRIDGE_RGXTIMERQUERY - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+       | (1U << (PVRSRV_BRIDGE_RGXRAY - PVRSRV_BRIDGE_RGX_FIRST))
+       ;
+/* bit field representing which RGX bridge groups may optionally not
+ * be present in the server
+ */
+
+#define RGX_BRIDGES_OPTIONAL \
+       ( \
+               0 /* no RGX bridges are currently optional */ \
+       )
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* RGX_BRIDGE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/include/rgx_fw_info.h b/drivers/gpu/drm/img/img-rogue/services/include/rgx_fw_info.h
new file mode 100644 (file)
index 0000000..2f012d5
--- /dev/null
@@ -0,0 +1,135 @@
+/*************************************************************************/ /*!
+@File
+@Title          FW image information
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    FW image information header and layout table definitions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGX_FW_INFO_H)
+#define RGX_FW_INFO_H
+
+#include "img_types.h"
+#include "rgx_common.h"
+
+/*
+ * Firmware binary block unit in bytes.
+ * Raw data stored in FW binary will be aligned to this size.
+ */
+#define FW_BLOCK_SIZE 4096L
+
+typedef enum
+{
+       META_CODE = 0,
+       META_PRIVATE_DATA,
+       META_COREMEM_CODE,
+       META_COREMEM_DATA,
+       MIPS_CODE,
+       MIPS_EXCEPTIONS_CODE,
+       MIPS_BOOT_CODE,
+       MIPS_PRIVATE_DATA,
+       MIPS_BOOT_DATA,
+       MIPS_STACK,
+       RISCV_UNCACHED_CODE,
+       RISCV_CACHED_CODE,
+       RISCV_PRIVATE_DATA,
+       RISCV_COREMEM_CODE,
+       RISCV_COREMEM_DATA,
+} RGX_FW_SECTION_ID;
+
+typedef enum
+{
+       NONE = 0,
+       FW_CODE,
+       FW_DATA,
+       FW_COREMEM_CODE,
+       FW_COREMEM_DATA
+} RGX_FW_SECTION_TYPE;
+
+
+/*
+ * FW binary format with FW info attached:
+ *
+ *          Contents        Offset
+ *     +-----------------+
+ *     |                 |    0
+ *     |                 |
+ *     | Original binary |
+ *     |      file       |
+ *     |   (.ldr/.elf)   |
+ *     |                 |
+ *     |                 |
+ *     +-----------------+
+ *     | FW info header  |  FILE_SIZE - 4K
+ *     +-----------------+
+ *     |                 |
+ *     | FW layout table |
+ *     |                 |
+ *     +-----------------+
+ *                          FILE_SIZE
+ */
+
+#define FW_INFO_VERSION  (1)
+
+typedef struct
+{
+       IMG_UINT32 ui32InfoVersion;      /* FW info version */
+       IMG_UINT32 ui32HeaderLen;        /* Header length */
+       IMG_UINT32 ui32LayoutEntryNum;   /* Number of entries in the layout table */
+       IMG_UINT32 ui32LayoutEntrySize;  /* Size of an entry in the layout table */
+       IMG_UINT64 RGXFW_ALIGN ui64BVNC; /* BVNC */
+       IMG_UINT32 ui32FwPageSize;       /* Page size of processor on which firmware executes */
+       IMG_UINT32 ui32Flags;            /* Compatibility flags */
+} RGX_FW_INFO_HEADER;
+
+typedef struct
+{
+       RGX_FW_SECTION_ID eId;
+       RGX_FW_SECTION_TYPE eType;
+       IMG_UINT32 ui32BaseAddr;
+       IMG_UINT32 ui32MaxSize;
+       IMG_UINT32 ui32AllocSize;
+       IMG_UINT32 ui32AllocOffset;
+} RGX_FW_LAYOUT_ENTRY;
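+
+/* Minimal parsing sketch for the layout pictured above (illustrative only;
+ * the function name and the pui8Fw/uiFwSize parameters are hypothetical).
+ * The info header occupies the final FW_BLOCK_SIZE bytes of the image and is
+ * followed by ui32LayoutEntryNum entries of ui32LayoutEntrySize bytes each.
+ * Kept under #if 0 so it is never compiled.
+ */
+#if 0
+static const RGX_FW_INFO_HEADER *ExampleFwInfoHeader(const IMG_UINT8 *pui8Fw,
+                                                     IMG_UINT64 uiFwSize)
+{
+	if (uiFwSize < (IMG_UINT64)FW_BLOCK_SIZE)
+	{
+		return NULL;
+	}
+	return (const RGX_FW_INFO_HEADER *)(pui8Fw + uiFwSize - FW_BLOCK_SIZE);
+}
+/* The layout table then starts ui32HeaderLen bytes after the header. */
+#endif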
+
+#endif /* RGX_FW_INFO_H */
+
+/******************************************************************************
+ End of file (rgx_fw_info.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/include/rgx_memallocflags.h b/drivers/gpu/drm/img/img-rogue/services/include/rgx_memallocflags.h
new file mode 100644 (file)
index 0000000..e26f42c
--- /dev/null
@@ -0,0 +1,58 @@
+/**************************************************************************/ /*!
+@File
+@Title          RGX device specific memory allocation flags
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGX_MEMALLOCFLAGS_H
+#define RGX_MEMALLOCFLAGS_H
+
+
+/* Include pvrsrv layer header as the flags below are used in the device
+ * field defined in this header inside Services code.
+ * See PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK */
+#include "pvrsrv_memallocflags.h"
+
+
+/* Device specific MMU flags */
+#define PMMETA_PROTECT          (1U << 0)      /*!< Memory that only the PM and Meta can access */
+#define FIRMWARE_CACHED         (1U << 1)      /*!< Memory that is cached in META/MIPS */
+
+
+#endif /* RGX_MEMALLOCFLAGS_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/include/rgx_pdump_panics.h b/drivers/gpu/drm/img/img-rogue/services/include/rgx_pdump_panics.h
new file mode 100644 (file)
index 0000000..fce2b3e
--- /dev/null
@@ -0,0 +1,64 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX PDump panic definitions header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX PDump panic definitions header
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGX_PDUMP_PANICS_H_)
+#define RGX_PDUMP_PANICS_H_
+
+/*! Unique device specific IMG_UINT16 panic IDs to identify the cause of an
+ * RGX PDump panic in a PDump script. */
+typedef enum
+{
+       RGX_PDUMP_PANIC_UNDEFINED = 0,
+
+       /* These panics occur when test parameters and driver configuration
+        * enable features that require the firmware and host driver to
+        * communicate. Such features are not supported with off-line playback.
+        */
+       RGX_PDUMP_PANIC_ZSBUFFER_BACKING         = 101, /*!< Requests ZSBuffer to be backed with physical pages */
+       RGX_PDUMP_PANIC_ZSBUFFER_UNBACKING       = 102, /*!< Requests ZSBuffer to be unbacked */
+       RGX_PDUMP_PANIC_FREELIST_GROW            = 103, /*!< Requests an on-demand freelist grow/shrink */
+       RGX_PDUMP_PANIC_FREELISTS_RECONSTRUCTION = 104, /*!< Requests freelists reconstruction */
+       RGX_PDUMP_PANIC_SPARSEMEM_SWAP           = 105, /*!< Requests sparse remap memory swap feature */
+} RGX_PDUMP_PANIC;
+
+#endif /* RGX_PDUMP_PANICS_H_ */
diff --git a/drivers/gpu/drm/img/img-rogue/services/include/rgx_tq_shared.h b/drivers/gpu/drm/img/img-rogue/services/include/rgx_tq_shared.h
new file mode 100644 (file)
index 0000000..dc10b6e
--- /dev/null
@@ -0,0 +1,63 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX transfer queue shared
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Shared definitions between client and server
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGX_TQ_SHARED_H
+#define RGX_TQ_SHARED_H
+
+#define TQ_MAX_PREPARES_PER_SUBMIT             16U
+
+#define TQ_PREP_FLAGS_COMMAND_3D               0x0U
+#define TQ_PREP_FLAGS_COMMAND_2D               0x1U
+#define TQ_PREP_FLAGS_COMMAND_MASK             (0xfU)
+#define TQ_PREP_FLAGS_COMMAND_SHIFT            0
+#define TQ_PREP_FLAGS_PDUMPCONTINUOUS  (1U << 4)
+#define TQ_PREP_FLAGS_START                            (1U << 5)
+#define TQ_PREP_FLAGS_END                              (1U << 6)
+
+#define TQ_PREP_FLAGS_COMMAND_SET(m) \
+       ((TQ_PREP_FLAGS_COMMAND_##m << TQ_PREP_FLAGS_COMMAND_SHIFT) & TQ_PREP_FLAGS_COMMAND_MASK)
+
+#define TQ_PREP_FLAGS_COMMAND_IS(m,n) \
+       (((m & TQ_PREP_FLAGS_COMMAND_MASK) >> TQ_PREP_FLAGS_COMMAND_SHIFT)  == TQ_PREP_FLAGS_COMMAND_##n)
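+
+/* Usage sketch for the helpers above (illustrative local variable, never
+ * compiled):
+ */
+#if 0
+	IMG_UINT32 ui32PrepFlags;
+
+	ui32PrepFlags = TQ_PREP_FLAGS_COMMAND_SET(2D) |
+	                TQ_PREP_FLAGS_START |
+	                TQ_PREP_FLAGS_END;
+
+	if (TQ_PREP_FLAGS_COMMAND_IS(ui32PrepFlags, 2D))
+	{
+		/* true: the COMMAND field holds TQ_PREP_FLAGS_COMMAND_2D (0x1) */
+	}
+#endif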
+
+#endif /* RGX_TQ_SHARED_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/include/rgxtransfer_shader.h b/drivers/gpu/drm/img/img-rogue/services/include/rgxtransfer_shader.h
new file mode 100644 (file)
index 0000000..979f85b
--- /dev/null
@@ -0,0 +1,61 @@
+/*************************************************************************/ /*!
+@File           rgxtransfer_shader.h
+@Title          TQ binary shader file info
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header holds info about the TQ binary shader file
+                generated by the TQ shader factory. It is needed by the
+                shader factory when generating the file; by services KM when
+                reading and loading the file into memory; and by services UM
+                when constructing blits using the shaders.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXSHADERHEADER_H)
+#define RGXSHADERHEADER_H
+
+typedef struct _RGX_SHADER_HEADER_
+{
+       IMG_UINT32 ui32Version;
+       IMG_UINT32 ui32NumFragment;
+       IMG_UINT32 ui32SizeFragment;
+       IMG_UINT32 ui32NumTDMFragment;
+       IMG_UINT32 ui32SizeTDMFragment;
+       IMG_UINT32 ui32SizeClientMem;
+} RGX_SHADER_HEADER;
+
+#endif /* RGXSHADERHEADER_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/include/rogue/km_apphint_defs.h b/drivers/gpu/drm/img/img-rogue/services/include/rogue/km_apphint_defs.h
new file mode 100644 (file)
index 0000000..16fc36b
--- /dev/null
@@ -0,0 +1,160 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services AppHint definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "km_apphint_defs_common.h"
+
+#ifndef KM_APPHINT_DEFS_H
+#define KM_APPHINT_DEFS_H
+
+/* NB: The 'DEVICE' AppHints must be last in this list as they will be
+ * duplicated in the case of a driver supporting multiple devices
+ */
+#define APPHINT_LIST_ALL \
+       APPHINT_LIST_BUILDVAR_COMMON \
+       APPHINT_LIST_BUILDVAR \
+       APPHINT_LIST_MODPARAM_COMMON \
+       APPHINT_LIST_MODPARAM \
+       APPHINT_LIST_DEBUGINFO_COMMON \
+       APPHINT_LIST_DEBUGINFO \
+       APPHINT_LIST_DEBUGINFO_DEVICE_COMMON \
+       APPHINT_LIST_DEBUGINFO_DEVICE
+
+
+/*
+*******************************************************************************
+ Build variables (rogue-specific)
+ All of these should be configurable only through the 'default' value
+******************************************************************************/
+#define APPHINT_LIST_BUILDVAR
+
+/*
+*******************************************************************************
+ Module parameters (rogue-specific)
+******************************************************************************/
+#define APPHINT_LIST_MODPARAM \
+/* name,                            type,           class,       default,                                         helper,         */ \
+X(EnableCDMKillingRandMode,         BOOL,           VALIDATION,  PVRSRV_APPHINT_ENABLECDMKILLINGRANDMODE,         NO_PARAM_TABLE   ) \
+\
+X(HWPerfDisableCustomCounterFilter, BOOL,           VALIDATION,  PVRSRV_APPHINT_HWPERFDISABLECUSTOMCOUNTERFILTER, NO_PARAM_TABLE   ) \
+X(ValidateSOCUSCTimer,              BOOL,           VALIDATION,  PVRSRV_APPHINT_VALIDATESOCUSCTIMERS,             NO_PARAM_TABLE   ) \
+X(ECCRAMErrInj,                     UINT32,         VALIDATION,  0,                                               NO_PARAM_TABLE   ) \
+\
+X(TFBCCompressionControlGroup,      UINT32,         VALIDATION,  PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLGROUP,      NO_PARAM_TABLE   ) \
+X(TFBCCompressionControlScheme,     UINT32,         VALIDATION,  PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLSCHEME,     NO_PARAM_TABLE   ) \
+X(TFBCCompressionControlYUVFormat,  BOOL,           VALIDATION,  0,                                               NO_PARAM_TABLE   ) \
+
+/*
+*******************************************************************************
+ Debugfs parameters (rogue-specific) - driver configuration
+******************************************************************************/
+#define APPHINT_LIST_DEBUGINFO \
+/* name,                            type,           class,       default,                                         helper,         */ \
+
+/*
+*******************************************************************************
+ Debugfs parameters (rogue-specific) - device configuration
+******************************************************************************/
+#define APPHINT_LIST_DEBUGINFO_DEVICE \
+/* name,                            type,           class,       default,                                         helper,         */ \
+
+/*
+*******************************************************************************
+ Mapping between debugfs parameters and module parameters.
+ This mapping is used to initialise device specific apphints from module
+ parameters. Each entry in this table will provide a default value to all
+ devices (i.e. if there is more than one device each device's value will
+ be initialised).
+******************************************************************************/
+#define APPHINT_LIST_DEBUIGINFO_DEVICE_X_MODPARAM_INIT \
+/* debuginfo device apphint name  modparam name */
+
+/*
+*******************************************************************************
+
+ Table generated enums
+
+******************************************************************************/
+/* Unique ID for all AppHints */
+typedef enum {
+#define X(a, b, c, d, e) APPHINT_ID_ ## a,
+       APPHINT_LIST_ALL
+#undef X
+       APPHINT_ID_MAX
+} APPHINT_ID;
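+
+/* For example, with the X() definition above, the rogue-specific entry
+ * X(EnableCDMKillingRandMode, ...) from APPHINT_LIST_MODPARAM expands to the
+ * enumerator APPHINT_ID_EnableCDMKillingRandMode; the same X-macro pattern
+ * generates the BUILDVAR/MODPARAM/DEBUGINFO ID enums below.
+ */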
+
+/* ID for build variable Apphints - used for build variable only structures */
+typedef enum {
+#define X(a, b, c, d, e) APPHINT_BUILDVAR_ID_ ## a,
+       APPHINT_LIST_BUILDVAR_COMMON
+       APPHINT_LIST_BUILDVAR
+#undef X
+       APPHINT_BUILDVAR_ID_MAX
+} APPHINT_BUILDVAR_ID;
+
+/* ID for Modparam Apphints - used for modparam only structures */
+typedef enum {
+#define X(a, b, c, d, e) APPHINT_MODPARAM_ID_ ## a,
+       APPHINT_LIST_MODPARAM_COMMON
+       APPHINT_LIST_MODPARAM
+#undef X
+       APPHINT_MODPARAM_ID_MAX
+} APPHINT_MODPARAM_ID;
+
+/* ID for Debugfs Apphints - used for debugfs only structures */
+typedef enum {
+#define X(a, b, c, d, e) APPHINT_DEBUGINFO_ID_ ## a,
+       APPHINT_LIST_DEBUGINFO_COMMON
+       APPHINT_LIST_DEBUGINFO
+#undef X
+       APPHINT_DEBUGINFO_ID_MAX
+} APPHINT_DEBUGINFO_ID;
+
+/* ID for Debugfs Device Apphints - used for debugfs device only structures */
+typedef enum {
+#define X(a, b, c, d, e) APPHINT_DEBUGINFO_DEVICE_ID_ ## a,
+       APPHINT_LIST_DEBUGINFO_DEVICE_COMMON
+       APPHINT_LIST_DEBUGINFO_DEVICE
+#undef X
+       APPHINT_DEBUGINFO_DEVICE_ID_MAX
+} APPHINT_DEBUGINFO_DEVICE_ID;
+
+#endif /* KM_APPHINT_DEFS_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/include/rogue/rgxapi_km.h b/drivers/gpu/drm/img/img-rogue/services/include/rogue/rgxapi_km.h
new file mode 100644 (file)
index 0000000..65ba85d
--- /dev/null
@@ -0,0 +1,336 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX API Header kernel mode
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exported RGX API details
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXAPI_KM_H
+#define RGXAPI_KM_H
+
+#if defined(SUPPORT_SHARED_SLC)
+/*************************************************************************/ /*!
+@Function       RGXInitSLC
+@Description    Initialise the SLC after a power up. This function must be
+                 called when SUPPORT_SHARED_SLC is defined; otherwise it must
+                 not be called.
+
+@Input          hDevHandle   RGX Device Node
+@Return         PVRSRV_ERROR System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXInitSLC(IMG_HANDLE hDevHandle);
+#endif
+
+#include "rgx_hwperf.h"
+
+
+/******************************************************************************
+ * RGX HW Performance Profiling Control API(s)
+ *****************************************************************************/
+
+/*! HWPerf device identification structure */
+typedef struct _RGX_HWPERF_DEVICE_
+{
+       IMG_CHAR pszName[20];   /*!< Helps identify this device uniquely */
+       IMG_HANDLE hDevData;    /*!< Handle for the server */
+
+       struct _RGX_HWPERF_DEVICE_ *psNext;     /*!< Next device if any */
+} RGX_HWPERF_DEVICE;
+
+/*! HWPerf connection structure */
+typedef struct
+{
+       RGX_HWPERF_DEVICE *psHWPerfDevList;     /*!< Pointer to list of devices */
+} RGX_HWPERF_CONNECTION;
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfLazyConnect
+@Description    Obtain a HWPerf connection object to the RGX device(s). The
+                 connections to devices are not actually opened until
+                 RGXHWPerfOpen() is called.
+
+@Output         ppsHWPerfConnection Address of a HWPerf connection object
+@Return         PVRSRV_ERROR        System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfLazyConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection);
+
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfOpen
+@Description    Opens connection(s) to the RGX device(s). A valid handle to
+                 the connection object must be provided, so this function
+                 must be preceded by a call to RGXHWPerfLazyConnect().
+
+@Input          psHWPerfConnection HWPerf connection object
+@Return         PVRSRV_ERROR       System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfOpen(RGX_HWPERF_CONNECTION* psHWPerfConnection);
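+
+/* A minimal usage sketch of the lazy-connect/open pairing described above.
+ * Variable names are illustrative and error handling is reduced for brevity;
+ * this is not part of the driver API itself.
+ *
+ *     RGX_HWPERF_CONNECTION *psHWPerfConnection = NULL;
+ *     PVRSRV_ERROR eError;
+ *
+ *     eError = RGXHWPerfLazyConnect(&psHWPerfConnection);
+ *     if (eError == PVRSRV_OK)
+ *     {
+ *             // Connections to the device(s) are only actually opened here
+ *             eError = RGXHWPerfOpen(psHWPerfConnection);
+ *     }
+ */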
+
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfConnect
+@Description    Obtain a connection object to the RGX HWPerf module. Allocated
+                 connection object(s) reference opened connection(s). Calling
+                 this function is equivalent to calling RGXHWPerfLazyConnect()
+                 followed by RGXHWPerfOpen(). Use this connect variant when the
+                 caller will be retrieving event data.
+
+@Output         ppsHWPerfConnection Address of HWPerf connection object
+@Return         PVRSRV_ERROR        System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection);
+
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfFreeConnection
+@Description    Frees the HWPerf connection object
+
+@Input          psHWPerfConnection Pointer to connection object as returned
+                                    from RGXHWPerfLazyConnect()
+@Return         PVRSRV_ERROR       System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfFreeConnection(RGX_HWPERF_CONNECTION** psHWPerfConnection);
+
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfClose
+@Description    Closes all the opened connection(s) to RGX device(s)
+
+@Input          psHWPerfConnection Pointer to HWPerf connection object as
+                                    returned from RGXHWPerfConnect() or
+                                    RGXHWPerfOpen()
+@Return         PVRSRV_ERROR       System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfClose(RGX_HWPERF_CONNECTION *psHWPerfConnection);
+
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfDisconnect
+@Description    Disconnect from the RGX device
+
+@Input          ppsHWPerfConnection Pointer to HWPerf connection object as
+                                     returned from RGXHWPerfConnect() or
+                                     RGXHWPerfOpen(). Calling this function is
+                                     equivalent to calling RGXHWPerfClose()
+                                     followed by RGXHWPerfFreeConnection().
+@Return         PVRSRV_ERROR        System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfDisconnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection);
+
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfControl
+@Description    Enable or disable the generation of RGX HWPerf event packets.
+                 See RGXCtrlHWPerf().
+
+@Input          psHWPerfConnection Pointer to HWPerf connection object
+@Input          eStreamId          ID of the HWPerf stream
+@Input          bToggle            Switch to toggle or apply mask.
+@Input          ui64Mask           Mask of events to control.
+@Return         PVRSRV_ERROR       System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfControl(
+               RGX_HWPERF_CONNECTION *psHWPerfConnection,
+               RGX_HWPERF_STREAM_ID eStreamId,
+               IMG_BOOL             bToggle,
+               IMG_UINT64           ui64Mask);
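+
+/* A minimal sketch of enabling HWPerf event generation via RGXHWPerfControl(),
+ * assuming psHWPerfConnection was obtained as above. The stream ID and mask
+ * value are illustrative; real callers would use the RGX_HWPERF_STREAM_ID
+ * enumerators and event-type masks from rgx_hwperf.h.
+ *
+ *     IMG_UINT64 ui64Mask = ~0ULL;    // e.g. enable every event type
+ *     eError = RGXHWPerfControl(psHWPerfConnection,
+ *                               RGX_HWPERF_STREAM_ID0_FW,   // FW event stream
+ *                               IMG_FALSE,                  // apply mask, do not toggle
+ *                               ui64Mask);
+ */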
+
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfGetFilter
+@Description    Reads HWPerf stream filter where stream is identified by the
+                 given stream ID.
+
+@Input          hDevData     Handle to connection/device object
+@Input          eStreamId    ID of the HWPerf stream
+@Output         ui64Filter   HWPerf filter value
+@Return         PVRSRV_ERROR System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfGetFilter(
+               IMG_HANDLE  hDevData,
+               RGX_HWPERF_STREAM_ID eStreamId,
+               IMG_UINT64 *ui64Filter
+);
+
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfConfigMuxCounters
+@Description    Enable and configure the performance counter block for one or
+                 more device layout modules.
+                 See RGXHWPerfConfigureAndEnableCustomCounters().
+
+@Input          psHWPerfConnection Pointer to HWPerf connection object
+@Input          ui32NumBlocks      Number of elements in the array
+@Input          asBlockConfigs     Address of the array of configuration blocks
+@Return         PVRSRV_ERROR       System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfConfigMuxCounters(
+               RGX_HWPERF_CONNECTION         *psHWPerfConnection,
+               IMG_UINT32                     ui32NumBlocks,
+               RGX_HWPERF_CONFIG_MUX_CNTBLK  *asBlockConfigs);
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfConfigureAndEnableCustomCounters
+@Description    Enable and configure custom performance counters
+
+@Input          psHWPerfConnection    Pointer to HWPerf connection object
+@Input          ui16CustomBlockID     ID of the custom block to configure
+@Input          ui16NumCustomCounters Number of custom counters
+@Input          pui32CustomCounterIDs Pointer to array containing custom
+                                       counter IDs
+@Return         PVRSRV_ERROR          System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfConfigureAndEnableCustomCounters(
+               RGX_HWPERF_CONNECTION *psHWPerfConnection,
+               IMG_UINT16             ui16CustomBlockID,
+               IMG_UINT16             ui16NumCustomCounters,
+               IMG_UINT32            *pui32CustomCounterIDs);
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfDisableCounters
+@Description    Disable the performance counter block for one or more device
+                 layout modules.
+
+@Input          psHWPerfConnection Pointer to HWPerf connection object
+@Input          ui32NumBlocks      Number of elements in the array
+@Input          aeBlockIDs         An array of words with values taken from
+                                    the <tt>RGX_HWPERF_CNTBLK_ID</tt>
+                                    enumeration.
+@Return         PVRSRV_ERROR       System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfDisableCounters(
+               RGX_HWPERF_CONNECTION *psHWPerfConnection,
+               IMG_UINT32   ui32NumBlocks,
+               IMG_UINT16*  aeBlockIDs);
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfEnableCounters
+@Description    Enable the performance counter block for one or more device
+                 layout modules.
+
+@Input          psHWPerfConnection Pointer to HWPerf connection object
+@Input          ui32NumBlocks      Number of elements in the array
+@Input          aeBlockIDs         An array of words with values taken from the
+                                    <tt>RGX_HWPERF_CNTBLK_ID</tt> enumeration.
+@Return         PVRSRV_ERROR  System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfEnableCounters(
+               RGX_HWPERF_CONNECTION *psHWPerfConnection,
+               IMG_UINT32   ui32NumBlocks,
+               IMG_UINT16*  aeBlockIDs);
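+
+/* A minimal sketch of enabling counter blocks, assuming psHWPerfConnection was
+ * obtained as above. The block ID values are placeholders and would in
+ * practice be taken from the RGX_HWPERF_CNTBLK_ID enumeration for the target
+ * core.
+ *
+ *     IMG_UINT16 aui16BlockIDs[] = { 0x0000, 0x0001 };    // hypothetical block IDs
+ *     eError = RGXHWPerfEnableCounters(psHWPerfConnection,
+ *                                      sizeof(aui16BlockIDs) / sizeof(aui16BlockIDs[0]),
+ *                                      aui16BlockIDs);
+ */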
+
+/******************************************************************************
+ * RGX HW Performance Profiling Retrieval API(s)
+ *
+ * The client must ensure that its use of this acquire/release API for a single
+ * connection/stream is not shared between multiple execution contexts, e.g.
+ * between a kernel thread and an ISR handler. It is the client's
+ * responsibility to ensure this API is not interrupted by a higher-priority
+ * thread/ISR.
+ *****************************************************************************/
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfAcquireEvents
+@Description    When there is data available to read, this call returns OK
+                 together with the address and length of the data buffer the
+                 client can safely read. This buffer may contain one or more
+                 event packets. When there is no data to read, this call
+                 returns OK and sets *pui32BufLen to 0 on exit.
+                 Clients must pair this call with a RGXHWPerfReleaseEvents()
+                 call.
+                 Data returned in ppBuf will be in the form of a sequence of
+                 HWPerf packets which should be traversed using the pointers,
+                 structures and macros provided by rgx_hwperf.h.
+
+@Input          hDevData     Handle to connection/device object
+@Input          eStreamId    ID of the HWPerf stream
+@Output         ppBuf        Address of a pointer to a byte buffer. On exit it
+                              contains the address of buffer to read from
+@Output         pui32BufLen  Pointer to an integer. On exit it is the size of
+                              the data to read from the buffer
+@Return         PVRSRV_ERROR System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfAcquireEvents(
+               IMG_HANDLE  hDevData,
+               RGX_HWPERF_STREAM_ID eStreamId,
+               IMG_PBYTE*  ppBuf,
+               IMG_UINT32* pui32BufLen);
+
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfReleaseEvents
+@Description    Called after the client has read the event data out of the
+                 buffer retrieved by RGXHWPerfAcquireEvents(), to release the
+                 associated resources.
+
+@Input          hDevData     Handle to connection/device object
+@Input          eStreamId    ID of the HWPerf stream
+@Return         PVRSRV_ERROR System error code
+*/ /**************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR RGXHWPerfReleaseEvents(
+               IMG_HANDLE hDevData,
+               RGX_HWPERF_STREAM_ID eStreamId);
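+
+/* A minimal sketch of the acquire/release pairing described above, using the
+ * per-device hDevData handle from the RGX_HWPERF_DEVICE list. The stream ID is
+ * illustrative and packet parsing is elided.
+ *
+ *     PVRSRV_ERROR eError;
+ *     IMG_PBYTE    pBuf;
+ *     IMG_UINT32   ui32BufLen;
+ *
+ *     eError = RGXHWPerfAcquireEvents(hDevData, RGX_HWPERF_STREAM_ID0_FW,
+ *                                     &pBuf, &ui32BufLen);
+ *     if (eError == PVRSRV_OK && ui32BufLen > 0)
+ *     {
+ *             // ... traverse the HWPerf packets in pBuf using rgx_hwperf.h helpers ...
+ *             eError = RGXHWPerfReleaseEvents(hDevData, RGX_HWPERF_STREAM_ID0_FW);
+ *     }
+ */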
+
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfConvertCRTimeStamp
+@Description    Converts the timestamp given by FW events to the common OS
+                 timestamp. The first three inputs are obtained via a CLK_SYNC
+                 event, ui64CRTimeStamp is the CR timestamp from the FW event
+                 to be converted.
+
+@Input          ui32ClkSpeed        Clock speed given by sync event
+@Input          ui64CorrCRTimeStamp CR Timestamp given by sync event
+@Input          ui64CorrOSTimeStamp Correlating OS Timestamp given by sync
+                                     event
+@Input          ui64CRTimeStamp     CR Timestamp to convert
+@Return         IMG_UINT64          Calculated OS Timestamp
+*/ /**************************************************************************/
+IMG_UINT64 RGXHWPerfConvertCRTimeStamp(
+               IMG_UINT32 ui32ClkSpeed,
+               IMG_UINT64 ui64CorrCRTimeStamp,
+               IMG_UINT64 ui64CorrOSTimeStamp,
+               IMG_UINT64 ui64CRTimeStamp);
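+
+/* A minimal sketch of converting a CR timestamp. The first three values are
+ * assumed to have been captured from a preceding CLK_SYNC event, the last one
+ * from the FW event being converted; variable names are illustrative.
+ *
+ *     IMG_UINT64 ui64OSTime = RGXHWPerfConvertCRTimeStamp(ui32ClkSpeed,
+ *                                                         ui64CorrCRTimeStamp,
+ *                                                         ui64CorrOSTimeStamp,
+ *                                                         ui64EventCRTimeStamp);
+ */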
+
+#endif /* RGXAPI_KM_H */
+
+/******************************************************************************
+ End of file (rgxapi_km.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/include/sync_checkpoint_internal.h b/drivers/gpu/drm/img/img-rogue/services/include/sync_checkpoint_internal.h
new file mode 100644 (file)
index 0000000..ce17847
--- /dev/null
@@ -0,0 +1,288 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services internal synchronisation checkpoint interface header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines the internal server interface for services
+                synchronisation checkpoints.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef SYNC_CHECKPOINT_INTERNAL_H
+#define SYNC_CHECKPOINT_INTERNAL_H
+
+#include "img_types.h"
+#include "opaque_types.h"
+#include "sync_checkpoint_external.h"
+#include "sync_checkpoint.h"
+#include "ra.h"
+#include "dllist.h"
+#include "lock.h"
+#include "devicemem.h"
+#include "rgx_fwif_shared.h"
+#include "rgx_fwif_km.h"
+
+struct SYNC_CHECKPOINT_RECORD;
+
+/*
+       Private structures
+*/
+
+typedef struct _SYNC_CHECKPOINT_CONTEXT_CTL_ _SYNC_CHECKPOINT_CONTEXT_CTL, *_PSYNC_CHECKPOINT_CONTEXT_CTL;
+
+typedef struct SYNC_CHECKPOINT_CONTEXT_TAG
+{
+       PPVRSRV_DEVICE_NODE                             psDevNode;
+       IMG_CHAR                                                azName[PVRSRV_SYNC_NAME_LENGTH];       /*!< Name of the RA */
+       RA_ARENA                                                *psSubAllocRA;                         /*!< RA context */
+       IMG_CHAR                                                azSpanName[PVRSRV_SYNC_NAME_LENGTH];   /*!< Name of the span RA */
+       RA_ARENA                                                *psSpanRA;                             /*!< RA used for span management of SubAllocRA */
+       ATOMIC_T                                                hRefCount;                             /*!< Ref count for this context */
+       ATOMIC_T                                                hCheckpointCount;                      /*!< Checkpoint count for this context */
+       POS_LOCK                                                hLock;
+       _PSYNC_CHECKPOINT_CONTEXT_CTL   psContextCtl;
+#if defined(PDUMP)
+       DLLIST_NODE                                             sSyncCheckpointBlockListHead;          /*!< List head for the sync chkpt blocks in this context*/
+       POS_LOCK                                                hSyncCheckpointBlockListLock;          /*!< sync chkpt blocks list lock*/
+       DLLIST_NODE                                             sListNode;                              /*!< List node for the sync chkpt context list*/
+#endif
+} _SYNC_CHECKPOINT_CONTEXT;
+
+typedef struct _SYNC_CHECKPOINT_BLOCK_
+{
+       ATOMIC_T                  hRefCount;                  /*!< Ref count for this sync block */
+       POS_LOCK                  hLock;
+       _SYNC_CHECKPOINT_CONTEXT  *psContext;                 /*!< Our copy of the services connection */
+       PPVRSRV_DEVICE_NODE       psDevNode;
+       IMG_UINT32                ui32SyncBlockSize;          /*!< Size of the sync checkpoint block */
+       IMG_UINT32                ui32FirmwareAddr;           /*!< Firmware address */
+       DEVMEM_MEMDESC            *hMemDesc;                  /*!< DevMem allocation for block */
+       volatile IMG_UINT32       *pui32LinAddr;              /*!< Server-code CPU mapping */
+       IMG_UINT64                uiSpanBase;                 /*!< Base of this import (FW DevMem) in the span RA */
+#if defined(PDUMP)
+       DLLIST_NODE               sListNode;                  /*!< List node for the sync chkpt blocks */
+#endif
+} SYNC_CHECKPOINT_BLOCK;
+
+typedef struct SYNC_CHECKPOINT_RECORD* PSYNC_CHECKPOINT_RECORD_HANDLE;
+
+typedef struct SYNC_CHECKPOINT_TAG
+{
+       //_SYNC_CHECKPOINT_CONTEXT      *psContext;             /*!< pointer to the parent context of this checkpoint */
+       /* A sync checkpoint is assigned a unique ID, to avoid any confusion should
+        * the same memory be re-used later for a different checkpoint
+        */
+       IMG_UINT32                      ui32UID;                /*!< Unique ID assigned to sync checkpoint (to distinguish checkpoints if memory is re-used)*/
+       ATOMIC_T                        hRefCount;              /*!< Ref count for this sync */
+       ATOMIC_T                        hEnqueuedCCBCount;      /*!< Num times sync has been put in CCBs */
+       SYNC_CHECKPOINT_BLOCK           *psSyncCheckpointBlock; /*!< Synchronisation block this checkpoint is allocated on */
+       IMG_UINT64                      uiSpanAddr;             /*!< Span address of the sync */
+       volatile SYNC_CHECKPOINT_FW_OBJ *psSyncCheckpointFwObj; /*!< CPU view of the data held in the sync block */
+       PRGXFWIF_UFO_ADDR               sCheckpointUFOAddr;     /*!< PRGXFWIF_UFO_ADDR struct used to pass update address to FW */
+       IMG_CHAR                        azName[PVRSRV_SYNC_NAME_LENGTH]; /*!< Name of the checkpoint */
+       PVRSRV_TIMELINE                 hTimeline;              /*!< Timeline on which this sync checkpoint was created */
+       IMG_UINT32                      ui32ValidationCheck;
+       IMG_PID                         uiProcess;              /*!< The Process ID of the process which created this sync checkpoint */
+       PSYNC_CHECKPOINT_RECORD_HANDLE  hRecord;                /*!< Sync record handle */
+       DLLIST_NODE                     sListNode;              /*!< List node for the global sync chkpt list */
+       DLLIST_NODE                     sDeferredFreeListNode;  /*!< List node for the deferred free sync chkpt list */
+       IMG_UINT32                      ui32FWAddr;             /*!< FWAddr stored at sync checkpoint alloc time */
+       PDUMP_FLAGS_T                   ui32PDumpFlags;         /*!< Pdump Capture mode to be used for POL*/
+} SYNC_CHECKPOINT;
+
+
+typedef struct _SYNC_CHECKPOINT_SIGNAL_
+{
+       SYNC_CHECKPOINT                asSyncCheckpoint;       /*!< Store sync checkpt for deferred signal */
+       IMG_UINT32                      ui32Status;             /*!< sync checkpt status signal/errored */
+} _SYNC_CHECKPOINT_DEFERRED_SIGNAL;
+
+#define GET_CP_CB_NEXT_IDX(_curridx) (((_curridx) + 1) % SYNC_CHECKPOINT_MAX_DEFERRED_SIGNAL)
+#define GET_CP_CB_BASE(_idx)   (IMG_OFFSET_ADDR(psDevNode->pui8DeferredSyncCPSignal, \
+                                                ((_idx) * sizeof(_SYNC_CHECKPOINT_DEFERRED_SIGNAL))))
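+
+/* Illustrative note: the deferred-signal entries form a circular buffer of
+ * SYNC_CHECKPOINT_MAX_DEFERRED_SIGNAL slots. For example, with 8 slots,
+ * GET_CP_CB_NEXT_IDX(6) == 7 and GET_CP_CB_NEXT_IDX(7) wraps back to 0, while
+ * GET_CP_CB_BASE(idx) returns the byte address of slot 'idx' within the device
+ * node's pui8DeferredSyncCPSignal buffer. */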
+
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointGetFirmwareAddr
+
+@Description    Returns the firmware address of the given sync checkpoint.
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint to get
+                                        the firmware address of
+
+@Return         The firmware address of the sync checkpoint
+
+*/
+/*****************************************************************************/
+IMG_UINT32
+SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointCCBEnqueued
+
+@Description    Increment the CCB enqueued reference count for a
+                synchronisation checkpoint. This indicates how many FW
+                operations (checks/updates) have been placed into CCBs for the
+                sync checkpoint.
+                When the FW services these operations, it increments its own
+                reference count. When these two values are equal, we know
+                there are no outstanding FW operations for the checkpoint
+                in any CCB.
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint for which
+                                        to increment the enqueued reference
+                                        count
+
+@Return         None
+
+*/
+/*****************************************************************************/
+void
+SyncCheckpointCCBEnqueued(PSYNC_CHECKPOINT psSyncCheckpoint);
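+
+/* Illustrative note: if a kick places one check and one update for the same
+ * checkpoint into CCBs, the server would call SyncCheckpointCCBEnqueued()
+ * once per enqueued FW operation; once the FW has serviced both, its own count
+ * matches hEnqueuedCCBCount and no outstanding CCB references remain. */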
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointGetEnqueuedCount
+
+@Description    Returns the number of FW operations (checks or updates)
+                currently enqueued in CCBs for the given sync checkpoint.
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint to get
+                                        the enqueued count of
+
+@Return         The enqueued count of the sync checkpoint
+                (i.e. the number of FW operations (checks or updates)
+                 currently enqueued in CCBs for the sync checkpoint)
+
+*/
+/*****************************************************************************/
+IMG_UINT32
+SyncCheckpointGetEnqueuedCount(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointGetReferenceCount
+
+@Description    Returns the host reference count of the given sync checkpoint.
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint to get
+                                        the reference count of
+
+@Return         The host reference count of the sync checkpoint
+
+*/
+/*****************************************************************************/
+IMG_UINT32
+SyncCheckpointGetReferenceCount(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointGetCreator
+
+@Description    Returns the ID of the process that created the given sync
+                checkpoint.
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint to get
+                                        the creating process of
+
+@Return         The process id of the process which created this sync checkpoint.
+
+*/
+/*****************************************************************************/
+IMG_PID
+SyncCheckpointGetCreator(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointGetId
+
+@Description    Returns the unique ID assigned to the given sync checkpoint.
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint to get
+                                        the unique Id of
+
+@Return         The unique Id of the sync checkpoint
+
+*/
+/*****************************************************************************/
+IMG_UINT32
+SyncCheckpointGetId(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointGetTimeline
+
+@Description    Returns the parent timeline of the given sync checkpoint.
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint to get
+                                        the parent timeline of
+
+@Return         The parent timeline of the sync checkpoint
+
+*/
+/*****************************************************************************/
+PVRSRV_TIMELINE
+SyncCheckpointGetTimeline(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointGetRGXFWIFUFOAddr
+
+@Description    Returns the PRGXFWIF_UFO_ADDR of the given sync checkpoint.
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint to get
+                                        the PRGXFWIF_UFO_ADDR of
+
+@Return         The PRGXFWIF_UFO_ADDR of the sync checkpoint, used when
+                providing the update in server kick code.
+
+*/
+/*****************************************************************************/
+PRGXFWIF_UFO_ADDR*
+SyncCheckpointGetRGXFWIFUFOAddr(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+#if !defined(SUPPORT_NATIVE_FENCE_SYNC)
+/*************************************************************************/ /*!
+@Function       SyncCheckpointGetAssociatedDevice
+
+@Description    Returns the device node on which the given sync checkpoint
+                context was created.
+
+@Input          psSyncCheckpointContext Synchronisation Checkpoint context
+                                        to get the device node of
+
+@Return         The PVRSRV_DEVICE_NODE of the device on which the sync
+                checkpoint context was created.
+
+*/
+/*****************************************************************************/
+PPVRSRV_DEVICE_NODE
+SyncCheckpointGetAssociatedDevice(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext);
+#endif /* !defined(SUPPORT_NATIVE_FENCE_SYNC) */
+
+#endif /* SYNC_CHECKPOINT_INTERNAL_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/cache_km.c b/drivers/gpu/drm/img/img-rogue/services/server/common/cache_km.c
new file mode 100644 (file)
index 0000000..368c1ee
--- /dev/null
@@ -0,0 +1,1636 @@
+/*************************************************************************/ /*!
+@File           cache_km.c
+@Title          CPU d-cache maintenance operations framework
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements server side code for CPU d-cache maintenance taking
+                into account the idiosyncrasies of the various types of CPU
+                d-cache instruction-set architecture (ISA) maintenance
+                mechanisms.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#if defined(__linux__)
+#include <linux/version.h>
+#include <linux/uaccess.h>
+#include <asm/current.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#endif
+
+#include "pmr.h"
+#include "log2.h"
+#include "device.h"
+#include "pvrsrv.h"
+#include "osfunc.h"
+#include "cache_km.h"
+#include "pvr_debug.h"
+#include "lock_types.h"
+#include "allocmem.h"
+#include "process_stats.h"
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+#include "ri_server.h"
+#endif
+#include "devicemem.h"
+#include "pvrsrv_apphint.h"
+#include "pvrsrv_sync_server.h"
+#include "km_apphint_defs.h"
+#include "km_apphint_defs_common.h"
+#include "oskm_apphint.h"
+#include "di_server.h"
+
+/* This header must always be included last */
+#if defined(__linux__)
+#include "kernel_compatibility.h"
+#endif
+
+/* Top-level file-local build definitions */
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS) && defined(__linux__)
+#define CACHEOP_DEBUG
+#define CACHEOP_STATS_ITEMS_MAX                                32
+#define INCR_WRAP(x)                                           ((x+1) >= CACHEOP_STATS_ITEMS_MAX ? 0 : (x+1))
+#define DECR_WRAP(x)                                           ((x-1) < 0 ? (CACHEOP_STATS_ITEMS_MAX-1) : (x-1))
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+/* Refer to CacheOpStatsExecLogHeader() for header item names */
+#define CACHEOP_RI_PRINTF_HEADER                       "%-8s %-8s %-10s %-10s %-5s %-16s %-16s %-10s %-10s %-18s"
+#define CACHEOP_RI_PRINTF                                      "%-8d %-8d %-10s %-10s %-5s 0x%-14llx 0x%-14llx 0x%-8llx 0x%-8llx %-18llu\n"
+#else
+#define CACHEOP_PRINTF_HEADER                          "%-8s %-8s %-10s %-10s %-5s %-10s %-10s %-18s"
+#define CACHEOP_PRINTF                                         "%-8d %-8d %-10s %-10s %-5s 0x%-8llx 0x%-8llx %-18llu\n"
+#endif
+#endif
+
+//#define CACHEOP_NO_CACHE_LINE_ALIGNED_ROUNDING               /* Force OS page (not cache line) flush granularity */
+#define CACHEOP_PVR_ASSERT(x)                                                  /* Define as PVR_ASSERT(x), enable for swdev & testing */
+#define CACHEOP_DEVMEM_OOR_ERROR_STRING                "cacheop device memory request is out of range"
+#define CACHEOP_MAX_DEBUG_MESSAGE_LEN          160
+
+typedef struct _CACHEOP_WORK_ITEM_
+{
+       PMR *psPMR;
+       IMG_DEVMEM_SIZE_T uiSize;
+       PVRSRV_CACHE_OP uiCacheOp;
+       IMG_DEVMEM_OFFSET_T uiOffset;
+       PVRSRV_TIMELINE iTimeline;
+       SYNC_TIMELINE_OBJ sSWTimelineObj;
+       PVRSRV_DEVICE_NODE *psDevNode;
+#if defined(CACHEOP_DEBUG)
+       IMG_UINT64 ui64StartTime;
+       IMG_UINT64 ui64EndTime;
+       IMG_BOOL bKMReq;
+       IMG_PID pid;
+#endif
+} CACHEOP_WORK_ITEM;
+
+typedef struct _CACHEOP_STATS_EXEC_ITEM_
+{
+       IMG_UINT32 ui32DeviceID;
+       IMG_PID pid;
+       PVRSRV_CACHE_OP uiCacheOp;
+       IMG_DEVMEM_SIZE_T uiOffset;
+       IMG_DEVMEM_SIZE_T uiSize;
+       IMG_UINT64 ui64StartTime;
+       IMG_UINT64 ui64EndTime;
+       IMG_BOOL bKMReq;
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+       IMG_DEV_VIRTADDR sDevVAddr;
+       IMG_DEV_PHYADDR sDevPAddr;
+#endif
+} CACHEOP_STATS_EXEC_ITEM;
+
+typedef enum _CACHEOP_CONFIG_
+{
+       CACHEOP_CONFIG_DEFAULT = 0,
+       /* cache flush mechanism types */
+       CACHEOP_CONFIG_URBF    = 4,
+       /* sw-emulated deferred flush mechanism */
+       CACHEOP_CONFIG_KDF     = 8,
+       /* pseudo configuration items */
+       CACHEOP_CONFIG_LAST    = 16,
+       CACHEOP_CONFIG_KLOG    = 16,
+       CACHEOP_CONFIG_ALL     = 31
+} CACHEOP_CONFIG;
+
+typedef struct _CACHEOP_WORK_QUEUE_
+{
+/*
+ * Initialisation state & primary device node the framework
+ * is anchored on.
+ */
+       IMG_BOOL bInit;
+/*
+  MMU page size/shift & d-cache line size
+ */
+       size_t uiPageSize;
+       IMG_UINT32 uiLineSize;
+       IMG_UINT32 uiLineShift;
+       IMG_UINT32 uiPageShift;
+       OS_CACHE_OP_ADDR_TYPE uiCacheOpAddrType;
+       PMR *psInfoPagePMR;
+       IMG_UINT32 *pui32InfoPage;
+
+#if defined(CACHEOP_DEBUG)
+/*
+  CacheOp statistics
+ */
+       DI_ENTRY *psDIEntry;
+       IMG_HANDLE hStatsExecLock;
+
+       IMG_UINT32 ui32ServerOps;
+       IMG_UINT32 ui32ClientOps;
+       IMG_UINT32 ui32TotalOps;
+       IMG_UINT32 ui32ServerOpUsedUMVA;
+       IMG_UINT32 ui32AvgExecTime;
+       IMG_UINT32 ui32AvgExecTimeRemainder;
+
+       IMG_INT32 i32StatsExecWriteIdx;
+       CACHEOP_STATS_EXEC_ITEM asStatsExecuted[CACHEOP_STATS_ITEMS_MAX];
+#endif
+
+       DI_ENTRY *psConfigTune;
+       IMG_HANDLE hConfigLock;
+       CACHEOP_CONFIG  eConfig;
+       IMG_UINT32              ui32Config;
+       IMG_BOOL                bSupportsUMFlush;
+} CACHEOP_WORK_QUEUE;
+
+/* Top-level CacheOp framework object */
+static CACHEOP_WORK_QUEUE gsCwq;
+
+#define CacheOpConfigSupports(e) ((gsCwq.eConfig & (e)) ? IMG_TRUE : IMG_FALSE)
+extern void do_invalid_range(unsigned long start, unsigned long len);
+
+#if defined(CACHEOP_DEBUG)
+static INLINE void CacheOpStatsExecLogHeader(IMG_CHAR szBuffer[CACHEOP_MAX_DEBUG_MESSAGE_LEN])
+{
+       OSSNPrintf(szBuffer, CACHEOP_MAX_DEBUG_MESSAGE_LEN,
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+                               CACHEOP_RI_PRINTF_HEADER,
+#else
+                               CACHEOP_PRINTF_HEADER,
+#endif
+                               "DevID",
+                               "Pid",
+                               "CacheOp",
+                               "Type",
+                               "Origin",
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+                               "DevVAddr",
+                               "DevPAddr",
+#endif
+                               "Offset",
+                               "Size",
+                               "xTime (us)");
+}
+
+static void CacheOpStatsExecLogWrite(CACHEOP_WORK_ITEM *psCacheOpWorkItem)
+{
+       IMG_INT32 i32WriteOffset;
+       IMG_UINT32 ui32ExecTime;
+       printk("log write\n");
+       if (!psCacheOpWorkItem->uiCacheOp)
+       {
+               return;
+       }
+       else if (psCacheOpWorkItem->bKMReq && !CacheOpConfigSupports(CACHEOP_CONFIG_KLOG))
+       {
+               /* KM logs spam the history due to their frequency; this removes them completely */
+               return;
+       }
+
+       OSLockAcquire(gsCwq.hStatsExecLock);
+
+       i32WriteOffset = gsCwq.i32StatsExecWriteIdx;
+       gsCwq.i32StatsExecWriteIdx = INCR_WRAP(gsCwq.i32StatsExecWriteIdx);
+       gsCwq.asStatsExecuted[i32WriteOffset].ui32DeviceID = psCacheOpWorkItem->psDevNode ? psCacheOpWorkItem->psDevNode->sDevId.ui32InternalID : -1;
+       gsCwq.asStatsExecuted[i32WriteOffset].pid = psCacheOpWorkItem->pid;
+       gsCwq.asStatsExecuted[i32WriteOffset].uiSize = psCacheOpWorkItem->uiSize;
+       gsCwq.asStatsExecuted[i32WriteOffset].bKMReq = psCacheOpWorkItem->bKMReq;
+       gsCwq.asStatsExecuted[i32WriteOffset].uiOffset  = psCacheOpWorkItem->uiOffset;
+       gsCwq.asStatsExecuted[i32WriteOffset].uiCacheOp = psCacheOpWorkItem->uiCacheOp;
+       gsCwq.asStatsExecuted[i32WriteOffset].ui64StartTime = psCacheOpWorkItem->ui64StartTime;
+       gsCwq.asStatsExecuted[i32WriteOffset].ui64EndTime = psCacheOpWorkItem->ui64EndTime;
+
+       CACHEOP_PVR_ASSERT(gsCwq.asStatsExecuted[i32WriteOffset].pid);
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+       if (gsCwq.bInit && psCacheOpWorkItem->psPMR)
+       {
+               IMG_CPU_PHYADDR sDevPAddr;
+               PVRSRV_ERROR eError, eLockError;
+               IMG_BOOL bValid;
+
+               /* Get more detailed information regarding the sub allocations that
+                  PMR has from RI manager for process that requested the CacheOp */
+               eError = RIDumpProcessListKM(psCacheOpWorkItem->psPMR,
+                                                                        gsCwq.asStatsExecuted[i32WriteOffset].pid,
+                                                                        gsCwq.asStatsExecuted[i32WriteOffset].uiOffset,
+                                                                        &gsCwq.asStatsExecuted[i32WriteOffset].sDevVAddr);
+               PVR_GOTO_IF_ERROR(eError, e0);
+
+               /* (Re)lock here as some PMR might have not been locked */
+               eLockError = PMRLockSysPhysAddresses(psCacheOpWorkItem->psPMR);
+               PVR_GOTO_IF_ERROR(eLockError, e0);
+
+               eError = PMR_CpuPhysAddr(psCacheOpWorkItem->psPMR,
+                                                                gsCwq.uiPageShift,
+                                                                1,
+                                                                gsCwq.asStatsExecuted[i32WriteOffset].uiOffset,
+                                                                &sDevPAddr,
+                                                                &bValid);
+
+               eLockError = PMRUnlockSysPhysAddresses(psCacheOpWorkItem->psPMR);
+               PVR_LOG_IF_ERROR(eLockError, "PMRUnlockSysPhysAddresses");
+
+               PVR_GOTO_IF_ERROR(eError, e0);
+
+
+
+               gsCwq.asStatsExecuted[i32WriteOffset].sDevPAddr.uiAddr = sDevPAddr.uiAddr;
+       }
+#endif
+
+       /* Calculate the approximate cumulative moving average execution time.
+        * This calculation is based on standard equation:
+        *
+        * CMAnext = (new + count * CMAprev) / (count + 1)
+        *
+        * but in simplified form:
+        *
+        * CMAnext = CMAprev + (new - CMAprev) / (count + 1)
+        *
+        * this gets rid of multiplication and prevents overflow.
+        *
+        * Also to increase accuracy that we lose with integer division,
+        * we hold the moving remainder of the division and add it.
+        *
+        * CMAnext = CMAprev + (new - CMAprev + CMRprev) / (count + 1)
+        *
+        * Multiple tests proved it to be the best solution for approximating
+        * CMA using integers.
+        *
+        */
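+       /* Worked example (illustrative numbers): with CMAprev = 100,
+        * CMRprev = 2, count = 3 and a new sample of 140:
+        *   i32Div  = 140 - 100 + 2 = 42
+        *   CMAnext = 100 + 42 / 4  = 110   (integer division)
+        *   CMRnext = 42 % 4        = 2
+        */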
+
+       ui32ExecTime =
+               gsCwq.asStatsExecuted[i32WriteOffset].ui64EndTime -
+               gsCwq.asStatsExecuted[i32WriteOffset].ui64StartTime;
+
+       {
+
+       IMG_INT32 i32Div =
+               (IMG_INT32) ui32ExecTime -
+               (IMG_INT32) gsCwq.ui32AvgExecTime +
+               (IMG_INT32) gsCwq.ui32AvgExecTimeRemainder;
+
+       gsCwq.ui32AvgExecTime += i32Div / (IMG_INT32)(gsCwq.ui32TotalOps + 1);
+       gsCwq.ui32AvgExecTimeRemainder = i32Div % (IMG_INT32)(gsCwq.ui32TotalOps + 1);
+
+       gsCwq.ui32TotalOps++;
+
+       }
+
+       if (!gsCwq.asStatsExecuted[i32WriteOffset].bKMReq)
+       {
+               /* This operation queues only UM CacheOp in per-PID process statistics database */
+               PVRSRVStatsUpdateCacheOpStats(
+                                               gsCwq.asStatsExecuted[i32WriteOffset].uiCacheOp,
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+                                               gsCwq.asStatsExecuted[i32WriteOffset].sDevVAddr,
+                                               gsCwq.asStatsExecuted[i32WriteOffset].sDevPAddr,
+#endif
+                                               gsCwq.asStatsExecuted[i32WriteOffset].uiOffset,
+                                               gsCwq.asStatsExecuted[i32WriteOffset].uiSize,
+                                               ui32ExecTime,
+                                               !gsCwq.asStatsExecuted[i32WriteOffset].bKMReq,
+                                               psCacheOpWorkItem->pid);
+       }
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+e0:
+#endif
+       OSLockRelease(gsCwq.hStatsExecLock);
+}
+
+static int CacheOpStatsExecLogRead(OSDI_IMPL_ENTRY *psEntry, void *pvData)
+{
+       IMG_CHAR *pszFlushType;
+       IMG_CHAR *pszCacheOpType;
+       IMG_CHAR *pszFlushSource;
+       IMG_INT32 i32ReadOffset;
+       IMG_INT32 i32WriteOffset;
+
+       IMG_CHAR szBuffer[CACHEOP_MAX_DEBUG_MESSAGE_LEN] = {0};
+       PVR_UNREFERENCED_PARAMETER(pvData);
+
+       OSLockAcquire(gsCwq.hStatsExecLock);
+
+       DIPrintf(psEntry,
+                       "Primary CPU d-cache architecture: LSZ: 0x%x, URBF: %s\n",
+                       gsCwq.uiLineSize,
+                       gsCwq.bSupportsUMFlush ? "Yes" : "No");
+
+       DIPrintf(psEntry,
+                       "Configuration: UKT: %d, URBF: %s\n",
+                       gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD],
+                       gsCwq.eConfig & CACHEOP_CONFIG_URBF ? "Yes" : "No");
+
+       DIPrintf(psEntry,
+                       "Summary: Total Ops [%d] - Server(using UMVA)/Client [%d(%d)/%d]. Avg execution time [%d]\n",
+                       gsCwq.ui32TotalOps, gsCwq.ui32ServerOps, gsCwq.ui32ServerOpUsedUMVA, gsCwq.ui32ClientOps, gsCwq.ui32AvgExecTime);
+
+
+       CacheOpStatsExecLogHeader(szBuffer);
+       DIPrintf(psEntry, "%s\n", szBuffer);
+
+       i32WriteOffset = gsCwq.i32StatsExecWriteIdx;
+       for (i32ReadOffset = DECR_WRAP(i32WriteOffset);
+                i32ReadOffset != i32WriteOffset;
+                i32ReadOffset = DECR_WRAP(i32ReadOffset))
+       {
+               IMG_UINT64 ui64ExecTime =
+                       gsCwq.asStatsExecuted[i32ReadOffset].ui64EndTime -
+                       gsCwq.asStatsExecuted[i32ReadOffset].ui64StartTime;
+
+               IMG_DEVMEM_SIZE_T ui64NumOfPages =
+                       gsCwq.asStatsExecuted[i32ReadOffset].uiSize >> gsCwq.uiPageShift;
+
+
+               if (!gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp)
+               {
+                       break;
+               }
+               if (ui64NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC)
+               {
+                       pszFlushType = "RBF.Fast";
+               }
+               else
+               {
+                       pszFlushType = "RBF.Slow";
+               }
+
+               pszFlushSource = gsCwq.asStatsExecuted[i32ReadOffset].bKMReq ? " KM" : " UM";
+
+               switch (gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp)
+               {
+                       case PVRSRV_CACHE_OP_NONE:
+                               pszCacheOpType = "None";
+                               break;
+                       case PVRSRV_CACHE_OP_CLEAN:
+                               pszCacheOpType = "Clean";
+                               break;
+                       case PVRSRV_CACHE_OP_INVALIDATE:
+                               pszCacheOpType = "Invalidate";
+                               break;
+                       case PVRSRV_CACHE_OP_FLUSH:
+                               pszCacheOpType = "Flush";
+                               break;
+                       case PVRSRV_CACHE_OP_TIMELINE:
+                               pszCacheOpType = "Timeline";
+                               pszFlushType = "      ";
+                               break;
+                       default:
+                               pszCacheOpType = "Unknown";
+                               break;
+               }
+
+               DIPrintf(psEntry,
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+                                               CACHEOP_RI_PRINTF,
+#else
+                                               CACHEOP_PRINTF,
+#endif
+                                               gsCwq.asStatsExecuted[i32ReadOffset].ui32DeviceID,
+                                               gsCwq.asStatsExecuted[i32ReadOffset].pid,
+                                               pszCacheOpType,
+                                               pszFlushType,
+                                               pszFlushSource,
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+                                               gsCwq.asStatsExecuted[i32ReadOffset].sDevVAddr.uiAddr,
+                                               gsCwq.asStatsExecuted[i32ReadOffset].sDevPAddr.uiAddr,
+#endif
+                                               gsCwq.asStatsExecuted[i32ReadOffset].uiOffset,
+                                               gsCwq.asStatsExecuted[i32ReadOffset].uiSize,
+                                               ui64ExecTime);
+
+       }
+
+       OSLockRelease(gsCwq.hStatsExecLock);
+
+       return 0;
+}
+#endif /* defined(CACHEOP_DEBUG) */
+
+static INLINE void CacheOpStatsReset(void)
+{
+#if defined(CACHEOP_DEBUG)
+       gsCwq.ui32ServerOps = 0;
+       gsCwq.ui32ClientOps = 0;
+       gsCwq.ui32TotalOps = 0;
+       gsCwq.ui32ServerOpUsedUMVA = 0;
+       gsCwq.ui32AvgExecTime = 0;
+       gsCwq.ui32AvgExecTimeRemainder = 0;
+
+       gsCwq.i32StatsExecWriteIdx = 0;
+
+       OSCachedMemSet(gsCwq.asStatsExecuted, 0, sizeof(gsCwq.asStatsExecuted));
+#endif
+}
+
+static void CacheOpConfigUpdate(IMG_UINT32 ui32Config)
+{
+       OSLockAcquire(gsCwq.hConfigLock);
+
+       /* Step 0, set the gsCwq.eConfig bits */
+       if (!(ui32Config & (CACHEOP_CONFIG_LAST - 1)))
+       {
+               gsCwq.eConfig = CACHEOP_CONFIG_KDF;
+               if (gsCwq.bSupportsUMFlush)
+               {
+                       gsCwq.eConfig |= CACHEOP_CONFIG_URBF;
+               }
+       }
+       else
+       {
+               if (ui32Config & CACHEOP_CONFIG_KDF)
+               {
+                       gsCwq.eConfig |= CACHEOP_CONFIG_KDF;
+               }
+               else
+               {
+                       gsCwq.eConfig &= ~CACHEOP_CONFIG_KDF;
+               }
+
+               if (gsCwq.bSupportsUMFlush && (ui32Config & CACHEOP_CONFIG_URBF))
+               {
+                       gsCwq.eConfig |= CACHEOP_CONFIG_URBF;
+               }
+               else
+               {
+                       gsCwq.eConfig &= ~CACHEOP_CONFIG_URBF;
+               }
+       }
+
+       if (ui32Config & CACHEOP_CONFIG_KLOG)
+       {
+               /* Suppress logs from KM caller */
+               gsCwq.eConfig |= CACHEOP_CONFIG_KLOG;
+       }
+       else
+       {
+               gsCwq.eConfig &= ~CACHEOP_CONFIG_KLOG;
+       }
+
+       /* Step 1, set gsCwq.ui32Config based on gsCwq.eConfig */
+       ui32Config = 0;
+
+       if (gsCwq.eConfig & CACHEOP_CONFIG_KDF)
+       {
+               ui32Config |= CACHEOP_CONFIG_KDF;
+       }
+       if (gsCwq.eConfig & CACHEOP_CONFIG_URBF)
+       {
+               ui32Config |= CACHEOP_CONFIG_URBF;
+       }
+       if (gsCwq.eConfig & CACHEOP_CONFIG_KLOG)
+       {
+               ui32Config |= CACHEOP_CONFIG_KLOG;
+       }
+       gsCwq.ui32Config = ui32Config;
+
+
+       /* Step 3, in certain cases where a CacheOp/VA is provided, this threshold determines at what point
+          the optimisation due to the presence of said VA (i.e. not having to remap the PMR pages in KM)
+          is clawed back because of the overhead of maintaining such a large request, which might stall the
+          user thread; to hide this latency, such CacheOps are executed on the deferred CacheOp thread */
+       gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD] = (IMG_UINT32)(PVR_DIRTY_BYTES_FLUSH_THRESHOLD >> 2);
+
+       /* Step 4, if no UM support, all requests are done in KM so zero these forcing all client requests
+          to come down into the KM for maintenance */
+       gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = 0;
+
+       if (gsCwq.bSupportsUMFlush)
+       {
+               /* With URBF enabled we never go to the kernel */
+               if (gsCwq.eConfig & CACHEOP_CONFIG_URBF)
+               {
+                       gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = (IMG_UINT32)~0;
+               }
+       }
+
+       /* Step 5, reset stats. */
+       CacheOpStatsReset();
+
+       OSLockRelease(gsCwq.hConfigLock);
+}
+
+static int CacheOpConfigRead(OSDI_IMPL_ENTRY *psEntry, void *pvData)
+{
+       PVR_UNREFERENCED_PARAMETER(pvData);
+
+       DIPrintf(psEntry, "URBF: %s\n",
+               gsCwq.eConfig & CACHEOP_CONFIG_URBF ? "Yes" : "No");
+
+       return 0;
+}
+
+static INLINE PVRSRV_ERROR CacheOpConfigQuery(const PVRSRV_DEVICE_NODE *psDevNode,
+                                                                                       const void *psPrivate,
+                                                                                       IMG_UINT32 *pui32Value)
+{
+       IMG_UINT32 ui32ID = (IMG_UINT32)(uintptr_t) psPrivate;
+       PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+       switch (ui32ID)
+       {
+               case APPHINT_ID_CacheOpConfig:
+                       *pui32Value = gsCwq.ui32Config;
+                       break;
+
+               case APPHINT_ID_CacheOpUMKMThresholdSize:
+                       *pui32Value = gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD];
+                       break;
+
+               default:
+                       break;
+       }
+
+       return PVRSRV_OK;
+}
+
+static INLINE PVRSRV_ERROR CacheOpConfigSet(const PVRSRV_DEVICE_NODE *psDevNode,
+                                                                                       const void *psPrivate,
+                                                                                       IMG_UINT32 ui32Value)
+{
+       IMG_UINT32 ui32ID = (IMG_UINT32)(uintptr_t) psPrivate;
+       PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+       switch (ui32ID)
+       {
+               case APPHINT_ID_CacheOpConfig:
+                       CacheOpConfigUpdate(ui32Value & CACHEOP_CONFIG_ALL);
+                       break;
+
+
+               case APPHINT_ID_CacheOpUMKMThresholdSize:
+               {
+                       if (!ui32Value || !gsCwq.bSupportsUMFlush)
+                       {
+                               /* CPU ISA does not support UM flush, therefore every request goes down into
+                                  the KM; silently ignore the request to adjust the threshold */
+                               PVR_ASSERT(! gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD]);
+                               break;
+                       }
+                       else if (ui32Value < gsCwq.uiPageSize)
+                       {
+                               /* Silently round-up to OS page size */
+                               ui32Value = gsCwq.uiPageSize;
+                       }
+
+                       /* Align to OS page size */
+                       ui32Value &= ~(gsCwq.uiPageSize - 1);
+
+                       gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = ui32Value;
+
+                       break;
+               }
+
+               default:
+                       break;
+       }
+
+       return PVRSRV_OK;
+}
+
+static INLINE PVRSRV_ERROR CacheOpTimelineBind(PVRSRV_DEVICE_NODE *psDevNode,
+                                                                                          CACHEOP_WORK_ITEM *psCacheOpWorkItem,
+                                                                                          PVRSRV_TIMELINE iTimeline)
+{
+       PVRSRV_ERROR eError;
+
+       /* Always default the incoming CacheOp work-item to safe values */
+       SyncClearTimelineObj(&psCacheOpWorkItem->sSWTimelineObj);
+       psCacheOpWorkItem->iTimeline = PVRSRV_NO_TIMELINE;
+       psCacheOpWorkItem->psDevNode = psDevNode;
+       if (iTimeline == PVRSRV_NO_TIMELINE)
+       {
+               return PVRSRV_OK;
+       }
+
+       psCacheOpWorkItem->iTimeline = iTimeline;
+       eError = SyncSWGetTimelineObj(iTimeline, &psCacheOpWorkItem->sSWTimelineObj);
+       PVR_LOG_IF_ERROR(eError, "SyncSWGetTimelineObj");
+
+       return eError;
+}
+
+static INLINE PVRSRV_ERROR CacheOpTimelineExec(CACHEOP_WORK_ITEM *psCacheOpWorkItem)
+{
+       PVRSRV_ERROR eError;
+
+       if (psCacheOpWorkItem->iTimeline == PVRSRV_NO_TIMELINE)
+       {
+               return PVRSRV_OK;
+       }
+       CACHEOP_PVR_ASSERT(psCacheOpWorkItem->sSWTimelineObj.pvTlObj);
+
+       eError = SyncSWTimelineAdvanceKM(psCacheOpWorkItem->psDevNode,
+                                        &psCacheOpWorkItem->sSWTimelineObj);
+       (void) SyncSWTimelineReleaseKM(&psCacheOpWorkItem->sSWTimelineObj);
+
+       return eError;
+}
+
+static INLINE void CacheOpExecRangeBased(PVRSRV_DEVICE_NODE *psDevNode,
+                                                                               PVRSRV_CACHE_OP uiCacheOp,
+                                                                               IMG_BYTE *pbCpuVirtAddr,
+                                                                               IMG_CPU_PHYADDR sCpuPhyAddr,
+                                                                               IMG_DEVMEM_OFFSET_T uiPgAlignedOffset,
+                                                                               IMG_DEVMEM_OFFSET_T uiCLAlignedStartOffset,
+                                                                               IMG_DEVMEM_OFFSET_T uiCLAlignedEndOffset)
+{
+       IMG_BYTE *pbCpuVirtAddrEnd;
+       IMG_BYTE *pbCpuVirtAddrStart;
+       IMG_CPU_PHYADDR sCpuPhyAddrEnd;
+       IMG_CPU_PHYADDR sCpuPhyAddrStart;
+       IMG_DEVMEM_SIZE_T uiRelFlushSize;
+       IMG_DEVMEM_OFFSET_T uiRelFlushOffset;
+       IMG_DEVMEM_SIZE_T uiNextPgAlignedOffset;
+
+       /* These quantities allow us to perform cache operations
+          at cache-line granularity, thereby ensuring we do not
+          perform more than is necessary */
+       CACHEOP_PVR_ASSERT(uiPgAlignedOffset < uiCLAlignedEndOffset);
+       uiRelFlushSize = (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize;
+       uiRelFlushOffset = 0;
+
+       if (uiCLAlignedStartOffset > uiPgAlignedOffset)
+       {
+               /* Zero unless initially starting at an in-page offset */
+               uiRelFlushOffset = uiCLAlignedStartOffset - uiPgAlignedOffset;
+               uiRelFlushSize -= uiRelFlushOffset;
+       }
+
+       /* uiRelFlushSize is gsCwq.uiPageSize unless the current outstanding CacheOp
+          size is smaller. The 1st case handles an in-page CacheOp range and
+          the 2nd case handles a multiple-page CacheOp range whose last
+          CacheOp size is less than gsCwq.uiPageSize */
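+       /* Worked example (illustrative): for a 4KB page with uiPgAlignedOffset =
+          0x1000, uiCLAlignedStartOffset = 0x1040 and uiCLAlignedEndOffset =
+          0x1800, uiRelFlushOffset is 0x40 (set above) and the in-page case
+          below trims uiRelFlushSize to 0x1800 - 0x1040 = 0x7C0 */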
+       uiNextPgAlignedOffset = uiPgAlignedOffset + (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize;
+       if (uiNextPgAlignedOffset < uiPgAlignedOffset)
+       {
+               /* uiNextPgAlignedOffset is greater than uiCLAlignedEndOffset
+                  by implication of this wrap-round; this only happens when
+                  uiPgAlignedOffset is the last page aligned offset */
+               uiRelFlushSize = uiRelFlushOffset ?
+                               uiCLAlignedEndOffset - uiCLAlignedStartOffset :
+                               uiCLAlignedEndOffset - uiPgAlignedOffset;
+       }
+       else
+       {
+               if (uiNextPgAlignedOffset > uiCLAlignedEndOffset)
+               {
+                       uiRelFlushSize = uiRelFlushOffset ?
+                                       uiCLAlignedEndOffset - uiCLAlignedStartOffset :
+                                       uiCLAlignedEndOffset - uiPgAlignedOffset;
+               }
+       }
+
+       /* More efficient to request cache maintenance operation for full
+          relative range as opposed to multiple cache-aligned ranges */
+       sCpuPhyAddrStart.uiAddr = sCpuPhyAddr.uiAddr + uiRelFlushOffset;
+       sCpuPhyAddrEnd.uiAddr = sCpuPhyAddrStart.uiAddr + uiRelFlushSize;
+       if (pbCpuVirtAddr)
+       {
+               pbCpuVirtAddrStart = pbCpuVirtAddr + uiRelFlushOffset;
+               pbCpuVirtAddrEnd = pbCpuVirtAddrStart + uiRelFlushSize;
+       }
+       else
+       {
+               /* Some OS/Env layer support functions expect NULL(s) */
+               pbCpuVirtAddrStart = NULL;
+               pbCpuVirtAddrEnd = NULL;
+       }
+
+       /* Perform the requested CacheOp on the CPU data cache for successive cache
+          lines' worth of bytes, up to the page or in-page cache-line boundary */
+       switch (uiCacheOp)
+       {
+               case PVRSRV_CACHE_OP_CLEAN:
+                       OSCPUCacheCleanRangeKM(psDevNode, pbCpuVirtAddrStart, pbCpuVirtAddrEnd,
+                                                                       sCpuPhyAddrStart, sCpuPhyAddrEnd);
+                       break;
+               case PVRSRV_CACHE_OP_INVALIDATE:
+                       OSCPUCacheInvalidateRangeKM(psDevNode, pbCpuVirtAddrStart, pbCpuVirtAddrEnd,
+                                                                       sCpuPhyAddrStart, sCpuPhyAddrEnd);
+                       break;
+               case PVRSRV_CACHE_OP_FLUSH:
+                       OSCPUCacheFlushRangeKM(psDevNode, pbCpuVirtAddrStart, pbCpuVirtAddrEnd,
+                                                                       sCpuPhyAddrStart, sCpuPhyAddrEnd);
+                       break;
+               default:
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache operation type %d",
+                                       __func__, uiCacheOp));
+                       break;
+       }
+
+}
+
+static INLINE void CacheOpExecRangeBasedVA(PVRSRV_DEVICE_NODE *psDevNode,
+                                                                                IMG_CPU_VIRTADDR pvAddress,
+                                                                                IMG_DEVMEM_SIZE_T uiSize,
+                                                                                PVRSRV_CACHE_OP uiCacheOp)
+{
+       IMG_CPU_PHYADDR sCpuPhyAddrUnused =
+               { IMG_CAST_TO_CPUPHYADDR_UINT(0xCAFEF00DDEADBEEFULL) };
+       IMG_BYTE *pbEnd = (IMG_BYTE*)((uintptr_t)pvAddress + (uintptr_t)uiSize);
+       IMG_BYTE *pbStart = (IMG_BYTE*)((uintptr_t)pvAddress & ~((uintptr_t)gsCwq.uiLineSize-1));
+
+       /*
+         If the start/end addresses aren't aligned to the cache line size, the start
+         is rounded down (above) and the end rounded up (below) to the nearest
+         cache-line boundary; this ensures we maintain every cache line touched by
+         an unaligned start/end address.
+        */
+       pbEnd = (IMG_BYTE *) PVR_ALIGN((uintptr_t)pbEnd, (uintptr_t)gsCwq.uiLineSize);
+       switch (uiCacheOp)
+       {
+               case PVRSRV_CACHE_OP_CLEAN:
+                       OSCPUCacheCleanRangeKM(psDevNode, pbStart, pbEnd, sCpuPhyAddrUnused, sCpuPhyAddrUnused);
+                       break;
+               case PVRSRV_CACHE_OP_INVALIDATE:
+                       OSCPUCacheInvalidateRangeKM(psDevNode, pbStart, pbEnd, sCpuPhyAddrUnused, sCpuPhyAddrUnused);
+                       break;
+               case PVRSRV_CACHE_OP_FLUSH:
+                       OSCPUCacheFlushRangeKM(psDevNode, pbStart, pbEnd, sCpuPhyAddrUnused, sCpuPhyAddrUnused);
+                       break;
+               default:
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache operation type %d",
+                                        __func__, uiCacheOp));
+                       break;
+       }
+
+}
+
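+/* Illustrative example (not part of the original logic): with a hypothetical
+   64-byte cache line, calling CacheOpExecRangeBasedVA() above with
+   pvAddress = 0x1003 and uiSize = 0x10 gives pbStart = 0x1000 and
+   pbEnd = 0x1040, i.e. the single cache line spanned by the unaligned range
+   is maintained in full */
+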
+static INLINE PVRSRV_ERROR CacheOpValidateUMVA(PMR *psPMR,
+                                                                                          IMG_CPU_VIRTADDR pvAddress,
+                                                                                          IMG_DEVMEM_OFFSET_T uiOffset,
+                                                                                          IMG_DEVMEM_SIZE_T uiSize,
+                                                                                          PVRSRV_CACHE_OP uiCacheOp,
+                                                                                          void **ppvOutAddress)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+#if defined(__linux__) && !defined(CACHEFLUSH_NO_KMRBF_USING_UMVA)
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+#endif
+       void __user *pvAddr;
+
+       IMG_BOOL bReadOnlyInvalidate =
+               (uiCacheOp == PVRSRV_CACHE_OP_INVALIDATE) &&
+               !PVRSRV_CHECK_CPU_WRITEABLE(PMR_Flags(psPMR));
+
+       if (!pvAddress || bReadOnlyInvalidate)
+       {
+               /* As pvAddress is optional, NULL is expected from UM/KM requests */
+               /* Also don't allow invalidates for UMVA of read-only memory */
+               pvAddr = NULL;
+               goto e0;
+       }
+
+#if !defined(__linux__) || defined(CACHEFLUSH_NO_KMRBF_USING_UMVA)
+       pvAddr = NULL;
+#else
+       /* Validate the VA; assume only the most basic address-limit check, access_ok() */
+       pvAddr = (void __user *)(uintptr_t)((uintptr_t)pvAddress + uiOffset);
+       if (!access_ok(pvAddr, uiSize))
+       {
+               pvAddr = NULL;
+               if (! mm)
+               {
+                       /* Bad KM request, don't silently ignore */
+                       PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_CPU_ADDR, e0);
+               }
+       }
+       else if (mm)
+       {
+               mmap_read_lock(mm);
+               vma = find_vma(mm, (unsigned long)(uintptr_t)pvAddr);
+
+               if (!vma ||
+                       vma->vm_start > (unsigned long)(uintptr_t)pvAddr ||
+                       vma->vm_end < (unsigned long)(uintptr_t)pvAddr + uiSize ||
+                       vma->vm_private_data != psPMR)
+               {
+                       /*
+                        * The requested range is not fully mapped or does not match the PMR;
+                        * ignore the request's VA.
+                        */
+                       pvAddr = NULL;
+               }
+               mmap_read_unlock(mm);
+       }
+#endif
+
+e0:
+       *ppvOutAddress = (IMG_CPU_VIRTADDR __force) pvAddr;
+       return eError;
+}
+
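+/* Behaviour of CacheOpValidateUMVA() above, in summary: on Linux (unless
+   CACHEFLUSH_NO_KMRBF_USING_UMVA is defined), *ppvOutAddress is set to the
+   supplied VA only when access_ok() passes and, for user processes, the whole
+   range lies inside a VMA whose vm_private_data is the target PMR. It is set
+   to NULL (with PVRSRV_OK) when no VA was supplied, the op is an invalidate on
+   memory that is not CPU-writeable, or the VMA check fails, so the caller falls
+   back to the kernel-mapping / physical path. PVRSRV_ERROR_INVALID_CPU_ADDR is
+   returned only for a KM request (no current->mm) that fails access_ok() */
+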
+static PVRSRV_ERROR CacheOpPMRExec (PMR *psPMR,
+                                                                       IMG_CPU_VIRTADDR pvAddress,
+                                                                       IMG_DEVMEM_OFFSET_T uiOffset,
+                                                                       IMG_DEVMEM_SIZE_T uiSize,
+                                                                       PVRSRV_CACHE_OP uiCacheOp,
+                                                                       IMG_BOOL bIsRequestValidated)
+
+{
+       IMG_HANDLE hPrivOut = NULL;
+       IMG_BOOL bPMRIsSparse;
+       IMG_UINT32 ui32PageIndex;
+       IMG_UINT32 ui32NumOfPages;
+       size_t uiOutSize;       /* Effectively unused */
+       PVRSRV_DEVICE_NODE *psDevNode;
+       IMG_DEVMEM_SIZE_T uiPgAlignedSize;
+       IMG_DEVMEM_OFFSET_T uiPgAlignedOffset;
+       IMG_DEVMEM_OFFSET_T uiCLAlignedEndOffset;
+       IMG_DEVMEM_OFFSET_T uiPgAlignedEndOffset;
+       IMG_DEVMEM_OFFSET_T uiCLAlignedStartOffset;
+       IMG_DEVMEM_OFFSET_T uiPgAlignedStartOffset;
+       IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+       IMG_CPU_PHYADDR asCpuPhyAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+       IMG_CPU_PHYADDR *psCpuPhyAddr = asCpuPhyAddr;
+       IMG_BOOL bIsPMRInfoValid = IMG_FALSE;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_BYTE *pbCpuVirtAddr = NULL;
+       IMG_BOOL *pbValid = abValid;
+
+       if (uiCacheOp == PVRSRV_CACHE_OP_NONE || uiCacheOp == PVRSRV_CACHE_OP_TIMELINE)
+       {
+               return PVRSRV_OK;
+       }
+
+       if (! bIsRequestValidated)
+       {
+               IMG_DEVMEM_SIZE_T uiLPhysicalSize;
+
+               /* Need to validate parameters before proceeding */
+               eError = PMR_PhysicalSize(psPMR, &uiLPhysicalSize);
+               PVR_LOG_RETURN_IF_ERROR(eError, "uiLPhysicalSize");
+
+               PVR_LOG_RETURN_IF_FALSE(((uiOffset+uiSize) <= uiLPhysicalSize), CACHEOP_DEVMEM_OOR_ERROR_STRING, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE);
+
+               eError = PMRLockSysPhysAddresses(psPMR);
+               PVR_LOG_RETURN_IF_ERROR(eError, "PMRLockSysPhysAddresses");
+       }
+
+       /* Fast-track the request if a CPU VA is provided and the CPU ISA supports VA-only maintenance */
+       eError = CacheOpValidateUMVA(psPMR, pvAddress, uiOffset, uiSize, uiCacheOp, (void**)&pbCpuVirtAddr);
+       if (eError == PVRSRV_OK)
+       {
+               pvAddress = pbCpuVirtAddr;
+
+               if (pvAddress && gsCwq.uiCacheOpAddrType == OS_CACHE_OP_ADDR_TYPE_VIRTUAL)
+               {
+                       CacheOpExecRangeBasedVA(PMR_DeviceNode(psPMR), pvAddress, uiSize, uiCacheOp);
+
+                       if (!bIsRequestValidated)
+                       {
+                               eError = PMRUnlockSysPhysAddresses(psPMR);
+                               PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses");
+                       }
+#if defined(CACHEOP_DEBUG)
+                       gsCwq.ui32ServerOpUsedUMVA += 1;
+#endif
+                       return PVRSRV_OK;
+               }
+               else if (pvAddress)
+               {
+                       /* Round the incoming VA (if any) down to the nearest page-aligned VA */
+                       pvAddress = (void*)((uintptr_t)pvAddress & ~((uintptr_t)gsCwq.uiPageSize-1));
+#if defined(CACHEOP_DEBUG)
+                       gsCwq.ui32ServerOpUsedUMVA += 1;
+#endif
+               }
+       }
+       else
+       {
+               /*
+                * This validation pathway exists to accommodate requests that might otherwise
+                * cause the kernel to Oops. KM requests should prevalidate cache maintenance
+                * parameters, but if that fails we would rather fail gracefully than Oops, so
+                * we log that an invalid KM virtual address was supplied, together with any
+                * action taken to mitigate against a kernel Oops.
+                */
+               CACHEOP_PVR_ASSERT(pbCpuVirtAddr == NULL);
+
+               if (gsCwq.uiCacheOpAddrType == OS_CACHE_OP_ADDR_TYPE_PHYSICAL)
+               {
+                       PVR_DPF((PVR_DBG_WARNING,
+                                       "%s: Invalid vaddress 0x%p in CPU d-cache maint. op, using paddress",
+                                       __func__,
+                                       pvAddress));
+
+                       /* We can still proceed as kernel/cpu uses CPU PA for d-cache maintenance */
+                       pvAddress = NULL;
+               }
+               else
+               {
+                       /*
+                        * The approach here is to reacquire the PMR kernel VA and check whether it
+                        * matches the parameter VA. If it does, fail the requested cache maintenance
+                        * operation, because this indicates some kind of internal memory and/or
+                        * meta-data corruption; otherwise, reissue the request using the (re)acquired
+                        * alias PMR kernel VA.
+                        */
+                       if (PMR_IsSparse(psPMR))
+                       {
+                               eError = PMRAcquireSparseKernelMappingData(psPMR,
+                                                                                                                  0,
+                                                                                                                  gsCwq.uiPageSize,
+                                                                                                                  (void **)&pbCpuVirtAddr,
+                                                                                                                  &uiOutSize,
+                                                                                                                  &hPrivOut);
+                               PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireSparseKernelMappingData", e0);
+                       }
+                       else
+                       {
+                               eError = PMRAcquireKernelMappingData(psPMR,
+                                                                                                        0,
+                                                                                                        gsCwq.uiPageSize,
+                                                                                                        (void **)&pbCpuVirtAddr,
+                                                                                                        &uiOutSize,
+                                                                                                        &hPrivOut);
+                               PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireKernelMappingData", e0);
+                       }
+
+                       /* Here, we only compare these CPU virtual addresses at the granularity of the OS page size */
+                       if ((uintptr_t)pbCpuVirtAddr == ((uintptr_t)pvAddress & ~((uintptr_t)gsCwq.uiPageSize-1)))
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                               "%s: Invalid vaddress 0x%p in CPU d-cache maint. op, no alt. so failing request",
+                                               __func__,
+                                               pvAddress));
+
+                               eError = PMRReleaseKernelMappingData(psPMR, hPrivOut);
+                               PVR_LOG_GOTO_WITH_ERROR("PMRReleaseKernelMappingData", eError, PVRSRV_ERROR_INVALID_CPU_ADDR, e0);
+                       }
+                       else if (gsCwq.uiCacheOpAddrType == OS_CACHE_OP_ADDR_TYPE_VIRTUAL)
+                       {
+                               PVR_DPF((PVR_DBG_WARNING,
+                                               "%s: Bad vaddress 0x%p in CPU d-cache maint. op, using reacquired vaddress 0x%p",
+                                               __func__,
+                                               pvAddress,
+                                               pbCpuVirtAddr));
+
+                               /* Note that this might still fail if there is kernel memory/meta-data corruption;
+                                  there is not much we can do here, but at the very least we will be informed of
+                                  this before the kernel Oopses */
+                               CacheOpExecRangeBasedVA(PMR_DeviceNode(psPMR), pbCpuVirtAddr, uiSize, uiCacheOp);
+
+                               eError = PMRReleaseKernelMappingData(psPMR, hPrivOut);
+                               PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData");
+
+                               eError = PVRSRV_OK;
+                               goto e0;
+                       }
+                       else
+                       {
+                               /* At this juncture we have exhausted every possible work-around, but we do
+                                  know that VA reacquisition returned another (alias) page-aligned VA; so,
+                                  expecting the same of future PMRAcquireKernelMappingData() calls, we proceed */
+                               PVR_DPF((PVR_DBG_WARNING,
+                                               "%s: Bad vaddress %p in CPU d-cache maint. op, will use reacquired vaddress",
+                                               __func__,
+                                               pvAddress));
+
+                               eError = PMRReleaseKernelMappingData(psPMR, hPrivOut);
+                               PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData");
+
+                               /* NULL this to force per-page reacquisition down-stream */
+                               pvAddress = NULL;
+                       }
+               }
+       }
+
+       /* NULL the clobbered variable; OK to proceed */
+       pbCpuVirtAddr = NULL;
+       eError = PVRSRV_OK;
+
+       /* Need this for kernel mapping */
+       bPMRIsSparse = PMR_IsSparse(psPMR);
+       psDevNode = PMR_DeviceNode(psPMR);
+
+       /* Round the incoming start offset down, and the end offset up, to the nearest cache-line / page-aligned addresses */
+       uiCLAlignedEndOffset = uiOffset + uiSize;
+       uiCLAlignedEndOffset = PVR_ALIGN(uiCLAlignedEndOffset, (IMG_DEVMEM_SIZE_T)gsCwq.uiLineSize);
+       uiCLAlignedStartOffset = (uiOffset & ~((IMG_DEVMEM_OFFSET_T)gsCwq.uiLineSize-1));
+
+       uiPgAlignedEndOffset = uiCLAlignedEndOffset;
+       uiPgAlignedEndOffset = PVR_ALIGN(uiPgAlignedEndOffset, (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize);
+       uiPgAlignedStartOffset = (uiOffset & ~((IMG_DEVMEM_OFFSET_T)gsCwq.uiPageSize-1));
+       uiPgAlignedSize = uiPgAlignedEndOffset - uiPgAlignedStartOffset;
+
+#if defined(CACHEOP_NO_CACHE_LINE_ALIGNED_ROUNDING)
+       /* For internal debug if cache-line optimised
+          flushing is suspected of causing data corruption */
+       uiCLAlignedStartOffset = uiPgAlignedStartOffset;
+       uiCLAlignedEndOffset = uiPgAlignedEndOffset;
+#endif
+
+       /* Type of allocation backing the PMR data */
+       ui32NumOfPages = uiPgAlignedSize >> gsCwq.uiPageShift;
+       if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+       {
+               /* The pbValid array is allocated first as it is needed in
+                  both physical/virtual cache maintenance methods */
+               pbValid = OSAllocZMem(ui32NumOfPages * sizeof(IMG_BOOL));
+               if (! pbValid)
+               {
+                       pbValid = abValid;
+               }
+               else if (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_VIRTUAL)
+               {
+                       psCpuPhyAddr = OSAllocZMem(ui32NumOfPages * sizeof(IMG_CPU_PHYADDR));
+                       if (! psCpuPhyAddr)
+                       {
+                               psCpuPhyAddr = asCpuPhyAddr;
+                               OSFreeMem(pbValid);
+                               pbValid = abValid;
+                       }
+               }
+       }
+
+       /* We always retrieve PMR data in bulk, up-front, if the number of pages is
+          within the PMR_MAX_TRANSLATION_STACK_ALLOC limit; otherwise we check that
+          a dynamic buffer has been allocated to satisfy requests outside that limit */
+       if (ui32NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC || pbValid != abValid)
+       {
+               if (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_VIRTUAL)
+               {
+                       /* Look-up PMR CpuPhyAddr once, if possible */
+                       eError = PMR_CpuPhysAddr(psPMR,
+                                                                        gsCwq.uiPageShift,
+                                                                        ui32NumOfPages,
+                                                                        uiPgAlignedStartOffset,
+                                                                        psCpuPhyAddr,
+                                                                        pbValid);
+                       if (eError == PVRSRV_OK)
+                       {
+                               bIsPMRInfoValid = IMG_TRUE;
+                       }
+               }
+               else
+               {
+                       /* Look-up PMR per-page validity once, if possible */
+                       eError = PMR_IsOffsetValid(psPMR,
+                                                                          gsCwq.uiPageShift,
+                                                                          ui32NumOfPages,
+                                                                          uiPgAlignedStartOffset,
+                                                                          pbValid);
+                       bIsPMRInfoValid = (eError == PVRSRV_OK) ? IMG_TRUE : IMG_FALSE;
+               }
+       }
+
+       /* For each (possibly non-contiguous) PMR page, carry out the requested cache maintenance operation */
+       for (uiPgAlignedOffset = uiPgAlignedStartOffset, ui32PageIndex = 0;
+                uiPgAlignedOffset < uiPgAlignedEndOffset;
+                uiPgAlignedOffset += (IMG_DEVMEM_OFFSET_T) gsCwq.uiPageSize, ui32PageIndex += 1)
+       {
+
+               if (! bIsPMRInfoValid)
+               {
+                       /* Never cross a page boundary without looking up the corresponding PMR page physical
+                          address and/or page validity, if these were not looked up in bulk up-front */
+                       ui32PageIndex = 0;
+                       if (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_VIRTUAL)
+                       {
+                               eError = PMR_CpuPhysAddr(psPMR,
+                                                                                gsCwq.uiPageShift,
+                                                                                1,
+                                                                                uiPgAlignedOffset,
+                                                                                psCpuPhyAddr,
+                                                                                pbValid);
+                               PVR_LOG_GOTO_IF_ERROR(eError, "PMR_CpuPhysAddr", e0);
+                       }
+                       else
+                       {
+                               eError = PMR_IsOffsetValid(psPMR,
+                                                                                 gsCwq.uiPageShift,
+                                                                                 1,
+                                                                                 uiPgAlignedOffset,
+                                                                                 pbValid);
+                               PVR_LOG_GOTO_IF_ERROR(eError, "PMR_IsOffsetValid", e0);
+                       }
+               }
+
+               /* Skip invalid PMR pages (i.e. sparse) */
+               if (pbValid[ui32PageIndex] == IMG_FALSE)
+               {
+                       CACHEOP_PVR_ASSERT(bPMRIsSparse);
+                       continue;
+               }
+
+               if (pvAddress)
+               {
+                       /* The caller has supplied a KM or UM CpuVA, so use it unconditionally */
+                       pbCpuVirtAddr =
+                               (void *)(uintptr_t)((uintptr_t)pvAddress + (uintptr_t)(uiPgAlignedOffset-uiPgAlignedStartOffset));
+               }
+               /* Skip CpuVA acquire if CacheOp can be maintained entirely using CpuPA */
+               else if (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_PHYSICAL)
+               {
+                       if (bPMRIsSparse)
+                       {
+                               eError =
+                                       PMRAcquireSparseKernelMappingData(psPMR,
+                                                                                                         uiPgAlignedOffset,
+                                                                                                         gsCwq.uiPageSize,
+                                                                                                         (void **)&pbCpuVirtAddr,
+                                                                                                         &uiOutSize,
+                                                                                                         &hPrivOut);
+                               PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireSparseKernelMappingData", e0);
+                       }
+                       else
+                       {
+                               eError =
+                                       PMRAcquireKernelMappingData(psPMR,
+                                                                                               uiPgAlignedOffset,
+                                                                                               gsCwq.uiPageSize,
+                                                                                               (void **)&pbCpuVirtAddr,
+                                                                                               &uiOutSize,
+                                                                                               &hPrivOut);
+                               PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireKernelMappingData", e0);
+                       }
+               }
+
+               /* Issue actual cache maintenance for PMR */
+               CacheOpExecRangeBased(psDevNode,
+                                                       uiCacheOp,
+                                                       pbCpuVirtAddr,
+                                                       (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_VIRTUAL) ?
+                                                               psCpuPhyAddr[ui32PageIndex] : psCpuPhyAddr[0],
+                                                       uiPgAlignedOffset,
+                                                       uiCLAlignedStartOffset,
+                                                       uiCLAlignedEndOffset);
+
+               if (! pvAddress)
+               {
+                       /* The caller did not supply a KM/UM CpuVA, so release the mapping acquired above */
+                       if (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_PHYSICAL)
+                       {
+                               eError = PMRReleaseKernelMappingData(psPMR, hPrivOut);
+                               PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData");
+                       }
+               }
+       }
+
+       if (uiCacheOp == PVRSRV_CACHE_OP_INVALIDATE && uiSize >= 4096)
+       {
+               do_invalid_range(0x00000000, 0x200000);
+       }
+
+e0:
+       if (psCpuPhyAddr != asCpuPhyAddr)
+       {
+               OSFreeMem(psCpuPhyAddr);
+       }
+
+       if (pbValid != abValid)
+       {
+               OSFreeMem(pbValid);
+       }
+
+       if (! bIsRequestValidated)
+       {
+               eError = PMRUnlockSysPhysAddresses(psPMR);
+               PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses");
+       }
+
+       return eError;
+}
+
+static PVRSRV_ERROR CacheOpBatchExecTimeline(PVRSRV_DEVICE_NODE *psDevNode,
+                                                                                        PVRSRV_TIMELINE iTimeline)
+{
+       PVRSRV_ERROR eError;
+       CACHEOP_WORK_ITEM sCacheOpWorkItem = {NULL};
+
+       eError = CacheOpTimelineBind(psDevNode, &sCacheOpWorkItem, iTimeline);
+       PVR_LOG_RETURN_IF_ERROR(eError, "CacheOpTimelineBind");
+
+       eError = CacheOpTimelineExec(&sCacheOpWorkItem);
+       PVR_LOG_IF_ERROR(eError, "CacheOpTimelineExec");
+
+       return eError;
+}
+
+static PVRSRV_ERROR CacheOpBatchExecRangeBased(PVRSRV_DEVICE_NODE *psDevNode,
+                                                                                       PMR **ppsPMR,
+                                                                                       IMG_CPU_VIRTADDR *pvAddress,
+                                                                                       IMG_DEVMEM_OFFSET_T *puiOffset,
+                                                                                       IMG_DEVMEM_SIZE_T *puiSize,
+                                                                                       PVRSRV_CACHE_OP *puiCacheOp,
+                                                                                       IMG_UINT32 ui32NumCacheOps,
+                                                                                       PVRSRV_TIMELINE uiTimeline)
+{
+       IMG_UINT32 ui32Idx;
+       IMG_BOOL bBatchHasTimeline;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+#if defined(CACHEOP_DEBUG)
+       CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
+       sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+#endif
+
+       /* Check if batch has an associated timeline update */
+       bBatchHasTimeline = puiCacheOp[ui32NumCacheOps-1] & PVRSRV_CACHE_OP_TIMELINE;
+       puiCacheOp[ui32NumCacheOps-1] &= ~(PVRSRV_CACHE_OP_TIMELINE);
+
+       for (ui32Idx = 0; ui32Idx < ui32NumCacheOps; ui32Idx++)
+       {
+               /* Fail UM request, don't silently ignore */
+               PVR_GOTO_IF_INVALID_PARAM(puiSize[ui32Idx], eError, e0);
+
+#if defined(CACHEOP_DEBUG)
+               sCacheOpWorkItem.ui64StartTime = OSClockus64();
+#endif
+
+               eError = CacheOpPMRExec(ppsPMR[ui32Idx],
+                                                               pvAddress[ui32Idx],
+                                                               puiOffset[ui32Idx],
+                                                               puiSize[ui32Idx],
+                                                               puiCacheOp[ui32Idx],
+                                                               IMG_FALSE);
+               PVR_LOG_GOTO_IF_ERROR(eError, "CacheOpPMRExec", e0);
+
+#if defined(CACHEOP_DEBUG)
+               sCacheOpWorkItem.ui64EndTime = OSClockus64();
+
+               sCacheOpWorkItem.psDevNode = psDevNode;
+               sCacheOpWorkItem.psPMR = ppsPMR[ui32Idx];
+               sCacheOpWorkItem.uiSize = puiSize[ui32Idx];
+               sCacheOpWorkItem.uiOffset = puiOffset[ui32Idx];
+               sCacheOpWorkItem.uiCacheOp = puiCacheOp[ui32Idx];
+               CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+
+               gsCwq.ui32ServerOps += 1;
+#endif
+       }
+
+e0:
+       if (bBatchHasTimeline)
+       {
+               eError = CacheOpBatchExecTimeline(psDevNode, uiTimeline);
+       }
+
+       return eError;
+}
+
+
+PVRSRV_ERROR CacheOpExec (PPVRSRV_DEVICE_NODE psDevNode,
+                                                 void *pvVirtStart,
+                                                 void *pvVirtEnd,
+                                                 IMG_CPU_PHYADDR sCPUPhysStart,
+                                                 IMG_CPU_PHYADDR sCPUPhysEnd,
+                                                 PVRSRV_CACHE_OP uiCacheOp)
+{
+#if defined(CACHEOP_DEBUG)
+       IMG_UINT64 ui64StartTime = OSClockus64();
+#endif
+
+       switch (uiCacheOp)
+       {
+               case PVRSRV_CACHE_OP_CLEAN:
+                       OSCPUCacheCleanRangeKM(psDevNode, pvVirtStart, pvVirtEnd, sCPUPhysStart, sCPUPhysEnd);
+                       break;
+               case PVRSRV_CACHE_OP_INVALIDATE:
+                       OSCPUCacheInvalidateRangeKM(psDevNode, pvVirtStart, pvVirtEnd, sCPUPhysStart, sCPUPhysEnd);
+                       break;
+               case PVRSRV_CACHE_OP_FLUSH:
+                       OSCPUCacheFlushRangeKM(psDevNode, pvVirtStart, pvVirtEnd, sCPUPhysStart, sCPUPhysEnd);
+                       break;
+               default:
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache operation type %d",
+                                        __func__, uiCacheOp));
+                       break;
+       }
+
+#if defined(CACHEOP_DEBUG)
+       if (CacheOpConfigSupports(CACHEOP_CONFIG_KLOG))
+       {
+               CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
+
+               gsCwq.ui32ServerOps += 1;
+
+               sCacheOpWorkItem.uiOffset = 0;
+               sCacheOpWorkItem.bKMReq = IMG_TRUE;
+               sCacheOpWorkItem.uiCacheOp = uiCacheOp;
+               /* Use information page PMR for logging KM request */
+               sCacheOpWorkItem.psPMR = gsCwq.psInfoPagePMR;
+               sCacheOpWorkItem.psDevNode = psDevNode;
+               sCacheOpWorkItem.ui64StartTime = ui64StartTime;
+               sCacheOpWorkItem.ui64EndTime = OSClockus64();
+               sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+               sCacheOpWorkItem.uiSize = (sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr);
+
+               CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+       }
+#endif
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR CacheOpValExec(PMR *psPMR,
+                                                   IMG_UINT64 uiAddress,
+                                                   IMG_DEVMEM_OFFSET_T uiOffset,
+                                                   IMG_DEVMEM_SIZE_T uiSize,
+                                                   PVRSRV_CACHE_OP uiCacheOp)
+{
+       PVRSRV_ERROR eError;
+       IMG_CPU_VIRTADDR pvAddress = (IMG_CPU_VIRTADDR)(uintptr_t)uiAddress;
+#if defined(CACHEOP_DEBUG)
+       CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
+
+       sCacheOpWorkItem.ui64StartTime = OSClockus64();
+#endif
+
+       eError = CacheOpPMRExec(psPMR,
+                                                       pvAddress,
+                                                       uiOffset,
+                                                       uiSize,
+                                                       uiCacheOp,
+                                                       IMG_FALSE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "CacheOpPMRExec", e0);
+
+#if defined(CACHEOP_DEBUG)
+       sCacheOpWorkItem.ui64EndTime = OSClockus64();
+
+       sCacheOpWorkItem.psDevNode = PMR_DeviceNode(psPMR);
+       sCacheOpWorkItem.psPMR = psPMR;
+       sCacheOpWorkItem.uiSize = uiSize;
+       sCacheOpWorkItem.uiOffset = uiOffset;
+       sCacheOpWorkItem.uiCacheOp = uiCacheOp;
+       sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+       CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+
+       gsCwq.ui32ServerOps += 1;
+#endif
+
+e0:
+       return eError;
+}
+
+PVRSRV_ERROR CacheOpQueue (CONNECTION_DATA *psConnection,
+                                                  PVRSRV_DEVICE_NODE *psDevNode,
+                                                  IMG_UINT32 ui32NumCacheOps,
+                                                  PMR **ppsPMR,
+                                                  IMG_UINT64 *puiAddress,
+                                                  IMG_DEVMEM_OFFSET_T *puiOffset,
+                                                  IMG_DEVMEM_SIZE_T *puiSize,
+                                                  PVRSRV_CACHE_OP *puiCacheOp,
+                                                  IMG_UINT32 ui32OpTimeline)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_TIMELINE uiTimeline = (PVRSRV_TIMELINE)ui32OpTimeline;
+       IMG_CPU_VIRTADDR *pvAddress = (IMG_CPU_VIRTADDR*)(uintptr_t)puiAddress;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       if (!gsCwq.bInit)
+       {
+               PVR_LOG(("CacheOp framework not initialised, failing request"));
+               return PVRSRV_ERROR_NOT_INITIALISED;
+       }
+       else if (! ui32NumCacheOps)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+       /* Ensure any single timeline CacheOp request is processed immediately */
+       else if (ui32NumCacheOps == 1 && puiCacheOp[0] == PVRSRV_CACHE_OP_TIMELINE)
+       {
+               eError = CacheOpBatchExecTimeline(psDevNode, uiTimeline);
+       }
+       /* This is the default entry for all client requests */
+       else
+       {
+               if (!(gsCwq.eConfig & (CACHEOP_CONFIG_LAST-1)))
+               {
+                       /* default the configuration before execution */
+                       CacheOpConfigUpdate(CACHEOP_CONFIG_DEFAULT);
+               }
+
+               eError =
+                       CacheOpBatchExecRangeBased(psDevNode,
+                                                                          ppsPMR,
+                                                                          pvAddress,
+                                                                          puiOffset,
+                                                                          puiSize,
+                                                                          puiCacheOp,
+                                                                          ui32NumCacheOps,
+                                                                          uiTimeline);
+       }
+
+       return eError;
+}
+
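+/* Illustrative (hypothetical) batch, not part of the original code: two flush
+   ranges where the last entry also signals a software timeline by OR-ing in
+   PVRSRV_CACHE_OP_TIMELINE, matching the check in CacheOpBatchExecRangeBased():
+
+       PVRSRV_CACHE_OP auiOps[2] = {
+           PVRSRV_CACHE_OP_FLUSH,
+           PVRSRV_CACHE_OP_FLUSH | PVRSRV_CACHE_OP_TIMELINE
+       };
+       eError = CacheOpQueue(psConnection, psDevNode, 2, apsPMR, aui64Addr,
+                             auiOffset, auiSize, auiOps, ui32Timeline);
+
+   (apsPMR, aui64Addr, auiOffset, auiSize and ui32Timeline are hypothetical
+   caller-owned arrays/values.) A single-entry batch whose only op is
+   PVRSRV_CACHE_OP_TIMELINE is instead handled immediately via
+   CacheOpBatchExecTimeline() */
+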
+PVRSRV_ERROR CacheOpLog (PMR *psPMR,
+                                                IMG_UINT64 puiAddress,
+                                                IMG_DEVMEM_OFFSET_T uiOffset,
+                                                IMG_DEVMEM_SIZE_T uiSize,
+                                                IMG_UINT64 ui64StartTime,
+                                                IMG_UINT64 ui64EndTime,
+                                                PVRSRV_CACHE_OP uiCacheOp)
+{
+#if defined(CACHEOP_DEBUG)
+       CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
+       PVR_UNREFERENCED_PARAMETER(puiAddress);
+
+       sCacheOpWorkItem.psDevNode = PMR_DeviceNode(psPMR);
+       sCacheOpWorkItem.psPMR = psPMR;
+       sCacheOpWorkItem.uiSize = uiSize;
+       sCacheOpWorkItem.uiOffset = uiOffset;
+       sCacheOpWorkItem.uiCacheOp = uiCacheOp;
+       sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+
+       sCacheOpWorkItem.ui64StartTime = ui64StartTime;
+       sCacheOpWorkItem.ui64EndTime = ui64EndTime;
+
+       gsCwq.ui32ClientOps += 1;
+
+       CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+#else
+       PVR_UNREFERENCED_PARAMETER(psPMR);
+       PVR_UNREFERENCED_PARAMETER(uiSize);
+       PVR_UNREFERENCED_PARAMETER(uiOffset);
+       PVR_UNREFERENCED_PARAMETER(uiCacheOp);
+       PVR_UNREFERENCED_PARAMETER(puiAddress);
+       PVR_UNREFERENCED_PARAMETER(ui64StartTime);
+       PVR_UNREFERENCED_PARAMETER(ui64EndTime);
+#endif
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR CacheOpInit2 (void)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+       /* AppHint read/write must not run concurrently; this lock protects against that */
+       eError = OSLockCreate((POS_LOCK*)&gsCwq.hConfigLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0);
+
+#if defined(CACHEFLUSH_ISA_SUPPORTS_UM_FLUSH)
+       gsCwq.bSupportsUMFlush = IMG_TRUE;
+#else
+       gsCwq.bSupportsUMFlush = IMG_FALSE;
+#endif
+
+       gsCwq.pui32InfoPage = psPVRSRVData->pui32InfoPage;
+       gsCwq.psInfoPagePMR = psPVRSRVData->psInfoPagePMR;
+
+       /* Normally, platforms should use their default configurations; put exceptions here */
+#if defined(__i386__) || defined(__x86_64__)
+#if !defined(TC_MEMORY_CONFIG)
+       CacheOpConfigUpdate(CACHEOP_CONFIG_URBF | CACHEOP_CONFIG_KDF);
+#else
+       CacheOpConfigUpdate(CACHEOP_CONFIG_KDF);
+#endif
+#else /* defined(__i386__) || defined(__x86_64__) */
+       CacheOpConfigUpdate(CACHEOP_CONFIG_DEFAULT);
+#endif
+
+       /* Initialise the remaining occupants of the CacheOp information page */
+       gsCwq.pui32InfoPage[CACHEOP_INFO_PGSIZE]   = (IMG_UINT32)gsCwq.uiPageSize;
+       gsCwq.pui32InfoPage[CACHEOP_INFO_LINESIZE] = (IMG_UINT32)gsCwq.uiLineSize;
+
+       /* Set before spawning thread */
+       gsCwq.bInit = IMG_TRUE;
+
+       {
+               DI_ITERATOR_CB sIterator = {.pfnShow = CacheOpConfigRead};
+               /* Writing the unsigned integer binary encoding of CACHEOP_CONFIG
+                  into this file cycles through the available configuration(s) */
+               eError = DICreateEntry("cacheop_config", NULL, &sIterator, NULL,
+                                      DI_ENTRY_TYPE_GENERIC, &gsCwq.psConfigTune);
+               PVR_LOG_GOTO_IF_FALSE(gsCwq.psConfigTune, "DICreateEntry", e0);
+       }
+
+       /* Register the CacheOp framework (re)configuration handlers */
+       PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_CacheOpConfig,
+                                                                               CacheOpConfigQuery,
+                                                                               CacheOpConfigSet,
+                                                                               APPHINT_OF_DRIVER_NO_DEVICE,
+                                                                               (void *) APPHINT_ID_CacheOpConfig);
+
+       PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_CacheOpUMKMThresholdSize,
+                                                                               CacheOpConfigQuery,
+                                                                               CacheOpConfigSet,
+                                                                               APPHINT_OF_DRIVER_NO_DEVICE,
+                                                                               (void *) APPHINT_ID_CacheOpUMKMThresholdSize);
+
+       return PVRSRV_OK;
+e0:
+       CacheOpDeInit2();
+       return eError;
+}
+
+void CacheOpDeInit2 (void)
+{
+       gsCwq.bInit = IMG_FALSE;
+
+       if (gsCwq.hConfigLock)
+       {
+               OSLockDestroy(gsCwq.hConfigLock);
+               gsCwq.hConfigLock = NULL;
+       }
+
+       if (gsCwq.psConfigTune)
+       {
+               DIDestroyEntry(gsCwq.psConfigTune);
+               gsCwq.psConfigTune = NULL;
+       }
+
+       gsCwq.pui32InfoPage = NULL;
+       gsCwq.psInfoPagePMR = NULL;
+}
+
+PVRSRV_ERROR CacheOpInit (void)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       gsCwq.uiPageSize = OSGetPageSize();
+       gsCwq.uiPageShift = OSGetPageShift();
+       gsCwq.uiLineSize = OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE);
+       gsCwq.uiLineShift = ExactLog2(gsCwq.uiLineSize);
+       PVR_LOG_RETURN_IF_FALSE((gsCwq.uiLineSize && gsCwq.uiPageSize && gsCwq.uiPageShift), "", PVRSRV_ERROR_INIT_FAILURE);
+       gsCwq.uiCacheOpAddrType = OSCPUCacheOpAddressType();
+
+#if defined(CACHEOP_DEBUG)
+       /* DebugFS file read-out must not run concurrently; this lock protects against that */
+       eError = OSLockCreate((POS_LOCK*)&gsCwq.hStatsExecLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0);
+
+       gsCwq.i32StatsExecWriteIdx = 0;
+       OSCachedMemSet(gsCwq.asStatsExecuted, 0, sizeof(gsCwq.asStatsExecuted));
+
+       {
+               DI_ITERATOR_CB sIterator = {.pfnShow = CacheOpStatsExecLogRead};
+               /* File captures the most recent subset of CacheOp(s) executed */
+               eError = DICreateEntry("cacheop_history", NULL, &sIterator, NULL,
+                                      DI_ENTRY_TYPE_GENERIC, &gsCwq.psDIEntry);
+               PVR_LOG_GOTO_IF_ERROR(eError, "DICreateEntry", e0);
+       }
+e0:
+#endif
+       return eError;
+}
+
+void CacheOpDeInit (void)
+{
+#if defined(CACHEOP_DEBUG)
+       if (gsCwq.hStatsExecLock)
+       {
+               OSLockDestroy(gsCwq.hStatsExecLock);
+               gsCwq.hStatsExecLock = NULL;
+       }
+
+       if (gsCwq.psDIEntry)
+       {
+               DIDestroyEntry(gsCwq.psDIEntry);
+               gsCwq.psDIEntry = NULL;
+       }
+#endif
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/connection_server.c b/drivers/gpu/drm/img/img-rogue/services/server/common/connection_server.c
new file mode 100644 (file)
index 0000000..92e0551
--- /dev/null
@@ -0,0 +1,491 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server side connection management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Handles connections coming from the client and the management
+                of connection-based information
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "handle.h"
+#include "pvrsrv.h"
+#include "connection_server.h"
+#include "osconnection_server.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "sync_server.h"
+#include "process_stats.h"
+#include "pdump_km.h"
+#include "osfunc.h"
+#include "tlstream.h"
+#include "rgxhwperf_common.h"
+
+/* PID associated with Connection currently being purged by Cleanup thread */
+static IMG_PID gCurrentPurgeConnectionPid;
+
+static PVRSRV_ERROR ConnectionDataDestroy(CONNECTION_DATA *psConnection)
+{
+       PVRSRV_ERROR eError;
+       PROCESS_HANDLE_BASE *psProcessHandleBase;
+       IMG_UINT64 ui64MaxBridgeTime;
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+       if (psPVRSRVData->bUnload)
+       {
+               /* driver is unloading so do not allow the bridge lock to be released */
+               ui64MaxBridgeTime = 0;
+       }
+       else
+       {
+               ui64MaxBridgeTime = CONNECTION_DEFERRED_CLEANUP_TIMESLICE_NS;
+       }
+
+       PVR_ASSERT(psConnection != NULL);
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psConnection, "psConnection");
+
+       /* Close HWPerfClient stream here even though we created it in
+        * PVRSRVConnectKM(). */
+       if (psConnection->hClientTLStream)
+       {
+               TLStreamClose(psConnection->hClientTLStream);
+               psConnection->hClientTLStream = NULL;
+               PVR_DPF((PVR_DBG_MESSAGE, "Destroyed private stream."));
+       }
+
+       /* Get process handle base to decrement the refcount */
+       psProcessHandleBase = psConnection->psProcessHandleBase;
+
+       if (psProcessHandleBase != NULL)
+       {
+               eError = PVRSRVReleaseProcessHandleBase(psProcessHandleBase, psConnection->pid,
+                                                       ui64MaxBridgeTime);
+               PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVReleaseProcessHandleBase");
+
+               psConnection->psProcessHandleBase = NULL;
+       }
+
+       /* Free handle base for this connection */
+       if (psConnection->psHandleBase != NULL)
+       {
+               eError = PVRSRVFreeHandleBase(psConnection->psHandleBase, ui64MaxBridgeTime);
+               /*
+                * If we get PVRSRV_ERROR_RETRY we need to pass this back to the caller
+                * who will schedule a retry.
+                * Do not log this as it is an expected exception.
+                * This can occur if the Firmware is still processing a workload from
+                * the client when a tear-down request is received.
+                * Retrying will allow the in-flight work to be completed and the
+                * tear-down request can be completed when the FW is no longer busy.
+                */
+               if (PVRSRV_ERROR_RETRY == eError)
+               {
+                       return eError;
+               }
+               else
+               {
+                       PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVFreeHandleBase:2");
+               }
+
+               psConnection->psHandleBase = NULL;
+       }
+
+       if (psConnection->psSyncConnectionData != NULL)
+       {
+               SyncUnregisterConnection(psConnection->psSyncConnectionData);
+               psConnection->psSyncConnectionData = NULL;
+       }
+
+       if (psConnection->psPDumpConnectionData != NULL)
+       {
+               PDumpUnregisterConnection(psConnection->psPDumpConnectionData);
+               psConnection->psPDumpConnectionData = NULL;
+       }
+
+       /* Call environment specific connection data deinit function */
+       if (psConnection->hOsPrivateData != NULL)
+       {
+               eError = OSConnectionPrivateDataDeInit(psConnection->hOsPrivateData);
+               PVR_LOG_RETURN_IF_ERROR(eError, "OSConnectionPrivateDataDeInit");
+
+               psConnection->hOsPrivateData = NULL;
+       }
+
+       /* Close the PID stats entry as late as possible to catch all frees */
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+       if (psConnection->hProcessStats != NULL)
+       {
+               PVRSRVStatsDeregisterProcess(psConnection->hProcessStats);
+               psConnection->hProcessStats = NULL;
+       }
+#endif
+
+       OSFreeMemNoStats(psConnection);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVCommonConnectionConnect(void **ppvPrivData, void *pvOSData)
+{
+       CONNECTION_DATA *psConnection;
+       PVRSRV_ERROR eError;
+       PROCESS_HANDLE_BASE *psProcessHandleBase;
+
+       /* Allocate connection data area, no stats since process not registered yet */
+       psConnection = OSAllocZMemNoStats(sizeof(*psConnection));
+       PVR_LOG_RETURN_IF_NOMEM(psConnection, "psConnection");
+
+       /* Allocate process statistics as early as possible to catch all allocs */
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+       eError = PVRSRVStatsRegisterProcess(&psConnection->hProcessStats);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVStatsRegisterProcess", failure);
+#endif
+
+       /* Call environment specific connection data init function */
+       eError = OSConnectionPrivateDataInit(&psConnection->hOsPrivateData, pvOSData);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSConnectionPrivateDataInit", failure);
+
+       psConnection->pid = OSGetCurrentClientProcessIDKM();
+       psConnection->vpid = OSGetCurrentVirtualProcessID();
+       psConnection->tid = (IMG_UINT32)OSGetCurrentClientThreadIDKM();
+       OSStringLCopy(psConnection->pszProcName, OSGetCurrentClientProcessNameKM(), PVRSRV_CONNECTION_PROCESS_NAME_LEN);
+
+#if defined(SUPPORT_DMA_TRANSFER)
+       OSLockCreate(&psConnection->hDmaReqLock);
+
+       eError = OSEventObjectCreate("Dma transfer cleanup event object",
+                                                                                                                        &psConnection->hDmaEventObject);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", failure);
+
+       OSAtomicWrite(&psConnection->ui32NumDmaTransfersInFlight, 0);
+       psConnection->bAcceptDmaRequests = IMG_TRUE;
+#endif
+
+       /* Register this connection with the sync core */
+       eError = SyncRegisterConnection(&psConnection->psSyncConnectionData);
+       PVR_LOG_GOTO_IF_ERROR(eError, "SyncRegisterConnection", failure);
+
+       /*
+        * Register this connection and Sync PDump callback with
+        * the pdump core. Pass in the Sync connection data.
+        */
+       eError = PDumpRegisterConnection(psConnection->psSyncConnectionData,
+                                         SyncConnectionPDumpSyncBlocks,
+                                         &psConnection->psPDumpConnectionData);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PDumpRegisterConnection", failure);
+
+       /* Allocate handle base for this connection */
+       eError = PVRSRVAllocHandleBase(&psConnection->psHandleBase,
+                                      PVRSRV_HANDLE_BASE_TYPE_CONNECTION);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVAllocHandleBase", failure);
+
+       /* Get the process handle base (it will be allocated if it does not already exist) */
+       eError = PVRSRVAcquireProcessHandleBase(psConnection->pid, &psProcessHandleBase);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVAcquireProcessHandleBase", failure);
+
+       /* hConnectionsLock now resides in PVRSRV_DEVICE_NODE */
+       {
+               IMG_BOOL bHostStreamIsNull;
+               PVRSRV_RGXDEV_INFO  *psRgxDevInfo;
+               PVRSRV_DEVICE_NODE      *psDevNode = OSGetDevNode(psConnection);
+
+               OSLockAcquire(psDevNode->hConnectionsLock);
+               dllist_add_to_tail(&psDevNode->sConnections, &psConnection->sConnectionListNode);
+#if defined(DEBUG) || defined(PDUMP)
+               PVR_LOG(("%s connected - (devID = %u)", psConnection->pszProcName,
+                       psDevNode->sDevId.ui32InternalID));
+#endif
+               OSLockRelease(psDevNode->hConnectionsLock);
+
+               if (!PVRSRV_VZ_MODE_IS(GUEST))
+               {
+                       psRgxDevInfo = _RGX_DEVICE_INFO_FROM_NODE(psDevNode);
+
+                       OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream);
+                       bHostStreamIsNull = (IMG_BOOL)(psRgxDevInfo->hHWPerfHostStream == NULL);
+                       OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream);
+
+                       if (!bHostStreamIsNull)
+                       {
+                               if (TLStreamIsOpenForReading(psRgxDevInfo->hHWPerfHostStream))
+                               {
+                                       /* Announce this client connection in the host stream, if event mask is set */
+                                       RGXSRV_HWPERF_HOST_CLIENT_INFO_PROCESS_NAME(psDevNode, psConnection->pid, psConnection->pszProcName);
+                               }
+                       }
+               }
+       }
+
+       psConnection->psProcessHandleBase = psProcessHandleBase;
+
+       *ppvPrivData = psConnection;
+
+       return PVRSRV_OK;
+
+failure:
+       ConnectionDataDestroy(psConnection);
+
+       return eError;
+}
+
+static PVRSRV_ERROR _CleanupThreadPurgeConnectionData(void *pvConnectionData)
+{
+       PVRSRV_ERROR eErrorConnection, eErrorKernel;
+       CONNECTION_DATA *psConnectionData = pvConnectionData;
+
+       gCurrentPurgeConnectionPid = psConnectionData->pid;
+
+       eErrorConnection = ConnectionDataDestroy(psConnectionData);
+       if (eErrorConnection != PVRSRV_OK)
+       {
+               if (eErrorConnection == PVRSRV_ERROR_RETRY)
+               {
+                       PVR_DPF((PVR_DBG_MESSAGE,
+                                "%s: Failed to purge connection data %p "
+                                "(deferring destruction)",
+                                __func__,
+                                psConnectionData));
+               }
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_MESSAGE,
+                        "%s: Connection data %p deferred destruction finished",
+                        __func__,
+                        psConnectionData));
+       }
+
+       /* Check whether it is possible to resize the global handle base */
+       eErrorKernel = PVRSRVPurgeHandles(KERNEL_HANDLE_BASE);
+       PVR_LOG_IF_ERROR(eErrorKernel, "PVRSRVPurgeHandles");
+
+       gCurrentPurgeConnectionPid = 0;
+
+       return eErrorConnection;
+}
+
+#if defined(SUPPORT_DMA_TRANSFER)
+static void WaitForOutstandingDma(CONNECTION_DATA *psConnectionData)
+{
+
+       PVRSRV_ERROR eError;
+       IMG_HANDLE hEvent;
+       IMG_UINT32 ui32Tries = 100;
+
+#if defined(DMA_VERBOSE)
+       PVR_DPF((PVR_DBG_ERROR,
+                                       "Waiting on %d DMA transfers in flight...", OSAtomicRead(&psConnectionData->ui32NumDmaTransfersInFlight)));
+#endif
+
+       eError = OSEventObjectOpen(psConnectionData->hDmaEventObject, &hEvent);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to open event object", __func__));
+               return;
+       }
+
+       while (OSAtomicRead(&psConnectionData->ui32NumDmaTransfersInFlight) != 0)
+       {
+               /*
+                * OSEventObjectWaitKernel() currently does not work properly here (the
+                * wait time is not as requested), so OSSleepms() is used instead:
+                *
+                * #define DMA_TRANSFER_TIMEOUT_US (5000000ULL)
+                * OSEventObjectWaitKernel(hEvent, DMA_TRANSFER_TIMEOUT_US);
+                */
+               OSSleepms(50);
+               if (!ui32Tries)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Timeout while waiting on outstanding DMA transfers!", __func__));
+                       break;
+               }
+
+               ui32Tries--;
+       }
+
+       OSEventObjectClose(hEvent);
+}
+#endif
+
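+/* Tear-down path for a client connection: unlink it from the device's
+ * connection list, notify PDump if this was the PDump control client, stop
+ * accepting new DMA requests and drain outstanding ones, then hand the
+ * connection data to the cleanup thread so its destruction can be retried
+ * if resources are still in use.
+ */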
+void PVRSRVCommonConnectionDisconnect(void *pvDataPtr)
+{
+       CONNECTION_DATA *psConnectionData = pvDataPtr;
+       PVRSRV_DEVICE_NODE *psDevNode = OSGetDevNode(psConnectionData);
+
+       OSLockAcquire(psDevNode->hConnectionsLock);
+       dllist_remove_node(&psConnectionData->sConnectionListNode);
+       OSLockRelease(psDevNode->hConnectionsLock);
+
+       /* Notify the PDump core if the pdump control client is disconnecting */
+       if (psConnectionData->ui32ClientFlags & SRV_FLAGS_PDUMPCTRL)
+       {
+               PDumpDisconnectionNotify(psDevNode);
+       }
+#if defined(SUPPORT_DMA_TRANSFER)
+       OSLockAcquire(psConnectionData->hDmaReqLock);
+
+       psConnectionData->bAcceptDmaRequests = IMG_FALSE;
+
+       OSLockRelease(psConnectionData->hDmaReqLock);
+
+       WaitForOutstandingDma(psConnectionData);
+
+       OSEventObjectDestroy(psConnectionData->hDmaEventObject);
+       OSLockDestroy(psConnectionData->hDmaReqLock);
+#endif
+
+#if defined(DEBUG) || defined(PDUMP)
+       PVR_LOG(("%s disconnected - (devID = %u)", psConnectionData->pszProcName,
+               psDevNode->sDevId.ui32InternalID));
+#endif
+
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+       if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK)
+#endif
+       {
+               /* Defer the release of the connection data */
+               psConnectionData->sCleanupThreadFn.pfnFree = _CleanupThreadPurgeConnectionData;
+               psConnectionData->sCleanupThreadFn.pvData = psConnectionData;
+               psConnectionData->sCleanupThreadFn.bDependsOnHW = IMG_FALSE;
+               CLEANUP_THREAD_SET_RETRY_COUNT(&psConnectionData->sCleanupThreadFn,
+                                              CLEANUP_THREAD_RETRY_COUNT_DEFAULT);
+               PVRSRVCleanupThreadAddWork(&psConnectionData->sCleanupThreadFn);
+       }
+}
+
+IMG_PID PVRSRVGetPurgeConnectionPid(void)
+{
+       return gCurrentPurgeConnectionPid;
+}
+
+/* Prefix for debug messages about Active Connections */
+#define DEBUG_DUMP_CONNECTION_FORMAT_STR " P%d-V%d-T%d-%s,"
+#define CONNECTIONS_PREFIX               "Connections Device ID:%u(%d)"
+#define MAX_CONNECTIONS_PREFIX            (29)
+#define MAX_DEBUG_DUMP_CONNECTION_STR_LEN (1+10+10+10+7+PVRSRV_CONNECTION_PROCESS_NAME_LEN)
+#define MAX_DEBUG_DUMP_STRING_LEN         (1+MAX_CONNECTIONS_PREFIX+(3*MAX_DEBUG_DUMP_CONNECTION_STR_LEN))
+
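+/* Dump the list of active connections for a device. Each entry is packed
+ * into a line buffer as " P<pid>-V<vpid>-T<tid>-<procname>," and the buffer
+ * is flushed to the debug output whenever it can no longer hold another
+ * entry.
+ */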
+void PVRSRVConnectionDebugNotify(PVRSRV_DEVICE_NODE *psDevNode,
+                                 DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                 void *pvDumpDebugFile)
+{
+       PDLLIST_NODE pNext, pNode;
+
+       /* We must check for an initialised device before accessing its mutex.
+        * The mutex is initialised as part of DeviceInitialize() which occurs
+        * on first access to the device node.
+        */
+       if (psDevNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE)
+       {
+               PVR_DUMPDEBUG_LOG("Connections: No Devices: No active connections");
+               return;
+       }
+
+       OSLockAcquire(psDevNode->hConnectionsLock);
+       if (dllist_is_empty(&psDevNode->sConnections))
+       {
+               PVR_DUMPDEBUG_LOG(CONNECTIONS_PREFIX " No active connections",
+                                                 (unsigned char)psDevNode->sDevId.ui32InternalID,
+                                                 (unsigned char)psDevNode->sDevId.i32OsDeviceID);
+       }
+       else
+       {
+               IMG_CHAR sActiveConnections[MAX_DEBUG_DUMP_STRING_LEN];
+               IMG_UINT16 i, uiPos = 0;
+               IMG_BOOL bPrinted = IMG_FALSE;
+               size_t uiSize = sizeof(sActiveConnections);
+
+               IMG_CHAR szTmpConBuff[MAX_CONNECTIONS_PREFIX + 1];
+               i = OSSNPrintf(szTmpConBuff,
+                                          MAX_CONNECTIONS_PREFIX,
+                                          CONNECTIONS_PREFIX,
+                                          (unsigned char)psDevNode->sDevId.ui32InternalID,
+                                          (unsigned char)psDevNode->sDevId.i32OsDeviceID);
+               OSStringLCopy(sActiveConnections+uiPos, szTmpConBuff, uiSize);
+
+               /* Move the write offset to the end of the current string */
+               uiPos += i;
+               /* Update the amount of remaining space available to copy into */
+               uiSize -= i;
+
+               dllist_foreach_node(&psDevNode->sConnections, pNode, pNext)
+               {
+                       CONNECTION_DATA *sData = IMG_CONTAINER_OF(pNode, CONNECTION_DATA, sConnectionListNode);
+
+                       IMG_CHAR sTmpBuff[MAX_DEBUG_DUMP_CONNECTION_STR_LEN];
+                       i = OSSNPrintf(sTmpBuff, MAX_DEBUG_DUMP_CONNECTION_STR_LEN,
+                               DEBUG_DUMP_CONNECTION_FORMAT_STR, sData->pid, sData->vpid, sData->tid, sData->pszProcName);
+                       i = MIN(MAX_DEBUG_DUMP_CONNECTION_STR_LEN, i);
+                       bPrinted = IMG_FALSE;
+
+                       OSStringLCopy(sActiveConnections+uiPos, sTmpBuff, uiSize);
+
+                       /* Move the write offset to the end of the current string */
+                       uiPos += i;
+                       /* Update the amount of remaining space available to copy into */
+                       uiSize -= i;
+
+                       /* If there is not enough space to add another connection to this line, output the line */
+                       if (uiSize <= MAX_DEBUG_DUMP_CONNECTION_STR_LEN)
+                       {
+                               PVR_DUMPDEBUG_LOG("%s", sActiveConnections);
+
+                               /*
+                                * Remove the "Connections:" prefix from the buffer.
+                                * Leave the subsequent buffer contents indented by the same
+                                * amount to aid in interpreting the debug output.
+                                */
+                               uiPos = sizeof(CONNECTIONS_PREFIX) - 1;
+                               /* Reset the amount of space available to copy into */
+                               uiSize = MAX_DEBUG_DUMP_STRING_LEN - uiPos;
+                               bPrinted = IMG_TRUE;
+                       }
+               }
+
+               /* Only print the current line if it hasn't already been printed */
+               if (!bPrinted)
+               {
+                       /* Strip off the final comma */
+                       sActiveConnections[OSStringNLength(sActiveConnections, MAX_DEBUG_DUMP_STRING_LEN) - 1] = '\0';
+                       PVR_DUMPDEBUG_LOG("%s", sActiveConnections);
+               }
+#undef MAX_DEBUG_DUMP_STRING_LEN
+#undef MAX_DEBUG_DUMP_CONNECTION_STR_LEN
+       }
+       OSLockRelease(psDevNode->hConnectionsLock);
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/debug_common.c b/drivers/gpu/drm/img/img-rogue/services/server/common/debug_common.c
new file mode 100644 (file)
index 0000000..ee17281
--- /dev/null
@@ -0,0 +1,1646 @@
+/*************************************************************************/ /*!
+@File
+@Title          Debug Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Creates common debug info entries.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__linux__)
+#include <errno.h>
+#endif /* #if !defined(__linux__) */
+
+#include "debug_common.h"
+#include "pvrsrv.h"
+#include "di_server.h"
+#include "lists.h"
+#include "pvrversion.h"
+#include "rgx_options.h"
+#include "allocmem.h"
+#include "rgxfwutils.h"
+
+#ifdef SUPPORT_RGX
+#include "rgxdevice.h"
+#include "rgxdebug.h"
+#include "rgxinit.h"
+#include "rgxmmudefs_km.h"
+static IMG_HANDLE ghGpuUtilUserDebugFS;
+#endif
+
+static DI_ENTRY *gpsVersionDIEntry;
+static DI_ENTRY *gpsStatusDIEntry;
+
+#ifdef SUPPORT_VALIDATION
+static DI_ENTRY *gpsTestMemLeakDIEntry;
+#endif /* SUPPORT_VALIDATION */
+#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON)
+static DI_ENTRY *gpsDebugLevelDIEntry;
+#endif /* defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) */
+
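+/* Adapter that lets the debug-request dump path print through the Debug
+ * Info framework: the printf-style format is given a trailing newline and
+ * the variadic arguments are forwarded to DIVPrintf().
+ */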
+static void _DumpDebugDIPrintfWrapper(void *pvDumpDebugFile, const IMG_CHAR *pszFormat, ...)
+{
+       IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+       va_list ArgList;
+
+       OSSNPrintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, "%s\n", pszFormat);
+
+       va_start(ArgList, pszFormat);
+       DIVPrintf(pvDumpDebugFile, szBuffer, ArgList);
+       va_end(ArgList);
+}
+
+/*************************************************************************/ /*!
+ Version DebugFS entry
+*/ /**************************************************************************/
+
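+/* The version entry follows a seq_file-style iterator contract: position 0
+ * yields DI_START_TOKEN (driver-wide version info) and each subsequent
+ * position maps to one device node, located by walking the device list
+ * under the read lock with the compare callback below.
+ */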
+static void *_DebugVersionCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode,
+                                          va_list va)
+{
+       IMG_UINT64 *pui64CurrentPosition = va_arg(va, IMG_UINT64 *);
+       IMG_UINT64 ui64Position = va_arg(va, IMG_UINT64);
+       IMG_UINT64 ui64CurrentPosition = *pui64CurrentPosition;
+
+       (*pui64CurrentPosition)++;
+
+       return (ui64CurrentPosition == ui64Position) ? psDevNode : NULL;
+}
+
+static void *_VersionDIStart(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos)
+{
+       PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry);
+       IMG_UINT64 uiCurrentPosition = 1;
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+
+       if (psPVRSRVData == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "psPVRSRVData = NULL"));
+               return NULL;
+       }
+
+       if (*pui64Pos == 0)
+       {
+               return DI_START_TOKEN;
+       }
+
+       OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock);
+       psDeviceNode = List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+                                             _DebugVersionCompare_AnyVaCb,
+                                             &uiCurrentPosition,
+                                             *pui64Pos);
+       OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock);
+
+       return psDeviceNode;
+}
+
+static void _VersionDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvPriv)
+{
+       PVR_UNREFERENCED_PARAMETER(psEntry);
+       PVR_UNREFERENCED_PARAMETER(pvPriv);
+}
+
+static void *_VersionDINext(OSDI_IMPL_ENTRY *psEntry, void *pvPriv,
+                            IMG_UINT64 *pui64Pos)
+{
+       PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry);
+       IMG_UINT64 uiCurrentPosition = 1;
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+
+       (*pui64Pos)++;
+
+       OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock);
+       psDeviceNode = List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+                                             _DebugVersionCompare_AnyVaCb,
+                                             &uiCurrentPosition,
+                                             *pui64Pos);
+       OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock);
+
+       return psDeviceNode;
+}
+
+#define DI_PRINT_VERSION_FMTSPEC \
+               "%s Version: %u.%u @ %u (%s) build options: 0x%08x %s\n"
+#define STR_DEBUG   "debug"
+#define STR_RELEASE "release"
+
+#if defined(DEBUG) || defined(SUPPORT_VALIDATION)
+#define BUILD_OPT_LEN 80
+
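+/* Accumulate build-option names into a fixed BUILD_OPT_LEN line buffer,
+ * flushing the buffer to the DI entry whenever the next option would not
+ * fit, so long option lists wrap across multiple output lines.
+ */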
+static inline void _AppendOptionStr(IMG_CHAR pszBuildOptions[], const IMG_CHAR* str, OSDI_IMPL_ENTRY *psEntry, IMG_UINT32* pui32BuildOptionLen)
+{
+       IMG_UINT32 ui32BuildOptionLen = *pui32BuildOptionLen;
+       const IMG_UINT32 strLen = OSStringLength(str);
+       const IMG_UINT32 optStrLen = sizeof(IMG_CHAR) * (BUILD_OPT_LEN-1);
+
+       if ((ui32BuildOptionLen + strLen) > optStrLen)
+       {
+               pszBuildOptions[ui32BuildOptionLen] = '\0';
+               DIPrintf(psEntry, "%s\n", pszBuildOptions);
+               ui32BuildOptionLen = 0;
+       }
+       if (strLen < optStrLen)
+       {
+               OSStringLCopy(pszBuildOptions+ui32BuildOptionLen, str, strLen);
+               ui32BuildOptionLen += strLen - 1;
+       }
+       *pui32BuildOptionLen = ui32BuildOptionLen;
+}
+#endif /* DEBUG || SUPPORT_VALIDATION */
+
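+/* Show handler for the version entry: the DI_START_TOKEN pass prints the
+ * UM/KM driver versions (or the KM-only fallback when no mismatch has been
+ * recorded), and each device pass prints the device name and ID, any device
+ * version strings and, on RGX, the firmware version and build options
+ * reported through the compatibility checks.
+ */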
+static int _VersionDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvPriv)
+{
+       PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry);
+
+       if (pvPriv == DI_START_TOKEN)
+       {
+               if (psPVRSRVData->sDriverInfo.bIsNoMatch)
+               {
+                       const BUILD_INFO *psBuildInfo;
+
+                       psBuildInfo = &psPVRSRVData->sDriverInfo.sUMBuildInfo;
+                       DIPrintf(psEntry, DI_PRINT_VERSION_FMTSPEC,
+                                "UM Driver",
+                                PVRVERSION_UNPACK_MAJ(psBuildInfo->ui32BuildVersion),
+                                PVRVERSION_UNPACK_MIN(psBuildInfo->ui32BuildVersion),
+                                psBuildInfo->ui32BuildRevision,
+                                (psBuildInfo->ui32BuildType == BUILD_TYPE_DEBUG) ?
+                                        STR_DEBUG : STR_RELEASE,
+                                psBuildInfo->ui32BuildOptions,
+                                PVR_BUILD_DIR);
+
+                       psBuildInfo = &psPVRSRVData->sDriverInfo.sKMBuildInfo;
+                       DIPrintf(psEntry, DI_PRINT_VERSION_FMTSPEC,
+                                "KM Driver (" PVR_ARCH_NAME ")",
+                                PVRVERSION_UNPACK_MAJ(psBuildInfo->ui32BuildVersion),
+                                PVRVERSION_UNPACK_MIN(psBuildInfo->ui32BuildVersion),
+                                psBuildInfo->ui32BuildRevision,
+                                (psBuildInfo->ui32BuildType == BUILD_TYPE_DEBUG) ?
+                                        STR_DEBUG : STR_RELEASE,
+                                psBuildInfo->ui32BuildOptions,
+                                PVR_BUILD_DIR);
+               }
+               else
+               {
+                       /* bIsNoMatch is `false` in one of the following cases:
+                        * - UM & KM version parameters actually match.
+                        * - A comparison between UM & KM has not been made yet, because no
+                        *   client ever connected.
+                        *
+                        * In both cases, available (KM) version info is the best output we
+                        * can provide.
+                        */
+                       DIPrintf(psEntry, "Driver Version: %s (%s) (%s) build options: "
+                                "0x%08lx %s\n", PVRVERSION_STRING, PVR_ARCH_NAME,
+                                PVR_BUILD_TYPE, RGX_BUILD_OPTIONS_KM, PVR_BUILD_DIR);
+               }
+       }
+       else if (pvPriv != NULL)
+       {
+               PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *) pvPriv;
+               PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig;
+#ifdef SUPPORT_RGX
+               PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice;
+#if defined(DEBUG) || defined(SUPPORT_VALIDATION)
+               IMG_CHAR pszBuildOptions[BUILD_OPT_LEN];
+               IMG_UINT32 ui32BuildOptionLen = 0;
+               static const char* aszOptions[] = RGX_BUILD_OPTIONS_LIST;
+               int i = 0;
+#endif
+#endif /* SUPPORT_RGX */
+               IMG_BOOL bFwVersionInfoPrinted = IMG_FALSE;
+
+               DIPrintf(psEntry, "\nDevice Name: %s\n", psDevConfig->pszName);
+               DIPrintf(psEntry, "Device ID: %u:%d\n", psDevNode->sDevId.ui32InternalID,
+                                                       psDevNode->sDevId.i32OsDeviceID);
+
+               if (psDevConfig->pszVersion)
+               {
+                       DIPrintf(psEntry, "Device Version: %s\n",
+                                 psDevConfig->pszVersion);
+               }
+
+               if (psDevNode->pfnDeviceVersionString)
+               {
+                       IMG_CHAR *pszVerStr;
+
+                       if (psDevNode->pfnDeviceVersionString(psDevNode,
+                                                             &pszVerStr) == PVRSRV_OK)
+                       {
+                               DIPrintf(psEntry, "%s\n", pszVerStr);
+
+                               OSFreeMem(pszVerStr);
+                       }
+               }
+
+#ifdef SUPPORT_RGX
+               /* print device's firmware version info */
+               if (psDevInfo->psRGXFWIfOsInitMemDesc != NULL)
+               {
+                       /* psDevInfo->psRGXFWIfOsInitMemDesc should be permanently mapped */
+                       if (psDevInfo->psRGXFWIfOsInit != NULL)
+                       {
+                               if (psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)
+                               {
+                                       const RGXFWIF_COMPCHECKS *psRGXCompChecks =
+                                               &psDevInfo->psRGXFWIfOsInit->sRGXCompChecks;
+                                       IMG_UINT32 ui32DDKVer = psRGXCompChecks->ui32DDKVersion;
+
+                                       DIPrintf(psEntry, DI_PRINT_VERSION_FMTSPEC,
+                                                "Firmware",
+                                                PVRVERSION_UNPACK_MAJ(ui32DDKVer),
+                                                PVRVERSION_UNPACK_MIN(ui32DDKVer),
+                                                psRGXCompChecks->ui32DDKBuild,
+                                                ((psRGXCompChecks->ui32BuildOptions &
+                                                 OPTIONS_DEBUG_MASK) ? STR_DEBUG : STR_RELEASE),
+                                                psRGXCompChecks->ui32BuildOptions,
+                                                PVR_BUILD_DIR);
+                                       bFwVersionInfoPrinted = IMG_TRUE;
+
+#if defined(DEBUG) || defined(SUPPORT_VALIDATION)
+                                       DIPrintf(psEntry, "Firmware Build Options:\n");
+
+                                       for (i = 0; i < ARRAY_SIZE(aszOptions); i++)
+                                       {
+                                               if ((psRGXCompChecks->ui32BuildOptions & 1<<i))
+                                               {
+                                                       _AppendOptionStr(pszBuildOptions, aszOptions[i], psEntry, &ui32BuildOptionLen);
+                                               }
+                                       }
+
+                                       if (ui32BuildOptionLen != 0)
+                                       {
+                                               DIPrintf(psEntry, "%s", pszBuildOptions);
+                                       }
+                                       DIPrintf(psEntry, "\n");
+#endif
+                               }
+                       }
+                       else
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "%s: Error acquiring CPU virtual "
+                                       "address of FWInitMemDesc", __func__));
+                       }
+               }
+#endif /* SUPPORT_RGX */
+
+               if (!bFwVersionInfoPrinted)
+               {
+                       DIPrintf(psEntry, "Firmware Version: Info unavailable %s\n",
+#ifdef NO_HARDWARE
+                                "on NoHW driver"
+#else /* NO_HARDWARE */
+                                "(Is INIT complete?)"
+#endif /* NO_HARDWARE */
+                                );
+               }
+       }
+
+       return 0;
+}
+
+#if defined(SUPPORT_RGX) && defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+
+/*************************************************************************/ /*!
+ Power data DebugFS entry
+*/ /**************************************************************************/
+
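+/* Build an RGXFWIF_KCCB_CMD_COUNTER_DUMP command for the requested action
+ * (start/stop/sample) and schedule it on the general-purpose DM, returning
+ * the kCCB slot so the caller can wait for the firmware to process it.
+ * Not supported in guest (virtualised) mode.
+ */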
+static PVRSRV_ERROR SendPowerCounterCommand(PVRSRV_DEVICE_NODE* psDeviceNode,
+                                            RGXFWIF_COUNTER_DUMP_REQUEST eRequestType,
+                                            IMG_UINT32 *pui32kCCBCommandSlot)
+{
+       PVRSRV_ERROR eError;
+
+       RGXFWIF_KCCB_CMD sCounterDumpCmd;
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+       sCounterDumpCmd.eCmdType = RGXFWIF_KCCB_CMD_COUNTER_DUMP;
+       sCounterDumpCmd.uCmdData.sCounterDumpConfigData.eCounterDumpRequest = eRequestType;
+
+       eError = RGXScheduleCommandAndGetKCCBSlot(psDeviceNode->pvDevice,
+                               RGXFWIF_DM_GP,
+                               &sCounterDumpCmd,
+                               0,
+                               PDUMP_FLAGS_CONTINUOUS,
+                               pui32kCCBCommandSlot);
+       PVR_LOG_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot");
+
+       return eError;
+}
+
+static int _DebugPowerDataDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry);
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       IMG_UINT32 ui32kCCBCommandSlot;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PVR_UNREFERENCED_PARAMETER(pvData);
+
+       if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Device not initialised when "
+                                "power counter data was requested!"));
+               return -EIO;
+       }
+
+       OSLockAcquire(psDevInfo->hCounterDumpingLock);
+
+       eError = SendPowerCounterCommand(psDeviceNode,
+                                                                        RGXFWIF_PWR_COUNTER_DUMP_SAMPLE,
+                                                                        &ui32kCCBCommandSlot);
+
+       if (eError != PVRSRV_OK)
+       {
+               OSLockRelease(psDevInfo->hCounterDumpingLock);
+               return -EIO;
+       }
+
+       /* Wait for the FW to complete processing of the command */
+       eError = RGXWaitForKCCBSlotUpdate(psDevInfo,
+                                                                         ui32kCCBCommandSlot,
+                                                                         PDUMP_FLAGS_CONTINUOUS);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_LOG_ERROR(eError, "RGXWaitForKCCBSlotUpdate");
+               OSLockRelease(psDevInfo->hCounterDumpingLock);
+               return -EIO;
+       }
+
+       /* Read back the buffer */
+       {
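+               /* Buffer layout, as consumed below:
+                *   [0] number of registers sampled
+                *   [1] sample period
+                *   then, per register: offset, instance count, and one
+                *   low/high 32-bit pair per instance.
+                */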
+               IMG_UINT32* pui32PowerBuffer;
+               IMG_UINT32 ui32NumOfRegs, ui32SamplePeriod;
+               IMG_UINT32 i, j;
+
+               eError = DevmemAcquireCpuVirtAddr(psDevInfo->psCounterBufferMemDesc,
+                                                                                 (void**)&pui32PowerBuffer);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_LOG_ERROR(eError, "DevmemAcquireCpuVirtAddr");
+                       OSLockRelease(psDevInfo->hCounterDumpingLock);
+                       return -EIO;
+               }
+
+               ui32NumOfRegs = *pui32PowerBuffer++;
+               ui32SamplePeriod = *pui32PowerBuffer++;
+
+               if (ui32NumOfRegs)
+               {
+                       DIPrintf(psEntry, "Power counter data for device\n");
+                       DIPrintf(psEntry, "Sample period: 0x%08x\n", ui32SamplePeriod);
+
+                       for (i = 0; i < ui32NumOfRegs; i++)
+                       {
+                               IMG_UINT32 ui32High, ui32Low;
+                               IMG_UINT32 ui32RegOffset = *pui32PowerBuffer++;
+                               IMG_UINT32 ui32NumOfInstances = *pui32PowerBuffer++;
+
+                               PVR_ASSERT(ui32NumOfInstances);
+
+                               DIPrintf(psEntry, "0x%08x:", ui32RegOffset);
+
+                               for (j = 0; j < ui32NumOfInstances; j++)
+                               {
+                                       ui32Low = *pui32PowerBuffer++;
+                                       ui32High = *pui32PowerBuffer++;
+
+                                       DIPrintf(psEntry, " 0x%016llx",
+                                                        (IMG_UINT64) ui32Low | (IMG_UINT64) ui32High << 32);
+                               }
+
+                               DIPrintf(psEntry, "\n");
+                       }
+               }
+
+               DevmemReleaseCpuVirtAddr(psDevInfo->psCounterBufferMemDesc);
+       }
+
+       OSLockRelease(psDevInfo->hCounterDumpingLock);
+
+       return eError;
+}
+
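+/* Writing "1" to this entry starts firmware power-counter sampling and
+ * writing "0" stops it; any other input is rejected with -EINVAL.
+ */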
+static IMG_INT64 PowerDataSet(const IMG_CHAR __user *pcBuffer,
+                              IMG_UINT64 ui64Count, IMG_UINT64 *pui64Pos,
+                              void *pvData)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*)pvData;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       RGXFWIF_COUNTER_DUMP_REQUEST eRequest;
+       IMG_UINT32 ui32kCCBCommandSlot;
+
+       PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO);
+       PVR_RETURN_IF_FALSE(pui64Pos != NULL && *pui64Pos == 0, -EIO);
+       PVR_RETURN_IF_FALSE(ui64Count >= 1, -EINVAL);
+       PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL);
+
+       if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Device not initialised when "
+                                "power counter data was requested!"));
+               return -EIO;
+       }
+
+       if (pcBuffer[0] == '1')
+       {
+               eRequest = RGXFWIF_PWR_COUNTER_DUMP_START;
+       }
+       else if (pcBuffer[0] == '0')
+       {
+               eRequest = RGXFWIF_PWR_COUNTER_DUMP_STOP;
+       }
+       else
+       {
+               return -EINVAL;
+       }
+
+       OSLockAcquire(psDevInfo->hCounterDumpingLock);
+
+       SendPowerCounterCommand(psDeviceNode,
+                               eRequest,
+                               &ui32kCCBCommandSlot);
+
+       OSLockRelease(psDevInfo->hCounterDumpingLock);
+
+       *pui64Pos += ui64Count;
+       return ui64Count;
+}
+
+#endif /* defined(SUPPORT_RGX) && defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) */
+
+/*************************************************************************/ /*!
+ Status DebugFS entry
+*/ /**************************************************************************/
+
+static void *_DebugStatusCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode,
+                                                                                va_list va)
+{
+       IMG_UINT64 *pui64CurrentPosition = va_arg(va, IMG_UINT64 *);
+       IMG_UINT64 ui64Position = va_arg(va, IMG_UINT64);
+       IMG_UINT64 ui64CurrentPosition = *pui64CurrentPosition;
+
+       (*pui64CurrentPosition)++;
+
+       return (ui64CurrentPosition == ui64Position) ? psDevNode : NULL;
+}
+
+static void *_DebugStatusDIStart(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos)
+{
+       PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry);
+       IMG_UINT64 uiCurrentPosition = 1;
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+
+       if (*pui64Pos == 0)
+       {
+               return DI_START_TOKEN;
+       }
+
+       OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock);
+       psDeviceNode =  List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+                                                                                 _DebugStatusCompare_AnyVaCb,
+                                                                                 &uiCurrentPosition,
+                                                                                 *pui64Pos);
+       OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock);
+
+       return psDeviceNode;
+}
+
+static void _DebugStatusDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData)
+{
+       PVR_UNREFERENCED_PARAMETER(psEntry);
+       PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *_DebugStatusDINext(OSDI_IMPL_ENTRY *psEntry,
+                                                                void *pvData,
+                                                                IMG_UINT64 *pui64Pos)
+{
+       PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry);
+       IMG_UINT64 uiCurrentPosition = 1;
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+
+       PVR_UNREFERENCED_PARAMETER(pvData);
+
+       (*pui64Pos)++;
+
+       OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock);
+       psDeviceNode =  List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+                                                                                 _DebugStatusCompare_AnyVaCb,
+                                                                                 &uiCurrentPosition,
+                                                                                 *pui64Pos);
+       OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock);
+
+       return psDeviceNode;
+}
+
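+/* Show handler for the status entry: the DI_START_TOKEN pass reports the
+ * overall driver state, and each device pass refreshes and prints the
+ * device health status/reason, the server error count and, on RGX, the
+ * HWR/CRR/SLR/FWF/APM event counts and current GPU utilisation (guest
+ * drivers omit the firmware-derived statistics).
+ */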
+static int _DebugStatusDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData)
+{
+       if (pvData == DI_START_TOKEN)
+       {
+               PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry);
+
+               if (psPVRSRVData != NULL)
+               {
+                       switch (psPVRSRVData->eServicesState)
+                       {
+                               case PVRSRV_SERVICES_STATE_OK:
+                                       DIPrintf(psEntry, "Driver Status:   OK\n");
+                                       break;
+                               case PVRSRV_SERVICES_STATE_BAD:
+                                       DIPrintf(psEntry, "Driver Status:   BAD\n");
+                                       break;
+                               case PVRSRV_SERVICES_STATE_UNDEFINED:
+                                       DIPrintf(psEntry, "Driver Status:   UNDEFINED\n");
+                                       break;
+                               default:
+                                       DIPrintf(psEntry, "Driver Status:   UNKNOWN (%d)\n",
+                                                psPVRSRVData->eServicesState);
+                                       break;
+                       }
+               }
+       }
+       else if (pvData != NULL)
+       {
+               PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
+               IMG_CHAR           *pszStatus = "";
+               IMG_CHAR           *pszReason = "";
+               PVRSRV_DEVICE_HEALTH_STATUS eHealthStatus;
+               PVRSRV_DEVICE_HEALTH_REASON eHealthReason;
+
+               DIPrintf(psEntry, "\nDevice ID: %u:%d\n", psDeviceNode->sDevId.ui32InternalID,
+                                                         psDeviceNode->sDevId.i32OsDeviceID);
+
+               /* Update the health status now if possible... */
+               if (psDeviceNode->pfnUpdateHealthStatus)
+               {
+                       psDeviceNode->pfnUpdateHealthStatus(psDeviceNode, IMG_FALSE);
+               }
+               eHealthStatus = OSAtomicRead(&psDeviceNode->eHealthStatus);
+               eHealthReason = OSAtomicRead(&psDeviceNode->eHealthReason);
+
+               switch (eHealthStatus)
+               {
+                       case PVRSRV_DEVICE_HEALTH_STATUS_OK:  pszStatus = "OK";  break;
+                       case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING:  pszStatus = "NOT RESPONDING";  break;
+                       case PVRSRV_DEVICE_HEALTH_STATUS_DEAD:  pszStatus = "DEAD";  break;
+                       case PVRSRV_DEVICE_HEALTH_STATUS_FAULT:  pszStatus = "FAULT";  break;
+                       case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED:  pszStatus = "UNDEFINED";  break;
+                       default:  pszStatus = "UNKNOWN";  break;
+               }
+
+               switch (eHealthReason)
+               {
+                       case PVRSRV_DEVICE_HEALTH_REASON_NONE:  pszReason = "";  break;
+                       case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED:  pszReason = " (Asserted)";  break;
+                       case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING:  pszReason = " (Poll failing)";  break;
+                       case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS:  pszReason = " (Global Event Object timeouts rising)";  break;
+                       case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT:  pszReason = " (KCCB offset invalid)";  break;
+                       case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED:  pszReason = " (KCCB stalled)";  break;
+                       case PVRSRV_DEVICE_HEALTH_REASON_IDLING:  pszReason = " (Idling)";  break;
+                       case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING:  pszReason = " (Restarting)";  break;
+                       case PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS:  pszReason = " (Missing interrupts)";  break;
+                       default:  pszReason = " (Unknown reason)";  break;
+               }
+
+               DIPrintf(psEntry, "Firmware Status: %s%s\n", pszStatus, pszReason);
+               if (PVRSRV_ERROR_LIMIT_REACHED)
+               {
+                       DIPrintf(psEntry, "Server Errors:   %d+\n", IMG_UINT32_MAX);
+               }
+               else
+               {
+                       DIPrintf(psEntry, "Server Errors:   %d\n", PVRSRV_KM_ERRORS);
+               }
+
+               /* Write other useful stats to aid the test cycle... */
+               if (psDeviceNode->pvDevice != NULL)
+               {
+#ifdef SUPPORT_RGX
+                       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+                       const RGXFWIF_HWRINFOBUF *psHWRInfoBuf = psDevInfo->psRGXFWIfHWRInfoBufCtl;
+                       const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+
+#ifdef PVRSRV_DEBUG_LISR_EXECUTION
+                       /* Show the detected #LISR, #MISR scheduled calls */
+                       DIPrintf(psEntry, "RGX #LISR: %llu\n", psDeviceNode->ui64nLISR);
+                       DIPrintf(psEntry, "RGX #MISR: %llu\n", psDeviceNode->ui64nMISR);
+#endif /* PVRSRV_DEBUG_LISR_EXECUTION */
+
+                       /* Calculate the number of HWR events in total across all the DMs... */
+                       if (psHWRInfoBuf != NULL)
+                       {
+                               IMG_UINT32 ui32HWREventCount = 0;
+                               IMG_UINT32 ui32CRREventCount = 0;
+                               IMG_UINT32 ui32DMIndex;
+
+                               for (ui32DMIndex = 0; ui32DMIndex < RGXFWIF_DM_MAX; ui32DMIndex++)
+                               {
+                                       ui32HWREventCount += psHWRInfoBuf->aui32HwrDmLockedUpCount[ui32DMIndex];
+                                       ui32CRREventCount += psHWRInfoBuf->aui32HwrDmOverranCount[ui32DMIndex];
+                               }
+
+                               DIPrintf(psEntry, "HWR Event Count: %d\n", ui32HWREventCount);
+                               DIPrintf(psEntry, "CRR Event Count: %d\n", ui32CRREventCount);
+#ifdef PVRSRV_STALLED_CCB_ACTION
+                               /* Write the number of Sync Lockup Recovery (SLR) events... */
+                               DIPrintf(psEntry, "SLR Event Count: %d\n", psDevInfo->psRGXFWIfFwOsData->ui32ForcedUpdatesRequested);
+#endif /* PVRSRV_STALLED_CCB_ACTION */
+                       }
+
+                       /* Show error counts */
+                       DIPrintf(psEntry, "WGP Error Count: %d\n", psDevInfo->sErrorCounts.ui32WGPErrorCount);
+                       DIPrintf(psEntry, "TRP Error Count: %d\n", psDevInfo->sErrorCounts.ui32TRPErrorCount);
+
+                       /*
+                        * Guest drivers do not support the following functionality:
+                        *      - performing actual on-chip FW tracing,
+                        *      - collecting actual on-chip GPU utilisation stats,
+                        *      - performing actual on-chip GPU power/DVFS management,
+                        * so no further information can be provided for them.
+                        */
+                       if (!PVRSRV_VZ_MODE_IS(GUEST))
+                       {
+                               if (psFwSysData != NULL)
+                               {
+                                       DIPrintf(psEntry, "FWF Event Count: %d\n", psFwSysData->ui32FWFaults);
+                               }
+
+                               /* Write the number of APM events... */
+                               DIPrintf(psEntry, "APM Event Count: %d\n", psDevInfo->ui32ActivePMReqTotal);
+
+                               /* Write the current GPU Utilisation values... */
+                               if (psDevInfo->pfnGetGpuUtilStats &&
+                                       eHealthStatus == PVRSRV_DEVICE_HEALTH_STATUS_OK)
+                               {
+                                       RGXFWIF_GPU_UTIL_STATS sGpuUtilStats;
+                                       PVRSRV_ERROR eError = PVRSRV_OK;
+
+                                       eError = psDevInfo->pfnGetGpuUtilStats(psDeviceNode,
+                                                                                                                  ghGpuUtilUserDebugFS,
+                                                                                                                  &sGpuUtilStats);
+
+                                       if ((eError == PVRSRV_OK) &&
+                                               ((IMG_UINT32)sGpuUtilStats.ui64GpuStatCumulative))
+                                       {
+                                               IMG_UINT64 util;
+                                               IMG_UINT32 rem;
+
+                                               util = 100 * sGpuUtilStats.ui64GpuStatActive;
+                                               util = OSDivide64(util, (IMG_UINT32)sGpuUtilStats.ui64GpuStatCumulative, &rem);
+
+                                               DIPrintf(psEntry, "GPU Utilisation: %u%%\n", (IMG_UINT32)util);
+                                       }
+                                       else
+                                       {
+                                               DIPrintf(psEntry, "GPU Utilisation: -\n");
+                                       }
+                               }
+                       }
+#endif /* SUPPORT_RGX */
+               }
+       }
+
+       return 0;
+}
+
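+/* Writing a string beginning with 'k' or 'K' to the status entry forces the
+ * driver state to PVRSRV_SERVICES_STATE_BAD; other input is rejected.
+ */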
+static IMG_INT64 DebugStatusSet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count,
+                                IMG_UINT64 *pui64Pos, void *pvData)
+{
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+       PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO);
+       PVR_RETURN_IF_FALSE(pui64Pos != NULL && *pui64Pos == 0, -EIO);
+       PVR_RETURN_IF_FALSE(ui64Count >= 1, -EINVAL);
+       PVR_RETURN_IF_FALSE(pcBuffer[0] == 'k' || pcBuffer[0] == 'K', -EINVAL);
+       PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL);
+
+       psPVRSRVData->eServicesState = PVRSRV_SERVICES_STATE_BAD;
+
+       *pui64Pos += ui64Count;
+       return ui64Count;
+}
+
+/*************************************************************************/ /*!
+ Dump Debug DebugFS entry
+*/ /**************************************************************************/
+
+static int _DebugDumpDebugDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry);
+
+       PVR_UNREFERENCED_PARAMETER(pvData);
+
+       if (psDeviceNode->pvDevice != NULL)
+       {
+               PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX,
+                                  _DumpDebugDIPrintfWrapper, psEntry);
+       }
+
+       return 0;
+}
+
+#ifdef SUPPORT_RGX
+
+/*************************************************************************/ /*!
+ Firmware Trace DebugFS entry
+*/ /**************************************************************************/
+
+static int _DebugFWTraceDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry);
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       if (psDevInfo != NULL)
+       {
+               RGXDumpFirmwareTrace(_DumpDebugDIPrintfWrapper, psEntry, psDevInfo);
+       }
+
+       return 0;
+}
+
+/*************************************************************************/ /*!
+ Firmware Translated Page Tables DebugFS entry
+*/ /**************************************************************************/
+
+static void _DocumentFwMapping(OSDI_IMPL_ENTRY *psEntry,
+                                                                PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                IMG_UINT32 ui32FwVA,
+                                                                IMG_CPU_PHYADDR sCpuPA,
+                                                                IMG_DEV_PHYADDR sDevPA,
+                                                                IMG_UINT64 ui64PTE)
+{
+#if defined(RGX_FEATURE_MIPS_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+       {
+               DIPrintf(psEntry, "|    0x%8X   |   "
+                                                 "0x%16" IMG_UINT64_FMTSPECX "   |   "
+                                                 "0x%16" IMG_UINT64_FMTSPECX "   |   "
+                                                 "%s%s%s   |\n",
+                                                 ui32FwVA,
+                                                 (IMG_UINT64) sCpuPA.uiAddr,
+                                                 sDevPA.uiAddr,
+                                                 gapszMipsPermissionPTFlags[RGXMIPSFW_TLB_GET_INHIBIT(ui64PTE)],
+                                                 gapszMipsDirtyGlobalValidPTFlags[RGXMIPSFW_TLB_GET_DGV(ui64PTE)],
+                                                 gapszMipsCoherencyPTFlags[RGXMIPSFW_TLB_GET_COHERENCY(ui64PTE)]);
+       }
+       else
+#endif
+       {
+               /* META and RISCV use a subset of the GPU's virtual address space */
+               DIPrintf(psEntry, "|    0x%8X   |   "
+                                                 "0x%16" IMG_UINT64_FMTSPECX "   |   "
+                                                 "0x%16" IMG_UINT64_FMTSPECX "   |   "
+                                                 "%s%s%s%s%s%s   |\n",
+                                                 ui32FwVA,
+                                                 (IMG_UINT64) sCpuPA.uiAddr,
+                                                 sDevPA.uiAddr,
+                                                 BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN)   ? "P" : " ",
+                                                 BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_PM_SRC_EN)          ? "PM" : "  ",
+#if defined(RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN)
+                                                 BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN) ? "B" : " ",
+#else
+                                                 " ",
+#endif
+                                                 BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_CC_EN)              ? "C" : " ",
+                                                 BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_READ_ONLY_EN)       ? "RO" : "RW",
+                                                 BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_VALID_EN)           ? "V" : " ");
+       }
+}
+
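+/* Walk every page of each OSID's firmware heap and print the CPU/device
+ * physical address and PTE flags for every firmware virtual address that
+ * currently has a mapping. On native (non-virtualised) setups only the
+ * first heap is reported.
+ */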
+static int _FirmwareMappingsDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       IMG_UINT32 ui32FwVA;
+       IMG_UINT32 ui32FwPageSize;
+       IMG_UINT32 ui32OSID;
+
+       psDeviceNode = DIGetPrivData(psEntry);
+
+       if ((psDeviceNode == NULL) ||
+           (psDeviceNode->pvDevice == NULL) ||
+           (((PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice)->psKernelMMUCtx == NULL))
+       {
+               /* The Kernel MMU context containing the Firmware mappings is not initialised */
+               return 0;
+       }
+
+       psDevInfo = psDeviceNode->pvDevice;
+
+       DIPrintf(psEntry, "+-----------------+------------------------+------------------------+--------------+\n"
+                                         "|    Firmware     |           CPU          |         Device         |      PTE     |\n"
+                                         "| Virtual Address |    Physical Address    |    Physical Address    |     Flags    |\n"
+                                         "+-----------------+------------------------+------------------------+              +\n");
+
+#if defined(RGX_FEATURE_MIPS_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+       {
+               DIPrintf(psEntry, "|                                               RI/XI = Read / Execution Inhibit   |\n"
+                                 "|                                               C     = Cache Coherent             |\n"
+                                 "|                                               D     = Dirty Page Table Entry     |\n"
+                                 "|                                               V     = Valid Page Table Entry     |\n"
+                                 "|                                               G     = Global Page Table Entry    |\n"
+                                 "+-----------------+------------------------+------------------------+--------------+\n");
+
+               /* MIPS uses the same page size as the OS */
+               ui32FwPageSize = OSGetPageSize();
+       }
+       else
+#endif
+       {
+               DIPrintf(psEntry, "|                                               P     = Pending Page Table Entry   |\n"
+                                 "|                                               PM    = Parameter Manager Source   |\n"
+                                 "|                                               B     = Bypass SLC                 |\n"
+                                 "|                                               C     = Cache Coherent             |\n"
+                                 "|                                               RW/RO = Device Access Rights       |\n"
+                                 "|                                               V     = Valid Page Table Entry     |\n"
+                                 "+-----------------+------------------------+------------------------+--------------+\n");
+
+               ui32FwPageSize = BIT(RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT);
+       }
+
+       for (ui32OSID = 0; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++)
+       {
+               IMG_UINT32 ui32FwHeapBase = (IMG_UINT32) ((RGX_FIRMWARE_RAW_HEAP_BASE +
+                                            (ui32OSID * RGX_FIRMWARE_RAW_HEAP_SIZE)) & UINT_MAX);
+               IMG_UINT32 ui32FwHeapEnd  = ui32FwHeapBase + (IMG_UINT32) (RGX_FIRMWARE_RAW_HEAP_SIZE & UINT_MAX);
+
+               DIPrintf(psEntry, "|                                       OS ID %u                                    |\n"
+                                                 "+-----------------+------------------------+------------------------+--------------+\n", ui32OSID);
+
+               for (ui32FwVA = ui32FwHeapBase;
+                    ui32FwVA < ui32FwHeapEnd;
+                    ui32FwVA += ui32FwPageSize)
+               {
+                       PVRSRV_ERROR eError;
+                       IMG_UINT64 ui64PTE = 0U;
+                       IMG_CPU_PHYADDR sCpuPA = {0U};
+                       IMG_DEV_PHYADDR sDevPA = {0U};
+
+                       eError = RGXGetFwMapping(psDevInfo, ui32FwVA, &sCpuPA, &sDevPA, &ui64PTE);
+
+                       if (eError == PVRSRV_OK)
+                       {
+                               _DocumentFwMapping(psEntry, psDevInfo, ui32FwVA, sCpuPA, sDevPA, ui64PTE);
+                       }
+                       else if (eError != PVRSRV_ERROR_DEVICEMEM_NO_MAPPING)
+                       {
+                               PVR_LOG_ERROR(eError, "RGXGetFwMapping");
+                               return -EIO;
+                       }
+               }
+
+               DIPrintf(psEntry, "+-----------------+------------------------+------------------------+--------------+\n");
+
+               if (PVRSRV_VZ_MODE_IS(NATIVE))
+               {
+                       break;
+               }
+       }
+
+       return 0;
+}
+
+#ifdef SUPPORT_FIRMWARE_GCOV
+
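+/* The firmware gcov entry maps the firmware's gcov buffer for the duration
+ * of the read and dumps its raw contents (ui32FirmwareGcovSize bytes) to
+ * the caller in a single show call.
+ */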
+static void *_FirmwareGcovDIStart(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry);
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       if (psDevInfo != NULL)
+       {
+               if (psDevInfo->psFirmwareGcovBufferMemDesc != NULL)
+               {
+                       void *pvCpuVirtAddr;
+                       DevmemAcquireCpuVirtAddr(psDevInfo->psFirmwareGcovBufferMemDesc, &pvCpuVirtAddr);
+                       return *pui64Pos ? NULL : pvCpuVirtAddr;
+               }
+       }
+
+       return NULL;
+}
+
+static void _FirmwareGcovDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry);
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       PVR_UNREFERENCED_PARAMETER(pvData);
+
+       if (psDevInfo != NULL)
+       {
+               if (psDevInfo->psFirmwareGcovBufferMemDesc != NULL)
+               {
+                       DevmemReleaseCpuVirtAddr(psDevInfo->psFirmwareGcovBufferMemDesc);
+               }
+       }
+}
+
+static void *_FirmwareGcovDINext(OSDI_IMPL_ENTRY *psEntry,
+                                                                 void *pvData,
+                                                                 IMG_UINT64 *pui64Pos)
+{
+       PVR_UNREFERENCED_PARAMETER(psEntry);
+       PVR_UNREFERENCED_PARAMETER(pvData);
+       PVR_UNREFERENCED_PARAMETER(pui64Pos);
+       return NULL;
+}
+
+static int _FirmwareGcovDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry);
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       if (psDevInfo != NULL)
+       {
+               DIWrite(psEntry, pvData, psDevInfo->ui32FirmwareGcovSize);
+       }
+       return 0;
+}
+
+#endif /* SUPPORT_FIRMWARE_GCOV */
+
+#ifdef SUPPORT_POWER_VALIDATION_VIA_DEBUGFS
+
+/*************************************************************************/ /*!
+ Power monitoring DebugFS entry
+*/ /**************************************************************************/
+
+static int _PowMonTraceDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry);
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       PVR_UNREFERENCED_PARAMETER(pvData);
+
+       if (psDevInfo != NULL)
+       {
+               RGXDumpPowerMonitoring(_DumpDebugDIPrintfWrapper, psEntry, psDevInfo);
+       }
+
+       return 0;
+}
+
+#endif /* SUPPORT_POWER_VALIDATION_VIA_DEBUGFS */
+
+#ifdef SUPPORT_VALIDATION
+
+#ifndef SYS_RGX_DEV_UNMAPPED_FW_REG
+#define SYS_RGX_DEV_UNMAPPED_FW_REG 0xFFFFFFFF
+#endif
+#define DI_RGXREGS_TIMEOUT_MS 1000
+
+/*************************************************************************/ /*!
+ RGX Registers Dump DebugFS entry
+*/ /**************************************************************************/
+
+static IMG_INT64 _RgxRegsSeek(IMG_UINT64 ui64Offset, void *pvData)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*)pvData;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_LOG_RETURN_IF_FALSE(psDeviceNode != NULL, "psDeviceNode is NULL", -1);
+
+       psDevInfo = psDeviceNode->pvDevice;
+
+       PVR_LOG_RETURN_IF_FALSE(ui64Offset <= (psDevInfo->ui32RegSize - 4),
+                               "register offset is too big", -1);
+
+       return ui64Offset;
+}
+
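+/* Read an RGX register via DebugFS. Offsets below SYS_RGX_DEV_UNMAPPED_FW_REG
+ * are read directly from the CPU-mapped register bank; offsets at or above
+ * it are routed to the firmware as a KCCB register command and the value is
+ * collected once the completion is signalled.
+ */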
+static IMG_INT64 _RgxRegsRead(IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count,
+                              IMG_UINT64 *pui64Pos, void *pvData)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*)pvData;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_UINT64 ui64RegVal = 0;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       IMG_UINT64 ui64CompRes;
+
+       PVR_LOG_RETURN_IF_FALSE(psDeviceNode != NULL, "psDeviceNode is NULL", -ENXIO);
+       PVR_LOG_RETURN_IF_FALSE(ui64Count == 4 || ui64Count == 8,
+                               "wrong RGX register size", -EIO);
+       PVR_LOG_RETURN_IF_FALSE(!(*pui64Pos & (ui64Count - 1)),
+                               "register read offset isn't aligned", -EINVAL);
+
+       psDevInfo = psDeviceNode->pvDevice;
+
+       if (*pui64Pos >= SYS_RGX_DEV_UNMAPPED_FW_REG)
+       {
+               if (!psDevInfo->bFirmwareInitialised)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "RGX Register offset is above PCI mapped range but "
+                                        "Firmware isn't yet initialised\n"));
+                       return -EIO;
+               }
+
+               reinit_completion(&psDevInfo->sFwRegs.sRegComp);
+
+               eError = RGXScheduleRgxRegCommand(psDevInfo,
+                                                                                 0x00,
+                                                                                 ui64Count,
+                                                                                 (IMG_UINT32) *pui64Pos,
+                                                                                 IMG_FALSE);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_LOG_ERROR(eError, "RGXScheduleRgxRegCommand");
+                       return -EIO;
+               }
+
+               ui64CompRes = wait_for_completion_timeout(&psDevInfo->sFwRegs.sRegComp,
+                                                                                                 msecs_to_jiffies(DI_RGXREGS_TIMEOUT_MS));
+               if (!ui64CompRes)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "FW RGX Register access timeout %#x\n",
+                                (IMG_UINT32) *pui64Pos));
+                       return -EIO;
+               }
+
+               OSCachedMemCopy(pcBuffer, &psDevInfo->sFwRegs.ui64RegVal, ui64Count);
+       }
+       else
+       {
+               ui64RegVal = ui64Count == 4 ?
+                       OSReadHWReg32(psDevInfo->pvRegsBaseKM, *pui64Pos) :
+                       OSReadHWReg64(psDevInfo->pvRegsBaseKM, *pui64Pos);
+               OSCachedMemCopy(pcBuffer, &ui64RegVal, ui64Count);
+       }
+
+       return ui64Count;
+}
+
+static IMG_INT64 _RgxRegsWrite(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count,
+                               IMG_UINT64 *pui64Pos, void *pvData)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*)pvData;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_UINT64 ui64RegVal = 0;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       /* ignore the '\0' character */
+       ui64Count -= 1;
+
+       PVR_LOG_RETURN_IF_FALSE(psDeviceNode != NULL, "psDeviceNode is NULL", -ENXIO);
+       PVR_LOG_RETURN_IF_FALSE(ui64Count == 4 || ui64Count == 8,
+                               "wrong RGX register size", -EIO);
+       PVR_LOG_RETURN_IF_FALSE(!(*pui64Pos & (ui64Count - 1)),
+                               "register write offset isn't aligned", -EINVAL);
+
+       psDevInfo = psDeviceNode->pvDevice;
+
+       if (*pui64Pos >= SYS_RGX_DEV_UNMAPPED_FW_REG)
+       {
+               if (!psDevInfo->bFirmwareInitialised)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "RGX Register offset is above PCI mapped range but "
+                                        "Firmware isn't yet initialised\n"));
+                       return -EIO;
+               }
+
+               if (ui64Count == 4)
+                       ui64RegVal = (IMG_UINT64) *((IMG_UINT32 *) pcBuffer);
+               else
+                       ui64RegVal = *((IMG_UINT64 *) pcBuffer);
+
+               eError = RGXScheduleRgxRegCommand(psDevInfo,
+                                                                                 ui64RegVal,
+                                                                                 ui64Count,
+                                                                                 (IMG_UINT32) *pui64Pos,
+                                                                                 IMG_TRUE);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_LOG_ERROR(eError, "RGXScheduleRgxRegCommand");
+                       return -EIO;
+               }
+
+       }
+       else
+       {
+               if (ui64Count == 4)
+               {
+                       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, *pui64Pos,
+                                                  *((IMG_UINT32 *) (void *) pcBuffer));
+               }
+               else
+               {
+                       OSWriteHWReg64(psDevInfo->pvRegsBaseKM, *pui64Pos,
+                                                  *((IMG_UINT64 *) (void *) pcBuffer));
+               }
+       }
+
+       return ui64Count;
+}
+
+#endif /* SUPPORT_VALIDATION */
+
+#if defined(SUPPORT_VALIDATION) || defined(SUPPORT_RISCV_GDB)
+#define RISCV_DMI_SIZE  (8U)
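+
+/* The "riscv_dmi" entry exposes a single 8-byte RISC-V Debug Module Interface
+ * (DMI) word: a write stores the request in psDebugInfo->ui64RiscvDmi and
+ * issues it via RGXRiscvDmiOp, while a read returns the value currently held
+ * there.
+ */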
+
+static IMG_INT64 _RiscvDmiRead(IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count,
+                               IMG_UINT64 *pui64Pos, void *pvData)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
+       PVRSRV_DEVICE_DEBUG_INFO *psDebugInfo = &psDeviceNode->sDebugInfo;
+
+       ui64Count = MIN(RISCV_DMI_SIZE, ui64Count);
+       memcpy(pcBuffer, &psDebugInfo->ui64RiscvDmi, ui64Count);
+
+       return ui64Count;
+}
+
+static IMG_INT64 _RiscvDmiWrite(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count,
+                                IMG_UINT64 *pui64Pos, void *pvData)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_DEVICE_DEBUG_INFO *psDebugInfo = &psDeviceNode->sDebugInfo;
+
+       if (psDevInfo == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: devinfo is NULL", __func__));
+               return 0;
+       }
+
+       ui64Count -= 1; /* Drop `\0` */
+       ui64Count = MIN(RISCV_DMI_SIZE, ui64Count);
+
+       memcpy(&psDebugInfo->ui64RiscvDmi, pcBuffer, ui64Count);
+
+       RGXRiscvDmiOp(psDevInfo, &psDebugInfo->ui64RiscvDmi);
+
+       return ui64Count;
+}
+#endif
+
+#endif /* SUPPORT_RGX */
+
+#ifdef SUPPORT_VALIDATION
+
+static int TestMemLeakDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData)
+{
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+       PVR_UNREFERENCED_PARAMETER(pvData);
+
+       PVR_RETURN_IF_FALSE(psPVRSRVData != NULL, -EINVAL);
+
+       DIPrintf(psEntry, "os: %s, %u\ngpu: %s, %u\nmmu: %s, %u\n",
+                psPVRSRVData->sMemLeakIntervals.ui32OSAlloc ? "enabled" : "disabled",
+                psPVRSRVData->sMemLeakIntervals.ui32OSAlloc,
+                psPVRSRVData->sMemLeakIntervals.ui32GPU ? "enabled" : "disabled",
+                psPVRSRVData->sMemLeakIntervals.ui32GPU,
+                psPVRSRVData->sMemLeakIntervals.ui32MMU ? "enabled" : "disabled",
+                psPVRSRVData->sMemLeakIntervals.ui32MMU);
+
+       return 0;
+}
+
+static IMG_INT64 TestMemLeakDISet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count,
+                                  IMG_UINT64 *pui64Pos, void *pvData)
+{
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+       IMG_CHAR *pcTemp;
+       unsigned long ui32MemLeakInterval;
+
+       PVR_UNREFERENCED_PARAMETER(pvData);
+
+       PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO);
+       PVR_RETURN_IF_FALSE(pui64Pos != NULL && *pui64Pos == 0, -EIO);
+       PVR_RETURN_IF_FALSE(ui64Count <= 16, -EINVAL);
+       PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL);
+
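+       /* Input is expected as "<target>,<interval>" where <target> is one of
+        * "os", "gpu" or "mmu", e.g. "gpu,100" (illustrative interval). */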
+       pcTemp = strchr(pcBuffer, ',');
+       PVR_RETURN_IF_FALSE(pcTemp != NULL, -EINVAL);
+
+       if (kstrtoul(pcTemp+1, 0, &ui32MemLeakInterval) != 0)
+       {
+               return -EINVAL;
+       }
+
+       if (strncmp(pcBuffer, "os", pcTemp-pcBuffer) == 0)
+       {
+               psPVRSRVData->sMemLeakIntervals.ui32OSAlloc = ui32MemLeakInterval;
+       }
+       else if (strncmp(pcBuffer, "gpu", pcTemp-pcBuffer) == 0)
+       {
+               psPVRSRVData->sMemLeakIntervals.ui32GPU = ui32MemLeakInterval;
+       }
+       else if (strncmp(pcBuffer, "mmu", pcTemp-pcBuffer) == 0)
+       {
+               psPVRSRVData->sMemLeakIntervals.ui32MMU = ui32MemLeakInterval;
+       }
+       else
+       {
+               return -EINVAL;
+       }
+
+       *pui64Pos += ui64Count;
+       return ui64Count;
+}
+
+#endif /* SUPPORT_VALIDATION */
+
+#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON)
+
+/*************************************************************************/ /*!
+ Debug level DebugFS entry
+*/ /**************************************************************************/
+
+static int DebugLevelDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData)
+{
+       DIPrintf(psEntry, "%u\n", OSDebugLevel());
+
+       return 0;
+}
+
+#ifndef __GNUC__
+static int __builtin_ffsl(long int x)
+{
+       for (size_t i = 0; i < sizeof(x) * 8; i++)
+       {
+               if (x & (1UL << i))
+               {
+                       return i + 1;
+               }
+       }
+       return 0;
+}
+#endif /* __GNUC__ */
+
+static IMG_INT64 DebugLevelSet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count,
+                               IMG_UINT64 *pui64Pos, void *pvData)
+{
+       const IMG_UINT uiMaxBufferSize = 6;
+       IMG_UINT32 ui32Level;
+
+       PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO);
+       PVR_RETURN_IF_FALSE(pui64Pos != NULL && *pui64Pos == 0, -EIO);
+       PVR_RETURN_IF_FALSE(ui64Count > 0 && ui64Count < uiMaxBufferSize, -EINVAL);
+       PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL);
+
+       if (sscanf(pcBuffer, "%u", &ui32Level) != 1)
+       {
+               return -EINVAL;
+       }
+
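+       /* Clamp the requested level to the bit range covered by the DBGPRIV_*
+        * flags (assuming DBGPRIV_LAST is the highest defined flag bit). */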
+       OSSetDebugLevel(ui32Level & ((1 << __builtin_ffsl(DBGPRIV_LAST)) - 1));
+
+       *pui64Pos += ui64Count;
+       return ui64Count;
+}
+#endif /* defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) */
+
+PVRSRV_ERROR DebugCommonInitDriver(void)
+{
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(psPVRSRVData != NULL);
+
+       /*
+        * The DebugFS entries are designed to work in a single device system but
+        * this function will be called multiple times in a multi-device system.
+        * Return an error in this case.
+        */
+       if (gpsVersionDIEntry)
+       {
+               return -EEXIST;
+       }
+
+#if defined(SUPPORT_RGX) && !defined(NO_HARDWARE)
+       if (SORgxGpuUtilStatsRegister(&ghGpuUtilUserDebugFS) != PVRSRV_OK)
+       {
+               return -ENOMEM;
+       }
+#endif /* defined(SUPPORT_RGX) && !defined(NO_HARDWARE) */
+
+       {
+               DI_ITERATOR_CB sIterator = {
+                       .pfnStart = _VersionDIStart,
+                       .pfnStop = _VersionDIStop,
+                       .pfnNext = _VersionDINext,
+                       .pfnShow = _VersionDIShow
+               };
+
+               eError = DICreateEntry("version", NULL, &sIterator, psPVRSRVData,
+                                      DI_ENTRY_TYPE_GENERIC, &gpsVersionDIEntry);
+               PVR_GOTO_IF_ERROR(eError, return_error_);
+       }
+
+       {
+               DI_ITERATOR_CB sIterator = {
+                       .pfnStart = _DebugStatusDIStart,
+                       .pfnStop = _DebugStatusDIStop,
+                       .pfnNext = _DebugStatusDINext,
+                       .pfnShow = _DebugStatusDIShow,
+                       .pfnWrite = DebugStatusSet,
+                       //'K' expected + Null terminator
+                       .ui32WriteLenMax = ((1U)+1U)
+               };
+               eError = DICreateEntry("status", NULL, &sIterator, psPVRSRVData,
+                                      DI_ENTRY_TYPE_GENERIC, &gpsStatusDIEntry);
+               PVR_GOTO_IF_ERROR(eError, return_error_);
+       }
+
+#ifdef SUPPORT_VALIDATION
+       {
+               DI_ITERATOR_CB sIterator = {
+                       .pfnShow = TestMemLeakDIShow,
+                       .pfnWrite = TestMemLeakDISet,
+                       //Function only allows max 15 chars + Null terminator
+                       .ui32WriteLenMax = ((15U)+1U)
+               };
+               eError = DICreateEntry("test_memleak", NULL, &sIterator, psPVRSRVData,
+                                      DI_ENTRY_TYPE_GENERIC, &gpsTestMemLeakDIEntry);
+               PVR_GOTO_IF_ERROR(eError, return_error_);
+       }
+#endif /* SUPPORT_VALIDATION */
+
+#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON)
+       {
+               DI_ITERATOR_CB sIterator = {
+                       .pfnShow = DebugLevelDIShow,
+                       .pfnWrite = DebugLevelSet,
+                       //Max value of 255(3 char) + Null terminator
+                       .ui32WriteLenMax = ((3U)+1U)
+               };
+               eError = DICreateEntry("debug_level", NULL, &sIterator, NULL,
+                                      DI_ENTRY_TYPE_GENERIC, &gpsDebugLevelDIEntry);
+               PVR_GOTO_IF_ERROR(eError, return_error_);
+       }
+#endif /* defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) */
+
+       return PVRSRV_OK;
+
+return_error_:
+       DebugCommonDeInitDriver();
+
+       return eError;
+}
+
+void DebugCommonDeInitDriver(void)
+{
+#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON)
+       if (gpsDebugLevelDIEntry != NULL)
+       {
+               DIDestroyEntry(gpsDebugLevelDIEntry);
+       }
+#endif /* defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) */
+
+#if defined(SUPPORT_RGX) && !defined(NO_HARDWARE)
+       if (ghGpuUtilUserDebugFS != NULL)
+       {
+               SORgxGpuUtilStatsUnregister(ghGpuUtilUserDebugFS);
+               ghGpuUtilUserDebugFS = NULL;
+       }
+#endif /* defined(SUPPORT_RGX) && !defined(NO_HARDWARE) */
+
+#ifdef SUPPORT_VALIDATION
+       if (gpsTestMemLeakDIEntry != NULL)
+       {
+               DIDestroyEntry(gpsTestMemLeakDIEntry);
+       }
+#endif /* SUPPORT_VALIDATION */
+
+       if (gpsStatusDIEntry != NULL)
+       {
+               DIDestroyEntry(gpsStatusDIEntry);
+       }
+
+       if (gpsVersionDIEntry != NULL)
+       {
+               DIDestroyEntry(gpsVersionDIEntry);
+       }
+}
+
+PVRSRV_ERROR DebugCommonInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_DEVICE_DEBUG_INFO *psDebugInfo = &psDeviceNode->sDebugInfo;
+       PVRSRV_ERROR eError;
+
+       {
+               IMG_CHAR pszDeviceId[sizeof("gpu4294967296")];
+
+               OSSNPrintf(pszDeviceId, sizeof(pszDeviceId), "gpu%02u",
+                          psDeviceNode->sDevId.ui32InternalID);
+
+               eError = DICreateGroup(pszDeviceId, NULL, &psDebugInfo->psGroup);
+               PVR_GOTO_IF_ERROR(eError, return_error_);
+       }
+
+       {
+               DI_ITERATOR_CB sIterator = {.pfnShow = _DebugDumpDebugDIShow};
+               eError = DICreateEntry("debug_dump", psDebugInfo->psGroup, &sIterator,
+                                      psDeviceNode, DI_ENTRY_TYPE_GENERIC,
+                                      &psDebugInfo->psDumpDebugEntry);
+               PVR_GOTO_IF_ERROR(eError, return_error_);
+       }
+
+#ifdef SUPPORT_RGX
+       if (! PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               {
+                       DI_ITERATOR_CB sIterator = {.pfnShow = _DebugFWTraceDIShow};
+                       eError = DICreateEntry("firmware_trace", psDebugInfo->psGroup, &sIterator,
+                                              psDeviceNode, DI_ENTRY_TYPE_GENERIC,
+                                              &psDebugInfo->psFWTraceEntry);
+                       PVR_GOTO_IF_ERROR(eError, return_error_);
+               }
+
+#ifdef SUPPORT_FIRMWARE_GCOV
+               {
+                       DI_ITERATOR_CB sIterator = {
+                               .pfnStart = _FirmwareGcovDIStart,
+                               .pfnStop = _FirmwareGcovDIStop,
+                               .pfnNext = _FirmwareGcovDINext,
+                               .pfnShow = _FirmwareGcovDIShow
+                       };
+
+                       eError = DICreateEntry("firmware_gcov", psDebugInfo->psGroup, &sIterator,
+                                              psDeviceNode, DI_ENTRY_TYPE_GENERIC,
+                                              &psDebugInfo->psFWGCOVEntry);
+                       PVR_GOTO_IF_ERROR(eError, return_error_);
+               }
+#endif /* SUPPORT_FIRMWARE_GCOV */
+
+               {
+                       DI_ITERATOR_CB sIterator = {.pfnShow = _FirmwareMappingsDIShow};
+                       eError = DICreateEntry("firmware_mappings", psDebugInfo->psGroup, &sIterator,
+                                              psDeviceNode, DI_ENTRY_TYPE_GENERIC,
+                                              &psDebugInfo->psFWMappingsEntry);
+                       PVR_GOTO_IF_ERROR(eError, return_error_);
+               }
+
+#if defined(SUPPORT_VALIDATION) || defined(SUPPORT_RISCV_GDB)
+               {
+                       DI_ITERATOR_CB sIterator = {
+                               .pfnRead = _RiscvDmiRead,
+                               .pfnWrite = _RiscvDmiWrite,
+                               .ui32WriteLenMax = ((RISCV_DMI_SIZE)+1U)
+                       };
+                       eError = DICreateEntry("riscv_dmi", psDebugInfo->psGroup, &sIterator, psDeviceNode,
+                                              DI_ENTRY_TYPE_RANDOM_ACCESS, &psDebugInfo->psRiscvDmiDIEntry);
+                       PVR_GOTO_IF_ERROR(eError, return_error_);
+                       psDebugInfo->ui64RiscvDmi = 0ULL;
+               }
+#endif /* SUPPORT_VALIDATION || SUPPORT_RISCV_GDB */
+       }
+#ifdef SUPPORT_VALIDATION
+       {
+               DI_ITERATOR_CB sIterator = {
+                       .pfnSeek = _RgxRegsSeek,
+                       .pfnRead = _RgxRegsRead,
+                       .pfnWrite = _RgxRegsWrite,
+                       //Max size of input binary data is 4 bytes (UINT32) or 8 bytes (UINT64)
+                       .ui32WriteLenMax = ((8U)+1U)
+               };
+               eError = DICreateEntry("rgxregs", psDebugInfo->psGroup, &sIterator, psDeviceNode,
+                                      DI_ENTRY_TYPE_RANDOM_ACCESS, &psDebugInfo->psRGXRegsEntry);
+
+               PVR_GOTO_IF_ERROR(eError, return_error_);
+       }
+#endif /* SUPPORT_VALIDATION */
+
+#ifdef SUPPORT_POWER_VALIDATION_VIA_DEBUGFS
+       if (! PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               DI_ITERATOR_CB sIterator = {
+                       .pfnShow = _PowMonTraceDIShow
+               };
+               eError = DICreateEntry("power_mon", psDebugInfo->psGroup, &sIterator, psDeviceNode,
+                                                          DI_ENTRY_TYPE_GENERIC, &psDebugInfo->psPowMonEntry);
+               PVR_GOTO_IF_ERROR(eError, return_error_);
+       }
+#endif /* SUPPORT_POWER_VALIDATION_VIA_DEBUGFS */
+#ifdef SUPPORT_POWER_SAMPLING_VIA_DEBUGFS
+       {
+               DI_ITERATOR_CB sIterator = {
+                       .pfnShow = _DebugPowerDataDIShow,
+                       .pfnWrite = PowerDataSet,
+                       //Expects '0' or '1' plus Null terminator
+                       .ui32WriteLenMax = ((1U)+1U)
+               };
+               eError = DICreateEntry("power_data", psDebugInfo->psGroup, &sIterator, psDeviceNode,
+                                      DI_ENTRY_TYPE_GENERIC, &psDebugInfo->psPowerDataEntry);
+               PVR_GOTO_IF_ERROR(eError, return_error_);
+       }
+#endif /* SUPPORT_POWER_SAMPLING_VIA_DEBUGFS */
+#endif /* SUPPORT_RGX */
+
+       return PVRSRV_OK;
+
+return_error_:
+       DebugCommonDeInitDevice(psDeviceNode);
+
+       return eError;
+}
+
+void DebugCommonDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_DEVICE_DEBUG_INFO *psDebugInfo = &psDeviceNode->sDebugInfo;
+
+#ifdef SUPPORT_POWER_SAMPLING_VIA_DEBUGFS
+       if (psDebugInfo->psPowerDataEntry != NULL)
+       {
+               DIDestroyEntry(psDebugInfo->psPowerDataEntry);
+               psDebugInfo->psPowerDataEntry = NULL;
+       }
+#endif /* SUPPORT_POWER_SAMPLING_VIA_DEBUGFS */
+
+#ifdef SUPPORT_POWER_VALIDATION_VIA_DEBUGFS
+       if (psDebugInfo->psPowMonEntry != NULL)
+       {
+               DIDestroyEntry(psDebugInfo->psPowMonEntry);
+               psDebugInfo->psPowMonEntry = NULL;
+       }
+#endif /* SUPPORT_POWER_VALIDATION_VIA_DEBUGFS */
+
+#ifdef SUPPORT_VALIDATION
+       if (psDebugInfo->psRGXRegsEntry != NULL)
+       {
+               DIDestroyEntry(psDebugInfo->psRGXRegsEntry);
+               psDebugInfo->psRGXRegsEntry = NULL;
+       }
+#endif /* SUPPORT_VALIDATION */
+
+#ifdef SUPPORT_RGX
+       if (psDebugInfo->psFWTraceEntry != NULL)
+       {
+               DIDestroyEntry(psDebugInfo->psFWTraceEntry);
+               psDebugInfo->psFWTraceEntry = NULL;
+       }
+
+#ifdef SUPPORT_FIRMWARE_GCOV
+       if (psDebugInfo->psFWGCOVEntry != NULL)
+       {
+               DIDestroyEntry(psDebugInfo->psFWGCOVEntry);
+               psDebugInfo->psFWGCOVEntry = NULL;
+       }
+#endif
+
+       if (psDebugInfo->psFWMappingsEntry != NULL)
+       {
+               DIDestroyEntry(psDebugInfo->psFWMappingsEntry);
+               psDebugInfo->psFWMappingsEntry = NULL;
+       }
+
+#if defined(SUPPORT_VALIDATION) || defined(SUPPORT_RISCV_GDB)
+       if (psDebugInfo->psRiscvDmiDIEntry != NULL)
+       {
+               DIDestroyEntry(psDebugInfo->psRiscvDmiDIEntry);
+               psDebugInfo->psRiscvDmiDIEntry = NULL;
+       }
+#endif
+#endif /* SUPPORT_RGX */
+
+       if (psDebugInfo->psDumpDebugEntry != NULL)
+       {
+               DIDestroyEntry(psDebugInfo->psDumpDebugEntry);
+               psDebugInfo->psDumpDebugEntry = NULL;
+       }
+
+       if (psDebugInfo->psGroup != NULL)
+       {
+               DIDestroyGroup(psDebugInfo->psGroup);
+               psDebugInfo->psGroup = NULL;
+       }
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/devicemem_heapcfg.c b/drivers/gpu/drm/img/img-rogue/services/server/common/devicemem_heapcfg.c
new file mode 100644 (file)
index 0000000..f38a612
--- /dev/null
@@ -0,0 +1,184 @@
+/*************************************************************************/ /*!
+@File           devicemem_heapcfg.c
+@Title          Device Heap Configuration Helper Functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device memory management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+/* our exported API */
+#include "devicemem_heapcfg.h"
+#include "devicemem_utils.h"
+
+#include "device.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+
+#include "connection_server.h"
+
+static INLINE void _CheckBlueprintHeapAlignment(DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint)
+{
+       IMG_UINT32 ui32OSPageSize = OSGetPageSize();
+
+       /* Any heap length should be at least the OS page size and
+        * a multiple of the OS page size */
+       if ((psHeapBlueprint->uiHeapLength < DEVMEM_HEAP_MINIMUM_SIZE) ||
+               (psHeapBlueprint->uiHeapLength & (ui32OSPageSize - 1)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Invalid Heap \"%s\" Size: "
+                        "%"IMG_UINT64_FMTSPEC
+                        "("IMG_DEVMEM_SIZE_FMTSPEC")",
+                        __func__,
+                        psHeapBlueprint->pszName,
+                        psHeapBlueprint->uiHeapLength,
+                        psHeapBlueprint->uiHeapLength));
+               PVR_DPF((PVR_DBG_ERROR,
+                        "Heap Size should always be a non-zero value and a "
+                        "multiple of OS Page Size:%u(0x%x)",
+                        ui32OSPageSize, ui32OSPageSize));
+               PVR_ASSERT(psHeapBlueprint->uiHeapLength >= ui32OSPageSize);
+       }
+
+
+       PVR_ASSERT(psHeapBlueprint->uiReservedRegionLength % DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY == 0);
+}
+
+void HeapCfgBlueprintInit(const IMG_CHAR        *pszName,
+                             IMG_UINT64             ui64HeapBaseAddr,
+                             IMG_DEVMEM_SIZE_T      uiHeapLength,
+                             IMG_DEVMEM_SIZE_T      uiReservedRegionLength,
+                             IMG_UINT32             ui32Log2DataPageSize,
+                             IMG_UINT32             uiLog2ImportAlignment,
+                             DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint)
+{
+       psHeapBlueprint->pszName                = pszName;
+       psHeapBlueprint->sHeapBaseAddr.uiAddr   = ui64HeapBaseAddr;
+       psHeapBlueprint->uiHeapLength           = uiHeapLength;
+       psHeapBlueprint->uiReservedRegionLength = uiReservedRegionLength;
+       psHeapBlueprint->uiLog2DataPageSize     = ui32Log2DataPageSize;
+       psHeapBlueprint->uiLog2ImportAlignment  = uiLog2ImportAlignment;
+
+       _CheckBlueprintHeapAlignment(psHeapBlueprint);
+}
+
+PVRSRV_ERROR
+HeapCfgHeapConfigCount(CONNECTION_DATA * psConnection,
+                                          const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                          IMG_UINT32 *puiNumHeapConfigsOut)
+{
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       *puiNumHeapConfigsOut = psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs;
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+HeapCfgHeapCount(CONNECTION_DATA * psConnection,
+                                const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                IMG_UINT32 uiHeapConfigIndex,
+                                IMG_UINT32 *puiNumHeapsOut)
+{
+       if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs)
+       {
+               return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX;
+       }
+
+       *puiNumHeapsOut = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].uiNumHeaps;
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+HeapCfgHeapConfigName(CONNECTION_DATA * psConnection,
+                                         const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                         IMG_UINT32 uiHeapConfigIndex,
+                                         IMG_UINT32 uiHeapConfigNameBufSz,
+                                         IMG_CHAR *pszHeapConfigNameOut)
+{
+       if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs)
+       {
+               return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX;
+       }
+
+       OSSNPrintf(pszHeapConfigNameOut, uiHeapConfigNameBufSz, "%s", psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].pszName);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+HeapCfgHeapDetails(CONNECTION_DATA * psConnection,
+                                  const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                  IMG_UINT32 uiHeapConfigIndex,
+                                  IMG_UINT32 uiHeapIndex,
+                                  IMG_UINT32 uiHeapNameBufSz,
+                                  IMG_CHAR *pszHeapNameOut,
+                                  IMG_DEV_VIRTADDR *psDevVAddrBaseOut,
+                                  IMG_DEVMEM_SIZE_T *puiHeapLengthOut,
+                                  IMG_DEVMEM_SIZE_T *puiReservedRegionLengthOut,
+                                  IMG_UINT32 *puiLog2DataPageSizeOut,
+                                  IMG_UINT32 *puiLog2ImportAlignmentOut)
+{
+       DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint;
+
+       if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs)
+       {
+               return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX;
+       }
+
+       if (uiHeapIndex >= psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].uiNumHeaps)
+       {
+               return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX;
+       }
+
+       psHeapBlueprint = &psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].psHeapBlueprintArray[uiHeapIndex];
+
+       OSSNPrintf(pszHeapNameOut, uiHeapNameBufSz, "%s", psHeapBlueprint->pszName);
+       *psDevVAddrBaseOut = psHeapBlueprint->sHeapBaseAddr;
+       *puiHeapLengthOut = psHeapBlueprint->uiHeapLength;
+       *puiReservedRegionLengthOut = psHeapBlueprint->uiReservedRegionLength;
+       *puiLog2DataPageSizeOut = psHeapBlueprint->uiLog2DataPageSize;
+       *puiLog2ImportAlignmentOut = psHeapBlueprint->uiLog2ImportAlignment;
+
+       return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/devicemem_history_server.c b/drivers/gpu/drm/img/img-rogue/services/server/common/devicemem_history_server.c
new file mode 100644 (file)
index 0000000..412a51b
--- /dev/null
@@ -0,0 +1,1962 @@
+/*************************************************************************/ /*!
+@File
+@Title          Devicemem history functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Devicemem history functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "allocmem.h"
+#include "img_defs.h"
+#include "pmr.h"
+#include "pvrsrv.h"
+#include "pvrsrv_device.h"
+#include "pvr_debug.h"
+#include "devicemem_server.h"
+#include "lock.h"
+#include "devicemem_history_server.h"
+#include "pdump_km.h"
+#include "di_server.h"
+
+#define ALLOCATION_LIST_NUM_ENTRIES 10000
+
+/* data type to hold an allocation index.
+ * we make it 16 bits wide if possible
+ */
+#if ALLOCATION_LIST_NUM_ENTRIES <= 0xFFFF
+typedef uint16_t ALLOC_INDEX_T;
+#else
+typedef uint32_t ALLOC_INDEX_T;
+#endif
+
+/* a record describing a single allocation known to DeviceMemHistory.
+ * this is an element in a doubly linked list of allocations
+ */
+typedef struct _RECORD_ALLOCATION_
+{
+       /* time when this RECORD_ALLOCATION was created/initialised */
+       IMG_UINT64 ui64CreationTime;
+       /* serial number of the PMR relating to this allocation */
+       IMG_UINT64 ui64Serial;
+       /* base DevVAddr of this allocation */
+       IMG_DEV_VIRTADDR sDevVAddr;
+       /* size in bytes of this allocation */
+       IMG_DEVMEM_SIZE_T uiSize;
+       /* Log2 page size of this allocation's GPU pages */
+       IMG_UINT32 ui32Log2PageSize;
+       /* Process ID (PID) this allocation belongs to */
+       IMG_PID uiPID;
+       /* index of previous allocation in the list */
+       ALLOC_INDEX_T ui32Prev;
+       /* index of next allocation in the list */
+       ALLOC_INDEX_T ui32Next;
+       /* annotation/name of this allocation */
+       IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN];
+} RECORD_ALLOCATION;
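+
+/* Allocation records live in a fixed pool of ALLOCATION_LIST_NUM_ENTRIES
+ * entries chained into a most-recently-used list by index (ui32Prev/ui32Next);
+ * when a new allocation is recorded, the oldest entry is recycled
+ * (see GetFreeAllocation/CreateAllocation below).
+ */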
+
+/* each command in the circular buffer is prefixed with an 8-bit value
+ * denoting the command type
+ */
+typedef enum _COMMAND_TYPE_
+{
+       COMMAND_TYPE_NONE,
+       COMMAND_TYPE_TIMESTAMP,
+       COMMAND_TYPE_MAP_ALL,
+       COMMAND_TYPE_UNMAP_ALL,
+       COMMAND_TYPE_MAP_RANGE,
+       COMMAND_TYPE_UNMAP_RANGE,
+       /* sentinel value */
+       COMMAND_TYPE_COUNT,
+} COMMAND_TYPE;
+
+/* Timestamp command:
+ * This command is inserted into the circular buffer to provide an updated
+ * timestamp.
+ * The nanosecond-accuracy timestamp is packed into a 56-bit integer, in order
+ * for the whole command to fit into 8 bytes.
+ */
+typedef struct _COMMAND_TIMESTAMP_
+{
+       IMG_UINT8 aui8TimeNs[7];
+} COMMAND_TIMESTAMP;
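+
+/* For example (illustrative value): a timestamp of 0x0123456789ABCDEF ns is
+ * stored least-significant byte first as EF CD AB 89 67 45 23, dropping the
+ * top byte (0x01); TIME_STAMP_MASK below accounts for this loss.
+ */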
+
+/* MAP_ALL command:
+ * This command denotes the allocation at the given index was wholly mapped
+ * in to the GPU MMU
+ */
+typedef struct _COMMAND_MAP_ALL_
+{
+       ALLOC_INDEX_T uiAllocIndex;
+} COMMAND_MAP_ALL;
+
+/* UNMAP_ALL command:
+ * This command denotes the allocation at the given index was wholly unmapped
+ * from the GPU MMU
+ * Note: COMMAND_MAP_ALL and COMMAND_UNMAP_ALL commands have the same layout.
+ */
+typedef COMMAND_MAP_ALL COMMAND_UNMAP_ALL;
+
+/* packing attributes for the MAP_RANGE command */
+#define MAP_RANGE_MAX_START ((1 << 18) - 1)
+#define MAP_RANGE_MAX_RANGE ((1 << 12) - 1)
+
+/* MAP_RANGE command:
+ * Denotes a range of pages within the given allocation being mapped.
+ * The range is expressed as [Page Index] + [Page Count]
+ * This information is packed into a 40-bit integer, in order to make
+ * the command size 8 bytes.
+ */
+
+typedef struct _COMMAND_MAP_RANGE_
+{
+       IMG_UINT8 aui8Data[5];
+       ALLOC_INDEX_T uiAllocIndex;
+} COMMAND_MAP_RANGE;
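+
+/* For example (illustrative values): StartPage 0x345 and Count 0x012 pack to
+ * (0x345 << 12) | 0x012 = 0x345012, stored least-significant byte first in
+ * aui8Data as 12 50 34 00 00 (see MapRangePack/MapRangeUnpack below).
+ */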
+
+/* UNMAP_RANGE command:
+ * Denotes a range of pages within the given allocation being unmapped.
+ * The range is expressed as [Page Index] + [Page Count]
+ * This information is packed into a 40-bit integer, in order to make
+ * the command size 8 bytes.
+ * Note: COMMAND_MAP_RANGE and COMMAND_UNMAP_RANGE commands have the same layout.
+ */
+typedef COMMAND_MAP_RANGE COMMAND_UNMAP_RANGE;
+
+/* wrapper structure for a command */
+typedef struct _COMMAND_WRAPPER_
+{
+       IMG_UINT8 ui8Type;
+       union {
+               COMMAND_TIMESTAMP sTimeStamp;
+               COMMAND_MAP_ALL sMapAll;
+               COMMAND_UNMAP_ALL sUnmapAll;
+               COMMAND_MAP_RANGE sMapRange;
+               COMMAND_UNMAP_RANGE sUnmapRange;
+       } u;
+} COMMAND_WRAPPER;
+
+/* target size for the circular buffer of commands */
+#define CIRCULAR_BUFFER_SIZE_KB 2048
+/* turn the circular buffer target size into a number of commands */
+#define CIRCULAR_BUFFER_NUM_COMMANDS ((CIRCULAR_BUFFER_SIZE_KB * 1024) / sizeof(COMMAND_WRAPPER))
+
+/* index value denoting the end of a list */
+#define END_OF_LIST 0xFFFFFFFF
+#define ALLOC_INDEX_TO_PTR(idx) (&(gsDevicememHistoryData.sRecords.pasAllocations[idx]))
+#define CHECK_ALLOC_INDEX(idx) ((idx) < ALLOCATION_LIST_NUM_ENTRIES)
+
+/* wrapper structure for the allocation records and the commands circular buffer */
+typedef struct _RECORDS_
+{
+       RECORD_ALLOCATION *pasAllocations;
+       IMG_UINT32 ui32AllocationsListHead;
+
+       IMG_UINT32 ui32Head;
+       IMG_UINT32 ui32Tail;
+       COMMAND_WRAPPER *pasCircularBuffer;
+} RECORDS;
+
+typedef struct _DEVICEMEM_HISTORY_DATA_
+{
+       /* DI entry */
+       DI_ENTRY *psDIEntry;
+
+       RECORDS sRecords;
+       POS_LOCK hLock;
+} DEVICEMEM_HISTORY_DATA;
+
+static DEVICEMEM_HISTORY_DATA gsDevicememHistoryData;
+
+/* gsDevicememHistoryData is static, hLock is NULL unless
+ * EnablePageFaultDebug is set and DevicememHistoryInitKM()
+ * was called.
+ */
+static void DevicememHistoryLock(void)
+{
+       if (gsDevicememHistoryData.hLock)
+       {
+               OSLockAcquire(gsDevicememHistoryData.hLock);
+       }
+}
+
+static void DevicememHistoryUnlock(void)
+{
+       if (gsDevicememHistoryData.hLock)
+       {
+               OSLockRelease(gsDevicememHistoryData.hLock);
+       }
+}
+
+/* given a time stamp, calculate the age in nanoseconds */
+static IMG_UINT64 _CalculateAge(IMG_UINT64 ui64Now,
+                                               IMG_UINT64 ui64Then,
+                                               IMG_UINT64 ui64Max)
+{
+       if (ui64Now >= ui64Then)
+       {
+               /* no clock wrap */
+               return ui64Now - ui64Then;
+       }
+       else
+       {
+               /* clock has wrapped */
+               return (ui64Max - ui64Then) + ui64Now + 1;
+       }
+}
+
+/* AcquireCBSlot:
+ * Acquire the next slot in the circular buffer and
+ * move the circular buffer head along by one
+ * Returns a pointer to the acquired slot.
+ */
+static COMMAND_WRAPPER *AcquireCBSlot(void)
+{
+       COMMAND_WRAPPER *psSlot;
+
+       psSlot = &gsDevicememHistoryData.sRecords.pasCircularBuffer[gsDevicememHistoryData.sRecords.ui32Head];
+
+       gsDevicememHistoryData.sRecords.ui32Head =
+               (gsDevicememHistoryData.sRecords.ui32Head + 1)
+                               % CIRCULAR_BUFFER_NUM_COMMANDS;
+
+       return psSlot;
+}
+
+/* TimeStampPack:
+ * Packs the given timestamp value into the COMMAND_TIMESTAMP structure.
+ * This takes a 64-bit nanosecond timestamp and packs it in to a 56-bit
+ * integer in the COMMAND_TIMESTAMP command.
+ */
+static void TimeStampPack(COMMAND_TIMESTAMP *psTimeStamp, IMG_UINT64 ui64Now)
+{
+       IMG_UINT32 i;
+
+       for (i = 0; i < ARRAY_SIZE(psTimeStamp->aui8TimeNs); i++)
+       {
+               psTimeStamp->aui8TimeNs[i] = ui64Now & 0xFF;
+               ui64Now >>= 8;
+       }
+}
+
+/* packing a 64-bit nanosecond timestamp into a 7-byte integer loses the
+ * top 8 bits of data. This must be taken into account when
+ * comparing a full timestamp against an unpacked timestamp
+ */
+#define TIME_STAMP_MASK ((1LLU << 56) - 1)
+#define DO_TIME_STAMP_MASK(ns64) (ns64 & TIME_STAMP_MASK)
+
+/* TimeStampUnpack:
+ * Unpack the timestamp value from the given COMMAND_TIMESTAMP command
+ */
+static IMG_UINT64 TimeStampUnpack(COMMAND_TIMESTAMP *psTimeStamp)
+{
+       IMG_UINT64 ui64TimeNs = 0;
+       IMG_UINT32 i;
+
+       for (i = ARRAY_SIZE(psTimeStamp->aui8TimeNs); i > 0; i--)
+       {
+               ui64TimeNs <<= 8;
+               ui64TimeNs |= (IMG_UINT64) psTimeStamp->aui8TimeNs[i - 1];
+       }
+
+       return ui64TimeNs;
+}
+
+#if defined(PDUMP)
+
+static void EmitPDumpAllocation(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                IMG_UINT32 ui32AllocationIndex,
+                                RECORD_ALLOCATION *psAlloc)
+{
+       PDUMPCOMMENT(psDeviceNode,
+                       "[SrvPFD] Allocation: %u"
+                       " Addr: " IMG_DEV_VIRTADDR_FMTSPEC
+                       " Size: " IMG_DEVMEM_SIZE_FMTSPEC
+                       " Page size: %u"
+                       " PID: %u"
+                       " Process: %s"
+                       " Name: %s",
+                       ui32AllocationIndex,
+                       psAlloc->sDevVAddr.uiAddr,
+                       psAlloc->uiSize,
+                       1U << psAlloc->ui32Log2PageSize,
+                       psAlloc->uiPID,
+                       OSGetCurrentClientProcessNameKM(),
+                       psAlloc->szName);
+}
+
+static void EmitPDumpMapUnmapAll(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                 COMMAND_TYPE eType,
+                                 IMG_UINT32 ui32AllocationIndex)
+{
+       const IMG_CHAR *pszOpName;
+
+       switch (eType)
+       {
+               case COMMAND_TYPE_MAP_ALL:
+                       pszOpName = "MAP_ALL";
+                       break;
+               case COMMAND_TYPE_UNMAP_ALL:
+                       pszOpName = "UNMAP_ALL";
+                       break;
+               default:
+                       PVR_DPF((PVR_DBG_ERROR, "EmitPDumpMapUnmapAll: Invalid type: %u",
+                                                                               eType));
+                       return;
+
+       }
+
+       PDUMPCOMMENT(psDeviceNode,
+                    "[SrvPFD] Op: %s Allocation: %u",
+                    pszOpName,
+                    ui32AllocationIndex);
+}
+
+static void EmitPDumpMapUnmapRange(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                       COMMAND_TYPE eType,
+                                       IMG_UINT32 ui32AllocationIndex,
+                                       IMG_UINT32 ui32StartPage,
+                                       IMG_UINT32 ui32Count)
+{
+       const IMG_CHAR *pszOpName;
+
+       switch (eType)
+       {
+               case COMMAND_TYPE_MAP_RANGE:
+                       pszOpName = "MAP_RANGE";
+                       break;
+               case COMMAND_TYPE_UNMAP_RANGE:
+                       pszOpName = "UNMAP_RANGE";
+                       break;
+               default:
+                       PVR_DPF((PVR_DBG_ERROR, "EmitPDumpMapUnmapRange: Invalid type: %u",
+                                                                               eType));
+                       return;
+       }
+
+       PDUMPCOMMENT(psDeviceNode,
+                 "[SrvPFD] Op: %s Allocation: %u Start Page: %u Count: %u",
+                 pszOpName,
+                 ui32AllocationIndex,
+                 ui32StartPage,
+                 ui32Count);
+}
+
+#endif
+
+/* InsertTimeStampCommand:
+ * Insert a timestamp command into the circular buffer.
+ */
+static void InsertTimeStampCommand(IMG_UINT64 ui64Now)
+{
+       COMMAND_WRAPPER *psCommand;
+
+       psCommand = AcquireCBSlot();
+
+       psCommand->ui8Type = COMMAND_TYPE_TIMESTAMP;
+
+       TimeStampPack(&psCommand->u.sTimeStamp, ui64Now);
+}
+
+/* InsertMapAllCommand:
+ * Insert a "MAP_ALL" command for the given allocation into the circular buffer
+ */
+static void InsertMapAllCommand(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                IMG_UINT32 ui32AllocIndex)
+{
+       COMMAND_WRAPPER *psCommand;
+
+       psCommand = AcquireCBSlot();
+
+       psCommand->ui8Type = COMMAND_TYPE_MAP_ALL;
+       psCommand->u.sMapAll.uiAllocIndex = ui32AllocIndex;
+
+#if defined(PDUMP)
+       EmitPDumpMapUnmapAll(psDeviceNode, COMMAND_TYPE_MAP_ALL, ui32AllocIndex);
+#else
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+#endif
+}
+
+/* InsertUnmapAllCommand:
+ * Insert a "UNMAP_ALL" command for the given allocation into the circular buffer
+ */
+static void InsertUnmapAllCommand(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                  IMG_UINT32 ui32AllocIndex)
+{
+       COMMAND_WRAPPER *psCommand;
+
+       psCommand = AcquireCBSlot();
+
+       psCommand->ui8Type = COMMAND_TYPE_UNMAP_ALL;
+       psCommand->u.sUnmapAll.uiAllocIndex = ui32AllocIndex;
+
+#if defined(PDUMP)
+       EmitPDumpMapUnmapAll(psDeviceNode, COMMAND_TYPE_UNMAP_ALL, ui32AllocIndex);
+#else
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+#endif
+}
+
+/* MapRangePack:
+ * Pack the given StartPage and Count values into the 40-bit representation
+ * in the MAP_RANGE command.
+ */
+static void MapRangePack(COMMAND_MAP_RANGE *psMapRange,
+                                               IMG_UINT32 ui32StartPage,
+                                               IMG_UINT32 ui32Count)
+{
+       IMG_UINT64 ui64Data;
+       IMG_UINT32 i;
+
+       /* we must encode the data into 40 bits:
+        *   18 bits for the start page index
+        *   12 bits for the range
+       */
+       PVR_ASSERT(ui32StartPage <= MAP_RANGE_MAX_START);
+       PVR_ASSERT(ui32Count <= MAP_RANGE_MAX_RANGE);
+
+       ui64Data = (((IMG_UINT64) ui32StartPage) << 12) | ui32Count;
+
+       for (i = 0; i < ARRAY_SIZE(psMapRange->aui8Data); i++)
+       {
+               psMapRange->aui8Data[i] = ui64Data & 0xFF;
+               ui64Data >>= 8;
+       }
+}
+
+/* MapRangeUnpack:
+ * Unpack the StartPage and Count values from the 40-bit representation
+ * in the MAP_RANGE command.
+ */
+static void MapRangeUnpack(COMMAND_MAP_RANGE *psMapRange,
+                                               IMG_UINT32 *pui32StartPage,
+                                               IMG_UINT32 *pui32Count)
+{
+       IMG_UINT64 ui64Data = 0;
+       IMG_UINT32 i;
+
+       for (i = ARRAY_SIZE(psMapRange->aui8Data); i > 0; i--)
+       {
+               ui64Data <<= 8;
+               ui64Data |= (IMG_UINT64) psMapRange->aui8Data[i - 1];
+       }
+
+       *pui32StartPage = (ui64Data >> 12);
+       *pui32Count = ui64Data & ((1 << 12) - 1);
+}
+
+/* InsertMapRangeCommand:
+ * Insert a MAP_RANGE command into the circular buffer with the given
+ * StartPage and Count values.
+ */
+static void InsertMapRangeCommand(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                               IMG_UINT32 ui32AllocIndex,
+                                               IMG_UINT32 ui32StartPage,
+                                               IMG_UINT32 ui32Count)
+{
+       COMMAND_WRAPPER *psCommand;
+
+       psCommand = AcquireCBSlot();
+
+       psCommand->ui8Type = COMMAND_TYPE_MAP_RANGE;
+       psCommand->u.sMapRange.uiAllocIndex = ui32AllocIndex;
+
+       MapRangePack(&psCommand->u.sMapRange, ui32StartPage, ui32Count);
+
+#if defined(PDUMP)
+       EmitPDumpMapUnmapRange(psDeviceNode,
+                              COMMAND_TYPE_MAP_RANGE,
+                              ui32AllocIndex,
+                              ui32StartPage,
+                              ui32Count);
+#else
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+#endif
+}
+
+/* InsertUnmapRangeCommand:
+ * Insert a UNMAP_RANGE command into the circular buffer with the given
+ * StartPage and Count values.
+ */
+static void InsertUnmapRangeCommand(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                               IMG_UINT32 ui32AllocIndex,
+                                               IMG_UINT32 ui32StartPage,
+                                               IMG_UINT32 ui32Count)
+{
+       COMMAND_WRAPPER *psCommand;
+
+       psCommand = AcquireCBSlot();
+
+       psCommand->ui8Type = COMMAND_TYPE_UNMAP_RANGE;
+       psCommand->u.sMapRange.uiAllocIndex = ui32AllocIndex;
+
+       MapRangePack(&psCommand->u.sMapRange, ui32StartPage, ui32Count);
+
+#if defined(PDUMP)
+       EmitPDumpMapUnmapRange(psDeviceNode,
+                              COMMAND_TYPE_UNMAP_RANGE,
+                              ui32AllocIndex,
+                              ui32StartPage,
+                              ui32Count);
+#else
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+#endif
+}
+
+/* InsertAllocationToList:
+ * Helper function for the allocation list.
+ * Inserts the given allocation at the head of the list, whose current head is
+ * pointed to by pui32ListHead
+ */
+static void InsertAllocationToList(IMG_UINT32 *pui32ListHead, IMG_UINT32 ui32Alloc)
+{
+       RECORD_ALLOCATION *psAlloc;
+
+       psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+
+       if (*pui32ListHead == END_OF_LIST)
+       {
+               /* list is currently empty, so just replace it */
+               *pui32ListHead = ui32Alloc;
+               psAlloc->ui32Next = psAlloc->ui32Prev = *pui32ListHead;
+       }
+       else
+       {
+               RECORD_ALLOCATION *psHeadAlloc;
+               RECORD_ALLOCATION *psTailAlloc;
+
+               psHeadAlloc = ALLOC_INDEX_TO_PTR(*pui32ListHead);
+               psTailAlloc = ALLOC_INDEX_TO_PTR(psHeadAlloc->ui32Prev);
+
+               /* make the new alloc point forwards to the previous head */
+               psAlloc->ui32Next = *pui32ListHead;
+               /* make the new alloc point backwards to the previous tail */
+               psAlloc->ui32Prev = psHeadAlloc->ui32Prev;
+
+               /* the head is now our new alloc */
+               *pui32ListHead = ui32Alloc;
+
+               /* the old head now points back to the new head */
+               psHeadAlloc->ui32Prev = *pui32ListHead;
+
+               /* the tail now points forward to the new head */
+               psTailAlloc->ui32Next = ui32Alloc;
+       }
+}
+
+static void InsertAllocationToBusyList(IMG_UINT32 ui32Alloc)
+{
+       InsertAllocationToList(&gsDevicememHistoryData.sRecords.ui32AllocationsListHead, ui32Alloc);
+}
+
+/* RemoveAllocationFromList:
+ * Helper function for the allocation list.
+ * Removes the given allocation from the list, whose head is
+ * pointed to by pui32ListHead
+ */
+static void RemoveAllocationFromList(IMG_UINT32 *pui32ListHead, IMG_UINT32 ui32Alloc)
+{
+       RECORD_ALLOCATION *psAlloc;
+
+       psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+
+       /* if this is the only element in the list then just make the list empty */
+       if ((*pui32ListHead == ui32Alloc) && (psAlloc->ui32Next == ui32Alloc))
+       {
+               *pui32ListHead = END_OF_LIST;
+       }
+       else
+       {
+               RECORD_ALLOCATION *psPrev, *psNext;
+
+               psPrev = ALLOC_INDEX_TO_PTR(psAlloc->ui32Prev);
+               psNext = ALLOC_INDEX_TO_PTR(psAlloc->ui32Next);
+
+               /* remove the allocation from the list */
+               psPrev->ui32Next = psAlloc->ui32Next;
+               psNext->ui32Prev = psAlloc->ui32Prev;
+
+               /* if this allocation is the head then update the head */
+               if (*pui32ListHead == ui32Alloc)
+               {
+                       *pui32ListHead = psAlloc->ui32Prev;
+               }
+       }
+}
+
+static void RemoveAllocationFromBusyList(IMG_UINT32 ui32Alloc)
+{
+       RemoveAllocationFromList(&gsDevicememHistoryData.sRecords.ui32AllocationsListHead, ui32Alloc);
+}
+
+/* TouchBusyAllocation:
+ * Move the given allocation to the head of the list
+ */
+static void TouchBusyAllocation(IMG_UINT32 ui32Alloc)
+{
+       RemoveAllocationFromBusyList(ui32Alloc);
+       InsertAllocationToBusyList(ui32Alloc);
+}
+
+/* GetOldestBusyAllocation:
+ * Returns the index of the oldest allocation in the MRU list
+ */
+static IMG_UINT32 GetOldestBusyAllocation(void)
+{
+       IMG_UINT32 ui32Alloc;
+       RECORD_ALLOCATION *psAlloc;
+
+       ui32Alloc = gsDevicememHistoryData.sRecords.ui32AllocationsListHead;
+
+       if (ui32Alloc == END_OF_LIST)
+       {
+               return END_OF_LIST;
+       }
+
+       psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+
+       return psAlloc->ui32Prev;
+}
+
+static IMG_UINT32 GetFreeAllocation(void)
+{
+       IMG_UINT32 ui32Alloc;
+
+       ui32Alloc = GetOldestBusyAllocation();
+
+       return ui32Alloc;
+}
+
+
+/* InitialiseAllocation:
+ * Initialise the given allocation structure with the given properties
+ */
+static void InitialiseAllocation(RECORD_ALLOCATION *psAlloc,
+                                                       const IMG_CHAR *pszName,
+                                                       IMG_UINT64 ui64Serial,
+                                                       IMG_PID uiPID,
+                                                       IMG_DEV_VIRTADDR sDevVAddr,
+                                                       IMG_DEVMEM_SIZE_T uiSize,
+                                                       IMG_UINT32 ui32Log2PageSize)
+{
+       OSStringLCopy(psAlloc->szName, pszName, sizeof(psAlloc->szName));
+       psAlloc->ui64Serial = ui64Serial;
+       psAlloc->uiPID = uiPID;
+       psAlloc->sDevVAddr = sDevVAddr;
+       psAlloc->uiSize = uiSize;
+       psAlloc->ui32Log2PageSize = ui32Log2PageSize;
+       psAlloc->ui64CreationTime = OSClockns64();
+}
+
+/* CreateAllocation:
+ * Creates a new allocation with the given properties then outputs the
+ * index of the allocation
+ */
+static PVRSRV_ERROR CreateAllocation(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                       const IMG_CHAR *pszName,
+                                                       IMG_UINT64 ui64Serial,
+                                                       IMG_PID uiPID,
+                                                       IMG_DEV_VIRTADDR sDevVAddr,
+                                                       IMG_DEVMEM_SIZE_T uiSize,
+                                                       IMG_UINT32 ui32Log2PageSize,
+                                                       IMG_BOOL bAutoPurge,
+                                                       IMG_UINT32 *puiAllocationIndex)
+{
+       IMG_UINT32 ui32Alloc;
+       RECORD_ALLOCATION *psAlloc;
+
+       ui32Alloc = GetFreeAllocation();
+
+       psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+
+       InitialiseAllocation(ALLOC_INDEX_TO_PTR(ui32Alloc),
+                                               pszName,
+                                               ui64Serial,
+                                               uiPID,
+                                               sDevVAddr,
+                                               uiSize,
+                                               ui32Log2PageSize);
+
+       /* put the newly initialised allocation at the front of the MRU list */
+       TouchBusyAllocation(ui32Alloc);
+
+       *puiAllocationIndex = ui32Alloc;
+
+#if defined(PDUMP)
+       EmitPDumpAllocation(psDeviceNode, ui32Alloc, psAlloc);
+#else
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+#endif
+
+       return PVRSRV_OK;
+}
+
+/* MatchAllocation:
+ * Tests if the allocation at the given index matches the supplied properties.
+ * Returns IMG_TRUE if it is a match, otherwise IMG_FALSE.
+ */
+static IMG_BOOL MatchAllocation(IMG_UINT32 ui32AllocationIndex,
+                                               IMG_UINT64 ui64Serial,
+                                               IMG_DEV_VIRTADDR sDevVAddr,
+                                               IMG_DEVMEM_SIZE_T uiSize,
+                                               const IMG_CHAR *pszName,
+                                               IMG_UINT32 ui32Log2PageSize,
+                                               IMG_PID uiPID)
+{
+       RECORD_ALLOCATION *psAlloc;
+
+       psAlloc = ALLOC_INDEX_TO_PTR(ui32AllocationIndex);
+
+       return (psAlloc->ui64Serial == ui64Serial) &&
+              (psAlloc->sDevVAddr.uiAddr == sDevVAddr.uiAddr) &&
+              (psAlloc->uiSize == uiSize) &&
+              (psAlloc->ui32Log2PageSize == ui32Log2PageSize) &&
+              (OSStringNCompare(psAlloc->szName, pszName, DEVMEM_ANNOTATION_MAX_LEN) == 0);
+}
+
+/* FindOrCreateAllocation:
+ * Convenience function.
+ * Given a set of allocation properties (serial, DevVAddr, size, name, etc),
+ * this function will look for an existing record of this allocation and
+ * create the allocation if there is no existing record
+ */
+static PVRSRV_ERROR FindOrCreateAllocation(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                       IMG_UINT32 ui32AllocationIndexHint,
+                                                       IMG_UINT64 ui64Serial,
+                                                       IMG_DEV_VIRTADDR sDevVAddr,
+                                                       IMG_DEVMEM_SIZE_T uiSize,
+                                                       const char *pszName,
+                                                       IMG_UINT32 ui32Log2PageSize,
+                                                       IMG_PID uiPID,
+                                                       IMG_BOOL bSparse,
+                                                       IMG_UINT32 *pui32AllocationIndexOut,
+                                                       IMG_BOOL *pbCreated)
+{
+       IMG_UINT32 ui32AllocationIndex;
+       PVRSRV_ERROR eError;
+
+       if (ui32AllocationIndexHint != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE)
+       {
+               IMG_BOOL bHaveAllocation;
+
+               /* first, try to match against the index given by the client.
+                * if the caller provided a hint but the allocation record is no longer
+                * there, it must have been purged, so go ahead and create a new allocation
+                */
+               bHaveAllocation = MatchAllocation(ui32AllocationIndexHint,
+                                                               ui64Serial,
+                                                               sDevVAddr,
+                                                               uiSize,
+                                                               pszName,
+                                                               ui32Log2PageSize,
+                                                               uiPID);
+               if (bHaveAllocation)
+               {
+                       *pbCreated = IMG_FALSE;
+                       *pui32AllocationIndexOut = ui32AllocationIndexHint;
+                       return PVRSRV_OK;
+               }
+       }
+
+       /* if there is no record of the allocation then we
+        * create it now
+        */
+       eError = CreateAllocation(psDeviceNode,
+                                       pszName,
+                                       ui64Serial,
+                                       uiPID,
+                                       sDevVAddr,
+                                       uiSize,
+                                       ui32Log2PageSize,
+                                       IMG_TRUE,
+                                       &ui32AllocationIndex);
+
+       if (eError == PVRSRV_OK)
+       {
+               *pui32AllocationIndexOut = ui32AllocationIndex;
+               *pbCreated = IMG_TRUE;
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                       "%s: Failed to create record for allocation %s",
+                                                               __func__,
+                                                               pszName));
+       }
+
+       return eError;
+}
+
+/* GenerateMapUnmapCommandsForSparsePMR:
+ * Generate the MAP_RANGE or UNMAP_RANGE commands for the sparse PMR, using the PMR's
+ * current mapping table
+ *
+ * PMR: The PMR whose mapping table to read.
+ * ui32AllocIndex: The allocation to attribute the MAP_RANGE/UNMAP_RANGE commands to.
+ * bMap: Set to IMG_TRUE for mapping or IMG_FALSE for unmapping
+ *
+ * This function goes through every page in the PMR's mapping table and looks for
+ * virtually contiguous ranges to record as being mapped or unmapped.
+ */
+static void GenerateMapUnmapCommandsForSparsePMR(PMR *psPMR,
+                                                       IMG_UINT32 ui32AllocIndex,
+                                                       IMG_BOOL bMap)
+{
+       PMR_MAPPING_TABLE *psMappingTable;
+       IMG_UINT32 ui32DonePages = 0;
+       IMG_UINT32 ui32NumPages;
+       IMG_UINT32 i;
+       IMG_BOOL bInARun = IMG_FALSE;
+       IMG_UINT32 ui32CurrentStart = 0;
+       IMG_UINT32 ui32RunCount = 0;
+
+       psMappingTable = PMR_GetMappingTable(psPMR);
+       ui32NumPages = psMappingTable->ui32NumPhysChunks;
+
+       if (ui32NumPages == 0)
+       {
+               /* nothing to do */
+               return;
+       }
+
+       for (i = 0; i < psMappingTable->ui32NumVirtChunks; i++)
+       {
+               if (psMappingTable->aui32Translation[i] != TRANSLATION_INVALID)
+               {
+                       if (!bInARun)
+                       {
+                               bInARun = IMG_TRUE;
+                               ui32CurrentStart = i;
+                               ui32RunCount = 1;
+                       }
+                       else
+                       {
+                               ui32RunCount++;
+                       }
+               }
+
+               if (bInARun)
+               {
+                       /* test if we need to end this current run and generate the command,
+                        * either because the next page is not virtually contiguous
+                        * to the current page, we have reached the maximum range,
+                        * or this is the last page in the mapping table
+                        */
+                       if ((psMappingTable->aui32Translation[i] == TRANSLATION_INVALID) ||
+                               (ui32RunCount == MAP_RANGE_MAX_RANGE) ||
+                               (i == (psMappingTable->ui32NumVirtChunks - 1)))
+                       {
+                               if (bMap)
+                               {
+                                       InsertMapRangeCommand(PMR_DeviceNode(psPMR),
+                                                             ui32AllocIndex,
+                                                             ui32CurrentStart,
+                                                             ui32RunCount);
+                               }
+                               else
+                               {
+                                       InsertUnmapRangeCommand(PMR_DeviceNode(psPMR),
+                                                               ui32AllocIndex,
+                                                               ui32CurrentStart,
+                                                               ui32RunCount);
+                               }
+
+                               ui32DonePages += ui32RunCount;
+
+                               if (ui32DonePages == ui32NumPages)
+                               {
+                                        break;
+                               }
+
+                               bInARun = IMG_FALSE;
+                       }
+               }
+       }
+}
+
+/* GenerateMapUnmapCommandsForChangeList:
+ * Generate the MAP_RANGE or UNMAP_RANGE commands for the sparse PMR, using the
+ * list of page change (page map or page unmap) indices given.
+ *
+ * ui32NumPages: Number of pages which have changed.
+ * pui32PageList: List of indices of the pages which have changed.
+ * ui32AllocIndex: The allocation to attribute the MAP_RANGE/UNMAP_RANGE commands to.
+ * bMap: Set to IMG_TRUE for mapping or IMG_FALSE for unmapping
+ *
+ * This function goes through every page in the list and looks for
+ * virtually contiguous ranges to record as being mapped or unmapped.
+ */
+static void GenerateMapUnmapCommandsForChangeList(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                       IMG_UINT32 ui32NumPages,
+                                                       IMG_UINT32 *pui32PageList,
+                                                       IMG_UINT32 ui32AllocIndex,
+                                                       IMG_BOOL bMap)
+{
+       IMG_UINT32 i;
+       IMG_BOOL bInARun = IMG_FALSE;
+       IMG_UINT32 ui32CurrentStart = 0;
+       IMG_UINT32 ui32RunCount = 0;
+
+       for (i = 0; i < ui32NumPages; i++)
+       {
+               if (!bInARun)
+               {
+                       bInARun = IMG_TRUE;
+                       ui32CurrentStart = pui32PageList[i];
+               }
+
+               ui32RunCount++;
+
+                /* we flush if:
+                * - the next page in the list is not one greater than the current page
+                * - this is the last page in the list
+                * - we have reached the maximum range size
+                */
+               if ((i == (ui32NumPages - 1)) ||
+                       ((pui32PageList[i] + 1) != pui32PageList[i + 1]) ||
+                       (ui32RunCount == MAP_RANGE_MAX_RANGE))
+               {
+                       if (bMap)
+                       {
+                               InsertMapRangeCommand(psDeviceNode,
+                                                                       ui32AllocIndex,
+                                                                       ui32CurrentStart,
+                                                                       ui32RunCount);
+                       }
+                       else
+                       {
+                               InsertUnmapRangeCommand(psDeviceNode,
+                                                                       ui32AllocIndex,
+                                                                       ui32CurrentStart,
+                                                                       ui32RunCount);
+                       }
+
+                       bInARun = IMG_FALSE;
+                       ui32RunCount = 0;
+               }
+       }
+}
+
+/* DevicememHistoryMapKM:
+ * Entry point for when an allocation is mapped into the GPU's MMU
+ *
+ * psPMR: The PMR to which the allocation belongs.
+ * ui32Offset: The offset within the PMR at which the allocation begins.
+ * sDevVAddr: The DevVAddr at which the allocation begins.
+ * szName: Annotation/name for the allocation.
+ * ui32Log2PageSize: Page size of the allocation, expressed in log2 form.
+ * ui32AllocationIndex: Allocation index as provided by the client.
+ *                      We will use this as a short-cut to find the allocation
+ *                      in our records.
+ * pui32AllocationIndexOut: An updated allocation index for the client.
+ *                          This may be a new value if we just created the
+ *                          allocation record.
+ */
+PVRSRV_ERROR DevicememHistoryMapKM(PMR *psPMR,
+                                                       IMG_UINT32 ui32Offset,
+                                                       IMG_DEV_VIRTADDR sDevVAddr,
+                                                       IMG_DEVMEM_SIZE_T uiSize,
+                                                       const char szName[DEVMEM_ANNOTATION_MAX_LEN],
+                                                       IMG_UINT32 ui32Log2PageSize,
+                                                       IMG_UINT32 ui32AllocationIndex,
+                                                       IMG_UINT32 *pui32AllocationIndexOut)
+{
+       IMG_BOOL bSparse = PMR_IsSparse(psPMR);
+       IMG_UINT64 ui64Serial;
+       IMG_PID uiPID = OSGetCurrentClientProcessIDKM();
+       PVRSRV_ERROR eError;
+       IMG_BOOL bCreated;
+
+       if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
+               !CHECK_ALLOC_INDEX(ui32AllocationIndex))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
+                                                               __func__,
+                                                               ui32AllocationIndex));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       PMRGetUID(psPMR, &ui64Serial);
+
+       DevicememHistoryLock();
+
+       eError = FindOrCreateAllocation(PMR_DeviceNode(psPMR),
+                                               ui32AllocationIndex,
+                                               ui64Serial,
+                                               sDevVAddr,
+                                               uiSize,
+                                               szName,
+                                               ui32Log2PageSize,
+                                               uiPID,
+                                               bSparse,
+                                               &ui32AllocationIndex,
+                                               &bCreated);
+
+       if ((eError == PVRSRV_OK) && !bCreated)
+       {
+               /* touch the allocation so it goes to the head of our MRU list */
+               TouchBusyAllocation(ui32AllocationIndex);
+       }
+       else if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
+                                                                       __func__,
+                                                                       szName,
+                                                                       PVRSRVGETERRORSTRING(eError)));
+               goto out_unlock;
+       }
+
+       if (!bSparse)
+       {
+               InsertMapAllCommand(PMR_DeviceNode(psPMR), ui32AllocationIndex);
+       }
+       else
+       {
+               GenerateMapUnmapCommandsForSparsePMR(psPMR,
+                                                               ui32AllocationIndex,
+                                                               IMG_TRUE);
+       }
+
+       InsertTimeStampCommand(OSClockns64());
+
+       *pui32AllocationIndexOut = ui32AllocationIndex;
+
+out_unlock:
+       DevicememHistoryUnlock();
+
+       return eError;
+}
+
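+/* VRangeInsertMapUnmapCommands:
+ * Record a map or unmap of a range of pages belonging to a virtual-range
+ * allocation, splitting the range into chunks of at most MAP_RANGE_MAX_RANGE
+ * pages. Ranges starting beyond MAP_RANGE_MAX_START cannot be recorded and
+ * are dropped with a warning.
+ */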
+static void VRangeInsertMapUnmapCommands(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                       IMG_BOOL bMap,
+                                                       IMG_UINT32 ui32AllocationIndex,
+                                                       IMG_DEV_VIRTADDR sBaseDevVAddr,
+                                                       IMG_UINT32 ui32StartPage,
+                                                       IMG_UINT32 ui32NumPages,
+                                                       const IMG_CHAR *pszName)
+{
+       while (ui32NumPages > 0)
+       {
+               IMG_UINT32 ui32PagesToAdd;
+
+               ui32PagesToAdd = MIN(ui32NumPages, MAP_RANGE_MAX_RANGE);
+
+               if (ui32StartPage > MAP_RANGE_MAX_START)
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "Cannot record %s range beginning at page "
+                                                                       "%u on allocation %s",
+                                                                       bMap ? "map" : "unmap",
+                                                                       ui32StartPage,
+                                                                       pszName));
+                       return;
+               }
+
+               if (bMap)
+               {
+                       InsertMapRangeCommand(psDeviceNode,
+                                                               ui32AllocationIndex,
+                                                               ui32StartPage,
+                                                               ui32PagesToAdd);
+               }
+               else
+               {
+                       InsertUnmapRangeCommand(psDeviceNode,
+                                                               ui32AllocationIndex,
+                                                               ui32StartPage,
+                                                               ui32PagesToAdd);
+               }
+
+               ui32StartPage += ui32PagesToAdd;
+               ui32NumPages -= ui32PagesToAdd;
+       }
+}
+
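+/* DevicememHistoryMapVRangeKM:
+ * Entry point for when a range of pages is mapped within a virtual-range
+ * allocation (one with no backing PMR, so the serial is recorded as 0).
+ * Finds or creates the allocation record and records the mapped page range
+ * against it.
+ */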
+PVRSRV_ERROR DevicememHistoryMapVRangeKM(CONNECTION_DATA *psConnection,
+                                               PVRSRV_DEVICE_NODE *psDeviceNode,
+                                               IMG_DEV_VIRTADDR sBaseDevVAddr,
+                                               IMG_UINT32 ui32StartPage,
+                                               IMG_UINT32 ui32NumPages,
+                                               IMG_DEVMEM_SIZE_T uiAllocSize,
+                                               const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN],
+                                               IMG_UINT32 ui32Log2PageSize,
+                                               IMG_UINT32 ui32AllocationIndex,
+                                               IMG_UINT32 *pui32AllocationIndexOut)
+{
+       IMG_PID uiPID = OSGetCurrentClientProcessIDKM();
+       PVRSRV_ERROR eError;
+       IMG_BOOL bCreated;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
+               !CHECK_ALLOC_INDEX(ui32AllocationIndex))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
+                                                               __func__,
+                                                       ui32AllocationIndex));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       DevicememHistoryLock();
+
+       eError = FindOrCreateAllocation(psDeviceNode,
+                                               ui32AllocationIndex,
+                                               0,
+                                               sBaseDevVAddr,
+                                               uiAllocSize,
+                                               szName,
+                                               ui32Log2PageSize,
+                                               uiPID,
+                                               IMG_FALSE,
+                                               &ui32AllocationIndex,
+                                               &bCreated);
+
+       if ((eError == PVRSRV_OK) && !bCreated)
+       {
+               /* touch the allocation so it goes to the head of our MRU list */
+               TouchBusyAllocation(ui32AllocationIndex);
+       }
+       else if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
+                                                                       __func__,
+                                                                       szName,
+                                                                       PVRSRVGETERRORSTRING(eError)));
+               goto out_unlock;
+       }
+
+       VRangeInsertMapUnmapCommands(psDeviceNode,
+                                               IMG_TRUE,
+                                               ui32AllocationIndex,
+                                               sBaseDevVAddr,
+                                               ui32StartPage,
+                                               ui32NumPages,
+                                               szName);
+
+       *pui32AllocationIndexOut = ui32AllocationIndex;
+
+out_unlock:
+       DevicememHistoryUnlock();
+
+       return eError;
+}
+
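+/* DevicememHistoryUnmapVRangeKM:
+ * Entry point for when a range of pages is unmapped within a virtual-range
+ * allocation. Finds or creates the allocation record and records the
+ * unmapped page range against it.
+ */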
+PVRSRV_ERROR DevicememHistoryUnmapVRangeKM(CONNECTION_DATA *psConnection,
+                                               PVRSRV_DEVICE_NODE *psDeviceNode,
+                                               IMG_DEV_VIRTADDR sBaseDevVAddr,
+                                               IMG_UINT32 ui32StartPage,
+                                               IMG_UINT32 ui32NumPages,
+                                               IMG_DEVMEM_SIZE_T uiAllocSize,
+                                               const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN],
+                                               IMG_UINT32 ui32Log2PageSize,
+                                               IMG_UINT32 ui32AllocationIndex,
+                                               IMG_UINT32 *pui32AllocationIndexOut)
+{
+       IMG_PID uiPID = OSGetCurrentClientProcessIDKM();
+       PVRSRV_ERROR eError;
+       IMG_BOOL bCreated;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
+               !CHECK_ALLOC_INDEX(ui32AllocationIndex))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
+                                                               __func__,
+                                                       ui32AllocationIndex));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       DevicememHistoryLock();
+
+       eError = FindOrCreateAllocation(psDeviceNode,
+                                               ui32AllocationIndex,
+                                               0,
+                                               sBaseDevVAddr,
+                                               uiAllocSize,
+                                               szName,
+                                               ui32Log2PageSize,
+                                               uiPID,
+                                               IMG_FALSE,
+                                               &ui32AllocationIndex,
+                                               &bCreated);
+
+       if ((eError == PVRSRV_OK) && !bCreated)
+       {
+               /* touch the allocation so it goes to the head of our MRU list */
+               TouchBusyAllocation(ui32AllocationIndex);
+       }
+       else if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
+                                                                       __func__,
+                                                                       szName,
+                                                                       PVRSRVGETERRORSTRING(eError)));
+               goto out_unlock;
+       }
+
+       VRangeInsertMapUnmapCommands(psDeviceNode,
+                                               IMG_FALSE,
+                                               ui32AllocationIndex,
+                                               sBaseDevVAddr,
+                                               ui32StartPage,
+                                               ui32NumPages,
+                                               szName);
+
+       *pui32AllocationIndexOut = ui32AllocationIndex;
+
+out_unlock:
+       DevicememHistoryUnlock();
+
+       return eError;
+}
+
+/* DevicememHistoryUnmapKM:
+ * Entry point for when an allocation is unmapped from the GPU's MMU
+ *
+ * psPMR: The PMR to which the allocation belongs.
+ * ui32Offset: The offset within the PMR at which the allocation begins.
+ * sDevVAddr: The DevVAddr at which the allocation begins.
+ * szName: Annotation/name for the allocation.
+ * ui32Log2PageSize: Page size of the allocation, expressed in log2 form.
+ * ui32AllocationIndex: Allocation index as provided by the client.
+ *                      We will use this as a short-cut to find the allocation
+ *                      in our records.
+ * pui32AllocationIndexOut: An updated allocation index for the client.
+ *                          This may be a new value if we just created the
+ *                          allocation record.
+ */
+PVRSRV_ERROR DevicememHistoryUnmapKM(PMR *psPMR,
+                                                       IMG_UINT32 ui32Offset,
+                                                       IMG_DEV_VIRTADDR sDevVAddr,
+                                                       IMG_DEVMEM_SIZE_T uiSize,
+                                                       const char szName[DEVMEM_ANNOTATION_MAX_LEN],
+                                                       IMG_UINT32 ui32Log2PageSize,
+                                                       IMG_UINT32 ui32AllocationIndex,
+                                                       IMG_UINT32 *pui32AllocationIndexOut)
+{
+       IMG_BOOL bSparse = PMR_IsSparse(psPMR);
+       IMG_UINT64 ui64Serial;
+       IMG_PID uiPID = OSGetCurrentClientProcessIDKM();
+       PVRSRV_ERROR eError;
+       IMG_BOOL bCreated;
+
+       if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
+               !CHECK_ALLOC_INDEX(ui32AllocationIndex))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
+                                                               __func__,
+                                                               ui32AllocationIndex));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       PMRGetUID(psPMR, &ui64Serial);
+
+       DevicememHistoryLock();
+
+       eError = FindOrCreateAllocation(PMR_DeviceNode(psPMR),
+                                               ui32AllocationIndex,
+                                               ui64Serial,
+                                               sDevVAddr,
+                                               uiSize,
+                                               szName,
+                                               ui32Log2PageSize,
+                                               uiPID,
+                                               bSparse,
+                                               &ui32AllocationIndex,
+                                               &bCreated);
+
+       if ((eError == PVRSRV_OK) && !bCreated)
+       {
+               /* touch the allocation so it goes to the head of our MRU list */
+               TouchBusyAllocation(ui32AllocationIndex);
+       }
+       else if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
+                                                                       __func__,
+                                                                       szName,
+                                                                       PVRSRVGETERRORSTRING(eError)));
+               goto out_unlock;
+       }
+
+       if (!bSparse)
+       {
+               InsertUnmapAllCommand(PMR_DeviceNode(psPMR), ui32AllocationIndex);
+       }
+       else
+       {
+               GenerateMapUnmapCommandsForSparsePMR(psPMR,
+                                                               ui32AllocationIndex,
+                                                               IMG_FALSE);
+       }
+
+       InsertTimeStampCommand(OSClockns64());
+
+       *pui32AllocationIndexOut = ui32AllocationIndex;
+
+out_unlock:
+       DevicememHistoryUnlock();
+
+       return eError;
+}
+
+/* DevicememHistorySparseChangeKM:
+ * Entry point for when a sparse allocation is changed, such that some of the
+ * pages within the sparse allocation are mapped or unmapped.
+ *
+ * psPMR: The PMR to which the allocation belongs.
+ * ui32Offset: The offset within the PMR at which the allocation begins.
+ * sDevVAddr: The DevVAddr at which the allocation begins.
+ * szName: Annotation/name for the allocation.
+ * ui32Log2PageSize: Page size of the allocation, expressed in log2 form.
+ * ui32AllocPageCount: Number of pages which have been mapped.
+ * paui32AllocPageIndices: Indices of pages which have been mapped.
+ * ui32FreePageCount: Number of pages which have been unmapped.
+ * paui32FreePageIndices: Indices of pages which have been unmapped.
+ * ui32AllocationIndex: Allocation index as provided by the client.
+ *                      We will use this as a short-cut to find the allocation
+ *                      in our records.
+ * pui32AllocationIndexOut: An updated allocation index for the client.
+ *                          This may be a new value if we just created the
+ *                          allocation record.
+ */
+PVRSRV_ERROR DevicememHistorySparseChangeKM(PMR *psPMR,
+                                                       IMG_UINT32 ui32Offset,
+                                                       IMG_DEV_VIRTADDR sDevVAddr,
+                                                       IMG_DEVMEM_SIZE_T uiSize,
+                                                       const char szName[DEVMEM_ANNOTATION_MAX_LEN],
+                                                       IMG_UINT32 ui32Log2PageSize,
+                                                       IMG_UINT32 ui32AllocPageCount,
+                                                       IMG_UINT32 *paui32AllocPageIndices,
+                                                       IMG_UINT32 ui32FreePageCount,
+                                                       IMG_UINT32 *paui32FreePageIndices,
+                                                       IMG_UINT32 ui32AllocationIndex,
+                                                       IMG_UINT32 *pui32AllocationIndexOut)
+{
+       IMG_UINT64 ui64Serial;
+       IMG_PID uiPID = OSGetCurrentClientProcessIDKM();
+       PVRSRV_ERROR eError;
+       IMG_BOOL bCreated;
+
+       if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
+               !CHECK_ALLOC_INDEX(ui32AllocationIndex))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
+                                                               __func__,
+                                                               ui32AllocationIndex));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       PMRGetUID(psPMR, &ui64Serial);
+
+       DevicememHistoryLock();
+
+       eError = FindOrCreateAllocation(PMR_DeviceNode(psPMR),
+                                               ui32AllocationIndex,
+                                               ui64Serial,
+                                               sDevVAddr,
+                                               uiSize,
+                                               szName,
+                                               ui32Log2PageSize,
+                                               uiPID,
+                                               IMG_TRUE /* bSparse */,
+                                               &ui32AllocationIndex,
+                                               &bCreated);
+
+       if ((eError == PVRSRV_OK) && !bCreated)
+       {
+               /* touch the allocation so it goes to the head of our MRU list */
+               TouchBusyAllocation(ui32AllocationIndex);
+       }
+       else if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
+                                                                       __func__,
+                                                                       szName,
+                                                                       PVRSRVGETERRORSTRING(eError)));
+               goto out_unlock;
+       }
+
+       GenerateMapUnmapCommandsForChangeList(PMR_DeviceNode(psPMR),
+                                                       ui32AllocPageCount,
+                                                       paui32AllocPageIndices,
+                                                       ui32AllocationIndex,
+                                                       IMG_TRUE);
+
+       GenerateMapUnmapCommandsForChangeList(PMR_DeviceNode(psPMR),
+                                                       ui32FreePageCount,
+                                                       paui32FreePageIndices,
+                                                       ui32AllocationIndex,
+                                                       IMG_FALSE);
+
+       InsertTimeStampCommand(OSClockns64());
+
+       *pui32AllocationIndexOut = ui32AllocationIndex;
+
+out_unlock:
+       DevicememHistoryUnlock();
+
+       return eError;
+}
+
+/* CircularBufferIterateStart:
+ * Initialise local state for iterating over the circular buffer
+ */
+static void CircularBufferIterateStart(IMG_UINT32 *pui32Head, IMG_UINT32 *pui32Iter)
+{
+       *pui32Head = gsDevicememHistoryData.sRecords.ui32Head;
+
+       if (*pui32Head != 0)
+       {
+               *pui32Iter = *pui32Head - 1;
+       }
+       else
+       {
+               *pui32Iter = CIRCULAR_BUFFER_NUM_COMMANDS - 1;
+       }
+}
+
+/* CircularBufferIteratePrevious:
+ * Iterate to the previous item in the circular buffer.
+ * This is called repeatedly to iterate over the whole circular buffer.
+ */
+static COMMAND_WRAPPER *CircularBufferIteratePrevious(IMG_UINT32 ui32Head,
+                                                       IMG_UINT32 *pui32Iter,
+                                                       COMMAND_TYPE *peType,
+                                                       IMG_BOOL *pbLast)
+{
+       IMG_UINT8 *pui8Header;
+       COMMAND_WRAPPER *psOut = NULL;
+
+       psOut = gsDevicememHistoryData.sRecords.pasCircularBuffer + *pui32Iter;
+
+       pui8Header = (void *) psOut;
+
+       /* Check that the command looks valid.
+        * This condition should never happen, but check for it anyway
+        * and try to handle it.
+        */
+       if (*pui8Header >= COMMAND_TYPE_COUNT)
+       {
+               /* invalid header detected. Circular buffer corrupted? */
+               PVR_DPF((PVR_DBG_ERROR, "CircularBufferIteratePrevious: "
+                                                       "Invalid header: %u",
+                                                       *pui8Header));
+               *pbLast = IMG_TRUE;
+               return NULL;
+       }
+
+       *peType = *pui8Header;
+
+       if (*pui32Iter != 0)
+       {
+               (*pui32Iter)--;
+       }
+       else
+       {
+               *pui32Iter = CIRCULAR_BUFFER_NUM_COMMANDS - 1;
+       }
+
+       /* inform the caller this is the last command if either we have reached
+        * the head (where we started) or if we have reached an empty command,
+        * which means we have covered all populated entries
+        */
+       if ((*pui32Iter == ui32Head) || (*peType == COMMAND_TYPE_NONE))
+       {
+               /* this is the final iteration */
+               *pbLast = IMG_TRUE;
+       }
+
+       return psOut;
+}
+
+/* MapUnmapCommandGetInfo:
+ * Helper function to get the address and mapping information from a MAP_ALL, UNMAP_ALL,
+ * MAP_RANGE or UNMAP_RANGE command
+ */
+static void MapUnmapCommandGetInfo(COMMAND_WRAPPER *psCommand,
+                                       COMMAND_TYPE eType,
+                                       IMG_DEV_VIRTADDR *psDevVAddrStart,
+                                       IMG_DEV_VIRTADDR *psDevVAddrEnd,
+                                       IMG_BOOL *pbMap,
+                                       IMG_UINT32 *pui32AllocIndex)
+{
+       if ((eType == COMMAND_TYPE_MAP_ALL) || (eType == COMMAND_TYPE_UNMAP_ALL))
+       {
+               COMMAND_MAP_ALL *psMapAll = &psCommand->u.sMapAll;
+               RECORD_ALLOCATION *psAlloc;
+
+               *pbMap = (eType == COMMAND_TYPE_MAP_ALL);
+               *pui32AllocIndex = psMapAll->uiAllocIndex;
+
+               psAlloc = ALLOC_INDEX_TO_PTR(psMapAll->uiAllocIndex);
+
+               *psDevVAddrStart = psAlloc->sDevVAddr;
+               psDevVAddrEnd->uiAddr = psDevVAddrStart->uiAddr + psAlloc->uiSize - 1;
+       }
+       else if ((eType == COMMAND_TYPE_MAP_RANGE) || (eType == COMMAND_TYPE_UNMAP_RANGE))
+       {
+               COMMAND_MAP_RANGE *psMapRange = &psCommand->u.sMapRange;
+               RECORD_ALLOCATION *psAlloc;
+               IMG_UINT32 ui32StartPage, ui32Count;
+
+               *pbMap = (eType == COMMAND_TYPE_MAP_RANGE);
+               *pui32AllocIndex = psMapRange->uiAllocIndex;
+
+               psAlloc = ALLOC_INDEX_TO_PTR(psMapRange->uiAllocIndex);
+
+               MapRangeUnpack(psMapRange, &ui32StartPage, &ui32Count);
+
+               psDevVAddrStart->uiAddr = psAlloc->sDevVAddr.uiAddr +
+                               ((1ULL << psAlloc->ui32Log2PageSize) * ui32StartPage);
+
+               psDevVAddrEnd->uiAddr = psDevVAddrStart->uiAddr +
+                               ((1ULL << psAlloc->ui32Log2PageSize) * ui32Count) - 1;
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Invalid command type: %u",
+                                                               __func__,
+                                                               eType));
+       }
+}
+
+/* DevicememHistoryQuery:
+ * Entry point for rgxdebug to look up addresses relating to a page fault
+ */
+IMG_BOOL DevicememHistoryQuery(DEVICEMEM_HISTORY_QUERY_IN *psQueryIn,
+                               DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut,
+                               IMG_UINT32 ui32PageSizeBytes,
+                               IMG_BOOL bMatchAnyAllocInPage)
+{
+       IMG_UINT32 ui32Head, ui32Iter;
+       COMMAND_TYPE eType = COMMAND_TYPE_NONE;
+       COMMAND_WRAPPER *psCommand = NULL;
+       IMG_BOOL bLast = IMG_FALSE;
+       IMG_UINT64 ui64StartTime = OSClockns64();
+       IMG_UINT64 ui64TimeNs = 0;
+
+       /* initialise the results count for the caller */
+       psQueryOut->ui32NumResults = 0;
+
+       DevicememHistoryLock();
+
+       /* if the search is constrained to a particular PID then we
+        * first search the list of allocations to see if this
+        * PID is known to us
+        */
+       if (psQueryIn->uiPID != DEVICEMEM_HISTORY_PID_ANY)
+       {
+               IMG_UINT32 ui32Alloc;
+               ui32Alloc = gsDevicememHistoryData.sRecords.ui32AllocationsListHead;
+
+               while (ui32Alloc != END_OF_LIST)
+               {
+                       RECORD_ALLOCATION *psAlloc;
+
+                       psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+
+                       if (psAlloc->uiPID == psQueryIn->uiPID)
+                       {
+                               goto found_pid;
+                       }
+
+                       /* advance to the next record; the allocations list is
+                        * circular, so stop once we wrap back around to the head
+                        */
+                       ui32Alloc = psAlloc->ui32Next;
+
+                       if (ui32Alloc == gsDevicememHistoryData.sRecords.ui32AllocationsListHead)
+                       {
+                               /* gone through whole list */
+                               break;
+                       }
+               }
+
+               /* PID not found, so we do not have any suitable data for this
+                * page fault
+                */
+               goto out_unlock;
+       }
+
+found_pid:
+
+       CircularBufferIterateStart(&ui32Head, &ui32Iter);
+
+       while (!bLast)
+       {
+               psCommand = CircularBufferIteratePrevious(ui32Head, &ui32Iter, &eType, &bLast);
+
+               if (eType == COMMAND_TYPE_TIMESTAMP)
+               {
+                       ui64TimeNs = TimeStampUnpack(&psCommand->u.sTimeStamp);
+                       continue;
+               }
+
+               if ((eType == COMMAND_TYPE_MAP_ALL) ||
+                       (eType == COMMAND_TYPE_UNMAP_ALL) ||
+                       (eType == COMMAND_TYPE_MAP_RANGE) ||
+                       (eType == COMMAND_TYPE_UNMAP_RANGE))
+               {
+                       RECORD_ALLOCATION *psAlloc;
+                       IMG_DEV_VIRTADDR sAllocStartAddrOrig, sAllocEndAddrOrig;
+                       IMG_DEV_VIRTADDR sAllocStartAddr, sAllocEndAddr;
+                       IMG_BOOL bMap;
+                       IMG_UINT32 ui32AllocIndex;
+
+                       MapUnmapCommandGetInfo(psCommand,
+                                                       eType,
+                                                       &sAllocStartAddrOrig,
+                                                       &sAllocEndAddrOrig,
+                                                       &bMap,
+                                                       &ui32AllocIndex);
+
+                       sAllocStartAddr = sAllocStartAddrOrig;
+                       sAllocEndAddr = sAllocEndAddrOrig;
+
+                       psAlloc = ALLOC_INDEX_TO_PTR(ui32AllocIndex);
+
+                       /* skip this command if we need to search within
+                        * a particular PID, and this allocation is not from
+                        * that PID
+                        */
+                       if ((psQueryIn->uiPID != DEVICEMEM_HISTORY_PID_ANY) &&
+                               (psAlloc->uiPID != psQueryIn->uiPID))
+                       {
+                               continue;
+                       }
+
+                       /* if the allocation was created after this event, then this
+                        * event must be for an old/removed allocation, so skip it
+                        */
+                       if (DO_TIME_STAMP_MASK(psAlloc->ui64CreationTime) > ui64TimeNs)
+                       {
+                               continue;
+                       }
+
+                       /* if the caller wants us to match any allocation in the
+                        * same page as the allocation then tweak the real start/end
+                        * addresses of the allocation here
+                        */
+                       if (bMatchAnyAllocInPage)
+                       {
+                               sAllocStartAddr.uiAddr = sAllocStartAddr.uiAddr & ~(IMG_UINT64) (ui32PageSizeBytes - 1);
+                               sAllocEndAddr.uiAddr = (sAllocEndAddr.uiAddr + ui32PageSizeBytes - 1) & ~(IMG_UINT64) (ui32PageSizeBytes - 1);
+                       }
+
+                       if ((psQueryIn->sDevVAddr.uiAddr >= sAllocStartAddr.uiAddr) &&
+                               (psQueryIn->sDevVAddr.uiAddr <  sAllocEndAddr.uiAddr))
+                       {
+                               DEVICEMEM_HISTORY_QUERY_OUT_RESULT *psResult = &psQueryOut->sResults[psQueryOut->ui32NumResults];
+
+                               OSStringLCopy(psResult->szString, psAlloc->szName, sizeof(psResult->szString));
+                               psResult->sBaseDevVAddr = psAlloc->sDevVAddr;
+                               psResult->uiSize = psAlloc->uiSize;
+                               psResult->bMap = bMap;
+                               psResult->ui64Age = _CalculateAge(ui64StartTime, ui64TimeNs, TIME_STAMP_MASK);
+                               psResult->ui64When = ui64TimeNs;
+                               /* write the responsible PID in the placeholder */
+                               psResult->sProcessInfo.uiPID = psAlloc->uiPID;
+
+                               if ((eType == COMMAND_TYPE_MAP_ALL) || (eType == COMMAND_TYPE_UNMAP_ALL))
+                               {
+                                       psResult->bRange = IMG_FALSE;
+                                       psResult->bAll = IMG_TRUE;
+                               }
+                               else
+                               {
+                                       psResult->bRange = IMG_TRUE;
+                                       MapRangeUnpack(&psCommand->u.sMapRange,
+                                                                               &psResult->ui32StartPage,
+                                                                               &psResult->ui32PageCount);
+                                       psResult->bAll = (psResult->ui32PageCount * (1U << psAlloc->ui32Log2PageSize))
+                                                                                       == psAlloc->uiSize;
+                                       psResult->sMapStartAddr = sAllocStartAddrOrig;
+                                       psResult->sMapEndAddr = sAllocEndAddrOrig;
+                               }
+
+                               psQueryOut->ui32NumResults++;
+
+                               if (psQueryOut->ui32NumResults == DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS)
+                               {
+                                       break;
+                               }
+                       }
+               }
+       }
+
+out_unlock:
+       DevicememHistoryUnlock();
+
+       return psQueryOut->ui32NumResults > 0;
+}
+
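+/* DeviceMemHistoryFmt:
+ * Format a single history entry (PID, name, action, address range, size and
+ * timestamp) into the supplied buffer for display.
+ */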
+static void DeviceMemHistoryFmt(IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN],
+                                                       IMG_PID uiPID,
+                                                       const IMG_CHAR *pszName,
+                                                       const IMG_CHAR *pszAction,
+                                                       IMG_DEV_VIRTADDR sDevVAddrStart,
+                                                       IMG_DEV_VIRTADDR sDevVAddrEnd,
+                                                       IMG_UINT64 ui64TimeNs)
+{
+
+       OSSNPrintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN,
+                               /* PID NAME MAP/UNMAP MIN-MAX SIZE AbsNS */
+                               "%04u %-40s %-10s "
+                               IMG_DEV_VIRTADDR_FMTSPEC "-" IMG_DEV_VIRTADDR_FMTSPEC " "
+                               "0x%08" IMG_UINT64_FMTSPECX " "
+                               "%013" IMG_UINT64_FMTSPEC, /* 13 digits is over 2 hours of ns */
+                               uiPID,
+                               pszName,
+                               pszAction,
+                               sDevVAddrStart.uiAddr,
+                               sDevVAddrEnd.uiAddr,
+                               sDevVAddrEnd.uiAddr - sDevVAddrStart.uiAddr + 1,
+                               ui64TimeNs);
+}
+
+static void DeviceMemHistoryFmtHeader(IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN])
+{
+       OSSNPrintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN,
+                               "%-4s %-40s %-6s   %10s   %10s   %8s %13s",
+                               "PID",
+                               "NAME",
+                               "ACTION",
+                               "ADDR MIN",
+                               "ADDR MAX",
+                               "SIZE",
+                               "ABS NS");
+}
+
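+/* CommandTypeToString:
+ * Return a human-readable name for the given command type.
+ */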
+static const char *CommandTypeToString(COMMAND_TYPE eType)
+{
+       switch (eType)
+       {
+               case COMMAND_TYPE_MAP_ALL:
+                       return "MapAll";
+               case COMMAND_TYPE_UNMAP_ALL:
+                       return "UnmapAll";
+               case COMMAND_TYPE_MAP_RANGE:
+                       return "MapRange";
+               case COMMAND_TYPE_UNMAP_RANGE:
+                       return "UnmapRange";
+               case COMMAND_TYPE_TIMESTAMP:
+                       return "TimeStamp";
+               default:
+                       return "???";
+       }
+}
+
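+/* DevicememHistoryPrintAll:
+ * Walk the circular buffer backwards from the most recent entry and print
+ * every map/unmap command, skipping commands whose allocation record has
+ * since been reused.
+ */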
+static void DevicememHistoryPrintAll(OSDI_IMPL_ENTRY *psEntry)
+{
+       IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+       IMG_UINT32 ui32Iter;
+       IMG_UINT32 ui32Head;
+       IMG_BOOL bLast = IMG_FALSE;
+       IMG_UINT64 ui64TimeNs = 0;
+       IMG_UINT64 ui64StartTime = OSClockns64();
+
+       DeviceMemHistoryFmtHeader(szBuffer);
+       DIPrintf(psEntry, "%s\n", szBuffer);
+
+       CircularBufferIterateStart(&ui32Head, &ui32Iter);
+
+       while (!bLast)
+       {
+               COMMAND_WRAPPER *psCommand;
+               COMMAND_TYPE eType = COMMAND_TYPE_NONE;
+
+               psCommand = CircularBufferIteratePrevious(ui32Head, &ui32Iter, &eType,
+                                                         &bLast);
+
+               if (eType == COMMAND_TYPE_TIMESTAMP)
+               {
+                       ui64TimeNs = TimeStampUnpack(&psCommand->u.sTimeStamp);
+                       continue;
+               }
+
+               if ((eType == COMMAND_TYPE_MAP_ALL) ||
+                       (eType == COMMAND_TYPE_UNMAP_ALL) ||
+                       (eType == COMMAND_TYPE_MAP_RANGE) ||
+                       (eType == COMMAND_TYPE_UNMAP_RANGE))
+               {
+                       RECORD_ALLOCATION *psAlloc;
+                       IMG_DEV_VIRTADDR sDevVAddrStart, sDevVAddrEnd;
+                       IMG_BOOL bMap;
+                       IMG_UINT32 ui32AllocIndex;
+
+                       MapUnmapCommandGetInfo(psCommand,
+                                              eType,
+                                              &sDevVAddrStart,
+                                              &sDevVAddrEnd,
+                                              &bMap,
+                                              &ui32AllocIndex);
+
+                       psAlloc = ALLOC_INDEX_TO_PTR(ui32AllocIndex);
+
+                       if (DO_TIME_STAMP_MASK(psAlloc->ui64CreationTime) > ui64TimeNs)
+                       {
+                               /* if this event relates to an allocation we
+                                * are no longer tracking then do not print it
+                                */
+                               continue;
+                       }
+
+                       DeviceMemHistoryFmt(szBuffer,
+                                           psAlloc->uiPID,
+                                           psAlloc->szName,
+                                           CommandTypeToString(eType),
+                                           sDevVAddrStart,
+                                           sDevVAddrEnd,
+                                           ui64TimeNs);
+
+                       DIPrintf(psEntry, "%s\n", szBuffer);
+               }
+       }
+
+       DIPrintf(psEntry, "\nTimestamp reference: %013" IMG_UINT64_FMTSPEC "\n",
+                ui64StartTime);
+}
+
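+/* DevicememHistoryPrintAllWrapper:
+ * Debug Info (DI) show callback: print the whole history while holding the
+ * devicemem history lock.
+ */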
+static int DevicememHistoryPrintAllWrapper(OSDI_IMPL_ENTRY *psEntry,
+                                           void *pvData)
+{
+       PVR_UNREFERENCED_PARAMETER(pvData);
+
+       DevicememHistoryLock();
+       DevicememHistoryPrintAll(psEntry);
+       DevicememHistoryUnlock();
+
+       return 0;
+}
+
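+/* CreateRecords:
+ * Allocate the allocation records array and the circular command buffer.
+ */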
+static PVRSRV_ERROR CreateRecords(void)
+{
+       gsDevicememHistoryData.sRecords.pasAllocations =
+                       OSAllocMem(sizeof(RECORD_ALLOCATION) * ALLOCATION_LIST_NUM_ENTRIES);
+
+       PVR_RETURN_IF_NOMEM(gsDevicememHistoryData.sRecords.pasAllocations);
+
+       /* Allocate and initialise the circular buffer with zeros so every
+        * command is initialised as a command of type COMMAND_TYPE_NONE. */
+       gsDevicememHistoryData.sRecords.pasCircularBuffer =
+                       OSAllocZMem(sizeof(COMMAND_WRAPPER) * CIRCULAR_BUFFER_NUM_COMMANDS);
+
+       if (gsDevicememHistoryData.sRecords.pasCircularBuffer == NULL)
+       {
+               OSFreeMem(gsDevicememHistoryData.sRecords.pasAllocations);
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       return PVRSRV_OK;
+}
+
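+/* DestroyRecords:
+ * Free the circular command buffer and the allocation records array.
+ */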
+static void DestroyRecords(void)
+{
+       OSFreeMem(gsDevicememHistoryData.sRecords.pasCircularBuffer);
+       OSFreeMem(gsDevicememHistoryData.sRecords.pasAllocations);
+}
+
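+/* InitialiseRecords:
+ * Link every allocation record into a circular doubly-linked list (the MRU
+ * list) and set entry 0 as the list head.
+ */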
+static void InitialiseRecords(void)
+{
+       IMG_UINT32 i;
+
+       /* initialise the allocations list */
+
+       gsDevicememHistoryData.sRecords.pasAllocations[0].ui32Prev = ALLOCATION_LIST_NUM_ENTRIES - 1;
+       gsDevicememHistoryData.sRecords.pasAllocations[0].ui32Next = 1;
+
+       for (i = 1; i < ALLOCATION_LIST_NUM_ENTRIES; i++)
+       {
+               gsDevicememHistoryData.sRecords.pasAllocations[i].ui32Prev = i - 1;
+               gsDevicememHistoryData.sRecords.pasAllocations[i].ui32Next = i + 1;
+       }
+
+       gsDevicememHistoryData.sRecords.pasAllocations[ALLOCATION_LIST_NUM_ENTRIES - 1].ui32Next = 0;
+
+       gsDevicememHistoryData.sRecords.ui32AllocationsListHead = 0;
+}
+
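+/* DevicememHistoryInitKM:
+ * Initialise the devicemem history module: create the lock, allocate and
+ * initialise the records, and register the "devicemem_history" DI entry.
+ */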
+PVRSRV_ERROR DevicememHistoryInitKM(void)
+{
+       PVRSRV_ERROR eError;
+       DI_ITERATOR_CB sIterator = {.pfnShow = DevicememHistoryPrintAllWrapper};
+
+       eError = OSLockCreate(&gsDevicememHistoryData.hLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", err_lock);
+
+       eError = CreateRecords();
+       PVR_LOG_GOTO_IF_ERROR(eError, "CreateRecords", err_allocations);
+
+       InitialiseRecords();
+
+       eError = DICreateEntry("devicemem_history", NULL, &sIterator, NULL,
+                              DI_ENTRY_TYPE_GENERIC,
+                              &gsDevicememHistoryData.psDIEntry);
+       PVR_LOG_GOTO_IF_ERROR(eError, "DICreateEntry", err_di_creation);
+
+       return PVRSRV_OK;
+
+err_di_creation:
+       DestroyRecords();
+err_allocations:
+       OSLockDestroy(gsDevicememHistoryData.hLock);
+       gsDevicememHistoryData.hLock = NULL;
+err_lock:
+       return eError;
+}
+
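+/* DevicememHistoryDeInitKM:
+ * Tear down the devicemem history module: remove the DI entry, free the
+ * records and destroy the lock.
+ */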
+void DevicememHistoryDeInitKM(void)
+{
+       if (gsDevicememHistoryData.psDIEntry != NULL)
+       {
+               DIDestroyEntry(gsDevicememHistoryData.psDIEntry);
+       }
+
+       DestroyRecords();
+
+       if (gsDevicememHistoryData.hLock != NULL)
+       {
+               OSLockDestroy(gsDevicememHistoryData.hLock);
+               gsDevicememHistoryData.hLock = NULL;
+       }
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/devicemem_server.c b/drivers/gpu/drm/img/img-rogue/services/server/common/devicemem_server.c
new file mode 100644 (file)
index 0000000..089fa9c
--- /dev/null
@@ -0,0 +1,1813 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Server-side component of the Device Memory Management.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+/* our exported API */
+#include "devicemem_server.h"
+#include "devicemem_utils.h"
+#include "devicemem.h"
+
+#include "device.h" /* For device node */
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+#include "mmu_common.h"
+#include "pdump_km.h"
+#include "pmr.h"
+#include "physmem.h"
+#include "pdumpdesc.h"
+
+#include "allocmem.h"
+#include "osfunc.h"
+#include "lock.h"
+
+#include "pvrsrv.h" /* for PVRSRVGetPVRSRVData() */
+
+#define DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE (1 << 0)
+#define DEVMEMHEAP_REFCOUNT_MIN 1
+#define DEVMEMHEAP_REFCOUNT_MAX IMG_INT32_MAX
+
+struct _DEVMEMINT_CTX_
+{
+       PVRSRV_DEVICE_NODE *psDevNode;
+
+       /* MMU common code needs to have a context. There's a one-to-one
+          correspondence between device memory context and MMU context,
+          but we have the abstraction here so that we don't need to care
+          what the MMU does with its context, and the MMU code need not
+          know about us at all. */
+       MMU_CONTEXT *psMMUContext;
+
+       ATOMIC_T hRefCount;
+
+       /* This handle is for devices that require notification when a new
+          memory context is created and they need to store private data that
+          is associated with the context. */
+       IMG_HANDLE hPrivData;
+
+       /* Protects access to sProcessNotifyListHead */
+       POSWR_LOCK hListLock;
+
+       /* The following tracks UM applications that need to be notified of a
+        * page fault */
+       DLLIST_NODE sProcessNotifyListHead;
+       /* The following is a node for the list of registered devmem contexts */
+       DLLIST_NODE sPageFaultNotifyListElem;
+
+       /* Device virtual address of a page fault on this context */
+       IMG_DEV_VIRTADDR sFaultAddress;
+
+       /* General purpose flags */
+       IMG_UINT32 ui32Flags;
+};
+
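+/* Records an export of a device memory context together with the PMR it was
+   exported with; reference counted and linked into a list of exports. */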
+struct _DEVMEMINT_CTX_EXPORT_
+{
+       DEVMEMINT_CTX *psDevmemCtx;
+       PMR *psPMR;
+       ATOMIC_T hRefCount;
+       DLLIST_NODE sNode;
+};
+
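+/* Server-side representation of a heap within a device memory context. */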
+struct _DEVMEMINT_HEAP_
+{
+       struct _DEVMEMINT_CTX_ *psDevmemCtx;
+       IMG_UINT32 uiLog2PageSize;
+       ATOMIC_T uiRefCount;
+};
+
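+/* A reservation of a range of device virtual address space within a heap. */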
+struct _DEVMEMINT_RESERVATION_
+{
+       struct _DEVMEMINT_HEAP_ *psDevmemHeap;
+       IMG_DEV_VIRTADDR sBase;
+       IMG_DEVMEM_SIZE_T uiLength;
+};
+
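+/* A mapping of a PMR's pages into a previously made reservation. */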
+struct _DEVMEMINT_MAPPING_
+{
+       struct _DEVMEMINT_RESERVATION_ *psReservation;
+       PMR *psPMR;
+       IMG_UINT32 uiNumPages;
+};
+
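+/* Identifies a process (by PID) that has registered to be notified of page
+   faults on a device memory context. */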
+struct _DEVMEMINT_PF_NOTIFY_
+{
+       IMG_UINT32  ui32PID;
+       DLLIST_NODE sProcessNotifyListElem;
+};
+
+/*************************************************************************/ /*!
+@Function       DevmemIntCtxAcquire
+@Description    Acquire a reference to the provided device memory context.
+@Return         None
+*/ /**************************************************************************/
+static INLINE void DevmemIntCtxAcquire(DEVMEMINT_CTX *psDevmemCtx)
+{
+       OSAtomicIncrement(&psDevmemCtx->hRefCount);
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemIntCtxRelease
+@Description    Release the reference to the provided device memory context.
+                If this is the last reference which was taken then the
+                memory context will be freed.
+@Return         None
+*/ /**************************************************************************/
+static INLINE void DevmemIntCtxRelease(DEVMEMINT_CTX *psDevmemCtx)
+{
+       if (OSAtomicDecrement(&psDevmemCtx->hRefCount) == 0)
+       {
+               /* The last reference has gone, destroy the context */
+               PVRSRV_DEVICE_NODE *psDevNode = psDevmemCtx->psDevNode;
+               DLLIST_NODE *psNode, *psNodeNext;
+
+               /* If there are any PIDs registered for page fault notification,
+                * loop through them and free each node */
+               dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext)
+               {
+                       DEVMEMINT_PF_NOTIFY *psNotifyNode =
+                               IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem);
+                       dllist_remove_node(psNode);
+                       OSFreeMem(psNotifyNode);
+               }
+
+               /* If this context is in the list registered for a debugger, remove
+                * from that list */
+               if (dllist_node_is_in_list(&psDevmemCtx->sPageFaultNotifyListElem))
+               {
+                       dllist_remove_node(&psDevmemCtx->sPageFaultNotifyListElem);
+               }
+
+               if (psDevNode->pfnUnregisterMemoryContext)
+               {
+                       psDevNode->pfnUnregisterMemoryContext(psDevmemCtx->hPrivData);
+               }
+               MMU_ContextDestroy(psDevmemCtx->psMMUContext);
+
+               OSWRLockDestroy(psDevmemCtx->hListLock);
+
+               PVR_DPF((PVR_DBG_MESSAGE, "%s: Freed memory context %p",
+                                __func__, psDevmemCtx));
+               OSFreeMem(psDevmemCtx);
+       }
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemIntHeapAcquire
+@Description    Acquire a reference to the provided device memory heap.
+@Return         IMG_TRUE if referenced and IMG_FALSE in case of error
+*/ /**************************************************************************/
+static INLINE IMG_BOOL DevmemIntHeapAcquire(DEVMEMINT_HEAP *psDevmemHeap)
+{
+       IMG_BOOL bSuccess = OSAtomicAddUnless(&psDevmemHeap->uiRefCount, 1,
+                                             DEVMEMHEAP_REFCOUNT_MAX);
+
+       if (!bSuccess)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s(): Failed to acquire the device memory "
+                        "heap, reference count has overflowed.", __func__));
+               return IMG_FALSE;
+       }
+
+       return IMG_TRUE;
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemIntHeapRelease
+@Description    Release a reference to the provided device memory heap.
+                Note that the heap itself is only freed by
+                DevmemIntHeapDestroy once all references have been dropped.
+@Return         None
+*/ /**************************************************************************/
+static INLINE void DevmemIntHeapRelease(DEVMEMINT_HEAP *psDevmemHeap)
+{
+       IMG_BOOL bSuccess = OSAtomicSubtractUnless(&psDevmemHeap->uiRefCount, 1,
+                                                  DEVMEMHEAP_REFCOUNT_MIN);
+
+       if (!bSuccess)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s(): Failed to release the device memory "
+                        "heap, reference count has underflowed.", __func__));
+       }
+}
+
+PVRSRV_ERROR
+DevmemIntUnpin(PMR *psPMR)
+{
+       PVRSRV_ERROR eError;
+
+       /* Unpin */
+       eError = PMRUnpinPMR(psPMR, IMG_FALSE);
+
+       return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntUnpinInvalidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR)
+{
+       PVRSRV_ERROR eError;
+
+       eError = PMRUnpinPMR(psPMR, IMG_TRUE);
+       PVR_GOTO_IF_ERROR(eError, e_exit);
+
+       /* Invalidate mapping */
+       eError = MMU_ChangeValidity(psDevmemMapping->psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+                                   psDevmemMapping->psReservation->sBase,
+                                   psDevmemMapping->uiNumPages,
+                                   psDevmemMapping->psReservation->psDevmemHeap->uiLog2PageSize,
+                                   IMG_FALSE, /* !< Choose to invalidate PT entries */
+                                   psPMR);
+
+e_exit:
+       return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntPin(PMR *psPMR)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       /* Start the pinning */
+       eError = PMRPinPMR(psPMR);
+
+       return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntPinValidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_ERROR eErrorMMU = PVRSRV_OK;
+       IMG_UINT32 uiLog2PageSize = psDevmemMapping->psReservation->psDevmemHeap->uiLog2PageSize;
+
+       /* Start the pinning */
+       eError = PMRPinPMR(psPMR);
+
+       if (eError == PVRSRV_OK)
+       {
+               /* Make mapping valid again */
+               eErrorMMU = MMU_ChangeValidity(psDevmemMapping->psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+                                           psDevmemMapping->psReservation->sBase,
+                                           psDevmemMapping->uiNumPages,
+                                           uiLog2PageSize,
+                                           IMG_TRUE, /* !< Choose to make PT entries valid again */
+                                           psPMR);
+       }
+       else if (eError == PVRSRV_ERROR_PMR_NEW_MEMORY)
+       {
+               /* If we lost the physical backing we have to map it again because
+                * the old physical addresses are not valid anymore. */
+               PMR_FLAGS_T uiFlags;
+               uiFlags = PMR_Flags(psPMR);
+
+               eErrorMMU = MMU_MapPages(psDevmemMapping->psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+                                        uiFlags,
+                                        psDevmemMapping->psReservation->sBase,
+                                        psPMR,
+                                        0,
+                                        psDevmemMapping->uiNumPages,
+                                        NULL,
+                                        uiLog2PageSize);
+       }
+
+       /* Overwrite eError only if the MMU operation failed; otherwise keep
+        * the PMR status, so that PMR_NEW_MEMORY is propagated to the user. */
+       if (eErrorMMU != PVRSRV_OK)
+       {
+               eError = eErrorMMU;
+       }
+
+       return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemServerGetImportHandle
+@Description    For a given exportable memory descriptor, returns the PMR handle.
+@Return         PVRSRV_OK if the memory is exportable
+                PVRSRV_ERROR failure code otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemServerGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
+                            IMG_HANDLE *phImport)
+{
+       PVRSRV_ERROR eError;
+
+       if ((GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_EXPORTABLE) == 0)
+       {
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION, e0);
+       }
+
+       /* A new handle means a new import tracking the PMR.
+        * Hence the source PMR memory layout should be marked fixed
+        * to make sure the importer's view of the memory is the same as
+        * the exporter's throughout its lifetime */
+       PMR_SetLayoutFixed((PMR *)psMemDesc->psImport->hPMR, IMG_TRUE);
+
+       *phImport = psMemDesc->psImport->hPMR;
+       return PVRSRV_OK;
+
+e0:
+       return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemServerGetHeapHandle
+@Description    For a given reservation, returns the heap handle.
+@Return         PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemServerGetHeapHandle(DEVMEMINT_RESERVATION *psReservation,
+                          IMG_HANDLE *phHeap)
+{
+       if (psReservation == NULL || phHeap == NULL)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       *phHeap = psReservation->psDevmemHeap;
+
+       return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemServerGetContext
+@Description    For a given heap, returns the device memory context.
+@Return         PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemServerGetContext(DEVMEMINT_HEAP *psDevmemHeap,
+                                          DEVMEMINT_CTX **ppsDevmemCtxPtr)
+{
+       if (psDevmemHeap == NULL || ppsDevmemCtxPtr == NULL)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       *ppsDevmemCtxPtr = psDevmemHeap->psDevmemCtx;
+
+       return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemServerGetPrivData
+@Description    For a given context, returns the private data handle.
+@Return         PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemServerGetPrivData(DEVMEMINT_CTX *psDevmemCtx,
+                                               IMG_HANDLE *phPrivData)
+{
+       if (psDevmemCtx == NULL || phPrivData == NULL)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       *phPrivData = psDevmemCtx->hPrivData;
+
+       return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemIntCtxCreate
+@Description    Creates and initialises a device memory context.
+@Return         valid Device Memory context handle - Success
+                PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemIntCtxCreate(CONNECTION_DATA *psConnection,
+                   PVRSRV_DEVICE_NODE *psDeviceNode,
+                   IMG_BOOL bKernelMemoryCtx,
+                   DEVMEMINT_CTX **ppsDevmemCtxPtr,
+                   IMG_HANDLE *hPrivData,
+                   IMG_UINT32 *pui32CPUCacheLineSize)
+{
+       PVRSRV_ERROR eError;
+       DEVMEMINT_CTX *psDevmemCtx;
+       IMG_HANDLE hPrivDataInt = NULL;
+       MMU_DEVICEATTRIBS *psMMUDevAttrs = psDeviceNode->pfnGetMMUDeviceAttributes(psDeviceNode,
+                                                                                  bKernelMemoryCtx);
+
+       PVR_DPF((PVR_DBG_MESSAGE, "%s", __func__));
+
+       /*
+        * Ensure that we are safe to perform unaligned accesses on memory
+        * we mark write-combine, as the compiler might generate
+        * instructions operating on this memory which require this
+        * assumption to be true.
+        */
+       PVR_ASSERT(OSIsWriteCombineUnalignedSafe());
+
+       /* allocate a Devmem context */
+       psDevmemCtx = OSAllocMem(sizeof(*psDevmemCtx));
+       PVR_LOG_GOTO_IF_NOMEM(psDevmemCtx, eError, fail_alloc);
+
+       OSAtomicWrite(&psDevmemCtx->hRefCount, 1);
+       psDevmemCtx->psDevNode = psDeviceNode;
+
+       /* Call down to MMU context creation */
+
+       eError = MMU_ContextCreate(psConnection,
+                                  psDeviceNode,
+                                  &psDevmemCtx->psMMUContext,
+                                  psMMUDevAttrs);
+       PVR_LOG_GOTO_IF_ERROR(eError, "MMU_ContextCreate", fail_mmucontext);
+
+       if (psDeviceNode->pfnRegisterMemoryContext)
+       {
+               eError = psDeviceNode->pfnRegisterMemoryContext(psDeviceNode, psDevmemCtx->psMMUContext, &hPrivDataInt);
+               PVR_LOG_GOTO_IF_ERROR(eError, "pfnRegisterMemoryContext", fail_register);
+       }
+
+       /* Store the private data as it is required to unregister the memory context */
+       psDevmemCtx->hPrivData = hPrivDataInt;
+       *hPrivData = hPrivDataInt;
+       *ppsDevmemCtxPtr = psDevmemCtx;
+
+       /* Pass the CPU cache line size through the bridge to user mode, as it cannot be queried there. */
+       *pui32CPUCacheLineSize = OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE);
+
+       /* Initialise the PID notify list */
+       OSWRLockCreate(&psDevmemCtx->hListLock);
+       dllist_init(&(psDevmemCtx->sProcessNotifyListHead));
+       psDevmemCtx->sPageFaultNotifyListElem.psNextNode = NULL;
+       psDevmemCtx->sPageFaultNotifyListElem.psPrevNode = NULL;
+
+       /* Initialise page fault address */
+       psDevmemCtx->sFaultAddress.uiAddr = 0ULL;
+
+       /* Initialise flags */
+       psDevmemCtx->ui32Flags = 0;
+
+       return PVRSRV_OK;
+
+fail_register:
+       MMU_ContextDestroy(psDevmemCtx->psMMUContext);
+fail_mmucontext:
+       OSFreeMem(psDevmemCtx);
+fail_alloc:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemIntHeapCreate
+@Description    Creates and initialises a device memory heap.
+@Return         valid Device Memory heap handle - Success
+                PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemIntHeapCreate(DEVMEMINT_CTX *psDevmemCtx,
+                    IMG_DEV_VIRTADDR sHeapBaseAddr,
+                    IMG_DEVMEM_SIZE_T uiHeapLength,
+                    IMG_UINT32 uiLog2DataPageSize,
+                    DEVMEMINT_HEAP **ppsDevmemHeapPtr)
+{
+       DEVMEMINT_HEAP *psDevmemHeap;
+
+       PVR_DPF((PVR_DBG_MESSAGE, "%s", __func__));
+
+       /* allocate a Devmem heap */
+       psDevmemHeap = OSAllocMem(sizeof(*psDevmemHeap));
+       PVR_LOG_RETURN_IF_NOMEM(psDevmemHeap, "psDevmemHeap");
+
+       psDevmemHeap->psDevmemCtx = psDevmemCtx;
+
+       DevmemIntCtxAcquire(psDevmemHeap->psDevmemCtx);
+
+       OSAtomicWrite(&psDevmemHeap->uiRefCount, 1);
+
+       psDevmemHeap->uiLog2PageSize = uiLog2DataPageSize;
+
+       *ppsDevmemHeapPtr = psDevmemHeap;
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR DevmemIntAllocDefBackingPage(PVRSRV_DEVICE_NODE *psDevNode,
+                                            PVRSRV_DEF_PAGE *psDefPage,
+                                            IMG_INT    uiInitValue,
+                                            IMG_CHAR *pcDefPageName,
+                                            IMG_BOOL bInitPage)
+{
+       IMG_UINT32 ui32RefCnt;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       OSLockAcquire(psDefPage->psPgLock);
+
+       /* We know there will never be anywhere near 4G sparse PMRs, so this refcount cannot overflow */
+       ui32RefCnt = OSAtomicIncrement(&psDefPage->atRefCounter);
+
+       if (1 == ui32RefCnt)
+       {
+               IMG_DEV_PHYADDR sDevPhysAddr = {0};
+
+#if defined(PDUMP)
+               PDUMPCOMMENT(psDevNode, "Alloc %s page object", pcDefPageName);
+#endif
+
+               /* Allocate the dummy page required for sparse backing */
+               eError = DevPhysMemAlloc(psDevNode,
+                                        (1 << psDefPage->ui32Log2PgSize),
+                                        0,
+                                        uiInitValue,
+                                        bInitPage,
+#if defined(PDUMP)
+                                        psDevNode->psMMUDevAttrs->pszMMUPxPDumpMemSpaceName,
+                                        pcDefPageName,
+                                        &psDefPage->hPdumpPg,
+#endif
+                                        &psDefPage->sPageHandle,
+                                        &sDevPhysAddr);
+               if (PVRSRV_OK != eError)
+               {
+                       OSAtomicDecrement(&psDefPage->atRefCounter);
+               }
+               else
+               {
+                       psDefPage->ui64PgPhysAddr = sDevPhysAddr.uiAddr;
+               }
+       }
+
+       OSLockRelease(psDefPage->psPgLock);
+
+       return eError;
+}
+
+void DevmemIntFreeDefBackingPage(PVRSRV_DEVICE_NODE *psDevNode,
+                                   PVRSRV_DEF_PAGE *psDefPage,
+                                   IMG_CHAR *pcDefPageName)
+{
+       IMG_UINT32 ui32RefCnt;
+
+       ui32RefCnt = OSAtomicRead(&psDefPage->atRefCounter);
+
+       /* If the dummy page allocation failed earlier due to lack of memory,
+        * the refcount can still be 0 even for a sparse allocation */
+       if (0 != ui32RefCnt)
+       {
+               OSLockAcquire(psDefPage->psPgLock);
+
+               /* We know there will never be anywhere near 4G sparse PMRs, so this refcount cannot overflow */
+               ui32RefCnt = OSAtomicDecrement(&psDefPage->atRefCounter);
+
+               if (0 == ui32RefCnt)
+               {
+                       PDUMPCOMMENT(psDevNode, "Free %s page object", pcDefPageName);
+
+                       /* Free the dummy page when refcount reaches zero */
+                       DevPhysMemFree(psDevNode,
+#if defined(PDUMP)
+                                      psDefPage->hPdumpPg,
+#endif
+                                      &psDefPage->sPageHandle);
+
+#if defined(PDUMP)
+                       psDefPage->hPdumpPg = NULL;
+#endif
+                       psDefPage->ui64PgPhysAddr = MMU_BAD_PHYS_ADDR;
+               }
+
+               OSLockRelease(psDefPage->psPgLock);
+       }
+
+}
+
+PVRSRV_ERROR
+DevmemIntMapPages(DEVMEMINT_RESERVATION *psReservation,
+                  PMR *psPMR,
+                  IMG_UINT32 ui32PageCount,
+                  IMG_UINT32 ui32PhysicalPgOffset,
+                  PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                  IMG_DEV_VIRTADDR sDevVAddrBase)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM((ui32PageCount < PMR_MAX_SUPPORTED_PAGE_COUNT), "ui32PageCount");
+       PVR_LOG_RETURN_IF_INVALID_PARAM((ui32PhysicalPgOffset < PMR_MAX_SUPPORTED_PAGE_COUNT), "ui32PhysicalPgOffset");
+
+       if (psReservation->psDevmemHeap->uiLog2PageSize > PMR_GetLog2Contiguity(psPMR))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Device heap and PMR have incompatible Log2Contiguity (%u - %u). "
+                        "PMR contiguity must be a multiple of the heap contiguity!",
+                        __func__,
+                        psReservation->psDevmemHeap->uiLog2PageSize,
+                        PMR_GetLog2Contiguity(psPMR)));
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, e0);
+       }
+
+       eError = MMU_MapPages(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+                             uiFlags,
+                             sDevVAddrBase,
+                             psPMR,
+                             ui32PhysicalPgOffset,
+                             ui32PageCount,
+                             NULL,
+                             psReservation->psDevmemHeap->uiLog2PageSize);
+
+e0:
+       return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntUnmapPages(DEVMEMINT_RESERVATION *psReservation,
+                    IMG_DEV_VIRTADDR sDevVAddrBase,
+                    IMG_UINT32 ui32PageCount)
+{
+       PVR_LOG_RETURN_IF_INVALID_PARAM((ui32PageCount < PMR_MAX_SUPPORTED_PAGE_COUNT), "ui32PageCount");
+
+       /* Unmap the pages and mark them invalid in the MMU PTE */
+       MMU_UnmapPages(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+                      0,
+                      sDevVAddrBase,
+                      ui32PageCount,
+                      NULL,
+                      psReservation->psDevmemHeap->uiLog2PageSize,
+                      0);
+
+       return PVRSRV_OK;
+}
+
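+/* Note on usage (as implied by the refcounting in this file): the expected
+ * server-side sequence is DevmemIntReserveRange() to reserve device-virtual
+ * space on a heap, DevmemIntMapPMR() to map a PMR into that reservation, and
+ * then DevmemIntUnmapPMR() followed by DevmemIntUnreserveRange() to tear it
+ * down again. A reservation only ever holds one mapping, so both paths take
+ * and drop their references directly on the heap. */
+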
+PVRSRV_ERROR
+DevmemIntMapPMR(DEVMEMINT_HEAP *psDevmemHeap,
+                DEVMEMINT_RESERVATION *psReservation,
+                PMR *psPMR,
+                PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+                DEVMEMINT_MAPPING **ppsMappingPtr)
+{
+       PVRSRV_ERROR eError;
+       DEVMEMINT_MAPPING *psMapping;
+       /* number of pages (device pages) that allocation spans */
+       IMG_UINT32 ui32NumDevPages;
+       /* device virtual address of start of allocation */
+       IMG_DEV_VIRTADDR sAllocationDevVAddr;
+       /* and its length */
+       IMG_DEVMEM_SIZE_T uiAllocationSize;
+       IMG_UINT32 uiLog2HeapContiguity = psDevmemHeap->uiLog2PageSize;
+       IMG_BOOL bIsSparse = IMG_FALSE, bNeedBacking = IMG_FALSE;
+       PVRSRV_DEVICE_NODE *psDevNode;
+       PMR_FLAGS_T uiPMRFlags;
+       PVRSRV_DEF_PAGE *psDefPage;
+       IMG_CHAR *pszPageName;
+
+       if (uiLog2HeapContiguity > PMR_GetLog2Contiguity(psPMR))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Device heap and PMR have incompatible contiguity (%u - %u). "
+                        "PMR contiguity must be a multiple of the heap contiguity!",
+                        __func__,
+                        uiLog2HeapContiguity,
+                        PMR_GetLog2Contiguity(psPMR) ));
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, ErrorReturnError);
+       }
+       psDevNode = psDevmemHeap->psDevmemCtx->psDevNode;
+
+       /* Don't bother with refcount on reservation, as a reservation
+          only ever holds one mapping, so we directly increment the
+          refcount on the heap instead */
+       if (!DevmemIntHeapAcquire(psDevmemHeap))
+       {
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_REFCOUNT_OVERFLOW, ErrorReturnError);
+       }
+
+       /* allocate memory to record the mapping info */
+       psMapping = OSAllocMem(sizeof(*psMapping));
+       PVR_LOG_GOTO_IF_NOMEM(psMapping, eError, ErrorUnreference);
+
+       uiAllocationSize = psReservation->uiLength;
+
+       ui32NumDevPages = 0xffffffffU & ( ( (uiAllocationSize - 1) >> uiLog2HeapContiguity) + 1);
+       PVR_ASSERT((IMG_DEVMEM_SIZE_T) ui32NumDevPages << uiLog2HeapContiguity == uiAllocationSize);
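+       /* i.e. round the reservation length up to whole device pages: for
+        * example, a 20 KiB reservation on a 4 KiB heap (uiLog2HeapContiguity
+        * == 12) gives ((20480 - 1) >> 12) + 1 = 5 device pages. The assert
+        * above expects the length to already be page-aligned, so the
+        * round-up is exact. */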
+
+       eError = PMRLockSysPhysAddresses(psPMR);
+       PVR_GOTO_IF_ERROR(eError, ErrorFreeMapping);
+
+       sAllocationDevVAddr = psReservation->sBase;
+
+       /*Check if the PMR that needs to be mapped is sparse */
+       bIsSparse = PMR_IsSparse(psPMR);
+       if (bIsSparse)
+       {
+               /*Get the flags*/
+               uiPMRFlags = PMR_Flags(psPMR);
+               bNeedBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiPMRFlags);
+
+               if (bNeedBacking)
+               {
+                       IMG_INT uiInitValue;
+
+                       if (PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiPMRFlags))
+                       {
+                               psDefPage = &psDevmemHeap->psDevmemCtx->psDevNode->sDevZeroPage;
+                               uiInitValue = PVR_ZERO_PAGE_INIT_VALUE;
+                               pszPageName = DEV_ZERO_PAGE;
+                       }
+                       else
+                       {
+                               psDefPage = &psDevmemHeap->psDevmemCtx->psDevNode->sDummyPage;
+                               uiInitValue = PVR_DUMMY_PAGE_INIT_VALUE;
+                               pszPageName = DUMMY_PAGE;
+                       }
+
+                       /* Errors are logged within the function below if anything fails.
+                        * If the allocation fails we must fail the map request and
+                        * return an appropriate error.
+                        *
+                        * The dummy/zero page is allocated after physically locking down the
+                        * PMR pages, which favours the common case where the dummy/zero page
+                        * allocation succeeds. If it does fail we have to unlock the physical
+                        * addresses again, and the impact of that is a bit higher in the
+                        * on-demand mode of operation */
+                       eError = DevmemIntAllocDefBackingPage(psDevNode,
+                                                             psDefPage,
+                                                             uiInitValue,
+                                                             pszPageName,
+                                                             IMG_TRUE);
+                       PVR_GOTO_IF_ERROR(eError, ErrorUnlockPhysAddr);
+               }
+
+               /* N.B. We pass mapping permission flags to MMU_MapPages and let
+                * it reject the mapping if the permissions on the PMR are not compatible. */
+               eError = MMU_MapPages(psDevmemHeap->psDevmemCtx->psMMUContext,
+                                     uiMapFlags,
+                                     sAllocationDevVAddr,
+                                     psPMR,
+                                     0,
+                                     ui32NumDevPages,
+                                     NULL,
+                                     uiLog2HeapContiguity);
+               PVR_GOTO_IF_ERROR(eError, ErrorFreeDefBackingPage);
+       }
+       else
+       {
+               eError = MMU_MapPMRFast(psDevmemHeap->psDevmemCtx->psMMUContext,
+                                       sAllocationDevVAddr,
+                                       psPMR,
+                                       (IMG_DEVMEM_SIZE_T) ui32NumDevPages << uiLog2HeapContiguity,
+                                       uiMapFlags,
+                                       uiLog2HeapContiguity);
+               PVR_GOTO_IF_ERROR(eError, ErrorUnlockPhysAddr);
+       }
+
+       psMapping->psReservation = psReservation;
+       psMapping->uiNumPages = ui32NumDevPages;
+       psMapping->psPMR = psPMR;
+
+       *ppsMappingPtr = psMapping;
+
+       return PVRSRV_OK;
+
+ErrorFreeDefBackingPage:
+       if (bNeedBacking)
+       {
+               /* If the mapping failed, drop the reference taken on the
+                * dummy/zero backing page above */
+               DevmemIntFreeDefBackingPage(psDevmemHeap->psDevmemCtx->psDevNode,
+                                           psDefPage,
+                                           pszPageName);
+       }
+
+ErrorUnlockPhysAddr:
+       {
+               PVRSRV_ERROR eError1 = PVRSRV_OK;
+               eError1 = PMRUnlockSysPhysAddresses(psPMR);
+               PVR_LOG_IF_ERROR(eError1, "PMRUnlockSysPhysAddresses");
+
+               *ppsMappingPtr = NULL;
+       }
+
+ErrorFreeMapping:
+       OSFreeMem(psMapping);
+
+ErrorUnreference:
+       /* If this fails there's not much to do (the function will print an error) */
+       DevmemIntHeapRelease(psDevmemHeap);
+
+ErrorReturnError:
+       PVR_ASSERT (eError != PVRSRV_OK);
+       return eError;
+}
+
+
+PVRSRV_ERROR
+DevmemIntUnmapPMR(DEVMEMINT_MAPPING *psMapping)
+{
+       PVRSRV_ERROR eError;
+       DEVMEMINT_HEAP *psDevmemHeap = psMapping->psReservation->psDevmemHeap;
+       /* device virtual address of start of allocation */
+       IMG_DEV_VIRTADDR sAllocationDevVAddr;
+       /* number of pages (device pages) that allocation spans */
+       IMG_UINT32 ui32NumDevPages;
+       IMG_BOOL bIsSparse = IMG_FALSE, bNeedBacking = IMG_FALSE;
+       PMR_FLAGS_T uiPMRFlags;
+
+       ui32NumDevPages = psMapping->uiNumPages;
+       sAllocationDevVAddr = psMapping->psReservation->sBase;
+
+       /* Check if the PMR being unmapped is sparse */
+       bIsSparse = PMR_IsSparse(psMapping->psPMR);
+
+       if (bIsSparse)
+       {
+               /*Get the flags*/
+               uiPMRFlags = PMR_Flags(psMapping->psPMR);
+               bNeedBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiPMRFlags);
+
+               if (bNeedBacking)
+               {
+                       if (PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiPMRFlags))
+                       {
+                               DevmemIntFreeDefBackingPage(psDevmemHeap->psDevmemCtx->psDevNode,
+                                                                                       &psDevmemHeap->psDevmemCtx->psDevNode->sDevZeroPage,
+                                                                                       DEV_ZERO_PAGE);
+                       }
+                       else
+                       {
+                               DevmemIntFreeDefBackingPage(psDevmemHeap->psDevmemCtx->psDevNode,
+                                                                                       &psDevmemHeap->psDevmemCtx->psDevNode->sDummyPage,
+                                                                                       DUMMY_PAGE);
+                       }
+               }
+
+               MMU_UnmapPages (psDevmemHeap->psDevmemCtx->psMMUContext,
+                               0,
+                               sAllocationDevVAddr,
+                               ui32NumDevPages,
+                               NULL,
+                               psMapping->psReservation->psDevmemHeap->uiLog2PageSize,
+                               0);
+       }
+       else
+       {
+               MMU_UnmapPMRFast(psDevmemHeap->psDevmemCtx->psMMUContext,
+                                sAllocationDevVAddr,
+                                ui32NumDevPages,
+                                psMapping->psReservation->psDevmemHeap->uiLog2PageSize);
+       }
+
+       eError = PMRUnlockSysPhysAddresses(psMapping->psPMR);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+       /* Don't bother with refcount on reservation, as a reservation only ever
+        * holds one mapping, so we directly decrement the refcount on the heap
+        * instead.
+        * Function will print an error if the heap could not be unreferenced. */
+       DevmemIntHeapRelease(psDevmemHeap);
+
+       OSFreeMem(psMapping);
+
+       return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+DevmemIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap,
+                      IMG_DEV_VIRTADDR sAllocationDevVAddr,
+                      IMG_DEVMEM_SIZE_T uiAllocationSize,
+                      DEVMEMINT_RESERVATION **ppsReservationPtr)
+{
+       PVRSRV_ERROR eError;
+       DEVMEMINT_RESERVATION *psReservation;
+
+       if (!DevmemIntHeapAcquire(psDevmemHeap))
+       {
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_REFCOUNT_OVERFLOW,
+                                   ErrorReturnError);
+       }
+
+       /* allocate memory to record the reservation info */
+       psReservation = OSAllocMem(sizeof(*psReservation));
+       PVR_LOG_GOTO_IF_NOMEM(psReservation, eError, ErrorUnreference);
+
+       psReservation->sBase = sAllocationDevVAddr;
+       psReservation->uiLength = uiAllocationSize;
+
+       eError = MMU_Alloc(psDevmemHeap->psDevmemCtx->psMMUContext,
+                          uiAllocationSize,
+                          &uiAllocationSize,
+                          0, /* IMG_UINT32 uiProtFlags */
+                          0, /* alignment is n/a since we supply devvaddr */
+                          &sAllocationDevVAddr,
+                          psDevmemHeap->uiLog2PageSize);
+       PVR_GOTO_IF_ERROR(eError, ErrorFreeReservation);
+
+       /* since we supplied the virt addr, MMU_Alloc shouldn't have
+          chosen a new one for us */
+       PVR_ASSERT(sAllocationDevVAddr.uiAddr == psReservation->sBase.uiAddr);
+
+       psReservation->psDevmemHeap = psDevmemHeap;
+       *ppsReservationPtr = psReservation;
+
+       return PVRSRV_OK;
+
+       /*
+        *  error exit paths follow
+        */
+
+ErrorFreeReservation:
+       OSFreeMem(psReservation);
+
+ErrorUnreference:
+       /* If this fails there's not much to do (the function will print an error) */
+       DevmemIntHeapRelease(psDevmemHeap);
+
+ErrorReturnError:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntUnreserveRange(DEVMEMINT_RESERVATION *psReservation)
+{
+       IMG_DEV_VIRTADDR sBase        = psReservation->sBase;
+       IMG_UINT32 uiLength           = psReservation->uiLength;
+       IMG_UINT32 uiLog2DataPageSize = psReservation->psDevmemHeap->uiLog2PageSize;
+
+       MMU_Free(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+                sBase,
+                uiLength,
+                uiLog2DataPageSize);
+
+       /* Don't bother with refcount on reservation, as a reservation only ever
+        * holds one mapping, so we directly decrement the refcount on the heap
+        * instead.
+        * Function will print an error if the heap could not be unreferenced. */
+       DevmemIntHeapRelease(psReservation->psDevmemHeap);
+
+       OSFreeMem(psReservation);
+
+       return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+DevmemIntHeapDestroy(DEVMEMINT_HEAP *psDevmemHeap)
+{
+       if (OSAtomicRead(&psDevmemHeap->uiRefCount) != DEVMEMHEAP_REFCOUNT_MIN)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "BUG!  %s called but has too many references (%d) "
+                        "which probably means reservations & mappings have been made from "
+                        "the heap and not freed", __func__,
+                        OSAtomicRead(&psDevmemHeap->uiRefCount)));
+
+               /*
+                * Try again later when you've freed all the memory
+                *
+                * Note:
+                * We don't expect the application to retry (after all this call would
+                * succeed if the client had freed all the memory which it should have
+                * done before calling this function). However, given there should be
+                * an associated handle, when the handle base is destroyed it will free
+                * any allocations leaked by the client and then it will retry this call,
+                * which should then succeed.
+                */
+               return PVRSRV_ERROR_RETRY;
+       }
+
+       PVR_ASSERT(OSAtomicRead(&psDevmemHeap->uiRefCount) == DEVMEMHEAP_REFCOUNT_MIN);
+
+       DevmemIntCtxRelease(psDevmemHeap->psDevmemCtx);
+
+       PVR_DPF((PVR_DBG_MESSAGE, "%s: Freed heap %p", __func__, psDevmemHeap));
+       OSFreeMem(psDevmemHeap);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemIntChangeSparse(DEVMEMINT_HEAP *psDevmemHeap,
+                      PMR *psPMR,
+                      IMG_UINT32 ui32AllocPageCount,
+                      IMG_UINT32 *pai32AllocIndices,
+                      IMG_UINT32 ui32FreePageCount,
+                      IMG_UINT32 *pai32FreeIndices,
+                      SPARSE_MEM_RESIZE_FLAGS uiSparseFlags,
+                      PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                      IMG_DEV_VIRTADDR sDevVAddrBase,
+                      IMG_UINT64 sCpuVAddrBase)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       IMG_UINT32 uiLog2PMRContiguity = PMR_GetLog2Contiguity(psPMR);
+       IMG_UINT32 uiLog2HeapContiguity = psDevmemHeap->uiLog2PageSize;
+       IMG_UINT32 uiOrderDiff = uiLog2PMRContiguity - uiLog2HeapContiguity;
+       IMG_UINT32 uiPagesPerOrder = 1 << uiOrderDiff;
+
+       IMG_UINT32 *pai32MapIndices = pai32AllocIndices;
+       IMG_UINT32 *pai32UnmapIndices = pai32FreeIndices;
+       IMG_UINT32 uiMapPageCount = ui32AllocPageCount;
+       IMG_UINT32 uiUnmapPageCount = ui32FreePageCount;
+
+       /* Special case:
+        * Adjust indices if we map into a heap that uses smaller page sizes
+        * than the physical allocation itself.
+        * The incoming parameters are all based on the page size of the PMR
+        * but the mapping functions expect parameters in terms of heap page sizes. */
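+       /* For example (illustrative values only): a PMR with 16 KiB physical
+        * contiguity (log2 = 14) mapped into a 4 KiB heap (log2 = 12) gives
+        * uiOrderDiff = 2 and uiPagesPerOrder = 4, so PMR chunk index 3
+        * expands to heap page indices 12, 13, 14 and 15 below. */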
+       if (uiOrderDiff != 0)
+       {
+               IMG_UINT32 uiPgIdx, uiPgOffset;
+
+               uiMapPageCount = (uiMapPageCount << uiOrderDiff);
+               uiUnmapPageCount = (uiUnmapPageCount << uiOrderDiff);
+
+               pai32MapIndices = OSAllocMem(uiMapPageCount * sizeof(*pai32MapIndices));
+               PVR_GOTO_IF_NOMEM(pai32MapIndices, eError, e0);
+
+               pai32UnmapIndices = OSAllocMem(uiUnmapPageCount * sizeof(*pai32UnmapIndices));
+               if (!pai32UnmapIndices)
+               {
+                       OSFreeMem(pai32MapIndices);
+                       PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, e0);
+               }
+
+               /* Every chunk index needs to be translated from physical indices
+                * into heap based indices. */
+               for (uiPgIdx = 0; uiPgIdx < ui32AllocPageCount; uiPgIdx++)
+               {
+                       for (uiPgOffset = 0; uiPgOffset < uiPagesPerOrder; uiPgOffset++)
+                       {
+                               pai32MapIndices[uiPgIdx*uiPagesPerOrder + uiPgOffset] =
+                                               pai32AllocIndices[uiPgIdx]*uiPagesPerOrder + uiPgOffset;
+                       }
+               }
+
+               for (uiPgIdx = 0; uiPgIdx < ui32FreePageCount; uiPgIdx++)
+               {
+                       for (uiPgOffset = 0; uiPgOffset < uiPagesPerOrder; uiPgOffset++)
+                       {
+                               pai32UnmapIndices[uiPgIdx*uiPagesPerOrder + uiPgOffset] =
+                                               pai32FreeIndices[uiPgIdx]*uiPagesPerOrder + uiPgOffset;
+                       }
+               }
+       }
+
+       /*
+        * The order of steps in which this request is done is given below. The order of
+        * operations is very important in this case:
+        *
+        * 1. The parameters are validated in function PMR_ChangeSparseMem below.
+        *    A successful response indicates all the parameters are correct.
+        *    On failure we bail out from here without processing further.
+        * 2. On success, perform the PMR-specific operations: page alloc, page free
+        *    and the corresponding PMR status changes.
+        *    If this call fails, the previous state of the PMR is left undisturbed.
+        *    If it succeeds, we can go ahead with the subsequent steps.
+        * 3. Invalidate the GPU page table entries for the pages to be freed.
+        * 4. Write the GPU page table entries for the pages that got allocated.
+        * 5. Change the corresponding CPU space map.
+        *
+        * The above steps can be selectively controlled using flags.
+        */
+       if (uiSparseFlags & (SPARSE_REMAP_MEM | SPARSE_RESIZE_BOTH))
+       {
+               /* Do the PMR specific changes first */
+               eError = PMR_ChangeSparseMem(psPMR,
+                                            ui32AllocPageCount,
+                                            pai32AllocIndices,
+                                            ui32FreePageCount,
+                                            pai32FreeIndices,
+                                            uiSparseFlags);
+               if (PVRSRV_OK != eError)
+               {
+                       PVR_DPF((PVR_DBG_MESSAGE,
+                                       "%s: Failed to do PMR specific changes.",
+                                       __func__));
+                       goto e1;
+               }
+
+               /* Invalidate the page table entries for the freed pages.
+                * A later optimisation would be to not touch the ones that get re-mapped */
+               if ((0 != ui32FreePageCount) && (uiSparseFlags & SPARSE_RESIZE_FREE))
+               {
+                       PMR_FLAGS_T uiPMRFlags;
+
+                       /*Get the flags*/
+                       uiPMRFlags = PMR_Flags(psPMR);
+
+                       if (SPARSE_REMAP_MEM != (uiSparseFlags & SPARSE_REMAP_MEM))
+                       {
+                               /* Unmap the pages and mark them invalid in the MMU PTE */
+                               MMU_UnmapPages (psDevmemHeap->psDevmemCtx->psMMUContext,
+                                               uiFlags,
+                                               sDevVAddrBase,
+                                               uiUnmapPageCount,
+                                               pai32UnmapIndices,
+                                               uiLog2HeapContiguity,
+                                               uiPMRFlags);
+                       }
+               }
+
+               /* Wire up the page tables for the newly allocated pages */
+               if ((0 != ui32AllocPageCount) && (uiSparseFlags & SPARSE_RESIZE_ALLOC))
+               {
+                       /* Map the pages and mark them Valid in the MMU PTE */
+                       eError = MMU_MapPages (psDevmemHeap->psDevmemCtx->psMMUContext,
+                                              uiFlags,
+                                              sDevVAddrBase,
+                                              psPMR,
+                                              0,
+                                              uiMapPageCount,
+                                              pai32MapIndices,
+                                              uiLog2HeapContiguity);
+                       if (PVRSRV_OK != eError)
+                       {
+                               PVR_DPF((PVR_DBG_MESSAGE,
+                                               "%s: Failed to map alloc indices.",
+                                               __func__));
+                               goto e1;
+                       }
+               }
+
+               /* Currently only used for debug */
+               if (SPARSE_REMAP_MEM == (uiSparseFlags & SPARSE_REMAP_MEM))
+               {
+                       eError = MMU_MapPages(psDevmemHeap->psDevmemCtx->psMMUContext,
+                                             uiFlags,
+                                             sDevVAddrBase,
+                                             psPMR,
+                                             0,
+                                             uiMapPageCount,
+                                             pai32UnmapIndices,
+                                             uiLog2HeapContiguity);
+                       if (PVRSRV_OK != eError)
+                       {
+                               PVR_DPF((PVR_DBG_MESSAGE,
+                                               "%s: Failed to map Free indices.",
+                                               __func__));
+                               goto e1;
+                       }
+               }
+       }
+
+#ifndef PVRSRV_UNMAP_ON_SPARSE_CHANGE
+       /* Apply the sparse changes to the CPU virtual mapping as well */
+       if (uiSparseFlags & SPARSE_MAP_CPU_ADDR)
+       {
+               if (sCpuVAddrBase != 0)
+               {
+                       eError = PMR_ChangeSparseMemCPUMap(psPMR,
+                                                          sCpuVAddrBase,
+                                                          ui32AllocPageCount,
+                                                          pai32AllocIndices,
+                                                          ui32FreePageCount,
+                                                          pai32FreeIndices);
+                       if (PVRSRV_OK != eError)
+                       {
+                               PVR_DPF((PVR_DBG_MESSAGE,
+                                               "%s: Failed to map to CPU addr space.",
+                                               __func__));
+                               goto e0;
+                       }
+               }
+       }
+#endif
+
+e1:
+       if (pai32MapIndices != pai32AllocIndices)
+       {
+               OSFreeMem(pai32MapIndices);
+       }
+       if (pai32UnmapIndices != pai32FreeIndices)
+       {
+               OSFreeMem(pai32UnmapIndices);
+       }
+e0:
+       return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemIntCtxDestroy
+@Description    Destroys a context created by DevmemIntCtxCreate
+@Input          psDevmemCtx   Device Memory context
+@Return         cannot fail.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemIntCtxDestroy(DEVMEMINT_CTX *psDevmemCtx)
+{
+       /*
+          We can't determine here whether we should be freeing the context,
+          as a refcount != 1 could mean either that heap(s) still have
+          allocations on them, or that this memory context has been exported.
+          The client couldn't do anything useful with this information anyway,
+          and the refcount ensures we only free the context when _all_
+          references have been released, so don't bother checking and just
+          return OK regardless.
+          */
+       DevmemIntCtxRelease(psDevmemCtx);
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR DevmemIntIsVDevAddrValid(CONNECTION_DATA * psConnection,
+                                      PVRSRV_DEVICE_NODE *psDevNode,
+                                      DEVMEMINT_CTX *psDevMemContext,
+                                      IMG_DEV_VIRTADDR sDevAddr)
+{
+       IMG_UINT32 i, j, uiLog2HeapPageSize = 0;
+       DEVICE_MEMORY_INFO *psDinfo = &psDevNode->sDevMemoryInfo;
+       DEVMEM_HEAP_CONFIG *psConfig = psDinfo->psDeviceMemoryHeapConfigArray;
+
+       IMG_BOOL bFound = IMG_FALSE;
+
+       for (i = 0;
+                i < psDinfo->uiNumHeapConfigs && !bFound;
+                i++)
+       {
+               for (j = 0;
+                        j < psConfig[i].uiNumHeaps  && !bFound;
+                        j++)
+               {
+                       IMG_DEV_VIRTADDR uiBase =
+                                       psConfig[i].psHeapBlueprintArray[j].sHeapBaseAddr;
+                       IMG_DEVMEM_SIZE_T uiSize =
+                                       psConfig[i].psHeapBlueprintArray[j].uiHeapLength;
+
+                       if ((sDevAddr.uiAddr >= uiBase.uiAddr) &&
+                               (sDevAddr.uiAddr < (uiBase.uiAddr + uiSize)))
+                       {
+                               uiLog2HeapPageSize =
+                                               psConfig[i].psHeapBlueprintArray[j].uiLog2DataPageSize;
+                               bFound = IMG_TRUE;
+                       }
+               }
+       }
+
+       if (uiLog2HeapPageSize == 0)
+       {
+               return PVRSRV_ERROR_INVALID_GPU_ADDR;
+       }
+
+       return MMU_IsVDevAddrValid(psDevMemContext->psMMUContext,
+                                  uiLog2HeapPageSize,
+                                  sDevAddr) ? PVRSRV_OK : PVRSRV_ERROR_INVALID_GPU_ADDR;
+}
+
+PVRSRV_ERROR
+DevmemIntFlushDevSLCRange(DEVMEMINT_CTX *psDevMemContext,
+                          IMG_DEV_VIRTADDR sDevVAddr,
+                          IMG_DEVMEM_SIZE_T uiSize,
+                          IMG_BOOL bInvalidate)
+{
+       PVRSRV_DEVICE_NODE *psDevNode = psDevMemContext->psDevNode;
+       MMU_CONTEXT *psMMUContext = psDevMemContext->psMMUContext;
+
+       if (psDevNode->pfnDevSLCFlushRange)
+       {
+               return psDevNode->pfnDevSLCFlushRange(psDevNode,
+                                                     psMMUContext,
+                                                     sDevVAddr,
+                                                     uiSize,
+                                                     bInvalidate);
+       }
+
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+PVRSRV_ERROR
+DevmemIntInvalidateFBSCTable(DEVMEMINT_CTX *psDevMemContext,
+                             IMG_UINT64 ui64FBSCEntryMask)
+{
+       PVRSRV_DEVICE_NODE *psDevNode = psDevMemContext->psDevNode;
+       MMU_CONTEXT *psMMUContext = psDevMemContext->psMMUContext;
+
+       if (psDevNode->pfnInvalFBSCTable)
+       {
+               return psDevNode->pfnInvalFBSCTable(psDevNode,
+                                                   psMMUContext,
+                                                   ui64FBSCEntryMask);
+       }
+
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+PVRSRV_ERROR DevmemIntGetFaultAddress(CONNECTION_DATA *psConnection,
+                                      PVRSRV_DEVICE_NODE *psDevNode,
+                                      DEVMEMINT_CTX *psDevMemContext,
+                                      IMG_DEV_VIRTADDR *psFaultAddress)
+{
+       if ((psDevMemContext->ui32Flags & DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE) == 0)
+       {
+               return PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+       }
+
+       *psFaultAddress = psDevMemContext->sFaultAddress;
+       psDevMemContext->ui32Flags &= ~DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE;
+
+       return PVRSRV_OK;
+}
+
+static POSWR_LOCK g_hExportCtxListLock;
+static DLLIST_NODE g_sExportCtxList;
+
+PVRSRV_ERROR
+DevmemIntInit(void)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       dllist_init(&g_sExportCtxList);
+
+       eError = OSWRLockCreate(&g_hExportCtxListLock);
+
+       return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntDeInit(void)
+{
+       PVR_ASSERT(dllist_is_empty(&g_sExportCtxList));
+
+       OSWRLockDestroy(g_hExportCtxListLock);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemIntExportCtx(DEVMEMINT_CTX *psContext,
+                   PMR *psPMR,
+                   DEVMEMINT_CTX_EXPORT **ppsContextExport)
+{
+       DEVMEMINT_CTX_EXPORT *psCtxExport;
+
+       psCtxExport = OSAllocMem(sizeof(DEVMEMINT_CTX_EXPORT));
+       PVR_LOG_RETURN_IF_NOMEM(psCtxExport, "psCtxExport");
+
+       DevmemIntCtxAcquire(psContext);
+       PMRRefPMR(psPMR);
+       /* Now that the source PMR is exported, its layout can't change
+        * as there could be outstanding importers.
+        * This makes sure the exporter's and the importers' views of
+        * the memory are the same */
+       PMR_SetLayoutFixed(psPMR, IMG_TRUE);
+       psCtxExport->psDevmemCtx = psContext;
+       psCtxExport->psPMR = psPMR;
+       OSWRLockAcquireWrite(g_hExportCtxListLock);
+       dllist_add_to_tail(&g_sExportCtxList, &psCtxExport->sNode);
+       OSWRLockReleaseWrite(g_hExportCtxListLock);
+
+       *ppsContextExport = psCtxExport;
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemIntUnexportCtx(DEVMEMINT_CTX_EXPORT *psContextExport)
+{
+       PMRUnrefPMR(psContextExport->psPMR);
+       DevmemIntCtxRelease(psContextExport->psDevmemCtx);
+       OSWRLockAcquireWrite(g_hExportCtxListLock);
+       dllist_remove_node(&psContextExport->sNode);
+       OSWRLockReleaseWrite(g_hExportCtxListLock);
+       OSFreeMem(psContextExport);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemIntAcquireRemoteCtx(PMR *psPMR,
+                          DEVMEMINT_CTX **ppsContext,
+                          IMG_HANDLE *phPrivData)
+{
+       PDLLIST_NODE psListNode, psListNodeNext;
+       DEVMEMINT_CTX_EXPORT *psCtxExport;
+
+       OSWRLockAcquireRead(g_hExportCtxListLock);
+       /* Find context from list using PMR as key */
+       dllist_foreach_node(&g_sExportCtxList, psListNode, psListNodeNext)
+       {
+               psCtxExport = IMG_CONTAINER_OF(psListNode, DEVMEMINT_CTX_EXPORT, sNode);
+               if (psCtxExport->psPMR == psPMR)
+               {
+                       OSWRLockReleaseRead(g_hExportCtxListLock);
+                       DevmemIntCtxAcquire(psCtxExport->psDevmemCtx);
+                       *ppsContext = psCtxExport->psDevmemCtx;
+                       *phPrivData = psCtxExport->psDevmemCtx->hPrivData;
+
+                       /* The PMR must already have been exported for it to be imported here.
+                        * Once exported, a PMR's layout is immutable, which is
+                        * asserted below */
+                       PVR_ASSERT(IMG_TRUE == PMR_IsMemLayoutFixed(psPMR));
+
+                       return PVRSRV_OK;
+               }
+       }
+       OSWRLockReleaseRead(g_hExportCtxListLock);
+
+       /* Unable to find exported context, return error */
+       PVR_DPF((PVR_DBG_ERROR,
+                       "%s: Failed to acquire remote context. Could not retrieve context with given PMR",
+                       __func__));
+       return PVRSRV_ERROR_INVALID_PARAMS;
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemIntRegisterPFNotifyKM
+@Description    Registers a PID to be notified when a page fault occurs on a
+                specific device memory context.
+@Input          psDevmemCtx    The context to be notified about.
+@Input          ui32PID        The PID of the process that would like to be
+                               notified.
+@Input          bRegister      If true, register. If false, de-register.
+@Return         PVRSRV_ERROR.
+*/ /**************************************************************************/
+PVRSRV_ERROR DevmemIntRegisterPFNotifyKM(DEVMEMINT_CTX *psDevmemCtx,
+                                         IMG_INT32     ui32PID,
+                                         IMG_BOOL      bRegister)
+{
+       PVRSRV_DEVICE_NODE *psDevNode;
+       DLLIST_NODE         *psNode, *psNodeNext;
+       DEVMEMINT_PF_NOTIFY *psNotifyNode;
+       IMG_BOOL            bPresent = IMG_FALSE;
+       PVRSRV_ERROR        eError;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psDevmemCtx, "psDevmemCtx");
+
+       /* Acquire write lock for the duration, to avoid resource free
+        * while trying to read (no need to then also acquire the read lock
+        * as we have exclusive access while holding the write lock)
+        */
+       OSWRLockAcquireWrite(psDevmemCtx->hListLock);
+
+       psDevNode = psDevmemCtx->psDevNode;
+
+       if (bRegister)
+       {
+               /* If this is the first PID in the list, the device memory context
+                * needs to be registered for notification */
+               if (dllist_is_empty(&psDevmemCtx->sProcessNotifyListHead))
+               {
+                       OSWRLockAcquireWrite(psDevNode->hMemoryContextPageFaultNotifyListLock);
+                       dllist_add_to_tail(&psDevNode->sMemoryContextPageFaultNotifyListHead,
+                                          &psDevmemCtx->sPageFaultNotifyListElem);
+                       OSWRLockReleaseWrite(psDevNode->hMemoryContextPageFaultNotifyListLock);
+               }
+       }
+
+       /* Loop through the registered PIDs and check whether this one is
+        * present */
+       dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext)
+       {
+               psNotifyNode = IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem);
+
+               if (psNotifyNode->ui32PID == ui32PID)
+               {
+                       bPresent = IMG_TRUE;
+                       break;
+               }
+       }
+
+       if (bRegister)
+       {
+               if (bPresent)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Trying to register a PID that is already registered",
+                                __func__));
+                       eError = PVRSRV_ERROR_PID_ALREADY_REGISTERED;
+                       goto err_already_registered;
+               }
+
+               psNotifyNode = OSAllocMem(sizeof(*psNotifyNode));
+               if (psNotifyNode == NULL)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Unable to allocate memory for the notify list",
+                                 __func__));
+                       eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                       goto err_out_of_mem;
+               }
+               psNotifyNode->ui32PID = ui32PID;
+               /* Write lock is already held */
+               dllist_add_to_tail(&(psDevmemCtx->sProcessNotifyListHead), &(psNotifyNode->sProcessNotifyListElem));
+       }
+       else
+       {
+               if (!bPresent)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Trying to unregister a PID that is not registered",
+                                __func__));
+                       eError = PVRSRV_ERROR_PID_NOT_REGISTERED;
+                       goto err_not_registered;
+               }
+               /* Write lock is already held */
+               dllist_remove_node(psNode);
+               psNotifyNode = IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem);
+               OSFreeMem(psNotifyNode);
+
+               /* If the last process in the list is being unregistered, then also
+                * unregister the device memory context from the notify list. */
+               if (dllist_is_empty(&psDevmemCtx->sProcessNotifyListHead))
+               {
+                       OSWRLockAcquireWrite(psDevNode->hMemoryContextPageFaultNotifyListLock);
+                       dllist_remove_node(&psDevmemCtx->sPageFaultNotifyListElem);
+                       OSWRLockReleaseWrite(psDevNode->hMemoryContextPageFaultNotifyListLock);
+               }
+       }
+       eError = PVRSRV_OK;
+
+err_already_registered:
+err_out_of_mem:
+err_not_registered:
+
+       OSWRLockReleaseWrite(psDevmemCtx->hListLock);
+       return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemIntPFNotify
+@Description    Notifies any processes that have registered themselves to be
+                notified when a page fault happens on a specific device memory
+                context.
+@Input          *psDevNode           The device node.
+@Input          ui64FaultedPCAddress The page catalogue address that faulted.
+@Input          sFaultAddress        The address that triggered the fault.
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR DevmemIntPFNotify(PVRSRV_DEVICE_NODE *psDevNode,
+                               IMG_UINT64         ui64FaultedPCAddress,
+                               IMG_DEV_VIRTADDR   sFaultAddress)
+{
+       DLLIST_NODE         *psNode, *psNodeNext;
+       DEVMEMINT_PF_NOTIFY *psNotifyNode;
+       PVRSRV_ERROR        eError;
+       DEVMEMINT_CTX       *psDevmemCtx = NULL;
+       IMG_BOOL            bFailed = IMG_FALSE;
+
+       OSWRLockAcquireRead(psDevNode->hMemoryContextPageFaultNotifyListLock);
+       if (dllist_is_empty(&(psDevNode->sMemoryContextPageFaultNotifyListHead)))
+       {
+               OSWRLockReleaseRead(psDevNode->hMemoryContextPageFaultNotifyListLock);
+               return PVRSRV_OK;
+       }
+
+       dllist_foreach_node(&(psDevNode->sMemoryContextPageFaultNotifyListHead), psNode, psNodeNext)
+       {
+               DEVMEMINT_CTX *psThisContext =
+                       IMG_CONTAINER_OF(psNode, DEVMEMINT_CTX, sPageFaultNotifyListElem);
+               IMG_DEV_PHYADDR sPCDevPAddr;
+
+               eError = MMU_AcquireBaseAddr(psThisContext->psMMUContext, &sPCDevPAddr);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_LOG_ERROR(eError, "MMU_AcquireBaseAddr");
+                       OSWRLockReleaseRead(psDevNode->hMemoryContextPageFaultNotifyListLock);
+                       return eError;
+               }
+
+               if (sPCDevPAddr.uiAddr == ui64FaultedPCAddress)
+               {
+                       psDevmemCtx = psThisContext;
+                       break;
+               }
+       }
+       OSWRLockReleaseRead(psDevNode->hMemoryContextPageFaultNotifyListLock);
+
+       if (psDevmemCtx == NULL)
+       {
+               /* Not found, just return */
+               return PVRSRV_OK;
+       }
+       OSWRLockAcquireRead(psDevmemCtx->hListLock);
+
+       /*
+        * Store the first occurrence of a page fault address,
+        * until that address is consumed by a client.
+        */
+       if ((psDevmemCtx->ui32Flags & DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE) == 0)
+       {
+               psDevmemCtx->sFaultAddress = sFaultAddress;
+               psDevmemCtx->ui32Flags |= DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE;
+       }
+
+       /* Loop through each registered PID and send a signal to the process */
+       dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext)
+       {
+               psNotifyNode = IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem);
+
+               eError = OSDebugSignalPID(psNotifyNode->ui32PID);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Unable to signal process for PID: %u",
+                                __func__,
+                                psNotifyNode->ui32PID));
+
+                       PVR_ASSERT(!"Unable to signal process");
+
+                       bFailed = IMG_TRUE;
+               }
+       }
+       OSWRLockReleaseRead(psDevmemCtx->hListLock);
+
+       if (bFailed)
+       {
+               return PVRSRV_ERROR_SIGNAL_FAILED;
+       }
+
+       return PVRSRV_OK;
+}
+
+#if defined(PDUMP)
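+/* Return the PDump MMU context ID associated with the given device memory
+ * context (PDUMP builds only). */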
+IMG_UINT32 DevmemIntMMUContextID(DEVMEMINT_CTX *psDevMemContext)
+{
+       IMG_UINT32 ui32MMUContextID;
+       MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32MMUContextID, PDUMP_FLAGS_CONTINUOUS);
+       return ui32MMUContextID;
+}
+
+PVRSRV_ERROR
+DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx,
+                                IMG_DEV_VIRTADDR sDevAddrStart,
+                                IMG_DEVMEM_SIZE_T uiSize,
+                                IMG_UINT32 ui32ArraySize,
+                                const IMG_CHAR *pszFilename,
+                                IMG_UINT32 ui32FileOffset,
+                                IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 uiPDumpMMUCtx;
+
+       PVR_UNREFERENCED_PARAMETER(ui32ArraySize);
+
+       /* Confirm that the device node's ui32InternalID matches the bound
+        * PDump device stored in PVRSRV_DATA.
+        */
+       if (psDevmemCtx->psDevNode->sDevId.ui32InternalID !=
+           (PVRSRVGetPVRSRVData())->ui32PDumpBoundDevice)
+       {
+               return PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE;
+       }
+
+       eError = MMU_AcquirePDumpMMUContext(psDevmemCtx->psMMUContext,
+                       &uiPDumpMMUCtx,
+                       ui32PDumpFlags);
+
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+       /*
+          The SYSMEM below refers to the 'MMU Context', so it should be the
+          MMU context, not the PMR, that determines the PDump MemSpace tag.
+          From a PDump point of view it does not matter which namespace is
+          used, as long as that MemSpace is used on the 'MMU Context' we are
+          dumping from.
+          */
+       eError = PDumpMMUSAB(psDevmemCtx->psDevNode,
+                            psDevmemCtx->psDevNode->sDevId.pszPDumpDevName,
+                            uiPDumpMMUCtx,
+                            sDevAddrStart,
+                            uiSize,
+                            pszFilename,
+                            ui32FileOffset,
+                            ui32PDumpFlags);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+       MMU_ReleasePDumpMMUContext(psDevmemCtx->psMMUContext, ui32PDumpFlags);
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemIntPDumpImageDescriptor(CONNECTION_DATA * psConnection,
+                                                         PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                         DEVMEMINT_CTX *psDevMemContext,
+                                                         IMG_UINT32 ui32Size,
+                                                         const IMG_CHAR *pszFileName,
+                                                         IMG_DEV_VIRTADDR sData,
+                                                         IMG_UINT32 ui32DataSize,
+                                                         IMG_UINT32 ui32LogicalWidth,
+                                                         IMG_UINT32 ui32LogicalHeight,
+                                                         IMG_UINT32 ui32PhysicalWidth,
+                                                         IMG_UINT32 ui32PhysicalHeight,
+                                                         PDUMP_PIXEL_FORMAT ePixFmt,
+                                                         IMG_MEMLAYOUT eMemLayout,
+                                                         IMG_FB_COMPRESSION eFBCompression,
+                                                         const IMG_UINT32 *paui32FBCClearColour,
+                                                         PDUMP_FBC_SWIZZLE eFBCSwizzle,
+                                                         IMG_DEV_VIRTADDR sHeader,
+                                                         IMG_UINT32 ui32HeaderSize,
+                                                         IMG_UINT32 ui32PDumpFlags)
+{
+       IMG_UINT32 ui32ContextID;
+       PVRSRV_ERROR eError;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVR_UNREFERENCED_PARAMETER(ui32Size);
+
+       eError = MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32ContextID, ui32PDumpFlags);
+       PVR_LOG_RETURN_IF_ERROR(eError, "MMU_AcquirePDumpMMUContext");
+
+       eError = PDumpImageDescriptor(psDeviceNode,
+                                                                       ui32ContextID,
+                                                                       (IMG_CHAR *)pszFileName,
+                                                                       sData,
+                                                                       ui32DataSize,
+                                                                       ui32LogicalWidth,
+                                                                       ui32LogicalHeight,
+                                                                       ui32PhysicalWidth,
+                                                                       ui32PhysicalHeight,
+                                                                       ePixFmt,
+                                                                       eMemLayout,
+                                                                       eFBCompression,
+                                                                       paui32FBCClearColour,
+                                                                       eFBCSwizzle,
+                                                                       sHeader,
+                                                                       ui32HeaderSize,
+                                                                       ui32PDumpFlags);
+       PVR_LOG_IF_ERROR(eError, "PDumpImageDescriptor");
+
+       /* Don't care about return value */
+       (void) MMU_ReleasePDumpMMUContext(psDevMemContext->psMMUContext, ui32PDumpFlags);
+
+       return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntPDumpDataDescriptor(CONNECTION_DATA * psConnection,
+                                                        PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                        DEVMEMINT_CTX *psDevMemContext,
+                                                        IMG_UINT32 ui32Size,
+                                                        const IMG_CHAR *pszFileName,
+                                                        IMG_DEV_VIRTADDR sData,
+                                                        IMG_UINT32 ui32DataSize,
+                                                        IMG_UINT32 ui32HeaderType,
+                                                        IMG_UINT32 ui32ElementType,
+                                                        IMG_UINT32 ui32ElementCount,
+                                                        IMG_UINT32 ui32PDumpFlags)
+{
+       IMG_UINT32 ui32ContextID;
+       PVRSRV_ERROR eError;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVR_UNREFERENCED_PARAMETER(ui32Size);
+
+       if ((ui32HeaderType != IBIN_HEADER_TYPE) &&
+               (ui32HeaderType != DATA_HEADER_TYPE))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Invalid header type (%u)",
+                        __func__,
+                        ui32HeaderType));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       eError = MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32ContextID, ui32PDumpFlags);
+       PVR_LOG_RETURN_IF_ERROR(eError, "MMU_AcquirePDumpMMUContext");
+
+       eError = PDumpDataDescriptor(psDeviceNode,
+                                                                       ui32ContextID,
+                                                                       (IMG_CHAR *)pszFileName,
+                                                                       sData,
+                                                                       ui32DataSize,
+                                                                       ui32HeaderType,
+                                                                       ui32ElementType,
+                                                                       ui32ElementCount,
+                                                                       ui32PDumpFlags);
+       PVR_LOG_IF_ERROR(eError, "PDumpDataDescriptor");
+
+       /* Don't care about return value */
+       (void) MMU_ReleasePDumpMMUContext(psDevMemContext->psMMUContext, ui32PDumpFlags);
+
+       return eError;
+}
+
+#endif
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/di_impl_brg.c b/drivers/gpu/drm/img/img-rogue/services/server/common/di_impl_brg.c
new file mode 100644 (file)
index 0000000..5670af0
--- /dev/null
@@ -0,0 +1,889 @@
+/*************************************************************************/ /*!
+@File
+@Title          OS agnostic implementation of Debug Info interface.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements osdi_impl.h API to provide access to driver's
+                debug data via pvrdebug.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "allocmem.h"
+#include "hash.h"
+#include "img_defs.h"
+#include "img_types.h"
+#include "lock.h"
+#include "osfunc_common.h"
+#include "osfunc.h" /* for thread */
+#include "tlstream.h"
+#include "dllist.h"
+
+#include "osdi_impl.h"
+#include "di_impl_brg.h"
+#include "di_impl_brg_intern.h"
+#include "pvr_dicommon.h"
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+#include "pvrsrv.h"
+#endif
+
+#define ENTRIES_TABLE_INIT_SIZE 64
+#define STREAM_BUFFER_SIZE 0x4000 /* 16KB */
+#define STREAM_LINE_LENGTH 512
+
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+#define WRITER_THREAD_SLEEP_TIMEOUT 0ull
+#else
+#define WRITER_THREAD_SLEEP_TIMEOUT 28800000000ull
+#endif
+#define WRITER_THREAD_DESTROY_TIMEOUT 100000ull
+#define WRITER_THREAD_DESTROY_RETRIES 10u
+
+#define WRITE_RETRY_COUNT 10      /* retry a write to a TL buffer 10 times */
+#define WRITE_RETRY_WAIT_TIME 100 /* wait 100ms between write retries */
+
+typedef enum THREAD_STATE
+{
+       THREAD_STATE_NULL,
+       THREAD_STATE_ALIVE,
+       THREAD_STATE_TERMINATED,
+} THREAD_STATE;
+
+static struct DIIB_IMPL
+{
+       HASH_TABLE *psEntriesTable;    /*!< Table of entries. */
+       POS_LOCK psEntriesLock;        /*!< Protects psEntriesTable. */
+       IMG_HANDLE hWriterThread;
+       IMG_HANDLE hWriterEventObject;
+       ATOMIC_T eThreadState;
+
+       DLLIST_NODE sWriterQueue;
+       POS_LOCK psWriterLock;         /*!< Protects sWriterQueue. */
+} *_g_psImpl;
+
+struct DIIB_GROUP
+{
+       const IMG_CHAR *pszName;
+       struct DIIB_GROUP *psParentGroup;
+};
+
+struct DIIB_ENTRY
+{
+       struct DIIB_GROUP *psParentGroup;
+       OSDI_IMPL_ENTRY sImplEntry;
+       DI_ITERATOR_CB sIterCb;
+       DI_ENTRY_TYPE eType;
+       IMG_CHAR pszFullPath[DI_IMPL_BRG_PATH_LEN];
+       void *pvPrivData;
+
+       POS_LOCK hLock; /*!< Protects access to entry's iterator. */
+};
+
+struct DI_CONTEXT_TAG
+{
+       IMG_HANDLE hStream;
+       ATOMIC_T iRefCnt;
+       IMG_BOOL bClientConnected; /*!< Indicates whether the client is still
+                                       connected to the DI. */
+};
+
+struct DIIB_WORK_ITEM
+{
+       DI_CONTEXT *psContext;
+       DIIB_ENTRY *psEntry;
+       IMG_UINT64 ui64Size;
+       IMG_UINT64 ui64Offset;
+
+       DLLIST_NODE sQueueElement;
+};
+
+/* Declaring function here to avoid dependencies that are introduced by
+ * including osfunc.h. */
+IMG_INT32 OSStringNCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2,
+                           size_t uiSize);
+
+/* djb2 hash function is public domain */
+static IMG_UINT32 _Hash(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen)
+{
+       IMG_CHAR *pszStr = pKey;
+       IMG_UINT32 ui32Hash = 5381, ui32Char;
+
+       PVR_UNREFERENCED_PARAMETER(uKeySize);
+       PVR_UNREFERENCED_PARAMETER(uHashTabLen);
+
+       while ((ui32Char = *pszStr++) != '\0')
+       {
+               ui32Hash = ((ui32Hash << 5) + ui32Hash) + ui32Char; /* hash * 33 + c */
+       }
+
+       return ui32Hash;
+}
+
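+/* Key comparison callback for the entries hash table; keys are
+ * NUL-terminated entry path strings. */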
+static IMG_BOOL _Compare(size_t uKeySize, void *pKey1, void *pKey2)
+{
+       IMG_CHAR *pszKey1 = pKey1, *pszKey2 = pKey2;
+
+       return OSStringNCompare(pszKey1, pszKey2, uKeySize) == 0;
+}
+
+/* ----- native callbacks interface ----------------------------------------- */
+
+static void _WriteWithRetries(void *pvNativeHandle, const IMG_CHAR *pszStr,
+                              IMG_UINT uiLen)
+{
+       PVRSRV_ERROR eError;
+       IMG_INT iRetry = 0;
+       IMG_UINT32 ui32Flags = TL_FLAG_NO_WRITE_FAILED;
+
+       do
+       {
+               /* Try to write to the buffer but don't inject MOST_RECENT_WRITE_FAILED
+                * packet in case of failure because we're going to retry. */
+               eError = TLStreamWriteRetFlags(pvNativeHandle, (IMG_UINT8 *) pszStr,
+                                              uiLen, &ui32Flags);
+               if (eError == PVRSRV_ERROR_STREAM_FULL)
+               {
+                       /* wait to give the client a chance to read */
+                       OSSleepms(WRITE_RETRY_WAIT_TIME);
+               }
+       }
+       while (eError == PVRSRV_ERROR_STREAM_FULL && iRetry++ < WRITE_RETRY_COUNT);
+
+       /* One last try to write to the buffer. In this case, upon failure,
+        * a MOST_RECENT_WRITE_FAILED packet will be injected into the buffer
+        * to indicate data loss. */
+       if (eError == PVRSRV_ERROR_STREAM_FULL)
+       {
+               eError = TLStreamWrite(pvNativeHandle, (IMG_UINT8 *) pszStr, uiLen);
+       }
+
+       PVR_LOG_IF_ERROR(eError, "TLStreamWrite");
+}
+
+static void _WriteData(void *pvNativeHandle, const void *pvData,
+                       IMG_UINT32 uiSize)
+{
+       _WriteWithRetries(pvNativeHandle, pvData, uiSize);
+}
+
+__printf(2, 0)
+static void _VPrintf(void *pvNativeHandle, const IMG_CHAR *pszFmt,
+                     va_list pArgs)
+{
+       IMG_CHAR pcBuffer[STREAM_LINE_LENGTH];
+       IMG_UINT uiLen = OSVSNPrintf(pcBuffer, sizeof(pcBuffer) - 1, pszFmt, pArgs);
+       pcBuffer[uiLen] = '\0';
+
+       _WriteWithRetries(pvNativeHandle, pcBuffer, uiLen + 1);
+}
+
+static void _Puts(void *pvNativeHandle, const IMG_CHAR *pszStr)
+{
+       _WriteWithRetries(pvNativeHandle, pszStr, OSStringLength(pszStr) + 1);
+}
+
+static IMG_BOOL _HasOverflowed(void *pvNativeHandle)
+{
+       PVR_UNREFERENCED_PARAMETER(pvNativeHandle);
+       return IMG_FALSE;
+}
+
+static OSDI_IMPL_ENTRY_CB _g_sEntryCallbacks = {
+       .pfnWrite = _WriteData,
+       .pfnVPrintf = _VPrintf,
+       .pfnPuts = _Puts,
+       .pfnHasOverflowed = _HasOverflowed,
+};
+
+/* ----- entry operations --------------------------------------------------- */
+
+static PVRSRV_ERROR _ContextUnrefAndMaybeDestroy(DI_CONTEXT *psContext)
+{
+       if (OSAtomicDecrement(&psContext->iRefCnt) == 0)
+       {
+               TLStreamClose(psContext->hStream);
+               OSFreeMem(psContext);
+       }
+
+       return PVRSRV_OK;
+}
+
+static IMG_INT64 _ReadGeneric(const DI_CONTEXT *psContext, DIIB_ENTRY *psEntry)
+{
+       IMG_INT64 iRet = 0;
+       IMG_UINT64 ui64Pos = 0;
+       DI_ITERATOR_CB *psIter = &psEntry->sIterCb;
+       OSDI_IMPL_ENTRY *psImplEntry = &psEntry->sImplEntry;
+       PVRSRV_ERROR eError;
+
+       if (psIter->pfnStart != NULL)
+       {
+               /* this is a full sequence of the operation */
+               void *pvData = psIter->pfnStart(psImplEntry, &ui64Pos);
+
+               while (pvData != NULL && psContext->bClientConnected)
+               {
+                       iRet = psIter->pfnShow(psImplEntry, pvData);
+                       if (iRet < 0)
+                       {
+                               break;
+                       }
+
+                       pvData = psIter->pfnNext(psImplEntry, pvData, &ui64Pos);
+               }
+
+               psIter->pfnStop(psImplEntry, pvData);
+       }
+       else if (psIter->pfnShow != NULL)
+       {
+               /* this is a simplified sequence of the operation */
+               iRet = psIter->pfnShow(psImplEntry, NULL);
+       }
+
+       eError = TLStreamMarkEOS(psImplEntry->pvNative, IMG_FALSE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamMarkEOS", return_error_);
+
+       return iRet;
+
+return_error_:
+       return -1;
+}
+
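+/* Random access read: reserve ui64Count bytes in the TL stream, let the
+ * entry's pfnRead callback fill them in, then commit and mark end-of-stream. */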
+static IMG_INT64 _ReadRndAccess(DIIB_ENTRY *psEntry, IMG_UINT64 ui64Count,
+                                IMG_UINT64 *pui64Pos, void *pvData)
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT8 *pui8Buffer;
+       IMG_HANDLE hStream = psEntry->sImplEntry.pvNative;
+
+       if (psEntry->sIterCb.pfnRead == NULL)
+       {
+               return -1;
+       }
+
+       eError = TLStreamReserve(hStream, &pui8Buffer, ui64Count);
+       PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamReserve", return_error_);
+
+       psEntry->sIterCb.pfnRead((IMG_CHAR *) pui8Buffer, ui64Count, pui64Pos,
+                                pvData);
+
+       eError = TLStreamCommit(hStream, ui64Count);
+       PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCommit", return_error_);
+
+       eError = TLStreamMarkEOS(psEntry->sImplEntry.pvNative, IMG_FALSE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamMarkEOS", return_error_);
+
+       return 0;
+
+return_error_:
+       return -1;
+}
+
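+/* Writer thread: drains the work queue, writing the requested entry data to
+ * the corresponding clients' TL streams, then sleeps on the event object
+ * until new work is queued or the thread is asked to terminate. */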
+static void _WriterThread(void *pvArg)
+{
+       PVRSRV_ERROR eError;
+       IMG_HANDLE hEvent;
+       DLLIST_NODE *psNode;
+
+       eError = OSEventObjectOpen(_g_psImpl->hWriterEventObject, &hEvent);
+       PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectOpen");
+
+#ifdef PVRSRV_FORCE_UNLOAD_IF_BAD_STATE
+       while (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK &&
+              OSAtomicRead(&_g_psImpl->eThreadState) == THREAD_STATE_ALIVE)
+#else
+       while (OSAtomicRead(&_g_psImpl->eThreadState) == THREAD_STATE_ALIVE)
+#endif
+       {
+               struct DIIB_WORK_ITEM *psItem = NULL;
+
+               OSLockAcquire(_g_psImpl->psWriterLock);
+               /* Get element from list tail so that we always get the oldest element
+                * (elements are added to head). */
+               while ((psNode = dllist_get_prev_node(&_g_psImpl->sWriterQueue)) != NULL)
+               {
+                       IMG_INT64 i64Ret;
+                       DIIB_ENTRY *psEntry;
+                       OSDI_IMPL_ENTRY *psImplEntry;
+
+                       dllist_remove_node(psNode);
+                       OSLockRelease(_g_psImpl->psWriterLock);
+
+                       psItem = IMG_CONTAINER_OF(psNode, struct DIIB_WORK_ITEM,
+                                                 sQueueElement);
+
+                       psEntry = psItem->psEntry;
+                       psImplEntry = &psItem->psEntry->sImplEntry;
+
+                       /* if client has already disconnected we can just drop this item */
+                       if (psItem->psContext->bClientConnected)
+                       {
+
+                               PVR_ASSERT(psItem->psContext->hStream != NULL);
+
+                               psImplEntry->pvNative = psItem->psContext->hStream;
+
+                               if (psEntry->eType == DI_ENTRY_TYPE_GENERIC)
+                               {
+                                       i64Ret = _ReadGeneric(psItem->psContext, psEntry);
+                                       PVR_LOG_IF_FALSE(i64Ret >= 0, "generic access read operation "
+                                                        "failed");
+                               }
+                               else if (psEntry->eType == DI_ENTRY_TYPE_RANDOM_ACCESS)
+                               {
+                                       IMG_UINT64 ui64Pos = psItem->ui64Offset;
+
+                                       i64Ret = _ReadRndAccess(psEntry, psItem->ui64Size, &ui64Pos,
+                                                               psEntry->pvPrivData);
+                                       PVR_LOG_IF_FALSE(i64Ret >= 0, "random access read operation "
+                                                        "failed");
+                               }
+                               else
+                               {
+                                       PVR_ASSERT(psEntry->eType == DI_ENTRY_TYPE_GENERIC ||
+                                                  psEntry->eType == DI_ENTRY_TYPE_RANDOM_ACCESS);
+                               }
+
+                               psImplEntry->pvNative = NULL;
+                       }
+                       else
+                       {
+                               PVR_DPF((PVR_DBG_MESSAGE, "client reading entry \"%s\" has "
+                                       "disconnected", psEntry->pszFullPath));
+                       }
+
+                       _ContextUnrefAndMaybeDestroy(psItem->psContext);
+                       OSFreeMemNoStats(psItem);
+
+                       OSLockAcquire(_g_psImpl->psWriterLock);
+               }
+               OSLockRelease(_g_psImpl->psWriterLock);
+
+               eError = OSEventObjectWaitKernel(hEvent, WRITER_THREAD_SLEEP_TIMEOUT);
+               if (eError != PVRSRV_OK && eError != PVRSRV_ERROR_TIMEOUT)
+               {
+                       PVR_LOG_ERROR(eError, "OSEventObjectWaitKernel");
+               }
+       }
+
+       OSLockAcquire(_g_psImpl->psWriterLock);
+       /* clear the queue if there are any items pending */
+       while ((psNode = dllist_get_prev_node(&_g_psImpl->sWriterQueue)) != NULL)
+       {
+               struct DIIB_WORK_ITEM *psItem = IMG_CONTAINER_OF(psNode,
+                                                                struct DIIB_WORK_ITEM,
+                                                                sQueueElement);
+
+               dllist_remove_node(psNode);
+               _ContextUnrefAndMaybeDestroy(psItem->psContext);
+               OSFreeMem(psItem);
+       }
+       OSLockRelease(_g_psImpl->psWriterLock);
+
+       eError = OSEventObjectClose(hEvent);
+       PVR_LOG_IF_ERROR(eError, "OSEventObjectClose");
+
+       OSAtomicWrite(&_g_psImpl->eThreadState, THREAD_STATE_TERMINATED);
+}
+
+/* ----- DI internal API ---------------------------------------------------- */
+
+DIIB_ENTRY *DIImplBrgFind(const IMG_CHAR *pszPath)
+{
+       DIIB_ENTRY *psEntry;
+
+       OSLockAcquire(_g_psImpl->psEntriesLock);
+       psEntry = (void *) HASH_Retrieve_Extended(_g_psImpl->psEntriesTable,
+                                                 (IMG_CHAR *) pszPath);
+       OSLockRelease(_g_psImpl->psEntriesLock);
+
+       return psEntry;
+}
+
+/* ----- DI bridge interface ------------------------------------------------ */
+
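+/* Create the TL stream, named after the calling client process, that is used
+ * to transport entry data back to the client. */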
+static PVRSRV_ERROR _CreateStream(IMG_CHAR *pszStreamName, IMG_HANDLE *phStream)
+{
+       IMG_UINT32 iRet;
+       IMG_HANDLE hStream;
+       PVRSRV_ERROR eError;
+
+       /* For now only one stream per process is created. Should we be able to
+        * create a stream per context? */
+       iRet = OSSNPrintf(pszStreamName, PRVSRVTL_MAX_STREAM_NAME_SIZE,
+                         "di_stream_%x", OSGetCurrentClientProcessIDKM());
+       if (iRet >= PRVSRVTL_MAX_STREAM_NAME_SIZE)
+       {
+               /* This check should never trigger, but handle the case anyway in
+                * case someone changes the definition of
+                * PRVSRVTL_MAX_STREAM_NAME_SIZE. */
+               pszStreamName[0] = '\0';
+               return PVRSRV_ERROR_INTERNAL_ERROR;
+       }
+
+       eError = TLStreamCreate(&hStream, pszStreamName, STREAM_BUFFER_SIZE,
+                               TL_OPMODE_DROP_NEWER, NULL, NULL, NULL, NULL);
+       PVR_RETURN_IF_ERROR(eError);
+
+       *phStream = hStream;
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR DICreateContextKM(IMG_CHAR *pszStreamName, DI_CONTEXT **ppsContext)
+{
+       PVRSRV_ERROR eError;
+       DI_CONTEXT *psContext;
+       IMG_HANDLE hStream = NULL;
+       THREAD_STATE eTState;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(ppsContext != NULL, "ppsContext");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(pszStreamName != NULL, "pszStreamName");
+
+       psContext = OSAllocMem(sizeof(*psContext));
+       PVR_LOG_GOTO_IF_NOMEM(psContext, eError, return_);
+
+       eError = _CreateStream(pszStreamName, &hStream);
+       PVR_LOG_GOTO_IF_ERROR(eError, "_CreateStream", free_desc_);
+
+       psContext->hStream = hStream;
+       /* Indicates to the writer thread that the client is still connected and
+        * waiting for the data. */
+       psContext->bClientConnected = IMG_TRUE;
+       OSAtomicWrite(&psContext->iRefCnt, 1);
+
+       eTState = OSAtomicCompareExchange(&_g_psImpl->eThreadState,
+                                         THREAD_STATE_NULL,
+                                         THREAD_STATE_ALIVE);
+
+       /* Start the writer thread if it has not been started yet. */
+       if (eTState == THREAD_STATE_NULL)
+       {
+               PVR_ASSERT(_g_psImpl->hWriterThread == NULL);
+
+               eError = OSThreadCreate(&_g_psImpl->hWriterThread, "di_writer",
+                                       _WriterThread, NULL, IMG_FALSE, NULL);
+               PVR_LOG_GOTO_IF_ERROR(eError, "OSThreadCreate", free_close_stream_);
+       }
+
+       *ppsContext = psContext;
+
+       return PVRSRV_OK;
+
+free_close_stream_:
+       TLStreamClose(psContext->hStream);
+       OSAtomicWrite(&_g_psImpl->eThreadState, THREAD_STATE_TERMINATED);
+free_desc_:
+       OSFreeMem(psContext);
+return_:
+       return eError;
+}
+
+PVRSRV_ERROR DIDestroyContextKM(DI_CONTEXT *psContext)
+{
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psContext != NULL, "psContext");
+
+       /* Inform the writer thread that the client has disconnected. */
+       psContext->bClientConnected = IMG_FALSE;
+
+       return _ContextUnrefAndMaybeDestroy(psContext);
+}
+
+PVRSRV_ERROR DIReadEntryKM(DI_CONTEXT *psContext, const IMG_CHAR *pszEntryPath,
+                           IMG_UINT64 ui64Offset, IMG_UINT64 ui64Size)
+{
+       PVRSRV_ERROR eError;
+       struct DIIB_WORK_ITEM *psItem;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psContext != NULL, "psContext");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(pszEntryPath != NULL, "pszEntryPath");
+
+       /* 'no stats' to avoid acquiring the process stats locks */
+       psItem = OSAllocMemNoStats(sizeof(*psItem));
+       PVR_LOG_GOTO_IF_NOMEM(psItem, eError, return_);
+
+       psItem->psContext = psContext;
+       psItem->psEntry = DIImplBrgFind(pszEntryPath);
+       PVR_LOG_GOTO_IF_FALSE_VA(psItem->psEntry != NULL, free_item_,
+                                "entry %s does not exist", pszEntryPath);
+       psItem->ui64Size = ui64Size;
+       psItem->ui64Offset = ui64Offset;
+
+       /* increment ref count on the context so that it doesn't get freed
+        * before it gets processed by the writer thread. */
+       OSAtomicIncrement(&psContext->iRefCnt);
+
+       OSLockAcquire(_g_psImpl->psWriterLock);
+       dllist_add_to_head(&_g_psImpl->sWriterQueue, &psItem->sQueueElement);
+       OSLockRelease(_g_psImpl->psWriterLock);
+
+       eError = OSEventObjectSignal(_g_psImpl->hWriterEventObject);
+       PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+
+       return PVRSRV_OK;
+
+free_item_:
+       eError = PVRSRV_ERROR_NOT_FOUND;
+       OSFreeMemNoStats(psItem);
+return_:
+       return eError;
+}
+
+PVRSRV_ERROR DIWriteEntryKM(DI_CONTEXT *psContext, const IMG_CHAR *pszEntryPath,
+                           IMG_UINT32 ui32ValueSize, const IMG_CHAR *pszValue)
+{
+       DIIB_ENTRY *psEntry;
+       DI_PFN_WRITE pfnEntryPuts;
+       IMG_INT64 i64Length = 0;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psContext != NULL, "psContext");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(pszEntryPath != NULL, "pszEntryPath");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(pszValue != NULL, "pszValue");
+
+       psEntry = DIImplBrgFind(pszEntryPath);
+       PVR_LOG_RETURN_IF_FALSE_VA(psEntry != NULL, PVRSRV_ERROR_NOT_FOUND,
+                                "entry %s does not exist", pszEntryPath);
+
+       pfnEntryPuts = psEntry->sIterCb.pfnWrite;
+       if (pfnEntryPuts != NULL)
+       {
+               i64Length = pfnEntryPuts(pszValue, ui32ValueSize, (IMG_UINT64*)&i64Length, psEntry->pvPrivData);
+
+               /* A negative return value (e.g. -EINVAL) from the write callback is
+                * treated as an invalid parameter. */
+               PVR_LOG_RETURN_IF_INVALID_PARAM(i64Length >= 0, pszValue);
+       }
+       else
+       {
+               PVR_LOG_MSG(PVR_DBG_WARNING, "Unable to write to Entry. Write callback not enabled");
+               return PVRSRV_ERROR_INVALID_REQUEST;
+       }
+       return PVRSRV_OK;
+}
+
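+/* HASH_Iterate callback: writes an entry's full path (newline terminated)
+ * to the client's TL stream. */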
+static PVRSRV_ERROR _listName(uintptr_t k,
+                               uintptr_t v,
+                               void* hStream)
+{
+       PVRSRV_ERROR eError;
+       DIIB_ENTRY *psEntry;
+       IMG_UINT32 ui32Size;
+       IMG_CHAR aszName[DI_IMPL_BRG_PATH_LEN];
+
+       psEntry = (DIIB_ENTRY*) v;
+       PVR_ASSERT(psEntry != NULL);
+       PVR_UNREFERENCED_PARAMETER(k);
+
+       ui32Size = OSSNPrintf(aszName, DI_IMPL_BRG_PATH_LEN, "%s\n", psEntry->pszFullPath);
+       PVR_LOG_IF_FALSE(ui32Size > 5, "ui32Size too small, Error suspected!");
+       eError = TLStreamWrite(hStream, (IMG_UINT8 *)aszName, ui32Size+1);
+
+       return eError;
+}
+
+
+PVRSRV_ERROR DIListAllEntriesKM(DI_CONTEXT *psContext)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psContext != NULL, "psContext");
+
+       eError = HASH_Iterate(_g_psImpl->psEntriesTable, _listName, psContext->hStream);
+       PVR_LOG_IF_ERROR(eError, "HASH_Iterate");
+
+       eError = TLStreamMarkEOS(psContext->hStream, IMG_FALSE);
+       return eError;
+}
+
+/* ----- DI implementation interface ---------------------------------------- */
+
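+/* Allocate the global bridge state: the entries hash table, its lock, the
+ * writer queue lock and the writer event object. The writer thread itself is
+ * started lazily from DICreateContextKM(). */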
+static PVRSRV_ERROR _Init(void)
+{
+       PVRSRV_ERROR eError;
+
+       _g_psImpl = OSAllocMem(sizeof(*_g_psImpl));
+       PVR_LOG_GOTO_IF_NOMEM(_g_psImpl, eError, return_);
+
+       _g_psImpl->psEntriesTable = HASH_Create_Extended(ENTRIES_TABLE_INIT_SIZE,
+                                                        DI_IMPL_BRG_PATH_LEN,
+                                                        _Hash, _Compare);
+       PVR_LOG_GOTO_IF_NOMEM(_g_psImpl->psEntriesTable, eError, free_impl_);
+
+       eError = OSLockCreate(&_g_psImpl->psEntriesLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", free_table_);
+
+       eError = OSLockCreate(&_g_psImpl->psWriterLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", free_entries_lock_);
+
+       eError = OSEventObjectCreate("DI_WRITER_EO",
+                                    &_g_psImpl->hWriterEventObject);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", free_writer_lock_);
+
+       _g_psImpl->hWriterThread = NULL;
+       OSAtomicWrite(&_g_psImpl->eThreadState, THREAD_STATE_NULL);
+
+       dllist_init(&_g_psImpl->sWriterQueue);
+
+       return PVRSRV_OK;
+
+free_writer_lock_:
+       OSLockDestroy(_g_psImpl->psWriterLock);
+free_entries_lock_:
+       OSLockDestroy(_g_psImpl->psEntriesLock);
+free_table_:
+       HASH_Delete_Extended(_g_psImpl->psEntriesTable, IMG_FALSE);
+free_impl_:
+       OSFreeMem(_g_psImpl);
+       _g_psImpl = NULL;
+return_:
+       return eError;
+}
+
+static void _DeInit(void)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       THREAD_STATE eTState;
+
+       eTState = OSAtomicCompareExchange(&_g_psImpl->eThreadState,
+                                         THREAD_STATE_ALIVE,
+                                         THREAD_STATE_TERMINATED);
+
+       if (eTState == THREAD_STATE_ALIVE)
+       {
+               if (_g_psImpl->hWriterEventObject != NULL)
+               {
+                       eError = OSEventObjectSignal(_g_psImpl->hWriterEventObject);
+                       PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+               }
+
+               LOOP_UNTIL_TIMEOUT(WRITER_THREAD_DESTROY_TIMEOUT)
+               {
+                       eError = OSThreadDestroy(_g_psImpl->hWriterThread);
+                       if (eError == PVRSRV_OK)
+                       {
+                               break;
+                       }
+                       OSWaitus(WRITER_THREAD_DESTROY_TIMEOUT/WRITER_THREAD_DESTROY_RETRIES);
+               } END_LOOP_UNTIL_TIMEOUT();
+
+               PVR_LOG_IF_ERROR(eError, "OSThreadDestroy");
+       }
+
+       if (_g_psImpl->hWriterEventObject != NULL)
+       {
+               eError = OSEventObjectDestroy(_g_psImpl->hWriterEventObject);
+               PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+       }
+
+       HASH_Delete_Extended(_g_psImpl->psEntriesTable, IMG_FALSE);
+       OSLockDestroy(_g_psImpl->psWriterLock);
+       OSLockDestroy(_g_psImpl->psEntriesLock);
+       OSFreeMem(_g_psImpl);
+       _g_psImpl = NULL;
+}
+
+/* Recursively traverses the ancestors list up to the root group and
+ * appends their names preceded by "/" to the path in reverse order
+ * (root group's name first and psGroup group's name last).
+ * Returns current offset in the path (the current path length without the
+ * NUL character). If there is no more space in the path returns -1
+ * to indicate an error (the path is too long to fit into the buffer). */
+static IMG_INT _BuildGroupPath(IMG_CHAR *pszPath, const DIIB_GROUP *psGroup)
+{
+       IMG_INT iOff;
+
+       if (psGroup == NULL)
+       {
+               return 0;
+       }
+
+       PVR_ASSERT(pszPath != NULL);
+
+       iOff = _BuildGroupPath(pszPath, psGroup->psParentGroup);
+       PVR_RETURN_IF_FALSE(iOff != -1, -1);
+
+       iOff += OSStringLCopy(pszPath + iOff, "/",
+                             DI_IMPL_BRG_PATH_LEN - iOff);
+       PVR_RETURN_IF_FALSE(iOff < DI_IMPL_BRG_PATH_LEN, -1);
+
+       iOff += OSStringLCopy(pszPath + iOff, psGroup->pszName,
+                             DI_IMPL_BRG_PATH_LEN - iOff);
+       PVR_RETURN_IF_FALSE(iOff < DI_IMPL_BRG_PATH_LEN, -1);
+
+       return iOff;
+}
+
+static PVRSRV_ERROR _BuildEntryPath(IMG_CHAR *pszPath, const IMG_CHAR *pszName,
+                                    const DIIB_GROUP *psGroup)
+{
+       IMG_INT iOff = _BuildGroupPath(pszPath, psGroup);
+       PVR_RETURN_IF_FALSE(iOff != -1, PVRSRV_ERROR_INVALID_OFFSET);
+
+       iOff += OSStringLCopy(pszPath + iOff, "/", DI_IMPL_BRG_PATH_LEN - iOff);
+       PVR_RETURN_IF_FALSE(iOff < DI_IMPL_BRG_PATH_LEN,
+                           PVRSRV_ERROR_INVALID_OFFSET);
+
+       iOff += OSStringLCopy(pszPath + iOff, pszName, DI_IMPL_BRG_PATH_LEN - iOff);
+       PVR_RETURN_IF_FALSE(iOff < DI_IMPL_BRG_PATH_LEN,
+                           PVRSRV_ERROR_INVALID_OFFSET);
+
+       return PVRSRV_OK;
+}
+
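+/* pfnCreateEntry implementation: allocates a DIIB_ENTRY, builds its full
+ * path from the parent group chain and inserts it into the entries hash
+ * table. */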
+static PVRSRV_ERROR _CreateEntry(const IMG_CHAR *pszName,
+                                 DI_ENTRY_TYPE eType,
+                                 const DI_ITERATOR_CB *psIterCb,
+                                 void *pvPrivData,
+                                 void *pvParentGroup,
+                                 void **pvEntry)
+{
+       DIIB_GROUP *psParentGroup = pvParentGroup;
+       DIIB_ENTRY *psEntry;
+       PVRSRV_ERROR eError;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(pvEntry != NULL, "pvEntry");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(pvParentGroup != NULL, "pvParentGroup");
+
+       switch (eType)
+       {
+               case DI_ENTRY_TYPE_GENERIC:
+                       break;
+               case DI_ENTRY_TYPE_RANDOM_ACCESS:
+                       break;
+               default:
+                       PVR_DPF((PVR_DBG_ERROR, "eType invalid in %s()", __func__));
+                       PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, return_);
+       }
+
+       psEntry = OSAllocMem(sizeof(*psEntry));
+       PVR_LOG_GOTO_IF_NOMEM(psEntry, eError, return_);
+
+       eError = OSLockCreate(&psEntry->hLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", free_entry_);
+
+       psEntry->eType = eType;
+       psEntry->sIterCb = *psIterCb;
+       psEntry->pvPrivData = pvPrivData;
+       psEntry->psParentGroup = psParentGroup;
+       psEntry->pszFullPath[0] = '\0';
+
+       psEntry->sImplEntry.pvPrivData = pvPrivData;
+       psEntry->sImplEntry.pvNative = NULL;
+       psEntry->sImplEntry.psCb = &_g_sEntryCallbacks;
+
+       eError = _BuildEntryPath(psEntry->pszFullPath, pszName,
+                                psEntry->psParentGroup);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s() failed in _BuildEntryPath() for \"%s\" "
+                       "entry", __func__, pszName));
+               goto destroy_lock_;
+       }
+
+       OSLockAcquire(_g_psImpl->psEntriesLock);
+       eError = HASH_Insert_Extended(_g_psImpl->psEntriesTable,
+                                     psEntry->pszFullPath,
+                                     (uintptr_t) psEntry) ?
+                PVRSRV_OK : PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE;
+       OSLockRelease(_g_psImpl->psEntriesLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "HASH_Insert_Extended failed", destroy_lock_);
+
+       *pvEntry = psEntry;
+
+       return PVRSRV_OK;
+
+destroy_lock_:
+       OSLockDestroy(psEntry->hLock);
+free_entry_:
+       OSFreeMem(psEntry);
+return_:
+       return eError;
+}
+
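+/* pfnDestroyEntry implementation: removes the entry from the hash table and
+ * frees it. */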
+static void _DestroyEntry(void *pvEntry)
+{
+       DIIB_ENTRY *psEntry = pvEntry;
+       PVR_ASSERT(psEntry != NULL);
+
+       OSLockAcquire(_g_psImpl->psEntriesLock);
+       HASH_Remove_Extended(_g_psImpl->psEntriesTable, psEntry->pszFullPath);
+       OSLockRelease(_g_psImpl->psEntriesLock);
+
+       OSLockDestroy(psEntry->hLock);
+       OSFreeMem(psEntry);
+}
+
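+/* pfnCreateGroup implementation: a group is just a name plus a pointer to
+ * its parent group. */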
+static PVRSRV_ERROR _CreateGroup(const IMG_CHAR *pszName,
+                                 void *pvParentGroup,
+                                 void **ppvGroup)
+{
+       DIIB_GROUP *psNewGroup;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(pszName != NULL, "pszName");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(ppvGroup != NULL, "ppvGroup");
+
+       psNewGroup = OSAllocMem(sizeof(*psNewGroup));
+       PVR_LOG_RETURN_IF_NOMEM(psNewGroup, "OSAllocMem");
+
+       psNewGroup->pszName = pszName;
+       psNewGroup->psParentGroup = pvParentGroup;
+
+       *ppvGroup = psNewGroup;
+
+       return PVRSRV_OK;
+}
+
+static void _DestroyGroup(void *pvGroup)
+{
+       DIIB_GROUP *psGroup = pvGroup;
+       PVR_ASSERT(psGroup != NULL);
+
+       OSFreeMem(psGroup);
+}
+
+PVRSRV_ERROR PVRDIImplBrgRegister(void)
+{
+       OSDI_IMPL_CB sImplCb = {
+               .pfnInit = _Init,
+               .pfnDeInit = _DeInit,
+               .pfnCreateEntry = _CreateEntry,
+               .pfnDestroyEntry = _DestroyEntry,
+               .pfnCreateGroup = _CreateGroup,
+               .pfnDestroyGroup = _DestroyGroup
+       };
+
+       return DIRegisterImplementation("impl_brg", &sImplCb);
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/di_impl_brg.h b/drivers/gpu/drm/img/img-rogue/services/server/common/di_impl_brg.h
new file mode 100644 (file)
index 0000000..7d5a6ca
--- /dev/null
@@ -0,0 +1,92 @@
+/*************************************************************************/ /*!
+@File
+@Title          OS agnostic implementation of Debug Info interface.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVR_IMPL_BRG_H
+#define PVR_IMPL_BRG_H
+
+#include "pvrsrv_error.h"
+
+typedef struct DI_CONTEXT_TAG DI_CONTEXT;
+typedef struct DI_ENTRY_DESC DI_ENTRY_DESC;
+
+PVRSRV_ERROR PVRDIImplBrgRegister(void);
+
+/*! @Function DICreateContextKM
+ *
+ * @Description
+ * Creates DI context which among others also creates a TL stream for reading
+ * entries.
+ *
+ * @Output pszStreamName: name of the TL stream created in this context
+ * @Output ppsContext: pointer to the new context
+ *
+ * @Return PVRSRV_ERROR error code
+ *         PVRSRV_OK in case of a success
+ *         PVRSRV_ERROR_INVALID_PARAMS if any of the parameters is invalid
+ *         PVRSRV_ERROR_OUT_OF_MEMORY if any of the memory allocations failed
+ *         error codes returned by TLStreamCreate()
+ */
+PVRSRV_ERROR DICreateContextKM(IMG_CHAR *pszStreamName,
+                               DI_CONTEXT **ppsContext);
+
+/*! @Function DIDestroyContextKM
+ *
+ * @Description
+ * Destroy the DI context and all underlying dependencies.
+ *
+ * @Input psContext: pointer to the context
+ *
+ * @Return PVRSRV_ERROR error code
+ *         PVRSRV_OK in case of a success
+ *         PVRSRV_ERROR_INVALID_PARAMS if invalid context pointer given
+ */
+PVRSRV_ERROR DIDestroyContextKM(DI_CONTEXT *psContext);
+
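+/*! @Function DIReadEntryKM
+ *
+ * @Description
+ * Queues a read of the entry at pszEntryPath. The request is processed by
+ * the writer thread, which writes the entry's data to the context's TL
+ * stream. ui64Offset and ui64Size are only used for random access entries.
+ *
+ * @Input psContext: pointer to the context
+ * @Input pszEntryPath: full path to the entry
+ * @Input ui64Offset: offset to read from (random access entries only)
+ * @Input ui64Size: number of bytes to read (random access entries only)
+ *
+ * @Return PVRSRV_ERROR error code
+ */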
+PVRSRV_ERROR DIReadEntryKM(DI_CONTEXT *psContext, const IMG_CHAR *pszEntryPath,
+                           IMG_UINT64 ui64Offset, IMG_UINT64 ui64Size);
+
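+/*! @Function DIWriteEntryKM
+ *
+ * @Description
+ * Passes pszValue to the write callback of the entry at pszEntryPath.
+ *
+ * @Input psContext: pointer to the context
+ * @Input pszEntryPath: full path to the entry
+ * @Input ui32ValueSize: size of pszValue in bytes
+ * @Input pszValue: value to write
+ *
+ * @Return PVRSRV_ERROR error code
+ */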
+PVRSRV_ERROR DIWriteEntryKM(DI_CONTEXT *psContext, const IMG_CHAR *pszEntryPath,
+                            IMG_UINT32 ui32ValueSize, const IMG_CHAR *pszValue);
+
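+/*! @Function DIListAllEntriesKM
+ *
+ * @Description
+ * Writes the full paths of all registered entries to the context's TL
+ * stream, followed by an end-of-stream marker.
+ *
+ * @Input psContext: pointer to the context
+ *
+ * @Return PVRSRV_ERROR error code
+ */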
+PVRSRV_ERROR DIListAllEntriesKM(DI_CONTEXT *psContext);
+
+#endif /* PVR_IMPL_BRG_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/di_impl_brg_intern.h b/drivers/gpu/drm/img/img-rogue/services/server/common/di_impl_brg_intern.h
new file mode 100644 (file)
index 0000000..5e11cac
--- /dev/null
@@ -0,0 +1,61 @@
+/*************************************************************************/ /*!
+@File
+@Title          OS agnostic implementation of Debug Info internal interface.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVR_IMPL_BRG_INTERN_H
+#define PVR_IMPL_BRG_INTERN_H
+
+typedef struct DIIB_GROUP DIIB_GROUP;
+typedef struct DIIB_ENTRY DIIB_ENTRY;
+
+/*! @Function DIImplBrgFind
+ *
+ * @Description
+ * Retrieves an entry based on a given path.
+ *
+ * @Input pszPath: Full entry path in form of
+ *                 /rootGroup/.../parentGroup/entryName.
+ *
+ * @Return Returns entry object if exists or NULL otherwise.
+ */
+DIIB_ENTRY *DIImplBrgFind(const IMG_CHAR *pszPath);
+
+#endif /* PVR_IMPL_BRG_INTERN_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/di_server.c b/drivers/gpu/drm/img/img-rogue/services/server/common/di_server.c
new file mode 100644 (file)
index 0000000..391f3aa
--- /dev/null
@@ -0,0 +1,780 @@
+/*************************************************************************/ /*!
+@File
+@Title          Debug Info framework functions and types.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "di_server.h"
+#include "osdi_impl.h"
+#include "pvrsrv_error.h"
+#include "dllist.h"
+#include "lock.h"
+#include "allocmem.h"
+#include "osfunc.h"
+
+#define ROOT_GROUP_NAME PVR_DRM_NAME
+
+/*! Implementation object. */
+typedef struct DI_IMPL
+{
+       const IMG_CHAR *pszName;       /*!< name of the implementation */
+       OSDI_IMPL_CB sCb;              /*!< implementation callbacks */
+       IMG_BOOL bInitialised;         /*!< set to IMG_TRUE after implementation
+                                           is initialised */
+
+       DLLIST_NODE sListNode;         /*!< node element of the global list of all
+                                           implementations */
+} DI_IMPL;
+
+/*! Wrapper object for objects originating from derivative implementations.
+ * This wraps both entries and groups native implementation objects. */
+typedef struct DI_NATIVE_HANDLE
+{
+       void *pvHandle;                /*!< opaque handle to the native object */
+       DI_IMPL *psDiImpl;             /*!< implementation pvHandle is associated
+                                           with */
+       DLLIST_NODE sListNode;         /*!< node element of native handles list */
+} DI_NATIVE_HANDLE;
+
+/*! Debug Info entry object.
+ *
+ * Depending on the implementation this can be represented differently. For
+ * example for the DebugFS this translates to a file.
+ */
+struct DI_ENTRY
+{
+       const IMG_CHAR *pszName;       /*!< name of the entry */
+       void *pvPrivData;              /*!< handle to the entry's private data */
+       DI_ENTRY_TYPE eType;           /*!< entry type */
+       DI_ITERATOR_CB sIterCb;        /*!< iterator interface for the entry */
+
+       DLLIST_NODE sListNode;         /*!< node element of group's entry list */
+       DLLIST_NODE sNativeHandleList; /*!< list of native handles belonging to this
+                                           entry */
+};
+
+/*! Debug Info group object.
+ *
+ * Depending on the implementation this can be represented differently. For
+ * example for the DebugFS this translates to a directory.
+ */
+struct DI_GROUP
+{
+       IMG_CHAR *pszName;               /*!< name of the group */
+       const struct DI_GROUP *psParent; /*!< parent group */
+
+       DLLIST_NODE sListNode;           /*!< node element of group's group list */
+       DLLIST_NODE sGroupList;          /*!< list of groups (children) that belong
+                                             to this group */
+       DLLIST_NODE sEntryList;          /*!< list of entries (children) that belong
+                                             to this group */
+       DLLIST_NODE sNativeHandleList;   /*!< list of native handles belonging to
+                                             this group */
+};
+
+/* List of all registered implementations. */
+static DECLARE_DLLIST(_g_sImpls);
+
+/* Root group for the DI entries and groups. This group is used as the parent
+ * for all other groups and entries whenever no parent group is given at
+ * creation time. */
+static DI_GROUP *_g_psRootGroup;
+
+/* Protects access to _g_sImpls and _g_psRootGroup */
+static POS_LOCK _g_hLock;
+
+PVRSRV_ERROR DIInit(void)
+{
+       PVRSRV_ERROR eError;
+
+       eError = OSLockCreate(&_g_hLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", return_);
+
+       _g_psRootGroup = OSAllocMem(sizeof(*_g_psRootGroup));
+       PVR_LOG_GOTO_IF_NOMEM(_g_psRootGroup, eError, destroy_lock_);
+
+       _g_psRootGroup->pszName = OSAllocMem(sizeof(ROOT_GROUP_NAME));
+       PVR_LOG_GOTO_IF_NOMEM(_g_psRootGroup->pszName, eError, cleanup_name_);
+       OSStringLCopy(_g_psRootGroup->pszName, ROOT_GROUP_NAME,
+                                 sizeof(ROOT_GROUP_NAME));
+
+       dllist_init(&_g_psRootGroup->sListNode);
+       dllist_init(&_g_psRootGroup->sGroupList);
+       dllist_init(&_g_psRootGroup->sEntryList);
+       dllist_init(&_g_psRootGroup->sNativeHandleList);
+
+       return PVRSRV_OK;
+
+cleanup_name_:
+       OSFreeMem(_g_psRootGroup);
+destroy_lock_:
+       OSLockDestroy(_g_hLock);
+return_:
+       return eError;
+}
+
+/* Destroys the whole tree of groups and entries rooted at the given group. */
+static void _DeInitGroupRecursively(DI_GROUP *psGroup)
+{
+       DLLIST_NODE *psThis, *psNext;
+
+       dllist_foreach_node(&psGroup->sEntryList, psThis, psNext)
+       {
+               DI_ENTRY *psThisEntry = IMG_CONTAINER_OF(psThis, DI_ENTRY, sListNode);
+               DIDestroyEntry(psThisEntry);
+       }
+
+       dllist_foreach_node(&psGroup->sGroupList, psThis, psNext)
+       {
+               DI_GROUP *psThisGroup = IMG_CONTAINER_OF(psThis, DI_GROUP, sListNode);
+
+               _DeInitGroupRecursively(psThisGroup);
+       }
+
+       DIDestroyGroup(psGroup);
+}
+
+void DIDeInit(void)
+{
+       DLLIST_NODE *psThis, *psNext;
+
+       OSLockAcquire(_g_hLock);
+
+       if (!dllist_is_empty(&_g_psRootGroup->sGroupList) ||
+           !dllist_is_empty(&_g_psRootGroup->sEntryList))
+       {
+               PVR_DPF((PVR_DBG_WARNING, "%s: entries or groups still exist during "
+                       "de-initialisation process, destroying all", __func__));
+       }
+
+       _DeInitGroupRecursively(_g_psRootGroup);
+       _g_psRootGroup = NULL;
+
+       /* Remove all of the implementations. */
+       dllist_foreach_node(&_g_sImpls, psThis, psNext)
+       {
+               DI_IMPL *psDiImpl = IMG_CONTAINER_OF(psThis, DI_IMPL, sListNode);
+
+               if (psDiImpl->bInitialised)
+               {
+                       psDiImpl->sCb.pfnDeInit();
+                       psDiImpl->bInitialised = IMG_FALSE;
+               }
+
+               dllist_remove_node(&psDiImpl->sListNode);
+               OSFreeMem(psDiImpl);
+       }
+
+       OSLockRelease(_g_hLock);
+
+       /* all resources freed so free the lock itself too */
+
+       OSLockDestroy(_g_hLock);
+}
+
+static IMG_BOOL _ValidateIteratorCb(const DI_ITERATOR_CB *psIterCb,
+                                    DI_ENTRY_TYPE eType)
+{
+       IMG_UINT32 uiFlags = 0;
+
+       if (psIterCb == NULL)
+       {
+               return IMG_FALSE;
+       }
+
+       if (eType == DI_ENTRY_TYPE_GENERIC)
+       {
+               uiFlags |= psIterCb->pfnShow != NULL ? BIT(0) : 0;
+               uiFlags |= psIterCb->pfnStart != NULL ? BIT(1) : 0;
+               uiFlags |= psIterCb->pfnStop != NULL ? BIT(2) : 0;
+               uiFlags |= psIterCb->pfnNext != NULL ? BIT(3) : 0;
+
+               /* either only pfnShow or all callbacks need to be set */
+               if (uiFlags != BIT(0) && !BITMASK_HAS(uiFlags, 0x0f))
+               {
+                       return IMG_FALSE;
+               }
+       }
+       else if (eType == DI_ENTRY_TYPE_RANDOM_ACCESS)
+       {
+               uiFlags |= psIterCb->pfnRead != NULL ? BIT(0) : 0;
+               uiFlags |= psIterCb->pfnSeek != NULL ? BIT(1) : 0;
+
+               /* either only pfnRead or all callbacks need to be set */
+               if (uiFlags != BIT(0) && !BITMASK_HAS(uiFlags, 0x03))
+               {
+                       return IMG_FALSE;
+               }
+       }
+       else
+       {
+               return IMG_FALSE;
+       }
+
+       return IMG_TRUE;
+}
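+
+/* Illustrative sketch (not part of the driver): the two callback shapes that
+ * _ValidateIteratorCb() accepts for DI_ENTRY_TYPE_GENERIC entries. The _My*
+ * names are placeholders and the exact callback prototypes live in the DI
+ * framework headers included above.
+ *
+ *   // minimal form: only pfnShow is provided
+ *   DI_ITERATOR_CB sShowOnly = { .pfnShow = _MyShow };
+ *
+ *   // full form: pfnShow, pfnStart, pfnStop and pfnNext are all provided
+ *   DI_ITERATOR_CB sFullIter = {
+ *       .pfnShow  = _MyShow,
+ *       .pfnStart = _MyStart,
+ *       .pfnStop  = _MyStop,
+ *       .pfnNext  = _MyNext,
+ *   };
+ *
+ * DI_ENTRY_TYPE_RANDOM_ACCESS entries analogously accept either pfnRead alone
+ * or pfnRead together with pfnSeek.
+ */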
+
+static PVRSRV_ERROR _CreateNativeEntry(DI_ENTRY *psEntry,
+                                       const DI_NATIVE_HANDLE *psNativeParent)
+{
+       PVRSRV_ERROR eError;
+       DI_IMPL *psImpl = psNativeParent->psDiImpl;
+
+       DI_NATIVE_HANDLE *psNativeEntry = OSAllocMem(sizeof(*psNativeEntry));
+       PVR_LOG_GOTO_IF_NOMEM(psNativeEntry, eError, return_);
+
+       eError = psImpl->sCb.pfnCreateEntry(psEntry->pszName,
+                                           psEntry->eType,
+                                           &psEntry->sIterCb,
+                                           psEntry->pvPrivData,
+                                           psNativeParent->pvHandle,
+                                           &psNativeEntry->pvHandle);
+       PVR_LOG_GOTO_IF_ERROR(eError, "psImpl->sCb.pfnCreateEntry", free_memory_);
+
+       psNativeEntry->psDiImpl = psImpl;
+
+       dllist_add_to_head(&psEntry->sNativeHandleList, &psNativeEntry->sListNode);
+
+       return PVRSRV_OK;
+
+free_memory_:
+       OSFreeMem(psNativeEntry);
+return_:
+       return eError;
+}
+
+static void _DestroyNativeEntry(DI_NATIVE_HANDLE *psNativeEntry)
+{
+       dllist_remove_node(&psNativeEntry->sListNode);
+       OSFreeMem(psNativeEntry);
+}
+
+PVRSRV_ERROR DICreateEntry(const IMG_CHAR *pszName,
+                           DI_GROUP *psGroup,
+                           const DI_ITERATOR_CB *psIterCb,
+                           void *pvPriv,
+                           DI_ENTRY_TYPE eType,
+                           DI_ENTRY **ppsEntry)
+{
+       PVRSRV_ERROR eError;
+       DLLIST_NODE *psThis, *psNext;
+       DI_ENTRY *psEntry;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(pszName != NULL, "pszName");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(_ValidateIteratorCb(psIterCb, eType),
+                                       "psIterCb");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(ppsEntry != NULL, "ppsEntry");
+
+       psEntry = OSAllocMem(sizeof(*psEntry));
+       PVR_LOG_RETURN_IF_NOMEM(psEntry, "OSAllocMem");
+
+       if (psGroup == NULL)
+       {
+               psGroup = _g_psRootGroup;
+       }
+
+       psEntry->pszName = pszName;
+       psEntry->pvPrivData = pvPriv;
+       psEntry->eType = eType;
+       psEntry->sIterCb = *psIterCb;
+       dllist_init(&psEntry->sNativeHandleList);
+
+       OSLockAcquire(_g_hLock);
+
+       dllist_add_to_tail(&psGroup->sEntryList, &psEntry->sListNode);
+
+       /* Iterate over all of the native handles of the parent group to create
+        * the entry for every registered implementation. */
+       dllist_foreach_node(&psGroup->sNativeHandleList, psThis, psNext)
+       {
+               DI_NATIVE_HANDLE *psNativeGroup =
+                       IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, sListNode);
+
+               eError = _CreateNativeEntry(psEntry, psNativeGroup);
+               PVR_GOTO_IF_ERROR(eError, cleanup_);
+       }
+
+       OSLockRelease(_g_hLock);
+
+       *ppsEntry = psEntry;
+
+       return PVRSRV_OK;
+
+cleanup_:
+       /* remove the entry from the parent group's list again while the lock
+        * is still held so that no dangling node is left behind */
+       dllist_remove_node(&psEntry->sListNode);
+
+       OSLockRelease(_g_hLock);
+
+       /* Something went wrong so if any native entries were created, remove
+        * them from the list, free them and free the DI entry itself. */
+       dllist_foreach_node(&psEntry->sNativeHandleList, psThis, psNext)
+       {
+               DI_NATIVE_HANDLE *psNativeEntry =
+                       IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, sListNode);
+
+               _DestroyNativeEntry(psNativeEntry);
+       }
+
+       OSFreeMem(psEntry);
+
+       return eError;
+}
+
+void DIDestroyEntry(DI_ENTRY *psEntry)
+{
+       DLLIST_NODE *psThis, *psNext;
+
+       PVR_LOG_RETURN_VOID_IF_FALSE(psEntry != NULL,
+                                    "psEntry invalid in DIDestroyEntry()");
+
+       /* Iterate through all of the native entries of the DI entry, remove
+        * them from the list and then destroy them. After that, destroy the
+        * DI entry itself. */
+       dllist_foreach_node(&psEntry->sNativeHandleList, psThis, psNext)
+       {
+               DI_NATIVE_HANDLE *psNative = IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE,
+                                                             sListNode);
+
+               /* The implementation must ensure that the entry is not removed if any
+                * operations are being executed on the entry. If this is the case
+                * the implementation should block until all of them are finished
+                * and prevent any further operations.
+                * This will guarantee proper synchronisation between the DI framework
+                * and underlying implementations and prevent destruction/access
+                * races. */
+               psNative->psDiImpl->sCb.pfnDestroyEntry(psNative->pvHandle);
+               dllist_remove_node(&psNative->sListNode);
+               OSFreeMem(psNative);
+       }
+
+       dllist_remove_node(&psEntry->sListNode);
+
+       OSFreeMem(psEntry);
+}
+
+static PVRSRV_ERROR _CreateNativeGroup(DI_GROUP *psGroup,
+                                       const DI_NATIVE_HANDLE *psNativeParent,
+                                       DI_NATIVE_HANDLE **ppsNativeGroup)
+{
+       PVRSRV_ERROR eError;
+       DI_IMPL *psImpl = psNativeParent->psDiImpl;
+
+       DI_NATIVE_HANDLE *psNativeGroup = OSAllocMem(sizeof(*psNativeGroup));
+       PVR_LOG_GOTO_IF_NOMEM(psNativeGroup, eError, return_);
+
+       eError = psImpl->sCb.pfnCreateGroup(psGroup->pszName,
+                                           psNativeParent->pvHandle,
+                                           &psNativeGroup->pvHandle);
+       PVR_LOG_GOTO_IF_ERROR(eError, "psImpl->sCb.pfnCreateGroup", free_memory_);
+
+       psNativeGroup->psDiImpl = psImpl;
+
+       dllist_add_to_head(&psGroup->sNativeHandleList, &psNativeGroup->sListNode);
+
+       *ppsNativeGroup = psNativeGroup;
+
+       return PVRSRV_OK;
+
+free_memory_:
+       OSFreeMem(psNativeGroup);
+return_:
+       return eError;
+}
+
+static void _DestroyNativeGroup(DI_NATIVE_HANDLE *psNativeEntry)
+{
+       dllist_remove_node(&psNativeEntry->sListNode);
+       OSFreeMem(psNativeEntry);
+}
+
+PVRSRV_ERROR DICreateGroup(const IMG_CHAR *pszName,
+                           DI_GROUP *psParent,
+                           DI_GROUP **ppsGroup)
+{
+       PVRSRV_ERROR eError;
+       DLLIST_NODE *psThis, *psNext;
+       DI_GROUP *psGroup;
+       size_t uSize;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(pszName != NULL, "pszName");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(ppsGroup != NULL, "ppsGroup");
+
+       psGroup = OSAllocMem(sizeof(*psGroup));
+       PVR_LOG_RETURN_IF_NOMEM(psGroup, "OSAllocMem");
+
+       if (psParent == NULL)
+       {
+               psParent = _g_psRootGroup;
+       }
+
+       uSize = OSStringLength(pszName) + 1;
+       psGroup->pszName = OSAllocMem(uSize * sizeof(*psGroup->pszName));
+       PVR_LOG_GOTO_IF_NOMEM(psGroup->pszName, eError, cleanup_name_);
+       OSStringLCopy(psGroup->pszName, pszName, uSize);
+
+       psGroup->psParent = psParent;
+       dllist_init(&psGroup->sGroupList);
+       dllist_init(&psGroup->sEntryList);
+       dllist_init(&psGroup->sNativeHandleList);
+
+       OSLockAcquire(_g_hLock);
+
+       dllist_add_to_tail(&psParent->sGroupList, &psGroup->sListNode);
+
+       /* Iterate over all of the native handles of the parent group to create
+        * the group for every registered implementation. */
+       dllist_foreach_node(&psParent->sNativeHandleList, psThis, psNext)
+       {
+               DI_NATIVE_HANDLE *psNativeGroup = NULL, *psNativeParent =
+                       IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, sListNode);
+
+               eError = _CreateNativeGroup(psGroup, psNativeParent, &psNativeGroup);
+               PVR_GOTO_IF_ERROR(eError, cleanup_);
+       }
+
+       OSLockRelease(_g_hLock);
+
+       *ppsGroup = psGroup;
+
+       return PVRSRV_OK;
+
+cleanup_:
+       /* remove the group from the parent's list again while the lock is
+        * still held so that no dangling node is left behind */
+       dllist_remove_node(&psGroup->sListNode);
+
+       OSLockRelease(_g_hLock);
+
+       /* Something went wrong so if any native groups were created, remove
+        * them from the list, free them and free the DI group itself. */
+       dllist_foreach_node(&psGroup->sNativeHandleList, psThis, psNext)
+       {
+               DI_NATIVE_HANDLE *psNativeGroup =
+                       IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, sListNode);
+
+               dllist_remove_node(&psNativeGroup->sListNode);
+               OSFreeMem(psNativeGroup);
+       }
+
+       OSFreeMem(psGroup->pszName);
+cleanup_name_:
+       OSFreeMem(psGroup);
+
+       return eError;
+}
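+
+/* Illustrative sketch (not part of the driver): typical usage of the group
+ * and entry API above. "my_group"/"my_entry", sMyIterCb (prepared, for
+ * example, as in the sketch after _ValidateIteratorCb()) and pvMyData are
+ * placeholders; error handling is shortened for brevity.
+ *
+ *   DI_GROUP *psMyGroup;
+ *   DI_ENTRY *psMyEntry;
+ *
+ *   // a NULL parent creates the group directly under the root group
+ *   eError = DICreateGroup("my_group", NULL, &psMyGroup);
+ *   if (eError == PVRSRV_OK)
+ *   {
+ *       eError = DICreateEntry("my_entry", psMyGroup, &sMyIterCb, pvMyData,
+ *                              DI_ENTRY_TYPE_GENERIC, &psMyEntry);
+ *   }
+ *
+ *   // tear-down happens in reverse order: entries first, then the group
+ *   DIDestroyEntry(psMyEntry);
+ *   DIDestroyGroup(psMyGroup);
+ */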
+
+void DIDestroyGroup(DI_GROUP *psGroup)
+{
+       DLLIST_NODE *psThis, *psNext;
+
+       PVR_LOG_RETURN_VOID_IF_FALSE(psGroup != NULL,
+                                    "psGroup invalid in DIDestroyGroup()");
+
+       /* Iterate through all of the native groups of the DI group, remove
+        * them from the list and then destroy them. After that destroy the
+        * DI group itself. */
+       dllist_foreach_node(&psGroup->sNativeHandleList, psThis, psNext)
+       {
+               DI_NATIVE_HANDLE *psNative = IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE,
+                                                             sListNode);
+
+               psNative->psDiImpl->sCb.pfnDestroyGroup(psNative->pvHandle);
+               dllist_remove_node(&psNative->sListNode);
+               OSFreeMem(psNative);
+       }
+
+       dllist_remove_node(&psGroup->sListNode);
+
+       OSFreeMem(psGroup->pszName);
+       OSFreeMem(psGroup);
+}
+
+void *DIGetPrivData(const OSDI_IMPL_ENTRY *psEntry)
+{
+       PVR_ASSERT(psEntry != NULL);
+
+       return psEntry->pvPrivData;
+}
+
+void DIWrite(const OSDI_IMPL_ENTRY *psEntry, const void *pvData,
+             IMG_UINT32 uiSize)
+{
+       PVR_ASSERT(psEntry != NULL);
+       PVR_ASSERT(psEntry->psCb != NULL);
+       PVR_ASSERT(psEntry->psCb->pfnWrite != NULL);
+       PVR_ASSERT(psEntry->pvNative != NULL);
+
+       psEntry->psCb->pfnWrite(psEntry->pvNative, pvData, uiSize);
+}
+
+void DIPrintf(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszFmt, ...)
+{
+       va_list args;
+
+       PVR_ASSERT(psEntry != NULL);
+       PVR_ASSERT(psEntry->psCb != NULL);
+       PVR_ASSERT(psEntry->psCb->pfnVPrintf != NULL);
+       PVR_ASSERT(psEntry->pvNative != NULL);
+
+       va_start(args, pszFmt);
+       psEntry->psCb->pfnVPrintf(psEntry->pvNative, pszFmt, args);
+       va_end(args);
+}
+
+void DIVPrintf(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszFmt,
+               va_list pArgs)
+{
+       PVR_ASSERT(psEntry != NULL);
+       PVR_ASSERT(psEntry->psCb != NULL);
+       PVR_ASSERT(psEntry->psCb->pfnVPrintf != NULL);
+       PVR_ASSERT(psEntry->pvNative != NULL);
+
+       psEntry->psCb->pfnVPrintf(psEntry->pvNative, pszFmt, pArgs);
+}
+
+void DIPuts(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszStr)
+{
+       PVR_ASSERT(psEntry != NULL);
+       PVR_ASSERT(psEntry->psCb != NULL);
+       PVR_ASSERT(psEntry->psCb->pfnPuts != NULL);
+       PVR_ASSERT(psEntry->pvNative != NULL);
+
+       psEntry->psCb->pfnPuts(psEntry->pvNative, pszStr);
+}
+
+IMG_BOOL DIHasOverflowed(const OSDI_IMPL_ENTRY *psEntry)
+{
+       PVR_ASSERT(psEntry != NULL);
+       PVR_ASSERT(psEntry->psCb != NULL);
+       PVR_ASSERT(psEntry->psCb->pfnHasOverflowed != NULL);
+       PVR_ASSERT(psEntry->pvNative != NULL);
+
+       return psEntry->psCb->pfnHasOverflowed(psEntry->pvNative);
+}
+
+/* ---- OS implementation API ---------------------------------------------- */
+
+static IMG_BOOL _ValidateImplCb(const OSDI_IMPL_CB *psImplCb)
+{
+       PVR_GOTO_IF_FALSE(psImplCb->pfnInit != NULL, failed_);
+       PVR_GOTO_IF_FALSE(psImplCb->pfnDeInit != NULL, failed_);
+       PVR_GOTO_IF_FALSE(psImplCb->pfnCreateGroup != NULL, failed_);
+       PVR_GOTO_IF_FALSE(psImplCb->pfnDestroyGroup != NULL, failed_);
+       PVR_GOTO_IF_FALSE(psImplCb->pfnCreateEntry != NULL, failed_);
+       PVR_GOTO_IF_FALSE(psImplCb->pfnDestroyEntry != NULL, failed_);
+
+       return IMG_TRUE;
+
+failed_:
+       return IMG_FALSE;
+}
+
+/* Walks the tree of groups and entries and creates all of the native handles
+ * for the given implementation for all of the already existing groups and
+ * entries. */
+static PVRSRV_ERROR _InitNativeHandlesRecursively(DI_IMPL *psImpl,
+                                               DI_GROUP *psGroup,
+                                               DI_NATIVE_HANDLE *psNativeParent)
+{
+       PVRSRV_ERROR eError;
+       DLLIST_NODE *psThis, *psNext;
+       DI_NATIVE_HANDLE *psNativeGroup;
+
+       psNativeGroup = OSAllocMem(sizeof(*psNativeGroup));
+       PVR_LOG_RETURN_IF_NOMEM(psNativeGroup, "OSAllocMem");
+
+       eError = psImpl->sCb.pfnCreateGroup(psGroup->pszName,
+                                  psNativeParent ? psNativeParent->pvHandle : NULL,
+                                  &psNativeGroup->pvHandle);
+       PVR_LOG_GOTO_IF_ERROR(eError, "psImpl->sCb.pfnCreateGroup", free_memory_);
+
+       psNativeGroup->psDiImpl = psImpl;
+
+       dllist_add_to_head(&psGroup->sNativeHandleList,
+                          &psNativeGroup->sListNode);
+
+       dllist_foreach_node(&psGroup->sGroupList, psThis, psNext)
+       {
+               DI_GROUP *psThisGroup = IMG_CONTAINER_OF(psThis, DI_GROUP, sListNode);
+
+               // recurse into the child group
+               eError = _InitNativeHandlesRecursively(psImpl, psThisGroup,
+                                                      psNativeGroup);
+               PVR_LOG_RETURN_IF_ERROR(eError, "_InitNativeHandlesRecursively");
+       }
+
+       dllist_foreach_node(&psGroup->sEntryList, psThis, psNext)
+       {
+               DI_ENTRY *psThisEntry = IMG_CONTAINER_OF(psThis, DI_ENTRY, sListNode);
+
+               eError = _CreateNativeEntry(psThisEntry, psNativeGroup);
+               PVR_LOG_RETURN_IF_ERROR(eError, "_CreateNativeEntry");
+       }
+
+       return PVRSRV_OK;
+
+free_memory_:
+       OSFreeMem(psNativeGroup);
+
+       return eError;
+}
+
+/* Walks the tree of groups and entries and destroys all of the native handles
+ * for the given implementation. */
+static void _DeInitNativeHandlesRecursively(DI_IMPL *psImpl, DI_GROUP *psGroup)
+{
+       DLLIST_NODE *psThis, *psNext;
+       DLLIST_NODE *psThisNative, *psNextNative;
+
+       dllist_foreach_node(&psGroup->sEntryList, psThis, psNext)
+       {
+               DI_ENTRY *psThisEntry = IMG_CONTAINER_OF(psThis, DI_ENTRY, sListNode);
+
+               // free the native entry that belongs to this implementation, using
+               // dedicated iterators so the outer walk of the entry list is not
+               // disturbed
+               dllist_foreach_node(&psThisEntry->sNativeHandleList, psThisNative,
+                                   psNextNative)
+               {
+                       DI_NATIVE_HANDLE *psNativeEntry =
+                               IMG_CONTAINER_OF(psThisNative, DI_NATIVE_HANDLE, sListNode);
+
+                       if (psNativeEntry->psDiImpl == psImpl)
+                       {
+                               _DestroyNativeEntry(psNativeEntry);
+                               // there can be only one entry on the list for a given
+                               // implementation
+                               break;
+                       }
+               }
+       }
+
+       dllist_foreach_node(&psGroup->sGroupList, psThis, psNext)
+       {
+               DI_GROUP *psThisGroup = IMG_CONTAINER_OF(psThis, DI_GROUP, sListNode);
+
+               // recurse into the child group
+               _DeInitNativeHandlesRecursively(psImpl, psThisGroup);
+       }
+
+       // free the native group handle that belongs to this implementation
+       dllist_foreach_node(&psGroup->sNativeHandleList, psThis, psNext)
+       {
+               DI_NATIVE_HANDLE *psNativeGroup =
+                       IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, sListNode);
+
+               if (psNativeGroup->psDiImpl == psImpl)
+               {
+                       _DestroyNativeGroup(psNativeGroup);
+                       // there can be only one group on the list for a given
+                       // implementation
+                       break;
+               }
+       }
+}
+
+static PVRSRV_ERROR _InitImpl(DI_IMPL *psImpl)
+{
+       PVRSRV_ERROR eError;
+
+       eError = psImpl->sCb.pfnInit();
+       PVR_LOG_GOTO_IF_ERROR(eError, "psImpl->pfnInit()", return_);
+
+       /* if the implementation is being created after any groups or entries
+        * have been created we need to walk the current tree and create
+        * native groups and entries for all of the existing ones */
+       eError = _InitNativeHandlesRecursively(psImpl, _g_psRootGroup, NULL);
+       PVR_LOG_GOTO_IF_ERROR(eError, "_InitNativeHandlesRecursively",
+                             free_native_handles_and_deinit_);
+
+       psImpl->bInitialised = IMG_TRUE;
+
+       return PVRSRV_OK;
+
+free_native_handles_and_deinit_:
+       /* something went wrong so we need to walk the tree and remove all of the
+        * native entries and groups that we've created before we can destroy
+        * the implementation */
+       _DeInitNativeHandlesRecursively(psImpl, _g_psRootGroup);
+       psImpl->sCb.pfnDeInit();
+return_:
+       return eError;
+}
+
+PVRSRV_ERROR DIRegisterImplementation(const IMG_CHAR *pszName,
+                                      const OSDI_IMPL_CB *psImplCb)
+{
+       DI_IMPL *psImpl;
+       PVRSRV_ERROR eError;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(pszName != NULL, "pszName");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(_ValidateImplCb(psImplCb), "psImplCb");
+       /* if root group does not exist it can mean 2 things:
+        * - DIInit() was not called so initialisation order is incorrect and needs
+        *   to be fixed
+        * - DIInit() failed but if that happens we should never make it here */
+       PVR_ASSERT(_g_psRootGroup != NULL);
+
+       psImpl = OSAllocMem(sizeof(*psImpl));
+       PVR_LOG_RETURN_IF_NOMEM(psImpl, "OSAllocMem");
+
+       psImpl->pszName = pszName;
+       psImpl->sCb = *psImplCb;
+
+       OSLockAcquire(_g_hLock);
+
+       eError = _InitImpl(psImpl);
+       if (eError != PVRSRV_OK)
+       {
+               /* implementation could not be initialised so free the memory
+                * and forget about it */
+
+               PVR_DPF((PVR_DBG_ERROR, "%s: could not initialise \"%s\" debug "
+                       "info implementation, discarding", __func__,
+                       psImpl->pszName));
+
+               goto free_impl_;
+       }
+
+       psImpl->bInitialised = IMG_TRUE;
+
+       dllist_add_to_tail(&_g_sImpls, &psImpl->sListNode);
+
+       OSLockRelease(_g_hLock);
+
+       return PVRSRV_OK;
+
+free_impl_:
+       OSLockRelease(_g_hLock);
+
+       OSFreeMem(psImpl);
+
+       return eError;
+}
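+
+/* Illustrative sketch (not part of the driver): shape of a back-end
+ * registration as validated by _ValidateImplCb() above. All six callbacks
+ * must be provided; the _Impl* names and the "debugfs" label are placeholders
+ * and the callback prototypes live in osdi_impl.h included above.
+ *
+ *   static const OSDI_IMPL_CB sImplCb = {
+ *       .pfnInit         = _ImplInit,
+ *       .pfnDeInit       = _ImplDeInit,
+ *       .pfnCreateGroup  = _ImplCreateGroup,
+ *       .pfnDestroyGroup = _ImplDestroyGroup,
+ *       .pfnCreateEntry  = _ImplCreateEntry,
+ *       .pfnDestroyEntry = _ImplDestroyEntry,
+ *   };
+ *
+ *   eError = DIRegisterImplementation("debugfs", &sImplCb);
+ */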
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/dma_km.c b/drivers/gpu/drm/img/img-rogue/services/server/common/dma_km.c
new file mode 100644 (file)
index 0000000..a2c504d
--- /dev/null
@@ -0,0 +1,413 @@
+/*************************************************************************/ /*!
+@File           dma_km.c
+@Title          Kernel side of DMA transfer scheduling
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements server-side code for allowing DMA transfers between
+                CPU and device memory.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#if defined(__linux__)
+#include <linux/version.h>
+#include <linux/uaccess.h>
+#include <asm/current.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#endif
+
+#include "pmr.h"
+#include "log2.h"
+#include "device.h"
+#include "pvrsrv.h"
+#include "osfunc.h"
+#include "dma_km.h"
+#include "pvr_debug.h"
+#include "lock_types.h"
+#include "allocmem.h"
+#include "process_stats.h"
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+#include "ri_server.h"
+#endif
+#include "devicemem.h"
+#include "pvrsrv_apphint.h"
+#include "pvrsrv_sync_server.h"
+#include "km_apphint_defs.h"
+#include "di_server.h"
+#include "dma_flags.h"
+
+/* This header must always be included last */
+#if defined(__linux__)
+#include "kernel_compatibility.h"
+#endif
+
+typedef struct _SERVER_CLEANUP_DATA_
+{
+       PVRSRV_DEVICE_NODE *psDevNode;
+       CONNECTION_DATA *psConnection;
+       IMG_UINT32 uiNumDMA;
+       IMG_UINT32 uiCount;
+       SYNC_TIMELINE_OBJ sTimelineObject;
+       void* pvChan;
+       PMR** ppsPMR;
+} SERVER_CLEANUP_DATA;
+
+#if !defined(NO_HARDWARE)
+static void Cleanup(void* pvCleanupData, IMG_BOOL bAdvanceTimeline)
+{
+       IMG_UINT i;
+       PVRSRV_ERROR eError;
+       SERVER_CLEANUP_DATA* psCleanupData = (SERVER_CLEANUP_DATA*) pvCleanupData;
+
+#if defined(DMA_VERBOSE)
+       PVR_DPF((PVR_DBG_ERROR, "Server Cleanup thread entry (%p)", pvCleanupData));
+#endif
+
+       for (i=0; i<psCleanupData->uiCount; i++)
+       {
+               eError = PMRUnlockSysPhysAddresses(psCleanupData->ppsPMR[i]);
+               PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses");
+       }
+
+       /* Advance timeline */
+       if (psCleanupData->sTimelineObject.pvTlObj && bAdvanceTimeline)
+       {
+               eError = SyncSWTimelineAdvanceKM(psCleanupData->psDevNode, &psCleanupData->sTimelineObject);
+               PVR_LOG_IF_ERROR(eError, "SyncSWTimelineAdvanceKM");
+               eError = SyncSWTimelineReleaseKM(&psCleanupData->sTimelineObject);
+               PVR_LOG_IF_ERROR(eError, "SyncSWTimelineReleaseKM");
+       }
+
+       OSAtomicDecrement(&psCleanupData->psConnection->ui32NumDmaTransfersInFlight);
+#if defined(DMA_VERBOSE)
+       PVR_DPF((PVR_DBG_ERROR, "Decremented to %d", OSAtomicRead(&psCleanupData->psConnection->ui32NumDmaTransfersInFlight)));
+#endif
+       eError = OSEventObjectSignal(psCleanupData->psConnection->hDmaEventObject);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: OSEventObjectSignal failed: %s",
+                       __func__, PVRSRVGetErrorString(eError)));
+       }
+
+
+       OSFreeMem(psCleanupData->ppsPMR);
+       OSFreeMem(psCleanupData);
+}
+#endif /* !defined(NO_HARDWARE) */
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVInitialiseDMA(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig;
+
+       if (psDevConfig->bHasDma)
+       {
+
+               PVR_ASSERT(psDevConfig->pfnSlaveDMAGetChan != NULL);
+               PVR_ASSERT(psDevConfig->pfnSlaveDMAFreeChan != NULL);
+               PVR_ASSERT(psDevConfig->pszDmaTxChanName != NULL);
+               PVR_ASSERT(psDevConfig->pszDmaRxChanName != NULL);
+
+               psDeviceNode->hDmaTxChan =
+                       psDevConfig->pfnSlaveDMAGetChan(psDevConfig,
+                                                                                        psDevConfig->pszDmaTxChanName);
+               if (!psDeviceNode->hDmaTxChan)
+               {
+                       return PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+               }
+               psDeviceNode->hDmaRxChan =
+                       psDevConfig->pfnSlaveDMAGetChan(psDevConfig,
+                                                                                        psDevConfig->pszDmaRxChanName);
+               if (!psDeviceNode->hDmaRxChan)
+               {
+                       psDevConfig->pfnSlaveDMAFreeChan(psDevConfig, psDeviceNode->hDmaTxChan);
+                       return PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+               }
+               psDeviceNode->bHasSystemDMA = true;
+       }
+
+       return PVRSRV_OK;
+}
+
+IMG_EXPORT void
+PVRSRVDeInitialiseDMA(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       if (psDeviceNode->bHasSystemDMA)
+       {
+               PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig;
+
+               psDevConfig->pfnSlaveDMAFreeChan(psDevConfig, psDeviceNode->hDmaRxChan);
+               psDevConfig->pfnSlaveDMAFreeChan(psDevConfig, psDeviceNode->hDmaTxChan);
+       }
+}
+
+IMG_EXPORT PVRSRV_ERROR
+DmaDeviceParams(CONNECTION_DATA *psConnection,
+                               PVRSRV_DEVICE_NODE *psDevNode,
+                               IMG_UINT32 *ui32DmaBuffAlign,
+                               IMG_UINT32 *ui32DmaTransferMult)
+{
+       PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig;
+
+       *ui32DmaBuffAlign = psDevConfig->ui32DmaAlignment;
+       *ui32DmaTransferMult = psDevConfig->ui32DmaTransferUnit;
+
+        return PVRSRV_OK;
+}
+
+IMG_EXPORT PVRSRV_ERROR
+DmaSparseMappingTable(PMR *psPMR,
+                                         IMG_DEVMEM_OFFSET_T uiOffset,
+                                         IMG_UINT32 ui32SizeInPages,
+                                         IMG_BOOL *pbTable)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_DEV_PHYADDR *psDevPhyAddr;
+       IMG_BOOL *pbValid;
+
+       psDevPhyAddr = OSAllocZMem(ui32SizeInPages * sizeof(IMG_DEV_PHYADDR));
+       PVR_LOG_GOTO_IF_NOMEM(psDevPhyAddr, eError, err1);
+
+       pbValid = OSAllocZMem(ui32SizeInPages * sizeof(IMG_BOOL));
+       PVR_LOG_GOTO_IF_NOMEM(pbValid, eError, err2);
+
+       eError = PMRLockSysPhysAddresses(psPMR);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PMRLockSysPhysAddresses", err3);
+
+       eError = PMR_DevPhysAddr(psPMR,
+                                OSGetPageShift(),
+                                ui32SizeInPages,
+                                uiOffset,
+                                psDevPhyAddr,
+                                pbValid);
+       if (eError != PVRSRV_OK)
+       {
+               /* balance the physical address lock taken above before bailing out */
+               PVR_LOG_IF_ERROR(eError, "PMR_DevPhysAddr");
+               PMRUnlockSysPhysAddresses(psPMR);
+               goto err3;
+       }
+
+       PMRUnlockSysPhysAddresses(psPMR);
+
+       memcpy(pbTable, pbValid, ui32SizeInPages * sizeof(IMG_BOOL));
+
+err3:
+       OSFreeMem(pbValid);
+err2:
+       OSFreeMem(psDevPhyAddr);
+err1:
+       return eError;
+}
+
+IMG_EXPORT PVRSRV_ERROR
+DmaTransfer(CONNECTION_DATA *psConnection,
+                   PVRSRV_DEVICE_NODE *psDevNode,
+                       IMG_UINT32 uiNumDMAs,
+                       PMR** ppsPMR,
+                       IMG_UINT64 *puiAddress,
+                       IMG_DEVMEM_OFFSET_T *puiOffset,
+                       IMG_DEVMEM_SIZE_T *puiSize,
+                       IMG_UINT32 uiFlags,
+                       PVRSRV_TIMELINE iUpdateFenceTimeline)
+{
+
+       PVRSRV_ERROR eError = PVRSRV_OK;
+#if defined(NO_HARDWARE)
+       /* On nohw the kernel call just advances the timeline to signal completion */
+
+       SYNC_TIMELINE_OBJ sSwTimeline = {NULL, PVRSRV_NO_TIMELINE};
+
+       if (iUpdateFenceTimeline != PVRSRV_NO_TIMELINE)
+       {
+               eError = SyncSWGetTimelineObj(iUpdateFenceTimeline, &sSwTimeline);
+               PVR_LOG_RETURN_IF_ERROR(eError, "SyncSWGetTimelineObj");
+
+               eError = SyncSWTimelineAdvanceKM(psDevNode, &sSwTimeline);
+               PVR_LOG_RETURN_IF_ERROR(eError, "SyncSWTimelineAdvanceKM");
+
+               eError = SyncSWTimelineReleaseKM(&sSwTimeline);
+               PVR_LOG_RETURN_IF_ERROR(eError, "SyncSWTimelineReleaseKM");
+       }
+
+       return PVRSRV_OK;
+
+#else
+       IMG_DEV_PHYADDR *psDevPhyAddr;
+       IMG_DMA_ADDR *psDmaAddr;
+       IMG_BOOL *pbValid;
+       IMG_UINT32 i;
+       PVRSRV_DEVICE_CONFIG* psDevConfig = psDevNode->psDevConfig;
+       void* pvChan = NULL;
+       SERVER_CLEANUP_DATA* psServerData;
+       void*  pvOSData;
+
+       OSLockAcquire(psConnection->hDmaReqLock);
+
+       if (!psConnection->bAcceptDmaRequests)
+       {
+               OSLockRelease(psConnection->hDmaReqLock);
+               return PVRSRV_OK;
+       }
+
+       OSAtomicIncrement(&psConnection->ui32NumDmaTransfersInFlight);
+#if defined(DMA_VERBOSE)
+       PVR_DPF((PVR_DBG_ERROR, "Incremented to %d", OSAtomicRead(&psConnection->ui32NumDmaTransfersInFlight)));
+#endif
+       psServerData = OSAllocZMem(sizeof(SERVER_CLEANUP_DATA));
+       PVR_LOG_GOTO_IF_NOMEM(psServerData, eError, e0);
+
+       pvChan = uiFlags & (DMA_FLAG_MEM_TO_DEV) ? psDevNode->hDmaTxChan : psDevNode->hDmaRxChan;
+       if (!pvChan)
+       {
+               eError = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+               PVR_LOG_GOTO_IF_ERROR(eError, "Error acquiring DMA channel", e1);
+       }
+
+       if (iUpdateFenceTimeline != PVRSRV_NO_TIMELINE)
+       {
+               eError = SyncSWGetTimelineObj(iUpdateFenceTimeline, &psServerData->sTimelineObject);
+               PVR_LOG_GOTO_IF_ERROR(eError, "SyncSWGetTimelineObj", e1);
+       }
+
+       psServerData->uiCount = 0;
+       psServerData->psDevNode = psDevNode;
+       psServerData->psConnection = psConnection;
+       psServerData->pvChan = pvChan;
+       psServerData->ppsPMR = OSAllocZMem(sizeof(PMR*) * uiNumDMAs);
+       PVR_LOG_GOTO_IF_NOMEM(psServerData->ppsPMR, eError, e2);
+
+       eError = OSDmaAllocData(psDevNode, uiNumDMAs, &pvOSData);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSDmaAllocData failed", e3);
+
+       for (i=0; i<uiNumDMAs; i++)
+       {
+               IMG_UINT32 ui32SizeInPages;
+               IMG_UINT32 uiOffsetInPage = puiOffset[i] & (OSGetPageSize() - 1);
+               ui32SizeInPages = (puiSize[i] + uiOffsetInPage + OSGetPageSize() - 1) >> OSGetPageShift();
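+               /* Worked example (illustrative, assuming 4 KiB pages): for
+                * puiOffset[i] = 0x1100 and puiSize[i] = 0x2300 the transfer spans
+                * bytes 0x1100..0x33FF, so uiOffsetInPage = 0x100 and
+                * ui32SizeInPages = (0x2300 + 0x100 + 0xFFF) >> 12 = 3 pages. */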
+
+               psDmaAddr = OSAllocZMem(ui32SizeInPages * sizeof(IMG_DMA_ADDR));
+               PVR_LOG_GOTO_IF_NOMEM(psDmaAddr, eError, loop_e0);
+
+               psDevPhyAddr = OSAllocZMem(ui32SizeInPages * sizeof(IMG_DEV_PHYADDR));
+               PVR_LOG_GOTO_IF_NOMEM(psDevPhyAddr, eError, loop_e1);
+
+               pbValid = OSAllocZMem(ui32SizeInPages * sizeof(IMG_BOOL));
+               PVR_LOG_GOTO_IF_NOMEM(pbValid, eError, loop_e2);
+
+               eError = PMRLockSysPhysAddresses(ppsPMR[i]);
+               PVR_LOG_GOTO_IF_ERROR(eError, "PMRLockSysPhysAddresses", loop_e3);
+
+               psServerData->ppsPMR[i] = ppsPMR[i];
+
+               eError = PMR_DevPhysAddr(ppsPMR[i],
+                                                                OSGetPageShift(),
+                                                                ui32SizeInPages,
+                                                                puiOffset[i],
+                                                                psDevPhyAddr,
+                                                                pbValid);
+               PVR_LOG_GOTO_IF_ERROR(eError, "PMR_DevPhysAddr", loop_e4);
+
+               psDevConfig->pfnDevPhysAddr2DmaAddr(psDevConfig,
+                                                                                       psDmaAddr,
+                                                                                       psDevPhyAddr,
+                                                                                       pbValid,
+                                                                                       ui32SizeInPages,
+                                                                                       PMR_IsSparse(ppsPMR[i]));
+
+               if (!PMR_IsSparse(ppsPMR[i]))
+               {
+                       eError = OSDmaPrepareTransfer(psDevNode,
+                                                                                 pvChan,
+                                                                                 &psDmaAddr[0], (IMG_UINT64*)puiAddress[i],
+                                                                                 puiSize[i], (uiFlags & DMA_FLAG_MEM_TO_DEV), pvOSData,
+                                                                                 psServerData, Cleanup, (i == 0));
+                       PVR_LOG_GOTO_IF_ERROR(eError, "OSDmaPrepareTransfer", loop_e4);
+                       psServerData->uiCount++;
+
+               }
+               else
+               {
+                       eError = OSDmaPrepareTransferSparse(psDevNode, pvChan,
+                                                                                               psDmaAddr, pbValid,
+                                                                                               (IMG_UINT64*)puiAddress[i], puiSize[i],
+                                                                                               uiOffsetInPage, ui32SizeInPages,
+                                                                                               (uiFlags & DMA_FLAG_MEM_TO_DEV),
+                                                                                               pvOSData, psServerData,
+                                                                                               Cleanup, (i == 0));
+                       PVR_LOG_GOTO_IF_ERROR(eError, "OSDmaPrepareTransferSparse", loop_e4);
+                       psServerData->uiCount++;
+               }
+
+               OSFreeMem(pbValid);
+               OSFreeMem(psDevPhyAddr);
+               OSFreeMem(psDmaAddr);
+
+               continue;
+
+loop_e4:
+               PMRUnlockSysPhysAddresses(ppsPMR[i]);
+loop_e3:
+               OSFreeMem(pbValid);
+loop_e2:
+               OSFreeMem(psDevPhyAddr);
+loop_e1:
+               OSFreeMem(psDmaAddr);
+loop_e0:
+               break;
+       }
+
+       if (psServerData->uiCount == uiNumDMAs)
+       {
+               OSDmaSubmitTransfer(psDevNode, pvOSData, pvChan, (uiFlags & DMA_FLAG_SYNCHRONOUS));
+       }
+       else
+       {
+               /* One of the transfers could not be programmed, roll back */
+               OSDmaForceCleanup(psDevNode, pvChan, pvOSData, psServerData, Cleanup);
+       }
+       OSLockRelease(psConnection->hDmaReqLock);
+       return eError;
+
+e3:
+       OSFreeMem(psServerData->ppsPMR);
+e2:
+       if (iUpdateFenceTimeline != PVRSRV_NO_TIMELINE)
+       {
+               SyncSWTimelineReleaseKM(&psServerData->sTimelineObject);
+       }
+e1:
+       OSFreeMem(psServerData);
+e0:
+       /* no cleanup callback will run on this early-error path so balance the
+        * in-flight count that was incremented above */
+       OSAtomicDecrement(&psConnection->ui32NumDmaTransfersInFlight);
+       OSLockRelease(psConnection->hDmaReqLock);
+       return eError;
+#endif
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/handle.c b/drivers/gpu/drm/img/img-rogue/services/server/common/handle.c
new file mode 100644 (file)
index 0000000..c5dd5d7
--- /dev/null
@@ -0,0 +1,2484 @@
+/*************************************************************************/ /*!
+@File
+@Title          Resource Handle Manager
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provide resource handle management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+/* See handle.h for a description of the handle API. */
+
+/*
+ * The implementation supports movable handle structures, allowing the address
+ * of a handle structure to change without having to fix up pointers in
+ * any of the handle structures. For example, the linked list mechanism
+ * used to link subhandles together uses handle array indices rather than
+ * pointers to the structures themselves.
+ */
+
+#if defined(__linux__)
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+#include "img_defs.h"
+#include "handle.h"
+#include "handle_impl.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+#include "lock.h"
+#include "connection_server.h"
+
+#define        HANDLE_HASH_TAB_INIT_SIZE 32
+#define HANDLE_PROC_HANDLE_HASH_INIT_SIZE 10
+
+#define        TEST_FLAG(v, f) BITMASK_HAS(v, f)
+#define        TEST_ALLOC_FLAG(psHandleData, f) BITMASK_HAS((psHandleData)->eFlag, f)
+
+
+/* Linked list structure. Used for both the list head and list items */
+typedef struct _HANDLE_LIST_
+{
+       IMG_HANDLE hPrev;
+       IMG_HANDLE hNext;
+       IMG_HANDLE hParent;
+} HANDLE_LIST;
+
+typedef struct _HANDLE_DATA_
+{
+       /* The handle that represents this structure */
+       IMG_HANDLE hHandle;
+
+       /* Handle type */
+       PVRSRV_HANDLE_TYPE eType;
+
+       /* Flags specified when the handle was allocated */
+       PVRSRV_HANDLE_ALLOC_FLAG eFlag;
+
+       /* Pointer to the data that the handle represents */
+       void *pvData;
+
+       /*
+        * Callback specified at handle allocation time to
+        * release/destroy/free the data represented by the
+        * handle when its reference count reaches 0. This
+        * should always be NULL for subhandles.
+        */
+       PFN_HANDLE_RELEASE pfnReleaseData;
+
+       /* List head for subhandles of this handle */
+       HANDLE_LIST sChildren;
+
+       /* List entry for sibling subhandles */
+       HANDLE_LIST sSiblings;
+
+       /* Reference count of lookups made. It helps track which resources are in
+        * use in concurrent bridge calls. */
+       IMG_INT32 iLookupCount;
+       /* State of a handle. If the handle was already destroyed this is false.
+        * If this is false and iLookupCount is 0 the pfnReleaseData callback is
+        * called on the handle. */
+       IMG_BOOL bCanLookup;
+
+#if defined(PVRSRV_DEBUG_HANDLE_LOCK)
+       /* Store the handle base used for this handle, so we
+        * can later access the handle base lock (or check if
+        * it has already been acquired)
+        */
+       PVRSRV_HANDLE_BASE *psBase;
+#endif
+
+} HANDLE_DATA;
+
+struct _HANDLE_BASE_
+{
+       /* Pointer to a handle implementations base structure */
+       HANDLE_IMPL_BASE *psImplBase;
+
+       /*
+        * Pointer to handle hash table.
+        * The hash table is used to do reverse lookups, converting data
+        * pointers to handles.
+        */
+       HASH_TABLE *psHashTab;
+
+       /* Type specific (connection/global/process) Lock handle */
+       POS_LOCK hLock;
+
+       /* Can be connection, process, global */
+       PVRSRV_HANDLE_BASE_TYPE eType;
+};
+
+/*
+ * The key for the handle hash table is an array of three elements, the
+ * pointer to the resource, the resource type and the parent handle (or
+ * NULL if there is no parent). The eHandKey enumeration gives the
+ * array indices of the elements making up the key.
+ */
+enum eHandKey
+{
+       HAND_KEY_DATA = 0,
+       HAND_KEY_TYPE,
+       HAND_KEY_PARENT,
+       HAND_KEY_LEN            /* Must be last item in list */
+};
+
+/* HAND_KEY is the type of the hash table key */
+typedef uintptr_t HAND_KEY[HAND_KEY_LEN];
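+
+/* Illustrative sketch (not part of the driver): how a reverse-lookup key is
+ * populated from the three elements described above before being used with
+ * the hash table; pvData, eType and hParent are placeholders for the values
+ * held at the relevant call sites later in this file.
+ *
+ *   HAND_KEY aKey;
+ *
+ *   aKey[HAND_KEY_DATA]   = (uintptr_t)pvData;
+ *   aKey[HAND_KEY_TYPE]   = (uintptr_t)eType;
+ *   aKey[HAND_KEY_PARENT] = (uintptr_t)hParent;
+ */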
+
+typedef struct FREE_HANDLE_DATA_TAG
+{
+       PVRSRV_HANDLE_BASE *psBase;
+       PVRSRV_HANDLE_TYPE eHandleFreeType;
+       /* timing data (ns) used to release the bridge lock once the deadline
+        * is reached */
+       IMG_UINT64 ui64TimeStart;
+       IMG_UINT64 ui64MaxBridgeTime;
+} FREE_HANDLE_DATA;
+
+typedef struct FREE_KERNEL_HANDLE_DATA_TAG
+{
+       PVRSRV_HANDLE_BASE *psBase;
+       HANDLE_DATA *psProcessHandleData;
+       IMG_HANDLE hKernelHandle;
+} FREE_KERNEL_HANDLE_DATA;
+
+/* Stores a pointer to the function table of the handle back-end in use */
+static HANDLE_IMPL_FUNCTAB const *gpsHandleFuncs;
+
+static POS_LOCK gKernelHandleLock;
+static IMG_BOOL gbLockInitialised = IMG_FALSE;
+/* Pointer to process handle base currently being freed */
+static PVRSRV_HANDLE_BASE *g_psProcessHandleBaseBeingFreed;
+/* Lock for the process handle base table */
+static POS_LOCK g_hProcessHandleBaseLock;
+/* Hash table with process handle bases */
+static HASH_TABLE *g_psProcessHandleBaseTable;
+
+void LockHandle(PVRSRV_HANDLE_BASE *psBase)
+{
+       OSLockAcquire(psBase->hLock);
+}
+
+void UnlockHandle(PVRSRV_HANDLE_BASE *psBase)
+{
+       OSLockRelease(psBase->hLock);
+}
+
+/*
+ * Kernel handle base structure. This is used for handles that are not
+ * allocated on behalf of a particular process.
+ */
+PVRSRV_HANDLE_BASE *gpsKernelHandleBase = NULL;
+
+/* Increase the lookup reference count on the given handle.
+ * The handle lock must already be acquired.
+ * Returns: the reference count after the increment
+ */
+static inline IMG_UINT32 HandleGet(HANDLE_DATA *psHandleData)
+{
+#if defined(PVRSRV_DEBUG_HANDLE_LOCK)
+       if (!OSLockIsLocked(psHandleData->psBase->hLock))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Handle lock is not locked", __func__));
+               OSDumpStack();
+       }
+#endif
+
+#ifdef DEBUG_REFCNT
+       PVR_DPF((PVR_DBG_ERROR, "%s: bCanLookup = %u, iLookupCount %d -> %d",
+               __func__, psHandleData->bCanLookup, psHandleData->iLookupCount,
+               psHandleData->iLookupCount + 1));
+#endif /* DEBUG_REFCNT */
+
+       PVR_ASSERT(psHandleData->bCanLookup);
+
+       return ++psHandleData->iLookupCount;
+}
+
+/* Decrease the lookup reference count on the given handle.
+ * The handle lock must already be acquired.
+ * Returns: the reference count after the decrement
+ */
+static inline IMG_UINT32 HandlePut(HANDLE_DATA *psHandleData)
+{
+#if defined(PVRSRV_DEBUG_HANDLE_LOCK)
+       if (!OSLockIsLocked(psHandleData->psBase->hLock))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Handle lock is not locked", __func__));
+               OSDumpStack();
+       }
+#endif
+
+#ifdef DEBUG_REFCNT
+       PVR_DPF((PVR_DBG_ERROR, "%s: bCanLookup = %u, iLookupCount %d -> %d",
+               __func__, psHandleData->bCanLookup, psHandleData->iLookupCount,
+               psHandleData->iLookupCount - 1));
+#endif /* DEBUG_REFCNT */
+
+       /* psHandleData->bCanLookup can be false at this point */
+       PVR_ASSERT(psHandleData->iLookupCount > 0);
+
+       return --psHandleData->iLookupCount;
+}
+
+static inline IMG_BOOL IsRetryError(PVRSRV_ERROR eError)
+{
+       return eError == PVRSRV_ERROR_RETRY || eError == PVRSRV_ERROR_KERNEL_CCB_FULL;
+}
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+static const IMG_CHAR *HandleTypeToString(PVRSRV_HANDLE_TYPE eType)
+{
+       #define HANDLETYPE(x) \
+                       case PVRSRV_HANDLE_TYPE_##x: \
+                               return #x;
+       switch (eType)
+       {
+               #include "handle_types.h"
+               #undef HANDLETYPE
+
+               default:
+                       return "INVALID";
+       }
+}
+
+static const IMG_CHAR *HandleBaseTypeToString(PVRSRV_HANDLE_BASE_TYPE eType)
+{
+       #define HANDLEBASETYPE(x) \
+                       case PVRSRV_HANDLE_BASE_TYPE_##x: \
+                               return #x;
+       switch (eType)
+       {
+               HANDLEBASETYPE(CONNECTION);
+               HANDLEBASETYPE(PROCESS);
+               HANDLEBASETYPE(GLOBAL);
+               #undef HANDLEBASETYPE
+
+               default:
+                       return "INVALID";
+       }
+}
+#endif
+
+static PVRSRV_ERROR HandleUnrefAndMaybeMarkForFree(PVRSRV_HANDLE_BASE *psBase,
+                                                   HANDLE_DATA *psHandleData,
+                                                   IMG_HANDLE hHandle,
+                                                   PVRSRV_HANDLE_TYPE eType);
+
+static PVRSRV_ERROR HandleFreePrivData(PVRSRV_HANDLE_BASE *psBase,
+                                       HANDLE_DATA *psHandleData,
+                                       IMG_HANDLE hHandle,
+                                       PVRSRV_HANDLE_TYPE eType);
+
+static PVRSRV_ERROR HandleFreeDestroy(PVRSRV_HANDLE_BASE *psBase,
+                                      HANDLE_DATA *psHandleData,
+                                      IMG_HANDLE hHandle,
+                                      PVRSRV_HANDLE_TYPE eType);
+
+/*!
+*******************************************************************************
+ @Function      GetHandleData
+ @Description   Get the handle data structure for a given handle
+ @Input         psBase - pointer to handle base structure
+                hHandle - handle from client
+                eType - handle type or PVRSRV_HANDLE_TYPE_NONE if the handle
+                        type is not to be checked.
+ @Output        ppsHandleData - pointer to a pointer to the handle data struct
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(GetHandleData)
+#endif
+static INLINE
+PVRSRV_ERROR GetHandleData(PVRSRV_HANDLE_BASE *psBase,
+                          HANDLE_DATA **ppsHandleData,
+                          IMG_HANDLE hHandle,
+                          PVRSRV_HANDLE_TYPE eType)
+{
+       HANDLE_DATA *psHandleData;
+       PVRSRV_ERROR eError;
+
+       eError = gpsHandleFuncs->pfnGetHandleData(psBase->psImplBase,
+                                                 hHandle,
+                                                 (void **)&psHandleData);
+       PVR_RETURN_IF_ERROR(eError);
+
+       /*
+        * Unless PVRSRV_HANDLE_TYPE_NONE was passed in to this function,
+        * check handle is of the correct type.
+        */
+       if (unlikely(eType != PVRSRV_HANDLE_TYPE_NONE && eType != psHandleData->eType))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "GetHandleData: Type mismatch. Lookup request: Handle %p, type: %s (%u) but stored handle is type %s (%u)",
+                        hHandle,
+                        HandleTypeToString(eType),
+                        eType,
+                        HandleTypeToString(psHandleData->eType),
+                        psHandleData->eType));
+               return PVRSRV_ERROR_HANDLE_TYPE_MISMATCH;
+       }
+
+       /* Return the handle structure */
+       *ppsHandleData = psHandleData;
+
+       return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+ @Function      HandleListInit
+ @Description   Initialise a linked list structure embedded in a handle
+                structure.
+ @Input         hHandle - handle containing the linked list structure
+                psList - pointer to linked list structure
+                hParent - parent handle or NULL
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListInit)
+#endif
+static INLINE
+void HandleListInit(IMG_HANDLE hHandle, HANDLE_LIST *psList, IMG_HANDLE hParent)
+{
+       psList->hPrev = hHandle;
+       psList->hNext = hHandle;
+       psList->hParent = hParent;
+}
+
+/*!
+*******************************************************************************
+ @Function      InitParentList
+ @Description   Initialise the children list head in a handle structure.
+                The children are the subhandles of this handle.
+ @Input         psHandleData - pointer to handle data structure
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(InitParentList)
+#endif
+static INLINE
+void InitParentList(HANDLE_DATA *psHandleData)
+{
+       IMG_HANDLE hParent = psHandleData->hHandle;
+
+       HandleListInit(hParent, &psHandleData->sChildren, hParent);
+}
+
+/*!
+*******************************************************************************

+ @Function      InitChildEntry
+ @Description   Initialise the child list entry in a handle structure. The list
+                entry is used to link together subhandles of a given handle.
+ @Input         psHandleData - pointer to handle data structure
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(InitChildEntry)
+#endif
+static INLINE
+void InitChildEntry(HANDLE_DATA *psHandleData)
+{
+       HandleListInit(psHandleData->hHandle, &psHandleData->sSiblings, NULL);
+}
+
+/*!
+*******************************************************************************
+ @Function      HandleListIsEmpty
+ @Description   Determine whether a given linked list is empty.
+ @Input         hHandle - handle containing the list head
+                psList - pointer to the list head
+ @Return        IMG_TRUE if the list is empty, IMG_FALSE if it isn't.
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListIsEmpty)
+#endif
+static INLINE
+IMG_BOOL HandleListIsEmpty(IMG_HANDLE hHandle, HANDLE_LIST *psList) /* TODO: instead of passing in the handle, could this simply return (IMG_BOOL)(psList->hPrev == psList->hNext)? */
+{
+       IMG_BOOL bIsEmpty;
+
+       bIsEmpty = (IMG_BOOL)(psList->hNext == hHandle);
+
+#ifdef DEBUG
+       {
+               IMG_BOOL bIsEmpty2;
+
+               bIsEmpty2 = (IMG_BOOL)(psList->hPrev == hHandle);
+               PVR_ASSERT(bIsEmpty == bIsEmpty2);
+       }
+#endif
+
+       return bIsEmpty;
+}
+
+#ifdef DEBUG
+/*!
+*******************************************************************************
+ @Function      NoChildren
+ @Description   Determine whether a handle has any subhandles
+ @Input         psHandleData - pointer to handle data structure
+ @Return        IMG_TRUE if the handle has no subhandles, IMG_FALSE if it does.
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(NoChildren)
+#endif
+static INLINE
+IMG_BOOL NoChildren(HANDLE_DATA *psHandleData)
+{
+       PVR_ASSERT(psHandleData->sChildren.hParent == psHandleData->hHandle);
+
+       return HandleListIsEmpty(psHandleData->hHandle, &psHandleData->sChildren);
+}
+
+/*!
+*******************************************************************************
+ @Function      NoParent
+ @Description   Determine whether a handle is a subhandle
+ @Input         psHandleData - pointer to handle data structure
+ @Return        IMG_TRUE if the handle is not a subhandle, IMG_FALSE if it is.
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(NoParent)
+#endif
+static INLINE
+IMG_BOOL NoParent(HANDLE_DATA *psHandleData)
+{
+       if (HandleListIsEmpty(psHandleData->hHandle, &psHandleData->sSiblings))
+       {
+               PVR_ASSERT(psHandleData->sSiblings.hParent == NULL);
+
+               return IMG_TRUE;
+       }
+
+       PVR_ASSERT(psHandleData->sSiblings.hParent != NULL);
+       return IMG_FALSE;
+}
+#endif /*DEBUG*/
+
+/*!
+*******************************************************************************
+ @Function      ParentHandle
+ @Description   Determine the parent of a handle
+ @Input         psHandleData - pointer to handle data structure
+ @Return        Parent handle, or NULL if the handle is not a subhandle.
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(ParentHandle)
+#endif
+static INLINE
+IMG_HANDLE ParentHandle(HANDLE_DATA *psHandleData)
+{
+       return psHandleData->sSiblings.hParent;
+}
+
+/*
+ * GetHandleListFromHandleAndOffset is used to generate either a
+ * pointer to the subhandle list head, or a pointer to the linked list
+ * structure of an item on a subhandle list.
+ * The list head is itself on the list, but is at a different offset
+ * in the handle structure to the linked list structure for items on
+ * the list. The two linked list structures are differentiated by
+ * the third parameter, containing the parent handle. The parent field
+ * in the list head structure references the handle structure that contains
+ * it. For items on the list, the parent field in the linked list structure
+ * references the parent handle, which will be different from the handle
+ * containing the linked list structure.
+ */
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(GetHandleListFromHandleAndOffset)
+#endif
+static INLINE
+HANDLE_LIST *GetHandleListFromHandleAndOffset(PVRSRV_HANDLE_BASE *psBase,
+                                             IMG_HANDLE hEntry,
+                                             IMG_HANDLE hParent,
+                                             size_t uiParentOffset,
+                                             size_t uiEntryOffset)
+{
+       HANDLE_DATA *psHandleData = NULL;
+
+       PVR_ASSERT(psBase != NULL);
+
+       if (GetHandleData(psBase, &psHandleData, hEntry,
+                         PVRSRV_HANDLE_TYPE_NONE) != PVRSRV_OK)
+       {
+               return NULL;
+       }
+
+       if (hEntry == hParent)
+       {
+               return (HANDLE_LIST *)IMG_OFFSET_ADDR(psHandleData, uiParentOffset);
+       }
+       else
+       {
+               return (HANDLE_LIST *)IMG_OFFSET_ADDR(psHandleData, uiEntryOffset);
+       }
+}
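+
+/*
+ * Illustration (informal sketch, not a statement of the API contract): how
+ * the two offsets select between the list head and a list entry. Passing
+ * the parent handle as both hEntry and hParent returns the head embedded
+ * in the parent, while passing a child handle returns that child's entry:
+ *
+ *     GetHandleListFromHandleAndOffset(psBase, hParent, hParent,
+ *                                      offsetof(HANDLE_DATA, sChildren),
+ *                                      offsetof(HANDLE_DATA, sSiblings));
+ *         -> &psParentData->sChildren   (hEntry == hParent)
+ *
+ *     GetHandleListFromHandleAndOffset(psBase, hChild, hParent, ...);
+ *         -> &psChildData->sSiblings    (hEntry != hParent)
+ */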
+
+/*!
+*******************************************************************************
+ @Function      HandleListInsertBefore
+ @Description   Insert a handle before a handle currently on the list.
+ @Input         hEntry - handle before which the new handle is inserted
+                psEntry - pointer to the list structure of hEntry
+                uiParentOffset - offset to list head struct in handle structure
+                hNewEntry - handle to be inserted
+                psNewEntry - pointer to handle structure of item to be inserted
+                uiEntryOffset - offset of list item struct in handle structure
+                hParent - parent handle of hNewEntry
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListInsertBefore)
+#endif
+static INLINE
+PVRSRV_ERROR HandleListInsertBefore(PVRSRV_HANDLE_BASE *psBase,
+                                   IMG_HANDLE hEntry,
+                                   HANDLE_LIST *psEntry,
+                                   size_t uiParentOffset,
+                                   IMG_HANDLE hNewEntry,
+                                   HANDLE_LIST *psNewEntry,
+                                   size_t uiEntryOffset,
+                                   IMG_HANDLE hParent)
+{
+       HANDLE_LIST *psPrevEntry;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psEntry != NULL, "psEntry");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psNewEntry != NULL, "psNewEntry");
+
+       psPrevEntry = GetHandleListFromHandleAndOffset(psBase,
+                                                      psEntry->hPrev,
+                                                      hParent,
+                                                      uiParentOffset,
+                                                      uiEntryOffset);
+       if (psPrevEntry == NULL)
+       {
+               return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+       }
+
+       PVR_ASSERT(psNewEntry->hParent == NULL);
+       PVR_ASSERT(hEntry == psPrevEntry->hNext);
+
+#if defined(DEBUG)
+       {
+               HANDLE_LIST *psParentList;
+
+               psParentList = GetHandleListFromHandleAndOffset(psBase,
+                                                               hParent,
+                                                               hParent,
+                                                               uiParentOffset,
+                                                               uiParentOffset);
+               PVR_ASSERT(psParentList && psParentList->hParent == hParent);
+       }
+#endif /* defined(DEBUG) */
+
+       psNewEntry->hPrev = psEntry->hPrev;
+       psEntry->hPrev = hNewEntry;
+
+       psNewEntry->hNext = hEntry;
+       psPrevEntry->hNext = hNewEntry;
+
+       psNewEntry->hParent = hParent;
+
+       return PVRSRV_OK;
+}
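+
+/*
+ * Illustration (informal sketch): inserting hNewEntry before hEntry rewires
+ * four links. With P denoting the entry previously at psEntry->hPrev, the
+ * end state is
+ *
+ *     P.hNext           = hNewEntry
+ *     psNewEntry->hPrev = old psEntry->hPrev (i.e. P)
+ *     psNewEntry->hNext = hEntry
+ *     psEntry->hPrev    = hNewEntry
+ *
+ * so inserting before the list head (as AdoptChild() does) appends the new
+ * entry at the tail of the circular list.
+ */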
+
+/*!
+*******************************************************************************
+ @Function      AdoptChild
+ @Description   Assign a subhandle to a handle
+ @Input         psParentData - pointer to handle structure of parent handle
+                psChildData - pointer to handle structure of child subhandle
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(AdoptChild)
+#endif
+static INLINE
+PVRSRV_ERROR AdoptChild(PVRSRV_HANDLE_BASE *psBase,
+                       HANDLE_DATA *psParentData,
+                       HANDLE_DATA *psChildData)
+{
+       IMG_HANDLE hParent = psParentData->sChildren.hParent;
+
+       PVR_ASSERT(hParent == psParentData->hHandle);
+
+       return HandleListInsertBefore(psBase,
+                                     hParent,
+                                     &psParentData->sChildren,
+                                     offsetof(HANDLE_DATA, sChildren),
+                                     psChildData->hHandle,
+                                     &psChildData->sSiblings,
+                                     offsetof(HANDLE_DATA, sSiblings),
+                                     hParent);
+}
+
+/*!
+*******************************************************************************
+ @Function      HandleListRemove
+ @Description   Remove a handle from a list
+ @Input         hEntry - handle to be removed
+                psEntry - pointer to handle structure of item to be removed
+                uiEntryOffset - offset of list item struct in handle structure
+                uiParentOffset - offset to list head struct in handle structure
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListRemove)
+#endif
+static INLINE
+PVRSRV_ERROR HandleListRemove(PVRSRV_HANDLE_BASE *psBase,
+                             IMG_HANDLE hEntry,
+                             HANDLE_LIST *psEntry,
+                             size_t uiEntryOffset,
+                             size_t uiParentOffset)
+{
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psEntry != NULL, "psEntry");
+
+       if (!HandleListIsEmpty(hEntry, psEntry))
+       {
+               HANDLE_LIST *psPrev;
+               HANDLE_LIST *psNext;
+
+               psPrev = GetHandleListFromHandleAndOffset(psBase,
+                                                         psEntry->hPrev,
+                                                         psEntry->hParent,
+                                                         uiParentOffset,
+                                                         uiEntryOffset);
+               if (psPrev == NULL)
+               {
+                       return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+               }
+
+               psNext = GetHandleListFromHandleAndOffset(psBase,
+                                                         psEntry->hNext,
+                                                         psEntry->hParent,
+                                                         uiParentOffset,
+                                                         uiEntryOffset);
+               if (psNext == NULL)
+               {
+                       return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+               }
+
+               /*
+                * The list head is on the list, and we don't want to
+                * remove it.
+                */
+               PVR_ASSERT(psEntry->hParent != NULL);
+
+               psPrev->hNext = psEntry->hNext;
+               psNext->hPrev = psEntry->hPrev;
+
+               HandleListInit(hEntry, psEntry, NULL);
+       }
+
+       return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+ @Function      UnlinkFromParent
+ @Description   Remove a subhandle from its parent's list
+ @Input         psHandleData - pointer to handle data structure of child
+                               subhandle.
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(UnlinkFromParent)
+#endif
+static INLINE
+PVRSRV_ERROR UnlinkFromParent(PVRSRV_HANDLE_BASE *psBase,
+                             HANDLE_DATA *psHandleData)
+{
+       return HandleListRemove(psBase,
+                               psHandleData->hHandle,
+                               &psHandleData->sSiblings,
+                               offsetof(HANDLE_DATA, sSiblings),
+                               offsetof(HANDLE_DATA, sChildren));
+}
+
+/*!
+*******************************************************************************
+ @Function      HandleListIterate
+ @Description   Iterate over the items in a list
+ @Input         psHead - pointer to list head
+                uiParentOffset - offset to list head struct in handle structure
+                uiEntryOffset - offset of list item struct in handle structure
+                pfnIterFunc - function to be called for each handle in the list
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListIterate)
+#endif
+static INLINE
+PVRSRV_ERROR HandleListIterate(PVRSRV_HANDLE_BASE *psBase,
+                              HANDLE_LIST *psHead,
+                              size_t uiParentOffset,
+                              size_t uiEntryOffset,
+                              PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, IMG_HANDLE))
+{
+       IMG_HANDLE hHandle = psHead->hNext;
+       IMG_HANDLE hParent = psHead->hParent;
+       IMG_HANDLE hNext;
+
+       PVR_ASSERT(psHead->hParent != NULL);
+
+       /*
+        * Follow the next chain from the list head until we reach
+        * the list head again, which signifies the end of the list.
+        */
+       while (hHandle != hParent)
+       {
+               HANDLE_LIST *psEntry;
+               PVRSRV_ERROR eError;
+
+               psEntry = GetHandleListFromHandleAndOffset(psBase,
+                                                          hHandle,
+                                                          hParent,
+                                                          uiParentOffset,
+                                                          uiEntryOffset);
+               if (psEntry == NULL)
+               {
+                       return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+               }
+
+               PVR_ASSERT(psEntry->hParent == psHead->hParent);
+
+               /*
+                * Get the next index now, in case the list item is
+                * modified by the iteration function.
+                */
+               hNext = psEntry->hNext;
+
+               eError = (*pfnIterFunc)(psBase, hHandle);
+               PVR_RETURN_IF_ERROR(eError);
+
+               hHandle = hNext;
+       }
+
+       return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+ @Function      IterateOverChildren
+ @Description   Iterate over the subhandles of a parent handle
+ @Input         psParentData - pointer to parent handle structure
+                pfnIterFunc - function to be called for each subhandle
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(IterateOverChildren)
+#endif
+static INLINE
+PVRSRV_ERROR IterateOverChildren(PVRSRV_HANDLE_BASE *psBase,
+                                HANDLE_DATA *psParentData,
+                                PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, IMG_HANDLE))
+{
+       return HandleListIterate(psBase,
+                                &psParentData->sChildren,
+                                offsetof(HANDLE_DATA, sChildren),
+                                offsetof(HANDLE_DATA, sSiblings),
+                                pfnIterFunc);
+}
+
+/*!
+*******************************************************************************
+ @Function      ParentIfPrivate
+ @Description   Return the parent handle if the handle was allocated with
+                PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE, else return NULL.
+ @Input         psHandleData - pointer to handle data structure
+ @Return        Parent handle or NULL
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(ParentIfPrivate)
+#endif
+static INLINE
+IMG_HANDLE ParentIfPrivate(HANDLE_DATA *psHandleData)
+{
+       return TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ?
+                       ParentHandle(psHandleData) : NULL;
+}
+
+/*!
+*******************************************************************************
+ @Function      InitKey
+ @Description   Initialise a hash table key for the current process
+ @Input         aKey - pointer to key
+                psBase - pointer to handle base structure
+                pvData - pointer to the resource the handle represents
+                eType - type of resource
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(InitKey)
+#endif
+static INLINE
+void InitKey(HAND_KEY aKey,
+            PVRSRV_HANDLE_BASE *psBase,
+            void *pvData,
+            PVRSRV_HANDLE_TYPE eType,
+            IMG_HANDLE hParent)
+{
+       PVR_UNREFERENCED_PARAMETER(psBase);
+
+       aKey[HAND_KEY_DATA] = (uintptr_t)pvData;
+       aKey[HAND_KEY_TYPE] = (uintptr_t)eType;
+       aKey[HAND_KEY_PARENT] = (uintptr_t)hParent;
+}
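+
+/*
+ * Illustration (informal sketch): the hash key is a small array combining
+ * the data pointer, the handle type and the (optional) parent handle, so
+ * the same resource pointer can be keyed under different types or parents
+ * without colliding:
+ *
+ *     HAND_KEY aKey;
+ *     InitKey(aKey, psBase, pvData, eType, NULL);
+ *     ... aKey[HAND_KEY_DATA]   == (uintptr_t)pvData
+ *     ... aKey[HAND_KEY_TYPE]   == (uintptr_t)eType
+ *     ... aKey[HAND_KEY_PARENT] == 0 (no parent)
+ */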
+
+/*!
+*******************************************************************************
+ @Function      FindHandle
+ @Description   Find handle corresponding to a resource pointer
+ @Input         psBase - pointer to handle base structure
+                pvData - pointer to resource to be associated with the handle
+                eType - the type of resource
+ @Return        The handle, or NULL if not found
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(FindHandle)
+#endif
+static INLINE
+IMG_HANDLE FindHandle(PVRSRV_HANDLE_BASE *psBase,
+                     void *pvData,
+                     PVRSRV_HANDLE_TYPE eType,
+                     IMG_HANDLE hParent)
+{
+       HAND_KEY aKey;
+
+       PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+
+       InitKey(aKey, psBase, pvData, eType, hParent);
+
+       return (IMG_HANDLE) HASH_Retrieve_Extended(psBase->psHashTab, aKey);
+}
+
+/*!
+*******************************************************************************
+ @Function      AllocHandle
+ @Description   Allocate a new handle
+ @Input         phHandle - location for new handle
+                pvData - pointer to resource to be associated with the handle
+                eType - the type of resource
+                hParent - parent handle or NULL
+                pfnReleaseData - Function to release resource at handle release
+                                 time
+ @Output        phHandle - points to new handle
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+static PVRSRV_ERROR AllocHandle(PVRSRV_HANDLE_BASE *psBase,
+                               IMG_HANDLE *phHandle,
+                               void *pvData,
+                               PVRSRV_HANDLE_TYPE eType,
+                               PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+                               IMG_HANDLE hParent,
+                               PFN_HANDLE_RELEASE pfnReleaseData)
+{
+       HANDLE_DATA *psNewHandleData;
+       IMG_HANDLE hHandle;
+       PVRSRV_ERROR eError;
+
+       /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+       PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+       PVR_ASSERT(psBase != NULL && psBase->psHashTab != NULL);
+       PVR_ASSERT(gpsHandleFuncs);
+
+       if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+       {
+               /* Handle must not already exist */
+               PVR_ASSERT(FindHandle(psBase, pvData, eType, hParent) == NULL);
+       }
+
+       psNewHandleData = OSAllocZMem(sizeof(*psNewHandleData));
+       PVR_LOG_RETURN_IF_NOMEM(psNewHandleData, "OSAllocZMem");
+
+       eError = gpsHandleFuncs->pfnAcquireHandle(psBase->psImplBase, &hHandle,
+                                                 psNewHandleData);
+       PVR_LOG_GOTO_IF_ERROR(eError, "pfnAcquireHandle",
+                         ErrorFreeHandleData);
+
+       /*
+        * If a data pointer can be associated with multiple handles, we
+        * don't put the handle in the hash table, as the data pointer
+        * may not map to a unique handle
+        */
+       if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+       {
+               HAND_KEY aKey;
+
+               /* Initialise hash key */
+               InitKey(aKey, psBase, pvData, eType, hParent);
+
+               /* Put the new handle in the hash table */
+               eError = HASH_Insert_Extended(psBase->psHashTab, aKey, (uintptr_t)hHandle) ?
+                       PVRSRV_OK : PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE;
+               PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "couldn't add handle to hash table",
+                                 ErrorReleaseHandle);
+       }
+
+       psNewHandleData->hHandle = hHandle;
+       psNewHandleData->eType = eType;
+       psNewHandleData->eFlag = eFlag;
+       psNewHandleData->pvData = pvData;
+       psNewHandleData->pfnReleaseData = pfnReleaseData;
+       psNewHandleData->iLookupCount = 0;
+       psNewHandleData->bCanLookup = IMG_TRUE;
+
+#ifdef DEBUG_REFCNT
+       PVR_DPF((PVR_DBG_ERROR, "%s: bCanLookup = true", __func__));
+#endif /* DEBUG_REFCNT */
+
+       InitParentList(psNewHandleData);
+#if defined(DEBUG)
+       PVR_ASSERT(NoChildren(psNewHandleData));
+#endif
+
+       InitChildEntry(psNewHandleData);
+#if defined(DEBUG)
+       PVR_ASSERT(NoParent(psNewHandleData));
+#endif
+
+#if defined(PVRSRV_DEBUG_HANDLE_LOCK)
+       psNewHandleData->psBase = psBase;
+#endif
+
+       /* Return the new handle to the client */
+       *phHandle = psNewHandleData->hHandle;
+
+       return PVRSRV_OK;
+
+ErrorReleaseHandle:
+       (void)gpsHandleFuncs->pfnReleaseHandle(psBase->psImplBase, hHandle, NULL);
+
+ErrorFreeHandleData:
+       OSFreeMem(psNewHandleData);
+
+       return eError;
+}
+
+/*!
+*******************************************************************************
+ @Function      PVRSRVAllocHandle
+ @Description   Allocate a handle
+ @Input         psBase - pointer to handle base structure
+                pvData - pointer to resource to be associated with the handle
+                eType - the type of resource
+                pfnReleaseData - Function to release resource at handle release
+                                 time
+ @Output        phHandle - points to new handle
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase,
+                              IMG_HANDLE *phHandle,
+                              void *pvData,
+                              PVRSRV_HANDLE_TYPE eType,
+                              PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+                              PFN_HANDLE_RELEASE pfnReleaseData)
+{
+       PVRSRV_ERROR eError;
+
+       LockHandle(psBase);
+       eError = PVRSRVAllocHandleUnlocked(psBase, phHandle, pvData, eType, eFlag, pfnReleaseData);
+       UnlockHandle(psBase);
+
+       return eError;
+}
+
+/*!
+*******************************************************************************
+ @Function      PVRSRVAllocHandleUnlocked
+ @Description   Allocate a handle without acquiring/releasing the handle lock.
+                The function assumes you hold the lock when called.
+ @Input         phHandle - location for new handle
+                pvData - pointer to resource to be associated with the handle
+                eType - the type of resource
+                pfnReleaseData - Function to release resource at handle release
+                                 time
+ @Output        phHandle - points to new handle
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
+                              IMG_HANDLE *phHandle,
+                              void *pvData,
+                              PVRSRV_HANDLE_TYPE eType,
+                              PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+                              PFN_HANDLE_RELEASE pfnReleaseData)
+{
+       *phHandle = NULL;
+
+       /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+       PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+       PVR_ASSERT(gpsHandleFuncs);
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(pfnReleaseData != NULL, "pfnReleaseData");
+
+       return AllocHandle(psBase, phHandle, pvData, eType, eFlag, NULL, pfnReleaseData);
+}
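+
+/*
+ * Usage sketch (illustrative only; MyObjRelease, psObj and eMyType are
+ * hypothetical names, with eMyType standing for a real PVRSRV_HANDLE_TYPE
+ * value). The release callback is invoked when the handle's private data
+ * is freed on destruction; the MULTI flag shown allows several handles to
+ * reference the same data pointer, at the cost of the handle not being
+ * findable via the hash table:
+ *
+ *     static PVRSRV_ERROR MyObjRelease(void *pvData)
+ *     {
+ *             OSFreeMem(pvData);
+ *             return PVRSRV_OK;
+ *     }
+ *
+ *     IMG_HANDLE hObj;
+ *     eError = PVRSRVAllocHandle(psBase, &hObj, psObj, eMyType,
+ *                                PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ *                                MyObjRelease);
+ */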
+
+/*!
+*******************************************************************************
+ @Function      PVRSRVAllocSubHandle
+ @Description   Allocate a subhandle
+ @Input         pvData - pointer to resource to be associated with the subhandle
+                eType - the type of resource
+                hParent - parent handle
+ @Output        phHandle - points to new subhandle
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase,
+                                 IMG_HANDLE *phHandle,
+                                 void *pvData,
+                                 PVRSRV_HANDLE_TYPE eType,
+                                 PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+                                 IMG_HANDLE hParent)
+{
+       PVRSRV_ERROR eError;
+
+       LockHandle(psBase);
+       eError = PVRSRVAllocSubHandleUnlocked(psBase, phHandle, pvData, eType, eFlag, hParent);
+       UnlockHandle(psBase);
+
+       return eError;
+}
+
+/*!
+*******************************************************************************
+ @Function      PVRSRVAllocSubHandleUnlocked
+ @Description   Allocate a subhandle without acquiring/releasing the handle
+                lock. The function assumes you hold the lock when called.
+ @Input         pvData - pointer to resource to be associated with the subhandle
+                eType - the type of resource
+                hParent - parent handle
+ @Output        phHandle - points to new subhandle
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocSubHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
+                                 IMG_HANDLE *phHandle,
+                                 void *pvData,
+                                 PVRSRV_HANDLE_TYPE eType,
+                                 PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+                                 IMG_HANDLE hParent)
+{
+       HANDLE_DATA *psPHandleData = NULL;
+       HANDLE_DATA *psCHandleData = NULL;
+       IMG_HANDLE hParentKey;
+       IMG_HANDLE hHandle;
+       PVRSRV_ERROR eError;
+
+       *phHandle = NULL;
+
+       /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+       PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+       PVR_ASSERT(gpsHandleFuncs);
+
+       PVR_LOG_GOTO_IF_INVALID_PARAM(psBase, eError, Exit);
+
+       hParentKey = TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ? hParent : NULL;
+
+       /* Lookup the parent handle */
+       eError = GetHandleData(psBase, &psPHandleData, hParent, PVRSRV_HANDLE_TYPE_NONE);
+       PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "failed to get parent handle structure",
+                         Exit);
+
+       eError = AllocHandle(psBase, &hHandle, pvData, eType, eFlag, hParentKey, NULL);
+       PVR_GOTO_IF_ERROR(eError, Exit);
+
+       eError = GetHandleData(psBase, &psCHandleData, hHandle, PVRSRV_HANDLE_TYPE_NONE);
+       /* If we were able to allocate the handle then there should be no reason why we
+        * can't also get its handle structure. Otherwise something has gone badly wrong.
+        */
+       PVR_ASSERT(eError == PVRSRV_OK);
+       PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "Failed to get subhandle structure",
+                         ExitFreeHandle);
+
+       /*
+        * Get the parent handle structure again, in case the handle
+        * structure has moved (depending on the implementation
+        * of AllocHandle).
+        */
+       eError = GetHandleData(psBase, &psPHandleData, hParent, PVRSRV_HANDLE_TYPE_NONE);
+       PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "failed to get parent handle structure",
+                         ExitFreeHandle);
+
+       eError = AdoptChild(psBase, psPHandleData, psCHandleData);
+       PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "parent handle failed to adopt subhandle",
+                         ExitFreeHandle);
+
+       *phHandle = hHandle;
+
+       return PVRSRV_OK;
+
+ExitFreeHandle:
+       PVRSRVDestroyHandleUnlocked(psBase, hHandle, eType);
+Exit:
+       return eError;
+}
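+
+/*
+ * Usage sketch (illustrative only; psSubObj and eMySubType are hypothetical
+ * names): a subhandle is allocated against an existing parent handle and is
+ * linked onto the parent's sChildren list, so destroying the parent later
+ * also walks and destroys the subhandle. With
+ * PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE the hash key additionally includes the
+ * parent handle (see ParentIfPrivate()):
+ *
+ *     IMG_HANDLE hSub;
+ *     eError = PVRSRVAllocSubHandle(psBase, &hSub, psSubObj, eMySubType,
+ *                                   PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE,
+ *                                   hParent);
+ */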
+
+/*!
+*******************************************************************************
+ @Function      PVRSRVFindHandle
+ @Description   Find handle corresponding to a resource pointer
+ @Input         pvData - pointer to resource to be associated with the handle
+                eType - the type of resource
+ @Output        phHandle - points to returned handle
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase,
+                             IMG_HANDLE *phHandle,
+                             void *pvData,
+                             PVRSRV_HANDLE_TYPE eType)
+{
+       PVRSRV_ERROR eError;
+
+       LockHandle(psBase);
+       eError = PVRSRVFindHandleUnlocked(psBase, phHandle, pvData, eType);
+       UnlockHandle(psBase);
+
+       return eError;
+}
+
+/*!
+*******************************************************************************
+ @Function      PVRSRVFindHandleUnlocked
+ @Description   Find handle corresponding to a resource pointer without
+                acquiring/releasing the handle lock. The function assumes you
+                hold the lock when called.
+ @Input         pvData - pointer to resource to be associated with the handle
+                eType - the type of resource
+ @Output        phHandle - points to the returned handle
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+PVRSRV_ERROR PVRSRVFindHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
+                             IMG_HANDLE *phHandle,
+                             void *pvData,
+                             PVRSRV_HANDLE_TYPE eType)
+{
+       IMG_HANDLE hHandle;
+
+       /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+       PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+       PVR_ASSERT(gpsHandleFuncs);
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase");
+
+       /* See if there is a handle for this data pointer */
+       hHandle = FindHandle(psBase, pvData, eType, NULL);
+       if (hHandle == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Error finding handle. Type %u",
+                        __func__,
+                        eType));
+
+               return PVRSRV_ERROR_HANDLE_NOT_FOUND;
+       }
+
+       *phHandle = hHandle;
+
+       return PVRSRV_OK;
+}
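+
+/*
+ * Note (illustrative only; eMyType is a hypothetical handle type): because
+ * handles allocated with PVRSRV_HANDLE_ALLOC_FLAG_MULTI are never inserted
+ * into the hash table (see AllocHandle() above), they cannot be found again
+ * from their data pointer:
+ *
+ *     eError = PVRSRVFindHandle(psBase, &hFound, pvData, eMyType);
+ *     ... fails with PVRSRV_ERROR_HANDLE_NOT_FOUND for MULTI-allocated
+ *     ... handles; otherwise hFound receives the handle previously
+ *     ... allocated for pvData with that type.
+ */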
+
+/*!
+*******************************************************************************
+ @Function      PVRSRVLookupHandle
+ @Description   Lookup the data pointer corresponding to a handle
+ @Input         hHandle - handle from client
+                eType - handle type
+                bRef - If TRUE, a reference will be added on the handle if the
+                       lookup is successful.
+ @Output        ppvData - points to the return data pointer
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase,
+                               void **ppvData,
+                               IMG_HANDLE hHandle,
+                               PVRSRV_HANDLE_TYPE eType,
+                               IMG_BOOL bRef)
+{
+       PVRSRV_ERROR eError;
+
+       LockHandle(psBase);
+       eError = PVRSRVLookupHandleUnlocked(psBase, ppvData, hHandle, eType, bRef);
+       UnlockHandle(psBase);
+
+       return eError;
+}
+
+/*!
+*******************************************************************************
+ @Function      PVRSRVLookupHandleUnlocked
+ @Description   Lookup the data pointer corresponding to a handle without
+                acquiring/releasing the handle lock. The function assumes you
+                hold the lock when called.
+ @Input         hHandle - handle from client
+                eType - handle type
+                bRef - If TRUE, a reference will be added on the handle if the
+                       lookup is successful.
+ @Output        ppvData - points to the returned data pointer
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+PVRSRV_ERROR PVRSRVLookupHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
+                               void **ppvData,
+                               IMG_HANDLE hHandle,
+                               PVRSRV_HANDLE_TYPE eType,
+                               IMG_BOOL bRef)
+{
+       HANDLE_DATA *psHandleData = NULL;
+       PVRSRV_ERROR eError;
+
+       /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+       PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+       PVR_ASSERT(gpsHandleFuncs);
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase");
+
+       eError = GetHandleData(psBase, &psHandleData, hHandle, eType);
+       if (unlikely(eError != PVRSRV_OK))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Error looking up handle (%s) for base %p of type %s. Handle %p, type %s",
+                        __func__,
+                        PVRSRVGetErrorString(eError),
+                        psBase,
+                        HandleBaseTypeToString(psBase->eType),
+                        (void*) hHandle,
+                        HandleTypeToString(eType)));
+#if defined(DEBUG) || defined(PVRSRV_NEED_PVR_DPF)
+               OSDumpStack();
+#endif
+               return eError;
+       }
+
+       /* If bCanLookup is false it means that a destroy operation was already
+        * called on this handle; therefore it can no longer be looked up. */
+       if (!psHandleData->bCanLookup)
+       {
+               return PVRSRV_ERROR_HANDLE_NOT_ALLOCATED;
+       }
+
+       if (bRef)
+       {
+               HandleGet(psHandleData);
+       }
+
+       *ppvData = psHandleData->pvData;
+
+       return PVRSRV_OK;
+}
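+
+/*
+ * Usage sketch (illustrative only; eMyType is a hypothetical handle type):
+ * looking a handle up with bRef = IMG_TRUE takes a lookup reference
+ * (HandleGet), which prevents the handle from being destroyed and must be
+ * dropped again with PVRSRVReleaseHandle():
+ *
+ *     void *pvObj = NULL;
+ *     eError = PVRSRVLookupHandle(psBase, &pvObj, hObj, eMyType, IMG_TRUE);
+ *     if (eError == PVRSRV_OK)
+ *     {
+ *             ... use pvObj ...
+ *             PVRSRVReleaseHandle(psBase, hObj, eMyType);
+ *     }
+ */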
+
+/*!
+*******************************************************************************
+ @Function      PVRSRVLookupSubHandle
+ @Description   Lookup the data pointer corresponding to a subhandle
+ @Input         hHandle - handle from client
+                eType - handle type
+                hAncestor - ancestor handle
+ @Output        ppvData - points to the returned data pointer
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase,
+                                  void **ppvData,
+                                  IMG_HANDLE hHandle,
+                                  PVRSRV_HANDLE_TYPE eType,
+                                  IMG_HANDLE hAncestor)
+{
+       HANDLE_DATA *psPHandleData = NULL;
+       HANDLE_DATA *psCHandleData = NULL;
+       PVRSRV_ERROR eError;
+
+       /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+       PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+       PVR_ASSERT(gpsHandleFuncs);
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase");
+
+       LockHandle(psBase);
+
+       eError = GetHandleData(psBase, &psCHandleData, hHandle, eType);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Error looking up subhandle (%s). Handle %p, type %u",
+                        __func__,
+                        PVRSRVGetErrorString(eError),
+                        (void*) hHandle,
+                        eType));
+               OSDumpStack();
+               goto ExitUnlock;
+       }
+
+       /* Look for hAncestor among the handle's ancestors */
+       for (psPHandleData = psCHandleData; ParentHandle(psPHandleData) != hAncestor; )
+       {
+               eError = GetHandleData(psBase, &psPHandleData, ParentHandle(psPHandleData), PVRSRV_HANDLE_TYPE_NONE);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_LOG_ERROR(eError, "GetHandleData");
+                       eError = PVRSRV_ERROR_INVALID_SUBHANDLE;
+                       goto ExitUnlock;
+               }
+       }
+
+       *ppvData = psCHandleData->pvData;
+
+       eError = PVRSRV_OK;
+
+ExitUnlock:
+       UnlockHandle(psBase);
+
+       return eError;
+}
+
+
+/*!
+*******************************************************************************
+ @Function      PVRSRVReleaseHandle
+ @Description   Release a handle that is no longer needed
+ @Input         hHandle - handle from client
+                eType - handle type
+******************************************************************************/
+void PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase,
+                         IMG_HANDLE hHandle,
+                         PVRSRV_HANDLE_TYPE eType)
+{
+       LockHandle(psBase);
+       PVRSRVReleaseHandleUnlocked(psBase, hHandle, eType);
+       UnlockHandle(psBase);
+}
+
+
+/*!
+*******************************************************************************
+ @Function      PVRSRVReleaseHandleUnlocked
+ @Description   Release a handle that is no longer needed without
+                acquiring/releasing the handle lock. The function assumes you
+                hold the lock when called.
+ @Input         hHandle - handle from client
+                eType - handle type
+******************************************************************************/
+void PVRSRVReleaseHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
+                                 IMG_HANDLE hHandle,
+                                 PVRSRV_HANDLE_TYPE eType)
+{
+       HANDLE_DATA *psHandleData = NULL;
+       PVRSRV_ERROR eError;
+
+       /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+       PVR_ASSERT(psBase != NULL);
+       PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+       PVR_ASSERT(gpsHandleFuncs);
+
+       PVR_LOG_RETURN_VOID_IF_FALSE(psBase != NULL, "invalid psBase");
+
+       eError = GetHandleData(psBase, &psHandleData, hHandle, eType);
+       if (unlikely(eError != PVRSRV_OK))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Error (%s) looking up handle %p of type %s "
+                       "for base %p of type %s.", __func__, PVRSRVGetErrorString(eError),
+                       (void*) hHandle, HandleTypeToString(eType), psBase,
+                       HandleBaseTypeToString(psBase->eType)));
+
+               PVR_ASSERT(eError == PVRSRV_OK);
+
+               return;
+       }
+
+       PVR_ASSERT(psHandleData->bCanLookup);
+       PVR_ASSERT(psHandleData->iLookupCount > 0);
+
+       /* HandlePut() drops the lookup reference; it returns early (leaving the
+        * handle alive) if there are still outstanding lookups on this handle or
+        * the handle has not been marked for destruction yet. */
+       HandlePut(psHandleData);
+}
+
+/*!
+*******************************************************************************
+ @Function      PVRSRVPurgeHandles
+ @Description   Purge handles for a given handle base
+ @Input         psBase - pointer to handle base structure
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(gpsHandleFuncs);
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase");
+
+       LockHandle(psBase);
+       eError = gpsHandleFuncs->pfnPurgeHandles(psBase->psImplBase);
+       UnlockHandle(psBase);
+
+       return eError;
+}
+
+static PVRSRV_ERROR HandleUnrefAndMaybeMarkForFreeWrapper(PVRSRV_HANDLE_BASE *psBase,
+                                                          IMG_HANDLE hHandle)
+{
+       HANDLE_DATA *psHandleData;
+       PVRSRV_ERROR eError = GetHandleData(psBase, &psHandleData, hHandle,
+                                           PVRSRV_HANDLE_TYPE_NONE);
+       PVR_RETURN_IF_ERROR(eError);
+
+       return HandleUnrefAndMaybeMarkForFree(psBase, psHandleData, hHandle, PVRSRV_HANDLE_TYPE_NONE);
+}
+
+static PVRSRV_ERROR HandleUnrefAndMaybeMarkForFree(PVRSRV_HANDLE_BASE *psBase,
+                                                   HANDLE_DATA *psHandleData,
+                                                   IMG_HANDLE hHandle,
+                                                   PVRSRV_HANDLE_TYPE eType)
+{
+       PVRSRV_ERROR eError;
+
+       /* If bCanLookup is false it means that the destructor was called more than
+        * once on this handle. */
+       if (!psHandleData->bCanLookup)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Handle %p of type %s already freed.",
+                       __func__, psHandleData->hHandle,
+                       HandleTypeToString(psHandleData->eType)));
+               return PVRSRV_ERROR_HANDLE_NOT_FOUND;
+       }
+
+       if (psHandleData->iLookupCount > 0)
+       {
+               return PVRSRV_ERROR_OBJECT_STILL_REFERENCED;
+       }
+
+       /* Mark this handle as freed only if it's no longer referenced by any
+        * lookup. The user space should retry freeing this handle once there are
+        * no outstanding lookups. */
+       psHandleData->bCanLookup = IMG_FALSE;
+
+#ifdef DEBUG_REFCNT
+       PVR_DPF((PVR_DBG_ERROR, "%s: bCanLookup = false, iLookupCount = %d", __func__,
+               psHandleData->iLookupCount));
+#endif /* DEBUG_REFCNT */
+
+       /* Prepare children for destruction */
+       eError = IterateOverChildren(psBase, psHandleData,
+                                    HandleUnrefAndMaybeMarkForFreeWrapper);
+       PVR_LOG_RETURN_IF_ERROR(eError, "HandleUnrefAndMaybeMarkForFreeWrapper");
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR HandleFreePrivDataWrapper(PVRSRV_HANDLE_BASE *psBase,
+                                              IMG_HANDLE hHandle)
+{
+       HANDLE_DATA *psHandleData;
+       PVRSRV_ERROR eError = GetHandleData(psBase, &psHandleData, hHandle,
+                                           PVRSRV_HANDLE_TYPE_NONE);
+       PVR_RETURN_IF_ERROR(eError);
+
+       return HandleFreePrivData(psBase, psHandleData, hHandle, PVRSRV_HANDLE_TYPE_NONE);
+}
+
+static PVRSRV_ERROR HandleFreePrivData(PVRSRV_HANDLE_BASE *psBase,
+                                       HANDLE_DATA *psHandleData,
+                                       IMG_HANDLE hHandle,
+                                       PVRSRV_HANDLE_TYPE eType)
+{
+       PVRSRV_ERROR eError;
+
+       /* Call the release data callback for each reference on the handle */
+       if (psHandleData->pfnReleaseData != NULL)
+       {
+               eError = psHandleData->pfnReleaseData(psHandleData->pvData);
+               if (eError != PVRSRV_OK)
+               {
+                       if (IsRetryError(eError))
+                       {
+                               PVR_DPF((PVR_DBG_MESSAGE, "%s: Got retry while calling release "
+                                               "data callback for handle %p of type = %s", __func__,
+                                               hHandle, HandleTypeToString(psHandleData->eType)));
+                       }
+                       else
+                       {
+                               PVR_LOG_ERROR(eError, "pfnReleaseData");
+                       }
+
+                       return eError;
+               }
+
+               /* we don't need this so make sure it's not called on
+                * the pvData for the second time
+                */
+               psHandleData->pfnReleaseData = NULL;
+       }
+
+       /* Free children's data */
+       eError = IterateOverChildren(psBase, psHandleData,
+                                    HandleFreePrivDataWrapper);
+       PVR_LOG_RETURN_IF_ERROR(eError, "IterateOverChildren->HandleFreePrivData");
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR HandleFreeDestroyWrapper(PVRSRV_HANDLE_BASE *psBase,
+                                             IMG_HANDLE hHandle)
+{
+       HANDLE_DATA *psHandleData;
+       PVRSRV_ERROR eError = GetHandleData(psBase, &psHandleData, hHandle,
+                                           PVRSRV_HANDLE_TYPE_NONE);
+       PVR_RETURN_IF_ERROR(eError);
+
+       return HandleFreeDestroy(psBase, psHandleData, hHandle, PVRSRV_HANDLE_TYPE_NONE);
+}
+
+static PVRSRV_ERROR HandleFreeDestroy(PVRSRV_HANDLE_BASE *psBase,
+                                      HANDLE_DATA *psHandleData,
+                                      IMG_HANDLE hHandle,
+                                      PVRSRV_HANDLE_TYPE eType)
+{
+       HANDLE_DATA *psReleasedHandleData;
+       PVRSRV_ERROR eError;
+
+       eError = UnlinkFromParent(psBase, psHandleData);
+       PVR_LOG_RETURN_IF_ERROR(eError, "UnlinkFromParent");
+
+       if (!TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+       {
+               HAND_KEY aKey;
+               IMG_HANDLE hRemovedHandle;
+
+               InitKey(aKey, psBase, psHandleData->pvData, psHandleData->eType,
+                       ParentIfPrivate(psHandleData));
+
+               hRemovedHandle = (IMG_HANDLE) HASH_Remove_Extended(psBase->psHashTab,
+                                                                  aKey);
+
+               PVR_ASSERT(hRemovedHandle != NULL);
+               PVR_ASSERT(hRemovedHandle == psHandleData->hHandle);
+               PVR_UNREFERENCED_PARAMETER(hRemovedHandle);
+       }
+
+       /* Free children */
+       eError = IterateOverChildren(psBase, psHandleData, HandleFreeDestroyWrapper);
+       PVR_LOG_RETURN_IF_ERROR(eError, "IterateOverChildren->HandleFreeDestroy");
+
+       eError = gpsHandleFuncs->pfnReleaseHandle(psBase->psImplBase,
+                                                 psHandleData->hHandle,
+                                                 (void **)&psReleasedHandleData);
+       OSFreeMem(psHandleData);
+       PVR_LOG_RETURN_IF_ERROR(eError, "pfnReleaseHandle");
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR DestroyHandle(PVRSRV_HANDLE_BASE *psBase,
+                                  IMG_HANDLE hHandle,
+                                  PVRSRV_HANDLE_TYPE eType,
+                                  IMG_BOOL bReleaseLock)
+{
+       PVRSRV_ERROR eError;
+       HANDLE_DATA *psHandleData = NULL;
+
+       PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+       PVR_ASSERT(gpsHandleFuncs);
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase");
+
+       eError = GetHandleData(psBase, &psHandleData, hHandle, eType);
+       PVR_RETURN_IF_ERROR(eError);
+
+       eError = HandleUnrefAndMaybeMarkForFree(psBase, psHandleData, hHandle, eType);
+       PVR_RETURN_IF_ERROR(eError);
+
+       if (bReleaseLock)
+       {
+               UnlockHandle(psBase);
+       }
+
+       eError = HandleFreePrivData(psBase, psHandleData, hHandle, eType);
+       if (eError != PVRSRV_OK)
+       {
+               if (bReleaseLock)
+               {
+                       LockHandle(psBase);
+               }
+
+               /* If the data could not be freed due to a temporary condition the
+                * handle must be kept alive so that the next destroy call can try again */
+               if (IsRetryError(eError))
+               {
+                       psHandleData->bCanLookup = IMG_TRUE;
+               }
+
+               return eError;
+       }
+
+       if (bReleaseLock)
+       {
+               LockHandle(psBase);
+       }
+
+       return HandleFreeDestroy(psBase, psHandleData, hHandle, eType);
+}
+
+/*!
+*******************************************************************************
+ @Function      PVRSRVDestroyHandle
+ @Description   Destroys a handle that is no longer needed. Acquires the
+                handle lock for the duration of the call.
+                Can return RETRY or KERNEL_CCB_FULL if resource could not be
+                destroyed, caller should retry sometime later.
+ @Input         psBase - pointer to handle base structure
+                hHandle - handle from client
+                eType - handle type
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+PVRSRV_ERROR PVRSRVDestroyHandle(PVRSRV_HANDLE_BASE *psBase,
+                                 IMG_HANDLE hHandle,
+                                 PVRSRV_HANDLE_TYPE eType)
+{
+       PVRSRV_ERROR eError;
+
+       LockHandle(psBase);
+       eError = DestroyHandle(psBase, hHandle, eType, IMG_FALSE);
+       UnlockHandle(psBase);
+
+       return eError;
+}
+
+/*!
+*******************************************************************************
+ @Function      PVRSRVDestroyHandleUnlocked
+ @Description   Destroys a handle that is no longer needed without
+                acquiring/releasing the handle lock. The function assumes you
+                hold the lock when called.
+                Can return RETRY or KERNEL_CCB_FULL if resource could not be
+                destroyed, caller should retry sometime later.
+ @Input         psBase - pointer to handle base structure
+                hHandle - handle from client
+                eType - handle type
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+PVRSRV_ERROR PVRSRVDestroyHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
+                                         IMG_HANDLE hHandle,
+                                         PVRSRV_HANDLE_TYPE eType)
+{
+       return DestroyHandle(psBase, hHandle, eType, IMG_FALSE);
+}
+
+/*!
+*******************************************************************************
+ @Function      PVRSRVDestroyHandleStagedUnlocked
+ @Description   Destroys a handle that is no longer needed without
+                acquiring/releasing the handle lock. The function assumes you
+                hold the lock when called. This function, unlike
+                PVRSRVDestroyHandleUnlocked(), releases the handle lock while
+                destroying handle private data. This is done so that other
+                bridge calls can be serviced while the private data is being
+                destroyed.
+                Can return RETRY or KERNEL_CCB_FULL if resource could not be
+                destroyed, caller should retry sometime later.
+ @Input         psBase - pointer to handle base structure
+                hHandle - handle from client
+                eType - handle type
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+PVRSRV_ERROR PVRSRVDestroyHandleStagedUnlocked(PVRSRV_HANDLE_BASE *psBase,
+                                               IMG_HANDLE hHandle,
+                                               PVRSRV_HANDLE_TYPE eType)
+{
+       return DestroyHandle(psBase, hHandle, eType, IMG_TRUE);
+}
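+
+/*
+ * Usage sketch (illustrative only; hObj and eMyType are hypothetical
+ * names): destruction can legitimately fail with a retry-class error, for
+ * example when the kernel CCB is full; in that case the handle is kept
+ * alive and the caller is expected to call destroy again later:
+ *
+ *     eError = PVRSRVDestroyHandle(psBase, hObj, eMyType);
+ *     if (IsRetryError(eError))
+ *     {
+ *             ... keep hObj and retry the destroy at a later point ...
+ *     }
+ */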
+
+/*!
+*******************************************************************************
+ @Function      PVRSRVAllocHandleBase
+ @Description   Allocate a handle base structure for a process
+ @Input         eType - handle type
+ @Output        ppsBase - points to handle base structure pointer
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase,
+                                   PVRSRV_HANDLE_BASE_TYPE eType)
+{
+       PVRSRV_HANDLE_BASE *psBase;
+       PVRSRV_ERROR eError;
+
+       PVR_LOG_RETURN_IF_FALSE(gpsHandleFuncs != NULL, "handle management not initialised",
+                         PVRSRV_ERROR_NOT_READY);
+       PVR_LOG_RETURN_IF_INVALID_PARAM(ppsBase != NULL, "ppsBase");
+
+       psBase = OSAllocZMem(sizeof(*psBase));
+       PVR_LOG_RETURN_IF_NOMEM(psBase, "psBase");
+
+       eError = OSLockCreate(&psBase->hLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", ErrorFreeHandleBase);
+
+       psBase->eType = eType;
+
+       LockHandle(psBase);
+
+       eError = gpsHandleFuncs->pfnCreateHandleBase(&psBase->psImplBase);
+       PVR_GOTO_IF_ERROR(eError, ErrorUnlock);
+
+       psBase->psHashTab = HASH_Create_Extended(HANDLE_HASH_TAB_INIT_SIZE,
+                                                sizeof(HAND_KEY),
+                                                HASH_Func_Default,
+                                                HASH_Key_Comp_Default);
+       PVR_LOG_GOTO_IF_FALSE(psBase->psHashTab != NULL, "couldn't create data pointer"
+                         " hash table", ErrorDestroyHandleBase);
+
+       *ppsBase = psBase;
+
+       UnlockHandle(psBase);
+
+       return PVRSRV_OK;
+
+ErrorDestroyHandleBase:
+       (void)gpsHandleFuncs->pfnDestroyHandleBase(psBase->psImplBase);
+
+ErrorUnlock:
+       UnlockHandle(psBase);
+       OSLockDestroy(psBase->hLock);
+
+ErrorFreeHandleBase:
+       OSFreeMem(psBase);
+
+       return eError;
+}
+
+#if defined(DEBUG)
+typedef struct _COUNT_HANDLE_DATA_
+{
+       PVRSRV_HANDLE_BASE *psBase;
+       IMG_UINT32 uiHandleDataCount;
+} COUNT_HANDLE_DATA;
+
+/* Used to count the number of handles that have data associated with them */
+static PVRSRV_ERROR CountHandleDataWrapper(IMG_HANDLE hHandle, void *pvData)
+{
+       COUNT_HANDLE_DATA *psData = (COUNT_HANDLE_DATA *)pvData;
+       HANDLE_DATA *psHandleData = NULL;
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(gpsHandleFuncs);
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psData != NULL, "psData");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psData->psBase != NULL, "psData->psBase");
+
+       eError = GetHandleData(psData->psBase,
+                              &psHandleData,
+                              hHandle,
+                              PVRSRV_HANDLE_TYPE_NONE);
+       PVR_LOG_RETURN_IF_ERROR(eError, "GetHandleData");
+
+       if (psHandleData != NULL)
+       {
+               psData->uiHandleDataCount++;
+       }
+
+       return PVRSRV_OK;
+}
+
+/* Print a handle in the handle base. Used with the iterator callback. */
+static PVRSRV_ERROR ListHandlesInBase(IMG_HANDLE hHandle, void *pvData)
+{
+       PVRSRV_HANDLE_BASE *psBase = (PVRSRV_HANDLE_BASE*) pvData;
+       HANDLE_DATA *psHandleData = NULL;
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(gpsHandleFuncs);
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase");
+
+       eError = GetHandleData(psBase,
+                              &psHandleData,
+                              hHandle,
+                              PVRSRV_HANDLE_TYPE_NONE);
+       PVR_LOG_RETURN_IF_ERROR(eError, "GetHandleData");
+
+       if (psHandleData != NULL)
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                       "    Handle: %6u, CanLookup: %u, LookupCount: %3u, Type: %s (%u), pvData<%p>",
+                      (IMG_UINT32) (uintptr_t) psHandleData->hHandle, psHandleData->bCanLookup,
+                      psHandleData->iLookupCount, HandleTypeToString(psHandleData->eType),
+                      psHandleData->eType, psHandleData->pvData));
+       }
+
+       return PVRSRV_OK;
+}
+
+#endif /* defined(DEBUG) */
+
+static INLINE IMG_BOOL _CheckIfMaxTimeExpired(IMG_UINT64 ui64TimeStart, IMG_UINT64 ui64MaxBridgeTime)
+{
+       /* unsigned arithmetic is well defined so this will wrap around correctly */
+       return (IMG_BOOL)((OSClockns64() - ui64TimeStart) >= ui64MaxBridgeTime);
+}
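+
+/* Editor's note - a worked example of the wrap-around behaviour relied on
+ * above: with 64-bit unsigned nanosecond timestamps, if ui64TimeStart were
+ * 0xFFFFFFFFFFFFFF00 and OSClockns64() had since wrapped to 0x200, then
+ * (0x200 - 0xFFFFFFFFFFFFFF00) evaluates to 0x300 modulo 2^64, which is the
+ * true elapsed time, so the comparison against ui64MaxBridgeTime remains
+ * correct across the wrap.
+ */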
+
+static PVRSRV_ERROR FreeKernelHandlesWrapperIterKernel(IMG_HANDLE hHandle, void *pvData)
+{
+       FREE_KERNEL_HANDLE_DATA *psData = (FREE_KERNEL_HANDLE_DATA *)pvData;
+       HANDLE_DATA *psKernelHandleData = NULL;
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(gpsHandleFuncs);
+
+       /* Get kernel handle data. */
+       eError = GetHandleData(KERNEL_HANDLE_BASE,
+                           &psKernelHandleData,
+                           hHandle,
+                           PVRSRV_HANDLE_TYPE_NONE);
+       PVR_LOG_RETURN_IF_ERROR(eError, "GetHandleData");
+
+       if (psKernelHandleData->pvData == psData->psProcessHandleData->pvData)
+       {
+               /* This kernel handle belongs to our process handle. */
+               psData->hKernelHandle = hHandle;
+       }
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR FreeKernelHandlesWrapperIterProcess(IMG_HANDLE hHandle, void *pvData)
+{
+       FREE_KERNEL_HANDLE_DATA *psData = (FREE_KERNEL_HANDLE_DATA *)pvData;
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(gpsHandleFuncs);
+
+       /* Get process handle data. */
+       eError = GetHandleData(psData->psBase,
+                           &psData->psProcessHandleData,
+                           hHandle,
+                           PVRSRV_HANDLE_TYPE_NONE);
+       PVR_LOG_RETURN_IF_ERROR(eError, "GetHandleData");
+
+       if (psData->psProcessHandleData->eFlag == PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+#if defined(SUPPORT_INSECURE_EXPORT)
+               || psData->psProcessHandleData->eType == PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT
+#endif
+               )
+       {
+               /* Only multi alloc process handles might be in kernel handle base. */
+               psData->hKernelHandle = NULL;
+               /* Iterate over kernel handles. */
+               eError = gpsHandleFuncs->pfnIterateOverHandles(KERNEL_HANDLE_BASE->psImplBase,
+                                                                       &FreeKernelHandlesWrapperIterKernel,
+                                                                       (void *)psData);
+               PVR_LOG_RETURN_IF_FALSE(eError == PVRSRV_OK, "failed to iterate over kernel handles",
+                                 eError);
+
+               if (psData->hKernelHandle)
+               {
+                       /* Release kernel handle which belongs to our process handle. */
+                       eError = gpsHandleFuncs->pfnReleaseHandle(KERNEL_HANDLE_BASE->psImplBase,
+                                               psData->hKernelHandle,
+                                               NULL);
+                       PVR_LOG_RETURN_IF_FALSE(eError == PVRSRV_OK, "couldn't release kernel handle",
+                                         eError);
+               }
+       }
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR FreeHandleDataWrapper(IMG_HANDLE hHandle, void *pvData)
+{
+       FREE_HANDLE_DATA *psData = (FREE_HANDLE_DATA *)pvData;
+       HANDLE_DATA *psHandleData = NULL;
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(gpsHandleFuncs);
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psData != NULL, "psData");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psData->psBase != NULL, "psData->psBase");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psData->eHandleFreeType != PVRSRV_HANDLE_TYPE_NONE,
+                                 "psData->eHandleFreeType");
+
+       eError = GetHandleData(psData->psBase,
+                              &psHandleData,
+                              hHandle,
+                              PVRSRV_HANDLE_TYPE_NONE);
+       PVR_LOG_RETURN_IF_ERROR(eError, "GetHandleData");
+
+       if (psHandleData == NULL || psHandleData->eType != psData->eHandleFreeType)
+       {
+               return PVRSRV_OK;
+       }
+
+       PVR_ASSERT(psHandleData->bCanLookup && psHandleData->iLookupCount == 0);
+
+       if (psHandleData->bCanLookup)
+       {
+               if (psHandleData->pfnReleaseData != NULL)
+               {
+                       eError = psHandleData->pfnReleaseData(psHandleData->pvData);
+                       if (eError == PVRSRV_ERROR_RETRY)
+                       {
+                               PVR_DPF((PVR_DBG_MESSAGE, "%s: Got retry while calling release "
+                                       "data callback for handle %p of type = %s", __func__,
+                                       hHandle, HandleTypeToString(psHandleData->eType)));
+
+                               return eError;
+                       }
+                       else if (eError != PVRSRV_OK)
+                       {
+                               return eError;
+                       }
+               }
+
+               psHandleData->bCanLookup = IMG_FALSE;
+       }
+
+       if (!TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+       {
+               HAND_KEY aKey;
+               IMG_HANDLE hRemovedHandle;
+
+               InitKey(aKey,
+                       psData->psBase,
+                       psHandleData->pvData,
+                       psHandleData->eType,
+                       ParentIfPrivate(psHandleData));
+
+               hRemovedHandle = (IMG_HANDLE)HASH_Remove_Extended(psData->psBase->psHashTab, aKey);
+
+               PVR_ASSERT(hRemovedHandle != NULL);
+               PVR_ASSERT(hRemovedHandle == psHandleData->hHandle);
+               PVR_UNREFERENCED_PARAMETER(hRemovedHandle);
+       }
+
+       eError = gpsHandleFuncs->pfnSetHandleData(psData->psBase->psImplBase, hHandle, NULL);
+       PVR_RETURN_IF_ERROR(eError);
+
+       OSFreeMem(psHandleData);
+
+       /* If we have reached the end of the time slice, release the global
+        * lock, invoke the scheduler and then reacquire the lock */
+       if ((psData->ui64MaxBridgeTime != 0) && _CheckIfMaxTimeExpired(psData->ui64TimeStart, psData->ui64MaxBridgeTime))
+       {
+               PVR_DPF((PVR_DBG_MESSAGE,
+                        "%s: Lock timeout (timeout: %" IMG_UINT64_FMTSPEC")",
+                        __func__,
+                        psData->ui64MaxBridgeTime));
+               UnlockHandle(psData->psBase);
+               /* Invoke the scheduler to check if other processes are waiting for the lock */
+               OSReleaseThreadQuanta();
+               LockHandle(psData->psBase);
+               /* Restart the time slice: reset the start time used for the lock timeout */
+               psData->ui64TimeStart = OSClockns64();
+               PVR_DPF((PVR_DBG_MESSAGE, "%s: Lock acquired again", __func__));
+       }
+
+       return PVRSRV_OK;
+}
+
+/* The Ordered Array of PVRSRV_HANDLE_TYPE Enum Entries.
+ *
+ *   Some handles must be destroyed prior to other handles;
+ *   such relationships are established with respect to handle types.
+ *   Therefore the elements of this array have to maintain a specific order,
+ *   e.g. PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET must be placed
+ *   before PVRSRV_HANDLE_TYPE_RGX_FREELIST.
+ *
+ *   If the ordering is incorrect the driver may fail in its cleanup
+ *   routines. Unfortunately, we can only rely on the actual definition of
+ *   the array; there is no explicit information about all the relationships
+ *   between handle types. These relationships do not necessarily come from
+ *   bridge-specified handle attributes such as 'sub handle' and 'parent
+ *   handle'. They may come from internal/private ref-counters contained by
+ *   objects referenced by our kernel handles.
+ *
+ *   For example, at the bridge level, PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET
+ *   and PVRSRV_HANDLE_TYPE_RGX_FREELIST have no explicit relationship, meaning
+ *   neither of them is a sub-handle of the other.
+ *   However, the freelist contains an internal ref-count that is decremented
+ *   by the destroy routine for KM_HW_RT_DATASET.
+ *
+ *   BE CAREFUL when adding/deleting/moving handle types. (An illustrative
+ *   ordering self-check sketch follows the array below.)
+ */
+static const PVRSRV_HANDLE_TYPE g_aeOrderedFreeList[] =
+{
+       PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+       PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
+       PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC,
+       PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET,
+       PVRSRV_HANDLE_TYPE_RGX_FREELIST,
+       PVRSRV_HANDLE_TYPE_RGX_MEMORY_BLOCK,
+       PVRSRV_HANDLE_TYPE_RGX_POPULATION,
+       PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+       PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+       PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT,
+       PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT,
+       PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+       PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT,
+       PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT,
+#if defined(PVR_TESTING_UTILS) && defined(SUPPORT_VALIDATION)
+       PVRSRV_HANDLE_TYPE_RGX_SERVER_GPUMAP_CONTEXT,
+#endif
+       PVRSRV_HANDLE_TYPE_RI_HANDLE,
+       PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE,
+       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+       PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER,
+       PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT,
+       PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER,
+       PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING,
+       PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+       PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+       PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT,
+       PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+       PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_PAGELIST,
+       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_SECURE_EXPORT,
+       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+       PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT,
+       PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+       PVRSRV_HANDLE_TYPE_DC_PIN_HANDLE,
+       PVRSRV_HANDLE_TYPE_DC_BUFFER,
+       PVRSRV_HANDLE_TYPE_DC_DISPLAY_CONTEXT,
+       PVRSRV_HANDLE_TYPE_DC_DEVICE,
+       PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+       PVRSRV_HANDLE_TYPE_DI_CONTEXT,
+       PVRSRV_HANDLE_TYPE_MM_PLAT_CLEANUP
+};
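+
+/* Editor's note: illustrative sketch only, not part of the imported driver.
+ * A minimal self-check of the ordering constraint documented above could look
+ * like the function below; the helper name is hypothetical and nothing in
+ * this file calls it.
+ */
+static void _SanityCheckOrderedFreeList(void)
+{
+       IMG_UINT32 i;
+       IMG_UINT32 uiDataSetIdx = 0;
+       IMG_UINT32 uiFreeListIdx = 0;
+
+       for (i = 0; i < ARRAY_SIZE(g_aeOrderedFreeList); i++)
+       {
+               if (g_aeOrderedFreeList[i] == PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET)
+               {
+                       uiDataSetIdx = i;
+               }
+               else if (g_aeOrderedFreeList[i] == PVRSRV_HANDLE_TYPE_RGX_FREELIST)
+               {
+                       uiFreeListIdx = i;
+               }
+       }
+
+       /* The HW RT dataset entry must precede the freelist entry */
+       PVR_ASSERT(uiDataSetIdx < uiFreeListIdx);
+}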
+
+/*!
+*******************************************************************************
+ @Function      PVRSRVFreeKernelHandles
+ @Description   Free kernel handles which belong to process handles
+ @Input         psBase - pointer to handle base structure
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+PVRSRV_ERROR PVRSRVFreeKernelHandles(PVRSRV_HANDLE_BASE *psBase)
+{
+       FREE_KERNEL_HANDLE_DATA sHandleData = {NULL};
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(gpsHandleFuncs);
+
+       LockHandle(psBase);
+
+       sHandleData.psBase = psBase;
+       /* Iterate over process handles. */
+       eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase,
+                                                               &FreeKernelHandlesWrapperIterProcess,
+                                                               (void *)&sHandleData);
+       PVR_LOG_GOTO_IF_ERROR(eError, "pfnIterateOverHandles", ExitUnlock);
+
+       eError = PVRSRV_OK;
+
+ExitUnlock:
+       UnlockHandle(psBase);
+
+       return eError;
+}
+
+/*!
+*******************************************************************************
+ @Function      PVRSRVRetrieveProcessHandleBase
+ @Description   Returns a pointer to the process handle base for the current
+                process. If the current process is the cleanup thread, then the
+                process handle base for the process currently being cleaned up
+                is returned
+ @Return        Pointer to the process handle base, or NULL if not found.
+******************************************************************************/
+PVRSRV_HANDLE_BASE *PVRSRVRetrieveProcessHandleBase(void)
+{
+       PVRSRV_HANDLE_BASE *psHandleBase = NULL;
+       PROCESS_HANDLE_BASE *psProcHandleBase = NULL;
+       IMG_PID ui32PurgePid = PVRSRVGetPurgeConnectionPid();
+       IMG_PID uiCleanupPid = PVRSRVCleanupThreadGetPid();
+       uintptr_t uiCleanupTid = PVRSRVCleanupThreadGetTid();
+
+       OSLockAcquire(g_hProcessHandleBaseLock);
+
+       /* Check to see if we're being called from the cleanup thread... */
+       if ((OSGetCurrentProcessID() == uiCleanupPid) &&
+           (OSGetCurrentThreadID() == uiCleanupTid) &&
+           (ui32PurgePid > 0))
+       {
+               /* Check to see if the cleanup thread has already removed the
+                * process handle base from the HASH table.
+                */
+               psHandleBase = g_psProcessHandleBaseBeingFreed;
+               /* psHandleBase shouldn't be null, as cleanup thread
+                * should be removing this from the HASH table before
+                * we get here, so assert if not.
+                */
+               PVR_ASSERT(psHandleBase);
+       }
+       else
+       {
+               /* Not being called from the cleanup thread, so return the process
+                * handle base for the current process.
+                */
+               psProcHandleBase = (PROCESS_HANDLE_BASE *)
+                   HASH_Retrieve(g_psProcessHandleBaseTable, OSGetCurrentClientProcessIDKM());
+       }
+
+       OSLockRelease(g_hProcessHandleBaseLock);
+
+       if (psHandleBase == NULL && psProcHandleBase != NULL)
+       {
+               psHandleBase = psProcHandleBase->psHandleBase;
+       }
+       return psHandleBase;
+}
+
+/*!
+*******************************************************************************
+ @Function      PVRSRVAcquireProcessHandleBase
+ @Description   Increments the reference count on the process handle base
+                identified by uiPid and returns a pointer to the base. If the
+                handle base does not exist it will be allocated.
+ @Input         uiPid - PID of a process
+ @Output        ppsBase - pointer to a handle base for the process identified by
+                          uiPid
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAcquireProcessHandleBase(IMG_PID uiPid, PROCESS_HANDLE_BASE **ppsBase)
+{
+       PROCESS_HANDLE_BASE *psBase;
+       PVRSRV_ERROR eError;
+
+       OSLockAcquire(g_hProcessHandleBaseLock);
+
+       psBase = (PROCESS_HANDLE_BASE*) HASH_Retrieve(g_psProcessHandleBaseTable, uiPid);
+
+       /* If there is no existing handle base for this process, allocate one */
+       if (psBase == NULL)
+       {
+               IMG_BOOL bSuccess;
+
+               psBase = OSAllocZMem(sizeof(*psBase));
+               PVR_LOG_GOTO_IF_NOMEM(psBase, eError, ErrorUnlock);
+
+               /* Allocate handle base for this process */
+               eError = PVRSRVAllocHandleBase(&psBase->psHandleBase, PVRSRV_HANDLE_BASE_TYPE_PROCESS);
+               PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVAllocHandleBase", ErrorFreeProcessHandleBase);
+
+               /* Insert the handle base into the global hash table. Pre-set the
+                * error code so the failure path does not return PVRSRV_OK. */
+               eError = PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE;
+               bSuccess = HASH_Insert(g_psProcessHandleBaseTable, uiPid, (uintptr_t) psBase);
+               PVR_LOG_GOTO_IF_FALSE(bSuccess, "HASH_Insert failed", ErrorFreeHandleBase);
+       }
+
+       OSAtomicIncrement(&psBase->iRefCount);
+
+       OSLockRelease(g_hProcessHandleBaseLock);
+
+       *ppsBase = psBase;
+
+       return PVRSRV_OK;
+
+ErrorFreeHandleBase:
+       PVRSRVFreeHandleBase(psBase->psHandleBase, 0);
+ErrorFreeProcessHandleBase:
+       OSFreeMem(psBase);
+ErrorUnlock:
+       OSLockRelease(g_hProcessHandleBaseLock);
+
+       return eError;
+}
+
+/*!
+*******************************************************************************
+ @Function      PVRSRVReleaseProcessHandleBase
+ @Description   Decrements reference count on a process handle base psBase
+                for a process identified by uiPid. If the reference count
+                reaches 0 the handle base will be freed.
+ @Input         psBase - pointer to a process handle base
+ @Input         uiPid - PID of a process
+ @Input         ui64MaxBridgeTime - maximum time a handle destroy operation
+                                    can hold the handle base lock (after that
+                                    time the lock will be released and reacquired
+                                    for another time slice)
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+PVRSRV_ERROR PVRSRVReleaseProcessHandleBase(PROCESS_HANDLE_BASE *psBase, IMG_PID uiPid,
+                                            IMG_UINT64 ui64MaxBridgeTime)
+{
+       PVRSRV_ERROR eError;
+       IMG_INT iRefCount;
+       uintptr_t uiHashValue;
+
+       OSLockAcquire(g_hProcessHandleBaseLock);
+
+       iRefCount = OSAtomicDecrement(&psBase->iRefCount);
+
+       if (iRefCount != 0)
+       {
+               OSLockRelease(g_hProcessHandleBaseLock);
+               return PVRSRV_OK;
+       }
+
+       /* in case the refcount becomes 0 we can remove the process handle base
+        * and all related objects */
+
+       uiHashValue = HASH_Remove(g_psProcessHandleBaseTable, uiPid);
+       OSLockRelease(g_hProcessHandleBaseLock);
+
+       PVR_LOG_RETURN_IF_FALSE(uiHashValue != 0, "HASH_Remove failed",
+                               PVRSRV_ERROR_UNABLE_TO_REMOVE_HASH_VALUE);
+
+       eError = PVRSRVFreeKernelHandles(psBase->psHandleBase);
+       PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVFreeKernelHandles");
+
+       eError = PVRSRVFreeHandleBase(psBase->psHandleBase, ui64MaxBridgeTime);
+       PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVFreeHandleBase");
+
+       OSFreeMem(psBase);
+
+       return PVRSRV_OK;
+}
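+
+/* Editor's note: illustrative usage sketch, not part of the imported driver.
+ * It shows the intended pairing of PVRSRVAcquireProcessHandleBase() and
+ * PVRSRVReleaseProcessHandleBase(); the wrapper name is hypothetical and the
+ * bridge-time argument of 0 simply disables time slicing.
+ */
+static PVRSRV_ERROR _ExampleUseProcessHandleBase(IMG_PID uiPid)
+{
+       PROCESS_HANDLE_BASE *psProcBase;
+       PVRSRV_ERROR eError;
+
+       eError = PVRSRVAcquireProcessHandleBase(uiPid, &psProcBase);
+       PVR_RETURN_IF_ERROR(eError);
+
+       /* ... allocate or look up handles against psProcBase->psHandleBase ... */
+
+       return PVRSRVReleaseProcessHandleBase(psProcBase, uiPid, 0);
+}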
+
+/*!
+*******************************************************************************
+ @Function      PVRSRVFreeHandleBase
+ @Description   Free a handle base structure
+ @Input         psBase - pointer to handle base structure
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase, IMG_UINT64 ui64MaxBridgeTime)
+{
+#if defined(DEBUG)
+       COUNT_HANDLE_DATA sCountData = {NULL};
+#endif
+       FREE_HANDLE_DATA sHandleData = {NULL};
+       IMG_UINT32 i;
+       PVRSRV_ERROR eError;
+       IMG_PID uiCleanupPid = PVRSRVCleanupThreadGetPid();
+       uintptr_t uiCleanupTid = PVRSRVCleanupThreadGetTid();
+
+       PVR_ASSERT(gpsHandleFuncs);
+
+       LockHandle(psBase);
+
+       /* If this is a process handle base being freed by the cleanup
+        * thread, store this in g_psProcessHandleBaseBeingFreed
+        */
+       if ((OSGetCurrentProcessID() == uiCleanupPid) &&
+           (OSGetCurrentThreadID() == uiCleanupTid) &&
+           (psBase->eType == PVRSRV_HANDLE_BASE_TYPE_PROCESS))
+       {
+               g_psProcessHandleBaseBeingFreed = psBase;
+       }
+
+       sHandleData.psBase = psBase;
+       sHandleData.ui64TimeStart = OSClockns64();
+       sHandleData.ui64MaxBridgeTime = ui64MaxBridgeTime;
+
+
+#if defined(DEBUG)
+
+       sCountData.psBase = psBase;
+
+       eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase,
+                                                      &CountHandleDataWrapper,
+                                                      (void *)&sCountData);
+       PVR_LOG_GOTO_IF_ERROR(eError, "pfnIterateOverHandles", ExitUnlock);
+
+       if (sCountData.uiHandleDataCount != 0)
+       {
+               IMG_BOOL bList = (IMG_BOOL)(sCountData.uiHandleDataCount < HANDLE_DEBUG_LISTING_MAX_NUM);
+
+               PVR_DPF((PVR_DBG_WARNING,
+                        "%s: %u remaining handles in handle base 0x%p "
+                        "(PVRSRV_HANDLE_BASE_TYPE %u).%s",
+                        __func__,
+                        sCountData.uiHandleDataCount,
+                        psBase,
+                        psBase->eType,
+                        bList ? "": " Skipping details, too many items..."));
+
+               if (bList)
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "-------- Listing Handles --------"));
+                       (void) gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase,
+                                                                    &ListHandlesInBase,
+                                                                    psBase);
+                       PVR_DPF((PVR_DBG_WARNING, "-------- Done Listing    --------"));
+               }
+       }
+
+#endif /* defined(DEBUG) */
+
+       /*
+        * As we're freeing handles based on type, make sure all
+        * handles have actually had their data freed to avoid
+        * resources being leaked
+        */
+       for (i = 0; i < ARRAY_SIZE(g_aeOrderedFreeList); i++)
+       {
+               sHandleData.eHandleFreeType = g_aeOrderedFreeList[i];
+
+               /* Make sure all handles have been freed before destroying the handle base */
+               eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase,
+                                                              &FreeHandleDataWrapper,
+                                                              (void *)&sHandleData);
+               PVR_GOTO_IF_ERROR(eError, ExitUnlock);
+       }
+
+
+       if (psBase->psHashTab != NULL)
+       {
+               HASH_Delete(psBase->psHashTab);
+       }
+
+       eError = gpsHandleFuncs->pfnDestroyHandleBase(psBase->psImplBase);
+       PVR_GOTO_IF_ERROR(eError, ExitUnlock);
+
+       UnlockHandle(psBase);
+       OSLockDestroy(psBase->hLock);
+       OSFreeMem(psBase);
+
+       return eError;
+
+ExitUnlock:
+       if ((OSGetCurrentProcessID() == uiCleanupPid) &&
+               (OSGetCurrentThreadID() == uiCleanupTid))
+       {
+               g_psProcessHandleBaseBeingFreed = NULL;
+       }
+       UnlockHandle(psBase);
+
+       return eError;
+}
+
+/*!
+*******************************************************************************
+ @Function      PVRSRVHandleInit
+ @Description   Initialise handle management
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+PVRSRV_ERROR PVRSRVHandleInit(void)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(gpsKernelHandleBase == NULL);
+       PVR_ASSERT(gpsHandleFuncs == NULL);
+       PVR_ASSERT(g_hProcessHandleBaseLock == NULL);
+       PVR_ASSERT(g_psProcessHandleBaseTable == NULL);
+       PVR_ASSERT(!gbLockInitialised);
+
+       eError = OSLockCreate(&gKernelHandleLock);
+       PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate:1");
+
+       eError = OSLockCreate(&g_hProcessHandleBaseLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate:2", ErrorHandleDeinit);
+
+       gbLockInitialised = IMG_TRUE;
+
+       eError = PVRSRVHandleGetFuncTable(&gpsHandleFuncs);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVHandleGetFuncTable", ErrorHandleDeinit);
+
+       eError = PVRSRVAllocHandleBase(&gpsKernelHandleBase,
+                                      PVRSRV_HANDLE_BASE_TYPE_GLOBAL);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVAllocHandleBase", ErrorHandleDeinit);
+
+       g_psProcessHandleBaseTable = HASH_Create(HANDLE_PROC_HANDLE_HASH_INIT_SIZE);
+       PVR_LOG_GOTO_IF_NOMEM(g_psProcessHandleBaseTable, eError, ErrorHandleDeinit);
+
+       eError = gpsHandleFuncs->pfnEnableHandlePurging(gpsKernelHandleBase->psImplBase);
+       PVR_LOG_GOTO_IF_ERROR(eError, "pfnEnableHandlePurging", ErrorHandleDeinit);
+
+       return PVRSRV_OK;
+
+ErrorHandleDeinit:
+       (void) PVRSRVHandleDeInit();
+
+       return eError;
+}
+
+/*!
+*******************************************************************************
+ @Function      PVRSRVHandleDeInit
+ @Description   De-initialise handle management
+ @Return        Error code or PVRSRV_OK
+******************************************************************************/
+PVRSRV_ERROR PVRSRVHandleDeInit(void)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       if (gpsHandleFuncs != NULL)
+       {
+               if (gpsKernelHandleBase != NULL)
+               {
+                       eError = PVRSRVFreeHandleBase(gpsKernelHandleBase, 0 /* do not release bridge lock */);
+                       if (eError == PVRSRV_OK)
+                       {
+                               gpsKernelHandleBase = NULL;
+                       }
+                       else
+                       {
+                               PVR_LOG_ERROR(eError, "PVRSRVFreeHandleBase");
+                       }
+               }
+
+               if (eError == PVRSRV_OK)
+               {
+                       gpsHandleFuncs = NULL;
+               }
+       }
+       else
+       {
+               /* If we don't have a handle function table we shouldn't have a handle base either */
+               PVR_ASSERT(gpsKernelHandleBase == NULL);
+       }
+
+       if (g_psProcessHandleBaseTable != NULL)
+       {
+               HASH_Delete(g_psProcessHandleBaseTable);
+               g_psProcessHandleBaseTable = NULL;
+       }
+
+       if (g_hProcessHandleBaseLock != NULL)
+       {
+               OSLockDestroy(g_hProcessHandleBaseLock);
+               g_hProcessHandleBaseLock = NULL;
+       }
+
+       if (gKernelHandleLock != NULL)
+       {
+               OSLockDestroy(gKernelHandleLock);
+               gbLockInitialised = IMG_FALSE;
+       }
+
+       return eError;
+}
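+
+/* Editor's note: illustrative sketch, not part of the imported driver. The
+ * init/deinit functions above are expected to bracket the lifetime of the
+ * handle subsystem roughly as follows; the wrapper name is hypothetical.
+ */
+static PVRSRV_ERROR _ExampleHandleSubsystemLifetime(void)
+{
+       PVRSRV_ERROR eError;
+
+       eError = PVRSRVHandleInit();
+       PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVHandleInit");
+
+       /* ... normal driver operation: per-process bases are created on
+        * demand via PVRSRVAcquireProcessHandleBase() ... */
+
+       return PVRSRVHandleDeInit();
+}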
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/htb_debug.c b/drivers/gpu/drm/img/img-rogue/services/server/common/htb_debug.c
new file mode 100644 (file)
index 0000000..f361414
--- /dev/null
@@ -0,0 +1,1190 @@
+/*************************************************************************/ /*!
+@File           htb_debug.c
+@Title          Debug Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provides kernel side debugFS Functionality.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "rgxdevice.h"
+#include "htbserver.h"
+#include "htbuffer.h"
+#include "htbuffer_types.h"
+#include "tlstream.h"
+#include "tlclient.h"
+#include "pvrsrv_tlcommon.h"
+#include "di_server.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+#include "allocmem.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "htb_debug.h"
+
+// Global data handles for buffer manipulation and processing
+
+typedef struct {
+       IMG_PBYTE       pBuf;           /* Raw data buffer from TL stream */
+       IMG_UINT32      uiBufLen;       /* Amount of data to process from 'pBuf' */
+       IMG_UINT32      uiTotal;        /* Total bytes processed */
+       IMG_UINT32      uiMsgLen;       /* Length of HTB message to be processed */
+       IMG_PBYTE       pCurr;          /* pointer to current message to be decoded */
+       IMG_CHAR        szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];    /* Output string */
+} HTB_Sentinel_t;
+
+typedef struct
+{
+       DI_ENTRY *psDumpHostDiEntry;  /* debug info entry */
+       HTB_Sentinel_t sSentinel;     /* private control structure for HTB DI
+                                        operations */
+       IMG_HANDLE hStream;           /* stream handle for debugFS use */
+} HTB_DBG_INFO;
+
+static HTB_DBG_INFO g_sHTBData;
+
+// For extra debug output, uncomment the PVR_DPF definition below and comment out the empty one
+// #define HTB_CHATTY_PRINT(x) PVR_DPF(x)
+#define HTB_CHATTY_PRINT(x)
+
+typedef void (DI_PRINTF)(const OSDI_IMPL_ENTRY *, const IMG_CHAR *, ...);
+
+/******************************************************************************
+ * debugFS display routines
+ *****************************************************************************/
+static int HTBDumpBuffer(DI_PRINTF, OSDI_IMPL_ENTRY *, void *);
+
+static int _DebugHBTraceDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData)
+{
+       int     retVal;
+
+       PVR_ASSERT(psEntry != NULL);
+
+       /* psEntry should never be NULL */
+       if (psEntry == NULL)
+       {
+               return -1;
+       }
+
+       /* Ensure that we have a valid address to use to dump info from. If NULL we
+        * return a failure code to terminate the DI read call. pvData is either
+        * DI_START_TOKEN (for the initial call) or an HTB buffer address for
+        * subsequent calls [returned from the NEXT function]. */
+       if (pvData == NULL)
+       {
+               return -1;
+       }
+
+       retVal = HTBDumpBuffer(DIPrintf, psEntry, pvData);
+
+       HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Returning %d", __func__, retVal));
+
+       return retVal;
+}
+
+static IMG_UINT32 idToLogIdx(IMG_UINT32);      /* Forward declaration */
+
+/*
+ * HTB_GetNextMessage
+ *
+ * Get the next non-empty message block from the buffer held in pSentinel->pBuf.
+ * If we exhaust the data buffer we refill it (after releasing the previously
+ * held message(s)); only one non-NULL message is held at a time, but PAD
+ * messages are released as we traverse them.
+ *
+ * Input:
+ *     pSentinel               references the already acquired data buffer
+ *
+ * Output:
+ *     pSentinel
+ *             -> uiMsglen updated to the size of the non-NULL message
+ *
+ * Returns:
+ *     Address of first non-NULL message in the buffer (if any)
+ *     NULL if there is no further data available from the stream and the buffer
+ *     contents have been drained.
+ */
+static IMG_PBYTE HTB_GetNextMessage(HTB_Sentinel_t *pSentinel)
+{
+       void    *pNext, *pLast, *pStart, *pData = NULL;
+       void    *pCurrent;              /* Current processing point within buffer */
+       PVRSRVTL_PPACKETHDR     ppHdr;  /* Current packet header */
+       IMG_UINT32      uiHdrType;              /* Packet header type */
+       IMG_UINT32      uiMsgSize;              /* Message size of current packet (bytes) */
+       IMG_BOOL        bUnrecognizedErrorPrinted = IMG_FALSE;
+       IMG_UINT32      ui32Data;
+       IMG_UINT32      ui32LogIdx;
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(pSentinel != NULL);
+
+       pLast = pSentinel->pBuf + pSentinel->uiBufLen;
+
+       pStart = pSentinel->pBuf;
+
+       pNext = pStart;
+       pSentinel->uiMsgLen = 0;        // Reset count for this message
+       uiMsgSize = 0;                          // nothing processed so far
+       ui32LogIdx = HTB_SF_LAST;       // Loop terminator condition
+
+       do
+       {
+               /*
+                * If we've drained the buffer we must RELEASE and ACQUIRE some more.
+                */
+               if (pNext >= pLast)
+               {
+                       eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, g_sHTBData.hStream);
+                       PVR_ASSERT(eError == PVRSRV_OK);
+
+                       eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
+                               g_sHTBData.hStream, &pSentinel->pBuf, &pSentinel->uiBufLen);
+
+                       if (PVRSRV_OK != eError)
+                       {
+                               PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s'", __func__,
+                                       "TLClientAcquireData", PVRSRVGETERRORSTRING(eError)));
+                               return NULL;
+                       }
+
+                       // Reset our limits - if we've returned an empty buffer we're done.
+                       pLast = pSentinel->pBuf + pSentinel->uiBufLen;
+                       pStart = pSentinel->pBuf;
+                       pNext = pStart;
+
+                       if (pStart == NULL || pLast == NULL)
+                       {
+                               return NULL;
+                       }
+               }
+
+               /*
+                * We should have a header followed by data block(s) in the stream.
+                */
+
+               pCurrent = pNext;
+               ppHdr = GET_PACKET_HDR(pCurrent);
+
+               if (ppHdr == NULL)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Unexpected NULL packet in Host Trace buffer",
+                                __func__));
+                       pSentinel->uiMsgLen += uiMsgSize;
+                       return NULL;            // This should never happen
+               }
+
+               /*
+                * This should *NEVER* fire. If it does it means we have got some
+                * dubious packet header back from the HTB stream. In this case
+                * the sensible thing is to abort processing and return to
+                * the caller
+                */
+               uiHdrType = GET_PACKET_TYPE(ppHdr);
+
+               PVR_ASSERT(uiHdrType < PVRSRVTL_PACKETTYPE_LAST &&
+                       uiHdrType > PVRSRVTL_PACKETTYPE_UNDEF);
+
+               if (uiHdrType < PVRSRVTL_PACKETTYPE_LAST &&
+                       uiHdrType > PVRSRVTL_PACKETTYPE_UNDEF)
+               {
+                       /*
+                        * We have a (potentially) valid data header. We should see if
+                        * the associated packet header matches one of our expected
+                        * types.
+                        */
+                       pNext = GET_NEXT_PACKET_ADDR(ppHdr);
+
+                       PVR_ASSERT(pNext != NULL);
+
+                       uiMsgSize = (IMG_UINT32)((size_t)pNext - (size_t)ppHdr);
+
+                       pSentinel->uiMsgLen += uiMsgSize;
+
+                       pData = GET_PACKET_DATA_PTR(ppHdr);
+
+                       /*
+                        * Handle non-DATA packet types. These include PAD fields which
+                        * may have data associated and other types. We simply discard
+                        * these as they have no decodable information within them.
+                        */
+                       if (uiHdrType != PVRSRVTL_PACKETTYPE_DATA)
+                       {
+                               /*
+                                * Now release the current non-data packet and proceed to the
+                                * next entry (if any).
+                                */
+                               eError = TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE,
+                                   g_sHTBData.hStream, uiMsgSize);
+
+                               HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Packet Type %x "
+                                                "Length %u", __func__, uiHdrType, uiMsgSize));
+
+                               if (eError != PVRSRV_OK)
+                               {
+                                       PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED - '%s' message"
+                                               " size %u", __func__, "TLClientReleaseDataLess",
+                                               PVRSRVGETERRORSTRING(eError), uiMsgSize));
+                               }
+
+                               eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
+                                       g_sHTBData.hStream, &pSentinel->pBuf, &pSentinel->uiBufLen);
+
+                               if (PVRSRV_OK != eError)
+                               {
+                                       PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED - %s Giving up",
+                                               __func__, "TLClientAcquireData",
+                                               PVRSRVGETERRORSTRING(eError)));
+
+                                       return NULL;
+                               }
+                               pSentinel->uiMsgLen = 0;
+                               // Reset our limits - if we've returned an empty buffer we're done.
+                               pLast = pSentinel->pBuf + pSentinel->uiBufLen;
+                               pStart = pSentinel->pBuf;
+                               pNext = pStart;
+
+                               if (pStart == NULL || pLast == NULL)
+                               {
+                                       return NULL;
+                               }
+                               continue;
+                       }
+                       if (pData == NULL || pData >= pLast)
+                       {
+                               continue;
+                       }
+                       ui32Data = *(IMG_UINT32 *)pData;
+                       ui32LogIdx = idToLogIdx(ui32Data);
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "Unexpected Header @%p value %x",
+                               ppHdr, uiHdrType));
+
+                       return NULL;
+               }
+
+               /*
+                * Check whether the unrecognized ID is nevertheless a valid HTB
+                * log value and therefore tracebuf needs updating.
+                */
+               if (HTB_SF_LAST == ui32LogIdx && HTB_LOG_VALIDID(ui32Data)
+                       && IMG_FALSE == bUnrecognizedErrorPrinted)
+               {
+                       PVR_DPF((PVR_DBG_WARNING,
+                           "%s: Unrecognised LOG value '%x' GID %x Params %d ID %x @ '%p'",
+                           __func__, ui32Data, HTB_SF_GID(ui32Data),
+                           HTB_SF_PARAMNUM(ui32Data), ui32Data & 0xfff, pData));
+                       /* Warn only once for unrecognised-but-valid IDs */
+                       bUnrecognizedErrorPrinted = IMG_TRUE;
+               }
+
+       } while (HTB_SF_LAST == ui32LogIdx);
+
+       HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Returning data @ %p Log value '%x'",
+                        __func__, pCurrent, ui32Data));
+
+       return pCurrent;
+}
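+
+/* Editor's note: illustrative sketch, not part of the imported driver. The
+ * loop above follows the basic TL client read cycle shown below - acquire a
+ * block of stream data, walk its packets, then release what was consumed.
+ * Error handling is omitted and the function name is hypothetical.
+ */
+static void _ExampleTLReadCycle(IMG_HANDLE hStream)
+{
+       IMG_PBYTE pbData;
+       IMG_UINT32 uiLen;
+
+       if (TLClientAcquireData(DIRECT_BRIDGE_HANDLE, hStream, &pbData, &uiLen) != PVRSRV_OK)
+       {
+               return;
+       }
+
+       /* ... walk the packets with GET_PACKET_HDR()/GET_NEXT_PACKET_ADDR() ... */
+
+       (void) TLClientReleaseData(DIRECT_BRIDGE_HANDLE, hStream);
+}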
+
+/*
+ * HTB_GetFirstMessage
+ *
+ * Called from START to obtain the buffer address of the first message within
+ * pSentinel->pBuf. Will ACQUIRE data if the buffer is empty.
+ *
+ * Input:
+ *     pSentinel
+ *     pui64Pos                        Offset within the debugFS file
+ *
+ * Output:
+ *     pSentinel->pCurr        Set to reference the first valid non-NULL message within
+ *                                             the buffer. If no valid message is found set to NULL.
+ *     pSentinel
+ *             ->pBuf          if unset on entry
+ *             ->uiBufLen      if pBuf unset on entry
+ *
+ * Side-effects:
+ *     HTB TL stream will be updated to bypass any zero-length PAD messages before
+ *     the first non-NULL message (if any).
+ */
+static void HTB_GetFirstMessage(HTB_Sentinel_t *pSentinel, IMG_UINT64 *pui64Pos)
+{
+       PVRSRV_ERROR    eError;
+
+       PVR_UNREFERENCED_PARAMETER(pui64Pos);
+
+       if (pSentinel == NULL)
+               return;
+
+       if (pSentinel->pBuf == NULL)
+       {
+               /* Acquire data */
+               pSentinel->uiMsgLen = 0;
+
+               eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
+                   g_sHTBData.hStream, &pSentinel->pBuf, &pSentinel->uiBufLen);
+
+               if (PVRSRV_OK != eError)
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s'",
+                           __func__, "TLClientAcquireData", PVRSRVGETERRORSTRING(eError)));
+
+                       pSentinel->pBuf = NULL;
+                       pSentinel->pCurr = NULL;
+               }
+               else
+               {
+                       /*
+                        * If there is no data available we set pSentinel->pCurr to NULL
+                        * and return. This is expected behaviour if we've drained the
+                        * data and nothing else has yet been produced.
+                        */
+                       if (pSentinel->uiBufLen == 0 || pSentinel->pBuf == NULL)
+                       {
+                               HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Empty Buffer @ %p",
+                                                __func__, pSentinel->pBuf));
+
+                               pSentinel->pCurr = NULL;
+                               return;
+                       }
+               }
+       }
+
+       /* Locate next message within buffer. NULL => no more data to process */
+       pSentinel->pCurr = HTB_GetNextMessage(pSentinel);
+}
+
+/*
+ * _DebugHBTraceDIStart:
+ *
+ * Returns the address to use for subsequent 'Show', 'Next', 'Stop' file ops.
+ * Returns DI_START_TOKEN for the very first call. The sentinel used by the
+ * 'Show' routine and its helpers is allocated when the DI entry is created
+ * and is stored in the psEntry's private hook field.
+ *
+ * We obtain access to the TLstream associated with the HTB. If this doesn't
+ * exist (because no pvrdebug capture trace has been set) we simply return with
+ * a NULL value which will stop the DI traversal.
+ */
+static void *_DebugHBTraceDIStart(OSDI_IMPL_ENTRY *psEntry,
+                                  IMG_UINT64 *pui64Pos)
+{
+       HTB_Sentinel_t  *pSentinel = DIGetPrivData(psEntry);
+       PVRSRV_ERROR    eError;
+       IMG_UINT32              uiTLMode;
+       void                    *retVal;
+       IMG_HANDLE              hStream;
+
+       /* The sentinel object should have been allocated during the creation
+        * of the DI entry. If it's not there it means that something went
+        * wrong. Return NULL in such case. */
+       if (pSentinel == NULL)
+       {
+               return NULL;
+       }
+
+       /* Check to see if the HTB stream has been configured yet. If not, there is
+        * nothing to display so we just return NULL to stop the stream access.
+        */
+       if (!HTBIsConfigured())
+       {
+               return NULL;
+       }
+
+       /* Open the stream in non-blocking mode so that we can determine if there
+        * is no data to consume. Also disable the producer callback (if any) and
+        * the open callback so that we do not generate spurious trace data when
+        * accessing the stream.
+        */
+       uiTLMode = PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING|
+                          PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK|
+                          PVRSRV_STREAM_FLAG_IGNORE_OPEN_CALLBACK;
+
+       /* If two or more processes try to read from this file at the same time
+        * the TLClientOpenStream() function will handle this by allowing only
+        * one of them to actually open the stream. The other process will get
+        * an error stating that the stream is already open. The open function
+        * is thread-safe. */
+       eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE, HTB_STREAM_NAME, uiTLMode,
+                                   &hStream);
+
+       if (eError == PVRSRV_ERROR_ALREADY_OPEN)
+       {
+               /* Stream allows only one reader so return error if it's already
+                * opened. */
+               HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Stream handle %p already "
+                                "exists for %s", __func__, g_sHTBData.hStream,
+                                HTB_STREAM_NAME));
+               return NULL;
+       }
+       else if (eError != PVRSRV_OK)
+       {
+               /*
+                * No stream available so nothing to report
+                */
+               return NULL;
+       }
+
+       /* There is a window where hStream can be NULL but the stream is already
+        * opened. This shouldn't matter since the TLClientOpenStream() will make
+        * sure that only one stream can be opened and only one process can reach
+        * this place at a time. Also, the .stop function will always be called
+        * after this function returns, so there should be no risk of the stream
+        * not being closed. */
+       PVR_ASSERT(g_sHTBData.hStream == NULL);
+       g_sHTBData.hStream = hStream;
+
+       /* We're starting the read operation so ensure we properly zero the
+        * sentinel object. */
+       memset(pSentinel, 0, sizeof(*pSentinel));
+
+       /*
+        * Find the first message location within pSentinel->pBuf
+        * => for DI_START_TOKEN we must issue our first ACQUIRE, also for the
+        * subsequent re-START calls (if any).
+        */
+
+       HTB_GetFirstMessage(pSentinel, pui64Pos);
+
+       retVal = *pui64Pos == 0 ? DI_START_TOKEN : pSentinel->pCurr;
+
+       HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Returning %p, Stream %s @ %p",
+                        __func__, retVal, HTB_STREAM_NAME, g_sHTBData.hStream));
+
+       return retVal;
+}
+
+/*
+ * _DebugHBTraceDIStop:
+ *
+ * Stop processing: release any stream data still held from the previous
+ * 'Show' call and close the HTB stream handle opened by _DebugHBTraceDIStart().
+ */
+static void _DebugHBTraceDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData)
+{
+       HTB_Sentinel_t *pSentinel = DIGetPrivData(psEntry);
+       IMG_UINT32 uiMsgLen;
+       PVRSRV_ERROR eError;
+
+       if (pSentinel == NULL)
+       {
+               return;
+       }
+
+       uiMsgLen = pSentinel->uiMsgLen;
+
+       HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: MsgLen = %d", __func__, uiMsgLen));
+
+       /* If we get here the handle should never be NULL because
+        * _DebugHBTraceDIStart() shouldn't allow that. */
+       if (g_sHTBData.hStream == NULL)
+       {
+               return;
+       }
+
+       if (uiMsgLen != 0)
+       {
+               eError = TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE,
+                                                g_sHTBData.hStream, uiMsgLen);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED - %s, nBytes %u",
+                               __func__, "TLClientReleaseDataLess",
+                               PVRSRVGETERRORSTRING(eError), uiMsgLen));
+               }
+       }
+
+       eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE, g_sHTBData.hStream);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()",
+                       "TLClientCloseStream", PVRSRVGETERRORSTRING(eError),
+                       __func__));
+       }
+
+       g_sHTBData.hStream = NULL;
+}
+
+
+/*
+ * _DebugHBTraceDINext:
+ *
+ * This is where we release any acquired data which has been processed by the
+ * DIShow routine. If we have encountered a DI entry overflow we stop
+ * processing and return NULL. Otherwise we release the message that we
+ * previously processed and simply update our position pointer to the next
+ * valid HTB message (if any)
+ */
+static void *_DebugHBTraceDINext(OSDI_IMPL_ENTRY *psEntry, void *pvPriv,
+                                 IMG_UINT64 *pui64Pos)
+{
+       HTB_Sentinel_t *pSentinel = DIGetPrivData(psEntry);
+       IMG_UINT64 ui64CurPos;
+       PVRSRV_ERROR eError;
+
+       PVR_UNREFERENCED_PARAMETER(pvPriv);
+
+       if (pui64Pos)
+       {
+               ui64CurPos = *pui64Pos;
+               *pui64Pos = ui64CurPos + 1;
+       }
+
+       /* Determine if we've had an overflow on the previous 'Show' call. If so
+        * we leave the previously acquired data in the queue (by releasing 0 bytes)
+        * and return NULL to end this DI entry iteration.
+        * If we have not overflowed we simply get the next HTB message and use that
+        * for our display purposes. */
+
+       if (DIHasOverflowed(psEntry))
+       {
+               (void) TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE, g_sHTBData.hStream,
+                                              0);
+
+               HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: OVERFLOW - returning NULL",
+                                __func__));
+
+               return NULL;
+       }
+       else
+       {
+               eError = TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE,
+                                                g_sHTBData.hStream,
+                                                pSentinel->uiMsgLen);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s' @ %p Length %d",
+                               __func__, "TLClientReleaseDataLess",
+                               PVRSRVGETERRORSTRING(eError), pSentinel->pCurr,
+                               pSentinel->uiMsgLen));
+                       PVR_DPF((PVR_DBG_WARNING, "%s: Buffer @ %p..%p", __func__,
+                               pSentinel->pBuf,
+                               (IMG_PBYTE) (pSentinel->pBuf + pSentinel->uiBufLen)));
+
+               }
+
+               eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
+                                            g_sHTBData.hStream, &pSentinel->pBuf,
+                                            &pSentinel->uiBufLen);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s'\nPrev message len %d",
+                               __func__, "TLClientAcquireData",
+                               PVRSRVGETERRORSTRING(eError), pSentinel->uiMsgLen));
+                       pSentinel->pBuf = NULL;
+               }
+
+               pSentinel->uiMsgLen = 0; /* We don't (yet) know the message size */
+       }
+
+       HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Returning %p Msglen %d", __func__,
+                        pSentinel->pBuf, pSentinel->uiMsgLen));
+
+       if (pSentinel->pBuf == NULL || pSentinel->uiBufLen == 0)
+       {
+               return NULL;
+       }
+
+       pSentinel->pCurr = HTB_GetNextMessage(pSentinel);
+
+       return pSentinel->pCurr;
+}
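+
+/* Editor's note: taken together, the four callbacks above implement a
+ * seq_file-style iteration contract for the DI entry: 'Start' opens the HTB
+ * stream and returns DI_START_TOKEN (or the first message), 'Show' renders
+ * the current message, 'Next' releases the consumed data and advances to the
+ * next message (or stops on overflow), and 'Stop' releases any outstanding
+ * data and closes the stream.
+ */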
+
+/******************************************************************************
+ * HTB Dumping routines and definitions
+ *****************************************************************************/
+#define IS_VALID_FMT_STRING(FMT) (strchr(FMT, '%') != NULL)
+#define MAX_STRING_SIZE (128)
+
+typedef enum
+{
+       TRACEBUF_ARG_TYPE_INT,
+       TRACEBUF_ARG_TYPE_ERR,
+       TRACEBUF_ARG_TYPE_NONE
+} TRACEBUF_ARG_TYPE;
+
+/*
+ * Array of all Host Trace log IDs used to convert the tracebuf data
+ */
+typedef struct _HTB_TRACEBUF_LOG_ {
+       HTB_LOG_SFids eSFId;
+       IMG_CHAR      *pszName;
+       IMG_CHAR      *pszFmt;
+       IMG_UINT32    ui32ArgNum;
+} HTB_TRACEBUF_LOG;
+
+static const HTB_TRACEBUF_LOG aLogs[] = {
+#define X(a, b, c, d, e) {HTB_LOG_CREATESFID(a,b,e), #c, d, e},
+       HTB_LOG_SFIDLIST
+#undef X
+};
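+
+/* Editor's note (illustrative): each X(a, b, c, d, e) entry of
+ * HTB_LOG_SFIDLIST expands above to an initialiser of the form
+ *     { HTB_LOG_CREATESFID(a, b, e), "c", d, e },
+ * pairing every SF id with its stringified name, printf-style format string
+ * and argument count.
+ */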
+
+static const IMG_CHAR *aGroups[] = {
+#define X(A,B) #B,
+       HTB_LOG_SFGROUPLIST
+#undef X
+};
+static const IMG_UINT32 uiMax_aGroups = ARRAY_SIZE(aGroups) - 1;
+
+static TRACEBUF_ARG_TYPE ExtractOneArgFmt(IMG_CHAR **, IMG_CHAR *);
+/*
+ * ExtractOneArgFmt
+ *
+ * Scan the input 'printf-like' string *ppszFmt and return the next
+ * value string to be displayed. If there is no '%' format field in the
+ * string we return 'TRACEBUF_ARG_TYPE_NONE' and leave the input string
+ * untouched.
+ *
+ * Input
+ *     ppszFmt          reference to format string to be decoded
+ *     pszOneArgFmt     single field format from *ppszFmt
+ *
+ * Returns
+ *     TRACEBUF_ARG_TYPE_ERR       unrecognised argument
+ *     TRACEBUF_ARG_TYPE_INT       variable is of numeric type
+ *     TRACEBUF_ARG_TYPE_NONE      no variable reference in *ppszFmt
+ *
+ * Side-effect
+ *     *ppszFmt is updated to reference the next part of the format string
+ *     to be scanned
+ */
+static TRACEBUF_ARG_TYPE ExtractOneArgFmt(
+       IMG_CHAR **ppszFmt,
+       IMG_CHAR *pszOneArgFmt)
+{
+       IMG_CHAR          *pszFmt;
+       IMG_CHAR          *psT;
+       IMG_UINT32        ui32Count = MAX_STRING_SIZE;
+       IMG_UINT32        ui32OneArgSize;
+       TRACEBUF_ARG_TYPE eRet = TRACEBUF_ARG_TYPE_ERR;
+
+       if (NULL == ppszFmt)
+               return TRACEBUF_ARG_TYPE_ERR;
+
+       pszFmt = *ppszFmt;
+       if (NULL == pszFmt)
+               return TRACEBUF_ARG_TYPE_ERR;
+
+       /*
+        * Find the first '%'
+        * NOTE: we can be passed a simple string to display which will have no
+        * parameters embedded within it. In this case we simply return
+        * TRACEBUF_ARG_TYPE_NONE and the string contents will be the full pszFmt
+        */
+       psT = strchr(pszFmt, '%');
+       if (psT == NULL)
+       {
+               return TRACEBUF_ARG_TYPE_NONE;
+       }
+
+       /* Find next conversion identifier after the initial '%' */
+       while ((*psT++) && (ui32Count-- > 0))
+       {
+               switch (*psT)
+               {
+                       case 'd':
+                       case 'i':
+                       case 'o':
+                       case 'u':
+                       case 'x':
+                       case 'X':
+                       {
+                               eRet = TRACEBUF_ARG_TYPE_INT;
+                               goto _found_arg;
+                       }
+                       case 's':
+                       {
+                               eRet = TRACEBUF_ARG_TYPE_ERR;
+                               goto _found_arg;
+                       }
+               }
+       }
+
+       if ((psT == NULL) || (ui32Count == 0)) return TRACEBUF_ARG_TYPE_ERR;
+
+_found_arg:
+       ui32OneArgSize = psT - pszFmt + 1;
+       OSCachedMemCopy(pszOneArgFmt, pszFmt, ui32OneArgSize);
+       pszOneArgFmt[ui32OneArgSize] = '\0';
+
+       *ppszFmt = psT + 1;
+
+       return eRet;
+}
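+
+/* Editor's note: illustrative sketch, not part of the imported driver. For a
+ * format string such as "Pid %d started", the first call below consumes
+ * "Pid %d" and returns TRACEBUF_ARG_TYPE_INT, leaving " started" behind; the
+ * second call finds no '%' and returns TRACEBUF_ARG_TYPE_NONE. The function
+ * name is hypothetical.
+ */
+static void _ExampleExtractOneArgFmt(void)
+{
+       IMG_CHAR aszField[MAX_STRING_SIZE];
+       IMG_CHAR szFmt[] = "Pid %d started";
+       IMG_CHAR *pszFmt = szFmt;
+       TRACEBUF_ARG_TYPE eType;
+
+       eType = ExtractOneArgFmt(&pszFmt, aszField);    /* INT: aszField = "Pid %d" */
+       eType = ExtractOneArgFmt(&pszFmt, aszField);    /* NONE: no '%' remains */
+       PVR_UNREFERENCED_PARAMETER(eType);
+}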
+
+static IMG_UINT32 idToLogIdx(IMG_UINT32 ui32CheckData)
+{
+       IMG_UINT32      i = 0;
+       for (i = 0; aLogs[i].eSFId != HTB_SF_LAST; i++)
+       {
+               if ( ui32CheckData == aLogs[i].eSFId )
+                       return i;
+       }
+       /* Nothing found, return max value */
+       return HTB_SF_LAST;
+}
+
+/*
+ * DecodeHTB
+ *
+ * Decode the data buffer message located at pBuf. This should be a valid
+ * HTB message as we are provided with the start of the buffer. If empty there
+ * is no message to process. We update the uiMsgLen field with the size of the
+ * HTB message that we have processed so that it can be returned to the system
+ * on successful logging of the message to the output file.
+ *
+ *     Input
+ *             pSentinel reference to newly read data and pending completion data
+ *                       from a previous invocation [handle DI entry buffer overflow]
+ *              -> pBuf         reference to raw data that we are to parse
+ *              -> uiBufLen     total number of bytes of data available
+ *              -> pCurr        start of message to decode
+ *
+ *             pvDumpDebugFile     output file
+ *             pfnDumpDebugPrintf  output generating routine
+ *
+ * Output
+ *             pSentinel
+ *              -> uiMsgLen    length of the decoded message which will be freed to
+ *                                             the system on successful completion of the DI entry
+ *                                             update via _DebugHBTraceDINext(),
+ * Return Value
+ *             0                               successful decode
+ *             -1                              unsuccessful decode
+ */
+static int
+DecodeHTB(HTB_Sentinel_t *pSentinel, OSDI_IMPL_ENTRY *pvDumpDebugFile,
+          DI_PRINTF pfnDumpDebugPrintf)
+{
+       IMG_UINT32      ui32Data, ui32LogIdx, ui32ArgsCur;
+       IMG_CHAR        *pszFmt = NULL;
+       IMG_CHAR        aszOneArgFmt[MAX_STRING_SIZE];
+       IMG_BOOL        bUnrecognizedErrorPrinted = IMG_FALSE;
+
+       size_t  nPrinted;
+
+       void    *pNext, *pLast, *pStart, *pData = NULL;
+       PVRSRVTL_PPACKETHDR     ppHdr;  /* Current packet header */
+       IMG_UINT32      uiHdrType;              /* Packet header type */
+       IMG_UINT32      uiMsgSize;              /* Message size of current packet (bytes) */
+       IMG_BOOL        bPacketsDropped;
+
+       pLast = pSentinel->pBuf + pSentinel->uiBufLen;
+       pStart = pSentinel->pCurr;
+
+       pSentinel->uiMsgLen = 0;        // Reset count for this message
+
+       HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Buf @ %p..%p, Length = %d",
+                        __func__, pStart, pLast, pSentinel->uiBufLen));
+
+       /*
+        * We should have a DATA header with the necessary information following
+        */
+       ppHdr = GET_PACKET_HDR(pStart);
+
+       if (ppHdr == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                   "%s: Unexpected NULL packet in Host Trace buffer", __func__));
+               return -1;
+       }
+
+       uiHdrType = GET_PACKET_TYPE(ppHdr);
+       PVR_ASSERT(uiHdrType == PVRSRVTL_PACKETTYPE_DATA);
+
+       pNext = GET_NEXT_PACKET_ADDR(ppHdr);
+
+       PVR_ASSERT(pNext != NULL);
+
+       uiMsgSize = (IMG_UINT32)((size_t)pNext - (size_t)ppHdr);
+
+       pSentinel->uiMsgLen += uiMsgSize;
+
+       pData = GET_PACKET_DATA_PTR(ppHdr);
+
+       if (pData == NULL || pData >= pLast)
+       {
+               HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: pData = %p, pLast = %p "
+                                "Returning 0", __func__, pData, pLast));
+               return 0;
+       }
+
+       ui32Data = *(IMG_UINT32 *)pData;
+       ui32LogIdx = idToLogIdx(ui32Data);
+
+       /*
+        * If the ID was not found in our table but is nevertheless a valid
+        * log value, the tracebuf decode tables need updating; warn once and
+        * skip the message rather than failing the whole decode.
+        */
+       if (ui32LogIdx == HTB_SF_LAST)
+       {
+               if (HTB_LOG_VALIDID(ui32Data))
+               {
+                       if (!bUnrecognizedErrorPrinted)
+                       {
+                               PVR_DPF((PVR_DBG_WARNING,
+                                   "%s: Unrecognised LOG value '%x' GID %x Params %d ID %x @ '%p'",
+                                   __func__, ui32Data, HTB_SF_GID(ui32Data),
+                                   HTB_SF_PARAMNUM(ui32Data), ui32Data & 0xfff, pData));
+                               bUnrecognizedErrorPrinted = IMG_TRUE;
+                       }
+
+                       return 0;
+               }
+
+               PVR_DPF((PVR_DBG_ERROR,
+                   "%s: Unrecognised and invalid LOG value detected '%x'",
+                   __func__, ui32Data));
+
+               return -1;
+       }
+
+       /*
+        * The string format we are going to display.
+        * The display will show the header (log-ID, group-ID, number of params).
+        * The maximum parameter list length is 15 (only 4 bits are used to
+        * encode it), so we need HEADER + 15 * sizeof(UINT32) plus the displayed
+        * string describing the event. We use a buffer in the per-process
+        * pSentinel structure to hold the data.
+        */
+       pszFmt = aLogs[ui32LogIdx].pszFmt;
+
+       /* Number of arguments encoded in the log ID */
+       ui32ArgsCur = HTB_SF_PARAMNUM(ui32Data);
+
+       /* Determine if we've over-filled the buffer and had to drop packets */
+       bPacketsDropped = CHECK_PACKETS_DROPPED(ppHdr);
+       if (bPacketsDropped ||
+               (uiHdrType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED))
+       {
+               /* Flag this as it is useful to know ... */
+
+               PVR_DUMPDEBUG_LOG("\n<========================== *** PACKETS DROPPED *** ======================>\n");
+       }
+
+       {
+               IMG_UINT32 ui32Timestampns, ui32PID, ui32TID;
+               IMG_UINT64 ui64Timestamp, ui64TimestampSec;
+               IMG_CHAR        *szBuffer = pSentinel->szBuffer;        // Buffer start
+               IMG_CHAR        *pszBuffer = pSentinel->szBuffer;       // Current place in buf
+               size_t          uBufBytesAvailable = sizeof(pSentinel->szBuffer);
+               IMG_UINT32      *pui32Data = (IMG_UINT32 *)pData;
+               IMG_UINT32      ui_aGroupIdx;
+
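+               // Packet payload layout: SF id (already read), PID, TID,
+               // timestamp split across two 32-bit words, then the parameters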
+               // Get PID field from data stream
+               pui32Data++;
+               ui32PID = *pui32Data;
+               // Get TID field from data stream
+               pui32Data++;
+               ui32TID = *pui32Data;
+               // Get Timestamp part 1 from data stream
+               pui32Data++;
+               ui64Timestamp = (IMG_UINT64) *pui32Data << 32;
+               // Get Timestamp part 2 from data stream
+               pui32Data++;
+               ui64Timestamp |= (IMG_UINT64) *pui32Data;
+               // Move to start of message contents data
+               pui32Data++;
+
+               /*
+                * We need to snprintf the data to a local in-kernel buffer
+                * and then PVR_DUMPDEBUG_LOG() that in one shot
+                */
+               ui_aGroupIdx = MIN(HTB_SF_GID(ui32Data), uiMax_aGroups);
+
+               /* Divide by 1e9 to get seconds; the remainder (nanoseconds) is returned via the output parameter */
+               ui64TimestampSec = OSDivide64r64(ui64Timestamp, 1000000000, &ui32Timestampns);
+
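+               /* Prefix each decoded entry with "<seconds>.<nanoseconds>:<PID>-<TID>-<group>> " */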
+               nPrinted = OSSNPrintf(szBuffer, uBufBytesAvailable, "%010"IMG_UINT64_FMTSPEC".%09u:%-5u-%-5u-%s> ",
+                       ui64TimestampSec, ui32Timestampns, ui32PID, ui32TID, aGroups[ui_aGroupIdx]);
+               if (nPrinted >= uBufBytesAvailable)
+               {
+                       PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed,"
+                               " max space "IMG_SIZE_FMTSPEC"\n", nPrinted,
+                               uBufBytesAvailable);
+
+                       nPrinted = uBufBytesAvailable;  /* Ensure we don't overflow buffer */
+               }
+
+               PVR_DUMPDEBUG_LOG("%s", pszBuffer);
+               /* Update where our next 'output' point in the buffer is */
+               pszBuffer += nPrinted;
+               uBufBytesAvailable -= nPrinted;
+
+               /*
+                * Print one argument at a time as this simplifies handling variable
+                * number of arguments. Special case handling for no arguments.
+                * This is the case for simple format strings such as
+                * HTB_SF_MAIN_KICK_UNCOUNTED.
+                */
+               if (ui32ArgsCur == 0)
+               {
+                       if (pszFmt)
+                       {
+                               nPrinted = OSStringLCopy(pszBuffer, pszFmt, uBufBytesAvailable);
+                               if (nPrinted >= uBufBytesAvailable)
+                               {
+                                       PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed,"
+                                               " max space "IMG_SIZE_FMTSPEC"\n", nPrinted,
+                                               uBufBytesAvailable);
+                                       nPrinted = uBufBytesAvailable;  /* Ensure we don't overflow buffer */
+                               }
+                               PVR_DUMPDEBUG_LOG("%s", pszBuffer);
+                               pszBuffer += nPrinted;
+                               /* Don't update the uBufBytesAvailable as we have finished this
+                                * message decode. pszBuffer - szBuffer is the total amount of
+                                * data we have decoded.
+                                */
+                       }
+               }
+               else
+               {
+                       if (HTB_SF_GID(ui32Data) == HTB_GID_CTRL && HTB_SF_ID(ui32Data) == HTB_ID_MARK_SCALE)
+                       {
+                               IMG_UINT32 i;
+                               IMG_UINT32 ui32ArgArray[HTB_MARK_SCALE_ARG_ARRAY_SIZE];
+                               IMG_UINT64 ui64OSTS = 0;
+                               IMG_UINT32 ui32OSTSRem = 0;
+                               IMG_UINT64 ui64CRTS = 0;
+
+                               /* Retrieve 6 args to an array */
+                               for (i = 0; i < ARRAY_SIZE(ui32ArgArray); i++)
+                               {
+                                       ui32ArgArray[i] = *pui32Data;
+                                       pui32Data++;
+                                       --ui32ArgsCur;
+                               }
+
+                               ui64OSTS = (IMG_UINT64) ui32ArgArray[HTB_ARG_OSTS_PT1] << 32 | ui32ArgArray[HTB_ARG_OSTS_PT2];
+                               ui64CRTS = (IMG_UINT64) ui32ArgArray[HTB_ARG_CRTS_PT1] << 32 | ui32ArgArray[HTB_ARG_CRTS_PT2];
+
+                               /* Divide by 1e9 to get seconds; the remainder is in nanoseconds */
+                               ui64OSTS = OSDivide64r64(ui64OSTS, 1000000000, &ui32OSTSRem);
+
+                               nPrinted = OSSNPrintf(pszBuffer,
+                                                             uBufBytesAvailable,
+                                                             "HTBFWMkSync Mark=%u OSTS=%010" IMG_UINT64_FMTSPEC ".%09u CRTS=%" IMG_UINT64_FMTSPEC " CalcClkSpd=%u\n",
+                                                             ui32ArgArray[HTB_ARG_SYNCMARK],
+                                                             ui64OSTS,
+                                                             ui32OSTSRem,
+                                                             ui64CRTS,
+                                                             ui32ArgArray[HTB_ARG_CLKSPD]);
+
+                               if (nPrinted >= uBufBytesAvailable)
+                               {
+                                       PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed,"
+                                               " max space "IMG_SIZE_FMTSPEC"\n", nPrinted,
+                                               uBufBytesAvailable);
+                                       nPrinted = uBufBytesAvailable;  /* Ensure we don't overflow buffer */
+                               }
+
+                               PVR_DUMPDEBUG_LOG("%s", pszBuffer);
+                               pszBuffer += nPrinted;
+                               uBufBytesAvailable -= nPrinted;
+                       }
+                       else
+                       {
+                               while (IS_VALID_FMT_STRING(pszFmt) && (uBufBytesAvailable > 0))
+                               {
+                                       IMG_UINT32 ui32TmpArg = *pui32Data;
+                                       TRACEBUF_ARG_TYPE eArgType;
+
+                                       eArgType = ExtractOneArgFmt(&pszFmt, aszOneArgFmt);
+
+                                       pui32Data++;
+                                       ui32ArgsCur--;
+
+                                       switch (eArgType)
+                                       {
+                                               case TRACEBUF_ARG_TYPE_INT:
+                                                       nPrinted = OSSNPrintf(pszBuffer, uBufBytesAvailable,
+                                                               aszOneArgFmt, ui32TmpArg);
+                                                       break;
+
+                                               case TRACEBUF_ARG_TYPE_NONE:
+                                                       nPrinted = OSStringLCopy(pszBuffer, pszFmt,
+                                                               uBufBytesAvailable);
+                                                       break;
+
+                                               default:
+                                                       nPrinted = OSSNPrintf(pszBuffer, uBufBytesAvailable,
+                                                               "Error processing arguments, type not "
+                                                               "recognized (fmt: %s)", aszOneArgFmt);
+                                                       break;
+                                       }
+                                       if (nPrinted >= uBufBytesAvailable)
+                                       {
+                                               PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed,"
+                                                       " max space "IMG_SIZE_FMTSPEC"\n", nPrinted,
+                                                       uBufBytesAvailable);
+                                               nPrinted = uBufBytesAvailable;  /* Ensure we don't overflow buffer */
+                                       }
+                                       PVR_DUMPDEBUG_LOG("%s", pszBuffer);
+                                       pszBuffer += nPrinted;
+                                       uBufBytesAvailable -= nPrinted;
+                               }
+                               /* Display any remaining text in pszFmt string */
+                               if (pszFmt)
+                               {
+                                       nPrinted = OSStringLCopy(pszBuffer, pszFmt, uBufBytesAvailable);
+                                       if (nPrinted >= uBufBytesAvailable)
+                                       {
+                                               PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed,"
+                                                       " max space "IMG_SIZE_FMTSPEC"\n", nPrinted,
+                                                       uBufBytesAvailable);
+                                               nPrinted = uBufBytesAvailable;  /* Ensure we don't overflow buffer */
+                                       }
+                                       PVR_DUMPDEBUG_LOG("%s", pszBuffer);
+                                       pszBuffer += nPrinted;
+                                       /* Don't update the uBufBytesAvailable as we have finished this
+                                        * message decode. pszBuffer - szBuffer is the total amount of
+                                        * data we have decoded.
+                                        */
+                               }
+                       }
+               }
+
+               /* Update total bytes processed */
+               pSentinel->uiTotal += (pszBuffer - szBuffer);
+       }
+       return 0;
+}
+
+/*
+ * HTBDumpBuffer: Dump the Host Trace Buffer using the TLClient API
+ *
+ * This routine just parses *one* message from the buffer.
+ * The stream will be opened by the Start() routine, closed by the Stop() and
+ * updated for data consumed by this routine once we have DebugPrintf'd it.
+ * We use the new TLReleaseDataLess() routine which enables us to update the
+ * HTB contents with just the amount of data we have successfully processed.
+ * If we need to leave the data available we can call this with a 0 count.
+ * This will happen in the case of a buffer overflow so that we can reprocess
+ * any data which wasn't handled before.
+ *
+ * In case of overflow or an error we return -1, otherwise 0.
+ *
+ * Input:
+ *  pfnPrintf           output routine to display data
+ *  psEntry             handle to debug frontend
+ *  pvData              data address to start dumping from
+ *                      (set by Start() / Next())
+ */
+static int HTBDumpBuffer(DI_PRINTF pfnPrintf, OSDI_IMPL_ENTRY *psEntry,
+                         void *pvData)
+{
+       HTB_Sentinel_t *pSentinel = DIGetPrivData(psEntry);
+
+       PVR_ASSERT(pvData != NULL);
+
+       if (pvData == DI_START_TOKEN)
+       {
+               if (pSentinel->pCurr == NULL)
+               {
+                       HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: DI_START_TOKEN, "
+                                        "Empty buffer", __func__));
+                       return 0;
+               }
+               PVR_ASSERT(pSentinel->pCurr != NULL);
+
+               /* Display a Header as we have data to process */
+               pfnPrintf(psEntry, "%-20s:%-5s-%-5s-%s  %s\n", "Timestamp", "PID", "TID", "Group>",
+                        "Log Entry");
+       }
+       else
+       {
+               if (pvData != NULL)
+               {
+                       PVR_ASSERT(pSentinel->pCurr == pvData);
+               }
+       }
+
+       return DecodeHTB(pSentinel, psEntry, pfnPrintf);
+}
+
+
+/******************************************************************************
+ * External Entry Point routines ...
+ *****************************************************************************/
+/*************************************************************************/ /*!
+ @Function     HTB_CreateDIEntry
+
+ @Description  Create the debugFS entry-point for the host-trace-buffer
+
+ @Returns      eError          internal error code, PVRSRV_OK on success
+
+ */ /*************************************************************************/
+PVRSRV_ERROR HTB_CreateDIEntry(void)
+{
+       PVRSRV_ERROR eError;
+
+       DI_ITERATOR_CB sIterator = {
+               .pfnStart = _DebugHBTraceDIStart,
+               .pfnStop  = _DebugHBTraceDIStop,
+               .pfnNext  = _DebugHBTraceDINext,
+               .pfnShow  = _DebugHBTraceDIShow,
+       };
+
+       eError = DICreateEntry("host_trace", NULL, &sIterator,
+                              &g_sHTBData.sSentinel,
+                              DI_ENTRY_TYPE_GENERIC,
+                              &g_sHTBData.psDumpHostDiEntry);
+       PVR_LOG_RETURN_IF_ERROR(eError, "DICreateEntry");
+
+       return PVRSRV_OK;
+}
+
+
+/*************************************************************************/ /*!
+ @Function     HTB_DestroyDIEntry
+
+ @Description  Destroy the debugFS entry-point created by earlier
+               HTB_CreateDIEntry() call.
+*/ /**************************************************************************/
+void HTB_DestroyDIEntry(void)
+{
+       if (g_sHTBData.psDumpHostDiEntry != NULL)
+       {
+               DIDestroyEntry(g_sHTBData.psDumpHostDiEntry);
+               g_sHTBData.psDumpHostDiEntry = NULL;
+       }
+}
+
+/* EOF */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/htb_debug.h b/drivers/gpu/drm/img/img-rogue/services/server/common/htb_debug.h
new file mode 100644 (file)
index 0000000..04132e1
--- /dev/null
@@ -0,0 +1,64 @@
+/*************************************************************************/ /*!
+@File           htb_debug.h
+@Title          Linux debugFS routine setup header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef HTB_DEBUG_H
+#define HTB_DEBUG_H
+
+/**************************************************************************/ /*!
+ @Function     HTB_CreateDIEntry
+
+ @Description  Create the debugFS entry-point for the host-trace-buffer
+
+ @Returns      eError          internal error code, PVRSRV_OK on success
+
+ */ /**************************************************************************/
+PVRSRV_ERROR HTB_CreateDIEntry(void);
+
+/**************************************************************************/ /*!
+ @Function     HTB_DestroyDIEntry
+
+ @Description  Destroy the debugFS entry-point created by earlier
+               HTB_CreateDIEntry() call.
+*/ /**************************************************************************/
+void HTB_DestroyDIEntry(void);
+
+#endif /* HTB_DEBUG_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/htbserver.c b/drivers/gpu/drm/img/img-rogue/services/server/common/htbserver.c
new file mode 100644 (file)
index 0000000..2ada5ab
--- /dev/null
@@ -0,0 +1,857 @@
+/*************************************************************************/ /*!
+@File           htbserver.c
+@Title          Host Trace Buffer server implementation.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Host Trace Buffer provides a mechanism to log Host events to a
+                buffer in a similar way to the Firmware Trace mechanism.
+                Host Trace Buffer logs data using a Transport Layer buffer.
+                The Transport Layer and pvrtld tool provides the mechanism to
+                retrieve the trace data.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "htbserver.h"
+#include "htbuffer.h"
+#include "htbuffer_types.h"
+#include "tlstream.h"
+#include "pvrsrv_tlcommon.h"
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+#include "allocmem.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "pvrsrv_apphint.h"
+#include "oskm_apphint.h"
+
+/* size of circular buffer controlling the maximum number of concurrent PIDs logged */
+#define HTB_MAX_NUM_PID 8
+
+/* number of times to retry writing a log entry */
+#define HTB_LOG_RETRY_COUNT 5
+
+/*************************************************************************/ /*!
+  Host Trace Buffer control information structure
+*/ /**************************************************************************/
+typedef struct
+{
+       IMG_UINT32 ui32BufferSize;      /*!< Requested buffer size in bytes.
+                                         Once set this may not be changed */
+
+       HTB_OPMODE_CTRL eOpMode;        /*!< Control what trace data is dropped if
+                                         the buffer is full.
+                                         Once set this may not be changed */
+
+/*     IMG_UINT32 ui32GroupEnable; */  /*!< Flags word controlling groups to be
+                                         logged */
+
+       IMG_UINT32 ui32LogLevel;        /*!< Log level to control messages logged */
+
+       IMG_UINT32 aui32EnablePID[HTB_MAX_NUM_PID]; /*!< PIDs to enable logging for
+                                                     a specific set of processes */
+
+       IMG_UINT32 ui32PIDCount;        /*!< Current number of PIDs being logged */
+
+       IMG_UINT32 ui32PIDHead;         /*!< Head of the PID circular buffer */
+
+       HTB_LOGMODE_CTRL eLogMode;      /*!< Logging mode control */
+
+       IMG_BOOL bLogDropSignalled;     /*!< Flag indicating if a log message has
+                                         been signalled as dropped */
+
+       /* synchronisation parameters */
+       IMG_UINT64 ui64SyncOSTS;
+       IMG_UINT64 ui64SyncCRTS;
+       IMG_UINT32 ui32SyncCalcClkSpd;
+       IMG_UINT32 ui32SyncMarker;
+
+       IMG_BOOL bInitDone;             /* Set by HTBInit, reset by HTBDeInit */
+
+       POS_SPINLOCK hRepeatMarkerLock;     /*!< Spinlock used in HTBLogKM to protect global variables
+                                            (ByteCount, OSTS, CRTS, ClkSpeed)
+                                            from becoming inconsistent due to calls from
+                                            both KM and UM */
+
+       IMG_UINT32 ui32ByteCount; /* Byte count used for triggering repeat sync point */
+       /* static variables containing details of previous sync point */
+       IMG_UINT64 ui64OSTS;
+       IMG_UINT64 ui64CRTS;
+       IMG_UINT32 ui32ClkSpeed;
+
+} HTB_CTRL_INFO;
+
+
+/*************************************************************************/ /*!
+*/ /**************************************************************************/
+static const IMG_UINT32 MapFlags[] =
+{
+       0,                    /* HTB_OPMODE_UNDEF = 0 */
+       TL_OPMODE_DROP_NEWER, /* HTB_OPMODE_DROPLATEST */
+       TL_OPMODE_DROP_OLDEST,/* HTB_OPMODE_DROPOLDEST */
+       TL_OPMODE_BLOCK       /* HTB_OPMODE_BLOCK */
+};
+
+static_assert(0 == HTB_OPMODE_UNDEF,      "Unexpected value for HTB_OPMODE_UNDEF");
+static_assert(1 == HTB_OPMODE_DROPLATEST, "Unexpected value for HTB_OPMODE_DROPLATEST");
+static_assert(2 == HTB_OPMODE_DROPOLDEST, "Unexpected value for HTB_OPMODE_DROPOLDEST");
+static_assert(3 == HTB_OPMODE_BLOCK,      "Unexpected value for HTB_OPMODE_BLOCK");
+
+static_assert(1 == TL_OPMODE_DROP_NEWER,  "Unexpected value for TL_OPMODE_DROP_NEWER");
+static_assert(2 == TL_OPMODE_DROP_OLDEST, "Unexpected value for TL_OPMODE_DROP_OLDEST");
+static_assert(3 == TL_OPMODE_BLOCK,       "Unexpected value for TL_OPMODE_BLOCK");
+
+static const IMG_UINT32 g_ui32TLBaseFlags; //TL_FLAG_NO_SIGNAL_ON_COMMIT
+
+/* Minimum TL buffer size.
+ * Large enough for around 60 worst case messages or 200 average messages
+ */
+#define HTB_TL_BUFFER_SIZE_MIN (0x10000)
+
+/* Minimum concentration of HTB packets in a TL Stream is 60%.
+ * If we just put the HTB header in the TL stream (12 bytes), the TL overhead
+ * is 8 bytes for its own header, so for the smallest possible (and most
+ * inefficient) packet we have 3/5 of the buffer used for actual HTB data.
+ * This shift gives a conservative estimate of when to produce a repeat
+ * packet: halving the buffer size keeps us below the 60% boundary at which
+ * the marker may already have been overwritten, so a marker is guaranteed
+ * to remain in the stream. */
+#define HTB_MARKER_PREDICTION_THRESHOLD(val) (val >> 1)
+
+static HTB_CTRL_INFO g_sCtrl;
+static IMG_BOOL g_bConfigured = IMG_FALSE;
+static IMG_HANDLE g_hTLStream;
+
+static IMG_HANDLE hHtbDbgReqNotify;
+
+
+/************************************************************************/ /*!
+ @Function      _LookupFlags
+ @Description   Convert HTBuffer Operation mode to TLStream flags
+
+ @Input         eMode           Operation Mode
+
+ @Return        IMG_UINT32      TLStream Flags
+*/ /**************************************************************************/
+static IMG_UINT32
+_LookupFlags( HTB_OPMODE_CTRL eMode )
+{
+       return (eMode < ARRAY_SIZE(MapFlags)) ? MapFlags[eMode] : 0;
+}
+
+
+/************************************************************************/ /*!
+ @Function      _HTBLogDebugInfo
+ @Description   Debug dump handler used to dump the state of the HTB module.
+                Called for each verbosity level during a debug dump. Function
+                only prints state when called for High verbosity.
+
+ @Input         hDebugRequestHandle See PFN_DBGREQ_NOTIFY
+
+ @Input         ui32VerbLevel       See PFN_DBGREQ_NOTIFY
+
+ @Input         pfnDumpDebugPrintf  See PFN_DBGREQ_NOTIFY
+
+ @Input         pvDumpDebugFile     See PFN_DBGREQ_NOTIFY
+
+*/ /**************************************************************************/
+static void _HTBLogDebugInfo(
+               PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+               IMG_UINT32 ui32VerbLevel,
+               DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+               void *pvDumpDebugFile
+)
+{
+       PVR_UNREFERENCED_PARAMETER(hDebugRequestHandle);
+
+       if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH))
+       {
+
+               if (g_bConfigured)
+               {
+                       IMG_INT i;
+
+                       PVR_DUMPDEBUG_LOG("------[ HTB Log state: On ]------");
+
+                       PVR_DUMPDEBUG_LOG("HTB Log mode: %d", g_sCtrl.eLogMode);
+                       PVR_DUMPDEBUG_LOG("HTB Log level: %d", g_sCtrl.ui32LogLevel);
+                       PVR_DUMPDEBUG_LOG("HTB Buffer Opmode: %d", g_sCtrl.eOpMode);
+
+                       for (i=0; i < HTB_FLAG_NUM_EL; i++)
+                       {
+                               PVR_DUMPDEBUG_LOG("HTB Log group %d: %x", i, g_auiHTBGroupEnable[i]);
+                       }
+               }
+               else
+               {
+                       PVR_DUMPDEBUG_LOG("------[ HTB Log state: Off ]------");
+               }
+       }
+}
+
+static IMG_UINT32 g_ui32HTBufferSize = HTB_TL_BUFFER_SIZE_MIN;
+
+/*
+ * AppHint access routine forward declarations
+ */
+static PVRSRV_ERROR _HTBSetLogGroup(const PVRSRV_DEVICE_NODE *, const void *,
+                                    IMG_UINT32);
+static PVRSRV_ERROR _HTBReadLogGroup(const PVRSRV_DEVICE_NODE *, const void *,
+                                    IMG_UINT32 *);
+
+static PVRSRV_ERROR    _HTBSetOpMode(const PVRSRV_DEVICE_NODE *, const void *,
+                                   IMG_UINT32);
+static PVRSRV_ERROR _HTBReadOpMode(const PVRSRV_DEVICE_NODE *, const void *,
+                                    IMG_UINT32 *);
+
+static void _OnTLReaderOpenCallback(void *);
+
+/************************************************************************/ /*!
+ @Function      HTBInit
+ @Description   Allocate and initialise the Host Trace Buffer
+                The buffer size may be changed by specifying
+                HTBufferSizeInKB=xxxx
+
+ @Return        eError          Error code returned by an internal Services
+                                call, PVRSRV_OK on success
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBInit(void)
+{
+       void                    *pvAppHintState = NULL;
+       IMG_UINT32              ui32AppHintDefault;
+       IMG_UINT32              ui32BufBytes;
+       PVRSRV_ERROR    eError;
+
+       if (g_sCtrl.bInitDone)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "HTBInit: Driver already initialised"));
+               return PVRSRV_ERROR_ALREADY_EXISTS;
+       }
+
+       /*
+        * Buffer Size can be configured by specifying a value in the AppHint.
+        * This will only take effect at module load time, so there is no query
+        * or setting mechanism available.
+        */
+       PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HTBufferSizeInKB,
+                                                                               NULL,
+                                                                               NULL,
+                                           APPHINT_OF_DRIVER_NO_DEVICE,
+                                           NULL);
+
+       PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableHTBLogGroup,
+                                           _HTBReadLogGroup,
+                                           _HTBSetLogGroup,
+                                           APPHINT_OF_DRIVER_NO_DEVICE,
+                                           NULL);
+       PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HTBOperationMode,
+                                           _HTBReadOpMode,
+                                           _HTBSetOpMode,
+                                           APPHINT_OF_DRIVER_NO_DEVICE,
+                                           NULL);
+
+       /*
+        * Now get whatever values have been configured for our AppHints
+        */
+       OSCreateKMAppHintState(&pvAppHintState);
+       ui32AppHintDefault = HTB_TL_BUFFER_SIZE_MIN / 1024;
+       OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, HTBufferSizeInKB,
+                                                &ui32AppHintDefault, &g_ui32HTBufferSize);
+       OSFreeKMAppHintState(pvAppHintState);
+
+       ui32BufBytes = g_ui32HTBufferSize * 1024;
+
+       /* initialise rest of state */
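+       /* Clamp the requested size up to the minimum supported TL buffer size */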
+       g_sCtrl.ui32BufferSize =
+               (ui32BufBytes < HTB_TL_BUFFER_SIZE_MIN)
+               ? HTB_TL_BUFFER_SIZE_MIN
+               : ui32BufBytes;
+       g_sCtrl.eOpMode = HTB_OPMODE_DROPOLDEST;
+       g_sCtrl.ui32LogLevel = 0;
+       g_sCtrl.ui32PIDCount = 0;
+       g_sCtrl.ui32PIDHead = 0;
+       g_sCtrl.eLogMode = HTB_LOGMODE_ALLPID;
+       g_sCtrl.bLogDropSignalled = IMG_FALSE;
+
+       eError = OSSpinLockCreate(&g_sCtrl.hRepeatMarkerLock);
+       PVR_LOG_RETURN_IF_ERROR(eError, "OSSpinLockCreate");
+
+       eError = PVRSRVRegisterDriverDbgRequestNotify(&hHtbDbgReqNotify,
+                        _HTBLogDebugInfo, DEBUG_REQUEST_HTB, NULL);
+       PVR_LOG_IF_ERROR(eError, "PVRSRVRegisterDriverDbgRequestNotify");
+
+       g_sCtrl.bInitDone = IMG_TRUE;
+
+       /* Log the current driver parameter setting for the HTBufferSizeInKB.
+        * We do this here as there is no other infrastructure for obtaining
+        * the value.
+        */
+       if (g_ui32HTBufferSize != ui32AppHintDefault)
+       {
+               PVR_LOG(("Increasing HTBufferSize to %uKB", g_ui32HTBufferSize));
+       }
+
+
+       return PVRSRV_OK;
+}
+
+/************************************************************************/ /*!
+ @Function      HTBDeInit
+ @Description   Close the Host Trace Buffer and free all resources. Must
+                perform a no-op if already de-initialised.
+
+ @Return        eError          Error code returned by an internal Services
+                                call, PVRSRV_OK on success
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBDeInit( void )
+{
+       if (!g_sCtrl.bInitDone)
+               return PVRSRV_OK;
+
+       if (hHtbDbgReqNotify)
+       {
+               /* Not much we can do if it fails, driver unloading */
+               (void)PVRSRVUnregisterDriverDbgRequestNotify(hHtbDbgReqNotify);
+               hHtbDbgReqNotify = NULL;
+       }
+
+       if (g_hTLStream)
+       {
+               TLStreamClose( g_hTLStream );
+               g_hTLStream = NULL;
+       }
+
+       if (g_sCtrl.hRepeatMarkerLock != NULL)
+       {
+               OSSpinLockDestroy(g_sCtrl.hRepeatMarkerLock);
+               g_sCtrl.hRepeatMarkerLock = NULL;
+       }
+
+       g_sCtrl.bInitDone = IMG_FALSE;
+       return PVRSRV_OK;
+}
+
+
+/*************************************************************************/ /*!
+ AppHint interface functions
+*/ /**************************************************************************/
+static
+PVRSRV_ERROR _HTBSetLogGroup(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                             const void *psPrivate,
+                             IMG_UINT32 ui32Value)
+{
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+       return HTBControlKM(1, &ui32Value, 0, 0,
+                           HTB_LOGMODE_UNDEF, HTB_OPMODE_UNDEF);
+}
+
+static
+PVRSRV_ERROR _HTBReadLogGroup(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                              const void *psPrivate,
+                              IMG_UINT32 *pui32Value)
+{
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+       *pui32Value = g_auiHTBGroupEnable[0];
+       return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR _HTBSetOpMode(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                           const void *psPrivate,
+                           IMG_UINT32 ui32Value)
+{
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+       return HTBControlKM(0, NULL, 0, 0, HTB_LOGMODE_UNDEF, ui32Value);
+}
+
+static
+PVRSRV_ERROR _HTBReadOpMode(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                            const void *psPrivate,
+                            IMG_UINT32 *pui32Value)
+{
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+       *pui32Value = (IMG_UINT32)g_sCtrl.eOpMode;
+       return PVRSRV_OK;
+}
+
+
+static void
+_OnTLReaderOpenCallback( void *pvArg )
+{
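+       /* When a reader attaches to the stream, re-log the current FW/Host
+        * synchronisation marker and scale values so that a newly attached
+        * reader has a sync point to work from */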
+       if ( g_hTLStream )
+       {
+               IMG_UINT64 ui64Time;
+               OSClockMonotonicns64(&ui64Time);
+               (void) HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE,
+                             g_sCtrl.ui32SyncMarker,
+                             ((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)),
+                             ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)),
+                             ((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)),
+                             ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)),
+                             g_sCtrl.ui32SyncCalcClkSpd);
+       }
+
+       PVR_UNREFERENCED_PARAMETER(pvArg);
+}
+
+
+/*************************************************************************/ /*!
+ @Function      HTBControlKM
+ @Description   Update the configuration of the Host Trace Buffer
+
+ @Input         ui32NumFlagGroups Number of group enable flags words
+
+ @Input         aui32GroupEnable  Flags words controlling groups to be logged
+
+ @Input         ui32LogLevel    Log level to record
+
+ @Input         ui32EnablePID   PID to enable logging for a specific process
+
+ @Input         eLogMode        Enable logging for all or specific processes
+
+ @Input         eOpMode         Control the behaviour of the data buffer
+
+ @Return        eError          Error code returned by an internal Services
+                                call, PVRSRV_OK on success
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBControlKM(
+       const IMG_UINT32 ui32NumFlagGroups,
+       const IMG_UINT32 * aui32GroupEnable,
+       const IMG_UINT32 ui32LogLevel,
+       const IMG_UINT32 ui32EnablePID,
+       const HTB_LOGMODE_CTRL eLogMode,
+       const HTB_OPMODE_CTRL eOpMode
+)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_UINT32 ui32RetryCount = HTB_LOG_RETRY_COUNT;
+       IMG_UINT32 i;
+       IMG_UINT64 ui64Time;
+       OSClockMonotonicns64(&ui64Time);
+
+       if ( !g_bConfigured && ui32NumFlagGroups )
+       {
+               eError = TLStreamCreate(
+                               &g_hTLStream,
+                               HTB_STREAM_NAME,
+                               g_sCtrl.ui32BufferSize,
+                               _LookupFlags(HTB_OPMODE_DROPOLDEST) | g_ui32TLBaseFlags,
+                               _OnTLReaderOpenCallback, NULL, NULL, NULL);
+               PVR_LOG_RETURN_IF_ERROR(eError, "TLStreamCreate");
+               g_bConfigured = IMG_TRUE;
+       }
+
+       if (HTB_OPMODE_UNDEF != eOpMode && g_sCtrl.eOpMode != eOpMode)
+       {
+               g_sCtrl.eOpMode = eOpMode;
+               eError = TLStreamReconfigure(g_hTLStream, _LookupFlags(g_sCtrl.eOpMode) | g_ui32TLBaseFlags);
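+               /* If the stream is not ready yet, yield and retry a bounded number of times */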
+               while ( PVRSRV_ERROR_NOT_READY == eError && ui32RetryCount-- )
+               {
+                       OSReleaseThreadQuanta();
+                       eError = TLStreamReconfigure(g_hTLStream, _LookupFlags(g_sCtrl.eOpMode) | g_ui32TLBaseFlags);
+               }
+               PVR_LOG_RETURN_IF_ERROR(eError, "TLStreamReconfigure");
+       }
+
+       if ( ui32EnablePID )
+       {
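+               /* Record the PID in the circular buffer, overwriting the oldest
+                * entry once HTB_MAX_NUM_PID PIDs have been registered */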
+               g_sCtrl.aui32EnablePID[g_sCtrl.ui32PIDHead] = ui32EnablePID;
+               g_sCtrl.ui32PIDHead++;
+               g_sCtrl.ui32PIDHead %= HTB_MAX_NUM_PID;
+               g_sCtrl.ui32PIDCount++;
+               if ( g_sCtrl.ui32PIDCount > HTB_MAX_NUM_PID )
+               {
+                       g_sCtrl.ui32PIDCount = HTB_MAX_NUM_PID;
+               }
+       }
+
+       /* HTB_LOGMODE_ALLPID overrides ui32EnablePID */
+       if ( HTB_LOGMODE_ALLPID == eLogMode )
+       {
+               OSCachedMemSet(g_sCtrl.aui32EnablePID, 0, sizeof(g_sCtrl.aui32EnablePID));
+               g_sCtrl.ui32PIDCount = 0;
+               g_sCtrl.ui32PIDHead = 0;
+       }
+       if ( HTB_LOGMODE_UNDEF != eLogMode )
+       {
+               g_sCtrl.eLogMode = eLogMode;
+       }
+
+       if ( ui32NumFlagGroups )
+       {
+               for (i = 0; i < HTB_FLAG_NUM_EL && i < ui32NumFlagGroups; i++)
+               {
+                       g_auiHTBGroupEnable[i] = aui32GroupEnable[i];
+               }
+               for (; i < HTB_FLAG_NUM_EL; i++)
+               {
+                       g_auiHTBGroupEnable[i] = 0;
+               }
+       }
+
+       if ( ui32LogLevel )
+       {
+               g_sCtrl.ui32LogLevel = ui32LogLevel;
+       }
+
+       /* Dump the current configuration state */
+       eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_OPMODE, g_sCtrl.eOpMode);
+       PVR_LOG_IF_ERROR(eError, "HTBLog");
+       eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_ENABLE_GROUP, g_auiHTBGroupEnable[0]);
+       PVR_LOG_IF_ERROR(eError, "HTBLog");
+       eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_LOG_LEVEL, g_sCtrl.ui32LogLevel);
+       PVR_LOG_IF_ERROR(eError, "HTBLog");
+       eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_LOGMODE, g_sCtrl.eLogMode);
+       PVR_LOG_IF_ERROR(eError, "HTBLog");
+       for (i = 0; i < g_sCtrl.ui32PIDCount; i++)
+       {
+               eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_ENABLE_PID, g_sCtrl.aui32EnablePID[i]);
+               PVR_LOG_IF_ERROR(eError, "HTBLog");
+       }
+       /* The zero case should never be hit, as the clock speed is set whenever the power state is updated */
+       if (0 != g_sCtrl.ui32SyncMarker && 0 != g_sCtrl.ui32SyncCalcClkSpd)
+       {
+               eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE,
+                               g_sCtrl.ui32SyncMarker,
+                               ((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)),
+                               ((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)),
+                               g_sCtrl.ui32SyncCalcClkSpd);
+               PVR_LOG_IF_ERROR(eError, "HTBLog");
+       }
+
+       return eError;
+}
+
+/*************************************************************************/ /*!
+*/ /**************************************************************************/
+static IMG_BOOL
+_ValidPID( IMG_UINT32 PID )
+{
+       IMG_UINT32 i;
+
+       for (i = 0; i < g_sCtrl.ui32PIDCount; i++)
+       {
+               if ( g_sCtrl.aui32EnablePID[i] == PID )
+               {
+                       return IMG_TRUE;
+               }
+       }
+       return IMG_FALSE;
+}
+
+
+/*************************************************************************/ /*!
+ @Function      HTBSyncPartitionMarker
+ @Description   Write an HTB sync partition marker to the HTB log
+
+ @Input         ui32Marker      Marker value
+
+*/ /**************************************************************************/
+void
+HTBSyncPartitionMarker(
+       const IMG_UINT32 ui32Marker
+)
+{
+       g_sCtrl.ui32SyncMarker = ui32Marker;
+       if ( g_hTLStream )
+       {
+               PVRSRV_ERROR eError;
+               IMG_UINT64 ui64Time;
+               OSClockMonotonicns64(&ui64Time);
+
+               /* The zero case should never be hit, as the clock speed is set whenever the power state is updated */
+               if (0 != g_sCtrl.ui32SyncCalcClkSpd)
+               {
+                       eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE,
+                                       ui32Marker,
+                                       ((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)),
+                                       ((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)),
+                                       g_sCtrl.ui32SyncCalcClkSpd);
+                       PVR_WARN_IF_ERROR(eError, "HTBLog");
+               }
+       }
+}
+
+/*************************************************************************/ /*!
+ @Function      HTBSyncPartitionMarkerRepeat
+ @Description   Write an HTB sync partition marker to the HTB log, given
+                the previous values to repeat.
+
+ @Input         ui32Marker      Marker value
+ @Input         ui64SyncOSTS    previous OSTS
+ @Input         ui64SyncCRTS    previous CRTS
+ @Input         ui32ClkSpeed    previous Clock speed
+
+*/ /**************************************************************************/
+void
+HTBSyncPartitionMarkerRepeat(
+       const IMG_UINT32 ui32Marker,
+       const IMG_UINT64 ui64SyncOSTS,
+       const IMG_UINT64 ui64SyncCRTS,
+       const IMG_UINT32 ui32ClkSpeed
+)
+{
+       if ( g_hTLStream )
+       {
+               PVRSRV_ERROR eError;
+               IMG_UINT64 ui64Time;
+               OSClockMonotonicns64(&ui64Time);
+
+               /* The zero case should never be hit, as the clock speed is set whenever the power state is updated */
+               if (0 != ui32ClkSpeed)
+               {
+                       eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE,
+                                       ui32Marker,
+                                       ((IMG_UINT32)((ui64SyncOSTS>>32)&0xffffffffU)), ((IMG_UINT32)(ui64SyncOSTS&0xffffffffU)),
+                                       ((IMG_UINT32)((ui64SyncCRTS>>32)&0xffffffffU)), ((IMG_UINT32)(ui64SyncCRTS&0xffffffffU)),
+                                       ui32ClkSpeed);
+                       PVR_WARN_IF_ERROR(eError, "HTBLog");
+               }
+       }
+}
+
+/*************************************************************************/ /*!
+ @Function      HTBSyncScale
+ @Description   Write FW-Host synchronisation data to the HTB log when clocks
+                change or are re-calibrated
+
+ @Input         bLogValues      IMG_TRUE if value should be immediately written
+                                out to the log
+
+ @Input         ui64OSTS        OS Timestamp
+
+ @Input         ui64CRTS        Rogue timestamp
+
+ @Input         ui32CalcClkSpd  Calculated clock speed
+
+*/ /**************************************************************************/
+void
+HTBSyncScale(
+       const IMG_BOOL bLogValues,
+       const IMG_UINT64 ui64OSTS,
+       const IMG_UINT64 ui64CRTS,
+       const IMG_UINT32 ui32CalcClkSpd
+)
+{
+       g_sCtrl.ui64SyncOSTS = ui64OSTS;
+       g_sCtrl.ui64SyncCRTS = ui64CRTS;
+       g_sCtrl.ui32SyncCalcClkSpd = ui32CalcClkSpd;
+       if (g_hTLStream && bLogValues)
+       {
+               PVRSRV_ERROR eError;
+               IMG_UINT64 ui64Time;
+               OSClockMonotonicns64(&ui64Time);
+               eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE,
+                               g_sCtrl.ui32SyncMarker,
+                               ((IMG_UINT32)((ui64OSTS>>32)&0xffffffff)), ((IMG_UINT32)(ui64OSTS&0xffffffff)),
+                               ((IMG_UINT32)((ui64CRTS>>32)&0xffffffff)), ((IMG_UINT32)(ui64CRTS&0xffffffff)),
+                               ui32CalcClkSpd);
+               /*
+                * Don't spam the log with non-failure cases
+                */
+               PVR_WARN_IF_ERROR(eError, "HTBLog");
+       }
+}
+
+
+/*************************************************************************/ /*!
+ @Function      HTBLogKM
+ @Description   Record a Host Trace Buffer log event
+
+ @Input         PID             The PID of the process the event is associated
+                                with. This is provided as an argument rather
+                                than queried internally so that events associated
+                                with a particular process, but performed by
+                                another, can be logged correctly.
+
+ @Input         TID             The TID (thread ID) to associate with the event
+
+ @Input         ui64TimeStamp   The timestamp to be associated with this log event
+
+ @Input         SF              The log event ID
+
+ @Input         ui32NumArgs     Number of elements in the aui32Args array
+
+ @Input         aui32Args       Array of 32-bit log parameters
+
+ @Return        PVRSRV_OK       Success.
+
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBLogKM(
+               IMG_UINT32 PID,
+               IMG_UINT32 TID,
+               IMG_UINT64 ui64TimeStamp,
+               HTB_LOG_SFids SF,
+               IMG_UINT32 ui32NumArgs,
+               IMG_UINT32 * aui32Args
+)
+{
+       OS_SPINLOCK_FLAGS uiSpinLockFlags;
+       IMG_UINT32 ui32ReturnFlags = 0;
+
+       /* Local snapshot variables of global counters */
+       IMG_UINT64 ui64OSTSSnap;
+       IMG_UINT64 ui64CRTSSnap;
+       IMG_UINT32 ui32ClkSpeedSnap;
+
+       /* format of messages is: SF:PID:TID:TIMEPT1:TIMEPT2:[PARn]*
+        * Buffer is on the stack so we don't need a semaphore to guard it
+        */
+       IMG_UINT32 aui32MessageBuffer[HTB_LOG_HEADER_SIZE+HTB_LOG_MAX_PARAMS];
+
+       /* Min HTB size is HTB_TL_BUFFER_SIZE_MIN (0x10000 = 65536 bytes) and the
+        * max message/packet size is 4*(HTB_LOG_HEADER_SIZE+HTB_LOG_MAX_PARAMS)
+        * = 80 bytes, so with these constraints this design is unlikely to hit a
+        * PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED error
+        */
+       PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_ENABLED;
+       IMG_UINT32 ui32RetryCount = HTB_LOG_RETRY_COUNT;
+       IMG_UINT32 * pui32Message = aui32MessageBuffer;
+       IMG_UINT32 ui32MessageSize = 4 * (HTB_LOG_HEADER_SIZE+ui32NumArgs);
+
+       PVR_LOG_GOTO_IF_INVALID_PARAM(aui32Args != NULL, eError, ReturnError);
+       PVR_LOG_GOTO_IF_INVALID_PARAM(ui32NumArgs == HTB_SF_PARAMNUM(SF), eError, ReturnError);
+       PVR_LOG_GOTO_IF_INVALID_PARAM(ui32NumArgs <= HTB_LOG_MAX_PARAMS, eError, ReturnError);
+
+       if ( g_hTLStream
+                       && ( 0 == PID || ~0 == PID || HTB_LOGMODE_ALLPID == g_sCtrl.eLogMode || _ValidPID(PID) )
+/*                     && ( g_sCtrl.ui32GroupEnable & (0x1 << HTB_SF_GID(SF)) ) */
+/*                     && ( g_sCtrl.ui32LogLevel >= HTB_SF_LVL(SF) ) */
+                       )
+       {
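+               /* Assemble the packet: SF, PID, TID, then the 64-bit timestamp
+                * split into two 32-bit words, followed by the parameters */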
+               *pui32Message++ = SF;
+               *pui32Message++ = PID;
+               *pui32Message++ = TID;
+               *pui32Message++ = ((IMG_UINT32)((ui64TimeStamp>>32)&0xffffffff));
+               *pui32Message++ = ((IMG_UINT32)(ui64TimeStamp&0xffffffff));
+               while ( ui32NumArgs )
+               {
+                       ui32NumArgs--;
+                       pui32Message[ui32NumArgs] = aui32Args[ui32NumArgs];
+               }
+
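+               /* Write the packet to the TL stream, yielding and retrying a
+                * bounded number of times if the stream is not ready */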
+               eError = TLStreamWriteRetFlags( g_hTLStream, (IMG_UINT8*)aui32MessageBuffer, ui32MessageSize, &ui32ReturnFlags );
+               while ( PVRSRV_ERROR_NOT_READY == eError && ui32RetryCount-- )
+               {
+                       OSReleaseThreadQuanta();
+                       eError = TLStreamWriteRetFlags( g_hTLStream, (IMG_UINT8*)aui32MessageBuffer, ui32MessageSize, &ui32ReturnFlags );
+               }
+
+               if ( PVRSRV_OK == eError )
+               {
+                       g_sCtrl.bLogDropSignalled = IMG_FALSE;
+               }
+               else if ( PVRSRV_ERROR_STREAM_FULL != eError || !g_sCtrl.bLogDropSignalled )
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", "TLStreamWrite", PVRSRVGETERRORSTRING(eError), __func__));
+               }
+               if ( PVRSRV_ERROR_STREAM_FULL == eError )
+               {
+                       g_sCtrl.bLogDropSignalled = IMG_TRUE;
+               }
+
+       }
+
+       if (SF == HTB_SF_CTRL_FWSYNC_MARK_SCALE)
+       {
+               OSSpinLockAcquire(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags);
+
+               /* If a marker is being placed reset byte count from last marker */
+               g_sCtrl.ui32ByteCount = 0;
+               g_sCtrl.ui64OSTS = (IMG_UINT64)aui32Args[HTB_ARG_OSTS_PT1] << 32 | aui32Args[HTB_ARG_OSTS_PT2];
+               g_sCtrl.ui64CRTS = (IMG_UINT64)aui32Args[HTB_ARG_CRTS_PT1] << 32 | aui32Args[HTB_ARG_CRTS_PT2];
+               g_sCtrl.ui32ClkSpeed = aui32Args[HTB_ARG_CLKSPD];
+
+               OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags);
+       }
+       else
+       {
+               OSSpinLockAcquire(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags);
+               /* Increase global count */
+               g_sCtrl.ui32ByteCount += ui32MessageSize;
+
+               /* Check whether the packet has overwritten the last marker/repeat
+                  and the byte count since that marker is over half the size of the buffer */
+               if (ui32ReturnFlags & TL_FLAG_OVERWRITE_DETECTED &&
+                                g_sCtrl.ui32ByteCount > HTB_MARKER_PREDICTION_THRESHOLD(g_sCtrl.ui32BufferSize))
+               {
+                       /* Take snapshot of global variables */
+                       ui64OSTSSnap = g_sCtrl.ui64OSTS;
+                       ui64CRTSSnap = g_sCtrl.ui64CRTS;
+                       ui32ClkSpeedSnap = g_sCtrl.ui32ClkSpeed;
+                       /* Reset global variable counter */
+                       g_sCtrl.ui32ByteCount = 0;
+                       OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags);
+
+                       /* Produce a repeat marker */
+                       HTBSyncPartitionMarkerRepeat(g_sCtrl.ui32SyncMarker, ui64OSTSSnap, ui64CRTSSnap, ui32ClkSpeedSnap);
+               }
+               else
+               {
+                       OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags);
+               }
+       }
+
+ReturnError:
+       return eError;
+}
+
+/*************************************************************************/ /*!
+ @Function      HTBIsConfigured
+ @Description   Determine if HTB stream has been configured
+
+ @Input         none
+
+ @Return        IMG_FALSE       Stream has not been configured
+                IMG_TRUE        Stream has been configured
+
+*/ /**************************************************************************/
+IMG_BOOL
+HTBIsConfigured(void)
+{
+       return g_bConfigured;
+}
+/* EOF */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/info_page_km.c b/drivers/gpu/drm/img/img-rogue/services/server/common/info_page_km.c
new file mode 100644 (file)
index 0000000..77471b3
--- /dev/null
@@ -0,0 +1,138 @@
+/*************************************************************************/ /*!
+@File           info_page_km.c
+@Title          Kernel/User space shared memory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements general purpose shared memory between kernel driver
+                and user mode.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "info_page_defs.h"
+#include "info_page.h"
+#include "pvrsrv.h"
+#include "devicemem.h"
+#include "pmr.h"
+
+PVRSRV_ERROR InfoPageCreate(PVRSRV_DATA *psData)
+{
+    const PVRSRV_MEMALLOCFLAGS_T uiMemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+                                              PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                                              PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+                                              PVRSRV_MEMALLOCFLAG_CPU_UNCACHED |
+                                              PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+                                              PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(CPU_LOCAL);
+    PVRSRV_ERROR eError;
+
+    PVR_ASSERT(psData != NULL);
+
+    /* Allocate single page of memory for driver information page */
+    eError = DevmemAllocateExportable(psData->psHostMemDeviceNode,
+                                      OSGetPageSize(),
+                                      OSGetPageSize(),
+                                      OSGetPageShift(),
+                                      uiMemFlags,
+                                      "PVRSRVInfoPage",
+                                      &psData->psInfoPageMemDesc);
+    PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAllocateExportable", e0);
+
+    eError =  DevmemAcquireCpuVirtAddr(psData->psInfoPageMemDesc,
+                                       (void **) &psData->pui32InfoPage);
+    PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", e0);
+
+    /* Look-up the memory descriptor PMR handle */
+    eError = DevmemLocalGetImportHandle(psData->psInfoPageMemDesc,
+                                        (void **) &psData->psInfoPagePMR);
+    PVR_LOG_GOTO_IF_ERROR(eError, "DevmemLocalGetImportHandle", e0);
+
+    eError = OSLockCreate(&psData->hInfoPageLock);
+    PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0);
+
+    return PVRSRV_OK;
+
+e0:
+    InfoPageDestroy(psData);
+    return eError;
+}
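+
+/* Note: the e0 error path above relies on InfoPageDestroy() tolerating a
+ * partially initialised PVRSRV_DATA: each member it releases is checked
+ * against NULL first, so it is safe to call at any point after the first
+ * allocation attempt fails. */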
+
+void InfoPageDestroy(PVRSRV_DATA *psData)
+{
+    if (psData->psInfoPageMemDesc)
+    {
+        if (psData->pui32InfoPage != NULL)
+        {
+            DevmemReleaseCpuVirtAddr(psData->psInfoPageMemDesc);
+            psData->pui32InfoPage = NULL;
+        }
+
+        DevmemFree(psData->psInfoPageMemDesc);
+        psData->psInfoPageMemDesc = NULL;
+    }
+
+    if (psData->hInfoPageLock)
+    {
+        OSLockDestroy(psData->hInfoPageLock);
+        psData->hInfoPageLock = NULL;
+    }
+}
+
+PVRSRV_ERROR PVRSRVAcquireInfoPageKM(PMR **ppsPMR)
+{
+    PVRSRV_DATA *psData = PVRSRVGetPVRSRVData();
+
+    PVR_LOG_RETURN_IF_FALSE(psData->psInfoPageMemDesc != NULL, "invalid MEMDESC"
+                      " handle", PVRSRV_ERROR_INVALID_PARAMS);
+    PVR_LOG_RETURN_IF_FALSE(psData->psInfoPagePMR != NULL, "invalid PMR handle",
+                      PVRSRV_ERROR_INVALID_PARAMS);
+
+    /* Copy the PMR import handle back */
+    *ppsPMR = psData->psInfoPagePMR;
+
+    /* Mark the PMR such that no layout changes can happen
+     * This is a fixed layout created during early stages of
+     * driver loading and shouldn't change later */
+    PMR_SetLayoutFixed(psData->psInfoPagePMR, IMG_TRUE);
+
+    return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVReleaseInfoPageKM(PMR *ppsPMR)
+{
+    /* Nothing to do here as the PMR is a singleton */
+    PVR_UNREFERENCED_PARAMETER(ppsPMR);
+    return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/lists.c b/drivers/gpu/drm/img/img-rogue/services/server/common/lists.c
new file mode 100644 (file)
index 0000000..e8e7088
--- /dev/null
@@ -0,0 +1,60 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linked list shared functions implementation.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implementation of the list iterators for types shared among
+                more than one file in the services code.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "lists.h"
+
+/*===================================================================
+  LIST ITERATOR FUNCTIONS USED IN MORE THAN ONE FILE (those used just
+  once are implemented locally).
+  ===================================================================*/
+
+IMPLEMENT_LIST_ANY(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_ANY_2(PVRSRV_DEVICE_NODE, IMG_BOOL, IMG_FALSE)
+IMPLEMENT_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK)
+IMPLEMENT_LIST_ANY_VA(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK)
+IMPLEMENT_LIST_FOR_EACH(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_INSERT_TAIL(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_REMOVE(PVRSRV_DEVICE_NODE)
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/mmu_common.c b/drivers/gpu/drm/img/img-rogue/services/server/common/mmu_common.c
new file mode 100644 (file)
index 0000000..1d9bfd7
--- /dev/null
@@ -0,0 +1,4464 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common MMU Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements basic low level control of MMU.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "pvr_debug.h"
+#include "dllist.h"
+#include "osfunc.h"
+#include "allocmem.h"
+
+#if defined(SUPPORT_RGX)
+# include "rgx_memallocflags.h"
+# include "rgxmmudefs_km.h"
+#endif
+
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "htbuffer.h"
+#include "pvr_ricommon.h"
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+# include "process_stats.h"
+# include "proc_stats.h"
+#endif
+
+#if defined(PDUMP)
+#include "pdump_km.h"
+#include "pdump_physmem.h"
+#endif
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "physmem_lma.h"
+#endif
+
+/*
+Major Interfaces to other modules:
+
+Let's keep this graph up-to-date:
+
+   +-----------+
+   | devicemem |
+   +-----------+
+         |
+   +============+
+   | mmu_common |
+   +============+
+         |
+         +-----------------+
+         |                 |
+    +---------+      +----------+
+    |   pmr   |      |  device  |
+    +---------+      +----------+
+ */
+
+#include "mmu_common.h"
+#include "pmr.h"
+#include "devicemem_server_utils.h"
+
+/* #define MMU_OBJECT_REFCOUNT_DEBUGING 1 */
+#if defined(MMU_OBJECT_REFCOUNT_DEBUGING)
+#define MMU_OBJ_DBG(x) PVR_DPF(x)
+#else
+#define MMU_OBJ_DBG(x)
+#endif
+
+/*!
+ * Refcounted structure that is shared between the context and
+ * the cleanup thread items.
+ * It is used to keep track of all cleanup items and whether the creating
+ * MMU context has been destroyed and therefore is not allowed to be
+ * accessed any more.
+ *
+ * The cleanup thread is used to defer the freeing of the page tables
+ * because we have to make sure that the MMU cache has been invalidated.
+ * If we don't take care of this the MMU might partially access cached
+ * and uncached tables which might lead to inconsistencies and in the
+ * worst case to MMU pending faults on random memory.
+ */
+typedef struct _MMU_CTX_CLEANUP_DATA_
+{
+       /*! Refcount to know when this structure can be destroyed */
+       ATOMIC_T iRef;
+       /*! Protect items in this structure, especially the refcount */
+       POS_LOCK hCleanupLock;
+       /*! List of all cleanup items currently in flight */
+       DLLIST_NODE sMMUCtxCleanupItemsHead;
+       /*! Was the MMU context destroyed and should not be accessed any more? */
+       IMG_BOOL bMMUContextExists;
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+       /*! Associated OSid for this context */
+       IMG_UINT32 ui32OSid;
+#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */
+} MMU_CTX_CLEANUP_DATA;
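+
+/* Illustrative lifetime of MMU_CTX_CLEANUP_DATA (based on the code below):
+ * every cleanup item queued by _SetupCleanup_FreeMMUMapping() takes a
+ * reference on iRef, and _CleanupThread_FreeMMUMapping() drops it once the
+ * mappings it owns have been freed. The owning MMU context is assumed to
+ * hold the initial reference; whichever side drops the last one destroys
+ * hCleanupLock and frees this structure. */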
+
+
+/*!
+ * Structure holding one or more page tables that need to be
+ * freed after the MMU cache has been flushed which is signalled when
+ * the stored sync has a value that is <= the required value.
+ */
+typedef struct _MMU_CLEANUP_ITEM_
+{
+       /*! Cleanup thread data */
+       PVRSRV_CLEANUP_THREAD_WORK sCleanupThreadFn;
+       /*! List to hold all the MMU_MEMORY_MAPPINGs, i.e. page tables */
+       DLLIST_NODE sMMUMappingHead;
+       /*! Node of the cleanup item list for the context */
+       DLLIST_NODE sMMUCtxCleanupItem;
+       /* Pointer to the cleanup meta data */
+       MMU_CTX_CLEANUP_DATA *psMMUCtxCleanupData;
+       /* Sync to query if the MMU cache was flushed */
+       PVRSRV_CLIENT_SYNC_PRIM *psSync;
+       /*! The update value of the sync to signal that the cache was flushed */
+       IMG_UINT32 uiRequiredSyncVal;
+       /*! The device node needed to free the page tables */
+       PVRSRV_DEVICE_NODE *psDevNode;
+} MMU_CLEANUP_ITEM;
+
+/*!
+       All physical allocations and frees are relative to this context, so
+       we would get all the allocations of PCs, PDs, and PTs from the same
+       RA.
+
+       We have one per MMU context in case we have mixed UMA/LMA devices
+       within the same system.
+ */
+typedef struct _MMU_PHYSMEM_CONTEXT_
+{
+       /*! Associated MMU_CONTEXT */
+       struct _MMU_CONTEXT_ *psMMUContext;
+
+       /*! Parent device node */
+       PVRSRV_DEVICE_NODE *psDevNode;
+
+       /*! Refcount so we know when to free up the arena */
+       IMG_UINT32 uiNumAllocations;
+
+       /*! Arena from which physical memory is derived */
+       RA_ARENA *psPhysMemRA;
+       /*! Arena name */
+       IMG_CHAR *pszPhysMemRAName;
+       /*! Size of arena name string */
+       size_t uiPhysMemRANameAllocSize;
+
+       /*! Meta data for deferred cleanup */
+       MMU_CTX_CLEANUP_DATA *psCleanupData;
+       /*! Temporary list of all deferred MMU_MEMORY_MAPPINGs. */
+       DLLIST_NODE sTmpMMUMappingHead;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+       IMG_UINT32 ui32OSid;
+       IMG_UINT32 ui32OSidReg;
+       IMG_BOOL   bOSidAxiProt;
+#endif
+
+} MMU_PHYSMEM_CONTEXT;
+
+/*!
+       Mapping structure for MMU memory allocation
+ */
+typedef struct _MMU_MEMORY_MAPPING_
+{
+       /*! Physmem context to allocate from */
+       MMU_PHYSMEM_CONTEXT             *psContext;
+       /*! OS/system Handle for this allocation */
+       PG_HANDLE                               sMemHandle;
+       /*! CPU virtual address of this allocation */
+       void                                    *pvCpuVAddr;
+       /*! Device physical address of this allocation */
+       IMG_DEV_PHYADDR                 sDevPAddr;
+       /*! Size of this allocation */
+       size_t                                  uiSize;
+       /*! Number of current mappings of this allocation */
+       IMG_UINT32                              uiCpuVAddrRefCount;
+       /*! Node for the defer free list */
+       DLLIST_NODE                             sMMUMappingItem;
+} MMU_MEMORY_MAPPING;
+
+/*!
+       Memory descriptor for MMU objects. There can be more than one memory
+       descriptor per MMU memory allocation.
+ */
+typedef struct _MMU_MEMORY_DESC_
+{
+       /* NB: bValid is set if this descriptor describes physical
+          memory.  This allows "empty" descriptors to exist, such that we
+          can allocate them in batches. */
+       /*! Does this MMU object have physical backing */
+       IMG_BOOL                                bValid;
+       /*! Device Physical address of physical backing */
+       IMG_DEV_PHYADDR                 sDevPAddr;
+       /*! CPU virtual address of physical backing */
+       void                                    *pvCpuVAddr;
+       /*! Mapping data for this MMU object */
+       MMU_MEMORY_MAPPING              *psMapping;
+       /*! Memdesc offset into the psMapping */
+       IMG_UINT32 uiOffset;
+       /*! Size of the Memdesc */
+       IMG_UINT32 uiSize;
+} MMU_MEMORY_DESC;
+
+/*!
+       MMU levelx structure. This is generic and is used
+       for all levels (PC, PD, PT).
+ */
+typedef struct _MMU_Levelx_INFO_
+{
+       /*! The Number of entries in this level */
+       IMG_UINT32 ui32NumOfEntries;
+
+       /*! Number of times this level has been referenced. Note: For Level1 (PTE)
+           we still take/drop the reference when setting up the page tables rather
+           than at map/unmap time as this simplifies things */
+       IMG_UINT32 ui32RefCount;
+
+       /*! MemDesc for this level */
+       MMU_MEMORY_DESC sMemDesc;
+
+       /*! Array of infos for the next level. Must be last member in structure */
+       struct _MMU_Levelx_INFO_ *apsNextLevel[1];
+} MMU_Levelx_INFO;
+
+/*!
+       MMU context structure
+ */
+struct _MMU_CONTEXT_
+{
+       /*! Originating Connection */
+       CONNECTION_DATA *psConnection;
+
+       MMU_DEVICEATTRIBS *psDevAttrs;
+
+       /*! For allocation and deallocation of the physical memory where
+           the pagetables live */
+       struct _MMU_PHYSMEM_CONTEXT_ *psPhysMemCtx;
+
+#if defined(PDUMP)
+       /*! PDump context ID (required for PDump commands with virtual addresses) */
+       IMG_UINT32 uiPDumpContextID;
+
+       /*! The refcount of the PDump context ID */
+       IMG_UINT32 ui32PDumpContextIDRefCount;
+#endif
+
+       /*! MMU cache invalidation flags (only used on Volcanic driver) */
+       ATOMIC_T sCacheFlags;
+
+       /*! Lock to ensure exclusive access when manipulating the MMU context or
+        * reading and using its content
+        */
+       POS_LOCK hLock;
+
+       /*! Base level info structure. Must be last member in structure */
+       MMU_Levelx_INFO sBaseLevelInfo;
+       /* NO OTHER MEMBERS AFTER THIS STRUCTURE ! */
+};
+
+static const IMG_DEV_PHYADDR gsBadDevPhyAddr = {MMU_BAD_PHYS_ADDR};
+
+#if defined(DEBUG)
+#include "log2.h"
+#endif
+
+#if defined(DEBUG) && defined(SUPPORT_VALIDATION) && defined(__linux__)
+static IMG_UINT32 g_ui32MMULeakCounter = 0;
+static DEFINE_MUTEX(g_sMMULeakMutex);
+#endif
+
+/*****************************************************************************
+ *                          Utility functions                                *
+ *****************************************************************************/
+
+/*************************************************************************/ /*!
+@Function       _FreeMMUMapping
+
+@Description    Free a given dllist of MMU_MEMORY_MAPPINGs and the page tables
+                they represent.
+
+@Input          psDevNode           Device node
+
+@Input          psTmpMMUMappingHead List of MMU_MEMORY_MAPPINGs to free
+ */
+/*****************************************************************************/
+static void
+_FreeMMUMapping(PVRSRV_DEVICE_NODE *psDevNode,
+                PDLLIST_NODE psTmpMMUMappingHead)
+{
+       PDLLIST_NODE psNode, psNextNode;
+
+       /* Free the current list unconditionally */
+       dllist_foreach_node(psTmpMMUMappingHead,
+                           psNode,
+                           psNextNode)
+       {
+               MMU_MEMORY_MAPPING *psMapping = IMG_CONTAINER_OF(psNode,
+                                                                MMU_MEMORY_MAPPING,
+                                                                sMMUMappingItem);
+
+               PhysHeapPagesFree(psDevNode->psMMUPhysHeap, &psMapping->sMemHandle);
+               dllist_remove_node(psNode);
+               OSFreeMem(psMapping);
+       }
+}
+
+/*************************************************************************/ /*!
+@Function       _CleanupThread_FreeMMUMapping
+
+@Description    Function to be executed by the cleanup thread to free
+                MMU_MEMORY_MAPPINGs after the MMU cache has been invalidated.
+
+                This function will request an MMU cache invalidate once and
+                retry freeing the MMU_MEMORY_MAPPINGs until the invalidate
+                has been executed.
+
+                If the memory context that created this cleanup item has been
+                destroyed in the meantime this function will directly free the
+                MMU_MEMORY_MAPPINGs without waiting for any MMU cache
+                invalidation.
+
+@Input          pvData           Cleanup data in form of a MMU_CLEANUP_ITEM
+
+@Return         PVRSRV_OK if successful otherwise PVRSRV_ERROR_RETRY
+ */
+/*****************************************************************************/
+static PVRSRV_ERROR
+_CleanupThread_FreeMMUMapping(void* pvData)
+{
+       PVRSRV_ERROR eError;
+       MMU_CLEANUP_ITEM *psCleanup = (MMU_CLEANUP_ITEM *)pvData;
+       MMU_CTX_CLEANUP_DATA *psMMUCtxCleanupData = psCleanup->psMMUCtxCleanupData;
+       PVRSRV_DEVICE_NODE *psDevNode = psCleanup->psDevNode;
+       IMG_BOOL bFreeNow;
+       IMG_UINT32 uiSyncCurrent;
+       IMG_UINT32 uiSyncReq;
+
+       OSLockAcquire(psMMUCtxCleanupData->hCleanupLock);
+
+       /* Don't attempt to free anything when the context has been destroyed.
+        * Especially don't access any device-specific structures any more! */
+       if (!psMMUCtxCleanupData->bMMUContextExists)
+       {
+               OSFreeMem(psCleanup);
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_OK, e0);
+       }
+
+       if (psCleanup->psSync == NULL)
+       {
+               /* Kick to invalidate the MMU caches and get sync info */
+               eError = psDevNode->pfnMMUCacheInvalidateKick(psDevNode,
+                                                    &psCleanup->uiRequiredSyncVal);
+               if (eError != PVRSRV_OK)
+               {
+                       OSLockRelease(psMMUCtxCleanupData->hCleanupLock);
+                       return PVRSRV_ERROR_RETRY;
+               }
+               psCleanup->psSync = psDevNode->psMMUCacheSyncPrim;
+       }
+
+       uiSyncCurrent = OSReadDeviceMem32(psCleanup->psSync->pui32LinAddr);
+       uiSyncReq = psCleanup->uiRequiredSyncVal;
+
+       /* Has the invalidate executed? */
+       bFreeNow = (uiSyncCurrent >= uiSyncReq) ?
+                       /* Current has reached (or passed) the required value: done,
+                        * unless the difference is so large (> 0xF0000000) that the
+                        * counter must have wrapped and current is actually behind. */
+                       (((uiSyncCurrent - uiSyncReq) > 0xF0000000UL)? IMG_FALSE : IMG_TRUE):
+                       /* Required is ahead of current: still pending, unless the
+                        * difference is so large that the counter has wrapped. */
+                       (((uiSyncReq - uiSyncCurrent) > 0xF0000000UL)? IMG_TRUE : IMG_FALSE);
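+       /* Worked example of the wrap-around handling: with uiSyncReq ==
+        * 0xFFFFFFF0 and uiSyncCurrent == 0x00000010 the second branch sees a
+        * difference of 0xFFFFFFE0 (> 0xF0000000), so the counter is assumed
+        * to have wrapped and the invalidate is treated as complete. */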
+
+#if defined(NO_HARDWARE)
+       /* In NOHW the syncs will never be updated so just free the tables */
+       bFreeNow = IMG_TRUE;
+#endif
+       /* If the Invalidate operation is not completed, check if the operation timed out */
+       if (!bFreeNow)
+       {
+               /* If the time left for the invalidate operation to complete is
+                * within 500ms of the retry time-out, treat the operation as timed out */
+               if ((psCleanup->sCleanupThreadFn.ui32TimeEnd - psCleanup->sCleanupThreadFn.ui32TimeStart - 500) <=
+                   (OSClockms() - psCleanup->sCleanupThreadFn.ui32TimeStart))
+               {
+                       /* Treat the operation as timed out */
+                       bFreeNow = IMG_TRUE;
+               }
+       }
+
+       /* Free if the invalidate operation completed or the operation itself timed out */
+       if (bFreeNow)
+       {
+               _FreeMMUMapping(psDevNode, &psCleanup->sMMUMappingHead);
+
+               dllist_remove_node(&psCleanup->sMMUCtxCleanupItem);
+               OSFreeMem(psCleanup);
+
+               eError = PVRSRV_OK;
+       }
+       else
+       {
+               eError = PVRSRV_ERROR_RETRY;
+       }
+
+e0:
+
+       /* If this cleanup task has been successfully executed we can
+        * decrease the context cleanup data refcount. "Successfully"
+        * means here that the MMU_MEMORY_MAPPINGs have been freed,
+        * either by this cleanup task or because the MMU context has
+        * been destroyed. */
+       if (eError == PVRSRV_OK)
+       {
+               OSLockRelease(psMMUCtxCleanupData->hCleanupLock);
+
+               if (OSAtomicDecrement(&psMMUCtxCleanupData->iRef) == 0)
+               {
+                       OSLockDestroy(psMMUCtxCleanupData->hCleanupLock);
+                       OSFreeMem(psMMUCtxCleanupData);
+               }
+       }
+       else
+       {
+               OSLockRelease(psMMUCtxCleanupData->hCleanupLock);
+       }
+
+
+       return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       _SetupCleanup_FreeMMUMapping
+
+@Description    Set up a cleanup item for the cleanup thread that will
+                kick off an MMU invalidate request and free the associated
+                MMU_MEMORY_MAPPINGs once the invalidate has completed.
+
+@Input          psPhysMemCtx        The current MMU physmem context
+ */
+/*****************************************************************************/
+static void
+_SetupCleanup_FreeMMUMapping(MMU_PHYSMEM_CONTEXT *psPhysMemCtx)
+{
+
+       MMU_CLEANUP_ITEM *psCleanupItem;
+       MMU_CTX_CLEANUP_DATA *psCleanupData = psPhysMemCtx->psCleanupData;
+       PVRSRV_DEVICE_NODE *psDevNode = psPhysMemCtx->psDevNode;
+
+       if (dllist_is_empty(&psPhysMemCtx->sTmpMMUMappingHead))
+       {
+               goto e0;
+       }
+
+#if defined(PDUMP)
+       /* Free the page tables immediately in case of pdump, which avoids
+        * changing script files due to the additional invalidation kick */
+       goto e1;
+#endif
+
+       /* Don't defer the freeing if we are currently unloading the driver
+        * or if the sync has been destroyed */
+       if (PVRSRVGetPVRSRVData()->bUnload ||
+                       psDevNode->psMMUCacheSyncPrim == NULL)
+       {
+               goto e1;
+       }
+
+       /* Allocate a cleanup item */
+       psCleanupItem = OSAllocMem(sizeof(*psCleanupItem));
+       if (!psCleanupItem)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to get memory for deferred page table cleanup. "
+                               "Freeing tables immediately",
+                               __func__));
+               goto e1;
+       }
+
+       /* Set sync to NULL to indicate we did not interact with
+        * the FW yet. Kicking off an MMU cache invalidate should
+        * be done in the cleanup thread to not waste time here. */
+       psCleanupItem->psSync = NULL;
+       psCleanupItem->uiRequiredSyncVal = 0;
+       psCleanupItem->psDevNode = psDevNode;
+       psCleanupItem->psMMUCtxCleanupData = psCleanupData;
+
+       OSAtomicIncrement(&psCleanupData->iRef);
+
+       /* Move the page tables to free to the cleanup item */
+       dllist_replace_head(&psPhysMemCtx->sTmpMMUMappingHead,
+                           &psCleanupItem->sMMUMappingHead);
+
+       /* Add the cleanup item itself to the context list */
+       dllist_add_to_tail(&psCleanupData->sMMUCtxCleanupItemsHead,
+                          &psCleanupItem->sMMUCtxCleanupItem);
+
+       /* Setup the cleanup thread data and add the work item */
+       psCleanupItem->sCleanupThreadFn.pfnFree = _CleanupThread_FreeMMUMapping;
+       psCleanupItem->sCleanupThreadFn.pvData = psCleanupItem;
+       psCleanupItem->sCleanupThreadFn.bDependsOnHW = IMG_TRUE;
+       CLEANUP_THREAD_SET_RETRY_TIMEOUT(&psCleanupItem->sCleanupThreadFn,
+                                        CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT);
+
+       PVRSRVCleanupThreadAddWork(&psCleanupItem->sCleanupThreadFn);
+
+       return;
+
+e1:
+       /* Free the page tables now */
+       _FreeMMUMapping(psDevNode, &psPhysMemCtx->sTmpMMUMappingHead);
+e0:
+       return;
+}
+
+/*************************************************************************/ /*!
+@Function       _CalcPCEIdx
+
+@Description    Calculate the page catalogue index
+
+@Input          sDevVAddr           Device virtual address
+
+@Input          psDevVAddrConfig    Configuration of the virtual address
+
+@Input          bRoundUp            Round up the index
+
+@Return         The page catalogue index
+ */
+/*****************************************************************************/
+static IMG_UINT32 _CalcPCEIdx(IMG_DEV_VIRTADDR sDevVAddr,
+                              const MMU_DEVVADDR_CONFIG *psDevVAddrConfig,
+                              IMG_BOOL bRoundUp)
+{
+       IMG_DEV_VIRTADDR sTmpDevVAddr;
+       IMG_UINT32 ui32RetVal;
+
+       sTmpDevVAddr = sDevVAddr;
+
+       if (bRoundUp)
+       {
+               sTmpDevVAddr.uiAddr--;
+       }
+       ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPCIndexMask)
+                       >> psDevVAddrConfig->uiPCIndexShift);
+
+       if (bRoundUp)
+       {
+               ui32RetVal++;
+       }
+
+       return ui32RetVal;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _CalcPDEIdx
+
+@Description    Calculate the page directory index
+
+@Input          sDevVAddr           Device virtual address
+
+@Input          psDevVAddrConfig    Configuration of the virtual address
+
+@Input          bRoundUp            Round up the index
+
+@Return         The page directory index
+ */
+/*****************************************************************************/
+static IMG_UINT32 _CalcPDEIdx(IMG_DEV_VIRTADDR sDevVAddr,
+                              const MMU_DEVVADDR_CONFIG *psDevVAddrConfig,
+                              IMG_BOOL bRoundUp)
+{
+       IMG_DEV_VIRTADDR sTmpDevVAddr;
+       IMG_UINT32 ui32RetVal;
+
+       sTmpDevVAddr = sDevVAddr;
+
+       if (bRoundUp)
+       {
+               sTmpDevVAddr.uiAddr--;
+       }
+       ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPDIndexMask)
+                       >> psDevVAddrConfig->uiPDIndexShift);
+
+       if (bRoundUp)
+       {
+               ui32RetVal++;
+       }
+
+       return ui32RetVal;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _CalcPTEIdx
+
+@Description    Calculate the page entry index
+
+@Input          sDevVAddr           Device virtual address
+
+@Input          psDevVAddrConfig    Configuration of the virtual address
+
+@Input          bRoundUp            Round up the index
+
+@Return         The page entry index
+ */
+/*****************************************************************************/
+static IMG_UINT32 _CalcPTEIdx(IMG_DEV_VIRTADDR sDevVAddr,
+                              const MMU_DEVVADDR_CONFIG *psDevVAddrConfig,
+                              IMG_BOOL bRoundUp)
+{
+       IMG_DEV_VIRTADDR sTmpDevVAddr;
+       IMG_UINT32 ui32RetVal;
+
+       sTmpDevVAddr = sDevVAddr;
+       sTmpDevVAddr.uiAddr -= psDevVAddrConfig->uiOffsetInBytes;
+       if (bRoundUp)
+       {
+               sTmpDevVAddr.uiAddr--;
+       }
+       ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPTIndexMask)
+                       >> psDevVAddrConfig->uiPTIndexShift);
+
+       if (bRoundUp)
+       {
+               ui32RetVal++;
+       }
+
+       return ui32RetVal;
+}
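+
+/* Worked example (assuming the 1GB/2MB/4KB Rogue layout described in the
+ * BRN71422 comment below, with uiOffsetInBytes == 0 and bRoundUp ==
+ * IMG_FALSE): the device virtual address 0x00_0020_2000 yields PCE index 0,
+ * PDE index 1 and PTE index 2 from the three helpers above. */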
+
+#if defined(RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR)
+/*
+ * RGXMapBRN71422TargetPhysicalAddress
+ *
+ * Set-up a special MMU tree mapping with a single page that eventually points to
+ * RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR.
+ *
+ * PC entries are 32b, with the last 4 bits being 0 except for the LSB bit that should be 1 (Valid). Addr is 4KB aligned.
+ * PD entries are 64b, with addr in bits 39:5 and everything else 0 except for LSB bit that is Valid. Addr is byte aligned?
+ * PT entries are 64b, with phy addr in bits 39:12 and everything else 0 except for LSB bit that is Valid. Addr is 4KB aligned.
+ * So, we can construct the page tables in a single page like this:
+ *   0x00 : PCE (PCE index 0)
+ *   0x04 : 0x0
+ *   0x08 : PDEa (PDE index 1)
+ *   0x0C : PDEb
+ *   0x10 : PTEa (PTE index 2)
+ *   0x14 : PTEb
+ *
+ * With the PCE and the PDE pointing to this same page.
+ * The VA address that we are mapping is therefore:
+ *  VA = PCE_idx*PCE_size + PDE_idx*PDE_size + PTE_idx*PTE_size =
+ *     =      0 * 1GB     +      1 * 2MB     +      2 * 4KB     =
+ *     =        0         +    0x20_0000     +      0x2000      =
+ *     = 0x00_0020_2000
+ */
+void RGXMapBRN71422TargetPhysicalAddress(MMU_CONTEXT *psMMUContext)
+{
+       MMU_MEMORY_DESC  *psMemDesc  = &psMMUContext->sBaseLevelInfo.sMemDesc;
+       IMG_DEV_PHYADDR  sPhysAddrPC = psMemDesc->sDevPAddr;
+       IMG_UINT32       *pui32Px    = psMemDesc->pvCpuVAddr;
+       IMG_UINT64       *pui64Px    = psMemDesc->pvCpuVAddr;
+       IMG_UINT64       ui64Entry;
+
+       /* PCE points to PC */
+       ui64Entry = sPhysAddrPC.uiAddr;
+       ui64Entry = ui64Entry >> RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT;
+       ui64Entry = ui64Entry << RGX_MMUCTRL_PC_DATA_PD_BASE_SHIFT;
+       ui64Entry = ui64Entry & ~RGX_MMUCTRL_PC_DATA_PD_BASE_CLRMSK;
+       ui64Entry = ui64Entry | RGX_MMUCTRL_PC_DATA_VALID_EN;
+       pui32Px[0] = (IMG_UINT32) ui64Entry;
+
+       /* PDE points to PC */
+       ui64Entry = sPhysAddrPC.uiAddr;
+       ui64Entry = ui64Entry & ~RGX_MMUCTRL_PD_DATA_PT_BASE_CLRMSK;
+       ui64Entry = ui64Entry | RGX_MMUCTRL_PD_DATA_VALID_EN;
+       pui64Px[1] = ui64Entry;
+
+       /* PTE points to PAddr */
+       ui64Entry = RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR;
+       ui64Entry = ui64Entry & ~RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK;
+       ui64Entry = ui64Entry | RGX_MMUCTRL_PT_DATA_VALID_EN;
+       pui64Px[2] = ui64Entry;
+
+       {
+               PVRSRV_ERROR eError;
+               PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psMMUContext->psPhysMemCtx->psDevNode;
+               eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap,
+                                           &psMemDesc->psMapping->sMemHandle,
+                                           psMemDesc->uiOffset,
+                                           psMemDesc->uiSize);
+               PVR_LOG_IF_ERROR(eError, "pfnDevPxClean");
+       }
+
+       PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapping the BRN71422 workaround to target physical address 0x%" IMG_UINT64_FMTSPECx ".",
+                __func__, RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR));
+}
+#endif
+
+/*****************************************************************************
+ *         MMU memory allocation/management functions (mem desc)             *
+ *****************************************************************************/
+
+/*************************************************************************/ /*!
+@Function       _MMU_PhysMem_RAImportAlloc
+
+@Description    Imports MMU Px memory into the RA. This is where the
+                actual allocation of physical memory happens.
+
+@Input          hArenaHandle    Handle that was passed in during the
+                                creation of the RA
+
+@Input          uiSize          Size of the memory to import
+
+@Input          uiFlags         Flags that were passed to the allocation.
+
+@Output         puiBase         The address of where to insert this import
+
+@Output         puiActualSize   The actual size of the import
+
+@Output         phPriv          Handle which will be passed back when
+                                this import is freed
+
+@Return         PVRSRV_OK if import alloc was successful
+ */
+/*****************************************************************************/
+static PVRSRV_ERROR _MMU_PhysMem_RAImportAlloc(RA_PERARENA_HANDLE hArenaHandle,
+                                               RA_LENGTH_T uiSize,
+                                               RA_FLAGS_T uiFlags,
+                                               const IMG_CHAR *pszAnnotation,
+                                               RA_BASE_T *puiBase,
+                                               RA_LENGTH_T *puiActualSize,
+                                               RA_PERISPAN_HANDLE *phPriv)
+{
+       MMU_PHYSMEM_CONTEXT *psPhysMemCtx = (MMU_PHYSMEM_CONTEXT *)hArenaHandle;
+       PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psPhysMemCtx->psDevNode;
+       MMU_MEMORY_MAPPING *psMapping;
+       PVRSRV_ERROR eError;
+       IMG_UINT32 uiPid = 0;
+
+       PVR_UNREFERENCED_PARAMETER(pszAnnotation);
+       PVR_UNREFERENCED_PARAMETER(uiFlags);
+
+       PVR_ASSERT(psDevNode != NULL);
+       PVR_GOTO_IF_INVALID_PARAM(psDevNode, eError, e0);
+
+       psMapping = OSAllocMem(sizeof(MMU_MEMORY_MAPPING));
+       PVR_GOTO_IF_NOMEM(psMapping, eError, e0);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+       uiPid = psDevNode->eDevState == PVRSRV_DEVICE_STATE_INIT ?
+               PVR_SYS_ALLOC_PID : OSGetCurrentClientProcessIDKM();
+#endif
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+       /*
+        * Store the OSid in the PG_HANDLE.uiOSid field for use by the
+        * pfnDevPxFree() routine.
+        */
+       psMapping->sMemHandle.uiOSid = psPhysMemCtx->ui32OSid;
+       eError = PhysHeapPagesAllocGPV(psDevNode->psMMUPhysHeap,
+                                      TRUNCATE_64BITS_TO_SIZE_T(uiSize),
+                                      &psMapping->sMemHandle,
+                                      &psMapping->sDevPAddr,
+                                      psPhysMemCtx->ui32OSid,
+                                      uiPid);
+#else
+       eError = PhysHeapPagesAlloc(psDevNode->psMMUPhysHeap,
+                                   TRUNCATE_64BITS_TO_SIZE_T(uiSize),
+                                   &psMapping->sMemHandle,
+                                   &psMapping->sDevPAddr,
+                                   uiPid);
+#endif
+       if (eError != PVRSRV_OK)
+       {
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+               PVRSRVStatsUpdateOOMStats(PVRSRV_PROCESS_STAT_TYPE_OOM_PHYSMEM_COUNT,
+                                         OSGetCurrentClientProcessIDKM());
+#endif
+               goto e1;
+       }
+
+       psMapping->psContext = psPhysMemCtx;
+       psMapping->uiSize = TRUNCATE_64BITS_TO_SIZE_T(uiSize);
+
+       psMapping->uiCpuVAddrRefCount = 0;
+
+       *phPriv = (RA_PERISPAN_HANDLE) psMapping;
+
+       /* Note: This assumes this memory never gets paged out */
+       *puiBase = (RA_BASE_T)psMapping->sDevPAddr.uiAddr;
+       *puiActualSize = uiSize;
+
+       return PVRSRV_OK;
+
+e1:
+       OSFreeMem(psMapping);
+e0:
+       return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       _MMU_PhysMem_RAImportFree
+
+@Description    Frees MMU Px memory that was imported into the RA. The pages
+                are not released immediately; the mapping is put on the
+                physmem context's defer-free list.
+
+@Input          hArenaHandle    Handle that was passed in during the
+                                creation of the RA
+
+@Input          uiBase          Base address of the import being freed
+
+@Input          hPriv           Private data that the import alloc provided
+
+@Return         None
+ */
+/*****************************************************************************/
+static void _MMU_PhysMem_RAImportFree(RA_PERARENA_HANDLE hArenaHandle,
+                                      RA_BASE_T uiBase,
+                                      RA_PERISPAN_HANDLE hPriv)
+{
+       MMU_MEMORY_MAPPING *psMapping = (MMU_MEMORY_MAPPING *)hPriv;
+       MMU_PHYSMEM_CONTEXT *psPhysMemCtx = (MMU_PHYSMEM_CONTEXT *)hArenaHandle;
+
+       PVR_UNREFERENCED_PARAMETER(uiBase);
+
+       /* Check we have dropped all CPU mappings */
+       PVR_ASSERT(psMapping->uiCpuVAddrRefCount == 0);
+
+       /* Add mapping to defer free list */
+       psMapping->psContext = NULL;
+       dllist_add_to_tail(&psPhysMemCtx->sTmpMMUMappingHead, &psMapping->sMMUMappingItem);
+}
+
+/*************************************************************************/ /*!
+@Function       _MMU_PhysMemAlloc
+
+@Description    Allocates physical memory for MMU objects
+
+@Input          psPhysMemCtx    Physmem context to do the allocation from
+
+@Output         psMemDesc       Allocation description
+
+@Input          uiBytes         Size of the allocation in bytes
+
+@Input          uiAlignment     Alignment requirement of this allocation
+
+@Return         PVRSRV_OK if allocation was successful
+ */
+/*****************************************************************************/
+
+static PVRSRV_ERROR _MMU_PhysMemAlloc(MMU_PHYSMEM_CONTEXT *psPhysMemCtx,
+                                      MMU_MEMORY_DESC *psMemDesc,
+                                      size_t uiBytes,
+                                      size_t uiAlignment)
+{
+       PVRSRV_ERROR eError;
+       RA_BASE_T uiPhysAddr;
+
+       PVR_RETURN_IF_INVALID_PARAM(psMemDesc);
+       PVR_RETURN_IF_INVALID_PARAM(!psMemDesc->bValid);
+
+       eError = RA_Alloc(psPhysMemCtx->psPhysMemRA,
+                         uiBytes,
+                         RA_NO_IMPORT_MULTIPLIER,
+                         0, /* flags */
+                         uiAlignment,
+                         "",
+                         &uiPhysAddr,
+                         NULL,
+                         (RA_PERISPAN_HANDLE *)&psMemDesc->psMapping);
+
+       PVR_LOG_RETURN_IF_ERROR(eError, "RA_Alloc");
+
+       psMemDesc->bValid = IMG_TRUE;
+       psMemDesc->pvCpuVAddr = NULL;
+       psMemDesc->sDevPAddr.uiAddr = (IMG_UINT64) uiPhysAddr;
+
+       if (psMemDesc->psMapping->uiCpuVAddrRefCount == 0)
+       {
+               eError = PhysHeapPagesMap(psPhysMemCtx->psDevNode->psMMUPhysHeap,
+                                         &psMemDesc->psMapping->sMemHandle,
+                                         psMemDesc->psMapping->uiSize,
+                                         &psMemDesc->psMapping->sDevPAddr,
+                                         &psMemDesc->psMapping->pvCpuVAddr);
+               if (eError != PVRSRV_OK)
+               {
+                       RA_Free(psPhysMemCtx->psPhysMemRA, psMemDesc->sDevPAddr.uiAddr);
+                       return eError;
+               }
+       }
+
+       psMemDesc->psMapping->uiCpuVAddrRefCount++;
+       psMemDesc->uiOffset = (psMemDesc->sDevPAddr.uiAddr - psMemDesc->psMapping->sDevPAddr.uiAddr);
+       psMemDesc->pvCpuVAddr = (IMG_UINT8 *)psMemDesc->psMapping->pvCpuVAddr + psMemDesc->uiOffset;
+       psMemDesc->uiSize = uiBytes;
+       PVR_ASSERT(psMemDesc->pvCpuVAddr != NULL);
+
+       return PVRSRV_OK;
+}
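+
+/* Note on the refcounting above: the first MMU_MEMORY_DESC carved out of a
+ * given RA import maps the whole backing allocation into the CPU with
+ * PhysHeapPagesMap(); later descriptors from the same import only bump
+ * uiCpuVAddrRefCount and reuse that mapping at their own uiOffset. The
+ * matching unmap happens in _MMU_PhysMemFree() when the count drops to 0. */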
+
+/*************************************************************************/ /*!
+@Function       _MMU_PhysMemFree
+
+@Description    Frees physical memory for MMU objects
+
+@Input          psPhysMemCtx    Physmem context to do the free on
+
+@Input          psMemDesc       Allocation description
+
+@Return         None
+ */
+/*****************************************************************************/
+static void _MMU_PhysMemFree(MMU_PHYSMEM_CONTEXT *psPhysMemCtx,
+                             MMU_MEMORY_DESC *psMemDesc)
+{
+       RA_BASE_T uiPhysAddr;
+
+       PVR_ASSERT(psMemDesc->bValid);
+
+       if (--psMemDesc->psMapping->uiCpuVAddrRefCount == 0)
+       {
+               PhysHeapPagesUnMap(psPhysMemCtx->psDevNode->psMMUPhysHeap,
+                                  &psMemDesc->psMapping->sMemHandle,
+                                  psMemDesc->psMapping->pvCpuVAddr);
+       }
+
+       psMemDesc->pvCpuVAddr = NULL;
+
+       uiPhysAddr = psMemDesc->sDevPAddr.uiAddr;
+       RA_Free(psPhysMemCtx->psPhysMemRA, uiPhysAddr);
+
+       psMemDesc->bValid = IMG_FALSE;
+}
+
+
+/*****************************************************************************
+ *              MMU object allocation/management functions                   *
+ *****************************************************************************/
+
+static INLINE PVRSRV_ERROR _MMU_ConvertDevMemFlags(IMG_BOOL bInvalidate,
+                                                   PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+                                                   MMU_PROTFLAGS_T *uiMMUProtFlags,
+                                                   MMU_CONTEXT *psMMUContext)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_UINT32 uiGPUCacheMode;
+       PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode;
+
+       /* Do flag conversion between devmem flags and MMU generic flags */
+       if (bInvalidate == IMG_FALSE)
+       {
+               *uiMMUProtFlags |= ((uiMappingFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK)
+                               >> PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET)
+                               << MMU_PROTFLAGS_DEVICE_OFFSET;
+
+               if (PVRSRV_CHECK_GPU_READABLE(uiMappingFlags))
+               {
+                       *uiMMUProtFlags |= MMU_PROTFLAGS_READABLE;
+               }
+               if (PVRSRV_CHECK_GPU_WRITEABLE(uiMappingFlags))
+               {
+                       *uiMMUProtFlags |= MMU_PROTFLAGS_WRITEABLE;
+               }
+
+               eError = DevmemDeviceCacheMode(psDevNode, uiMappingFlags, &uiGPUCacheMode);
+               PVR_RETURN_IF_ERROR(eError);
+
+               switch (uiGPUCacheMode)
+               {
+                       case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED:
+                       case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC:
+                               break;
+                       case PVRSRV_MEMALLOCFLAG_GPU_CACHED:
+                               *uiMMUProtFlags |= MMU_PROTFLAGS_CACHED;
+                               break;
+                       default:
+                               PVR_DPF((PVR_DBG_ERROR,
+                                               "%s: Wrong parameters",
+                                               __func__));
+                               return PVRSRV_ERROR_INVALID_PARAMS;
+               }
+
+               if (DevmemDeviceCacheCoherency(psDevNode, uiMappingFlags))
+               {
+                       *uiMMUProtFlags |= MMU_PROTFLAGS_CACHE_COHERENT;
+               }
+ /* Only compile if RGX_FEATURE_MIPS_BIT_MASK is defined to avoid compilation
+  * errors on volcanic cores.
+  */
+ #if defined(SUPPORT_RGX) && defined(RGX_FEATURE_MIPS_BIT_MASK)
+               if ((psDevNode->pfnCheckDeviceFeature) &&
+                        PVRSRV_IS_FEATURE_SUPPORTED(psDevNode, MIPS))
+               {
+                       /* If we are allocating on the MMU of the firmware processor, the
+                        * cached/uncached attributes must depend on the FIRMWARE_CACHED
+                        * allocation flag.
+                        */
+                       if (psMMUContext->psDevAttrs == psDevNode->psFirmwareMMUDevAttrs)
+                       {
+                               if (uiMappingFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED))
+                               {
+                                       *uiMMUProtFlags |= MMU_PROTFLAGS_CACHED;
+                               }
+                               else
+                               {
+                                       *uiMMUProtFlags &= ~MMU_PROTFLAGS_CACHED;
+
+                               }
+                               *uiMMUProtFlags &= ~MMU_PROTFLAGS_CACHE_COHERENT;
+                       }
+               }
+#endif
+       }
+       else
+       {
+               *uiMMUProtFlags |= MMU_PROTFLAGS_INVALID;
+       }
+
+       return PVRSRV_OK;
+}
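+
+/* Illustrative conversion (non-invalidating map): GPU_READABLE |
+ * GPU_WRITEABLE allocation flags become MMU_PROTFLAGS_READABLE |
+ * MMU_PROTFLAGS_WRITEABLE, a GPU_CACHED cache mode adds
+ * MMU_PROTFLAGS_CACHED, and any device-specific flag bits are shifted to
+ * MMU_PROTFLAGS_DEVICE_OFFSET. With bInvalidate set the result is simply
+ * MMU_PROTFLAGS_INVALID. */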
+
+/*************************************************************************/ /*!
+@Function       _PxMemAlloc
+
+@Description    Allocates physical memory for MMU objects, initialises
+                and PDumps it.
+
+@Input          psMMUContext    MMU context
+
+@Input          uiNumEntries    Number of entries to allocate
+
+@Input          psConfig        MMU Px config
+
+@Input          eMMULevel       MMU level that the allocation is for
+
+@Output         psMemDesc       Description of allocation
+
+@Return         PVRSRV_OK if allocation was successful
+ */
+/*****************************************************************************/
+static PVRSRV_ERROR _PxMemAlloc(MMU_CONTEXT *psMMUContext,
+                                IMG_UINT32 uiNumEntries,
+                                const MMU_PxE_CONFIG *psConfig,
+                                MMU_LEVEL eMMULevel,
+                                MMU_MEMORY_DESC *psMemDesc,
+                                IMG_UINT32 uiLog2Align)
+{
+       PVRSRV_ERROR eError;
+       size_t uiBytes;
+       size_t uiAlign;
+       PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode;
+
+       PVR_ASSERT(psConfig->uiBytesPerEntry != 0);
+
+       uiBytes = uiNumEntries * psConfig->uiBytesPerEntry;
+       /* We need the alignment of the previous level here because its entries address the object we allocate at that alignment */
+       uiAlign = 1 << uiLog2Align;
+
+       /*
+        * If the hardware specifies an alignment requirement for a page table then
+        * it also requires that all memory up to the next aligned address is
+        * zeroed.
+        *
+        * Failing to do this can result in uninitialised data outside of the actual
+        * page table range being read by the MMU and treated as valid, e.g. the
+        * pending flag.
+        *
+        * Typically this will affect 1MiB, 2MiB PT pages which have a size of 16
+        * and 8 bytes respectively but an alignment requirement of 64 bytes each.
+        */
+       uiBytes = PVR_ALIGN(uiBytes, uiAlign);
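+       /* e.g. for the 1MiB PT case mentioned above: 2 entries of 8 bytes give
+        * uiBytes = 16, but with uiAlign = 64 PVR_ALIGN() rounds this up to 64,
+        * and the 48 padding bytes are cleared by the memset below. */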
+
+       /* allocate the object */
+       eError = _MMU_PhysMemAlloc(psMMUContext->psPhysMemCtx,
+                                  psMemDesc, uiBytes, uiAlign);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_LOG_GOTO_WITH_ERROR("_MMU_PhysMemAlloc", eError, PVRSRV_ERROR_OUT_OF_MEMORY, e0);
+       }
+
+       /*
+               Clear the object
+               Note: if any MMUs are cleared with non-zero values then will need a
+               custom clear function
+               Note: 'Cached' is wrong for the LMA + ARM64 combination, but this is
+               unlikely
+        */
+       OSCachedMemSet(psMemDesc->pvCpuVAddr, 0, uiBytes);
+
+       eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap,
+                                   &psMemDesc->psMapping->sMemHandle,
+                                   psMemDesc->uiOffset,
+                                   psMemDesc->uiSize);
+       PVR_GOTO_IF_ERROR(eError, e1);
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDevNode, "Alloc MMU object");
+
+       PDumpMMUMalloc(psDevNode,
+                      psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+                      eMMULevel,
+                      &psMemDesc->sDevPAddr,
+                      uiBytes,
+                      uiAlign,
+                      psMMUContext->psDevAttrs->eMMUType);
+
+       PDumpMMUDumpPxEntries(psDevNode,
+                             eMMULevel,
+                             psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+                             psMemDesc->pvCpuVAddr,
+                             psMemDesc->sDevPAddr,
+                             0,
+                             uiNumEntries,
+                             NULL, NULL, 0, /* pdump symbolic info is irrelevant here */
+                             psConfig->uiBytesPerEntry,
+                             uiLog2Align,
+                             psConfig->uiAddrShift,
+                             psConfig->uiAddrMask,
+                             psConfig->uiProtMask,
+                             psConfig->uiValidEnMask,
+                             0,
+                             psMMUContext->psDevAttrs->eMMUType);
+#endif
+
+       return PVRSRV_OK;
+e1:
+       _MMU_PhysMemFree(psMMUContext->psPhysMemCtx,
+                        psMemDesc);
+e0:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       _PxMemFree
+
+@Description    Frees physical memory for MMU objects, de-initialises
+                and PDumps it.
+
+@Input          psMemDesc       Description of allocation
+
+@Return         None
+ */
+/*****************************************************************************/
+
+static void _PxMemFree(MMU_CONTEXT *psMMUContext,
+                       MMU_MEMORY_DESC *psMemDesc, MMU_LEVEL eMMULevel)
+{
+#if defined(MMU_CLEARMEM_ON_FREE)
+       /*
+               Clear the MMU object
+               Note: if any MMUs are cleared with non-zero values then will need a
+               custom clear function
+               Note: 'Cached' is wrong for the LMA + ARM64 combination, but this is
+               unlikely
+        */
+       OSCachedMemSet(psMemDesc->pvCpuVAddr, 0, psMemDesc->uiSize);
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psMMUContext->psPhysMemCtx->psDevNode, "Clear MMU object before freeing it");
+#endif
+#endif/* MMU_CLEARMEM_ON_FREE */
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psMMUContext->psPhysMemCtx->psDevNode, "Free MMU object");
+       PDumpMMUFree(psMMUContext->psPhysMemCtx->psDevNode,
+                    psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+                    eMMULevel,
+                    &psMemDesc->sDevPAddr,
+                    psMMUContext->psDevAttrs->eMMUType);
+#else
+       PVR_UNREFERENCED_PARAMETER(eMMULevel);
+#endif
+       /* free the PC */
+       _MMU_PhysMemFree(psMMUContext->psPhysMemCtx, psMemDesc);
+}
+
+static INLINE PVRSRV_ERROR _SetupPTE(MMU_CONTEXT *psMMUContext,
+                                     MMU_Levelx_INFO *psLevel,
+                                     IMG_UINT32 uiIndex,
+                                     const MMU_PxE_CONFIG *psConfig,
+                                     const IMG_DEV_PHYADDR *psDevPAddr,
+                                     IMG_BOOL bUnmap,
+#if defined(PDUMP)
+                                     const IMG_CHAR *pszMemspaceName,
+                                     const IMG_CHAR *pszSymbolicAddr,
+                                     IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset,
+#endif
+                                     IMG_UINT64 uiProtFlags)
+{
+       MMU_MEMORY_DESC *psMemDesc = &psLevel->sMemDesc;
+       IMG_UINT64 ui64PxE64;
+       IMG_UINT64 uiAddr = psDevPAddr->uiAddr;
+       PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode;
+
+       if (psDevNode->pfnValidateOrTweakPhysAddrs)
+       {
+               PVRSRV_ERROR eErr = psDevNode->pfnValidateOrTweakPhysAddrs(psDevNode,
+                                                                          psMMUContext->psDevAttrs,
+                                                                          &uiAddr);
+               /* return if error */
+               PVR_LOG_RETURN_IF_ERROR(eErr, "_SetupPTE");
+       }
+
+       /* Calculate Entry */
+       ui64PxE64 =    uiAddr /* Calculate the offset to that base */
+                       >> psConfig->uiAddrLog2Align /* Shift away the useless bits, because the alignment is very coarse and we address by alignment */
+                       << psConfig->uiAddrShift /* Shift back to fit address in the Px entry */
+                       & psConfig->uiAddrMask; /* Delete unused bits */
+       ui64PxE64 |= uiProtFlags;
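+       /* Note: in C, '>>' and '<<' bind more tightly than '&', so the expression
+          above evaluates as ((uiAddr >> uiAddrLog2Align) << uiAddrShift) & uiAddrMask,
+          matching the order described by the per-line comments. */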
+
+       /* Set the entry */
+       if (psConfig->uiBytesPerEntry == 8)
+       {
+               IMG_UINT64 *pui64Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */
+
+               pui64Px[uiIndex] = ui64PxE64;
+       }
+       else if (psConfig->uiBytesPerEntry == 4)
+       {
+               IMG_UINT32 *pui32Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */
+
+               /* assert that the result fits into 32 bits before writing
+                  it into the 32-bit array with a cast */
+               PVR_ASSERT(ui64PxE64 == (ui64PxE64 & 0xffffffffU));
+
+               pui32Px[uiIndex] = (IMG_UINT32) ui64PxE64;
+       }
+       else
+       {
+               return PVRSRV_ERROR_MMU_CONFIG_IS_WRONG;
+       }
+
+
+       /* Log modification */
+       HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE,
+               HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel),
+               uiIndex, MMU_LEVEL_1,
+               HTBLOG_U64_BITS_HIGH(ui64PxE64), HTBLOG_U64_BITS_LOW(ui64PxE64),
+               !bUnmap);
+
+#if defined(PDUMP)
+       PDumpMMUDumpPxEntries(psDevNode,
+                             MMU_LEVEL_1,
+                             psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+                             psMemDesc->pvCpuVAddr,
+                             psMemDesc->sDevPAddr,
+                             uiIndex,
+                             1,
+                             pszMemspaceName,
+                             pszSymbolicAddr,
+                             uiSymbolicAddrOffset,
+                             psConfig->uiBytesPerEntry,
+                             psConfig->uiAddrLog2Align,
+                             psConfig->uiAddrShift,
+                             psConfig->uiAddrMask,
+                             psConfig->uiProtMask,
+                             psConfig->uiValidEnMask,
+                             0,
+                             psMMUContext->psDevAttrs->eMMUType);
+#endif /*PDUMP*/
+
+       return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       _SetupPxE
+
+@Description    Sets up an entry of an MMU object to point to the
+                provided address
+
+@Input          psMMUContext    MMU context to operate on
+
+@Input          psLevel         Level info for MMU object
+
+@Input          uiIndex         Index into the MMU object to setup
+
+@Input          psConfig        MMU Px config
+
+@Input          eMMULevel       Level of MMU object
+
+@Input          psDevPAddr      Address to setup the MMU object to point to
+
+@Input          pszMemspaceName Name of the PDump memory space that the entry
+                                will point to
+
+@Input          pszSymbolicAddr PDump symbolic address that the entry will
+                                point to
+
+@Input          uiProtFlags     MMU protection flags
+
+@Return         PVRSRV_OK if the setup was successful
+ */
+/*****************************************************************************/
+static PVRSRV_ERROR _SetupPxE(MMU_CONTEXT *psMMUContext,
+                              MMU_Levelx_INFO *psLevel,
+                              IMG_UINT32 uiIndex,
+                              const MMU_PxE_CONFIG *psConfig,
+                              MMU_LEVEL eMMULevel,
+                              const IMG_DEV_PHYADDR *psDevPAddr,
+#if defined(PDUMP)
+                              const IMG_CHAR *pszMemspaceName,
+                              const IMG_CHAR *pszSymbolicAddr,
+                              IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset,
+#endif
+                              MMU_PROTFLAGS_T uiProtFlags,
+                              IMG_UINT32 uiLog2DataPageSize)
+{
+       PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode;
+       MMU_MEMORY_DESC *psMemDesc = &psLevel->sMemDesc;
+
+       IMG_UINT32 (*pfnDerivePxEProt4)(IMG_UINT32);
+       IMG_UINT64 (*pfnDerivePxEProt8)(IMG_UINT32, IMG_UINT32);
+
+       if (!psDevPAddr)
+       {
+               /* Invalidate entry */
+               if (~uiProtFlags & MMU_PROTFLAGS_INVALID)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "Error, no physical address specified, but not invalidating entry"));
+                       uiProtFlags |= MMU_PROTFLAGS_INVALID;
+               }
+               psDevPAddr = &gsBadDevPhyAddr;
+       }
+       else
+       {
+               if (uiProtFlags & MMU_PROTFLAGS_INVALID)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "A physical address was specified when requesting invalidation of entry"));
+                       uiProtFlags |= MMU_PROTFLAGS_INVALID;
+               }
+       }
+
+       switch (eMMULevel)
+       {
+               case MMU_LEVEL_3:
+                       pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePCEProt4;
+                       pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePCEProt8;
+                       break;
+
+               case MMU_LEVEL_2:
+                       pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePDEProt4;
+                       pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePDEProt8;
+                       break;
+
+               case MMU_LEVEL_1:
+                       pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePTEProt4;
+                       pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePTEProt8;
+                       break;
+
+               default:
+                       PVR_DPF((PVR_DBG_ERROR, "%s: invalid MMU level", __func__));
+                       return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       /* Fill the actual Px entry with an address; the handling depends on
+          how big a PxE is in bytes */
+       switch (psConfig->uiBytesPerEntry)
+       {
+               case 4:
+               {
+                       IMG_UINT32 *pui32Px;
+                       IMG_UINT64 ui64PxE64;
+
+                       pui32Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */
+
+                       ui64PxE64 = psDevPAddr->uiAddr               /* Calculate the offset to that base */
+                                       >> psConfig->uiAddrLog2Align /* Shift away the unnecessary bits of the address */
+                                       << psConfig->uiAddrShift     /* Shift back to fit address in the Px entry */
+                                       & psConfig->uiAddrMask;      /* Delete unused higher bits */
+
+                       ui64PxE64 |= (IMG_UINT64)pfnDerivePxEProt4(uiProtFlags);
+                       /* assert that the result fits into 32 bits before writing
+                          it into the 32-bit array with a cast */
+                       PVR_ASSERT(ui64PxE64 == (ui64PxE64 & 0xffffffffU));
+
+                       /* We should never invalidate an invalid page */
+                       if (uiProtFlags & MMU_PROTFLAGS_INVALID)
+                       {
+                               PVR_ASSERT(pui32Px[uiIndex] != ui64PxE64);
+                       }
+                       pui32Px[uiIndex] = (IMG_UINT32) ui64PxE64;
+                       HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE,
+                               HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel),
+                               uiIndex, eMMULevel,
+                               HTBLOG_U64_BITS_HIGH(ui64PxE64), HTBLOG_U64_BITS_LOW(ui64PxE64),
+                               (uiProtFlags & MMU_PROTFLAGS_INVALID)? 0: 1);
+                       break;
+               }
+               case 8:
+               {
+                       IMG_UINT64 *pui64Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */
+
+                       pui64Px[uiIndex] = psDevPAddr->uiAddr             /* Calculate the offset to that base */
+                                       >> psConfig->uiAddrLog2Align  /* Shift away the unnecessary bits of the address */
+                                       << psConfig->uiAddrShift      /* Shift back to fit address in the Px entry */
+                                       & psConfig->uiAddrMask;       /* Delete unused higher bits */
+                       pui64Px[uiIndex] |= pfnDerivePxEProt8(uiProtFlags, uiLog2DataPageSize);
+
+                       HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE,
+                               HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel),
+                               uiIndex, eMMULevel,
+                               HTBLOG_U64_BITS_HIGH(pui64Px[uiIndex]), HTBLOG_U64_BITS_LOW(pui64Px[uiIndex]),
+                               (uiProtFlags & MMU_PROTFLAGS_INVALID)? 0: 1);
+                       break;
+               }
+               default:
+                       PVR_DPF((PVR_DBG_ERROR, "%s: PxE size not supported (%d) for level %d",
+                                       __func__, psConfig->uiBytesPerEntry, eMMULevel));
+
+                       return PVRSRV_ERROR_MMU_CONFIG_IS_WRONG;
+       }
+
+#if defined(PDUMP)
+       PDumpMMUDumpPxEntries(psDevNode,
+                             eMMULevel,
+                             psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+                             psMemDesc->pvCpuVAddr,
+                             psMemDesc->sDevPAddr,
+                             uiIndex,
+                             1,
+                             pszMemspaceName,
+                             pszSymbolicAddr,
+                             uiSymbolicAddrOffset,
+                             psConfig->uiBytesPerEntry,
+                             psConfig->uiAddrLog2Align,
+                             psConfig->uiAddrShift,
+                             psConfig->uiAddrMask,
+                             psConfig->uiProtMask,
+                             psConfig->uiValidEnMask,
+                             0,
+                             psMMUContext->psDevAttrs->eMMUType);
+#endif
+
+       psDevNode->pfnMMUCacheInvalidate(psDevNode, psMMUContext,
+                                        eMMULevel,
+                                        uiProtFlags & MMU_PROTFLAGS_INVALID);
+
+       return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ *                   MMU host control functions (Level Info)                 *
+ *****************************************************************************/
+
+
+/*************************************************************************/ /*!
+@Function       _MMU_FreeLevel
+
+@Description    Recursively frees the specified range of Px entries. If any
+                level has its last reference dropped then the MMU object
+                memory and the MMU_Levelx_INFO will be freed.
+
+                At each level we might be crossing a boundary from one Px to
+                another. The values in auiStartArray should be used only for
+                the first call into each level and the values in auiEndArray
+                should be used only for the last call into each level.
+                To determine whether a call is the first/last one for a level
+                we pass in bFirst and bLast. When one level recurses into the
+                next, bFirst/bLast is only propagated if it is set at the
+                current level and the loop is on its first/last iteration.
+                This gives each level the knowledge of the levels above it
+                that it requires.
+
+@Input          psMMUContext    MMU context to operate on
+
+@Input          psLevel                 Level info on which to free the
+                                        specified range
+
+@Input          auiStartArray           Array of start indexes (one for each level)
+
+@Input          auiEndArray             Array of end indexes (one for each level)
+
+@Input          auiEntriesPerPxArray    Array of number of entries for the Px
+                                        (one for each level)
+
+@Input          apsConfig               Array of PxE configs (one for each level)
+
+@Input          aeMMULevel              Array of MMU levels (one for each level)
+
+@Input          pui32CurrentLevel       Pointer to a variable which is set to our
+                                        current level
+
+@Input          uiStartIndex            Start index of the range to free
+
+@Input          uiEndIndex              End index of the range to free
+
+@Input          bFirst                  This is the first call for this level
+
+@Input          bLast                   This is the last call for this level
+
+@Return         IMG_TRUE if the last reference to psLevel was dropped
+ */
+/*****************************************************************************/
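+/* Illustrative example of the bFirst/bLast handling described above (indices
+ * assumed purely for clarity): freeing a range whose PD indices are 5..7 within
+ * a single PC entry, with bFirst and bLast both set at the PD level. The PD
+ * loop runs i = 5..7; only i == 5 passes the PT start index from auiStartArray
+ * down (bNextFirst), only i == 7 passes the PT end index from auiEndArray down
+ * (bNextLast), and the middle entry i == 6 recurses over the full PT range
+ * 0..auiEntriesPerPxArray[PT level].
+ */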
+static IMG_BOOL _MMU_FreeLevel(MMU_CONTEXT *psMMUContext,
+                               MMU_Levelx_INFO *psLevel,
+                               IMG_UINT32 auiStartArray[],
+                               IMG_UINT32 auiEndArray[],
+                               IMG_UINT32 auiEntriesPerPxArray[],
+                               const MMU_PxE_CONFIG *apsConfig[],
+                               MMU_LEVEL aeMMULevel[],
+                               IMG_UINT32 *pui32CurrentLevel,
+                               IMG_UINT32 uiStartIndex,
+                               IMG_UINT32 uiEndIndex,
+                               IMG_BOOL bFirst,
+                               IMG_BOOL bLast,
+                               IMG_UINT32 uiLog2DataPageSize)
+{
+       IMG_UINT32 uiThisLevel = *pui32CurrentLevel;
+       const MMU_PxE_CONFIG *psConfig = apsConfig[uiThisLevel];
+       IMG_UINT32 i;
+       IMG_BOOL bFreed = IMG_FALSE;
+       PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode;
+
+       /* Parameter checks */
+       PVR_ASSERT(*pui32CurrentLevel < MMU_MAX_LEVEL);
+       PVR_ASSERT(psLevel != NULL);
+
+       MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_FreeLevel: level = %d, range %d - %d, refcount = %d",
+                       aeMMULevel[uiThisLevel], uiStartIndex,
+                       uiEndIndex, psLevel->ui32RefCount));
+
+       for (i = uiStartIndex;(i < uiEndIndex) && (psLevel != NULL);i++)
+       {
+               if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
+               {
+                       MMU_Levelx_INFO *psNextLevel = psLevel->apsNextLevel[i];
+                       IMG_UINT32 uiNextStartIndex;
+                       IMG_UINT32 uiNextEndIndex;
+                       IMG_BOOL bNextFirst;
+                       IMG_BOOL bNextLast;
+
+                       /* If we're crossing a Px then the start index changes */
+                       if (bFirst && (i == uiStartIndex))
+                       {
+                               uiNextStartIndex = auiStartArray[uiThisLevel + 1];
+                               bNextFirst = IMG_TRUE;
+                       }
+                       else
+                       {
+                               uiNextStartIndex = 0;
+                               bNextFirst = IMG_FALSE;
+                       }
+
+                       /* If we're crossing a Px then the end index changes */
+                       if (bLast && (i == (uiEndIndex - 1)))
+                       {
+                               uiNextEndIndex = auiEndArray[uiThisLevel + 1];
+                               bNextLast = IMG_TRUE;
+                       }
+                       else
+                       {
+                               uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1];
+                               bNextLast = IMG_FALSE;
+                       }
+
+                       /* Recurse into the next level */
+                       (*pui32CurrentLevel)++;
+                       if (_MMU_FreeLevel(psMMUContext, psNextLevel, auiStartArray,
+                                          auiEndArray, auiEntriesPerPxArray,
+                                          apsConfig, aeMMULevel, pui32CurrentLevel,
+                                          uiNextStartIndex, uiNextEndIndex,
+                                          bNextFirst, bNextLast, uiLog2DataPageSize))
+                       {
+                               PVRSRV_ERROR eError;
+
+                               /* Un-wire the entry */
+                               eError = _SetupPxE(psMMUContext,
+                                                  psLevel,
+                                                  i,
+                                                  psConfig,
+                                                  aeMMULevel[uiThisLevel],
+                                                  NULL,
+#if defined(PDUMP)
+                                                  NULL,        /* Only required for data page */
+                                                  NULL,        /* Only required for data page */
+                                                  0,      /* Only required for data page */
+#endif
+                                                  MMU_PROTFLAGS_INVALID,
+                                                  uiLog2DataPageSize);
+
+                               PVR_ASSERT(eError == PVRSRV_OK);
+
+                               /* Free table of the level below, pointed to by this table entry.
+                                * We don't destroy the table inside the above _MMU_FreeLevel call because we
+                                * first have to set the table entry of the level above to invalid. */
+                               _PxMemFree(psMMUContext, &psNextLevel->sMemDesc, aeMMULevel[*pui32CurrentLevel]);
+                               OSFreeMem(psNextLevel);
+
+                               /* The level below us is empty, drop the refcount and clear the pointer */
+                               psLevel->ui32RefCount--;
+                               psLevel->apsNextLevel[i] = NULL;
+
+                               /* Check we haven't wrapped around */
+                               PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+                       }
+                       (*pui32CurrentLevel)--;
+               }
+               else
+               {
+                       psLevel->ui32RefCount--;
+               }
+
+               /*
+                  Free this level if it is no longer referenced, unless it's the base
+                  level in which case it's part of the MMU context and should be freed
+                  when the MMU context is freed
+                */
+               if ((psLevel->ui32RefCount == 0) && (psLevel != &psMMUContext->sBaseLevelInfo))
+               {
+                       bFreed = IMG_TRUE;
+               }
+       }
+
+       /* Level one flushing is done when we actually write the table entries */
+       if ((aeMMULevel[uiThisLevel] != MMU_LEVEL_1) && (psLevel != NULL))
+       {
+               PhysHeapPagesClean(psDevNode->psMMUPhysHeap,
+                                  &psLevel->sMemDesc.psMapping->sMemHandle,
+                                  uiStartIndex * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+                                  (uiEndIndex - uiStartIndex) * psConfig->uiBytesPerEntry);
+       }
+
+       MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_FreeLevel end: level = %d, refcount = %d",
+                       aeMMULevel[uiThisLevel], bFreed?0: (psLevel)?psLevel->ui32RefCount:-1));
+
+       return bFreed;
+}
+
+/*************************************************************************/ /*!
+@Function       _MMU_AllocLevel
+
+@Description    Recursively allocates the specified range of Px entries. Any
+                level that does not yet exist has its MMU object memory and
+                MMU_Levelx_INFO allocated and wired up on the way down.
+
+                At each level we might be crossing a boundary from one Px to
+                another. The values in auiStartArray should be used only for
+                the first call into each level and the values in auiEndArray
+                should be used only for the last call into each level.
+                To determine whether a call is the first/last one for a level
+                we pass in bFirst and bLast. When one level recurses into the
+                next, bFirst/bLast is only propagated if it is set at the
+                current level and the loop is on its first/last iteration.
+                This gives each level the knowledge of the levels above it
+                that it requires.
+
+@Input          psMMUContext    MMU context to operate on
+
+@Input          psLevel                 Level info on which to allocate the
+                                        specified range
+
+@Input          auiStartArray           Array of start indexes (one for each level)
+
+@Input          auiEndArray             Array of end indexes (one for each level)
+
+@Input          auiEntriesPerPxArray    Array of number of entries for the Px
+                                        (one for each level)
+
+@Input          apsConfig               Array of PxE configs (one for each level)
+
+@Input          aeMMULevel              Array of MMU levels (one for each level)
+
+@Input          pui32CurrentLevel       Pointer to a variable which is set to our
+                                        current level
+
+@Input          uiStartIndex            Start index of the range to allocate
+
+@Input          uiEndIndex              End index of the range to allocate
+
+@Input          bFirst                  This is the first call for this level
+
+@Input          bLast                   This is the last call for this level
+
+@Return         PVRSRV_OK if the range was successfully allocated
+ */
+/*****************************************************************************/
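+/* Reference counting summary (derived from the code below, not new behaviour):
+ * for levels above the PT, ui32RefCount counts how many apsNextLevel[] entries
+ * are populated; for a PT it counts how many of its entries have been reserved
+ * by an allocation. _MMU_FreeLevel frees a non-base level once its refcount
+ * drops back to zero.
+ */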
+static PVRSRV_ERROR _MMU_AllocLevel(MMU_CONTEXT *psMMUContext,
+                                    MMU_Levelx_INFO *psLevel,
+                                    IMG_UINT32 auiStartArray[],
+                                    IMG_UINT32 auiEndArray[],
+                                    IMG_UINT32 auiEntriesPerPxArray[],
+                                    const MMU_PxE_CONFIG *apsConfig[],
+                                    MMU_LEVEL aeMMULevel[],
+                                    IMG_UINT32 *pui32CurrentLevel,
+                                    IMG_UINT32 uiStartIndex,
+                                    IMG_UINT32 uiEndIndex,
+                                    IMG_BOOL bFirst,
+                                    IMG_BOOL bLast,
+                                    IMG_UINT32 uiLog2DataPageSize)
+{
+       IMG_UINT32 uiThisLevel = *pui32CurrentLevel; /* Starting with 0 */
+       const MMU_PxE_CONFIG *psConfig = apsConfig[uiThisLevel]; /* The table config for the current level */
+       PVRSRV_ERROR eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+       IMG_UINT32 uiAllocState = 99; /* Debug info to check what progress was made in the function. Updated during this function. */
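+       /* uiAllocState records how far a loop iteration got before failing:
+        * 0 = level-info allocation failed, 1 = Px memory allocation failed,
+        * 2 = entry setup or recursion into the next level failed. The clean-up
+        * loop at e0 uses it to decide how much to unwind; 99 means no failure
+        * has been recorded. */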
+       IMG_UINT32 i;
+       PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode;
+
+       /* Parameter check */
+       PVR_ASSERT(*pui32CurrentLevel < MMU_MAX_LEVEL);
+
+       MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_AllocLevel: level = %d, range %d - %d, refcount = %d",
+                       aeMMULevel[uiThisLevel], uiStartIndex,
+                       uiEndIndex, psLevel->ui32RefCount));
+
+       /* Go from uiStartIndex to uiEndIndex through the Px */
+       for (i = uiStartIndex;i < uiEndIndex;i++)
+       {
+               /* Only try an allocation if this is not the last level, because
+                  the PT allocation is already done when setting the entry in the PD */
+               if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
+               {
+                       IMG_UINT32 uiNextStartIndex;
+                       IMG_UINT32 uiNextEndIndex;
+                       IMG_BOOL bNextFirst;
+                       IMG_BOOL bNextLast;
+
+                       /* If the next Px level already exists, do not allocate it again */
+                       if (!psLevel->apsNextLevel[i])
+                       {
+                               MMU_Levelx_INFO *psNextLevel;
+                               IMG_UINT32 ui32AllocSize;
+                               IMG_UINT32 uiNextEntries;
+
+                               /* Allocate and setup the next level */
+                               uiNextEntries = auiEntriesPerPxArray[uiThisLevel + 1];
+                               ui32AllocSize = sizeof(MMU_Levelx_INFO);
+                               if (aeMMULevel[uiThisLevel + 1] != MMU_LEVEL_1)
+                               {
+                                       ui32AllocSize += sizeof(MMU_Levelx_INFO *) * (uiNextEntries - 1);
+                               }
+                               psNextLevel = OSAllocZMem(ui32AllocSize);
+                               if (psNextLevel == NULL)
+                               {
+                                       uiAllocState = 0;
+                                       goto e0;
+                               }
+
+                               /* Hook in this level for next time */
+                               psLevel->apsNextLevel[i] = psNextLevel;
+
+                               psNextLevel->ui32NumOfEntries = uiNextEntries;
+                               psNextLevel->ui32RefCount = 0;
+                               /* Allocate Px memory for a sub level*/
+                               eError = _PxMemAlloc(psMMUContext, uiNextEntries, apsConfig[uiThisLevel + 1],
+                                                    aeMMULevel[uiThisLevel + 1],
+                                                    &psNextLevel->sMemDesc,
+                                                    psConfig->uiAddrLog2Align);
+                               if (eError != PVRSRV_OK)
+                               {
+                                       uiAllocState = 1;
+                                       goto e0;
+                               }
+
+                               /* Wire up the entry */
+                               eError = _SetupPxE(psMMUContext,
+                                                  psLevel,
+                                                  i,
+                                                  psConfig,
+                                                  aeMMULevel[uiThisLevel],
+                                                  &psNextLevel->sMemDesc.sDevPAddr,
+#if defined(PDUMP)
+                                                  NULL, /* Only required for data page */
+                                                  NULL, /* Only required for data page */
+                                                  0,    /* Only required for data page */
+#endif
+                                                  0,
+                                                  uiLog2DataPageSize);
+
+                               if (eError != PVRSRV_OK)
+                               {
+                                       uiAllocState = 2;
+                                       goto e0;
+                               }
+
+                               psLevel->ui32RefCount++;
+                       }
+
+                       /* If we're crossing a Px then the start index changes */
+                       if (bFirst && (i == uiStartIndex))
+                       {
+                               uiNextStartIndex = auiStartArray[uiThisLevel + 1];
+                               bNextFirst = IMG_TRUE;
+                       }
+                       else
+                       {
+                               uiNextStartIndex = 0;
+                               bNextFirst = IMG_FALSE;
+                       }
+
+                       /* If we're crossing a Px then the end index changes */
+                       if (bLast && (i == (uiEndIndex - 1)))
+                       {
+                               uiNextEndIndex = auiEndArray[uiThisLevel + 1];
+                               bNextLast = IMG_TRUE;
+                       }
+                       else
+                       {
+                               uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1];
+                               bNextLast = IMG_FALSE;
+                       }
+
+                       /* Recurse into the next level */
+                       (*pui32CurrentLevel)++;
+                       eError = _MMU_AllocLevel(psMMUContext, psLevel->apsNextLevel[i],
+                                                auiStartArray,
+                                                auiEndArray,
+                                                auiEntriesPerPxArray,
+                                                apsConfig,
+                                                aeMMULevel,
+                                                pui32CurrentLevel,
+                                                uiNextStartIndex,
+                                                uiNextEndIndex,
+                                                bNextFirst,
+                                                bNextLast,
+                                                uiLog2DataPageSize);
+                       (*pui32CurrentLevel)--;
+                       if (eError != PVRSRV_OK)
+                       {
+                               uiAllocState = 2;
+                               goto e0;
+                       }
+               }
+               else
+               {
+                       /* All we need to do for level 1 is bump the refcount */
+                       psLevel->ui32RefCount++;
+               }
+               PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+       }
+
+       /* Level one flushing is done when we actually write the table entries */
+       if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
+       {
+               eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap,
+                                           &psLevel->sMemDesc.psMapping->sMemHandle,
+                                           uiStartIndex * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+                                           (uiEndIndex - uiStartIndex) * psConfig->uiBytesPerEntry);
+               PVR_GOTO_IF_ERROR(eError, e0);
+       }
+
+       MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_AllocLevel end: level = %d, refcount = %d",
+                       aeMMULevel[uiThisLevel], psLevel->ui32RefCount));
+       return PVRSRV_OK;
+
+e0:
+       /* Confirm that we've not come down this route unexpectedly */
+       PVR_ASSERT(uiAllocState!=99);
+       PVR_DPF((PVR_DBG_ERROR, "_MMU_AllocLevel: Error %d allocating Px for level %d in stage %d"
+                       ,eError, aeMMULevel[uiThisLevel], uiAllocState));
+
+       /* The index variable i is deliberately not re-initialised here.
+        * This clean-up loop de-initialises, in reverse order, whatever was
+        * already initialised before the failure, so i already holds the
+        * correct starting value.
+        */
+       for (/* i already set */; i>= uiStartIndex && i< uiEndIndex; i--)
+       {
+               switch (uiAllocState)
+               {
+                       IMG_UINT32 uiNextStartIndex;
+                       IMG_UINT32 uiNextEndIndex;
+                       IMG_BOOL bNextFirst;
+                       IMG_BOOL bNextLast;
+
+                       case 3:
+                               /* If we're crossing a Px then the start index changes */
+                               if (bFirst && (i == uiStartIndex))
+                               {
+                                       uiNextStartIndex = auiStartArray[uiThisLevel + 1];
+                                       bNextFirst = IMG_TRUE;
+                               }
+                               else
+                               {
+                                       uiNextStartIndex = 0;
+                                       bNextFirst = IMG_FALSE;
+                               }
+
+                               /* If we're crossing a Px then the end index changes */
+                               if (bLast && (i == (uiEndIndex - 1)))
+                               {
+                                       uiNextEndIndex = auiEndArray[uiThisLevel + 1];
+                                       bNextLast = IMG_TRUE;
+                               }
+                               else
+                               {
+                                       uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1];
+                                       bNextLast = IMG_FALSE;
+                               }
+
+                               if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
+                               {
+                                       (*pui32CurrentLevel)++;
+                                       if (_MMU_FreeLevel(psMMUContext, psLevel->apsNextLevel[i],
+                                                          auiStartArray, auiEndArray,
+                                                          auiEntriesPerPxArray, apsConfig,
+                                                          aeMMULevel, pui32CurrentLevel,
+                                                          uiNextStartIndex, uiNextEndIndex,
+                                                          bNextFirst, bNextLast, uiLog2DataPageSize))
+                                       {
+                                               psLevel->ui32RefCount--;
+                                               psLevel->apsNextLevel[i] = NULL;
+
+                                               /* Check we haven't wrapped around */
+                                               PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+                                       }
+                                       (*pui32CurrentLevel)--;
+                               }
+                               else
+                               {
+                                       /* We should never come down this path, but it's here
+                                          for completeness */
+                                       psLevel->ui32RefCount--;
+
+                                       /* Check we haven't wrapped around */
+                                       PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+                               }
+
+                               __fallthrough;
+                       case 2:
+                               if (psLevel->apsNextLevel[i] != NULL  &&
+                                               psLevel->apsNextLevel[i]->ui32RefCount == 0)
+                               {
+                                       _PxMemFree(psMMUContext, &psLevel->sMemDesc,
+                                                  aeMMULevel[uiThisLevel]);
+                               }
+
+                               __fallthrough;
+                       case 1:
+                               if (psLevel->apsNextLevel[i] != NULL  &&
+                                               psLevel->apsNextLevel[i]->ui32RefCount == 0)
+                               {
+                                       OSFreeMem(psLevel->apsNextLevel[i]);
+                                       psLevel->apsNextLevel[i] = NULL;
+                               }
+
+                               __fallthrough;
+                       case 0:
+                               uiAllocState = 3;
+                               break;
+               }
+       }
+       return eError;
+}
+
+/*****************************************************************************
+ *                   MMU page table functions                                *
+ *****************************************************************************/
+
+/*************************************************************************/ /*!
+@Function       _MMU_GetLevelData
+
+@Description    Gets all the level data and calculates the indexes for the
+                specified address range
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          sDevVAddrStart          Start device virtual address
+
+@Input          sDevVAddrEnd            End device virtual address
+
+@Input          uiLog2DataPageSize      Log2 of the page size to use
+
+@Input          auiStartArray           Array of start indexes (one for each level)
+
+@Input          auiEndArray             Array of end indexes (one for each level)
+
+@Input          auiEntriesPerPx         Array of number of entries for the Px
+                                        (one for each level)
+
+@Input          apsConfig               Array of PxE configs (one for each level)
+
+@Input          aeMMULevel              Array of MMU levels (one for each level)
+
+@Input          ppsMMUDevVAddrConfig    Device virtual address config
+
+@Input          phPriv                  Private data of the page size config
+
+@Return         None
+ */
+/*****************************************************************************/
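+/* Illustrative example (derived from the function body below): for a
+ * three-level configuration, i.e. PC, PD and PT index masks all non-zero, the
+ * arrays are filled as i = 0 -> PC indices (MMU_LEVEL_3), i = 1 -> PD indices
+ * (MMU_LEVEL_2) and i = 2 -> PT indices (MMU_LEVEL_1). If the PC index mask is
+ * zero the PC step is skipped and the PD indices land at i = 0 instead.
+ */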
+static void _MMU_GetLevelData(MMU_CONTEXT *psMMUContext,
+                              IMG_DEV_VIRTADDR sDevVAddrStart,
+                              IMG_DEV_VIRTADDR sDevVAddrEnd,
+                              IMG_UINT32 uiLog2DataPageSize,
+                              IMG_UINT32 auiStartArray[],
+                              IMG_UINT32 auiEndArray[],
+                              IMG_UINT32 auiEntriesPerPx[],
+                              const MMU_PxE_CONFIG *apsConfig[],
+                              MMU_LEVEL aeMMULevel[],
+                              const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+                              IMG_HANDLE *phPriv)
+{
+       const MMU_PxE_CONFIG *psMMUPDEConfig;
+       const MMU_PxE_CONFIG *psMMUPTEConfig;
+       const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+       MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+       PVRSRV_ERROR eError;
+       IMG_UINT32 i = 0;
+
+       eError = psDevAttrs->pfnGetPageSizeConfiguration(uiLog2DataPageSize,
+                                                        &psMMUPDEConfig,
+                                                        &psMMUPTEConfig,
+                                                        ppsMMUDevVAddrConfig,
+                                                        phPriv);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+       psDevVAddrConfig = *ppsMMUDevVAddrConfig;
+
+       if (psDevVAddrConfig->uiPCIndexMask != 0)
+       {
+               auiStartArray[i] = _CalcPCEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE);
+               auiEndArray[i] = _CalcPCEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE);
+               auiEntriesPerPx[i] = psDevVAddrConfig->uiNumEntriesPC;
+               apsConfig[i] = psDevAttrs->psBaseConfig;
+               aeMMULevel[i] = MMU_LEVEL_3;
+               i++;
+       }
+
+       if (psDevVAddrConfig->uiPDIndexMask != 0)
+       {
+               auiStartArray[i] = _CalcPDEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE);
+               auiEndArray[i] = _CalcPDEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE);
+               auiEntriesPerPx[i] = psDevVAddrConfig->uiNumEntriesPD;
+               if (i == 0)
+               {
+                       apsConfig[i] = psDevAttrs->psBaseConfig;
+               }
+               else
+               {
+                       apsConfig[i] = psMMUPDEConfig;
+               }
+               aeMMULevel[i] = MMU_LEVEL_2;
+               i++;
+       }
+
+       /*
+               There is always a PT entry, so the behaviour here differs slightly
+               from the levels above. E.g. for 2 MB RGX pages the uiPTIndexMask is
+               0x0000000000 but there is still a PT with one entry.
+        */
+       auiStartArray[i] = _CalcPTEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE);
+       if (psDevVAddrConfig->uiPTIndexMask !=0)
+       {
+               auiEndArray[i] = _CalcPTEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE);
+       }
+       else
+       {
+               /*
+                       If the PTE mask is zero it means there is only 1 PTE and thus,
+                       as an exclusive bound, the end array index is equal to the
+                       start index + 1.
+                */
+
+               auiEndArray[i] = auiStartArray[i] + 1;
+       }
+
+       auiEntriesPerPx[i] = psDevVAddrConfig->uiNumEntriesPT;
+
+       if (i == 0)
+       {
+               apsConfig[i] = psDevAttrs->psBaseConfig;
+       }
+       else
+       {
+               apsConfig[i] = psMMUPTEConfig;
+       }
+       aeMMULevel[i] = MMU_LEVEL_1;
+}
+
+static void _MMU_PutLevelData(MMU_CONTEXT *psMMUContext, IMG_HANDLE hPriv)
+{
+       MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+
+       psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
+}
+
+/*************************************************************************/ /*!
+@Function       _AllocPageTables
+
+@Description    Allocate page tables and any higher level MMU objects required
+                for the specified virtual range
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          sDevVAddrStart          Start device virtual address
+
+@Input          sDevVAddrEnd            End device virtual address
+
+@Input          uiLog2DataPageSize      Page size of the data pages
+
+@Return         PVRSRV_OK if the allocation was successful
+ */
+/*****************************************************************************/
+static PVRSRV_ERROR
+_AllocPageTables(MMU_CONTEXT *psMMUContext,
+                 IMG_DEV_VIRTADDR sDevVAddrStart,
+                 IMG_DEV_VIRTADDR sDevVAddrEnd,
+                 IMG_UINT32 uiLog2DataPageSize)
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 auiStartArray[MMU_MAX_LEVEL];
+       IMG_UINT32 auiEndArray[MMU_MAX_LEVEL];
+       IMG_UINT32 auiEntriesPerPx[MMU_MAX_LEVEL];
+       MMU_LEVEL aeMMULevel[MMU_MAX_LEVEL];
+       const MMU_PxE_CONFIG *apsConfig[MMU_MAX_LEVEL];
+       const MMU_DEVVADDR_CONFIG       *psDevVAddrConfig;
+       IMG_HANDLE hPriv;
+       IMG_UINT32 ui32CurrentLevel = 0;
+
+       PVR_DPF((PVR_DBG_ALLOC,
+                       "_AllocPageTables: vaddr range: "IMG_DEV_VIRTADDR_FMTSPEC":"IMG_DEV_VIRTADDR_FMTSPEC,
+                       sDevVAddrStart.uiAddr,
+                       sDevVAddrEnd.uiAddr
+       ));
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psMMUContext->psPhysMemCtx->psDevNode,
+                    "Allocating page tables for %"IMG_UINT64_FMTSPEC" bytes virtual range: "
+                    IMG_DEV_VIRTADDR_FMTSPEC":"IMG_DEV_VIRTADDR_FMTSPEC,
+                    (IMG_UINT64)sDevVAddrEnd.uiAddr - (IMG_UINT64)sDevVAddrStart.uiAddr,
+                    (IMG_UINT64)sDevVAddrStart.uiAddr,
+                    (IMG_UINT64)sDevVAddrEnd.uiAddr);
+#endif
+
+       _MMU_GetLevelData(psMMUContext, sDevVAddrStart, sDevVAddrEnd,
+                         (IMG_UINT32) uiLog2DataPageSize, auiStartArray, auiEndArray,
+                         auiEntriesPerPx, apsConfig, aeMMULevel,
+                         &psDevVAddrConfig, &hPriv);
+
+       HTBLOGK(HTB_SF_MMU_PAGE_OP_ALLOC,
+               HTBLOG_U64_BITS_HIGH(sDevVAddrStart.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrStart.uiAddr),
+               HTBLOG_U64_BITS_HIGH(sDevVAddrEnd.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrEnd.uiAddr));
+
+       eError = _MMU_AllocLevel(psMMUContext, &psMMUContext->sBaseLevelInfo,
+                                auiStartArray, auiEndArray, auiEntriesPerPx,
+                                apsConfig, aeMMULevel, &ui32CurrentLevel,
+                                auiStartArray[0], auiEndArray[0],
+                                IMG_TRUE, IMG_TRUE, uiLog2DataPageSize);
+
+       _MMU_PutLevelData(psMMUContext, hPriv);
+
+       return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       _FreePageTables
+
+@Description    Free page tables and any higher level MMU objects that are no
+                longer referenced for the specified virtual range.
+                This will fill the temporary free list of the MMU context which
+                needs cleanup after the call.
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          sDevVAddrStart          Start device virtual address
+
+@Input          sDevVAddrEnd            End device virtual address
+
+@Input          uiLog2DataPageSize      Page size of the data pages
+
+@Return         None
+ */
+/*****************************************************************************/
+static void _FreePageTables(MMU_CONTEXT *psMMUContext,
+                            IMG_DEV_VIRTADDR sDevVAddrStart,
+                            IMG_DEV_VIRTADDR sDevVAddrEnd,
+                            IMG_UINT32 uiLog2DataPageSize)
+{
+       IMG_UINT32 auiStartArray[MMU_MAX_LEVEL];
+       IMG_UINT32 auiEndArray[MMU_MAX_LEVEL];
+       IMG_UINT32 auiEntriesPerPx[MMU_MAX_LEVEL];
+       MMU_LEVEL aeMMULevel[MMU_MAX_LEVEL];
+       const MMU_PxE_CONFIG *apsConfig[MMU_MAX_LEVEL];
+       const MMU_DEVVADDR_CONFIG       *psDevVAddrConfig;
+       IMG_UINT32 ui32CurrentLevel = 0;
+       IMG_HANDLE hPriv;
+
+       PVR_DPF((PVR_DBG_ALLOC,
+                       "_FreePageTables: vaddr range: "IMG_DEV_VIRTADDR_FMTSPEC":"IMG_DEV_VIRTADDR_FMTSPEC,
+                       sDevVAddrStart.uiAddr,
+                       sDevVAddrEnd.uiAddr
+       ));
+
+       _MMU_GetLevelData(psMMUContext, sDevVAddrStart, sDevVAddrEnd,
+                         uiLog2DataPageSize, auiStartArray, auiEndArray,
+                         auiEntriesPerPx, apsConfig, aeMMULevel,
+                         &psDevVAddrConfig, &hPriv);
+
+       HTBLOGK(HTB_SF_MMU_PAGE_OP_FREE,
+               HTBLOG_U64_BITS_HIGH(sDevVAddrStart.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrStart.uiAddr),
+               HTBLOG_U64_BITS_HIGH(sDevVAddrEnd.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrEnd.uiAddr));
+
+       /* ignoring return code, in this case there should be no references
+        * to the level anymore, and at this stage there is nothing to do with
+        * the return status */
+       (void) _MMU_FreeLevel(psMMUContext, &psMMUContext->sBaseLevelInfo,
+                             auiStartArray, auiEndArray, auiEntriesPerPx,
+                             apsConfig, aeMMULevel, &ui32CurrentLevel,
+                             auiStartArray[0], auiEndArray[0],
+                             IMG_TRUE, IMG_TRUE, uiLog2DataPageSize);
+
+       _MMU_PutLevelData(psMMUContext, hPriv);
+}
+
+
+/*************************************************************************/ /*!
+@Function       _MMU_GetPTInfo
+
+@Description    Get the PT level information and PT entry index for the specified
+                virtual address
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          sDevVAddr               Device virtual address to get the PTE info
+                                        from.
+
+@Input          psDevVAddrConfig        The current virtual address config obtained
+                                        by another function call before.
+
+@Output         ppsLevel                Level info of the PT
+
+@Output         pui32PTEIndex           Index into the PT the address corresponds to
+
+@Return         None
+ */
+/*****************************************************************************/
+static INLINE void _MMU_GetPTInfo(MMU_CONTEXT                *psMMUContext,
+                                  IMG_DEV_VIRTADDR            sDevVAddr,
+                                  const MMU_DEVVADDR_CONFIG  *psDevVAddrConfig,
+                                  MMU_Levelx_INFO           **ppsLevel,
+                                  IMG_UINT32                 *pui32PTEIndex)
+{
+       MMU_Levelx_INFO *psLocalLevel = NULL;
+       MMU_LEVEL eMMULevel = psMMUContext->psDevAttrs->eTopLevel;
+       IMG_UINT32 uiPCEIndex;
+       IMG_UINT32 uiPDEIndex;
+
+       if ((eMMULevel <= MMU_LEVEL_0) || (eMMULevel >= MMU_LEVEL_LAST))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Invalid MMU level", __func__));
+               PVR_ASSERT(0);
+       }
+
+       for (; eMMULevel > MMU_LEVEL_0; eMMULevel--)
+       {
+               if (eMMULevel == MMU_LEVEL_3)
+               {
+                       /* find the page directory containing the PCE */
+                       uiPCEIndex = _CalcPCEIdx (sDevVAddr, psDevVAddrConfig,
+                                                 IMG_FALSE);
+                       psLocalLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiPCEIndex];
+               }
+
+               if (eMMULevel == MMU_LEVEL_2)
+               {
+                       /* find the page table containing the PDE */
+                       uiPDEIndex = _CalcPDEIdx (sDevVAddr, psDevVAddrConfig,
+                                                 IMG_FALSE);
+                       if (psLocalLevel != NULL)
+                       {
+                               psLocalLevel = psLocalLevel->apsNextLevel[uiPDEIndex];
+                       }
+                       else
+                       {
+                               psLocalLevel =
+                                               psMMUContext->sBaseLevelInfo.apsNextLevel[uiPDEIndex];
+                       }
+               }
+
+               if (eMMULevel == MMU_LEVEL_1)
+               {
+                       /* find PTE index into page table */
+                       *pui32PTEIndex = _CalcPTEIdx (sDevVAddr, psDevVAddrConfig,
+                                                     IMG_FALSE);
+                       if (psLocalLevel == NULL)
+                       {
+                               psLocalLevel = &psMMUContext->sBaseLevelInfo;
+                       }
+               }
+       }
+       *ppsLevel = psLocalLevel;
+}
+
+/*************************************************************************/ /*!
+@Function       _MMU_GetPTConfig
+
+@Description    Get the level config. Call _MMU_PutPTConfig after use!
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          uiLog2DataPageSize      Log 2 of the page size
+
+@Output         ppsConfig               Config of the PTE
+
+@Output         phPriv                  Private data handle to be passed back
+                                        when the info is put
+
+@Output         ppsDevVAddrConfig       Config of the device virtual addresses
+
+@Return         None
+ */
+/*****************************************************************************/
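+/* Typical usage pattern (an illustrative sketch only; the local variable names
+ * are hypothetical):
+ *
+ *     const MMU_PxE_CONFIG *psConfig;
+ *     const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+ *     IMG_HANDLE hPriv;
+ *
+ *     _MMU_GetPTConfig(psMMUContext, uiLog2DataPageSize, &psConfig, &hPriv, &psDevVAddrConfig);
+ *     ... use psConfig / psDevVAddrConfig to locate and program PTEs ...
+ *     _MMU_PutPTConfig(psMMUContext, hPriv);
+ */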
+static INLINE void _MMU_GetPTConfig(MMU_CONTEXT               *psMMUContext,
+                                    IMG_UINT32                  uiLog2DataPageSize,
+                                    const MMU_PxE_CONFIG      **ppsConfig,
+                                    IMG_HANDLE                 *phPriv,
+                                    const MMU_DEVVADDR_CONFIG **ppsDevVAddrConfig)
+{
+       MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+       const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+       const MMU_PxE_CONFIG *psPDEConfig;
+       const MMU_PxE_CONFIG *psPTEConfig;
+
+       if (psDevAttrs->pfnGetPageSizeConfiguration(uiLog2DataPageSize,
+                                                   &psPDEConfig,
+                                                   &psPTEConfig,
+                                                   &psDevVAddrConfig,
+                                                   phPriv) != PVRSRV_OK)
+       {
+               /*
+                  There should be no way we got here unless uiLog2DataPageSize
+                  has changed after the MMU_Alloc call (in which case it's a bug in
+                  the MM code)
+                */
+               PVR_DPF((PVR_DBG_ERROR, "_MMU_GetPTConfig: Could not get valid page size config"));
+               PVR_ASSERT(0);
+       }
+
+       *ppsConfig = psPTEConfig;
+       *ppsDevVAddrConfig = psDevVAddrConfig;
+}
+
+/*************************************************************************/ /*!
+@Function       _MMU_PutPTConfig
+
+@Description    Put the level info. Has to be called after _MMU_GetPTConfig to
+                ensure correct refcounting.
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          phPriv                  Private data handle created by
+                                        _MMU_GetPTConfig.
+
+@Return         None
+ */
+/*****************************************************************************/
+static INLINE void _MMU_PutPTConfig(MMU_CONTEXT *psMMUContext,
+                                    IMG_HANDLE hPriv)
+{
+       MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+
+       if (psDevAttrs->pfnPutPageSizeConfiguration(hPriv) != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Could not put page size config",
+                               __func__));
+               PVR_ASSERT(0);
+       }
+}
+
+
+/*****************************************************************************
+ *                     Public interface functions                            *
+ *****************************************************************************/
+
+/*
+       MMU_ContextCreate
+ */
+PVRSRV_ERROR
+MMU_ContextCreate(CONNECTION_DATA *psConnection,
+                  PVRSRV_DEVICE_NODE *psDevNode,
+                  MMU_CONTEXT **ppsMMUContext,
+                  MMU_DEVICEATTRIBS *psDevAttrs)
+{
+       MMU_CONTEXT *psMMUContext;
+       const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+       const MMU_PxE_CONFIG *psConfig;
+       MMU_PHYSMEM_CONTEXT *psPhysMemCtx;
+       IMG_UINT32 ui32BaseObjects;
+       IMG_UINT32 ui32Size;
+       IMG_CHAR sBuf[40];
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDevNode, "MMU context create");
+#endif
+
+       psConfig = psDevAttrs->psBaseConfig;
+       psDevVAddrConfig = psDevAttrs->psTopLevelDevVAddrConfig;
+
+       switch (psDevAttrs->eTopLevel)
+       {
+               case MMU_LEVEL_3:
+                       ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPC;
+                       break;
+
+               case MMU_LEVEL_2:
+                       ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPD;
+                       break;
+
+               case MMU_LEVEL_1:
+                       ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPT;
+                       break;
+
+               default:
+                       PVR_LOG_GOTO_WITH_ERROR("psDevAttrs->eTopLevel", eError, PVRSRV_ERROR_INVALID_PARAMS, e0);
+       }
+
+       /* Allocate the MMU context together with space for the base level's Px info pointers */
+       ui32Size = sizeof(MMU_CONTEXT) +
+                       ((ui32BaseObjects - 1) * sizeof(MMU_Levelx_INFO *));
+
+       psMMUContext = OSAllocZMem(ui32Size);
+       PVR_LOG_GOTO_IF_NOMEM(psMMUContext, eError, e0);
+
+#if defined(PDUMP)
+       /* Clear the refcount */
+       psMMUContext->ui32PDumpContextIDRefCount = 0;
+#endif
+       /* Record Device specific attributes in the context for subsequent use */
+       psMMUContext->psDevAttrs = psDevAttrs;
+
+       /*
+         Allocate physmem context and set it up
+        */
+       psPhysMemCtx = OSAllocZMem(sizeof(MMU_PHYSMEM_CONTEXT));
+       PVR_LOG_GOTO_IF_NOMEM(psPhysMemCtx, eError, e1);
+
+       psMMUContext->psPhysMemCtx = psPhysMemCtx;
+       psMMUContext->psConnection = psConnection;
+
+       psPhysMemCtx->psDevNode = psDevNode;            /* Needed for Direct Bridge case */
+       psPhysMemCtx->psMMUContext = psMMUContext;      /* Back-link to self */
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+       /* Save the app-specific values for external reference via MMU_GetOSids. */
+       if (psConnection != NULL)
+       {
+               psPhysMemCtx->ui32OSid     = psConnection->ui32OSid;
+               psPhysMemCtx->ui32OSidReg  = psConnection->ui32OSidReg;
+               psPhysMemCtx->bOSidAxiProt = psConnection->bOSidAxiProtReg;
+       }
+       else
+       {
+               /* Direct Bridge calling sequence e.g. Firmware */
+               psPhysMemCtx->ui32OSid     = 0;
+               psPhysMemCtx->ui32OSidReg  = 0;
+               psPhysMemCtx->bOSidAxiProt = IMG_FALSE;
+       }
+#endif
+
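+       /* Name the page-table RA after the physmem context pointer so it can be identified in debug output */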
+       OSSNPrintf(sBuf, sizeof(sBuf), "pgtables %p", psPhysMemCtx);
+       psPhysMemCtx->uiPhysMemRANameAllocSize = OSStringLength(sBuf)+1;
+       psPhysMemCtx->pszPhysMemRAName = OSAllocMem(psPhysMemCtx->uiPhysMemRANameAllocSize);
+       PVR_LOG_GOTO_IF_NOMEM(psPhysMemCtx->pszPhysMemRAName, eError, e2);
+
+       OSStringLCopy(psPhysMemCtx->pszPhysMemRAName, sBuf, psPhysMemCtx->uiPhysMemRANameAllocSize);
+
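+       /* Create the resource arena that provides the backing memory for page catalogue/directory/table objects */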
+       psPhysMemCtx->psPhysMemRA = RA_Create(psPhysMemCtx->pszPhysMemRAName,
+                                             /* subsequent import */
+                                             PhysHeapGetPageShift(psDevNode->psMMUPhysHeap),
+                                             RA_LOCKCLASS_1,
+                                             _MMU_PhysMem_RAImportAlloc,
+                                             _MMU_PhysMem_RAImportFree,
+                                             psPhysMemCtx, /* priv */
+                                             RA_POLICY_DEFAULT);
+       if (psPhysMemCtx->psPhysMemRA == NULL)
+       {
+               OSFreeMem(psPhysMemCtx->pszPhysMemRAName);
+               psPhysMemCtx->pszPhysMemRAName = NULL;
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, e3);
+       }
+
+       /* Set up cleanup metadata used to check whether an MMU context
+        * has been destroyed and must not be accessed any more */
+       psPhysMemCtx->psCleanupData = OSAllocMem(sizeof(*(psPhysMemCtx->psCleanupData)));
+       PVR_LOG_GOTO_IF_NOMEM(psPhysMemCtx->psCleanupData, eError, e4);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+       /* Record the originating OSid for all allocation / free for this context */
+       psPhysMemCtx->psCleanupData->ui32OSid = psPhysMemCtx->ui32OSid;
+#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */
+       OSLockCreate(&psPhysMemCtx->psCleanupData->hCleanupLock);
+       psPhysMemCtx->psCleanupData->bMMUContextExists = IMG_TRUE;
+       dllist_init(&psPhysMemCtx->psCleanupData->sMMUCtxCleanupItemsHead);
+       OSAtomicWrite(&psPhysMemCtx->psCleanupData->iRef, 1);
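+       /* The context holds the initial reference on the cleanup data; it is freed when the last reference is dropped (see MMU_ContextDestroy) */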
+
+       /* Allocate the base level object */
+       /*
+          Note: Although this is not required by this file until
+                the first allocation is made, a device-specific callback
+                might request the base object address, so we allocate
+                it up front.
+        */
+       if (_PxMemAlloc(psMMUContext,
+                       ui32BaseObjects,
+                       psConfig,
+                       psDevAttrs->eTopLevel,
+                       &psMMUContext->sBaseLevelInfo.sMemDesc,
+                       psDevAttrs->ui32BaseAlign))
+       {
+               PVR_LOG_GOTO_WITH_ERROR("_PxMemAlloc", eError, PVRSRV_ERROR_OUT_OF_MEMORY, e5);
+       }
+
+       dllist_init(&psMMUContext->psPhysMemCtx->sTmpMMUMappingHead);
+
+       psMMUContext->sBaseLevelInfo.ui32NumOfEntries = ui32BaseObjects;
+       psMMUContext->sBaseLevelInfo.ui32RefCount = 0;
+
+       eError = OSLockCreate(&psMMUContext->hLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e6);
+
+       /* return context */
+       *ppsMMUContext = psMMUContext;
+
+       return PVRSRV_OK;
+
+e6:
+       _PxMemFree(psMMUContext, &psMMUContext->sBaseLevelInfo.sMemDesc, psDevAttrs->eTopLevel);
+e5:
+       OSFreeMem(psPhysMemCtx->psCleanupData);
+e4:
+       RA_Delete(psPhysMemCtx->psPhysMemRA);
+e3:
+       OSFreeMem(psPhysMemCtx->pszPhysMemRAName);
+e2:
+       OSFreeMem(psPhysMemCtx);
+e1:
+       OSFreeMem(psMMUContext);
+e0:
+       return eError;
+}
+
+/*
+       MMU_ContextDestroy
+ */
+void
+MMU_ContextDestroy (MMU_CONTEXT *psMMUContext)
+{
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+       PDLLIST_NODE psNode, psNextNode;
+
+       PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psMMUContext->psPhysMemCtx->psDevNode;
+       MMU_CTX_CLEANUP_DATA *psCleanupData = psMMUContext->psPhysMemCtx->psCleanupData;
+
+       PVR_DPF((PVR_DBG_MESSAGE, "%s: Enter", __func__));
+
+       if (psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK)
+       {
+               /* There should be no way to get here with live pages unless
+                  there is a bug in this module or the MM code */
+               PVR_ASSERT(psMMUContext->sBaseLevelInfo.ui32RefCount == 0);
+       }
+
+       /* Cleanup lock must be acquired before MMUContext lock. Reverse order
+        * may lead to a deadlock and is reported by lockdep. */
+       OSLockAcquire(psCleanupData->hCleanupLock);
+       OSLockAcquire(psMMUContext->hLock);
+
+       /* Free the top level MMU object - will be put on defer free list.
+        * This has to be done before the step below that will empty the
+        * defer-free list. */
+       _PxMemFree(psMMUContext,
+                  &psMMUContext->sBaseLevelInfo.sMemDesc,
+                  psMMUContext->psDevAttrs->eTopLevel);
+
+       /* Empty the temporary defer-free list of Px */
+       _FreeMMUMapping(psDevNode, &psMMUContext->psPhysMemCtx->sTmpMMUMappingHead);
+       PVR_ASSERT(dllist_is_empty(&psMMUContext->psPhysMemCtx->sTmpMMUMappingHead));
+
+       /* Empty the defer free list so the cleanup thread will
+        * not have to access any MMU context related structures anymore */
+       dllist_foreach_node(&psCleanupData->sMMUCtxCleanupItemsHead,
+                           psNode,
+                           psNextNode)
+       {
+               MMU_CLEANUP_ITEM *psCleanup = IMG_CONTAINER_OF(psNode,
+                                                              MMU_CLEANUP_ITEM,
+                                                              sMMUCtxCleanupItem);
+
+               _FreeMMUMapping(psDevNode, &psCleanup->sMMUMappingHead);
+
+               dllist_remove_node(psNode);
+       }
+       PVR_ASSERT(dllist_is_empty(&psCleanupData->sMMUCtxCleanupItemsHead));
+
+       psCleanupData->bMMUContextExists = IMG_FALSE;
+
+       /* Free physmem context */
+       RA_Delete(psMMUContext->psPhysMemCtx->psPhysMemRA);
+       psMMUContext->psPhysMemCtx->psPhysMemRA = NULL;
+       OSFreeMem(psMMUContext->psPhysMemCtx->pszPhysMemRAName);
+       psMMUContext->psPhysMemCtx->pszPhysMemRAName = NULL;
+
+       OSFreeMem(psMMUContext->psPhysMemCtx);
+
+       OSLockRelease(psMMUContext->hLock);
+
+       OSLockRelease(psCleanupData->hCleanupLock);
+
+       if (OSAtomicDecrement(&psCleanupData->iRef) == 0)
+       {
+               OSLockDestroy(psCleanupData->hCleanupLock);
+               OSFreeMem(psCleanupData);
+       }
+
+       OSLockDestroy(psMMUContext->hLock);
+
+       /* free the context itself. */
+       OSFreeMem(psMMUContext);
+       /* Not NULLing the pointer: it is only a copy on the stack */
+
+       PVR_DPF((PVR_DBG_MESSAGE, "%s: Exit", __func__));
+}
+
+/*
+       MMU_Alloc
+ */
+PVRSRV_ERROR
+MMU_Alloc (MMU_CONTEXT *psMMUContext,
+           IMG_DEVMEM_SIZE_T uSize,
+           IMG_DEVMEM_SIZE_T *puActualSize,
+           IMG_UINT32 uiProtFlags,
+           IMG_DEVMEM_SIZE_T uDevVAddrAlignment,
+           IMG_DEV_VIRTADDR *psDevVAddr,
+           IMG_UINT32 uiLog2PageSize)
+{
+       PVRSRV_ERROR eError;
+       IMG_DEV_VIRTADDR sDevVAddrEnd;
+
+       const MMU_PxE_CONFIG *psPDEConfig;
+       const MMU_PxE_CONFIG *psPTEConfig;
+       const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+
+       MMU_DEVICEATTRIBS *psDevAttrs;
+       IMG_HANDLE hPriv;
+
+#if !defined(DEBUG)
+       PVR_UNREFERENCED_PARAMETER(uDevVAddrAlignment);
+#endif
+
+       PVR_DPF((PVR_DBG_MESSAGE,
+                       "%s: uSize=" IMG_DEVMEM_SIZE_FMTSPEC
+                       ", uiProtFlags=0x%x, align="IMG_DEVMEM_ALIGN_FMTSPEC,
+                       __func__, uSize, uiProtFlags, uDevVAddrAlignment));
+
+       /* check params */
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psMMUContext, "psMMUContext");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psDevVAddr, "psDevVAddr");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(puActualSize, "puActualSize");
+
+       psDevAttrs = psMMUContext->psDevAttrs;
+
+       eError = psDevAttrs->pfnGetPageSizeConfiguration(uiLog2PageSize,
+                                                        &psPDEConfig,
+                                                        &psPTEConfig,
+                                                        &psDevVAddrConfig,
+                                                        &hPriv);
+       PVR_LOG_RETURN_IF_ERROR(eError, "pfnGetPageSizeConfiguration");
+
+       /* The address and size must be data-page granular */
+       if (((psDevVAddr->uiAddr & psDevVAddrConfig->uiPageOffsetMask) != 0)
+                       || ((uSize & psDevVAddrConfig->uiPageOffsetMask) != 0))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: invalid address or size granularity",
+                               __func__));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       sDevVAddrEnd = *psDevVAddr;
+       sDevVAddrEnd.uiAddr += uSize;
+
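+       /* Allocate the page catalogue/directory/table objects needed to cover the requested range, under the context lock */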
+       OSLockAcquire(psMMUContext->hLock);
+       eError = _AllocPageTables(psMMUContext, *psDevVAddr, sDevVAddrEnd, uiLog2PageSize);
+       OSLockRelease(psMMUContext->hLock);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_LOG_ERROR(eError, "_AllocPageTables");
+               return PVRSRV_ERROR_MMU_FAILED_TO_ALLOCATE_PAGETABLES;
+       }
+
+       psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
+
+       return PVRSRV_OK;
+}
+
+/*
+       MMU_Free
+ */
+void
+MMU_Free (MMU_CONTEXT *psMMUContext,
+          IMG_DEV_VIRTADDR sDevVAddr,
+          IMG_DEVMEM_SIZE_T uiSize,
+          IMG_UINT32 uiLog2DataPageSize)
+{
+       IMG_DEV_VIRTADDR sDevVAddrEnd;
+
+#if defined(DEBUG) && defined(SUPPORT_VALIDATION) && defined(__linux__)
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+       IMG_UINT32 ui32MMULeakMax = psPVRSRVData->sMemLeakIntervals.ui32MMU;
+
+       mutex_lock(&g_sMMULeakMutex);
+
+       g_ui32MMULeakCounter++;
+       if (ui32MMULeakMax && g_ui32MMULeakCounter >= ui32MMULeakMax)
+       {
+               g_ui32MMULeakCounter = 0;
+               mutex_unlock(&g_sMMULeakMutex);
+
+               PVR_DPF((PVR_DBG_WARNING,
+                        "%s: Skipped MMU free for address 0x%016" IMG_UINT64_FMTSPECx " to trigger memory leak.",
+                        __func__,
+                        sDevVAddr.uiAddr));
+               return;
+       }
+
+       mutex_unlock(&g_sMMULeakMutex);
+#endif
+
+       PVR_ASSERT(psMMUContext != NULL);
+       PVR_LOG_RETURN_VOID_IF_FALSE(psMMUContext != NULL, "psMMUContext");
+
+       PVR_DPF((PVR_DBG_MESSAGE, "%s: Freeing DevVAddr " IMG_DEV_VIRTADDR_FMTSPEC,
+                       __func__, sDevVAddr.uiAddr));
+
+       /* Compute the end of the address range to free */
+       sDevVAddrEnd = sDevVAddr;
+       sDevVAddrEnd.uiAddr += uiSize;
+
+       /* The Cleanup lock has to be taken before the MMUContext hLock to
+        * prevent deadlock scenarios. It is necessary only for parts of
+        * _SetupCleanup_FreeMMUMapping though.*/
+       OSLockAcquire(psMMUContext->psPhysMemCtx->psCleanupData->hCleanupLock);
+
+       OSLockAcquire(psMMUContext->hLock);
+
+       _FreePageTables(psMMUContext,
+                       sDevVAddr,
+                       sDevVAddrEnd,
+                       uiLog2DataPageSize);
+
+       _SetupCleanup_FreeMMUMapping(psMMUContext->psPhysMemCtx);
+
+       OSLockRelease(psMMUContext->hLock);
+
+       OSLockRelease(psMMUContext->psPhysMemCtx->psCleanupData->hCleanupLock);
+
+       return;
+}
+
+PVRSRV_ERROR
+MMU_MapPages(MMU_CONTEXT *psMMUContext,
+             PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+             IMG_DEV_VIRTADDR sDevVAddrBase,
+             PMR *psPMR,
+             IMG_UINT32 ui32PhysPgOffset,
+             IMG_UINT32 ui32MapPageCount,
+             IMG_UINT32 *paui32MapIndices,
+             IMG_UINT32 uiLog2HeapPageSize)
+{
+       PVRSRV_ERROR eError;
+       IMG_HANDLE hPriv;
+
+       MMU_Levelx_INFO *psLevel = NULL;
+
+       MMU_Levelx_INFO *psPrevLevel = NULL;
+
+       IMG_UINT32 uiPTEIndex = 0;
+       IMG_UINT32 uiPageSize = (1 << uiLog2HeapPageSize);
+       IMG_UINT32 uiLoop = 0;
+       IMG_UINT32 ui32MappedCount = 0;
+       IMG_DEVMEM_OFFSET_T uiPgOffset = 0;
+       IMG_UINT32 uiFlushEnd = 0, uiFlushStart = 0;
+
+       IMG_UINT64 uiProtFlags = 0, uiProtFlagsReadOnly = 0, uiDefProtFlags=0;
+       IMG_UINT64 uiDummyProtFlags = 0;
+       MMU_PROTFLAGS_T uiMMUProtFlags = 0;
+
+       const MMU_PxE_CONFIG *psConfig;
+       const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+
+       IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase;
+
+       IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+       IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+       IMG_DEV_PHYADDR *psDevPAddr;
+       IMG_DEV_PHYADDR sDevPAddr;
+       IMG_BOOL *pbValid;
+       IMG_BOOL bValid;
+       IMG_BOOL bDummyBacking = IMG_FALSE, bZeroBacking = IMG_FALSE;
+       IMG_BOOL bNeedBacking = IMG_FALSE;
+       PVRSRV_DEVICE_NODE *psDevNode;
+
+#if defined(PDUMP)
+       IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+       IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+       IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset;
+
+       PDUMPCOMMENT(psMMUContext->psPhysMemCtx->psDevNode, "Wire up Page Table entries to point to the Data Pages (%"IMG_INT64_FMTSPECd" bytes)",
+                    (IMG_UINT64)(ui32MapPageCount * uiPageSize));
+#endif /*PDUMP*/
+
+       /* Validate the most essential parameters */
+       PVR_LOG_GOTO_IF_INVALID_PARAM(psMMUContext, eError, e0);
+       PVR_LOG_GOTO_IF_INVALID_PARAM(psPMR, eError, e0);
+
+       psDevNode = psMMUContext->psPhysMemCtx->psDevNode;
+
+       /* Allocate memory for the device physical addresses and validity
+          states; the on-stack arrays are used when the page count is small */
+       if (ui32MapPageCount > PMR_MAX_TRANSLATION_STACK_ALLOC)
+       {
+               psDevPAddr = OSAllocMem(ui32MapPageCount * sizeof(IMG_DEV_PHYADDR));
+               PVR_LOG_GOTO_IF_NOMEM(psDevPAddr, eError, e0);
+
+               pbValid = OSAllocMem(ui32MapPageCount * sizeof(IMG_BOOL));
+               if (pbValid == NULL)
+               {
+                       /* Should the allocation fail, clean up here before exiting */
+                       OSFreeMem(psDevPAddr);
+                       PVR_LOG_GOTO_WITH_ERROR("pbValid", eError, PVRSRV_ERROR_OUT_OF_MEMORY, e0);
+               }
+       }
+       else
+       {
+               psDevPAddr = asDevPAddr;
+               pbValid = abValid;
+       }
+
+       /* Get the Device physical addresses of the pages we are trying to map.
+        * In the case of a non-indexed mapping we can get all addresses at once. */
+       if (NULL == paui32MapIndices)
+       {
+               eError = PMR_DevPhysAddr(psPMR,
+                                        uiLog2HeapPageSize,
+                                        ui32MapPageCount,
+                                        ((IMG_DEVMEM_OFFSET_T) ui32PhysPgOffset << uiLog2HeapPageSize),
+                                        psDevPAddr,
+                                        pbValid);
+               PVR_GOTO_IF_ERROR(eError, e1);
+       }
+
+       /* Get the page table level configuration */
+       _MMU_GetPTConfig(psMMUContext,
+                        (IMG_UINT32) uiLog2HeapPageSize,
+                        &psConfig,
+                        &hPriv,
+                        &psDevVAddrConfig);
+
+       eError = _MMU_ConvertDevMemFlags(IMG_FALSE,
+                                        uiMappingFlags,
+                                        &uiMMUProtFlags,
+                                        psMMUContext);
+       PVR_GOTO_IF_ERROR(eError, e2);
+
+       /* Callback to get device specific protection flags */
+       if (psConfig->uiBytesPerEntry == 8)
+       {
+               uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2HeapPageSize);
+               uiMMUProtFlags |= MMU_PROTFLAGS_READABLE;
+               uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt8((uiMMUProtFlags & ~MMU_PROTFLAGS_WRITEABLE),
+                                                                                 uiLog2HeapPageSize);
+       }
+       else if (psConfig->uiBytesPerEntry == 4)
+       {
+               uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags);
+               uiMMUProtFlags |= MMU_PROTFLAGS_READABLE;
+               uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt4((uiMMUProtFlags & ~MMU_PROTFLAGS_WRITEABLE));
+       }
+       else
+       {
+               PVR_LOG_GOTO_WITH_ERROR("psConfig->uiBytesPerEntry", eError, PVRSRV_ERROR_INVALID_PARAMS, e2);
+       }
+       uiDummyProtFlags = uiProtFlags;
+
+       if (PMR_IsSparse(psPMR))
+       {
+               /* We know there will not be 4G number of PMRs */
+               bDummyBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(PMR_Flags(psPMR));
+               if (bDummyBacking)
+               {
+                       bZeroBacking = PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(PMR_Flags(psPMR));
+               }
+
+               if (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiMappingFlags))
+               {
+                       /* Obtain non-coherent protection flags: we cannot have multiple coherent
+                          virtual pages pointing to the same physical page, so all dummy page
+                          mappings have to be non-coherent even in a coherent allocation */
+                       eError = _MMU_ConvertDevMemFlags(IMG_FALSE,
+                                                                       uiMappingFlags & ~PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT,
+                                                                       &uiMMUProtFlags,
+                                                                       psMMUContext);
+                       PVR_GOTO_IF_ERROR(eError, e2);
+
+                       /* Callback to get device specific protection flags */
+                       if (psConfig->uiBytesPerEntry == 8)
+                       {
+                               uiDummyProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2HeapPageSize);
+                       }
+                       else
+                       {
+                               /* We've already validated possible values of uiBytesPerEntry at the start of this function */
+                               PVR_ASSERT(psConfig->uiBytesPerEntry == 4);
+                               uiDummyProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags);
+                       }
+               }
+       }
+
+       OSLockAcquire(psMMUContext->hLock);
+
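+       /* Write one PTE per page; each page table is cache-cleaned when the walk moves on to the next table and once more after the loop */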
+       for (uiLoop = 0; uiLoop < ui32MapPageCount; uiLoop++)
+       {
+
+#if defined(PDUMP)
+               IMG_DEVMEM_OFFSET_T uiNextSymName;
+#endif /*PDUMP*/
+
+               if (NULL != paui32MapIndices)
+               {
+                       uiPgOffset = paui32MapIndices[uiLoop];
+
+                       /* Calculate the Device Virtual Address of the page */
+                       sDevVAddr.uiAddr = sDevVAddrBase.uiAddr + (uiPgOffset * uiPageSize);
+
+                       /* Get the physical address to map */
+                       eError = PMR_DevPhysAddr(psPMR,
+                                                uiLog2HeapPageSize,
+                                                1,
+                                                uiPgOffset * uiPageSize,
+                                                &sDevPAddr,
+                                                &bValid);
+                       PVR_GOTO_IF_ERROR(eError, e3);
+               }
+               else
+               {
+                       uiPgOffset = uiLoop + ui32PhysPgOffset;
+                       sDevPAddr = psDevPAddr[uiLoop];
+                       bValid = pbValid[uiLoop];
+               }
+
+               uiDefProtFlags = uiProtFlags;
+               /*
+                       The default value of the entry is invalid, so we don't need to mark
+                       it as such if the page wasn't valid; we just advance past that address.
+                */
+               if (bValid || bDummyBacking)
+               {
+                       if (!bValid)
+                       {
+                               if (bZeroBacking)
+                               {
+                                       sDevPAddr.uiAddr = psDevNode->sDevZeroPage.ui64PgPhysAddr;
+                                       /* Ensure the zero backing page PTE is read-only */
+                                       uiDefProtFlags = uiProtFlagsReadOnly;
+                               }
+                               else
+                               {
+                                       sDevPAddr.uiAddr = psDevNode->sDummyPage.ui64PgPhysAddr;
+                               }
+                       }
+                       else
+                       {
+                               /* check the physical alignment of the memory to map */
+                               PVR_ASSERT((sDevPAddr.uiAddr & (uiPageSize-1)) == 0);
+                       }
+
+#if defined(DEBUG)
+                       {
+                               IMG_INT32       i32FeatureVal = 0;
+                               IMG_UINT32 ui32BitLength = FloorLog2(sDevPAddr.uiAddr);
+
+                               i32FeatureVal = PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, PHYS_BUS_WIDTH);
+                               do {
+                                       /* i32FeatureVal can be negative in cases where this feature is undefined.
+                                        * In that situation we need to bail out rather than go ahead with the debug comparison. */
+                                       if (0 > i32FeatureVal)
+                                               break;
+
+                                       if (ui32BitLength > i32FeatureVal)
+                                       {
+                                               PVR_DPF((PVR_DBG_ERROR,
+                                                               "%s Failed. The physical address bitlength (%d)"
+                                                               " is greater than the chip can handle (%d).",
+                                                               __func__, ui32BitLength, i32FeatureVal));
+
+                                               PVR_ASSERT(ui32BitLength <= i32FeatureVal);
+                                               eError = PVRSRV_ERROR_INVALID_PARAMS;
+                                               goto e3;
+                                       }
+                               } while (0);
+                       }
+#endif /*DEBUG*/
+
+#if defined(PDUMP)
+                       if (bValid)
+                       {
+                               eError = PMR_PDumpSymbolicAddr(psPMR, uiPgOffset * uiPageSize,
+                                                              sizeof(aszMemspaceName), &aszMemspaceName[0],
+                                                              sizeof(aszSymbolicAddress), &aszSymbolicAddress[0],
+                                                              &uiSymbolicAddrOffset,
+                                                              &uiNextSymName);
+                               PVR_ASSERT(eError == PVRSRV_OK);
+                       }
+#endif /*PDUMP*/
+
+                       psPrevLevel = psLevel;
+                       /* Calculate PT index and get new table descriptor */
+                       _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+                                      &psLevel, &uiPTEIndex);
+
+                       if (psPrevLevel == psLevel)
+                       {
+                               /*
+                                * Sparse allocations may have page offsets which
+                                * decrement as well as increment, so make sure we
+                                * update the range we will flush correctly.
+                                */
+                               if (uiPTEIndex > uiFlushEnd)
+                                       uiFlushEnd = uiPTEIndex;
+                               else if (uiPTEIndex < uiFlushStart)
+                                       uiFlushStart = uiPTEIndex;
+                       }
+                       else
+                       {
+                               /* Flush if we moved to another psLevel, i.e. page table */
+                               if (psPrevLevel != NULL)
+                               {
+                                       eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap,
+                                                                   &psPrevLevel->sMemDesc.psMapping->sMemHandle,
+                                                                   uiFlushStart * psConfig->uiBytesPerEntry + psPrevLevel->sMemDesc.uiOffset,
+                                                                   (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+                                       PVR_GOTO_IF_ERROR(eError, e3);
+                               }
+
+                               uiFlushStart = uiPTEIndex;
+                               uiFlushEnd = uiFlushStart;
+                       }
+
+                       HTBLOGK(HTB_SF_MMU_PAGE_OP_MAP,
+                               HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr),
+                               HTBLOG_U64_BITS_HIGH(sDevPAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevPAddr.uiAddr));
+
+                       /* Set the PT entry with the specified address and protection flags */
+                       eError = _SetupPTE(psMMUContext,
+                                          psLevel,
+                                          uiPTEIndex,
+                                          psConfig,
+                                          &sDevPAddr,
+                                          IMG_FALSE,
+#if defined(PDUMP)
+                                          (bValid)?aszMemspaceName:(psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName),
+                                                 ((bValid)?aszSymbolicAddress:((bZeroBacking)?DEV_ZERO_PAGE:DUMMY_PAGE)),
+                                                 (bValid)?uiSymbolicAddrOffset:0,
+#endif /*PDUMP*/
+                                           uiDefProtFlags);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "_SetupPTE", e3);
+
+                       if (bValid)
+                       {
+                               PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+                               PVR_DPF ((PVR_DBG_MESSAGE,
+                                               "%s: devVAddr=" IMG_DEV_VIRTADDR_FMTSPEC ", "
+                                               "size=" IMG_DEVMEM_OFFSET_FMTSPEC,
+                                               __func__,
+                                               sDevVAddr.uiAddr,
+                                               uiPgOffset * uiPageSize));
+
+                               ui32MappedCount++;
+                       }
+               }
+
+               sDevVAddr.uiAddr += uiPageSize;
+       }
+
+       /* Flush the last level we touched */
+       if (psLevel != NULL)
+       {
+               eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap,
+                                           &psLevel->sMemDesc.psMapping->sMemHandle,
+                                           uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+                                           (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+               PVR_GOTO_IF_ERROR(eError, e3);
+       }
+
+       OSLockRelease(psMMUContext->hLock);
+
+       _MMU_PutPTConfig(psMMUContext, hPriv);
+
+       if (psDevPAddr != asDevPAddr)
+       {
+               OSFreeMem(pbValid);
+               OSFreeMem(psDevPAddr);
+       }
+
+       /* Flush TLB for PTs */
+       psDevNode->pfnMMUCacheInvalidate(psDevNode,
+                                        psMMUContext,
+                                        MMU_LEVEL_1,
+                                        IMG_FALSE);
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDevNode, "Wired up %d Page Table entries (out of %d)", ui32MappedCount, ui32MapPageCount);
+#endif /*PDUMP*/
+
+       return PVRSRV_OK;
+
+e3:
+       OSLockRelease(psMMUContext->hLock);
+
+       if (PMR_IsSparse(psPMR) && PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiMappingFlags))
+       {
+               bNeedBacking = IMG_TRUE;
+       }
+
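+       /* Roll back the PTEs written so far; sparse allocations that require dummy backing are re-pointed at the backing page rather than invalidated */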
+       MMU_UnmapPages(psMMUContext,
+                      (bNeedBacking) ? uiMappingFlags : 0,
+                      sDevVAddrBase,
+                      uiLoop,
+                      paui32MapIndices,
+                      uiLog2HeapPageSize,
+                      PMR_IsSparse(psPMR));
+e2:
+       _MMU_PutPTConfig(psMMUContext, hPriv);
+e1:
+       if (psDevPAddr != asDevPAddr)
+       {
+               OSFreeMem(pbValid);
+               OSFreeMem(psDevPAddr);
+       }
+e0:
+       return eError;
+}
+
+/*
+       MMU_UnmapPages
+ */
+void
+MMU_UnmapPages(MMU_CONTEXT *psMMUContext,
+               PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+               IMG_DEV_VIRTADDR sDevVAddrBase,
+               IMG_UINT32 ui32PageCount,
+               IMG_UINT32 *pai32FreeIndices,
+               IMG_UINT32 uiLog2PageSize,
+               PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags)
+{
+       IMG_UINT32 uiPTEIndex = 0, ui32Loop=0;
+       IMG_UINT32 uiPageSize = 1 << uiLog2PageSize;
+       IMG_UINT32 uiFlushEnd = 0, uiFlushStart = 0;
+       MMU_Levelx_INFO *psLevel = NULL;
+       MMU_Levelx_INFO *psPrevLevel = NULL;
+       IMG_HANDLE hPriv;
+       const MMU_PxE_CONFIG *psConfig;
+       const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+       IMG_UINT64 uiProtFlags = 0, uiProtFlagsReadOnly = 0;
+       MMU_PROTFLAGS_T uiMMUProtFlags = 0, uiMMUReadOnlyProtFlags = 0;
+       IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase;
+       IMG_DEV_PHYADDR sBackingPgDevPhysAddr;
+       IMG_BOOL bUnmap = IMG_TRUE, bDummyBacking = IMG_FALSE, bZeroBacking = IMG_FALSE;
+       IMG_CHAR *pcBackingPageName = NULL;
+       PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode;
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDevNode,
+                    "Invalidate %d entries in page tables for virtual range: 0x%010"IMG_UINT64_FMTSPECX" to 0x%010"IMG_UINT64_FMTSPECX,
+                    ui32PageCount,
+                    (IMG_UINT64)sDevVAddr.uiAddr,
+                    ((IMG_UINT64)sDevVAddr.uiAddr) + (uiPageSize*ui32PageCount)-1);
+#endif
+       bDummyBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiMemAllocFlags);
+       bZeroBacking = PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiMemAllocFlags);
+
+       if (bZeroBacking)
+       {
+               sBackingPgDevPhysAddr.uiAddr = psDevNode->sDevZeroPage.ui64PgPhysAddr;
+               pcBackingPageName = DEV_ZERO_PAGE;
+       }
+       else
+       {
+               sBackingPgDevPhysAddr.uiAddr = psDevNode->sDummyPage.ui64PgPhysAddr;
+               pcBackingPageName = DUMMY_PAGE;
+       }
+
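+       /* When mapping flags are supplied and dummy backing is required, the entries below are re-pointed at the backing page instead of being unmapped */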
+       bUnmap = (uiMappingFlags)? !bDummyBacking : IMG_TRUE;
+       /* Get PT and address configs */
+       _MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize,
+                        &psConfig, &hPriv, &psDevVAddrConfig);
+
+       if (_MMU_ConvertDevMemFlags(bUnmap,
+                                   uiMappingFlags,
+                                   &uiMMUProtFlags,
+                                   psMMUContext) != PVRSRV_OK)
+       {
+               return;
+       }
+
+       uiMMUReadOnlyProtFlags = (uiMMUProtFlags & ~MMU_PROTFLAGS_WRITEABLE) | MMU_PROTFLAGS_READABLE;
+
+       /* Callback to get device specific protection flags */
+       if (psConfig->uiBytesPerEntry == 4)
+       {
+               uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags);
+               uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUReadOnlyProtFlags);
+       }
+       else if (psConfig->uiBytesPerEntry == 8)
+       {
+               uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2PageSize);
+               uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUReadOnlyProtFlags, uiLog2PageSize);
+       }
+
+
+       OSLockAcquire(psMMUContext->hLock);
+
+       /* Unmap page by page */
+       while (ui32Loop < ui32PageCount)
+       {
+               if (NULL != pai32FreeIndices)
+               {
+                       /* Calculate the Device Virtual Address of the page */
+                       sDevVAddr.uiAddr = sDevVAddrBase.uiAddr +
+                                       pai32FreeIndices[ui32Loop] * (IMG_UINT64) uiPageSize;
+               }
+
+               psPrevLevel = psLevel;
+               /* Calculate PT index and get new table descriptor */
+               _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+                              &psLevel, &uiPTEIndex);
+
+               if (psPrevLevel == psLevel)
+               {
+                       /*
+                        * Sparse allocations may have page offsets which
+                        * decrement as well as increment, so make sure we
+                        * update the range we will flush correctly.
+                        */
+                       if (uiPTEIndex > uiFlushEnd)
+                               uiFlushEnd = uiPTEIndex;
+                       else if (uiPTEIndex < uiFlushStart)
+                               uiFlushStart = uiPTEIndex;
+               }
+               else
+               {
+                       /* Flush if we moved to another psLevel, i.e. page table */
+                       if (psPrevLevel != NULL)
+                       {
+                               PhysHeapPagesClean(psDevNode->psMMUPhysHeap,
+                                                  &psPrevLevel->sMemDesc.psMapping->sMemHandle,
+                                                  uiFlushStart * psConfig->uiBytesPerEntry + psPrevLevel->sMemDesc.uiOffset,
+                                                  (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+                       }
+
+                       uiFlushStart = uiPTEIndex;
+                       uiFlushEnd = uiFlushStart;
+               }
+
+               HTBLOGK(HTB_SF_MMU_PAGE_OP_UNMAP,
+                       HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr));
+
+               /* Set the PT entry to invalid and poison it with a bad address */
+               if (_SetupPTE(psMMUContext,
+                             psLevel,
+                             uiPTEIndex,
+                             psConfig,
+                             (bDummyBacking)? &sBackingPgDevPhysAddr : &gsBadDevPhyAddr,
+                                     bUnmap,
+#if defined(PDUMP)
+                                     (bDummyBacking)? (psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName): NULL,
+                                     (bDummyBacking)? pcBackingPageName: NULL,
+                                     0U,
+#endif
+                                     (bZeroBacking)? uiProtFlagsReadOnly: uiProtFlags) != PVRSRV_OK)
+               {
+                       goto e0;
+               }
+
+               /* Check we haven't wrapped around */
+               PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+               ui32Loop++;
+               sDevVAddr.uiAddr += uiPageSize;
+       }
+
+       /* Flush the last level we touched */
+       if (psLevel != NULL)
+       {
+               PhysHeapPagesClean(psDevNode->psMMUPhysHeap,
+                                  &psLevel->sMemDesc.psMapping->sMemHandle,
+                                  uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+                                  (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+       }
+
+       OSLockRelease(psMMUContext->hLock);
+
+       _MMU_PutPTConfig(psMMUContext, hPriv);
+
+       /* Flush TLB for PTs */
+       psDevNode->pfnMMUCacheInvalidate(psDevNode,
+                                        psMMUContext,
+                                        MMU_LEVEL_1,
+                                        IMG_TRUE);
+
+       return;
+
+e0:
+       _MMU_PutPTConfig(psMMUContext, hPriv);
+       PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Failed to map/unmap page table"));
+       PVR_ASSERT(0);
+       OSLockRelease(psMMUContext->hLock);
+       return;
+}
+
+PVRSRV_ERROR
+MMU_MapPMRFast (MMU_CONTEXT *psMMUContext,
+                IMG_DEV_VIRTADDR sDevVAddrBase,
+                const PMR *psPMR,
+                IMG_DEVMEM_SIZE_T uiSizeBytes,
+                PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+                IMG_UINT32 uiLog2HeapPageSize)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_UINT32 uiCount, i;
+       IMG_UINT32 uiPageSize = 1 << uiLog2HeapPageSize;
+       IMG_UINT32 uiPTEIndex = 0;
+       IMG_UINT64 uiProtFlags;
+       MMU_PROTFLAGS_T uiMMUProtFlags = 0;
+       MMU_Levelx_INFO *psLevel = NULL;
+       IMG_HANDLE hPriv;
+       const MMU_PxE_CONFIG *psConfig;
+       const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+       IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase;
+       IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+       IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+       IMG_DEV_PHYADDR *psDevPAddr;
+       IMG_BOOL *pbValid;
+       IMG_UINT32 uiFlushStart = 0;
+       PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode;
+
+#if defined(PDUMP)
+       IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+       IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+       IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset;
+       IMG_UINT32 ui32MappedCount = 0;
+       PDUMPCOMMENT(psDevNode, "Wire up Page Table entries to point to the Data Pages (%"IMG_INT64_FMTSPECd" bytes)", uiSizeBytes);
+#endif /*PDUMP*/
+
+       /* We should verify the size and contiguity when supporting variable page size */
+
+       PVR_ASSERT (psMMUContext != NULL);
+       PVR_ASSERT (psPMR != NULL);
+
+       /* Allocate memory for page-frame-numbers and validity states,
+          N.B. assert could be triggered by an illegal uiSizeBytes */
+       uiCount = uiSizeBytes >> uiLog2HeapPageSize;
+       PVR_ASSERT((IMG_DEVMEM_OFFSET_T)uiCount << uiLog2HeapPageSize == uiSizeBytes);
+       if (uiCount > PMR_MAX_TRANSLATION_STACK_ALLOC)
+       {
+               psDevPAddr = OSAllocMem(uiCount * sizeof(IMG_DEV_PHYADDR));
+               PVR_LOG_GOTO_IF_NOMEM(psDevPAddr, eError, return_error);
+
+               pbValid = OSAllocMem(uiCount * sizeof(IMG_BOOL));
+               if (pbValid == NULL)
+               {
+                       /* Should the allocation fail, clean up here before exiting */
+                       OSFreeMem(psDevPAddr);
+                       PVR_LOG_GOTO_WITH_ERROR("pbValid", eError, PVRSRV_ERROR_OUT_OF_MEMORY, free_paddr_array);
+               }
+       }
+       else
+       {
+               psDevPAddr = asDevPAddr;
+               pbValid = abValid;
+       }
+
+       /* Get general PT and address configs */
+       _MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2HeapPageSize,
+                        &psConfig, &hPriv, &psDevVAddrConfig);
+
+       eError = _MMU_ConvertDevMemFlags(IMG_FALSE,
+                                        uiMappingFlags,
+                                        &uiMMUProtFlags,
+                                        psMMUContext);
+       PVR_GOTO_IF_ERROR(eError, put_mmu_context);
+
+       /* Callback to get device specific protection flags */
+
+       if (psConfig->uiBytesPerEntry == 8)
+       {
+               uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2HeapPageSize);
+       }
+       else if (psConfig->uiBytesPerEntry == 4)
+       {
+               uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags);
+       }
+       else
+       {
+               PVR_LOG_GOTO_WITH_ERROR("psConfig->uiBytesPerEntry", eError, PVRSRV_ERROR_MMU_CONFIG_IS_WRONG, put_mmu_context);
+       }
+
+
+       /* "uiSize" is the amount of contiguity in the underlying
+          page.  Normally this would be constant for the system, but,
+          that constant needs to be communicated, in case it's ever
+          different; caller guarantees that PMRLockSysPhysAddr() has
+          already been called */
+       eError = PMR_DevPhysAddr(psPMR,
+                                uiLog2HeapPageSize,
+                                uiCount,
+                                0,
+                                psDevPAddr,
+                                pbValid);
+       PVR_GOTO_IF_ERROR(eError, put_mmu_context);
+
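+       /* Walk the range under the context lock, writing PTEs sequentially and cleaning each page table from the cache as it fills or when the end of the range is reached */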
+       OSLockAcquire(psMMUContext->hLock);
+
+       _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+                      &psLevel, &uiPTEIndex);
+       uiFlushStart = uiPTEIndex;
+
+       /* Map in all pages of that PMR page by page */
+       for (i=0, uiCount=0; uiCount < uiSizeBytes; i++)
+       {
+#if defined(DEBUG)
+               {
+                       IMG_INT32       i32FeatureVal = 0;
+                       IMG_UINT32 ui32BitLength = FloorLog2(psDevPAddr[i].uiAddr);
+                       i32FeatureVal = PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, PHYS_BUS_WIDTH);
+                       do {
+                               if (0 > i32FeatureVal)
+                                       break;
+
+                               if (ui32BitLength > i32FeatureVal)
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR,
+                                                       "%s Failed. The physical address bitlength (%d)"
+                                                       " is greater than the chip can handle (%d).",
+                                                       __func__, ui32BitLength, i32FeatureVal));
+
+                                       PVR_ASSERT(ui32BitLength <= i32FeatureVal);
+                                       OSLockRelease(psMMUContext->hLock);
+                                       PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, put_mmu_context);
+                               }
+                       } while (0);
+               }
+#endif /*DEBUG*/
+#if defined(PDUMP)
+               {
+                       IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+                       eError = PMR_PDumpSymbolicAddr(psPMR, uiCount,
+                                                      sizeof(aszMemspaceName), &aszMemspaceName[0],
+                                                      sizeof(aszSymbolicAddress), &aszSymbolicAddress[0],
+                                                      &uiSymbolicAddrOffset,
+                                                      &uiNextSymName);
+                       PVR_ASSERT(eError == PVRSRV_OK);
+                       ui32MappedCount++;
+               }
+#endif /*PDUMP*/
+
+               HTBLOGK(HTB_SF_MMU_PAGE_OP_PMRMAP,
+                       HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr),
+                       HTBLOG_U64_BITS_HIGH(psDevPAddr[i].uiAddr), HTBLOG_U64_BITS_LOW(psDevPAddr[i].uiAddr));
+
+               /* Set the PT entry with the specified address and protection flags */
+               eError = _SetupPTE(psMMUContext, psLevel, uiPTEIndex,
+                                  psConfig, &psDevPAddr[i], IMG_FALSE,
+#if defined(PDUMP)
+                                  aszMemspaceName,
+                                  aszSymbolicAddress,
+                                  uiSymbolicAddrOffset,
+#endif /*PDUMP*/
+                                  uiProtFlags);
+               PVR_GOTO_IF_ERROR(eError, unlock_mmu_context);
+
+               sDevVAddr.uiAddr += uiPageSize;
+               uiCount += uiPageSize;
+
+               /* Calculate PT index and get new table descriptor */
+               if (uiPTEIndex < (psDevVAddrConfig->uiNumEntriesPT - 1) && (uiCount != uiSizeBytes))
+               {
+                       uiPTEIndex++;
+               }
+               else
+               {
+                       eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap,
+                                                   &psLevel->sMemDesc.psMapping->sMemHandle,
+                                                   uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+                                                   (uiPTEIndex+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+                       PVR_GOTO_IF_ERROR(eError, unlock_mmu_context);
+
+
+                       _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+                                      &psLevel, &uiPTEIndex);
+                       uiFlushStart = uiPTEIndex;
+               }
+       }
+
+       OSLockRelease(psMMUContext->hLock);
+
+
+       _MMU_PutPTConfig(psMMUContext, hPriv);
+
+       if (psDevPAddr != asDevPAddr)
+       {
+               OSFreeMem(pbValid);
+               OSFreeMem(psDevPAddr);
+       }
+
+       /* Flush TLB for PTs */
+       psDevNode->pfnMMUCacheInvalidate(psDevNode,
+                                        psMMUContext,
+                                        MMU_LEVEL_1,
+                                        IMG_FALSE);
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDevNode, "Wired up %d Page Table entries (out of %d)", ui32MappedCount, i);
+#endif /*PDUMP*/
+
+       return PVRSRV_OK;
+
+unlock_mmu_context:
+       OSLockRelease(psMMUContext->hLock);
+       MMU_UnmapPMRFast(psMMUContext,
+                        sDevVAddrBase,
+                        uiSizeBytes >> uiLog2HeapPageSize,
+                        uiLog2HeapPageSize);
+
+put_mmu_context:
+       _MMU_PutPTConfig(psMMUContext, hPriv);
+
+       if (pbValid != abValid)
+       {
+               OSFreeMem(pbValid);
+       }
+
+free_paddr_array:
+       if (psDevPAddr != asDevPAddr)
+       {
+               OSFreeMem(psDevPAddr);
+       }
+
+return_error:
+       PVR_ASSERT(eError == PVRSRV_OK);
+       return eError;
+}
+
+/*
+       MMU_UnmapPMRFast
+ */
+void
+MMU_UnmapPMRFast(MMU_CONTEXT *psMMUContext,
+                 IMG_DEV_VIRTADDR sDevVAddrBase,
+                 IMG_UINT32 ui32PageCount,
+                 IMG_UINT32 uiLog2PageSize)
+{
+       IMG_UINT32 uiPTEIndex = 0, ui32Loop=0;
+       IMG_UINT32 uiPageSize = 1 << uiLog2PageSize;
+       MMU_Levelx_INFO *psLevel = NULL;
+       IMG_HANDLE hPriv;
+       const MMU_PxE_CONFIG *psConfig;
+       const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+       IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase;
+       IMG_UINT64 uiProtFlags = 0;
+       MMU_PROTFLAGS_T uiMMUProtFlags = 0;
+       IMG_UINT64 uiEntry = 0;
+       IMG_UINT32 uiFlushStart = 0;
+       PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode;
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDevNode,
+                    "Invalidate %d entries in page tables for virtual range: 0x%010"IMG_UINT64_FMTSPECX" to 0x%010"IMG_UINT64_FMTSPECX,
+                    ui32PageCount,
+                    (IMG_UINT64)sDevVAddr.uiAddr,
+                    ((IMG_UINT64)sDevVAddr.uiAddr) + (uiPageSize*ui32PageCount)-1);
+#endif
+
+       /* Get PT and address configs */
+       _MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize,
+                        &psConfig, &hPriv, &psDevVAddrConfig);
+
+       if (_MMU_ConvertDevMemFlags(IMG_TRUE,
+                                   0,
+                                   &uiMMUProtFlags,
+                                   psMMUContext) != PVRSRV_OK)
+       {
+               return;
+       }
+
+       /* Callback to get device specific protection flags */
+
+       if (psConfig->uiBytesPerEntry == 8)
+       {
+               uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2PageSize);
+
+               /* Fill the entry with a bad address but leave space for protection flags */
+               uiEntry = (gsBadDevPhyAddr.uiAddr & ~psConfig->uiProtMask) | uiProtFlags;
+       }
+       else if (psConfig->uiBytesPerEntry == 4)
+       {
+               uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags);
+
+               /* Fill the entry with a bad address but leave space for protection flags */
+               uiEntry = (((IMG_UINT32) gsBadDevPhyAddr.uiAddr) & ~psConfig->uiProtMask) | (IMG_UINT32) uiProtFlags;
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: The page table entry byte length is not supported",
+                               __func__));
+               goto e0;
+       }
+
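+       /* Invalidate the entries directly with the precomputed poison value, cleaning each page table from the cache as the walk leaves it */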
+       OSLockAcquire(psMMUContext->hLock);
+
+       _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+                      &psLevel, &uiPTEIndex);
+       uiFlushStart = uiPTEIndex;
+
+       /* Unmap page by page and keep the loop as quick as possible.
+        * Only the parts of _SetupPTE that actually need to run are executed here. */
+       while (ui32Loop < ui32PageCount)
+       {
+
+               /* Set the PT entry to invalid and poison it with a bad address */
+               if (psConfig->uiBytesPerEntry == 8)
+               {
+                       ((IMG_UINT64*)psLevel->sMemDesc.pvCpuVAddr)[uiPTEIndex] = uiEntry;
+               }
+               else
+               {
+                       PVR_ASSERT(psConfig->uiBytesPerEntry == 4);
+                       ((IMG_UINT32*)psLevel->sMemDesc.pvCpuVAddr)[uiPTEIndex] = (IMG_UINT32) uiEntry;
+               }
+
+               /* Log modifications */
+               HTBLOGK(HTB_SF_MMU_PAGE_OP_UNMAP,
+                       HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr));
+
+               HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE,
+                       HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel),
+                       uiPTEIndex, MMU_LEVEL_1,
+                       HTBLOG_U64_BITS_HIGH(uiEntry), HTBLOG_U64_BITS_LOW(uiEntry),
+                       IMG_FALSE);
+
+#if defined(PDUMP)
+               PDumpMMUDumpPxEntries(psDevNode,
+                                     MMU_LEVEL_1,
+                                     psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+                                     psLevel->sMemDesc.pvCpuVAddr,
+                                     psLevel->sMemDesc.sDevPAddr,
+                                     uiPTEIndex,
+                                     1,
+                                     NULL,
+                                     NULL,
+                                     0,
+                                     psConfig->uiBytesPerEntry,
+                                     psConfig->uiAddrLog2Align,
+                                     psConfig->uiAddrShift,
+                                     psConfig->uiAddrMask,
+                                     psConfig->uiProtMask,
+                                     psConfig->uiValidEnMask,
+                                     0,
+                                     psMMUContext->psDevAttrs->eMMUType);
+#endif /*PDUMP*/
+
+               sDevVAddr.uiAddr += uiPageSize;
+               ui32Loop++;
+
+               /* Calculate PT index and get new table descriptor */
+               if (uiPTEIndex < (psDevVAddrConfig->uiNumEntriesPT - 1) && (ui32Loop != ui32PageCount))
+               {
+                       uiPTEIndex++;
+               }
+               else
+               {
+                       PhysHeapPagesClean(psDevNode->psMMUPhysHeap,
+                                          &psLevel->sMemDesc.psMapping->sMemHandle,
+                                          uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+                                          (uiPTEIndex+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+
+                       _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+                                      &psLevel, &uiPTEIndex);
+                       uiFlushStart = uiPTEIndex;
+               }
+       }
+
+       OSLockRelease(psMMUContext->hLock);
+
+       _MMU_PutPTConfig(psMMUContext, hPriv);
+
+       /* Flush TLB for PTs */
+       psDevNode->pfnMMUCacheInvalidate(psDevNode,
+                                        psMMUContext,
+                                        MMU_LEVEL_1,
+                                        IMG_TRUE);
+
+       return;
+
+e0:
+       PVR_DPF((PVR_DBG_ERROR, "%s: Failed to map/unmap page table", __func__));
+       PVR_ASSERT(0);
+       return;
+}
+
+/*
+       MMU_ChangeValidity
+ */
+PVRSRV_ERROR
+MMU_ChangeValidity(MMU_CONTEXT *psMMUContext,
+                   IMG_DEV_VIRTADDR sDevVAddr,
+                   IMG_DEVMEM_SIZE_T uiNumPages,
+                   IMG_UINT32 uiLog2PageSize,
+                   IMG_BOOL bMakeValid,
+                   PMR *psPMR)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       IMG_HANDLE hPriv;
+       const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+       const MMU_PxE_CONFIG *psConfig;
+       MMU_Levelx_INFO *psLevel = NULL;
+       IMG_UINT32 uiFlushStart = 0;
+       IMG_UINT32 uiPTIndex = 0;
+       IMG_UINT32 i;
+       IMG_UINT32 uiPageSize = 1 << uiLog2PageSize;
+       IMG_BOOL bValid;
+
+       PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode;
+
+#if defined(PDUMP)
+       IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+       IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+       IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset;
+       IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+       PDUMPCOMMENT(psDevNode,
+                    "Change valid bit of the data pages to %d (0x%"IMG_UINT64_FMTSPECX" - 0x%"IMG_UINT64_FMTSPECX")",
+                    bMakeValid,
+                    sDevVAddr.uiAddr,
+                    sDevVAddr.uiAddr + (uiNumPages<<uiLog2PageSize) - 1);
+#endif /*PDUMP*/
+
+       /* We should verify the size and contiguity when supporting variable page size */
+       PVR_ASSERT (psMMUContext != NULL);
+       PVR_ASSERT (psPMR != NULL);
+
+       /* Get general PT and address configs */
+       _MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize,
+                        &psConfig, &hPriv, &psDevVAddrConfig);
+
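+       /* Locate the page table containing the first entry of the range */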
+       _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+                      &psLevel, &uiPTIndex);
+       uiFlushStart = uiPTIndex;
+
+       /* Do a page table walk and change attribute for every page in range. */
+       for (i=0; i < uiNumPages;)
+       {
+               /* Set the entry */
+               if (bMakeValid)
+               {
+                       /* Only set valid if physical address exists (sparse allocs might have none) */
+                       eError = PMR_IsOffsetValid(psPMR, uiLog2PageSize, 1, (IMG_DEVMEM_OFFSET_T) i << uiLog2PageSize, &bValid);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "PMR_IsOffsetValid", e_exit);
+
+                       if (bValid)
+                       {
+                               if (psConfig->uiBytesPerEntry == 8)
+                               {
+                                       ((IMG_UINT64 *)psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] |= (psConfig->uiValidEnMask);
+                               }
+                               else if (psConfig->uiBytesPerEntry == 4)
+                               {
+                                       ((IMG_UINT32 *)psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] |= (psConfig->uiValidEnMask);
+                               }
+                               else
+                               {
+                                       PVR_LOG_GOTO_WITH_ERROR("psConfig->uiBytesPerEntry", eError, PVRSRV_ERROR_MMU_CONFIG_IS_WRONG, e_exit);
+                               }
+                       }
+               }
+               else
+               {
+                       if (psConfig->uiBytesPerEntry == 8)
+                       {
+                               ((IMG_UINT64 *)psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] &= ~(psConfig->uiValidEnMask);
+                       }
+                       else if (psConfig->uiBytesPerEntry == 4)
+                       {
+                               ((IMG_UINT32 *)psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] &= ~(psConfig->uiValidEnMask);
+                       }
+                       else
+                       {
+                               PVR_LOG_GOTO_WITH_ERROR("psConfig->uiBytesPerEntry", eError, PVRSRV_ERROR_MMU_CONFIG_IS_WRONG, e_exit);
+                       }
+               }
+
+#if defined(PDUMP)
+
+               PMR_PDumpSymbolicAddr(psPMR, i<<uiLog2PageSize,
+                                     sizeof(aszMemspaceName), &aszMemspaceName[0],
+                                     sizeof(aszSymbolicAddress), &aszSymbolicAddress[0],
+                                     &uiSymbolicAddrOffset,
+                                     &uiNextSymName);
+
+               PDumpMMUDumpPxEntries(psDevNode,
+                                     MMU_LEVEL_1,
+                                     psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+                                     psLevel->sMemDesc.pvCpuVAddr,
+                                     psLevel->sMemDesc.sDevPAddr,
+                                     uiPTIndex,
+                                     1,
+                                     aszMemspaceName,
+                                     aszSymbolicAddress,
+                                     uiSymbolicAddrOffset,
+                                     psConfig->uiBytesPerEntry,
+                                     psConfig->uiAddrLog2Align,
+                                     psConfig->uiAddrShift,
+                                     psConfig->uiAddrMask,
+                                     psConfig->uiProtMask,
+                                     psConfig->uiValidEnMask,
+                                     0,
+                                     psMMUContext->psDevAttrs->eMMUType);
+#endif /*PDUMP*/
+
+               sDevVAddr.uiAddr += uiPageSize;
+               i++;
+
+               /* Calculate PT index and get new table descriptor */
+               if (uiPTIndex < (psDevVAddrConfig->uiNumEntriesPT - 1) && (i != uiNumPages))
+               {
+                       uiPTIndex++;
+               }
+               else
+               {
+
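+                       /* Either the current PT is full or this was the last
+                          page: clean the run of entries modified since
+                          uiFlushStart, then look up the PT for the next
+                          address. */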
+                       eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap,
+                                                   &psLevel->sMemDesc.psMapping->sMemHandle,
+                                                   uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+                                                   (uiPTIndex+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+                       PVR_GOTO_IF_ERROR(eError, e_exit);
+
+                       _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+                                      &psLevel, &uiPTIndex);
+                       uiFlushStart = uiPTIndex;
+               }
+       }
+
+e_exit:
+
+       _MMU_PutPTConfig(psMMUContext, hPriv);
+
+       /* Flush TLB for PTs */
+       psDevNode->pfnMMUCacheInvalidate(psDevNode,
+                                        psMMUContext,
+                                        MMU_LEVEL_1,
+                                        !bMakeValid);
+
+       PVR_ASSERT(eError == PVRSRV_OK);
+       return eError;
+}
+
+
+/*
+       MMU_AcquireBaseAddr
+ */
+PVRSRV_ERROR
+MMU_AcquireBaseAddr(MMU_CONTEXT *psMMUContext, IMG_DEV_PHYADDR *psPhysAddr)
+{
+       if (!psMMUContext)
+       {
+               psPhysAddr->uiAddr = 0;
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       *psPhysAddr = psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr;
+
+       return PVRSRV_OK;
+}
+
+/*
+       MMU_AcquireCPUBaseAddr
+ */
+PVRSRV_ERROR
+MMU_AcquireCPUBaseAddr(MMU_CONTEXT *psMMUContext, void **ppvCPUVAddr)
+{
+       if (!psMMUContext)
+       {
+               *ppvCPUVAddr = NULL;
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       *ppvCPUVAddr = psMMUContext->sBaseLevelInfo.sMemDesc.pvCpuVAddr;
+
+       return PVRSRV_OK;
+}
+
+/*
+       MMU_ReleaseBaseAddr
+ */
+void
+MMU_ReleaseBaseAddr(MMU_CONTEXT *psMMUContext)
+{
+       PVR_UNREFERENCED_PARAMETER(psMMUContext);
+}
+
+/*
+       MMU_AppendCacheFlags, MMU_ExchangeCacheFlags
+*/
+
+void MMU_AppendCacheFlags(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32AppendFlags)
+{
+       PVR_ASSERT(psMMUContext != NULL);
+
+       if (psMMUContext == NULL)
+       {
+               return;
+       }
+
+       OSAtomicOr(&psMMUContext->sCacheFlags, (IMG_INT)ui32AppendFlags);
+}
+
+IMG_UINT32 MMU_ExchangeCacheFlags(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32NewCacheFlags)
+{
+       PVR_ASSERT(psMMUContext != NULL);
+
+       if (psMMUContext == NULL)
+       {
+               return 0;
+       }
+
+       return (IMG_UINT32)OSAtomicExchange(&psMMUContext->sCacheFlags, (IMG_INT)ui32NewCacheFlags);
+}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+/*
+    MMU_GetOSids
+ */
+
+void MMU_GetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 *pui32OSid, IMG_UINT32 *pui32OSidReg, IMG_BOOL *pbOSidAxiProt)
+{
+       *pui32OSid     = psMMUContext->psPhysMemCtx->ui32OSid;
+       *pui32OSidReg  = psMMUContext->psPhysMemCtx->ui32OSidReg;
+       *pbOSidAxiProt = psMMUContext->psPhysMemCtx->bOSidAxiProt;
+
+       return;
+}
+
+#endif
+
+/*
+       MMU_CheckFaultAddress
+ */
+void MMU_CheckFaultAddress(MMU_CONTEXT *psMMUContext,
+                           IMG_DEV_VIRTADDR *psDevVAddr,
+                           MMU_FAULT_DATA *psOutFaultData)
+{
+       /* Ideally the RGX defs should be via callbacks, but the function is only called from RGX. */
+#if defined(SUPPORT_RGX)
+# define MMU_MASK_VALID_FOR_32BITS(level) \
+               ((RGX_MMUCTRL_##level##_DATA_ENTRY_PENDING_EN | \
+                 RGX_MMUCTRL_##level##_DATA_VALID_EN) <= 0xFFFFFFFF)
+# define MMU_VALID_STR(entry,level) \
+               (apszMMUValidStr[((((entry)&(RGX_MMUCTRL_##level##_DATA_ENTRY_PENDING_EN))!=0) << 1)| \
+                                ((((entry)&(RGX_MMUCTRL_##level##_DATA_VALID_EN))!=0) << 0)])
+       static const IMG_PCHAR apszMMUValidStr[1<<2] =  {/*--*/ "not valid",
+                                                        /*-V*/ "valid",
+                                                        /*P-*/ "pending",
+                                                        /*PV*/ "inconsistent (pending and valid)"};
+#else
+# define MMU_MASK_VALID_FOR_32BITS(level) 0
+# define MMU_VALID_STR(entry,level) ("??")
+#endif
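+       /* Note: under SUPPORT_RGX, MMU_VALID_STR indexes apszMMUValidStr with a
+          2-bit value built as (pending << 1) | valid, so e.g. an entry with only
+          the VALID bit set maps to index 1 ("valid") and one with both PENDING
+          and VALID set maps to index 3 ("inconsistent (pending and valid)"). */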
+       MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+       MMU_LEVEL       eMMULevel = psDevAttrs->eTopLevel;
+       const MMU_PxE_CONFIG *psConfig;
+       const MMU_PxE_CONFIG *psMMUPDEConfig;
+       const MMU_PxE_CONFIG *psMMUPTEConfig;
+       const MMU_DEVVADDR_CONFIG *psMMUDevVAddrConfig;
+       IMG_HANDLE hPriv;
+       MMU_Levelx_INFO *psLevel = NULL;
+       PVRSRV_ERROR eError;
+       IMG_UINT64 uiIndex;
+       IMG_UINT32 ui32PCIndex = 0xFFFFFFFF;
+       IMG_UINT32 ui32PDIndex = 0xFFFFFFFF;
+       IMG_UINT32 ui32PTIndex = 0xFFFFFFFF;
+       IMG_UINT32 ui32Log2PageSize;
+       MMU_FAULT_DATA sMMUFaultData = {0};
+       MMU_LEVEL_DATA *psMMULevelData;
+
+       OSLockAcquire(psMMUContext->hLock);
+
+       /*
+               At this point we don't know the page size so assume it's 4K.
+               When we get the PD level (MMU_LEVEL_2) we can check to see
+               if this assumption is correct.
+        */
+       eError = psDevAttrs->pfnGetPageSizeConfiguration(12,
+                                                        &psMMUPDEConfig,
+                                                        &psMMUPTEConfig,
+                                                        &psMMUDevVAddrConfig,
+                                                        &hPriv);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_LOG(("Failed to get the page size info for log2 page size of 12"));
+       }
+
+       psLevel = &psMMUContext->sBaseLevelInfo;
+       psConfig = psDevAttrs->psBaseConfig;
+
+       sMMUFaultData.eTopLevel = psDevAttrs->eTopLevel;
+       sMMUFaultData.eType = MMU_FAULT_TYPE_NON_PM;
+
+
+       for (; eMMULevel > MMU_LEVEL_0; eMMULevel--)
+       {
+               if (eMMULevel == MMU_LEVEL_3)
+               {
+                       /* Determine the PC index */
+                       uiIndex = psDevVAddr->uiAddr & psDevAttrs->psTopLevelDevVAddrConfig->uiPCIndexMask;
+                       uiIndex = uiIndex >> psDevAttrs->psTopLevelDevVAddrConfig->uiPCIndexShift;
+                       ui32PCIndex = (IMG_UINT32) uiIndex;
+                       PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PCIndex));
+
+                       psMMULevelData = &sMMUFaultData.sLevelData[MMU_LEVEL_3];
+                       psMMULevelData->uiBytesPerEntry = psConfig->uiBytesPerEntry;
+                       psMMULevelData->ui32Index = ui32PCIndex;
+
+                       if (ui32PCIndex >= psLevel->ui32NumOfEntries)
+                       {
+                               psMMULevelData->ui32NumOfEntries = psLevel->ui32NumOfEntries;
+                               break;
+                       }
+
+                       if (psConfig->uiBytesPerEntry == 4)
+                       {
+                               IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+                               psMMULevelData->ui64Address = pui32Ptr[ui32PCIndex];
+                               if (MMU_MASK_VALID_FOR_32BITS(PC))
+                               {
+                                       psMMULevelData->psDebugStr = MMU_VALID_STR(pui32Ptr[ui32PCIndex] & psConfig->uiProtMask, PC);
+                               }
+                               else
+                               {
+                                       psMMULevelData->psDebugStr = "";
+                                       PVR_LOG(("Invalid RGX_MMUCTRL_PC_DATA_ENTRY mask for 32-bit entry"));
+                               }
+                       }
+                       else
+                       {
+                               IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+                               psMMULevelData->ui64Address = pui64Ptr[ui32PCIndex];
+                               psMMULevelData->psDebugStr  = MMU_VALID_STR(pui64Ptr[ui32PCIndex] & psConfig->uiProtMask, PC);
+
+                       }
+
+                       psLevel = psLevel->apsNextLevel[ui32PCIndex];
+                       if (!psLevel)
+                       {
+                               break;
+                       }
+                       psConfig = psMMUPDEConfig;
+                       continue; /* continue to the next level */
+               }
+
+
+               if (eMMULevel == MMU_LEVEL_2)
+               {
+                       /* Determine the PD index */
+                       uiIndex = psDevVAddr->uiAddr & psDevAttrs->psTopLevelDevVAddrConfig->uiPDIndexMask;
+                       uiIndex = uiIndex >> psDevAttrs->psTopLevelDevVAddrConfig->uiPDIndexShift;
+                       ui32PDIndex = (IMG_UINT32) uiIndex;
+                       PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PDIndex));
+
+                       psMMULevelData = &sMMUFaultData.sLevelData[MMU_LEVEL_2];
+                       psMMULevelData->uiBytesPerEntry = psConfig->uiBytesPerEntry;
+                       psMMULevelData->ui32Index = ui32PDIndex;
+
+                       if (ui32PDIndex >= psLevel->ui32NumOfEntries)
+                       {
+                               psMMULevelData->ui32NumOfEntries = psLevel->ui32NumOfEntries;
+                               break;
+                       }
+
+                       if (psConfig->uiBytesPerEntry == 4)
+                       {
+                               IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+                               psMMULevelData->ui64Address = pui32Ptr[ui32PDIndex];
+                               if (MMU_MASK_VALID_FOR_32BITS(PD))
+                               {
+                                       psMMULevelData->psDebugStr = MMU_VALID_STR(pui32Ptr[ui32PDIndex] & psMMUPDEConfig->uiProtMask, PD);
+                               }
+                               else
+                               {
+                                       psMMULevelData->psDebugStr = "";
+                                       PVR_LOG(("Invalid RGX_MMUCTRL_PD_DATA_ENTRY mask for 32-bit entry"));
+                               }
+
+                               if (psDevAttrs->pfnGetPageSizeFromPDE4(pui32Ptr[ui32PDIndex], &ui32Log2PageSize) != PVRSRV_OK)
+                               {
+                                       PVR_LOG(("Failed to get the page size from the PDE"));
+                               }
+                       }
+                       else
+                       {
+                               IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+                               psMMULevelData->ui64Address = pui64Ptr[ui32PDIndex];
+                               psMMULevelData->psDebugStr  = MMU_VALID_STR(pui64Ptr[ui32PDIndex]  & psMMUPDEConfig->uiProtMask, PD);
+
+                               if (psDevAttrs->pfnGetPageSizeFromVirtAddr != NULL)
+                               {
+                                       /* MMU_VERSION >= 4 */
+                                       if (psDevAttrs->pfnGetPageSizeFromVirtAddr(psMMUContext->psPhysMemCtx->psDevNode, *psDevVAddr, &ui32Log2PageSize) != PVRSRV_OK)
+                                       {
+                                               PVR_LOG(("Failed to get the page size from the virtual address"));
+                                       }
+                               }
+                               else if (psDevAttrs->pfnGetPageSizeFromPDE8(pui64Ptr[ui32PDIndex], &ui32Log2PageSize) != PVRSRV_OK)
+                               {
+                                       PVR_LOG(("Failed to get the page size from the PDE"));
+                               }
+                       }
+
+                       /*
+                               We assumed the page size was 4K; now that we have the
+                               actual size from the PDE we can check whether that
+                               assumption was correct. Until now it hasn't mattered,
+                               as the PC and PD are the same regardless of the page size.
+                        */
+                       if (ui32Log2PageSize != 12)
+                       {
+                               /* Put the 4K page size data */
+                               psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
+
+                               /* Get the correct size data */
+                               eError = psDevAttrs->pfnGetPageSizeConfiguration(ui32Log2PageSize,
+                                                                                &psMMUPDEConfig,
+                                                                                &psMMUPTEConfig,
+                                                                                &psMMUDevVAddrConfig,
+                                                                                &hPriv);
+                               if (eError != PVRSRV_OK)
+                               {
+                                       PVR_LOG(("Failed to get the page size info for log2 page size of %d", ui32Log2PageSize));
+                                       break;
+                               }
+                       }
+                       psLevel = psLevel->apsNextLevel[ui32PDIndex];
+                       if (!psLevel)
+                       {
+                               break;
+                       }
+                       psConfig = psMMUPTEConfig;
+                       continue; /* continue to the next level */
+               }
+
+
+               if (eMMULevel == MMU_LEVEL_1)
+               {
+                       /* Determine the PT index */
+                       uiIndex = psDevVAddr->uiAddr & psMMUDevVAddrConfig->uiPTIndexMask;
+                       uiIndex = uiIndex >> psMMUDevVAddrConfig->uiPTIndexShift;
+                       ui32PTIndex = (IMG_UINT32) uiIndex;
+                       PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PTIndex));
+
+                       psMMULevelData = &sMMUFaultData.sLevelData[MMU_LEVEL_1];
+                       psMMULevelData->uiBytesPerEntry = psConfig->uiBytesPerEntry;
+                       psMMULevelData->ui32Index = ui32PTIndex;
+
+                       if (ui32PTIndex >= psLevel->ui32NumOfEntries)
+                       {
+                               psMMULevelData->ui32NumOfEntries = psLevel->ui32NumOfEntries;
+                               break;
+                       }
+
+                       if (psConfig->uiBytesPerEntry == 4)
+                       {
+                               IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+                               psMMULevelData->ui64Address = pui32Ptr[ui32PTIndex];
+                               if (MMU_MASK_VALID_FOR_32BITS(PT))
+                               {
+                                       psMMULevelData->psDebugStr = MMU_VALID_STR(pui32Ptr[ui32PTIndex] & psMMUPTEConfig->uiProtMask, PT);
+                               }
+                               else
+                               {
+                                       psMMULevelData->psDebugStr = "";
+                                       PVR_LOG(("Invalid RGX_MMUCTRL_PT_DATA_ENTRY mask for 32-bit entry"));
+                               }
+                       }
+                       else
+                       {
+                               IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+                               psMMULevelData->ui64Address = pui64Ptr[ui32PTIndex];
+                               psMMULevelData->psDebugStr  = MMU_VALID_STR(pui64Ptr[ui32PTIndex] & psMMUPTEConfig->uiProtMask, PT);
+
+                       }
+                       goto e1;
+               }
+
+               PVR_LOG(("Unsupported MMU setup: %d", eMMULevel));
+               break;
+       }
+
+e1:
+       /* Put the page size data back */
+       psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
+       OSLockRelease(psMMUContext->hLock);
+
+       *psOutFaultData = sMMUFaultData;
+}
+
+static IMG_UINT64 MMU_GetVDevAddrPTE(MMU_CONTEXT *psMMUContext,
+                                     const MMU_PxE_CONFIG *psConfig,
+                                     const MMU_DEVVADDR_CONFIG *psDevVAddrConfig,
+                                     IMG_UINT32 uiLog2PageSize,
+                                     IMG_DEV_VIRTADDR sDevVAddr,
+                                     IMG_BOOL *pbStatusOut)
+{
+       MMU_Levelx_INFO *psLevel = NULL;
+       IMG_UINT32 uiIndex = 0;
+       IMG_BOOL bStatus = IMG_FALSE;
+       IMG_UINT64 ui64Entry = 0;
+
+       OSLockAcquire(psMMUContext->hLock);
+
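+       /* Walk the hierarchy from the configured top level down to the PT,
+          deliberately falling through each case; psLevel tracks the level
+          reached so far, and when the top level is MMU_LEVEL_1 the base
+          level info itself holds the page table. */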
+       switch (psMMUContext->psDevAttrs->eTopLevel)
+       {
+               case MMU_LEVEL_3:
+                       uiIndex = _CalcPCEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
+                       psLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiIndex];
+                       if (psLevel == NULL)
+                               break;
+
+                       __fallthrough;
+               case MMU_LEVEL_2:
+                       uiIndex = _CalcPDEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
+
+                       if (psLevel != NULL)
+                               psLevel = psLevel->apsNextLevel[uiIndex];
+                       else
+                               psLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiIndex];
+
+                       if (psLevel == NULL)
+                               break;
+
+                       __fallthrough;
+               case MMU_LEVEL_1:
+                       uiIndex = _CalcPTEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
+
+                       if (psLevel == NULL)
+                               psLevel = &psMMUContext->sBaseLevelInfo;
+
+                       ui64Entry = ((IMG_UINT64 *)psLevel->sMemDesc.pvCpuVAddr)[uiIndex];
+                       bStatus = ui64Entry & psConfig->uiValidEnMask;
+
+                       break;
+               default:
+                       PVR_LOG(("MMU_GetVDevAddrPTE: Unsupported MMU setup"));
+                       break;
+       }
+
+       OSLockRelease(psMMUContext->hLock);
+
+       *pbStatusOut = bStatus;
+
+       return ui64Entry;
+}
+
+IMG_BOOL MMU_IsVDevAddrValid(MMU_CONTEXT *psMMUContext,
+                             IMG_UINT32 uiLog2PageSize,
+                             IMG_DEV_VIRTADDR sDevVAddr)
+{
+       IMG_BOOL bStatus;
+       const MMU_PxE_CONFIG *psConfig;
+       IMG_HANDLE hPriv;
+       const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+
+       _MMU_GetPTConfig(psMMUContext, uiLog2PageSize, &psConfig, &hPriv, &psDevVAddrConfig);
+
+       MMU_GetVDevAddrPTE(psMMUContext,
+                          psConfig,
+                          psDevVAddrConfig,
+                          uiLog2PageSize,
+                          sDevVAddr,
+                          &bStatus);
+
+       _MMU_PutPTConfig(psMMUContext, hPriv);
+
+       return bStatus;
+}
+
+#if defined(PDUMP)
+/*
+       MMU_ContextDerivePCPDumpSymAddr
+ */
+PVRSRV_ERROR MMU_ContextDerivePCPDumpSymAddr(MMU_CONTEXT *psMMUContext,
+                                             IMG_CHAR *pszPDumpSymbolicNameBuffer,
+                                             size_t uiPDumpSymbolicNameBufferSize)
+{
+       size_t uiCount;
+       IMG_UINT64 ui64PhysAddr;
+       PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psPhysMemCtx->psDevNode->sDevId;
+
+       if (!psMMUContext->sBaseLevelInfo.sMemDesc.bValid)
+       {
+               /* We don't have any allocations.  You're not allowed to ask
+                * for the page catalogue base address until you've made at
+                * least one allocation.
+                */
+               return PVRSRV_ERROR_MMU_API_PROTOCOL_ERROR;
+       }
+
+       ui64PhysAddr = (IMG_UINT64)psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr.uiAddr;
+
+       PVR_ASSERT(uiPDumpSymbolicNameBufferSize >= (IMG_UINT32)(21 + OSStringLength(psDevId->pszPDumpDevName)));
+
+       /* The page catalogue symbolic name is formed from the PC physical
+          address prefixed with MMUPC_. */
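+       /* For illustration (hypothetical values): with a PDump device name of
+          "PDUMP_DEV" and a PC physical address of 0x1000, the resulting name
+          would be ":PDUMP_DEV:MMUPC_0000000000001000". */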
+       uiCount = OSSNPrintf(pszPDumpSymbolicNameBuffer,
+                            uiPDumpSymbolicNameBufferSize,
+                            ":%s:%s%016"IMG_UINT64_FMTSPECX,
+                            psDevId->pszPDumpDevName,
+                            psMMUContext->sBaseLevelInfo.sMemDesc.bValid?"MMUPC_":"XXX",
+                            ui64PhysAddr);
+
+       if (uiCount + 1 > uiPDumpSymbolicNameBufferSize)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       return PVRSRV_OK;
+}
+
+/*
+       MMU_PDumpWritePageCatBase
+ */
+PVRSRV_ERROR
+MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext,
+                          const IMG_CHAR *pszSpaceName,
+                          IMG_DEVMEM_OFFSET_T uiOffset,
+                          IMG_UINT32 ui32WordSize,
+                          IMG_UINT32 ui32AlignShift,
+                          IMG_UINT32 ui32Shift,
+                          PDUMP_FLAGS_T uiPdumpFlags)
+{
+       PVRSRV_ERROR eError;
+       IMG_CHAR aszPageCatBaseSymbolicAddr[100];
+       const IMG_CHAR *pszPDumpDevName = psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName;
+
+       eError = MMU_ContextDerivePCPDumpSymAddr(psMMUContext,
+                                                &aszPageCatBaseSymbolicAddr[0],
+                                                sizeof(aszPageCatBaseSymbolicAddr));
+       if (eError == PVRSRV_OK)
+       {
+               eError = PDumpWriteSymbAddress(psMMUContext->psPhysMemCtx->psDevNode,
+                                              pszSpaceName,
+                                              uiOffset,
+                                              aszPageCatBaseSymbolicAddr,
+                                              0, /* offset -- Could be non-zero for var. pgsz */
+                                              pszPDumpDevName,
+                                              ui32WordSize,
+                                              ui32AlignShift,
+                                              ui32Shift,
+                                              uiPdumpFlags | PDUMP_FLAGS_CONTINUOUS);
+       }
+
+       return eError;
+}
+
+/*
+       MMU_AcquirePDumpMMUContext
+ */
+PVRSRV_ERROR MMU_AcquirePDumpMMUContext(MMU_CONTEXT *psMMUContext,
+                                        IMG_UINT32 *pui32PDumpMMUContextID,
+                                        IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psPhysMemCtx->psDevNode->sDevId;
+
+       if (!psMMUContext->ui32PDumpContextIDRefCount)
+       {
+               PDUMP_MMU_ALLOC_MMUCONTEXT(psMMUContext->psPhysMemCtx->psDevNode,
+                                          psDevId->pszPDumpDevName,
+                                          psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr,
+                                          psMMUContext->psDevAttrs->eMMUType,
+                                          &psMMUContext->uiPDumpContextID,
+                                          ui32PDumpFlags);
+       }
+
+       psMMUContext->ui32PDumpContextIDRefCount++;
+       *pui32PDumpMMUContextID = psMMUContext->uiPDumpContextID;
+
+       return PVRSRV_OK;
+}
+
+/*
+       MMU_ReleasePDumpMMUContext
+ */
+PVRSRV_ERROR MMU_ReleasePDumpMMUContext(MMU_CONTEXT *psMMUContext,
+                                               IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psPhysMemCtx->psDevNode->sDevId;
+
+       PVR_ASSERT(psMMUContext->ui32PDumpContextIDRefCount != 0);
+       psMMUContext->ui32PDumpContextIDRefCount--;
+
+       if (psMMUContext->ui32PDumpContextIDRefCount == 0)
+       {
+               PDUMP_MMU_FREE_MMUCONTEXT(psMMUContext->psPhysMemCtx->psDevNode,
+                                         psDevId->pszPDumpDevName,
+                                         psMMUContext->uiPDumpContextID,
+                                         ui32PDumpFlags);
+       }
+
+       return PVRSRV_OK;
+}
+#endif
+
+/******************************************************************************
+ End of file (mmu_common.c)
+ ******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/pdump_mmu.c b/drivers/gpu/drm/img/img-rogue/services/server/common/pdump_mmu.c
new file mode 100644 (file)
index 0000000..5b195d6
--- /dev/null
@@ -0,0 +1,908 @@
+/*************************************************************************/ /*!
+@File
+@Title          MMU PDump functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Common PDump (MMU specific) functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if defined(PDUMP)
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pdump_mmu.h"
+#include "pdump_km.h"
+#include "pdump_physmem.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "allocmem.h"
+
+#define MAX_PDUMP_MMU_CONTEXTS (10)
+static IMG_UINT32 guiPDumpMMUContextAvailabilityMask = (1<<MAX_PDUMP_MMU_CONTEXTS)-1;
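+/* Note: the availability mask starts with the low MAX_PDUMP_MMU_CONTEXTS bits
+ * set (0x3FF for 10 contexts); allocating a context clears its bit and freeing
+ * it is expected to set the bit again, so a zero mask means all PDump MMU
+ * context IDs are in use.
+ */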
+
+
+#define MMUPX_FMT(X) ((X<3) ? ((X<2) ?  "MMUPT_\0" : "MMUPD_\0") : "MMUPC_\0")
+#define MIPSMMUPX_FMT(X) ((X<3) ? ((X<2) ?  "MIPSMMUPT_\0" : "MIPSMMUPD_\0") : "MIPSMMUPC_\0")
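+/* Note: MMUPX_FMT maps an MMU_LEVEL to the symbolic-name prefix used in the
+ * PDump scripts: MMUPX_FMT(1) yields "MMUPT_", MMUPX_FMT(2) "MMUPD_" and
+ * MMUPX_FMT(3) "MMUPC_"; MIPSMMUPX_FMT produces the corresponding
+ * MIPSMMUPT_/MIPSMMUPD_/MIPSMMUPC_ prefixes.
+ */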
+
+
+/* Array used to look-up debug strings from MMU_LEVEL */
+static const IMG_CHAR * const apszMMULevelStringLookup[MMU_LEVEL_LAST] =
+{
+       "MMU_LEVEL_0",
+       "PAGE_TABLE",
+       "PAGE_DIRECTORY",
+       "PAGE_CATALOGUE",
+};
+
+static PVRSRV_ERROR
+_ContiguousPDumpBytes(PVRSRV_DEVICE_NODE *psDeviceNode,
+                     const IMG_CHAR *pszSymbolicName,
+                     IMG_UINT32 ui32SymAddrOffset,
+                     IMG_BOOL bFlush,
+                     IMG_UINT32 uiNumBytes,
+                     void *pvBytes,
+                     IMG_UINT32 ui32Flags)
+{
+       static const IMG_CHAR *pvBeyondLastPointer;
+       static const IMG_CHAR *pvBasePointer;
+       static IMG_UINT32 ui32BeyondLastOffset;
+       static IMG_UINT32 ui32BaseOffset;
+       static IMG_UINT32 uiAccumulatedBytes;
+       IMG_UINT32 ui32ParamOutPos;
+       PVRSRV_ERROR eErr = PVRSRV_OK;
+
+       PDUMP_GET_SCRIPT_AND_FILE_STRING();
+
+       /* Caller has PDUMP_LOCK */
+
+       if (!bFlush && uiAccumulatedBytes > 0)
+       {
+               /* do some tests for contiguity.  If it fails, we flush anyway */
+               if (pvBeyondLastPointer != pvBytes ||
+                   ui32SymAddrOffset != ui32BeyondLastOffset
+                   /* NB: ought to check that symbolic name agrees too, but
+                      we know this always to be the case in the current use-case */
+                       )
+               {
+                       bFlush = IMG_TRUE;
+               }
+       }
+
+       /* Flush if necessary */
+       if (bFlush && uiAccumulatedBytes > 0)
+       {
+               eErr = PDumpWriteParameter(psDeviceNode,
+                                          (IMG_UINT8 *)(uintptr_t)pvBasePointer,
+                                          uiAccumulatedBytes, ui32Flags,
+                                          &ui32ParamOutPos, pszFileName);
+               if (eErr == PVRSRV_OK)
+               {
+                       eErr = PDumpSNPrintf(hScript, ui32MaxLenScript,
+                                            "LDB %s:0x%X 0x%X 0x%X %s",
+                                            /* dest */
+                                            pszSymbolicName,
+                                            ui32BaseOffset,
+                                            /* size */
+                                            uiAccumulatedBytes,
+                                            /* file offset */
+                                            ui32ParamOutPos,
+                                            /* filename */
+                                            pszFileName);
+                       PVR_LOG_GOTO_IF_ERROR(eErr, "PDumpSNPrintf", ErrOut);
+
+                       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+               }
+               else if (eErr != PVRSRV_ERROR_PDUMP_NOT_ALLOWED)
+               {
+                       PVR_LOG_GOTO_IF_ERROR(eErr, "PDumpWriteParameter", ErrOut);
+               }
+               else
+               {
+                       /* Writing to the parameter file was prevented by the flags and
+                        * current state of the driver, so skip the script write and the
+                        * error path. This is normal, e.g. when not in a capture range.
+                        */
+                       eErr = PVRSRV_OK;
+               }
+
+               uiAccumulatedBytes = 0;
+       }
+
+       /* Initialise offsets and pointers if necessary */
+       if (uiAccumulatedBytes == 0)
+       {
+               ui32BaseOffset = ui32BeyondLastOffset = ui32SymAddrOffset;
+               pvBeyondLastPointer = pvBasePointer = (const IMG_CHAR *)pvBytes;
+       }
+
+       /* Accumulate some bytes */
+       ui32BeyondLastOffset += uiNumBytes;
+       pvBeyondLastPointer += uiNumBytes;
+       uiAccumulatedBytes += uiNumBytes;
+
+ErrOut:
+       PDUMP_RELEASE_SCRIPT_AND_FILE_STRING();
+       return eErr;
+}
+
+/**************************************************************************
+ * Function Name  : PDumpMMUMalloc
+ * Inputs         : psDeviceNode, pszPDumpDevName, eMMULevel, psDevPAddr,
+ *                  ui32Size, ui32Align, eMMUType
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Writes a CALLOC command (plus an explanatory comment) to
+ *                  the PDump script stream for a newly allocated MMU Px
+ *                  backing page
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUMalloc(PVRSRV_DEVICE_NODE *psDeviceNode,
+                           const IMG_CHAR *pszPDumpDevName,
+                           MMU_LEVEL eMMULevel,
+                           IMG_DEV_PHYADDR *psDevPAddr,
+                           IMG_UINT32 ui32Size,
+                           IMG_UINT32 ui32Align,
+                           PDUMP_MMU_TYPE eMMUType)
+{
+       PVRSRV_ERROR eErr = PVRSRV_OK;
+       IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_BLKDATA;
+       IMG_UINT64 ui64SymbolicAddr;
+       IMG_CHAR *pszMMUPX;
+
+       PDUMP_GET_SCRIPT_STRING();
+
+       PVR_GOTO_IF_INVALID_PARAM(eMMULevel < MMU_LEVEL_LAST, eErr, ErrOut);
+
+       PDUMP_LOCK(ui32Flags);
+
+       /*
+         Write a comment to the PDump2 script streams indicating the memory allocation
+       */
+       eErr = PDumpSNPrintf(hScript,
+                            ui32MaxLen,
+                            "-- CALLOC :%s:%s Size=0x%08X Alignment=0x%08X 0x0 DevPAddr=0x%08"IMG_UINT64_FMTSPECX,
+                            pszPDumpDevName,
+                            apszMMULevelStringLookup[eMMULevel],
+                            ui32Size,
+                            ui32Align,
+                            psDevPAddr->uiAddr);
+       PVR_GOTO_IF_ERROR(eErr, ErrUnlock);
+
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+
+       /*
+         construct the symbolic address
+       */
+       ui64SymbolicAddr = (IMG_UINT64)psDevPAddr->uiAddr;
+
+       /*
+         Write to the MMU script stream indicating the memory allocation
+       */
+       if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV)
+       {
+               pszMMUPX = MIPSMMUPX_FMT(eMMULevel);
+       }
+       else
+       {
+               pszMMUPX = MMUPX_FMT(eMMULevel);
+       }
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "CALLOC :%s:%s%016"IMG_UINT64_FMTSPECX" 0x%X 0x%X 0x0",
+                            pszPDumpDevName,
+                            pszMMUPX,
+                            ui64SymbolicAddr,
+                            ui32Size,
+                            ui32Align
+                            /* don't need this sDevPAddr.uiAddr*/);
+       PVR_GOTO_IF_ERROR(eErr, ErrUnlock);
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+
+ErrUnlock:
+       PDUMP_UNLOCK(ui32Flags);
+ErrOut:
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return eErr;
+}
+
+/**************************************************************************
+ * Function Name  : PDumpMMUFree
+ * Inputs         : psDeviceNode, pszPDumpDevName, eMMULevel, psDevPAddr,
+ *                  eMMUType
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Writes a FREE command (plus an explanatory comment) to
+ *                  the PDump script stream for an MMU Px backing page
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUFree(PVRSRV_DEVICE_NODE *psDeviceNode,
+                       const IMG_CHAR *pszPDumpDevName,
+                         MMU_LEVEL eMMULevel,
+                         IMG_DEV_PHYADDR *psDevPAddr,
+                         PDUMP_MMU_TYPE eMMUType)
+{
+       PVRSRV_ERROR eErr = PVRSRV_OK;
+       IMG_UINT64 ui64SymbolicAddr;
+       IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_BLKDATA;
+       IMG_CHAR *pszMMUPX;
+
+       PDUMP_GET_SCRIPT_STRING();
+
+       PVR_GOTO_IF_INVALID_PARAM(eMMULevel < MMU_LEVEL_LAST, eErr, ErrOut);
+
+       PDUMP_LOCK(ui32Flags);
+       /*
+         Write a comment to the PDUMP2 script streams indicating the memory free
+       */
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "-- FREE :%s:%s",
+                            pszPDumpDevName, apszMMULevelStringLookup[eMMULevel]);
+       PVR_GOTO_IF_ERROR(eErr, ErrUnlock);
+
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+
+       /*
+         construct the symbolic address
+       */
+       ui64SymbolicAddr = (IMG_UINT64)psDevPAddr->uiAddr;
+
+       /*
+         Write to the MMU script stream indicating the memory free
+       */
+       if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV)
+       {
+               pszMMUPX = MIPSMMUPX_FMT(eMMULevel);
+       }
+       else
+       {
+               pszMMUPX = MMUPX_FMT(eMMULevel);
+       }
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "FREE :%s:%s%016"IMG_UINT64_FMTSPECX,
+                            pszPDumpDevName,
+                            pszMMUPX,
+                            ui64SymbolicAddr);
+       PVR_GOTO_IF_ERROR(eErr, ErrUnlock);
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+
+ErrUnlock:
+       PDUMP_UNLOCK(ui32Flags);
+ErrOut:
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return eErr;
+}
+
+/*******************************************************************************************************
+ * Function Name  : PDumpPTBaseObjectToMem64
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string which represents a memory write from
+ *                  the base object, for the MIPS MMU device type
+********************************************************************************************************/
+PVRSRV_ERROR PDumpPTBaseObjectToMem64(const IMG_CHAR *pszPDumpDevName,
+                                     PMR *psPMRDest,
+                                     IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+                                     IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+                                     IMG_UINT32 ui32Flags,
+                                     MMU_LEVEL eMMULevel,
+                                     IMG_UINT64 ui64PxSymAddr,
+                                     IMG_UINT64 ui64PxOffset)
+{
+
+       IMG_CHAR aszMemspaceNameDest[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+       IMG_CHAR aszSymbolicNameDest[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+       IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetDest;
+       IMG_DEVMEM_OFFSET_T uiNextSymNameDest;
+       PVRSRV_ERROR eErr = PVRSRV_OK;
+
+       PDUMP_GET_SCRIPT_STRING();
+
+       eErr = PMR_PDumpSymbolicAddr(psPMRDest,
+                                    uiLogicalOffsetDest,
+                                    PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+                                    aszMemspaceNameDest,
+                                    PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+                                    aszSymbolicNameDest,
+                                    &uiPDumpSymbolicOffsetDest,
+                                    &uiNextSymNameDest);
+
+       PVR_GOTO_IF_ERROR(eErr, Err);
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen,
+                                                "WRW64 :%s:%s:0x%"IMG_UINT64_FMTSPECX" :%s:%s%016"IMG_UINT64_FMTSPECX":0x%"IMG_UINT64_FMTSPECX,
+                                                aszMemspaceNameDest, aszSymbolicNameDest, uiPDumpSymbolicOffsetDest,
+                                                pszPDumpDevName, MIPSMMUPX_FMT(eMMULevel), ui64PxSymAddr, ui64PxOffset);
+
+       PVR_GOTO_IF_ERROR(eErr, Err);
+
+       PDUMP_LOCK(ui32Flags);
+       PDumpWriteScript(PMR_DeviceNode(psPMRDest), hScript, ui32Flags);
+       PDUMP_UNLOCK(ui32Flags);
+
+Err:
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return eErr;
+}
+
+/**************************************************************************
+ * Function Name  : PDumpMMUDumpPxEntries
+ * Inputs         : psDeviceNode, eMMULevel, pszPDumpDevName, pvPxMem,
+ *                  sPxDevPAddr, uiFirstEntry, uiNumEntries and the entry
+ *                  layout parameters (uiBytesPerEntry, uiLog2Align,
+ *                  uiAddrShift, uiAddrMask, uiPxEProtMask, uiDataValidEnable)
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Dumps a range of Px entries to the PDump streams, writing
+ *                  symbolic address references for valid entries and raw
+ *                  values (via LDB) for invalid ones
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUDumpPxEntries(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                  MMU_LEVEL eMMULevel,
+                                  const IMG_CHAR *pszPDumpDevName,
+                                  void *pvPxMem,
+                                  IMG_DEV_PHYADDR sPxDevPAddr,
+                                  IMG_UINT32 uiFirstEntry,
+                                  IMG_UINT32 uiNumEntries,
+                                  const IMG_CHAR *pszMemspaceName,
+                                  const IMG_CHAR *pszSymbolicAddr,
+                                  IMG_UINT64 uiSymbolicAddrOffset,
+                                  IMG_UINT32 uiBytesPerEntry,
+                                  IMG_UINT32 uiLog2Align,
+                                  IMG_UINT32 uiAddrShift,
+                                  IMG_UINT64 uiAddrMask,
+                                  IMG_UINT64 uiPxEProtMask,
+                                  IMG_UINT64 uiDataValidEnable,
+                                  IMG_UINT32 ui32Flags,
+                                  PDUMP_MMU_TYPE eMMUType)
+{
+       PVRSRV_ERROR eErr = PVRSRV_OK;
+       IMG_UINT64 ui64PxSymAddr;
+       IMG_UINT64 ui64PxEValueSymAddr;
+       IMG_UINT32 ui32SymAddrOffset = 0;
+       IMG_UINT32 *pui32PxMem;
+       IMG_UINT64 *pui64PxMem;
+       IMG_BOOL   bPxEValid;
+       IMG_UINT32 uiPxEIdx;
+       IMG_INT32  iShiftAmount;
+       IMG_CHAR   *pszWrwSuffix = NULL;
+       void *pvRawBytes = NULL;
+       IMG_CHAR aszPxSymbolicAddr[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+       IMG_UINT64 ui64PxE64;
+       IMG_UINT64 ui64Protflags64;
+       IMG_CHAR *pszMMUPX;
+
+       PDUMP_GET_SCRIPT_STRING();
+       ui32Flags |= (PDUMP_FLAGS_BLKDATA | PDUMP_FLAGS_CONTINUOUS);
+
+       eErr = PDumpReady();
+       if (eErr != PVRSRV_OK)
+       {
+               /* Mask suspension from caller as this is terminal & logged */
+               eErr = (eErr == PVRSRV_ERROR_PDUMP_NOT_ACTIVE) ? PVRSRV_OK : eErr;
+               goto ErrOut;
+       }
+
+       PVR_GOTO_IF_INVALID_PARAM(pvPxMem, eErr, ErrOut);
+
+       /*
+         create the symbolic address of the Px
+       */
+       ui64PxSymAddr = sPxDevPAddr.uiAddr;
+
+       if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV)
+       {
+               pszMMUPX = MIPSMMUPX_FMT(eMMULevel);
+       }
+       else
+       {
+               pszMMUPX = MMUPX_FMT(eMMULevel);
+       }
+
+       OSSNPrintf(aszPxSymbolicAddr,
+                  PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+                  ":%s:%s%016"IMG_UINT64_FMTSPECX,
+                  pszPDumpDevName,
+                  pszMMUPX,
+                  ui64PxSymAddr);
+
+       PDUMP_LOCK(ui32Flags);
+
+       /*
+         traverse PxEs, dumping entries
+       */
+       for (uiPxEIdx = uiFirstEntry;
+            uiPxEIdx < uiFirstEntry + uiNumEntries;
+            uiPxEIdx++)
+       {
+               /* Calc the symbolic address offset of the PxE location
+                  This is what we have to add to the table address to get to a certain entry */
+               ui32SymAddrOffset = (uiPxEIdx*uiBytesPerEntry);
+
+               /* Calc the symbolic address of the PxE value and HW protflags */
+               /* just read it here */
+               switch (uiBytesPerEntry)
+               {
+                       case 4:
+                       {
+                               pui32PxMem = pvPxMem;
+                               ui64PxE64 = pui32PxMem[uiPxEIdx];
+                               pszWrwSuffix = "";
+                               pvRawBytes = &pui32PxMem[uiPxEIdx];
+                               break;
+                       }
+                       case 8:
+                       {
+                               pui64PxMem = pvPxMem;
+                               ui64PxE64 = pui64PxMem[uiPxEIdx];
+                               pszWrwSuffix = "64";
+                               pvRawBytes = &pui64PxMem[uiPxEIdx];
+                               break;
+                       }
+                       default:
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "PDumpMMUDumpPxEntries: unsupported entry size (%u bytes)", uiBytesPerEntry));
+                               ui64PxE64 = 0;
+                               /* Unsupported entry size: fall through with a zero entry. */
+                               break;
+                       }
+               }
+
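+               /* Extract the physical address field from the entry and scale
+                  it back to a byte address (units of 2^uiLog2Align), then
+                  separate out the protection bits and the valid flag. */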
+               ui64PxEValueSymAddr = (ui64PxE64 & uiAddrMask) >> uiAddrShift << uiLog2Align;
+               ui64Protflags64 = ui64PxE64 & uiPxEProtMask;
+               bPxEValid = (ui64Protflags64 & uiDataValidEnable) ? IMG_TRUE : IMG_FALSE;
+
+               if (!bPxEValid)
+               {
+                       /* If the entry was "invalid", simply write the actual
+                          value found to the memory location */
+                       eErr = _ContiguousPDumpBytes(psDeviceNode, aszPxSymbolicAddr,
+                                                    ui32SymAddrOffset, IMG_FALSE,
+                                                    uiBytesPerEntry, pvRawBytes,
+                                                    ui32Flags);
+                       if (eErr == PVRSRV_OK)
+                       {
+                               goto done;
+                       }
+                       else
+                       {
+                               goto ErrUnlock;
+                       }
+               }
+
+               _ContiguousPDumpBytes(psDeviceNode, aszPxSymbolicAddr,
+                                     ui32SymAddrOffset, IMG_TRUE,
+                                     0, NULL,
+                                     ui32Flags);
+
+               iShiftAmount = (IMG_INT32)(uiLog2Align - uiAddrShift);
+
+               /* First put the symbolic representation of the actual
+                  address of the entry into a pdump internal register */
+               /* MOV seemed cleaner here, since (a) it's 64-bit; (b) the
+                  target is not memory.  However, MOV cannot do the
+                  "reference" of the symbolic address.  Apparently WRW is
+                  correct. */
+
+               if (pszSymbolicAddr == NULL)
+               {
+                       pszSymbolicAddr = "none";
+               }
+
+               if (eMMULevel == MMU_LEVEL_1)
+               {
+                       if (iShiftAmount == 0)
+                       {
+                               eErr = PDumpSNPrintf(hScript,
+                                                    ui32MaxLen,
+                                                    "WRW%s :%s:%s%016"IMG_UINT64_FMTSPECX":0x%08X :%s:%s:0x%"IMG_UINT64_FMTSPECX" | 0x%"IMG_UINT64_FMTSPECX"\n",
+                                                    pszWrwSuffix,
+                                                    /* dest */
+                                                    pszPDumpDevName,
+                                                    pszMMUPX,
+                                                    ui64PxSymAddr,
+                                                    ui32SymAddrOffset,
+                                                    /* src */
+                                                    pszMemspaceName,
+                                                    pszSymbolicAddr,
+                                                    uiSymbolicAddrOffset,
+                                                    /* ORing prot flags */
+                                                    ui64Protflags64);
+                       }
+                       else
+                       {
+                               eErr = PDumpSNPrintf(hScript,
+                                                    ui32MaxLen,
+                                                   "WRW :%s:$1 :%s:%s:0x%"IMG_UINT64_FMTSPECX"\n",
+                                                    /* dest */
+                                                    pszPDumpDevName,
+                                                    /* src */
+                                                    pszMemspaceName,
+                                                    pszSymbolicAddr,
+                                                    uiSymbolicAddrOffset);
+                       }
+               }
+               else
+               {
+                       if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV)
+                       {
+                               pszMMUPX = MIPSMMUPX_FMT(eMMULevel - 1);
+                       }
+                       else
+                       {
+                               pszMMUPX = MMUPX_FMT(eMMULevel - 1);
+                       }
+                       eErr = PDumpSNPrintf(hScript,
+                                            ui32MaxLen,
+                                            "WRW :%s:$1 :%s:%s%016"IMG_UINT64_FMTSPECX":0x0",
+                                            /* dest */
+                                            pszPDumpDevName,
+                                            /* src */
+                                            pszPDumpDevName,
+                                            pszMMUPX,
+                                            ui64PxEValueSymAddr);
+                       if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV)
+                       {
+                               pszMMUPX = MIPSMMUPX_FMT(eMMULevel);
+                       }
+                       else
+                       {
+                               pszMMUPX = MMUPX_FMT(eMMULevel);
+                       }
+               }
+
+               PVR_GOTO_IF_ERROR(eErr, ErrUnlock);
+               PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+
+               /* Now shift it to the right place, if necessary: */
+               /* Now shift that value down, by the "Align shift"
+                  amount, to get it into units (ought to assert that
+                  we get an integer - i.e. we don't shift any bits
+                  off the bottom, don't know how to do PDUMP
+                  assertions yet) and then back up by the right
+                  amount to get it into the position of the field.
+                  This is optimised into a single shift right by the
+                  difference between the two. */
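+               /* Worked example (hypothetical values): with uiLog2Align = 12
+                  (4KB units) and uiAddrShift = 4, iShiftAmount is 8, so a
+                  byte address of 0x12345000 in $1 becomes 0x00123450 after
+                  the SHR, i.e. the page number already positioned at bit 4
+                  of the entry, ready to be ORed with the protection flags. */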
+               if (iShiftAmount > 0)
+               {
+                       /* Page X Address is specified in units larger
+                          than the position in the PxE would suggest. */
+                       eErr = PDumpSNPrintf(hScript,
+                                            ui32MaxLen,
+                                            "SHR :%s:$1 :%s:$1 0x%X",
+                                            /* dest */
+                                            pszPDumpDevName,
+                                            /* src A */
+                                            pszPDumpDevName,
+                                            /* src B */
+                                            iShiftAmount);
+                       PVR_GOTO_IF_ERROR(eErr, ErrUnlock);
+                       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+               }
+               else if (iShiftAmount < 0)
+               {
+                       /* Page X Address is specified in units smaller
+                          than the position in the PxE would suggest. */
+                       eErr = PDumpSNPrintf(hScript,
+                                            ui32MaxLen,
+                                            "SHL :%s:$1 :%s:$1 0x%X",
+                                            /* dest */
+                                            pszPDumpDevName,
+                                            /* src A */
+                                            pszPDumpDevName,
+                                            /* src B */
+                                            -iShiftAmount);
+                       PVR_GOTO_IF_ERROR(eErr, ErrUnlock);
+                       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+               }
+
+               if (eMMULevel == MMU_LEVEL_1)
+               {
+                       if (iShiftAmount != 0)
+                       {
+                               /* Now we can "or" in the protection flags */
+                               eErr = PDumpSNPrintf(hScript,
+                                                    ui32MaxLen,
+                                                    "OR :%s:$1 :%s:$1 0x%"IMG_UINT64_FMTSPECX,
+                                                    /* dest */
+                                                    pszPDumpDevName,
+                                                    /* src A */
+                                                    pszPDumpDevName,
+                                                    /* src B */
+                                                    ui64Protflags64);
+                               PVR_GOTO_IF_ERROR(eErr, ErrUnlock);
+                               PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+                               eErr = PDumpSNPrintf(hScript,
+                                                    ui32MaxLen,
+                                                    "WRW%s :%s:%s%016"IMG_UINT64_FMTSPECX":0x%08X :%s:$1",
+                                                    pszWrwSuffix,
+                                                    /* dest */
+                                                    pszPDumpDevName,
+                                                    pszMMUPX,
+                                                    ui64PxSymAddr,
+                                                    ui32SymAddrOffset,
+                                                    /* src */
+                                                    pszPDumpDevName);
+                               PVR_GOTO_IF_ERROR(eErr, ErrUnlock);
+                               PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+                       }
+               }
+               else
+               {
+                       /* Now we can "or" in the protection flags */
+                       eErr = PDumpSNPrintf(hScript,
+                                            ui32MaxLen,
+                                            "OR :%s:$1 :%s:$1 0x%"IMG_UINT64_FMTSPECX,
+                                            /* dest */
+                                            pszPDumpDevName,
+                                            /* src A */
+                                            pszPDumpDevName,
+                                            /* src B */
+                                            ui64Protflags64);
+                       PVR_GOTO_IF_ERROR(eErr, ErrUnlock);
+                       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+
+                       /* Finally, we write the register into the actual PxE */
+                       eErr = PDumpSNPrintf(hScript,
+                                            ui32MaxLen,
+                                            "WRW%s :%s:%s%016"IMG_UINT64_FMTSPECX":0x%08X :%s:$1",
+                                            pszWrwSuffix,
+                                            /* dest */
+                                            pszPDumpDevName,
+                                            pszMMUPX,
+                                            ui64PxSymAddr,
+                                            ui32SymAddrOffset,
+                                            /* src */
+                                            pszPDumpDevName);
+                       PVR_GOTO_IF_ERROR(eErr, ErrUnlock);
+                       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+               }
+       }
+
+done:
+       /* flush out any partly accumulated stuff for LDB */
+       _ContiguousPDumpBytes(psDeviceNode, aszPxSymbolicAddr,
+                             ui32SymAddrOffset, IMG_TRUE,
+                             0, NULL,
+                             ui32Flags);
+
+ErrUnlock:
+       PDUMP_UNLOCK(ui32Flags);
+ErrOut:
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return eErr;
+}
+
+/**************************************************************************
+ * Function Name  : _PdumpAllocMMUContext
+ * Inputs         : pui32MMUContextID
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : pdump util to allocate MMU contexts
+**************************************************************************/
+static PVRSRV_ERROR _PdumpAllocMMUContext(IMG_UINT32 *pui32MMUContextID)
+{
+       IMG_UINT32 i;
+
+       /* there are MAX_PDUMP_MMU_CONTEXTS contexts available, find one */
+       for (i=0; i<MAX_PDUMP_MMU_CONTEXTS; i++)
+       {
+               if ((guiPDumpMMUContextAvailabilityMask & (1U << i)))
+               {
+                       /* mark in use */
+                       guiPDumpMMUContextAvailabilityMask &= ~(1U << i);
+                       *pui32MMUContextID = i;
+                       return PVRSRV_OK;
+               }
+       }
+
+       PVR_DPF((PVR_DBG_ERROR, "_PdumpAllocMMUContext: no free MMU context ids"));
+
+       return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND;
+}
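+/* Usage sketch (illustrative only): the availability mask is expected to start
+ * with one set bit per available context, so the first allocation returns ID 0
+ * and clears bit 0; _PdumpFreeMMUContext() below sets the bit again, e.g.
+ *
+ *     IMG_UINT32 ui32Ctx;
+ *     if (_PdumpAllocMMUContext(&ui32Ctx) == PVRSRV_OK)
+ *     {
+ *             ... emit "MMU :<memspace>:v<ui32Ctx> ..." script commands ...
+ *             _PdumpFreeMMUContext(ui32Ctx);
+ *     }
+ */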
+
+/**************************************************************************
+ * Function Name  : _PdumpFreeMMUContext
+ * Inputs         : ui32MMUContextID
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : pdump util to free MMU contexts
+**************************************************************************/
+static PVRSRV_ERROR _PdumpFreeMMUContext(IMG_UINT32 ui32MMUContextID)
+{
+       if (ui32MMUContextID < MAX_PDUMP_MMU_CONTEXTS)
+       {
+               /* free the id */
+               PVR_ASSERT (!(guiPDumpMMUContextAvailabilityMask & (1U << ui32MMUContextID)));
+               guiPDumpMMUContextAvailabilityMask |= (1U << ui32MMUContextID);
+               return PVRSRV_OK;
+       }
+
+       PVR_DPF((PVR_DBG_ERROR, "_PdumpFreeMMUContext: MMU context id invalid"));
+
+       return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND;
+}
+
+/**************************************************************************
+ * Function Name  : PDumpMMUAllocMMUContext
+ * Inputs         :
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Alloc MMU Context
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUAllocMMUContext(PVRSRV_DEVICE_NODE *psDeviceNode,
+                               const IMG_CHAR *pszPDumpMemSpaceName,
+                                    IMG_DEV_PHYADDR sPCDevPAddr,
+                                    PDUMP_MMU_TYPE eMMUType,
+                                    IMG_UINT32 *pui32MMUContextID,
+                                    IMG_UINT32 ui32PDumpFlags)
+{
+       IMG_UINT64 ui64PCSymAddr;
+       IMG_CHAR *pszMMUPX;
+       IMG_UINT32 ui32MMUContextID;
+       PVRSRV_ERROR eErr = PVRSRV_OK;
+       PDUMP_GET_SCRIPT_STRING();
+
+       eErr = _PdumpAllocMMUContext(&ui32MMUContextID);
+       if (eErr != PVRSRV_OK)
+       {
+               PVR_LOG_ERROR(eErr, "_PdumpAllocMMUContext");
+               PVR_DBG_BREAK;
+               goto ErrOut;
+       }
+
+       /*
+         create the symbolic address of the PC
+       */
+       ui64PCSymAddr = sPCDevPAddr.uiAddr;
+
+       if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV)
+       {
+               pszMMUPX = MIPSMMUPX_FMT(1);
+               /* Giving it a mock value until the PDump player implements
+                  support for the MIPS microAptiv MMU */
+               eMMUType = PDUMP_MMU_TYPE_VARPAGE_40BIT;
+       }
+       else
+       {
+               pszMMUPX = MMUPX_FMT(3);
+       }
+
+       eErr = PDumpSNPrintf(hScript,
+                            ui32MaxLen,
+                            "MMU :%s:v%d %d :%s:%s%016"IMG_UINT64_FMTSPECX,
+                            /* mmu context */
+                            pszPDumpMemSpaceName,
+                            ui32MMUContextID,
+                            /* mmu type */
+                            eMMUType,
+                            /* PC base address */
+                            pszPDumpMemSpaceName,
+                            pszMMUPX,
+                            ui64PCSymAddr);
+       if (eErr != PVRSRV_OK)
+       {
+               PVR_DBG_BREAK;
+               goto ErrOut;
+       }
+
+       PDUMP_LOCK(ui32PDumpFlags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+       PDUMP_UNLOCK(ui32PDumpFlags);
+
+       /* return the MMU Context ID */
+       *pui32MMUContextID = ui32MMUContextID;
+
+ErrOut:
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return eErr;
+}
+
+/**************************************************************************
+ * Function Name  : PDumpMMUFreeMMUContext
+ * Inputs         :
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Free MMU Context
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUFreeMMUContext(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                   const IMG_CHAR *pszPDumpMemSpaceName,
+                                   IMG_UINT32 ui32MMUContextID,
+                                   IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eErr = PVRSRV_OK;
+       PDUMP_GET_SCRIPT_STRING();
+
+       PDUMP_LOCK(ui32PDumpFlags);
+       eErr = PDumpSNPrintf(hScript,
+                            ui32MaxLen,
+                            "-- Clear MMU Context for memory space %s", pszPDumpMemSpaceName);
+       PVR_GOTO_IF_ERROR(eErr, ErrUnlock);
+
+       PDumpWriteScript(psDeviceNode,hScript, ui32PDumpFlags);
+
+       eErr = PDumpSNPrintf(hScript,
+                            ui32MaxLen,
+                            "MMU :%s:v%d",
+                            pszPDumpMemSpaceName,
+                            ui32MMUContextID);
+
+       PVR_GOTO_IF_ERROR(eErr, ErrUnlock);
+
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+
+       eErr = _PdumpFreeMMUContext(ui32MMUContextID);
+       PVR_LOG_GOTO_IF_ERROR(eErr, "_PdumpFreeMMUContext", ErrUnlock);
+
+ErrUnlock:
+       PDUMP_UNLOCK(ui32PDumpFlags);
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return eErr;
+}
+
+PVRSRV_ERROR
+PDumpMMUSAB(PVRSRV_DEVICE_NODE *psDeviceNode,
+           const IMG_CHAR *pszPDumpMemNamespace,
+           IMG_UINT32 uiPDumpMMUCtx,
+           IMG_DEV_VIRTADDR sDevAddrStart,
+           IMG_DEVMEM_SIZE_T uiSize,
+           const IMG_CHAR *pszFilename,
+           IMG_UINT32 uiFileOffset,
+           IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eErr = PVRSRV_OK;
+
+       // "SAB :%s:v%x:0x%010llX 0x%08X 0x%08X %s.bin",
+
+       PDUMP_GET_SCRIPT_STRING();
+
+       eErr = PDumpReady();
+       if (eErr != PVRSRV_OK)
+       {
+               /* Mask suspension from caller as this is terminal & logged */
+               eErr = (eErr == PVRSRV_ERROR_PDUMP_NOT_ACTIVE) ? PVRSRV_OK : eErr;
+               goto ErrOut;
+       }
+
+       eErr = PDumpSNPrintf(hScript,
+                            ui32MaxLen,
+                            "SAB :%s:v%x:" IMG_DEV_VIRTADDR_FMTSPEC " "
+                            IMG_DEVMEM_SIZE_FMTSPEC " "
+                            "0x%x %s.bin\n",
+                            pszPDumpMemNamespace,
+                            uiPDumpMMUCtx,
+                            sDevAddrStart.uiAddr,
+                            uiSize,
+                            uiFileOffset,
+                            pszFilename);
+       PVR_GOTO_IF_ERROR(eErr, ErrOut);
+
+       PDUMP_LOCK(ui32PDumpFlags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+       PDUMP_UNLOCK(ui32PDumpFlags);
+
+ErrOut:
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return eErr;
+}
+
+#endif /* #if defined(PDUMP) */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/pdump_physmem.c b/drivers/gpu/drm/img/img-rogue/services/server/common/pdump_physmem.c
new file mode 100644 (file)
index 0000000..32b0463
--- /dev/null
@@ -0,0 +1,670 @@
+/*************************************************************************/ /*!
+@File
+@Title          Physmem PDump functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Common PDump (PMR specific) functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /***************************************************************************/
+
+#if defined(PDUMP)
+
+#if defined(__linux__)
+#include <linux/ctype.h>
+#else
+#include <ctype.h>
+#endif
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+#include "pdump_physmem.h"
+#include "pdump_km.h"
+
+#include "allocmem.h"
+#include "osfunc.h"
+
+#include "pvrsrv.h"
+
+/* #define MAX_PDUMP_MMU_CONTEXTS      (10) */
+/* static IMG_UINT32 guiPDumpMMUContextAvailabilityMask = (1<<MAX_PDUMP_MMU_CONTEXTS)-1; */
+
+
+struct _PDUMP_PHYSMEM_INFO_T_
+{
+       IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH];
+       IMG_UINT64 ui64Size;
+       IMG_UINT32 ui32Align;
+       IMG_UINT32 ui32SerialNum;
+};
+
+static IMG_BOOL _IsAllowedSym(IMG_CHAR sym)
+{
+       /* Letters, digits and '_' are allowed */
+       if (isalnum(sym) || sym == '_')
+               return IMG_TRUE;
+       else
+               return IMG_FALSE;
+}
+
+static IMG_BOOL _IsLowerCaseSym(IMG_CHAR sym)
+{
+       if (sym >= 'a' && sym <= 'z')
+               return IMG_TRUE;
+       else
+               return IMG_FALSE;
+}
+
+void PDumpMakeStringValid(IMG_CHAR *pszString,
+                          IMG_UINT32 ui32StrLen)
+{
+       IMG_UINT32 i;
+
+       if (pszString)
+       {
+               for (i = 0; i < ui32StrLen; i++)
+               {
+                       if (_IsAllowedSym(pszString[i]))
+                       {
+                               /* Upper-case any lower-case character; other allowed characters are kept as-is */
+                               if (_IsLowerCaseSym(pszString[i]))
+                               {
+                                       pszString[i] = pszString[i] - 32;
+                               }
+                       }
+                       else
+                       {
+                               pszString[i] = '_';
+                       }
+               }
+       }
+}
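+/* Example (illustrative): PDumpMakeStringValid() upper-cases letters and
+ * replaces every character that is neither alphanumeric nor '_' with '_',
+ * so a name such as "gpu buffer-01" is rewritten in place as "GPU_BUFFER_01".
+ */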
+
+/**************************************************************************
+ * Function Name  : PDumpGetSymbolicAddr
+ * Inputs         : hPhysmemPDumpHandle
+ * Outputs        : ppszSymbolicAddress
+ * Returns        : PVRSRV_ERROR
+ * Description    : Return the symbolic address string recorded for a
+ *                  physmem PDump allocation
+ **************************************************************************/
+PVRSRV_ERROR PDumpGetSymbolicAddr(const IMG_HANDLE hPhysmemPDumpHandle,
+                                  IMG_CHAR **ppszSymbolicAddress)
+{
+       PDUMP_PHYSMEM_INFO_T *psPDumpAllocationInfo;
+
+       PVR_RETURN_IF_INVALID_PARAM(hPhysmemPDumpHandle);
+
+       psPDumpAllocationInfo = (PDUMP_PHYSMEM_INFO_T *)hPhysmemPDumpHandle;
+       *ppszSymbolicAddress = psPDumpAllocationInfo->aszSymbolicAddress;
+
+       return PVRSRV_OK;
+}
+
+/**************************************************************************
+ * Function Name  : PDumpMalloc
+ * Inputs         : psDeviceNode, pszDevSpace, pszSymbolicAddress, ui64Size,
+ *                  uiAlign, bInitialise, ui32InitValue, ui32PDumpFlags
+ * Outputs        : phHandlePtr
+ * Returns        : PVRSRV_ERROR
+ * Description    : Emit a MALLOC (or CALLOC when initialising) command to the
+ *                  PDump script stream and record the allocation details
+ **************************************************************************/
+PVRSRV_ERROR PDumpMalloc(PVRSRV_DEVICE_NODE *psDeviceNode,
+                         const IMG_CHAR *pszDevSpace,
+                         const IMG_CHAR *pszSymbolicAddress,
+                         IMG_UINT64 ui64Size,
+                         IMG_DEVMEM_ALIGN_T uiAlign,
+                         IMG_BOOL bInitialise,
+                         IMG_UINT32 ui32InitValue,
+                         IMG_HANDLE *phHandlePtr,
+                         IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PDUMP_PHYSMEM_INFO_T *psPDumpAllocationInfo;
+
+       PDUMP_GET_SCRIPT_STRING()
+
+       psPDumpAllocationInfo = OSAllocMem(sizeof(*psPDumpAllocationInfo));
+       PVR_ASSERT(psPDumpAllocationInfo != NULL);
+
+       /*
+        * The PDUMP_CONT and PDUMP_PERSIST flags cannot be set together.
+        */
+       if (ui32PDumpFlags == PDUMP_NONE)
+       {
+               /*
+                       Set continuous flag because there is no way of knowing beforehand which
+                       allocation is needed for playback of the captured range.
+               */
+               ui32PDumpFlags |= PDUMP_FLAGS_CONTINUOUS;
+       }
+
+       ui32PDumpFlags |= PDUMP_FLAGS_BLKDATA;
+
+       /*
+               construct the symbolic address
+        */
+
+       OSSNPrintf(psPDumpAllocationInfo->aszSymbolicAddress,
+                  sizeof(psPDumpAllocationInfo->aszSymbolicAddress),
+                  ":%s:%s",
+                  pszDevSpace,
+                  pszSymbolicAddress);
+
+       /*
+               Write to the MMU script stream indicating the memory allocation
+       */
+       if (bInitialise)
+       {
+               eError = PDumpSNPrintf(hScript, ui32MaxLen, "CALLOC %s 0x%"IMG_UINT64_FMTSPECX" 0x%"IMG_UINT64_FMTSPECX" 0x%X\n",
+                                         psPDumpAllocationInfo->aszSymbolicAddress,
+                                         ui64Size,
+                                         uiAlign,
+                                         ui32InitValue);
+       }
+       else
+       {
+               eError = PDumpSNPrintf(hScript, ui32MaxLen, "MALLOC %s 0x%"IMG_UINT64_FMTSPECX" 0x%"IMG_UINT64_FMTSPECX"\n",
+                                         psPDumpAllocationInfo->aszSymbolicAddress,
+                                         ui64Size,
+                                         uiAlign);
+       }
+
+       if (eError != PVRSRV_OK)
+       {
+               OSFreeMem(psPDumpAllocationInfo);
+               goto _return;
+       }
+
+       PDUMP_LOCK(ui32PDumpFlags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+       PDUMP_UNLOCK(ui32PDumpFlags);
+
+       psPDumpAllocationInfo->ui64Size = ui64Size;
+       psPDumpAllocationInfo->ui32Align = TRUNCATE_64BITS_TO_32BITS(uiAlign);
+
+       *phHandlePtr = (IMG_HANDLE)psPDumpAllocationInfo;
+
+_return:
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return eError;
+}
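+/* Illustrative output (hypothetical names): with pszDevSpace "SYSMEM",
+ * pszSymbolicAddress "ALLOC_0", ui64Size and uiAlign both 0x1000 and
+ * bInitialise IMG_FALSE, the script line written above is
+ *
+ *     MALLOC :SYSMEM:ALLOC_0 0x1000 0x1000
+ *
+ * whereas bInitialise IMG_TRUE with ui32InitValue 0 writes
+ *
+ *     CALLOC :SYSMEM:ALLOC_0 0x1000 0x1000 0x0
+ */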
+
+
+/**************************************************************************
+ * Function Name  : PDumpFree
+ * Inputs         : psDeviceNode, hPDumpAllocationInfoHandle
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Emit a FREE command to the PDump script stream and release
+ *                  the allocation info
+ **************************************************************************/
+PVRSRV_ERROR PDumpFree(PVRSRV_DEVICE_NODE *psDeviceNode,
+                       IMG_HANDLE hPDumpAllocationInfoHandle)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_BLKDATA;
+
+       PDUMP_PHYSMEM_INFO_T *psPDumpAllocationInfo;
+
+       PDUMP_GET_SCRIPT_STRING()
+
+       psPDumpAllocationInfo = (PDUMP_PHYSMEM_INFO_T *)hPDumpAllocationInfoHandle;
+
+       /*
+               Write to the MMU script stream indicating the memory free
+        */
+       eError = PDumpSNPrintf(hScript, ui32MaxLen, "FREE %s\n",
+                                 psPDumpAllocationInfo->aszSymbolicAddress);
+       PVR_GOTO_IF_ERROR(eError, _return);
+
+       PDUMP_LOCK(ui32Flags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+       OSFreeMem(psPDumpAllocationInfo);
+       PDUMP_UNLOCK(ui32Flags);
+
+_return:
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return eError;
+}
+
+
+/* Checking that the request is for the PDump-bound device
+ * should be done before the following function is called
+ */
+PVRSRV_ERROR
+PDumpPMRWRW32(PVRSRV_DEVICE_NODE *psDeviceNode,
+              const IMG_CHAR *pszDevSpace,
+              const IMG_CHAR *pszSymbolicName,
+              IMG_DEVMEM_OFFSET_T uiOffset,
+              IMG_UINT32 ui32Value,
+              PDUMP_FLAGS_T uiPDumpFlags)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PDUMP_GET_SCRIPT_STRING()
+
+       eError = PDumpSNPrintf(hScript,
+                              ui32MaxLen,
+                              "WRW :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+                              PMR_VALUE32_FMTSPEC,
+                              pszDevSpace,
+                              pszSymbolicName,
+                              uiOffset,
+                              ui32Value);
+       PVR_GOTO_IF_ERROR(eError, _return);
+
+       PDUMP_LOCK(uiPDumpFlags);
+       PDumpWriteScript(psDeviceNode, hScript, uiPDumpFlags);
+       PDUMP_UNLOCK(uiPDumpFlags);
+
+_return:
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return eError;
+}
+
+/* Checking that the request is for the PDump-bound device
+ * should be done before the following function is called
+ */
+PVRSRV_ERROR
+PDumpPMRWRW32InternalVarToMem(PVRSRV_DEVICE_NODE *psDeviceNode,
+                              const IMG_CHAR *pszDevSpace,
+                              const IMG_CHAR *pszSymbolicName,
+                              IMG_DEVMEM_OFFSET_T uiOffset,
+                              const IMG_CHAR *pszInternalVar,
+                              PDUMP_FLAGS_T uiPDumpFlags)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PDUMP_GET_SCRIPT_STRING()
+
+       eError = PDumpSNPrintf(hScript,
+                                 ui32MaxLen,
+                                 "WRW :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " %s",
+                                 pszDevSpace,
+                                 pszSymbolicName,
+                                 uiOffset,
+                                 pszInternalVar);
+       PVR_GOTO_IF_ERROR(eError, _return);
+
+       PDUMP_LOCK(uiPDumpFlags);
+       PDumpWriteScript(psDeviceNode, hScript, uiPDumpFlags);
+       PDUMP_UNLOCK(uiPDumpFlags);
+
+_return:
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return eError;
+}
+
+/* Checking that the request is for the PDump-bound device
+ * should be done before the following function is called
+ */
+PVRSRV_ERROR
+PDumpPMRRDW32MemToInternalVar(PVRSRV_DEVICE_NODE *psDeviceNode,
+                              const IMG_CHAR *pszInternalVar,
+                              const IMG_CHAR *pszDevSpace,
+                              const IMG_CHAR *pszSymbolicName,
+                              IMG_DEVMEM_OFFSET_T uiOffset,
+                              PDUMP_FLAGS_T uiPDumpFlags)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PDUMP_GET_SCRIPT_STRING()
+
+       eError = PDumpSNPrintf(hScript,
+                                 ui32MaxLen,
+                                 "RDW %s :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC,
+                                 pszInternalVar,
+                                 pszDevSpace,
+                                 pszSymbolicName,
+                                 uiOffset);
+       PVR_GOTO_IF_ERROR(eError, _return);
+
+       PDUMP_LOCK(uiPDumpFlags);
+       PDumpWriteScript(psDeviceNode, hScript, uiPDumpFlags);
+       PDUMP_UNLOCK(uiPDumpFlags);
+
+_return:
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return eError;
+}
+
+/* Checking that the request is for the PDump-bound device
+ * should be done before the following function is called
+ */
+PVRSRV_ERROR
+PDumpPMRWRW64(PVRSRV_DEVICE_NODE *psDeviceNode,
+              const IMG_CHAR *pszDevSpace,
+              const IMG_CHAR *pszSymbolicName,
+              IMG_DEVMEM_OFFSET_T uiOffset,
+              IMG_UINT64 ui64Value,
+              PDUMP_FLAGS_T uiPDumpFlags)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PDUMP_GET_SCRIPT_STRING()
+
+       eError = PDumpSNPrintf(hScript,
+                              ui32MaxLen,
+                              "WRW64 :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+                              PMR_VALUE64_FMTSPEC,
+                              pszDevSpace,
+                              pszSymbolicName,
+                              uiOffset,
+                              ui64Value);
+       PVR_GOTO_IF_ERROR(eError, _return);
+
+       PDUMP_LOCK(uiPDumpFlags);
+       PDumpWriteScript(psDeviceNode, hScript, uiPDumpFlags);
+       PDUMP_UNLOCK(uiPDumpFlags);
+
+_return:
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return eError;
+}
+
+/* Checking that the request is for the PDump-bound device
+ * should be done before the following function is called
+ */
+PVRSRV_ERROR
+PDumpPMRWRW64InternalVarToMem(PVRSRV_DEVICE_NODE *psDeviceNode,
+                              const IMG_CHAR *pszDevSpace,
+                              const IMG_CHAR *pszSymbolicName,
+                              IMG_DEVMEM_OFFSET_T uiOffset,
+                              const IMG_CHAR *pszInternalVar,
+                              PDUMP_FLAGS_T uiPDumpFlags)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PDUMP_GET_SCRIPT_STRING()
+
+       eError = PDumpSNPrintf(hScript,
+                                 ui32MaxLen,
+                                 "WRW64 :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " %s",
+                                 pszDevSpace,
+                                 pszSymbolicName,
+                                 uiOffset,
+                                 pszInternalVar);
+       PVR_GOTO_IF_ERROR(eError, _return);
+
+       PDUMP_LOCK(uiPDumpFlags);
+       PDumpWriteScript(psDeviceNode, hScript, uiPDumpFlags);
+       PDUMP_UNLOCK(uiPDumpFlags);
+
+_return:
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return eError;
+}
+
+/* Checking that the request is for the PDump-bound device
+ * should be done before the following function is called
+ */
+PVRSRV_ERROR
+PDumpPMRRDW64MemToInternalVar(PVRSRV_DEVICE_NODE *psDeviceNode,
+                              const IMG_CHAR *pszInternalVar,
+                              const IMG_CHAR *pszDevSpace,
+                              const IMG_CHAR *pszSymbolicName,
+                              IMG_DEVMEM_OFFSET_T uiOffset,
+                              PDUMP_FLAGS_T uiPDumpFlags)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PDUMP_GET_SCRIPT_STRING()
+
+       eError = PDumpSNPrintf(hScript,
+                                 ui32MaxLen,
+                                 "RDW64 %s :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC,
+                                 pszInternalVar,
+                                 pszDevSpace,
+                                 pszSymbolicName,
+                                 uiOffset);
+       PVR_GOTO_IF_ERROR(eError, _return);
+
+       PDUMP_LOCK(uiPDumpFlags);
+       PDumpWriteScript(psDeviceNode, hScript, uiPDumpFlags);
+       PDUMP_UNLOCK(uiPDumpFlags);
+
+_return:
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return eError;
+}
+
+/* Checking that the request is for the PDump-bound device
+ * should be done before the following function is called
+ */
+PVRSRV_ERROR
+PDumpPMRLDB(PVRSRV_DEVICE_NODE *psDeviceNode,
+            const IMG_CHAR *pszDevSpace,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiOffset,
+            IMG_DEVMEM_SIZE_T uiSize,
+            const IMG_CHAR *pszFilename,
+            IMG_UINT32 uiFileOffset,
+            PDUMP_FLAGS_T uiPDumpFlags)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PDUMP_GET_SCRIPT_STRING()
+
+       eError = PDumpSNPrintf(hScript,
+                                 ui32MaxLen,
+                                 "LDB :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+                                 IMG_DEVMEM_SIZE_FMTSPEC " "
+                                 PDUMP_FILEOFFSET_FMTSPEC " %s\n",
+                                 pszDevSpace,
+                                 pszSymbolicName,
+                                 uiOffset,
+                                 uiSize,
+                                 uiFileOffset,
+                                 pszFilename);
+       PVR_GOTO_IF_ERROR(eError, _return);
+
+       PDUMP_LOCK(uiPDumpFlags);
+       PDumpWriteScript(psDeviceNode, hScript, uiPDumpFlags);
+       PDUMP_UNLOCK(uiPDumpFlags);
+
+_return:
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return eError;
+}
+
+/* Checking that the request is for the PDump-bound device
+ * should be done before the following function is called
+ */
+PVRSRV_ERROR PDumpPMRSAB(PVRSRV_DEVICE_NODE *psDeviceNode,
+                         const IMG_CHAR *pszDevSpace,
+                         const IMG_CHAR *pszSymbolicName,
+                         IMG_DEVMEM_OFFSET_T uiOffset,
+                         IMG_DEVMEM_SIZE_T uiSize,
+                         const IMG_CHAR *pszFileName,
+                         IMG_UINT32 uiFileOffset)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_UINT32 uiPDumpFlags;
+
+       PDUMP_GET_SCRIPT_STRING()
+
+       uiPDumpFlags = 0;
+
+       eError = PDumpSNPrintf(hScript,
+                                 ui32MaxLen,
+                                 "SAB :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+                                 IMG_DEVMEM_SIZE_FMTSPEC " "
+                                 "0x%08X %s.bin\n",
+                                 pszDevSpace,
+                                 pszSymbolicName,
+                                 uiOffset,
+                                 uiSize,
+                                 uiFileOffset,
+                                 pszFileName);
+       PVR_GOTO_IF_ERROR(eError, _return);
+
+       PDUMP_LOCK(uiPDumpFlags);
+       PDumpWriteScript(psDeviceNode, hScript, uiPDumpFlags);
+       PDUMP_UNLOCK(uiPDumpFlags);
+
+_return:
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return eError;
+}
+
+/* Checking that the request is for the PDump-bound device
+ * should be done before the following function is called
+ */
+PVRSRV_ERROR
+PDumpPMRPOL(PVRSRV_DEVICE_NODE *psDeviceNode,
+            const IMG_CHAR *pszMemspaceName,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiOffset,
+            IMG_UINT32 ui32Value,
+            IMG_UINT32 ui32Mask,
+            PDUMP_POLL_OPERATOR eOperator,
+            IMG_UINT32 uiCount,
+            IMG_UINT32 uiDelay,
+            PDUMP_FLAGS_T uiPDumpFlags)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PDUMP_GET_SCRIPT_STRING()
+
+       eError = PDumpSNPrintf(hScript,
+                                 ui32MaxLen,
+                                 "POL :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+                                 "0x%08X 0x%08X %d %d %d\n",
+                                 pszMemspaceName,
+                                 pszSymbolicName,
+                                 uiOffset,
+                                 ui32Value,
+                                 ui32Mask,
+                                 eOperator,
+                                 uiCount,
+                                 uiDelay);
+       PVR_GOTO_IF_ERROR(eError, _return);
+
+       PDUMP_LOCK(uiPDumpFlags);
+       PDumpWriteScript(psDeviceNode, hScript, uiPDumpFlags);
+       PDUMP_UNLOCK(uiPDumpFlags);
+
+_return:
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return eError;
+}
+
+/* Checking that the request is for the PDump-bound device
+ * should be done before the following function is called
+ */
+PVRSRV_ERROR
+PDumpPMRCBP(PVRSRV_DEVICE_NODE *psDeviceNode,
+            const IMG_CHAR *pszMemspaceName,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiReadOffset,
+            IMG_DEVMEM_OFFSET_T uiWriteOffset,
+            IMG_DEVMEM_SIZE_T uiPacketSize,
+            IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PDUMP_FLAGS_T uiPDumpFlags = 0;
+
+       PDUMP_GET_SCRIPT_STRING()
+
+       eError = PDumpSNPrintf(hScript,
+                                 ui32MaxLen,
+                                 "CBP :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+                                 IMG_DEVMEM_OFFSET_FMTSPEC " " IMG_DEVMEM_SIZE_FMTSPEC " " IMG_DEVMEM_SIZE_FMTSPEC "\n",
+                                 pszMemspaceName,
+                                 pszSymbolicName,
+                                 uiReadOffset,
+                                 uiWriteOffset,
+                                 uiPacketSize,
+                                 uiBufferSize);
+
+       PVR_GOTO_IF_ERROR(eError, _return);
+
+       PDUMP_LOCK(uiPDumpFlags);
+       PDumpWriteScript(psDeviceNode, hScript, uiPDumpFlags);
+       PDUMP_UNLOCK(uiPDumpFlags);
+
+_return:
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return eError;
+}
+
+/* Checking that the request is for the PDump-bound device
+ * should be done before the following function is called
+ */
+PVRSRV_ERROR
+PDumpWriteParameterBlob(PVRSRV_DEVICE_NODE *psDeviceNode,
+                        IMG_UINT8 *pcBuffer,
+                        size_t uiNumBytes,
+                        PDUMP_FLAGS_T uiPDumpFlags,
+                        IMG_CHAR *pszFilenameOut,
+                        size_t uiFilenameBufSz,
+                        PDUMP_FILEOFFSET_T *puiOffsetOut)
+{
+       PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
+       PVR_UNREFERENCED_PARAMETER(uiFilenameBufSz);
+
+       PVR_ASSERT(uiNumBytes > 0);
+
+       eError = PDumpReady();
+       if (eError != PVRSRV_OK)
+       {
+               /* Mask suspension from caller as this is terminal & logged */
+               eError = (eError == PVRSRV_ERROR_PDUMP_NOT_ACTIVE) ?
+                               PVRSRV_ERROR_PDUMP_NOT_ALLOWED :
+                               eError;
+               return eError;
+       }
+
+       PVR_ASSERT(uiFilenameBufSz <= PDUMP_PARAM_MAX_FILE_NAME);
+
+       PDUMP_LOCK(uiPDumpFlags);
+
+       eError = PDumpWriteParameter(psDeviceNode, pcBuffer, uiNumBytes,
+                                    uiPDumpFlags, puiOffsetOut, pszFilenameOut);
+       PDUMP_UNLOCK(uiPDumpFlags);
+
+       if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_PDUMP_NOT_ALLOWED))
+       {
+               PVR_LOG_RETURN_IF_ERROR(eError, "PDumpWriteParameter");
+       }
+       /* else the write to the parameter file succeeded, or it was prevented by
+        * the flags or the current state of the driver, so skip further writes
+        * and let the caller know.
+        */
+       return eError;
+}
+
+#endif /* PDUMP */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/pdump_server.c b/drivers/gpu/drm/img/img-rogue/services/server/common/pdump_server.c
new file mode 100644 (file)
index 0000000..f5f2f55
--- /dev/null
@@ -0,0 +1,5563 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common Server PDump functions layer
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(PDUMP)
+
+#if defined(__linux__)
+ #include <linux/version.h>
+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+  #include <linux/stdarg.h>
+ #else
+  #include <stdarg.h>
+ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
+#else
+ #include <stdarg.h>
+#endif /* __linux__ */
+
+#include "pvrversion.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "pvr_debug.h"
+#include "srvkm.h"
+#include "pdump_physmem.h"
+#include "hash.h"
+#include "connection_server.h"
+#include "services_km.h"
+#include <powervr/buffer_attribs.h>
+#include "oskm_apphint.h"
+
+/* pdump headers */
+#include "tlstream.h"
+#include "pdump_km.h"
+
+#include "pdumpdesc.h"
+#include "rgxpdump.h"
+
+#include "tutilsdefs.h"
+#include "tutils_km.h"
+/* Allow temporary buffer size override */
+#if !defined(PDUMP_TEMP_BUFFER_SIZE)
+#define PDUMP_TEMP_BUFFER_SIZE (64 * 1024U)
+#endif
+
+#define        PTR_PLUS(t, p, x) ((t)(((IMG_CHAR *)(p)) + (x)))
+#define        VPTR_PLUS(p, x) PTR_PLUS(void *, p, x)
+#define        VPTR_INC(p, x) ((p) = VPTR_PLUS(p, x))
+#define MAX_PDUMP_MMU_CONTEXTS (32)
+
+#define PRM_FILE_SIZE_MAX      0x7FDFFFFFU /*!< Default maximum file size at which output files are split: 2GB-2MB, since fwrite limits a file to 2GB-1 on 32-bit systems */
+
+#define MAX_PDUMP_WRITE_RETRIES        200     /*!< Max number of retries to dump pdump data into the respective buffers */
+
+/* 'Magic' cookie used in this file only, where no psDeviceNode is available
+ * but writing to the PDump log should be permitted
+ */
+#define PDUMP_MAGIC_COOKIE 0x9E0FF
+
+static ATOMIC_T                g_sConnectionCount;
+
+/*
+ * Structure to store some essential attributes of a PDump stream buffer.
+ */
+typedef struct
+{
+       IMG_CHAR*  pszName;                     /*!< Name of the PDump TL Stream buffer */
+       IMG_HANDLE hTL;                         /*!< Handle of created TL stream buffer */
+       IMG_UINT32 ui32BufferSize;              /*!< The size of the buffer in bytes */
+       IMG_UINT32 ui32BufferFullRetries;       /*!< The number of times the buffer got full */
+       IMG_UINT32 ui32BufferFullAborts;        /*!< The number of times we failed to write data */
+       IMG_UINT32 ui32HighestRetriesWatermark; /*!< Highest number of retries needed to dump pdump data */
+       IMG_UINT32 ui32MaxAllowedWriteSize;     /*!< Max allowed write packet size */
+} PDUMP_STREAM;
+
+typedef struct
+{
+       PDUMP_STREAM sInitStream;   /*!< Driver initialisation PDump stream */
+       PDUMP_STREAM sMainStream;   /*!< App framed PDump stream */
+       PDUMP_STREAM sDeinitStream; /*!< Driver/HW de-initialisation PDump stream */
+       PDUMP_STREAM sBlockStream;  /*!< Block mode PDump block data stream - currently it is script only */
+} PDUMP_CHANNEL;
+
+typedef struct
+{
+       PDUMP_CHANNEL sCh;         /*!< Channel handles */
+       IMG_UINT32    ui32FileIdx; /*!< File index gets incremented on script out file split */
+} PDUMP_SCRIPT;
+
+typedef struct
+{
+       IMG_UINT32    ui32Init;    /*!< Count of bytes written to the init phase stream */
+       IMG_UINT32    ui32Main;    /*!< Count of bytes written to the main stream */
+       IMG_UINT32    ui32Deinit;  /*!< Count of bytes written to the deinit stream */
+       IMG_UINT32    ui32Block;   /*!< Count of bytes written to the block stream */
+} PDUMP_CHANNEL_WOFFSETS;
+
+typedef struct
+{
+       PDUMP_CHANNEL          sCh;             /*!< Channel handles */
+       PDUMP_CHANNEL_WOFFSETS sWOff;           /*!< Channel file write offsets */
+       IMG_UINT32             ui32FileIdx;     /*!< File index used when the file size limit is reached and a new file is started; parameter channel only */
+       IMG_UINT32             ui32MaxFileSize; /*!< Maximum file size for parameter files */
+
+       PDUMP_FILEOFFSET_T     uiZeroPageOffset; /*!< Offset of the zero page in the parameter file */
+       size_t                 uiZeroPageSize;   /*!< Size of the zero page in the parameter file */
+       IMG_CHAR               szZeroPageFilename[PDUMP_PARAM_MAX_FILE_NAME]; /*!< PRM file name where the zero page was pdumped */
+} PDUMP_PARAMETERS;
+
+/* PDump lock to keep pdump write atomic.
+ * Which will protect g_PDumpScript & g_PDumpParameters pdump
+ * specific shared variable.
+ */
+static POS_LOCK g_hPDumpWriteLock;
+
+static PDUMP_SCRIPT     g_PDumpScript    = { {
+               {       PDUMP_SCRIPT_INIT_STREAM_NAME,   NULL,
+                       PDUMP_SCRIPT_INIT_STREAM_SIZE,   0, 0, 0 },
+               {       PDUMP_SCRIPT_MAIN_STREAM_NAME,   NULL,
+                       PDUMP_SCRIPT_MAIN_STREAM_SIZE,   0, 0, 0 },
+               {       PDUMP_SCRIPT_DEINIT_STREAM_NAME, NULL,
+                       PDUMP_SCRIPT_DEINIT_STREAM_SIZE, 0, 0, 0 },
+               {       PDUMP_SCRIPT_BLOCK_STREAM_NAME,   NULL,
+                       PDUMP_SCRIPT_BLOCK_STREAM_SIZE,   0, 0, 0 },
+               }, 0 };
+static PDUMP_PARAMETERS g_PDumpParameters = { {
+               {       PDUMP_PARAM_INIT_STREAM_NAME,   NULL,
+                       PDUMP_PARAM_INIT_STREAM_SIZE,   0, 0, 0 },
+               {       PDUMP_PARAM_MAIN_STREAM_NAME,   NULL,
+                       PDUMP_PARAM_MAIN_STREAM_SIZE,   0, 0, 0 },
+               {       PDUMP_PARAM_DEINIT_STREAM_NAME, NULL,
+                       PDUMP_PARAM_DEINIT_STREAM_SIZE, 0, 0, 0 },
+               {       PDUMP_PARAM_BLOCK_STREAM_NAME,   NULL,
+                       PDUMP_PARAM_BLOCK_STREAM_SIZE,   0, 0, 0 },
+               }, {0, 0, 0, 0}, 0, PRM_FILE_SIZE_MAX};
+
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+/* counter increments each time debug write is called */
+ATOMIC_T g_sEveryLineCounter;
+#endif
+
+// #define PDUMP_DEBUG_TRANSITION
+#if defined(PDUMP_DEBUG_TRANSITION)
+# define DEBUG_OUTFILES_COMMENT(dev, fmt, ...) (void)PDumpCommentWithFlags(dev, PDUMP_FLAGS_CONTINUOUS, fmt, __VA_ARGS__)
+#else
+# define DEBUG_OUTFILES_COMMENT(dev, fmt, ...)
+#endif
+
+#if defined(PDUMP_DEBUG) || defined(REFCOUNT_DEBUG)
+# define PDUMP_REFCOUNT_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_WARNING, __FILE__, __LINE__, fmt, __VA_ARGS__)
+#else
+# define PDUMP_REFCOUNT_PRINT(fmt, ...)
+#endif
+
+/* Prototype for the test/debug state dump routine used in debugging */
+#if defined(PDUMP_TRACE_STATE) || defined(PVR_TESTING_UTILS)
+void PDumpCommonDumpState(void);
+#endif
+
+
+/*****************************************************************************/
+/* PDump Control Module Definitions                                          */
+/*****************************************************************************/
+
+/*
+ * struct _PDUMP_CAPTURE_RANGE_ is interpreted differently in different modes of PDump
+ *
+ * Non-Block mode:
+ *    ui32Start     - Start frame number of range
+ *    ui32End       - End frame number of range
+ *    ui32Interval  - Frame sample rate interval
+ *
+ * Block mode:
+ *    ui32Start     - If set to '0', the first PDump block will be of minimal
+ *                    (i.e. PDUMP_BLOCKLEN_MIN) length; otherwise all blocks
+ *                    will be of the block length provided
+ *
+ *    ui32End       - By default this is set to PDUMP_FRAME_MAX so that blocked
+ *                    PDump capture continues indefinitely until stopped
+ *                    externally. On a forced capture stop, this is set to
+ *                    (ui32CurrentFrame + 1) to stop capture from the next
+ *                    frame onwards
+ *
+ *    ui32Interval  - This is interpreted as the PDump block length provided
+ **/
+typedef struct _PDUMP_CAPTURE_RANGE_
+{
+       IMG_UINT32 ui32Start;
+       IMG_UINT32 ui32End;
+       IMG_UINT32 ui32Interval;
+} PDUMP_CAPTURE_RANGE;
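+/* Illustrative framed-mode example: with ui32Start=10, ui32End=20 and
+ * ui32Interval=2, frames 10, 12, ... 20 fall inside the capture range (the
+ * "(frame - start) % interval == 0" test in PDumpCtrlUpdateCaptureStatus()),
+ * frames 11, 13, ... 19 fall in the no-capture interval, and frames outside
+ * 10..20 are out of range altogether.
+ */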
+
+/* PDump Block mode specific controls */
+typedef struct _PDUMP_BLOCK_CTRL_
+{
+       IMG_UINT32 ui32BlockLength;       /*!< PDump block length in term of number of frames per block */
+       IMG_UINT32 ui32CurrentBlock;      /*!< Current block number */
+} PDUMP_BLOCK_CTRL;
+
+/*! PDump common module State Machine states */
+typedef enum _PDUMP_SM_
+{
+       PDUMP_SM_UNINITIALISED,           /*!< Starting state */
+       PDUMP_SM_INITIALISING,            /*!< Module is initialising */
+       PDUMP_SM_READY,                   /*!< Module is initialised and ready */
+       PDUMP_SM_READY_CLIENT_CONNECTED,  /*!< Module is ready and capture client connected */
+       PDUMP_SM_FORCED_SUSPENDED,        /*!< Module forcibly suspended; PDumping suspended to force a driver reload before the next capture */
+       PDUMP_SM_ERROR_SUSPENDED,         /*!< Module fatal error; PDumping suspended (semi-final state) */
+       PDUMP_SM_DEINITIALISED            /*!< Final state */
+} PDUMP_SM;
+
+/*! PDump control flags */
+#define FLAG_IS_DRIVER_IN_INIT_PHASE 0x1  /*! Control flag tracking whether the driver is in its initialisation phase */
+#define FLAG_IS_IN_CAPTURE_RANGE     0x2  /*! Control flag tracking whether the current frame is inside the capture range */
+#define FLAG_IS_IN_CAPTURE_INTERVAL  0x4  /*! Control flag tracking whether the current frame is in an interval where no capture takes place */
+
+#define CHECK_PDUMP_CONTROL_FLAG(PDUMP_CONTROL_FLAG) BITMASK_HAS(g_PDumpCtrl.ui32Flags, PDUMP_CONTROL_FLAG)
+#define SET_PDUMP_CONTROL_FLAG(PDUMP_CONTROL_FLAG)   BITMASK_SET(g_PDumpCtrl.ui32Flags, PDUMP_CONTROL_FLAG)
+#define UNSET_PDUMP_CONTROL_FLAG(PDUMP_CONTROL_FLAG) BITMASK_UNSET(g_PDumpCtrl.ui32Flags, PDUMP_CONTROL_FLAG)
+
+/* No direct access to members from outside the control module - please */
+typedef struct _PDUMP_CTRL_STATE_
+{
+       PDUMP_SM            eServiceState;      /*!< State of the pdump_common module */
+       IMG_UINT32          ui32Flags;
+
+       IMG_UINT32          ui32DefaultCapMode; /*!< Capture mode of the dump */
+       PDUMP_CAPTURE_RANGE sCaptureRange;      /*!< The capture range for capture mode 'framed' */
+       IMG_UINT32          ui32CurrentFrame;   /*!< Current frame number */
+
+       PDUMP_BLOCK_CTRL    sBlockCtrl;         /*!< Pdump block mode ctrl data */
+
+       POS_LOCK            hLock;              /*!< Exclusive lock to this structure */
+       IMG_PID             InPowerTransitionPID;/*!< pid of thread requesting power transition */
+} PDUMP_CTRL_STATE;
+
+static PDUMP_CTRL_STATE g_PDumpCtrl =
+{
+       PDUMP_SM_UNINITIALISED,
+
+       FLAG_IS_DRIVER_IN_INIT_PHASE,
+
+       PDUMP_CAPMODE_UNSET,
+       {
+               PDUMP_FRAME_UNSET,
+               PDUMP_FRAME_UNSET,
+               0
+       },
+       0,
+
+       {
+               0,
+               PDUMP_BLOCKNUM_INVALID,
+       },
+
+       NULL,
+       0
+};
+
+static void PDumpAssertWriteLockHeld(void);
+
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+
+/*************************************************************************/ /*!
+ @Function             PDumpCreateIncVarNameStr
+ @Description  When a 64-bit register access is split into two 32-bit
+       accesses, two PDump internal variables are needed to store the register
+       value. This function creates the name string for the second PDump
+       internal variable; for example, if the variable name passed in is
+       :SYSMEM:$1, this function generates the string :SYSMEM:$2.
+
+ @Input        pszInternalVar  String for PDump internal variable in use
+
+ @Return IMG_CHAR*  String for second PDump internal variable to be used
+*/ /**************************************************************************/
+static INLINE IMG_CHAR* PDumpCreateIncVarNameStr(const IMG_CHAR* pszInternalVar)
+{
+       IMG_CHAR *pszPDumpVarName;
+       IMG_UINT32 ui32Size = (IMG_UINT32)OSStringLength(pszInternalVar);
+       if (ui32Size == 0)
+       {
+               return NULL;
+       }
+
+       ui32Size++;
+       pszPDumpVarName = (IMG_CHAR*)OSAllocMem((ui32Size) * sizeof(IMG_CHAR));
+       if (pszPDumpVarName == NULL)
+       {
+               return NULL;
+       }
+
+       OSStringLCopy(pszPDumpVarName, pszInternalVar, ui32Size);
+       /* Increase the number on the second variable */
+       pszPDumpVarName[ui32Size-2] += 1;
+       return pszPDumpVarName;
+}
+
+/*************************************************************************/ /*!
+ @Function             PDumpFreeIncVarNameStr
+ @Description  Free the string created by function PDumpCreateIncVarNameStr
+
+ @Input        pszPDumpVarName String to free
+
+ @Return       void
+*/ /**************************************************************************/
+static INLINE void PDumpFreeIncVarNameStr(IMG_CHAR* pszPDumpVarName)
+{
+       if (pszPDumpVarName != NULL)
+       {
+               OSFreeMem(pszPDumpVarName);
+       }
+}
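+/* Usage sketch (illustrative): a caller splitting a 64-bit access would derive
+ * the second variable name from the first and release it when done, e.g.
+ *
+ *     IMG_CHAR *pszVar2 = PDumpCreateIncVarNameStr(":SYSMEM:$1");
+ *     if (pszVar2 != NULL)
+ *     {
+ *             ... use ":SYSMEM:$1" and pszVar2 (":SYSMEM:$2") for the two
+ *                 32-bit halves of the register value ...
+ *             PDumpFreeIncVarNameStr(pszVar2);
+ *     }
+ */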
+#endif
+
+static PVRSRV_ERROR PDumpCtrlInit(void)
+{
+       g_PDumpCtrl.eServiceState = PDUMP_SM_INITIALISING;
+
+       /* Create the lock for the PDUMP_CTRL_STATE struct, which is shared between
+          the pdump client and the PDumping app. The lock serialises calls from
+          the two. */
+       PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&g_PDumpCtrl.hLock), "OSLockCreate");
+
+       return PVRSRV_OK;
+}
+
+static void PDumpCtrlDeInit(void)
+{
+       if (g_PDumpCtrl.hLock)
+       {
+               OSLockDestroy(g_PDumpCtrl.hLock);
+               g_PDumpCtrl.hLock = NULL;
+       }
+}
+
+static INLINE void PDumpCtrlLockAcquire(void)
+{
+       OSLockAcquire(g_PDumpCtrl.hLock);
+}
+
+static INLINE void PDumpCtrlLockRelease(void)
+{
+       OSLockRelease(g_PDumpCtrl.hLock);
+}
+
+static INLINE PDUMP_SM PDumpCtrlGetModuleState(void)
+{
+       return g_PDumpCtrl.eServiceState;
+}
+
+PVRSRV_ERROR PDumpReady(void)
+{
+       switch (PDumpCtrlGetModuleState())
+       {
+       case PDUMP_SM_READY:
+       case PDUMP_SM_READY_CLIENT_CONNECTED:
+               return PVRSRV_OK;
+
+       case PDUMP_SM_FORCED_SUSPENDED:
+       case PDUMP_SM_ERROR_SUSPENDED:
+               return PVRSRV_ERROR_PDUMP_NOT_ACTIVE;
+
+       case PDUMP_SM_UNINITIALISED:
+       case PDUMP_SM_INITIALISING:
+       case PDUMP_SM_DEINITIALISED:
+               return PVRSRV_ERROR_PDUMP_NOT_AVAILABLE;
+
+       default:
+               /* Bad state - should never be reached */
+               PVR_ASSERT(0);
+               return PVRSRV_ERROR_BAD_MAPPING;
+       }
+}
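+/* Typical caller pattern (see PDumpMMUSAB() earlier in this patch): callers
+ * check PDumpReady() first and mask the "not active" case so a suspended
+ * PDump is not reported as a failure:
+ *
+ *     eErr = PDumpReady();
+ *     if (eErr != PVRSRV_OK)
+ *     {
+ *             return (eErr == PVRSRV_ERROR_PDUMP_NOT_ACTIVE) ? PVRSRV_OK : eErr;
+ *     }
+ */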
+
+
+/******************************************************************************
+       NOTE:
+       The following PDumpCtrl*** functions require the PDUMP_CTRL_STATE lock be
+       acquired BEFORE they are called. This is because the PDUMP_CTRL_STATE data
+       is shared between the PDumping App and the PDump client, hence an exclusive
+       access is required. The lock can be acquired and released by using the
+       PDumpCtrlLockAcquire & PDumpCtrlLockRelease functions respectively.
+******************************************************************************/
+
+static void PDumpCtrlUpdateCaptureStatus(void)
+{
+       if (g_PDumpCtrl.ui32DefaultCapMode == PDUMP_CAPMODE_FRAMED)
+       {
+               if ((g_PDumpCtrl.ui32CurrentFrame >= g_PDumpCtrl.sCaptureRange.ui32Start) &&
+                       (g_PDumpCtrl.ui32CurrentFrame <= g_PDumpCtrl.sCaptureRange.ui32End))
+               {
+                       if (((g_PDumpCtrl.ui32CurrentFrame - g_PDumpCtrl.sCaptureRange.ui32Start) % g_PDumpCtrl.sCaptureRange.ui32Interval) == 0)
+                       {
+                               SET_PDUMP_CONTROL_FLAG(FLAG_IS_IN_CAPTURE_RANGE);
+                               UNSET_PDUMP_CONTROL_FLAG(FLAG_IS_IN_CAPTURE_INTERVAL);
+                       }
+                       else
+                       {
+                               UNSET_PDUMP_CONTROL_FLAG(FLAG_IS_IN_CAPTURE_RANGE);
+                               SET_PDUMP_CONTROL_FLAG(FLAG_IS_IN_CAPTURE_INTERVAL);
+                       }
+               }
+               else
+               {
+                       UNSET_PDUMP_CONTROL_FLAG(FLAG_IS_IN_CAPTURE_RANGE);
+                       UNSET_PDUMP_CONTROL_FLAG(FLAG_IS_IN_CAPTURE_INTERVAL);
+               }
+       }
+       else if ((g_PDumpCtrl.ui32DefaultCapMode == PDUMP_CAPMODE_CONTINUOUS) || (g_PDumpCtrl.ui32DefaultCapMode == PDUMP_CAPMODE_BLOCKED))
+       {
+               SET_PDUMP_CONTROL_FLAG(FLAG_IS_IN_CAPTURE_RANGE);
+               UNSET_PDUMP_CONTROL_FLAG(FLAG_IS_IN_CAPTURE_INTERVAL);
+       }
+       else if (g_PDumpCtrl.ui32DefaultCapMode == PDUMP_CAPMODE_UNSET)
+       {
+               UNSET_PDUMP_CONTROL_FLAG(FLAG_IS_IN_CAPTURE_RANGE);
+               UNSET_PDUMP_CONTROL_FLAG(FLAG_IS_IN_CAPTURE_INTERVAL);
+       }
+       else
+       {
+               UNSET_PDUMP_CONTROL_FLAG(FLAG_IS_IN_CAPTURE_RANGE);
+               UNSET_PDUMP_CONTROL_FLAG(FLAG_IS_IN_CAPTURE_INTERVAL);
+               PVR_DPF((PVR_DBG_ERROR, "PDumpCtrlUpdateCaptureStatus: Unexpected capture mode (%x)", g_PDumpCtrl.ui32DefaultCapMode));
+       }
+
+}
+
+static INLINE IMG_UINT32 PDumpCtrlCapModIsBlocked(void)
+{
+       return (g_PDumpCtrl.ui32DefaultCapMode == PDUMP_CAPMODE_BLOCKED);
+}
+
+static INLINE IMG_UINT32 PDumpCtrlMinimalFirstBlock(void)
+{
+       /* If ui32Start is set to zero, the first block length is set to the
+        * minimum (i.e. PDUMP_BLOCKLEN_MIN); otherwise it is the same length as
+        * the rest of the blocks (i.e. ui32BlockLength).
+        *
+        * A shorter first block reduces the playback time of the final capture.
+        */
+
+       return (PDumpCtrlCapModIsBlocked() && (g_PDumpCtrl.sCaptureRange.ui32Start == 0));
+}
+
+static void PDumpCtrlSetBlock(IMG_UINT32 ui32BlockNum)
+{
+       g_PDumpCtrl.sBlockCtrl.ui32CurrentBlock = PDumpCtrlCapModIsBlocked()? ui32BlockNum : PDUMP_BLOCKNUM_INVALID;
+}
+
+static INLINE IMG_UINT32 PDumpCtrlGetBlock(void)
+{
+       return PDumpCtrlCapModIsBlocked()? g_PDumpCtrl.sBlockCtrl.ui32CurrentBlock : PDUMP_BLOCKNUM_INVALID;
+}
+
+static PVRSRV_ERROR PDumpCtrlForcedStop(void)
+{
+       /* In block mode, on a forced stop request, capture is stopped after frame
+        * (current_frame + 1). This ensures that DumpAfterRender is always called
+        * on the last frame before PDump capture exits.
+        */
+       g_PDumpCtrl.sCaptureRange.ui32End = g_PDumpCtrl.ui32CurrentFrame + 1;
+
+       return PVRSRV_OK;
+}
+
+static INLINE IMG_BOOL PDumpCtrlIsCaptureForceStopped(void)
+{
+       return (PDumpCtrlCapModIsBlocked() && (g_PDumpCtrl.ui32CurrentFrame > g_PDumpCtrl.sCaptureRange.ui32End));
+}
+
+static void PDumpCtrlSetCurrentFrame(IMG_UINT32 ui32Frame)
+{
+       g_PDumpCtrl.ui32CurrentFrame = ui32Frame;
+
+       PDumpCtrlUpdateCaptureStatus();
+
+       /* Force PDump module to suspend PDumping on forced capture stop */
+       if ((PDumpCtrlGetModuleState() != PDUMP_SM_FORCED_SUSPENDED) && PDumpCtrlIsCaptureForceStopped())
+       {
+               PVR_LOG(("PDump forced capture stop received. Suspend PDumping to force driver reload before next capture."));
+               g_PDumpCtrl.eServiceState = PDUMP_SM_FORCED_SUSPENDED;
+       }
+#if defined(PDUMP_TRACE_STATE)
+       PDumpCommonDumpState();
+#endif
+}
+
+static void PDumpCtrlSetDefaultCaptureParams(IMG_UINT32 ui32Mode, IMG_UINT32 ui32Start, IMG_UINT32 ui32End, IMG_UINT32 ui32Interval)
+{
+       /* Set the capture range to that supplied by the PDump client tool
+        */
+       g_PDumpCtrl.ui32DefaultCapMode = ui32Mode;
+       g_PDumpCtrl.sCaptureRange.ui32Start = ui32Start;
+       g_PDumpCtrl.sCaptureRange.ui32End = ui32End;
+       g_PDumpCtrl.sCaptureRange.ui32Interval = ui32Interval;
+
+       /* Set pdump block mode ctrl variables */
+       g_PDumpCtrl.sBlockCtrl.ui32BlockLength = (ui32Mode == PDUMP_CAPMODE_BLOCKED)? ui32Interval : 0; /* ui32Interval is interpreted as block length */
+       g_PDumpCtrl.sBlockCtrl.ui32CurrentBlock = PDUMP_BLOCKNUM_INVALID;
+
+       /* Change module state to record capture client connected */
+       if (g_PDumpCtrl.ui32DefaultCapMode == PDUMP_CAPMODE_UNSET)
+               g_PDumpCtrl.eServiceState = PDUMP_SM_READY;
+       else
+               g_PDumpCtrl.eServiceState = PDUMP_SM_READY_CLIENT_CONNECTED;
+
+       /* Reset the current frame when the capture range is reset; this helps to
+        * avoid inter-pdump start frame issues when the driver is not reloaded.
+        * There is no need to call PDumpCtrlUpdateCaptureStatus() directly as the
+        * set current frame call below will do so.
+        */
+       PDumpCtrlSetCurrentFrame(0);
+
+}
+
+static IMG_UINT32 PDumpCtrlGetCurrentFrame(void)
+{
+       return g_PDumpCtrl.ui32CurrentFrame;
+}
+
+static INLINE IMG_BOOL PDumpCtrlCaptureOn(void)
+{
+       return ((g_PDumpCtrl.eServiceState == PDUMP_SM_READY_CLIENT_CONNECTED) &&
+                       CHECK_PDUMP_CONTROL_FLAG(FLAG_IS_IN_CAPTURE_RANGE)) ? IMG_TRUE : IMG_FALSE;
+}
+
+static INLINE IMG_BOOL PDumpCtrlCaptureInInterval(void)
+{
+       return ((g_PDumpCtrl.eServiceState == PDUMP_SM_READY_CLIENT_CONNECTED) &&
+                       CHECK_PDUMP_CONTROL_FLAG(FLAG_IS_IN_CAPTURE_INTERVAL)) ? IMG_TRUE : IMG_FALSE;
+}
+
+static INLINE IMG_BOOL PDumpCtrlCaptureRangePast(void)
+{
+       return (g_PDumpCtrl.ui32CurrentFrame > g_PDumpCtrl.sCaptureRange.ui32End);
+}
+
+static IMG_BOOL PDumpCtrlIsLastCaptureFrame(void)
+{
+       if (g_PDumpCtrl.ui32DefaultCapMode == PDUMP_CAPMODE_FRAMED)
+       {
+               /* Is the next capture frame within the range end limit? */
+               if ((g_PDumpCtrl.ui32CurrentFrame + g_PDumpCtrl.sCaptureRange.ui32Interval) > g_PDumpCtrl.sCaptureRange.ui32End)
+               {
+                       return IMG_TRUE;
+               }
+       }
+       else if (g_PDumpCtrl.ui32DefaultCapMode == PDUMP_CAPMODE_BLOCKED)
+       {
+               if (g_PDumpCtrl.ui32CurrentFrame == g_PDumpCtrl.sCaptureRange.ui32End)
+               {
+                       return IMG_TRUE;
+               }
+       }
+       /* Return false for all other conditions: framed mode but not the last
+        * frame, continuous mode, or unset mode.
+        */
+       return IMG_FALSE;
+}
+
+static INLINE IMG_BOOL PDumpCtrlInitPhaseComplete(void)
+{
+       return !CHECK_PDUMP_CONTROL_FLAG(FLAG_IS_DRIVER_IN_INIT_PHASE);
+}
+
+static INLINE void PDumpCtrlSetInitPhaseComplete(IMG_BOOL bIsComplete)
+{
+       PDUMP_HERE_VAR;
+
+       if (bIsComplete)
+       {
+               UNSET_PDUMP_CONTROL_FLAG(FLAG_IS_DRIVER_IN_INIT_PHASE);
+               PDUMP_HEREA(102);
+       }
+       else
+       {
+               SET_PDUMP_CONTROL_FLAG(FLAG_IS_DRIVER_IN_INIT_PHASE);
+               PDUMP_HEREA(103);
+       }
+}
+
+static INLINE void PDumpCtrlPowerTransitionStart(void)
+{
+       g_PDumpCtrl.InPowerTransitionPID = OSGetCurrentProcessID();
+}
+
+static INLINE void PDumpCtrlPowerTransitionEnd(void)
+{
+       g_PDumpCtrl.InPowerTransitionPID = 0;
+}
+
+static INLINE IMG_PID PDumpCtrlInPowerTransitionPID(void)
+{
+       return g_PDumpCtrl.InPowerTransitionPID;
+}
+
+static INLINE IMG_BOOL PDumpCtrlInPowerTransition(void)
+{
+       IMG_BOOL bPDumpInPowerTransition = IMG_FALSE;
+       if (PDumpCtrlInPowerTransitionPID())
+       {
+               bPDumpInPowerTransition = IMG_TRUE;
+       }
+       return bPDumpInPowerTransition;
+}
+
+static PVRSRV_ERROR PDumpCtrlGetState(IMG_UINT64 *ui64State)
+{
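+       /* Report the current PDump state as a bitmask of PDUMP_STATE_* flags. */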
+       PDUMP_SM eState;
+
+       *ui64State = 0;
+
+       if (PDumpCtrlCaptureOn())
+       {
+               *ui64State |= PDUMP_STATE_CAPTURE_FRAME;
+       }
+
+       if (PDumpCtrlCaptureInInterval())
+       {
+               *ui64State |= PDUMP_STATE_CAPTURE_IN_INTERVAL;
+       }
+
+       eState = PDumpCtrlGetModuleState();
+
+       if (eState == PDUMP_SM_READY_CLIENT_CONNECTED)
+       {
+               *ui64State |= PDUMP_STATE_CONNECTED;
+       }
+
+       if (eState == PDUMP_SM_ERROR_SUSPENDED)
+       {
+               *ui64State |= PDUMP_STATE_SUSPENDED;
+       }
+
+       return PVRSRV_OK;
+}
+
+/******************************************************************************
+       End of PDumpCtrl*** functions
+******************************************************************************/
+
+/*
+       Wrapper functions which need to be exposed in pdump_km.h for use in other
+       pdump_*** modules safely. These functions call the specific PDumpCtrl layer
+       function after acquiring the PDUMP_CTRL_STATE lock, hence making the calls
+       from other modules hassle free by avoiding the acquire/release CtrlLock
+       calls.
+*/
+
+static INLINE void PDumpModuleTransitionState(PDUMP_SM eNewState)
+{
+       PDumpCtrlLockAcquire();
+       g_PDumpCtrl.eServiceState = eNewState;
+       PDumpCtrlLockRelease();
+}
+
+void PDumpPowerTransitionStart(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       if (PDumpIsDevicePermitted(psDeviceNode))
+       {
+               PDumpCtrlLockAcquire();
+               PDumpCtrlPowerTransitionStart();
+               PDumpCtrlLockRelease();
+       }
+}
+
+void PDumpPowerTransitionEnd(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       if (PDumpIsDevicePermitted(psDeviceNode))
+       {
+               PDumpCtrlLockAcquire();
+               PDumpCtrlPowerTransitionEnd();
+               PDumpCtrlLockRelease();
+       }
+}
+
+static PVRSRV_ERROR PDumpGetCurrentBlockKM(IMG_UINT32 *pui32BlockNum)
+{
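+       /* Returns PDUMP_BLOCKNUM_INVALID when not in blocked capture mode. */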
+       PDumpCtrlLockAcquire();
+       *pui32BlockNum = PDumpCtrlGetBlock();
+       PDumpCtrlLockRelease();
+
+       return PVRSRV_OK;
+}
+
+static IMG_BOOL PDumpIsClientConnected(void)
+{
+       IMG_BOOL bPDumpClientConnected;
+
+       PDumpCtrlLockAcquire();
+       bPDumpClientConnected = (PDumpCtrlGetModuleState() == PDUMP_SM_READY_CLIENT_CONNECTED);
+       PDumpCtrlLockRelease();
+
+       return bPDumpClientConnected;
+}
+
+/* Forward declaration of PDumpWriteAllowed() so it can be used in PDumpCheckFlagsWrite() */
+static IMG_BOOL PDumpWriteAllowed(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                  IMG_UINT32 ui32Flags, IMG_UINT32* ui32ExitHere);
+
+IMG_BOOL PDumpCheckFlagsWrite(PVRSRV_DEVICE_NODE *psDeviceNode,
+                              IMG_UINT32 ui32Flags)
+{
+       return PDumpWriteAllowed(psDeviceNode, ui32Flags, NULL);
+}
+
+/*****************************************************************************/
+/* PDump Common Write Layer just above common Transport Layer                */
+/*****************************************************************************/
+
+
+/*!
+ * \name       _PDumpSetSplitMarker
+ */
+static IMG_BOOL _PDumpSetSplitMarker(IMG_HANDLE hStream, IMG_BOOL bRemoveOld)
+{
+       PVRSRV_ERROR eError;
+       /* We have to indicate to the reader that we wish to split. Insert an EOS packet into the TL stream */
+       eError = TLStreamMarkEOS(hStream, bRemoveOld);
+
+       /* If unsuccessful, return false */
+       if (eError != PVRSRV_OK)
+       {
+               PVR_LOG_ERROR(eError, "TLStreamMarkEOS");
+
+               return IMG_FALSE;
+       }
+
+       return IMG_TRUE;
+}
+
+IMG_BOOL PDumpIsDevicePermitted(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+       if ((void*)psDeviceNode == (void*)PDUMP_MAGIC_COOKIE)
+       {
+               /* Always permit PDumping if passed 'magic' cookie */
+               return IMG_TRUE;
+       }
+
+       if (psDeviceNode)
+       {
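+               /* Reject devices with an out-of-range internal ID or, when PDump is
+                * bound to a specific device, any device other than the bound one. */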
+               if ((psDeviceNode->sDevId.ui32InternalID > PVRSRV_MAX_DEVICES) ||
+                   ((psPVRSRVData->ui32PDumpBoundDevice < PVRSRV_MAX_DEVICES) &&
+                    (psDeviceNode->sDevId.ui32InternalID != psPVRSRVData->ui32PDumpBoundDevice)))
+               {
+                       return IMG_FALSE;
+               }
+       }
+       else
+       {
+               /* Assert if provided with a NULL psDeviceNode */
+               OSDumpStack();
+               PVR_ASSERT(psDeviceNode);
+               return IMG_FALSE;
+       }
+       return IMG_TRUE;
+}
+
+/*
+       Checks in this method were seeded from the original PDumpWriteILock()
+       and DBGDrivWriteCM() and have grown since to ensure PDump output
+       matches legacy output.
+       Note: the order of the checks in this method is important as some
+       writes have multiple pdump flags set!
+ */
+static IMG_BOOL PDumpWriteAllowed(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                  IMG_UINT32 ui32Flags, IMG_UINT32* ui32ExitHere)
+{
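+       /* Decision order (significant, see note above): device binding, capture
+        * range past, module ready, power transition, persistent/init phase,
+        * deinit, continuous, interval and finally the framed capture check. */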
+       PDUMP_HERE_VAR;
+
+       /* No writes for a device other than the PDump-bound device.
+        *  NB. psDeviceNode may be NULL if called during initialisation
+        */
+       if (!PDumpIsDevicePermitted(psDeviceNode))
+       {
+               PDUMP_HERE(5);
+               goto returnFalse;
+       }
+
+       /* PDUMP_FLAGS_CONTINUOUS and PDUMP_FLAGS_PERSISTENT can't come together. */
+       PVR_ASSERT(IMG_FALSE == ((ui32Flags & PDUMP_FLAGS_CONTINUOUS) &&
+                                    (ui32Flags & PDUMP_FLAGS_PERSISTENT)));
+
+       /* Lock down the PDUMP_CTRL_STATE struct before calling the following
+          PDumpCtrl*** functions. This is to avoid updates to the Control data
+          while we are reading from it */
+       PDumpCtrlLockAcquire();
+
+       /* No writes if in framed mode and the capture range is already past */
+       if (PDumpCtrlCaptureRangePast())
+       {
+               PDUMP_HERE(10);
+               goto unlockAndReturnFalse;
+       }
+
+       /* No writes while PDump is not ready or is suspended */
+       if (PDumpReady() != PVRSRV_OK)
+       {
+               PDUMP_HERE(11);
+               goto unlockAndReturnFalse;
+       }
+
+       /* Prevent PDumping during a power transition */
+       if (PDumpCtrlInPowerTransition())
+       {       /* except when it's flagged */
+               if (ui32Flags & PDUMP_FLAGS_POWER)
+               {
+                       PDUMP_HERE(20);
+                       goto unlockAndReturnTrue;
+               }
+               else if (PDumpCtrlInPowerTransitionPID() == OSGetCurrentProcessID())
+               {
+                       PDUMP_HERE(16);
+                       goto unlockAndReturnFalse;
+               }
+       }
+
+       /* Always allow dumping in init phase and when persistent flagged */
+       if (ui32Flags & PDUMP_FLAGS_PERSISTENT)
+       {
+               PDUMP_HERE(12);
+               goto unlockAndReturnTrue;
+       }
+       if (!PDumpCtrlInitPhaseComplete())
+       {
+               PDUMP_HERE(15);
+               goto unlockAndReturnTrue;
+       }
+
+       /* The following checks are made when the driver has completed initialisation */
+       /* No last/deinit statements allowed when not in initialisation phase */
+       else /* init phase over */
+       {
+               if (ui32Flags & PDUMP_FLAGS_DEINIT)
+               {
+                       PVR_ASSERT(0);
+                       PDUMP_HERE(17);
+                       goto unlockAndReturnFalse;
+               }
+       }
+
+       /* If PDump client connected allow continuous flagged writes */
+       if (PDUMP_IS_CONTINUOUS(ui32Flags))
+       {
+               if (PDumpCtrlGetModuleState() != PDUMP_SM_READY_CLIENT_CONNECTED) /* Is client connected? */
+               {
+                       PDUMP_HERE(13);
+                       goto unlockAndReturnFalse;
+               }
+               PDUMP_HERE(14);
+               goto unlockAndReturnTrue;
+       }
+
+       /* A write may still be required while in a capture interval. Force the
+        * write out if PDUMP_FLAGS_INTERVAL has been set and we are in a
+        * capture interval. */
+       if (ui32Flags & PDUMP_FLAGS_INTERVAL)
+       {
+               if (PDumpCtrlCaptureInInterval())
+               {
+                       PDUMP_HERE(21);
+                       goto unlockAndReturnTrue;
+               }
+       }
+
+       /*
+               If no flags are provided then this is FRAMED output and the frame
+               range must be checked to match the expected behaviour.
+        */
+       if (!PDumpCtrlCaptureOn())
+       {
+               PDUMP_HERE(18);
+               goto unlockAndReturnFalse;
+       }
+
+       PDUMP_HERE(19);
+
+unlockAndReturnTrue:
+       /* Allow the write to take place */
+
+       PDumpCtrlLockRelease();
+       return IMG_TRUE;
+
+unlockAndReturnFalse:
+       PDumpCtrlLockRelease();
+returnFalse:
+       if (ui32ExitHere != NULL)
+       {
+               *ui32ExitHere = here;
+       }
+       return IMG_FALSE;
+}
+
+
+/*************************************************************************/ /*!
+ @Function             PDumpWriteToBuffer
+ @Description  Write the supplied data to the PDump stream buffer and attempt
+                to handle any buffer-full conditions so that all of the data
+                requested is written.
+
+ @Input                        psDeviceNode The device the PDump pertains to
+ @Input                        psStream        The address of the PDump stream buffer to write to
+ @Input                        pui8Data    Pointer to the data to be written
+ @Input                        ui32BCount      Number of bytes to write
+ @Input                        ui32Flags       PDump statement flags.
+
+ @Return               IMG_UINT32  Actual number of bytes written, may be less than
+                            ui32BCount when buffer full condition could not
+                            be avoided.
+*/ /**************************************************************************/
+static IMG_UINT32 PDumpWriteToBuffer(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     PDUMP_STREAM* psStream,
+                                     IMG_UINT8 *pui8Data,
+                                     IMG_UINT32 ui32BCount,
+                                     IMG_UINT32 ui32Flags)
+{
+       IMG_UINT32      ui32BytesToBeWritten;
+       IMG_UINT32      ui32Off = 0;
+       IMG_BYTE *pbyDataBuffer;
+       IMG_UINT32 ui32BytesAvailable = 0;
+       static IMG_UINT32 ui32TotalBytesWritten;
+       PVRSRV_ERROR eError;
+       IMG_UINT32 uiRetries = 0;
+
+       /* Check PDump stream validity */
+       if (psStream->hTL == NULL)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "PDumpWriteToBuffer: PDump stream '%s' is invalid", psStream->pszName));
+               return 0;
+       }
+
+       /* This API must always be called with the PDump write lock held so that
+        * PDump writes are atomic; assert if the lock is not held.
+        */
+       PDumpAssertWriteLockHeld();
+
+       /* No need to check size of data to write as this is asserted
+        * higher up in the call stack as 1KB and 16KB for each channel
+        * respectively. */
+
+       while (ui32BCount > 0)
+       {
+               ui32BytesToBeWritten = MIN ( ui32BCount, psStream->ui32MaxAllowedWriteSize );
+
+               eError = TLStreamReserve2(psStream->hTL, &pbyDataBuffer, ui32BytesToBeWritten, 0, &ui32BytesAvailable, NULL);
+               if (eError == PVRSRV_ERROR_STREAM_FULL)
+               {
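+                       /* The TL buffer is full: either consume whatever space is
+                        * left (if at least 1KB is available) or back off and retry,
+                        * giving the reader time to drain the stream. */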
+                       psStream->ui32BufferFullRetries++;
+
+                       /*! Retry the write only if at least 1024 bytes are available. */
+                       if (ui32BytesAvailable >= 0x400)
+                       {
+                               ui32BytesToBeWritten = ui32BytesAvailable;
+                               PVR_DPF((PVR_DBG_WARNING, "PDumpWriteToBuffer: TL buffer '%s' retrying write2=%u out of %u", psStream->pszName, ui32BytesToBeWritten, ui32BCount));
+                               eError = TLStreamReserve(psStream->hTL, &pbyDataBuffer, ui32BytesToBeWritten);
+                               /*! PVRSRV_ERROR_STREAM_FULL is not expected here, although other errors may occur */
+                               PVR_ASSERT(eError != PVRSRV_ERROR_STREAM_FULL);
+                       }
+                       else
+                       {
+                               uiRetries++;
+                               PVR_DPF((PVR_DBG_WARNING, "PDumpWriteToBuffer: TL buffer '%s' full, rq=%u, av=%u, retrying write", psStream->pszName, ui32BCount, ui32BytesAvailable));
+
+                               /* Check if we are out of retries; if so, print a warning and give up */
+                               if (uiRetries >= MAX_PDUMP_WRITE_RETRIES)
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR,
+                                                "PDumpWriteToBuffer: PDump writes blocked to dump %d bytes, %s TLBuffers full for %d seconds, check system",
+                                                ui32BCount,
+                                                psStream->pszName,
+                                                ((200 * uiRetries)/1000)));
+
+                                       if (uiRetries > psStream->ui32HighestRetriesWatermark)
+                                       {
+                                               psStream->ui32HighestRetriesWatermark = uiRetries;
+                                       }
+
+                                       psStream->ui32BufferFullAborts++;
+                                       uiRetries = 0;
+
+                                       /* uiRetries has exceeded the maximum write retries, which
+                                        * means something has gone wrong in the system; suspend PDump.
+                                        */
+                                       PDumpModuleTransitionState(PDUMP_SM_ERROR_SUSPENDED);
+                                       return 0;
+                               }
+
+                               OSSleepms(100);
+                               continue;
+                       }
+               }
+
+               if (eError == PVRSRV_OK)
+               {
+                       ui32TotalBytesWritten += ui32BytesToBeWritten;
+
+                       PVR_ASSERT(pbyDataBuffer != NULL);
+
+                       OSDeviceMemCopy((void*)pbyDataBuffer, pui8Data + ui32Off, ui32BytesToBeWritten);
+
+                       eError = TLStreamCommit(psStream->hTL, ui32BytesToBeWritten);
+                       if (PVRSRV_OK != eError)
+                       {
+                               return 0;
+                       }
+
+                       if (uiRetries > psStream->ui32HighestRetriesWatermark)
+                       {
+                               psStream->ui32HighestRetriesWatermark = uiRetries;
+                       }
+
+                       uiRetries = 0;
+                       ui32Off += ui32BytesToBeWritten;
+                       ui32BCount -= ui32BytesToBeWritten;
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "PDumpWriteToBuffer: TLStreamReserve2(%s) unrecoverable error %s", psStream->pszName, PVRSRVGETERRORSTRING(eError)));
+                       /* Fatal - suspend PDump to prevent flooding the kernel log buffer */
+                       PVR_LOG(("Unrecoverable error, PDump suspended!"));
+
+                       PDumpModuleTransitionState(PDUMP_SM_ERROR_SUSPENDED);
+                       return 0;
+               }
+
+               /*
+                  Stop writing if the write is no longer allowed, e.g. the capture
+                  range is unset (detected via PDumpWriteAllowed()).
+               */
+
+               if (!PDumpWriteAllowed(psDeviceNode, ui32Flags, NULL))
+               {
+                       psStream->ui32BufferFullAborts++;
+                       break;
+               }
+       }
+
+       return ui32Off;
+}
+
+/*************************************************************************/ /*!
+ @Function      PDumpWriteToChannel
+ @Description   Write the supplied data to the PDump channel specified obeying
+                flags to write to the necessary channel buffers.
+
+ @Input         psDeviceNode The device the PDump pertains to
+ @Input         psChannel   Address of the script or parameter channel object
+ @Input/Output  psWOff      Address of the channel write offsets object to
+                            update on successful writing
+ @Input         pui8Data    Pointer to the data to be written
+ @Input         ui32Size    Number of bytes to write
+ @Input         ui32Flags   PDump statement flags. They may be clear (no flags),
+                            which implies framed data, continuous flagged, or
+                            persistent flagged, and they determine how the data
+                            is output. On the first test app run after driver
+                            load, the Display Controller dumps a resource that
+                            is persistent and this needs writing to both the
+                            init (persistent) and main (continuous) channel
+                            buffers to ensure the data is dumped in subsequent
+                            test runs without reloading the driver.
+                            In subsequent runs the PDump client 'freezes' the
+                            init buffer so that only one dump of persistent
+                            data for the "extended init phase" is captured to
+                            the init buffer.
+ @Return        IMG_BOOL    True when the data has been consumed, false otherwise
+*/ /**************************************************************************/
+static IMG_BOOL PDumpWriteToChannel(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                    PDUMP_CHANNEL* psChannel,
+                                    PDUMP_CHANNEL_WOFFSETS* psWOff,
+                                    IMG_UINT8* pui8Data,
+                                    IMG_UINT32 ui32Size,
+                                    IMG_UINT32 ui32Flags)
+{
+       IMG_UINT32 ui32BytesWritten = 0;
+       PDUMP_HERE_VAR;
+
+       PDUMP_HERE(210);
+
+       /* At this point PDumpWriteAllowed() has returned TRUE (or we were called
+        * from PDumpParameterChannelZeroedPageBlock() during driver init), so we
+        * know the write must proceed because:
+        * - pdump is not suspended and
+        * - there is not an ongoing power transition or POWER override flag is set or
+        * - in driver init phase with ANY flag set or
+        * - post init with the pdump client connected and
+        * -   - PERSIST flag is present, xor
+        * -   - the CONTINUOUS flag is present, xor
+        * -   - in capture frame range
+        */
+       PDumpAssertWriteLockHeld();
+
+       /* Dump data to deinit buffer when flagged as deinit */
+       if (ui32Flags & PDUMP_FLAGS_DEINIT)
+       {
+               PDUMP_HERE(211);
+               ui32BytesWritten = PDumpWriteToBuffer(psDeviceNode,
+                                                     &psChannel->sDeinitStream,
+                                                     pui8Data, ui32Size, ui32Flags);
+               if (ui32BytesWritten != ui32Size)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "PDumpWriteToChannel: DEINIT Written length (%d) does not match data length (%d), PDump incomplete!", ui32BytesWritten, ui32Size));
+                       PDUMP_HERE(212);
+                       return IMG_FALSE;
+               }
+
+               if (psWOff)
+               {
+                       psWOff->ui32Deinit += ui32Size;
+               }
+
+       }
+       else
+       {
+               IMG_BOOL bDumpedToInitAlready = IMG_FALSE;
+               IMG_BOOL bMainStreamData = IMG_FALSE;
+               PDUMP_STREAM*  psStream = NULL;
+               IMG_UINT32* pui32Offset = NULL;
+
+               /* Always append persistent data to init phase so it's available on
+                * subsequent app runs, but also to the main stream if client connected */
+               if (ui32Flags & PDUMP_FLAGS_PERSISTENT)
+               {
+                       PDUMP_HERE(213);
+                       ui32BytesWritten = PDumpWriteToBuffer(psDeviceNode,
+                                                             &psChannel->sInitStream,
+                                                             pui8Data, ui32Size, ui32Flags);
+                       if (ui32BytesWritten != ui32Size)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "PDumpWriteToChannel: PERSIST Written length (%d) does not match data length (%d), PDump incomplete!", ui32BytesWritten, ui32Size));
+                               PDUMP_HERE(214);
+                               return IMG_FALSE;
+                       }
+
+                       bDumpedToInitAlready = IMG_TRUE;
+                       if (psWOff)
+                       {
+                               psWOff->ui32Init += ui32Size;
+                       }
+
+                       /* Don't write continuous data if client not connected */
+                       if (PDumpCtrlGetModuleState() != PDUMP_SM_READY_CLIENT_CONNECTED)
+                       {
+                               return IMG_TRUE;
+                       }
+               }
+
+               /* Prepare to write the data to the main stream for persistent,
+                * continuous or framed data. Override and use the init stream if
+                * the driver is still in the init phase and we have not written
+                * to it yet. */
+               PDumpCtrlLockAcquire();
+               if (!PDumpCtrlInitPhaseComplete() && !bDumpedToInitAlready)
+               {
+                       PDUMP_HERE(215);
+                       psStream = &psChannel->sInitStream;
+                       if (psWOff)
+                       {
+                               pui32Offset = &psWOff->ui32Init;
+                       }
+               }
+               else
+               {
+                       PDUMP_HERE(216);
+                       psStream = &psChannel->sMainStream;
+                       if (psWOff)
+                       {
+                               pui32Offset = &psWOff->ui32Main;
+                       }
+                       bMainStreamData = IMG_TRUE;
+
+               }
+               PDumpCtrlLockRelease();
+
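+               /* The Block stream copy below applies only to the script channel
+                * (its callers pass psWOff == NULL); the parameter channel is shared
+                * between the Main and Block scripts. */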
+               if (PDumpCtrlCapModIsBlocked() && bMainStreamData && !psWOff)
+               {
+                       /* If the PDUMP_FLAGS_BLKDATA flag is set in blocked mode, copy the Main script stream data to the Block script stream as well */
+                       if (ui32Flags & PDUMP_FLAGS_BLKDATA)
+                       {
+                               PDUMP_HERE(217);
+                               ui32BytesWritten = PDumpWriteToBuffer(psDeviceNode,
+                                                                     &psChannel->sBlockStream,
+                                                                     pui8Data, ui32Size, ui32Flags);
+                               if (ui32BytesWritten != ui32Size)
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "PDumpWriteToChannel: BLOCK Written length (%d) does not match data length (%d), PDump incomplete!", ui32BytesWritten, ui32Size));
+                                       PDUMP_HERE(218);
+                                       return IMG_FALSE;
+                               }
+                       }
+               }
+
+               /* Write the data to the stream */
+               ui32BytesWritten = PDumpWriteToBuffer(psDeviceNode,
+                                                     psStream, pui8Data,
+                                                     ui32Size, ui32Flags);
+               if (ui32BytesWritten != ui32Size)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "PDumpWriteToChannel: MAIN Written length (%d) does not match data length (%d), PDump incomplete!", ui32BytesWritten, ui32Size));
+                       PDUMP_HERE(219);
+                       return IMG_FALSE;
+               }
+
+               if (pui32Offset)
+               {
+                       *pui32Offset += ui32BytesWritten;
+               }
+       }
+
+       return IMG_TRUE;
+}
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+
+static IMG_UINT32 _GenerateChecksum(void *pvData, size_t uiSize)
+{
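+       /* Simple additive checksum: sum the buffer as 32-bit words, then add any
+        * trailing bytes individually. */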
+       IMG_UINT32 ui32Sum = 0;
+       IMG_UINT32 *pui32Data = pvData;
+       IMG_UINT8 *pui8Data = pvData;
+       IMG_UINT32 i;
+       IMG_UINT32 ui32LeftOver;
+
+       for (i = 0; i < uiSize / sizeof(IMG_UINT32); i++)
+       {
+               ui32Sum += pui32Data[i];
+       }
+
+       ui32LeftOver = uiSize % sizeof(IMG_UINT32);
+
+       while (ui32LeftOver)
+       {
+               ui32Sum += pui8Data[uiSize - ui32LeftOver];
+               ui32LeftOver--;
+       }
+
+       return ui32Sum;
+}
+
+#endif
+
+PVRSRV_ERROR PDumpWriteParameter(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                 IMG_UINT8 *pui8Data,
+                                 IMG_UINT32 ui32Size,
+                                 IMG_UINT32 ui32Flags,
+                                 IMG_UINT32* pui32FileOffset,
+                                 IMG_CHAR* aszFilenameStr)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_BOOL bPDumpCtrlInitPhaseComplete = IMG_FALSE;
+       IMG_UINT32 here = 0;
+       IMG_INT32 iCount;
+
+       PDumpAssertWriteLockHeld();
+
+       PVR_ASSERT(pui8Data && (ui32Size!=0));
+       PVR_ASSERT(pui32FileOffset && aszFilenameStr);
+
+       PDUMP_HERE(1);
+
+       /* Check if the write can proceed */
+       if (!PDumpWriteAllowed(psDeviceNode, ui32Flags, &here))
+       {
+               /* Abort the write for the above reason but indicate what happened to
+                * the caller to avoid disrupting the driver; the caller should treat
+                * it as OK but skip any related PDump writes to the script file. */
+               return PVRSRV_ERROR_PDUMP_NOT_ALLOWED;
+       }
+
+       PDUMP_HERE(2);
+
+       PDumpCtrlLockAcquire();
+       bPDumpCtrlInitPhaseComplete = PDumpCtrlInitPhaseComplete();
+       PDumpCtrlLockRelease();
+
+       if (!bPDumpCtrlInitPhaseComplete || (ui32Flags & PDUMP_FLAGS_PERSISTENT))
+       {
+               PDUMP_HERE(3);
+
+               /* The init phase stream is not expected to exceed the maximum file size */
+               PVR_ASSERT(g_PDumpParameters.sWOff.ui32Init < g_PDumpParameters.ui32MaxFileSize);
+
+               /* Return the file write offset at which the parameter data was dumped */
+               *pui32FileOffset = g_PDumpParameters.sWOff.ui32Init;
+       }
+       else
+       {
+               PDUMP_HERE(4);
+
+               /* Do we need to signal the PDump client that a split is required? */
+               if (g_PDumpParameters.sWOff.ui32Main + ui32Size > g_PDumpParameters.ui32MaxFileSize)
+               {
+                       PDUMP_HERE(5);
+                       _PDumpSetSplitMarker(g_PDumpParameters.sCh.sMainStream.hTL, IMG_FALSE);
+                       g_PDumpParameters.ui32FileIdx++;
+                       g_PDumpParameters.sWOff.ui32Main = 0;
+               }
+
+               /* Return the file write offset at which the parameter data was dumped */
+               *pui32FileOffset = g_PDumpParameters.sWOff.ui32Main;
+       }
+
+       /* Create the parameter file name, based on index, to be used in the script */
+       if (g_PDumpParameters.ui32FileIdx == 0)
+       {
+               iCount = OSSNPrintf(aszFilenameStr, PDUMP_PARAM_MAX_FILE_NAME, PDUMP_PARAM_0_FILE_NAME);
+       }
+       else
+       {
+               PDUMP_HERE(6);
+               iCount = OSSNPrintf(aszFilenameStr, PDUMP_PARAM_MAX_FILE_NAME, PDUMP_PARAM_N_FILE_NAME, g_PDumpParameters.ui32FileIdx);
+       }
+
+       PVR_LOG_GOTO_IF_FALSE(((iCount != -1) && (iCount < PDUMP_PARAM_MAX_FILE_NAME)), "OSSNPrintf", errExit);
+
+       /* Write the parameter data to the parameter channel */
+       eError = PVRSRV_ERROR_PDUMP_BUFFER_FULL;
+       if (!PDumpWriteToChannel(psDeviceNode, &g_PDumpParameters.sCh,
+                                &g_PDumpParameters.sWOff, pui8Data,
+                                ui32Size, ui32Flags))
+       {
+               PDUMP_HERE(7);
+               PVR_LOG_GOTO_IF_ERROR(eError, "PDumpWrite", errExit);
+       }
+#if defined(PDUMP_DEBUG_OUTFILES)
+       else
+       {
+               IMG_UINT32 ui32Checksum;
+               PDUMP_GET_SCRIPT_STRING();
+
+               ui32Checksum = _GenerateChecksum(pui8Data, ui32Size);
+
+               /* CHK CHKSUM SIZE PRMOFFSET PRMFILE */
+               eError = PDumpSNPrintf(hScript, ui32MaxLen, "-- CHK 0x%08X 0x%08X 0x%08X %s",
+                                                                       ui32Checksum,
+                                                                       ui32Size,
+                                                                       *pui32FileOffset,
+                                                                       aszFilenameStr);
+               PVR_GOTO_IF_ERROR(eError, errExit);
+
+               PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+               PDUMP_RELEASE_SCRIPT_STRING();
+       }
+#endif
+
+       return PVRSRV_OK;
+
+errExit:
+       return eError;
+}
+
+
+IMG_BOOL PDumpWriteScript(PVRSRV_DEVICE_NODE *psDeviceNode,
+                          IMG_HANDLE hString, IMG_UINT32 ui32Flags)
+{
+       PDUMP_HERE_VAR;
+
+       PVR_ASSERT(hString);
+
+       PDumpAssertWriteLockHeld();
+
+       PDUMP_HERE(201);
+
+#if defined(DEBUG)
+       /* Since buffer sizes and buffer writing/reading are a balancing act to
+        * avoid buffer full errors, check here our assumption on the maximum write size.
+        */
+       {
+               IMG_UINT32 ui32Size = (IMG_UINT32) OSStringLength((const IMG_CHAR *)hString);
+               if (ui32Size > 0x400) // 1KB
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "PDUMP large script write %u bytes", ui32Size));
+                       OSDumpStack();
+               }
+       }
+#endif
+
+       if (!PDumpWriteAllowed(psDeviceNode, ui32Flags, NULL))
+       {
+               /* Abort the write for the above reasons but indicate to the caller
+                * that it was OK, to avoid disrupting the driver */
+               return IMG_TRUE;
+       }
+
+       if (PDumpCtrlCapModIsBlocked())
+       {
+               if (ui32Flags & PDUMP_FLAGS_FORCESPLIT)
+               {
+                       IMG_UINT32 ui32CurrentBlock;
+
+                       PDumpGetCurrentBlockKM(&ui32CurrentBlock);
+                       /* Keep only the Main stream script output files belonging to the first and last blocks */
+                       if (ui32CurrentBlock == 1)
+                       {
+                               /* To keep the first (0th) block, do not remove the old script
+                                * file while splitting to the second (1st) block
+                                * (i.e. bRemoveOld=IMG_FALSE).
+                                */
+                               _PDumpSetSplitMarker(g_PDumpScript.sCh.sMainStream.hTL, IMG_FALSE);
+                       }
+                       else
+                       {
+                               /* The previous block's Main script output file will be removed
+                                * before splitting to the next block.
+                                */
+                               _PDumpSetSplitMarker(g_PDumpScript.sCh.sMainStream.hTL, IMG_TRUE);
+                       }
+
+                       /* Split Block stream output file
+                        *
+                        * We are keeping block script output files from all PDump blocks.
+                        * */
+                       _PDumpSetSplitMarker(g_PDumpScript.sCh.sBlockStream.hTL, IMG_FALSE);
+                       g_PDumpScript.ui32FileIdx++;
+               }
+       }
+
+       return PDumpWriteToChannel(psDeviceNode, &g_PDumpScript.sCh, NULL,
+                                 (IMG_UINT8*) hString,
+                                 (IMG_UINT32) OSStringLength((IMG_CHAR*) hString),
+                                 ui32Flags);
+}
+
+
+/*****************************************************************************/
+
+
+struct _PDUMP_CONNECTION_DATA_ {
+       ATOMIC_T                  sRefCount;
+       POS_LOCK                  hLock;                       /*!< Protects access to sListHead. */
+       DLLIST_NODE               sListHead;
+       IMG_UINT32                ui32LastSetFrameNumber;
+       PDUMP_TRANSITION_EVENT    eLastEvent;                  /*!< Last processed transition event */
+       PDUMP_TRANSITION_EVENT    eFailedEvent;                /*!< Failed transition event to retry */
+       PFN_PDUMP_SYNCBLOCKS      pfnPDumpSyncBlocks;          /*!< Callback to PDump sync blocks */
+       void                      *hSyncPrivData;              /*!< Sync private data */
+};
+
+static PDUMP_CONNECTION_DATA * _PDumpConnectionAcquire(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
+{
+       IMG_INT iRefCount = OSAtomicIncrement(&psPDumpConnectionData->sRefCount);
+
+       PDUMP_REFCOUNT_PRINT("%s: PDump connection %p, refcount = %d", __func__,
+                            psPDumpConnectionData, iRefCount);
+       PVR_UNREFERENCED_PARAMETER(iRefCount);
+
+       return psPDumpConnectionData;
+}
+
+static void _PDumpConnectionRelease(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
+{
+       IMG_INT iRefCount = OSAtomicDecrement(&psPDumpConnectionData->sRefCount);
+       if (iRefCount == 0)
+       {
+               OSLockDestroy(psPDumpConnectionData->hLock);
+               PVR_ASSERT(dllist_is_empty(&psPDumpConnectionData->sListHead));
+               OSFreeMem(psPDumpConnectionData);
+       }
+
+       PDUMP_REFCOUNT_PRINT("%s: PDump connection %p, refcount = %d", __func__,
+                            psPDumpConnectionData, iRefCount);
+}
+
+/******************************************************************************
+ * Function Name  : PDumpInitStreams
+ * Outputs        : None
+ * Returns        :
+ * Description    : Create the PDump streams
+******************************************************************************/
+static PVRSRV_ERROR PDumpInitStreams(PDUMP_CHANNEL* psParam, PDUMP_CHANNEL* psScript)
+{
+
+       PVRSRV_ERROR   eError;
+       TL_STREAM_INFO sTLStreamInfo;
+
+       /* TL - Create the streams */
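+       /* Each channel has Init, Main and Deinit streams; the script channel also
+        * has a Block stream used for block-mode capture. */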
+
+       /**************************** Parameter stream ***************************/
+
+       /* Parameter - Init */
+       eError = TLStreamCreate(&psParam->sInitStream.hTL,
+                               psParam->sInitStream.pszName, psParam->sInitStream.ui32BufferSize,
+                               TL_OPMODE_DROP_NEWER | TL_FLAG_PERMANENT_NO_WRAP,
+                               NULL, NULL,
+                               NULL, NULL);
+       PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate ParamInit", end);
+
+       TLStreamInfo(psParam->sInitStream.hTL, &sTLStreamInfo);
+       psParam->sInitStream.ui32MaxAllowedWriteSize = sTLStreamInfo.maxTLpacketSize;
+
+       /* Parameter - Main */
+       eError = TLStreamCreate(&psParam->sMainStream.hTL,
+                               psParam->sMainStream.pszName, psParam->sMainStream.ui32BufferSize,
+                               TL_OPMODE_DROP_NEWER ,
+                               NULL, NULL,
+                               NULL, NULL);
+       PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate ParamMain", param_main_failed);
+
+       TLStreamInfo(psParam->sMainStream.hTL, &sTLStreamInfo);
+       psParam->sMainStream.ui32MaxAllowedWriteSize = sTLStreamInfo.maxTLpacketSize;
+
+       /* Parameter - Deinit */
+       eError = TLStreamCreate(&psParam->sDeinitStream.hTL,
+                               psParam->sDeinitStream.pszName, psParam->sDeinitStream.ui32BufferSize,
+                               TL_OPMODE_DROP_NEWER | TL_FLAG_PERMANENT_NO_WRAP,
+                               NULL, NULL,
+                               NULL, NULL);
+       PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate ParamDeinit", param_deinit_failed);
+
+       TLStreamInfo(psParam->sDeinitStream.hTL, &sTLStreamInfo);
+       psParam->sDeinitStream.ui32MaxAllowedWriteSize = sTLStreamInfo.maxTLpacketSize;
+
+       /* Parameter - Block */
+       /* In the current implementation the Block script stream is just the Main
+        * script stream filtered by the PDUMP_FLAGS_BLKDATA flag, so no separate
+        * parameter stream is needed. The Block script refers to the same
+        * parameters as the Main script stream.
+        */
+
+       /***************************** Script streams ****************************/
+
+       /* Script - Init */
+       eError = TLStreamCreate(&psScript->sInitStream.hTL,
+                               psScript->sInitStream.pszName, psScript->sInitStream.ui32BufferSize,
+                               TL_OPMODE_DROP_NEWER | TL_FLAG_PERMANENT_NO_WRAP,
+                               NULL, NULL,
+                               NULL, NULL);
+       PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate ScriptInit", script_init_failed);
+
+       TLStreamInfo(psScript->sInitStream.hTL, &sTLStreamInfo);
+       psScript->sInitStream.ui32MaxAllowedWriteSize = sTLStreamInfo.maxTLpacketSize;
+
+       /* Script - Main */
+       eError = TLStreamCreate(&psScript->sMainStream.hTL,
+                               psScript->sMainStream.pszName, psScript->sMainStream.ui32BufferSize,
+                               TL_OPMODE_DROP_NEWER,
+                               NULL, NULL,
+                               NULL, NULL);
+       PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate ScriptMain", script_main_failed);
+
+       TLStreamInfo(psScript->sMainStream.hTL, &sTLStreamInfo);
+       psScript->sMainStream.ui32MaxAllowedWriteSize = sTLStreamInfo.maxTLpacketSize;
+
+       /* Script - Deinit */
+       eError = TLStreamCreate(&psScript->sDeinitStream.hTL,
+                               psScript->sDeinitStream.pszName, psScript->sDeinitStream.ui32BufferSize,
+                               TL_OPMODE_DROP_NEWER | TL_FLAG_PERMANENT_NO_WRAP,
+                               NULL, NULL,
+                               NULL, NULL);
+       PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate ScriptDeinit", script_deinit_failed);
+
+       TLStreamInfo(psScript->sDeinitStream.hTL, &sTLStreamInfo);
+       psScript->sDeinitStream.ui32MaxAllowedWriteSize = sTLStreamInfo.maxTLpacketSize;
+
+       /* Script - Block */
+       eError = TLStreamCreate(&psScript->sBlockStream.hTL,
+                               psScript->sBlockStream.pszName, psScript->sBlockStream.ui32BufferSize,
+                               TL_OPMODE_DROP_NEWER,
+                               NULL, NULL,
+                               NULL, NULL);
+       PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate ScriptBlock", script_block_failed);
+
+       TLStreamInfo(psScript->sBlockStream.hTL, &sTLStreamInfo);
+       psScript->sBlockStream.ui32MaxAllowedWriteSize = sTLStreamInfo.maxTLpacketSize;
+
+       return PVRSRV_OK;
+
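+/* Error unwind: close the streams already created, in reverse order of creation. */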
+script_block_failed:
+       TLStreamClose(psScript->sDeinitStream.hTL);
+
+script_deinit_failed:
+       TLStreamClose(psScript->sMainStream.hTL);
+
+script_main_failed:
+       TLStreamClose(psScript->sInitStream.hTL);
+
+script_init_failed:
+       TLStreamClose(psParam->sDeinitStream.hTL);
+
+param_deinit_failed:
+       TLStreamClose(psParam->sMainStream.hTL);
+
+param_main_failed:
+       TLStreamClose(psParam->sInitStream.hTL);
+
+end:
+       return eError;
+}
+/******************************************************************************
+ * Function Name  : PDumpDeInitStreams
+ * Inputs         : psParam, psScript
+ * Outputs        : None
+ * Returns        : None
+ * Description    : Deinitialises the PDump streams
+******************************************************************************/
+static void PDumpDeInitStreams(PDUMP_CHANNEL* psParam, PDUMP_CHANNEL* psScript)
+{
+       /* Script streams */
+       TLStreamClose(psScript->sDeinitStream.hTL);
+       TLStreamClose(psScript->sMainStream.hTL);
+       TLStreamClose(psScript->sInitStream.hTL);
+       TLStreamClose(psScript->sBlockStream.hTL);
+
+       /* Parameter streams */
+       TLStreamClose(psParam->sDeinitStream.hTL);
+       TLStreamClose(psParam->sMainStream.hTL);
+       TLStreamClose(psParam->sInitStream.hTL);
+
+}
+
+/******************************************************************************
+ * Function Name  : PDumpParameterChannelZeroedPageBlock
+ * Inputs         : psDeviceNode
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Set up the zero page block in the parameter stream
+******************************************************************************/
+static PVRSRV_ERROR PDumpParameterChannelZeroedPageBlock(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       IMG_UINT8 aui8Zero[32] = { 0 };
+       size_t uiBytesToWrite;
+       PVRSRV_ERROR eError;
+       void *pvAppHintState = NULL;
+       IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE;
+       IMG_UINT32 ui32GeneralNon4KHeapPageSize;
+
+       OSCreateKMAppHintState(&pvAppHintState);
+       OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, GeneralNon4KHeapPageSize,
+                       &ui32AppHintDefault, &ui32GeneralNon4KHeapPageSize);
+       OSFreeKMAppHintState(pvAppHintState);
+
+       /* ZeroPageSize can't be smaller than page size */
+       g_PDumpParameters.uiZeroPageSize = MAX(ui32GeneralNon4KHeapPageSize, OSGetPageSize());
+
+       /* Ensure the zero page size is a multiple of the zero source on the stack */
+       PVR_ASSERT(g_PDumpParameters.uiZeroPageSize % sizeof(aui8Zero) == 0);
+
+       /* the first write gets the parameter file name and stream offset,
+        * then subsequent writes do not need to know this as the data is
+        * contiguous in the stream
+        */
+       PDUMP_LOCK(0);
+       eError = PDumpWriteParameter(psDeviceNode, aui8Zero,
+                                                       sizeof(aui8Zero),
+                                                       0,
+                                                       &g_PDumpParameters.uiZeroPageOffset,
+                                                       g_PDumpParameters.szZeroPageFilename);
+
+       /* Also treat PVRSRV_ERROR_PDUMP_NOT_ALLOWED as an error in this case
+        * as it should never happen since all writes during driver Init are
+        * allowed.
+       */
+       PVR_GOTO_IF_ERROR(eError, err_write);
+
+       uiBytesToWrite = g_PDumpParameters.uiZeroPageSize - sizeof(aui8Zero);
+
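+       /* Pad the remainder of the zero page in sizeof(aui8Zero)-byte chunks; the
+        * data is contiguous in the stream so no further file offsets are needed. */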
+       while (uiBytesToWrite)
+       {
+               IMG_BOOL bOK;
+
+               bOK = PDumpWriteToChannel(psDeviceNode,
+                                                                 &g_PDumpParameters.sCh,
+                                                                 &g_PDumpParameters.sWOff,
+                                                                 aui8Zero,
+                                                                 sizeof(aui8Zero), 0);
+
+               if (!bOK)
+               {
+                       PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PDUMP_BUFFER_FULL, err_write);
+               }
+
+               uiBytesToWrite -= sizeof(aui8Zero);
+       }
+
+err_write:
+       PDUMP_UNLOCK(0);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Failed to initialise parameter stream zero block"));
+       }
+
+       return eError;
+}
+
+/******************************************************************************
+ * Function Name  : PDumpGetParameterZeroPageInfo
+ * Inputs         : None
+ * Outputs        : puiZeroPageOffset: set to the offset of the zero page
+ *                : puiZeroPageSize: set to the size of the zero page
+ *                : ppszZeroPageFilename: set to a pointer to the PRM file name
+ *                :                       containing the zero page
+ * Returns        : None
+ * Description    : Get information about the zero page
+******************************************************************************/
+void PDumpGetParameterZeroPageInfo(PDUMP_FILEOFFSET_T *puiZeroPageOffset,
+                                       size_t *puiZeroPageSize,
+                                       const IMG_CHAR **ppszZeroPageFilename)
+{
+       *puiZeroPageOffset = g_PDumpParameters.uiZeroPageOffset;
+       *puiZeroPageSize = g_PDumpParameters.uiZeroPageSize;
+       *ppszZeroPageFilename = g_PDumpParameters.szZeroPageFilename;
+}
+
+
+PVRSRV_ERROR PDumpInitCommon(void)
+{
+       PVRSRV_ERROR eError;
+       PDUMP_HERE_VAR;
+
+       PDUMP_HEREA(2010);
+
+       /* Initialise counters with their default initial values */
+       OSAtomicWrite(&g_sConnectionCount, 0);
+#if defined(PDUMP_DEBUG_OUTFILES)
+       OSAtomicWrite(&g_sEveryLineCounter, 1);
+#endif
+
+       eError = OSLockCreate(&g_hPDumpWriteLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", errRet);
+
+       /* Initialise PDump control module in common layer, also sets
+        * state to PDUMP_SM_INITIALISING.
+        */
+       eError = PDumpCtrlInit();
+       PVR_LOG_GOTO_IF_ERROR(eError, "PDumpCtrlInit", errRetLock);
+
+       /* Call environment-specific PDump initialisation (part 2) */
+       eError = PDumpInitStreams(&g_PDumpParameters.sCh, &g_PDumpScript.sCh);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PDumpInitStreams", errRetCtrl);
+
+       /* PDump now ready for write calls */
+       PDumpModuleTransitionState(PDUMP_SM_READY);
+
+       PDUMP_HEREA(2011);
+
+       /* Test PDump initialised and ready by logging driver details */
+       eError = PDumpCommentWithFlags((PVRSRV_DEVICE_NODE*)PDUMP_MAGIC_COOKIE,
+                                      PDUMP_FLAGS_CONTINUOUS,
+                                      "Driver Product Version: %s - %s (%s)",
+                                      PVRVERSION_STRING, PVR_BUILD_DIR, PVR_BUILD_TYPE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PDumpCommentWithFlags", errRetState);
+
+       eError = PDumpCommentWithFlags((PVRSRV_DEVICE_NODE*)PDUMP_MAGIC_COOKIE,
+                                      PDUMP_FLAGS_CONTINUOUS,
+                                      "Start of Init Phase");
+       PVR_LOG_GOTO_IF_ERROR(eError, "PDumpCommentWithFlags", errRetState);
+
+       eError = PDumpParameterChannelZeroedPageBlock((PVRSRV_DEVICE_NODE*)PDUMP_MAGIC_COOKIE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PDumpParameterChannelZeroedPageBlock", errRetState);
+
+       PDUMP_HEREA(2012);
+ret:
+       return eError;
+
+
+errRetState:
+       PDumpModuleTransitionState(PDUMP_SM_UNINITIALISED);
+       PDumpDeInitStreams(&g_PDumpParameters.sCh, &g_PDumpScript.sCh);
+errRetCtrl:
+       PDumpCtrlDeInit();
+errRetLock:
+       OSLockDestroy(g_hPDumpWriteLock);
+       PDUMP_HEREA(2013);
+errRet:
+       goto ret;
+}
+void PDumpDeInitCommon(void)
+{
+       PDUMP_HERE_VAR;
+
+       PDUMP_HEREA(2020);
+
+       /* Suspend PDump as we want PDumpWriteAllowed to deliberately fail during PDump deinit */
+       PDumpModuleTransitionState(PDUMP_SM_DEINITIALISED);
+
+       /* Call environment-specific PDump deinitialisation */
+       PDumpDeInitStreams(&g_PDumpParameters.sCh, &g_PDumpScript.sCh);
+
+       /* DeInit the PDUMP_CTRL_STATE data */
+       PDumpCtrlDeInit();
+
+       /* take down the global PDump lock */
+       OSLockDestroy(g_hPDumpWriteLock);
+}
+
+void PDumpStopInitPhase(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       IMG_UINT32 ui32PDumpBoundDevice = PVRSRVGetPVRSRVData()->ui32PDumpBoundDevice;
+
+       /* Stop the init phase for the PDump-bound device only */
+       if (psDeviceNode->sDevId.ui32InternalID == ui32PDumpBoundDevice)
+       {
+               /* Output this comment to indicate that the init phase is ending */
+               PDUMPCOMMENT(psDeviceNode, "Stop Init Phase");
+
+               PDumpCtrlLockAcquire();
+               PDumpCtrlSetInitPhaseComplete(IMG_TRUE);
+               PDumpCtrlLockRelease();
+       }
+}
+
+PVRSRV_ERROR PDumpIsLastCaptureFrameKM(IMG_BOOL *pbIsLastCaptureFrame)
+{
+       PDumpCtrlLockAcquire();
+       *pbIsLastCaptureFrame = PDumpCtrlIsLastCaptureFrame();
+       PDumpCtrlLockRelease();
+
+       return PVRSRV_OK;
+}
+
+
+
+typedef struct _PDUMP_Transition_DATA_
+{
+       PFN_PDUMP_TRANSITION        pfnCallback;
+       void                        *hPrivData;
+       void                        *pvDevice;
+       PDUMP_CONNECTION_DATA       *psPDumpConnectionData;
+       DLLIST_NODE                 sNode;
+} PDUMP_Transition_DATA;
+
+PVRSRV_ERROR PDumpRegisterTransitionCallback(PDUMP_CONNECTION_DATA *psPDumpConnectionData,
+                                                                                         PFN_PDUMP_TRANSITION pfnCallback,
+                                                                                         void *hPrivData,
+                                                                                         void *pvDevice,
+                                                                                         void **ppvHandle)
+{
+       PDUMP_Transition_DATA *psData;
+       PVRSRV_ERROR eError;
+
+       psData = OSAllocMem(sizeof(*psData));
+       PVR_GOTO_IF_NOMEM(psData, eError, fail_alloc);
+
+       /* Setup the callback and add it to the list for this process */
+       psData->pfnCallback = pfnCallback;
+       psData->hPrivData = hPrivData;
+       psData->pvDevice = pvDevice;
+
+       OSLockAcquire(psPDumpConnectionData->hLock);
+       dllist_add_to_head(&psPDumpConnectionData->sListHead, &psData->sNode);
+       OSLockRelease(psPDumpConnectionData->hLock);
+
+       /* Take a reference on the connection so it doesn't get freed too early */
+       psData->psPDumpConnectionData = _PDumpConnectionAcquire(psPDumpConnectionData);
+       *ppvHandle = psData;
+
+       return PVRSRV_OK;
+
+fail_alloc:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+void PDumpUnregisterTransitionCallback(void *pvHandle)
+{
+       PDUMP_Transition_DATA *psData = pvHandle;
+
+       OSLockAcquire(psData->psPDumpConnectionData->hLock);
+       dllist_remove_node(&psData->sNode);
+       OSLockRelease(psData->psPDumpConnectionData->hLock);
+       _PDumpConnectionRelease(psData->psPDumpConnectionData);
+       OSFreeMem(psData);
+}
+
+typedef struct _PDUMP_Transition_DATA_FENCE_SYNC_
+{
+       PFN_PDUMP_TRANSITION_FENCE_SYNC         pfnCallback;
+       void                                    *hPrivData;
+} PDUMP_Transition_DATA_FENCE_SYNC;
+
+PVRSRV_ERROR PDumpRegisterTransitionCallbackFenceSync(void *hPrivData,
+                                                         PFN_PDUMP_TRANSITION_FENCE_SYNC pfnCallback, void **ppvHandle)
+{
+       PDUMP_Transition_DATA_FENCE_SYNC *psData;
+       PVRSRV_ERROR eError;
+
+       psData = OSAllocMem(sizeof(*psData));
+       PVR_GOTO_IF_NOMEM(psData, eError, fail_alloc_exit);
+
+       /* Setup the callback and add it to the list for this process */
+       psData->pfnCallback = pfnCallback;
+       psData->hPrivData = hPrivData;
+
+       *ppvHandle = psData;
+       return PVRSRV_OK;
+
+fail_alloc_exit:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+void PDumpUnregisterTransitionCallbackFenceSync(void *pvHandle)
+{
+       PDUMP_Transition_DATA_FENCE_SYNC *psData = pvHandle;
+
+       OSFreeMem(psData);
+}
+
+static PVRSRV_ERROR _PDumpTransition(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                        PDUMP_CONNECTION_DATA *psPDumpConnectionData,
+                                        PDUMP_TRANSITION_EVENT eEvent,
+                                        IMG_UINT32 ui32PDumpFlags)
+{
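+       /* eLastEvent records the last processed event so duplicate events are
+        * ignored; eFailedEvent is saved on failure so the caller can retry the
+        * failed transition. */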
+       DLLIST_NODE *psNode, *psNext;
+       PVRSRV_ERROR eError;
+
+       /* Only call the callbacks if we have really got a new event */
+       if ((eEvent != psPDumpConnectionData->eLastEvent) && (eEvent != PDUMP_TRANSITION_EVENT_NONE))
+       {
+               OSLockAcquire(psPDumpConnectionData->hLock);
+
+               dllist_foreach_node(&psPDumpConnectionData->sListHead, psNode, psNext)
+               {
+                       PDUMP_Transition_DATA *psData =
+                               IMG_CONTAINER_OF(psNode, PDUMP_Transition_DATA, sNode);
+
+                       eError = psData->pfnCallback(psData->hPrivData, psData->pvDevice, eEvent, ui32PDumpFlags);
+
+                       if (eError != PVRSRV_OK)
+                       {
+                               OSLockRelease(psPDumpConnectionData->hLock);
+                               psPDumpConnectionData->eFailedEvent = eEvent; /* Save failed event to retry */
+                               return eError;
+                       }
+               }
+               OSLockRelease(psPDumpConnectionData->hLock);
+
+               /* PDump sync blocks:
+                *
+                * Client sync prims are managed in blocks.
+                *
+                * Sync blocks are re-dumped each time we enter the capture range or
+                * enter a new PDump block. Ensure that the live-FW thread and the
+                * app thread are synchronised before this.
+                *
+                * At playback time, the script thread and the sim-FW thread need to
+                * be synchronised before re-loading the sync blocks.
+                */
+               psPDumpConnectionData->pfnPDumpSyncBlocks(psDeviceNode, psPDumpConnectionData->hSyncPrivData, eEvent);
+
+               if (psDeviceNode->hTransition)
+               {
+                       PDUMP_Transition_DATA_FENCE_SYNC *psData = (PDUMP_Transition_DATA_FENCE_SYNC*)psDeviceNode->hTransition;
+                       psData->pfnCallback(psData->hPrivData, eEvent);
+               }
+
+               psPDumpConnectionData->eLastEvent = eEvent;
+               psPDumpConnectionData->eFailedEvent = PDUMP_TRANSITION_EVENT_NONE; /* Clear failed event on success */
+       }
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _PDumpBlockTransition(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                          PDUMP_CONNECTION_DATA *psPDumpConnectionData,
+                                          PDUMP_TRANSITION_EVENT eEvent,
+                                          IMG_UINT32 ui32PDumpFlags)
+{
+
+       /* Block transitions must follow this sequence:
+        *
+        * (1) _PDumpTransition with the BLOCK_FINISHED event for the current block
+        * (2) Split the MAIN and BLOCK script files
+        * (3) _PDumpTransition with the BLOCK_STARTED event for the new block
+        */
+
+       PVRSRV_ERROR        eError;
+       IMG_UINT32          ui32CurrentBlock;
+       IMG_UINT32          ui32Flags = (PDUMP_FLAGS_BLKDATA | PDUMP_FLAGS_CONTINUOUS); /* Internal Block mode specific PDump flags */
+
+       PDumpGetCurrentBlockKM(&ui32CurrentBlock);
+
+       if (eEvent == PDUMP_TRANSITION_EVENT_BLOCK_FINISHED)
+       {
+               /* (1) Current block has finished */
+               eError = _PDumpTransition(psDeviceNode,
+                                             psPDumpConnectionData,
+                                             PDUMP_TRANSITION_EVENT_BLOCK_FINISHED,
+                                             ui32PDumpFlags);
+               PVR_RETURN_IF_ERROR(eError);
+
+               (void) PDumpCommentWithFlags(psDeviceNode, ui32Flags,
+                                            "}PDUMP_BLOCK_END_0x%08X",
+                                            ui32CurrentBlock - 1); /* Add pdump-block end marker */
+
+               /* (2) Split MAIN and BLOCK script out files on current pdump-block end */
+               ui32Flags |= PDUMP_FLAGS_FORCESPLIT;
+
+               (void) PDumpCommentWithFlags(psDeviceNode, ui32Flags,
+                                            "PDUMP_BLOCK_START_0x%08X{",
+                                            ui32CurrentBlock); /* Add pdump-block start marker */
+       }
+
+       /* (3) New block has started */
+       return _PDumpTransition(psDeviceNode,
+                                   psPDumpConnectionData,
+                                   PDUMP_TRANSITION_EVENT_BLOCK_STARTED,
+                                   ui32PDumpFlags);
+}
+
+
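+/******************************************************************************
+ * Function Name  : PDumpTransition
+ * Inputs         : psDeviceNode, psPDumpConnectionData, eEvent, ui32PDumpFlags
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Entry point for PDump Transition events. Block-mode events
+ *                  (BLOCK_FINISHED/BLOCK_STARTED) are routed through
+ *                  _PDumpBlockTransition; all other events go straight to
+ *                  _PDumpTransition.
+******************************************************************************/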
+PVRSRV_ERROR PDumpTransition(PVRSRV_DEVICE_NODE *psDeviceNode,
+                             PDUMP_CONNECTION_DATA *psPDumpConnectionData,
+                             PDUMP_TRANSITION_EVENT eEvent,
+                             IMG_UINT32 ui32PDumpFlags)
+{
+       if ((eEvent == PDUMP_TRANSITION_EVENT_BLOCK_FINISHED) || (eEvent == PDUMP_TRANSITION_EVENT_BLOCK_STARTED))
+       {
+               /* Block mode transition events */
+               PVR_ASSERT(PDumpCtrlCapModIsBlocked());
+               return _PDumpBlockTransition(psDeviceNode, psPDumpConnectionData, eEvent, ui32PDumpFlags);
+       }
+       else
+       {
+               /* Non-block mode transition events */
+               return _PDumpTransition(psDeviceNode, psPDumpConnectionData, eEvent, ui32PDumpFlags);
+       }
+}
+
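+/******************************************************************************
+ * Function Name  : PDumpIsCaptureFrame
+ * Outputs        : bInCaptureRange - IMG_TRUE if the current frame is inside
+ *                                    the capture range
+ * Returns        : PVRSRV_ERROR
+ * Description    : Reads the PDump control state and reports whether the
+ *                  current frame is a capture frame. Callers in this file
+ *                  invoke it with the PDump control lock held.
+******************************************************************************/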
+static PVRSRV_ERROR PDumpIsCaptureFrame(IMG_BOOL *bInCaptureRange)
+{
+       IMG_UINT64 ui64State = 0;
+       PVRSRV_ERROR eError;
+
+       eError = PDumpCtrlGetState(&ui64State);
+
+       *bInCaptureRange = (ui64State & PDUMP_STATE_CAPTURE_FRAME) ? IMG_TRUE : IMG_FALSE;
+
+       return eError;
+}
+
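+/******************************************************************************
+ * Function Name  : PDumpGetStateKM
+ * Outputs        : ui64State - PDump control state flags
+ * Returns        : PVRSRV_ERROR
+ * Description    : Returns the PDump control state flags, taking the PDump
+ *                  control lock around the read.
+******************************************************************************/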
+PVRSRV_ERROR PDumpGetStateKM(IMG_UINT64 *ui64State)
+{
+       PVRSRV_ERROR eError;
+
+       PDumpCtrlLockAcquire();
+       eError = PDumpCtrlGetState(ui64State);
+       PDumpCtrlLockRelease();
+
+       return eError;
+}
+
+/******************************************************************************
+ * Function Name  : PDumpUpdateBlockCtrlStatus
+ * Inputs         : ui32Frame - frame number
+ * Outputs        : None
+ * Returns        : IMG_TRUE if Block transition is required, else IMG_FALSE
+ * Description    : Updates Block Ctrl status and checks if block transition
+ *                  is required or not
+******************************************************************************/
+static INLINE IMG_BOOL PDumpUpdateBlockCtrlStatus(IMG_UINT32 ui32Frame)
+{
+       IMG_BOOL bForceBlockTransition;
+
+       /* The default length of the first block is PDUMP_BLOCKLEN_MIN.
+        * The user can force it to be the same as the supplied block length
+        * (i.e. ui32BlockLength) through the pdump client.
+        *
+        * Here is how blocks will be created.
+        *
+        * Assume,
+        * ui32BlockLength = 20
+        * PDUMP_BLOCKLEN_MIN = 10
+        *
+        * Then the pdump blocks will contain the following frames:
+        *
+        * if(!PDumpCtrlMinimalFirstBlock())
+        * {
+        *              //pdump -b<block len>
+        *              block 0 -> 00...09        -->minimal first block
+        *              block 1 -> 10...29
+        *              block 2 -> 30...49
+        *              block 3 -> 50...69
+        *              ...
+        * }
+        * else
+        * {
+        *              //pdump -bf<block len>
+        *              block 0 -> 00...19
+        *              block 1 -> 20...39
+        *              block 2 -> 40...59
+        *              block 3 -> 60...79
+        *              ...
+        * }
+        *
+        * */
+
+       if (PDumpCtrlMinimalFirstBlock())
+       {
+               bForceBlockTransition = ((ui32Frame >= PDUMP_BLOCKLEN_MIN) && !((ui32Frame - PDUMP_BLOCKLEN_MIN) % g_PDumpCtrl.sBlockCtrl.ui32BlockLength)) || (ui32Frame == 0);
+       }
+       else
+       {
+               bForceBlockTransition = !(ui32Frame % g_PDumpCtrl.sBlockCtrl.ui32BlockLength);
+       }
+
+       if (bForceBlockTransition) /* Entering a new pdump-block */
+       {
+               /* Update the block number.
+                *
+                * The logic below maintains the block-number to frame-number
+                * mapping for applications where setFrame(0) gets called twice
+                * at the start.
+                */
+               PDumpCtrlLockAcquire();
+               PDumpCtrlSetBlock((ui32Frame == 0)? 0 : (PDumpCtrlGetBlock() + 1));
+               PDumpCtrlLockRelease();
+
+               if (ui32Frame > 0) /* Do not transition on the first frame itself */
+               {
+                       return IMG_TRUE; /* Transition */
+               }
+       }
+       return IMG_FALSE; /* No transition */
+}
+
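+/******************************************************************************
+ * Function Name  : PDumpForceCaptureStopKM
+ * Inputs         : psConnection, psDeviceNode
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Forces the capture to stop. Only valid when PDump is in
+ *                  block capture mode (pdump -b<block-len>); calls for devices
+ *                  other than the pdump-bound device are ignored.
+******************************************************************************/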
+PVRSRV_ERROR PDumpForceCaptureStopKM(CONNECTION_DATA *psConnection,
+                                     PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       /* If the call is not for the pdump-bound device, return immediately
+        * without taking any action.
+        */
+       if (!PDumpIsDevicePermitted(psDeviceNode))
+       {
+               return PVRSRV_OK;
+       }
+
+       if (!PDumpCtrlCapModIsBlocked())
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: This call is valid only in Block mode of PDump i.e. pdump -b<block-len>", __func__));
+               return PVRSRV_ERROR_PDUMP_NOT_ALLOWED;
+       }
+
+       (void) PDumpCommentWithFlags(psDeviceNode,
+                                    PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_BLKDATA,
+                                    "PDump forced STOP capture request received at frame %u",
+                                    g_PDumpCtrl.ui32CurrentFrame);
+
+       PDumpCtrlLockAcquire();
+       eError = PDumpCtrlForcedStop();
+       PDumpCtrlLockRelease();
+
+       return eError;
+}
+
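+/******************************************************************************
+ * Function Name  : _PDumpSetFrameKM
+ * Inputs         : psConnection, psDeviceNode, ui32Frame
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Sets the current PDump frame and works out whether this
+ *                  causes a capture-range or block Transition event. Any
+ *                  required Transition is performed here; a previously failed
+ *                  Transition is retried when the same frame is set again.
+******************************************************************************/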
+static PVRSRV_ERROR _PDumpSetFrameKM(CONNECTION_DATA *psConnection,
+                                     PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     IMG_UINT32 ui32Frame)
+{
+       PDUMP_CONNECTION_DATA *psPDumpConnectionData = psConnection->psPDumpConnectionData;
+       PDUMP_TRANSITION_EVENT eTransitionEvent = PDUMP_TRANSITION_EVENT_NONE;
+       IMG_BOOL bWasInCaptureRange = IMG_FALSE;
+       IMG_BOOL bIsInCaptureRange = IMG_FALSE;
+       PVRSRV_ERROR eError;
+
+       /*
+               Note:
+               We cannot test whether the new frame will be in the capture range
+               before we set the frame number, and we do not want to roll back
+               the frame number if we fail. We therefore save the "transient"
+               data that decides whether we are entering or exiting the capture
+               range, together with a failure flag, so we know what is required
+               on a retry.
+       */
+       if (psPDumpConnectionData->ui32LastSetFrameNumber != ui32Frame)
+       {
+               (void) PDumpCommentWithFlags(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                            "Set pdump frame %u", ui32Frame);
+
+               /*
+                       The boolean values below decide whether the PDump transition
+                       should trigger because the current context set the frame
+                       number. The calls below must therefore execute atomically,
+                       so no other context gets a chance to transition in between.
+               */
+               PDumpCtrlLockAcquire();
+
+               PDumpIsCaptureFrame(&bWasInCaptureRange);
+               PDumpCtrlSetCurrentFrame(ui32Frame);
+               PDumpIsCaptureFrame(&bIsInCaptureRange);
+
+               PDumpCtrlLockRelease();
+
+               psPDumpConnectionData->ui32LastSetFrameNumber = ui32Frame;
+
+               /* Check for any transition event only if a client is connected */
+               if (PDumpIsClientConnected())
+               {
+                       if (!bWasInCaptureRange && bIsInCaptureRange)
+                       {
+                               eTransitionEvent = PDUMP_TRANSITION_EVENT_RANGE_ENTERED;
+                       }
+                       else if (bWasInCaptureRange && !bIsInCaptureRange)
+                       {
+                               eTransitionEvent = PDUMP_TRANSITION_EVENT_RANGE_EXITED;
+                       }
+
+                       if (PDumpCtrlCapModIsBlocked())
+                       {
+                               /* Update block ctrl status and check for block transition */
+                               if (PDumpUpdateBlockCtrlStatus(ui32Frame))
+                               {
+                                       PVR_ASSERT(eTransitionEvent == PDUMP_TRANSITION_EVENT_NONE); /* Something went wrong; we cannot handle two events at the same time */
+                                       eTransitionEvent = PDUMP_TRANSITION_EVENT_BLOCK_FINISHED;
+                               }
+                       }
+               }
+       }
+       else if (psPDumpConnectionData->eFailedEvent != PDUMP_TRANSITION_EVENT_NONE)
+       {
+               /* Load the Transition data so we can try again */
+               eTransitionEvent = psPDumpConnectionData->eFailedEvent;
+       }
+       else
+       {
+               /* The new frame is the same as the last frame set and the last
+                * transition succeeded, so there is no need to perform another
+                * transition.
+                */
+               return PVRSRV_OK;
+       }
+
+       if (eTransitionEvent != PDUMP_TRANSITION_EVENT_NONE)
+       {
+               DEBUG_OUTFILES_COMMENT(psDeviceNode, "PDump transition event(%u)-begin frame %u (post)", eTransitionEvent, ui32Frame);
+               eError = PDumpTransition(psDeviceNode, psPDumpConnectionData, eTransitionEvent, PDUMP_FLAGS_NONE);
+               DEBUG_OUTFILES_COMMENT(psDeviceNode, "PDump transition event(%u)-complete frame %u (post)", eTransitionEvent, ui32Frame);
+               PVR_RETURN_IF_ERROR(eError);
+       }
+
+       return PVRSRV_OK;
+}
+
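+/******************************************************************************
+ * Function Name  : PDumpSetFrameKM
+ * Inputs         : psConnection, psDeviceNode, ui32Frame
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Bridge entry point for setting the PDump frame. Calls for
+ *                  devices other than the pdump-bound device return PVRSRV_OK
+ *                  without any action; otherwise the work is done by
+ *                  _PDumpSetFrameKM and bracketed with debug out-file comments.
+******************************************************************************/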
+PVRSRV_ERROR PDumpSetFrameKM(CONNECTION_DATA *psConnection,
+                             PVRSRV_DEVICE_NODE *psDeviceNode,
+                             IMG_UINT32 ui32Frame)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       /* If the call is not for the pdump-bound device, return immediately
+        * without taking any action.
+        */
+       if (!PDumpIsDevicePermitted(psDeviceNode))
+       {
+               return PVRSRV_OK;
+       }
+
+#if defined(PDUMP_TRACE_STATE)
+       PVR_DPF((PVR_DBG_WARNING, "PDumpSetFrameKM: ui32Frame( %d )", ui32Frame));
+#endif
+
+       DEBUG_OUTFILES_COMMENT(psDeviceNode, "(pre) Set pdump frame %u", ui32Frame);
+
+       eError = _PDumpSetFrameKM(psConnection, psDeviceNode, ui32Frame);
+       if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+       {
+               PVR_LOG_ERROR(eError, "_PDumpSetFrameKM");
+       }
+
+       DEBUG_OUTFILES_COMMENT(psDeviceNode, "(post) Set pdump frame %u", ui32Frame);
+
+       return eError;
+}
+
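+/******************************************************************************
+ * Function Name  : PDumpGetFrameKM
+ * Inputs         : psConnection, psDeviceNode
+ * Outputs        : pui32Frame - current PDump frame number
+ * Returns        : PVRSRV_ERROR
+ * Description    : Returns the current PDump frame, taking the PDump control
+ *                  lock around the read.
+******************************************************************************/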
+PVRSRV_ERROR PDumpGetFrameKM(CONNECTION_DATA *psConnection,
+                             PVRSRV_DEVICE_NODE * psDeviceNode,
+                             IMG_UINT32* pui32Frame)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       /*
+               It may be safe to avoid acquiring this lock here, as all the other
+               calls that read or modify the current frame wait on the PDump
+               Control bridge lock first. Also, as of now, the PDumping app never
+               modifies the current frame through a call that acquires the global
+               bridge lock. Still, for legacy reasons we acquire the lock and
+               then read.
+       */
+       PDumpCtrlLockAcquire();
+
+       *pui32Frame = PDumpCtrlGetCurrentFrame();
+
+       PDumpCtrlLockRelease();
+       return eError;
+}
+
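+/******************************************************************************
+ * Function Name  : PDumpSetDefaultCaptureParamsKM
+ * Inputs         : ui32Mode, ui32Start, ui32End, ui32Interval,
+ *                  ui32MaxParamFileSize
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Validates and applies the default capture parameters
+ *                  (mode, frame range and interval/block length) and sets the
+ *                  maximum parameter file size (0 selects PRM_FILE_SIZE_MAX).
+******************************************************************************/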
+PVRSRV_ERROR PDumpSetDefaultCaptureParamsKM(CONNECTION_DATA *psConnection,
+                                            PVRSRV_DEVICE_NODE *psDeviceNode,
+                                            IMG_UINT32 ui32Mode,
+                                            IMG_UINT32 ui32Start,
+                                            IMG_UINT32 ui32End,
+                                            IMG_UINT32 ui32Interval,
+                                            IMG_UINT32 ui32MaxParamFileSize)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       /* NB. We choose not to check that the device is the pdump-bound
+        * device here, as this particular bridge call is made only from the pdump
+        * tool itself (which may only connect to the bound device).
+        */
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+       eError = PDumpReady();
+       PVR_LOG_RETURN_IF_ERROR(eError, "PDumpReady");
+
+       /* Validate parameters */
+       if ((ui32End < ui32Start) || (ui32Mode > PDUMP_CAPMODE_MAX))
+       {
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+       }
+       else if (ui32Mode == PDUMP_CAPMODE_BLOCKED)
+       {
+               if ((ui32Interval < PDUMP_BLOCKLEN_MIN) || (ui32Interval > PDUMP_BLOCKLEN_MAX))
+               {
+                       /* Force client to set ui32Interval (i.e. block length) in valid range */
+                       eError = PVRSRV_ERROR_PDUMP_INVALID_BLOCKLEN;
+               }
+
+               if (ui32End != PDUMP_FRAME_MAX)
+               {
+                       /* Force client to set ui32End to PDUMP_FRAME_MAX */
+                       eError = PVRSRV_ERROR_INVALID_PARAMS;
+               }
+       }
+       else if ((ui32Mode != PDUMP_CAPMODE_UNSET) && (ui32Interval < 1))
+       {
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       PVR_LOG_RETURN_IF_ERROR(eError, "PDumpSetDefaultCaptureParamsKM");
+
+       /*
+          Acquire PDUMP_CTRL_STATE struct lock before modifications as a
+          PDumping app may be reading the state data for some checks
+       */
+       PDumpCtrlLockAcquire();
+       PDumpCtrlSetDefaultCaptureParams(ui32Mode, ui32Start, ui32End, ui32Interval);
+       PDumpCtrlLockRelease();
+
+       if (ui32MaxParamFileSize == 0)
+       {
+               g_PDumpParameters.ui32MaxFileSize = PRM_FILE_SIZE_MAX;
+       }
+       else
+       {
+               g_PDumpParameters.ui32MaxFileSize = ui32MaxParamFileSize;
+       }
+       return PVRSRV_OK;
+}
+
+
+/******************************************************************************
+ * Function Name  : PDumpReg32
+ * Inputs         : pszPDumpDevName, Register offset, and value to write
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string, which represents a register write
+******************************************************************************/
+PVRSRV_ERROR PDumpReg32(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                               IMG_CHAR        *pszPDumpRegName,
+                                               IMG_UINT32      ui32Reg,
+                                               IMG_UINT32      ui32Data,
+                                               IMG_UINT32      ui32Flags)
+{
+       PVRSRV_ERROR eErr;
+       PDUMP_GET_SCRIPT_STRING()
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW :%s:0x%08X 0x%08X", pszPDumpRegName, ui32Reg, ui32Data);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32Flags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+       PDUMP_UNLOCK(ui32Flags);
+
+       PDUMP_RELEASE_SCRIPT_STRING()
+       return PVRSRV_OK;
+}
+
+/******************************************************************************
+ * Function Name  : PDumpReg64
+ * Inputs         : pszPDumpDevName, Register offset, and value to write
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string, which represents a register write
+******************************************************************************/
+PVRSRV_ERROR PDumpReg64(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                               IMG_CHAR        *pszPDumpRegName,
+                                               IMG_UINT32      ui32Reg,
+                                               IMG_UINT64      ui64Data,
+                                               IMG_UINT32      ui32Flags)
+{
+       PVRSRV_ERROR eErr;
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+       IMG_UINT32 ui32UpperValue = (IMG_UINT32) (ui64Data >> 32);
+       IMG_UINT32 ui32LowerValue = (IMG_UINT32) (ui64Data);
+#endif
+
+       PDUMP_GET_SCRIPT_STRING()
+
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW :%s:0x%08X 0x%08X",
+                                       pszPDumpRegName, ui32Reg, ui32LowerValue);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32Flags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW :%s:0x%08X 0x%08X",
+                                       pszPDumpRegName, ui32Reg + 4, ui32UpperValue);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               PDUMP_UNLOCK(ui32Flags);
+               return eErr;
+       }
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+       PDUMP_UNLOCK(ui32Flags);
+#else
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW64 :%s:0x%08X 0x%010" IMG_UINT64_FMTSPECX, pszPDumpRegName, ui32Reg, ui64Data);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32Flags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+       PDUMP_UNLOCK(ui32Flags);
+#endif
+       PDUMP_RELEASE_SCRIPT_STRING()
+       return PVRSRV_OK;
+}
+
+/******************************************************************************
+ * Function Name  : PDumpRegLabelToReg64
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string, which represents a register write
+ *                  from a register label
+******************************************************************************/
+PVRSRV_ERROR PDumpRegLabelToReg64(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                  IMG_CHAR *pszPDumpRegName,
+                                  IMG_UINT32 ui32RegDst,
+                                  IMG_UINT32 ui32RegSrc,
+                                  IMG_UINT32 ui32Flags)
+{
+       PVRSRV_ERROR eErr;
+       PDUMP_GET_SCRIPT_STRING()
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW64 :%s:0x%08X :%s:0x%08X", pszPDumpRegName, ui32RegDst, pszPDumpRegName, ui32RegSrc);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32Flags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+       PDUMP_UNLOCK(ui32Flags);
+
+       PDUMP_RELEASE_SCRIPT_STRING()
+       return PVRSRV_OK;
+
+}
+
+/******************************************************************************
+ * Function Name  : PDumpRegLabelToMem32
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string, which represents a memory write
+ *                  from a register label
+******************************************************************************/
+PVRSRV_ERROR PDumpRegLabelToMem32(IMG_CHAR *pszPDumpRegName,
+                                  IMG_UINT32 ui32Reg,
+                                  PMR *psPMR,
+                                  IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                                  IMG_UINT32 ui32Flags)
+{
+       PVRSRV_ERROR eErr;
+       IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+       IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+       IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+       IMG_DEVMEM_OFFSET_T uiNextSymName;
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+
+       PDUMP_GET_SCRIPT_STRING()
+
+       psDeviceNode = PMR_DeviceNode(psPMR);
+
+       eErr = PMR_PDumpSymbolicAddr(psPMR,
+                                    uiLogicalOffset,
+                                    PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+                                    aszMemspaceName,
+                                    PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+                                    aszSymbolicName,
+                                    &uiPDumpSymbolicOffset,
+                                    &uiNextSymName);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW :%s:%s:0x%"IMG_UINT64_FMTSPECX" :%s:0x%08X", aszMemspaceName, aszSymbolicName,
+                                                       uiPDumpSymbolicOffset, pszPDumpRegName, ui32Reg);
+
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32Flags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+       PDUMP_UNLOCK(ui32Flags);
+
+       PDUMP_RELEASE_SCRIPT_STRING()
+       return PVRSRV_OK;
+}
+
+/******************************************************************************
+ * Function Name  : PDumpRegLabelToMem64
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string, which represents a memory write
+ *                  from a register label
+******************************************************************************/
+PVRSRV_ERROR PDumpRegLabelToMem64(IMG_CHAR *pszPDumpRegName,
+                                                                 IMG_UINT32 ui32Reg,
+                                                                 PMR *psPMR,
+                                                                 IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                                                                 IMG_UINT32 ui32Flags)
+{
+       PVRSRV_ERROR eErr;
+       IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+       IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+       IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+       IMG_DEVMEM_OFFSET_T uiNextSymName;
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+
+       PDUMP_GET_SCRIPT_STRING()
+
+       psDeviceNode = PMR_DeviceNode(psPMR);
+
+       eErr = PMR_PDumpSymbolicAddr(psPMR,
+                                    uiLogicalOffset,
+                                    PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+                                    aszMemspaceName,
+                                    PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+                                    aszSymbolicName,
+                                    &uiPDumpSymbolicOffset,
+                                    &uiNextSymName);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW64 :%s:%s:0x%"IMG_UINT64_FMTSPECX" :%s:0x%08X", aszMemspaceName, aszSymbolicName,
+                                                       uiPDumpSymbolicOffset, pszPDumpRegName, ui32Reg);
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32Flags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+       PDUMP_UNLOCK(ui32Flags);
+
+       PDUMP_RELEASE_SCRIPT_STRING()
+       return PVRSRV_OK;
+}
+
+
+/******************************************************************************
+ * Function Name  : PDumpPhysHandleToInternalVar64
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string, which represents an internal var
+                    write using a PDump pages handle
+******************************************************************************/
+PVRSRV_ERROR PDumpPhysHandleToInternalVar64(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                            IMG_CHAR *pszInternalVar,
+                                            IMG_HANDLE hPdumpPages,
+                                            IMG_UINT32 ui32Flags)
+{
+       PVRSRV_ERROR eErr;
+       IMG_CHAR *pszSymbolicName;
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+       IMG_CHAR *pszPDumpVarName;
+#endif
+
+       PDUMP_GET_SCRIPT_STRING()
+
+       eErr = PDumpGetSymbolicAddr(hPdumpPages,
+                                   &pszSymbolicName);
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING();
+               return eErr;
+       }
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen,
+                               "WRW %s %s:0x%llX",
+                               pszInternalVar, pszSymbolicName, 0llu);
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING();
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32Flags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+       pszPDumpVarName = PDumpCreateIncVarNameStr(pszInternalVar);
+       if (pszPDumpVarName == NULL)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               PDUMP_UNLOCK(ui32Flags);
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW %s 0x%X", pszPDumpVarName, 0);
+
+       PDumpFreeIncVarNameStr(pszPDumpVarName);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               PDUMP_UNLOCK(ui32Flags);
+               return eErr;
+       }
+
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+
+#endif
+       PDUMP_UNLOCK(ui32Flags);
+
+       PDUMP_RELEASE_SCRIPT_STRING();
+
+       return PVRSRV_OK;
+}
+
+/******************************************************************************
+ * Function Name  : PDumpMemLabelToInternalVar64
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string, which represents an internal var
+ *                  write using a memory label
+******************************************************************************/
+PVRSRV_ERROR PDumpMemLabelToInternalVar64(IMG_CHAR *pszInternalVar,
+                                          PMR *psPMR,
+                                          IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                                          IMG_UINT32 ui32Flags)
+{
+       PVRSRV_ERROR eErr;
+       IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+       IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+       IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+       IMG_DEVMEM_OFFSET_T uiNextSymName;
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+       IMG_CHAR *pszPDumpVarName;
+#endif
+
+       PDUMP_GET_SCRIPT_STRING()
+
+       psDeviceNode = PMR_DeviceNode(psPMR);
+
+       eErr = PMR_PDumpSymbolicAddr(psPMR,
+                                    uiLogicalOffset,
+                                    PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+                                    aszMemspaceName,
+                                    PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+                                    aszSymbolicName,
+                                    &uiPDumpSymbolicOffset,
+                                    &uiNextSymName);
+
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW %s :%s:%s:0x%"IMG_UINT64_FMTSPECX, pszInternalVar,
+                                                       aszMemspaceName, aszSymbolicName, uiPDumpSymbolicOffset);
+
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32Flags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+       pszPDumpVarName = PDumpCreateIncVarNameStr(pszInternalVar);
+       if (pszPDumpVarName == NULL)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               PDUMP_UNLOCK(ui32Flags);
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW %s :%s:%s:0x%"IMG_UINT64_FMTSPECX, pszPDumpVarName,
+                                                       aszMemspaceName, aszSymbolicName, uiPDumpSymbolicOffset);
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               PDUMP_UNLOCK(ui32Flags);
+               return eErr;
+       }
+
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "SHR %s %s 0x20", pszPDumpVarName, pszPDumpVarName);
+
+       PDumpFreeIncVarNameStr(pszPDumpVarName);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               PDUMP_UNLOCK(ui32Flags);
+               return eErr;
+       }
+
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+
+#endif
+       PDUMP_UNLOCK(ui32Flags);
+
+       PDUMP_RELEASE_SCRIPT_STRING()
+       return PVRSRV_OK;
+}
+
+/******************************************************************************
+ * Function Name  : PDumpInternalVarToMemLabel
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string, which represents a memory label
+ *                  write using an internal var
+******************************************************************************/
+PVRSRV_ERROR PDumpInternalVarToMemLabel(PMR *psPMR,
+                                        IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                                        IMG_CHAR *pszInternalVar,
+                                        IMG_UINT32     ui32Flags)
+{
+       PVRSRV_ERROR eErr;
+       IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+       IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+       IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+       IMG_DEVMEM_OFFSET_T uiNextSymName;
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+       IMG_CHAR *pszPDumpVarName;
+#endif
+
+       PDUMP_GET_SCRIPT_STRING()
+
+       psDeviceNode = PMR_DeviceNode(psPMR);
+
+       eErr = PMR_PDumpSymbolicAddr(psPMR,
+                                    uiLogicalOffset,
+                                    PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+                                    aszMemspaceName,
+                                    PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+                                    aszSymbolicName,
+                                    &uiPDumpSymbolicOffset,
+                                    &uiNextSymName);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32Flags);
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW :%s:%s:0x%"IMG_UINT64_FMTSPECX" %s",
+                                                       aszMemspaceName, aszSymbolicName, uiPDumpSymbolicOffset, pszInternalVar);
+
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               PDUMP_UNLOCK(ui32Flags);
+               return eErr;
+       }
+
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+       pszPDumpVarName = PDumpCreateIncVarNameStr(pszInternalVar);
+       if (pszPDumpVarName == NULL)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               PDUMP_UNLOCK(ui32Flags);
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW %s 0x%X", pszPDumpVarName, 0);
+
+       PDumpFreeIncVarNameStr(pszPDumpVarName);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               PDUMP_UNLOCK(ui32Flags);
+               return eErr;
+       }
+
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+
+#endif
+       PDUMP_UNLOCK(ui32Flags);
+
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function     PDumpWriteVarORValueOp
+
+ @Description
+
+ Emits the PDump commands for the logical OR operation
+ Var <- Var OR Value
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PDumpWriteVarORValueOp(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                    const IMG_CHAR *pszInternalVariable,
+                                    const IMG_UINT64 ui64Value,
+                                    const IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eErr;
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+       IMG_CHAR *pszPDumpVarName;
+       IMG_UINT32 ui32UpperValue = (IMG_UINT32) (ui64Value >> 32);
+       IMG_UINT32 ui32LowerValue = (IMG_UINT32) (ui64Value);
+#endif
+
+       PDUMP_GET_SCRIPT_STRING();
+
+       eErr = PDumpSNPrintf(hScript,
+                       ui32MaxLen,
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+                       "OR %s %s 0x%X",
+#else
+                       "OR %s %s 0x%"IMG_UINT64_FMTSPECX,
+#endif
+                       pszInternalVariable,
+                       pszInternalVariable,
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+                       ui32LowerValue
+#else
+                       ui64Value
+#endif
+                       );
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32PDumpFlags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+       pszPDumpVarName = PDumpCreateIncVarNameStr(pszInternalVariable);
+       if (pszPDumpVarName == NULL)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               PDUMP_UNLOCK(ui32PDumpFlags);
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       eErr = PDumpSNPrintf(hScript,
+                       ui32MaxLen,
+                       "OR %s %s 0x%X",
+                       pszPDumpVarName,
+                       pszPDumpVarName,
+                       ui32UpperValue);
+
+       PDumpFreeIncVarNameStr(pszPDumpVarName);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               PDUMP_UNLOCK(ui32PDumpFlags);
+               return eErr;
+       }
+
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+#endif
+
+       PDUMP_UNLOCK(ui32PDumpFlags);
+
+       PDUMP_RELEASE_SCRIPT_STRING()
+       return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function     PDumpWriteVarORVarOp
+
+ @Description
+
+ Emits the PDump commands for the logical OR operation
+ Var <- Var OR Var2
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PDumpWriteVarORVarOp(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                  const IMG_CHAR *pszInternalVar,
+                                  const IMG_CHAR *pszInternalVar2,
+                                  const IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eErr;
+
+       PDUMP_GET_SCRIPT_STRING();
+
+       eErr = PDumpSNPrintf(hScript,
+                       ui32MaxLen,
+                       "OR %s %s %s",
+                       pszInternalVar,
+                       pszInternalVar,
+                       pszInternalVar2);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32PDumpFlags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+
+       PDUMP_UNLOCK(ui32PDumpFlags);
+       PDUMP_RELEASE_SCRIPT_STRING()
+       return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function     PDumpWriteVarANDVarOp
+
+ @Description
+
+ Emits the PDump commands for the logical AND operation
+ Var <- Var AND Var2
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PDumpWriteVarANDVarOp(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                   const IMG_CHAR *pszInternalVar,
+                                   const IMG_CHAR *pszInternalVar2,
+                                   const IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eErr;
+
+       PDUMP_GET_SCRIPT_STRING();
+
+       eErr = PDumpSNPrintf(hScript,
+                       ui32MaxLen,
+                       "AND %s %s %s",
+                       pszInternalVar,
+                       pszInternalVar,
+                       pszInternalVar2);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32PDumpFlags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+
+       PDUMP_UNLOCK(ui32PDumpFlags);
+       PDUMP_RELEASE_SCRIPT_STRING()
+       return PVRSRV_OK;
+}
+
+
+/******************************************************************************
+ * Function Name  : PDumpRegLabelToInternalVar
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string, which writes a register label into
+ *                  an internal variable
+******************************************************************************/
+PVRSRV_ERROR PDumpRegLabelToInternalVar(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                        IMG_CHAR *pszPDumpRegName,
+                                        IMG_UINT32 ui32Reg,
+                                        IMG_CHAR *pszInternalVar,
+                                        IMG_UINT32 ui32Flags)
+{
+       PVRSRV_ERROR eErr;
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+       IMG_CHAR *pszPDumpVarName;
+#endif
+       PDUMP_GET_SCRIPT_STRING()
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW %s :%s:0x%08X", pszInternalVar, pszPDumpRegName, ui32Reg);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32Flags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+       pszPDumpVarName = PDumpCreateIncVarNameStr(pszInternalVar);
+       if (pszPDumpVarName == NULL)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               PDUMP_UNLOCK(ui32Flags);
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW %s :%s:0x%08X", pszPDumpVarName, pszPDumpRegName, ui32Reg + 4);
+
+       PDumpFreeIncVarNameStr(pszPDumpVarName);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               PDUMP_UNLOCK(ui32Flags);
+               return eErr;
+       }
+
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+#endif
+
+       PDUMP_UNLOCK(ui32Flags);
+       PDUMP_RELEASE_SCRIPT_STRING()
+       return PVRSRV_OK;
+
+}
+
+/******************************************************************************
+ * Function Name  : PDumpInternalVarToReg32
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string, which represents a register write
+ *                  from an internal variable
+******************************************************************************/
+PVRSRV_ERROR PDumpInternalVarToReg32(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     IMG_CHAR *pszPDumpRegName,
+                                     IMG_UINT32 ui32Reg,
+                                     IMG_CHAR *pszInternalVar,
+                                     IMG_UINT32 ui32Flags)
+{
+       PVRSRV_ERROR eErr;
+       PDUMP_GET_SCRIPT_STRING()
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW :%s:0x%08X %s", pszPDumpRegName, ui32Reg, pszInternalVar);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32Flags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+       PDUMP_UNLOCK(ui32Flags);
+
+       PDUMP_RELEASE_SCRIPT_STRING()
+       return PVRSRV_OK;
+}
+
+/******************************************************************************
+ * Function Name  : PDumpInternalVarToReg64
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string, which represents a register write
+ *                  from an internal variable
+******************************************************************************/
+PVRSRV_ERROR PDumpInternalVarToReg64(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     IMG_CHAR *pszPDumpRegName,
+                                     IMG_UINT32 ui32Reg,
+                                     IMG_CHAR *pszInternalVar,
+                                     IMG_UINT32 ui32Flags)
+{
+       PVRSRV_ERROR eErr;
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+       IMG_CHAR *pszPDumpVarName;
+#endif
+       PDUMP_GET_SCRIPT_STRING()
+
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW :%s:0x%08X %s", pszPDumpRegName, ui32Reg, pszInternalVar);
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32Flags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+
+       pszPDumpVarName = PDumpCreateIncVarNameStr(pszInternalVar);
+       if (pszPDumpVarName == NULL)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               PDUMP_UNLOCK(ui32Flags);
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW :%s:0x%08X %s", pszPDumpRegName, ui32Reg + 4, pszPDumpVarName);
+
+       PDumpFreeIncVarNameStr(pszPDumpVarName);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               PDUMP_UNLOCK(ui32Flags);
+               return eErr;
+       }
+
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+       PDUMP_UNLOCK(ui32Flags);
+
+#else
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "WRW64 :%s:0x%08X %s", pszPDumpRegName, ui32Reg, pszInternalVar);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32Flags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+       PDUMP_UNLOCK(ui32Flags);
+#endif
+
+       PDUMP_RELEASE_SCRIPT_STRING()
+       return PVRSRV_OK;
+}
+
+
+
+/******************************************************************************
+ * Function Name  : PDumpMemLabelToMem32
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string, which represents a memory write from
+ *                  a memory label
+******************************************************************************/
+PVRSRV_ERROR PDumpMemLabelToMem32(PMR *psPMRSource,
+                                  PMR *psPMRDest,
+                                  IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+                                  IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+                                  IMG_UINT32 ui32Flags)
+{
+       PVRSRV_ERROR eErr;
+       IMG_CHAR aszMemspaceNameSource[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+       IMG_CHAR aszSymbolicNameSource[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+       IMG_CHAR aszMemspaceNameDest[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+       IMG_CHAR aszSymbolicNameDest[PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH];
+       IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetSource;
+       IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetDest;
+       IMG_DEVMEM_OFFSET_T uiNextSymNameSource;
+       IMG_DEVMEM_OFFSET_T uiNextSymNameDest;
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+
+
+       PDUMP_GET_SCRIPT_STRING()
+
+       psDeviceNode = PMR_DeviceNode(psPMRSource);
+
+       eErr = PMR_PDumpSymbolicAddr(psPMRSource,
+                                    uiLogicalOffsetSource,
+                                    PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+                                    aszMemspaceNameSource,
+                                    PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+                                    aszSymbolicNameSource,
+                                    &uiPDumpSymbolicOffsetSource,
+                                    &uiNextSymNameSource);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+
+       eErr = PMR_PDumpSymbolicAddr(psPMRDest,
+                                    uiLogicalOffsetDest,
+                                    PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+                                    aszMemspaceNameDest,
+                                    PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+                                    aszSymbolicNameDest,
+                                    &uiPDumpSymbolicOffsetDest,
+                                    &uiNextSymNameDest);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen,
+                            "WRW :%s:%s:0x%"IMG_UINT64_FMTSPECX" :%s:%s:0x%"IMG_UINT64_FMTSPECX,
+                            aszMemspaceNameDest, aszSymbolicNameDest,
+                            uiPDumpSymbolicOffsetDest, aszMemspaceNameSource,
+                            aszSymbolicNameSource, uiPDumpSymbolicOffsetSource);
+
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+
+
+       PDUMP_LOCK(ui32Flags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+       PDUMP_UNLOCK(ui32Flags);
+
+       PDUMP_RELEASE_SCRIPT_STRING()
+       return PVRSRV_OK;
+}
+
+/******************************************************************************
+ * Function Name  : PDumpMemLabelToMem64
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string, which represents a memory write from
+ *                  a memory label
+******************************************************************************/
+PVRSRV_ERROR PDumpMemLabelToMem64(PMR *psPMRSource,
+                                                                 PMR *psPMRDest,
+                                                                 IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+                                                                 IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+                                                                 IMG_UINT32 ui32Flags)
+{
+       PVRSRV_ERROR eErr;
+       IMG_CHAR aszMemspaceNameSource[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+       IMG_CHAR aszSymbolicNameSource[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+       IMG_CHAR aszMemspaceNameDest[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+       IMG_CHAR aszSymbolicNameDest[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+       IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetSource;
+       IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetDest;
+       IMG_DEVMEM_OFFSET_T uiNextSymNameSource;
+       IMG_DEVMEM_OFFSET_T uiNextSymNameDest;
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+
+
+       PDUMP_GET_SCRIPT_STRING()
+
+       psDeviceNode = PMR_DeviceNode(psPMRSource);
+
+       eErr = PMR_PDumpSymbolicAddr(psPMRSource,
+                                    uiLogicalOffsetSource,
+                                    PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+                                    aszMemspaceNameSource,
+                                    PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+                                    aszSymbolicNameSource,
+                                    &uiPDumpSymbolicOffsetSource,
+                                    &uiNextSymNameSource);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+
+       eErr = PMR_PDumpSymbolicAddr(psPMRDest,
+                                    uiLogicalOffsetDest,
+                                    PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+                                    aszMemspaceNameDest,
+                                    PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+                                    aszSymbolicNameDest,
+                                    &uiPDumpSymbolicOffsetDest,
+                                    &uiNextSymNameDest);
+
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen,
+                            "WRW64 :%s:%s:0x%"IMG_UINT64_FMTSPECX" :%s:%s:0x%"IMG_UINT64_FMTSPECX,
+                            aszMemspaceNameDest, aszSymbolicNameDest,
+                            uiPDumpSymbolicOffsetDest, aszMemspaceNameSource,
+                            aszSymbolicNameSource, uiPDumpSymbolicOffsetSource);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32Flags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+       PDUMP_UNLOCK(ui32Flags);
+
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return PVRSRV_OK;
+}
+
+
+
+/*!
+*******************************************************************************
+
+ @Function     PDumpWriteVarSHRValueOp
+
+ @Description
+
+ Emits the PDump commands for the logical SHR operation
+ Var <-  Var SHR Value
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PDumpWriteVarSHRValueOp(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     const IMG_CHAR *pszInternalVariable,
+                                     const IMG_UINT64 ui64Value,
+                                     const IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eErr;
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+       IMG_CHAR *pszPDumpVarName;
+       IMG_UINT32 ui32UpperValue = (IMG_UINT32) (ui64Value >> 32);
+       IMG_UINT32 ui32LowerValue = (IMG_UINT32) (ui64Value);
+#endif
+
+       PDUMP_GET_SCRIPT_STRING();
+
+       eErr = PDumpSNPrintf(hScript,
+                       ui32MaxLen,
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+                       "SHR %s %s 0x%X",
+#else
+                       "SHR %s %s 0x%"IMG_UINT64_FMTSPECX,
+#endif
+                       pszInternalVariable,
+                       pszInternalVariable,
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+                       ui32LowerValue
+#else
+                       ui64Value
+#endif
+                       );
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32PDumpFlags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+       pszPDumpVarName = PDumpCreateIncVarNameStr(pszInternalVariable);
+       if (pszPDumpVarName == NULL)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               PDUMP_UNLOCK(ui32PDumpFlags);
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       eErr = PDumpSNPrintf(hScript,
+                       ui32MaxLen,
+                       "SHR %s %s 0x%X",
+                       pszPDumpVarName,
+                       pszPDumpVarName,
+                       ui32UpperValue);
+
+       PDumpFreeIncVarNameStr(pszPDumpVarName);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               PDUMP_UNLOCK(ui32PDumpFlags);
+               return eErr;
+       }
+
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+#endif
+
+       PDUMP_UNLOCK(ui32PDumpFlags);
+
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return PVRSRV_OK;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function     PDumpWriteVarANDValueOp
+
+ @Description
+
+ Emits the PDump commands for the logical AND operation
+ Var <-  Var AND Value
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PDumpWriteVarANDValueOp(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     const IMG_CHAR *pszInternalVariable,
+                                     const IMG_UINT64 ui64Value,
+                                     const IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eErr;
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+       IMG_CHAR *pszPDumpVarName;
+       IMG_UINT32 ui32UpperValue = (IMG_UINT32) (ui64Value >> 32);
+       IMG_UINT32 ui32LowerValue = (IMG_UINT32) (ui64Value);
+#endif
+
+       PDUMP_GET_SCRIPT_STRING();
+
+       eErr = PDumpSNPrintf(hScript,
+                       ui32MaxLen,
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+                       "AND %s %s 0x%X",
+#else
+                       "AND %s %s 0x%"IMG_UINT64_FMTSPECX,
+#endif
+                       pszInternalVariable,
+                       pszInternalVariable,
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+                       ui32LowerValue
+#else
+                       ui64Value
+#endif
+                       );
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32PDumpFlags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+       pszPDumpVarName = PDumpCreateIncVarNameStr(pszInternalVariable);
+       if (pszPDumpVarName == NULL)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               PDUMP_UNLOCK(ui32PDumpFlags);
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       eErr = PDumpSNPrintf(hScript,
+                       ui32MaxLen,
+                       "AND %s %s 0x%X",
+                       pszPDumpVarName,
+                       pszPDumpVarName,
+                       ui32UpperValue);
+
+       PDumpFreeIncVarNameStr(pszPDumpVarName);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               PDUMP_UNLOCK(ui32PDumpFlags);
+               return eErr;
+       }
+
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+#endif
+
+       PDUMP_UNLOCK(ui32PDumpFlags);
+
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return PVRSRV_OK;
+}
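+
+/*
+ * Illustrative usage only (not part of the driver logic above); the internal
+ * variable name ":SYSMEM:$1" is a hypothetical example.
+ *
+ *   eErr = PDumpWriteVarANDValueOp(psDeviceNode, ":SYSMEM:$1",
+ *                                  0xFFFFFFFFULL, PDUMP_FLAGS_CONTINUOUS);
+ *
+ * emits "AND :SYSMEM:$1 :SYSMEM:$1 0xFFFFFFFF", or two 32-bit AND commands
+ * (lower word on the named variable, upper word on the incremented variable
+ * name) when PDUMP_SPLIT_64BIT_REGISTER_ACCESS is defined.
+ */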
+
+
+/******************************************************************************
+ * Function Name  : PDumpSAW
+ * Inputs         : pszDevSpaceName -- device space from which to output
+ *                  ui32HPOffsetBytes -- byte offset from the device space base
+ *                  ui32NumSaveBytes -- number of bytes to output
+ *                  pszOutfileName -- name of file to output to
+ *                  ui32OutfileOffsetByte -- offset into output file to write
+ *                  uiPDumpFlags -- flags to pass to PDumpWriteScript
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Dumps the contents of a register bank into a file.
+ *                  NB: ui32NumSaveBytes must be divisible by 4; the size is
+ *                  emitted in the SAW command as a count of 32-bit words
+******************************************************************************/
+PVRSRV_ERROR PDumpSAW(PVRSRV_DEVICE_NODE *psDeviceNode,
+                      IMG_CHAR           *pszDevSpaceName,
+                      IMG_UINT32         ui32HPOffsetBytes,
+                      IMG_UINT32         ui32NumSaveBytes,
+                      IMG_CHAR           *pszOutfileName,
+                      IMG_UINT32         ui32OutfileOffsetByte,
+                      PDUMP_FLAGS_T      uiPDumpFlags)
+{
+       PVRSRV_ERROR eError;
+
+       PDUMP_GET_SCRIPT_STRING()
+
+       PVR_DPF((PVR_DBG_ERROR, "PDumpSAW"));
+
+       eError = PDumpSNPrintf(hScript,
+                                 ui32MaxLen,
+                                 "SAW :%s:0x%x 0x%x 0x%x %s\n",
+                                 pszDevSpaceName,
+                                 ui32HPOffsetBytes,
+                                 ui32NumSaveBytes / (IMG_UINT32)sizeof(IMG_UINT32),
+                                 ui32OutfileOffsetByte,
+                                 pszOutfileName);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "PDumpSAW PDumpSNPrintf failed: eError=%u", eError));
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eError;
+       }
+
+       PDUMP_LOCK(uiPDumpFlags);
+       if (! PDumpWriteScript(psDeviceNode, hScript, uiPDumpFlags))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "PDumpSAW PDumpWriteScript failed!"));
+       }
+       PDUMP_UNLOCK(uiPDumpFlags);
+
+       PDUMP_RELEASE_SCRIPT_STRING()
+       return PVRSRV_OK;
+
+}
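+
+/*
+ * Illustrative usage only (hypothetical device space and file names): saving
+ * 256 bytes from the start of a device space "SYSMEM" to "regbank.bin":
+ *
+ *   eError = PDumpSAW(psDeviceNode, "SYSMEM", 0x0, 256,
+ *                     "regbank.bin", 0, PDUMP_FLAGS_CONTINUOUS);
+ *
+ * emits "SAW :SYSMEM:0x0 0x40 0x0 regbank.bin" (the size argument is written
+ * in 32-bit words, hence 256 / 4 = 0x40).
+ */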
+
+
+/******************************************************************************
+ * Function Name  : PDumpRegPolKM
+ * Inputs         : pszPDumpRegName - PDump register space name
+ *                  ui32RegAddr - register offset
+ *                  ui32RegValue - expected value
+ *                  ui32Mask - mask applied to the read value
+ *                  ui32Flags - PDump flags
+ *                  eOperator - poll comparison operator
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Creates a PDUMP POL command which polls a register until
+ *                  the masked value matches the expected value
+******************************************************************************/
+PVRSRV_ERROR PDumpRegPolKM(PVRSRV_DEVICE_NODE  *psDeviceNode,
+                                                  IMG_CHAR                             *pszPDumpRegName,
+                                                  IMG_UINT32                   ui32RegAddr,
+                                                  IMG_UINT32                   ui32RegValue,
+                                                  IMG_UINT32                   ui32Mask,
+                                                  IMG_UINT32                   ui32Flags,
+                                                  PDUMP_POLL_OPERATOR  eOperator)
+{
+       /* Timings correct for Linux and XP */
+       /* Timings should be passed in */
+       #define POLL_DELAY                      1000U
+       #define POLL_COUNT_LONG         (2000000000U / POLL_DELAY)
+       #define POLL_COUNT_SHORT        (1000000U / POLL_DELAY)
+
+       PVRSRV_ERROR eErr;
+       IMG_UINT32      ui32PollCount;
+
+       PDUMP_GET_SCRIPT_STRING();
+
+       ui32PollCount = POLL_COUNT_LONG;
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "POL :%s:0x%08X 0x%08X 0x%08X %d %u %d",
+                                                       pszPDumpRegName, ui32RegAddr, ui32RegValue,
+                                                       ui32Mask, eOperator, ui32PollCount, POLL_DELAY);
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32Flags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+       PDUMP_UNLOCK(ui32Flags);
+
+       PDUMP_RELEASE_SCRIPT_STRING()
+       return PVRSRV_OK;
+}
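+
+/*
+ * Illustrative usage only; the register space name "RGXREG" and the operator
+ * value eOp are hypothetical.
+ *
+ *   eErr = PDumpRegPolKM(psDeviceNode, "RGXREG", 0x0000, 0x1, 0x1,
+ *                        PDUMP_FLAGS_CONTINUOUS, eOp);
+ *
+ * emits a line of the form
+ *
+ *   POL :RGXREG:0x00000000 0x00000001 0x00000001 <eOp> 2000000 1000
+ *
+ * i.e. poll up to POLL_COUNT_LONG times with POLL_DELAY between reads.
+ */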
+
+/*!
+ * \name       _PDumpVerifyLineEnding
+ */
+static void _PDumpVerifyLineEnding(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax)
+{
+       IMG_UINT32 ui32Count;
+       IMG_CHAR* pszBuf = hBuffer;
+
+       /* strlen */
+       ui32Count = OSStringNLength(pszBuf, ui32BufferSizeMax);
+
+       /* Put \n sequence at the end if it isn't already there */
+       if ((ui32Count >= 1) && (pszBuf[ui32Count-1] != '\n') && (ui32Count<ui32BufferSizeMax))
+       {
+               pszBuf[ui32Count] = '\n';
+               ui32Count++;
+               pszBuf[ui32Count] = '\0';
+       }
+}
+
+
+/* Never call directly; the caller must hold the OS lock.
+ * Use PDumpCommentWithFlags() from within the server.
+ * Clients call this via the bridge and PDumpCommentKM().
+ */
+static PVRSRV_ERROR _PDumpWriteComment(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                       IMG_CHAR *pszComment,
+                                       IMG_UINT32 ui32Flags)
+{
+       PVRSRV_ERROR eErr;
+#if defined(PDUMP_DEBUG_OUTFILES)
+       IMG_CHAR pszTemp[PVRSRV_PDUMP_MAX_COMMENT_SIZE+80];
+       IMG_INT32 iCount;
+#endif
+
+       PDUMP_GET_SCRIPT_STRING();
+
+       PVR_ASSERT(pszComment != NULL);
+
+       if (OSStringNLength(pszComment, ui32MaxLen) == 0)
+       {
+               /* _PDumpVerifyLineEnding silently fails if pszComment is too short to
+                  actually hold the line ending that it's trying to enforce, so
+                  short-circuit it and force safety */
+               pszComment = "\n";
+       }
+       else
+       {
+               /* Put line ending sequence at the end if it isn't already there */
+               _PDumpVerifyLineEnding(pszComment, ui32MaxLen);
+       }
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+       /* Prefix comment with PID and line number */
+       iCount = OSSNPrintf(pszTemp, PVRSRV_PDUMP_MAX_COMMENT_SIZE+80, "%u %u:%lu %s: %s",
+               OSAtomicRead(&g_sEveryLineCounter),
+               OSGetCurrentClientProcessIDKM(),
+               (unsigned long)OSGetCurrentClientThreadIDKM(),
+               OSGetCurrentClientProcessNameKM(),
+               pszComment);
+       if ((iCount < 0) || (iCount >= (PVRSRV_PDUMP_MAX_COMMENT_SIZE+80)))
+       {
+               eErr = PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
+       }
+
+       /* Append the comment to the script stream */
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "-- %s",
+               pszTemp);
+#else
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "-- %s",
+               pszComment);
+#endif
+       if ((eErr != PVRSRV_OK) &&
+               (eErr != PVRSRV_ERROR_PDUMP_BUF_OVERFLOW))
+       {
+               PVR_LOG_GOTO_IF_ERROR(eErr, "PDumpSNPrintf", ErrUnlock);
+       }
+
+       if (!PDumpWriteScript(psDeviceNode, hScript, ui32Flags))
+       {
+               if (PDUMP_IS_CONTINUOUS(ui32Flags))
+               {
+                       eErr = PVRSRV_ERROR_PDUMP_BUFFER_FULL;
+                       PVR_LOG_GOTO_IF_ERROR(eErr, "PDumpWriteScript", ErrUnlock);
+               }
+               else
+               {
+                       eErr = PVRSRV_ERROR_CMD_NOT_PROCESSED;
+                       PVR_LOG_GOTO_IF_ERROR(eErr, "PDumpWriteScript", ErrUnlock);
+               }
+       }
+
+ErrUnlock:
+       PDUMP_RELEASE_SCRIPT_STRING()
+       return eErr;
+}
+
+/******************************************************************************
+ * Function Name  : PDumpCommentKM
+ * Inputs         : ui32CommentSize, pszComment, ui32Flags
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Dumps a pre-formatted comment, primarily called from the
+ *                : bridge.
+******************************************************************************/
+PVRSRV_ERROR PDumpCommentKM(CONNECTION_DATA *psConnection,
+                            PVRSRV_DEVICE_NODE *psDeviceNode,
+                            IMG_UINT32 ui32CommentSize,
+                            IMG_CHAR *pszComment,
+                            IMG_UINT32 ui32Flags)
+{
+       PVRSRV_ERROR eErr = PVRSRV_OK;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVR_UNREFERENCED_PARAMETER(ui32CommentSize); /* Generated bridge code appends null char to pszComment. */
+
+       PDUMP_LOCK(ui32Flags);
+
+       eErr = _PDumpWriteComment(psDeviceNode, pszComment, ui32Flags);
+
+       PDUMP_UNLOCK(ui32Flags);
+       return eErr;
+}
+
+/******************************************************************************
+ * Function Name  : PDumpCommentWithFlagsNoLockVA
+ * Inputs         : ui32Flags - PDump flags
+ *                               : pszFormat - format string for comment
+ *                               : args      - pre-started va_list args for format string
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : PDumps a comment; the caller needs to acquire the PDump
+ *                  lock explicitly before calling this function
+******************************************************************************/
+static PVRSRV_ERROR PDumpCommentWithFlagsNoLockVA(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                           IMG_UINT32 ui32Flags,
+                                           const IMG_CHAR * pszFormat, va_list args)
+{
+       IMG_INT32 iCount;
+       PVRSRV_ERROR eErr = PVRSRV_OK;
+       PDUMP_GET_MSG_STRING();
+
+       /* Construct the string */
+       iCount = OSVSNPrintf(pszMsg, ui32MaxLen, pszFormat, args);
+       PVR_LOG_GOTO_IF_FALSE(((iCount != -1) && (iCount < ui32MaxLen)), "OSVSNPrintf", exit);
+
+       eErr = _PDumpWriteComment(psDeviceNode, pszMsg, ui32Flags);
+
+exit:
+       PDUMP_RELEASE_MSG_STRING();
+       return eErr;
+}
+
+/******************************************************************************
+ * Function Name  : PDumpCommentWithFlagsNoLock
+ * Inputs         : ui32Flags - PDump flags
+ *                               : pszFormat - format string for comment
+ *                               : ... - args for format string
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : PDumps a comment; the caller needs to acquire the PDump
+ *                  lock explicitly before calling this function.
+******************************************************************************/
+__printf(3, 4)
+static PVRSRV_ERROR PDumpCommentWithFlagsNoLock(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                         IMG_UINT32 ui32Flags,
+                                         IMG_CHAR *pszFormat, ...)
+{
+       PVRSRV_ERROR eErr;
+       va_list args;
+
+       va_start(args, pszFormat);
+       eErr = PDumpCommentWithFlagsNoLockVA(psDeviceNode, ui32Flags, pszFormat, args);
+       va_end(args);
+
+       return eErr;
+}
+
+/******************************************************************************
+ * Function Name  : PDumpCommentWithFlags
+ * Inputs         : ui32Flags - PDump flags
+ *                               : pszFormat - format string for comment
+ *                               : ... - args for format string
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : PDumps a comment
+******************************************************************************/
+PVRSRV_ERROR PDumpCommentWithFlags(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                   IMG_UINT32 ui32Flags,
+                                   IMG_CHAR * pszFormat, ...)
+{
+       PVRSRV_ERROR eErr;
+       va_list args;
+
+       va_start(args, pszFormat);
+       eErr = PDumpCommentWithFlagsVA(psDeviceNode, ui32Flags, pszFormat, args);
+       va_end(args);
+
+       return eErr;
+}
+
+/******************************************************************************
+ * Function Name  : PDumpCommentWithFlagsVA
+ * Inputs         : ui32Flags - PDump flags
+ *                               : pszFormat - format string for comment
+ *                               : args      - pre-started va_list args for format string
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : PDumps a comment
+******************************************************************************/
+PVRSRV_ERROR PDumpCommentWithFlagsVA(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     IMG_UINT32 ui32Flags,
+                                     const IMG_CHAR * pszFormat, va_list args)
+{
+       IMG_INT32 iCount;
+       PVRSRV_ERROR eErr = PVRSRV_OK;
+       PDUMP_GET_MSG_STRING();
+
+       /* Construct the string */
+       iCount = OSVSNPrintf(pszMsg, ui32MaxLen, pszFormat, args);
+       PVR_LOG_GOTO_IF_FALSE(((iCount != -1) && (iCount < ui32MaxLen)), "OSVSNPrintf", exit);
+
+       PDUMP_LOCK(ui32Flags);
+       eErr = _PDumpWriteComment(psDeviceNode, pszMsg, ui32Flags);
+       PDUMP_UNLOCK(ui32Flags);
+
+exit:
+       PDUMP_RELEASE_MSG_STRING();
+       return eErr;
+}
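+
+/*
+ * Illustrative usage only: a formatted comment such as
+ *
+ *   PDumpCommentWithFlags(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+ *                         "Allocated %u bytes", ui32Size);
+ *
+ * is expanded by OSVSNPrintf and written to the script as a
+ * "-- Allocated <n> bytes" line (prefixed with PID/line information when
+ * PDUMP_DEBUG_OUTFILES is defined).
+ */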
+
+/******************************************************************************
+ * Function Name  : PDumpCOMCommand
+ * Inputs         : ui32PDumpFlags - PDump flags
+ *                     : pszPdumpStr - string for COM command
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : PDumps a COM command
+******************************************************************************/
+PVRSRV_ERROR PDumpCOMCommand(PVRSRV_DEVICE_NODE *psDeviceNode,
+                             IMG_UINT32 ui32PDumpFlags,
+                             const IMG_CHAR * pszPdumpStr)
+{
+       PVRSRV_ERROR eErr;
+       PDUMP_GET_SCRIPT_STRING()
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "COM %s\n", pszPdumpStr);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING();
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32PDumpFlags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+       PDUMP_UNLOCK(ui32PDumpFlags);
+
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return PVRSRV_OK;
+}
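+
+/*
+ * Illustrative usage only: PDumpCOMCommand(psDeviceNode,
+ * PDUMP_FLAGS_CONTINUOUS, "Driver initialised") writes a
+ * "COM Driver initialised" line to the script stream.
+ */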
+
+/*************************************************************************/ /*!
+ * Function Name  : PDumpPanic
+ * Inputs         : ui32PanicNo - Unique number for panic condition
+ *                               : pszPanicMsg - Panic reason message limited to ~90 chars
+ *                               : pszPPFunc   - Function name string where panic occurred
+ *                               : ui32PPline  - Source line number where panic occurred
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : PDumps a panic assertion. Used when the host driver
+ *                : detects a condition that will lead to an invalid PDump
+ *                : script that cannot be played back off-line.
+ */ /*************************************************************************/
+PVRSRV_ERROR PDumpPanic(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                               IMG_UINT32      ui32PanicNo,
+                                               IMG_CHAR*       pszPanicMsg,
+                                               const IMG_CHAR* pszPPFunc,
+                                               IMG_UINT32      ui32PPline)
+{
+       PVRSRV_ERROR   eError = PVRSRV_OK;
+       PDUMP_FLAGS_T  uiPDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+       PDUMP_GET_SCRIPT_STRING();
+
+       /* Log the panic condition to the live kern.log in both REL and DEB mode
+        * to aid user PDump troubleshooting. */
+       PVR_LOG(("PDUMP PANIC %08x: %s", ui32PanicNo, pszPanicMsg));
+       PVR_DPF((PVR_DBG_MESSAGE, "PDUMP PANIC start %s:%d", pszPPFunc, ui32PPline));
+
+       /* Check the supplied panic reason string is within length limits */
+       PVR_ASSERT(OSStringLength(pszPanicMsg)+sizeof("PANIC   ") < PVRSRV_PDUMP_MAX_COMMENT_SIZE-1);
+
+       /* Obtain lock to keep the multi-line
+        * panic statement together in a single atomic write */
+       PDUMP_BLKSTART(uiPDumpFlags);
+
+
+       /* Write -- Panic start (Function:line) */
+       eError = PDumpSNPrintf(hScript, ui32MaxLen, "-- Panic start (%s:%d)", pszPPFunc, ui32PPline);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PDumpSNPrintf", e1);
+       (void)PDumpWriteScript(psDeviceNode, hScript, uiPDumpFlags);
+
+       /* Write COM messages */
+       eError = PDumpCOMCommand(psDeviceNode, uiPDumpFlags,
+                                 "**** Script invalid and not compatible with off-line playback. ****");
+       PVR_LOG_GOTO_IF_ERROR(eError, "PDumpCOMCommand", e1);
+
+       eError = PDumpCOMCommand(psDeviceNode, uiPDumpFlags,
+                                 "**** Check test parameters and driver configuration, stop imminent. ****");
+       PVR_LOG_GOTO_IF_ERROR(eError, "PDumpCOMCommand", e1);
+
+       /* Write PANIC no msg command */
+       eError = PDumpSNPrintf(hScript, ui32MaxLen, "PANIC %08x %s", ui32PanicNo, pszPanicMsg);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PDumpSNPrintf", e1);
+       (void)PDumpWriteScript(psDeviceNode, hScript, uiPDumpFlags);
+
+       /* Write -- Panic end */
+       eError = PDumpSNPrintf(hScript, ui32MaxLen, "-- Panic end");
+       PVR_LOG_GOTO_IF_ERROR(eError, "PDumpSNPrintf", e1);
+       (void)PDumpWriteScript(psDeviceNode, hScript, uiPDumpFlags);
+
+e1:
+       PDUMP_BLKEND(uiPDumpFlags);
+
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return eError;
+}
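+
+/*
+ * Illustrative output only (hypothetical panic number and message): a call
+ * such as PDumpPanic(psDeviceNode, 0x1234, "FW boot failed", __func__,
+ * __LINE__) produces a script block of the form:
+ *
+ *   -- Panic start (<function>:<line>)
+ *   COM **** Script invalid and not compatible with off-line playback. ****
+ *   COM **** Check test parameters and driver configuration, stop imminent. ****
+ *   PANIC 00001234 FW boot failed
+ *   -- Panic end
+ */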
+
+/*************************************************************************/ /*!
+ * Function Name  : PDumpCaptureError
+ * Inputs         : ui32ErrorNo - Unique number for the error condition
+ *                : pszErrorMsg - Error reason message limited to ~90 chars
+ *                : pszPPFunc   - Function name string where the error occurred
+ *                : ui32PPline  - Source line number where the error occurred
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : PDumps an error string to the script file to interrupt
+ *                : play back to inform user of a fatal issue that occurred
+ *                : during PDump capture.
+ */ /*************************************************************************/
+PVRSRV_ERROR PDumpCaptureError(PVRSRV_DEVICE_NODE *psDeviceNode,
+                               PVRSRV_ERROR       ui32ErrorNo,
+                               IMG_CHAR*          pszErrorMsg,
+                               const IMG_CHAR     *pszPPFunc,
+                               IMG_UINT32         ui32PPline)
+{
+       IMG_CHAR*       pszFormatStr = "DRIVER_ERROR: %3d: %s";
+       PDUMP_FLAGS_T   uiPDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+
+       /* Need to return an error using this macro */
+       PDUMP_GET_SCRIPT_STRING();
+
+       /* Check the supplied error reason string plus format prefix is within length limits */
+       PVR_ASSERT(OSStringLength(pszErrorMsg)+OSStringLength(pszFormatStr) < PVRSRV_PDUMP_MAX_COMMENT_SIZE-1);
+
+       /* Write driver error message to the script file */
+       (void) PDumpSNPrintf(hScript, ui32MaxLen, pszFormatStr, ui32ErrorNo, pszErrorMsg);
+
+       /* Obtain lock so the error message is
+        * written to the script in a single atomic write */
+       PDUMP_LOCK(uiPDumpFlags);
+       (void) PDumpWriteScript(psDeviceNode, hScript, uiPDumpFlags);
+       PDUMP_UNLOCK(uiPDumpFlags);
+
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return PVRSRV_OK;
+}
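+
+/*
+ * Illustrative output only: a call such as
+ *
+ *   PDumpCaptureError(psDeviceNode, eError, "FW boot failed",
+ *                     __func__, __LINE__);
+ *
+ * writes a single "DRIVER_ERROR: <n>: FW boot failed" script line, where <n>
+ * is the numeric value of the PVRSRV_ERROR code passed in.
+ */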
+
+/*!
+*******************************************************************************
+
+ @Function     PDumpImageDescriptor
+
+ @Description
+
+ Dumps an OutputImage command and its associated header info.
+
+ @Input    psDeviceNode                        : device
+ @Input    ui32MMUContextID            : MMU context
+ @Input    pszSABFileName              : filename string
+ @Input    (remaining parameters)      : surface geometry, pixel format and
+                                         FB compression state used to build
+                                         the image header
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PDumpImageDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                       IMG_UINT32 ui32MMUContextID,
+                                                                       IMG_CHAR *pszSABFileName,
+                                                                       IMG_DEV_VIRTADDR sData,
+                                                                       IMG_UINT32 ui32DataSize,
+                                                                       IMG_UINT32 ui32LogicalWidth,
+                                                                       IMG_UINT32 ui32LogicalHeight,
+                                                                       IMG_UINT32 ui32PhysicalWidth,
+                                                                       IMG_UINT32 ui32PhysicalHeight,
+                                                                       PDUMP_PIXEL_FORMAT ePixFmt,
+                                                                       IMG_MEMLAYOUT eMemLayout,
+                                                                       IMG_FB_COMPRESSION eFBCompression,
+                                                                       const IMG_UINT32 *paui32FBCClearColour,
+                                                                       PDUMP_FBC_SWIZZLE eFBCSwizzle,
+                                                                       IMG_DEV_VIRTADDR sHeader,
+                                                                       IMG_UINT32 ui32HeaderSize,
+                                                                       IMG_UINT32 ui32PDumpFlags)
+{
+#if !defined(SUPPORT_RGX)
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(ui32MMUContextID);
+       PVR_UNREFERENCED_PARAMETER(pszSABFileName);
+       PVR_UNREFERENCED_PARAMETER(sData);
+       PVR_UNREFERENCED_PARAMETER(ui32DataSize);
+       PVR_UNREFERENCED_PARAMETER(ui32LogicalWidth);
+       PVR_UNREFERENCED_PARAMETER(ui32LogicalHeight);
+       PVR_UNREFERENCED_PARAMETER(ui32PhysicalWidth);
+       PVR_UNREFERENCED_PARAMETER(ui32PhysicalHeight);
+       PVR_UNREFERENCED_PARAMETER(ePixFmt);
+       PVR_UNREFERENCED_PARAMETER(eMemLayout);
+       PVR_UNREFERENCED_PARAMETER(eFBCompression);
+       PVR_UNREFERENCED_PARAMETER(paui32FBCClearColour);
+       PVR_UNREFERENCED_PARAMETER(eFBCSwizzle);
+       PVR_UNREFERENCED_PARAMETER(sHeader);
+       PVR_UNREFERENCED_PARAMETER(ui32HeaderSize);
+       PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+
+       return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#else
+       PVRSRV_ERROR  eErr = PVRSRV_OK;
+       IMG_CHAR      *pszPDumpDevName = psDeviceNode->sDevId.pszPDumpDevName;
+       IMG_BYTE      abyPDumpDesc[IMAGE_HEADER_SIZE];
+       IMG_UINT32    ui32ParamOutPos, ui32SABOffset = 0;
+       IMG_BOOL      bRawImageData = IMG_FALSE;
+
+       PDUMP_GET_SCRIPT_AND_FILE_STRING();
+
+       if (pszSABFileName == NULL)
+       {
+               eErr = PVRSRV_ERROR_INVALID_PARAMS;
+               goto error_release_script;
+       }
+
+       /* Writing image descriptor to persistent buffer is not permitted */
+       if (ui32PDumpFlags & PDUMP_FLAGS_PERSISTENT)
+       {
+               goto error_release_script;
+       }
+
+       /* Prepare OutputImage descriptor header */
+       eErr = RGXPDumpPrepareOutputImageDescriptorHdr(psDeviceNode,
+                                                                       ui32HeaderSize,
+                                                                       ui32DataSize,
+                                                                       ui32LogicalWidth,
+                                                                       ui32LogicalHeight,
+                                                                       ui32PhysicalWidth,
+                                                                       ui32PhysicalHeight,
+                                                                       ePixFmt,
+                                                                       eMemLayout,
+                                                                       eFBCompression,
+                                                                       paui32FBCClearColour,
+                                                                       eFBCSwizzle,
+                                                                       &(abyPDumpDesc[0]));
+       PVR_LOG_GOTO_IF_ERROR(eErr, "RGXPDumpPrepareOutputImageDescriptorHdr", error_release_script);
+
+       PDUMP_LOCK(ui32PDumpFlags);
+
+       PDumpCommentWithFlagsNoLock(psDeviceNode, ui32PDumpFlags, "Dump Image descriptor");
+
+       bRawImageData =
+                (ePixFmt == PVRSRV_PDUMP_PIXEL_FORMAT_YUV8
+          || ePixFmt == PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12
+          || ePixFmt == PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV8
+          || ePixFmt == PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8
+          || ePixFmt == PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV10
+          || ePixFmt == PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV10
+          || ePixFmt == PVRSRV_PDUMP_PIXEL_FORMAT_VY0UY1_8888
+          || ePixFmt == PVRSRV_PDUMP_PIXEL_FORMAT_UY0VY1_8888
+          || ePixFmt == PVRSRV_PDUMP_PIXEL_FORMAT_Y0UY1V_8888
+          || ePixFmt == PVRSRV_PDUMP_PIXEL_FORMAT_Y0VY1U_8888);
+
+#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_FBCDC_SIGNATURE_CHECK)
+       {
+               PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+               /*
+                * The render data may be corrupted, so write out the raw
+                * image buffer to avoid errors in the post-processing tools.
+                */
+               bRawImageData |= (psDevInfo->ui32ValidationFlags & RGX_VAL_SIG_CHECK_ERR_EN);
+       }
+#endif
+
+       if (bRawImageData)
+       {
+               IMG_UINT32 ui32ElementType;
+               IMG_UINT32 ui32ElementCount;
+
+               PDumpCommentWithFlagsNoLock(psDeviceNode, ui32PDumpFlags,
+                                           "YUV data. Switching from OutputImage to SAB. Width=0x%08X Height=0x%08X",
+                                           ui32LogicalWidth, ui32LogicalHeight);
+
+               PDUMP_UNLOCK(ui32PDumpFlags);
+
+               PDUMP_RELEASE_SCRIPT_AND_FILE_STRING();
+
+               ui32ElementType = 0;
+               ui32ElementCount = 0;
+
+               /* Switch to CMD:OutputData with IBIN header. */
+               return PDumpDataDescriptor(psDeviceNode,
+                                                                  ui32MMUContextID,
+                                                                  pszSABFileName,
+                                                                  sData,
+                                                                  ui32DataSize,
+                                                                  IBIN_HEADER_TYPE,
+                                                                  ui32ElementType,
+                                                                  ui32ElementCount,
+                                                                  ui32PDumpFlags);
+       }
+
+       /* Write OutputImage descriptor header to parameter file */
+       eErr = PDumpWriteParameter(psDeviceNode,
+                                                          abyPDumpDesc,
+                                                          IMAGE_HEADER_SIZE,
+                                                          ui32PDumpFlags,
+                                                          &ui32ParamOutPos,
+                                                          pszFileName);
+       if (eErr != PVRSRV_OK)
+       {
+               if (eErr != PVRSRV_ERROR_PDUMP_NOT_ALLOWED)
+               {
+                       PDUMP_ERROR(psDeviceNode, eErr,
+                                   "Failed to write device allocation to parameter file");
+                       PVR_LOG_ERROR(eErr, "PDumpWriteParameter");
+               }
+               else
+               {
+                       /*
+                        * Write to parameter file prevented under the flags and
+                        * current state of the driver so skip write to script and return.
+                        */
+                       eErr = PVRSRV_OK;
+               }
+               goto error;
+       }
+
+       eErr = PDumpSNPrintf(hScript,
+                                                       ui32MaxLenScript,
+                                                       "MALLOC :%s:BINHEADER 0x%08X 0x%08X\n",
+                                                       pszPDumpDevName,
+                                                       IMAGE_HEADER_SIZE,
+                                                       IMAGE_HEADER_SIZE);
+       PVR_GOTO_IF_ERROR(eErr, error);
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+
+       eErr = PDumpSNPrintf(hScript,
+                                                       ui32MaxLenScript,
+                                                       "LDB :%s:BINHEADER:0x00 0x%08x 0x%08x %s\n",
+                                                       pszPDumpDevName,
+                                                       IMAGE_HEADER_SIZE,
+                                                       ui32ParamOutPos,
+                                                       pszFileName);
+       PVR_GOTO_IF_ERROR(eErr, error);
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+
+       eErr = PDumpSNPrintf(hScript,
+                                                       ui32MaxLenScript,
+                                                       "SAB :%s:BINHEADER:0x00 0x%08X 0x00000000 %s.bin\n",
+                                                       pszPDumpDevName,
+                                                       IMAGE_HEADER_SIZE,
+                                                       pszSABFileName);
+       PVR_GOTO_IF_ERROR(eErr, error);
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+
+       ui32SABOffset += IMAGE_HEADER_SIZE;
+
+       /*
+        * Write out the header section if image is FB compressed
+        */
+       if (eFBCompression != IMG_FB_COMPRESSION_NONE)
+       {
+               eErr = PDumpSNPrintf(hScript,
+                                                               ui32MaxLenScript,
+                                                               "SAB :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X %s.bin\n",
+                                                               pszPDumpDevName,
+                                                               ui32MMUContextID,
+                                                               (IMG_UINT64)sHeader.uiAddr,
+                                                               ui32HeaderSize,
+                                                               ui32SABOffset,
+                                                               pszSABFileName);
+               PVR_GOTO_IF_ERROR(eErr, error);
+               PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+
+               ui32SABOffset += ui32HeaderSize;
+       }
+
+       /*
+        * Now dump out the actual data associated with the surface
+        */
+       eErr = PDumpSNPrintf(hScript,
+                                                       ui32MaxLenScript,
+                                                       "SAB :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X %s.bin\n",
+                                                       pszPDumpDevName,
+                                                       ui32MMUContextID,
+                                                       (IMG_UINT64)sData.uiAddr,
+                                                       ui32DataSize,
+                                                       ui32SABOffset,
+                                                       pszSABFileName);
+
+       PVR_GOTO_IF_ERROR(eErr, error);
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+
+       /*
+        * The OutputImage command is required to trigger processing of the output
+        * data
+        */
+       eErr = PDumpSNPrintf(hScript,
+                                                       ui32MaxLenScript,
+                                                       "CMD:OutputImage %s.bin\n",
+                                                       pszSABFileName);
+       PVR_GOTO_IF_ERROR(eErr, error);
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+
+       eErr = PDumpSNPrintf(hScript,
+                                                       ui32MaxLenScript,
+                                                       "FREE :%s:BINHEADER\n",
+                                                       pszPDumpDevName);
+       PVR_GOTO_IF_ERROR(eErr, error);
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+
+error:
+       PDUMP_UNLOCK(ui32PDumpFlags);
+error_release_script:
+       PDUMP_RELEASE_SCRIPT_AND_FILE_STRING()
+       return eErr;
+#endif
+}
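+
+/*
+ * Illustrative output only (hypothetical names and sizes): for a non-YUV,
+ * uncompressed surface the script sequence emitted above has the shape:
+ *
+ *   MALLOC :<dev>:BINHEADER <hdr size> <hdr size>
+ *   LDB :<dev>:BINHEADER:0x00 <hdr size> <param offset> <param file>
+ *   SAB :<dev>:BINHEADER:0x00 <hdr size> 0x00000000 <name>.bin
+ *   SAB :<dev>:v<ctx>:<data vaddr> <data size> <offset> <name>.bin
+ *   CMD:OutputImage <name>.bin
+ *   FREE :<dev>:BINHEADER
+ *
+ * YUV formats (and, with SUPPORT_VALIDATION, FBCDC signature-check failures)
+ * are redirected to PDumpDataDescriptor with an IBIN header instead.
+ */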
+
+/*!
+*******************************************************************************
+
+ @Function     PDumpDataDescriptor
+
+ @Description
+
+ Dumps an OutputData command and its associated header info.
+
+ @Input    psDeviceNode         : device
+ @Input    ui32MMUContextID     : MMU context
+ @Input    pszSABFileName       : filename string
+ @Input    sData                : GPU virtual address of data
+ @Input    ui32HeaderType       : Header type
+ @Input    ui32DataSize         : Data size
+ @Input    ui32ElementType      : Element type being dumped
+ @Input    ui32ElementCount     : Number of elements to be dumped
+ @Input    ui32PDumpFlags       : PDump flags
+
+ @Return   PVRSRV_ERROR         :
+
+******************************************************************************/
+PVRSRV_ERROR PDumpDataDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                       IMG_UINT32 ui32MMUContextID,
+                                                                       IMG_CHAR *pszSABFileName,
+                                                                       IMG_DEV_VIRTADDR sData,
+                                                                       IMG_UINT32 ui32DataSize,
+                                                                       IMG_UINT32 ui32HeaderType,
+                                                                       IMG_UINT32 ui32ElementType,
+                                                                       IMG_UINT32 ui32ElementCount,
+                                                                       IMG_UINT32 ui32PDumpFlags)
+{
+#if !defined(SUPPORT_RGX)
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(ui32MMUContextID);
+       PVR_UNREFERENCED_PARAMETER(pszSABFileName);
+       PVR_UNREFERENCED_PARAMETER(sData);
+       PVR_UNREFERENCED_PARAMETER(ui32DataSize);
+       PVR_UNREFERENCED_PARAMETER(ui32HeaderType);
+       PVR_UNREFERENCED_PARAMETER(ui32ElementType);
+       PVR_UNREFERENCED_PARAMETER(ui32ElementCount);
+       PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+
+       return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#else
+       PVRSRV_ERROR   eErr = PVRSRV_OK;
+       IMG_CHAR       *pszPDumpDevName = psDeviceNode->sDevId.pszPDumpDevName;
+       IMG_BYTE       abyPDumpDesc[DATA_HEADER_SIZE];
+       IMG_UINT32     ui32ParamOutPos, ui32SABOffset = 0;
+       IMG_UINT32     ui32HeaderSize;
+
+       PDUMP_GET_SCRIPT_AND_FILE_STRING();
+
+       PVR_GOTO_IF_INVALID_PARAM(pszSABFileName, eErr, error_release_script);
+
+       if (ui32HeaderType == DATA_HEADER_TYPE)
+       {
+               ui32HeaderSize = DATA_HEADER_SIZE;
+       }
+       else if (ui32HeaderType == IBIN_HEADER_TYPE)
+       {
+               ui32HeaderSize = IBIN_HEADER_SIZE;
+       }
+       else
+       {
+               PVR_GOTO_WITH_ERROR(eErr, PVRSRV_ERROR_INVALID_PARAMS, error_release_script);
+       }
+
+       /* Writing data descriptor to persistent buffer is not permitted */
+       if (ui32PDumpFlags & PDUMP_FLAGS_PERSISTENT)
+       {
+               goto error_release_script;
+       }
+
+       /* Prepare OutputData descriptor header */
+       eErr = RGXPDumpPrepareOutputDataDescriptorHdr(psDeviceNode,
+                                                                       ui32HeaderType,
+                                                                       ui32DataSize,
+                                                                       ui32ElementType,
+                                                                       ui32ElementCount,
+                                                                       &(abyPDumpDesc[0]));
+       PVR_LOG_GOTO_IF_ERROR(eErr, "RGXPDumpPrepareOutputDataDescriptorHdr", error_release_script);
+
+       PDUMP_LOCK(ui32PDumpFlags);
+
+       PDumpCommentWithFlagsNoLock(psDeviceNode, ui32PDumpFlags, "Dump Data descriptor");
+
+       /* Write OutputImage command header to parameter file */
+       eErr = PDumpWriteParameter(psDeviceNode,
+                                                          abyPDumpDesc,
+                                                          ui32HeaderSize,
+                                                          ui32PDumpFlags,
+                                                          &ui32ParamOutPos,
+                                                          pszFileName);
+       if (eErr != PVRSRV_OK)
+       {
+               if (eErr != PVRSRV_ERROR_PDUMP_NOT_ALLOWED)
+               {
+                       PDUMP_ERROR(psDeviceNode, eErr,
+                                   "Failed to write device allocation to parameter file");
+                       PVR_LOG_ERROR(eErr, "PDumpWriteParameter");
+               }
+               else
+               {
+                       /*
+                        * Write to parameter file prevented under the flags and
+                        * current state of the driver so skip write to script and return.
+                        */
+                       eErr = PVRSRV_OK;
+               }
+               goto error;
+       }
+
+       eErr = PDumpSNPrintf(hScript,
+                                                       ui32MaxLenScript,
+                                                       "MALLOC :%s:BINHEADER 0x%08X 0x%08X\n",
+                                                       pszPDumpDevName,
+                                                       ui32HeaderSize,
+                                                       ui32HeaderSize);
+       PVR_GOTO_IF_ERROR(eErr, error);
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+
+       eErr = PDumpSNPrintf(hScript,
+                                                       ui32MaxLenScript,
+                                                       "LDB :%s:BINHEADER:0x00 0x%08x 0x%08x %s\n",
+                                                       pszPDumpDevName,
+                                                       ui32HeaderSize,
+                                                       ui32ParamOutPos,
+                                                       pszFileName);
+       PVR_GOTO_IF_ERROR(eErr, error);
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+
+       eErr = PDumpSNPrintf(hScript,
+                                                       ui32MaxLenScript,
+                                                       "SAB :%s:BINHEADER:0x00 0x%08X 0x00000000 %s.bin\n",
+                                                       pszPDumpDevName,
+                                                       ui32HeaderSize,
+                                                       pszSABFileName);
+       PVR_GOTO_IF_ERROR(eErr, error);
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+
+       ui32SABOffset += ui32HeaderSize;
+
+       /*
+        * Now dump out the actual data associated
+        */
+       eErr = PDumpSNPrintf(hScript,
+                                                       ui32MaxLenScript,
+                                                       "SAB :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X %s.bin\n",
+                                                       pszPDumpDevName,
+                                                       ui32MMUContextID,
+                                                       (IMG_UINT64)sData.uiAddr,
+                                                       ui32DataSize,
+                                                       ui32SABOffset,
+                                                       pszSABFileName);
+
+       PVR_GOTO_IF_ERROR(eErr, error);
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+
+       /*
+        * The OutputData command is required to trigger processing of the output
+        * data
+        */
+       eErr = PDumpSNPrintf(hScript,
+                                                       ui32MaxLenScript,
+                                                       "CMD:OutputData %s.bin\n",
+                                                       pszSABFileName);
+       PVR_GOTO_IF_ERROR(eErr, error);
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+
+       eErr = PDumpSNPrintf(hScript,
+                                                       ui32MaxLenScript,
+                                                       "FREE :%s:BINHEADER\n",
+                                                       pszPDumpDevName);
+       PVR_GOTO_IF_ERROR(eErr, error);
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+
+error:
+       PDUMP_UNLOCK(ui32PDumpFlags);
+error_release_script:
+       PDUMP_RELEASE_SCRIPT_AND_FILE_STRING()
+       return eErr;
+#endif
+}
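+
+/*
+ * Illustrative output only: the sequence mirrors PDumpImageDescriptor above,
+ * but with a DATA or IBIN header (selected by ui32HeaderType) and a final
+ * "CMD:OutputData <name>.bin" line instead of CMD:OutputImage.
+ */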
+
+/*!
+*******************************************************************************
+
+ @Function     PDumpReadRegKM
+
+ @Description
+
+ Dumps a read from a device register to a file
+
+ @Input    psDeviceNode                        : device
+ @Input    pszPDumpRegName                     : PDump register space name
+ @Input    pszFileName
+ @Input    ui32FileOffset
+ @Input    ui32Address
+ @Input    ui32Size
+ @Input    ui32PDumpFlags
+
+ @Return   PVRSRV_ERROR                        :
+
+******************************************************************************/
+PVRSRV_ERROR PDumpReadRegKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+                            IMG_CHAR *pszPDumpRegName,
+                            IMG_CHAR *pszFileName,
+                            IMG_UINT32 ui32FileOffset,
+                            IMG_UINT32 ui32Address,
+                            IMG_UINT32 ui32Size,
+                            IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eErr;
+       PDUMP_GET_SCRIPT_STRING();
+
+       PVR_UNREFERENCED_PARAMETER(ui32Size);
+
+       eErr = PDumpSNPrintf(hScript,
+                       ui32MaxLen,
+                       "SAB :%s:0x%08X 0x%08X %s",
+                       pszPDumpRegName,
+                       ui32Address,
+                       ui32FileOffset,
+                       pszFileName);
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING();
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32PDumpFlags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+       PDUMP_UNLOCK(ui32PDumpFlags);
+
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return PVRSRV_OK;
+}
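+
+/*
+ * Illustrative usage only (hypothetical register space and file names):
+ *
+ *   eErr = PDumpReadRegKM(psDeviceNode, "RGXREG", "out.prm", 0x0,
+ *                         0x1A00, 4, PDUMP_FLAGS_CONTINUOUS);
+ *
+ * emits "SAB :RGXREG:0x00001A00 0x00000000 out.prm".
+ */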
+
+/******************************************************************************
+ * Function Name  : PDumpRegRead32ToInternalVar
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Creates a PDUMP string which reads a register into an
+ *                  internal variable
+******************************************************************************/
+PVRSRV_ERROR PDumpRegRead32ToInternalVar(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                       IMG_CHAR *pszPDumpRegName,
+                                                       IMG_UINT32 ui32Reg,
+                                                       IMG_CHAR *pszInternalVar,
+                                                       IMG_UINT32 ui32Flags)
+
+{
+       PVRSRV_ERROR eErr;
+       PDUMP_GET_SCRIPT_STRING()
+
+       eErr = PDumpSNPrintf(hScript,
+                                                       ui32MaxLen,
+                                                       "RDW %s :%s:0x%08X",
+                                                       pszInternalVar,
+                                                       pszPDumpRegName,
+                                                       ui32Reg);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING();
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32Flags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+       PDUMP_UNLOCK(ui32Flags);
+
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return PVRSRV_OK;
+}
+
+/******************************************************************************
+ @name         PDumpRegRead32
+ @brief                Dump 32-bit register read to script
+ @param                pszPDumpRegName - PDump register space name
+ @param                ui32RegOffset - register offset
+ @param                ui32Flags - pdump flags
+ @return       Error
+******************************************************************************/
+PVRSRV_ERROR PDumpRegRead32(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                       IMG_CHAR *pszPDumpRegName,
+                                                       const IMG_UINT32 ui32RegOffset,
+                                                       IMG_UINT32 ui32Flags)
+{
+       PVRSRV_ERROR eErr;
+       PDUMP_GET_SCRIPT_STRING();
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "RDW :%s:0x%X",
+                                                       pszPDumpRegName,
+                                                       ui32RegOffset);
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING();
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32Flags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+       PDUMP_UNLOCK(ui32Flags);
+
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return PVRSRV_OK;
+}
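+
+/*
+ * Illustrative output only (hypothetical register space name):
+ * PDumpRegRead32(psDeviceNode, "RGXREG", 0x20, PDUMP_FLAGS_CONTINUOUS)
+ * emits "RDW :RGXREG:0x20".
+ */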
+
+/******************************************************************************
+ @name      PDumpRegRead64ToInternalVar
+ @brief     Read 64-bit register into an internal variable
+ @param     pszPDumpRegName - PDump register space name
+ @param     pszInternalVar - internal variable to read into
+ @param     ui32RegOffset - register offset
+ @param     ui32Flags - pdump flags
+ @return    Error
+******************************************************************************/
+PVRSRV_ERROR PDumpRegRead64ToInternalVar(PVRSRV_DEVICE_NODE *psDeviceNode,
+                            IMG_CHAR *pszPDumpRegName,
+                            IMG_CHAR *pszInternalVar,
+                            const IMG_UINT32 ui32RegOffset,
+                            IMG_UINT32 ui32Flags)
+{
+       PVRSRV_ERROR eErr;
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+       IMG_CHAR *pszPDumpVarName;
+#endif
+       PDUMP_GET_SCRIPT_STRING();
+
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "RDW %s :%s:0x%X",
+                            pszInternalVar,
+                            pszPDumpRegName,
+                            ui32RegOffset);
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING();
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32Flags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+
+       pszPDumpVarName = PDumpCreateIncVarNameStr(pszInternalVar);
+       if (pszPDumpVarName == NULL)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               PDUMP_UNLOCK(ui32Flags);
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "RDW %s :%s:0x%X",
+                               pszPDumpVarName,
+                               pszPDumpRegName,
+                               ui32RegOffset + 4);
+
+       PDumpFreeIncVarNameStr(pszPDumpVarName);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING();
+               PDUMP_UNLOCK(ui32Flags);
+               return eErr;
+       }
+
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+       PDUMP_UNLOCK(ui32Flags);
+
+#else
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "RDW64 %s :%s:0x%X",
+                               pszInternalVar,
+                               pszPDumpRegName,
+                               ui32RegOffset);
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING();
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32Flags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+       PDUMP_UNLOCK(ui32Flags);
+#endif
+
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return PVRSRV_OK;
+}
+
+
+/******************************************************************************
+ @name         PDumpRegRead64
+ @brief                Dump 64-bit register read to script
+ @param                pszPDumpRegName - PDump register space name
+ @param                ui32RegOffset - register offset
+ @param                ui32Flags - pdump flags
+ @return       Error
+******************************************************************************/
+PVRSRV_ERROR PDumpRegRead64(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                       IMG_CHAR *pszPDumpRegName,
+                                                       const IMG_UINT32 ui32RegOffset,
+                                                       IMG_UINT32 ui32Flags)
+{
+       PVRSRV_ERROR eErr;
+       PDUMP_GET_SCRIPT_STRING();
+
+#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "RDW :%s:0x%X",
+                                                       pszPDumpRegName, ui32RegOffset);
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               return eErr;
+       }
+       PDUMP_LOCK(ui32Flags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "RDW :%s:0x%X",
+                                                       pszPDumpRegName, ui32RegOffset + 4);
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING()
+               PDUMP_UNLOCK(ui32Flags);
+               return eErr;
+       }
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+       PDUMP_UNLOCK(ui32Flags);
+#else
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "RDW64 :%s:0x%X",
+                                                       pszPDumpRegName,
+                                                       ui32RegOffset);
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING();
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32Flags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+       PDUMP_UNLOCK(ui32Flags);
+#endif
+
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return PVRSRV_OK;
+}
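+
+/*
+ * Illustrative output only (hypothetical register space name): a 64-bit read
+ * at offset 0x40 emits either a single "RDW64 :RGXREG:0x40" line or, when
+ * PDUMP_SPLIT_64BIT_REGISTER_ACCESS is defined, two 32-bit reads
+ * "RDW :RGXREG:0x40" and "RDW :RGXREG:0x44" for the lower and upper words.
+ */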
+
+
+/******************************************************************************
+ FUNCTION      : PDumpWriteShiftedMaskedValue
+
+ PURPOSE       : Emits the PDump commands for writing a masked shifted address
+              into another location
+
+ PARAMETERS    : PDump symbolic name and offset of target word
+              PDump symbolic name and offset of source address
+              right shift amount
+              left shift amount
+              mask
+
+ RETURNS       : PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR
+PDumpWriteShiftedMaskedValue(PVRSRV_DEVICE_NODE *psDeviceNode,
+                             const IMG_CHAR *pszDestRegspaceName,
+                             const IMG_CHAR *pszDestSymbolicName,
+                             IMG_DEVMEM_OFFSET_T uiDestOffset,
+                             const IMG_CHAR *pszRefRegspaceName,
+                             const IMG_CHAR *pszRefSymbolicName,
+                             IMG_DEVMEM_OFFSET_T uiRefOffset,
+                             IMG_UINT32 uiSHRAmount,
+                             IMG_UINT32 uiSHLAmount,
+                             IMG_UINT32 uiMask,
+                             IMG_DEVMEM_SIZE_T uiWordSize,
+                             IMG_UINT32 uiPDumpFlags)
+{
+       PVRSRV_ERROR         eError;
+
+       /* Suffix of WRW command in PDump (i.e. WRW or WRW64) */
+       const IMG_CHAR       *pszWrwSuffix;
+
+       /* Internal PDump register used for interim calculation */
+       const IMG_CHAR       *pszPDumpIntRegSpace;
+       IMG_UINT32           uiPDumpIntRegNum;
+
+       PDUMP_GET_SCRIPT_STRING();
+
+       if ((uiWordSize != 4) && (uiWordSize != 8))
+       {
+               PDUMP_RELEASE_SCRIPT_STRING();
+               return PVRSRV_ERROR_NOT_SUPPORTED;
+       }
+
+       pszWrwSuffix = (uiWordSize == 8) ? "64" : "";
+
+       /* Should really "Acquire" a pdump register here */
+       pszPDumpIntRegSpace = pszDestRegspaceName;
+       uiPDumpIntRegNum = 1;
+
+       PDUMP_LOCK(uiPDumpFlags);
+       eError = PDumpSNPrintf(hScript,
+                      ui32MaxLen,
+                      /* Should this be "MOV" instead? */
+                      "WRW :%s:$%d :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC "\n",
+                      /* dest */
+                      pszPDumpIntRegSpace,
+                      uiPDumpIntRegNum,
+                      /* src */
+                      pszRefRegspaceName,
+                      pszRefSymbolicName,
+                      uiRefOffset);
+       PVR_GOTO_IF_ERROR(eError, ErrUnlock);
+
+       PDumpWriteScript(psDeviceNode, hScript, uiPDumpFlags);
+
+       if (uiSHRAmount > 0)
+       {
+               eError = PDumpSNPrintf(hScript,
+                              ui32MaxLen,
+                              "SHR :%s:$%d :%s:$%d 0x%X\n",
+                              /* dest */
+                              pszPDumpIntRegSpace,
+                              uiPDumpIntRegNum,
+                              /* src A */
+                              pszPDumpIntRegSpace,
+                              uiPDumpIntRegNum,
+                              /* src B */
+                              uiSHRAmount);
+               PVR_GOTO_IF_ERROR(eError, ErrUnlock);
+               PDumpWriteScript(psDeviceNode, hScript, uiPDumpFlags);
+       }
+
+       if (uiSHLAmount > 0)
+       {
+               eError = PDumpSNPrintf(hScript,
+                              ui32MaxLen,
+                              "SHL :%s:$%d :%s:$%d 0x%X\n",
+                              /* dest */
+                              pszPDumpIntRegSpace,
+                              uiPDumpIntRegNum,
+                              /* src A */
+                              pszPDumpIntRegSpace,
+                              uiPDumpIntRegNum,
+                              /* src B */
+                              uiSHLAmount);
+               PVR_GOTO_IF_ERROR(eError, ErrUnlock);
+               PDumpWriteScript(psDeviceNode, hScript, uiPDumpFlags);
+       }
+
+       /* Only apply a mask when it does not already cover the whole word;
+        * skip the shift for 8-byte words to avoid an undefined 64-bit shift. */
+       if ((uiWordSize == 8) || (uiMask != ((1ULL << (8*uiWordSize)) - 1)))
+       {
+               eError = PDumpSNPrintf(hScript,
+                              ui32MaxLen,
+                              "AND :%s:$%d :%s:$%d 0x%X\n",
+                              /* dest */
+                              pszPDumpIntRegSpace,
+                              uiPDumpIntRegNum,
+                              /* src A */
+                              pszPDumpIntRegSpace,
+                              uiPDumpIntRegNum,
+                              /* src B */
+                              uiMask);
+               PVR_GOTO_IF_ERROR(eError, ErrUnlock);
+               PDumpWriteScript(psDeviceNode, hScript, uiPDumpFlags);
+       }
+
+       eError = PDumpSNPrintf(hScript,
+                      ui32MaxLen,
+                      "WRW%s :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " :%s:$%d\n",
+                      pszWrwSuffix,
+                      /* dest */
+                      pszDestRegspaceName,
+                      pszDestSymbolicName,
+                      uiDestOffset,
+                      /* src */
+                      pszPDumpIntRegSpace,
+                      uiPDumpIntRegNum);
+       PVR_GOTO_IF_ERROR(eError, ErrUnlock);
+       PDumpWriteScript(psDeviceNode, hScript, uiPDumpFlags);
+
+ErrUnlock:
+       PDUMP_UNLOCK(uiPDumpFlags);
+       PDUMP_RELEASE_SCRIPT_STRING();
+
+       return eError;
+}
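+/* Illustrative only (values and symbolic names below are hypothetical, and the
+ * exact offset formatting depends on IMG_DEVMEM_OFFSET_FMTSPEC): for a 4-byte
+ * word with uiSHRAmount = 4, uiSHLAmount = 0 and uiMask = 0x3FFFFFF0, the
+ * function above would emit a script sequence of the form:
+ *
+ *     WRW :DESTSPACE:$1 :REFSPACE:REF_SYM:0x10
+ *     SHR :DESTSPACE:$1 :DESTSPACE:$1 0x4
+ *     AND :DESTSPACE:$1 :DESTSPACE:$1 0x3FFFFFF0
+ *     WRW :DESTSPACE:DEST_SYM:0x0 :DESTSPACE:$1
+ */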
+
+
+PVRSRV_ERROR
+PDumpWriteSymbAddress(PVRSRV_DEVICE_NODE *psDeviceNode,
+                      const IMG_CHAR *pszDestSpaceName,
+                      IMG_DEVMEM_OFFSET_T uiDestOffset,
+                      const IMG_CHAR *pszRefSymbolicName,
+                      IMG_DEVMEM_OFFSET_T uiRefOffset,
+                      const IMG_CHAR *pszPDumpDevName,
+                      IMG_UINT32 ui32WordSize,
+                      IMG_UINT32 ui32AlignShift,
+                      IMG_UINT32 ui32Shift,
+                      IMG_UINT32 uiPDumpFlags)
+{
+       const IMG_CHAR       *pszWrwSuffix = "";
+       PVRSRV_ERROR         eError = PVRSRV_OK;
+
+       PDUMP_GET_SCRIPT_STRING();
+
+       if (ui32WordSize == 8)
+       {
+               pszWrwSuffix = "64";
+       }
+
+       PDUMP_LOCK(uiPDumpFlags);
+
+       if (ui32AlignShift != ui32Shift)
+       {
+               /* Write physical address into a variable */
+               eError = PDumpSNPrintf(hScript,
+                                      ui32MaxLen,
+                                      "WRW%s :%s:$1 %s:" IMG_DEVMEM_OFFSET_FMTSPEC "\n",
+                                      pszWrwSuffix,
+                                      /* dest */
+                                      pszPDumpDevName,
+                                      /* src */
+                                      pszRefSymbolicName,
+                                      uiRefOffset);
+               PVR_GOTO_IF_ERROR(eError, symbAddress_error);
+               PDumpWriteScript(psDeviceNode, hScript, uiPDumpFlags);
+
+               /* apply address alignment */
+               eError = PDumpSNPrintf(hScript,
+                                      ui32MaxLen,
+                                      "SHR :%s:$1 :%s:$1 0x%X",
+                                      /* dest */
+                                      pszPDumpDevName,
+                                      /* src A */
+                                      pszPDumpDevName,
+                                      /* src B */
+                                      ui32AlignShift);
+               PVR_GOTO_IF_ERROR(eError, symbAddress_error);
+               PDumpWriteScript(psDeviceNode, hScript, uiPDumpFlags);
+
+               /* apply address shift */
+               eError = PDumpSNPrintf(hScript,
+                                      ui32MaxLen,
+                                      "SHL :%s:$1 :%s:$1 0x%X",
+                                      /* dest */
+                                      pszPDumpDevName,
+                                      /* src A */
+                                      pszPDumpDevName,
+                                      /* src B */
+                                      ui32Shift);
+               PVR_GOTO_IF_ERROR(eError, symbAddress_error);
+               PDumpWriteScript(psDeviceNode, hScript, uiPDumpFlags);
+
+
+               /* write result to register */
+               eError = PDumpSNPrintf(hScript,
+                                      ui32MaxLen,
+                                      "WRW%s :%s:0x%08X :%s:$1",
+                                      pszWrwSuffix,
+                                      pszDestSpaceName,
+                                      (IMG_UINT32)uiDestOffset,
+                                      pszPDumpDevName);
+               PVR_GOTO_IF_ERROR(eError, symbAddress_error);
+               PDumpWriteScript(psDeviceNode, hScript, uiPDumpFlags);
+       }
+       else
+       {
+               eError = PDumpSNPrintf(hScript,
+                                      ui32MaxLen,
+                                      "WRW%s :%s:" IMG_DEVMEM_OFFSET_FMTSPEC " %s:" IMG_DEVMEM_OFFSET_FMTSPEC "\n",
+                                      pszWrwSuffix,
+                                      /* dest */
+                                      pszDestSpaceName,
+                                      uiDestOffset,
+                                      /* src */
+                                      pszRefSymbolicName,
+                                      uiRefOffset);
+               PVR_GOTO_IF_ERROR(eError, symbAddress_error);
+               PDumpWriteScript(psDeviceNode, hScript, uiPDumpFlags);
+       }
+
+symbAddress_error:
+       PDUMP_UNLOCK(uiPDumpFlags);
+       PDUMP_RELEASE_SCRIPT_STRING();
+
+       return eError;
+}
+
+/******************************************************************************
+ * Function Name  : PDumpIDLWithFlags
+ * Inputs         : Idle time in clocks
+ * Outputs        : None
+ * Returns        : Error
+ * Description    : Dump IDL command to script
+******************************************************************************/
+PVRSRV_ERROR PDumpIDLWithFlags(PVRSRV_DEVICE_NODE *psDeviceNode,
+                               IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags)
+{
+       PVRSRV_ERROR eErr;
+       PDUMP_GET_SCRIPT_STRING();
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "IDL %u", ui32Clocks);
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING();
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32Flags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+       PDUMP_UNLOCK(ui32Flags);
+
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return PVRSRV_OK;
+}
+
+
+/******************************************************************************
+ * Function Name  : PDumpIDL
+ * Inputs         : Idle time in clocks
+ * Outputs        : None
+ * Returns        : Error
+ * Description    : Dump IDL command to script
+******************************************************************************/
+PVRSRV_ERROR PDumpIDL(PVRSRV_DEVICE_NODE *psDeviceNode,
+                      IMG_UINT32 ui32Clocks)
+{
+       return PDumpIDLWithFlags(psDeviceNode, ui32Clocks, PDUMP_FLAGS_CONTINUOUS);
+}
+
+/******************************************************************************
+ * Function Name  : PDumpRegBasedCBP
+ * Inputs         : pszPDumpRegName, ui32RegOffset, ui32WPosVal, ui32PacketSize
+ *                  ui32BufferSize, ui32Flags
+ * Outputs        : None
+ * Returns        : Error
+ * Description    : Dump CBP command to script
+******************************************************************************/
+PVRSRV_ERROR PDumpRegBasedCBP(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                         IMG_CHAR              *pszPDumpRegName,
+                                                         IMG_UINT32    ui32RegOffset,
+                                                         IMG_UINT32    ui32WPosVal,
+                                                         IMG_UINT32    ui32PacketSize,
+                                                         IMG_UINT32    ui32BufferSize,
+                                                         IMG_UINT32    ui32Flags)
+{
+       PVRSRV_ERROR eErr;
+       PDUMP_GET_SCRIPT_STRING();
+
+       eErr = PDumpSNPrintf(hScript,
+                        ui32MaxLen,
+                        "CBP :%s:0x%08X 0x%08X 0x%08X 0x%08X",
+                        pszPDumpRegName,
+                        ui32RegOffset,
+                        ui32WPosVal,
+                        ui32PacketSize,
+                        ui32BufferSize);
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING();
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32Flags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+       PDUMP_UNLOCK(ui32Flags);
+
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpTRG(PVRSRV_DEVICE_NODE *psDeviceNode,
+                      IMG_CHAR *pszMemSpace,
+                      IMG_UINT32 ui32MMUCtxID,
+                      IMG_UINT32 ui32RegionID,
+                      IMG_BOOL bEnable,
+                      IMG_UINT64 ui64VAddr,
+                      IMG_UINT64 ui64LenBytes,
+                      IMG_UINT32 ui32XStride,
+                      IMG_UINT32 ui32Flags)
+{
+       PVRSRV_ERROR eErr;
+       PDUMP_GET_SCRIPT_STRING();
+
+       if (bEnable)
+       {
+               eErr = PDumpSNPrintf(hScript, ui32MaxLen,
+                                    "TRG :%s:v%u %u 0x%08"IMG_UINT64_FMTSPECX" 0x%08"IMG_UINT64_FMTSPECX" %u",
+                                    pszMemSpace, ui32MMUCtxID, ui32RegionID,
+                                    ui64VAddr, ui64LenBytes, ui32XStride);
+       }
+       else
+       {
+               eErr = PDumpSNPrintf(hScript, ui32MaxLen,
+                                    "TRG :%s:v%u %u",
+                                    pszMemSpace, ui32MMUCtxID, ui32RegionID);
+
+       }
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING();
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32Flags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+       PDUMP_UNLOCK(ui32Flags);
+
+       PDUMP_RELEASE_SCRIPT_STRING();
+
+       return PVRSRV_OK;
+}
+
+/******************************************************************************
+ * Function Name  : PDumpConnectionNotify
+ * Description    : Called by the srvcore to tell PDump core that the
+ *                  PDump capture and control client has connected
+******************************************************************************/
+void PDumpConnectionNotify(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+#if defined(TL_BUFFER_STATS)
+       PVRSRV_ERROR            eErr;
+#endif
+
+       OSAtomicIncrement(&g_sConnectionCount);
+
+       /* Reset the parameter file attributes */
+       g_PDumpParameters.sWOff.ui32Main = g_PDumpParameters.sWOff.ui32Init;
+       g_PDumpParameters.ui32FileIdx = 0;
+
+       /* Reset the script file attributes */
+       g_PDumpScript.ui32FileIdx = 0;
+
+       /* The Main script & parameter buffers should be empty after the previous
+        * PDump capture, provided it completed correctly.
+        * While no PDump client is connected there is no capture range, so
+        * PDumpWriteAllowed() blocks both continuous-flagged and regular writes
+        * to the Main buffers.
+        */
+       if (!TLStreamOutOfData(g_PDumpParameters.sCh.sMainStream.hTL)) /* !empty */
+       {
+               PVR_DPF((PVR_DBG_ERROR, "PDump Main parameter buffer not empty, capture will be corrupt!"));
+       }
+       if (!TLStreamOutOfData(g_PDumpScript.sCh.sMainStream.hTL)) /* !empty */
+       {
+               PVR_DPF((PVR_DBG_ERROR, "PDump Main script buffer not empty, capture will be corrupt!"));
+       }
+
+#if defined(TL_BUFFER_STATS)
+       eErr = TLStreamResetProducerByteCount(g_PDumpParameters.sCh.sMainStream.hTL, g_PDumpParameters.sWOff.ui32Init);
+       PVR_LOG_IF_ERROR(eErr, "TLStreamResetByteCount Parameter Main");
+
+       eErr = TLStreamResetProducerByteCount(g_PDumpScript.sCh.sMainStream.hTL, 0);
+       PVR_LOG_IF_ERROR(eErr, "TLStreamResetByteCount Script Main");
+#endif
+
+       if (psDeviceNode->pfnPDumpInitDevice)
+       {
+               /* Reset pdump according to connected device */
+               psDeviceNode->pfnPDumpInitDevice(psDeviceNode);
+       }
+}
+
+/******************************************************************************
+ * Function Name  : PDumpDisconnectionNotify
+ * Description    : Called by the connection_server to tell PDump core that
+ *                  the PDump capture and control client has disconnected
+******************************************************************************/
+void PDumpDisconnectionNotify(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR eErr;
+
+       if (PDumpCtrlCaptureOn())
+       {
+               PVR_LOG(("pdump killed, capture files may be invalid or incomplete!"));
+
+               /* Disable capture in server, in case PDump client was killed and did
+                * not get a chance to reset the capture parameters.
+                * Will set module state back to READY.
+                */
+               eErr = PDumpSetDefaultCaptureParamsKM(NULL, psDeviceNode, PDUMP_CAPMODE_UNSET,
+                                                     PDUMP_FRAME_UNSET, PDUMP_FRAME_UNSET, 0, 0);
+               PVR_LOG_IF_ERROR(eErr, "PDumpSetDefaultCaptureParamsKM");
+       }
+}
+
+/******************************************************************************
+ * Function Name  : PDumpRegCondStr
+ * Inputs         : pszPDumpRegName - register memspace name
+ *                  ui32RegAddr     - register offset
+ *                  ui32RegValue    - expected value
+ *                  ui32Mask        - mask applied to that value
+ * Outputs        : ppszPDumpCond   - PDump conditional test for use with
+ *                                    'IF' and 'DOW'
+ * Returns        : Error
+ * Description    : Create a PDUMP conditional test. The string is allocated
+ *                  on the heap and should be freed by the caller on success.
+******************************************************************************/
+PVRSRV_ERROR PDumpRegCondStr(IMG_CHAR            **ppszPDumpCond,
+                             IMG_CHAR            *pszPDumpRegName,
+                             IMG_UINT32          ui32RegAddr,
+                             IMG_UINT32          ui32RegValue,
+                             IMG_UINT32          ui32Mask,
+                             IMG_UINT32          ui32Flags,
+                             PDUMP_POLL_OPERATOR eOperator)
+{
+       IMG_UINT32      ui32PollCount;
+
+       PDUMP_GET_MSG_STRING();
+
+       ui32PollCount = POLL_COUNT_SHORT;
+
+       if (0 == OSSNPrintf(pszMsg, ui32MaxLen, ":%s:0x%08X 0x%08X 0x%08X %d %u %d",
+                                               pszPDumpRegName, ui32RegAddr, ui32RegValue,
+                                               ui32Mask, eOperator, ui32PollCount, POLL_DELAY))
+       {
+               PDUMP_RELEASE_MSG_STRING()
+               return PVRSRV_ERROR_INTERNAL_ERROR;
+       }
+
+       *ppszPDumpCond = pszMsg;
+
+       return PVRSRV_OK;
+}
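+/* Illustrative usage sketch only (caller-side names such as pszRegSpace and
+ * ui32ExpectedValue are hypothetical, and the EQUAL enumerator name is assumed
+ * from the PDUMP_POLL_OPERATOR type): the condition string built above is
+ * typically passed to the IF/FI helpers defined further down, with error
+ * handling and the release of the string omitted for brevity:
+ *
+ *     IMG_CHAR *pszCond;
+ *     PDumpRegCondStr(&pszCond, pszRegSpace, ui32RegAddr, ui32ExpectedValue,
+ *                     ui32Mask, ui32Flags, PDUMP_POLL_OPERATOR_EQUAL);
+ *     PDumpIfKM(psDeviceNode, pszCond, ui32Flags);
+ *     ...conditional script commands...
+ *     PDumpFiKM(psDeviceNode, pszCond, ui32Flags);
+ */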
+
+/******************************************************************************
+ * Function Name  : PDumpInternalValCondStr
+ * Inputs         : pszInternalVar  - internal variable name
+ *                  ui32RegValue    - expected value
+ *                  ui32Mask        - mask applied to that value
+ * Outputs        : ppszPDumpCond   - PDump conditional test for use with
+ *                                    'IF' and 'DOW'
+ * Returns        : Error
+ * Description    : Create a PDUMP conditional test. The string is allocated
+ *                  on the heap and should be freed by the caller on success.
+******************************************************************************/
+PVRSRV_ERROR PDumpInternalValCondStr(IMG_CHAR            **ppszPDumpCond,
+                                     IMG_CHAR            *pszInternalVar,
+                                     IMG_UINT32          ui32RegValue,
+                                     IMG_UINT32          ui32Mask,
+                                     IMG_UINT32          ui32Flags,
+                                     PDUMP_POLL_OPERATOR eOperator)
+{
+       IMG_UINT32      ui32PollCount;
+
+       PDUMP_GET_MSG_STRING();
+
+       ui32PollCount = POLL_COUNT_SHORT;
+
+       if (0 == OSSNPrintf(pszMsg, ui32MaxLen, "%s 0x%08X 0x%08X %d %u %d",
+                                               pszInternalVar, ui32RegValue,
+                                               ui32Mask, eOperator, ui32PollCount, POLL_DELAY))
+       {
+               PDUMP_RELEASE_MSG_STRING()
+               return PVRSRV_ERROR_INTERNAL_ERROR;
+       }
+
+       *ppszPDumpCond = pszMsg;
+
+       return PVRSRV_OK;
+}
+
+
+/******************************************************************************
+ * Function Name  : PDumpIfKM
+ * Inputs         : pszPDumpCond - string for condition
+ * Outputs        : None
+ * Returns        : Error
+ * Description    : Emit a PDump IF command with the given condition to the
+ *                  script.
+******************************************************************************/
+PVRSRV_ERROR PDumpIfKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+                       IMG_CHAR *pszPDumpCond, IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eErr;
+       PDUMP_GET_SCRIPT_STRING()
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "IF %s\n", pszPDumpCond);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING();
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32PDumpFlags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+       PDUMP_UNLOCK(ui32PDumpFlags);
+
+       PDUMP_RELEASE_SCRIPT_STRING();
+       return PVRSRV_OK;
+}
+
+/******************************************************************************
+ * Function Name  : PDumpElseKM
+ * Inputs         : pszPDumpCond - string for condition
+ * Outputs        : None
+ * Returns        : Error
+ * Description    : Emit a PDump ELSE command with the given condition to the
+ *                  script.
+******************************************************************************/
+PVRSRV_ERROR PDumpElseKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+                         IMG_CHAR *pszPDumpCond, IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eErr;
+       PDUMP_GET_SCRIPT_STRING()
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "ELSE %s\n", pszPDumpCond);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING();
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32PDumpFlags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+       PDUMP_UNLOCK(ui32PDumpFlags);
+
+       PDUMP_RELEASE_SCRIPT_STRING();
+
+       return PVRSRV_OK;
+}
+
+/******************************************************************************
+ * Function Name  : PDumpFiKM
+ * Inputs         : pszPDumpCond - string for condition
+ * Outputs        : None
+ * Returns        : Error
+ * Description    : Emit a PDump FI command with the given condition to the
+ *                  script.
+******************************************************************************/
+PVRSRV_ERROR PDumpFiKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+                       IMG_CHAR *pszPDumpCond, IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eErr;
+       PDUMP_GET_SCRIPT_STRING()
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "FI %s\n", pszPDumpCond);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING();
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32PDumpFlags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+       PDUMP_UNLOCK(ui32PDumpFlags);
+
+       PDUMP_RELEASE_SCRIPT_STRING();
+
+       return PVRSRV_OK;
+}
+
+/******************************************************************************
+ * Function Name  : PDumpStartDoLoopKM
+ * Inputs         : None
+ * Outputs        : None
+ * Returns        : Error
+ * Description    : Emit a PDump SDO (start of do-loop) command to the script.
+******************************************************************************/
+PVRSRV_ERROR PDumpStartDoLoopKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eErr;
+       PDUMP_GET_SCRIPT_STRING()
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "SDO");
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING();
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32PDumpFlags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+       PDUMP_UNLOCK(ui32PDumpFlags);
+
+       PDUMP_RELEASE_SCRIPT_STRING();
+
+       return PVRSRV_OK;
+}
+
+/******************************************************************************
+ * Function Name  : PDumpEndDoWhileLoopKM
+ * Inputs         : pszPDumpWhileCond - string for loop condition
+ * Outputs        : None
+ * Returns        : Error
+ * Description    : Emit a PDump DOW command with the given loop condition to
+ *                  the script.
+******************************************************************************/
+PVRSRV_ERROR PDumpEndDoWhileLoopKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                   IMG_CHAR *pszPDumpWhileCond,
+                                   IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eErr;
+       PDUMP_GET_SCRIPT_STRING()
+
+       eErr = PDumpSNPrintf(hScript, ui32MaxLen, "DOW %s\n", pszPDumpWhileCond);
+
+       if (eErr != PVRSRV_OK)
+       {
+               PDUMP_RELEASE_SCRIPT_STRING();
+               return eErr;
+       }
+
+       PDUMP_LOCK(ui32PDumpFlags);
+       PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
+       PDUMP_UNLOCK(ui32PDumpFlags);
+
+       PDUMP_RELEASE_SCRIPT_STRING();
+
+       return PVRSRV_OK;
+}
+
+
+void PDumpLock(void)
+{
+       OSLockAcquire(g_hPDumpWriteLock);
+}
+void PDumpUnlock(void)
+{
+       OSLockRelease(g_hPDumpWriteLock);
+}
+static void PDumpAssertWriteLockHeld(void)
+{
+       /* g_hPDumpWriteLock is expected to be held at this point. */
+       PVR_ASSERT(OSLockIsLocked(g_hPDumpWriteLock));
+}
+
+#if defined(PDUMP_TRACE_STATE) || defined(PVR_TESTING_UTILS)
+void PDumpCommonDumpState(void)
+{
+       PVR_LOG(("--- PDUMP COMMON: g_PDumpScript.sCh.*.hTL (In, Mn, De, Bk) ( %p, %p, %p, %p )",
+                       g_PDumpScript.sCh.sInitStream.hTL, g_PDumpScript.sCh.sMainStream.hTL, g_PDumpScript.sCh.sDeinitStream.hTL, g_PDumpScript.sCh.sBlockStream.hTL));
+       PVR_LOG(("--- PDUMP COMMON: g_PDumpScript.sCh.*.ui32BufferFullRetries (In, Mn, De, Bk) ( %5d, %5d, %5d, %5d )",
+                       g_PDumpScript.sCh.sInitStream.ui32BufferFullRetries,
+                       g_PDumpScript.sCh.sMainStream.ui32BufferFullRetries,
+                       g_PDumpScript.sCh.sDeinitStream.ui32BufferFullRetries,
+                       g_PDumpScript.sCh.sBlockStream.ui32BufferFullRetries));
+       PVR_LOG(("--- PDUMP COMMON: g_PDumpScript.sCh.*.ui32BufferFullAborts (In, Mn, De, Bk)  ( %5d, %5d, %5d, %5d )",
+                               g_PDumpScript.sCh.sInitStream.ui32BufferFullAborts,
+                               g_PDumpScript.sCh.sMainStream.ui32BufferFullAborts,
+                               g_PDumpScript.sCh.sDeinitStream.ui32BufferFullAborts,
+                               g_PDumpScript.sCh.sBlockStream.ui32BufferFullAborts));
+
+       PVR_LOG(("--- PDUMP COMMON: g_PDumpScript.sCh.*.ui32HighestRetriesWatermark (In, Mn, De, Bk)  ( %5d, %5d, %5d, %5d )",
+                               g_PDumpScript.sCh.sInitStream.ui32HighestRetriesWatermark,
+                               g_PDumpScript.sCh.sMainStream.ui32HighestRetriesWatermark,
+                               g_PDumpScript.sCh.sDeinitStream.ui32HighestRetriesWatermark,
+                               g_PDumpScript.sCh.sBlockStream.ui32HighestRetriesWatermark));
+       PVR_LOG(("--- PDUMP COMMON: g_PDumpScript.ui32FileIdx( %d )", g_PDumpScript.ui32FileIdx));
+
+       PVR_LOG(("--- PDUMP COMMON: g_PDumpParameters.sCh.*.hTL (In, Mn, De, Bk) ( %p, %p, %p, %p )",
+                       g_PDumpParameters.sCh.sInitStream.hTL, g_PDumpParameters.sCh.sMainStream.hTL, g_PDumpParameters.sCh.sDeinitStream.hTL, g_PDumpParameters.sCh.sBlockStream.hTL));
+       PVR_LOG(("--- PDUMP COMMON: g_PDumpParameters.sCh.*.ui32BufferFullRetries (In, Mn, De, Bk) ( %5d, %5d, %5d, %5d )",
+                       g_PDumpParameters.sCh.sInitStream.ui32BufferFullRetries,
+                       g_PDumpParameters.sCh.sMainStream.ui32BufferFullRetries,
+                       g_PDumpParameters.sCh.sDeinitStream.ui32BufferFullRetries,
+                       g_PDumpParameters.sCh.sBlockStream.ui32BufferFullRetries));
+       PVR_LOG(("--- PDUMP COMMON: g_PDumpParameters.sCh.*.ui32BufferFullAborts (In, Mn, De, Bk)  ( %5d, %5d, %5d, %5d )",
+                       g_PDumpParameters.sCh.sInitStream.ui32BufferFullAborts,
+                       g_PDumpParameters.sCh.sMainStream.ui32BufferFullAborts,
+                       g_PDumpParameters.sCh.sDeinitStream.ui32BufferFullAborts,
+                       g_PDumpParameters.sCh.sBlockStream.ui32BufferFullAborts));
+       PVR_LOG(("--- PDUMP COMMON: g_PDumpParameters.sCh.*.ui32HighestRetriesWatermark (In, Mn, De, Bk)  ( %5d, %5d, %5d, %5d )",
+                               g_PDumpParameters.sCh.sInitStream.ui32HighestRetriesWatermark,
+                               g_PDumpParameters.sCh.sMainStream.ui32HighestRetriesWatermark,
+                               g_PDumpParameters.sCh.sDeinitStream.ui32HighestRetriesWatermark,
+                               g_PDumpParameters.sCh.sBlockStream.ui32HighestRetriesWatermark));
+
+
+       PVR_LOG(("--- PDUMP COMMON: g_PDumpParameters.sWOff.* (In, Mn, De, Bk) ( %d, %d, %d, %d )",
+                       g_PDumpParameters.sWOff.ui32Init, g_PDumpParameters.sWOff.ui32Main, g_PDumpParameters.sWOff.ui32Deinit, g_PDumpParameters.sWOff.ui32Block));
+       PVR_LOG(("--- PDUMP COMMON: g_PDumpParameters.ui32FileIdx( %d )", g_PDumpParameters.ui32FileIdx));
+
+       PVR_LOG(("--- PDUMP COMMON: g_PDumpCtrl( %p ) eServiceState( %d ), IsDriverInInitPhase( %s ) ui32Flags( %x )",
+                       &g_PDumpCtrl, g_PDumpCtrl.eServiceState, CHECK_PDUMP_CONTROL_FLAG(FLAG_IS_DRIVER_IN_INIT_PHASE) ? "yes" : "no", g_PDumpCtrl.ui32Flags));
+       PVR_LOG(("--- PDUMP COMMON: ui32DefaultCapMode( %d ) ui32CurrentFrame( %d )",
+                       g_PDumpCtrl.ui32DefaultCapMode, g_PDumpCtrl.ui32CurrentFrame));
+       PVR_LOG(("--- PDUMP COMMON: sCaptureRange.ui32Start( %x ) sCaptureRange.ui32End( %x ) sCaptureRange.ui32Interval( %u )",
+                       g_PDumpCtrl.sCaptureRange.ui32Start, g_PDumpCtrl.sCaptureRange.ui32End, g_PDumpCtrl.sCaptureRange.ui32Interval));
+       PVR_LOG(("--- PDUMP COMMON: IsInCaptureRange( %s ) IsInCaptureInterval( %s ) InPowerTransition( %d )",
+                       CHECK_PDUMP_CONTROL_FLAG(FLAG_IS_IN_CAPTURE_RANGE) ? "yes" : "no",
+                       CHECK_PDUMP_CONTROL_FLAG(FLAG_IS_IN_CAPTURE_INTERVAL) ? "yes" : "no",
+                       PDumpCtrlInPowerTransition()));
+       PVR_LOG(("--- PDUMP COMMON: sBlockCtrl.ui32BlockLength( %d ), sBlockCtrl.ui32CurrentBlock( %d )",
+                       g_PDumpCtrl.sBlockCtrl.ui32BlockLength, g_PDumpCtrl.sBlockCtrl.ui32CurrentBlock));
+}
+#endif /* defined(PDUMP_TRACE_STATE) || defined(PVR_TESTING_UTILS) */
+
+
+PVRSRV_ERROR PDumpRegisterConnection(void *hSyncPrivData,
+                                     PFN_PDUMP_SYNCBLOCKS pfnPDumpSyncBlocks,
+                                     PDUMP_CONNECTION_DATA **ppsPDumpConnectionData)
+{
+       PDUMP_CONNECTION_DATA *psPDumpConnectionData;
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(ppsPDumpConnectionData != NULL);
+       PVR_ASSERT(pfnPDumpSyncBlocks != NULL);
+       PVR_ASSERT(hSyncPrivData != NULL);
+
+       psPDumpConnectionData = OSAllocMem(sizeof(*psPDumpConnectionData));
+       PVR_GOTO_IF_NOMEM(psPDumpConnectionData, eError, fail_alloc);
+
+       eError = OSLockCreate(&psPDumpConnectionData->hLock);
+       PVR_GOTO_IF_ERROR(eError, fail_lockcreate);
+
+       dllist_init(&psPDumpConnectionData->sListHead);
+       OSAtomicWrite(&psPDumpConnectionData->sRefCount, 1);
+       psPDumpConnectionData->ui32LastSetFrameNumber = PDUMP_FRAME_UNSET;
+       psPDumpConnectionData->eLastEvent = PDUMP_TRANSITION_EVENT_NONE;
+       psPDumpConnectionData->eFailedEvent = PDUMP_TRANSITION_EVENT_NONE;
+
+       /*
+        * Although we don't take a ref count here, handle base destruction
+        * will ensure that any resource that might trigger us to do a Transition
+        * will have been freed before the sync blocks which are keeping the sync
+        * connection data alive.
+        */
+       psPDumpConnectionData->hSyncPrivData = hSyncPrivData;
+       psPDumpConnectionData->pfnPDumpSyncBlocks = pfnPDumpSyncBlocks;
+
+       *ppsPDumpConnectionData = psPDumpConnectionData;
+
+       return PVRSRV_OK;
+
+fail_lockcreate:
+       OSFreeMem(psPDumpConnectionData);
+fail_alloc:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+void PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
+{
+       _PDumpConnectionRelease(psPDumpConnectionData);
+}
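+/* Note on lifetime (summarising the two functions above): the connection data
+ * is created with a reference count of 1, and PDumpUnregisterConnection()
+ * drops that reference via _PDumpConnectionRelease(), so the structure is
+ * expected to be freed once no other holders remain. */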
+
+
+/*!
+ * \name       PDumpSNPrintf
+ */
+PVRSRV_ERROR PDumpSNPrintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...)
+{
+       IMG_CHAR* pszBuf = hBuf;
+       IMG_INT32 n;
+       va_list vaArgs;
+
+       va_start(vaArgs, pszFormat);
+
+       n = OSVSNPrintf(pszBuf, ui32ScriptSizeMax, pszFormat, vaArgs);
+
+       va_end(vaArgs);
+
+       if ((n >= (IMG_INT32)ui32ScriptSizeMax) || (n == -1)) /* glibc >= 2.1 or glibc 2.0 */
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
+
+               return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
+       }
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+       OSAtomicIncrement(&g_sEveryLineCounter);
+#endif
+
+       /* Put line ending sequence at the end if it isn't already there */
+       _PDumpVerifyLineEnding(pszBuf, ui32ScriptSizeMax);
+
+       return PVRSRV_OK;
+}
+
+#endif /* defined(PDUMP) */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/physheap.c b/drivers/gpu/drm/img/img-rogue/services/server/common/physheap.c
new file mode 100644 (file)
index 0000000..2155bcb
--- /dev/null
@@ -0,0 +1,1184 @@
+/*************************************************************************/ /*!
+@File           physheap.c
+@Title          Physical heap management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Management functions for the physical heap(s). A heap contains
+                all the information required by services when using memory from
+                that heap (such as CPU <> Device physical address translation).
+                A system must register at least one heap but can have more than
+                one, which is why each heap must register with a (system-)unique
+                ID.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+#include "img_types.h"
+#include "img_defs.h"
+#include "physheap.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "physmem.h"
+#include "physmem_hostmem.h"
+#include "physmem_lma.h"
+#include "physmem_osmem.h"
+
+struct _PHYS_HEAP_
+{
+       /*! The type of this heap */
+       PHYS_HEAP_TYPE                  eType;
+       /* Config flags */
+       PHYS_HEAP_USAGE_FLAGS           ui32UsageFlags;
+
+       /*! Pointer to device node struct */
+       PPVRSRV_DEVICE_NODE         psDevNode;
+       /*! PDump name of this physical memory heap */
+       IMG_CHAR                                        *pszPDumpMemspaceName;
+       /*! Private data for the translate routines */
+       IMG_HANDLE                                      hPrivData;
+       /*! Function callbacks */
+       PHYS_HEAP_FUNCTIONS                     *psMemFuncs;
+
+       /*! Refcount */
+       IMG_UINT32                                      ui32RefCount;
+
+       /*! Implementation specific */
+       PHEAP_IMPL_DATA             pvImplData;
+       PHEAP_IMPL_FUNCS            *psImplFuncs;
+
+       /*! Pointer to next physical heap */
+       struct _PHYS_HEAP_              *psNext;
+};
+
+static PHYS_HEAP *g_psPhysHeapList;
+static POS_LOCK g_hPhysHeapLock;
+
+#if defined(REFCOUNT_DEBUG)
+#define PHYSHEAP_REFCOUNT_PRINT(fmt, ...)      \
+       PVRSRVDebugPrintf(PVR_DBG_WARNING,      \
+                         __FILE__,             \
+                         __LINE__,             \
+                         fmt,                  \
+                         __VA_ARGS__)
+#else
+#define PHYSHEAP_REFCOUNT_PRINT(fmt, ...)
+#endif
+
+
+
+typedef struct PHYS_HEAP_PROPERTIES_TAG
+{
+       PVRSRV_PHYS_HEAP eFallbackHeap;
+       IMG_BOOL bPVRLayerAcquire;
+       IMG_BOOL bUserModeAlloc;
+} PHYS_HEAP_PROPERTIES;
+
+/* NOTE: Table entries and order must match enum PVRSRV_PHYS_HEAP to ensure
+ * correct operation of PhysHeapCreatePMR().
+ */
+static PHYS_HEAP_PROPERTIES gasHeapProperties[PVRSRV_PHYS_HEAP_LAST] =
+{
+       /* eFallbackHeap,               bPVRLayerAcquire, bUserModeAlloc */
+    {  PVRSRV_PHYS_HEAP_DEFAULT,    IMG_TRUE,         IMG_TRUE  }, /* DEFAULT */
+    {  PVRSRV_PHYS_HEAP_DEFAULT,    IMG_TRUE,         IMG_TRUE  }, /* GPU_LOCAL */
+    {  PVRSRV_PHYS_HEAP_DEFAULT,    IMG_TRUE,         IMG_TRUE  }, /* CPU_LOCAL */
+    {  PVRSRV_PHYS_HEAP_DEFAULT,    IMG_TRUE,         IMG_TRUE  }, /* GPU_PRIVATE */
+    {  PVRSRV_PHYS_HEAP_GPU_LOCAL,  IMG_FALSE,        IMG_FALSE }, /* FW_MAIN */
+    {  PVRSRV_PHYS_HEAP_GPU_LOCAL,  IMG_TRUE,         IMG_FALSE }, /* EXTERNAL */
+    {  PVRSRV_PHYS_HEAP_GPU_LOCAL,  IMG_TRUE,         IMG_FALSE }, /* GPU_COHERENT */
+    {  PVRSRV_PHYS_HEAP_GPU_LOCAL,  IMG_TRUE,         IMG_TRUE  }, /* GPU_SECURE */
+    {  PVRSRV_PHYS_HEAP_FW_MAIN,    IMG_FALSE,        IMG_FALSE }, /* FW_CONFIG */
+    {  PVRSRV_PHYS_HEAP_FW_MAIN,    IMG_FALSE,        IMG_FALSE }, /* FW_CODE */
+    {  PVRSRV_PHYS_HEAP_FW_MAIN,    IMG_FALSE,        IMG_FALSE }, /* FW_DATA */
+    {  PVRSRV_PHYS_HEAP_FW_PREMAP0, IMG_FALSE,        IMG_FALSE }, /* FW_PREMAP0 */
+    {  PVRSRV_PHYS_HEAP_FW_PREMAP1, IMG_FALSE,        IMG_FALSE }, /* FW_PREMAP1 */
+    {  PVRSRV_PHYS_HEAP_FW_PREMAP2, IMG_FALSE,        IMG_FALSE }, /* FW_PREMAP2 */
+    {  PVRSRV_PHYS_HEAP_FW_PREMAP3, IMG_FALSE,        IMG_FALSE }, /* FW_PREMAP3 */
+    {  PVRSRV_PHYS_HEAP_FW_PREMAP4, IMG_FALSE,        IMG_FALSE }, /* FW_PREMAP4 */
+    {  PVRSRV_PHYS_HEAP_FW_PREMAP5, IMG_FALSE,        IMG_FALSE }, /* FW_PREMAP5 */
+    {  PVRSRV_PHYS_HEAP_FW_PREMAP6, IMG_FALSE,        IMG_FALSE }, /* FW_PREMAP6 */
+    {  PVRSRV_PHYS_HEAP_FW_PREMAP7, IMG_FALSE,        IMG_FALSE }, /* FW_PREMAP7 */
+};
+
+static_assert((ARRAY_SIZE(gasHeapProperties) == PVRSRV_PHYS_HEAP_LAST),
+       "Size or order of gasHeapProperties entries incorrect for PVRSRV_PHYS_HEAP enum");
+
+void PVRSRVGetDevicePhysHeapCount(PVRSRV_DEVICE_NODE *psDevNode,
+                                                                 IMG_UINT32 *pui32PhysHeapCount)
+{
+       *pui32PhysHeapCount = psDevNode->ui32UserAllocHeapCount;
+}
+
+static IMG_UINT32 PhysHeapOSGetPageShift(void)
+{
+       return (IMG_UINT32)OSGetPageShift();
+}
+
+static PHEAP_IMPL_FUNCS _sPHEAPImplFuncs =
+{
+       .pfnDestroyData = NULL,
+       .pfnGetPMRFactoryMemStats = PhysmemGetOSRamMemStats,
+       .pfnCreatePMR = PhysmemNewOSRamBackedPMR,
+       .pfnPagesAlloc = &OSPhyContigPagesAlloc,
+       .pfnPagesFree = &OSPhyContigPagesFree,
+       .pfnPagesMap = &OSPhyContigPagesMap,
+       .pfnPagesUnMap = &OSPhyContigPagesUnmap,
+       .pfnPagesClean = &OSPhyContigPagesClean,
+       .pfnGetPageShift = &PhysHeapOSGetPageShift,
+};
+
+/*************************************************************************/ /*!
+@Function       _PhysHeapDebugRequest
+@Description    This function is used to output debug information for a given
+                device's PhysHeaps.
+@Input          pfnDbgRequestHandle Opaque handle supplied when this notifier
+                                    was registered via
+                                    PVRSRVRegisterDeviceDbgRequestNotify; here
+                                    it carries the device node to be dumped.
+@Input          ui32VerbLevel       The maximum verbosity of the debug request.
+@Input          pfnDumpDebugPrintf  The specified print function that should be
+                                    used to dump any debug information
+                                    (see PVRSRVDebugRequest).
+@Input          pvDumpDebugFile     Optional file identifier to be passed to
+                                    the print function if required.
+@Return         void
+*/ /**************************************************************************/
+static void _PhysHeapDebugRequest(PVRSRV_DBGREQ_HANDLE pfnDbgRequestHandle,
+                                  IMG_UINT32 ui32VerbLevel,
+                                  DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                  void *pvDumpDebugFile)
+{
+       static const IMG_CHAR *const pszTypeStrings[] = {
+               "UNKNOWN",
+               "UMA",
+               "LMA",
+               "DMA",
+#if defined(SUPPORT_WRAP_EXTMEMOBJECT)
+               "WRAP"
+#endif
+       };
+
+       PPVRSRV_DEVICE_NODE psDeviceNode = (PPVRSRV_DEVICE_NODE)pfnDbgRequestHandle;
+       PHYS_HEAP *psPhysHeap = NULL;
+       IMG_UINT64 ui64TotalSize;
+       IMG_UINT64 ui64FreeSize;
+       IMG_UINT32 i;
+
+       PVR_LOG_RETURN_VOID_IF_FALSE(psDeviceNode != NULL,
+                                    "Phys Heap debug request failed. psDeviceNode was NULL");
+
+       PVR_DUMPDEBUG_LOG("------[ Device ID: %d - Phys Heaps ]------",
+                         psDeviceNode->sDevId.i32OsDeviceID);
+
+       for (i = 0; i < psDeviceNode->ui32RegisteredPhysHeaps; i++)
+       {
+               psPhysHeap = psDeviceNode->papsRegisteredPhysHeaps[i];
+
+               if (psPhysHeap->eType >= ARRAY_SIZE(pszTypeStrings))
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "PhysHeap at address %p eType is not a PHYS_HEAP_TYPE",
+                                psPhysHeap));
+                       break;
+               }
+
+               psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats(psPhysHeap->pvImplData,
+                                                                 &ui64TotalSize,
+                                                                 &ui64FreeSize);
+
+               if (psPhysHeap->eType == PHYS_HEAP_TYPE_LMA)
+               {
+                       IMG_CPU_PHYADDR sCPUPAddr;
+                       IMG_DEV_PHYADDR sGPUPAddr;
+                       PVRSRV_ERROR eError;
+
+                       PVR_ASSERT(psPhysHeap->psImplFuncs->pfnGetCPUPAddr != NULL);
+                       PVR_ASSERT(psPhysHeap->psImplFuncs->pfnGetDevPAddr != NULL);
+
+                       eError = psPhysHeap->psImplFuncs->pfnGetCPUPAddr(psPhysHeap->pvImplData,
+                                                                        &sCPUPAddr);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_LOG_ERROR(eError, "pfnGetCPUPAddr");
+                               sCPUPAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(IMG_UINT64_MAX);
+                       }
+
+                       eError = psPhysHeap->psImplFuncs->pfnGetDevPAddr(psPhysHeap->pvImplData,
+                                                                        &sGPUPAddr);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_LOG_ERROR(eError, "pfnGetDevPAddr");
+                               sGPUPAddr.uiAddr = IMG_UINT64_MAX;
+                       }
+
+                       PVR_DUMPDEBUG_LOG("0x%p -> Name: %s, Type: %s, "
+                                         "CPU PA Base: " CPUPHYADDR_UINT_FMTSPEC", "
+                                         "GPU PA Base: 0x%08"IMG_UINT64_FMTSPECx", "
+                                         "Usage Flags: 0x%08x, Refs: %d, "
+                                         "Free Size: %"IMG_UINT64_FMTSPEC", "
+                                         "Total Size: %"IMG_UINT64_FMTSPEC,
+                                         psPhysHeap,
+                                         psPhysHeap->pszPDumpMemspaceName,
+                                         pszTypeStrings[psPhysHeap->eType],
+                                         CPUPHYADDR_FMTARG(sCPUPAddr.uiAddr),
+                                         sGPUPAddr.uiAddr,
+                                         psPhysHeap->ui32UsageFlags,
+                                         psPhysHeap->ui32RefCount,
+                                         ui64FreeSize,
+                                         ui64TotalSize);
+               }
+               else
+               {
+                       PVR_DUMPDEBUG_LOG("0x%p -> Name: %s, Type: %s, "
+                                         "Usage Flags: 0x%08x, Refs: %d, "
+                                         "Free Size: %"IMG_UINT64_FMTSPEC", "
+                                         "Total Size: %"IMG_UINT64_FMTSPEC,
+                                         psPhysHeap,
+                                         psPhysHeap->pszPDumpMemspaceName,
+                                         pszTypeStrings[psPhysHeap->eType],
+                                         psPhysHeap->ui32UsageFlags,
+                                         psPhysHeap->ui32RefCount,
+                                         ui64FreeSize,
+                                         ui64TotalSize);
+               }
+       }
+}
+
+PVRSRV_ERROR
+PhysHeapCreateHeapFromConfig(PVRSRV_DEVICE_NODE *psDevNode,
+                                                        PHYS_HEAP_CONFIG *psConfig,
+                                                        PHYS_HEAP **ppsPhysHeap)
+{
+       PVRSRV_ERROR eResult;
+
+       if (psConfig->eType == PHYS_HEAP_TYPE_UMA
+#if defined(SUPPORT_WRAP_EXTMEMOBJECT)
+               || psConfig->eType == PHYS_HEAP_TYPE_WRAP
+#endif
+               )
+       {
+               eResult = PhysHeapCreate(psDevNode, psConfig, NULL,
+                                                                  &_sPHEAPImplFuncs, ppsPhysHeap);
+       }
+       else if (psConfig->eType == PHYS_HEAP_TYPE_LMA ||
+                        psConfig->eType == PHYS_HEAP_TYPE_DMA)
+       {
+               eResult = PhysmemCreateHeapLMA(psDevNode, psConfig, "GPU LMA (Sys)", ppsPhysHeap);
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s Invalid phys heap type: %d",
+                                __func__, psConfig->eType));
+               eResult = PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       return eResult;
+}
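+/* Summary of the dispatch above: UMA (and, when supported, WRAP) heap configs
+ * are created with the OS-page-backed _sPHEAPImplFuncs table, whereas LMA and
+ * DMA configs obtain their implementation functions from
+ * PhysmemCreateHeapLMA(); any other heap type is rejected as invalid. */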
+
+PVRSRV_ERROR
+PhysHeapCreateDeviceHeapsFromConfigs(PPVRSRV_DEVICE_NODE psDevNode,
+                                     PHYS_HEAP_CONFIG *pasConfigs,
+                                     IMG_UINT32 ui32NumConfigs)
+{
+       IMG_UINT32 i;
+       PVRSRV_ERROR eError;
+
+       /* Register the physical memory heaps */
+       psDevNode->papsRegisteredPhysHeaps =
+               OSAllocZMem(sizeof(*psDevNode->papsRegisteredPhysHeaps) * ui32NumConfigs);
+       PVR_LOG_RETURN_IF_NOMEM(psDevNode->papsRegisteredPhysHeaps, "OSAllocZMem");
+
+       psDevNode->ui32RegisteredPhysHeaps = 0;
+
+       for (i = 0; i < ui32NumConfigs; i++)
+       {
+               eError = PhysHeapCreateHeapFromConfig(psDevNode,
+                                                                                         pasConfigs + i,
+                                                                                         psDevNode->papsRegisteredPhysHeaps + i);
+               PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapCreateHeapFromConfig");
+
+               psDevNode->ui32RegisteredPhysHeaps++;
+       }
+
+#if defined(SUPPORT_PHYSMEM_TEST)
+       /* For a temporary device node there will never be a debug dump
+        * request targeting it */
+       if (psDevNode->hDebugTable != NULL)
+#endif
+       {
+               eError = PVRSRVRegisterDeviceDbgRequestNotify(&psDevNode->hPhysHeapDbgReqNotify,
+                                                             psDevNode,
+                                                             _PhysHeapDebugRequest,
+                                                             DEBUG_REQUEST_SYS,
+                                                             psDevNode);
+
+               PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVRegisterDeviceDbgRequestNotify");
+       }
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PhysHeapCreate(PPVRSRV_DEVICE_NODE psDevNode,
+                                                         PHYS_HEAP_CONFIG *psConfig,
+                                                         PHEAP_IMPL_DATA pvImplData,
+                                                         PHEAP_IMPL_FUNCS *psImplFuncs,
+                                                         PHYS_HEAP **ppsPhysHeap)
+{
+       PHYS_HEAP *psNew;
+
+       PVR_DPF_ENTERED;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode != NULL, "psDevNode");
+
+       if (psConfig->eType == PHYS_HEAP_TYPE_UNKNOWN)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psImplFuncs != NULL, "psImplFuncs");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psImplFuncs->pfnCreatePMR != NULL, "psImplFuncs->pfnCreatePMR");
+
+       psNew = OSAllocMem(sizeof(PHYS_HEAP));
+       PVR_RETURN_IF_NOMEM(psNew);
+       psNew->psDevNode = psDevNode;
+       psNew->eType = psConfig->eType;
+       psNew->psMemFuncs = psConfig->psMemFuncs;
+       psNew->hPrivData = psConfig->hPrivData;
+       psNew->ui32RefCount = 0;
+       psNew->pszPDumpMemspaceName = psConfig->pszPDumpMemspaceName;
+       psNew->ui32UsageFlags = psConfig->ui32UsageFlags;
+
+       psNew->pvImplData = pvImplData;
+       psNew->psImplFuncs = psImplFuncs;
+
+       psNew->psNext = g_psPhysHeapList;
+       g_psPhysHeapList = psNew;
+
+       *ppsPhysHeap = psNew;
+
+       PVR_DPF_RETURN_RC1(PVRSRV_OK, *ppsPhysHeap);
+}
+
+void PhysHeapDestroyDeviceHeaps(PPVRSRV_DEVICE_NODE psDevNode)
+{
+       IMG_UINT32 i;
+
+       if (psDevNode->hPhysHeapDbgReqNotify)
+       {
+               PVRSRVUnregisterDeviceDbgRequestNotify(psDevNode->hPhysHeapDbgReqNotify);
+       }
+
+       /* Unregister heaps */
+       for (i = 0; i < psDevNode->ui32RegisteredPhysHeaps; i++)
+       {
+               PhysHeapDestroy(psDevNode->papsRegisteredPhysHeaps[i]);
+       }
+
+       OSFreeMem(psDevNode->papsRegisteredPhysHeaps);
+}
+
+void PhysHeapDestroy(PHYS_HEAP *psPhysHeap)
+{
+       PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs;
+
+       PVR_DPF_ENTERED1(psPhysHeap);
+
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+       if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK)
+#endif
+       {
+               PVR_ASSERT(psPhysHeap->ui32RefCount == 0);
+       }
+
+       if (g_psPhysHeapList == psPhysHeap)
+       {
+               g_psPhysHeapList = psPhysHeap->psNext;
+       }
+       else
+       {
+               PHYS_HEAP *psTmp = g_psPhysHeapList;
+
+               while (psTmp->psNext != psPhysHeap)
+               {
+                       psTmp = psTmp->psNext;
+               }
+               psTmp->psNext = psPhysHeap->psNext;
+       }
+
+       if (psImplFuncs->pfnDestroyData != NULL)
+       {
+               psImplFuncs->pfnDestroyData(psPhysHeap->pvImplData);
+       }
+
+       OSFreeMem(psPhysHeap);
+
+       PVR_DPF_RETURN;
+}
+
+PVRSRV_ERROR PhysHeapAcquire(PHYS_HEAP *psPhysHeap)
+{
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psPhysHeap != NULL, "psPhysHeap");
+
+       psPhysHeap->ui32RefCount++;
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PhysHeapAcquireByUsage(PHYS_HEAP_USAGE_FLAGS ui32UsageFlag,
+                                                                       PPVRSRV_DEVICE_NODE psDevNode,
+                                                                       PHYS_HEAP **ppsPhysHeap)
+{
+       PHYS_HEAP *psNode = g_psPhysHeapList;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(ui32UsageFlag != 0, "ui32UsageFlag");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode != NULL, "psDevNode");
+
+       PVR_DPF_ENTERED1(ui32UsageFlag);
+
+       OSLockAcquire(g_hPhysHeapLock);
+
+       while (psNode)
+       {
+               if (psNode->psDevNode != psDevNode)
+               {
+                       psNode = psNode->psNext;
+                       continue;
+               }
+               if (BITMASK_ANY(psNode->ui32UsageFlags, ui32UsageFlag))
+               {
+                       break;
+               }
+               psNode = psNode->psNext;
+       }
+
+       if (psNode == NULL)
+       {
+               eError = PVRSRV_ERROR_PHYSHEAP_ID_INVALID;
+       }
+       else
+       {
+               psNode->ui32RefCount++;
+               PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d",
+                                                               __func__, psNode, psNode->ui32RefCount);
+       }
+
+       OSLockRelease(g_hPhysHeapLock);
+
+       *ppsPhysHeap = psNode;
+       PVR_DPF_RETURN_RC1(eError, *ppsPhysHeap);
+}
+
+static PHYS_HEAP * _PhysHeapFindHeap(PVRSRV_PHYS_HEAP ePhysHeap,
+                                                                  PPVRSRV_DEVICE_NODE psDevNode)
+{
+       PHYS_HEAP *psPhysHeapNode = g_psPhysHeapList;
+       PVRSRV_PHYS_HEAP eFallback;
+
+       if (ePhysHeap == PVRSRV_PHYS_HEAP_DEFAULT)
+       {
+               ePhysHeap = psDevNode->psDevConfig->eDefaultHeap;
+       }
+
+       while (psPhysHeapNode)
+       {
+               if ((psPhysHeapNode->psDevNode == psDevNode) &&
+                       BIT_ISSET(psPhysHeapNode->ui32UsageFlags, ePhysHeap))
+               {
+                       return psPhysHeapNode;
+               }
+
+               psPhysHeapNode = psPhysHeapNode->psNext;
+       }
+
+       eFallback = gasHeapProperties[ePhysHeap].eFallbackHeap;
+
+       if (ePhysHeap == eFallback)
+       {
+               return NULL;
+       }
+       else
+       {
+               return _PhysHeapFindHeap(eFallback, psDevNode);
+       }
+}
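+/* Worked example of the fallback walk above, based on gasHeapProperties: a
+ * lookup of PVRSRV_PHYS_HEAP_FW_CODE on a device that registered no FW_CODE
+ * heap is retried against FW_MAIN and, failing that, against GPU_LOCAL; the
+ * walk returns NULL once a heap's fallback entry refers to the heap itself. */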
+
+PVRSRV_ERROR PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP eDevPhysHeap,
+                                                                                 PPVRSRV_DEVICE_NODE psDevNode,
+                                                                                 PHYS_HEAP **ppsPhysHeap)
+{
+       PHYS_HEAP *psPhysHeap;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(eDevPhysHeap != PVRSRV_PHYS_HEAP_DEFAULT, "eDevPhysHeap");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(eDevPhysHeap < PVRSRV_PHYS_HEAP_LAST, "eDevPhysHeap");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode != NULL, "psDevNode");
+
+       PVR_DPF_ENTERED1(eDevPhysHeap);
+
+       OSLockAcquire(g_hPhysHeapLock);
+
+       psPhysHeap = _PhysHeapFindHeap(eDevPhysHeap, psDevNode);
+
+       if (psPhysHeap != NULL)
+       {
+               psPhysHeap->ui32RefCount++;
+               PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d",
+                                                               __func__, psPhysHeap, psPhysHeap->ui32RefCount);
+       }
+       else
+       {
+               eError = PVRSRV_ERROR_PHYSHEAP_ID_INVALID;
+       }
+
+       OSLockRelease(g_hPhysHeapLock);
+
+       *ppsPhysHeap = psPhysHeap;
+       PVR_DPF_RETURN_RC1(eError, *ppsPhysHeap);
+}
+
+void PhysHeapRelease(PHYS_HEAP *psPhysHeap)
+{
+       PVR_DPF_ENTERED1(psPhysHeap);
+
+       OSLockAcquire(g_hPhysHeapLock);
+       psPhysHeap->ui32RefCount--;
+       PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d",
+                                                       __func__, psPhysHeap, psPhysHeap->ui32RefCount);
+       OSLockRelease(g_hPhysHeapLock);
+
+       PVR_DPF_RETURN;
+}
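+/* Illustrative acquire/release pairing (a sketch only; the usage flag shown
+ * assumes the 1 << PVRSRV_PHYS_HEAP_* bit encoding used elsewhere in this
+ * file, e.g. by PhysHeapValidateDefaultHeapExists() below):
+ *
+ *     PHYS_HEAP *psHeap = NULL;
+ *     if (PhysHeapAcquireByUsage(1U << PVRSRV_PHYS_HEAP_GPU_LOCAL,
+ *                                psDevNode, &psHeap) == PVRSRV_OK)
+ *     {
+ *             ...use psHeap...
+ *             PhysHeapRelease(psHeap);
+ *     }
+ */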
+
+PHEAP_IMPL_DATA PhysHeapGetImplData(PHYS_HEAP *psPhysHeap)
+{
+       return psPhysHeap->pvImplData;
+}
+
+PHYS_HEAP_TYPE PhysHeapGetType(PHYS_HEAP *psPhysHeap)
+{
+       PVR_ASSERT(psPhysHeap->eType != PHYS_HEAP_TYPE_UNKNOWN);
+       return psPhysHeap->eType;
+}
+
+PHYS_HEAP_USAGE_FLAGS PhysHeapGetFlags(PHYS_HEAP *psPhysHeap)
+{
+       return psPhysHeap->ui32UsageFlags;
+}
+
+IMG_BOOL PhysHeapValidateDefaultHeapExists(PPVRSRV_DEVICE_NODE psDevNode)
+{
+       PHYS_HEAP *psDefaultHeap = NULL;
+       IMG_BOOL bDefaultHeapFound;
+
+       /* psDefaultHeap starts as NULL so a failed acquire is reported as
+        * "default heap not found" rather than reading an uninitialised pointer. */
+       PhysHeapAcquireByUsage(1<<(psDevNode->psDevConfig->eDefaultHeap), psDevNode, &psDefaultHeap);
+       if (psDefaultHeap == NULL)
+       {
+               bDefaultHeapFound = IMG_FALSE;
+       }
+       else
+       {
+               PhysHeapRelease(psDefaultHeap);
+               bDefaultHeapFound = IMG_TRUE;
+       }
+       return bDefaultHeapFound;
+}
+
+
+/*
+ * This function sets psDevPAddr to whatever the system layer has configured
+ * for the referenced region. The address is not validated, so the call will
+ * not fail even if psDevPAddr ends up invalid.
+ */
+PVRSRV_ERROR PhysHeapGetDevPAddr(PHYS_HEAP *psPhysHeap,
+                                                                          IMG_DEV_PHYADDR *psDevPAddr)
+{
+       PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs;
+       PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED;
+
+       if (psImplFuncs->pfnGetDevPAddr != NULL)
+       {
+               eResult = psImplFuncs->pfnGetDevPAddr(psPhysHeap->pvImplData,
+                                                                                         psDevPAddr);
+       }
+
+       return eResult;
+}
+
+/*
+ * This function sets psCpuPAddr to whatever the system layer has configured
+ * for the referenced region. The address is not validated, so the call will
+ * not fail even if psCpuPAddr ends up invalid.
+ */
+PVRSRV_ERROR PhysHeapGetCpuPAddr(PHYS_HEAP *psPhysHeap,
+                                                               IMG_CPU_PHYADDR *psCpuPAddr)
+{
+       PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs;
+       PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED;
+
+       if (psImplFuncs->pfnGetCPUPAddr != NULL)
+       {
+               eResult = psImplFuncs->pfnGetCPUPAddr(psPhysHeap->pvImplData,
+                                                                                         psCpuPAddr);
+       }
+
+       return eResult;
+}
+
+PVRSRV_ERROR PhysHeapGetSize(PHYS_HEAP *psPhysHeap,
+                                                                  IMG_UINT64 *puiSize)
+{
+       PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs;
+       PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED;
+
+       if (psImplFuncs->pfnGetSize != NULL)
+       {
+               eResult = psImplFuncs->pfnGetSize(psPhysHeap->pvImplData,
+                                                                                 puiSize);
+       }
+
+       return eResult;
+}
+
+PVRSRV_ERROR
+PhysHeapGetMemInfo(PVRSRV_DEVICE_NODE *psDevNode,
+              IMG_UINT32 ui32PhysHeapCount,
+                         PVRSRV_PHYS_HEAP *paePhysHeapID,
+                         PHYS_HEAP_MEM_STATS_PTR paPhysHeapMemStats)
+{
+       IMG_UINT32 i = 0;
+       PHYS_HEAP *psPhysHeap;
+
+       PVR_ASSERT(ui32PhysHeapCount <= PVRSRV_PHYS_HEAP_LAST);
+
+       for (i = 0; i < ui32PhysHeapCount; i++)
+       {
+               if (paePhysHeapID[i] >= PVRSRV_PHYS_HEAP_LAST)
+               {
+                       return PVRSRV_ERROR_PHYSHEAP_ID_INVALID;
+               }
+
+               if (paePhysHeapID[i] == PVRSRV_PHYS_HEAP_DEFAULT)
+               {
+                       return PVRSRV_ERROR_INVALID_PARAMS;
+               }
+
+               psPhysHeap = _PhysHeapFindHeap(paePhysHeapID[i], psDevNode);
+
+               paPhysHeapMemStats[i].ui32PhysHeapFlags = 0;
+
+               if (psPhysHeap && PhysHeapUserModeAlloc(paePhysHeapID[i])
+                               && psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats)
+               {
+                       psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats(psPhysHeap->pvImplData,
+                                       &paPhysHeapMemStats[i].ui64TotalSize,
+                                       &paPhysHeapMemStats[i].ui64FreeSize);
+                       paPhysHeapMemStats[i].ui32PhysHeapFlags |= PhysHeapGetType(psPhysHeap);
+
+                       if (paePhysHeapID[i] == psDevNode->psDevConfig->eDefaultHeap)
+                       {
+                               paPhysHeapMemStats[i].ui32PhysHeapFlags |= PVRSRV_PHYS_HEAP_FLAGS_IS_DEFAULT;
+                       }
+               }
+               else
+               {
+                       paPhysHeapMemStats[i].ui64TotalSize = 0;
+                       paPhysHeapMemStats[i].ui64FreeSize = 0;
+               }
+       }
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PhysHeapGetMemInfoPkd(PVRSRV_DEVICE_NODE *psDevNode,
+              IMG_UINT32 ui32PhysHeapCount,
+                         PVRSRV_PHYS_HEAP *paePhysHeapID,
+                         PHYS_HEAP_MEM_STATS_PKD_PTR paPhysHeapMemStats)
+{
+       IMG_UINT32 i = 0;
+       PHYS_HEAP *psPhysHeap;
+
+       PVR_ASSERT(ui32PhysHeapCount <= PVRSRV_PHYS_HEAP_LAST);
+
+       for (i = 0; i < ui32PhysHeapCount; i++)
+       {
+               if (paePhysHeapID[i] >= PVRSRV_PHYS_HEAP_LAST)
+               {
+                       return PVRSRV_ERROR_PHYSHEAP_ID_INVALID;
+               }
+
+               if (paePhysHeapID[i] == PVRSRV_PHYS_HEAP_DEFAULT)
+               {
+                       return PVRSRV_ERROR_INVALID_PARAMS;
+               }
+
+               psPhysHeap = _PhysHeapFindHeap(paePhysHeapID[i], psDevNode);
+
+               paPhysHeapMemStats[i].ui32PhysHeapFlags = 0;
+
+               if (psPhysHeap && PhysHeapUserModeAlloc(paePhysHeapID[i])
+                               && psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats)
+               {
+                       psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats(psPhysHeap->pvImplData,
+                                       &paPhysHeapMemStats[i].ui64TotalSize,
+                                       &paPhysHeapMemStats[i].ui64FreeSize);
+                       paPhysHeapMemStats[i].ui32PhysHeapFlags |= PhysHeapGetType(psPhysHeap);
+
+                       if (paePhysHeapID[i] == psDevNode->psDevConfig->eDefaultHeap)
+                       {
+                               paPhysHeapMemStats[i].ui32PhysHeapFlags |= PVRSRV_PHYS_HEAP_FLAGS_IS_DEFAULT;
+                       }
+               }
+               else
+               {
+                       paPhysHeapMemStats[i].ui64TotalSize = 0;
+                       paPhysHeapMemStats[i].ui64FreeSize = 0;
+               }
+       }
+
+       return PVRSRV_OK;
+}
+
+void PhysheapGetPhysMemUsage(PHYS_HEAP *psPhysHeap, IMG_UINT64 *pui64TotalSize, IMG_UINT64 *pui64FreeSize)
+{
+       if (psPhysHeap && psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats)
+       {
+               psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats(psPhysHeap->pvImplData,
+                               pui64TotalSize,
+                               pui64FreeSize);
+       }
+       else
+       {
+               *pui64TotalSize = *pui64FreeSize = 0;
+       }
+}
+
+void PhysHeapCpuPAddrToDevPAddr(PHYS_HEAP *psPhysHeap,
+                                                               IMG_UINT32 ui32NumOfAddr,
+                                                               IMG_DEV_PHYADDR *psDevPAddr,
+                                                               IMG_CPU_PHYADDR *psCpuPAddr)
+{
+       psPhysHeap->psMemFuncs->pfnCpuPAddrToDevPAddr(psPhysHeap->hPrivData,
+                                                                                                ui32NumOfAddr,
+                                                                                                psDevPAddr,
+                                                                                                psCpuPAddr);
+}
+
+void PhysHeapDevPAddrToCpuPAddr(PHYS_HEAP *psPhysHeap,
+                                                               IMG_UINT32 ui32NumOfAddr,
+                                                               IMG_CPU_PHYADDR *psCpuPAddr,
+                                                               IMG_DEV_PHYADDR *psDevPAddr)
+{
+       psPhysHeap->psMemFuncs->pfnDevPAddrToCpuPAddr(psPhysHeap->hPrivData,
+                                                                                                ui32NumOfAddr,
+                                                                                                psCpuPAddr,
+                                                                                                psDevPAddr);
+}
+
+IMG_CHAR *PhysHeapPDumpMemspaceName(PHYS_HEAP *psPhysHeap)
+{
+       return psPhysHeap->pszPDumpMemspaceName;
+}
+
+PVRSRV_ERROR PhysHeapCreatePMR(PHYS_HEAP *psPhysHeap,
+                                                          struct _CONNECTION_DATA_ *psConnection,
+                                                          IMG_DEVMEM_SIZE_T uiSize,
+                                                          IMG_DEVMEM_SIZE_T uiChunkSize,
+                                                          IMG_UINT32 ui32NumPhysChunks,
+                                                          IMG_UINT32 ui32NumVirtChunks,
+                                                          IMG_UINT32 *pui32MappingTable,
+                                                          IMG_UINT32 uiLog2PageSize,
+                                                          PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                                          const IMG_CHAR *pszAnnotation,
+                                                          IMG_PID uiPid,
+                                                          PMR **ppsPMRPtr,
+                                                          IMG_UINT32 ui32PDumpFlags)
+{
+       PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs;
+
+       return psImplFuncs->pfnCreatePMR(psPhysHeap,
+                                                                        psConnection,
+                                                                        uiSize,
+                                                                        uiChunkSize,
+                                                                        ui32NumPhysChunks,
+                                                                        ui32NumVirtChunks,
+                                                                        pui32MappingTable,
+                                                                        uiLog2PageSize,
+                                                                        uiFlags,
+                                                                        pszAnnotation,
+                                                                        uiPid,
+                                                                        ppsPMRPtr,
+                                                                        ui32PDumpFlags);
+}
+
+PVRSRV_ERROR PhysHeapInit(void)
+{
+       PVRSRV_ERROR eError;
+
+       g_psPhysHeapList = NULL;
+
+       eError = OSLockCreate(&g_hPhysHeapLock);
+       PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate");
+
+       return PVRSRV_OK;
+}
+
+void PhysHeapDeinit(void)
+{
+       PVR_ASSERT(g_psPhysHeapList == NULL);
+
+       OSLockDestroy(g_hPhysHeapLock);
+}
+
+PPVRSRV_DEVICE_NODE PhysHeapDeviceNode(PHYS_HEAP *psPhysHeap)
+{
+       PVR_ASSERT(psPhysHeap != NULL);
+
+       return psPhysHeap->psDevNode;
+}
+
+IMG_BOOL PhysHeapPVRLayerAcquire(PVRSRV_PHYS_HEAP ePhysHeap)
+{
+       PVR_ASSERT(ePhysHeap < PVRSRV_PHYS_HEAP_LAST);
+
+       return gasHeapProperties[ePhysHeap].bPVRLayerAcquire;
+}
+
+IMG_BOOL PhysHeapUserModeAlloc(PVRSRV_PHYS_HEAP ePhysHeap)
+{
+       PVR_ASSERT(ePhysHeap < PVRSRV_PHYS_HEAP_LAST);
+
+       return gasHeapProperties[ePhysHeap].bUserModeAlloc;
+}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+/*************************************************************************/ /*!
+@Function       CreateGpuVirtValArenas
+@Description    Create virtualization validation arenas
+@Input          psDeviceNode The device node
+@Return         PVRSRV_ERROR PVRSRV_OK on success
+*/ /**************************************************************************/
+static PVRSRV_ERROR CreateGpuVirtValArenas(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       /* aui64OSidMin and aui64OSidMax are what we program into HW registers.
+          The values are different from base/size of arenas. */
+       IMG_UINT64 aui64OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS];
+       IMG_UINT64 aui64OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS];
+       PHYS_HEAP_CONFIG *psGPULocalHeap = FindPhysHeapConfig(psDeviceNode->psDevConfig, PHYS_HEAP_USAGE_GPU_LOCAL);
+       PHYS_HEAP_CONFIG *psDisplayHeap = FindPhysHeapConfig(psDeviceNode->psDevConfig, PHYS_HEAP_USAGE_DISPLAY);
+       IMG_UINT64 uBase;
+       IMG_UINT64 uSize;
+       IMG_UINT64 uBaseShared;
+       IMG_UINT64 uSizeShared;
+       IMG_UINT64 uSizeSharedReg;
+       IMG_UINT32 i;
+
+       /* Shared region is fixed size, the remaining space is divided amongst OSes */
+       uSizeShared = PVR_ALIGN(GPUVIRT_SIZEOF_SHARED, (IMG_DEVMEM_SIZE_T)OSGetPageSize());
+       uSize = psGPULocalHeap->uiSize - uSizeShared;
+       uSize /= GPUVIRT_VALIDATION_NUM_OS;
+       uSize = uSize & ~((IMG_UINT64)OSGetPageSize() - 1ULL); /* Align, round down */
+
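+       /* The shared region occupies whatever is left of GPU_LOCAL after the
+          per-OS carve-out, so recompute its base and size from the rounded-down
+          per-OS size. */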
+       uBase = psGPULocalHeap->sCardBase.uiAddr;
+       uBaseShared = uBase + uSize * GPUVIRT_VALIDATION_NUM_OS;
+       uSizeShared = psGPULocalHeap->uiSize - (uBaseShared - uBase);
+
+       PVR_LOG(("GPUVIRT_VALIDATION split GPU_LOCAL base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".",
+                        psGPULocalHeap->sCardBase.uiAddr,
+                        psGPULocalHeap->uiSize));
+
+       /* If a display heap config exists, include the display heap in the non-secure regions */
+       if (psDisplayHeap)
+       {
+               /* Only works when DISPLAY heap follows GPU_LOCAL heap. */
+               PVR_LOG(("GPUVIRT_VALIDATION include DISPLAY in shared, base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".",
+                                psDisplayHeap->sCardBase.uiAddr,
+                                psDisplayHeap->uiSize));
+
+               uSizeSharedReg = uSizeShared + psDisplayHeap->uiSize;
+       }
+       else
+       {
+               uSizeSharedReg = uSizeShared;
+       }
+
+       PVR_ASSERT(uSize >= GPUVIRT_MIN_SIZE);
+       PVR_ASSERT(uSizeSharedReg >= GPUVIRT_SIZEOF_SHARED);
+
+       for (i = 0; i < GPUVIRT_VALIDATION_NUM_OS; i++)
+       {
+               IMG_CHAR aszOSRAName[RA_MAX_NAME_LENGTH];
+
+               PVR_LOG(("GPUVIRT_VALIDATION create arena OS: %d, base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".", i, uBase, uSize));
+
+               OSSNPrintf(aszOSRAName, RA_MAX_NAME_LENGTH, "GPUVIRT_OS%d", i);
+
+               psDeviceNode->psOSidSubArena[i] = RA_Create_With_Span(aszOSRAName,
+                                                                     OSGetPageShift(),
+                                                                     0,
+                                                                     uBase,
+                                                                     uSize);
+               PVR_LOG_RETURN_IF_NOMEM(psDeviceNode->psOSidSubArena[i], "RA_Create_With_Span");
+
+               aui64OSidMin[GPUVIRT_VAL_REGION_SECURE][i] = uBase;
+
+               if (i == 0)
+               {
+                       /* OSid0 has access to all regions */
+                       aui64OSidMax[GPUVIRT_VAL_REGION_SECURE][i] = psGPULocalHeap->uiSize - 1ULL;
+               }
+               else
+               {
+                       aui64OSidMax[GPUVIRT_VAL_REGION_SECURE][i] = uBase + uSize - 1ULL;
+               }
+
+               /* uSizeSharedReg includes the display heap */
+               aui64OSidMin[GPUVIRT_VAL_REGION_SHARED][i] = uBaseShared;
+               aui64OSidMax[GPUVIRT_VAL_REGION_SHARED][i] = uBaseShared + uSizeSharedReg - 1ULL;
+
+               PVR_LOG(("GPUVIRT_VALIDATION HW reg regions %d: min[0]: 0x%" IMG_UINT64_FMTSPECX ", max[0]: 0x%" IMG_UINT64_FMTSPECX ", min[1]: 0x%" IMG_UINT64_FMTSPECX ", max[1]: 0x%" IMG_UINT64_FMTSPECX ",",
+                                i,
+                                aui64OSidMin[GPUVIRT_VAL_REGION_SECURE][i],
+                                aui64OSidMax[GPUVIRT_VAL_REGION_SECURE][i],
+                                aui64OSidMin[GPUVIRT_VAL_REGION_SHARED][i],
+                                aui64OSidMax[GPUVIRT_VAL_REGION_SHARED][i]));
+               uBase += uSize;
+       }
+
+       PVR_LOG(("GPUVIRT_VALIDATION create arena Shared, base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".", uBaseShared, uSizeShared));
+
+       PVR_ASSERT(uSizeShared >= GPUVIRT_SIZEOF_SHARED);
+
+       /* uSizeShared does not include the display heap */
+       psDeviceNode->psOSSharedArena = RA_Create_With_Span("GPUVIRT_SHARED",
+                                                           OSGetPageShift(),
+                                                           0,
+                                                           uBaseShared,
+                                                           uSizeShared);
+       PVR_LOG_RETURN_IF_NOMEM(psDeviceNode->psOSSharedArena, "RA_Create_With_Span");
+
+       if (psDeviceNode->psDevConfig->pfnSysDevVirtInit != NULL)
+       {
+               psDeviceNode->psDevConfig->pfnSysDevVirtInit(aui64OSidMin, aui64OSidMax);
+       }
+
+       return PVRSRV_OK;
+}
+
+/*
+ * Counterpart to CreateGpuVirtValArenas.
+ */
+static void DestroyGpuVirtValArenas(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       IMG_UINT32      uiCounter = 0;
+
+       /*
+        * NOTE: We overload psOSidSubArena[0] into the psLocalMemArena so we must
+        * not free it here as it gets cleared later.
+        */
+       for (uiCounter = 1; uiCounter < GPUVIRT_VALIDATION_NUM_OS; uiCounter++)
+       {
+               if (psDeviceNode->psOSidSubArena[uiCounter] == NULL)
+               {
+                       continue;
+               }
+               RA_Delete(psDeviceNode->psOSidSubArena[uiCounter]);
+       }
+
+       if (psDeviceNode->psOSSharedArena != NULL)
+       {
+               RA_Delete(psDeviceNode->psOSSharedArena);
+       }
+}
+#endif
+
+PVRSRV_ERROR PhysHeapMMUPxSetup(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+       PHYS_HEAP_TYPE eHeapType;
+       PVRSRV_ERROR eError;
+
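+       /* MMU page-table allocations for this device are served from its
+        * default physical heap. */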
+       eError = PhysHeapAcquireByDevPhysHeap(psDeviceNode->psDevConfig->eDefaultHeap,
+                                             psDeviceNode, &psDeviceNode->psMMUPhysHeap);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquireByDevPhysHeap", ErrorDeinit);
+
+       eHeapType = PhysHeapGetType(psDeviceNode->psMMUPhysHeap);
+
+       if (eHeapType == PHYS_HEAP_TYPE_UMA)
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "%s: GPU physical heap uses OS System memory (UMA)", __func__));
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+               PVR_DPF((PVR_DBG_ERROR, "%s: Virtualisation Validation builds are currently only"
+                                                                " supported on systems with local memory (LMA).", __func__));
+               eError = PVRSRV_ERROR_NOT_SUPPORTED;
+               goto ErrorDeinit;
+#endif
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "%s: GPU physical heap uses local memory managed by the driver (LMA)", __func__));
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+               eError = CreateGpuVirtValArenas(psDeviceNode);
+               PVR_LOG_GOTO_IF_ERROR(eError, "CreateGpuVirtValArenas", ErrorDeinit);
+#endif
+       }
+
+       return PVRSRV_OK;
+ErrorDeinit:
+       return eError;
+}
+
+void PhysHeapMMUPxDeInit(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+       /* Remove local LMA subarenas */
+       DestroyGpuVirtValArenas(psDeviceNode);
+#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */
+
+       if (psDeviceNode->psMMUPhysHeap != NULL)
+       {
+               PhysHeapRelease(psDeviceNode->psMMUPhysHeap);
+               psDeviceNode->psMMUPhysHeap = NULL;
+       }
+}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+PVRSRV_ERROR PhysHeapPagesAllocGPV(PHYS_HEAP *psPhysHeap, size_t uiSize,
+                                   PG_HANDLE *psMemHandle,
+                                   IMG_DEV_PHYADDR *psDevPAddr,
+                                   IMG_UINT32 ui32OSid, IMG_PID uiPid)
+{
+       PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs;
+       PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED;
+
+       if (psImplFuncs->pfnPagesAllocGPV != NULL)
+       {
+               eResult = psImplFuncs->pfnPagesAllocGPV(psPhysHeap,
+                                                       uiSize, psMemHandle, psDevPAddr, ui32OSid, uiPid);
+       }
+
+       return eResult;
+}
+#endif
+
+PVRSRV_ERROR PhysHeapPagesAlloc(PHYS_HEAP *psPhysHeap, size_t uiSize,
+                                                               PG_HANDLE *psMemHandle,
+                                                               IMG_DEV_PHYADDR *psDevPAddr,
+                                                               IMG_PID uiPid)
+{
+       PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs;
+       PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED;
+
+       if (psImplFuncs->pfnPagesAlloc != NULL)
+       {
+               eResult = psImplFuncs->pfnPagesAlloc(psPhysHeap,
+                                                      uiSize, psMemHandle, psDevPAddr, uiPid);
+       }
+
+       return eResult;
+}
+
+void PhysHeapPagesFree(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle)
+{
+       PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs;
+
+       PVR_ASSERT(psImplFuncs->pfnPagesFree != NULL);
+
+       if (psImplFuncs->pfnPagesFree != NULL)
+       {
+               psImplFuncs->pfnPagesFree(psPhysHeap,
+                                         psMemHandle);
+       }
+}
+
+PVRSRV_ERROR PhysHeapPagesMap(PHYS_HEAP *psPhysHeap, PG_HANDLE *pshMemHandle, size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+                                                         void **pvPtr)
+{
+       PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs;
+       PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED;
+
+       if (psImplFuncs->pfnPagesMap != NULL)
+       {
+               eResult = psImplFuncs->pfnPagesMap(psPhysHeap,
+                                                  pshMemHandle, uiSize, psDevPAddr, pvPtr);
+       }
+
+       return eResult;
+}
+
+void PhysHeapPagesUnMap(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle, void *pvPtr)
+{
+       PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs;
+
+       PVR_ASSERT(psImplFuncs->pfnPagesUnMap != NULL);
+
+       if (psImplFuncs->pfnPagesUnMap != NULL)
+       {
+               psImplFuncs->pfnPagesUnMap(psPhysHeap,
+                                          psMemHandle, pvPtr);
+       }
+}
+
+PVRSRV_ERROR PhysHeapPagesClean(PHYS_HEAP *psPhysHeap, PG_HANDLE *pshMemHandle,
+                                                         IMG_UINT32 uiOffset,
+                                                         IMG_UINT32 uiLength)
+{
+       PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs;
+       PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED;
+
+       if (psImplFuncs->pfnPagesClean != NULL)
+       {
+               eResult = psImplFuncs->pfnPagesClean(psPhysHeap,
+                                                    pshMemHandle, uiOffset, uiLength);
+       }
+
+       return eResult;
+}
+
+IMG_UINT32 PhysHeapGetPageShift(PHYS_HEAP *psPhysHeap)
+{
+       PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs;
+       IMG_UINT32 ui32PageShift = 0;
+
+       PVR_ASSERT(psImplFuncs->pfnGetPageShift != NULL);
+
+       if (psImplFuncs->pfnGetPageShift != NULL)
+       {
+               ui32PageShift = psImplFuncs->pfnGetPageShift();
+       }
+
+       return ui32PageShift;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/physmem.c b/drivers/gpu/drm/img/img-rogue/services/server/common/physmem.c
new file mode 100644 (file)
index 0000000..34c174d
--- /dev/null
@@ -0,0 +1,839 @@
+/*************************************************************************/ /*!
+@File           physmem.c
+@Title          Physmem
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Common entry point for creation of RAM backed PMRs
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "device.h"
+#include "physmem.h"
+#include "pvrsrv.h"
+#include "osfunc.h"
+#include "pdump_physmem.h"
+#include "pdump_km.h"
+#include "rgx_heaps.h"
+#include "pvr_ricommon.h"
+
+#include "physmem_lma.h"
+#include "physmem_osmem.h"
+
+#if defined(DEBUG)
+static IMG_UINT32 gPMRAllocFail;
+
+#if defined(__linux__)
+#include <linux/moduleparam.h>
+
+module_param(gPMRAllocFail, uint, 0644);
+MODULE_PARM_DESC(gPMRAllocFail, "When number of PMR allocs reaches "
+                                "this value, it will fail (default value is 0 which "
+                                "means that alloc function will behave normally).");
+#endif /* defined(__linux__) */
+#endif /* defined(DEBUG) */
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#include "proc_stats.h"
+#endif
+
+PVRSRV_ERROR DevPhysMemAlloc(PVRSRV_DEVICE_NODE        *psDevNode,
+                             IMG_UINT32 ui32MemSize,
+                             IMG_UINT32 ui32Log2Align,
+                             const IMG_UINT8 u8Value,
+                             IMG_BOOL bInitPage,
+#if defined(PDUMP)
+                             const IMG_CHAR *pszDevSpace,
+                             const IMG_CHAR *pszSymbolicAddress,
+                             IMG_HANDLE *phHandlePtr,
+#endif
+                             IMG_HANDLE hMemHandle,
+                             IMG_DEV_PHYADDR *psDevPhysAddr)
+{
+       void *pvCpuVAddr;
+       PVRSRV_ERROR eError;
+#if defined(PDUMP)
+       IMG_CHAR szFilenameOut[PDUMP_PARAM_MAX_FILE_NAME];
+       PDUMP_FILEOFFSET_T uiOffsetOut;
+       IMG_UINT32 ui32PageSize;
+       IMG_UINT32 ui32PDumpMemSize = ui32MemSize;
+       PVRSRV_ERROR ePDumpError;
+#endif
+       PG_HANDLE *psMemHandle;
+       IMG_UINT64 uiMask;
+       IMG_DEV_PHYADDR sDevPhysAddr_int;
+       IMG_PID uiPid = 0;
+
+       psMemHandle = hMemHandle;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+       uiPid = psDevNode->eDevState == PVRSRV_DEVICE_STATE_INIT ?
+               PVR_SYS_ALLOC_PID : OSGetCurrentClientProcessIDKM();
+#endif
+
+       /* Allocate the pages */
+       eError = PhysHeapPagesAlloc(psDevNode->psMMUPhysHeap,
+                                   TRUNCATE_64BITS_TO_SIZE_T(ui32MemSize),
+                                   psMemHandle,
+                                   &sDevPhysAddr_int,
+                                   uiPid);
+       PVR_LOG_RETURN_IF_ERROR(eError, "pfnDevPxAlloc:1");
+
+       /* Check whether the page allocator returned pages with the requested
+        * alignment; this is usually already the case.
+        */
+       uiMask = (1 << ui32Log2Align) - 1;
+       if (ui32Log2Align && (sDevPhysAddr_int.uiAddr & uiMask))
+       {
+               /* The allocation is not aligned as requested: free it and
+                * over-allocate by the alignment mask instead. */
+               PhysHeapPagesFree(psDevNode->psMMUPhysHeap, psMemHandle);
+
+               ui32MemSize += (IMG_UINT32) uiMask;
+               eError = PhysHeapPagesAlloc(psDevNode->psMMUPhysHeap,
+                                           TRUNCATE_64BITS_TO_SIZE_T(ui32MemSize),
+                                           psMemHandle,
+                                           &sDevPhysAddr_int,
+                                           uiPid);
+               PVR_LOG_RETURN_IF_ERROR(eError, "pfnDevPxAlloc:2");
+
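+               /* Round the returned address up to the requested alignment within
+                * the over-allocated block. */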
+               sDevPhysAddr_int.uiAddr += uiMask;
+               sDevPhysAddr_int.uiAddr &= ~uiMask;
+       }
+       *psDevPhysAddr = sDevPhysAddr_int;
+
+#if defined(PDUMP)
+       ui32PageSize = ui32Log2Align ? (1 << ui32Log2Align) : OSGetPageSize();
+       eError = PDumpMalloc(psDevNode,
+                            pszDevSpace,
+                            pszSymbolicAddress,
+                            ui32PDumpMemSize,
+                            ui32PageSize,
+                            IMG_FALSE,
+                            0,
+                            phHandlePtr,
+                            PDUMP_NONE);
+       if (PVRSRV_OK != eError)
+       {
+               PDUMPCOMMENT(psDevNode, "Allocating pages failed");
+               *phHandlePtr = NULL;
+       }
+       ePDumpError = eError;
+#endif
+
+       if (bInitPage)
+       {
+               /* Map the page to the CPU VA space */
+               eError = PhysHeapPagesMap(psDevNode->psMMUPhysHeap,
+                                         psMemHandle,
+                                         ui32MemSize,
+                                         &sDevPhysAddr_int,
+                                         &pvCpuVAddr);
+               if (PVRSRV_OK != eError)
+               {
+                       PVR_LOG_ERROR(eError, "DevPxMap");
+                       PhysHeapPagesFree(psDevNode->psMMUPhysHeap, psMemHandle);
+                       return eError;
+               }
+
+               /* Fill the memory with the given content */
+               OSDeviceMemSet(pvCpuVAddr, u8Value, ui32MemSize);
+
+               /* Clean the CPU cache so the device sees the initialised contents */
+               eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap,
+                                           psMemHandle,
+                                           0,
+                                           ui32MemSize);
+               if (PVRSRV_OK != eError)
+               {
+                       PVR_LOG_ERROR(eError, "DevPxClean");
+                       PhysHeapPagesUnMap(psDevNode->psMMUPhysHeap, psMemHandle, pvCpuVAddr);
+                       PhysHeapPagesFree(psDevNode->psMMUPhysHeap, psMemHandle);
+                       return eError;
+               }
+
+#if defined(PDUMP)
+               if (ePDumpError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE)
+               {
+                       /* PDumping of the page contents can be done in two ways:
+                        * 1. Store the single-byte init value in the .prm file
+                        *    and load that value into the entire dummy page buffer.
+                        *    This requires a lot of LDBs to be inserted into out2.txt.
+                        *
+                        * 2. Store the entire contents of the buffer in the .prm file
+                        *    and load them back.
+                        *    This needs only a single LDB instruction in the script,
+                        *    so this method is used. The .prm file grows a little,
+                        *    but not significantly for an allocation of this size.
+                        */
+                       /* Write the buffer contents to the prm file */
+                       eError = PDumpWriteParameterBlob(psDevNode,
+                                                                                        pvCpuVAddr,
+                                                                                        ui32PDumpMemSize,
+                                                                                        PDUMP_FLAGS_CONTINUOUS,
+                                                                                        szFilenameOut,
+                                                                                        sizeof(szFilenameOut),
+                                                                                        &uiOffsetOut);
+                       if (PVRSRV_OK == eError)
+                       {
+                               /* Load the buffer back to the allocated memory when playing the pdump */
+                               eError = PDumpPMRLDB(psDevNode,
+                                                                        pszDevSpace,
+                                                                        pszSymbolicAddress,
+                                                                        0,
+                                                                        ui32PDumpMemSize,
+                                                                        szFilenameOut,
+                                                                        uiOffsetOut,
+                                                                        PDUMP_FLAGS_CONTINUOUS);
+                               if (PVRSRV_OK != eError)
+                               {
+                                       PDUMP_ERROR(psDevNode, eError, "Failed to write LDB statement to script file");
+                                       PVR_LOG_ERROR(eError, "PDumpPMRLDB");
+                               }
+                       }
+                       else if (eError != PVRSRV_ERROR_PDUMP_NOT_ALLOWED)
+                       {
+                               PDUMP_ERROR(psDevNode, eError, "Failed to write device allocation to parameter file");
+                               PVR_LOG_ERROR(eError, "PDumpWriteParameterBlob");
+                       }
+                       else
+                       {
+                               /* The write to the parameter file was prevented by the flags
+                                * and the current driver state, so skip the script write and
+                                * do not treat this as an error. This is expected, e.g. when
+                                * outside the capture range.
+                                */
+                               eError = PVRSRV_OK;
+                       }
+               }
+#endif
+
+               /* Unmap the page */
+               PhysHeapPagesUnMap(psDevNode->psMMUPhysHeap,
+                                  psMemHandle,
+                                  pvCpuVAddr);
+       }
+
+       return PVRSRV_OK;
+}
+
+void DevPhysMemFree(PVRSRV_DEVICE_NODE *psDevNode,
+#if defined(PDUMP)
+                                                       IMG_HANDLE hPDUMPMemHandle,
+#endif
+                                                       IMG_HANDLE      hMemHandle)
+{
+       PG_HANDLE *psMemHandle;
+
+       psMemHandle = hMemHandle;
+       PhysHeapPagesFree(psDevNode->psMMUPhysHeap, psMemHandle);
+#if defined(PDUMP)
+       if (NULL != hPDUMPMemHandle)
+       {
+               PDumpFree(psDevNode, hPDUMPMemHandle);
+       }
+#endif
+
+}
+
+
+/* Checks the input parameters and adjusts them if possible and necessary */
+static inline PVRSRV_ERROR _ValidateParams(IMG_UINT32 ui32NumPhysChunks,
+                                           IMG_UINT32 ui32NumVirtChunks,
+                                           PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                           IMG_UINT32 *puiLog2AllocPageSize,
+                                           IMG_DEVMEM_SIZE_T *puiSize,
+                                           PMR_SIZE_T *puiChunkSize)
+{
+       IMG_UINT32 uiLog2AllocPageSize = *puiLog2AllocPageSize;
+       IMG_DEVMEM_SIZE_T uiSize = *puiSize;
+       PMR_SIZE_T uiChunkSize = *puiChunkSize;
+       /* An allocation is sparse if the numbers of virtual and physical chunks
+        * differ or, more generally, whenever there is more than one virtual chunk */
+       IMG_BOOL bIsSparse = (ui32NumVirtChunks != ui32NumPhysChunks ||
+                       ui32NumVirtChunks > 1) ? IMG_TRUE : IMG_FALSE;
+
+       if (ui32NumPhysChunks == 0 && ui32NumVirtChunks == 0)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Number of physical chunks and number of virtual chunks "
+                               "cannot be both 0",
+                               __func__));
+
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       /* Protect against ridiculous page sizes */
+       if (uiLog2AllocPageSize > RGX_HEAP_2MB_PAGE_SHIFT)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Page size is too big: 2^%u.", uiLog2AllocPageSize));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       /* Range check of the alloc size */
+       if (uiSize >= 0x1000000000ULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Cancelling allocation request of over 64 GB. "
+                                "This is likely a bug."
+                               , __func__));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       /* Fail if requesting coherency on one side but uncached on the other */
+       if (PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) &&
+           (PVRSRV_CHECK_GPU_UNCACHED(uiFlags) || PVRSRV_CHECK_GPU_WRITE_COMBINE(uiFlags)))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Request for CPU coherency but specifying GPU uncached "
+                               "Please use GPU cached flags for coherency."));
+               return PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE;
+       }
+
+       if (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) &&
+           (PVRSRV_CHECK_CPU_UNCACHED(uiFlags) || PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags)))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Request for GPU coherency but specifying CPU uncached "
+                               "Please use CPU cached flags for coherency."));
+               return PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE;
+       }
+
+       if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) && PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Zero on Alloc and Poison on Alloc are mutually exclusive.",
+                               __func__));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       if (bIsSparse)
+       {
+               /* For sparse allocations we need valid parameters, such as a suitable page size ... */
+               if (OSGetPageShift() > uiLog2AllocPageSize)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Invalid log2-contiguity for sparse allocation. "
+                                       "Requested %u, required minimum %zd",
+                                       __func__,
+                                       uiLog2AllocPageSize,
+                                       OSGetPageShift() ));
+
+                       return PVRSRV_ERROR_INVALID_PARAMS;
+               }
+
+               /* ... the chunk size must be equal to the page size ... */
+               if (uiChunkSize != (1 << uiLog2AllocPageSize))
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Invalid chunk size for sparse allocation. Requested "
+                                        "%#" IMG_UINT64_FMTSPECx ", must be same as page size %#x.",
+                                       __func__, uiChunkSize, 1 << uiLog2AllocPageSize));
+
+                       return PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+               }
+
+               if (ui32NumVirtChunks * uiChunkSize != uiSize)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Total alloc size (%#" IMG_UINT64_FMTSPECx ") "
+                                        "is not equal to virtual chunks * chunk size "
+                                        "(%#" IMG_UINT64_FMTSPECx ")",
+                                       __func__, uiSize, ui32NumVirtChunks * uiChunkSize));
+
+                       return PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+               }
+
+               if (ui32NumPhysChunks > ui32NumVirtChunks)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Number of physical chunks (%u) must not be greater "
+                                       "than number of virtual chunks (%u)",
+                                       __func__,
+                                       ui32NumPhysChunks,
+                                       ui32NumVirtChunks));
+
+                       return PVRSRV_ERROR_INVALID_PARAMS;
+               }
+       }
+       else
+       {
+               /*
+                * Silently round up the alignment/page size if the request was less
+                * than PAGE_SHIFT, because it is never harmful for memory to be
+                * _more_ contiguous than was requested.
+                */
+               uiLog2AllocPageSize = OSGetPageShift() > uiLog2AllocPageSize ?
+                               OSGetPageShift() : uiLog2AllocPageSize;
+
+               /* Same for total size */
+               uiSize = PVR_ALIGN(uiSize, (IMG_DEVMEM_SIZE_T)OSGetPageSize());
+               *puiChunkSize = uiSize;
+       }
+
+       if ((uiSize & ((1ULL << uiLog2AllocPageSize) - 1)) != 0)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                       "%s: Total size (%#" IMG_UINT64_FMTSPECx ") "
+                       "must be a multiple of the requested contiguity (%"
+                       IMG_UINT64_FMTSPEC ")", __func__, uiSize,
+                       (IMG_UINT64) (1ULL << uiLog2AllocPageSize)));
+               return PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+       }
+
+       *puiLog2AllocPageSize = uiLog2AllocPageSize;
+       *puiSize = uiSize;
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _DevPhysHeapFromFlags(PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                                                                 PVRSRV_PHYS_HEAP *peDevPhysHeap)
+{
+       PVRSRV_PHYS_HEAP eHeap = PVRSRV_GET_PHYS_HEAP_HINT(uiFlags);
+
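+       /* Only the virtualisation host may target the FW pre-mapped heaps;
+        * PVRSRV_PHYS_HEAP_LAST is never a valid hint. */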
+       switch (eHeap)
+       {
+               case PVRSRV_PHYS_HEAP_FW_PREMAP0:
+               case PVRSRV_PHYS_HEAP_FW_PREMAP1:
+               case PVRSRV_PHYS_HEAP_FW_PREMAP2:
+               case PVRSRV_PHYS_HEAP_FW_PREMAP3:
+               case PVRSRV_PHYS_HEAP_FW_PREMAP4:
+               case PVRSRV_PHYS_HEAP_FW_PREMAP5:
+               case PVRSRV_PHYS_HEAP_FW_PREMAP6:
+               case PVRSRV_PHYS_HEAP_FW_PREMAP7:
+               {
+                       /* keep heap (with check) */
+                       PVR_RETURN_IF_INVALID_PARAM(PVRSRV_VZ_MODE_IS(HOST));
+                       break;
+               }
+               case PVRSRV_PHYS_HEAP_LAST:
+               {
+                       return PVRSRV_ERROR_INVALID_PARAMS;
+               }
+               default:
+               {
+                       break;
+               }
+       }
+
+       *peDevPhysHeap = eHeap;
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PhysmemNewRamBackedPMR_direct(CONNECTION_DATA *psConnection,
+                       PVRSRV_DEVICE_NODE *psDevNode,
+                       IMG_DEVMEM_SIZE_T uiSize,
+                       PMR_SIZE_T uiChunkSize,
+                       IMG_UINT32 ui32NumPhysChunks,
+                       IMG_UINT32 ui32NumVirtChunks,
+                       IMG_UINT32 *pui32MappingTable,
+                       IMG_UINT32 uiLog2AllocPageSize,
+                       PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                       IMG_UINT32 uiAnnotationLength,
+                       const IMG_CHAR *pszAnnotation,
+                       IMG_PID uiPid,
+                       PMR **ppsPMRPtr,
+                       IMG_UINT32 ui32PDumpFlags,
+                       PVRSRV_MEMALLOCFLAGS_T *puiPMRFlags)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_PHYS_HEAP ePhysHeapIdx;
+       PVRSRV_MEMALLOCFLAGS_T uiPMRFlags = uiFlags;
+       PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE pfnCheckMemAllocSize =
+               psDevNode->psDevConfig->pfnCheckMemAllocSize;
+
+       PVR_UNREFERENCED_PARAMETER(uiAnnotationLength);
+
+       eError = _ValidateParams(ui32NumPhysChunks,
+                                ui32NumVirtChunks,
+                                uiFlags,
+                                &uiLog2AllocPageSize,
+                                &uiSize,
+                                &uiChunkSize);
+       PVR_RETURN_IF_ERROR(eError);
+
+       eError = _DevPhysHeapFromFlags(uiFlags, &ePhysHeapIdx);
+       PVR_RETURN_IF_ERROR(eError);
+
+       if (ePhysHeapIdx == PVRSRV_PHYS_HEAP_DEFAULT)
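+       /* Validate each requested heap ID, then gather PMR-factory statistics for
+        * heaps that allow user-mode allocations; other entries report zero sizes. */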
+       {
+               ePhysHeapIdx = psDevNode->psDevConfig->eDefaultHeap;
+               PVRSRV_CHANGE_PHYS_HEAP_HINT(ePhysHeapIdx, uiPMRFlags);
+       }
+
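+       /* Refine a GPU_LOCAL hint: allocations that are never CPU mapped can live
+        * in GPU_PRIVATE, and cache-coherent allocations on cache-snooping systems
+        * are placed in GPU_COHERENT instead. */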
+       if (ePhysHeapIdx == PVRSRV_PHYS_HEAP_GPU_LOCAL)
+       {
+               if ((uiFlags & PVRSRV_MEMALLOCFLAGS_CPU_MAPPABLE_MASK) == 0)
+               {
+                       ePhysHeapIdx = PVRSRV_PHYS_HEAP_GPU_PRIVATE;
+                       PVRSRV_SET_PHYS_HEAP_HINT(GPU_PRIVATE, uiPMRFlags);
+                       PVR_DPF((PVR_DBG_VERBOSE, "%s: Consider explicit use of GPU_PRIVATE for PMR %s."
+                               " Implicit conversion to GPU PRIVATE performed",
+                               __func__, pszAnnotation));
+               }
+               else if (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) &&
+                                PVRSRVSystemSnoopingOfCPUCache(psDevNode->psDevConfig))
+               {
+                       ePhysHeapIdx = PVRSRV_PHYS_HEAP_GPU_COHERENT;
+                       PVRSRV_SET_PHYS_HEAP_HINT(GPU_COHERENT, uiPMRFlags);
+               }
+       }
+       else if (ePhysHeapIdx == PVRSRV_PHYS_HEAP_GPU_PRIVATE)
+       {
+               if (uiFlags & PVRSRV_MEMALLOCFLAGS_CPU_MAPPABLE_MASK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Invalid flags for PMR %s!"
+                               " Client requested GPU_PRIVATE physical heap with CPU access flags.",
+                               __func__, pszAnnotation));
+                       return PVRSRV_ERROR_INVALID_HEAP;
+               }
+       }
+
+       if (NULL == psDevNode->apsPhysHeap[ePhysHeapIdx])
+       {
+               /* In case a heap hasn't been acquired for this type, return invalid heap error */
+               PVR_DPF((PVR_DBG_ERROR, "%s: Requested allocation on device node (%p) from "
+                       "an invalid heap (HeapIndex=%d)",
+                       __func__, psDevNode, ePhysHeapIdx));
+               return PVRSRV_ERROR_INVALID_HEAP;
+       }
+
+       /* Apply memory budgeting policy */
+       if (pfnCheckMemAllocSize)
+       {
+               IMG_UINT64 uiMemSize = (IMG_UINT64)uiChunkSize * ui32NumPhysChunks;
+
+               eError = pfnCheckMemAllocSize(psDevNode->psDevConfig->hSysData, uiMemSize);
+               PVR_RETURN_IF_ERROR(eError);
+       }
+
+#if defined(DEBUG)
+       if (gPMRAllocFail > 0)
+       {
+               static IMG_UINT32 ui32AllocCount = 1;
+
+               if (ui32AllocCount < gPMRAllocFail)
+               {
+                       ui32AllocCount++;
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s failed on %d allocation.",
+                                __func__, ui32AllocCount));
+                       return PVRSRV_ERROR_OUT_OF_MEMORY;
+               }
+       }
+#endif /* defined(DEBUG) */
+
+       /* If the driver is in an 'init' state all of the allocated memory
+        * should be attributed to the driver (PID 1) rather than to the
+        * process those allocations are made under. Same applies to the memory
+        * allocated for the Firmware. */
+       if (psDevNode->eDevState == PVRSRV_DEVICE_STATE_INIT ||
+           PVRSRV_CHECK_FW_MAIN(uiFlags))
+       {
+               uiPid = PVR_SYS_ALLOC_PID;
+       }
+
+       eError = PhysHeapCreatePMR(psDevNode->apsPhysHeap[ePhysHeapIdx],
+                                                          psConnection,
+                                                          uiSize,
+                                                          uiChunkSize,
+                                                          ui32NumPhysChunks,
+                                                          ui32NumVirtChunks,
+                                                          pui32MappingTable,
+                                                          uiLog2AllocPageSize,
+                                                          uiFlags,
+                                                          pszAnnotation,
+                                                          uiPid,
+                                                          ppsPMRPtr,
+                                                          ui32PDumpFlags);
+
+       if (puiPMRFlags != NULL)
+       {
+               *puiPMRFlags = uiPMRFlags;
+       }
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+       if (eError != PVRSRV_OK)
+       {
+               PVRSRVStatsUpdateOOMStats(PVRSRV_PROCESS_STAT_TYPE_OOM_PHYSMEM_COUNT,
+                                         OSGetCurrentClientProcessIDKM());
+       }
+#endif
+
+       return eError;
+}
+
+PVRSRV_ERROR
+PhysmemNewRamBackedPMR(CONNECTION_DATA *psConnection,
+                       PVRSRV_DEVICE_NODE *psDevNode,
+                       IMG_DEVMEM_SIZE_T uiSize,
+                       PMR_SIZE_T uiChunkSize,
+                       IMG_UINT32 ui32NumPhysChunks,
+                       IMG_UINT32 ui32NumVirtChunks,
+                       IMG_UINT32 *pui32MappingTable,
+                       IMG_UINT32 uiLog2AllocPageSize,
+                       PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                       IMG_UINT32 uiAnnotationLength,
+                       const IMG_CHAR *pszAnnotation,
+                       IMG_PID uiPid,
+                       PMR **ppsPMRPtr,
+                       IMG_UINT32 ui32PDumpFlags,
+                       PVRSRV_MEMALLOCFLAGS_T *puiPMRFlags)
+{
+       PVRSRV_PHYS_HEAP ePhysHeap = PVRSRV_GET_PHYS_HEAP_HINT(uiFlags);
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(uiAnnotationLength != 0, "uiAnnotationLength");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(pszAnnotation != NULL, "pszAnnotation");
+
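+       /* Resolve a DEFAULT hint to the device's default heap before checking that
+        * the chosen heap permits user-mode allocations. */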
+       if (ePhysHeap == PVRSRV_PHYS_HEAP_DEFAULT)
+       {
+               ePhysHeap = psDevNode->psDevConfig->eDefaultHeap;
+       }
+
+       if (!PhysHeapUserModeAlloc(ePhysHeap))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Invalid phys heap hint: %d.", __func__, ePhysHeap));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       return PhysmemNewRamBackedPMR_direct(psConnection,
+                                                                                psDevNode,
+                                                                                uiSize,
+                                                                                uiChunkSize,
+                                                                                ui32NumPhysChunks,
+                                                                                ui32NumVirtChunks,
+                                                                                pui32MappingTable,
+                                                                                uiLog2AllocPageSize,
+                                                                                uiFlags,
+                                                                                uiAnnotationLength,
+                                                                                pszAnnotation,
+                                                                                uiPid,
+                                                                                ppsPMRPtr,
+                                                                                ui32PDumpFlags,
+                                                                                puiPMRFlags);
+}
+
+PVRSRV_ERROR
+PhysmemNewRamBackedLockedPMR(CONNECTION_DATA *psConnection,
+                                                       PVRSRV_DEVICE_NODE *psDevNode,
+                                                       IMG_DEVMEM_SIZE_T uiSize,
+                                                       PMR_SIZE_T uiChunkSize,
+                                                       IMG_UINT32 ui32NumPhysChunks,
+                                                       IMG_UINT32 ui32NumVirtChunks,
+                                                       IMG_UINT32 *pui32MappingTable,
+                                                       IMG_UINT32 uiLog2PageSize,
+                                                       PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                                       IMG_UINT32 uiAnnotationLength,
+                                                       const IMG_CHAR *pszAnnotation,
+                                                       IMG_PID uiPid,
+                                                       PMR **ppsPMRPtr,
+                                                       IMG_UINT32 ui32PDumpFlags,
+                                                       PVRSRV_MEMALLOCFLAGS_T *puiPMRFlags)
+{
+       PVRSRV_ERROR eError;
+
+       eError = PhysmemNewRamBackedPMR(psConnection,
+                                                                       psDevNode,
+                                                                       uiSize,
+                                                                       uiChunkSize,
+                                                                       ui32NumPhysChunks,
+                                                                       ui32NumVirtChunks,
+                                                                       pui32MappingTable,
+                                                                       uiLog2PageSize,
+                                                                       uiFlags,
+                                                                       uiAnnotationLength,
+                                                                       pszAnnotation,
+                                                                       uiPid,
+                                                                       ppsPMRPtr,
+                                                                       ui32PDumpFlags,
+                                                                       puiPMRFlags);
+
+       if (eError == PVRSRV_OK)
+       {
+               eError = PMRLockSysPhysAddresses(*ppsPMRPtr);
+       }
+
+       return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVGetMaxPhysHeapCountKM(CONNECTION_DATA *psConnection,
+                                 PVRSRV_DEVICE_NODE *psDevNode,
+                                 IMG_UINT32 *pui32PhysHeapCount)
+{
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVRSRVGetDevicePhysHeapCount(psDevNode, pui32PhysHeapCount);
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVGetDefaultPhysicalHeapKM(CONNECTION_DATA *psConnection,
+                         PVRSRV_DEVICE_NODE *psDevNode,
+                         PVRSRV_PHYS_HEAP *peHeap)
+{
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       *peHeap = psDevNode->psDevConfig->eDefaultHeap;
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVGetHeapPhysMemUsageKM(CONNECTION_DATA *psConnection,
+                         PVRSRV_DEVICE_NODE *psDevNode,
+                         IMG_UINT32 ui32PhysHeapCount,
+                         PHYS_HEAP_MEM_STATS *apPhysHeapMemStats)
+{
+       PHYS_HEAP *psPhysHeap;
+       IMG_UINT uiHeapIndex, i = 0;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       if (ui32PhysHeapCount != psDevNode->ui32UserAllocHeapCount)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
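+       /* Walk every heap type after DEFAULT and report usage only for heaps that
+        * are present on this device and allow user-mode allocations. */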
+       for (uiHeapIndex = PVRSRV_PHYS_HEAP_DEFAULT+1; (uiHeapIndex < PVRSRV_PHYS_HEAP_LAST); uiHeapIndex++)
+       {
+               psPhysHeap = psDevNode->apsPhysHeap[uiHeapIndex];
+
+               if (psPhysHeap && PhysHeapUserModeAlloc(uiHeapIndex))
+               {
+                       PVR_ASSERT(i < ui32PhysHeapCount);
+
+                       PhysheapGetPhysMemUsage(psPhysHeap, &apPhysHeapMemStats[i].ui64TotalSize,
+                                       &apPhysHeapMemStats[i].ui64FreeSize);
+
+                       i++;
+               }
+       }
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVGetHeapPhysMemUsagePkdKM(CONNECTION_DATA *psConnection,
+                         PVRSRV_DEVICE_NODE *psDevNode,
+                         IMG_UINT32 ui32PhysHeapCount,
+                         PHYS_HEAP_MEM_STATS_PKD *apPhysHeapMemStats)
+{
+       PHYS_HEAP *psPhysHeap;
+       IMG_UINT uiHeapIndex, i = 0;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       if (ui32PhysHeapCount != psDevNode->ui32UserAllocHeapCount)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       for (uiHeapIndex = PVRSRV_PHYS_HEAP_DEFAULT+1; (uiHeapIndex < PVRSRV_PHYS_HEAP_LAST); uiHeapIndex++)
+       {
+               psPhysHeap = psDevNode->apsPhysHeap[uiHeapIndex];
+
+               if (psPhysHeap && PhysHeapUserModeAlloc(uiHeapIndex))
+               {
+                       PVR_ASSERT(i < ui32PhysHeapCount);
+
+                       PhysheapGetPhysMemUsage(psPhysHeap, &apPhysHeapMemStats[i].ui64TotalSize,
+                                       &apPhysHeapMemStats[i].ui64FreeSize);
+
+                       i++;
+               }
+       }
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVPhysHeapGetMemInfoKM(CONNECTION_DATA *psConnection,
+                         PVRSRV_DEVICE_NODE *psDevNode,
+                         IMG_UINT32 ui32PhysHeapCount,
+                         PVRSRV_PHYS_HEAP *paePhysHeapID,
+                         PHYS_HEAP_MEM_STATS *paPhysHeapMemStats)
+{
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       return PhysHeapGetMemInfo(psDevNode,
+                                 ui32PhysHeapCount,
+                                 paePhysHeapID,
+                                 paPhysHeapMemStats);
+}
+
+PVRSRV_ERROR
+PVRSRVPhysHeapGetMemInfoPkdKM(CONNECTION_DATA *psConnection,
+                         PVRSRV_DEVICE_NODE *psDevNode,
+                         IMG_UINT32 ui32PhysHeapCount,
+                         PVRSRV_PHYS_HEAP *paePhysHeapID,
+                         PHYS_HEAP_MEM_STATS_PKD *paPhysHeapMemStats)
+{
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       return PhysHeapGetMemInfoPkd(psDevNode,
+                                 ui32PhysHeapCount,
+                                 paePhysHeapID,
+                                 paPhysHeapMemStats);
+}
+
+/* 'Wrapper' function to call PMRImportPMR(), which first checks the PMR is
+ * for the current device. This avoids the need to do this in pmr.c, which
+ * would then need PVRSRV_DEVICE_NODE (defining this type in pmr.h causes a
+ * typedef redefinition issue).
+ */
+PVRSRV_ERROR
+PhysmemImportPMR(CONNECTION_DATA *psConnection,
+             PVRSRV_DEVICE_NODE *psDevNode,
+             PMR_EXPORT *psPMRExport,
+             PMR_PASSWORD_T uiPassword,
+             PMR_SIZE_T uiSize,
+             PMR_LOG2ALIGN_T uiLog2Contig,
+             PMR **ppsPMR)
+{
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       if (PMRGetExportDeviceNode(psPMRExport) != psDevNode)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: PMR invalid for this device", __func__));
+               return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+       }
+
+       return PMRImportPMR(psPMRExport,
+                           uiPassword,
+                           uiSize,
+                           uiLog2Contig,
+                           ppsPMR);
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/physmem_hostmem.c b/drivers/gpu/drm/img/img-rogue/services/server/common/physmem_hostmem.c
new file mode 100644 (file)
index 0000000..2f1dc40
--- /dev/null
@@ -0,0 +1,206 @@
+/*************************************************************************/ /*!
+@File           physmem_hostmem.c
+@Title          Host memory device node functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Functions relevant to device memory allocations made from host
+                mem device node.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "physmem_hostmem.h"
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "allocmem.h"
+#include "physheap.h"
+#include "pvrsrv_device.h"
+#include "physheap.h"
+#include "physmem_osmem.h"
+
+static void HostMemCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+                                      IMG_UINT32 ui32NumOfAddr,
+                                      IMG_DEV_PHYADDR *psDevPAddr,
+                                      IMG_CPU_PHYADDR *psCpuPAddr);
+
+static void HostMemDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+                                      IMG_UINT32 ui32NumOfAddr,
+                                      IMG_CPU_PHYADDR *psCpuPAddr,
+                                      IMG_DEV_PHYADDR *psDevPAddr);
+
+/* heap callbacks for host driver's device's heap */
+static PHYS_HEAP_FUNCTIONS gsHostMemDevPhysHeapFuncs =
+{
+       /* pfnCpuPAddrToDevPAddr */
+       HostMemCpuPAddrToDevPAddr,
+       /* pfnDevPAddrToCpuPAddr */
+       HostMemDevPAddrToCpuPAddr,
+};
+
+static PVRSRV_DEVICE_CONFIG gsHostMemDevConfig[];
+
+/* heap configuration for host driver's device */
+static PHYS_HEAP_CONFIG gsPhysHeapConfigHostMemDevice[] =
+{
+       {
+               PHYS_HEAP_TYPE_UMA,
+               "SYSMEM",
+               &gsHostMemDevPhysHeapFuncs,
+               {0},
+               {0},
+               0,
+               (IMG_HANDLE)&gsHostMemDevConfig[0],
+               PHYS_HEAP_USAGE_CPU_LOCAL,
+       }
+};
+
+/* device configuration for host driver's device */
+static PVRSRV_DEVICE_CONFIG gsHostMemDevConfig[] =
+{
+       {
+               .pszName = "HostMemDevice",
+               .eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE,
+               .pasPhysHeaps = &gsPhysHeapConfigHostMemDevice[0],
+               .ui32PhysHeapCount = ARRAY_SIZE(gsPhysHeapConfigHostMemDevice),
+       }
+};
+
+static void HostMemCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+                                      IMG_UINT32 ui32NumOfAddr,
+                                      IMG_DEV_PHYADDR *psDevPAddr,
+                                      IMG_CPU_PHYADDR *psCpuPAddr)
+{
+       PVR_UNREFERENCED_PARAMETER(hPrivData);
+       /* Optimise common case */
+       psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr;
+       if (ui32NumOfAddr > 1)
+       {
+               IMG_UINT32 ui32Idx;
+               for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+               {
+                       psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr;
+               }
+       }
+}
+
+static void HostMemDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+                                      IMG_UINT32 ui32NumOfAddr,
+                                      IMG_CPU_PHYADDR *psCpuPAddr,
+                                      IMG_DEV_PHYADDR *psDevPAddr)
+{
+       PVR_UNREFERENCED_PARAMETER(hPrivData);
+       /* Optimise common case */
+       psCpuPAddr[0].uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(psDevPAddr[0].uiAddr);
+       if (ui32NumOfAddr > 1)
+       {
+               IMG_UINT32 ui32Idx;
+               for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+               {
+                       psCpuPAddr[ui32Idx].uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(psDevPAddr[ui32Idx].uiAddr);
+               }
+       }
+}
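The two callbacks above are deliberately identity translations: the host-memory device node is backed directly by system RAM, so a CPU physical address and a device physical address refer to the same location. Below is a minimal illustrative sketch of that property, not part of the patch; the function name and the probe value are hypothetical.

/* Illustrative sketch only (not part of the patch): the translation callbacks
 * above are identity mappings, so a CPU -> device -> CPU round trip returns
 * the original address. */
static IMG_BOOL HostMemTranslationIsIdentity(void)
{
	IMG_CPU_PHYADDR sProbe = { .uiAddr = 0x80000000 };
	IMG_CPU_PHYADDR sBack;
	IMG_DEV_PHYADDR sDev;

	HostMemCpuPAddrToDevPAddr(NULL, 1, &sDev, &sProbe);   /* CPU -> device */
	HostMemDevPAddrToCpuPAddr(NULL, 1, &sBack, &sDev);    /* device -> CPU */

	return (sBack.uiAddr == sProbe.uiAddr) ? IMG_TRUE : IMG_FALSE;
}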
+
+PVRSRV_ERROR HostMemDeviceCreate(PVRSRV_DEVICE_NODE **ppsDeviceNode)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+       PVRSRV_DEVICE_CONFIG *psDevConfig = &gsHostMemDevConfig[0];
+
+       /* Assert ensures HostMemory device isn't already created and
+        * that data is initialised */
+       PVR_ASSERT(*ppsDeviceNode == NULL);
+
+       /* for now, we only expect a single heap (UMA) config for the host device */
+       PVR_ASSERT(psDevConfig->ui32PhysHeapCount == 1 &&
+                               psDevConfig->pasPhysHeaps[0].eType == PHYS_HEAP_TYPE_UMA);
+
+       /* N.B. In case of any failure in this function, we simply return the error
+          to the caller, as clean-up is taken care of by the HostMemDeviceDestroy function */
+
+       psDeviceNode = OSAllocZMem(sizeof(*psDeviceNode));
+       PVR_LOG_RETURN_IF_NOMEM(psDeviceNode, "OSAllocZMem");
+
+       /* early save return pointer to aid clean-up */
+       *ppsDeviceNode = psDeviceNode;
+
+       psDeviceNode->psDevConfig = psDevConfig;
+       psDeviceNode->papsRegisteredPhysHeaps =
+               OSAllocZMem(sizeof(*psDeviceNode->papsRegisteredPhysHeaps) *
+                                       psDevConfig->ui32PhysHeapCount);
+       PVR_LOG_RETURN_IF_NOMEM(psDeviceNode->papsRegisteredPhysHeaps, "OSAllocZMem");
+
+       eError = PhysHeapCreateHeapFromConfig(psDeviceNode,
+                                                                                 &psDevConfig->pasPhysHeaps[0],
+                                                                                 &psDeviceNode->papsRegisteredPhysHeaps[0]);
+       PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapCreateHeapFromConfig");
+       psDeviceNode->ui32RegisteredPhysHeaps = 1;
+
+       /* Only CPU local heap is valid on host-mem DevNode, so enable minimal callbacks */
+       eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_CPU_LOCAL,
+                                                                                 psDeviceNode,
+                                                                                 &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL]);
+       PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapAcquire");
+
+       return PVRSRV_OK;
+}
+
+void HostMemDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       if (!psDeviceNode)
+       {
+               return;
+       }
+
+       if (psDeviceNode->papsRegisteredPhysHeaps)
+       {
+               if (psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL])
+               {
+                       PhysHeapRelease(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL]);
+               }
+
+               if (psDeviceNode->papsRegisteredPhysHeaps[0])
+               {
+                       /* the clean-up function is likewise aware of only one heap */
+                       PVR_ASSERT(psDeviceNode->ui32RegisteredPhysHeaps == 1);
+                       PhysHeapDestroy(psDeviceNode->papsRegisteredPhysHeaps[0]);
+               }
+
+               OSFreeMem(psDeviceNode->papsRegisteredPhysHeaps);
+       }
+       OSFreeMem(psDeviceNode);
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/physmem_lma.c b/drivers/gpu/drm/img/img-rogue/services/server/common/physmem_lma.c
new file mode 100644 (file)
index 0000000..4fa61ac
--- /dev/null
@@ -0,0 +1,2003 @@
+/*************************************************************************/ /*!
+@File           physmem_lma.c
+@Title          Local card memory allocator
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management. This module is responsible for
+                implementing the function callbacks for local card memory.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "rgx_pdump_panics.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "devicemem_server_utils.h"
+#include "physmem_lma.h"
+#include "pdump_km.h"
+#include "pmr.h"
+#include "pmr_impl.h"
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "rgxutils.h"
+#endif
+
+#if defined(INTEGRITY_OS)
+#include "mm.h"
+#include "integrity_memobject.h"
+#endif
+
+/* Since 0x0 is a valid DevPAddr, we rely on max 64-bit value to be an invalid
+ * page address */
+#define INVALID_PAGE_ADDR ~((IMG_UINT64)0x0)
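Because 0x0 is a legal device physical address, zero cannot serve as the "empty slot" marker; unpopulated entries in a PMR's page array are instead set to the all-ones sentinel defined above. The helper below is a minimal illustrative sketch, not part of the patch (the name is hypothetical); it expresses the test that the allocation and free paths later perform with open-coded comparisons.

/* Illustrative only: IMG_TRUE when a page-array slot holds a real allocation. */
static inline IMG_BOOL _IsPageSlotPopulated(const IMG_DEV_PHYADDR *psDevPAddr)
{
	return (psDevPAddr->uiAddr != INVALID_PAGE_ADDR) ? IMG_TRUE : IMG_FALSE;
}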
+
+typedef struct _PMR_LMALLOCARRAY_DATA_ {
+       IMG_PID uiPid;
+       IMG_INT32 iNumPagesAllocated;
+       /*
+        * uiTotalNumPages:
+        * Total number of pages supported by this PMR.
+        * (Fixed for now, due to the fixed page table array size)
+        */
+       IMG_UINT32 uiTotalNumPages;
+       IMG_UINT32 uiPagesToAlloc;
+
+       IMG_UINT32 uiLog2AllocSize;
+       IMG_UINT32 uiContigAllocSize;
+       IMG_DEV_PHYADDR *pasDevPAddr;
+
+       IMG_BOOL bZeroOnAlloc;
+       IMG_BOOL bPoisonOnAlloc;
+
+       IMG_BOOL bOnDemand;
+
+       /*
+         Record at alloc time whether poisoning will be required when the
+         PMR is freed.
+       */
+       IMG_BOOL bPoisonOnFree;
+
+       /* Physical heap and arena pointers for this allocation */
+       PHYS_HEAP* psPhysHeap;
+       RA_ARENA* psArena;
+       PVRSRV_MEMALLOCFLAGS_T uiAllocFlags;
+
+       /*
+          Connection data for this request's originating process. NULL for
+          direct-bridge originating calls
+        */
+       CONNECTION_DATA *psConnection;
+} PMR_LMALLOCARRAY_DATA;
+
+#if defined(DEBUG) && defined(SUPPORT_VALIDATION) && defined(__linux__)
+/* Global structure to manage GPU memory leak */
+static DEFINE_MUTEX(g_sLMALeakMutex);
+static IMG_UINT32 g_ui32LMALeakCounter = 0;
+#endif
+
+typedef struct PHYSMEM_LMA_DATA_TAG {
+       RA_ARENA                        *psRA;
+
+       IMG_CPU_PHYADDR         sStartAddr;
+       IMG_DEV_PHYADDR         sCardBase;
+       IMG_UINT64                      uiSize;
+} PHYSMEM_LMA_DATA;
+
+/*
+ * This function will set the psDevPAddr to whatever the system layer
+ * has set it for the referenced heap.
+ * It will not fail if the psDevPAddr is invalid.
+ */
+static PVRSRV_ERROR
+_GetDevPAddr(PHEAP_IMPL_DATA pvImplData,
+                        IMG_DEV_PHYADDR *psDevPAddr)
+{
+       PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData;
+
+       *psDevPAddr = psLMAData->sCardBase;
+
+       return PVRSRV_OK;
+}
+
+/*
+ * This function will set the psCpuPAddr to whatever the system layer
+ * has set it for the referenced heap.
+ * It will not fail if the psCpuPAddr is invalid.
+ */
+static PVRSRV_ERROR
+_GetCPUPAddr(PHEAP_IMPL_DATA pvImplData,
+                        IMG_CPU_PHYADDR *psCpuPAddr)
+{
+       PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData;
+
+       *psCpuPAddr = psLMAData->sStartAddr;
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+_GetSize(PHEAP_IMPL_DATA pvImplData,
+                IMG_UINT64 *puiSize)
+{
+       PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData;
+
+       *puiSize = psLMAData->uiSize;
+
+       return PVRSRV_OK;
+}
+
+static IMG_UINT32
+_GetPageShift(void)
+{
+       return PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT;
+}
+
+static void PhysmemGetLocalRamMemStats(PHEAP_IMPL_DATA pvImplData,
+                IMG_UINT64 *pui64TotalSize,
+                IMG_UINT64 *pui64FreeSize)
+{
+       PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData;
+       RA_USAGE_STATS sRAUsageStats;
+
+       RA_Get_Usage_Stats(psLMAData->psRA, &sRAUsageStats);
+
+       *pui64TotalSize = sRAUsageStats.ui64TotalArenaSize;
+       *pui64FreeSize = sRAUsageStats.ui64FreeArenaSize;
+}
+
+static PVRSRV_ERROR
+PhysmemGetArenaLMA(PHYS_HEAP *psPhysHeap,
+                                  RA_ARENA **ppsArena)
+{
+       PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)PhysHeapGetImplData(psPhysHeap);
+
+       PVR_LOG_RETURN_IF_FALSE(psLMAData != NULL, "psLMAData", PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+       *ppsArena = psLMAData->psRA;
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+_CreateArenas(PHEAP_IMPL_DATA pvImplData, IMG_CHAR *pszLabel)
+{
+       PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData;
+
+       psLMAData->psRA = RA_Create_With_Span(pszLabel,
+                                    OSGetPageShift(),
+                                    psLMAData->sStartAddr.uiAddr,
+                                    psLMAData->sCardBase.uiAddr,
+                                    psLMAData->uiSize);
+       PVR_LOG_RETURN_IF_NOMEM(psLMAData->psRA, "RA_Create_With_Span");
+
+       return PVRSRV_OK;
+}
+
+static void
+_DestroyArenas(PHEAP_IMPL_DATA pvImplData)
+{
+       PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData;
+
+       /* Remove RAs and RA names for local card memory */
+       if (psLMAData->psRA)
+       {
+               OSFreeMem(psLMAData->psRA);
+               psLMAData->psRA = NULL;
+       }
+}
+
+static void
+_DestroyImplData(PHEAP_IMPL_DATA pvImplData)
+{
+       PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData;
+
+       _DestroyArenas(pvImplData);
+
+       OSFreeMem(psLMAData);
+}
+
+struct _PHYS_HEAP_ITERATOR_ {
+       PHYS_HEAP *psPhysHeap;
+       RA_ARENA_ITERATOR *psRAIter;
+
+       IMG_UINT64 uiTotalSize;
+       IMG_UINT64 uiInUseSize;
+};
+
+PVRSRV_ERROR LMA_HeapIteratorCreate(PVRSRV_DEVICE_NODE *psDevNode,
+                                    PHYS_HEAP_USAGE_FLAGS ui32Flags,
+                                    PHYS_HEAP_ITERATOR **ppsIter)
+{
+       PVRSRV_ERROR eError;
+       PHYSMEM_LMA_DATA *psLMAData;
+       PHYS_HEAP_ITERATOR *psHeapIter;
+       PHYS_HEAP *psPhysHeap = NULL;
+       RA_USAGE_STATS sStats;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(ppsIter != NULL, "ppsIter");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode != NULL, "psDevNode");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(ui32Flags != 0, "ui32Flags");
+
+       eError = PhysHeapAcquireByUsage(ui32Flags, psDevNode, &psPhysHeap);
+       PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapAcquireByUsage");
+
+       PVR_LOG_GOTO_IF_FALSE(PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_LMA,
+                             "PhysHeap must be of LMA type", release_heap);
+
+       psLMAData = (PHYSMEM_LMA_DATA *) PhysHeapGetImplData(psPhysHeap);
+
+       psHeapIter = OSAllocMem(sizeof(*psHeapIter));
+       PVR_LOG_GOTO_IF_NOMEM(psHeapIter, eError, release_heap);
+
+       psHeapIter->psPhysHeap = psPhysHeap;
+       psHeapIter->psRAIter = RA_IteratorAcquire(psLMAData->psRA, IMG_FALSE);
+       PVR_LOG_GOTO_IF_NOMEM(psHeapIter->psRAIter, eError, free_heap_iter);
+
+       /* get heap usage */
+       RA_Get_Usage_Stats(psLMAData->psRA, &sStats);
+
+       psHeapIter->uiTotalSize = sStats.ui64TotalArenaSize;
+       psHeapIter->uiInUseSize = sStats.ui64TotalArenaSize - sStats.ui64FreeArenaSize;
+
+       *ppsIter = psHeapIter;
+
+       return PVRSRV_OK;
+
+free_heap_iter:
+       OSFreeMem(psHeapIter);
+
+release_heap:
+       PhysHeapRelease(psPhysHeap);
+
+       return eError;
+}
+
+void LMA_HeapIteratorDestroy(PHYS_HEAP_ITERATOR *psIter)
+{
+       PHYS_HEAP_ITERATOR *psHeapIter = psIter;
+
+       PVR_LOG_RETURN_VOID_IF_FALSE(psHeapIter != NULL, "psHeapIter is NULL");
+
+       PhysHeapRelease(psHeapIter->psPhysHeap);
+       RA_IteratorRelease(psHeapIter->psRAIter);
+       OSFreeMem(psHeapIter);
+}
+
+PVRSRV_ERROR LMA_HeapIteratorReset(PHYS_HEAP_ITERATOR *psIter)
+{
+       PHYS_HEAP_ITERATOR *psHeapIter = psIter;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psHeapIter != NULL, "ppsIter");
+
+       RA_IteratorReset(psHeapIter->psRAIter);
+
+       return PVRSRV_OK;
+}
+
+IMG_BOOL LMA_HeapIteratorNext(PHYS_HEAP_ITERATOR *psIter,
+                              IMG_DEV_PHYADDR *psDevPAddr,
+                              IMG_UINT64 *puiSize)
+{
+       PHYS_HEAP_ITERATOR *psHeapIter = psIter;
+       RA_ITERATOR_DATA sData = {0};
+
+       if (psHeapIter == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "psHeapIter in %s() is NULL", __func__));
+               return IMG_FALSE;
+       }
+
+       if (!RA_IteratorNext(psHeapIter->psRAIter, &sData))
+       {
+               return IMG_FALSE;
+       }
+
+       PVR_ASSERT(sData.uiSize != 0);
+
+       psDevPAddr->uiAddr = sData.uiAddr;
+       *puiSize = sData.uiSize;
+
+       return IMG_TRUE;
+}
+
+PVRSRV_ERROR LMA_HeapIteratorGetHeapStats(PHYS_HEAP_ITERATOR *psIter,
+                                          IMG_UINT64 *puiTotalSize,
+                                          IMG_UINT64 *puiInUseSize)
+{
+       PHYS_HEAP_ITERATOR *psHeapIter = psIter;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psHeapIter != NULL, "psHeapIter");
+
+       *puiTotalSize = psHeapIter->uiTotalSize;
+       *puiInUseSize = psHeapIter->uiInUseSize;
+
+       return PVRSRV_OK;
+}
+
+
+static PVRSRV_ERROR
+_LMA_DoPhyContigPagesAlloc(RA_ARENA *pArena,
+                           size_t uiSize,
+                           PG_HANDLE *psMemHandle,
+                           IMG_DEV_PHYADDR *psDevPAddr,
+                           IMG_PID uiPid)
+{
+       RA_BASE_T uiCardAddr = 0;
+       RA_LENGTH_T uiActualSize;
+       PVRSRV_ERROR eError;
+#if defined(DEBUG)
+       static IMG_UINT32       ui32MaxLog2NumPages = 4;        /* 16 pages => 64KB */
+#endif /* defined(DEBUG) */
+
+       IMG_UINT32 ui32Log2NumPages = 0;
+
+       PVR_ASSERT(uiSize != 0);
+       ui32Log2NumPages = OSGetOrder(uiSize);
+       uiSize = (1 << ui32Log2NumPages) * OSGetPageSize();
+
+       eError = RA_Alloc(pArena,
+                         uiSize,
+                         RA_NO_IMPORT_MULTIPLIER,
+                         0,                         /* No flags */
+                         uiSize,
+                         "LMA_PhyContigPagesAlloc",
+                         &uiCardAddr,
+                         &uiActualSize,
+                         NULL);                     /* No private handle */
+
+       PVR_ASSERT(uiSize == uiActualSize);
+
+       psMemHandle->u.ui64Handle = uiCardAddr;
+       psDevPAddr->uiAddr = (IMG_UINT64) uiCardAddr;
+
+       if (PVRSRV_OK == eError)
+       {
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+               PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA,
+                                                   uiSize,
+                                                   uiCardAddr,
+                                                   uiPid);
+#else
+               IMG_CPU_PHYADDR sCpuPAddr;
+               sCpuPAddr.uiAddr = psDevPAddr->uiAddr;
+
+               PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA,
+                                            NULL,
+                                            sCpuPAddr,
+                                            uiSize,
+                                            NULL,
+                                            uiPid
+                                            DEBUG_MEMSTATS_VALUES);
+#endif
+#endif
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+               PVR_DPF((PVR_DBG_MESSAGE,
+                       "%s: (GPU Virtualisation) Allocated 0x" IMG_SIZE_FMTSPECX " at 0x%" IMG_UINT64_FMTSPECX ", Arena ID %u",
+                       __func__, uiSize, psDevPAddr->uiAddr, psMemHandle->uiOSid));
+#endif
+
+#if defined(DEBUG)
+               PVR_ASSERT((ui32Log2NumPages <= ui32MaxLog2NumPages));
+               if (ui32Log2NumPages > ui32MaxLog2NumPages)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s: ui32MaxLog2NumPages = %u, increasing to %u", __func__,
+                               ui32MaxLog2NumPages, ui32Log2NumPages ));
+                       ui32MaxLog2NumPages = ui32Log2NumPages;
+               }
+#endif /* defined(DEBUG) */
+               psMemHandle->uiOrder = ui32Log2NumPages;
+       }
+
+       return eError;
+}
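For reference, the allocation above rounds every request up to a power-of-two number of OS pages via OSGetOrder(), and the resulting order is stored in the handle so the free and map paths can recover the size. The following small sketch of that rounding is not part of the patch; it assumes 4 KiB OS pages and that OSGetOrder() behaves like the kernel's get_order(), and the helper name is hypothetical.

/* Illustrative only: a 20 KiB (5-page) request yields order 3, i.e. 8 pages / 32 KiB. */
static size_t _ExampleRoundToOrder(size_t uiSize)
{
	IMG_UINT32 ui32Log2NumPages = OSGetOrder(uiSize);

	return ((size_t)1 << ui32Log2NumPages) * OSGetPageSize();
}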
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+static PVRSRV_ERROR
+LMA_PhyContigPagesAllocGPV(PHYS_HEAP *psPhysHeap,
+                           size_t uiSize,
+                           PG_HANDLE *psMemHandle,
+                           IMG_DEV_PHYADDR *psDevPAddr,
+                           IMG_UINT32 ui32OSid,
+                           IMG_PID uiPid)
+{
+       PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPhysHeap);
+       RA_ARENA *pArena;
+       IMG_UINT32 ui32Log2NumPages = 0;
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(uiSize != 0);
+       ui32Log2NumPages = OSGetOrder(uiSize);
+       uiSize = (1 << ui32Log2NumPages) * OSGetPageSize();
+
+       PVR_ASSERT(ui32OSid < GPUVIRT_VALIDATION_NUM_OS);
+       if (ui32OSid >= GPUVIRT_VALIDATION_NUM_OS)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Arena index %u defaulting to 0",
+                       __func__, ui32OSid));
+               ui32OSid = 0;
+       }
+
+       pArena = psDevNode->psOSidSubArena[ui32OSid];
+
+       if (psMemHandle->uiOSid != ui32OSid)
+       {
+               PVR_LOG(("%s: Unexpected OSid value %u - expecting %u", __func__,
+                       psMemHandle->uiOSid, ui32OSid));
+       }
+
+       psMemHandle->uiOSid = ui32OSid;         /* For Free() use */
+
+       eError =  _LMA_DoPhyContigPagesAlloc(pArena, uiSize, psMemHandle,
+                                            psDevPAddr, uiPid);
+       PVR_LOG_IF_ERROR(eError, "_LMA_DoPhyContigPagesAlloc");
+
+       return eError;
+}
+#endif
+
+static PVRSRV_ERROR
+LMA_PhyContigPagesAlloc(PHYS_HEAP *psPhysHeap,
+                        size_t uiSize,
+                        PG_HANDLE *psMemHandle,
+                        IMG_DEV_PHYADDR *psDevPAddr,
+                        IMG_PID uiPid)
+{
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+       IMG_UINT32 ui32OSid = 0;
+       return LMA_PhyContigPagesAllocGPV(psPhysHeap, uiSize, psMemHandle, psDevPAddr,
+                                                                         ui32OSid, uiPid);
+#else
+       PVRSRV_ERROR eError;
+
+       RA_ARENA *pArena;
+       IMG_UINT32 ui32Log2NumPages = 0;
+
+       eError = PhysmemGetArenaLMA(psPhysHeap, &pArena);
+       PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemGetArenaLMA");
+
+       PVR_ASSERT(uiSize != 0);
+       ui32Log2NumPages = OSGetOrder(uiSize);
+       uiSize = (1 << ui32Log2NumPages) * OSGetPageSize();
+
+       eError = _LMA_DoPhyContigPagesAlloc(pArena, uiSize, psMemHandle,
+                                           psDevPAddr, uiPid);
+       PVR_LOG_IF_ERROR(eError, "_LMA_DoPhyContigPagesAlloc");
+
+       return eError;
+#endif
+}
+
+static void
+LMA_PhyContigPagesFree(PHYS_HEAP *psPhysHeap,
+                                          PG_HANDLE *psMemHandle)
+{
+       RA_BASE_T uiCardAddr = (RA_BASE_T) psMemHandle->u.ui64Handle;
+       RA_ARENA        *pArena;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+       PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPhysHeap);
+       IMG_UINT32      ui32OSid = psMemHandle->uiOSid;
+
+       /*
+        * The Arena ID is set by the originating allocation, and maintained via
+        * the call stacks into this function. We have a limited range of IDs
+        * and if the passed value falls outside this we simply treat it as a
+        * 'global' arena ID of 0. This is where all default OS-specific allocations
+        * are created.
+        */
+       PVR_ASSERT(ui32OSid < GPUVIRT_VALIDATION_NUM_OS);
+       if (ui32OSid >= GPUVIRT_VALIDATION_NUM_OS)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Arena index %u PhysAddr 0x%"
+                        IMG_UINT64_FMTSPECx " Reverting to Arena 0", __func__,
+                        ui32OSid, uiCardAddr));
+               /*
+                * No way of determining what we're trying to free so default to the
+                * global default arena index 0.
+                */
+               ui32OSid = 0;
+       }
+
+       pArena = psDevNode->psOSidSubArena[ui32OSid];
+
+       PVR_DPF((PVR_DBG_MESSAGE, "%s: (GPU Virtualisation) Freeing 0x%"
+               IMG_UINT64_FMTSPECx ", Arena %u", __func__,
+               uiCardAddr, ui32OSid));
+
+#else
+       PhysmemGetArenaLMA(psPhysHeap, &pArena);
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+       PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA,
+                                             (IMG_UINT64)uiCardAddr);
+#else
+       PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA,
+                                                                       (IMG_UINT64)uiCardAddr,
+                                                                       OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+
+       RA_Free(pArena, uiCardAddr);
+       psMemHandle->uiOrder = 0;
+}
+
+static PVRSRV_ERROR
+LMA_PhyContigPagesMap(PHYS_HEAP *psPhysHeap,
+                      PG_HANDLE *psMemHandle,
+                      size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+                      void **pvPtr)
+{
+       IMG_CPU_PHYADDR sCpuPAddr;
+       IMG_UINT32 ui32NumPages = (1 << psMemHandle->uiOrder);
+       PVR_UNREFERENCED_PARAMETER(uiSize);
+
+       PhysHeapDevPAddrToCpuPAddr(psPhysHeap, 1, &sCpuPAddr, psDevPAddr);
+       *pvPtr = OSMapPhysToLin(sCpuPAddr,
+                                                       ui32NumPages * OSGetPageSize(),
+                                                       PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC);
+       PVR_RETURN_IF_NOMEM(*pvPtr);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+       PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA,
+                                   ui32NumPages * OSGetPageSize(),
+                                   OSGetCurrentClientProcessIDKM());
+#else
+       {
+               PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA,
+                                                                        *pvPtr,
+                                                                        sCpuPAddr,
+                                                                        ui32NumPages * OSGetPageSize(),
+                                                                        NULL,
+                                                                        OSGetCurrentClientProcessIDKM()
+                                                                        DEBUG_MEMSTATS_VALUES);
+       }
+#endif
+#endif
+       return PVRSRV_OK;
+}
+
+static void
+LMA_PhyContigPagesUnmap(PHYS_HEAP *psPhysHeap,
+                        PG_HANDLE *psMemHandle,
+                        void *pvPtr)
+{
+       IMG_UINT32 ui32NumPages = (1 << psMemHandle->uiOrder);
+       PVR_UNREFERENCED_PARAMETER(psPhysHeap);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+       PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA,
+                                           ui32NumPages * OSGetPageSize(),
+                                           OSGetCurrentClientProcessIDKM());
+#else
+       PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA,
+                                       (IMG_UINT64)(uintptr_t)pvPtr,
+                                       OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+
+       OSUnMapPhysToLin(pvPtr, ui32NumPages * OSGetPageSize());
+}
+
+static PVRSRV_ERROR
+LMA_PhyContigPagesClean(PHYS_HEAP *psPhysHeap,
+                                               PG_HANDLE *psMemHandle,
+                                               IMG_UINT32 uiOffset,
+                                               IMG_UINT32 uiLength)
+{
+       /* No need to flush because we map as uncached */
+       PVR_UNREFERENCED_PARAMETER(psPhysHeap);
+       PVR_UNREFERENCED_PARAMETER(psMemHandle);
+       PVR_UNREFERENCED_PARAMETER(uiOffset);
+       PVR_UNREFERENCED_PARAMETER(uiLength);
+
+       return PVRSRV_OK;
+}
+
+static PHEAP_IMPL_FUNCS _sPHEAPImplFuncs =
+{
+       .pfnDestroyData = &_DestroyImplData,
+       .pfnGetDevPAddr = &_GetDevPAddr,
+       .pfnGetCPUPAddr = &_GetCPUPAddr,
+       .pfnGetSize = &_GetSize,
+       .pfnGetPageShift = &_GetPageShift,
+       .pfnGetPMRFactoryMemStats = &PhysmemGetLocalRamMemStats,
+       .pfnCreatePMR = &PhysmemNewLocalRamBackedPMR,
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+       .pfnPagesAllocGPV = &LMA_PhyContigPagesAllocGPV,
+#endif
+       .pfnPagesAlloc = &LMA_PhyContigPagesAlloc,
+       .pfnPagesFree = &LMA_PhyContigPagesFree,
+       .pfnPagesMap = &LMA_PhyContigPagesMap,
+       .pfnPagesUnMap = &LMA_PhyContigPagesUnmap,
+       .pfnPagesClean = &LMA_PhyContigPagesClean,
+};
+
+PVRSRV_ERROR
+PhysmemCreateHeapLMA(PVRSRV_DEVICE_NODE *psDevNode,
+                                        PHYS_HEAP_CONFIG *psConfig,
+                                        IMG_CHAR *pszLabel,
+                                        PHYS_HEAP **ppsPhysHeap)
+{
+       PHYSMEM_LMA_DATA *psLMAData;
+       PVRSRV_ERROR eError;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(pszLabel != NULL, "pszLabel");
+
+       psLMAData = OSAllocMem(sizeof(*psLMAData));
+       PVR_LOG_RETURN_IF_NOMEM(psLMAData, "OSAllocMem");
+
+       psLMAData->sStartAddr = psConfig->sStartAddr;
+       psLMAData->sCardBase = psConfig->sCardBase;
+       psLMAData->uiSize = psConfig->uiSize;
+
+
+       eError = PhysHeapCreate(psDevNode,
+                                                       psConfig,
+                                                       (PHEAP_IMPL_DATA)psLMAData,
+                                                       &_sPHEAPImplFuncs,
+                                                       ppsPhysHeap);
+       if (eError != PVRSRV_OK)
+       {
+               OSFreeMem(psLMAData);
+               return eError;
+       }
+
+       eError = _CreateArenas(psLMAData, pszLabel);
+       PVR_LOG_RETURN_IF_ERROR(eError, "_CreateArenas");
+
+
+       return eError;
+}
+
+static PVRSRV_ERROR _MapAlloc(PHYS_HEAP *psPhysHeap,
+                                                         IMG_DEV_PHYADDR *psDevPAddr,
+                                                         size_t uiSize,
+                                                         PMR_FLAGS_T ulFlags,
+                                                         void **pvPtr)
+{
+       IMG_UINT32 ui32CPUCacheFlags;
+       IMG_CPU_PHYADDR sCpuPAddr;
+       PVRSRV_ERROR eError;
+
+       eError = DevmemCPUCacheMode(PhysHeapDeviceNode(psPhysHeap), ulFlags, &ui32CPUCacheFlags);
+       PVR_RETURN_IF_ERROR(eError);
+
+       PhysHeapDevPAddrToCpuPAddr(psPhysHeap, 1, &sCpuPAddr, psDevPAddr);
+
+       *pvPtr = OSMapPhysToLin(sCpuPAddr, uiSize, ui32CPUCacheFlags);
+       PVR_RETURN_IF_NOMEM(*pvPtr);
+
+       return PVRSRV_OK;
+}
+
+static void _UnMapAlloc(size_t uiSize,
+                                               void *pvPtr)
+{
+       OSUnMapPhysToLin(pvPtr, uiSize);
+}
+
+static PVRSRV_ERROR
+_PoisonAlloc(PHYS_HEAP *psPhysHeap,
+                        IMG_DEV_PHYADDR *psDevPAddr,
+                        IMG_UINT32 uiContigAllocSize,
+                        IMG_BYTE ui8PoisonValue)
+{
+       PVRSRV_ERROR eError;
+       void *pvKernLin = NULL;
+
+       eError = _MapAlloc(psPhysHeap,
+                                          psDevPAddr,
+                                          uiContigAllocSize,
+                                          PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC,
+                                          &pvKernLin);
+       PVR_GOTO_IF_ERROR(eError, map_failed);
+
+       OSCachedMemSetWMB(pvKernLin, ui8PoisonValue, uiContigAllocSize);
+
+       _UnMapAlloc(uiContigAllocSize, pvKernLin);
+
+       return PVRSRV_OK;
+
+map_failed:
+       PVR_DPF((PVR_DBG_ERROR, "Failed to poison allocation"));
+       return eError;
+}
+
+static PVRSRV_ERROR
+_ZeroAlloc(PHYS_HEAP *psPhysHeap,
+                  IMG_DEV_PHYADDR *psDevPAddr,
+                  IMG_UINT32 uiContigAllocSize)
+{
+       void *pvKernLin = NULL;
+       PVRSRV_ERROR eError;
+
+       eError = _MapAlloc(psPhysHeap,
+                                          psDevPAddr,
+                                          uiContigAllocSize,
+                                          PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC,
+                                          &pvKernLin);
+       PVR_GOTO_IF_ERROR(eError, map_failed);
+
+       OSCachedMemSetWMB(pvKernLin, 0, uiContigAllocSize);
+
+       _UnMapAlloc(uiContigAllocSize, pvKernLin);
+
+       return PVRSRV_OK;
+
+map_failed:
+       PVR_DPF((PVR_DBG_ERROR, "Failed to zero allocation"));
+       return eError;
+}
+
+static PVRSRV_ERROR
+_AllocLMPageArray(PMR_SIZE_T uiSize,
+                  IMG_UINT32 ui32NumPhysChunks,
+                  IMG_UINT32 ui32NumVirtChunks,
+                  IMG_UINT32 *pabMappingTable,
+                  IMG_UINT32 uiLog2AllocPageSize,
+                  IMG_BOOL bZero,
+                  IMG_BOOL bPoisonOnAlloc,
+                  IMG_BOOL bPoisonOnFree,
+                  IMG_BOOL bContig,
+                  IMG_BOOL bOnDemand,
+                  PHYS_HEAP* psPhysHeap,
+                  PVRSRV_MEMALLOCFLAGS_T uiAllocFlags,
+                  IMG_PID uiPid,
+                  PMR_LMALLOCARRAY_DATA **ppsPageArrayDataPtr,
+                  CONNECTION_DATA *psConnection
+                  )
+{
+       PMR_LMALLOCARRAY_DATA *psPageArrayData = NULL;
+       IMG_UINT32 ui32Index;
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(!bZero || !bPoisonOnAlloc);
+       PVR_ASSERT(OSGetPageShift() <= uiLog2AllocPageSize);
+
+       psPageArrayData = OSAllocZMem(sizeof(PMR_LMALLOCARRAY_DATA));
+       PVR_GOTO_IF_NOMEM(psPageArrayData, eError, errorOnAllocArray);
+
+       if (bContig)
+       {
+               /*
+                       Some allocations require kernel mappings, in which case, in order
+                       to be virtually contiguous, we also have to be physically contiguous.
+               */
+               psPageArrayData->uiTotalNumPages = 1;
+               psPageArrayData->uiPagesToAlloc = psPageArrayData->uiTotalNumPages;
+               psPageArrayData->uiContigAllocSize = TRUNCATE_64BITS_TO_32BITS(uiSize);
+               psPageArrayData->uiLog2AllocSize = uiLog2AllocPageSize;
+       }
+       else
+       {
+               IMG_UINT32 uiNumPages;
+
+               /* The cast below is justified by the assertion that follows, which
+               proves that no significant bits have been truncated */
+               uiNumPages = (IMG_UINT32)(((uiSize - 1) >> uiLog2AllocPageSize) + 1);
+               PVR_ASSERT(((PMR_SIZE_T)uiNumPages << uiLog2AllocPageSize) == uiSize);
+
+               psPageArrayData->uiTotalNumPages = uiNumPages;
+
+               if ((ui32NumVirtChunks != ui32NumPhysChunks) || (1 < ui32NumVirtChunks))
+               {
+                       psPageArrayData->uiPagesToAlloc = ui32NumPhysChunks;
+               }
+               else
+               {
+                       psPageArrayData->uiPagesToAlloc = uiNumPages;
+               }
+               psPageArrayData->uiContigAllocSize = 1 << uiLog2AllocPageSize;
+               psPageArrayData->uiLog2AllocSize = uiLog2AllocPageSize;
+       }
+       psPageArrayData->psConnection = psConnection;
+       psPageArrayData->uiPid = uiPid;
+       psPageArrayData->pasDevPAddr = OSAllocMem(sizeof(IMG_DEV_PHYADDR) *
+                                                                                               psPageArrayData->uiTotalNumPages);
+       PVR_GOTO_IF_NOMEM(psPageArrayData->pasDevPAddr, eError, errorOnAllocAddr);
+
+       /* Since no pages are allocated yet, initialise page addresses to INVALID_PAGE_ADDR */
+       for (ui32Index = 0; ui32Index < psPageArrayData->uiTotalNumPages; ui32Index++)
+       {
+               psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE_ADDR;
+       }
+
+       psPageArrayData->iNumPagesAllocated = 0;
+       psPageArrayData->bZeroOnAlloc = bZero;
+       psPageArrayData->bPoisonOnAlloc = bPoisonOnAlloc;
+       psPageArrayData->bPoisonOnFree = bPoisonOnFree;
+       psPageArrayData->bOnDemand = bOnDemand;
+       psPageArrayData->psPhysHeap = psPhysHeap;
+       psPageArrayData->uiAllocFlags = uiAllocFlags;
+
+       *ppsPageArrayDataPtr = psPageArrayData;
+
+       return PVRSRV_OK;
+
+       /*
+         error exit paths follow:
+       */
+errorOnAllocAddr:
+       OSFreeMem(psPageArrayData);
+
+errorOnAllocArray:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
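The page-count selection in _AllocLMPageArray() is what distinguishes sparse from dense PMRs: when the virtual and physical chunk counts differ, or more than one virtual chunk exists, only ui32NumPhysChunks pages are physically backed and the mapping table decides which virtual slots they occupy. The sketch below mirrors that decision for illustration only; it is not part of the patch and the helper name is hypothetical.

/* Illustrative only: mirrors the uiPagesToAlloc selection above. For a sparse
 * PMR of 8 virtual chunks backed by 3 physical chunks, only 3 pages are
 * allocated; the mapping table chooses the virtual slots they populate. */
static IMG_UINT32 _ExamplePagesToAlloc(IMG_UINT32 ui32NumPhysChunks,
                                       IMG_UINT32 ui32NumVirtChunks,
                                       IMG_UINT32 uiNumPages)
{
	if ((ui32NumVirtChunks != ui32NumPhysChunks) || (ui32NumVirtChunks > 1))
	{
		return ui32NumPhysChunks;  /* sparse: back only the populated chunks */
	}

	return uiNumPages;                 /* dense: back every page of the PMR */
}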
+
+
+static PVRSRV_ERROR
+_AllocLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData, IMG_UINT32 *pui32MapTable)
+{
+       PVRSRV_ERROR eError;
+       RA_BASE_T uiCardAddr;
+       RA_LENGTH_T uiActualSize;
+       IMG_UINT32 i, ui32Index = 0;
+       IMG_UINT32 uiContigAllocSize;
+       IMG_UINT32 uiLog2AllocSize;
+       PVRSRV_DEVICE_NODE *psDevNode;
+       IMG_BOOL bPoisonOnAlloc;
+       IMG_BOOL bZeroOnAlloc;
+       RA_ARENA *pArena;
+
+       PVR_ASSERT(NULL != psPageArrayData);
+       PVR_ASSERT(0 <= psPageArrayData->iNumPagesAllocated);
+
+       psDevNode = PhysHeapDeviceNode(psPageArrayData->psPhysHeap);
+       uiContigAllocSize = psPageArrayData->uiContigAllocSize;
+       uiLog2AllocSize = psPageArrayData->uiLog2AllocSize;
+       bPoisonOnAlloc = psPageArrayData->bPoisonOnAlloc;
+       bZeroOnAlloc = psPageArrayData->bZeroOnAlloc;
+
+       /* Get suitable local memory region for this GPU physheap allocation */
+       eError = PhysmemGetArenaLMA(psPageArrayData->psPhysHeap, &pArena);
+       PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemGetArenaLMA");
+
+       if (psPageArrayData->uiTotalNumPages <
+                       (psPageArrayData->iNumPagesAllocated + psPageArrayData->uiPagesToAlloc))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Requested pages do not fit the PMR allocation size. "
+                               "Allocated: %u + Requested: %u > Total Allowed: %u",
+                               psPageArrayData->iNumPagesAllocated,
+                               psPageArrayData->uiPagesToAlloc,
+                               psPageArrayData->uiTotalNumPages));
+               return PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE;
+       }
+
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+       {
+               IMG_UINT32 ui32OSid=0;
+
+               /* Obtain the OSid specific data from our connection handle */
+               if (psPageArrayData->psConnection != NULL)
+               {
+                       ui32OSid = psPageArrayData->psConnection->ui32OSid;
+               }
+
+               if (PVRSRV_CHECK_SHARED_BUFFER(psPageArrayData->uiAllocFlags))
+               {
+                       pArena=psDevNode->psOSSharedArena;
+                       PVR_DPF((PVR_DBG_MESSAGE,
+                                        "(GPU Virtualization Validation): Giving from shared mem"));
+               }
+               else
+               {
+                       pArena=psDevNode->psOSidSubArena[ui32OSid];
+                       PVR_DPF((PVR_DBG_MESSAGE,
+                                        "(GPU Virtualization Validation): Giving from OS slot %d",
+                                        ui32OSid));
+               }
+       }
+#endif
+
+       psPageArrayData->psArena = pArena;
+
+       for (i = 0; i < psPageArrayData->uiPagesToAlloc; i++)
+       {
+               /* Determine the target index before allocating the page,
+                * to keep the error paths simple */
+               if (psPageArrayData->uiTotalNumPages == psPageArrayData->uiPagesToAlloc)
+               {
+                       ui32Index = i;
+               }
+               else
+               {
+                       if (NULL == pui32MapTable)
+                       {
+                               PVR_LOG_GOTO_WITH_ERROR("pui32MapTable", eError, PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY, errorOnRAAlloc);
+                       }
+
+                       ui32Index = pui32MapTable[i];
+                       if (ui32Index >= psPageArrayData->uiTotalNumPages)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                               "%s: Page alloc request Index out of bounds for PMR @0x%p",
+                                               __func__,
+                                               psPageArrayData));
+                               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE, errorOnRAAlloc);
+                       }
+
+                       if (INVALID_PAGE_ADDR != psPageArrayData->pasDevPAddr[ui32Index].uiAddr)
+                       {
+                               PVR_LOG_GOTO_WITH_ERROR("Mapping already exists", eError, PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS, errorOnRAAlloc);
+                       }
+               }
+
+               eError = RA_Alloc(pArena,
+                                 uiContigAllocSize,
+                                 RA_NO_IMPORT_MULTIPLIER,
+                                 0,                       /* No flags */
+                                 1ULL << uiLog2AllocSize,
+                                 "LMA_Page_Alloc",
+                                 &uiCardAddr,
+                                 &uiActualSize,
+                                 NULL);                   /* No private handle */
+               if (PVRSRV_OK != eError)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "Failed to Allocate the page @index:%d, size = 0x%llx",
+                                       ui32Index, 1ULL << uiLog2AllocSize));
+                       PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES, errorOnRAAlloc);
+               }
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+               PVR_DPF((PVR_DBG_MESSAGE,
+                               "(GPU Virtualization Validation): Address: 0x%"IMG_UINT64_FMTSPECX,
+                               uiCardAddr));
+}
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+               /* Allocation is done a page at a time */
+               PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, uiActualSize, psPageArrayData->uiPid);
+#else
+               {
+                       IMG_CPU_PHYADDR sLocalCpuPAddr;
+
+                       sLocalCpuPAddr.uiAddr = (IMG_UINT64)uiCardAddr;
+                       PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+                                                                        NULL,
+                                                                        sLocalCpuPAddr,
+                                                                        uiActualSize,
+                                                                        NULL,
+                                                                        psPageArrayData->uiPid
+                                                                        DEBUG_MEMSTATS_VALUES);
+               }
+#endif
+#endif
+
+               psPageArrayData->pasDevPAddr[ui32Index].uiAddr = uiCardAddr;
+               if (bPoisonOnAlloc)
+               {
+                       eError = _PoisonAlloc(psPageArrayData->psPhysHeap,
+                                                                 &psPageArrayData->pasDevPAddr[ui32Index],
+                                                                 uiContigAllocSize,
+                                                                 PVRSRV_POISON_ON_ALLOC_VALUE);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "_PoisonAlloc", errorOnPoison);
+               }
+
+               if (bZeroOnAlloc)
+               {
+                       eError = _ZeroAlloc(psPageArrayData->psPhysHeap,
+                                                               &psPageArrayData->pasDevPAddr[ui32Index],
+                                                               uiContigAllocSize);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "_ZeroAlloc", errorOnZero);
+               }
+       }
+       psPageArrayData->iNumPagesAllocated += psPageArrayData->uiPagesToAlloc;
+
+       return PVRSRV_OK;
+
+       /*
+         error exit paths follow:
+       */
+errorOnZero:
+errorOnPoison:
+       eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+errorOnRAAlloc:
+       PVR_DPF((PVR_DBG_ERROR,
+                       "%s: alloc_pages failed to honour request %d @index: %d of %d pages: (%s)",
+                       __func__,
+                       ui32Index,
+                       i,
+                       psPageArrayData->uiPagesToAlloc,
+                       PVRSRVGetErrorString(eError)));
+       while (--i < psPageArrayData->uiPagesToAlloc)
+       {
+               if (psPageArrayData->uiTotalNumPages == psPageArrayData->uiPagesToAlloc)
+               {
+                       ui32Index = i;
+               }
+               else
+               {
+                       if (NULL == pui32MapTable)
+                       {
+                               break;
+                       }
+
+                       ui32Index = pui32MapTable[i];
+               }
+
+               if (ui32Index < psPageArrayData->uiTotalNumPages)
+               {
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+                       /* Allocation is done a page at a time */
+                       PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+                                                   uiContigAllocSize,
+                                                   psPageArrayData->uiPid);
+#else
+                       {
+                               PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+                                                               psPageArrayData->pasDevPAddr[ui32Index].uiAddr,
+                                                               psPageArrayData->uiPid);
+                       }
+#endif
+#endif
+                       RA_Free(pArena, psPageArrayData->pasDevPAddr[ui32Index].uiAddr);
+                       psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE_ADDR;
+               }
+       }
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+static PVRSRV_ERROR
+_FreeLMPageArray(PMR_LMALLOCARRAY_DATA *psPageArrayData)
+{
+       OSFreeMem(psPageArrayData->pasDevPAddr);
+
+       PVR_DPF((PVR_DBG_MESSAGE,
+                       "physmem_lma.c: freed local memory array structure for PMR @0x%p",
+                       psPageArrayData));
+
+       OSFreeMem(psPageArrayData);
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+_FreeLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData,
+             IMG_UINT32 *pui32FreeIndices,
+             IMG_UINT32 ui32FreePageCount)
+{
+       IMG_UINT32 uiContigAllocSize;
+       IMG_UINT32 i, ui32PagesToFree=0, ui32PagesFreed=0, ui32Index=0;
+       RA_ARENA *pArena = psPageArrayData->psArena;
+
+       PVR_ASSERT(psPageArrayData->iNumPagesAllocated != 0);
+
+       uiContigAllocSize = psPageArrayData->uiContigAllocSize;
+
+       ui32PagesToFree = (NULL == pui32FreeIndices) ?
+                       psPageArrayData->uiTotalNumPages : ui32FreePageCount;
+
+       for (i = 0; i < ui32PagesToFree; i++)
+       {
+               if (NULL == pui32FreeIndices)
+               {
+                       ui32Index = i;
+               }
+               else
+               {
+                       ui32Index = pui32FreeIndices[i];
+               }
+
+               if (INVALID_PAGE_ADDR != psPageArrayData->pasDevPAddr[ui32Index].uiAddr)
+               {
+                       ui32PagesFreed++;
+                       if (psPageArrayData->bPoisonOnFree)
+                       {
+                               _PoisonAlloc(psPageArrayData->psPhysHeap,
+                                                        &psPageArrayData->pasDevPAddr[ui32Index],
+                                                        uiContigAllocSize,
+                                                        PVRSRV_POISON_ON_FREE_VALUE);
+                       }
+
+                       RA_Free(pArena, psPageArrayData->pasDevPAddr[ui32Index].uiAddr);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+                       /* Allocation is done a page at a time */
+                       PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+                                                   uiContigAllocSize,
+                                                   psPageArrayData->uiPid);
+#else
+                       {
+                               PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+                                                               psPageArrayData->pasDevPAddr[ui32Index].uiAddr,
+                                                               psPageArrayData->uiPid);
+                       }
+#endif
+#endif
+                       psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE_ADDR;
+               }
+       }
+       psPageArrayData->iNumPagesAllocated -= ui32PagesFreed;
+
+       PVR_ASSERT(0 <= psPageArrayData->iNumPagesAllocated);
+
+       PVR_DPF((PVR_DBG_MESSAGE,
+                       "%s: freed %d bytes of local memory for PMR @0x%p",
+                       __func__,
+                       (ui32PagesFreed * uiContigAllocSize),
+                       psPageArrayData));
+
+       return PVRSRV_OK;
+}
+
+/*
+ *
+ * Implementation of callback functions
+ *
+ */
+
+/* The destructor function is called after the last reference disappears, but
+   before the PMR itself is freed. */
+static PVRSRV_ERROR
+PMRFinalizeLocalMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+       PVRSRV_ERROR eError;
+       PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
+
+       psLMAllocArrayData = pvPriv;
+
+       /* We can't free pages until now. */
+       if (psLMAllocArrayData->iNumPagesAllocated != 0)
+       {
+#if defined(DEBUG) && defined(SUPPORT_VALIDATION) && defined(__linux__)
+               PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+               IMG_UINT32 ui32LMALeakMax = psPVRSRVData->sMemLeakIntervals.ui32GPU;
+
+               mutex_lock(&g_sLMALeakMutex);
+
+               g_ui32LMALeakCounter++;
+               if (ui32LMALeakMax && g_ui32LMALeakCounter >= ui32LMALeakMax)
+               {
+                       g_ui32LMALeakCounter = 0;
+                       mutex_unlock(&g_sLMALeakMutex);
+
+                       PVR_DPF((PVR_DBG_WARNING, "%s: Skipped freeing of PMR 0x%p to trigger memory leak.", __func__, pvPriv));
+                       return PVRSRV_OK;
+               }
+
+               mutex_unlock(&g_sLMALeakMutex);
+#endif
+               eError = _FreeLMPages(psLMAllocArrayData, NULL, 0);
+               PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
+       }
+
+       eError = _FreeLMPageArray(psLMAllocArrayData);
+       PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
+
+       return PVRSRV_OK;
+}
+
+/* Callback function for locking the system physical page addresses.
+   Since this is LMA there is nothing to do, as we control the physical memory. */
+static PVRSRV_ERROR
+PMRLockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+
+       PVRSRV_ERROR eError;
+       PMR_LMALLOCARRAY_DATA *psLMAllocArrayData;
+
+       psLMAllocArrayData = pvPriv;
+
+       if (psLMAllocArrayData->bOnDemand)
+       {
+               /* Allocate Memory for deferred allocation */
+               eError = _AllocLMPages(psLMAllocArrayData, NULL);
+               PVR_RETURN_IF_ERROR(eError);
+       }
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+PMRUnlockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PMR_LMALLOCARRAY_DATA *psLMAllocArrayData;
+
+       psLMAllocArrayData = pvPriv;
+
+       if (psLMAllocArrayData->bOnDemand)
+       {
+               /* Free Memory for deferred allocation */
+               eError = _FreeLMPages(psLMAllocArrayData, NULL, 0);
+               PVR_RETURN_IF_ERROR(eError);
+       }
+
+       PVR_ASSERT(eError == PVRSRV_OK);
+       return eError;
+}
+
+/* N.B. It is assumed that PMRLockSysPhysAddressesLocalMem() is called _before_ this function! */
+static PVRSRV_ERROR
+PMRSysPhysAddrLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+                                          IMG_UINT32 ui32Log2PageSize,
+                                          IMG_UINT32 ui32NumOfPages,
+                                          IMG_DEVMEM_OFFSET_T *puiOffset,
+                                          IMG_BOOL *pbValid,
+                                          IMG_DEV_PHYADDR *psDevPAddr)
+{
+       IMG_UINT32 idx;
+       IMG_UINT32 uiLog2AllocSize;
+       IMG_UINT32 uiNumAllocs;
+       IMG_UINT64 uiAllocIndex;
+       IMG_DEVMEM_OFFSET_T uiInAllocOffset;
+       PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = pvPriv;
+
+       if (psLMAllocArrayData->uiLog2AllocSize < ui32Log2PageSize)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Requested physical addresses from PMR "
+                        "for incompatible contiguity %u!",
+                        __func__,
+                        ui32Log2PageSize));
+               return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+       }
+
+       uiNumAllocs = psLMAllocArrayData->uiTotalNumPages;
+       if (uiNumAllocs > 1)
+       {
+               PVR_ASSERT(psLMAllocArrayData->uiLog2AllocSize != 0);
+               uiLog2AllocSize = psLMAllocArrayData->uiLog2AllocSize;
+
+               for (idx=0; idx < ui32NumOfPages; idx++)
+               {
+                       if (pbValid[idx])
+                       {
+                               uiAllocIndex = puiOffset[idx] >> uiLog2AllocSize;
+                               uiInAllocOffset = puiOffset[idx] - (uiAllocIndex << uiLog2AllocSize);
+
+                               PVR_LOG_RETURN_IF_FALSE(uiAllocIndex < uiNumAllocs,
+                                                       "puiOffset out of range", PVRSRV_ERROR_OUT_OF_RANGE);
+
+                               PVR_ASSERT(uiInAllocOffset < (1ULL << uiLog2AllocSize));
+
+                               psDevPAddr[idx].uiAddr = psLMAllocArrayData->pasDevPAddr[uiAllocIndex].uiAddr + uiInAllocOffset;
+                       }
+               }
+       }
+       else
+       {
+               for (idx=0; idx < ui32NumOfPages; idx++)
+               {
+                       if (pbValid[idx])
+                       {
+                               psDevPAddr[idx].uiAddr = psLMAllocArrayData->pasDevPAddr[0].uiAddr + puiOffset[idx];
+                       }
+               }
+       }
+
+       return PVRSRV_OK;
+}
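+
+/* Illustrative note (not part of the original source): in
+ * PMRSysPhysAddrLocalMem() above, for a multi-page PMR with
+ * uiLog2AllocSize = 12 (4 KiB allocs) an offset of 0x1804 resolves to
+ *     uiAllocIndex    = 0x1804 >> 12 = 1
+ *     uiInAllocOffset = 0x1804 - (1 << 12) = 0x804
+ * so the returned device address is pasDevPAddr[1].uiAddr + 0x804. */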
+
+static PVRSRV_ERROR
+PMRAcquireKernelMappingDataLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+                                                                size_t uiOffset,
+                                                                size_t uiSize,
+                                                                void **ppvKernelAddressOut,
+                                                                IMG_HANDLE *phHandleOut,
+                                                                PMR_FLAGS_T ulFlags)
+{
+       PVRSRV_ERROR eError;
+       PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
+       void *pvKernLinAddr = NULL;
+       IMG_UINT32 ui32PageIndex = 0;
+       size_t uiOffsetMask = uiOffset;
+
+       psLMAllocArrayData = pvPriv;
+
+       /* Check that we can map this in contiguously */
+       if (psLMAllocArrayData->uiTotalNumPages != 1)
+       {
+               size_t uiStart = uiOffset;
+               size_t uiEnd = uiOffset + uiSize - 1;
+               size_t uiPageMask = ~((1 << psLMAllocArrayData->uiLog2AllocSize) - 1);
+
+               /* We can still map if only one page is required */
+               if ((uiStart & uiPageMask) != (uiEnd & uiPageMask))
+               {
+                       PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY, e0);
+               }
+
+               /* Locate the desired physical page to map in */
+               ui32PageIndex = uiOffset >> psLMAllocArrayData->uiLog2AllocSize;
+               uiOffsetMask = (1U << psLMAllocArrayData->uiLog2AllocSize) - 1;
+       }
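+
+       /* Worked example (illustrative values only): with uiLog2AllocSize = 12,
+        * uiOffset = 0x1800 and uiSize = 0x400 start and end in page 1
+        * (0x1800 and 0x1BFF share the same page mask), so the request is
+        * accepted with ui32PageIndex = 1 and uiOffsetMask = 0xFFF; the caller
+        * receives the mapping base plus (uiOffset & uiOffsetMask) = 0x800. */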
+
+       PVR_ASSERT(ui32PageIndex < psLMAllocArrayData->uiTotalNumPages);
+
+       eError = _MapAlloc(psLMAllocArrayData->psPhysHeap,
+                                               &psLMAllocArrayData->pasDevPAddr[ui32PageIndex],
+                                               psLMAllocArrayData->uiContigAllocSize,
+                                               ulFlags,
+                                               &pvKernLinAddr);
+
+       *ppvKernelAddressOut = ((IMG_CHAR *) pvKernLinAddr) + (uiOffset & uiOffsetMask);
+       *phHandleOut = pvKernLinAddr;
+
+       return eError;
+
+       /*
+         error exit paths follow:
+       */
+e0:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+static void PMRReleaseKernelMappingDataLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+                                                                                                IMG_HANDLE hHandle)
+{
+       PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
+       void *pvKernLinAddr = NULL;
+
+       psLMAllocArrayData = (PMR_LMALLOCARRAY_DATA *) pvPriv;
+       pvKernLinAddr = (void *) hHandle;
+
+       _UnMapAlloc(psLMAllocArrayData->uiContigAllocSize,
+                               pvKernLinAddr);
+}
+
+
+static PVRSRV_ERROR
+CopyBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+                                 IMG_DEVMEM_OFFSET_T uiOffset,
+                                 IMG_UINT8 *pcBuffer,
+                                 size_t uiBufSz,
+                                 size_t *puiNumBytes,
+                                 void (*pfnCopyBytes)(IMG_UINT8 *pcBuffer,
+                                                                          IMG_UINT8 *pcPMR,
+                                                                          size_t uiSize))
+{
+       PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
+       size_t uiBytesCopied;
+       size_t uiBytesToCopy;
+       size_t uiBytesCopyableFromAlloc;
+       void *pvMapping = NULL;
+       IMG_UINT8 *pcKernelPointer = NULL;
+       size_t uiBufferOffset;
+       IMG_UINT64 uiAllocIndex;
+       IMG_DEVMEM_OFFSET_T uiInAllocOffset;
+       PVRSRV_ERROR eError;
+
+       psLMAllocArrayData = pvPriv;
+
+       uiBytesCopied = 0;
+       uiBytesToCopy = uiBufSz;
+       uiBufferOffset = 0;
+
+       if (psLMAllocArrayData->uiTotalNumPages > 1)
+       {
+               while (uiBytesToCopy > 0)
+               {
+                       /* we have to map one alloc in at a time */
+                       PVR_ASSERT(psLMAllocArrayData->uiLog2AllocSize != 0);
+                       uiAllocIndex = uiOffset >> psLMAllocArrayData->uiLog2AllocSize;
+                       uiInAllocOffset = uiOffset - (uiAllocIndex << psLMAllocArrayData->uiLog2AllocSize);
+                       uiBytesCopyableFromAlloc = uiBytesToCopy;
+                       if (uiBytesCopyableFromAlloc + uiInAllocOffset > (1ULL << psLMAllocArrayData->uiLog2AllocSize))
+                       {
+                               uiBytesCopyableFromAlloc = TRUNCATE_64BITS_TO_SIZE_T((1ULL << psLMAllocArrayData->uiLog2AllocSize)-uiInAllocOffset);
+                       }
+
+                       PVR_ASSERT(uiBytesCopyableFromAlloc != 0);
+                       PVR_ASSERT(uiAllocIndex < psLMAllocArrayData->uiTotalNumPages);
+                       PVR_ASSERT(uiInAllocOffset < (1ULL << psLMAllocArrayData->uiLog2AllocSize));
+
+                       eError = _MapAlloc(psLMAllocArrayData->psPhysHeap,
+                                                               &psLMAllocArrayData->pasDevPAddr[uiAllocIndex],
+                                                               psLMAllocArrayData->uiContigAllocSize,
+                                                               PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC,
+                                                               &pvMapping);
+                       PVR_GOTO_IF_ERROR(eError, e0);
+                       pcKernelPointer = pvMapping;
+                       pfnCopyBytes(&pcBuffer[uiBufferOffset], &pcKernelPointer[uiInAllocOffset], uiBytesCopyableFromAlloc);
+
+                       _UnMapAlloc(psLMAllocArrayData->uiContigAllocSize,
+                                               pvMapping);
+
+                       uiBufferOffset += uiBytesCopyableFromAlloc;
+                       uiBytesToCopy -= uiBytesCopyableFromAlloc;
+                       uiOffset += uiBytesCopyableFromAlloc;
+                       uiBytesCopied += uiBytesCopyableFromAlloc;
+               }
+       }
+       else
+       {
+                       PVR_ASSERT((uiOffset + uiBufSz) <= psLMAllocArrayData->uiContigAllocSize);
+                       PVR_ASSERT(psLMAllocArrayData->uiContigAllocSize != 0);
+                       eError = _MapAlloc(psLMAllocArrayData->psPhysHeap,
+                                                               &psLMAllocArrayData->pasDevPAddr[0],
+                                                               psLMAllocArrayData->uiContigAllocSize,
+                                                               PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC,
+                                                               &pvMapping);
+                       PVR_GOTO_IF_ERROR(eError, e0);
+                       pcKernelPointer = pvMapping;
+                       pfnCopyBytes(pcBuffer, &pcKernelPointer[uiOffset], uiBufSz);
+
+                       _UnMapAlloc(psLMAllocArrayData->uiContigAllocSize,
+                                               pvMapping);
+
+                       uiBytesCopied = uiBufSz;
+       }
+       *puiNumBytes = uiBytesCopied;
+       return PVRSRV_OK;
+e0:
+       *puiNumBytes = uiBytesCopied;
+       return eError;
+}
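+
+/* Illustrative note (hypothetical numbers): in CopyBytesLocalMem() above, for a
+ * sparse PMR with uiLog2AllocSize = 12, a copy of 0x300 bytes starting at
+ * offset 0x1F00 is split by the loop into 0x100 bytes from alloc index 1
+ * (in-alloc offset 0xF00) followed by 0x200 bytes from alloc index 2
+ * (in-alloc offset 0x0), mapping and unmapping one alloc at a time. */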
+
+static void ReadLocalMem(IMG_UINT8 *pcBuffer,
+                                                IMG_UINT8 *pcPMR,
+                                                size_t uiSize)
+{
+       /* the memory is mapped as WC (and also aligned to page size) so we can
+        * safely call "Cached" memcpy */
+       OSCachedMemCopy(pcBuffer, pcPMR, uiSize);
+}
+
+static PVRSRV_ERROR
+PMRReadBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+                                 IMG_DEVMEM_OFFSET_T uiOffset,
+                                 IMG_UINT8 *pcBuffer,
+                                 size_t uiBufSz,
+                                 size_t *puiNumBytes)
+{
+       return CopyBytesLocalMem(pvPriv,
+                                                        uiOffset,
+                                                        pcBuffer,
+                                                        uiBufSz,
+                                                        puiNumBytes,
+                                                        ReadLocalMem);
+}
+
+static void WriteLocalMem(IMG_UINT8 *pcBuffer,
+                                                 IMG_UINT8 *pcPMR,
+                                                 size_t uiSize)
+{
+       /* the memory is mapped as WC (and also aligned to page size) so we can
+        * safely call "Cached" memcpy but need to issue a write memory barrier
+        * to flush the write buffers after */
+       OSCachedMemCopyWMB(pcPMR, pcBuffer, uiSize);
+}
+
+static PVRSRV_ERROR
+PMRWriteBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+                                         IMG_DEVMEM_OFFSET_T uiOffset,
+                                         IMG_UINT8 *pcBuffer,
+                                         size_t uiBufSz,
+                                         size_t *puiNumBytes)
+{
+       return CopyBytesLocalMem(pvPriv,
+                                                        uiOffset,
+                                                        pcBuffer,
+                                                        uiBufSz,
+                                                        puiNumBytes,
+                                                        WriteLocalMem);
+}
+
+/*************************************************************************/ /*!
+@Function       PMRChangeSparseMemLocalMem
+@Description    This function changes the sparse mapping by allocating and
+                freeing pages. It also updates the GPU mappings accordingly.
+@Return         PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+PMRChangeSparseMemLocalMem(PMR_IMPL_PRIVDATA pPriv,
+                           const PMR *psPMR,
+                           IMG_UINT32 ui32AllocPageCount,
+                           IMG_UINT32 *pai32AllocIndices,
+                           IMG_UINT32 ui32FreePageCount,
+                           IMG_UINT32 *pai32FreeIndices,
+                           IMG_UINT32 uiFlags)
+{
+       PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+       IMG_UINT32 ui32AdtnlAllocPages = 0;
+       IMG_UINT32 ui32AdtnlFreePages = 0;
+       IMG_UINT32 ui32CommonRequstCount = 0;
+       IMG_UINT32 ui32Loop = 0;
+       IMG_UINT32 ui32Index = 0;
+       IMG_UINT32 uiAllocpgidx;
+       IMG_UINT32 uiFreepgidx;
+
+       PMR_LMALLOCARRAY_DATA *psPMRPageArrayData = (PMR_LMALLOCARRAY_DATA *)pPriv;
+       IMG_DEV_PHYADDR sPhyAddr;
+
+#if defined(DEBUG)
+       IMG_BOOL bPoisonFail = IMG_FALSE;
+       IMG_BOOL bZeroFail = IMG_FALSE;
+#endif
+
+       /* Fetch the Page table array represented by the PMR */
+       IMG_DEV_PHYADDR *psPageArray = psPMRPageArrayData->pasDevPAddr;
+       PMR_MAPPING_TABLE *psPMRMapTable = PMR_GetMappingTable(psPMR);
+
+       /* The incoming request is classified into two operations independent of
+        * each other: alloc & free pages.
+        * These operations can be combined with two mapping operations as well
+        * which are GPU & CPU space mappings.
+        *
+        * From the alloc and free page requests, the net amount of pages to be
+        * allocated or freed is computed. Pages that were requested to be freed
+        * will be reused to fulfil alloc requests.
+        *
+        * The order of operations is:
+        * 1. Allocate new pages from the OS
+        * 2. Move the free pages from free request to alloc positions.
+        * 3. Free the rest of the pages not used for alloc
+        *
+        * Alloc parameters are validated at the time of allocation
+        * and any error will be handled then. */
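+
+       /* Worked example (illustrative only): a request to allocate at 5 indices
+        * and free at 3 indices with SPARSE_RESIZE_BOTH gives
+        *     ui32CommonRequstCount = 3   (freed pages reused for allocs)
+        *     ui32AdtnlAllocPages   = 2   (new pages from the arena)
+        *     ui32AdtnlFreePages    = 0   (nothing additional to release) */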
+
+       if (SPARSE_RESIZE_BOTH == (uiFlags & SPARSE_RESIZE_BOTH))
+       {
+               ui32CommonRequstCount = (ui32AllocPageCount > ui32FreePageCount) ?
+                               ui32FreePageCount : ui32AllocPageCount;
+
+               PDUMP_PANIC(PMR_DeviceNode(psPMR), SPARSEMEM_SWAP, "Request to swap alloc & free pages not supported");
+       }
+
+       if (SPARSE_RESIZE_ALLOC == (uiFlags & SPARSE_RESIZE_ALLOC))
+       {
+               ui32AdtnlAllocPages = ui32AllocPageCount - ui32CommonRequstCount;
+       }
+       else
+       {
+               ui32AllocPageCount = 0;
+       }
+
+       if (SPARSE_RESIZE_FREE == (uiFlags & SPARSE_RESIZE_FREE))
+       {
+               ui32AdtnlFreePages = ui32FreePageCount - ui32CommonRequstCount;
+       }
+       else
+       {
+               ui32FreePageCount = 0;
+       }
+
+       PVR_LOG_RETURN_IF_FALSE(
+           (ui32CommonRequstCount | ui32AdtnlAllocPages | ui32AdtnlFreePages) != 0,
+           "Invalid combination of parameters: ui32CommonRequstCount,"
+           " ui32AdtnlAllocPages and ui32AdtnlFreePages.",
+           PVRSRV_ERROR_INVALID_PARAMS
+       );
+
+       {
+               /* Validate the free page indices */
+               if (ui32FreePageCount)
+               {
+                       if (NULL != pai32FreeIndices)
+                       {
+                               for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++)
+                               {
+                                       uiFreepgidx = pai32FreeIndices[ui32Loop];
+
+                                       if (uiFreepgidx >= psPMRPageArrayData->uiTotalNumPages)
+                                       {
+                                               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE, e0);
+                                       }
+
+                                       if (INVALID_PAGE_ADDR == psPageArray[uiFreepgidx].uiAddr)
+                                       {
+                                               PVR_LOG_GOTO_WITH_ERROR("psPageArray[uiFreepgidx].uiAddr", eError, PVRSRV_ERROR_INVALID_PARAMS, e0);
+                                       }
+                               }
+                       }
+                       else
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Given non-zero free count but missing indices array",
+                                        __func__));
+                               return PVRSRV_ERROR_INVALID_PARAMS;
+                       }
+               }
+
+               /* The following block of code validates the common alloc page indices */
+               for (ui32Loop = ui32AdtnlAllocPages; ui32Loop < ui32AllocPageCount; ui32Loop++)
+               {
+                       uiAllocpgidx = pai32AllocIndices[ui32Loop];
+                       if (uiAllocpgidx >= psPMRPageArrayData->uiTotalNumPages)
+                       {
+                               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE, e0);
+                       }
+
+                       if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
+                       {
+                               if ((INVALID_PAGE_ADDR != psPageArray[uiAllocpgidx].uiAddr) ||
+                                               (TRANSLATION_INVALID != psPMRMapTable->aui32Translation[uiAllocpgidx]))
+                               {
+                                       PVR_LOG_GOTO_WITH_ERROR("Trying to allocate already allocated page again", eError, PVRSRV_ERROR_INVALID_PARAMS, e0);
+                               }
+                       }
+                       else
+                       {
+                               if ((INVALID_PAGE_ADDR == psPageArray[uiAllocpgidx].uiAddr) ||
+                                   (TRANSLATION_INVALID == psPMRMapTable->aui32Translation[uiAllocpgidx]))
+                               {
+                                       PVR_LOG_GOTO_WITH_ERROR("Unable to remap memory due to missing page", eError, PVRSRV_ERROR_INVALID_PARAMS, e0);
+                               }
+                       }
+               }
+
+
+               ui32Loop = 0;
+
+               /* Allocate new pages */
+               if (0 != ui32AdtnlAllocPages)
+               {
+                       /* Say how many pages to allocate */
+                       psPMRPageArrayData->uiPagesToAlloc = ui32AdtnlAllocPages;
+
+                       eError = _AllocLMPages(psPMRPageArrayData, pai32AllocIndices);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "_AllocLMPages", e0);
+
+                       /* Mark the corresponding pages of translation table as valid */
+                       for (ui32Loop = 0; ui32Loop < ui32AdtnlAllocPages; ui32Loop++)
+                       {
+                               psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = pai32AllocIndices[ui32Loop];
+                       }
+
+                       psPMRMapTable->ui32NumPhysChunks += ui32AdtnlAllocPages;
+               }
+
+               ui32Index = ui32Loop;
+
+               /* Move the corresponding free pages to alloc request */
+               for (ui32Loop = 0; ui32Loop < ui32CommonRequstCount; ui32Loop++, ui32Index++)
+               {
+
+                       uiAllocpgidx = pai32AllocIndices[ui32Index];
+                       uiFreepgidx  = pai32FreeIndices[ui32Loop];
+                       sPhyAddr = psPageArray[uiAllocpgidx];
+                       psPageArray[uiAllocpgidx] = psPageArray[uiFreepgidx];
+
+                       /* Is remap mem used in a real-world scenario? Should it be turned into a
+                        * debug feature? The condition check should be moved out of the loop;
+                        * this will be done at a later point after some analysis. */
+                       if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
+                       {
+                               psPMRMapTable->aui32Translation[uiFreepgidx] = TRANSLATION_INVALID;
+                               psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
+                               psPageArray[uiFreepgidx].uiAddr = INVALID_PAGE_ADDR;
+                       }
+                       else
+                       {
+                               psPageArray[uiFreepgidx] = sPhyAddr;
+                               psPMRMapTable->aui32Translation[uiFreepgidx] = uiFreepgidx;
+                               psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
+                       }
+
+                       /* Be sure to honour the attributes associated with the allocation
+                        * such as zeroing, poisoning etc. */
+                       if (psPMRPageArrayData->bPoisonOnAlloc)
+                       {
+                               eError = _PoisonAlloc(psPMRPageArrayData->psPhysHeap,
+                                                     &psPMRPageArrayData->pasDevPAddr[uiAllocpgidx],
+                                                     psPMRPageArrayData->uiContigAllocSize,
+                                                     PVRSRV_POISON_ON_ALLOC_VALUE);
+
+                               /* Consider this as a soft failure and go ahead but log error to kernel log */
+                               if (eError != PVRSRV_OK)
+                               {
+#if defined(DEBUG)
+                                       bPoisonFail = IMG_TRUE;
+#endif
+                               }
+                       }
+                       else
+                       {
+                               if (psPMRPageArrayData->bZeroOnAlloc)
+                               {
+                                       eError = _ZeroAlloc(psPMRPageArrayData->psPhysHeap,
+                                                           &psPMRPageArrayData->pasDevPAddr[uiAllocpgidx],
+                                                           psPMRPageArrayData->uiContigAllocSize);
+                                       /* Consider this as a soft failure and go ahead but log error to kernel log */
+                                       if (eError != PVRSRV_OK)
+                                       {
+#if defined(DEBUG)
+                                               /* Don't think we need to zero any pages further */
+                                               bZeroFail = IMG_TRUE;
+#endif
+                                       }
+                               }
+                       }
+               }
+
+               /* Free the additional free pages */
+               if (0 != ui32AdtnlFreePages)
+               {
+                       ui32Index = ui32Loop;
+                       _FreeLMPages(psPMRPageArrayData, &pai32FreeIndices[ui32Loop], ui32AdtnlFreePages);
+                       ui32Loop = 0;
+
+                       while (ui32Loop++ < ui32AdtnlFreePages)
+                       {
+                               /* Set the corresponding mapping table entry to invalid address */
+                               psPMRMapTable->aui32Translation[pai32FreeIndices[ui32Index++]] = TRANSLATION_INVALID;
+                       }
+
+                       psPMRMapTable->ui32NumPhysChunks -= ui32AdtnlFreePages;
+               }
+
+       }
+
+#if defined(DEBUG)
+       if (IMG_TRUE == bPoisonFail)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Error in poisoning the page", __func__));
+       }
+
+       if (IMG_TRUE == bZeroFail)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Error in zeroing the page", __func__));
+       }
+#endif
+
+       /* Update the PMR memory holding information */
+       eError = PVRSRV_OK;
+
+e0:
+       return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       PMRChangeSparseMemCPUMapLocalMem
+@Description    This function changes the CPU mappings accordingly
+@Return         PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+static
+PVRSRV_ERROR PMRChangeSparseMemCPUMapLocalMem(PMR_IMPL_PRIVDATA pPriv,
+                                              const PMR *psPMR,
+                                              IMG_UINT64 sCpuVAddrBase,
+                                              IMG_UINT32 ui32AllocPageCount,
+                                              IMG_UINT32 *pai32AllocIndices,
+                                              IMG_UINT32 ui32FreePageCount,
+                                              IMG_UINT32 *pai32FreeIndices)
+{
+       PVRSRV_ERROR eError;
+       IMG_DEV_PHYADDR *psPageArray;
+       PMR_LMALLOCARRAY_DATA *psPMRPageArrayData = (PMR_LMALLOCARRAY_DATA *)pPriv;
+       uintptr_t sCpuVABase = sCpuVAddrBase;
+       IMG_CPU_PHYADDR sCpuAddrPtr;
+       IMG_BOOL bValid = IMG_FALSE;
+
+       /* Get the base address of the heap */
+       eError = PMR_CpuPhysAddr(psPMR,
+                                psPMRPageArrayData->uiLog2AllocSize,
+                                1,
+                                0,     /* offset zero here means the first page in the PMR */
+                                &sCpuAddrPtr,
+                                &bValid);
+       PVR_LOG_RETURN_IF_ERROR(eError, "PMR_CpuPhysAddr");
+
+       /* The physical base address of the heap is computed here by subtracting this
+        * page's offset: the physical address of any page = base address of the heap
+        * + offset of the page. */
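+       /* Illustrative example (hypothetical addresses): if page 0 of the PMR has
+        * CPU physical address 0x80001000 and device physical address 0x1000
+        * within the heap, the base computed below is 0x80000000. */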
+       sCpuAddrPtr.uiAddr -= psPMRPageArrayData->pasDevPAddr[0].uiAddr;
+       psPageArray = psPMRPageArrayData->pasDevPAddr;
+
+       return OSChangeSparseMemCPUAddrMap((void **)psPageArray,
+                                          sCpuVABase,
+                                          sCpuAddrPtr,
+                                          ui32AllocPageCount,
+                                          pai32AllocIndices,
+                                          ui32FreePageCount,
+                                          pai32FreeIndices,
+                                          IMG_TRUE);
+}
+
+static PMR_IMPL_FUNCTAB _sPMRLMAFuncTab = {
+       /* pfnLockPhysAddresses */
+       &PMRLockSysPhysAddressesLocalMem,
+       /* pfnUnlockPhysAddresses */
+       &PMRUnlockSysPhysAddressesLocalMem,
+       /* pfnDevPhysAddr */
+       &PMRSysPhysAddrLocalMem,
+       /* pfnAcquireKernelMappingData */
+       &PMRAcquireKernelMappingDataLocalMem,
+       /* pfnReleaseKernelMappingData */
+       &PMRReleaseKernelMappingDataLocalMem,
+       /* pfnReadBytes */
+       &PMRReadBytesLocalMem,
+       /* pfnWriteBytes */
+       &PMRWriteBytesLocalMem,
+       /* pfnUnpinMem */
+       NULL,
+       /* pfnPinMem */
+       NULL,
+       /* pfnChangeSparseMem*/
+       &PMRChangeSparseMemLocalMem,
+       /* pfnChangeSparseMemCPUMap */
+       &PMRChangeSparseMemCPUMapLocalMem,
+       /* pfnMMap */
+       NULL,
+       /* pfnFinalize */
+       &PMRFinalizeLocalMem
+};
+
+PVRSRV_ERROR
+PhysmemNewLocalRamBackedPMR(PHYS_HEAP *psPhysHeap,
+                                                       CONNECTION_DATA *psConnection,
+                            IMG_DEVMEM_SIZE_T uiSize,
+                            IMG_DEVMEM_SIZE_T uiChunkSize,
+                            IMG_UINT32 ui32NumPhysChunks,
+                            IMG_UINT32 ui32NumVirtChunks,
+                            IMG_UINT32 *pui32MappingTable,
+                            IMG_UINT32 uiLog2AllocPageSize,
+                            PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                            const IMG_CHAR *pszAnnotation,
+                            IMG_PID uiPid,
+                            PMR **ppsPMRPtr,
+                            IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_ERROR eError2;
+       PMR *psPMR = NULL;
+       PMR_LMALLOCARRAY_DATA *psPrivData = NULL;
+       PMR_FLAGS_T uiPMRFlags;
+       IMG_BOOL bZero;
+       IMG_BOOL bPoisonOnAlloc;
+       IMG_BOOL bPoisonOnFree;
+       IMG_BOOL bOnDemand;
+       IMG_BOOL bContig;
+
+       /* For sparse requests we have to do the allocation
+        * in chunks rather than requesting one contiguous block */
+       if (ui32NumPhysChunks != ui32NumVirtChunks || ui32NumVirtChunks > 1)
+       {
+               if (PVRSRV_CHECK_KERNEL_CPU_MAPPABLE(uiFlags))
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: LMA kernel mapping functions currently "
+                                       "don't work with discontiguous memory.",
+                                       __func__));
+                       PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, errorOnParam);
+               }
+               bContig = IMG_FALSE;
+       }
+       else
+       {
+               bContig = IMG_TRUE;
+       }
+
+       bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiFlags) ? IMG_TRUE : IMG_FALSE;
+       bZero = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE;
+       bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE;
+#if defined(DEBUG)
+       bPoisonOnFree = PVRSRV_CHECK_POISON_ON_FREE(uiFlags) ? IMG_TRUE : IMG_FALSE;
+#else
+       bPoisonOnFree = IMG_FALSE;
+#endif
+
+       /* Create Array structure that holds the physical pages */
+       eError = _AllocLMPageArray(uiChunkSize * ui32NumVirtChunks,
+                                  ui32NumPhysChunks,
+                                  ui32NumVirtChunks,
+                                  pui32MappingTable,
+                                  uiLog2AllocPageSize,
+                                  bZero,
+                                  bPoisonOnAlloc,
+                                  bPoisonOnFree,
+                                  bContig,
+                                  bOnDemand,
+                                  psPhysHeap,
+                                  uiFlags,
+                                  uiPid,
+                                  &psPrivData,
+                                  psConnection);
+       PVR_GOTO_IF_ERROR(eError, errorOnAllocPageArray);
+
+       if (!bOnDemand)
+       {
+               /* Allocate the physical pages */
+               eError = _AllocLMPages(psPrivData, pui32MappingTable);
+               PVR_GOTO_IF_ERROR(eError, errorOnAllocPages);
+       }
+
+       /* In this instance, we simply pass flags straight through.
+
+          Generically, uiFlags can include things that control the PMR
+          factory, but we don't need any such thing (at the time of
+          writing!), and our caller specifies all PMR flags so we don't
+          need to meddle with what was given to us.
+       */
+       uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+       /* check no significant bits were lost in cast due to different
+          bit widths for flags */
+       PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+
+       if (bOnDemand)
+       {
+               PDUMPCOMMENT(PhysHeapDeviceNode(psPhysHeap), "Deferred Allocation PMR (LMA)");
+       }
+
+       eError = PMRCreatePMR(psPhysHeap,
+                                                 uiSize,
+                                                 uiChunkSize,
+                                                 ui32NumPhysChunks,
+                                                 ui32NumVirtChunks,
+                                                 pui32MappingTable,
+                                                 uiLog2AllocPageSize,
+                                                 uiPMRFlags,
+                                                 pszAnnotation,
+                                                 &_sPMRLMAFuncTab,
+                                                 psPrivData,
+                                                 PMR_TYPE_LMA,
+                                                 &psPMR,
+                                                 ui32PDumpFlags);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PMRCreatePMR", errorOnCreate);
+
+       *ppsPMRPtr = psPMR;
+       return PVRSRV_OK;
+
+errorOnCreate:
+       if (!bOnDemand && psPrivData->iNumPagesAllocated)
+       {
+               eError2 = _FreeLMPages(psPrivData, NULL, 0);
+               PVR_ASSERT(eError2 == PVRSRV_OK);
+       }
+
+errorOnAllocPages:
+       eError2 = _FreeLMPageArray(psPrivData);
+       PVR_ASSERT(eError2 == PVRSRV_OK);
+
+errorOnAllocPageArray:
+errorOnParam:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
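+
+/* Usage sketch (illustrative, not part of the original source): a caller
+ * creating a fully backed, non-sparse 64 KiB PMR with 4 KiB granularity would
+ * pass uiSize = uiChunkSize = 0x10000, ui32NumPhysChunks = ui32NumVirtChunks = 1,
+ * a single-entry mapping table of { 0 } and uiLog2AllocPageSize = 12; without
+ * the ON_DEMAND flag the physical pages are allocated immediately by
+ * _AllocLMPages() above. */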
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/pmr.c b/drivers/gpu/drm/img/img-rogue/services/server/common/pmr.c
new file mode 100644 (file)
index 0000000..8c4575b
--- /dev/null
@@ -0,0 +1,3697 @@
+/*************************************************************************/ /*!
+@File
+@Title          Physmem (PMR) abstraction
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management.  This module is responsible for
+                the "PMR" abstraction.  A PMR (Physical Memory Resource)
+                represents some unit of physical memory which is
+                allocated/freed/mapped/unmapped as an indivisible unit
+                (higher software levels provide an abstraction above that
+                to deal with dividing this down into smaller manageable units).
+                Importantly, this module knows nothing of virtual memory, or
+                of MMUs etc., with one excusable exception.  We have the
+                concept of a "page size", which really means nothing in
+                physical memory, but represents a "contiguity quantum" such
+                that the higher level modules which map this memory are able
+                to verify that it matches the needs of the page size for the
+                virtual realm into which it is being mapped.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+#include "pdump.h"
+#include "devicemem_server_utils.h"
+
+#include "osfunc.h"
+#include "pdump_km.h"
+#include "pdump_physmem.h"
+#include "pmr_impl.h"
+#include "pmr_os.h"
+#include "pvrsrv.h"
+
+#include "allocmem.h"
+#include "lock.h"
+#include "uniq_key_splay_tree.h"
+
+#if defined(SUPPORT_SECURE_EXPORT)
+#include "secure_export.h"
+#include "ossecure_export.h"
+#endif
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+#include "ri_server.h"
+#endif
+
+/* ourselves */
+#include "pmr.h"
+
+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS)
+#include "mmap_stats.h"
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#include "proc_stats.h"
+#endif
+
+#include "pdump_km.h"
+
+/* Memalloc flags can be converted into pmr, ra or psplay flags.
+ * Ensure the flag types are the same size.
+ */
+static_assert(sizeof(PVRSRV_MEMALLOCFLAGS_T) == sizeof(PMR_FLAGS_T),
+                         "Mismatch memalloc and pmr flags type size.");
+static_assert(sizeof(PVRSRV_MEMALLOCFLAGS_T) == sizeof(RA_FLAGS_T),
+                         "Mismatch memalloc and ra flags type size.");
+static_assert(sizeof(PVRSRV_MEMALLOCFLAGS_T) == sizeof(IMG_PSPLAY_FLAGS_T),
+                         "Mismatch memalloc and psplay flags type size.");
+
+/* A "context" for the physical memory block resource allocator.
+ *
+ * Context is probably the wrong word.
+ *
+ * There is almost certainly only one of these, ever, in the system.
+ * But, let's keep the notion of a context anyway, "just-in-case".
+ */
+static struct _PMR_CTX_
+{
+       /* For debugging, and PDump, etc., let's issue a forever incrementing
+        * serial number to each allocation.
+        */
+       IMG_UINT64 uiNextSerialNum;
+
+       /* For security, we only allow a PMR to be mapped if the caller knows
+        * its key. We can pseudo-randomly generate keys
+        */
+       IMG_UINT64 uiNextKey;
+
+       /* For debugging only, I guess: Number of live PMRs */
+       IMG_UINT32 uiNumLivePMRs;
+
+       /* Lock for this structure */
+       POS_LOCK hLock;
+
+       /* In order to seed the uiNextKey, we enforce initialisation at driver
+        * load time. Also, we can debug check at driver unload that the PMR
+        * count is zero.
+        */
+       IMG_BOOL bModuleInitialised;
+} _gsSingletonPMRContext = { 1, 0, 0, NULL, IMG_FALSE };
+
+
+/* A PMR. One per physical allocation. May be "shared".
+ *
+ * "shared" is ambiguous. We need to be careful with terminology.
+ * There are two ways in which a PMR may be "shared" and we need to be sure
+ * that we are clear which we mean.
+ *
+ * i)   multiple small allocations living together inside one PMR.
+ *
+ * ii)  one single allocation filling a PMR but mapped into multiple memory
+ *      contexts.
+ *
+ * This is more important further up the stack - at this level, all we care is
+ * that the PMR is being referenced multiple times.
+ */
+struct _PMR_
+{
+       /* This object is strictly refcounted. References include:
+        * - mapping
+        * - live handles (to this object)
+        * - live export handles
+        * (thus it is normal for allocated and exported memory to have a refcount of 3)
+        * The object is destroyed when and only when the refcount reaches 0
+        */
+
+       /* Physical address translation (device <> cpu) is done on a per device
+        * basis which means we need the physical heap info
+        */
+       PHYS_HEAP *psPhysHeap;
+
+       ATOMIC_T iRefCount;
+
+       /* Lock count - this is the number of times PMRLockSysPhysAddresses()
+        * has been called, less the number of PMRUnlockSysPhysAddresses()
+        * calls. This is arguably here for debug reasons only, as the refcount
+        * is already incremented as a matter of course.
+        * Really, this just allows us to trap protocol errors: i.e. calling
+        * PMRSysPhysAddr(), without a lock, or calling
+        * PMRUnlockSysPhysAddresses() too many or too few times.
+        */
+       ATOMIC_T iLockCount;
+
+       /* Lock for this structure */
+       POS_LOCK hLock;
+
+       /* Incrementing serial number to each allocation. */
+       IMG_UINT64 uiSerialNum;
+
+       /* For security, we only allow a PMR to be mapped if the caller knows
+        * its key. We can pseudo-randomly generate keys
+        */
+       PMR_PASSWORD_T uiKey;
+
+       /* Callbacks for per-flavour functions */
+       const PMR_IMPL_FUNCTAB *psFuncTab;
+
+       /* Data associated with the "subtype" */
+       PMR_IMPL_PRIVDATA pvFlavourData;
+
+       /* What kind of PMR do we have? */
+       PMR_IMPL_TYPE eFlavour;
+
+       /* And for pdump */
+       const IMG_CHAR *pszPDumpDefaultMemspaceName;
+
+       /* Allocation annotation */
+       IMG_CHAR szAnnotation[DEVMEM_ANNOTATION_MAX_LEN];
+
+#if defined(PDUMP)
+
+       IMG_HANDLE hPDumpAllocHandle;
+
+       IMG_UINT32 uiNumPDumpBlocks;
+#endif
+
+       /* Logical size of allocation. "logical", because a PMR can represent
+        * memory that will never physically exist.  This is the amount of
+        * virtual space that the PMR would consume when it's mapped into a
+        * virtual allocation.
+        */
+       PMR_SIZE_T uiLogicalSize;
+
+       /* Mapping table for the allocation.
+        * PMRs can be sparse, in which case not all the "logical" addresses in
+        * it are valid. We need to know which addresses are and aren't valid
+        * when mapping or reading the PMR.
+        * The mapping table translates "logical" offsets into physical offsets
+        * which is what we always pass to the PMR factory (so it doesn't have
+        * to be concerned about sparseness issues)
+        */
+       PMR_MAPPING_TABLE *psMappingTable;
+
+       /* Indicates whether this PMR has been allocated as sparse.
+        * The condition for this variable to be set at allocation time is:
+        * (numVirtChunks != numPhysChunks) || (numVirtChunks > 1)
+        */
+       IMG_BOOL bSparseAlloc;
+
+       /* Indicates whether this PMR has been unpinned.
+        * By default, all PMRs are pinned at creation.
+        */
+       IMG_BOOL bIsUnpinned;
+
+       /*
+        * Flag that conveys mutability of the PMR:
+        * - TRUE indicates the PMR is immutable (no more memory changes)
+        * - FALSE means the memory layout associated with the PMR is mutable
+        *
+        * A PMR is always mutable by default but is marked immutable on the
+        * first export for the rest of its life.
+        *
+        * Also, any PMRs that track the same memory through imports are
+        * marked immutable as well.
+        */
+       IMG_BOOL bNoLayoutChange;
+
+       /* Minimum Physical Contiguity Guarantee.  Might be called "page size",
+        * but that would be incorrect, as page size is something meaningful
+        * only in virtual realm. This contiguity guarantee provides an
+        * inequality that can be verified/asserted/whatever to ensure that
+        * this PMR conforms to the page size requirement of the place the PMR
+        * gets mapped. (May be used to select an appropriate heap in variable
+        * page size systems)
+        *
+        * The absolutely necessary condition is this:
+        *
+        *    device MMU page size <= actual physical contiguity.
+        *
+        * We go one step further in order to be able to provide an early
+        * warning / early compatibility check and say this:
+        *
+        *     device MMU page size <=
+        *         2**(uiLog2ContiguityGuarantee) <=
+        *             actual physical contiguity.
+        *
+        * In this way, it is possible to make the page table reservation
+        * in the device MMU without even knowing the granularity of the
+        * physical memory (i.e. useful for being able to allocate virtual
+        * before physical)
+        */
+       PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee;
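+
+       /* Illustrative example: with 4 KiB device MMU pages and allocations that
+        * are physically contiguous in 64 KiB units, any uiLog2ContiguityGuarantee
+        * from 12 to 16 satisfies
+        *     device MMU page size <= 2**uiLog2ContiguityGuarantee <= actual contiguity
+        */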
+
+       /* Flags. We store a copy of the "PMR flags" (usually a subset of the
+        * flags given at allocation time) and return them to any caller of
+        * PMR_Flags(). The intention of these flags is that the ones stored
+        * here are used to represent permissions, such that no one is able
+        * to map a PMR in a mode in which they are not allowed, e.g.,
+        * writeable for a read-only PMR, etc.
+        */
+       PMR_FLAGS_T uiFlags;
+
+       /* Do we really need this?
+        * For now we'll keep it, until we know we don't.
+        * NB: this is not the "memory context" in client terms - this is
+        * _purely_ the "PMR" context, of which there is almost certainly only
+        * ever one per system as a whole, but we'll keep the concept anyway,
+        * just-in-case.
+        */
+       struct _PMR_CTX_ *psContext;
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+       /* Stored handle to PMR RI entry */
+       void            *hRIHandle;
+#endif
+};
+
+/* Do we need a struct for the export handle?
+ * I'll use one for now, but if nothing goes in it, we'll lose it
+ */
+struct _PMR_EXPORT_
+{
+       struct _PMR_ *psPMR;
+};
+
+struct _PMR_PAGELIST_
+{
+       struct _PMR_ *psReferencePMR;
+};
+
+#if defined(PDUMP)
+static INLINE IMG_BOOL _IsHostDevicePMR(const PMR *const psPMR)
+{
+       const PVRSRV_DEVICE_NODE *psDevNode = PVRSRVGetPVRSRVData()->psHostMemDeviceNode;
+       return psPMR->psPhysHeap == psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL];
+}
+
+static void
+PDumpPMRFreePMR(PMR *psPMR,
+                IMG_DEVMEM_SIZE_T uiSize,
+                IMG_DEVMEM_ALIGN_T uiBlockSize,
+                IMG_UINT32 uiLog2Contiguity,
+                IMG_HANDLE hPDumpAllocationInfoHandle);
+
+static void
+PDumpPMRMallocPMR(PMR *psPMR,
+                  IMG_DEVMEM_SIZE_T uiSize,
+                  IMG_DEVMEM_ALIGN_T uiBlockSize,
+                  IMG_UINT32 ui32ChunkSize,
+                  IMG_UINT32 ui32NumPhysChunks,
+                  IMG_UINT32 ui32NumVirtChunks,
+                  IMG_UINT32 *puiMappingTable,
+                  IMG_UINT32 uiLog2Contiguity,
+                  IMG_BOOL bInitialise,
+                  IMG_UINT32 ui32InitValue,
+                  IMG_HANDLE *phPDumpAllocInfoOut,
+                  IMG_UINT32 ui32PDumpFlags);
+
+static void
+PDumpPMRChangeSparsePMR(PMR *psPMR,
+                        IMG_UINT32 uiBlockSize,
+                        IMG_UINT32 ui32AllocPageCount,
+                        IMG_UINT32 *pai32AllocIndices,
+                        IMG_UINT32 ui32FreePageCount,
+                        IMG_UINT32 *pai32FreeIndices,
+                        IMG_BOOL bInitialise,
+                        IMG_UINT32 ui32InitValue,
+                        IMG_HANDLE *phPDumpAllocInfoOut);
+#endif /* defined PDUMP */
+
+PPVRSRV_DEVICE_NODE PMRGetExportDeviceNode(PMR_EXPORT *psExportPMR)
+{
+       PPVRSRV_DEVICE_NODE psReturnedDeviceNode = NULL;
+
+       PVR_ASSERT(psExportPMR != NULL);
+       if (psExportPMR)
+       {
+               PVR_ASSERT(psExportPMR->psPMR != NULL);
+               if (psExportPMR->psPMR)
+               {
+                       PVR_ASSERT(OSAtomicRead(&psExportPMR->psPMR->iRefCount) > 0);
+                       if (OSAtomicRead(&psExportPMR->psPMR->iRefCount) > 0)
+                       {
+                               psReturnedDeviceNode = PMR_DeviceNode(psExportPMR->psPMR);
+                       }
+               }
+       }
+
+       return psReturnedDeviceNode;
+}
+
+static PVRSRV_ERROR
+_PMRCreate(PMR_SIZE_T uiLogicalSize,
+           PMR_SIZE_T uiChunkSize,
+           IMG_UINT32 ui32NumPhysChunks,
+           IMG_UINT32 ui32NumVirtChunks,
+           IMG_UINT32 *pui32MappingTable,
+           PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee,
+           PMR_FLAGS_T uiFlags,
+           PMR **ppsPMR)
+{
+       void *pvPMRLinAddr;
+       PMR *psPMR;
+       PMR_MAPPING_TABLE *psMappingTable;
+       struct _PMR_CTX_ *psContext;
+       IMG_UINT32 i, ui32Temp = 0;
+       IMG_UINT32 ui32Remainder;
+       PVRSRV_ERROR eError;
+       IMG_BOOL bSparse = IMG_FALSE;
+
+       psContext = &_gsSingletonPMRContext;
+
+       /* Do we have a sparse allocation? */
+       if ( (ui32NumVirtChunks != ui32NumPhysChunks) ||
+                       (ui32NumVirtChunks > 1) )
+       {
+               bSparse = IMG_TRUE;
+       }
+
+       /* Extra checks required for sparse PMRs */
+       if (uiLogicalSize != uiChunkSize)
+       {
+               /* Check the logical size and chunk information agree with each other */
+               if (uiLogicalSize != (uiChunkSize * ui32NumVirtChunks))
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Bad mapping size (uiLogicalSize = 0x%llx, uiChunkSize = 0x%llx, ui32NumVirtChunks = %d)",
+                                       __func__, (unsigned long long)uiLogicalSize, (unsigned long long)uiChunkSize, ui32NumVirtChunks));
+                       return PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE;
+               }
+
+               /* Check that the chunk size is a multiple of the contiguity */
+               OSDivide64(uiChunkSize, (1<< uiLog2ContiguityGuarantee), &ui32Remainder);
+               if (ui32Remainder)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Bad chunk size, must be a multiple of the contiguity "
+                                       "(uiChunkSize = 0x%llx, uiLog2ContiguityGuarantee = %u)",
+                                       __func__,
+                                       (unsigned long long) uiChunkSize,
+                                       uiLog2ContiguityGuarantee));
+                       return PVRSRV_ERROR_PMR_BAD_CHUNK_SIZE;
+               }
+       }
+
+       pvPMRLinAddr = OSAllocMem(sizeof(*psPMR) + sizeof(*psMappingTable) + sizeof(IMG_UINT32) * ui32NumVirtChunks);
+       PVR_RETURN_IF_NOMEM(pvPMRLinAddr);
+
+       psPMR = (PMR *) pvPMRLinAddr;
+       psMappingTable = IMG_OFFSET_ADDR(pvPMRLinAddr, sizeof(*psPMR));
+
+       /* Setup the mapping table */
+       psMappingTable->uiChunkSize = uiChunkSize;
+       psMappingTable->ui32NumVirtChunks = ui32NumVirtChunks;
+       psMappingTable->ui32NumPhysChunks = ui32NumPhysChunks;
+       OSCachedMemSet(&psMappingTable->aui32Translation[0], 0xFF, sizeof(psMappingTable->aui32Translation[0])*
+                      ui32NumVirtChunks);
+       for (i=0; i<ui32NumPhysChunks; i++)
+       {
+               ui32Temp = pui32MappingTable[i];
+               if (ui32Temp < ui32NumVirtChunks)
+               {
+                       psMappingTable->aui32Translation[ui32Temp] = ui32Temp;
+               }
+               else
+               {
+                       OSFreeMem(psPMR);
+                       return PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY;
+               }
+       }
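+
+       /* Illustrative example: for ui32NumVirtChunks = 4 and ui32NumPhysChunks = 2
+        * with pui32MappingTable = { 0, 2 }, the translation table above becomes
+        * { 0, TRANSLATION_INVALID, 2, TRANSLATION_INVALID } (the invalid entries
+        * come from the 0xFF memset). */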
+
+       eError = OSLockCreate(&psPMR->hLock);
+       if (eError != PVRSRV_OK)
+       {
+               OSFreeMem(psPMR);
+               return eError;
+       }
+
+       /* Setup the PMR */
+       OSAtomicWrite(&psPMR->iRefCount, 0);
+
+       /* If allocation is not made on demand, it will be backed now and
+        * backing will not be removed until the PMR is destroyed, therefore
+        * we can initialise the iLockCount to 1 rather than 0.
+        */
+       OSAtomicWrite(&psPMR->iLockCount, (PVRSRV_CHECK_ON_DEMAND(uiFlags) ? 0 : 1));
+
+       psPMR->psContext = psContext;
+       psPMR->uiLogicalSize = uiLogicalSize;
+       psPMR->uiLog2ContiguityGuarantee = uiLog2ContiguityGuarantee;
+       psPMR->uiFlags = uiFlags;
+       psPMR->psMappingTable = psMappingTable;
+       psPMR->bSparseAlloc = bSparse;
+       psPMR->bIsUnpinned = IMG_FALSE;
+       psPMR->bNoLayoutChange = IMG_FALSE;
+       psPMR->szAnnotation[0] = '\0';
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+       psPMR->hRIHandle = NULL;
+#endif
+
+       OSLockAcquire(psContext->hLock);
+       psPMR->uiKey = psContext->uiNextKey;
+       psPMR->uiSerialNum = psContext->uiNextSerialNum;
+       psContext->uiNextKey = (0x80200003 * psContext->uiNextKey)
+                                                               ^ (0xf00f0081 * (uintptr_t)pvPMRLinAddr);
+       psContext->uiNextSerialNum++;
+       *ppsPMR = psPMR;
+       PVR_DPF((PVR_DBG_MESSAGE, "pmr.c: created PMR @0x%p", psPMR));
+       /* Increment live PMR count */
+       psContext->uiNumLivePMRs++;
+       OSLockRelease(psContext->hLock);
+
+       return PVRSRV_OK;
+}
+
+/* This function returns true if the PMR is in use and false otherwise.
+ * This function is not thread safe and hence the caller
+ * needs to ensure the thread safety by explicitly taking
+ * the lock on the PMR or through other means */
+IMG_BOOL PMRIsPMRLive(PMR *psPMR)
+{
+       return (OSAtomicRead(&psPMR->iRefCount) > 0);
+}
+
+static IMG_UINT32
+_Ref(PMR *psPMR)
+{
+       PVR_ASSERT(OSAtomicRead(&psPMR->iRefCount) >= 0);
+       return OSAtomicIncrement(&psPMR->iRefCount);
+}
+
+static IMG_UINT32
+_Unref(PMR *psPMR)
+{
+       PVR_ASSERT(OSAtomicRead(&psPMR->iRefCount) > 0);
+       return OSAtomicDecrement(&psPMR->iRefCount);
+}
+
+static void
+_UnrefAndMaybeDestroy(PMR *psPMR)
+{
+       PVRSRV_ERROR eError2;
+       struct _PMR_CTX_ *psCtx;
+       IMG_INT iRefCount;
+
+       PVR_ASSERT(psPMR != NULL);
+
+       /* Acquire PMR factory lock if provided */
+       if (psPMR->psFuncTab->pfnGetPMRFactoryLock)
+       {
+               psPMR->psFuncTab->pfnGetPMRFactoryLock();
+       }
+
+       iRefCount = _Unref(psPMR);
+
+       if (iRefCount == 0)
+       {
+               if (psPMR->psFuncTab->pfnFinalize != NULL)
+               {
+                       eError2 = psPMR->psFuncTab->pfnFinalize(psPMR->pvFlavourData);
+
+                       /* PMR unref can be called asynchronously by the kernel or by other
+                        * third-party modules (e.g. display) which do not go through the
+                        * usual services bridge. The same PMR can be referenced simultaneously
+                        * on a different path, which results in a race condition.
+                        * Depending on how the race resolves, a factory may refuse to destroy
+                        * the resource associated with this PMR if a reference on it was taken
+                        * prior to the unref. In that case the PMR factory function returns this error.
+                        *
+                        * When such an error is encountered, the factory needs to ensure the state
+                        * associated with the PMR is undisturbed. At this point we just bail out from
+                        * freeing the PMR itself. The PMR handle will then be freed at a later point
+                        * when the same PMR is unreferenced again.
+                        */
+                       if (PVRSRV_ERROR_PMR_STILL_REFERENCED == eError2)
+                       {
+                               if (psPMR->psFuncTab->pfnReleasePMRFactoryLock)
+                               {
+                                       psPMR->psFuncTab->pfnReleasePMRFactoryLock();
+                               }
+                               return;
+                       }
+                       PVR_ASSERT (eError2 == PVRSRV_OK); /* can we do better? */
+               }
+#if defined(PDUMP)
+               /* if allocation is done on the host node don't include it in the PDUMP */
+               if (!_IsHostDevicePMR(psPMR))
+               {
+                       PDumpPMRFreePMR(psPMR,
+                                       psPMR->uiLogicalSize,
+                                       (1 << psPMR->uiLog2ContiguityGuarantee),
+                                       psPMR->uiLog2ContiguityGuarantee,
+                                       psPMR->hPDumpAllocHandle);
+               }
+#endif
+
+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS)
+               /* This PMR is about to be destroyed, so update its mmap stats record (if present)
+                * to avoid a dangling pointer. This is also required because mmap stats
+                * are identified by PMR, and a new PMR allocated later "might" get the same address
+                * as the one we're about to free; we want two distinct entries in the mmap
+                * stats for such cases. */
+               MMapStatsRemovePMR(psPMR);
+#endif
+
+#ifdef PVRSRV_NEED_PVR_ASSERT
+               /* If not backed on demand, iLockCount should be 1 otherwise it should be 0 */
+               PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) == (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1));
+#endif
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+               {
+                       PVRSRV_ERROR eError;
+
+                       /* Delete RI entry */
+                       if (psPMR->hRIHandle)
+                       {
+                               eError = RIDeletePMREntryKM (psPMR->hRIHandle);
+
+                               if (eError != PVRSRV_OK)
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s: RIDeletePMREntryKM failed: %s",
+                                                       __func__,
+                                                       PVRSRVGetErrorString(eError)));
+                                       /* continue destroying the PMR */
+                               }
+                       }
+               }
+#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
+               psCtx = psPMR->psContext;
+
+               OSLockDestroy(psPMR->hLock);
+
+               /* Release PMR factory lock acquired if any */
+               if (psPMR->psFuncTab->pfnReleasePMRFactoryLock)
+               {
+                       psPMR->psFuncTab->pfnReleasePMRFactoryLock();
+               }
+
+               OSFreeMem(psPMR);
+
+               /* Decrement live PMR count. Probably only of interest for debugging */
+               PVR_ASSERT(psCtx->uiNumLivePMRs > 0);
+
+               OSLockAcquire(psCtx->hLock);
+               psCtx->uiNumLivePMRs--;
+               OSLockRelease(psCtx->hLock);
+       }
+       else
+       {
+               /* Release PMR factory lock acquired if any */
+               if (psPMR->psFuncTab->pfnReleasePMRFactoryLock)
+               {
+                       psPMR->psFuncTab->pfnReleasePMRFactoryLock();
+               }
+       }
+}
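+
+/* Illustrative sketch (not part of the driver): the reference lifecycle as used
+ * by the public wrappers further down this file. psSomePMR is a hypothetical
+ * PMR obtained from PMRCreatePMR(), which hands it back with iRefCount == 1.
+ */
+#if 0  /* example only, never compiled */
+static void ExamplePMRRefLifecycle(PMR *psSomePMR)
+{
+       PMRRefPMR(psSomePMR);     /* take an extra reference: iRefCount 1 -> 2 */
+
+       /* ... use the PMR ... */
+
+       PMRUnrefPMR(psSomePMR);   /* drop the extra reference: 2 -> 1 */
+       PMRUnrefPMR(psSomePMR);   /* drop the creation reference: 1 -> 0;
+                                  * _UnrefAndMaybeDestroy() finalises and frees it */
+}
+#endif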
+
+static IMG_BOOL _PMRIsSparse(const PMR *psPMR)
+{
+       return psPMR->bSparseAlloc;
+}
+
+PVRSRV_ERROR
+PMRCreatePMR(PHYS_HEAP *psPhysHeap,
+             PMR_SIZE_T uiLogicalSize,
+             PMR_SIZE_T uiChunkSize,
+             IMG_UINT32 ui32NumPhysChunks,
+             IMG_UINT32 ui32NumVirtChunks,
+             IMG_UINT32 *pui32MappingTable,
+             PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee,
+             PMR_FLAGS_T uiFlags,
+             const IMG_CHAR *pszAnnotation,
+             const PMR_IMPL_FUNCTAB *psFuncTab,
+             PMR_IMPL_PRIVDATA pvPrivData,
+             PMR_IMPL_TYPE eType,
+             PMR **ppsPMRPtr,
+             IMG_UINT32 ui32PDumpFlags)
+{
+       PMR *psPMR = NULL;
+       PVRSRV_ERROR eError;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(pszAnnotation != NULL, "pszAnnotation");
+
+       eError = _PMRCreate(uiLogicalSize,
+                           uiChunkSize,
+                           ui32NumPhysChunks,
+                           ui32NumVirtChunks,
+                           pui32MappingTable,
+                           uiLog2ContiguityGuarantee,
+                           uiFlags,
+                           &psPMR);
+       PVR_GOTO_IF_ERROR(eError, e0);
+
+       psPMR->psPhysHeap = psPhysHeap;
+       psPMR->psFuncTab = psFuncTab;
+       psPMR->pszPDumpDefaultMemspaceName = PhysHeapPDumpMemspaceName(psPhysHeap);
+       psPMR->pvFlavourData = pvPrivData;
+       psPMR->eFlavour = eType;
+       OSAtomicWrite(&psPMR->iRefCount, 1);
+
+       OSStringLCopy(psPMR->szAnnotation, pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN);
+
+#if defined(PDUMP)
+       /* if allocation was done on the host node don't include it in the PDUMP */
+       if (!_IsHostDevicePMR(psPMR))
+       {
+               PMR_FLAGS_T uiFlags = psPMR->uiFlags;
+               IMG_BOOL bInitialise = IMG_FALSE;
+               IMG_UINT32 ui32InitValue = 0;
+
+               if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags))
+               {
+                       bInitialise = IMG_TRUE;
+               }
+               else if (PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags))
+               {
+                       ui32InitValue = 0xDEADBEEF;
+                       bInitialise = IMG_TRUE;
+               }
+
+               PDumpPMRMallocPMR(psPMR,
+                                 (uiChunkSize * ui32NumVirtChunks),
+                                 1ULL<<uiLog2ContiguityGuarantee,
+                                 uiChunkSize,
+                                 ui32NumPhysChunks,
+                                 ui32NumVirtChunks,
+                                 pui32MappingTable,
+                                 uiLog2ContiguityGuarantee,
+                                 bInitialise,
+                                 ui32InitValue,
+                                 &psPMR->hPDumpAllocHandle,
+                                 ui32PDumpFlags);
+       }
+#endif
+
+       *ppsPMRPtr = psPMR;
+
+       return PVRSRV_OK;
+
+       /* Error exit paths follow */
+e0:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+PVRSRV_ERROR PMRLockSysPhysAddressesNested(PMR *psPMR,
+                                           IMG_UINT32 ui32NestingLevel)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(psPMR != NULL);
+
+       /* Note: taking this lock is not required to protect the PMR reference
+        * count, because the PMR reference count is atomic. Rather, taking
+        * the lock here guarantees that no caller will exit this function
+        * without the underlying physical addresses being locked.
+        */
+       OSLockAcquireNested(psPMR->hLock, ui32NestingLevel);
+       /* We also count the locks as references, so that the PMR is not freed
+        * while someone is using a physical address.
+        * "lock" here simply means incrementing the refcount. That makes the
+        * refcount multipurpose, but that's okay. We only have to promise
+        * that physical addresses are valid after this point, and remain valid
+        * until the corresponding PMRUnlockSysPhysAddresses() call.
+        */
+       _Ref(psPMR);
+
+       /* Also count locks separately from other types of references, to
+        * allow for debug assertions
+        */
+
+       /* Only call callback if lockcount transitions from 0 to 1 (or 1 to 2 if not backed on demand) */
+       if (OSAtomicIncrement(&psPMR->iLockCount) == (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 1 : 2))
+       {
+               if (psPMR->psFuncTab->pfnLockPhysAddresses != NULL)
+               {
+                       /* must always have lock and unlock in pairs! */
+                       PVR_ASSERT(psPMR->psFuncTab->pfnUnlockPhysAddresses != NULL);
+
+                       eError = psPMR->psFuncTab->pfnLockPhysAddresses(psPMR->pvFlavourData);
+
+                       PVR_GOTO_IF_ERROR(eError, e1);
+               }
+       }
+       OSLockRelease(psPMR->hLock);
+
+       return PVRSRV_OK;
+
+e1:
+       OSAtomicDecrement(&psPMR->iLockCount);
+       _Unref(psPMR);
+       PVR_ASSERT(OSAtomicRead(&psPMR->iRefCount) != 0);
+       OSLockRelease(psPMR->hLock);
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+PVRSRV_ERROR
+PMRLockSysPhysAddresses(PMR *psPMR)
+{
+       return PMRLockSysPhysAddressesNested(psPMR, 0);
+}
+
+PVRSRV_ERROR
+PMRUnlockSysPhysAddresses(PMR *psPMR)
+{
+       return PMRUnlockSysPhysAddressesNested(psPMR, 2);
+}
+
+PVRSRV_ERROR
+PMRUnlockSysPhysAddressesNested(PMR *psPMR, IMG_UINT32 ui32NestingLevel)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(psPMR != NULL);
+
+       /* Acquiring the lock here, as well as during the Lock operation ensures
+        * the lock count hitting zero and the unlocking of the phys addresses is
+        * an atomic operation
+        */
+       OSLockAcquireNested(psPMR->hLock, ui32NestingLevel);
+       PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) > (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1));
+
+       if (OSAtomicDecrement(&psPMR->iLockCount) == (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1))
+       {
+               if (psPMR->psFuncTab->pfnUnlockPhysAddresses != NULL)
+               {
+                       PVR_ASSERT(psPMR->psFuncTab->pfnLockPhysAddresses != NULL);
+
+                       eError = psPMR->psFuncTab->pfnUnlockPhysAddresses(psPMR->pvFlavourData);
+                       /* must never fail */
+                       PVR_ASSERT(eError == PVRSRV_OK);
+               }
+       }
+
+       OSLockRelease(psPMR->hLock);
+
+       /* We also count the locks as references, so that the PMR is not
+        * freed while someone is using a physical address.
+        */
+       _UnrefAndMaybeDestroy(psPMR);
+
+       return PVRSRV_OK;
+}
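+
+/* Illustrative sketch (not part of the driver): physical addresses may only be
+ * queried between a lock/unlock pair, as asserted in PMR_DevPhysAddr() further
+ * down this file. Variable names are hypothetical; a single page is queried.
+ */
+#if 0  /* example only, never compiled */
+static PVRSRV_ERROR ExampleQueryFirstPage(PMR *psSomePMR, IMG_UINT32 ui32Log2PageSize)
+{
+       IMG_DEV_PHYADDR sDevPAddr;
+       IMG_BOOL bValid;
+       PVRSRV_ERROR eError;
+
+       eError = PMRLockSysPhysAddresses(psSomePMR);
+       if (eError != PVRSRV_OK)
+       {
+               return eError;
+       }
+
+       eError = PMR_DevPhysAddr(psSomePMR, ui32Log2PageSize, 1, 0, &sDevPAddr, &bValid);
+
+       /* Always balance the lock, even on error */
+       PMRUnlockSysPhysAddresses(psSomePMR);
+       return eError;
+}
+#endif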
+
+PVRSRV_ERROR
+PMRUnpinPMR(PMR *psPMR, IMG_BOOL bDevMapped)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PVR_ASSERT(psPMR != NULL);
+
+       OSLockAcquire(psPMR->hLock);
+       /* Stop if we still have references on the PMR */
+       if (   ( bDevMapped && (OSAtomicRead(&psPMR->iRefCount) > 2))
+                       || (!bDevMapped && (OSAtomicRead(&psPMR->iRefCount) > 1)) )
+       {
+               OSLockRelease(psPMR->hLock);
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: PMR is still referenced %u times. "
+                               "This PMR is probably exported or in use somewhere else. "
+                               "At most 2 references are allowed if it is mapped to a device, otherwise 1.",
+                               __func__,
+                               OSAtomicRead(&psPMR->iRefCount)));
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_STILL_REFERENCED, e_exit);
+       }
+       OSLockRelease(psPMR->hLock);
+
+       if (psPMR->psFuncTab->pfnUnpinMem != NULL)
+       {
+               eError = psPMR->psFuncTab->pfnUnpinMem(psPMR->pvFlavourData);
+               if (eError == PVRSRV_OK)
+               {
+                       psPMR->bIsUnpinned = IMG_TRUE;
+               }
+       }
+
+e_exit:
+       return eError;
+}
+
+PVRSRV_ERROR
+PMRPinPMR(PMR *psPMR)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PVR_ASSERT(psPMR != NULL);
+
+       if (psPMR->psFuncTab->pfnPinMem != NULL)
+       {
+               eError = psPMR->psFuncTab->pfnPinMem(psPMR->pvFlavourData,
+                                                    psPMR->psMappingTable);
+               if (eError == PVRSRV_OK)
+               {
+                       psPMR->bIsUnpinned = IMG_FALSE;
+               }
+       }
+
+       return eError;
+}
+
+PVRSRV_ERROR
+PMRMakeLocalImportHandle(PMR *psPMR,
+                         PMR **ppsPMR)
+{
+       PMRRefPMR(psPMR);
+       *ppsPMR = psPMR;
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRUnmakeLocalImportHandle(PMR *psPMR)
+{
+       PMRUnrefPMR(psPMR);
+       return PVRSRV_OK;
+}
+
+/*
+       Note:
+       We pass the PMR back as it was passed in, but under a different handle
+       type (DEVMEM_MEM_IMPORT); this allows us to change the import structure
+       type if we ever need to embed any metadata in it.
+ */
+PVRSRV_ERROR
+PMRLocalImportPMR(PMR *psPMR,
+                  PMR **ppsPMR,
+                  IMG_DEVMEM_SIZE_T *puiSize,
+                  IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+       _Ref(psPMR);
+
+       /* Return the PMR */
+       *ppsPMR = psPMR;
+       *puiSize = psPMR->uiLogicalSize;
+       *puiAlign = 1ULL << psPMR->uiLog2ContiguityGuarantee;
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRGetUID(PMR *psPMR,
+          IMG_UINT64 *pui64UID)
+{
+       PVR_ASSERT(psPMR != NULL);
+
+       *pui64UID = psPMR->uiSerialNum;
+
+       return PVRSRV_OK;
+}
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+PVRSRV_ERROR
+PMRExportPMR(PMR *psPMR,
+             PMR_EXPORT **ppsPMRExportPtr,
+             PMR_SIZE_T *puiSize,
+             PMR_LOG2ALIGN_T *puiLog2Contig,
+             PMR_PASSWORD_T *puiPassword)
+{
+       IMG_UINT64 uiPassword;
+       PMR_EXPORT *psPMRExport;
+
+       uiPassword = psPMR->uiKey;
+
+       psPMRExport = OSAllocMem(sizeof(*psPMRExport));
+       PVR_RETURN_IF_NOMEM(psPMRExport);
+
+       psPMRExport->psPMR = psPMR;
+       _Ref(psPMR);
+       /* The layout of a PMR can't change once it has been exported,
+        * so that the importer's view of the memory is
+        * the same as the exporter's. */
+       psPMR->bNoLayoutChange = IMG_TRUE;
+
+       *ppsPMRExportPtr = psPMRExport;
+       *puiSize = psPMR->uiLogicalSize;
+       *puiLog2Contig = psPMR->uiLog2ContiguityGuarantee;
+       *puiPassword = uiPassword;
+
+       return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PMRUnexportPMR(PMR_EXPORT *psPMRExport)
+{
+       PVR_ASSERT(psPMRExport != NULL);
+       PVR_ASSERT(psPMRExport->psPMR != NULL);
+       PVR_ASSERT(OSAtomicRead(&psPMRExport->psPMR->iRefCount) > 0);
+
+       _UnrefAndMaybeDestroy(psPMRExport->psPMR);
+
+       OSFreeMem(psPMRExport);
+
+       return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PMRImportPMR(PMR_EXPORT *psPMRExport,
+             PMR_PASSWORD_T uiPassword,
+             PMR_SIZE_T uiSize,
+             PMR_LOG2ALIGN_T uiLog2Contig,
+             PMR **ppsPMR)
+{
+       PMR *psPMR;
+
+       PVR_ASSERT(OSAtomicRead(&psPMRExport->psPMR->iRefCount) > 0);
+
+       psPMR = psPMRExport->psPMR;
+
+       PVR_ASSERT((psPMR->bNoLayoutChange == IMG_TRUE));
+
+       if (psPMR->uiKey != uiPassword)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "PMRImport: Import failed, password specified does not match the export"));
+               return PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR;
+       }
+
+       if (psPMR->uiLogicalSize != uiSize || psPMR->uiLog2ContiguityGuarantee != uiLog2Contig)
+       {
+               return PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES;
+       }
+
+       _Ref(psPMR);
+
+       *ppsPMR = psPMR;
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRUnimportPMR(PMR *psPMR)
+{
+       _UnrefAndMaybeDestroy(psPMR);
+
+       return PVRSRV_OK;
+}
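+
+/* Illustrative sketch (not part of the driver): the insecure export/import
+ * handshake. The password handed out by PMRExportPMR() must be presented,
+ * together with a matching size and contiguity, for PMRImportPMR() to succeed.
+ * All variable names are hypothetical.
+ */
+#if 0  /* example only, never compiled */
+static PVRSRV_ERROR ExampleExportImport(PMR *psSomePMR, PMR **ppsImportedPMR)
+{
+       PMR_EXPORT *psExport;
+       PMR_SIZE_T uiSize;
+       PMR_LOG2ALIGN_T uiLog2Contig;
+       PMR_PASSWORD_T uiPassword;
+       PVRSRV_ERROR eError;
+
+       eError = PMRExportPMR(psSomePMR, &psExport, &uiSize, &uiLog2Contig, &uiPassword);
+       if (eError != PVRSRV_OK)
+       {
+               return eError;
+       }
+
+       /* The importer presents the same password, size and contiguity */
+       eError = PMRImportPMR(psExport, uiPassword, uiSize, uiLog2Contig, ppsImportedPMR);
+
+       /* Tear down the export handle; a successful import keeps its own reference */
+       PMRUnexportPMR(psExport);
+       return eError;
+}
+#endif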
+
+#else /* if defined(SUPPORT_INSECURE_EXPORT) */
+
+PVRSRV_ERROR
+PMRExportPMR(PMR *psPMR,
+             PMR_EXPORT **ppsPMRExportPtr,
+             PMR_SIZE_T *puiSize,
+             PMR_LOG2ALIGN_T *puiLog2Contig,
+             PMR_PASSWORD_T *puiPassword)
+{
+       PVR_UNREFERENCED_PARAMETER(psPMR);
+       PVR_UNREFERENCED_PARAMETER(ppsPMRExportPtr);
+       PVR_UNREFERENCED_PARAMETER(puiSize);
+       PVR_UNREFERENCED_PARAMETER(puiLog2Contig);
+       PVR_UNREFERENCED_PARAMETER(puiPassword);
+
+       return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PMRUnexportPMR(PMR_EXPORT *psPMRExport)
+{
+       PVR_UNREFERENCED_PARAMETER(psPMRExport);
+       return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PMRImportPMR(PMR_EXPORT *psPMRExport,
+             PMR_PASSWORD_T uiPassword,
+             PMR_SIZE_T uiSize,
+             PMR_LOG2ALIGN_T uiLog2Contig,
+             PMR **ppsPMR)
+{
+       PVR_UNREFERENCED_PARAMETER(psPMRExport);
+       PVR_UNREFERENCED_PARAMETER(uiPassword);
+       PVR_UNREFERENCED_PARAMETER(uiSize);
+       PVR_UNREFERENCED_PARAMETER(uiLog2Contig);
+       PVR_UNREFERENCED_PARAMETER(ppsPMR);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRUnimportPMR(PMR *psPMR)
+{
+       PVR_UNREFERENCED_PARAMETER(psPMR);
+       return PVRSRV_OK;
+}
+#endif /* if defined(SUPPORT_INSECURE_EXPORT) */
+
+#if defined(SUPPORT_SECURE_EXPORT)
+PVRSRV_ERROR PMRSecureUnexportPMR(PMR *psPMR)
+{
+       _UnrefAndMaybeDestroy(psPMR);
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _ReleaseSecurePMR(void *psExport)
+{
+       return PMRSecureUnexportPMR(psExport);
+}
+
+PVRSRV_ERROR PMRSecureExportPMR(CONNECTION_DATA *psConnection,
+                                PVRSRV_DEVICE_NODE * psDevNode,
+                                PMR *psPMR,
+                                IMG_SECURE_TYPE *phSecure,
+                                PMR **ppsPMR,
+                                CONNECTION_DATA **ppsSecureConnection)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_UNREFERENCED_PARAMETER(psDevNode);
+       PVR_UNREFERENCED_PARAMETER(ppsSecureConnection);
+
+       /* We acquire a reference to the PMR here because OSSecureExport
+        * releases the bridge lock and the PMR lock for a moment, and we don't
+        * want the PMR to be removed by another thread in the meantime. */
+       _Ref(psPMR);
+
+       eError = OSSecureExport("secure_pmr",
+                               _ReleaseSecurePMR,
+                               (void *) psPMR,
+                               phSecure);
+       PVR_GOTO_IF_ERROR(eError, e0);
+
+       *ppsPMR = psPMR;
+
+       /* Mark the PMR immutable once exported.
+        * This allows the importers and the exporter to have
+        * the same view of the memory. */
+       psPMR->bNoLayoutChange = IMG_TRUE;
+
+       return PVRSRV_OK;
+e0:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       _UnrefAndMaybeDestroy(psPMR);
+       return eError;
+}
+
+PVRSRV_ERROR PMRSecureImportPMR(CONNECTION_DATA *psConnection,
+                                PVRSRV_DEVICE_NODE *psDevNode,
+                                IMG_SECURE_TYPE hSecure,
+                                PMR **ppsPMR,
+                                IMG_DEVMEM_SIZE_T *puiSize,
+                                IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+       PVRSRV_ERROR eError;
+       PMR *psPMR;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       eError = OSSecureImport(hSecure, (void **) &psPMR);
+       PVR_GOTO_IF_ERROR(eError, e0);
+
+       PVR_LOG_RETURN_IF_FALSE(PhysHeapDeviceNode(psPMR->psPhysHeap) == psDevNode,
+                                       "PMR invalid for this device",
+                                       PVRSRV_ERROR_PMR_NOT_PERMITTED);
+
+       _Ref(psPMR);
+       /* The PMR should be immutable once exported.
+        * This allows the importers and the exporter to have
+        * the same view of the memory. */
+       PVR_ASSERT(psPMR->bNoLayoutChange == IMG_TRUE);
+
+       /* Return the PMR */
+       *ppsPMR = psPMR;
+       *puiSize = psPMR->uiLogicalSize;
+       *puiAlign = 1ull << psPMR->uiLog2ContiguityGuarantee;
+       return PVRSRV_OK;
+e0:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+PVRSRV_ERROR PMRSecureUnimportPMR(PMR *psPMR)
+{
+       _UnrefAndMaybeDestroy(psPMR);
+       return PVRSRV_OK;
+}
+#endif
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+PVRSRV_ERROR
+PMRStoreRIHandle(PMR *psPMR,
+                 void *hRIHandle)
+{
+       PVR_ASSERT(psPMR != NULL);
+
+       psPMR->hRIHandle = hRIHandle;
+       return PVRSRV_OK;
+}
+#endif
+
+static PVRSRV_ERROR
+_PMRAcquireKernelMappingData(PMR *psPMR,
+                             size_t uiLogicalOffset,
+                             size_t uiSize,
+                             void **ppvKernelAddressOut,
+                             size_t *puiLengthOut,
+                             IMG_HANDLE *phPrivOut,
+                             IMG_BOOL bMapSparse)
+{
+       PVRSRV_ERROR eError;
+       void *pvKernelAddress;
+       IMG_HANDLE hPriv;
+
+       PVR_ASSERT(psPMR != NULL);
+
+       if (_PMRIsSparse(psPMR) && !bMapSparse)
+       {
+               /* Mapping of sparse allocations must be signalled. */
+               return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+       }
+
+       /* Acquire/Release functions must be overridden in pairs */
+       if (psPMR->psFuncTab->pfnAcquireKernelMappingData == NULL)
+       {
+               PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData == NULL);
+
+               /* If the PMR implementation does not supply this pair of
+                * functions, it does not permit the PMR to be mapped
+                * into kernel memory at all.
+                */
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PERMITTED, e0);
+       }
+       PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData != NULL);
+
+       eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData,
+                                                              uiLogicalOffset,
+                                                              uiSize,
+                                                              &pvKernelAddress,
+                                                              &hPriv,
+                                                              psPMR->uiFlags);
+       PVR_GOTO_IF_ERROR(eError, e0);
+
+       *ppvKernelAddressOut = pvKernelAddress;
+       if (uiSize == 0)
+       {
+               /* Zero size means map in the whole PMR ... */
+               *puiLengthOut = (size_t)psPMR->uiLogicalSize;
+       }
+       else if (uiSize > (1 << psPMR->uiLog2ContiguityGuarantee))
+       {
+               /* ... map in the requested pages ... */
+               *puiLengthOut = uiSize;
+       }
+       else
+       {
+               /* ... otherwise we just map in one page */
+               *puiLengthOut = 1 << psPMR->uiLog2ContiguityGuarantee;
+       }
+       *phPrivOut = hPriv;
+
+       return PVRSRV_OK;
+
+e0:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+PVRSRV_ERROR
+PMRAcquireKernelMappingData(PMR *psPMR,
+                            size_t uiLogicalOffset,
+                            size_t uiSize,
+                            void **ppvKernelAddressOut,
+                            size_t *puiLengthOut,
+                            IMG_HANDLE *phPrivOut)
+{
+       return _PMRAcquireKernelMappingData(psPMR,
+                                           uiLogicalOffset,
+                                           uiSize,
+                                           ppvKernelAddressOut,
+                                           puiLengthOut,
+                                           phPrivOut,
+                                           IMG_FALSE);
+}
+
+PVRSRV_ERROR
+PMRAcquireSparseKernelMappingData(PMR *psPMR,
+                                  size_t uiLogicalOffset,
+                                  size_t uiSize,
+                                  void **ppvKernelAddressOut,
+                                  size_t *puiLengthOut,
+                                  IMG_HANDLE *phPrivOut)
+{
+       return _PMRAcquireKernelMappingData(psPMR,
+                                           uiLogicalOffset,
+                                           uiSize,
+                                           ppvKernelAddressOut,
+                                           puiLengthOut,
+                                           phPrivOut,
+                                           IMG_TRUE);
+}
+
+PVRSRV_ERROR
+PMRReleaseKernelMappingData(PMR *psPMR,
+                            IMG_HANDLE hPriv)
+{
+       PVR_ASSERT (psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL);
+       PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData != NULL);
+
+       psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData,
+                                                     hPriv);
+
+       return PVRSRV_OK;
+}
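+
+/* Illustrative sketch (not part of the driver): acquiring a temporary kernel
+ * mapping of a (non-sparse) PMR, reading through it and releasing it again.
+ * Names are hypothetical; error handling is reduced to the essentials.
+ */
+#if 0  /* example only, never compiled */
+static PVRSRV_ERROR ExamplePeekFirstWord(PMR *psSomePMR, IMG_UINT32 *pui32Out)
+{
+       void *pvKernAddr;
+       size_t uiMappedLen;
+       IMG_HANDLE hMapping;
+       PVRSRV_ERROR eError;
+
+       eError = PMRAcquireKernelMappingData(psSomePMR,
+                                            0,                  /* logical offset */
+                                            sizeof(IMG_UINT32), /* size to map */
+                                            &pvKernAddr,
+                                            &uiMappedLen,
+                                            &hMapping);
+       if (eError != PVRSRV_OK)
+       {
+               return eError;
+       }
+
+       *pui32Out = *(IMG_UINT32 *)pvKernAddr;
+
+       return PMRReleaseKernelMappingData(psSomePMR, hMapping);
+}
+#endif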
+
+/*
+       _PMRLogicalOffsetToPhysicalOffset
+
+       Translate between the "logical" offset which the upper levels
+       provide and the physical offset which the PMR
+       factories work on.
+
+       As well as returning the physical offset we return the number of
+       bytes remaining until the next chunk and whether this chunk is valid.
+
+       For multi-page operations, upper layers communicate their
+       Log2PageSize; otherwise the argument is redundant (set to zero).
+
+       A worked example for a sparse PMR follows the function body below.
+ */
+
+static void
+_PMRLogicalOffsetToPhysicalOffset(const PMR *psPMR,
+                                  IMG_UINT32 ui32Log2PageSize,
+                                  IMG_UINT32 ui32NumOfPages,
+                                  IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                                  IMG_DEVMEM_OFFSET_T *puiPhysicalOffset,
+                                  IMG_UINT32 *pui32BytesRemain,
+                                  IMG_BOOL *bValid)
+{
+       PMR_MAPPING_TABLE *psMappingTable = psPMR->psMappingTable;
+       IMG_DEVMEM_OFFSET_T uiPageSize = 1ULL << ui32Log2PageSize;
+       IMG_DEVMEM_OFFSET_T uiOffset = uiLogicalOffset;
+       IMG_UINT64 ui64ChunkIndex;
+       IMG_UINT32 ui32Remain;
+       IMG_UINT32 idx;
+
+       /* Must be translating at least a page */
+       PVR_ASSERT(ui32NumOfPages);
+
+       if (psMappingTable->ui32NumPhysChunks == psMappingTable->ui32NumVirtChunks)
+       {
+               /* Fast path for the common case: logical and physical offsets are
+                * equal, so we assume the ui32NumOfPages span is also valid */
+               *pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiOffset);
+               puiPhysicalOffset[0] = uiOffset;
+               bValid[0] = IMG_TRUE;
+
+               if (ui32NumOfPages > 1)
+               {
+                       /* initial offset may not be page aligned, round down */
+                       uiOffset &= ~(uiPageSize-1);
+                       for (idx=1; idx < ui32NumOfPages; idx++)
+                       {
+                               uiOffset += uiPageSize;
+                               puiPhysicalOffset[idx] = uiOffset;
+                               bValid[idx] = IMG_TRUE;
+                       }
+               }
+       }
+       else
+       {
+               for (idx=0; idx < ui32NumOfPages; idx++)
+               {
+                       ui64ChunkIndex = OSDivide64r64(
+                                       uiOffset,
+                                       TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize),
+                                       &ui32Remain);
+
+                       if (psMappingTable->aui32Translation[ui64ChunkIndex] == TRANSLATION_INVALID)
+                       {
+                               bValid[idx] = IMG_FALSE;
+                       }
+                       else
+                       {
+                               bValid[idx] = IMG_TRUE;
+                       }
+
+                       if (idx == 0)
+                       {
+                               if (ui32Remain == 0)
+                               {
+                                       /* Start of chunk so return the chunk size */
+                                       *pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize);
+                               }
+                               else
+                               {
+                                       *pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize - ui32Remain);
+                               }
+
+                               puiPhysicalOffset[idx] = (psMappingTable->aui32Translation[ui64ChunkIndex] * psMappingTable->uiChunkSize) +      ui32Remain;
+
+                               /* initial offset may not be page aligned, round down */
+                               uiOffset &= ~(uiPageSize-1);
+                       }
+                       else
+                       {
+                               puiPhysicalOffset[idx] = psMappingTable->aui32Translation[ui64ChunkIndex] * psMappingTable->uiChunkSize + ui32Remain;
+                       }
+                       uiOffset += uiPageSize;
+               }
+       }
+}
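+
+/* Worked example (illustrative only, hypothetical values): for a sparse PMR
+ * with uiChunkSize = 4096, aui32Translation = { 0, INVALID, 2, INVALID } and a
+ * logical offset of 8200:
+ *   chunk index     = 8200 / 4096 = 2, remainder = 8
+ *   aui32Translation[2] = 2, so the chunk is valid,
+ *   physical offset = 2 * 4096 + 8 = 8200,
+ *   bytes remaining = 4096 - 8 = 4088.
+ * A logical offset of 4100 would land in chunk 1, which is TRANSLATION_INVALID,
+ * so bValid is set to IMG_FALSE for that page.
+ */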
+
+static PVRSRV_ERROR
+_PMR_ReadBytesPhysical(PMR *psPMR,
+                       IMG_DEVMEM_OFFSET_T uiPhysicalOffset,
+                       IMG_UINT8 *pcBuffer,
+                       size_t uiBufSz,
+                       size_t *puiNumBytes)
+{
+       PVRSRV_ERROR eError;
+
+       if (psPMR->psFuncTab->pfnReadBytes != NULL)
+       {
+               /* defer to callback if present */
+
+               eError = PMRLockSysPhysAddresses(psPMR);
+               PVR_GOTO_IF_ERROR(eError, e0);
+
+               eError = psPMR->psFuncTab->pfnReadBytes(psPMR->pvFlavourData,
+                                                       uiPhysicalOffset,
+                                                       pcBuffer,
+                                                       uiBufSz,
+                                                       puiNumBytes);
+               PMRUnlockSysPhysAddresses(psPMR);
+               PVR_GOTO_IF_ERROR(eError, e0);
+       }
+       else if (psPMR->psFuncTab->pfnAcquireKernelMappingData)
+       {
+               /* "default" handler for reading bytes */
+
+               IMG_HANDLE hKernelMappingHandle;
+               IMG_UINT8 *pcKernelAddress;
+
+               eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData,
+                                                                      (size_t) uiPhysicalOffset,
+                                                                      uiBufSz,
+                                                                      (void **)&pcKernelAddress,
+                                                                      &hKernelMappingHandle,
+                                                                      psPMR->uiFlags);
+               PVR_GOTO_IF_ERROR(eError, e0);
+
+               /* Use the conservative 'DeviceMemCopy' here because we can't
+                * know if this PMR will be mapped cached.
+                */
+
+               OSDeviceMemCopy(&pcBuffer[0], pcKernelAddress, uiBufSz);
+               *puiNumBytes = uiBufSz;
+
+               psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData,
+                                                             hKernelMappingHandle);
+       }
+       else
+       {
+               OSPanic();
+               PVR_LOG_GOTO_WITH_ERROR("psPMR->psFuncTab", eError, PVRSRV_ERROR_INVALID_PARAMS, e0);
+       }
+
+       return PVRSRV_OK;
+
+       /* Error exit paths follow */
+e0:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       *puiNumBytes = 0;
+       return eError;
+}
+
+PVRSRV_ERROR
+PMR_ReadBytes(PMR *psPMR,
+              IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+              IMG_UINT8 *pcBuffer,
+              size_t uiBufSz,
+              size_t *puiNumBytes)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_DEVMEM_OFFSET_T uiPhysicalOffset;
+       size_t uiBytesCopied = 0;
+
+       if (uiLogicalOffset + uiBufSz > psPMR->uiLogicalSize)
+       {
+               uiBufSz = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiLogicalOffset);
+       }
+       PVR_ASSERT(uiBufSz > 0);
+       PVR_ASSERT(uiBufSz <= psPMR->uiLogicalSize);
+
+       /* PMR implementations can override this. If they don't, a "default"
+        * handler uses kernel virtual mappings.  If the kernel can't
+        * provide a kernel virtual mapping, this function fails.
+        */
+       PVR_ASSERT(psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL ||
+                  psPMR->psFuncTab->pfnReadBytes != NULL);
+
+       while (uiBytesCopied != uiBufSz)
+       {
+               IMG_UINT32 ui32Remain;
+               size_t uiBytesToCopy;
+               size_t uiRead;
+               IMG_BOOL bValid;
+
+               _PMRLogicalOffsetToPhysicalOffset(psPMR,
+                                                 0,
+                                                 1,
+                                                 uiLogicalOffset,
+                                                 &uiPhysicalOffset,
+                                                 &ui32Remain,
+                                                 &bValid);
+               /* Copy until either the end of the chunk or the end
+                * of the buffer
+                */
+               uiBytesToCopy = MIN(uiBufSz - uiBytesCopied, ui32Remain);
+
+               if (bValid)
+               {
+                       /* Read the data from the PMR */
+                       eError = _PMR_ReadBytesPhysical(psPMR,
+                                                       uiPhysicalOffset,
+                                                       &pcBuffer[uiBytesCopied],
+                                                       uiBytesToCopy,
+                                                       &uiRead);
+                       if ((eError != PVRSRV_OK) || (uiRead != uiBytesToCopy))
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                               "%s: Failed to read chunk (eError = %s, uiRead = " IMG_SIZE_FMTSPEC " uiBytesToCopy = " IMG_SIZE_FMTSPEC ")",
+                                               __func__,
+                                               PVRSRVGetErrorString(eError),
+                                               uiRead,
+                                               uiBytesToCopy));
+                               /* Bail out as soon as we hit an error */
+                               break;
+                       }
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_WARNING,
+                                       "%s: Invalid phys offset at logical offset (" IMG_DEVMEM_OFFSET_FMTSPEC ") logical size (" IMG_DEVMEM_OFFSET_FMTSPEC ")",
+                                       __func__,
+                                       uiLogicalOffset,
+                                       psPMR->uiLogicalSize));
+                       /* Fill invalid chunks with 0 */
+                       OSCachedMemSet(&pcBuffer[uiBytesCopied], 0, uiBytesToCopy);
+                       uiRead = uiBytesToCopy;
+                       eError = PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR;
+               }
+               uiLogicalOffset += uiRead;
+               uiBytesCopied += uiRead;
+       }
+
+       *puiNumBytes = uiBytesCopied;
+       return eError;
+}
+
+static PVRSRV_ERROR
+_PMR_WriteBytesPhysical(PMR *psPMR,
+                        IMG_DEVMEM_OFFSET_T uiPhysicalOffset,
+                        IMG_UINT8 *pcBuffer,
+                        size_t uiBufSz,
+                        size_t *puiNumBytes)
+{
+       PVRSRV_ERROR eError;
+
+       if (psPMR->psFuncTab->pfnWriteBytes != NULL)
+       {
+               /* defer to callback if present */
+
+               eError = PMRLockSysPhysAddresses(psPMR);
+               PVR_GOTO_IF_ERROR(eError, e0);
+
+               eError = psPMR->psFuncTab->pfnWriteBytes(psPMR->pvFlavourData,
+                                                        uiPhysicalOffset,
+                                                        pcBuffer,
+                                                        uiBufSz,
+                                                        puiNumBytes);
+               PMRUnlockSysPhysAddresses(psPMR);
+               PVR_GOTO_IF_ERROR(eError, e0);
+       }
+       else if (psPMR->psFuncTab->pfnAcquireKernelMappingData)
+       {
+               /* "default" handler for writing bytes */
+
+               IMG_HANDLE hKernelMappingHandle;
+               IMG_UINT8 *pcKernelAddress;
+
+               eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData,
+                                                                      (size_t) uiPhysicalOffset,
+                                                                      uiBufSz,
+                                                                      (void **)&pcKernelAddress,
+                                                                      &hKernelMappingHandle,
+                                                                      psPMR->uiFlags);
+               PVR_GOTO_IF_ERROR(eError, e0);
+
+               /* Use the conservative 'DeviceMemCopy' here because we can't know
+                * if this PMR will be mapped cached.
+                */
+
+               OSDeviceMemCopy(pcKernelAddress, &pcBuffer[0], uiBufSz);
+               *puiNumBytes = uiBufSz;
+
+               psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData,
+                                                             hKernelMappingHandle);
+       }
+       else
+       {
+               /* The write callback is optional as it's only required by the
+                * debug tools
+                */
+               OSPanic();
+               PVR_LOG_GOTO_WITH_ERROR("psPMR->psFuncTab", eError, PVRSRV_ERROR_PMR_NOT_PERMITTED, e0);
+       }
+
+       return PVRSRV_OK;
+
+       /* Error exit paths follow */
+e0:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       *puiNumBytes = 0;
+       return eError;
+}
+
+PVRSRV_ERROR
+PMR_WriteBytes(PMR *psPMR,
+               IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+               IMG_UINT8 *pcBuffer,
+               size_t uiBufSz,
+               size_t *puiNumBytes)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_DEVMEM_OFFSET_T uiPhysicalOffset;
+       size_t uiBytesCopied = 0;
+
+       if (uiLogicalOffset + uiBufSz > psPMR->uiLogicalSize)
+       {
+               uiBufSz = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiLogicalOffset);
+       }
+       PVR_ASSERT(uiBufSz > 0);
+       PVR_ASSERT(uiBufSz <= psPMR->uiLogicalSize);
+
+       /* PMR implementations can override this. If they don't, a "default"
+        * handler uses kernel virtual mappings. If the kernel can't provide
+        * a kernel virtual mapping, this function fails.
+        */
+       PVR_ASSERT(psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL ||
+                  psPMR->psFuncTab->pfnWriteBytes != NULL);
+
+       while (uiBytesCopied != uiBufSz)
+       {
+               IMG_UINT32 ui32Remain;
+               size_t uiBytesToCopy;
+               size_t uiWrite;
+               IMG_BOOL bValid;
+
+               _PMRLogicalOffsetToPhysicalOffset(psPMR,
+                                                 0,
+                                                 1,
+                                                 uiLogicalOffset,
+                                                 &uiPhysicalOffset,
+                                                 &ui32Remain,
+                                                 &bValid);
+
+               /* Copy until either the end of the chunk or the end of the buffer
+                */
+               uiBytesToCopy = MIN(uiBufSz - uiBytesCopied, ui32Remain);
+
+               if (bValid)
+               {
+                       /* Write the data to the PMR */
+                       eError = _PMR_WriteBytesPhysical(psPMR,
+                                                        uiPhysicalOffset,
+                                                        &pcBuffer[uiBytesCopied],
+                                                        uiBytesToCopy,
+                                                        &uiWrite);
+                       if ((eError != PVRSRV_OK) || (uiWrite != uiBytesToCopy))
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                               "%s: Failed to write chunk (eError = %s, uiWrite = " IMG_SIZE_FMTSPEC " uiBytesToCopy = " IMG_SIZE_FMTSPEC ")",
+                                               __func__,
+                                               PVRSRVGetErrorString(eError),
+                                               uiWrite,
+                                               uiBytesToCopy));
+                               /* Bail out as soon as we hit an error */
+                               break;
+                       }
+               }
+               else
+               {
+                       /* Ignore writes to invalid pages */
+                       uiWrite = uiBytesToCopy;
+               }
+               uiLogicalOffset += uiWrite;
+               uiBytesCopied += uiWrite;
+       }
+
+       *puiNumBytes = uiBytesCopied;
+       return eError;
+}
+
+PVRSRV_ERROR
+PMRMMapPMR(PMR *psPMR, PMR_MMAP_DATA pOSMMapData)
+{
+       if (psPMR->psFuncTab->pfnMMap)
+       {
+               return psPMR->psFuncTab->pfnMMap(psPMR->pvFlavourData, psPMR, pOSMMapData);
+       }
+
+       return OSMMapPMRGeneric(psPMR, pOSMMapData);
+}
+
+void
+PMRRefPMR(PMR *psPMR)
+{
+       PVR_ASSERT(psPMR != NULL);
+       _Ref(psPMR);
+}
+
+PVRSRV_ERROR
+PMRUnrefPMR(PMR *psPMR)
+{
+       _UnrefAndMaybeDestroy(psPMR);
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRUnrefUnlockPMR(PMR *psPMR)
+{
+       PMRUnlockSysPhysAddresses(psPMR);
+
+       PMRUnrefPMR(psPMR);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_DEVICE_NODE *
+PMR_DeviceNode(const PMR *psPMR)
+{
+       PVR_ASSERT(psPMR != NULL);
+
+       return PhysHeapDeviceNode(psPMR->psPhysHeap);
+}
+
+PMR_FLAGS_T
+PMR_Flags(const PMR *psPMR)
+{
+       PVR_ASSERT(psPMR != NULL);
+
+       return psPMR->uiFlags;
+}
+
+IMG_BOOL
+PMR_IsSparse(const PMR *psPMR)
+{
+       PVR_ASSERT(psPMR != NULL);
+
+       return _PMRIsSparse(psPMR);
+}
+
+IMG_BOOL
+PMR_IsUnpinned(const PMR *psPMR)
+{
+       PVR_ASSERT(psPMR != NULL);
+
+       return psPMR->bIsUnpinned;
+}
+
+/* Function that alters the mutability property
+ * of the PMR.
+ * Setting bFlag to IMG_TRUE ensures that the PMR memory layout
+ * can't be changed through future calls. */
+void
+PMR_SetLayoutFixed(PMR *psPMR, IMG_BOOL bFlag)
+{
+       PVR_ASSERT(psPMR != NULL);
+
+       psPMR->bNoLayoutChange = bFlag;
+}
+
+IMG_BOOL PMR_IsMemLayoutFixed(PMR *psPMR)
+{
+       PVR_ASSERT(psPMR != NULL);
+
+       return psPMR->bNoLayoutChange;
+}
+
+void
+PMR_LogicalSize(const PMR *psPMR,
+                IMG_DEVMEM_SIZE_T *puiLogicalSize)
+{
+       PVR_ASSERT(psPMR != NULL);
+
+       *puiLogicalSize = psPMR->uiLogicalSize;
+}
+
+PVRSRV_ERROR
+PMR_PhysicalSize(const PMR *psPMR,
+                 IMG_DEVMEM_SIZE_T *puiPhysicalSize)
+{
+       PVR_ASSERT(psPMR != NULL);
+
+       /* iLockCount will be > 0 for any backed PMR (backed on demand or not) */
+       if ((OSAtomicRead(&psPMR->iLockCount) > 0) && !psPMR->bIsUnpinned)
+       {
+               if (psPMR->bSparseAlloc)
+               {
+                       *puiPhysicalSize = psPMR->psMappingTable->uiChunkSize * psPMR->psMappingTable->ui32NumPhysChunks;
+               }
+               else
+               {
+                       *puiPhysicalSize = psPMR->uiLogicalSize;
+               }
+       }
+       else
+       {
+               *puiPhysicalSize = 0;
+       }
+       return PVRSRV_OK;
+}
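+
+/* Worked example (illustrative only, hypothetical values): a sparse PMR with
+ * uiChunkSize = 4096, ui32NumVirtChunks = 4 and ui32NumPhysChunks = 2 reports a
+ * logical size of 16384 bytes but, while backed, a physical size of
+ * 2 * 4096 = 8192 bytes. An unbacked or unpinned PMR reports 0.
+ */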
+
+PHYS_HEAP *
+PMR_PhysHeap(const PMR *psPMR)
+{
+       return psPMR->psPhysHeap;
+}
+
+PVRSRV_ERROR
+PMR_IsOffsetValid(const PMR *psPMR,
+                  IMG_UINT32 ui32Log2PageSize,
+                  IMG_UINT32 ui32NumOfPages,
+                  IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                  IMG_BOOL *pbValid)
+{
+       IMG_DEVMEM_OFFSET_T auiPhysicalOffset[PMR_MAX_TRANSLATION_STACK_ALLOC];
+       IMG_UINT32 aui32BytesRemain[PMR_MAX_TRANSLATION_STACK_ALLOC];
+       IMG_DEVMEM_OFFSET_T *puiPhysicalOffset = auiPhysicalOffset;
+       IMG_UINT32 *pui32BytesRemain = aui32BytesRemain;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PVR_ASSERT(psPMR != NULL);
+       PVR_ASSERT(psPMR->uiLogicalSize >= uiLogicalOffset);
+
+       if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+       {
+               puiPhysicalOffset = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEVMEM_OFFSET_T));
+               PVR_GOTO_IF_NOMEM(puiPhysicalOffset, eError, e0);
+
+               pui32BytesRemain = OSAllocMem(ui32NumOfPages * sizeof(IMG_UINT32));
+               PVR_GOTO_IF_NOMEM(pui32BytesRemain, eError, e0);
+       }
+
+       _PMRLogicalOffsetToPhysicalOffset(psPMR,
+                                         ui32Log2PageSize,
+                                         ui32NumOfPages,
+                                         uiLogicalOffset,
+                                         puiPhysicalOffset,
+                                         pui32BytesRemain,
+                                         pbValid);
+
+e0:
+       if (puiPhysicalOffset != auiPhysicalOffset && puiPhysicalOffset != NULL)
+       {
+               OSFreeMem(puiPhysicalOffset);
+       }
+
+       if (pui32BytesRemain != aui32BytesRemain && pui32BytesRemain != NULL)
+       {
+               OSFreeMem(pui32BytesRemain);
+       }
+
+       return eError;
+}
+
+PMR_MAPPING_TABLE *
+PMR_GetMappingTable(const PMR *psPMR)
+{
+       PVR_ASSERT(psPMR != NULL);
+       return psPMR->psMappingTable;
+
+}
+
+IMG_UINT32
+PMR_GetLog2Contiguity(const PMR *psPMR)
+{
+       PVR_ASSERT(psPMR != NULL);
+       return psPMR->uiLog2ContiguityGuarantee;
+}
+
+const IMG_CHAR *
+PMR_GetAnnotation(const PMR *psPMR)
+{
+       PVR_ASSERT(psPMR != NULL);
+       return psPMR->szAnnotation;
+}
+
+PMR_IMPL_TYPE
+PMR_GetType(const PMR *psPMR)
+{
+       PVR_ASSERT(psPMR != NULL);
+       return psPMR->eFlavour;
+}
+
+IMG_INT32
+PMR_GetRefCount(const PMR *psPMR)
+{
+       PVR_ASSERT(psPMR != NULL);
+       return OSAtomicRead(&psPMR->iRefCount);
+}
+
+/* must have called PMRLockSysPhysAddresses() before calling this! */
+PVRSRV_ERROR
+PMR_DevPhysAddr(const PMR *psPMR,
+                IMG_UINT32 ui32Log2PageSize,
+                IMG_UINT32 ui32NumOfPages,
+                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                IMG_DEV_PHYADDR *psDevAddrPtr,
+                IMG_BOOL *pbValid)
+{
+       IMG_UINT32 ui32Remain;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_DEVMEM_OFFSET_T auiPhysicalOffset[PMR_MAX_TRANSLATION_STACK_ALLOC];
+       IMG_DEVMEM_OFFSET_T *puiPhysicalOffset = auiPhysicalOffset;
+
+       PVR_ASSERT(psPMR != NULL);
+       PVR_ASSERT(ui32NumOfPages > 0);
+       PVR_ASSERT(psPMR->psFuncTab->pfnDevPhysAddr != NULL);
+
+#ifdef PVRSRV_NEED_PVR_ASSERT
+       PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) > (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1));
+#endif
+
+       if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+       {
+               puiPhysicalOffset = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEVMEM_OFFSET_T));
+               PVR_RETURN_IF_NOMEM(puiPhysicalOffset);
+       }
+
+       _PMRLogicalOffsetToPhysicalOffset(psPMR,
+                                         ui32Log2PageSize,
+                                         ui32NumOfPages,
+                                         uiLogicalOffset,
+                                         puiPhysicalOffset,
+                                         &ui32Remain,
+                                         pbValid);
+       if (*pbValid || _PMRIsSparse(psPMR))
+       {
+               /* Sparse PMR may not always have the first page valid */
+               eError = psPMR->psFuncTab->pfnDevPhysAddr(psPMR->pvFlavourData,
+                                                         ui32Log2PageSize,
+                                                         ui32NumOfPages,
+                                                         puiPhysicalOffset,
+                                                         pbValid,
+                                                         psDevAddrPtr);
+               PVR_GOTO_IF_ERROR(eError, FreeOffsetArray);
+
+#if defined(PVR_PMR_TRANSLATE_UMA_ADDRESSES)
+               /* Currently excluded from the default build because of performance
+                * concerns.
+                * We do not need this part in all systems because the GPU has the same
+                * address view of system RAM as the CPU.
+                * Alternatively this could be implemented as part of the PMR-factories
+                * directly */
+               if (PhysHeapGetType(psPMR->psPhysHeap) == PHYS_HEAP_TYPE_UMA ||
+                   PhysHeapGetType(psPMR->psPhysHeap) == PHYS_HEAP_TYPE_DMA)
+               {
+                       IMG_UINT32 i;
+                       IMG_DEV_PHYADDR sDevPAddrCorrected;
+
+                       /* Copy the translated addresses to the correct array */
+                       for (i = 0; i < ui32NumOfPages; i++)
+                       {
+                               PhysHeapCpuPAddrToDevPAddr(psPMR->psPhysHeap,
+                                                          1,
+                                                          &sDevPAddrCorrected,
+                                                          (IMG_CPU_PHYADDR *) &psDevAddrPtr[i]);
+                               psDevAddrPtr[i].uiAddr = sDevPAddrCorrected.uiAddr;
+                       }
+               }
+#endif
+       }
+
+FreeOffsetArray:
+       if (puiPhysicalOffset != auiPhysicalOffset)
+       {
+               OSFreeMem(puiPhysicalOffset);
+       }
+
+       return eError;
+}
+
+/* must have called PMRLockSysPhysAddresses() before calling this! */
+PVRSRV_ERROR
+PMR_CpuPhysAddr(const PMR *psPMR,
+                IMG_UINT32 ui32Log2PageSize,
+                IMG_UINT32 ui32NumOfPages,
+                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                IMG_CPU_PHYADDR *psCpuAddrPtr,
+                IMG_BOOL *pbValid)
+{
+       IMG_UINT32 idx;
+       PVRSRV_ERROR eError;
+       IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+       IMG_DEV_PHYADDR *psDevPAddr = asDevPAddr;
+
+       if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+       {
+               psDevPAddr = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEV_PHYADDR));
+               PVR_GOTO_IF_NOMEM(psDevPAddr, eError, e0);
+       }
+
+       eError = PMR_DevPhysAddr(psPMR, ui32Log2PageSize, ui32NumOfPages,
+                                uiLogicalOffset, psDevPAddr, pbValid);
+       PVR_GOTO_IF_ERROR(eError, e1);
+
+       if (_PMRIsSparse(psPMR))
+       {
+               /* Loop over each page.
+                * If Dev addr valid, populate the CPU addr from the Dev addr
+                */
+               for (idx = 0; idx < ui32NumOfPages; idx++)
+               {
+                       if (pbValid[idx])
+                       {
+                               PhysHeapDevPAddrToCpuPAddr(psPMR->psPhysHeap, 1, &psCpuAddrPtr[idx], &psDevPAddr[idx]);
+                       }
+               }
+       }
+       else
+       {
+               /* In this case all addrs will be valid, so we can block translate */
+               PhysHeapDevPAddrToCpuPAddr(psPMR->psPhysHeap, ui32NumOfPages, psCpuAddrPtr, psDevPAddr);
+       }
+
+       if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+       {
+               OSFreeMem(psDevPAddr);
+       }
+
+       return PVRSRV_OK;
+e1:
+       if (psDevPAddr != asDevPAddr)
+       {
+               OSFreeMem(psDevPAddr);
+       }
+e0:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR,
+                                 IMG_UINT32 ui32AllocPageCount,
+                                 IMG_UINT32 *pai32AllocIndices,
+                                 IMG_UINT32 ui32FreePageCount,
+                                 IMG_UINT32 *pai32FreeIndices,
+                                 IMG_UINT32 uiSparseFlags)
+{
+       PVRSRV_ERROR eError;
+
+       if (IMG_TRUE == psPMR->bNoLayoutChange)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: This PMR layout cannot be changed",
+                               __func__));
+               return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+       }
+
+       if (NULL == psPMR->psFuncTab->pfnChangeSparseMem)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: This type of sparse PMR cannot be changed.",
+                               __func__));
+               return PVRSRV_ERROR_NOT_IMPLEMENTED;
+       }
+
+       eError = psPMR->psFuncTab->pfnChangeSparseMem(psPMR->pvFlavourData,
+                                                     psPMR,
+                                                     ui32AllocPageCount,
+                                                     pai32AllocIndices,
+                                                     ui32FreePageCount,
+                                                     pai32FreeIndices,
+                                                     uiSparseFlags);
+       if (eError != PVRSRV_OK)
+       {
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+               if (eError == PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES)
+               {
+                       PVRSRVStatsUpdateOOMStats(PVRSRV_PROCESS_STAT_TYPE_OOM_PHYSMEM_COUNT,
+                                                 OSGetCurrentClientProcessIDKM());
+               }
+#endif
+               goto e0;
+       }
+
+#if defined(PDUMP)
+       {
+               IMG_BOOL bInitialise = IMG_FALSE;
+               IMG_UINT32 ui32InitValue = 0;
+
+               if (PVRSRV_CHECK_ZERO_ON_ALLOC(PMR_Flags(psPMR)))
+               {
+                       bInitialise = IMG_TRUE;
+               }
+               else if (PVRSRV_CHECK_POISON_ON_ALLOC(PMR_Flags(psPMR)))
+               {
+                       ui32InitValue = 0xDEADBEEF;
+                       bInitialise = IMG_TRUE;
+               }
+
+               PDumpPMRChangeSparsePMR(psPMR,
+                                       1 << psPMR->uiLog2ContiguityGuarantee,
+                                       ui32AllocPageCount,
+                                       pai32AllocIndices,
+                                       ui32FreePageCount,
+                                       pai32FreeIndices,
+                                       bInitialise,
+                                       ui32InitValue,
+                                       &psPMR->hPDumpAllocHandle);
+       }
+
+#endif
+
+e0:
+       return eError;
+}
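+
+/* Illustrative sketch (not part of the driver): growing and shrinking the
+ * backing of a sparse PMR. The index values are hypothetical, and uiSparseFlags
+ * is left to the caller because its values are defined in the services
+ * headers, not in this file.
+ */
+#if 0  /* example only, never compiled */
+static PVRSRV_ERROR ExampleResizeSparse(PMR *psSparsePMR, IMG_UINT32 uiSparseFlags)
+{
+       IMG_UINT32 aui32Alloc[] = { 1, 3 };   /* virtual chunk indices to back */
+       IMG_UINT32 aui32Free[]  = { 0 };      /* virtual chunk index to release */
+
+       return PMR_ChangeSparseMem(psSparsePMR,
+                                  2, aui32Alloc,
+                                  1, aui32Free,
+                                  uiSparseFlags);
+}
+#endif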
+
+
+PVRSRV_ERROR PMR_ChangeSparseMemCPUMap(PMR *psPMR,
+                                       IMG_UINT64 sCpuVAddrBase,
+                                       IMG_UINT32 ui32AllocPageCount,
+                                       IMG_UINT32 *pai32AllocIndices,
+                                       IMG_UINT32 ui32FreePageCount,
+                                       IMG_UINT32 *pai32FreeIndices)
+{
+       PVRSRV_ERROR eError;
+
+       if ((NULL == psPMR->psFuncTab) ||
+                       (NULL == psPMR->psFuncTab->pfnChangeSparseMemCPUMap))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: This type of sparse PMR cannot be changed.",
+                               __func__));
+               return PVRSRV_ERROR_NOT_IMPLEMENTED;
+       }
+
+       if (IMG_TRUE == psPMR->bNoLayoutChange)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: This PMR layout cannot be changed",
+                               __func__));
+               return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+       }
+
+       eError = psPMR->psFuncTab->pfnChangeSparseMemCPUMap(psPMR->pvFlavourData,
+                                                           psPMR,
+                                                           sCpuVAddrBase,
+                                                           ui32AllocPageCount,
+                                                           pai32AllocIndices,
+                                                           ui32FreePageCount,
+                                                           pai32FreeIndices);
+
+       return eError;
+}
+
+
+#if defined(PDUMP)
+
+static PVRSRV_ERROR
+_PMR_PDumpSymbolicAddrPhysical(const PMR *psPMR,
+                               IMG_DEVMEM_OFFSET_T uiPhysicalOffset,
+                               IMG_UINT32 ui32MemspaceNameLen,
+                               IMG_CHAR *pszMemspaceName,
+                               IMG_UINT32 ui32SymbolicAddrLen,
+                               IMG_CHAR *pszSymbolicAddr,
+                               IMG_DEVMEM_OFFSET_T *puiNewOffset,
+                               IMG_DEVMEM_OFFSET_T *puiNextSymName)
+{
+       PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPMR->psPhysHeap);
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+#if defined(SUPPORT_SECURITY_VALIDATION)
+       if (PVRSRV_CHECK_PHYS_HEAP(FW_CODE, psPMR->uiFlags) ||
+               PVRSRV_CHECK_PHYS_HEAP(FW_PRIV_DATA, psPMR->uiFlags) ||
+           PVRSRV_CHECK_PHYS_HEAP(GPU_SECURE, psPMR->uiFlags))
+       {
+               OSSNPrintf(pszMemspaceName, ui32MemspaceNameLen, PMR_MEMSPACE_FMTSPEC,
+                          psPMR->pszPDumpDefaultMemspaceName);
+       }
+       else
+#endif
+       if (DevmemCPUCacheCoherency(psDevNode, psPMR->uiFlags) ||
+           DevmemDeviceCacheCoherency(psDevNode, psPMR->uiFlags))
+       {
+               OSSNPrintf(pszMemspaceName,
+                          ui32MemspaceNameLen,
+                          PMR_MEMSPACE_CACHE_COHERENT_FMTSPEC,
+                          psPMR->pszPDumpDefaultMemspaceName);
+       }
+       else
+       {
+               OSSNPrintf(pszMemspaceName, ui32MemspaceNameLen, PMR_MEMSPACE_FMTSPEC,
+                          psPMR->pszPDumpDefaultMemspaceName);
+       }
+
+       OSSNPrintf(pszSymbolicAddr,
+                  ui32SymbolicAddrLen,
+                  PMR_SYMBOLICADDR_FMTSPEC,
+                  PMR_DEFAULT_PREFIX,
+                  psPMR->uiSerialNum,
+                  uiPhysicalOffset >> PMR_GetLog2Contiguity(psPMR),
+                  psPMR->szAnnotation);
+
+       if (pszSymbolicAddr)
+       {
+               PDumpMakeStringValid(pszSymbolicAddr, OSStringLength(pszSymbolicAddr));
+       }
+
+
+       *puiNewOffset = uiPhysicalOffset & ((1 << PMR_GetLog2Contiguity(psPMR))-1);
+       *puiNextSymName = (IMG_DEVMEM_OFFSET_T) (((uiPhysicalOffset >> PMR_GetLog2Contiguity(psPMR))+1)
+                       << PMR_GetLog2Contiguity(psPMR));
+
+       return eError;
+}
+
+
+PVRSRV_ERROR
+PMR_PDumpSymbolicAddr(const PMR *psPMR,
+                      IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                      IMG_UINT32 ui32MemspaceNameLen,
+                      IMG_CHAR *pszMemspaceName,
+                      IMG_UINT32 ui32SymbolicAddrLen,
+                      IMG_CHAR *pszSymbolicAddr,
+                      IMG_DEVMEM_OFFSET_T *puiNewOffset,
+                      IMG_DEVMEM_OFFSET_T *puiNextSymName
+)
+{
+       IMG_DEVMEM_OFFSET_T uiPhysicalOffset;
+       IMG_UINT32 ui32Remain;
+       IMG_BOOL bValid;
+
+       PVR_ASSERT(uiLogicalOffset < psPMR->uiLogicalSize);
+
+       /* Confirm that the device node's ui32InternalID matches the bound
+        * PDump device stored in PVRSRV_DATA.
+        */
+       if (!PDumpIsDevicePermitted(PMR_DeviceNode(psPMR)))
+       {
+               return PVRSRV_OK;
+       }
+
+       _PMRLogicalOffsetToPhysicalOffset(psPMR,
+                                         0,
+                                         1,
+                                         uiLogicalOffset,
+                                         &uiPhysicalOffset,
+                                         &ui32Remain,
+                                         &bValid);
+
+       if (!bValid)
+       {
+               /* For sparse allocations there may be no physical memory
+                * backing a given logical address, but the virtual range can
+                * still be valid.
+                */
+               uiPhysicalOffset = uiLogicalOffset;
+       }
+
+       return _PMR_PDumpSymbolicAddrPhysical(psPMR,
+                                             uiPhysicalOffset,
+                                             ui32MemspaceNameLen,
+                                             pszMemspaceName,
+                                             ui32SymbolicAddrLen,
+                                             pszSymbolicAddr,
+                                             puiNewOffset,
+                                             puiNextSymName);
+}
+
+/*!
+ * @brief Writes a WRW command to the script2 buffer, representing a
+ *        dword write to a physical allocation. Size is always
+ *        sizeof(IMG_UINT32).
+ * @param psPMR - PMR object representing allocation
+ * @param uiLogicalOffset - offset
+ * @param ui32Value - value to write
+ * @param uiPDumpFlags - pdump flags
+ * @return PVRSRV_ERROR
+ */
+PVRSRV_ERROR
+PMRPDumpLoadMemValue32(PMR *psPMR,
+                       IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                       IMG_UINT32 ui32Value,
+                       PDUMP_FLAGS_T uiPDumpFlags)
+{
+       PVRSRV_ERROR eError;
+       IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+       IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+       IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+       IMG_DEVMEM_OFFSET_T uiNextSymName;
+       IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee;
+
+       /* Confirm that the device node's ui32InternalID matches the bound
+        * PDump device stored in PVRSRV_DATA.
+        */
+       if (!PDumpIsDevicePermitted(PMR_DeviceNode(psPMR)))
+       {
+               return PVRSRV_OK;
+       }
+
+       PVR_ASSERT(uiLogicalOffset + sizeof(ui32Value) <= psPMR->uiLogicalSize);
+       /* Especially make sure to not cross a block boundary */
+       PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui32Value))
+                       <= uiPMRPageSize));
+
+       eError = PMRLockSysPhysAddresses(psPMR);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+       /* Get the symbolic address of the PMR */
+       eError = PMR_PDumpSymbolicAddr(psPMR,
+                                      uiLogicalOffset,
+                                      sizeof(aszMemspaceName),
+                                      &aszMemspaceName[0],
+                                      sizeof(aszSymbolicName),
+                                      &aszSymbolicName[0],
+                                      &uiPDumpSymbolicOffset,
+                                      &uiNextSymName);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+       /* Write the WRW script command */
+       eError = PDumpPMRWRW32(PMR_DeviceNode(psPMR),
+                              aszMemspaceName,
+                              aszSymbolicName,
+                              uiPDumpSymbolicOffset,
+                              ui32Value,
+                              uiPDumpFlags);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+       eError = PMRUnlockSysPhysAddresses(psPMR);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+       return PVRSRV_OK;
+}
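+
+/*
+ * Illustrative usage (a sketch, not part of the driver): assuming psPMR is a
+ * previously created PMR and the 4-byte write does not cross a contiguity
+ * block boundary, a 32-bit PDump write could be emitted as:
+ *
+ *     eError = PMRPDumpLoadMemValue32(psPMR, 0, 0xCAFEF00D,
+ *                                     PDUMP_FLAGS_CONTINUOUS);
+ *
+ * PMRPDumpLoadMemValue64 follows the same pattern for an IMG_UINT64 value.
+ */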
+
+/*!
+ * @brief Writes a RDW followed by a WRW command to the pdump script to perform
+ *        an effective copy from memory to memory. Memory copied is of size
+ *        sizeof(IMG_UINT32)
+ *
+ * @param psDstPMR - PMR object representing allocation of destination
+ * @param uiDstLogicalOffset - destination offset
+ * @param psSrcPMR - PMR object representing allocation of source
+ * @param uiSrcLogicalOffset - source offset
+ * @param pszTmpVar - pdump temporary variable used during the copy
+ * @param uiPDumpFlags - pdump flags
+ * @return PVRSRV_ERROR
+ */
+PVRSRV_ERROR
+PMRPDumpCopyMem32(PMR *psDstPMR,
+                  IMG_DEVMEM_OFFSET_T uiDstLogicalOffset,
+                  PMR *psSrcPMR,
+                  IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset,
+                  const IMG_CHAR *pszTmpVar,
+                  PDUMP_FLAGS_T uiPDumpFlags)
+{
+       PVRSRV_ERROR eError;
+       IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+       IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+       IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+       IMG_DEVMEM_OFFSET_T uiNextSymName;
+       const IMG_UINT32 uiDstPMRPageSize = 1 << psDstPMR->uiLog2ContiguityGuarantee;
+       const IMG_UINT32 uiSrcPMRPageSize = 1 << psSrcPMR->uiLog2ContiguityGuarantee;
+
+       PVR_ASSERT(uiSrcLogicalOffset + sizeof(IMG_UINT32) <= psSrcPMR->uiLogicalSize);
+       /* Especially make sure to not cross a block boundary */
+       PVR_ASSERT(( ((uiSrcLogicalOffset & (uiSrcPMRPageSize-1)) + sizeof(IMG_UINT32))
+                       <= uiSrcPMRPageSize));
+
+       PVR_ASSERT(uiDstLogicalOffset + sizeof(IMG_UINT32) <= psDstPMR->uiLogicalSize);
+       /* Especially make sure to not cross a block boundary */
+       PVR_ASSERT(( ((uiDstLogicalOffset & (uiDstPMRPageSize-1)) + sizeof(IMG_UINT32))
+                       <= uiDstPMRPageSize));
+
+       eError = PMRLockSysPhysAddresses(psSrcPMR);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+       /* Get the symbolic address of the source PMR */
+       eError = PMR_PDumpSymbolicAddr(psSrcPMR,
+                                      uiSrcLogicalOffset,
+                                      sizeof(aszMemspaceName),
+                                      &aszMemspaceName[0],
+                                      sizeof(aszSymbolicName),
+                                      &aszSymbolicName[0],
+                                      &uiPDumpSymbolicOffset,
+                                      &uiNextSymName);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+       /* Issue PDump read command */
+       eError = PDumpPMRRDW32MemToInternalVar(PMR_DeviceNode(psSrcPMR),
+                                              pszTmpVar,
+                                              aszMemspaceName,
+                                              aszSymbolicName,
+                                              uiPDumpSymbolicOffset,
+                                              uiPDumpFlags);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+       eError = PMRUnlockSysPhysAddresses(psSrcPMR);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+
+
+       eError = PMRLockSysPhysAddresses(psDstPMR);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+
+       /* Get the symbolic address of the destination PMR */
+       eError = PMR_PDumpSymbolicAddr(psDstPMR,
+                                      uiDstLogicalOffset,
+                                      sizeof(aszMemspaceName),
+                                      &aszMemspaceName[0],
+                                      sizeof(aszSymbolicName),
+                                      &aszSymbolicName[0],
+                                      &uiPDumpSymbolicOffset,
+                                      &uiNextSymName);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+
+       /* Write the WRW script command */
+       eError = PDumpPMRWRW32InternalVarToMem(PMR_DeviceNode(psDstPMR),
+                                              aszMemspaceName,
+                                              aszSymbolicName,
+                                              uiPDumpSymbolicOffset,
+                                              pszTmpVar,
+                                              uiPDumpFlags);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+
+       eError = PMRUnlockSysPhysAddresses(psDstPMR);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+       return PVRSRV_OK;
+}
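+
+/*
+ * Illustrative usage (a sketch, not part of the driver): copying one dword
+ * between two PMRs via a PDump internal variable, assuming psSrcPMR and
+ * psDstPMR were obtained elsewhere and that ":SYSMEM:$1" names a temporary
+ * variable accepted by the PDump player:
+ *
+ *     eError = PMRPDumpCopyMem32(psDstPMR, 0, psSrcPMR, 0,
+ *                                ":SYSMEM:$1", PDUMP_FLAGS_CONTINUOUS);
+ *
+ * PMRPDumpCopyMem64 performs the analogous 64-bit copy.
+ */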
+
+/*!
+ * @brief Writes a WRW64 command to the script2 buffer, representing a
+ *        64-bit write to a physical allocation. Size is always
+ *        sizeof(IMG_UINT64).
+ * @param psPMR - PMR object representing allocation
+ * @param uiLogicalOffset - offset
+ * @param ui64Value - value to write
+ * @param uiPDumpFlags - pdump flags
+ * @return PVRSRV_ERROR
+ */
+PVRSRV_ERROR
+PMRPDumpLoadMemValue64(PMR *psPMR,
+                       IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                       IMG_UINT64 ui64Value,
+                       PDUMP_FLAGS_T uiPDumpFlags)
+{
+       PVRSRV_ERROR eError;
+       IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+       IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+       IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+       IMG_DEVMEM_OFFSET_T uiNextSymName;
+       IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee;
+
+       /* Confirm that the device node's ui32InternalID matches the bound
+        * PDump device stored in PVRSRV_DATA.
+        */
+       if (!PDumpIsDevicePermitted(PMR_DeviceNode(psPMR)))
+       {
+               return PVRSRV_OK;
+       }
+
+       PVR_ASSERT(uiLogicalOffset + sizeof(ui64Value) <= psPMR->uiLogicalSize);
+       /* Especially make sure to not cross a block boundary */
+       PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui64Value))
+                       <= uiPMRPageSize));
+
+       eError = PMRLockSysPhysAddresses(psPMR);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+       /* Get the symbolic address of the PMR */
+       eError = PMR_PDumpSymbolicAddr(psPMR,
+                                      uiLogicalOffset,
+                                      sizeof(aszMemspaceName),
+                                      &aszMemspaceName[0],
+                                      sizeof(aszSymbolicName),
+                                      &aszSymbolicName[0],
+                                      &uiPDumpSymbolicOffset,
+                                      &uiNextSymName);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+       /* Write the WRW script command */
+       eError = PDumpPMRWRW64(PMR_DeviceNode(psPMR),
+                              aszMemspaceName,
+                              aszSymbolicName,
+                              uiPDumpSymbolicOffset,
+                              ui64Value,
+                              uiPDumpFlags);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+       eError = PMRUnlockSysPhysAddresses(psPMR);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+       return PVRSRV_OK;
+}
+
+/*!
+ * @brief Writes a RDW64 followed by a WRW64 command to the pdump script to
+ *        perform an effective copy from memory to memory. Memory copied is of
+ *        size sizeof(IMG_UINT64)
+ *
+ * @param psDstPMR - PMR object representing allocation of destination
+ * @param uiDstLogicalOffset - destination offset
+ * @param psSrcPMR - PMR object representing allocation of source
+ * @param uiSrcLogicalOffset - source offset
+ * @param pszTmpVar - pdump temporary variable used during the copy
+ * @param uiPDumpFlags - pdump flags
+ * @return PVRSRV_ERROR
+ */
+PVRSRV_ERROR
+PMRPDumpCopyMem64(PMR *psDstPMR,
+                  IMG_DEVMEM_OFFSET_T uiDstLogicalOffset,
+                  PMR *psSrcPMR,
+                  IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset,
+                  const IMG_CHAR *pszTmpVar,
+                  PDUMP_FLAGS_T uiPDumpFlags)
+{
+       PVRSRV_ERROR eError;
+       IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+       IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+       IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+       IMG_DEVMEM_OFFSET_T uiNextSymName;
+       const IMG_UINT32 uiDstPMRPageSize = 1 << psDstPMR->uiLog2ContiguityGuarantee;
+       const IMG_UINT32 uiSrcPMRPageSize = 1 << psSrcPMR->uiLog2ContiguityGuarantee;
+
+       PVR_ASSERT(uiSrcLogicalOffset + sizeof(IMG_UINT32) <= psSrcPMR->uiLogicalSize);
+       /* Especially make sure to not cross a block boundary */
+       PVR_ASSERT(( ((uiSrcLogicalOffset & (uiSrcPMRPageSize-1)) + sizeof(IMG_UINT32))
+                       <= uiSrcPMRPageSize));
+
+       PVR_ASSERT(uiDstLogicalOffset + sizeof(IMG_UINT32) <= psDstPMR->uiLogicalSize);
+       /* Especially make sure to not cross a block boundary */
+       PVR_ASSERT(( ((uiDstLogicalOffset & (uiDstPMRPageSize-1)) + sizeof(IMG_UINT32))
+                       <= uiDstPMRPageSize));
+
+       eError = PMRLockSysPhysAddresses(psSrcPMR);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+       /* Get the symbolic address of the source PMR */
+       eError = PMR_PDumpSymbolicAddr(psSrcPMR,
+                                      uiSrcLogicalOffset,
+                                      sizeof(aszMemspaceName),
+                                      &aszMemspaceName[0],
+                                      sizeof(aszSymbolicName),
+                                      &aszSymbolicName[0],
+                                      &uiPDumpSymbolicOffset,
+                                      &uiNextSymName);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+       /* Issue PDump read command */
+       eError = PDumpPMRRDW64MemToInternalVar(PMR_DeviceNode(psSrcPMR),
+                                              pszTmpVar,
+                                              aszMemspaceName,
+                                              aszSymbolicName,
+                                              uiPDumpSymbolicOffset,
+                                              uiPDumpFlags);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+       eError = PMRUnlockSysPhysAddresses(psSrcPMR);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+
+
+       eError = PMRLockSysPhysAddresses(psDstPMR);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+
+       /* Get the symbolic address of the destination PMR */
+       eError = PMR_PDumpSymbolicAddr(psDstPMR,
+                                      uiDstLogicalOffset,
+                                      sizeof(aszMemspaceName),
+                                      &aszMemspaceName[0],
+                                      sizeof(aszSymbolicName),
+                                      &aszSymbolicName[0],
+                                      &uiPDumpSymbolicOffset,
+                                      &uiNextSymName);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+
+       /* Write the WRW script command */
+       eError = PDumpPMRWRW64InternalVarToMem(PMR_DeviceNode(psDstPMR),
+                                              aszMemspaceName,
+                                              aszSymbolicName,
+                                              uiPDumpSymbolicOffset,
+                                              pszTmpVar,
+                                              uiPDumpFlags);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+
+       eError = PMRUnlockSysPhysAddresses(psDstPMR);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+       return PVRSRV_OK;
+}
+
+/*!
+ * @brief PDumps the contents of the given allocation.
+ * If bZero is IMG_TRUE then the zero page in the parameter stream is used
+ * as the source of data, rather than the allocation's actual backing.
+ * @param psPMR - PMR object representing allocation
+ * @param uiLogicalOffset - Offset to write at
+ * @param uiSize - Number of bytes to write
+ * @param uiPDumpFlags - PDump flags
+ * @param bZero - Use the PDump zero page as the source
+ * @return PVRSRV_ERROR
+ */
+PVRSRV_ERROR
+PMRPDumpLoadMem(PMR *psPMR,
+                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                IMG_DEVMEM_SIZE_T uiSize,
+                PDUMP_FLAGS_T uiPDumpFlags,
+                IMG_BOOL bZero)
+{
+       PVRSRV_ERROR eError;
+       IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+       IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+       IMG_DEVMEM_OFFSET_T uiOutOffset;
+       IMG_DEVMEM_OFFSET_T uiCurrentOffset = uiLogicalOffset;
+       IMG_DEVMEM_OFFSET_T uiNextSymName = 0;
+       const IMG_CHAR *pszParamStreamFileName;
+       PDUMP_FILEOFFSET_T uiParamStreamFileOffset;
+
+       /* required when !bZero */
+#define PMR_MAX_PDUMP_BUFSZ (1<<21)
+       IMG_CHAR aszParamStreamFilename[PDUMP_PARAM_MAX_FILE_NAME];
+       IMG_UINT8 *pcBuffer = NULL;
+       size_t uiBufSz;
+       IMG_BOOL bValid;
+       IMG_DEVMEM_SIZE_T uiSizeRemain = uiSize;
+       PVRSRV_DEVICE_NODE *psDevNode = PMR_DeviceNode(psPMR);
+
+       /* Confirm that the device node's ui32InternalID matches the bound
+        * PDump device stored in PVRSRV_DATA.
+        */
+       if (!PDumpIsDevicePermitted(psDevNode))
+       {
+               return PVRSRV_OK;
+       }
+
+       PVR_ASSERT(uiLogicalOffset + uiSize <= psPMR->uiLogicalSize);
+
+       /* Check if pdump client is connected */
+       if (!PDumpCheckFlagsWrite(psDevNode,
+                                     PDUMP_FLAGS_CONTINUOUS))
+       {
+               /* Dumping memory into the PDump buffer is rejected when no client is connected,
+                * so return early and avoid reading the data from the PMR. */
+               return PVRSRV_OK;
+       }
+
+       /* Get the correct PDump stream file name */
+       if (bZero)
+       {
+               PDumpCommentWithFlags(psDevNode,
+                                     uiPDumpFlags,
+                                     "Zeroing allocation (" IMG_DEVMEM_SIZE_FMTSPEC " bytes)",
+                                     uiSize);
+
+               /* Get the zero page information; it is constant for this function */
+               PDumpGetParameterZeroPageInfo(&uiParamStreamFileOffset,
+                                             &uiBufSz,
+                                             &pszParamStreamFileName);
+       }
+       else
+       {
+
+               uiBufSz = 1 << PMR_GetLog2Contiguity(psPMR);
+               PVR_ASSERT((1 << PMR_GetLog2Contiguity(psPMR)) <= PMR_MAX_PDUMP_BUFSZ);
+
+               pcBuffer = OSAllocMem(uiBufSz);
+
+               PVR_LOG_RETURN_IF_NOMEM(pcBuffer, "OSAllocMem");
+
+               eError = PMRLockSysPhysAddresses(psPMR);
+               PVR_ASSERT(eError == PVRSRV_OK);
+
+               pszParamStreamFileName = aszParamStreamFilename;
+       }
+
+       /* Loop over all touched symbolic addresses of the PMR and
+        * emit LDBs to load the contents. */
+       while (uiCurrentOffset < (uiLogicalOffset + uiSize))
+       {
+               /* Get the correct symbolic name for the current offset */
+               eError = PMR_PDumpSymbolicAddr(psPMR,
+                                              uiCurrentOffset,
+                                              sizeof(aszMemspaceName),
+                                              &aszMemspaceName[0],
+                                              sizeof(aszSymbolicName),
+                                              &aszSymbolicName[0],
+                                              &uiOutOffset,
+                                              &uiNextSymName);
+               PVR_ASSERT(eError == PVRSRV_OK);
+               PVR_ASSERT((uiNextSymName - uiCurrentOffset) <= uiBufSz);
+
+               PMR_IsOffsetValid(psPMR,
+                                 0,
+                                 1,
+                                 uiCurrentOffset,
+                                 &bValid);
+
+               /* Either just LDB the zeros or read from the PMR and store that
+                * in the pdump stream */
+               if (bValid)
+               {
+                       size_t uiNumBytes;
+
+                       if (bZero)
+                       {
+                               uiNumBytes = MIN(uiSizeRemain, uiNextSymName - uiCurrentOffset);
+                       }
+                       else
+                       {
+                               IMG_DEVMEM_OFFSET_T uiReadOffset;
+                               uiReadOffset = ((uiNextSymName > (uiLogicalOffset + uiSize)) ?
+                                               uiLogicalOffset + uiSize - uiCurrentOffset :
+                                               uiNextSymName - uiCurrentOffset);
+
+                               eError = PMR_ReadBytes(psPMR,
+                                                      uiCurrentOffset,
+                                                      pcBuffer,
+                                                      uiReadOffset,
+                                                      &uiNumBytes);
+                               PVR_ASSERT(eError == PVRSRV_OK);
+
+                               eError = PDumpWriteParameterBlob(psDevNode,
+                                                         pcBuffer,
+                                                         uiNumBytes,
+                                                         uiPDumpFlags,
+                                                         &aszParamStreamFilename[0],
+                                                         sizeof(aszParamStreamFilename),
+                                                         &uiParamStreamFileOffset);
+                               if (eError == PVRSRV_ERROR_PDUMP_NOT_ALLOWED)
+                               {
+                                       /* Write to parameter file prevented under the flags and
+                                        * current state of the driver so skip further writes.
+                                        */
+                                       eError = PVRSRV_OK;
+                               }
+                               else if (eError != PVRSRV_OK)
+                               {
+                                       PDUMP_ERROR(psDevNode,
+                                                   eError, "Failed to write PMR memory to parameter file");
+                               }
+                       }
+
+                       /* Emit the LDB command to the current symbolic address */
+                       eError = PDumpPMRLDB(psDevNode,
+                                            aszMemspaceName,
+                                            aszSymbolicName,
+                                            uiOutOffset,
+                                            uiNumBytes,
+                                            pszParamStreamFileName,
+                                            uiParamStreamFileOffset,
+                                            uiPDumpFlags);
+                       uiSizeRemain = uiSizeRemain - uiNumBytes;
+               }
+               uiCurrentOffset = uiNextSymName;
+       }
+
+       if (!bZero)
+       {
+               eError = PMRUnlockSysPhysAddresses(psPMR);
+               PVR_ASSERT(eError == PVRSRV_OK);
+
+               OSFreeMem(pcBuffer);
+       }
+
+       return PVRSRV_OK;
+}
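+
+/*
+ * Illustrative usage (a sketch, not part of the driver): dumping the first
+ * 4096 bytes of an allocation into the PDump stream, assuming psPMR was
+ * obtained elsewhere and the range is within the PMR's logical size:
+ *
+ *     eError = PMRPDumpLoadMem(psPMR, 0, 4096, PDUMP_FLAGS_CONTINUOUS,
+ *                              IMG_FALSE);
+ *
+ * Passing IMG_TRUE for bZero instead emits LDBs that reference the shared
+ * zero page, so no data is read back from the PMR.
+ */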
+
+
+
+PVRSRV_ERROR
+PMRPDumpSaveToFile(const PMR *psPMR,
+                   IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   IMG_UINT32 uiArraySize,
+                   const IMG_CHAR *pszFilename,
+                   IMG_UINT32 uiFileOffset)
+{
+       PVRSRV_ERROR eError;
+       IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+       IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+       IMG_DEVMEM_OFFSET_T uiOutOffset;
+       IMG_DEVMEM_OFFSET_T uiCurrentOffset = uiLogicalOffset;
+       IMG_DEVMEM_OFFSET_T uiNextSymName = 0;
+       IMG_UINT32 uiCurrentFileOffset = uiFileOffset;
+
+       PVR_UNREFERENCED_PARAMETER(uiArraySize);
+
+       /* Confirm that the device node's ui32InternalID matches the bound
+        * PDump device stored in PVRSRV_DATA.
+        */
+       if (!PDumpIsDevicePermitted(PMR_DeviceNode(psPMR)))
+       {
+               return PVRSRV_OK;
+       }
+
+       PVR_ASSERT(uiLogicalOffset + uiSize <= psPMR->uiLogicalSize);
+
+       while (uiCurrentOffset < (uiLogicalOffset + uiSize))
+       {
+               IMG_DEVMEM_OFFSET_T uiReadOffset;
+
+               eError = PMR_PDumpSymbolicAddr(psPMR,
+                                              uiCurrentOffset,
+                                              sizeof(aszMemspaceName),
+                                              &aszMemspaceName[0],
+                                              sizeof(aszSymbolicName),
+                                              &aszSymbolicName[0],
+                                              &uiOutOffset,
+                                              &uiNextSymName);
+               PVR_ASSERT(eError == PVRSRV_OK);
+               PVR_ASSERT(uiNextSymName <= psPMR->uiLogicalSize);
+
+               uiReadOffset = ((uiNextSymName > (uiLogicalOffset + uiSize)) ?
+                               uiLogicalOffset + uiSize - uiCurrentOffset :
+                               uiNextSymName - uiCurrentOffset);
+
+               eError = PDumpPMRSAB(PMR_DeviceNode(psPMR),
+                                    aszMemspaceName,
+                                    aszSymbolicName,
+                                    uiOutOffset,
+                                    uiReadOffset,
+                                    pszFilename,
+                                    uiCurrentFileOffset);
+               PVR_ASSERT(eError == PVRSRV_OK);
+
+               uiCurrentFileOffset += uiNextSymName - uiCurrentOffset;
+               uiCurrentOffset = uiNextSymName;
+       }
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRPDumpPol32(const PMR *psPMR,
+              IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+              IMG_UINT32 ui32Value,
+              IMG_UINT32 ui32Mask,
+              PDUMP_POLL_OPERATOR eOperator,
+              PDUMP_FLAGS_T uiPDumpFlags)
+{
+       PVRSRV_ERROR eError;
+       IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+       IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+       IMG_DEVMEM_OFFSET_T uiPDumpOffset;
+       IMG_DEVMEM_OFFSET_T uiNextSymName;
+       IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee;
+
+       /* Confirm that the device node's ui32InternalID matches the bound
+        * PDump device stored in PVRSRV_DATA.
+        */
+       if (!PDumpIsDevicePermitted(PMR_DeviceNode(psPMR)))
+       {
+               return PVRSRV_OK;
+       }
+
+       /* Make sure to not cross a block boundary */
+       PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui32Value))
+                       <= uiPMRPageSize));
+
+       eError = PMR_PDumpSymbolicAddr(psPMR,
+                                      uiLogicalOffset,
+                                      sizeof(aszMemspaceName),
+                                      &aszMemspaceName[0],
+                                      sizeof(aszSymbolicName),
+                                      &aszSymbolicName[0],
+                                      &uiPDumpOffset,
+                                      &uiNextSymName);
+       PVR_GOTO_IF_ERROR(eError, e0);
+
+#define _MEMPOLL_DELAY         (1000)
+#define _MEMPOLL_COUNT         (2000000000 / _MEMPOLL_DELAY)
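+       /* These values give a poll budget of 2000000000 / 1000 = 2,000,000
+        * attempts with a delay of 1000 (in the units expected by PDumpPMRPOL)
+        * between attempts, i.e. roughly 2e9 delay units overall.
+        */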
+
+       eError = PDumpPMRPOL(PMR_DeviceNode(psPMR),
+                            aszMemspaceName,
+                            aszSymbolicName,
+                            uiPDumpOffset,
+                            ui32Value,
+                            ui32Mask,
+                            eOperator,
+                            _MEMPOLL_COUNT,
+                            _MEMPOLL_DELAY,
+                            uiPDumpFlags);
+       PVR_GOTO_IF_ERROR(eError, e0);
+
+       return PVRSRV_OK;
+
+       /* Error exit paths follow */
+e0:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+PVRSRV_ERROR
+PMRPDumpCheck32(const PMR *psPMR,
+                               IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                               IMG_UINT32 ui32Value,
+                               IMG_UINT32 ui32Mask,
+                               PDUMP_POLL_OPERATOR eOperator,
+                               PDUMP_FLAGS_T uiPDumpFlags)
+{
+       PVRSRV_ERROR eError;
+       IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+       IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+       IMG_DEVMEM_OFFSET_T uiPDumpOffset;
+       IMG_DEVMEM_OFFSET_T uiNextSymName;
+       IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee;
+
+       /* Confirm that the device node's ui32InternalID matches the bound
+        * PDump device stored in PVRSRV_DATA.
+        */
+       if (!PDumpIsDevicePermitted(PMR_DeviceNode(psPMR)))
+       {
+               return PVRSRV_OK;
+       }
+
+       /* Make sure to not cross a block boundary */
+       PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui32Value))
+                  < uiPMRPageSize));
+
+       eError = PMR_PDumpSymbolicAddr(psPMR,
+                                      uiLogicalOffset,
+                                      sizeof(aszMemspaceName),
+                                      &aszMemspaceName[0],
+                                      sizeof(aszSymbolicName),
+                                      &aszSymbolicName[0],
+                                      &uiPDumpOffset,
+                                      &uiNextSymName);
+       if (eError != PVRSRV_OK)
+       {
+               goto e0;
+       }
+
+       eError = PDumpPMRPOL(PMR_DeviceNode(psPMR),
+                            aszMemspaceName,
+                            aszSymbolicName,
+                            uiPDumpOffset,
+                            ui32Value,
+                            ui32Mask,
+                            eOperator,
+                            1,
+                            1,
+                            uiPDumpFlags);
+       if (eError != PVRSRV_OK)
+       {
+               goto e0;
+       }
+
+       return PVRSRV_OK;
+
+       /* Error exit paths follow */
+e0:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+PVRSRV_ERROR
+PMRPDumpCBP(const PMR *psPMR,
+            IMG_DEVMEM_OFFSET_T uiReadOffset,
+            IMG_DEVMEM_OFFSET_T uiWriteOffset,
+            IMG_DEVMEM_SIZE_T uiPacketSize,
+            IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+       PVRSRV_ERROR eError;
+       IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+       IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+       IMG_DEVMEM_OFFSET_T uiPDumpOffset;
+       IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+       /* Confirm that the device node's ui32InternalID matches the bound
+        * PDump device stored in PVRSRV_DATA.
+        */
+       if (!PDumpIsDevicePermitted(PMR_DeviceNode(psPMR)))
+       {
+               return PVRSRV_OK;
+       }
+
+       eError = PMR_PDumpSymbolicAddr(psPMR,
+                                      uiReadOffset,
+                                      sizeof(aszMemspaceName),
+                                      &aszMemspaceName[0],
+                                      sizeof(aszSymbolicName),
+                                      &aszSymbolicName[0],
+                                      &uiPDumpOffset,
+                                      &uiNextSymName);
+       PVR_GOTO_IF_ERROR(eError, e0);
+
+       eError = PDumpPMRCBP(PMR_DeviceNode(psPMR),
+                            aszMemspaceName,
+                            aszSymbolicName,
+                            uiPDumpOffset,
+                            uiWriteOffset,
+                            uiPacketSize,
+                            uiBufferSize);
+       PVR_GOTO_IF_ERROR(eError, e0);
+
+       return PVRSRV_OK;
+
+       /* Error exit paths follow */
+e0:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+static void
+PDumpPMRChangeSparsePMR(PMR *psPMR,
+                        IMG_UINT32 uiBlockSize,
+                        IMG_UINT32 ui32AllocPageCount,
+                        IMG_UINT32 *pai32AllocIndices,
+                        IMG_UINT32 ui32FreePageCount,
+                        IMG_UINT32 *pai32FreeIndices,
+                        IMG_BOOL bInitialise,
+                        IMG_UINT32 ui32InitValue,
+                        IMG_HANDLE *phPDumpAllocInfoOut)
+{
+       PVRSRV_ERROR eError;
+       IMG_HANDLE *phPDumpAllocInfo = (IMG_HANDLE*) psPMR->hPDumpAllocHandle;
+
+       IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+       IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+       IMG_DEVMEM_OFFSET_T uiOffset;
+       IMG_DEVMEM_OFFSET_T uiNextSymName;
+       IMG_UINT32 i, uiIndex;
+       PVRSRV_DEVICE_NODE *psDevNode = PMR_DeviceNode(psPMR);
+
+       /* Remove pages from the PMR */
+       for (i = 0; i < ui32FreePageCount; i++)
+       {
+               uiIndex = pai32FreeIndices[i];
+
+               eError = PDumpFree(psDevNode,
+                                  phPDumpAllocInfo[uiIndex]);
+               PVR_ASSERT(eError == PVRSRV_OK);
+               phPDumpAllocInfo[uiIndex] = NULL;
+       }
+
+       /* Add new pages to the PMR */
+       for (i = 0; i < ui32AllocPageCount; i++)
+       {
+               uiIndex = pai32AllocIndices[i];
+
+               PVR_ASSERT(phPDumpAllocInfo[uiIndex] == NULL);
+
+               eError = PMR_PDumpSymbolicAddr(psPMR,
+                                              uiIndex * uiBlockSize,
+                                              sizeof(aszMemspaceName),
+                                              &aszMemspaceName[0],
+                                              sizeof(aszSymbolicName),
+                                              &aszSymbolicName[0],
+                                              &uiOffset,
+                                              &uiNextSymName);
+               PVR_ASSERT(eError == PVRSRV_OK);
+
+               eError = PDumpMalloc(psDevNode,
+                                    aszMemspaceName,
+                                    aszSymbolicName,
+                                    uiBlockSize,
+                                    uiBlockSize,
+                                    bInitialise,
+                                    ui32InitValue,
+                                    &phPDumpAllocInfo[uiIndex],
+                                    PDUMP_NONE);
+               PVR_ASSERT(eError == PVRSRV_OK);
+       }
+
+       /* (IMG_HANDLE) <- (IMG_HANDLE*) */
+       *phPDumpAllocInfoOut = (IMG_HANDLE) phPDumpAllocInfo;
+}
+
+static void
+PDumpPMRFreePMR(PMR *psPMR,
+                IMG_DEVMEM_SIZE_T uiSize,
+                IMG_DEVMEM_ALIGN_T uiBlockSize,
+                IMG_UINT32 uiLog2Contiguity,
+                IMG_HANDLE hPDumpAllocationInfoHandle)
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 i;
+
+       /* (IMG_HANDLE*) <- (IMG_HANDLE) */
+       IMG_HANDLE *ahPDumpAllocHandleArray = (IMG_HANDLE*) hPDumpAllocationInfoHandle;
+
+       for (i = 0; i < psPMR->uiNumPDumpBlocks; i++)
+       {
+               if (ahPDumpAllocHandleArray[i] != NULL)
+               {
+                       eError = PDumpFree(PMR_DeviceNode(psPMR),
+                                          ahPDumpAllocHandleArray[i]);
+                       PVR_ASSERT(eError == PVRSRV_OK);
+                       ahPDumpAllocHandleArray[i] = NULL;
+               }
+       }
+
+       OSFreeMem(ahPDumpAllocHandleArray);
+}
+
+static void
+PDumpPMRMallocPMR(PMR *psPMR,
+                  IMG_DEVMEM_SIZE_T uiSize,
+                  IMG_DEVMEM_ALIGN_T uiBlockSize,
+                  IMG_UINT32 ui32ChunkSize,
+                  IMG_UINT32 ui32NumPhysChunks,
+                  IMG_UINT32 ui32NumVirtChunks,
+                  IMG_UINT32 *puiMappingTable,
+                  IMG_UINT32 uiLog2Contiguity,
+                  IMG_BOOL bInitialise,
+                  IMG_UINT32 ui32InitValue,
+                  IMG_HANDLE *phPDumpAllocInfoOut,
+                  IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError;
+       IMG_HANDLE *phPDumpAllocInfo;
+
+       IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+       IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+       IMG_DEVMEM_OFFSET_T uiOffset;
+       IMG_DEVMEM_OFFSET_T uiNextSymName;
+       IMG_UINT32 uiNumPhysBlocks;
+       IMG_UINT32 uiNumVirtBlocks;
+       IMG_UINT32 i, uiIndex;
+
+       if (PMR_IsSparse(psPMR))
+       {
+               uiNumPhysBlocks = (ui32ChunkSize * ui32NumPhysChunks) >> uiLog2Contiguity;
+               /* Make sure we did not cut off anything */
+               PVR_ASSERT(uiNumPhysBlocks << uiLog2Contiguity == (ui32ChunkSize * ui32NumPhysChunks));
+       }
+       else
+       {
+               uiNumPhysBlocks = uiSize >> uiLog2Contiguity;
+               /* Make sure we did not cut off anything */
+               PVR_ASSERT(uiNumPhysBlocks << uiLog2Contiguity == uiSize);
+       }
+
+       uiNumVirtBlocks = uiSize >> uiLog2Contiguity;
+       PVR_ASSERT(uiNumVirtBlocks << uiLog2Contiguity == uiSize);
+
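+       /* Worked example (illustrative values) for the block counts above:
+        * with uiLog2Contiguity == 12 (4 KiB blocks), a 64 KiB sparse PMR
+        * backed by four 4 KiB physical chunks gives uiNumPhysBlocks == 4 and
+        * uiNumVirtBlocks == 16; a fully backed 64 KiB PMR gives 16 of each.
+        */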
+       psPMR->uiNumPDumpBlocks = uiNumVirtBlocks;
+
+       phPDumpAllocInfo = (IMG_HANDLE*) OSAllocZMem(uiNumVirtBlocks * sizeof(IMG_HANDLE));
+       /* Without this array there is nowhere to record the per-block PDump
+        * allocations, so bail out early if the allocation failed. */
+       PVR_LOG_RETURN_VOID_IF_FALSE(phPDumpAllocInfo != NULL, "OSAllocZMem");
+
+       for (i = 0; i < uiNumPhysBlocks; i++)
+       {
+               uiIndex = PMR_IsSparse(psPMR) ? puiMappingTable[i] : i;
+
+               eError = PMR_PDumpSymbolicAddr(psPMR,
+                                              uiIndex * uiBlockSize,
+                                              sizeof(aszMemspaceName),
+                                              &aszMemspaceName[0],
+                                              sizeof(aszSymbolicName),
+                                              &aszSymbolicName[0],
+                                              &uiOffset,
+                                              &uiNextSymName);
+               PVR_ASSERT(eError == PVRSRV_OK);
+
+               eError = PDumpMalloc(PMR_DeviceNode(psPMR),
+                                    aszMemspaceName,
+                                    aszSymbolicName,
+                                    uiBlockSize,
+                                    uiBlockSize,
+                                    bInitialise,
+                                    ui32InitValue,
+                                    &phPDumpAllocInfo[uiIndex],
+                                    ui32PDumpFlags);
+               PVR_LOG_RETURN_VOID_IF_FALSE((eError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE),
+                                            "PDumpPMRMalloc PDump capture bound to other device");
+               PVR_ASSERT(eError == PVRSRV_OK);
+       }
+
+       /* (IMG_HANDLE) <- (IMG_HANDLE*) */
+       *phPDumpAllocInfoOut = (IMG_HANDLE) phPDumpAllocInfo;
+
+}
+#endif /* PDUMP */
+
+
+void *PMRGetPrivateData(const PMR *psPMR,
+                        const PMR_IMPL_FUNCTAB *psFuncTab)
+{
+       return (psFuncTab == psPMR->psFuncTab) ? psPMR->pvFlavourData : NULL;
+}
+
+#define PMR_PM_WORD_SIZE 4
+
+PVRSRV_ERROR
+PMRWritePMPageList(/* Target PMR, offset, and length */
+               PMR *psPageListPMR,
+               IMG_DEVMEM_OFFSET_T uiTableOffset,
+               IMG_DEVMEM_SIZE_T  uiTableLength,
+               /* Referenced PMR, and "page" granularity */
+               PMR *psReferencePMR,
+               IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize,
+               PMR_PAGELIST **ppsPageList)
+{
+       PVRSRV_ERROR eError;
+       IMG_DEVMEM_SIZE_T uiWordSize;
+       IMG_UINT32 uiNumPages;
+       IMG_UINT32 uiPageIndex;
+       PMR_FLAGS_T uiFlags = psPageListPMR->uiFlags;
+       PMR_PAGELIST *psPageList;
+#if defined(PDUMP)
+       IMG_CHAR aszTableEntryMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+       IMG_CHAR aszTableEntrySymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+       IMG_DEVMEM_OFFSET_T uiTableEntryPDumpOffset;
+       IMG_CHAR aszPageMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+       IMG_CHAR aszPageSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+       IMG_DEVMEM_OFFSET_T uiPagePDumpOffset;
+       IMG_DEVMEM_OFFSET_T uiNextSymName;
+#endif
+#if !defined(NO_HARDWARE)
+       IMG_UINT32 uiPageListPageSize = 1 << psPageListPMR->uiLog2ContiguityGuarantee;
+       IMG_UINT64 uiPageListPMRPage = 0;
+       IMG_UINT64 uiPrevPageListPMRPage = 0;
+       IMG_HANDLE hPrivData = NULL;
+       void *pvKernAddr = NULL;
+       IMG_UINT32 *pui32DataPtr = NULL;
+       IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+       IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+       IMG_DEV_PHYADDR *pasDevAddrPtr;
+       IMG_BOOL *pbPageIsValid;
+#endif
+
+       uiWordSize = PMR_PM_WORD_SIZE;
+
+       /* check we're being asked to write the same number of 4-byte units as there are pages */
+       uiNumPages = (IMG_UINT32)(psReferencePMR->uiLogicalSize >> uiLog2PageSize);
+
+       if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psReferencePMR->uiLogicalSize)
+       {
+               /* Strictly speaking, it's possible to provoke this error in two ways:
+                       (i) the logical size is not a whole multiple of the page size; or
+                       (ii) there are more than 4 billion pages.
+                       The latter is unlikely, but the check is required in order to justify the cast.
+                */
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE, return_error);
+       }
+       uiWordSize = (IMG_UINT32)uiTableLength / uiNumPages;
+       if (uiNumPages * uiWordSize != uiTableLength)
+       {
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE, return_error);
+       }
+
+       /* Check we're not being asked to write off the end of the PMR */
+       PVR_GOTO_IF_INVALID_PARAM(uiTableOffset + uiTableLength <= psPageListPMR->uiLogicalSize, eError, return_error);
+
+       /* the PMR into which we are writing must not be user CPU mappable: */
+       if (PVRSRV_CHECK_CPU_READABLE(uiFlags) || PVRSRV_CHECK_CPU_WRITEABLE(uiFlags))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "Masked flags = 0x%" PVRSRV_MEMALLOCFLAGS_FMTSPEC,
+                        (PMR_FLAGS_T)(uiFlags & (PVRSRV_MEMALLOCFLAG_CPU_READABLE | PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE))));
+               PVR_DPF((PVR_DBG_ERROR,
+                        "Page list PMR allows CPU mapping (0x%" PVRSRV_MEMALLOCFLAGS_FMTSPEC ")",
+                        uiFlags));
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_INVALID_PMR_FLAGS, return_error);
+       }
+
+       if (_PMRIsSparse(psPageListPMR))
+       {
+               PVR_LOG_GOTO_WITH_ERROR("psPageListPMR", eError, PVRSRV_ERROR_INVALID_PARAMS, return_error);
+       }
+
+       if (_PMRIsSparse(psReferencePMR))
+       {
+               PVR_LOG_GOTO_WITH_ERROR("psReferencePMR", eError, PVRSRV_ERROR_INVALID_PARAMS, return_error);
+       }
+
+       psPageList = OSAllocMem(sizeof(PMR_PAGELIST));
+       PVR_LOG_GOTO_IF_NOMEM(psPageList, eError, return_error);
+
+       psPageList->psReferencePMR = psReferencePMR;
+
+       /* Need to lock down the physical addresses of the reference PMR */
+       /* N.B.  This also checks that the requested "contiguity" is achievable */
+       eError = PMRLockSysPhysAddresses(psReferencePMR);
+       PVR_GOTO_IF_ERROR(eError, free_page_list);
+
+#if !defined(NO_HARDWARE)
+       if (uiNumPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+       {
+               pasDevAddrPtr = OSAllocMem(uiNumPages * sizeof(IMG_DEV_PHYADDR));
+               PVR_LOG_GOTO_IF_NOMEM(pasDevAddrPtr, eError, unlock_phys_addrs);
+
+               pbPageIsValid = OSAllocMem(uiNumPages * sizeof(IMG_BOOL));
+               if (pbPageIsValid == NULL)
+               {
+                       /* Clean-up before exit */
+                       OSFreeMem(pasDevAddrPtr);
+
+                       PVR_LOG_GOTO_WITH_ERROR("pbPageIsValid", eError, PVRSRV_ERROR_OUT_OF_MEMORY, free_devaddr_array);
+               }
+       }
+       else
+       {
+               pasDevAddrPtr = asDevPAddr;
+               pbPageIsValid = abValid;
+       }
+
+       eError = PMR_DevPhysAddr(psReferencePMR, uiLog2PageSize, uiNumPages, 0,
+                                pasDevAddrPtr, pbPageIsValid);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PMR_DevPhysAddr", free_valid_array);
+#endif
+
+       for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++)
+       {
+               IMG_DEVMEM_OFFSET_T uiPMROffset = uiTableOffset + (uiWordSize * uiPageIndex);
+
+#if defined(PDUMP)
+               eError = PMR_PDumpSymbolicAddr(psPageListPMR,
+                                              uiPMROffset,
+                                              sizeof(aszTableEntryMemspaceName),
+                                              &aszTableEntryMemspaceName[0],
+                                              sizeof(aszTableEntrySymbolicName),
+                                              &aszTableEntrySymbolicName[0],
+                                              &uiTableEntryPDumpOffset,
+                                              &uiNextSymName);
+               PVR_ASSERT(eError == PVRSRV_OK);
+
+               eError = PMR_PDumpSymbolicAddr(psReferencePMR,
+                                              (IMG_DEVMEM_OFFSET_T)uiPageIndex << uiLog2PageSize,
+                                              sizeof(aszPageMemspaceName),
+                                              &aszPageMemspaceName[0],
+                                              sizeof(aszPageSymbolicName),
+                                              &aszPageSymbolicName[0],
+                                              &uiPagePDumpOffset,
+                                              &uiNextSymName);
+               PVR_ASSERT(eError == PVRSRV_OK);
+
+               eError = PDumpWriteShiftedMaskedValue(PMR_DeviceNode(psReferencePMR),
+                                                     /* destination */
+                                                     aszTableEntryMemspaceName,
+                                                     aszTableEntrySymbolicName,
+                                                     uiTableEntryPDumpOffset,
+                                                     /* source */
+                                                     aszPageMemspaceName,
+                                                     aszPageSymbolicName,
+                                                     uiPagePDumpOffset,
+                                                     /* shift right */
+                                                     uiLog2PageSize,
+                                                     /* shift left */
+                                                     0,
+                                                     /* mask */
+                                                     0xffffffff,
+                                                     /* word size */
+                                                     uiWordSize,
+                                                     /* flags */
+                                                     PDUMP_FLAGS_CONTINUOUS);
+               PVR_ASSERT(eError == PVRSRV_OK);
+#else
+               PVR_UNREFERENCED_PARAMETER(uiPMROffset);
+#endif
+
+#if !defined(NO_HARDWARE)
+
+               /*
+                       We check for sparse PMRs at function entry, but while we
+                       are here we can also check that every page is valid.
+                */
+               PVR_ASSERT(pbPageIsValid[uiPageIndex]);
+               PVR_ASSERT(pasDevAddrPtr[uiPageIndex].uiAddr != 0);
+               PVR_ASSERT(((pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize) & 0xFFFFFFFF00000000ll) == 0);
+
+               uiPageListPMRPage = uiPMROffset >> psReferencePMR->uiLog2ContiguityGuarantee;
+
+               if ((pui32DataPtr == NULL) || (uiPageListPMRPage != uiPrevPageListPMRPage))
+               {
+                       size_t uiMappingOffset = uiPMROffset & (~(uiPageListPageSize - 1));
+                       size_t uiMappedSize;
+
+                       /* If we already had a page list mapped, we need to unmap it... */
+                       if (pui32DataPtr != NULL)
+                       {
+                               PMRReleaseKernelMappingData(psPageListPMR, hPrivData);
+                       }
+
+                       eError = PMRAcquireKernelMappingData(psPageListPMR,
+                                                            uiMappingOffset,
+                                                            uiPageListPageSize,
+                                                            &pvKernAddr,
+                                                            &uiMappedSize,
+                                                            &hPrivData);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "Error mapping page list PMR page (%" IMG_UINT64_FMTSPEC ") into kernel (%d)",
+                                               uiPageListPMRPage, eError));
+                               goto free_valid_array;
+                       }
+
+                       uiPrevPageListPMRPage = uiPageListPMRPage;
+                       PVR_ASSERT(uiMappedSize >= uiPageListPageSize);
+                       PVR_ASSERT(pvKernAddr != NULL);
+
+                       pui32DataPtr = IMG_OFFSET_ADDR(pvKernAddr, (uiPMROffset & (uiPageListPageSize - 1)));
+               }
+
+               PVR_ASSERT(((pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize) & 0xFFFFFFFF00000000ll) == 0);
+
+               /* Write the physical page index into the page list PMR */
+               *pui32DataPtr++ = TRUNCATE_64BITS_TO_32BITS(pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize);
+
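+               /* The value written above is the page frame number: e.g. with
+                * uiLog2PageSize == 12, a backing page at device physical
+                * address 0x12345000 is stored as the 32-bit entry 0x00012345.
+                */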
+               /* Last page so unmap */
+               if (uiPageIndex == (uiNumPages - 1))
+               {
+                       PMRReleaseKernelMappingData(psPageListPMR, hPrivData);
+               }
+#endif
+       }
+
+       /* if this memory is allocated as write-combine we must flush write
+        * buffers */
+       if (PVRSRV_CHECK_CPU_WRITE_COMBINE(psPageListPMR->uiFlags))
+       {
+               OSWriteMemoryBarrier(NULL);
+       }
+
+#if !defined(NO_HARDWARE)
+       if (pasDevAddrPtr != asDevPAddr)
+       {
+               OSFreeMem(pbPageIsValid);
+               OSFreeMem(pasDevAddrPtr);
+       }
+#endif
+       *ppsPageList = psPageList;
+       return PVRSRV_OK;
+
+       /* Error exit paths follow */
+#if !defined(NO_HARDWARE)
+
+free_valid_array:
+       if (pbPageIsValid != abValid)
+       {
+               OSFreeMem(pbPageIsValid);
+       }
+
+free_devaddr_array:
+       if (pasDevAddrPtr != asDevPAddr)
+       {
+               OSFreeMem(pasDevAddrPtr);
+       }
+
+unlock_phys_addrs:
+       PMRUnlockSysPhysAddresses(psReferencePMR);
+#endif
+
+free_page_list:
+       OSFreeMem(psPageList);
+
+return_error:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+
+PVRSRV_ERROR
+PMRUnwritePMPageList(PMR_PAGELIST *psPageList)
+{
+       PVRSRV_ERROR eError;
+
+       eError = PMRUnlockSysPhysAddresses(psPageList->psReferencePMR);
+       PVR_ASSERT(eError == PVRSRV_OK);
+       OSFreeMem(psPageList);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRZeroingPMR(PMR *psPMR,
+              IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize)
+{
+       IMG_UINT32 uiNumPages;
+       IMG_UINT32 uiPageIndex;
+       IMG_UINT32 ui32PageSize = 1 << uiLog2PageSize;
+       IMG_HANDLE hPrivData = NULL;
+       void *pvKernAddr = NULL;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       size_t uiMappedSize;
+
+       PVR_ASSERT(psPMR);
+
+       /* Calculate number of pages in this PMR */
+       uiNumPages = (IMG_UINT32)(psPMR->uiLogicalSize >> uiLog2PageSize);
+
+       /* Verify the logical size is a multiple of the physical page size */
+       if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psPMR->uiLogicalSize)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: PMR is not a multiple of %u",
+                        __func__,
+                        ui32PageSize));
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE, MultiPage_Error);
+       }
+
+       if (_PMRIsSparse(psPMR))
+       {
+               PVR_LOG_GOTO_WITH_ERROR("psPMR", eError, PVRSRV_ERROR_INVALID_PARAMS, Sparse_Error);
+       }
+
+       /* Scan through all pages of the PMR */
+       for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++)
+       {
+               /* map the physical page (for a given PMR offset) into kernel space */
+               eError = PMRAcquireKernelMappingData(psPMR,
+                                                    (size_t)uiPageIndex << uiLog2PageSize,
+                                                    ui32PageSize,
+                                                    &pvKernAddr,
+                                                    &uiMappedSize,
+                                                    &hPrivData);
+               PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireKernelMappingData", AcquireKernelMapping_Error);
+
+               /* ensure the mapped page size is the same as the physical page size */
+               if (uiMappedSize != ui32PageSize)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Physical Page size = 0x%08x, Size of Mapping = 0x%016" IMG_UINT64_FMTSPECx,
+                                __func__,
+                                ui32PageSize,
+                                (IMG_UINT64)uiMappedSize));
+                       PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, MappingSize_Error);
+               }
+
+               /* Use the conservative 'DeviceMemSet' here because we can't know
+                * if this PMR will be mapped cached.
+                */
+               OSDeviceMemSet(pvKernAddr, 0, ui32PageSize);
+
+               /* release mapping */
+               PMRReleaseKernelMappingData(psPMR, hPrivData);
+
+       }
+
+       PVR_DPF((PVR_DBG_MESSAGE,
+                "%s: Zeroing PMR %p done (num pages %u, page size %u)",
+                __func__,
+                psPMR,
+                uiNumPages,
+                ui32PageSize));
+
+       return PVRSRV_OK;
+
+
+       /* Error handling */
+
+MappingSize_Error:
+       PMRReleaseKernelMappingData(psPMR, hPrivData);
+
+AcquireKernelMapping_Error:
+Sparse_Error:
+MultiPage_Error:
+
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
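+
+/* A minimal usage sketch (not compiled): it restates the general
+ * acquire/initialise/release pattern that PMRZeroingPMR above follows for
+ * each page. The function name _ExampleTouchFirstPage and the 4096-byte
+ * mapping size are hypothetical placeholders.
+ */
+#if 0
+static PVRSRV_ERROR _ExampleTouchFirstPage(PMR *psPMR)
+{
+	void *pvKernAddr;
+	size_t uiMappedSize;
+	IMG_HANDLE hPrivData;
+	PVRSRV_ERROR eError;
+
+	/* Map the first page of the PMR into kernel space */
+	eError = PMRAcquireKernelMappingData(psPMR, 0, 4096,
+	                                     &pvKernAddr, &uiMappedSize, &hPrivData);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	/* Touch the mapping (here: zero it, as PMRZeroingPMR does per page) */
+	OSDeviceMemSet(pvKernAddr, 0, uiMappedSize);
+
+	/* Always release the kernel mapping again */
+	PMRReleaseKernelMappingData(psPMR, hPrivData);
+
+	return PVRSRV_OK;
+}
+#endif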
+
+PVRSRV_ERROR
+PMRDumpPageList(PMR *psPMR,
+                IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize)
+{
+       IMG_DEV_PHYADDR sDevAddrPtr;
+       IMG_UINT32 uiNumPages;
+       IMG_UINT32 uiPageIndex;
+       IMG_BOOL bPageIsValid;
+       IMG_UINT32 ui32Col = 16;
+       IMG_UINT32 ui32SizePerCol = 11;
+       IMG_UINT32 ui32ByteCount = 0;
+       IMG_CHAR pszBuffer[16 /* ui32Col */ * 11 /* ui32SizePerCol */ + 1];
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       /* Get number of pages */
+       uiNumPages = (IMG_UINT32)(psPMR->uiLogicalSize >> uiLog2PageSize);
+
+       /* Verify the logical size is a multiple of the physical page size */
+       if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psPMR->uiLogicalSize)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: PMR is not a multiple of %" IMG_UINT64_FMTSPEC,
+                       __func__, (IMG_UINT64) (1ULL << uiLog2PageSize)));
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE, MultiPage_Error);
+       }
+
+       if (_PMRIsSparse(psPMR))
+       {
+               PVR_LOG_GOTO_WITH_ERROR("psPMR", eError, PVRSRV_ERROR_INVALID_PARAMS, Sparse_Error);
+       }
+
+       PVR_LOG(("    PMR %p, Number of pages %u, Log2PageSize %d", psPMR, uiNumPages, uiLog2PageSize));
+
+       /* Print the address of the physical pages */
+       for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++)
+       {
+               /* Get Device physical Address */
+               eError = PMR_DevPhysAddr(psPMR,
+                                        uiLog2PageSize,
+                                        1,
+                                        (IMG_DEVMEM_OFFSET_T)uiPageIndex << uiLog2PageSize,
+                                        &sDevAddrPtr,
+                                        &bPageIsValid);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: PMR %p failed to get DevPhysAddr with error %u",
+                                       __func__,
+                                       psPMR,
+                                       eError));
+                       goto DevPhysAddr_Error;
+               }
+
+               ui32ByteCount += OSSNPrintf(pszBuffer + ui32ByteCount, ui32SizePerCol + 1, "%08x ", (IMG_UINT32)(sDevAddrPtr.uiAddr >> uiLog2PageSize));
+               PVR_ASSERT(ui32ByteCount < ui32Col * ui32SizePerCol);
+
+               if (uiPageIndex % ui32Col == ui32Col-1)
+               {
+                       PVR_LOG(("      Phys Page: %s", pszBuffer));
+                       ui32ByteCount = 0;
+               }
+       }
+       if (ui32ByteCount > 0)
+       {
+               PVR_LOG(("      Phys Page: %s", pszBuffer));
+       }
+
+       return PVRSRV_OK;
+
+       /* Error handling */
+DevPhysAddr_Error:
+Sparse_Error:
+MultiPage_Error:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+PVRSRV_ERROR
+PMRInit(void)
+{
+       PVRSRV_ERROR eError;
+
+       /* Singleton PMR context already initialised */
+       if (_gsSingletonPMRContext.bModuleInitialised)
+       {
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR, out);
+       }
+
+       eError = OSLockCreate(&_gsSingletonPMRContext.hLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", out);
+
+       _gsSingletonPMRContext.uiNextSerialNum = 1;
+
+       _gsSingletonPMRContext.uiNextKey = 0x8300f001 * (uintptr_t)&_gsSingletonPMRContext;
+
+       _gsSingletonPMRContext.bModuleInitialised = IMG_TRUE;
+
+       _gsSingletonPMRContext.uiNumLivePMRs = 0;
+
+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS)
+       eError = MMapStatsInit();
+       PVR_LOG_GOTO_IF_ERROR(eError, "MMapStatsInit", out);
+#endif
+
+out:
+       PVR_ASSERT(eError == PVRSRV_OK);
+       return eError;
+}
+
+PVRSRV_ERROR
+PMRDeInit(void)
+{
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+       {
+               goto out;
+       }
+
+       /* Singleton PMR context is not initialised */
+       if (!_gsSingletonPMRContext.bModuleInitialised)
+       {
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR, out);
+       }
+
+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS)
+       MMapStatsDeInit();
+#endif
+
+       if (_gsSingletonPMRContext.uiNumLivePMRs != 0)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Error: %d live PMRs remain",
+                               __func__,
+                               _gsSingletonPMRContext.uiNumLivePMRs));
+               PVR_DPF((PVR_DBG_ERROR, "%s: This is an unrecoverable error; a subsequent crash is inevitable",
+                               __func__));
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR, out);
+       }
+
+       OSLockDestroy(_gsSingletonPMRContext.hLock);
+
+       _gsSingletonPMRContext.bModuleInitialised = IMG_FALSE;
+
+out:
+       PVR_ASSERT(eError == PVRSRV_OK);
+       return eError;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/power.c b/drivers/gpu/drm/img/img-rogue/services/server/common/power.c
new file mode 100644 (file)
index 0000000..e1c3a3a
--- /dev/null
@@ -0,0 +1,930 @@
+/*************************************************************************/ /*!
+@File           power.c
+@Title          Power management functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Main APIs for power management functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pdump_km.h"
+#include "allocmem.h"
+#include "osfunc.h"
+
+#include "lock.h"
+#include "pvrsrv.h"
+#include "pvr_debug.h"
+#include "process_stats.h"
+
+
+struct _PVRSRV_POWER_DEV_TAG_
+{
+       PFN_PRE_POWER                                   pfnDevicePrePower;
+       PFN_POST_POWER                                  pfnDevicePostPower;
+       PFN_SYS_PRE_POWER                               pfnSystemPrePower;
+       PFN_SYS_POST_POWER                              pfnSystemPostPower;
+       PFN_PRE_CLOCKSPEED_CHANGE               pfnPreClockSpeedChange;
+       PFN_POST_CLOCKSPEED_CHANGE              pfnPostClockSpeedChange;
+       PFN_FORCED_IDLE_REQUEST                 pfnForcedIdleRequest;
+       PFN_FORCED_IDLE_CANCEL_REQUEST  pfnForcedIdleCancelRequest;
+       PFN_GPU_UNITS_POWER_CHANGE              pfnGPUUnitsPowerChange;
+       IMG_HANDLE                                              hSysData;
+       IMG_HANDLE                                              hDevCookie;
+       PVRSRV_DEV_POWER_STATE                  eDefaultPowerState;
+       ATOMIC_T                                                eCurrentPowerState;
+};
+
+/*!
+  Typedef for a pointer to a function used to re-acquire the device power lock
+  after it has been released temporarily for a timeout period inside
+  PVRSRVDeviceIdleRequestKM.
+ */
+typedef PVRSRV_ERROR (*PFN_POWER_LOCK_ACQUIRE) (PPVRSRV_DEVICE_NODE psDevNode);
+
+static inline IMG_UINT64 PVRSRVProcessStatsGetTimeNs(void)
+{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+       return OSClockns64();
+#else
+       return 0;
+#endif
+}
+
+static inline IMG_UINT64 PVRSRVProcessStatsGetTimeUs(void)
+{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+       return OSClockus();
+#else
+       return 0;
+#endif
+}
+
+/*!
+******************************************************************************
+
+ @Function     _IsSystemStatePowered
+
+ @Description  Tests whether a given system state represents powered-up.
+
+ @Input                eSystemPowerState : a system power state
+
+ @Return       IMG_BOOL
+
+******************************************************************************/
+static IMG_BOOL _IsSystemStatePowered(PVRSRV_SYS_POWER_STATE eSystemPowerState)
+{
+       return (eSystemPowerState == PVRSRV_SYS_POWER_STATE_ON);
+}
+
+/* We don't expect PID=0 to acquire device power-lock */
+#define PWR_LOCK_OWNER_PID_CLR_VAL 0
+
+PVRSRV_ERROR PVRSRVPowerLockInit(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+       PVRSRV_ERROR eError;
+
+       eError = OSLockCreate(&psDeviceNode->hPowerLock);
+       PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate");
+
+       psDeviceNode->uiPwrLockOwnerPID = PWR_LOCK_OWNER_PID_CLR_VAL;
+       return PVRSRV_OK;
+}
+
+void PVRSRVPowerLockDeInit(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+       psDeviceNode->uiPwrLockOwnerPID = PWR_LOCK_OWNER_PID_CLR_VAL;
+       OSLockDestroy(psDeviceNode->hPowerLock);
+}
+
+IMG_BOOL PVRSRVPwrLockIsLockedByMe(PCPVRSRV_DEVICE_NODE psDeviceNode)
+{
+       return OSLockIsLocked(psDeviceNode->hPowerLock) &&
+              OSGetCurrentClientProcessIDKM() == psDeviceNode->uiPwrLockOwnerPID;
+}
+
+PVRSRV_ERROR PVRSRVPowerLock(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+       OSLockAcquire(psDeviceNode->hPowerLock);
+
+       /* Only allow the power lock to be taken when the system power is on */
+       if (_IsSystemStatePowered(psDeviceNode->eCurrentSysPowerState))
+       {
+               psDeviceNode->uiPwrLockOwnerPID = OSGetCurrentClientProcessIDKM();
+               return PVRSRV_OK;
+       }
+
+       OSLockRelease(psDeviceNode->hPowerLock);
+
+       return PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF;
+}
+
+PVRSRV_ERROR PVRSRVPowerTryLock(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+       if (!(OSTryLockAcquire(psDeviceNode->hPowerLock)))
+       {
+               return PVRSRV_ERROR_RETRY;
+       }
+
+       /* Only allow the power lock to be taken when the system power is on */
+       if (_IsSystemStatePowered(psDeviceNode->eCurrentSysPowerState))
+       {
+               psDeviceNode->uiPwrLockOwnerPID = OSGetCurrentClientProcessIDKM();
+
+               /* System is powered ON, return OK */
+               return PVRSRV_OK;
+       }
+       else
+       {
+               /* System is powered OFF, release the lock and return error */
+               OSLockRelease(psDeviceNode->hPowerLock);
+               return PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF;
+       }
+}
+
+/*!
+******************************************************************************
+
+ @Function     _PVRSRVForcedPowerLock
+
+ @Description  Obtain the mutex for power transitions regardless of system
+               power state
+
+ @Return       Always returns PVRSRV_OK. The function prototype must match
+               PFN_POWER_LOCK_ACQUIRE.
+
+******************************************************************************/
+static PVRSRV_ERROR _PVRSRVForcedPowerLock(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+       OSLockAcquire(psDeviceNode->hPowerLock);
+       psDeviceNode->uiPwrLockOwnerPID = OSGetCurrentClientProcessIDKM();
+
+       return PVRSRV_OK;
+}
+
+void PVRSRVPowerUnlock(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+       PVR_ASSERT(PVRSRVPwrLockIsLockedByMe(psDeviceNode));
+
+       /* Reset uiPwrLockOwnerPID before releasing lock */
+       psDeviceNode->uiPwrLockOwnerPID = PWR_LOCK_OWNER_PID_CLR_VAL;
+       OSLockRelease(psDeviceNode->hPowerLock);
+}
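+
+/* A minimal usage sketch (not compiled): callers bracket power-sensitive work
+ * with PVRSRVPowerLock()/PVRSRVPowerUnlock() and bail out if the system is
+ * already powered off. The helper name _ExamplePoweredWork is hypothetical.
+ */
+#if 0
+static PVRSRV_ERROR _ExamplePoweredWork(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+	/* Fails with PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF if the system is off */
+	PVRSRV_ERROR eError = PVRSRVPowerLock(psDeviceNode);
+	PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPowerLock");
+
+	/* ... power-state queries / device register access go here ... */
+
+	PVRSRVPowerUnlock(psDeviceNode);
+
+	return PVRSRV_OK;
+}
+#endif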
+
+IMG_BOOL PVRSRVDeviceIsDefaultStateOFF(PVRSRV_POWER_DEV *psPowerDevice)
+{
+       return (psPowerDevice->eDefaultPowerState == PVRSRV_DEV_POWER_STATE_OFF);
+}
+
+PVRSRV_ERROR PVRSRVSetDeviceDefaultPowerState(PCPVRSRV_DEVICE_NODE psDeviceNode,
+                                       PVRSRV_DEV_POWER_STATE eNewPowerState)
+{
+       PVRSRV_POWER_DEV *psPowerDevice;
+
+       psPowerDevice = psDeviceNode->psPowerDev;
+       if (psPowerDevice == NULL)
+       {
+               return PVRSRV_ERROR_INVALID_DEVICE;
+       }
+
+       psPowerDevice->eDefaultPowerState = eNewPowerState;
+
+       return PVRSRV_OK;
+}
+
+/*
+ @Input       pfnPowerLockAcquire  : Function used to re-acquire the power lock
+                                     in case it had to be released.
+*/
+static PVRSRV_ERROR _PVRSRVDeviceIdleRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode,
+                                       PFN_SYS_DEV_IS_DEFAULT_STATE_OFF    pfnIsDefaultStateOff,
+                                       IMG_BOOL                            bDeviceOffPermitted,
+                                       PFN_POWER_LOCK_ACQUIRE              pfnPowerLockAcquire)
+{
+       PVRSRV_POWER_DEV *psPowerDev = psDeviceNode->psPowerDev;
+       PVRSRV_ERROR eError;
+
+       if ((psPowerDev && psPowerDev->pfnForcedIdleRequest) &&
+           (!pfnIsDefaultStateOff || pfnIsDefaultStateOff(psPowerDev)))
+       {
+               LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+               {
+                       eError = psPowerDev->pfnForcedIdleRequest(psPowerDev->hDevCookie,
+                                                                 bDeviceOffPermitted);
+                       if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+                       {
+                               PVRSRV_ERROR eErrPwrLockAcq;
+                               /* FW denied idle request */
+                               PVRSRVPowerUnlock(psDeviceNode);
+
+                               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+
+                               eErrPwrLockAcq = pfnPowerLockAcquire(psDeviceNode);
+                               if (eErrPwrLockAcq != PVRSRV_OK)
+                               {
+                                       /* We only expect PVRSRV_ERROR_RETRY here, so assert on anything
+                                        * else. Moreover, we have ended up releasing the power lock that
+                                        * the caller originally held before calling this function. Since
+                                        * this needs careful handling at the call site, we pass back an
+                                        * explicit error so that callers know not to call
+                                        * PVRSRVPowerUnlock. */
+                                       PVR_ASSERT(eErrPwrLockAcq == PVRSRV_ERROR_RETRY);
+                                       PVR_DPF((PVR_DBG_ERROR, "%s: Failed to re-acquire power-lock "
+                                                "(%s) after releasing it for a time-out",
+                                                        __func__, PVRSRVGetErrorString(eErrPwrLockAcq)));
+                                       return PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED;
+                               }
+                       }
+                       else
+                       {
+                               /* idle request successful or some other error occurred, return */
+                               break;
+                       }
+               } END_LOOP_UNTIL_TIMEOUT();
+       }
+       else
+       {
+               PVRSRVSetSystemPowerState(psDeviceNode->psDevConfig, PVRSRV_SYS_POWER_STATE_ON);
+               return PVRSRV_OK;
+       }
+
+       return eError;
+}
+
+/*
+ * Wrapper that spares callers from having to supply the additional
+ * PFN_POWER_LOCK_ACQUIRE argument required by _PVRSRVDeviceIdleRequestKM.
+ */
+inline PVRSRV_ERROR PVRSRVDeviceIdleRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode,
+                                       PFN_SYS_DEV_IS_DEFAULT_STATE_OFF      pfnIsDefaultStateOff,
+                                       IMG_BOOL                              bDeviceOffPermitted)
+{
+       return _PVRSRVDeviceIdleRequestKM(psDeviceNode,
+                                         pfnIsDefaultStateOff,
+                                         bDeviceOffPermitted,
+                                         PVRSRVPowerLock);
+}
+
+PVRSRV_ERROR PVRSRVDeviceIdleCancelRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+       PVRSRV_POWER_DEV *psPowerDev = psDeviceNode->psPowerDev;
+
+       if (psPowerDev && psPowerDev->pfnForcedIdleCancelRequest)
+       {
+               return psPowerDev->pfnForcedIdleCancelRequest(psPowerDev->hDevCookie);
+       }
+
+       return PVRSRV_OK;
+}
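+
+/* A minimal caller sketch (not compiled): if an idle request fails with
+ * PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED the power lock is no longer held,
+ * so the caller must not call PVRSRVPowerUnlock(); other failures leave the
+ * lock held. This mirrors the handling in PVRSRVDevicePreClockSpeedChange
+ * later in this file. The helper name _ExampleIdleThenWork is hypothetical.
+ */
+#if 0
+static PVRSRV_ERROR _ExampleIdleThenWork(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+	PVRSRV_ERROR eError = PVRSRVPowerLock(psDeviceNode);
+	PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPowerLock");
+
+	eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		if (eError != PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED)
+		{
+			PVRSRVPowerUnlock(psDeviceNode);
+		}
+		return eError;
+	}
+
+	/* ... work that requires the device to be idle ... */
+
+	(void) PVRSRVDeviceIdleCancelRequestKM(psDeviceNode);
+	PVRSRVPowerUnlock(psDeviceNode);
+
+	return PVRSRV_OK;
+}
+#endif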
+
+/*!
+******************************************************************************
+
+ @Function     PVRSRVDevicePrePowerStateKM
+
+ @Description
+
+ Perform device-specific processing required before a power transition
+
+ @Input                psPowerDevice : Power device
+ @Input                eNewPowerState : New power state
+ @Input                ePwrFlags : Power state change flags
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+static
+PVRSRV_ERROR PVRSRVDevicePrePowerStateKM(PVRSRV_POWER_DEV              *psPowerDevice,
+                                                                                PVRSRV_DEV_POWER_STATE eNewPowerState,
+                                                                                PVRSRV_POWER_FLAGS             ePwrFlags)
+{
+       PVRSRV_DEV_POWER_STATE eCurrentPowerState;
+       IMG_UINT64 ui64SysTimer1 = 0;
+       IMG_UINT64 ui64SysTimer2 = 0;
+       IMG_UINT64 ui64DevTimer1 = 0;
+       IMG_UINT64 ui64DevTimer2 = 0;
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(eNewPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT);
+
+       eCurrentPowerState = OSAtomicRead(&psPowerDevice->eCurrentPowerState);
+
+       if (psPowerDevice->pfnDevicePrePower != NULL)
+       {
+               ui64DevTimer1 = PVRSRVProcessStatsGetTimeNs();
+
+               /* Call the device's power callback. */
+               eError = psPowerDevice->pfnDevicePrePower(psPowerDevice->hDevCookie,
+                                                                                                 eNewPowerState,
+                                                                                                 eCurrentPowerState,
+                                                                                                 BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED));
+
+               ui64DevTimer2 = PVRSRVProcessStatsGetTimeNs();
+
+               PVR_RETURN_IF_ERROR(eError);
+       }
+
+       /* Do any required system-layer processing. */
+       if (psPowerDevice->pfnSystemPrePower != NULL)
+       {
+               ui64SysTimer1 = PVRSRVProcessStatsGetTimeNs();
+
+               eError = psPowerDevice->pfnSystemPrePower(psPowerDevice->hSysData,
+                                                                                                 (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON) ?
+                                                                                                       PVRSRV_SYS_POWER_STATE_ON :
+                                                                                                       PVRSRV_SYS_POWER_STATE_OFF,
+                                                                                                 (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) ?
+                                                                                                       PVRSRV_SYS_POWER_STATE_ON :
+                                                                                                       PVRSRV_SYS_POWER_STATE_OFF,
+                                                                                                 ePwrFlags);
+
+               ui64SysTimer2 = PVRSRVProcessStatsGetTimeNs();
+
+               PVR_RETURN_IF_ERROR(eError);
+       }
+
+       InsertPowerTimeStatistic(ui64SysTimer1, ui64SysTimer2,
+                                                        ui64DevTimer1, ui64DevTimer2,
+                                                        BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED),
+                                                        eNewPowerState == PVRSRV_DEV_POWER_STATE_ON,
+                                                        IMG_TRUE);
+
+       return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function     PVRSRVDevicePostPowerStateKM
+
+ @Description
+
+ Perform device-specific processing required after a power transition
+
+ @Input                psPowerDevice : Power device
+ @Input                eNewPowerState : New power state
+ @Input                ePwrFlags : Power state change flags
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+static
+PVRSRV_ERROR PVRSRVDevicePostPowerStateKM(PVRSRV_POWER_DEV                     *psPowerDevice,
+                                                                                 PVRSRV_DEV_POWER_STATE        eNewPowerState,
+                                                                                 PVRSRV_POWER_FLAGS            ePwrFlags)
+{
+       PVRSRV_DEV_POWER_STATE eCurrentPowerState;
+       IMG_UINT64 ui64SysTimer1 = 0;
+       IMG_UINT64 ui64SysTimer2 = 0;
+       IMG_UINT64 ui64DevTimer1 = 0;
+       IMG_UINT64 ui64DevTimer2 = 0;
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(eNewPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT);
+
+       eCurrentPowerState = OSAtomicRead(&psPowerDevice->eCurrentPowerState);
+
+       /* Do any required system-layer processing. */
+       if (psPowerDevice->pfnSystemPostPower != NULL)
+       {
+               ui64SysTimer1 = PVRSRVProcessStatsGetTimeNs();
+
+               eError = psPowerDevice->pfnSystemPostPower(psPowerDevice->hSysData,
+                                                                                                  (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON) ?
+                                                                                                        PVRSRV_SYS_POWER_STATE_ON :
+                                                                                                        PVRSRV_SYS_POWER_STATE_OFF,
+                                                                                                  (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) ?
+                                                                                                        PVRSRV_SYS_POWER_STATE_ON :
+                                                                                                        PVRSRV_SYS_POWER_STATE_OFF,
+                                                                                                  ePwrFlags);
+
+               ui64SysTimer2 = PVRSRVProcessStatsGetTimeNs();
+
+               PVR_RETURN_IF_ERROR(eError);
+       }
+
+       if (psPowerDevice->pfnDevicePostPower != NULL)
+       {
+               ui64DevTimer1 = PVRSRVProcessStatsGetTimeNs();
+
+               /* Call the device's power callback. */
+               eError = psPowerDevice->pfnDevicePostPower(psPowerDevice->hDevCookie,
+                                                                                                  eNewPowerState,
+                                                                                                  eCurrentPowerState,
+                                                                                                  BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED));
+
+               ui64DevTimer2 = PVRSRVProcessStatsGetTimeNs();
+
+               PVR_RETURN_IF_ERROR(eError);
+       }
+
+       InsertPowerTimeStatistic(ui64SysTimer1, ui64SysTimer2,
+                                                        ui64DevTimer1, ui64DevTimer2,
+                                                        BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED),
+                                                        eNewPowerState == PVRSRV_DEV_POWER_STATE_ON,
+                                                        IMG_FALSE);
+
+       OSAtomicWrite(&psPowerDevice->eCurrentPowerState, eNewPowerState);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(PPVRSRV_DEVICE_NODE psDeviceNode,
+                                                                                PVRSRV_DEV_POWER_STATE eNewPowerState,
+                                                                                PVRSRV_POWER_FLAGS ePwrFlags)
+{
+       PVRSRV_ERROR    eError;
+       PVRSRV_DATA*    psPVRSRVData = PVRSRVGetPVRSRVData();
+       PVRSRV_POWER_DEV *psPowerDevice;
+
+       psPowerDevice = psDeviceNode->psPowerDev;
+       if (!psPowerDevice)
+       {
+               return PVRSRV_OK;
+       }
+
+       if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT)
+       {
+               eNewPowerState = psPowerDevice->eDefaultPowerState;
+       }
+
+       if (OSAtomicRead(&psPowerDevice->eCurrentPowerState) != eNewPowerState)
+       {
+               eError = PVRSRVDevicePrePowerStateKM(psPowerDevice,
+                                                                                        eNewPowerState,
+                                                                                        ePwrFlags);
+               PVR_GOTO_IF_ERROR(eError, ErrorExit);
+
+               eError = PVRSRVDevicePostPowerStateKM(psPowerDevice,
+                                                                                         eNewPowerState,
+                                                                                         ePwrFlags);
+               PVR_GOTO_IF_ERROR(eError, ErrorExit);
+
+               /* Signal Device Watchdog Thread about power mode change. */
+               if (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON)
+               {
+                       psPVRSRVData->ui32DevicesWatchdogPwrTrans++;
+#if !defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+                       if (psPVRSRVData->ui32DevicesWatchdogTimeout == DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT)
+#endif
+                       {
+                               eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj);
+                               PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+                       }
+               }
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+               else if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF)
+               {
+                       /* signal watchdog thread and give it a chance to switch to
+                        * longer / infinite wait time */
+                       eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj);
+                       PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+               }
+#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */
+       }
+
+       return PVRSRV_OK;
+
+ErrorExit:
+
+       if (eError == PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED)
+       {
+               PVR_DPF((PVR_DBG_MESSAGE,
+                                "%s: Transition to %d was denied, Flags=0x%08x",
+                                __func__, eNewPowerState, ePwrFlags));
+       }
+       else if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                                "%s: Transition to %d FAILED (%s)",
+                                __func__, eNewPowerState, PVRSRVGetErrorString(eError)));
+       }
+
+       return eError;
+}
+
+PVRSRV_ERROR PVRSRVSetDeviceSystemPowerState(PPVRSRV_DEVICE_NODE psDeviceNode,
+                                                                                        PVRSRV_SYS_POWER_STATE eNewSysPowerState,
+                                                                                        PVRSRV_POWER_FLAGS ePwrFlags)
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT uiStage = 0;
+
+       PVRSRV_DEV_POWER_STATE eNewDevicePowerState =
+         _IsSystemStatePowered(eNewSysPowerState)? PVRSRV_DEV_POWER_STATE_DEFAULT : PVRSRV_DEV_POWER_STATE_OFF;
+
+       /* If setting the device to its default state, force idle it when that default state is off */
+       PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnIsDefaultStateOff =
+         (eNewDevicePowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) ? PVRSRVDeviceIsDefaultStateOFF : NULL;
+
+       /* require a proper power state */
+       if (eNewSysPowerState == PVRSRV_SYS_POWER_STATE_Unspecified)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       /* Prevent simultaneous SetPowerStateKM calls */
+       _PVRSRVForcedPowerLock(psDeviceNode);
+
+       /* no power transition requested, so do nothing */
+       if (eNewSysPowerState == psDeviceNode->eCurrentSysPowerState)
+       {
+               PVRSRVPowerUnlock(psDeviceNode);
+               return PVRSRV_OK;
+       }
+
+       eError = _PVRSRVDeviceIdleRequestKM(psDeviceNode, pfnIsDefaultStateOff,
+                                           IMG_TRUE, _PVRSRVForcedPowerLock);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_LOG_ERROR(eError, "_PVRSRVDeviceIdleRequestKM");
+               uiStage++;
+               goto ErrorExit;
+       }
+
+       eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, eNewDevicePowerState,
+                                                                                ePwrFlags | PVRSRV_POWER_FLAGS_FORCED);
+       if (eError != PVRSRV_OK)
+       {
+               uiStage++;
+               goto ErrorExit;
+       }
+
+       psDeviceNode->eCurrentSysPowerState = eNewSysPowerState;
+
+       PVRSRVPowerUnlock(psDeviceNode);
+
+       return PVRSRV_OK;
+
+ErrorExit:
+       PVRSRVPowerUnlock(psDeviceNode);
+
+       PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Transition from %d to %d FAILED (%s) at stage %u. Dumping debug info.",
+                        __func__, psDeviceNode->eCurrentSysPowerState, eNewSysPowerState,
+                        PVRSRVGetErrorString(eError), uiStage));
+
+       PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+
+       return eError;
+}
+
+PVRSRV_ERROR PVRSRVSetSystemPowerState(PVRSRV_DEVICE_CONFIG *psDevConfig,
+                                                                                        PVRSRV_SYS_POWER_STATE eNewSysPowerState)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_DEVICE_NODE *psDevNode = psDevConfig->psDevNode;
+       PVRSRV_SYS_POWER_STATE eCurrentSysPowerState;
+
+       if (psDevNode != NULL)
+       {
+               eCurrentSysPowerState = psDevNode->eCurrentSysPowerState;
+       }
+       else
+       {
+               /* assume power is off if no device node */
+               eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_OFF;
+       }
+
+       /* no power transition requested, so do nothing */
+       if (eNewSysPowerState == eCurrentSysPowerState)
+       {
+               return PVRSRV_OK;
+       }
+
+       if (psDevConfig->pfnPrePowerState != NULL)
+       {
+               eError = psDevConfig->pfnPrePowerState(psDevConfig->hSysData,
+                                                                                                 eNewSysPowerState,
+                                                                                                 eCurrentSysPowerState,
+                                                                                                 PVRSRV_POWER_FLAGS_FORCED);
+
+               PVR_RETURN_IF_ERROR(eError);
+       }
+
+       if (psDevConfig->pfnPostPowerState != NULL)
+       {
+               eError = psDevConfig->pfnPostPowerState(psDevConfig->hSysData,
+                                                                                                  eNewSysPowerState,
+                                                                                                  eCurrentSysPowerState,
+                                                                                                  PVRSRV_POWER_FLAGS_FORCED);
+
+               PVR_RETURN_IF_ERROR(eError);
+       }
+
+       if (psDevNode != NULL)
+       {
+               psDevNode->eCurrentSysPowerState = eNewSysPowerState;
+       }
+
+       return PVRSRV_OK;
+}
+
+void PVRSRVSetPowerCallbacks(PPVRSRV_DEVICE_NODE                               psDeviceNode,
+                                                        PVRSRV_POWER_DEV                                       *psPowerDevice,
+                                                        PFN_PRE_POWER                                          pfnDevicePrePower,
+                                                        PFN_POST_POWER                                         pfnDevicePostPower,
+                                                        PFN_SYS_PRE_POWER                                      pfnSystemPrePower,
+                                                        PFN_SYS_POST_POWER                                     pfnSystemPostPower,
+                                                        PFN_FORCED_IDLE_REQUEST                        pfnForcedIdleRequest,
+                                                        PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest)
+{
+       if (psPowerDevice != NULL)
+       {
+               if (PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp))
+               {
+                       psPowerDevice->pfnSystemPrePower = NULL;
+                       psPowerDevice->pfnSystemPostPower = NULL;
+               }
+               else
+               {
+                       psPowerDevice->pfnSystemPrePower = pfnSystemPrePower;
+                       psPowerDevice->pfnSystemPostPower = pfnSystemPostPower;
+               }
+
+               psPowerDevice->pfnDevicePrePower = pfnDevicePrePower;
+               psPowerDevice->pfnDevicePostPower = pfnDevicePostPower;
+               psPowerDevice->pfnForcedIdleRequest = pfnForcedIdleRequest;
+               psPowerDevice->pfnForcedIdleCancelRequest = pfnForcedIdleCancelRequest;
+       }
+}
+
+PVRSRV_ERROR PVRSRVRegisterPowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode,
+                                                                          PFN_PRE_POWER                                pfnDevicePrePower,
+                                                                          PFN_POST_POWER                               pfnDevicePostPower,
+                                                                          PFN_SYS_PRE_POWER                    pfnSystemPrePower,
+                                                                          PFN_SYS_POST_POWER                   pfnSystemPostPower,
+                                                                          PFN_PRE_CLOCKSPEED_CHANGE    pfnPreClockSpeedChange,
+                                                                          PFN_POST_CLOCKSPEED_CHANGE   pfnPostClockSpeedChange,
+                                                                          PFN_FORCED_IDLE_REQUEST      pfnForcedIdleRequest,
+                                                                          PFN_FORCED_IDLE_CANCEL_REQUEST       pfnForcedIdleCancelRequest,
+                                                                          PFN_GPU_UNITS_POWER_CHANGE   pfnGPUUnitsPowerChange,
+                                                                          IMG_HANDLE                                   hDevCookie,
+                                                                          PVRSRV_DEV_POWER_STATE               eCurrentPowerState,
+                                                                          PVRSRV_DEV_POWER_STATE               eDefaultPowerState)
+{
+       PVRSRV_POWER_DEV *psPowerDevice;
+
+       PVR_ASSERT(!psDeviceNode->psPowerDev);
+
+       PVR_ASSERT(eCurrentPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT);
+       PVR_ASSERT(eDefaultPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT);
+
+       psPowerDevice = OSAllocMem(sizeof(PVRSRV_POWER_DEV));
+       PVR_LOG_RETURN_IF_NOMEM(psPowerDevice, "psPowerDevice");
+
+       /* setup device for power manager */
+       PVRSRVSetPowerCallbacks(psDeviceNode,
+                                                       psPowerDevice,
+                                                       pfnDevicePrePower,
+                                                       pfnDevicePostPower,
+                                                       pfnSystemPrePower,
+                                                       pfnSystemPostPower,
+                                                       pfnForcedIdleRequest,
+                                                       pfnForcedIdleCancelRequest);
+
+       psPowerDevice->pfnPreClockSpeedChange = pfnPreClockSpeedChange;
+       psPowerDevice->pfnPostClockSpeedChange = pfnPostClockSpeedChange;
+       psPowerDevice->pfnGPUUnitsPowerChange = pfnGPUUnitsPowerChange;
+       psPowerDevice->hSysData = psDeviceNode->psDevConfig->hSysData;
+       psPowerDevice->hDevCookie = hDevCookie;
+       OSAtomicWrite(&psPowerDevice->eCurrentPowerState, eCurrentPowerState);
+       psPowerDevice->eDefaultPowerState = eDefaultPowerState;
+
+       psDeviceNode->psPowerDev = psPowerDevice;
+
+       return PVRSRV_OK;
+}
+
+void PVRSRVRemovePowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+       if (psDeviceNode->psPowerDev)
+       {
+               OSFreeMem(psDeviceNode->psPowerDev);
+               psDeviceNode->psPowerDev = NULL;
+       }
+}
+
+PVRSRV_ERROR PVRSRVGetDevicePowerState(PCPVRSRV_DEVICE_NODE psDeviceNode,
+                                                                          PPVRSRV_DEV_POWER_STATE pePowerState)
+{
+       PVRSRV_POWER_DEV *psPowerDevice;
+
+       psPowerDevice = psDeviceNode->psPowerDev;
+       if (psPowerDevice == NULL)
+       {
+               return PVRSRV_ERROR_UNKNOWN_POWER_STATE;
+       }
+
+       *pePowerState = OSAtomicRead(&psPowerDevice->eCurrentPowerState);
+
+       return PVRSRV_OK;
+}
+
+IMG_BOOL PVRSRVIsDevicePowered(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+       PVRSRV_DEV_POWER_STATE ePowerState;
+
+       if (PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState) != PVRSRV_OK)
+       {
+               return IMG_FALSE;
+       }
+
+       return (ePowerState == PVRSRV_DEV_POWER_STATE_ON);
+}
+
+PVRSRV_ERROR
+PVRSRVDevicePreClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode,
+                                IMG_BOOL            bIdleDevice,
+                                void*               pvInfo)
+{
+       PVRSRV_ERROR            eError = PVRSRV_OK;
+       PVRSRV_POWER_DEV        *psPowerDevice;
+       IMG_UINT64                      ui64StartTimer, ui64StopTimer;
+
+       PVR_UNREFERENCED_PARAMETER(pvInfo);
+
+       ui64StartTimer = PVRSRVProcessStatsGetTimeUs();
+
+       /* This mutex is released in PVRSRVDevicePostClockSpeedChange. */
+       eError = PVRSRVPowerLock(psDeviceNode);
+       PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPowerLock");
+
+       psPowerDevice = psDeviceNode->psPowerDev;
+       if (psPowerDevice)
+       {
+               PVRSRV_DEV_POWER_STATE eCurrentPowerState =
+                       OSAtomicRead(&psPowerDevice->eCurrentPowerState);
+
+               if ((eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) && bIdleDevice)
+               {
+                       /* We can change the clock speed if the device is either IDLE or OFF */
+                       eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_TRUE);
+
+                       if (eError != PVRSRV_OK)
+                       {
+                               /* The FW can deny the idle request when it is busy with SPM or other work it cannot idle */
+                               if (eError != PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s: Error (%s) from %s()", __func__,
+                                                PVRSRVGETERRORSTRING(eError), "PVRSRVDeviceIdleRequestKM"));
+                               }
+                               if (eError != PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED)
+                               {
+                                       PVRSRVPowerUnlock(psDeviceNode);
+                               }
+                               return eError;
+                       }
+               }
+
+               eError = psPowerDevice->pfnPreClockSpeedChange(psPowerDevice->hDevCookie,
+                                                              eCurrentPowerState);
+       }
+
+       ui64StopTimer = PVRSRVProcessStatsGetTimeUs();
+
+       InsertPowerTimeStatisticExtraPre(ui64StartTimer, ui64StopTimer);
+
+       return eError;
+}
+
+void
+PVRSRVDevicePostClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode,
+                                 IMG_BOOL            bIdleDevice,
+                                 void*               pvInfo)
+{
+       PVRSRV_ERROR            eError;
+       PVRSRV_POWER_DEV        *psPowerDevice;
+       IMG_UINT64                      ui64StartTimer, ui64StopTimer;
+
+       PVR_UNREFERENCED_PARAMETER(pvInfo);
+
+       ui64StartTimer = PVRSRVProcessStatsGetTimeUs();
+
+       psPowerDevice = psDeviceNode->psPowerDev;
+       if (psPowerDevice)
+       {
+               PVRSRV_DEV_POWER_STATE eCurrentPowerState =
+                       OSAtomicRead(&psPowerDevice->eCurrentPowerState);
+
+               eError = psPowerDevice->pfnPostClockSpeedChange(psPowerDevice->hDevCookie,
+                                                                                                               eCurrentPowerState);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Device %p failed (%s)",
+                                        __func__, psDeviceNode, PVRSRVGetErrorString(eError)));
+               }
+
+               if ((eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) && bIdleDevice)
+               {
+                       eError = PVRSRVDeviceIdleCancelRequestKM(psDeviceNode);
+                       PVR_LOG_IF_ERROR(eError, "PVRSRVDeviceIdleCancelRequestKM");
+               }
+       }
+
+       /* This mutex was acquired in PVRSRVDevicePreClockSpeedChange. */
+       PVRSRVPowerUnlock(psDeviceNode);
+
+       OSAtomicIncrement(&psDeviceNode->iNumClockSpeedChanges);
+
+       ui64StopTimer = PVRSRVProcessStatsGetTimeUs();
+
+       InsertPowerTimeStatisticExtraPost(ui64StartTimer, ui64StopTimer);
+}
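+
+/* A minimal pairing sketch (not compiled): the pre/post clock-speed-change
+ * calls must be paired because the power lock taken in the "pre" call is only
+ * released in the "post" call. The helper name _ExampleClockSpeedChange is
+ * hypothetical, and the DVFS step in the middle is a placeholder.
+ */
+#if 0
+static void _ExampleClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+	if (PVRSRVDevicePreClockSpeedChange(psDeviceNode, IMG_TRUE, NULL) != PVRSRV_OK)
+	{
+		/* See the error paths above for whether the power lock is still held */
+		return;
+	}
+
+	/* ... apply the new clock frequency via the platform/DVFS layer ... */
+
+	PVRSRVDevicePostClockSpeedChange(psDeviceNode, IMG_TRUE, NULL);
+}
+#endif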
+
+PVRSRV_ERROR PVRSRVDeviceGPUUnitsPowerChange(PPVRSRV_DEVICE_NODE psDeviceNode,
+                                             IMG_UINT32 ui32NewValue)
+{
+       PVRSRV_ERROR            eError = PVRSRV_OK;
+       PVRSRV_POWER_DEV        *psPowerDevice;
+
+       psPowerDevice = psDeviceNode->psPowerDev;
+       if (psPowerDevice)
+       {
+               PVRSRV_DEV_POWER_STATE eDevicePowerState;
+
+               eError = PVRSRVPowerLock(psDeviceNode);
+               PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPowerLock");
+
+               eDevicePowerState = OSAtomicRead(&psPowerDevice->eCurrentPowerState);
+               if (eDevicePowerState == PVRSRV_DEV_POWER_STATE_ON)
+               {
+                       /* Device must be idle to change GPU unit(s) power state */
+                       eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_FALSE);
+
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_LOG_ERROR(eError, "PVRSRVDeviceIdleRequestKM");
+                               if (eError == PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED)
+                               {
+                                       goto ErrorExit;
+                               }
+                               goto ErrorUnlockAndExit;
+                       }
+               }
+
+               if (psPowerDevice->pfnGPUUnitsPowerChange != NULL)
+               {
+                       PVRSRV_ERROR eError2 = psPowerDevice->pfnGPUUnitsPowerChange(psPowerDevice->hDevCookie, ui32NewValue);
+
+                       if (eError2 != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "%s: Device %p failed (%s)",
+                                        __func__, psDeviceNode,
+                                        PVRSRVGetErrorString(eError2)));
+                       }
+               }
+
+               if (eDevicePowerState == PVRSRV_DEV_POWER_STATE_ON)
+               {
+                       eError = PVRSRVDeviceIdleCancelRequestKM(psDeviceNode);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVDeviceIdleCancelRequestKM", ErrorUnlockAndExit);
+               }
+
+               PVRSRVPowerUnlock(psDeviceNode);
+       }
+
+       return eError;
+
+ErrorUnlockAndExit:
+       PVRSRVPowerUnlock(psDeviceNode);
+ErrorExit:
+       return eError;
+}
+
+/******************************************************************************
+ End of file (power.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/process_stats.c b/drivers/gpu/drm/img/img-rogue/services/server/common/process_stats.c
new file mode 100644 (file)
index 0000000..5867e2a
--- /dev/null
@@ -0,0 +1,3358 @@
+/*************************************************************************/ /*!
+@File
+@Title          Process based statistics
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Manages a collection of statistics based around a process
+                and referenced via OS agnostic methods.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "lock.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "lists.h"
+#include "process_stats.h"
+#include "ri_server.h"
+#include "hash.h"
+#include "connection_server.h"
+#include "pvrsrv.h"
+#include "proc_stats.h"
+#include "htbuffer.h"
+#include "pvr_ricommon.h"
+#include "di_server.h"
+#if defined(__linux__)
+#include "trace_events.h"
+#endif
+
+/* Enabled OS Statistics entries: DEBUGFS on Linux, undefined for other OSs */
+#if defined(__linux__) && ( \
+       defined(PVRSRV_ENABLE_PERPID_STATS) || \
+       defined(PVRSRV_ENABLE_CACHEOP_STATS) || \
+       defined(PVRSRV_ENABLE_MEMORY_STATS) || \
+       defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) )
+#define ENABLE_DEBUGFS_PIDS
+#endif
+
+/* Enable GPU memory accounting tracepoint */
+#if defined(__linux__) && ( \
+       defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) )
+#define ENABLE_GPU_MEM_TRACEPOINT
+#endif
+
+/*
+ * Maximum history of process statistics that will be kept.
+ */
+#define MAX_DEAD_LIST_PROCESSES  (10)
+
+/*
+ * Definition of all the strings used to format process based statistics.
+ */
+
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+/* Array of process stat type names defined using the X-macro */
+#define X(stat_type, stat_str) stat_str,
+const IMG_CHAR *const pszProcessStatType[PVRSRV_PROCESS_STAT_TYPE_COUNT] = { PVRSRV_PROCESS_STAT_KEY };
+#undef X
+#endif
+
+/* Array of driver stat type names defined using the X-macro */
+#define X(stat_type, stat_str) stat_str,
+const IMG_CHAR *const pszDriverStatType[PVRSRV_DRIVER_STAT_TYPE_COUNT] = { PVRSRV_DRIVER_STAT_KEY };
+#undef X
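+
+/* A documentation-only illustration (not compiled) of the X-macro pattern used
+ * above: a single key list is expanded once into an enum and once into the
+ * matching string table, so the two can never drift apart. All names below
+ * (EXAMPLE_STAT_KEY and its entries) are hypothetical.
+ */
+#if 0
+#define EXAMPLE_STAT_KEY \
+	X(EXAMPLE_STAT_CONNECTIONS, "Connections") \
+	X(EXAMPLE_STAT_ALLOCATIONS, "Allocations")
+
+#define X(stat_type, stat_str) stat_type,
+typedef enum { EXAMPLE_STAT_KEY EXAMPLE_STAT_COUNT } EXAMPLE_STAT_TYPE;
+#undef X
+
+#define X(stat_type, stat_str) stat_str,
+static const IMG_CHAR *const apszExampleStatNames[EXAMPLE_STAT_COUNT] = { EXAMPLE_STAT_KEY };
+#undef X
+#endif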
+
+/* structure used in hash table to track statistic entries */
+typedef struct {
+       size_t     uiSizeInBytes;
+       IMG_PID    uiPid;
+} _PVR_STATS_TRACKING_HASH_ENTRY;
+
+/* Function used internally to decrement tracked per-process statistic entries */
+static void _StatsDecrMemTrackedStat(_PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry,
+                                     PVRSRV_MEM_ALLOC_TYPE eAllocType);
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+int RawProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData);
+#endif
+int PowerStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData);
+int GlobalStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData);
+
+/* Note: all accesses to the global stats should be protected by the
+ * gsGlobalStats.hGlobalStatsLock lock. This applies to every invocation
+ * of the *_GLOBAL_STAT_VALUE macros. */
+
+/* Macros for fetching stat values */
+#define GET_STAT_VALUE(ptr,var) (ptr)->i32StatValue[(var)]
+#define GET_GLOBAL_STAT_VALUE(idx) gsGlobalStats.ui64StatValue[idx]
+
+#define GET_GPUMEM_GLOBAL_STAT_VALUE() \
+       GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA) + \
+       GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA) + \
+       GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA) + \
+       GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA) + \
+       GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT)
+
+#define GET_GPUMEM_PERPID_STAT_VALUE(ptr) \
+       GET_STAT_VALUE((ptr), PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA) + \
+       GET_STAT_VALUE((ptr), PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA) + \
+       GET_STAT_VALUE((ptr), PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES) + \
+       GET_STAT_VALUE((ptr), PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES) + \
+       GET_STAT_VALUE((ptr), PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT)
+/*
+ * Macros for updating stat values.
+ */
+#define UPDATE_MAX_VALUE(a,b)                                  do { if ((b) > (a)) {(a) = (b);} } while (0)
+#define INCREASE_STAT_VALUE(ptr,var,val)               do { (ptr)->i32StatValue[(var)] += (val); if ((ptr)->i32StatValue[(var)] > (ptr)->i32StatValue[(var##_MAX)]) {(ptr)->i32StatValue[(var##_MAX)] = (ptr)->i32StatValue[(var)];} } while (0)
+#define INCREASE_GLOBAL_STAT_VALUE(var,idx,val)                do { (var).ui64StatValue[(idx)] += (val); if ((var).ui64StatValue[(idx)] > (var).ui64StatValue[(idx##_MAX)]) {(var).ui64StatValue[(idx##_MAX)] = (var).ui64StatValue[(idx)];} } while (0)
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+/* Allow stats to go negative */
+#define DECREASE_STAT_VALUE(ptr,var,val)               do { (ptr)->i32StatValue[(var)] -= (val); } while (0)
+#define DECREASE_GLOBAL_STAT_VALUE(var,idx,val)                do { (var).ui64StatValue[(idx)] -= (val); } while (0)
+#else
+#define DECREASE_STAT_VALUE(ptr,var,val)               do { if ((ptr)->i32StatValue[(var)] >= (val)) { (ptr)->i32StatValue[(var)] -= (val); } else { (ptr)->i32StatValue[(var)] = 0; } } while (0)
+#define DECREASE_GLOBAL_STAT_VALUE(var,idx,val)                do { if ((var).ui64StatValue[(idx)] >= (val)) { (var).ui64StatValue[(idx)] -= (val); } else { (var).ui64StatValue[(idx)] = 0; } } while (0)
+#endif
+#define MAX_CACHEOP_STAT 16
+#define INCREMENT_CACHEOP_STAT_IDX_WRAP(x) ((x+1) >= MAX_CACHEOP_STAT ? 0 : (x+1))
+#define DECREMENT_CACHEOP_STAT_IDX_WRAP(x) ((x-1) < 0 ? (MAX_CACHEOP_STAT-1) : (x-1))
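+
+/* A documentation-only example (not compiled): the wrap macros above maintain
+ * a circular index into the fixed-size per-process cache-op history, e.g.
+ * advancing 15 -> 0 and stepping back 0 -> 15. The local variable name is a
+ * hypothetical placeholder.
+ */
+#if 0
+{
+	IMG_INT32 i32Idx = MAX_CACHEOP_STAT - 1;          /* 15 */
+	i32Idx = INCREMENT_CACHEOP_STAT_IDX_WRAP(i32Idx); /* wraps to 0 */
+	i32Idx = DECREMENT_CACHEOP_STAT_IDX_WRAP(i32Idx); /* back to 15 */
+}
+#endif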
+
+/*
+ * Structures for holding statistics...
+ */
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+typedef struct _PVRSRV_MEM_ALLOC_REC_
+{
+       PVRSRV_MEM_ALLOC_TYPE           eAllocType;
+       IMG_UINT64                      ui64Key;
+       void*                           pvCpuVAddr;
+       IMG_CPU_PHYADDR                 sCpuPAddr;
+       size_t                          uiBytes;
+       void*                           pvPrivateData;
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON)
+       void*                           pvAllocdFromFile;
+       IMG_UINT32                      ui32AllocdFromLine;
+#endif
+       IMG_PID                         pid;
+       struct _PVRSRV_MEM_ALLOC_REC_*  psNext;
+       struct _PVRSRV_MEM_ALLOC_REC_** ppsThis;
+} PVRSRV_MEM_ALLOC_REC;
+#endif
+
+typedef struct _PVRSRV_PROCESS_STATS_ {
+
+       /* Linked list pointers */
+       struct _PVRSRV_PROCESS_STATS_* psNext;
+       struct _PVRSRV_PROCESS_STATS_* psPrev;
+
+       /* Per-process lock that must be held when
+        * modifying any of this structure's members */
+       POS_LOCK                       hLock;
+
+       /* OS level process ID */
+       IMG_PID                        pid;
+       IMG_UINT32                     ui32RefCount;
+
+       /* Stats... */
+       IMG_INT32                      i32StatValue[PVRSRV_PROCESS_STAT_TYPE_COUNT];
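+       /* Bitmask of allocation stat types currently accounted against this
+        * process; bit n corresponds to stat type
+        * (PVRSRV_PROCESS_STAT_TYPE_KMALLOC + n). */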
+       IMG_UINT32                     ui32StatAllocFlags;
+
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+       struct _CACHEOP_STRUCT_ {
+               PVRSRV_CACHE_OP        uiCacheOp;
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+               IMG_DEV_VIRTADDR       sDevVAddr;
+               IMG_DEV_PHYADDR        sDevPAddr;
+               RGXFWIF_DM             eFenceOpType;
+#endif
+               IMG_DEVMEM_SIZE_T      uiOffset;
+               IMG_DEVMEM_SIZE_T      uiSize;
+               IMG_UINT64             ui64ExecuteTime;
+               IMG_BOOL               bUserModeFlush;
+               IMG_BOOL               bIsFence;
+               IMG_PID                ownerPid;
+       }                              asCacheOp[MAX_CACHEOP_STAT];
+       IMG_INT32                      uiCacheOpWriteIndex;
+#endif
+
+       /* Other statistics structures */
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+       PVRSRV_MEM_ALLOC_REC*          psMemoryRecords;
+#endif
+} PVRSRV_PROCESS_STATS;
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+
+typedef struct _PVRSRV_OS_STAT_ENTRY_
+{
+       DI_GROUP *psStatsDIGroup;
+       DI_ENTRY *psProcessStatsDIEntry;
+       DI_ENTRY *psMemStatsDIEntry;
+       DI_ENTRY *psRIMemStatsDIEntry;
+       DI_ENTRY *psCacheOpStatsDIEntry;
+} PVRSRV_OS_STAT_ENTRY;
+
+static PVRSRV_OS_STAT_ENTRY gsLiveStatEntries;
+static PVRSRV_OS_STAT_ENTRY gsRetiredStatEntries;
+
+int GenericStatsPrintElementsLive(OSDI_IMPL_ENTRY *psEntry, void *pvData);
+int GenericStatsPrintElementsRetired(OSDI_IMPL_ENTRY *psEntry, void *pvData);
+
+/*
+ * Functions for printing the information stored...
+ */
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+void ProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry,
+                               PVRSRV_PROCESS_STATS *psProcessStats);
+#endif
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+void MemStatsPrintElements(OSDI_IMPL_ENTRY *psEntry,
+                           PVRSRV_PROCESS_STATS *psProcessStats);
+#endif
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+void RIMemStatsPrintElements(OSDI_IMPL_ENTRY *psEntry,
+                             PVRSRV_PROCESS_STATS *psProcessStats);
+#endif
+
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+void CacheOpStatsPrintElements(OSDI_IMPL_ENTRY *psEntry,
+                               PVRSRV_PROCESS_STATS *psProcessStats);
+#endif
+
+typedef void (PVRSRV_STATS_PRINT_ELEMENTS)(OSDI_IMPL_ENTRY *psEntry,
+                                           PVRSRV_PROCESS_STATS *psProcessStats);
+
+typedef enum
+{
+       PVRSRV_STAT_TYPE_PROCESS,
+       PVRSRV_STAT_TYPE_MEMORY,
+       PVRSRV_STAT_TYPE_RIMEMORY,
+       PVRSRV_STAT_TYPE_CACHEOP,
+       PVRSRV_STAT_TYPE_LAST
+} PVRSRV_STAT_TYPE;
+
+#define SEPARATOR_STR_LEN 166
+
+typedef struct _PVRSRV_STAT_PV_DATA_ {
+
+       PVRSRV_STAT_TYPE eStatType;
+       PVRSRV_STATS_PRINT_ELEMENTS* pfnStatsPrintElements;
+       IMG_CHAR szLiveStatsHeaderStr[SEPARATOR_STR_LEN + 1];
+       IMG_CHAR szRetiredStatsHeaderStr[SEPARATOR_STR_LEN + 1];
+
+} PVRSRV_STAT_PV_DATA;
+
+static PVRSRV_STAT_PV_DATA g_StatPvDataArr[] = {
+                                               { PVRSRV_STAT_TYPE_PROCESS,  NULL, " Process"               , " Process"               },
+                                               { PVRSRV_STAT_TYPE_MEMORY,   NULL, " Memory Allocation"     , " Memory Allocation"     },
+                                               { PVRSRV_STAT_TYPE_RIMEMORY, NULL, " Resource Allocation"   , " Resource Allocation"   },
+                                               { PVRSRV_STAT_TYPE_CACHEOP,  NULL, " Cache Maintenance Ops" , " Cache Maintenance Ops" }
+                                             };
+
+#define GET_STAT_ENTRY_ID(STAT_TYPE) &g_StatPvDataArr[(STAT_TYPE)]
+
+/* Generic header strings */
+static const IMG_CHAR g_szLiveHeaderStr[]    = " Statistics for LIVE Processes ";
+static const IMG_CHAR g_szRetiredHeaderStr[] = " Statistics for RETIRED Processes ";
+
+/* Separator string used for separating stats for different PIDs */
+static IMG_CHAR g_szSeparatorStr[SEPARATOR_STR_LEN + 1] = "";
+
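+/* Builds a full-width line of '-' characters with the stat-specific and
+ * generic header text centred within it. */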
+static inline void
+_prepareStatsHeaderString(IMG_CHAR *pszStatsSpecificStr, const IMG_CHAR* pszGenericHeaderStr)
+{
+       IMG_UINT32 ui32NumSeparators;
+       IMG_CHAR szStatsHeaderFooterStr[75];
+
+       /* Prepare text content of the header in a local string */
+       OSStringLCopy(szStatsHeaderFooterStr, pszStatsSpecificStr, ARRAY_SIZE(szStatsHeaderFooterStr));
+       OSStringLCat(szStatsHeaderFooterStr, pszGenericHeaderStr, ARRAY_SIZE(szStatsHeaderFooterStr));
+
+       /* Write all '-' characters to the header string */
+       memset(pszStatsSpecificStr, '-', SEPARATOR_STR_LEN);
+       pszStatsSpecificStr[SEPARATOR_STR_LEN] = '\0';
+
+       /* Find the spot for text content in the header string */
+       ui32NumSeparators = (SEPARATOR_STR_LEN - OSStringLength(szStatsHeaderFooterStr)) >> 1;
+
+       /* Finally write the text content */
+       OSSNPrintf(pszStatsSpecificStr + ui32NumSeparators,
+                  OSStringLength(szStatsHeaderFooterStr),
+                  "%s", szStatsHeaderFooterStr);
+
+       /* Overwrite the '\0' character added by OSSNPrintf() */
+       if (OSStringLength(szStatsHeaderFooterStr) > 0)
+       {
+               pszStatsSpecificStr[ui32NumSeparators + OSStringLength(szStatsHeaderFooterStr) - 1] = ' ';
+       }
+}
+
+static inline void
+_prepareSeparatorStrings(void)
+{
+       IMG_UINT32 i;
+
+       /* Prepare header strings for each stat type */
+       for (i = 0; i < PVRSRV_STAT_TYPE_LAST; ++i)
+       {
+               _prepareStatsHeaderString(g_StatPvDataArr[i].szLiveStatsHeaderStr, g_szLiveHeaderStr);
+               _prepareStatsHeaderString(g_StatPvDataArr[i].szRetiredStatsHeaderStr, g_szRetiredHeaderStr);
+       }
+
+       /* Prepare separator string to separate stats for different PIDs */
+       memset(g_szSeparatorStr, '-', SEPARATOR_STR_LEN);
+       g_szSeparatorStr[SEPARATOR_STR_LEN] = '\0';
+}
+
+static inline void
+_prepareStatsPrivateData(void)
+{
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+       g_StatPvDataArr[PVRSRV_STAT_TYPE_PROCESS].pfnStatsPrintElements = ProcessStatsPrintElements;
+#endif
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+       g_StatPvDataArr[PVRSRV_STAT_TYPE_MEMORY].pfnStatsPrintElements = MemStatsPrintElements;
+#endif
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+       g_StatPvDataArr[PVRSRV_STAT_TYPE_RIMEMORY].pfnStatsPrintElements = RIMemStatsPrintElements;
+#endif
+
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+       g_StatPvDataArr[PVRSRV_STAT_TYPE_CACHEOP].pfnStatsPrintElements = CacheOpStatsPrintElements;
+#endif
+
+       _prepareSeparatorStrings();
+}
+
+#endif
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+static IMPLEMENT_LIST_INSERT(PVRSRV_MEM_ALLOC_REC)
+static IMPLEMENT_LIST_REMOVE(PVRSRV_MEM_ALLOC_REC)
+#endif
+
+/*
+ * Global Boolean to flag when the statistics are ready to monitor
+ * memory allocations.
+ */
+static IMG_BOOL bProcessStatsInitialised = IMG_FALSE;
+
+/*
+ * Linked lists for process stats. Live stats are for processes which are still running
+ * and the dead list holds those that have exited.
+ */
+static PVRSRV_PROCESS_STATS *g_psLiveList;
+static PVRSRV_PROCESS_STATS *g_psDeadList;
+
+static POS_LOCK g_psLinkedListLock;
+/* The kernel's lockdep feature cannot differentiate between different instances
+ * of the same lock type: it groups all such instances under one class. As a
+ * consequence, if lock acquisition is nested on different instances, lockdep
+ * generates a false warning about a possible deadlock due to recursive lock
+ * acquisition. The following subclasses explicitly apprise lockdep that such
+ * nesting is safe. */
+#define PROCESS_LOCK_SUBCLASS_CURRENT  1
+#define PROCESS_LOCK_SUBCLASS_PREV             2
+#define PROCESS_LOCK_SUBCLASS_NEXT             3
+#if defined(ENABLE_DEBUGFS_PIDS)
+/*
+ * Pointer to OS folder to hold PID folders.
+ */
+static DI_GROUP *psProcStatsDIGroup;
+#endif
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+static DI_ENTRY *psProcStatsDIEntry;
+#endif
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+/* Global driver PID stats registration handle */
+static IMG_HANDLE g_hDriverProcessStats;
+#endif
+
+/* Global driver-wide statistics */
+typedef struct _GLOBAL_STATS_
+{
+       IMG_UINT64 ui64StatValue[PVRSRV_DRIVER_STAT_TYPE_COUNT];
+       POS_LOCK   hGlobalStatsLock;
+} GLOBAL_STATS;
+
+static DI_ENTRY *psGlobalMemDIEntry;
+static GLOBAL_STATS gsGlobalStats;
+
+#define HASH_INITIAL_SIZE 5
+/* A hash table used to store the size of any vmalloc'd allocation
+ * against its address (not needed for kmallocs as we can use ksize()) */
+static HASH_TABLE* gpsSizeTrackingHashTable;
+static POS_LOCK         gpsSizeTrackingHashTableLock;
+
+static PVRSRV_ERROR _RegisterProcess(IMG_HANDLE *phProcessStats, IMG_PID ownerPid);
+
+static void _AddProcessStatsToFrontOfDeadList(PVRSRV_PROCESS_STATS* psProcessStats);
+static void _AddProcessStatsToFrontOfLiveList(PVRSRV_PROCESS_STATS* psProcessStats);
+static void _RemoveProcessStatsFromList(PVRSRV_PROCESS_STATS* psProcessStats);
+
+static void _DestroyProcessStat(PVRSRV_PROCESS_STATS* psProcessStats);
+
+static void _DecreaseProcStatValue(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+                                   PVRSRV_PROCESS_STATS* psProcessStats,
+                                   IMG_UINT32 uiBytes);
+/*
+ * Power statistics related definitions
+ */
+
+/* For the mean time, use an exponentially weighted moving average with a
+ * 1/4 weighting for the new measurement.
+ */
+#define MEAN_TIME(A, B)     ( ((3*(A))/4) + ((1 * (B))/4) )
+
+#define UPDATE_TIME(time, newtime) \
+       ((time) > 0 ? MEAN_TIME((time), (newtime)) : (newtime))
+
+/* Enum to be used as input to GET_POWER_STAT_INDEX */
+typedef enum
+{
+       DEVICE     = 0,
+       SYSTEM     = 1,
+       POST_POWER = 0,
+       PRE_POWER  = 2,
+       POWER_OFF  = 0,
+       POWER_ON   = 4,
+       NOT_FORCED = 0,
+       FORCED     = 8,
+} PVRSRV_POWER_STAT_TYPE;
+
+/* Macro used to access one of the power timing statistics inside an array */
+#define GET_POWER_STAT_INDEX(forced,powon,prepow,system) \
+       ((forced) + (powon) + (prepow) + (system))
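+/* The PVRSRV_POWER_STAT_TYPE values are bit flags, so the array index is just
+ * their sum, e.g. GET_POWER_STAT_INDEX(FORCED, POWER_ON, PRE_POWER, SYSTEM)
+ * = 8 + 4 + 2 + 1 = 15, while all-default arguments give index 0. */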
+
+/* For the power timing stats we need 16 variables to store all the
+ * combinations of forced/not forced, power-on/power-off, pre-power/post-power
+ * and device/system statistics
+ */
+#define NUM_POWER_STATS        (16)
+static IMG_UINT32 aui32PowerTimingStats[NUM_POWER_STATS];
+
+static DI_ENTRY *psPowerStatsDIEntry;
+
+typedef struct _EXTRA_POWER_STATS_
+{
+       IMG_UINT64      ui64PreClockSpeedChangeDuration;
+       IMG_UINT64      ui64BetweenPreEndingAndPostStartingDuration;
+       IMG_UINT64      ui64PostClockSpeedChangeDuration;
+} EXTRA_POWER_STATS;
+
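+/* asClockSpeedChanges is used as a circular buffer: new entries are written at
+ * ui32ClockSpeedIndexEnd and, once the buffer is full, the oldest entry is
+ * discarded by advancing ui32ClockSpeedIndexStart. */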
+#define NUM_EXTRA_POWER_STATS  10
+
+static EXTRA_POWER_STATS asClockSpeedChanges[NUM_EXTRA_POWER_STATS];
+static IMG_UINT32 ui32ClockSpeedIndexStart, ui32ClockSpeedIndexEnd;
+
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+void InsertPowerTimeStatistic(IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime,
+                              IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime,
+                              IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower)
+{
+       IMG_UINT32 *pui32Stat;
+       IMG_UINT64 ui64DeviceDiff = ui64DevEndTime - ui64DevStartTime;
+       IMG_UINT64 ui64SystemDiff = ui64SysEndTime - ui64SysStartTime;
+       IMG_UINT32 ui32Index;
+
+       if (bPrePower)
+       {
+               HTBLOGK(HTB_SF_MAIN_PRE_POWER, bPowerOn, ui64DeviceDiff, ui64SystemDiff);
+       }
+       else
+       {
+               HTBLOGK(HTB_SF_MAIN_POST_POWER, bPowerOn, ui64SystemDiff, ui64DeviceDiff);
+       }
+
+       ui32Index = GET_POWER_STAT_INDEX(bForced ? FORCED : NOT_FORCED,
+                                        bPowerOn ? POWER_ON : POWER_OFF,
+                                        bPrePower ? PRE_POWER : POST_POWER,
+                                        DEVICE);
+       pui32Stat = &aui32PowerTimingStats[ui32Index];
+       *pui32Stat = UPDATE_TIME(*pui32Stat, ui64DeviceDiff);
+
+       ui32Index = GET_POWER_STAT_INDEX(bForced ? FORCED : NOT_FORCED,
+                                        bPowerOn ? POWER_ON : POWER_OFF,
+                                        bPrePower ? PRE_POWER : POST_POWER,
+                                        SYSTEM);
+       pui32Stat = &aui32PowerTimingStats[ui32Index];
+       *pui32Stat = UPDATE_TIME(*pui32Stat, ui64SystemDiff);
+}
+
+static IMG_UINT64 ui64PreClockSpeedChangeMark;
+
+void InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer)
+{
+       asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64PreClockSpeedChangeDuration = ui64StopTimer - ui64StartTimer;
+
+       ui64PreClockSpeedChangeMark = OSClockus();
+}
+
+void InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer)
+{
+       IMG_UINT64 ui64Duration = ui64StartTimer - ui64PreClockSpeedChangeMark;
+
+       PVR_ASSERT(ui64PreClockSpeedChangeMark > 0);
+
+       asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64BetweenPreEndingAndPostStartingDuration = ui64Duration;
+       asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64PostClockSpeedChangeDuration = ui64StopTimer - ui64StartTimer;
+
+       ui32ClockSpeedIndexEnd = (ui32ClockSpeedIndexEnd + 1) % NUM_EXTRA_POWER_STATS;
+
+       if (ui32ClockSpeedIndexEnd == ui32ClockSpeedIndexStart)
+       {
+               ui32ClockSpeedIndexStart = (ui32ClockSpeedIndexStart + 1) % NUM_EXTRA_POWER_STATS;
+       }
+
+       ui64PreClockSpeedChangeMark = 0;
+}
+#endif
+
+/*************************************************************************/ /*!
+@Function       _FindProcessStatsInLiveList
+@Description    Searches the Live Process List for a statistics structure that
+                matches the PID given.
+@Input          pid  Process to search for.
+@Return         Pointer to stats structure for the process.
+*/ /**************************************************************************/
+static PVRSRV_PROCESS_STATS*
+_FindProcessStatsInLiveList(IMG_PID pid)
+{
+       PVRSRV_PROCESS_STATS* psProcessStats = g_psLiveList;
+
+       while (psProcessStats != NULL)
+       {
+               if (psProcessStats->pid == pid)
+               {
+                       return psProcessStats;
+               }
+
+               psProcessStats = psProcessStats->psNext;
+       }
+
+       return NULL;
+} /* _FindProcessStatsInLiveList */
+
+/*************************************************************************/ /*!
+@Function       _FindProcessStatsInDeadList
+@Description    Searches the Dead Process List for a statistics structure that
+                matches the PID given.
+@Input          pid  Process to search for.
+@Return         Pointer to stats structure for the process.
+*/ /**************************************************************************/
+static PVRSRV_PROCESS_STATS*
+_FindProcessStatsInDeadList(IMG_PID pid)
+{
+       PVRSRV_PROCESS_STATS* psProcessStats = g_psDeadList;
+
+       while (psProcessStats != NULL)
+       {
+               if (psProcessStats->pid == pid)
+               {
+                       return psProcessStats;
+               }
+
+               psProcessStats = psProcessStats->psNext;
+       }
+
+       return NULL;
+} /* _FindProcessStatsInDeadList */
+
+/*************************************************************************/ /*!
+@Function       _FindProcessStats
+@Description    Searches the Live and Dead Process Lists for a statistics
+                structure that matches the PID given.
+@Input          pid  Process to search for.
+@Return         Pointer to stats structure for the process.
+*/ /**************************************************************************/
+static PVRSRV_PROCESS_STATS*
+_FindProcessStats(IMG_PID pid)
+{
+       PVRSRV_PROCESS_STATS* psProcessStats = _FindProcessStatsInLiveList(pid);
+
+       if (psProcessStats == NULL)
+       {
+               psProcessStats = _FindProcessStatsInDeadList(pid);
+       }
+
+       return psProcessStats;
+} /* _FindProcessStats */
+
+/*************************************************************************/ /*!
+@Function       _CompressMemoryUsage
+@Description    Reduces memory usage by deleting old statistics data.
+                This function requires that the list lock is not held!
+*/ /**************************************************************************/
+static void
+_CompressMemoryUsage(void)
+{
+       PVRSRV_PROCESS_STATS* psProcessStats;
+       PVRSRV_PROCESS_STATS* psProcessStatsToBeFreed;
+       IMG_UINT32 ui32ItemsRemaining;
+
+       /*
+        * We hold the lock whilst checking the list, but we'll release it
+        * before freeing memory (as that will require the lock too)!
+        */
+       OSLockAcquire(g_psLinkedListLock);
+
+       /* Check that the dead list is not bigger than the max size... */
+       psProcessStats          = g_psDeadList;
+       psProcessStatsToBeFreed = NULL;
+       ui32ItemsRemaining      = MAX_DEAD_LIST_PROCESSES;
+
+       while (psProcessStats != NULL  &&  ui32ItemsRemaining > 0)
+       {
+               ui32ItemsRemaining--;
+               if (ui32ItemsRemaining == 0)
+               {
+                       /* This is the last allowed process, cut the linked list here! */
+                       psProcessStatsToBeFreed = psProcessStats->psNext;
+                       psProcessStats->psNext  = NULL;
+               }
+               else
+               {
+                       psProcessStats = psProcessStats->psNext;
+               }
+       }
+
+       OSLockRelease(g_psLinkedListLock);
+
+       /* Any processes stats remaining will need to be destroyed... */
+       while (psProcessStatsToBeFreed != NULL)
+       {
+               PVRSRV_PROCESS_STATS* psNextProcessStats = psProcessStatsToBeFreed->psNext;
+
+               psProcessStatsToBeFreed->psNext = NULL;
+               _DestroyProcessStat(psProcessStatsToBeFreed);
+               psProcessStatsToBeFreed = psNextProcessStats;
+       }
+} /* _CompressMemoryUsage */
+
+/* Moves the process stats entry from the live list to the dead list.
+ * _MoveProcessToDeadList only manipulates the global lists and must be
+ * called with g_psLinkedListLock held. */
+static void
+_MoveProcessToDeadList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+       /* Take the element out of the live list and add it to the front of the dead list... */
+       _RemoveProcessStatsFromList(psProcessStats);
+       _AddProcessStatsToFrontOfDeadList(psProcessStats);
+} /* _MoveProcessToDeadList */
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+/* Moves the process stats entry from the dead list back to the live list.
+ * _MoveProcessToLiveList only manipulates the global lists and must be
+ * called with g_psLinkedListLock held. */
+static void
+_MoveProcessToLiveList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+       /* Take the element out of the dead list and add it to the front of the live list... */
+       _RemoveProcessStatsFromList(psProcessStats);
+       _AddProcessStatsToFrontOfLiveList(psProcessStats);
+} /* _MoveProcessToLiveList */
+#endif
+
+/*************************************************************************/ /*!
+@Function       _AddProcessStatsToFrontOfLiveList
+@Description    Add a statistic to the live list head.
+@Input          psProcessStats  Process stats to add.
+*/ /**************************************************************************/
+static void
+_AddProcessStatsToFrontOfLiveList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+       /* This function should always be called under global list lock g_psLinkedListLock.
+        */
+       PVR_ASSERT(psProcessStats != NULL);
+
+       OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+       if (g_psLiveList != NULL)
+       {
+               PVR_ASSERT(psProcessStats != g_psLiveList);
+               OSLockAcquireNested(g_psLiveList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
+               g_psLiveList->psPrev = psProcessStats;
+               OSLockRelease(g_psLiveList->hLock);
+               psProcessStats->psNext = g_psLiveList;
+       }
+
+       g_psLiveList = psProcessStats;
+
+       OSLockRelease(psProcessStats->hLock);
+} /* _AddProcessStatsToFrontOfLiveList */
+
+/*************************************************************************/ /*!
+@Function       _AddProcessStatsToFrontOfDeadList
+@Description    Add a statistic to the dead list head.
+@Input          psProcessStats  Process stats to add.
+*/ /**************************************************************************/
+static void
+_AddProcessStatsToFrontOfDeadList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+       PVR_ASSERT(psProcessStats != NULL);
+       OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+       if (g_psDeadList != NULL)
+       {
+               PVR_ASSERT(psProcessStats != g_psDeadList);
+               OSLockAcquireNested(g_psDeadList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
+               g_psDeadList->psPrev = psProcessStats;
+               OSLockRelease(g_psDeadList->hLock);
+               psProcessStats->psNext = g_psDeadList;
+       }
+
+       g_psDeadList = psProcessStats;
+
+       OSLockRelease(psProcessStats->hLock);
+} /* _AddProcessStatsToFrontOfDeadList */
+
+/*************************************************************************/ /*!
+@Function       _RemoveProcessStatsFromList
+@Description    Detaches a process from either the live or dead list.
+@Input          psProcessStats  Process stats to remove.
+*/ /**************************************************************************/
+static void
+_RemoveProcessStatsFromList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+       PVR_ASSERT(psProcessStats != NULL);
+
+       OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+       /* Remove the item from the linked lists... */
+       if (g_psLiveList == psProcessStats)
+       {
+               g_psLiveList = psProcessStats->psNext;
+
+               if (g_psLiveList != NULL)
+               {
+                       PVR_ASSERT(psProcessStats != g_psLiveList);
+                       OSLockAcquireNested(g_psLiveList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
+                       g_psLiveList->psPrev = NULL;
+                       OSLockRelease(g_psLiveList->hLock);
+
+               }
+       }
+       else if (g_psDeadList == psProcessStats)
+       {
+               g_psDeadList = psProcessStats->psNext;
+
+               if (g_psDeadList != NULL)
+               {
+                       PVR_ASSERT(psProcessStats != g_psDeadList);
+                       OSLockAcquireNested(g_psDeadList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
+                       g_psDeadList->psPrev = NULL;
+                       OSLockRelease(g_psDeadList->hLock);
+               }
+       }
+       else
+       {
+               PVRSRV_PROCESS_STATS* psNext = psProcessStats->psNext;
+               PVRSRV_PROCESS_STATS* psPrev = psProcessStats->psPrev;
+
+               if (psProcessStats->psNext != NULL)
+               {
+                       PVR_ASSERT(psProcessStats != psNext);
+                       OSLockAcquireNested(psNext->hLock, PROCESS_LOCK_SUBCLASS_NEXT);
+                       psProcessStats->psNext->psPrev = psPrev;
+                       OSLockRelease(psNext->hLock);
+               }
+               if (psProcessStats->psPrev != NULL)
+               {
+                       PVR_ASSERT(psProcessStats != psPrev);
+                       OSLockAcquireNested(psPrev->hLock, PROCESS_LOCK_SUBCLASS_PREV);
+                       psProcessStats->psPrev->psNext = psNext;
+                       OSLockRelease(psPrev->hLock);
+               }
+       }
+
+       /* Reset the pointers in this cell, as it is not attached to anything */
+       psProcessStats->psNext = NULL;
+       psProcessStats->psPrev = NULL;
+
+       OSLockRelease(psProcessStats->hLock);
+
+} /* _RemoveProcessStatsFromList */
+
+static PVRSRV_ERROR
+_AllocateProcessStats(PVRSRV_PROCESS_STATS **ppsProcessStats, IMG_PID ownerPid)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_PROCESS_STATS *psProcessStats;
+
+       psProcessStats = OSAllocZMemNoStats(sizeof(PVRSRV_PROCESS_STATS));
+       PVR_RETURN_IF_NOMEM(psProcessStats);
+
+       psProcessStats->pid             = ownerPid;
+       psProcessStats->ui32RefCount    = 1;
+
+       psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS]     = 1;
+       psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS] = 1;
+
+       eError = OSLockCreateNoStats(&psProcessStats->hLock);
+       PVR_GOTO_IF_ERROR(eError, e0);
+
+       *ppsProcessStats = psProcessStats;
+       return PVRSRV_OK;
+
+e0:
+       OSFreeMemNoStats(psProcessStats);
+       return PVRSRV_ERROR_OUT_OF_MEMORY;
+}
+
+/*************************************************************************/ /*!
+@Function       _DestroyProcessStat
+@Description    Frees memory and resources held by a process statistic.
+@Input          psProcessStats  Process stats to destroy.
+*/ /**************************************************************************/
+static void
+_DestroyProcessStat(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+       PVR_ASSERT(psProcessStats != NULL);
+
+       OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+       /* Free the memory statistics... */
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+       while (psProcessStats->psMemoryRecords)
+       {
+               List_PVRSRV_MEM_ALLOC_REC_Remove(psProcessStats->psMemoryRecords);
+       }
+#endif
+       OSLockRelease(psProcessStats->hLock);
+
+       /* Destroy the lock */
+       OSLockDestroyNoStats(psProcessStats->hLock);
+
+       /* Free the memory... */
+       OSFreeMemNoStats(psProcessStats);
+} /* _DestroyProcessStat */
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+static inline void
+_createStatsFiles(PVRSRV_OS_STAT_ENTRY* psStatsEntries,
+                  DI_PFN_SHOW pfnStatsShow)
+{
+       PVRSRV_ERROR eError;
+       DI_ITERATOR_CB sIterator = {.pfnShow = pfnStatsShow};
+
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+       eError = DICreateEntry("process_stats", psStatsEntries->psStatsDIGroup,
+                              &sIterator,
+                              GET_STAT_ENTRY_ID(PVRSRV_STAT_TYPE_PROCESS),
+                              DI_ENTRY_TYPE_GENERIC,
+                              &psStatsEntries->psProcessStatsDIEntry);
+       PVR_LOG_IF_ERROR(eError, "DICreateEntry (1)");
+#endif
+
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+       eError = DICreateEntry("cache_ops_exec", psStatsEntries->psStatsDIGroup,
+                              &sIterator,
+                              GET_STAT_ENTRY_ID(PVRSRV_STAT_TYPE_CACHEOP),
+                              DI_ENTRY_TYPE_GENERIC,
+                              &psStatsEntries->psCacheOpStatsDIEntry);
+       PVR_LOG_IF_ERROR(eError, "DICreateEntry (2)");
+#endif
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+       eError = DICreateEntry("mem_area", psStatsEntries->psStatsDIGroup,
+                              &sIterator,
+                              GET_STAT_ENTRY_ID(PVRSRV_STAT_TYPE_MEMORY),
+                              DI_ENTRY_TYPE_GENERIC,
+                              &psStatsEntries->psMemStatsDIEntry);
+       PVR_LOG_IF_ERROR(eError, "DICreateEntry (3)");
+#endif
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+       eError = DICreateEntry("gpu_mem_area", psStatsEntries->psStatsDIGroup,
+                              &sIterator,
+                              GET_STAT_ENTRY_ID(PVRSRV_STAT_TYPE_RIMEMORY),
+                              DI_ENTRY_TYPE_GENERIC,
+                              &psStatsEntries->psRIMemStatsDIEntry);
+       PVR_LOG_IF_ERROR(eError, "DICreateEntry (4)");
+#endif
+}
+
+static inline void
+_createStatisticsEntries(void)
+{
+       PVRSRV_ERROR eError;
+
+       eError = DICreateGroup("proc_stats", NULL, &psProcStatsDIGroup);
+       PVR_LOG_IF_ERROR(eError, "DICreateGroup (1)");
+       eError = DICreateGroup("live_pids_stats", psProcStatsDIGroup,
+                           &gsLiveStatEntries.psStatsDIGroup);
+       PVR_LOG_IF_ERROR(eError, "DICreateGroup (2)");
+       eError = DICreateGroup("retired_pids_stats", psProcStatsDIGroup,
+                           &gsRetiredStatEntries.psStatsDIGroup);
+       PVR_LOG_IF_ERROR(eError, "DICreateGroup (3)");
+
+       _createStatsFiles(&gsLiveStatEntries, GenericStatsPrintElementsLive);
+       _createStatsFiles(&gsRetiredStatEntries, GenericStatsPrintElementsRetired);
+
+       _prepareStatsPrivateData();
+}
+
+static inline void
+_removeStatsFiles(PVRSRV_OS_STAT_ENTRY* psStatsEntries)
+{
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+       DIDestroyEntry(psStatsEntries->psProcessStatsDIEntry);
+       psStatsEntries->psProcessStatsDIEntry = NULL;
+#endif
+
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+       DIDestroyEntry(psStatsEntries->psCacheOpStatsDIEntry);
+       psStatsEntries->psCacheOpStatsDIEntry = NULL;
+#endif
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+       DIDestroyEntry(psStatsEntries->psMemStatsDIEntry);
+       psStatsEntries->psMemStatsDIEntry = NULL;
+#endif
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+       DIDestroyEntry(psStatsEntries->psRIMemStatsDIEntry);
+       psStatsEntries->psRIMemStatsDIEntry = NULL;
+#endif
+}
+
+static inline void
+_removeStatisticsEntries(void)
+{
+       _removeStatsFiles(&gsLiveStatEntries);
+       _removeStatsFiles(&gsRetiredStatEntries);
+
+       DIDestroyGroup(gsLiveStatEntries.psStatsDIGroup);
+       gsLiveStatEntries.psStatsDIGroup = NULL;
+       DIDestroyGroup(gsRetiredStatEntries.psStatsDIGroup);
+       gsRetiredStatEntries.psStatsDIGroup = NULL;
+       DIDestroyGroup(psProcStatsDIGroup);
+       psProcStatsDIGroup = NULL;
+}
+#endif
+
+/*************************************************************************/ /*!
+@Function       PVRSRVStatsInitialise
+@Description    Entry point for initialising the statistics module.
+@Return         Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVStatsInitialise(void)
+{
+       PVRSRV_ERROR error;
+
+       PVR_ASSERT(g_psLiveList == NULL);
+       PVR_ASSERT(g_psDeadList == NULL);
+       PVR_ASSERT(g_psLinkedListLock == NULL);
+       PVR_ASSERT(gpsSizeTrackingHashTable == NULL);
+       PVR_ASSERT(bProcessStatsInitialised == IMG_FALSE);
+
+       /* We need a lock to protect the linked lists... */
+       error = OSLockCreate(&g_psLinkedListLock);
+       PVR_GOTO_IF_ERROR(error, return_);
+
+       /* We also need a lock to protect the hash table used for size tracking. */
+       error = OSLockCreate(&gpsSizeTrackingHashTableLock);
+       PVR_GOTO_IF_ERROR(error, destroy_linked_list_lock_);
+
+       /* We also need a lock to protect the GlobalStat counters */
+       error = OSLockCreate(&gsGlobalStats.hGlobalStatsLock);
+       PVR_GOTO_IF_ERROR(error, destroy_hashtable_lock_);
+
+       /* Create the hash table used to track the size of vmalloc'd allocations. */
+       gpsSizeTrackingHashTable = HASH_Create(HASH_INITIAL_SIZE);
+       PVR_GOTO_IF_NOMEM(gpsSizeTrackingHashTable, error, destroy_stats_lock_);
+
+       OSCachedMemSet(asClockSpeedChanges, 0, sizeof(asClockSpeedChanges));
+
+       /* Flag that we are ready to start monitoring memory allocations. */
+       bProcessStatsInitialised = IMG_TRUE;
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+       /* Register our 'system' PID to hold driver-wide alloc stats */
+       _RegisterProcess(&g_hDriverProcessStats, PVR_SYS_ALLOC_PID);
+#endif
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+       _createStatisticsEntries();
+#endif
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+       {
+               DI_ITERATOR_CB sIterator = {.pfnShow = RawProcessStatsPrintElements};
+               error = DICreateEntry("memtrack_stats", NULL, &sIterator, NULL,
+                                      DI_ENTRY_TYPE_GENERIC, &psProcStatsDIEntry);
+               PVR_LOG_IF_ERROR(error, "DICreateEntry (1)");
+       }
+#endif
+
+       {
+               DI_ITERATOR_CB sIterator = {.pfnShow = PowerStatsPrintElements};
+               /* Create power stats entry... */
+               error = DICreateEntry("power_timing_stats", NULL, &sIterator, NULL,
+                                     DI_ENTRY_TYPE_GENERIC, &psPowerStatsDIEntry);
+               PVR_LOG_IF_ERROR(error, "DICreateEntry (2)");
+       }
+
+       {
+               DI_ITERATOR_CB sIterator = {.pfnShow = GlobalStatsPrintElements};
+               error = DICreateEntry("driver_stats", NULL, &sIterator, NULL,
+                                     DI_ENTRY_TYPE_GENERIC, &psGlobalMemDIEntry);
+               PVR_LOG_IF_ERROR(error, "DICreateEntry (3)");
+       }
+
+       return PVRSRV_OK;
+
+destroy_stats_lock_:
+       OSLockDestroy(gsGlobalStats.hGlobalStatsLock);
+       gsGlobalStats.hGlobalStatsLock = NULL;
+destroy_hashtable_lock_:
+       OSLockDestroy(gpsSizeTrackingHashTableLock);
+       gpsSizeTrackingHashTableLock = NULL;
+destroy_linked_list_lock_:
+       OSLockDestroy(g_psLinkedListLock);
+       g_psLinkedListLock = NULL;
+return_:
+       return error;
+
+}
+
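+/* HASH_Iterate() callback that reports any vmalloc allocation still being
+ * tracked in gpsSizeTrackingHashTable (see PVRSRVStatsDestroy below). */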
+static PVRSRV_ERROR _DumpAllVMallocEntries (uintptr_t k, uintptr_t v, void* pvPriv)
+{
+#if defined(PVRSRV_NEED_PVR_DPF) || defined(DOXYGEN)
+       _PVR_STATS_TRACKING_HASH_ENTRY *psNewTrackingHashEntry = (_PVR_STATS_TRACKING_HASH_ENTRY *)(uintptr_t)v;
+       IMG_UINT64 uiCpuVAddr = (IMG_UINT64)k;
+
+       PVR_DPF((PVR_DBG_ERROR, "%s: " IMG_SIZE_FMTSPEC " bytes @ 0x%" IMG_UINT64_FMTSPECx " (PID %u)", __func__,
+                psNewTrackingHashEntry->uiSizeInBytes,
+                uiCpuVAddr,
+                psNewTrackingHashEntry->uiPid));
+
+       PVR_UNREFERENCED_PARAMETER(pvPriv);
+#endif
+       return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       PVRSRVStatsDestroy
+@Description    Method for destroying the statistics module data.
+*/ /**************************************************************************/
+void
+PVRSRVStatsDestroy(void)
+{
+       PVR_ASSERT(bProcessStatsInitialised);
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+       if (psProcStatsDIEntry != NULL)
+       {
+               DIDestroyEntry(psProcStatsDIEntry);
+               psProcStatsDIEntry = NULL;
+       }
+#endif
+
+       /* Destroy the power stats entry... */
+       if (psPowerStatsDIEntry != NULL)
+       {
+               DIDestroyEntry(psPowerStatsDIEntry);
+               psPowerStatsDIEntry = NULL;
+       }
+
+       /* Destroy the global data entry */
+       if (psGlobalMemDIEntry != NULL)
+       {
+               DIDestroyEntry(psGlobalMemDIEntry);
+               psGlobalMemDIEntry = NULL;
+       }
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+       _removeStatisticsEntries();
+#endif
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+       /* Deregister our 'system' PID which holds driver-wide alloc stats */
+       PVRSRVStatsDeregisterProcess(g_hDriverProcessStats);
+#endif
+
+       /* Stop monitoring memory allocations... */
+       bProcessStatsInitialised = IMG_FALSE;
+
+       /* Destroy the locks... */
+       if (g_psLinkedListLock != NULL)
+       {
+               OSLockDestroy(g_psLinkedListLock);
+               g_psLinkedListLock = NULL;
+       }
+
+       /* Free the live and dead lists... */
+       while (g_psLiveList != NULL)
+       {
+               PVRSRV_PROCESS_STATS* psProcessStats = g_psLiveList;
+               _RemoveProcessStatsFromList(psProcessStats);
+               _DestroyProcessStat(psProcessStats);
+       }
+
+       while (g_psDeadList != NULL)
+       {
+               PVRSRV_PROCESS_STATS* psProcessStats = g_psDeadList;
+               _RemoveProcessStatsFromList(psProcessStats);
+               _DestroyProcessStat(psProcessStats);
+       }
+
+       if (gpsSizeTrackingHashTable != NULL)
+       {
+               /* Dump all remaining entries in HASH table (list any remaining vmallocs) */
+               HASH_Iterate(gpsSizeTrackingHashTable, (HASH_pfnCallback)_DumpAllVMallocEntries, NULL);
+               HASH_Delete(gpsSizeTrackingHashTable);
+       }
+       if (gpsSizeTrackingHashTableLock != NULL)
+       {
+               OSLockDestroy(gpsSizeTrackingHashTableLock);
+               gpsSizeTrackingHashTableLock = NULL;
+       }
+
+       if (gsGlobalStats.hGlobalStatsLock != NULL)
+       {
+               OSLockDestroy(gsGlobalStats.hGlobalStatsLock);
+               gsGlobalStats.hGlobalStatsLock = NULL;
+       }
+
+}
+
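+/* _decrease_global_stat()/_increase_global_stat() adjust the driver-wide
+ * counters in gsGlobalStats under hGlobalStatsLock and, when GPU memory
+ * tracepoints are enabled, emit an update if the total GPU memory footprint
+ * has changed. */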
+static void _decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+                                                                 size_t uiBytes)
+{
+#if defined(ENABLE_GPU_MEM_TRACEPOINT)
+       IMG_UINT64 ui64InitialSize;
+#endif
+
+       OSLockAcquire(gsGlobalStats.hGlobalStatsLock);
+
+#if defined(ENABLE_GPU_MEM_TRACEPOINT)
+       ui64InitialSize = GET_GPUMEM_GLOBAL_STAT_VALUE();
+#endif
+
+       switch (eAllocType)
+       {
+               case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+                       DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_KMALLOC, uiBytes);
+                       break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+                       DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMALLOC, uiBytes);
+                       break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+                       DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA, uiBytes);
+                       break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+                       DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA, uiBytes);
+                       break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+                       DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA, uiBytes);
+                       break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+                       DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA, uiBytes);
+                       break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+                       DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA, uiBytes);
+                       break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+                       DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA, uiBytes);
+                       break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+                       DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA, uiBytes);
+                       break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES:
+                       DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL, uiBytes);
+                       break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT:
+                       DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT, uiBytes);
+                       break;
+
+               default:
+                       PVR_ASSERT(0);
+                       break;
+       }
+
+#if defined(ENABLE_GPU_MEM_TRACEPOINT)
+       {
+               IMG_UINT64 ui64Size = GET_GPUMEM_GLOBAL_STAT_VALUE();
+               if (ui64Size != ui64InitialSize)
+               {
+                       TracepointUpdateGPUMemGlobal(0, ui64Size);
+               }
+       }
+#endif
+
+       OSLockRelease(gsGlobalStats.hGlobalStatsLock);
+}
+
+static void _increase_global_stat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+                                                                 size_t uiBytes)
+{
+#if defined(ENABLE_GPU_MEM_TRACEPOINT)
+       IMG_UINT64 ui64InitialSize;
+#endif
+
+       OSLockAcquire(gsGlobalStats.hGlobalStatsLock);
+
+#if defined(ENABLE_GPU_MEM_TRACEPOINT)
+       ui64InitialSize = GET_GPUMEM_GLOBAL_STAT_VALUE();
+#endif
+
+       switch (eAllocType)
+       {
+               case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+                       INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_KMALLOC, uiBytes);
+                       break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+                       INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMALLOC, uiBytes);
+                       break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+                       INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA, uiBytes);
+                       break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+                       INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA, uiBytes);
+                       break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+                       INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA, uiBytes);
+                       break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+                       INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA, uiBytes);
+                       break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+                       INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA, uiBytes);
+                       break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+                       INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA, uiBytes);
+                       break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+                       INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA, uiBytes);
+                       break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES:
+                       INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL, uiBytes);
+                       break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT:
+                       INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT, uiBytes);
+                       break;
+
+               default:
+                       PVR_ASSERT(0);
+                       break;
+       }
+
+#if defined(ENABLE_GPU_MEM_TRACEPOINT)
+       {
+               IMG_UINT64 ui64Size = GET_GPUMEM_GLOBAL_STAT_VALUE();
+               if (ui64Size != ui64InitialSize)
+               {
+                       TracepointUpdateGPUMemGlobal(0, ui64Size);
+               }
+       }
+#endif
+
+       OSLockRelease(gsGlobalStats.hGlobalStatsLock);
+}
+
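+/* Looks up an existing stats entry for the PID, resurrecting it from the dead
+ * list if necessary, and bumps its reference count; otherwise allocates a new
+ * entry and adds it to the front of the live list. */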
+static PVRSRV_ERROR
+_RegisterProcess(IMG_HANDLE *phProcessStats, IMG_PID ownerPid)
+{
+       PVRSRV_PROCESS_STATS*   psProcessStats = NULL;
+       PVRSRV_ERROR                    eError;
+
+       PVR_ASSERT(phProcessStats != NULL);
+
+       PVR_DPF((PVR_DBG_MESSAGE, "%s: Register process PID %d [%s]",
+                       __func__, ownerPid, (ownerPid == PVR_SYS_ALLOC_PID)
+                       ? "system" : OSGetCurrentClientProcessNameKM()));
+
+       /* Check the PID has not already moved to the dead list... */
+       OSLockAcquire(g_psLinkedListLock);
+       psProcessStats = _FindProcessStatsInDeadList(ownerPid);
+       if (psProcessStats != NULL)
+       {
+               /* Move it back onto the live list! */
+               _RemoveProcessStatsFromList(psProcessStats);
+               _AddProcessStatsToFrontOfLiveList(psProcessStats);
+       }
+       else
+       {
+               /* Check the PID is not already registered in the live list... */
+               psProcessStats = _FindProcessStatsInLiveList(ownerPid);
+       }
+
+       /* If the PID is on the live list then just increment the ref count and return... */
+       if (psProcessStats != NULL)
+       {
+               OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+               psProcessStats->ui32RefCount++;
+               psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = psProcessStats->ui32RefCount;
+               UPDATE_MAX_VALUE(psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS],
+                                psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS]);
+               OSLockRelease(psProcessStats->hLock);
+               OSLockRelease(g_psLinkedListLock);
+
+               *phProcessStats = psProcessStats;
+
+               return PVRSRV_OK;
+       }
+       OSLockRelease(g_psLinkedListLock);
+
+       /* Allocate a new node structure and initialise it... */
+       eError = _AllocateProcessStats(&psProcessStats, ownerPid);
+       PVR_GOTO_IF_ERROR(eError, e0);
+
+       /* Add it to the live list... */
+       OSLockAcquire(g_psLinkedListLock);
+       _AddProcessStatsToFrontOfLiveList(psProcessStats);
+       OSLockRelease(g_psLinkedListLock);
+
+       /* Done */
+       *phProcessStats = (IMG_HANDLE) psProcessStats;
+
+       return PVRSRV_OK;
+
+e0:
+       *phProcessStats = (IMG_HANDLE) NULL;
+       return PVRSRV_ERROR_OUT_OF_MEMORY;
+} /* _RegisterProcess */
+
+/*************************************************************************/ /*!
+@Function       PVRSRVStatsRegisterProcess
+@Description    Registers the calling process in the statistics list.
+@Output         phProcessStats  Handle to the process to be used to deregister.
+@Return         Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVStatsRegisterProcess(IMG_HANDLE* phProcessStats)
+{
+       return _RegisterProcess(phProcessStats, OSGetCurrentClientProcessIDKM());
+}
+
+/*************************************************************************/ /*!
+@Function       PVRSRVStatsDeregisterProcess
+@Input          hProcessStats  Handle to the process returned when registered.
+@Description    Deregisters a process from the statistics module, dropping one
+                reference to its statistics entry.
+*/ /**************************************************************************/
+void
+PVRSRVStatsDeregisterProcess(IMG_HANDLE hProcessStats)
+{
+       PVR_DPF((PVR_DBG_MESSAGE, "%s: Deregister process entered PID %d [%s]",
+                       __func__, OSGetCurrentClientProcessIDKM(),
+                       OSGetCurrentProcessName()));
+
+       if (hProcessStats != (IMG_HANDLE) NULL)
+       {
+               PVRSRV_PROCESS_STATS* psProcessStats = (PVRSRV_PROCESS_STATS*) hProcessStats;
+
+               /* Lower the reference count, if zero then move it to the dead list */
+               OSLockAcquire(g_psLinkedListLock);
+               if (psProcessStats->ui32RefCount > 0)
+               {
+                       OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+                       psProcessStats->ui32RefCount--;
+                       psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = psProcessStats->ui32RefCount;
+
+#if !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+                       if (psProcessStats->ui32RefCount == 0)
+                       {
+                               OSLockRelease(psProcessStats->hLock);
+                               _MoveProcessToDeadList(psProcessStats);
+                       }
+                       else
+#endif
+                       {
+                               OSLockRelease(psProcessStats->hLock);
+                       }
+               }
+               OSLockRelease(g_psLinkedListLock);
+
+               /* Check if the dead list needs to be reduced */
+               _CompressMemoryUsage();
+       }
+} /* PVRSRVStatsDeregisterProcess */
+
+void
+PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+                                                        void *pvCpuVAddr,
+                                                        IMG_CPU_PHYADDR sCpuPAddr,
+                                                        size_t uiBytes,
+                                                        void *pvPrivateData,
+                                                        IMG_PID currentPid
+                                                        DEBUG_MEMSTATS_PARAMS)
+{
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+       IMG_PID                            currentCleanupPid = PVRSRVGetPurgeConnectionPid();
+       PVRSRV_DATA*               psPVRSRVData = PVRSRVGetPVRSRVData();
+       PVRSRV_MEM_ALLOC_REC*  psRecord = NULL;
+       PVRSRV_PROCESS_STATS*  psProcessStats;
+       enum { PVRSRV_PROC_NOTFOUND,
+              PVRSRV_PROC_FOUND,
+              PVRSRV_PROC_RESURRECTED
+            } eProcSearch = PVRSRV_PROC_FOUND;
+
+#if defined(ENABLE_GPU_MEM_TRACEPOINT)
+       IMG_UINT64 ui64InitialSize;
+#endif
+
+       /* Don't do anything if we are not initialised or we are shutting down! */
+       if (!bProcessStatsInitialised)
+       {
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+               PVR_DPF((PVR_DBG_WARNING,
+                                "%s: Called when process statistics module is not initialised",
+                                __func__));
+#endif
+               return;
+       }
+
+       /*
+        * To prevent a recursive loop, we make the memory allocations for our
+        * memstat records via OSAllocMemNoStats(), which does not try to
+        * create a memstat record entry.
+        */
+
+       /* Allocate the memory record... */
+       psRecord = OSAllocZMemNoStats(sizeof(PVRSRV_MEM_ALLOC_REC));
+       if (psRecord == NULL)
+       {
+               return;
+       }
+
+       psRecord->eAllocType       = eAllocType;
+       psRecord->pvCpuVAddr       = pvCpuVAddr;
+       psRecord->sCpuPAddr.uiAddr = sCpuPAddr.uiAddr;
+       psRecord->uiBytes          = uiBytes;
+       psRecord->pvPrivateData    = pvPrivateData;
+
+       psRecord->pid = currentPid;
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON)
+       psRecord->pvAllocdFromFile = pvAllocFromFile;
+       psRecord->ui32AllocdFromLine = ui32AllocFromLine;
+#endif
+
+       _increase_global_stat(eAllocType, uiBytes);
+       /* Lock while we find the correct process... */
+       OSLockAcquire(g_psLinkedListLock);
+
+       if (psPVRSRVData)
+       {
+               if ((currentPid == psPVRSRVData->cleanupThreadPid) &&
+                   (currentCleanupPid != 0))
+               {
+                       psProcessStats = _FindProcessStats(currentCleanupPid);
+               }
+               else
+               {
+                       psProcessStats = _FindProcessStatsInLiveList(currentPid);
+                       if (!psProcessStats)
+                       {
+                               psProcessStats = _FindProcessStatsInDeadList(currentPid);
+                               eProcSearch = PVRSRV_PROC_RESURRECTED;
+                       }
+               }
+       }
+       else
+       {
+               psProcessStats = _FindProcessStatsInLiveList(currentPid);
+               if (!psProcessStats)
+               {
+                       psProcessStats = _FindProcessStatsInDeadList(currentPid);
+                       eProcSearch = PVRSRV_PROC_RESURRECTED;
+               }
+       }
+
+       if (psProcessStats == NULL)
+       {
+               eProcSearch = PVRSRV_PROC_NOTFOUND;
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+               PVR_DPF((PVR_DBG_WARNING,
+                                "%s: Process stat increment called for 'unknown' process PID(%d)",
+                                __func__, currentPid));
+
+               if (_AllocateProcessStats(&psProcessStats, currentPid) != PVRSRV_OK)
+               {
+                       OSLockRelease(g_psLinkedListLock);
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s UNABLE TO CREATE process_stats entry for pid %d [%s] (" IMG_SIZE_FMTSPEC " bytes)",
+                               __func__, currentPid, OSGetCurrentProcessName(), uiBytes));
+                       goto free_record;
+               }
+
+               /* Add it to the live list... */
+               _AddProcessStatsToFrontOfLiveList(psProcessStats);
+
+               OSLockRelease(g_psLinkedListLock);
+
+#else  /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */
+               OSLockRelease(g_psLinkedListLock);
+               goto free_record;
+#endif /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */
+       }
+       else
+       {
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+               if (eProcSearch == PVRSRV_PROC_RESURRECTED)
+               {
+                       PVR_DPF((PVR_DBG_WARNING,
+                                "%s: Process stat incremented on 'dead' process PID(%d)",
+                                __func__, currentPid));
+                       /* Move process from dead list to live list */
+                       _MoveProcessToLiveList(psProcessStats);
+               }
+#endif
+               OSLockRelease(g_psLinkedListLock);
+       }
+
+       OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+       /* Insert the memory record... */
+       if (psRecord != NULL)
+       {
+               List_PVRSRV_MEM_ALLOC_REC_Insert(&psProcessStats->psMemoryRecords, psRecord);
+       }
+
+#if defined(ENABLE_GPU_MEM_TRACEPOINT)
+       ui64InitialSize = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats);
+#endif
+
+       /* Update the memory watermarks... */
+       switch (eAllocType)
+       {
+               case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+               {
+                       if (psRecord != NULL)
+                       {
+                               if (pvCpuVAddr == NULL)
+                               {
+                                       break;
+                               }
+                               psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+                       }
+                       INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, (IMG_UINT32)uiBytes);
+                       INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+                       psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+               }
+               break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+               {
+                       if (psRecord != NULL)
+                       {
+                               if (pvCpuVAddr == NULL)
+                               {
+                                       break;
+                               }
+                               psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+                       }
+                       INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, (IMG_UINT32)uiBytes);
+                       INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+                       psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+               }
+               break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+               {
+                       if (psRecord != NULL)
+                       {
+                               if (pvCpuVAddr == NULL)
+                               {
+                                       break;
+                               }
+                               psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+                       }
+                       INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT32)uiBytes);
+                       INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+                       psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+               }
+               break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+               {
+                       if (psRecord != NULL)
+                       {
+                               if (pvCpuVAddr == NULL)
+                               {
+                                       break;
+                               }
+                               psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+                       }
+                       INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, (IMG_UINT32)uiBytes);
+                       psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+               }
+               break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+               {
+                       if (psRecord != NULL)
+                       {
+                               psRecord->ui64Key = sCpuPAddr.uiAddr;
+                       }
+                       INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT32)uiBytes);
+                       INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+                       psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+               }
+               break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+               {
+                       if (psRecord != NULL)
+                       {
+                               if (pvCpuVAddr == NULL)
+                               {
+                                       break;
+                               }
+                               psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+                       }
+                       INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, (IMG_UINT32)uiBytes);
+                       psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+               }
+               break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+               {
+                       if (psRecord != NULL)
+                       {
+                               psRecord->ui64Key = sCpuPAddr.uiAddr;
+                       }
+                       INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, (IMG_UINT32)uiBytes);
+                       INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+                       psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+               }
+               break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+               {
+                       if (psRecord != NULL)
+                       {
+                               psRecord->ui64Key = sCpuPAddr.uiAddr;
+                       }
+                       INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, (IMG_UINT32)uiBytes);
+                       INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+                       psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+               }
+               break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+               {
+                       if (psRecord != NULL)
+                       {
+                               if (pvCpuVAddr == NULL)
+                               {
+                                       break;
+                               }
+                               psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+                       }
+                       INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT32)uiBytes);
+                       psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+               }
+               break;
+
+               default:
+               {
+                       PVR_ASSERT(0);
+               }
+               break;
+       }
+
+#if defined(ENABLE_GPU_MEM_TRACEPOINT)
+       if (psProcessStats->pid != PVR_SYS_ALLOC_PID)
+       {
+               IMG_UINT64 ui64Size = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats);
+               if (ui64Size != ui64InitialSize)
+               {
+                       TracepointUpdateGPUMemPerProcess(0, psProcessStats->pid, ui64Size);
+               }
+       }
+#endif
+
+       OSLockRelease(psProcessStats->hLock);
+
+       return;
+
+free_record:
+       if (psRecord != NULL)
+       {
+               OSFreeMemNoStats(psRecord);
+       }
+#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */
+} /* PVRSRVStatsAddMemAllocRecord */
+
+void
+PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+                                                               IMG_UINT64 ui64Key,
+                                                               IMG_PID currentPid)
+{
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+       IMG_PID                            currentCleanupPid = PVRSRVGetPurgeConnectionPid();
+       PVRSRV_DATA*               psPVRSRVData = PVRSRVGetPVRSRVData();
+       PVRSRV_PROCESS_STATS*  psProcessStats = NULL;
+       PVRSRV_MEM_ALLOC_REC*  psRecord           = NULL;
+       IMG_BOOL                           bFound             = IMG_FALSE;
+
+       /* Don't do anything if we are not initialised or we are shutting down! */
+       if (!bProcessStatsInitialised)
+       {
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+               PVR_DPF((PVR_DBG_WARNING,
+                                "%s: Called when process statistics module is not initialised",
+                                __func__));
+#endif
+               return;
+       }
+
+       /* Lock while we find the correct process and remove this record... */
+       OSLockAcquire(g_psLinkedListLock);
+
+       if (psPVRSRVData)
+       {
+               if ((currentPid == psPVRSRVData->cleanupThreadPid) &&
+                   (currentCleanupPid != 0))
+               {
+                       psProcessStats = _FindProcessStats(currentCleanupPid);
+               }
+               else
+               {
+                       psProcessStats = _FindProcessStats(currentPid);
+               }
+       }
+       else
+       {
+               psProcessStats = _FindProcessStats(currentPid);
+       }
+       if (psProcessStats != NULL)
+       {
+               psRecord      = psProcessStats->psMemoryRecords;
+               while (psRecord != NULL)
+               {
+                       if (psRecord->ui64Key == ui64Key  &&  psRecord->eAllocType == eAllocType)
+                       {
+                               bFound = IMG_TRUE;
+                               break;
+                       }
+
+                       psRecord = psRecord->psNext;
+               }
+       }
+
+       /* If not found, we need to do a full search in case it was allocated to a different PID... */
+       if (!bFound)
+       {
+               PVRSRV_PROCESS_STATS* psProcessStatsAlreadyChecked = psProcessStats;
+
+               /* Search all live lists first... */
+               psProcessStats = g_psLiveList;
+               while (psProcessStats != NULL)
+               {
+                       if (psProcessStats != psProcessStatsAlreadyChecked)
+                       {
+                               psRecord      = psProcessStats->psMemoryRecords;
+                               while (psRecord != NULL)
+                               {
+                                       if (psRecord->ui64Key == ui64Key  &&  psRecord->eAllocType == eAllocType)
+                                       {
+                                               bFound = IMG_TRUE;
+                                               break;
+                                       }
+
+                                       psRecord = psRecord->psNext;
+                               }
+                       }
+
+                       if (bFound)
+                       {
+                               break;
+                       }
+
+                       psProcessStats = psProcessStats->psNext;
+               }
+
+               /* If not found, then search all dead lists next... */
+               if (!bFound)
+               {
+                       psProcessStats = g_psDeadList;
+                       while (psProcessStats != NULL)
+                       {
+                               if (psProcessStats != psProcessStatsAlreadyChecked)
+                               {
+                                       psRecord      = psProcessStats->psMemoryRecords;
+                                       while (psRecord != NULL)
+                                       {
+                                               if (psRecord->ui64Key == ui64Key  &&  psRecord->eAllocType == eAllocType)
+                                               {
+                                                       bFound = IMG_TRUE;
+                                                       break;
+                                               }
+
+                                               psRecord = psRecord->psNext;
+                                       }
+                               }
+
+                               if (bFound)
+                               {
+                                       break;
+                               }
+
+                               psProcessStats = psProcessStats->psNext;
+                       }
+               }
+       }
+
+       /* Update the watermark and remove this record...*/
+       if (bFound)
+       {
+               _decrease_global_stat(eAllocType, psRecord->uiBytes);
+
+               OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+               _DecreaseProcStatValue(eAllocType,
+                                      psProcessStats,
+                                      psRecord->uiBytes);
+
+               List_PVRSRV_MEM_ALLOC_REC_Remove(psRecord);
+               OSLockRelease(psProcessStats->hLock);
+               OSLockRelease(g_psLinkedListLock);
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+               /* If all stats are now zero, remove the entry for this thread */
+               if (psProcessStats->ui32StatAllocFlags == 0)
+               {
+                       OSLockAcquire(g_psLinkedListLock);
+                       _MoveProcessToDeadList(psProcessStats);
+                       OSLockRelease(g_psLinkedListLock);
+
+                       /* Check if the dead list needs to be reduced */
+                       _CompressMemoryUsage();
+               }
+#endif
+               /*
+                * Free the record outside the lock so we don't deadlock and so we
+                * reduce the time the lock is held.
+                */
+               OSFreeMemNoStats(psRecord);
+       }
+       else
+       {
+               OSLockRelease(g_psLinkedListLock);
+       }
+
+#else
+       PVR_UNREFERENCED_PARAMETER(eAllocType);
+       PVR_UNREFERENCED_PARAMETER(ui64Key);
+       PVR_UNREFERENCED_PARAMETER(currentPid);
+#endif
+} /* PVRSRVStatsRemoveMemAllocRecord */
+
+void
+PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+                                                                       size_t uiBytes,
+                                                                       IMG_UINT64 uiCpuVAddr,
+                                                                       IMG_PID uiPid)
+{
+       IMG_BOOL bRes = IMG_FALSE;
+       _PVR_STATS_TRACKING_HASH_ENTRY *psNewTrackingHashEntry = NULL;
+
+       if (!bProcessStatsInitialised || (gpsSizeTrackingHashTable == NULL))
+       {
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+               PVR_DPF((PVR_DBG_WARNING,
+                                "%s: Called when process statistics module is not initialised",
+                                __func__));
+#endif
+               return;
+       }
+
+       /* Alloc untracked memory for the new hash table entry */
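+       /* OSAllocMemNoStats is used here so that this bookkeeping allocation
+        * does not itself feed back into the statistics being tracked.
+        */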
+       psNewTrackingHashEntry = (_PVR_STATS_TRACKING_HASH_ENTRY *)OSAllocMemNoStats(sizeof(*psNewTrackingHashEntry));
+       if (psNewTrackingHashEntry == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "*** %s : @ line %d Failed to alloc memory for psNewTrackingHashEntry!",
+                                __func__, __LINE__));
+               return;
+       }
+
+       /* Fill-in the size of the allocation and PID of the allocating process */
+       psNewTrackingHashEntry->uiSizeInBytes = uiBytes;
+       psNewTrackingHashEntry->uiPid = uiPid;
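+       /* The CPU virtual address is the hash key, so that
+        * PVRSRVStatsDecrMemAllocStatAndUntrack() can recover the size and
+        * owning PID at untrack time.
+        */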
+       OSLockAcquire(gpsSizeTrackingHashTableLock);
+       /* Insert address of the new struct into the hash table */
+       bRes = HASH_Insert(gpsSizeTrackingHashTable, uiCpuVAddr, (uintptr_t)psNewTrackingHashEntry);
+       OSLockRelease(gpsSizeTrackingHashTableLock);
+       if (bRes)
+       {
+               PVRSRVStatsIncrMemAllocStat(eAllocType, uiBytes, uiPid);
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_ERROR, "*** %s : @ line %d HASH_Insert() failed!",
+                                __func__, __LINE__));
+               /* Free the memory allocated for psNewTrackingHashEntry, as we
+                * failed to insert it into the Hash table.
+                */
+               OSFreeMemNoStats(psNewTrackingHashEntry);
+       }
+}
+
+void
+PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+                            size_t uiBytes,
+                            IMG_PID currentPid)
+{
+       IMG_PID                           currentCleanupPid = PVRSRVGetPurgeConnectionPid();
+       PVRSRV_DATA*              psPVRSRVData = PVRSRVGetPVRSRVData();
+       PVRSRV_PROCESS_STATS* psProcessStats = NULL;
+       enum { PVRSRV_PROC_NOTFOUND,
+              PVRSRV_PROC_FOUND,
+              PVRSRV_PROC_RESURRECTED
+            } eProcSearch = PVRSRV_PROC_FOUND;
+
+#if defined(ENABLE_GPU_MEM_TRACEPOINT)
+       IMG_UINT64 ui64InitialSize;
+#endif
+
+       /* Don't do anything if we are not initialised or we are shutting down! */
+       if (!bProcessStatsInitialised)
+       {
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+               PVR_DPF((PVR_DBG_WARNING,
+                                "%s: Called when process statistics module is not initialised",
+                                __func__));
+#endif
+               return;
+       }
+
+       _increase_global_stat(eAllocType, uiBytes);
+       OSLockAcquire(g_psLinkedListLock);
+       if (psPVRSRVData)
+       {
+               if ((currentPid == psPVRSRVData->cleanupThreadPid) &&
+                   (currentCleanupPid != 0))
+               {
+                       psProcessStats = _FindProcessStats(currentCleanupPid);
+               }
+               else
+               {
+                       psProcessStats = _FindProcessStatsInLiveList(currentPid);
+                       if (!psProcessStats)
+                       {
+                               psProcessStats = _FindProcessStatsInDeadList(currentPid);
+                               eProcSearch = PVRSRV_PROC_RESURRECTED;
+                       }
+               }
+       }
+       else
+       {
+               psProcessStats = _FindProcessStatsInLiveList(currentPid);
+               if (!psProcessStats)
+               {
+                       psProcessStats = _FindProcessStatsInDeadList(currentPid);
+                       eProcSearch = PVRSRV_PROC_RESURRECTED;
+               }
+       }
+
+       if (psProcessStats == NULL)
+       {
+               eProcSearch = PVRSRV_PROC_NOTFOUND;
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+               PVR_DPF((PVR_DBG_WARNING,
+                                "%s: Process stat increment called for 'unknown' process PID(%d)",
+                                __func__, currentPid));
+
+               if (bProcessStatsInitialised)
+               {
+                       if (_AllocateProcessStats(&psProcessStats, currentPid) != PVRSRV_OK)
+                       {
+                               OSLockRelease(g_psLinkedListLock);
+                               return;
+                       }
+                       /* Add it to the live list... */
+                       _AddProcessStatsToFrontOfLiveList(psProcessStats);
+               }
+#else
+               OSLockRelease(g_psLinkedListLock);
+#endif /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */
+
+       }
+
+       if (psProcessStats != NULL)
+       {
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+               if (eProcSearch == PVRSRV_PROC_RESURRECTED)
+               {
+                       PVR_DPF((PVR_DBG_WARNING,
+                                        "%s: Process stat incremented on 'dead' process PID(%d)",
+                                        __func__, currentPid));
+
+                       /* Move process from dead list to live list */
+                       _MoveProcessToLiveList(psProcessStats);
+               }
+#endif
+               OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+               /* Release the list lock as soon as we acquire the process lock,
+                * this ensures if the process is in deadlist the entry cannot be
+                * deleted or modified
+                */
+               OSLockRelease(g_psLinkedListLock);
+
+#if defined(ENABLE_GPU_MEM_TRACEPOINT)
+               ui64InitialSize = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats);
+#endif
+
+               /* Update the memory watermarks... */
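+               /* ui32StatAllocFlags holds one bit per allocation stat, indexed
+                * relative to PVRSRV_PROCESS_STAT_TYPE_KMALLOC; a set bit means
+                * the corresponding stat is currently non-zero.
+                */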
+               switch (eAllocType)
+               {
+                       case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+                       {
+                               INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, (IMG_UINT32)uiBytes);
+                               INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+                               psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+                       }
+                       break;
+
+                       case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+                       {
+                               INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, (IMG_UINT32)uiBytes);
+                               INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+                               psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+                       }
+                       break;
+
+                       case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+                       {
+                               INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT32)uiBytes);
+                               INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+                               psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+                       }
+                       break;
+
+                       case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+                       {
+                               INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, (IMG_UINT32)uiBytes);
+                               psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+                       }
+                       break;
+
+                       case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+                       {
+                               INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT32)uiBytes);
+                               INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+                               psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+                       }
+                       break;
+
+                       case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+                       {
+                               INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, (IMG_UINT32)uiBytes);
+                               psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+                       }
+                       break;
+
+                       case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+                       {
+                               INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, (IMG_UINT32)uiBytes);
+                               INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+                               psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+                       }
+                       break;
+
+                       case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+                       {
+                               INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, (IMG_UINT32)uiBytes);
+                               INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+                               psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+                       }
+                       break;
+
+                       case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+                       {
+                               INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT32)uiBytes);
+                               psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+                       }
+                       break;
+
+                       case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT:
+                       {
+                               INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT, (IMG_UINT32)uiBytes);
+                               psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+                       }
+                       break;
+
+                       default:
+                       {
+                               PVR_ASSERT(0);
+                       }
+                       break;
+               }
+
+#if defined(ENABLE_GPU_MEM_TRACEPOINT)
+               if (psProcessStats->pid != PVR_SYS_ALLOC_PID)
+               {
+                       IMG_UINT64 ui64Size = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats);
+                       if (ui64Size != ui64InitialSize)
+                       {
+                               TracepointUpdateGPUMemPerProcess(0, psProcessStats->pid,
+                                                                ui64Size);
+                       }
+               }
+#endif
+
+               OSLockRelease(psProcessStats->hLock);
+       }
+
+}
+
+static void
+_DecreaseProcStatValue(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+                       PVRSRV_PROCESS_STATS* psProcessStats,
+                       IMG_UINT32 uiBytes)
+{
+#if defined(ENABLE_GPU_MEM_TRACEPOINT)
+       IMG_UINT64 ui64InitialSize = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats);
+#endif
+
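+       /* Mirror of the increment paths above: when a stat drops back to zero
+        * its ui32StatAllocFlags bit is cleared, allowing callers to detect a
+        * process with no outstanding allocations and retire it to the dead
+        * list.
+        */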
+       switch (eAllocType)
+       {
+               case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+               {
+                       DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, (IMG_UINT32)uiBytes);
+                       DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+                       if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC] == 0)
+                       {
+                               psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+                       }
+               }
+               break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+               {
+                       DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, (IMG_UINT32)uiBytes);
+                       DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+                       if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_VMALLOC] == 0)
+                       {
+                               psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+                       }
+               }
+               break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+               {
+                       DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT32)uiBytes);
+                       DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+                       if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA] == 0)
+                       {
+                               psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+                       }
+               }
+               break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+               {
+                       DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, (IMG_UINT32)uiBytes);
+                       if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA] == 0)
+                       {
+                               psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+                       }
+               }
+               break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+               {
+                       DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT32)uiBytes);
+                       DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+                       if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA] == 0)
+                       {
+                               psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+                       }
+               }
+               break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+               {
+                       DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, (IMG_UINT32)uiBytes);
+                       if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA] == 0)
+                       {
+                               psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+                       }
+               }
+               break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+               {
+                       DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, (IMG_UINT32)uiBytes);
+                       DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+                       if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES] == 0)
+                       {
+                               psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+                       }
+               }
+               break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+               {
+                       DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, (IMG_UINT32)uiBytes);
+                       DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+                       if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES] == 0)
+                       {
+                               psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+                       }
+               }
+               break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+               {
+                       DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT32)uiBytes);
+                       if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES] == 0)
+                       {
+                               psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+                       }
+               }
+               break;
+
+               case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT:
+               {
+                       DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT, (IMG_UINT32)uiBytes);
+                       if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT] == 0)
+                       {
+                               psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+                       }
+               }
+               break;
+
+               default:
+               {
+                       PVR_ASSERT(0);
+               }
+               break;
+       }
+
+#if defined(ENABLE_GPU_MEM_TRACEPOINT)
+       if (psProcessStats->pid != PVR_SYS_ALLOC_PID)
+       {
+               IMG_UINT64 ui64Size = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats);
+               if (ui64Size != ui64InitialSize)
+               {
+                       TracepointUpdateGPUMemPerProcess(0, psProcessStats->pid, ui64Size);
+               }
+       }
+#endif
+}
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+int RawProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData)
+{
+       PVRSRV_PROCESS_STATS *psProcessStats;
+
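+       /* Emit a CSV table: a single header row followed by one row per live
+        * process, skipping the driver-internal PVR_SYS_ALLOC_PID entry.
+        */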
+       DIPrintf(psEntry,
+                "%s,%s,%s,%s,%s,%s,%s\n",
+                "PID",
+                "MemoryUsageKMalloc",           // PVRSRV_PROCESS_STAT_TYPE_KMALLOC
+                "MemoryUsageAllocPTMemoryUMA",  // PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA
+                "MemoryUsageAllocPTMemoryLMA",  // PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA
+                "MemoryUsageAllocGPUMemLMA",    // PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES
+                "MemoryUsageAllocGPUMemUMA",    // PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES
+                "MemoryUsageDmaBufImport");     // PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT
+
+       OSLockAcquire(g_psLinkedListLock);
+
+       psProcessStats = g_psLiveList;
+
+       while (psProcessStats != NULL)
+       {
+               if (psProcessStats->pid != PVR_SYS_ALLOC_PID)
+               {
+                       DIPrintf(psEntry,
+                                "%d,%d,%d,%d,%d,%d,%d\n",
+                                psProcessStats->pid,
+                                psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC],
+                                psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA],
+                                psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA],
+                                psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES],
+                                psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES],
+                                psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT]);
+               }
+
+               psProcessStats = psProcessStats->psNext;
+       }
+
+       OSLockRelease(g_psLinkedListLock);
+
+       return 0;
+} /* RawProcessStatsPrintElements */
+#endif
+
+void
+PVRSRVStatsDecrMemKAllocStat(size_t uiBytes,
+                             IMG_PID decrPID)
+{
+       PVRSRV_PROCESS_STATS*  psProcessStats;
+
+       /* Don't do anything if we are not initialised or we are shutting down! */
+       if (!bProcessStatsInitialised)
+       {
+               return;
+       }
+
+       _decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, uiBytes);
+
+       OSLockAcquire(g_psLinkedListLock);
+
+       psProcessStats = _FindProcessStats(decrPID);
+
+       if (psProcessStats != NULL)
+       {
+               /* Decrement the kmalloc memory stat... */
+               DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, uiBytes);
+               DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes);
+       }
+
+       OSLockRelease(g_psLinkedListLock);
+}
+
+static void
+_StatsDecrMemTrackedStat(_PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry,
+                        PVRSRV_MEM_ALLOC_TYPE eAllocType)
+{
+       PVRSRV_PROCESS_STATS*  psProcessStats;
+
+       /* Don't do anything if we are not initialised or we are shutting down! */
+       if (!bProcessStatsInitialised)
+       {
+               return;
+       }
+
+       _decrease_global_stat(eAllocType, psTrackingHashEntry->uiSizeInBytes);
+
+       OSLockAcquire(g_psLinkedListLock);
+
+       psProcessStats = _FindProcessStats(psTrackingHashEntry->uiPid);
+
+       if (psProcessStats != NULL)
+       {
+               OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+               /* Decrement the memory stat... */
+               _DecreaseProcStatValue(eAllocType,
+                                      psProcessStats,
+                                      psTrackingHashEntry->uiSizeInBytes);
+               OSLockRelease(psProcessStats->hLock);
+       }
+
+       OSLockRelease(g_psLinkedListLock);
+}
+
+void
+PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+                                                                         IMG_UINT64 uiCpuVAddr)
+{
+       _PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry = NULL;
+
+       if (!bProcessStatsInitialised || (gpsSizeTrackingHashTable == NULL))
+       {
+               return;
+       }
+
+       OSLockAcquire(gpsSizeTrackingHashTableLock);
+       psTrackingHashEntry = (_PVR_STATS_TRACKING_HASH_ENTRY *)HASH_Remove(gpsSizeTrackingHashTable, uiCpuVAddr);
+       OSLockRelease(gpsSizeTrackingHashTableLock);
+       if (psTrackingHashEntry)
+       {
+               _StatsDecrMemTrackedStat(psTrackingHashEntry, eAllocType);
+               OSFreeMemNoStats(psTrackingHashEntry);
+       }
+}
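+
+/* Illustrative pairing of the track/untrack helpers (pvBuf and uiLen are
+ * hypothetical; pvBuf would come from a vmalloc-backed allocation):
+ *
+ *     PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+ *                                         uiLen,
+ *                                         (IMG_UINT64)(uintptr_t)pvBuf,
+ *                                         OSGetCurrentClientProcessIDKM());
+ *     ...
+ *     PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+ *                                           (IMG_UINT64)(uintptr_t)pvBuf);
+ */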
+
+void
+PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+                                                       size_t uiBytes,
+                                                       IMG_PID currentPid)
+{
+       IMG_PID                            currentCleanupPid = PVRSRVGetPurgeConnectionPid();
+       PVRSRV_DATA*               psPVRSRVData = PVRSRVGetPVRSRVData();
+       PVRSRV_PROCESS_STATS*  psProcessStats = NULL;
+
+       /* Don't do anything if we are not initialised or we are shutting down! */
+       if (!bProcessStatsInitialised)
+       {
+               return;
+       }
+
+       _decrease_global_stat(eAllocType, uiBytes);
+
+       OSLockAcquire(g_psLinkedListLock);
+       if (psPVRSRVData)
+       {
+               if ((currentPid == psPVRSRVData->cleanupThreadPid) &&
+                   (currentCleanupPid != 0))
+               {
+                       psProcessStats = _FindProcessStats(currentCleanupPid);
+               }
+               else
+               {
+                       psProcessStats = _FindProcessStats(currentPid);
+               }
+       }
+       else
+       {
+               psProcessStats = _FindProcessStats(currentPid);
+       }
+
+
+       if (psProcessStats != NULL)
+       {
+               OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+               /* Release the list lock as soon as we acquire the process lock,
+                * this ensures if the process is in deadlist the entry cannot be
+                * deleted or modified
+                */
+               OSLockRelease(g_psLinkedListLock);
+               /* Update the memory watermarks... */
+               _DecreaseProcStatValue(eAllocType,
+                                      psProcessStats,
+                                      uiBytes);
+               OSLockRelease(psProcessStats->hLock);
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+               /* If all stats are now zero, remove the entry for this thread */
+               if (psProcessStats->ui32StatAllocFlags == 0)
+               {
+                       OSLockAcquire(g_psLinkedListLock);
+                       _MoveProcessToDeadList(psProcessStats);
+                       OSLockRelease(g_psLinkedListLock);
+
+                       /* Check if the dead list needs to be reduced */
+                       _CompressMemoryUsage();
+               }
+#endif
+       }
+       else
+       {
+               OSLockRelease(g_psLinkedListLock);
+       }
+}
+
+/* For now we do not want to expose the global stats API, so we wrap it in
+ * these pool-page-specific helpers. Should the global stats ever need to be
+ * modified directly elsewhere, these should be replaced with more general
+ * accessors.
+ */
+void
+PVRSRVStatsIncrMemAllocPoolStat(size_t uiBytes)
+{
+       _increase_global_stat(PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, uiBytes);
+}
+
+void
+PVRSRVStatsDecrMemAllocPoolStat(size_t uiBytes)
+{
+       _decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, uiBytes);
+}
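+
+/* A hypothetical UMA page-pool caller would pair these as:
+ *     pool grow:   PVRSRVStatsIncrMemAllocPoolStat(uiNumPages << OSGetPageShift());
+ *     pool shrink: PVRSRVStatsDecrMemAllocPoolStat(uiNumPages << OSGetPageShift());
+ * Only the global PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES counter is affected;
+ * no per-process record is created.
+ */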
+
+void
+PVRSRVStatsUpdateOOMStats(IMG_UINT32 ui32OOMStatType,
+                         IMG_PID pidOwner)
+{
+       PVRSRV_PROCESS_STAT_TYPE eOOMStatType = (PVRSRV_PROCESS_STAT_TYPE) ui32OOMStatType;
+       IMG_PID pidCurrent = pidOwner;
+       PVRSRV_PROCESS_STATS* psProcessStats;
+
+       /* Don't do anything if we are not initialised or we are shutting down! */
+       if (!bProcessStatsInitialised)
+       {
+               return;
+       }
+
+       /* Lock while we find the correct process and update the record... */
+       OSLockAcquire(g_psLinkedListLock);
+
+       psProcessStats = _FindProcessStats(pidCurrent);
+       if (psProcessStats != NULL)
+       {
+               OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+               psProcessStats->i32StatValue[eOOMStatType]++;
+               OSLockRelease(psProcessStats->hLock);
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_WARNING, "PVRSRVStatsUpdateOOMStats: Process not found for Pid=%d", pidCurrent));
+       }
+
+       OSLockRelease(g_psLinkedListLock);
+} /* PVRSRVStatsUpdateOOMStats */
+
+PVRSRV_ERROR
+PVRSRVServerUpdateOOMStats(IMG_UINT32 ui32OOMStatType,
+                          IMG_PID pidOwner)
+{
+       if (ui32OOMStatType >= PVRSRV_PROCESS_STAT_TYPE_COUNT)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       PVRSRVStatsUpdateOOMStats(ui32OOMStatType, pidOwner);
+
+       return PVRSRV_OK;
+}
+
+void
+PVRSRVStatsUpdateRenderContextStats(IMG_UINT32 ui32TotalNumPartialRenders,
+                                                                       IMG_UINT32 ui32TotalNumOutOfMemory,
+                                                                       IMG_UINT32 ui32NumTAStores,
+                                                                       IMG_UINT32 ui32Num3DStores,
+                                                                       IMG_UINT32 ui32NumCDMStores,
+                                                                       IMG_UINT32 ui32NumTDMStores,
+                                                                       IMG_PID pidOwner)
+{
+       IMG_PID pidCurrent = pidOwner;
+
+       PVRSRV_PROCESS_STATS* psProcessStats;
+
+       /* Don't do anything if we are not initialised or we are shutting down! */
+       if (!bProcessStatsInitialised)
+       {
+               return;
+       }
+
+       /* Lock while we find the correct process and update the record... */
+       OSLockAcquire(g_psLinkedListLock);
+
+       psProcessStats = _FindProcessStats(pidCurrent);
+       if (psProcessStats != NULL)
+       {
+               OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+               psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_PRS]       += ui32TotalNumPartialRenders;
+               psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_OOMS]      += ui32TotalNumOutOfMemory;
+               psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_TA_STORES] += ui32NumTAStores;
+               psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_3D_STORES] += ui32Num3DStores;
+               psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_CDM_STORES]+= ui32NumCDMStores;
+               psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_TDM_STORES]+= ui32NumTDMStores;
+               OSLockRelease(psProcessStats->hLock);
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_WARNING, "PVRSRVStatsUpdateRenderContextStats: Process not found for Pid=%d", pidCurrent));
+       }
+
+       OSLockRelease(g_psLinkedListLock);
+} /* PVRSRVStatsUpdateRenderContextStats */
+
+void
+PVRSRVStatsUpdateZSBufferStats(IMG_UINT32 ui32NumReqByApp,
+                                                          IMG_UINT32 ui32NumReqByFW,
+                                                          IMG_PID owner)
+{
+       IMG_PID                           currentPid = (owner==0)?OSGetCurrentClientProcessIDKM():owner;
+       PVRSRV_PROCESS_STATS* psProcessStats;
+
+
+       /* Don't do anything if we are not initialised or we are shutting down! */
+       if (!bProcessStatsInitialised)
+       {
+               return;
+       }
+
+       /* Lock while we find the correct process and update the record... */
+       OSLockAcquire(g_psLinkedListLock);
+
+       psProcessStats = _FindProcessStats(currentPid);
+       if (psProcessStats != NULL)
+       {
+               OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+               psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_APP] += ui32NumReqByApp;
+               psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_FW]  += ui32NumReqByFW;
+               OSLockRelease(psProcessStats->hLock);
+       }
+
+       OSLockRelease(g_psLinkedListLock);
+} /* PVRSRVStatsUpdateZSBufferStats */
+
+void
+PVRSRVStatsUpdateFreelistStats(IMG_UINT32 ui32NumGrowReqByApp,
+                                                          IMG_UINT32 ui32NumGrowReqByFW,
+                                                          IMG_UINT32 ui32InitFLPages,
+                                                          IMG_UINT32 ui32NumHighPages,
+                                                          IMG_PID ownerPid)
+{
+       IMG_PID                           currentPid = (ownerPid!=0)?ownerPid:OSGetCurrentClientProcessIDKM();
+       PVRSRV_PROCESS_STATS* psProcessStats;
+
+       /* Don't do anything if we are not initialised or we are shutting down! */
+       if (!bProcessStatsInitialised)
+       {
+               return;
+       }
+
+       /* Lock while we find the correct process and update the record... */
+       OSLockAcquire(g_psLinkedListLock);
+
+       psProcessStats = _FindProcessStats(currentPid);
+
+       if (psProcessStats != NULL)
+       {
+
+               OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+               psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_APP] += ui32NumGrowReqByApp;
+               psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_FW]  += ui32NumGrowReqByFW;
+
+               UPDATE_MAX_VALUE(psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT],
+                               (IMG_INT32) ui32InitFLPages);
+
+               UPDATE_MAX_VALUE(psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_MAX_PAGES],
+                               (IMG_INT32) ui32NumHighPages);
+
+               OSLockRelease(psProcessStats->hLock);
+
+       }
+
+       OSLockRelease(g_psLinkedListLock);
+} /* PVRSRVStatsUpdateFreelistStats */
+
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+
+int
+GenericStatsPrintElementsLive(OSDI_IMPL_ENTRY *psEntry, void *pvData)
+{
+       PVRSRV_STAT_PV_DATA *psStatType = DIGetPrivData(psEntry);
+       PVRSRV_PROCESS_STATS* psProcessStats;
+
+       PVR_UNREFERENCED_PARAMETER(pvData);
+
+       PVR_ASSERT(psStatType->pfnStatsPrintElements != NULL);
+
+       DIPrintf(psEntry, "%s\n", psStatType->szLiveStatsHeaderStr);
+
+       OSLockAcquire(g_psLinkedListLock);
+
+       psProcessStats = g_psLiveList;
+
+       if (psProcessStats == NULL)
+       {
+               DIPrintf(psEntry, "No Stats to display\n%s\n", g_szSeparatorStr);
+       }
+       else
+       {
+               while (psProcessStats != NULL)
+               {
+                       psStatType->pfnStatsPrintElements(psEntry, psProcessStats);
+                       psProcessStats = psProcessStats->psNext;
+                       DIPrintf(psEntry, "%s\n", g_szSeparatorStr);
+               }
+       }
+       OSLockRelease(g_psLinkedListLock);
+
+       return 0;
+}
+
+int
+GenericStatsPrintElementsRetired(OSDI_IMPL_ENTRY *psEntry, void *pvData)
+{
+       PVRSRV_STAT_PV_DATA *psStatType = DIGetPrivData(psEntry);
+       PVRSRV_PROCESS_STATS* psProcessStats;
+
+       PVR_UNREFERENCED_PARAMETER(pvData);
+
+       PVR_ASSERT(psStatType->pfnStatsPrintElements != NULL);
+
+       DIPrintf(psEntry, "%s\n", psStatType->szRetiredStatsHeaderStr);
+
+       OSLockAcquire(g_psLinkedListLock);
+
+       psProcessStats = g_psDeadList;
+
+       if (psProcessStats == NULL)
+       {
+               DIPrintf(psEntry, "No Stats to display\n%s\n", g_szSeparatorStr);
+       }
+       else
+       {
+               while (psProcessStats != NULL)
+               {
+                       psStatType->pfnStatsPrintElements(psEntry, psProcessStats);
+                       psProcessStats = psProcessStats->psNext;
+                       DIPrintf(psEntry, "%s\n", g_szSeparatorStr);
+               }
+       }
+       OSLockRelease(g_psLinkedListLock);
+
+       return 0;
+}
+
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+/*************************************************************************/ /*!
+@Function       ProcessStatsPrintElements
+@Description    Prints all elements for this process statistic record.
+@Input          psEntry           Debug info entry to write the output to.
+@Input          psProcessStats    Pointer to the process statistics record.
+*/ /**************************************************************************/
+void
+ProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry,
+                          PVRSRV_PROCESS_STATS *psProcessStats)
+{
+       IMG_UINT32 ui32StatNumber;
+
+       OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+       DIPrintf(psEntry, "PID %u\n", psProcessStats->pid);
+
+       /* Loop through all the values and print them... */
+       for (ui32StatNumber = 0;
+            ui32StatNumber < ARRAY_SIZE(pszProcessStatType);
+            ui32StatNumber++)
+       {
+               if (OSStringNCompare(pszProcessStatType[ui32StatNumber], "", 1) != 0)
+               {
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+                       if ((ui32StatNumber == PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES) ||
+                           (ui32StatNumber == PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES))
+                       {
+                               /* get the stat from RI */
+                               IMG_INT32 ui32Total = RITotalAllocProcessKM(psProcessStats->pid,
+                                                                           (ui32StatNumber == PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES)
+                                                                           ? PHYS_HEAP_TYPE_LMA : PHYS_HEAP_TYPE_UMA);
+
+                               DIPrintf(psEntry, "%-34s%10d %8dK\n",
+                                                pszProcessStatType[ui32StatNumber], ui32Total, ui32Total>>10);
+                       }
+                       else
+#endif
+                       {
+                               if (ui32StatNumber >= PVRSRV_PROCESS_STAT_TYPE_KMALLOC &&
+                                       ui32StatNumber <= PVRSRV_PROCESS_STAT_TYPE_TOTAL_MAX)
+                               {
+                                       DIPrintf(psEntry, "%-34s%10d %8dK\n",
+                                                        pszProcessStatType[ui32StatNumber],
+                                                        psProcessStats->i32StatValue[ui32StatNumber],
+                                                        psProcessStats->i32StatValue[ui32StatNumber] >> 10);
+                               }
+                               else
+                               {
+                                       DIPrintf(psEntry, "%-34s%10d\n",
+                                                        pszProcessStatType[ui32StatNumber],
+                                                        psProcessStats->i32StatValue[ui32StatNumber]);
+                               }
+                       }
+               }
+       }
+
+       OSLockRelease(psProcessStats->hLock);
+} /* ProcessStatsPrintElements */
+#endif
+
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+void
+PVRSRVStatsUpdateCacheOpStats(PVRSRV_CACHE_OP uiCacheOp,
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+                                                       IMG_DEV_VIRTADDR sDevVAddr,
+                                                       IMG_DEV_PHYADDR sDevPAddr,
+#endif
+                                                       IMG_DEVMEM_SIZE_T uiOffset,
+                                                       IMG_DEVMEM_SIZE_T uiSize,
+                                                       IMG_UINT64 ui64ExecuteTime,
+                                                       IMG_BOOL bUserModeFlush,
+                                                       IMG_PID ownerPid)
+{
+       IMG_PID                           currentPid = (ownerPid!=0)?ownerPid:OSGetCurrentClientProcessIDKM();
+       PVRSRV_PROCESS_STATS* psProcessStats;
+
+       /* Don't do anything if we are not initialised or we are shutting down! */
+       if (!bProcessStatsInitialised)
+       {
+               return;
+       }
+
+       /* Lock while we find the correct process and update the record... */
+       OSLockAcquire(g_psLinkedListLock);
+
+       psProcessStats = _FindProcessStats(currentPid);
+
+       if (psProcessStats != NULL)
+       {
+               IMG_INT32 Idx;
+
+               OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+               /* Look-up next buffer write index */
+               Idx = psProcessStats->uiCacheOpWriteIndex;
+               psProcessStats->uiCacheOpWriteIndex = INCREMENT_CACHEOP_STAT_IDX_WRAP(Idx);
+
+               /* Store all CacheOp meta-data */
+               psProcessStats->asCacheOp[Idx].uiCacheOp = uiCacheOp;
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+               psProcessStats->asCacheOp[Idx].sDevVAddr = sDevVAddr;
+               psProcessStats->asCacheOp[Idx].sDevPAddr = sDevPAddr;
+#endif
+               psProcessStats->asCacheOp[Idx].uiOffset = uiOffset;
+               psProcessStats->asCacheOp[Idx].uiSize = uiSize;
+               psProcessStats->asCacheOp[Idx].bUserModeFlush = bUserModeFlush;
+               psProcessStats->asCacheOp[Idx].ui64ExecuteTime = ui64ExecuteTime;
+
+               OSLockRelease(psProcessStats->hLock);
+       }
+
+       OSLockRelease(g_psLinkedListLock);
+} /* PVRSRVStatsUpdateCacheOpStats */
+
+/*************************************************************************/ /*!
+@Function       CacheOpStatsPrintElements
+@Description    Prints all elements for this process statistic CacheOp record.
+@Input          psEntry           Debug info entry to write the output to.
+@Input          psProcessStats    Pointer to the process statistics record.
+*/ /**************************************************************************/
+void
+CacheOpStatsPrintElements(OSDI_IMPL_ENTRY *psEntry,
+                          PVRSRV_PROCESS_STATS *psProcessStats)
+{
+       IMG_CHAR  *pszCacheOpType, *pszFlushType, *pszFlushMode;
+       IMG_INT32 i32WriteIdx, i32ReadIdx;
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+       #define CACHEOP_RI_PRINTF_HEADER \
+               "%-10s %-10s %-5s %-16s %-16s %-10s %-10s %-12s\n"
+       #define CACHEOP_RI_PRINTF               \
+               "%-10s %-10s %-5s 0x%-14llx 0x%-14llx 0x%-8llx 0x%-8llx %-12llu\n"
+#else
+       #define CACHEOP_PRINTF_HEADER   \
+               "%-10s %-10s %-5s %-10s %-10s %-12s\n"
+       #define CACHEOP_PRINTF                  \
+               "%-10s %-10s %-5s 0x%-8llx 0x%-8llx %-12llu\n"
+#endif
+
+       DIPrintf(psEntry, "PID %u\n", psProcessStats->pid);
+
+       /* File header info */
+       DIPrintf(psEntry,
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+                                       CACHEOP_RI_PRINTF_HEADER,
+#else
+                                       CACHEOP_PRINTF_HEADER,
+#endif
+                                       "CacheOp",
+                                       "Type",
+                                       "Mode",
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+                                       "DevVAddr",
+                                       "DevPAddr",
+#endif
+                                       "Offset",
+                                       "Size",
+                                       "Time (us)");
+
+       /* Take a snapshot of write index, read backwards in buffer
+          and wrap round at boundary */
+       i32WriteIdx = psProcessStats->uiCacheOpWriteIndex;
+       for (i32ReadIdx = DECREMENT_CACHEOP_STAT_IDX_WRAP(i32WriteIdx);
+                i32ReadIdx != i32WriteIdx;
+                i32ReadIdx = DECREMENT_CACHEOP_STAT_IDX_WRAP(i32ReadIdx))
+       {
+               IMG_UINT64 ui64ExecuteTime = psProcessStats->asCacheOp[i32ReadIdx].ui64ExecuteTime;
+               IMG_DEVMEM_SIZE_T ui64NumOfPages = psProcessStats->asCacheOp[i32ReadIdx].uiSize >> OSGetPageShift();
+
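+               /* The reported flush type is classified purely by size: ranges
+                * that fit within PMR_MAX_TRANSLATION_STACK_ALLOC pages are shown
+                * as the fast range-based flush, larger ones as the slow path.
+                */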
+               if (ui64NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC)
+               {
+                       pszFlushType = "RBF.Fast";
+               }
+               else
+               {
+                       pszFlushType = "RBF.Slow";
+               }
+
+               if (psProcessStats->asCacheOp[i32ReadIdx].bUserModeFlush)
+               {
+                       pszFlushMode = "UM";
+               }
+               else
+               {
+                       pszFlushMode = "KM";
+               }
+
+               switch (psProcessStats->asCacheOp[i32ReadIdx].uiCacheOp)
+               {
+                       case PVRSRV_CACHE_OP_NONE:
+                               pszCacheOpType = "None";
+                               break;
+                       case PVRSRV_CACHE_OP_CLEAN:
+                               pszCacheOpType = "Clean";
+                               break;
+                       case PVRSRV_CACHE_OP_INVALIDATE:
+                               pszCacheOpType = "Invalidate";
+                               break;
+                       case PVRSRV_CACHE_OP_FLUSH:
+                               pszCacheOpType = "Flush";
+                               break;
+                       default:
+                               pszCacheOpType = "Unknown";
+                               break;
+               }
+
+               DIPrintf(psEntry,
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+                                                       CACHEOP_RI_PRINTF,
+#else
+                                                       CACHEOP_PRINTF,
+#endif
+                                                       pszCacheOpType,
+                                                       pszFlushType,
+                                                       pszFlushMode,
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+                                                       psProcessStats->asCacheOp[i32ReadIdx].sDevVAddr.uiAddr,
+                                                       psProcessStats->asCacheOp[i32ReadIdx].sDevPAddr.uiAddr,
+#endif
+                                                       psProcessStats->asCacheOp[i32ReadIdx].uiOffset,
+                                                       psProcessStats->asCacheOp[i32ReadIdx].uiSize,
+                                                       ui64ExecuteTime);
+       }
+
+} /* CacheOpStatsPrintElements */
+#endif
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+/*************************************************************************/ /*!
+@Function       MemStatsPrintElements
+@Description    Prints all elements for the memory statistic record.
+@Input          psEntry           OS DI implementation entry to print to.
+@Input          psProcessStats    Pointer to the process statistics record.
+*/ /**************************************************************************/
+void
+MemStatsPrintElements(OSDI_IMPL_ENTRY *psEntry,
+                      PVRSRV_PROCESS_STATS *psProcessStats)
+{
+       IMG_UINT32      ui32VAddrFields = sizeof(void*)/sizeof(IMG_UINT32);
+       IMG_UINT32      ui32PAddrFields = sizeof(IMG_CPU_PHYADDR)/sizeof(IMG_UINT32);
+       PVRSRV_MEM_ALLOC_REC *psRecord;
+       IMG_UINT32 ui32ItemNumber;
+
+       /* Write the header... */
+       DIPrintf(psEntry, "PID    ");
+
+       DIPrintf(psEntry, "Type                VAddress");
+       for (ui32ItemNumber = 1;  ui32ItemNumber < ui32VAddrFields;  ui32ItemNumber++)
+       {
+               DIPrintf(psEntry, "        ");
+       }
+
+       DIPrintf(psEntry, "  PAddress");
+       for (ui32ItemNumber = 1;  ui32ItemNumber < ui32PAddrFields;  ui32ItemNumber++)
+       {
+               DIPrintf(psEntry, "        ");
+       }
+
+       DIPrintf(psEntry, "  Size(bytes)\n");
+
+       psRecord = psProcessStats->psMemoryRecords;
+       if (psRecord == NULL)
+       {
+               DIPrintf(psEntry, "%-5d\n", psProcessStats->pid);
+       }
+
+       while (psRecord != NULL)
+       {
+               IMG_BOOL bPrintStat = IMG_TRUE;
+
+               DIPrintf(psEntry, "%-5d  ", psProcessStats->pid);
+
+               switch (psRecord->eAllocType)
+               {
+               case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:                             DIPrintf(psEntry, "KMALLOC             "); break;
+               case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:                             DIPrintf(psEntry, "VMALLOC             "); break;
+               case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:  DIPrintf(psEntry, "ALLOC_PAGES_PT_LMA  "); break;
+               case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:  DIPrintf(psEntry, "ALLOC_PAGES_PT_UMA  "); break;
+               case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:              DIPrintf(psEntry, "IOREMAP_PT_LMA      "); break;
+               case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:                 DIPrintf(psEntry, "VMAP_PT_UMA         "); break;
+               case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:             DIPrintf(psEntry, "ALLOC_LMA_PAGES     "); break;
+               case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:             DIPrintf(psEntry, "ALLOC_UMA_PAGES     "); break;
+               case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:   DIPrintf(psEntry, "MAP_UMA_LMA_PAGES   "); break;
+               case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT:      DIPrintf(psEntry, "DMA_BUF_IMPORT      "); break;
+               default:                                                                                DIPrintf(psEntry, "INVALID             "); break;
+               }
+
+               if (bPrintStat)
+               {
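+                       /* Print the CPU virtual address as 32-bit words, highest-indexed
+                        * word first, with no separator between the words. */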
+                       for (ui32ItemNumber = 0; ui32ItemNumber < ui32VAddrFields; ui32ItemNumber++)
+                       {
+                               DIPrintf(psEntry, "%08x", *(((IMG_UINT32*) &psRecord->pvCpuVAddr) + ui32VAddrFields - ui32ItemNumber - 1));
+                       }
+                       DIPrintf(psEntry, "  ");
+
+                       for (ui32ItemNumber = 0; ui32ItemNumber < ui32PAddrFields; ui32ItemNumber++)
+                       {
+                               DIPrintf(psEntry, "%08x", *(((IMG_UINT32*) &psRecord->sCpuPAddr.uiAddr) + ui32PAddrFields - ui32ItemNumber - 1));
+                       }
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON)
+                       DIPrintf(psEntry, "  " IMG_SIZE_FMTSPEC, psRecord->uiBytes);
+
+                       DIPrintf(psEntry, "  %s", (IMG_CHAR*) psRecord->pvAllocdFromFile);
+
+                       DIPrintf(psEntry, "  %d\n", psRecord->ui32AllocdFromLine);
+#else
+                       DIPrintf(psEntry, "  " IMG_SIZE_FMTSPEC "\n", psRecord->uiBytes);
+#endif
+               }
+               /* Move to next record... */
+               psRecord = psRecord->psNext;
+       }
+} /* MemStatsPrintElements */
+#endif
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+/*************************************************************************/ /*!
+@Function       RIMemStatsPrintElements
+@Description    Prints all elements for the RI Memory record.
+@Input          psEntry           OS DI implementation entry to print to.
+@Input          psProcessStats    Pointer to the process statistics record.
+*/ /**************************************************************************/
+void RIMemStatsPrintElements(OSDI_IMPL_ENTRY *psEntry,
+                             PVRSRV_PROCESS_STATS *psProcessStats)
+{
+       IMG_CHAR   *pszStatFmtText  = NULL;
+       IMG_HANDLE *pRIHandle       = NULL;
+
+       /* Acquire RI lock */
+       RILockAcquireKM();
+
+       /*
+        * Loop through the RI system to get each line of text.
+        */
+       while (RIGetListEntryKM(psProcessStats->pid,
+                                                       &pRIHandle,
+                                                       &pszStatFmtText))
+       {
+               DIPrintf(psEntry, "%s", pszStatFmtText);
+       }
+
+       /* Release RI lock */
+       RILockReleaseKM();
+
+} /* RIMemStatsPrintElements */
+#endif
+
+#endif
+
+static IMG_UINT32      ui32FirmwareStartTimestamp;
+static IMG_UINT64      ui64FirmwareIdleDuration;
+
+void SetFirmwareStartTime(IMG_UINT32 ui32Time)
+{
+       ui32FirmwareStartTimestamp = UPDATE_TIME(ui32FirmwareStartTimestamp, ui32Time);
+}
+
+void SetFirmwareHandshakeIdleTime(IMG_UINT64 ui64Duration)
+{
+       ui64FirmwareIdleDuration = UPDATE_TIME(ui64FirmwareIdleDuration, ui64Duration);
+}
+
+static INLINE void PowerStatsPrintGroup(IMG_UINT32 *pui32Stats,
+                                        OSDI_IMPL_ENTRY *psEntry,
+                                        PVRSRV_POWER_STAT_TYPE eForced,
+                                        PVRSRV_POWER_STAT_TYPE ePowerOn)
+{
+       IMG_UINT32 ui32Index;
+
+       ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, PRE_POWER, DEVICE);
+       DIPrintf(psEntry, "  Pre-Device:  %9u\n", pui32Stats[ui32Index]);
+
+       ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, PRE_POWER, SYSTEM);
+       DIPrintf(psEntry, "  Pre-System:  %9u\n", pui32Stats[ui32Index]);
+
+       ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, POST_POWER, SYSTEM);
+       DIPrintf(psEntry, "  Post-System: %9u\n", pui32Stats[ui32Index]);
+
+       ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, POST_POWER, DEVICE);
+       DIPrintf(psEntry, "  Post-Device: %9u\n", pui32Stats[ui32Index]);
+}
+
+int PowerStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData)
+{
+       IMG_UINT32 *pui32Stats = &aui32PowerTimingStats[0];
+       IMG_UINT32 ui32Idx;
+
+       PVR_UNREFERENCED_PARAMETER(pvData);
+
+       DIPrintf(psEntry, "Forced Power-on Transition (nanoseconds):\n");
+       PowerStatsPrintGroup(pui32Stats, psEntry, FORCED, POWER_ON);
+       DIPrintf(psEntry, "\n");
+
+       DIPrintf(psEntry, "Forced Power-off Transition (nanoseconds):\n");
+       PowerStatsPrintGroup(pui32Stats, psEntry, FORCED, POWER_OFF);
+       DIPrintf(psEntry, "\n");
+
+       DIPrintf(psEntry, "Not Forced Power-on Transition (nanoseconds):\n");
+       PowerStatsPrintGroup(pui32Stats, psEntry, NOT_FORCED, POWER_ON);
+       DIPrintf(psEntry, "\n");
+
+       DIPrintf(psEntry, "Not Forced Power-off Transition (nanoseconds):\n");
+       PowerStatsPrintGroup(pui32Stats, psEntry, NOT_FORCED, POWER_OFF);
+       DIPrintf(psEntry, "\n");
+
+
+       DIPrintf(psEntry, "FW bootup time (timer ticks): %u\n", ui32FirmwareStartTimestamp);
+       DIPrintf(psEntry, "Host Acknowledge Time for FW Idle Signal (timer ticks): %u\n", (IMG_UINT32)(ui64FirmwareIdleDuration));
+       DIPrintf(psEntry, "\n");
+
+       DIPrintf(psEntry, "Last %d Clock Speed Change Timers (nanoseconds):\n", NUM_EXTRA_POWER_STATS);
+       DIPrintf(psEntry, "Prepare DVFS\tDVFS Change\tPost DVFS\n");
+
+       for (ui32Idx = ui32ClockSpeedIndexStart; ui32Idx != ui32ClockSpeedIndexEnd; ui32Idx = (ui32Idx + 1) % NUM_EXTRA_POWER_STATS)
+       {
+               DIPrintf(psEntry, "%12llu\t%11llu\t%9llu\n",
+                        asClockSpeedChanges[ui32Idx].ui64PreClockSpeedChangeDuration,
+                        asClockSpeedChanges[ui32Idx].ui64BetweenPreEndingAndPostStartingDuration,
+                        asClockSpeedChanges[ui32Idx].ui64PostClockSpeedChangeDuration);
+       }
+
+       return 0;
+} /* PowerStatsPrintElements */
+
+int GlobalStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData)
+{
+       IMG_UINT32 ui32StatNumber;
+       PVR_UNREFERENCED_PARAMETER(pvData);
+
+       OSLockAcquire(gsGlobalStats.hGlobalStatsLock);
+
+       for (ui32StatNumber = 0;
+            ui32StatNumber < ARRAY_SIZE(pszDriverStatType);
+            ui32StatNumber++)
+       {
+               if (OSStringNCompare(pszDriverStatType[ui32StatNumber], "", 1) != 0)
+               {
+                       DIPrintf(psEntry, "%-34s%12llu\n",
+                                   pszDriverStatType[ui32StatNumber],
+                                   GET_GLOBAL_STAT_VALUE(ui32StatNumber));
+               }
+       }
+
+       OSLockRelease(gsGlobalStats.hGlobalStatsLock);
+
+       return 0;
+}
+
+/*************************************************************************/ /*!
+@Function       PVRSRVFindProcessMemStats
+@Description    Using the provided PID, find memory stats for that process.
+                Memstats will be provided for live/connected processes only.
+                Memstat values provided by this API relate only to the
+                physical memory allocated by the process and do not include
+                any mapped or imported memory.
+@Input          pid                 Process to search for.
+@Input          ui32ArrSize         Size of the array where memstat
+                                    records will be stored.
+@Input          bAllProcessStats    Flag denoting whether stats are requested
+                                    for an individual process or for all
+                                    processes.
+@Input          pui32MemoryStats    Handle to the memory where memstats
+                                    are stored.
+@Output         Memory statistics records for the requested pid.
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVFindProcessMemStats(IMG_PID pid, IMG_UINT32 ui32ArrSize, IMG_BOOL bAllProcessStats, IMG_UINT32 *pui32MemoryStats)
+{
+       IMG_INT i;
+       PVRSRV_PROCESS_STATS* psProcessStats;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(pui32MemoryStats, "pui32MemoryStats");
+
+       if (bAllProcessStats)
+       {
+               PVR_LOG_RETURN_IF_FALSE(ui32ArrSize == PVRSRV_DRIVER_STAT_TYPE_COUNT,
+                                 "MemStats array size is incorrect",
+                                 PVRSRV_ERROR_INVALID_PARAMS);
+
+               OSLockAcquire(gsGlobalStats.hGlobalStatsLock);
+
+               for (i = 0; i < ui32ArrSize; i++)
+               {
+                       pui32MemoryStats[i] = GET_GLOBAL_STAT_VALUE(i);
+               }
+
+               OSLockRelease(gsGlobalStats.hGlobalStatsLock);
+
+               return PVRSRV_OK;
+       }
+
+       PVR_LOG_RETURN_IF_FALSE(ui32ArrSize == PVRSRV_PROCESS_STAT_TYPE_COUNT,
+                         "MemStats array size is incorrect",
+                         PVRSRV_ERROR_INVALID_PARAMS);
+
+       OSLockAcquire(g_psLinkedListLock);
+
+       /* Search for the given PID in the Live List */
+       psProcessStats = _FindProcessStatsInLiveList(pid);
+
+       if (psProcessStats == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Process %d not found. This process may not be live anymore.", (IMG_INT)pid));
+               OSLockRelease(g_psLinkedListLock);
+
+               return PVRSRV_ERROR_PROCESS_NOT_FOUND;
+       }
+
+       OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+       for (i = 0; i < ui32ArrSize; i++)
+       {
+               pui32MemoryStats[i] = psProcessStats->i32StatValue[i];
+       }
+       OSLockRelease(psProcessStats->hLock);
+
+       OSLockRelease(g_psLinkedListLock);
+
+       return PVRSRV_OK;
+
+} /* PVRSRVFindProcessMemStats */
+
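+/* Illustrative usage sketch (not part of this patch): a hypothetical in-driver
+ * caller querying per-process memstats for a single PID (uiPid is an invented
+ * variable). The array must hold PVRSRV_PROCESS_STAT_TYPE_COUNT entries when
+ * bAllProcessStats is IMG_FALSE, or PVRSRV_DRIVER_STAT_TYPE_COUNT entries when
+ * it is IMG_TRUE.
+ *
+ *     IMG_UINT32 aui32Stats[PVRSRV_PROCESS_STAT_TYPE_COUNT];
+ *     PVRSRV_ERROR eErr;
+ *
+ *     eErr = PVRSRVFindProcessMemStats(uiPid, ARRAY_SIZE(aui32Stats),
+ *                                      IMG_FALSE, aui32Stats);
+ *     if (eErr == PVRSRV_ERROR_PROCESS_NOT_FOUND)
+ *     {
+ *             // The PID is not (or is no longer) connected to the driver.
+ *     }
+ */
+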
+/*************************************************************************/ /*!
+@Function       PVRSRVGetProcessMemUsage
+@Description    Calculate allocated kernel and graphics memory for all live or
+                connected processes. Memstat values provided by this API relate
+                only to the physical memory allocated by the process and do
+                not include any mapped or imported memory.
+@Output         pui32TotalMem                   Total memory usage for all live
+                                                PIDs connected to the driver.
+@Output         pui32NumberOfLivePids           Number of live pids currently
+                                                connected to the server.
+@Output         ppsPerProcessMemUsageData       Handle to an array of
+                                                PVRSRV_PER_PROCESS_MEM_USAGE,
+                                                number of elements defined by
+                                                pui32NumberOfLivePids.
+@Return         PVRSRV_OK                       Success
+                PVRSRV_ERROR_PROCESS_NOT_FOUND  No live processes.
+                PVRSRV_ERROR_OUT_OF_MEMORY      Failed to allocate memory for
+                                                ppsPerProcessMemUsageData.
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVGetProcessMemUsage(IMG_UINT32 *pui32TotalMem,
+                                                                         IMG_UINT32 *pui32NumberOfLivePids,
+                                                                         PVRSRV_PER_PROCESS_MEM_USAGE **ppsPerProcessMemUsageData)
+{
+       IMG_UINT32 ui32Counter = 0;
+       IMG_UINT32 ui32NumberOfLivePids = 0;
+       PVRSRV_ERROR eError = PVRSRV_ERROR_PROCESS_NOT_FOUND;
+       PVRSRV_PROCESS_STATS* psProcessStats = NULL;
+       PVRSRV_PER_PROCESS_MEM_USAGE* psPerProcessMemUsageData = NULL;
+
+       OSLockAcquire(gsGlobalStats.hGlobalStatsLock);
+
+       *pui32TotalMem = GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_KMALLOC) +
+               GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_VMALLOC) +
+               GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA) +
+               GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA) +
+               GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA) +
+               GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA);
+
+       OSLockRelease(gsGlobalStats.hGlobalStatsLock);
+
+       OSLockAcquire(g_psLinkedListLock);
+       psProcessStats = g_psLiveList;
+
+       while (psProcessStats != NULL)
+       {
+               psProcessStats = psProcessStats->psNext;
+               ui32NumberOfLivePids++;
+       }
+
+       if (ui32NumberOfLivePids > 0)
+       {
+               /* Use OSAllocZMemNoStats to prevent deadlock. */
+               psPerProcessMemUsageData = OSAllocZMemNoStats(ui32NumberOfLivePids * sizeof(*psPerProcessMemUsageData));
+
+               if (psPerProcessMemUsageData)
+               {
+                       psProcessStats = g_psLiveList;
+
+                       while (psProcessStats != NULL)
+                       {
+                               OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+                               psPerProcessMemUsageData[ui32Counter].ui32Pid = (IMG_UINT32)psProcessStats->pid;
+
+                               psPerProcessMemUsageData[ui32Counter].ui32KernelMemUsage = psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC] +
+                               psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_VMALLOC];
+
+                               psPerProcessMemUsageData[ui32Counter].ui32GraphicsMemUsage = psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA] +
+                               psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA] +
+                               psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES] +
+                               psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES];
+
+                               OSLockRelease(psProcessStats->hLock);
+                               psProcessStats = psProcessStats->psNext;
+                               ui32Counter++;
+                       }
+                       eError = PVRSRV_OK;
+               }
+               else
+               {
+                       eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               }
+       }
+
+       OSLockRelease(g_psLinkedListLock);
+       *pui32NumberOfLivePids = ui32NumberOfLivePids;
+       *ppsPerProcessMemUsageData = psPerProcessMemUsageData;
+
+       return eError;
+
+} /* PVRSRVGetProcessMemUsage */
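+
+/* Illustrative usage sketch (not part of this patch): a hypothetical caller
+ * walking the per-process usage array. The array is allocated above with
+ * OSAllocZMemNoStats, so releasing it with OSFreeMemNoStats is assumed to be
+ * the matching free.
+ *
+ *     IMG_UINT32 ui32Total, ui32NumPids, i;
+ *     PVRSRV_PER_PROCESS_MEM_USAGE *psUsage = NULL;
+ *
+ *     if (PVRSRVGetProcessMemUsage(&ui32Total, &ui32NumPids, &psUsage) == PVRSRV_OK)
+ *     {
+ *             for (i = 0; i < ui32NumPids; i++)
+ *             {
+ *                     // psUsage[i].ui32Pid, psUsage[i].ui32KernelMemUsage
+ *                     // and psUsage[i].ui32GraphicsMemUsage are valid here.
+ *             }
+ *             OSFreeMemNoStats(psUsage);
+ *     }
+ */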
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/pvr_notifier.c b/drivers/gpu/drm/img/img-rogue/services/server/common/pvr_notifier.c
new file mode 100644 (file)
index 0000000..3bc2beb
--- /dev/null
@@ -0,0 +1,647 @@
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR notifier interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "allocmem.h"
+#include "dllist.h"
+
+#include "device.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "pvrversion.h"
+#include "connection_server.h"
+
+#include "osfunc.h"
+#include "sofunc_pvr.h"
+
+#define PVR_DUMP_DRIVER_INFO(x, y)                                                                                                             \
+       PVR_DUMPDEBUG_LOG("%s info: %d.%d @ %8d (%s) build options: 0x%08x",                            \
+                                          (x),                                                                                                                         \
+                                          PVRVERSION_UNPACK_MAJ((y).ui32BuildVersion),                                         \
+                                          PVRVERSION_UNPACK_MIN((y).ui32BuildVersion),                                         \
+                                          (y).ui32BuildRevision,                                                                                       \
+                                          (BUILD_TYPE_DEBUG == (y).ui32BuildType) ? "debug":"release",         \
+                                          (y).ui32BuildOptions);
+
+#if !defined(WINDOW_SYSTEM)
+#define WINDOW_SYSTEM "Unknown"
+#endif
+
+#define IS_DECLARED(x) (x[0] != '\0')
+
+/*************************************************************************/ /*!
+Command Complete Notifier Interface
+*/ /**************************************************************************/
+
+typedef struct PVRSRV_CMDCOMP_NOTIFY_TAG
+{
+       PVRSRV_CMDCOMP_HANDLE   hCmdCompHandle;
+       PFN_CMDCOMP_NOTIFY              pfnCmdCompleteNotify;
+       DLLIST_NODE                             sListNode;
+} PVRSRV_CMDCOMP_NOTIFY;
+
+/* Head of the list of callbacks called when command complete happens */
+static DLLIST_NODE g_sCmdCompNotifyHead;
+static POSWR_LOCK g_hCmdCompNotifyLock;
+
+PVRSRV_ERROR
+PVRSRVCmdCompleteInit(void)
+{
+       PVRSRV_ERROR eError;
+
+       eError = OSWRLockCreate(&g_hCmdCompNotifyLock);
+       PVR_RETURN_IF_ERROR(eError);
+
+       dllist_init(&g_sCmdCompNotifyHead);
+
+       return PVRSRV_OK;
+}
+
+void
+PVRSRVCmdCompleteDeinit(void)
+{
+       /* Check that all notify functions have been unregistered */
+       if (!dllist_is_empty(&g_sCmdCompNotifyHead))
+       {
+               PDLLIST_NODE psNode;
+
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Command complete notify list is not empty!", __func__));
+
+               /* Clean up any stragglers */
+               psNode = dllist_get_next_node(&g_sCmdCompNotifyHead);
+               while (psNode)
+               {
+                       PVRSRV_CMDCOMP_NOTIFY *psNotify;
+
+                       dllist_remove_node(psNode);
+
+                       psNotify = IMG_CONTAINER_OF(psNode, PVRSRV_CMDCOMP_NOTIFY, sListNode);
+                       OSFreeMem(psNotify);
+
+                       psNode = dllist_get_next_node(&g_sCmdCompNotifyHead);
+               }
+       }
+
+       if (g_hCmdCompNotifyLock)
+       {
+               OSWRLockDestroy(g_hCmdCompNotifyLock);
+       }
+}
+
+PVRSRV_ERROR
+PVRSRVRegisterCmdCompleteNotify(IMG_HANDLE *phNotify,
+                                                               PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify,
+                                                               PVRSRV_CMDCOMP_HANDLE hCmdCompHandle)
+{
+       PVRSRV_CMDCOMP_NOTIFY *psNotify;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(phNotify, "phNotify");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(pfnCmdCompleteNotify, "pfnCmdCompleteNotify");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(hCmdCompHandle, "hCmdCompHandle");
+
+       psNotify = OSAllocMem(sizeof(*psNotify));
+       PVR_LOG_RETURN_IF_NOMEM(psNotify, "psNotify");
+
+       /* Set-up the notify data */
+       psNotify->hCmdCompHandle = hCmdCompHandle;
+       psNotify->pfnCmdCompleteNotify = pfnCmdCompleteNotify;
+
+       /* Add it to the list of Notify functions */
+       OSWRLockAcquireWrite(g_hCmdCompNotifyLock);
+       dllist_add_to_tail(&g_sCmdCompNotifyHead, &psNotify->sListNode);
+       OSWRLockReleaseWrite(g_hCmdCompNotifyLock);
+
+       *phNotify = psNotify;
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVUnregisterCmdCompleteNotify(IMG_HANDLE hNotify)
+{
+       PVRSRV_CMDCOMP_NOTIFY *psNotify;
+
+       psNotify = (PVRSRV_CMDCOMP_NOTIFY *) hNotify;
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psNotify, "hNotify");
+
+       OSWRLockAcquireWrite(g_hCmdCompNotifyLock);
+       dllist_remove_node(&psNotify->sListNode);
+       OSWRLockReleaseWrite(g_hCmdCompNotifyLock);
+
+       OSFreeMem(psNotify);
+
+       return PVRSRV_OK;
+}
+
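+/* Illustrative usage sketch (not part of this patch): a hypothetical device
+ * module registering for command-complete notification. The callback shape is
+ * inferred from the invocation in PVRSRVNotifyCommandCompletion() below (one
+ * PVRSRV_CMDCOMP_HANDLE argument, return value unused); _ExampleCmdComplete
+ * and hMyDevice are invented names.
+ *
+ *     static void _ExampleCmdComplete(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle)
+ *     {
+ *             // Re-check whether any blocked work items can now proceed.
+ *     }
+ *
+ *     IMG_HANDLE hNotify;
+ *     PVRSRV_ERROR eErr = PVRSRVRegisterCmdCompleteNotify(&hNotify,
+ *                                                         _ExampleCmdComplete,
+ *                                                         hMyDevice);
+ *     ...
+ *     PVRSRVUnregisterCmdCompleteNotify(hNotify);
+ */
+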
+void
+PVRSRVNotifyCommandCompletion(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle)
+{
+#if !defined(NO_HARDWARE)
+       DLLIST_NODE *psNode, *psNext;
+
+       /* Call notify callbacks to check if blocked work items can now proceed */
+       OSWRLockAcquireRead(g_hCmdCompNotifyLock);
+       dllist_foreach_node(&g_sCmdCompNotifyHead, psNode, psNext)
+       {
+               PVRSRV_CMDCOMP_NOTIFY *psNotify =
+                       IMG_CONTAINER_OF(psNode, PVRSRV_CMDCOMP_NOTIFY, sListNode);
+
+               if (hCmdCompCallerHandle != psNotify->hCmdCompHandle)
+               {
+                       psNotify->pfnCmdCompleteNotify(psNotify->hCmdCompHandle);
+               }
+       }
+       OSWRLockReleaseRead(g_hCmdCompNotifyLock);
+#endif
+}
+
+inline void
+PVRSRVSignalGlobalEO(void)
+{
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+       if (psPVRSRVData->hGlobalEventObject)
+       {
+               OSEventObjectSignal(psPVRSRVData->hGlobalEventObject);
+       }
+}
+
+inline void
+PVRSRVCheckStatus(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle)
+{
+       PVRSRVNotifyCommandCompletion(hCmdCompCallerHandle);
+       PVRSRVSignalGlobalEO();
+}
+
+/*************************************************************************/ /*!
+Debug Notifier Interface
+*/ /**************************************************************************/
+
+/* Lockdep sees both locks as the same class because they are embedded in the
+ * same structure, so it warns about a possible deadlock (a false positive).
+ * Using the nested lock API we can supply separate lock classes.
+ */
+#define DN_LOCKCLASS_DRIVER 0
+#define DN_LOCKCLASS_DEVICE 1
+
+typedef struct DEBUG_REQUEST_ENTRY_TAG
+{
+       IMG_UINT32      ui32RequesterID;
+       DLLIST_NODE     sListHead;
+} DEBUG_REQUEST_ENTRY;
+
+typedef struct DEBUG_REQUEST_TABLE_TAG
+{
+       POSWR_LOCK              hLock;
+       DEBUG_REQUEST_ENTRY     asEntry[1];
+} DEBUG_REQUEST_TABLE;
+
+typedef struct DEBUG_REQUEST_NOTIFY_TAG
+{
+       IMG_HANDLE              hDebugTable;
+       PVRSRV_DBGREQ_HANDLE    hDbgRequestHandle;
+       PFN_DBGREQ_NOTIFY       pfnDbgRequestNotify;
+       IMG_UINT32              ui32RequesterID;
+       DLLIST_NODE             sListNode;
+} DEBUG_REQUEST_NOTIFY;
+
+static DEBUG_REQUEST_TABLE *g_psDriverDebugTable;
+
+static const IMG_UINT32 g_aui32DebugOrderTable[] = {
+       DEBUG_REQUEST_SRV,
+       DEBUG_REQUEST_RGX,
+       DEBUG_REQUEST_SYS,
+       DEBUG_REQUEST_APPHINT,
+       DEBUG_REQUEST_HTB,
+       DEBUG_REQUEST_DC,
+       DEBUG_REQUEST_SYNCCHECKPOINT,
+       DEBUG_REQUEST_SYNCTRACKING,
+       DEBUG_REQUEST_ANDROIDSYNC,
+       DEBUG_REQUEST_FALLBACKSYNC,
+       DEBUG_REQUEST_LINUXFENCE
+};
+static const IMG_UINT32 g_ui32DebugOrderTableReqCount = ARRAY_SIZE(g_aui32DebugOrderTable);
+
+static PVRSRV_ERROR
+_RegisterDebugTableI(DEBUG_REQUEST_TABLE **ppsDebugTable)
+{
+       DEBUG_REQUEST_TABLE *psDebugTable;
+       IMG_UINT32 i;
+       PVRSRV_ERROR eError;
+
+       if (*ppsDebugTable)
+       {
+               return PVRSRV_ERROR_DBGTABLE_ALREADY_REGISTERED;
+       }
+
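+       /* asEntry[] is declared with a single element; allocate additional
+        * space so that one DEBUG_REQUEST_ENTRY per requester in
+        * g_aui32DebugOrderTable fits contiguously after the struct. */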
+       psDebugTable = OSAllocMem(sizeof(DEBUG_REQUEST_TABLE) +
+                                                         (sizeof(DEBUG_REQUEST_ENTRY) * (g_ui32DebugOrderTableReqCount-1)));
+       PVR_RETURN_IF_NOMEM(psDebugTable);
+
+       eError = OSWRLockCreate(&psDebugTable->hLock);
+       PVR_GOTO_IF_ERROR(eError, ErrorFreeDebugTable);
+
+       /* Init the list heads */
+       for (i = 0; i < g_ui32DebugOrderTableReqCount; i++)
+       {
+               psDebugTable->asEntry[i].ui32RequesterID = g_aui32DebugOrderTable[i];
+               dllist_init(&psDebugTable->asEntry[i].sListHead);
+       }
+
+       *ppsDebugTable = psDebugTable;
+
+       return PVRSRV_OK;
+
+ErrorFreeDebugTable:
+       OSFreeMem(psDebugTable);
+
+       return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVRegisterDeviceDbgTable(PVRSRV_DEVICE_NODE *psDevNode)
+{
+       return _RegisterDebugTableI((DEBUG_REQUEST_TABLE**)&psDevNode->hDebugTable);
+}
+
+PVRSRV_ERROR
+PVRSRVRegisterDriverDbgTable(void)
+{
+       return _RegisterDebugTableI(&g_psDriverDebugTable);
+}
+
+static void _UnregisterDbgTableI(DEBUG_REQUEST_TABLE **ppsDebugTable)
+{
+       DEBUG_REQUEST_TABLE *psDebugTable;
+       IMG_UINT32 i;
+
+       PVR_ASSERT(*ppsDebugTable);
+       psDebugTable = *ppsDebugTable;
+       *ppsDebugTable = NULL;
+
+       for (i = 0; i < g_ui32DebugOrderTableReqCount; i++)
+       {
+               if (!dllist_is_empty(&psDebugTable->asEntry[i].sListHead))
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Found registered callback(s) on %d",
+                                        __func__, i));
+               }
+       }
+
+       OSWRLockDestroy(psDebugTable->hLock);
+       psDebugTable->hLock = NULL;
+
+       OSFreeMem(psDebugTable);
+}
+
+void
+PVRSRVUnregisterDeviceDbgTable(PVRSRV_DEVICE_NODE *psDevNode)
+{
+       _UnregisterDbgTableI((DEBUG_REQUEST_TABLE**)&psDevNode->hDebugTable);
+       PVR_ASSERT(!psDevNode->hDebugTable);
+}
+
+void
+PVRSRVUnregisterDriverDbgTable(void)
+{
+       _UnregisterDbgTableI(&g_psDriverDebugTable);
+       PVR_ASSERT(!g_psDriverDebugTable);
+}
+
+static PVRSRV_ERROR
+_RegisterDbgRequestNotifyI(IMG_HANDLE *phNotify,
+                                  DEBUG_REQUEST_TABLE *psDebugTable,
+                                  PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
+                                  IMG_UINT32 ui32RequesterID,
+                                  PVRSRV_DBGREQ_HANDLE hDbgRequestHandle)
+{
+       DEBUG_REQUEST_NOTIFY *psNotify;
+       PDLLIST_NODE psHead = NULL;
+       IMG_UINT32 i;
+       PVRSRV_ERROR eError;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(phNotify, "phNotify");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psDebugTable, "psDebugTable");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(pfnDbgRequestNotify, "pfnDbgRequestNotify");
+
+       /* NoStats used since this may be called outside of the register/de-register
+        * process calls which track memory use. */
+       psNotify = OSAllocMemNoStats(sizeof(*psNotify));
+       PVR_LOG_RETURN_IF_NOMEM(psNotify, "psNotify");
+
+       /* Set-up the notify data */
+       psNotify->hDebugTable = psDebugTable;
+       psNotify->hDbgRequestHandle = hDbgRequestHandle;
+       psNotify->pfnDbgRequestNotify = pfnDbgRequestNotify;
+       psNotify->ui32RequesterID = ui32RequesterID;
+
+       /* Lock down all the lists */
+       OSWRLockAcquireWrite(psDebugTable->hLock);
+
+       /* Find which list to add it to */
+       for (i = 0; i < g_ui32DebugOrderTableReqCount; i++)
+       {
+               if (psDebugTable->asEntry[i].ui32RequesterID == ui32RequesterID)
+               {
+                       psHead = &psDebugTable->asEntry[i].sListHead;
+               }
+       }
+
+       /* Failed to find debug requester */
+       PVR_LOG_GOTO_IF_INVALID_PARAM(psHead, eError, ErrorReleaseLock);
+
+       /* Add it to the list of Notify functions */
+       dllist_add_to_tail(psHead, &psNotify->sListNode);
+
+       /* Unlock the lists */
+       OSWRLockReleaseWrite(psDebugTable->hLock);
+
+       *phNotify = psNotify;
+
+       return PVRSRV_OK;
+
+ErrorReleaseLock:
+       OSWRLockReleaseWrite(psDebugTable->hLock);
+       OSFreeMem(psNotify);
+
+       return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVRegisterDeviceDbgRequestNotify(IMG_HANDLE *phNotify,
+                                        PVRSRV_DEVICE_NODE *psDevNode,
+                                        PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
+                                        IMG_UINT32 ui32RequesterID,
+                                        PVRSRV_DBGREQ_HANDLE hDbgRequestHandle)
+{
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode, "psDevNode");
+       if (!psDevNode->hDebugTable)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: psDevNode->hDebugTable not yet initialised!",
+                                __func__));
+               return PVRSRV_ERROR_NOT_INITIALISED;
+       }
+
+       return _RegisterDbgRequestNotifyI(phNotify,
+                                                 (DEBUG_REQUEST_TABLE *)psDevNode->hDebugTable,
+                                                 pfnDbgRequestNotify,
+                                                 ui32RequesterID,
+                                                 hDbgRequestHandle);
+}
+
+PVRSRV_ERROR
+PVRSRVRegisterDriverDbgRequestNotify(IMG_HANDLE *phNotify,
+                                        PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
+                                        IMG_UINT32 ui32RequesterID,
+                                        PVRSRV_DBGREQ_HANDLE hDbgRequestHandle)
+{
+       if (!g_psDriverDebugTable)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: g_psDriverDebugTable not yet initialised!",
+                                __func__));
+               return PVRSRV_ERROR_NOT_INITIALISED;
+       }
+
+       return _RegisterDbgRequestNotifyI(phNotify,
+                                                 g_psDriverDebugTable,
+                                                 pfnDbgRequestNotify,
+                                                 ui32RequesterID,
+                                                 hDbgRequestHandle);
+}
+
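+/* Illustrative usage sketch (not part of this patch): a hypothetical module
+ * hooking into the device debug dump. The callback parameters mirror the
+ * invocation in PVRSRVDebugRequest() below; _ExampleDbgNotify and pvMyContext
+ * are invented names, and DEBUG_REQUEST_SYS is one of the requester IDs from
+ * g_aui32DebugOrderTable.
+ *
+ *     static void _ExampleDbgNotify(PVRSRV_DBGREQ_HANDLE hDbgRequestHandle,
+ *                                   IMG_UINT32 ui32VerbLevel,
+ *                                   DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ *                                   void *pvDumpDebugFile)
+ *     {
+ *             // Dump module state at the requested verbosity level.
+ *     }
+ *
+ *     IMG_HANDLE hDbgNotify;
+ *     PVRSRVRegisterDeviceDbgRequestNotify(&hDbgNotify, psDevNode,
+ *                                          _ExampleDbgNotify,
+ *                                          DEBUG_REQUEST_SYS, pvMyContext);
+ *     ...
+ *     PVRSRVUnregisterDeviceDbgRequestNotify(hDbgNotify);
+ */
+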
+PVRSRV_ERROR
+SOPvrDbgRequestNotifyRegister(IMG_HANDLE *phNotify,
+                                                         PVRSRV_DEVICE_NODE *psDevNode,
+                                                         PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
+                                                         IMG_UINT32 ui32RequesterID,
+                                                         PVRSRV_DBGREQ_HANDLE hDbgRequestHandle)
+{
+       return PVRSRVRegisterDeviceDbgRequestNotify(phNotify,
+                       psDevNode,
+                       pfnDbgRequestNotify,
+                       ui32RequesterID,
+                       hDbgRequestHandle);
+}
+
+static PVRSRV_ERROR
+_UnregisterDbgRequestNotify(IMG_HANDLE hNotify)
+{
+       DEBUG_REQUEST_NOTIFY *psNotify = (DEBUG_REQUEST_NOTIFY *) hNotify;
+       DEBUG_REQUEST_TABLE *psDebugTable;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psNotify, "psNotify");
+
+       psDebugTable = (DEBUG_REQUEST_TABLE *) psNotify->hDebugTable;
+
+       OSWRLockAcquireWrite(psDebugTable->hLock);
+       dllist_remove_node(&psNotify->sListNode);
+       OSWRLockReleaseWrite(psDebugTable->hLock);
+
+       OSFreeMemNoStats(psNotify);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVUnregisterDeviceDbgRequestNotify(IMG_HANDLE hNotify)
+{
+       return _UnregisterDbgRequestNotify(hNotify);
+}
+
+PVRSRV_ERROR
+PVRSRVUnregisterDriverDbgRequestNotify(IMG_HANDLE hNotify)
+{
+       return _UnregisterDbgRequestNotify(hNotify);
+}
+
+PVRSRV_ERROR
+SOPvrDbgRequestNotifyUnregister(IMG_HANDLE hNotify)
+{
+       return _UnregisterDbgRequestNotify(hNotify);
+}
+
+void
+PVRSRVDebugRequest(PVRSRV_DEVICE_NODE *psDevNode,
+                                  IMG_UINT32 ui32VerbLevel,
+                                  DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                  void *pvDumpDebugFile)
+{
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+       DEBUG_REQUEST_TABLE *psDebugTable =
+               (DEBUG_REQUEST_TABLE *) psDevNode->hDebugTable;
+       DEBUG_REQUEST_TABLE *psDriverDebugTable =
+               (DEBUG_REQUEST_TABLE *) g_psDriverDebugTable;
+       static const IMG_CHAR *apszVerbosityTable[] = { "Low", "Medium", "High" };
+       const IMG_CHAR *szVerbosityLevel;
+       const IMG_CHAR *Bit32 = "32 Bit", *Bit64 = "64 Bit";
+       IMG_UINT32 i;
+
+       static_assert(ARRAY_SIZE(apszVerbosityTable) == DEBUG_REQUEST_VERBOSITY_MAX+1,
+                     "Incorrect number of verbosity levels");
+
+       PVR_ASSERT(psDebugTable);
+       PVR_ASSERT(psDriverDebugTable);
+
+       if (ui32VerbLevel < ARRAY_SIZE(apszVerbosityTable))
+       {
+               szVerbosityLevel = apszVerbosityTable[ui32VerbLevel];
+       }
+       else
+       {
+               szVerbosityLevel = "unknown";
+               PVR_ASSERT(!"Invalid verbosity level received");
+       }
+
+       PVR_DUMPDEBUG_LOG("------------[ PVR DBG: START (%s) ]------------",
+                         szVerbosityLevel);
+
+#if defined(RGX_IRQ_HYPERV_HANDLER)
+       if (!PVRSRV_VZ_MODE_IS(GUEST))
+#endif
+       {
+               OSDumpVersionInfo(pfnDumpDebugPrintf, pvDumpDebugFile);
+       }
+
+       PVR_DUMPDEBUG_LOG("DDK info: %s (%s) %s",
+                         PVRVERSION_STRING, PVR_BUILD_TYPE, PVR_BUILD_DIR);
+
+       PVR_DUMPDEBUG_LOG("Time now: %" IMG_UINT64_FMTSPEC "us",
+                         OSClockus64());
+
+       switch (psPVRSRVData->eServicesState)
+       {
+               case PVRSRV_SERVICES_STATE_OK:
+                       PVR_DUMPDEBUG_LOG("Services State: OK");
+                       break;
+               case PVRSRV_SERVICES_STATE_BAD:
+                       PVR_DUMPDEBUG_LOG("Services State: BAD");
+                       break;
+               case PVRSRV_SERVICES_STATE_UNDEFINED:
+                       PVR_DUMPDEBUG_LOG("Services State: UNDEFINED");
+                       break;
+               default:
+                       PVR_DUMPDEBUG_LOG("Services State: UNKNOWN (%d)",
+                                         psPVRSRVData->eServicesState);
+                       break;
+       }
+
+       PVR_DUMPDEBUG_LOG("Server Errors: %d",
+                 PVRSRV_KM_ERRORS);
+
+       PVRSRVConnectionDebugNotify(psDevNode, pfnDumpDebugPrintf, pvDumpDebugFile);
+
+       PVR_DUMPDEBUG_LOG("------[ Driver Info ]------");
+
+       PVR_DUMPDEBUG_LOG("Comparison of UM/KM components: %s",
+                        (psPVRSRVData->sDriverInfo.bIsNoMatch) ? "MISMATCH" : "MATCHING");
+
+       PVR_DUMPDEBUG_LOG("KM Arch: %s",
+                         (psPVRSRVData->sDriverInfo.ui8KMBitArch & BUILD_ARCH_64BIT) ? Bit64 : Bit32);
+
+       if (!PVRSRV_VZ_MODE_IS(NATIVE))
+       {
+               PVR_DUMPDEBUG_LOG("Driver Mode: %s",
+                                 (PVRSRV_VZ_MODE_IS(HOST)) ? "Host":"Guest");
+       }
+
+       if (psPVRSRVData->sDriverInfo.ui8UMSupportedArch)
+       {
+               if ((psPVRSRVData->sDriverInfo.ui8UMSupportedArch & BUILD_ARCH_BOTH) ==
+                       BUILD_ARCH_BOTH)
+               {
+                       PVR_DUMPDEBUG_LOG("UM Connected Clients Arch: %s and %s", Bit64, Bit32);
+
+               }
+               else
+               {
+                       PVR_DUMPDEBUG_LOG("UM Connected Clients: %s",
+                                        (psPVRSRVData->sDriverInfo.ui8UMSupportedArch & BUILD_ARCH_64BIT) ? Bit64 : Bit32);
+               }
+       }
+
+       PVR_DUMP_DRIVER_INFO("UM", psPVRSRVData->sDriverInfo.sUMBuildInfo);
+       PVR_DUMP_DRIVER_INFO("KM", psPVRSRVData->sDriverInfo.sKMBuildInfo);
+
+       PVR_DUMPDEBUG_LOG("Window system: %s", (IS_DECLARED(WINDOW_SYSTEM)) ? (WINDOW_SYSTEM) : "Not declared");
+
+       /* Driver debug table */
+       OSWRLockAcquireReadNested(psDriverDebugTable->hLock, DN_LOCKCLASS_DRIVER);
+       /* Device debug table*/
+       OSWRLockAcquireReadNested(psDebugTable->hLock, DN_LOCKCLASS_DEVICE);
+
+       /* For each requester in Driver and Device table */
+       for (i = 0; i < g_ui32DebugOrderTableReqCount; i++)
+       {
+               DLLIST_NODE *psNode;
+               DLLIST_NODE *psNext;
+
+               /* For each notifier on this requester */
+               dllist_foreach_node(&psDriverDebugTable->asEntry[i].sListHead, psNode, psNext)
+               {
+                       DEBUG_REQUEST_NOTIFY *psNotify =
+                               IMG_CONTAINER_OF(psNode, DEBUG_REQUEST_NOTIFY, sListNode);
+                       psNotify->pfnDbgRequestNotify(psNotify->hDbgRequestHandle, ui32VerbLevel,
+                                                         pfnDumpDebugPrintf, pvDumpDebugFile);
+               }
+
+               /* For each notifier on this requester */
+               dllist_foreach_node(&psDebugTable->asEntry[i].sListHead, psNode, psNext)
+               {
+                       DEBUG_REQUEST_NOTIFY *psNotify =
+                               IMG_CONTAINER_OF(psNode, DEBUG_REQUEST_NOTIFY, sListNode);
+                       psNotify->pfnDbgRequestNotify(psNotify->hDbgRequestHandle, ui32VerbLevel,
+                                                       pfnDumpDebugPrintf, pvDumpDebugFile);
+               }
+       }
+
+       OSWRLockReleaseRead(psDebugTable->hLock);
+       OSWRLockReleaseRead(psDriverDebugTable->hLock);
+
+       PVR_DUMPDEBUG_LOG("------------[ PVR DBG: END ]------------");
+
+       if (!pfnDumpDebugPrintf)
+       {
+               /* Only notify OS of an issue if the debug dump has gone there */
+               //OSWarnOn(IMG_TRUE);
+       }
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/pvrsrv.c b/drivers/gpu/drm/img/img-rogue/services/server/common/pvrsrv.c
new file mode 100644 (file)
index 0000000..6149d83
--- /dev/null
@@ -0,0 +1,3004 @@
+/*************************************************************************/ /*!
+@File
+@Title          core services functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Main APIs for core services functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "rgxdebug.h"
+#include "handle.h"
+#include "connection_server.h"
+#include "osconnection_server.h"
+#include "pdump_km.h"
+#include "ra.h"
+#include "allocmem.h"
+#include "pmr.h"
+#include "pvrsrv.h"
+#include "srvcore.h"
+#include "services_km.h"
+#include "pvrsrv_device.h"
+#include "pvr_debug.h"
+#include "debug_common.h"
+#include "pvr_notifier.h"
+#include "sync.h"
+#include "sync_server.h"
+#include "sync_checkpoint.h"
+#include "sync_fallback_server.h"
+#include "sync_checkpoint_init.h"
+#include "devicemem.h"
+#include "cache_km.h"
+#include "info_page.h"
+#include "info_page_defs.h"
+#include "pvrsrv_bridge_init.h"
+#include "devicemem_server.h"
+#include "km_apphint_defs.h"
+#include "di_server.h"
+#include "di_impl_brg.h"
+#include "htb_debug.h"
+#include "dma_km.h"
+
+#include "log2.h"
+
+#include "lists.h"
+#include "dllist.h"
+#include "syscommon.h"
+#include "sysvalidation.h"
+
+#include "physmem_lma.h"
+#include "physmem_osmem.h"
+#include "physmem_hostmem.h"
+
+#include "tlintern.h"
+#include "htbserver.h"
+
+//#define MULTI_DEVICE_BRINGUP
+
+#if defined(MULTI_DEVICE_BRINGUP)
+#define MULTI_DEVICE_BRINGUP_DPF(msg, ...) PVR_DPF((PVR_DBG_MESSAGE, msg, __VA_ARGS__))
+#else
+#define MULTI_DEVICE_BRINGUP_DPF(msg, ...) PVR_DPF((PVR_DBG_MESSAGE, msg, __VA_ARGS__))
+#endif
+
+#if defined(SUPPORT_RGX)
+#include "rgxinit.h"
+#include "rgxhwperf.h"
+#include "rgxfwutils.h"
+#endif
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+#include "ri_server.h"
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#include "vz_vmm_pvz.h"
+
+#include "devicemem_history_server.h"
+
+#if defined(SUPPORT_LINUX_DVFS)
+#include "pvr_dvfs_device.h"
+#endif
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+#include "dc_server.h"
+#endif
+
+#include "rgx_options.h"
+#include "srvinit.h"
+#include "rgxutils.h"
+
+#include "oskm_apphint.h"
+#include "pvrsrv_apphint.h"
+
+#include "pvrsrv_tlstreams.h"
+#include "tlstream.h"
+
+#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__)
+#include "physmem_test.h"
+#endif
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "virt_validation_defs.h"
+#endif
+
+#if defined(__linux__)
+#include "km_apphint.h"
+#endif /* defined(__linux__) */
+
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+#define INFINITE_SLEEP_TIMEOUT 0ULL
+#endif
+
+/*! Wait 100ms before retrying deferred clean-up */
+#define CLEANUP_THREAD_WAIT_RETRY_TIMEOUT 100000ULL
+
+/*! Wait 8hrs when no deferred clean-up required. Allows a poll several times
+ * a day to check for any missed clean-up. */
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+#define CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT INFINITE_SLEEP_TIMEOUT
+#else
+#define CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT 28800000000ULL
+#endif
+
+/*! When unloading try a few times to free everything remaining on the list */
+#define CLEANUP_THREAD_UNLOAD_RETRY 4
+
+#define PVRSRV_TL_CTLR_STREAM_SIZE 4096
+
+static PVRSRV_DATA     *gpsPVRSRVData;
+static IMG_UINT32 g_ui32InitFlags;
+
+/* mark which parts of Services were initialised */
+#define                INIT_DATA_ENABLE_PDUMPINIT      0x1U
+
+/* Callback to dump cleanup-thread info into the debug dump */
+static void CleanupThreadDumpInfo(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf,
+                                  void *pvDumpDebugFile)
+{
+       PVRSRV_DATA *psPVRSRVData;
+       psPVRSRVData = PVRSRVGetPVRSRVData();
+
+       PVR_DUMPDEBUG_LOG("    Number of deferred cleanup items Queued : %u",
+                             OSAtomicRead(&psPVRSRVData->i32NumCleanupItemsQueued));
+       PVR_DUMPDEBUG_LOG("    Number of deferred cleanup items dropped after "
+                             "retry limit reached : %u",
+                             OSAtomicRead(&psPVRSRVData->i32NumCleanupItemsNotCompleted));
+}
+
+/* Add work to the cleanup thread work list.
+ * The work item will be executed by the cleanup thread
+ */
+void PVRSRVCleanupThreadAddWork(PVRSRV_CLEANUP_THREAD_WORK *psData)
+{
+       PVRSRV_DATA *psPVRSRVData;
+       PVRSRV_ERROR eError;
+
+       psPVRSRVData = PVRSRVGetPVRSRVData();
+
+       PVR_ASSERT(psData != NULL);
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+       if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK || psPVRSRVData->bUnload)
+#else
+       if (psPVRSRVData->bUnload)
+#endif
+       {
+               CLEANUP_THREAD_FN pfnFree = psData->pfnFree;
+
+               PVR_DPF((PVR_DBG_MESSAGE, "Cleanup thread has already quit: doing work immediately"));
+
+               eError = pfnFree(psData->pvData);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "Failed to free resource "
+                                               "(callback " IMG_PFN_FMTSPEC "). "
+                                               "Immediate free will not be retried.",
+                                               pfnFree));
+               }
+       }
+       else
+       {
+               OS_SPINLOCK_FLAGS uiFlags;
+
+               /* add this work item to the list */
+               OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags);
+               dllist_add_to_tail(&psPVRSRVData->sCleanupThreadWorkList, &psData->sNode);
+               OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags);
+
+               OSAtomicIncrement(&psPVRSRVData->i32NumCleanupItemsQueued);
+
+               /* signal the cleanup thread to ensure this item gets processed */
+               eError = OSEventObjectSignal(psPVRSRVData->hCleanupEventObject);
+               PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+       }
+}
+
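+/* Illustrative usage sketch (not part of this patch): a hypothetical producer
+ * deferring the release of an object to the cleanup thread. The fields shown
+ * are the ones consumed by the work-list processing below; psMyObject,
+ * _ExampleFree, the embedded sCleanupWork member and the retry count of 3 are
+ * invented for the example, and a count-based (not timeout-based) work item
+ * is assumed.
+ *
+ *     static PVRSRV_ERROR _ExampleFree(void *pvData)
+ *     {
+ *             // Return PVRSRV_OK once the resource is gone, or an error to
+ *             // have the item re-queued until ui32RetryCount reaches zero.
+ *             return PVRSRV_OK;
+ *     }
+ *
+ *     psMyObject->sCleanupWork.pfnFree = _ExampleFree;
+ *     psMyObject->sCleanupWork.pvData = psMyObject;
+ *     psMyObject->sCleanupWork.bDependsOnHW = IMG_FALSE;
+ *     psMyObject->sCleanupWork.ui32RetryCount = 3;
+ *     PVRSRVCleanupThreadAddWork(&psMyObject->sCleanupWork);
+ */
+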
+/* Pop an item from the head of the cleanup thread work list */
+static INLINE DLLIST_NODE *_CleanupThreadWorkListPop(PVRSRV_DATA *psPVRSRVData)
+{
+       DLLIST_NODE *psNode;
+       OS_SPINLOCK_FLAGS uiFlags;
+
+       OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags);
+       psNode = dllist_get_next_node(&psPVRSRVData->sCleanupThreadWorkList);
+       if (psNode != NULL)
+       {
+               dllist_remove_node(psNode);
+       }
+       OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags);
+
+       return psNode;
+}
+
+/* Process the cleanup thread work list */
+static IMG_BOOL _CleanupThreadProcessWorkList(PVRSRV_DATA *psPVRSRVData,
+                                              IMG_BOOL *pbUseGlobalEO)
+{
+       DLLIST_NODE *psNodeIter, *psNodeLast;
+       PVRSRV_ERROR eError;
+       IMG_BOOL bNeedRetry = IMG_FALSE;
+       OS_SPINLOCK_FLAGS uiFlags;
+
+       /* Any callback function which returns an error is moved to the back of
+        * the list. Additional items can be added to the list at any time, so
+        * we only iterate from the head of the list up to the tail captured at
+        * this point (the real tail may keep changing).
+        */
+
+       OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags);
+       psNodeLast = dllist_get_prev_node(&psPVRSRVData->sCleanupThreadWorkList);
+       OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags);
+
+       if (psNodeLast == NULL)
+       {
+               /* no elements to clean up */
+               return IMG_FALSE;
+       }
+
+       do
+       {
+               psNodeIter = _CleanupThreadWorkListPop(psPVRSRVData);
+
+               if (psNodeIter != NULL)
+               {
+                       PVRSRV_CLEANUP_THREAD_WORK *psData = IMG_CONTAINER_OF(psNodeIter, PVRSRV_CLEANUP_THREAD_WORK, sNode);
+                       CLEANUP_THREAD_FN pfnFree;
+
+                       /* get the function pointer address here so we have access to it
+                        * in order to report the error in case of failure, without having
+                        * to depend on psData not having been freed
+                        */
+                       pfnFree = psData->pfnFree;
+
+                       *pbUseGlobalEO = psData->bDependsOnHW;
+                       eError = pfnFree(psData->pvData);
+
+                       if (eError != PVRSRV_OK)
+                       {
+                               /* move to back of the list, if this item's
+                                * retry count hasn't hit zero.
+                                */
+                               if (CLEANUP_THREAD_IS_RETRY_TIMEOUT(psData))
+                               {
+                                       if (CLEANUP_THREAD_RETRY_TIMEOUT_REACHED(psData))
+                                       {
+                                               bNeedRetry = IMG_TRUE;
+                                       }
+                               }
+                               else
+                               {
+                                       if (psData->ui32RetryCount-- > 0)
+                                       {
+                                               bNeedRetry = IMG_TRUE;
+                                       }
+                               }
+
+                               if (bNeedRetry)
+                               {
+                                       OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags);
+                                       dllist_add_to_tail(&psPVRSRVData->sCleanupThreadWorkList, psNodeIter);
+                                       OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags);
+                               }
+                               else
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "Failed to free resource "
+                                                               "(callback " IMG_PFN_FMTSPEC "). "
+                                                               "Retry limit reached",
+                                                               pfnFree));
+                                       OSAtomicDecrement(&psPVRSRVData->i32NumCleanupItemsQueued);
+                                       OSAtomicIncrement(&psPVRSRVData->i32NumCleanupItemsNotCompleted);
+
+                               }
+                       }
+                       else
+                       {
+                               OSAtomicDecrement(&psPVRSRVData->i32NumCleanupItemsQueued);
+                       }
+               }
+       } while ((psNodeIter != NULL) && (psNodeIter != psNodeLast));
+
+       return bNeedRetry;
+}
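+
+/* For illustration only: a hedged sketch of how a producer typically hands a
+ * deferred-free item to the loop above. The field names match the uses of
+ * PVRSRV_CLEANUP_THREAD_WORK in _CleanupThreadProcessWorkList(); the queueing
+ * entry point (PVRSRVCleanupThreadAddWork), MY_RESOURCE and the retry value
+ * are assumptions, not taken from this file.
+ *
+ *     static PVRSRV_ERROR _MyDeferredFree(void *pvData)
+ *     {
+ *             // Return PVRSRV_OK once the resource really was released;
+ *             // any other value re-queues the item (subject to retries).
+ *             return PVRSRV_OK;
+ *     }
+ *
+ *     static void _QueueMyCleanup(MY_RESOURCE *psRes)
+ *     {
+ *             PVRSRV_CLEANUP_THREAD_WORK *psWork = &psRes->sCleanupWork;
+ *
+ *             psWork->pfnFree        = _MyDeferredFree;
+ *             psWork->pvData         = psRes;
+ *             psWork->bDependsOnHW   = IMG_FALSE;  // wait on the cleanup EO, not the global EO
+ *             psWork->ui32RetryCount = 3;          // arbitrary example value
+ *             PVRSRVCleanupThreadAddWork(psWork);  // assumed queueing API
+ *     }
+ */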
+
+// #define CLEANUP_DPFL PVR_DBG_WARNING
+#define CLEANUP_DPFL    PVR_DBG_MESSAGE
+
+/* Create/initialise data required by the cleanup thread,
+ * before the cleanup thread is started
+ */
+static PVRSRV_ERROR _CleanupThreadPrepare(PVRSRV_DATA *psPVRSRVData)
+{
+       PVRSRV_ERROR eError;
+
+       /* Create the clean up event object */
+
+       eError = OSEventObjectCreate("PVRSRV_CLEANUP_EVENTOBJECT", &gpsPVRSRVData->hCleanupEventObject);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", Exit);
+
+       /* initialise the spinlock and linked list required for the cleanup thread work list */
+
+       eError = OSSpinLockCreate(&psPVRSRVData->hCleanupThreadWorkListLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSSpinLockCreate", Exit);
+
+       dllist_init(&psPVRSRVData->sCleanupThreadWorkList);
+
+Exit:
+       return eError;
+}
+
+static void CleanupThread(void *pvData)
+{
+       PVRSRV_DATA *psPVRSRVData = pvData;
+       IMG_BOOL     bRetryWorkList = IMG_FALSE;
+       IMG_HANDLE       hGlobalEvent;
+       IMG_HANDLE       hOSEvent;
+       PVRSRV_ERROR eRc;
+       IMG_BOOL bUseGlobalEO = IMG_FALSE;
+       IMG_UINT32 uiUnloadRetry = 0;
+
+       /* Store the process id (pid) of the clean-up thread */
+       psPVRSRVData->cleanupThreadPid = OSGetCurrentProcessID();
+       psPVRSRVData->cleanupThreadTid = OSGetCurrentThreadID();
+       OSAtomicWrite(&psPVRSRVData->i32NumCleanupItemsQueued, 0);
+       OSAtomicWrite(&psPVRSRVData->i32NumCleanupItemsNotCompleted, 0);
+
+       PVR_DPF((CLEANUP_DPFL, "CleanupThread: thread starting... "));
+
+       /* Open an event on the clean up event object so we can listen on it;
+        * abort the clean up thread and driver if this fails.
+        */
+       eRc = OSEventObjectOpen(psPVRSRVData->hCleanupEventObject, &hOSEvent);
+       PVR_ASSERT(eRc == PVRSRV_OK);
+
+       eRc = OSEventObjectOpen(psPVRSRVData->hGlobalEventObject, &hGlobalEvent);
+       PVR_ASSERT(eRc == PVRSRV_OK);
+
+       /* While the driver is in a good state and is not being unloaded
+        * try to free any deferred items when signalled
+        */
+       while (psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK)
+       {
+               IMG_HANDLE hEvent;
+
+               if (psPVRSRVData->bUnload)
+               {
+                       if (dllist_is_empty(&psPVRSRVData->sCleanupThreadWorkList) ||
+                                       uiUnloadRetry > CLEANUP_THREAD_UNLOAD_RETRY)
+                       {
+                               break;
+                       }
+                       uiUnloadRetry++;
+               }
+
+               /* Wait until signalled for deferred clean up OR wait for a
+                * short period if the previous deferred clean up was not able
+                * to release all the resources before trying again.
+                * Bridge lock re-acquired on our behalf before the wait call returns.
+                */
+
+               if (bRetryWorkList && bUseGlobalEO)
+               {
+                       hEvent = hGlobalEvent;
+               }
+               else
+               {
+                       hEvent = hOSEvent;
+               }
+
+               eRc = OSEventObjectWaitKernel(hEvent,
+                               bRetryWorkList ?
+                               CLEANUP_THREAD_WAIT_RETRY_TIMEOUT :
+                               CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT);
+               if (eRc == PVRSRV_ERROR_TIMEOUT)
+               {
+                       PVR_DPF((CLEANUP_DPFL, "CleanupThread: wait timeout"));
+               }
+               else if (eRc == PVRSRV_OK)
+               {
+                       PVR_DPF((CLEANUP_DPFL, "CleanupThread: wait OK, signal received"));
+               }
+               else
+               {
+                       PVR_LOG_ERROR(eRc, "OSEventObjectWaitKernel");
+               }
+
+               bRetryWorkList = _CleanupThreadProcessWorkList(psPVRSRVData, &bUseGlobalEO);
+       }
+
+       OSSpinLockDestroy(psPVRSRVData->hCleanupThreadWorkListLock);
+
+       eRc = OSEventObjectClose(hOSEvent);
+       PVR_LOG_IF_ERROR(eRc, "OSEventObjectClose");
+
+       eRc = OSEventObjectClose(hGlobalEvent);
+       PVR_LOG_IF_ERROR(eRc, "OSEventObjectClose");
+
+       PVR_DPF((CLEANUP_DPFL, "CleanupThread: thread ending... "));
+}
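+
+/* Note on the wait-object selection above: when the previous pass left items
+ * behind (bRetryWorkList) the thread waits for the shorter
+ * CLEANUP_THREAD_WAIT_RETRY_TIMEOUT, and if that leftover work was flagged as
+ * depending on hardware (bUseGlobalEO, taken from bDependsOnHW) it waits on
+ * the global event object rather than the cleanup thread's own event object,
+ * presumably so that device activity can wake it earlier.
+ */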
+
+IMG_PID PVRSRVCleanupThreadGetPid(void)
+{
+       return gpsPVRSRVData->cleanupThreadPid;
+}
+
+uintptr_t PVRSRVCleanupThreadGetTid(void)
+{
+       return gpsPVRSRVData->cleanupThreadTid;
+}
+
+static void DevicesWatchdogThread_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                                         va_list va)
+{
+#if defined(SUPPORT_RGX)
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+#endif
+       PVRSRV_DEVICE_HEALTH_STATUS *pePreviousHealthStatus, eHealthStatus;
+       PVRSRV_ERROR eError;
+       PVRSRV_DEVICE_DEBUG_DUMP_STATUS eDebugDumpState;
+       IMG_BOOL bCheckAfterTimePassed;
+
+       pePreviousHealthStatus = va_arg(va, PVRSRV_DEVICE_HEALTH_STATUS *);
+       bCheckAfterTimePassed = va_arg(va, IMG_BOOL);
+
+       if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE)
+       {
+               return;
+       }
+
+       if (psDeviceNode->pfnUpdateHealthStatus != NULL)
+       {
+               eError = psDeviceNode->pfnUpdateHealthStatus(psDeviceNode, bCheckAfterTimePassed);
+               PVR_WARN_IF_ERROR(eError, "pfnUpdateHealthStatus");
+       }
+       eHealthStatus = OSAtomicRead(&psDeviceNode->eHealthStatus);
+
+       if (eHealthStatus != PVRSRV_DEVICE_HEALTH_STATUS_OK)
+       {
+               if (eHealthStatus != *pePreviousHealthStatus)
+               {
+#if defined(SUPPORT_RGX)
+                       if (!(psDevInfo->ui32DeviceFlags &
+                                 RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN))
+#else
+                       /* In this case we don't have an RGX device */
+                       if (eHealthStatus != PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED)
+#endif
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "DevicesWatchdogThread: "
+                                                "Device status not OK!!!"));
+                               PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX,
+                                                                  NULL, NULL);
+                       }
+               }
+       }
+
+       *pePreviousHealthStatus = eHealthStatus;
+
+       /* Have we received a request from the FW to capture a debug dump (could be due to HWR)? */
+       eDebugDumpState = (PVRSRV_DEVICE_DEBUG_DUMP_STATUS)OSAtomicCompareExchange(
+                                               &psDeviceNode->eDebugDumpRequested,
+                                               PVRSRV_DEVICE_DEBUG_DUMP_CAPTURE,
+                                               PVRSRV_DEVICE_DEBUG_DUMP_NONE);
+       if (PVRSRV_DEVICE_DEBUG_DUMP_CAPTURE == eDebugDumpState)
+       {
+               PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+       }
+
+}
+
+#if defined(SUPPORT_RGX)
+static void HWPerfPeriodicHostEventsThread(void *pvData)
+{
+       PVRSRV_DATA *psPVRSRVData = pvData;
+       IMG_HANDLE hOSEvent;
+       PVRSRV_ERROR eError;
+
+       eError = OSEventObjectOpen(psPVRSRVData->hHWPerfHostPeriodicEvObj, &hOSEvent);
+       PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectOpen");
+
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+       while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) &&
+                       !psPVRSRVData->bUnload && !psPVRSRVData->bHWPerfHostThreadStop)
+#else
+       while (!psPVRSRVData->bUnload && !psPVRSRVData->bHWPerfHostThreadStop)
+#endif
+       {
+               PVRSRV_DEVICE_NODE *psDeviceNode;
+               IMG_BOOL bInfiniteSleep = IMG_TRUE;
+
+               eError = OSEventObjectWaitKernel(hOSEvent, (IMG_UINT64)psPVRSRVData->ui32HWPerfHostThreadTimeout * 1000);
+               if (eError == PVRSRV_OK && (psPVRSRVData->bUnload || psPVRSRVData->bHWPerfHostThreadStop))
+               {
+                       PVR_DPF((PVR_DBG_MESSAGE, "HWPerfPeriodicHostEventsThread: Shutdown event received."));
+                       break;
+               }
+
+               for (psDeviceNode = psPVRSRVData->psDeviceNodeList;
+                    psDeviceNode != NULL;
+                    psDeviceNode = psDeviceNode->psNext)
+               {
+                       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+                       /* If psDevInfo or hHWPerfHostStream is NULL it most
+                        * likely means that this device or stream has not been
+                        * initialised yet, so just skip */
+                       if (psDevInfo == NULL || psDevInfo->hHWPerfHostStream == NULL)
+                       {
+                               continue;
+                       }
+
+                       /* Check if the HWPerf host stream is open for reading before writing
+                        * a packet, this covers cases where the event filter is not zeroed
+                        * before a reader disconnects. */
+                       if (TLStreamIsOpenForReading(psDevInfo->hHWPerfHostStream))
+                       {
+                               /* As long as any of the streams is opened don't go into
+                                * indefinite sleep. */
+                               bInfiniteSleep = IMG_FALSE;
+#if defined(SUPPORT_RGX)
+                               RGXSRV_HWPERF_HOST_INFO(psDevInfo, RGX_HWPERF_INFO_EV_MEM_USAGE);
+#endif
+                       }
+               }
+
+               if (bInfiniteSleep)
+               {
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+                       psPVRSRVData->ui32HWPerfHostThreadTimeout = INFINITE_SLEEP_TIMEOUT;
+#else
+                       /* Use an 8 hour timeout if indefinite sleep is not supported. */
+                       psPVRSRVData->ui32HWPerfHostThreadTimeout = 60 * 60 * 8 * 1000;
+#endif
+               }
+       }
+
+       eError = OSEventObjectClose(hOSEvent);
+       PVR_LOG_IF_ERROR(eError, "OSEventObjectClose");
+}
+#endif
+
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+
+typedef enum
+{
+       DWT_ST_INIT,
+       DWT_ST_SLEEP_POWERON,
+       DWT_ST_SLEEP_POWEROFF,
+       DWT_ST_SLEEP_DEFERRED,
+       DWT_ST_FINAL
+} DWT_STATE;
+
+typedef enum
+{
+       DWT_SIG_POWERON,
+       DWT_SIG_POWEROFF,
+       DWT_SIG_TIMEOUT,
+       DWT_SIG_UNLOAD,
+       DWT_SIG_ERROR
+} DWT_SIGNAL;
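+
+/* Summary of the watchdog state machine driven by DevicesWatchdogThread()
+ * below (derived from the switch statements; listed here for readability):
+ *
+ *   DWT_ST_INIT           -> SLEEP_POWERON if any device is powered,
+ *                            otherwise SLEEP_POWEROFF.
+ *   DWT_ST_SLEEP_POWERON  -> SLEEP_DEFERRED on POWEROFF, FINAL on UNLOAD;
+ *                            TIMEOUT runs a health check and stays put.
+ *   DWT_ST_SLEEP_POWEROFF -> SLEEP_POWERON on POWERON (with a health check),
+ *                            FINAL on UNLOAD; TIMEOUT health-checks in place.
+ *   DWT_ST_SLEEP_DEFERRED -> SLEEP_POWERON on POWERON, SLEEP_POWEROFF on
+ *                            TIMEOUT (both with a health check), FINAL on
+ *                            UNLOAD.
+ *   DWT_ST_FINAL          -> the loop exits on the next spin.
+ *
+ * DWT_SIG_ERROR is deliberately ignored in every state.
+ */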
+
+static inline IMG_BOOL _DwtIsPowerOn(PVRSRV_DATA *psPVRSRVData)
+{
+       return List_PVRSRV_DEVICE_NODE_IMG_BOOL_Any(psPVRSRVData->psDeviceNodeList,
+                                                   PVRSRVIsDevicePowered);
+}
+
+static inline void _DwtCheckHealthStatus(PVRSRV_DATA *psPVRSRVData,
+                                         PVRSRV_DEVICE_HEALTH_STATUS *peStatus,
+                                         IMG_BOOL bTimeOut)
+{
+       List_PVRSRV_DEVICE_NODE_ForEach_va(psPVRSRVData->psDeviceNodeList,
+                                          DevicesWatchdogThread_ForEachVaCb,
+                                          peStatus,
+                                          bTimeOut);
+}
+
+static DWT_SIGNAL _DwtWait(PVRSRV_DATA *psPVRSRVData, IMG_HANDLE hOSEvent,
+                           IMG_UINT32 ui32Timeout)
+{
+       PVRSRV_ERROR eError;
+
+       eError = OSEventObjectWaitKernel(hOSEvent, (IMG_UINT64) ui32Timeout * 1000);
+
+#ifdef PVR_TESTING_UTILS
+       psPVRSRVData->ui32DevicesWdWakeupCounter++;
+#endif
+
+       if (eError == PVRSRV_OK)
+       {
+               if (psPVRSRVData->bUnload)
+               {
+                       PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Shutdown event"
+                               " received."));
+                       return DWT_SIG_UNLOAD;
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Power state "
+                               "change event received."));
+
+                       if (_DwtIsPowerOn(psPVRSRVData))
+                       {
+                               return DWT_SIG_POWERON;
+                       }
+                       else
+                       {
+                               return DWT_SIG_POWEROFF;
+                       }
+               }
+       }
+       else if (eError == PVRSRV_ERROR_TIMEOUT)
+       {
+               return DWT_SIG_TIMEOUT;
+       }
+
+       PVR_DPF((PVR_DBG_ERROR, "DevicesWatchdogThread: Error (%d) when"
+               " waiting for event!", eError));
+       return DWT_SIG_ERROR;
+}
+
+#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */
+
+static void DevicesWatchdogThread(void *pvData)
+{
+       PVRSRV_DATA *psPVRSRVData = pvData;
+       PVRSRV_DEVICE_HEALTH_STATUS ePreviousHealthStatus = PVRSRV_DEVICE_HEALTH_STATUS_OK;
+       IMG_HANDLE hOSEvent;
+       PVRSRV_ERROR eError;
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+       DWT_STATE eState = DWT_ST_INIT;
+       const IMG_UINT32 ui32OnTimeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT;
+       const IMG_UINT32 ui32OffTimeout = INFINITE_SLEEP_TIMEOUT;
+#else
+       IMG_UINT32 ui32Timeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT;
+       /* Flag used to defer the sleep timeout change by 1 loop iteration.
+        * This helps to ensure at least two health checks are performed before a long sleep.
+        */
+       IMG_BOOL bDoDeferredTimeoutChange = IMG_FALSE;
+#endif
+
+       PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Power off sleep time: %d.",
+                       DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT));
+
+       /* Open an event on the devices watchdog event object so we can listen on it
+          and abort the devices watchdog thread. */
+       eError = OSEventObjectOpen(psPVRSRVData->hDevicesWatchdogEvObj, &hOSEvent);
+       PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectOpen");
+
+       /* Loop continuously checking the device status every few seconds. */
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+       while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) &&
+                       !psPVRSRVData->bUnload)
+#else
+       while (!psPVRSRVData->bUnload)
+#endif
+       {
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+
+               switch (eState)
+               {
+                       case DWT_ST_INIT:
+                       {
+                               if (_DwtIsPowerOn(psPVRSRVData))
+                               {
+                                       eState = DWT_ST_SLEEP_POWERON;
+                               }
+                               else
+                               {
+                                       eState = DWT_ST_SLEEP_POWEROFF;
+                               }
+
+                               break;
+                       }
+                       case DWT_ST_SLEEP_POWERON:
+                       {
+                               DWT_SIGNAL eSignal = _DwtWait(psPVRSRVData, hOSEvent,
+                                                                   ui32OnTimeout);
+
+                               switch (eSignal) {
+                                       case DWT_SIG_POWERON:
+                                               /* self-transition, nothing to do */
+                                               break;
+                                       case DWT_SIG_POWEROFF:
+                                               eState = DWT_ST_SLEEP_DEFERRED;
+                                               break;
+                                       case DWT_SIG_TIMEOUT:
+                                               _DwtCheckHealthStatus(psPVRSRVData,
+                                                                     &ePreviousHealthStatus,
+                                                                     IMG_TRUE);
+                                               /* self-transition */
+                                               break;
+                                       case DWT_SIG_UNLOAD:
+                                               eState = DWT_ST_FINAL;
+                                               break;
+                                       case DWT_SIG_ERROR:
+                                               /* deliberately ignored */
+                                               break;
+                               }
+
+                               break;
+                       }
+                       case DWT_ST_SLEEP_POWEROFF:
+                       {
+                               DWT_SIGNAL eSignal = _DwtWait(psPVRSRVData, hOSEvent,
+                                                                   ui32OffTimeout);
+
+                               switch (eSignal) {
+                                       case DWT_SIG_POWERON:
+                                               eState = DWT_ST_SLEEP_POWERON;
+                                               _DwtCheckHealthStatus(psPVRSRVData,
+                                                                     &ePreviousHealthStatus,
+                                                                     IMG_FALSE);
+                                               break;
+                                       case DWT_SIG_POWEROFF:
+                                               /* self-transition, nothing to do */
+                                               break;
+                                       case DWT_SIG_TIMEOUT:
+                                               /* self-transition */
+                                               _DwtCheckHealthStatus(psPVRSRVData,
+                                                                     &ePreviousHealthStatus,
+                                                                     IMG_TRUE);
+                                               break;
+                                       case DWT_SIG_UNLOAD:
+                                               eState = DWT_ST_FINAL;
+                                               break;
+                                       case DWT_SIG_ERROR:
+                                               /* deliberately ignored */
+                                               break;
+                               }
+
+                               break;
+                       }
+                       case DWT_ST_SLEEP_DEFERRED:
+                       {
+                               DWT_SIGNAL eSignal = _DwtWait(psPVRSRVData, hOSEvent,
+                                                                   ui32OnTimeout);
+
+                               switch (eSignal) {
+                                       case DWT_SIG_POWERON:
+                                               eState = DWT_ST_SLEEP_POWERON;
+                                               _DwtCheckHealthStatus(psPVRSRVData,
+                                                                     &ePreviousHealthStatus,
+                                                                     IMG_FALSE);
+                                               break;
+                                       case DWT_SIG_POWEROFF:
+                                               /* self-transition, nothing to do */
+                                               break;
+                                       case DWT_SIG_TIMEOUT:
+                                               eState = DWT_ST_SLEEP_POWEROFF;
+                                               _DwtCheckHealthStatus(psPVRSRVData,
+                                                                     &ePreviousHealthStatus,
+                                                                     IMG_FALSE);
+                                               break;
+                                       case DWT_SIG_UNLOAD:
+                                               eState = DWT_ST_FINAL;
+                                               break;
+                                       case DWT_SIG_ERROR:
+                                               /* deliberately ignored */
+                                               break;
+                               }
+
+                               break;
+                       }
+                       case DWT_ST_FINAL:
+                               /* the loop should terminate on next spin if this state is
+                                * reached so nothing to do here. */
+                               break;
+               }
+
+#else /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */
+               IMG_BOOL bPwrIsOn = IMG_FALSE;
+               IMG_BOOL bTimeOut = IMG_FALSE;
+
+               /* Wait time between polls (done at the start of the loop to allow devices
+                  to initialise) or for the event signal (shutdown or power on). */
+               eError = OSEventObjectWaitKernel(hOSEvent, (IMG_UINT64)ui32Timeout * 1000);
+
+#ifdef PVR_TESTING_UTILS
+               psPVRSRVData->ui32DevicesWdWakeupCounter++;
+#endif
+               if (eError == PVRSRV_OK)
+               {
+                       if (psPVRSRVData->bUnload)
+                       {
+                               PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Shutdown event received."));
+                               break;
+                       }
+                       else
+                       {
+                               PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Power state change event received."));
+                       }
+               }
+               else if (eError != PVRSRV_ERROR_TIMEOUT)
+               {
+                       /* If timeout do nothing otherwise print warning message. */
+                       PVR_DPF((PVR_DBG_ERROR, "DevicesWatchdogThread: "
+                                       "Error (%d) when waiting for event!", eError));
+               }
+               else
+               {
+                       bTimeOut = IMG_TRUE;
+               }
+
+               OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock);
+               List_PVRSRV_DEVICE_NODE_ForEach_va(psPVRSRVData->psDeviceNodeList,
+                                                  DevicesWatchdogThread_ForEachVaCb,
+                                                  &ePreviousHealthStatus,
+                                                  bTimeOut);
+               bPwrIsOn = List_PVRSRV_DEVICE_NODE_IMG_BOOL_Any(psPVRSRVData->psDeviceNodeList,
+                                                                                                               PVRSRVIsDevicePowered);
+               OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock);
+
+               if (bPwrIsOn || psPVRSRVData->ui32DevicesWatchdogPwrTrans)
+               {
+                       psPVRSRVData->ui32DevicesWatchdogPwrTrans = 0;
+                       ui32Timeout = psPVRSRVData->ui32DevicesWatchdogTimeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT;
+                       bDoDeferredTimeoutChange = IMG_FALSE;
+               }
+               else
+               {
+                       /* First, check if the previous loop iteration signalled a need to change the timeout period */
+                       if (bDoDeferredTimeoutChange)
+                       {
+                               ui32Timeout = psPVRSRVData->ui32DevicesWatchdogTimeout = DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT;
+                               bDoDeferredTimeoutChange = IMG_FALSE;
+                       }
+                       else
+                       {
+                               /* Signal that the sleep timeout should change in the next loop
+                                * iteration. This gives the device health check code one more
+                                * pass at the current sleep timeout, so that bad health (e.g. a
+                                * stalled cCCB) can be detected by comparing past and current
+                                * state snapshots. */
+                               bDoDeferredTimeoutChange = IMG_TRUE;
+                       }
+               }
+
+#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */
+       }
+
+       eError = OSEventObjectClose(hOSEvent);
+       PVR_LOG_IF_ERROR(eError, "OSEventObjectClose");
+}
+
+#if defined(SUPPORT_AUTOVZ)
+static void AutoVzWatchdogThread_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE)
+       {
+               return;
+       }
+       else if (psDeviceNode->pfnUpdateAutoVzWatchdog != NULL)
+       {
+               psDeviceNode->pfnUpdateAutoVzWatchdog(psDeviceNode);
+       }
+}
+
+static void AutoVzWatchdogThread(void *pvData)
+{
+       PVRSRV_DATA *psPVRSRVData = pvData;
+       IMG_HANDLE hOSEvent;
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32Timeout = PVR_AUTOVZ_WDG_PERIOD_MS / 3;
+
+       /* Open an event on the devices watchdog event object so we can listen on it
+          and abort the devices watchdog thread. */
+       eError = OSEventObjectOpen(psPVRSRVData->hAutoVzWatchdogEvObj, &hOSEvent);
+       PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectOpen");
+
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+       while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) &&
+                       !psPVRSRVData->bUnload)
+#else
+       while (!psPVRSRVData->bUnload)
+#endif
+       {
+               /* Wait time between polls (done at the start of the loop to allow devices
+                  to initialise) or for the event signal (shutdown or power on). */
+               eError = OSEventObjectWaitKernel(hOSEvent, (IMG_UINT64)ui32Timeout * 1000);
+
+               List_PVRSRV_DEVICE_NODE_ForEach(psPVRSRVData->psDeviceNodeList,
+                                               AutoVzWatchdogThread_ForEachCb);
+       }
+
+       eError = OSEventObjectClose(hOSEvent);
+       PVR_LOG_IF_ERROR(eError, "OSEventObjectClose");
+}
+#endif /* SUPPORT_AUTOVZ */
+
+PVRSRV_DATA *PVRSRVGetPVRSRVData(void)
+{
+       return gpsPVRSRVData;
+}
+
+static PVRSRV_ERROR InitialiseInfoPageTimeouts(PVRSRV_DATA *psPVRSRVData)
+{
+       if (NULL == psPVRSRVData)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_VALUE_RETRIES] = WAIT_TRY_COUNT;
+       psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_VALUE_TIMEOUT_MS] =
+               ((MAX_HW_TIME_US / 10000) + 1000);
+               /* TIMEOUT_INFO_VALUE_TIMEOUT_MS resolves to...
+                       vp       : 2000  + 1000
+                       emu      : 2000  + 1000
+                       rgx_nohw : 50    + 1000
+                       plato    : 30000 + 1000 (VIRTUAL_PLATFORM or EMULATOR)
+                                  50    + 1000 (otherwise)
+               */
+
+       psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_CONDITION_RETRIES] = 5;
+       psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_CONDITION_TIMEOUT_MS] =
+               ((MAX_HW_TIME_US / 10000) + 100);
+               /* TIMEOUT_INFO_CONDITION_TIMEOUT_MS resolves to...
+                       vp       : 2000  + 100
+                       emu      : 2000  + 100
+                       rgx_nohw : 50    + 100
+                       plato    : 30000 + 100 (VIRTUAL_PLATFORM or EMULATOR)
+                                  50    + 100 (otherwise)
+               */
+
+       psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_TASK_QUEUE_RETRIES] = 10;
+#if defined(VIRTUAL_PLATFORM)
+       psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_TASK_QUEUE_FLUSH_TIMEOUT_MS] = 1200000U;
+#else
+#if defined(EMULATOR)
+       psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_TASK_QUEUE_FLUSH_TIMEOUT_MS] = 20000U;
+#else
+       psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_TASK_QUEUE_FLUSH_TIMEOUT_MS] = 1000U;
+#endif /* EMULATOR */
+#endif
+
+       return PVRSRV_OK;
+}
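+
+/* Worked example of the arithmetic above, using the rgx_nohw figure quoted in
+ * the comments (MAX_HW_TIME_US / 10000 == 50, i.e. MAX_HW_TIME_US == 500000us):
+ *   TIMEOUT_INFO_VALUE_TIMEOUT_MS     = 50 + 1000 = 1050 ms
+ *   TIMEOUT_INFO_CONDITION_TIMEOUT_MS = 50 + 100  = 150 ms
+ */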
+
+static PVRSRV_ERROR PopulateInfoPageBridges(PVRSRV_DATA *psPVRSRVData)
+{
+       PVR_RETURN_IF_INVALID_PARAM(psPVRSRVData);
+
+       psPVRSRVData->pui32InfoPage[BRIDGE_INFO_PVR_BRIDGES] = gui32PVRBridges;
+
+#if defined(SUPPORT_RGX)
+       psPVRSRVData->pui32InfoPage[BRIDGE_INFO_RGX_BRIDGES] = gui32RGXBridges;
+#else
+       psPVRSRVData->pui32InfoPage[BRIDGE_INFO_RGX_BRIDGES] = 0;
+#endif
+
+       return PVRSRV_OK;
+}
+
+static void _ThreadsDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDbgRequestHandle,
+                                       IMG_UINT32 ui32VerbLevel,
+                                       DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                       void *pvDumpDebugFile)
+{
+       PVR_UNREFERENCED_PARAMETER(hDbgRequestHandle);
+
+       if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH))
+       {
+               PVR_DUMPDEBUG_LOG("------[ Server Thread Summary ]------");
+               OSThreadDumpInfo(pfnDumpDebugPrintf, pvDumpDebugFile);
+       }
+}
+
+PVRSRV_ERROR
+PVRSRVCommonDriverInit(void)
+{
+       PVRSRV_ERROR eError;
+
+       PVRSRV_DATA     *psPVRSRVData = NULL;
+
+       IMG_UINT32 ui32AppHintCleanupThreadPriority;
+       IMG_UINT32 ui32AppHintWatchdogThreadPriority;
+       IMG_BOOL bEnablePageFaultDebug;
+       IMG_BOOL bEnableFullSyncTracking;
+
+       void *pvAppHintState = NULL;
+       IMG_UINT32 ui32AppHintDefault;
+
+       /*
+        * As this function performs one time driver initialisation, use the
+        * Services global device-independent data to determine whether or not
+        * this function has already been called.
+        */
+       if (gpsPVRSRVData)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Driver already initialised", __func__));
+               return PVRSRV_ERROR_ALREADY_EXISTS;
+       }
+
+       /*
+        * Allocate the device-independent data
+        */
+       psPVRSRVData = OSAllocZMem(sizeof(*gpsPVRSRVData));
+       PVR_GOTO_IF_NOMEM(psPVRSRVData, eError, Error);
+
+       /* Now it is set up, point gpsPVRSRVData to the actual data */
+       gpsPVRSRVData = psPVRSRVData;
+
+       eError = OSWRLockCreate(&gpsPVRSRVData->hDeviceNodeListLock);
+       PVR_GOTO_IF_ERROR(eError, Error);
+
+       /* Register the driver context debug table */
+       eError = PVRSRVRegisterDriverDbgTable();
+       PVR_GOTO_IF_ERROR(eError, Error);
+
+       /* Register the Server Thread Debug notifier */
+       eError = PVRSRVRegisterDriverDbgRequestNotify(&gpsPVRSRVData->hThreadsDbgReqNotify,
+                                                         _ThreadsDebugRequestNotify,
+                                                         DEBUG_REQUEST_SRV,
+                                                         NULL);
+       PVR_GOTO_IF_ERROR(eError, Error);
+
+       eError = DIInit();
+       PVR_GOTO_IF_ERROR(eError, Error);
+
+#if defined(SUPPORT_DI_BRG_IMPL)
+       eError = PVRDIImplBrgRegister();
+       PVR_GOTO_IF_ERROR(eError, Error);
+#endif
+
+#ifdef PVRSRV_ENABLE_PROCESS_STATS
+       eError = PVRSRVStatsInitialise();
+       PVR_GOTO_IF_ERROR(eError, Error);
+#endif /* PVRSRV_ENABLE_PROCESS_STATS */
+
+       eError = HTB_CreateDIEntry();
+       PVR_GOTO_IF_ERROR(eError, Error);
+
+       /*
+        * Initialise the server bridges
+        */
+       eError = ServerBridgeInit();
+       PVR_GOTO_IF_ERROR(eError, Error);
+
+       eError = PhysHeapInit();
+       PVR_GOTO_IF_ERROR(eError, Error);
+
+       eError = DevmemIntInit();
+       PVR_GOTO_IF_ERROR(eError, Error);
+
+       eError = DebugCommonInitDriver();
+       PVR_GOTO_IF_ERROR(eError, Error);
+
+       eError = BridgeDispatcherInit();
+       PVR_GOTO_IF_ERROR(eError, Error);
+
+       /* Initialise any OS specifics */
+       eError = OSInitEnvData();
+       PVR_GOTO_IF_ERROR(eError, Error);
+
+       /* Early initialisation of server cache maintenance */
+       eError = CacheOpInit();
+       PVR_GOTO_IF_ERROR(eError, Error);
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+       RIInitKM();
+#endif
+
+       ui32AppHintDefault = PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG;
+       OSCreateKMAppHintState(&pvAppHintState);
+       OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnablePageFaultDebug,
+                       &ui32AppHintDefault, &bEnablePageFaultDebug);
+       OSFreeKMAppHintState(pvAppHintState);
+
+       if (bEnablePageFaultDebug)
+       {
+               eError = DevicememHistoryInitKM();
+               PVR_LOG_GOTO_IF_ERROR(eError, "DevicememHistoryInitKM", Error);
+       }
+
+       eError = PMRInit();
+       PVR_GOTO_IF_ERROR(eError, Error);
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+       eError = DCInit();
+       PVR_GOTO_IF_ERROR(eError, Error);
+#endif
+
+       /* Initialise overall system state */
+       gpsPVRSRVData->eServicesState = PVRSRV_SERVICES_STATE_OK;
+
+       /* Create an event object */
+       eError = OSEventObjectCreate("PVRSRV_GLOBAL_EVENTOBJECT", &gpsPVRSRVData->hGlobalEventObject);
+       PVR_GOTO_IF_ERROR(eError, Error);
+       gpsPVRSRVData->ui32GEOConsecutiveTimeouts = 0;
+
+       eError = PVRSRVCmdCompleteInit();
+       PVR_GOTO_IF_ERROR(eError, Error);
+
+       eError = PVRSRVHandleInit();
+       PVR_GOTO_IF_ERROR(eError, Error);
+
+       OSCreateKMAppHintState(&pvAppHintState);
+       ui32AppHintDefault = PVRSRV_APPHINT_CLEANUPTHREADPRIORITY;
+       OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, CleanupThreadPriority,
+                            &ui32AppHintDefault, &ui32AppHintCleanupThreadPriority);
+
+       ui32AppHintDefault = PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY;
+       OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, WatchdogThreadPriority,
+                            &ui32AppHintDefault, &ui32AppHintWatchdogThreadPriority);
+
+       ui32AppHintDefault = PVRSRV_APPHINT_ENABLEFULLSYNCTRACKING;
+       OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnableFullSyncTracking,
+                       &ui32AppHintDefault, &bEnableFullSyncTracking);
+       OSFreeKMAppHintState(pvAppHintState);
+       pvAppHintState = NULL;
+
+       eError = _CleanupThreadPrepare(gpsPVRSRVData);
+       PVR_LOG_GOTO_IF_ERROR(eError, "_CleanupThreadPrepare", Error);
+
+       /* Create a thread which is used to do the deferred cleanup */
+       eError = OSThreadCreatePriority(&gpsPVRSRVData->hCleanupThread,
+                                       "pvr_defer_free",
+                                       CleanupThread,
+                                       CleanupThreadDumpInfo,
+                                       IMG_TRUE,
+                                       gpsPVRSRVData,
+                                       ui32AppHintCleanupThreadPriority);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSThreadCreatePriority:1", Error);
+
+       /* Create the devices watchdog event object */
+       eError = OSEventObjectCreate("PVRSRV_DEVICESWATCHDOG_EVENTOBJECT", &gpsPVRSRVData->hDevicesWatchdogEvObj);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", Error);
+
+       /* Create a thread which is used to detect fatal errors */
+       eError = OSThreadCreatePriority(&gpsPVRSRVData->hDevicesWatchdogThread,
+                                       "pvr_device_wdg",
+                                       DevicesWatchdogThread,
+                                       NULL,
+                                       IMG_TRUE,
+                                       gpsPVRSRVData,
+                                       ui32AppHintWatchdogThreadPriority);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSThreadCreatePriority:2", Error);
+
+#if defined(SUPPORT_AUTOVZ)
+       /* Create the devices watchdog event object */
+       eError = OSEventObjectCreate("PVRSRV_AUTOVZ_WATCHDOG_EVENTOBJECT", &gpsPVRSRVData->hAutoVzWatchdogEvObj);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", Error);
+
+       /* Create a thread that maintains the FW-KM connection by regularly updating the virtualization watchdog */
+       eError = OSThreadCreatePriority(&gpsPVRSRVData->hAutoVzWatchdogThread,
+                                       "pvr_autovz_wdg",
+                                       AutoVzWatchdogThread,
+                                       NULL,
+                                       IMG_TRUE,
+                                       gpsPVRSRVData,
+                                       OS_THREAD_HIGHEST_PRIORITY);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSThreadCreatePriority:3", Error);
+#endif /* SUPPORT_AUTOVZ */
+
+#if defined(SUPPORT_RGX)
+       eError = OSLockCreate(&gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", Error);
+#endif
+
+       eError = HostMemDeviceCreate(&gpsPVRSRVData->psHostMemDeviceNode);
+       PVR_GOTO_IF_ERROR(eError, Error);
+
+       /* Initialise the Transport Layer */
+       eError = TLInit();
+       PVR_GOTO_IF_ERROR(eError, Error);
+
+       /* Initialise pdump */
+       eError = PDUMPINIT();
+       PVR_GOTO_IF_ERROR(eError, Error);
+
+       g_ui32InitFlags |= INIT_DATA_ENABLE_PDUMPINIT;
+
+       /* Initialise TL control stream */
+       eError = TLStreamCreate(&psPVRSRVData->hTLCtrlStream,
+                               PVRSRV_TL_CTLR_STREAM, PVRSRV_TL_CTLR_STREAM_SIZE,
+                               TL_OPMODE_DROP_OLDEST, NULL, NULL, NULL,
+                               NULL);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_LOG_ERROR(eError, "TLStreamCreate");
+               psPVRSRVData->hTLCtrlStream = NULL;
+       }
+
+       eError = InfoPageCreate(psPVRSRVData);
+       PVR_LOG_GOTO_IF_ERROR(eError, "InfoPageCreate", Error);
+
+       /* Initialise the Timeout Info */
+       eError = InitialiseInfoPageTimeouts(psPVRSRVData);
+       PVR_GOTO_IF_ERROR(eError, Error);
+
+       eError = PopulateInfoPageBridges(psPVRSRVData);
+       PVR_GOTO_IF_ERROR(eError, Error);
+
+       if (bEnableFullSyncTracking)
+       {
+               psPVRSRVData->pui32InfoPage[DEBUG_FEATURE_FLAGS] |= DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED;
+       }
+       if (bEnablePageFaultDebug)
+       {
+               psPVRSRVData->pui32InfoPage[DEBUG_FEATURE_FLAGS] |= DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED;
+       }
+
+       /* Initialise the Host Trace Buffer */
+       eError = HTBInit();
+       PVR_GOTO_IF_ERROR(eError, Error);
+
+#if defined(SUPPORT_RGX)
+       RGXHWPerfClientInitAppHintCallbacks();
+#endif
+
+       /* Late initialisation of client cache maintenance via the info page */
+       eError = CacheOpInit2();
+       PVR_LOG_GOTO_IF_ERROR(eError, "CacheOpInit2", Error);
+
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+       eError = SyncFbRegisterSyncFunctions();
+       PVR_LOG_GOTO_IF_ERROR(eError, "SyncFbRegisterSyncFunctions", Error);
+#endif
+
+#if defined(PDUMP)
+#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_CONNECT)
+       /* If initialising the device on first connection, we will
+        * bind PDump capture to the first device we connect to later.
+        */
+       psPVRSRVData->ui32PDumpBoundDevice = PVRSRV_MAX_DEVICES;
+#else
+       /* If not initialising the device on first connection, bind PDump
+        * capture to device 0. This is because we need to capture PDump
+        * during device initialisation but only want to capture PDump for
+        * a single device (by default, device 0).
+        */
+       psPVRSRVData->ui32PDumpBoundDevice = 0;
+#endif
+#endif
+
+       return PVRSRV_OK;
+
+Error:
+       PVRSRVCommonDriverDeInit();
+       return eError;
+}
+
+void
+PVRSRVCommonDriverDeInit(void)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_BOOL bEnablePageFaultDebug = IMG_FALSE;
+
+       if (gpsPVRSRVData == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: missing device-independent data",
+                                __func__));
+               return;
+       }
+
+       if (gpsPVRSRVData->pui32InfoPage != NULL)
+       {
+               bEnablePageFaultDebug = GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED;
+       }
+
+       gpsPVRSRVData->bUnload = IMG_TRUE;
+
+#if defined(SUPPORT_RGX)
+       PVRSRVDestroyHWPerfHostThread();
+       if (gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock)
+       {
+               OSLockDestroy(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock);
+               gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock = NULL;
+       }
+#endif
+
+       if (gpsPVRSRVData->hGlobalEventObject)
+       {
+               OSEventObjectSignal(gpsPVRSRVData->hGlobalEventObject);
+       }
+
+#if defined(SUPPORT_AUTOVZ)
+       /* Stop and cleanup the devices watchdog thread */
+       if (gpsPVRSRVData->hAutoVzWatchdogThread)
+       {
+               LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US)
+               {
+                       if (gpsPVRSRVData->hAutoVzWatchdogEvObj)
+                       {
+                               eError = OSEventObjectSignal(gpsPVRSRVData->hAutoVzWatchdogEvObj);
+                               PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+                       }
+
+                       eError = OSThreadDestroy(gpsPVRSRVData->hAutoVzWatchdogThread);
+                       if (PVRSRV_OK == eError)
+                       {
+                               gpsPVRSRVData->hAutoVzWatchdogThread = NULL;
+                               break;
+                       }
+                       OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT);
+               } END_LOOP_UNTIL_TIMEOUT();
+               PVR_LOG_IF_ERROR(eError, "OSThreadDestroy");
+       }
+
+       if (gpsPVRSRVData->hAutoVzWatchdogEvObj)
+       {
+               eError = OSEventObjectDestroy(gpsPVRSRVData->hAutoVzWatchdogEvObj);
+               gpsPVRSRVData->hAutoVzWatchdogEvObj = NULL;
+               PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+       }
+#endif /* SUPPORT_AUTOVZ */
+
+       /* Stop and cleanup the devices watchdog thread */
+       if (gpsPVRSRVData->hDevicesWatchdogThread)
+       {
+               LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US)
+               {
+                       if (gpsPVRSRVData->hDevicesWatchdogEvObj)
+                       {
+                               eError = OSEventObjectSignal(gpsPVRSRVData->hDevicesWatchdogEvObj);
+                               PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+                       }
+
+                       eError = OSThreadDestroy(gpsPVRSRVData->hDevicesWatchdogThread);
+                       if (PVRSRV_OK == eError)
+                       {
+                               gpsPVRSRVData->hDevicesWatchdogThread = NULL;
+                               break;
+                       }
+                       OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT);
+               } END_LOOP_UNTIL_TIMEOUT();
+               PVR_LOG_IF_ERROR(eError, "OSThreadDestroy");
+       }
+
+       if (gpsPVRSRVData->hDevicesWatchdogEvObj)
+       {
+               eError = OSEventObjectDestroy(gpsPVRSRVData->hDevicesWatchdogEvObj);
+               gpsPVRSRVData->hDevicesWatchdogEvObj = NULL;
+               PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+       }
+
+       /* Stop and cleanup the deferred clean up thread, event object and
+        * deferred context list.
+        */
+       if (gpsPVRSRVData->hCleanupThread)
+       {
+               LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US)
+               {
+                       if (gpsPVRSRVData->hCleanupEventObject)
+                       {
+                               eError = OSEventObjectSignal(gpsPVRSRVData->hCleanupEventObject);
+                               PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+                       }
+
+                       eError = OSThreadDestroy(gpsPVRSRVData->hCleanupThread);
+                       if (PVRSRV_OK == eError)
+                       {
+                               gpsPVRSRVData->hCleanupThread = NULL;
+                               break;
+                       }
+                       OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT);
+               } END_LOOP_UNTIL_TIMEOUT();
+               PVR_LOG_IF_ERROR(eError, "OSThreadDestroy");
+       }
+
+       if (gpsPVRSRVData->hCleanupEventObject)
+       {
+               eError = OSEventObjectDestroy(gpsPVRSRVData->hCleanupEventObject);
+               gpsPVRSRVData->hCleanupEventObject = NULL;
+               PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+       }
+
+       /* Tear down the HTB before PVRSRVHandleDeInit() removes its TL handle */
+       /* HTB De-init happens in device de-registration currently */
+       eError = HTBDeInit();
+       PVR_LOG_IF_ERROR(eError, "HTBDeInit");
+
+       /* Tear down CacheOp framework information page first */
+       CacheOpDeInit2();
+
+       /* Clean up information page */
+       InfoPageDestroy(gpsPVRSRVData);
+
+       /* Close the TL control plane stream. */
+       if (gpsPVRSRVData->hTLCtrlStream != NULL)
+       {
+               TLStreamClose(gpsPVRSRVData->hTLCtrlStream);
+       }
+
+       /* deinitialise pdump */
+       if ((g_ui32InitFlags & INIT_DATA_ENABLE_PDUMPINIT) > 0)
+       {
+               PDUMPDEINIT();
+       }
+
+       /* Clean up Transport Layer resources that remain */
+       TLDeInit();
+
+       HostMemDeviceDestroy(gpsPVRSRVData->psHostMemDeviceNode);
+       gpsPVRSRVData->psHostMemDeviceNode = NULL;
+
+       eError = PVRSRVHandleDeInit();
+       PVR_LOG_IF_ERROR(eError, "PVRSRVHandleDeInit");
+
+       /* destroy event object */
+       if (gpsPVRSRVData->hGlobalEventObject)
+       {
+               OSEventObjectDestroy(gpsPVRSRVData->hGlobalEventObject);
+               gpsPVRSRVData->hGlobalEventObject = NULL;
+       }
+
+       PVRSRVCmdCompleteDeinit();
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+       eError = DCDeInit();
+       PVR_LOG_IF_ERROR(eError, "DCDeInit");
+#endif
+
+       eError = PMRDeInit();
+       PVR_LOG_IF_ERROR(eError, "PMRDeInit");
+
+       BridgeDispatcherDeinit();
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+       RIDeInitKM();
+#endif
+
+       if (bEnablePageFaultDebug)
+       {
+               DevicememHistoryDeInitKM();
+       }
+
+       CacheOpDeInit();
+
+       OSDeInitEnvData();
+
+       (void) DevmemIntDeInit();
+
+       ServerBridgeDeInit();
+
+       PhysHeapDeinit();
+
+       HTB_DestroyDIEntry();
+
+#ifdef PVRSRV_ENABLE_PROCESS_STATS
+       PVRSRVStatsDestroy();
+#endif /* PVRSRV_ENABLE_PROCESS_STATS */
+
+       DebugCommonDeInitDriver();
+
+       DIDeInit();
+
+       if (gpsPVRSRVData->hThreadsDbgReqNotify)
+       {
+               PVRSRVUnregisterDriverDbgRequestNotify(gpsPVRSRVData->hThreadsDbgReqNotify);
+       }
+
+       PVRSRVUnregisterDriverDbgTable();
+
+       OSWRLockDestroy(gpsPVRSRVData->hDeviceNodeListLock);
+
+       OSFreeMem(gpsPVRSRVData);
+       gpsPVRSRVData = NULL;
+}
+
+static void _SysDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+                                       IMG_UINT32 ui32VerbLevel,
+                                       DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                       void *pvDumpDebugFile)
+{
+       /* Only dump info once */
+       PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*) hDebugRequestHandle;
+
+       PVR_DUMPDEBUG_LOG("------[ System Summary Device ID:%d ]------", psDeviceNode->sDevId.ui32InternalID);
+
+       switch (psDeviceNode->eCurrentSysPowerState)
+       {
+               case PVRSRV_SYS_POWER_STATE_OFF:
+                       PVR_DUMPDEBUG_LOG("Device System Power State: OFF");
+                       break;
+               case PVRSRV_SYS_POWER_STATE_ON:
+                       PVR_DUMPDEBUG_LOG("Device System Power State: ON");
+                       break;
+               default:
+                       PVR_DUMPDEBUG_LOG("Device System Power State: UNKNOWN (%d)",
+                                                          psDeviceNode->eCurrentSysPowerState);
+                       break;
+       }
+
+       PVR_DUMPDEBUG_LOG("MaxHWTOut: %dus, WtTryCt: %d, WDGTOut(on,off): (%dms,%dms)",
+                         MAX_HW_TIME_US, WAIT_TRY_COUNT, DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT, DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT);
+
+       SysDebugInfo(psDeviceNode->psDevConfig, pfnDumpDebugPrintf, pvDumpDebugFile);
+}
+
+#define PVRSRV_MIN_DEFAULT_LMA_PHYS_HEAP_SIZE (0x100000ULL * 32ULL) /* 32MB */
+
+static PVRSRV_ERROR PVRSRVValidatePhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+       IMG_UINT32 ui32FlagsAccumulate = 0;
+       IMG_UINT32 i;
+
+       PVR_LOG_RETURN_IF_FALSE(psDevConfig->ui32PhysHeapCount > 0,
+                                                       "Device config must specify at least one phys heap config.",
+                                                       PVRSRV_ERROR_PHYSHEAP_CONFIG);
+
+       for (i = 0; i < psDevConfig->ui32PhysHeapCount; i++)
+       {
+               PHYS_HEAP_CONFIG *psHeapConf = &psDevConfig->pasPhysHeaps[i];
+
+               PVR_LOG_RETURN_IF_FALSE_VA(psHeapConf->ui32UsageFlags != 0,
+                                                                  PVRSRV_ERROR_PHYSHEAP_CONFIG,
+                                                                  "Phys heap config %d: must specify usage flags.", i);
+
+               PVR_LOG_RETURN_IF_FALSE_VA((ui32FlagsAccumulate & psHeapConf->ui32UsageFlags) == 0,
+                                                               PVRSRV_ERROR_PHYSHEAP_CONFIG,
+                                                               "Phys heap config %d: duplicate usage flags.", i);
+
+               ui32FlagsAccumulate |= psHeapConf->ui32UsageFlags;
+
+               /* Output message if default heap is LMA and smaller than recommended minimum */
+               if ((i == psDevConfig->eDefaultHeap) &&
+#if defined(__KERNEL__)
+                   ((psHeapConf->eType == PHYS_HEAP_TYPE_LMA) ||
+                    (psHeapConf->eType == PHYS_HEAP_TYPE_DMA)) &&
+#else
+                   (psHeapConf->eType == PHYS_HEAP_TYPE_LMA) &&
+#endif
+                   (psHeapConf->uiSize < PVRSRV_MIN_DEFAULT_LMA_PHYS_HEAP_SIZE))
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Size of default heap is 0x%" IMG_UINT64_FMTSPECX 
+                                " (recommended minimum heap size is 0x%llx)",
+                                __func__, psHeapConf->uiSize,
+                                PVRSRV_MIN_DEFAULT_LMA_PHYS_HEAP_SIZE));
+               }
+       }
+
+       if (psDevConfig->eDefaultHeap == PVRSRV_PHYS_HEAP_GPU_LOCAL)
+       {
+               PVR_LOG_RETURN_IF_FALSE(((ui32FlagsAccumulate & PHYS_HEAP_USAGE_GPU_LOCAL) != 0),
+                                                       "Device config must specify GPU local phys heap config.",
+                                                       PVRSRV_ERROR_PHYSHEAP_CONFIG);
+       }
+       else if (psDevConfig->eDefaultHeap == PVRSRV_PHYS_HEAP_CPU_LOCAL)
+       {
+               PVR_LOG_RETURN_IF_FALSE(((ui32FlagsAccumulate & PHYS_HEAP_USAGE_CPU_LOCAL) != 0),
+                                               "Device config must specify CPU local phys heap config.",
+                                               PVRSRV_ERROR_PHYSHEAP_CONFIG);
+       }
+
+       return PVRSRV_OK;
+}
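+
+/* Illustrative sketch (not taken from any real system layer) of a phys heap
+ * config array that passes the checks above when eDefaultHeap is
+ * PVRSRV_PHYS_HEAP_GPU_LOCAL: every entry has non-zero usage flags, no flag
+ * appears in more than one entry, a GPU_LOCAL heap is present, and the LMA
+ * heap is at least PVRSRV_MIN_DEFAULT_LMA_PHYS_HEAP_SIZE. Names beyond those
+ * referenced above (e.g. PHYS_HEAP_USAGE_FW_MAIN) are assumptions about
+ * PHYS_HEAP_CONFIG rather than facts from this file.
+ *
+ *     static PHYS_HEAP_CONFIG gasPhysHeaps[] = {
+ *             {
+ *                     .eType          = PHYS_HEAP_TYPE_LMA,
+ *                     .ui32UsageFlags = PHYS_HEAP_USAGE_GPU_LOCAL,
+ *                     .uiSize         = 0x100000ULL * 256ULL, // 256MB, above the 32MB minimum
+ *             },
+ *             {
+ *                     .eType          = PHYS_HEAP_TYPE_LMA,
+ *                     .ui32UsageFlags = PHYS_HEAP_USAGE_FW_MAIN, // assumed flag name
+ *                     .uiSize         = 0x100000ULL * 4ULL,
+ *             },
+ *     };
+ */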
+
+PVRSRV_ERROR PVRSRVPhysMemHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_PHYS_HEAP ePhysHeap;
+
+       eError = PVRSRVValidatePhysHeapConfig(psDevConfig);
+       PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVValidatePhysHeapConfig");
+
+       eError = PhysHeapCreateDeviceHeapsFromConfigs(psDeviceNode,
+                                                     psDevConfig->pasPhysHeaps,
+                                                     psDevConfig->ui32PhysHeapCount);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapCreateDeviceHeapsFromConfigs", ErrorDeinit);
+
+       for (ePhysHeap = PVRSRV_PHYS_HEAP_DEFAULT+1;  ePhysHeap < PVRSRV_PHYS_HEAP_LAST; ePhysHeap++)
+       {
+               if (PhysHeapPVRLayerAcquire(ePhysHeap))
+               {
+                       eError = PhysHeapAcquireByDevPhysHeap(ePhysHeap, psDeviceNode, &psDeviceNode->apsPhysHeap[ePhysHeap]);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquireByDevPhysHeap", ErrorDeinit);
+               }
+
+               /* Calculate the total number of user accessible physical heaps */
+               if (psDeviceNode->apsPhysHeap[ePhysHeap] && PhysHeapUserModeAlloc(ePhysHeap))
+               {
+                       psDeviceNode->ui32UserAllocHeapCount++;
+               }
+       }
+
+       if (PhysHeapValidateDefaultHeapExists(psDeviceNode))
+       {
+               PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVPhysHeapCheckUsageFlags", ErrorDeinit);
+       }
+
+       eError = PhysHeapMMUPxSetup(psDeviceNode);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapMMUPxSetup", ErrorDeinit);
+
+       return PVRSRV_OK;
+
+ErrorDeinit:
+       PVR_ASSERT(IMG_FALSE);
+       PVRSRVPhysMemHeapsDeinit(psDeviceNode);
+
+       return eError;
+}
+
+void PVRSRVPhysMemHeapsDeinit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_PHYS_HEAP ePhysHeapIdx;
+       IMG_UINT32 i;
+
+#if defined(SUPPORT_AUTOVZ)
+       if (psDeviceNode->psFwMMUReservedPhysHeap)
+       {
+               PhysHeapDestroy(psDeviceNode->psFwMMUReservedPhysHeap);
+               psDeviceNode->psFwMMUReservedPhysHeap = NULL;
+       }
+#endif
+
+       PhysHeapMMUPxDeInit(psDeviceNode);
+
+       /* Release heaps */
+       for (ePhysHeapIdx = 0;
+                ePhysHeapIdx < ARRAY_SIZE(psDeviceNode->apsPhysHeap);
+                ePhysHeapIdx++)
+       {
+               if (psDeviceNode->apsPhysHeap[ePhysHeapIdx])
+               {
+                       PhysHeapRelease(psDeviceNode->apsPhysHeap[ePhysHeapIdx]);
+               }
+       }
+
+       if (psDeviceNode->psFWMainPhysHeap)
+       {
+               PhysHeapDestroy(psDeviceNode->psFWMainPhysHeap);
+               psDeviceNode->psFWMainPhysHeap = NULL;
+       }
+
+       if (psDeviceNode->psFWCfgPhysHeap)
+       {
+               PhysHeapDestroy(psDeviceNode->psFWCfgPhysHeap);
+               psDeviceNode->psFWCfgPhysHeap = NULL;
+       }
+
+       for (i = 0; i < RGX_NUM_OS_SUPPORTED; i++)
+       {
+               if (psDeviceNode->apsFWPremapPhysHeap[i])
+               {
+                       PhysHeapDestroy(psDeviceNode->apsFWPremapPhysHeap[i]);
+                       psDeviceNode->apsFWPremapPhysHeap[i] = NULL;
+               }
+       }
+
+       PhysHeapDestroyDeviceHeaps(psDeviceNode);
+}
+
+PHYS_HEAP_CONFIG* FindPhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
+                                                                        PHYS_HEAP_USAGE_FLAGS ui32Flags)
+{
+       IMG_UINT32 i;
+
+       for (i = 0; i < psDevConfig->ui32PhysHeapCount; i++)
+       {
+               if (psDevConfig->pasPhysHeaps[i].ui32UsageFlags == ui32Flags)
+               {
+                       return &psDevConfig->pasPhysHeaps[i];
+               }
+       }
+
+       return NULL;
+}
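+
+/* Typical use of the lookup above, e.g. when a caller needs the GPU-local
+ * heap description (illustrative only; handling the no-match NULL return is
+ * the caller's responsibility):
+ *
+ *     PHYS_HEAP_CONFIG *psGpuLocal =
+ *             FindPhysHeapConfig(psDevConfig, PHYS_HEAP_USAGE_GPU_LOCAL);
+ *     if (psGpuLocal == NULL)
+ *     {
+ *             return PVRSRV_ERROR_PHYSHEAP_CONFIG;
+ *     }
+ */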
+
+PVRSRV_ERROR PVRSRVCommonDeviceCreate(void *pvOSDevice,
+                                                                                        IMG_INT32 i32OsDeviceID,
+                                                                                        PVRSRV_DEVICE_NODE **ppsDeviceNode)
+{
+       PVRSRV_DATA                             *psPVRSRVData = PVRSRVGetPVRSRVData();
+       PVRSRV_ERROR                    eError;
+       PVRSRV_DEVICE_CONFIG    *psDevConfig;
+       PVRSRV_DEVICE_NODE              *psDeviceNode;
+       IMG_UINT32                              ui32AppHintDefault;
+       IMG_UINT32                              ui32AppHintDriverMode;
+#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__)
+       IMG_UINT32                              ui32AppHintPhysMemTestPasses;
+#endif
+       void *pvAppHintState    = NULL;
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+       IMG_HANDLE                              hProcessStats;
+#endif
+
+       MULTI_DEVICE_BRINGUP_DPF("PVRSRVCommonDeviceCreate: DevId %d", i32OsDeviceID);
+
+       /* Read driver mode (i.e. native, host or guest) AppHint early as it is
+          required by SysDevInit */
+       ui32AppHintDefault = PVRSRV_APPHINT_DRIVERMODE;
+       OSCreateKMAppHintState(&pvAppHintState);
+       OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, DriverMode,
+                                                &ui32AppHintDefault, &ui32AppHintDriverMode);
+       psPVRSRVData->eDriverMode = PVRSRV_VZ_APPHINT_MODE(ui32AppHintDriverMode);
+       psPVRSRVData->bForceApphintDriverMode = PVRSRV_VZ_APPHINT_MODE_IS_OVERRIDE(ui32AppHintDriverMode);
+       OSFreeKMAppHintState(pvAppHintState);
+       pvAppHintState = NULL;
+
+       psDeviceNode = OSAllocZMemNoStats(sizeof(*psDeviceNode));
+       PVR_LOG_RETURN_IF_NOMEM(psDeviceNode, "psDeviceNode");
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+       /* Allocate process statistics */
+       eError = PVRSRVStatsRegisterProcess(&hProcessStats);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVStatsRegisterProcess", ErrorFreeDeviceNode);
+#endif
+
+       psDeviceNode->sDevId.i32OsDeviceID = i32OsDeviceID;
+       psDeviceNode->sDevId.ui32InternalID = psPVRSRVData->ui32RegisteredDevices;
+
+       eError = SysDevInit(pvOSDevice, &psDevConfig);
+       PVR_LOG_GOTO_IF_ERROR(eError, "SysDevInit", ErrorDeregisterStats);
+
+       PVR_ASSERT(psDevConfig);
+       PVR_ASSERT(psDevConfig->pvOSDevice == pvOSDevice);
+       PVR_ASSERT(!psDevConfig->psDevNode);
+
+       if ((psDevConfig->eDefaultHeap != PVRSRV_PHYS_HEAP_GPU_LOCAL) &&
+           (psDevConfig->eDefaultHeap != PVRSRV_PHYS_HEAP_CPU_LOCAL))
+       {
+               PVR_LOG_MSG(PVR_DBG_ERROR, "DEFAULT Heap is invalid, "
+                                          "it must be GPU_LOCAL or CPU_LOCAL");
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto ErrorDeregisterStats;
+       }
+
+       psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_INIT;
+
+       if (psDevConfig->pfnGpuDomainPower)
+       {
+               psDeviceNode->eCurrentSysPowerState = psDevConfig->pfnGpuDomainPower(psDeviceNode);
+       }
+       else
+       {
+               /* If the System Layer doesn't provide a function to query the power state
+                * of the system hardware, use a default implementation that tracks the
+                * power state locally, starting from an assumed initial state (ON for
+                * AutoVZ builds, OFF otherwise). */
+               psDevConfig->pfnGpuDomainPower = PVRSRVDefaultDomainPower;
+
+#if defined(SUPPORT_AUTOVZ)
+               psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_ON;
+#else
+               psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_OFF;
+#endif
+       }
+
+       psDeviceNode->psDevConfig = psDevConfig;
+       psDevConfig->psDevNode = psDeviceNode;
+
+#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__)
+       if (PVRSRV_VZ_MODE_IS(NATIVE))
+       {
+               /* Read AppHint - Configurable memory test pass count */
+               ui32AppHintDefault = 0;
+               OSCreateKMAppHintState(&pvAppHintState);
+               OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, PhysMemTestPasses,
+                               &ui32AppHintDefault, &ui32AppHintPhysMemTestPasses);
+               OSFreeKMAppHintState(pvAppHintState);
+               pvAppHintState = NULL;
+
+               if (ui32AppHintPhysMemTestPasses > 0)
+               {
+                       eError = PhysMemTest(psDevConfig, ui32AppHintPhysMemTestPasses);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "PhysMemTest", ErrorSysDevDeInit);
+               }
+       }
+#endif
+
+       /* Initialise the paravirtualised connection */
+       if (!PVRSRV_VZ_MODE_IS(NATIVE))
+       {
+               /* If a device already exists */
+               if (psPVRSRVData->psDeviceNodeList != NULL)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Virtualization is currently supported only on single device systems.",
+                                        __func__));
+                       eError = PVRSRV_ERROR_NOT_SUPPORTED;
+                       goto ErrorSysDevDeInit;
+               }
+
+               eError = PvzConnectionInit(psDevConfig);
+               PVR_GOTO_IF_ERROR(eError, ErrorSysDevDeInit);
+       }
+
+       eError = PVRSRVRegisterDeviceDbgTable(psDeviceNode);
+       PVR_GOTO_IF_ERROR(eError, ErrorPvzConnectionDeInit);
+
+       eError = PVRSRVPowerLockInit(psDeviceNode);
+       PVR_GOTO_IF_ERROR(eError, ErrorUnregisterDbgTable);
+
+       eError = PVRSRVPhysMemHeapsInit(psDeviceNode, psDevConfig);
+       PVR_GOTO_IF_ERROR(eError, ErrorPowerLockDeInit);
+
+#if defined(SUPPORT_RGX)
+       /* Requirements:
+        *  - registered GPU and FW local heaps
+        *  - debug table
+        */
+       eError = RGXRegisterDevice(psDeviceNode);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_LOG_ERROR(eError, "RGXRegisterDevice");
+               eError = PVRSRV_ERROR_DEVICE_REGISTER_FAILED;
+               goto ErrorPhysMemHeapsDeinit;
+       }
+#endif
+
+       if (psDeviceNode->pfnPhysMemDeviceHeapsInit != NULL)
+       {
+               eError = psDeviceNode->pfnPhysMemDeviceHeapsInit(psDeviceNode);
+               PVR_GOTO_IF_ERROR(eError, ErrorPhysMemHeapsDeinit);
+       }
+
+       if (psDeviceNode->pfnFwMMUInit != NULL)
+       {
+               eError = psDeviceNode->pfnFwMMUInit(psDeviceNode);
+               PVR_GOTO_IF_ERROR(eError, ErrorFwMMUDeinit);
+       }
+
+       eError = SyncServerInit(psDeviceNode);
+       PVR_GOTO_IF_ERROR(eError, ErrorDeInitRgx);
+
+       eError = SyncCheckpointInit(psDeviceNode);
+       PVR_LOG_GOTO_IF_ERROR(eError, "SyncCheckpointInit", ErrorSyncCheckpointInit);
+
+       /*
+        * This is registered before doing device specific initialisation to ensure
+        * generic device information is dumped first during a debug request.
+        */
+       eError = PVRSRVRegisterDeviceDbgRequestNotify(&psDeviceNode->hDbgReqNotify,
+                                                         psDeviceNode,
+                                                         _SysDebugRequestNotify,
+                                                         DEBUG_REQUEST_SYS,
+                                                         psDeviceNode);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRegisterDeviceDbgRequestNotify", ErrorRegDbgReqNotify);
+
+#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE)
+       eError = InitDVFS(psDeviceNode);
+       PVR_LOG_GOTO_IF_ERROR(eError, "InitDVFS", ErrorDVFSInitFail);
+#endif
+
+       OSAtomicWrite(&psDeviceNode->iNumClockSpeedChanges, 0);
+
+#if defined(PVR_TESTING_UTILS)
+       TUtilsInit(psDeviceNode);
+#endif
+
+       OSWRLockCreate(&psDeviceNode->hMemoryContextPageFaultNotifyListLock);
+       if (psDeviceNode->hMemoryContextPageFaultNotifyListLock == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for PF notify list",
+                       __func__));
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto ErrorPageFaultLockFailCreate;
+       }
+
+       dllist_init(&psDeviceNode->sMemoryContextPageFaultNotifyListHead);
+
+       PVR_DPF((PVR_DBG_MESSAGE, "Registered device %p", psDeviceNode));
+       PVR_DPF((PVR_DBG_MESSAGE, "Register bank address = 0x%08lx",
+                        (unsigned long)psDevConfig->sRegsCpuPBase.uiAddr));
+       PVR_DPF((PVR_DBG_MESSAGE, "IRQ = %d", psDevConfig->ui32IRQ));
+
+/* SUPPORT_ALT_REGBASE is defined for rogue cores only */
+#if defined(SUPPORT_RGX) && defined(SUPPORT_ALT_REGBASE)
+       {
+               IMG_DEV_PHYADDR sRegsGpuPBase;
+
+               PhysHeapCpuPAddrToDevPAddr(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL],
+                                          1,
+                                          &sRegsGpuPBase,
+                                          &(psDeviceNode->psDevConfig->sRegsCpuPBase));
+
+               PVR_LOG(("%s: Using alternate Register bank GPU address: 0x%08lx (orig: 0x%08lx)", __func__,
+                        (unsigned long)psDevConfig->sAltRegsGpuPBase.uiAddr,
+                        (unsigned long)sRegsGpuPBase.uiAddr));
+       }
+#endif
+
+#if defined(__linux__)
+       /* register the AppHint device control before device initialisation
+        * so individual AppHints can be configured during the init phase
+        */
+       {
+               int iError = pvr_apphint_device_register(psDeviceNode);
+               PVR_LOG_IF_FALSE(iError == 0, "pvr_apphint_device_register() failed");
+       }
+#endif /* defined(__linux__) */
+
+#if defined(SUPPORT_RGX)
+       RGXHWPerfInitAppHintCallbacks(psDeviceNode);
+#endif
+
+       eError = DebugCommonInitDevice(psDeviceNode);
+       PVR_LOG_GOTO_IF_ERROR(eError, "DebugCommonInitDevice",
+                             ErrorDestroyMemoryContextPageFaultNotifyListLock);
+
+       /* Finally insert the device into the dev-list; the device is marked
+        * active later, in PVRSRVDeviceFinalise() */
+       OSWRLockAcquireWrite(psPVRSRVData->hDeviceNodeListLock);
+       List_PVRSRV_DEVICE_NODE_InsertTail(&psPVRSRVData->psDeviceNodeList,
+                                                                          psDeviceNode);
+       psPVRSRVData->ui32RegisteredDevices++;
+       OSWRLockReleaseWrite(psPVRSRVData->hDeviceNodeListLock);
+
+       *ppsDeviceNode = psDeviceNode;
+
+#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE)
+       /* Register the DVFS device now the device node is present in the dev-list */
+       eError = RegisterDVFSDevice(psDeviceNode);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RegisterDVFSDevice", ErrorRegisterDVFSDeviceFail);
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+       /* Close the process statistics */
+       PVRSRVStatsDeregisterProcess(hProcessStats);
+#endif
+
+#if defined(SUPPORT_VALIDATION)
+       OSLockCreateNoStats(&psDeviceNode->hValidationLock);
+#endif
+
+       return PVRSRV_OK;
+
+#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE)
+ErrorRegisterDVFSDeviceFail:
+       /* Remove the device from the list */
+       OSWRLockAcquireWrite(psPVRSRVData->hDeviceNodeListLock);
+       List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode);
+       psPVRSRVData->ui32RegisteredDevices--;
+       OSWRLockReleaseWrite(psPVRSRVData->hDeviceNodeListLock);
+#endif
+
+ErrorDestroyMemoryContextPageFaultNotifyListLock:
+       OSWRLockDestroy(psDeviceNode->hMemoryContextPageFaultNotifyListLock);
+       psDeviceNode->hMemoryContextPageFaultNotifyListLock = NULL;
+
+ErrorPageFaultLockFailCreate:
+#if defined(PVR_TESTING_UTILS)
+       TUtilsDeinit(psDeviceNode);
+#endif
+
+#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE)
+ErrorDVFSInitFail:
+#endif
+
+       if (psDeviceNode->hDbgReqNotify)
+       {
+               PVRSRVUnregisterDeviceDbgRequestNotify(psDeviceNode->hDbgReqNotify);
+       }
+
+ErrorRegDbgReqNotify:
+       SyncCheckpointDeinit(psDeviceNode);
+
+ErrorSyncCheckpointInit:
+       SyncServerDeinit(psDeviceNode);
+
+ErrorDeInitRgx:
+#if defined(SUPPORT_RGX)
+       DevDeInitRGX(psDeviceNode);
+#endif
+ErrorFwMMUDeinit:
+ErrorPhysMemHeapsDeinit:
+       PVRSRVPhysMemHeapsDeinit(psDeviceNode);
+ErrorPowerLockDeInit:
+       PVRSRVPowerLockDeInit(psDeviceNode);
+ErrorUnregisterDbgTable:
+       PVRSRVUnregisterDeviceDbgTable(psDeviceNode);
+ErrorPvzConnectionDeInit:
+       psDevConfig->psDevNode = NULL;
+       if (!PVRSRV_VZ_MODE_IS(NATIVE))
+       {
+               PvzConnectionDeInit();
+       }
+ErrorSysDevDeInit:
+       SysDevDeInit(psDevConfig);
+ErrorDeregisterStats:
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+       /* Close the process statistics */
+       PVRSRVStatsDeregisterProcess(hProcessStats);
+ErrorFreeDeviceNode:
+#endif
+       OSFreeMemNoStats(psDeviceNode);
+
+       return eError;
+}
+
+#if defined(SUPPORT_RGX)
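+/* AppHint accessor callbacks. The psPrivate argument carries the flag bit
+ * being controlled (cast through uintptr_t): _Set/_ReadDeviceFlag operate on
+ * the RGXKM device state flags, _Set/_ReadStateFlag on the firmware config
+ * flags.
+ */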
+static PVRSRV_ERROR _SetDeviceFlag(const PVRSRV_DEVICE_NODE *psDevice,
+                                   const void *psPrivate, IMG_BOOL bValue)
+{
+       PVRSRV_ERROR eResult = PVRSRV_OK;
+       IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate);
+
+       PVR_RETURN_IF_INVALID_PARAM(ui32Flag);
+       PVR_RETURN_IF_FALSE(psDevice != APPHINT_OF_DRIVER_NO_DEVICE,
+                           PVRSRV_ERROR_INVALID_PARAMS);
+
+       eResult = RGXSetDeviceFlags((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice,
+                                   ui32Flag, bValue);
+
+       return eResult;
+}
+
+static PVRSRV_ERROR _ReadDeviceFlag(const PVRSRV_DEVICE_NODE *psDevice,
+                                   const void *psPrivate, IMG_BOOL *pbValue)
+{
+       PVRSRV_ERROR eResult = PVRSRV_OK;
+       IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate);
+       IMG_UINT32 ui32State;
+
+       PVR_RETURN_IF_INVALID_PARAM(ui32Flag);
+       PVR_RETURN_IF_FALSE(psDevice != APPHINT_OF_DRIVER_NO_DEVICE,
+                           PVRSRV_ERROR_INVALID_PARAMS);
+
+       eResult = RGXGetDeviceFlags((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice,
+                                   &ui32State);
+
+       if (PVRSRV_OK == eResult)
+       {
+               *pbValue = (ui32State & ui32Flag)? IMG_TRUE: IMG_FALSE;
+       }
+
+       return eResult;
+}
+
+static PVRSRV_ERROR _SetStateFlag(const PVRSRV_DEVICE_NODE *psDevice,
+                                  const void *psPrivate, IMG_BOOL bValue)
+{
+       PVRSRV_ERROR eResult = PVRSRV_OK;
+       IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate);
+
+       PVR_RETURN_IF_INVALID_PARAM(ui32Flag);
+       PVR_RETURN_IF_FALSE(psDevice != APPHINT_OF_DRIVER_NO_DEVICE,
+                           PVRSRV_ERROR_INVALID_PARAMS);
+
+       eResult = RGXStateFlagCtrl((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice,
+                                  ui32Flag, NULL, bValue);
+
+       return eResult;
+}
+
+static PVRSRV_ERROR _ReadStateFlag(const PVRSRV_DEVICE_NODE *psDevice,
+                                   const void *psPrivate, IMG_BOOL *pbValue)
+{
+       IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate);
+       IMG_UINT32 ui32State;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_RETURN_IF_INVALID_PARAM(ui32Flag);
+       PVR_RETURN_IF_FALSE(psDevice != APPHINT_OF_DRIVER_NO_DEVICE,
+                           PVRSRV_ERROR_INVALID_PARAMS);
+
+       psDevInfo = (PVRSRV_RGXDEV_INFO *)psDevice->pvDevice;
+       ui32State = psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags;
+
+       if (pbValue)
+       {
+               *pbValue = (ui32State & ui32Flag)? IMG_TRUE: IMG_FALSE;
+       }
+
+       return PVRSRV_OK;
+}
+#endif
+
+PVRSRV_ERROR PVRSRVCommonDeviceInitialise(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       IMG_BOOL bInitSuccesful = IMG_FALSE;
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+       IMG_HANDLE hProcessStats;
+#endif
+       PVRSRV_ERROR eError;
+
+       MULTI_DEVICE_BRINGUP_DPF("PVRSRVCommonDeviceInitialise: DevId %d", psDeviceNode->sDevId.i32OsDeviceID);
+
+       if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_INIT)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Device already initialised", __func__));
+               return PVRSRV_ERROR_INIT_FAILURE;
+       }
+
+#if defined(PDUMP)
+#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_CONNECT)
+       {
+               PVRSRV_DATA *psSRVData = PVRSRVGetPVRSRVData();
+
+               /* If first connection, bind this and future PDump clients to use this device */
+               if (psSRVData->ui32PDumpBoundDevice == PVRSRV_MAX_DEVICES)
+               {
+                       psSRVData->ui32PDumpBoundDevice = psDeviceNode->sDevId.ui32InternalID;
+               }
+       }
+#endif
+#endif
+
+       /* Initialise Connection_Data access mechanism */
+       dllist_init(&psDeviceNode->sConnections);
+       eError = OSLockCreate(&psDeviceNode->hConnectionsLock);
+       PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate");
+
+       PVRSRVSetSystemPowerState(psDeviceNode->psDevConfig, PVRSRV_SYS_POWER_STATE_ON);
+       /* Allocate process statistics */
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+       eError = PVRSRVStatsRegisterProcess(&hProcessStats);
+       PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVStatsRegisterProcess");
+#endif
+
+#if defined(SUPPORT_RGX)
+       eError = RGXInit(psDeviceNode);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXInit", Exit);
+#endif
+
+#if defined(SUPPORT_DMA_TRANSFER)
+       eError = PVRSRVInitialiseDMA(psDeviceNode);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVInitialiseDMA", Exit);
+#endif
+
+       bInitSuccesful = IMG_TRUE;
+
+#if defined(SUPPORT_RGX) || defined(SUPPORT_DMA_TRANSFER)
+Exit:
+#endif
+       eError = PVRSRVDeviceFinalise(psDeviceNode, bInitSuccesful);
+       PVR_LOG_IF_ERROR(eError, "PVRSRVDeviceFinalise");
+
+#if defined(SUPPORT_RGX)
+       if (!PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableClockGating,
+                                                 _ReadStateFlag, _SetStateFlag,
+                                                 APPHINT_OF_DRIVER_NO_DEVICE,
+                                                 (void*)((uintptr_t)RGXFWIF_INICFG_DISABLE_CLKGATING_EN));
+               PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableDMOverlap,
+                                                 _ReadStateFlag, _SetStateFlag,
+                                                 APPHINT_OF_DRIVER_NO_DEVICE,
+                                                 (void*)((uintptr_t)RGXFWIF_INICFG_DISABLE_DM_OVERLAP));
+               PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_AssertOnHWRTrigger,
+                                                 _ReadStateFlag, _SetStateFlag,
+                                                 psDeviceNode,
+                                                 (void*)((uintptr_t)RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER));
+               PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_AssertOutOfMemory,
+                                                 _ReadStateFlag, _SetStateFlag,
+                                                 psDeviceNode,
+                                                 (void*)((uintptr_t)RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY));
+               PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_CheckMList,
+                                                 _ReadStateFlag, _SetStateFlag,
+                                                 psDeviceNode,
+                                                 (void*)((uintptr_t)RGXFWIF_INICFG_CHECK_MLIST_EN));
+       }
+
+       PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableFEDLogging,
+                                         _ReadDeviceFlag, _SetDeviceFlag,
+                                         psDeviceNode,
+                                         (void*)((uintptr_t)RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN));
+       PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_ZeroFreelist,
+                                         _ReadDeviceFlag, _SetDeviceFlag,
+                                         psDeviceNode,
+                                         (void*)((uintptr_t)RGXKM_DEVICE_STATE_ZERO_FREELIST));
+#if defined(SUPPORT_VALIDATION)
+       PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_GPUUnitsPowerChange,
+                                         _ReadDeviceFlag, _SetDeviceFlag,
+                                         psDeviceNode,
+                                         (void*)((uintptr_t)RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN));
+#endif
+       PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisablePDumpPanic,
+                                         RGXQueryPdumpPanicDisable, RGXSetPdumpPanicDisable,
+                                         psDeviceNode,
+                                         NULL);
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+       /* Close the process statistics */
+       PVRSRVStatsDeregisterProcess(hProcessStats);
+#endif
+
+       return eError;
+}
+
+PVRSRV_ERROR PVRSRVCommonDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_DATA                             *psPVRSRVData = PVRSRVGetPVRSRVData();
+       PVRSRV_ERROR                    eError;
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+       IMG_BOOL                                bForceUnload = IMG_FALSE;
+
+       if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+       {
+               bForceUnload = IMG_TRUE;
+       }
+#endif
+
+       MULTI_DEVICE_BRINGUP_DPF("PVRSRVCommonDeviceDestroy: DevId %d", psDeviceNode->sDevId.i32OsDeviceID);
+
+       psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_DEINIT;
+
+#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE)
+       UnregisterDVFSDevice(psDeviceNode);
+#endif
+
+       OSWRLockAcquireWrite(psPVRSRVData->hDeviceNodeListLock);
+       List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode);
+       psPVRSRVData->ui32RegisteredDevices--;
+       OSWRLockReleaseWrite(psPVRSRVData->hDeviceNodeListLock);
+
+#if defined(__linux__)
+       pvr_apphint_device_unregister(psDeviceNode);
+#endif /* defined(__linux__) */
+
+       DebugCommonDeInitDevice(psDeviceNode);
+
+       if (psDeviceNode->hMemoryContextPageFaultNotifyListLock != NULL)
+       {
+               OSWRLockDestroy(psDeviceNode->hMemoryContextPageFaultNotifyListLock);
+       }
+
+#if defined(SUPPORT_VALIDATION)
+       OSLockDestroyNoStats(psDeviceNode->hValidationLock);
+       psDeviceNode->hValidationLock = NULL;
+#endif
+
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+       SyncFbDeregisterDevice(psDeviceNode);
+#endif
+       /* Counterpart to what gets done in PVRSRVDeviceFinalise */
+       if (psDeviceNode->hSyncCheckpointContext)
+       {
+               SyncCheckpointContextDestroy(psDeviceNode->hSyncCheckpointContext);
+               psDeviceNode->hSyncCheckpointContext = NULL;
+       }
+       if (psDeviceNode->hSyncPrimContext)
+       {
+               if (psDeviceNode->psMMUCacheSyncPrim)
+               {
+                       PVRSRV_CLIENT_SYNC_PRIM *psSync = psDeviceNode->psMMUCacheSyncPrim;
+
+                       /* Ensure there are no pending MMU Cache Ops in progress before freeing this sync. */
+                       eError = PVRSRVPollForValueKM(psDeviceNode,
+                                                     psSync->pui32LinAddr,
+                                                     psDeviceNode->ui32NextMMUInvalidateUpdate-1,
+                                                     0xFFFFFFFF,
+                                                     POLL_FLAG_LOG_ERROR);
+                       PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPollForValueKM");
+
+                       /* Clear the device node's psMMUCacheSyncPrim pointer before
+                        * freeing the sync prim, so the freeing of the sync prim's own
+                        * page tables is not deferred against the very sync being
+                        * freed (this sync is used to defer MMU page table freeing). */
+                       psDeviceNode->psMMUCacheSyncPrim = NULL;
+
+                       /* Free general purpose sync primitive */
+                       SyncPrimFree(psSync);
+               }
+
+               SyncPrimContextDestroy(psDeviceNode->hSyncPrimContext);
+               psDeviceNode->hSyncPrimContext = NULL;
+       }
+
+       eError = PVRSRVPowerLock(psDeviceNode);
+       if (eError == PVRSRV_OK)
+       {
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+               /*
+                * Firmware probably not responding if bForceUnload is set, but we still want to unload the
+                * driver.
+                */
+               if (!bForceUnload)
+#endif
+               {
+                       /* Force idle device */
+                       eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_TRUE);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_LOG_ERROR(eError, "PVRSRVDeviceIdleRequestKM");
+                               if (eError != PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED)
+                               {
+                                       PVRSRVPowerUnlock(psDeviceNode);
+                               }
+                               return eError;
+                       }
+               }
+
+               /* Power down the device if necessary */
+               eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+                                                                                        PVRSRV_DEV_POWER_STATE_OFF,
+                                                                                        PVRSRV_POWER_FLAGS_FORCED);
+               PVRSRVPowerUnlock(psDeviceNode);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_LOG_ERROR(eError, "PVRSRVSetDevicePowerStateKM");
+
+                       PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+
+                       /*
+                        * If the driver is okay then return the error, otherwise we can ignore
+                        * this error.
+                        */
+                       if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK)
+                       {
+                               return eError;
+                       }
+                       else
+                       {
+                               PVR_DPF((PVR_DBG_MESSAGE,
+                                                "%s: Will continue to unregister as driver status is not OK",
+                                                __func__));
+                       }
+               }
+       }
+
+#if defined(PVR_TESTING_UTILS)
+       TUtilsDeinit(psDeviceNode);
+#endif
+
+#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE)
+       DeinitDVFS(psDeviceNode);
+#endif
+
+       if (psDeviceNode->hDbgReqNotify)
+       {
+               PVRSRVUnregisterDeviceDbgRequestNotify(psDeviceNode->hDbgReqNotify);
+       }
+
+       SyncCheckpointDeinit(psDeviceNode);
+
+       SyncServerDeinit(psDeviceNode);
+
+#if defined(SUPPORT_RGX)
+       DevDeInitRGX(psDeviceNode);
+#endif
+
+       PVRSRVPhysMemHeapsDeinit(psDeviceNode);
+       PVRSRVPowerLockDeInit(psDeviceNode);
+
+       PVRSRVUnregisterDeviceDbgTable(psDeviceNode);
+
+       /* Destroy the Connection-Data lock as late as possible. */
+       if (psDeviceNode->hConnectionsLock)
+       {
+               OSLockDestroy(psDeviceNode->hConnectionsLock);
+       }
+
+       psDeviceNode->psDevConfig->psDevNode = NULL;
+
+       if (!PVRSRV_VZ_MODE_IS(NATIVE))
+       {
+               PvzConnectionDeInit();
+       }
+       SysDevDeInit(psDeviceNode->psDevConfig);
+
+       OSFreeMemNoStats(psDeviceNode);
+
+       return PVRSRV_OK;
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVDeviceFinalise
+@Description  Performs the final parts of device initialisation.
+@Input        psDeviceNode            Device node of the device to finish
+                                      initialising
+@Input        bInitSuccessful         Whether or not device specific
+                                      initialisation was successful
+@Return       PVRSRV_ERROR     PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR PVRSRVDeviceFinalise(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                                          IMG_BOOL bInitSuccessful)
+{
+       PVRSRV_ERROR eError;
+       __maybe_unused PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)(psDeviceNode->pvDevice);
+
+       if (bInitSuccessful)
+       {
+               eError = SyncCheckpointContextCreate(psDeviceNode,
+                                                                                        &psDeviceNode->hSyncCheckpointContext);
+               PVR_LOG_GOTO_IF_ERROR(eError, "SyncCheckpointContextCreate", ErrorExit);
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+               eError = SyncFbRegisterDevice(psDeviceNode);
+               PVR_GOTO_IF_ERROR(eError, ErrorExit);
+#endif
+               eError = SyncPrimContextCreate(psDeviceNode,
+                                                                          &psDeviceNode->hSyncPrimContext);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_LOG_ERROR(eError, "SyncPrimContextCreate");
+                       SyncCheckpointContextDestroy(psDeviceNode->hSyncCheckpointContext);
+                       goto ErrorExit;
+               }
+
+               /* Allocate MMU cache invalidate sync */
+               eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+                                                          &psDeviceNode->psMMUCacheSyncPrim,
+                                                          "pvrsrv dev MMU cache");
+               PVR_LOG_GOTO_IF_ERROR(eError, "SyncPrimAlloc", ErrorExit);
+
+               /* Set the sync prim value to a much higher value near the
+                * wrapping range. This is so any wrapping bugs would be
+                * seen early in the driver start-up.
+                */
+               SyncPrimSet(psDeviceNode->psMMUCacheSyncPrim, 0xFFFFFFF6UL);
+
+               /* Next update value will be 0xFFFFFFF7 since sync prim starts with 0xFFFFFFF6 */
+               psDeviceNode->ui32NextMMUInvalidateUpdate = 0xFFFFFFF7UL;
+
+               eError = PVRSRVPowerLock(psDeviceNode);
+               PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVPowerLock", ErrorExit);
+
+               /*
+                * Always ensure a single power on command appears in the pdump. This
+                * should be the only power related call outside of PDUMPPOWCMDSTART
+                * and PDUMPPOWCMDEND.
+                */
+               eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+                                                                                        PVRSRV_DEV_POWER_STATE_ON,
+                                                                                        PVRSRV_POWER_FLAGS_FORCED);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Failed to set device %p power state to 'on' (%s)",
+                                        __func__, psDeviceNode, PVRSRVGetErrorString(eError)));
+                       PVRSRVPowerUnlock(psDeviceNode);
+                       goto ErrorExit;
+               }
+
+#if defined(SUPPORT_FW_VIEW_EXTRA_DEBUG)
+               eError = ValidateFWOnLoad(psDeviceNode->pvDevice);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_LOG_ERROR(eError, "ValidateFWOnLoad");
+                       PVRSRVPowerUnlock(psDeviceNode);
+                       return eError;
+               }
+#endif
+
+               eError = PVRSRVDevInitCompatCheck(psDeviceNode);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Failed compatibility check for device %p (%s)",
+                                        __func__, psDeviceNode, PVRSRVGetErrorString(eError)));
+                       PVRSRVPowerUnlock(psDeviceNode);
+                       PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+                       goto ErrorExit;
+               }
+
+               PDUMPPOWCMDSTART(psDeviceNode);
+
+               /* Force the device to idle if its default power state is off */
+               eError = PVRSRVDeviceIdleRequestKM(psDeviceNode,
+                                                                                  &PVRSRVDeviceIsDefaultStateOFF,
+                                                                                  IMG_TRUE);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_LOG_ERROR(eError, "PVRSRVDeviceIdleRequestKM");
+                       if (eError != PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED)
+                       {
+                               PVRSRVPowerUnlock(psDeviceNode);
+                       }
+                       goto ErrorExit;
+               }
+
+               /* Place device into its default power state. */
+               eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+                                                                                        PVRSRV_DEV_POWER_STATE_DEFAULT,
+                                                                                        PVRSRV_POWER_FLAGS_FORCED);
+               PDUMPPOWCMDEND(psDeviceNode);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Failed to set device %p into its default power state (%s)",
+                                        __func__, psDeviceNode, PVRSRVGetErrorString(eError)));
+
+                       PVRSRVPowerUnlock(psDeviceNode);
+                       goto ErrorExit;
+               }
+
+               PVRSRVPowerUnlock(psDeviceNode);
+
+               /*
+                * If PDUMP is enabled and RGX device is supported, then initialise the
+                * performance counters that can be further modified in PDUMP. Then,
+                * before ending the init phase of the pdump, drain the commands put in
+                * the kCCB during the init phase.
+                */
+#if defined(SUPPORT_RGX)
+#if defined(PDUMP)
+               {
+                       eError = RGXInitHWPerfCounters(psDeviceNode);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXInitHWPerfCounters", ErrorExit);
+
+                       eError = RGXPdumpDrainKCCB(psDevInfo,
+                                                                          psDevInfo->psKernelCCBCtl->ui32WriteOffset);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXPdumpDrainKCCB", ErrorExit);
+               }
+#endif
+#endif /* defined(SUPPORT_RGX) */
+               /* Now that the device(s) are fully initialised set them as active */
+               psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_ACTIVE;
+               eError = PVRSRV_OK;
+       }
+       else
+       {
+               /* Initialisation failed so set the device(s) into a bad state */
+               psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_BAD;
+               eError = PVRSRV_ERROR_NOT_INITIALISED;
+       }
+
+       /* Give PDump control a chance to end the init phase, depends on OS */
+       PDUMPENDINITPHASE(psDeviceNode);
+       return eError;
+
+ErrorExit:
+       /* Initialisation failed so set the device(s) into a bad state */
+       psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_BAD;
+
+       return eError;
+}
+
+PVRSRV_ERROR PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       /* Only check devices which specify a compatibility check callback */
+       if (psDeviceNode->pfnInitDeviceCompatCheck)
+               return psDeviceNode->pfnInitDeviceCompatCheck(psDeviceNode);
+       else
+               return PVRSRV_OK;
+}
+
+/*
+       PollForValueKM
+*/
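+/* Busy-poll the masked 32-bit value at pui32LinMemAddr until it equals
+ * ui32Value, services enter a bad state, or ui32Timeoutus expires, waiting
+ * ui32PollPeriodus between reads. Returns PVRSRV_ERROR_TIMEOUT on failure and
+ * optionally logs it when POLL_FLAG_LOG_ERROR is set; NO_HARDWARE builds
+ * return PVRSRV_OK immediately.
+ */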
+static
+PVRSRV_ERROR PollForValueKM (volatile IMG_UINT32 __iomem *     pui32LinMemAddr,
+                                                                                 IMG_UINT32                    ui32Value,
+                                                                                 IMG_UINT32                    ui32Mask,
+                                                                                 IMG_UINT32                    ui32Timeoutus,
+                                                                                 IMG_UINT32                    ui32PollPeriodus,
+                                                                                 POLL_FLAGS            ePollFlags)
+{
+#if defined(NO_HARDWARE)
+       PVR_UNREFERENCED_PARAMETER(pui32LinMemAddr);
+       PVR_UNREFERENCED_PARAMETER(ui32Value);
+       PVR_UNREFERENCED_PARAMETER(ui32Mask);
+       PVR_UNREFERENCED_PARAMETER(ui32Timeoutus);
+       PVR_UNREFERENCED_PARAMETER(ui32PollPeriodus);
+       PVR_UNREFERENCED_PARAMETER(ePollFlags);
+       return PVRSRV_OK;
+#else
+       IMG_UINT32 ui32ActualValue = 0xFFFFFFFFU; /* Initialiser only required to prevent incorrect warning */
+
+       LOOP_UNTIL_TIMEOUT(ui32Timeoutus)
+       {
+               ui32ActualValue = OSReadHWReg32((void __iomem *)pui32LinMemAddr, 0) & ui32Mask;
+
+               if (ui32ActualValue == ui32Value)
+               {
+                       return PVRSRV_OK;
+               }
+
+               if (gpsPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+               {
+                       return PVRSRV_ERROR_TIMEOUT;
+               }
+
+               OSWaitus(ui32PollPeriodus);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       if (BITMASK_HAS(ePollFlags, POLL_FLAG_LOG_ERROR))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "PollForValueKM: Timeout. Expected 0x%x but found 0x%x (mask 0x%x).",
+                        ui32Value, ui32ActualValue, ui32Mask));
+       }
+
+       return PVRSRV_ERROR_TIMEOUT;
+#endif /* NO_HARDWARE */
+}
+
+
+/*
+       PVRSRVPollForValueKM
+*/
+PVRSRV_ERROR PVRSRVPollForValueKM (PVRSRV_DEVICE_NODE  *psDevNode,
+                                                                                               volatile IMG_UINT32     __iomem *pui32LinMemAddr,
+                                                                                               IMG_UINT32                      ui32Value,
+                                                                                               IMG_UINT32                      ui32Mask,
+                                                                                               POLL_FLAGS          ePollFlags)
+{
+       PVRSRV_ERROR eError;
+
+       eError = PollForValueKM(pui32LinMemAddr, ui32Value, ui32Mask,
+                                                 MAX_HW_TIME_US,
+                                                 MAX_HW_TIME_US/WAIT_TRY_COUNT,
+                                                 ePollFlags);
+       if (eError != PVRSRV_OK && BITMASK_HAS(ePollFlags, POLL_FLAG_DEBUG_DUMP))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed! Error(%s) CPU linear address(%p) Expected value(%u)",
+                                       __func__, PVRSRVGetErrorString(eError),
+                                                               pui32LinMemAddr, ui32Value));
+               PVRSRVDebugRequest(psDevNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+       }
+
+       return eError;
+}
+
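+/* Wait for the masked 32-bit value at pui32LinMemAddr to equal ui32Value,
+ * sleeping on the global event object between checks instead of busy-polling.
+ * Gives up after MAX_HW_TIME_US or if services enter a bad state, and updates
+ * the consecutive-timeout counter used by the device watchdog thread.
+ */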
+PVRSRV_ERROR
+PVRSRVWaitForValueKM(volatile IMG_UINT32 __iomem *pui32LinMemAddr,
+                     IMG_UINT32                  ui32Value,
+                     IMG_UINT32                  ui32Mask)
+{
+#if defined(NO_HARDWARE)
+       PVR_UNREFERENCED_PARAMETER(pui32LinMemAddr);
+       PVR_UNREFERENCED_PARAMETER(ui32Value);
+       PVR_UNREFERENCED_PARAMETER(ui32Mask);
+       return PVRSRV_OK;
+#else
+
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+       IMG_HANDLE hOSEvent;
+       PVRSRV_ERROR eError;
+       PVRSRV_ERROR eErrorWait;
+       IMG_UINT32 ui32ActualValue;
+
+       eError = OSEventObjectOpen(psPVRSRVData->hGlobalEventObject, &hOSEvent);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectOpen", EventObjectOpenError);
+
+       eError = PVRSRV_ERROR_TIMEOUT; /* Initialiser for following loop */
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               ui32ActualValue = (OSReadDeviceMem32(pui32LinMemAddr) & ui32Mask);
+
+               if (ui32ActualValue == ui32Value)
+               {
+                       /* Expected value has been found */
+                       eError = PVRSRV_OK;
+                       break;
+               }
+               else if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+               {
+                       /* Services in bad state, don't wait any more */
+                       eError = PVRSRV_ERROR_NOT_READY;
+                       break;
+               }
+               else
+               {
+                       /* wait for event and retry */
+                       eErrorWait = OSEventObjectWait(hOSEvent);
+                       if (eErrorWait != PVRSRV_OK  &&  eErrorWait != PVRSRV_ERROR_TIMEOUT)
+                       {
+                               PVR_DPF((PVR_DBG_WARNING, "%s: Failed with error %d. Found value 0x%x but was expected "
+                                        "to be 0x%x (Mask 0x%08x). Retrying",
+                                                __func__,
+                                                eErrorWait,
+                                                ui32ActualValue,
+                                                ui32Value,
+                                                ui32Mask));
+                       }
+               }
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       OSEventObjectClose(hOSEvent);
+
+       /* One last check in case the object wait ended after the loop timeout... */
+       if (eError != PVRSRV_OK &&
+           (OSReadDeviceMem32(pui32LinMemAddr) & ui32Mask) == ui32Value)
+       {
+               eError = PVRSRV_OK;
+       }
+
+       /* Provide event timeout information to aid the Device Watchdog Thread... */
+       if (eError == PVRSRV_OK)
+       {
+               psPVRSRVData->ui32GEOConsecutiveTimeouts = 0;
+       }
+       else if (eError == PVRSRV_ERROR_TIMEOUT)
+       {
+               psPVRSRVData->ui32GEOConsecutiveTimeouts++;
+       }
+
+EventObjectOpenError:
+
+       return eError;
+
+#endif /* NO_HARDWARE */
+}
+
+int PVRSRVGetDriverStatus(void)
+{
+       return PVRSRVGetPVRSRVData()->eServicesState;
+}
+
+/*
+       PVRSRVSystemHasCacheSnooping
+*/
+IMG_BOOL PVRSRVSystemHasCacheSnooping(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+       if ((psDevConfig->eCacheSnoopingMode != PVRSRV_DEVICE_SNOOP_NONE) &&
+               (psDevConfig->eCacheSnoopingMode != PVRSRV_DEVICE_SNOOP_EMULATED))
+       {
+               return IMG_TRUE;
+       }
+       return IMG_FALSE;
+}
+
+IMG_BOOL PVRSRVSystemSnoopingIsEmulated(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+       if (psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_EMULATED)
+       {
+               return IMG_TRUE;
+       }
+       return IMG_FALSE;
+}
+
+IMG_BOOL PVRSRVSystemSnoopingOfCPUCache(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+       if ((psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CPU_ONLY) ||
+               (psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CROSS))
+       {
+               return IMG_TRUE;
+       }
+       return IMG_FALSE;
+}
+
+IMG_BOOL PVRSRVSystemSnoopingOfDeviceCache(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+       if ((psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_DEVICE_ONLY) ||
+               (psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CROSS))
+       {
+               return IMG_TRUE;
+       }
+       return IMG_FALSE;
+}
+
+IMG_BOOL PVRSRVSystemHasNonMappableLocalMemory(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+       return psDevConfig->bHasNonMappableLocalMemory;
+}
+
+/*
+       PVRSRVSystemWaitCycles
+*/
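+/* Convert a cycle count into a microsecond delay using the device clock
+ * frequency from pfnClockFreqGet: delay_us = cycles * 1000000 / freq_hz,
+ * clamped to a minimum of 1us (e.g. 1000 cycles at 500 MHz gives a 2us wait).
+ * If no frequency callback is provided, a 1us wait is used.
+ */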
+void PVRSRVSystemWaitCycles(PVRSRV_DEVICE_CONFIG *psDevConfig, IMG_UINT32 ui32Cycles)
+{
+       /* Delay in us */
+       IMG_UINT32 ui32Delayus = 1;
+
+       /* obtain the device freq */
+       if (psDevConfig->pfnClockFreqGet != NULL)
+       {
+               IMG_UINT32 ui32DeviceFreq;
+
+               ui32DeviceFreq = psDevConfig->pfnClockFreqGet(psDevConfig->hSysData);
+
+               ui32Delayus = (ui32Cycles*1000000)/ui32DeviceFreq;
+
+               if (ui32Delayus == 0)
+               {
+                       ui32Delayus = 1;
+               }
+       }
+
+       OSWaitus(ui32Delayus);
+}
+
+static void *
+PVRSRVSystemInstallDeviceLISR_Match_AnyVaCb(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                                       va_list va)
+{
+       void *pvOSDevice = va_arg(va, void *);
+
+       if (psDeviceNode->psDevConfig->pvOSDevice == pvOSDevice)
+       {
+               return psDeviceNode;
+       }
+
+       return NULL;
+}
+
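+/* Find the device node whose psDevConfig->pvOSDevice matches pvOSDevice and
+ * install the LISR for it via the system layer (SysInstallDeviceLISR).
+ * Returns PVRSRV_ERROR_INVALID_DEVICE if no such device is registered.
+ */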
+PVRSRV_ERROR PVRSRVSystemInstallDeviceLISR(void *pvOSDevice,
+                                                                                  IMG_UINT32 ui32IRQ,
+                                                                                  const IMG_CHAR *pszName,
+                                                                                  PFN_LISR pfnLISR,
+                                                                                  void *pvData,
+                                                                                  IMG_HANDLE *phLISRData)
+{
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+
+       OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock);
+       psDeviceNode =
+               List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+                                                                          &PVRSRVSystemInstallDeviceLISR_Match_AnyVaCb,
+                                                                          pvOSDevice);
+       OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock);
+
+       if (!psDeviceNode)
+       {
+               /* Device can't be found in the list so it isn't in the system */
+               PVR_DPF((PVR_DBG_ERROR, "%s: device %p with irq %d is not present",
+                                __func__, pvOSDevice, ui32IRQ));
+               return PVRSRV_ERROR_INVALID_DEVICE;
+       }
+
+       return SysInstallDeviceLISR(psDeviceNode->psDevConfig->hSysData, ui32IRQ,
+                                                               pszName, pfnLISR, pvData, phLISRData);
+}
+
+PVRSRV_ERROR PVRSRVSystemUninstallDeviceLISR(IMG_HANDLE hLISRData)
+{
+       return SysUninstallDeviceLISR(hLISRData);
+}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION) && defined(EMULATOR)
+/* functions only used on rogue, but header defining them is common */
+void SetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState)
+{
+       SysSetAxiProtOSid(ui32OSid, bState);
+}
+
+void SetTrustedDeviceAceEnabled(void)
+{
+       SysSetTrustedDeviceAceEnabled();
+}
+#endif
+
+#if defined(SUPPORT_RGX)
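+/* Create the periodic HWPerf host events thread and its event object on first
+ * call; on subsequent calls just update the thread timeout and signal the
+ * existing thread. Serialised by hHWPerfHostPeriodicThread_Lock.
+ */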
+PVRSRV_ERROR PVRSRVCreateHWPerfHostThread(IMG_UINT32 ui32Timeout)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       if (!ui32Timeout)
+               return PVRSRV_ERROR_INVALID_PARAMS;
+
+       OSLockAcquire(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock);
+
+       /* Create only once */
+       if (gpsPVRSRVData->hHWPerfHostPeriodicThread == NULL)
+       {
+               /* Create the HWPerf event object */
+               eError = OSEventObjectCreate("PVRSRV_HWPERFHOSTPERIODIC_EVENTOBJECT", &gpsPVRSRVData->hHWPerfHostPeriodicEvObj);
+               PVR_LOG_IF_ERROR(eError, "OSEventObjectCreate");
+
+               if (eError == PVRSRV_OK)
+               {
+                       gpsPVRSRVData->bHWPerfHostThreadStop = IMG_FALSE;
+                       gpsPVRSRVData->ui32HWPerfHostThreadTimeout = ui32Timeout;
+                       /* Create a thread which is used to periodically emit host stream packets */
+                       eError = OSThreadCreate(&gpsPVRSRVData->hHWPerfHostPeriodicThread,
+                               "pvr_hwperf_host",
+                               HWPerfPeriodicHostEventsThread,
+                               NULL, IMG_TRUE, gpsPVRSRVData);
+                       PVR_LOG_IF_ERROR(eError, "OSThreadCreate");
+               }
+       }
+       /* If the thread has already been created then just update the timeout and wake up the thread */
+       else
+       {
+               gpsPVRSRVData->ui32HWPerfHostThreadTimeout = ui32Timeout;
+               eError = OSEventObjectSignal(gpsPVRSRVData->hHWPerfHostPeriodicEvObj);
+               PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+       }
+
+       OSLockRelease(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock);
+       return eError;
+}
+
+PVRSRV_ERROR PVRSRVDestroyHWPerfHostThread(void)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       OSLockAcquire(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock);
+
+       /* Stop and cleanup the HWPerf periodic thread */
+       if (gpsPVRSRVData->hHWPerfHostPeriodicThread)
+       {
+               if (gpsPVRSRVData->hHWPerfHostPeriodicEvObj)
+               {
+                       gpsPVRSRVData->bHWPerfHostThreadStop = IMG_TRUE;
+                       eError = OSEventObjectSignal(gpsPVRSRVData->hHWPerfHostPeriodicEvObj);
+                       PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+               }
+               LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US)
+               {
+                       eError = OSThreadDestroy(gpsPVRSRVData->hHWPerfHostPeriodicThread);
+                       if (PVRSRV_OK == eError)
+                       {
+                               gpsPVRSRVData->hHWPerfHostPeriodicThread = NULL;
+                               break;
+                       }
+                       OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT);
+               } END_LOOP_UNTIL_TIMEOUT();
+               PVR_LOG_IF_ERROR(eError, "OSThreadDestroy");
+
+               if (gpsPVRSRVData->hHWPerfHostPeriodicEvObj)
+               {
+                       eError = OSEventObjectDestroy(gpsPVRSRVData->hHWPerfHostPeriodicEvObj);
+                       gpsPVRSRVData->hHWPerfHostPeriodicEvObj = NULL;
+                       PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+               }
+       }
+
+       OSLockRelease(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock);
+       return eError;
+}
+#endif
+
+/*
+ * Scan the list of known devices until we find the specific instance or
+ * exhaust the list
+ */
+PVRSRV_DEVICE_NODE *PVRSRVGetDeviceInstance(IMG_UINT32 uiInstance)
+{
+       PVRSRV_DEVICE_NODE *psDevNode;
+
+       if (uiInstance >= gpsPVRSRVData->ui32RegisteredDevices)
+       {
+               return NULL;
+       }
+       OSWRLockAcquireRead(gpsPVRSRVData->hDeviceNodeListLock);
+       for (psDevNode = gpsPVRSRVData->psDeviceNodeList;
+            psDevNode != NULL; psDevNode = psDevNode->psNext)
+       {
+               if (uiInstance == psDevNode->sDevId.ui32InternalID)
+               {
+                       break;
+               }
+       }
+       OSWRLockReleaseRead(gpsPVRSRVData->hDeviceNodeListLock);
+
+       return psDevNode;
+}
+
+PVRSRV_DEVICE_NODE *PVRSRVGetDeviceInstanceByOSId(IMG_INT32 i32OSInstance)
+{
+       PVRSRV_DEVICE_NODE *psDevNode;
+
+       OSWRLockAcquireRead(gpsPVRSRVData->hDeviceNodeListLock);
+       for (psDevNode = gpsPVRSRVData->psDeviceNodeList;
+            psDevNode != NULL; psDevNode = psDevNode->psNext)
+       {
+               if (i32OSInstance == psDevNode->sDevId.i32OsDeviceID)
+               {
+                       break;
+               }
+       }
+       OSWRLockReleaseRead(gpsPVRSRVData->hDeviceNodeListLock);
+
+       return psDevNode;
+}
+
+/* Default function for querying the power state of the system */
+PVRSRV_SYS_POWER_STATE PVRSRVDefaultDomainPower(PVRSRV_DEVICE_NODE *psDevNode)
+{
+       return psDevNode->eCurrentSysPowerState;
+}
+/*****************************************************************************
+ End of file (pvrsrv.c)
+*****************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/pvrsrv_bridge_init.c b/drivers/gpu/drm/img/img-rogue/services/server/common/pvrsrv_bridge_init.c
new file mode 100644 (file)
index 0000000..2ce4ae0
--- /dev/null
@@ -0,0 +1,385 @@
+/*************************************************************************/ /*!
+@File
+@Title          PVR Common Bridge Init/Deinit Module (kernel side)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements common PVR Bridge init/deinit code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv_bridge_init.h"
+#include "srvcore.h"
+
+/* These will go when full bridge gen comes in */
+#if defined(PDUMP)
+PVRSRV_ERROR InitPDUMPCTRLBridge(void);
+void DeinitPDUMPCTRLBridge(void);
+PVRSRV_ERROR InitPDUMPBridge(void);
+void DeinitPDUMPBridge(void);
+PVRSRV_ERROR InitRGXPDUMPBridge(void);
+void DeinitRGXPDUMPBridge(void);
+#endif
+#if defined(SUPPORT_DISPLAY_CLASS)
+PVRSRV_ERROR InitDCBridge(void);
+void DeinitDCBridge(void);
+#endif
+PVRSRV_ERROR InitMMBridge(void);
+void DeinitMMBridge(void);
+#if !defined(EXCLUDE_CMM_BRIDGE)
+PVRSRV_ERROR InitCMMBridge(void);
+void DeinitCMMBridge(void);
+#endif
+PVRSRV_ERROR InitPDUMPMMBridge(void);
+void DeinitPDUMPMMBridge(void);
+PVRSRV_ERROR InitSRVCOREBridge(void);
+void DeinitSRVCOREBridge(void);
+PVRSRV_ERROR InitSYNCBridge(void);
+void DeinitSYNCBridge(void);
+#if defined(SUPPORT_DMA_TRANSFER)
+PVRSRV_ERROR InitDMABridge(void);
+void DeinitDMABridge(void);
+#endif
+
+#if defined(SUPPORT_RGX)
+PVRSRV_ERROR InitRGXTA3DBridge(void);
+void DeinitRGXTA3DBridge(void);
+#if defined(SUPPORT_RGXTQ_BRIDGE)
+PVRSRV_ERROR InitRGXTQBridge(void);
+void DeinitRGXTQBridge(void);
+#endif /* defined(SUPPORT_RGXTQ_BRIDGE) */
+
+#if defined(SUPPORT_USC_BREAKPOINT)
+PVRSRV_ERROR InitRGXBREAKPOINTBridge(void);
+void DeinitRGXBREAKPOINTBridge(void);
+#endif
+PVRSRV_ERROR InitRGXFWDBGBridge(void);
+void DeinitRGXFWDBGBridge(void);
+PVRSRV_ERROR InitRGXHWPERFBridge(void);
+void DeinitRGXHWPERFBridge(void);
+#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE)
+PVRSRV_ERROR InitRGXREGCONFIGBridge(void);
+void DeinitRGXREGCONFIGBridge(void);
+#endif
+PVRSRV_ERROR InitRGXKICKSYNCBridge(void);
+void DeinitRGXKICKSYNCBridge(void);
+#endif /* SUPPORT_RGX */
+PVRSRV_ERROR InitCACHEBridge(void);
+void DeinitCACHEBridge(void);
+#if defined(SUPPORT_SECURE_EXPORT)
+PVRSRV_ERROR InitSMMBridge(void);
+void DeinitSMMBridge(void);
+#endif
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+PVRSRV_ERROR InitHTBUFFERBridge(void);
+void DeinitHTBUFFERBridge(void);
+#endif
+PVRSRV_ERROR InitPVRTLBridge(void);
+void DeinitPVRTLBridge(void);
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+PVRSRV_ERROR InitRIBridge(void);
+void DeinitRIBridge(void);
+#endif
+PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void);
+void DeinitDEVICEMEMHISTORYBridge(void);
+#if defined(SUPPORT_VALIDATION_BRIDGE)
+PVRSRV_ERROR InitVALIDATIONBridge(void);
+void DeinitVALIDATIONBridge(void);
+#endif
+#if defined(PVR_TESTING_UTILS)
+PVRSRV_ERROR InitTUTILSBridge(void);
+void DeinitTUTILSBridge(void);
+#endif
+PVRSRV_ERROR InitSYNCTRACKINGBridge(void);
+void DeinitSYNCTRACKINGBridge(void);
+#if defined(SUPPORT_WRAP_EXTMEM)
+PVRSRV_ERROR InitMMEXTMEMBridge(void);
+void DeinitMMEXTMEMBridge(void);
+#endif
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+PVRSRV_ERROR InitSYNCFALLBACKBridge(void);
+void DeinitSYNCFALLBACKBridge(void);
+#endif
+PVRSRV_ERROR InitRGXTIMERQUERYBridge(void);
+void DeinitRGXTIMERQUERYBridge(void);
+#if defined(SUPPORT_DI_BRG_IMPL)
+PVRSRV_ERROR InitDIBridge(void);
+void DeinitDIBridge(void);
+#endif
+
+PVRSRV_ERROR
+ServerBridgeInit(void)
+{
+       PVRSRV_ERROR eError;
+
+       BridgeDispatchTableStartOffsetsInit();
+
+       eError = InitSRVCOREBridge();
+       PVR_LOG_IF_ERROR(eError, "InitSRVCOREBridge");
+
+       eError = InitSYNCBridge();
+       PVR_LOG_IF_ERROR(eError, "InitSYNCBridge");
+
+#if defined(PDUMP)
+       eError = InitPDUMPCTRLBridge();
+       PVR_LOG_IF_ERROR(eError, "InitPDUMPCTRLBridge");
+#endif
+
+       eError = InitMMBridge();
+       PVR_LOG_IF_ERROR(eError, "InitMMBridge");
+
+#if !defined(EXCLUDE_CMM_BRIDGE)
+       eError = InitCMMBridge();
+       PVR_LOG_IF_ERROR(eError, "InitCMMBridge");
+#endif
+
+#if defined(PDUMP)
+       eError = InitPDUMPMMBridge();
+       PVR_LOG_IF_ERROR(eError, "InitPDUMPMMBridge");
+
+       eError = InitPDUMPBridge();
+       PVR_LOG_IF_ERROR(eError, "InitPDUMPBridge");
+#endif
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+       eError = InitDCBridge();
+       PVR_LOG_IF_ERROR(eError, "InitDCBridge");
+#endif
+
+       eError = InitCACHEBridge();
+       PVR_LOG_IF_ERROR(eError, "InitCACHEBridge");
+
+#if defined(SUPPORT_SECURE_EXPORT)
+       eError = InitSMMBridge();
+       PVR_LOG_IF_ERROR(eError, "InitSMMBridge");
+#endif
+
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+       eError = InitHTBUFFERBridge();
+       PVR_LOG_IF_ERROR(eError, "InitHTBUFFERBridge");
+#endif
+
+       eError = InitPVRTLBridge();
+       PVR_LOG_IF_ERROR(eError, "InitPVRTLBridge");
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+       eError = InitRIBridge();
+       PVR_LOG_IF_ERROR(eError, "InitRIBridge");
+#endif
+
+#if defined(SUPPORT_VALIDATION_BRIDGE)
+       eError = InitVALIDATIONBridge();
+       PVR_LOG_IF_ERROR(eError, "InitVALIDATIONBridge");
+#endif
+
+#if defined(PVR_TESTING_UTILS)
+       eError = InitTUTILSBridge();
+       PVR_LOG_IF_ERROR(eError, "InitTUTILSBridge");
+#endif
+
+       eError = InitDEVICEMEMHISTORYBridge();
+       PVR_LOG_IF_ERROR(eError, "InitDEVICEMEMHISTORYBridge");
+
+       eError = InitSYNCTRACKINGBridge();
+       PVR_LOG_IF_ERROR(eError, "InitSYNCTRACKINGBridge");
+
+#if defined(SUPPORT_DMA_TRANSFER)
+       eError = InitDMABridge();
+       PVR_LOG_IF_ERROR(eError, "InitDMABridge");
+#endif
+
+#if defined(SUPPORT_RGX)
+
+#if defined(SUPPORT_RGXTQ_BRIDGE)
+       eError = InitRGXTQBridge();
+       PVR_LOG_IF_ERROR(eError, "InitRGXTQBridge");
+#endif /* defined(SUPPORT_RGXTQ_BRIDGE) */
+
+       eError = InitRGXTA3DBridge();
+       PVR_LOG_IF_ERROR(eError, "InitRGXTA3DBridge");
+
+#if defined(SUPPORT_USC_BREAKPOINT)
+       eError = InitRGXBREAKPOINTBridge();
+       PVR_LOG_IF_ERROR(eError, "InitRGXBREAKPOINTBridge");
+#endif
+
+       eError = InitRGXFWDBGBridge();
+       PVR_LOG_IF_ERROR(eError, "InitRGXFWDBGBridge");
+
+#if defined(PDUMP)
+       eError = InitRGXPDUMPBridge();
+       PVR_LOG_IF_ERROR(eError, "InitRGXPDUMPBridge");
+#endif
+
+       eError = InitRGXHWPERFBridge();
+       PVR_LOG_IF_ERROR(eError, "InitRGXHWPERFBridge");
+
+#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE)
+       eError = InitRGXREGCONFIGBridge();
+       PVR_LOG_IF_ERROR(eError, "InitRGXREGCONFIGBridge");
+#endif
+
+       eError = InitRGXKICKSYNCBridge();
+       PVR_LOG_IF_ERROR(eError, "InitRGXKICKSYNCBridge");
+
+       eError = InitRGXTIMERQUERYBridge();
+       PVR_LOG_IF_ERROR(eError, "InitRGXTIMERQUERYBridge");
+
+#endif /* SUPPORT_RGX */
+
+#if defined(SUPPORT_WRAP_EXTMEM)
+       eError = InitMMEXTMEMBridge();
+       PVR_LOG_IF_ERROR(eError, "InitMMEXTMEMBridge");
+#endif
+
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+       eError = InitSYNCFALLBACKBridge();
+       PVR_LOG_IF_ERROR(eError, "InitSYNCFALLBACKBridge");
+#endif
+
+#if defined(SUPPORT_DI_BRG_IMPL)
+       eError = InitDIBridge();
+       PVR_LOG_IF_ERROR(eError, "InitDIBridge");
+#endif
+
+       eError = OSPlatformBridgeInit();
+       PVR_LOG_IF_ERROR(eError, "OSPlatformBridgeInit");
+
+       return eError;
+}
+
+void ServerBridgeDeInit(void)
+{
+       OSPlatformBridgeDeInit();
+
+#if defined(SUPPORT_DI_BRG_IMPL)
+       DeinitDIBridge();
+#endif
+
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+       DeinitSYNCFALLBACKBridge();
+#endif
+
+#if defined(SUPPORT_WRAP_EXTMEM)
+       DeinitMMEXTMEMBridge();
+#endif
+
+       DeinitSRVCOREBridge();
+
+       DeinitSYNCBridge();
+
+#if defined(PDUMP)
+       DeinitPDUMPCTRLBridge();
+#endif
+
+       DeinitMMBridge();
+
+#if !defined(EXCLUDE_CMM_BRIDGE)
+       DeinitCMMBridge();
+#endif
+
+#if defined(PDUMP)
+       DeinitPDUMPMMBridge();
+
+       DeinitPDUMPBridge();
+#endif
+
+#if defined(PVR_TESTING_UTILS)
+       DeinitTUTILSBridge();
+#endif
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+       DeinitDCBridge();
+#endif
+
+       DeinitCACHEBridge();
+
+#if defined(SUPPORT_SECURE_EXPORT)
+       DeinitSMMBridge();
+#endif
+
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+       DeinitHTBUFFERBridge();
+#endif
+
+       DeinitPVRTLBridge();
+
+#if defined(SUPPORT_VALIDATION_BRIDGE)
+       DeinitVALIDATIONBridge();
+#endif
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+       DeinitRIBridge();
+#endif
+
+       DeinitDEVICEMEMHISTORYBridge();
+
+       DeinitSYNCTRACKINGBridge();
+
+#if defined(SUPPORT_DMA_TRANSFER)
+       DeinitDMABridge();
+#endif
+
+#if defined(SUPPORT_RGX)
+
+#if defined(SUPPORT_RGXTQ_BRIDGE)
+       DeinitRGXTQBridge();
+#endif /* defined(SUPPORT_RGXTQ_BRIDGE) */
+
+       DeinitRGXTA3DBridge();
+
+#if defined(SUPPORT_USC_BREAKPOINT)
+       DeinitRGXBREAKPOINTBridge();
+#endif
+
+       DeinitRGXFWDBGBridge();
+
+#if defined(PDUMP)
+       DeinitRGXPDUMPBridge();
+#endif
+
+       DeinitRGXHWPERFBridge();
+
+#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE)
+       DeinitRGXREGCONFIGBridge();
+#endif
+
+       DeinitRGXKICKSYNCBridge();
+
+       DeinitRGXTIMERQUERYBridge();
+
+#endif /* SUPPORT_RGX */
+}
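+
+/* Illustrative note (a sketch only, not generated bridge code): wiring up an
+ * additional conditional bridge follows the same pattern used throughout this
+ * file. The "EXAMPLE" names below are hypothetical and only show the shape of
+ * the Init/Deinit pairing and the error logging convention:
+ *
+ *   #if defined(SUPPORT_EXAMPLE_BRIDGE)
+ *       eError = InitEXAMPLEBridge();
+ *       PVR_LOG_IF_ERROR(eError, "InitEXAMPLEBridge");
+ *   #endif
+ *
+ * with a matching DeinitEXAMPLEBridge() call placed in ServerBridgeDeInit().
+ */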
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/pvrsrv_pool.c b/drivers/gpu/drm/img/img-rogue/services/server/common/pvrsrv_pool.c
new file mode 100644 (file)
index 0000000..d62a062
--- /dev/null
@@ -0,0 +1,260 @@
+/**************************************************************************/ /*!
+@File
+@Title          Services pool implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provides a generic pool implementation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "lock.h"
+#include "dllist.h"
+#include "allocmem.h"
+
+struct _PVRSRV_POOL_
+{
+       POS_LOCK hLock;
+       /* total max number of permitted entries in the pool */
+       IMG_UINT uiMaxEntries;
+       /* number of entries currently in use, i.e. handed out by
+        * PVRSRVPoolGet() and not yet returned via PVRSRVPoolPut()
+        */
+       IMG_UINT uiNumBusy;
+       /* number of not-in-use entries currently free in the pool */
+       IMG_UINT uiNumFree;
+
+       DLLIST_NODE sFreeList;
+
+       const IMG_CHAR *pszName;
+
+       PVRSRV_POOL_ALLOC_FUNC *pfnAlloc;
+       PVRSRV_POOL_FREE_FUNC *pfnFree;
+       void *pvPrivData;
+};
+
+typedef struct _PVRSRV_POOL_ENTRY_
+{
+       DLLIST_NODE sNode;
+       void *pvData;
+} PVRSRV_POOL_ENTRY;
+
+PVRSRV_ERROR PVRSRVPoolCreate(PVRSRV_POOL_ALLOC_FUNC *pfnAlloc,
+                                       PVRSRV_POOL_FREE_FUNC *pfnFree,
+                                       IMG_UINT32 ui32MaxEntries,
+                                       const IMG_CHAR *pszName,
+                                       void *pvPrivData,
+                                       PVRSRV_POOL **ppsPool)
+{
+       PVRSRV_POOL *psPool;
+       PVRSRV_ERROR eError;
+
+       psPool = OSAllocMem(sizeof(PVRSRV_POOL));
+       PVR_GOTO_IF_NOMEM(psPool, eError, err_alloc);
+
+       eError = OSLockCreate(&psPool->hLock);
+
+       PVR_GOTO_IF_ERROR(eError, err_lock_create);
+
+       psPool->uiMaxEntries = ui32MaxEntries;
+       psPool->uiNumBusy = 0;
+       psPool->uiNumFree = 0;
+       psPool->pfnAlloc = pfnAlloc;
+       psPool->pfnFree = pfnFree;
+       psPool->pvPrivData = pvPrivData;
+       psPool->pszName = pszName;
+
+       dllist_init(&psPool->sFreeList);
+
+       *ppsPool = psPool;
+
+       return PVRSRV_OK;
+
+err_lock_create:
+       OSFreeMem(psPool);
+err_alloc:
+       return eError;
+}
+
+static PVRSRV_ERROR _DestroyPoolEntry(PVRSRV_POOL *psPool,
+                                       PVRSRV_POOL_ENTRY *psEntry)
+{
+       psPool->pfnFree(psPool->pvPrivData, psEntry->pvData);
+       OSFreeMem(psEntry);
+
+       return PVRSRV_OK;
+}
+
+void PVRSRVPoolDestroy(PVRSRV_POOL *psPool)
+{
+       if (psPool->uiNumBusy != 0)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Attempt to destroy pool %s "
+                                               "with %u entries still in use",
+                                               __func__,
+                                               psPool->pszName,
+                                               psPool->uiNumBusy));
+               return;
+       }
+
+       OSLockDestroy(psPool->hLock);
+
+       if (psPool->uiNumFree)
+       {
+               PVRSRV_POOL_ENTRY *psEntry;
+               DLLIST_NODE *psChosenNode;
+
+               psChosenNode = dllist_get_next_node(&psPool->sFreeList);
+
+               while (psChosenNode)
+               {
+                       dllist_remove_node(psChosenNode);
+
+                       psEntry = IMG_CONTAINER_OF(psChosenNode, PVRSRV_POOL_ENTRY, sNode);
+                       _DestroyPoolEntry(psPool, psEntry);
+
+                       psPool->uiNumFree--;
+
+                       psChosenNode = dllist_get_next_node(&psPool->sFreeList);
+               }
+
+               PVR_ASSERT(psPool->uiNumFree == 0);
+       }
+
+       OSFreeMem(psPool);
+}
+
+static PVRSRV_ERROR _CreateNewPoolEntry(PVRSRV_POOL *psPool,
+                                       PVRSRV_POOL_ENTRY **ppsEntry)
+{
+       PVRSRV_POOL_ENTRY *psNewEntry;
+       PVRSRV_ERROR eError;
+
+       psNewEntry = OSAllocMem(sizeof(PVRSRV_POOL_ENTRY));
+       PVR_GOTO_IF_NOMEM(psNewEntry, eError, err_allocmem);
+
+       dllist_init(&psNewEntry->sNode);
+
+       eError = psPool->pfnAlloc(psPool->pvPrivData, &psNewEntry->pvData);
+
+       PVR_GOTO_IF_ERROR(eError, err_pfn_alloc);
+
+       *ppsEntry = psNewEntry;
+
+       return PVRSRV_OK;
+
+err_pfn_alloc:
+       OSFreeMem(psNewEntry);
+err_allocmem:
+       return eError;
+}
+
+PVRSRV_ERROR PVRSRVPoolGet(PVRSRV_POOL *psPool,
+                                       PVRSRV_POOL_TOKEN *hToken,
+                                       void **ppvDataOut)
+{
+       PVRSRV_POOL_ENTRY *psEntry;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       DLLIST_NODE *psChosenNode;
+
+       OSLockAcquire(psPool->hLock);
+
+       psChosenNode = dllist_get_next_node(&psPool->sFreeList);
+       if (unlikely(psChosenNode == NULL))
+       {
+               /* no available elements in the pool. try to create one */
+
+               eError = _CreateNewPoolEntry(psPool, &psEntry);
+
+               PVR_GOTO_IF_ERROR(eError, out_unlock);
+       }
+       else
+       {
+               dllist_remove_node(psChosenNode);
+
+               psEntry = IMG_CONTAINER_OF(psChosenNode, PVRSRV_POOL_ENTRY, sNode);
+
+               psPool->uiNumFree--;
+       }
+
+#if defined(DEBUG) || defined(SUPPORT_VALIDATION)
+       /* Don't poison the IN buffer as that is copied from the client and
+        * poisoning it would be a waste of cycles.
+        */
+       OSCachedMemSet(((IMG_PBYTE)psEntry->pvData)+PVRSRV_MAX_BRIDGE_IN_SIZE,
+                       PVRSRV_POISON_ON_ALLOC_VALUE, PVRSRV_MAX_BRIDGE_OUT_SIZE);
+#endif
+
+       psPool->uiNumBusy++;
+       *hToken = psEntry;
+       *ppvDataOut = psEntry->pvData;
+
+out_unlock:
+       OSLockRelease(psPool->hLock);
+       return eError;
+}
+
+PVRSRV_ERROR PVRSRVPoolPut(PVRSRV_POOL *psPool, PVRSRV_POOL_TOKEN hToken)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVRSRV_POOL_ENTRY *psEntry = hToken;
+
+       PVR_ASSERT(psPool->uiNumBusy > 0);
+
+       OSLockAcquire(psPool->hLock);
+
+       /* put this entry in the pool if the pool has space,
+        * otherwise free it
+        */
+       if (psPool->uiNumFree < psPool->uiMaxEntries)
+       {
+               dllist_add_to_tail(&psPool->sFreeList, &psEntry->sNode);
+               psPool->uiNumFree++;
+       }
+       else
+       {
+               eError = _DestroyPoolEntry(psPool, psEntry);
+       }
+
+       psPool->uiNumBusy--;
+
+       OSLockRelease(psPool->hLock);
+
+       return eError;
+}
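+
+/* Usage sketch (illustrative only; the callback names and values below are
+ * hypothetical, not part of this file): a client creates a pool with its
+ * alloc/free callbacks, brackets use of an entry with Get/Put, and destroys
+ * the pool once every token has been returned.
+ *
+ *   PVRSRV_POOL *psPool;
+ *   PVRSRV_POOL_TOKEN hToken;
+ *   void *pvData;
+ *
+ *   eError = PVRSRVPoolCreate(MyAllocCB, MyFreeCB, 8, "example_pool",
+ *                             pvPrivData, &psPool);
+ *   eError = PVRSRVPoolGet(psPool, &hToken, &pvData);
+ *   ...use pvData...
+ *   eError = PVRSRVPoolPut(psPool, hToken);
+ *   PVRSRVPoolDestroy(psPool);
+ */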
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/ri_server.c b/drivers/gpu/drm/img/img-rogue/services/server/common/ri_server.c
new file mode 100644 (file)
index 0000000..d1fe2b2
--- /dev/null
@@ -0,0 +1,2123 @@
+/*************************************************************************/ /*!
+@File           ri_server.c
+@Title          Resource Information (RI) server implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Resource Information (RI) server functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(__linux__)
+ #include <linux/version.h>
+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+  #include <linux/stdarg.h>
+ #else
+  #include <stdarg.h>
+ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
+#else
+ #include <stdarg.h>
+#endif /* __linux__ */
+#include "img_defs.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+
+#include "srvkm.h"
+#include "lock.h"
+
+/* services/include */
+#include "pvr_ricommon.h"
+
+/* services/server/include/ */
+#include "ri_server.h"
+
+/* services/include/shared/ */
+#include "hash.h"
+/* services/shared/include/ */
+#include "dllist.h"
+
+#include "pmr.h"
+
+/* include/device.h */
+#include "device.h"
+
+#if !defined(RI_UNIT_TEST)
+#include "pvrsrv.h"
+#endif
+
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+
+#define USE_RI_LOCK            1
+
+/*
+ * Initial size used for the Hash table (used to index the RI list entries).
+ */
+#define _RI_INITIAL_HASH_TABLE_SIZE    64
+
+/*
+ * Values written to the 'valid' field of RI structures when created and
+ * cleared prior to being destroyed. The code can then check this value
+ * before accessing the provided pointer contents as a valid RI structure.
+ */
+#define _VALID_RI_LIST_ENTRY   0x66bccb66
+#define _VALID_RI_SUBLIST_ENTRY        0x77cddc77
+#define _INVALID                               0x00000000
+
+/*
+ * If this define is set to 1, details of the linked lists (addresses,
+ * prev/next ptrs, etc) are also output when function RIDumpList() is called.
+ */
+#define _DUMP_LINKEDLIST_INFO          0
+
+
+typedef IMG_UINT64 _RI_BASE_T;
+
+
+/* No +1 in SIZE macros since sizeof includes \0 byte in size */
+
+#define RI_PROC_BUF_SIZE    16
+
+#define RI_MEMDESC_SUM_FRMT     "PID %d %s MEMDESCs Alloc'd:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K) + "\
+                                                  "Imported:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K) = "\
+                                                     "Total:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K)\n"
+#define RI_MEMDESC_SUM_BUF_SIZE (sizeof(RI_MEMDESC_SUM_FRMT)+5+RI_PROC_BUF_SIZE+30+60)
+
+
+#define RI_PMR_SUM_FRMT     "PID %d %s PMRs Alloc'd:0x%010" IMG_UINT64_FMTSPECx ", %" IMG_UINT64_FMTSPEC "K  "\
+                                        "[Physical: 0x%010" IMG_UINT64_FMTSPECx ", %" IMG_UINT64_FMTSPEC "K]\n"
+#define RI_PMR_SUM_BUF_SIZE (sizeof(RI_PMR_SUM_FRMT)+(20+40))
+
+#define RI_PMR_ENTRY_FRMT      "%%sPID:%%-5d <%%p>\t%%-%ds\t0x%%010" IMG_UINT64_FMTSPECx "\t[0x%%010" IMG_UINT64_FMTSPECx "]\t%%c"
+#define RI_PMR_ENTRY_BUF_SIZE  (sizeof(RI_PMR_ENTRY_FRMT)+(3+5+16+PVR_ANNOTATION_MAX_LEN+10+10))
+#define RI_PMR_ENTRY_FRMT_SIZE (sizeof(RI_PMR_ENTRY_FRMT))
+
+/* Use %5d rather than %d so the output aligns in server/kernel.log; debugFS just sees extra spaces */
+#define RI_MEMDESC_ENTRY_PROC_FRMT        "[%5d:%s]"
+#define RI_MEMDESC_ENTRY_PROC_BUF_SIZE    (sizeof(RI_MEMDESC_ENTRY_PROC_FRMT)+5+16)
+
+#define RI_SYS_ALLOC_IMPORT_FRMT      "{Import from PID %d}"
+#define RI_SYS_ALLOC_IMPORT_FRMT_SIZE (sizeof(RI_SYS_ALLOC_IMPORT_FRMT)+5)
+static IMG_CHAR g_szSysAllocImport[RI_SYS_ALLOC_IMPORT_FRMT_SIZE];
+
+#define RI_MEMDESC_ENTRY_IMPORT_FRMT     "{Import from PID %d}"
+#define RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_IMPORT_FRMT)+5)
+
+#define RI_MEMDESC_ENTRY_UNPINNED_FRMT     "{Unpinned}"
+#define RI_MEMDESC_ENTRY_UNPINNED_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_UNPINNED_FRMT))
+
+#define RI_MEMDESC_ENTRY_FRMT      "%%sPID:%%-5d 0x%%010" IMG_UINT64_FMTSPECx "\t%%-%ds %%s\t0x%%010" IMG_UINT64_FMTSPECx "\t<%%p> %%s%%s%%s%%c"
+#define RI_MEMDESC_ENTRY_BUF_SIZE  (sizeof(RI_MEMDESC_ENTRY_FRMT)+(3+5+10+PVR_ANNOTATION_MAX_LEN+RI_MEMDESC_ENTRY_PROC_BUF_SIZE+16+\
+                                               RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE+RI_SYS_ALLOC_IMPORT_FRMT_SIZE+RI_MEMDESC_ENTRY_UNPINNED_BUF_SIZE))
+#define RI_MEMDESC_ENTRY_FRMT_SIZE (sizeof(RI_MEMDESC_ENTRY_FRMT))
+
+
+#define RI_FRMT_SIZE_MAX (MAX(RI_MEMDESC_ENTRY_BUF_SIZE,\
+                              MAX(RI_PMR_ENTRY_BUF_SIZE,\
+                                  MAX(RI_MEMDESC_SUM_BUF_SIZE,\
+                                      RI_PMR_SUM_BUF_SIZE))))
+
+
+
+
+/* Structure used to make linked sublist of memory allocations (MEMDESC) */
+struct _RI_SUBLIST_ENTRY_
+{
+       DLLIST_NODE                             sListNode;
+       struct _RI_LIST_ENTRY_  *psRI;
+       IMG_UINT32                              valid;
+       IMG_BOOL                                bIsImport;
+       IMG_BOOL                                bIsSuballoc;
+       IMG_PID                                 pid;
+       IMG_CHAR                                ai8ProcName[RI_PROC_BUF_SIZE];
+       IMG_DEV_VIRTADDR                sVAddr;
+       IMG_UINT64                              ui64Offset;
+       IMG_UINT64                              ui64Size;
+       IMG_CHAR                                ai8TextB[DEVMEM_ANNOTATION_MAX_LEN+1];
+       DLLIST_NODE                             sProcListNode;
+};
+
+/*
+ * Structure used to make linked list of PMRs. Sublists of allocations
+ * (MEMDESCs) made from these PMRs are chained off these entries.
+ */
+struct _RI_LIST_ENTRY_
+{
+       DLLIST_NODE                             sListNode;
+       DLLIST_NODE                             sSysAllocListNode;
+       DLLIST_NODE                             sSubListFirst;
+       IMG_UINT32                              valid;
+       PMR                                             *psPMR;
+       IMG_PID                                 pid;
+       IMG_CHAR                                ai8ProcName[RI_PROC_BUF_SIZE];
+       IMG_UINT16                              ui16SubListCount;
+       IMG_UINT16                              ui16MaxSubListCount;
+       IMG_UINT32                              ui32RIPMRFlags; /* Flags used to indicate the type of allocation */
+       IMG_UINT32                              ui32Flags; /* Flags used to indicate if PMR appears in ri debugfs output */
+};
+
+typedef struct _RI_LIST_ENTRY_ RI_LIST_ENTRY;
+typedef struct _RI_SUBLIST_ENTRY_ RI_SUBLIST_ENTRY;
+
+static IMG_UINT16      g_ui16RICount;
+static HASH_TABLE      *g_pRIHashTable;
+static IMG_UINT16      g_ui16ProcCount;
+static HASH_TABLE      *g_pProcHashTable;
+
+static POS_LOCK                g_hRILock;
+
+/* Linked list of PMR allocations made against the PVR_SYS_ALLOC_PID and lock
+ * to prevent concurrent access to it.
+ */
+static POS_LOCK                g_hSysAllocPidListLock;
+static DLLIST_NODE     g_sSysAllocPidListHead;
+
+/*
+ * Flag used to indicate if RILock should be destroyed when final PMR entry is
+ * deleted, i.e. if RIDeInitKM() has already been called before that point but
+ * the handle manager has deferred deletion of RI entries.
+ */
+static IMG_BOOL                bRIDeInitDeferred = IMG_FALSE;
+
+/*
+ * Used as head of linked-list of PMR RI entries - this is useful when we wish
+ * to iterate all PMR list entries (when we don't have a PMR ref)
+ */
+static DLLIST_NODE     sListFirst;
+
+/* Function used to produce string containing info for MEMDESC RI entries (used for both debugfs and kernel log output) */
+static void _GenerateMEMDESCEntryString(RI_SUBLIST_ENTRY *psRISubEntry, IMG_BOOL bDebugFs, IMG_UINT16 ui16MaxStrLen, IMG_CHAR *pszEntryString);
+/* Function used to produce string containing info for PMR RI entries (used for both debugfs and kernel log output) */
+static void _GeneratePMREntryString(RI_LIST_ENTRY *psRIEntry, IMG_BOOL bDebugFs, IMG_UINT16 ui16MaxStrLen, IMG_CHAR *pszEntryString);
+
+static PVRSRV_ERROR _DumpAllEntries (uintptr_t k, uintptr_t v, void* pvPriv);
+static PVRSRV_ERROR _DeleteAllEntries (uintptr_t k, uintptr_t v, void* pvPriv);
+static PVRSRV_ERROR _DeleteAllProcEntries (uintptr_t k, uintptr_t v, void* pvPriv);
+static PVRSRV_ERROR _DumpList(PMR *psPMR, IMG_PID pid);
+#define _RIOutput(x) PVR_LOG(x)
+
+#define RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS    0x1
+#define RI_FLAG_SYSALLOC_PMR                           0x2
+
+static IMG_UINT32
+_ProcHashFunc(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen);
+
+static IMG_UINT32
+_ProcHashFunc(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen)
+{
+       IMG_UINT32 *p = (IMG_UINT32 *)pKey;
+       IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINT32);
+       IMG_UINT32 ui;
+       IMG_UINT32 uHashKey = 0;
+
+       PVR_UNREFERENCED_PARAMETER(uHashTabLen);
+
+       for (ui = 0; ui < uKeyLen; ui++)
+       {
+               IMG_UINT32 uHashPart = *p++;
+
+               uHashPart += (uHashPart << 12);
+               uHashPart ^= (uHashPart >> 22);
+               uHashPart += (uHashPart << 4);
+               uHashPart ^= (uHashPart >> 9);
+               uHashPart += (uHashPart << 10);
+               uHashPart ^= (uHashPart >> 2);
+               uHashPart += (uHashPart << 7);
+               uHashPart ^= (uHashPart >> 12);
+
+               uHashKey += uHashPart;
+       }
+
+       return uHashKey;
+}
+
+static IMG_BOOL
+_ProcHashComp(size_t uKeySize, void *pKey1, void *pKey2);
+
+static IMG_BOOL
+_ProcHashComp(size_t uKeySize, void *pKey1, void *pKey2)
+{
+       IMG_UINT32 *p1 = (IMG_UINT32 *)pKey1;
+       IMG_UINT32 *p2 = (IMG_UINT32 *)pKey2;
+       IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINT32);
+       IMG_UINT32 ui;
+
+       for (ui = 0; ui < uKeyLen; ui++)
+       {
+               if (*p1++ != *p2++)
+                       return IMG_FALSE;
+       }
+
+       return IMG_TRUE;
+}
+
+static void _RILock(void)
+{
+#if (USE_RI_LOCK == 1)
+       OSLockAcquire(g_hRILock);
+#endif
+}
+
+static void _RIUnlock(void)
+{
+#if (USE_RI_LOCK == 1)
+       OSLockRelease(g_hRILock);
+#endif
+}
+
+/* This value maintains a count of the number of PMRs attributed to the
+ * PVR_SYS_ALLOC_PID. Access to this value is protected by g_hRILock, so it
+ * does not need to be an ATOMIC_T.
+ */
+static IMG_UINT32 g_ui32SysAllocPMRCount;
+
+
+PVRSRV_ERROR RIInitKM(void)
+{
+       IMG_INT iCharsWritten;
+       PVRSRV_ERROR eError;
+
+       bRIDeInitDeferred = IMG_FALSE;
+
+       iCharsWritten = OSSNPrintf(g_szSysAllocImport,
+                   RI_SYS_ALLOC_IMPORT_FRMT_SIZE,
+                   RI_SYS_ALLOC_IMPORT_FRMT,
+                   PVR_SYS_ALLOC_PID);
+       PVR_LOG_IF_FALSE((iCharsWritten>0 && iCharsWritten<(IMG_INT32)RI_SYS_ALLOC_IMPORT_FRMT_SIZE), \
+                       "OSSNPrintf failed to initialise g_szSysAllocImport");
+
+       eError = OSLockCreate(&g_hSysAllocPidListLock);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: OSLockCreate (g_hSysAllocPidListLock) failed (returned %d)",
+                        __func__,
+                        eError));
+       }
+       dllist_init(&(g_sSysAllocPidListHead));
+#if (USE_RI_LOCK == 1)
+       eError = OSLockCreate(&g_hRILock);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: OSLockCreate (g_hRILock) failed (returned %d)",
+                        __func__,
+                        eError));
+       }
+#endif
+       return eError;
+}
+
+void RIDeInitKM(void)
+{
+#if (USE_RI_LOCK == 1)
+       if (g_ui16RICount > 0)
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                        "%s: called with %d entries remaining - deferring OSLockDestroy()",
+                        __func__,
+                        g_ui16RICount));
+               bRIDeInitDeferred = IMG_TRUE;
+       }
+       else
+       {
+               OSLockDestroy(g_hRILock);
+               OSLockDestroy(g_hSysAllocPidListLock);
+       }
+#endif
+}
+
+/*!
+*******************************************************************************
+
+ @Function     RILockAcquireKM
+
+ @Description
+            Acquires the RI Lock (which protects the integrity of the RI
+            linked lists). Caller will be suspended until lock is acquired.
+
+ @Return       None
+
+******************************************************************************/
+void RILockAcquireKM(void)
+{
+       _RILock();
+}
+
+/*!
+*******************************************************************************
+
+ @Function     RILockReleaseKM
+
+ @Description
+            Releases the RI Lock (which protects the integrity of the RI
+            linked lists).
+
+ @Return       None
+
+******************************************************************************/
+void RILockReleaseKM(void)
+{
+       _RIUnlock();
+}
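+
+/* Usage note (illustrative only): external callers that walk the RI lists are
+ * expected to bracket the traversal with these exported helpers, e.g.
+ *
+ *   RILockAcquireKM();
+ *   ...iterate or read RI list entries...
+ *   RILockReleaseKM();
+ *
+ * Which callers do so is outside the scope of this file.
+ */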
+
+/*!
+*******************************************************************************
+
+ @Function     RIWritePMREntryWithOwnerKM
+
+ @Description
+            Writes a new Resource Information list entry.
+            The new entry will be inserted at the head of the list of
+            PMR RI entries and assigned the values provided.
+
+ @input     psPMR - Reference (handle) to the PMR to which this reference relates
+
+ @input     ui32Owner - PID of the process which owns the allocation. This
+                        may not be the current process (e.g. a request to
+                        grow a buffer may happen in the context of a kernel
+                        thread, or we may import further resource for a
+                        suballocation made from the FW heap which can then
+                        also be utilized by other processes)
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIWritePMREntryWithOwnerKM(PMR *psPMR,
+                                        IMG_PID ui32Owner)
+{
+       PMR *pPMRHashKey = psPMR;
+       RI_LIST_ENTRY *psRIEntry;
+       uintptr_t hashData;
+
+       /* if Hash table has not been created, create it now */
+       if (!g_pRIHashTable)
+       {
+               g_pRIHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(PMR*), HASH_Func_Default, HASH_Key_Comp_Default);
+               g_pProcHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(IMG_PID), _ProcHashFunc, _ProcHashComp);
+       }
+       PVR_RETURN_IF_NOMEM(g_pRIHashTable);
+       PVR_RETURN_IF_NOMEM(g_pProcHashTable);
+
+       PVR_RETURN_IF_INVALID_PARAM(psPMR);
+
+       /* Acquire RI Lock */
+       _RILock();
+
+       /* Look-up psPMR in Hash Table */
+       hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey);
+       psRIEntry = (RI_LIST_ENTRY *)hashData;
+       if (!psRIEntry)
+       {
+               /*
+                * If failed to find a matching existing entry, create a new one
+                */
+               psRIEntry = (RI_LIST_ENTRY *)OSAllocZMemNoStats(sizeof(RI_LIST_ENTRY));
+               if (!psRIEntry)
+               {
+                       /* Release RI Lock */
+                       _RIUnlock();
+                       /* Error - no memory to allocate for new RI entry */
+                       return PVRSRV_ERROR_OUT_OF_MEMORY;
+               }
+               else
+               {
+                       PMR_FLAGS_T uiPMRFlags = PMR_Flags(psPMR);
+                       PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)PMR_DeviceNode(psPMR);
+
+                       /*
+                        * Add new RI Entry
+                        */
+                       if (g_ui16RICount == 0)
+                       {
+                               /* Initialise PMR entry linked-list head */
+                               dllist_init(&sListFirst);
+                       }
+                       g_ui16RICount++;
+
+                       dllist_init (&(psRIEntry->sSysAllocListNode));
+                       dllist_init (&(psRIEntry->sSubListFirst));
+                       psRIEntry->ui16SubListCount = 0;
+                       psRIEntry->ui16MaxSubListCount = 0;
+                       psRIEntry->valid = _VALID_RI_LIST_ENTRY;
+
+                       /* Check if this PMR should be accounted for under the
+                        * PVR_SYS_ALLOC_PID debugFS entry. This should happen if
+                        * we are in the driver init phase, if the flags indicate
+                        * this is a FW Main allocation (made from the FW heap),
+                        * or if the owner PID is PVR_SYS_ALLOC_PID.
+                        * Host dev node allocations are also recorded against
+                        * the system PID.
+                        */
+                       if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_INIT ||
+                               PVRSRV_CHECK_FW_MAIN(uiPMRFlags) ||
+                               ui32Owner == PVR_SYS_ALLOC_PID ||
+                               psDeviceNode == PVRSRVGetPVRSRVData()->psHostMemDeviceNode)
+                       {
+                               psRIEntry->ui32RIPMRFlags = RI_FLAG_SYSALLOC_PMR;
+                               OSSNPrintf(psRIEntry->ai8ProcName,
+                                               RI_PROC_BUF_SIZE,
+                                               "SysProc");
+                               psRIEntry->pid = PVR_SYS_ALLOC_PID;
+                               OSLockAcquire(g_hSysAllocPidListLock);
+                               /* Add this psRIEntry to the list of entries for PVR_SYS_ALLOC_PID */
+                               dllist_add_to_tail(&g_sSysAllocPidListHead, (PDLLIST_NODE)&(psRIEntry->sSysAllocListNode));
+                               OSLockRelease(g_hSysAllocPidListLock);
+                               g_ui32SysAllocPMRCount++;
+                       }
+                       else
+                       {
+                               psRIEntry->ui32RIPMRFlags = 0;
+                               psRIEntry->pid = ui32Owner;
+                       }
+
+                       OSSNPrintf(psRIEntry->ai8ProcName,
+                                       RI_PROC_BUF_SIZE,
+                                       "%s",
+                                       OSGetCurrentClientProcessNameKM());
+                       /* Add PMR entry to linked-list of all PMR entries */
+                       dllist_init (&(psRIEntry->sListNode));
+                       dllist_add_to_tail(&sListFirst, (PDLLIST_NODE)&(psRIEntry->sListNode));
+               }
+
+               psRIEntry->psPMR = psPMR;
+               psRIEntry->ui32Flags = 0;
+
+               /* Create index entry in Hash Table */
+               HASH_Insert_Extended (g_pRIHashTable, (void *)&pPMRHashKey, (uintptr_t)psRIEntry);
+
+               /* Store phRIHandle in PMR structure, so it can delete the associated RI entry when it destroys the PMR */
+               PMRStoreRIHandle(psPMR, psRIEntry);
+       }
+       /* Release RI Lock */
+       _RIUnlock();
+
+       return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function     RIWritePMREntryKM
+
+ @Description
+            Writes a new Resource Information list entry.
+            The new entry will be inserted at the head of the list of
+            PMR RI entries and assigned the values provided.
+
+ @input     psPMR - Reference (handle) to the PMR to which this reference relates
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIWritePMREntryKM(PMR *psPMR)
+{
+       return RIWritePMREntryWithOwnerKM(psPMR,
+                                         OSGetCurrentClientProcessIDKM());
+}
+
+/*!
+*******************************************************************************
+
+ @Function     RIWriteMEMDESCEntryKM
+
+ @Description
+            Writes a new Resource Information sublist entry.
+            The new entry will be inserted at the head of the sublist of
+            the indicated PMR list entry, and assigned the values provided.
+
+ @input     psPMR - Reference (handle) to the PMR to which this MEMDESC RI entry relates
+ @input     ui32TextBSize - Length of string provided in psz8TextB parameter
+ @input     psz8TextB - String describing this secondary reference (may be null)
+ @input     ui64Offset - Offset from the start of the PMR at which this allocation begins
+ @input     ui64Size - Size of this allocation
+ @input     bIsImport - Flag indicating if this is an allocation or an import
+ @input     bIsSuballoc - Flag indicating if this is a sub-allocation
+ @output    phRIHandle - Handle to the created RI entry
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *psPMR,
+                                   IMG_UINT32 ui32TextBSize,
+                                   const IMG_CHAR *psz8TextB,
+                                   IMG_UINT64 ui64Offset,
+                                   IMG_UINT64 ui64Size,
+                                   IMG_BOOL bIsImport,
+                                   IMG_BOOL bIsSuballoc,
+                                   RI_HANDLE *phRIHandle)
+{
+       RI_SUBLIST_ENTRY *psRISubEntry;
+       RI_LIST_ENTRY *psRIEntry;
+       PMR *pPMRHashKey = psPMR;
+       uintptr_t hashData;
+       IMG_PID pid;
+
+       /* Check Hash tables have been created (meaning at least one PMR has been defined) */
+       PVR_RETURN_IF_INVALID_PARAM(g_pRIHashTable);
+       PVR_RETURN_IF_INVALID_PARAM(g_pProcHashTable);
+
+       PVR_RETURN_IF_INVALID_PARAM(psPMR);
+       PVR_RETURN_IF_INVALID_PARAM(phRIHandle);
+
+       /* Acquire RI Lock */
+       _RILock();
+
+       *phRIHandle = NULL;
+
+       /* Look-up psPMR in Hash Table */
+       hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey);
+       psRIEntry = (RI_LIST_ENTRY *)hashData;
+       if (!psRIEntry)
+       {
+               /* Release RI Lock */
+               _RIUnlock();
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       psRISubEntry = (RI_SUBLIST_ENTRY *)OSAllocZMemNoStats(sizeof(RI_SUBLIST_ENTRY));
+       if (!psRISubEntry)
+       {
+               /* Release RI Lock */
+               _RIUnlock();
+               /* Error - no memory to allocate for new RI sublist entry */
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+       else
+       {
+               /*
+                * Insert new entry in sublist
+                */
+               PDLLIST_NODE currentNode = dllist_get_next_node(&(psRIEntry->sSubListFirst));
+
+               /*
+                * Insert new entry before currentNode
+                */
+               if (!currentNode)
+               {
+                       currentNode = &(psRIEntry->sSubListFirst);
+               }
+               dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sListNode));
+
+               psRISubEntry->psRI = psRIEntry;
+
+               /* Increment number of entries in sublist */
+               psRIEntry->ui16SubListCount++;
+               if (psRIEntry->ui16SubListCount > psRIEntry->ui16MaxSubListCount)
+               {
+                       psRIEntry->ui16MaxSubListCount = psRIEntry->ui16SubListCount;
+               }
+               psRISubEntry->valid = _VALID_RI_SUBLIST_ENTRY;
+       }
+
+       /* If allocation is made during device or driver initialisation,
+        * track the MEMDESC entry under PVR_SYS_ALLOC_PID, otherwise use
+        * the current PID.
+        * Record host dev node allocations on the system PID.
+        */
+       {
+               PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)PMR_DeviceNode(psRISubEntry->psRI->psPMR);
+
+               if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_INIT ||
+                       psDeviceNode == PVRSRVGetPVRSRVData()->psHostMemDeviceNode)
+               {
+                       psRISubEntry->pid = psRISubEntry->psRI->pid;
+               }
+               else
+               {
+                       psRISubEntry->pid = OSGetCurrentClientProcessIDKM();
+               }
+       }
+
+       if (ui32TextBSize > sizeof(psRISubEntry->ai8TextB)-1)
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                                "%s: TextBSize too long (%u). Text will be truncated "
+                                "to %zu characters", __func__,
+                                ui32TextBSize, sizeof(psRISubEntry->ai8TextB)-1));
+       }
+
+       /* copy ai8TextB field data */
+       OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8TextB, sizeof(psRISubEntry->ai8TextB), "%s", psz8TextB);
+
+       psRISubEntry->ui64Offset = ui64Offset;
+       psRISubEntry->ui64Size = ui64Size;
+       psRISubEntry->bIsImport = bIsImport;
+       psRISubEntry->bIsSuballoc = bIsSuballoc;
+       OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8ProcName, RI_PROC_BUF_SIZE, "%s", OSGetCurrentClientProcessNameKM());
+       dllist_init (&(psRISubEntry->sProcListNode));
+
+       /*
+        *      Now insert this MEMDESC into the proc list
+        */
+       /* look-up pid in Hash Table */
+       pid = psRISubEntry->pid;
+       hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid);
+       if (!hashData)
+       {
+               /*
+                * No allocations for this pid yet
+                */
+               HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)&(psRISubEntry->sProcListNode));
+               /* Increment number of entries in proc hash table */
+               g_ui16ProcCount++;
+       }
+       else
+       {
+               /*
+                * Insert allocation into pid allocations linked list
+                */
+               PDLLIST_NODE currentNode = (PDLLIST_NODE)hashData;
+
+               /*
+                * Insert new entry
+                */
+               dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sProcListNode));
+       }
+       *phRIHandle = (RI_HANDLE)psRISubEntry;
+       /* Release RI Lock */
+       _RIUnlock();
+
+       return PVRSRV_OK;
+}
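+
+/* Usage sketch (hypothetical caller code, not part of this file): a PMR entry
+ * is written first, then one or more MEMDESC sub-entries are attached to it,
+ * and each sub-entry is removed again with RIDeleteMEMDESCEntryKM() when the
+ * corresponding allocation is freed.
+ *
+ *   RI_HANDLE hRI;
+ *
+ *   eError = RIWritePMREntryKM(psPMR);
+ *   eError = RIWriteMEMDESCEntryKM(psPMR, sizeof("example"), "example",
+ *                                  0, ui64Size, IMG_FALSE, IMG_FALSE, &hRI);
+ *   ...
+ *   eError = RIDeleteMEMDESCEntryKM(hRI);
+ */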
+
+/*!
+*******************************************************************************
+
+ @Function     RIWriteProcListEntryKM
+
+ @Description
+            Writes a new entry in the process list directly. This is needed
+            because an entry may have no PMR handle, multiple PMR handles, or
+            handles that change over time.
+
+            In the common case we have a PMR that will be added to the PMR list
+            and one or several MemDescs that are associated with it in a sub-list.
+            Additionally, these MemDescs will be inserted in the per-process list.
+
+            There might be special descriptors, e.g. from new user APIs, that
+            are associated with no PMR or with multiple PMRs rather than just one.
+            These can now be added to the per-process list (as RI_SUBLIST_ENTRY)
+            directly with this function; they won't be listed in the PMR list
+            (RIEntry) because there might be no PMR.
+
+            To remove entries from the per-process list, just use
+            RIDeleteMEMDESCEntryKM().
+
+ @input     ui32TextBSize - Length of string provided in psz8TextB parameter
+ @input     psz8TextB - String describing this secondary reference (may be null)
+ @input     ui64Size - Size of this allocation
+ @input     ui64DevVAddr - Virtual address of this entry
+ @output    phRIHandle - Handle to the created RI entry
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIWriteProcListEntryKM(IMG_UINT32 ui32TextBSize,
+                                    const IMG_CHAR *psz8TextB,
+                                    IMG_UINT64 ui64Size,
+                                    IMG_UINT64 ui64DevVAddr,
+                                    RI_HANDLE *phRIHandle)
+{
+       uintptr_t hashData = 0;
+       IMG_PID         pid;
+       RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+
+       if (!g_pRIHashTable)
+       {
+               g_pRIHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(PMR*), HASH_Func_Default, HASH_Key_Comp_Default);
+               g_pProcHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(IMG_PID), _ProcHashFunc, _ProcHashComp);
+
+               if (!g_pRIHashTable || !g_pProcHashTable)
+               {
+                       /* Error - no memory to allocate for Hash table(s) */
+                       return PVRSRV_ERROR_OUT_OF_MEMORY;
+               }
+       }
+
+       /* Acquire RI Lock */
+       _RILock();
+
+       *phRIHandle = NULL;
+
+       psRISubEntry = (RI_SUBLIST_ENTRY *)OSAllocZMemNoStats(sizeof(RI_SUBLIST_ENTRY));
+       if (!psRISubEntry)
+       {
+               /* Release RI Lock */
+               _RIUnlock();
+               /* Error - no memory to allocate for new RI sublist entry */
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       psRISubEntry->valid = _VALID_RI_SUBLIST_ENTRY;
+
+       psRISubEntry->pid = OSGetCurrentClientProcessIDKM();
+
+       if (ui32TextBSize > sizeof(psRISubEntry->ai8TextB)-1)
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                        "%s: TextBSize too long (%u). Text will be truncated "
+                        "to %zu characters", __func__,
+                        ui32TextBSize, sizeof(psRISubEntry->ai8TextB)-1));
+       }
+
+       /* copy ai8TextB field data */
+       OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8TextB, sizeof(psRISubEntry->ai8TextB), "%s", psz8TextB);
+
+       psRISubEntry->ui64Offset = 0;
+       psRISubEntry->ui64Size = ui64Size;
+       psRISubEntry->sVAddr.uiAddr = ui64DevVAddr;
+       psRISubEntry->bIsImport = IMG_FALSE;
+       psRISubEntry->bIsSuballoc = IMG_FALSE;
+       OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8ProcName, RI_PROC_BUF_SIZE, "%s", OSGetCurrentClientProcessNameKM());
+       dllist_init (&(psRISubEntry->sProcListNode));
+
+       /*
+        *      Now insert this MEMDESC into the proc list
+        */
+       /* look-up pid in Hash Table */
+       pid = psRISubEntry->pid;
+       hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid);
+       if (!hashData)
+       {
+               /*
+                * No allocations for this pid yet
+                */
+               HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)&(psRISubEntry->sProcListNode));
+               /* Increment number of entries in proc hash table */
+               g_ui16ProcCount++;
+       }
+       else
+       {
+               /*
+                * Insert allocation into pid allocations linked list
+                */
+               PDLLIST_NODE currentNode = (PDLLIST_NODE)hashData;
+
+               /*
+                * Insert new entry
+                */
+               dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sProcListNode));
+       }
+       *phRIHandle = (RI_HANDLE)psRISubEntry;
+       /* Release RI Lock */
+       _RIUnlock();
+
+       return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function     RIUpdateMEMDESCAddrKM
+
+ @Description
+            Update a Resource Information entry.
+
+ @input     hRIHandle - Handle of object whose reference info is to be updated
+ @input     sVAddr - New address for the RI entry
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIUpdateMEMDESCAddrKM(RI_HANDLE hRIHandle,
+                                                                  IMG_DEV_VIRTADDR sVAddr)
+{
+       RI_SUBLIST_ENTRY *psRISubEntry;
+
+       PVR_RETURN_IF_INVALID_PARAM(hRIHandle);
+
+       psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle;
+       if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+       {
+               /* Pointer does not point to valid structure */
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       /* Acquire RI lock*/
+       _RILock();
+
+       psRISubEntry->sVAddr.uiAddr = sVAddr.uiAddr;
+
+       /* Release RI lock */
+       _RIUnlock();
+
+       return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function     RIDeletePMREntryKM
+
+ @Description
+            Delete a Resource Information entry.
+
+ @input     hRIHandle - Handle of object whose reference info is to be deleted
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDeletePMREntryKM(RI_HANDLE hRIHandle)
+{
+       RI_LIST_ENTRY *psRIEntry;
+       PMR                     *pPMRHashKey;
+       PVRSRV_ERROR eResult = PVRSRV_OK;
+
+       PVR_RETURN_IF_INVALID_PARAM(hRIHandle);
+
+       psRIEntry = (RI_LIST_ENTRY *)hRIHandle;
+
+       if (psRIEntry->valid != _VALID_RI_LIST_ENTRY)
+       {
+               /* Pointer does not point to valid structure */
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       if (psRIEntry->ui16SubListCount == 0)
+       {
+               /* Acquire RI lock*/
+               _RILock();
+
+               /* Remove the HASH table index entry */
+               pPMRHashKey = psRIEntry->psPMR;
+               HASH_Remove_Extended(g_pRIHashTable, (void *)&pPMRHashKey);
+
+               psRIEntry->valid = _INVALID;
+
+               /* Remove PMR entry from linked-list of PMR entries */
+               dllist_remove_node((PDLLIST_NODE)&(psRIEntry->sListNode));
+
+               if (psRIEntry->ui32RIPMRFlags & RI_FLAG_SYSALLOC_PMR)
+               {
+                       dllist_remove_node((PDLLIST_NODE)&(psRIEntry->sSysAllocListNode));
+                       g_ui32SysAllocPMRCount--;
+               }
+
+               /* Now, free the memory used to store the RI entry */
+               OSFreeMemNoStats(psRIEntry);
+               psRIEntry = NULL;
+
+               /*
+                * Decrement number of RI entries - if this is now zero,
+                * we can delete the RI hash table
+                */
+               if (--g_ui16RICount == 0)
+               {
+                       HASH_Delete(g_pRIHashTable);
+                       g_pRIHashTable = NULL;
+
+                       _RIUnlock();
+
+                       /* If deInit has been deferred, we can now destroy the RI Lock */
+                       if (bRIDeInitDeferred)
+                       {
+                               OSLockDestroy(g_hRILock);
+                       }
+               }
+               else
+               {
+                       /* Release RI lock*/
+                       _RIUnlock();
+               }
+               /*
+                * Make the handle NULL once PMR RI entry is deleted
+                */
+               hRIHandle = NULL;
+       }
+       else
+       {
+               eResult = PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP;
+       }
+
+       return eResult;
+}
+
+/*!
+*******************************************************************************
+
+ @Function     RIDeleteMEMDESCEntryKM
+
+ @Description
+            Delete a Resource Information entry.
+            Entry can be from RIEntry list or ProcList.
+
+ @input     hRIHandle - Handle of object whose reference info is to be deleted
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDeleteMEMDESCEntryKM(RI_HANDLE hRIHandle)
+{
+       RI_LIST_ENTRY *psRIEntry = NULL;
+       RI_SUBLIST_ENTRY *psRISubEntry;
+       uintptr_t hashData;
+       IMG_PID pid;
+
+       PVR_RETURN_IF_INVALID_PARAM(hRIHandle);
+
+       psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle;
+       if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+       {
+               /* Pointer does not point to valid structure */
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       /* Acquire RI lock*/
+       _RILock();
+
+       /* For entries which do have a parent PMR remove the node from the sublist */
+       if (psRISubEntry->psRI)
+       {
+               psRIEntry = (RI_LIST_ENTRY *)psRISubEntry->psRI;
+
+               /* Now, remove entry from the sublist */
+               dllist_remove_node(&(psRISubEntry->sListNode));
+       }
+
+       psRISubEntry->valid = _INVALID;
+
+       /* Remove the entry from the proc allocations linked list */
+       pid = psRISubEntry->pid;
+       /* If this is the only allocation for this pid, just remove it from the hash table */
+       if (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL)
+       {
+               HASH_Remove_Extended(g_pProcHashTable, (void *)&pid);
+               /* Decrement number of entries in proc hash table, and delete the hash table if there are now none */
+               if (--g_ui16ProcCount == 0)
+               {
+                       HASH_Delete(g_pProcHashTable);
+                       g_pProcHashTable = NULL;
+               }
+       }
+       else
+       {
+               hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid);
+               if ((PDLLIST_NODE)hashData == &(psRISubEntry->sProcListNode))
+               {
+                       HASH_Remove_Extended(g_pProcHashTable, (void *)&pid);
+                       HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)dllist_get_next_node(&(psRISubEntry->sProcListNode)));
+               }
+       }
+       dllist_remove_node(&(psRISubEntry->sProcListNode));
+
+       /* Now, free the memory used to store the sublist entry */
+       OSFreeMemNoStats(psRISubEntry);
+       psRISubEntry = NULL;
+
+       /*
+        * Decrement number of entries in sublist if this MemDesc had a parent entry.
+        */
+       if (psRIEntry)
+       {
+               psRIEntry->ui16SubListCount--;
+       }
+
+       /* Release RI lock*/
+       _RIUnlock();
+
+       /*
+        * Make the handle NULL once MEMDESC RI entry is deleted
+        */
+       hRIHandle = NULL;
+
+       return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function     RIDeleteListKM
+
+ @Description
+            Delete all Resource Information entries and free associated
+            memory.
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDeleteListKM(void)
+{
+       PVRSRV_ERROR eResult = PVRSRV_OK;
+
+       _RILock();
+
+       if (g_pRIHashTable)
+       {
+               eResult = HASH_Iterate(g_pRIHashTable, (HASH_pfnCallback)_DeleteAllEntries, NULL);
+               if (eResult == PVRSRV_ERROR_RESOURCE_UNAVAILABLE)
+               {
+                       /*
+                        * PVRSRV_ERROR_RESOURCE_UNAVAILABLE is used to stop the Hash iterator when
+                        * the hash table gets deleted as a result of deleting the final PMR entry,
+                        * so this is not a real error condition...
+                        */
+                       eResult = PVRSRV_OK;
+               }
+       }
+
+       /* After the run through the RIHashTable that holds the PMR entries there
+        * might still be entries left in the per-process hash table, because they
+        * were added with RIWriteProcListEntryKM() and have no associated parent PMR.
+        */
+       if (g_pProcHashTable)
+       {
+               eResult = HASH_Iterate(g_pProcHashTable, (HASH_pfnCallback) _DeleteAllProcEntries, NULL);
+               if (eResult == PVRSRV_ERROR_RESOURCE_UNAVAILABLE)
+               {
+                       /*
+                        * PVRSRV_ERROR_RESOURCE_UNAVAILABLE is used to stop the Hash iterator when
+                        * the hash table gets deleted as a result of deleting the final PMR entry,
+                        * so this is not a real error condition...
+                        */
+                       eResult = PVRSRV_OK;
+               }
+       }
+
+       _RIUnlock();
+
+       return eResult;
+}
+
+/*!
+*******************************************************************************
+
+ @Function     RIDumpListKM
+
+ @Description
+            Dumps out the contents of the RI List entry for the
+            specified PMR, and all MEMDESC allocation entries
+            in the associated sub linked list.
+            At present, output is directed to Kernel log
+            via PVR_DPF.
+
+ @input     psPMR - PMR for which RI entry details are to be output
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDumpListKM(PMR *psPMR)
+{
+       PVRSRV_ERROR eError;
+
+       /* Acquire RI lock*/
+       _RILock();
+
+       eError = _DumpList(psPMR, 0);
+
+       /* Release RI lock*/
+       _RIUnlock();
+
+       return eError;
+}
+
+/*!
+*******************************************************************************
+
+ @Function     RIGetListEntryKM
+
+ @Description
+            Returns pointer to a formatted string with details of the specified
+            list entry. If no entry exists (e.g. it may have been deleted
+            since the previous call), NULL is returned.
+
+ @input     pid - pid for which RI entry details are to be output
+ @input     ppHandle - handle to the entry; if it references NULL, the first
+                     entry for the pid will be returned
+ @output    ppHandle - on return, references the next entry
+                     (or NULL if there is no next entry)
+ @output    ppszEntryString - string to be output for the entry
+
+ @Return       IMG_BOOL - IMG_TRUE while further entries remain to be output,
+            IMG_FALSE once the listing for the pid is complete
+
+******************************************************************************/
+IMG_BOOL RIGetListEntryKM(IMG_PID pid,
+                                                 IMG_HANDLE **ppHandle,
+                                                 IMG_CHAR **ppszEntryString)
+{
+       RI_SUBLIST_ENTRY  *psRISubEntry = NULL;
+       RI_LIST_ENTRY  *psRIEntry = NULL;
+       uintptr_t     hashData = 0;
+       IMG_PID       hashKey  = pid;
+
+       static IMG_CHAR acStringBuffer[RI_FRMT_SIZE_MAX];
+
+       static IMG_UINT64 ui64TotalMemdescAlloc;
+       static IMG_UINT64 ui64TotalImport;
+       static IMG_UINT64 ui64TotalPMRAlloc;
+       static IMG_UINT64 ui64TotalPMRBacked;
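+       /* Iterator state is held in statics: each call returns one formatted
+        * entry, stepping through MEMDESC entries, a MEMDESC summary, PMR
+        * entries and a PMR summary before being reset in RI_GET_STATE_END.
+        */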
+       static enum {
+               RI_GET_STATE_MEMDESCS_LIST_START,
+               RI_GET_STATE_MEMDESCS_SUMMARY,
+               RI_GET_STATE_PMR_LIST,
+               RI_GET_STATE_PMR_SUMMARY,
+               RI_GET_STATE_END,
+               RI_GET_STATE_LAST
+       } g_bNextGetState = RI_GET_STATE_MEMDESCS_LIST_START;
+
+       static DLLIST_NODE *psNode;
+       static DLLIST_NODE *psSysAllocNode;
+       static IMG_CHAR szProcName[RI_PROC_BUF_SIZE];
+       static IMG_UINT32 ui32ProcessedSysAllocPMRCount;
+
+       acStringBuffer[0] = '\0';
+
+       switch (g_bNextGetState)
+       {
+       case RI_GET_STATE_MEMDESCS_LIST_START:
+               /* look-up pid in Hash Table, to obtain first entry for pid */
+               hashData = HASH_Retrieve_Extended(g_pProcHashTable, (void *)&hashKey);
+               if (hashData)
+               {
+                       if (*ppHandle)
+                       {
+                               psRISubEntry = (RI_SUBLIST_ENTRY *)*ppHandle;
+                               if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+                               {
+                                       psRISubEntry = NULL;
+                               }
+                       }
+                       else
+                       {
+                               psRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode);
+                               if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+                               {
+                                       psRISubEntry = NULL;
+                               }
+                       }
+               }
+
+               if (psRISubEntry)
+               {
+                       PDLLIST_NODE psNextProcListNode = dllist_get_next_node(&psRISubEntry->sProcListNode);
+
+                       if (psRISubEntry->bIsImport)
+                       {
+                               ui64TotalImport += psRISubEntry->ui64Size;
+                       }
+                       else
+                       {
+                               ui64TotalMemdescAlloc += psRISubEntry->ui64Size;
+                       }
+
+                       _GenerateMEMDESCEntryString(psRISubEntry,
+                                                                               IMG_TRUE,
+                                                                               RI_MEMDESC_ENTRY_BUF_SIZE,
+                                                                               acStringBuffer);
+
+                       if (szProcName[0] == '\0')
+                       {
+                               OSStringLCopy(szProcName, (pid == PVR_SYS_ALLOC_PID) ?
+                                               PVRSRV_MODNAME : psRISubEntry->ai8ProcName, RI_PROC_BUF_SIZE);
+                       }
+
+
+                       *ppszEntryString = acStringBuffer;
+                       *ppHandle        = (IMG_HANDLE)IMG_CONTAINER_OF(psNextProcListNode, RI_SUBLIST_ENTRY, sProcListNode);
+
+                       if (psNextProcListNode == NULL ||
+                               psNextProcListNode == (PDLLIST_NODE)hashData)
+                       {
+                               g_bNextGetState = RI_GET_STATE_MEMDESCS_SUMMARY;
+                       }
+                       /* else continue to list MEMDESCs */
+               }
+               else
+               {
+                       if (ui64TotalMemdescAlloc == 0)
+                       {
+                               acStringBuffer[0] = '\0';
+                               *ppszEntryString = acStringBuffer;
+                               g_bNextGetState = RI_GET_STATE_MEMDESCS_SUMMARY;
+                       }
+                       /* else continue to list MEMDESCs */
+               }
+               break;
+
+       case RI_GET_STATE_MEMDESCS_SUMMARY:
+               OSSNPrintf(acStringBuffer,
+                          RI_MEMDESC_SUM_BUF_SIZE,
+                          RI_MEMDESC_SUM_FRMT,
+                          pid,
+                          szProcName,
+                          ui64TotalMemdescAlloc,
+                          ui64TotalMemdescAlloc >> 10,
+                          ui64TotalImport,
+                          ui64TotalImport >> 10,
+                          (ui64TotalMemdescAlloc + ui64TotalImport),
+                          (ui64TotalMemdescAlloc + ui64TotalImport) >> 10);
+
+               *ppszEntryString = acStringBuffer;
+               ui64TotalMemdescAlloc = 0;
+               ui64TotalImport = 0;
+               szProcName[0] = '\0';
+
+               g_bNextGetState = RI_GET_STATE_PMR_LIST;
+               break;
+
+       case RI_GET_STATE_PMR_LIST:
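+               /* Driver-wide (PVR_SYS_ALLOC_PID) PMRs live on the dedicated
+                * sys-alloc list; for any other pid the global PMR list
+                * (sListFirst) is walked and filtered by pid.
+                */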
+               if (pid == PVR_SYS_ALLOC_PID)
+               {
+                       OSLockAcquire(g_hSysAllocPidListLock);
+                       acStringBuffer[0] = '\0';
+                       if (!psSysAllocNode)
+                       {
+                               psSysAllocNode = &g_sSysAllocPidListHead;
+                               ui32ProcessedSysAllocPMRCount = 0;
+                       }
+                       psSysAllocNode = dllist_get_next_node(psSysAllocNode);
+
+                       if (szProcName[0] == '\0')
+                       {
+                               OSStringLCopy(szProcName, PVRSRV_MODNAME, RI_PROC_BUF_SIZE);
+                       }
+                       if (psSysAllocNode != NULL && psSysAllocNode != &g_sSysAllocPidListHead)
+                       {
+                               IMG_DEVMEM_SIZE_T uiPMRPhysicalBacking, uiPMRLogicalSize = 0;
+
+                               psRIEntry = IMG_CONTAINER_OF((PDLLIST_NODE)psSysAllocNode, RI_LIST_ENTRY, sSysAllocListNode);
+                               _GeneratePMREntryString(psRIEntry,
+                                                                               IMG_TRUE,
+                                                                               RI_PMR_ENTRY_BUF_SIZE,
+                                                                               acStringBuffer);
+                               PMR_LogicalSize(psRIEntry->psPMR,
+                                                               &uiPMRLogicalSize);
+                               ui64TotalPMRAlloc += uiPMRLogicalSize;
+                               PMR_PhysicalSize(psRIEntry->psPMR, &uiPMRPhysicalBacking);
+                               ui64TotalPMRBacked += uiPMRPhysicalBacking;
+
+                               ui32ProcessedSysAllocPMRCount++;
+                               if (ui32ProcessedSysAllocPMRCount > g_ui32SysAllocPMRCount+1)
+                               {
+                                       g_bNextGetState = RI_GET_STATE_PMR_SUMMARY;
+                               }
+                               /* else continue to list PMRs */
+                       }
+                       else
+                       {
+                               g_bNextGetState = RI_GET_STATE_PMR_SUMMARY;
+                       }
+                       *ppszEntryString = (IMG_CHAR *)acStringBuffer;
+                       OSLockRelease(g_hSysAllocPidListLock);
+               }
+               else
+               {
+                       IMG_BOOL bPMRToDisplay = IMG_FALSE;
+
+                       /* Iterate through the 'touched' PMRs and display details */
+                       if (!psNode)
+                       {
+                               psNode = dllist_get_next_node(&sListFirst);
+                       }
+                       else
+                       {
+                               psNode = dllist_get_next_node(psNode);
+                       }
+
+                       while ((psNode != NULL && psNode != &sListFirst) &&
+                                       !bPMRToDisplay)
+                       {
+                               psRIEntry =     IMG_CONTAINER_OF(psNode, RI_LIST_ENTRY, sListNode);
+                               if (psRIEntry->pid == pid)
+                               {
+                                       IMG_DEVMEM_SIZE_T uiPMRPhysicalBacking, uiPMRLogicalSize = 0;
+
+                                       /* This PMR was 'touched', so display details and unflag it*/
+                                       _GeneratePMREntryString(psRIEntry,
+                                                                                       IMG_TRUE,
+                                                                                       RI_PMR_ENTRY_BUF_SIZE,
+                                                                                       acStringBuffer);
+                                       PMR_LogicalSize(psRIEntry->psPMR, &uiPMRLogicalSize);
+                                       ui64TotalPMRAlloc += uiPMRLogicalSize;
+                                       PMR_PhysicalSize(psRIEntry->psPMR, &uiPMRPhysicalBacking);
+                                       ui64TotalPMRBacked += uiPMRPhysicalBacking;
+
+                                       /* Remember the name of the process for 1 PMR for the summary */
+                                       if (szProcName[0] == '\0')
+                                       {
+                                               OSStringLCopy(szProcName, psRIEntry->ai8ProcName, RI_PROC_BUF_SIZE);
+                                       }
+                                       bPMRToDisplay = IMG_TRUE;
+                               }
+                               else
+                               {
+                                       psNode = dllist_get_next_node(psNode);
+                               }
+                       }
+
+                       if (psNode == NULL || (psNode == &sListFirst))
+                       {
+                               g_bNextGetState = RI_GET_STATE_PMR_SUMMARY;
+                       }
+                       /* else continue listing PMRs */
+               }
+               break;
+
+       case RI_GET_STATE_PMR_SUMMARY:
+               OSSNPrintf(acStringBuffer,
+                          RI_PMR_SUM_BUF_SIZE,
+                          RI_PMR_SUM_FRMT,
+                          pid,
+                          szProcName,
+                          ui64TotalPMRAlloc,
+                          ui64TotalPMRAlloc >> 10,
+                          ui64TotalPMRBacked,
+                          ui64TotalPMRBacked >> 10);
+
+               *ppszEntryString = acStringBuffer;
+               ui64TotalPMRAlloc = 0;
+               ui64TotalPMRBacked = 0;
+               szProcName[0] = '\0';
+               psSysAllocNode = NULL;
+
+               g_bNextGetState = RI_GET_STATE_END;
+               break;
+
+       default:
+               PVR_DPF((PVR_DBG_ERROR, "%s: Bad state %d", __func__, g_bNextGetState));
+
+               __fallthrough;
+       case RI_GET_STATE_END:
+               /* Reset state ready for the next gpu_mem_area file to display */
+               *ppszEntryString = NULL;
+               *ppHandle        = NULL;
+               psNode = NULL;
+               szProcName[0] = '\0';
+
+               g_bNextGetState = RI_GET_STATE_MEMDESCS_LIST_START;
+               return IMG_FALSE;
+               break;
+       }
+
+       return IMG_TRUE;
+}
+
+/* Function used to produce string containing info for MEMDESC RI entries (used for both debugfs and kernel log output) */
+static void _GenerateMEMDESCEntryString(RI_SUBLIST_ENTRY *psRISubEntry,
+                                        IMG_BOOL bDebugFs,
+                                        IMG_UINT16 ui16MaxStrLen,
+                                        IMG_CHAR *pszEntryString)
+{
+       IMG_CHAR szProc[RI_MEMDESC_ENTRY_PROC_BUF_SIZE];
+       IMG_CHAR szImport[RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE];
+       IMG_CHAR szEntryFormat[RI_MEMDESC_ENTRY_FRMT_SIZE];
+       const IMG_CHAR *pszAnnotationText;
+       IMG_PID uiRIPid = 0;
+       PMR* psRIPMR = NULL;
+       IMG_UINT32 ui32RIPMRFlags = 0;
+
+       if (psRISubEntry->psRI != NULL)
+       {
+               uiRIPid = psRISubEntry->psRI->pid;
+               psRIPMR = psRISubEntry->psRI->psPMR;
+               ui32RIPMRFlags = psRISubEntry->psRI->ui32RIPMRFlags;
+       }
+
+       OSSNPrintf(szEntryFormat,
+                       RI_MEMDESC_ENTRY_FRMT_SIZE,
+                       RI_MEMDESC_ENTRY_FRMT,
+                       DEVMEM_ANNOTATION_MAX_LEN);
+
+       if (!bDebugFs)
+       {
+               /* we don't include process ID info for debugfs output */
+               OSSNPrintf(szProc,
+                               RI_MEMDESC_ENTRY_PROC_BUF_SIZE,
+                               RI_MEMDESC_ENTRY_PROC_FRMT,
+                               psRISubEntry->pid,
+                               psRISubEntry->ai8ProcName);
+       }
+
+       if (psRISubEntry->bIsImport && psRIPMR)
+       {
+               OSSNPrintf((IMG_CHAR *)&szImport,
+                          RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE,
+                          RI_MEMDESC_ENTRY_IMPORT_FRMT,
+                          uiRIPid);
+               /* Set pszAnnotationText to that of the 'parent' PMR RI entry */
+               pszAnnotationText = PMR_GetAnnotation(psRIPMR);
+       }
+       else if (!psRISubEntry->bIsSuballoc && psRIPMR)
+       {
+               /* Set pszAnnotationText to that of the 'parent' PMR RI entry */
+               pszAnnotationText = PMR_GetAnnotation(psRIPMR);
+       }
+       else
+       {
+               /* Set pszAnnotationText to that of the MEMDESC RI entry */
+               pszAnnotationText = psRISubEntry->ai8TextB;
+       }
+
+       /* Don't print memdescs if they are local imports
+        * (i.e. imported PMRs allocated by this process)
+        */
+       if (bDebugFs &&
+               ((psRISubEntry->sVAddr.uiAddr + psRISubEntry->ui64Offset) == 0) &&
+               (psRISubEntry->bIsImport && ((psRISubEntry->pid == uiRIPid)
+                                                                        || (psRISubEntry->pid == PVR_SYS_ALLOC_PID))))
+       {
+               /* Don't print this entry */
+               pszEntryString[0] = '\0';
+       }
+       else
+       {
+               OSSNPrintf(pszEntryString,
+                                  ui16MaxStrLen,
+                                  szEntryFormat,
+                                  (bDebugFs ? "" : "   "),
+                                  psRISubEntry->pid,
+                                  (psRISubEntry->sVAddr.uiAddr + psRISubEntry->ui64Offset),
+                                  pszAnnotationText,
+                                  (bDebugFs ? "" : (char *)szProc),
+                                  psRISubEntry->ui64Size,
+                                  psRIPMR,
+                                  (psRISubEntry->bIsImport ? (char *)&szImport : ""),
+                                  (!psRISubEntry->bIsImport && (ui32RIPMRFlags & RI_FLAG_SYSALLOC_PMR) && (psRISubEntry->pid != PVR_SYS_ALLOC_PID)) ? g_szSysAllocImport : "",
+                                  (psRIPMR && PMR_IsUnpinned(psRIPMR)) ? RI_MEMDESC_ENTRY_UNPINNED_FRMT : "",
+                                  (bDebugFs ? '\n' : ' '));
+       }
+}
+
+/* Function used to produce string containing info for PMR RI entries (used for debugfs and kernel log output) */
+static void _GeneratePMREntryString(RI_LIST_ENTRY *psRIEntry,
+                                    IMG_BOOL bDebugFs,
+                                    IMG_UINT16 ui16MaxStrLen,
+                                    IMG_CHAR *pszEntryString)
+{
+       const IMG_CHAR*   pszAnnotationText;
+       IMG_DEVMEM_SIZE_T uiLogicalSize = 0;
+       IMG_DEVMEM_SIZE_T uiPhysicalSize = 0;
+       IMG_CHAR          szEntryFormat[RI_PMR_ENTRY_FRMT_SIZE];
+
+       PMR_LogicalSize(psRIEntry->psPMR, &uiLogicalSize);
+
+       PMR_PhysicalSize(psRIEntry->psPMR, &uiPhysicalSize);
+
+       OSSNPrintf(szEntryFormat,
+                       RI_PMR_ENTRY_FRMT_SIZE,
+                       RI_PMR_ENTRY_FRMT,
+                       DEVMEM_ANNOTATION_MAX_LEN);
+
+       /* Set pszAnnotationText to that of the PMR RI entry */
+       pszAnnotationText = (IMG_PCHAR) PMR_GetAnnotation(psRIEntry->psPMR);
+
+       OSSNPrintf(pszEntryString,
+                  ui16MaxStrLen,
+                  szEntryFormat,
+                  (bDebugFs ? "" : "   "),
+                  psRIEntry->pid,
+                  (void*)psRIEntry->psPMR,
+                  pszAnnotationText,
+                  uiLogicalSize,
+                  uiPhysicalSize,
+                  (bDebugFs ? '\n' : ' '));
+}
+
+/*!
+*******************************************************************************
+
+ @Function     _DumpList
+
+ @Description
+            Dumps out RI List entries according to parameters passed.
+
+ @input     psPMR - must not be NULL; if pid is zero, the function will output
+                   the RI entries for the specified PMR only
+ @input     pid - If non-zero, the function will only output MEMDESC RI
+                  entries made by the process with ID pid.
+                  If zero, all MEMDESC RI entries will be output.
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR _DumpList(PMR *psPMR, IMG_PID pid)
+{
+       RI_LIST_ENTRY *psRIEntry = NULL;
+       RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+       IMG_UINT16 ui16SubEntriesParsed = 0;
+       uintptr_t hashData = 0;
+       IMG_PID hashKey;
+       PMR *pPMRHashKey = psPMR;
+       IMG_BOOL bDisplayedThisPMR = IMG_FALSE;
+       IMG_UINT64 ui64LogicalSize = 0;
+
+       PVR_RETURN_IF_INVALID_PARAM(psPMR);
+
+       if (g_pRIHashTable && g_pProcHashTable)
+       {
+               if (pid != 0)
+               {
+                       /* look-up pid in Hash Table */
+                       hashKey = pid;
+                       hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&hashKey);
+                       if (hashData)
+                       {
+                               psRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode);
+                               if (psRISubEntry)
+                               {
+                                       psRIEntry = psRISubEntry->psRI;
+                               }
+                       }
+               }
+               else
+               {
+                       /* Look-up psPMR in Hash Table */
+                       hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey);
+                       psRIEntry = (RI_LIST_ENTRY *)hashData;
+               }
+               if (!psRIEntry)
+               {
+                       /* No entry found in hash table */
+                       return PVRSRV_ERROR_NOT_FOUND;
+               }
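+               /* Dump the PMR header and each MEMDESC entry in its sublist.
+                * When dumping by pid, the per-process list is followed instead,
+                * moving psRIEntry on when the list crosses into a different PMR.
+                */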
+               while (psRIEntry)
+               {
+                       bDisplayedThisPMR = IMG_FALSE;
+                       /* Output details for RI entry */
+                       if (!pid)
+                       {
+                               PMR_LogicalSize(psPMR, (IMG_DEVMEM_SIZE_T*)&ui64LogicalSize);
+
+                               _RIOutput (("%s <%p> suballocs:%d size:0x%010" IMG_UINT64_FMTSPECx,
+                                           PMR_GetAnnotation(psRIEntry->psPMR),
+                                           psRIEntry->psPMR,
+                                           (IMG_UINT)psRIEntry->ui16SubListCount,
+                                           ui64LogicalSize));
+                               bDisplayedThisPMR = IMG_TRUE;
+                       }
+                       ui16SubEntriesParsed = 0;
+                       if (psRIEntry->ui16SubListCount)
+                       {
+#if _DUMP_LINKEDLIST_INFO
+                               _RIOutput (("RI LIST: {sSubListFirst.psNextNode:0x%p}\n",
+                                           psRIEntry->sSubListFirst.psNextNode));
+#endif /* _DUMP_LINKEDLIST_INFO */
+                               if (!pid)
+                               {
+                                       psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)),
+                                                                       RI_SUBLIST_ENTRY, sListNode);
+                               }
+                               /* Traverse RI sublist and output details for each entry */
+                               while (psRISubEntry)
+                               {
+                                       if (psRIEntry)
+                                       {
+                                               if ((ui16SubEntriesParsed >= psRIEntry->ui16SubListCount))
+                                               {
+                                                       break;
+                                               }
+                                               if (!bDisplayedThisPMR)
+                                               {
+                                                       PMR_LogicalSize(psPMR, (IMG_DEVMEM_SIZE_T*)&ui64LogicalSize);
+
+                                                       _RIOutput (("%s <%p> suballocs:%d size:0x%010" IMG_UINT64_FMTSPECx,
+                                                                   PMR_GetAnnotation(psRIEntry->psPMR),
+                                                                   psRIEntry->psPMR,
+                                                                   (IMG_UINT)psRIEntry->ui16SubListCount,
+                                                                   ui64LogicalSize));
+                                                       bDisplayedThisPMR = IMG_TRUE;
+                                               }
+                                       }
+#if _DUMP_LINKEDLIST_INFO
+                                       _RIOutput (("RI LIST:    [this subentry:0x%p]\n",psRISubEntry));
+                                       _RIOutput (("RI LIST:     psRI:0x%p\n",psRISubEntry->psRI));
+#endif /* _DUMP_LINKEDLIST_INFO */
+
+                                       {
+                                               IMG_CHAR szEntryString[RI_MEMDESC_ENTRY_BUF_SIZE];
+
+                                               _GenerateMEMDESCEntryString(psRISubEntry,
+                                                                           IMG_FALSE,
+                                                                           RI_MEMDESC_ENTRY_BUF_SIZE,
+                                                                           szEntryString);
+                                               _RIOutput (("%s",szEntryString));
+                                       }
+
+                                       if (pid)
+                                       {
+                                               if ((dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) ||
+                                                       (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == (PDLLIST_NODE)hashData))
+                                               {
+                                                       psRISubEntry = NULL;
+                                               }
+                                               else
+                                               {
+                                                       psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sProcListNode)),
+                                                                                       RI_SUBLIST_ENTRY, sProcListNode);
+                                                       if (psRISubEntry)
+                                                       {
+                                                               if (psRIEntry != psRISubEntry->psRI)
+                                                               {
+                                                                       /*
+                                                                        * The next MEMDESC in the process linked list is in a different PMR
+                                                                        */
+                                                                       psRIEntry = psRISubEntry->psRI;
+                                                                       bDisplayedThisPMR = IMG_FALSE;
+                                                               }
+                                                       }
+                                               }
+                                       }
+                                       else
+                                       {
+                                               ui16SubEntriesParsed++;
+                                               psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sListNode)),
+                                                                               RI_SUBLIST_ENTRY, sListNode);
+                                       }
+                               }
+                       }
+                       if (!pid && psRIEntry)
+                       {
+                               if (ui16SubEntriesParsed != psRIEntry->ui16SubListCount)
+                               {
+                                       /*
+                                        * Output error message as sublist does not contain the
+                                        * number of entries indicated by sublist count
+                                        */
+                                       _RIOutput (("RI ERROR: RI sublist contains %d entries, not %d entries\n",
+                                                   ui16SubEntriesParsed, psRIEntry->ui16SubListCount));
+                               }
+                               else if (psRIEntry->ui16SubListCount && !dllist_get_next_node(&(psRIEntry->sSubListFirst)))
+                               {
+                                       /*
+                                        * Output error message as sublist is empty but sublist count
+                                        * is not zero
+                                        */
+                                       _RIOutput (("RI ERROR: ui16SubListCount=%d for empty RI sublist\n",
+                                                   psRIEntry->ui16SubListCount));
+                               }
+                       }
+                       psRIEntry = NULL;
+               }
+       }
+       return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function     RIDumpAllKM
+
+ @Description
+            Dumps out the contents of all RI List entries (i.e. for all
+            MEMDESC allocations for each PMR).
+            At present, output is directed to Kernel log
+            via PVR_DPF.
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDumpAllKM(void)
+{
+       if (g_pRIHashTable)
+       {
+               return HASH_Iterate(g_pRIHashTable, (HASH_pfnCallback)_DumpAllEntries, NULL);
+       }
+       return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function     RIDumpProcessKM
+
+ @Description
+            Dumps out the contents of all MEMDESC RI List entries (for every
+            PMR) which have been allocated by the specified process only.
+            At present, output is directed to Kernel log
+            via PVR_DPF.
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDumpProcessKM(IMG_PID pid)
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 dummyPMR;
+
+       if (!g_pProcHashTable)
+       {
+               return PVRSRV_OK;
+       }
+
+       /* Acquire RI lock*/
+       _RILock();
+
+       eError = _DumpList((PMR *)&dummyPMR, pid);
+
+       /* Release RI lock*/
+       _RIUnlock();
+
+       return eError;
+}
+
+/*!
+*******************************************************************************
+
+ @Function     _TotalAllocsForProcess
+
+ @Description
+            Totals all PMR physical backing for given process.
+
+ @input     pid - ID of process.
+
+ @input     ePhysHeapType - type of Physical Heap for which to total allocs
+
+ @Return       Size of all physical backing for PID's PMRs allocated from the
+            specified heap type (in bytes).
+
+******************************************************************************/
+static IMG_INT32 _TotalAllocsForProcess(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapType)
+{
+       RI_LIST_ENTRY *psRIEntry = NULL;
+       RI_SUBLIST_ENTRY *psInitialRISubEntry = NULL;
+       RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+       uintptr_t hashData = 0;
+       IMG_PID hashKey;
+       IMG_INT32 i32TotalPhysical = 0;
+
+       if (g_pRIHashTable && g_pProcHashTable)
+       {
+               if (pid == PVR_SYS_ALLOC_PID)
+               {
+                       IMG_UINT32 ui32ProcessedSysAllocPMRCount = 0;
+                       DLLIST_NODE *psSysAllocNode = NULL;
+
+                       OSLockAcquire(g_hSysAllocPidListLock);
+                       psSysAllocNode = dllist_get_next_node(&g_sSysAllocPidListHead);
+                       while (psSysAllocNode && psSysAllocNode != &g_sSysAllocPidListHead)
+                       {
+                               psRIEntry = IMG_CONTAINER_OF((PDLLIST_NODE)psSysAllocNode, RI_LIST_ENTRY, sSysAllocListNode);
+                               ui32ProcessedSysAllocPMRCount++;
+                               if (PhysHeapGetType(PMR_PhysHeap(psRIEntry->psPMR)) == ePhysHeapType)
+                               {
+                                       IMG_UINT64 ui64PhysicalSize;
+
+                                       PMR_PhysicalSize(psRIEntry->psPMR, (IMG_DEVMEM_SIZE_T*)&ui64PhysicalSize);
+                                       if (((IMG_UINT64)i32TotalPhysical + ui64PhysicalSize > 0x7fffffff))
+                                       {
+                                               PVR_DPF((PVR_DBG_WARNING, "%s: i32TotalPhysical exceeding size for i32",__func__));
+                                       }
+                                       i32TotalPhysical += (IMG_INT32)(ui64PhysicalSize & 0x00000000ffffffff);
+                               }
+                               psSysAllocNode = dllist_get_next_node(psSysAllocNode);
+                       }
+                       OSLockRelease(g_hSysAllocPidListLock);
+               }
+               else
+               {
+                       if (pid != 0)
+                       {
+                               /* look-up pid in Hash Table */
+                               hashKey = pid;
+                               hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&hashKey);
+                               if (hashData)
+                               {
+                                       psInitialRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode);
+                                       psRISubEntry = psInitialRISubEntry;
+                                       if (psRISubEntry)
+                                       {
+                                               psRIEntry = psRISubEntry->psRI;
+                                       }
+                               }
+                       }
+
+                       while (psRISubEntry && psRIEntry)
+                       {
+                               if (!psRISubEntry->bIsImport && !(psRIEntry->ui32RIPMRFlags & RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS) &&
+                                       (pid == PVR_SYS_ALLOC_PID || !(psRIEntry->ui32RIPMRFlags & RI_FLAG_SYSALLOC_PMR)) &&
+                                       (PhysHeapGetType(PMR_PhysHeap(psRIEntry->psPMR)) == ePhysHeapType))
+                               {
+                                       IMG_UINT64 ui64PhysicalSize;
+
+
+                                       PMR_PhysicalSize(psRIEntry->psPMR, (IMG_DEVMEM_SIZE_T*)&ui64PhysicalSize);
+                                       if (((IMG_UINT64)i32TotalPhysical + ui64PhysicalSize > 0x7fffffff))
+                                       {
+                                               PVR_DPF((PVR_DBG_WARNING, "%s: i32TotalPhysical exceeding size for i32",__func__));
+                                       }
+                                       i32TotalPhysical += (IMG_INT32)(ui64PhysicalSize & 0x00000000ffffffff);
+                                       psRIEntry->ui32RIPMRFlags |= RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS;
+                               }
+                               if ((dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) ||
+                                       (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == (PDLLIST_NODE)hashData))
+                               {
+                                       psRISubEntry = NULL;
+                                       psRIEntry = NULL;
+                               }
+                               else
+                               {
+                                       psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sProcListNode)),
+                                                                       RI_SUBLIST_ENTRY, sProcListNode);
+                                       if (psRISubEntry)
+                                       {
+                                               psRIEntry = psRISubEntry->psRI;
+                                       }
+                               }
+                       }
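+                       /* Second pass: clear RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS on the
+                        * PMRs visited above so they are counted again on the next call.
+                        */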
+                       psRISubEntry = psInitialRISubEntry;
+                       if (psRISubEntry)
+                       {
+                               psRIEntry = psRISubEntry->psRI;
+                       }
+                       while (psRISubEntry && psRIEntry)
+                       {
+                               psRIEntry->ui32RIPMRFlags &= ~RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS;
+                               if ((dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) ||
+                                       (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == (PDLLIST_NODE)hashData))
+                               {
+                                       psRISubEntry = NULL;
+                                       psRIEntry = NULL;
+                               }
+                               else
+                               {
+                                       psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sProcListNode)),
+                                                                       RI_SUBLIST_ENTRY, sProcListNode);
+                                       if (psRISubEntry)
+                                       {
+                                               psRIEntry = psRISubEntry->psRI;
+                                       }
+                               }
+                       }
+               }
+       }
+       return i32TotalPhysical;
+}
+
+/*!
+*******************************************************************************
+
+ @Function     RITotalAllocProcessKM
+
+ @Description
+            Returns the total GPU memory (physical backing for PMRs) which has
+            been allocated from the specified physical heap type by the given
+            process only.
+
+ @Return       Amount of physical backing allocated (in bytes)
+
+******************************************************************************/
+IMG_INT32 RITotalAllocProcessKM(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapType)
+{
+       IMG_INT32 i32BackingTotal = 0;
+
+       if (g_pProcHashTable)
+       {
+               /* Acquire RI lock*/
+               _RILock();
+
+               i32BackingTotal = _TotalAllocsForProcess(pid, ePhysHeapType);
+
+               /* Release RI lock*/
+               _RIUnlock();
+       }
+       return i32BackingTotal;
+}
+
+#if defined(DEBUG)
+/*!
+*******************************************************************************
+
+ @Function     _DumpProcessList
+
+ @Description
+            Searches the MEMDESC RI sublist entries of the given PMR for an
+            allocation made by the specified process which covers the given
+            offset, and returns its device virtual address.
+
+ @input     psPMR - PMR whose RI sublist entries are to be searched
+ @input     pid - ID of the process whose MEMDESC RI entries are considered
+ @input     ui64Offset - offset within the PMR to be resolved
+ @output    psDevVAddr - device virtual address of the matching allocation
+                  (uiAddr is set to 0 if no match is found)
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR _DumpProcessList(PMR *psPMR,
+                                                                        IMG_PID pid,
+                                                                        IMG_UINT64 ui64Offset,
+                                                                        IMG_DEV_VIRTADDR *psDevVAddr)
+{
+       RI_LIST_ENTRY *psRIEntry = NULL;
+       RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+       IMG_UINT16 ui16SubEntriesParsed = 0;
+       uintptr_t hashData = 0;
+       PMR *pPMRHashKey = psPMR;
+
+       psDevVAddr->uiAddr = 0;
+
+       PVR_RETURN_IF_INVALID_PARAM(psPMR);
+
+       if (g_pRIHashTable && g_pProcHashTable)
+       {
+               PVR_ASSERT(psPMR && pid);
+
+               /* Look-up psPMR in Hash Table */
+               hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey);
+               psRIEntry = (RI_LIST_ENTRY *)hashData;
+
+               if (!psRIEntry)
+               {
+                       /* No entry found in hash table */
+                       return PVRSRV_ERROR_NOT_FOUND;
+               }
+
+               if (psRIEntry->ui16SubListCount)
+               {
+                       psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)),
+                                                                                       RI_SUBLIST_ENTRY, sListNode);
+
+                       /* Traverse RI sublist and output details for each entry */
+                       while (psRISubEntry && (ui16SubEntriesParsed < psRIEntry->ui16SubListCount))
+                       {
+                               if (pid == psRISubEntry->pid)
+                               {
+                                       IMG_UINT64 ui64StartOffset = psRISubEntry->ui64Offset;
+                                       IMG_UINT64 ui64EndOffset = psRISubEntry->ui64Offset + psRISubEntry->ui64Size;
+
+                                       if (ui64Offset >= ui64StartOffset && ui64Offset < ui64EndOffset)
+                                       {
+                                               psDevVAddr->uiAddr = psRISubEntry->sVAddr.uiAddr;
+                                               return PVRSRV_OK;
+                                       }
+                               }
+
+                               ui16SubEntriesParsed++;
+                               psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sListNode)),
+                                                                                               RI_SUBLIST_ENTRY, sListNode);
+                       }
+               }
+       }
+
+       return PVRSRV_ERROR_INVALID_PARAMS;
+}
+
+/*!
+*******************************************************************************
+
+ @Function     RIDumpProcessListKM
+
+ @Description
+            Dumps out selected contents of all MEMDESC RI List entries (for a
+            PMR) which have been allocated by the specified process only.
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDumpProcessListKM(PMR *psPMR,
+                                                                IMG_PID pid,
+                                                                IMG_UINT64 ui64Offset,
+                                                                IMG_DEV_VIRTADDR *psDevVAddr)
+{
+       PVRSRV_ERROR eError;
+
+       if (!g_pProcHashTable)
+       {
+               return PVRSRV_OK;
+       }
+
+       /* Acquire RI lock*/
+       _RILock();
+
+       eError = _DumpProcessList(psPMR,
+                                                         pid,
+                                                         ui64Offset,
+                                                         psDevVAddr);
+
+       /* Release RI lock*/
+       _RIUnlock();
+
+       return eError;
+}
+#endif
+
+static PVRSRV_ERROR _DumpAllEntries (uintptr_t k, uintptr_t v, void* pvPriv)
+{
+       RI_LIST_ENTRY *psRIEntry = (RI_LIST_ENTRY *)v;
+
+       PVR_UNREFERENCED_PARAMETER (k);
+       PVR_UNREFERENCED_PARAMETER (pvPriv);
+
+       return RIDumpListKM(psRIEntry->psPMR);
+}
+
+static PVRSRV_ERROR _DeleteAllEntries (uintptr_t k, uintptr_t v, void* pvPriv)
+{
+       RI_LIST_ENTRY *psRIEntry = (RI_LIST_ENTRY *)v;
+       RI_SUBLIST_ENTRY *psRISubEntry;
+       PVRSRV_ERROR eResult = PVRSRV_OK;
+
+       PVR_UNREFERENCED_PARAMETER (k);
+       PVR_UNREFERENCED_PARAMETER (pvPriv);
+
+       while ((eResult == PVRSRV_OK) && (psRIEntry->ui16SubListCount > 0))
+       {
+               psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)), RI_SUBLIST_ENTRY, sListNode);
+               eResult = RIDeleteMEMDESCEntryKM((RI_HANDLE)psRISubEntry);
+       }
+       if (eResult == PVRSRV_OK)
+       {
+               eResult = RIDeletePMREntryKM((RI_HANDLE)psRIEntry);
+               /*
+                * If we've deleted the Hash table, return
+                * an error to stop the iterator...
+                */
+               if (!g_pRIHashTable)
+               {
+                       eResult = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+               }
+       }
+       return eResult;
+}
+
+static PVRSRV_ERROR _DeleteAllProcEntries (uintptr_t k, uintptr_t v, void* pvPriv)
+{
+       RI_SUBLIST_ENTRY *psRISubEntry = (RI_SUBLIST_ENTRY *)v;
+       PVRSRV_ERROR eResult;
+
+       PVR_UNREFERENCED_PARAMETER (k);
+       PVR_UNREFERENCED_PARAMETER (pvPriv);
+
+       eResult = RIDeleteMEMDESCEntryKM((RI_HANDLE) psRISubEntry);
+       if (eResult == PVRSRV_OK && !g_pProcHashTable)
+       {
+               /*
+                * If we've deleted the Hash table, return
+                * an error to stop the iterator...
+                */
+               eResult = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+       }
+
+       return eResult;
+}
+
+#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/srvcore.c b/drivers/gpu/drm/img/img-rogue/services/server/common/srvcore.c
new file mode 100644 (file)
index 0000000..42a8a82
--- /dev/null
@@ -0,0 +1,1450 @@
+/*************************************************************************/ /*!
+@File
+@Title          PVR Common Bridge Module (kernel side)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements core PVRSRV API, server side
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "img_types_check.h"
+#include "pvr_debug.h"
+#include "ra.h"
+#include "pvr_bridge.h"
+#include "connection_server.h"
+#include "device.h"
+#include "htbuffer.h"
+
+#include "pdump_km.h"
+
+#include "srvkm.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "log2.h"
+
+#include "srvcore.h"
+#include "pvrsrv.h"
+#include "power.h"
+
+#if defined(SUPPORT_RGX)
+#include "rgxdevice.h"
+#include "rgxinit.h"
+#include "rgx_compat_bvnc.h"
+#endif
+
+#include "rgx_options.h"
+#include "pvrversion.h"
+#include "lock.h"
+#include "osfunc.h"
+#include "device_connection.h"
+#include "process_stats.h"
+#include "pvrsrv_pool.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "physmem_lma.h"
+#include "services_km.h"
+#endif
+
+#include "pvrsrv_tlstreams.h"
+#include "tlstream.h"
+
+#if defined(PVRSRV_MISSING_NO_SPEC_IMPL)
+#pragma message ("There is no implementation of OSConfineArrayIndexNoSpeculation() - see osfunc.h")
+#endif
+
+/* For the purpose of maintainability, it is intended that this file should not
+ * contain any OS specific #ifdefs. Please find a way to add e.g.
+ * an osfunc.c abstraction or override the entire function in question within
+ * env,*,pvr_bridge_k.c
+ */
+
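+/* Global bridge dispatch table. Every entry defaults to DummyBW; entries are
+ * filled in as each bridge module registers its handlers during bridge init.
+ */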
+PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT] = { {.pfFunction = DummyBW,} ,};
+
+#define PVR_DISPATCH_OFFSET_FIRST_FUNC                 0
+#define PVR_DISPATCH_OFFSET_LAST_FUNC                  1
+#define PVR_DISPATCH_OFFSET_ARRAY_MAX                  2
+
+#define PVRSRV_CLIENT_TL_STREAM_SIZE_DEFAULT PVRSRV_APPHINT_HWPERFCLIENTBUFFERSIZE
+
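+/* First/last dispatch-table offsets for each bridge group, filled in by
+ * BridgeDispatchTableStartOffsetsInit() below.
+ */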
+static IMG_UINT16 g_BridgeDispatchTableStartOffsets[BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT][PVR_DISPATCH_OFFSET_ARRAY_MAX];
+
+
+#define PVRSRV_MAX_POOLED_BRIDGE_BUFFERS 8     /*!< Initial number of pooled bridge buffers */
+
+static PVRSRV_POOL *g_psBridgeBufferPool;      /*! Pool of bridge buffers */
+
+
+#if defined(DEBUG_BRIDGE_KM)
+/* a lock used for protecting bridge call timing calculations
+ * for calls which do not acquire a lock
+ */
+static POS_LOCK g_hStatsLock;
+PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
+
+void BridgeGlobalStatsLock(void)
+{
+       OSLockAcquire(g_hStatsLock);
+}
+
+void BridgeGlobalStatsUnlock(void)
+{
+       OSLockRelease(g_hStatsLock);
+}
+#endif
+
+void BridgeDispatchTableStartOffsetsInit(void)
+{
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEFAULT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DEFAULT_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEFAULT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DEFAULT_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SRVCORE][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SRVCORE_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SRVCORE][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SRVCORE_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNC][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNC_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNC][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNC_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RESERVED1][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RESERVED1_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RESERVED1][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RESERVED1_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RESERVED2][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RESERVED2_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RESERVED2][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RESERVED2_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPCTRL][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPCTRL][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_MM_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_MM_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMPLAT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMPLAT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CMM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_CMM_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CMM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_CMM_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPMM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPMM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMP][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMP][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DMABUF][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DMABUF][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DC][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DC_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DC][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DC_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CACHE][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_CACHE_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CACHE][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_CACHE_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SMM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SMM_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SMM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SMM_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PVRTL][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PVRTL_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PVRTL][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RI][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RI_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RI][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RI_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_VALIDATION][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_VALIDATION][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_TUTILS][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_TUTILS][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEVICEMEMHISTORY][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEVICEMEMHISTORY][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_HTBUFFER][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_HTBUFFER][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DCPLAT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DCPLAT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMEXTMEM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMEXTMEM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCTRACKING][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCTRACKING][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCFALLBACK][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCFALLBACK][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DI][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DI_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DI][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DI_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DMA][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DMA_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DMA][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DMA_DISPATCH_LAST;
+#if defined(SUPPORT_RGX)
+       /* Need a gap here to start next entry at element 128 */
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXCMP][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXCMP_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXCMP][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTA3D][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXTA3D_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTA3D][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXBREAKPOINT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXBREAKPOINT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXFWDBG][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXFWDBG][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXPDUMP][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXPDUMP][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXHWPERF][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXHWPERF][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXREGCONFIG][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXREGCONFIG][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXKICKSYNC][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXKICKSYNC][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ2][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXTQ2_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ2][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTIMERQUERY][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTIMERQUERY][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_LAST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXRAY][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXRAY_DISPATCH_FIRST;
+       g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXRAY][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST;
+#endif
+}
+
+#if defined(DEBUG_BRIDGE_KM)
+
+#if defined(INTEGRITY_OS)
+PVRSRV_ERROR PVRSRVPrintBridgeStats(void)
+{
+       IMG_UINT32 ui32Index;
+       IMG_UINT32 ui32Remainder;
+
+       printf("Total Bridge call count = %u\n"
+                  "Total number of bytes copied via copy_from_user = %u\n"
+                  "Total number of bytes copied via copy_to_user = %u\n"
+                  "Total number of bytes copied via copy_*_user = %u\n\n"
+                  "%3s: %-60s | %-48s | %10s | %20s | %20s | %20s | %20s\n",
+                  g_BridgeGlobalStats.ui32IOCTLCount,
+                  g_BridgeGlobalStats.ui32TotalCopyFromUserBytes,
+                  g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
+                  g_BridgeGlobalStats.ui32TotalCopyFromUserBytes + g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
+                  "#",
+                  "Bridge Name",
+                  "Wrapper Function",
+                  "Call Count",
+                  "copy_from_user (B)",
+                  "copy_to_user (B)",
+                  "Total Time (us)",
+                  "Max Time (us)");
+
+       /* Print the stats for every entry in the dispatch table */
+       for (ui32Index = 0; ui32Index < BRIDGE_DISPATCH_TABLE_ENTRY_COUNT; ui32Index++)
+       {
+               PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psEntry = &g_BridgeDispatchTable[ui32Index];
+               printf("%3d: %-60s   %-48s   %-10u   %-20u   %-20u   %-20llu   %-20llu\n",
+                          (IMG_UINT32)(((size_t)psEntry-(size_t)g_BridgeDispatchTable)/sizeof(*g_BridgeDispatchTable)),
+                          psEntry->pszIOCName,
+                          (psEntry->pfFunction != NULL) ? psEntry->pszFunctionName : "(null)",
+                          psEntry->ui32CallCount,
+                          psEntry->ui32CopyFromUserTotalBytes,
+                          psEntry->ui32CopyToUserTotalBytes,
+                          (unsigned long long) OSDivide64r64(psEntry->ui64TotalTimeNS, 1000, &ui32Remainder),
+                          (unsigned long long) OSDivide64r64(psEntry->ui64MaxTimeNS, 1000, &ui32Remainder));
+       }
+
+       return PVRSRV_OK;
+}
+#endif
+
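+/* When DEBUG_BRIDGE_KM is enabled these wrappers account the number of bytes
+ * copied across the bridge, per dispatch-table entry and globally, before
+ * deferring to the OS copy helpers. The non-debug variants in the #else
+ * branch below are plain pass-throughs.
+ */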
+PVRSRV_ERROR
+CopyFromUserWrapper(CONNECTION_DATA *psConnection,
+                                       IMG_UINT32 ui32DispatchTableEntry,
+                                       void *pvDest,
+                                       void __user *pvSrc,
+                                       IMG_UINT32 ui32Size)
+{
+       g_BridgeDispatchTable[ui32DispatchTableEntry].ui32CopyFromUserTotalBytes+=ui32Size;
+       g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+=ui32Size;
+       return OSBridgeCopyFromUser(psConnection, pvDest, pvSrc, ui32Size);
+}
+PVRSRV_ERROR
+CopyToUserWrapper(CONNECTION_DATA *psConnection,
+                                 IMG_UINT32 ui32DispatchTableEntry,
+                                 void __user *pvDest,
+                                 void *pvSrc,
+                                 IMG_UINT32 ui32Size)
+{
+       g_BridgeDispatchTable[ui32DispatchTableEntry].ui32CopyToUserTotalBytes+=ui32Size;
+       g_BridgeGlobalStats.ui32TotalCopyToUserBytes+=ui32Size;
+       return OSBridgeCopyToUser(psConnection, pvDest, pvSrc, ui32Size);
+}
+#else
+INLINE PVRSRV_ERROR
+CopyFromUserWrapper(CONNECTION_DATA *psConnection,
+                                       IMG_UINT32 ui32DispatchTableEntry,
+                                       void *pvDest,
+                                       void __user *pvSrc,
+                                       IMG_UINT32 ui32Size)
+{
+       PVR_UNREFERENCED_PARAMETER (ui32DispatchTableEntry);
+       return OSBridgeCopyFromUser(psConnection, pvDest, pvSrc, ui32Size);
+}
+INLINE PVRSRV_ERROR
+CopyToUserWrapper(CONNECTION_DATA *psConnection,
+                                 IMG_UINT32 ui32DispatchTableEntry,
+                                 void __user *pvDest,
+                                 void *pvSrc,
+                                 IMG_UINT32 ui32Size)
+{
+       PVR_UNREFERENCED_PARAMETER (ui32DispatchTableEntry);
+       return OSBridgeCopyToUser(psConnection, pvDest, pvSrc, ui32Size);
+}
+#endif
+
+PVRSRV_ERROR
+PVRSRVConnectKM(CONNECTION_DATA *psConnection,
+                               PVRSRV_DEVICE_NODE * psDeviceNode,
+                               IMG_UINT32 ui32Flags,
+                               IMG_UINT32 ui32ClientBuildOptions,
+                               IMG_UINT32 ui32ClientDDKVersion,
+                               IMG_UINT32 ui32ClientDDKBuild,
+                               IMG_UINT8  *pui8KernelArch,
+                               IMG_UINT32 *pui32CapabilityFlags,
+                               IMG_UINT64 *ui64PackedBvnc)
+{
+       PVRSRV_ERROR            eError = PVRSRV_OK;
+       IMG_UINT32                      ui32BuildOptions, ui32BuildOptionsMismatch;
+       IMG_UINT32                      ui32DDKVersion, ui32DDKBuild;
+       PVRSRV_DATA                     *psSRVData = NULL;
+       IMG_UINT64                      ui64ProcessVASpaceSize = OSGetCurrentProcessVASpaceSize();
+       static IMG_BOOL         bIsFirstConnection=IMG_FALSE;
+
+#if defined(SUPPORT_RGX)
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+
+       /* Gather BVNC information to output to UM */
+
+       *ui64PackedBvnc = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B,
+                              psDevInfo->sDevFeatureCfg.ui32V,
+                              psDevInfo->sDevFeatureCfg.ui32N,
+                              psDevInfo->sDevFeatureCfg.ui32C);
+#else
+       *ui64PackedBvnc = 0;
+#endif /* defined(SUPPORT_RGX)*/
+
+       /* Clear the flags */
+       *pui32CapabilityFlags = 0;
+
+       psSRVData = PVRSRVGetPVRSRVData();
+
+       psConnection->ui32ClientFlags = ui32Flags;
+
+       /*Set flags to pass back to the client showing which cache coherency is available.*/
+       /* Is the system snooping of caches emulated in software? */
+       if (PVRSRVSystemSnoopingIsEmulated(psDeviceNode->psDevConfig))
+       {
+               *pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_EMULATE_FLAG;
+       }
+       else
+       {
+               /*Set flags to pass back to the client showing which cache coherency is available.*/
+               /*Is the system CPU cache coherent?*/
+               if (PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig))
+               {
+                       *pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_DEVICE_FLAG;
+               }
+               /*Is the system device cache coherent?*/
+               if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig))
+               {
+                       *pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_CPU_FLAG;
+               }
+       }
+
+       /* Has the system device non-mappable local memory?*/
+       if (PVRSRVSystemHasNonMappableLocalMemory(psDeviceNode->psDevConfig))
+       {
+               *pui32CapabilityFlags |= PVRSRV_NONMAPPABLE_MEMORY_PRESENT_FLAG;
+       }
+
+       /* Is system using FBCDC v31? */
+       if (psDeviceNode->pfnHasFBCDCVersion31(psDeviceNode))
+       {
+               *pui32CapabilityFlags |= PVRSRV_FBCDC_V3_1_USED;
+       }
+
+       /* Set flags to indicate shared-virtual-memory (SVM) allocation availability */
+       if (! psDeviceNode->ui64GeneralSVMHeapTopVA || ! ui64ProcessVASpaceSize)
+       {
+               *pui32CapabilityFlags |= PVRSRV_DEVMEM_SVM_ALLOC_UNSUPPORTED;
+       }
+       else
+       {
+               if (ui64ProcessVASpaceSize <= psDeviceNode->ui64GeneralSVMHeapTopVA)
+               {
+                       *pui32CapabilityFlags |= PVRSRV_DEVMEM_SVM_ALLOC_SUPPORTED;
+               }
+               else
+               {
+                       /* This can happen when the processor has more virtual address bits
+                          than the device (i.e. an allocation is not always guaranteed to
+                          succeed) */
+                       *pui32CapabilityFlags |= PVRSRV_DEVMEM_SVM_ALLOC_CANFAIL;
+               }
+       }
+
+       /* Is the system DMA capable? */
+       if (psDeviceNode->bHasSystemDMA)
+       {
+               *pui32CapabilityFlags |= PVRSRV_SYSTEM_DMA_USED;
+       }
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+       IMG_UINT32 ui32OSid = 0, ui32OSidReg = 0;
+       IMG_BOOL   bOSidAxiProtReg = IMG_FALSE;
+
+       ui32OSid    = (ui32Flags & SRV_VIRTVAL_FLAG_OSID_MASK)    >> (VIRTVAL_FLAG_OSID_SHIFT);
+       ui32OSidReg = (ui32Flags & SRV_VIRTVAL_FLAG_OSIDREG_MASK) >> (VIRTVAL_FLAG_OSIDREG_SHIFT);
+
+#if defined(EMULATOR)
+{
+       /* AXI_ACELITE is only supported on rogue cores - volcanic cores all support
+        * full ACE and we do not want to compile the code below
+        * (RGX_FEATURE_AXI_ACELITE_BIT_MASK is not defined for volcanic cores).
+        */
+
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+
+#if defined(RGX_FEATURE_AXI_ACELITE_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACELITE))
+#else
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACE))
+#endif
+       {
+               IMG_UINT32 ui32OSidAxiProtReg = 0, ui32OSidAxiProtTD = 0;
+
+               ui32OSidAxiProtReg = (ui32Flags & SRV_VIRTVAL_FLAG_AXIPREG_MASK) >> (VIRTVAL_FLAG_AXIPREG_SHIFT);
+               ui32OSidAxiProtTD  = (ui32Flags & SRV_VIRTVAL_FLAG_AXIPTD_MASK)  >> (VIRTVAL_FLAG_AXIPTD_SHIFT);
+
+               PVR_DPF((PVR_DBG_MESSAGE,
+                               "[AxiProt & Virt]: Setting bOSidAxiProt of Emulator's Trusted Device for Catbase %d to %s",
+                               ui32OSidReg,
+                               (ui32OSidAxiProtTD == 1)?"TRUE":"FALSE"));
+
+               bOSidAxiProtReg = ui32OSidAxiProtReg == 1;
+               PVR_DPF((PVR_DBG_MESSAGE,
+                               "[AxiProt & Virt]: Setting bOSidAxiProt of FW's Register for Catbase %d to %s",
+                               ui32OSidReg,
+                               bOSidAxiProtReg?"TRUE":"FALSE"));
+
+               SetAxiProtOSid(ui32OSidReg, ui32OSidAxiProtTD);
+       }
+}
+#endif /* defined(EMULATOR) */
+
+       /* We now know the OSid, OSidReg and bOSidAxiProtReg setting for this
+        * connection. We can access these from wherever we have a connection
+        * reference and do not need to traverse an arbitrary linked-list to
+        * obtain them. The settings are process-specific.
+        */
+       psConnection->ui32OSid = ui32OSid;
+       psConnection->ui32OSidReg = ui32OSidReg;
+       psConnection->bOSidAxiProtReg = bOSidAxiProtReg;
+
+       PVR_DPF((PVR_DBG_MESSAGE,
+                "[GPU Virtualization Validation]: OSIDs: %d, %d",
+                ui32OSid,
+                ui32OSidReg));
+}
+#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       /* Workload estimation is only enabled in the KM if it has also been enabled in the UM */
+       if (!(ui32ClientBuildOptions & RGX_BUILD_OPTIONS_KM & OPTIONS_WORKLOAD_ESTIMATION_MASK))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Workload Estimation disabled. Not enabled in UM",
+                               __func__));
+       }
+#endif
+
+#if defined(SUPPORT_PDVFS)
+       /* Proactive DVFS is only enabled in the KM if it has also been enabled in the UM */
+       if (!(ui32ClientBuildOptions & RGX_BUILD_OPTIONS_KM & OPTIONS_PDVFS_MASK))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Proactive DVFS disabled. Not enabled in UM",
+                        __func__));
+       }
+#endif
+
+       ui32DDKVersion = PVRVERSION_PACK(PVRVERSION_MAJ, PVRVERSION_MIN);
+       ui32DDKBuild = PVRVERSION_BUILD;
+
+       if (ui32Flags & SRV_FLAGS_CLIENT_64BIT_COMPAT)
+       {
+               psSRVData->sDriverInfo.ui8UMSupportedArch |= BUILD_ARCH_64BIT;
+       }
+       else
+       {
+               psSRVData->sDriverInfo.ui8UMSupportedArch |= BUILD_ARCH_32BIT;
+       }
+
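+       /* Record the KM and UM build information on the first connection; it is
+        * kept in the driver info and used for later compatibility reporting. */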
+       if (IMG_FALSE == bIsFirstConnection)
+       {
+               psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildOptions = (RGX_BUILD_OPTIONS_KM);
+               psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildOptions = ui32ClientBuildOptions;
+
+               psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildVersion = ui32DDKVersion;
+               psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildVersion = ui32ClientDDKVersion;
+
+               psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildRevision = ui32DDKBuild;
+               psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildRevision = ui32ClientDDKBuild;
+
+               psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildType =
+                               ((RGX_BUILD_OPTIONS_KM) & OPTIONS_DEBUG_MASK) ? BUILD_TYPE_DEBUG : BUILD_TYPE_RELEASE;
+
+               psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildType =
+                               (ui32ClientBuildOptions & OPTIONS_DEBUG_MASK) ? BUILD_TYPE_DEBUG : BUILD_TYPE_RELEASE;
+
+               if (sizeof(void *) == POINTER_SIZE_64BIT)
+               {
+                       psSRVData->sDriverInfo.ui8KMBitArch |= BUILD_ARCH_64BIT;
+               }
+               else
+               {
+                       psSRVData->sDriverInfo.ui8KMBitArch |= BUILD_ARCH_32BIT;
+               }
+       }
+
+       /* Masking out every option that is not kernel specific*/
+       ui32ClientBuildOptions &= RGX_BUILD_OPTIONS_MASK_KM;
+
+       /*
+        * Validate the build options
+        */
+       ui32BuildOptions = (RGX_BUILD_OPTIONS_KM);
+       if (ui32BuildOptions != ui32ClientBuildOptions)
+       {
+               ui32BuildOptionsMismatch = ui32BuildOptions ^ ui32ClientBuildOptions;
+#if !defined(PVRSRV_STRICT_COMPAT_CHECK)
+               /*Mask the debug flag option out as we do support combinations of debug vs release in um & km*/
+               ui32BuildOptionsMismatch &= OPTIONS_STRICT;
+#endif
+               if ( (ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0)
+               {
+                       PVR_LOG(("(FAIL) %s: Mismatch in client-side and KM driver build options; "
+                               "extra options present in client-side driver: (0x%x). Please check rgx_options.h",
+                               __func__,
+                               ui32ClientBuildOptions & ui32BuildOptionsMismatch ));
+                       PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH, chk_exit);
+               }
+
+               if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
+               {
+                       PVR_LOG(("(FAIL) %s: Mismatch in client-side and KM driver build options; "
+                               "extra options present in KM driver: (0x%x). Please check rgx_options.h",
+                               __func__,
+                               ui32BuildOptions & ui32BuildOptionsMismatch ));
+                       PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH, chk_exit);
+               }
+               if (IMG_FALSE == bIsFirstConnection)
+               {
+                       PVR_LOG(("%s: COMPAT_TEST: Client-side (0x%04x) (%s) and KM driver (0x%04x) (%s) build options differ.",
+                                __func__,
+                                ui32ClientBuildOptions,
+                                (psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildType) ? "release" : "debug",
+                                ui32BuildOptions,
+                                (psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildType) ? "release" : "debug"));
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "%s: COMPAT_TEST: Client-side (0x%04x) and KM driver (0x%04x) build options differ.",
+                                __func__,
+                                ui32ClientBuildOptions,
+                                ui32BuildOptions));
+               }
+               if (!psSRVData->sDriverInfo.bIsNoMatch)
+                       psSRVData->sDriverInfo.bIsNoMatch = IMG_TRUE;
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: Client-side and KM driver build options match. [ OK ]", __func__));
+       }
+
+       /*
+        * Validate DDK version
+        */
+       if (ui32ClientDDKVersion != ui32DDKVersion)
+       {
+               if (!psSRVData->sDriverInfo.bIsNoMatch)
+                       psSRVData->sDriverInfo.bIsNoMatch = IMG_TRUE;
+               PVR_LOG(("(FAIL) %s: Incompatible driver DDK version (%u.%u) / client DDK version (%u.%u).",
+                               __func__,
+                               PVRVERSION_MAJ, PVRVERSION_MIN,
+                               PVRVERSION_UNPACK_MAJ(ui32ClientDDKVersion),
+                               PVRVERSION_UNPACK_MIN(ui32ClientDDKVersion)));
+               PVR_DBG_BREAK;
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DDK_VERSION_MISMATCH, chk_exit);
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: driver DDK version (%u.%u) and client DDK version (%u.%u) match. [ OK ]",
+                               __func__,
+                               PVRVERSION_MAJ, PVRVERSION_MIN, PVRVERSION_MAJ, PVRVERSION_MIN));
+       }
+
+       /* Create stream for every connection except for the special clients
+        * that don't need it e.g.: recipients of HWPerf data. */
+       if (!(psConnection->ui32ClientFlags & SRV_NO_HWPERF_CLIENT_STREAM))
+       {
+               IMG_CHAR acStreamName[PRVSRVTL_MAX_STREAM_NAME_SIZE];
+               OSSNPrintf(acStreamName, PRVSRVTL_MAX_STREAM_NAME_SIZE,
+                          PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM_FMTSPEC,
+                          psDeviceNode->sDevId.i32OsDeviceID,
+                          psConnection->pid);
+
+               eError = TLStreamCreate(&psConnection->hClientTLStream,
+                                       acStreamName,
+                                       PVRSRV_CLIENT_TL_STREAM_SIZE_DEFAULT,
+                                       TL_OPMODE_DROP_NEWER |
+                                       TL_FLAG_ALLOCATE_ON_FIRST_OPEN,
+                                       NULL, NULL, NULL, NULL);
+               if (eError != PVRSRV_OK && eError != PVRSRV_ERROR_ALREADY_EXISTS)
+               {
+                       PVR_LOG_ERROR(eError, "TLStreamCreate");
+                       psConnection->hClientTLStream = NULL;
+               }
+               else if (eError == PVRSRV_OK)
+               {
+                       /* Set the "tlctrl" stream as a notification channel. This channel
+                        * is used to notify recipients about stream open/close (by writer)
+                        * actions (and possibly other actions in the future). */
+                       eError = TLStreamSetNotifStream(psConnection->hClientTLStream,
+                                                       psSRVData->hTLCtrlStream);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_LOG_ERROR(eError, "TLStreamSetNotifStream");
+                               TLStreamClose(psConnection->hClientTLStream);
+                               psConnection->hClientTLStream = NULL;
+                       }
+               }
+
+               /* Reset error status. We don't want to propagate any errors from here. */
+               eError = PVRSRV_OK;
+               PVR_DPF((PVR_DBG_MESSAGE, "Created stream \"%s\".", acStreamName));
+       }
+
+       /*
+        * Validate DDK build
+        */
+       if (ui32ClientDDKBuild != ui32DDKBuild)
+       {
+               if (!psSRVData->sDriverInfo.bIsNoMatch)
+                       psSRVData->sDriverInfo.bIsNoMatch = IMG_TRUE;
+               PVR_DPF((PVR_DBG_WARNING, "%s: Mismatch in driver DDK revision (%d) / client DDK revision (%d).",
+                               __func__, ui32DDKBuild, ui32ClientDDKBuild));
+#if defined(PVRSRV_STRICT_COMPAT_CHECK)
+               PVR_DBG_BREAK;
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DDK_BUILD_MISMATCH, chk_exit);
+#endif
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: driver DDK revision (%d) and client DDK revision (%d) match. [ OK ]",
+                               __func__, ui32DDKBuild, ui32ClientDDKBuild));
+       }
+
+#if defined(PDUMP)
+       /* Success so far so is it the PDump client that is connecting? */
+       if (ui32Flags & SRV_FLAGS_PDUMPCTRL)
+       {
+               if (psDeviceNode->sDevId.ui32InternalID == psSRVData->ui32PDumpBoundDevice)
+               {
+                       PDumpConnectionNotify(psDeviceNode);
+               }
+               else
+               {
+                       eError = PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE;
+                       PVR_DPF((PVR_DBG_ERROR, "%s: PDump requested for device %u but only permitted for device %u",
+                                       __func__, psDeviceNode->sDevId.ui32InternalID, psSRVData->ui32PDumpBoundDevice));
+                       goto chk_exit;
+               }
+       }
+       else
+       {
+               /* Warn if the app is connecting to a device PDump won't be able to capture */
+               if (psDeviceNode->sDevId.ui32InternalID != psSRVData->ui32PDumpBoundDevice)
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "%s: NB. App running on device %d won't be captured by PDump (must be on device %u)",
+                                       __func__, psDeviceNode->sDevId.ui32InternalID, psSRVData->ui32PDumpBoundDevice));
+               }
+       }
+#endif
+
+       PVR_ASSERT(pui8KernelArch != NULL);
+
+       if (psSRVData->sDriverInfo.ui8KMBitArch & BUILD_ARCH_64BIT)
+       {
+               *pui8KernelArch = 64;
+       }
+       else
+       {
+               *pui8KernelArch = 32;
+       }
+
+       bIsFirstConnection = IMG_TRUE;
+
+#if defined(DEBUG_BRIDGE_KM)
+       {
+               int ii;
+
+               /* dump dispatch table offset lookup table */
+               PVR_DPF((PVR_DBG_MESSAGE, "%s: g_BridgeDispatchTableStartOffsets[0-%lu] entries:", __func__, BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT - 1));
+               for (ii=0; ii < BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT; ii++)
+               {
+                       PVR_DPF((PVR_DBG_MESSAGE, "g_BridgeDispatchTableStartOffsets[%d]: %u", ii, g_BridgeDispatchTableStartOffsets[ii][PVR_DISPATCH_OFFSET_FIRST_FUNC]));
+               }
+       }
+#endif
+
+#if defined(PDUMP)
+       if (!(ui32Flags & SRV_FLAGS_PDUMPCTRL))
+       {
+               IMG_UINT64 ui64PDumpState = 0;
+
+               PDumpGetStateKM(&ui64PDumpState);
+               if (ui64PDumpState & PDUMP_STATE_CONNECTED)
+               {
+                       *pui32CapabilityFlags |= PVRSRV_PDUMP_IS_RECORDING;
+               }
+       }
+#endif
+
+chk_exit:
+       return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVDisconnectKM(void)
+{
+#if defined(INTEGRITY_OS) && defined(DEBUG_BRIDGE_KM)
+       PVRSRVPrintBridgeStats();
+#endif
+       /* just return OK, per-process data is cleaned up by resmgr */
+
+       return PVRSRV_OK;
+}
+
+/**************************************************************************/ /*!
+@Function       PVRSRVAcquireGlobalEventObjectKM
+@Description    Acquire the global event object.
+@Output         phGlobalEventObject    On success, points to the global event
+                                       object handle
+@Return         PVRSRV_ERROR           PVRSRV_OK on success or an error
+                                       otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVAcquireGlobalEventObjectKM(IMG_HANDLE *phGlobalEventObject)
+{
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+       *phGlobalEventObject = psPVRSRVData->hGlobalEventObject;
+
+       return PVRSRV_OK;
+}
+
+/**************************************************************************/ /*!
+@Function       PVRSRVReleaseGlobalEventObjectKM
+@Description    Release the global event object.
+@Input           hGlobalEventObject    Global event object handle
+@Return         PVRSRV_ERROR          PVRSRV_OK on success or an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVReleaseGlobalEventObjectKM(IMG_HANDLE hGlobalEventObject)
+{
+       PVR_ASSERT(PVRSRVGetPVRSRVData()->hGlobalEventObject == hGlobalEventObject);
+
+       return PVRSRV_OK;
+}
+
+/*
+       PVRSRVDumpDebugInfoKM
+*/
+PVRSRV_ERROR
+PVRSRVDumpDebugInfoKM(CONNECTION_DATA *psConnection,
+                                         PVRSRV_DEVICE_NODE *psDeviceNode,
+                                         IMG_UINT32 ui32VerbLevel)
+{
+       if (ui32VerbLevel > DEBUG_REQUEST_VERBOSITY_MAX)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+       PVR_LOG(("User requested PVR debug info"));
+
+       PVRSRVDebugRequest(psDeviceNode, ui32VerbLevel, NULL, NULL);
+
+       return PVRSRV_OK;
+}
+
+/*
+       PVRSRVGetDevClockSpeedKM
+*/
+PVRSRV_ERROR
+PVRSRVGetDevClockSpeedKM(CONNECTION_DATA * psConnection,
+                         PVRSRV_DEVICE_NODE *psDeviceNode,
+                         IMG_PUINT32  pui32RGXClockSpeed)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVR_ASSERT(psDeviceNode->pfnDeviceClockSpeed != NULL);
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       eError = psDeviceNode->pfnDeviceClockSpeed(psDeviceNode, pui32RGXClockSpeed);
+       PVR_WARN_IF_ERROR(eError, "pfnDeviceClockSpeed");
+
+       return eError;
+}
+
+
+/*
+       PVRSRVHWOpTimeoutKM
+*/
+PVRSRV_ERROR
+PVRSRVHWOpTimeoutKM(CONNECTION_DATA *psConnection,
+                                       PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+#if defined(PVRSRV_RESET_ON_HWTIMEOUT)
+       PVR_LOG(("User requested OS reset"));
+       OSPanic();
+#endif
+       PVR_LOG(("HW operation timeout, dump server info"));
+       PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+       return PVRSRV_OK;
+}
+
+
+IMG_INT
+DummyBW(IMG_UINT32 ui32DispatchTableEntry,
+               IMG_UINT8 *psBridgeIn,
+               IMG_UINT8 *psBridgeOut,
+               CONNECTION_DATA *psConnection)
+{
+       PVR_UNREFERENCED_PARAMETER(psBridgeIn);
+       PVR_UNREFERENCED_PARAMETER(psBridgeOut);
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if defined(DEBUG_BRIDGE_KM)
+       PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: ui32DispatchTableEntry %u (%s) mapped to "
+                        "Dummy Wrapper (probably not what you want!)",
+                        __func__, ui32DispatchTableEntry, g_BridgeDispatchTable[ui32DispatchTableEntry].pszIOCName));
+#else
+       PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: ui32DispatchTableEntry %u mapped to "
+                        "Dummy Wrapper (probably not what you want!)",
+                        __func__, ui32DispatchTableEntry));
+#endif
+       return PVRSRV_ERROR_BRIDGE_ENOTTY;
+}
+
+PVRSRV_ERROR PVRSRVAlignmentCheckKM(CONNECTION_DATA *psConnection,
+                                    PVRSRV_DEVICE_NODE *psDeviceNode,
+                                    IMG_UINT32 ui32AlignChecksSize,
+                                    IMG_UINT32 aui32AlignChecks[])
+{
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if !defined(NO_HARDWARE)
+
+       PVR_ASSERT(psDeviceNode->pfnAlignmentCheck != NULL);
+       return psDeviceNode->pfnAlignmentCheck(psDeviceNode, ui32AlignChecksSize,
+                                              aui32AlignChecks);
+
+#else
+
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(ui32AlignChecksSize);
+       PVR_UNREFERENCED_PARAMETER(aui32AlignChecks);
+
+       return PVRSRV_OK;
+
+#endif /* !defined(NO_HARDWARE) */
+
+}
+
+PVRSRV_ERROR PVRSRVGetDeviceStatusKM(CONNECTION_DATA *psConnection,
+                                     PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     IMG_UINT32 *pui32DeviceStatus)
+{
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       /* First try to update the status. */
+       if (psDeviceNode->pfnUpdateHealthStatus != NULL)
+       {
+               PVRSRV_ERROR eError = psDeviceNode->pfnUpdateHealthStatus(psDeviceNode,
+                                                                         IMG_FALSE);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetDeviceStatusKM: Failed to "
+                                        "check for device status (%d)", eError));
+
+                       /* Return unknown status and error because we don't know what
+                        * happened and if the status is valid. */
+                       *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_UNKNOWN;
+                       return eError;
+               }
+       }
+
+       switch (OSAtomicRead(&psDeviceNode->eHealthStatus))
+       {
+               case PVRSRV_DEVICE_HEALTH_STATUS_OK:
+                       *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_OK;
+                       return PVRSRV_OK;
+               case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING:
+                       *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_NOT_RESPONDING;
+                       return PVRSRV_OK;
+               case PVRSRV_DEVICE_HEALTH_STATUS_DEAD:
+               case PVRSRV_DEVICE_HEALTH_STATUS_FAULT:
+               case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED:
+                       *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_DEVICE_ERROR;
+                       return PVRSRV_OK;
+               default:
+                       *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_UNKNOWN;
+                       return PVRSRV_ERROR_INTERNAL_ERROR;
+       }
+}
+
+PVRSRV_ERROR PVRSRVGetMultiCoreInfoKM(CONNECTION_DATA *psConnection,
+                                      PVRSRV_DEVICE_NODE *psDeviceNode,
+                                      IMG_UINT32 ui32CapsSize,
+                                      IMG_UINT32 *pui32NumCores,
+                                      IMG_UINT64 *pui64Caps)
+{
+       PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_SUPPORTED;
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       if (ui32CapsSize > 0)
+       {
+               /* Clear the buffer to ensure no uninitialised data is returned to UM
+                * if the pfn call below does not write to the whole array, or is null.
+                */
+               memset(pui64Caps, 0x00, (ui32CapsSize * sizeof(IMG_UINT64)));
+       }
+
+       if (psDeviceNode->pfnGetMultiCoreInfo != NULL)
+       {
+               eError = psDeviceNode->pfnGetMultiCoreInfo(psDeviceNode, ui32CapsSize, pui32NumCores, pui64Caps);
+       }
+       return eError;
+}
+
+
+/*!
+ * *****************************************************************************
+ * @brief A wrapper for removing entries in the g_BridgeDispatchTable array.
+ *               All this does is zero the entry to allow for a full table re-population
+ *               later.
+ *
+ * @param ui32BridgeGroup
+ * @param ui32Index
+ *
+ * @return
+ ********************************************************************************/
+void
+UnsetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup, IMG_UINT32 ui32Index)
+{
+       ui32Index += g_BridgeDispatchTableStartOffsets[ui32BridgeGroup][PVR_DISPATCH_OFFSET_FIRST_FUNC];
+
+       g_BridgeDispatchTable[ui32Index].pfFunction = NULL;
+       g_BridgeDispatchTable[ui32Index].hBridgeLock = NULL;
+#if defined(DEBUG_BRIDGE_KM)
+       g_BridgeDispatchTable[ui32Index].pszIOCName = NULL;
+       g_BridgeDispatchTable[ui32Index].pszFunctionName = NULL;
+       g_BridgeDispatchTable[ui32Index].pszBridgeLockName = NULL;
+       g_BridgeDispatchTable[ui32Index].ui32CallCount = 0;
+       g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0;
+       g_BridgeDispatchTable[ui32Index].ui64TotalTimeNS = 0;
+       g_BridgeDispatchTable[ui32Index].ui64MaxTimeNS = 0;
+#endif
+}
+
+/*!
+ * *****************************************************************************
+ * @brief A wrapper for filling in the g_BridgeDispatchTable array that does
+ *               error checking.
+ *
+ * @param ui32Index
+ * @param pszIOCName
+ * @param pfFunction
+ * @param pszFunctionName
+ *
+ * @return
+ ********************************************************************************/
+void
+_SetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup,
+                                          IMG_UINT32 ui32Index,
+                                          const IMG_CHAR *pszIOCName,
+                                          BridgeWrapperFunction pfFunction,
+                                          const IMG_CHAR *pszFunctionName,
+                                          POS_LOCK hBridgeLock,
+                                          const IMG_CHAR *pszBridgeLockName)
+{
+       static IMG_UINT32 ui32PrevIndex = IMG_UINT32_MAX;               /* -1 */
+
+#if !defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) && !defined(DEBUG_BRIDGE_KM)
+       PVR_UNREFERENCED_PARAMETER(pszFunctionName);
+       PVR_UNREFERENCED_PARAMETER(pszBridgeLockName);
+#endif
+
+       ui32Index += g_BridgeDispatchTableStartOffsets[ui32BridgeGroup][PVR_DISPATCH_OFFSET_FIRST_FUNC];
+
+#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE)
+       /* Enable this to dump out the dispatch table entries */
+       PVR_DPF((PVR_DBG_WARNING, "%s: g_BridgeDispatchTableStartOffsets[%d]=%d", __func__, ui32BridgeGroup, g_BridgeDispatchTableStartOffsets[ui32BridgeGroup][PVR_DISPATCH_OFFSET_FIRST_FUNC]));
+       PVR_DPF((PVR_DBG_WARNING, "%s: %d %s %s %s", __func__, ui32Index, pszIOCName, pszFunctionName, pszBridgeLockName));
+#endif
+
+       /* Any gaps are sub-optimal in terms of memory usage, but we are mainly
+        * interested in spotting any large gap of wasted memory that could be
+        * accidentally introduced.
+        *
+        * This will currently flag up any gaps > 5 entries.
+        *
+        * NOTE: This shouldn't be debug only since switching from debug->release
+        * etc is likely to modify the available ioctls and thus be a point where
+        * mistakes are exposed. This isn't run at a performance critical time.
+        */
+       if ((ui32PrevIndex != IMG_UINT32_MAX) &&
+               ((ui32Index >= ui32PrevIndex + DISPATCH_TABLE_GAP_THRESHOLD) ||
+                (ui32Index <= ui32PrevIndex)))
+       {
+#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE)
+               PVR_DPF((PVR_DBG_WARNING,
+                                "%s: There is a gap in the dispatch table between indices %u (%s) and %u (%s)",
+                                __func__, ui32PrevIndex, g_BridgeDispatchTable[ui32PrevIndex].pszIOCName,
+                                ui32Index, pszIOCName));
+#else
+               PVR_DPF((PVR_DBG_MESSAGE,
+                                "%s: There is a gap in the dispatch table between indices %u and %u (%s)",
+                                __func__, (IMG_UINT)ui32PrevIndex, (IMG_UINT)ui32Index, pszIOCName));
+#endif
+       }
+
+       if (ui32Index >= BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Index %u (%s) out of range",
+                                __func__, (IMG_UINT)ui32Index, pszIOCName));
+
+#if defined(DEBUG_BRIDGE_KM)
+               PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE_DISPATCH_TABLE_ENTRY_COUNT = %lu",
+                                __func__, BRIDGE_DISPATCH_TABLE_ENTRY_COUNT));
+#if defined(SUPPORT_RGX)
+               PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST = %lu",
+                                __func__, PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST));
+               PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST = %lu",
+                                __func__, PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST));
+               PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST = %lu",
+                                __func__, PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST));
+               PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST = %lu",
+                                __func__, PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST));
+               PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST = %lu",
+                                __func__, PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST));
+               PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST = %lu",
+                                __func__, PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST));
+               PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST = %lu",
+                                __func__, PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST));
+               PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST = %lu",
+                                __func__, PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST));
+               PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_LAST = %lu",
+                                __func__, PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_LAST));
+
+               PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGX_DISPATCH_LAST = %lu",
+                                __func__, PVRSRV_BRIDGE_RGX_DISPATCH_LAST));
+               PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGX_LAST = %lu",
+                                __func__, PVRSRV_BRIDGE_RGX_LAST));
+#endif
+               PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_LAST = %lu",
+                                __func__, PVRSRV_BRIDGE_LAST));
+               PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST = %lu",
+                                __func__, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST));
+               PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST = %lu",
+                                __func__, PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST));
+               PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST = %lu",
+                                __func__, PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST));
+#endif
+
+               OSPanic();
+       }
+
+       /* Panic if the previous entry has been overwritten as this is not allowed!
+        * NOTE: This shouldn't be debug only since switching from debug->release
+        * etc is likely to modify the available ioctls and thus be a point where
+        * mistakes are exposed. This isn't run at a performance critical time.
+        */
+       if (g_BridgeDispatchTable[ui32Index].pfFunction)
+       {
+               if (g_BridgeDispatchTable[ui32Index].pfFunction != pfFunction)
+               {
+#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE)
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Adding dispatch table entry for %s clobbers an existing entry for %s (current pfn=<%p>, new pfn=<%p>)",
+                                __func__, pszIOCName, g_BridgeDispatchTable[ui32Index].pszIOCName,
+                                (void*)g_BridgeDispatchTable[ui32Index].pfFunction, (void*)pfFunction));
+#else
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Adding dispatch table entry for %s clobbers an existing entry (index=%u). (current pfn=<%p>, new pfn=<%p>)",
+                                __func__, pszIOCName, ui32Index,
+                                (void*)g_BridgeDispatchTable[ui32Index].pfFunction, (void*)pfFunction));
+                       PVR_DPF((PVR_DBG_WARNING, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue."));
+#endif
+                       OSPanic();
+               }
+       }
+       else
+       {
+               g_BridgeDispatchTable[ui32Index].pfFunction = pfFunction;
+               g_BridgeDispatchTable[ui32Index].hBridgeLock = hBridgeLock;
+#if defined(DEBUG_BRIDGE_KM)
+               g_BridgeDispatchTable[ui32Index].pszIOCName = pszIOCName;
+               g_BridgeDispatchTable[ui32Index].pszFunctionName = pszFunctionName;
+               g_BridgeDispatchTable[ui32Index].pszBridgeLockName = pszBridgeLockName;
+               g_BridgeDispatchTable[ui32Index].ui32CallCount = 0;
+               g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0;
+               g_BridgeDispatchTable[ui32Index].ui64TotalTimeNS = 0;
+               g_BridgeDispatchTable[ui32Index].ui64MaxTimeNS = 0;
+#endif
+       }
+
+       ui32PrevIndex = ui32Index;
+}
+
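+/* Pool callbacks for the bridge buffer pool created in BridgeDispatcherInit():
+ * each pooled buffer is a single zeroed allocation that holds the input and
+ * output areas back to back; BridgedDispatchKM() splits it at
+ * PVRSRV_MAX_BRIDGE_IN_SIZE.
+ */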
+static PVRSRV_ERROR _BridgeBufferAlloc(void *pvPrivData, void **pvOut)
+{
+       PVR_UNREFERENCED_PARAMETER(pvPrivData);
+
+       *pvOut = OSAllocZMem(PVRSRV_MAX_BRIDGE_IN_SIZE +
+                            PVRSRV_MAX_BRIDGE_OUT_SIZE);
+       PVR_RETURN_IF_NOMEM(*pvOut);
+
+       return PVRSRV_OK;
+}
+
+static void _BridgeBufferFree(void *pvPrivData, void *pvFreeData)
+{
+       PVR_UNREFERENCED_PARAMETER(pvPrivData);
+
+       OSFreeMem(pvFreeData);
+}
+
+PVRSRV_ERROR BridgeDispatcherInit(void)
+{
+       PVRSRV_ERROR eError;
+
+#if defined(DEBUG_BRIDGE_KM)
+       eError = OSLockCreate(&g_hStatsLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", errorLockCreateFailed);
+#endif
+
+       eError = PVRSRVPoolCreate(_BridgeBufferAlloc,
+                                 _BridgeBufferFree,
+                                 PVRSRV_MAX_POOLED_BRIDGE_BUFFERS,
+                                 "Bridge buffer pool",
+                                 NULL,
+                                 &g_psBridgeBufferPool);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVPoolCreate", errorPoolCreateFailed);
+
+       return PVRSRV_OK;
+
+errorPoolCreateFailed:
+#if defined(DEBUG_BRIDGE_KM)
+       OSLockDestroy(g_hStatsLock);
+       g_hStatsLock = NULL;
+errorLockCreateFailed:
+#endif
+       return eError;
+}
+
+void BridgeDispatcherDeinit(void)
+{
+       if (g_psBridgeBufferPool)
+       {
+               PVRSRVPoolDestroy(g_psBridgeBufferPool);
+               g_psBridgeBufferPool = NULL;
+       }
+
+#if defined(DEBUG_BRIDGE_KM)
+       if (g_hStatsLock)
+       {
+               OSLockDestroy(g_hStatsLock);
+               g_hStatsLock = NULL;
+       }
+#endif
+}
+
+PVRSRV_ERROR BridgedDispatchKM(CONNECTION_DATA * psConnection,
+                          PVRSRV_BRIDGE_PACKAGE   * psBridgePackageKM)
+{
+
+       void       * psBridgeIn=NULL;
+       void       * psBridgeOut=NULL;
+       BridgeWrapperFunction pfBridgeHandler;
+       IMG_UINT32   ui32DispatchTableEntry, ui32GroupBoundary;
+       PVRSRV_ERROR err = PVRSRV_OK;
+#if !defined(INTEGRITY_OS)
+       PVRSRV_POOL_TOKEN hBridgeBufferPoolToken = NULL;
+#endif
+       IMG_UINT32 ui32Timestamp = OSClockus();
+#if defined(DEBUG_BRIDGE_KM)
+       IMG_UINT64      ui64TimeStart;
+       IMG_UINT64      ui64TimeEnd;
+       IMG_UINT64      ui64TimeDiff;
+#endif
+       IMG_UINT32      ui32DispatchTableIndex, ui32DispatchTableEntryIndex;
+
+#if defined(DEBUG_BRIDGE_KM_STOP_AT_DISPATCH)
+       PVR_DBG_BREAK;
+#endif
+
+       if (psBridgePackageKM->ui32BridgeID >= BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Out of range dispatch table group ID: %d",
+                       __func__, psBridgePackageKM->ui32BridgeID));
+               PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_EINVAL, return_error);
+       }
+
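+       /* The group ID has been range-checked above; confine it against
+        * speculative execution before using it to index the offsets table. */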
+       ui32DispatchTableIndex = OSConfineArrayIndexNoSpeculation(psBridgePackageKM->ui32BridgeID, BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT);
+
+       ui32DispatchTableEntry = g_BridgeDispatchTableStartOffsets[ui32DispatchTableIndex][PVR_DISPATCH_OFFSET_FIRST_FUNC];
+       ui32GroupBoundary = g_BridgeDispatchTableStartOffsets[ui32DispatchTableIndex][PVR_DISPATCH_OFFSET_LAST_FUNC];
+
+       /* bridge function is not implemented in this build */
+       if (0 == ui32DispatchTableEntry)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Dispatch table entry=%d, boundary = %d, (bridge module %d, function %d)",
+                        __func__,
+                        ui32DispatchTableEntry,
+                        ui32GroupBoundary,
+                        psBridgePackageKM->ui32BridgeID,
+                        psBridgePackageKM->ui32FunctionID));
+               /* this points to DummyBW() which returns PVRSRV_ERROR_BRIDGE_ENOTTY */
+               err = g_BridgeDispatchTable[ui32DispatchTableEntry].pfFunction(ui32DispatchTableEntry,
+                                 psBridgeIn,
+                                 psBridgeOut,
+                                 psConnection);
+               goto return_error;
+       }
+       if ((ui32DispatchTableEntry + psBridgePackageKM->ui32FunctionID) > ui32GroupBoundary)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Dispatch table entry=%d, boundary = %d, (bridge module %d, function %d)",
+                        __func__,
+                        ui32DispatchTableEntry,
+                        ui32GroupBoundary,
+                        psBridgePackageKM->ui32BridgeID,
+                        psBridgePackageKM->ui32FunctionID));
+               PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_EINVAL, return_error);
+       }
+       ui32DispatchTableEntry += psBridgePackageKM->ui32FunctionID;
+       ui32DispatchTableEntryIndex = OSConfineArrayIndexNoSpeculation(ui32DispatchTableEntry, ui32GroupBoundary+1);
+       if (BRIDGE_DISPATCH_TABLE_ENTRY_COUNT <= ui32DispatchTableEntry)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Dispatch table entry=%d, entry count = %lu,"
+                       " (bridge module %d, function %d)", __func__,
+                       ui32DispatchTableEntry, BRIDGE_DISPATCH_TABLE_ENTRY_COUNT,
+                       psBridgePackageKM->ui32BridgeID,
+                       psBridgePackageKM->ui32FunctionID));
+               PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_EINVAL, return_error);
+       }
+#if defined(DEBUG_BRIDGE_KM)
+       PVR_DPF((PVR_DBG_MESSAGE, "%s: Dispatch table entry index=%d, (bridge module %d, function %d)",
+                       __func__,
+                       ui32DispatchTableEntryIndex, psBridgePackageKM->ui32BridgeID, psBridgePackageKM->ui32FunctionID));
+       PVR_DPF((PVR_DBG_MESSAGE, "%s: %s",
+                        __func__,
+                        g_BridgeDispatchTable[ui32DispatchTableEntryIndex].pszIOCName));
+       g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui32CallCount++;
+       g_BridgeGlobalStats.ui32IOCTLCount++;
+#endif
+
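+       /* Serialise the call if a per-entry bridge lock was registered for it. */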
+       if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock != NULL)
+       {
+               OSLockAcquire(g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock);
+       }
+#if !defined(INTEGRITY_OS)
+       /* try to acquire a bridge buffer from the pool */
+
+       err = PVRSRVPoolGet(g_psBridgeBufferPool,
+                       &hBridgeBufferPoolToken,
+                       &psBridgeIn);
+       PVR_LOG_GOTO_IF_ERROR(err, "PVRSRVPoolGet", unlock_and_return_error);
+
+       psBridgeOut = ((IMG_BYTE *) psBridgeIn) + PVRSRV_MAX_BRIDGE_IN_SIZE;
+#endif
+
+#if defined(DEBUG_BRIDGE_KM)
+       ui64TimeStart = OSClockns64();
+#endif
+
+       if (psBridgePackageKM->ui32InBufferSize > PVRSRV_MAX_BRIDGE_IN_SIZE)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Bridge input buffer too small "
+                       "(data size %u, buffer size %u)!", __func__,
+                       psBridgePackageKM->ui32InBufferSize, PVRSRV_MAX_BRIDGE_IN_SIZE));
+               PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_ERANGE, unlock_and_return_error);
+       }
+
+#if !defined(INTEGRITY_OS)
+       if (psBridgePackageKM->ui32OutBufferSize > PVRSRV_MAX_BRIDGE_OUT_SIZE)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Bridge output buffer too small "
+                       "(data size %u, buffer size %u)!", __func__,
+                       psBridgePackageKM->ui32OutBufferSize, PVRSRV_MAX_BRIDGE_OUT_SIZE));
+               PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_ERANGE, unlock_and_return_error);
+       }
+
+       if ((CopyFromUserWrapper (psConnection,
+                                                         ui32DispatchTableEntryIndex,
+                                                         psBridgeIn,
+                                                         psBridgePackageKM->pvParamIn,
+                                                         psBridgePackageKM->ui32InBufferSize) != PVRSRV_OK)
+#if defined(__QNXNTO__)
+/* For Neutrino, the output bridge buffer acts as an input as well */
+                                       || (CopyFromUserWrapper(psConnection,
+                                                                                       ui32DispatchTableEntryIndex,
+                                                                                       psBridgeOut,
+                                                                                       (void *)((uintptr_t)psBridgePackageKM->pvParamIn + psBridgePackageKM->ui32InBufferSize),
+                                                                                       psBridgePackageKM->ui32OutBufferSize) != PVRSRV_OK)
+#endif
+               ) /* end of if-condition */
+       {
+               PVR_LOG_GOTO_WITH_ERROR("CopyFromUserWrapper", err, PVRSRV_ERROR_BRIDGE_EFAULT, unlock_and_return_error);
+       }
+#else
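+       /* On INTEGRITY OS the user parameters are used in place rather than
+        * being copied through a pooled kernel bridge buffer. */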
+       psBridgeIn = psBridgePackageKM->pvParamIn;
+       psBridgeOut = psBridgePackageKM->pvParamOut;
+#endif
+
+       pfBridgeHandler =
+               (BridgeWrapperFunction)g_BridgeDispatchTable[ui32DispatchTableEntryIndex].pfFunction;
+
+       if (pfBridgeHandler == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: ui32DispatchTableEntry = %d is not a registered function!",
+                                __func__, ui32DispatchTableEntry));
+               PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_EFAULT, unlock_and_return_error);
+       }
+
+       /* pfBridgeHandler functions currently do not fail; they return an IMG_INT
+        * whose value is either 0 or PVRSRV_OK (also 0). Should that ever change,
+        * the error may be positive or negative, so map any non-zero return to a
+        * consistent error code here.
+        */
+       if (0 != pfBridgeHandler(ui32DispatchTableEntryIndex,
+                                                 psBridgeIn,
+                                                 psBridgeOut,
+                                                 psConnection)
+               )
+       {
+               PVR_LOG_GOTO_WITH_ERROR("pfBridgeHandler", err, PVRSRV_ERROR_BRIDGE_EPERM, unlock_and_return_error);
+       }
+
+       /*
+          This should always be true as, at the moment, all bridge calls have to
+          return at least an error status, but this could change, so we do this
+          check to be safe.
+       */
+#if !defined(INTEGRITY_OS)
+       if (psBridgePackageKM->ui32OutBufferSize > 0)
+       {
+               if (CopyToUserWrapper (psConnection,
+                                               ui32DispatchTableEntryIndex,
+                                               psBridgePackageKM->pvParamOut,
+                                               psBridgeOut,
+                                               psBridgePackageKM->ui32OutBufferSize) != PVRSRV_OK)
+               {
+                       PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_EFAULT, unlock_and_return_error);
+               }
+       }
+#endif
+
+#if defined(DEBUG_BRIDGE_KM)
+       ui64TimeEnd = OSClockns64();
+
+       ui64TimeDiff = ui64TimeEnd - ui64TimeStart;
+
+       /* if there is no lock held then acquire the stats lock to
+        * ensure the calculations are done safely
+        */
+       if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock == NULL)
+       {
+               BridgeGlobalStatsLock();
+       }
+
+       g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui64TotalTimeNS += ui64TimeDiff;
+
+       if (ui64TimeDiff > g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui64MaxTimeNS)
+       {
+               g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui64MaxTimeNS = ui64TimeDiff;
+       }
+
+       if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock == NULL)
+       {
+               BridgeGlobalStatsUnlock();
+       }
+#endif
+
+unlock_and_return_error:
+
+       if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock != NULL)
+       {
+               OSLockRelease(g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock);
+       }
+
+#if !defined(INTEGRITY_OS)
+       if (hBridgeBufferPoolToken != NULL)
+       {
+               err = PVRSRVPoolPut(g_psBridgeBufferPool,
+                               hBridgeBufferPoolToken);
+               PVR_LOG_IF_ERROR(err, "PVRSRVPoolPut");
+       }
+#endif
+
+return_error:
+       if (err)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: returning (err = %d)", __func__, err));
+       }
+       /* ignore transport layer bridge to avoid HTB flooding */
+       if (psBridgePackageKM->ui32BridgeID != PVRSRV_BRIDGE_PVRTL)
+       {
+               if (err)
+               {
+                       HTBLOGK(HTB_SF_BRG_BRIDGE_CALL_ERR, ui32Timestamp,
+                               psBridgePackageKM->ui32BridgeID,
+                               psBridgePackageKM->ui32FunctionID, err);
+               }
+               else
+               {
+                       HTBLOGK(HTB_SF_BRG_BRIDGE_CALL, ui32Timestamp,
+                               psBridgePackageKM->ui32BridgeID,
+                               psBridgePackageKM->ui32FunctionID);
+               }
+       }
+
+       return err;
+}
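+
+/* In outline, the dispatcher above performs, per bridge call (a summary of
+ * the code above, not additional behaviour):
+ *   copy-in  : CopyFromUserWrapper()  user pvParamIn  -> kernel psBridgeIn
+ *   dispatch : pfBridgeHandler(ui32DispatchTableEntryIndex,
+ *                              psBridgeIn, psBridgeOut, psConnection)
+ *   copy-out : CopyToUserWrapper()    kernel psBridgeOut -> user pvParamOut
+ * with per-entry locking, optional DEBUG_BRIDGE_KM timing statistics and
+ * HTB logging on the way out.
+ */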
+
+PVRSRV_ERROR PVRSRVFindProcessMemStatsKM(IMG_PID pid, IMG_UINT32 ui32ArrSize, IMG_BOOL bAllProcessStats, IMG_UINT32 *pui32MemStatArray)
+{
+#if !defined(__QNXNTO__)
+       return PVRSRVFindProcessMemStats(pid,
+                                       ui32ArrSize,
+                                       bAllProcessStats,
+                                       pui32MemStatArray);
+#else
+       PVR_DPF((PVR_DBG_ERROR, "This functionality is not yet implemented for this platform"));
+
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+#endif
+
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/sync_checkpoint.c b/drivers/gpu/drm/img/img-rogue/services/server/common/sync_checkpoint.c
new file mode 100644 (file)
index 0000000..1bab1af
--- /dev/null
@@ -0,0 +1,2981 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services synchronisation checkpoint interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Server side code for services synchronisation interface
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "pvr_debug.h"
+#include "pvr_notifier.h"
+#include "osfunc.h"
+#include "dllist.h"
+#include "sync.h"
+#include "sync_checkpoint_external.h"
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+#include "sync_checkpoint_init.h"
+#include "lock.h"
+#include "log2.h"
+#include "pvrsrv.h"
+#include "pdump_km.h"
+#include "info_page.h"
+
+#include "pvrsrv_sync_km.h"
+#include "rgxhwperf.h"
+
+#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER)
+#include "rgxsoctimer.h"
+#endif
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+
+/* Enable this to turn on debug relating to the creation and
+   resolution of contexts */
+#define ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG 0
+
+/* Enable this to turn on debug relating to the creation and
+   resolution of fences */
+#define ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG 0
+
+/* Enable this to turn on debug relating to the sync checkpoint
+   allocation and freeing */
+#define ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG 0
+
+/* Enable this to turn on debug relating to the sync checkpoint
+   enqueuing and signalling */
+#define ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG 0
+
+/* Enable this to turn on debug relating to the sync checkpoint pool */
+#define ENABLE_SYNC_CHECKPOINT_POOL_DEBUG 0
+
+/* Enable this to turn on debug relating to sync checkpoint UFO
+   lookup */
+#define ENABLE_SYNC_CHECKPOINT_UFO_DEBUG 0
+
+/* Enable this to turn on sync checkpoint deferred cleanup debug
+ * (for syncs we have been told to free but which still have some
+ * outstanding FW operations remaining (enqueued in CCBs)).
+ */
+#define ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG 0
+
+#else
+
+#define ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG 0
+#define ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG 0
+#define ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG 0
+#define ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG 0
+#define ENABLE_SYNC_CHECKPOINT_POOL_DEBUG 0
+#define ENABLE_SYNC_CHECKPOINT_UFO_DEBUG 0
+#define ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG 0
+
+#endif
+
+/* Maximum number of deferred sync checkpoint signals/errors received in atomic context */
+#define SYNC_CHECKPOINT_MAX_DEFERRED_SIGNAL 500
+
+/* Set the size of the sync checkpoint pool (not used if 0).
+ * A pool will be maintained for each sync checkpoint context.
+ */
+#if defined(PDUMP)
+#define SYNC_CHECKPOINT_POOL_SIZE      0
+#else
+#define SYNC_CHECKPOINT_POOL_SIZE      128
+#define SYNC_CHECKPOINT_POOL_MASK (SYNC_CHECKPOINT_POOL_SIZE - 1)
+#endif
+
+/* The 'sediment' value represents the minimum number of
+ * sync checkpoints which must be in the pool before one
+ * will be allocated from the pool rather than from memory.
+ * This effectively helps avoid re-use of a sync checkpoint
+ * just after it has been returned to the pool, making
+ * debugging somewhat easier to understand.
+ */
+#define SYNC_CHECKPOINT_POOL_SEDIMENT 20
+
+#if (SYNC_CHECKPOINT_POOL_SIZE & (SYNC_CHECKPOINT_POOL_SIZE - 1)) != 0
+#error "SYNC_CHECKPOINT_POOL_SIZE must be a power of 2."
+#endif
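+
+/* A minimal sketch of why the power-of-two constraint matters: with the
+ * pool enabled, a ring-buffer index (e.g. a hypothetical write pointer
+ * ui32Wp) can be wrapped with the mask rather than a modulo:
+ *
+ *   ui32Wp = (ui32Wp + 1) & SYNC_CHECKPOINT_POOL_MASK;
+ *
+ * which is equivalent to (ui32Wp + 1) % SYNC_CHECKPOINT_POOL_SIZE.
+ */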
+
+#define SYNC_CHECKPOINT_BLOCK_LIST_CHUNK_SIZE  10
+
+/*
+       This defines the maximum amount of synchronisation memory
+       that can be allocated per sync checkpoint context.
+       In reality this number is meaningless as we would run out
+       of synchronisation memory before we reach this limit, but
+       we need to provide a size to the span RA.
+ */
+#define MAX_SYNC_CHECKPOINT_MEM  (4 * 1024 * 1024)
+
+
+typedef struct _SYNC_CHECKPOINT_BLOCK_LIST_
+{
+       IMG_UINT32            ui32BlockCount;            /*!< Number of blocks in the list */
+       IMG_UINT32            ui32BlockListSize;         /*!< Size of the blocks array */
+       SYNC_CHECKPOINT_BLOCK **papsSyncCheckpointBlock; /*!< Array of sync checkpoint blocks */
+} SYNC_CHECKPOINT_BLOCK_LIST;
+
+struct _SYNC_CHECKPOINT_CONTEXT_CTL_
+{
+       SHARED_DEV_CONNECTION                                   psDeviceNode;
+       PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN    pfnFenceResolve;
+       PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN             pfnFenceCreate;
+       /*
+        *  Used as the head of a linked-list of sync checkpoints for which
+        *  SyncCheckpointFree() has been called, but which have outstanding
+        *  FW operations (enqueued in CCBs).
+        *  This list will be checked whenever SyncCheckpointFree() is
+        *  called, and when SyncCheckpointContextDestroy() is called.
+        */
+       DLLIST_NODE                                                             sDeferredCleanupListHead;
+       /* Lock to protect the deferred cleanup list */
+       POS_SPINLOCK                                                    hDeferredCleanupListLock;
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+       SYNC_CHECKPOINT                                         *psSyncCheckpointPool[SYNC_CHECKPOINT_POOL_SIZE];
+       IMG_BOOL                                                                bSyncCheckpointPoolFull;
+       IMG_BOOL                                                                bSyncCheckpointPoolValid;
+       IMG_UINT32                                                              ui32SyncCheckpointPoolCount;
+       IMG_UINT32                                                              ui32SyncCheckpointPoolWp;
+       IMG_UINT32                                                              ui32SyncCheckpointPoolRp;
+       POS_SPINLOCK                                                    hSyncCheckpointPoolLock; /*! Protects access to the checkpoint pool control data. */
+#endif
+}; /*_SYNC_CHECKPOINT_CONTEXT_CTL is already typedef-ed in sync_checkpoint_internal.h */
+
+/* this is the max number of sync checkpoint records we will search or dump
+ * at any time.
+ */
+#define SYNC_CHECKPOINT_RECORD_LIMIT 20000
+
+#define DECREMENT_WITH_WRAP(value, sz) ((value) ? ((value) - 1) : ((sz) - 1))
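+
+/* For example, stepping a (hypothetical, illustration-only) index ui32Idx
+ * backwards through a ring buffer of sz entries:
+ *
+ *   ui32Idx = DECREMENT_WITH_WRAP(ui32Idx, sz);
+ *
+ * maps 0 to (sz - 1) and otherwise subtracts one.
+ */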
+
+struct SYNC_CHECKPOINT_RECORD
+{
+       PVRSRV_DEVICE_NODE              *psDevNode;
+       SYNC_CHECKPOINT_BLOCK   *psSyncCheckpointBlock; /*!< handle to SYNC_CHECKPOINT_BLOCK */
+       IMG_UINT32                              ui32SyncOffset;                 /*!< offset to sync in block */
+       IMG_UINT32                              ui32FwBlockAddr;
+       IMG_PID                                 uiPID;
+       IMG_UINT32                              ui32UID;
+       IMG_UINT64                              ui64OSTime;
+       DLLIST_NODE                             sNode;
+       IMG_CHAR                                szClassName[PVRSRV_SYNC_NAME_LENGTH];
+       PSYNC_CHECKPOINT                pSyncCheckpt;
+};
+
+static PFN_SYNC_CHECKPOINT_STRUCT *g_psSyncCheckpointPfnStruct = NULL;
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+static SYNC_CHECKPOINT *_GetCheckpointFromPool(_SYNC_CHECKPOINT_CONTEXT *psContext);
+static IMG_BOOL _PutCheckpointInPool(SYNC_CHECKPOINT *psSyncCheckpoint);
+static IMG_UINT32 _CleanCheckpointPool(_SYNC_CHECKPOINT_CONTEXT *psContext);
+#endif
+
+#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1)
+static IMG_UINT32 gui32NumSyncCheckpointContexts = 0;
+#endif
+
+/* Defined values indicating the status of a sync checkpoint, stored in
+ * the memory of the checkpoint structure */
+#define SYNC_CHECKPOINT_PATTERN_IN_USE 0x1a1aa
+#define SYNC_CHECKPOINT_PATTERN_IN_POOL 0x2b2bb
+#define SYNC_CHECKPOINT_PATTERN_FREED 0x3c3cc
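+
+/* As a minimal sanity-check sketch (illustrative only), code holding a live
+ * checkpoint would expect the in-use pattern in its ui32ValidationCheck
+ * field, e.g.
+ *
+ *   PVR_ASSERT(psSyncCheckpoint->ui32ValidationCheck ==
+ *              SYNC_CHECKPOINT_PATTERN_IN_USE);
+ *
+ * so that pool reuse or a use-after-free shows up as an unexpected pattern
+ * in debug builds.
+ */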
+
+#if defined(SUPPORT_RGX)
+static inline void RGXSRVHWPerfSyncCheckpointUFOIsSignalled(PVRSRV_RGXDEV_INFO *psDevInfo,
+                               SYNC_CHECKPOINT *psSyncCheckpointInt, IMG_UINT32 ui32FenceSyncFlags)
+{
+       if (RGXHWPerfHostIsEventEnabled(psDevInfo, RGX_HWPERF_HOST_UFO)
+           && !(ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+       {
+               RGX_HWPERF_UFO_EV eEv;
+               RGX_HWPERF_UFO_DATA_ELEMENT sSyncData;
+
+               if (psSyncCheckpointInt)
+               {
+                       if ((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) ||
+                               (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ERRORED))
+                       {
+                               sSyncData.sCheckSuccess.ui32FWAddr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt);
+                               sSyncData.sCheckSuccess.ui32Value = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State;
+                               eEv = RGX_HWPERF_UFO_EV_CHECK_SUCCESS;
+                       }
+                       else
+                       {
+                               sSyncData.sCheckFail.ui32FWAddr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt);
+                               sSyncData.sCheckFail.ui32Value = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State;
+                               sSyncData.sCheckFail.ui32Required = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+                               eEv = RGX_HWPERF_UFO_EV_CHECK_FAIL;
+                       }
+                       RGXHWPerfHostPostUfoEvent(psDevInfo, eEv, &sSyncData,
+                           (ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_CTX_ATOMIC) ? IMG_FALSE : IMG_TRUE);
+               }
+       }
+}
+
+static inline void RGXSRVHWPerfSyncCheckpointUFOUpdate(PVRSRV_RGXDEV_INFO *psDevInfo,
+                               SYNC_CHECKPOINT *psSyncCheckpointInt, IMG_UINT32 ui32FenceSyncFlags)
+{
+       if (RGXHWPerfHostIsEventEnabled(psDevInfo, RGX_HWPERF_HOST_UFO)
+           && !(ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+       {
+               RGX_HWPERF_UFO_DATA_ELEMENT sSyncData;
+
+               if (psSyncCheckpointInt)
+               {
+                       sSyncData.sUpdate.ui32FWAddr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt);
+                       sSyncData.sUpdate.ui32OldValue = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State;
+                       sSyncData.sUpdate.ui32NewValue = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+                       RGXHWPerfHostPostUfoEvent(psDevInfo, RGX_HWPERF_UFO_EV_UPDATE, &sSyncData,
+                           (ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_CTX_ATOMIC) ? IMG_FALSE : IMG_TRUE);
+               }
+       }
+}
+#endif
+
+static PVRSRV_ERROR
+_SyncCheckpointRecordAdd(PSYNC_CHECKPOINT_RECORD_HANDLE *phRecord,
+                           SYNC_CHECKPOINT_BLOCK *hSyncCheckpointBlock,
+                           IMG_UINT32 ui32FwBlockAddr,
+                           IMG_UINT32 ui32SyncOffset,
+                           IMG_UINT32 ui32UID,
+                           IMG_UINT32 ui32ClassNameSize,
+                           const IMG_CHAR *pszClassName, PSYNC_CHECKPOINT pSyncCheckpt);
+static PVRSRV_ERROR
+_SyncCheckpointRecordRemove(PSYNC_CHECKPOINT_RECORD_HANDLE hRecord);
+static void _SyncCheckpointState(PDLLIST_NODE psNode,
+                                 DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                 void *pvDumpDebugFile);
+static void _SyncCheckpointDebugRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+                                        IMG_UINT32 ui32VerbLevel,
+                                        DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                        void *pvDumpDebugFile);
+static PVRSRV_ERROR _SyncCheckpointRecordListInit(PVRSRV_DEVICE_NODE *psDevNode);
+static void _SyncCheckpointRecordListDeinit(PVRSRV_DEVICE_NODE *psDevNode);
+
+#if defined(PDUMP)
+static void
+MISRHandler_PdumpDeferredSyncSignalPoster(void *pvData);
+static PVRSRV_ERROR _SyncCheckpointAllocPDump(PVRSRV_DEVICE_NODE *psDevNode, SYNC_CHECKPOINT *psSyncCheckpoint);
+static PVRSRV_ERROR _SyncCheckpointUpdatePDump(PPVRSRV_DEVICE_NODE psDevNode, SYNC_CHECKPOINT *psSyncCheckpoint, IMG_UINT32 ui32Status, IMG_UINT32 ui32FenceSyncFlags);
+static PVRSRV_ERROR _SyncCheckpointPDumpTransition(void *pvData, PDUMP_TRANSITION_EVENT eEvent);
+#endif
+
+/* Unique incremental ID assigned to sync checkpoints when allocated */
+static IMG_UINT32 g_SyncCheckpointUID;
+
+static void _CheckDeferredCleanupList(_SYNC_CHECKPOINT_CONTEXT *psContext);
+
+void SyncCheckpointContextUnref(PSYNC_CHECKPOINT_CONTEXT psContext)
+{
+       _SYNC_CHECKPOINT_CONTEXT *psContextInt = (_SYNC_CHECKPOINT_CONTEXT *) psContext;
+       _SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContextInt->psContextCtl;
+       IMG_UINT32 ui32RefCt = OSAtomicRead(&psContextInt->hRefCount);
+
+       if (ui32RefCt == 0)
+       {
+               PVR_LOG_ERROR(PVRSRV_ERROR_INVALID_CONTEXT,
+                             "SyncCheckpointContextUnref context already freed");
+       }
+       else if (OSAtomicDecrement(&psContextInt->hRefCount) == 0)
+       {
+               /* SyncCheckpointContextDestroy only when no longer referenced */
+               OSSpinLockDestroy(psCtxCtl->hDeferredCleanupListLock);
+               psCtxCtl->hDeferredCleanupListLock = NULL;
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+               if (psCtxCtl->ui32SyncCheckpointPoolCount)
+               {
+                       PVR_DPF((PVR_DBG_WARNING,
+                               "%s called for context<%p> with %d sync checkpoints still"
+                               " in the pool",
+                               __func__,
+                               (void *) psContext,
+                               psCtxCtl->ui32SyncCheckpointPoolCount));
+               }
+               psCtxCtl->bSyncCheckpointPoolValid = IMG_FALSE;
+               OSSpinLockDestroy(psCtxCtl->hSyncCheckpointPoolLock);
+               psCtxCtl->hSyncCheckpointPoolLock = NULL;
+#endif
+               OSFreeMem(psContextInt->psContextCtl);
+               RA_Delete(psContextInt->psSpanRA);
+               RA_Delete(psContextInt->psSubAllocRA);
+               OSLockDestroy(psContextInt->hLock);
+               psContextInt->hLock = NULL;
+               OSFreeMem(psContext);
+       }
+}
+
+void SyncCheckpointContextRef(PSYNC_CHECKPOINT_CONTEXT psContext)
+{
+       _SYNC_CHECKPOINT_CONTEXT *psContextInt = (_SYNC_CHECKPOINT_CONTEXT *)psContext;
+       IMG_UINT32 ui32RefCt = OSAtomicRead(&psContextInt->hRefCount);
+
+       if (ui32RefCt == 0)
+       {
+               PVR_LOG_ERROR(PVRSRV_ERROR_INVALID_CONTEXT,
+                             "SyncCheckpointContextRef context use after free");
+       }
+       else
+       {
+               OSAtomicIncrement(&psContextInt->hRefCount);
+       }
+}
+
+/*
+       Internal interfaces for management of synchronisation block memory
+ */
+static PVRSRV_ERROR
+_AllocSyncCheckpointBlock(_SYNC_CHECKPOINT_CONTEXT *psContext,
+                          SYNC_CHECKPOINT_BLOCK    **ppsSyncBlock)
+{
+       PVRSRV_DEVICE_NODE *psDevNode;
+       SYNC_CHECKPOINT_BLOCK *psSyncBlk;
+       PVRSRV_ERROR eError;
+
+       psSyncBlk = OSAllocMem(sizeof(*psSyncBlk));
+       PVR_LOG_GOTO_IF_NOMEM(psSyncBlk, eError, fail_alloc);
+
+       psSyncBlk->psContext = psContext;
+
+       /* Allocate sync checkpoint block */
+       psDevNode = psContext->psDevNode;
+       PVR_LOG_GOTO_IF_INVALID_PARAM(psDevNode, eError, fail_alloc_ufo_block);
+
+       psSyncBlk->psDevNode = psDevNode;
+
+       eError = psDevNode->pfnAllocUFOBlock(psDevNode,
+                                            &psSyncBlk->hMemDesc,
+                                            &psSyncBlk->ui32FirmwareAddr,
+                                            &psSyncBlk->ui32SyncBlockSize);
+       PVR_LOG_GOTO_IF_ERROR(eError, "pfnAllocUFOBlock", fail_alloc_ufo_block);
+
+       eError = DevmemAcquireCpuVirtAddr(psSyncBlk->hMemDesc,
+                                         (void **) &psSyncBlk->pui32LinAddr);
+       PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail_devmem_acquire);
+
+       OSAtomicWrite(&psSyncBlk->hRefCount, 1);
+
+       OSLockCreate(&psSyncBlk->hLock);
+
+       PDUMPCOMMENTWITHFLAGS(psDevNode, PDUMP_FLAGS_CONTINUOUS,
+                             "Allocated Sync Checkpoint UFO block (FirmwareVAddr = 0x%08x)",
+                             psSyncBlk->ui32FirmwareAddr);
+#if defined(PDUMP)
+       OSLockAcquire(psContext->hSyncCheckpointBlockListLock);
+       dllist_add_to_tail(&psContext->sSyncCheckpointBlockListHead, &psSyncBlk->sListNode);
+       OSLockRelease(psContext->hSyncCheckpointBlockListLock);
+#endif
+
+       *ppsSyncBlock = psSyncBlk;
+       return PVRSRV_OK;
+
+fail_devmem_acquire:
+       psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->hMemDesc);
+fail_alloc_ufo_block:
+       OSFreeMem(psSyncBlk);
+fail_alloc:
+       return eError;
+}
+
+static void
+_FreeSyncCheckpointBlock(SYNC_CHECKPOINT_BLOCK *psSyncBlk)
+{
+       OSLockAcquire(psSyncBlk->hLock);
+       if (0 == OSAtomicDecrement(&psSyncBlk->hRefCount))
+       {
+               PVRSRV_DEVICE_NODE *psDevNode = psSyncBlk->psDevNode;
+
+#if defined(PDUMP)
+               OSLockAcquire(psSyncBlk->psContext->hSyncCheckpointBlockListLock);
+               dllist_remove_node(&psSyncBlk->sListNode);
+               OSLockRelease(psSyncBlk->psContext->hSyncCheckpointBlockListLock);
+#endif
+               DevmemReleaseCpuVirtAddr(psSyncBlk->hMemDesc);
+               psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->hMemDesc);
+               OSLockRelease(psSyncBlk->hLock);
+               OSLockDestroy(psSyncBlk->hLock);
+               psSyncBlk->hLock = NULL;
+               OSFreeMem(psSyncBlk);
+       }
+       else
+       {
+               OSLockRelease(psSyncBlk->hLock);
+       }
+}
+
+static PVRSRV_ERROR
+_SyncCheckpointBlockImport(RA_PERARENA_HANDLE hArena,
+                           RA_LENGTH_T uSize,
+                           RA_FLAGS_T uFlags,
+                           const IMG_CHAR *pszAnnotation,
+                           RA_BASE_T *puiBase,
+                           RA_LENGTH_T *puiActualSize,
+                           RA_PERISPAN_HANDLE *phImport)
+{
+       _SYNC_CHECKPOINT_CONTEXT *psContext = hArena;
+       SYNC_CHECKPOINT_BLOCK *psSyncBlock = NULL;
+       RA_LENGTH_T uiSpanSize;
+       PVRSRV_ERROR eError;
+       PVR_UNREFERENCED_PARAMETER(uFlags);
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM((hArena != NULL), "hArena");
+
+       /* Check we've not been called with an unexpected size */
+       PVR_LOG_RETURN_IF_INVALID_PARAM((uSize == sizeof(SYNC_CHECKPOINT_FW_OBJ)), "uSize");
+
+       /*
+               Ensure the sync checkpoint context doesn't go away while we have
+               sync blocks attached to it.
+        */
+       SyncCheckpointContextRef((PSYNC_CHECKPOINT_CONTEXT)psContext);
+
+       /* Allocate the block of memory */
+       eError = _AllocSyncCheckpointBlock(psContext, &psSyncBlock);
+       PVR_GOTO_IF_ERROR(eError, fail_syncblockalloc);
+
+       /* Allocate a span for it */
+       eError = RA_Alloc(psContext->psSpanRA,
+                         psSyncBlock->ui32SyncBlockSize,
+                         RA_NO_IMPORT_MULTIPLIER,
+                         0,
+                         psSyncBlock->ui32SyncBlockSize,
+                         pszAnnotation,
+                         &psSyncBlock->uiSpanBase,
+                         &uiSpanSize,
+                         NULL);
+       PVR_GOTO_IF_ERROR(eError, fail_spanalloc);
+
+       /*
+               There is no reason the span RA should return an allocation larger
+               than we request
+        */
+       PVR_LOG_IF_FALSE((uiSpanSize == psSyncBlock->ui32SyncBlockSize),
+                        "uiSpanSize invalid");
+
+       *puiBase = psSyncBlock->uiSpanBase;
+       *puiActualSize = psSyncBlock->ui32SyncBlockSize;
+       *phImport = psSyncBlock;
+       return PVRSRV_OK;
+
+fail_spanalloc:
+       _FreeSyncCheckpointBlock(psSyncBlock);
+fail_syncblockalloc:
+       SyncCheckpointContextUnref((PSYNC_CHECKPOINT_CONTEXT)psContext);
+
+       return eError;
+}
+
+static void
+_SyncCheckpointBlockUnimport(RA_PERARENA_HANDLE hArena,
+                             RA_BASE_T uiBase,
+                             RA_PERISPAN_HANDLE hImport)
+{
+       _SYNC_CHECKPOINT_CONTEXT *psContext = hArena;
+       SYNC_CHECKPOINT_BLOCK   *psSyncBlock = hImport;
+
+       PVR_LOG_RETURN_VOID_IF_FALSE((psContext != NULL), "hArena invalid");
+       PVR_LOG_RETURN_VOID_IF_FALSE((psSyncBlock != NULL), "hImport invalid");
+       PVR_LOG_RETURN_VOID_IF_FALSE((uiBase == psSyncBlock->uiSpanBase), "uiBase invalid");
+
+       /* Free the span this import is using */
+       RA_Free(psContext->psSpanRA, uiBase);
+
+       /* Free the sync checkpoint block */
+       _FreeSyncCheckpointBlock(psSyncBlock);
+
+       /* Drop our reference to the sync checkpoint context */
+       SyncCheckpointContextUnref((PSYNC_CHECKPOINT_CONTEXT)psContext);
+}
+
+static INLINE IMG_UINT32 _SyncCheckpointGetOffset(SYNC_CHECKPOINT *psSyncInt)
+{
+       IMG_UINT64 ui64Temp;
+
+       ui64Temp = psSyncInt->uiSpanAddr - psSyncInt->psSyncCheckpointBlock->uiSpanBase;
+       PVR_ASSERT(ui64Temp<IMG_UINT32_MAX);
+       return (IMG_UINT32)ui64Temp;
+}
+
+/* Used by SyncCheckpointContextCreate() below */
+static INLINE IMG_UINT32 _Log2(IMG_UINT32 ui32Align)
+{
+       PVR_ASSERT(IsPower2(ui32Align));
+       return ExactLog2(ui32Align);
+}
+
+/*
+       External interfaces
+ */
+
+PVRSRV_ERROR
+SyncCheckpointRegisterFunctions(PFN_SYNC_CHECKPOINT_STRUCT *psSyncCheckpointPfns)
+{
+       PVR_ASSERT(psSyncCheckpointPfns != NULL);
+
+       if (g_psSyncCheckpointPfnStruct)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s called but already initialised", __func__));
+               return PVRSRV_ERROR_INIT_FAILURE;
+       }
+
+       g_psSyncCheckpointPfnStruct = psSyncCheckpointPfns;
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+SyncCheckpointResolveFence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+                           PVRSRV_FENCE hFence, IMG_UINT32 *pui32NumSyncCheckpoints,
+                           PSYNC_CHECKPOINT **papsSyncCheckpoints,
+                           IMG_UINT64 *pui64FenceUID,
+                           PDUMP_FLAGS_T ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_UINT32 i;
+#if defined(PDUMP)
+       SYNC_CHECKPOINT *psSyncCheckpoint = NULL;
+#endif
+
+       if (unlikely(!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnFenceResolve))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)",
+                               __func__));
+               eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED;
+               PVR_LOG_ERROR(eError, "g_pfnFenceResolve is NULL");
+               return eError;
+       }
+
+       if (papsSyncCheckpoints)
+       {
+               eError = g_psSyncCheckpointPfnStruct->pfnFenceResolve(
+                                          psSyncCheckpointContext,
+                                          hFence,
+                                          pui32NumSyncCheckpoints,
+                                          papsSyncCheckpoints,
+                                          pui64FenceUID);
+       }
+       else
+       {
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       PVR_LOG_RETURN_IF_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnFenceResolve");
+
+#if defined(PDUMP)
+       if (*papsSyncCheckpoints)
+       {
+               for (i = 0; i < *pui32NumSyncCheckpoints; i++)
+               {
+                       psSyncCheckpoint = (SYNC_CHECKPOINT *)(*papsSyncCheckpoints)[i];
+                       psSyncCheckpoint->ui32PDumpFlags = ui32PDumpFlags;
+               }
+       }
+#endif
+
+       if (*pui32NumSyncCheckpoints > MAX_SYNC_CHECKPOINTS_PER_FENCE)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: g_psSyncCheckpointPfnStruct->pfnFenceResolve() returned too many checkpoints (%u > MAX_SYNC_CHECKPOINTS_PER_FENCE=%u)",
+                               __func__, *pui32NumSyncCheckpoints, MAX_SYNC_CHECKPOINTS_PER_FENCE));
+
+               /* Free resources after error */
+               if (*papsSyncCheckpoints)
+               {
+                       for (i = 0; i < *pui32NumSyncCheckpoints; i++)
+                       {
+                               SyncCheckpointDropRef((*papsSyncCheckpoints)[i]);
+                       }
+
+                       SyncCheckpointFreeCheckpointListMem(*papsSyncCheckpoints);
+               }
+
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1)
+       {
+               IMG_UINT32 ii;
+
+               PVR_DPF((PVR_DBG_WARNING,
+                               "%s: g_psSyncCheckpointPfnStruct->pfnFenceResolve() for fence %d returned the following %d checkpoints:",
+                               __func__,
+                               hFence,
+                               *pui32NumSyncCheckpoints));
+
+               for (ii=0; ii<*pui32NumSyncCheckpoints; ii++)
+               {
+                       PSYNC_CHECKPOINT psNextCheckpoint = *(*papsSyncCheckpoints +  ii);
+                       PVR_DPF((PVR_DBG_WARNING,
+                                       "%s:   *papsSyncCheckpoints[%d]:<%p>",
+                                       __func__,
+                                       ii,
+                                       (void*)psNextCheckpoint));
+               }
+       }
+#endif
+
+       return eError;
+}
+
+PVRSRV_ERROR
+SyncCheckpointCreateFence(PVRSRV_DEVICE_NODE *psDevNode,
+                          const IMG_CHAR *pszFenceName,
+                          PVRSRV_TIMELINE hTimeline,
+                          PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+                          PVRSRV_FENCE *phNewFence,
+                          IMG_UINT64 *puiUpdateFenceUID,
+                          void **ppvFenceFinaliseData,
+                          PSYNC_CHECKPOINT *psNewSyncCheckpoint,
+                          void **ppvTimelineUpdateSyncPrim,
+                          IMG_UINT32 *pui32TimelineUpdateValue,
+                          PDUMP_FLAGS_T ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+       if (unlikely(!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnFenceCreate))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)",
+                               __func__));
+               eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED;
+               PVR_LOG_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnFenceCreate is NULL");
+       }
+       else
+       {
+               eError = g_psSyncCheckpointPfnStruct->pfnFenceCreate(
+                                         psDevNode,
+                                         pszFenceName,
+                                         hTimeline,
+                                         psSyncCheckpointContext,
+                                         phNewFence,
+                                         puiUpdateFenceUID,
+                                         ppvFenceFinaliseData,
+                                         psNewSyncCheckpoint,
+                                         ppvTimelineUpdateSyncPrim,
+                                         pui32TimelineUpdateValue);
+               if (unlikely(eError != PVRSRV_OK))
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s failed to create new fence<%p> for timeline<%d> using "
+                                       "sync checkpoint context<%p>, psNewSyncCheckpoint=<%p>, eError=%s",
+                                       __func__,
+                                       (void*)phNewFence,
+                                       hTimeline,
+                                       (void*)psSyncCheckpointContext,
+                                       (void*)psNewSyncCheckpoint,
+                                       PVRSRVGetErrorString(eError)));
+               }
+#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1)
+               else
+               {
+                       PVR_DPF((PVR_DBG_WARNING,
+                                       "%s created new fence<%d> for timeline<%d> using "
+                                       "sync checkpoint context<%p>, new sync_checkpoint=<%p>",
+                                       __func__,
+                                       *phNewFence,
+                                       hTimeline,
+                                       (void*)psSyncCheckpointContext,
+                                       (void*)*psNewSyncCheckpoint));
+               }
+#endif
+
+#if defined(PDUMP)
+               if (eError == PVRSRV_OK)
+               {
+                       SYNC_CHECKPOINT *psSyncCheckpoint = (SYNC_CHECKPOINT*)(*psNewSyncCheckpoint);
+                       if (psSyncCheckpoint)
+                       {
+                               psSyncCheckpoint->ui32PDumpFlags = ui32PDumpFlags;
+                       }
+               }
+#endif
+       }
+       return eError;
+}
+
+PVRSRV_ERROR
+SyncCheckpointRollbackFenceData(PVRSRV_FENCE hFence, void *pvFinaliseData)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       if (!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnFenceDataRollback)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)",
+                               __func__));
+               eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED;
+               PVR_LOG_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnFenceDataRollback is NULL");
+       }
+       else
+       {
+#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1)
+               PVR_DPF((PVR_DBG_WARNING,
+                               "%s: called to rollback fence data <%p>",
+                               __func__,
+                               pvFinaliseData));
+#endif
+               eError = g_psSyncCheckpointPfnStruct->pfnFenceDataRollback(
+                           hFence, pvFinaliseData);
+               PVR_LOG_IF_ERROR(eError,
+                                "g_psSyncCheckpointPfnStruct->pfnFenceDataRollback returned error");
+       }
+       return eError;
+}
+
+PVRSRV_ERROR
+SyncCheckpointFinaliseFence(PPVRSRV_DEVICE_NODE psDevNode,
+                            PVRSRV_FENCE hFence,
+                            void *pvFinaliseData,
+                            PSYNC_CHECKPOINT psSyncCheckpoint,
+                            const IMG_CHAR *pszName)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       if (!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnFenceFinalise)
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                               "%s: Warning (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED) (this is permitted)",
+                               __func__));
+               eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED;
+       }
+       else
+       {
+#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1)
+               PVR_DPF((PVR_DBG_WARNING,
+                               "%s: called to finalise fence <%d>",
+                               __func__,
+                               hFence));
+#endif
+               eError = g_psSyncCheckpointPfnStruct->pfnFenceFinalise(hFence, pvFinaliseData);
+               PVR_LOG_IF_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnFenceFinalise returned error");
+
+               RGXSRV_HWPERF_ALLOC_FENCE(psDevNode, OSGetCurrentClientProcessIDKM(), hFence,
+                                         SyncCheckpointGetFirmwareAddr(psSyncCheckpoint),
+                                         pszName, OSStringLength(pszName));
+       }
+       return eError;
+}
+
+void
+SyncCheckpointFreeCheckpointListMem(void *pvCheckpointListMem)
+{
+       if (g_psSyncCheckpointPfnStruct->pfnFreeCheckpointListMem)
+       {
+               g_psSyncCheckpointPfnStruct->pfnFreeCheckpointListMem(pvCheckpointListMem);
+       }
+}
+
+PVRSRV_ERROR
+SyncCheckpointNoHWUpdateTimelines(void *pvPrivateData)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       if (!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnNoHWUpdateTimelines)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)",
+                               __func__));
+               eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED;
+               PVR_LOG_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnNoHWUpdateTimelines is NULL");
+       }
+       else
+       {
+               g_psSyncCheckpointPfnStruct->pfnNoHWUpdateTimelines(pvPrivateData);
+       }
+       return eError;
+
+}
+
+PVRSRV_ERROR
+SyncCheckpointDumpInfoOnStalledUFOs(IMG_UINT32 ui32NumUFOs, IMG_UINT32 *pui32Vaddrs, IMG_UINT32 *pui32NumSyncOwnedUFOs)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PVR_LOG_RETURN_IF_FALSE((pui32NumSyncOwnedUFOs != NULL), "pui32NumSyncOwnedUFOs invalid", PVRSRV_ERROR_INVALID_PARAMS);
+
+       if (!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnDumpInfoOnStalledUFOs)
+       {
+               *pui32NumSyncOwnedUFOs = 0;
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)",
+                               __func__));
+               eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED;
+               PVR_LOG_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnDumpInfoOnStalledUFOs is NULL");
+       }
+       else
+       {
+               *pui32NumSyncOwnedUFOs = g_psSyncCheckpointPfnStruct->pfnDumpInfoOnStalledUFOs(ui32NumUFOs, pui32Vaddrs);
+               PVR_LOG(("%d sync checkpoint%s owned by %s in stalled context",
+                        *pui32NumSyncOwnedUFOs, *pui32NumSyncOwnedUFOs==1 ? "" : "s",
+                        g_psSyncCheckpointPfnStruct->pszImplName));
+       }
+       return eError;
+}
+
+PVRSRV_ERROR
+SyncCheckpointContextCreate(PPVRSRV_DEVICE_NODE psDevNode,
+                            PSYNC_CHECKPOINT_CONTEXT *ppsSyncCheckpointContext)
+{
+       _SYNC_CHECKPOINT_CONTEXT *psContext = NULL;
+       _SYNC_CHECKPOINT_CONTEXT_CTL *psContextCtl = NULL;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PVR_LOG_RETURN_IF_FALSE((ppsSyncCheckpointContext != NULL),
+                         "ppsSyncCheckpointContext invalid",
+                         PVRSRV_ERROR_INVALID_PARAMS);
+
+       psContext = OSAllocMem(sizeof(*psContext));
+       PVR_LOG_GOTO_IF_NOMEM(psContext, eError, fail_alloc); /* Sets OOM error code */
+
+       psContextCtl = OSAllocMem(sizeof(*psContextCtl));
+       PVR_LOG_GOTO_IF_NOMEM(psContextCtl, eError, fail_alloc2); /* Sets OOM error code */
+
+       eError = OSLockCreate(&psContext->hLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate:1", fail_create_context_lock);
+
+       eError = OSSpinLockCreate(&psContextCtl->hDeferredCleanupListLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSSpinLockCreate:1", fail_create_deferred_cleanup_lock);
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+       eError = OSSpinLockCreate(&psContextCtl->hSyncCheckpointPoolLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSSpinLockCreate:2", fail_create_pool_lock);
+#endif
+
+       dllist_init(&psContextCtl->sDeferredCleanupListHead);
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+       psContextCtl->ui32SyncCheckpointPoolCount = 0;
+       psContextCtl->ui32SyncCheckpointPoolWp = 0;
+       psContextCtl->ui32SyncCheckpointPoolRp = 0;
+       psContextCtl->bSyncCheckpointPoolFull = IMG_FALSE;
+       psContextCtl->bSyncCheckpointPoolValid = IMG_TRUE;
+#endif
+       psContext->psDevNode = psDevNode;
+
+       OSSNPrintf(psContext->azName, PVRSRV_SYNC_NAME_LENGTH, "Sync Prim RA-%p", psContext);
+       OSSNPrintf(psContext->azSpanName, PVRSRV_SYNC_NAME_LENGTH, "Sync Prim span RA-%p", psContext);
+
+       /*
+               Create the RA for sub-allocations of the sync checkpoints
+
+               Note:
+               The import size doesn't matter here as the server will pass
+               back the block size when it does the import, which overrides
+               what we specify here.
+        */
+       psContext->psSubAllocRA = RA_Create(psContext->azName,
+                                           /* Params for imports */
+                                           _Log2(sizeof(IMG_UINT32)),
+                                           RA_LOCKCLASS_2,
+                                           _SyncCheckpointBlockImport,
+                                           _SyncCheckpointBlockUnimport,
+                                           psContext,
+                                           RA_POLICY_DEFAULT);
+       PVR_LOG_GOTO_IF_NOMEM(psContext->psSubAllocRA, eError, fail_suballoc);
+
+       /*
+               Create the span-management RA
+
+               The RA requires that we work with linear spans. For our use
+               here we don't require this behaviour as we're always working
+               within offsets of blocks (imports). However, we need to keep
+               the RA happy, so we create the "span" management RA which
+               ensures that all our imports are added to the RA in a linear
+               fashion.
+        */
+       psContext->psSpanRA = RA_Create(psContext->azSpanName,
+                                       /* Params for imports */
+                                       0,
+                                       RA_LOCKCLASS_1,
+                                       NULL,
+                                       NULL,
+                                       NULL,
+                                       RA_POLICY_DEFAULT);
+       PVR_LOG_GOTO_IF_NOMEM(psContext->psSpanRA, eError, fail_span);
+
+       if (!RA_Add(psContext->psSpanRA, 0, MAX_SYNC_CHECKPOINT_MEM, 0, NULL))
+       {
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               PVR_LOG_ERROR(eError, "SyncCheckpointContextCreate call to RA_Add(span) failed");
+               goto fail_span_add;
+       }
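+
+       /* The resulting two-level arrangement (a summary of the code above and
+        * of SyncCheckpointAlloc() below, not additional behaviour):
+        *
+        *   RA_Alloc(psSubAllocRA, sizeof(SYNC_CHECKPOINT_FW_OBJ), ...)
+        *     -> _SyncCheckpointBlockImport()  allocates a UFO block and
+        *        -> RA_Alloc(psSpanRA, <block size>, ...) carves its span
+        *           out of the [0, MAX_SYNC_CHECKPOINT_MEM) range added here.
+        */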
+
+       OSAtomicWrite(&psContext->hRefCount, 1);
+       OSAtomicWrite(&psContext->hCheckpointCount, 0);
+
+       psContext->psContextCtl = psContextCtl;
+
+       *ppsSyncCheckpointContext = (PSYNC_CHECKPOINT_CONTEXT)psContext;
+#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1)
+       PVR_DPF((PVR_DBG_WARNING,
+                       "%s: created psSyncCheckpointContext=<%p> (%d contexts exist)",
+                       __func__,
+                       (void*)*ppsSyncCheckpointContext,
+                       ++gui32NumSyncCheckpointContexts));
+#endif
+
+#if defined(PDUMP)
+       dllist_init(&psContext->sSyncCheckpointBlockListHead);
+
+       eError = OSLockCreate(&psContext->hSyncCheckpointBlockListLock);
+       PVR_GOTO_IF_ERROR(eError, fail_span_add);
+
+       OSLockAcquire(psDevNode->hSyncCheckpointContextListLock);
+       dllist_add_to_tail(&psDevNode->sSyncCheckpointContextListHead, &psContext->sListNode);
+       OSLockRelease(psDevNode->hSyncCheckpointContextListLock);
+
+#endif
+
+       return PVRSRV_OK;
+
+fail_span_add:
+       RA_Delete(psContext->psSpanRA);
+fail_span:
+       RA_Delete(psContext->psSubAllocRA);
+fail_suballoc:
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+       OSSpinLockDestroy(psContextCtl->hSyncCheckpointPoolLock);
+       psContextCtl->hSyncCheckpointPoolLock = NULL;
+fail_create_pool_lock:
+#endif
+       OSSpinLockDestroy(psContextCtl->hDeferredCleanupListLock);
+       psContextCtl->hDeferredCleanupListLock = NULL;
+fail_create_deferred_cleanup_lock:
+       OSLockDestroy(psContext->hLock);
+       psContext->hLock = NULL;
+fail_create_context_lock:
+       OSFreeMem(psContextCtl);
+fail_alloc2:
+       OSFreeMem(psContext);
+fail_alloc:
+       return eError;
+}
+
+/* Poisons and frees the checkpoint
+ * Decrements context refcount. */
+static void _FreeSyncCheckpoint(SYNC_CHECKPOINT *psSyncCheckpoint)
+{
+       _SYNC_CHECKPOINT_CONTEXT *psContext = psSyncCheckpoint->psSyncCheckpointBlock->psContext;
+
+       psSyncCheckpoint->sCheckpointUFOAddr.ui32Addr = 0;
+       psSyncCheckpoint->psSyncCheckpointFwObj = NULL;
+       psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_FREED;
+
+       RA_Free(psSyncCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA,
+               psSyncCheckpoint->uiSpanAddr);
+       psSyncCheckpoint->psSyncCheckpointBlock = NULL;
+
+       OSFreeMem(psSyncCheckpoint);
+
+       OSAtomicDecrement(&psContext->hCheckpointCount);
+}
+
+PVRSRV_ERROR SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpointContext;
+       PVRSRV_DEVICE_NODE *psDevNode;
+       IMG_INT iRf = 0;
+
+       PVR_LOG_RETURN_IF_FALSE((psSyncCheckpointContext != NULL),
+                         "psSyncCheckpointContext invalid",
+                         PVRSRV_ERROR_INVALID_PARAMS);
+
+       psDevNode = (PVRSRV_DEVICE_NODE *)psContext->psDevNode;
+
+#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1)
+       PVR_DPF((PVR_DBG_WARNING,
+                       "%s: destroying psSyncCheckpointContext=<%p> (now have %d contexts)",
+                       __func__,
+                       (void*)psSyncCheckpointContext,
+                       --gui32NumSyncCheckpointContexts));
+#endif
+
+       _CheckDeferredCleanupList(psContext);
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+       if (psContext->psContextCtl->ui32SyncCheckpointPoolCount > 0)
+       {
+               IMG_UINT32 ui32NumFreedFromPool = _CleanCheckpointPool(psContext);
+
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+               PVR_DPF((PVR_DBG_WARNING,
+                               "%s freed %d sync checkpoints that were still in the pool for context<%p>",
+                               __func__,
+                               ui32NumFreedFromPool,
+                               (void*)psContext));
+#else
+               PVR_UNREFERENCED_PARAMETER(ui32NumFreedFromPool);
+#endif
+       }
+#endif
+
+       iRf = OSAtomicRead(&psContext->hCheckpointCount);
+
+       if (iRf != 0)
+       {
+               OS_SPINLOCK_FLAGS uiFlags;
+
+               /* Note, this is not a permanent error as the caller may retry later */
+               PVR_DPF((PVR_DBG_WARNING,
+                               "%s <%p> attempted with active references (iRf=%d), "
+                               "may be the result of a race",
+                               __func__,
+                               (void*)psContext,
+                               iRf));
+
+               eError = PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT;
+
+               OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags);
+               {
+                       DLLIST_NODE *psNode, *psNext;
+
+                       dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext)
+                       {
+                               SYNC_CHECKPOINT *psSyncCheckpoint = IMG_CONTAINER_OF(psNode, SYNC_CHECKPOINT, sListNode);
+                               bool bDeferredFree = dllist_node_is_in_list(&psSyncCheckpoint->sDeferredFreeListNode);
+
+                               /* Line below avoids build error in release builds (where PVR_DPF is not defined) */
+                               PVR_UNREFERENCED_PARAMETER(bDeferredFree);
+                               PVR_DPF((PVR_DBG_WARNING,
+                                               "%s syncCheckpoint<%p> ID=%d, %s, refs=%d, state=%s, fwaddr=%#08x, enqCount:%d, FWCount:%d %s",
+                                               __func__,
+                                               (void*)psSyncCheckpoint,
+                                               psSyncCheckpoint->ui32UID,
+                                               psSyncCheckpoint->azName,
+                                               OSAtomicRead(&psSyncCheckpoint->hRefCount),
+                                               psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED ?
+                                                               "PVRSRV_SYNC_CHECKPOINT_SIGNALLED" :
+                                                               psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE ?
+                                                                               "PVRSRV_SYNC_CHECKPOINT_ACTIVE" : "PVRSRV_SYNC_CHECKPOINT_ERRORED",
+                                               psSyncCheckpoint->ui32FWAddr,
+                                               OSAtomicRead(&psSyncCheckpoint->hEnqueuedCCBCount),
+                                               psSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount,
+                                               bDeferredFree ? "(deferred free)" : ""));
+
+#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1)
+                               gui32NumSyncCheckpointContexts++;
+#endif
+                       }
+               }
+               OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags);
+       }
+       else
+       {
+               IMG_INT iRf2 = 0;
+
+               iRf2 = OSAtomicRead(&psContext->hRefCount);
+               SyncCheckpointContextUnref(psSyncCheckpointContext);
+       }
+
+#if defined(PDUMP)
+       if (dllist_is_empty(&psContext->sSyncCheckpointBlockListHead))
+       {
+               OSLockDestroy(psContext->hSyncCheckpointBlockListLock);
+               psContext->hSyncCheckpointBlockListLock = NULL;
+
+               OSLockAcquire(psDevNode->hSyncCheckpointContextListLock);
+               dllist_remove_node(&psContext->sListNode);
+               OSLockRelease(psDevNode->hSyncCheckpointContextListLock);
+       }
+#endif
+
+       return eError;
+}
+
+PVRSRV_ERROR
+SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext,
+                    PVRSRV_TIMELINE hTimeline,
+                    PVRSRV_FENCE hFence,
+                    const IMG_CHAR *pszCheckpointName,
+                    PSYNC_CHECKPOINT *ppsSyncCheckpoint)
+{
+       SYNC_CHECKPOINT *psNewSyncCheckpoint = NULL;
+       _SYNC_CHECKPOINT_CONTEXT *psSyncContextInt = (_SYNC_CHECKPOINT_CONTEXT*)psSyncContext;
+       PVRSRV_DEVICE_NODE *psDevNode;
+       PVRSRV_ERROR eError;
+
+       PVR_LOG_RETURN_IF_FALSE((psSyncContext != NULL), "psSyncContext invalid", PVRSRV_ERROR_INVALID_PARAMS);
+       PVR_LOG_RETURN_IF_FALSE((ppsSyncCheckpoint != NULL), "ppsSyncCheckpoint invalid", PVRSRV_ERROR_INVALID_PARAMS);
+
+       psDevNode = (PVRSRV_DEVICE_NODE *)psSyncContextInt->psDevNode;
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1))
+       PVR_DPF((PVR_DBG_WARNING, "%s Entry, Getting checkpoint from pool",
+                        __func__));
+#endif
+       psNewSyncCheckpoint = _GetCheckpointFromPool(psSyncContextInt);
+       if (!psNewSyncCheckpoint)
+       {
+#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1))
+               PVR_DPF((PVR_DBG_WARNING,
+                                "%s     checkpoint pool empty - will have to allocate",
+                                __func__));
+#endif
+       }
+#endif
+       /* If the pool is empty (or not defined), allocate a new sync checkpoint */
+       if (!psNewSyncCheckpoint)
+       {
+               psNewSyncCheckpoint = OSAllocMem(sizeof(*psNewSyncCheckpoint));
+               PVR_LOG_GOTO_IF_NOMEM(psNewSyncCheckpoint, eError, fail_alloc); /* Sets OOM error code */
+
+               eError = RA_Alloc(psSyncContextInt->psSubAllocRA,
+                                 sizeof(*psNewSyncCheckpoint->psSyncCheckpointFwObj),
+                                 RA_NO_IMPORT_MULTIPLIER,
+                                 0,
+                                 sizeof(IMG_UINT32),
+                                 (IMG_CHAR*)pszCheckpointName,
+                                 &psNewSyncCheckpoint->uiSpanAddr,
+                                 NULL,
+                                 (RA_PERISPAN_HANDLE *) &psNewSyncCheckpoint->psSyncCheckpointBlock);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RA_Alloc", fail_raalloc);
+
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+               PVR_DPF((PVR_DBG_WARNING,
+                               "%s CALLED RA_Alloc(), psSubAllocRA=<%p>, ui32SpanAddr=0x%llx",
+                               __func__,
+                               (void*)psSyncContextInt->psSubAllocRA,
+                               psNewSyncCheckpoint->uiSpanAddr));
+#endif
+               psNewSyncCheckpoint->psSyncCheckpointFwObj =
+                               (volatile SYNC_CHECKPOINT_FW_OBJ*)(void *)(psNewSyncCheckpoint->psSyncCheckpointBlock->pui32LinAddr +
+                                               (_SyncCheckpointGetOffset(psNewSyncCheckpoint)/sizeof(IMG_UINT32)));
+               psNewSyncCheckpoint->ui32FWAddr = psNewSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr +
+                                                 _SyncCheckpointGetOffset(psNewSyncCheckpoint) + 1;
+               OSAtomicIncrement(&psNewSyncCheckpoint->psSyncCheckpointBlock->psContext->hCheckpointCount);
+               psNewSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_USE;
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+               PVR_DPF((PVR_DBG_WARNING,
+                                "%s called to allocate new sync checkpoint<%p> for context<%p>",
+                                __func__, (void*)psNewSyncCheckpoint, (void*)psSyncContext));
+               PVR_DPF((PVR_DBG_WARNING,
+                                "%s                    psSyncCheckpointFwObj<%p>",
+                                __func__, (void*)psNewSyncCheckpoint->psSyncCheckpointFwObj));
+               PVR_DPF((PVR_DBG_WARNING,
+                                "%s                    psSyncCheckpoint FwAddr=0x%x",
+                                __func__, SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psNewSyncCheckpoint)));
+               PVR_DPF((PVR_DBG_WARNING,
+                                "%s                    pszCheckpointName = %s",
+                                __func__, pszCheckpointName));
+               PVR_DPF((PVR_DBG_WARNING,
+                                "%s                    psSyncCheckpoint Timeline=%d",
+                                __func__, hTimeline));
+#endif
+       }
+
+       psNewSyncCheckpoint->hTimeline = hTimeline;
+       OSAtomicWrite(&psNewSyncCheckpoint->hRefCount, 1);
+       OSAtomicWrite(&psNewSyncCheckpoint->hEnqueuedCCBCount, 0);
+       psNewSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount = 0;
+       psNewSyncCheckpoint->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_ACTIVE;
+       psNewSyncCheckpoint->uiProcess = OSGetCurrentClientProcessIDKM();
+       OSCachedMemSet(&psNewSyncCheckpoint->sDeferredFreeListNode, 0, sizeof(psNewSyncCheckpoint->sDeferredFreeListNode));
+
+       if (pszCheckpointName)
+       {
+               /* Copy over the checkpoint name annotation */
+               OSStringLCopy(psNewSyncCheckpoint->azName, pszCheckpointName, PVRSRV_SYNC_NAME_LENGTH);
+       }
+       else
+       {
+               /* No sync checkpoint name annotation */
+               psNewSyncCheckpoint->azName[0] = '\0';
+       }
+
+       /* Store sync checkpoint FW address in PRGXFWIF_UFO_ADDR struct */
+       psNewSyncCheckpoint->sCheckpointUFOAddr.ui32Addr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psNewSyncCheckpoint);
+
+       /* Assign unique ID to this sync checkpoint */
+       psNewSyncCheckpoint->ui32UID = g_SyncCheckpointUID++;
+
+#if defined(PDUMP)
+       /* Flush any deferred sync checkpoint signals to PDump before dumping this allocation */
+       MISRHandler_PdumpDeferredSyncSignalPoster(psDevNode);
+
+       _SyncCheckpointAllocPDump(psDevNode, psNewSyncCheckpoint);
+#endif
+
+       RGXSRV_HWPERF_ALLOC_SYNC_CP(psDevNode, psNewSyncCheckpoint->hTimeline,
+                                   OSGetCurrentClientProcessIDKM(),
+                                   hFence,
+                                   psNewSyncCheckpoint->ui32FWAddr,
+                                   psNewSyncCheckpoint->azName,
+                                   sizeof(psNewSyncCheckpoint->azName));
+
+       if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED)
+       {
+               IMG_CHAR szChkptName[PVRSRV_SYNC_NAME_LENGTH];
+
+               if (pszCheckpointName)
+               {
+                       /* Copy the checkpoint name annotation into a fixed-size array */
+                       OSStringLCopy(szChkptName, pszCheckpointName, PVRSRV_SYNC_NAME_LENGTH);
+               }
+               else
+               {
+                       /* No checkpoint name annotation */
+                       szChkptName[0] = 0;
+               }
+               /* record this sync */
+               eError = _SyncCheckpointRecordAdd(&psNewSyncCheckpoint->hRecord,
+                                                psNewSyncCheckpoint->psSyncCheckpointBlock,
+                                                psNewSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr,
+                                                _SyncCheckpointGetOffset(psNewSyncCheckpoint),
+                                                psNewSyncCheckpoint->ui32UID,
+                                                OSStringNLength(szChkptName, PVRSRV_SYNC_NAME_LENGTH),
+                                                szChkptName, (PSYNC_CHECKPOINT)psNewSyncCheckpoint);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Failed to add sync checkpoint record \"%s\" (%s)",
+                                       __func__,
+                                       szChkptName,
+                                       PVRSRVGetErrorString(eError)));
+                       psNewSyncCheckpoint->hRecord = NULL;
+                       /* note the error but continue without affecting driver operation */
+               }
+       }
+
+       {
+               OS_SPINLOCK_FLAGS uiFlags;
+               /* Add the sync checkpoint to the device list */
+               OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags);
+               dllist_add_to_head(&psDevNode->sSyncCheckpointSyncsList,
+                                  &psNewSyncCheckpoint->sListNode);
+               OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags);
+       }
+
+       *ppsSyncCheckpoint = (PSYNC_CHECKPOINT)psNewSyncCheckpoint;
+
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+       PVR_DPF((PVR_DBG_WARNING,
+                       "%s Exit(Ok), psNewSyncCheckpoint->ui32UID=%d <%p>",
+                       __func__,
+                       psNewSyncCheckpoint->ui32UID,
+                       (void*)psNewSyncCheckpoint));
+#endif
+       return PVRSRV_OK;
+
+fail_raalloc:
+       OSFreeMem(psNewSyncCheckpoint);
+fail_alloc:
+       return eError;
+}
+
+static void SyncCheckpointUnref(SYNC_CHECKPOINT *psSyncCheckpointInt)
+{
+       _SYNC_CHECKPOINT_CONTEXT *psContext;
+       PVRSRV_DEVICE_NODE *psDevNode;
+
+       psContext = psSyncCheckpointInt->psSyncCheckpointBlock->psContext;
+       psDevNode = (PVRSRV_DEVICE_NODE *)psContext->psDevNode;
+
+       /*
+        * Without this reference, the context may be destroyed as soon
+        * as _FreeSyncCheckpoint is called, but the context is still
+        * needed when _CheckDeferredCleanupList is called at the end
+        * of this function.
+        */
+       SyncCheckpointContextRef((PSYNC_CHECKPOINT_CONTEXT)psContext);
+
+       PVR_ASSERT(psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE);
+       if (!OSAtomicRead(&psSyncCheckpointInt->hRefCount))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "SyncCheckpointUnref sync checkpoint already freed"));
+       }
+       else if (0 == OSAtomicDecrement(&psSyncCheckpointInt->hRefCount))
+       {
+               /* If the firmware has serviced all enqueued references to the sync checkpoint, free it */
+               if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount ==
+                               (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)))
+               {
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+                       PVR_DPF((PVR_DBG_WARNING,
+                                       "%s No outstanding FW ops and hRef is zero, deleting SyncCheckpoint..",
+                                       __func__));
+#endif
+                       if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED)
+                               && psSyncCheckpointInt->hRecord)
+                       {
+                               PVRSRV_ERROR eError;
+                               /* remove this sync record */
+                               eError = _SyncCheckpointRecordRemove(psSyncCheckpointInt->hRecord);
+                               PVR_LOG_IF_ERROR(eError, "_SyncCheckpointRecordRemove");
+                       }
+
+                       {
+                               OS_SPINLOCK_FLAGS uiFlags;
+                               /* Remove the sync checkpoint from the global list */
+                               OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags);
+                               dllist_remove_node(&psSyncCheckpointInt->sListNode);
+                               OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags);
+                       }
+
+                       RGXSRV_HWPERF_FREE(psDevNode, SYNC_CP, psSyncCheckpointInt->ui32FWAddr);
+
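+                       /* When a checkpoint pool is configured, try to return the checkpoint
+                        * to the pool for reuse; if there is no pool, or the pool is full,
+                        * release it back to the RA via _FreeSyncCheckpoint(). */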
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1))
+                       PVR_DPF((PVR_DBG_WARNING,
+                                       "%s attempting to return sync checkpoint to the pool",
+                                       __func__));
+#endif
+                       if (!_PutCheckpointInPool(psSyncCheckpointInt))
+#endif
+                       {
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1))
+                               PVR_DPF((PVR_DBG_WARNING,
+                                               "%s pool is full, so just free it",
+                                               __func__));
+#endif
+#endif
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+                               PVR_DPF((PVR_DBG_WARNING,
+                                               "%s CALLING RA_Free(psSyncCheckpoint(ID:%d)<%p>), psSubAllocRA=<%p>, ui32SpanAddr=0x%llx",
+                                               __func__,
+                                               psSyncCheckpointInt->ui32UID,
+                                               (void*)psSyncCheckpointInt,
+                                               (void*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext->psSubAllocRA,
+                                               psSyncCheckpointInt->uiSpanAddr));
+#endif
+                               _FreeSyncCheckpoint(psSyncCheckpointInt);
+                       }
+               }
+               else
+               {
+                       OS_SPINLOCK_FLAGS uiFlags;
+#if ((ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1))
+                       PVR_DPF((PVR_DBG_WARNING,
+                                       "%s Outstanding FW ops hEnqueuedCCBCount=%d != FwObj->ui32FwRefCount=%d "
+                                       "- DEFERRING CLEANUP psSyncCheckpoint(ID:%d)<%p>",
+                                       __func__,
+                                       OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount),
+                                       psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount,
+                                       psSyncCheckpointInt->ui32UID,
+                                       (void*)psSyncCheckpointInt));
+#endif
+                       /* Add the sync checkpoint to the deferred free list */
+                       OSSpinLockAcquire(psContext->psContextCtl->hDeferredCleanupListLock, uiFlags);
+                       dllist_add_to_tail(&psContext->psContextCtl->sDeferredCleanupListHead,
+                                          &psSyncCheckpointInt->sDeferredFreeListNode);
+                       OSSpinLockRelease(psContext->psContextCtl->hDeferredCleanupListLock, uiFlags);
+               }
+       }
+       else
+       {
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+               PVR_DPF((PVR_DBG_WARNING,
+                               "%s psSyncCheckpoint(ID:%d)<%p>, hRefCount decremented to %d",
+                               __func__,
+                               psSyncCheckpointInt->ui32UID,
+                               (void*)psSyncCheckpointInt,
+                               (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hRefCount))));
+#endif
+       }
+
+       /* See if any sync checkpoints in the deferred cleanup list can be freed */
+       _CheckDeferredCleanupList(psContext);
+
+       SyncCheckpointContextUnref((PSYNC_CHECKPOINT_CONTEXT)psContext);
+}
+
+void SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+       SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+       PVR_LOG_RETURN_VOID_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+       PVR_DPF((PVR_DBG_WARNING,
+                       "%s Entry,  psSyncCheckpoint(ID:%d)<%p>, hRefCount=%d, psSyncCheckpoint->ui32ValidationCheck=0x%x",
+                       __func__,
+                       psSyncCheckpointInt->ui32UID,
+                       (void*)psSyncCheckpoint,
+                       (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hRefCount)),
+                       psSyncCheckpointInt->ui32ValidationCheck));
+#endif
+       SyncCheckpointUnref(psSyncCheckpointInt);
+}
+
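+/*
+ * Illustrative lifetime sketch only (the real call sites live in the fence and
+ * timeline code; the allocator's name and exact signature are inferred from the
+ * allocation path above, not confirmed here):
+ *
+ *   PSYNC_CHECKPOINT psCheckpoint;
+ *   eError = SyncCheckpointAlloc(psContext, hTimeline, hFence, "blit", &psCheckpoint);
+ *   SyncCheckpointCCBEnqueued(psCheckpoint);   // once per CCB command referencing it
+ *   SyncCheckpointSignal(psCheckpoint, PVRSRV_FENCE_FLAG_NONE);
+ *   SyncCheckpointFree(psCheckpoint);          // drops the creator's reference
+ */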
+void
+SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags)
+{
+       SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+       PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+       if (psSyncCheckpointInt)
+       {
+               PVR_LOG_IF_FALSE((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE),
+                                "psSyncCheckpoint already signalled");
+
+               if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE)
+               {
+#if defined(SUPPORT_RGX)
+                       PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice;
+
+                       RGXSRVHWPerfSyncCheckpointUFOUpdate(psDevInfo, psSyncCheckpointInt, ui32FenceSyncFlags);
+#endif
+                       psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+
+#if defined(PDUMP)
+                       _SyncCheckpointUpdatePDump(psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode, psSyncCheckpointInt, PVRSRV_SYNC_CHECKPOINT_SIGNALLED, ui32FenceSyncFlags);
+#endif
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_WARNING,
+                                        "%s asked to set PVRSRV_SYNC_CHECKPOINT_SIGNALLED(%d) for (psSyncCheckpointInt->ui32UID=%d), "
+                                        "when value is already %d",
+                                        __func__,
+                                        PVRSRV_SYNC_CHECKPOINT_SIGNALLED,
+                                        psSyncCheckpointInt->ui32UID,
+                                        psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State));
+               }
+       }
+}
+
+void
+SyncCheckpointSignalNoHW(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+       SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+       PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+       if (psSyncCheckpointInt)
+       {
+               PVR_LOG_IF_FALSE((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE),
+                                "psSyncCheckpoint already signalled");
+
+               if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE)
+               {
+#if defined(SUPPORT_RGX)
+                       PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice;
+
+                       RGXSRVHWPerfSyncCheckpointUFOUpdate(psDevInfo, psSyncCheckpointInt, PVRSRV_FENCE_FLAG_NONE);
+#endif
+                       psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+               }
+               else
+               {
+#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1)
+                       PVR_DPF((PVR_DBG_WARNING,
+                                       "%s asked to set PVRSRV_SYNC_CHECKPOINT_SIGNALLED(%d) for (psSyncCheckpointInt->ui32UID=%d), "
+                                       "when value is already %d",
+                                       __func__,
+                                       PVRSRV_SYNC_CHECKPOINT_SIGNALLED,
+                                       psSyncCheckpointInt->ui32UID,
+                                       psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State));
+#endif
+               }
+       }
+}
+
+void
+SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags)
+{
+       SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+       PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+       if (psSyncCheckpointInt)
+       {
+               PVR_LOG_IF_FALSE((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE),
+                                "psSyncCheckpoint already signalled");
+
+               if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE)
+               {
+#if defined(SUPPORT_RGX)
+                       PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice;
+                       if (!(ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+                       {
+                               RGX_HWPERF_UFO_DATA_ELEMENT sSyncData;
+
+                               sSyncData.sUpdate.ui32FWAddr = SyncCheckpointGetFirmwareAddr(psSyncCheckpoint);
+                               sSyncData.sUpdate.ui32OldValue = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State;
+                               sSyncData.sUpdate.ui32NewValue = PVRSRV_SYNC_CHECKPOINT_ERRORED;
+
+                               RGXSRV_HWPERF_UFO(psDevInfo, RGX_HWPERF_UFO_EV_UPDATE, &sSyncData,
+                                                 (ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_CTX_ATOMIC) ? IMG_FALSE : IMG_TRUE);
+                       }
+#endif
+
+                       psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_ERRORED;
+
+#if defined(PDUMP)
+                       _SyncCheckpointUpdatePDump(psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode, psSyncCheckpointInt, PVRSRV_SYNC_CHECKPOINT_ERRORED, ui32FenceSyncFlags);
+#endif
+               }
+       }
+}
+
+IMG_BOOL SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags)
+{
+       IMG_BOOL bRet = IMG_FALSE;
+       SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+       PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+       if (psSyncCheckpointInt)
+       {
+#if defined(SUPPORT_RGX)
+               PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice;
+
+               RGXSRVHWPerfSyncCheckpointUFOIsSignalled(psDevInfo, psSyncCheckpointInt, ui32FenceSyncFlags);
+#endif
+               bRet = ((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) ||
+                               (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ERRORED));
+
+#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1)
+               PVR_DPF((PVR_DBG_WARNING,
+                               "%s called for psSyncCheckpoint<%p>, returning %d",
+                               __func__,
+                               (void*)psSyncCheckpoint,
+                               bRet));
+#endif
+       }
+       return bRet;
+}
+
+IMG_BOOL
+SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags)
+{
+       IMG_BOOL bRet = IMG_FALSE;
+       SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+       PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+       if (psSyncCheckpointInt)
+       {
+#if defined(SUPPORT_RGX)
+               PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice;
+
+               RGXSRVHWPerfSyncCheckpointUFOIsSignalled(psDevInfo, psSyncCheckpointInt, ui32FenceSyncFlags);
+#endif
+               bRet = (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ERRORED);
+
+#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1)
+               PVR_DPF((PVR_DBG_WARNING,
+                               "%s called for psSyncCheckpoint<%p>, returning %d",
+                               __func__,
+                               (void*)psSyncCheckpoint,
+                               bRet));
+#endif
+       }
+       return bRet;
+}
+
+const IMG_CHAR *
+SyncCheckpointGetStateString(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+       SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+       PVR_LOG_RETURN_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", "Null");
+
+       switch (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State)
+       {
+               case PVRSRV_SYNC_CHECKPOINT_SIGNALLED:
+                       return "Signalled";
+               case PVRSRV_SYNC_CHECKPOINT_ACTIVE:
+                       return "Active";
+               case PVRSRV_SYNC_CHECKPOINT_ERRORED:
+                       return "Errored";
+               case PVRSRV_SYNC_CHECKPOINT_UNDEF:
+                       return "Undefined";
+               default:
+                       return "Unknown";
+       }
+}
+
+PVRSRV_ERROR
+SyncCheckpointTakeRef(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+       PVRSRV_ERROR eRet = PVRSRV_OK;
+       SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psSyncCheckpoint, "psSyncCheckpoint");
+
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+       PVR_DPF((PVR_DBG_WARNING, "%s called for psSyncCheckpoint<%p> %d->%d (FWRef %u)",
+                       __func__,
+                       psSyncCheckpointInt,
+                       OSAtomicRead(&psSyncCheckpointInt->hRefCount),
+                       OSAtomicRead(&psSyncCheckpointInt->hRefCount)+1,
+                       psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount));
+#endif
+       OSAtomicIncrement(&psSyncCheckpointInt->hRefCount);
+
+       return eRet;
+}
+
+PVRSRV_ERROR
+SyncCheckpointDropRef(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+       PVRSRV_ERROR eRet = PVRSRV_OK;
+       SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psSyncCheckpoint, "psSyncCheckpoint");
+
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+       PVR_DPF((PVR_DBG_WARNING, "%s called for psSyncCheckpoint<%p> %d->%d (FWRef %u)",
+                       __func__,
+                       psSyncCheckpointInt,
+                       OSAtomicRead(&psSyncCheckpointInt->hRefCount),
+                       OSAtomicRead(&psSyncCheckpointInt->hRefCount)-1,
+                       psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount));
+#endif
+       SyncCheckpointUnref(psSyncCheckpointInt);
+
+       return eRet;
+}
+
+void
+SyncCheckpointCCBEnqueued(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+       SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+       PVR_LOG_RETURN_VOID_IF_FALSE(psSyncCheckpoint != NULL, "psSyncCheckpoint");
+
+       if (psSyncCheckpointInt)
+       {
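+               /* On NO_HARDWARE builds the enqueued count is not incremented; with no
+                * firmware to service the CCB commands, ui32FwRefCount would never catch
+                * up and the checkpoint could never be freed (see SyncCheckpointUnref()). */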
+#if !defined(NO_HARDWARE)
+#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1)
+               PVR_DPF((PVR_DBG_WARNING, "%s called for psSyncCheckpoint<%p> %d->%d (FWRef %u)",
+                               __func__,
+                               (void*)psSyncCheckpoint,
+                               OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount),
+                               OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)+1,
+                               psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount));
+#endif
+               OSAtomicIncrement(&psSyncCheckpointInt->hEnqueuedCCBCount);
+#endif
+       }
+}
+
+PRGXFWIF_UFO_ADDR*
+SyncCheckpointGetRGXFWIFUFOAddr(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+       SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+       PVR_LOG_GOTO_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt);
+
+       if (psSyncCheckpointInt)
+       {
+               if (psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE)
+               {
+                       return &psSyncCheckpointInt->sCheckpointUFOAddr;
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s called for psSyncCheckpoint<%p>, but ui32ValidationCheck=0x%x",
+                                       __func__,
+                                       (void*)psSyncCheckpoint,
+                                       psSyncCheckpointInt->ui32ValidationCheck));
+               }
+       }
+
+invalid_chkpt:
+       return NULL;
+}
+
+IMG_UINT32
+SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+       SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint;
+       IMG_UINT32 ui32Ret = 0;
+
+       PVR_LOG_GOTO_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt);
+
+       if (psSyncCheckpointInt)
+       {
+               if (psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE)
+               {
+                       ui32Ret = psSyncCheckpointInt->ui32FWAddr;
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s called for psSyncCheckpoint<%p>, but ui32ValidationCheck=0x%x",
+                                       __func__,
+                                       (void*)psSyncCheckpoint,
+                                       psSyncCheckpointInt->ui32ValidationCheck));
+               }
+       }
+
+invalid_chkpt:
+       return ui32Ret;
+}
+
+IMG_UINT32
+SyncCheckpointGetId(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+       SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint;
+       IMG_UINT32 ui32Ret = 0;
+
+       PVR_LOG_GOTO_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt);
+
+       if (psSyncCheckpointInt)
+       {
+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)
+               PVR_DPF((PVR_DBG_WARNING,
+                               "%s returning ID for sync checkpoint<%p>",
+                               __func__,
+                               (void*)psSyncCheckpointInt));
+               PVR_DPF((PVR_DBG_WARNING,
+                               "%s (validationCheck=0x%x)",
+                               __func__,
+                               psSyncCheckpointInt->ui32ValidationCheck));
+#endif
+               ui32Ret = psSyncCheckpointInt->ui32UID;
+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)
+               PVR_DPF((PVR_DBG_WARNING,
+                               "%s (ui32UID=0x%x)",
+                               __func__,
+                               psSyncCheckpointInt->ui32UID));
+#endif
+       }
+       return ui32Ret;
+
+invalid_chkpt:
+       return 0;
+}
+
+PVRSRV_TIMELINE
+SyncCheckpointGetTimeline(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+       SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint;
+       PVRSRV_TIMELINE i32Ret = PVRSRV_NO_TIMELINE;
+
+       PVR_LOG_GOTO_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt);
+
+       if (psSyncCheckpointInt)
+       {
+               i32Ret = psSyncCheckpointInt->hTimeline;
+       }
+       return i32Ret;
+
+invalid_chkpt:
+       return PVRSRV_NO_TIMELINE;
+}
+
+IMG_UINT32
+SyncCheckpointGetEnqueuedCount(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+       SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint;
+       PVR_LOG_RETURN_IF_FALSE(psSyncCheckpoint != NULL, "psSyncCheckpoint invalid", 0);
+
+       return OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount);
+}
+
+IMG_UINT32
+SyncCheckpointGetReferenceCount(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+       SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint;
+       PVR_LOG_RETURN_IF_FALSE(psSyncCheckpoint != NULL, "psSyncCheckpoint invalid", 0);
+
+       return OSAtomicRead(&psSyncCheckpointInt->hRefCount);
+}
+
+IMG_PID
+SyncCheckpointGetCreator(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+       SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint;
+       PVR_LOG_RETURN_IF_FALSE(psSyncCheckpoint != NULL, "psSyncCheckpoint invalid", 0);
+
+       return psSyncCheckpointInt->uiProcess;
+}
+
+IMG_UINT32 SyncCheckpointStateFromUFO(PPVRSRV_DEVICE_NODE psDevNode,
+                                IMG_UINT32 ui32FwAddr)
+{
+       SYNC_CHECKPOINT *psSyncCheckpointInt;
+       PDLLIST_NODE psNode, psNext;
+       IMG_UINT32 ui32State = 0;
+       OS_SPINLOCK_FLAGS uiFlags;
+
+       OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags);
+       dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext)
+       {
+               psSyncCheckpointInt = IMG_CONTAINER_OF(psNode, SYNC_CHECKPOINT, sListNode);
+               if (ui32FwAddr == SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt))
+               {
+                       ui32State = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State;
+                       break;
+               }
+       }
+       OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags);
+       return ui32State;
+}
+
+void SyncCheckpointErrorFromUFO(PPVRSRV_DEVICE_NODE psDevNode,
+                                IMG_UINT32 ui32FwAddr)
+{
+       SYNC_CHECKPOINT *psSyncCheckpointInt;
+       PDLLIST_NODE psNode, psNext;
+       OS_SPINLOCK_FLAGS uiFlags;
+
+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)
+       PVR_DPF((PVR_DBG_WARNING,
+                       "%s called to error UFO with ui32FWAddr=0x%x",
+                       __func__,
+                       ui32FwAddr));
+#endif
+
+       OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags);
+       dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext)
+       {
+               psSyncCheckpointInt = IMG_CONTAINER_OF(psNode, SYNC_CHECKPOINT, sListNode);
+               if (ui32FwAddr == SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt))
+               {
+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)
+                       PVR_DPF((PVR_DBG_WARNING,
+                                       "%s calling SyncCheckpointError for sync checkpoint <%p>",
+                                       __func__,
+                                       (void*)psSyncCheckpointInt));
+#endif
+                       /* Mark as errored */
+                       SyncCheckpointError((PSYNC_CHECKPOINT)psSyncCheckpointInt, IMG_TRUE);
+                       break;
+               }
+       }
+       OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags);
+}
+
+void SyncCheckpointRollbackFromUFO(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr)
+{
+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)
+       PVR_DPF((PVR_DBG_WARNING,
+                       "%s called to rollback UFO with ui32FWAddr=0x%x",
+                       __func__,
+                       ui32FwAddr));
+#endif
+#if !defined(NO_HARDWARE)
+       {
+               SYNC_CHECKPOINT *psSyncCheckpointInt = NULL;
+               PDLLIST_NODE psNode = NULL, psNext = NULL;
+               OS_SPINLOCK_FLAGS uiFlags;
+
+               OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags);
+               dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext)
+               {
+                       psSyncCheckpointInt = IMG_CONTAINER_OF(psNode, SYNC_CHECKPOINT, sListNode);
+                       if (ui32FwAddr == SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt))
+                       {
+#if ((ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1))
+                               PVR_DPF((PVR_DBG_WARNING,
+                                       "%s called for psSyncCheckpointInt<%p> %d->%d",
+                                       __func__,
+                                       (void *) psSyncCheckpointInt,
+                                       OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount),
+                                       OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount) - 1));
+#endif
+                               OSAtomicDecrement(&psSyncCheckpointInt->hEnqueuedCCBCount);
+                               break;
+                       }
+               }
+               OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags);
+       }
+#endif
+}
+
+static void _SyncCheckpointState(PDLLIST_NODE psNode,
+                                       DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                       void *pvDumpDebugFile)
+{
+       SYNC_CHECKPOINT *psSyncCheckpoint = IMG_CONTAINER_OF(psNode, SYNC_CHECKPOINT, sListNode);
+
+       if (psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE)
+       {
+               PVR_DUMPDEBUG_LOG("\t- ID = %d, FWAddr = 0x%08x, r%d:e%d:f%d: %s",
+                                  psSyncCheckpoint->ui32UID,
+                                  psSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr +
+                                  _SyncCheckpointGetOffset(psSyncCheckpoint),
+                                  OSAtomicRead(&psSyncCheckpoint->hRefCount),
+                                  OSAtomicRead(&psSyncCheckpoint->hEnqueuedCCBCount),
+                                  psSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount,
+                                  psSyncCheckpoint->azName);
+       }
+}
+
+static void _SyncCheckpointDebugRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+                                       IMG_UINT32 ui32VerbLevel,
+                                       DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                       void *pvDumpDebugFile)
+{
+       PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle;
+       DLLIST_NODE *psNode, *psNext;
+       OS_SPINLOCK_FLAGS uiFlags;
+
+       if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM))
+       {
+               PVR_DUMPDEBUG_LOG("------[ Active Sync Checkpoints ]------");
+               OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags);
+               dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext)
+               {
+                       _SyncCheckpointState(psNode, pfnDumpDebugPrintf, pvDumpDebugFile);
+               }
+               OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags);
+       }
+}
+
+PVRSRV_ERROR
+SyncCheckpointInit(PPVRSRV_DEVICE_NODE psDevNode)
+{
+       PVRSRV_ERROR eError;
+#if defined(PDUMP)
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       psDevInfo = psDevNode->pvDevice;
+#endif
+
+       eError = OSSpinLockCreate(&psDevNode->hSyncCheckpointListLock);
+       PVR_RETURN_IF_ERROR(eError);
+
+       dllist_init(&psDevNode->sSyncCheckpointSyncsList);
+
+       eError = PVRSRVRegisterDeviceDbgRequestNotify(&psDevNode->hSyncCheckpointNotify,
+                                                         psDevNode,
+                                                         _SyncCheckpointDebugRequest,
+                                                         DEBUG_REQUEST_SYNCCHECKPOINT,
+                                                         (PVRSRV_DBGREQ_HANDLE)psDevNode);
+       PVR_GOTO_IF_ERROR(eError, e0);
+
+       if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED)
+       {
+               _SyncCheckpointRecordListInit(psDevNode);
+       }
+
+#if defined(PDUMP)
+       eError = OSSpinLockCreate(&psDevInfo->hSyncCheckpointSignalSpinLock);
+       if (eError != PVRSRV_OK)
+       {
+               psDevInfo->hSyncCheckpointSignalSpinLock = NULL;
+               goto e1;
+       }
+
+       eError = OSLockCreate(&psDevNode->hSyncCheckpointSignalLock);
+       if (eError != PVRSRV_OK)
+       {
+               psDevNode->hSyncCheckpointSignalLock = NULL;
+               goto e2;
+       }
+
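+       /* Allocate the circular buffer used to defer PDump signalling of sync
+        * checkpoints updated from atomic context; entries are drained by the MISR
+        * installed below. */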
+       psDevNode->pui8DeferredSyncCPSignal = OSAllocMem(SYNC_CHECKPOINT_MAX_DEFERRED_SIGNAL
+                                                             * sizeof(_SYNC_CHECKPOINT_DEFERRED_SIGNAL));
+       PVR_GOTO_IF_NOMEM(psDevNode->pui8DeferredSyncCPSignal, eError, e3);
+
+       psDevNode->ui16SyncCPWriteIdx = 0;
+       psDevNode->ui16SyncCPReadIdx = 0;
+
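+       /* Install the MISR that flushes deferred sync checkpoint signals to PDump
+        * from a sleepable context. */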
+       eError = OSInstallMISR(&psDevNode->pvSyncCPMISR,
+                                       MISRHandler_PdumpDeferredSyncSignalPoster,
+                                       psDevNode,
+                                       "RGX_PdumpDeferredSyncSignalPoster");
+       PVR_GOTO_IF_ERROR(eError, e4);
+
+       eError = OSLockCreate(&psDevNode->hSyncCheckpointContextListLock);
+       if (eError != PVRSRV_OK)
+       {
+               psDevNode->hSyncCheckpointContextListLock = NULL;
+               goto e5;
+       }
+
+       dllist_init(&psDevNode->sSyncCheckpointContextListHead);
+
+       eError = PDumpRegisterTransitionCallbackFenceSync(psDevNode,
+                                                               _SyncCheckpointPDumpTransition,
+                                                               &psDevNode->hTransition);
+       if (eError != PVRSRV_OK)
+       {
+               psDevNode->hTransition = NULL;
+               goto e6;
+       }
+#endif
+
+       return PVRSRV_OK;
+
+#if defined(PDUMP)
+e6:
+       OSLockDestroy(psDevNode->hSyncCheckpointContextListLock);
+       psDevNode->hSyncCheckpointContextListLock = NULL;
+e5:
+       (void) OSUninstallMISR(psDevNode->pvSyncCPMISR);
+       psDevNode->pvSyncCPMISR = NULL;
+e4:
+       if (psDevNode->pui8DeferredSyncCPSignal)
+       {
+               OSFreeMem(psDevNode->pui8DeferredSyncCPSignal);
+               psDevNode->pui8DeferredSyncCPSignal = NULL;
+       }
+e3:
+       OSLockDestroy(psDevNode->hSyncCheckpointSignalLock);
+       psDevNode->hSyncCheckpointSignalLock = NULL;
+e2:
+       OSSpinLockDestroy(psDevInfo->hSyncCheckpointSignalSpinLock);
+       psDevInfo->hSyncCheckpointSignalSpinLock = NULL;
+e1:
+       _SyncCheckpointRecordListDeinit(psDevNode);
+#endif
+e0:
+       OSSpinLockDestroy(psDevNode->hSyncCheckpointListLock);
+       psDevNode->hSyncCheckpointListLock = NULL;
+
+       return eError;
+}
+
+void SyncCheckpointDeinit(PPVRSRV_DEVICE_NODE psDevNode)
+{
+#if defined(PDUMP)
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       psDevInfo = psDevNode->pvDevice;
+       PDumpUnregisterTransitionCallbackFenceSync(psDevNode->hTransition);
+       psDevNode->hTransition = NULL;
+
+       if (psDevNode->hSyncCheckpointContextListLock)
+       {
+               OSLockDestroy(psDevNode->hSyncCheckpointContextListLock);
+               psDevNode->hSyncCheckpointContextListLock = NULL;
+       }
+
+       if (psDevNode->pvSyncCPMISR)
+       {
+               (void) OSUninstallMISR(psDevNode->pvSyncCPMISR);
+               psDevNode->pvSyncCPMISR = NULL;
+       }
+
+       if (psDevNode->pui8DeferredSyncCPSignal)
+       {
+               OSFreeMem(psDevNode->pui8DeferredSyncCPSignal);
+               psDevNode->pui8DeferredSyncCPSignal = NULL;
+       }
+       if (psDevNode->hSyncCheckpointSignalLock)
+       {
+               OSLockDestroy(psDevNode->hSyncCheckpointSignalLock);
+               psDevNode->hSyncCheckpointSignalLock = NULL;
+       }
+       if (psDevInfo->hSyncCheckpointSignalSpinLock)
+       {
+               OSSpinLockDestroy(psDevInfo->hSyncCheckpointSignalSpinLock);
+               psDevInfo->hSyncCheckpointSignalSpinLock = NULL;
+       }
+#endif
+
+       PVRSRVUnregisterDeviceDbgRequestNotify(psDevNode->hSyncCheckpointNotify);
+       psDevNode->hSyncCheckpointNotify = NULL;
+       OSSpinLockDestroy(psDevNode->hSyncCheckpointListLock);
+       psDevNode->hSyncCheckpointListLock = NULL;
+       if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED)
+       {
+               _SyncCheckpointRecordListDeinit(psDevNode);
+       }
+}
+
+void SyncCheckpointRecordLookup(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr,
+                                IMG_CHAR * pszSyncInfo, size_t len)
+{
+       DLLIST_NODE *psNode, *psNext;
+       IMG_BOOL bFound = IMG_FALSE;
+
+       if (!pszSyncInfo)
+       {
+               return;
+       }
+
+       pszSyncInfo[0] = '\0';
+
+       OSLockAcquire(psDevNode->hSyncCheckpointRecordLock);
+       dllist_foreach_node(&psDevNode->sSyncCheckpointRecordList, psNode, psNext)
+       {
+               struct SYNC_CHECKPOINT_RECORD *psSyncCheckpointRec =
+                               IMG_CONTAINER_OF(psNode, struct SYNC_CHECKPOINT_RECORD, sNode);
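+               /* The +1 here mirrors the +1 applied to a checkpoint's firmware address
+                * at allocation time, so the record is matched against the same address
+                * value that SyncCheckpointGetFirmwareAddr() reports. */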
+               if ((psSyncCheckpointRec->ui32FwBlockAddr + psSyncCheckpointRec->ui32SyncOffset + 1) == ui32FwAddr)
+               {
+                       SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock = psSyncCheckpointRec->psSyncCheckpointBlock;
+                       if (psSyncCheckpointBlock && psSyncCheckpointBlock->pui32LinAddr)
+                       {
+                               void *pSyncCheckpointAddr = IMG_OFFSET_ADDR(psSyncCheckpointBlock->pui32LinAddr,
+                                                                                                       psSyncCheckpointRec->ui32SyncOffset);
+                               OSSNPrintf(pszSyncInfo, len, "%s Checkpoint:%05u (%s)",
+                                          (*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) ?
+                                                  "SIGNALLED" :
+                                                  ((*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_ERRORED) ?
+                                                          "ERRORED" : "ACTIVE"),
+                                                          psSyncCheckpointRec->uiPID,
+                                                          psSyncCheckpointRec->szClassName);
+                       }
+                       else
+                       {
+                               OSSNPrintf(pszSyncInfo, len, "Checkpoint:%05u (%s)",
+                                          psSyncCheckpointRec->uiPID,
+                                          psSyncCheckpointRec->szClassName);
+                       }
+
+                       bFound = IMG_TRUE;
+                       break;
+               }
+       }
+       OSLockRelease(psDevNode->hSyncCheckpointRecordLock);
+
+       if (!bFound && (psDevNode->ui32SyncCheckpointRecordCountHighWatermark == SYNC_CHECKPOINT_RECORD_LIMIT))
+       {
+               OSSNPrintf(pszSyncInfo, len, "(Record may be lost)");
+       }
+}
+
+static PVRSRV_ERROR
+_SyncCheckpointRecordAdd(
+                       PSYNC_CHECKPOINT_RECORD_HANDLE * phRecord,
+                       SYNC_CHECKPOINT_BLOCK *hSyncCheckpointBlock,
+                       IMG_UINT32 ui32FwBlockAddr,
+                       IMG_UINT32 ui32SyncOffset,
+                       IMG_UINT32 ui32UID,
+                       IMG_UINT32 ui32ClassNameSize,
+                       const IMG_CHAR *pszClassName, PSYNC_CHECKPOINT pSyncCheckpt)
+{
+       struct SYNC_CHECKPOINT_RECORD * psSyncRec;
+       _SYNC_CHECKPOINT_CONTEXT *psContext = hSyncCheckpointBlock->psContext;
+       PVRSRV_DEVICE_NODE *psDevNode = psContext->psDevNode;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PVR_RETURN_IF_INVALID_PARAM(phRecord);
+
+       *phRecord = NULL;
+
+       psSyncRec = OSAllocMem(sizeof(*psSyncRec));
+       PVR_LOG_GOTO_IF_NOMEM(psSyncRec, eError, fail_alloc); /* Sets OOM error code */
+
+       psSyncRec->psDevNode = psDevNode;
+       psSyncRec->psSyncCheckpointBlock = hSyncCheckpointBlock;
+       psSyncRec->ui32SyncOffset = ui32SyncOffset;
+       psSyncRec->ui32FwBlockAddr = ui32FwBlockAddr;
+       psSyncRec->ui64OSTime = OSClockns64();
+       psSyncRec->uiPID = OSGetCurrentProcessID();
+       psSyncRec->ui32UID = ui32UID;
+       psSyncRec->pSyncCheckpt = pSyncCheckpt;
+       if (pszClassName)
+       {
+               if (ui32ClassNameSize >= PVRSRV_SYNC_NAME_LENGTH)
+                       ui32ClassNameSize = PVRSRV_SYNC_NAME_LENGTH;
+               /* Copy over the class name annotation */
+               OSStringLCopy(psSyncRec->szClassName, pszClassName, ui32ClassNameSize);
+       }
+       else
+       {
+               /* No class name annotation */
+               psSyncRec->szClassName[0] = 0;
+       }
+
+       OSLockAcquire(psDevNode->hSyncCheckpointRecordLock);
+       if (psDevNode->ui32SyncCheckpointRecordCount < SYNC_CHECKPOINT_RECORD_LIMIT)
+       {
+               dllist_add_to_head(&psDevNode->sSyncCheckpointRecordList, &psSyncRec->sNode);
+               psDevNode->ui32SyncCheckpointRecordCount++;
+
+               if (psDevNode->ui32SyncCheckpointRecordCount > psDevNode->ui32SyncCheckpointRecordCountHighWatermark)
+               {
+                       psDevNode->ui32SyncCheckpointRecordCountHighWatermark = psDevNode->ui32SyncCheckpointRecordCount;
+               }
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to add sync checkpoint record \"%s\". %u records already exist.",
+                               __func__,
+                               pszClassName,
+                               psDevNode->ui32SyncCheckpointRecordCount));
+               OSFreeMem(psSyncRec);
+               psSyncRec = NULL;
+               eError = PVRSRV_ERROR_TOOMANYBUFFERS;
+       }
+       OSLockRelease(psDevNode->hSyncCheckpointRecordLock);
+
+       *phRecord = (PSYNC_CHECKPOINT_RECORD_HANDLE)psSyncRec;
+
+fail_alloc:
+       return eError;
+}
+
+static PVRSRV_ERROR
+_SyncCheckpointRecordRemove(PSYNC_CHECKPOINT_RECORD_HANDLE hRecord)
+{
+       struct SYNC_CHECKPOINT_RECORD **ppFreedSync;
+       struct SYNC_CHECKPOINT_RECORD *pSync = (struct SYNC_CHECKPOINT_RECORD*)hRecord;
+       PVRSRV_DEVICE_NODE *psDevNode;
+
+       PVR_RETURN_IF_INVALID_PARAM(hRecord);
+
+       psDevNode = pSync->psDevNode;
+
+       OSLockAcquire(psDevNode->hSyncCheckpointRecordLock);
+
+       dllist_remove_node(&pSync->sNode);
+
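+       /* Rather than being freed immediately, the record is parked in a small
+        * history ring (apsSyncCheckpointRecordsFreed) so recently freed checkpoints
+        * can still appear in the debug dump; whatever entry it displaces is freed. */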
+       if (psDevNode->uiSyncCheckpointRecordFreeIdx >= PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: psDevNode->uiSyncCheckpointRecordFreeIdx out of range",
+                               __func__));
+               psDevNode->uiSyncCheckpointRecordFreeIdx = 0;
+       }
+       ppFreedSync = &psDevNode->apsSyncCheckpointRecordsFreed[psDevNode->uiSyncCheckpointRecordFreeIdx];
+       psDevNode->uiSyncCheckpointRecordFreeIdx =
+                       (psDevNode->uiSyncCheckpointRecordFreeIdx + 1) % PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN;
+
+       if (*ppFreedSync)
+       {
+               OSFreeMem(*ppFreedSync);
+       }
+       pSync->psSyncCheckpointBlock = NULL;
+       pSync->ui64OSTime = OSClockns64();
+       *ppFreedSync = pSync;
+
+       psDevNode->ui32SyncCheckpointRecordCount--;
+
+       OSLockRelease(psDevNode->hSyncCheckpointRecordLock);
+
+       return PVRSRV_OK;
+}
+
+#define NS_IN_S (1000000000UL)
+static void _SyncCheckpointRecordPrint(struct SYNC_CHECKPOINT_RECORD *psSyncCheckpointRec,
+                                       IMG_UINT64 ui64TimeNow,
+                                       DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                       void *pvDumpDebugFile)
+{
+       SYNC_CHECKPOINT *psSyncCheckpoint = (SYNC_CHECKPOINT *)psSyncCheckpointRec->pSyncCheckpt;
+       SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock = psSyncCheckpointRec->psSyncCheckpointBlock;
+       IMG_UINT64 ui64DeltaS;
+       IMG_UINT32 ui32DeltaF;
+       IMG_UINT64 ui64Delta = ui64TimeNow - psSyncCheckpointRec->ui64OSTime;
+       ui64DeltaS = OSDivide64(ui64Delta, NS_IN_S, &ui32DeltaF);
+
+       if (psSyncCheckpointBlock && psSyncCheckpointBlock->pui32LinAddr)
+       {
+               void *pSyncCheckpointAddr;
+               pSyncCheckpointAddr = IMG_OFFSET_ADDR(psSyncCheckpointBlock->pui32LinAddr,
+                                                                                       psSyncCheckpointRec->ui32SyncOffset);
+
+               PVR_DUMPDEBUG_LOG("\t%05u %05" IMG_UINT64_FMTSPEC ".%09u %010u FWAddr=0x%08x (r%d:e%d:f%d) State=%s (%s)",
+                                 psSyncCheckpointRec->uiPID,
+                                 ui64DeltaS, ui32DeltaF, psSyncCheckpointRec->ui32UID,
+                                 (psSyncCheckpointRec->ui32FwBlockAddr+psSyncCheckpointRec->ui32SyncOffset),
+                                 OSAtomicRead(&psSyncCheckpoint->hRefCount),
+                                 OSAtomicRead(&psSyncCheckpoint->hEnqueuedCCBCount),
+                                 psSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount,
+                                 (*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) ?
+                                         "SIGNALLED" :
+                                         ((*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_ERRORED) ?
+                                                 "ERRORED" : "ACTIVE"),
+                                                 psSyncCheckpointRec->szClassName);
+       }
+       else
+       {
+               PVR_DUMPDEBUG_LOG("\t%05u %05" IMG_UINT64_FMTSPEC ".%09u %010u FWAddr=0x%08x State=<null_ptr> (%s)",
+                                 psSyncCheckpointRec->uiPID,
+                                 ui64DeltaS, ui32DeltaF, psSyncCheckpointRec->ui32UID,
+                                 (psSyncCheckpointRec->ui32FwBlockAddr+psSyncCheckpointRec->ui32SyncOffset),
+                                 psSyncCheckpointRec->szClassName
+               );
+       }
+}
+
+static void _SyncCheckpointRecordRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+                                         IMG_UINT32 ui32VerbLevel,
+                                         DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                         void *pvDumpDebugFile)
+{
+       PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle;
+       IMG_UINT64 ui64TimeNowS;
+       IMG_UINT32 ui32TimeNowF;
+       IMG_UINT64 ui64TimeNow = OSClockns64();
+       DLLIST_NODE *psNode, *psNext;
+
+       ui64TimeNowS = OSDivide64(ui64TimeNow, NS_IN_S, &ui32TimeNowF);
+
+       if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM))
+       {
+               IMG_UINT32 i;
+
+               OSLockAcquire(psDevNode->hSyncCheckpointRecordLock);
+
+               PVR_DUMPDEBUG_LOG("Dumping allocated sync checkpoints. Allocated: %u High watermark: %u (time ref %05" IMG_UINT64_FMTSPEC ".%09u)",
+                                 psDevNode->ui32SyncCheckpointRecordCount,
+                                 psDevNode->ui32SyncCheckpointRecordCountHighWatermark,
+                                 ui64TimeNowS,
+                                 ui32TimeNowF);
+               if (psDevNode->ui32SyncCheckpointRecordCountHighWatermark == SYNC_CHECKPOINT_RECORD_LIMIT)
+               {
+                       PVR_DUMPDEBUG_LOG("Warning: Record limit (%u) was reached. Some sync checkpoints may not have been recorded in the debug information.",
+                                         SYNC_CHECKPOINT_RECORD_LIMIT);
+               }
+               PVR_DUMPDEBUG_LOG("\t%-5s %-15s %-10s %-17s %-14s (%s)",
+                                 "PID", "Time Delta (s)", "UID", "Address", "State", "Annotation");
+
+               dllist_foreach_node(&psDevNode->sSyncCheckpointRecordList, psNode, psNext)
+               {
+                       struct SYNC_CHECKPOINT_RECORD *psSyncCheckpointRec =
+                                       IMG_CONTAINER_OF(psNode, struct SYNC_CHECKPOINT_RECORD, sNode);
+                       _SyncCheckpointRecordPrint(psSyncCheckpointRec, ui64TimeNow,
+                                                  pfnDumpDebugPrintf, pvDumpDebugFile);
+               }
+
+               PVR_DUMPDEBUG_LOG("Dumping all recently freed sync checkpoints @ %05" IMG_UINT64_FMTSPEC ".%09u",
+                                 ui64TimeNowS,
+                                 ui32TimeNowF);
+               PVR_DUMPDEBUG_LOG("\t%-5s %-15s %-10s %-17s %-14s (%s)",
+                                 "PID", "Time Delta (s)", "UID", "Address", "State", "Annotation");
+               for (i = DECREMENT_WITH_WRAP(psDevNode->uiSyncCheckpointRecordFreeIdx, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN);
+                               i != psDevNode->uiSyncCheckpointRecordFreeIdx;
+                               i = DECREMENT_WITH_WRAP(i, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN))
+               {
+                       if (psDevNode->apsSyncCheckpointRecordsFreed[i])
+                       {
+                               _SyncCheckpointRecordPrint(psDevNode->apsSyncCheckpointRecordsFreed[i],
+                                                          ui64TimeNow, pfnDumpDebugPrintf, pvDumpDebugFile);
+                       }
+                       else
+                       {
+                               break;
+                       }
+               }
+               OSLockRelease(psDevNode->hSyncCheckpointRecordLock);
+       }
+}
+#undef NS_IN_S
+static PVRSRV_ERROR _SyncCheckpointRecordListInit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+       PVRSRV_ERROR eError;
+
+       eError = OSLockCreate(&psDevNode->hSyncCheckpointRecordLock);
+       PVR_GOTO_IF_ERROR(eError, fail_lock_create);
+       dllist_init(&psDevNode->sSyncCheckpointRecordList);
+
+       psDevNode->ui32SyncCheckpointRecordCount = 0;
+       psDevNode->ui32SyncCheckpointRecordCountHighWatermark = 0;
+
+       eError = PVRSRVRegisterDeviceDbgRequestNotify(&psDevNode->hSyncCheckpointRecordNotify,
+                                                     psDevNode,
+                                                     _SyncCheckpointRecordRequest,
+                                                     DEBUG_REQUEST_SYNCCHECKPOINT,
+                                                     (PVRSRV_DBGREQ_HANDLE)psDevNode);
+       PVR_GOTO_IF_ERROR(eError, fail_dbg_register);
+
+       return PVRSRV_OK;
+
+fail_dbg_register:
+       OSLockDestroy(psDevNode->hSyncCheckpointRecordLock);
+fail_lock_create:
+       return eError;
+}
+
+static void _SyncCheckpointRecordListDeinit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+       DLLIST_NODE *psNode, *psNext;
+       int i;
+
+       OSLockAcquire(psDevNode->hSyncCheckpointRecordLock);
+       dllist_foreach_node(&psDevNode->sSyncCheckpointRecordList, psNode, psNext)
+       {
+               struct SYNC_CHECKPOINT_RECORD *pSyncCheckpointRec =
+                               IMG_CONTAINER_OF(psNode, struct SYNC_CHECKPOINT_RECORD, sNode);
+
+               dllist_remove_node(psNode);
+               OSFreeMem(pSyncCheckpointRec);
+       }
+
+       for (i = 0; i < PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN; i++)
+       {
+               if (psDevNode->apsSyncCheckpointRecordsFreed[i])
+               {
+                       OSFreeMem(psDevNode->apsSyncCheckpointRecordsFreed[i]);
+                       psDevNode->apsSyncCheckpointRecordsFreed[i] = NULL;
+               }
+       }
+       OSLockRelease(psDevNode->hSyncCheckpointRecordLock);
+
+       if (psDevNode->hSyncCheckpointRecordNotify)
+       {
+               PVRSRVUnregisterDeviceDbgRequestNotify(psDevNode->hSyncCheckpointRecordNotify);
+       }
+       OSLockDestroy(psDevNode->hSyncCheckpointRecordLock);
+}
+
+#if defined(PDUMP)
+
+static PVRSRV_ERROR
+_SyncCheckpointAllocPDump(PVRSRV_DEVICE_NODE *psDevNode, SYNC_CHECKPOINT *psSyncCheckpoint)
+{
+       PDUMPCOMMENTWITHFLAGS(psDevNode, PDUMP_FLAGS_CONTINUOUS,
+                             "Allocated Sync Checkpoint %s (ID:%d, TL:%d, FirmwareVAddr = 0x%08x)",
+                             psSyncCheckpoint->azName,
+                             psSyncCheckpoint->ui32UID, psSyncCheckpoint->hTimeline,
+                             psSyncCheckpoint->sCheckpointUFOAddr.ui32Addr);
+
+       DevmemPDumpLoadMemValue32(psSyncCheckpoint->psSyncCheckpointBlock->hMemDesc,
+                                 _SyncCheckpointGetOffset(psSyncCheckpoint),
+                                 PVRSRV_SYNC_CHECKPOINT_ACTIVE,
+                                 PDUMP_FLAGS_CONTINUOUS);
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+_SyncCheckpointUpdatePDump(PPVRSRV_DEVICE_NODE psDevNode, SYNC_CHECKPOINT *psSyncCheckpoint, IMG_UINT32 ui32Status, IMG_UINT32 ui32FenceSyncFlags)
+{
+       IMG_BOOL bSleepAllowed = (ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_CTX_ATOMIC) ? IMG_FALSE : IMG_TRUE;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       psDevInfo = psDevNode->pvDevice;
+       /*
+        * We might be asked to PDump sync state outside of the capture range
+        * (e.g. texture uploads), so make this continuous.
+        */
+       if (bSleepAllowed)
+       {
+               if (ui32Status == PVRSRV_SYNC_CHECKPOINT_ERRORED)
+               {
+                       PDUMPCOMMENTWITHFLAGS(psDevNode, PDUMP_FLAGS_CONTINUOUS,
+                                       "Errored Sync Checkpoint %s (ID:%d, TL:%d, FirmwareVAddr = 0x%08x)",
+                                       psSyncCheckpoint->azName,
+                                       psSyncCheckpoint->ui32UID, psSyncCheckpoint->hTimeline,
+                                       (psSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr +
+                                       _SyncCheckpointGetOffset(psSyncCheckpoint)));
+               }
+               else
+               {
+                       PDUMPCOMMENTWITHFLAGS(psDevNode, PDUMP_FLAGS_CONTINUOUS,
+                                       "Signalled Sync Checkpoint %s (ID:%d, TL:%d, FirmwareVAddr = 0x%08x)",
+                                       psSyncCheckpoint->azName,
+                                       psSyncCheckpoint->ui32UID, psSyncCheckpoint->hTimeline,
+                                       (psSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr +
+                                       _SyncCheckpointGetOffset(psSyncCheckpoint)));
+               }
+
+               DevmemPDumpLoadMemValue32(psSyncCheckpoint->psSyncCheckpointBlock->hMemDesc,
+                                         _SyncCheckpointGetOffset(psSyncCheckpoint),
+                                         ui32Status,
+                                         PDUMP_FLAGS_CONTINUOUS);
+       }
+       else
+       {
+               _SYNC_CHECKPOINT_DEFERRED_SIGNAL *psSyncData;
+               OS_SPINLOCK_FLAGS uiFlags;
+               IMG_UINT16 ui16NewWriteIdx;
+
+               OSSpinLockAcquire(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags);
+
+               ui16NewWriteIdx = GET_CP_CB_NEXT_IDX(psDevNode->ui16SyncCPWriteIdx);
+               if (ui16NewWriteIdx == psDevNode->ui16SyncCPReadIdx)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s: ERROR Deferred SyncCheckpointSignal CB is full",
+                               __func__));
+               }
+               else
+               {
+                       psSyncData = GET_CP_CB_BASE(psDevNode->ui16SyncCPWriteIdx);
+                       psSyncData->asSyncCheckpoint = *psSyncCheckpoint;
+                       psSyncData->ui32Status = ui32Status;
+                       psDevNode->ui16SyncCPWriteIdx = ui16NewWriteIdx;
+               }
+
+               OSSpinLockRelease(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags);
+
+               OSScheduleMISR(psDevNode->pvSyncCPMISR);
+       }
+
+       return PVRSRV_OK;
+}
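+
+/* Note on the deferred path above: in an atomic context
+ * (PVRSRV_FENCE_FLAG_CTX_ATOMIC) the state change is not PDumped inline.
+ * Instead a copy of the checkpoint and its new status is queued in a small
+ * circular buffer under hSyncCheckpointSignalSpinLock and a MISR is scheduled
+ * to emit the PDump writes later from a sleepable context. GET_CP_CB_NEXT_IDX
+ * and GET_CP_CB_BASE are assumed (they are defined elsewhere in this file) to
+ * advance and index that buffer with wrap-around, e.g. an increment modulo
+ * the buffer size. */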
+
+static void
+MISRHandler_PdumpDeferredSyncSignalPoster(void *pvData)
+{
+       PPVRSRV_DEVICE_NODE psDevNode = (PPVRSRV_DEVICE_NODE) pvData;
+       OS_SPINLOCK_FLAGS uiFlags;
+       IMG_UINT16 ui16ReadIdx, ui16WriteIdx;
+       _SYNC_CHECKPOINT_DEFERRED_SIGNAL *psSyncData;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       psDevInfo = psDevNode->pvDevice;
+
+       OSLockAcquire(psDevNode->hSyncCheckpointSignalLock);
+
+       OSSpinLockAcquire(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags);
+       /* Snapshot current write and read offset of CB */
+       ui16WriteIdx = psDevNode->ui16SyncCPWriteIdx;
+       ui16ReadIdx = psDevNode->ui16SyncCPReadIdx;
+
+       OSSpinLockRelease(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags);
+       /* CB is empty */
+       if (ui16WriteIdx == ui16ReadIdx)
+       {
+               OSLockRelease(psDevNode->hSyncCheckpointSignalLock);
+               return;
+       }
+       do
+       {
+               /* Read item in the CB and flush it to pdump */
+               psSyncData = GET_CP_CB_BASE(ui16ReadIdx);
+               _SyncCheckpointUpdatePDump(psDevNode, &psSyncData->asSyncCheckpoint, psSyncData->ui32Status, PVRSRV_FENCE_FLAG_NONE);
+               ui16ReadIdx = GET_CP_CB_NEXT_IDX(psDevNode->ui16SyncCPReadIdx);
+               /* Increment read offset in CB as one item is flushed to pdump */
+               OSSpinLockAcquire(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags);
+               psDevNode->ui16SyncCPReadIdx = ui16ReadIdx;
+               OSSpinLockRelease(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags);
+               /* A call to this function flushes every item that was present in the
+                * CB at the time of the call, i.e. it uses the snapshot of the write
+                * offset taken at the top of this function and iterates until the
+                * read offset catches up with that snapshot. */
+       } while (ui16WriteIdx != ui16ReadIdx);
+
+       OSLockRelease(psDevNode->hSyncCheckpointSignalLock);
+}
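+
+/* Two locks are used by the poster above: hSyncCheckpointSignalLock
+ * serialises concurrent runs of the MISR itself, while
+ * hSyncCheckpointSignalSpinLock only guards the read/write indices, so the
+ * producer in _SyncCheckpointUpdatePDump never has to spin for the duration
+ * of a PDump write. */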
+
+PVRSRV_ERROR PVRSRVSyncCheckpointSignalledPDumpPolKM(PVRSRV_FENCE hFence)
+{
+       PVRSRV_ERROR eError;
+       PSYNC_CHECKPOINT *apsCheckpoints = NULL;
+       SYNC_CHECKPOINT *psSyncCheckpoint = NULL;
+       IMG_UINT32 i, uiNumCheckpoints = 0;
+#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) && defined(NO_HARDWARE) && defined(PDUMP)
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+#endif
+
+       if (hFence != PVRSRV_NO_FENCE)
+       {
+               eError = g_psSyncCheckpointPfnStruct->pfnSyncFenceGetCheckpoints(hFence, &uiNumCheckpoints, &apsCheckpoints);
+       }
+       else
+       {
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       PVR_LOG_RETURN_IF_ERROR(eError, "g_pfnFenceGetCheckpoints");
+
+       if (uiNumCheckpoints)
+       {
+               /* Flushing deferred fence signals to pdump */
+               psSyncCheckpoint = (SYNC_CHECKPOINT *)apsCheckpoints[0];
+               MISRHandler_PdumpDeferredSyncSignalPoster(psSyncCheckpoint->psSyncCheckpointBlock->psDevNode);
+       }
+
+       for (i=0; i < uiNumCheckpoints; i++)
+       {
+               psSyncCheckpoint = (SYNC_CHECKPOINT *)apsCheckpoints[i];
+               if (psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED)
+               {
+                       PDUMPCOMMENTWITHFLAGS(psSyncCheckpoint->psSyncCheckpointBlock->psDevNode,
+                                                       psSyncCheckpoint->ui32PDumpFlags,
+                                                       "Wait for Fence %s (ID:%d)",
+                                                       psSyncCheckpoint->azName,
+                                                       psSyncCheckpoint->ui32UID);
+
+                       eError = DevmemPDumpDevmemPol32(psSyncCheckpoint->psSyncCheckpointBlock->hMemDesc,
+                                                               _SyncCheckpointGetOffset(psSyncCheckpoint),
+                                                               PVRSRV_SYNC_CHECKPOINT_SIGNALLED,
+                                                               0xFFFFFFFF,
+                                                               PDUMP_POLL_OPERATOR_EQUAL,
+                                                               psSyncCheckpoint->ui32PDumpFlags);
+                       PVR_LOG_IF_ERROR(eError, "DevmemPDumpDevmemPol32");
+               }
+       }
+
+#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) && defined(NO_HARDWARE) && defined(PDUMP)
+       /* Sampling of USC timers can only be done after synchronisation for a 3D kick is over */
+       if (uiNumCheckpoints)
+       {
+               psSyncCheckpoint = (SYNC_CHECKPOINT *)apsCheckpoints[0];
+               psDevInfo = psSyncCheckpoint->psSyncCheckpointBlock->psDevNode->pvDevice;
+               if (psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags & RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER)
+               {
+                       RGXValidateSOCUSCTimer(psDevInfo, PDUMP_CONT, 0, 0, NULL);
+               }
+       }
+#endif
+
+       /* Free the memory that was allocated for the sync checkpoint list returned */
+       if (apsCheckpoints)
+       {
+               SyncCheckpointFreeCheckpointListMem(apsCheckpoints);
+       }
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+_SyncCheckpointPDumpTransition(void *pvData, PDUMP_TRANSITION_EVENT eEvent)
+{
+       _SYNC_CHECKPOINT_CONTEXT *psContext;
+       DLLIST_NODE *psNode, *psNext;
+       DLLIST_NODE *psNode1, *psNext1;
+       PPVRSRV_DEVICE_NODE psDevNode = (PPVRSRV_DEVICE_NODE) pvData;
+
+       if ((eEvent == PDUMP_TRANSITION_EVENT_RANGE_ENTERED) || (eEvent == PDUMP_TRANSITION_EVENT_BLOCK_STARTED))
+       {
+               OSLockAcquire(psDevNode->hSyncCheckpointContextListLock);
+               dllist_foreach_node(&psDevNode->sSyncCheckpointContextListHead, psNode, psNext)
+               {
+                       psContext = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT_CONTEXT, sListNode);
+
+                       OSLockAcquire(psContext->hSyncCheckpointBlockListLock);
+                       dllist_foreach_node(&psContext->sSyncCheckpointBlockListHead, psNode1, psNext1)
+                       {
+                               SYNC_CHECKPOINT_BLOCK *psSyncBlk =
+                                       IMG_CONTAINER_OF(psNode1, SYNC_CHECKPOINT_BLOCK, sListNode);
+                               DevmemPDumpLoadMem(psSyncBlk->hMemDesc,
+                                                          0,
+                                                          psSyncBlk->ui32SyncBlockSize,
+                                                          PDUMP_FLAGS_CONTINUOUS);
+                       }
+                       OSLockRelease(psContext->hSyncCheckpointBlockListLock);
+               }
+               OSLockRelease(psDevNode->hSyncCheckpointContextListLock);
+       }
+
+       return PVRSRV_OK;
+}
+#endif
+
+static void _CheckDeferredCleanupList(_SYNC_CHECKPOINT_CONTEXT *psContext)
+{
+       _SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContext->psContextCtl;
+       PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE*)psContext->psDevNode;
+       DECLARE_DLLIST(sCleanupList);
+       DLLIST_NODE *psNode, *psNext;
+       OS_SPINLOCK_FLAGS uiFlags;
+
+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
+       PVR_DPF((PVR_DBG_WARNING, "%s called", __func__));
+#endif
+
+       /* Check the deferred cleanup list and free any sync checkpoints we can */
+       OSSpinLockAcquire(psCtxCtl->hDeferredCleanupListLock, uiFlags);
+
+       if (dllist_is_empty(&psCtxCtl->sDeferredCleanupListHead))
+       {
+               OSSpinLockRelease(psCtxCtl->hDeferredCleanupListLock, uiFlags);
+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
+               PVR_DPF((PVR_DBG_WARNING, "%s: Defer free list is empty", __func__));
+#endif
+               /* if list is empty then we have nothing to do here */
+               return;
+       }
+
+       dllist_foreach_node(&psCtxCtl->sDeferredCleanupListHead, psNode, psNext)
+       {
+               SYNC_CHECKPOINT *psSyncCheckpointInt =
+                               IMG_CONTAINER_OF(psNode, SYNC_CHECKPOINT, sDeferredFreeListNode);
+
+               if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount ==
+                               (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)))
+               {
+                       if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED)
+                               && psSyncCheckpointInt->hRecord)
+                       {
+                               PVRSRV_ERROR eError;
+                               /* remove this sync record */
+                               eError = _SyncCheckpointRecordRemove(psSyncCheckpointInt->hRecord);
+                               PVR_LOG_IF_ERROR(eError, "_SyncCheckpointRecordRemove");
+                       }
+
+                       /* Move the sync checkpoint from the deferred free list to local list */
+                       dllist_remove_node(&psSyncCheckpointInt->sDeferredFreeListNode);
+                       /* It is not ideal to traverse the list of checkpoints-to-free twice,
+                        * but it allows us to avoid holding the lock for too long. */
+                       dllist_add_to_tail(&sCleanupList, &psSyncCheckpointInt->sDeferredFreeListNode);
+               }
+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
+               else
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "%s psSyncCheckpoint '%s' (ID:%d)<%p>, "
+                               "still pending (enq=%d,FWRef=%d)", __func__,
+                               psSyncCheckpointInt->azName, psSyncCheckpointInt->ui32UID,
+                               (void*)psSyncCheckpointInt,
+                               (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)),
+                               psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount));
+               }
+#endif
+       }
+
+       OSSpinLockRelease(psCtxCtl->hDeferredCleanupListLock, uiFlags);
+
+       dllist_foreach_node(&sCleanupList, psNode, psNext) {
+               SYNC_CHECKPOINT *psSyncCheckpointInt =
+                               IMG_CONTAINER_OF(psNode, SYNC_CHECKPOINT, sDeferredFreeListNode);
+
+               /* Remove the sync checkpoint from the global list */
+               OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags);
+               dllist_remove_node(&psSyncCheckpointInt->sListNode);
+               OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags);
+
+               RGXSRV_HWPERF_FREE(psDevNode, SYNC_CP, psSyncCheckpointInt->ui32FWAddr);
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
+               PVR_DPF((PVR_DBG_WARNING,
+                       "%s attempting to return sync (ID:%d)<%p> to pool",
+                       __func__,
+                       psSyncCheckpointInt->ui32UID,
+                       (void *) psSyncCheckpointInt));
+#endif
+               if (!_PutCheckpointInPool(psSyncCheckpointInt))
+#endif
+               {
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
+                       PVR_DPF((PVR_DBG_WARNING, "%s pool is full, so just free it",
+                               __func__));
+#endif
+#endif
+                       _FreeSyncCheckpoint(psSyncCheckpointInt);
+               }
+       }
+}
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+static SYNC_CHECKPOINT *_GetCheckpointFromPool(_SYNC_CHECKPOINT_CONTEXT *psContext)
+{
+       _SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContext->psContextCtl;
+       SYNC_CHECKPOINT *psSyncCheckpoint = NULL;
+       OS_SPINLOCK_FLAGS uiFlags;
+
+       /* Acquire sync checkpoint pool lock */
+       OSSpinLockAcquire(psCtxCtl->hSyncCheckpointPoolLock, uiFlags);
+
+       /* Check if we can allocate from the pool */
+       if (psCtxCtl->bSyncCheckpointPoolValid &&
+           (psCtxCtl->ui32SyncCheckpointPoolCount > SYNC_CHECKPOINT_POOL_SEDIMENT) &&
+           (psCtxCtl->ui32SyncCheckpointPoolWp != psCtxCtl->ui32SyncCheckpointPoolRp))
+       {
+               /* Get the next sync checkpoint from the pool */
+               psSyncCheckpoint = psCtxCtl->psSyncCheckpointPool[psCtxCtl->ui32SyncCheckpointPoolRp];
+               psCtxCtl->ui32SyncCheckpointPoolRp =
+                       (psCtxCtl->ui32SyncCheckpointPoolRp + 1) & SYNC_CHECKPOINT_POOL_MASK;
+               psCtxCtl->ui32SyncCheckpointPoolCount--;
+               psCtxCtl->bSyncCheckpointPoolFull = IMG_FALSE;
+               psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_USE;
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+               PVR_DPF((PVR_DBG_WARNING,
+                       "%s checkpoint(old ID:%d)<-POOL(%d/%d), psContext=<%p>, "
+                       "poolRp=%d, poolWp=%d",
+                       __func__,
+                       psSyncCheckpoint->ui32UID,
+                       psCtxCtl->ui32SyncCheckpointPoolCount,
+                       SYNC_CHECKPOINT_POOL_SIZE,
+                       (void *) psContext,
+                       psCtxCtl->ui32SyncCheckpointPoolRp,
+                       psCtxCtl->ui32SyncCheckpointPoolWp));
+#endif
+       }
+       /* Release sync checkpoint pool lock */
+       OSSpinLockRelease(psCtxCtl->hSyncCheckpointPoolLock, uiFlags);
+
+       return psSyncCheckpoint;
+}
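+
+/* Checkpoints are only drawn from the pool while it holds more than
+ * SYNC_CHECKPOINT_POOL_SEDIMENT entries, so a small residue is always left
+ * behind; presumably this keeps recently freed checkpoints around for a
+ * while (e.g. for debug inspection) instead of recycling them immediately. */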
+
+static IMG_BOOL _PutCheckpointInPool(SYNC_CHECKPOINT *psSyncCheckpoint)
+{
+       _SYNC_CHECKPOINT_CONTEXT *psContext = psSyncCheckpoint->psSyncCheckpointBlock->psContext;
+       _SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContext->psContextCtl;
+       IMG_BOOL bReturnedToPool = IMG_FALSE;
+       OS_SPINLOCK_FLAGS uiFlags;
+
+       /* Acquire sync checkpoint pool lock */
+       OSSpinLockAcquire(psCtxCtl->hSyncCheckpointPoolLock, uiFlags);
+
+       /* Check if pool has space */
+       if (psCtxCtl->bSyncCheckpointPoolValid && !psCtxCtl->bSyncCheckpointPoolFull)
+       {
+               /* Put the sync checkpoint into the next write slot in the pool */
+               psCtxCtl->psSyncCheckpointPool[psCtxCtl->ui32SyncCheckpointPoolWp] = psSyncCheckpoint;
+               psCtxCtl->ui32SyncCheckpointPoolWp =
+                       (psCtxCtl->ui32SyncCheckpointPoolWp + 1) & SYNC_CHECKPOINT_POOL_MASK;
+               psCtxCtl->ui32SyncCheckpointPoolCount++;
+               psCtxCtl->bSyncCheckpointPoolFull =
+                       ((psCtxCtl->ui32SyncCheckpointPoolCount > 0) &&
+                       (psCtxCtl->ui32SyncCheckpointPoolWp == psCtxCtl->ui32SyncCheckpointPoolRp));
+               bReturnedToPool = IMG_TRUE;
+               psSyncCheckpoint->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_UNDEF;
+               psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_POOL;
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+               PVR_DPF((PVR_DBG_WARNING,
+                       "%s checkpoint(ID:%d)->POOL(%d/%d), poolRp=%d, poolWp=%d",
+                       __func__,
+                       psSyncCheckpoint->ui32UID,
+                       psCtxCtl->ui32SyncCheckpointPoolCount,
+                       SYNC_CHECKPOINT_POOL_SIZE,
+                       psCtxCtl->ui32SyncCheckpointPoolRp,
+                       psCtxCtl->ui32SyncCheckpointPoolWp));
+#endif
+       }
+       /* Release sync checkpoint pool lock */
+       OSSpinLockRelease(psCtxCtl->hSyncCheckpointPoolLock, uiFlags);
+
+       return bReturnedToPool;
+}
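+
+/* The pool is a ring indexed through SYNC_CHECKPOINT_POOL_MASK (so its size
+ * is presumably a power of two). Wp == Rp on its own is ambiguous, as it
+ * holds both when the ring is empty and when it has wrapped, so the "full"
+ * flag is derived from Wp == Rp combined with a non-zero element count. */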
+
+static IMG_UINT32 _CleanCheckpointPool(_SYNC_CHECKPOINT_CONTEXT *psContext)
+{
+       _SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContext->psContextCtl;
+       SYNC_CHECKPOINT *psCheckpoint = NULL;
+       DECLARE_DLLIST(sCleanupList);
+       DLLIST_NODE *psThis, *psNext;
+       OS_SPINLOCK_FLAGS uiFlags;
+       IMG_UINT32 ui32ItemsFreed = 0, ui32NullScpCount = 0, ui32PoolCount;
+       IMG_BOOL bPoolValid;
+
+       /* Acquire sync checkpoint pool lock */
+       OSSpinLockAcquire(psCtxCtl->hSyncCheckpointPoolLock, uiFlags);
+
+       bPoolValid = psCtxCtl->bSyncCheckpointPoolValid;
+       ui32PoolCount = psCtxCtl->ui32SyncCheckpointPoolCount;
+
+       /* While the pool still contains sync checkpoints, free them */
+       while (bPoolValid && psCtxCtl->ui32SyncCheckpointPoolCount > 0)
+       {
+               /* Get the sync checkpoint from the next read slot in the pool */
+               psCheckpoint = psCtxCtl->psSyncCheckpointPool[psCtxCtl->ui32SyncCheckpointPoolRp];
+               psCtxCtl->ui32SyncCheckpointPoolRp =
+                       (psCtxCtl->ui32SyncCheckpointPoolRp + 1) & SYNC_CHECKPOINT_POOL_MASK;
+               psCtxCtl->ui32SyncCheckpointPoolCount--;
+               psCtxCtl->bSyncCheckpointPoolFull =
+                       ((psCtxCtl->ui32SyncCheckpointPoolCount > 0) &&
+                       (psCtxCtl->ui32SyncCheckpointPoolWp == psCtxCtl->ui32SyncCheckpointPoolRp));
+
+               if (psCheckpoint)
+               {
+                       PVR_ASSERT(!dllist_node_is_in_list(&psCheckpoint->sListNode));
+                       /* before checkpoints are added to the pool they are removed
+                        * from the list so it's safe to use sListNode here */
+                       dllist_add_to_head(&sCleanupList, &psCheckpoint->sListNode);
+               }
+               else
+               {
+                       ui32NullScpCount++;
+               }
+       }
+
+       /* Release sync checkpoint pool lock */
+       OSSpinLockRelease(psCtxCtl->hSyncCheckpointPoolLock, uiFlags);
+
+       /* go through the local list and free all of the sync checkpoints */
+
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+       PVR_DPF((PVR_DBG_WARNING, "%s psContext=<%p>, bSyncCheckpointPoolValid=%d, "
+               "uiSyncCheckpointPoolCount=%d", __func__, (void *) psContext,
+               bPoolValid, ui32PoolCount));
+
+       if (ui32NullScpCount > 0)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "%s pool contained %u NULL entries", __func__,
+                       ui32NullScpCount));
+       }
+#endif
+
+       dllist_foreach_node(&sCleanupList, psThis, psNext)
+       {
+               psCheckpoint = IMG_CONTAINER_OF(psThis, SYNC_CHECKPOINT, sListNode);
+
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+               if (psCheckpoint->ui32ValidationCheck != SYNC_CHECKPOINT_PATTERN_IN_POOL)
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "%s pool contains invalid entry "
+                               "(ui32ValidationCheck=0x%x)", __func__,
+                               psCheckpoint->ui32ValidationCheck));
+               }
+
+               PVR_DPF((PVR_DBG_WARNING,
+                       "%s psSyncCheckpoint(ID:%d)",
+                       __func__, psCheckpoint->ui32UID));
+               PVR_DPF((PVR_DBG_WARNING,
+                       "%s psSyncCheckpoint->ui32ValidationCheck=0x%x",
+                       __func__, psCheckpoint->ui32ValidationCheck));
+               PVR_DPF((PVR_DBG_WARNING,
+                       "%s psSyncCheckpoint->uiSpanAddr=0x%llx",
+                       __func__, psCheckpoint->uiSpanAddr));
+               PVR_DPF((PVR_DBG_WARNING,
+                       "%s psSyncCheckpoint->psSyncCheckpointBlock=<%p>",
+                       __func__, (void *) psCheckpoint->psSyncCheckpointBlock));
+               PVR_DPF((PVR_DBG_WARNING,
+                       "%s psSyncCheckpoint->psSyncCheckpointBlock->psContext=<%p>",
+                       __func__, (void *) psCheckpoint->psSyncCheckpointBlock->psContext));
+               PVR_DPF((PVR_DBG_WARNING,
+                       "%s psSyncCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA=<%p>",
+                       __func__, (void *) psCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA));
+
+               PVR_DPF((PVR_DBG_WARNING,
+                       "%s CALLING RA_Free(psSyncCheckpoint(ID:%d)<%p>), "
+                       "psSubAllocRA=<%p>, ui32SpanAddr=0x%llx",
+                       __func__,
+                       psCheckpoint->ui32UID,
+                       (void *) psCheckpoint,
+                       (void *) psCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA,
+                       psCheckpoint->uiSpanAddr));
+#endif
+
+               dllist_remove_node(psThis);
+
+               _FreeSyncCheckpoint(psCheckpoint);
+               ui32ItemsFreed++;
+       }
+
+       return ui32ItemsFreed;
+}
+#endif /* (SYNC_CHECKPOINT_POOL_SIZE > 0) */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/sync_fallback_server.c b/drivers/gpu/drm/img/img-rogue/services/server/common/sync_fallback_server.c
new file mode 100644 (file)
index 0000000..c1916e2
--- /dev/null
@@ -0,0 +1,3138 @@
+/*************************************************************************/ /*!
+@File           sync_fallback_server.c
+@Title          Fallback implementation of server fence sync interface.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    The server implementation of software fallback synchronisation.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(DEBUG)
+#define PVR_DPF_FUNCTION_TRACE_ON 1
+#endif
+
+#include "pvr_debug.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_sync_km.h"
+#include "pvrsrv_sync_server.h"
+#include "sync_fallback_server.h"
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_external.h"
+#include "sync_checkpoint_internal.h"
+#include "osfunc.h"
+#include "lock.h"
+#include "handle.h"
+#include "pvrsrv.h"
+#include "hash.h"
+#include "rgxhwperf.h"
+#include "pdump_km.h"
+#include "allocmem.h"
+
+#if defined(PVR_TESTING_UTILS)
+#include "tutils_km.h"
+#endif
+
+#include "ossecure_export.h"
+
+/* Refcounting debug.
+ * Define SYNC_FB_REF_DEBUG to print out a reference count log. */
+// #define SYNC_FB_REF_DEBUG 1
+
+#if defined(SYNC_FB_REF_DEBUG)
+#define PRINT_REF(type, opchr, pRef, ptr, name, info, num) \
+       PVR_LOG(("        %s REF(%c) -> %d - %6s: %-5u - %-30s (0x%p)", type, opchr, pRef, info, num, name, ptr))
+#else
+#define PRINT_REF(type, opchr, pRef, ptr, name, info, num)
+#endif
+
+#define REF_SET(type, pRef, val, ptr, name, info, num) OSAtomicWrite(pRef, val); PRINT_REF(type, '=', val, ptr, name, info, num)
+#define REF_INC(type, pRef,      ptr, name, info, num) OSAtomicIncrement(pRef); PRINT_REF(type, '+', OSAtomicRead(pRef), ptr, name, info, num)
+#define REF_DEC(type, pRef,      ptr, name, info, num) OSAtomicDecrement(pRef); PRINT_REF(type, '-', OSAtomicRead(pRef), ptr, name, info, num)
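+
+/* NB: with SYNC_FB_REF_DEBUG enabled the REF_* helpers above expand to two
+ * statements (the atomic operation followed by the trace print), so they are
+ * only safe when used as complete statements, not e.g. as the body of an
+ * unbraced if/else. */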
+
+/* Timelines */
+#define TL_REF_SET(pRef, val, ptr) REF_SET("TL", pRef, val, ptr, ptr->pszName, "UID", (IMG_UINT64) ptr->iUID)
+#define TL_REF_INC(pRef, ptr)      REF_INC("TL", pRef,      ptr, ptr->pszName, "UID", (IMG_UINT64) ptr->iUID)
+#define TL_REF_DEC(pRef, ptr)      REF_DEC("TL", pRef,      ptr, ptr->pszName, "UID", (IMG_UINT64) ptr->iUID)
+
+/* Fences */
+#define FENCE_REF_SET(pRef, val, ptr) REF_SET("FE", pRef, val, ptr, ptr->pszName, "#Syncs", ptr->uiNumSyncs)
+#define FENCE_REF_INC(pRef, ptr)      REF_INC("FE", pRef,      ptr, ptr->pszName, "#Syncs", ptr->uiNumSyncs)
+#define FENCE_REF_DEC(pRef, ptr)      REF_DEC("FE", pRef,      ptr, ptr->pszName, "#Syncs", ptr->uiNumSyncs)
+
+/* SyncPt */
+#define PT_REF_SET(pRef, val, ptr) REF_SET("PT", pRef, val, ptr, ptr->psTl->pszName, "SeqNum", ptr->uiSeqNum)
+#define PT_REF_INC(pRef, ptr)      REF_INC("PT", pRef,      ptr, ptr->psTl->pszName, "SeqNum", ptr->uiSeqNum)
+#define PT_REF_DEC(pRef, ptr)      REF_DEC("PT", pRef,      ptr, ptr->psTl->pszName, "SeqNum", ptr->uiSeqNum)
+
+
+/* Simple prints for error and warning */
+#define ERR(msg, ...) PVR_DPF((PVR_DBG_ERROR, \
+                          "%s: " msg, \
+                          __func__, \
+                          ##__VA_ARGS__));
+
+#define WRN(msg, ...) PVR_DPF((PVR_DBG_WARNING, \
+                          "%s: " msg, \
+                          __func__, \
+                          ##__VA_ARGS__));
+
+// #define SYNC_FB_DEBUG 1
+#if defined(SYNC_FB_DEBUG)
+#define DBG(...) PVR_LOG(__VA_ARGS__)
+#else
+#define DBG(...)
+#endif
+
+/* Functions for timelines */
+typedef IMG_BOOL (*PFN_SYNC_PT_HAS_SIGNALLED)(PVRSRV_SYNC_PT *psSyncPt);
+typedef void (*PFN_SYNC_FREE) (IMG_HANDLE hAttachedSync);
+
+
+/* The states a SyncPt can be in */
+typedef enum
+{
+       PVRSRV_SYNC_NOT_SIGNALLED,     /*!< sync pt has not yet signalled */
+       PVRSRV_SYNC_SIGNALLED,         /*!< sync pt has signalled */
+       PVRSRV_SYNC_ERRORED            /*!< sync pt has errored */
+} PVRSRV_SYNC_STATE;
+
+typedef enum
+{
+       PVRSRV_SYNC_HANDLE_UNKNOWN,
+       PVRSRV_SYNC_HANDLE_PVR,
+       PVRSRV_SYNC_HANDLE_SW
+} PVRSRV_SYNC_HANDLE_TYPE;
+
+typedef struct _PVRSRV_SYNC_SIGNAL_CB_
+{
+       DLLIST_NODE sCallbackNode;
+       IMG_HANDLE  hAttachedSync;
+       IMG_HANDLE  hPrivData;
+       PVRSRV_ERROR (*pfnSignal)(IMG_HANDLE hAttachedSync,
+                                 PVRSRV_SYNC_STATE eState);
+       PFN_SYNC_FREE pfnSyncFree;
+
+} PVRSRV_SYNC_SIGNAL_CB;
+
+struct _PVRSRV_SYNC_PT_
+{
+       /* The timeline this sync pt is associated with */
+       PVRSRV_TIMELINE_SERVER  *psTl;
+       IMG_UINT32                              uiSeqNum;
+       /* Refcount */
+       ATOMIC_T                                iRef;
+       /* Timeline list. Take TL lock! */
+       DLLIST_NODE                             sTlSyncList;
+       /* Timeline active list. Take TL lock! */
+       DLLIST_NODE                             sTlSyncActiveList;
+
+       /* List of callbacks to signal attached syncs.
+        *
+        * THE FIRST ITEM OF THIS LIST DEFINES THE FLAVOUR OF THE SYNC PT
+        * AND MUST BE CREATED TOGETHER WITH THE SYNC PT!
+        * Usually this is done when creating a fence.
+        * E.g. if PVR has been asked to create a fence we would
+        * create a sync pt for it with an attached sync checkpoint.
+        *
+        * In case someone waits for this sync pt who is not able
+        * to access the first item, a new foreign sync
+        * needs to be attached that can be read by the waiter.
+        * This might be the case if a second device is involved that cannot
+        * access sync checkpoints of another device or a device that needs
+        * to wait for a different sync type that it is not able to read
+        * e.g. a SW sync */
+       DLLIST_NODE                             sSignalCallbacks;
+       /* Can have a PVRSRV_SYNC_STATE */
+       ATOMIC_T                                iStatus;
+       /* PID of the sync pt creator, used for cleanup-unblocking */
+       IMG_UINT32                              uiPID;
+};
+
+/* Definition representing an attached SW sync pt.
+ * This is the counterpart to the SYNC_CHECKPOINTS for syncs that get
+ * signalled by the CPU. */
+typedef struct _PVRSRV_SYNC_PT_SW_
+{
+        IMG_BOOL bSignalled;
+} PVRSRV_SYNC_PT_SW;
+
+/*! Possible states for a PVRSRV_FENCE */
+typedef enum
+{
+    PVRSRV_FENCE_NOT_SIGNALLED,             /*!< fence has not yet signalled (not all components have signalled) */
+    PVRSRV_FENCE_SIGNALLED                  /*!< fence has signalled (all components have signalled/errored) */
+} PVRSRV_FENCE_STATE;
+
+struct _PVRSRV_FENCE_SERVER_
+{
+       IMG_UINT32                      uiNumSyncs;
+       PVRSRV_SYNC_PT          **apsFenceSyncList;
+       ATOMIC_T                        iRef;
+       /* Only written to when waiter checks if fence is met */
+       ATOMIC_T                        iStatus;
+       IMG_INT64                       iUID;
+       IMG_CHAR                        pszName[SYNC_FB_FENCE_MAX_LENGTH];
+       PVRSRV_DEVICE_NODE      *psDevNode;
+       DLLIST_NODE                     sFenceListNode;
+};
+
+struct _PVRSRV_FENCE_EXPORT_
+{
+       PVRSRV_FENCE_SERVER *psFence;
+};
+
+typedef struct _PVRSRV_TIMELINE_OPS_
+{
+       /* Supposed to be called when someone queries the TL
+        * to update its active generic syncs */
+       PFN_SYNC_PT_HAS_SIGNALLED pfnSyncPtHasSignalled;
+} PVRSRV_TIMELINE_OPS;
+
+struct _PVRSRV_TIMELINE_SERVER_
+{
+       /* Never take the fence lock after this one */
+       POS_LOCK                        hTlLock;
+       /* Timeline list. Contains all sync pts of the timeline that
+        * were not destroyed. Signalled or unsignalled. Take TL lock! */
+       DLLIST_NODE                     sSyncList;
+       /* Timeline active list. Contains all sync pts of the timeline
+        * that were not signalled yet.
+        * Before removing node, check if it's still in list. Take TL lock! */
+       DLLIST_NODE                     sSyncActiveList;
+       IMG_CHAR                        pszName[SYNC_FB_TIMELINE_MAX_LENGTH];
+       ATOMIC_T                        iRef;
+       PVRSRV_TIMELINE_OPS sTlOps;
+       DLLIST_NODE                     sTlList;
+       /* This ID helps to order the sync pts in a fence when merging */
+       IMG_INT64                       iUID;
+       /* The sequence number of the last sync pt created */
+       ATOMIC_T                        iSeqNum;
+       /* The sequence number of the latest signalled sync pt */
+       ATOMIC_T                        iLastSignalledSeqNum;
+       /* The PID of the process which created this timeline */
+       IMG_PID                         uiPID;
+};
+
+typedef struct _SYNC_FB_CONTEXT_DEVICE_LIST_
+{
+       DLLIST_NODE sDeviceListNode;
+       IMG_HANDLE hDBGNotify;
+       PVRSRV_DEVICE_NODE *psDevice;
+} SYNC_FB_CONTEXT_DEVICE_LIST;
+
+typedef struct _SYNC_FB_CONTEXT_
+{
+       IMG_HANDLE hSyncEventObject;
+       IMG_HANDLE hCMDNotify;
+       DLLIST_NODE sDeviceList;
+       DLLIST_NODE sTlList;
+       POS_LOCK hFbContextLock;
+       DLLIST_NODE sFenceList; /* protected by hFbContextLock */
+       HASH_TABLE *sCheckpointHashTable; /* protected by hFbContextLock */
+       PFN_SYNC_CHECKPOINT_STRUCT sSyncCheckpointReg;
+} SYNC_FB_CONTEXT;
+
+/* GLOBALS */
+static SYNC_FB_CONTEXT gsSyncFbContext;
+
+/* Declarations */
+static void _SyncFbTimelineAcquire(PVRSRV_TIMELINE_SERVER *psTl);
+static void _SyncFbFenceAcquire(PVRSRV_FENCE_SERVER *psFence);
+static PVRSRV_ERROR _SyncFbSyncPtSignalAttached(PVRSRV_SYNC_PT *psSyncPt,
+                                        PVRSRV_SYNC_STATE eSignal);
+static PVRSRV_ERROR SyncFbFenceRollbackPVR(PVRSRV_FENCE iFence, void *pvFenceData);
+static PVRSRV_ERROR _SyncFbSyncPtSignalPVR(IMG_HANDLE hSync,
+                                           PVRSRV_SYNC_STATE eState);
+static PVRSRV_ERROR _SyncFbSyncPtSignalSW(IMG_HANDLE hSync,
+                                          PVRSRV_SYNC_STATE eState);
+static IMG_BOOL _SyncFbFenceSyncsHaveSignalled(PVRSRV_FENCE_SERVER *psFence);
+static IMG_BOOL _SyncFbSyncPtHasSignalled(PVRSRV_SYNC_PT *psSyncPt);
+static IMG_BOOL _SyncFbSyncPtHasSignalledPVR(PVRSRV_SYNC_PT *psSyncPt);
+static IMG_BOOL _SyncFbSyncPtHasSignalledSW(PVRSRV_SYNC_PT *psSyncPt);
+static IMG_BOOL _SyncFbFenceAddPt(PVRSRV_FENCE_SERVER *psFence,
+                                  IMG_UINT32 *i,
+                                  PVRSRV_SYNC_PT *psSyncPt);
+static PVRSRV_ERROR _SyncFbSWTimelineFenceCreate(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                 PVRSRV_TIMELINE_SERVER *psTl,
+                                                 IMG_UINT32 uiFenceNameSize,
+                                                 const IMG_CHAR *pszFenceName,
+                                                 PVRSRV_FENCE_SERVER **ppsOutputFence,
+                                                 IMG_UINT64 *pui64SyncPtIdx);
+static PVRSRV_ERROR _SyncSWTimelineAdvanceSigErr(PVRSRV_TIMELINE_SERVER *psTl,
+                                                 PVRSRV_SYNC_STATE eState,
+                                                 IMG_UINT64 *pui64SyncPtIdx);
+static void _SyncSWTimelineCheckForUnsignalledPts(PVRSRV_TIMELINE_SERVER *psTl);
+
+
+/*****************************************************************************/
+/*                                                                           */
+/*                         GENERIC FUNCTIONS                                 */
+/*                                                                           */
+/*****************************************************************************/
+
+
+/* Add a fence to the global fence list */
+static inline void _SyncFbFenceListAdd(PVRSRV_FENCE_SERVER *psFence)
+{
+       OSLockAcquire(gsSyncFbContext.hFbContextLock);
+       dllist_add_to_tail(&gsSyncFbContext.sFenceList, &psFence->sFenceListNode);
+       OSLockRelease(gsSyncFbContext.hFbContextLock);
+}
+
+/* Remove a fence from the global fence list */
+static inline void _SyncFbFenceListDel(PVRSRV_FENCE_SERVER *psFence)
+{
+       OSLockAcquire(gsSyncFbContext.hFbContextLock);
+       dllist_remove_node(&psFence->sFenceListNode);
+       OSLockRelease(gsSyncFbContext.hFbContextLock);
+}
+
+/* Add a timeline to the global timeline list */
+static inline void _SyncFbFTimelineListAdd(PVRSRV_TIMELINE_SERVER *psTl)
+{
+       OSLockAcquire(gsSyncFbContext.hFbContextLock);
+       dllist_add_to_tail(&gsSyncFbContext.sTlList, &psTl->sTlList);
+       OSLockRelease(gsSyncFbContext.hFbContextLock);
+}
+
+/* Remove a timeline from the global timeline list */
+static inline void _SyncFbTimelineListDel(PVRSRV_TIMELINE_SERVER *psTl)
+{
+       OSLockAcquire(gsSyncFbContext.hFbContextLock);
+       dllist_remove_node(&psTl->sTlList);
+       OSLockRelease(gsSyncFbContext.hFbContextLock);
+}
+
+/* Signal the sync event object to wake up waiters */
+static PVRSRV_ERROR _SyncFbSignalEO(void)
+{
+       PVRSRV_ERROR eError;
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(gsSyncFbContext.hSyncEventObject != NULL);
+
+       eError = OSEventObjectSignal(gsSyncFbContext.hSyncEventObject);
+
+       PVR_DPF_RETURN_RC(eError);
+}
+
+/* Retrieve the process handle base for the calling PID to look up sync objects */
+static PVRSRV_ERROR _SyncFbGetProcHandleBase(PVRSRV_HANDLE_BASE **ppsHandleBase)
+{
+       *ppsHandleBase = PVRSRVRetrieveProcessHandleBase();
+
+       if (*ppsHandleBase == NULL)
+       {
+               ERR("Failed to retrieve process handle base");
+               return PVRSRV_ERROR_UNABLE_TO_RETRIEVE_HASH_VALUE;
+       }
+
+       return PVRSRV_OK;
+}
+
+/* Look up a handle in the process handle base of the calling PID */
+static PVRSRV_ERROR _SyncFbLookupProcHandle(IMG_HANDLE hHandle,
+                                            PVRSRV_HANDLE_TYPE eType,
+                                            IMG_BOOL bRefHandle,
+                                            void **ppvData,
+                                            PVRSRV_HANDLE_BASE **ppsBase)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_HANDLE_BASE *psHandleBase;
+
+       eError = _SyncFbGetProcHandleBase(&psHandleBase);
+
+       PVR_GOTO_IF_ERROR(eError, e1);
+
+       DBG(("%s: Handle Base: %p", __func__, psHandleBase));
+
+       eError = PVRSRVLookupHandle(psHandleBase,
+                                   ppvData,
+                                   hHandle,
+                                   eType,
+                                   bRefHandle);
+       PVR_GOTO_IF_ERROR(eError, e1);
+
+       *ppsBase = psHandleBase;
+
+       return PVRSRV_OK;
+
+e1:
+       return eError;
+}
+
+/* Destroy a handle in case a resource has not been registered with
+ * the resource manager */
+static PVRSRV_ERROR _SyncFbDestroyHandle(IMG_HANDLE hHandle,
+                                         PVRSRV_HANDLE_TYPE eType)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_HANDLE_BASE *psHandleBase;
+
+       eError = _SyncFbGetProcHandleBase(&psHandleBase);
+       PVR_GOTO_IF_ERROR(eError, e1);
+
+       eError = PVRSRVDestroyHandle(psHandleBase, hHandle, eType);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVDestroyHandle", e1);
+
+       return PVRSRV_OK;
+
+e1:
+       return eError;
+}
+
+/* Currently unused */
+/*
+static PVRSRV_ERROR _SyncFbFindProcHandle(void *pvData,
+                                          PVRSRV_HANDLE_TYPE eType,
+                                          IMG_HANDLE *phHandle,
+                                          PVRSRV_HANDLE_BASE **ppsBase)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_HANDLE_BASE *psHandleBase;
+
+       PVR_DPF_ENTERED;
+
+       eError = _SyncFbGetProcHandleBase(&psHandleBase);
+
+       PVR_GOTO_IF_ERROR(eError, eExit);
+
+       eError = PVRSRVFindHandle(psHandleBase,
+                                 phHandle,
+                                 pvData,
+                                                         eType);
+       PVR_GOTO_IF_ERROR(eError, eExit);
+
+       *ppsBase = psHandleBase;
+
+       PVR_DPF_RETURN_OK;
+
+eExit:
+       PVR_DPF_RETURN_RC(eError);
+}
+*/
+
+/* Returns the type of a sync point determined by its registered
+ * signalling callback. Type can be e.g. a PVR sync point containing
+ * sync checkpoints or a software sync point. */
+static PVRSRV_SYNC_HANDLE_TYPE _SyncFbSyncPtHandleType(PVRSRV_SYNC_SIGNAL_CB *psCb)
+{
+       if (psCb == NULL)
+               return PVRSRV_SYNC_HANDLE_UNKNOWN;
+
+       if (psCb->pfnSignal == &_SyncFbSyncPtSignalPVR)
+               return PVRSRV_SYNC_HANDLE_PVR;
+
+       if (psCb->pfnSignal == &_SyncFbSyncPtSignalSW)
+               return PVRSRV_SYNC_HANDLE_SW;
+
+       return PVRSRV_SYNC_HANDLE_UNKNOWN;
+}
+
+static PVRSRV_SYNC_HANDLE_TYPE _SyncFbTimelineHandleType(PVRSRV_TIMELINE_SERVER *psTl)
+{
+       if (psTl == NULL)
+               return PVRSRV_SYNC_HANDLE_UNKNOWN;
+
+       if (psTl->sTlOps.pfnSyncPtHasSignalled == &_SyncFbSyncPtHasSignalledPVR)
+               return PVRSRV_SYNC_HANDLE_PVR;
+
+       if (psTl->sTlOps.pfnSyncPtHasSignalled == &_SyncFbSyncPtHasSignalledSW)
+               return PVRSRV_SYNC_HANDLE_SW;
+
+       return PVRSRV_SYNC_HANDLE_UNKNOWN;
+}
+
+/* Print info about a sync point to the debug dump log */
+static void _SyncFbDebugRequestPrintSyncPt(PVRSRV_SYNC_PT *psSyncPt,
+                                           IMG_BOOL bPrintTl,
+                                           DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                           void *pvDumpDebugFile)
+{
+       PDLLIST_NODE psCBNode, psNextCBNode;
+       PVRSRV_SYNC_SIGNAL_CB *psCb;
+
+       if (bPrintTl)
+       {
+               PVR_DUMPDEBUG_LOG(" - SyncPt: SeqNum: %u, Refs: %d, Timeline: %-9s <%#"IMG_UINT64_FMTSPECx">, %-9s - <0x%p>",
+                                                 psSyncPt->uiSeqNum,
+                                                 OSAtomicRead(&psSyncPt->iRef),
+                                                 psSyncPt->psTl->pszName,
+                                                 psSyncPt->psTl->iUID,
+                                                 OSAtomicRead(&psSyncPt->iStatus) == PVRSRV_SYNC_SIGNALLED ? "Signalled" :
+                                                       OSAtomicRead(&psSyncPt->iStatus) == PVRSRV_SYNC_ERRORED ? "Errored" : "Active",
+                                                 psSyncPt);
+       }
+       else
+       {
+               PVR_DUMPDEBUG_LOG(" - SyncPt: SeqNum: %u, Refs: %d, %-9s - <0x%p>",
+                                                 psSyncPt->uiSeqNum,
+                                                 OSAtomicRead(&psSyncPt->iRef),
+                                                 OSAtomicRead(&psSyncPt->iStatus) == PVRSRV_SYNC_SIGNALLED ? "Signalled" :
+                                                       OSAtomicRead(&psSyncPt->iStatus) == PVRSRV_SYNC_ERRORED ? "Errored" : "Active",
+                                                 psSyncPt);
+       }
+
+       /* ... all attached syncs to that sync point*/
+       dllist_foreach_node(&psSyncPt->sSignalCallbacks,
+                           psCBNode,
+                           psNextCBNode)
+       {
+               psCb = IMG_CONTAINER_OF(psCBNode,
+                                       PVRSRV_SYNC_SIGNAL_CB,
+                                       sCallbackNode);
+
+               switch (_SyncFbSyncPtHandleType(psCb))
+               {
+                       case PVRSRV_SYNC_HANDLE_PVR:
+                       {
+                               PSYNC_CHECKPOINT pCP = psCb->hAttachedSync;
+                               PVR_DUMPDEBUG_LOG("    - CbType: PVR-Checkpoint, ID: %u, FWAddr: %#08x, Enq: %d, Ref: %d, %-9s - <0x%p>",
+                                                 SyncCheckpointGetId(pCP),
+                                                 SyncCheckpointGetFirmwareAddr(pCP),
+                                                 SyncCheckpointGetEnqueuedCount(pCP),
+                                                 SyncCheckpointGetReferenceCount(pCP),
+                                                 SyncCheckpointGetStateString(pCP),
+                                                 pCP);
+                               break;
+                       }
+                       case PVRSRV_SYNC_HANDLE_SW:
+                       {
+                               PVRSRV_SYNC_PT_SW *psSWPt = psCb->hAttachedSync;
+                               PVR_DUMPDEBUG_LOG("    - CbType: SW-Syncpoint, %-9s - <0x%p>",
+                                                 psSWPt->bSignalled ? "Signalled" : "Active",
+                                                 psSWPt);
+                               break;
+                       }
+                       case PVRSRV_SYNC_HANDLE_UNKNOWN:
+                       default:
+                               PVR_DUMPDEBUG_LOG("    - CbType: Unknown - <0x%p>",
+                                                 psCb->hAttachedSync);
+               }
+       }
+}
+
+/* Function registered with the debug dump mechanism. Prints out all timelines
+ * with pending syncs. */
+static void _SyncFbDebugRequest(IMG_HANDLE hDebugRequestHandle,
+                                IMG_UINT32 ui32VerbLevel,
+                                DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                void *pvDumpDebugFile)
+{
+       if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM))
+       {
+               IMG_UINT32 i;
+
+               PDLLIST_NODE psTlNode, psNextTlNode;
+               PVRSRV_TIMELINE_SERVER *psTl;
+
+               PDLLIST_NODE psFenceNode, psNextFenceNode;
+               PVRSRV_FENCE_SERVER *psFence;
+
+               PDLLIST_NODE psPtNode, psNextPtNode;
+               PVRSRV_SYNC_PT *psSyncPt;
+
+               OSLockAcquire(gsSyncFbContext.hFbContextLock);
+
+               PVR_DUMPDEBUG_LOG("------[ Fallback Fence Sync: timelines ]------");
+
+               /* Iterate over all timelines */
+               dllist_foreach_node(&gsSyncFbContext.sTlList, psTlNode, psNextTlNode)
+               {
+
+                       psTl = IMG_CONTAINER_OF(psTlNode,
+                                               PVRSRV_TIMELINE_SERVER,
+                                               sTlList);
+
+                       OSLockAcquire(psTl->hTlLock);
+
+                       PVR_DUMPDEBUG_LOG("Timeline: %s, SeqNum: %d/%d - <%#"IMG_UINT64_FMTSPECx">",
+                                         psTl->pszName,
+                                         OSAtomicRead(&psTl->iLastSignalledSeqNum),
+                                         OSAtomicRead(&psTl->iSeqNum),
+                                         psTl->iUID);
+
+                       /* ... all active sync points in the timeline */
+                       dllist_foreach_node(&psTl->sSyncActiveList, psPtNode, psNextPtNode)
+                       {
+
+                               psSyncPt = IMG_CONTAINER_OF(psPtNode,
+                                                           PVRSRV_SYNC_PT,
+                                                           sTlSyncActiveList);
+
+                               _SyncFbDebugRequestPrintSyncPt(psSyncPt,
+                                                              IMG_FALSE,
+                                                              pfnDumpDebugPrintf,
+                                                              pvDumpDebugFile);
+
+                       }
+                       OSLockRelease(psTl->hTlLock);
+               }
+
+               PVR_DUMPDEBUG_LOG("------[ Fallback Fence Sync: fences ]------");
+
+               /* Iterate over all fences */
+               dllist_foreach_node(&gsSyncFbContext.sFenceList,
+                                                       psFenceNode,
+                                                       psNextFenceNode)
+               {
+                       psFence = IMG_CONTAINER_OF(psFenceNode,
+                                                  PVRSRV_FENCE_SERVER,
+                                                  sFenceListNode);
+
+                       PVR_DUMPDEBUG_LOG("Fence: %s, %-9s - <%#"IMG_UINT64_FMTSPECx">",
+                                         psFence->pszName,
+                                         _SyncFbFenceSyncsHaveSignalled(psFence) ?
+                                             "Signalled" : "Active",
+                                              psFence->iUID);
+
+                       /* ... all sync points in the fence */
+                       for (i = 0; i < psFence->uiNumSyncs; i++)
+                       {
+                               _SyncFbDebugRequestPrintSyncPt(psFence->apsFenceSyncList[i],
+                                               IMG_TRUE,
+                                                              pfnDumpDebugPrintf,
+                                                              pvDumpDebugFile);
+                       }
+               }
+
+               OSLockRelease(gsSyncFbContext.hFbContextLock);
+       }
+
+}
+
+/* Notify callback that is called as part of the RGX MISR e.g. after FW
+ * signalled the host that work completed. */
+static void _SyncFbTimelineUpdate_NotifyCMD(void *psSyncFbContext)
+{
+       PVRSRV_TIMELINE_SERVER *psTl;
+       PVRSRV_SYNC_PT *psSyncPt;
+       PDLLIST_NODE psTlList = &gsSyncFbContext.sTlList;
+       PDLLIST_NODE psCurrentTl, psNextTl;
+       PDLLIST_NODE psCurrentPt, psNextPt;
+       IMG_BOOL bSignalled = IMG_FALSE, bSignal = IMG_FALSE;
+
+       PVR_DPF_ENTERED;
+
+       /* Outer loop over all timelines */
+       OSLockAcquire(gsSyncFbContext.hFbContextLock);
+       dllist_foreach_node(psTlList, psCurrentTl, psNextTl)
+       {
+               psTl = IMG_CONTAINER_OF(psCurrentTl,
+                                       PVRSRV_TIMELINE_SERVER,
+                                       sTlList);
+
+               /* Inner loop over all SyncPts in the timeline.
+                * Check & Update all active SyncPts */
+               OSLockAcquire(psTl->hTlLock);
+               dllist_foreach_node(&psTl->sSyncActiveList, psCurrentPt, psNextPt)
+               {
+                       psSyncPt = IMG_CONTAINER_OF(psCurrentPt,
+                                                   PVRSRV_SYNC_PT,
+                                                   sTlSyncActiveList);
+
+                       /* If the SyncPt has been signalled we have to
+                        * update all attached syncs */
+                       bSignalled = psTl->sTlOps.pfnSyncPtHasSignalled(psSyncPt);
+                       if (bSignalled)
+                       {
+                               /* Wake up waiters after releasing the locks */
+                               bSignal = IMG_TRUE;
+
+                               /* Remove the SyncPt from the active list of the timeline. */
+                               dllist_remove_node(psCurrentPt);
+                       }
+                       else
+                       {
+                               /* No need to check further points on this timeline because
+                                * this sync pt will be signalled first */
+                               break;
+                       }
+
+               }/* End inner loop */
+               OSLockRelease(psTl->hTlLock);
+
+       } /* End outer loop */
+       OSLockRelease(gsSyncFbContext.hFbContextLock);
+
+       if (bSignal)
+       {
+               PVR_LOG_IF_ERROR(_SyncFbSignalEO(), "_SyncFbSignalEO");
+       }
+
+       PVR_DPF_RETURN;
+}
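+
+/* The early break in the inner loop above relies on sync pts on a timeline
+ * signalling in the order they were created, so the first still-active point
+ * on the active list gates every later one; once it is found there is no
+ * need to poll the rest. */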
+
+static IMG_UINT32
+_SyncCheckpointFWAddrHash(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen)
+{
+       IMG_UINT32 *p = (IMG_UINT32 *)pKey;
+       IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINT32);
+       IMG_UINT32 ui;
+       IMG_UINT32 uHashKey = 0;
+
+       PVR_UNREFERENCED_PARAMETER(uHashTabLen);
+
+       for (ui = 0; ui < uKeyLen; ui++)
+       {
+               IMG_UINT32 uHashPart = *p++;
+
+               uHashPart += (uHashPart << 12);
+               uHashPart ^= (uHashPart >> 22);
+               uHashPart += (uHashPart << 4);
+               uHashPart ^= (uHashPart >> 9);
+               uHashPart += (uHashPart << 10);
+               uHashPart ^= (uHashPart >> 2);
+               uHashPart += (uHashPart << 7);
+               uHashPart ^= (uHashPart >> 12);
+
+               uHashKey += uHashPart;
+       }
+
+       return uHashKey;
+}
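+
+/* The hash above applies a Jenkins-style shift/xor mix to each 32-bit word of
+ * the key (here a sync checkpoint firmware address) and sums the results; the
+ * table length parameter is deliberately ignored, as the HASH_* layer
+ * presumably reduces the returned value to a bucket index itself. */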
+
+static IMG_BOOL
+_SyncCheckpointFWAddrCompare(size_t uKeySize, void *pKey1, void *pKey2)
+{
+       IMG_UINT32 *p1 = (IMG_UINT32 *)pKey1;
+       IMG_UINT32 *p2 = (IMG_UINT32 *)pKey2;
+       IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINT32);
+       IMG_UINT32 ui;
+
+       for (ui = 0; ui < uKeyLen; ui++)
+       {
+               if (*p1++ != *p2++)
+                       return IMG_FALSE;
+       }
+
+       return IMG_TRUE;
+}
+
+#if defined(PDUMP)
+static PVRSRV_ERROR SyncFbFenceGetCheckpoints(PVRSRV_FENCE hFence, IMG_UINT32 *puiNumCheckpoints,
+                                               PSYNC_CHECKPOINT **papsCheckpoints)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_FENCE_SERVER *psFence;
+       PVRSRV_HANDLE_BASE *psHBase;
+       PSYNC_CHECKPOINT *apsCheckpoints;
+       PSYNC_CHECKPOINT psCheckpoint;
+       PVRSRV_SYNC_SIGNAL_CB *psSyncCB;
+       PVRSRV_SYNC_PT *psSyncPt;
+       PDLLIST_NODE psNode;
+       IMG_UINT32 i, uiNumCheckpoints = 0;
+
+       if (hFence == PVRSRV_NO_FENCE)
+       {
+               *puiNumCheckpoints = 0;
+               eError = PVRSRV_OK;
+               goto e0;
+       }
+
+       eError = _SyncFbLookupProcHandle((IMG_HANDLE) (uintptr_t) hFence,
+                                        PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER,
+                                        IMG_TRUE,
+                                        (void**)&psFence,
+                                        &psHBase);
+       PVR_GOTO_IF_ERROR(eError, e0);
+
+       apsCheckpoints = OSAllocMem(sizeof(*apsCheckpoints) * psFence->uiNumSyncs);
+       PVR_LOG_GOTO_IF_NOMEM(apsCheckpoints, eError, e1);
+
+       OSLockAcquire(gsSyncFbContext.hFbContextLock);
+
+       /* Increase refcount to make sure fence is not destroyed while waiting */
+       _SyncFbFenceAcquire(psFence);
+
+       /* Go through all syncs and add them to the list */
+       for (i = 0; i < psFence->uiNumSyncs; i++)
+       {
+               psSyncPt = psFence->apsFenceSyncList[i];
+
+               psNode = dllist_get_next_node(&psSyncPt->sSignalCallbacks);
+               psSyncCB = IMG_CONTAINER_OF(psNode, PVRSRV_SYNC_SIGNAL_CB, sCallbackNode);
+
+               if (_SyncFbSyncPtHandleType(psSyncCB) == PVRSRV_SYNC_HANDLE_PVR)
+               {
+                       psCheckpoint = (PSYNC_CHECKPOINT) psSyncCB->hAttachedSync;
+                       apsCheckpoints[uiNumCheckpoints++] = psCheckpoint;
+               }
+       }
+
+       SyncFbFenceRelease(psFence);
+
+       OSLockRelease(gsSyncFbContext.hFbContextLock);
+
+       *puiNumCheckpoints = uiNumCheckpoints;
+       *papsCheckpoints = apsCheckpoints;
+
+e1:
+       PVRSRVReleaseHandle(psHBase,
+                           (IMG_HANDLE) (uintptr_t) hFence,
+                           PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER);
+e0:
+       return eError;
+}
+#endif
+
+PVRSRV_ERROR SyncFbRegisterSyncFunctions(void)
+{
+       /* Initialise struct and register with sync_checkpoint.c */
+       gsSyncFbContext.sSyncCheckpointReg.pfnFenceResolve = &SyncFbFenceResolvePVR;
+       gsSyncFbContext.sSyncCheckpointReg.pfnFenceCreate = &SyncFbFenceCreatePVR;
+       gsSyncFbContext.sSyncCheckpointReg.pfnFenceDataRollback = &SyncFbFenceRollbackPVR;
+       gsSyncFbContext.sSyncCheckpointReg.pfnFenceFinalise = NULL; /* no fence finalise function required */
+       gsSyncFbContext.sSyncCheckpointReg.pfnNoHWUpdateTimelines = &_SyncFbTimelineUpdate_NotifyCMD;
+       gsSyncFbContext.sSyncCheckpointReg.pfnFreeCheckpointListMem = OSFreeMem;
+       gsSyncFbContext.sSyncCheckpointReg.pfnDumpInfoOnStalledUFOs = &SyncFbDumpInfoOnStalledUFOs;
+       OSStringLCopy(gsSyncFbContext.sSyncCheckpointReg.pszImplName, "SyncFb", SYNC_CHECKPOINT_IMPL_MAX_STRLEN);
+#if defined(PDUMP)
+       gsSyncFbContext.sSyncCheckpointReg.pfnSyncFenceGetCheckpoints = &SyncFbFenceGetCheckpoints;
+#endif
+
+       return SyncCheckpointRegisterFunctions(&gsSyncFbContext.sSyncCheckpointReg);
+}
+
+PVRSRV_ERROR SyncFbRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR eError;
+       SYNC_FB_CONTEXT_DEVICE_LIST *psNewDeviceEntry;
+
+       PVR_DPF_ENTERED;
+
+       /* Initialise the sync fallback context */
+       if (gsSyncFbContext.hSyncEventObject == NULL)
+       {
+               eError = OSEventObjectCreate("Sync event object",
+                                            &gsSyncFbContext.hSyncEventObject);
+               PVR_GOTO_IF_ERROR(eError, e1);
+
+               dllist_init(&gsSyncFbContext.sTlList);
+               dllist_init(&gsSyncFbContext.sFenceList);
+               dllist_init(&gsSyncFbContext.sDeviceList);
+               gsSyncFbContext.sCheckpointHashTable = HASH_Create_Extended(64, sizeof(IMG_UINT32), _SyncCheckpointFWAddrHash, _SyncCheckpointFWAddrCompare);
+
+               eError = OSLockCreate(&gsSyncFbContext.hFbContextLock);
+               PVR_GOTO_IF_ERROR(eError, e2);
+
+               eError = PVRSRVRegisterCmdCompleteNotify(&gsSyncFbContext.hCMDNotify,
+                                                        &_SyncFbTimelineUpdate_NotifyCMD,
+                                                        &gsSyncFbContext);
+               PVR_GOTO_IF_ERROR(eError, e3);
+       }
+
+       psNewDeviceEntry = OSAllocMem(sizeof(*psNewDeviceEntry));
+       PVR_GOTO_IF_NOMEM(psNewDeviceEntry, eError, e4);
+
+       OSLockAcquire(gsSyncFbContext.hFbContextLock);
+       dllist_add_to_tail(&gsSyncFbContext.sDeviceList, &psNewDeviceEntry->sDeviceListNode);
+       OSLockRelease(gsSyncFbContext.hFbContextLock);
+
+       psNewDeviceEntry->psDevice = psDeviceNode;
+
+       eError = PVRSRVRegisterDeviceDbgRequestNotify(&psNewDeviceEntry->hDBGNotify,
+                                                     psDeviceNode,
+                                                     _SyncFbDebugRequest,
+                                                     DEBUG_REQUEST_FALLBACKSYNC,
+                                                     NULL);
+       PVR_GOTO_IF_ERROR(eError, e5);
+
+       PVR_DPF_RETURN_RC(eError);
+
+
+e5:
+       OSLockAcquire(gsSyncFbContext.hFbContextLock);
+       dllist_remove_node(&psNewDeviceEntry->sDeviceListNode);
+       OSLockRelease(gsSyncFbContext.hFbContextLock);
+       OSFreeMem(psNewDeviceEntry);
+e4:
+       PVRSRVUnregisterCmdCompleteNotify(gsSyncFbContext.hCMDNotify);
+e3:
+       OSLockDestroy(gsSyncFbContext.hFbContextLock);
+e2:
+       OSEventObjectDestroy(gsSyncFbContext.hSyncEventObject);
+e1:
+       PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR SyncFbDeregisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       SYNC_FB_CONTEXT_DEVICE_LIST *psDeviceEntry;
+       PDLLIST_NODE psNode, psNext;
+
+       PVR_DPF_ENTERED;
+
+       /* Return if Init was never called */
+       if (gsSyncFbContext.hSyncEventObject == NULL)
+               goto e1;
+
+       /* Check device list for the given device and remove it */
+       dllist_foreach_node(&gsSyncFbContext.sDeviceList, psNode, psNext)
+       {
+               psDeviceEntry = IMG_CONTAINER_OF(psNode,
+                                                SYNC_FB_CONTEXT_DEVICE_LIST,
+                                                sDeviceListNode);
+
+               if (psDeviceEntry->psDevice == psDeviceNode)
+               {
+                       PVRSRVUnregisterDeviceDbgRequestNotify(psDeviceEntry->hDBGNotify);
+
+                       OSLockAcquire(gsSyncFbContext.hFbContextLock);
+                       dllist_remove_node(psNode);
+                       OSLockRelease(gsSyncFbContext.hFbContextLock);
+
+                       OSFreeMem(psDeviceEntry);
+                       break;
+               }
+       }
+
+       /* If there are still devices registered with us don't deinit module */
+       if (!dllist_is_empty(&gsSyncFbContext.sDeviceList))
+       {
+               goto e1;
+       }
+
+       PVRSRVUnregisterCmdCompleteNotify(gsSyncFbContext.hCMDNotify);
+
+
+       eError = OSEventObjectDestroy(gsSyncFbContext.hSyncEventObject);
+       PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+
+       gsSyncFbContext.hSyncEventObject = NULL;
+
+       OSLockDestroy(gsSyncFbContext.hFbContextLock);
+
+e1:
+       return eError;
+}
+
+/* HOLD TL LOCK!
+ * Creates a new sync point on a timeline */
+static PVRSRV_ERROR _SyncFbSyncPtCreate(PVRSRV_SYNC_PT **ppsSyncPt,
+                                       PVRSRV_TIMELINE_SERVER *psTl,
+                                       IMG_UINT32 uiSeqNumber)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_SYNC_PT *psNewSyncPt;
+
+       PVR_DPF_ENTERED;
+
+       psNewSyncPt = OSAllocMem(sizeof(*psNewSyncPt));
+       PVR_LOG_GOTO_IF_NOMEM(psNewSyncPt, eError, e1);
+
+       psNewSyncPt->psTl = psTl;
+       OSAtomicWrite(&psNewSyncPt->iStatus, PVRSRV_SYNC_NOT_SIGNALLED);
+
+       psNewSyncPt->uiSeqNum = uiSeqNumber;
+       psNewSyncPt->uiPID = OSGetCurrentClientProcessIDKM();
+       PT_REF_SET(&psNewSyncPt->iRef, 1, psNewSyncPt);
+
+       dllist_init(&psNewSyncPt->sTlSyncList);
+       dllist_init(&psNewSyncPt->sTlSyncActiveList);
+       dllist_init(&psNewSyncPt->sSignalCallbacks);
+
+       /* Increment Tl ref due to new checkpoint */
+       _SyncFbTimelineAcquire(psTl);
+
+       dllist_add_to_tail(&psTl->sSyncList, &psNewSyncPt->sTlSyncList);
+       dllist_add_to_tail(&psTl->sSyncActiveList, &psNewSyncPt->sTlSyncActiveList);
+
+       *ppsSyncPt = psNewSyncPt;
+
+       PVR_DPF_RETURN_OK;
+
+e1:
+       PVR_DPF_RETURN_RC(eError);
+}
+
+/* Increment sync point refcount */
+static void _SyncFbSyncPtAcquire(PVRSRV_SYNC_PT *psSyncPt)
+{
+       PT_REF_INC(&psSyncPt->iRef, psSyncPt);
+}
+
+/* Release and maybe destroy sync point if refcount is 0 */
+static PVRSRV_ERROR _SyncFbSyncPtRelease(PVRSRV_SYNC_PT *psSyncPt,
+                                         IMG_BOOL bError)
+{
+       PVRSRV_ERROR eError;
+       PDLLIST_NODE psNode, psNext;
+       PVRSRV_SYNC_SIGNAL_CB *psSyncCB;
+       IMG_INT iRef;
+
+       PVR_DPF_ENTERED1(psSyncPt);
+
+       iRef = PT_REF_DEC(&psSyncPt->iRef, psSyncPt);
+       if (iRef != 0)
+       {
+               eError = PVRSRV_OK;
+               goto e1;
+       }
+
+       OSLockAcquire(gsSyncFbContext.hFbContextLock);
+       /* Remove all checkpoints from the ufo lookup table.*/
+       dllist_foreach_node(&psSyncPt->sSignalCallbacks, psNode, psNext)
+       {
+               psSyncCB = IMG_CONTAINER_OF(psNode,
+                                           PVRSRV_SYNC_SIGNAL_CB,
+                                           sCallbackNode);
+
+               if (_SyncFbSyncPtHandleType(psSyncCB) == PVRSRV_SYNC_HANDLE_PVR)
+               {
+                       IMG_UINT32 ui32FwAddr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT) psSyncCB->hAttachedSync);
+
+                       DBG(("%s: Removing key <%#08x>", __func__, ui32FwAddr));
+                       HASH_Remove_Extended(gsSyncFbContext.sCheckpointHashTable, &ui32FwAddr);
+               }
+       }
+       OSLockRelease(gsSyncFbContext.hFbContextLock);
+
+       OSLockAcquire(psSyncPt->psTl->hTlLock);
+       if (dllist_node_is_in_list(&psSyncPt->sTlSyncActiveList))
+               dllist_remove_node(&psSyncPt->sTlSyncActiveList);
+
+       dllist_remove_node(&psSyncPt->sTlSyncList);
+
+       if (bError)
+       {
+               _SyncFbSyncPtSignalAttached(psSyncPt, PVRSRV_SYNC_ERRORED);
+       }
+       OSLockRelease(psSyncPt->psTl->hTlLock);
+
+       /* Remove all attached nodes and signal them.*/
+       while (!dllist_is_empty(&psSyncPt->sSignalCallbacks))
+       {
+               psNode = dllist_get_next_node(&psSyncPt->sSignalCallbacks);
+               psSyncCB = IMG_CONTAINER_OF(psNode,
+                                           PVRSRV_SYNC_SIGNAL_CB,
+                                           sCallbackNode);
+
+               psSyncCB->pfnSyncFree(psSyncCB->hAttachedSync);
+               dllist_remove_node(&psSyncCB->sCallbackNode);
+               OSFreeMem(psSyncCB);
+       }
+
+       eError = SyncFbTimelineRelease(psSyncPt->psTl);
+       PVR_LOG_IF_ERROR(eError, "SyncFbTimelineRelease");
+
+       OSFreeMem(psSyncPt);
+
+       PVR_DPF_RETURN_OK;
+
+e1:
+       PVR_DPF_RETURN_RC(eError);
+}
+
+/* HOLD TL LOCK!
+ * Mark all attached syncs of a sync point with the state eSignal */
+static PVRSRV_ERROR _SyncFbSyncPtSignalAttached(PVRSRV_SYNC_PT *psSyncPt,
+                                                PVRSRV_SYNC_STATE eSignal)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK, eRet;
+       PDLLIST_NODE psCurrentCB, psNextCB;
+       PVRSRV_SYNC_SIGNAL_CB *psCB;
+
+       PVR_DPF_ENTERED1(psSyncPt);
+
+       if (dllist_is_empty(&psSyncPt->sSignalCallbacks))
+       {
+               ERR("Sync pt has no attached syncs. Make sure to attach one "
+                   "when creating a new sync pt to define its flavour");
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto e1;
+       }
+
+       dllist_foreach_node(&psSyncPt->sSignalCallbacks, psCurrentCB, psNextCB)
+       {
+               psCB = IMG_CONTAINER_OF(psCurrentCB,
+                                       PVRSRV_SYNC_SIGNAL_CB,
+                                       sCallbackNode);
+               eRet = psCB->pfnSignal(psCB->hAttachedSync, eSignal);
+               if (eRet != PVRSRV_OK)
+               {
+                       ERR("Failed to signal an attached sync, system might block!");
+                       eError = eRet;
+                       /* Don't jump to exit but try to signal remaining syncs */
+               }
+       }
+
+e1:
+       PVR_DPF_RETURN_RC1(eError, psSyncPt);
+}
+
+/* HOLD TL LOCK!
+ * Mark a sync point with the state eSignal */
+static PVRSRV_ERROR _SyncFbSyncPtSignal(PVRSRV_SYNC_PT *psSyncPt, PVRSRV_SYNC_STATE eSignal)
+{
+       PVR_DPF_ENTERED1(psSyncPt);
+
+       OSAtomicWrite(&psSyncPt->iStatus, eSignal);
+
+       if (psSyncPt->uiSeqNum >
+           OSAtomicRead(&psSyncPt->psTl->iLastSignalledSeqNum))
+       {
+               OSAtomicWrite(&psSyncPt->psTl->iLastSignalledSeqNum,
+                             psSyncPt->uiSeqNum);
+       }
+
+       PVR_DPF_RETURN_OK;
+}
+
+/* Check whether all syncs in a fence were signalled */
+static IMG_BOOL _SyncFbFenceSyncsHaveSignalled(PVRSRV_FENCE_SERVER *psFence)
+{
+       IMG_UINT32 i;
+
+       PVR_DPF_ENTERED1(psFence);
+
+       for (i = 0; i < psFence->uiNumSyncs; i++)
+       {
+               if (OSAtomicRead(&psFence->apsFenceSyncList[i]->iStatus) ==
+                       PVRSRV_SYNC_NOT_SIGNALLED)
+               {
+                       PVR_DPF_RETURN_RC1(IMG_FALSE, psFence);
+               }
+       }
+
+       OSAtomicWrite(&psFence->iStatus,
+                     PVRSRV_FENCE_SIGNALLED);
+
+       PVR_DPF_RETURN_RC1(IMG_TRUE, psFence);
+}
+
+/* Increment timeline refcount */
+static void _SyncFbTimelineAcquire(PVRSRV_TIMELINE_SERVER *psTl)
+{
+       TL_REF_INC(&psTl->iRef, psTl);
+}
+
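+/* Drop a timeline reference; the timeline is destroyed once its refcount reaches zero. */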
+PVRSRV_ERROR SyncFbTimelineRelease(PVRSRV_TIMELINE_SERVER *psTl)
+{
+       IMG_INT iRef;
+
+       PVR_DPF_ENTERED1(psTl);
+
+       iRef = TL_REF_DEC(&psTl->iRef, psTl);
+       if (iRef != 0)
+       {
+               PVR_DPF_RETURN_OK;
+       }
+
+       if (_SyncFbTimelineHandleType(psTl) == PVRSRV_SYNC_HANDLE_SW)
+       {
+               _SyncSWTimelineCheckForUnsignalledPts(psTl);
+       }
+
+       _SyncFbTimelineListDel(psTl);
+
+       OSLockDestroy(psTl->hTlLock);
+
+#if defined(DEBUG)
+       psTl->sTlOps.pfnSyncPtHasSignalled = NULL;
+       psTl->hTlLock = NULL;
+#endif
+
+       OSFreeMem(psTl);
+
+       PVR_DPF_RETURN_OK;
+}
+
+/* Increment fence refcount */
+static void _SyncFbFenceAcquire(PVRSRV_FENCE_SERVER *psFence)
+{
+       FENCE_REF_INC(&psFence->iRef, psFence);
+}
+
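+/* Drop a fence reference; when it reaches zero, release all sync points of the fence and free it.
+ * When called from the cleanup thread, SW sync points belonging to the process being cleaned up are errored first. */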
+PVRSRV_ERROR SyncFbFenceRelease(PVRSRV_FENCE_SERVER *psFence)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK, eRet;
+       IMG_INT iRef;
+       IMG_UINT32 i;
+       IMG_BOOL bError = IMG_FALSE;
+       IMG_BOOL bCleanup = IMG_FALSE;
+
+       PVR_DPF_ENTERED1(psFence);
+
+       /* If cleanup thread, force cleanup of SW fences, otherwise
+        * simply decrement psFence->iRef
+        */
+       if (OSGetCurrentClientProcessIDKM() == PVRSRVGetPVRSRVData()->cleanupThreadPid)
+       {
+               IMG_UINT32 uiSyncPt;
+
+               /* Force erroring of any SW sync pts this fence may contain */
+               for (uiSyncPt=0; uiSyncPt < psFence->uiNumSyncs; uiSyncPt++)
+               {
+                       PVRSRV_SYNC_PT *psSyncPt = psFence->apsFenceSyncList[uiSyncPt];
+
+                       /* If this is a SW sync point from a timeline which was created
+                        * by the process being cleaned-up, then error it
+                        */
+                       if ((_SyncFbTimelineHandleType(psSyncPt->psTl) == PVRSRV_SYNC_HANDLE_SW) &&
+                           (PVRSRVGetPurgeConnectionPid() == psSyncPt->psTl->uiPID))
+                       {
+                               OSLockAcquire(psSyncPt->psTl->hTlLock);
+                               _SyncFbSyncPtSignalAttached(psSyncPt, PVRSRV_SYNC_ERRORED);
+                               OSLockRelease(psSyncPt->psTl->hTlLock);
+                       }
+               }
+       }
+
+       iRef = FENCE_REF_DEC(&psFence->iRef, psFence);
+       if (iRef != 0)
+       {
+               goto e1;
+       }
+
+       PDUMPCOMMENTWITHFLAGS(psFence->psDevNode, 0,
+                             "Destroy Fence %s (ID:%"IMG_UINT64_FMTSPEC")",
+                             psFence->pszName,
+                             psFence->iUID);
+
+       if (OSGetCurrentClientProcessIDKM() ==
+                       PVRSRVGetPVRSRVData()->cleanupThreadPid)
+       {
+               bCleanup = IMG_TRUE;
+       }
+
+       _SyncFbFenceListDel(psFence);
+
+       for (i = 0; i < psFence->uiNumSyncs; i++)
+       {
+               PVRSRV_SYNC_PT *psSyncPt = psFence->apsFenceSyncList[i];
+
+               if (bCleanup &&
+                               _SyncFbTimelineHandleType(psSyncPt->psTl) == PVRSRV_SYNC_HANDLE_SW)
+               {
+                       bError = IMG_TRUE;
+               }
+
+               eRet = _SyncFbSyncPtRelease(psSyncPt,
+                                           bError);
+               if (eRet != PVRSRV_OK)
+               {
+                       ERR("Error when releasing SyncPt, this might leak memory");
+                       eError = eRet;
+                       /* Try to continue and release the other sync pts, return error */
+               }
+       }
+
+#if defined(DEBUG)
+       {
+               for (i = 0; i < psFence->uiNumSyncs; i++)
+               {
+                       psFence->apsFenceSyncList[i] = NULL;
+               }
+               psFence->uiNumSyncs = 0;
+       }
+#endif
+
+       OSFreeMem(psFence->apsFenceSyncList);
+       OSFreeMem(psFence);
+
+e1:
+       PVR_DPF_RETURN_RC(eError);
+}
+
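+/* Duplicate a fence by taking an additional reference on it. */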
+PVRSRV_ERROR SyncFbFenceDup(PVRSRV_FENCE_SERVER *psInFence,
+                            PVRSRV_FENCE_SERVER **ppsOutFence)
+{
+       PVR_DPF_ENTERED1(psInFence);
+
+       FENCE_REF_INC(&psInFence->iRef, psInFence);
+
+       PDUMPCOMMENTWITHFLAGS(psInFence->psDevNode, 0,
+                             "Dup Fence %s (ID:%"IMG_UINT64_FMTSPEC").",
+                             psInFence->pszName,
+                             psInFence->iUID);
+
+       *ppsOutFence = psInFence;
+
+       PVR_DPF_RETURN_RC1(PVRSRV_OK, *ppsOutFence);
+}
+
+static IMG_BOOL _SyncFbSyncPtHasSignalled(PVRSRV_SYNC_PT *psSyncPt)
+{
+       return psSyncPt->psTl->sTlOps.pfnSyncPtHasSignalled(psSyncPt);
+}
+
+static IMG_BOOL _SyncFbFenceAddPt(PVRSRV_FENCE_SERVER *psFence,
+                                  IMG_UINT32 *i,
+                                  PVRSRV_SYNC_PT *psSyncPt)
+{
+       /*
+        * If the sync point has already signalled there is no need to add it to the fence.
+        * One exception is PDUMP drivers where we need to make sure we
+        * set up proper synchronisation in the pdump stream.
+        */
+#if !defined(PDUMP)
+       if (_SyncFbSyncPtHasSignalled(psSyncPt)) return IMG_FALSE;
+#endif
+       _SyncFbSyncPtAcquire(psSyncPt);
+       psFence->apsFenceSyncList[*i] = psSyncPt;
+       (*i)++;
+       return IMG_TRUE;
+}
+
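+/* Create a new fence that contains the sync points of both input fences, keeping at most one point per timeline. */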
+PVRSRV_ERROR SyncFbFenceMerge(PVRSRV_FENCE_SERVER *psInFence1,
+                              PVRSRV_FENCE_SERVER *psInFence2,
+                              IMG_UINT32 uiFenceNameSize,
+                              const IMG_CHAR *pszFenceName,
+                              PVRSRV_FENCE_SERVER **ppsOutFence)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVRSRV_FENCE_SERVER *psNewFence;
+       IMG_UINT32 i, i1, i2;
+       IMG_UINT32 uiFenceSyncListSize;
+
+       PVR_DPF_ENTERED;
+
+       psNewFence = OSAllocMem(sizeof(*psNewFence));
+       PVR_LOG_GOTO_IF_NOMEM(psNewFence, eError, e1);
+
+       psNewFence->psDevNode = psInFence1->psDevNode;
+
+       uiFenceSyncListSize = sizeof(*(psNewFence->apsFenceSyncList)) *
+                       (psInFence1->uiNumSyncs + psInFence2->uiNumSyncs);
+
+       psNewFence->apsFenceSyncList = OSAllocMem(uiFenceSyncListSize);
+       PVR_LOG_GOTO_IF_NOMEM(psNewFence->apsFenceSyncList, eError, e2);
+
+       if (uiFenceNameSize == 1)
+       {
+               OSSNPrintf(psNewFence->pszName,
+                          SYNC_FB_FENCE_MAX_LENGTH,
+                          "Fence-Merged");
+       }
+       else
+       {
+               if (pszFenceName)
+               {
+                       OSStringLCopy(psNewFence->pszName,
+                                                 pszFenceName,
+                                                 SYNC_FB_FENCE_MAX_LENGTH);
+               }
+               else
+               {
+                       psNewFence->pszName[0] = '\0';
+               }
+       }
+
+       /* Add sync pts from input fence 1 & 2
+        * - no duplicates in one timeline
+        * - sync pts in one fence are ordered by timeline UID
+        */
+       for (i = 0, i1 = 0, i2 = 0;
+            i1 < psInFence1->uiNumSyncs && i2 < psInFence2->uiNumSyncs;)
+       {
+               PVRSRV_SYNC_PT *psSyncPt1 = psInFence1->apsFenceSyncList[i1];
+               PVRSRV_SYNC_PT *psSyncPt2 = psInFence2->apsFenceSyncList[i2];
+
+               /* Adding sync pts in order of their timeline UID, smaller ID first */
+               if (psSyncPt1->psTl->iUID <
+                   psSyncPt2->psTl->iUID)
+               {
+                       _SyncFbFenceAddPt(psNewFence, &i, psSyncPt1);
+                       i1++;
+               }
+               else if (psSyncPt1->psTl->iUID >
+                        psSyncPt2->psTl->iUID)
+               {
+                       _SyncFbFenceAddPt(psNewFence, &i, psSyncPt2);
+                       i2++;
+               }
+               /* In case the timeline UID is the same just add the point that is
+                * later on that timeline. */
+               else
+               {
+                       /* Determine whether 'a' is a point later on the timeline
+                        * than 'b', taking sequence number wrap-around into account:
+                        *     (a - b <= ((IMG_INT)(~0U>>1))) */
+                       if (psSyncPt1->uiSeqNum - psSyncPt2->uiSeqNum <=
+                           ((IMG_INT)(~0U>>1)))
+                       {
+                               _SyncFbFenceAddPt(psNewFence, &i, psSyncPt1);
+                       }
+                       else
+                       {
+                               _SyncFbFenceAddPt(psNewFence, &i, psSyncPt2);
+                       }
+
+                       i1++;
+                       i2++;
+               }
+       }
+
+       /* Add the remaining sync pts to the fence. At this point we only enter
+        * either the first or the second loop because one fence has
+        * more sync pts than the other.
+        */
+       for (; i1 < psInFence1->uiNumSyncs; i1++)
+       {
+               _SyncFbFenceAddPt(psNewFence, &i, psInFence1->apsFenceSyncList[i1]);
+       }
+
+       for (; i2 < psInFence2->uiNumSyncs; i2++)
+       {
+               _SyncFbFenceAddPt(psNewFence, &i, psInFence2->apsFenceSyncList[i2]);
+       }
+
+       /* Fill remaining fields */
+       psNewFence->uiNumSyncs = i;
+       psNewFence->iUID = (IMG_INT64)(uintptr_t) psNewFence;
+       FENCE_REF_SET(&psNewFence->iRef, 1, psNewFence);
+
+       OSAtomicWrite(&psNewFence->iStatus, PVRSRV_SYNC_NOT_SIGNALLED);
+
+       _SyncFbFenceListAdd(psNewFence);
+
+       PDUMPCOMMENTWITHFLAGS(psInFence1->psDevNode, 0,
+                             "Merge Fence1 %s (ID:%"IMG_UINT64_FMTSPEC"), Fence2 %s (ID:%"IMG_UINT64_FMTSPEC") "
+                             "to Fence %s (ID:%"IMG_UINT64_FMTSPEC")",
+                             psInFence1->pszName,
+                             psInFence1->iUID,
+                             psInFence2->pszName,
+                             psInFence2->iUID,
+                             psNewFence->pszName,
+                             psNewFence->iUID);
+
+       *ppsOutFence = psNewFence;
+
+       PVR_DPF_RETURN_RC1(eError, *ppsOutFence);
+
+e2:
+       OSFreeMem(psNewFence);
+e1:
+       PVR_DPF_RETURN_RC(eError);
+}
+
+
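+/* Wait until all sync points of the fence have signalled or the given timeout (in ms) expires. */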
+PVRSRV_ERROR SyncFbFenceWait(PVRSRV_FENCE_SERVER *psFence,
+                             IMG_UINT32 ui32TimeoutInMs)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_HANDLE hOSEvent;
+       IMG_UINT32 t1 = 0, t2 = 0;
+
+       PVR_DPF_ENTERED1(psFence);
+
+       /* Increase refcount to make sure fence is not destroyed while waiting */
+       _SyncFbFenceAcquire(psFence);
+
+       if (OSAtomicRead(&psFence->iStatus) == PVRSRV_FENCE_NOT_SIGNALLED)
+       {
+               PVRSRV_ERROR eErrorClose;
+
+               /* If the status of the fence is not signalled it could mean that
+                * there are actually syncs still pending or that we have not
+                * checked yet whether the syncs were met, therefore do the
+                * check now and return in case they are. If they are not, go
+                * to sleep and wait. */
+
+               if (_SyncFbFenceSyncsHaveSignalled(psFence))
+               {
+                       goto e1;
+               }
+               else if (ui32TimeoutInMs == 0)
+               {
+                       eError = PVRSRV_ERROR_TIMEOUT;
+                       goto e1;
+               }
+
+               eError = OSEventObjectOpen(gsSyncFbContext.hSyncEventObject,
+                                          &hOSEvent);
+               PVR_GOTO_IF_ERROR(eError, e1);
+
+               while (!_SyncFbFenceSyncsHaveSignalled(psFence) && ui32TimeoutInMs)
+               {
+                       t1 = OSClockms();
+                       /* Wait for EO to be signalled */
+                       eError = OSEventObjectWaitTimeout(hOSEvent,
+                                                         ui32TimeoutInMs * 1000);
+                       t2 = OSClockms();
+
+                       if (eError != PVRSRV_OK && eError != PVRSRV_ERROR_TIMEOUT)
+                       {
+                               break;
+                       }
+
+
+                       /* Reduce timeout by the time we have just waited */
+                       if (ui32TimeoutInMs < (t2-t1))
+                       {
+                               ui32TimeoutInMs = 0;
+                       }
+                       else
+                       {
+                               ui32TimeoutInMs -= (t2-t1);
+                       }
+               }
+
+               eErrorClose = OSEventObjectClose(hOSEvent);
+               if (eErrorClose != PVRSRV_OK)
+               {
+                       ERR("Unable to close Event Object");
+
+                       /* Do not overwrite previous error
+                        * if it was something else than PVRSRV_OK */
+                       if (eError == PVRSRV_OK)
+                       {
+                               eError = eErrorClose;
+                       }
+               }
+       }
+e1:
+
+       SyncFbFenceRelease(psFence);
+
+       PVR_DPF_RETURN_RC1(eError, psFence);
+}
+
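+/* Print debug information about the given fence and its sync points to the kernel log. */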
+PVRSRV_ERROR SyncFbFenceDump(PVRSRV_FENCE_SERVER *psFence,
+                             IMG_UINT32 uiLine,
+                             IMG_UINT32 uiFileNameLength,
+                             const IMG_CHAR *pszFile,
+                             IMG_UINT32 uiModuleLength,
+                             const IMG_CHAR *pszModule,
+                             IMG_UINT32 uiDescLength,
+                             const IMG_CHAR *pszDesc)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_UINT32 i;
+
+       PVR_DPF_ENTERED1(psFence);
+
+       PVR_LOG(("  Fence dump request from:"));
+#if defined(DEBUG)
+       PVR_LOG(("    %s (%s:%u)", pszModule, pszFile, uiLine));
+#else
+       PVR_LOG(("    %s (location only available in debug build)", pszModule));
+#endif
+       PVR_LOG(("  Desc: %s", pszDesc));
+       PVR_LOG(("---------------- FENCE ----------------"));
+       PVR_LOG(("%s (UID: %"IMG_UINT64_FMTSPEC")", psFence->pszName, psFence->iUID));
+
+       PVR_LOG(("  Signalled: %s",
+               _SyncFbFenceSyncsHaveSignalled(psFence)?"Yes":"No"));
+       PVR_LOG(("  Ref: %d", OSAtomicRead(&psFence->iRef) ));
+
+       PVR_LOG(("  Sync Points:"));
+       for (i = 0; i < psFence->uiNumSyncs; i++)
+       {
+               PVRSRV_SYNC_PT *psSP = psFence->apsFenceSyncList[i];
+               PVR_LOG(("    Point %u)", i));
+               PVR_LOG(("      On timeline:     %s (UID: %"IMG_UINT64_FMTSPEC")",
+                        psSP->psTl->pszName, psSP->psTl->iUID));
+               PVR_LOG(("      Sequence number: %u", psSP->uiSeqNum));
+               PVR_LOG(("      Signalled:       %s",
+                       psSP->psTl->sTlOps.pfnSyncPtHasSignalled(psSP)? "Yes":"No"));
+               PVR_LOG(("      Ref:             %d", OSAtomicRead(&psSP->iRef)));
+       }
+       PVR_LOG(("----------------------------------------"));
+
+       PVR_DPF_RETURN_RC1(eError, psFence);
+}
+
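+/* Allocate and initialise a new timeline; pfnHasPtSignalled determines how sync points on it are checked for completion. */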
+static PVRSRV_ERROR _SyncFbTimelineCreate(PFN_SYNC_PT_HAS_SIGNALLED pfnHasPtSignalled,
+                                          IMG_UINT32 uiTimelineNameSize,
+                                          const IMG_CHAR *pszTimelineName,
+                                          PVRSRV_TIMELINE_SERVER **ppsTimeline)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_TIMELINE_SERVER *psNewTl;
+
+       PVR_DPF_ENTERED;
+
+       PVR_LOG_GOTO_IF_INVALID_PARAM(ppsTimeline, eError, e1);
+
+       psNewTl = OSAllocMem(sizeof(*psNewTl));
+       PVR_LOG_GOTO_IF_NOMEM(psNewTl, eError, e2);
+
+       eError = OSLockCreate(&psNewTl->hTlLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e3);
+
+       if (uiTimelineNameSize == 1)
+       {
+               OSSNPrintf(psNewTl->pszName,
+                          SYNC_FB_TIMELINE_MAX_LENGTH,
+                          "TL-%s-%d",
+                          OSGetCurrentClientProcessNameKM(),
+                          OSGetCurrentClientProcessIDKM());
+       }
+       else
+       {
+               if (pszTimelineName)
+               {
+                       OSStringLCopy((IMG_CHAR*) psNewTl->pszName,
+                                     pszTimelineName,
+                                     SYNC_FB_TIMELINE_MAX_LENGTH);
+               }
+               else
+               {
+                       psNewTl->pszName[0] = '\0';
+               }
+       }
+
+       dllist_init(&psNewTl->sSyncList);
+       dllist_init(&psNewTl->sSyncActiveList);
+       dllist_init(&psNewTl->sTlList);
+
+       _SyncFbFTimelineListAdd(psNewTl);
+
+       psNewTl->sTlOps.pfnSyncPtHasSignalled = pfnHasPtSignalled;
+       psNewTl->iUID = (IMG_INT64)(uintptr_t) psNewTl; /* Not unique throughout the driver lifetime */
+       OSAtomicWrite(&psNewTl->iSeqNum, 0);
+       OSAtomicWrite(&psNewTl->iLastSignalledSeqNum, 0);
+       psNewTl->uiPID = OSGetCurrentClientProcessIDKM();
+
+       /* Set initial refcount value */
+       TL_REF_SET(&psNewTl->iRef, 1, psNewTl);
+
+       *ppsTimeline = psNewTl;
+
+       PVR_DPF_RETURN_RC1(PVRSRV_OK, psNewTl);
+
+e3:
+       OSFreeMem(psNewTl);
+e2:
+e1:
+       PVR_DPF_RETURN_RC(eError);
+}
+
+/*****************************************************************************/
+/*                                                                           */
+/*                         PVR SPECIFIC FUNCTIONS                            */
+/*                                                                           */
+/*****************************************************************************/
+
+/* Free a PVR sync point with its sync checkpoint */
+static void _SyncFbSyncPtFreePVR(IMG_HANDLE hSync)
+{
+       PVR_DPF_ENTERED1(hSync);
+
+       SyncCheckpointFree((PSYNC_CHECKPOINT) hSync);
+
+       PVR_DPF_RETURN;
+}
+
+
+/* Mark a sync checkpoint with the given state.
+ * MAKE SURE TO WAKE UP FW AFTER CALLING THIS */
+static PVRSRV_ERROR _SyncFbSyncPtSignalPVR(IMG_HANDLE hSync,
+                                           PVRSRV_SYNC_STATE eState)
+{
+       PSYNC_CHECKPOINT psSyncCheck = (PSYNC_CHECKPOINT) hSync;
+
+       PVR_DPF_ENTERED1(hSync);
+
+       if (!SyncCheckpointIsSignalled(psSyncCheck, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+       {
+               switch (eState)
+               {
+                       case PVRSRV_SYNC_SIGNALLED:
+                               SyncCheckpointSignal(psSyncCheck, PVRSRV_FENCE_FLAG_NONE);
+                               break;
+                       case PVRSRV_SYNC_ERRORED:
+                               SyncCheckpointError(psSyncCheck, PVRSRV_FENCE_FLAG_NONE);
+                               break;
+                       default:
+                               ERR("Passed unknown sync state, "
+                                               "please use a valid one for signalling.");
+                               return PVRSRV_ERROR_INVALID_PARAMS;
+               }
+       }
+
+       PVR_DPF_RETURN_RC1(PVRSRV_OK, hSync);
+}
+
+/* Check whether the native sync of the SyncPt has signalled.
+ *
+ * HOLD TL LOCK!
+ */
+static IMG_BOOL _SyncFbSyncPtHasSignalledPVR(PVRSRV_SYNC_PT *psSyncPt)
+{
+       PDLLIST_NODE psCBNode;
+       PVRSRV_SYNC_SIGNAL_CB *psCB;
+       PSYNC_CHECKPOINT psSyncCheck;
+       IMG_BOOL bRet = IMG_FALSE;
+
+       PVR_DPF_ENTERED1(psSyncPt);
+
+       /* If the SyncPt is not signalled yet,
+        * check whether the first attached sync has.
+        *
+        * Change SyncPt state to signalled or errored if yes.
+        * Also notify other attached syncs.
+        */
+       if (OSAtomicRead(&psSyncPt->iStatus) == PVRSRV_SYNC_NOT_SIGNALLED)
+       {
+               /* List must have at least the device sync attached if we are called */
+               PVR_ASSERT(!dllist_is_empty(&psSyncPt->sSignalCallbacks));
+
+               /* Retrieve the first sync checkpoint of that sync pt */
+               psCBNode = dllist_get_next_node(&psSyncPt->sSignalCallbacks);
+               psCB = IMG_CONTAINER_OF(psCBNode, PVRSRV_SYNC_SIGNAL_CB, sCallbackNode);
+               psSyncCheck = (PSYNC_CHECKPOINT) psCB->hAttachedSync;
+
+               if (SyncCheckpointIsSignalled(psSyncCheck, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+               {
+                       _SyncFbSyncPtSignal(psSyncPt, PVRSRV_SYNC_SIGNALLED);
+
+                       /* Signal all other attached syncs */
+                       PVR_LOG_IF_ERROR(_SyncFbSyncPtSignalAttached(psSyncPt, PVRSRV_SYNC_SIGNALLED),
+                                        "_SyncFbSyncPtSignalAttached");
+
+                       bRet = IMG_TRUE;
+               }
+
+               PVR_DPF_RETURN_RC1(bRet, psSyncPt);
+       }
+       else
+       {
+               PVR_DPF_RETURN_RC1(IMG_TRUE, psSyncPt);
+       }
+}
+
+PVRSRV_ERROR SyncFbTimelineCreatePVR(IMG_UINT32 uiTimelineNameSize,
+                                     const IMG_CHAR *pszTimelineName,
+                                     PVRSRV_TIMELINE_SERVER **ppsTimeline)
+{
+       return _SyncFbTimelineCreate(&_SyncFbSyncPtHasSignalledPVR,
+                                    uiTimelineNameSize,
+                                    pszTimelineName,
+                                    ppsTimeline);
+}
+
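+/* Create a fence containing a single new PVR sync point, backed by a freshly allocated sync checkpoint, on the given timeline. */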
+PVRSRV_ERROR SyncFbFenceCreatePVR(PPVRSRV_DEVICE_NODE psDeviceNode,
+                                  const IMG_CHAR *pszName,
+                                  PVRSRV_TIMELINE iTl,
+                                  PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+                                  PVRSRV_FENCE *piOutFence,
+                                  IMG_UINT64 *puiFenceUID,
+                                  void **ppvFenceFinaliseData,
+                                  PSYNC_CHECKPOINT *ppsOutCheckpoint,
+                                  void **ppvTimelineUpdateSync,
+                                  IMG_UINT32 *puiTimelineUpdateValue)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_FENCE_SERVER *psNewFence;
+       PVRSRV_SYNC_PT *psNewSyncPt = NULL;
+       PVRSRV_SYNC_SIGNAL_CB *psNewSyncSignalCB;
+       PVRSRV_HANDLE_BASE      *psHandleBase;
+       PVRSRV_TIMELINE_SERVER *psTl;
+       IMG_HANDLE hOutFence;
+       IMG_UINT32 ui32FwAddr;
+
+       PVR_UNREFERENCED_PARAMETER(ppvTimelineUpdateSync);
+       PVR_UNREFERENCED_PARAMETER(puiTimelineUpdateValue);
+
+       PVR_DPF_ENTERED;
+
+       /* The fallback implementation does not need to finalise
+        * the fence, so set the ppvFenceFinaliseData to NULL
+        * (if provided)
+        */
+       if (ppvFenceFinaliseData != NULL)
+       {
+               *ppvFenceFinaliseData = NULL;
+       }
+
+       PVR_LOG_GOTO_IF_INVALID_PARAM(pszName, eError, e0);
+       PVR_LOG_GOTO_IF_INVALID_PARAM(piOutFence, eError, e0);
+       PVR_LOG_GOTO_IF_INVALID_PARAM(ppsOutCheckpoint, eError, e0);
+
+       eError = _SyncFbLookupProcHandle((IMG_HANDLE) (uintptr_t) iTl,
+                                        PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER,
+                                        IMG_TRUE,
+                                        (void**) &psTl,
+                                        &psHandleBase);
+       PVR_GOTO_IF_ERROR(eError, e0);
+
+       if (unlikely(_SyncFbTimelineHandleType(psTl) != PVRSRV_SYNC_HANDLE_PVR))
+       {
+               PVR_LOG_GOTO_WITH_ERROR("_SyncFbTimelineHandleType", eError, PVRSRV_ERROR_INVALID_PARAMS, e1);
+       }
+
+       /* Allocate and set up:
+        *              Fence
+        *              Sync Signal CB
+        *              SyncPt list
+        *              Sync Checkpoint
+        *              SyncPt
+        *              Handle
+        */
+       psNewFence = OSAllocMem(sizeof(*psNewFence));
+       PVR_LOG_GOTO_IF_NOMEM(psNewFence, eError, e2);
+
+       psNewSyncSignalCB = OSAllocMem(sizeof(*psNewSyncSignalCB));
+       PVR_LOG_GOTO_IF_NOMEM(psNewSyncSignalCB, eError, e3);
+
+       psNewFence->apsFenceSyncList = OSAllocMem(sizeof(*(psNewFence->apsFenceSyncList)));
+       PVR_LOG_GOTO_IF_NOMEM(psNewFence->apsFenceSyncList, eError, e4);
+
+       psNewFence->psDevNode = psDeviceNode;
+
+       /* Lock down TL until new point is fully created and inserted */
+       OSLockAcquire(psTl->hTlLock);
+
+       eError = _SyncFbSyncPtCreate(&psNewSyncPt,
+                                    psTl,
+                                    OSAtomicIncrement(&psTl->iSeqNum));
+       if (unlikely(eError != PVRSRV_OK))
+       {
+               OSLockRelease(psTl->hTlLock);
+               ERR("Cannot allocate SyncPt.");
+               goto e5;
+       }
+
+       eError = SyncCheckpointAlloc(psSyncCheckpointContext,
+                                    iTl,
+                                    PVRSRV_NO_FENCE,
+                                    pszName,
+                                    ppsOutCheckpoint);
+       if (unlikely(eError != PVRSRV_OK))
+       {
+               OSLockRelease(psTl->hTlLock);
+               ERR("Cannot allocate SyncCheckpoint.");
+               goto e6;
+       }
+
+       ui32FwAddr = SyncCheckpointGetFirmwareAddr(*ppsOutCheckpoint);
+
+       /* Init Sync Signal CB */
+       psNewSyncSignalCB->hAttachedSync = (IMG_HANDLE) *ppsOutCheckpoint;
+       psNewSyncSignalCB->pfnSignal = &_SyncFbSyncPtSignalPVR;
+       psNewSyncSignalCB->pfnSyncFree = &_SyncFbSyncPtFreePVR;
+       psNewSyncSignalCB->hPrivData = (IMG_HANDLE) psSyncCheckpointContext;
+
+       dllist_add_to_tail(&psNewSyncPt->sSignalCallbacks,
+                          &psNewSyncSignalCB->sCallbackNode);
+
+       OSLockRelease(psTl->hTlLock);
+
+       DBG(("%s: Inserting key <%p> = %p", __func__, *ppsOutCheckpoint, psNewSyncPt));
+       OSLockAcquire(gsSyncFbContext.hFbContextLock);
+       HASH_Insert_Extended(gsSyncFbContext.sCheckpointHashTable, (void *)&ui32FwAddr, (uintptr_t) psNewSyncPt);
+       OSLockRelease(gsSyncFbContext.hFbContextLock);
+
+       /* Init Fence */
+       OSStringLCopy(psNewFence->pszName,
+                     pszName,
+                     SYNC_FB_FENCE_MAX_LENGTH);
+
+       psNewFence->apsFenceSyncList[0] = psNewSyncPt;
+       psNewFence->uiNumSyncs = 1;
+       FENCE_REF_SET(&psNewFence->iRef, 1, psNewFence);
+       OSAtomicWrite(&psNewFence->iStatus, PVRSRV_SYNC_NOT_SIGNALLED);
+       psNewFence->iUID = (IMG_INT64)(uintptr_t) psNewFence; /* Not unique throughout the driver lifetime */
+
+       eError = PVRSRVAllocHandle(psHandleBase,
+                                  &hOutFence,
+                                  (void*) psNewFence,
+                                  PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER,
+                                  PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                  (PFN_HANDLE_RELEASE) &SyncFbFenceRelease);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVAllocHandle", e7);
+
+       _SyncFbFenceListAdd(psNewFence);
+
+       PDUMPCOMMENTWITHFLAGS(psNewFence->psDevNode, 0,
+                             "Allocated PVR Fence %s (ID:%"IMG_UINT64_FMTSPEC") with Checkpoint (ID:%d) "
+                             "on Timeline %s (ID:%"IMG_UINT64_FMTSPEC")",
+                             psNewFence->pszName,
+                             psNewFence->iUID,
+                             SyncCheckpointGetId(psNewSyncSignalCB->hAttachedSync),
+                             psTl->pszName,
+                             psTl->iUID);
+
+       PVRSRVReleaseHandle(psHandleBase,
+                           (IMG_HANDLE) (uintptr_t) iTl,
+                           PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER);
+
+       *puiFenceUID = psNewFence->iUID;
+       *piOutFence = (PVRSRV_FENCE) (uintptr_t) hOutFence;
+
+       PVR_DPF_RETURN_RC1(PVRSRV_OK, psNewFence);
+
+e7:
+       SyncCheckpointFree(*ppsOutCheckpoint);
+e6:
+       _SyncFbSyncPtRelease(psNewSyncPt, IMG_FALSE);
+e5:
+       OSFreeMem(psNewFence->apsFenceSyncList);
+e4:
+       OSFreeMem(psNewSyncSignalCB);
+e3:
+       OSFreeMem(psNewFence);
+e2:
+e1:
+       PVRSRVReleaseHandle(psHandleBase,
+                           (IMG_HANDLE) (uintptr_t) iTl,
+                           PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER);
+e0:
+       PVR_DPF_RETURN_RC(eError);
+}
+
+/* The caller of the resolve function has to free the sync checkpoints
+ * and the array that holds the pointers. */
+PVRSRV_ERROR SyncFbFenceResolvePVR(PSYNC_CHECKPOINT_CONTEXT psContext,
+                                   PVRSRV_FENCE iFence,
+                                   IMG_UINT32 *puiNumCheckpoints,
+                                   PSYNC_CHECKPOINT **papsCheckpoints,
+                                   IMG_UINT64 *pui64FenceUID)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_FENCE_SERVER *psFence;
+       PVRSRV_HANDLE_BASE *psHBase;
+       PSYNC_CHECKPOINT *apsCheckpoints;
+       PSYNC_CHECKPOINT psCheckpoint;
+       PVRSRV_SYNC_SIGNAL_CB *psSyncCB, *psNewSyncCB;
+       PVRSRV_SYNC_PT *psSyncPt;
+       PDLLIST_NODE psNode;
+       IMG_UINT32 i, uiNumCheckpoints = 0;
+
+       PVR_DPF_ENTERED;
+
+       if (iFence == PVRSRV_NO_FENCE)
+       {
+               *puiNumCheckpoints = 0;
+               eError = PVRSRV_OK;
+               goto e0;
+       }
+
+       eError = _SyncFbLookupProcHandle((IMG_HANDLE) (uintptr_t) iFence,
+                                        PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER,
+                                        IMG_TRUE,
+                                        (void**)&psFence,
+                                        &psHBase);
+       PVR_GOTO_IF_ERROR(eError, e0);
+
+       apsCheckpoints = OSAllocMem(sizeof(*apsCheckpoints) * psFence->uiNumSyncs);
+       PVR_LOG_GOTO_IF_NOMEM(apsCheckpoints, eError, e1);
+
+       /* Go through all syncs and add them to the list */
+       for (i = 0; i < psFence->uiNumSyncs; i++)
+       {
+               psNewSyncCB = NULL;
+               psSyncPt = psFence->apsFenceSyncList[i];
+
+               /* Don't skip signalled sync points on PDUMP to make sure we set up
+                * proper synchronisation in the pdump stream. */
+#if !defined(PDUMP)
+               if (_SyncFbSyncPtHasSignalled(psSyncPt))
+               {
+                       continue;
+               }
+#endif
+
+               OSLockAcquire(gsSyncFbContext.hFbContextLock);
+               OSLockAcquire(psSyncPt->psTl->hTlLock);
+               psNode = dllist_get_next_node(&psSyncPt->sSignalCallbacks);
+               psSyncCB = IMG_CONTAINER_OF(psNode, PVRSRV_SYNC_SIGNAL_CB, sCallbackNode);
+
+               /* If we have a sync checkpoint AND
+                * it uses the same context as the given one,
+                * just add the checkpoint to the resolve list.*/
+               if ((_SyncFbSyncPtHandleType(psSyncCB) == PVRSRV_SYNC_HANDLE_PVR) &&
+                   (psContext == (PSYNC_CHECKPOINT_CONTEXT) psSyncCB->hPrivData))
+               {
+                       psCheckpoint = (PSYNC_CHECKPOINT) psSyncCB->hAttachedSync;
+               }
+               /* Else create a new sync checkpoint in the given context */
+               else
+               {
+                       IMG_UINT32 ui32FwAddr;
+
+                       eError = SyncCheckpointAlloc(psContext,
+                                                    SYNC_CHECKPOINT_FOREIGN_CHECKPOINT,
+                                                    iFence,
+                                                    psFence->pszName,
+                                                    &psCheckpoint);
+                       if (eError != PVRSRV_OK)
+                       {
+                               OSLockRelease(psSyncPt->psTl->hTlLock);
+                               OSLockRelease(gsSyncFbContext.hFbContextLock);
+                               goto e2;
+                       }
+
+                       psNewSyncCB = OSAllocMem(sizeof(*psNewSyncCB));
+                       if (psNewSyncCB == NULL)
+                       {
+                               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               OSLockRelease(psSyncPt->psTl->hTlLock);
+                               OSLockRelease(gsSyncFbContext.hFbContextLock);
+                               goto e3;
+                       }
+
+                       psNewSyncCB->hAttachedSync = (IMG_HANDLE) psCheckpoint;
+                       psNewSyncCB->hPrivData = (IMG_HANDLE) psContext;
+                       psNewSyncCB->pfnSignal = &_SyncFbSyncPtSignalPVR;
+                       psNewSyncCB->pfnSyncFree = &_SyncFbSyncPtFreePVR;
+                       dllist_add_to_tail(&psFence->apsFenceSyncList[i]->sSignalCallbacks,
+                                          &psNewSyncCB->sCallbackNode);
+
+                       /* Insert the checkpoint into the firmware address lookup table,
+                        * in case this checkpoint gets errored by the host driver. */
+                       ui32FwAddr = SyncCheckpointGetFirmwareAddr(psCheckpoint);
+
+                       DBG(("%s: Inserting key (fwAddr0x%x)<%p> = %p", __func__, ui32FwAddr, psCheckpoint, psSyncPt));
+                       HASH_Insert_Extended(gsSyncFbContext.sCheckpointHashTable, (void *)&ui32FwAddr, (uintptr_t) psSyncPt);
+
+                       /* If the existing sync pt has already been signalled, then signal
+                        * this new sync too */
+                       if (_SyncFbSyncPtHasSignalled(psFence->apsFenceSyncList[i]))
+                       {
+                               _SyncFbSyncPtSignalPVR(psNewSyncCB->hAttachedSync, PVRSRV_SYNC_SIGNALLED);
+                       }
+               }
+               OSLockRelease(psSyncPt->psTl->hTlLock);
+               OSLockRelease(gsSyncFbContext.hFbContextLock);
+
+               /* Take a reference, resolve caller is responsible
+                * to drop it after use */
+               eError = SyncCheckpointTakeRef(psCheckpoint);
+               PVR_GOTO_IF_ERROR(eError, e4);
+
+               apsCheckpoints[uiNumCheckpoints++] = psCheckpoint;
+       }
+
+       *pui64FenceUID = psFence->iUID;
+       *puiNumCheckpoints = uiNumCheckpoints;
+       *papsCheckpoints = apsCheckpoints;
+
+       PVRSRVReleaseHandle(psHBase,
+                           (IMG_HANDLE) (uintptr_t) iFence,
+                           PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER);
+
+       PVR_DPF_RETURN_OK;
+
+e4:
+       if (psNewSyncCB)
+               OSFreeMem(psNewSyncCB);
+e3:
+       SyncCheckpointFree(psCheckpoint);
+e2:
+       for (; i > 0; i--)
+       {
+               SyncCheckpointDropRef(apsCheckpoints[i-1]);
+               SyncCheckpointFree(apsCheckpoints[i-1]);
+       }
+
+       OSFreeMem(apsCheckpoints);
+e1:
+       PVRSRVReleaseHandle(psHBase,
+                           (IMG_HANDLE) (uintptr_t) iFence,
+                           PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER);
+e0:
+       PVR_DPF_RETURN_RC(eError);
+}
+
+/* In case something went wrong after FenceCreate we can roll back (destroy)
+ * the fence in the server */
+static PVRSRV_ERROR SyncFbFenceRollbackPVR(PVRSRV_FENCE iFence, void *pvFenceData)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_DPF_ENTERED;
+       PVR_UNREFERENCED_PARAMETER(pvFenceData);
+
+       PVR_GOTO_IF_INVALID_PARAM(iFence != PVRSRV_NO_FENCE, eError, e1);
+
+       eError = _SyncFbDestroyHandle((IMG_HANDLE) (uintptr_t) iFence,
+                                     PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER);
+       PVR_GOTO_IF_ERROR(eError, e1);
+
+       PVR_DPF_RETURN_OK;
+
+e1:
+       PVR_DPF_RETURN_RC(eError);
+}
+
+/* Dump debug info on syncs relating to any of the FWAddrs in the
+ * given array. This is called when rgx_ccb.c determines we have a
+ * stalled CCB, so this debug will aid in debugging which sync(s)
+ * have failed to signal.
+ */
+IMG_UINT32 SyncFbDumpInfoOnStalledUFOs(IMG_UINT32 nr_ufos, IMG_UINT32 *vaddrs)
+{
+       IMG_UINT32 ui32NumFallbackUfos = 0;
+       PDLLIST_NODE psFenceNode, psNextFenceNode;
+       PVRSRV_FENCE_SERVER *psFence;
+       IMG_UINT32 *pui32NextFWAddr = vaddrs;
+       IMG_UINT32 ui32CurrentUfo;
+
+       OSLockAcquire(gsSyncFbContext.hFbContextLock);
+
+       for (ui32CurrentUfo=0; ui32CurrentUfo<nr_ufos; ui32CurrentUfo++)
+       {
+               if (pui32NextFWAddr)
+               {
+                       /* Iterate over all fences */
+                       dllist_foreach_node(&gsSyncFbContext.sFenceList,
+                                                               psFenceNode,
+                                                               psNextFenceNode)
+                       {
+                               IMG_UINT32 i;
+                               IMG_BOOL bFenceDumped = IMG_FALSE;
+                               IMG_UINT32 ui32SyncPtBitmask = 0;
+                               IMG_UINT32 ui32SyncCheckpointFWAddr = 0;
+                               PVRSRV_SYNC_PT *psSyncPt = NULL;
+
+                               psFence = IMG_CONTAINER_OF(psFenceNode,
+                                                                                  PVRSRV_FENCE_SERVER,
+                                                                                  sFenceListNode);
+
+                               /* ... all sync points in the fence */
+                               for (i = 0; i < psFence->uiNumSyncs; i++)
+                               {
+                                       PDLLIST_NODE psCBNode, psNextCBNode;
+
+                                       psSyncPt = psFence->apsFenceSyncList[i];
+
+                                       dllist_foreach_node(&psSyncPt->sSignalCallbacks,
+                                                                               psCBNode,
+                                                                               psNextCBNode)
+                                       {
+                                               PVRSRV_SYNC_SIGNAL_CB *psCb = IMG_CONTAINER_OF(psCBNode,
+                                                                                              PVRSRV_SYNC_SIGNAL_CB,
+                                                                                              sCallbackNode);
+
+                                               switch (_SyncFbSyncPtHandleType(psCb))
+                                               {
+                                                       case PVRSRV_SYNC_HANDLE_PVR:
+                                                       {
+                                                               ui32SyncCheckpointFWAddr = SyncCheckpointGetFirmwareAddr(psCb->hAttachedSync);
+                                                               ui32SyncPtBitmask |= 1;
+                                                               break;
+                                                       }
+                                                       case PVRSRV_SYNC_HANDLE_SW:
+                                                       {
+                                                               ui32SyncPtBitmask |= 2;
+                                                               break;
+                                                       }
+                                                       default:
+                                                               break;
+                                               }
+                                       }
+                               }
+
+                               if ((ui32SyncPtBitmask == 0x3) &&
+                                       (ui32SyncCheckpointFWAddr == *pui32NextFWAddr))
+                               {
+                                       /* Print fence info (if not already done so) */
+                                       if (!bFenceDumped)
+                                       {
+                                               PVR_LOG(("Fence: %s, ID: %"IMG_UINT64_FMTSPEC", %s - (0x%p)",
+                                                                 psFence->pszName,
+                                                                 psFence->iUID,
+                                                                 _SyncFbFenceSyncsHaveSignalled(psFence) ?
+                                                                                 "Signalled" : "Pending  ",
+                                                                 psFence));
+                                               bFenceDumped = IMG_TRUE;
+                                       }
+                                       _SyncFbDebugRequestPrintSyncPt(psSyncPt,
+                                                                      IMG_TRUE,
+                                                                                                  NULL,
+                                                                                                  NULL);
+                                       ui32NumFallbackUfos++;
+                               }
+                       }
+                       pui32NextFWAddr++;
+               }
+       }
+       OSLockRelease(gsSyncFbContext.hFbContextLock);
+
+       return ui32NumFallbackUfos;
+}
+
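+/* Map a sync checkpoint state onto the corresponding fallback sync state. */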
+static inline PVRSRV_SYNC_STATE _SyncFbCheckpointToSyncState(PVRSRV_SYNC_CHECKPOINT_STATE eState)
+{
+       switch (eState)
+       {
+               case PVRSRV_SYNC_CHECKPOINT_SIGNALLED:
+                       return PVRSRV_SYNC_SIGNALLED;
+               case PVRSRV_SYNC_CHECKPOINT_ERRORED:
+                       return PVRSRV_SYNC_ERRORED;
+               case PVRSRV_SYNC_CHECKPOINT_ACTIVE:
+                       return PVRSRV_SYNC_NOT_SIGNALLED;
+               default:
+                       PVR_ASSERT(!"Unknown sync checkpoint state");
+                       return PVRSRV_SYNC_ERRORED;
+       }
+}
+
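+/* Look up the sync point for the given checkpoint FW address and, if found, record the new state and
+ * propagate it to all other attached syncs. Returns IMG_FALSE if the UFO is not known to Sync Fallback. */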
+IMG_BOOL SyncFbCheckpointHasSignalled(IMG_UINT32 ui32FwAddr, IMG_UINT32 ui32Value)
+{
+       PVRSRV_SYNC_PT *psSyncPt;
+       PVRSRV_SYNC_STATE eSignal;
+       PVRSRV_SYNC_SIGNAL_CB *psCB;
+       PDLLIST_NODE psCurrentCB, psNextCB;
+
+       PVR_DPF_ENTERED1(ui32FwAddr);
+
+       OSLockAcquire(gsSyncFbContext.hFbContextLock);
+
+       DBG(("%s: Looking up key (fwAddr=0x%x)", __func__, ui32FwAddr));
+       psSyncPt = (PVRSRV_SYNC_PT *) HASH_Retrieve_Extended(gsSyncFbContext.sCheckpointHashTable, (void *) &ui32FwAddr);
+       DBG(("%s: Found sync pt <%p>", __func__, psSyncPt));
+
+       if (!psSyncPt)
+       {
+               /* Sync Fallback does not know about this UFO, skip processing it. */
+               goto err_release;
+       }
+
+       eSignal = _SyncFbCheckpointToSyncState(ui32Value);
+       _SyncFbSyncPtSignal(psSyncPt, eSignal);
+
+       /*
+        * Signal all attachments except for the one representing the signalled
+        * checkpoint.
+        */
+
+       if (dllist_is_empty(&psSyncPt->sSignalCallbacks))
+       {
+               ERR("Sync pt has no attached syncs. Make sure to attach one "
+                   "when creating a new sync pt to define its flavour");
+       }
+
+       dllist_foreach_node(&psSyncPt->sSignalCallbacks, psCurrentCB, psNextCB)
+       {
+               psCB = IMG_CONTAINER_OF(psCurrentCB,
+                                       PVRSRV_SYNC_SIGNAL_CB,
+                                       sCallbackNode);
+
+               if (_SyncFbSyncPtHandleType(psCB) == PVRSRV_SYNC_HANDLE_PVR &&
+                   SyncCheckpointGetFirmwareAddr(psCB->hAttachedSync) == ui32FwAddr)
+               {
+                       continue;
+               }
+
+               if (psCB->pfnSignal(psCB->hAttachedSync, eSignal) != PVRSRV_OK)
+               {
+                       ERR("Failed to signal an attached sync, system might block!");
+                       /* Don't jump to exit but try to signal remaining syncs */
+               }
+       }
+
+       OSLockRelease(gsSyncFbContext.hFbContextLock);
+
+       PVR_DPF_RETURN_RC(IMG_TRUE);
+
+err_release:
+       OSLockRelease(gsSyncFbContext.hFbContextLock);
+       PVR_DPF_RETURN_RC(IMG_FALSE);
+}
+
+/*****************************************************************************/
+/*                                                                           */
+/*                         SW SPECIFIC FUNCTIONS                            */
+/*                                                                           */
+/*****************************************************************************/
+
+/* Free a SW sync point with its sync checkpoint */
+static void _SyncFbSyncPtFreeSW(IMG_HANDLE hSync)
+{
+       PVR_DPF_ENTERED1(hSync);
+
+       OSFreeMem(hSync);
+
+       PVR_DPF_RETURN;
+}
+
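+/* Check whether the first attached SW sync of the sync point has signalled and, if so,
+ * mark the sync point as signalled and notify the other attached syncs. */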
+static IMG_BOOL _SyncFbSyncPtHasSignalledSW(PVRSRV_SYNC_PT *psSyncPt)
+{
+       PDLLIST_NODE psCBNode;
+       PVRSRV_SYNC_SIGNAL_CB *psCB;
+       PVRSRV_SYNC_PT_SW *psSWSyncPt;
+       IMG_BOOL bRet = IMG_FALSE;
+
+       PVR_DPF_ENTERED1(psSyncPt);
+
+       /* If the SyncPt has not been signalled yet,
+        * check whether the first attached sync has.
+        *
+        * Change SyncPt state to signalled or errored if yes.
+        * Also notify other attached syncs.
+        */
+       if (OSAtomicRead(&psSyncPt->iStatus) == PVRSRV_SYNC_NOT_SIGNALLED)
+       {
+               /* List must have at least the device sync attached if we are called */
+               PVR_ASSERT(!dllist_is_empty(&psSyncPt->sSignalCallbacks));
+
+               /* Retrieve the first sync checkpoint of that sync pt */
+               psCBNode = dllist_get_next_node(&psSyncPt->sSignalCallbacks);
+               psCB = IMG_CONTAINER_OF(psCBNode, PVRSRV_SYNC_SIGNAL_CB, sCallbackNode);
+               psSWSyncPt = (PVRSRV_SYNC_PT_SW*) psCB->hAttachedSync;
+
+               if (psSWSyncPt->bSignalled)
+               {
+                       _SyncFbSyncPtSignal(psSyncPt, PVRSRV_SYNC_SIGNALLED);
+
+                       /* Signal all other attached syncs */
+                       PVR_LOG_IF_ERROR(_SyncFbSyncPtSignalAttached(psSyncPt, PVRSRV_SYNC_SIGNALLED),
+                                        "_SyncFbSyncPtSignalAttached");
+
+                       bRet = IMG_TRUE;
+               }
+
+               PVR_DPF_RETURN_RC1(bRet, psSyncPt);
+       }
+       else
+       {
+               PVR_DPF_RETURN_RC1(IMG_TRUE, psSyncPt);
+       }
+}
+
+/* Mark an attached SW sync pt with the given state.
+ * MAKE SURE TO WAKE UP THE FW AFTER CALLING THIS (if enqueued). */
+static PVRSRV_ERROR _SyncFbSyncPtSignalSW(IMG_HANDLE hSync,
+                                          PVRSRV_SYNC_STATE eState)
+{
+       PVRSRV_SYNC_PT_SW *psSWSyncPt = (PVRSRV_SYNC_PT_SW*) hSync;
+
+       PVR_DPF_ENTERED1(hSync);
+
+       if (!psSWSyncPt->bSignalled)
+       {
+               switch (eState)
+               {
+                       case PVRSRV_SYNC_SIGNALLED:
+                       case PVRSRV_SYNC_ERRORED:
+                               psSWSyncPt->bSignalled = IMG_TRUE;
+                               break;
+                       default:
+                               ERR("Passed unknown sync state (%d), "
+                                               "please use a valid one for signalling.",
+                                               eState);
+                               return PVRSRV_ERROR_INVALID_PARAMS;
+               }
+       }
+
+       PVR_DPF_RETURN_RC1(PVRSRV_OK, hSync);
+}
+
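+/* Create a timeline whose sync pts use the SW 'has signalled' callback */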
+PVRSRV_ERROR SyncFbTimelineCreateSW(IMG_UINT32 uiTimelineNameSize,
+                                    const IMG_CHAR *pszTimelineName,
+                                    PVRSRV_TIMELINE_SERVER **ppsTimeline)
+{
+       return _SyncFbTimelineCreate(&_SyncFbSyncPtHasSignalledSW,
+                                    uiTimelineNameSize,
+                                    pszTimelineName,
+                                    ppsTimeline);
+}
+
+/*****************************************************************************/
+/*                                                                           */
+/*                       SOFTWARE_TIMELINE FUNCTIONS                         */
+/*                                                                           */
+/*****************************************************************************/
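+/* Create a fence containing a single new sync pt on the given SW timeline.
+ * The timeline's sequence number is only incremented once the sync pt has
+ * been successfully created and attached. */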
+static PVRSRV_ERROR _SyncFbSWTimelineFenceCreate(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                          PVRSRV_TIMELINE_SERVER *psTl,
+                                          IMG_UINT32 uiFenceNameSize,
+                                          const IMG_CHAR *pszFenceName,
+                                          PVRSRV_FENCE_SERVER **ppsOutputFence,
+                                          IMG_UINT64 *pui64SyncPtIdx)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVRSRV_FENCE_SERVER *psNewFence;
+       PVRSRV_SYNC_PT *psNewSyncPt = NULL;
+       PVRSRV_SYNC_PT_SW *psNewSWSyncPt;
+       PVRSRV_SYNC_SIGNAL_CB *psNewSyncSignalCB;
+       IMG_INT iNextSeqNum;
+
+       if (_SyncFbTimelineHandleType(psTl) != PVRSRV_SYNC_HANDLE_SW)
+       {
+               ERR("Passed timeline is not a SW timeline.");
+               eError = PVRSRV_ERROR_NOT_SW_TIMELINE;
+               goto e1;
+       }
+
+       /* Allocate:
+        *              Fence
+        *              Sync Signal CB
+        *              SyncPt List
+        *              SW Sync
+        *              SyncPt
+        *              Handle
+        *      Setup
+        */
+       psNewFence = OSAllocMem(sizeof(*psNewFence));
+       PVR_LOG_GOTO_IF_NOMEM(psNewFence, eError, e1);
+
+       psNewFence->psDevNode = psDeviceNode;
+
+       psNewSyncSignalCB = OSAllocMem(sizeof(*psNewSyncSignalCB));
+       PVR_LOG_GOTO_IF_NOMEM(psNewSyncSignalCB, eError, e2);
+
+       psNewFence->apsFenceSyncList = OSAllocMem(sizeof(*(psNewFence->apsFenceSyncList)));
+       PVR_LOG_GOTO_IF_NOMEM(psNewFence->apsFenceSyncList, eError, e3);
+
+       psNewSWSyncPt = OSAllocMem(sizeof(*psNewSWSyncPt));
+       PVR_LOG_GOTO_IF_NOMEM(psNewSWSyncPt, eError, e4);
+
+       /* Lock down TL until new point is fully created and inserted */
+       OSLockAcquire(psTl->hTlLock);
+
+       /* Sample our next sync pt value - we won't actually increment
+        * iSeqNum for this SW timeline until we know the fence has been
+        * successfully created.
+        */
+       iNextSeqNum = OSAtomicRead(&psTl->iSeqNum) + 1;
+
+       eError = _SyncFbSyncPtCreate(&psNewSyncPt, psTl, iNextSeqNum);
+       if (eError != PVRSRV_OK)
+       {
+               OSLockRelease(psTl->hTlLock);
+               goto e5;
+       }
+
+       if (OSAtomicRead(&psTl->iLastSignalledSeqNum) < psNewSyncPt->uiSeqNum)
+       {
+               psNewSWSyncPt->bSignalled = IMG_FALSE;
+       }
+       else
+       {
+               psNewSWSyncPt->bSignalled = IMG_TRUE;
+               OSAtomicWrite(&psNewSyncPt->iStatus, PVRSRV_SYNC_SIGNALLED);
+       }
+
+       /* Init Sync Signal CB */
+       psNewSyncSignalCB->hAttachedSync = (IMG_HANDLE) psNewSWSyncPt;
+       psNewSyncSignalCB->pfnSignal = &_SyncFbSyncPtSignalSW;
+       psNewSyncSignalCB->pfnSyncFree = &_SyncFbSyncPtFreeSW;
+       psNewSyncSignalCB->hPrivData = NULL;
+
+       dllist_add_to_tail(&psNewSyncPt->sSignalCallbacks,
+                                          &psNewSyncSignalCB->sCallbackNode);
+
+       /* Now that the fence has been created, increment iSeqNum */
+       OSAtomicIncrement(&psTl->iSeqNum);
+
+       OSLockRelease(psTl->hTlLock);
+
+       if (pszFenceName)
+       {
+               /* Init Fence */
+               OSStringLCopy(psNewFence->pszName,
+                             pszFenceName,
+                             SYNC_FB_FENCE_MAX_LENGTH);
+       }
+       else
+       {
+               psNewFence->pszName[0] = '\0';
+       }
+
+       psNewFence->apsFenceSyncList[0] = psNewSyncPt;
+       psNewFence->uiNumSyncs = 1;
+       FENCE_REF_SET(&psNewFence->iRef, 1, psNewFence);
+       OSAtomicWrite(&psNewFence->iStatus, PVRSRV_FENCE_NOT_SIGNALLED);
+       psNewFence->iUID = (IMG_INT64)(uintptr_t) psNewFence;
+
+       _SyncFbFenceListAdd(psNewFence);
+
+       PDUMPCOMMENTWITHFLAGS(psDeviceNode, 0,
+                                                 "Allocated SW Fence %s (ID:%"IMG_UINT64_FMTSPEC") with sequence number %u "
+                                                 "on Timeline %s (ID:%"IMG_UINT64_FMTSPEC")",
+                                                 psNewFence->pszName,
+                                                 psNewFence->iUID,
+                                                 psNewSyncPt->uiSeqNum,
+                                                 psTl->pszName,
+                                                 psTl->iUID);
+
+       *ppsOutputFence = psNewFence;
+       if (pui64SyncPtIdx != NULL)
+       {
+               *pui64SyncPtIdx = psNewSyncPt->uiSeqNum;
+       }
+
+       PVR_DPF_RETURN_RC1(PVRSRV_OK, psNewFence);
+
+e5:
+       OSFreeMem(psNewSWSyncPt);
+e4:
+       OSFreeMem(psNewFence->apsFenceSyncList);
+e3:
+       OSFreeMem(psNewSyncSignalCB);
+e2:
+       OSFreeMem(psNewFence);
+e1:
+       PVR_DPF_RETURN_RC(eError);
+}
+
+/* Kernel mode function (SyncFb implementation) to create fence on a SW timeline */
+PVRSRV_ERROR SyncFbSWTimelineFenceCreateKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                           PVRSRV_TIMELINE iSWTimeline,
+                                           const IMG_CHAR *pszFenceName,
+                                           PVRSRV_FENCE *piOutputFence,
+                                           IMG_UINT64 *pui64SyncPtIdx)
+{
+
+       PVRSRV_ERROR eError;
+       PVRSRV_FENCE_SERVER *psNewFence;
+       PVRSRV_HANDLE_BASE      *psHandleBase;
+       PVRSRV_TIMELINE_SERVER *psTl;
+       IMG_HANDLE hOutFence;
+
+       PVR_DPF_ENTERED;
+
+       PVR_LOG_GOTO_IF_INVALID_PARAM(piOutputFence, eError, e0);
+
+       /* Look up the SW Timeline (and take a reference on it while
+        * we are creating the new sync pt and fence)
+        */
+       eError = _SyncFbLookupProcHandle((IMG_HANDLE) (uintptr_t) iSWTimeline,
+                                                                        PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER,
+                                                                        IMG_TRUE,
+                                                                        (void**) &psTl,
+                                                                        &psHandleBase);
+       PVR_GOTO_IF_ERROR(eError, e0);
+
+       eError = _SyncFbSWTimelineFenceCreate(psDeviceNode, psTl,
+                                             OSStringLength(pszFenceName),
+                                             pszFenceName,
+                                             &psNewFence,
+                                             pui64SyncPtIdx);
+       PVR_GOTO_IF_ERROR(eError, e1);
+
+       eError = PVRSRVAllocHandle(psHandleBase,
+                                                          &hOutFence,
+                                                          (void*) psNewFence,
+                                                          PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER,
+                                                          PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+                                                          (PFN_HANDLE_RELEASE) &SyncFbFenceRelease);
+       PVR_GOTO_IF_ERROR(eError, e2);
+
+       /* Drop the reference we took on the timeline earlier */
+       PVRSRVReleaseHandle(psHandleBase,
+                           (IMG_HANDLE) (uintptr_t) iSWTimeline,
+                           PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER);
+
+       *piOutputFence = (PVRSRV_FENCE) (uintptr_t) hOutFence;
+
+       return PVRSRV_OK;
+
+e2:
+       /* Release the fence we created, as we failed to
+        * allocate a handle for it */
+       SyncFbFenceRelease(psNewFence);
+
+e1:
+       /* Drop the reference we took on the timeline earlier */
+       PVRSRVReleaseHandle(psHandleBase,
+                           (IMG_HANDLE) (uintptr_t) iSWTimeline,
+                           PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER);
+e0:
+       PVR_DPF_RETURN_RC(eError);
+
+}
+
+/* Client (bridge) interface to the SyncSWTimelineFenceCreateKM() function */
+PVRSRV_ERROR SyncFbFenceCreateSW(CONNECTION_DATA *psConnection,
+                                 PVRSRV_DEVICE_NODE *psDeviceNode,
+                                 PVRSRV_TIMELINE_SERVER *psTimeline,
+                                 IMG_UINT32 uiFenceNameSize,
+                                 const IMG_CHAR *pszFenceName,
+                                 PVRSRV_FENCE_SERVER **ppsOutputFence,
+                                 IMG_UINT64 *pui64SyncPtIdx)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       eError =  _SyncFbSWTimelineFenceCreate(psDeviceNode,
+                                              psTimeline,
+                                              0,
+                                              pszFenceName,
+                                              ppsOutputFence,
+                                              pui64SyncPtIdx);
+
+       return eError;
+}
+
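+/* Advance a SW timeline by one point, marking every active sync pt whose
+ * sequence number is now reached with the given state (signalled or errored).
+ * Fails if the timeline is already at its last created point. */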
+static PVRSRV_ERROR _SyncSWTimelineAdvanceSigErr(PVRSRV_TIMELINE_SERVER *psTl,
+                                                 PVRSRV_SYNC_STATE eState,
+                                                 IMG_UINT64 *pui64SyncPtIdx)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PDLLIST_NODE psPtNode, psNextNode;
+       PVRSRV_SYNC_PT *psSyncPt;
+       IMG_INT32 uiTlSeqNum;
+
+       PVR_DPF_ENTERED1(psTl);
+
+       PVR_LOG_GOTO_IF_INVALID_PARAM(psTl, eError, errExit);
+
+       if (_SyncFbTimelineHandleType(psTl) != PVRSRV_SYNC_HANDLE_SW)
+       {
+               ERR("Passed timeline is not a SW timeline.");
+               eError = PVRSRV_ERROR_NOT_SW_TIMELINE;
+               goto errExit;
+       }
+
+       OSLockAcquire(psTl->hTlLock);
+
+       /* Don't allow incrementing of SW timeline beyond its last created pt */
+       if (OSAtomicRead(&psTl->iLastSignalledSeqNum) == OSAtomicRead(&psTl->iSeqNum))
+       {
+               DBG(("%s: !! TL<%p> (%d->%d/%d) !!", __func__, (void*)psTl, OSAtomicRead(&psTl->iLastSignalledSeqNum), OSAtomicRead(&psTl->iLastSignalledSeqNum)+1, OSAtomicRead(&psTl->iSeqNum)));
+               WRN("Attempt to advance SW timeline beyond last created point.");
+               eError = PVRSRV_ERROR_SW_TIMELINE_AT_LATEST_POINT;
+               goto errUnlockAndExit;
+       }
+
+       uiTlSeqNum = OSAtomicIncrement(&psTl->iLastSignalledSeqNum);
+
+       /* Go through list of active sync pts and
+        * signal the points that are met now */
+       dllist_foreach_node(&psTl->sSyncActiveList, psPtNode, psNextNode)
+       {
+               psSyncPt = IMG_CONTAINER_OF(psPtNode,
+                                           PVRSRV_SYNC_PT,
+                                           sTlSyncActiveList);
+               if (psSyncPt->uiSeqNum <= uiTlSeqNum)
+               {
+                       _SyncFbSyncPtSignal(psSyncPt, eState);
+
+                       /* Signal all other attached syncs */
+                       PVR_LOG_IF_ERROR(_SyncFbSyncPtSignalAttached(psSyncPt, eState),
+                                        "_SyncFbSyncPtSignalAttached");
+
+                       dllist_remove_node(psPtNode);
+               }
+       }
+
+       OSLockRelease(psTl->hTlLock);
+
+       if (pui64SyncPtIdx != NULL)
+       {
+               *pui64SyncPtIdx = uiTlSeqNum;
+       }
+
+       /* A completed SW operation may un-block the GPU */
+       PVRSRVCheckStatus(NULL);
+
+       eError = _SyncFbSignalEO();
+       PVR_LOG_GOTO_IF_ERROR(eError, "_SyncFbSignalEO", errExit);
+
+       PVR_DPF_RETURN_OK;
+
+errUnlockAndExit:
+       OSLockRelease(psTl->hTlLock);
+errExit:
+       PVR_DPF_RETURN_RC(eError);
+}
+
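+/* Error out any sync pts that were created on a SW timeline but never
+ * signalled, by advancing the timeline up to its last created point. */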
+static void _SyncSWTimelineCheckForUnsignalledPts(PVRSRV_TIMELINE_SERVER *psTl)
+{
+       IMG_UINT32 ui32TlSeqNum = OSAtomicRead(&psTl->iSeqNum);
+       IMG_UINT32 ui32TlLastSigSeqNum = OSAtomicRead(&psTl->iLastSignalledSeqNum);
+
+       while (ui32TlLastSigSeqNum < ui32TlSeqNum)
+       {
+               ui32TlLastSigSeqNum++;
+               PVR_DPF((PVR_DBG_WARNING,"%s: Found unsignalled SW timeline <%p> '%s' sync pt (%d/%d)",__func__, psTl, psTl->pszName, ui32TlLastSigSeqNum, ui32TlSeqNum));
+               _SyncSWTimelineAdvanceSigErr(psTl, PVRSRV_SYNC_ERRORED, NULL);
+       }
+}
+
+/* kernel mode function to advance a SW timeline */
+PVRSRV_ERROR SyncFbSWTimelineAdvanceKM(void *pvSWTimelineObj, IMG_UINT64 *pui64SyncPtIdx)
+{
+       return _SyncSWTimelineAdvanceSigErr(pvSWTimelineObj,
+                                           PVRSRV_SYNC_SIGNALLED,
+                                           pui64SyncPtIdx);
+}
+
+/* Client (bridge) interface to the SyncSWTimelineAdvanceKM() function */
+PVRSRV_ERROR SyncFbTimelineAdvanceSW(PVRSRV_TIMELINE_SERVER *psTimeline,
+                                     IMG_UINT64 *pui64SyncPtIdx)
+{
+       return _SyncSWTimelineAdvanceSigErr(psTimeline,
+                                           PVRSRV_SYNC_SIGNALLED,
+                                           pui64SyncPtIdx);
+}
+
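+/* Kernel mode function to drop a reference on a fence object */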
+PVRSRV_ERROR SyncFbFenceReleaseKM(void *pvFenceObj)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_DPF_ENTERED1(pvFenceObj);
+
+       PVR_LOG_GOTO_IF_INVALID_PARAM(pvFenceObj, eError, e0);
+
+       eError = SyncFbFenceRelease((PVRSRV_FENCE_SERVER*) pvFenceObj);
+
+e0:
+       PVR_DPF_RETURN_RC(eError);
+}
+
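+/* Look up a SW timeline by its handle, take a reference on it and return
+ * the server-side timeline object */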
+PVRSRV_ERROR SyncFbSWGetTimelineObj(PVRSRV_TIMELINE iSWTimeline,
+                                    void **ppvSWTimelineObj)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_HANDLE_BASE *psHB;
+
+       PVR_DPF_ENTERED1(iSWTimeline);
+
+       PVR_LOG_GOTO_IF_INVALID_PARAM(iSWTimeline != PVRSRV_NO_TIMELINE, eError, err_out);
+
+       eError = _SyncFbLookupProcHandle((IMG_HANDLE)(uintptr_t) iSWTimeline,
+                                        PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER,
+                                        IMG_FALSE,
+                                        ppvSWTimelineObj,
+                                        &psHB);
+       PVR_LOG_GOTO_IF_ERROR(eError, "_SyncFbLookupProcHandle", err_out);
+
+       _SyncFbTimelineAcquire((PVRSRV_TIMELINE_SERVER*) *ppvSWTimelineObj);
+
+err_out:
+       PVR_DPF_RETURN_RC(eError);
+}
+
+
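+/* Look up a fence by its handle, take a reference on it and return the
+ * server-side fence object */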
+PVRSRV_ERROR SyncFbGetFenceObj(PVRSRV_FENCE iFence,
+                               void **ppvFenceObj)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_HANDLE_BASE *psHB;
+
+       PVR_DPF_ENTERED1(iFence);
+
+       PVR_LOG_GOTO_IF_INVALID_PARAM(iFence != PVRSRV_NO_FENCE, eError, err_out);
+
+       eError = _SyncFbLookupProcHandle((IMG_HANDLE)(uintptr_t) iFence,
+                                        PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER,
+                                        IMG_FALSE,
+                                        ppvFenceObj,
+                                        &psHB);
+       PVR_LOG_GOTO_IF_ERROR(eError, "_SyncFbLookupProcHandle", err_out);
+
+       _SyncFbFenceAcquire((PVRSRV_FENCE_SERVER*) *ppvFenceObj);
+
+err_out:
+       PVR_DPF_RETURN_RC(eError);
+}
+
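+/* Dump debug information for a fence and each of its sync points */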
+PVRSRV_ERROR SyncFbDumpFenceKM(void *pvSWFenceObj,
+                                  DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                  void *pvDumpDebugFile)
+{
+       PVRSRV_FENCE_SERVER *psFence = (PVRSRV_FENCE_SERVER *) pvSWFenceObj;
+       IMG_UINT32 i;
+
+       PVR_DUMPDEBUG_LOG("Fence: %s, %-9s - <%#"IMG_UINT64_FMTSPECx">",
+                         psFence->pszName,
+                         _SyncFbFenceSyncsHaveSignalled(psFence) ?
+                             "Signalled" : "Active",
+                         psFence->iUID);
+
+       /* ... all sync points in the fence */
+       for (i = 0; i < psFence->uiNumSyncs; i++)
+       {
+               _SyncFbDebugRequestPrintSyncPt(psFence->apsFenceSyncList[i],
+                                              IMG_TRUE,
+                                              pfnDumpDebugPrintf,
+                                              pvDumpDebugFile);
+       }
+
+       return PVRSRV_OK;
+}
+
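+/* Dump debug information (name and sequence numbers) for a SW timeline */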
+PVRSRV_ERROR SyncFbSWDumpTimelineKM(void *pvSWTimelineObj,
+                                    DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                    void *pvDumpDebugFile)
+{
+       PVRSRV_TIMELINE_SERVER *psTl = (PVRSRV_TIMELINE_SERVER *) pvSWTimelineObj;
+
+       OSLockAcquire(psTl->hTlLock);
+
+       PVR_DUMPDEBUG_LOG("TL: %s, SeqNum: %d/%d - <%#"IMG_UINT64_FMTSPECx">",
+                         psTl->pszName,
+                         OSAtomicRead(&psTl->iLastSignalledSeqNum),
+                         OSAtomicRead(&psTl->iSeqNum),
+                         psTl->iUID);
+
+       OSLockRelease(psTl->hTlLock);
+
+       return PVRSRV_OK;
+}
+
+/*****************************************************************************/
+/*                                                                           */
+/*                       IMPORT/EXPORT FUNCTIONS                             */
+/*                                                                           */
+/*****************************************************************************/
+
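+/* Wrap a fence in an export structure, taking a reference on the fence
+ * that is dropped again when the export is destroyed */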
+static PVRSRV_ERROR _SyncFbFenceExport(PVRSRV_FENCE_SERVER *psFence,
+                                       PVRSRV_FENCE_EXPORT **ppsExport)
+{
+       PVRSRV_FENCE_EXPORT *psExport;
+       PVRSRV_ERROR eError;
+
+       psExport = OSAllocMem(sizeof(*psExport));
+       PVR_LOG_GOTO_IF_NOMEM(psExport, eError, err_out);
+
+       _SyncFbFenceAcquire(psFence);
+
+       psExport->psFence = psFence;
+       *ppsExport = psExport;
+
+       eError = PVRSRV_OK;
+err_out:
+       return eError;
+}
+
+static PVRSRV_ERROR _SyncFbFenceExportDestroy(PVRSRV_FENCE_EXPORT *psExport)
+{
+       PVRSRV_ERROR eError;
+
+       eError = SyncFbFenceRelease(psExport->psFence);
+       PVR_LOG_IF_ERROR(eError, "SyncFbFenceRelease");
+
+       OSFreeMem(psExport);
+
+       return eError;
+}
+
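+/* Return the fence wrapped by an export, taking an additional reference on it */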
+static PVRSRV_ERROR _SyncFbFenceImport(PVRSRV_FENCE_EXPORT *psImport,
+                                       PVRSRV_FENCE_SERVER **psFenceOut)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_FENCE_SERVER *psFence;
+
+       psFence = psImport->psFence;
+       _SyncFbFenceAcquire(psFence);
+
+       *psFenceOut = psFence;
+
+       eError = PVRSRV_OK;
+       return eError;
+}
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+PVRSRV_ERROR SyncFbFenceExportInsecure(PVRSRV_FENCE_SERVER *psFence,
+                                       PVRSRV_FENCE_EXPORT **ppsExport)
+{
+       return _SyncFbFenceExport(psFence, ppsExport);
+}
+
+PVRSRV_ERROR SyncFbFenceExportDestroyInsecure(PVRSRV_FENCE_EXPORT *psExport)
+{
+       return _SyncFbFenceExportDestroy(psExport);
+}
+
+PVRSRV_ERROR SyncFbFenceImportInsecure(CONNECTION_DATA *psConnection,
+                                       PVRSRV_DEVICE_NODE *psDevice,
+                                       PVRSRV_FENCE_EXPORT *psImport,
+                                       PVRSRV_FENCE_SERVER **psFenceOut)
+{
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVR_UNREFERENCED_PARAMETER(psDevice);
+
+       return _SyncFbFenceImport(psImport, psFenceOut);
+}
+#endif /* defined(SUPPORT_INSECURE_EXPORT) */
+
+PVRSRV_ERROR SyncFbFenceExportDestroySecure(PVRSRV_FENCE_EXPORT *psExport)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_DPF_ENTERED1(psExport);
+
+       eError = _SyncFbFenceExportDestroy(psExport);
+
+       PVR_DPF_RETURN_RC(eError);
+}
+
+static PVRSRV_ERROR _SyncFbReleaseSecureExport(void *pvExport)
+{
+       return SyncFbFenceExportDestroySecure(pvExport);
+}
+
+PVRSRV_ERROR SyncFbFenceExportSecure(CONNECTION_DATA *psConnection,
+                                     PVRSRV_DEVICE_NODE * psDevNode,
+                                     PVRSRV_FENCE_SERVER *psFence,
+                                     IMG_SECURE_TYPE *phSecure,
+                                     PVRSRV_FENCE_EXPORT **ppsExport,
+                                     CONNECTION_DATA **ppsSecureConnection)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_FENCE_EXPORT *psExport;
+
+       PVR_DPF_ENTERED1(psFence);
+
+       PVR_UNREFERENCED_PARAMETER(ppsSecureConnection);
+
+       eError = _SyncFbFenceExport(psFence, &psExport);
+       PVR_LOG_GOTO_IF_ERROR(eError, "_SyncFbFenceExport", err_out);
+
+       /* Transform it into a secure export */
+       eError = OSSecureExport("fallback_fence",
+                               _SyncFbReleaseSecureExport,
+                               (void *) psExport,
+                               phSecure);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSSecureExport", err_export);
+
+       *ppsExport = psExport;
+       PVR_DPF_RETURN_OK;
+err_export:
+       _SyncFbFenceExportDestroy(psExport);
+err_out:
+       PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR SyncFbFenceImportSecure(CONNECTION_DATA *psConnection,
+                                     PVRSRV_DEVICE_NODE *psDevice,
+                                     IMG_SECURE_TYPE hSecure,
+                                     PVRSRV_FENCE_SERVER **ppsFence)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_FENCE_EXPORT *psImport;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVR_UNREFERENCED_PARAMETER(psDevice);
+
+       PVR_DPF_ENTERED1(hSecure);
+
+       eError = OSSecureImport(hSecure, (void **) &psImport);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSSecureImport", err_out);
+
+       eError = _SyncFbFenceImport(psImport, ppsFence);
+
+       PVR_DPF_RETURN_OK;
+err_out:
+       PVR_DPF_RETURN_RC(eError);
+}
+
+/*****************************************************************************/
+/*                                                                           */
+/*                            TESTING FUNCTIONS                              */
+/*                                                                           */
+/*****************************************************************************/
+#if defined(PVR_TESTING_UTILS)
+
+static void _GetCheckContext(PVRSRV_DEVICE_NODE *psDevNode,
+                             PSYNC_CHECKPOINT_CONTEXT *ppsSyncCheckpointContext)
+{
+       *ppsSyncCheckpointContext = psDevNode->hSyncCheckpointContext;
+}
+
+PVRSRV_ERROR TestIOCTLSyncFbFenceSignalPVR(CONNECTION_DATA *psConnection,
+                                           PVRSRV_DEVICE_NODE *psDevNode,
+                                           void *psFenceIn)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_SYNC_PT *psSyncPt;
+       IMG_UINT32 i;
+       PVRSRV_FENCE_SERVER *psFence = psFenceIn;
+
+       PVR_DPF_ENTERED;
+
+       for (i = 0; i < psFence->uiNumSyncs; i++)
+       {
+               psSyncPt = psFence->apsFenceSyncList[i];
+               OSAtomicWrite(&psSyncPt->iStatus, PVRSRV_SYNC_SIGNALLED);
+
+               OSLockAcquire(psSyncPt->psTl->hTlLock);
+               eError = _SyncFbSyncPtSignalAttached(psSyncPt, PVRSRV_SYNC_SIGNALLED);
+               PVR_LOG_GOTO_IF_ERROR(eError, "_SyncFbSyncPtSignalAttached", eSignal);
+
+               OSLockRelease(psSyncPt->psTl->hTlLock);
+       }
+
+       eError = _SyncFbSignalEO();
+       PVR_LOG_GOTO_IF_ERROR(eError, "_SyncFbSignalEO", eExit);
+
+       PVR_DPF_RETURN_OK;
+
+eSignal:
+       OSLockRelease(psSyncPt->psTl->hTlLock);
+eExit:
+       PVR_DPF_RETURN_RC(eError);
+}
+
+
+PVRSRV_ERROR TestIOCTLSyncFbFenceCreatePVR(CONNECTION_DATA *psConnection,
+                                           PVRSRV_DEVICE_NODE *psDevNode,
+                                           IMG_UINT32 uiNameLength,
+                                           const IMG_CHAR *pszName,
+                                           PVRSRV_TIMELINE iTL,
+                                           PVRSRV_FENCE *piOutFence)
+{
+       PSYNC_CHECKPOINT_CONTEXT psContext = NULL;
+       PSYNC_CHECKPOINT psCheckpoint;
+       PVRSRV_FENCE iFence;
+       PVRSRV_ERROR eError;
+       IMG_UINT64 uiFenceUID;
+
+       PVR_DPF_ENTERED;
+
+       if (iTL == PVRSRV_NO_TIMELINE)
+       {
+               WRN("Supplied invalid timeline, returning invalid fence!");
+               *piOutFence = PVRSRV_NO_FENCE;
+
+               eError = PVRSRV_OK;
+               goto e1;
+       }
+
+       _GetCheckContext(psDevNode,
+                        &psContext);
+
+       eError = SyncFbFenceCreatePVR(psDevNode,
+                                     pszName,
+                                     iTL,
+                                     psContext,
+                                     &iFence,
+                                     &uiFenceUID,
+                                     NULL,
+                                     &psCheckpoint,
+                                     NULL,
+                                     NULL);
+       PVR_LOG_GOTO_IF_ERROR(eError, "SyncFbFenceCreatePVR", e1);
+
+       *piOutFence = iFence;
+
+       PVR_DPF_RETURN_OK;
+
+e1:
+       PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR TestIOCTLSyncFbFenceResolvePVR(CONNECTION_DATA *psConnection,
+                                            PVRSRV_DEVICE_NODE *psDevNode,
+                                            PVRSRV_FENCE iFence)
+{
+       PSYNC_CHECKPOINT_CONTEXT psContext = NULL;
+       PVRSRV_ERROR eError;
+       PSYNC_CHECKPOINT *apsChecks = NULL;
+       IMG_UINT32 uiNumChecks, i;
+       IMG_UINT64 uiFenceUID;
+
+       PVR_DPF_ENTERED;
+
+       _GetCheckContext(psDevNode,
+                        &psContext);
+
+       eError = SyncFbFenceResolvePVR(psContext,
+                                      iFence,
+                                      &uiNumChecks,
+                                      &apsChecks,
+                                      &uiFenceUID);
+       PVR_LOG_GOTO_IF_ERROR(eError, "SyncFbFenceResolvePVR", eExit);
+
+       /* Close Checkpoints */
+       for (i = 0; i < uiNumChecks; i++)
+       {
+               SyncCheckpointFree(apsChecks[i]);
+       }
+
+       OSFreeMem(apsChecks);
+
+       PVR_DPF_RETURN_OK;
+
+eExit:
+       PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR TestIOCTLSyncFbSWTimelineAdvance(CONNECTION_DATA * psConnection,
+                                              PVRSRV_DEVICE_NODE *psDevNode,
+                                              PVRSRV_TIMELINE iSWTl)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_TIMELINE_SERVER *psSWTl;
+       PVRSRV_HANDLE_BASE *psHB;
+       SYNC_TIMELINE_OBJ sSWTimelineObj;
+
+       PVR_DPF_ENTERED;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       eError = _SyncFbLookupProcHandle((IMG_HANDLE) (uintptr_t) iSWTl,
+                                        PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER,
+                                        IMG_FALSE,
+                                        (void**) &psSWTl,
+                                        &psHB);
+       PVR_LOG_GOTO_IF_ERROR(eError, "_SyncFbLookupProcHandle", e0);
+
+       sSWTimelineObj.pvTlObj = psSWTl;
+       sSWTimelineObj.hTimeline = iSWTl;
+
+       eError = SyncSWTimelineAdvanceKM(psDevNode, &sSWTimelineObj);
+       PVR_LOG_GOTO_IF_ERROR(eError, "SyncSWTimelineAdvanceKM", e0);
+
+       PVR_DPF_RETURN_OK;
+
+e0:
+       PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR TestIOCTLSyncFbSWFenceCreate(CONNECTION_DATA * psConnection,
+                                          PVRSRV_DEVICE_NODE *psDevNode,
+                                          PVRSRV_TIMELINE iTl,
+                                          IMG_UINT32 uiFenceNameLength,
+                                          const IMG_CHAR *pszFenceName,
+                                          PVRSRV_FENCE *piFence)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_DPF_ENTERED;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       eError = SyncSWTimelineFenceCreateKM(psDevNode,
+                                            iTl,
+                                            pszFenceName,
+                                            piFence);
+       PVR_LOG_GOTO_IF_ERROR(eError, "SyncSWTimelineFenceCreateKM", e0);
+
+       PVR_DPF_RETURN_OK;
+
+e0:
+       PVR_DPF_RETURN_RC(eError);
+}
+
+#endif /* PVR_TESTING_UTILS */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/sync_server.c b/drivers/gpu/drm/img/img-rogue/services/server/common/sync_server.c
new file mode 100644 (file)
index 0000000..7398f44
--- /dev/null
@@ -0,0 +1,1223 @@
+/*************************************************************************/ /*!
+@File           sync_server.c
+@Title          Server side synchronisation functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side functions for synchronisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "img_types.h"
+#include "img_defs.h"
+#include "sync_server.h"
+#include "allocmem.h"
+#include "device.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "pdump.h"
+#include "pvr_debug.h"
+#include "pvr_notifier.h"
+#include "pdump_km.h"
+#include "sync.h"
+#include "sync_internal.h"
+#include "connection_server.h"
+#include "htbuffer.h"
+#include "rgxhwperf.h"
+#include "info_page.h"
+
+#include "sync_checkpoint_internal.h"
+#include "sync_checkpoint.h"
+
+/* Include this to obtain MAX_SYNC_CHECKPOINTS_PER_FENCE */
+#include "sync_checkpoint_external.h"
+
+/* Include this to obtain PVRSRV_MAX_DEV_VARS */
+#include "pvrsrv_devvar.h"
+
+#if defined(SUPPORT_SECURE_EXPORT)
+#include "ossecure_export.h"
+#endif
+
+/* Set this to enable debug relating to the construction and maintenance of the sync address list */
+#define SYNC_ADDR_LIST_DEBUG 0
+
+/* Set maximum number of FWAddrs that can be accommodated in a SYNC_ADDR_LIST.
+ * This should allow for PVRSRV_MAX_DEV_VARS dev vars plus
+ * MAX_SYNC_CHECKPOINTS_PER_FENCE sync checkpoints for check fences.
+ * The same SYNC_ADDR_LIST is also used to hold UFOs for updates. While this
+ * may need to accommodate the additional sync prim update returned by Native
+ * sync implementation (used for timeline debug), the size calculated from
+ * PVRSRV_MAX_DEV_VARS+MAX_SYNC_CHECKPOINTS_PER_FENCE should be ample.
+ */
+#define PVRSRV_MAX_SYNC_ADDR_LIST_SIZE (PVRSRV_MAX_DEV_VARS+MAX_SYNC_CHECKPOINTS_PER_FENCE)
+/* Check that helper functions will not be preparing longer lists of
+ * UFOs than the FW can handle.
+ */
+static_assert(PVRSRV_MAX_SYNC_ADDR_LIST_SIZE <= RGXFWIF_CCB_CMD_MAX_UFOS,
+              "PVRSRV_MAX_SYNC_ADDR_LIST_SIZE > RGXFWIF_CCB_CMD_MAX_UFOS.");
+
+/* Max number of syncs allowed in a sync prim op */
+#define SYNC_PRIM_OP_MAX_SYNCS 1024
+
+struct _SYNC_PRIMITIVE_BLOCK_
+{
+       PVRSRV_DEVICE_NODE      *psDevNode;
+       DEVMEM_MEMDESC          *psMemDesc;
+       IMG_UINT32                      *pui32LinAddr;
+       IMG_UINT32                      ui32BlockSize;          /*!< Size of the Sync Primitive Block */
+       ATOMIC_T                        sRefCount;
+       DLLIST_NODE                     sConnectionNode;
+       SYNC_CONNECTION_DATA *psSyncConnectionData;     /*!< Link back to the sync connection data if there is one */
+       PRGXFWIF_UFO_ADDR               uiFWAddr;       /*!< The firmware address of the sync prim block */
+};
+
+struct _SYNC_CONNECTION_DATA_
+{
+       DLLIST_NODE     sListHead;  /*!< list of sync block associated with / created against this connection */
+       ATOMIC_T        sRefCount;  /*!< number of references to this object */
+       POS_LOCK        hLock;      /*!< lock protecting the list of sync blocks */
+};
+
+#define DECREMENT_WITH_WRAP(value, sz) ((value) ? ((value) - 1) : ((sz) - 1))
+
+/* this is the max number of syncs we will search or dump
+ * at any time.
+ */
+#define SYNC_RECORD_LIMIT 20000
+
+enum SYNC_RECORD_TYPE
+{
+       SYNC_RECORD_TYPE_UNKNOWN = 0,
+       SYNC_RECORD_TYPE_CLIENT,
+       SYNC_RECORD_TYPE_SERVER,
+};
+
+struct SYNC_RECORD
+{
+       PVRSRV_DEVICE_NODE              *psDevNode;
+       SYNC_PRIMITIVE_BLOCK    *psServerSyncPrimBlock; /*!< handle to _SYNC_PRIMITIVE_BLOCK_ */
+       IMG_UINT32                              ui32SyncOffset;                 /*!< offset to sync in block */
+       IMG_UINT32                              ui32FwBlockAddr;
+       IMG_PID                                 uiPID;
+       IMG_UINT64                              ui64OSTime;
+       enum SYNC_RECORD_TYPE   eRecordType;
+       DLLIST_NODE                             sNode;
+       IMG_CHAR                                szClassName[PVRSRV_SYNC_NAME_LENGTH];
+};
+
+#if defined(SYNC_DEBUG) || defined(REFCOUNT_DEBUG)
+#define SYNC_REFCOUNT_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_WARNING, __FILE__, __LINE__, fmt, __VA_ARGS__)
+#else
+#define SYNC_REFCOUNT_PRINT(fmt, ...)
+#endif
+
+#if defined(SYNC_DEBUG)
+#define SYNC_UPDATES_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_WARNING, __FILE__, __LINE__, fmt, __VA_ARGS__)
+#else
+#define SYNC_UPDATES_PRINT(fmt, ...)
+#endif
+
+/*!
+*****************************************************************************
+ @Function      : SyncPrimitiveBlockToFWAddr
+
+ @Description   : Given a pointer to a sync primitive block and an offset,
+                  returns the firmware address of the sync.
+
+ @Input           psSyncPrimBlock : Sync primitive block which contains the sync
+ @Input           ui32Offset      : Offset of sync within the sync primitive block
+ @Output          psAddrOut       : Absolute FW address of the sync is written out through
+                                    this pointer
+ @Return :        PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input
+                  parameters are invalid.
+*****************************************************************************/
+
+PVRSRV_ERROR
+SyncPrimitiveBlockToFWAddr(SYNC_PRIMITIVE_BLOCK *psSyncPrimBlock,
+                                                       IMG_UINT32 ui32Offset,
+                                               PRGXFWIF_UFO_ADDR *psAddrOut)
+{
+       /* check offset is legal */
+       if (unlikely((ui32Offset >= psSyncPrimBlock->ui32BlockSize) ||
+               (ui32Offset % sizeof(IMG_UINT32))))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "SyncPrimitiveBlockToFWAddr: parameters check failed"));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       psAddrOut->ui32Addr = psSyncPrimBlock->uiFWAddr.ui32Addr + ui32Offset;
+       return PVRSRV_OK;
+}
+
+/*!
+*****************************************************************************
+ @Function      : SyncAddrListGrow
+
+ @Description   : Grow the SYNC_ADDR_LIST so it can accommodate the given
+                  number of syncs, up to a maximum of PVRSRV_MAX_SYNC_ADDR_LIST_SIZE.
+
+ @Input           psList       : The SYNC_ADDR_LIST to grow
+ @Input           ui32NumSyncs : The number of sync addresses to be able to hold
+ @Return :        PVRSRV_OK on success
+*****************************************************************************/
+
+static PVRSRV_ERROR SyncAddrListGrow(SYNC_ADDR_LIST *psList, IMG_UINT32 ui32NumSyncs)
+{
+       if (unlikely(ui32NumSyncs > PVRSRV_MAX_SYNC_ADDR_LIST_SIZE))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: ui32NumSyncs=%u > PVRSRV_MAX_SYNC_ADDR_LIST_SIZE=%u", __func__, ui32NumSyncs, PVRSRV_MAX_SYNC_ADDR_LIST_SIZE));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+       PVR_DPF((PVR_DBG_ERROR, "%s:     Entry psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs));
+#endif
+       if (ui32NumSyncs > psList->ui32NumSyncs)
+       {
+               if (psList->pasFWAddrs == NULL)
+               {
+                       psList->pasFWAddrs = OSAllocMem(sizeof(PRGXFWIF_UFO_ADDR) * PVRSRV_MAX_SYNC_ADDR_LIST_SIZE);
+                       PVR_RETURN_IF_NOMEM(psList->pasFWAddrs);
+               }
+
+               psList->ui32NumSyncs = ui32NumSyncs;
+       }
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+       PVR_DPF((PVR_DBG_ERROR, "%s:     Exit psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs));
+#endif
+       return PVRSRV_OK;
+}
+
+/*!
+*****************************************************************************
+ @Function      : SyncAddrListInit
+
+ @Description   : Initialise a SYNC_ADDR_LIST structure ready for use
+
+ @Input           psList        : The SYNC_ADDR_LIST structure to initialise
+ @Return        : None
+*****************************************************************************/
+
+void
+SyncAddrListInit(SYNC_ADDR_LIST *psList)
+{
+       psList->ui32NumSyncs = 0;
+       psList->pasFWAddrs   = NULL;
+}
+
+/*!
+*****************************************************************************
+ @Function      : SyncAddrListDeinit
+
+ @Description   : Frees any resources associated with the given SYNC_ADDR_LIST
+
+ @Input           psList        : The SYNC_ADDR_LIST structure to deinitialise
+ @Return        : None
+*****************************************************************************/
+
+void
+SyncAddrListDeinit(SYNC_ADDR_LIST *psList)
+{
+       if (psList->pasFWAddrs != NULL)
+       {
+               OSFreeMem(psList->pasFWAddrs);
+       }
+}
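+
+/* Illustrative lifecycle of a SYNC_ADDR_LIST (sketch only, the variable
+ * names below are placeholders, not part of this file):
+ *
+ *     SYNC_ADDR_LIST sList;
+ *     SyncAddrListInit(&sList);
+ *     eError = SyncAddrListPopulate(&sList, ui32Count, apsBlocks, aui32Offsets);
+ *     ...
+ *     SyncAddrListDeinit(&sList);
+ */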
+
+/*!
+*****************************************************************************
+ @Function      : SyncAddrListPopulate
+
+ @Description   : Populate the given SYNC_ADDR_LIST with the FW addresses
+                  of the syncs given by the SYNC_PRIMITIVE_BLOCKs and sync offsets
+
+ @Input           ui32NumSyncs    : The number of syncs being passed in
+ @Input           apsSyncPrimBlock: Array of pointers to SYNC_PRIMITIVE_BLOCK structures
+                                    in which the syncs are based
+ @Input           paui32SyncOffset: Array of offsets within each of the sync primitive blocks
+                                    where the syncs are located
+ @Return :        PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input
+                  parameters are invalid.
+*****************************************************************************/
+
+PVRSRV_ERROR
+SyncAddrListPopulate(SYNC_ADDR_LIST *psList,
+                                               IMG_UINT32 ui32NumSyncs,
+                                               SYNC_PRIMITIVE_BLOCK **apsSyncPrimBlock,
+                                               IMG_UINT32 *paui32SyncOffset)
+{
+       IMG_UINT32 i;
+       PVRSRV_ERROR eError;
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+       PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs));
+#endif
+       if (ui32NumSyncs > psList->ui32NumSyncs)
+       {
+               eError = SyncAddrListGrow(psList, ui32NumSyncs);
+
+               PVR_RETURN_IF_ERROR(eError);
+       }
+
+       psList->ui32NumSyncs = ui32NumSyncs;
+
+       for (i = 0; i < ui32NumSyncs; i++)
+       {
+               eError = SyncPrimitiveBlockToFWAddr(apsSyncPrimBlock[i],
+                                                               paui32SyncOffset[i],
+                                                               &psList->pasFWAddrs[i]);
+
+               PVR_RETURN_IF_ERROR(eError);
+       }
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+       PVR_DPF((PVR_DBG_ERROR, "%s: Exit psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs));
+#endif
+       return PVRSRV_OK;
+}
+
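+/*!
+*****************************************************************************
+ @Function      : SyncAddrListAppendSyncPrim
+
+ @Description   : Append the FW address of a single client sync prim to the
+                  given SYNC_ADDR_LIST, growing the list by one entry.
+
+ @Input           psList     : The SYNC_ADDR_LIST to append to
+ @Input           psSyncPrim : The client sync prim whose FW address is appended
+ @Return :        PVRSRV_OK on success
+*****************************************************************************/
+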
+PVRSRV_ERROR
+SyncAddrListAppendSyncPrim(SYNC_ADDR_LIST          *psList,
+                                                  PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_UINT32 ui32FwAddr = 0;
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+       PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs));
+#endif
+       /* Ensure there's room in psList for the additional sync prim update */
+       eError = SyncAddrListGrow(psList, psList->ui32NumSyncs + 1);
+       PVR_GOTO_IF_ERROR(eError, e0);
+
+       SyncPrimGetFirmwareAddr(psSyncPrim, &ui32FwAddr);
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+       PVR_DPF((PVR_DBG_ERROR, "%s: Appending sync prim <%p> UFO addr (0x%x) to psList->pasFWAddrs[%d]", __func__, (void*)psSyncPrim, ui32FwAddr, psList->ui32NumSyncs-1));
+#endif
+       psList->pasFWAddrs[psList->ui32NumSyncs-1].ui32Addr = ui32FwAddr;
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+       {
+               IMG_UINT32 iii;
+
+               PVR_DPF((PVR_DBG_ERROR, "%s: psList->ui32NumSyncs=%d", __func__, psList->ui32NumSyncs));
+               for (iii=0; iii<psList->ui32NumSyncs; iii++)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: psList->pasFWAddrs[%d].ui32Addr=0x%x", __func__, iii, psList->pasFWAddrs[iii].ui32Addr));
+               }
+       }
+#endif
+e0:
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+       PVR_DPF((PVR_DBG_ERROR, "%s: Exit psList=<%p>, psList->ui32NumSyncs=%d", __func__, (void*)psList, psList->ui32NumSyncs));
+#endif
+       return eError;
+}
+
+
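+/* Append the FW addresses of the given sync checkpoints to psList, marking
+ * each checkpoint as enqueued on the CCB. If bDeRefCheckpoints is set, the
+ * reference taken by the OS resolve_fence() implementation is dropped for
+ * every checkpoint, whether or not the list could be grown. */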
+static PVRSRV_ERROR
+_AppendCheckpoints(SYNC_ADDR_LIST *psList,
+                                  IMG_UINT32 ui32NumCheckpoints,
+                                  PSYNC_CHECKPOINT *apsSyncCheckpoint,
+                                  IMG_BOOL bDeRefCheckpoints)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_UINT32 ui32SyncCheckpointIndex;
+       IMG_UINT32 ui32RollbackSize = psList->ui32NumSyncs;
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+       PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d, ui32NumCheckpoints=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumCheckpoints));
+#endif
+       /* Ensure there's room in psList for the sync checkpoints */
+       eError = SyncAddrListGrow(psList, psList->ui32NumSyncs + ui32NumCheckpoints);
+       if (unlikely(eError != PVRSRV_OK))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: * * * * ERROR * * * * Trying to SyncAddrListGrow(psList=<%p>, psList->ui32NumSyncs=%d, ui32NumCheckpoints=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumCheckpoints));
+               goto e0;
+       }
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+       PVR_DPF((PVR_DBG_ERROR, "%s: (ui32NumCheckpoints=%d) (psList->ui32NumSyncs is now %d) array already contains %d FWAddrs:", __func__, ui32NumCheckpoints, psList->ui32NumSyncs, ui32RollbackSize));
+       if (ui32RollbackSize > 0)
+       {
+               {
+                       IMG_UINT32 kk;
+                       for (kk=0; kk<ui32RollbackSize; kk++)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "%s:    <%p>psList->pasFWAddrs[%d].ui32Addr = %u(0x%x)", __func__,
+                                                (void*)&psList->pasFWAddrs[kk], kk,
+                                                psList->pasFWAddrs[kk].ui32Addr, psList->pasFWAddrs[kk].ui32Addr));
+                       }
+               }
+       }
+       PVR_DPF((PVR_DBG_ERROR, "%s: apsSyncCheckpoint=<%p>, apsSyncCheckpoint[0] = <%p>", __func__, (void*)apsSyncCheckpoint, (void*)apsSyncCheckpoint[0]));
+#endif
+       for (ui32SyncCheckpointIndex=0; ui32SyncCheckpointIndex<ui32NumCheckpoints; ui32SyncCheckpointIndex++)
+       {
+               psList->pasFWAddrs[ui32RollbackSize + ui32SyncCheckpointIndex].ui32Addr = SyncCheckpointGetFirmwareAddr(apsSyncCheckpoint[ui32SyncCheckpointIndex]);
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+               PVR_DPF((PVR_DBG_ERROR, "%s:  SyncCheckpointCCBEnqueued(<%p>)", __func__, (void*)apsSyncCheckpoint[ui32SyncCheckpointIndex]));
+               PVR_DPF((PVR_DBG_ERROR, "%s:                           ID:%d", __func__, SyncCheckpointGetId((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex])));
+#endif
+               SyncCheckpointCCBEnqueued((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]);
+               if (bDeRefCheckpoints)
+               {
+                       /* Drop the reference that was taken internally by the OS implementation of resolve_fence() */
+                       SyncCheckpointDropRef((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]);
+               }
+       }
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+       if (psList->ui32NumSyncs > 0)
+       {
+               IMG_UINT32 kk;
+               for (kk=0; kk<psList->ui32NumSyncs; kk++)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s:    <%p>psList->pasFWAddrs[%d].ui32Addr = %u(0x%x)", __func__,
+                                (void*)&psList->pasFWAddrs[kk], kk,
+                                psList->pasFWAddrs[kk].ui32Addr, psList->pasFWAddrs[kk].ui32Addr));
+               }
+       }
+#endif
+       return eError;
+
+e0:
+       for (ui32SyncCheckpointIndex=0; ui32SyncCheckpointIndex<ui32NumCheckpoints; ui32SyncCheckpointIndex++)
+       {
+               if (bDeRefCheckpoints)
+               {
+                       /* Drop the reference that was taken internally by the OS implementation of resolve_fence() */
+                       SyncCheckpointDropRef((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]);
+               }
+       }
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+       PVR_DPF((PVR_DBG_ERROR, "%s: Exit psList=<%p>, psList->ui32NumSyncs=%d, ui32NumCheckpoints=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumCheckpoints));
+#endif
+       return eError;
+}
+
+/*!
+*****************************************************************************
+ @Function      : SyncAddrListAppendCheckpoints
+
+ @Description   : Append the FW addresses of the sync checkpoints given in
+                  the PSYNC_CHECKPOINTs array to the given SYNC_ADDR_LIST
+
+ @Input           ui32NumCheckpoints : The number of sync checkpoints
+                                           being passed in
+ @Input           apsSyncCheckpoint : Array of PSYNC_CHECKPOINTs whose details
+                                      are to be appended to the SYNC_ADDR_LIST
+ @Return :        PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input
+                  parameters are invalid.
+*****************************************************************************/
+PVRSRV_ERROR
+SyncAddrListAppendCheckpoints(SYNC_ADDR_LIST *psList,
+                                                               IMG_UINT32 ui32NumCheckpoints,
+                                                               PSYNC_CHECKPOINT *apsSyncCheckpoint)
+{
+       return _AppendCheckpoints(psList, ui32NumCheckpoints, apsSyncCheckpoint, IMG_FALSE);
+}
+
+/*!
+*****************************************************************************
+ @Function      : SyncAddrListAppendAndDeRefCheckpoints
+
+ @Description   : Append the FW addresses of the sync checkpoints given in
+                  the PSYNC_CHECKPOINTs array to the given SYNC_ADDR_LIST.
+                  A reference is dropped for each of the checkpoints.
+
+ @Input           ui32NumCheckpoints : The number of sync checkpoints
+                                           being passed in
+ @Input           apsSyncCheckpoint : Array of PSYNC_CHECKPOINTs whose details
+                                      are to be appended to the SYNC_ADDR_LIST
+ @Return :        PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input
+                  parameters are invalid.
+*****************************************************************************/
+PVRSRV_ERROR
+SyncAddrListAppendAndDeRefCheckpoints(SYNC_ADDR_LIST *psList,
+                                                                         IMG_UINT32 ui32NumCheckpoints,
+                                                                         PSYNC_CHECKPOINT *apsSyncCheckpoint)
+{
+       return _AppendCheckpoints(psList, ui32NumCheckpoints, apsSyncCheckpoint, IMG_TRUE);
+}
+
+void
+SyncAddrListDeRefCheckpoints(IMG_UINT32 ui32NumCheckpoints,
+                                                        PSYNC_CHECKPOINT *apsSyncCheckpoint)
+{
+       IMG_UINT32 ui32SyncCheckpointIndex;
+
+       for (ui32SyncCheckpointIndex=0; ui32SyncCheckpointIndex<ui32NumCheckpoints; ui32SyncCheckpointIndex++)
+       {
+               /* Drop the reference that was taken internally by the OS implementation of resolve_fence() */
+               SyncCheckpointDropRef((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]);
+       }
+}
+
+/*!
+*****************************************************************************
+ @Function      : SyncAddrListRollbackCheckpoints
+
+ @Description   : Rollback the enqueued count of each sync checkpoint in
+                  the given SYNC_ADDR_LIST. This needs to be done in the
+                  event of the kick call failing, so that the reference
+                  taken on each sync checkpoint on the firmware's behalf
+                  is dropped.
+
+ @Input           psList        : The SYNC_ADDR_LIST structure containing
+                                  sync checkpoints to be rolled back
+
+ @Return :        PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input
+                  parameters are invalid.
+*****************************************************************************/
+
+PVRSRV_ERROR
+SyncAddrListRollbackCheckpoints(PVRSRV_DEVICE_NODE *psDevNode, SYNC_ADDR_LIST *psList)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_UINT32 ui32SyncIndex;
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+       PVR_DPF((PVR_DBG_ERROR, "%s: called (psList=<%p>)", __func__, (void*)psList));
+#endif
+       if (psList)
+       {
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+               PVR_DPF((PVR_DBG_ERROR, "%s: psList->ui32NumSyncs=%d", __func__, psList->ui32NumSyncs));
+#endif
+               for (ui32SyncIndex=0; ui32SyncIndex<psList->ui32NumSyncs; ui32SyncIndex++)
+               {
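+                       /* Only sync checkpoints (FW addresses with bit 0 set) are rolled back */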
+                       if (psList->pasFWAddrs[ui32SyncIndex].ui32Addr & 0x1)
+                       {
+                               SyncCheckpointRollbackFromUFO(psDevNode, psList->pasFWAddrs[ui32SyncIndex].ui32Addr);
+                       }
+               }
+       }
+       return eError;
+}
+
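+/* Create a sync record for the given sync (for debug/tracking), emit an
+ * HWPerf alloc event and add the record to the device's record list,
+ * which is bounded by SYNC_RECORD_LIMIT. */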
+PVRSRV_ERROR
+PVRSRVSyncRecordAddKM(CONNECTION_DATA *psConnection,
+                                         PVRSRV_DEVICE_NODE *psDevNode,
+                                         SYNC_RECORD_HANDLE *phRecord,
+                                         SYNC_PRIMITIVE_BLOCK *hServerSyncPrimBlock,
+                                         IMG_UINT32 ui32FwBlockAddr,
+                                         IMG_UINT32 ui32SyncOffset,
+                                         IMG_BOOL bServerSync,
+                                         IMG_UINT32 ui32ClassNameSize,
+                                         const IMG_CHAR *pszClassName)
+{
+       struct SYNC_RECORD * psSyncRec;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       RGXSRV_HWPERF_ALLOC(psDevNode, SYNC,
+                           ui32FwBlockAddr + ui32SyncOffset,
+                           pszClassName,
+                           ui32ClassNameSize);
+
+       PVR_RETURN_IF_INVALID_PARAM(phRecord);
+
+       *phRecord = NULL;
+
+       psSyncRec = OSAllocMem(sizeof(*psSyncRec));
+       PVR_GOTO_IF_NOMEM(psSyncRec, eError, fail_alloc);
+
+       psSyncRec->psDevNode = psDevNode;
+       psSyncRec->psServerSyncPrimBlock = hServerSyncPrimBlock;
+       psSyncRec->ui32SyncOffset = ui32SyncOffset;
+       psSyncRec->ui32FwBlockAddr = ui32FwBlockAddr;
+       psSyncRec->ui64OSTime = OSClockns64();
+       psSyncRec->uiPID = OSGetCurrentProcessID();
+       psSyncRec->eRecordType = bServerSync? SYNC_RECORD_TYPE_SERVER: SYNC_RECORD_TYPE_CLIENT;
+
+       if (pszClassName)
+       {
+               if (ui32ClassNameSize >= PVRSRV_SYNC_NAME_LENGTH)
+                       ui32ClassNameSize = PVRSRV_SYNC_NAME_LENGTH;
+               /* Copy over the class name annotation */
+               OSStringLCopy(psSyncRec->szClassName, pszClassName, ui32ClassNameSize);
+       }
+       else
+       {
+               /* No class name annotation */
+               psSyncRec->szClassName[0] = 0;
+       }
+
+       OSLockAcquire(psDevNode->hSyncServerRecordLock);
+       if (psDevNode->ui32SyncServerRecordCount < SYNC_RECORD_LIMIT)
+       {
+               dllist_add_to_head(&psDevNode->sSyncServerRecordList, &psSyncRec->sNode);
+               psDevNode->ui32SyncServerRecordCount++;
+
+               if (psDevNode->ui32SyncServerRecordCount > psDevNode->ui32SyncServerRecordCountHighWatermark)
+               {
+                       psDevNode->ui32SyncServerRecordCountHighWatermark = psDevNode->ui32SyncServerRecordCount;
+               }
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to add sync record \"%s\". %u records already exist.",
+                                                                                       __func__,
+                                                                                       pszClassName,
+                                                                                       psDevNode->ui32SyncServerRecordCount));
+               OSFreeMem(psSyncRec);
+               psSyncRec = NULL;
+               eError = PVRSRV_ERROR_TOOMANYBUFFERS;
+       }
+       OSLockRelease(psDevNode->hSyncServerRecordLock);
+
+       *phRecord = (SYNC_RECORD_HANDLE)psSyncRec;
+
+fail_alloc:
+       return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncRecordRemoveByHandleKM(
+                       SYNC_RECORD_HANDLE hRecord)
+{
+       struct SYNC_RECORD **ppFreedSync;
+       struct SYNC_RECORD *pSync = (struct SYNC_RECORD*)hRecord;
+       PVRSRV_DEVICE_NODE *psDevNode;
+
+       PVR_RETURN_IF_INVALID_PARAM(hRecord);
+
+       psDevNode = pSync->psDevNode;
+
+       OSLockAcquire(psDevNode->hSyncServerRecordLock);
+
+       RGXSRV_HWPERF_FREE(psDevNode, SYNC, pSync->ui32FwBlockAddr + pSync->ui32SyncOffset);
+
+       dllist_remove_node(&pSync->sNode);
+
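+       /* Freed records are parked in a small circular history buffer so they can
+        * still be shown in debug dumps; the slot being reused is released first. */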
+       if (psDevNode->uiSyncServerRecordFreeIdx >= PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: freed sync record index out of range",
+                                __func__));
+               psDevNode->uiSyncServerRecordFreeIdx = 0;
+       }
+       ppFreedSync = &psDevNode->apsSyncServerRecordsFreed[psDevNode->uiSyncServerRecordFreeIdx];
+       psDevNode->uiSyncServerRecordFreeIdx =
+               (psDevNode->uiSyncServerRecordFreeIdx + 1) % PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN;
+
+       if (*ppFreedSync)
+       {
+               OSFreeMem(*ppFreedSync);
+       }
+       pSync->psServerSyncPrimBlock = NULL;
+       pSync->ui64OSTime = OSClockns64();
+       *ppFreedSync = pSync;
+
+       psDevNode->ui32SyncServerRecordCount--;
+
+       OSLockRelease(psDevNode->hSyncServerRecordLock);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncAllocEventKM(CONNECTION_DATA *psConnection,
+                       PVRSRV_DEVICE_NODE *psDevNode,
+                       IMG_BOOL bServerSync,
+                       IMG_UINT32 ui32FWAddr,
+                       IMG_UINT32 ui32ClassNameSize,
+                       const IMG_CHAR *pszClassName)
+{
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       RGXSRV_HWPERF_ALLOC(psDevNode, SYNC, ui32FWAddr, pszClassName, ui32ClassNameSize);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncFreeEventKM(CONNECTION_DATA *psConnection,
+                       PVRSRV_DEVICE_NODE *psDevNode,
+                       IMG_UINT32 ui32FWAddr)
+{
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       RGXSRV_HWPERF_FREE(psDevNode, SYNC, ui32FWAddr);
+
+       return PVRSRV_OK;
+}
+
+static
+void _SyncConnectionRef(SYNC_CONNECTION_DATA *psSyncConnectionData)
+{
+       IMG_INT iRefCount = OSAtomicIncrement(&psSyncConnectionData->sRefCount);
+
+       SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d",
+                                               __func__, psSyncConnectionData, iRefCount);
+       PVR_UNREFERENCED_PARAMETER(iRefCount);
+}
+
+static
+void _SyncConnectionUnref(SYNC_CONNECTION_DATA *psSyncConnectionData)
+{
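+       /* The connection data holds one reference from SyncRegisterConnection()
+        * plus one per sync prim block added to it; once the last reference is
+        * dropped the (by now empty) block list and the lock are torn down. */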
+       IMG_INT iRefCount = OSAtomicDecrement(&psSyncConnectionData->sRefCount);
+       if (iRefCount == 0)
+       {
+               SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d",
+                                   __func__, psSyncConnectionData, iRefCount);
+
+               PVR_ASSERT(dllist_is_empty(&psSyncConnectionData->sListHead));
+               OSLockDestroy(psSyncConnectionData->hLock);
+               OSFreeMem(psSyncConnectionData);
+       }
+       else
+       {
+               SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d",
+                                   __func__, psSyncConnectionData, iRefCount);
+               PVR_ASSERT(iRefCount > 0);
+       }
+}
+
+static
+void _SyncConnectionAddBlock(CONNECTION_DATA *psConnection, SYNC_PRIMITIVE_BLOCK *psBlock)
+{
+       if (psConnection)
+       {
+               SYNC_CONNECTION_DATA *psSyncConnectionData = psConnection->psSyncConnectionData;
+
+               /*
+                       Make sure the connection doesn't go away. It doesn't matter that we will release
+                       the lock in between, as the refcount and the list don't have to be updated
+                       atomically with respect to each other.
+               */
+               _SyncConnectionRef(psSyncConnectionData);
+
+               OSLockAcquire(psSyncConnectionData->hLock);
+               dllist_add_to_head(&psSyncConnectionData->sListHead, &psBlock->sConnectionNode);
+               OSLockRelease(psSyncConnectionData->hLock);
+               psBlock->psSyncConnectionData = psSyncConnectionData;
+       }
+       else
+       {
+               psBlock->psSyncConnectionData = NULL;
+       }
+}
+
+static
+void _SyncConnectionRemoveBlock(SYNC_PRIMITIVE_BLOCK *psBlock)
+{
+       SYNC_CONNECTION_DATA *psSyncConnectionData = psBlock->psSyncConnectionData;
+
+       if (psBlock->psSyncConnectionData)
+       {
+               OSLockAcquire(psSyncConnectionData->hLock);
+               dllist_remove_node(&psBlock->sConnectionNode);
+               OSLockRelease(psSyncConnectionData->hLock);
+
+               _SyncConnectionUnref(psBlock->psSyncConnectionData);
+       }
+}
+
+static inline
+void _DoPrimBlockFree(SYNC_PRIMITIVE_BLOCK *psSyncBlk)
+{
+       PVRSRV_DEVICE_NODE *psDevNode = psSyncBlk->psDevNode;
+
+       SYNC_REFCOUNT_PRINT("%s: Sync block %p, refcount = %d (remove)",
+                           __func__, psSyncBlk, OSAtomicRead(&psSyncBlk->sRefCount));
+
+       PVR_ASSERT(OSAtomicRead(&psSyncBlk->sRefCount) == 1);
+
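+       /* Detach the block from its connection, drop the CPU mapping, free the
+        * firmware UFO block and finally the host-side structure. */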
+       _SyncConnectionRemoveBlock(psSyncBlk);
+       DevmemReleaseCpuVirtAddr(psSyncBlk->psMemDesc);
+       psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->psMemDesc);
+       OSFreeMem(psSyncBlk);
+}
+
+PVRSRV_ERROR
+PVRSRVAllocSyncPrimitiveBlockKM(CONNECTION_DATA *psConnection,
+                                PVRSRV_DEVICE_NODE * psDevNode,
+                                                               SYNC_PRIMITIVE_BLOCK **ppsSyncBlk,
+                                                               IMG_UINT32 *puiSyncPrimVAddr,
+                                                               IMG_UINT32 *puiSyncPrimBlockSize,
+                                                               PMR        **ppsSyncPMR)
+{
+       SYNC_PRIMITIVE_BLOCK *psNewSyncBlk;
+       PVRSRV_ERROR eError;
+
+       psNewSyncBlk = OSAllocMem(sizeof(SYNC_PRIMITIVE_BLOCK));
+       PVR_GOTO_IF_NOMEM(psNewSyncBlk, eError, e0);
+
+       psNewSyncBlk->psDevNode = psDevNode;
+
+       PDUMPCOMMENTWITHFLAGS(psDevNode, PDUMP_FLAGS_CONTINUOUS, "Allocate UFO block");
+
+       eError = psDevNode->pfnAllocUFOBlock(psDevNode,
+                                                                                &psNewSyncBlk->psMemDesc,
+                                                                                &psNewSyncBlk->uiFWAddr.ui32Addr,
+                                                                                &psNewSyncBlk->ui32BlockSize);
+       PVR_GOTO_IF_ERROR(eError, e1);
+
+       *puiSyncPrimVAddr = psNewSyncBlk->uiFWAddr.ui32Addr;
+
+       eError = DevmemAcquireCpuVirtAddr(psNewSyncBlk->psMemDesc,
+                                                                         (void **) &psNewSyncBlk->pui32LinAddr);
+       PVR_GOTO_IF_ERROR(eError, e2);
+
+       eError = DevmemLocalGetImportHandle(psNewSyncBlk->psMemDesc, (void **) ppsSyncPMR);
+
+       PVR_GOTO_IF_ERROR(eError, e3);
+
+       OSAtomicWrite(&psNewSyncBlk->sRefCount, 1);
+
+       /* If there is a connection pointer then add the new block onto its list */
+       _SyncConnectionAddBlock(psConnection, psNewSyncBlk);
+
+       *ppsSyncBlk = psNewSyncBlk;
+       *puiSyncPrimBlockSize = psNewSyncBlk->ui32BlockSize;
+
+       PDUMPCOMMENTWITHFLAGS(psDevNode, PDUMP_FLAGS_CONTINUOUS,
+                                                 "Allocated UFO block (FirmwareVAddr = 0x%08x)",
+                                                 *puiSyncPrimVAddr);
+
+       return PVRSRV_OK;
+
+e3:
+       DevmemReleaseCpuVirtAddr(psNewSyncBlk->psMemDesc);
+e2:
+       psDevNode->pfnFreeUFOBlock(psDevNode, psNewSyncBlk->psMemDesc);
+e1:
+       OSFreeMem(psNewSyncBlk);
+e0:
+       return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVFreeSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk)
+{
+
+       /* This function is an alternative to the above without reference counting.
+        * With the removal of sync prim ops for server syncs we no longer have to
+        * reference count prim blocks as the reference will never be incremented /
+        * decremented by a prim op */
+       _DoPrimBlockFree(psSyncBlk);
+       return PVRSRV_OK;
+}
+
+static INLINE IMG_BOOL _CheckSyncIndex(SYNC_PRIMITIVE_BLOCK *psSyncBlk,
+                                                       IMG_UINT32 ui32Index)
+{
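+       /* The block is an array of 32-bit sync prims, so an index is valid if its
+        * byte offset falls within the block size. */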
+       return ((ui32Index * sizeof(IMG_UINT32)) < psSyncBlk->ui32BlockSize);
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimSetKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Index,
+                                       IMG_UINT32 ui32Value)
+{
+       if (_CheckSyncIndex(psSyncBlk, ui32Index))
+       {
+               psSyncBlk->pui32LinAddr[ui32Index] = ui32Value;
+               return PVRSRV_OK;
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncPrimSetKM: Index %u out of range for "
+                                                       "0x%08X byte sync block (value 0x%08X)",
+                                                       ui32Index,
+                                                       psSyncBlk->ui32BlockSize,
+                                                       ui32Value));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+}
+
+#if defined(PDUMP)
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
+{
+       /*
+               We might be asked to PDump sync state outside of the capture range
+               (e.g. texture uploads) so make this continuous.
+       */
+       DevmemPDumpLoadMemValue32(psSyncBlk->psMemDesc,
+                                          ui32Offset,
+                                          ui32Value,
+                                          PDUMP_FLAGS_CONTINUOUS);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset)
+{
+       /*
+               We might be asked to PDump sync state outside of the capture range
+               (e.g. texture uploads) so make this continuous.
+       */
+       DevmemPDumpLoadMem(psSyncBlk->psMemDesc,
+                                          ui32Offset,
+                                          sizeof(IMG_UINT32),
+                                          PDUMP_FLAGS_CONTINUOUS);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset,
+                                                IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask,
+                                                PDUMP_POLL_OPERATOR eOperator,
+                                                PDUMP_FLAGS_T ui32PDumpFlags)
+{
+       DevmemPDumpDevmemPol32(psSyncBlk->psMemDesc,
+                                                  ui32Offset,
+                                                  ui32Value,
+                                                  ui32Mask,
+                                                  eOperator,
+                                                  ui32PDumpFlags);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset,
+                                                IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize,
+                                                IMG_UINT64 uiBufferSize)
+{
+       DevmemPDumpCBP(psSyncBlk->psMemDesc,
+                                  ui32Offset,
+                                  uiWriteOffset,
+                                  uiPacketSize,
+                                  uiBufferSize);
+       return PVRSRV_OK;
+}
+#endif
+
+/* SyncRegisterConnection */
+PVRSRV_ERROR SyncRegisterConnection(SYNC_CONNECTION_DATA **ppsSyncConnectionData)
+{
+       SYNC_CONNECTION_DATA *psSyncConnectionData;
+       PVRSRV_ERROR eError;
+
+       psSyncConnectionData = OSAllocMem(sizeof(SYNC_CONNECTION_DATA));
+       if (psSyncConnectionData == NULL)
+       {
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto fail_alloc;
+       }
+
+       eError = OSLockCreate(&psSyncConnectionData->hLock);
+       PVR_GOTO_IF_ERROR(eError, fail_lockcreate);
+       dllist_init(&psSyncConnectionData->sListHead);
+       OSAtomicWrite(&psSyncConnectionData->sRefCount, 1);
+
+       *ppsSyncConnectionData = psSyncConnectionData;
+       return PVRSRV_OK;
+
+fail_lockcreate:
+       OSFreeMem(psSyncConnectionData);
+fail_alloc:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+/* SyncUnregisterConnection */
+void SyncUnregisterConnection(SYNC_CONNECTION_DATA *psSyncConnectionData)
+{
+       _SyncConnectionUnref(psSyncConnectionData);
+}
+
+void SyncConnectionPDumpSyncBlocks(PVRSRV_DEVICE_NODE *psDevNode, void *hSyncPrivData, PDUMP_TRANSITION_EVENT eEvent)
+{
+       if ((eEvent == PDUMP_TRANSITION_EVENT_RANGE_ENTERED) || (eEvent == PDUMP_TRANSITION_EVENT_BLOCK_STARTED))
+       {
+               SYNC_CONNECTION_DATA *psSyncConnectionData = hSyncPrivData;
+               DLLIST_NODE *psNode, *psNext;
+
+               OSLockAcquire(psSyncConnectionData->hLock);
+
+               PDUMPCOMMENT(psDevNode, "Dump client Sync Prim state");
+               dllist_foreach_node(&psSyncConnectionData->sListHead, psNode, psNext)
+               {
+                       SYNC_PRIMITIVE_BLOCK *psSyncBlock =
+                               IMG_CONTAINER_OF(psNode, SYNC_PRIMITIVE_BLOCK, sConnectionNode);
+
+                       DevmemPDumpLoadMem(psSyncBlock->psMemDesc,
+                                       0,
+                                       psSyncBlock->ui32BlockSize,
+                                       PDUMP_FLAGS_CONTINUOUS);
+               }
+
+               OSLockRelease(psSyncConnectionData->hLock);
+       }
+}
+
+void SyncRecordLookup(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 ui32FwAddr,
+                                         IMG_CHAR * pszSyncInfo, size_t len)
+{
+       DLLIST_NODE *psNode, *psNext;
+       IMG_INT iEnd;
+       IMG_BOOL bFound = IMG_FALSE;
+
+       if (!pszSyncInfo)
+       {
+               return;
+       }
+
+       OSLockAcquire(psDevNode->hSyncServerRecordLock);
+       pszSyncInfo[0] = '\0';
+
+       dllist_foreach_node(&psDevNode->sSyncServerRecordList, psNode, psNext)
+       {
+               struct SYNC_RECORD *psSyncRec =
+                       IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode);
+               if ((psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset) == ui32FwAddr
+                       && SYNC_RECORD_TYPE_UNKNOWN != psSyncRec->eRecordType
+                       && psSyncRec->psServerSyncPrimBlock
+                       && psSyncRec->psServerSyncPrimBlock->pui32LinAddr
+                       )
+               {
+                       IMG_UINT32 *pui32SyncAddr;
+                       pui32SyncAddr = psSyncRec->psServerSyncPrimBlock->pui32LinAddr
+                               + (psSyncRec->ui32SyncOffset/sizeof(IMG_UINT32));
+                       iEnd = OSSNPrintf(pszSyncInfo, len, "Cur=0x%08x %s:%05u (%s)",
+                               *pui32SyncAddr,
+                               ((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"),
+                               psSyncRec->uiPID,
+                               psSyncRec->szClassName
+                               );
+                       if (iEnd >= 0 && iEnd < len)
+                       {
+                               pszSyncInfo[iEnd] = '\0';
+                       }
+                       bFound = IMG_TRUE;
+                       break;
+               }
+       }
+
+       OSLockRelease(psDevNode->hSyncServerRecordLock);
+
+       if (!bFound && (psDevNode->ui32SyncServerRecordCountHighWatermark == SYNC_RECORD_LIMIT))
+       {
+               OSSNPrintf(pszSyncInfo, len, "(Record may be lost)");
+       }
+}
+
+#define NS_IN_S (1000000000UL)
+static void _SyncRecordPrint(struct SYNC_RECORD *psSyncRec,
+                                       IMG_UINT64 ui64TimeNow,
+                                       DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                       void *pvDumpDebugFile)
+{
+       SYNC_PRIMITIVE_BLOCK *psSyncBlock = psSyncRec->psServerSyncPrimBlock;
+
+       if (SYNC_RECORD_TYPE_UNKNOWN != psSyncRec->eRecordType)
+       {
+               IMG_UINT64 ui64DeltaS;
+               IMG_UINT32 ui32DeltaF;
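+               /* Split the age of the record (in ns) into whole seconds and the
+                * fractional nanosecond remainder for printing. */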
+               IMG_UINT64 ui64Delta = ui64TimeNow - psSyncRec->ui64OSTime;
+               ui64DeltaS = OSDivide64(ui64Delta, NS_IN_S, &ui32DeltaF);
+
+               if (psSyncBlock && psSyncBlock->pui32LinAddr)
+               {
+                       IMG_UINT32 *pui32SyncAddr;
+                       pui32SyncAddr = psSyncBlock->pui32LinAddr
+                               + (psSyncRec->ui32SyncOffset/sizeof(IMG_UINT32));
+
+                       PVR_DUMPDEBUG_LOG("\t%s %05u %05" IMG_UINT64_FMTSPEC ".%09u FWAddr=0x%08x Val=0x%08x (%s)",
+                               ((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"),
+                               psSyncRec->uiPID,
+                               ui64DeltaS, ui32DeltaF,
+                               (psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset),
+                               *pui32SyncAddr,
+                               psSyncRec->szClassName
+                               );
+               }
+               else
+               {
+                       PVR_DUMPDEBUG_LOG("\t%s %05u %05" IMG_UINT64_FMTSPEC ".%09u FWAddr=0x%08x Val=<null_ptr> (%s)",
+                               ((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"),
+                               psSyncRec->uiPID,
+                               ui64DeltaS, ui32DeltaF,
+                               (psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset),
+                               psSyncRec->szClassName
+                               );
+               }
+       }
+}
+
+static void _SyncRecordRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+                                       IMG_UINT32 ui32VerbLevel,
+                                       DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                       void *pvDumpDebugFile)
+{
+       PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle;
+       IMG_UINT64 ui64TimeNowS;
+       IMG_UINT32 ui32TimeNowF;
+       IMG_UINT64 ui64TimeNow = OSClockns64();
+       DLLIST_NODE *psNode, *psNext;
+
+       ui64TimeNowS = OSDivide64(ui64TimeNow, NS_IN_S, &ui32TimeNowF);
+
+       if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM))
+       {
+               IMG_UINT32 i;
+               OSLockAcquire(psDevNode->hSyncServerRecordLock);
+
+               PVR_DUMPDEBUG_LOG("Dumping all allocated syncs. Allocated: %u High watermark: %u @ %05" IMG_UINT64_FMTSPEC ".%09u",
+                                                                               psDevNode->ui32SyncServerRecordCount,
+                                                                               psDevNode->ui32SyncServerRecordCountHighWatermark,
+                                                                               ui64TimeNowS,
+                                                                               ui32TimeNowF);
+               if (psDevNode->ui32SyncServerRecordCountHighWatermark == SYNC_RECORD_LIMIT)
+               {
+                       PVR_DUMPDEBUG_LOG("Warning: Record limit (%u) was reached. Some sync checkpoints may not have been recorded in the debug information.",
+                                                                                                                SYNC_RECORD_LIMIT);
+               }
+
+               PVR_DUMPDEBUG_LOG("\t%-6s %-5s %-15s %-17s %-14s (%s)",
+                                       "Type", "PID", "Time Delta (s)", "Address", "Value", "Annotation");
+
+               dllist_foreach_node(&psDevNode->sSyncServerRecordList, psNode, psNext)
+               {
+                       struct SYNC_RECORD *psSyncRec =
+                               IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode);
+                       _SyncRecordPrint(psSyncRec, ui64TimeNow, pfnDumpDebugPrintf, pvDumpDebugFile);
+               }
+
+               PVR_DUMPDEBUG_LOG("Dumping all recently freed syncs @ %05" IMG_UINT64_FMTSPEC ".%09u",
+                                                 ui64TimeNowS, ui32TimeNowF);
+               PVR_DUMPDEBUG_LOG("\t%-6s %-5s %-15s %-17s %-14s (%s)",
+                                       "Type", "PID", "Time Delta (s)", "Address", "Value", "Annotation");
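+               /* Walk the freed-record history backwards from the most recently
+                * freed entry until the ring wraps around or an empty slot is hit. */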
+               for (i = DECREMENT_WITH_WRAP(psDevNode->uiSyncServerRecordFreeIdx, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN);
+                        i != psDevNode->uiSyncServerRecordFreeIdx;
+                        i = DECREMENT_WITH_WRAP(i, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN))
+               {
+                       if (psDevNode->apsSyncServerRecordsFreed[i])
+                       {
+                               _SyncRecordPrint(psDevNode->apsSyncServerRecordsFreed[i],
+                                                                ui64TimeNow, pfnDumpDebugPrintf, pvDumpDebugFile);
+                       }
+                       else
+                       {
+                               break;
+                       }
+               }
+
+               OSLockRelease(psDevNode->hSyncServerRecordLock);
+       }
+}
+#undef NS_IN_S
+
+static PVRSRV_ERROR SyncRecordListInit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+       PVRSRV_ERROR eError;
+
+       psDevNode->ui32SyncServerRecordCount = 0;
+       psDevNode->ui32SyncServerRecordCountHighWatermark = 0;
+
+       eError = OSLockCreate(&psDevNode->hSyncServerRecordLock);
+       PVR_GOTO_IF_ERROR(eError, fail_lock_create);
+       dllist_init(&psDevNode->sSyncServerRecordList);
+
+       eError = PVRSRVRegisterDeviceDbgRequestNotify(&psDevNode->hSyncServerRecordNotify,
+                                                         psDevNode,
+                                                         _SyncRecordRequest,
+                                                         DEBUG_REQUEST_SYNCTRACKING,
+                                                         psDevNode);
+
+       PVR_GOTO_IF_ERROR(eError, fail_dbg_register);
+
+       return PVRSRV_OK;
+
+fail_dbg_register:
+       OSLockDestroy(psDevNode->hSyncServerRecordLock);
+fail_lock_create:
+       return eError;
+}
+
+static void SyncRecordListDeinit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+       DLLIST_NODE *psNode, *psNext;
+       int i;
+
+       OSLockAcquire(psDevNode->hSyncServerRecordLock);
+       dllist_foreach_node(&psDevNode->sSyncServerRecordList, psNode, psNext)
+       {
+               struct SYNC_RECORD *pSyncRec =
+                       IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode);
+
+               dllist_remove_node(psNode);
+               OSFreeMem(pSyncRec);
+       }
+
+       for (i = 0; i < PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN; i++)
+       {
+               if (psDevNode->apsSyncServerRecordsFreed[i])
+               {
+                       OSFreeMem(psDevNode->apsSyncServerRecordsFreed[i]);
+                       psDevNode->apsSyncServerRecordsFreed[i] = NULL;
+               }
+       }
+       OSLockRelease(psDevNode->hSyncServerRecordLock);
+
+       if (psDevNode->hSyncServerRecordNotify)
+       {
+               PVRSRVUnregisterDeviceDbgRequestNotify(psDevNode->hSyncServerRecordNotify);
+       }
+       OSLockDestroy(psDevNode->hSyncServerRecordLock);
+}
+
+PVRSRV_ERROR SyncServerInit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+       PVRSRV_ERROR eError;
+
+       if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED)
+       {
+               eError = SyncRecordListInit(psDevNode);
+               PVR_GOTO_IF_ERROR(eError, fail_record_list);
+       }
+
+       return PVRSRV_OK;
+
+fail_record_list:
+       return eError;
+}
+
+void SyncServerDeinit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+
+       if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED)
+       {
+               SyncRecordListDeinit(psDevNode);
+       }
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/tlintern.c b/drivers/gpu/drm/img/img-rogue/services/server/common/tlintern.c
new file mode 100644 (file)
index 0000000..70d8b09
--- /dev/null
@@ -0,0 +1,473 @@
+/*************************************************************************/ /*!
+@File
+@Title          Transport Layer kernel side API implementation.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Transport Layer functions available to driver components in
+                the driver.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+#include "pvr_debug.h"
+
+#include "allocmem.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+#include "devicemem.h"
+
+#include "pvrsrv_tlcommon.h"
+#include "tlintern.h"
+
+/*
+ * Make functions
+ */
+PTL_STREAM_DESC
+TLMakeStreamDesc(PTL_SNODE f1, IMG_UINT32 f2, IMG_HANDLE f3)
+{
+       PTL_STREAM_DESC ps = OSAllocZMem(sizeof(TL_STREAM_DESC));
+       if (ps == NULL)
+       {
+               return NULL;
+       }
+       ps->psNode = f1;
+       ps->ui32Flags = f2;
+       ps->hReadEvent = f3;
+       ps->uiRefCount = 1;
+
+       if (f2 & PVRSRV_STREAM_FLAG_READ_LIMIT)
+       {
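+               /* Record the stream's current write offset as this descriptor's read
+                * limit, bounding reads to data already present at creation time. */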
+               ps->ui32ReadLimit = f1->psStream->ui32Write;
+       }
+       return ps;
+}
+
+PTL_SNODE
+TLMakeSNode(IMG_HANDLE f2, TL_STREAM *f3, TL_STREAM_DESC *f4)
+{
+       PTL_SNODE ps = OSAllocZMem(sizeof(TL_SNODE));
+       if (ps == NULL)
+       {
+               return NULL;
+       }
+       ps->hReadEventObj = f2;
+       ps->psStream = f3;
+       ps->psRDesc = f4;
+       f3->psNode = ps;
+       return ps;
+}
+
+/*
+ * Transport Layer Global top variables and functions
+ */
+static TL_GLOBAL_DATA sTLGlobalData;
+
+TL_GLOBAL_DATA *TLGGD(void) /* TLGetGlobalData() */
+{
+       return &sTLGlobalData;
+}
+
+/* TLInit must only be called once at driver initialisation.
+ * An assert is provided to check this condition on debug builds.
+ */
+PVRSRV_ERROR
+TLInit(void)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(sTLGlobalData.hTLGDLock == NULL && sTLGlobalData.hTLEventObj == NULL);
+
+       /* Allocate a lock for TL global data, to be used while updating the TL data.
+        * This is for making TL global data multi-thread safe */
+       eError = OSLockCreate(&sTLGlobalData.hTLGDLock);
+       PVR_GOTO_IF_ERROR(eError, e0);
+
+       /* Allocate the event object used to signal global TL events such as
+        * a new stream created */
+       eError = OSEventObjectCreate("TLGlobalEventObj", &sTLGlobalData.hTLEventObj);
+       PVR_GOTO_IF_ERROR(eError, e1);
+
+       PVR_DPF_RETURN_OK;
+
+/* Don't allow the driver to start up on error */
+e1:
+       OSLockDestroy (sTLGlobalData.hTLGDLock);
+       sTLGlobalData.hTLGDLock = NULL;
+e0:
+       PVR_DPF_RETURN_RC (eError);
+}
+
+static void RemoveAndFreeStreamNode(PTL_SNODE psRemove)
+{
+       TL_GLOBAL_DATA*  psGD = TLGGD();
+       PTL_SNODE*       last;
+       PTL_SNODE        psn;
+       PVRSRV_ERROR     eError;
+
+       PVR_DPF_ENTERED;
+
+       /* Unlink the stream node from the master list */
+       PVR_ASSERT(psGD->psHead);
+       last = &psGD->psHead;
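+       /* Walk the singly-linked list keeping a pointer to the previous link so the
+        * matching node can be unlinked in place. */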
+       for (psn = psGD->psHead; psn; psn=psn->psNext)
+       {
+               if (psn == psRemove)
+               {
+                       /* Other calling code may have freed and zeroed the pointers */
+                       if (psn->psRDesc)
+                       {
+                               OSFreeMem(psn->psRDesc);
+                               psn->psRDesc = NULL;
+                       }
+                       if (psn->psStream)
+                       {
+                               OSFreeMem(psn->psStream);
+                               psn->psStream = NULL;
+                       }
+                       *last = psn->psNext;
+                       break;
+               }
+               last = &psn->psNext;
+       }
+
+       /* Release the event list object owned by the stream node */
+       if (psRemove->hReadEventObj)
+       {
+               eError = OSEventObjectDestroy(psRemove->hReadEventObj);
+               PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+
+               psRemove->hReadEventObj = NULL;
+       }
+
+       /* Release the memory of the stream node */
+       OSFreeMem(psRemove);
+
+       PVR_DPF_RETURN;
+}
+
+static void FreeGlobalData(void)
+{
+       PTL_SNODE psCurrent = sTLGlobalData.psHead;
+       PTL_SNODE psNext;
+       PVRSRV_ERROR eError;
+
+       PVR_DPF_ENTERED;
+
+       /* Clean up the SNODE list */
+       if (psCurrent)
+       {
+               while (psCurrent)
+               {
+                       psNext = psCurrent->psNext;
+
+                       /* Other calling code may have freed and zeroed the pointers */
+                       if (psCurrent->psRDesc)
+                       {
+                               OSFreeMem(psCurrent->psRDesc);
+                               psCurrent->psRDesc = NULL;
+                       }
+                       if (psCurrent->psStream)
+                       {
+                               OSFreeMem(psCurrent->psStream);
+                               psCurrent->psStream = NULL;
+                       }
+
+                       /* Release the event list object owned by the stream node */
+                       if (psCurrent->hReadEventObj)
+                       {
+                               eError = OSEventObjectDestroy(psCurrent->hReadEventObj);
+                               PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+
+                               psCurrent->hReadEventObj = NULL;
+                       }
+
+                       OSFreeMem(psCurrent);
+                       psCurrent = psNext;
+               }
+
+               sTLGlobalData.psHead = NULL;
+       }
+
+       PVR_DPF_RETURN;
+}
+
+void
+TLDeInit(void)
+{
+       PVR_DPF_ENTERED;
+
+       if (sTLGlobalData.uiClientCnt)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "TLDeInit transport layer but %d client streams are still connected", sTLGlobalData.uiClientCnt));
+               sTLGlobalData.uiClientCnt = 0;
+       }
+
+       FreeGlobalData();
+
+       /* Clean up the TL global event object */
+       if (sTLGlobalData.hTLEventObj)
+       {
+               OSEventObjectDestroy(sTLGlobalData.hTLEventObj);
+               sTLGlobalData.hTLEventObj = NULL;
+       }
+
+       /* Destroy the TL global data lock */
+       if (sTLGlobalData.hTLGDLock)
+       {
+               OSLockDestroy (sTLGlobalData.hTLGDLock);
+               sTLGlobalData.hTLGDLock = NULL;
+       }
+
+       PVR_DPF_RETURN;
+}
+
+void TLAddStreamNode(PTL_SNODE psAdd)
+{
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(psAdd);
+       psAdd->psNext = TLGGD()->psHead;
+       TLGGD()->psHead = psAdd;
+
+       PVR_DPF_RETURN;
+}
+
+PTL_SNODE TLFindStreamNodeByName(const IMG_CHAR *pszName)
+{
+       TL_GLOBAL_DATA* psGD = TLGGD();
+       PTL_SNODE psn;
+
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(pszName);
+
+       for (psn = psGD->psHead; psn; psn=psn->psNext)
+       {
+               if (psn->psStream && OSStringNCompare(psn->psStream->szName, pszName, PRVSRVTL_MAX_STREAM_NAME_SIZE)==0)
+               {
+                       PVR_DPF_RETURN_VAL(psn);
+               }
+       }
+
+       PVR_DPF_RETURN_VAL(NULL);
+}
+
+PTL_SNODE TLFindStreamNodeByDesc(PTL_STREAM_DESC psDesc)
+{
+       TL_GLOBAL_DATA* psGD = TLGGD();
+       PTL_SNODE psn;
+
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(psDesc);
+
+       for (psn = psGD->psHead; psn; psn=psn->psNext)
+       {
+               if (psn->psRDesc == psDesc || psn->psWDesc == psDesc)
+               {
+                       PVR_DPF_RETURN_VAL(psn);
+               }
+       }
+       PVR_DPF_RETURN_VAL(NULL);
+}
+
+static inline IMG_BOOL IsDigit(IMG_CHAR c)
+{
+       return c >= '0' && c <= '9';
+}
+
+static inline IMG_BOOL ReadNumber(const IMG_CHAR *pszBuffer,
+                                  IMG_UINT32 *pui32Number)
+{
+       IMG_CHAR acTmp[11] = {0}; /* max 10 digits */
+       IMG_UINT32 ui32Result;
+       IMG_UINT i;
+
+       for (i = 0; i < sizeof(acTmp) - 1; i++)
+       {
+               if (!IsDigit(*pszBuffer))
+                       break;
+               acTmp[i] = *pszBuffer++;
+       }
+
+       /* if there are no digits or there is something after the number */
+       if (i == 0 || *pszBuffer != '\0')
+               return IMG_FALSE;
+
+       if (OSStringToUINT32(acTmp, 10, &ui32Result) != PVRSRV_OK)
+               return IMG_FALSE;
+
+       *pui32Number = ui32Result;
+
+       return IMG_TRUE;
+}
+
+IMG_UINT32 TLDiscoverStreamNodes(const IMG_CHAR *pszNamePattern,
+                          IMG_CHAR aaszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE],
+                          IMG_UINT32 ui32Max)
+{
+       TL_GLOBAL_DATA *psGD = TLGGD();
+       PTL_SNODE psn;
+       IMG_UINT32 ui32Count = 0;
+       size_t uiLen;
+
+       PVR_ASSERT(pszNamePattern);
+
+       if ((uiLen = OSStringLength(pszNamePattern)) == 0)
+               return 0;
+
+       for (psn = psGD->psHead; psn; psn = psn->psNext)
+       {
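+               /* Treat the pattern as a prefix: compare only the first uiLen
+                * characters of each stream name. */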
+               if (OSStringNCompare(pszNamePattern, psn->psStream->szName, uiLen) != 0)
+                       continue;
+
+               /* If aaszStreams is NULL we only count how many streams match
+                * the given pattern. If it's a valid pointer we also return
+                * the names. */
+               if (aaszStreams != NULL)
+               {
+                       if (ui32Count >= ui32Max)
+                               break;
+
+                       /* All stream names are shorter than PRVSRVTL_MAX_STREAM_NAME_SIZE and NUL-terminated. */
+                       OSStringLCopy(aaszStreams[ui32Count], psn->psStream->szName,
+                                     PRVSRVTL_MAX_STREAM_NAME_SIZE);
+               }
+
+               ui32Count++;
+       }
+
+       return ui32Count;
+}
+
+PTL_SNODE TLFindAndGetStreamNodeByDesc(PTL_STREAM_DESC psDesc)
+{
+       PTL_SNODE psn;
+
+       PVR_DPF_ENTERED;
+
+       psn = TLFindStreamNodeByDesc(psDesc);
+       if (psn == NULL)
+               PVR_DPF_RETURN_VAL(NULL);
+
+       PVR_ASSERT(psDesc == psn->psWDesc);
+
+       psn->uiWRefCount++;
+       psDesc->uiRefCount++;
+
+       PVR_DPF_RETURN_VAL(psn);
+}
+
+void TLReturnStreamNode(PTL_SNODE psNode)
+{
+       psNode->uiWRefCount--;
+       psNode->psWDesc->uiRefCount--;
+
+       PVR_ASSERT(psNode->uiWRefCount > 0);
+       PVR_ASSERT(psNode->psWDesc->uiRefCount > 0);
+}
+
+IMG_BOOL TLTryRemoveStreamAndFreeStreamNode(PTL_SNODE psRemove)
+{
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(psRemove);
+
+       /* If there is a client connected to this stream, defer stream's deletion */
+       if (psRemove->psRDesc != NULL || psRemove->psWDesc != NULL)
+       {
+               PVR_DPF_RETURN_VAL(IMG_FALSE);
+       }
+
+       /* Remove stream from TL_GLOBAL_DATA's list and free stream node */
+       psRemove->psStream = NULL;
+       RemoveAndFreeStreamNode(psRemove);
+
+       PVR_DPF_RETURN_VAL(IMG_TRUE);
+}
+
+IMG_BOOL TLUnrefDescAndTryFreeStreamNode(PTL_SNODE psNodeToRemove,
+                                          PTL_STREAM_DESC psSD)
+{
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(psNodeToRemove);
+       PVR_ASSERT(psSD);
+
+       /* Decrement reference count. For descriptor obtained by reader it must
+        * reach 0 (only single reader allowed) and for descriptors obtained by
+        * writers it must reach value greater or equal to 0 (multiple writers
+        * model). */
+       psSD->uiRefCount--;
+
+       if (psSD == psNodeToRemove->psRDesc)
+       {
+               PVR_ASSERT(0 == psSD->uiRefCount);
+               /* Remove stream descriptor (i.e. stream reader context) */
+               psNodeToRemove->psRDesc = NULL;
+       }
+       else if (psSD == psNodeToRemove->psWDesc)
+       {
+               PVR_ASSERT(0 <= psSD->uiRefCount);
+
+               psNodeToRemove->uiWRefCount--;
+
+               /* Remove stream descriptor if reference == 0 */
+               if (0 == psSD->uiRefCount)
+               {
+                       psNodeToRemove->psWDesc = NULL;
+               }
+       }
+
+       /* Do not Free Stream Node if there is a write reference (a producer
+        * context) to the stream */
+       if (NULL != psNodeToRemove->psRDesc || NULL != psNodeToRemove->psWDesc ||
+           0 != psNodeToRemove->uiWRefCount)
+       {
+               PVR_DPF_RETURN_VAL(IMG_FALSE);
+       }
+
+       /* Make stream pointer NULL to prevent it from being destroyed in
+        * RemoveAndFreeStreamNode. Cleanup of stream should be done by the
+        * calling context */
+       psNodeToRemove->psStream = NULL;
+       RemoveAndFreeStreamNode(psNodeToRemove);
+
+       PVR_DPF_RETURN_VAL(IMG_TRUE);
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/tlserver.c b/drivers/gpu/drm/img/img-rogue/services/server/common/tlserver.c
new file mode 100644 (file)
index 0000000..c250dd3
--- /dev/null
@@ -0,0 +1,747 @@
+/*************************************************************************/ /*!
+@File
+@Title          KM server Transport Layer implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Main bridge APIs for Transport Layer client functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+
+/*#define PVR_DPF_FUNCTION_TRACE_ON 1*/
+#undef PVR_DPF_FUNCTION_TRACE_ON
+#include "pvr_debug.h"
+
+#include "connection_server.h"
+#include "allocmem.h"
+#include "devicemem.h"
+
+#include "tlintern.h"
+#include "tlstream.h"
+#include "tlserver.h"
+
+#include "pvrsrv_tlstreams.h"
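+
+/* Wait periods (in microseconds) used when blocking for a stream to appear or
+ * for new data to arrive; NO_ACQUIRE is a sentinel offset value. */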
+#define NO_STREAM_WAIT_PERIOD_US 2000000ULL
+#define NO_DATA_WAIT_PERIOD_US    500000ULL
+#define NO_ACQUIRE               0xffffffffU
+
+
+/*
+ * Transport Layer Client API Kernel-Mode bridge implementation
+ */
+PVRSRV_ERROR
+TLServerOpenStreamKM(const IMG_CHAR*   pszName,
+                     IMG_UINT32        ui32Mode,
+                     PTL_STREAM_DESC*  ppsSD,
+                     PMR**             ppsTLPMR)
+{
+       PVRSRV_ERROR    eError = PVRSRV_OK;
+       PVRSRV_ERROR    eErrorEO = PVRSRV_OK;
+       PTL_SNODE               psNode;
+       PTL_STREAM              psStream;
+       TL_STREAM_DESC *psNewSD = NULL;
+       IMG_HANDLE              hEvent;
+       IMG_BOOL                bIsWriteOnly = ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WO ?
+                                      IMG_TRUE : IMG_FALSE;
+       IMG_BOOL                bResetOnOpen = ui32Mode & PVRSRV_STREAM_FLAG_RESET_ON_OPEN ?
+                                      IMG_TRUE : IMG_FALSE;
+       IMG_BOOL                bNoOpenCB    = ui32Mode & PVRSRV_STREAM_FLAG_IGNORE_OPEN_CALLBACK ?
+                                      IMG_TRUE : IMG_FALSE;
+       PTL_GLOBAL_DATA psGD = TLGGD();
+
+#if defined(PVR_DPF_FUNCTION_TRACE_ON)
+       PVR_DPF((PVR_DBG_CALLTRACE, "--> %s:%d entered (%s, %x)", __func__, __LINE__, pszName, ui32Mode));
+#endif
+
+       PVR_ASSERT(pszName);
+
+       /* Acquire the TL_GLOBAL_DATA lock here: if the following TLFindStreamNodeByName
+        * returns a non-NULL PTL_SNODE we update the global client count and the
+        * PTL_SNODE's psRDesc, so we must make sure the TL_SNODE stays valid (i.e. has
+        * not been deleted) while we are updating it.
+        */
+       OSLockAcquire (psGD->hTLGDLock);
+
+       psNode = TLFindStreamNodeByName(pszName);
+       if ((psNode == NULL) && (ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WAIT))
+       {       /* Blocking code to wait for stream to be created if it does not exist */
+               eError = OSEventObjectOpen(psGD->hTLEventObj, &hEvent);
+               PVR_LOG_GOTO_IF_ERROR (eError, "OSEventObjectOpen", e0);
+
+               do
+               {
+                       if ((psNode = TLFindStreamNodeByName(pszName)) == NULL)
+                       {
+                               PVR_DPF((PVR_DBG_MESSAGE, "Stream %s does not exist, waiting...", pszName));
+
+                               /* Release TL_GLOBAL_DATA lock before sleeping */
+                               OSLockRelease (psGD->hTLGDLock);
+
+                               /* Will exit OK or with timeout, both cases safe to ignore */
+                               eErrorEO = OSEventObjectWaitTimeout(hEvent, NO_STREAM_WAIT_PERIOD_US);
+
+                               /* Acquire lock after waking up */
+                               OSLockAcquire (psGD->hTLGDLock);
+                       }
+               }
+               while ((psNode == NULL) && (eErrorEO == PVRSRV_OK));
+
+               eError = OSEventObjectClose(hEvent);
+               PVR_LOG_GOTO_IF_ERROR (eError, "OSEventObjectClose", e0);
+       }
+
+       /* Make sure we have found a stream node after wait/search */
+       if (psNode == NULL)
+       {
+               /* Did we exit the wait with timeout, inform caller */
+               if (eErrorEO == PVRSRV_ERROR_TIMEOUT)
+               {
+                       eError = eErrorEO;
+               }
+               else
+               {
+                       eError = PVRSRV_ERROR_NOT_FOUND;
+                       PVR_DPF((PVR_DBG_ERROR, "Stream \"%s\" does not exist", pszName));
+               }
+               goto e0;
+       }
+
+       psStream = psNode->psStream;
+
+       /* Allocate the stream's shared memory if it has not been allocated yet;
+        * the buffer is created on the first open of the stream. */
+       eError = TLAllocSharedMemIfNull(psStream);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Failed to allocate memory for stream"
+                               " \"%s\"", pszName));
+               goto e0;
+       }
+
+       if (bIsWriteOnly)
+       {
+
+               /* If psWDesc == NULL this is the first attempt to open the stream
+                * for writing, so create the descriptor; otherwise just increment
+                * its reference count. */
+               if (psNode->psWDesc == NULL)
+               {
+                       psNewSD = TLMakeStreamDesc(psNode, ui32Mode, NULL);
+                       psNode->psWDesc = psNewSD;
+               }
+               else
+               {
+                       psNewSD = psNode->psWDesc;
+                       psNode->psWDesc->uiRefCount++;
+               }
+
+               PVR_LOG_GOTO_IF_NOMEM(psNewSD, eError, e0);
+
+               psNode->uiWRefCount++;
+       }
+       else
+       {
+               /* Only one reader per stream supported */
+               if (psNode->psRDesc != NULL)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "Cannot open \"%s\" stream, stream already"
+                               " opened", pszName));
+                       eError = PVRSRV_ERROR_ALREADY_OPEN;
+                       goto e0;
+               }
+
+               /* Create an event handle for this client to wait on when no data in
+                * stream buffer. */
+               eError = OSEventObjectOpen(psNode->hReadEventObj, &hEvent);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_LOG_ERROR(eError, "OSEventObjectOpen");
+                       eError = PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT;
+                       goto e0;
+               }
+
+               psNewSD = TLMakeStreamDesc(psNode, ui32Mode, hEvent);
+               psNode->psRDesc = psNewSD;
+
+               if (!psNewSD)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "Not possible to make a new stream descriptor"));
+                       eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                       goto e1;
+               }
+
+               PVR_DPF((PVR_DBG_VERBOSE,
+                       "TLServerOpenStreamKM evList=%p, evObj=%p",
+                       psNode->hReadEventObj,
+                       psNode->psRDesc->hReadEvent));
+       }
+
+       /* Copy the import handle back to the user mode API to enable access to
+        * the stream buffer from user-mode process. */
+       eError = DevmemLocalGetImportHandle(TLStreamGetBufferPointer(psStream),
+                                           (void**) ppsTLPMR);
+       PVR_LOG_GOTO_IF_ERROR(eError, "DevmemLocalGetImportHandle", e2);
+
+       psGD->uiClientCnt++;
+
+       /* Global data updated. Now release global lock */
+       OSLockRelease (psGD->hTLGDLock);
+
+       *ppsSD = psNewSD;
+
+       if (bResetOnOpen)
+       {
+               TLStreamReset(psStream);
+       }
+
+       /* This callback is executed only on reader open. Some actions performed
+        * on reader open don't make much sense for writers, e.g. injecting a
+        * time synchronisation packet into the stream. */
+       if (!bIsWriteOnly && psStream->pfOnReaderOpenCallback != NULL && !bNoOpenCB)
+       {
+               psStream->pfOnReaderOpenCallback(psStream->pvOnReaderOpenUserData);
+       }
+
+       /* psNode->uiWRefCount is set to '1' on stream create so the first open
+        * is '2'. */
+       if (bIsWriteOnly && psStream->psNotifStream != NULL &&
+           psNode->uiWRefCount == 2)
+       {
+               TLStreamMarkStreamOpen(psStream);
+       }
+
+       PVR_DPF((PVR_DBG_MESSAGE, "%s: Stream %s opened for %s", __func__, pszName,
+               ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WO ? "write" : "read"));
+
+       PVR_DPF_RETURN_OK;
+
+e2:
+       OSFreeMem(psNewSD);
+e1:
+       if (!bIsWriteOnly)
+               OSEventObjectClose(hEvent);
+e0:
+       OSLockRelease (psGD->hTLGDLock);
+       PVR_DPF_RETURN_RC (eError);
+}
+
+PVRSRV_ERROR
+TLServerCloseStreamKM(PTL_STREAM_DESC psSD)
+{
+       PVRSRV_ERROR    eError = PVRSRV_OK;
+       PTL_GLOBAL_DATA psGD = TLGGD();
+       PTL_SNODE               psNode;
+       PTL_STREAM      psStream;
+       IMG_BOOL        bDestroyStream;
+       IMG_BOOL        bIsWriteOnly = psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO ?
+                                  IMG_TRUE : IMG_FALSE;
+
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(psSD);
+
+       /* Quick exit if there are no streams */
+       if (psGD->psHead == NULL)
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+       }
+
+       /* Check stream still valid */
+       psNode = TLFindStreamNodeByDesc(psSD);
+       if ((psNode == NULL) || (psNode != psSD->psNode))
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+       }
+
+       /* Since the descriptor is valid, the stream should not have been made NULL */
+       PVR_ASSERT (psNode->psStream);
+
+       /* Save a reference to the stream in case it has to be destroyed after this
+        * client is removed. */
+       psStream = psNode->psStream;
+
+       /* Acquire the TL_GLOBAL_DATA lock as the following TLUnrefDescAndTryFreeStreamNode
+        * call will update the TL_SNODE's descriptor value. */
+       OSLockAcquire (psGD->hTLGDLock);
+
+       /* Close event handle because event object list might be destroyed in
+        * TLUnrefDescAndTryFreeStreamNode(). */
+       if (!bIsWriteOnly)
+       {
+               /* Reset the read position on close if the stream requires it. */
+               TLStreamResetReadPos(psStream);
+
+               /* Close and free the event handle resource used by this descriptor */
+               eError = OSEventObjectClose(psSD->hReadEvent);
+               if (eError != PVRSRV_OK)
+               {
+                       /* Log error but continue as it seems best */
+                       PVR_LOG_ERROR(eError, "OSEventObjectClose");
+                       eError = PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT;
+               }
+       }
+       else if (psNode->uiWRefCount == 2 && psStream->psNotifStream != NULL)
+       {
+               /* psNode->uiWRefCount is set to '1' on stream create so the last close
+                * before destruction is '2'. */
+               TLStreamMarkStreamClose(psStream);
+       }
+
+       /* Remove descriptor from stream object/list */
+       bDestroyStream = TLUnrefDescAndTryFreeStreamNode (psNode, psSD);
+
+       /* Check the counter is sensible now that the input data has been validated. */
+       PVR_ASSERT(psGD->uiClientCnt > 0);
+       psGD->uiClientCnt--;
+
+       OSLockRelease (psGD->hTLGDLock);
+
+       /* Destroy the stream if its TL_SNODE was removed from TL_GLOBAL_DATA */
+       if (bDestroyStream)
+       {
+               TLStreamDestroy (psStream);
+               psStream = NULL;
+       }
+
+       PVR_DPF((PVR_DBG_VERBOSE, "%s: Stream closed", __func__));
+
+       /* Free the descriptor if ref count reaches 0. */
+       if (psSD->uiRefCount == 0)
+       {
+               /* Free the stream descriptor object */
+               OSFreeMem(psSD);
+       }
+
+       PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR
+TLServerReserveStreamKM(PTL_STREAM_DESC psSD,
+                        IMG_UINT32* ui32BufferOffset,
+                        IMG_UINT32 ui32Size,
+                        IMG_UINT32 ui32SizeMin,
+                        IMG_UINT32* pui32Available)
+{
+       TL_GLOBAL_DATA* psGD = TLGGD();
+       PTL_SNODE psNode;
+       IMG_UINT8* pui8Buffer = NULL;
+       PVRSRV_ERROR eError;
+
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(psSD);
+
+       if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO))
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+       }
+
+       /* Quick exit if there are no streams */
+       if (psGD->psHead == NULL)
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+       }
+
+       /* Acquire the global lock. We have to be sure that no one modifies
+        * the list while we are looking for our stream. */
+       OSLockAcquire(psGD->hTLGDLock);
+       /* Check stream still valid */
+       psNode = TLFindAndGetStreamNodeByDesc(psSD);
+       OSLockRelease(psGD->hTLGDLock);
+
+       if ((psNode == NULL) || (psNode != psSD->psNode))
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+       }
+
+
+       /* Since we have a valid stream descriptor, the stream should not have been
+        * made NULL by any producer context. */
+       PVR_ASSERT (psNode->psStream);
+
+       /* The TL writers that currently land here are at very low to no risk of
+        * breaching the maximum TL packet size constraint (even if no reader is
+        * connected to the TL stream, which would eventually cause the stream to
+        * fill up). Hence there is no need to know the status of the TL stream's
+        * reader connection.
+        */
+       eError = TLStreamReserve2(psNode->psStream, &pui8Buffer, ui32Size,
+                                 ui32SizeMin, pui32Available, NULL);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "Failed to reserve %u (%u, %u) bytes in the stream, error %s.",
+                               ui32Size, ui32SizeMin, *pui32Available, PVRSRVGETERRORSTRING(eError)));
+       }
+       else if (pui8Buffer == NULL)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "Not enough space in the stream."));
+               eError = PVRSRV_ERROR_STREAM_FULL;
+       }
+       else
+       {
+               *ui32BufferOffset = pui8Buffer - psNode->psStream->pbyBuffer;
+               PVR_ASSERT(*ui32BufferOffset < psNode->psStream->ui32Size);
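+               /* e.g. if the reserved space starts 128 bytes into the shared stream
+                * buffer, the returned offset is 128 and the client writes its payload there. */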
+       }
+
+       OSLockAcquire(psGD->hTLGDLock);
+       TLReturnStreamNode(psNode);
+       OSLockRelease(psGD->hTLGDLock);
+
+       PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR
+TLServerCommitStreamKM(PTL_STREAM_DESC psSD,
+                       IMG_UINT32 ui32Size)
+{
+       TL_GLOBAL_DATA* psGD = TLGGD();
+       PTL_SNODE psNode;
+       PVRSRV_ERROR eError;
+
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(psSD);
+
+       if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO))
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+       }
+
+       /* Quick exit if there are no streams */
+       if (psGD->psHead == NULL)
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+       }
+
+       /* Acquire the global lock. We have to be sure that no one modifies
+        * the list while we are looking for our stream. */
+       OSLockAcquire(psGD->hTLGDLock);
+       /* Check stream still valid */
+       psNode = TLFindAndGetStreamNodeByDesc(psSD);
+       OSLockRelease(psGD->hTLGDLock);
+
+       if ((psNode == NULL) || (psNode != psSD->psNode))
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+       }
+
+       /* Since we have a valid stream descriptor, the stream should not have been
+        * made NULL by any producer context. */
+       PVR_ASSERT (psNode->psStream);
+
+       eError = TLStreamCommit(psNode->psStream, ui32Size);
+       PVR_LOG_IF_ERROR(eError, "TLStreamCommit");
+
+       OSLockAcquire(psGD->hTLGDLock);
+       TLReturnStreamNode(psNode);
+       OSLockRelease(psGD->hTLGDLock);
+
+       PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR
+TLServerDiscoverStreamsKM(const IMG_CHAR *pszNamePattern,
+                          IMG_UINT32 ui32Size,
+                          IMG_CHAR *pszStreams,
+                          IMG_UINT32 *pui32NumFound)
+{
+       PTL_SNODE psNode = NULL;
+       IMG_CHAR (*paszStreams)[PRVSRVTL_MAX_STREAM_NAME_SIZE] =
+                       (IMG_CHAR (*)[PRVSRVTL_MAX_STREAM_NAME_SIZE]) (void *)pszStreams;
+
+       if (*pszNamePattern == '\0')
+               return PVRSRV_ERROR_INVALID_PARAMS;
+
+       if (ui32Size % PRVSRVTL_MAX_STREAM_NAME_SIZE != 0)
+               return PVRSRV_ERROR_INVALID_PARAMS;
+
+       /* Quick exit if there are no streams */
+       if (TLGGD()->psHead == NULL)
+       {
+               *pui32NumFound = 0;
+               return PVRSRV_OK;
+       }
+
+       OSLockAcquire(TLGGD()->hTLGDLock);
+
+       *pui32NumFound = TLDiscoverStreamNodes(pszNamePattern, paszStreams,
+                                         ui32Size / PRVSRVTL_MAX_STREAM_NAME_SIZE);
+
+       /* Find "tlctrl" stream and reset it */
+       psNode = TLFindStreamNodeByName(PVRSRV_TL_CTLR_STREAM);
+       if (psNode != NULL)
+               TLStreamReset(psNode->psStream);
+
+       OSLockRelease(TLGGD()->hTLGDLock);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+TLServerAcquireDataKM(PTL_STREAM_DESC psSD,
+                      IMG_UINT32*     puiReadOffset,
+                      IMG_UINT32*     puiReadLen)
+{
+       PVRSRV_ERROR    eError = PVRSRV_OK;
+       TL_GLOBAL_DATA* psGD = TLGGD();
+       IMG_UINT32      uiTmpOffset;
+       IMG_UINT32      uiTmpLen = 0;
+       PTL_SNODE       psNode;
+
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(psSD);
+
+       TL_COUNTER_INC(psSD->ui32AcquireCount);
+
+       /* Quick exit if there are no streams */
+       if (psGD->psHead == NULL)
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+       }
+
+       /* Check stream still valid */
+       psNode = TLFindStreamNodeByDesc(psSD);
+       if ((psNode == NULL) || (psNode != psSD->psNode))
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+       }
+
+       /* If we are here, the stream will never be made NULL until this context itself
+        * calls TLRemoveDescAndTryFreeStreamNode(). This is because the producer will
+        * fail to make the stream NULL (by calling TLTryRemoveStreamAndFreeStreamNode)
+        * when a valid stream descriptor is present (i.e. a client is connected).
+        * Hence, no checks for stream being NON NULL are required after this. */
+       PVR_ASSERT (psNode->psStream);
+
+       psSD->ui32ReadLen = 0;  /* Handle NULL read returns */
+
+       do
+       {
+               uiTmpLen = TLStreamAcquireReadPos(psNode->psStream, psSD->ui32Flags & PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK, &uiTmpOffset);
+
+               /* Check we have not already exceeded the read limit with just the offset,
+                * regardless of data length, to ensure the client sees the return code */
+               if (psSD->ui32Flags & PVRSRV_STREAM_FLAG_READ_LIMIT)
+               {
+                       /* Check to see if we are reading beyond the read limit */
+                       if (uiTmpOffset >= psSD->ui32ReadLimit)
+                       {
+                               PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_READLIMIT_REACHED);
+                       }
+               }
+
+               if (uiTmpLen > 0)
+               { /* Data found */
+
+                       /* Check we have not already exceeded read limit offset+len */
+                       if (psSD->ui32Flags & PVRSRV_STREAM_FLAG_READ_LIMIT)
+                       {
+                               /* Adjust the read length if it goes beyond the read limit;
+                                * the limit is always guaranteed to fall on a packet boundary */
+                               if ((uiTmpOffset + uiTmpLen) >= psSD->ui32ReadLimit)
+                               {
+                                       uiTmpLen = psSD->ui32ReadLimit - uiTmpOffset;
+                               }
+                       }
+
+                       *puiReadOffset = uiTmpOffset;
+                       *puiReadLen = uiTmpLen;
+                       psSD->ui32ReadLen = uiTmpLen;   /* Save the original data length in the stream desc */
+                       PVR_DPF_RETURN_OK;
+               }
+               else if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING))
+               { /* No data found blocking */
+
+                       /* Instead of sleeping for the full `NO_DATA_WAIT_PERIOD_US` us in one go, we sleep in chunks
+                        * of 168 ms. In a "deferred" signal scenario from the writer, this gives us a chance to
+                        * wake up (timeout) early and continue reading in case some data is available */
+                       IMG_UINT64 ui64WaitInChunksUs = MIN(NO_DATA_WAIT_PERIOD_US, 168000ULL);
+                       IMG_BOOL bDataFound = IMG_FALSE;
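+                       /* e.g. if NO_DATA_WAIT_PERIOD_US were 500000 (500 ms), the wait below
+                        * would run in up to three ~168 ms slices rather than one 500 ms sleep,
+                        * re-checking the stream for data after each slice. */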
+
+                       TL_COUNTER_INC(psSD->ui32NoDataSleep);
+
+                       LOOP_UNTIL_TIMEOUT(NO_DATA_WAIT_PERIOD_US)
+                       {
+                               eError = OSEventObjectWaitTimeout(psSD->hReadEvent, ui64WaitInChunksUs);
+                               if (eError == PVRSRV_OK)
+                               {
+                                       bDataFound = IMG_TRUE;
+                                       TL_COUNTER_INC(psSD->ui32Signalled);
+                                       break;
+                               }
+                               else if (eError == PVRSRV_ERROR_TIMEOUT)
+                               {
+                                       if (TLStreamOutOfData(psNode->psStream))
+                                       {
+                                               /* Stream still empty: keep waiting; if data has arrived, break out below and return it */
+                                               continue;
+                                       }
+                                       else
+                                       {
+                                               bDataFound = IMG_TRUE;
+                                               TL_COUNTER_INC(psSD->ui32TimeoutData);
+                                               PVR_DPF((PVR_DBG_MESSAGE, "%s: Data found at timeout. Current BuffUt = %u",
+                                                                                                __func__, TLStreamGetUT(psNode->psStream)));
+                                               break;
+                                       }
+                               }
+                               else
+                               { /* Some other system error with event objects */
+                                       PVR_DPF_RETURN_RC(eError);
+                               }
+                       } END_LOOP_UNTIL_TIMEOUT();
+
+                       if (bDataFound)
+                       {
+                               continue;
+                       }
+                       else
+                       {
+                               TL_COUNTER_INC(psSD->ui32TimeoutEmpty);
+                               return PVRSRV_ERROR_TIMEOUT;
+                       }
+               }
+               else
+               { /* No data non-blocking */
+                       TL_COUNTER_INC(psSD->ui32NoData);
+
+                       /* When there is no data in non-blocking mode, uiReadOffset is set to NO_ACQUIRE,
+                        * signifying that no Release call is needed */
+                       *puiReadOffset = NO_ACQUIRE;
+                       *puiReadLen = 0;
+                       PVR_DPF_RETURN_OK;
+               }
+       }
+       while (1);
+}
+
+PVRSRV_ERROR
+TLServerReleaseDataKM(PTL_STREAM_DESC psSD,
+                      IMG_UINT32      uiReadOffset,
+                      IMG_UINT32      uiReadLen)
+{
+       TL_GLOBAL_DATA* psGD = TLGGD();
+       PTL_SNODE psNode;
+
+       PVR_DPF_ENTERED;
+
+       /* Unreferenced in release builds */
+       PVR_UNREFERENCED_PARAMETER(uiReadOffset);
+
+       PVR_ASSERT(psSD);
+
+       /* Quick exit if there are no streams */
+       if (psGD->psHead == NULL)
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+       }
+
+       if ((uiReadLen % PVRSRVTL_PACKET_ALIGNMENT != 0))
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+       }
+
+       /* Check stream still valid */
+       psNode = TLFindStreamNodeByDesc(psSD);
+       if ((psNode == NULL) || (psNode != psSD->psNode))
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+       }
+
+       /* Since we have a valid stream descriptor, the stream should not have been
+        * made NULL by any producer context. */
+       PVR_ASSERT (psNode->psStream);
+
+       PVR_DPF((PVR_DBG_VERBOSE, "TLReleaseDataKM uiReadOffset=%u, uiReadLen=%u", uiReadOffset, uiReadLen));
+
+       /* Move read position on to free up space in stream buffer */
+       PVR_DPF_RETURN_RC(TLStreamAdvanceReadPos(psNode->psStream, uiReadLen, psSD->ui32ReadLen));
+}
+
+PVRSRV_ERROR
+TLServerWriteDataKM(PTL_STREAM_DESC psSD,
+                    IMG_UINT32 ui32Size,
+                    IMG_BYTE* pui8Data)
+{
+       TL_GLOBAL_DATA* psGD = TLGGD();
+       PTL_SNODE psNode;
+       PVRSRV_ERROR eError;
+
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(psSD);
+
+       if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO))
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+       }
+
+       /* Quick exit if there are no streams */
+       if (psGD->psHead == NULL)
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+       }
+
+       OSLockAcquire(psGD->hTLGDLock);
+       /* Check stream still valid */
+       psNode = TLFindAndGetStreamNodeByDesc(psSD);
+       OSLockRelease(psGD->hTLGDLock);
+
+       if ((psNode == NULL) || (psNode != psSD->psNode))
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+       }
+
+       /* Since we have a valid stream descriptor, the stream should not have been
+        * made NULL by any producer context. */
+       PVR_ASSERT (psNode->psStream);
+
+       eError = TLStreamWrite(psNode->psStream, pui8Data, ui32Size);
+       /* propagate error up but don't print anything here */
+
+       OSLockAcquire(psGD->hTLGDLock);
+       TLReturnStreamNode(psNode);
+       OSLockRelease(psGD->hTLGDLock);
+
+       PVR_DPF_RETURN_RC(eError);
+}
+
+/******************************************************************************
+ End of file (tlserver.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/tlstream.c b/drivers/gpu/drm/img/img-rogue/services/server/common/tlstream.c
new file mode 100644 (file)
index 0000000..a80792e
--- /dev/null
@@ -0,0 +1,1625 @@
+/*************************************************************************/ /*!
+@File
+@Title          Transport Layer kernel side API implementation.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Transport Layer API implementation.
+                These functions are provided to driver components.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+#include "pvr_debug.h"
+
+#include "allocmem.h"
+#include "devicemem.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+#include "log2.h"
+
+#include "tlintern.h"
+#include "tlstream.h"
+
+#include "pvrsrv.h"
+
+#define EVENT_OBJECT_TIMEOUT_US 1000000ULL
+#define READ_PENDING_TIMEOUT_US 100000ULL
+
+/*! Compute maximum TL packet size for this stream. Max packet size will be
+ * minimum of PVRSRVTL_MAX_PACKET_SIZE and (BufferSize / 2.5). This computation
+ * is required to avoid a corner case that was observed when the TL buffer size is
+ * smaller than twice the TL max packet size and the read and write indices are
+ * positioned in such a way that the TL packet (write packet + padding packet)
+ * may be bigger than the buffer size itself.
+ */
+#define GET_TL_MAX_PACKET_SIZE( bufSize ) PVRSRVTL_ALIGN( MIN( PVRSRVTL_MAX_PACKET_SIZE, ( 2 * bufSize ) / 5 ) )
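+/* e.g. for a 10240 byte buffer, (2 * 10240) / 5 = 4096, so packets are capped at
+ * PVRSRVTL_ALIGN(MIN(PVRSRVTL_MAX_PACKET_SIZE, 4096)), i.e. at most 40% of the buffer. */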
+
+/* Given the state of the buffer it returns a number of bytes that the client
+ * can use for a successful allocation. */
+static INLINE IMG_UINT32 suggestAllocSize(IMG_UINT32 ui32LRead,
+                                          IMG_UINT32 ui32LWrite,
+                                          IMG_UINT32 ui32CBSize,
+                                          IMG_UINT32 ui32ReqSizeMin,
+                                          IMG_UINT32 ui32MaxPacketSize)
+{
+       IMG_UINT32 ui32AvSpace = 0;
+
+       /* This could be written in fewer lines using the ? operator but it
+               would not be kind to potential readers of this source at all. */
+       if (ui32LRead > ui32LWrite)                          /* Buffer WRAPPED */
+       {
+               if ((ui32LRead - ui32LWrite) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE))
+               {
+                       ui32AvSpace = ui32LRead - ui32LWrite - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE;
+               }
+       }
+       else                                                  /* Normal, no wrap */
+       {
+               if ((ui32CBSize - ui32LWrite) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE))
+               {
+                       ui32AvSpace = ui32CBSize - ui32LWrite - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE;
+               }
+               else if ((ui32LRead - 0) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE))
+               {
+                       ui32AvSpace = ui32LRead - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE;
+               }
+       }
+       /* The max size of a TL packet is currently a UINT16; adjust accordingly */
+       return MIN(ui32AvSpace, ui32MaxPacketSize);
+}
+
+/* Returns the number of bytes left in the buffer. Negative if there are none.
+ * Two 8-byte aligned values are reserved: one for the "write failed" buffer flag
+ * and one to distinguish the buffer-full state from the buffer-empty state.
+ * Always returns the free space minus 8, even when the "write failed" packet may
+ * already be in the stream before this write. */
+static INLINE IMG_INT
+circbufSpaceLeft(IMG_UINT32 ui32Read, IMG_UINT32 ui32Write, IMG_UINT32 ui32size)
+{
+       /* We need to reserve 8b (one packet) in the buffer to be able to tell empty
+        * buffers from full buffers and one more for packet write fail packet */
+       if (ui32Read > ui32Write)
+       {
+               return (IMG_INT)ui32Read - (IMG_INT)ui32Write - (IMG_INT)BUFFER_RESERVED_SPACE;
+       }
+       else
+       {
+               return (IMG_INT)ui32size - ((IMG_INT)ui32Write - (IMG_INT)ui32Read) - (IMG_INT)BUFFER_RESERVED_SPACE;
+       }
+}
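+
+/* Worked example for circbufSpaceLeft: with ui32Read = 0, ui32Write = 256 and
+ * ui32size = 1024 (not wrapped), 1024 - (256 - 0) - BUFFER_RESERVED_SPACE bytes
+ * are reported free; once the write offset wraps behind the read offset, only
+ * ui32Read - ui32Write - BUFFER_RESERVED_SPACE bytes remain. */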
+
+IMG_UINT32 TLStreamGetUT(IMG_HANDLE hStream)
+{
+       PTL_STREAM psStream = (PTL_STREAM) hStream;
+       IMG_UINT32 ui32LRead = psStream->ui32Read, ui32LWrite = psStream->ui32Write;
+
+       if (ui32LWrite >= ui32LRead)
+       {
+               return (ui32LWrite-ui32LRead);
+       }
+       else
+       {
+               return (psStream->ui32Size-ui32LRead+ui32LWrite);
+       }
+}
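+
+/* Worked example for TLStreamGetUT (bytes currently held in the stream): with
+ * ui32Read = 900, ui32Write = 100 and ui32Size = 1024 the buffer has wrapped,
+ * so 1024 - 900 + 100 = 224 bytes are in use. */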
+
+PVRSRV_ERROR TLAllocSharedMemIfNull(IMG_HANDLE hStream)
+{
+       PTL_STREAM psStream = (PTL_STREAM) hStream;
+       PVRSRV_ERROR eError;
+
+       /* CPU local memory is used as these buffers are not accessed by the device.
+        * CPU uncached write-combine memory is used to improve write performance;
+        * a memory barrier is added in TLStreamCommit to ensure data is written to
+        * memory before the CB write point is updated and consumed by the reader.
+        */
+       IMG_CHAR pszBufferLabel[PRVSRVTL_MAX_STREAM_NAME_SIZE + 20];
+       PVRSRV_MEMALLOCFLAGS_T uiMemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+                                           PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                                           PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                                           PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC |
+                                           PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+                                           PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(CPU_LOCAL); /* TL for now is only used by host driver, so cpulocal mem suffices */
+
+       /* Exit if memory has already been allocated. */
+       if (psStream->pbyBuffer != NULL)
+               return PVRSRV_OK;
+
+       OSSNPrintf(pszBufferLabel, sizeof(pszBufferLabel), "TLStreamBuf-%s",
+                  psStream->szName);
+
+
+       /* Use HostMemDeviceNode instead of psStream->psDevNode to benefit from faster
+        * accesses to CPU local memory. When the framework to access CPU_LOCAL device
+        * memory from GPU is fixed, we'll switch back to use psStream->psDevNode for
+        * TL buffers */
+       eError = DevmemAllocateExportable((IMG_HANDLE)PVRSRVGetPVRSRVData()->psHostMemDeviceNode,
+                                         (IMG_DEVMEM_SIZE_T) psStream->ui32Size,
+                                         (IMG_DEVMEM_ALIGN_T) OSGetPageSize(),
+                                         ExactLog2(OSGetPageSize()),
+                                         uiMemFlags,
+                                         pszBufferLabel,
+                                         &psStream->psStreamMemDesc);
+       PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAllocateExportable", e0);
+
+       eError = DevmemAcquireCpuVirtAddr(psStream->psStreamMemDesc,
+                                         (void**) &psStream->pbyBuffer);
+       PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", e1);
+
+       return PVRSRV_OK;
+
+e1:
+       DevmemFree(psStream->psStreamMemDesc);
+e0:
+       return eError;
+}
+
+void TLFreeSharedMem(IMG_HANDLE hStream)
+{
+       PTL_STREAM psStream = (PTL_STREAM) hStream;
+
+       if (psStream->pbyBuffer != NULL)
+       {
+               DevmemReleaseCpuVirtAddr(psStream->psStreamMemDesc);
+               psStream->pbyBuffer = NULL;
+       }
+       if (psStream->psStreamMemDesc != NULL)
+       {
+               DevmemFree(psStream->psStreamMemDesc);
+               psStream->psStreamMemDesc = NULL;
+       }
+}
+
+/* Special space left routine for TL_FLAG_PERMANENT_NO_WRAP streams */
+static INLINE IMG_UINT
+bufSpaceLeft(IMG_UINT32 ui32Read, IMG_UINT32 ui32Write, IMG_UINT32 ui32size)
+{
+       /* A no-wrap stream never reclaims space on read, so only the area after the
+        * write offset is available */
+       PVR_ASSERT(ui32Read<=ui32Write);
+       return ui32size - ui32Write;
+}
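+
+/* Worked example for bufSpaceLeft: for a no-wrap stream of ui32size = 1024 with
+ * ui32Write = 800, only the 224 bytes beyond the write offset are ever reported
+ * free, regardless of the read offset. */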
+
+/*******************************************************************************
+ * TL Server public API implementation.
+ ******************************************************************************/
+PVRSRV_ERROR
+TLStreamCreate(IMG_HANDLE *phStream,
+               const IMG_CHAR *szStreamName,
+               IMG_UINT32 ui32Size,
+               IMG_UINT32 ui32StreamFlags,
+               TL_STREAM_ONREADEROPENCB pfOnReaderOpenCB,
+               void *pvOnReaderOpenUD,
+               TL_STREAM_SOURCECB pfProducerCB,
+               void *pvProducerUD)
+{
+       PTL_STREAM    psTmp;
+       PVRSRV_ERROR  eError;
+       IMG_HANDLE    hEventList;
+       PTL_SNODE     psn;
+       TL_OPMODE     eOpMode;
+
+       PVR_DPF_ENTERED;
+       /* Parameter checks: a non-NULL handle pointer is required */
+       if (NULL == phStream)
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+       }
+       if (szStreamName == NULL || *szStreamName == '\0' ||
+           OSStringLength(szStreamName) >= PRVSRVTL_MAX_STREAM_NAME_SIZE)
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+       }
+
+       eOpMode = ui32StreamFlags & TL_OPMODE_MASK;
+       if (( eOpMode <= TL_OPMODE_UNDEF ) || ( eOpMode >= TL_OPMODE_LAST ))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "OpMode for TL stream is invalid"));
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+       }
+
+       /* Acquire TL_GLOBAL_DATA lock here because, if the following TLFindStreamNodeByName()
+        * returns NULL, a new TL_SNODE will be added to TL_GLOBAL_DATA's TL_SNODE list */
+       OSLockAcquire (TLGGD()->hTLGDLock);
+
+       /* Check if there already exists a stream with this name. */
+       psn = TLFindStreamNodeByName( szStreamName );
+       if (NULL != psn)
+       {
+               eError = PVRSRV_ERROR_ALREADY_EXISTS;
+               goto e0;
+       }
+
+       /* Allocate stream structure container (stream struct) for the new stream */
+       psTmp = OSAllocZMem(sizeof(TL_STREAM));
+       if (NULL == psTmp)
+       {
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto e0;
+       }
+
+       OSStringLCopy(psTmp->szName, szStreamName, PRVSRVTL_MAX_STREAM_NAME_SIZE);
+
+       if (ui32StreamFlags & TL_FLAG_FORCE_FLUSH)
+       {
+               psTmp->bWaitForEmptyOnDestroy = IMG_TRUE;
+       }
+
+       psTmp->bNoSignalOnCommit = (ui32StreamFlags&TL_FLAG_NO_SIGNAL_ON_COMMIT) ? IMG_TRUE : IMG_FALSE;
+       psTmp->bNoWrapPermanent = (ui32StreamFlags&TL_FLAG_PERMANENT_NO_WRAP) ? IMG_TRUE : IMG_FALSE;
+
+       psTmp->eOpMode = eOpMode;
+       if (psTmp->eOpMode == TL_OPMODE_BLOCK)
+       {
+               /* Only allow drop properties to be mixed with no-wrap type streams,
+                * since space does not become available when reads take place and
+                * hence there is no point in blocking.
+                */
+               if (psTmp->bNoWrapPermanent)
+               {
+                       eError = PVRSRV_ERROR_INVALID_PARAMS;
+                       goto e1;
+               }
+       }
+
+       /* Additional synchronisation object required for some streams e.g. blocking */
+       eError = OSEventObjectCreate(NULL, &psTmp->hProducerEventObj);
+       PVR_GOTO_IF_ERROR(eError, e1);
+       /* Create an event handle for this kind of stream */
+       eError = OSEventObjectOpen(psTmp->hProducerEventObj, &psTmp->hProducerEvent);
+       PVR_GOTO_IF_ERROR(eError, e2);
+
+       psTmp->pfOnReaderOpenCallback = pfOnReaderOpenCB;
+       psTmp->pvOnReaderOpenUserData = pvOnReaderOpenUD;
+       /* Remember producer supplied CB and data for later */
+       psTmp->pfProducerCallback = (void(*)(void))pfProducerCB;
+       psTmp->pvProducerUserData = pvProducerUD;
+
+       psTmp->psNotifStream = NULL;
+
+       /* Round the requested bytes to a multiple of array elements' size, eg round 3 to 4 */
+       psTmp->ui32Size = PVRSRVTL_ALIGN(ui32Size);
+
+       /* Signalling from TLStreamCommit is deferred until buffer is slightly (~12%) filled */
+       psTmp->ui32ThresholdUsageForSignal = psTmp->ui32Size >> 3;
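+       /* e.g. a 65536 byte stream defers the first signal until
+        * 65536 >> 3 = 8192 bytes (12.5%) have been committed */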
+       psTmp->ui32MaxPacketSize = GET_TL_MAX_PACKET_SIZE(psTmp->ui32Size);
+       psTmp->ui32Read = 0;
+       psTmp->ui32Write = 0;
+       psTmp->ui32Pending = NOTHING_PENDING;
+       psTmp->bReadPending = IMG_FALSE;
+       psTmp->bSignalPending = IMG_FALSE;
+
+#if defined(TL_BUFFER_STATS)
+       OSAtomicWrite(&psTmp->bNoReaderSinceFirstReserve, 0);
+       /* Setting the MAX possible value for the "minimum" time to full
+        * helps the logic which calculates this time */
+       psTmp->ui32MinTimeToFullInUs = IMG_UINT32_MAX;
+#endif
+
+       /* Memory will be allocated on first connect to the stream */
+       if (!(ui32StreamFlags & TL_FLAG_ALLOCATE_ON_FIRST_OPEN))
+       {
+               /* Allocate memory for the circular buffer and export it to user space. */
+               eError = TLAllocSharedMemIfNull(psTmp);
+               PVR_LOG_GOTO_IF_ERROR(eError, "TLAllocSharedMem", e3);
+       }
+
+       /* Synchronisation object to synchronise with user side data transfers. */
+       eError = OSEventObjectCreate(psTmp->szName, &hEventList);
+       PVR_GOTO_IF_ERROR(eError, e4);
+
+       eError = OSLockCreate (&psTmp->hStreamWLock);
+       PVR_GOTO_IF_ERROR(eError, e5);
+
+       eError = OSLockCreate (&psTmp->hReadLock);
+       PVR_GOTO_IF_ERROR(eError, e6);
+
+       /* Now remember the stream in the global TL structures */
+       psn = TLMakeSNode(hEventList, (TL_STREAM *)psTmp, NULL);
+       PVR_GOTO_IF_NOMEM(psn, eError, e7);
+
+       /* Stream node created, now reset the write reference count to 1
+        * (i.e. this context's reference) */
+       psn->uiWRefCount = 1;
+
+       TLAddStreamNode(psn);
+
+       /* Release TL_GLOBAL_DATA lock as the new TL_SNODE is now added to the list */
+       OSLockRelease (TLGGD()->hTLGDLock);
+
+       /* Best effort signal; if it fails, the client wait timeout will ultimately
+        * let the client find the new stream. Acceptable, as clean-up is tricky at
+        * this point and is best avoided */
+       (void) OSEventObjectSignal(TLGGD()->hTLEventObj);
+
+       /* Pass the newly created stream handle back to caller */
+       *phStream = (IMG_HANDLE)psTmp;
+       PVR_DPF_RETURN_OK;
+
+e7:
+       OSLockDestroy(psTmp->hReadLock);
+e6:
+       OSLockDestroy(psTmp->hStreamWLock);
+e5:
+       OSEventObjectDestroy(hEventList);
+e4:
+       TLFreeSharedMem(psTmp);
+e3:
+       OSEventObjectClose(psTmp->hProducerEvent);
+e2:
+       OSEventObjectDestroy(psTmp->hProducerEventObj);
+e1:
+       OSFreeMem(psTmp);
+e0:
+       OSLockRelease (TLGGD()->hTLGDLock);
+
+       PVR_DPF_RETURN_RC(eError);
+}
+
+void TLStreamReset(IMG_HANDLE hStream)
+{
+       PTL_STREAM psStream = (PTL_STREAM) hStream;
+
+       PVR_ASSERT(psStream != NULL);
+
+       OSLockAcquire(psStream->hStreamWLock);
+
+       while (psStream->ui32Pending != NOTHING_PENDING)
+       {
+               PVRSRV_ERROR eError;
+
+               /* We're in the middle of a write so we cannot reset the stream.
+                * We are going to wait until the data is committed. Release lock while
+                * we're here. */
+               OSLockRelease(psStream->hStreamWLock);
+
+               /* Even when psStream->bNoSignalOnCommit is set we can still use
+                * the timeout capability of the event object API (time in us). */
+               eError = OSEventObjectWaitTimeout(psStream->psNode->hReadEventObj, 100);
+               if (eError != PVRSRV_ERROR_TIMEOUT && eError != PVRSRV_OK)
+               {
+                       PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectWaitTimeout");
+               }
+
+               OSLockAcquire(psStream->hStreamWLock);
+
+               /* Either a timeout occurred or the stream has been signalled.
+                * If the former, we have to check whether the data was committed; if
+                * the latter, whether the stream hasn't been re-reserved. Either way
+                * we have to go back to the loop condition.
+                * If the stream has been released we'll exit with the lock held so
+                * we can finally go and reset the stream. */
+       }
+
+       psStream->ui32Read = 0;
+       psStream->ui32Write = 0;
+       /* we know that ui32Pending already has correct value (no need to set) */
+
+       OSLockRelease(psStream->hStreamWLock);
+}
+
+PVRSRV_ERROR
+TLStreamSetNotifStream(IMG_HANDLE hStream, IMG_HANDLE hNotifStream)
+{
+       PTL_STREAM psStream = (PTL_STREAM) hStream;
+
+       if (hStream == NULL || hNotifStream == NULL)
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+       }
+
+       psStream->psNotifStream = (PTL_STREAM) hNotifStream;
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+TLStreamReconfigure(
+               IMG_HANDLE hStream,
+               IMG_UINT32 ui32StreamFlags)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PTL_STREAM psTmp;
+       TL_OPMODE eOpMode;
+
+       PVR_DPF_ENTERED;
+
+       if (NULL == hStream)
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+       }
+
+       eOpMode = ui32StreamFlags & TL_OPMODE_MASK;
+       if (( eOpMode <= TL_OPMODE_UNDEF ) || ( eOpMode >= TL_OPMODE_LAST ))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "OpMode for TL stream is invalid"));
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+       }
+
+       psTmp = (PTL_STREAM)hStream;
+
+       /* Prevent the TL Stream buffer from being written to
+        * while its mode is being reconfigured
+        */
+       OSLockAcquire (psTmp->hStreamWLock);
+       if (NOTHING_PENDING != psTmp->ui32Pending)
+       {
+               OSLockRelease (psTmp->hStreamWLock);
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_READY);
+       }
+       psTmp->ui32Pending = 0;
+       OSLockRelease (psTmp->hStreamWLock);
+
+       psTmp->eOpMode = eOpMode;
+       if (psTmp->eOpMode == TL_OPMODE_BLOCK)
+       {
+               /* Only allow drop properties to be mixed with no-wrap type streams,
+                * since space does not become available when reads take place and
+                * hence there is no point in blocking.
+                */
+               if (psTmp->bNoWrapPermanent)
+               {
+                       eError = PVRSRV_ERROR_INVALID_PARAMS;
+                       goto e1;
+               }
+       }
+
+       OSLockAcquire (psTmp->hStreamWLock);
+       psTmp->ui32Pending = NOTHING_PENDING;
+       OSLockRelease (psTmp->hStreamWLock);
+e1:
+       PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR
+TLStreamOpen(IMG_HANDLE     *phStream,
+             const IMG_CHAR *szStreamName)
+{
+       PTL_SNODE psTmpSNode;
+
+       PVR_DPF_ENTERED;
+
+       if (NULL == phStream || NULL == szStreamName)
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+       }
+
+       /* Acquire the TL_GLOBAL_DATA lock first to ensure the TL_STREAM, while being
+        * returned and modified, is not deleted by some other context */
+       OSLockAcquire (TLGGD()->hTLGDLock);
+
+       /* Search for a stream node with a matching stream name */
+       psTmpSNode = TLFindStreamNodeByName(szStreamName);
+
+       if (NULL == psTmpSNode)
+       {
+               OSLockRelease (TLGGD()->hTLGDLock);
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_FOUND);
+       }
+
+       if (psTmpSNode->psStream->psNotifStream != NULL &&
+           psTmpSNode->uiWRefCount == 1)
+       {
+               TLStreamMarkStreamOpen(psTmpSNode->psStream);
+       }
+
+       /* The TL_SNODE->uiWRefCount governs the presence of this node in the
+        * TL_GLOBAL_DATA list, i.e. when uiWRefCount falls to zero we try removing
+        * this node from the TL_GLOBAL_DATA list. Hence it is protected using the
+        * TL_GLOBAL_DATA lock and not the TL_STREAM lock */
+       psTmpSNode->uiWRefCount++;
+
+       OSLockRelease (TLGGD()->hTLGDLock);
+
+       /* Return the stream handle to the caller */
+       *phStream = (IMG_HANDLE)psTmpSNode->psStream;
+
+       PVR_DPF_RETURN_VAL(PVRSRV_OK);
+}
+
+void
+TLStreamClose(IMG_HANDLE hStream)
+{
+       PTL_STREAM      psTmp;
+       IMG_BOOL        bDestroyStream;
+
+       PVR_DPF_ENTERED;
+
+       if (NULL == hStream)
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                                "TLStreamClose failed as NULL stream handler passed, nothing done."));
+               PVR_DPF_RETURN;
+       }
+
+       psTmp = (PTL_STREAM)hStream;
+
+       /* Acquire the TL_GLOBAL_DATA lock for updating the reference count, as this will
+        * be required in case this TL_STREAM node is to be deleted */
+       OSLockAcquire (TLGGD()->hTLGDLock);
+
+       /* Decrement write reference counter of the stream */
+       psTmp->psNode->uiWRefCount--;
+
+       if (0 != psTmp->psNode->uiWRefCount)
+       {
+               /* The stream is still being used in other context(s) do not destroy
+                * anything */
+
+               /* uiWRefCount == 1 means that the stream has been closed for write; the
+                * next close pairs with TLStreamCreate(). Send a notification to indicate
+                * that no writers are connected to the stream any more. */
+               if (psTmp->psNotifStream != NULL && psTmp->psNode->uiWRefCount == 1)
+               {
+                       TLStreamMarkStreamClose(psTmp);
+               }
+
+               OSLockRelease (TLGGD()->hTLGDLock);
+               PVR_DPF_RETURN;
+       }
+       else
+       {
+               /* Now we try removing this TL_STREAM from TL_GLOBAL_DATA */
+
+               if (psTmp->bWaitForEmptyOnDestroy)
+               {
+                       /* We won't require the TL_STREAM lock to be acquired here for accessing its read
+                        * and write offsets. REASON: We are here because there is no producer context
+                        * referencing this TL_STREAM, hence its ui32Write offset won't be changed now.
+                        * Also, the update of ui32Read offset is not protected by locks */
+                       while (psTmp->ui32Read != psTmp->ui32Write)
+                       {
+                               /* Release lock before sleeping */
+                               OSLockRelease (TLGGD()->hTLGDLock);
+
+                               OSEventObjectWaitTimeout(psTmp->hProducerEvent, EVENT_OBJECT_TIMEOUT_US);
+
+                               OSLockAcquire (TLGGD()->hTLGDLock);
+
+                               /* Ensure destruction of stream is still required */
+                               if (0 != psTmp->psNode->uiWRefCount)
+                               {
+                                       OSLockRelease (TLGGD()->hTLGDLock);
+                                       PVR_DPF_RETURN;
+                               }
+                       }
+               }
+
+               /* Try removing the stream from TL_GLOBAL_DATA */
+               bDestroyStream = TLTryRemoveStreamAndFreeStreamNode (psTmp->psNode);
+
+               OSLockRelease (TLGGD()->hTLGDLock);
+
+               if (bDestroyStream)
+               {
+                       /* Destroy the stream if it was removed from TL_GLOBAL_DATA */
+                       TLStreamDestroy (psTmp);
+                       psTmp = NULL;
+               }
+               PVR_DPF_RETURN;
+       }
+}
+
+/*
+ * DoTLSetPacketHeader
+ *
+ * Ensure that whenever we update a Header we always add the RESERVED field
+ */
+static inline void DoTLSetPacketHeader(PVRSRVTL_PPACKETHDR, IMG_UINT32);
+static inline void
+DoTLSetPacketHeader(PVRSRVTL_PPACKETHDR pHdr,
+                               IMG_UINT32 ui32Val)
+{
+       PVR_ASSERT(((size_t)pHdr & (size_t)(PVRSRVTL_PACKET_ALIGNMENT - 1)) == 0);
+
+       /* Check that this is a correctly aligned packet header. */
+       if (((size_t)pHdr & (size_t)(PVRSRVTL_PACKET_ALIGNMENT - 1)) != 0)
+       {
+               /* Should return an error because the header is misaligned */
+               PVR_DPF((PVR_DBG_ERROR, "%s: Misaligned header @ %p", __func__, pHdr));
+               pHdr->uiTypeSize = ui32Val;
+       }
+       else
+       {
+               pHdr->uiTypeSize = ui32Val;
+               pHdr->uiReserved = PVRSRVTL_PACKETHDR_RESERVED;
+       }
+}
+
+static PVRSRV_ERROR
+DoTLStreamReserve(IMG_HANDLE hStream,
+                               IMG_UINT8 **ppui8Data,
+                               IMG_UINT32 ui32ReqSize,
+                               IMG_UINT32 ui32ReqSizeMin,
+                               PVRSRVTL_PACKETTYPE ePacketType,
+                               IMG_UINT32* pui32AvSpace,
+                               IMG_UINT32* pui32Flags)
+{
+       PTL_STREAM psTmp;
+       IMG_UINT32 *pui32Buf, ui32LRead, ui32LWrite, ui32LPending, lReqSizeAligned, lReqSizeActual, ui32CreateFreeSpace;
+       IMG_UINT32 ui32InputFlags = 0;
+       IMG_INT pad, iFreeSpace;
+       IMG_UINT8 *pui8IncrRead = NULL;
+       PVRSRVTL_PPACKETHDR pHdr;
+
+       PVR_DPF_ENTERED;
+       if (pui32AvSpace) *pui32AvSpace = 0;
+       if (pui32Flags)
+       {
+               ui32InputFlags = *pui32Flags;
+               *pui32Flags = 0;
+       }
+
+       if (NULL == hStream)
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+       }
+       psTmp = (PTL_STREAM)hStream;
+
+       /* Assert used as the packet type parameter is currently only provided
+        * by the TL APIs, not the calling client */
+       PVR_ASSERT((PVRSRVTL_PACKETTYPE_UNDEF < ePacketType) && (PVRSRVTL_PACKETTYPE_LAST >= ePacketType));
+
+       /* The buffer is only used in "rounded" (aligned) chunks */
+       lReqSizeAligned = PVRSRVTL_ALIGN(ui32ReqSize);
+
+       /* Lock the stream before reading its pending value, because if pending is set
+        * to NOTHING_PENDING, we update the pending value such that subsequent calls to
+        * this function from other context(s) fail with PVRSRV_ERROR_NOT_READY */
+       OSLockAcquire (psTmp->hStreamWLock);
+
+#if defined(TL_BUFFER_STATS)
+       /* If writing into an empty buffer, start recording time-to-full */
+       if (psTmp->ui32Read == psTmp->ui32Write)
+       {
+               OSAtomicWrite(&psTmp->bNoReaderSinceFirstReserve, 1);
+               psTmp->ui32TimeStart = OSClockus();
+       }
+
+       if (ui32ReqSize > psTmp->ui32MaxReserveWatermark)
+       {
+               psTmp->ui32MaxReserveWatermark = ui32ReqSize;
+       }
+#endif
+
+       /* Get a local copy of the stream buffer parameters */
+       ui32LRead  = psTmp->ui32Read;
+       ui32LWrite = psTmp->ui32Write;
+       ui32LPending = psTmp->ui32Pending;
+
+       /* Multiple pending reserves are not supported. */
+       if (NOTHING_PENDING != ui32LPending)
+       {
+               OSLockRelease (psTmp->hStreamWLock);
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_READY);
+       }
+
+       if (psTmp->ui32MaxPacketSize < lReqSizeAligned)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Requested Size: %u > TL Max Packet size: %u", lReqSizeAligned, psTmp->ui32MaxPacketSize));
+               psTmp->ui32Pending = NOTHING_PENDING;
+               if (pui32AvSpace)
+               {
+                       *pui32AvSpace = suggestAllocSize(ui32LRead, ui32LWrite, psTmp->ui32Size, ui32ReqSizeMin, psTmp->ui32MaxPacketSize);
+                       if (*pui32AvSpace == 0 && psTmp->eOpMode == TL_OPMODE_DROP_OLDEST)
+                       {
+                               *pui32AvSpace = psTmp->ui32MaxPacketSize;
+                               PVR_DPF((PVR_DBG_MESSAGE, "Opmode is Drop_Oldest, so Available Space changed to: %u", *pui32AvSpace));
+                       }
+               }
+               OSLockRelease (psTmp->hStreamWLock);
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED);
+       }
+
+       /* Prevent other threads from entering this region before we are done
+        * updating the pending value and write offset (in case of padding). This
+        * is not exactly a lock but a signal for other contexts that there is a
+        * TLStreamCommit operation pending on this stream */
+       psTmp->ui32Pending = 0;
+
+       OSLockRelease (psTmp->hStreamWLock);
+
+       /* If there is not enough contiguous space following the current write
+        * position, pad up to the end of the buffer; otherwise no padding is required */
+       if (  psTmp->ui32Size
+               < ui32LWrite + lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR) )
+       {
+               pad = psTmp->ui32Size - ui32LWrite;
+       }
+       else
+       {
+               pad = 0;
+       }
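+       /* e.g. with ui32Size = 1024, ui32LWrite = 1000 and an aligned request plus
+        * header totalling 64 bytes, 1000 + 64 > 1024 so pad = 24; given enough free
+        * space, the padding packet written below carries the write offset round to 0. */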
+
+       lReqSizeActual = lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR) + pad;
+       if (psTmp->bNoWrapPermanent)
+       {
+               iFreeSpace = bufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size);
+       }
+       else
+       {
+               iFreeSpace = circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size);
+       }
+
+       if (iFreeSpace < (IMG_INT) lReqSizeActual)
+       {
+               /* If this is a blocking reserve and there is not enough space then wait. */
+               if (psTmp->eOpMode == TL_OPMODE_BLOCK)
+               {
+                       /* Stream create should stop us entering here when
+                        * psTmp->bNoWrapPermanent is true as it does not make sense to
+                        * block on permanent data streams. */
+                       PVR_ASSERT(psTmp->bNoWrapPermanent == IMG_FALSE);
+                       while ( ( circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size)
+                                <(IMG_INT) lReqSizeActual ) )
+                       {
+                               /* The TL bridge is lockless now, so changing to OSEventObjectWait() */
+                               OSEventObjectWait(psTmp->hProducerEvent);
+                               // update local copies.
+                               ui32LRead  = psTmp->ui32Read;
+                               ui32LWrite = psTmp->ui32Write;
+                       }
+               }
+               /* Data overwriting, also insert PACKETS_DROPPED flag into existing packet */
+               else if (psTmp->eOpMode == TL_OPMODE_DROP_OLDEST)
+               {
+                       OSLockAcquire(psTmp->hReadLock);
+
+                       while (psTmp->bReadPending)
+                       {
+                               PVR_DPF((PVR_DBG_MESSAGE, "Waiting for the pending read operation to complete."));
+                               OSLockRelease(psTmp->hReadLock);
+#if defined(TL_BUFFER_STATS)
+                               TL_COUNTER_INC(psTmp->ui32CntWriteWaits);
+#endif
+                               (void) OSEventObjectWaitTimeout(psTmp->hProducerEvent, READ_PENDING_TIMEOUT_US);
+                               OSLockAcquire(psTmp->hReadLock);
+                       }
+
+#if defined(TL_BUFFER_STATS)
+                       TL_COUNTER_INC(psTmp->ui32CntWriteSuccesses);
+#endif
+                       ui32LRead = psTmp->ui32Read;
+
+                       if ( circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size)
+                            < (IMG_INT) lReqSizeActual )
+                       {
+                               ui32CreateFreeSpace = 5 * (psTmp->ui32Size / 100);
+                               if (ui32CreateFreeSpace < lReqSizeActual)
+                               {
+                                       ui32CreateFreeSpace = lReqSizeActual;
+                               }
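+                               /* i.e. drop whole packets from the read end until about 5% of the
+                                * buffer is free (e.g. 5120 bytes of a 102400 byte stream), or until
+                                * the full request size is free if that is larger. */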
+
+                               while (ui32CreateFreeSpace > (IMG_UINT32)circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size))
+                               {
+                                       pui8IncrRead = &psTmp->pbyBuffer[ui32LRead];
+                                       ui32LRead += (sizeof(PVRSRVTL_PACKETHDR) + PVRSRVTL_ALIGN( GET_PACKET_DATA_LEN(pui8IncrRead) ));
+
+                                       /* Check if buffer needs to wrap */
+                                       if (ui32LRead >= psTmp->ui32Size)
+                                       {
+                                               ui32LRead = 0;
+                                       }
+                               }
+                               psTmp->ui32Read = ui32LRead;
+                               pui8IncrRead = &psTmp->pbyBuffer[psTmp->ui32Read];
+
+                               pHdr = GET_PACKET_HDR(pui8IncrRead);
+                               DoTLSetPacketHeader(pHdr, SET_PACKETS_DROPPED(pHdr));
+                       }
+                       /* else fall through as there is enough space now to write the data */
+
+                       OSLockRelease(psTmp->hReadLock);
+                       /* If the caller supplied a flags variable, set the OVERWRITE bit */
+                       if (pui32Flags) *pui32Flags |= TL_FLAG_OVERWRITE_DETECTED;
+               }
+               /* No data overwriting, insert write_failed flag and return */
+               else if (psTmp->eOpMode == TL_OPMODE_DROP_NEWER)
+               {
+                       /* The caller should not try to use ppui8Data; NULLify it to give the
+                        * user a chance of avoiding memory corruption */
+                       *ppui8Data = NULL;
+
+                       /* This flag should not be inserted two consecutive times, so check
+                        * the last ui32 in case it already holds a write-failed packet header. */
+                       pui32Buf = ui32LWrite
+                                         ?
+                                           (void *)&psTmp->pbyBuffer[ui32LWrite - sizeof(PVRSRVTL_PACKETHDR)]
+                                          : // Previous four bytes are not guaranteed to be a packet header...
+                                           (void *)&psTmp->pbyBuffer[psTmp->ui32Size - PVRSRVTL_PACKET_ALIGNMENT];
+
+                       pHdr = GET_PACKET_HDR(pui32Buf);
+                       if ( PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED
+                                !=
+                                GET_PACKET_TYPE( pHdr ) && (ui32InputFlags & TL_FLAG_NO_WRITE_FAILED) == 0)
+                       {
+                               /* Insert size-stamped packet header */
+                               pui32Buf = (void *)&psTmp->pbyBuffer[ui32LWrite];
+                               pHdr = GET_PACKET_HDR(pui32Buf);
+                               DoTLSetPacketHeader(pHdr, PVRSRVTL_SET_PACKET_WRITE_FAILED);
+                               ui32LWrite += sizeof(PVRSRVTL_PACKETHDR);
+                               ui32LWrite %= psTmp->ui32Size;
+                               iFreeSpace -= sizeof(PVRSRVTL_PACKETHDR);
+                       }
+
+                       OSLockAcquire (psTmp->hStreamWLock);
+                       psTmp->ui32Write = ui32LWrite;
+                       psTmp->ui32Pending = NOTHING_PENDING;
+                       OSLockRelease (psTmp->hStreamWLock);
+
+                       if (pui32AvSpace)
+                       {
+                               *pui32AvSpace = suggestAllocSize(ui32LRead, ui32LWrite, psTmp->ui32Size, ui32ReqSizeMin, psTmp->ui32MaxPacketSize);
+                       }
+
+                       /* Inform the caller of permanent stream misuse: no space is left and
+                        * the size of the stream will need to be increased. */
+                       if (psTmp->bNoWrapPermanent)
+                       {
+                               PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_NOT_ENOUGH_SPACE);
+                       }
+
+                       PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_FULL);
+               }
+       }
+
+       /* The easy case: buffer has enough space to hold the requested packet (data + header) */
+
+       /* Should we treat the buffer as non-circular buffer? */
+       if (psTmp->bNoWrapPermanent)
+       {
+               iFreeSpace = bufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size);
+       }
+       else
+       {
+               iFreeSpace = circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size);
+       }
+
+       if (iFreeSpace >= (IMG_INT) lReqSizeActual)
+       {
+               if (pad)
+               {
+                       /* Inserting padding packet. */
+                       pui32Buf = (void *)&psTmp->pbyBuffer[ui32LWrite];
+                       pHdr = GET_PACKET_HDR(pui32Buf);
+                       DoTLSetPacketHeader(pHdr,
+                               PVRSRVTL_SET_PACKET_PADDING(pad-sizeof(PVRSRVTL_PACKETHDR)));
+
+                       /* CAUTION: the used pad value should always result in a properly
+                        *          aligned ui32LWrite pointer, which in this case is 0 */
+                       ui32LWrite = (ui32LWrite + pad) % psTmp->ui32Size;
+                       /* Detect unaligned pad value */
+                       PVR_ASSERT(ui32LWrite == 0);
+               }
+               /* Insert size-stamped packet header */
+               pui32Buf = (void *) &psTmp->pbyBuffer[ui32LWrite];
+
+               pHdr = GET_PACKET_HDR(pui32Buf);
+               DoTLSetPacketHeader(pHdr,
+                       PVRSRVTL_SET_PACKET_HDR(ui32ReqSize, ePacketType));
+
+               /* return the next position in the buffer to the user */
+               *ppui8Data = &psTmp->pbyBuffer[ ui32LWrite+sizeof(PVRSRVTL_PACKETHDR) ];
+
+               /* update pending offset: size stamp + data */
+               ui32LPending = lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR);
+       }
+       else
+       {
+               OSLockAcquire (psTmp->hStreamWLock);
+               psTmp->ui32Pending = NOTHING_PENDING;
+               OSLockRelease (psTmp->hStreamWLock);
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+       }
+
+       /* Acquire stream lock for updating stream parameters */
+       OSLockAcquire (psTmp->hStreamWLock);
+       psTmp->ui32Write = ui32LWrite;
+       psTmp->ui32Pending = ui32LPending;
+       OSLockRelease (psTmp->hStreamWLock);
+
+#if defined(TL_BUFFER_STATS)
+       TL_COUNTER_INC(psTmp->ui32CntNumWriteSuccess);
+#endif
+
+       PVR_DPF_RETURN_OK;
+}
+
+PVRSRV_ERROR
+TLStreamReserve(IMG_HANDLE hStream,
+                               IMG_UINT8 **ppui8Data,
+                               IMG_UINT32 ui32Size)
+{
+       return DoTLStreamReserve(hStream, ppui8Data, ui32Size, ui32Size, PVRSRVTL_PACKETTYPE_DATA, NULL, NULL);
+}
+
+PVRSRV_ERROR
+TLStreamReserve2(IMG_HANDLE hStream,
+                IMG_UINT8  **ppui8Data,
+                IMG_UINT32 ui32Size,
+                IMG_UINT32 ui32SizeMin,
+                IMG_UINT32* pui32Available,
+                IMG_BOOL* pbIsReaderConnected)
+{
+       PVRSRV_ERROR eError;
+
+       eError = DoTLStreamReserve(hStream, ppui8Data, ui32Size, ui32SizeMin, PVRSRVTL_PACKETTYPE_DATA, pui32Available, NULL);
+       if (eError != PVRSRV_OK && pbIsReaderConnected != NULL)
+       {
+               *pbIsReaderConnected = TLStreamIsOpenForReading(hStream);
+       }
+
+       return eError;
+}
+
+PVRSRV_ERROR
+TLStreamReserveReturnFlags(IMG_HANDLE hStream,
+                                                  IMG_UINT8  **ppui8Data,
+                                                  IMG_UINT32 ui32Size,
+                                                  IMG_UINT32* pui32Flags)
+{
+       return DoTLStreamReserve(hStream, ppui8Data, ui32Size, ui32Size, PVRSRVTL_PACKETTYPE_DATA, NULL, pui32Flags);
+}
+
+PVRSRV_ERROR
+TLStreamCommit(IMG_HANDLE hStream, IMG_UINT32 ui32ReqSize)
+{
+       PTL_STREAM psTmp;
+       IMG_UINT32 ui32LRead, ui32OldWrite, ui32LWrite, ui32LPending;
+       PVRSRV_ERROR eError;
+
+#if defined(TL_BUFFER_STATS)
+       IMG_UINT32 ui32UnreadBytes;
+#endif
+
+       PVR_DPF_ENTERED;
+
+       if (NULL == hStream)
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+       }
+       psTmp = (PTL_STREAM)hStream;
+
+       /* Get a local copy of the stream buffer parameters */
+       ui32LRead = psTmp->ui32Read;
+       ui32LWrite = psTmp->ui32Write;
+       ui32LPending = psTmp->ui32Pending;
+
+       ui32OldWrite = ui32LWrite;
+
+       // Space in buffer is aligned
+       ui32ReqSize = PVRSRVTL_ALIGN(ui32ReqSize) + sizeof(PVRSRVTL_PACKETHDR);
+
+       /* Check that a reserve is pending and that ReqSize + packet header size does not exceed it. */
+       if ((ui32LPending == NOTHING_PENDING) || (ui32ReqSize > ui32LPending))
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_MISUSE);
+       }
+
+       /* Update pointer to written data. */
+       ui32LWrite = (ui32LWrite + ui32ReqSize) % psTmp->ui32Size;
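+       /* e.g. with ui32Size = 1024, ui32LWrite = 984 and an aligned commit of 40
+        * bytes (header included), (984 + 40) % 1024 = 0: an exact fit wraps the
+        * write offset back to the start of the buffer */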
+
+       /* and reset LPending to NOTHING_PENDING since the data is now submitted */
+       ui32LPending = NOTHING_PENDING;
+
+#if defined(TL_BUFFER_STATS)
+       /* Calculate new number of bytes unread */
+       if (ui32LWrite > ui32LRead)
+       {
+               ui32UnreadBytes = (ui32LWrite-ui32LRead);
+       }
+       else if (ui32LWrite < ui32LRead)
+       {
+               ui32UnreadBytes = (psTmp->ui32Size-ui32LRead+ui32LWrite);
+       }
+       else
+       { /* else equal, ignore */
+               ui32UnreadBytes = 0;
+       }
+
+       /* Calculate high water mark for debug purposes */
+       if (ui32UnreadBytes > psTmp->ui32BufferUt)
+       {
+               psTmp->ui32BufferUt = ui32UnreadBytes;
+       }
+#endif
+
+       /* Memory barrier required to ensure prior data written by writer is
+        * flushed from WC buffer to main memory. */
+       OSWriteMemoryBarrier(NULL);
+
+       /* Acquire stream lock to ensure other context(s) (if any)
+        * wait on the lock (in DoTLStreamReserve) for consistent values
+        * of write offset and pending value */
+       OSLockAcquire (psTmp->hStreamWLock);
+
+       /* Update stream buffer parameters to match local copies */
+       psTmp->ui32Write = ui32LWrite;
+       psTmp->ui32Pending = ui32LPending;
+
+       /* Ensure write pointer is flushed */
+       OSWriteMemoryBarrier(&psTmp->ui32Write);
+
+       TL_COUNTER_ADD(psTmp->ui32ProducerByteCount, ui32ReqSize);
+       TL_COUNTER_INC(psTmp->ui32NumCommits);
+
+#if defined(TL_BUFFER_STATS)
+       /* If there has been no reader since the first reserve on an empty buffer,
+        * and current utilisation is high (>= 90%), calculate the time taken
+        * to fill up the buffer */
+       if ((OSAtomicRead(&psTmp->bNoReaderSinceFirstReserve) == 1) &&
+           (TLStreamGetUT(psTmp) >= 90 * psTmp->ui32Size/100))
+       {
+               IMG_UINT32 ui32TimeToFullInUs = OSClockus() - psTmp->ui32TimeStart;
+               if (psTmp->ui32MinTimeToFullInUs > ui32TimeToFullInUs)
+               {
+                       psTmp->ui32MinTimeToFullInUs = ui32TimeToFullInUs;
+               }
+               /* Following write ensures ui32MinTimeToFullInUs doesn't lose its
+                * real (expected) value in case there is no reader until next Commit call */
+               OSAtomicWrite(&psTmp->bNoReaderSinceFirstReserve, 0);
+       }
+#endif
+
+       if (!psTmp->bNoSignalOnCommit)
+       {
+               /* If we have transitioned from an empty buffer to a non-empty buffer, we
+                * must signal a possibly waiting consumer. BUT let the signal be "deferred"
+                * until the buffer is at least 'ui32ThresholdUsageForSignal' bytes full. This
+                * avoids a race between OSEventObjectSignal and OSEventObjectWaitTimeout
+                * (in TLServerAcquireDataKM), where a "signal" might happen before "wait",
+                * resulting in the signal being lost and the stream reader waiting even
+                * though the buffer is no longer empty */
+               if (ui32OldWrite == ui32LRead)
+               {
+                       psTmp->bSignalPending = IMG_TRUE;
+               }
+
+               if (psTmp->bSignalPending && (TLStreamGetUT(psTmp) >= psTmp->ui32ThresholdUsageForSignal))
+               {
+                       TL_COUNTER_INC(psTmp->ui32SignalsSent);
+                       psTmp->bSignalPending = IMG_FALSE;
+
+                       /* Signal consumers that may be waiting */
+                       eError = OSEventObjectSignal(psTmp->psNode->hReadEventObj);
+                       if (eError != PVRSRV_OK)
+                       {
+                               OSLockRelease (psTmp->hStreamWLock);
+                               PVR_DPF_RETURN_RC(eError);
+                       }
+               }
+               else
+               {
+                       TL_COUNTER_INC(psTmp->ui32SignalNotSent);
+               }
+       }
+       OSLockRelease (psTmp->hStreamWLock);
+
+       PVR_DPF_RETURN_OK;
+}
+
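+/* Convenience wrapper: reserve ui32Size bytes in the stream, copy the
+ * caller's data into the reserved space and commit it in one call. */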
+PVRSRV_ERROR
+TLStreamWrite(IMG_HANDLE hStream, IMG_UINT8 *pui8Src, IMG_UINT32 ui32Size)
+{
+       IMG_BYTE *pbyDest = NULL;
+       PVRSRV_ERROR eError;
+
+       PVR_DPF_ENTERED;
+
+       if (NULL == hStream)
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+       }
+
+       eError = TLStreamReserve(hStream, &pbyDest, ui32Size);
+       if (PVRSRV_OK != eError)
+       {
+               PVR_DPF_RETURN_RC(eError);
+       }
+       else
+       {
+               OSDeviceMemCopy((void*)pbyDest, (void*)pui8Src, ui32Size);
+               eError = TLStreamCommit(hStream, ui32Size);
+               if (PVRSRV_OK != eError)
+               {
+                       PVR_DPF_RETURN_RC(eError);
+               }
+       }
+
+       PVR_DPF_RETURN_OK;
+}
+
+PVRSRV_ERROR
+TLStreamWriteRetFlags(IMG_HANDLE hStream, IMG_UINT8 *pui8Src, IMG_UINT32 ui32Size, IMG_UINT32 *pui32Flags)
+{
+       IMG_BYTE *pbyDest = NULL;
+       PVRSRV_ERROR eError;
+
+       PVR_DPF_ENTERED;
+
+       if (NULL == hStream)
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+       }
+
+       eError = TLStreamReserveReturnFlags(hStream, &pbyDest, ui32Size, pui32Flags);
+       if (PVRSRV_OK != eError)
+       {
+               PVR_DPF_RETURN_RC(eError);
+       }
+       else
+       {
+               OSDeviceMemCopy((void*)pbyDest, (void*)pui8Src, ui32Size);
+               eError = TLStreamCommit(hStream, ui32Size);
+               if (PVRSRV_OK != eError)
+               {
+                       PVR_DPF_RETURN_RC(eError);
+               }
+       }
+
+       PVR_DPF_RETURN_OK;
+}
+
+void TLStreamInfo(IMG_HANDLE hStream, PTL_STREAM_INFO psInfo)
+{
+       IMG_DEVMEM_SIZE_T actual_req_size;
+       IMG_DEVMEM_ALIGN_T align = 4; /* Low fake value so the real value can be obtained */
+
+       actual_req_size = 2;
+       /* ignore error as OSGetPageShift() should always return correct value */
+       (void) DevmemExportalignAdjustSizeAndAlign(OSGetPageShift(), &actual_req_size, &align);
+
+       psInfo->headerSize = sizeof(PVRSRVTL_PACKETHDR);
+       psInfo->minReservationSize = sizeof(IMG_UINT32);
+       psInfo->pageSize = (IMG_UINT32)(actual_req_size);
+       psInfo->pageAlign = (IMG_UINT32)(align);
+       psInfo->maxTLpacketSize = ((PTL_STREAM)hStream)->ui32MaxPacketSize;
+}
+
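+/* Write a zero-sized end-of-stream marker packet. With bRemoveOld set, the
+ * EOS_REMOVEOLD packet type is used instead. EOS markers are rejected on
+ * permanent (no-wrap) stream buffers. */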
+PVRSRV_ERROR
+TLStreamMarkEOS(IMG_HANDLE psStream, IMG_BOOL bRemoveOld)
+{
+       PTL_STREAM   psTmp;
+       PVRSRV_ERROR eError;
+       IMG_UINT8*   pData;
+
+       PVR_DPF_ENTERED;
+
+       if (NULL == psStream)
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+       }
+
+       psTmp = (PTL_STREAM)psStream;
+
+       /* EOS packets are not supported on permanent stream buffers at present;
+        * EOS is best used with streams where data is consumed. */
+       if (psTmp->bNoWrapPermanent)
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_MISUSE);
+       }
+
+       if (bRemoveOld)
+       {
+               eError = DoTLStreamReserve(psStream, &pData, 0, 0, PVRSRVTL_PACKETTYPE_MARKER_EOS_REMOVEOLD, NULL, NULL);
+       }
+       else
+       {
+               eError = DoTLStreamReserve(psStream, &pData, 0, 0, PVRSRVTL_PACKETTYPE_MARKER_EOS, NULL, NULL);
+       }
+
+       if (PVRSRV_OK != eError)
+       {
+               PVR_DPF_RETURN_RC(eError);
+       }
+
+       PVR_DPF_RETURN_RC(TLStreamCommit(psStream, 0));
+}
+
+
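+/* Helper for the open/close notifications below: writes a packet containing
+ * this stream's name (including the NUL terminator) into the associated
+ * notification stream so readers can track writer open/close events. */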
+static PVRSRV_ERROR
+_TLStreamMarkOC(IMG_HANDLE hStream, PVRSRVTL_PACKETTYPE ePacketType)
+{
+       PVRSRV_ERROR eError;
+       PTL_STREAM psStream = hStream;
+       IMG_UINT32 ui32Size;
+       IMG_UINT8 *pData;
+
+       PVR_DPF_ENTERED;
+
+       if (NULL == psStream)
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+       }
+
+       if (NULL == psStream->psNotifStream)
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_NOTIF_STREAM);
+       }
+
+       ui32Size = OSStringLength(psStream->szName) + 1;
+
+       eError = DoTLStreamReserve(psStream->psNotifStream, &pData, ui32Size,
+                                  ui32Size, ePacketType, NULL, NULL);
+       if (PVRSRV_OK != eError)
+       {
+               PVR_DPF_RETURN_RC(eError);
+       }
+
+       OSDeviceMemCopy(pData, psStream->szName, ui32Size);
+
+       PVR_DPF_RETURN_RC(TLStreamCommit(psStream->psNotifStream, ui32Size));
+}
+
+PVRSRV_ERROR
+TLStreamMarkStreamOpen(IMG_HANDLE psStream)
+{
+       return _TLStreamMarkOC(psStream, PVRSRVTL_PACKETTYPE_STREAM_OPEN_FOR_WRITE);
+}
+
+PVRSRV_ERROR
+TLStreamMarkStreamClose(IMG_HANDLE psStream)
+{
+       return _TLStreamMarkOC(psStream, PVRSRVTL_PACKETTYPE_STREAM_CLOSE_FOR_WRITE);
+}
+
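+/* If a reader is attached in blocking mode and unread data is present,
+ * signal the read event object so the reader wakes up. */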
+PVRSRV_ERROR
+TLStreamSync(IMG_HANDLE psStream)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PTL_STREAM   psTmp;
+
+       PVR_DPF_ENTERED;
+
+       if (NULL == psStream)
+       {
+               PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+       }
+       psTmp = (PTL_STREAM)psStream;
+
+       /* If read client exists and has opened stream in blocking mode,
+        * signal when data is available to read. */
+       if (psTmp->psNode->psRDesc &&
+                (!(psTmp->psNode->psRDesc->ui32Flags & PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING)) &&
+                       psTmp->ui32Read != psTmp->ui32Write)
+       {
+               TL_COUNTER_INC(psTmp->ui32ManSyncs);
+               eError = OSEventObjectSignal(psTmp->psNode->hReadEventObj);
+       }
+
+       PVR_DPF_RETURN_RC(eError);
+}
+
+IMG_BOOL
+TLStreamIsOpenForReading(IMG_HANDLE hStream)
+{
+       PTL_STREAM psTmp;
+
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(hStream);
+       psTmp = (PTL_STREAM)hStream;
+
+       PVR_DPF_RETURN_VAL(psTmp->psNode->psRDesc != NULL);
+}
+
+IMG_BOOL
+TLStreamOutOfData(IMG_HANDLE hStream)
+{
+       PTL_STREAM psTmp;
+
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(hStream);
+       psTmp = (PTL_STREAM)hStream;
+
+       /* If both pointers are equal then the buffer is empty */
+       PVR_DPF_RETURN_VAL(psTmp->ui32Read == psTmp->ui32Write);
+}
+
+
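+/* Reset the producer byte counter (TL_BUFFER_STATS builds only).
+ * PVRSRV_ERROR_STREAM_MISUSE is returned if the buffer is not empty
+ * when the counter is reset. */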
+PVRSRV_ERROR
+TLStreamResetProducerByteCount(IMG_HANDLE hStream, IMG_UINT32 ui32Value)
+{
+       PTL_STREAM   psTmp;
+       IMG_UINT32   ui32LRead, ui32LWrite;
+       PVRSRV_ERROR eErr = PVRSRV_OK;
+
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(hStream);
+       psTmp = (PTL_STREAM)hStream;
+       ui32LRead = psTmp->ui32Read;
+       ui32LWrite = psTmp->ui32Write;
+
+       if (ui32LRead != ui32LWrite)
+       {
+               eErr = PVRSRV_ERROR_STREAM_MISUSE;
+       }
+#if defined(TL_BUFFER_STATS)
+       psTmp->ui32ProducerByteCount = ui32Value;
+#else
+       PVR_UNREFERENCED_PARAMETER(ui32Value);
+#endif
+       PVR_DPF_RETURN_RC(eErr);
+}
+/*
+ * Internal stream APIs to server part of Transport Layer, declared in
+ * header tlintern.h. Direct pointers to stream objects are used here as
+ * these functions are internal.
+ */
+IMG_UINT32
+TLStreamAcquireReadPos(PTL_STREAM psStream,
+                       IMG_BOOL bDisableCallback,
+                       IMG_UINT32* puiReadOffset)
+{
+       IMG_UINT32 uiReadLen = 0;
+       IMG_UINT32 ui32LRead, ui32LWrite;
+
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(psStream);
+       PVR_ASSERT(puiReadOffset);
+
+       if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST)
+       {
+               if (!OSTryLockAcquire(psStream->hReadLock))
+               {
+                       /*
+                        * This is a normal event when the system is under load.
+                        * An example of how to produce this is to run testrunner /
+                        * regression/ddk_test_seq2_host_fw_mem.conf with HTB / pvrhtbd
+                        * configured as
+                        *
+                        * # pvrdebug -log trace -loggroups main,pow,debug \
+                        * -hostloggroups main,ctrl,sync,brg -hostlogtype dropoldest
+                        *
+                        * # pvrhtbd -hostloggroups main,ctrl,sync,brg
+                        *
+                        * We will see a small number of these collisions but as this is
+                        * an expected calling path, and an expected return code, we drop
+                        * the severity to just be a debug MESSAGE instead of WARNING
+                        */
+                       PVR_DPF((PVR_DBG_MESSAGE,
+                           "%s: Read lock on stream '%s' is acquired by some writer, "
+                           "hence reader failed to acquire read lock.", __func__,
+                           psStream->szName));
+#if defined(TL_BUFFER_STATS)
+                       TL_COUNTER_INC(psStream->ui32CntReadFails);
+#endif
+                       PVR_DPF_RETURN_VAL(0);
+               }
+       }
+
+#if defined(TL_BUFFER_STATS)
+       TL_COUNTER_INC(psStream->ui32CntReadSuccesses);
+#endif
+
+       /* Grab a local copy */
+       ui32LRead = psStream->ui32Read;
+       ui32LWrite = psStream->ui32Write;
+
+       if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST)
+       {
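+               /* Mark the read as in progress so a drop-oldest writer cannot
+                * move the read offset from under us while we hold this snapshot */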
+               psStream->bReadPending = IMG_TRUE;
+               OSLockRelease(psStream->hReadLock);
+       }
+
+       /* No data available and CB defined - try and get data */
+       if ((ui32LRead == ui32LWrite) && psStream->pfProducerCallback && !bDisableCallback)
+       {
+               PVRSRV_ERROR eRc;
+               IMG_UINT32   ui32Resp = 0;
+
+               eRc = ((TL_STREAM_SOURCECB)psStream->pfProducerCallback)(psStream, TL_SOURCECB_OP_CLIENT_EOS,
+                               &ui32Resp, psStream->pvProducerUserData);
+               PVR_LOG_IF_ERROR(eRc, "TLStream->pfProducerCallback");
+
+               ui32LWrite = psStream->ui32Write;
+       }
+
+       /* No data available... */
+       if (ui32LRead == ui32LWrite)
+       {
+               if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST)
+               {
+                       psStream->bReadPending = IMG_FALSE;
+               }
+               PVR_DPF_RETURN_VAL(0);
+       }
+
+#if defined(TL_BUFFER_STATS)
+       /* The moment the reader knows it will see non-zero data, it marks its presence to the writer */
+       OSAtomicWrite (&psStream->bNoReaderSinceFirstReserve, 0);
+#endif
+
+       /* Data is available to read... */
+       *puiReadOffset = ui32LRead;
+
+       /*PVR_DPF((PVR_DBG_VERBOSE,
+        *              "TLStreamAcquireReadPos Start before: Write:%d, Read:%d, size:%d",
+        *              ui32LWrite, ui32LRead, psStream->ui32Size));
+        */
+
+       if (ui32LRead > ui32LWrite)
+       {       /* CB has wrapped around. */
+               PVR_ASSERT(!psStream->bNoWrapPermanent);
+               /* Return the first contiguous piece of memory, i.e. [Read, EndOfBuffer],
+                * and let a subsequent AcquireReadPos read the rest of the buffer */
+               /*PVR_DPF((PVR_DBG_VERBOSE, "TLStreamAcquireReadPos buffer has wrapped"));*/
+               uiReadLen = psStream->ui32Size - ui32LRead;
+               TL_COUNTER_INC(psStream->ui32AcquireRead2);
+       }
+       else
+       {       /* CB has not wrapped */
+               uiReadLen = ui32LWrite - ui32LRead;
+               TL_COUNTER_INC(psStream->ui32AcquireRead1);
+       }
+
+       PVR_DPF_RETURN_VAL(uiReadLen);
+}
+
+PVRSRV_ERROR
+TLStreamAdvanceReadPos(PTL_STREAM psStream,
+                       IMG_UINT32 uiReadLen,
+                       IMG_UINT32 uiOrigReadLen)
+{
+       IMG_UINT32 uiNewReadPos;
+
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(psStream);
+
+       /*
+        * This API does not take the read lock: 'bReadPending' is sufficient
+        * to keep the read index safe by preventing a writer from updating it,
+        * and 'bReadPending' itself is safe because it is only modified by
+        * readers and only one reader can be in action at a time.
+        */
+
+       /* Update the read offset by the length provided, in a circular manner.
+        * The update is assumed to be atomic, hence no locks are needed.
+        */
+       uiNewReadPos = (psStream->ui32Read + uiReadLen) % psStream->ui32Size;
+
+       /* Must validate length is on a packet boundary, for
+        * TLReleaseDataLess calls.
+        */
+       if (uiReadLen != uiOrigReadLen) /* buffer not empty */
+       {
+               PVRSRVTL_PPACKETHDR psHdr = GET_PACKET_HDR(psStream->pbyBuffer+uiNewReadPos);
+               PVRSRVTL_PACKETTYPE eType = GET_PACKET_TYPE(psHdr);
+
+               if ((psHdr->uiReserved != PVRSRVTL_PACKETHDR_RESERVED) ||
+                       (eType == PVRSRVTL_PACKETTYPE_UNDEF) ||
+                       (eType >= PVRSRVTL_PACKETTYPE_LAST))
+               {
+                       PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_ALIGNMENT);
+               }
+               /* else OK, on a packet boundary */
+       }
+       /* else no check needed */
+
+       psStream->ui32Read = uiNewReadPos;
+
+       if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST)
+       {
+               psStream->bReadPending = IMG_FALSE;
+       }
+
+       /* notify reserves that may be pending */
+       /* The producer event object is used to signal the StreamReserve if the TL
+        * Buffer is in blocking mode and is full.
+        * Previously this event was only signalled if the buffer was created in
+        * blocking mode. Since the buffer mode can now change dynamically the event
+        * is signalled every time to avoid any potential race where the signal is
+        * required, but not produced.
+        */
+       {
+               PVRSRV_ERROR eError;
+               eError = OSEventObjectSignal(psStream->hProducerEventObj);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_WARNING,
+                                        "Error in TLStreamAdvanceReadPos: OSEventObjectSignal returned:%u",
+                                        eError));
+                       /* We've failed to notify the producer event. This means there may
+                        * be a delay in generating more data to be consumed until the next
+                        * Write() generating action occurs.
+                        */
+               }
+       }
+
+       PVR_DPF((PVR_DBG_VERBOSE,
+                        "TLStreamAdvanceReadPos Read now at: %d",
+                       psStream->ui32Read));
+       PVR_DPF_RETURN_OK;
+}
+
+void
+TLStreamResetReadPos(PTL_STREAM psStream)
+{
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(psStream);
+
+       if (psStream->bNoWrapPermanent)
+       {
+
+               /* Reset the read offset to the start of the buffer. The update is
+                * assumed to be atomic, hence no locks are needed */
+               psStream->ui32Read = 0;
+
+               PVR_DPF((PVR_DBG_VERBOSE,
+                                "TLStreamResetReadPos Read now at: %d",
+                                       psStream->ui32Read));
+       }
+       else
+       {
+               /* else for other stream types this is a no-op */
+               PVR_DPF((PVR_DBG_VERBOSE,
+                               "No need to reset read position of circular tlstream"));
+       }
+
+       PVR_DPF_RETURN;
+}
+
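+/* Free all stream resources: the write and read locks, the producer event
+ * object and handle, the shared buffer memory and the stream object itself. */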
+void
+TLStreamDestroy (PTL_STREAM psStream)
+{
+       PVR_ASSERT (psStream);
+
+       OSLockDestroy (psStream->hStreamWLock);
+       OSLockDestroy (psStream->hReadLock);
+
+       OSEventObjectClose(psStream->hProducerEvent);
+       OSEventObjectDestroy(psStream->hProducerEventObj);
+
+       TLFreeSharedMem(psStream);
+       OSFreeMem(psStream);
+}
+
+DEVMEM_MEMDESC*
+TLStreamGetBufferPointer(PTL_STREAM psStream)
+{
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(psStream);
+
+       PVR_DPF_RETURN_VAL(psStream->psStreamMemDesc);
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/vmm_pvz_client.c b/drivers/gpu/drm/img/img-rogue/services/server/common/vmm_pvz_client.c
new file mode 100644 (file)
index 0000000..427811a
--- /dev/null
@@ -0,0 +1,138 @@
+/*************************************************************************/ /*!
+@File                  vmm_pvz_client.c
+@Title          VM manager client para-virtualization
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provides VMM client para-virtualization APIs
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#include "vmm_impl.h"
+#include "vz_vmm_pvz.h"
+#include "vmm_pvz_client.h"
+
+
+static inline void
+PvzClientLockAcquire(void)
+{
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+       OSLockAcquire(psPVRSRVData->hPvzConnectionLock);
+}
+
+static inline void
+PvzClientLockRelease(void)
+{
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+       OSLockRelease(psPVRSRVData->hPvzConnectionLock);
+}
+
+/*
+ * ===========================================================
+ *  The following client para-virtualization (pvz) functions
+ *  are exclusively called by guests to initiate a pvz call
+ *  to the host via hypervisor (guest -> vm manager -> host)
+ * ===========================================================
+ */
+
+PVRSRV_ERROR
+PvzClientMapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+       PVRSRV_ERROR eError;
+       IMG_DEV_PHYADDR sDevPAddr;
+       VMM_PVZ_CONNECTION *psVmmPvz;
+       IMG_UINT32 uiFuncID = PVZ_BRIDGE_MAPDEVICEPHYSHEAP;
+       PHYS_HEAP *psFwPhysHeap = psDevConfig->psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN];
+
+       eError = PhysHeapGetDevPAddr(psFwPhysHeap, &sDevPAddr);
+       PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapGetDevPAddr");
+
+#if defined(PVR_PMR_TRANSLATE_UMA_ADDRESSES)
+{
+       /* The host expects a PA rather than an IPA address, so on platforms where
+        * the IPA-PA translation is not done in hardware, perform a software translation */
+
+       IMG_DEV_PHYADDR sDevPAddrTranslated;
+
+       PhysHeapCpuPAddrToDevPAddr(psFwPhysHeap, 1, &sDevPAddrTranslated, (IMG_CPU_PHYADDR *)&sDevPAddr);
+       sDevPAddr.uiAddr = sDevPAddrTranslated.uiAddr;
+}
+#endif
+
+       PVR_LOG_RETURN_IF_FALSE((sDevPAddr.uiAddr != 0), "PhysHeapGetDevPAddr", PVRSRV_ERROR_INVALID_PARAMS);
+
+       psVmmPvz = PvzConnectionAcquire();
+       PvzClientLockAcquire();
+
+       eError = psVmmPvz->sClientFuncTab.pfnMapDevPhysHeap(uiFuncID,
+                                                                                                           0,
+                                                                                                           RGX_FIRMWARE_RAW_HEAP_SIZE,
+                                                                                                           sDevPAddr.uiAddr);
+
+       PvzClientLockRelease();
+       PvzConnectionRelease(psVmmPvz);
+
+       return eError;
+}
+
+PVRSRV_ERROR
+PvzClientUnmapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 uiFuncID = PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP;
+       VMM_PVZ_CONNECTION *psVmmPvz = PvzConnectionAcquire();
+       PVR_ASSERT(psVmmPvz);
+
+       PvzClientLockAcquire();
+
+       PVR_ASSERT(psVmmPvz->sClientFuncTab.pfnUnmapDevPhysHeap);
+
+       eError = psVmmPvz->sClientFuncTab.pfnUnmapDevPhysHeap(uiFuncID, 0);
+
+       PvzClientLockRelease();
+       PvzConnectionRelease(psVmmPvz);
+
+       return eError;
+}
+
+/******************************************************************************
+ End of file (vmm_pvz_client.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/vmm_pvz_server.c b/drivers/gpu/drm/img/img-rogue/services/server/common/vmm_pvz_server.c
new file mode 100644 (file)
index 0000000..f2c77e8
--- /dev/null
@@ -0,0 +1,245 @@
+/*************************************************************************/ /*!
+@File                  vmm_pvz_server.c
+@Title          VM manager server para-virtualization handlers
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provides VMM server para-virtualization handler APIs
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "rgxfwutils.h"
+
+#include "vz_vm.h"
+#include "vmm_impl.h"
+#include "vz_vmm_pvz.h"
+#include "vmm_pvz_server.h"
+
+static inline void
+PvzServerLockAcquire(void)
+{
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+       OSLockAcquire(psPVRSRVData->hPvzConnectionLock);
+}
+
+static inline void
+PvzServerLockRelease(void)
+{
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+       OSLockRelease(psPVRSRVData->hPvzConnectionLock);
+}
+
+
+/*
+ * ===========================================================
+ *  The following server para-virtualization (pvz) functions
+ *  are exclusively called by the VM manager (hypervisor) on
+ *  behalf of guests to complete guest pvz calls
+ *  (guest -> vm manager -> host)
+ * ===========================================================
+ */
+
+PVRSRV_ERROR
+PvzServerMapDevPhysHeap(IMG_UINT32 ui32OSID,
+                                               IMG_UINT32 ui32FuncID,
+                                               IMG_UINT32 ui32DevID,
+                                               IMG_UINT64 ui64Size,
+                                               IMG_UINT64 ui64PAddr)
+{
+#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
+               /*
+                * Reject hypercall if called on a system configured at build time to
+                * preallocate the Guest's firmware heaps from static carveout memory.
+                */
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Host PVZ config: Does not match with Guest PVZ config\n"
+                        "    Host preallocates the Guest's FW physheap from static memory carveouts at startup.\n", __func__));
+               return PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+#else
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PVR_LOG_RETURN_IF_FALSE((ui32DevID == 0), "Invalid Device ID", PVRSRV_ERROR_INVALID_PARAMS);
+
+       if (ui32FuncID != PVZ_BRIDGE_MAPDEVICEPHYSHEAP)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Host PVZ call: OSID: %d: Invalid function ID: expected %d, got %d",
+                               __func__,
+                               ui32OSID,
+                               (IMG_UINT32)PVZ_BRIDGE_MAPDEVICEPHYSHEAP,
+                               ui32FuncID));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       PvzServerLockAcquire();
+
+#if defined(SUPPORT_RGX)
+       if (IsVmOnline(ui32OSID))
+       {
+               PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+               PVRSRV_DEVICE_NODE *psDeviceNode = psPVRSRVData->psDeviceNodeList;
+               IMG_DEV_PHYADDR sDevPAddr = {ui64PAddr};
+               IMG_UINT32 sync;
+
+               eError = RGXFwRawHeapAllocMap(psDeviceNode, ui32OSID, sDevPAddr, ui64Size);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXFwRawHeapAllocMap", e0);
+
+               /* Invalidate MMU cache in preparation for a kick from this Guest */
+               eError = psDeviceNode->pfnMMUCacheInvalidateKick(psDeviceNode, &sync);
+               PVR_LOG_GOTO_IF_ERROR(eError, "MMUCacheInvalidateKick", e0);
+
+               /* Everything is ready for the firmware to start interacting with this OS */
+               eError = RGXFWSetFwOsState(psDeviceNode->pvDevice, ui32OSID, RGXFWIF_OS_ONLINE);
+       }
+e0:
+#endif /* defined(SUPPORT_RGX) */
+       PvzServerLockRelease();
+
+       return eError;
+#endif
+}
+
+PVRSRV_ERROR
+PvzServerUnmapDevPhysHeap(IMG_UINT32 ui32OSID,
+                                                 IMG_UINT32 ui32FuncID,
+                                                 IMG_UINT32 ui32DevID)
+{
+#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
+               /*
+                * Reject hypercall if called on a system configured at build time to
+                * preallocate the Guest's firmware heaps from static carveout memory.
+                */
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Host PVZ config: Does not match with Guest PVZ config\n"
+                        "    Host preallocates the Guest's FW physheap from static memory carveouts at startup.\n", __func__));
+               return PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+#else
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PVR_LOG_RETURN_IF_FALSE((ui32DevID == 0), "Invalid Device ID", PVRSRV_ERROR_INVALID_PARAMS);
+
+       if (ui32FuncID != PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Host PVZ call: OSID: %d: Invalid function ID: expected %d, got %d",
+                               __func__,
+                               ui32OSID,
+                               (IMG_UINT32)PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP,
+                               ui32FuncID));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       PvzServerLockAcquire();
+
+#if defined(SUPPORT_RGX)
+       if (IsVmOnline(ui32OSID))
+       {
+               PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+               PVRSRV_DEVICE_NODE *psDeviceNode = psPVRSRVData->psDeviceNodeList;
+
+               /* Order firmware to offload this OS' data and stop accepting commands from it */
+               eError = RGXFWSetFwOsState(psDeviceNode->pvDevice, ui32OSID, RGXFWIF_OS_OFFLINE);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXFWSetFwOsState", e0);
+
+               /* It is now safe to remove the Guest's memory mappings */
+               RGXFwRawHeapUnmapFree(psDeviceNode, ui32OSID);
+       }
+e0:
+#endif
+
+       PvzServerLockRelease();
+
+       return eError;
+#endif
+}
+
+/*
+ * ============================================================
+ *  The following server para-virtualization (pvz) functions
+ *  are exclusively called by the VM manager (hypervisor) to
+ *  pass side band information to the host (vm manager -> host)
+ * ============================================================
+ */
+
+PVRSRV_ERROR
+PvzServerOnVmOnline(IMG_UINT32 ui32OSID)
+{
+       PVRSRV_ERROR eError;
+
+       PvzServerLockAcquire();
+
+       eError = PvzOnVmOnline(ui32OSID);
+
+       PvzServerLockRelease();
+
+       return eError;
+}
+
+PVRSRV_ERROR
+PvzServerOnVmOffline(IMG_UINT32 ui32OSID)
+{
+       PVRSRV_ERROR eError;
+
+       PvzServerLockAcquire();
+
+       eError = PvzOnVmOffline(ui32OSID);
+
+       PvzServerLockRelease();
+
+       return eError;
+}
+
+PVRSRV_ERROR
+PvzServerVMMConfigure(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue)
+{
+       PVRSRV_ERROR eError;
+
+       PvzServerLockAcquire();
+
+       eError = PvzVMMConfigure(eVMMParamType, ui32ParamValue);
+
+       PvzServerLockRelease();
+
+       return eError;
+}
+
+/******************************************************************************
+ End of file (vmm_pvz_server.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/vz_vmm_pvz.c b/drivers/gpu/drm/img/img-rogue/services/server/common/vz_vmm_pvz.c
new file mode 100644 (file)
index 0000000..39a52b5
--- /dev/null
@@ -0,0 +1,183 @@
+/*************************************************************************/ /*!
+@File           vz_vmm_pvz.c
+@Title          VM manager para-virtualization APIs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    VM manager para-virtualization management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv.h"
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "allocmem.h"
+#include "pvrsrv.h"
+#include "vz_vmm_pvz.h"
+
+#if (RGX_NUM_OS_SUPPORTED > 1)
+static PVRSRV_ERROR
+PvzConnectionValidate(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+       VMM_PVZ_CONNECTION *psVmmPvz;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       /*
+        * Acquire the underlying VM manager PVZ connection & validate it.
+        */
+       psVmmPvz = PvzConnectionAcquire();
+       if (psVmmPvz == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: %s PVZ config: Unable to acquire PVZ connection",
+                               __func__, PVRSRV_VZ_MODE_IS(GUEST) ? "Guest" : "Host"));
+               eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+               goto e0;
+       }
+
+       /* Log which PVZ setup type is being used by driver */
+#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
+       /*
+        *  Static PVZ bootstrap setup
+        *
+        *  This setup uses carve-out memory, has no hypercall mechanism and does not support
+        *  out-of-order initialisation of host/guest VMs/drivers. The host driver has all
+        *  the information needed to initialize all OSIDs' firmware state when it is loaded,
+        *  and its PVZ layer must mark all guest OSIDs as online as part of its PVZ
+        *  initialisation. With no out-of-order initialisation support, the guest driver
+        *  can only submit a workload to the device after the host driver has completely
+        *  initialized the firmware; the VZ hypervisor/VM setup must guarantee this.
+        */
+       PVR_LOG(("Using static PVZ bootstrap setup"));
+#else
+       /*
+        *  Dynamic PVZ bootstrap setup
+        *
+        *  This setup uses guest memory, has a PVZ hypercall mechanism and supports out-of-order
+        *  initialisation of host/guest VMs/drivers. The host driver initializes only its
+        *  own OSID-0 firmware state when it is loaded. Each guest driver uses its PVZ
+        *  interface to hypercall to the host driver, both to synchronise its initialisation
+        *  (so it does not submit any workload to the firmware before the host driver has
+        *  had a chance to initialize the firmware) and to initialize its own OSID-x
+        *  firmware state.
+        */
+       PVR_LOG(("Using dynamic PVZ bootstrap setup"));
+
+       if (!PVRSRV_VZ_MODE_IS(GUEST)           &&
+                        (psVmmPvz->sServerFuncTab.pfnMapDevPhysHeap      == NULL ||
+                         psVmmPvz->sServerFuncTab.pfnUnmapDevPhysHeap    == NULL))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Host PVZ config: Functions for mapping a Guest's heaps not implemented\n", __func__));
+               eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+       }
+#endif
+
+       PvzConnectionRelease(psVmmPvz);
+e0:
+       return eError;
+}
+#endif /* (RGX_NUM_OS_SUPPORTED > 1) */
+
+PVRSRV_ERROR PvzConnectionInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+#if (RGX_NUM_OS_SUPPORTED == 1)
+# if !defined(PVRSRV_NEED_PVR_DPF)
+       PVR_UNREFERENCED_PARAMETER(psPVRSRVData);
+# endif
+       PVR_DPF((PVR_DBG_ERROR, "This kernel driver does not support virtualization. Please rebuild with RGX_NUM_OS_SUPPORTED > 1"));
+       PVR_DPF((PVR_DBG_ERROR, "Halting initialisation, cannot transition to %s mode",
+                       psPVRSRVData->eDriverMode == DRIVER_MODE_HOST ? "host" : "guest"));
+       eError = PVRSRV_ERROR_NOT_SUPPORTED;
+       goto e0;
+#else
+
+       /* Create para-virtualization connection lock */
+       eError = OSLockCreate(&psPVRSRVData->hPvzConnectionLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0);
+
+       /* Create VM manager para-virtualization connection */
+       eError = VMMCreatePvzConnection((VMM_PVZ_CONNECTION **)&psPVRSRVData->hPvzConnection);
+       if (eError != PVRSRV_OK)
+       {
+               OSLockDestroy(psPVRSRVData->hPvzConnectionLock);
+               psPVRSRVData->hPvzConnectionLock = NULL;
+
+               PVR_LOG_ERROR(eError, "VMMCreatePvzConnection");
+               goto e0;
+       }
+
+       /* Ensure pvz connection is configured correctly */
+       eError = PvzConnectionValidate(psDevConfig);
+       PVR_LOG_RETURN_IF_ERROR(eError, "PvzConnectionValidate");
+
+       psPVRSRVData->abVmOnline[RGXFW_HOST_OS] = IMG_TRUE;
+#endif
+e0:
+       return eError;
+}
+
+void PvzConnectionDeInit(void)
+{
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+       VMMDestroyPvzConnection(psPVRSRVData->hPvzConnection);
+       psPVRSRVData->hPvzConnection = NULL;
+
+       OSLockDestroy(psPVRSRVData->hPvzConnectionLock);
+       psPVRSRVData->hPvzConnectionLock = NULL;
+}
+
+VMM_PVZ_CONNECTION* PvzConnectionAcquire(void)
+{
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+       PVR_ASSERT(psPVRSRVData->hPvzConnection != NULL);
+       return psPVRSRVData->hPvzConnection;
+}
+
+void PvzConnectionRelease(VMM_PVZ_CONNECTION *psParaVz)
+{
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+       /* Nothing to do, just validate the pointer we're passed back */
+       PVR_ASSERT(psParaVz == psPVRSRVData->hPvzConnection);
+}
+
+/******************************************************************************
+ End of file (vz_vmm_pvz.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/common/vz_vmm_vm.c b/drivers/gpu/drm/img/img-rogue/services/server/common/vz_vmm_vm.c
new file mode 100644 (file)
index 0000000..488c8b4
--- /dev/null
@@ -0,0 +1,221 @@
+/*************************************************************************/ /*!
+@File                  vz_vmm_vm.c
+@Title          System virtualization VM support APIs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    System virtualization VM support functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv.h"
+#include "pvrsrv_error.h"
+#include "vz_vm.h"
+#include "rgxfwutils.h"
+
+bool IsVmOnline(IMG_UINT32 ui32OSID)
+{
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+       return (ui32OSID >= RGX_NUM_OS_SUPPORTED) ? (false) : (psPVRSRVData->abVmOnline[ui32OSID]);
+}
+
+PVRSRV_ERROR PvzOnVmOnline(IMG_UINT32 ui32OSid)
+{
+#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1)
+       PVRSRV_ERROR       eError          = PVRSRV_ERROR_INVALID_PARAMS;
+#else
+       PVRSRV_ERROR       eError          = PVRSRV_OK;
+       PVRSRV_DATA        *psPVRSRVData   = PVRSRVGetPVRSRVData();
+       PVRSRV_DEVICE_NODE *psDevNode;
+
+       if (ui32OSid == 0 || ui32OSid >= RGX_NUM_OS_SUPPORTED)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: invalid OSID (%d)",
+                                __func__, ui32OSid));
+
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto e0;
+       }
+
+       if (psPVRSRVData->abVmOnline[ui32OSid])
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: OSID %d is already enabled.",
+                                __func__, ui32OSid));
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto e0;
+       }
+
+       /* For now, limit support to single device setups */
+       psDevNode = psPVRSRVData->psDeviceNodeList;
+
+       if (psDevNode->eDevState == PVRSRV_DEVICE_STATE_INIT)
+       {
+
+               /* Firmware not initialized yet, do it here */
+               eError = PVRSRVCommonDeviceInitialise(psDevNode);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: failed to initialize firmware (%s)",
+                                        __func__, PVRSRVGetErrorString(eError)));
+                       goto e0;
+               }
+       }
+
+       eError = RGXFWHealthCheckCmd(psDevNode->pvDevice);
+       if (eError != PVRSRV_OK)
+       {
+               goto e0;
+       }
+
+       psPVRSRVData->abVmOnline[ui32OSid] = IMG_TRUE;
+
+#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
+       /* Everything is ready for the firmware to start interacting with this OS */
+       eError = RGXFWSetFwOsState(psDevNode->pvDevice, ui32OSid, RGXFWIF_OS_ONLINE);
+#endif
+
+e0:
+#endif
+       return eError;
+}
+
+PVRSRV_ERROR PvzOnVmOffline(IMG_UINT32 ui32OSid)
+{
+#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1)
+       PVRSRV_ERROR       eError          = PVRSRV_ERROR_INVALID_PARAMS;
+#else
+       PVRSRV_ERROR      eError          = PVRSRV_OK;
+       PVRSRV_DATA       *psPVRSRVData   = PVRSRVGetPVRSRVData();
+       PVRSRV_DEVICE_NODE *psDevNode;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       if (ui32OSid == 0 || ui32OSid >= RGX_NUM_OS_SUPPORTED)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: invalid OSID (%d)",
+                                __func__, ui32OSid));
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto e0;
+       }
+
+       if (!psPVRSRVData->abVmOnline[ui32OSid])
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: OSID %d is already disabled.",
+                                __func__, ui32OSid));
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto e0;
+       }
+
+       /* For now, limit support to single device setups */
+       psDevNode = psPVRSRVData->psDeviceNodeList;
+       psDevInfo = psDevNode->pvDevice;
+
+       eError = RGXFWSetFwOsState(psDevInfo, ui32OSid, RGXFWIF_OS_OFFLINE);
+       if (eError == PVRSRV_OK)
+       {
+               psPVRSRVData->abVmOnline[ui32OSid] = IMG_FALSE;
+       }
+
+e0:
+#endif
+       return eError;
+}
+
+PVRSRV_ERROR PvzVMMConfigure(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       psDeviceNode = psPVRSRVData->psDeviceNodeList;
+       psDevInfo = psDeviceNode->pvDevice;
+
+       switch (eVMMParamType)
+       {
+#if defined(SUPPORT_RGX)
+               case VMM_CONF_PRIO_OSID0:
+               case VMM_CONF_PRIO_OSID1:
+               case VMM_CONF_PRIO_OSID2:
+               case VMM_CONF_PRIO_OSID3:
+               case VMM_CONF_PRIO_OSID4:
+               case VMM_CONF_PRIO_OSID5:
+               case VMM_CONF_PRIO_OSID6:
+               case VMM_CONF_PRIO_OSID7:
+               {
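+                       /* The priority parameter types are assumed to be numbered so that
+                        * VMM_CONF_PRIO_OSIDn maps directly to OSID n */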
+                       IMG_UINT32 ui32OSid = eVMMParamType;
+                       IMG_UINT32 ui32Prio = ui32ParamValue;
+
+                       if (ui32OSid < RGX_NUM_OS_SUPPORTED)
+                       {
+                               eError = RGXFWChangeOSidPriority(psDevInfo, ui32OSid, ui32Prio);
+                       }
+                       else
+                       {
+                               eError = PVRSRV_ERROR_INVALID_PARAMS;
+                       }
+                       break;
+               }
+               case VMM_CONF_HCS_DEADLINE:
+               {
+                       IMG_UINT32 ui32HCSDeadline = ui32ParamValue;
+                       eError = RGXFWSetHCSDeadline(psDevInfo, ui32HCSDeadline);
+                       break;
+               }
+#else
+       PVR_UNREFERENCED_PARAMETER(ui32ParamValue);
+#endif
+               default:
+               {
+                       eError = PVRSRV_ERROR_INVALID_PARAMS;
+               }
+       }
+
+       return eError;
+}
+
+/******************************************************************************
+ End of file (vz_vmm_vm.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgx_bridge_init.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgx_bridge_init.c
new file mode 100644 (file)
index 0000000..1b1f81d
--- /dev/null
@@ -0,0 +1,111 @@
+/*************************************************************************/ /*!
+@File
+@Title          PVR device dependent bridge Init/Deinit Module (kernel side)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements device dependent PVR Bridge init/deinit code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "rgx_bridge_init.h"
+#include "rgxdevice.h"
+
+#if defined(RGX_FEATURE_FASTRENDER_DM_BIT_MASK)
+PVRSRV_ERROR InitRGXTQ2Bridge(void);
+void DeinitRGXTQ2Bridge(void);
+#endif
+PVRSRV_ERROR InitRGXCMPBridge(void);
+void DeinitRGXCMPBridge(void);
+#if defined(SUPPORT_RGXRAY_BRIDGE)
+PVRSRV_ERROR InitRGXRAYBridge(void);
+void DeinitRGXRAYBridge(void);
+#endif
+
+PVRSRV_ERROR DeviceDepBridgeInit(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       PVRSRV_ERROR eError;
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE))
+       {
+               eError = InitRGXCMPBridge();
+               PVR_LOG_RETURN_IF_ERROR(eError, "InitRGXCMPBridge");
+       }
+
+#if defined(RGX_FEATURE_FASTRENDER_DM_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM))
+       {
+               eError = InitRGXTQ2Bridge();
+               PVR_LOG_RETURN_IF_ERROR(eError, "InitRGXTQ2Bridge");
+       }
+#endif
+
+#if defined(SUPPORT_RGXRAY_BRIDGE)
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) &&
+               RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) > 0)
+       {
+               eError = InitRGXRAYBridge();
+               PVR_LOG_RETURN_IF_ERROR(eError, "InitRGXRAYBridge");
+       }
+#endif
+
+       return PVRSRV_OK;
+}
+
+void DeviceDepBridgeDeInit(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE))
+       {
+               DeinitRGXCMPBridge();
+       }
+
+#if defined(RGX_FEATURE_FASTRENDER_DM_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM))
+       {
+               DeinitRGXTQ2Bridge();
+       }
+#endif
+
+#if defined(SUPPORT_RGXRAY_BRIDGE)
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) &&
+               RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) > 0)
+       {
+               DeinitRGXRAYBridge();
+       }
+#endif
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgx_bridge_init.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgx_bridge_init.h
new file mode 100644 (file)
index 0000000..10e8e72
--- /dev/null
@@ -0,0 +1,55 @@
+/*************************************************************************/ /*!
+@File
+@Title          PVR device dependent bridge Init/Deinit Module (kernel side)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements device dependent PVR Bridge init/deinit code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGX_BRIDGE_INIT_H)
+#define RGX_BRIDGE_INIT_H
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "device.h"
+#include "rgxdevice.h"
+
+PVRSRV_ERROR DeviceDepBridgeInit(PVRSRV_RGXDEV_INFO *psDevInfo);
+void DeviceDepBridgeDeInit(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+#endif /* RGX_BRIDGE_INIT_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxbreakpoint.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxbreakpoint.c
new file mode 100644 (file)
index 0000000..bd147dc
--- /dev/null
@@ -0,0 +1,290 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Breakpoint routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX Breakpoint routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxbreakpoint.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxmem.h"
+#include "device.h"
+#include "sync_internal.h"
+#include "pdump_km.h"
+#include "pvrsrv.h"
+
+PVRSRV_ERROR PVRSRVRGXSetBreakpointKM(CONNECTION_DATA    * psConnection,
+                                      PVRSRV_DEVICE_NODE * psDeviceNode,
+                                      IMG_HANDLE           hMemCtxPrivData,
+                                      RGXFWIF_DM           eFWDataMaster,
+                                      IMG_UINT32           ui32BPAddr,
+                                      IMG_UINT32           ui32HandlerAddr,
+                                      IMG_UINT32           ui32DataMaster)
+{
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+       DEVMEM_MEMDESC          *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+       PVRSRV_ERROR            eError = PVRSRV_OK;
+       RGXFWIF_KCCB_CMD        sBPCmd;
+       IMG_UINT32                      ui32kCCBCommandSlot;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       OSLockAcquire(psDevInfo->hBPLock);
+
+       if (psDevInfo->bBPSet)
+       {
+               eError = PVRSRV_ERROR_BP_ALREADY_SET;
+               goto unlock;
+       }
+
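+       /* Build a firmware breakpoint command, submit it on the kernel CCB for
+        * the target Data Master and wait for the firmware to process it.
+        */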
+       sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+       sBPCmd.uCmdData.sBPData.ui32BPAddr = ui32BPAddr;
+       sBPCmd.uCmdData.sBPData.ui32HandlerAddr = ui32HandlerAddr;
+       sBPCmd.uCmdData.sBPData.ui32BPDM = ui32DataMaster;
+       sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_WRITE | RGXFWIF_BPDATA_FLAGS_ENABLE;
+       sBPCmd.uCmdData.sBPData.eDM = eFWDataMaster;
+
+       eError = RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext,
+                               psFWMemContextMemDesc,
+                               0 ,
+                               RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", unlock);
+
+       eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo,
+                                                 eFWDataMaster,
+                                                 &sBPCmd,
+                                                 PDUMP_FLAGS_CONTINUOUS,
+                                                 &ui32kCCBCommandSlot);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", unlock);
+
+       /* Wait for FW to complete command execution */
+       eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", unlock);
+
+       psDevInfo->eBPDM = eFWDataMaster;
+       psDevInfo->bBPSet = IMG_TRUE;
+
+unlock:
+       OSLockRelease(psDevInfo->hBPLock);
+
+       return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXClearBreakpointKM(CONNECTION_DATA    * psConnection,
+                                        PVRSRV_DEVICE_NODE * psDeviceNode,
+                                        IMG_HANDLE           hMemCtxPrivData)
+{
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+       DEVMEM_MEMDESC          *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+       PVRSRV_ERROR            eError = PVRSRV_OK;
+       RGXFWIF_KCCB_CMD        sBPCmd;
+       IMG_UINT32                      ui32kCCBCommandSlot;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+       sBPCmd.uCmdData.sBPData.ui32BPAddr = 0;
+       sBPCmd.uCmdData.sBPData.ui32HandlerAddr = 0;
+       sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_WRITE | RGXFWIF_BPDATA_FLAGS_CTL;
+       sBPCmd.uCmdData.sBPData.eDM = psDevInfo->eBPDM;
+
+       OSLockAcquire(psDevInfo->hBPLock);
+
+       eError = RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext,
+                               psFWMemContextMemDesc,
+                               0 ,
+                               RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", unlock);
+
+       eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo,
+                                                 psDevInfo->eBPDM,
+                                                 &sBPCmd,
+                                                 PDUMP_FLAGS_CONTINUOUS,
+                                                 &ui32kCCBCommandSlot);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", unlock);
+
+       /* Wait for FW to complete command execution */
+       eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", unlock);
+
+       psDevInfo->bBPSet = IMG_FALSE;
+
+unlock:
+       OSLockRelease(psDevInfo->hBPLock);
+
+       return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXEnableBreakpointKM(CONNECTION_DATA    * psConnection,
+                                         PVRSRV_DEVICE_NODE * psDeviceNode,
+                                         IMG_HANDLE           hMemCtxPrivData)
+{
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+       DEVMEM_MEMDESC          *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+       PVRSRV_ERROR            eError = PVRSRV_OK;
+       RGXFWIF_KCCB_CMD        sBPCmd;
+       IMG_UINT32                      ui32kCCBCommandSlot;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       OSLockAcquire(psDevInfo->hBPLock);
+
+       if (psDevInfo->bBPSet == IMG_FALSE)
+       {
+               eError = PVRSRV_ERROR_BP_NOT_SET;
+               goto unlock;
+       }
+
+       sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+       sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_CTL | RGXFWIF_BPDATA_FLAGS_ENABLE;
+       sBPCmd.uCmdData.sBPData.eDM = psDevInfo->eBPDM;
+
+       eError = RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext,
+                               psFWMemContextMemDesc,
+                               0 ,
+                               RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", unlock);
+
+       eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo,
+                                                 psDevInfo->eBPDM,
+                                                 &sBPCmd,
+                                                 PDUMP_FLAGS_CONTINUOUS,
+                                                 &ui32kCCBCommandSlot);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", unlock);
+
+       /* Wait for FW to complete command execution */
+       eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", unlock);
+
+unlock:
+       OSLockRelease(psDevInfo->hBPLock);
+
+       return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXDisableBreakpointKM(CONNECTION_DATA    * psConnection,
+                                          PVRSRV_DEVICE_NODE * psDeviceNode,
+                                          IMG_HANDLE           hMemCtxPrivData)
+{
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+       DEVMEM_MEMDESC          *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+       PVRSRV_ERROR            eError = PVRSRV_OK;
+       RGXFWIF_KCCB_CMD        sBPCmd;
+       IMG_UINT32                      ui32kCCBCommandSlot;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       OSLockAcquire(psDevInfo->hBPLock);
+
+       if (psDevInfo->bBPSet == IMG_FALSE)
+       {
+               eError = PVRSRV_ERROR_BP_NOT_SET;
+               goto unlock;
+       }
+
+       sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+       sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_CTL;
+       sBPCmd.uCmdData.sBPData.eDM = psDevInfo->eBPDM;
+
+       eError = RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext,
+                               psFWMemContextMemDesc,
+                               0 ,
+                               RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", unlock);
+
+       eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo,
+                                                 psDevInfo->eBPDM,
+                                                 &sBPCmd,
+                                                 PDUMP_FLAGS_CONTINUOUS,
+                                                 &ui32kCCBCommandSlot);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", unlock);
+
+       /* Wait for FW to complete command execution */
+       eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", unlock);
+
+unlock:
+       OSLockRelease(psDevInfo->hBPLock);
+
+       return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXOverallocateBPRegistersKM(CONNECTION_DATA    * psConnection,
+                                                PVRSRV_DEVICE_NODE * psDeviceNode,
+                                                IMG_UINT32           ui32TempRegs,
+                                                IMG_UINT32           ui32SharedRegs)
+{
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_ERROR            eError = PVRSRV_OK;
+       RGXFWIF_KCCB_CMD        sBPCmd;
+       IMG_UINT32                      ui32kCCBCommandSlot;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+       sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_REGS;
+       sBPCmd.uCmdData.sBPData.ui32TempRegs = ui32TempRegs;
+       sBPCmd.uCmdData.sBPData.ui32SharedRegs = ui32SharedRegs;
+       sBPCmd.uCmdData.sBPData.psFWMemContext.ui32Addr = 0U;
+       sBPCmd.uCmdData.sBPData.eDM = RGXFWIF_DM_GP;
+
+       OSLockAcquire(psDevInfo->hBPLock);
+
+       eError = RGXScheduleCommandAndGetKCCBSlot(psDeviceNode->pvDevice,
+                                                 RGXFWIF_DM_GP,
+                                                 &sBPCmd,
+                                                 PDUMP_FLAGS_CONTINUOUS,
+                                                 &ui32kCCBCommandSlot);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", unlock);
+
+       /* Wait for FW to complete command execution */
+       eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", unlock);
+
+unlock:
+       OSLockRelease(psDevInfo->hBPLock);
+
+       return eError;
+}
+
+/******************************************************************************
+ End of file (rgxbreakpoint.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxbreakpoint.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxbreakpoint.h
new file mode 100644 (file)
index 0000000..1a0b87b
--- /dev/null
@@ -0,0 +1,141 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX breakpoint functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX breakpoint functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXBREAKPOINT_H)
+#define RGXBREAKPOINT_H
+
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_km.h"
+
+/*!
+*******************************************************************************
+ @Function     PVRSRVRGXSetBreakpointKM
+
+ @Description
+       Server-side implementation of RGXSetBreakpoint
+
+ @Input psDeviceNode - RGX Device node
+ @Input eFWDataMaster - Data Master to schedule the command for
+ @Input hMemCtxPrivData - memory context private data
+ @Input ui32BPAddr - Address of breakpoint
+ @Input ui32HandlerAddr - Address of breakpoint handler
+ @Input ui32DataMaster - Data Master that the breakpoint applies to
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXSetBreakpointKM(CONNECTION_DATA    * psConnection,
+                                      PVRSRV_DEVICE_NODE * psDeviceNode,
+                                      IMG_HANDLE           hMemCtxPrivData,
+                                      RGXFWIF_DM           eFWDataMaster,
+                                      IMG_UINT32           ui32BPAddr,
+                                      IMG_UINT32           ui32HandlerAddr,
+                                      IMG_UINT32           ui32DataMaster);
+
+/*!
+*******************************************************************************
+ @Function     PVRSRVRGXClearBreakpointKM
+
+ @Description
+       Server-side implementation of RGXClearBreakpoint
+
+ @Input psDeviceNode - RGX Device node
+ @Input hMemCtxPrivData - memory context private data
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXClearBreakpointKM(CONNECTION_DATA    * psConnection,
+                                        PVRSRV_DEVICE_NODE * psDeviceNode,
+                                        IMG_HANDLE           hMemCtxPrivData);
+
+/*!
+*******************************************************************************
+ @Function     PVRSRVRGXEnableBreakpointKM
+
+ @Description
+       Server-side implementation of RGXEnableBreakpoint
+
+ @Input psDeviceNode - RGX Device node
+ @Input hMemCtxPrivData - memory context private data
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXEnableBreakpointKM(CONNECTION_DATA    * psConnection,
+                                         PVRSRV_DEVICE_NODE * psDeviceNode,
+                                         IMG_HANDLE           hMemCtxPrivData);
+
+/*!
+*******************************************************************************
+ @Function     PVRSRVRGXDisableBreakpointKM
+
+ @Description
+       Server-side implementation of RGXDisableBreakpoint
+
+ @Input psDeviceNode - RGX Device node
+ @Input hMemCtxPrivData - memory context private data
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDisableBreakpointKM(CONNECTION_DATA    * psConnection,
+                                          PVRSRV_DEVICE_NODE * psDeviceNode,
+                                          IMG_HANDLE           hMemCtxPrivData);
+
+/*!
+*******************************************************************************
+ @Function     PVRSRVRGXOverallocateBPRegistersKM
+
+ @Description
+       Server-side implementation of RGXOverallocateBPRegisters
+
+ @Input psDeviceNode - RGX Device node
+ @Input ui32TempRegs - Number of temporary registers to overallocate
+ @Input ui32SharedRegs - Number of shared registers to overallocate
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXOverallocateBPRegistersKM(CONNECTION_DATA    * psConnection,
+                                                PVRSRV_DEVICE_NODE * psDeviceNode,
+                                                IMG_UINT32           ui32TempRegs,
+                                                IMG_UINT32           ui32SharedRegs);
+#endif /* RGXBREAKPOINT_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxbvnc.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxbvnc.c
new file mode 100644 (file)
index 0000000..6c29bee
--- /dev/null
@@ -0,0 +1,852 @@
+/*************************************************************************/ /*!
+@File
+@Title          BVNC handling specific routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Functions used for BVNC related work
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "rgxbvnc.h"
+#define RGXBVNC_C
+#include "rgx_bvnc_table_km.h"
+#undef RGXBVNC_C
+#include "oskm_apphint.h"
+#include "pvrsrv.h"
+#include "pdump_km.h"
+#include "rgx_compat_bvnc.h"
+
+#define RGXBVNC_BUFFER_SIZE (((PVRSRV_MAX_DEVICES)*(RGX_BVNC_STR_SIZE_MAX))+1)
+
+/* Binary search over a table whose rows are uiColCount IMG_UINT64 entries,
+ * keyed on the first entry of each row; returns the matching row or NULL. */
+static IMG_UINT64* _RGXSearchBVNCTable( IMG_UINT64 *pui64Array,
+                                                               IMG_UINT uiEnd,
+                                                               IMG_UINT64 ui64SearchValue,
+                                                               IMG_UINT uiColCount)
+{
+       IMG_UINT uiStart = 0, index;
+       IMG_UINT64 value, *pui64Ptr = NULL;
+
+       while (uiStart < uiEnd)
+       {
+               index = (uiStart + uiEnd)/2;
+               pui64Ptr = pui64Array + (index * uiColCount);
+               value = *(pui64Ptr);
+
+               if (value == ui64SearchValue)
+               {
+                       return pui64Ptr;
+               }
+
+               if (value > ui64SearchValue)
+               {
+                       uiEnd = index;
+               }
+               else
+               {
+                       uiStart = index + 1;
+               }
+       }
+       return NULL;
+}
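+
+/* Convenience wrapper: search table 't' (an array of fixed-size rows of
+ * IMG_UINT64) for the packed BVNC value 'b'.
+ */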
+#define RGX_SEARCH_BVNC_TABLE(t, b) (_RGXSearchBVNCTable((IMG_UINT64*)(t), \
+                                ARRAY_SIZE(t), (b), \
+                                sizeof((t)[0])/sizeof(IMG_UINT64)) )
+
+
+#if defined(DEBUG)
+
+#define PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, szShortName, Feature)                                                                                                                    \
+       if ( psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] != RGX_FEATURE_VALUE_DISABLED )                  \
+               { PVR_LOG(("%s %d", szShortName, psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX])); }         \
+       else                                                                                                                            \
+               { PVR_LOG(("%s N/A", szShortName)); }
+
+static void _RGXBvncDumpParsedConfig(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+       IMG_UINT64 ui64Mask = 0, ui32IdOrNameIdx = 1;
+
+       PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NC:       ", NUM_CLUSTERS);
+       PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "CSF:      ", CDM_CONTROL_STREAM_FORMAT);
+       PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "FBCDCA:   ", FBCDC_ARCHITECTURE);
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+       PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "META:     ", META);
+       PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "MCMB:     ", META_COREMEM_BANKS);
+       PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "MCMS:     ", META_COREMEM_SIZE);
+       PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "MDMACnt:  ", META_DMA_CHANNEL_COUNT);
+#endif
+       PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NIIP:     ", NUM_ISP_IPP_PIPES);
+#if defined(RGX_FEATURE_NUM_ISP_PER_SPU_MAX_VALUE_IDX)
+       PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NIPS:     ", NUM_ISP_PER_SPU);
+#endif
+#if defined(RGX_FEATURE_PBE_PER_SPU_MAX_VALUE_IDX)
+       PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "PPS:      ", PBE_PER_SPU);
+       PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NSPU:     ", NUM_SPU);
+#endif
+       PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "PBW:      ", PHYS_BUS_WIDTH);
+#if defined(RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX)
+       PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "STEArch:  ", SCALABLE_TE_ARCH);
+       PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SVCEA:    ", SCALABLE_VCE);
+#endif
+       PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SLCBanks: ", SLC_BANKS);
+       PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SLCCLS:   ", SLC_CACHE_LINE_SIZE_BITS);
+       PVR_LOG(("SLCSize:   %d",  psDevInfo->sDevFeatureCfg.ui32SLCSizeInBytes));
+       PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "VASB:     ", VIRTUAL_ADDRESS_SPACE_BITS);
+       PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NOSIDS:   ", NUM_OSIDS);
+
+#if defined(FEATURE_NO_VALUES_NAMES_MAX_IDX)
+       /* Dump the features with no values */
+       ui64Mask = psDevInfo->sDevFeatureCfg.ui64Features;
+       while (ui64Mask)
+       {
+               if (ui64Mask & 0x01)
+               {
+                       if (ui32IdOrNameIdx <= FEATURE_NO_VALUES_NAMES_MAX_IDX)
+                       {
+                               PVR_LOG(("%s", gaszFeaturesNoValuesNames[ui32IdOrNameIdx - 1]));
+                       }
+                       else
+                       {
+                               PVR_DPF((PVR_DBG_WARNING,
+                                        "Feature with Mask doesn't exist: 0x%016" IMG_UINT64_FMTSPECx,
+                                        ((IMG_UINT64)1 << (ui32IdOrNameIdx - 1))));
+                       }
+               }
+               ui64Mask >>= 1;
+               ui32IdOrNameIdx++;
+       }
+#endif
+
+#if defined(ERNSBRNS_IDS_MAX_IDX)
+       /* Dump the ERN and BRN flags for this core */
+       ui64Mask = psDevInfo->sDevFeatureCfg.ui64ErnsBrns;
+       ui32IdOrNameIdx = 1;
+
+       while (ui64Mask)
+       {
+               if (ui64Mask & 0x1)
+               {
+                       if (ui32IdOrNameIdx <= ERNSBRNS_IDS_MAX_IDX)
+                       {
+                               PVR_LOG(("ERN/BRN : %" IMG_UINT64_FMTSPEC, gaui64ErnsBrnsIDs[ui32IdOrNameIdx - 1]));
+                       }
+                       else
+                       {
+                               PVR_LOG(("Unknown ErnBrn bit: 0x%0" IMG_UINT64_FMTSPECx, ((IMG_UINT64)1 << (ui32IdOrNameIdx - 1))));
+                       }
+               }
+               ui64Mask >>= 1;
+               ui32IdOrNameIdx++;
+       }
+#endif
+
+}
+#endif
+
+static void _RGXBvncParseFeatureValues(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT64 *pui64Cfg)
+{
+       IMG_UINT32 ui32Index;
+
+       /* Read the feature values for the runtime BVNC */
+       for (ui32Index = 0; ui32Index < RGX_FEATURE_WITH_VALUES_MAX_IDX; ui32Index++)
+       {
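+               /* Each value-carrying feature is stored as an index packed into the
+                * BVNC configuration words (pui64Cfg[2..]); extract that index and
+                * map it to the real value via the per-feature lookup table.
+                */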
+               IMG_UINT16 bitPosition = aui16FeaturesWithValuesBitPositions[ui32Index];
+               IMG_UINT64 ui64PackedValues = pui64Cfg[2 + bitPosition / 64];
+               IMG_UINT16 ui16ValueIndex = (ui64PackedValues & aui64FeaturesWithValuesBitMasks[ui32Index]) >> (bitPosition % 64);
+
+               if (ui16ValueIndex < gaFeaturesValuesMaxIndexes[ui32Index])
+               {
+                       if (gaFeaturesValues[ui32Index][ui16ValueIndex] == (IMG_UINT16)RGX_FEATURE_VALUE_DISABLED)
+                       {
+                               psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = RGX_FEATURE_VALUE_DISABLED;
+                       }
+                       else
+                       {
+                               psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = gaFeaturesValues[ui32Index][ui16ValueIndex];
+                       }
+               }
+               else
+               {
+                       /* This case should never be reached */
+                       psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = RGX_FEATURE_VALUE_INVALID;
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Feature with index (%d) decoded wrong value index (%d)", __func__, ui32Index, ui16ValueIndex));
+                       PVR_ASSERT(ui16ValueIndex < gaFeaturesValuesMaxIndexes[ui32Index]);
+               }
+       }
+
+#if defined(RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX)
+       /* Code path for Volcanic */
+
+       psDevInfo->sDevFeatureCfg.ui32MAXDMCount = RGXFWIF_DM_CDM+1;
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) &&
+               RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) > 1)
+       {
+               psDevInfo->sDevFeatureCfg.ui32MAXDMCount = MAX(psDevInfo->sDevFeatureCfg.ui32MAXDMCount, RGXFWIF_DM_RAY+1);
+       }
+#if defined(SUPPORT_AGP)
+       psDevInfo->sDevFeatureCfg.ui32MAXDMCount = MAX(psDevInfo->sDevFeatureCfg.ui32MAXDMCount, RGXFWIF_DM_GEOM2+1);
+#if defined(SUPPORT_AGP4)
+       psDevInfo->sDevFeatureCfg.ui32MAXDMCount = MAX(psDevInfo->sDevFeatureCfg.ui32MAXDMCount, RGXFWIF_DM_GEOM4+1);
+#endif
+#endif
+
+       /* Get the max number of dusts in the core */
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS))
+       {
+               RGX_LAYER_PARAMS sParams = {.psDevInfo = psDevInfo};
+
+               if (RGX_DEVICE_GET_FEATURE_VALUE(&sParams, POWER_ISLAND_VERSION) == 1)
+               {
+                       /* per SPU power island */
+                       psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount = MAX(1, (RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS) / 2));
+               }
+               else if (RGX_DEVICE_GET_FEATURE_VALUE(&sParams, POWER_ISLAND_VERSION) >= 2)
+               {
+                       /* per Cluster power island */
+                       psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount = RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS);
+               }
+               else
+               {
+                       /* All volcanic cores support power islanding */
+                       psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount = RGX_FEATURE_VALUE_INVALID;
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Power island feature version not found!", __func__));
+                       PVR_ASSERT(0);
+               }
+
+               if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) &&
+                       RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) > 1)
+               {
+                       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RT_RAC_PER_SPU))
+                       {
+                               psDevInfo->sDevFeatureCfg.ui32MAXRACCount = RGX_GET_FEATURE_VALUE(psDevInfo, NUM_SPU);
+                       }
+                       else
+                       {
+                               psDevInfo->sDevFeatureCfg.ui32MAXRACCount = 1;
+                       }
+               }
+       }
+       else
+       {
+               /* This case should never be reached as all cores have clusters */
+               psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount = RGX_FEATURE_VALUE_INVALID;
+               PVR_DPF((PVR_DBG_ERROR, "%s: Number of clusters feature value missing!", __func__));
+               PVR_ASSERT(0);
+       }
+#else /* defined(RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX) */
+       /* Code path for Rogue and Oceanic */
+
+       psDevInfo->sDevFeatureCfg.ui32MAXDMCount = RGXFWIF_DM_CDM+1;
+#if defined(SUPPORT_AGP)
+       psDevInfo->sDevFeatureCfg.ui32MAXDMCount = MAX(psDevInfo->sDevFeatureCfg.ui32MAXDMCount, RGXFWIF_DM_GEOM2+1);
+#endif
+
+       /* Meta feature not present in oceanic */
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+       {
+               psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_META_IDX] = RGX_FEATURE_VALUE_DISABLED;
+       }
+#endif
+
+       /* Get the max number of dusts in the core */
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS))
+       {
+               psDevInfo->sDevFeatureCfg.ui32MAXDustCount = MAX(1, (RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS) / 2));
+       }
+       else
+       {
+               /* This case should never be reached as all cores have clusters */
+               psDevInfo->sDevFeatureCfg.ui32MAXDustCount = RGX_FEATURE_VALUE_INVALID;
+               PVR_DPF((PVR_DBG_ERROR, "%s: Number of clusters feature value missing!", __func__));
+               PVR_ASSERT(0);
+       }
+#endif /* defined(RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX) */
+
+       /* Meta feature not present in oceanic */
+#if defined(RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX)
+       /* Transform the META coremem size info in bytes */
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE))
+       {
+               psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_META_COREMEM_SIZE_IDX] *= 1024;
+       }
+#endif
+}
+
+static void _RGXBvncAcquireAppHint(IMG_CHAR *pszBVNC, const IMG_UINT32 ui32RGXDevCount)
+{
+       const IMG_CHAR *pszAppHintDefault = PVRSRV_APPHINT_RGXBVNC;
+       void *pvAppHintState = NULL;
+       IMG_UINT32 ui32BVNCCount = 0;
+       IMG_BOOL bRet;
+       IMG_CHAR szBVNCAppHint[RGXBVNC_BUFFER_SIZE];
+       IMG_CHAR *pszCurrentBVNC = szBVNCAppHint;
+       szBVNCAppHint[0] = '\0';
+
+       OSCreateKMAppHintState(&pvAppHintState);
+
+       bRet = (IMG_BOOL)OSGetKMAppHintSTRING(APPHINT_NO_DEVICE,
+                                               pvAppHintState,
+                                               RGXBVNC,
+                                               pszAppHintDefault,
+                                               szBVNCAppHint,
+                                               sizeof(szBVNCAppHint));
+
+       OSFreeKMAppHintState(pvAppHintState);
+
+       if (!bRet || (szBVNCAppHint[0] == '\0'))
+       {
+               return;
+       }
+
+       PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC module param list: %s",__func__, szBVNCAppHint));
+
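+       /* Split the comma-separated AppHint string in place and copy out the
+        * entry whose position matches this device's index.
+        */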
+       while (*pszCurrentBVNC != '\0')
+       {
+               IMG_CHAR *pszNext = pszCurrentBVNC;
+
+               if (ui32BVNCCount >= PVRSRV_MAX_DEVICES)
+               {
+                       break;
+               }
+
+               while (1)
+               {
+                       if (*pszNext == ',')
+                       {
+                               pszNext[0] = '\0';
+                               pszNext++;
+                               break;
+                       }
+                       else if (*pszNext == '\0')
+                       {
+                               break;
+                       }
+                       pszNext++;
+               }
+
+               if (ui32BVNCCount == ui32RGXDevCount)
+               {
+                       OSStringLCopy(pszBVNC, pszCurrentBVNC, RGX_BVNC_STR_SIZE_MAX);
+                       return;
+               }
+
+               ui32BVNCCount++;
+               pszCurrentBVNC = pszNext;
+       }
+
+       PVR_DPF((PVR_DBG_ERROR, "%s: Module parameter list is shorter than the "
+       "number of RGX devices detected", __func__));
+
+       /* If only one BVNC parameter is specified, the same is applied for all RGX
+        * devices detected */
+       if (1 == ui32BVNCCount)
+       {
+               OSStringLCopy(pszBVNC, szBVNCAppHint, RGX_BVNC_STR_SIZE_MAX);
+       }
+}
+
+/* Function that parses the BVNC List passed as module parameter */
+static PVRSRV_ERROR _RGXBvncParseList(IMG_UINT32 *pB,
+                                                                         IMG_UINT32 *pV,
+                                                                         IMG_UINT32 *pN,
+                                                                         IMG_UINT32 *pC,
+                                                                         const IMG_UINT32 ui32RGXDevCount)
+{
+       unsigned int ui32ScanCount = 0;
+       IMG_CHAR aszBVNCString[RGX_BVNC_STR_SIZE_MAX];
+
+       aszBVNCString[0] = '\0';
+
+       /* 4 components of a BVNC string is B, V, N & C */
+#define RGX_BVNC_INFO_PARAMS (4)
+
+       _RGXBvncAcquireAppHint(aszBVNCString, ui32RGXDevCount);
+
+       if ('\0' == aszBVNCString[0])
+       {
+               return PVRSRV_ERROR_INVALID_BVNC_PARAMS;
+       }
+
+       /* Parse the given RGX_BVNC string */
+       ui32ScanCount = OSVSScanf(aszBVNCString, RGX_BVNC_STR_FMTSPEC, pB, pV, pN, pC);
+       if (RGX_BVNC_INFO_PARAMS != ui32ScanCount)
+       {
+               ui32ScanCount = OSVSScanf(aszBVNCString, RGX_BVNC_STRP_FMTSPEC, pB, pV, pN, pC);
+       }
+       if (RGX_BVNC_INFO_PARAMS != ui32ScanCount)
+       {
+               return PVRSRV_ERROR_INVALID_BVNC_PARAMS;
+       }
+       PVR_LOG(("BVNC module parameter honoured: %s", aszBVNCString));
+
+       return PVRSRV_OK;
+}
+
+#if !defined(NO_HARDWARE)
+/*
+ * This function obtains the SLCSize from the physical device for GPUs which provide
+ * this information. If the GPU does not provide support we return a value of 0 which will
+ * result in the BVNC supplied definition being used to provide the SLCSize.
+ * Must only be called from driver-live with hardware powered-on.
+ */
+static IMG_UINT32 _RGXBvncReadSLCSize(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+       IMG_UINT64 ui64SLCSize = 0ULL;
+
+#if defined(RGX_CR_SLC_SIZE_IN_KB)
+       /* Rogue and Oceanic hardware */
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_SIZE_CONFIGURABLE))
+       {
+               ui64SLCSize = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SLC_SIZE_IN_KB);
+               if (ui64SLCSize == 0ULL)
+               {
+                       PVR_DPF((PVR_DBG_MESSAGE, "%s: Unexpected 0 SLC size. Using default", __func__));
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_MESSAGE, "%s: RGX_CR_SLC_SIZE_IN_KB = %u", __func__,
+                               (IMG_UINT32) ui64SLCSize));
+               }
+       }
+#else
+       /* Volcanic hardware */
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_SIZE_ADJUSTMENT))
+       {
+               ui64SLCSize = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SLC_STATUS2);
+               ui64SLCSize &= ~RGX_CR_SLC_STATUS2_SLC_SIZE_IN_KB_CLRMSK;
+               ui64SLCSize >>= RGX_CR_SLC_STATUS2_SLC_SIZE_IN_KB_SHIFT;
+
+               if (ui64SLCSize == 0ULL)
+               {
+                       PVR_DPF((PVR_DBG_MESSAGE, "%s: Unexpected 0 SLC size. Using default", __func__));
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_MESSAGE, "%s: SLC_SIZE_IN_KILOBYTES = %u", __func__,
+                               (IMG_UINT32) ui64SLCSize));
+               }
+       }
+#endif
+
+       return (IMG_UINT32)ui64SLCSize * 1024U;
+}
+#endif /* !defined(NO_HARDWARE) */
+
+/* This function detects the Rogue variant and configures the essential
+ * config info associated with such a device.
+ * The config info includes features, errata, etc
+ */
+PVRSRV_ERROR RGXBvncInitialiseConfiguration(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       static IMG_UINT32 ui32RGXDevCnt = 0;
+       PVRSRV_ERROR eError;
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+       IMG_UINT64 ui64BVNC=0;
+       IMG_UINT32 B=0, V=0, N=0, C=0;
+       IMG_UINT64 *pui64Cfg = NULL;
+       IMG_UINT32 ui32Cores = 1U;
+       IMG_UINT32 ui32SLCSize = 0;
+
+       /* Check for load time RGX BVNC parameter */
+       eError = _RGXBvncParseList(&B,&V,&N,&C, ui32RGXDevCnt);
+       if (PVRSRV_OK == eError)
+       {
+               PVR_LOG(("Read BVNC " RGX_BVNC_STR_FMTSPEC
+                               " from driver load parameter", B, V, N, C));
+
+               /* Extract the BVNC config from the Features table */
+               ui64BVNC = BVNC_PACK(B,0,N,C);
+               pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaFeatures, ui64BVNC);
+               PVR_LOG_IF_FALSE((pui64Cfg != NULL), "Driver parameter BVNC configuration not found!");
+       }
+
+       {
+               void *pvAppHintState = NULL;
+               const IMG_BOOL bAppHintDefault = PVRSRV_APPHINT_IGNOREHWREPORTEDBVNC;
+
+               OSCreateKMAppHintState(&pvAppHintState);
+               OSGetKMAppHintBOOL(APPHINT_NO_DEVICE,
+                                                       pvAppHintState,
+                                                       IgnoreHWReportedBVNC,
+                                                       &bAppHintDefault,
+                                                       &psDevInfo->bIgnoreHWReportedBVNC);
+               OSFreeKMAppHintState(pvAppHintState);
+       }
+
+#if !defined(NO_HARDWARE)
+
+       /* Try to detect the RGX BVNC from the HW device */
+       if ((NULL == pui64Cfg) && !psDevInfo->bIgnoreHWReportedBVNC)
+       {
+               IMG_UINT64 ui32ID;
+               IMG_BOOL bPowerDown = (psDeviceNode->eCurrentSysPowerState == PVRSRV_SYS_POWER_STATE_OFF);
+
+               /* Power-up the device as required to read the registers */
+               if (bPowerDown)
+               {
+                       eError = PVRSRVSetSystemPowerState(psDeviceNode->psDevConfig, PVRSRV_SYS_POWER_STATE_ON);
+                       PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState ON");
+               }
+
+#if defined(RGX_CR_CORE_ID__PBVNC)
+               /* Core ID reading code for Rogue */
+
+               /* Read the BVNC using the new register layout first; if B is not set, fall back to the old scheme */
+               ui32ID = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID__PBVNC);
+
+               if (GET_B(ui32ID))
+               {
+                       B = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__BRANCH_ID_CLRMSK) >>
+                                                                                                       RGX_CR_CORE_ID__PBVNC__BRANCH_ID_SHIFT;
+                       V = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__VERSION_ID_CLRMSK) >>
+                                                                                                       RGX_CR_CORE_ID__PBVNC__VERSION_ID_SHIFT;
+                       N = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_CLRMSK) >>
+                                                                                                       RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_SHIFT;
+                       C = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__CONFIG_ID_CLRMSK) >>
+                                                                                                       RGX_CR_CORE_ID__PBVNC__CONFIG_ID_SHIFT;
+
+               }
+               else
+               {
+                       IMG_UINT64 ui32CoreID, ui32CoreRev;
+                       ui32CoreRev = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_REVISION);
+                       ui32CoreID = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID);
+                       B = (ui32CoreRev & ~RGX_CR_CORE_REVISION_MAJOR_CLRMSK) >>
+                                                                                                       RGX_CR_CORE_REVISION_MAJOR_SHIFT;
+                       V = (ui32CoreRev & ~RGX_CR_CORE_REVISION_MINOR_CLRMSK) >>
+                                                                                                       RGX_CR_CORE_REVISION_MINOR_SHIFT;
+                       N = (ui32CoreID & ~RGX_CR_CORE_ID_CONFIG_N_CLRMSK) >>
+                                                                                                       RGX_CR_CORE_ID_CONFIG_N_SHIFT;
+                       C = (ui32CoreID & ~RGX_CR_CORE_ID_CONFIG_C_CLRMSK) >>
+                                                                                                       RGX_CR_CORE_ID_CONFIG_C_SHIFT;
+               }
+#else
+               /* Core ID reading code for Volcanic */
+
+               ui32ID = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID);
+
+               B = (ui32ID & ~RGX_CR_CORE_ID_BRANCH_ID_CLRMSK) >>
+                                                                                               RGX_CR_CORE_ID_BRANCH_ID_SHIFT;
+               V = (ui32ID & ~RGX_CR_CORE_ID_VERSION_ID_CLRMSK) >>
+                                                                                               RGX_CR_CORE_ID_VERSION_ID_SHIFT;
+               N = (ui32ID & ~RGX_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_CLRMSK) >>
+                                                                                               RGX_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_SHIFT;
+               C = (ui32ID & ~RGX_CR_CORE_ID_CONFIG_ID_CLRMSK) >>
+                                                                                               RGX_CR_CORE_ID_CONFIG_ID_SHIFT;
+#endif
+
+               PVR_LOG(("Read BVNC " RGX_BVNC_STR_FMTSPEC
+                               " from HW device registers", B, V, N, C));
+
+               if (!PVRSRV_VZ_MODE_IS(GUEST))
+               {
+                       /* Read the number of cores in the system for newer BVNC (Branch ID > 20) */
+                       if (B > 20)
+                       {
+                               ui32Cores = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE_SYSTEM);
+                       }
+               }
+
+               /* Obtain the SLC size from the device */
+               ui32SLCSize = _RGXBvncReadSLCSize(psDeviceNode);
+               PVR_DPF((PVR_DBG_MESSAGE, "%s: SLC Size reported as %u", __func__, ui32SLCSize));
+
+               if (bPowerDown)
+               {
+                       eError = PVRSRVSetSystemPowerState(psDeviceNode->psDevConfig, PVRSRV_SYS_POWER_STATE_OFF);
+                       PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState OFF");
+               }
+
+               /* Extract the BVNC config from the Features table */
+               ui64BVNC = BVNC_PACK(B,0,N,C);
+               if (ui64BVNC != 0)
+               {
+                       pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaFeatures, ui64BVNC);
+                       PVR_LOG_IF_FALSE((pui64Cfg != NULL), "HW device BVNC configuration not found!");
+               }
+               else if (!PVRSRV_VZ_MODE_IS(GUEST))
+               {
+                       /*
+                        * On host OS we should not get here as CORE_ID should not be zero, so flag an error.
+                        * On older cores, guest OS only has CORE_ID if defined(RGX_FEATURE_COREID_PER_OS)
+                        */
+                       PVR_LOG_ERROR(PVRSRV_ERROR_DEVICE_REGISTER_FAILED, "CORE_ID register returns zero. Unknown BVNC");
+               }
+       }
+#endif
+
+#if defined(RGX_BVNC_KM_B) && defined(RGX_BVNC_KM_N) && defined(RGX_BVNC_KM_C)
+       if (NULL == pui64Cfg)
+       {
+               /* We reach here if the HW is not present,
+                * or we are running in a guest OS with no COREID_PER_OS feature,
+                * or HW is unstable during register read giving invalid values,
+                * or runtime detection has been disabled - fall back to compile time BVNC
+                */
+               B = RGX_BVNC_KM_B;
+               N = RGX_BVNC_KM_N;
+               C = RGX_BVNC_KM_C;
+               {
+                       IMG_UINT32      ui32ScanCount = 0;
+                       ui32ScanCount = OSVSScanf(RGX_BVNC_KM_V_ST, "%u", &V);
+                       if (1 != ui32ScanCount)
+                       {
+                               ui32ScanCount = OSVSScanf(RGX_BVNC_KM_V_ST, "%up", &V);
+                               if (1 != ui32ScanCount)
+                               {
+                                       V = 0;
+                               }
+                       }
+               }
+               PVR_LOG(("Reverting to compile time BVNC %s", RGX_BVNC_KM));
+
+               /* Extract the BVNC config from the Features table */
+               ui64BVNC = BVNC_PACK(B,0,N,C);
+               pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaFeatures, ui64BVNC);
+               PVR_LOG_IF_FALSE((pui64Cfg != NULL), "Compile time BVNC configuration not found!");
+       }
+#endif /* defined(RGX_BVNC_KM_B) && defined(RGX_BVNC_KM_N) && defined(RGX_BVNC_KM_C) */
+
+       /* Have we failed to identify the BVNC to use? */
+       if (NULL == pui64Cfg)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: BVNC Detection and feature lookup failed. "
+                   "Unsupported BVNC: 0x%016" IMG_UINT64_FMTSPECx, __func__, ui64BVNC));
+               return PVRSRV_ERROR_BVNC_UNSUPPORTED;
+       }
+
+       PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC Feature found config: 0x%016"
+           IMG_UINT64_FMTSPECx " 0x%016" IMG_UINT64_FMTSPECx " 0x%016"
+           IMG_UINT64_FMTSPECx " 0x%016" IMG_UINT64_FMTSPECx "\n", __func__,
+           pui64Cfg[0], pui64Cfg[1], pui64Cfg[2], pui64Cfg[3]));
+
+       /* Parsing feature config depends on available features on the core
+        * hence this parsing should always follow the above feature assignment */
+       psDevInfo->sDevFeatureCfg.ui64Features = pui64Cfg[1];
+       _RGXBvncParseFeatureValues(psDevInfo, pui64Cfg);
+
+       /* Add 'V' to the packed BVNC value to get the BVNC ERN and BRN config. */
+       ui64BVNC = BVNC_PACK(B,V,N,C);
+       pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaErnsBrns, ui64BVNC);
+       if (NULL == pui64Cfg)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: BVNC ERN/BRN lookup failed. "
+                   "Unsupported BVNC: 0x%016" IMG_UINT64_FMTSPECx, __func__, ui64BVNC));
+               psDevInfo->sDevFeatureCfg.ui64ErnsBrns = 0;
+               return PVRSRV_ERROR_BVNC_UNSUPPORTED;
+       }
+
+       PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC ERN/BRN Cfg: 0x%016" IMG_UINT64_FMTSPECx
+           " 0x%016" IMG_UINT64_FMTSPECx, __func__, *pui64Cfg, pui64Cfg[1]));
+       psDevInfo->sDevFeatureCfg.ui64ErnsBrns = pui64Cfg[1];
+
+       psDevInfo->sDevFeatureCfg.ui32B = B;
+       psDevInfo->sDevFeatureCfg.ui32V = V;
+       psDevInfo->sDevFeatureCfg.ui32N = N;
+       psDevInfo->sDevFeatureCfg.ui32C = C;
+
+
+       /*
+        * Store the SLCSize in the device info field. If 0 it means the device uses the BVNC
+        * values so grab them here as we've already populated the internal structures.
+        */
+       if (ui32SLCSize == 0U)
+       {
+               ui32SLCSize = RGX_GET_FEATURE_VALUE(psDevInfo, SLC_SIZE_IN_KILOBYTES) * 1024U;
+
+               /* Verify that we have a valid value returned from the BVNC */
+               PVR_ASSERT(ui32SLCSize != 0U);
+       }
+       psDevInfo->sDevFeatureCfg.ui32SLCSizeInBytes = ui32SLCSize;
+
+       /* Message to confirm configuration look up was a success */
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT))
+       {
+#if defined(NO_HARDWARE)
+               {
+                       PVR_UNREFERENCED_PARAMETER(ui32Cores);
+                       PVR_LOG(("RGX Device registered with BVNC " RGX_BVNC_STR_FMTSPEC,
+                                       B, V, N, C));
+               }
+#else
+               {
+                       PVR_LOG(("RGX Device registered BVNC " RGX_BVNC_STR_FMTSPEC
+                                       " with %u %s in the system", B, V, N, C, ui32Cores,
+                                       ((ui32Cores == 1U)?"core":"cores")));
+               }
+#endif
+       }
+       else
+       {
+               PVR_LOG(("RGX Device registered with BVNC " RGX_BVNC_STR_FMTSPEC,
+                                       B, V, N, C));
+       }
+
+       ui32RGXDevCnt++;
+
+#if defined(DEBUG)
+       _RGXBvncDumpParsedConfig(psDeviceNode);
+#endif
+       return PVRSRV_OK;
+}
+
+/*
+ * This function checks if a particular feature is available on the given rgx device */
+IMG_BOOL RGXBvncCheckFeatureSupported(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64FeatureMask)
+{
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+
+       if (psDevInfo->sDevFeatureCfg.ui64Features & ui64FeatureMask)
+       {
+               return IMG_TRUE;
+       }
+       return IMG_FALSE;
+}
+
+/*
+ * This function returns the value of a feature on the given rgx device */
+IMG_INT32 RGXBvncGetSupportedFeatureValue(PVRSRV_DEVICE_NODE *psDeviceNode, RGX_FEATURE_WITH_VALUE_INDEX eFeatureIndex)
+{
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+
+       if (eFeatureIndex >= RGX_FEATURE_WITH_VALUES_MAX_IDX)
+       {
+               return -1;
+       }
+
+       if (psDevInfo->sDevFeatureCfg.ui32FeaturesValues[eFeatureIndex] == RGX_FEATURE_VALUE_DISABLED)
+       {
+               return -1;
+       }
+
+       return psDevInfo->sDevFeatureCfg.ui32FeaturesValues[eFeatureIndex];
+}
+
+/**************************************************************************/ /*!
+@Function       RGXVerifyBVNC
+@Description    Checks that the device's BVNC registers have the correct values.
+@Input          psDeviceNode           Device node
+@Return         PVRSRV_ERROR
+*/ /***************************************************************************/
+#define NUM_RGX_CORE_IDS    8
+PVRSRV_ERROR RGXVerifyBVNC(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64GivenBVNC, IMG_UINT64 ui64CoreIdMask)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_UINT64 ui64MatchBVNC;
+       IMG_UINT32 i;
+
+       PVR_ASSERT(psDeviceNode != NULL);
+       PVR_ASSERT(psDeviceNode->pvDevice != NULL);
+
+       /* The device info */
+       psDevInfo = psDeviceNode->pvDevice;
+
+       PDUMPCOMMENT(psDeviceNode, "PDUMP VERIFY CORE_ID registers for all OSIDs\n");
+
+       /* construct the value to match against */
+       if ((ui64GivenBVNC | ui64CoreIdMask) == 0) /* both zero means use configured DDK value */
+       {
+               ui64MatchBVNC = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B,
+                                                                       psDevInfo->sDevFeatureCfg.ui32V,
+                                                                       psDevInfo->sDevFeatureCfg.ui32N,
+                                                                       psDevInfo->sDevFeatureCfg.ui32C);
+       }
+       else
+       {
+               /* use the value in CORE_ID for any zero elements in the BVNC */
+               ui64MatchBVNC = (ui64GivenBVNC & ~ui64CoreIdMask) | (OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID) & ui64CoreIdMask);
+       }
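+       /*
+        * Illustrative example (values are hypothetical): if the caller supplies
+        * a packed BVNC for 36.0.104.182 with ui64CoreIdMask covering only the V
+        * field, then B, N and C are taken from ui64GivenBVNC while V is read
+        * back from the CORE_ID register; the merged value is what every CORE_ID
+        * register below is expected to match.
+        */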
+       PVR_LOG(("matchBVNC %d.%d.%d.%d",
+               (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_B) & 0xffff),
+               (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_V) & 0xffff),
+               (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_N) & 0xffff),
+               (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_C) & 0xffff)));
+
+       /* read in all the CORE_ID registers */
+       for (i = 0; i < NUM_RGX_CORE_IDS; ++i)
+       {
+#if !defined(NO_HARDWARE)
+               IMG_UINT64 ui64BVNC = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID + (i << 16));
+
+               PVR_LOG(("CORE_ID%d returned %d.%d.%d.%d", i,
+                       (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_B) & 0xffff),
+                       (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_V) & 0xffff),
+                       (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_N) & 0xffff),
+                       (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_C) & 0xffff)));
+
+               if (ui64BVNC != ui64MatchBVNC)
+               {
+                       eError = PVRSRV_ERROR_BVNC_MISMATCH;
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Invalid CORE_ID%d %d.%d.%d.%d, Expected %d.%d.%d.%d", __func__, i,
+                               (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_B) & 0xffff),
+                               (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_V) & 0xffff),
+                               (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_N) & 0xffff),
+                               (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_C) & 0xffff),
+                               (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_B) & 0xffff),
+                               (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_V) & 0xffff),
+                               (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_N) & 0xffff),
+                               (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_C) & 0xffff)));
+                       break;
+               }
+#endif
+
+#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP)
+               /* check upper DWORD */
+               eError = PDUMPREGPOL(psDeviceNode, RGX_PDUMPREG_NAME,
+                                    (RGX_CR_CORE_ID + 4) + (i << 16),
+                                    (IMG_UINT32)(ui64MatchBVNC >> 32),
+                                    0xFFFFFFFF,
+                                    PDUMP_FLAGS_CONTINUOUS,
+                                    PDUMP_POLL_OPERATOR_EQUAL);
+               if (eError == PVRSRV_OK)
+               {
+                       /* check lower DWORD */
+                       eError = PDUMPREGPOL(psDeviceNode, RGX_PDUMPREG_NAME,
+                                            RGX_CR_CORE_ID + (i << 16),
+                                            (IMG_UINT32)(ui64MatchBVNC & 0xFFFFFFFF),
+                                            0xFFFFFFFF,
+                                            PDUMP_FLAGS_CONTINUOUS,
+                                            PDUMP_POLL_OPERATOR_EQUAL);
+               }
+#endif
+       }
+
+       return eError;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxbvnc.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxbvnc.h
new file mode 100644 (file)
index 0000000..64c418b
--- /dev/null
@@ -0,0 +1,90 @@
+/*************************************************************************/ /*!
+@File
+@Title          BVNC handling specific header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the BVNC related work
+                (see hwdefs/km/rgx_bvnc_table_km.h and
+                hwdefs/km/rgx_bvnc_defs_km.h)
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXBVNC_H)
+#define RGXBVNC_H
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "rgxdevice.h"
+
+/*************************************************************************/ /*!
+@brief         This function detects the Rogue variant and configures the
+                       essential config info associated with such a device.
+                       The config info includes features, errata, etc.
+@param         psDeviceNode - Device Node pointer
+@return                PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXBvncInitialiseConfiguration(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*************************************************************************/ /*!
+@brief         This function checks if a particular feature is available on
+                       the given rgx device
+@param         psDeviceNode - Device Node pointer
+@param         ui64FeatureMask - feature to be checked
+@return                true if feature is supported, false otherwise
+*/ /**************************************************************************/
+IMG_BOOL RGXBvncCheckFeatureSupported(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64FeatureMask);
+
+/*************************************************************************/ /*!
+@brief         This function returns the value of a feature on the given
+                       rgx device
+@param         psDeviceNode - Device Node pointer
+@param         eFeatureIndex - index of the feature whose value is to be returned
+@return                the value of the specified feature, or -1 if it is not supported
+*/ /**************************************************************************/
+IMG_INT32 RGXBvncGetSupportedFeatureValue(PVRSRV_DEVICE_NODE *psDeviceNode, RGX_FEATURE_WITH_VALUE_INDEX eFeatureIndex);
+
+/*************************************************************************/ /*!
+@brief         This function validates that the BVNC values in CORE_ID regs are
+                       consistent and correct.
+@param         psDeviceNode - Device Node pointer
+@param         ui64GivenBVNC - BVNC to be verified against, as supplied by the caller
+@param         ui64CoreIdMask - mask of fields to take from the CORE_ID register
+@return                PVRSRV_OK on success, an error code otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXVerifyBVNC(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64GivenBVNC, IMG_UINT64 ui64CoreIdMask);
+
+#endif /* RGXBVNC_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxccb.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxccb.c
new file mode 100644 (file)
index 0000000..7a76f80
--- /dev/null
@@ -0,0 +1,2803 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX CCB routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX CCB routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvr_debug.h"
+#include "rgxdevice.h"
+#include "pdump_km.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "rgxfwutils.h"
+#include "osfunc.h"
+#include "rgxccb.h"
+#include "rgx_memallocflags.h"
+#include "devicemem_pdump.h"
+#include "dllist.h"
+#if defined(__linux__)
+#include "trace_events.h"
+#endif
+#include "sync_checkpoint_external.h"
+#include "sync_checkpoint.h"
+#include "rgxutils.h"
+#include "info_page.h"
+#include "rgxtimerquery.h"
+
+#if defined(PVRSRV_FORCE_FLUSH_CCCB_ON_KICK)
+#include "cache_km.h"
+#endif
+
+/*
+ *  Uncomment PVRSRV_ENABLE_CCCB_UTILISATION_INFO define for verbose
+ *  info and statistics regarding CCB usage.
+ */
+//#define PVRSRV_ENABLE_CCCB_UTILISATION_INFO
+
+/* Default threshold (as a percentage) for the PVRSRV_ENABLE_CCCB_UTILISATION_INFO feature. */
+#define PVRSRV_ENABLE_CCCB_UTILISATION_INFO_THRESHOLD  (90)
+
+/*
+ * Defines the number of fence updates to record so that future fences in the
+ * CCB can be checked to see if they are already known to be satisfied.
+ */
+#define RGX_CCCB_FENCE_UPDATE_LIST_SIZE  (32)
+
+#define RGX_UFO_PTR_ADDR(ufoptr) \
+       (((ufoptr)->puiAddrUFO.ui32Addr) & 0xFFFFFFFC)
+
+#define GET_CCB_SPACE(WOff, ROff, CCBSize) \
+       ((((ROff) - (WOff)) + ((CCBSize) - 1)) & ((CCBSize) - 1))
+
+#define UPDATE_CCB_OFFSET(Off, PacketSize, CCBSize) \
+       (Off) = (((Off) + (PacketSize)) & ((CCBSize) - 1))
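+/*
+ * Worked example for the two macros above (values are hypothetical): with a
+ * 1024-byte CCB, a write offset of 1000 and a read offset of 8,
+ * GET_CCB_SPACE(1000, 8, 1024) = ((8 - 1000) + 1023) & 1023 = 31 free bytes.
+ * The ring deliberately keeps one byte unused so that WOff == ROff always
+ * means "empty". UPDATE_CCB_OFFSET(Off, 48, 1024) applied to Off == 1000
+ * wraps it to (1000 + 48) & 1023 = 24.
+ */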
+
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+
+#define PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_THRESHOLD 0x1
+#define PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED 0x2
+#define PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_FULL_CCB 0x4
+
+typedef struct _RGX_CLIENT_CCB_UTILISATION_
+{
+       /* the threshold in bytes.
+        * when the CCB utilisation hits the threshold then we will print
+        * a warning message.
+        */
+       IMG_UINT32 ui32ThresholdBytes;
+       /* Maximum cCCB usage at some point in time */
+       IMG_UINT32 ui32HighWaterMark;
+       /* keep track of the warnings already printed.
+        * bit mask of PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_xyz
+        */
+       IMG_UINT32 ui32Warnings;
+       /* Keep track how many times CCB was full.
+        * Counters are reset after every grow.
+        */
+       IMG_UINT32 ui32CCBFull;
+       IMG_UINT32 ui32CCBAcquired;
+} RGX_CLIENT_CCB_UTILISATION;
+
+#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */
+
+struct _RGX_CLIENT_CCB_ {
+       volatile RGXFWIF_CCCB_CTL       *psClientCCBCtrl;                               /*!< CPU mapping of the CCB control structure used by the fw */
+       void                                            *pvClientCCB;                                   /*!< CPU mapping of the CCB */
+       DEVMEM_MEMDESC                          *psClientCCBMemDesc;                    /*!< MemDesc for the CCB */
+       DEVMEM_MEMDESC                          *psClientCCBCtrlMemDesc;                /*!< MemDesc for the CCB control */
+       IMG_UINT32                                      ui32HostWriteOffset;                    /*!< CCB write offset from the driver side */
+       IMG_UINT32                                      ui32LastPDumpWriteOffset;               /*!< CCB write offset from the last time we submitted a command in capture range */
+       IMG_UINT32                                      ui32FinishedPDumpWriteOffset;   /*!< Trails LastPDumpWriteOffset for last finished command, used for HW CB driven DMs */
+       IMG_UINT32                                      ui32LastROff;                                   /*!< Last CCB Read offset to help detect any CCB wedge */
+       IMG_UINT32                                      ui32LastWOff;                                   /*!< Last CCB Write offset to help detect any CCB wedge */
+       IMG_UINT32                                      ui32ByteCount;                                  /*!< Count of the number of bytes written to CCCB */
+       IMG_UINT32                                      ui32LastByteCount;                              /*!< Last value of ui32ByteCount to help detect any CCB wedge */
+       IMG_UINT32                                      ui32Size;                                               /*!< Size of the CCB */
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+       POS_LOCK                                        hCCBGrowLock;                                   /*!< Prevents CCB Grow while DumpCCB() is called and vice versa */
+       IMG_UINT32                                      ui32VirtualAllocSize;                   /*!< Virtual size of the CCB */
+       IMG_UINT32                                      ui32ChunkSize;                                  /*!< CCB Sparse allocation chunk size */
+       IMG_PUINT32                                     pui32MappingTable;                              /*!< Mapping table for sparse allocation of the CCB */
+#endif
+       DLLIST_NODE                                     sNode;                                                  /*!< Node used to store this CCB on the per connection list */
+       PDUMP_CONNECTION_DATA           *psPDumpConnectionData;                 /*!< Pointer to the per connection data in which we reside */
+       void                                            *hTransition;                                   /*!< Handle for Transition callback */
+       IMG_CHAR                                        szName[MAX_CLIENT_CCB_NAME];    /*!< Name of this client CCB */
+       RGX_SERVER_COMMON_CONTEXT       *psServerCommonContext;                 /*!< Parent server common context that this CCB belongs to */
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+       RGX_CCB_REQUESTOR_TYPE          eRGXCCBRequestor;
+       RGX_CLIENT_CCB_UTILISATION      sUtilisation;                                   /*!< CCB utilisation data */
+#endif
+#if defined(DEBUG)
+       IMG_UINT32                                      ui32UpdateEntries;                              /*!< Number of Fence Updates in asFenceUpdateList */
+       RGXFWIF_UFO                                     asFenceUpdateList[RGX_CCCB_FENCE_UPDATE_LIST_SIZE]; /*!< List of recent updates written in this CCB */
+#endif
+       IMG_UINT32                                      ui32CCBFlags;                   /*!< Bitmask for various flags relating to CCB. Bit defines in rgxccb.h */
+};
+
+/* Forms a table with an array of strings for each requestor type (listed in the RGX_CCB_REQUESTORS X macro), used for
+   DevMemAllocation comments and PDump comments. Each tuple in the table consists of 3 strings:
+       { "FwClientCCB:" <requestor_name>, "FwClientCCBControl:" <requestor_name>, <requestor_name> },
+   The first string is used as the comment when allocating the ClientCCB for the given requestor, the second for the
+   CCBControl structure, and the third in PDUMP comments. The number of tuples in the table must adhere to the
+   following build assert. */
+const IMG_CHAR *const aszCCBRequestors[][3] =
+{
+#define REQUESTOR_STRING(prefix,req) #prefix ":" #req
+#define FORM_REQUESTOR_TUPLE(req) { REQUESTOR_STRING(FwClientCCB,req), REQUESTOR_STRING(FwClientCCBControl,req), #req },
+       RGX_CCB_REQUESTORS(FORM_REQUESTOR_TUPLE)
+#undef FORM_REQUESTOR_TUPLE
+};
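+/*
+ * For illustration (the requestor name is hypothetical): FORM_REQUESTOR_TUPLE(TA)
+ * expands to { "FwClientCCB:TA", "FwClientCCBControl:TA", "TA" }, supplying the
+ * ClientCCB allocation comment, the CCBControl allocation comment and the PDump
+ * requestor string for that entry.
+ */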
+
+PVRSRV_ERROR RGXCCBPDumpDrainCCB(RGX_CLIENT_CCB *psClientCCB,
+                                               IMG_UINT32 ui32PDumpFlags)
+{
+
+       IMG_UINT32 ui32PollOffset;
+#if defined(PDUMP)
+       PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psClientCCB->psServerCommonContext);
+#endif
+
+       if (BIT_ISSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN))
+       {
+               /* We are draining the CCB while a command is still in flight, so the FW
+                * is not expected to have advanced Roff up to Woff. Only drain up to the
+                * most recent finished command prior to this one; its Roff is stored in
+                * ui32FinishedPDumpWriteOffset.
+                */
+               ui32PollOffset = psClientCCB->ui32FinishedPDumpWriteOffset;
+
+               PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode,
+                                                         ui32PDumpFlags,
+                                                         "cCCB(%s@%p): Draining open CCB rgxfw_roff < woff (%d)",
+                                                         psClientCCB->szName,
+                                                         psClientCCB,
+                                                         ui32PollOffset);
+       }
+       else
+       {
+               /* The command belongs to a finished CCB stream, so the FW is drained
+                * until it has emptied the remaining commands and R == W.
+                */
+               ui32PollOffset = psClientCCB->ui32LastPDumpWriteOffset;
+
+               PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode,
+                                                         ui32PDumpFlags,
+                                                         "cCCB(%s@%p): Draining CCB rgxfw_roff == woff (%d)",
+                                                         psClientCCB->szName,
+                                                         psClientCCB,
+                                                         ui32PollOffset);
+       }
+
+       return DevmemPDumpDevmemPol32(psClientCCB->psClientCCBCtrlMemDesc,
+                                                                       offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
+                                                                       ui32PollOffset,
+                                                                       0xffffffff,
+                                                                       PDUMP_POLL_OPERATOR_EQUAL,
+                                                                       ui32PDumpFlags);
+}
+
+/******************************************************************************
+ FUNCTION      : RGXCCBPDumpSyncCCB
+
+ PURPOSE       : Synchronise Client CCBs from both live and playback contexts.
+               Waits for live-FW to empty live-CCB.
+               Waits for sim-FW to empty sim-CCB by adding a POL.
+
+ PARAMETERS    : psClientCCB           - The client CCB
+                 ui32PDumpFlags        - PDump flags
+
+ RETURNS       : PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXCCBPDumpSyncCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError;
+
+       /* Wait for the live FW to catch up/empty CCB. This is done by returning
+        * retry which will get pushed back out to Services client where it
+        * waits on the event object and then resubmits the command.
+        */
+       if (psClientCCB->psClientCCBCtrl->ui32ReadOffset != psClientCCB->ui32HostWriteOffset)
+       {
+               return PVRSRV_ERROR_RETRY;
+       }
+
+       /* Wait for the sim FW to catch up/empty sim CCB.
+        * We drain whenever capture range is entered, even if no commands
+        * have been issued on this CCB when out of capture range. We have to
+        * wait for commands that might have been issued in the last capture
+        * range to finish so the connection's sync block snapshot dumped after
+        * all the PDumpTransition callbacks have been executed doesn't clobber
+        * syncs which the sim FW is currently working on.
+        *
+        * Although this is sub-optimal for playback (while out of capture
+        * range we synchronise, for every continuous operation, the sim
+        * playback processing the script with the sim FW), there is no easy
+        * solution. Not all modules that work with syncs register a
+        * PDumpTransition callback and thus we have no way of knowing if we
+        * can skip this sim CCB drain and sync block dump or not.
+        */
+
+       eError = RGXCCBPDumpDrainCCB(psClientCCB, ui32PDumpFlags);
+       PVR_LOG_IF_ERROR(eError, "RGXCCBPDumpDrainCCB");
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+       /* Live CCB and simulation CCB now empty, FW idle on CCB in both
+        * contexts.
+        */
+       return PVRSRV_OK;
+}
+
+/******************************************************************************
+ FUNCTION      : RGXCCBPDumpFastForwardCCB
+
+ PURPOSE       : Fast-forward sim-CCB and live-CCB offsets to the live
+               app-thread values.
+               This helps to skip any commands submitted when out of capture
+               range and to start with the first command in capture range in
+               both live and playback contexts. In Block mode, this also lets
+               playback jump to any intermediate PDump block directly after
+               the first block.
+
+ PARAMETERS    : psClientCCB           - The client CCB
+                         ui32PDumpFlags    - PDump flags
+
+ RETURNS       : void
+******************************************************************************/
+static void RGXCCBPDumpFastForwardCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32PDumpFlags)
+{
+       volatile RGXFWIF_CCCB_CTL *psCCBCtl = psClientCCB->psClientCCBCtrl;
+#if defined(PDUMP)
+       PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psClientCCB->psServerCommonContext);
+#endif
+
+       /* Make sure that we have synced live-FW and live-App threads */
+       PVR_ASSERT(psCCBCtl->ui32ReadOffset == psClientCCB->ui32HostWriteOffset);
+
+       psCCBCtl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset;
+       psCCBCtl->ui32DepOffset = psClientCCB->ui32HostWriteOffset;
+       psCCBCtl->ui32WriteOffset = psClientCCB->ui32HostWriteOffset;
+#if defined(SUPPORT_AGP)
+       psCCBCtl->ui32ReadOffset2 = psClientCCB->ui32HostWriteOffset;
+#if defined(SUPPORT_AGP4)
+       psCCBCtl->ui32ReadOffset3 = psClientCCB->ui32HostWriteOffset;
+       psCCBCtl->ui32ReadOffset4 = psClientCCB->ui32HostWriteOffset;
+#endif
+#endif
+
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode,
+                       ui32PDumpFlags,
+                       "cCCB(%s@%p): Fast-forward from %d to %d",
+                       psClientCCB->szName,
+                       psClientCCB,
+                       psClientCCB->ui32LastPDumpWriteOffset,
+                       psClientCCB->ui32HostWriteOffset);
+
+       DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc,
+                       0,
+                       sizeof(RGXFWIF_CCCB_CTL),
+                       ui32PDumpFlags);
+
+       /* Although we've entered capture range for this process connection,
+        * we might not do any work on this CCB, so update
+        * ui32LastPDumpWriteOffset to record where we got to; next time the
+        * drain will start from this point.
+        */
+       psClientCCB->ui32LastPDumpWriteOffset = psClientCCB->ui32HostWriteOffset;
+
+}
+
+static PVRSRV_ERROR _RGXCCBPDumpTransition(void *pvData, void *pvDevice, PDUMP_TRANSITION_EVENT eEvent, IMG_UINT32 ui32PDumpFlags)
+{
+       RGX_CLIENT_CCB *psClientCCB = (RGX_CLIENT_CCB *) pvData;
+#if defined(PDUMP)
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) pvDevice;
+#endif
+       PVRSRV_ERROR eError;
+
+       /* Block mode:
+        * Here is block structure at transition (ui32BlockLength=N frames):
+        *
+        * ...
+        * ...
+        * PDUMP_BLOCK_START_0x0000000x{
+        *   <Fast-forward sim-CCCB>
+        *   <Re-dump SyncBlocks>
+        *   ...
+        *   ...
+        *   ... (N frames data)
+        *   ...
+        *   ...
+        *   <(1) Drain sim-KCCB>                     ''|
+        *   <(2) Sync live and sim CCCB>               |
+        * }PDUMP_BLOCK_END_0x0000000x                  | <- BlockTransition Steps
+        *   <(3) Split MAIN and BLOCK stream script>   |
+        * PDUMP_BLOCK_START_0x0000000y{                |
+        *   <(4) Fast-forward sim-CCCB>                |
+        *   <(5) Re-dump SyncBlocks>                 ,,|
+        *   ...
+        *   ...
+        *   ... (N frames data)
+        *   ...
+        *   ...
+        *   <Drain sim-KCCB>
+        *   <Sync live and sim CCCB>
+        * }PDUMP_BLOCK_END_0x0000000y
+        * ...
+        * ...
+        *
+        * Steps (3) and (5) are done in pdump_server.c
+        * */
+       switch (eEvent)
+       {
+               case PDUMP_TRANSITION_EVENT_RANGE_ENTERED:
+                       {
+                               /* We're about to transition into capture range and we've submitted
+                                * new commands since the last time we entered capture range so drain
+                                * the live CCB and simulation (sim) CCB as required, i.e. leave CCB
+                                * idle in both live and sim contexts.
+                                * This requires the host driver to ensure the live FW & the sim FW
+                                * have both emptied out the remaining commands until R==W (CCB empty).
+                                */
+
+                               eError = RGXCCBPDumpSyncCCB(psClientCCB, ui32PDumpFlags);
+                               PVR_RETURN_IF_ERROR(eError);
+
+                               if (psClientCCB->ui32LastPDumpWriteOffset != psClientCCB->ui32HostWriteOffset)
+                               {
+                                       /* If new commands have been written when out of capture range in
+                                        * the live CCB then we need to fast forward the sim CCBCtl
+                                        * offsets past uncaptured commands. This is done by PDUMPing
+                                        * the CCBCtl memory to align sim values with the live CCBCtl
+                                        * values. Both live and sim FWs can start with the 1st command
+                                        * which is in the new capture range.
+                                        */
+                                       RGXCCBPDumpFastForwardCCB(psClientCCB, ui32PDumpFlags);
+                               }
+                               break;
+                       }
+               case PDUMP_TRANSITION_EVENT_RANGE_EXITED:
+                       {
+                               /* Nothing to do */
+                               break;
+                       }
+               case PDUMP_TRANSITION_EVENT_BLOCK_FINISHED:
+                       {
+                               /* (1) Drain KCCB from current block before starting new:
+                                *
+                                * At playback, this ensures that the sim-FW drains all commands in the KCCB
+                                * belonging to the current block before 'jumping' to any future commands
+                                * (from the next block). This synchronises the script-thread and sim-FW
+                                * thread KCCBs at the end of each pdump block.
+                                *
+                                * It additionally forces a re-dump of the KCCBCtl structure at the start of the next block.
+                                * */
+
+#if defined(PDUMP)
+                               eError = RGXPdumpDrainKCCB(psDevInfo, psDevInfo->psKernelCCBCtl->ui32WriteOffset);
+                               PVR_LOG_RETURN_IF_ERROR(eError, "RGXPdumpDrainKCCB");
+#endif
+
+                               /* (2) Synchronise Client CCBs from live and playback contexts before starting new block:
+                                *
+                                * This operation will,
+                                * a. Force synchronisation between app-thread and live-FW thread (i.e. Wait
+                                *    for live-FW to empty live Client CCB).
+                                *
+                                * b. Next, it will dump a poll command to drain the Client CCB at the end
+                                *    of every pdump block. At playback time this synchronises the sim-FW
+                                *    and script-thread Client CCBs at the end of each block.
+                                *
+                                * This is to ensure that all commands in CCB from current block are processed
+                                * before moving on to future commands.
+                                * */
+
+                               eError = RGXCCBPDumpSyncCCB(psClientCCB, ui32PDumpFlags);
+                               PVR_RETURN_IF_ERROR(eError);
+                               break;
+                       }
+               case PDUMP_TRANSITION_EVENT_BLOCK_STARTED:
+                       {
+                               /* (4) Fast-forward CCB write offsets to current live values:
+                                *
+                                * We have already synchronised the live-FW and app-thread at the end of each
+                                * block (Step 2a above); now fast-forward the Client CCBCtl write offsets to
+                                * the current app-thread values at the start of every block. This allows us
+                                * to skip any intermediate pdump blocks and start with the last (or any later)
+                                * block immediately after the first pdump block.
+                                * */
+
+                               RGXCCBPDumpFastForwardCCB(psClientCCB, ui32PDumpFlags);
+                               break;
+                       }
+               case PDUMP_TRANSITION_EVENT_NONE:
+                       /* Invalid event for transition */
+               default:
+                       {
+                               /* Unknown Transition event */
+                               return PVRSRV_ERROR_INVALID_PARAMS;
+                       }
+       }
+       return PVRSRV_OK;
+}
+
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+
+static INLINE void _RGXInitCCBUtilisation(RGX_CLIENT_CCB *psClientCCB)
+{
+       psClientCCB->sUtilisation.ui32HighWaterMark = 0; /* initialize ui32HighWaterMark level to zero */
+       psClientCCB->sUtilisation.ui32ThresholdBytes = (psClientCCB->ui32Size *
+                                                       PVRSRV_ENABLE_CCCB_UTILISATION_INFO_THRESHOLD)  / 100;
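+       /* Illustrative arithmetic (the size is hypothetical): a 64 KiB client CCB
+        * with the default 90% threshold starts warning once the high-water mark
+        * reaches (65536 * 90) / 100 = 58982 bytes. */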
+       psClientCCB->sUtilisation.ui32Warnings = 0;
+       psClientCCB->sUtilisation.ui32CCBAcquired = 0;
+       psClientCCB->sUtilisation.ui32CCBFull = 0;
+}
+
+static INLINE void _RGXCCBUtilisationEvent(RGX_CLIENT_CCB *psClientCCB,
+                                               IMG_UINT32 ui32WarningType,
+                                               IMG_UINT32 ui32CmdSize)
+{
+       /* in VERBOSE mode we will print a message for each different
+        * event type as they happen.
+        */
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+       if (!(psClientCCB->sUtilisation.ui32Warnings & ui32WarningType))
+       {
+               if (ui32WarningType == PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED)
+               {
+                       PVR_LOG(("Failed to acquire CCB space for %u byte command:", ui32CmdSize));
+               }
+
+               PVR_LOG(("%s: Client CCB (%s) watermark (%u) hit %d%% of its allocation size (%u)",
+                                                                       __func__,
+                                                                       psClientCCB->szName,
+                                                                       psClientCCB->sUtilisation.ui32HighWaterMark,
+                                                                       psClientCCB->sUtilisation.ui32HighWaterMark * 100 / psClientCCB->ui32Size,
+                                                                       psClientCCB->ui32Size));
+
+               /* record that we have issued a warning of this type */
+               psClientCCB->sUtilisation.ui32Warnings |= ui32WarningType;
+       }
+#else
+       PVR_UNREFERENCED_PARAMETER(psClientCCB);
+       PVR_UNREFERENCED_PARAMETER(ui32WarningType);
+       PVR_UNREFERENCED_PARAMETER(ui32CmdSize);
+#endif
+}
+
+/* Check the current CCB utilisation. Print a one-time warning message if it is above the
+ * specified threshold
+ */
+static INLINE void _RGXCheckCCBUtilisation(RGX_CLIENT_CCB *psClientCCB)
+{
+       /* Print a warning message if the cCCB watermark is above the threshold value */
+       if (psClientCCB->sUtilisation.ui32HighWaterMark >= psClientCCB->sUtilisation.ui32ThresholdBytes)
+       {
+               _RGXCCBUtilisationEvent(psClientCCB,
+                                       PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_THRESHOLD,
+                                       0);
+       }
+}
+
+/* Update the cCCB high watermark level if necessary */
+static void _RGXUpdateCCBUtilisation(RGX_CLIENT_CCB *psClientCCB)
+{
+       IMG_UINT32 ui32FreeSpace, ui32MemCurrentUsage;
+
+       ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
+                                                                         psClientCCB->psClientCCBCtrl->ui32ReadOffset,
+                                                                         psClientCCB->ui32Size);
+       ui32MemCurrentUsage = psClientCCB->ui32Size - ui32FreeSpace;
+
+       if (ui32MemCurrentUsage > psClientCCB->sUtilisation.ui32HighWaterMark)
+       {
+               psClientCCB->sUtilisation.ui32HighWaterMark = ui32MemCurrentUsage;
+
+               /* The high water mark has increased. Check if it is above the
+                * threshold so we can print a warning if necessary.
+                */
+               _RGXCheckCCBUtilisation(psClientCCB);
+       }
+}
+
+#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */
+
+PVRSRV_ERROR RGXCreateCCB(PVRSRV_RGXDEV_INFO   *psDevInfo,
+                                                 IMG_UINT32                    ui32CCBSizeLog2,
+                                                 IMG_UINT32                    ui32CCBMaxSizeLog2,
+                                                 IMG_UINT32                    ui32ContextFlags,
+                                                 CONNECTION_DATA               *psConnectionData,
+                                                 RGX_CCB_REQUESTOR_TYPE                eRGXCCBRequestor,
+                                                 RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+                                                 RGX_CLIENT_CCB                **ppsClientCCB,
+                                                 DEVMEM_MEMDESC                **ppsClientCCBMemDesc,
+                                                 DEVMEM_MEMDESC                **ppsClientCCBCtrlMemDesc)
+{
+       PVRSRV_ERROR    eError = PVRSRV_OK;
+       PVRSRV_MEMALLOCFLAGS_T  uiClientCCBMemAllocFlags, uiClientCCBCtlMemAllocFlags;
+       IMG_UINT32              ui32FWMainLog2PageSize = DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap);
+       IMG_UINT32              ui32ChunkSize = (1U << ui32FWMainLog2PageSize);
+       IMG_UINT32              ui32AllocSize = MAX((1U << ui32CCBSizeLog2), ui32ChunkSize);
+       IMG_UINT32              ui32MinAllocSize = MAX((1U << MIN_SAFE_CCB_SIZE_LOG2), ui32ChunkSize);
+       RGX_CLIENT_CCB  *psClientCCB;
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+       IMG_UINT32              ui32NumChunks = ui32AllocSize / ui32ChunkSize;
+       IMG_UINT32              ui32VirtualAllocSize = (1U << ui32CCBMaxSizeLog2);
+       IMG_UINT32              ui32NumVirtChunks = ui32VirtualAllocSize / ui32ChunkSize;
+       IMG_UINT32              i;
+
+       /* For the allocation request to be valid, at least one page is required.
+        * This is relevant on systems where the page size is greater than the client CCB size. */
+       ui32NumVirtChunks = MAX(1, ui32NumVirtChunks);
+       PVR_ASSERT((ui32ChunkSize >= (1U << PAGE_SHIFT)));
+#else
+       PVR_UNREFERENCED_PARAMETER(ui32CCBMaxSizeLog2);
+#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */
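+       /*
+        * Worked sizing example (log2 values are hypothetical): with a 4 KiB FW
+        * main heap page size, ui32CCBSizeLog2 = 16 and ui32CCBMaxSizeLog2 = 18,
+        * ui32ChunkSize = 4096, ui32AllocSize = 64 KiB (16 chunks) and, when CCB
+        * grow is enabled, ui32VirtualAllocSize = 256 KiB (64 virtual chunks).
+        */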
+
+       /* All client CCBs must be at least the "minimum" size and must not exceed the "maximum" */
+       if ((ui32CCBSizeLog2 < MIN_SAFE_CCB_SIZE_LOG2) ||
+               (ui32CCBSizeLog2 > MAX_SAFE_CCB_SIZE_LOG2))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s CCB size is invalid (%d). Should be from %d to %d",
+                        __func__,
+                        aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+                        ui32CCBSizeLog2, MIN_SAFE_CCB_SIZE_LOG2, MAX_SAFE_CCB_SIZE_LOG2));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+       if ((ui32CCBMaxSizeLog2 < ui32CCBSizeLog2) ||
+               (ui32CCBMaxSizeLog2 > MAX_SAFE_CCB_SIZE_LOG2))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: %s CCB maximum size is invalid (%d). Should be from %d to %d",
+                        __func__,
+                        aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+                        ui32CCBMaxSizeLog2, ui32CCBSizeLog2, MAX_SAFE_CCB_SIZE_LOG2));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+#endif
+
+       psClientCCB = OSAllocMem(sizeof(*psClientCCB));
+       if (psClientCCB == NULL)
+       {
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto fail_alloc;
+       }
+       psClientCCB->psServerCommonContext = psServerCommonContext;
+
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+       psClientCCB->ui32VirtualAllocSize = 0;
+       psClientCCB->pui32MappingTable = NULL;
+       psClientCCB->ui32ChunkSize = ui32ChunkSize;
+#endif
+
+       uiClientCCBMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                                                               PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+                                                               PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                                                               PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                                                               PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC |
+                                                               PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+                                                               PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+                                                               PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN);
+
+       uiClientCCBCtlMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                                                               PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+                                                               PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                                                               PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                                                               PVRSRV_MEMALLOCFLAG_GPU_UNCACHED |
+                                                               PVRSRV_MEMALLOCFLAG_CPU_UNCACHED |
+                                                               PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+                                                               PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+                                                               PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN);
+
+       /* If connection data indicates Sync Lockup Recovery (SLR) should be disabled,
+        * or if the caller has set ui32ContextFlags to disable SLR for this context,
+        * indicate this in psClientCCB->ui32CCBFlags.
+        */
+       if ((psConnectionData->ui32ClientFlags & SRV_FLAGS_CLIENT_SLR_DISABLED) ||
+           (ui32ContextFlags & RGX_CONTEXT_FLAG_DISABLESLR))
+       {
+               BIT_SET(psClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED);
+       }
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode, "Allocate RGXFW cCCB");
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+       if (BITMASK_HAS(psDevInfo->ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN))
+       {
+               PHYS_HEAP *psPhysHeap = psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN];
+               PHYS_HEAP_TYPE eHeapType = PhysHeapGetType(psPhysHeap);
+
+               psClientCCB->ui32VirtualAllocSize = ui32VirtualAllocSize;
+
+               /*
+                * Growing the CCB doubles its size, so the final grow needs at most ui32NumVirtChunks/2 new chunks
+                * because the other ui32NumVirtChunks/2 are already allocated.
+                * The initial chunk count can be higher (when the CCB size equals the CCB maximum size), hence the MAX.
+                */
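+               /* Example with hypothetical counts: 16 initial chunks and 64 virtual
+                * chunks give a table of MAX(16, 64/2) = 32 entries, enough for the
+                * 32 new chunk indices needed by the final doubling from 32 to 64
+                * chunks. */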
+               psClientCCB->pui32MappingTable = OSAllocMem(MAX(ui32NumChunks, ui32NumVirtChunks/2) * sizeof(IMG_UINT32));
+               if (psClientCCB->pui32MappingTable == NULL)
+               {
+                       eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                       goto fail_alloc_mtable;
+               }
+               for (i = 0; i < ui32NumChunks; i++)
+               {
+                       psClientCCB->pui32MappingTable[i] = i;
+               }
+
+               if (eHeapType == PHYS_HEAP_TYPE_LMA ||
+                       eHeapType == PHYS_HEAP_TYPE_DMA)
+               {
+                       /*
+                        * On LMA, sparse memory cannot be mapped into the kernel.
+                        * To work around this, the whole CCB is allocated up front as contiguous memory.
+                        */
+                       eError = DevmemFwAllocate(psDevInfo,
+                                                                       ui32VirtualAllocSize,
+                                                                       uiClientCCBMemAllocFlags,
+                                                                       aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_STRING],
+                                                                       &psClientCCB->psClientCCBMemDesc);
+               }
+               else
+               {
+                       eError = DevmemFwAllocateSparse(psDevInfo,
+                                                                                       ui32VirtualAllocSize,
+                                                                                       ui32ChunkSize,
+                                                                                       ui32NumChunks,
+                                                                                       ui32NumVirtChunks,
+                                                                                       psClientCCB->pui32MappingTable,
+                                                                                       uiClientCCBMemAllocFlags,
+                                                                                       aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_STRING],
+                                                                                       &psClientCCB->psClientCCBMemDesc);
+               }
+       }
+
+       if (eError != PVRSRV_OK)
+       {
+               OSFreeMem(psClientCCB->pui32MappingTable);
+               psClientCCB->pui32MappingTable = NULL;
+               psClientCCB->ui32VirtualAllocSize = 0;
+       }
+
+       if (!BITMASK_HAS(psDevInfo->ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN) ||
+           (eError != PVRSRV_OK))
+#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */
+       {
+               /* Allocate ui32AllocSize, or the next best POT allocation */
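+               /* For example (sizes are hypothetical), a failed 128 KiB request is
+                * retried at 64 KiB, then 32 KiB, and so on down to the minimum safe
+                * CCB size before giving up. */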
+               do
+               {
+                       eError = DevmemFwAllocate(psDevInfo,
+                                                                       ui32AllocSize,
+                                                                       uiClientCCBMemAllocFlags,
+                                                                       aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_STRING],
+                                                                       &psClientCCB->psClientCCBMemDesc);
+                       if (eError != PVRSRV_OK)
+                       {
+                               /* Failed to allocate - ensure CCB grow is disabled from
+                                * now on for this device.
+                                */
+                               BITMASK_UNSET(psDevInfo->ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN);
+
+                               /* Failed to allocate, try next POT down */
+                               ui32AllocSize >>= 1;
+                       }
+               } while ((eError != PVRSRV_OK) && (ui32AllocSize > ui32MinAllocSize));
+       }
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed to allocate RGX client CCB (%s)",
+                        __func__,
+                        PVRSRVGetErrorString(eError)));
+               goto fail_alloc_ccb;
+       }
+
+       OSSNPrintf(psClientCCB->szName, MAX_CLIENT_CCB_NAME, "%s-P%lu-T%lu-%s",
+                                                                       aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+                                                                       (unsigned long) OSGetCurrentClientProcessIDKM(),
+                                                                       (unsigned long) OSGetCurrentClientThreadIDKM(),
+                                                                       OSGetCurrentClientProcessNameKM());
+
+       if (ui32AllocSize < (1U << ui32CCBSizeLog2))
+       {
+               PVR_DPF((PVR_DBG_WARNING, "%s: Unable to allocate %d bytes for RGX client CCB (%s) but allocated %d bytes",
+                        __func__,
+                        (1U << ui32CCBSizeLog2),
+                        psClientCCB->szName,
+                        ui32AllocSize));
+       }
+
+       eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc,
+                                                                         &psClientCCB->pvClientCCB);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed to map RGX client CCB (%s)",
+                        __func__,
+                        PVRSRVGetErrorString(eError)));
+               goto fail_map_ccb;
+       }
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode, "Allocate RGXFW cCCB control");
+       eError = DevmemFwAllocate(psDevInfo,
+                                                                               sizeof(RGXFWIF_CCCB_CTL),
+                                                                               uiClientCCBCtlMemAllocFlags,
+                                                                               aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_CONTROL_STRING],
+                                                                               &psClientCCB->psClientCCBCtrlMemDesc);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed to allocate RGX client CCB control (%s)",
+                        __func__,
+                        PVRSRVGetErrorString(eError)));
+               goto fail_alloc_ccbctrl;
+       }
+
+       eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc,
+                                                                         (void **) &psClientCCB->psClientCCBCtrl);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed to map RGX client CCB control (%s)",
+                        __func__,
+                        PVRSRVGetErrorString(eError)));
+               goto fail_map_ccbctrl;
+       }
+
+       /* psClientCCBCtrlMemDesc was zero alloc'd so no need to initialise offsets. */
+       psClientCCB->psClientCCBCtrl->ui32WrapMask = ui32AllocSize - 1;
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode, "cCCB control");
+       DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc,
+                                          0,
+                                          sizeof(RGXFWIF_CCCB_CTL),
+                                          PDUMP_FLAGS_CONTINUOUS);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+       psClientCCB->ui32HostWriteOffset = 0;
+       psClientCCB->ui32LastPDumpWriteOffset = 0;
+       psClientCCB->ui32FinishedPDumpWriteOffset = 0;
+       psClientCCB->ui32Size = ui32AllocSize;
+       psClientCCB->ui32LastROff = ui32AllocSize - 1;
+       psClientCCB->ui32ByteCount = 0;
+       psClientCCB->ui32LastByteCount = 0;
+       BIT_UNSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN);
+
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+       eError = OSLockCreate(&psClientCCB->hCCBGrowLock);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed to create hCCBGrowLock (%s)",
+                        __func__,
+                        PVRSRVGetErrorString(eError)));
+               goto fail_create_ccbgrow_lock;
+       }
+#endif
+#if defined(DEBUG)
+       psClientCCB->ui32UpdateEntries = 0;
+#endif
+
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+       _RGXInitCCBUtilisation(psClientCCB);
+       psClientCCB->eRGXCCBRequestor = eRGXCCBRequestor;
+#endif
+       eError = PDumpRegisterTransitionCallback(psConnectionData->psPDumpConnectionData,
+                                                                                         _RGXCCBPDumpTransition,
+                                                                                         psClientCCB,
+                                                                                         psDevInfo,
+                                                                                         &psClientCCB->hTransition);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_pdumpreg;
+       }
+
+       /*
+        * Note:
+        * Save the PDump specific structure, which is ref counted unlike
+        * the connection data, to ensure it's not freed too early
+        */
+       psClientCCB->psPDumpConnectionData = psConnectionData->psPDumpConnectionData;
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "New RGXFW cCCB(%s@%p) created",
+                                psClientCCB->szName,
+                                psClientCCB);
+
+       *ppsClientCCB = psClientCCB;
+       *ppsClientCCBMemDesc = psClientCCB->psClientCCBMemDesc;
+       *ppsClientCCBCtrlMemDesc = psClientCCB->psClientCCBCtrlMemDesc;
+       return PVRSRV_OK;
+
+fail_pdumpreg:
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+       OSLockDestroy(psClientCCB->hCCBGrowLock);
+fail_create_ccbgrow_lock:
+#endif
+       DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc);
+fail_map_ccbctrl:
+       DevmemFwUnmapAndFree(psDevInfo, psClientCCB->psClientCCBCtrlMemDesc);
+fail_alloc_ccbctrl:
+       DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc);
+fail_map_ccb:
+       DevmemFwUnmapAndFree(psDevInfo, psClientCCB->psClientCCBMemDesc);
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+fail_alloc_ccb:
+       if (psClientCCB->ui32VirtualAllocSize > 0)
+       {
+               OSFreeMem(psClientCCB->pui32MappingTable);
+       }
+fail_alloc_mtable:
+#else
+fail_alloc_ccb:
+#endif
+       OSFreeMem(psClientCCB);
+fail_alloc:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+void RGXDestroyCCB(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_CLIENT_CCB *psClientCCB)
+{
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+       if (psClientCCB->sUtilisation.ui32CCBFull)
+       {
+               PVR_LOG(("CCBUtilisationInfo: GPU %s command buffer was full %d times out of %d. "
+                               "This is not an error but the application may not run optimally.",
+                               aszCCBRequestors[psClientCCB->eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+                               psClientCCB->sUtilisation.ui32CCBFull,
+                               psClientCCB->sUtilisation.ui32CCBAcquired));
+       }
+#endif
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+       OSLockDestroy(psClientCCB->hCCBGrowLock);
+#endif
+       PDumpUnregisterTransitionCallback(psClientCCB->hTransition);
+       DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc);
+       DevmemFwUnmapAndFree(psDevInfo, psClientCCB->psClientCCBCtrlMemDesc);
+       DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc);
+       DevmemFwUnmapAndFree(psDevInfo, psClientCCB->psClientCCBMemDesc);
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+       if (psClientCCB->pui32MappingTable)
+       {
+               OSFreeMem(psClientCCB->pui32MappingTable);
+       }
+#endif
+       OSFreeMem(psClientCCB);
+}
+
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+static PVRSRV_ERROR _RGXCCBMemChangeSparse(RGX_CLIENT_CCB *psClientCCB,
+                                                                                 IMG_UINT32 ui32AllocPageCount)
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32       i;
+
+#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE
+       DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc);
+#endif
+
+       for (i = 0; i < ui32AllocPageCount; i++)
+       {
+               psClientCCB->pui32MappingTable[i] = ui32AllocPageCount + i;
+       }
+
+       /* Double the CCB size (the CCB size must be a power of two) by adding ui32AllocPageCount new pages */
+       eError = DeviceMemChangeSparse(psClientCCB->psClientCCBMemDesc,
+                                                                       ui32AllocPageCount,
+                                                                       psClientCCB->pui32MappingTable,
+                                                                       0,
+                                                                       NULL,
+#if !defined(PVRSRV_UNMAP_ON_SPARSE_CHANGE)
+                                                                       SPARSE_MAP_CPU_ADDR |
+#endif
+                                                                       SPARSE_RESIZE_ALLOC);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXAcquireCCB: Failed to grow RGX client CCB (%s)",
+                               PVRSRVGetErrorString(eError)));
+
+#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE
+               if (DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc,
+                                                                       &psClientCCB->pvClientCCB) != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "RGXAcquireCCB: Failed to reacquire CCB mapping"));
+                       psClientCCB->pvClientCCB = NULL;
+               }
+#endif
+
+               return eError;
+       }
+
+#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE
+       eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc,
+                                                                       &psClientCCB->pvClientCCB);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXAcquireCCB: Failed to map RGX client CCB (%s)",
+                               PVRSRVGetErrorString(eError)));
+               return eError;
+       }
+#endif
+
+       return PVRSRV_OK;
+}
+#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */
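+
+/*
+       Illustrative sketch of the grow helper above (a reading aid, not part of
+       the imported driver logic): assuming a CCB that currently spans
+       ui32AllocPageCount = 4 pages (indices 0..3), the loop fills
+       pui32MappingTable with {4, 5, 6, 7} - the page indices of the new
+       second half - and DeviceMemChangeSparse() then maps those new pages so
+       the CCB doubles from 4 to 8 pages in place.
+*/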
+
+PVRSRV_ERROR RGXCheckSpaceCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32CmdSize)
+{
+       IMG_UINT32 ui32FreeSpace;
+
+       /* Check that the CCB can hold this command + padding */
+       if ((ui32CmdSize + PADDING_COMMAND_SIZE + 1) > psClientCCB->ui32Size)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Command size (%d bytes) too big for CCB"
+                       " (%d bytes)", ui32CmdSize, psClientCCB->ui32Size));
+               return PVRSRV_ERROR_CMD_TOO_BIG;
+       }
+
+       /*
+               Check we don't overflow the end of the buffer and make sure we have
+               enough space for the padding command. If we don't have enough space
+               (including the minimum amount for the padding command) we need to make
+               sure we insert a padding command now and wrap before adding the main
+               command.
+       */
+       if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) <= psClientCCB->ui32Size)
+       {
+               ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
+                                             psClientCCB->psClientCCBCtrl->ui32ReadOffset,
+                                             psClientCCB->ui32Size);
+
+               /* Don't allow all the space to be used */
+               if (ui32FreeSpace > ui32CmdSize)
+               {
+                       return PVRSRV_OK;
+               }
+
+               goto e_retry;
+       }
+       else
+       {
+               IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset;
+
+               ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
+                                             psClientCCB->psClientCCBCtrl->ui32ReadOffset,
+                                             psClientCCB->ui32Size);
+
+               /* Check there is space for both the command and the padding command */
+               if (ui32FreeSpace > ui32Remain + ui32CmdSize)
+               {
+                       return PVRSRV_OK;
+               }
+
+               goto e_retry;
+       }
+
+e_retry:
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+       _RGXCCBUtilisationEvent(psClientCCB,
+                   PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_FULL_CCB,
+                   ui32CmdSize);
+#endif  /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */
+
+       return PVRSRV_ERROR_RETRY;
+}
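+
+/*
+       Worked example for the space checks above (illustrative only; the exact
+       GET_CCB_SPACE() formula lives in the CCB headers and is assumed here to
+       be the usual "keep one byte free" ring-buffer calculation, i.e.
+       (ui32ReadOffset - ui32HostWriteOffset - 1) modulo ui32Size): for a
+       4096-byte CCB with ui32HostWriteOffset = 0x0100 and ui32ReadOffset =
+       0x0300, the free space is 0x01FF bytes, so a 0x0200-byte command is
+       refused with PVRSRV_ERROR_RETRY and the caller is expected to retry
+       once the firmware has advanced the read offset.
+*/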
+
+/******************************************************************************
+ FUNCTION      : RGXAcquireCCB
+
+ PURPOSE       : Obtains access to write some commands to a CCB
+
+ PARAMETERS    : psClientCCB           - The client CCB
+                         ui32CmdSize           - How much space is required
+                         ppvBufferSpace        - Pointer to space in the buffer
+                         ui32PDumpFlags - Should this be PDump continuous?
+
+ RETURNS       : PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB,
+                                                                               IMG_UINT32              ui32CmdSize,
+                                                                               void                    **ppvBufferSpace,
+                                                                               IMG_UINT32              ui32PDumpFlags)
+{
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+       IMG_UINT32      ui32RetryCount = 2;
+#endif
+
+#if defined(PDUMP)
+       PVRSRV_ERROR eError;
+       PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psClientCCB->psServerCommonContext);
+       PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+       IMG_BOOL        bPDumpEnabled = PDumpCheckFlagsWrite(psDeviceNode, ui32PDumpFlags);
+       IMG_BOOL        bPDumpFlagsContinuous = PDUMP_IS_CONTINUOUS(ui32PDumpFlags);
+
+       /*
+               PDumpSetFrame will detect the Transition into the capture range for
+               frame-based data, but if we are PDumping continuous data then we
+               need to inform the PDump layer ourselves.
+
+               The first check confirms we are in continuous mode.
+               The second check confirms the PDump client is connected and ready.
+               The third check confirms we are not in the capture range.
+       */
+       if (bPDumpFlagsContinuous &&
+               bPDumpEnabled &&
+               !PDumpCheckFlagsWrite(psDeviceNode, PDUMP_FLAGS_NONE))
+       {
+               eError = PDumpTransition(psDeviceNode,
+                                        psClientCCB->psPDumpConnectionData,
+                                        PDUMP_TRANSITION_EVENT_RANGE_ENTERED,
+                                        ui32PDumpFlags);
+               if (eError != PVRSRV_OK)
+               {
+                       return eError;
+               }
+       }
+#endif
+
+       /* Check that the CCB can hold this command + padding */
+       if ((ui32CmdSize + PADDING_COMMAND_SIZE + 1) > psClientCCB->ui32Size)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Command size (%d bytes) too big for CCB (%d bytes)",
+                                                               ui32CmdSize, psClientCCB->ui32Size));
+               return PVRSRV_ERROR_CMD_TOO_BIG;
+       }
+
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+       while (ui32RetryCount--)
+#endif
+       {
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+               psClientCCB->sUtilisation.ui32CCBAcquired++;
+#endif
+
+               /*
+                       Check we don't overflow the end of the buffer and make sure we have
+                       enough space for the padding command. If we don't have enough space
+                       (including the minimum amount for the padding command) we need to
+                       insert a padding command now and wrap before adding the main command.
+               */
+               if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) <= psClientCCB->ui32Size)
+               {
+                       /* The command can fit without wrapping... */
+                       IMG_UINT32 ui32FreeSpace;
+
+#if defined(PDUMP)
+                       /* Wait for sufficient CCB space to become available */
+                       PDUMPCOMMENTWITHFLAGS(psDeviceNode, 0,
+                                                                 "Wait for %u bytes to become available according to cCCB Ctl (woff=%x) for %s",
+                                                                 ui32CmdSize, psClientCCB->ui32HostWriteOffset,
+                                                                 psClientCCB->szName);
+                       DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc,
+                                               offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
+                                               psClientCCB->ui32HostWriteOffset,
+                                               ui32CmdSize,
+                                               psClientCCB->ui32Size);
+#endif
+
+                       ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
+                                                                               psClientCCB->psClientCCBCtrl->ui32ReadOffset,
+                                                                               psClientCCB->ui32Size);
+
+                       /* Can command fit? */
+                       if (ui32FreeSpace > ui32CmdSize)
+                       {
+                               *ppvBufferSpace = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset);
+                               return PVRSRV_OK;
+                       }
+                       /* There is not enough free space in CCB. */
+                       goto e_retry;
+               }
+               else
+               {
+                       /*
+                               We're at the end of the buffer without enough contiguous space.
+                               The command cannot fit without wrapping, so we need to insert a
+                               padding command and wrap. We need to do this in one go, otherwise
+                               we would be leaving unflushed commands and forcing the client to
+                               deal with flushing the padding command but not the command they
+                               wanted to write. Therefore we either do all or nothing.
+                       */
+                       RGXFWIF_CCB_CMD_HEADER *psHeader;
+                       IMG_UINT32 ui32FreeSpace;
+                       IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset;
+
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+                       /* Check this is a growable CCB */
+                       if (psClientCCB->ui32VirtualAllocSize > 0)
+                       {
+                               PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psClientCCB->psServerCommonContext);
+
+                               ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
+                                                                                       psClientCCB->psClientCCBCtrl->ui32ReadOffset,
+                                                                                       psClientCCB->ui32Size);
+                               /*
+                                * Check whether the CCB should grow or be wrapped.
+                                * Wrap the CCB if there is no need to grow (the CCB is at least half
+                                * empty) or the CCB can't grow any further, and there is enough free
+                                * space for both the command and the padding command.
+                                */
+                               if (((ui32FreeSpace > psClientCCB->ui32Size/2) || (psClientCCB->ui32Size == psClientCCB->ui32VirtualAllocSize)) &&
+                                       (ui32FreeSpace > ui32Remain + ui32CmdSize))
+                               {
+                                       /* Wrap CCB */
+                                       psHeader = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset);
+                                       psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PADDING;
+                                       psHeader->ui32CmdSize = ui32Remain - sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+#if defined(PDUMP)
+                                       PDUMPCOMMENTWITHFLAGS(psDeviceNode, ui32PDumpFlags,
+                                                                                 "cCCB(%p): Padding cmd %d", psClientCCB, psHeader->ui32CmdSize);
+                                       if (bPDumpEnabled)
+                                       {
+                                               DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc,
+                                                                               psClientCCB->ui32HostWriteOffset,
+                                                                               ui32Remain,
+                                                                               ui32PDumpFlags);
+                                       }
+#endif
+
+                                       *ppvBufferSpace = psClientCCB->pvClientCCB;
+                                       return PVRSRV_OK;
+                               }
+                               else if ((psClientCCB->ui32Size < psClientCCB->ui32VirtualAllocSize) &&
+                                        (psClientCCB->ui32HostWriteOffset >= psClientCCB->psClientCCBCtrl->ui32ReadOffset))
+                               {
+                                       /* Grow CCB */
+                                       PHYS_HEAP *psPhysHeap = psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN];
+                                       PHYS_HEAP_TYPE eHeapType = PhysHeapGetType(psPhysHeap);
+                                       PVRSRV_ERROR eErr = PVRSRV_OK;
+
+                                       /* Something went wrong if we are here a second time */
+                                       PVR_ASSERT(ui32RetryCount != 0);
+                                       OSLockAcquire(psClientCCB->hCCBGrowLock);
+
+                                       /*
+                                        * On LMA, sparse memory can't be mapped into the kernel.
+                                        * To work around this, the whole CCB memory was allocated up front
+                                        * as contiguous memory. In that case the sparse change below is not
+                                        * needed because the memory is already allocated.
+                                        */
+                                       if (eHeapType != PHYS_HEAP_TYPE_LMA &&
+                                               eHeapType != PHYS_HEAP_TYPE_DMA)
+                                       {
+                                               IMG_UINT32 ui32AllocChunkCount = psClientCCB->ui32Size / psClientCCB->ui32ChunkSize;
+
+                                               eErr = _RGXCCBMemChangeSparse(psClientCCB, ui32AllocChunkCount);
+                                       }
+
+                                       /* Setup new CCB size */
+                                       if (eErr == PVRSRV_OK)
+                                       {
+                                               psClientCCB->ui32Size += psClientCCB->ui32Size;
+                                       }
+                                       else
+                                       {
+                                               PVR_LOG(("%s: Client CCB (%s) grow failed (%s)", __func__, psClientCCB->szName, PVRSRVGetErrorString(eErr)));
+                                               OSLockRelease(psClientCCB->hCCBGrowLock);
+                                               goto e_retry;
+                                       }
+
+#if defined(PDUMP)
+                                       PDUMPCOMMENTWITHFLAGS(psDeviceNode, ui32PDumpFlags, "cCCB update for grow");
+                                       if (bPDumpEnabled)
+                                       {
+                                               DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc,
+                                                                                       offsetof(RGXFWIF_CCCB_CTL, ui32WrapMask),
+                                                                                       sizeof(psClientCCB->psClientCCBCtrl->ui32WrapMask),
+                                                                                       ui32PDumpFlags);
+                                               DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc,
+                                                                                       offsetof(RGX_CLIENT_CCB, ui32Size),
+                                                                                       sizeof(psClientCCB->ui32Size),
+                                                                                       ui32PDumpFlags);
+                                       }
+#endif /* defined(PDUMP) */
+
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+                                       PVR_LOG(("%s: Client CCB (%s) grew to %u", __func__, psClientCCB->szName, psClientCCB->ui32Size));
+                                       /* Reset counters */
+                                       _RGXInitCCBUtilisation(psClientCCB);
+#endif
+
+                                       /* The CCB has doubled in size, so retry now. */
+                                       OSLockRelease(psClientCCB->hCCBGrowLock);
+                               }
+                               else
+                               {
+                                       /* CCB can't grow anymore and can't be wrapped */
+#if defined(PDUMP)
+                                       /* Wait for sufficient CCB space to become available */
+                                       PDUMPCOMMENTWITHFLAGS(psDeviceNode, 0,
+                                                                                 "Wait for %u bytes to become available according to cCCB Ctl (woff=%x) for %s",
+                                                                                 ui32Remain, psClientCCB->ui32HostWriteOffset,
+                                                                                 psClientCCB->szName);
+                                       DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc,
+                                                               offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
+                                                               psClientCCB->ui32HostWriteOffset,
+                                                               ui32Remain,
+                                                               psClientCCB->ui32Size);
+                                       PDUMPCOMMENTWITHFLAGS(psDeviceNode, 0,
+                                                                                 "Wait for %u bytes to become available according to cCCB Ctl (woff=%x) for %s",
+                                                                                 ui32CmdSize, 0 /*ui32HostWriteOffset after wrap */,
+                                                                                 psClientCCB->szName);
+                                       DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc,
+                                                               offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
+                                                               0 /*ui32HostWriteOffset after wrap */,
+                                                               ui32CmdSize,
+                                                               psClientCCB->ui32Size);
+                                       /* The CCB now has space for our command, so try wrapping again. */
+#else /* defined(PDUMP) */
+                                       goto e_retry;
+#endif /* defined(PDUMP) */
+                               }
+                       }
+                       else
+#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */
+                       {
+#if defined(PDUMP)
+                               /* Wait for sufficient CCB space to become available */
+                               PDUMPCOMMENTWITHFLAGS(psDeviceNode, 0,
+                                                                         "Wait for %u bytes to become available according to cCCB Ctl (woff=%x) for %s",
+                                                                         ui32Remain, psClientCCB->ui32HostWriteOffset,
+                                                                         psClientCCB->szName);
+                               DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc,
+                                                       offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
+                                                       psClientCCB->ui32HostWriteOffset,
+                                                       ui32Remain,
+                                                       psClientCCB->ui32Size);
+                               PDUMPCOMMENTWITHFLAGS(psDeviceNode, 0,
+                                                                         "Wait for %u bytes to become available according to cCCB Ctl (woff=%x) for %s",
+                                                                         ui32CmdSize, 0 /*ui32HostWriteOffset after wrap */,
+                                                                         psClientCCB->szName);
+                               DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc,
+                                                       offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
+                                                       0 /*ui32HostWriteOffset after wrap */,
+                                                       ui32CmdSize,
+                                                       psClientCCB->ui32Size);
+#endif
+                               ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
+                                                                                       psClientCCB->psClientCCBCtrl->ui32ReadOffset,
+                                                                                       psClientCCB->ui32Size);
+
+                               if (ui32FreeSpace > ui32Remain + ui32CmdSize)
+                               {
+                                       psHeader = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset);
+                                       psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PADDING;
+                                       psHeader->ui32CmdSize = ui32Remain - sizeof(RGXFWIF_CCB_CMD_HEADER);
+#if defined(PDUMP)
+                                       PDUMPCOMMENTWITHFLAGS(psDeviceNode, ui32PDumpFlags, "cCCB(%p): Padding cmd %d", psClientCCB, psHeader->ui32CmdSize);
+                                       if (bPDumpEnabled)
+                                       {
+                                               DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc,
+                                                                               psClientCCB->ui32HostWriteOffset,
+                                                                               ui32Remain,
+                                                                               ui32PDumpFlags);
+                                       }
+#endif
+
+                                       *ppvBufferSpace = psClientCCB->pvClientCCB;
+                                       return PVRSRV_OK;
+                               }
+
+                               goto e_retry;
+                       }
+               }
+       }
+e_retry:
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+       psClientCCB->sUtilisation.ui32CCBFull++;
+       _RGXCCBUtilisationEvent(psClientCCB,
+                               PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED,
+                               ui32CmdSize);
+#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */
+       return PVRSRV_ERROR_RETRY;
+}
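+
+/*
+       Illustrative walk-through of the grow path above (the numbers are made
+       up, the logic is as coded): with ui32Size = 4 KiB, ui32VirtualAllocSize
+       = 64 KiB, the write offset ahead of the read offset and too little free
+       space left to wrap, the CCB is grown by _RGXCCBMemChangeSparse()
+       (skipped on LMA/DMA heaps, where the full buffer was allocated
+       contiguously up front), ui32Size doubles to 8 KiB and the outer retry
+       loop attempts the acquire once more against the enlarged buffer.
+*/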
+
+/******************************************************************************
+ FUNCTION      : RGXReleaseCCB
+
+ PURPOSE       : Release a CCB that we have been writing to.
+
+ PARAMETERS    : psDevData                     - device data
+                         psCCB                         - the CCB
+
+ RETURNS       : None
+******************************************************************************/
+void RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB,
+                                                               IMG_UINT32              ui32CmdSize,
+                                                               IMG_UINT32              ui32PDumpFlags)
+{
+#if defined(PDUMP)
+       PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psClientCCB->psServerCommonContext);
+       PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+       IMG_BOOL        bPDumpEnabled = PDumpCheckFlagsWrite(psDeviceNode, ui32PDumpFlags);
+       IMG_BOOL        bPDumpFlagsContinuous = PDUMP_IS_CONTINUOUS(ui32PDumpFlags);
+#endif
+
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+       OSLockAcquire(psClientCCB->hCCBGrowLock);
+#endif
+       /*
+        * If a padding command was needed then we should now move ui32HostWriteOffset
+        * forward. The command has already been dumped (if bPDumpEnabled).
+        */
+       if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) > psClientCCB->ui32Size)
+       {
+               IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset;
+
+               UPDATE_CCB_OFFSET(psClientCCB->ui32HostWriteOffset,
+                                                 ui32Remain,
+                                                 psClientCCB->ui32Size);
+               psClientCCB->ui32ByteCount += ui32Remain;
+       }
+
+#if defined(PDUMP)
+       /* Dump the CCB data */
+       if (bPDumpEnabled)
+       {
+               DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc,
+                                                  psClientCCB->ui32HostWriteOffset,
+                                                  ui32CmdSize,
+                                                  ui32PDumpFlags);
+       }
+#endif
+
+       /*
+        * Check if there are any fences being written that will already be
+        * satisfied by the last written update command in this CCB. At the
+        * same time we can ASSERT that all sync addresses are not NULL.
+        */
+#if defined(DEBUG)
+       {
+               void *pvBufferStart = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset);
+               void *pvBufferEnd   = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset + ui32CmdSize);
+               IMG_BOOL  bMessagePrinted  = IMG_FALSE;
+
+               /* Walk through the commands in this section of CCB being released... */
+               while (pvBufferStart < pvBufferEnd)
+               {
+                       RGXFWIF_CCB_CMD_HEADER *psCmdHeader = pvBufferStart;
+
+                       if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_UPDATE)
+                       {
+                               /* If an UPDATE then record the values in case an adjacent fence uses them. */
+                               IMG_UINT32  ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
+                               RGXFWIF_UFO *psUFOPtr   = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER));
+
+                               psClientCCB->ui32UpdateEntries = 0;
+                               while (ui32NumUFOs-- > 0)
+                               {
+                                       PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0);
+                                       if (psClientCCB->ui32UpdateEntries < RGX_CCCB_FENCE_UPDATE_LIST_SIZE)
+                                       {
+                                               psClientCCB->asFenceUpdateList[psClientCCB->ui32UpdateEntries++] = *psUFOPtr++;
+                                       }
+                               }
+                       }
+                       else if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE)
+                       {
+                               /* If a FENCE then check the values against the last UPDATE issued. */
+                               IMG_UINT32  ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
+                               RGXFWIF_UFO *psUFOPtr   = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER));
+
+                               while (ui32NumUFOs-- > 0)
+                               {
+                                       PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0);
+
+                                       if (bMessagePrinted == IMG_FALSE)
+                                       {
+                                               RGXFWIF_UFO *psUpdatePtr = psClientCCB->asFenceUpdateList;
+                                               IMG_UINT32  ui32UpdateIndex;
+
+                                               for (ui32UpdateIndex = 0; ui32UpdateIndex < psClientCCB->ui32UpdateEntries; ui32UpdateIndex++)
+                                               {
+                                                       if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr))
+                                                       {
+                                                               if (RGX_UFO_PTR_ADDR(psUFOPtr) == RGX_UFO_PTR_ADDR(psUpdatePtr))
+                                                               {
+                                                                       PVR_DPF((PVR_DBG_MESSAGE, "Redundant sync checkpoint check found in cCCB(%p) - 0x%x -> 0x%x",
+                                                                                       psClientCCB, RGX_UFO_PTR_ADDR(psUFOPtr), psUFOPtr->ui32Value));
+                                                                       bMessagePrinted = IMG_TRUE;
+                                                                       break;
+                                                               }
+                                                       }
+                                                       else
+                                                       {
+                                                               if (psUFOPtr->puiAddrUFO.ui32Addr == psUpdatePtr->puiAddrUFO.ui32Addr  &&
+                                                                       psUFOPtr->ui32Value == psUpdatePtr->ui32Value)
+                                                               {
+                                                                       PVR_DPF((PVR_DBG_MESSAGE, "Redundant fence check found in cCCB(%p) - 0x%x -> 0x%x",
+                                                                                       psClientCCB, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value));
+                                                                       bMessagePrinted = IMG_TRUE;
+                                                                       break;
+                                                               }
+                                                       }
+                                                       psUpdatePtr++;
+                                               }
+                                       }
+
+                                       psUFOPtr++;
+                               }
+                       }
+                       else if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR  ||
+                                        psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE)
+                       {
+                               /* For all other UFO ops check the UFO address is not NULL. */
+                               IMG_UINT32  ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
+                               RGXFWIF_UFO *psUFOPtr   = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER));
+
+                               while (ui32NumUFOs-- > 0)
+                               {
+                                       PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0);
+                                       psUFOPtr++;
+                               }
+                       }
+
+                       /* Move to the next command in this section of CCB being released... */
+                       pvBufferStart = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER) + psCmdHeader->ui32CmdSize);
+               }
+       }
+#endif /* defined(DEBUG) */
+
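+       /*
+        * Example of what the DEBUG walk above reports (illustrative, using
+        * made-up values): if the released section contains an UPDATE that
+        * writes UFO address 0x1234 to value 5 and a later FENCE in the same
+        * section that waits for address 0x1234 to reach value 5, the fence is
+        * already satisfied by the update and a "Redundant fence check found"
+        * message is printed for the first such match.
+        */
+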
+#if defined(PVRSRV_FORCE_FLUSH_CCCB_ON_KICK)
+       {
+               DEVMEM_MEMDESC* psClientCCBMemDesc = psClientCCB->psClientCCBMemDesc;
+               void *pvClientCCBAddr = psClientCCB->pvClientCCB;
+               PMR *psClientCCBMemDescPMR = NULL;
+               IMG_DEVMEM_OFFSET_T uiPMROffset;
+
+               DevmemGetPMRData(psClientCCBMemDesc,
+                                    (IMG_HANDLE*)&psClientCCBMemDescPMR,
+                                    &uiPMROffset);
+
+               CacheOpValExec(psClientCCBMemDescPMR,
+                                          (IMG_UINT64)(uintptr_t) pvClientCCBAddr,
+                                          uiPMROffset,
+                                          psClientCCBMemDesc->uiAllocSize,
+                                          PVRSRV_CACHE_OP_FLUSH);
+
+       }
+#endif
+       /*
+        * Update the CCB write offset.
+        */
+       UPDATE_CCB_OFFSET(psClientCCB->ui32HostWriteOffset,
+                                         ui32CmdSize,
+                                         psClientCCB->ui32Size);
+       psClientCCB->ui32ByteCount += ui32CmdSize;
+
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+       _RGXUpdateCCBUtilisation(psClientCCB);
+#endif
+       /*
+               PDumpSetFrame will detect the Transition out of the capture range for
+               frame-based data, but if we are PDumping continuous data then we
+               need to inform the PDump layer ourselves.
+
+               The first check confirms we are in continuous mode.
+               The second check confirms the PDump client is connected and ready.
+               The third check confirms we are not in the capture range.
+       */
+#if defined(PDUMP)
+       if (bPDumpFlagsContinuous &&
+               bPDumpEnabled &&
+               !PDumpCheckFlagsWrite(psDeviceNode, PDUMP_FLAGS_NONE))
+       {
+               PVRSRV_ERROR eError;
+
+               /* Only Transitioning into capture range can cause an error */
+               eError = PDumpTransition(psDeviceNode,
+                                        psClientCCB->psPDumpConnectionData,
+                                        PDUMP_TRANSITION_EVENT_RANGE_EXITED,
+                                        ui32PDumpFlags);
+               PVR_ASSERT(eError == PVRSRV_OK);
+       }
+
+       if (bPDumpEnabled)
+       {
+               if (!BIT_ISSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN))
+               {
+                       /* Store the offset of the last finished CCB command. This offset may
+                        * be needed when appending commands to an unfinished CCB.
+                        */
+                       psClientCCB->ui32FinishedPDumpWriteOffset = psClientCCB->ui32LastPDumpWriteOffset;
+               }
+
+               /* Update the PDump write offset to show we PDumped this command */
+               psClientCCB->ui32LastPDumpWriteOffset = psClientCCB->ui32HostWriteOffset;
+       }
+#endif
+
+#if defined(NO_HARDWARE)
+       /*
+               The firmware is not running, so it cannot update these; we do it here instead.
+       */
+       psClientCCB->psClientCCBCtrl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset;
+       psClientCCB->psClientCCBCtrl->ui32DepOffset = psClientCCB->ui32HostWriteOffset;
+#if defined(SUPPORT_AGP)
+       psClientCCB->psClientCCBCtrl->ui32ReadOffset2 = psClientCCB->ui32HostWriteOffset;
+#endif
+#endif
+
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+       OSLockRelease(psClientCCB->hCCBGrowLock);
+#endif
+}
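+
+/*
+       Typical acquire/copy/release sequence (a minimal usage sketch, assuming
+       a caller that already holds a formatted command of ui32CmdSize bytes in
+       pvCmd; everything except the CCB functions themselves is a placeholder):
+
+               void *pvSpace;
+
+               if (RGXAcquireCCB(psClientCCB, ui32CmdSize,
+                                 &pvSpace, PDUMP_FLAGS_CONTINUOUS) == PVRSRV_OK)
+               {
+                       OSCachedMemCopy(pvSpace, pvCmd, ui32CmdSize);
+                       RGXReleaseCCB(psClientCCB, ui32CmdSize, PDUMP_FLAGS_CONTINUOUS);
+               }
+
+       RGXAcquireCCB() may return PVRSRV_ERROR_RETRY, in which case the caller
+       is expected to back off and retry once the firmware has made space.
+*/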
+
+IMG_UINT32 RGXGetHostWriteOffsetCCB(RGX_CLIENT_CCB *psClientCCB)
+{
+       return psClientCCB->ui32HostWriteOffset;
+}
+
+IMG_UINT32 RGXGetWrapMaskCCB(RGX_CLIENT_CCB *psClientCCB)
+{
+       return psClientCCB->ui32Size-1;
+}
+
+PVRSRV_ERROR RGXSetCCBFlags(RGX_CLIENT_CCB *psClientCCB,
+                                                       IMG_UINT32              ui32Flags)
+{
+       if ((ui32Flags & RGX_CONTEXT_FLAG_DISABLESLR))
+       {
+               BIT_SET(psClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED);
+       }
+       else
+       {
+               BIT_UNSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED);
+       }
+       return PVRSRV_OK;
+}
+
+void RGXCmdHelperInitCmdCCB_CommandSize(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                        IMG_UINT64 ui64FBSCEntryMask,
+                                        IMG_UINT32 ui32ClientFenceCount,
+                                        IMG_UINT32 ui32ClientUpdateCount,
+                                        IMG_UINT32 ui32CmdSize,
+                                        PRGXFWIF_TIMESTAMP_ADDR   *ppPreAddr,
+                                        PRGXFWIF_TIMESTAMP_ADDR   *ppPostAddr,
+                                        PRGXFWIF_UFO_ADDR         *ppRMWUFOAddr,
+                                        RGX_CCB_CMD_HELPER_DATA *psCmdHelperData)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+       IMG_BOOL bCacheInval = IMG_TRUE;
+       /* Init the generated data members */
+       psCmdHelperData->ui32FBSCInvalCmdSize = 0;
+       psCmdHelperData->ui64FBSCEntryMask = 0;
+       psCmdHelperData->ui32FenceCmdSize = 0;
+       psCmdHelperData->ui32UpdateCmdSize = 0;
+       psCmdHelperData->ui32PreTimeStampCmdSize = 0;
+       psCmdHelperData->ui32PostTimeStampCmdSize = 0;
+       psCmdHelperData->ui32RMWUFOCmdSize = 0;
+
+       /* Only compile if RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE is defined to avoid
+        * compilation errors on rogue cores.
+        */
+#if defined(RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE)
+       bCacheInval = !(PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE) &&
+                                   PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, USC_INSTRUCTION_CACHE_AUTO_INVALIDATE) &&
+                                   PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, TDM_SLC_MMU_AUTO_CACHE_OPS) &&
+                                   PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, GEOM_SLC_MMU_AUTO_CACHE_OPS) &&
+                                   PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, FRAG_SLC_MMU_AUTO_CACHE_OPS) &&
+                                   PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, COMPUTE_SLC_MMU_AUTO_CACHE_OPS)) ||
+                                   RGX_IS_BRN_SUPPORTED(psDevInfo, 71960) ||
+                                   RGX_IS_BRN_SUPPORTED(psDevInfo, 72143);
+#else
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+#endif
+
+       /* Total FBSC invalidate command size (header plus command data) */
+       if (bCacheInval)
+       {
+               if (ui64FBSCEntryMask != 0)
+               {
+                       psCmdHelperData->ui32FBSCInvalCmdSize =
+                               RGX_CCB_FWALLOC_ALIGN(sizeof(psCmdHelperData->ui64FBSCEntryMask) +
+                                                     sizeof(RGXFWIF_CCB_CMD_HEADER));
+                       psCmdHelperData->ui64FBSCEntryMask = ui64FBSCEntryMask;
+               }
+       }
+
+       /* total DM command size (header plus command data) */
+
+       psCmdHelperData->ui32DMCmdSize =
+               RGX_CCB_FWALLOC_ALIGN(ui32CmdSize + sizeof(RGXFWIF_CCB_CMD_HEADER));
+
+       if (ui32ClientFenceCount != 0)
+       {
+               psCmdHelperData->ui32FenceCmdSize =
+                       RGX_CCB_FWALLOC_ALIGN(ui32ClientFenceCount * sizeof(RGXFWIF_UFO) +
+                                             sizeof(RGXFWIF_CCB_CMD_HEADER));
+       }
+
+       if (ui32ClientUpdateCount != 0)
+       {
+               psCmdHelperData->ui32UpdateCmdSize =
+                       RGX_CCB_FWALLOC_ALIGN(ui32ClientUpdateCount * sizeof(RGXFWIF_UFO) +
+                                             sizeof(RGXFWIF_CCB_CMD_HEADER));
+       }
+
+       if (ppPreAddr && (ppPreAddr->ui32Addr != 0))
+       {
+               psCmdHelperData->ui32PreTimeStampCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER)
+                       + ((sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN - 1));
+       }
+
+       if (ppPostAddr && (ppPostAddr->ui32Addr != 0))
+       {
+               psCmdHelperData->ui32PostTimeStampCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER)
+                       + ((sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN - 1));
+       }
+
+       if (ppRMWUFOAddr && (ppRMWUFOAddr->ui32Addr != 0))
+       {
+               psCmdHelperData->ui32RMWUFOCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER) + sizeof(RGXFWIF_UFO);
+       }
+}
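+
+/*
+       Worked size example for the helper above (illustrative; it assumes only
+       that RGX_CCB_FWALLOC_ALIGN() rounds each sub-command up to the firmware
+       allocation alignment): for a DM command with two client fences, one
+       client update and no FBSC/timestamp/RMW work, the helper records
+
+               ui32FenceCmdSize  = align(2 * sizeof(RGXFWIF_UFO) + sizeof(RGXFWIF_CCB_CMD_HEADER))
+               ui32DMCmdSize     = align(ui32CmdSize             + sizeof(RGXFWIF_CCB_CMD_HEADER))
+               ui32UpdateCmdSize = align(1 * sizeof(RGXFWIF_UFO) + sizeof(RGXFWIF_CCB_CMD_HEADER))
+
+       and RGXCmdHelperGetCommandSize() later adds the per-command totals to
+       size the single RGXAcquireCCB() reservation.
+*/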
+
+/*
+       Fill in the remaining (non-size) command helper data
+*/
+void RGXCmdHelperInitCmdCCB_OtherData(RGX_CLIENT_CCB            *psClientCCB,
+                                      IMG_UINT32                ui32ClientFenceCount,
+                                      PRGXFWIF_UFO_ADDR         *pauiFenceUFOAddress,
+                                      IMG_UINT32                *paui32FenceValue,
+                                      IMG_UINT32                ui32ClientUpdateCount,
+                                      PRGXFWIF_UFO_ADDR         *pauiUpdateUFOAddress,
+                                      IMG_UINT32                *paui32UpdateValue,
+                                      IMG_UINT32                ui32CmdSize,
+                                      IMG_PBYTE                 pui8DMCmd,
+                                      PRGXFWIF_TIMESTAMP_ADDR   *ppPreAddr,
+                                      PRGXFWIF_TIMESTAMP_ADDR   *ppPostAddr,
+                                      PRGXFWIF_UFO_ADDR         *ppRMWUFOAddr,
+                                      RGXFWIF_CCB_CMD_TYPE      eType,
+                                      IMG_UINT32                ui32ExtJobRef,
+                                      IMG_UINT32                ui32IntJobRef,
+                                      IMG_UINT32                ui32PDumpFlags,
+                                      RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData,
+                                      IMG_CHAR                  *pszCommandName,
+                                      IMG_BOOL                  bCCBStateOpen,
+                                      RGX_CCB_CMD_HELPER_DATA   *psCmdHelperData)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = NULL;
+
+       /* Job reference values */
+       psCmdHelperData->ui32ExtJobRef = ui32ExtJobRef;
+       psCmdHelperData->ui32IntJobRef = ui32IntJobRef;
+
+       /* Save the data we require in the submit call */
+       psCmdHelperData->psClientCCB = psClientCCB;
+#if defined(PDUMP)
+       psCmdHelperData->ui32PDumpFlags = ui32PDumpFlags;
+       psDevInfo = FWCommonContextGetRGXDevInfo(psCmdHelperData->psClientCCB->psServerCommonContext);
+#else
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+#endif
+       psCmdHelperData->pszCommandName = pszCommandName;
+       if (bCCBStateOpen)
+       {
+               BIT_SET(psCmdHelperData->psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN);
+       }
+       else
+       {
+               BIT_UNSET(psCmdHelperData->psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN);
+       }
+
+       /* Client sync data */
+       psCmdHelperData->ui32ClientFenceCount = ui32ClientFenceCount;
+       psCmdHelperData->pauiFenceUFOAddress = pauiFenceUFOAddress;
+       psCmdHelperData->paui32FenceValue = paui32FenceValue;
+       psCmdHelperData->ui32ClientUpdateCount = ui32ClientUpdateCount;
+       psCmdHelperData->pauiUpdateUFOAddress = pauiUpdateUFOAddress;
+       psCmdHelperData->paui32UpdateValue = paui32UpdateValue;
+
+       /* Command data */
+       psCmdHelperData->ui32CmdSize = ui32CmdSize;
+       psCmdHelperData->pui8DMCmd = pui8DMCmd;
+       psCmdHelperData->eType = eType;
+
+       if (ppPreAddr)
+       {
+               psCmdHelperData->pPreTimestampAddr = *ppPreAddr;
+       }
+
+       if (ppPostAddr)
+       {
+               psCmdHelperData->pPostTimestampAddr = *ppPostAddr;
+       }
+
+       if (ppRMWUFOAddr)
+       {
+               psCmdHelperData->pRMWUFOAddr = *ppRMWUFOAddr;
+       }
+
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags,
+                       "%s Command Server Init on FWCtx %08x", pszCommandName,
+                       FWCommonContextGetFWAddress(psClientCCB->psServerCommonContext).ui32Addr);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       /* Workload Data added */
+       psCmdHelperData->psWorkEstKickData = psWorkEstKickData;
+#endif
+}
+
+/*
+       Work out how much space this command will require and
+       fill in the command helper data
+*/
+void RGXCmdHelperInitCmdCCB(PVRSRV_RGXDEV_INFO         *psDevInfo,
+                            RGX_CLIENT_CCB            *psClientCCB,
+                            IMG_UINT64                ui64FBSCEntryMask,
+                            IMG_UINT32                ui32ClientFenceCount,
+                            PRGXFWIF_UFO_ADDR         *pauiFenceUFOAddress,
+                            IMG_UINT32                *paui32FenceValue,
+                            IMG_UINT32                ui32ClientUpdateCount,
+                            PRGXFWIF_UFO_ADDR         *pauiUpdateUFOAddress,
+                            IMG_UINT32                *paui32UpdateValue,
+                            IMG_UINT32                ui32CmdSize,
+                            IMG_PBYTE                 pui8DMCmd,
+                            PRGXFWIF_TIMESTAMP_ADDR   *ppPreAddr,
+                            PRGXFWIF_TIMESTAMP_ADDR   *ppPostAddr,
+                            PRGXFWIF_UFO_ADDR         *ppRMWUFOAddr,
+                            RGXFWIF_CCB_CMD_TYPE      eType,
+                            IMG_UINT32                ui32ExtJobRef,
+                            IMG_UINT32                ui32IntJobRef,
+                            IMG_UINT32                ui32PDumpFlags,
+                            RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData,
+                            IMG_CHAR                  *pszCommandName,
+                            IMG_BOOL                  bCCBStateOpen,
+                            RGX_CCB_CMD_HELPER_DATA   *psCmdHelperData)
+{
+       RGXCmdHelperInitCmdCCB_CommandSize(psDevInfo,
+                                        ui64FBSCEntryMask,
+                                        ui32ClientFenceCount,
+                                        ui32ClientUpdateCount,
+                                        ui32CmdSize,
+                                        ppPreAddr,
+                                        ppPostAddr,
+                                        ppRMWUFOAddr,
+                                        psCmdHelperData);
+
+       RGXCmdHelperInitCmdCCB_OtherData(psClientCCB,
+                                        ui32ClientFenceCount,
+                                        pauiFenceUFOAddress,
+                                        paui32FenceValue,
+                                        ui32ClientUpdateCount,
+                                        pauiUpdateUFOAddress,
+                                        paui32UpdateValue,
+                                        ui32CmdSize,
+                                        pui8DMCmd,
+                                        ppPreAddr,
+                                        ppPostAddr,
+                                        ppRMWUFOAddr,
+                                        eType,
+                                        ui32ExtJobRef,
+                                        ui32IntJobRef,
+                                        ui32PDumpFlags,
+                                        psWorkEstKickData,
+                                        pszCommandName,
+                                        bCCBStateOpen,
+                                        psCmdHelperData);
+}
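+
+/*
+       Helper flow in brief (a reading aid for the two stages shown here):
+       RGXCmdHelperInitCmdCCB() only sizes and records the command data in the
+       RGX_CCB_CMD_HELPER_DATA array; RGXCmdHelperAcquireCmdCCB() below then
+       makes a single RGXAcquireCCB() reservation large enough for all of the
+       commands and writes their fence, FBSC-invalidate, timestamp, DM and
+       update sub-commands into the reserved space.
+*/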
+
+/*
+       Reserve space in the CCB and fill in the command and client sync data
+*/
+PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount,
+                                                                          RGX_CCB_CMD_HELPER_DATA *asCmdHelperData)
+{
+       const IMG_UINT32 ui32MaxUFOCmdSize = RGX_CCB_FWALLOC_ALIGN((RGXFWIF_CCB_CMD_MAX_UFOS * sizeof(RGXFWIF_UFO)) +
+                                                                  sizeof(RGXFWIF_CCB_CMD_HEADER));
+       IMG_UINT32 ui32AllocSize = 0;
+       IMG_UINT32 i;
+       void *pvStartPtr;
+       PVRSRV_ERROR eError;
+#if defined(PDUMP)
+       PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(asCmdHelperData->psClientCCB->psServerCommonContext);
+#endif
+
+       /*
+               Check that the number of fences & updates is valid.
+       */
+       for (i = 0; i < ui32CmdCount; i++)
+       {
+               RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = &asCmdHelperData[i];
+
+               if (psCmdHelperData->ui32FenceCmdSize > ui32MaxUFOCmdSize ||
+                   psCmdHelperData->ui32UpdateCmdSize > ui32MaxUFOCmdSize)
+               {
+                       return PVRSRV_ERROR_TOO_MANY_SYNCS;
+               }
+       }
+
+       /*
+               Work out how much space we need for all the command(s)
+       */
+       ui32AllocSize = RGXCmdHelperGetCommandSize(ui32CmdCount, asCmdHelperData);
+
+#if defined(PDUMP)
+       for (i = 0; i < ui32CmdCount; i++)
+       {
+               if ((asCmdHelperData[0].ui32PDumpFlags ^ asCmdHelperData[i].ui32PDumpFlags) & PDUMP_FLAGS_CONTINUOUS)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: PDump continuous is not consistent (%s != %s) for command %d",
+                                        __func__,
+                                        PDUMP_IS_CONTINUOUS(asCmdHelperData[0].ui32PDumpFlags)?"IMG_TRUE":"IMG_FALSE",
+                                        PDUMP_IS_CONTINUOUS(asCmdHelperData[i].ui32PDumpFlags)?"IMG_TRUE":"IMG_FALSE",
+                                        i));
+                       return PVRSRV_ERROR_INVALID_PARAMS;
+               }
+       }
+#endif
+
+       /*
+               Acquire space in the CCB for all the command(s).
+       */
+       eError = RGXAcquireCCB(asCmdHelperData[0].psClientCCB,
+                                                  ui32AllocSize,
+                                                  &pvStartPtr,
+                                                  asCmdHelperData[0].ui32PDumpFlags);
+       if (unlikely(eError != PVRSRV_OK))
+       {
+               return eError;
+       }
+
+       /*
+               For each command fill in the fence, DM, and update commands
+       */
+       for (i = 0; i < ui32CmdCount; i++)
+       {
+               RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = &asCmdHelperData[i];
+               void *pvCmdPtr;
+#if defined(PDUMP)
+               IMG_UINT32 ui32CtxAddr = FWCommonContextGetFWAddress(asCmdHelperData->psClientCCB->psServerCommonContext).ui32Addr;
+               IMG_UINT32 ui32CcbWoff = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(asCmdHelperData->psClientCCB->psServerCommonContext));
+#endif
+
+               if (psCmdHelperData->ui32ClientFenceCount+psCmdHelperData->ui32ClientUpdateCount != 0)
+               {
+                       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                                "Start of %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes",
+                                                psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff);
+               }
+
+               pvCmdPtr = pvStartPtr;
+
+               /*
+                       Create the fence command.
+               */
+               if (psCmdHelperData->ui32FenceCmdSize)
+               {
+                       RGXFWIF_CCB_CMD_HEADER *psHeader;
+                       IMG_UINT k, uiNextValueIndex;
+
+                       psHeader = pvCmdPtr;
+                       psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_FENCE;
+
+                       psHeader->ui32CmdSize = psCmdHelperData->ui32FenceCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+                       psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
+                       psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+                       psHeader->sWorkEstKickData.ui16ReturnDataIndex = 0;
+                       psHeader->sWorkEstKickData.ui64Deadline = 0;
+                       psHeader->sWorkEstKickData.ui32CyclesPrediction = 0;
+#endif
+
+                       pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER));
+
+                       /* Fill in the client fences */
+                       uiNextValueIndex = 0;
+                       for (k = 0; k < psCmdHelperData->ui32ClientFenceCount; k++)
+                       {
+                               RGXFWIF_UFO *psUFOPtr = pvCmdPtr;
+
+                               psUFOPtr->puiAddrUFO = psCmdHelperData->pauiFenceUFOAddress[k];
+
+                               if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr))
+                               {
+                                       psUFOPtr->ui32Value = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+                               }
+                               else
+                               {
+                                       /* Only increment uiNextValueIndex for non sync checkpoints
+                                        * (as paui32FenceValue only contains values for sync prims)
+                                        */
+                                       psUFOPtr->ui32Value = psCmdHelperData->paui32FenceValue[uiNextValueIndex++];
+                               }
+                               pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_UFO));
+
+#if defined(SYNC_COMMAND_DEBUG)
+                               PVR_DPF((PVR_DBG_ERROR, "%s client sync fence - 0x%x -> 0x%x",
+                                               psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value));
+#endif
+                               PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                                        ".. %s client sync fence - 0x%x -> 0x%x",
+                                                        psCmdHelperData->psClientCCB->szName,
+                                                        psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value);
+
+
+                       }
+               }
+
+               /*
+                       Create the FBSC invalidate command.
+               */
+               if (psCmdHelperData->ui32FBSCInvalCmdSize)
+               {
+                       RGXFWIF_CCB_CMD_HEADER *psHeader;
+                       IMG_UINT64 *pui64FBSCInvalCmdData;
+
+                       /* pui8CmdPtr */
+
+                       psHeader = pvCmdPtr;
+                       psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_FBSC_INVALIDATE;
+
+                       psHeader->ui32CmdSize = psCmdHelperData->ui32FBSCInvalCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+                       psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
+                       psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+                       psHeader->sWorkEstKickData.ui16ReturnDataIndex = 0;
+                       psHeader->sWorkEstKickData.ui64Deadline = 0;
+                       psHeader->sWorkEstKickData.ui32CyclesPrediction = 0;
+#endif
+                       pui64FBSCInvalCmdData = IMG_OFFSET_ADDR(psHeader, sizeof(RGXFWIF_CCB_CMD_HEADER));
+                       *pui64FBSCInvalCmdData = psCmdHelperData->ui64FBSCEntryMask;
+                       /* leap over the FBSC invalidate command */
+                       pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, psCmdHelperData->ui32FBSCInvalCmdSize);
+
+               }
+
+               /*
+                 Create the pre-DM timestamp command. The pre and post timestamp commands are meant to
+                 sandwich the DM cmd. The padding code used on CCB wrap upsets the FW if the task type
+                 bit is not cleared for POST_TIMESTAMPs, which is why two different cmd types exist.
+               */
+               if (psCmdHelperData->ui32PreTimeStampCmdSize != 0)
+               {
+                       RGXWriteTimestampCommand(&pvCmdPtr,
+                                                RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP,
+                                                psCmdHelperData->pPreTimestampAddr);
+               }
+
+               /*
+                       Create the DM command
+               */
+               if (psCmdHelperData->ui32DMCmdSize)
+               {
+                       RGXFWIF_CCB_CMD_HEADER *psHeader;
+
+                       psHeader = pvCmdPtr;
+                       psHeader->eCmdType = psCmdHelperData->eType;
+
+                       psHeader->ui32CmdSize = psCmdHelperData->ui32DMCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+                       psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
+                       psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+                       if (psCmdHelperData->psWorkEstKickData != NULL &&
+                               psCmdHelperData->eType != RGXFWIF_CCB_CMD_TYPE_NULL)
+                       {
+                               PVR_ASSERT(psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_GEOM ||
+                                          psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_3D ||
+                                          psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_CDM ||
+                                          psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_TQ_TDM);
+                               psHeader->sWorkEstKickData = *psCmdHelperData->psWorkEstKickData;
+                       }
+                       else
+                       {
+                               psHeader->sWorkEstKickData.ui16ReturnDataIndex = 0;
+                               psHeader->sWorkEstKickData.ui64Deadline = 0;
+                               psHeader->sWorkEstKickData.ui32CyclesPrediction = 0;
+                       }
+#endif
+
+                       pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER));
+
+                       /* The buffer is write-combine, so no special device memory treatment required. */
+                       OSCachedMemCopy(pvCmdPtr, psCmdHelperData->pui8DMCmd, psCmdHelperData->ui32CmdSize);
+                       pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, psCmdHelperData->ui32CmdSize);
+               }
+
+
+               if (psCmdHelperData->ui32PostTimeStampCmdSize != 0)
+               {
+                       RGXWriteTimestampCommand(&pvCmdPtr,
+                                                RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP,
+                                                psCmdHelperData->pPostTimestampAddr);
+               }
+
+
+               if (psCmdHelperData->ui32RMWUFOCmdSize != 0)
+               {
+                       RGXFWIF_CCB_CMD_HEADER * psHeader;
+                       RGXFWIF_UFO            * psUFO;
+
+                       psHeader = (RGXFWIF_CCB_CMD_HEADER *) pvCmdPtr;
+                       psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE;
+                       psHeader->ui32CmdSize = psCmdHelperData->ui32RMWUFOCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+                       psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
+                       psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+                       psHeader->sWorkEstKickData.ui16ReturnDataIndex = 0;
+                       psHeader->sWorkEstKickData.ui64Deadline = 0;
+                       psHeader->sWorkEstKickData.ui32CyclesPrediction = 0;
+#endif
+                       pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER));
+
+                       psUFO = (RGXFWIF_UFO *) pvCmdPtr;
+                       psUFO->puiAddrUFO = psCmdHelperData->pRMWUFOAddr;
+
+                       pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_UFO));
+               }
+
+               /*
+                       Create the update command.
+               */
+               if (psCmdHelperData->ui32UpdateCmdSize)
+               {
+                       RGXFWIF_CCB_CMD_HEADER *psHeader;
+                       IMG_UINT k, uiNextValueIndex;
+
+                       psHeader = pvCmdPtr;
+                       psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_UPDATE;
+                       psHeader->ui32CmdSize = psCmdHelperData->ui32UpdateCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+                       psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
+                       psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+                       psHeader->sWorkEstKickData.ui16ReturnDataIndex = 0;
+                       psHeader->sWorkEstKickData.ui64Deadline = 0;
+                       psHeader->sWorkEstKickData.ui32CyclesPrediction = 0;
+#endif
+                       pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER));
+
+                       /* Fill in the client updates */
+                       uiNextValueIndex = 0;
+                       for (k = 0; k < psCmdHelperData->ui32ClientUpdateCount; k++)
+                       {
+                               RGXFWIF_UFO *psUFOPtr = pvCmdPtr;
+
+                               psUFOPtr->puiAddrUFO = psCmdHelperData->pauiUpdateUFOAddress[k];
+                               if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr))
+                               {
+                                       psUFOPtr->ui32Value = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+                               }
+                               else
+                               {
+                                       /* Only increment uiNextValueIndex for non sync checkpoints
+                                        * (as paui32UpdateValue only contains values for sync prims)
+                                        */
+                                       psUFOPtr->ui32Value = psCmdHelperData->paui32UpdateValue[uiNextValueIndex++];
+                               }
+                               pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_UFO));
+
+#if defined(SYNC_COMMAND_DEBUG)
+                               PVR_DPF((PVR_DBG_ERROR, "%s client sync update - 0x%x -> 0x%x",
+                                               psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value));
+#endif
+                               PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                                        ".. %s client sync update - 0x%x -> 0x%x",
+                                                        psCmdHelperData->psClientCCB->szName,
+                                                        psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value);
+
+                       }
+               }
+
+               /* Set the start pointer for the next iteration around the loop */
+               pvStartPtr = IMG_OFFSET_ADDR(pvStartPtr,
+                       psCmdHelperData->ui32FenceCmdSize         +
+                       psCmdHelperData->ui32FBSCInvalCmdSize     +
+                       psCmdHelperData->ui32PreTimeStampCmdSize  +
+                       psCmdHelperData->ui32DMCmdSize            +
+                       psCmdHelperData->ui32PostTimeStampCmdSize +
+                       psCmdHelperData->ui32RMWUFOCmdSize        +
+                       psCmdHelperData->ui32UpdateCmdSize        );
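+               /* A single helper command now occupies the CCB in the order written above:
+                * FENCE, FBSC_INVALIDATE, PRE_TIMESTAMP, DM, POST_TIMESTAMP, RMW_UPDATE, UPDATE
+                * (each block is present only if its size is non-zero).
+                */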
+
+               if (psCmdHelperData->ui32ClientFenceCount+psCmdHelperData->ui32ClientUpdateCount != 0)
+               {
+                       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                                "End of %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes",
+                                                psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff);
+               }
+               else
+               {
+                       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                                "No %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes",
+                                                psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff);
+               }
+       }
+
+       return PVRSRV_OK;
+}
+
+/*
+       Fill in the server syncs data and release the CCB space
+*/
+void RGXCmdHelperReleaseCmdCCB(IMG_UINT32 ui32CmdCount,
+                                                          RGX_CCB_CMD_HELPER_DATA *asCmdHelperData,
+                                                          const IMG_CHAR *pcszDMName,
+                                                          IMG_UINT32 ui32CtxAddr)
+{
+       IMG_UINT32 ui32AllocSize = 0;
+       IMG_UINT32 i;
+#if defined(__linux__)
+       IMG_BOOL bTraceChecks = trace_rogue_are_fence_checks_traced();
+       IMG_BOOL bTraceUpdates = trace_rogue_are_fence_updates_traced();
+#endif
+
+       /*
+               Work out how much space we need for all the command(s)
+       */
+       ui32AllocSize = RGXCmdHelperGetCommandSize(ui32CmdCount, asCmdHelperData);
+       /*
+               For each command fill in the server sync info
+       */
+       for (i=0;i<ui32CmdCount;i++)
+       {
+               RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = &asCmdHelperData[i];
+#if defined(PDUMP)
+               PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psCmdHelperData->psClientCCB->psServerCommonContext);
+#endif
+
+#if (!defined(__linux__) || !defined(SUPPORT_RGX)) && !defined(PDUMP)
+               PVR_UNREFERENCED_PARAMETER(psCmdHelperData);
+#endif
+
+#if defined(__linux__) && defined(SUPPORT_RGX)
+               if (bTraceChecks)
+               {
+                       trace_rogue_fence_checks(psCmdHelperData->pszCommandName,
+                                                                        pcszDMName,
+                                                                        ui32CtxAddr,
+                                                                        psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
+                                                                        psCmdHelperData->ui32ClientFenceCount,
+                                                                        psCmdHelperData->pauiFenceUFOAddress,
+                                                                        psCmdHelperData->paui32FenceValue);
+               }
+               if (bTraceUpdates)
+               {
+                       trace_rogue_fence_updates(psCmdHelperData->pszCommandName,
+                                                                         pcszDMName,
+                                                                         ui32CtxAddr,
+                                                                         psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
+                                                                         psCmdHelperData->ui32ClientUpdateCount,
+                                                                         psCmdHelperData->pauiUpdateUFOAddress,
+                                                                         psCmdHelperData->paui32UpdateValue);
+               }
+#endif
+
+               /*
+                       All the commands have been filled in so release the CCB space.
+                       The FW still won't run this command until we kick it
+               */
+               PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode,
+                               psCmdHelperData->ui32PDumpFlags,
+                               "%s Command Server Release on FWCtx %08x",
+                               psCmdHelperData->pszCommandName, ui32CtxAddr);
+       }
+
+       RGXReleaseCCB(asCmdHelperData[0].psClientCCB,
+                                 ui32AllocSize,
+                                 asCmdHelperData[0].ui32PDumpFlags);
+
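+       /* All commands for this kick are now in the CCB and the space has been
+        * released, so clear the 'CCB open' state flag.
+        */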
+       BIT_UNSET(asCmdHelperData[0].psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN);
+}
+
+IMG_UINT32 RGXCmdHelperGetCommandSize(IMG_UINT32              ui32CmdCount,
+                                      RGX_CCB_CMD_HELPER_DATA *asCmdHelperData)
+{
+       IMG_UINT32 ui32AllocSize = 0;
+       IMG_UINT32 i;
+
+       /*
+               Work out how much space we need for all the command(s)
+       */
+       for (i = 0; i < ui32CmdCount; i++)
+       {
+               ui32AllocSize +=
+                       asCmdHelperData[i].ui32FenceCmdSize          +
+                       asCmdHelperData[i].ui32FBSCInvalCmdSize      +
+                       asCmdHelperData[i].ui32DMCmdSize             +
+                       asCmdHelperData[i].ui32UpdateCmdSize         +
+                       asCmdHelperData[i].ui32PreTimeStampCmdSize   +
+                       asCmdHelperData[i].ui32PostTimeStampCmdSize  +
+                       asCmdHelperData[i].ui32RMWUFOCmdSize;
+       }
+
+       return ui32AllocSize;
+}
+
+/* Work out how much of an offset there is to a specific command. */
+IMG_UINT32 RGXCmdHelperGetCommandOffset(RGX_CCB_CMD_HELPER_DATA *asCmdHelperData,
+                                        IMG_UINT32              ui32Cmdindex)
+{
+       IMG_UINT32 ui32Offset = 0;
+       IMG_UINT32 i;
+
+       for (i = 0; i < ui32Cmdindex; i++)
+       {
+               ui32Offset +=
+                       asCmdHelperData[i].ui32FenceCmdSize          +
+                       asCmdHelperData[i].ui32FBSCInvalCmdSize      +
+                       asCmdHelperData[i].ui32DMCmdSize             +
+                       asCmdHelperData[i].ui32UpdateCmdSize         +
+                       asCmdHelperData[i].ui32PreTimeStampCmdSize   +
+                       asCmdHelperData[i].ui32PostTimeStampCmdSize  +
+                       asCmdHelperData[i].ui32RMWUFOCmdSize;
+       }
+
+       return ui32Offset;
+}
+
+/* Returns the offset of the data master command from a write offset */
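+/* (The DM command header sits after the fence, FBSC invalidate and pre-timestamp
+ * commands written for the same kick, hence the three sizes summed below.)
+ */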
+IMG_UINT32 RGXCmdHelperGetDMCommandHeaderOffset(RGX_CCB_CMD_HELPER_DATA *psCmdHelperData)
+{
+       return psCmdHelperData->ui32FenceCmdSize +
+                  psCmdHelperData->ui32PreTimeStampCmdSize +
+                  psCmdHelperData->ui32FBSCInvalCmdSize;
+}
+
+static const char *_CCBCmdTypename(RGXFWIF_CCB_CMD_TYPE cmdType)
+{
+       switch (cmdType)
+       {
+               case RGXFWIF_CCB_CMD_TYPE_GEOM: return "TA";
+               case RGXFWIF_CCB_CMD_TYPE_3D: return "3D";
+               case RGXFWIF_CCB_CMD_TYPE_3D_PR: return "3D_PR";
+               case RGXFWIF_CCB_CMD_TYPE_CDM: return "CDM";
+               case RGXFWIF_CCB_CMD_TYPE_TQ_3D: return "TQ_3D";
+               case RGXFWIF_CCB_CMD_TYPE_TQ_2D: return "TQ_2D";
+               case RGXFWIF_CCB_CMD_TYPE_TQ_TDM: return "TQ_TDM";
+               case RGXFWIF_CCB_CMD_TYPE_FBSC_INVALIDATE: return "FBSC_INVALIDATE";
+               case RGXFWIF_CCB_CMD_TYPE_NULL: return "NULL";
+               case RGXFWIF_CCB_CMD_TYPE_FENCE: return "FENCE";
+               case RGXFWIF_CCB_CMD_TYPE_UPDATE: return "UPDATE";
+               case RGXFWIF_CCB_CMD_TYPE_FENCE_PR: return "FENCE_PR";
+               case RGXFWIF_CCB_CMD_TYPE_PRIORITY: return "PRIORITY";
+               case RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE: return "UNFENCED_UPDATE";
+               case RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP: return "PRE_TIMESTAMP";
+               case RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE: return "RMW_UPDATE";
+               case RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP: return "POST_TIMESTAMP";
+               case RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE: return "UNFENCED_RMW_UPDATE";
+               case RGXFWIF_CCB_CMD_TYPE_PADDING: return "PADDING";
+
+               default:
+                       PVR_ASSERT(IMG_FALSE);
+               break;
+       }
+
+       return "INVALID";
+}
+
+PVRSRV_ERROR CheckForStalledCCB(PVRSRV_DEVICE_NODE *psDevNode, RGX_CLIENT_CCB *psCurrentClientCCB, RGX_KICK_TYPE_DM eKickTypeDM)
+{
+       volatile RGXFWIF_CCCB_CTL       *psClientCCBCtrl;
+       IMG_UINT32                                      ui32SampledRdOff, ui32SampledDpOff, ui32SampledWrOff, ui32WrapMask;
+       PVRSRV_ERROR                            eError = PVRSRV_OK;
+
+       if (psCurrentClientCCB == NULL)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "CheckForStalledCCB: CCCB is NULL"));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+       /* If CCB grow is enabled, take the lock while sampling offsets
+        * (to guard against a grow happening mid-sample)
+        */
+       OSLockAcquire(psCurrentClientCCB->hCCBGrowLock);
+#endif
+       /* NB. use psCurrentClientCCB->ui32Size as basis for wrap mask (rather than psClientCCBCtrl->ui32WrapMask)
+        * as if CCB grow happens, psCurrentClientCCB->ui32Size will have been updated but
+        * psClientCCBCtrl->ui32WrapMask is only updated once the firmware sees the CCB has grown.
+        * If we use the wrong value, we might incorrectly determine that the offsets are invalid.
+        */
+       ui32WrapMask = RGXGetWrapMaskCCB(psCurrentClientCCB);
+       psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl;
+       ui32SampledRdOff = psClientCCBCtrl->ui32ReadOffset;
+       ui32SampledDpOff = psClientCCBCtrl->ui32DepOffset;
+       ui32SampledWrOff = psCurrentClientCCB->ui32HostWriteOffset;
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+       OSLockRelease(psCurrentClientCCB->hCCBGrowLock);
+#endif
+
+       if (ui32SampledRdOff > ui32WrapMask ||
+               ui32SampledDpOff > ui32WrapMask ||
+               ui32SampledWrOff > ui32WrapMask)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "CheckForStalledCCB: CCCB has invalid offset (ROFF=%d DOFF=%d WOFF=%d)",
+                               ui32SampledRdOff, ui32SampledDpOff, ui32SampledWrOff));
+               return PVRSRV_ERROR_INVALID_OFFSET;
+       }
+
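+       /* Stall heuristic: there is outstanding work now and there was at the last
+        * sample, the read offset has not advanced since then, and fewer than a full
+        * CCB's worth of bytes has been written in the meantime (so an unchanged read
+        * offset cannot simply be a complete wrap-around).
+        */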
+       if (ui32SampledRdOff != ui32SampledWrOff &&
+                               psCurrentClientCCB->ui32LastROff != psCurrentClientCCB->ui32LastWOff &&
+                               ui32SampledRdOff == psCurrentClientCCB->ui32LastROff &&
+                               (psCurrentClientCCB->ui32ByteCount - psCurrentClientCCB->ui32LastByteCount) < psCurrentClientCCB->ui32Size)
+       {
+               PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDevNode->pvDevice;
+
+               /* Only log a stalled CCB if GPU is idle (any state other than POW_ON is considered idle).
+                * Guest drivers do not initialize psRGXFWIfFwSysData, so they assume FW internal state is ON. */
+               if (((psDevInfo->psRGXFWIfFwSysData == NULL) || (psDevInfo->psRGXFWIfFwSysData->ePowState != RGXFWIF_POW_ON)) &&
+                       (psDevInfo->ui32SLRHoldoffCounter == 0))
+               {
+                       static __maybe_unused const char *pszStalledAction =
+#if defined(PVRSRV_STALLED_CCB_ACTION)
+                                       "force";
+#else
+                                       "warn";
+#endif
+                       /* Don't log this by default unless debugging since a higher up
+                        * function will log the stalled condition. Helps avoid double
+                        * messages in the log.
+                        */
+                       PVR_DPF((PVR_DBG_ERROR, "%s (%s): CCCB has not progressed (ROFF=%d DOFF=%d WOFF=%d) for \"%s\"",
+                                       __func__, pszStalledAction, ui32SampledRdOff,
+                                       ui32SampledDpOff, ui32SampledWrOff,
+                                       psCurrentClientCCB->szName));
+                       eError = PVRSRV_ERROR_CCCB_STALLED;
+
+                       {
+                               void                            *pvClientCCBBuff = psCurrentClientCCB->pvClientCCB;
+                               RGXFWIF_CCB_CMD_HEADER  *psCommandHeader = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledRdOff);
+                               PVRSRV_RGXDEV_INFO              *psDevInfo = FWCommonContextGetRGXDevInfo(psCurrentClientCCB->psServerCommonContext);
+
+                               /* Special case - if readOffset is on a PADDING packet, CCB has wrapped.
+                                * In this case, skip over the PADDING packet.
+                                */
+                               if (psCommandHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_PADDING)
+                               {
+                                       psCommandHeader = IMG_OFFSET_ADDR(pvClientCCBBuff,
+                                                                                    ((ui32SampledRdOff +
+                                                                                      psCommandHeader->ui32CmdSize +
+                                                                                      sizeof(RGXFWIF_CCB_CMD_HEADER))
+                                                                                     & psCurrentClientCCB->psClientCCBCtrl->ui32WrapMask));
+                               }
+
+                               /* Only try to recover a 'stalled' context (ie one waiting on a fence), as some work (eg compute) could
+                                * take a long time to complete, during which time the CCB ptrs would not advance.
+                                */
+                               if (((psCommandHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE) ||
+                                    (psCommandHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR)) &&
+                                   (psCommandHeader != IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledWrOff)))
+                               {
+                                       /* Acquire the cCCB recovery lock */
+                                       OSLockAcquire(psDevInfo->hCCBRecoveryLock);
+
+                                       if (!psDevInfo->pvEarliestStalledClientCCB)
+                                       {
+                                               psDevInfo->pvEarliestStalledClientCCB = (void*)psCurrentClientCCB;
+                                               psDevInfo->ui32OldestSubmissionOrdinal = psCommandHeader->ui32IntJobRef;
+                                       }
+                                       else
+                                       {
+                                               /* Check if this fence cmd header has an older submission stamp than the one we are currently considering unblocking
+                                                * (account for submission stamp wrap by checking the diff is less than 0x80000000) - if it is older, then this becomes
+                                                * our preferred fence to be unblocked.
+                                                */
+                                               if ((psCommandHeader->ui32IntJobRef < psDevInfo->ui32OldestSubmissionOrdinal) &&
+                                                   ((psDevInfo->ui32OldestSubmissionOrdinal - psCommandHeader->ui32IntJobRef) < 0x8000000))
+                                               {
+                                                       psDevInfo->pvEarliestStalledClientCCB = (void*)psCurrentClientCCB;
+                                                       psDevInfo->ui32OldestSubmissionOrdinal = psCommandHeader->ui32IntJobRef;
+                                               }
+                                       }
+
+                                       /* Release the cCCB recovery lock */
+                                       OSLockRelease(psDevInfo->hCCBRecoveryLock);
+                               }
+                       }
+               }
+       }
+
+       psCurrentClientCCB->ui32LastROff = ui32SampledRdOff;
+       psCurrentClientCCB->ui32LastWOff = ui32SampledWrOff;
+       psCurrentClientCCB->ui32LastByteCount = psCurrentClientCCB->ui32ByteCount;
+
+       return eError;
+}
+
+void DumpCCB(PVRSRV_RGXDEV_INFO *psDevInfo,
+                       PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext,
+                       RGX_CLIENT_CCB *psCurrentClientCCB,
+                       DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                       void *pvDumpDebugFile)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+       volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl;
+       void *pvClientCCBBuff;
+       IMG_UINT32 ui32Offset;
+       IMG_UINT32 ui32DepOffset;
+       IMG_UINT32 ui32EndOffset;
+       IMG_UINT32 ui32WrapMask;
+       IMG_CHAR * pszState = "Ready";
+
+       /* Ensure hCCBGrowLock is acquired before reading
+        * psCurrentClientCCB->pvClientCCB as a CCB grow
+        * could remap the virtual addresses.
+        */
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+       OSLockAcquire(psCurrentClientCCB->hCCBGrowLock);
+#endif
+       psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl;
+       pvClientCCBBuff = psCurrentClientCCB->pvClientCCB;
+       ui32EndOffset = psCurrentClientCCB->ui32HostWriteOffset;
+       OSMemoryBarrier(NULL);
+       ui32Offset = psClientCCBCtrl->ui32ReadOffset;
+       ui32DepOffset = psClientCCBCtrl->ui32DepOffset;
+       /* NB. Use psCurrentClientCCB->ui32Size as basis for wrap mask (rather
+        * than psClientCCBCtrl->ui32WrapMask) as if CCB grow happened,
+        * psCurrentClientCCB->ui32Size will have been updated but
+        * psClientCCBCtrl->ui32WrapMask is only updated once the firmware
+        * sees the CCB has grown. If we use the wrong value, ui32NextOffset
+        * can end up being wrapped prematurely and pointing to garbage.
+        */
+       ui32WrapMask = RGXGetWrapMaskCCB(psCurrentClientCCB);
+
+       PVR_DUMPDEBUG_LOG("FWCtx 0x%08X (%s)", sFWCommonContext.ui32Addr, psCurrentClientCCB->szName);
+       if (ui32Offset == ui32EndOffset)
+       {
+               PVR_DUMPDEBUG_LOG("  `--<Empty>");
+       }
+
+       while (ui32Offset != ui32EndOffset)
+       {
+               RGXFWIF_CCB_CMD_HEADER *psCmdHeader = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32Offset);
+               IMG_UINT32 ui32NextOffset = (ui32Offset + psCmdHeader->ui32CmdSize + sizeof(RGXFWIF_CCB_CMD_HEADER)) & ui32WrapMask;
+               IMG_BOOL bLastCommand = (ui32NextOffset == ui32EndOffset)? IMG_TRUE: IMG_FALSE;
+               IMG_BOOL bLastUFO;
+               #define CCB_SYNC_INFO_LEN 80
+               IMG_CHAR pszSyncInfo[CCB_SYNC_INFO_LEN];
+               IMG_UINT32 ui32NoOfUpdates, i;
+               RGXFWIF_UFO *psUFOPtr;
+
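+               /* For fence/update-type commands the payload after the header is an
+                * array of RGXFWIF_UFO entries; only those cases below use this count.
+                */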
+               ui32NoOfUpdates = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
+               psUFOPtr = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32Offset + sizeof(RGXFWIF_CCB_CMD_HEADER));
+               pszSyncInfo[0] = '\0';
+
+               if (ui32Offset == ui32DepOffset)
+               {
+                       pszState = "Waiting";
+               }
+
+               PVR_DUMPDEBUG_LOG("  %s--%s %s @ %u Int=%u Ext=%u",
+                       bLastCommand? "`": "|",
+                       pszState, _CCBCmdTypename(psCmdHeader->eCmdType),
+                       ui32Offset, psCmdHeader->ui32IntJobRef, psCmdHeader->ui32ExtJobRef
+                       );
+
+               /* switch on type and write checks and updates */
+               switch (psCmdHeader->eCmdType)
+               {
+                       case RGXFWIF_CCB_CMD_TYPE_UPDATE:
+                       case RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE:
+                       case RGXFWIF_CCB_CMD_TYPE_FENCE:
+                       case RGXFWIF_CCB_CMD_TYPE_FENCE_PR:
+                       {
+                               for (i = 0; i < ui32NoOfUpdates; i++, psUFOPtr++)
+                               {
+                                       bLastUFO = (ui32NoOfUpdates-1 == i)? IMG_TRUE: IMG_FALSE;
+
+                                       if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED)
+                                       {
+                                               if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr))
+                                               {
+                                                       SyncCheckpointRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr,
+                                                                               pszSyncInfo, CCB_SYNC_INFO_LEN);
+                                               }
+                                               else
+                                               {
+                                                       SyncRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr,
+                                                                               pszSyncInfo, CCB_SYNC_INFO_LEN);
+                                               }
+                                       }
+
+                                       PVR_DUMPDEBUG_LOG("  %s  %s--Addr:0x%08x Val=0x%08x %s",
+                                               bLastCommand? " ": "|",
+                                               bLastUFO? "`": "|",
+                                               psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value,
+                                               pszSyncInfo
+                                               );
+                               }
+                               break;
+                       }
+                       case RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE:
+                       case RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE:
+                       {
+                               for (i = 0; i < ui32NoOfUpdates; i++, psUFOPtr++)
+                               {
+                                       bLastUFO = (ui32NoOfUpdates-1 == i)? IMG_TRUE: IMG_FALSE;
+
+                                       if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED)
+                                       {
+                                               if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr))
+                                               {
+                                                       SyncCheckpointRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr,
+                                                                               pszSyncInfo, CCB_SYNC_INFO_LEN);
+                                               }
+                                               else
+                                               {
+                                                       SyncRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr,
+                                                                               pszSyncInfo, CCB_SYNC_INFO_LEN);
+                                               }
+                                       }
+
+                                       PVR_DUMPDEBUG_LOG("  %s  %s--Addr:0x%08x Val++ %s",
+                                               bLastCommand? " ": "|",
+                                               bLastUFO? "`": "|",
+                                               psUFOPtr->puiAddrUFO.ui32Addr,
+                                               pszSyncInfo
+                                               );
+                               }
+                               break;
+                       }
+                       default:
+                               break;
+               }
+               ui32Offset = ui32NextOffset;
+       }
+
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+       OSLockRelease(psCurrentClientCCB->hCCBGrowLock);
+#endif
+}
+
+void DumpStalledCCBCommand(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext,
+                               RGX_CLIENT_CCB *psCurrentClientCCB,
+                               DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                               void *pvDumpDebugFile)
+{
+       volatile RGXFWIF_CCCB_CTL       *psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl;
+       void                                    *pvClientCCBBuff = psCurrentClientCCB->pvClientCCB;
+       volatile void                   *pvPtr;
+       IMG_UINT32                                      ui32SampledRdOff = psClientCCBCtrl->ui32ReadOffset;
+       IMG_UINT32                                      ui32SampledDepOff = psClientCCBCtrl->ui32DepOffset;
+       IMG_UINT32                                      ui32SampledWrOff = psCurrentClientCCB->ui32HostWriteOffset;
+
+       pvPtr = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledRdOff);
+
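+       /* The context is treated as stalled here if the FW's dependency offset equals
+        * the read offset (it is waiting on the command at that offset) and there is
+        * still unconsumed work in the CCB (read offset != write offset).
+        */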
+       if ((ui32SampledRdOff == ui32SampledDepOff) &&
+               (ui32SampledRdOff != ui32SampledWrOff))
+       {
+               volatile RGXFWIF_CCB_CMD_HEADER *psCommandHeader = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledRdOff);
+               RGXFWIF_CCB_CMD_TYPE    eCommandType = psCommandHeader->eCmdType;
+               volatile void                           *pvPtr = psCommandHeader;
+
+               /* CCB is stalled on a fence... */
+               if ((eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE) || (eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR))
+               {
+#if defined(SUPPORT_FW_VIEW_EXTRA_DEBUG)
+                       PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psCurrentClientCCB->psServerCommonContext);
+                       IMG_UINT32 ui32Val;
+#endif
+                       RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvPtr, sizeof(*psCommandHeader));
+                       IMG_UINT32 jj;
+
+                       /* Display details of the fence object on which the context is pending */
+                       PVR_DUMPDEBUG_LOG("FWCtx 0x%08X @ %d (%s) pending on %s:",
+                                                          sFWCommonContext.ui32Addr,
+                                                          ui32SampledRdOff,
+                                                          psCurrentClientCCB->szName,
+                                                          _CCBCmdTypename(eCommandType));
+                       for (jj=0; jj<psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO); jj++)
+                       {
+#if !defined(SUPPORT_FW_VIEW_EXTRA_DEBUG)
+                               PVR_DUMPDEBUG_LOG("  Addr:0x%08x  Value=0x%08x",psUFOPtr[jj].puiAddrUFO.ui32Addr, psUFOPtr[jj].ui32Value);
+#else
+                               ui32Val = 0;
+                               RGXReadFWModuleAddr(psDevInfo, psUFOPtr[jj].puiAddrUFO.ui32Addr, &ui32Val);
+                               PVR_DUMPDEBUG_LOG("  Addr:0x%08x Value(Host)=0x%08x Value(FW)=0x%08x",
+                                                  psUFOPtr[jj].puiAddrUFO.ui32Addr,
+                                                  psUFOPtr[jj].ui32Value, ui32Val);
+#endif
+                       }
+
+                       /* Advance psCommandHeader past the FENCE to the next command header (this will be the TA/3D command that is fenced) */
+                       pvPtr = IMG_OFFSET_ADDR(psUFOPtr, psCommandHeader->ui32CmdSize);
+                       psCommandHeader = pvPtr;
+                       if (psCommandHeader != IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledWrOff))
+                       {
+                               PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X fenced command is of type %s",sFWCommonContext.ui32Addr, _CCBCmdTypename(psCommandHeader->eCmdType));
+                               /* Advance psCommandHeader past the TA/3D to the next command header (this will possibly be an UPDATE) */
+                               pvPtr = IMG_OFFSET_ADDR(pvPtr, sizeof(*psCommandHeader) + psCommandHeader->ui32CmdSize);
+                               psCommandHeader = pvPtr;
+                               /* If the next command is an update, display details of that so we can see what would then become unblocked */
+                               if (psCommandHeader != IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledWrOff))
+                               {
+                                       eCommandType = psCommandHeader->eCmdType;
+
+                                       if (eCommandType == RGXFWIF_CCB_CMD_TYPE_UPDATE)
+                                       {
+                                               psUFOPtr = IMG_OFFSET_ADDR(psCommandHeader, sizeof(*psCommandHeader));
+                                               PVR_DUMPDEBUG_LOG(" preventing %s:",_CCBCmdTypename(eCommandType));
+                                               for (jj=0; jj<psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO); jj++)
+                                               {
+#if !defined(SUPPORT_FW_VIEW_EXTRA_DEBUG)
+                                                       PVR_DUMPDEBUG_LOG("  Addr:0x%08x  Value=0x%08x",psUFOPtr[jj].puiAddrUFO.ui32Addr, psUFOPtr[jj].ui32Value);
+#else
+                                                       ui32Val = 0;
+                                                       RGXReadFWModuleAddr(psDevInfo, psUFOPtr[jj].puiAddrUFO.ui32Addr, &ui32Val);
+                                                       PVR_DUMPDEBUG_LOG("  Addr:0x%08x Value(Host)=0x%08x Value(FW)=0x%08x",
+                                                                          psUFOPtr[jj].puiAddrUFO.ui32Addr,
+                                                                          psUFOPtr[jj].ui32Value,
+                                                                          ui32Val);
+#endif
+                                               }
+                                       }
+                               }
+                               else
+                               {
+                                       PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X has no further commands",sFWCommonContext.ui32Addr);
+                               }
+                       }
+                       else
+                       {
+                               PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X has no further commands",sFWCommonContext.ui32Addr);
+                       }
+               }
+       }
+}
+
+void DumpStalledContextInfo(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       RGX_CLIENT_CCB *psStalledClientCCB;
+
+       PVR_ASSERT(psDevInfo);
+
+       psStalledClientCCB = (RGX_CLIENT_CCB *)psDevInfo->pvEarliestStalledClientCCB;
+
+       if (psStalledClientCCB)
+       {
+               volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl = psStalledClientCCB->psClientCCBCtrl;
+               IMG_UINT32 ui32SampledDepOffset = psClientCCBCtrl->ui32DepOffset;
+               void                 *pvPtr = IMG_OFFSET_ADDR(psStalledClientCCB->pvClientCCB, ui32SampledDepOffset);
+               RGXFWIF_CCB_CMD_HEADER    *psCommandHeader = pvPtr;
+               RGXFWIF_CCB_CMD_TYPE      eCommandType = psCommandHeader->eCmdType;
+
+               if ((eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE) || (eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR))
+               {
+                       RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvPtr, sizeof(*psCommandHeader));
+                       IMG_UINT32 jj;
+                       IMG_UINT32 ui32NumUnsignalledUFOs = 0;
+                       IMG_UINT32 ui32UnsignalledUFOVaddrs[PVRSRV_MAX_SYNCS];
+
+#if defined(PVRSRV_STALLED_CCB_ACTION)
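+                       /* Record this forced-update (SLR) event in the FW OS data: the very first
+                        * event goes into sSLRLogFirst, later events into the circular sSLRLog
+                        * buffer indexed by ui8SLRLogWp.
+                        */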
+                       if (!psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.aszCCBName[0])
+                       {
+                               OSClockMonotonicns64(&psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.ui64Timestamp);
+                               psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.ui32NumUFOs = (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO));
+                               psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.ui32FWCtxAddr = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext).ui32Addr;
+                               OSStringLCopy(psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.aszCCBName,
+                                             psStalledClientCCB->szName,
+                                             MAX_CLIENT_CCB_NAME);
+                       }
+                       else
+                       {
+                               OSClockMonotonicns64(&psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].ui64Timestamp);
+                               psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].ui32NumUFOs = (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO));
+                               psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].ui32FWCtxAddr = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext).ui32Addr;
+                               OSStringLCopy(psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].aszCCBName,
+                                             psStalledClientCCB->szName,
+                                             MAX_CLIENT_CCB_NAME);
+                               psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp = (psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp + 1) % PVR_SLR_LOG_ENTRIES;
+                       }
+                       psDevInfo->psRGXFWIfFwOsData->ui32ForcedUpdatesRequested++;
+                       /* flush write buffers for psRGXFWIfFwOsData */
+                       OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp]);
+#endif
+                       PVR_LOG(("Fence found on context 0x%x '%s' @ %d has %d UFOs",
+                                FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext).ui32Addr,
+                                psStalledClientCCB->szName, ui32SampledDepOffset,
+                                (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO))));
+
+                       for (jj=0; jj<psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO); jj++)
+                       {
+                               if (PVRSRV_UFO_IS_SYNC_CHECKPOINT((RGXFWIF_UFO *)&psUFOPtr[jj]))
+                               {
+                                       IMG_UINT32 ui32ReadValue = SyncCheckpointStateFromUFO(psDevInfo->psDeviceNode,
+                                                                                  psUFOPtr[jj].puiAddrUFO.ui32Addr);
+                                       PVR_LOG(("  %d/%d FWAddr 0x%x requires 0x%x (currently 0x%x)", jj+1,
+                                                          (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)),
+                                                          psUFOPtr[jj].puiAddrUFO.ui32Addr,
+                                                          psUFOPtr[jj].ui32Value,
+                                                          ui32ReadValue));
+                                       /* If fence is unmet, dump debug info on it */
+                                       if (ui32ReadValue != psUFOPtr[jj].ui32Value)
+                                       {
+                                               /* Add to our list to pass to pvr_sync */
+                                               ui32UnsignalledUFOVaddrs[ui32NumUnsignalledUFOs] = psUFOPtr[jj].puiAddrUFO.ui32Addr;
+                                               ui32NumUnsignalledUFOs++;
+                                       }
+                               }
+                               else
+                               {
+                                       PVR_LOG(("  %d/%d FWAddr 0x%x requires 0x%x", jj+1,
+                                                          (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)),
+                                                          psUFOPtr[jj].puiAddrUFO.ui32Addr,
+                                                          psUFOPtr[jj].ui32Value));
+                               }
+                       }
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC)
+                       if (ui32NumUnsignalledUFOs > 0)
+                       {
+                               IMG_UINT32 ui32NumSyncsOwned;
+                               PVRSRV_ERROR eErr = SyncCheckpointDumpInfoOnStalledUFOs(ui32NumUnsignalledUFOs, &ui32UnsignalledUFOVaddrs[0], &ui32NumSyncsOwned);
+
+                               PVR_LOG_IF_ERROR(eErr, "SyncCheckpointDumpInfoOnStalledUFOs() call failed.");
+                       }
+#endif
+#if defined(PVRSRV_STALLED_CCB_ACTION)
+                       if (BIT_ISSET(psStalledClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED))
+                       {
+                               PRGXFWIF_FWCOMMONCONTEXT psContext = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext);
+
+                               PVR_LOG(("SLR disabled for FWCtx 0x%08X", psContext.ui32Addr));
+                       }
+                       else
+                       {
+                               if (ui32NumUnsignalledUFOs > 0)
+                               {
+                                       RGXFWIF_KCCB_CMD sSignalFencesCmd;
+
+                                       sSignalFencesCmd.eCmdType = RGXFWIF_KCCB_CMD_FORCE_UPDATE;
+                                       sSignalFencesCmd.ui32KCCBFlags = 0;
+                                       sSignalFencesCmd.uCmdData.sForceUpdateData.psContext = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext);
+                                       sSignalFencesCmd.uCmdData.sForceUpdateData.ui32CCBFenceOffset = ui32SampledDepOffset;
+
+                                       PVR_LOG(("Forced update command issued for FWCtx 0x%08X", sSignalFencesCmd.uCmdData.sForceUpdateData.psContext.ui32Addr));
+
+                                       RGXScheduleCommand(FWCommonContextGetRGXDevInfo(psStalledClientCCB->psServerCommonContext),
+                                                          RGXFWIF_DM_GP,
+                                                          &sSignalFencesCmd,
+                                                          PDUMP_FLAGS_CONTINUOUS);
+                               }
+                       }
+#endif
+               }
+               psDevInfo->pvEarliestStalledClientCCB = NULL;
+       }
+}
+
+/******************************************************************************
+ End of file (rgxccb.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxccb.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxccb.h
new file mode 100644
index 0000000..0dddee1
--- /dev/null
@@ -0,0 +1,356 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Circular Command Buffer functionality.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX Circular Command Buffer functionality.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXCCB_H)
+#define RGXCCB_H
+
+#include "devicemem.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "sync_server.h"
+#include "connection_server.h"
+#include "rgxdebug.h"
+#include "rgxdefs_km.h"
+#include "pvr_notifier.h"
+
+#define MAX_CLIENT_CCB_NAME    30
+#define SYNC_FLAG_MASK_ALL  IMG_UINT32_MAX
+
+/*
+ * This size is used when a client CCB is found to consume negligible space
+ * (e.g. a few hundred bytes to a few KB - less than a page). In such a case,
+ * instead of allocating a CCB of only a few KB, we allocate at least this
+ * much so there is headroom for future growth.
+ */
+#define MIN_SAFE_CCB_SIZE_LOG2         13  /* 8K (2 Pages) */
+#define MAX_SAFE_CCB_SIZE_LOG2         18  /* 256K (64 Pages) */
+
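+/* Each data master has a default CCB size and a maximum CCB size (both log2);
+ * the asserts below keep every one of them within the safe range defined above.
+ */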
+#define RGX_TQ3D_CCB_SIZE_LOG2         PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ3D
+static_assert(RGX_TQ3D_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 &&
+       RGX_TQ3D_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TQ3D CCB size is invalid");
+#define RGX_TQ3D_CCB_MAX_SIZE_LOG2             PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ3D
+static_assert(RGX_TQ3D_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ3D
+       && RGX_TQ3D_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TQ3D max CCB size is invalid");
+
+#define RGX_TQ2D_CCB_SIZE_LOG2         PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D
+static_assert(RGX_TQ2D_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 &&
+       RGX_TQ2D_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TQ2D CCB size is invalid");
+#define RGX_TQ2D_CCB_MAX_SIZE_LOG2             PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ2D
+static_assert(RGX_TQ2D_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D &&
+       RGX_TQ2D_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TQ2D max CCB size is invalid");
+
+#define RGX_CDM_CCB_SIZE_LOG2          PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM
+static_assert(RGX_CDM_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 &&
+       RGX_CDM_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "CDM CCB size is invalid");
+#define RGX_CDM_CCB_MAX_SIZE_LOG2              PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_CDM
+static_assert(RGX_CDM_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM &&
+       RGX_CDM_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "CDM max CCB size is invalid");
+
+#define RGX_TA_CCB_SIZE_LOG2           PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA
+static_assert(RGX_TA_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 &&
+       RGX_TA_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TA CCB size is invalid");
+#define RGX_TA_CCB_MAX_SIZE_LOG2               PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TA
+static_assert(RGX_TA_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA &&
+       RGX_TA_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TA max CCB size is invalid");
+
+#define RGX_3D_CCB_SIZE_LOG2           PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D
+static_assert(RGX_3D_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 &&
+       RGX_3D_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "3D CCB size is invalid");
+#define RGX_3D_CCB_MAX_SIZE_LOG2               PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_3D
+static_assert(RGX_3D_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D &&
+       RGX_3D_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "3D max CCB size is invalid");
+
+#define RGX_KICKSYNC_CCB_SIZE_LOG2     PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC
+static_assert(RGX_KICKSYNC_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 &&
+       RGX_KICKSYNC_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "KickSync CCB size is invalid");
+#define RGX_KICKSYNC_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_KICKSYNC
+static_assert(RGX_KICKSYNC_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC &&
+       RGX_KICKSYNC_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "KickSync max CCB size is invalid");
+
+#define RGX_TDM_CCB_SIZE_LOG2         PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TDM
+static_assert(RGX_TDM_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 &&
+       RGX_TDM_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TDM CCB size is invalid");
+#define RGX_TDM_CCB_MAX_SIZE_LOG2              PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TDM
+static_assert(RGX_TDM_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TDM &&
+       RGX_TDM_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TDM max CCB size is invalid");
+
+#define RGX_RDM_CCB_SIZE_LOG2          PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_RDM
+static_assert(RGX_RDM_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 &&
+       RGX_RDM_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "RDM CCB size is invalid");
+#define RGX_RDM_CCB_MAX_SIZE_LOG2              PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_RDM
+static_assert(RGX_RDM_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_RDM &&
+       RGX_RDM_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "RDM max CCB size is invalid");
+
+typedef struct _RGX_CLIENT_CCB_ RGX_CLIENT_CCB;
+
+/*
+       This structure is declared here as it's allocated on the heap by
+       the callers
+*/
+
+typedef struct _RGX_CCB_CMD_HELPER_DATA_ {
+       /* Data setup at command init time */
+       RGX_CLIENT_CCB                  *psClientCCB;
+       IMG_CHAR                                *pszCommandName;
+       IMG_UINT32                              ui32PDumpFlags;
+
+       IMG_UINT32                              ui32ClientFenceCount;
+       PRGXFWIF_UFO_ADDR               *pauiFenceUFOAddress;
+       IMG_UINT32                              *paui32FenceValue;
+       IMG_UINT32                              ui32ClientUpdateCount;
+       PRGXFWIF_UFO_ADDR               *pauiUpdateUFOAddress;
+       IMG_UINT32                              *paui32UpdateValue;
+       RGXFWIF_CCB_CMD_TYPE    eType;
+       IMG_UINT32                              ui32CmdSize;
+       IMG_UINT8                               *pui8DMCmd;
+       IMG_UINT32                              ui32FenceCmdSize;
+       IMG_UINT32                              ui32FBSCInvalCmdSize;
+       IMG_UINT32                              ui32DMCmdSize;
+       IMG_UINT32                              ui32UpdateCmdSize;
+
+       /* data for FBSC invalidate command */
+       IMG_UINT64                              ui64FBSCEntryMask;
+
+       /* timestamp commands */
+       PRGXFWIF_TIMESTAMP_ADDR pPreTimestampAddr;
+       IMG_UINT32              ui32PreTimeStampCmdSize;
+       PRGXFWIF_TIMESTAMP_ADDR pPostTimestampAddr;
+       IMG_UINT32              ui32PostTimeStampCmdSize;
+       PRGXFWIF_UFO_ADDR       pRMWUFOAddr;
+       IMG_UINT32              ui32RMWUFOCmdSize;
+
+       /* Job reference fields */
+       IMG_UINT32                              ui32ExtJobRef;
+       IMG_UINT32                              ui32IntJobRef;
+
+       /* FW Memdesc for Workload information */
+       RGXFWIF_WORKEST_KICK_DATA       *psWorkEstKickData;
+
+} RGX_CCB_CMD_HELPER_DATA;
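+/* Typical usage (a sketch based on the helper API declared below): the caller
+ * allocates an array of RGX_CCB_CMD_HELPER_DATA on the heap, initialises each
+ * entry with RGXCmdHelperInitCmdCCB(), acquires CCB space for all entries with
+ * RGXCmdHelperAcquireCmdCCB() and completes the submission with
+ * RGXCmdHelperReleaseCmdCCB().
+ */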
+
+#define PADDING_COMMAND_SIZE   (sizeof(RGXFWIF_CCB_CMD_HEADER))
+
+
+#define RGX_CCB_REQUESTORS(TYPE) \
+       /* for debugging purposes */ TYPE(UNDEF)        \
+       TYPE(TA)        \
+       TYPE(3D)        \
+       TYPE(CDM)       \
+       TYPE(SH)        \
+       TYPE(RS)        \
+       TYPE(TQ_3D)     \
+       TYPE(TQ_2D)     \
+       TYPE(TQ_TDM)    \
+       TYPE(KICKSYNC)  \
+       TYPE(RAY)       \
+
+/* Forms an enum constant for each type present in the RGX_CCB_REQUESTORS list. The enum is mainly used
+   as an index into the aszCCBRequestors table defined in rgxccb.c; the total number of enumerators must
+   satisfy the corresponding build assert.
+*/
+typedef enum _RGX_CCB_REQUESTOR_TYPE_
+{
+#define CONSTRUCT_ENUM(req) REQ_TYPE_##req,
+       RGX_CCB_REQUESTORS (CONSTRUCT_ENUM)
+#undef CONSTRUCT_ENUM
+
+       /* should always be at the end */
+       REQ_TYPE_TOTAL_COUNT,
+} RGX_CCB_REQUESTOR_TYPE;
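+/* For reference, CONSTRUCT_ENUM expands the requestor list above to
+ * REQ_TYPE_UNDEF, REQ_TYPE_TA, REQ_TYPE_3D, REQ_TYPE_CDM, REQ_TYPE_SH,
+ * REQ_TYPE_RS, REQ_TYPE_TQ_3D, REQ_TYPE_TQ_2D, REQ_TYPE_TQ_TDM,
+ * REQ_TYPE_KICKSYNC and REQ_TYPE_RAY, so REQ_TYPE_TOTAL_COUNT evaluates to 11.
+ */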
+
+/* Tuple describing the columns of the following table */
+typedef enum _RGX_CCB_REQUESTOR_TUPLE_
+{
+       REQ_RGX_FW_CLIENT_CCB_STRING,          /* Index of the comment dumped in DevMemAllocs when allocating a FirmwareClientCCB for this requestor */
+       REQ_RGX_FW_CLIENT_CCB_CONTROL_STRING,  /* Index of the comment dumped in DevMemAllocs when allocating a FirmwareClientCCBControl for this requestor */
+       REQ_PDUMP_COMMENT,                     /* Index of the comment dumped in PDUMPs */
+
+       /* should always be at the end */
+       REQ_TUPLE_CARDINALITY,
+} RGX_CCB_REQUESTOR_TUPLE;
+
+/* Unpack U8 values from U32. */
+#define U32toU8_Unpack1(U32Packed) (U32Packed & 0xFF)
+#define U32toU8_Unpack2(U32Packed) ((U32Packed>>8) & 0xFF)
+#define U32toU8_Unpack3(U32Packed) ((U32Packed>>16) & 0xFF)
+#define U32toU8_Unpack4(U32Packed) ((U32Packed>>24) & 0xFF)
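+/* Example: U32toU8_Unpack1(0xDDCCBBAA) == 0xAA, U32toU8_Unpack2(0xDDCCBBAA) == 0xBB,
+ * U32toU8_Unpack3(0xDDCCBBAA) == 0xCC and U32toU8_Unpack4(0xDDCCBBAA) == 0xDD.
+ */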
+
+/* Defines for bit meanings within the ui32CCBFlags member of struct _RGX_CLIENT_CCB_
+ *
+ *   ( X = taken/in use, - = available/unused )
+ *
+ *   31                             10
+ *    |                             ||
+ *    ------------------------------XX
+ *  Bit   Meaning
+ *    0 = If set, CCB is still open and commands will be appended to it
+ *    1 = If set, do not perform Sync Lockup Recovery (SLR) for this CCB
+ */
+#define CCB_FLAGS_CCB_STATE_OPEN (0)  /*!< This bit is set to indicate CCB is in the 'Open' state. */
+#define CCB_FLAGS_SLR_DISABLED   (1)  /*!< This bit is set to disable Sync Lockup Recovery (SLR) for this CCB. */
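+/* Illustrative use of the bit positions above (a sketch only; the driver's own
+ * flag helpers in rgxccb.c may differ):
+ *     if (ui32CCBFlags & (1U << CCB_FLAGS_CCB_STATE_OPEN))  -> CCB still open
+ *     ui32CCBFlags |= (1U << CCB_FLAGS_SLR_DISABLED);        -> SLR disabled
+ */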
+
+
+/*     Table containing an array of strings for each requestor type in the RGX_CCB_REQUESTORS list. In addition to its use
+       in rgxccb.c, this table is also used to access the strings dumped in PDUMP comments, hence it is declared extern
+       for use in other modules.
+*/
+extern const IMG_CHAR *const aszCCBRequestors[][REQ_TUPLE_CARDINALITY];
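+/* Illustrative lookup (the actual string contents live in rgxccb.c):
+ *     const IMG_CHAR *pszComment = aszCCBRequestors[REQ_TYPE_TA][REQ_PDUMP_COMMENT];
+ */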
+
+PVRSRV_ERROR RGXCCBPDumpDrainCCB(RGX_CLIENT_CCB *psClientCCB,
+                                       IMG_UINT32 ui32PDumpFlags);
+
+PVRSRV_ERROR RGXCreateCCB(PVRSRV_RGXDEV_INFO   *psDevInfo,
+                                                 IMG_UINT32                    ui32CCBSizeLog2,
+                                                 IMG_UINT32                    ui32CCBMaxSizeLog2,
+                                                 IMG_UINT32                    ui32ContextFlags,
+                                                 CONNECTION_DATA               *psConnectionData,
+                                                 RGX_CCB_REQUESTOR_TYPE        eCCBRequestor,
+                                                 RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+                                                 RGX_CLIENT_CCB                **ppsClientCCB,
+                                                 DEVMEM_MEMDESC                **ppsClientCCBMemDesc,
+                                                 DEVMEM_MEMDESC                **ppsClientCCBCtlMemDesc);
+
+void RGXDestroyCCB(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_CLIENT_CCB *psClientCCB);
+
+PVRSRV_ERROR RGXCheckSpaceCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32CmdSize);
+
+PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB,
+                                                  IMG_UINT32           ui32CmdSize,
+                                                  void                         **ppvBufferSpace,
+                                                  IMG_UINT32           ui32PDumpFlags);
+
+void RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB,
+                                                               IMG_UINT32              ui32CmdSize,
+                                                               IMG_UINT32              ui32PDumpFlags);
+
+IMG_UINT32 RGXGetHostWriteOffsetCCB(RGX_CLIENT_CCB *psClientCCB);
+IMG_UINT32 RGXGetWrapMaskCCB(RGX_CLIENT_CCB *psClientCCB);
+
+PVRSRV_ERROR RGXSetCCBFlags(RGX_CLIENT_CCB *psClientCCB,
+                                                       IMG_UINT32              ui32Flags);
+
+void RGXCmdHelperInitCmdCCB_CommandSize(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                               IMG_UINT64 ui64FBSCEntryMask,
+                                        IMG_UINT32 ui32ClientFenceCount,
+                                        IMG_UINT32 ui32ClientUpdateCount,
+                                        IMG_UINT32 ui32CmdSize,
+                                        PRGXFWIF_TIMESTAMP_ADDR *ppPreAddr,
+                                        PRGXFWIF_TIMESTAMP_ADDR *ppPostAddr,
+                                        PRGXFWIF_UFO_ADDR       *ppRMWUFOAddr,
+                                        RGX_CCB_CMD_HELPER_DATA *psCmdHelperData);
+
+void RGXCmdHelperInitCmdCCB_OtherData(RGX_CLIENT_CCB *psClientCCB,
+                                      IMG_UINT32 ui32ClientFenceCount,
+                                      PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress,
+                                      IMG_UINT32 *paui32FenceValue,
+                                      IMG_UINT32 ui32ClientUpdateCount,
+                                      PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress,
+                                      IMG_UINT32 *paui32UpdateValue,
+                                      IMG_UINT32 ui32CmdSize,
+                                      IMG_PBYTE pui8DMCmd,
+                                      PRGXFWIF_TIMESTAMP_ADDR *ppPreAddr,
+                                      PRGXFWIF_TIMESTAMP_ADDR *ppPostAddr,
+                                      PRGXFWIF_UFO_ADDR       *ppRMWUFOAddr,
+                                      RGXFWIF_CCB_CMD_TYPE eType,
+                                      IMG_UINT32 ui32ExtJobRef,
+                                      IMG_UINT32 ui32IntJobRef,
+                                      IMG_UINT32 ui32PDumpFlags,
+                                      RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData,
+                                      IMG_CHAR *pszCommandName,
+                                      IMG_BOOL bCCBStateOpen,
+                                      RGX_CCB_CMD_HELPER_DATA *psCmdHelperData);
+
+void RGXCmdHelperInitCmdCCB(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                       RGX_CLIENT_CCB          *psClientCCB,
+                            IMG_UINT64              ui64FBSCEntryMask,
+                            IMG_UINT32              ui32ClientFenceCount,
+                            PRGXFWIF_UFO_ADDR       *pauiFenceUFOAddress,
+                            IMG_UINT32              *paui32FenceValue,
+                            IMG_UINT32              ui32ClientUpdateCount,
+                            PRGXFWIF_UFO_ADDR       *pauiUpdateUFOAddress,
+                            IMG_UINT32              *paui32UpdateValue,
+                            IMG_UINT32              ui32CmdSize,
+                            IMG_UINT8               *pui8DMCmd,
+                            PRGXFWIF_TIMESTAMP_ADDR *ppPreAddr,
+                            PRGXFWIF_TIMESTAMP_ADDR *ppPostAddr,
+                            PRGXFWIF_UFO_ADDR       *ppRMWUFOAddr,
+                            RGXFWIF_CCB_CMD_TYPE    eType,
+                            IMG_UINT32              ui32ExtJobRef,
+                            IMG_UINT32              ui32IntJobRef,
+                            IMG_UINT32              ui32PDumpFlags,
+                            RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData,
+                            IMG_CHAR                *pszCommandName,
+                            IMG_BOOL                bCCBStateOpen,
+                            RGX_CCB_CMD_HELPER_DATA *psCmdHelperData);
+
+PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount,
+                                                                          RGX_CCB_CMD_HELPER_DATA *asCmdHelperData);
+
+void RGXCmdHelperReleaseCmdCCB(IMG_UINT32 ui32CmdCount,
+                                                          RGX_CCB_CMD_HELPER_DATA *asCmdHelperData,
+                                                          const IMG_CHAR *pcszDMName,
+                                                          IMG_UINT32 ui32CtxAddr);
+
+IMG_UINT32 RGXCmdHelperGetCommandSize(IMG_UINT32 ui32CmdCount,
+                                                                  RGX_CCB_CMD_HELPER_DATA *asCmdHelperData);
+
+IMG_UINT32 RGXCmdHelperGetCommandOffset(RGX_CCB_CMD_HELPER_DATA *asCmdHelperData,
+                                        IMG_UINT32              ui32Cmdindex);
+
+IMG_UINT32 RGXCmdHelperGetDMCommandHeaderOffset(RGX_CCB_CMD_HELPER_DATA *psCmdHelperData);
+
+void DumpStalledCCBCommand(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext,
+                               RGX_CLIENT_CCB  *psCurrentClientCCB,
+                               DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                               void *pvDumpDebugFile);
+
+void DumpCCB(PVRSRV_RGXDEV_INFO *psDevInfo,
+                       PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext,
+                       RGX_CLIENT_CCB *psCurrentClientCCB,
+                       DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                       void *pvDumpDebugFile);
+
+PVRSRV_ERROR CheckForStalledCCB(PVRSRV_DEVICE_NODE *psDevNode, RGX_CLIENT_CCB  *psCurrentClientCCB, RGX_KICK_TYPE_DM eKickTypeDM);
+
+void DumpStalledContextInfo(PVRSRV_RGXDEV_INFO *psDevInfo);
+#endif /* RGXCCB_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxfwdbg.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxfwdbg.c
new file mode 100644 (file)
index 0000000..1e7a51f
--- /dev/null
@@ -0,0 +1,282 @@
+/*************************************************************************/ /*!
+@File
+@Title          Debugging and miscellaneous functions server implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Kernel services functions for debugging and other
+                miscellaneous functionality.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv.h"
+#include "pvr_debug.h"
+#include "rgxfwdbg.h"
+#include "rgxfwutils.h"
+#include "rgxta3d.h"
+#include "pdump_km.h"
+#include "mmu_common.h"
+#include "devicemem_server.h"
+#include "osfunc.h"
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugQueryFWLogKM(
+       const CONNECTION_DATA *psConnection,
+       const PVRSRV_DEVICE_NODE *psDeviceNode,
+       IMG_UINT32 *pui32RGXFWLogType)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+       if (!psDeviceNode || !pui32RGXFWLogType)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       psDevInfo = psDeviceNode->pvDevice;
+
+       if (!psDevInfo || !psDevInfo->psRGXFWIfTraceBufCtl)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       *pui32RGXFWLogType = psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType;
+       return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugSetFWLogKM(
+       const CONNECTION_DATA * psConnection,
+       const PVRSRV_DEVICE_NODE *psDeviceNode,
+       IMG_UINT32  ui32RGXFWLogType)
+{
+       RGXFWIF_KCCB_CMD sLogTypeUpdateCmd;
+       PVRSRV_DEV_POWER_STATE ePowerState;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+       IMG_UINT32 ui32OldRGXFWLogType;
+       IMG_UINT32 ui32kCCBCommandSlot;
+       IMG_BOOL bWaitForFwUpdate = IMG_FALSE;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+       ui32OldRGXFWLogType = psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType;
+
+       /* check log type is valid */
+       if (ui32RGXFWLogType & ~RGXFWIF_LOG_TYPE_MASK)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       OSLockAcquire(psDevInfo->hRGXFWIfBufInitLock);
+
+       /* set the new log type and ensure the new log type is written to memory
+        * before requesting the FW to read it
+        */
+       psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType = ui32RGXFWLogType;
+       OSMemoryBarrier(&psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType);
+
+       /* Allocate firmware trace buffer resource(s) if not already done */
+       if (RGXTraceBufferIsInitRequired(psDevInfo))
+       {
+               eError = RGXTraceBufferInitOnDemandResources(psDevInfo, RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS);
+       }
+#if defined(SUPPORT_TBI_INTERFACE)
+       /* If the log type is TBI, allocate the resource on demand and copy
+        * the SFs to it
+        */
+       else if (RGXTBIBufferIsInitRequired(psDevInfo))
+       {
+               eError = RGXTBIBufferInitOnDemandResources(psDevInfo);
+       }
+
+       /* TBI buffer address will be 0 if not initialised */
+       sLogTypeUpdateCmd.uCmdData.sTBIBuffer = psDevInfo->sRGXFWIfTBIBuffer;
+#else
+       sLogTypeUpdateCmd.uCmdData.sTBIBuffer.ui32Addr = 0;
+#endif
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed to allocate resource on-demand. Reverting to old value",
+                        __func__));
+               psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType = ui32OldRGXFWLogType;
+               OSMemoryBarrier(&psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType);
+
+               OSLockRelease(psDevInfo->hRGXFWIfBufInitLock);
+
+               return eError;
+       }
+
+       OSLockRelease(psDevInfo->hRGXFWIfBufInitLock);
+
+       eError = PVRSRVPowerLock((PPVRSRV_DEVICE_NODE) psDeviceNode);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed to acquire power lock (%u)",
+                        __func__,
+                        eError));
+               return eError;
+       }
+
+       eError = PVRSRVGetDevicePowerState((PPVRSRV_DEVICE_NODE) psDeviceNode, &ePowerState);
+
+       if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF))
+       {
+               /* Ask the FW to update its cached version of logType value */
+               sLogTypeUpdateCmd.eCmdType = RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE;
+
+               eError = RGXSendCommandAndGetKCCBSlot(psDevInfo,
+                                                                                         &sLogTypeUpdateCmd,
+                                                                                         PDUMP_FLAGS_CONTINUOUS,
+                                                                                         &ui32kCCBCommandSlot);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXSendCommandAndGetKCCBSlot", unlock);
+               bWaitForFwUpdate = IMG_TRUE;
+       }
+
+unlock:
+       PVRSRVPowerUnlock( (PPVRSRV_DEVICE_NODE) psDeviceNode);
+       if (bWaitForFwUpdate)
+       {
+               /* Wait for the LogType value to be updated in FW */
+               eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
+               PVR_LOG_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate");
+       }
+       return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugSetHCSDeadlineKM(
+       CONNECTION_DATA *psConnection,
+       PVRSRV_DEVICE_NODE *psDeviceNode,
+       IMG_UINT32  ui32HCSDeadlineMS)
+{
+       PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       return RGXFWSetHCSDeadline(psDevInfo, ui32HCSDeadlineMS);
+}
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugSetOSidPriorityKM(
+       CONNECTION_DATA *psConnection,
+       PVRSRV_DEVICE_NODE *psDeviceNode,
+       IMG_UINT32  ui32OSid,
+       IMG_UINT32  ui32OSidPriority)
+{
+       PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       return RGXFWChangeOSidPriority(psDevInfo, ui32OSid, ui32OSidPriority);
+}
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugSetOSNewOnlineStateKM(
+       CONNECTION_DATA *psConnection,
+       PVRSRV_DEVICE_NODE *psDeviceNode,
+       IMG_UINT32  ui32OSid,
+       IMG_UINT32  ui32OSNewState)
+{
+       PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+       RGXFWIF_OS_STATE_CHANGE eState;
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       eState = (ui32OSNewState) ? (RGXFWIF_OS_ONLINE) : (RGXFWIF_OS_OFFLINE);
+       return RGXFWSetFwOsState(psDevInfo, ui32OSid, eState);
+}
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugPHRConfigureKM(
+       CONNECTION_DATA *psConnection,
+       PVRSRV_DEVICE_NODE *psDeviceNode,
+       IMG_UINT32 ui32PHRMode)
+{
+       PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       return RGXFWConfigPHR(psDevInfo,
+                             ui32PHRMode);
+}
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugWdgConfigureKM(
+       CONNECTION_DATA *psConnection,
+       PVRSRV_DEVICE_NODE *psDeviceNode,
+       IMG_UINT32 ui32WdgPeriodUs)
+{
+       PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       return RGXFWConfigWdg(psDevInfo,
+                             ui32WdgPeriodUs);
+}
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugDumpFreelistPageListKM(
+       CONNECTION_DATA * psConnection,
+       PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+       DLLIST_NODE *psNode, *psNext;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       if (dllist_is_empty(&psDevInfo->sFreeListHead))
+       {
+               return PVRSRV_OK;
+       }
+
+       PVR_LOG(("---------------[ Begin Freelist Page List Dump ]------------------"));
+
+       OSLockAcquire(psDevInfo->hLockFreeList);
+       dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext)
+       {
+               RGX_FREELIST *psFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode);
+               RGXDumpFreeListPageList(psFreeList);
+       }
+       OSLockRelease(psDevInfo->hLockFreeList);
+
+       PVR_LOG(("----------------[ End Freelist Page List Dump ]-------------------"));
+
+       return PVRSRV_OK;
+
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxfwdbg.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxfwdbg.h
new file mode 100644 (file)
index 0000000..38d487e
--- /dev/null
@@ -0,0 +1,113 @@
+/*************************************************************************/ /*!
+@File
+@Title          Debugging and miscellaneous functions server interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Kernel services functions for debugging and other
+                miscellaneous functionality.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXFWDBG_H)
+#define RGXFWDBG_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "device.h"
+#include "pmr.h"
+
+#include "connection_server.h"
+
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugInitFWImageKM(
+       PMR *psFWImgDestPMR,
+       PMR *psFWImgSrcPMR,
+       IMG_UINT64 ui64FWImgLen,
+       PMR *psFWImgSigPMR,
+       IMG_UINT64 ui64FWSigLen);
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugQueryFWLogKM(
+       const CONNECTION_DATA *psConnection,
+       const PVRSRV_DEVICE_NODE *psDeviceNode,
+       IMG_UINT32 *pui32RGXFWLogType);
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugSetFWLogKM(
+       const CONNECTION_DATA *psConnection,
+       const PVRSRV_DEVICE_NODE *psDeviceNode,
+       IMG_UINT32  ui32RGXFWLogType);
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugSetHCSDeadlineKM(
+       CONNECTION_DATA *psConnection,
+       PVRSRV_DEVICE_NODE *psDeviceNode,
+       IMG_UINT32  ui32HCSDeadlineMS);
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugSetOSidPriorityKM(
+       CONNECTION_DATA *psConnection,
+       PVRSRV_DEVICE_NODE *psDeviceNode,
+       IMG_UINT32  ui32OSid,
+       IMG_UINT32  ui32OSidPriority);
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugSetOSNewOnlineStateKM(
+       CONNECTION_DATA *psConnection,
+       PVRSRV_DEVICE_NODE *psDeviceNode,
+       IMG_UINT32  ui32OSid,
+       IMG_UINT32  ui32OSNewState);
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugPHRConfigureKM(
+       CONNECTION_DATA *psConnection,
+       PVRSRV_DEVICE_NODE *psDeviceNode,
+       IMG_UINT32 ui32PHRMode);
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugWdgConfigureKM(
+       CONNECTION_DATA *psConnection,
+       PVRSRV_DEVICE_NODE *psDeviceNode,
+       IMG_UINT32 ui32WdgPeriodUs);
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugDumpFreelistPageListKM(
+       CONNECTION_DATA * psConnection,
+       PVRSRV_DEVICE_NODE *psDeviceNode);
+
+#endif
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxfwimageutils.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxfwimageutils.c
new file mode 100644 (file)
index 0000000..0a9813b
--- /dev/null
@@ -0,0 +1,1082 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services Firmware image utilities used at init time
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Services Firmware image utilities used at init time
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* The routines implemented here are built on top of an abstraction layer to
+ * hide DDK/OS-specific details in case they are used outside of the DDK
+ * (e.g. when trusted device is enabled).
+ * Any new dependency should be added to rgxlayer.h.
+ * Any new code should be built on top of the existing abstraction layer,
+ * which should be extended when necessary. */
+#include "rgxfwimageutils.h"
+#include "pvrsrv.h"
+
+
+/************************************************************************
+* FW layout information
+************************************************************************/
+#define MAX_NUM_ENTRIES (8)
+static RGX_FW_LAYOUT_ENTRY asRGXFWLayoutTable[MAX_NUM_ENTRIES];
+static IMG_UINT32 ui32LayoutEntryNum;
+
+
+static RGX_FW_LAYOUT_ENTRY* GetTableEntry(const void *hPrivate, RGX_FW_SECTION_ID eId)
+{
+       IMG_UINT32 i;
+
+       for (i = 0; i < ui32LayoutEntryNum; i++)
+       {
+               if (asRGXFWLayoutTable[i].eId == eId)
+               {
+                       return &asRGXFWLayoutTable[i];
+               }
+       }
+
+       RGXErrorLog(hPrivate, "%s: id %u not found, returning entry 0\n",
+                   __func__, eId);
+
+       return &asRGXFWLayoutTable[0];
+}
+
+/*!
+*******************************************************************************
+
+ @Function      FindMMUSegment
+
+ @Description   Given a 32-bit FW address, attempt to find the corresponding
+                pointer to the FW allocation
+
+ @Input         ui32OffsetIn             : 32 bit FW address
+ @Input         pvHostFWCodeAddr         : Pointer to FW code
+ @Input         pvHostFWDataAddr         : Pointer to FW data
+ @Input         pvHostFWCorememCodeAddr  : Pointer to FW coremem code
+ @Input         pvHostFWCorememDataAddr  : Pointer to FW coremem data
+ @Output        uiHostAddrOut            : CPU pointer equivalent to ui32OffsetIn
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR FindMMUSegment(IMG_UINT32 ui32OffsetIn,
+                                   void *pvHostFWCodeAddr,
+                                   void *pvHostFWDataAddr,
+                                   void *pvHostFWCorememCodeAddr,
+                                   void *pvHostFWCorememDataAddr,
+                                   void **uiHostAddrOut)
+{
+       IMG_UINT32 i;
+
+       for (i = 0; i < ui32LayoutEntryNum; i++)
+       {
+               if ((ui32OffsetIn >= asRGXFWLayoutTable[i].ui32BaseAddr) &&
+                   (ui32OffsetIn < (asRGXFWLayoutTable[i].ui32BaseAddr + asRGXFWLayoutTable[i].ui32AllocSize)))
+               {
+                       switch (asRGXFWLayoutTable[i].eType)
+                       {
+                               case FW_CODE:
+                                       *uiHostAddrOut = pvHostFWCodeAddr;
+                                       break;
+
+                               case FW_DATA:
+                                       *uiHostAddrOut = pvHostFWDataAddr;
+                                       break;
+
+                               case FW_COREMEM_CODE:
+                                       *uiHostAddrOut = pvHostFWCorememCodeAddr;
+                                       break;
+
+                               case FW_COREMEM_DATA:
+                                       *uiHostAddrOut = pvHostFWCorememDataAddr;
+                                       break;
+
+                               default:
+                                       return PVRSRV_ERROR_INIT_FAILURE;
+                       }
+
+                       goto found;
+               }
+       }
+
+       return PVRSRV_ERROR_INIT_FAILURE;
+
+found:
+       if (*uiHostAddrOut == NULL)
+       {
+               return PVRSRV_OK;
+       }
+
+       /* Direct Mem write to mapped memory */
+       ui32OffsetIn -= asRGXFWLayoutTable[i].ui32BaseAddr;
+       ui32OffsetIn += asRGXFWLayoutTable[i].ui32AllocOffset;
+
+       /* Add offset to pointer to FW allocation only if
+        * that allocation is available
+        */
+       if (*uiHostAddrOut)
+       {
+               *(IMG_UINT8 **)uiHostAddrOut += ui32OffsetIn;
+       }
+
+       return PVRSRV_OK;
+}
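+/* Worked example of the translation above (hypothetical layout values): for an
+ * entry with eType == FW_CODE, ui32BaseAddr == 0x1000, ui32AllocSize == 0x800 and
+ * ui32AllocOffset == 0x200, an input FW address of 0x1040 resolves to
+ * pvHostFWCodeAddr + 0x240 (0x1040 - 0x1000 + 0x200).
+ */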
+
+/*!
+*******************************************************************************
+
+ @Function      RGXFWConfigureSegID
+
+ @Description   Configures a single segment of the Segment MMU
+                (base, limit and out_addr)
+
+ @Input         hPrivate        : Implementation specific data
+ @Input         ui64SegOutAddr  : Segment output base address (40 bit devVaddr)
+ @Input         ui32SegBase     : Segment input base address (32 bit FW address)
+ @Input         ui32SegLimit    : Segment size
+ @Input         ui32SegID       : Segment ID
+ @Input         pszName         : Segment name
+ @Input         ppui32BootConf  : Pointer to bootloader data
+
+ @Return        void
+
+******************************************************************************/
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+static void RGXFWConfigureSegID(const void *hPrivate,
+                                IMG_UINT64 ui64SegOutAddr,
+                                IMG_UINT32 ui32SegBase,
+                                IMG_UINT32 ui32SegLimit,
+                                IMG_UINT32 ui32SegID,
+                                IMG_UINT32 **ppui32BootConf)
+{
+       IMG_UINT32 *pui32BootConf = *ppui32BootConf;
+       IMG_UINT32 ui32SegOutAddr0 = ui64SegOutAddr & 0x00000000FFFFFFFFUL;
+       IMG_UINT32 ui32SegOutAddr1 = (ui64SegOutAddr >> 32) & 0x00000000FFFFFFFFUL;
+
+       /* META segments have a minimum size */
+       IMG_UINT32 ui32LimitOff = (ui32SegLimit < RGXFW_SEGMMU_ALIGN) ?
+                                 RGXFW_SEGMMU_ALIGN : ui32SegLimit;
+       /* the limit is an offset, therefore off = size - 1 */
+       ui32LimitOff -= 1;
+
+       RGXCommentLog(hPrivate,
+                     "* Seg%d: meta_addr = 0x%08x, devv_addr = 0x%" IMG_UINT64_FMTSPECx ", limit = 0x%x",
+                     ui32SegID,
+                     ui32SegBase,
+                     ui64SegOutAddr,
+                     ui32LimitOff);
+
+       ui32SegBase |= RGXFW_SEGMMU_ALLTHRS_WRITEABLE;
+
+       *pui32BootConf++ = META_CR_MMCU_SEGMENTn_BASE(ui32SegID);
+       *pui32BootConf++ = ui32SegBase;
+
+       *pui32BootConf++ = META_CR_MMCU_SEGMENTn_LIMIT(ui32SegID);
+       *pui32BootConf++ = ui32LimitOff;
+
+       *pui32BootConf++ = META_CR_MMCU_SEGMENTn_OUTA0(ui32SegID);
+       *pui32BootConf++ = ui32SegOutAddr0;
+
+       *pui32BootConf++ = META_CR_MMCU_SEGMENTn_OUTA1(ui32SegID);
+       *pui32BootConf++ = ui32SegOutAddr1;
+
+       *ppui32BootConf = pui32BootConf;
+}
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function      RGXFWConfigureSegMMU
+
+ @Description   Configures META's Segment MMU
+
+ @Input         hPrivate             : Implementation specific data
+ @Input         psFWCodeDevVAddrBase : FW code base device virtual address
+ @Input         psFWDataDevVAddrBase : FW data base device virtual address
+ @Input         ppui32BootConf       : Pointer to bootloader data
+
+ @Return        void
+
+******************************************************************************/
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+static void RGXFWConfigureSegMMU(const void       *hPrivate,
+                                 IMG_DEV_VIRTADDR *psFWCodeDevVAddrBase,
+                                 IMG_DEV_VIRTADDR *psFWDataDevVAddrBase,
+                                 IMG_UINT32       **ppui32BootConf)
+{
+       IMG_UINT64 ui64SegOutAddrTop;
+       IMG_UINT32 i;
+
+       PVR_UNREFERENCED_PARAMETER(psFWCodeDevVAddrBase);
+
+       /* Configure Segment MMU */
+       RGXCommentLog(hPrivate, "********** FW configure Segment MMU **********");
+
+       if (RGX_DEVICE_HAS_FEATURE(hPrivate, SLC_VIVT))
+       {
+               ui64SegOutAddrTop = RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED(MMU_CONTEXT_MAPPING_FWPRIV);
+       }
+       else
+       {
+               ui64SegOutAddrTop = RGXFW_SEGMMU_OUTADDR_TOP_SLC(MMU_CONTEXT_MAPPING_FWPRIV, RGXFW_SEGMMU_META_BIFDM_ID);
+       }
+
+       for (i = 0; i < ui32LayoutEntryNum; i++)
+       {
+               /*
+                * FW code is using the bootloader segment which is already configured on boot.
+                * FW coremem code and data don't use the segment MMU.
+                * Only the FW data segment needs to be configured.
+                */
+
+               if (asRGXFWLayoutTable[i].eType == FW_DATA)
+               {
+                       IMG_UINT64 ui64SegOutAddr;
+                       IMG_UINT32 ui32SegId = RGXFW_SEGMMU_DATA_ID;
+
+                       ui64SegOutAddr = (psFWDataDevVAddrBase->uiAddr | ui64SegOutAddrTop) +
+                                         asRGXFWLayoutTable[i].ui32AllocOffset;
+
+                       RGXFWConfigureSegID(hPrivate,
+                                           ui64SegOutAddr,
+                                           asRGXFWLayoutTable[i].ui32BaseAddr,
+                                           asRGXFWLayoutTable[i].ui32AllocSize,
+                                           ui32SegId,
+                                           ppui32BootConf); /* Write the sequence to the bootloader */
+
+                       break;
+               }
+       }
+}
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function      RGXFWConfigureMetaCaches
+
+ @Description   Configure and enable the Meta instruction and data caches
+
+ @Input         hPrivate          : Implementation specific data
+ @Input         ui32NumThreads    : Number of FW threads in use
+ @Input         ppui32BootConf    : Pointer to bootloader data
+
+ @Return        void
+
+******************************************************************************/
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+static void RGXFWConfigureMetaCaches(const void *hPrivate,
+                                     IMG_UINT32 ui32NumThreads,
+                                     IMG_UINT32 **ppui32BootConf)
+{
+       IMG_UINT32 *pui32BootConf = *ppui32BootConf;
+       IMG_UINT32 ui32DCacheT0, ui32ICacheT0;
+       IMG_UINT32 ui32DCacheT1, ui32ICacheT1;
+       IMG_UINT32 ui32DCacheT2, ui32ICacheT2;
+       IMG_UINT32 ui32DCacheT3, ui32ICacheT3;
+
+#define META_CR_MMCU_LOCAL_EBCTRL                        (0x04830600)
+#define META_CR_MMCU_LOCAL_EBCTRL_ICWIN                  (0x3 << 14)
+#define META_CR_MMCU_LOCAL_EBCTRL_DCWIN                  (0x3 << 6)
+#define META_CR_SYSC_DCPART(n)                           (0x04830200 + (n)*0x8)
+#define META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE         (0x1 << 31)
+#define META_CR_SYSC_ICPART(n)                           (0x04830220 + (n)*0x8)
+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF  (0x8 << 16)
+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE       (0xF)
+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE       (0x7)
+#define META_CR_MMCU_DCACHE_CTRL                         (0x04830018)
+#define META_CR_MMCU_ICACHE_CTRL                         (0x04830020)
+#define META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN           (0x1)
+
+       RGXCommentLog(hPrivate, "********** Meta caches configuration *********");
+
+       /* Initialise I/Dcache settings */
+       ui32DCacheT0 = ui32DCacheT1 = (IMG_UINT32)META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
+       ui32DCacheT2 = ui32DCacheT3 = (IMG_UINT32)META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
+       ui32ICacheT0 = ui32ICacheT1 = ui32ICacheT2 = ui32ICacheT3 = 0;
+
+       if (ui32NumThreads == 1)
+       {
+               ui32DCacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;
+               ui32ICacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;
+       }
+       else
+       {
+               ui32DCacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE;
+               ui32ICacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE;
+
+               ui32DCacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE |
+                               META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF;
+               ui32ICacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE |
+                               META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF;
+       }
+
+       /* Local region MMU enhanced bypass: WIN-3 mode for code and data caches */
+       *pui32BootConf++ = META_CR_MMCU_LOCAL_EBCTRL;
+       *pui32BootConf++ = META_CR_MMCU_LOCAL_EBCTRL_ICWIN |
+                          META_CR_MMCU_LOCAL_EBCTRL_DCWIN;
+
+       RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+                     META_CR_MMCU_LOCAL_EBCTRL,
+                     META_CR_MMCU_LOCAL_EBCTRL_ICWIN | META_CR_MMCU_LOCAL_EBCTRL_DCWIN);
+
+       /* Data cache partitioning thread 0 to 3 */
+       *pui32BootConf++ = META_CR_SYSC_DCPART(0);
+       *pui32BootConf++ = ui32DCacheT0;
+       *pui32BootConf++ = META_CR_SYSC_DCPART(1);
+       *pui32BootConf++ = ui32DCacheT1;
+       *pui32BootConf++ = META_CR_SYSC_DCPART(2);
+       *pui32BootConf++ = ui32DCacheT2;
+       *pui32BootConf++ = META_CR_SYSC_DCPART(3);
+       *pui32BootConf++ = ui32DCacheT3;
+
+       RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+                     META_CR_SYSC_DCPART(0), ui32DCacheT0);
+       RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+                     META_CR_SYSC_DCPART(1), ui32DCacheT1);
+       RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+                     META_CR_SYSC_DCPART(2), ui32DCacheT2);
+       RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+                     META_CR_SYSC_DCPART(3), ui32DCacheT3);
+
+       /* Enable data cache hits */
+       *pui32BootConf++ = META_CR_MMCU_DCACHE_CTRL;
+       *pui32BootConf++ = META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN;
+
+       RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+                     META_CR_MMCU_DCACHE_CTRL,
+                     META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN);
+
+       /* Instruction cache partitioning thread 0 to 3 */
+       *pui32BootConf++ = META_CR_SYSC_ICPART(0);
+       *pui32BootConf++ = ui32ICacheT0;
+       *pui32BootConf++ = META_CR_SYSC_ICPART(1);
+       *pui32BootConf++ = ui32ICacheT1;
+       *pui32BootConf++ = META_CR_SYSC_ICPART(2);
+       *pui32BootConf++ = ui32ICacheT2;
+       *pui32BootConf++ = META_CR_SYSC_ICPART(3);
+       *pui32BootConf++ = ui32ICacheT3;
+
+       RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+                     META_CR_SYSC_ICPART(0), ui32ICacheT0);
+       RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+                     META_CR_SYSC_ICPART(1), ui32ICacheT1);
+       RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+                     META_CR_SYSC_ICPART(2), ui32ICacheT2);
+       RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+                     META_CR_SYSC_ICPART(3), ui32ICacheT3);
+
+       /* Enable instruction cache hits */
+       *pui32BootConf++ = META_CR_MMCU_ICACHE_CTRL;
+       *pui32BootConf++ = META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN;
+
+       RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+                     META_CR_MMCU_ICACHE_CTRL,
+                     META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN);
+
+       *pui32BootConf++ = 0x040000C0;
+       *pui32BootConf++ = 0;
+
+       RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", 0x040000C0, 0);
+
+       *ppui32BootConf = pui32BootConf;
+}
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function      ProcessLDRCommandStream
+
+ @Description   Process the output of the Meta toolchain in the .LDR format,
+                copying code and data sections into their final locations and
+                passing some information to the Meta bootloader
+
+ @Input         hPrivate                 : Implementation specific data
+ @Input         pbLDR                    : Pointer to FW blob
+ @Input         pvHostFWCodeAddr         : Pointer to FW code
+ @Input         pvHostFWDataAddr         : Pointer to FW data
+ @Input         pvHostFWCorememCodeAddr  : Pointer to FW coremem code
+ @Input         pvHostFWCorememDataAddr  : Pointer to FW coremem data
+ @Input         ppui32BootConf           : Pointer to bootloader data
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR ProcessLDRCommandStream(const void *hPrivate,
+                                     const IMG_BYTE* pbLDR,
+                                     void* pvHostFWCodeAddr,
+                                     void* pvHostFWDataAddr,
+                                     void* pvHostFWCorememCodeAddr,
+                                     void* pvHostFWCorememDataAddr,
+                                     IMG_UINT32 **ppui32BootConf)
+{
+       RGX_META_LDR_BLOCK_HDR *psHeader = (RGX_META_LDR_BLOCK_HDR *) pbLDR;
+       RGX_META_LDR_L1_DATA_BLK *psL1Data =
+           (RGX_META_LDR_L1_DATA_BLK*) ((IMG_UINT8 *) pbLDR + psHeader->ui32SLData);
+
+       IMG_UINT32 *pui32BootConf  = ppui32BootConf ? *ppui32BootConf : NULL;
+       IMG_UINT32 ui32CorememSize = RGXGetFWCorememSize(hPrivate);
+
+       RGXCommentLog(hPrivate, "**********************************************");
+       RGXCommentLog(hPrivate, "************** Begin LDR Parsing *************");
+       RGXCommentLog(hPrivate, "**********************************************");
+
+       while (psL1Data != NULL)
+       {
+               if (RGX_META_LDR_BLK_IS_COMMENT(psL1Data->ui16Cmd))
+               {
+                       /* Don't process comment blocks */
+                       goto NextBlock;
+               }
+
+               switch (psL1Data->ui16Cmd & RGX_META_LDR_CMD_MASK)
+               {
+                       case RGX_META_LDR_CMD_LOADMEM:
+                       {
+                               RGX_META_LDR_L2_DATA_BLK *psL2Block =
+                                   (RGX_META_LDR_L2_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->aui32CmdData[1]);
+                               IMG_UINT32 ui32Offset = psL1Data->aui32CmdData[0];
+                               IMG_UINT32 ui32DataSize = psL2Block->ui16Length - 6 /* L2 Tag length and checksum */;
+                               void *pvWriteAddr;
+                               PVRSRV_ERROR eError;
+
+                               if (!RGX_META_IS_COREMEM_CODE(ui32Offset, ui32CorememSize) &&
+                                   !RGX_META_IS_COREMEM_DATA(ui32Offset, ui32CorememSize))
+                               {
+                                       /* Global range is aliased to local range */
+                                       ui32Offset &= ~META_MEM_GLOBAL_RANGE_BIT;
+                               }
+
+                               eError = FindMMUSegment(ui32Offset,
+                                                       pvHostFWCodeAddr,
+                                                       pvHostFWDataAddr,
+                                                       pvHostFWCorememCodeAddr,
+                                                       pvHostFWCorememDataAddr,
+                                                       &pvWriteAddr);
+
+                               if (eError != PVRSRV_OK)
+                               {
+                                       RGXErrorLog(hPrivate,
+                                                   "ProcessLDRCommandStream: Addr 0x%x (size: %d) not found in any segment",
+                                                   ui32Offset, ui32DataSize);
+                                       return eError;
+                               }
+
+                               /* Write to FW allocation only if available */
+                               if (pvWriteAddr)
+                               {
+                                       RGXMemCopy(hPrivate,
+                                                  pvWriteAddr,
+                                                  psL2Block->aui32BlockData,
+                                                  ui32DataSize);
+                               }
+
+                               break;
+                       }
+                       case RGX_META_LDR_CMD_LOADCORE:
+                       case RGX_META_LDR_CMD_LOADMMREG:
+                       {
+                               return PVRSRV_ERROR_INIT_FAILURE;
+                       }
+                       case RGX_META_LDR_CMD_START_THREADS:
+                       {
+                               /* Don't process this block */
+                               break;
+                       }
+                       case RGX_META_LDR_CMD_ZEROMEM:
+                       {
+                               IMG_UINT32 ui32Offset = psL1Data->aui32CmdData[0];
+                               IMG_UINT32 ui32ByteCount = psL1Data->aui32CmdData[1];
+                               void *pvWriteAddr;
+                               PVRSRV_ERROR  eError;
+
+                               if (RGX_META_IS_COREMEM_DATA(ui32Offset, ui32CorememSize))
+                               {
+                                       /* cannot zero coremem directly */
+                                       break;
+                               }
+
+                               /* Global range is aliased to local range */
+                               ui32Offset &= ~META_MEM_GLOBAL_RANGE_BIT;
+
+                               eError = FindMMUSegment(ui32Offset,
+                                                       pvHostFWCodeAddr,
+                                                       pvHostFWDataAddr,
+                                                       pvHostFWCorememCodeAddr,
+                                                       pvHostFWCorememDataAddr,
+                                                       &pvWriteAddr);
+
+                               if (eError != PVRSRV_OK)
+                               {
+                                       RGXErrorLog(hPrivate,
+                                                   "ProcessLDRCommandStream: Addr 0x%x (size: %d) not found in any segment",
+                                                   ui32Offset, ui32ByteCount);
+                                       return eError;
+                               }
+
+                               /* Write to FW allocation only if available */
+                               if (pvWriteAddr)
+                               {
+                                       RGXMemSet(hPrivate, pvWriteAddr, 0, ui32ByteCount);
+                               }
+
+                               break;
+                       }
+                       case RGX_META_LDR_CMD_CONFIG:
+                       {
+                               RGX_META_LDR_L2_DATA_BLK *psL2Block =
+                                   (RGX_META_LDR_L2_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->aui32CmdData[0]);
+                               RGX_META_LDR_CFG_BLK *psConfigCommand = (RGX_META_LDR_CFG_BLK*) psL2Block->aui32BlockData;
+                               IMG_UINT32 ui32L2BlockSize = psL2Block->ui16Length - 6 /* L2 Tag length and checksum */;
+                               IMG_UINT32 ui32CurrBlockSize = 0;
+
+                               while (ui32L2BlockSize)
+                               {
+                                       switch (psConfigCommand->ui32Type)
+                                       {
+                                               case RGX_META_LDR_CFG_PAUSE:
+                                               case RGX_META_LDR_CFG_READ:
+                                               {
+                                                       ui32CurrBlockSize = 8;
+                                                       return PVRSRV_ERROR_INIT_FAILURE;
+                                               }
+                                               case RGX_META_LDR_CFG_WRITE:
+                                               {
+                                                       IMG_UINT32 ui32RegisterOffset = psConfigCommand->aui32BlockData[0];
+                                                       IMG_UINT32 ui32RegisterValue  = psConfigCommand->aui32BlockData[1];
+
+                                                       /* Only write to bootloader if we got a valid
+                                                        * pointer to the FW code allocation
+                                                        */
+                                                       if (pui32BootConf)
+                                                       {
+                                                               /* Do register write */
+                                                               *pui32BootConf++ = ui32RegisterOffset;
+                                                               *pui32BootConf++ = ui32RegisterValue;
+                                                       }
+
+                                                       RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+                                                                     ui32RegisterOffset, ui32RegisterValue);
+
+                                                       ui32CurrBlockSize = 12;
+                                                       break;
+                                               }
+                                               case RGX_META_LDR_CFG_MEMSET:
+                                               case RGX_META_LDR_CFG_MEMCHECK:
+                                               {
+                                                       ui32CurrBlockSize = 20;
+                                                       return PVRSRV_ERROR_INIT_FAILURE;
+                                               }
+                                               default:
+                                               {
+                                                       return PVRSRV_ERROR_INIT_FAILURE;
+                                               }
+                                       }
+                                       ui32L2BlockSize -= ui32CurrBlockSize;
+                                       psConfigCommand = (RGX_META_LDR_CFG_BLK*) (((IMG_UINT8*) psConfigCommand) + ui32CurrBlockSize);
+                               }
+
+                               break;
+                       }
+                       default:
+                       {
+                               return PVRSRV_ERROR_INIT_FAILURE;
+                       }
+               }
+
+NextBlock:
+
+               if (psL1Data->ui32Next == 0xFFFFFFFF)
+               {
+                       psL1Data = NULL;
+               }
+               else
+               {
+                       psL1Data = (RGX_META_LDR_L1_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->ui32Next);
+               }
+       }
+
+       if (pui32BootConf)
+       {
+               *ppui32BootConf = pui32BootConf;
+       }
+
+       RGXCommentLog(hPrivate, "**********************************************");
+       RGXCommentLog(hPrivate, "*************** End LDR Parsing **************");
+       RGXCommentLog(hPrivate, "**********************************************");
+
+       return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function      ProcessELFCommandStream
+
+ @Description   Process a file in the .ELF format, copying code and data
+                sections into their final locations
+
+ @Input         hPrivate                 : Implementation specific data
+ @Input         pbELF                    : Pointer to FW blob
+ @Input         pvHostFWCodeAddr         : Pointer to FW code
+ @Input         pvHostFWDataAddr         : Pointer to FW data
+ @Input         pvHostFWCorememCodeAddr  : Pointer to FW coremem code
+ @Input         pvHostFWCorememDataAddr  : Pointer to FW coremem data
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR ProcessELFCommandStream(const void *hPrivate,
+                                     const IMG_BYTE *pbELF,
+                                     void *pvHostFWCodeAddr,
+                                     void *pvHostFWDataAddr,
+                                     void* pvHostFWCorememCodeAddr,
+                                     void* pvHostFWCorememDataAddr)
+{
+       IMG_UINT32 ui32Entry;
+       IMG_ELF_HDR *psHeader = (IMG_ELF_HDR *)pbELF;
+       IMG_ELF_PROGRAM_HDR *psProgramHeader =
+           (IMG_ELF_PROGRAM_HDR *)(pbELF + psHeader->ui32Ephoff);
+       PVRSRV_ERROR eError;
+
+       for (ui32Entry = 0; ui32Entry < psHeader->ui32Ephnum; ui32Entry++, psProgramHeader++)
+       {
+               void *pvWriteAddr;
+
+               /* Only consider loadable entries in the ELF segment table */
+               if (psProgramHeader->ui32Ptype != ELF_PT_LOAD) continue;
+
+               eError = FindMMUSegment(psProgramHeader->ui32Pvaddr,
+                                       pvHostFWCodeAddr,
+                                       pvHostFWDataAddr,
+                                       pvHostFWCorememCodeAddr,
+                                       pvHostFWCorememDataAddr,
+                                       &pvWriteAddr);
+
+               if (eError != PVRSRV_OK)
+               {
+                       RGXErrorLog(hPrivate,
+                                   "%s: Addr 0x%x (size: %d) not found in any segment", __func__,
+                                   psProgramHeader->ui32Pvaddr,
+                                   psProgramHeader->ui32Pfilesz);
+                       return eError;
+               }
+
+               /* Write to FW allocation only if available */
+               if (pvWriteAddr)
+               {
+                       RGXMemCopy(hPrivate,
+                                  pvWriteAddr,
+                                  (IMG_PBYTE)(pbELF + psProgramHeader->ui32Poffset),
+                                  psProgramHeader->ui32Pfilesz);
+
+                       RGXMemSet(hPrivate,
+                                 (IMG_PBYTE)pvWriteAddr + psProgramHeader->ui32Pfilesz,
+                                 0,
+                                 psProgramHeader->ui32Pmemsz - psProgramHeader->ui32Pfilesz);
+               }
+       }
+
+       return PVRSRV_OK;
+}
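+
+/* Editorial sketch (not driver code, kept under #if 0): the same PT_LOAD rule as
+ * applied above, restated with the standard <elf.h> types for a 32-bit image. The
+ * driver itself uses its own IMG_ELF_* mirror structures and RGXMemCopy/RGXMemSet;
+ * the helper below is a hypothetical host-side illustration only.
+ */
+#if 0
+#include <elf.h>
+#include <string.h>
+
+static void load_pt_load_segment(const unsigned char *pbImage,
+                                 const Elf32_Phdr *psPhdr,
+                                 unsigned char *pbDest)
+{
+	if (psPhdr->p_type != PT_LOAD)
+		return;                               /* skip non-loadable entries */
+
+	/* Copy the file-backed part of the segment... */
+	memcpy(pbDest, pbImage + psPhdr->p_offset, psPhdr->p_filesz);
+
+	/* ...and zero-fill the remainder up to the in-memory size (.bss style) */
+	memset(pbDest + psPhdr->p_filesz, 0, psPhdr->p_memsz - psPhdr->p_filesz);
+}
+#endif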
+
+IMG_UINT32 RGXGetFWImageSectionOffset(const void *hPrivate, RGX_FW_SECTION_ID eId)
+{
+       RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId);
+
+       return psEntry->ui32AllocOffset;
+}
+
+IMG_UINT32 RGXGetFWImageSectionMaxSize(const void *hPrivate, RGX_FW_SECTION_ID eId)
+{
+       RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId);
+
+       return psEntry->ui32MaxSize;
+}
+
+IMG_UINT32 RGXGetFWImageSectionAllocSize(const void *hPrivate, RGX_FW_SECTION_ID eId)
+{
+       RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId);
+
+       return psEntry->ui32AllocSize;
+}
+
+IMG_UINT32 RGXGetFWImageSectionAddress(const void *hPrivate, RGX_FW_SECTION_ID eId)
+{
+       RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId);
+
+       return psEntry->ui32BaseAddr;
+}
+
+PVRSRV_ERROR RGXGetFWImageAllocSize(const void *hPrivate,
+                                    const IMG_BYTE    *pbRGXFirmware,
+                                    const IMG_UINT32  ui32RGXFirmwareSize,
+                                    IMG_DEVMEM_SIZE_T *puiFWCodeAllocSize,
+                                    IMG_DEVMEM_SIZE_T *puiFWDataAllocSize,
+                                    IMG_DEVMEM_SIZE_T *puiFWCorememCodeAllocSize,
+                                    IMG_DEVMEM_SIZE_T *puiFWCorememDataAllocSize)
+{
+       RGX_FW_INFO_HEADER *psInfoHeader;
+       const IMG_BYTE *pbRGXFirmwareInfo;
+       const IMG_BYTE *pbRGXFirmwareLayout;
+       IMG_UINT32 i;
+
+       if (pbRGXFirmware == NULL || ui32RGXFirmwareSize == 0 || ui32RGXFirmwareSize <= FW_BLOCK_SIZE)
+       {
+               RGXErrorLog(hPrivate, "%s: Invalid FW binary at %p, size %u",
+                           __func__, pbRGXFirmware, ui32RGXFirmwareSize);
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+
+       /*
+        * Acquire pointer to the FW info header within the FW image.
+        * The format of the header in the FW image might not be the one expected
+        * by the driver, but the driver should still be able to correctly read
+        * the information below, as long as new/incompatible elements are added
+        * at the end of the header (they will be ignored by the driver).
+        */
+
+       pbRGXFirmwareInfo = pbRGXFirmware + ui32RGXFirmwareSize - FW_BLOCK_SIZE;
+       psInfoHeader = (RGX_FW_INFO_HEADER*)pbRGXFirmwareInfo;
+
+       /* If any of the following checks fails, the FW will likely not work properly */
+
+       if (psInfoHeader->ui32InfoVersion != FW_INFO_VERSION)
+       {
+               RGXErrorLog(hPrivate, "%s: FW info version mismatch (expected: %u, found: %u)",
+                           __func__,
+                           (IMG_UINT32) FW_INFO_VERSION,
+                           psInfoHeader->ui32InfoVersion);
+       }
+
+       if (psInfoHeader->ui32HeaderLen != sizeof(RGX_FW_INFO_HEADER))
+       {
+               RGXErrorLog(hPrivate, "%s: FW info header sizes mismatch (expected: %u, found: %u)",
+                           __func__,
+                           (IMG_UINT32) sizeof(RGX_FW_INFO_HEADER),
+                           psInfoHeader->ui32HeaderLen);
+       }
+
+       if (psInfoHeader->ui32LayoutEntrySize != sizeof(RGX_FW_LAYOUT_ENTRY))
+       {
+               RGXErrorLog(hPrivate, "%s: FW layout entry sizes mismatch (expected: %u, found: %u)",
+                           __func__,
+                           (IMG_UINT32) sizeof(RGX_FW_LAYOUT_ENTRY),
+                           psInfoHeader->ui32LayoutEntrySize);
+       }
+
+       if (psInfoHeader->ui32LayoutEntryNum > MAX_NUM_ENTRIES)
+       {
+               RGXErrorLog(hPrivate, "%s: Not enough storage for the FW layout table (max: %u entries, found: %u)",
+                           __func__,
+                           MAX_NUM_ENTRIES,
+                           psInfoHeader->ui32LayoutEntryNum);
+       }
+
+#if defined(RGX_FEATURE_MIPS_BIT_MASK)
+       if (RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS))
+       {
+               if (psInfoHeader->ui32FwPageSize != RGXGetOSPageSize(hPrivate))
+               {
+                       RGXErrorLog(hPrivate, "%s: FW page size mismatch (expected: %u, found: %u)",
+                                   __func__,
+                                   (IMG_UINT32) RGXGetOSPageSize(hPrivate),
+                                   psInfoHeader->ui32FwPageSize);
+                       return PVRSRV_ERROR_INVALID_PARAMS;
+               }
+       }
+#endif
+
+       ui32LayoutEntryNum = psInfoHeader->ui32LayoutEntryNum;
+
+
+       /*
+        * Copy FW layout table from FW image to local array.
+        * One entry is copied at a time and the copy is limited to what the driver
+        * expects to find in it. Assuming that new/incompatible elements
+        * are added at the end of each entry, the loop below adapts the table
+        * in the FW image into the format expected by the driver.
+        */
+
+       pbRGXFirmwareLayout = pbRGXFirmwareInfo + psInfoHeader->ui32HeaderLen;
+
+       for (i = 0; i < ui32LayoutEntryNum; i++)
+       {
+               RGX_FW_LAYOUT_ENTRY *psOutEntry = &asRGXFWLayoutTable[i];
+
+               RGX_FW_LAYOUT_ENTRY *psInEntry = (RGX_FW_LAYOUT_ENTRY*)
+                       (pbRGXFirmwareLayout + i * psInfoHeader->ui32LayoutEntrySize);
+
+               RGXMemCopy(hPrivate,
+                          (void*)psOutEntry,
+                          (void*)psInEntry,
+                          sizeof(RGX_FW_LAYOUT_ENTRY));
+       }
+
+
+       /* Calculate how much memory the FW needs for its code and data segments */
+
+       *puiFWCodeAllocSize = 0;
+       *puiFWDataAllocSize = 0;
+       *puiFWCorememCodeAllocSize = 0;
+       *puiFWCorememDataAllocSize = 0;
+
+       for (i = 0; i < ui32LayoutEntryNum; i++)
+       {
+               switch (asRGXFWLayoutTable[i].eType)
+               {
+                       case FW_CODE:
+                               *puiFWCodeAllocSize += asRGXFWLayoutTable[i].ui32AllocSize;
+                               break;
+
+                       case FW_DATA:
+                               *puiFWDataAllocSize += asRGXFWLayoutTable[i].ui32AllocSize;
+                               break;
+
+                       case FW_COREMEM_CODE:
+                               *puiFWCorememCodeAllocSize += asRGXFWLayoutTable[i].ui32AllocSize;
+                               break;
+
+                       case FW_COREMEM_DATA:
+                               *puiFWCorememDataAllocSize += asRGXFWLayoutTable[i].ui32AllocSize;
+                               break;
+
+                       default:
+                               RGXErrorLog(hPrivate, "%s: Unknown FW section type %u\n",
+                                           __func__, asRGXFWLayoutTable[i].eType);
+                               break;
+               }
+       }
+
+       return PVRSRV_OK;
+}
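+
+/* Editorial sketch (not driver code, kept under #if 0): the forward-compatibility
+ * rule behind the layout-table copy loop above, reduced to its essentials. Source
+ * entries may be larger than the driver's RGX_FW_LAYOUT_ENTRY if newer firmware
+ * appended fields; copying only the leading destination-entry-sized part and
+ * stepping by the image's own entry size silently drops the extra bytes. The names
+ * below are generic placeholders, and the source entry size is assumed to be at
+ * least the destination entry size.
+ */
+#if 0
+static void adapt_layout_table(unsigned char *pbDstTable, size_t uiDstEntrySize,
+                               size_t uiEntryCount,
+                               const unsigned char *pbSrcTable, size_t uiSrcEntrySize)
+{
+	size_t i, j;
+
+	for (i = 0; i < uiEntryCount; i++)
+	{
+		/* Copy only the part of each source entry that the driver understands */
+		for (j = 0; j < uiDstEntrySize; j++)
+		{
+			pbDstTable[i * uiDstEntrySize + j] = pbSrcTable[i * uiSrcEntrySize + j];
+		}
+		/* Any trailing bytes appended by newer firmware are ignored */
+	}
+}
+#endif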
+
+
+PVRSRV_ERROR RGXProcessFWImage(const void *hPrivate,
+                               const IMG_BYTE *pbRGXFirmware,
+                               void *pvFWCode,
+                               void *pvFWData,
+                               void *pvFWCorememCode,
+                               void *pvFWCorememData,
+                               PVRSRV_FW_BOOT_PARAMS *puFWParams)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_BOOL bMIPS = IMG_FALSE;
+#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK)
+       IMG_BOOL bRISCV = RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR);
+#endif
+       IMG_BOOL bMETA;
+
+#if defined(RGX_FEATURE_MIPS_BIT_MASK)
+       bMIPS = (IMG_BOOL)RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS);
+#endif
+#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK)
+       bMETA = (IMG_BOOL)(!bMIPS && !bRISCV);
+#else
+       bMETA = !bMIPS;
+#endif
+
+       if (bMETA)
+       {
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+               IMG_UINT32 *pui32BootConf = NULL;
+               /* Skip bootloader configuration if a pointer to the FW code
+                * allocation is not available
+                */
+               if (pvFWCode)
+               {
+                       /* This variable points to the bootloader code which is mostly
+                        * a sequence of <register address,register value> pairs
+                        */
+                       pui32BootConf = ((IMG_UINT32*) pvFWCode) + RGXFW_BOOTLDR_CONF_OFFSET;
+
+                       /* Slave port and JTAG accesses are privileged */
+                       *pui32BootConf++ = META_CR_SYSC_JTAG_THREAD;
+                       *pui32BootConf++ = META_CR_SYSC_JTAG_THREAD_PRIV_EN;
+
+                       RGXFWConfigureSegMMU(hPrivate,
+                                            &puFWParams->sMeta.sFWCodeDevVAddr,
+                                            &puFWParams->sMeta.sFWDataDevVAddr,
+                                            &pui32BootConf);
+               }
+
+               /* Process FW image data stream */
+               eError = ProcessLDRCommandStream(hPrivate,
+                                                pbRGXFirmware,
+                                                pvFWCode,
+                                                pvFWData,
+                                                pvFWCorememCode,
+                                                pvFWCorememData,
+                                                &pui32BootConf);
+               if (eError != PVRSRV_OK)
+               {
+                       RGXErrorLog(hPrivate, "RGXProcessFWImage: Processing FW image failed (%d)", eError);
+                       return eError;
+               }
+
+               /* Skip bootloader configuration if a pointer to the FW code
+                * allocation is not available
+                */
+               if (pvFWCode)
+               {
+                       IMG_UINT32 ui32NumThreads   = puFWParams->sMeta.ui32NumThreads;
+
+                       if ((ui32NumThreads == 0) || (ui32NumThreads > 2))
+                       {
+                               RGXErrorLog(hPrivate,
+                                           "RGXProcessFWImage: Wrong Meta thread configuration, using one thread only");
+
+                               ui32NumThreads = 1;
+                       }
+
+                       RGXFWConfigureMetaCaches(hPrivate,
+                                                ui32NumThreads,
+                                                &pui32BootConf);
+
+                       /* Signal the end of the conf sequence */
+                       *pui32BootConf++ = 0x0;
+                       *pui32BootConf++ = 0x0;
+
+                       if (puFWParams->sMeta.uiFWCorememCodeSize && (puFWParams->sMeta.sFWCorememCodeFWAddr.ui32Addr != 0))
+                       {
+                               *pui32BootConf++ = puFWParams->sMeta.sFWCorememCodeFWAddr.ui32Addr;
+                               *pui32BootConf++ = puFWParams->sMeta.uiFWCorememCodeSize;
+                       }
+                       else
+                       {
+                               *pui32BootConf++ = 0;
+                               *pui32BootConf++ = 0;
+                       }
+
+#if defined(RGX_FEATURE_META_DMA_BIT_MASK)
+                       if (RGX_DEVICE_HAS_FEATURE(hPrivate, META_DMA))
+                       {
+                               *pui32BootConf++ = (IMG_UINT32) (puFWParams->sMeta.sFWCorememCodeDevVAddr.uiAddr >> 32);
+                               *pui32BootConf++ = (IMG_UINT32) puFWParams->sMeta.sFWCorememCodeDevVAddr.uiAddr;
+                       }
+                       else
+#endif
+                       {
+                               *pui32BootConf++ = 0;
+                               *pui32BootConf++ = 0;
+                       }
+               }
+#endif /* defined(RGX_FEATURE_META_MAX_VALUE_IDX) */
+       }
+#if defined(RGXMIPSFW_MAX_NUM_PAGETABLE_PAGES)
+       else if (bMIPS)
+       {
+               /* Process FW image data stream */
+               eError = ProcessELFCommandStream(hPrivate,
+                                                pbRGXFirmware,
+                                                pvFWCode,
+                                                pvFWData,
+                                                NULL,
+                                                NULL);
+               if (eError != PVRSRV_OK)
+               {
+                       RGXErrorLog(hPrivate, "RGXProcessFWImage: Processing FW image failed (%d)", eError);
+                       return eError;
+               }
+
+               if (pvFWData)
+               {
+                       RGXMIPSFW_BOOT_DATA *psBootData = (RGXMIPSFW_BOOT_DATA*)
+                               /* To get a pointer to the bootloader configuration data start from a pointer to the FW image... */
+                               IMG_OFFSET_ADDR(pvFWData,
+                               /* ... jump to the boot/NMI data page... */
+                               (RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA)
+                               /* ... and then jump to the bootloader data offset within the page */
+                               + RGXMIPSFW_BOOTLDR_CONF_OFFSET));
+
+                       /* Rogue Registers physical address */
+                       psBootData->ui64RegBase = puFWParams->sMips.sGPURegAddr.uiAddr;
+
+                       /* MIPS Page Table physical address */
+                       psBootData->ui32PTLog2PageSize = puFWParams->sMips.ui32FWPageTableLog2PageSize;
+                       psBootData->ui32PTNumPages     = puFWParams->sMips.ui32FWPageTableNumPages;
+                       psBootData->aui64PTPhyAddr[0U] = puFWParams->sMips.asFWPageTableAddr[0U].uiAddr;
+                       psBootData->aui64PTPhyAddr[1U] = puFWParams->sMips.asFWPageTableAddr[1U].uiAddr;
+                       psBootData->aui64PTPhyAddr[2U] = puFWParams->sMips.asFWPageTableAddr[2U].uiAddr;
+                       psBootData->aui64PTPhyAddr[3U] = puFWParams->sMips.asFWPageTableAddr[3U].uiAddr;
+
+                       /* MIPS Stack Pointer Physical Address */
+                       psBootData->ui64StackPhyAddr = puFWParams->sMips.sFWStackAddr.uiAddr;
+
+                       /* Reserved for future use */
+                       psBootData->ui32Reserved1 = 0;
+                       psBootData->ui32Reserved2 = 0;
+               }
+       }
+#endif /* #if defined(RGXMIPSFW_MAX_NUM_PAGETABLE_PAGES) */
+       else
+       {
+               /* Process FW image data stream */
+               eError = ProcessELFCommandStream(hPrivate,
+                                                pbRGXFirmware,
+                                                pvFWCode,
+                                                pvFWData,
+                                                pvFWCorememCode,
+                                                pvFWCorememData);
+               if (eError != PVRSRV_OK)
+               {
+                       RGXErrorLog(hPrivate, "RGXProcessFWImage: Processing FW image failed (%d)", eError);
+                       return eError;
+               }
+
+               if (pvFWData)
+               {
+                       RGXRISCVFW_BOOT_DATA *psBootData = (RGXRISCVFW_BOOT_DATA*)
+                               IMG_OFFSET_ADDR(pvFWData, RGXRISCVFW_BOOTLDR_CONF_OFFSET);
+
+                       psBootData->ui64CorememCodeDevVAddr = puFWParams->sRISCV.sFWCorememCodeDevVAddr.uiAddr;
+                       psBootData->ui32CorememCodeFWAddr   = puFWParams->sRISCV.sFWCorememCodeFWAddr.ui32Addr;
+                       psBootData->ui32CorememCodeSize     = puFWParams->sRISCV.uiFWCorememCodeSize;
+
+                       psBootData->ui64CorememDataDevVAddr = puFWParams->sRISCV.sFWCorememDataDevVAddr.uiAddr;
+                       psBootData->ui32CorememDataFWAddr   = puFWParams->sRISCV.sFWCorememDataFWAddr.ui32Addr;
+                       psBootData->ui32CorememDataSize     = puFWParams->sRISCV.uiFWCorememDataSize;
+               }
+       }
+
+       return eError;
+}
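+
+/* Editorial sketch (not driver code, kept under #if 0): the Meta bootloader
+ * configuration built above is a flat stream of <register address, register value>
+ * pairs; a 0x0/0x0 pair marks the end of the register-write sequence (further boot
+ * parameters follow it above). A hypothetical emitter for such a stream, using only
+ * conventions visible in this file:
+ */
+#if 0
+static IMG_UINT32 *EmitBootConfRegWrite(IMG_UINT32 *pui32Conf,
+                                        IMG_UINT32 ui32RegAddr,
+                                        IMG_UINT32 ui32RegValue)
+{
+	*pui32Conf++ = ui32RegAddr;    /* register the bootloader should write */
+	*pui32Conf++ = ui32RegValue;   /* value to write into that register    */
+	return pui32Conf;
+}
+
+static IMG_UINT32 *EmitBootConfEnd(IMG_UINT32 *pui32Conf)
+{
+	*pui32Conf++ = 0x0;            /* zero address/value pair signals the  */
+	*pui32Conf++ = 0x0;            /* end of the register-write sequence   */
+	return pui32Conf;
+}
+#endif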
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxfwimageutils.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxfwimageutils.h
new file mode 100644 (file)
index 0000000..e5f9a2a
--- /dev/null
@@ -0,0 +1,223 @@
+/*************************************************************************/ /*!
+@File
+@Title          Header for Services Firmware image utilities used at init time
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for Services Firmware image utilities used at init time
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXFWIMAGEUTILS_H
+#define RGXFWIMAGEUTILS_H
+
+/* The routines declared here are built on top of an abstraction layer to
+ * hide DDK/OS-specific details in case they are used outside of the DDK
+ * (e.g. when DRM security is enabled).
+ * Any new dependency should be added to rgxlayer.h.
+ * Any new code should be built on top of the existing abstraction layer,
+ * which should be extended when necessary.
+ */
+#include "rgxlayer.h"
+
+/*!
+*******************************************************************************
+
+ @Function     RGXGetFWImageSectionOffset
+
+ @Input        hPrivate : Implementation specific data
+ @Input        eId      : Section id
+
+ @Description  Return offset of a Firmware section, relative to the beginning
+               of the code or data allocation (depending on the section id)
+
+******************************************************************************/
+IMG_UINT32 RGXGetFWImageSectionOffset(const void *hPrivate,
+                                      RGX_FW_SECTION_ID eId);
+
+/*!
+*******************************************************************************
+
+ @Function     RGXGetFWImageSectionMaxSize
+
+ @Input        hPrivate : Implementation specific data
+ @Input        eId      : Section id
+
+ @Description  Return maximum size (not allocation size) of a Firmware section
+
+******************************************************************************/
+IMG_UINT32 RGXGetFWImageSectionMaxSize(const void *hPrivate,
+                                       RGX_FW_SECTION_ID eId);
+
+/*!
+*******************************************************************************
+
+ @Function     RGXGetFWImageSectionAllocSize
+
+ @Input        hPrivate : Implementation specific data
+ @Input        eId      : Section id
+
+ @Description  Return allocation size of a Firmware section
+
+******************************************************************************/
+IMG_UINT32 RGXGetFWImageSectionAllocSize(const void *hPrivate,
+                                         RGX_FW_SECTION_ID eId);
+
+/*!
+*******************************************************************************
+
+ @Function     RGXGetFWImageSectionAddress
+
+ @Input        hPrivate : Implementation specific data
+ @Input        eId      : Section id
+
+ @Description  Return base address of a Firmware section
+
+******************************************************************************/
+IMG_UINT32 RGXGetFWImageSectionAddress(const void *hPrivate,
+                                       RGX_FW_SECTION_ID eId);
+
+/*!
+*******************************************************************************
+
+ @Function     RGXGetFWImageAllocSize
+
+ @Description  Return size of Firmware code/data/coremem code allocations
+
+ @Input        hPrivate            : Implementation specific data
+ @Input        pbRGXFirmware       : Pointer to FW binary
+ @Input        ui32RGXFirmwareSize : FW binary size
+ @Output       puiFWCodeAllocSize  : Code size
+ @Output       puiFWDataAllocSize  : Data size
+ @Output       puiFWCorememCodeAllocSize : Coremem code size (0 if N/A)
+ @Output       puiFWCorememDataAllocSize : Coremem data size (0 if N/A)
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXGetFWImageAllocSize(const void *hPrivate,
+                                    const IMG_BYTE    *pbRGXFirmware,
+                                    const IMG_UINT32  ui32RGXFirmwareSize,
+                                    IMG_DEVMEM_SIZE_T *puiFWCodeAllocSize,
+                                    IMG_DEVMEM_SIZE_T *puiFWDataAllocSize,
+                                    IMG_DEVMEM_SIZE_T *puiFWCorememCodeAllocSize,
+                                    IMG_DEVMEM_SIZE_T *puiFWCorememDataAllocSize);
+
+/*!
+*******************************************************************************
+
+ @Function      ProcessLDRCommandStream
+
+ @Description   Process the output of the Meta toolchain in the .LDR format,
+                copying code and data sections into their final location and
+                passing some information to the Meta bootloader
+
+ @Input         hPrivate                 : Implementation specific data
+ @Input         pbLDR                    : Pointer to FW blob
+ @Input         pvHostFWCodeAddr         : Pointer to FW code
+ @Input         pvHostFWDataAddr         : Pointer to FW data
+ @Input         pvHostFWCorememCodeAddr  : Pointer to FW coremem code
+ @Input         pvHostFWCorememDataAddr  : Pointer to FW coremem data
+ @Input         ppui32BootConf           : Pointer to bootloader data
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR ProcessLDRCommandStream(const void *hPrivate,
+                                     const IMG_BYTE* pbLDR,
+                                     void* pvHostFWCodeAddr,
+                                     void* pvHostFWDataAddr,
+                                     void* pvHostFWCorememCodeAddr,
+                                     void* pvHostFWCorememDataAddr,
+                                     IMG_UINT32 **ppui32BootConf);
+
+/*!
+*******************************************************************************
+
+ @Function      ProcessELFCommandStream
+
+ @Description   Process a file in .ELF format, copying code and data sections
+                into their final location
+
+ @Input         hPrivate                 : Implementation specific data
+ @Input         pbELF                    : Pointer to FW blob
+ @Input         pvHostFWCodeAddr         : Pointer to FW code
+ @Input         pvHostFWDataAddr         : Pointer to FW data
+ @Input         pvHostFWCorememCodeAddr  : Pointer to FW coremem code
+ @Input         pvHostFWCorememDataAddr  : Pointer to FW coremem data
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR ProcessELFCommandStream(const void *hPrivate,
+                                     const IMG_BYTE *pbELF,
+                                     void *pvHostFWCodeAddr,
+                                     void *pvHostFWDataAddr,
+                                     void* pvHostFWCorememCodeAddr,
+                                     void* pvHostFWCorememDataAddr);
+
+/*!
+*******************************************************************************
+
+ @Function     RGXProcessFWImage
+
+ @Description  Process the Firmware binary blob copying code and data
+               sections into their final location and passing some
+               information to the Firmware bootloader.
+               If a pointer to the final memory location for FW code or data
+               is not valid (NULL), the corresponding section will not be
+               processed.
+
+ @Input        hPrivate        : Implementation specific data
+ @Input        pbRGXFirmware   : Pointer to FW blob
+ @Input        pvFWCode        : Pointer to FW code
+ @Input        pvFWData        : Pointer to FW data
+ @Input        pvFWCorememCode : Pointer to FW coremem code
+ @Input        pvFWCorememData : Pointer to FW coremem data
+ @Input        puFWParams      : Parameters used by the FW at boot time
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXProcessFWImage(const void *hPrivate,
+                               const IMG_BYTE *pbRGXFirmware,
+                               void *pvFWCode,
+                               void *pvFWData,
+                               void *pvFWCorememCode,
+                               void *pvFWCorememData,
+                               PVRSRV_FW_BOOT_PARAMS *puFWParams);
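+
+/* Editorial sketch (not driver code, kept under #if 0): the call order implied by
+ * the descriptions above. The allocation sizes are queried first, the caller then
+ * allocates the FW code/data buffers by whatever platform-specific means it has,
+ * and finally the blob is processed into those buffers. All variable names and the
+ * allocation step are placeholders.
+ */
+#if 0
+	IMG_DEVMEM_SIZE_T uiCodeSize, uiDataSize, uiCorememCodeSize, uiCorememDataSize;
+	PVRSRV_FW_BOOT_PARAMS uFWParams = { 0 };
+	PVRSRV_ERROR eError;
+
+	eError = RGXGetFWImageAllocSize(hPrivate, pbFirmware, ui32FirmwareSize,
+	                                &uiCodeSize, &uiDataSize,
+	                                &uiCorememCodeSize, &uiCorememDataSize);
+
+	/* ...allocate pvCode/pvData/pvCorememCode/pvCorememData of those sizes... */
+
+	eError = RGXProcessFWImage(hPrivate, pbFirmware,
+	                           pvCode, pvData, pvCorememCode, pvCorememData,
+	                           &uFWParams);
+#endif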
+
+#endif /* RGXFWIMAGEUTILS_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxfwtrace_strings.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxfwtrace_strings.c
new file mode 100644 (file)
index 0000000..d950508
--- /dev/null
@@ -0,0 +1,56 @@
+/*************************************************************************/ /*!
+@File           rgxfwtrace_strings.c
+@Title          RGX Firmware trace strings
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "rgx_fwif_sf.h"
+#include "fwtrace_string.h"
+
+/* The tuple pairs that will be generated using XMacros will be stored here.
+ * This macro definition must match the definition of SFids in rgx_fwif_sf.h
+ */
+const RGXKM_STID_FMT SFs[]= {
+#define X(a, b, c, d, e) { RGXFW_LOG_CREATESFID(a,b,e), d },
+       RGXFW_LOG_SFIDLIST
+#undef X
+};
+
+const IMG_UINT32 g_ui32SFsCount = ARRAY_SIZE(SFs);
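+
+/* Editorial sketch (not driver code, kept under #if 0): the X-macro pattern used
+ * above, with generic placeholder names. A single list macro is expanded once to
+ * build an enum and once to build a matching string table, so the two can never
+ * drift out of sync; the same idea ties SFs[] to the SFids list in rgx_fwif_sf.h.
+ */
+#if 0
+#define EXAMPLE_EVENT_LIST \
+	X(EXAMPLE_EV_BOOT,  "firmware booted") \
+	X(EXAMPLE_EV_KICK,  "kick received")   \
+	X(EXAMPLE_EV_FAULT, "page fault")
+
+enum example_event {
+#define X(id, str) id,
+	EXAMPLE_EVENT_LIST
+#undef X
+};
+
+static const char *const example_event_str[] = {
+#define X(id, str) str,
+	EXAMPLE_EVENT_LIST
+#undef X
+};
+#endif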
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxhwperf_common.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxhwperf_common.c
new file mode 100644 (file)
index 0000000..e2b472d
--- /dev/null
@@ -0,0 +1,3715 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX HW Performance implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX HW Performance implementation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "rgxdevice.h"
+#include "pvrsrv_error.h"
+#include "pvr_notifier.h"
+#include "osfunc.h"
+#include "allocmem.h"
+
+#include "pvrsrv.h"
+#include "pvrsrv_tlstreams.h"
+#include "pvrsrv_tlcommon.h"
+#include "tlclient.h"
+#include "tlstream.h"
+
+#include "rgxhwperf.h"
+#include "rgxapi_km.h"
+#include "rgxfwutils.h"
+#include "rgxtimecorr.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "pdump_km.h"
+#include "pvrsrv_apphint.h"
+#include "process_stats.h"
+#include "rgx_hwperf_table.h"
+#include "rgxinit.h"
+
+#include "info_page_defs.h"
+
+/* This is defined by default to enable producer callbacks.
+ * Clients of the TL interface can disable the use of the callback
+ * with PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK. */
+#define SUPPORT_TL_PRODUCER_CALLBACK 1
+
+/* Maximum enum value to prevent access to RGX_HWPERF_STREAM_ID2_CLIENT stream */
+#define RGX_HWPERF_MAX_STREAM_ID (RGX_HWPERF_STREAM_ID2_CLIENT)
+
+/* Defines size of buffers returned from acquire/release calls */
+#define FW_STREAM_BUFFER_SIZE (0x80000)
+#define HOST_STREAM_BUFFER_SIZE (0x20000)
+
+/* Must be at least as large as two tl packets of maximum size */
+static_assert(HOST_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1),
+              "HOST_STREAM_BUFFER_SIZE is less than (PVRSRVTL_MAX_PACKET_SIZE<<1)");
+static_assert(FW_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1),
+              "FW_STREAM_BUFFER_SIZE is less than (PVRSRVTL_MAX_PACKET_SIZE<<1)");
+
+IMG_INTERNAL /*static inline*/ IMG_UINT32 RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **);
+
+static inline IMG_UINT32
+RGXHWPerfGetPackets(IMG_UINT32  ui32BytesExp,
+                    IMG_UINT32  ui32AllowedSize,
+                    RGX_PHWPERF_V2_PACKET_HDR psCurPkt )
+{
+       IMG_UINT32 sizeSum = 0;
+
+       /* Traverse the array to find how many packets will fit in the available space. */
+       while ( sizeSum < ui32BytesExp  &&
+                       sizeSum + RGX_HWPERF_GET_SIZE(psCurPkt) < ui32AllowedSize )
+       {
+               sizeSum += RGX_HWPERF_GET_SIZE(psCurPkt);
+               psCurPkt = RGX_HWPERF_GET_NEXT_PACKET(psCurPkt);
+       }
+
+       return sizeSum;
+}
+
+static inline void
+RGXSuspendHWPerfL2DataCopy(PVRSRV_RGXDEV_INFO* psDeviceInfo,
+                          IMG_BOOL bIsReaderConnected)
+{
+       if (!bIsReaderConnected)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "%s : HWPerf FW events enabled but host buffer for FW events is full "
+                       "and no reader is currently connected, suspending event collection. "
+                       "Connect a reader or restart driver to avoid event loss.", __func__));
+               psDeviceInfo->bSuspendHWPerfL2DataCopy = IMG_TRUE;
+       }
+}
+
+/*
+       RGXHWPerfCopyDataL1toL2
+ */
+static IMG_UINT32 RGXHWPerfCopyDataL1toL2(PVRSRV_RGXDEV_INFO* psDeviceInfo,
+                                          IMG_BYTE   *pbFwBuffer,
+                                          IMG_UINT32 ui32BytesExp)
+{
+       IMG_HANDLE   hHWPerfStream = psDeviceInfo->hHWPerfStream;
+       IMG_BYTE *   pbL2Buffer;
+       IMG_UINT32   ui32L2BufFree;
+       IMG_UINT32   ui32BytesCopied = 0;
+       IMG_UINT32   ui32BytesExpMin = RGX_HWPERF_GET_SIZE(RGX_HWPERF_GET_PACKET(pbFwBuffer));
+       PVRSRV_ERROR eError;
+       IMG_BOOL     bIsReaderConnected;
+
+       /* HWPERF_MISR_FUNC_DEBUG enables debug code for investigating HWPerf issues */
+#ifdef HWPERF_MISR_FUNC_DEBUG
+       static IMG_UINT32 gui32Ordinal = IMG_UINT32_MAX;
+#endif
+
+       PVR_DPF_ENTERED;
+
+#ifdef HWPERF_MISR_FUNC_DEBUG
+       PVR_DPF((PVR_DBG_VERBOSE, "EVENTS to copy from 0x%p length:%05d",
+                       pbFwBuffer, ui32BytesExp));
+#endif
+
+#ifdef HWPERF_MISR_FUNC_DEBUG
+       {
+               /* Check the incoming buffer of data has not lost any packets */
+               IMG_BYTE *pbFwBufferIter = pbFwBuffer;
+               IMG_BYTE *pbFwBufferEnd = pbFwBuffer+ui32BytesExp;
+               do
+               {
+                       RGX_HWPERF_V2_PACKET_HDR *asCurPos = RGX_HWPERF_GET_PACKET(pbFwBufferIter);
+                       IMG_UINT32 ui32CurOrdinal = asCurPos->ui32Ordinal;
+                       if (gui32Ordinal != IMG_UINT32_MAX)
+                       {
+                               if ((gui32Ordinal+1) != ui32CurOrdinal)
+                               {
+                                       if (gui32Ordinal < ui32CurOrdinal)
+                                       {
+                                               PVR_DPF((PVR_DBG_WARNING,
+                                                               "HWPerf [%p] packets lost (%u packets) between ordinal %u...%u",
+                                                               pbFwBufferIter,
+                                                               ui32CurOrdinal - gui32Ordinal - 1,
+                                                               gui32Ordinal,
+                                                               ui32CurOrdinal));
+                                       }
+                                       else
+                                       {
+                                               PVR_DPF((PVR_DBG_WARNING,
+                                                               "HWPerf [%p] packet ordinal out of sequence last: %u, current: %u",
+                                                               pbFwBufferIter,
+                                                               gui32Ordinal,
+                                                               ui32CurOrdinal));
+                                       }
+                               }
+                       }
+                       gui32Ordinal = asCurPos->ui32Ordinal;
+                       pbFwBufferIter += RGX_HWPERF_GET_SIZE(asCurPos);
+               } while (pbFwBufferIter < pbFwBufferEnd);
+       }
+#endif
+
+       if (ui32BytesExp > psDeviceInfo->ui32L2BufMaxPacketSize)
+       {
+               IMG_UINT32 sizeSum = RGXHWPerfGetPackets(ui32BytesExp,
+                                                        psDeviceInfo->ui32L2BufMaxPacketSize,
+                                                        RGX_HWPERF_GET_PACKET(pbFwBuffer));
+
+               if (0 != sizeSum)
+               {
+                       ui32BytesExp = sizeSum;
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "Failed to write data into host buffer as "
+                                       "packet is too big and hence it breaches TL "
+                                       "packet size limit (TLBufferSize / 2.5)"));
+                       goto e0;
+               }
+       }
+
+       /* Try submitting all data in one TL packet. */
+       eError = TLStreamReserve2(hHWPerfStream,
+                                 &pbL2Buffer,
+                                 (size_t)ui32BytesExp, ui32BytesExpMin,
+                                 &ui32L2BufFree, &bIsReaderConnected);
+       if ( eError == PVRSRV_OK )
+       {
+               OSDeviceMemCopy( pbL2Buffer, pbFwBuffer, (size_t)ui32BytesExp );
+               eError = TLStreamCommit(hHWPerfStream, (size_t)ui32BytesExp);
+               if ( eError != PVRSRV_OK )
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "TLStreamCommit() failed (%d) in %s(), unable to copy packet from L1 to L2 buffer",
+                                       eError, __func__));
+                       goto e0;
+               }
+               /* Data were successfully written */
+               ui32BytesCopied = ui32BytesExp;
+       }
+       else if (eError == PVRSRV_ERROR_STREAM_FULL)
+       {
+               /* There was not enough space for all data, copy as much as possible */
+               IMG_UINT32 sizeSum = RGXHWPerfGetPackets(ui32BytesExp, ui32L2BufFree, RGX_HWPERF_GET_PACKET(pbFwBuffer));
+
+               PVR_DPF((PVR_DBG_MESSAGE, "Unable to reserve space (%d) in host buffer on first attempt, remaining free space: %d", ui32BytesExp, ui32L2BufFree));
+
+               if ( 0 != sizeSum )
+               {
+                       eError = TLStreamReserve( hHWPerfStream, &pbL2Buffer, (size_t)sizeSum);
+
+                       if ( eError == PVRSRV_OK )
+                       {
+                               OSDeviceMemCopy( pbL2Buffer, pbFwBuffer, (size_t)sizeSum );
+                               eError = TLStreamCommit(hHWPerfStream, (size_t)sizeSum);
+                               if ( eError != PVRSRV_OK )
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR,
+                                                       "TLStreamCommit() failed (%d) in %s(), unable to copy packet from L1 to L2 buffer",
+                                                       eError, __func__));
+                                       goto e0;
+                               }
+                               /* sizeSum bytes of hwperf packets have been successfully written */
+                               ui32BytesCopied = sizeSum;
+                       }
+                       else if ( PVRSRV_ERROR_STREAM_FULL == eError )
+                       {
+                               PVR_DPF((PVR_DBG_WARNING, "Cannot write HWPerf packet into host buffer, check data in case of packet loss, remaining free space: %d", ui32L2BufFree));
+                               RGXSuspendHWPerfL2DataCopy(psDeviceInfo, bIsReaderConnected);
+                       }
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_MESSAGE, "Cannot find space in host buffer, check data in case of packet loss, remaining free space: %d", ui32L2BufFree));
+                       RGXSuspendHWPerfL2DataCopy(psDeviceInfo, bIsReaderConnected);
+               }
+       }
+       if ( PVRSRV_OK != eError && /* Some other error occurred */
+                       PVRSRV_ERROR_STREAM_FULL != eError ) /* Stream-full errors are handled by the caller; the copied byte count is returned */
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "HWPerf enabled: Unexpected Error ( %d ) while copying FW buffer to TL buffer.",
+                               eError));
+       }
+
+e0:
+       /* Return the remaining packets left to be transported. */
+       PVR_DPF_RETURN_VAL(ui32BytesCopied);
+}
+
+
+static INLINE IMG_UINT32 RGXHWPerfAdvanceRIdx(
+               const IMG_UINT32 ui32BufSize,
+               const IMG_UINT32 ui32Pos,
+               const IMG_UINT32 ui32Size)
+{
+       return ( ui32Pos + ui32Size < ui32BufSize ? ui32Pos + ui32Size : 0 );
+}
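+
+/* Editorial note (not driver code): RGXHWPerfAdvanceRIdx() wraps the read index
+ * back to the start of the buffer as soon as the advance would reach or pass the
+ * physical end. For example, with ui32BufSize = 1024, ui32Pos = 992 and
+ * ui32Size = 32 the result is 0 (992 + 32 is not < 1024), whereas ui32Pos = 960
+ * and ui32Size = 32 gives 992. This relies on the FW producer not letting a
+ * packet straddle the physical end of the buffer, which is what the
+ * ui32HWPerfWrapCount handling in RGXHWPerfDataStore() below accounts for.
+ */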
+
+
+/*
+       RGXHWPerfDataStore
+ */
+static IMG_UINT32 RGXHWPerfDataStore(PVRSRV_RGXDEV_INFO        *psDevInfo)
+{
+       RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+       IMG_BYTE*                               psHwPerfInfo = psDevInfo->psRGXFWIfHWPerfBuf;
+       IMG_UINT32                              ui32SrcRIdx, ui32SrcWIdx, ui32SrcWrapCount;
+       IMG_UINT32                              ui32BytesExp = 0, ui32BytesCopied = 0, ui32BytesCopiedSum = 0;
+#ifdef HWPERF_MISR_FUNC_DEBUG
+       IMG_UINT32                              ui32BytesExpSum = 0;
+#endif
+
+       PVR_DPF_ENTERED;
+
+       /* Caller should check this member is valid before calling */
+       PVR_ASSERT(psDevInfo->hHWPerfStream);
+
+       if (psDevInfo->bSuspendHWPerfL2DataCopy)
+       {
+               PVR_DPF((PVR_DBG_MESSAGE,
+                       "%s : Copying data to host buffer for FW events is "
+                       "suspended. Start HWPerf consumer or restart driver if "
+                       "HWPerf FW events are needed", __func__));
+
+               PVR_DPF_RETURN_VAL(ui32BytesCopiedSum);
+       }
+
+       /* Get a copy of the current
+        *   read (first packet to read)
+        *   write (empty location for the next write to be inserted)
+        *   WrapCount (size in bytes of the buffer at or past end)
+        * indexes of the FW buffer */
+       ui32SrcRIdx = psFwSysData->ui32HWPerfRIdx;
+       ui32SrcWIdx = psFwSysData->ui32HWPerfWIdx;
+       OSMemoryBarrier(NULL);
+       ui32SrcWrapCount = psFwSysData->ui32HWPerfWrapCount;
+
+#if defined(HWPERF_MISR_FUNC_DEBUG) || defined(EMULATOR)
+       {
+               IMG_UINT32  ui32SrcBufSize = psDevInfo->ui32RGXFWIfHWPerfBufSize;
+
+               if (ui32SrcRIdx >= ui32SrcBufSize || ui32SrcWIdx >= ui32SrcBufSize)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s : Invalid read/write offsets found! srcRIdx:%u srcWIdx:%u srcBufSize:%u",
+                               __func__, ui32SrcRIdx, ui32SrcWIdx, ui32SrcBufSize));
+
+                       PVR_DPF_RETURN_VAL(ui32BytesCopiedSum);
+               }
+       }
+#endif
+
+       /* Is there any data in the buffer not yet retrieved? */
+       if ( ui32SrcRIdx != ui32SrcWIdx )
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfDataStore EVENTS found srcRIdx:%d srcWIdx: %d", ui32SrcRIdx, ui32SrcWIdx));
+
+               /* Is the write position higher than the read position? */
+               if ( ui32SrcWIdx > ui32SrcRIdx )
+               {
+                       /* Yes, buffer has not wrapped */
+                       ui32BytesExp = ui32SrcWIdx - ui32SrcRIdx;
+#ifdef HWPERF_MISR_FUNC_DEBUG
+                       ui32BytesExpSum += ui32BytesExp;
+#endif
+                       ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo,
+                                                                 psHwPerfInfo + ui32SrcRIdx,
+                                                                 ui32BytesExp);
+
+                       /* Advance the read index and the free bytes counter by the number
+                        * of bytes transported. Items will be left in buffer if not all data
+                        * could be transported. Exit to allow buffer to drain. */
+                       OSWriteDeviceMem32WithWMB(&psFwSysData->ui32HWPerfRIdx,
+                                                 RGXHWPerfAdvanceRIdx(psDevInfo->ui32RGXFWIfHWPerfBufSize,
+                                                                      ui32SrcRIdx,
+                                                                      ui32BytesCopied));
+
+                       ui32BytesCopiedSum += ui32BytesCopied;
+               }
+               /* No, buffer has wrapped and write position is behind read position */
+               else
+               {
+                       /* Byte count equal to
+                        *     the number of bytes from the read position to the end of the buffer,
+                        *   + the data in the extra space at the end of the buffer. */
+                       ui32BytesExp = ui32SrcWrapCount - ui32SrcRIdx;
+
+#ifdef HWPERF_MISR_FUNC_DEBUG
+                       ui32BytesExpSum += ui32BytesExp;
+#endif
+                       /* Attempt to transfer the packets to the TL stream buffer */
+                       ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo,
+                                                                 psHwPerfInfo + ui32SrcRIdx,
+                                                                 ui32BytesExp);
+
+                       /* Advance the read index as before and update the local copy of
+                        * the read index, as it may be used in the last if branch. */
+                       ui32SrcRIdx = RGXHWPerfAdvanceRIdx(
+                                       psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx,
+                                       ui32BytesCopied);
+
+                       /* Update Wrap Count */
+                       if ( ui32SrcRIdx == 0)
+                       {
+                               OSWriteDeviceMem32WithWMB(&psFwSysData->ui32HWPerfWrapCount,
+                                                         psDevInfo->ui32RGXFWIfHWPerfBufSize);
+                       }
+                       OSWriteDeviceMem32WithWMB(&psFwSysData->ui32HWPerfRIdx, ui32SrcRIdx);
+
+                       ui32BytesCopiedSum += ui32BytesCopied;
+
+                       /* If all the data in the end of the array was copied, try copying
+                        * wrapped data in the beginning of the array, assuming there is
+                        * any and the RIdx was wrapped. */
+                       if (   (ui32BytesCopied == ui32BytesExp)
+                                       && (ui32SrcWIdx > 0)
+                                       && (ui32SrcRIdx == 0) )
+                       {
+                               ui32BytesExp = ui32SrcWIdx;
+#ifdef HWPERF_MISR_FUNC_DEBUG
+                               ui32BytesExpSum += ui32BytesExp;
+#endif
+                               ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo,
+                                                                         psHwPerfInfo,
+                                                                         ui32BytesExp);
+                               /* Advance the FW buffer read position. */
+                               psFwSysData->ui32HWPerfRIdx = RGXHWPerfAdvanceRIdx(
+                                               psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx,
+                                               ui32BytesCopied);
+
+                               ui32BytesCopiedSum += ui32BytesCopied;
+                       }
+               }
+#ifdef HWPERF_MISR_FUNC_DEBUG
+               if (ui32BytesCopiedSum != ui32BytesExpSum)
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfDataStore: FW L1 RIdx:%u. Not all bytes copied to L2: %u bytes out of %u expected", psFwSysData->ui32HWPerfRIdx, ui32BytesCopiedSum, ui32BytesExpSum));
+               }
+#endif
+
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfDataStore NO EVENTS to transport"));
+       }
+
+       PVR_DPF_RETURN_VAL(ui32BytesCopiedSum);
+}
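+
+/* Editorial sketch (not driver code, kept under #if 0): the two drain cases handled
+ * above, reduced to their essentials. ui32WrapEnd plays the role of
+ * ui32HWPerfWrapCount, i.e. the logical end of valid data once the writer has
+ * wrapped, and copy_region() is a placeholder for the L1-to-L2 copy plus
+ * read-index update performed above.
+ */
+#if 0
+static void DrainRingBuffer(IMG_UINT32 ui32RIdx, IMG_UINT32 ui32WIdx,
+                            IMG_UINT32 ui32WrapEnd)
+{
+	if (ui32WIdx > ui32RIdx)
+	{
+		/* Not wrapped: one contiguous region [RIdx, WIdx) */
+		copy_region(ui32RIdx, ui32WIdx - ui32RIdx);
+	}
+	else if (ui32WIdx < ui32RIdx)
+	{
+		/* Wrapped: tail region [RIdx, WrapEnd) first, then head region [0, WIdx).
+		 * The driver above additionally skips the head copy if the tail did not
+		 * drain completely, so the buffer can empty before more is read. */
+		copy_region(ui32RIdx, ui32WrapEnd - ui32RIdx);
+		copy_region(0, ui32WIdx);
+	}
+	/* ui32WIdx == ui32RIdx: nothing to drain */
+}
+#endif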
+
+
+PVRSRV_ERROR RGXHWPerfDataStoreCB(PVRSRV_DEVICE_NODE *psDevInfo)
+{
+       PVRSRV_ERROR            eError = PVRSRV_OK;
+       PVRSRV_RGXDEV_INFO* psRgxDevInfo;
+       IMG_UINT32          ui32BytesCopied;
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(psDevInfo);
+       psRgxDevInfo = psDevInfo->pvDevice;
+
+       /* Store FW event data if the destination buffer exists.*/
+       if (psRgxDevInfo->hHWPerfStream != (IMG_HANDLE) NULL)
+       {
+               OSLockAcquire(psRgxDevInfo->hHWPerfLock);
+               ui32BytesCopied = RGXHWPerfDataStore(psRgxDevInfo);
+               if ( ui32BytesCopied )
+               {
+                       /* Signal consumers that packets may be available to read when
+                        * running from a HW kick, not when called by the client APP thread
+                        * via the transport layer CB, as this can lead to stream
+                        * corruption. */
+                       eError = TLStreamSync(psRgxDevInfo->hHWPerfStream);
+                       PVR_ASSERT(eError == PVRSRV_OK);
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfDataStoreCB: Zero bytes copied"));
+                       RGXDEBUG_PRINT_IRQ_COUNT(psRgxDevInfo);
+               }
+               OSLockRelease(psRgxDevInfo->hHWPerfLock);
+       }
+
+
+       PVR_DPF_RETURN_OK;
+}
+
+
+/* Currently supported by default */
+#if defined(SUPPORT_TL_PRODUCER_CALLBACK)
+static PVRSRV_ERROR RGXHWPerfTLCB(IMG_HANDLE hStream,
+                                  IMG_UINT32 ui32ReqOp, IMG_UINT32* ui32Resp, void* pvUser)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVRSRV_RGXDEV_INFO* psRgxDevInfo = (PVRSRV_RGXDEV_INFO*)pvUser;
+
+       PVR_UNREFERENCED_PARAMETER(hStream);
+       PVR_UNREFERENCED_PARAMETER(ui32Resp);
+
+       PVR_ASSERT(psRgxDevInfo);
+
+       switch (ui32ReqOp)
+       {
+               case TL_SOURCECB_OP_CLIENT_EOS:
+                       /* Keep the HWPerf resource init check and the use of
+                        * those resources atomic; they may not be freed during use
+                        */
+
+                       /* This avoids a deadlock where, in DoTLStreamReserve(), the
+                        * writer has acquired HWPerfLock and ReadLock and is waiting
+                        * on ReadPending (which will be reset by the reader), while
+                        * the reader, having set ReadPending in TLStreamAcquireReadPos(),
+                        * is waiting for HWPerfLock in RGXHWPerfTLCB().
+                        * So here in RGXHWPerfTLCB(), if HWPerfLock is already held we
+                        * return to the reader without waiting to acquire it.
+                        */
+                       if (!OSTryLockAcquire(psRgxDevInfo->hHWPerfLock))
+                       {
+                               PVR_DPF((PVR_DBG_MESSAGE, "hHWPerfLock is already acquired, a write "
+                                               "operation might already be in process"));
+                               return PVRSRV_OK;
+                       }
+
+                       if (psRgxDevInfo->hHWPerfStream != (IMG_HANDLE) NULL)
+                       {
+                               (void) RGXHWPerfDataStore(psRgxDevInfo);
+                       }
+                       OSLockRelease(psRgxDevInfo->hHWPerfLock);
+                       break;
+
+               default:
+                       break;
+       }
+
+       return eError;
+}
+#endif
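+
+/* Editorial sketch (not driver code, kept under #if 0): the try-lock escape used in
+ * RGXHWPerfTLCB() above. When two paths can end up wanting the same pair of
+ * resources in opposite orders, the second path takes its lock with a non-blocking
+ * try-acquire and backs off instead of waiting, which breaks the potential cycle.
+ * OSTryLockAcquire()/OSLockRelease() are the primitives used above; hLock and the
+ * work inside are placeholders.
+ */
+#if 0
+	if (!OSTryLockAcquire(hLock))
+	{
+		/* The writer already holds the lock (and may be waiting on state only
+		 * this caller can clear), so give up rather than risk a deadlock. */
+		return PVRSRV_OK;
+	}
+
+	/* ...work that requires the lock... */
+
+	OSLockRelease(hLock);
+#endif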
+
+
+static void RGXHWPerfL1BufferDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+       if (psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc)
+       {
+               if (psRgxDevInfo->psRGXFWIfHWPerfBuf != NULL)
+               {
+                       DevmemReleaseCpuVirtAddr(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc);
+                       psRgxDevInfo->psRGXFWIfHWPerfBuf = NULL;
+               }
+               DevmemFwUnmapAndFree(psRgxDevInfo, psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc);
+               psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc = NULL;
+       }
+}
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfInit
+
+@Description    Called during driver init to initialise the HWPerf module in
+                the Rogue device driver. Only the minimal resources required
+                for the HWPerf server module to function are allocated here.
+
+@Input          psRgxDevInfo   RGX Device Info
+
+@Return                        PVRSRV_ERROR
+ */ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+       PVRSRV_ERROR eError;
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+       PVR_DPF_ENTERED;
+
+       /* expecting a valid device info */
+       PVR_RETURN_IF_INVALID_PARAM(psRgxDevInfo != NULL);
+
+       /* Create a lock for HWPerf server module used for serializing, L1 to L2
+        * copy calls (e.g. in case of TL producer callback) and L1, L2 resource
+        * allocation */
+       eError = OSLockCreate(&psRgxDevInfo->hHWPerfLock);
+       PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate");
+
+       /* avoid uninitialised data */
+       psRgxDevInfo->hHWPerfStream = (IMG_HANDLE) NULL;
+       psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc = NULL;
+
+       PVR_DPF_RETURN_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfIsInitRequired
+
+@Description    Returns true if the HWPerf firmware buffer (L1 buffer) and host
+                driver TL buffer (L2 buffer) are not already allocated. The
+                caller must hold hHWPerfLock before calling this function so
+                that the state tested is consistent.
+
+@Input          psRgxDevInfo RGX Device Info, on which init requirement is
+                checked.
+
+@Return         IMG_BOOL       Whether initialization (allocation) is required
+ */ /**************************************************************************/
+static INLINE IMG_BOOL RGXHWPerfIsInitRequired(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+       PVR_ASSERT(OSLockIsLocked(psRgxDevInfo->hHWPerfLock));
+
+#if !defined(NO_HARDWARE)
+       /* Both the L1 and L2 buffers are required for HWPerf to function on
+        * drivers built for actual hardware (TC, EMU, etc.)
+        */
+       if (psRgxDevInfo->hHWPerfStream == (IMG_HANDLE) NULL)
+       {
+               /* The allocation API (RGXHWPerfInitOnDemandResources) allocates
+                * device memory for both L1 and L2 without any checks. Hence,
+                * either both should be allocated or both be NULL.
+                *
+                * In case this changes in future (e.g. a situation where one of
+                * the two buffers is already allocated and the other is required),
+                * add the required checks before the allocation calls to avoid memory leaks.
+                */
+               PVR_ASSERT(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc == NULL);
+               return IMG_TRUE;
+       }
+       PVR_ASSERT(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc != NULL);
+#else
+       /* On a NO-HW driver the L2 buffer is not allocated, so there is no
+        * point in checking it */
+       if (psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc == NULL)
+       {
+               return IMG_TRUE;
+       }
+#endif
+       return IMG_FALSE;
+}
+#if !defined(NO_HARDWARE)
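+/* Invoked when a reader attaches to the FW HWPerf stream: resume L2 data
+ * copies if they were suspended and ask the firmware to emit a features packet
+ * so the new reader receives the feature information. */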
+static void _HWPerfFWOnReaderOpenCB(void *pvArg)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVRSRV_RGXDEV_INFO* psRgxDevInfo = (PVRSRV_RGXDEV_INFO*) pvArg;
+       PVRSRV_DEVICE_NODE* psDevNode = (PVRSRV_DEVICE_NODE*) psRgxDevInfo->psDeviceNode;
+       RGXFWIF_KCCB_CMD sKccbCmd;
+       IMG_UINT32 ui32kCCBCommandSlot;
+
+       PVRSRV_VZ_RETN_IF_MODE(GUEST);
+
+       /* Clear any previously suspended state for bSuspendHWPerfL2DataCopy as we
+        * now have a reader attached so the data will be delivered upstream. */
+       if (psRgxDevInfo->bSuspendHWPerfL2DataCopy)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "%s: Resuming HWPerf FW event collection.",
+                       __func__));
+               psRgxDevInfo->bSuspendHWPerfL2DataCopy = IMG_FALSE;
+       }
+
+       sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG;
+       sKccbCmd.uCmdData.sHWPerfCtrl.eOpCode = RGXFWIF_HWPERF_CTRL_EMIT_FEATURES_EV;
+       sKccbCmd.uCmdData.sHWPerfCtrl.ui64Mask = 0;
+
+       eError = RGXScheduleCommandAndGetKCCBSlot(psDevNode->pvDevice,
+                                                                                         RGXFWIF_DM_GP,
+                                                                                         &sKccbCmd,
+                                                                                         PDUMP_FLAGS_CONTINUOUS,
+                                                                                         &ui32kCCBCommandSlot);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to generate feature packet in "
+                               "firmware (error = %d)", __func__, eError));
+               return;
+       }
+
+       eError = RGXWaitForKCCBSlotUpdate(psRgxDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
+       PVR_LOG_RETURN_VOID_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate");
+}
+#endif
+/*************************************************************************/ /*!
+@Function       RGXHWPerfInitOnDemandResources
+
+@Description    Allocates the HWPerf firmware buffer (L1 buffer) and host
+                driver TL buffer (L2 buffer). These are allocated at driver
+                load time if HWPerf is enabled then, otherwise on demand when
+                first required. The caller must hold hHWPerfLock before
+                calling this function so that the state tested is consistent
+                when called outside of driver initialisation.
+
+@Input          psRgxDevInfo RGX Device Info, on which init is done
+
+@Return         PVRSRV_ERROR
+ */ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo)
+{
+       IMG_HANDLE hStream = NULL; /* Init required for noHW */
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32L2BufferSize = 0;
+       PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags;
+       IMG_CHAR pszHWPerfStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5];
+       /* The extra 5 characters allow for a device suffix of up to four digits,
+        * e.g. "hwperf_9999", which is enough */
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+       PVR_DPF_ENTERED;
+
+       /* Create the L1 HWPerf buffer on demand, read-only for the CPU
+        * (except for the zero/poison operations) */
+       uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT)
+                                               | PVRSRV_MEMALLOCFLAG_GPU_READABLE
+                                               | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE
+                                               | PVRSRV_MEMALLOCFLAG_GPU_UNCACHED
+                                               | PVRSRV_MEMALLOCFLAG_CPU_READABLE
+                                               | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC
+                                               | PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE
+#if defined(PDUMP) /* Helps show where the packet data ends */
+                                               | PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC
+#else /* Helps show corruption issues in driver-live */
+                                               | PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC
+#endif
+                                               | PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN);
+
+       /* Allocate HWPerf FW L1 buffer */
+       eError = DevmemFwAllocate(psRgxDevInfo,
+                                 /* Pad it enough to hold the biggest variable sized packet. */
+                                 psRgxDevInfo->ui32RGXFWIfHWPerfBufSize+RGX_HWPERF_MAX_PACKET_SIZE,
+                                 uiMemAllocFlags,
+                                 "FwHWPerfBuffer",
+                                 &psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to allocate kernel fw hwperf buffer (%u)",
+                               __func__, eError));
+               goto e0;
+       }
+
+       /* The RuntimeCfg structure is expected to be mapped into CPU virtual
+        * memory. Also, make sure the FW address is not already set */
+       PVR_ASSERT(psRgxDevInfo->psRGXFWIfRuntimeCfg && psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf.ui32Addr == 0x0);
+
+       /* Meta cached flag removed from this allocation as it was found
+        * FW performance was better without it. */
+       eError = RGXSetFirmwareAddress(&psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf,
+                             psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc,
+                             0, RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", e0);
+
+#if defined(RGX_FEATURE_HWPERF_VOLCANIC)
+       RGXSetMetaDMAAddress(&psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfDMABuf,
+                            psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc,
+                            &psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf,
+                            0);
+#endif
+
+       /* flush write buffers for psRgxDevInfo->psRGXFWIfRuntimeCfg */
+       OSWriteMemoryBarrier(&psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf.ui32Addr);
+
+       eError = DevmemAcquireCpuVirtAddr(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc,
+                                         (void**)&psRgxDevInfo->psRGXFWIfHWPerfBuf);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to acquire kernel hwperf buffer (%u)",
+                               __func__, eError));
+               goto e0;
+       }
+
+       /* On NO-HW driver, there is no MISR installed to copy data from L1 to L2. Hence,
+        * L2 buffer is not allocated */
+#if !defined(NO_HARDWARE)
+       /* The host L2 HWPerf buffer size in bytes must be bigger than the L1
+        * buffer accessed by the FW, because in some scenarios the MISR may try
+        * to write a single packet the size of the whole L1 buffer. When logging
+        * is enabled in the MISR it can be seen whether the L2 buffer hits a full
+        * condition; the closer in size the L2 and L1 buffers are, the more
+        * likely this is. The size is chosen to allow the MISR to write an
+        * L1-sized packet while the client application/daemon drains another
+        * L1-sized packet, i.e. ~1.5*L1.
+        */
+       ui32L2BufferSize = psRgxDevInfo->ui32RGXFWIfHWPerfBufSize +
+                       (psRgxDevInfo->ui32RGXFWIfHWPerfBufSize>>1);
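+       /* For example (the actual L1 size is configuration dependent): a 128 KiB
+        * L1 buffer gives ui32L2BufferSize = 128 KiB + 64 KiB = 192 KiB. */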
+
+       /* Form the HWPerf stream name corresponding to this DevNode so that it is meaningful in the UM */
+       if (OSSNPrintf(pszHWPerfStreamName, sizeof(pszHWPerfStreamName), "%s%d",
+                      PVRSRV_TL_HWPERF_RGX_FW_STREAM,
+                      psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID) < 0)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to form HWPerf stream name for device %d",
+                               __func__,
+                               psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       eError = TLStreamCreate(&hStream,
+                               pszHWPerfStreamName,
+                               ui32L2BufferSize,
+                               TL_OPMODE_DROP_NEWER | TL_FLAG_NO_SIGNAL_ON_COMMIT,
+                               _HWPerfFWOnReaderOpenCB, psRgxDevInfo,
+#if !defined(SUPPORT_TL_PRODUCER_CALLBACK)
+                               NULL, NULL
+#else
+                               /* Not enabled by default */
+                               RGXHWPerfTLCB, psRgxDevInfo
+#endif
+                               );
+       PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate", e1);
+
+       eError = TLStreamSetNotifStream(hStream,
+                                       PVRSRVGetPVRSRVData()->hTLCtrlStream);
+       /* we can still discover host stream so leave it as is and just log error */
+       PVR_LOG_IF_ERROR(eError, "TLStreamSetNotifStream");
+
+       /* send the event here because host stream is implicitly opened for write
+        * in TLStreamCreate and TLStreamOpen is never called (so the event is
+        * never emitted) */
+       TLStreamMarkStreamOpen(hStream);
+
+       {
+               TL_STREAM_INFO sTLStreamInfo;
+
+               TLStreamInfo(hStream, &sTLStreamInfo);
+               psRgxDevInfo->ui32L2BufMaxPacketSize = sTLStreamInfo.maxTLpacketSize;
+
+               psRgxDevInfo->bSuspendHWPerfL2DataCopy = IMG_FALSE;
+       }
+
+       PVR_DPF((PVR_DBG_MESSAGE, "HWPerf buffer size in bytes: L1: %d  L2: %d",
+                       psRgxDevInfo->ui32RGXFWIfHWPerfBufSize, ui32L2BufferSize));
+
+#else /* defined(NO_HARDWARE) */
+       PVR_UNREFERENCED_PARAMETER(ui32L2BufferSize);
+       PVR_UNREFERENCED_PARAMETER(RGXHWPerfTLCB);
+       PVR_UNREFERENCED_PARAMETER(pszHWPerfStreamName);
+       ui32L2BufferSize = 0;
+#endif
+
+       psRgxDevInfo->hHWPerfStream = hStream;
+       PVR_DPF_RETURN_OK;
+
+#if !defined(NO_HARDWARE)
+e1: /* L2 buffer initialisation failures */
+       psRgxDevInfo->hHWPerfStream = NULL;
+#endif
+e0: /* L1 buffer initialisation failures */
+       RGXHWPerfL1BufferDeinit(psRgxDevInfo);
+
+       PVR_DPF_RETURN_RC(eError);
+}
+
+
+void RGXHWPerfDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+       IMG_HANDLE hStream = psRgxDevInfo->hHWPerfStream;
+
+       PVRSRV_VZ_RETN_IF_MODE(GUEST);
+
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(psRgxDevInfo);
+       psRgxDevInfo->hHWPerfStream = NULL;
+
+       /* Clean up the L2 buffer stream object if allocated */
+       if (hStream)
+       {
+               /* send the event here because host stream is implicitly opened for
+                * write in TLStreamCreate and TLStreamClose is never called (so the
+                * event is never emitted) */
+               TLStreamMarkStreamClose(hStream);
+               TLStreamClose(hStream);
+       }
+
+       /* Cleanup L1 buffer resources */
+       RGXHWPerfL1BufferDeinit(psRgxDevInfo);
+
+       /* Cleanup the HWPerf server module lock resource */
+       if (psRgxDevInfo->hHWPerfLock)
+       {
+               OSLockDestroy(psRgxDevInfo->hHWPerfLock);
+               psRgxDevInfo->hHWPerfLock = NULL;
+       }
+
+       PVR_DPF_RETURN;
+}
+
+
+/******************************************************************************
+ * RGX HW Performance Profiling Server API(s)
+ *****************************************************************************/
+
+static PVRSRV_ERROR RGXHWPerfCtrlFwBuffer(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                          IMG_BOOL bToggle,
+                                          IMG_UINT64 ui64Mask)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVRSRV_RGXDEV_INFO* psDevice = psDeviceNode->pvDevice;
+       RGXFWIF_KCCB_CMD sKccbCmd;
+       IMG_UINT32 ui32kCCBCommandSlot;
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+       /* Whether this method is used to enable or disable events, the HWPerf
+        * buffers (host and FW) are likely to be needed eventually, so create
+        * them now; this also helps unit testing. Buffers are allocated on
+        * demand to reduce the RAM footprint on systems that do not need
+        * HWPerf resources.
+        * Obtain the lock first, then test and initialise if required. */
+       OSLockAcquire(psDevice->hHWPerfLock);
+
+       if (!psDevice->bFirmwareInitialised)
+       {
+               psDevice->ui64HWPerfFilter = ui64Mask; // at least set filter
+               eError = PVRSRV_ERROR_NOT_INITIALISED;
+
+               PVR_DPF((PVR_DBG_ERROR,
+                                "HWPerf has NOT been initialised yet. Mask has been SET to "
+                                "(%" IMG_UINT64_FMTSPECx ")",
+                                ui64Mask));
+
+               goto unlock_and_return;
+       }
+
+       if (RGXHWPerfIsInitRequired(psDevice))
+       {
+               eError = RGXHWPerfInitOnDemandResources(psDevice);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation of on-demand HWPerfFW "
+                                       "resources failed", __func__));
+                       goto unlock_and_return;
+               }
+       }
+
+#if defined(RGX_FEATURE_HWPERF_VOLCANIC) && defined(SUPPORT_POWMON_COMPONENT) && defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS)
+       if (RGXPowmonBufferIsInitRequired(psDeviceNode->pvDevice))
+       {
+               /* Allocate power monitoring log buffer if enabled */
+               eError = RGXPowmonBufferInitOnDemandResources(psDeviceNode->pvDevice);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation of on-demand power monitoring "
+                                       "resources failed", __func__));
+                       goto unlock_and_return;
+               }
+       }
+#endif
+
+       /* Unlock here as no further HWPerf resources are used below that would be
+        * affected if freed by another thread */
+       OSLockRelease(psDevice->hHWPerfLock);
+
+       /* Return if the filter is the same */
+       if (!bToggle && psDevice->ui64HWPerfFilter == ui64Mask)
+               goto return_;
+
+       /* Prepare command parameters ... */
+       sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG;
+       sKccbCmd.uCmdData.sHWPerfCtrl.eOpCode = bToggle ? RGXFWIF_HWPERF_CTRL_TOGGLE : RGXFWIF_HWPERF_CTRL_SET;
+       sKccbCmd.uCmdData.sHWPerfCtrl.ui64Mask = ui64Mask;
+
+       /* Ask the FW to carry out the HWPerf configuration command */
+       eError = RGXScheduleCommandAndGetKCCBSlot(psDevice,
+                                                 RGXFWIF_DM_GP,
+                                                 &sKccbCmd,
+                                                 PDUMP_FLAGS_CONTINUOUS,
+                                                 &ui32kCCBCommandSlot);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set new HWPerfFW filter in "
+                               "firmware (error = %d)", __func__, eError));
+               goto return_;
+       }
+
+       psDevice->ui64HWPerfFilter = bToggle ?
+                       psDevice->ui64HWPerfFilter ^ ui64Mask : ui64Mask;
+
+       /* Wait for FW to complete */
+       eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", return_);
+
+#if defined(DEBUG)
+       if (bToggle)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "HWPerfFW events (%" IMG_UINT64_FMTSPECx ") have been TOGGLED",
+                               ui64Mask));
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_WARNING, "HWPerfFW mask has been SET to (%" IMG_UINT64_FMTSPECx ")",
+                               ui64Mask));
+       }
+#endif
+
+       return PVRSRV_OK;
+
+unlock_and_return:
+       OSLockRelease(psDevice->hHWPerfLock);
+
+return_:
+       return eError;
+}
+
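+/* Number of slots in the deferred (atomic-context) host events ring buffer;
+ * this bounds how many UFO packets can be queued before the MISR drains them
+ * into the host stream. */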
+#define HWPERF_HOST_MAX_DEFERRED_PACKETS 800
+
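+/* Set or toggle the HWPerfHost event filter. The host stream is created on
+ * demand, and the periodic host events thread is started or stopped depending
+ * on whether RGX_HWPERF_HOST_INFO events are enabled. */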
+static PVRSRV_ERROR RGXHWPerfCtrlHostBuffer(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                            IMG_BOOL bToggle,
+                                            IMG_UINT32 ui32Mask)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVRSRV_RGXDEV_INFO* psDevice = psDeviceNode->pvDevice;
+#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
+       IMG_UINT32 ui32OldFilter = psDevice->ui32HWPerfHostFilter;
+#endif
+
+       OSLockAcquire(psDevice->hLockHWPerfHostStream);
+       if (psDevice->hHWPerfHostStream == NULL)
+       {
+               eError = RGXHWPerfHostInitOnDemandResources(psDevice);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Initialisation of on-demand HWPerfHost resources failed",
+                                        __func__));
+                       OSLockRelease(psDevice->hLockHWPerfHostStream);
+                       return eError;
+               }
+       }
+
+       psDevice->ui32HWPerfHostFilter = bToggle ?
+                       psDevice->ui32HWPerfHostFilter ^ ui32Mask : ui32Mask;
+
+       // Deferred creation of host periodic events thread
+       if (psDevice->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HOST_INFO))
+       {
+               eError = PVRSRVCreateHWPerfHostThread(PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS);
+               PVR_LOG_IF_ERROR(eError, "PVRSRVCreateHWPerfHostThread");
+       }
+       else if (!(psDevice->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HOST_INFO)))
+       {
+               eError = PVRSRVDestroyHWPerfHostThread();
+               PVR_LOG_IF_ERROR(eError, "PVRSRVDestroyHWPerfHostThread");
+       }
+
+#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
+       // Log deferred events stats if filter changed from non-zero to zero
+       if ((ui32OldFilter != 0) && (psDevice->ui32HWPerfHostFilter == 0))
+       {
+               PVR_LOG(("HWPerfHost deferred events buffer high-watermark / size: (%u / %u)",
+                               psDevice->ui32DEHighWatermark, HWPERF_HOST_MAX_DEFERRED_PACKETS));
+
+               PVR_LOG(("HWPerfHost deferred event retries: WaitForAtomicCtxPktHighWatermark(%u) "
+                               "WaitForRightOrdPktHighWatermark(%u)",
+                               psDevice->ui32WaitForAtomicCtxPktHighWatermark,
+                               psDevice->ui32WaitForRightOrdPktHighWatermark));
+       }
+#endif
+
+       OSLockRelease(psDevice->hLockHWPerfHostStream);
+
+#if defined(DEBUG)
+       if (bToggle)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "HWPerfHost events (%x) have been TOGGLED",
+                               ui32Mask));
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_WARNING, "HWPerfHost mask has been SET to (%x)",
+                               ui32Mask));
+       }
+#endif
+
+       return PVRSRV_OK;
+}
+
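+/* Set or toggle a client (user-mode) HWPerf filter held in the shared info
+ * page; ui32InfoPageIdx selects which client API filter is updated. */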
+static PVRSRV_ERROR RGXHWPerfCtrlClientBuffer(IMG_BOOL bToggle,
+                                              IMG_UINT32 ui32InfoPageIdx,
+                                              IMG_UINT32 ui32Mask)
+{
+       PVRSRV_DATA *psData = PVRSRVGetPVRSRVData();
+
+       PVR_LOG_RETURN_IF_FALSE(ui32InfoPageIdx >= HWPERF_INFO_IDX_START &&
+                         ui32InfoPageIdx < HWPERF_INFO_IDX_END, "invalid info"
+                         " page index", PVRSRV_ERROR_INVALID_PARAMS);
+
+       OSLockAcquire(psData->hInfoPageLock);
+       psData->pui32InfoPage[ui32InfoPageIdx] = bToggle ?
+                       psData->pui32InfoPage[ui32InfoPageIdx] ^ ui32Mask : ui32Mask;
+       OSLockRelease(psData->hInfoPageLock);
+
+#if defined(DEBUG)
+       if (bToggle)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "HWPerfClient (%u) events (%x) have been TOGGLED",
+                               ui32InfoPageIdx, ui32Mask));
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_WARNING, "HWPerfClient (%u) mask has been SET to (%x)",
+                               ui32InfoPageIdx, ui32Mask));
+       }
+#endif
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVRGXGetHWPerfBvncFeatureFlagsKM(CONNECTION_DATA    *psConnection,
+                                                  PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                  RGX_HWPERF_BVNC    *psBVNC)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       PVRSRV_ERROR        eError;
+
+       PVR_LOG_RETURN_IF_FALSE((NULL != psDeviceNode), "psDeviceNode invalid", PVRSRV_ERROR_INVALID_PARAMS);
+
+       psDevInfo = psDeviceNode->pvDevice;
+       eError = RGXServerFeatureFlagsToHWPerfFlags(psDevInfo, psBVNC);
+
+       return eError;
+}
+
+/*
+       AppHint interfaces
+ */
+static
+PVRSRV_ERROR RGXHWPerfSetFwFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                  const void *psPrivate,
+                                  IMG_UINT64 ui64Value)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+       PVR_RETURN_IF_INVALID_PARAM(psDeviceNode != NULL);
+       PVR_RETURN_IF_INVALID_PARAM(psDeviceNode->pvDevice != NULL);
+
+       eError = RGXHWPerfCtrlFwBuffer(psDeviceNode, IMG_FALSE, ui64Value);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                       "Failed to set HWPerf firmware filter for device (%u)",
+                       psDeviceNode->sDevId.ui32InternalID));
+               return eError;
+       }
+
+       return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR RGXHWPerfReadFwFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                   const void *psPrivate,
+                                   IMG_UINT64 *pui64Value)
+{
+       PVR_RETURN_IF_INVALID_PARAM(psDeviceNode != NULL);
+       PVR_RETURN_IF_INVALID_PARAM(psDeviceNode->pvDevice != NULL);
+
+       PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+       *pui64Value =
+           ((PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice)->ui64HWPerfFilter;
+
+       return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR RGXHWPerfSetHostFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                    const void *psPrivate,
+                                    IMG_UINT32 ui32Value)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_RETURN_IF_INVALID_PARAM(psDeviceNode != NULL);
+       PVR_RETURN_IF_INVALID_PARAM(psDeviceNode->pvDevice != NULL);
+
+       PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+       eError = RGXHWPerfCtrlHostBuffer(psDeviceNode, IMG_FALSE, ui32Value);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                       "Failed to set HWPerf host filter for device (%u)",
+                       psDeviceNode->sDevId.ui32InternalID));
+               return eError;
+       }
+
+       return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR RGXHWPerfReadHostFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     const void *psPrivate,
+                                     IMG_UINT32 *pui32Value)
+{
+       PVR_RETURN_IF_INVALID_PARAM(psDeviceNode != NULL);
+       PVR_RETURN_IF_INVALID_PARAM(psDeviceNode->pvDevice != NULL);
+
+       PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+       *pui32Value =
+           ((PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice)->ui32HWPerfHostFilter;
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _ReadClientFilter(const PVRSRV_DEVICE_NODE *psDevice,
+                                      const void *psPrivData,
+                                      IMG_UINT32 *pui32Value)
+{
+       PVRSRV_DATA *psData = PVRSRVGetPVRSRVData();
+       IMG_UINT32 ui32Idx = (IMG_UINT32) (uintptr_t) psPrivData;
+       PVR_UNREFERENCED_PARAMETER(psDevice);
+
+       OSLockAcquire(psData->hInfoPageLock);
+       *pui32Value = psData->pui32InfoPage[ui32Idx];
+       OSLockRelease(psData->hInfoPageLock);
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _WriteClientFilter(const PVRSRV_DEVICE_NODE *psDevice,
+                                       const void *psPrivData,
+                                       IMG_UINT32 ui32Value)
+{
+       IMG_UINT32 ui32Idx = (IMG_UINT32) (uintptr_t) psPrivData;
+       PVR_UNREFERENCED_PARAMETER(psDevice);
+
+       return RGXHWPerfCtrlClientBuffer(IMG_FALSE, ui32Idx, ui32Value);
+}
+
+void RGXHWPerfInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRVAppHintRegisterHandlersUINT64(APPHINT_ID_HWPerfFWFilter,
+                                           RGXHWPerfReadFwFilter,
+                                           RGXHWPerfSetFwFilter,
+                                           psDeviceNode,
+                                           NULL);
+       PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfHostFilter,
+                                           RGXHWPerfReadHostFilter,
+                                           RGXHWPerfSetHostFilter,
+                                           psDeviceNode,
+                                           NULL);
+}
+
+void RGXHWPerfClientInitAppHintCallbacks(void)
+{
+       PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_Services,
+                                           _ReadClientFilter,
+                                           _WriteClientFilter,
+                                           APPHINT_OF_DRIVER_NO_DEVICE,
+                                           (void *) HWPERF_FILTER_SERVICES_IDX);
+       PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_EGL,
+                                           _ReadClientFilter,
+                                           _WriteClientFilter,
+                                           APPHINT_OF_DRIVER_NO_DEVICE,
+                                           (void *) HWPERF_FILTER_EGL_IDX);
+       PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_OpenGLES,
+                                           _ReadClientFilter,
+                                           _WriteClientFilter,
+                                           APPHINT_OF_DRIVER_NO_DEVICE,
+                                           (void *) HWPERF_FILTER_OPENGLES_IDX);
+       PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_OpenCL,
+                                           _ReadClientFilter,
+                                           _WriteClientFilter,
+                                           APPHINT_OF_DRIVER_NO_DEVICE,
+                                           (void *) HWPERF_FILTER_OPENCL_IDX);
+       PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_Vulkan,
+                                           _ReadClientFilter,
+                                           _WriteClientFilter,
+                                           APPHINT_OF_DRIVER_NO_DEVICE,
+                                           (void *) HWPERF_FILTER_VULKAN_IDX);
+       PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_OpenGL,
+                                               _ReadClientFilter,
+                                               _WriteClientFilter,
+                                               APPHINT_OF_DRIVER_NO_DEVICE,
+                                               (void *) HWPERF_FILTER_OPENGL_IDX);
+}
+
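+/* Clamp the AppHint-supplied HWPerfHost buffer size (in KB) to the supported
+ * range and convert it to bytes; zero selects the built-in default size. */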
+static INLINE IMG_UINT32 _RGXHWPerfFixBufferSize(IMG_UINT32 ui32BufSizeKB)
+{
+       if (ui32BufSizeKB > HWPERF_HOST_TL_STREAM_SIZE_MAX)
+       {
+               /* Size specified as an AppHint but it is too big */
+               PVR_DPF((PVR_DBG_WARNING,
+                        "RGXHWPerfHostInit: HWPerf Host buffer size "
+                        "value (%u) too big, using maximum (%u)",
+                        ui32BufSizeKB, HWPERF_HOST_TL_STREAM_SIZE_MAX));
+               return HWPERF_HOST_TL_STREAM_SIZE_MAX<<10;
+       }
+       else if (ui32BufSizeKB >= HWPERF_HOST_TL_STREAM_SIZE_MIN)
+       {
+               return ui32BufSizeKB<<10;
+       }
+       else if (ui32BufSizeKB > 0)
+       {
+               /* Size specified as an AppHint but it is too small */
+               PVR_DPF((PVR_DBG_WARNING,
+                        "RGXHWPerfHostInit: HWPerf Host buffer size "
+                        "value (%u) too small, using minimum (%u)",
+                        ui32BufSizeKB, HWPERF_HOST_TL_STREAM_SIZE_MIN));
+               return HWPERF_HOST_TL_STREAM_SIZE_MIN<<10;
+       }
+       else
+       {
+               /* 0 size implies AppHint not set or is set to zero,
+                * use default size from driver constant. */
+               return HWPERF_HOST_TL_STREAM_SIZE_DEFAULT<<10;
+       }
+}
+
+/******************************************************************************
+ * RGX HW Performance Host Stream API
+ *****************************************************************************/
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfHostInit
+
+@Description    Called during driver init to initialise the HWPerfHost stream
+                in the Rogue device driver. Only the minimal resources required
+                for the HWPerf server module to function are allocated here.
+
+@Return         PVRSRV_ERROR
+ */ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfHostInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32BufSizeKB)
+{
+       PVRSRV_ERROR eError;
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+       PVR_RETURN_IF_INVALID_PARAM(psRgxDevInfo != NULL);
+
+       eError = OSLockCreate(&psRgxDevInfo->hLockHWPerfHostStream);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", error);
+
+       psRgxDevInfo->hHWPerfHostStream = NULL;
+       psRgxDevInfo->ui32HWPerfHostFilter = 0; /* disable all events */
+       psRgxDevInfo->ui32HWPerfHostNextOrdinal = 1;
+       psRgxDevInfo->ui32HWPerfHostBufSize = _RGXHWPerfFixBufferSize(ui32BufSizeKB);
+       psRgxDevInfo->pvHostHWPerfMISR = NULL;
+       psRgxDevInfo->pui8DeferredEvents = NULL;
+       /* First packet has ordinal=1, so LastOrdinal=0 will ensure ordering logic
+        * is maintained */
+       psRgxDevInfo->ui32HWPerfHostLastOrdinal = 0;
+       psRgxDevInfo->hHWPerfHostSpinLock = NULL;
+
+error:
+       return eError;
+}
+
+#define RGX_HWPERF_HOST_CLIENT_INFO_PROC_NAME_BASE_SIZE \
+       ((IMG_UINT32)(offsetof(RGX_HWPERF_HOST_CLIENT_INFO_DATA, uDetail) + \
+               sizeof(((RGX_HWPERF_HOST_CLIENT_INFO_DETAIL*)0)->sProcName.ui32Count)))
+
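+/* Called when a reader connects to the HWPerfHost stream: synchronise clocks,
+ * start the periodic host thread if RGX_HWPERF_HOST_INFO events are enabled,
+ * and announce the process names of the currently connected clients. */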
+static void _HWPerfHostOnConnectCB(void *pvArg)
+{
+       PVRSRV_RGXDEV_INFO* psDevice;
+       PVRSRV_ERROR eError;
+
+       RGXSRV_HWPERF_CLK_SYNC(pvArg);
+
+       psDevice = (PVRSRV_RGXDEV_INFO*) pvArg;
+
+       /* Handle the case where the RGX_HWPERF_HOST_INFO bit is set in the event
+        * filter before the host stream is opened for reading by a HWPerf client,
+        * which can result in the host periodic thread sleeping for a long time
+        * because TLStreamIsOpenForReading may return false. */
+       if (psDevice->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HOST_INFO))
+       {
+               eError = PVRSRVCreateHWPerfHostThread(PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS);
+               PVR_LOG_IF_ERROR(eError, "PVRSRVCreateHWPerfHostThread");
+       }
+
+       if (RGXHWPerfHostIsEventEnabled(psDevice, RGX_HWPERF_HOST_CLIENT_INFO))
+       {
+               // GCC throws -Werror=frame-larger-than error if the frame size is > 1024 bytes,
+               // so use a heap allocation - is there an alternate solution?
+               IMG_BYTE *pbPktPayload = (IMG_BYTE*)OSAllocMem(RGX_HWPERF_MAX_PAYLOAD_SIZE);
+
+               if (pbPktPayload)
+               {
+                       RGX_HWPERF_HOST_CLIENT_INFO_DATA *psHostClientInfo;
+                       RGX_HWPERF_HOST_CLIENT_PROC_NAME *psProcName;
+                       IMG_UINT32 ui32TotalPayloadSize, ui32NameLen, ui32ProcNamePktSize;
+                       DLLIST_NODE *pNode, *pNext;
+
+                       psHostClientInfo = IMG_OFFSET_ADDR(pbPktPayload,0);
+                       psHostClientInfo->eType = RGX_HWPERF_HOST_CLIENT_INFO_TYPE_PROCESS_NAME;
+                       psHostClientInfo->uDetail.sProcName.ui32Count = 0U;
+                       psProcName = psHostClientInfo->uDetail.sProcName.asProcNames;
+                       ui32TotalPayloadSize = RGX_HWPERF_HOST_CLIENT_INFO_PROC_NAME_BASE_SIZE;
+
+                       OSLockAcquire(psDevice->psDeviceNode->hConnectionsLock);
+
+                       // Announce current client connections to the reader
+                       dllist_foreach_node(&psDevice->psDeviceNode->sConnections, pNode, pNext)
+                       {
+                               CONNECTION_DATA *psData = IMG_CONTAINER_OF(pNode, CONNECTION_DATA, sConnectionListNode);
+
+                               ui32NameLen = OSStringLength(psData->pszProcName) + 1U;
+                               ui32ProcNamePktSize = RGX_HWPERF_HOST_CLIENT_PROC_NAME_SIZE(ui32NameLen);
+
+                               // Unlikely case where we have too much data to fit into a single hwperf packet
+                               if (ui32ProcNamePktSize + ui32TotalPayloadSize > RGX_HWPERF_MAX_PAYLOAD_SIZE)
+                               {
+                                       RGXHWPerfHostPostRaw(psDevice, RGX_HWPERF_HOST_CLIENT_INFO, pbPktPayload, ui32TotalPayloadSize);
+
+                                       psHostClientInfo->uDetail.sProcName.ui32Count = 0U;
+                                       psProcName = psHostClientInfo->uDetail.sProcName.asProcNames;
+                                       ui32TotalPayloadSize = RGX_HWPERF_HOST_CLIENT_INFO_PROC_NAME_BASE_SIZE;
+                               }
+
+                               // Setup packet data
+                               psHostClientInfo->uDetail.sProcName.ui32Count++;
+                               psProcName->uiClientPID = psData->pid;
+                               psProcName->ui32Length = ui32NameLen;
+                               (void)OSStringLCopy(psProcName->acName, psData->pszProcName, ui32NameLen);
+
+                               psProcName = (RGX_HWPERF_HOST_CLIENT_PROC_NAME*)IMG_OFFSET_ADDR(psProcName, ui32ProcNamePktSize);
+                               ui32TotalPayloadSize += ui32ProcNamePktSize;
+                       }
+
+                       OSLockRelease(psDevice->psDeviceNode->hConnectionsLock);
+                       RGXHWPerfHostPostRaw(psDevice, RGX_HWPERF_HOST_CLIENT_INFO, pbPktPayload, ui32TotalPayloadSize);
+                       OSFreeMem(pbPktPayload);
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: OUT OF MEMORY. Could not allocate memory for RGX_HWPERF_HOST_CLIENT_INFO_DATA packet.", __func__));
+               }
+       }
+}
+
+/* A holder struct is deliberately avoided for the fields below, since a struct
+ * brings padding, packing and other compiler dependencies, and we want a
+ * contiguous stream of bytes (header + data) to pass to TLStreamWrite. See
+ * _HWPerfHostDeferredEventsEmitter().
+ *
+ * A deferred (UFO) packet is represented in memory as:
+ *     - IMG_BOOL                 --> Indicates whether a packet write is
+ *                                    "complete" by atomic context or not.
+ *     - RGX_HWPERF_V2_PACKET_HDR --.
+ *                                  |--> Fed together to TLStreamWrite for
+ *                                  |    deferred packet to be written to
+ *                                  |    HWPerfHost buffer
+ *     - RGX_HWPERF_HOST_UFO_DATA---`
+ *
+ * Note: currently only UFO events are supported in the deferred list */
+#define HWPERF_HOST_DEFERRED_UFO_PACKET_SIZE (sizeof(IMG_BOOL) +\
+               sizeof(RGX_HWPERF_V2_PACKET_HDR) +\
+               sizeof(RGX_HWPERF_HOST_UFO_DATA))
+
+static void RGX_MISRHandler_HWPerfPostDeferredHostEvents(void *pvData);
+static void _HWPerfHostDeferredEventsEmitter(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                             IMG_UINT32 ui32MaxOrdinal);
+
+/*************************************************************************/ /*!
+@Function       RGXHWPerfHostInitOnDemandResources
+
+@Description    Allocates the HWPerfHost buffer. It is allocated at driver
+                load time if HWPerf is enabled then, otherwise on demand when
+                first required.
+
+@Return         PVRSRV_ERROR
+ */ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfHostInitOnDemandResources(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+       PVRSRV_ERROR eError;
+       /* The extra 5 characters allow stream names up to "hwperf_host_9999" */
+       IMG_CHAR pszHWPerfHostStreamName[sizeof(PVRSRV_TL_HWPERF_HOST_SERVER_STREAM) + 5];
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+       if (psRgxDevInfo->hHWPerfHostStream != NULL)
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "HWPerf host stream already initialised"));
+               return PVRSRV_OK;
+       }
+
+       /* Form the HWPerf host stream name corresponding to this DevNode so that it is meaningful in the UM */
+       if (OSSNPrintf(pszHWPerfHostStreamName, sizeof(pszHWPerfHostStreamName), "%s%d",
+                      PVRSRV_TL_HWPERF_HOST_SERVER_STREAM,
+                      psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID) < 0)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to form HWPerf host stream name for device %d",
+                               __func__,
+                               psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       eError = TLStreamCreate(&psRgxDevInfo->hHWPerfHostStream,
+                               pszHWPerfHostStreamName, psRgxDevInfo->ui32HWPerfHostBufSize,
+                               TL_OPMODE_DROP_NEWER,
+                               _HWPerfHostOnConnectCB, psRgxDevInfo,
+                               NULL, NULL);
+       PVR_LOG_RETURN_IF_ERROR(eError, "TLStreamCreate");
+
+       eError = TLStreamSetNotifStream(psRgxDevInfo->hHWPerfHostStream,
+                                       PVRSRVGetPVRSRVData()->hTLCtrlStream);
+       /* we can still discover host stream so leave it as is and just log error */
+       PVR_LOG_IF_ERROR(eError, "TLStreamSetNotifStream");
+
+       /* send the event here because host stream is implicitly opened for write
+        * in TLStreamCreate and TLStreamOpen is never called (so the event is
+        * never emitted) */
+       eError = TLStreamMarkStreamOpen(psRgxDevInfo->hHWPerfHostStream);
+       PVR_LOG_IF_ERROR(eError, "TLStreamMarkStreamOpen");
+
+       /* HWPerfHost deferred events specific initialization */
+       eError = OSInstallMISR(&psRgxDevInfo->pvHostHWPerfMISR,
+                              RGX_MISRHandler_HWPerfPostDeferredHostEvents,
+                              psRgxDevInfo,
+                              "RGX_HWPerfDeferredEventPoster");
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSInstallMISR", err_install_misr);
+
+       eError = OSSpinLockCreate(&psRgxDevInfo->hHWPerfHostSpinLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSSpinLockCreate", err_spinlock_create);
+
+       psRgxDevInfo->pui8DeferredEvents = OSAllocMem(HWPERF_HOST_MAX_DEFERRED_PACKETS
+                                                     * HWPERF_HOST_DEFERRED_UFO_PACKET_SIZE);
+       if (NULL == psRgxDevInfo->pui8DeferredEvents)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: OUT OF MEMORY. Could not allocate memory for "
+                               "HWPerfHost deferred events array", __func__));
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto err_alloc_deferred_events;
+       }
+       psRgxDevInfo->ui16DEReadIdx = 0;
+       psRgxDevInfo->ui16DEWriteIdx = 0;
+#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
+       psRgxDevInfo->ui32DEHighWatermark = 0;
+       psRgxDevInfo->ui32WaitForRightOrdPktHighWatermark = 0;
+       psRgxDevInfo->ui32WaitForAtomicCtxPktHighWatermark = 0;
+#endif
+
+       PVR_DPF((DBGPRIV_MESSAGE, "HWPerf Host buffer size is %uKB",
+                       psRgxDevInfo->ui32HWPerfHostBufSize));
+
+       return PVRSRV_OK;
+
+err_alloc_deferred_events:
+       OSSpinLockDestroy(psRgxDevInfo->hHWPerfHostSpinLock);
+       psRgxDevInfo->hHWPerfHostSpinLock = NULL;
+
+err_spinlock_create:
+       (void) OSUninstallMISR(psRgxDevInfo->pvHostHWPerfMISR);
+       psRgxDevInfo->pvHostHWPerfMISR = NULL;
+
+err_install_misr:
+       TLStreamMarkStreamClose(psRgxDevInfo->hHWPerfHostStream);
+       TLStreamClose(psRgxDevInfo->hHWPerfHostStream);
+       psRgxDevInfo->hHWPerfHostStream = NULL;
+
+       return eError;
+}
+
+void RGXHWPerfHostDeInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+       PVRSRV_VZ_RETN_IF_MODE(GUEST);
+
+       PVR_ASSERT (psRgxDevInfo);
+
+       if (psRgxDevInfo->pui8DeferredEvents)
+       {
+               OSFreeMem(psRgxDevInfo->pui8DeferredEvents);
+               psRgxDevInfo->pui8DeferredEvents = NULL;
+       }
+
+       if (psRgxDevInfo->hHWPerfHostSpinLock)
+       {
+               OSSpinLockDestroy(psRgxDevInfo->hHWPerfHostSpinLock);
+               psRgxDevInfo->hHWPerfHostSpinLock = NULL;
+       }
+
+       if (psRgxDevInfo->pvHostHWPerfMISR)
+       {
+               (void) OSUninstallMISR(psRgxDevInfo->pvHostHWPerfMISR);
+               psRgxDevInfo->pvHostHWPerfMISR = NULL;
+       }
+
+       if (psRgxDevInfo->hHWPerfHostStream)
+       {
+               /* send the event here because host stream is implicitly opened for
+                * write in TLStreamCreate and TLStreamClose is never called (so the
+                * event is never emitted) */
+               TLStreamMarkStreamClose(psRgxDevInfo->hHWPerfHostStream);
+               TLStreamClose(psRgxDevInfo->hHWPerfHostStream);
+               psRgxDevInfo->hHWPerfHostStream = NULL;
+       }
+
+       if (psRgxDevInfo->hLockHWPerfHostStream)
+       {
+               OSLockDestroy(psRgxDevInfo->hLockHWPerfHostStream);
+               psRgxDevInfo->hLockHWPerfHostStream = NULL;
+       }
+}
+
+inline void RGXHWPerfHostSetEventFilter(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Filter)
+{
+       PVRSRV_VZ_RETN_IF_MODE(GUEST);
+       psRgxDevInfo->ui32HWPerfHostFilter = ui32Filter;
+}
+
+inline IMG_BOOL RGXHWPerfHostIsEventEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo, RGX_HWPERF_HOST_EVENT_TYPE eEvent)
+{
+       PVR_ASSERT(psRgxDevInfo);
+       return (psRgxDevInfo->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(eEvent)) ? IMG_TRUE : IMG_FALSE;
+}
+
+#define MAX_RETRY_COUNT 80
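+/* Take the host stream lock, flush any pending deferred events and wait (with
+ * bounded retries) until this packet's ordinal is the next one expected, so
+ * packets appear in the stream in ordinal order. */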
+static inline void _PostFunctionPrologue(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                         IMG_UINT32 ui32CurrentOrdinal)
+{
+       IMG_UINT32 ui32Retry = MAX_RETRY_COUNT;
+
+       PVR_ASSERT(psRgxDevInfo->hLockHWPerfHostStream != NULL);
+       PVR_ASSERT(psRgxDevInfo->hHWPerfHostStream != NULL);
+
+       OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream);
+
+       /* First, flush pending events (if any) */
+       _HWPerfHostDeferredEventsEmitter(psRgxDevInfo, ui32CurrentOrdinal);
+
+       while ((ui32CurrentOrdinal != psRgxDevInfo->ui32HWPerfHostLastOrdinal + 1)
+                  && (--ui32Retry != 0))
+       {
+               /* Release lock and give a chance to a waiting context to emit the
+                * expected packet */
+               OSLockRelease (psRgxDevInfo->hLockHWPerfHostStream);
+               OSSleepms(100);
+               OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream);
+       }
+
+#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
+       if ((ui32Retry == 0) && !(psRgxDevInfo->bWarnedPktOrdinalBroke))
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                                "%s: Will warn only once! Potential packet(s) lost after ordinal"
+                                " %u (Current ordinal = %u)",
+                                __func__,
+                                psRgxDevInfo->ui32HWPerfHostLastOrdinal, ui32CurrentOrdinal));
+               psRgxDevInfo->bWarnedPktOrdinalBroke = IMG_TRUE;
+       }
+
+       if (psRgxDevInfo->ui32WaitForRightOrdPktHighWatermark < (MAX_RETRY_COUNT - ui32Retry))
+       {
+               psRgxDevInfo->ui32WaitForRightOrdPktHighWatermark = MAX_RETRY_COUNT - ui32Retry;
+       }
+#endif
+}
+
+static inline void _PostFunctionEpilogue(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                         IMG_UINT32 ui32CurrentOrdinal)
+{
+       /* update last ordinal emitted */
+       psRgxDevInfo->ui32HWPerfHostLastOrdinal = ui32CurrentOrdinal;
+
+       PVR_ASSERT(OSLockIsLocked(psRgxDevInfo->hLockHWPerfHostStream));
+       OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream);
+}
+
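+/* Thin wrappers around the Transport Layer stream API for the host stream;
+ * failures are logged and the affected packet is dropped. */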
+static inline IMG_UINT8 *_ReserveHWPerfStream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Size)
+{
+       IMG_UINT8 *pui8Dest;
+
+       PVRSRV_ERROR eError = TLStreamReserve(psRgxDevInfo->hHWPerfHostStream,
+                                             &pui8Dest, ui32Size);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not reserve space in %s buffer"
+                               " (%d). Dropping packet.",
+                               __func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError));
+               return NULL;
+       }
+       PVR_ASSERT(pui8Dest != NULL);
+
+       return pui8Dest;
+}
+
+static inline void _CommitHWPerfStream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Size)
+{
+       PVRSRV_ERROR eError = TLStreamCommit(psRgxDevInfo->hHWPerfHostStream,
+                                            ui32Size);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not commit data to %s"
+                               " (%d)", __func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError));
+       }
+}
+
+/* Returns IMG_TRUE if packet write passes, IMG_FALSE otherwise */
+static inline IMG_BOOL _WriteHWPerfStream(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                          RGX_HWPERF_V2_PACKET_HDR *psHeader)
+{
+       PVRSRV_ERROR eError = TLStreamWrite(psRgxDevInfo->hHWPerfHostStream,
+                                           IMG_OFFSET_ADDR(psHeader, 0), psHeader->ui32Size);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not write packet in %s buffer"
+                               " (%d). Dropping packet.",
+                               __func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError));
+       }
+
+       /* Regardless of whether write passed/failed, we consider it "written" */
+       psRgxDevInfo->ui32HWPerfHostLastOrdinal = psHeader->ui32Ordinal;
+
+       return (eError == PVRSRV_OK);
+}
+
+/* Helper macros for deferred events operations */
+#define GET_DE_NEXT_IDX(_curridx) ((_curridx + 1) % HWPERF_HOST_MAX_DEFERRED_PACKETS)
+#define GET_DE_EVENT_BASE(_idx)   (IMG_OFFSET_ADDR(psRgxDevInfo->pui8DeferredEvents, \
+               (_idx) * HWPERF_HOST_DEFERRED_UFO_PACKET_SIZE))
+
+#define GET_DE_EVENT_WRITE_STATUS(_base) ((IMG_BOOL*)((void *)(_base)))
+#define GET_DE_EVENT_DATA(_base)         (IMG_OFFSET_ADDR((_base), sizeof(IMG_BOOL)))
+
+/* Emits HWPerfHost event packets present in the deferred list, stopping when
+ * one of the following cases is hit:
+ * case 1: Packet ordering breaks, i.e. a packet is found that does not meet
+ *         the ordering criterion (ordinal == last_ordinal + 1)
+ *
+ * case 2: A packet with ordinal > ui32MaxOrdinal is found
+ *
+ * case 3: The deferred list is empty (read index == write index), i.e. there
+ *         are no more deferred packets.
+ *
+ * NOTE: The caller must hold the hLockHWPerfHostStream lock before calling
+ *       this function. */
+static void _HWPerfHostDeferredEventsEmitter(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                             IMG_UINT32 ui32MaxOrdinal)
+{
+       RGX_HWPERF_V2_PACKET_HDR *psHeader;
+       IMG_UINT32 ui32Retry;
+       IMG_UINT8  *pui8DeferredEvent;
+       IMG_BOOL   *pbPacketWritten;
+       IMG_BOOL   bWritePassed;
+
+       PVR_ASSERT(OSLockIsLocked(psRgxDevInfo->hLockHWPerfHostStream));
+
+       while (psRgxDevInfo->ui16DEReadIdx != psRgxDevInfo->ui16DEWriteIdx)
+       {
+               pui8DeferredEvent = GET_DE_EVENT_BASE(psRgxDevInfo->ui16DEReadIdx);
+               pbPacketWritten   = GET_DE_EVENT_WRITE_STATUS(pui8DeferredEvent);
+               psHeader          = (RGX_HWPERF_V2_PACKET_HDR*) GET_DE_EVENT_DATA(pui8DeferredEvent);
+
+               for (ui32Retry = MAX_RETRY_COUNT; !(*pbPacketWritten) && (ui32Retry != 0); ui32Retry--)
+               {
+                       /* Packet not yet written, re-check after a while. Wait for a short period as
+                        * atomic contexts are generally expected to finish fast */
+                       OSWaitus(10);
+               }
+
+#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
+               if ((ui32Retry == 0) && !(psRgxDevInfo->bWarnedAtomicCtxPktLost))
+               {
+                       PVR_DPF((PVR_DBG_WARNING,
+                                        "%s: Will warn only once. Dropping a deferred packet as atomic context"
+                                        " took too long to write it",
+                                        __func__));
+                       psRgxDevInfo->bWarnedAtomicCtxPktLost = IMG_TRUE;
+               }
+
+               if (psRgxDevInfo->ui32WaitForAtomicCtxPktHighWatermark < (MAX_RETRY_COUNT - ui32Retry))
+               {
+                       psRgxDevInfo->ui32WaitForAtomicCtxPktHighWatermark = MAX_RETRY_COUNT - ui32Retry;
+               }
+#endif
+
+               if (*pbPacketWritten)
+               {
+                       if ((psHeader->ui32Ordinal > ui32MaxOrdinal) ||
+                                       (psHeader->ui32Ordinal != (psRgxDevInfo->ui32HWPerfHostLastOrdinal + 1)))
+                       {
+                               /* Leave remaining events to be emitted by next call to this function */
+                               break;
+                       }
+                       bWritePassed = _WriteHWPerfStream(psRgxDevInfo, psHeader);
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_MESSAGE, "%s: Atomic context packet lost!", __func__));
+                       bWritePassed = IMG_FALSE;
+               }
+
+               /* Move on to next packet */
+               psRgxDevInfo->ui16DEReadIdx = GET_DE_NEXT_IDX(psRgxDevInfo->ui16DEReadIdx);
+
+               if (!bWritePassed // if write failed
+                               && ui32MaxOrdinal == IMG_UINT32_MAX // and we are from MISR
+                               && psRgxDevInfo->ui16DEReadIdx != psRgxDevInfo->ui16DEWriteIdx) // and there are more events
+               {
+                       /* Stop emitting here and re-schedule MISR */
+                       OSScheduleMISR(psRgxDevInfo->pvHostHWPerfMISR);
+                       break;
+               }
+       }
+}
+
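+/* MISR that drains packets queued from atomic contexts into the HWPerfHost
+ * stream. */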
+static void RGX_MISRHandler_HWPerfPostDeferredHostEvents(void *pvData)
+{
+       PVRSRV_RGXDEV_INFO *psRgxDevInfo = pvData;
+
+       OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream);
+
+       /* Since we're called from MISR, there is no upper cap of ordinal to be emitted.
+        * Send IMG_UINT32_MAX to signify all possible packets. */
+       _HWPerfHostDeferredEventsEmitter(psRgxDevInfo, IMG_UINT32_MAX);
+
+       OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream);
+}
+
+#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
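+/* Record the maximum occupancy seen in the deferred events ring buffer
+ * (debug instrumentation only). */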
+static inline void _UpdateDEBufferHighWatermark(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+       IMG_UINT32 ui32DEWatermark;
+       IMG_UINT16 ui16LRead = psRgxDevInfo->ui16DEReadIdx;
+       IMG_UINT16 ui16LWrite = psRgxDevInfo->ui16DEWriteIdx;
+
+       if (ui16LWrite >= ui16LRead)
+       {
+               ui32DEWatermark = ui16LWrite - ui16LRead;
+       }
+       else
+       {
+               ui32DEWatermark = (HWPERF_HOST_MAX_DEFERRED_PACKETS - ui16LRead) + (ui16LWrite);
+       }
+
+       if (ui32DEWatermark > psRgxDevInfo->ui32DEHighWatermark)
+       {
+               psRgxDevInfo->ui32DEHighWatermark = ui32DEWatermark;
+       }
+}
+#endif
+
+/* @Description Gets the data/members that determine the ordering and timestamp
+                of a packet in the HWPerfHost buffer. Since the data returned by
+                this function is required in both atomic and process/sleepable
+                contexts, it is protected by a spinlock.
+
+   @Output      pui32Ordinal Pointer to ordinal number assigned to this packet
+   @Output      pui64Timestamp Timestamp value for this packet
+   @Output      ppui8Dest If the current context cannot sleep, pointer to a place in
+                          deferred events buffer where the packet data should be written.
+                          Don't care, otherwise.
+ */
+static void _GetHWPerfHostPacketSpecifics(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                          IMG_UINT32 *pui32Ordinal,
+                                          IMG_UINT64 *pui64Timestamp,
+                                          IMG_UINT8 **ppui8Dest,
+                                          IMG_BOOL    bSleepAllowed)
+{
+       OS_SPINLOCK_FLAGS uiFlags;
+
+       /* A spin lock is required so that we are not scheduled out by a higher-priority
+        * context while we obtain the header-specific details and, when in an atomic
+        * context, our place in the deferred events buffer */
+       OSSpinLockAcquire(psRgxDevInfo->hHWPerfHostSpinLock, uiFlags);
+
+       *pui32Ordinal = psRgxDevInfo->ui32HWPerfHostNextOrdinal++;
+       *pui64Timestamp = RGXTimeCorrGetClockus64(psRgxDevInfo->psDeviceNode);
+
+       if (!bSleepAllowed)
+       {
+                       /* We're in an atomic context, so return the next available position in
+                        * the deferred events buffer */
+               IMG_UINT16 ui16NewWriteIdx;
+               IMG_BOOL *pbPacketWritten;
+
+               PVR_ASSERT(ppui8Dest != NULL);
+
+               ui16NewWriteIdx = GET_DE_NEXT_IDX(psRgxDevInfo->ui16DEWriteIdx);
+               if (ui16NewWriteIdx == psRgxDevInfo->ui16DEReadIdx)
+               {
+                       /* This shouldn't happen. HWPERF_HOST_MAX_DEFERRED_PACKETS should be
+                        * big enough to avoid any such scenario */
+#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
+                       /* PVR_LOG/printk isn't recommended in atomic context. Perhaps we'll do
+                        * this debug output here when trace_printk support is added to DDK */
+//                     PVR_LOG(("%s: No more space in deferred events buffer (%u/%u) W=%u,R=%u",
+//                     __func__, psRgxDevInfo->ui32DEHighWatermark,
+//                                      HWPERF_HOST_MAX_DEFERRED_PACKETS, psRgxDevInfo->ui16DEWriteIdx,
+//                                      psRgxDevInfo->ui16DEReadIdx));
+#endif
+                       *ppui8Dest = NULL;
+               }
+               else
+               {
+                       /* Return the position where deferred event would be written */
+                       *ppui8Dest = GET_DE_EVENT_BASE(psRgxDevInfo->ui16DEWriteIdx);
+
+                       /* Make sure packet write "state" is "write-pending" _before_ moving write
+                        * pointer forward */
+                       pbPacketWritten = GET_DE_EVENT_WRITE_STATUS(*ppui8Dest);
+                       *pbPacketWritten = IMG_FALSE;
+
+                       psRgxDevInfo->ui16DEWriteIdx = ui16NewWriteIdx;
+
+#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
+                       _UpdateDEBufferHighWatermark(psRgxDevInfo);
+#endif
+               }
+       }
+
+       OSSpinLockRelease(psRgxDevInfo->hHWPerfHostSpinLock, uiFlags);
+}
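+
+/* Callers in a sleepable context pass bSleepAllowed=IMG_TRUE with ppui8Dest set to
+ * NULL and then write directly to the HWPerfHost stream via _ReserveHWPerfStream();
+ * see RGXHWPerfHostPostRaw() below. Atomic-context callers pass bSleepAllowed=IMG_FALSE
+ * and are handed a slot in the deferred events buffer instead; see
+ * RGXHWPerfHostPostUfoEvent() below. */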
+
+static inline void _SetupHostPacketHeader(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                          IMG_UINT8 *pui8Dest,
+                                          RGX_HWPERF_HOST_EVENT_TYPE eEvType,
+                                          IMG_UINT32 ui32Size,
+                                          IMG_UINT32 ui32Ordinal,
+                                          IMG_UINT64 ui64Timestamp)
+{
+       RGX_HWPERF_V2_PACKET_HDR *psHeader = (RGX_HWPERF_V2_PACKET_HDR *) ((void *)pui8Dest);
+
+       PVR_ASSERT(ui32Size<=RGX_HWPERF_MAX_PACKET_SIZE);
+
+       psHeader->ui32Ordinal = ui32Ordinal;
+       psHeader->ui64Timestamp = ui64Timestamp;
+       psHeader->ui32Sig = HWPERF_PACKET_V2B_SIG;
+       psHeader->eTypeId = RGX_HWPERF_MAKE_TYPEID(RGX_HWPERF_STREAM_ID1_HOST,
+                                                  eEvType, 0, 0, 0);
+       psHeader->ui32Size = ui32Size;
+}
+
+static inline void _SetupHostEnqPacketData(IMG_UINT8 *pui8Dest,
+                                           RGX_HWPERF_KICK_TYPE eEnqType,
+                                           IMG_UINT32 ui32Pid,
+                                           IMG_UINT32 ui32FWDMContext,
+                                           IMG_UINT32 ui32ExtJobRef,
+                                           IMG_UINT32 ui32IntJobRef,
+                                           PVRSRV_FENCE hCheckFence,
+                                           PVRSRV_FENCE hUpdateFence,
+                                           PVRSRV_TIMELINE hUpdateTimeline,
+                                           IMG_UINT64 ui64CheckFenceUID,
+                                           IMG_UINT64 ui64UpdateFenceUID,
+                                           IMG_UINT64 ui64DeadlineInus,
+                                           IMG_UINT32 ui32CycleEstimate)
+{
+       RGX_HWPERF_HOST_ENQ_DATA *psData = (RGX_HWPERF_HOST_ENQ_DATA *)
+                                       IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR));
+       psData->ui32EnqType = eEnqType;
+       psData->ui32PID = ui32Pid;
+       psData->ui32ExtJobRef = ui32ExtJobRef;
+       psData->ui32IntJobRef = ui32IntJobRef;
+       psData->ui32DMContext = ui32FWDMContext;
+       psData->hCheckFence = hCheckFence;
+       psData->hUpdateFence = hUpdateFence;
+       psData->hUpdateTimeline = hUpdateTimeline;
+       psData->ui64CheckFence_UID = ui64CheckFenceUID;
+       psData->ui64UpdateFence_UID = ui64UpdateFenceUID;
+       psData->ui64DeadlineInus = ui64DeadlineInus;
+       psData->ui32CycleEstimate = ui32CycleEstimate;
+}
+
+void RGXHWPerfHostPostRaw(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                                 RGX_HWPERF_HOST_EVENT_TYPE eEvType,
+                                                 IMG_BYTE *pbPayload,
+                                                 IMG_UINT32 ui32PayloadSize)
+{
+       IMG_UINT8 *pui8Dest;
+       IMG_UINT32 ui32PktSize;
+       IMG_UINT32 ui32Ordinal;
+       IMG_UINT64 ui64Timestamp;
+
+       PVR_ASSERT(ui32PayloadSize <= RGX_HWPERF_MAX_PAYLOAD_SIZE);
+
+       _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, NULL, IMG_TRUE);
+       _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+
+       ui32PktSize = RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32PayloadSize);
+       pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32PktSize);
+
+       if (pui8Dest == NULL)
+       {
+               goto cleanup;
+       }
+
+       _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, eEvType, ui32PktSize, ui32Ordinal, ui64Timestamp);
+       OSDeviceMemCopy((IMG_UINT8*)IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)), pbPayload, ui32PayloadSize);
+       _CommitHWPerfStream(psRgxDevInfo, ui32PktSize);
+
+cleanup:
+       _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+}
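+
+/* Most of the RGXHWPerfHostPost* entry points below follow the same pattern as
+ * RGXHWPerfHostPostRaw(): obtain an ordinal and timestamp, run the post prologue,
+ * reserve space in the HWPerfHost stream, write a V2 packet header followed by the
+ * event-specific payload, commit the packet and run the post epilogue.
+ * RGXHWPerfHostPostUfoEvent() additionally supports the deferred, atomic-context
+ * path described above. */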
+
+void RGXHWPerfHostPostEnqEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                               RGX_HWPERF_KICK_TYPE eEnqType,
+                               IMG_UINT32 ui32Pid,
+                               IMG_UINT32 ui32FWDMContext,
+                               IMG_UINT32 ui32ExtJobRef,
+                               IMG_UINT32 ui32IntJobRef,
+                               PVRSRV_FENCE hCheckFence,
+                               PVRSRV_FENCE hUpdateFence,
+                               PVRSRV_TIMELINE hUpdateTimeline,
+                               IMG_UINT64 ui64CheckFenceUID,
+                               IMG_UINT64 ui64UpdateFenceUID,
+                               IMG_UINT64 ui64DeadlineInus,
+                               IMG_UINT32 ui32CycleEstimate )
+{
+       IMG_UINT8 *pui8Dest;
+       IMG_UINT32 ui32Size = RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_ENQ_DATA);
+       IMG_UINT32 ui32Ordinal;
+       IMG_UINT64 ui64Timestamp;
+
+       _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp,
+                                     NULL, IMG_TRUE);
+
+       _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+
+       if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+       {
+               goto cleanup;
+       }
+
+       _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_ENQ, ui32Size,
+                              ui32Ordinal, ui64Timestamp);
+       _SetupHostEnqPacketData(pui8Dest,
+                               eEnqType,
+                               ui32Pid,
+                               ui32FWDMContext,
+                               ui32ExtJobRef,
+                               ui32IntJobRef,
+                               hCheckFence,
+                               hUpdateFence,
+                               hUpdateTimeline,
+                               ui64CheckFenceUID,
+                               ui64UpdateFenceUID,
+                               ui64DeadlineInus,
+                               ui32CycleEstimate);
+
+       _CommitHWPerfStream(psRgxDevInfo, ui32Size);
+
+cleanup:
+       _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+}
+
+static inline IMG_UINT32 _CalculateHostUfoPacketSize(RGX_HWPERF_UFO_EV eUfoType)
+{
+       IMG_UINT32 ui32Size =
+                       (IMG_UINT32) offsetof(RGX_HWPERF_HOST_UFO_DATA, aui32StreamData);
+       RGX_HWPERF_UFO_DATA_ELEMENT *puData;
+
+       switch (eUfoType)
+       {
+               case RGX_HWPERF_UFO_EV_CHECK_SUCCESS:
+               case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS:
+                       ui32Size += sizeof(puData->sCheckSuccess);
+                       break;
+               case RGX_HWPERF_UFO_EV_CHECK_FAIL:
+               case RGX_HWPERF_UFO_EV_PRCHECK_FAIL:
+                       ui32Size += sizeof(puData->sCheckFail);
+                       break;
+               case RGX_HWPERF_UFO_EV_UPDATE:
+                       ui32Size += sizeof(puData->sUpdate);
+                       break;
+               default:
+                       // unknown type - this should never happen
+                       PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostUfoEvent: Invalid UFO"
+                                       " event type"));
+                       PVR_ASSERT(IMG_FALSE);
+                       break;
+       }
+
+       return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size);
+}
+
+static inline void _SetupHostUfoPacketData(IMG_UINT8 *pui8Dest,
+                                           RGX_HWPERF_UFO_EV eUfoType,
+                                           RGX_HWPERF_UFO_DATA_ELEMENT *psUFOData)
+{
+       RGX_HWPERF_HOST_UFO_DATA *psData = (RGX_HWPERF_HOST_UFO_DATA *)
+                                       IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR));
+       RGX_HWPERF_UFO_DATA_ELEMENT *puData = (RGX_HWPERF_UFO_DATA_ELEMENT *)
+                                        psData->aui32StreamData;
+
+       psData->eEvType = eUfoType;
+       /* HWPerfHost always emits 1 UFO at a time, since each UFO has a 1-to-1 mapping
+        * with an underlying DevNode, and each DevNode has a dedicated HWPerf buffer */
+       psData->ui32StreamInfo = RGX_HWPERF_MAKE_UFOPKTINFO(1,
+                                                           offsetof(RGX_HWPERF_HOST_UFO_DATA, aui32StreamData));
+
+       switch (eUfoType)
+       {
+               case RGX_HWPERF_UFO_EV_CHECK_SUCCESS:
+               case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS:
+                       puData->sCheckSuccess.ui32FWAddr =
+                                       psUFOData->sCheckSuccess.ui32FWAddr;
+                       puData->sCheckSuccess.ui32Value =
+                                       psUFOData->sCheckSuccess.ui32Value;
+                       break;
+               case RGX_HWPERF_UFO_EV_CHECK_FAIL:
+               case RGX_HWPERF_UFO_EV_PRCHECK_FAIL:
+                       puData->sCheckFail.ui32FWAddr =
+                                       psUFOData->sCheckFail.ui32FWAddr;
+                       puData->sCheckFail.ui32Value =
+                                       psUFOData->sCheckFail.ui32Value;
+                       puData->sCheckFail.ui32Required =
+                                       psUFOData->sCheckFail.ui32Required;
+                       break;
+               case RGX_HWPERF_UFO_EV_UPDATE:
+                       puData->sUpdate.ui32FWAddr =
+                                       psUFOData->sUpdate.ui32FWAddr;
+                       puData->sUpdate.ui32OldValue =
+                                       psUFOData->sUpdate.ui32OldValue;
+                       puData->sUpdate.ui32NewValue =
+                                       psUFOData->sUpdate.ui32NewValue;
+                       break;
+               default:
+                       // unknown type - this should never happen
+                       PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostUfoEvent: Invalid UFO"
+                                       " event type"));
+                       PVR_ASSERT(IMG_FALSE);
+                       break;
+       }
+}
+
+void RGXHWPerfHostPostUfoEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                               RGX_HWPERF_UFO_EV eUfoType,
+                               RGX_HWPERF_UFO_DATA_ELEMENT *psUFOData,
+                               const IMG_BOOL bSleepAllowed)
+{
+       IMG_UINT8 *pui8Dest;
+       IMG_UINT32 ui32Size = _CalculateHostUfoPacketSize(eUfoType);
+       IMG_UINT32 ui32Ordinal;
+       IMG_UINT64 ui64Timestamp;
+       IMG_BOOL   *pbPacketWritten = NULL;
+
+       _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp,
+                                     &pui8Dest, bSleepAllowed);
+
+       if (bSleepAllowed)
+       {
+               _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+
+               if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+               {
+                       goto cleanup;
+               }
+       }
+       else
+       {
+               if (pui8Dest == NULL)
+               {
+                       // Give up if we couldn't get a place in the deferred events buffer
+                       goto cleanup;
+               }
+               pbPacketWritten = GET_DE_EVENT_WRITE_STATUS(pui8Dest);
+               pui8Dest = GET_DE_EVENT_DATA(pui8Dest);
+       }
+
+       _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_UFO, ui32Size,
+                              ui32Ordinal, ui64Timestamp);
+       _SetupHostUfoPacketData(pui8Dest, eUfoType, psUFOData);
+
+       if (bSleepAllowed)
+       {
+               _CommitHWPerfStream(psRgxDevInfo, ui32Size);
+       }
+       else
+       {
+               *pbPacketWritten = IMG_TRUE;
+               OSScheduleMISR(psRgxDevInfo->pvHostHWPerfMISR);
+       }
+
+cleanup:
+       if (bSleepAllowed)
+       {
+               _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+       }
+}
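+
+/* In the atomic path above, *pbPacketWritten is only set to IMG_TRUE once the header
+ * and payload have been fully written, so the deferred-events emitter never forwards
+ * a half-written packet; the MISR is then scheduled to flush the deferred buffer into
+ * the HWPerfHost stream. */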
+
+#define UNKNOWN_SYNC_NAME "UnknownSync"
+
+static_assert(PVRSRV_SYNC_NAME_LENGTH==PVRSRV_SYNC_NAME_LENGTH, "Sync class name max does not match Fence Sync name max");
+
+static inline IMG_UINT32 _FixNameAndCalculateHostAllocPacketSize(
+               RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType,
+               const IMG_CHAR **ppsName,
+               IMG_UINT32 *ui32NameSize)
+{
+       RGX_HWPERF_HOST_ALLOC_DATA *psData;
+       IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_ALLOC_DATA, uAllocDetail);
+
+       if (*ppsName != NULL && *ui32NameSize > 0)
+       {
+               /* if the string is longer than the maximum, truncate it (leave space for '\0') */
+               if (*ui32NameSize >= PVRSRV_SYNC_NAME_LENGTH)
+                       *ui32NameSize = PVRSRV_SYNC_NAME_LENGTH;
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfHostPostAllocEvent: Invalid"
+                               " resource name given."));
+               *ppsName = UNKNOWN_SYNC_NAME;
+               *ui32NameSize = sizeof(UNKNOWN_SYNC_NAME);
+       }
+
+       switch (eAllocType)
+       {
+               case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC:
+                       ui32Size += sizeof(psData->uAllocDetail.sSyncAlloc) - PVRSRV_SYNC_NAME_LENGTH +
+                       *ui32NameSize;
+                       break;
+               case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR:
+                       ui32Size += sizeof(psData->uAllocDetail.sFenceAlloc) - PVRSRV_SYNC_NAME_LENGTH +
+                       *ui32NameSize;
+                       break;
+               case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW:
+                       ui32Size += sizeof(psData->uAllocDetail.sSWFenceAlloc) - PVRSRV_SYNC_NAME_LENGTH +
+                       *ui32NameSize;
+                       break;
+               case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP:
+                       ui32Size += sizeof(psData->uAllocDetail.sSyncCheckPointAlloc) - PVRSRV_SYNC_NAME_LENGTH +
+                       *ui32NameSize;
+                       break;
+               default:
+                       // unknown type - this should never happen
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "RGXHWPerfHostPostAllocEvent: Invalid alloc event type"));
+                       PVR_ASSERT(IMG_FALSE);
+                       break;
+       }
+
+       return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size);
+}
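+
+/* The size computed above is the offset of the uAllocDetail union plus the size of
+ * the union member in use, with the fixed PVRSRV_SYNC_NAME_LENGTH name buffer
+ * replaced by the actual (possibly truncated) name length, before being wrapped by
+ * RGX_HWPERF_MAKE_SIZE_VARIABLE(). */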
+
+static inline void _SetupHostAllocPacketData(IMG_UINT8 *pui8Dest,
+                                             RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType,
+                                             RGX_HWPERF_HOST_ALLOC_DETAIL *puAllocDetail,
+                                             const IMG_CHAR *psName,
+                                             IMG_UINT32 ui32NameSize)
+{
+       RGX_HWPERF_HOST_ALLOC_DATA *psData = (RGX_HWPERF_HOST_ALLOC_DATA *)
+                                       IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR));
+
+       IMG_CHAR *acName = NULL;
+
+       psData->ui32AllocType = eAllocType;
+
+       switch (eAllocType)
+       {
+               case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC:
+                       psData->uAllocDetail.sSyncAlloc = puAllocDetail->sSyncAlloc;
+                       acName = psData->uAllocDetail.sSyncAlloc.acName;
+                       break;
+               case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR:
+                       psData->uAllocDetail.sFenceAlloc = puAllocDetail->sFenceAlloc;
+                       acName = psData->uAllocDetail.sFenceAlloc.acName;
+                       break;
+               case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW:
+                       psData->uAllocDetail.sSWFenceAlloc = puAllocDetail->sSWFenceAlloc;
+                       acName = psData->uAllocDetail.sSWFenceAlloc.acName;
+                       break;
+               case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP:
+                       psData->uAllocDetail.sSyncCheckPointAlloc = puAllocDetail->sSyncCheckPointAlloc;
+                       acName = psData->uAllocDetail.sSyncCheckPointAlloc.acName;
+                       break;
+               default:
+                       // unknown type - this should never happen
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "RGXHWPerfHostPostAllocEvent: Invalid alloc event type"));
+                       PVR_ASSERT(IMG_FALSE);
+       }
+
+
+       if (acName != NULL)
+       {
+               if (ui32NameSize)
+               {
+                       OSStringLCopy(acName, psName, ui32NameSize);
+               }
+               else
+               {
+                       /* If no name was given, make sure we don't access random
+                        * memory */
+                       acName[0] = '\0';
+               }
+       }
+}
+
+void RGXHWPerfHostPostAllocEvent(PVRSRV_RGXDEV_INFO* psRgxDevInfo,
+                                 RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType,
+                                 const IMG_CHAR *psName,
+                                 IMG_UINT32 ui32NameSize,
+                                 RGX_HWPERF_HOST_ALLOC_DETAIL *puAllocDetail)
+{
+       IMG_UINT8 *pui8Dest;
+       IMG_UINT64 ui64Timestamp;
+       IMG_UINT32 ui32Ordinal;
+       IMG_UINT32 ui32Size = _FixNameAndCalculateHostAllocPacketSize(eAllocType,
+                                                                     &psName,
+                                                                     &ui32NameSize);
+
+       _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp,
+                                     NULL, IMG_TRUE);
+
+       _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+
+       if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+       {
+               goto cleanup;
+       }
+
+       _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_ALLOC, ui32Size,
+                              ui32Ordinal, ui64Timestamp);
+
+       _SetupHostAllocPacketData(pui8Dest,
+                                 eAllocType,
+                                 puAllocDetail,
+                                 psName,
+                                 ui32NameSize);
+
+       _CommitHWPerfStream(psRgxDevInfo, ui32Size);
+
+cleanup:
+       _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+}
+
+static inline void _SetupHostFreePacketData(IMG_UINT8 *pui8Dest,
+                                            RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType,
+                                            IMG_UINT64 ui64UID,
+                                            IMG_UINT32 ui32PID,
+                                            IMG_UINT32 ui32FWAddr)
+{
+       RGX_HWPERF_HOST_FREE_DATA *psData = (RGX_HWPERF_HOST_FREE_DATA *)
+                                       IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR));
+
+       psData->ui32FreeType = eFreeType;
+
+       switch (eFreeType)
+       {
+               case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC:
+                       psData->uFreeDetail.sSyncFree.ui32FWAddr = ui32FWAddr;
+                       break;
+               case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR:
+                       psData->uFreeDetail.sFenceDestroy.ui64Fence_UID = ui64UID;
+                       break;
+               case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP:
+                       psData->uFreeDetail.sSyncCheckPointFree.ui32CheckPt_FWAddr = ui32FWAddr;
+                       break;
+               default:
+                       // unknown type - this should never happen
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "RGXHWPerfHostPostFreeEvent: Invalid free event type"));
+                       PVR_ASSERT(IMG_FALSE);
+       }
+}
+
+void RGXHWPerfHostPostFreeEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType,
+                                IMG_UINT64 ui64UID,
+                                IMG_UINT32 ui32PID,
+                                IMG_UINT32 ui32FWAddr)
+{
+       IMG_UINT8 *pui8Dest;
+       IMG_UINT32 ui32Size = RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_FREE_DATA);
+       IMG_UINT32 ui32Ordinal;
+       IMG_UINT64 ui64Timestamp;
+
+       _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp,
+                                     NULL, IMG_TRUE);
+       _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+
+       if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+       {
+               goto cleanup;
+       }
+
+       _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_FREE, ui32Size,
+                              ui32Ordinal, ui64Timestamp);
+       _SetupHostFreePacketData(pui8Dest,
+                                eFreeType,
+                                ui64UID,
+                                ui32PID,
+                                ui32FWAddr);
+
+       _CommitHWPerfStream(psRgxDevInfo, ui32Size);
+
+cleanup:
+       _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+}
+
+static inline IMG_UINT32 _FixNameAndCalculateHostModifyPacketSize(
+               RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType,
+               const IMG_CHAR **ppsName,
+               IMG_UINT32 *ui32NameSize)
+{
+       RGX_HWPERF_HOST_MODIFY_DATA *psData;
+       RGX_HWPERF_HOST_MODIFY_DETAIL *puData;
+       IMG_UINT32 ui32Size = sizeof(psData->ui32ModifyType);
+
+       if (*ppsName != NULL && *ui32NameSize > 0)
+       {
+               /* first strip the terminator */
+               if ((*ppsName)[*ui32NameSize - 1] == '\0')
+                       *ui32NameSize -= 1;
+               /* if the string is longer than the maximum, truncate it (leave space for '\0') */
+               if (*ui32NameSize >= PVRSRV_SYNC_NAME_LENGTH)
+                       *ui32NameSize = PVRSRV_SYNC_NAME_LENGTH - 1;
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfHostPostModifyEvent: Invalid"
+                               " resource name given."));
+               *ppsName = UNKNOWN_SYNC_NAME;
+               *ui32NameSize = sizeof(UNKNOWN_SYNC_NAME) - 1;
+       }
+
+       switch (eModifyType)
+       {
+               case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR:
+                       ui32Size += sizeof(puData->sFenceMerge) - PVRSRV_SYNC_NAME_LENGTH +
+                       *ui32NameSize + 1; /* +1 for '\0' */
+                       break;
+               default:
+                       // unknown type - this should never happen
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "RGXHWPerfHostPostModifyEvent: Invalid modify event type"));
+                       PVR_ASSERT(IMG_FALSE);
+                       break;
+       }
+
+       return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size);
+}
+
+static inline void _SetupHostModifyPacketData(IMG_UINT8 *pui8Dest,
+                                              RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType,
+                                              IMG_UINT64 ui64NewUID,
+                                              IMG_UINT64 ui64UID1,
+                                              IMG_UINT64 ui64UID2,
+                                              const IMG_CHAR *psName,
+                                              IMG_UINT32 ui32NameSize)
+{
+       RGX_HWPERF_HOST_MODIFY_DATA *psData = (RGX_HWPERF_HOST_MODIFY_DATA *)IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR));
+
+       IMG_CHAR *acName = NULL;
+
+       psData->ui32ModifyType = eModifyType;
+
+       switch (eModifyType)
+       {
+               case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR:
+                       psData->uModifyDetail.sFenceMerge.ui64NewFence_UID = ui64NewUID;
+                       psData->uModifyDetail.sFenceMerge.ui64InFence1_UID = ui64UID1;
+                       psData->uModifyDetail.sFenceMerge.ui64InFence2_UID = ui64UID2;
+                       acName = psData->uModifyDetail.sFenceMerge.acName;
+                       break;
+               default:
+                       // unknown type - this should never happen
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "RGXHWPerfHostPostModifyEvent: Invalid modify event type"));
+                       PVR_ASSERT(IMG_FALSE);
+       }
+
+       if (acName != NULL)
+       {
+               if (ui32NameSize)
+               {
+                       OSStringLCopy(acName, psName, ui32NameSize);
+               }
+               else
+               {
+                       /* If no name was given, make sure we don't access random
+                        * memory */
+                       acName[0] = '\0';
+               }
+       }
+}
+
+void RGXHWPerfHostPostModifyEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                  RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType,
+                                  IMG_UINT64 ui64NewUID,
+                                  IMG_UINT64 ui64UID1,
+                                  IMG_UINT64 ui64UID2,
+                                  const IMG_CHAR *psName,
+                                  IMG_UINT32 ui32NameSize)
+{
+       IMG_UINT8 *pui8Dest;
+       IMG_UINT64 ui64Timestamp;
+       IMG_UINT32 ui32Ordinal;
+       IMG_UINT32 ui32Size = _FixNameAndCalculateHostModifyPacketSize(eModifyType,
+                                                                      &psName,
+                                                                      &ui32NameSize);
+
+       _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp,
+                                     NULL, IMG_TRUE);
+       _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+
+       if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+       {
+               goto cleanup;
+       }
+
+       _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_MODIFY, ui32Size,
+                              ui32Ordinal, ui64Timestamp);
+       _SetupHostModifyPacketData(pui8Dest,
+                                  eModifyType,
+                                  ui64NewUID,
+                                  ui64UID1,
+                                  ui64UID2,
+                                  psName,
+                                  ui32NameSize);
+
+       _CommitHWPerfStream(psRgxDevInfo, ui32Size);
+
+cleanup:
+       _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+}
+
+static inline void _SetupHostClkSyncPacketData(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT8 *pui8Dest)
+{
+       RGX_HWPERF_HOST_CLK_SYNC_DATA *psData = (RGX_HWPERF_HOST_CLK_SYNC_DATA *)
+                                       IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR));
+       RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psRgxDevInfo->psRGXFWIfGpuUtilFWCb;
+       IMG_UINT32 ui32CurrIdx =
+                       RGXFWIF_TIME_CORR_CURR_INDEX(psGpuUtilFWCB->ui32TimeCorrSeqCount);
+       RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32CurrIdx];
+
+       psData->ui64CRTimestamp = psTimeCorr->ui64CRTimeStamp;
+       psData->ui64OSTimestamp = psTimeCorr->ui64OSTimeStamp;
+       psData->ui32ClockSpeed = psTimeCorr->ui32CoreClockSpeed;
+}
+
+void RGXHWPerfHostPostClkSyncEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+       IMG_UINT8 *pui8Dest;
+       IMG_UINT32 ui32Size =
+                       RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_CLK_SYNC_DATA);
+       IMG_UINT32 ui32Ordinal;
+       IMG_UINT64 ui64Timestamp;
+
+       /* If the buffer for time-correlation data is not yet available (possibly
+        * because the device is not initialised yet), skip this event */
+       if (psRgxDevInfo->psRGXFWIfGpuUtilFWCb == NULL)
+       {
+               return;
+       }
+
+       _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp,
+                                     NULL, IMG_TRUE);
+       _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+
+       if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+       {
+               goto cleanup;
+       }
+
+       _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_CLK_SYNC, ui32Size,
+                              ui32Ordinal, ui64Timestamp);
+       _SetupHostClkSyncPacketData(psRgxDevInfo, pui8Dest);
+
+       _CommitHWPerfStream(psRgxDevInfo, ui32Size);
+
+cleanup:
+       _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+}
+
+static inline RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS _ConvDeviceHealthStatus(PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus)
+{
+       switch (eDeviceHealthStatus)
+       {
+               case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED:                     return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED;
+               case PVRSRV_DEVICE_HEALTH_STATUS_OK:                            return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_OK;
+               case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING:        return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_RESPONDING;
+               case PVRSRV_DEVICE_HEALTH_STATUS_DEAD:                          return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_DEAD;
+               case PVRSRV_DEVICE_HEALTH_STATUS_FAULT:                         return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_FAULT;
+               default:                                                                                        return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED;
+       }
+}
+
+static inline RGX_HWPERF_HOST_DEVICE_HEALTH_REASON _ConvDeviceHealthReason(PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason)
+{
+       switch (eDeviceHealthReason)
+       {
+               case PVRSRV_DEVICE_HEALTH_REASON_NONE:                          return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_NONE;
+               case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED:                      return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_ASSERTED;
+               case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING:          return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_POLL_FAILING;
+               case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS:                      return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_TIMEOUTS;
+               case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT:         return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_CORRUPT;
+               case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED:         return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_STALLED;
+               case PVRSRV_DEVICE_HEALTH_REASON_IDLING:                        return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_IDLING;
+               case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING:            return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_RESTARTING;
+               case PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS;
+               default:                                                                                        return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_UNDEFINED;
+       }
+}
+
+static inline void _SetupHostDeviceInfoPacketData(RGX_HWPERF_DEV_INFO_EV eEvType,
+                                                                                                 PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus,
+                                                                                                 PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason,
+                                                                                                 IMG_UINT8 *pui8Dest)
+{
+       RGX_HWPERF_HOST_DEV_INFO_DATA *psData = (RGX_HWPERF_HOST_DEV_INFO_DATA *)IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR));
+       psData->eEvType = eEvType;
+
+       switch (eEvType)
+       {
+               case RGX_HWPERF_DEV_INFO_EV_HEALTH:
+                       psData->uDevInfoDetail.sDeviceStatus.eDeviceHealthStatus = _ConvDeviceHealthStatus(eDeviceHealthStatus);
+                       psData->uDevInfoDetail.sDeviceStatus.eDeviceHealthReason = _ConvDeviceHealthReason(eDeviceHealthReason);
+                       break;
+               default:
+                       // unknown type - this should never happen
+                       PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostDeviceInfo: Invalid event type"));
+                       PVR_ASSERT(IMG_FALSE);
+                       break;
+       }
+}
+
+static inline IMG_UINT32 _CalculateHostDeviceInfoPacketSize(RGX_HWPERF_DEV_INFO_EV eEvType)
+{
+       IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_DEV_INFO_DATA, uDevInfoDetail);
+
+       switch (eEvType)
+       {
+               case RGX_HWPERF_DEV_INFO_EV_HEALTH:
+                       ui32Size += sizeof(((RGX_HWPERF_HOST_DEV_INFO_DATA*)0)->uDevInfoDetail.sDeviceStatus);
+                       break;
+               default:
+                       // unknown type - this should never happen
+                       PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostDeviceInfo: Invalid event type"));
+                       PVR_ASSERT(IMG_FALSE);
+                       break;
+       }
+       return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size);
+}
+
+void RGXHWPerfHostPostDeviceInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                                                RGX_HWPERF_DEV_INFO_EV eEvType,
+                                                                PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus,
+                                                                PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason)
+{
+       IMG_UINT8 *pui8Dest;
+       IMG_UINT32 ui32Ordinal;
+       IMG_UINT64 ui64Timestamp;
+       IMG_UINT32 ui32Size;
+
+       OSLockAcquire(psRgxDevInfo->hHWPerfLock);
+
+       if (psRgxDevInfo->hHWPerfHostStream != (IMG_HANDLE) NULL)
+       {
+               _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, NULL, IMG_TRUE);
+               _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+               ui32Size = _CalculateHostDeviceInfoPacketSize(eEvType);
+
+               if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) != NULL)
+               {
+                       _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_DEV_INFO, ui32Size, ui32Ordinal, ui64Timestamp);
+                       _SetupHostDeviceInfoPacketData(eEvType, eDeviceHealthStatus, eDeviceHealthReason, pui8Dest);
+                       _CommitHWPerfStream(psRgxDevInfo, ui32Size);
+               }
+
+               _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+       }
+
+       OSLockRelease(psRgxDevInfo->hHWPerfLock);
+}
+
+static inline void _SetupHostInfoPacketData(RGX_HWPERF_INFO_EV eEvType,
+                                                                                                 IMG_UINT32 ui32TotalMemoryUsage,
+                                                                                                 IMG_UINT32 ui32LivePids,
+                                                                                                 PVRSRV_PER_PROCESS_MEM_USAGE *psPerProcessMemUsage,
+                                                                                                 IMG_UINT8 *pui8Dest)
+{
+       IMG_INT i;
+       RGX_HWPERF_HOST_INFO_DATA *psData = (RGX_HWPERF_HOST_INFO_DATA *)IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR));
+       psData->eEvType = eEvType;
+
+       switch (eEvType)
+       {
+               case RGX_HWPERF_INFO_EV_MEM_USAGE:
+                       psData->uInfoDetail.sMemUsageStats.ui32TotalMemoryUsage = ui32TotalMemoryUsage;
+
+                       if (psPerProcessMemUsage)
+                       {
+                               for (i = 0; i < ui32LivePids; ++i)
+                               {
+                                       psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui32Pid = psPerProcessMemUsage[i].ui32Pid;
+                                       psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui32KernelMemUsage = psPerProcessMemUsage[i].ui32KernelMemUsage;
+                                       psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui32GraphicsMemUsage = psPerProcessMemUsage[i].ui32GraphicsMemUsage;
+                               }
+                       }
+                       break;
+               default:
+                       // unknown type - this should never happen
+                       PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostInfo: Invalid event type"));
+                       PVR_ASSERT(IMG_FALSE);
+                       break;
+       }
+}
+
+static inline IMG_UINT32 _CalculateHostInfoPacketSize(RGX_HWPERF_INFO_EV eEvType,
+                                                                                                                       IMG_UINT32 *pui32TotalMemoryUsage,
+                                                                                                                       IMG_UINT32 *pui32LivePids,
+                                                                                                                       PVRSRV_PER_PROCESS_MEM_USAGE **ppsPerProcessMemUsage)
+{
+       IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_INFO_DATA, uInfoDetail);
+
+       switch (eEvType)
+       {
+               case RGX_HWPERF_INFO_EV_MEM_USAGE:
+#if !defined(__QNXNTO__)
+                       if (PVRSRVGetProcessMemUsage(pui32TotalMemoryUsage, pui32LivePids, ppsPerProcessMemUsage) == PVRSRV_OK)
+                       {
+                               ui32Size += ((offsetof(RGX_HWPERF_HOST_INFO_DATA, uInfoDetail.sMemUsageStats.ui32TotalMemoryUsage) - ui32Size)
+                                       + ((*pui32LivePids) * sizeof(((RGX_HWPERF_HOST_INFO_DATA*)0)->uInfoDetail.sMemUsageStats.sPerProcessUsage)));
+                       }
+#else
+                       PVR_DPF((PVR_DBG_ERROR, "This functionality is not yet implemented for this platform"));
+#endif
+                       break;
+               default:
+                       // unknown type - this should never happen
+                       PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostInfo: Invalid event type"));
+                       PVR_ASSERT(IMG_FALSE);
+                       break;
+       }
+       return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size);
+}
+
+void RGXHWPerfHostPostInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                                                RGX_HWPERF_INFO_EV eEvType)
+{
+       IMG_UINT8 *pui8Dest;
+       IMG_UINT32 ui32Size;
+       IMG_UINT32 ui32Ordinal;
+       IMG_UINT64 ui64Timestamp;
+       IMG_UINT32 ui32TotalMemoryUsage = 0;
+       PVRSRV_PER_PROCESS_MEM_USAGE *psPerProcessMemUsage = NULL;
+       IMG_UINT32 ui32LivePids = 0;
+
+       OSLockAcquire(psRgxDevInfo->hHWPerfLock);
+
+       if (psRgxDevInfo->hHWPerfHostStream != (IMG_HANDLE) NULL)
+       {
+               _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, NULL, IMG_TRUE);
+               _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+
+               ui32Size = _CalculateHostInfoPacketSize(eEvType, &ui32TotalMemoryUsage, &ui32LivePids, &psPerProcessMemUsage);
+
+               if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) != NULL)
+               {
+                       _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_INFO, ui32Size, ui32Ordinal, ui64Timestamp);
+                       _SetupHostInfoPacketData(eEvType, ui32TotalMemoryUsage, ui32LivePids, psPerProcessMemUsage, pui8Dest);
+                       _CommitHWPerfStream(psRgxDevInfo, ui32Size);
+               }
+
+               _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+
+               if (psPerProcessMemUsage)
+                       OSFreeMemNoStats(psPerProcessMemUsage); // psPerProcessMemUsage was allocated with OSAllocZMemNoStats
+       }
+
+       OSLockRelease(psRgxDevInfo->hHWPerfLock);
+}
+
+static inline IMG_UINT32
+_CalculateHostFenceWaitPacketSize(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eWaitType)
+{
+       RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA *psSizeCalculator;
+       IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA, uDetail);
+
+       switch (eWaitType)
+       {
+               case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN:
+                       ui32Size += sizeof(psSizeCalculator->uDetail.sBegin);
+                       break;
+               case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END:
+                       ui32Size += sizeof(psSizeCalculator->uDetail.sEnd);
+                       break;
+               default:
+                       // unknown type - this should never happen
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Invalid wait event type (%u)", __func__,
+                                eWaitType));
+                       PVR_ASSERT(IMG_FALSE);
+                       break;
+       }
+       return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size);
+}
+
+static inline void
+_SetupHostFenceWaitPacketData(IMG_UINT8 *pui8Dest,
+                              RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eWaitType,
+                              IMG_PID uiPID,
+                              PVRSRV_FENCE hFence,
+                              IMG_UINT32 ui32Data)
+{
+       RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA *psData = (RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA *)
+                       IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR));
+
+       psData->eType = eWaitType;
+       psData->uiPID = uiPID;
+       psData->hFence = hFence;
+
+       switch (eWaitType)
+       {
+               case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN:
+                       psData->uDetail.sBegin.ui32TimeoutInMs = ui32Data;
+                       break;
+               case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END:
+                       psData->uDetail.sEnd.eResult =
+                           (RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT) ui32Data;
+                       break;
+               default:
+                       // unknown type - this should never happen
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Invalid fence-wait event type", __func__));
+                       PVR_ASSERT(IMG_FALSE);
+       }
+}
+
+void RGXHWPerfHostPostFenceWait(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                                               RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eType,
+                                                               IMG_PID uiPID,
+                                                               PVRSRV_FENCE hFence,
+                                                               IMG_UINT32 ui32Data)
+{
+       IMG_UINT8 *pui8Dest;
+       IMG_UINT32 ui32Size;
+       IMG_UINT32 ui32Ordinal;
+       IMG_UINT64 ui64Timestamp;
+
+       _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp,
+                                     NULL, IMG_TRUE);
+
+       _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+
+       ui32Size = _CalculateHostFenceWaitPacketSize(eType);
+       if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+       {
+               goto cleanup;
+       }
+
+       _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_SYNC_FENCE_WAIT,
+                              ui32Size, ui32Ordinal, ui64Timestamp);
+       _SetupHostFenceWaitPacketData(pui8Dest, eType, uiPID, hFence, ui32Data);
+
+       _CommitHWPerfStream(psRgxDevInfo, ui32Size);
+
+cleanup:
+       _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+}
+
+static inline IMG_UINT32 _CalculateHostSWTimelineAdvPacketSize(void)
+{
+       IMG_UINT32 ui32Size = sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA);
+       return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size);
+}
+
+static inline void
+_SetupHostSWTimelineAdvPacketData(IMG_UINT8 *pui8Dest,
+                                  IMG_PID uiPID,
+                                                                 PVRSRV_TIMELINE hSWTimeline,
+                                                                 IMG_UINT64 ui64SyncPtIndex)
+
+{
+       RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA *psData = (RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA *)
+                       IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR));
+
+       psData->uiPID = uiPID;
+       psData->hTimeline = hSWTimeline;
+       psData->ui64SyncPtIndex = ui64SyncPtIndex;
+}
+
+void RGXHWPerfHostPostSWTimelineAdv(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                    IMG_PID uiPID,
+                                    PVRSRV_TIMELINE hSWTimeline,
+                                    IMG_UINT64 ui64SyncPtIndex)
+{
+       IMG_UINT8 *pui8Dest;
+       IMG_UINT32 ui32Size;
+       IMG_UINT32 ui32Ordinal;
+       IMG_UINT64 ui64Timestamp;
+
+       _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp,
+                                     NULL, IMG_TRUE);
+
+       _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+
+       ui32Size = _CalculateHostSWTimelineAdvPacketSize();
+       if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+       {
+               goto cleanup;
+       }
+
+       _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_SYNC_SW_TL_ADVANCE,
+                              ui32Size, ui32Ordinal, ui64Timestamp);
+       _SetupHostSWTimelineAdvPacketData(pui8Dest, uiPID, hSWTimeline, ui64SyncPtIndex);
+
+       _CommitHWPerfStream(psRgxDevInfo, ui32Size);
+
+cleanup:
+       _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+
+}
+
+void RGXHWPerfHostPostClientInfoProcName(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                         IMG_PID uiPID,
+                                                                            const IMG_CHAR *psName)
+{
+       RGX_HWPERF_HOST_CLIENT_INFO_DATA* psPkt;
+       IMG_UINT8 *pui8Dest;
+       IMG_UINT32 ui32Size;
+       IMG_UINT32 ui32NameLen;
+       IMG_UINT32 ui32Ordinal;
+       IMG_UINT64 ui64Timestamp;
+
+       _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, NULL, IMG_TRUE);
+       _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+
+       ui32NameLen = OSStringLength(psName) + 1U;
+       ui32Size = RGX_HWPERF_MAKE_SIZE_VARIABLE(RGX_HWPERF_HOST_CLIENT_INFO_PROC_NAME_BASE_SIZE
+               + RGX_HWPERF_HOST_CLIENT_PROC_NAME_SIZE(ui32NameLen));
+
+       if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+       {
+               goto cleanup;
+       }
+
+       _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_CLIENT_INFO,
+                              ui32Size, ui32Ordinal, ui64Timestamp);
+
+       psPkt = (RGX_HWPERF_HOST_CLIENT_INFO_DATA*)IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR));
+       psPkt->eType = RGX_HWPERF_HOST_CLIENT_INFO_TYPE_PROCESS_NAME;
+       psPkt->uDetail.sProcName.ui32Count = 1U;
+       psPkt->uDetail.sProcName.asProcNames[0].uiClientPID = uiPID;
+       psPkt->uDetail.sProcName.asProcNames[0].ui32Length = ui32NameLen;
+       (void)OSStringLCopy(psPkt->uDetail.sProcName.asProcNames[0].acName, psName, ui32NameLen);
+
+       _CommitHWPerfStream(psRgxDevInfo, ui32Size);
+
+cleanup:
+       _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+}
+
+/******************************************************************************
+ * Currently only implemented on Linux. This feature can be enabled to provide
+ * an interface to 3rd-party kernel modules that wish to access the
+ * HWPerf data. The API is documented in the rgxapi_km.h header and
+ * the rgx_hwperf* headers.
+ *****************************************************************************/
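+
+/* A minimal usage sketch for such a kernel-mode client, assuming the rgxapi_km.h
+ * entry points mirror the functions defined in this section (names are taken from
+ * this file; error handling and the acquire/release/close steps are elided):
+ *
+ *     RGX_HWPERF_CONNECTION *psConn = NULL;
+ *
+ *     if (RGXHWPerfLazyConnect(&psConn) == PVRSRV_OK &&
+ *         RGXHWPerfOpen(psConn) == PVRSRV_OK)
+ *     {
+ *         // ... read HWPerf data through the connection ...
+ *     }
+ */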
+
+/* Internal HWPerf kernel connection/device data object to track the state
+ * of a client session.
+ */
+typedef struct
+{
+       PVRSRV_DEVICE_NODE* psRgxDevNode;
+       PVRSRV_RGXDEV_INFO* psRgxDevInfo;
+
+       /* TL Open/close state */
+       IMG_HANDLE          hSD[RGX_HWPERF_MAX_STREAM_ID];
+
+       /* TL Acquire/release state */
+       IMG_PBYTE                       pHwpBuf[RGX_HWPERF_MAX_STREAM_ID];                      /*!< buffer returned to user in acquire call */
+       IMG_PBYTE                       pHwpBufEnd[RGX_HWPERF_MAX_STREAM_ID];           /*!< pointer to end of HwpBuf */
+       IMG_PBYTE                       pTlBuf[RGX_HWPERF_MAX_STREAM_ID];                       /*!< buffer obtained via TlAcquireData */
+       IMG_PBYTE                       pTlBufPos[RGX_HWPERF_MAX_STREAM_ID];            /*!< initial position in TlBuf to acquire packets */
+       IMG_PBYTE                       pTlBufRead[RGX_HWPERF_MAX_STREAM_ID];           /*!< pointer to the last packet read */
+       IMG_UINT32                      ui32AcqDataLen[RGX_HWPERF_MAX_STREAM_ID];       /*!< length of acquired TlBuf */
+       IMG_BOOL                        bRelease[RGX_HWPERF_MAX_STREAM_ID];             /*!< used to determine whether or not to release currently held TlBuf */
+
+
+} RGX_KM_HWPERF_DEVDATA;
+
+PVRSRV_ERROR RGXHWPerfLazyConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection)
+{
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+       RGX_KM_HWPERF_DEVDATA *psDevData;
+       RGX_HWPERF_DEVICE *psNewHWPerfDevice;
+       RGX_HWPERF_CONNECTION* psHWPerfConnection;
+       IMG_BOOL bFWActive = IMG_FALSE;
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+       /* avoid uninitialised data */
+       PVR_ASSERT(*ppsHWPerfConnection == NULL);
+       PVR_ASSERT(psPVRSRVData);
+
+       /* Allocate connection object */
+       psHWPerfConnection = OSAllocZMem(sizeof(*psHWPerfConnection));
+       if (!psHWPerfConnection)
+       {
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+       /* save the return pointer early to aid clean-up if a failure occurs */
+       *ppsHWPerfConnection = psHWPerfConnection;
+
+       OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock);
+       psDeviceNode = psPVRSRVData->psDeviceNodeList;
+
+       while (psDeviceNode)
+       {
+               if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE)
+               {
+                       PVR_DPF((PVR_DBG_WARNING,
+                                        "%s: HWPerf: Device not currently active. ID:%u",
+                                        __func__,
+                                        psDeviceNode->sDevId.i32OsDeviceID));
+                       psDeviceNode = psDeviceNode->psNext;
+                       continue;
+               }
+               /* Create a list node to be attached to connection object's list */
+               psNewHWPerfDevice = OSAllocMem(sizeof(*psNewHWPerfDevice));
+               if (!psNewHWPerfDevice)
+               {
+                       OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock);
+                       return PVRSRV_ERROR_OUT_OF_MEMORY;
+               }
+               /* Insert node at head of the list */
+               psNewHWPerfDevice->psNext = psHWPerfConnection->psHWPerfDevList;
+               psHWPerfConnection->psHWPerfDevList = psNewHWPerfDevice;
+
+               /* create a device data object for kernel server */
+               psDevData = OSAllocZMem(sizeof(*psDevData));
+               psNewHWPerfDevice->hDevData = (IMG_HANDLE)psDevData;
+               if (!psDevData)
+               {
+                       OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock);
+                       return PVRSRV_ERROR_OUT_OF_MEMORY;
+               }
+               if (OSSNPrintf(psNewHWPerfDevice->pszName, sizeof(psNewHWPerfDevice->pszName),
+                                  "hwperf_device_%d", psDeviceNode->sDevId.i32OsDeviceID) < 0)
+               {
+                       OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock);
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Failed to form HWPerf device name for device %d",
+                                       __func__,
+                                       psDeviceNode->sDevId.i32OsDeviceID));
+                       return PVRSRV_ERROR_INVALID_PARAMS;
+               }
+
+               psDevData->psRgxDevNode = psDeviceNode;
+               psDevData->psRgxDevInfo = psDeviceNode->pvDevice;
+
+               psDeviceNode = psDeviceNode->psNext;
+
+               /* At least one device is active */
+               bFWActive = IMG_TRUE;
+       }
+
+       OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock);
+
+       if (!bFWActive)
+       {
+               return PVRSRV_ERROR_NOT_READY;
+       }
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXHWPerfOpen(RGX_HWPERF_CONNECTION *psHWPerfConnection)
+{
+       RGX_KM_HWPERF_DEVDATA *psDevData;
+       RGX_HWPERF_DEVICE *psHWPerfDev;
+       PVRSRV_RGXDEV_INFO *psRgxDevInfo;
+       PVRSRV_ERROR eError;
+       IMG_CHAR pszHWPerfFwStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5];
+       IMG_CHAR pszHWPerfHostStreamName[sizeof(PVRSRV_TL_HWPERF_HOST_SERVER_STREAM) + 5];
+       IMG_UINT32 ui32BufSize;
+
+       /* Disable producer callback by default for the Kernel API. */
+       IMG_UINT32 ui32StreamFlags = PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING |
+                       PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK;
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+       /* Validate input argument values supplied by the caller */
+       if (!psHWPerfConnection)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       psHWPerfDev = psHWPerfConnection->psHWPerfDevList;
+       while (psHWPerfDev)
+       {
+               psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData;
+               psRgxDevInfo = psDevData->psRgxDevInfo;
+
+               /* In the case where the AppHint has not been set we need to
+                * initialise the HWPerf resources here. Allocated on-demand
+                * to reduce the RAM footprint on systems not needing HWPerf.
+                */
+               OSLockAcquire(psRgxDevInfo->hHWPerfLock);
+               if (RGXHWPerfIsInitRequired(psRgxDevInfo))
+               {
+                       eError = RGXHWPerfInitOnDemandResources(psRgxDevInfo);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                                "%s: Initialisation of on-demand HWPerfFW resources failed",
+                                                __func__));
+                               OSLockRelease(psRgxDevInfo->hHWPerfLock);
+                               return eError;
+                       }
+               }
+               OSLockRelease(psRgxDevInfo->hHWPerfLock);
+
+               OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream);
+               if (psRgxDevInfo->hHWPerfHostStream == NULL)
+               {
+                       eError = RGXHWPerfHostInitOnDemandResources(psRgxDevInfo);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                                "%s: Initialisation of on-demand HWPerfHost resources failed",
+                                                __func__));
+                               OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream);
+                               return eError;
+                       }
+               }
+               OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream);
+
+               /* Form the HWPerf FW stream name for this DevNode so that it is identifiable in the UM */
+               if (OSSNPrintf(pszHWPerfFwStreamName, sizeof(pszHWPerfFwStreamName), "%s%d",
+                              PVRSRV_TL_HWPERF_RGX_FW_STREAM,
+                              psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID) < 0)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Failed to form HWPerf stream name for device %d",
+                                       __func__,
+                                       psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID));
+                       return PVRSRV_ERROR_INVALID_PARAMS;
+               }
+               /* Open the RGX TL stream for reading in this session */
+               eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE,
+                                           pszHWPerfFwStreamName,
+                                           ui32StreamFlags,
+                                           &psDevData->hSD[RGX_HWPERF_STREAM_ID0_FW]);
+               PVR_LOG_RETURN_IF_ERROR(eError, "TLClientOpenStream(RGX_HWPerf)");
+
+               /* Form the HWPerf host stream name for this DevNode so that it is identifiable in the UM */
+               if (OSSNPrintf(pszHWPerfHostStreamName, sizeof(pszHWPerfHostStreamName), "%s%d",
+                              PVRSRV_TL_HWPERF_HOST_SERVER_STREAM,
+                              psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID) < 0)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Failed to form HWPerf host stream name for device %d",
+                                       __func__,
+                                       psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID));
+                       return PVRSRV_ERROR_INVALID_PARAMS;
+               }
+
+               /* Open the host TL stream for reading in this session */
+               eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE,
+                                           pszHWPerfHostStreamName,
+                                           PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING,
+                                           &psDevData->hSD[RGX_HWPERF_STREAM_ID1_HOST]);
+               PVR_LOG_RETURN_IF_ERROR(eError, "TLClientOpenStream(Host_HWPerf)");
+
+               /* Allocate a buffer large enough for use during the entire session,
+                * to avoid the need to resize in the Acquire call as this might be
+                * in an ISR. Choose a size that can contain at least one packet.
+                */
+               /* Allocate buffer for FW Stream */
+               ui32BufSize = FW_STREAM_BUFFER_SIZE;
+               psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW] = OSAllocMem(ui32BufSize);
+               if (psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW] == NULL)
+               {
+                       return PVRSRV_ERROR_OUT_OF_MEMORY;
+               }
+               psDevData->pHwpBufEnd[RGX_HWPERF_STREAM_ID0_FW] = psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW]+ui32BufSize;
+
+               /* Allocate buffer for Host Stream */
+               ui32BufSize = HOST_STREAM_BUFFER_SIZE;
+               psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID1_HOST] = OSAllocMem(ui32BufSize);
+               if (psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID1_HOST] == NULL)
+               {
+                       OSFreeMem(psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW]);
+                       return PVRSRV_ERROR_OUT_OF_MEMORY;
+               }
+               psDevData->pHwpBufEnd[RGX_HWPERF_STREAM_ID1_HOST] = psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID1_HOST]+ui32BufSize;
+
+               psHWPerfDev = psHWPerfDev->psNext;
+       }
+
+       return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection)
+{
+       PVRSRV_ERROR eError;
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+       eError = RGXHWPerfLazyConnect(ppsHWPerfConnection);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfLazyConnect", e0);
+
+       eError = RGXHWPerfOpen(*ppsHWPerfConnection);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfOpen", e1);
+
+       return PVRSRV_OK;
+
+e1: /* HWPerfOpen might have opened some streams before failing */
+       RGXHWPerfClose(*ppsHWPerfConnection);
+e0: /* LazyConnect might have allocated some resources before failing;
+        * make sure they are cleaned up */
+       RGXHWPerfFreeConnection(ppsHWPerfConnection);
+       return eError;
+}
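+
+/* Illustrative usage sketch (editorial note, not driver code): a kernel-side
+ * client of this API typically connects, programs the event filter, reads
+ * events and disconnects. psConn and ui64CallerMask are placeholder names,
+ * not identifiers taken from this driver.
+ *
+ *     RGX_HWPERF_CONNECTION *psConn = NULL;
+ *
+ *     if (RGXHWPerfConnect(&psConn) == PVRSRV_OK)
+ *     {
+ *         // Apply a caller-chosen 64-bit mask to the FW event filter
+ *         RGXHWPerfControl(psConn, RGX_HWPERF_STREAM_ID0_FW, IMG_FALSE, ui64CallerMask);
+ *
+ *         // ... acquire/release events per device (see RGXHWPerfAcquireEvents) ...
+ *
+ *         RGXHWPerfDisconnect(&psConn);
+ *     }
+ */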
+
+/*
+    PVRSRVRGXControlHWPerfBlocksKM
+ */
+PVRSRV_ERROR PVRSRVRGXControlHWPerfBlocksKM(
+               CONNECTION_DATA             * psConnection,
+               PVRSRV_DEVICE_NODE          * psDeviceNode,
+               IMG_BOOL                      bEnable,
+               IMG_UINT32                    ui32ArrayLen,
+               IMG_UINT16                  * psBlockIDs)
+{
+       PVRSRV_ERROR            eError = PVRSRV_OK;
+       RGXFWIF_KCCB_CMD        sKccbCmd;
+       IMG_UINT32                      ui32kCCBCommandSlot;
+       PVRSRV_RGXDEV_INFO      *psDevice;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+       PVR_DPF_ENTERED;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(psBlockIDs != NULL, "psBlockIDs");
+       PVR_LOG_RETURN_IF_INVALID_PARAM((ui32ArrayLen>0) && (ui32ArrayLen <= RGXFWIF_HWPERF_CTRL_BLKS_MAX), "ui32ArrayLen");
+
+       PVR_ASSERT(psDeviceNode);
+       psDevice = psDeviceNode->pvDevice;
+
+       /* Fill in the command structure with the parameters needed
+        */
+       sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS;
+       sKccbCmd.uCmdData.sHWPerfCtrlBlks.bEnable = bEnable;
+       sKccbCmd.uCmdData.sHWPerfCtrlBlks.ui32NumBlocks = ui32ArrayLen;
+
+       OSDeviceMemCopy(sKccbCmd.uCmdData.sHWPerfCtrlBlks.aeBlockIDs, psBlockIDs, sizeof(IMG_UINT16) * ui32ArrayLen);
+
+
+       /* Ask the FW to carry out the HWPerf configuration command
+        */
+       eError = RGXScheduleCommandAndGetKCCBSlot(psDevice,
+                                                 RGXFWIF_DM_GP,
+                                                 &sKccbCmd,
+                                                 PDUMP_FLAGS_CONTINUOUS,
+                                                 &ui32kCCBCommandSlot);
+       PVR_LOG_RETURN_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot");
+
+       /* Wait for FW to complete */
+       eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
+       PVR_LOG_RETURN_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate");
+
+
+#if defined(DEBUG)
+       if (bEnable)
+               PVR_DPF((PVR_DBG_WARNING, "%d HWPerf counter blocks have been ENABLED", ui32ArrayLen));
+       else
+               PVR_DPF((PVR_DBG_WARNING, "%d HWPerf counter blocks have been DISABLED", ui32ArrayLen));
+#endif
+
+       PVR_DPF_RETURN_OK;
+}
+
+/*
+       PVRSRVRGXCtrlHWPerfKM
+ */
+PVRSRV_ERROR PVRSRVRGXCtrlHWPerfKM(
+               CONNECTION_DATA         *psConnection,
+               PVRSRV_DEVICE_NODE      *psDeviceNode,
+               RGX_HWPERF_STREAM_ID     eStreamId,
+               IMG_BOOL                 bToggle,
+               IMG_UINT64               ui64Mask)
+{
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+       PVR_DPF_ENTERED;
+       PVR_ASSERT(psDeviceNode);
+
+       if (eStreamId == RGX_HWPERF_STREAM_ID0_FW)
+       {
+               return RGXHWPerfCtrlFwBuffer(psDeviceNode, bToggle, ui64Mask);
+       }
+       else if (eStreamId == RGX_HWPERF_STREAM_ID1_HOST)
+       {
+               return RGXHWPerfCtrlHostBuffer(psDeviceNode, bToggle, (IMG_UINT32) ui64Mask);
+       }
+       else if (eStreamId == RGX_HWPERF_STREAM_ID2_CLIENT)
+       {
+               IMG_UINT32 ui32Index = (IMG_UINT32) (ui64Mask >> 32);
+               IMG_UINT32 ui32Mask = (IMG_UINT32) ui64Mask;
+
+               return RGXHWPerfCtrlClientBuffer(bToggle, ui32Index, ui32Mask);
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCtrlHWPerfKM: Unknown stream id."));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       PVR_DPF_RETURN_OK;
+}
+
+PVRSRV_ERROR RGXHWPerfControl(
+               RGX_HWPERF_CONNECTION *psHWPerfConnection,
+               RGX_HWPERF_STREAM_ID eStreamId,
+               IMG_BOOL             bToggle,
+               IMG_UINT64           ui64Mask)
+{
+       PVRSRV_ERROR           eError;
+       RGX_KM_HWPERF_DEVDATA* psDevData;
+       RGX_HWPERF_DEVICE* psHWPerfDev;
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+       /* Validate input argument values supplied by the caller */
+       if (!psHWPerfConnection)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       psHWPerfDev = psHWPerfConnection->psHWPerfDevList;
+
+       while (psHWPerfDev)
+       {
+               psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData;
+
+               /* Call the internal server API */
+               eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDevData->psRgxDevNode, eStreamId, bToggle, ui64Mask);
+               PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM");
+
+               psHWPerfDev = psHWPerfDev->psNext;
+       }
+
+       return PVRSRV_OK;
+}
+
+
+IMG_INTERNAL PVRSRV_ERROR RGXHWPerfToggleCounters(
+               RGX_HWPERF_CONNECTION *psHWPerfConnection,
+               IMG_UINT32   ui32NumBlocks,
+               IMG_UINT16*  aeBlockIDs,
+               IMG_BOOL     bToggle,
+               const char* szFunctionString);
+
+IMG_INTERNAL PVRSRV_ERROR RGXHWPerfToggleCounters(
+               RGX_HWPERF_CONNECTION *psHWPerfConnection,
+               IMG_UINT32   ui32NumBlocks,
+               IMG_UINT16*  aeBlockIDs,
+               IMG_BOOL     bToggle,
+               const char* szFunctionString)
+{
+       PVRSRV_ERROR           eError;
+       RGX_KM_HWPERF_DEVDATA* psDevData;
+       RGX_HWPERF_DEVICE*     psHWPerfDev;
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+       if (!psHWPerfConnection || ui32NumBlocks==0 || !aeBlockIDs)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       if (ui32NumBlocks > RGXFWIF_HWPERF_CTRL_BLKS_MAX)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       psHWPerfDev = psHWPerfConnection->psHWPerfDevList;
+
+       while (psHWPerfDev)
+       {
+               psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData;
+
+               /* Call the internal server API */
+               eError = PVRSRVRGXControlHWPerfBlocksKM(NULL,
+                                                       psDevData->psRgxDevNode,
+                                                       bToggle,
+                                                       ui32NumBlocks,
+                                                       aeBlockIDs);
+
+               PVR_LOG_RETURN_IF_ERROR(eError, szFunctionString);
+
+               psHWPerfDev = psHWPerfDev->psNext;
+       }
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXHWPerfDisableCounters(
+               RGX_HWPERF_CONNECTION *psHWPerfConnection,
+               IMG_UINT32            ui32NumBlocks,
+               IMG_UINT16*           aeBlockIDs)
+{
+       return RGXHWPerfToggleCounters(psHWPerfConnection,
+                                               ui32NumBlocks,
+                                               aeBlockIDs,
+                                               IMG_FALSE,
+                                               __func__);
+}
+
+
+PVRSRV_ERROR RGXHWPerfEnableCounters(
+               RGX_HWPERF_CONNECTION *psHWPerfConnection,
+               IMG_UINT32            ui32NumBlocks,
+               IMG_UINT16*           aeBlockIDs)
+{
+       return RGXHWPerfToggleCounters(psHWPerfConnection,
+                                               ui32NumBlocks,
+                                               aeBlockIDs,
+                                               IMG_TRUE,
+                                               __func__);
+}
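+
+/* Illustrative usage sketch (editorial note, not driver code): enabling a
+ * caller-supplied set of counter blocks over an open connection. The block ID
+ * values below are placeholders; real IDs come from the HWPerf block
+ * definitions for the target core.
+ *
+ *     IMG_UINT16 aui16BlockIDs[] = { 0x0001, 0x0002 };   // placeholder IDs
+ *     PVRSRV_ERROR eErr;
+ *
+ *     eErr = RGXHWPerfEnableCounters(psConn, 2, aui16BlockIDs);
+ *     // ... and symmetrically when done:
+ *     eErr = RGXHWPerfDisableCounters(psConn, 2, aui16BlockIDs);
+ */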
+
+
+PVRSRV_ERROR RGXHWPerfAcquireEvents(
+               IMG_HANDLE  hDevData,
+               RGX_HWPERF_STREAM_ID eStreamId,
+               IMG_PBYTE*  ppBuf,
+               IMG_UINT32* pui32BufLen)
+{
+       PVRSRV_ERROR                    eError;
+       RGX_KM_HWPERF_DEVDATA*  psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData;
+       IMG_PBYTE                               pDataDest;
+       IMG_UINT32                      ui32TlPackets = 0;
+       IMG_PBYTE                       pBufferEnd;
+       PVRSRVTL_PPACKETHDR psHDRptr;
+       PVRSRVTL_PACKETTYPE ui16TlType;
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+       /* Reset the output arguments in case we discover an error */
+       *ppBuf = NULL;
+       *pui32BufLen = 0;
+
+       /* Validate input argument values supplied by the caller */
+       if (!psDevData || eStreamId >= RGX_HWPERF_MAX_STREAM_ID)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       if (psDevData->pTlBuf[eStreamId] == NULL)
+       {
+               /* Acquire some data to read from the HWPerf TL stream */
+               eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
+                                            psDevData->hSD[eStreamId],
+                                            &psDevData->pTlBuf[eStreamId],
+                                            &psDevData->ui32AcqDataLen[eStreamId]);
+               PVR_LOG_RETURN_IF_ERROR(eError, "TLClientAcquireData");
+
+               psDevData->pTlBufPos[eStreamId] = psDevData->pTlBuf[eStreamId];
+       }
+
+       /* TL indicates no data exists so return OK and zero. */
+       if ((psDevData->pTlBufPos[eStreamId] == NULL) || (psDevData->ui32AcqDataLen[eStreamId] == 0))
+       {
+               return PVRSRV_OK;
+       }
+
+       /* Process each TL packet in the data buffer we have acquired */
+       pBufferEnd = psDevData->pTlBuf[eStreamId]+psDevData->ui32AcqDataLen[eStreamId];
+       pDataDest = psDevData->pHwpBuf[eStreamId];
+       psHDRptr = GET_PACKET_HDR(psDevData->pTlBufPos[eStreamId]);
+       psDevData->pTlBufRead[eStreamId] = psDevData->pTlBufPos[eStreamId];
+       while (psHDRptr < (PVRSRVTL_PPACKETHDR)((void *)pBufferEnd))
+       {
+               ui16TlType = GET_PACKET_TYPE(psHDRptr);
+               if (ui16TlType == PVRSRVTL_PACKETTYPE_DATA)
+               {
+                       IMG_UINT16 ui16DataLen = GET_PACKET_DATA_LEN(psHDRptr);
+                       if (0 == ui16DataLen)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfAcquireEvents: ZERO Data in TL data packet: %p", psHDRptr));
+                       }
+                       else
+                       {
+                               /* Check next packet does not fill buffer */
+                               if (pDataDest + ui16DataLen > psDevData->pHwpBufEnd[eStreamId])
+                               {
+                                       break;
+                               }
+
+                               /* For valid data copy it into the client buffer and move
+                                * the write position on */
+                               OSDeviceMemCopy(pDataDest, GET_PACKET_DATA_PTR(psHDRptr), ui16DataLen);
+                               pDataDest += ui16DataLen;
+                       }
+               }
+               else if (ui16TlType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED)
+               {
+                       PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfAcquireEvents: Indication that the transport buffer was full"));
+               }
+               else
+               {
+                       /* Ignore padding packets and any other packet types */
+                       PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfAcquireEvents: Ignoring TL packet, type %d", ui16TlType ));
+               }
+
+               /* Update loop variable to the next packet and increment counts */
+               psHDRptr = GET_NEXT_PACKET_ADDR(psHDRptr);
+               /* Updated to keep track of the next packet to be read. */
+               psDevData->pTlBufRead[eStreamId] = (IMG_PBYTE) ((void *)psHDRptr);
+               ui32TlPackets++;
+       }
+
+       PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfAcquireEvents: TL Packets processed %03d", ui32TlPackets));
+
+       psDevData->bRelease[eStreamId] = IMG_FALSE;
+       if (psHDRptr >= (PVRSRVTL_PPACKETHDR)((void *)pBufferEnd))
+       {
+               psDevData->bRelease[eStreamId] = IMG_TRUE;
+       }
+
+       /* Update output arguments with client buffer details and true length */
+       *ppBuf = psDevData->pHwpBuf[eStreamId];
+       *pui32BufLen = pDataDest - psDevData->pHwpBuf[eStreamId];
+
+       return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfReleaseEvents(
+               IMG_HANDLE hDevData,
+               RGX_HWPERF_STREAM_ID eStreamId)
+{
+       PVRSRV_ERROR                    eError = PVRSRV_OK;
+       RGX_KM_HWPERF_DEVDATA*  psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData;
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+       /* Validate input argument values supplied by the caller */
+       if (!psDevData || eStreamId >= RGX_HWPERF_MAX_STREAM_ID)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       if (psDevData->bRelease[eStreamId])
+       {
+               /* Inform the TL that we are done with reading the data. */
+               eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psDevData->hSD[eStreamId]);
+               psDevData->ui32AcqDataLen[eStreamId] = 0;
+               psDevData->pTlBuf[eStreamId] = NULL;
+       }
+       else
+       {
+               psDevData->pTlBufPos[eStreamId] = psDevData->pTlBufRead[eStreamId];
+       }
+       return eError;
+}
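+
+/* Illustrative usage sketch (editorial note, not driver code): reading FW
+ * events for one device of the connection. hDevData is the per-device handle
+ * from the connection's device list; ProcessPackets() is a placeholder for
+ * caller-side processing.
+ *
+ *     IMG_HANDLE hDevData = psConn->psHWPerfDevList->hDevData;
+ *     IMG_PBYTE  pbBuf    = NULL;
+ *     IMG_UINT32 ui32Len  = 0;
+ *
+ *     if (RGXHWPerfAcquireEvents(hDevData, RGX_HWPERF_STREAM_ID0_FW,
+ *                                &pbBuf, &ui32Len) == PVRSRV_OK && ui32Len > 0)
+ *     {
+ *         ProcessPackets(pbBuf, ui32Len);   // placeholder
+ *         RGXHWPerfReleaseEvents(hDevData, RGX_HWPERF_STREAM_ID0_FW);
+ *     }
+ */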
+
+
+PVRSRV_ERROR RGXHWPerfGetFilter(
+               IMG_HANDLE  hDevData,
+               RGX_HWPERF_STREAM_ID eStreamId,
+               IMG_UINT64 *ui64Filter)
+{
+       PVRSRV_RGXDEV_INFO* psRgxDevInfo =
+                       hDevData ? ((RGX_KM_HWPERF_DEVDATA*) hDevData)->psRgxDevInfo : NULL;
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+       /* Validate input argument values supplied by the caller */
+       if (!psRgxDevInfo)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Invalid pointer to the RGX device",
+                               __func__));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       /* No need to take hHWPerfLock here since we are only reading from
+        * integers that always exist; returning their values to debugfs is
+        * an atomic operation.
+        */
+       switch (eStreamId) {
+               case RGX_HWPERF_STREAM_ID0_FW:
+                       *ui64Filter = psRgxDevInfo->ui64HWPerfFilter;
+                       break;
+               case RGX_HWPERF_STREAM_ID1_HOST:
+                       *ui64Filter = psRgxDevInfo->ui32HWPerfHostFilter;
+                       break;
+               default:
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Invalid stream ID",
+                                       __func__));
+                       return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfFreeConnection(RGX_HWPERF_CONNECTION** ppsHWPerfConnection)
+{
+       RGX_HWPERF_DEVICE *psHWPerfDev, *psHWPerfNextDev;
+       RGX_HWPERF_CONNECTION *psHWPerfConnection = *ppsHWPerfConnection;
+
+       /* if connection object itself is NULL, nothing to free */
+       if (psHWPerfConnection == NULL)
+       {
+               return PVRSRV_OK;
+       }
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+       psHWPerfNextDev = psHWPerfConnection->psHWPerfDevList;
+       while (psHWPerfNextDev)
+       {
+               psHWPerfDev = psHWPerfNextDev;
+               psHWPerfNextDev = psHWPerfNextDev->psNext;
+
+               /* Free the session memory */
+               if (psHWPerfDev->hDevData)
+                       OSFreeMem(psHWPerfDev->hDevData);
+               OSFreeMem(psHWPerfDev);
+       }
+       OSFreeMem(psHWPerfConnection);
+       *ppsHWPerfConnection = NULL;
+
+       return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfClose(RGX_HWPERF_CONNECTION *psHWPerfConnection)
+{
+       RGX_HWPERF_DEVICE *psHWPerfDev;
+       RGX_KM_HWPERF_DEVDATA* psDevData;
+       IMG_UINT uiStreamId;
+       PVRSRV_ERROR eError;
+
+       /* Check that the session connection is not NULL */
+       if (!psHWPerfConnection)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+       psHWPerfDev = psHWPerfConnection->psHWPerfDevList;
+       while (psHWPerfDev)
+       {
+               psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData;
+               for (uiStreamId = 0; uiStreamId < RGX_HWPERF_MAX_STREAM_ID; uiStreamId++)
+               {
+                       /* If the TL buffer still exists, the caller has not called
+                        * ReleaseData before disconnecting, so clean it up here */
+                       if (psDevData->pTlBuf[uiStreamId])
+                       {
+                               /* Call TLClientReleaseData and NULL out the buffer
+                                * fields and length */
+                               eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psDevData->hSD[uiStreamId]);
+                               psDevData->ui32AcqDataLen[uiStreamId] = 0;
+                               psDevData->pTlBuf[uiStreamId] = NULL;
+                               PVR_LOG_IF_ERROR(eError, "TLClientReleaseData");
+                               /* Packets may be lost if release was not required */
+                               if (!psDevData->bRelease[uiStreamId])
+                               {
+                                       PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfClose: Events in buffer waiting to be read, remaining events may be lost."));
+                               }
+                       }
+
+                       /* Close the TL stream, ignore the error if it occurs as we
+                        * are disconnecting */
+                       if (psDevData->hSD[uiStreamId])
+                       {
+                               eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE,
+                                                            psDevData->hSD[uiStreamId]);
+                               PVR_LOG_IF_ERROR(eError, "TLClientCloseStream");
+                               psDevData->hSD[uiStreamId] = NULL;
+                       }
+
+                       /* Free the client buffer used in session */
+                       if (psDevData->pHwpBuf[uiStreamId])
+                       {
+                               OSFreeMem(psDevData->pHwpBuf[uiStreamId]);
+                               psDevData->pHwpBuf[uiStreamId] = NULL;
+                       }
+               }
+               psHWPerfDev = psHWPerfDev->psNext;
+       }
+
+       return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfDisconnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+       eError = RGXHWPerfClose(*ppsHWPerfConnection);
+       PVR_LOG_IF_ERROR(eError, "RGXHWPerfClose");
+
+       eError = RGXHWPerfFreeConnection(ppsHWPerfConnection);
+       PVR_LOG_IF_ERROR(eError, "RGXHWPerfFreeConnection");
+
+       return eError;
+}
+
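+/* Editorial note on the conversion below: given a correlation sample
+ * (ui64CorrCRTimeStamp, ui64CorrOSTimeStamp) taken at the same instant, an
+ * event stamped N CR ticks after that sample is reported at
+ * ui64CorrOSTimeStamp plus N ticks scaled to nanoseconds using the conversion
+ * factor derived from ui32ClkSpeed. A zero in any input yields 0, meaning no
+ * valid conversion was possible.
+ */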
+IMG_UINT64 RGXHWPerfConvertCRTimeStamp(
+               IMG_UINT32 ui32ClkSpeed,
+               IMG_UINT64 ui64CorrCRTimeStamp,
+               IMG_UINT64 ui64CorrOSTimeStamp,
+               IMG_UINT64 ui64CRTimeStamp)
+{
+       IMG_UINT64 ui64CRDeltaToOSDeltaKNs;
+       IMG_UINT64 ui64EventOSTimestamp, deltaRgxTimer, delta_ns;
+
+       if (!(ui64CRTimeStamp) || !(ui32ClkSpeed) || !(ui64CorrCRTimeStamp) || !(ui64CorrOSTimeStamp))
+       {
+               return 0;
+       }
+
+       ui64CRDeltaToOSDeltaKNs = RGXTimeCorrGetConversionFactor(ui32ClkSpeed);
+
+       /* RGX CR timer ticks delta */
+       deltaRgxTimer = ui64CRTimeStamp - ui64CorrCRTimeStamp;
+       /* RGX time delta in nanoseconds */
+       delta_ns = RGXFWIF_GET_DELTA_OSTIME_NS(deltaRgxTimer, ui64CRDeltaToOSDeltaKNs);
+       /* Calculate OS time of HWPerf event */
+       ui64EventOSTimestamp = ui64CorrOSTimeStamp + delta_ns;
+
+       return ui64EventOSTimestamp;
+}
+
+/******************************************************************************
+ End of file (rgxhwperf_common.c)
+ ******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxhwperf_common.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxhwperf_common.h
new file mode 100644
index 0000000..76957c3
--- /dev/null
@@ -0,0 +1,512 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX HW Performance header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX HWPerf functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXHWPERF_COMMON_H_
+#define RGXHWPERF_COMMON_H_
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#include "device.h"
+#include "connection_server.h"
+#include "rgxdevice.h"
+#include "rgx_hwperf.h"
+
+/* HWPerf host buffer size constraints in KBs */
+#define HWPERF_HOST_TL_STREAM_SIZE_DEFAULT PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB
+#define HWPERF_HOST_TL_STREAM_SIZE_MIN     (32U)
+#define HWPERF_HOST_TL_STREAM_SIZE_MAX     (3072U)
+
+/******************************************************************************
+ * RGX HW Performance decode Bvnc Features for HWPerf
+ *****************************************************************************/
+PVRSRV_ERROR RGXServerFeatureFlagsToHWPerfFlags(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                       RGX_HWPERF_BVNC    *psBVNC);
+
+PVRSRV_ERROR PVRSRVRGXGetHWPerfBvncFeatureFlagsKM(CONNECTION_DATA    *psConnection,
+                                                  PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                  RGX_HWPERF_BVNC    *psBVNC);
+
+/******************************************************************************
+ * RGX HW Performance Data Transport Routines
+ *****************************************************************************/
+
+PVRSRV_ERROR RGXHWPerfDataStoreCB(PVRSRV_DEVICE_NODE* psDevInfo);
+
+PVRSRV_ERROR RGXHWPerfInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo);
+PVRSRV_ERROR RGXHWPerfInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo);
+void RGXHWPerfDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo);
+void RGXHWPerfInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode);
+void RGXHWPerfClientInitAppHintCallbacks(void);
+
+/******************************************************************************
+ * RGX HW Performance Profiling API(s)
+ *****************************************************************************/
+
+PVRSRV_ERROR PVRSRVRGXCtrlHWPerfKM(
+       CONNECTION_DATA      * psConnection,
+       PVRSRV_DEVICE_NODE   * psDeviceNode,
+        RGX_HWPERF_STREAM_ID  eStreamId,
+       IMG_BOOL               bToggle,
+       IMG_UINT64             ui64Mask);
+
+PVRSRV_ERROR PVRSRVRGXControlHWPerfBlocksKM(
+       CONNECTION_DATA       * psConnection,
+       PVRSRV_DEVICE_NODE    * psDeviceNode,
+       IMG_BOOL              bEnable,
+       IMG_UINT32            ui32ArrayLen,
+       IMG_UINT16            * psBlockIDs);
+
+/******************************************************************************
+ * RGX HW Performance Host Stream API
+ *****************************************************************************/
+
+PVRSRV_ERROR RGXHWPerfHostInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32BufSizeKB);
+PVRSRV_ERROR RGXHWPerfHostInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo);
+void RGXHWPerfHostDeInit(PVRSRV_RGXDEV_INFO    *psRgxDevInfo);
+
+void RGXHWPerfHostSetEventFilter(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                 IMG_UINT32 ui32Filter);
+
+void RGXHWPerfHostPostRaw(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                                 RGX_HWPERF_HOST_EVENT_TYPE eEvType,
+                                                 IMG_BYTE *pbPayload,
+                                                 IMG_UINT32 ui32PayloadSize);
+
+void RGXHWPerfHostPostEnqEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                               RGX_HWPERF_KICK_TYPE eEnqType,
+                               IMG_UINT32 ui32Pid,
+                               IMG_UINT32 ui32FWDMContext,
+                               IMG_UINT32 ui32ExtJobRef,
+                               IMG_UINT32 ui32IntJobRef,
+                               PVRSRV_FENCE hCheckFence,
+                               PVRSRV_FENCE hUpdateFence,
+                               PVRSRV_TIMELINE hUpdateTimeline,
+                               IMG_UINT64 ui64CheckFenceUID,
+                               IMG_UINT64 ui64UpdateFenceUID,
+                               IMG_UINT64 ui64DeadlineInus,
+                               IMG_UINT32 ui32CycleEstimate);
+
+void RGXHWPerfHostPostAllocEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                 RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType,
+                                 const IMG_CHAR *psName,
+                                 IMG_UINT32 ui32NameSize,
+                                 RGX_HWPERF_HOST_ALLOC_DETAIL *puAllocDetail);
+
+void RGXHWPerfHostPostFreeEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType,
+                                IMG_UINT64 ui64UID,
+                                IMG_UINT32 ui32PID,
+                                IMG_UINT32 ui32FWAddr);
+
+void RGXHWPerfHostPostModifyEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                  RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType,
+                                  IMG_UINT64 ui64NewUID,
+                                  IMG_UINT64 ui64UID1,
+                                  IMG_UINT64 ui64UID2,
+                                  const IMG_CHAR *psName,
+                                  IMG_UINT32 ui32NameSize);
+
+void RGXHWPerfHostPostUfoEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                               RGX_HWPERF_UFO_EV eUfoType,
+                               RGX_HWPERF_UFO_DATA_ELEMENT *psUFOData,
+                                                          const IMG_BOOL bSleepAllowed);
+
+void RGXHWPerfHostPostClkSyncEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo);
+
+void RGXHWPerfHostPostDeviceInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                                                RGX_HWPERF_DEV_INFO_EV eEvType,
+                                                                PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus,
+                                                                PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason);
+
+void RGXHWPerfHostPostInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                                                RGX_HWPERF_INFO_EV eEvType);
+
+void RGXHWPerfHostPostFenceWait(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                                               RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eType,
+                                                               IMG_PID uiPID,
+                                                               PVRSRV_FENCE hFence,
+                                                               IMG_UINT32 ui32Data);
+
+void RGXHWPerfHostPostSWTimelineAdv(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                    IMG_PID uiPID,
+                                                                       PVRSRV_TIMELINE hSWTimeline,
+                                                                       IMG_UINT64 ui64SyncPtIndex);
+
+void RGXHWPerfHostPostClientInfoProcName(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                         IMG_PID uiPID,
+                                                                            const IMG_CHAR *psName);
+
+IMG_BOOL RGXHWPerfHostIsEventEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo, RGX_HWPERF_HOST_EVENT_TYPE eEvent);
+
+#define _RGX_HWPERF_HOST_FILTER(CTX, EV) \
+               (((PVRSRV_RGXDEV_INFO *)CTX->psDeviceNode->pvDevice)->ui32HWPerfHostFilter \
+               & RGX_HWPERF_EVENT_MASK_VALUE(EV))
+
+#define _RGX_DEVICE_INFO_FROM_CTX(CTX) \
+               ((PVRSRV_RGXDEV_INFO *)CTX->psDeviceNode->pvDevice)
+
+#define _RGX_DEVICE_INFO_FROM_NODE(DEVNODE) \
+               ((PVRSRV_RGXDEV_INFO *)DEVNODE->pvDevice)
+
+/* Deadline and cycle estimate are not supported for all ENQ events */
+#define NO_DEADLINE 0
+#define NO_CYCEST   0
+
+
+#if defined(SUPPORT_RGX)
+
+/**
+ * This macro checks if HWPerfHost and the event are enabled and if they are
+ * it posts event to the HWPerfHost stream.
+ *
+ * @param C      Kick context
+ * @param P      Pid of kicking process
+ * @param X      Related FW context
+ * @param E      External job reference
+ * @param I      Internal job reference
+ * @param K      Kick type
+ * @param CF     Check fence handle
+ * @param UF     Update fence handle
+ * @param UT     Update timeline (on which above UF was created) handle
+ * @param CHKUID Check fence UID
+ * @param UPDUID Update fence UID
+ * @param D      Deadline
+ * @param CE     Cycle estimate
+ */
+#define RGXSRV_HWPERF_ENQ(C, P, X, E, I, K, CF, UF, UT, CHKUID, UPDUID, D, CE) \
+               do { \
+                       if (_RGX_HWPERF_HOST_FILTER(C, RGX_HWPERF_HOST_ENQ)) \
+                       { \
+                               RGXHWPerfHostPostEnqEvent(_RGX_DEVICE_INFO_FROM_CTX(C), \
+                                                         (K), (P), (X), (E), (I), \
+                                                         (CF), (UF), (UT), \
+                                                         (CHKUID), (UPDUID), (D), (CE)); \
+                       } \
+               } while (0)
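+
+/* Illustrative invocation (editorial note, not driver code): a kick path would
+ * post an ENQ event roughly as below. All argument names and the kick type are
+ * placeholders that map onto the parameters documented above.
+ *
+ *     RGXSRV_HWPERF_ENQ(psKickCtx, ui32Pid, ui32FWCtxAddr,
+ *                       ui32ExtJobRef, ui32IntJobRef,
+ *                       eKickType,
+ *                       hCheckFence, hUpdateFence, hUpdateTimeline,
+ *                       ui64CheckFenceUID, ui64UpdateFenceUID,
+ *                       NO_DEADLINE, NO_CYCEST);
+ */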
+
+/**
+ * This macro checks if HWPerfHost and the event are enabled and if they are
+ * it posts event to the HWPerfHost stream.
+ *
+ * @param I Device Info pointer
+ * @param T Host UFO event type
+ * @param D Pointer to UFO data
+ * @param S Is sleeping allowed?
+ */
+#define RGXSRV_HWPERF_UFO(I, T, D, S) \
+               do { \
+                       if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_UFO)) \
+                       { \
+                               RGXHWPerfHostPostUfoEvent((I), (T), (D), (S)); \
+                       } \
+               } while (0)
+
+/**
+ * This macro checks if HWPerfHost and the event are enabled and if they are
+ * it posts event to the HWPerfHost stream.
+ *
+ * @param D Device node pointer
+ * @param T      Resource type (RGX_HWPERF_HOST_RESOURCE_TYPE_ suffix)
+ * @param FWADDR sync firmware address
+ * @param N string containing sync name
+ * @param Z string size including null terminating character
+ */
+#define RGXSRV_HWPERF_ALLOC(D, T, FWADDR, N, Z) \
+               do { \
+                       if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \
+                       { \
+                               RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \
+                               uAllocDetail.sSyncAlloc.ui32FWAddr = (FWADDR); \
+                               RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \
+                                                           RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \
+                                                                                       (N), (Z), &uAllocDetail); \
+                       } \
+               } while (0)
+
+/**
+ * This macro checks if HWPerfHost and the event are enabled and if they are
+ * it posts event to the HWPerfHost stream.
+ *
+ * @param D Device Node pointer
+ * @param PID ID of allocating process
+ * @param FENCE PVRSRV_FENCE object
+ * @param FWADDR sync firmware address
+ * @param N string containing sync name
+ * @param Z string size including null terminating character
+ */
+#define RGXSRV_HWPERF_ALLOC_FENCE(D, PID, FENCE, FWADDR, N, Z)  \
+               do { \
+                       if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \
+                       { \
+                               RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \
+                               uAllocDetail.sFenceAlloc.uiPID = (PID); \
+                               uAllocDetail.sFenceAlloc.hFence = (FENCE); \
+                               uAllocDetail.sFenceAlloc.ui32CheckPt_FWAddr = (FWADDR); \
+                               RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \
+                                                           RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR, \
+                                                           N, Z, &uAllocDetail); \
+                       } \
+               } while (0)
+
+/**
+ * @param D Device Node pointer
+ * @param TL PVRSRV_TIMELINE on which CP is allocated
+ * @param PID Allocating process ID of this TL/FENCE
+ * @param FENCE PVRSRV_FENCE as passed to SyncCheckpointResolveFence OR PVRSRV_NO_FENCE
+ * @param FWADDR sync firmware address
+ * @param N string containing sync name
+ * @param Z string size including null terminating character
+ */
+#define RGXSRV_HWPERF_ALLOC_SYNC_CP(D, TL, PID, FENCE, FWADDR, N, Z)  \
+               do { \
+                       if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \
+                       { \
+                               RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \
+                               uAllocDetail.sSyncCheckPointAlloc.ui32CheckPt_FWAddr = (FWADDR); \
+                               uAllocDetail.sSyncCheckPointAlloc.hTimeline = (TL); \
+                               uAllocDetail.sSyncCheckPointAlloc.uiPID = (PID); \
+                               uAllocDetail.sSyncCheckPointAlloc.hFence = (FENCE); \
+                               RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \
+                                                           RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP, \
+                                                           N, Z, &uAllocDetail); \
+                       } \
+               } while (0)
+
+/**
+ * @param D Device Node pointer
+ * @param PID ID of allocating process
+ * @param SW_FENCE PVRSRV_FENCE object
+ * @param SW_TL PVRSRV_TIMELINE on which SW_FENCE is allocated
+ * @param SPI Sync point index on the SW_TL on which this SW_FENCE is allocated
+ * @param N string containing sync name
+ * @param Z string size including null terminating character
+ */
+#define RGXSRV_HWPERF_ALLOC_SW_FENCE(D, PID, SW_FENCE, SW_TL, SPI, N, Z)  \
+               do { \
+                       if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \
+                       { \
+                               RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \
+                               uAllocDetail.sSWFenceAlloc.uiPID = (PID); \
+                               uAllocDetail.sSWFenceAlloc.hSWFence = (SW_FENCE); \
+                               uAllocDetail.sSWFenceAlloc.hSWTimeline = (SW_TL); \
+                               uAllocDetail.sSWFenceAlloc.ui64SyncPtIndex = (SPI); \
+                               RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \
+                                                           RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW, \
+                                                           N, Z, &uAllocDetail); \
+                       } \
+               } while (0)
+
+/**
+ * This macro checks if HWPerfHost and the event are enabled and if they are
+ * it posts event to the HWPerfHost stream.
+ *
+ * @param D Device Node pointer
+ * @param T      Resource type (RGX_HWPERF_HOST_RESOURCE_TYPE_ suffix)
+ * @param FWADDR sync firmware address
+ */
+#define RGXSRV_HWPERF_FREE(D, T, FWADDR) \
+               do { \
+                       if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_FREE)) \
+                       { \
+                               RGXHWPerfHostPostFreeEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \
+                                                          RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \
+                                                          (0), (0), (FWADDR)); \
+                       } \
+               } while (0)
+
+/**
+ * This macro checks if HWPerfHost and the event are enabled and if they are
+ * it posts event to the HWPerfHost stream.
+ *
+ * @param D Device Node pointer
+ * @param T      Resource type (RGX_HWPERF_HOST_RESOURCE_TYPE_ suffix)
+ * @param UID ID of input object
+ * @param PID ID of allocating process
+ * @param FWADDR sync firmware address
+ */
+#define RGXSRV_HWPERF_FREE_FENCE_SYNC(D, T, UID, PID, FWADDR) \
+               do { \
+                       if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_FREE)) \
+                       { \
+                               RGXHWPerfHostPostFreeEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \
+                                                          RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \
+                                                          (UID), (PID), (FWADDR)); \
+                       } \
+               } while (0)
+
+/**
+ * This macro checks if HWPerfHost and the event are enabled and if they are
+ * it posts event to the HWPerfHost stream.
+ *
+ * @param D Device Node pointer
+ * @param T      Resource type (RGX_HWPERF_HOST_RESOURCE_TYPE_ suffix)
+ * @param NEWUID ID of output object
+ * @param UID1 ID of first input object
+ * @param UID2 ID of second input object
+ * @param N string containing new object's name
+ * @param Z string size including null terminating character
+ */
+#define RGXSRV_HWPERF_MODIFY_FENCE_SYNC(D, T, NEWUID, UID1, UID2, N, Z) \
+               do { \
+                       if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_MODIFY)) \
+                       { \
+                               RGXHWPerfHostPostModifyEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \
+                                                            RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \
+                                                            (NEWUID), (UID1), (UID2), N, Z); \
+                       } \
+               } while (0)
+
+
+/**
+ * This macro checks if HWPerfHost and the event are enabled and if they are
+ * it posts event to the HWPerfHost stream.
+ *
+ * @param I Device info pointer
+ */
+#define RGXSRV_HWPERF_CLK_SYNC(I) \
+               do { \
+                       if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_CLK_SYNC)) \
+                       { \
+                               RGXHWPerfHostPostClkSyncEvent((I)); \
+                       } \
+               } while (0)
+
+
+/**
+ * This macro checks if HWPerfHost and the event are enabled and if they are
+ * it posts a device info event to the HWPerfHost stream.
+ *
+ * @param I      Device info pointer
+ * @param T      Event type
+ * @param H      Health status enum
+ * @param R      Health reason enum
+ */
+#define RGXSRV_HWPERF_DEVICE_INFO(I, T, H, R) \
+               do { \
+                       if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_DEV_INFO)) \
+                       { \
+                               RGXHWPerfHostPostDeviceInfo((I), (T), (H), (R)); \
+                       } \
+               } while (0)
+
+/**
+ * This macro checks if HWPerfHost and the event are enabled and if they are
+ * it posts event to the HWPerfHost stream.
+ *
+ * @param I      Device info pointer
+ * @param T      Event type
+ */
+#define RGXSRV_HWPERF_HOST_INFO(I, T) \
+do { \
+       if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_INFO)) \
+       { \
+               RGXHWPerfHostPostInfo((I), (T)); \
+       } \
+} while (0)
+
+/**
+ * @param I      Device info pointer
+ * @param T      Wait Event type
+ * @param PID    Process ID that the following fence belongs to
+ * @param F      Fence handle
+ * @param D      Data for this wait event type
+ */
+#define RGXSRV_HWPERF_SYNC_FENCE_WAIT(I, T, PID, F, D) \
+do { \
+       if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_SYNC_FENCE_WAIT)) \
+       { \
+               RGXHWPerfHostPostFenceWait(I, RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_##T, \
+                                          (PID), (F), (D)); \
+       } \
+} while (0)
+
+/**
+ * @param I      Device info pointer
+ * @param PID    Process ID that the following timeline belongs to
+ * @param SW_TL  SW-timeline handle
+ * @param SPI    Sync-pt index where this SW-timeline has reached
+ */
+#define RGXSRV_HWPERF_SYNC_SW_TL_ADV(I, PID, SW_TL, SPI)\
+do { \
+       if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_SYNC_SW_TL_ADVANCE)) \
+       { \
+               RGXHWPerfHostPostSWTimelineAdv((I), (PID), (SW_TL), (SPI)); \
+       } \
+} while (0)
+
+/**
+ * @param D      Device Node pointer
+ * @param PID    Process ID that the following timeline belongs to
+ * @param N      Null terminated string containing the process name
+ */
+#define RGXSRV_HWPERF_HOST_CLIENT_INFO_PROCESS_NAME(D, PID, N) \
+do { \
+       if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_CLIENT_INFO)) \
+       { \
+               RGXHWPerfHostPostClientInfoProcName(_RGX_DEVICE_INFO_FROM_NODE(D), (PID), (N)); \
+       } \
+} while (0)
+
+#else
+
+#define RGXSRV_HWPERF_ENQ(C, P, X, E, I, K, CF, UF, UT, CHKUID, UPDUID, D, CE)
+#define RGXSRV_HWPERF_UFO(I, T, D, S)
+#define RGXSRV_HWPERF_ALLOC(D, T, FWADDR, N, Z)
+#define RGXSRV_HWPERF_ALLOC_FENCE(D, PID, FENCE, FWADDR, N, Z)
+#define RGXSRV_HWPERF_ALLOC_SYNC_CP(D, TL, PID, FENCE, FWADDR, N, Z)
+#define RGXSRV_HWPERF_ALLOC_SW_FENCE(D, PID, SW_FENCE, SW_TL, SPI, N, Z)
+#define RGXSRV_HWPERF_FREE(D, T, FWADDR)
+#define RGXSRV_HWPERF_FREE_FENCE_SYNC(D, T, UID, PID, FWADDR)
+#define RGXSRV_HWPERF_MODIFY_FENCE_SYNC(D, T, NEWUID, UID1, UID2, N, Z)
+#define RGXSRV_HWPERF_CLK_SYNC(I)
+#define RGXSRV_HWPERF_DEVICE_INFO(I, T, H, R)
+#define RGXSRV_HWPERF_HOST_INFO(I, T)
+#define RGXSRV_HWPERF_SYNC_FENCE_WAIT(I, T, PID, F, D)
+#define RGXSRV_HWPERF_SYNC_SW_TL_ADV(I, PID, SW_TL, SPI)
+#define RGXSRV_HWPERF_HOST_CLIENT_INFO_PROCESS_NAME(D, PID, N)
+
+#endif
+
+#endif /* RGXHWPERF_COMMON_H_ */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxkicksync.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxkicksync.c
new file mode 100644 (file)
index 0000000..73f1b78
--- /dev/null
@@ -0,0 +1,794 @@
+/*************************************************************************/ /*!
+@File           rgxkicksync.c
+@Title          Server side of the sync only kick API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "rgxkicksync.h"
+
+#include "rgxdevice.h"
+#include "rgxmem.h"
+#include "rgxfwutils.h"
+#include "allocmem.h"
+#include "sync.h"
+#include "rgxhwperf.h"
+#include "ospvr_gputrace.h"
+
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_KICKSYNC_UFO_DUMP       0
+
+//#define KICKSYNC_CHECKPOINT_DEBUG 1
+
+#if defined(KICKSYNC_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+#else
+#define CHKPT_DBG(X)
+#endif
+
+struct _RGX_SERVER_KICKSYNC_CONTEXT_
+{
+       PVRSRV_DEVICE_NODE        * psDeviceNode;
+       RGX_SERVER_COMMON_CONTEXT * psServerCommonContext;
+       DLLIST_NODE                 sListNode;
+       SYNC_ADDR_LIST              sSyncAddrListFence;
+       SYNC_ADDR_LIST              sSyncAddrListUpdate;
+       POS_LOCK                    hLock;
+};
+
+
+PVRSRV_ERROR PVRSRVRGXCreateKickSyncContextKM(CONNECTION_DATA              *psConnection,
+                                              PVRSRV_DEVICE_NODE           *psDeviceNode,
+                                              IMG_HANDLE                    hMemCtxPrivData,
+                                              IMG_UINT32                    ui32PackedCCBSizeU88,
+                                              IMG_UINT32                    ui32ContextFlags,
+                                              RGX_SERVER_KICKSYNC_CONTEXT **ppsKickSyncContext)
+{
+       PVRSRV_RGXDEV_INFO          * psDevInfo = psDeviceNode->pvDevice;
+       DEVMEM_MEMDESC              * psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+       RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext;
+       RGX_COMMON_CONTEXT_INFO      sInfo;
+       PVRSRV_ERROR                 eError;
+       IMG_UINT32                   ui32CCBAllocSizeLog2, ui32CCBMaxAllocSizeLog2;
+
+       memset(&sInfo, 0, sizeof(sInfo));
+
+       /* Prepare cleanup struct */
+       * ppsKickSyncContext = NULL;
+       psKickSyncContext = OSAllocZMem(sizeof(*psKickSyncContext));
+       if (psKickSyncContext == NULL)
+       {
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       eError = OSLockCreate(&psKickSyncContext->hLock);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)",
+                                                                       __func__,
+                                                                       PVRSRVGetErrorString(eError)));
+               goto err_lockcreate;
+       }
+
+       psKickSyncContext->psDeviceNode = psDeviceNode;
+
+       ui32CCBAllocSizeLog2 = U32toU8_Unpack1(ui32PackedCCBSizeU88);
+       ui32CCBMaxAllocSizeLog2 = U32toU8_Unpack2(ui32PackedCCBSizeU88);
+       eError = FWCommonContextAllocate(psConnection,
+                                                                        psDeviceNode,
+                                                                        REQ_TYPE_KICKSYNC,
+                                                                        RGXFWIF_DM_GP,
+                                                                        hMemCtxPrivData,
+                                                                        NULL,
+                                                                        0,
+                                                                        psFWMemContextMemDesc,
+                                                                        NULL,
+                                                                        ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_KICKSYNC_CCB_SIZE_LOG2,
+                                                                        ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_KICKSYNC_CCB_MAX_SIZE_LOG2,
+                                                                        ui32ContextFlags,
+                                                                        0, /* priority */
+                                                                        0, /* max deadline MS */
+                                                                        0, /* robustness address */
+                                                                        & sInfo,
+                                                                        & psKickSyncContext->psServerCommonContext);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_contextalloc;
+       }
+
+       OSWRLockAcquireWrite(psDevInfo->hKickSyncCtxListLock);
+       dllist_add_to_tail(&(psDevInfo->sKickSyncCtxtListHead), &(psKickSyncContext->sListNode));
+       OSWRLockReleaseWrite(psDevInfo->hKickSyncCtxListLock);
+
+       SyncAddrListInit(&psKickSyncContext->sSyncAddrListFence);
+       SyncAddrListInit(&psKickSyncContext->sSyncAddrListUpdate);
+
+       * ppsKickSyncContext = psKickSyncContext;
+       return PVRSRV_OK;
+
+fail_contextalloc:
+       OSLockDestroy(psKickSyncContext->hLock);
+err_lockcreate:
+       OSFreeMem(psKickSyncContext);
+       return eError;
+}
+
+
+PVRSRV_ERROR PVRSRVRGXDestroyKickSyncContextKM(RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext)
+{
+       PVRSRV_RGXDEV_INFO * psDevInfo = psKickSyncContext->psDeviceNode->pvDevice;
+       PVRSRV_ERROR         eError;
+
+       /* Check if the FW has finished with this resource ... */
+       eError = RGXFWRequestCommonContextCleanUp(psKickSyncContext->psDeviceNode,
+                                                 psKickSyncContext->psServerCommonContext,
+                                                 RGXFWIF_DM_GP,
+                                                 PDUMP_FLAGS_NONE);
+
+       if (eError == PVRSRV_ERROR_RETRY)
+       {
+               return eError;
+       }
+       else if (eError != PVRSRV_OK)
+       {
+               PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+               return eError;
+       }
+
+       /* ... it has so we can free its resources */
+
+       OSWRLockAcquireWrite(psDevInfo->hKickSyncCtxListLock);
+       dllist_remove_node(&(psKickSyncContext->sListNode));
+       OSWRLockReleaseWrite(psDevInfo->hKickSyncCtxListLock);
+
+       FWCommonContextFree(psKickSyncContext->psServerCommonContext);
+
+       SyncAddrListDeinit(&psKickSyncContext->sSyncAddrListFence);
+       SyncAddrListDeinit(&psKickSyncContext->sSyncAddrListUpdate);
+
+       OSLockDestroy(psKickSyncContext->hLock);
+
+       OSFreeMem(psKickSyncContext);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVRGXSetKickSyncContextPropertyKM(RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContext,
+                                                   RGX_CONTEXT_PROPERTY eContextProperty,
+                                                   IMG_UINT64 ui64Input,
+                                                   IMG_UINT64 *pui64Output)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       switch (eContextProperty)
+       {
+               case RGX_CONTEXT_PROPERTY_FLAGS:
+               {
+                       IMG_UINT32 ui32ContextFlags = (IMG_UINT32)ui64Input;
+
+                       OSLockAcquire(psKickSyncContext->hLock);
+                       eError = FWCommonContextSetFlags(psKickSyncContext->psServerCommonContext,
+                                                        ui32ContextFlags);
+
+                       OSLockRelease(psKickSyncContext->hLock);
+                       break;
+               }
+
+               default:
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty));
+                       eError = PVRSRV_ERROR_NOT_SUPPORTED;
+               }
+       }
+
+       return eError;
+}
+
+void DumpKickSyncCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                           DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                           void *pvDumpDebugFile,
+                           IMG_UINT32 ui32VerbLevel)
+{
+       DLLIST_NODE *psNode, *psNext;
+       OSWRLockAcquireRead(psDevInfo->hKickSyncCtxListLock);
+       dllist_foreach_node(&psDevInfo->sKickSyncCtxtListHead, psNode, psNext)
+       {
+               RGX_SERVER_KICKSYNC_CONTEXT *psCurrentServerKickSyncCtx =
+                               IMG_CONTAINER_OF(psNode, RGX_SERVER_KICKSYNC_CONTEXT, sListNode);
+
+               if (NULL != psCurrentServerKickSyncCtx->psServerCommonContext)
+               {
+                       DumpFWCommonContextInfo(psCurrentServerKickSyncCtx->psServerCommonContext,
+                                               pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+               }
+       }
+       OSWRLockReleaseRead(psDevInfo->hKickSyncCtxListLock);
+}
+
+IMG_UINT32 CheckForStalledClientKickSyncCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       DLLIST_NODE *psNode, *psNext;
+       IMG_UINT32 ui32ContextBitMask = 0;
+
+       OSWRLockAcquireRead(psDevInfo->hKickSyncCtxListLock);
+
+       dllist_foreach_node(&psDevInfo->sKickSyncCtxtListHead, psNode, psNext)
+       {
+               RGX_SERVER_KICKSYNC_CONTEXT *psCurrentServerKickSyncCtx =
+                       IMG_CONTAINER_OF(psNode, RGX_SERVER_KICKSYNC_CONTEXT, sListNode);
+
+               if (NULL != psCurrentServerKickSyncCtx->psServerCommonContext)
+               {
+                       if (CheckStalledClientCommonContext(psCurrentServerKickSyncCtx->psServerCommonContext, RGX_KICK_TYPE_DM_GP) == PVRSRV_ERROR_CCCB_STALLED)
+                       {
+                               ui32ContextBitMask |= RGX_KICK_TYPE_DM_GP;
+                       }
+               }
+       }
+
+       OSWRLockReleaseRead(psDevInfo->hKickSyncCtxListLock);
+       return ui32ContextBitMask;
+}
+
+PVRSRV_ERROR PVRSRVRGXKickSyncKM(RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext,
+                                 IMG_UINT32                    ui32ClientUpdateCount,
+                                 SYNC_PRIMITIVE_BLOCK       ** pauiClientUpdateUFODevVarBlock,
+                                 IMG_UINT32                  * paui32ClientUpdateOffset,
+                                 IMG_UINT32                  * paui32ClientUpdateValue,
+                                 PVRSRV_FENCE                  iCheckFence,
+                                 PVRSRV_TIMELINE               iUpdateTimeline,
+                                 PVRSRV_FENCE                * piUpdateFence,
+                                 IMG_CHAR                      szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],
+                                 IMG_UINT32                    ui32ExtJobRef)
+{
+       RGXFWIF_KCCB_CMD         sKickSyncKCCBCmd;
+       RGX_CCB_CMD_HELPER_DATA  asCmdHelperData[1];
+       PVRSRV_ERROR             eError;
+       PVRSRV_ERROR             eError2;
+       IMG_BOOL                 bCCBStateOpen = IMG_FALSE;
+       PRGXFWIF_UFO_ADDR        *pauiClientFenceUFOAddress = NULL;
+       PRGXFWIF_UFO_ADDR        *pauiClientUpdateUFOAddress = NULL;
+       IMG_UINT32               ui32ClientFenceCount = 0;
+       IMG_UINT32               *paui32ClientFenceValue = NULL;
+       PVRSRV_FENCE             iUpdateFence = PVRSRV_NO_FENCE;
+       IMG_UINT32               ui32FWCtx = FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext).ui32Addr;
+       PVRSRV_RGXDEV_INFO       *psDevInfo = FWCommonContextGetRGXDevInfo(psKickSyncContext->psServerCommonContext);
+       RGX_CLIENT_CCB           *psClientCCB = FWCommonContextGetClientCCB(psKickSyncContext->psServerCommonContext);
+       IMG_UINT32               ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal);
+       IMG_UINT64               uiCheckFenceUID = 0;
+       IMG_UINT64               uiUpdateFenceUID = 0;
+       PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL;
+       PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+       IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+       IMG_UINT32 ui32FenceTimelineUpdateValue = 0;
+       IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+       PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL;
+       void *pvUpdateFenceFinaliseData = NULL;
+
+       /* Ensure we haven't been given a NULL pointer for the update values
+        * when we have been told there are dev var updates
+        */
+       if (ui32ClientUpdateCount > 0)
+       {
+               PVR_LOG_RETURN_IF_FALSE(paui32ClientUpdateValue != NULL,
+                                       "paui32ClientUpdateValue NULL but ui32ClientUpdateCount > 0",
+                                       PVRSRV_ERROR_INVALID_PARAMS);
+       }
+
+       OSLockAcquire(psKickSyncContext->hLock);
+       eError = SyncAddrListPopulate(&psKickSyncContext->sSyncAddrListUpdate,
+                                                       ui32ClientUpdateCount,
+                                                       pauiClientUpdateUFODevVarBlock,
+                                                       paui32ClientUpdateOffset);
+
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_syncaddrlist;
+       }
+
+       if (ui32ClientUpdateCount > 0)
+       {
+               pauiClientUpdateUFOAddress = psKickSyncContext->sSyncAddrListUpdate.pasFWAddrs;
+       }
+       /* Ensure the fence name string is null-terminated (required for safety) */
+       szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH - 1] = '\0';
+
+       /* This will never be true if called from the bridge since piUpdateFence will always be valid */
+       if (iUpdateTimeline >= 0 && !piUpdateFence)
+       {
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto out_unlock;
+       }
+
+       CHKPT_DBG((PVR_DBG_ERROR,
+                          "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), "
+                          "psKickSyncContext->psDeviceNode->hSyncCheckpointContext=<%p>...",
+                          __func__, iCheckFence,
+                          (void*)psKickSyncContext->psDeviceNode->hSyncCheckpointContext));
+       /* Resolve the sync checkpoints that make up the input fence */
+       eError = SyncCheckpointResolveFence(psKickSyncContext->psDeviceNode->hSyncCheckpointContext,
+                                           iCheckFence,
+                                           &ui32FenceSyncCheckpointCount,
+                                           &apsFenceSyncCheckpoints,
+                                           &uiCheckFenceUID,
+                                           PDUMP_FLAGS_NONE);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_resolve_fence;
+       }
+
+       /* Create the output fence (if required) */
+       if (iUpdateTimeline != PVRSRV_NO_TIMELINE)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR,
+                                  "%s: calling SyncCheckpointCreateFence (iUpdateTimeline=%d)...",
+                                  __func__, iUpdateTimeline));
+               eError = SyncCheckpointCreateFence(psKickSyncContext->psDeviceNode,
+                                                  szUpdateFenceName,
+                                                  iUpdateTimeline,
+                                                  psKickSyncContext->psDeviceNode->hSyncCheckpointContext,
+                                                  &iUpdateFence,
+                                                  &uiUpdateFenceUID,
+                                                  &pvUpdateFenceFinaliseData,
+                                                  &psUpdateSyncCheckpoint,
+                                                  (void*)&psFenceTimelineUpdateSync,
+                                                  &ui32FenceTimelineUpdateValue,
+                                                  PDUMP_FLAGS_NONE);
+               if (eError != PVRSRV_OK)
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned error (%d)",
+                                          __func__, eError));
+                       goto fail_create_output_fence;
+               }
+               CHKPT_DBG((PVR_DBG_ERROR,
+                                  "%s: ...returned from SyncCheckpointCreateFence "
+                                  "(iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, "
+                                  "ui32FenceTimelineUpdateValue=%u)",
+                                  __func__, iUpdateFence, psFenceTimelineUpdateSync,
+                                  ui32FenceTimelineUpdateValue));
+
+               /* Append the sync prim update for the timeline (if required) */
+               if (psFenceTimelineUpdateSync)
+               {
+                       IMG_UINT32 *pui32TimelineUpdateWp = NULL;
+
+                       /* Allocate memory to hold the list of update values (including our timeline update) */
+                       pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*paui32ClientUpdateValue) * (ui32ClientUpdateCount+1));
+                       if (!pui32IntAllocatedUpdateValues)
+                       {
+                               /* Failed to allocate memory */
+                               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto fail_alloc_update_values_mem;
+                       }
+                       OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32ClientUpdateCount+1));
+                       /* Copy the update values into the new memory, then append our timeline update value */
+                       OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32ClientUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32ClientUpdateCount);
+                       /* Now set the additional update value */
+                       pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32ClientUpdateCount;
+                       *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
+                       ui32ClientUpdateCount++;
+                       /* Now make sure paui32ClientUpdateValue points to pui32IntAllocatedUpdateValues */
+                       paui32ClientUpdateValue = pui32IntAllocatedUpdateValues;
+#if defined(KICKSYNC_CHECKPOINT_DEBUG)
+                       {
+                               IMG_UINT32 iii;
+                               IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+                               for (iii=0; iii<ui32ClientUpdateCount; iii++)
+                               {
+                                       CHKPT_DBG((PVR_DBG_ERROR,
+                                                          "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x",
+                                                          __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                                       pui32Tmp++;
+                               }
+                       }
+#endif
+                       /* Now append the timeline sync prim addr to the kicksync context update list */
+                       SyncAddrListAppendSyncPrim(&psKickSyncContext->sSyncAddrListUpdate,
+                                                  psFenceTimelineUpdateSync);
+               }
+       }
+
+       /* Reset number of fence syncs in kicksync context fence list to 0 */
+       SyncAddrListPopulate(&psKickSyncContext->sSyncAddrListFence,
+                            0, NULL, NULL);
+
+       if (ui32FenceSyncCheckpointCount > 0)
+       {
+               /* Append the checks (from input fence) */
+               CHKPT_DBG((PVR_DBG_ERROR,
+                                  "%s:   Append %d sync checkpoints to KickSync Fence "
+                                  "(&psKickSyncContext->sSyncAddrListFence=<%p>)...",
+                                  __func__, ui32FenceSyncCheckpointCount,
+                                  (void*)&psKickSyncContext->sSyncAddrListFence));
+               SyncAddrListAppendCheckpoints(&psKickSyncContext->sSyncAddrListFence,
+                                                                         ui32FenceSyncCheckpointCount,
+                                                                         apsFenceSyncCheckpoints);
+               if (!pauiClientFenceUFOAddress)
+               {
+                       pauiClientFenceUFOAddress = psKickSyncContext->sSyncAddrListFence.pasFWAddrs;
+               }
+               ui32ClientFenceCount += ui32FenceSyncCheckpointCount;
+#if defined(KICKSYNC_CHECKPOINT_DEBUG)
+                       {
+                               IMG_UINT32 iii;
+                               IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiClientFenceUFOAddress;
+
+                               for (iii=0; iii<ui32ClientFenceCount; iii++)
+                               {
+                                       CHKPT_DBG((PVR_DBG_ERROR,
+                                                          "%s: pauiClientFenceUFOAddress[%d](<%p>) = 0x%x",
+                                                          __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                                       pui32Tmp++;
+                               }
+                       }
+#endif
+       }
+
+       if (psUpdateSyncCheckpoint)
+       {
+               PVRSRV_ERROR eErr;
+
+               /* Append the update (from output fence) */
+               CHKPT_DBG((PVR_DBG_ERROR,
+                                  "%s:   Append 1 sync checkpoint to KickSync Update "
+                                  "(&psKickSyncContext->sSyncAddrListUpdate=<%p>)...",
+                                  __func__, (void*)&psKickSyncContext->sSyncAddrListUpdate));
+               eErr = SyncAddrListAppendCheckpoints(&psKickSyncContext->sSyncAddrListUpdate,
+                                                                                        1,
+                                                                                        &psUpdateSyncCheckpoint);
+               if (eErr != PVRSRV_OK)
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR,
+                                          "%s:  ...done. SyncAddrListAppendCheckpoints() returned error (%d)",
+                                          __func__, eErr));
+               }
+               else
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s:  ...done.", __func__));
+               }
+               if (!pauiClientUpdateUFOAddress)
+               {
+                       pauiClientUpdateUFOAddress = psKickSyncContext->sSyncAddrListUpdate.pasFWAddrs;
+               }
+               ui32ClientUpdateCount++;
+#if defined(KICKSYNC_CHECKPOINT_DEBUG)
+               {
+                       IMG_UINT32 iii;
+                       IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiClientUpdateUFOAddress;
+
+                       for (iii=0; iii<ui32ClientUpdateCount; iii++)
+                       {
+                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                  "%s: pauiClientUpdateUFOAddress[%d](<%p>) = 0x%x",
+                                                  __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                               pui32Tmp++;
+                       }
+               }
+#endif
+       }
+
+#if (ENABLE_KICKSYNC_UFO_DUMP == 1)
+               PVR_DPF((PVR_DBG_ERROR, "%s: dumping KICKSYNC fence/updates syncs...",
+                                __func__));
+               {
+                       IMG_UINT32 ii;
+                       PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiClientFenceUFOAddress;
+                       IMG_UINT32 *pui32TmpIntFenceValue = paui32ClientFenceValue;
+                       PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiClientUpdateUFOAddress;
+                       IMG_UINT32 *pui32TmpIntUpdateValue = paui32ClientUpdateValue;
+
+                       /* Dump Fence syncs and Update syncs */
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Prepared %d KickSync fence syncs "
+                                        "(&psKickSyncContext->sSyncAddrListFence=<%p>, "
+                                        "pauiClientFenceUFOAddress=<%p>):",
+                                        __func__, ui32ClientFenceCount,
+                                        (void*)&psKickSyncContext->sSyncAddrListFence,
+                                        (void*)pauiClientFenceUFOAddress));
+                       for (ii=0; ii<ui32ClientFenceCount; ii++)
+                       {
+                               if (psTmpIntFenceUFOAddress->ui32Addr & 0x1)
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR,
+                                                        "%s:   %d/%d<%p>. FWAddr=0x%x, "
+                                                        "CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
+                                                        __func__, ii + 1, ui32ClientFenceCount,
+                                                        (void*)psTmpIntFenceUFOAddress,
+                                                        psTmpIntFenceUFOAddress->ui32Addr));
+                               }
+                               else
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR,
+                                                        "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)",
+                                                        __func__, ii + 1, ui32ClientFenceCount,
+                                                        (void*)psTmpIntFenceUFOAddress,
+                                                        psTmpIntFenceUFOAddress->ui32Addr,
+                                                        *pui32TmpIntFenceValue,
+                                                        *pui32TmpIntFenceValue));
+                                       pui32TmpIntFenceValue++;
+                               }
+                               psTmpIntFenceUFOAddress++;
+                       }
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Prepared %d KickSync update syncs "
+                                        "(&psKickSyncContext->sSyncAddrListUpdate=<%p>, "
+                                        "pauiClientUpdateUFOAddress=<%p>):",
+                                        __func__, ui32ClientUpdateCount,
+                                        (void*)&psKickSyncContext->sSyncAddrListUpdate,
+                                        (void*)pauiClientUpdateUFOAddress));
+                       for (ii=0; ii<ui32ClientUpdateCount; ii++)
+                       {
+                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                  "%s:  Line %d, psTmpIntUpdateUFOAddress=<%p>",
+                                                  __func__, __LINE__,
+                                                  (void*)psTmpIntUpdateUFOAddress));
+                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                  "%s:  Line %d, pui32TmpIntUpdateValue=<%p>",
+                                                  __func__, __LINE__,
+                                                  (void*)pui32TmpIntUpdateValue));
+                               if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR,
+                                                        "%s:   %d/%d<%p>. FWAddr=0x%x, "
+                                                        "UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
+                                                        __func__, ii + 1, ui32ClientUpdateCount,
+                                                        (void*)psTmpIntUpdateUFOAddress,
+                                                        psTmpIntUpdateUFOAddress->ui32Addr));
+                               }
+                               else
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR,
+                                                        "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d",
+                                                        __func__, ii + 1, ui32ClientUpdateCount,
+                                                        (void*)psTmpIntUpdateUFOAddress,
+                                                        psTmpIntUpdateUFOAddress->ui32Addr,
+                                                        *pui32TmpIntUpdateValue));
+                                       pui32TmpIntUpdateValue++;
+                               }
+                               psTmpIntUpdateUFOAddress++;
+                       }
+               }
+#endif
+
+       RGXCmdHelperInitCmdCCB(psDevInfo,
+                              psClientCCB,
+                              0, /* empty ui64FBSCEntryMask */
+                              ui32ClientFenceCount,
+                              pauiClientFenceUFOAddress,
+                              paui32ClientFenceValue,
+                              ui32ClientUpdateCount,
+                              pauiClientUpdateUFOAddress,
+                              paui32ClientUpdateValue,
+                              0,
+                              NULL,
+                              NULL,
+                              NULL,
+                              NULL,
+                              RGXFWIF_CCB_CMD_TYPE_NULL,
+                              ui32ExtJobRef,
+                              ui32IntJobRef,
+                              PDUMP_FLAGS_NONE,
+                              NULL,
+                              "KickSync",
+                              bCCBStateOpen,
+                              asCmdHelperData);
+
+       eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asCmdHelperData), asCmdHelperData);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_cmdaquire;
+       }
+
+       /*
+        *  We should reserve space in the kernel CCB here and fill in the command
+        *  directly. That way, if there is no space in the kernel CCB, we can
+        *  return a retry to the Services client before committing any operations.
+        */
+
+       /*
+        * We might only be kicking to flush out a padding packet, so only submit
+        * the command if it was created successfully.
+        */
+       if (eError == PVRSRV_OK)
+       {
+               /*
+                * All the required resources are ready at this point, we can't fail so
+                * take the required server sync operations and commit all the resources
+                */
+               RGXCmdHelperReleaseCmdCCB(1,
+                                         asCmdHelperData,
+                                         "KickSync",
+                                         FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext).ui32Addr);
+       }
+
+       /* Construct the kernel kicksync CCB command. */
+       sKickSyncKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+       sKickSyncKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext);
+       sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB);
+       sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB);
+
+       sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+       sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+
+       /*
+        * Submit the kicksync command to the firmware.
+        */
+       RGXSRV_HWPERF_ENQ(psKickSyncContext,
+                         OSGetCurrentClientProcessIDKM(),
+                         ui32FWCtx,
+                         ui32ExtJobRef,
+                         ui32IntJobRef,
+                         RGX_HWPERF_KICK_TYPE_SYNC,
+                         iCheckFence,
+                         iUpdateFence,
+                         iUpdateTimeline,
+                         uiCheckFenceUID,
+                         uiUpdateFenceUID,
+                         NO_DEADLINE,
+                         NO_CYCEST);
+
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               eError2 = RGXScheduleCommand(psKickSyncContext->psDeviceNode->pvDevice,
+                                            RGXFWIF_DM_GP,
+                                            & sKickSyncKCCBCmd,
+                                            PDUMP_FLAGS_NONE);
+               if (eError2 != PVRSRV_ERROR_RETRY)
+               {
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       PVRGpuTraceEnqueueEvent(psKickSyncContext->psDeviceNode->pvDevice,
+                               ui32FWCtx, ui32ExtJobRef, ui32IntJobRef,
+                               RGX_HWPERF_KICK_TYPE_SYNC);
+
+       if (eError2 != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "PVRSRVRGXKickSync failed to schedule kernel CCB command. (0x%x)",
+                        eError2));
+               if (eError == PVRSRV_OK)
+               {
+                       eError = eError2;
+               }
+       }
+
+       /*
+        * Now check eError, which may hold an error from our earlier call to
+        * RGXCmdHelperAcquireCmdCCB or from the schedule above - we needed to
+        * process any flush command first, so we check it now...
+        */
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_cmdaquire;
+       }
+
+#if defined(NO_HARDWARE)
+       /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+       if (psUpdateSyncCheckpoint)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR,
+                                  "%s:   Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x",
+                                  __func__, (void*)psUpdateSyncCheckpoint,
+                                  SyncCheckpointGetId(psUpdateSyncCheckpoint),
+                                  SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint)));
+               SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
+       }
+       if (psFenceTimelineUpdateSync)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR,
+                                  "%s:   Updating NOHW sync prim<%p> to %d",
+                                  __func__, (void*)psFenceTimelineUpdateSync,
+                                  ui32FenceTimelineUpdateValue));
+               SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
+       }
+       SyncCheckpointNoHWUpdateTimelines(NULL);
+#endif
+       /* Drop the references taken on the sync checkpoints in the
+        * resolved input fence */
+       SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+                                                                apsFenceSyncCheckpoints);
+       /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+       if (apsFenceSyncCheckpoints)
+       {
+               SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+       }
+       /* Free memory allocated to hold the internal list of update values */
+       if (pui32IntAllocatedUpdateValues)
+       {
+               OSFreeMem(pui32IntAllocatedUpdateValues);
+               pui32IntAllocatedUpdateValues = NULL;
+       }
+
+       *piUpdateFence = iUpdateFence;
+       if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE))
+       {
+               SyncCheckpointFinaliseFence(psKickSyncContext->psDeviceNode, iUpdateFence,
+                                                                       pvUpdateFenceFinaliseData,
+                                                                       psUpdateSyncCheckpoint, szUpdateFenceName);
+       }
+
+       OSLockRelease(psKickSyncContext->hLock);
+       return PVRSRV_OK;
+
+fail_cmdaquire:
+       SyncAddrListRollbackCheckpoints(psKickSyncContext->psDeviceNode, &psKickSyncContext->sSyncAddrListFence);
+       SyncAddrListRollbackCheckpoints(psKickSyncContext->psDeviceNode, &psKickSyncContext->sSyncAddrListUpdate);
+       if (iUpdateFence != PVRSRV_NO_FENCE)
+       {
+               SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
+       }
+
+       /* Free memory allocated to hold update values */
+       if (pui32IntAllocatedUpdateValues)
+       {
+               OSFreeMem(pui32IntAllocatedUpdateValues);
+       }
+fail_alloc_update_values_mem:
+fail_create_output_fence:
+       /* Drop the references taken on the sync checkpoints in the
+        * resolved input fence */
+       SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+                                                                apsFenceSyncCheckpoints);
+       /* Free memory allocated to hold the resolved fence's checkpoints */
+       if (apsFenceSyncCheckpoints)
+       {
+               SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+       }
+fail_resolve_fence:
+fail_syncaddrlist:
+out_unlock:
+       OSLockRelease(psKickSyncContext->hLock);
+       return eError;
+}
+
+/**************************************************************************//**
+ End of file (rgxkicksync.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxkicksync.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxkicksync.h
new file mode 100644 (file)
index 0000000..57b49a0
--- /dev/null
@@ -0,0 +1,128 @@
+/*************************************************************************/ /*!
+@File           rgxkicksync.h
+@Title          Server side of the sync only kick API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXKICKSYNC_H)
+#define RGXKICKSYNC_H
+
+#include "pvrsrv_error.h"
+#include "connection_server.h"
+#include "sync_server.h"
+#include "rgxdevice.h"
+
+
+typedef struct _RGX_SERVER_KICKSYNC_CONTEXT_ RGX_SERVER_KICKSYNC_CONTEXT;
+
+/**************************************************************************/ /*!
+@Function       DumpKickSyncCtxtsInfo
+@Description    Function that dumps debug info of kick sync ctxs on this device
+@Return         none
+*/ /**************************************************************************/
+void
+DumpKickSyncCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                      DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                      void *pvDumpDebugFile,
+                      IMG_UINT32 ui32VerbLevel);
+
+/**************************************************************************/ /*!
+@Function       CheckForStalledClientKickSyncCtxt
+@Description    Function that checks if a kick sync client is stalled
+@Return         RGX_KICK_TYPE_DM_GP on stalled context. Otherwise, 0
+*/ /**************************************************************************/
+IMG_UINT32 CheckForStalledClientKickSyncCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVRGXCreateKickSyncContextKM
+@Description    Server-side implementation of RGXCreateKicksyncContext
+@Return         PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVRGXCreateKickSyncContextKM(CONNECTION_DATA              * psConnection,
+                                 PVRSRV_DEVICE_NODE           * psDeviceNode,
+                                 IMG_HANDLE                     hMemCtxPrivData,
+                                 IMG_UINT32                     ui32PackedCCBSizeU88,
+                                 IMG_UINT32                     ui32ContextFlags,
+                                 RGX_SERVER_KICKSYNC_CONTEXT ** ppsKicksyncContext);
+
+
+
+/**************************************************************************/ /*!
+@Function       PVRSRVRGXDestroyKickSyncContextKM
+@Description    Server-side implementation of RGXDestroyKicksyncContext
+@Return         PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVRGXDestroyKickSyncContextKM(RGX_SERVER_KICKSYNC_CONTEXT * psKicksyncContext);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVRGXSetKickSyncContextPropertyKM
+@Description    Server-side implementation of RGXSetKickSyncContextProperty
+@Return         PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+ */ /**************************************************************************/
+PVRSRV_ERROR PVRSRVRGXSetKickSyncContextPropertyKM(RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContext,
+                                                   RGX_CONTEXT_PROPERTY eContextProperty,
+                                                   IMG_UINT64 ui64Input,
+                                                   IMG_UINT64 *pui64Output);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVRGXKickSyncKM
+@Description    Kicks a sync only command: a NULL workload that carries only fence checks and sync updates
+@Return         PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVRGXKickSyncKM(RGX_SERVER_KICKSYNC_CONTEXT * psKicksyncContext,
+                    IMG_UINT32                    ui32ClientUpdateCount,
+                    SYNC_PRIMITIVE_BLOCK       ** pauiClientUpdateUFODevVarBlock,
+                    IMG_UINT32                  * paui32ClientUpdateDevVarOffset,
+                    IMG_UINT32                  * paui32ClientUpdateValue,
+                    PVRSRV_FENCE                  iCheckFence,
+                    PVRSRV_TIMELINE               iUpdateTimeline,
+                    PVRSRV_FENCE                * piUpdateFence,
+                    IMG_CHAR                      szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],
+                    IMG_UINT32                    ui32ExtJobRef);
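+/* Illustrative call sequence (a sketch only; the real callers are the
+ * generated kicksync bridge handlers, and the local names below are
+ * placeholders):
+ *
+ *     RGX_SERVER_KICKSYNC_CONTEXT *psCtx;
+ *     IMG_CHAR szName[PVRSRV_SYNC_NAME_LENGTH] = "kicksync";
+ *     PVRSRV_FENCE iOutFence = PVRSRV_NO_FENCE;
+ *
+ *     PVRSRVRGXCreateKickSyncContextKM(psConnection, psDeviceNode,
+ *                                      hMemCtxPrivData, 0, 0, &psCtx);
+ *     PVRSRVRGXKickSyncKM(psCtx, 0, NULL, NULL, NULL,
+ *                         iCheckFence, iUpdateTimeline, &iOutFence,
+ *                         szName, ui32ExtJobRef);
+ *     PVRSRVRGXDestroyKickSyncContextKM(psCtx);
+ */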
+
+#endif /* RGXKICKSYNC_H */
+
+/**************************************************************************//**
+ End of file (rgxkicksync.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxmem.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxmem.c
new file mode 100644 (file)
index 0000000..de38b1c
--- /dev/null
@@ -0,0 +1,947 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX memory context management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX memory context management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvr_debug.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_server_utils.h"
+#include "devicemem_pdump.h"
+#include "rgxdevice.h"
+#include "rgx_fwif_km.h"
+#include "rgxfwutils.h"
+#include "pdump_km.h"
+#include "pdump_physmem.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "sync_internal.h"
+#include "rgx_memallocflags.h"
+#include "rgx_bvnc_defs_km.h"
+#include "info_page.h"
+
+#if defined(PDUMP)
+#include "sync.h"
+#endif
+
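+/* Server-side record of a client device memory (MMU) context. Each instance
+ * is linked into psDevInfo->sMemoryContextList (guarded by
+ * hMemoryCtxListLock) so SLC flushes and FBSC invalidations can be routed to
+ * the owning firmware memory context; the PID and process name are recorded
+ * for page-fault debug.
+ */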
+struct SERVER_MMU_CONTEXT_TAG
+{
+       DEVMEM_MEMDESC *psFWMemContextMemDesc;
+       PRGXFWIF_FWMEMCONTEXT sFWMemContextDevVirtAddr;
+       MMU_CONTEXT *psMMUContext;
+       IMG_PID uiPID;
+       IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME];
+       IMG_UINT64 ui64FBSCEntryMask;
+       DLLIST_NODE sNode;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+}; /* SERVER_MMU_CONTEXT is typedef-ed in rgxmem.h */
+
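+/* Flush (and optionally invalidate) the SLC for a range belonging to the
+ * given MMU context: find the owning server MMU context, then submit an
+ * SLCFLUSHINVAL command to the firmware and wait for its kCCB slot to
+ * complete.
+ */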
+PVRSRV_ERROR RGXSLCFlushRange(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                         MMU_CONTEXT *psMMUContext,
+                                                         IMG_DEV_VIRTADDR sDevVAddr,
+                                                         IMG_DEVMEM_SIZE_T uiSize,
+                                                         IMG_BOOL bInvalidate)
+{
+       PVRSRV_ERROR eError;
+       DLLIST_NODE *psNode, *psNext;
+       RGXFWIF_KCCB_CMD sFlushInvalCmd;
+       SERVER_MMU_CONTEXT *psServerMMUContext = NULL;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       IMG_UINT32 ui32kCCBCommandSlot;
+
+       OSWRLockAcquireRead(psDevInfo->hMemoryCtxListLock);
+
+       dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext)
+       {
+               SERVER_MMU_CONTEXT *psIter = IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode);
+               if (psIter->psMMUContext == psMMUContext)
+               {
+                       psServerMMUContext = psIter;
+               }
+       }
+
+       OSWRLockReleaseRead(psDevInfo->hMemoryCtxListLock);
+
+       if (! psServerMMUContext)
+       {
+               return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND;
+       }
+
+       /* Schedule the SLC flush command */
+#if defined(PDUMP)
+       PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                             "Submit SLC flush and invalidate");
+#endif
+       sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
+       sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = bInvalidate;
+       sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE;
+       sFlushInvalCmd.uCmdData.sSLCFlushInvalData.ui64Size = uiSize;
+       sFlushInvalCmd.uCmdData.sSLCFlushInvalData.ui64Address = sDevVAddr.uiAddr;
+       eError = RGXGetFWCommonContextAddrFromServerMMUCtx(psDevInfo,
+                                                                                                          psServerMMUContext,
+                                                                                                          &sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext);
+       if (eError != PVRSRV_OK)
+       {
+               return eError;
+       }
+
+       eError = RGXSendCommandWithPowLockAndGetKCCBSlot(psDevInfo,
+                                                                          &sFlushInvalCmd,
+                                                                          PDUMP_FLAGS_CONTINUOUS,
+                                                                          &ui32kCCBCommandSlot);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "RGXSLCFlush: Failed to schedule SLC flush command with error (%u)",
+                        eError));
+       }
+       else
+       {
+               /* Wait for the SLC flush to complete */
+               eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "RGXSLCFlush: SLC flush and invalidate aborted with error (%u)",
+                                eError));
+               }
+       }
+
+       return eError;
+}
+
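+/* Note: this does not talk to the firmware directly; it only accumulates the
+ * requested FBSC entries on the server MMU context. The mask is later handed
+ * back (and cleared) by RGXExtractFBSCEntryMaskFromMMUContext.
+ */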
+PVRSRV_ERROR RGXInvalidateFBSCTable(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                       MMU_CONTEXT *psMMUContext,
+                                                                       IMG_UINT64 ui64FBSCEntryMask)
+{
+       DLLIST_NODE *psNode, *psNext;
+       SERVER_MMU_CONTEXT *psServerMMUContext = NULL;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       OSWRLockAcquireRead(psDevInfo->hMemoryCtxListLock);
+
+       dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext)
+       {
+               SERVER_MMU_CONTEXT *psIter = IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode);
+               if (psIter->psMMUContext == psMMUContext)
+               {
+                       psServerMMUContext = psIter;
+               }
+       }
+
+       OSWRLockReleaseRead(psDevInfo->hMemoryCtxListLock);
+
+       if (! psServerMMUContext)
+       {
+               return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND;
+       }
+
+       /* Accumulate the FBSC invalidate request */
+       psServerMMUContext->ui64FBSCEntryMask |= ui64FBSCEntryMask;
+
+       return PVRSRV_OK;
+}
+
+/*
+ * RGXExtractFBSCEntryMaskFromMMUContext
+ *
+ * Hand back the FBSC entries accumulated for this server MMU context and
+ * clear the stored mask so each invalidation is only issued once.
+ */
+PVRSRV_ERROR RGXExtractFBSCEntryMaskFromMMUContext(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                                                  SERVER_MMU_CONTEXT *psServerMMUContext,
+                                                                                                  IMG_UINT64 *pui64FBSCEntryMask)
+{
+       if (!psServerMMUContext)
+       {
+               return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND;
+       }
+
+       *pui64FBSCEntryMask = psServerMMUContext->ui64FBSCEntryMask;
+       psServerMMUContext->ui64FBSCEntryMask = 0;
+
+       return PVRSRV_OK;
+}
+
+void RGXMMUCacheInvalidate(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                  MMU_CONTEXT *psMMUContext,
+                                                  MMU_LEVEL eMMULevel,
+                                                  IMG_BOOL bUnmap)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+       IMG_UINT32 ui32NewCacheFlags;
+
+       PVR_UNREFERENCED_PARAMETER(bUnmap);
+
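+       /* Map the MMU level that changed to the firmware cache flags to flush:
+        * page catalogue, page directory or page table. Page table changes also
+        * require a TLB flush unless the SLC is virtually tagged (SLC_VIVT), in
+        * which case the flags are tracked on the client MMU context rather than
+        * on the kernel MMU context.
+        */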
+       switch (eMMULevel)
+       {
+               case MMU_LEVEL_3:
+                       ui32NewCacheFlags = RGXFWIF_MMUCACHEDATA_FLAGS_PC;
+
+                       break;
+               case MMU_LEVEL_2:
+                       ui32NewCacheFlags = RGXFWIF_MMUCACHEDATA_FLAGS_PD;
+
+                       break;
+               case MMU_LEVEL_1:
+                       ui32NewCacheFlags = RGXFWIF_MMUCACHEDATA_FLAGS_PT;
+
+#if defined(RGX_FEATURE_SLC_VIVT_BIT_MASK)
+                       if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT)))
+#endif
+                       {
+                               ui32NewCacheFlags |= RGXFWIF_MMUCACHEDATA_FLAGS_TLB;
+                       }
+
+                       break;
+               default:
+                       ui32NewCacheFlags = 0;
+                       PVR_ASSERT(0);
+
+                       break;
+       }
+
+#if defined(RGX_FEATURE_SLC_VIVT_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT))
+       {
+               MMU_AppendCacheFlags(psMMUContext, ui32NewCacheFlags);
+       }
+       else
+#endif
+       {
+               MMU_AppendCacheFlags(psDevInfo->psKernelMMUCtx, ui32NewCacheFlags);
+       }
+}
+
+static inline void _GetAndResetCacheOpsPending(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                               IMG_UINT32 *pui32FWCacheFlags)
+{
+       /*
+        * Atomically exchange flags and 0 to ensure we never accidentally read
+        * state inconsistently or overwrite valid cache flags with 0.
+        */
+       *pui32FWCacheFlags = MMU_ExchangeCacheFlags(psDevInfo->psKernelMMUCtx, 0);
+}
+
+static
+PVRSRV_ERROR _PrepareAndSubmitCacheCommand(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                           RGXFWIF_DM eDM,
+                                           IMG_UINT32 ui32CacheFlags,
+                                           IMG_BOOL bInterrupt,
+                                           IMG_UINT32 *pui32MMUInvalidateUpdate)
+{
+       PVRSRV_ERROR eError;
+       RGXFWIF_KCCB_CMD sFlushCmd;
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+       *pui32MMUInvalidateUpdate = psDeviceNode->ui32NextMMUInvalidateUpdate++;
+
+       /* Setup cmd and add the device nodes sync object */
+       sFlushCmd.eCmdType = RGXFWIF_KCCB_CMD_MMUCACHE;
+       sFlushCmd.uCmdData.sMMUCacheData.ui32MMUCacheSyncUpdateValue = *pui32MMUInvalidateUpdate;
+       SyncPrimGetFirmwareAddr(psDeviceNode->psMMUCacheSyncPrim,
+                               &sFlushCmd.uCmdData.sMMUCacheData.sMMUCacheSync.ui32Addr);
+
+       /* Indicate the firmware should signal command completion to the host */
+       if (bInterrupt)
+       {
+               ui32CacheFlags |= RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT;
+       }
+
+       sFlushCmd.uCmdData.sMMUCacheData.ui32CacheFlags = ui32CacheFlags;
+
+#if defined(PDUMP)
+       PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                             "Submit MMU flush and invalidate (flags = 0x%08x)",
+                             ui32CacheFlags);
+#endif
+
+       /* Schedule MMU cache command */
+       eError = RGXSendCommand(psDevInfo,
+                                                       &sFlushCmd,
+                                                       PDUMP_FLAGS_CONTINUOUS);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed to schedule MMU cache command to "
+                        "DM=%d with error (%u)",
+                        __func__, eDM, eError));
+               psDeviceNode->ui32NextMMUInvalidateUpdate--;
+       }
+
+       return eError;
+}
+
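+/* Take the power lock, collect any pending MMU cache flags, make sure the GPU
+ * is powered on and submit an interrupting MMUCACHE command. If either step
+ * fails, the flags are re-appended to the kernel MMU context so a later kick
+ * can retry the invalidation.
+ */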
+PVRSRV_ERROR RGXMMUCacheInvalidateKick(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                       IMG_UINT32 *pui32MMUInvalidateUpdate)
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32FWCacheFlags;
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+       eError = PVRSRVPowerLock(psDeviceNode);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)",
+                                       __func__, PVRSRVGetErrorString(eError)));
+               goto RGXMMUCacheInvalidateKick_exit;
+       }
+
+       _GetAndResetCacheOpsPending(psDeviceNode->pvDevice, &ui32FWCacheFlags);
+       if (ui32FWCacheFlags == 0)
+       {
+               /* Nothing to do if no cache ops pending */
+               eError = PVRSRV_OK;
+               goto _PowerUnlockAndReturnErr;
+       }
+
+       /* Ensure device is powered up before sending cache command */
+       PDUMPPOWCMDSTART(psDeviceNode);
+       eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+                                                                                PVRSRV_DEV_POWER_STATE_ON,
+                                                                                PVRSRV_POWER_FLAGS_NONE);
+       PDUMPPOWCMDEND(psDeviceNode);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition RGX to ON (%s)",
+                                       __func__, PVRSRVGetErrorString(eError)));
+               MMU_AppendCacheFlags(psDevInfo->psKernelMMUCtx, ui32FWCacheFlags);
+               goto _PowerUnlockAndReturnErr;
+       }
+
+       eError = _PrepareAndSubmitCacheCommand(psDeviceNode, RGXFWIF_DM_GP, ui32FWCacheFlags,
+                                                                                  IMG_TRUE, pui32MMUInvalidateUpdate);
+       if (eError != PVRSRV_OK)
+       {
+               /* failed to submit cache operations, return failure */
+               PVR_DPF((PVR_DBG_WARNING, "%s: failed to submit cache command (%s)",
+                                       __func__, PVRSRVGetErrorString(eError)));
+               MMU_AppendCacheFlags(psDevInfo->psKernelMMUCtx, ui32FWCacheFlags);
+               goto _PowerUnlockAndReturnErr;
+       }
+
+_PowerUnlockAndReturnErr:
+       PVRSRVPowerUnlock(psDeviceNode);
+
+RGXMMUCacheInvalidateKick_exit:
+       return eError;
+}
+
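+/* Kick-path variant: the caller must already hold the power lock (asserted
+ * below); any pending cache ops are submitted without requesting a firmware
+ * interrupt.
+ */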
+PVRSRV_ERROR RGXPreKickCacheCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                       RGXFWIF_DM eDM,
+                                                                       IMG_UINT32 *pui32MMUInvalidateUpdate)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+       IMG_UINT32 ui32FWCacheFlags;
+
+       /* Caller should ensure that power lock is held before calling this function */
+       PVR_ASSERT(OSLockIsLocked(psDeviceNode->hPowerLock));
+
+       _GetAndResetCacheOpsPending(psDeviceNode->pvDevice, &ui32FWCacheFlags);
+       if (ui32FWCacheFlags == 0)
+       {
+               /* Nothing to do if no cache ops pending */
+               return PVRSRV_OK;
+       }
+
+       return _PrepareAndSubmitCacheCommand(psDeviceNode, eDM, ui32FWCacheFlags,
+                                            IMG_FALSE, pui32MMUInvalidateUpdate);
+}
+
+/* page fault debug is the only current use case that needs to find process info
+ * after that process's device memory context has been destroyed
+ */
+
+typedef struct _UNREGISTERED_MEMORY_CONTEXT_
+{
+       IMG_PID uiPID;
+       IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME];
+       IMG_DEV_PHYADDR sPCDevPAddr;
+} UNREGISTERED_MEMORY_CONTEXT;
+
+/* must be a power of two */
+#define UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE (1 << 3)
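+/* a power-of-two size lets the head index wrap with a bitwise AND against
+ * (SIZE - 1) instead of a modulo operation
+ */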
+
+static UNREGISTERED_MEMORY_CONTEXT gasUnregisteredMemCtxs[UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE];
+static IMG_UINT32 gui32UnregisteredMemCtxsHead;
+
+/* record a device memory context being unregistered.
+ * the list of unregistered contexts can be used to find the PID and process name
+ * belonging to a memory context which has been destroyed
+ */
+static void _RecordUnregisteredMemoryContext(PVRSRV_RGXDEV_INFO *psDevInfo, SERVER_MMU_CONTEXT *psServerMMUContext)
+{
+       UNREGISTERED_MEMORY_CONTEXT *psRecord;
+
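+       /* reserve the next slot in the circular history under the lock; the record
+        * itself is filled in after the head index has been advanced
+        */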
+       OSLockAcquire(psDevInfo->hMMUCtxUnregLock);
+
+       psRecord = &gasUnregisteredMemCtxs[gui32UnregisteredMemCtxsHead];
+
+       gui32UnregisteredMemCtxsHead = (gui32UnregisteredMemCtxsHead + 1)
+                                       & (UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1);
+
+       OSLockRelease(psDevInfo->hMMUCtxUnregLock);
+
+       psRecord->uiPID = psServerMMUContext->uiPID;
+       if (MMU_AcquireBaseAddr(psServerMMUContext->psMMUContext, &psRecord->sPCDevPAddr) != PVRSRV_OK)
+       {
+               PVR_LOG(("_RecordUnregisteredMemoryContext: Failed to get PC address for memory context"));
+       }
+       OSStringLCopy(psRecord->szProcessName, psServerMMUContext->szProcessName, sizeof(psRecord->szProcessName));
+}
+
+
+void RGXUnregisterMemoryContext(IMG_HANDLE hPrivData)
+{
+       SERVER_MMU_CONTEXT *psServerMMUContext = hPrivData;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psServerMMUContext->psDevInfo;
+
+#if defined(PDUMP)
+       {
+               RGXFWIF_DEV_VIRTADDR sFWAddr;
+
+               RGXSetFirmwareAddress(&sFWAddr,
+                                     psServerMMUContext->psFWMemContextMemDesc,
+                                     0,
+                                     RFW_FWADDR_NOREF_FLAG);
+
+               /*
+                * MMU cache commands (always dumped) might have a pointer to this FW
+                * memory context, so wait until the FW has caught up with the latest command.
+                */
+               PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                            "Ensure FW has executed all MMU invalidations on FW memory "
+                            "context 0x%x before freeing it", sFWAddr.ui32Addr);
+               SyncPrimPDumpPol(psDevInfo->psDeviceNode->psMMUCacheSyncPrim,
+                                psDevInfo->psDeviceNode->ui32NextMMUInvalidateUpdate - 1,
+                                0xFFFFFFFF,
+                                PDUMP_POLL_OPERATOR_GREATEREQUAL,
+                                PDUMP_FLAGS_CONTINUOUS);
+       }
+#endif
+
+       OSWRLockAcquireWrite(psDevInfo->hMemoryCtxListLock);
+       dllist_remove_node(&psServerMMUContext->sNode);
+       OSWRLockReleaseWrite(psDevInfo->hMemoryCtxListLock);
+
+       if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED)
+       {
+               _RecordUnregisteredMemoryContext(psDevInfo, psServerMMUContext);
+       }
+
+       /*
+        * Release the page catalogue address acquired in RGXRegisterMemoryContext().
+        */
+       MMU_ReleaseBaseAddr(NULL);
+
+       /*
+        * Free the firmware memory context.
+        */
+       PDUMPCOMMENT(psDevInfo->psDeviceNode, "Free FW memory context");
+       DevmemFwUnmapAndFree(psDevInfo, psServerMMUContext->psFWMemContextMemDesc);
+
+       OSFreeMem(psServerMMUContext);
+}
+
+/*
+ * RGXRegisterMemoryContext
+ */
+PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE       *psDeviceNode,
+                                                                         MMU_CONTEXT                   *psMMUContext,
+                                                                         IMG_HANDLE                    *hPrivData)
+{
+       PVRSRV_ERROR                    eError;
+       PVRSRV_RGXDEV_INFO              *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_MEMALLOCFLAGS_T  uiFWMemContextMemAllocFlags;
+       RGXFWIF_FWMEMCONTEXT    *psFWMemContext;
+       DEVMEM_MEMDESC                  *psFWMemContextMemDesc;
+       SERVER_MMU_CONTEXT *psServerMMUContext;
+
+       if (psDevInfo->psKernelMMUCtx == NULL)
+       {
+               /*
+                * This must be the creation of the Kernel memory context. Take a copy
+                * of the MMU context for use when programming the BIF.
+                */
+               psDevInfo->psKernelMMUCtx = psMMUContext;
+
+#if defined(RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR)
+               /* Setup the BRN71422 mapping in the FW memory context. */
+               if (RGX_IS_BRN_SUPPORTED(psDevInfo, 71422))
+               {
+                       RGXMapBRN71422TargetPhysicalAddress(psMMUContext);
+               }
+#endif
+       }
+       else
+       {
+               psServerMMUContext = OSAllocMem(sizeof(*psServerMMUContext));
+               if (psServerMMUContext == NULL)
+               {
+                       eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                       goto fail_alloc_server_ctx;
+               }
+
+               psServerMMUContext->psDevInfo = psDevInfo;
+               psServerMMUContext->ui64FBSCEntryMask = 0;
+               psServerMMUContext->sFWMemContextDevVirtAddr.ui32Addr = 0;
+
+               /*
+                * This FW MemContext is only mapped into the kernel for initialisation
+                * purposes; otherwise this allocation is only used by the FW.
+                * Therefore the GPU cache doesn't need coherency, and write-combine
+                * will suffice on the CPU side (the WC buffer is flushed at every kick).
+                */
+               uiFWMemContextMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                                                                               PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+                                                                               PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                                                                               PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                                                                               PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+                                                                               PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+                                                                               PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                                                                               PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC |
+                                                                               PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+                                                                               PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN);
+
+               /*
+                       Allocate device memory for the firmware memory context for the new
+                       application.
+               */
+               PDUMPCOMMENT(psDevInfo->psDeviceNode, "Allocate RGX firmware memory context");
+               eError = DevmemFwAllocate(psDevInfo,
+                                                               sizeof(*psFWMemContext),
+                                                               uiFWMemContextMemAllocFlags | PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+                                                               "FwMemoryContext",
+                                                               &psFWMemContextMemDesc);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to allocate firmware memory context (%u)",
+                                __func__,
+                                eError));
+                       goto fail_alloc_fw_ctx;
+               }
+
+               /*
+                       Temporarily map the firmware memory context to the kernel.
+               */
+               eError = DevmemAcquireCpuVirtAddr(psFWMemContextMemDesc,
+                                                                                 (void **)&psFWMemContext);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to map firmware memory context (%u)",
+                                __func__,
+                                eError));
+                       goto fail_acquire_cpu_addr;
+               }
+
+               /*
+                * Write the new memory context's page catalogue into the firmware memory
+                * context for the client.
+                */
+               eError = MMU_AcquireBaseAddr(psMMUContext, &psFWMemContext->sPCDevPAddr);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to acquire Page Catalogue address (%u)",
+                                __func__,
+                                eError));
+                       DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
+                       goto fail_acquire_base_addr;
+               }
+
+               /*
+                * Set default values for the rest of the structure.
+                */
+               psFWMemContext->uiPageCatBaseRegSet = RGXFW_BIF_INVALID_PCSET;
+               psFWMemContext->uiBreakpointAddr = 0;
+               psFWMemContext->uiBPHandlerAddr = 0;
+               psFWMemContext->uiBreakpointCtl = 0;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+               IMG_UINT32 ui32OSid = 0, ui32OSidReg = 0;
+               IMG_BOOL   bOSidAxiProt;
+
+               MMU_GetOSids(psMMUContext, &ui32OSid, &ui32OSidReg, &bOSidAxiProt);
+
+               psFWMemContext->ui32OSid     = ui32OSidReg;
+               psFWMemContext->bOSidAxiProt = bOSidAxiProt;
+}
+#endif
+
+#if defined(PDUMP)
+               {
+                       IMG_CHAR                        aszName[PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH];
+                       IMG_DEVMEM_OFFSET_T uiOffset = 0;
+
+                       /*
+                        * Dump the Mem context allocation
+                        */
+                       DevmemPDumpLoadMem(psFWMemContextMemDesc, 0, sizeof(*psFWMemContext), PDUMP_FLAGS_CONTINUOUS);
+
+
+                       /*
+                        * Obtain a symbolic addr of the mem context structure
+                        */
+                       eError = DevmemPDumpPageCatBaseToSAddr(psFWMemContextMemDesc,
+                                                                                                  &uiOffset,
+                                                                                                  aszName,
+                                                                                                  PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH);
+
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Failed to generate a Dump Page Catalogue address (%u)",
+                                        __func__,
+                                        eError));
+                               DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
+                               goto fail_pdump_cat_base_addr;
+                       }
+
+                       /*
+                        * Dump the Page Cat tag in the mem context (symbolic address)
+                        */
+                       eError = MMU_PDumpWritePageCatBase(psMMUContext,
+                                                                                               aszName,
+                                                                                               uiOffset,
+                                                                                               8, /* 64-bit register write */
+                                                                                               0,
+                                                                                               0,
+                                                                                               0);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Failed to PDump the Page Catalogue base address (%u)",
+                                        __func__,
+                                        eError));
+                               DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
+                               goto fail_pdump_cat_base;
+                       }
+               }
+#endif
+
+               /*
+                * Release kernel address acquired above.
+                */
+               DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
+
+               /*
+                * Store the process information for this device memory context
+                * for use with the host page-fault analysis.
+                */
+               psServerMMUContext->uiPID = OSGetCurrentClientProcessIDKM();
+               psServerMMUContext->psMMUContext = psMMUContext;
+               psServerMMUContext->psFWMemContextMemDesc = psFWMemContextMemDesc;
+               OSStringLCopy(psServerMMUContext->szProcessName,
+                             OSGetCurrentClientProcessNameKM(),
+                             sizeof(psServerMMUContext->szProcessName));
+
+               PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                     "New memory context: Process Name: %s PID: %u (0x%08X)",
+                                     psServerMMUContext->szProcessName,
+                                     psServerMMUContext->uiPID,
+                                     psServerMMUContext->uiPID);
+
+               OSWRLockAcquireWrite(psDevInfo->hMemoryCtxListLock);
+               dllist_add_to_tail(&psDevInfo->sMemoryContextList, &psServerMMUContext->sNode);
+               OSWRLockReleaseWrite(psDevInfo->hMemoryCtxListLock);
+
+               *hPrivData = psServerMMUContext;
+       }
+
+       return PVRSRV_OK;
+
+#if defined(PDUMP)
+fail_pdump_cat_base:
+fail_pdump_cat_base_addr:
+       MMU_ReleaseBaseAddr(NULL);
+#endif
+fail_acquire_base_addr:
+       /* The CPU virtual address acquired above is released before jumping here */
+fail_acquire_cpu_addr:
+       DevmemFwUnmapAndFree(psDevInfo, psFWMemContextMemDesc);
+fail_alloc_fw_ctx:
+       OSFreeMem(psServerMMUContext);
+fail_alloc_server_ctx:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+DEVMEM_MEMDESC *RGXGetFWMemDescFromMemoryContextHandle(IMG_HANDLE hPriv)
+{
+       SERVER_MMU_CONTEXT *psMMUContext = (SERVER_MMU_CONTEXT *) hPriv;
+
+       return psMMUContext->psFWMemContextMemDesc;
+}
+
+void RGXSetFWMemContextDevVirtAddr(SERVER_MMU_CONTEXT *psServerMMUContext,
+                                               RGXFWIF_DEV_VIRTADDR    sFWMemContextAddr)
+{
+       psServerMMUContext->sFWMemContextDevVirtAddr.ui32Addr = sFWMemContextAddr.ui32Addr;
+}
+
+void RGXCheckFaultAddress(PVRSRV_RGXDEV_INFO *psDevInfo,
+                               IMG_DEV_VIRTADDR *psDevVAddr,
+                               IMG_DEV_PHYADDR *psDevPAddr,
+                               MMU_FAULT_DATA *psOutFaultData)
+{
+       IMG_DEV_PHYADDR sPCDevPAddr;
+       DLLIST_NODE *psNode, *psNext;
+
+       OSWRLockAcquireRead(psDevInfo->hMemoryCtxListLock);
+
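+       /* match the faulting page catalogue physical address against each registered
+        * memory context, then fall back to the kernel memory context
+        */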
+       dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext)
+       {
+               SERVER_MMU_CONTEXT *psServerMMUContext =
+                       IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode);
+
+               if (MMU_AcquireBaseAddr(psServerMMUContext->psMMUContext, &sPCDevPAddr) != PVRSRV_OK)
+               {
+                       PVR_LOG(("Failed to get PC address for memory context"));
+                       continue;
+               }
+
+               if (psDevPAddr->uiAddr == sPCDevPAddr.uiAddr)
+               {
+                       MMU_CheckFaultAddress(psServerMMUContext->psMMUContext, psDevVAddr, psOutFaultData);
+                       goto out_unlock;
+               }
+       }
+
+       /* Lastly check for fault in the kernel allocated memory */
+       if (MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sPCDevPAddr) != PVRSRV_OK)
+       {
+               PVR_LOG(("Failed to get PC address for kernel memory context"));
+       }
+
+       if (psDevPAddr->uiAddr == sPCDevPAddr.uiAddr)
+       {
+               MMU_CheckFaultAddress(psDevInfo->psKernelMMUCtx, psDevVAddr, psOutFaultData);
+       }
+
+out_unlock:
+       OSWRLockReleaseRead(psDevInfo->hMemoryCtxListLock);
+}
+
+/* given the physical address of a page catalogue, searches for a corresponding
+ * MMU context and, if one is found, provides the caller with details of the
+ * owning process. Returns IMG_TRUE if a process is found.
+ */
+IMG_BOOL RGXPCAddrToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_PHYADDR sPCAddress,
+                                                               RGXMEM_PROCESS_INFO *psInfo)
+{
+       IMG_BOOL bRet = IMG_FALSE;
+       DLLIST_NODE *psNode, *psNext;
+       SERVER_MMU_CONTEXT *psServerMMUContext = NULL;
+
+       /* check if the input PC addr corresponds to an active memory context */
+       dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext)
+       {
+               SERVER_MMU_CONTEXT *psThisMMUContext =
+                       IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode);
+               IMG_DEV_PHYADDR sPCDevPAddr;
+
+               if (MMU_AcquireBaseAddr(psThisMMUContext->psMMUContext, &sPCDevPAddr) != PVRSRV_OK)
+               {
+                       PVR_LOG(("Failed to get PC address for memory context"));
+                       continue;
+               }
+
+               if (sPCAddress.uiAddr == sPCDevPAddr.uiAddr)
+               {
+                       psServerMMUContext = psThisMMUContext;
+                       break;
+               }
+       }
+
+       if (psServerMMUContext != NULL)
+       {
+               psInfo->uiPID = psServerMMUContext->uiPID;
+               OSStringLCopy(psInfo->szProcessName, psServerMMUContext->szProcessName, sizeof(psInfo->szProcessName));
+               psInfo->bUnregistered = IMG_FALSE;
+               bRet = IMG_TRUE;
+       }
+       /* else check if the input PC addr corresponds to the firmware */
+       else
+       {
+               IMG_DEV_PHYADDR sKernelPCDevPAddr;
+               PVRSRV_ERROR eError;
+
+               eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sKernelPCDevPAddr);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_LOG(("Failed to get PC address for kernel memory context"));
+               }
+               else
+               {
+                       if (sPCAddress.uiAddr == sKernelPCDevPAddr.uiAddr)
+                       {
+                               psInfo->uiPID = RGXMEM_SERVER_PID_FIRMWARE;
+                               OSStringLCopy(psInfo->szProcessName, "Firmware", sizeof(psInfo->szProcessName));
+                               psInfo->bUnregistered = IMG_FALSE;
+                               bRet = IMG_TRUE;
+                       }
+               }
+       }
+
+       if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) &&
+               (bRet == IMG_FALSE))
+       {
+               /* no active memory context found with the given PC address.
+                * Check the list of most recently freed memory contexts.
+                */
+               IMG_UINT32 i;
+
+               OSLockAcquire(psDevInfo->hMMUCtxUnregLock);
+
+               /* iterate through the list of unregistered memory contexts
+                * from newest (one before the head) to the oldest (the current head)
+                */
+               i = gui32UnregisteredMemCtxsHead;
+
+               do
+               {
+                       UNREGISTERED_MEMORY_CONTEXT *psRecord;
+
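+                       /* step backwards through the ring buffer, wrapping from
+                        * index 0 back to the last slot
+                        */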
+                       i ? i-- : (i = (UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1));
+
+                       psRecord = &gasUnregisteredMemCtxs[i];
+
+                       if (psRecord->sPCDevPAddr.uiAddr == sPCAddress.uiAddr)
+                       {
+                               psInfo->uiPID = psRecord->uiPID;
+                               OSStringLCopy(psInfo->szProcessName, psRecord->szProcessName, sizeof(psInfo->szProcessName));
+                               psInfo->bUnregistered = IMG_TRUE;
+                               bRet = IMG_TRUE;
+                               break;
+                       }
+               } while (i != gui32UnregisteredMemCtxsHead);
+
+               OSLockRelease(psDevInfo->hMMUCtxUnregLock);
+
+       }
+
+       return bRet;
+}
+
+IMG_BOOL RGXPCPIDToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_PID uiPID,
+                                                               RGXMEM_PROCESS_INFO *psInfo)
+{
+       IMG_BOOL bRet = IMG_FALSE;
+       DLLIST_NODE *psNode, *psNext;
+       SERVER_MMU_CONTEXT *psServerMMUContext = NULL;
+
+       /* check if the input PID corresponds to an active memory context */
+       dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext)
+       {
+               SERVER_MMU_CONTEXT *psThisMMUContext =
+                       IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode);
+
+               if (psThisMMUContext->uiPID == uiPID)
+               {
+                       psServerMMUContext = psThisMMUContext;
+                       break;
+               }
+       }
+
+       if (psServerMMUContext != NULL)
+       {
+               psInfo->uiPID = psServerMMUContext->uiPID;
+               OSStringLCopy(psInfo->szProcessName, psServerMMUContext->szProcessName, sizeof(psInfo->szProcessName));
+               psInfo->bUnregistered = IMG_FALSE;
+               bRet = IMG_TRUE;
+       }
+       /* else check if the input PID corresponds to the firmware */
+       else if (uiPID == RGXMEM_SERVER_PID_FIRMWARE)
+       {
+               psInfo->uiPID = RGXMEM_SERVER_PID_FIRMWARE;
+               OSStringLCopy(psInfo->szProcessName, "Firmware", sizeof(psInfo->szProcessName));
+               psInfo->bUnregistered = IMG_FALSE;
+               bRet = IMG_TRUE;
+       }
+
+       if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) &&
+               (bRet == IMG_FALSE))
+       {
+               /* if the PID didn't correspond to an active context or the
+                * FW address then see if it matches a recently unregistered context
+                */
+               const IMG_UINT32 ui32Mask = UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1;
+               IMG_UINT32 i, j;
+
+               OSLockAcquire(psDevInfo->hMMUCtxUnregLock);
+
+               for (i = (gui32UnregisteredMemCtxsHead - 1) & ui32Mask, j = 0;
+                    j < UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE;
+                    i = (i - 1) & ui32Mask, j++)
+               {
+                       UNREGISTERED_MEMORY_CONTEXT *psRecord = &gasUnregisteredMemCtxs[i];
+
+                       if (psRecord->uiPID == uiPID)
+                       {
+                               psInfo->uiPID = psRecord->uiPID;
+                               OSStringLCopy(psInfo->szProcessName, psRecord->szProcessName, sizeof(psInfo->szProcessName));
+                               psInfo->bUnregistered = IMG_TRUE;
+                               bRet = IMG_TRUE;
+                               break;
+                       }
+               }
+
+               OSLockRelease(psDevInfo->hMMUCtxUnregLock);
+       }
+
+       return bRet;
+}
+
+IMG_PID RGXGetPIDFromServerMMUContext(SERVER_MMU_CONTEXT *psServerMMUContext)
+{
+       if (psServerMMUContext)
+       {
+               return psServerMMUContext->uiPID;
+       }
+       return 0;
+}
+
+/******************************************************************************
+ End of file (rgxmem.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxmem.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxmem.h
new file mode 100644 (file)
index 0000000..cbcbed7
--- /dev/null
@@ -0,0 +1,147 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX memory context management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for RGX memory context management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXMEM_H)
+#define RGXMEM_H
+
+#include "pvrsrv_error.h"
+#include "device.h"
+#include "mmu_common.h"
+#include "rgxdevice.h"
+
+#define RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME 16
+
+/* this PID denotes the firmware */
+#define RGXMEM_SERVER_PID_FIRMWARE 0xFFFFFFFF
+
+/* this PID denotes the PM */
+#define RGXMEM_SERVER_PID_PM 0xEFFFFFFF
+
+typedef struct _RGXMEM_PROCESS_INFO_
+{
+       IMG_PID uiPID;
+       IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME];
+       IMG_BOOL bUnregistered;
+} RGXMEM_PROCESS_INFO;
+
+typedef struct SERVER_MMU_CONTEXT_TAG SERVER_MMU_CONTEXT;
+
+IMG_DEV_PHYADDR GetPC(MMU_CONTEXT * psContext);
+
+void RGXSetFWMemContextDevVirtAddr(SERVER_MMU_CONTEXT *psServerMMUContext,
+                       RGXFWIF_DEV_VIRTADDR    sFWMemContextAddr);
+
+void RGXMMUSyncPrimAlloc(PVRSRV_DEVICE_NODE *psDevNode);
+void RGXMMUSyncPrimFree(void);
+
+PVRSRV_ERROR RGXSLCFlushRange(PVRSRV_DEVICE_NODE *psDevNode,
+                                                         MMU_CONTEXT *psMMUContext,
+                                                         IMG_DEV_VIRTADDR sDevVAddr,
+                                                         IMG_DEVMEM_SIZE_T uiLength,
+                                                         IMG_BOOL bInvalidate);
+
+PVRSRV_ERROR RGXInvalidateFBSCTable(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                       MMU_CONTEXT *psMMUContext,
+                                                                       IMG_UINT64 ui64FBSCEntryMask);
+
+PVRSRV_ERROR RGXExtractFBSCEntryMaskFromMMUContext(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                                                  SERVER_MMU_CONTEXT *psServerMMUContext,
+                                                                                                  IMG_UINT64 *pui64FBSCEntryMask);
+
+void RGXMMUCacheInvalidate(PVRSRV_DEVICE_NODE *psDevNode,
+                                                  MMU_CONTEXT *psMMUContext,
+                                                  MMU_LEVEL eMMULevel,
+                                                  IMG_BOOL bUnmap);
+
+/*************************************************************************/ /*!
+@Function       RGXMMUCacheInvalidateKick
+
+@Description    Takes the power lock, ensures the device is powered on and
+                submits any pending MMU cache flush command to the GP DM.
+
+@Input          psDevNode   Device Node pointer
+@Input          pui32NextMMUInvalidateUpdate   Receives the MMU invalidate
+                            update value for the submitted command.
+
+@Return                        PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXMMUCacheInvalidateKick(PVRSRV_DEVICE_NODE *psDevNode,
+                                       IMG_UINT32 *pui32NextMMUInvalidateUpdate);
+
+/*************************************************************************/ /*!
+@Function       RGXPreKickCacheCommand
+
+@Description    Sends a cache flush command to a particular DM without taking
+                the power lock. It is the caller's responsibility to ensure
+                the power lock is held before calling this function.
+
+@Input          psDevInfo   Device Info
+@Input          eDM         The DM to which the command is sent.
+@Input          pui32MMUInvalidateUpdate
+
+@Return                        PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXPreKickCacheCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                       RGXFWIF_DM eDM,
+                                                                       IMG_UINT32 *pui32MMUInvalidateUpdate);
+
+void RGXUnregisterMemoryContext(IMG_HANDLE hPrivData);
+PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE       *psDevNode,
+                                                                         MMU_CONTEXT                   *psMMUContext,
+                                                                         IMG_HANDLE                    *hPrivData);
+
+DEVMEM_MEMDESC *RGXGetFWMemDescFromMemoryContextHandle(IMG_HANDLE hPriv);
+
+void RGXCheckFaultAddress(PVRSRV_RGXDEV_INFO *psDevInfo,
+                               IMG_DEV_VIRTADDR *psDevVAddr,
+                               IMG_DEV_PHYADDR *psDevPAddr,
+                               MMU_FAULT_DATA *psOutFaultData);
+
+IMG_BOOL RGXPCAddrToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_PHYADDR sPCAddress,
+                                                               RGXMEM_PROCESS_INFO *psInfo);
+
+IMG_BOOL RGXPCPIDToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_PID uiPID,
+                               RGXMEM_PROCESS_INFO *psInfo);
+
+IMG_PID RGXGetPIDFromServerMMUContext(SERVER_MMU_CONTEXT *psServerMMUContext);
+
+#endif /* RGXMEM_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxmulticore.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxmulticore.h
new file mode 100644 (file)
index 0000000..b45a20a
--- /dev/null
@@ -0,0 +1,54 @@
+/*************************************************************************/ /*!
+@File           rgxmulticore.h
+@Title          Functions related to multicore devices
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for functions related to multicore device
+                initialisation and teardown.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXMULTICORE_H
+#define RGXMULTICORE_H
+
+#include "pvrsrv_error.h"
+#include "pvrsrv.h"
+
+PVRSRV_ERROR RGXInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode);
+void RGXDeInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+#endif /* RGXMULTICORE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxpdump.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxpdump.h
new file mode 100644 (file)
index 0000000..54443a5
--- /dev/null
@@ -0,0 +1,228 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX pdump Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX pdump functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXPDUMP_H)
+#define RGXPDUMP_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "rgxdevice.h"
+#include "device.h"
+#include "devicemem.h"
+#include "pdump_km.h"
+#include "pvr_debug.h"
+
+#if defined(PDUMP)
+/*!
+*******************************************************************************
+
+ @Function     PVRSRVPDumpSignatureBufferKM
+
+ @Description
+
+ Dumps TA and 3D signature and checksum buffers
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVPDumpSignatureBufferKM(CONNECTION_DATA * psConnection,
+                                          PVRSRV_DEVICE_NODE * psDeviceNode,
+                                          IMG_UINT32 ui32PDumpFlags);
+
+#if defined(SUPPORT_VALIDATION)
+/*!
+*******************************************************************************
+
+ @Function     PVRSRVPDumpComputeCRCSignatureCheckKM
+
+ @Description
+
+ Poll on the Compute CRC signature check status
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVPDumpComputeCRCSignatureCheckKM(CONNECTION_DATA * psConnection,
+                                                   PVRSRV_DEVICE_NODE * psDeviceNode,
+                                                   IMG_UINT32 ui32PDumpFlags);
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function     PVRSRVPDumpCRCSignatureCheckKM
+
+ @Description
+
+ Poll on FBC/FBDC end-to-end signature status
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVPDumpCRCSignatureCheckKM(CONNECTION_DATA * psConnection,
+                                            PVRSRV_DEVICE_NODE * psDeviceNode,
+                                            IMG_UINT32 ui32PDumpFlags);
+
+/*!
+*******************************************************************************
+
+ @Function     PVRSRVPDumpValCheckPreCommandKM
+
+ @Description
+
+ Poll on various GPU status/signature status for validation, before
+ sending the GPU command.
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVPDumpValCheckPreCommandKM(CONNECTION_DATA * psConnection,
+                                             PVRSRV_DEVICE_NODE * psDeviceNode,
+                                             IMG_UINT32 ui32PDumpFlags);
+
+/*!
+*******************************************************************************
+
+ @Function     PVRSRVPDumpValCheckPostCommandKM
+
+ @Description
+
+ Poll on various GPU status/signature status for validation, after
+ sending the GPU command.
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVPDumpValCheckPostCommandKM(CONNECTION_DATA * psConnection,
+                                              PVRSRV_DEVICE_NODE * psDeviceNode,
+                                              IMG_UINT32 ui32PDumpFlags);
+
+/*!
+*******************************************************************************
+
+ @Function     PVRSRVPDumpTraceBufferKM
+
+ @Description
+
+ Dumps the firmware trace buffer
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVPDumpTraceBufferKM(CONNECTION_DATA * psConnection,
+                                      PVRSRV_DEVICE_NODE *psDeviceNode,
+                                      IMG_UINT32 ui32PDumpFlags);
+
+/*!
+*******************************************************************************
+
+ @Function     RGXPDumpPrepareOutputImageDescriptorHdr
+
+ @Description
+
+ Prepares the header for an OutputImage PDump command
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXPDumpPrepareOutputImageDescriptorHdr(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                       IMG_UINT32 ui32HeaderSize,
+                                                                       IMG_UINT32 ui32DataSize,
+                                                                       IMG_UINT32 ui32LogicalWidth,
+                                                                       IMG_UINT32 ui32LogicalHeight,
+                                                                       IMG_UINT32 ui32PhysicalWidth,
+                                                                       IMG_UINT32 ui32PhysicalHeight,
+                                                                       PDUMP_PIXEL_FORMAT ePixFmt,
+                                                                       IMG_MEMLAYOUT eMemLayout,
+                                                                       IMG_FB_COMPRESSION eFBCompression,
+                                                                       const IMG_UINT32 *paui32FBCClearColour,
+                                                                       PDUMP_FBC_SWIZZLE eFBCSwizzle,
+                                                                       IMG_PBYTE abyPDumpDesc);
+
+/*!
+*******************************************************************************
+
+ @Function     RGXPDumpPrepareOutputDataDescriptorHdr
+
+ @Description
+
+ Prepares the header for an OutputData PDump command
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXPDumpPrepareOutputDataDescriptorHdr(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                       IMG_UINT32 ui32HeaderType,
+                                                                       IMG_UINT32 ui32DataSize,
+                                                                       IMG_UINT32 ui32ElementType,
+                                                                       IMG_UINT32 ui32ElementCount,
+                                                                       IMG_PBYTE pbyPDumpDataHdr);
+
+#else /* PDUMP */
+
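+/* With PDUMP disabled, provide static inline no-op stubs so callers can invoke
+ * these entry points unconditionally.
+ */
+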
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVPDumpSignatureBufferKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVPDumpSignatureBufferKM(CONNECTION_DATA * psConnection,
+                             PVRSRV_DEVICE_NODE *psDeviceNode,
+                             IMG_UINT32 ui32PDumpFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+       return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVPDumpTraceBufferKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVPDumpTraceBufferKM(CONNECTION_DATA * psConnection,
+                         PVRSRV_DEVICE_NODE *psDeviceNode,
+                         IMG_UINT32 ui32PDumpFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+       return PVRSRV_OK;
+}
+#endif /* PDUMP */
+
+#endif /* RGXPDUMP_H */
+/******************************************************************************
+ End of file (rgxpdump.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxpdvfs.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxpdvfs.c
new file mode 100644 (file)
index 0000000..7240c96
--- /dev/null
@@ -0,0 +1,284 @@
+/*************************************************************************/ /*!
+@File           rgxpdvfs.c
+@Title          RGX Proactive DVFS Functionality
+@Codingstyle    IMG
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Kernel mode Proactive DVFS Functionality.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxpdvfs.h"
+#include "rgxfwutils.h"
+#include "rgx_options.h"
+#include "rgxtimecorr.h"
+
+#define USEC_TO_MSEC 1000
+
+static inline IMG_BOOL _PDVFSEnabled(void)
+{
+       PVRSRV_DATA *psSRVData = PVRSRVGetPVRSRVData();
+
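+       /* PDVFS is reported as enabled only when both the KM and UM builds were
+        * configured with the PDVFS option (bitwise AND of the two build-option
+        * masks).
+        */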
+       if (psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildOptions &
+           psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildOptions &
+           OPTIONS_PDVFS_MASK)
+       {
+               return IMG_TRUE;
+       }
+
+       return IMG_FALSE;
+}
+
+PVRSRV_ERROR PDVFSLimitMaxFrequency(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32MaxOPPPoint)
+{
+       RGXFWIF_KCCB_CMD                sGPCCBCmd;
+       PVRSRV_ERROR                    eError;
+       IMG_UINT32                              ui32CmdKCCBSlot;
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+       if (!_PDVFSEnabled())
+       {
+               /* No error message to avoid excessive messages */
+               return PVRSRV_OK;
+       }
+
+       /* Send the new maximum OPP limit to the firmware */
+       sGPCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ;
+       sGPCCBCmd.uCmdData.sPDVFSMaxFreqData.ui32MaxOPPPoint = ui32MaxOPPPoint;
+
+       /* Submit command to the firmware.  */
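+       /* Keep retrying while the command cannot be queued (PVRSRV_ERROR_RETRY),
+        * for up to MAX_HW_TIME_US.
+        */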
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               eError = RGXSendCommandAndGetKCCBSlot(psDevInfo,
+                                                     &sGPCCBCmd,
+                                                     PDUMP_FLAGS_CONTINUOUS,
+                                                     &ui32CmdKCCBSlot);
+               if (eError != PVRSRV_ERROR_RETRY)
+               {
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       return eError;
+}
+
+PVRSRV_ERROR PDVFSLimitMinFrequency(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32MinOPPPoint)
+{
+       RGXFWIF_KCCB_CMD                sGPCCBCmd;
+       PVRSRV_ERROR                    eError;
+       IMG_UINT32                              ui32CmdKCCBSlot;
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+       if (!_PDVFSEnabled())
+       {
+               /* No error message to avoid excessive messages */
+               return PVRSRV_OK;
+       }
+
+       /* Send the new minimum OPP limit to the firmware */
+       sGPCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MIN_FREQ;
+       sGPCCBCmd.uCmdData.sPDVFSMinFreqData.ui32MinOPPPoint = ui32MinOPPPoint;
+
+       /* Submit command to the firmware.  */
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               eError = RGXSendCommandAndGetKCCBSlot(psDevInfo,
+                                                     &sGPCCBCmd,
+                                                     PDUMP_FLAGS_CONTINUOUS,
+                                                     &ui32CmdKCCBSlot);
+               if (eError != PVRSRV_ERROR_RETRY)
+               {
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       return eError;
+}
+
+
+#if (PDVFS_COM == PDVFS_COM_HOST)
+/*************************************************************************/ /*!
+@Function       PDVFSProcessCoreClkChangeRequest
+@Description    Processes a core clock rate change request.
+@Input          psDevInfo            A pointer to PVRSRV_RGXDEV_INFO.
+@Input          ui32CoreClockRate    New core clock rate.
+@Return         PVRSRV_ERROR.
+*/ /**************************************************************************/
+PVRSRV_ERROR PDVFSProcessCoreClkChangeRequest(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32CoreClockRate)
+{
+       PVRSRV_DEVICE_CONFIG *psDevConfig = psDevInfo->psDeviceNode->psDevConfig;
+       IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg = &psDevConfig->sDVFS.sDVFSDeviceCfg;
+       RGX_TIMING_INFORMATION *psRGXTimingInfo = ((RGX_DATA*)(psDevConfig->hDevData))->psRGXTimingInfo;
+       IMG_UINT32 ui32CoreClockRateCurrent = psRGXTimingInfo->ui32CoreClockSpeed;
+       const IMG_OPP *psOpp = NULL;
+       IMG_UINT32 ui32Index;
+       PVRSRV_ERROR eError;
+
+       if (!_PDVFSEnabled())
+       {
+               /* No error message to avoid excessive messages */
+               return PVRSRV_OK;
+       }
+
+       PVR_DPF((PVR_DBG_MESSAGE, "Core clock rate = %u", ui32CoreClockRate));
+
+       /* Find the matching OPP (Exact). */
+       for (ui32Index = 0; ui32Index < psDVFSDeviceCfg->ui32OPPTableSize; ui32Index++)
+       {
+               if (ui32CoreClockRate == psDVFSDeviceCfg->pasOPPTable[ui32Index].ui32Freq)
+               {
+                       psOpp = &psDVFSDeviceCfg->pasOPPTable[ui32Index];
+                       break;
+               }
+       }
+
+       if (! psOpp)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Frequency not present in OPP table - %u", ui32CoreClockRate));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       eError = PVRSRVDevicePreClockSpeedChange(psDevInfo->psDeviceNode, psDVFSDeviceCfg->bIdleReq, NULL);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "PVRSRVDevicePreClockSpeedChange failed"));
+               return eError;
+       }
+
+       psRGXTimingInfo->ui32CoreClockSpeed = ui32CoreClockRate;
+
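+       /* Keep the voltage sufficient for the programmed clock at all times:
+        * raise the voltage before raising the clock, and lower the clock before
+        * lowering the voltage.
+        */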
+       /* Increasing frequency, change voltage first */
+       if (ui32CoreClockRate > ui32CoreClockRateCurrent)
+       {
+               psDVFSDeviceCfg->pfnSetVoltage(psOpp->ui32Volt);
+       }
+
+       psDVFSDeviceCfg->pfnSetFrequency(ui32CoreClockRate);
+
+       /* Decreasing frequency, change frequency first */
+       if (ui32CoreClockRate < ui32CoreClockRateCurrent)
+       {
+               psDVFSDeviceCfg->pfnSetVoltage(psOpp->ui32Volt);
+       }
+
+       PVRSRVDevicePostClockSpeedChange(psDevInfo->psDeviceNode, psDVFSDeviceCfg->bIdleReq, NULL);
+
+       return PVRSRV_OK;
+}
+#else
+/*************************************************************************/ /*!
+@Function       PDVFSProcessCoreClkChangeNotification
+@Description    Processes a core clock rate change notification.
+@Input          psDevInfo            A pointer to PVRSRV_RGXDEV_INFO.
+@Input          ui32CoreClockRate    New core clock rate.
+@Return         PVRSRV_ERROR.
+*/ /**************************************************************************/
+PVRSRV_ERROR PDVFSProcessCoreClkChangeNotification(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32CoreClockRate)
+{
+       PVRSRV_DEVICE_CONFIG *psDevConfig = psDevInfo->psDeviceNode->psDevConfig;
+       RGX_TIMING_INFORMATION *psRGXTimingInfo = ((RGX_DATA*)(psDevConfig->hDevData))->psRGXTimingInfo;
+       PVRSRV_DEV_POWER_STATE ePowerState;
+       PVRSRV_ERROR eError;
+
+       eError = PVRSRVPowerLock(psDevInfo->psDeviceNode);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire lock (%s)",
+                                __func__, PVRSRVGetErrorString(eError)));
+               return eError;
+       }
+
+       eError = PVRSRVGetDevicePowerState(psDevInfo->psDeviceNode, &ePowerState);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire power state (%s)",
+                                __func__, PVRSRVGetErrorString(eError)));
+               PVRSRVPowerUnlock(psDevInfo->psDeviceNode);
+               return eError;
+       }
+
+       /* Guest drivers do not initialize psRGXFWIfFwSysData */
+       if ((ePowerState != PVRSRV_DEV_POWER_STATE_OFF)
+           && ((psDevInfo->psRGXFWIfFwSysData == NULL) || (psDevInfo->psRGXFWIfFwSysData->ePowState != RGXFWIF_POW_OFF)))
+       {
+               /* Update GPU frequency and timer correlation related data */
+               RGXTimeCorrEnd(psDevInfo->psDeviceNode, RGXTIMECORR_EVENT_DVFS);
+               psRGXTimingInfo->ui32CoreClockSpeed = ui32CoreClockRate;
+               RGXTimeCorrBegin(psDevInfo->psDeviceNode, RGXTIMECORR_EVENT_DVFS);
+       }
+       else
+       {
+               psRGXTimingInfo->ui32CoreClockSpeed = ui32CoreClockRate;
+       }
+
+       PVRSRVPowerUnlock(psDevInfo->psDeviceNode);
+
+       return PVRSRV_OK;
+}
+#endif
+
+
+#if defined(RGXFW_META_SUPPORT_2ND_THREAD)
+/*************************************************************************/ /*!
+@Function       RGXPDVFSCheckCoreClkRateChange
+@Description    Checks whether the core clock rate has changed since the last snapshot.
+@Input          psDevInfo    A pointer to PVRSRV_RGXDEV_INFO.
+@Return         None.
+*/ /**************************************************************************/
+void RGXPDVFSCheckCoreClkRateChange(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       IMG_UINT32 ui32CoreClkRate = *psDevInfo->pui32RGXFWIFCoreClkRate;
+
+       if (!_PDVFSEnabled())
+       {
+               /* Return silently to avoid flooding the log */
+               return;
+       }
+
+       if (ui32CoreClkRate != 0 && psDevInfo->ui32CoreClkRateSnapshot != ui32CoreClkRate)
+       {
+               psDevInfo->ui32CoreClkRateSnapshot = ui32CoreClkRate;
+               PDVFS_PROCESS_CORE_CLK_RATE_CHANGE(psDevInfo, ui32CoreClkRate);
+       }
+}
+#endif
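
The ordering implemented by PDVFSProcessCoreClkChangeRequest() above follows the usual DVFS rule: raise the voltage before raising the clock, and lower the voltage only after the clock has been lowered. The stand-alone sketch below restates that rule in isolation; the names and callback types (set_voltage_fn, set_frequency_fn, dvfs_apply_opp) are illustrative assumptions, not driver code.

/* Illustrative sketch only: the voltage/frequency ordering rule assumed above. */
#include <stdint.h>

typedef void (*set_voltage_fn)(uint32_t microvolts);
typedef void (*set_frequency_fn)(uint32_t hz);

static void dvfs_apply_opp(uint32_t cur_hz, uint32_t new_hz, uint32_t new_uv,
                           set_voltage_fn set_voltage,
                           set_frequency_fn set_frequency)
{
        if (new_hz > cur_hz)
        {
                /* Going faster: the higher clock needs the higher voltage,
                 * so raise the voltage before switching the clock. */
                set_voltage(new_uv);
                set_frequency(new_hz);
        }
        else
        {
                /* Going slower (or unchanged): the present voltage is still
                 * sufficient while the clock drops, so switch the clock first
                 * and only then lower the voltage. */
                set_frequency(new_hz);
                set_voltage(new_uv);
        }
}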
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxpdvfs.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxpdvfs.h
new file mode 100644 (file)
index 0000000..13a94b5
--- /dev/null
@@ -0,0 +1,68 @@
+/*************************************************************************/ /*!
+@File           rgxpdvfs.h
+@Title          RGX Proactive DVFS Functionality
+@Codingstyle    IMG
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the kernel mode Proactive DVFS Functionality.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXPDVFS_H
+#define RGXPDVFS_H
+
+#include "img_types.h"
+#include "rgxdevice.h"
+
+
+PVRSRV_ERROR PDVFSLimitMaxFrequency(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32MaxOPPPoint);
+
+PVRSRV_ERROR PDVFSLimitMinFrequency(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32MinOPPPoint);
+
+#if (PDVFS_COM == PDVFS_COM_HOST)
+PVRSRV_ERROR PDVFSProcessCoreClkChangeRequest(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32CoreClockRate);
+#define PDVFS_PROCESS_CORE_CLK_RATE_CHANGE(devinfo, clk)  PDVFSProcessCoreClkChangeRequest(devinfo, clk)
+#else
+PVRSRV_ERROR PDVFSProcessCoreClkChangeNotification(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32CoreClockRate);
+#define PDVFS_PROCESS_CORE_CLK_RATE_CHANGE(devinfo, clk)  PDVFSProcessCoreClkChangeNotification(devinfo, clk)
+#endif
+
+#if defined(RGXFW_META_SUPPORT_2ND_THREAD)
+void RGXPDVFSCheckCoreClkRateChange(PVRSRV_RGXDEV_INFO *psDevInfo);
+#endif
+
+#endif /* RGXPDVFS_H */
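
A minimal usage sketch of the two limit functions declared above, assuming a valid psDevInfo. The OPP indices used here (1 and 3) are placeholders, not values taken from this driver.

/* Illustrative sketch only: clamp the proactive DVFS operating range. */
static PVRSRV_ERROR ExampleClampOPPRange(PVRSRV_RGXDEV_INFO *psDevInfo)
{
        PVRSRV_ERROR eError;

        /* Do not go above OPP index 3 ... */
        eError = PDVFSLimitMaxFrequency(psDevInfo, 3);
        if (eError != PVRSRV_OK)
        {
                return eError;
        }

        /* ... and do not go below OPP index 1. */
        return PDVFSLimitMinFrequency(psDevInfo, 1);
}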
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxregconfig.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxregconfig.c
new file mode 100644 (file)
index 0000000..ef39bea
--- /dev/null
@@ -0,0 +1,319 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Register configuration
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX Regconfig routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxregconfig.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "device.h"
+#include "sync_internal.h"
+#include "pdump_km.h"
+#include "pvrsrv.h"
+
+PVRSRV_ERROR PVRSRVRGXSetRegConfigTypeKM(CONNECTION_DATA * psDevConnection,
+                                         PVRSRV_DEVICE_NODE     *psDeviceNode,
+                                         IMG_UINT8           ui8RegCfgType)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+       PVRSRV_ERROR          eError      = PVRSRV_OK;
+       PVRSRV_RGXDEV_INFO    *psDevInfo  = psDeviceNode->pvDevice;
+       RGX_REG_CONFIG        *psRegCfg   = &psDevInfo->sRegCongfig;
+       RGXFWIF_REG_CFG_TYPE  eRegCfgType = (RGXFWIF_REG_CFG_TYPE) ui8RegCfgType;
+
+       PVR_UNREFERENCED_PARAMETER(psDevConnection);
+
+       OSLockAcquire(psRegCfg->hLock);
+
+       if (eRegCfgType < psRegCfg->eRegCfgTypeToPush)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Requested register configuration type (%d) is invalid; it must be at least %d."
+                        " Configurations of different types must be pushed in order",
+                        __func__,
+                        eRegCfgType,
+                        psRegCfg->eRegCfgTypeToPush));
+               OSLockRelease(psRegCfg->hLock);
+               return PVRSRV_ERROR_REG_CONFIG_INVALID_TYPE;
+       }
+
+       psRegCfg->eRegCfgTypeToPush = eRegCfgType;
+
+       OSLockRelease(psRegCfg->hLock);
+
+       return eError;
+#else
+       PVR_UNREFERENCED_PARAMETER(psDevConnection);
+
+       PVR_DPF((PVR_DBG_ERROR,
+                "%s: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION",
+                __func__));
+       return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+PVRSRV_ERROR PVRSRVRGXAddRegConfigKM(CONNECTION_DATA * psConnection,
+                                     PVRSRV_DEVICE_NODE        *psDeviceNode,
+                                     IMG_UINT32                ui32RegAddr,
+                                     IMG_UINT64                ui64RegValue,
+                                     IMG_UINT64                ui64RegMask)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+       PVRSRV_ERROR            eError = PVRSRV_OK;
+       RGXFWIF_KCCB_CMD        sRegCfgCmd;
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+       RGX_REG_CONFIG          *psRegCfg = &psDevInfo->sRegCongfig;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+       OSLockAcquire(psRegCfg->hLock);
+
+       if (psRegCfg->bEnabled)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Cannot add record whilst register configuration active.",
+                        __func__));
+               OSLockRelease(psRegCfg->hLock);
+               return PVRSRV_ERROR_REG_CONFIG_ENABLED;
+       }
+       if (psRegCfg->ui32NumRegRecords == RGXFWIF_REG_CFG_MAX_SIZE)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Register configuration full.",
+                        __func__));
+               OSLockRelease(psRegCfg->hLock);
+               return PVRSRV_ERROR_REG_CONFIG_FULL;
+       }
+
+       sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG;
+       sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Addr = (IMG_UINT64) ui32RegAddr;
+       sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Value = ui64RegValue;
+       sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Mask = ui64RegMask;
+       sRegCfgCmd.uCmdData.sRegConfigData.eRegConfigType = psRegCfg->eRegCfgTypeToPush;
+       sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_ADD;
+
+       eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+                               RGXFWIF_DM_GP,
+                               &sRegCfgCmd,
+                               PDUMP_FLAGS_CONTINUOUS);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: RGXScheduleCommand failed. Error:%u",
+                        __func__,
+                        eError));
+               OSLockRelease(psRegCfg->hLock);
+               return eError;
+       }
+
+       psRegCfg->ui32NumRegRecords++;
+
+       OSLockRelease(psRegCfg->hLock);
+
+       return eError;
+#else
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       PVR_DPF((PVR_DBG_ERROR,
+                "%s: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION",
+                __func__));
+       return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+PVRSRV_ERROR PVRSRVRGXClearRegConfigKM(CONNECTION_DATA * psConnection,
+                                       PVRSRV_DEVICE_NODE      *psDeviceNode)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+       PVRSRV_ERROR            eError = PVRSRV_OK;
+       RGXFWIF_KCCB_CMD        sRegCfgCmd;
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+       RGX_REG_CONFIG          *psRegCfg = &psDevInfo->sRegCongfig;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+       OSLockAcquire(psRegCfg->hLock);
+
+       if (psRegCfg->bEnabled)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Attempt to clear register configuration whilst active.",
+                        __func__));
+               OSLockRelease(psRegCfg->hLock);
+               return PVRSRV_ERROR_REG_CONFIG_ENABLED;
+       }
+
+       sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG;
+       sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_CLEAR;
+
+       eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+                               RGXFWIF_DM_GP,
+                               &sRegCfgCmd,
+                               PDUMP_FLAGS_CONTINUOUS);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: RGXScheduleCommand failed. Error:%u",
+                        __func__,
+                        eError));
+               OSLockRelease(psRegCfg->hLock);
+               return eError;
+       }
+
+       psRegCfg->ui32NumRegRecords = 0;
+       psRegCfg->eRegCfgTypeToPush = RGXFWIF_REG_CFG_TYPE_PWR_ON;
+
+       OSLockRelease(psRegCfg->hLock);
+
+       return eError;
+#else
+       PVR_DPF((PVR_DBG_ERROR,
+                "%s: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION",
+                __func__));
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+PVRSRV_ERROR PVRSRVRGXEnableRegConfigKM(CONNECTION_DATA * psConnection,
+                                        PVRSRV_DEVICE_NODE     *psDeviceNode)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+       PVRSRV_ERROR            eError = PVRSRV_OK;
+       RGXFWIF_KCCB_CMD        sRegCfgCmd;
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+       RGX_REG_CONFIG          *psRegCfg = &psDevInfo->sRegCongfig;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+       OSLockAcquire(psRegCfg->hLock);
+
+       sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG;
+       sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_ENABLE;
+
+       eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+                               RGXFWIF_DM_GP,
+                               &sRegCfgCmd,
+                               PDUMP_FLAGS_CONTINUOUS);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: RGXScheduleCommand failed. Error:%u",
+                        __func__,
+                        eError));
+               OSLockRelease(psRegCfg->hLock);
+               return eError;
+       }
+
+       psRegCfg->bEnabled = IMG_TRUE;
+
+       OSLockRelease(psRegCfg->hLock);
+
+       return eError;
+#else
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       PVR_DPF((PVR_DBG_ERROR,
+                "%s: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION",
+                __func__));
+       return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+PVRSRV_ERROR PVRSRVRGXDisableRegConfigKM(CONNECTION_DATA * psConnection,
+                                         PVRSRV_DEVICE_NODE    *psDeviceNode)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+       PVRSRV_ERROR            eError = PVRSRV_OK;
+       RGXFWIF_KCCB_CMD        sRegCfgCmd;
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+       RGX_REG_CONFIG          *psRegCfg = &psDevInfo->sRegCongfig;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+       OSLockAcquire(psRegCfg->hLock);
+
+       sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG;
+       sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_DISABLE;
+
+       eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+                               RGXFWIF_DM_GP,
+                               &sRegCfgCmd,
+                               PDUMP_FLAGS_CONTINUOUS);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: RGXScheduleCommand failed. Error:%u",
+                        __func__,
+                        eError));
+               OSLockRelease(psRegCfg->hLock);
+               return eError;
+       }
+
+       psRegCfg->bEnabled = IMG_FALSE;
+
+       OSLockRelease(psRegCfg->hLock);
+
+       return eError;
+#else
+       PVR_DPF((PVR_DBG_ERROR,
+                "%s: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION",
+                __func__));
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+/******************************************************************************
+ End of file (rgxregconfig.c)
+******************************************************************************/
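
As a rough usage sketch of the interface implemented above (only available when SUPPORT_USER_REGISTER_CONFIGURATION is defined): a configuration type is selected first, records are added while the configuration is disabled, and the firmware is then told to apply them. The register address, value and mask below are placeholders; the example function itself is hypothetical.

/* Illustrative sketch only: expected calling sequence for the user
 * register configuration interface. */
static PVRSRV_ERROR ExampleProgramPowerOnRegs(CONNECTION_DATA *psConnection,
                                              PVRSRV_DEVICE_NODE *psDeviceNode)
{
        PVRSRV_ERROR eError;

        /* Records pushed after this call belong to the power-on type. */
        eError = PVRSRVRGXSetRegConfigTypeKM(psConnection, psDeviceNode,
                                             (IMG_UINT8) RGXFWIF_REG_CFG_TYPE_PWR_ON);
        if (eError != PVRSRV_OK)
        {
                return eError;
        }

        /* Queue one register write; adding is rejected while the
         * configuration is enabled. */
        eError = PVRSRVRGXAddRegConfigKM(psConnection, psDeviceNode,
                                         0x0,                /* register address (placeholder) */
                                         0x0,                /* value (placeholder) */
                                         ~(IMG_UINT64) 0);   /* mask (placeholder) */
        if (eError != PVRSRV_OK)
        {
                return eError;
        }

        /* Ask the firmware to start applying the queued configuration. */
        return PVRSRVRGXEnableRegConfigKM(psConnection, psDeviceNode);
}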
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxregconfig.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxregconfig.h
new file mode 100644 (file)
index 0000000..b0921d9
--- /dev/null
@@ -0,0 +1,130 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX register configuration functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX register configuration functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXREGCONFIG_H)
+#define RGXREGCONFIG_H
+
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_km.h"
+
+/*!
+*******************************************************************************
+ @Function     PVRSRVRGXSetRegConfigTypeKM
+
+ @Description
+       Server-side implementation of RGXSetRegConfig
+
+ @Input psDeviceNode - RGX Device node
+ @Input ui8RegCfgType - Register configuration type
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXSetRegConfigTypeKM(CONNECTION_DATA * psDevConnection,
+                                         PVRSRV_DEVICE_NODE    *psDeviceNode,
+                                         IMG_UINT8 ui8RegCfgType);
+/*!
+*******************************************************************************
+ @Function     PVRSRVRGXAddRegConfigKM
+
+ @Description
+       Server-side implementation of RGXAddRegConfig
+
+ @Input psDeviceNode - RGX Device node
+ @Input ui32RegAddr - Register address
+ @Input ui64RegValue - Reg value
+ @Input ui64RegMask - Reg mask
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+
+PVRSRV_ERROR PVRSRVRGXAddRegConfigKM(CONNECTION_DATA * psConnection,
+                                     PVRSRV_DEVICE_NODE        *psDeviceNode,
+                                     IMG_UINT32        ui32RegAddr,
+                                     IMG_UINT64        ui64RegValue,
+                                     IMG_UINT64        ui64RegMask);
+
+/*!
+*******************************************************************************
+ @Function     PVRSRVRGXClearRegConfigKM
+
+ @Description
+       Server-side implementation of RGXClearRegConfig
+
+ @Input psDeviceNode - RGX Device node
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXClearRegConfigKM(CONNECTION_DATA * psConnection,
+                                       PVRSRV_DEVICE_NODE      *psDeviceNode);
+
+/*!
+*******************************************************************************
+ @Function     PVRSRVRGXEnableRegConfigKM
+
+ @Description
+       Server-side implementation of RGXEnableRegConfig
+
+ @Input psDeviceNode - RGX Device node
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXEnableRegConfigKM(CONNECTION_DATA * psConnection,
+                                        PVRSRV_DEVICE_NODE     *psDeviceNode);
+
+/*!
+*******************************************************************************
+ @Function     PVRSRVRGXDisableRegConfigKM
+
+ @Description
+       Server-side implementation of RGXDisableRegConfig
+
+ @Input psDeviceNode - RGX Device node
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDisableRegConfigKM(CONNECTION_DATA * psConnection,
+                                         PVRSRV_DEVICE_NODE    *psDeviceNode);
+
+#endif /* RGXREGCONFIG_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxshader.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxshader.c
new file mode 100644 (file)
index 0000000..407c0fb
--- /dev/null
@@ -0,0 +1,302 @@
+/*************************************************************************/ /*!
+@File           rgxshader.c
+@Title          TQ Shader Load
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Shader code and info are shared by all contexts on the device.
+                If the allocation doesn't already exist, the shader data is read
+                from file and PMR memory is allocated. The PMR memory is not
+                deallocated until device deinit.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxshader.h"
+#include "osfunc_common.h"
+#include "rgxdevice.h"
+#include "pdump_km.h"
+#include "physmem.h"
+#include "ri_server.h"
+#include "pvr_ricommon.h"
+
+static void
+RGXShaderReadHeader(OS_FW_IMAGE *psShaderFW, RGX_SHADER_HEADER *psHeader)
+{
+       const void * pvData;
+
+       pvData = OSFirmwareData(psShaderFW);
+
+       OSDeviceMemCopy(psHeader, pvData, sizeof(RGX_SHADER_HEADER));
+}
+
+static size_t
+RGXShaderCLIMemSize(OS_FW_IMAGE *psShaderFW)
+{
+       RGX_SHADER_HEADER sHeader;
+
+       RGXShaderReadHeader(psShaderFW, &sHeader);
+
+       return sHeader.ui32SizeClientMem;
+}
+
+static size_t
+RGXShaderUSCMemSize(OS_FW_IMAGE *psShaderFW)
+{
+       RGX_SHADER_HEADER sHeader;
+
+       RGXShaderReadHeader(psShaderFW, &sHeader);
+
+       return sHeader.ui32SizeFragment;
+}
+
+static void *
+RGXShaderCLIMem(OS_FW_IMAGE *psShaderFW)
+{
+       return (void*)OSFirmwareData(psShaderFW);
+}
+
+static void *
+RGXShaderUSCMem(OS_FW_IMAGE *psShaderFW)
+{
+       IMG_PBYTE pui8Data;
+
+       pui8Data = (IMG_PBYTE)OSFirmwareData(psShaderFW);
+
+       pui8Data += RGXShaderCLIMemSize(psShaderFW);
+
+       return (void*) pui8Data;
+}
+
+#define RGX_SHADER_FILENAME_MAX_SIZE   ((sizeof(RGX_SH_FILENAME)+      \
+                                                                                RGX_BVNC_STR_SIZE_MAX))
+
+static void
+_GetShaderFileName(PVRSRV_DEVICE_NODE * psDeviceNode,
+                                  IMG_CHAR           * pszShaderFilenameStr,
+                                  IMG_CHAR           * pszShaderpFilenameStr)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       OSSNPrintf(pszShaderFilenameStr, RGX_SHADER_FILENAME_MAX_SIZE,
+                          "%s." RGX_BVNC_STR_FMTSPEC,
+                          RGX_SH_FILENAME,
+                          psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V,
+                          psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C);
+
+       OSSNPrintf(pszShaderpFilenameStr, RGX_SHADER_FILENAME_MAX_SIZE,
+                          "%s." RGX_BVNC_STRP_FMTSPEC,
+                          RGX_SH_FILENAME,
+                          psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V,
+                          psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C);
+}
+
+PVRSRV_ERROR
+PVRSRVTQLoadShaders(PVRSRV_DEVICE_NODE * psDeviceNode)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       OS_FW_IMAGE        *psShaderFW;
+       RGX_SHADER_HEADER   sHeader;
+       IMG_UINT32          ui32MappingTable = 0;
+       IMG_UINT32          ui32NumPages;
+       IMG_CHAR            aszShaderFilenameStr[RGX_SHADER_FILENAME_MAX_SIZE];
+       IMG_CHAR            aszShaderpFilenameStr[RGX_SHADER_FILENAME_MAX_SIZE];
+       const IMG_CHAR      *pszShaderFilenameStr = aszShaderFilenameStr;
+       size_t              uiNumBytes;
+       PVRSRV_ERROR        eError;
+
+       _GetShaderFileName(psDeviceNode, aszShaderFilenameStr, aszShaderpFilenameStr);
+
+       eError = OSLoadFirmware(psDeviceNode, aszShaderFilenameStr, NULL, &psShaderFW);
+
+       if (eError != PVRSRV_OK)
+       {
+               eError = OSLoadFirmware(psDeviceNode, aszShaderpFilenameStr,
+                                       NULL, &psShaderFW);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Failed to load shader binary file %s (%s)",
+                                __func__,
+                                aszShaderpFilenameStr,
+                                PVRSRVGetErrorString(eError)));
+                       eError = PVRSRV_ERROR_UNABLE_TO_FIND_RESOURCE;
+                       goto failed_init;
+               }
+
+               pszShaderFilenameStr = aszShaderpFilenameStr;
+       }
+
+       PVR_LOG(("Shader binary image '%s' loaded", pszShaderFilenameStr));
+
+       RGXShaderReadHeader(psShaderFW, &sHeader);
+
+       ui32NumPages = (sHeader.ui32SizeFragment / RGX_BIF_PM_PHYSICAL_PAGE_SIZE) + 1;
+
+       PDUMPCOMMENT(psDeviceNode, "Allocate TDM USC PMR Block (Pages %08X)", ui32NumPages);
+
+       eError = PhysmemNewRamBackedPMR(NULL,
+                                                                       psDeviceNode,
+                                                                       (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE,
+                                                                       (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE,
+                                                                       1,
+                                                                       1,
+                                                                       &ui32MappingTable,
+                                                                       RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
+                                                                       PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE
+                                                                       | PVRSRV_MEMALLOCFLAG_GPU_READABLE
+                                                                       | PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT
+                                                                       | PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER,
+                                                                       sizeof("tquscpmr"),
+                                                                       "tquscpmr",
+                                                                       PVR_SYS_ALLOC_PID,
+                                                                       (PMR**)&psDevInfo->hTQUSCSharedMem,
+                                                                       PDUMP_NONE,
+                                                                       NULL);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_LOG(("%s: Unexpected error from PhysmemNewRamBackedPMR (%s)",
+                                __func__,
+                                PVRSRVGetErrorString(eError)));
+               goto failed_firmware;
+       }
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+       eError = RIWritePMREntryWithOwnerKM(psDevInfo->hTQUSCSharedMem, PVR_SYS_ALLOC_PID);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_LOG(("%s: Unexpected error from RIWritePMREntryWithOwnerKM (%s)",
+                                __func__,
+                                PVRSRVGetErrorString(eError)));
+               goto failed_uscpmr;
+       }
+#endif
+
+       eError = PMR_WriteBytes(psDevInfo->hTQUSCSharedMem, 0, RGXShaderUSCMem(psShaderFW), RGXShaderUSCMemSize(psShaderFW), &uiNumBytes);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_LOG(("%s: Unexpected error from PMR_WriteBytes (%s)",
+                                __func__,
+                                PVRSRVGetErrorString(eError)));
+               goto failed_uscpmr;
+       }
+
+       ui32NumPages = (sHeader.ui32SizeClientMem / RGX_BIF_PM_PHYSICAL_PAGE_SIZE) + 1;
+
+       PDUMPCOMMENT(psDeviceNode, "Allocate TDM Client PMR Block (Pages %08X)", ui32NumPages);
+
+       eError = PhysmemNewRamBackedPMR(NULL,
+                                                                       psDeviceNode,
+                                                                       (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE,
+                                                                       (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE,
+                                                                       1,
+                                                                       1,
+                                                                       &ui32MappingTable,
+                                                                       RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
+                                                                       PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE
+                                                                       | PVRSRV_MEMALLOCFLAG_CPU_READABLE
+                                                                       | PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT
+                                                                       | PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER,
+                                                                       sizeof("tqclipmr"),
+                                                                       "tqclipmr",
+                                                                       PVR_SYS_ALLOC_PID,
+                                                                       (PMR**)&psDevInfo->hTQCLISharedMem,
+                                                                       PDUMP_NONE,
+                                                                       NULL);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_LOG(("%s: Unexpected error from PhysmemNewRamBackedPMR (%s)",
+                                __func__,
+                                PVRSRVGetErrorString(eError)));
+               goto failed_uscpmr;
+       }
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+       eError = RIWritePMREntryWithOwnerKM(psDevInfo->hTQCLISharedMem, PVR_SYS_ALLOC_PID);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_LOG(("%s: Unexpected error from RIWritePMREntryWithOwnerKM (%s)",
+                                __func__,
+                                PVRSRVGetErrorString(eError)));
+               goto failed_clipmr;
+       }
+#endif
+
+       eError = PMR_WriteBytes(psDevInfo->hTQCLISharedMem, 0, RGXShaderCLIMem(psShaderFW), RGXShaderCLIMemSize(psShaderFW), &uiNumBytes);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_LOG(("%s: Unexpected error from PMR_WriteBytes (%s)",
+                                __func__,
+                                PVRSRVGetErrorString(eError)));
+               goto failed_clipmr;
+       }
+
+       OSUnloadFirmware(psShaderFW);
+
+       PVR_ASSERT(psDevInfo->hTQUSCSharedMem != NULL);
+       PVR_ASSERT(psDevInfo->hTQCLISharedMem != NULL);
+
+       return PVRSRV_OK;
+
+failed_clipmr:
+       PMRUnrefPMR(psDevInfo->hTQCLISharedMem);
+failed_uscpmr:
+       PMRUnrefPMR(psDevInfo->hTQUSCSharedMem);
+failed_firmware:
+       OSUnloadFirmware(psShaderFW);
+failed_init:
+       return eError;
+}
+
+void
+PVRSRVTQAcquireShaders(PVRSRV_DEVICE_NODE  * psDeviceNode,
+                                          PMR                ** ppsCLIPMRMem,
+                                          PMR                ** ppsUSCPMRMem)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       PVR_ASSERT(psDevInfo->hTQUSCSharedMem != NULL);
+       PVR_ASSERT(psDevInfo->hTQCLISharedMem != NULL);
+
+       *ppsUSCPMRMem = psDevInfo->hTQUSCSharedMem;
+       *ppsCLIPMRMem = psDevInfo->hTQCLISharedMem;
+}
+
+void PVRSRVTQUnloadShaders(PVRSRV_DEVICE_NODE * psDeviceNode)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       (void) PMRUnrefPMR(psDevInfo->hTQUSCSharedMem);
+       (void) PMRUnrefPMR(psDevInfo->hTQCLISharedMem);
+}
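
Reading the helpers above, the shader binary image appears to be laid out as a header followed by ui32SizeClientMem bytes of client data (starting at offset 0, so the header is counted within the client region) and then ui32SizeFragment bytes of USC code. The sketch below restates that layout with simplified, hypothetical types; it is an inference from this file, not a documented format.

/* Illustrative sketch only: simplified view of the offsets used by
 * RGXShaderCLIMem() and RGXShaderUSCMem(). The struct is a stand-in,
 * not the real RGX_SHADER_HEADER. */
#include <stdint.h>

struct shader_image_view
{
        const uint8_t *base;          /* start of the loaded image (OSFirmwareData) */
        uint32_t size_client_mem;     /* header field ui32SizeClientMem */
        uint32_t size_fragment;       /* header field ui32SizeFragment */
};

static const uint8_t *shader_client_mem(const struct shader_image_view *v)
{
        /* Client data region: [0, size_client_mem) */
        return v->base;
}

static const uint8_t *shader_usc_mem(const struct shader_image_view *v)
{
        /* USC code follows the client data region */
        return v->base + v->size_client_mem;
}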
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxshader.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxshader.h
new file mode 100644 (file)
index 0000000..7676ede
--- /dev/null
@@ -0,0 +1,83 @@
+/*************************************************************************/ /*!
+@File           rgxshader.h
+@Title          TQ Shader Load
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Shader code and info are shared by all contexts on the device.
+                If the allocation doesn't already exist, the shader data is read
+                from file and PMR memory is allocated. The PMR memory is not
+                deallocated until device deinit.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXSHADER_H)
+#define RGXSHADER_H
+
+#include "fwload.h"
+#include "rgxtransfer_shader.h"
+#include "connection_server.h"
+
+/*************************************************************************/ /*!
+@Function       PVRSRVTQLoadShaders
+@Description    If PMR is not allocated, reads shader binary data from file
+                and allocates new PMR memory.
+@Input          psDeviceNode Device node
+@Return         PVRSRV_ERROR Returns PVRSRV_OK on success.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVTQLoadShaders(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVTQAcquireShaders
+@Description    Get handles to the already allocated shader PMR memory.
+@Input          psDeviceNode Device node
+@Output         ppsCLIPMRMem Shader client data used by the CPU.
+@Output         ppsUSCPMRMem Shader USC code used by the GPU.
+*/ /**************************************************************************/
+void
+PVRSRVTQAcquireShaders(PVRSRV_DEVICE_NODE  *psDeviceNode,
+                       PMR                **ppsCLIPMRMem,
+                       PMR                **ppsUSCPMRMem);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVTQUnloadShaders
+@Description    Unref PMR memory.
+@Input          psDeviceNode Device node
+*/ /**************************************************************************/
+void PVRSRVTQUnloadShaders(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+#endif /* RGXSHADER_H */
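
A minimal lifecycle sketch for the three functions declared above: load once (typically at device init), acquire the PMR handles wherever a transfer context needs them, and unload (unref) at device deinit. The wrapper function is hypothetical and error handling is trimmed.

/* Illustrative sketch only: TQ shader PMR lifecycle using the declarations above. */
static PVRSRV_ERROR ExampleUseTQShaders(PVRSRV_DEVICE_NODE *psDeviceNode)
{
        PMR *psCLIPMRMem = NULL;
        PMR *psUSCPMRMem = NULL;
        PVRSRV_ERROR eError;

        eError = PVRSRVTQLoadShaders(psDeviceNode);   /* allocates the shared PMRs */
        if (eError != PVRSRV_OK)
        {
                return eError;
        }

        PVRSRVTQAcquireShaders(psDeviceNode, &psCLIPMRMem, &psUSCPMRMem);
        /* ... hand psCLIPMRMem / psUSCPMRMem to the transfer context setup ... */

        PVRSRVTQUnloadShaders(psDeviceNode);          /* drops the PMR references */
        return PVRSRV_OK;
}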
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxstartstop.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxstartstop.h
new file mode 100644 (file)
index 0000000..178afe2
--- /dev/null
@@ -0,0 +1,84 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX start/stop header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX start/stop functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXSTARTSTOP_H)
+#define RGXSTARTSTOP_H
+
+/* The routines declared here are built on top of an abstraction layer to
+ * hide DDK/OS-specific details in case they are used outside of the DDK
+ * (e.g. when DRM security is enabled).
+ * Any new dependency should be added to rgxlayer.h.
+ * Any new code should be built on top of the existing abstraction layer,
+ * which should be extended when necessary.
+ */
+#include "rgxlayer.h"
+
+/*!
+*******************************************************************************
+
+ @Function      RGXStart
+
+ @Description   Perform GPU reset and initialisation
+
+ @Input         hPrivate  : Implementation specific data
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXStart(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function      RGXStop
+
+ @Description   Stop Rogue in preparation for power down
+
+ @Input         hPrivate  : Implementation specific data
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXStop(const void *hPrivate);
+
+#endif /* RGXSTARTSTOP_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxsyncutils.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxsyncutils.c
new file mode 100644 (file)
index 0000000..cec0597
--- /dev/null
@@ -0,0 +1,184 @@
+/*************************************************************************/ /*!
+@File           rgxsyncutils.c
+@Title          RGX Sync Utilities
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX Sync helper functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "rgxsyncutils.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "allocmem.h"
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+
+//#define TA3D_CHECKPOINT_DEBUG
+
+#if defined(TA3D_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+static
+void _DebugSyncValues(IMG_UINT32 *pui32UpdateValues,
+                                         IMG_UINT32 ui32Count)
+{
+       IMG_UINT32 iii;
+       IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32UpdateValues;
+
+       for (iii = 0; iii < ui32Count; iii++)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+               pui32Tmp++;
+       }
+}
+#else
+#define CHKPT_DBG(X)
+#endif
+
+
+PVRSRV_ERROR RGXSyncAppendTimelineUpdate(IMG_UINT32 ui32FenceTimelineUpdateValue,
+                                                                                SYNC_ADDR_LIST *psSyncList,
+                                                                                SYNC_ADDR_LIST *psPRSyncList,
+                                                                                PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync,
+                                                                                RGX_SYNC_DATA *psSyncData,
+                                                                                IMG_BOOL bKick3D)
+{
+       IMG_UINT32 *pui32TimelineUpdateWOff = NULL;
+       IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+
+       IMG_UINT32 ui32ClientUpdateValueCount = psSyncData->ui32ClientUpdateValueCount;
+
+       /* Space for original client updates, and the one new update */
+       size_t uiUpdateSize = sizeof(*pui32IntAllocatedUpdateValues) * (ui32ClientUpdateValueCount + 1);
+
+       if (!bKick3D)
+       {
+               /* Additional space for one PR update, only the newest one */
+               uiUpdateSize += sizeof(*pui32IntAllocatedUpdateValues) * 1;
+       }
+
+       CHKPT_DBG((PVR_DBG_ERROR,
+                  "%s: About to allocate memory to hold updates in pui32IntAllocatedUpdateValues(<%p>)",
+                  __func__,
+                  (void*)pui32IntAllocatedUpdateValues));
+
+       /* Allocate memory to hold the list of update values (including our timeline update) */
+       pui32IntAllocatedUpdateValues = OSAllocMem(uiUpdateSize);
+       if (!pui32IntAllocatedUpdateValues)
+       {
+               /* Failed to allocate memory */
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+       OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xcc, uiUpdateSize);
+       pui32TimelineUpdateWOff = pui32IntAllocatedUpdateValues;
+
+       {
+               CHKPT_DBG((PVR_DBG_ERROR,
+                          "%s: Copying %d %s update values into pui32IntAllocatedUpdateValues(<%p>)",
+                          __func__,
+                          ui32ClientUpdateValueCount,
+                          bKick3D ? "TA/3D" : "TA/PR",
+                          (void*)pui32IntAllocatedUpdateValues));
+               /* Copy the update values into the new memory, then append our timeline update value */
+               OSCachedMemCopy(pui32TimelineUpdateWOff, psSyncData->paui32ClientUpdateValue, ui32ClientUpdateValueCount * sizeof(*psSyncData->paui32ClientUpdateValue));
+
+#if defined(TA3D_CHECKPOINT_DEBUG)
+               _DebugSyncValues(pui32TimelineUpdateWOff, ui32ClientUpdateValueCount);
+#endif
+
+               pui32TimelineUpdateWOff += ui32ClientUpdateValueCount;
+       }
+
+       /* Now set the additional update value and append the timeline sync prim addr to either the
+        * render context 3D (or TA) update list
+        */
+       CHKPT_DBG((PVR_DBG_ERROR,
+                  "%s: Appending the additional update value (0x%x) to psRenderContext->sSyncAddrList%sUpdate...",
+                  __func__,
+                  ui32FenceTimelineUpdateValue,
+                  bKick3D ? "TA/3D" : "TA/PR"));
+
+       /* Append the TA/3D update */
+       {
+               *pui32TimelineUpdateWOff++ = ui32FenceTimelineUpdateValue;
+               psSyncData->ui32ClientUpdateValueCount++;
+               psSyncData->ui32ClientUpdateCount++;
+               SyncAddrListAppendSyncPrim(psSyncList, psFenceTimelineUpdateSync);
+
+               if (!psSyncData->pauiClientUpdateUFOAddress)
+               {
+                       psSyncData->pauiClientUpdateUFOAddress = psSyncList->pasFWAddrs;
+               }
+               /* Update paui32ClientUpdateValue to point to our new list of update values */
+               psSyncData->paui32ClientUpdateValue = pui32IntAllocatedUpdateValues;
+
+#if defined(TA3D_CHECKPOINT_DEBUG)
+               _DebugSyncValues(pui32IntAllocatedUpdateValues, psSyncData->ui32ClientUpdateValueCount);
+#endif
+       }
+
+       if (!bKick3D)
+       {
+               /* Use the sSyncAddrList3DUpdate for PR (as it doesn't have one of its own) */
+               *pui32TimelineUpdateWOff++ = ui32FenceTimelineUpdateValue;
+               psSyncData->ui32ClientPRUpdateValueCount = 1;
+               psSyncData->ui32ClientPRUpdateCount = 1;
+               SyncAddrListAppendSyncPrim(psPRSyncList, psFenceTimelineUpdateSync);
+
+               if (!psSyncData->pauiClientPRUpdateUFOAddress)
+               {
+                       psSyncData->pauiClientPRUpdateUFOAddress = psPRSyncList->pasFWAddrs;
+               }
+               /* Update paui32ClientPRUpdateValue to point to our new list of update values */
+               psSyncData->paui32ClientPRUpdateValue = &pui32IntAllocatedUpdateValues[psSyncData->ui32ClientUpdateValueCount];
+
+#if defined(TA3D_CHECKPOINT_DEBUG)
+               _DebugSyncValues(psSyncData->paui32ClientPRUpdateValue, psSyncData->ui32ClientPRUpdateValueCount);
+#endif
+       }
+
+       /* Do not free the old psSyncData->paui32ClientUpdateValue array,
+        * as it is constant data passed through the bridge down to PVRSRVRGXKickTA3DKM() */
+
+       return PVRSRV_OK;
+}
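
RGXSyncAppendTimelineUpdate() above builds one flat array: the N original client update values, then the timeline update value, and, when there is no 3D kick, one further copy of the timeline value that serves as the PR update. The stand-alone sketch below shows only that layout; the helper name and types are hypothetical, not driver API.

/* Illustrative sketch only: layout of the update-value array assembled above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static uint32_t *build_update_values(const uint32_t *client_values,
                                     uint32_t client_count,
                                     uint32_t timeline_value,
                                     bool kick_3d)
{
        /* N client values + 1 timeline update (+ 1 PR update for PR-only kicks) */
        size_t entries = (size_t) client_count + 1 + (kick_3d ? 0 : 1);
        uint32_t *values = malloc(entries * sizeof(*values));

        if (values == NULL)
        {
                return NULL;
        }

        /* Original client updates first, then the timeline update ... */
        memcpy(values, client_values, client_count * sizeof(*values));
        values[client_count] = timeline_value;

        /* ... and, for a PR-only kick, a second copy used as the PR update. */
        if (!kick_3d)
        {
                values[client_count + 1] = timeline_value;
        }

        return values;
}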
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxsyncutils.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxsyncutils.h
new file mode 100644 (file)
index 0000000..2133da8
--- /dev/null
@@ -0,0 +1,76 @@
+/*************************************************************************/ /*!
+@File           rgxsyncutils.h
+@Title          RGX Sync Utilities
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX Sync helper functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXSYNCUTILS_H
+#define RGXSYNCUTILS_H
+
+#include "rgxdevice.h"
+#include "sync_server.h"
+#include "rgxdebug.h"
+#include "rgx_fwif_km.h"
+
+typedef struct _RGX_SYNC_DATA_
+{
+       PRGXFWIF_UFO_ADDR *pauiClientUpdateUFOAddress;
+       IMG_UINT32 *paui32ClientUpdateValue;
+       IMG_UINT32 ui32ClientUpdateValueCount;
+       IMG_UINT32 ui32ClientUpdateCount;
+
+       PRGXFWIF_UFO_ADDR *pauiClientPRUpdateUFOAddress;
+       IMG_UINT32 *paui32ClientPRUpdateValue;
+       IMG_UINT32 ui32ClientPRUpdateValueCount;
+       IMG_UINT32 ui32ClientPRUpdateCount;
+} RGX_SYNC_DATA;
+
+PVRSRV_ERROR RGXSyncAppendTimelineUpdate(IMG_UINT32 ui32FenceTimelineUpdateValue,
+                                                                                SYNC_ADDR_LIST *psSyncList,
+                                                                                SYNC_ADDR_LIST *psPRSyncList,
+                                                                                PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync,
+                                                                                RGX_SYNC_DATA *psSyncData,
+                                                                                IMG_BOOL bKick3D);
+
+#endif /* RGXSYNCUTILS_H */
+
+/******************************************************************************
+ End of file (rgxsyncutils.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxtdmtransfer.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxtdmtransfer.h
new file mode 100644 (file)
index 0000000..87ca2cf
--- /dev/null
@@ -0,0 +1,132 @@
+/*************************************************************************/ /*!
+@File           rgxtdmtransfer.h
+@Title          RGX Transfer queue 2 Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX Transfer queue Functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXTDMTRANSFER_H)
+#define RGXTDMTRANSFER_H
+
+#include "devicemem.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+
+#include "sync_server.h"
+#include "connection_server.h"
+
+typedef struct _RGX_SERVER_TQ_TDM_CONTEXT_ RGX_SERVER_TQ_TDM_CONTEXT;
+
+
+PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM(
+       CONNECTION_DATA           * psConnection,
+       PVRSRV_DEVICE_NODE        * psDeviceNode,
+       IMG_UINT32                  ui32Priority,
+       IMG_UINT32                  ui32FrameworkCommandSize,
+       IMG_PBYTE                   pabyFrameworkCommand,
+       IMG_HANDLE                  hMemCtxPrivData,
+       IMG_UINT32                  ui32PackedCCBSizeU88,
+       IMG_UINT32                  ui32ContextFlags,
+       IMG_UINT64                  ui64RobustnessAddress,
+       RGX_SERVER_TQ_TDM_CONTEXT **ppsTransferContext);
+
+
+PVRSRV_ERROR PVRSRVRGXTDMGetSharedMemoryKM(
+       CONNECTION_DATA           * psConnection,
+       PVRSRV_DEVICE_NODE        * psDeviceNode,
+       PMR                      ** ppsCLIPMRMem,
+       PMR                      ** ppsUSCPMRMem);
+
+
+PVRSRV_ERROR PVRSRVRGXTDMReleaseSharedMemoryKM(PMR * psUSCPMRMem);
+
+
+PVRSRV_ERROR PVRSRVRGXTDMDestroyTransferContextKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext);
+
+
+PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM(
+       RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext,
+       IMG_UINT32                  ui32PDumpFlags,
+       IMG_UINT32                  ui32ClientUpdateCount,
+       SYNC_PRIMITIVE_BLOCK     ** pauiClientUpdateUFODevVarBlock,
+       IMG_UINT32                * paui32ClientUpdateSyncOffset,
+       IMG_UINT32                * paui32ClientUpdateValue,
+       PVRSRV_FENCE                iCheckFence,
+       PVRSRV_TIMELINE             iUpdateTimeline,
+       PVRSRV_FENCE              * piUpdateFence,
+       IMG_CHAR                    szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],
+       IMG_UINT32                  ui32FWCommandSize,
+       IMG_UINT8                 * pui8FWCommand,
+       IMG_UINT32                  ui32ExtJobRef,
+       IMG_UINT32                  ui32SyncPMRCount,
+       IMG_UINT32                * pui32SyncPMRFlags,
+       PMR                      ** ppsSyncPMRs,
+       IMG_UINT32                  ui32TDMCharacteristic1,
+       IMG_UINT32                  ui32TDMCharacteristic2,
+       IMG_UINT64                  ui64DeadlineInus);
+
+PVRSRV_ERROR PVRSRVRGXTDMNotifyWriteOffsetUpdateKM(
+       RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+       IMG_UINT32                 ui32PDumpFlags);
+
+PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
+                                                      PVRSRV_DEVICE_NODE * psDeviceNode,
+                                                      RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+                                                      IMG_UINT32 ui32Priority);
+
+PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPropertyKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+                                                      RGX_CONTEXT_PROPERTY eContextProperty,
+                                                      IMG_UINT64 ui64Input,
+                                                      IMG_UINT64 *pui64Output);
+
+/* Debug - Dump debug info of TDM transfer contexts on this device */
+void DumpTDMTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                              DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                              void *pvDumpDebugFile,
+                              IMG_UINT32 ui32VerbLevel);
+
+/* Debug/Watchdog - check if client transfer contexts are stalled */
+IMG_UINT32 CheckForStalledClientTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+
+#endif /* RGXTDMTRANSFER_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxtimecorr.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxtimecorr.c
new file mode 100644 (file)
index 0000000..584dbf1
--- /dev/null
@@ -0,0 +1,648 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific time correlation and calibration routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific time correlation and calibration routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "rgxtimecorr.h"
+#include "rgxfwutils.h"
+#include "htbserver.h"
+#include "pvrsrv_apphint.h"
+
+/******************************************************************************
+ *
+ * - A calibration period is started on power-on and after a DVFS transition,
+ *   and it's closed before a power-off and before a DVFS transition
+ *   (so power-on -> dvfs -> dvfs -> power-off, power-on -> dvfs -> dvfs...,
+ *   where each arrow is a calibration period).
+ *
+ * - The timers on the Host and on the FW are correlated at the beginning of
+ *   each period together with the current GPU frequency.
+ *
+ * - Correlation and calibration are also done at regular intervals using
+ *   a best effort approach.
+ *
+ *****************************************************************************/
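+
+/*
+ * Illustrative usage sketch (assumes the caller already holds the power lock
+ * and the GPU is powered on): a DVFS transition is bracketed as
+ *
+ *     RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_DVFS);
+ *     ... the system layer changes the GPU core clock here ...
+ *     RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_DVFS);
+ *
+ * Each End/Begin pair closes one calibration period and opens the next,
+ * matching the power-on -> dvfs -> ... -> power-off sequence above.
+ */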
+
+/*
+       AppHint interfaces
+*/
+
+static PVRSRV_ERROR _SetClock(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                              const void *psPrivate,
+                              IMG_UINT32 ui32Value)
+{
+       static __maybe_unused const char* const apszClocks[] = {
+               "mono", "mono_raw", "sched"
+       };
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       PVR_ASSERT(psDeviceNode->pvDevice != NULL);
+
+       PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+       if (ui32Value >= RGXTIMECORR_CLOCK_LAST)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Invalid clock source type (%u)", ui32Value));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       RGXTimeCorrEnd((PVRSRV_DEVICE_NODE *) psDeviceNode,
+                      RGXTIMECORR_EVENT_CLOCK_CHANGE);
+
+       PVR_DPF((PVR_DBG_WARNING, "Setting time correlation clock from \"%s\" to \"%s\"",
+                       apszClocks[psDevInfo->ui32ClockSource],
+                       apszClocks[ui32Value]));
+
+       psDevInfo->ui32ClockSource = ui32Value;
+
+       RGXTimeCorrBegin((PVRSRV_DEVICE_NODE *) psDeviceNode,
+                        RGXTIMECORR_EVENT_CLOCK_CHANGE);
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _GetClock(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                              const void *psPrivate,
+                              IMG_UINT32 *pui32Value)
+{
+       PVR_ASSERT(psDeviceNode->pvDevice != NULL);
+
+       *pui32Value =
+           ((PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice)->ui32ClockSource;
+
+       PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+       return PVRSRV_OK;
+}
+
+void RGXTimeCorrInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_TimeCorrClock, _GetClock,
+                                           _SetClock, psDeviceNode, NULL);
+}
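+
+/* For illustration: the TimeCorrClock AppHint value maps onto
+ * RGXTIMECORR_CLOCK_TYPE in declaration order, so (however the AppHint is
+ * exposed on a given build) 0 selects "mono", 1 "mono_raw" and 2 "sched";
+ * anything >= RGXTIMECORR_CLOCK_LAST is rejected by _SetClock() with
+ * PVRSRV_ERROR_INVALID_PARAMS.
+ */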
+
+/*
+       End of AppHint interface
+*/
+
+IMG_UINT64 RGXTimeCorrGetClockns64(const PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       IMG_UINT64 ui64Clock;
+
+       switch (((PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice)->ui32ClockSource) {
+               case RGXTIMECORR_CLOCK_MONO:
+                       return ((void) OSClockMonotonicns64(&ui64Clock), ui64Clock);
+               case RGXTIMECORR_CLOCK_MONO_RAW:
+                       return OSClockMonotonicRawns64();
+               case RGXTIMECORR_CLOCK_SCHED:
+                       return OSClockns64();
+               default:
+                       PVR_ASSERT(IMG_FALSE);
+                       return 0;
+       }
+}
+
+IMG_UINT64 RGXTimeCorrGetClockus64(const PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       IMG_UINT32 rem;
+       return OSDivide64r64(RGXTimeCorrGetClockns64(psDeviceNode), 1000, &rem);
+}
+
+void RGXGetTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                       RGXFWIF_TIME_CORR *psTimeCorrs,
+                                                       IMG_UINT32 ui32NumOut)
+{
+       PVRSRV_RGXDEV_INFO    *psDevInfo     = psDeviceNode->pvDevice;
+       RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb;
+       IMG_UINT32 ui32CurrentIndex = psGpuUtilFWCB->ui32TimeCorrSeqCount;
+
+       while (ui32NumOut--)
+       {
+               *(psTimeCorrs++) = psGpuUtilFWCB->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(ui32CurrentIndex)];
+               ui32CurrentIndex--;
+       }
+}
+
+static __maybe_unused const IMG_CHAR* _EventToString(RGXTIMECORR_EVENT eEvent)
+{
+       switch (eEvent)
+       {
+               case RGXTIMECORR_EVENT_POWER:
+                       return "power";
+               case RGXTIMECORR_EVENT_DVFS:
+                       return "dvfs";
+               case RGXTIMECORR_EVENT_PERIODIC:
+                       return "periodic";
+               case RGXTIMECORR_EVENT_CLOCK_CHANGE:
+                       return "clock source";
+               default:
+                       return "n/a";
+       }
+}
+
+static inline IMG_UINT32 _RGXGetSystemLayerGPUClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData;
+
+       return psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+}
+
+static inline IMG_UINT32 _RGXGetEstimatedGPUClockSpeed(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+       GPU_FREQ_TRACKING_DATA *psTrackingData;
+
+       psTrackingData = &psGpuDVFSTable->asTrackingData[psGpuDVFSTable->ui32FreqIndex];
+
+       return psTrackingData->ui32EstCoreClockSpeed;
+}
+
+#if defined(PVRSRV_TIMER_CORRELATION_HISTORY)
+static inline void _DumpTimerCorrelationHistory(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+       IMG_UINT32 i = psGpuDVFSTable->ui32HistoryIndex;
+
+       PVR_DPF((PVR_DBG_ERROR, "Dumping history of timer correlation data (latest first):"));
+
+       do
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "  Begin times: OS %" IMG_UINT64_FMTSPEC ", CR %" IMG_UINT64_FMTSPEC ", "
+                                "End times: OS %" IMG_UINT64_FMTSPEC ", CR %" IMG_UINT64_FMTSPEC ", "
+                                "Core clk %u, Estimated clk %u",
+                                psGpuDVFSTable->asTrackingHistory[i].ui64BeginOSTimestamp,
+                                psGpuDVFSTable->asTrackingHistory[i].ui64BeginCRTimestamp,
+                                psGpuDVFSTable->asTrackingHistory[i].ui64EndOSTimestamp,
+                                psGpuDVFSTable->asTrackingHistory[i].ui64EndCRTimestamp,
+                                psGpuDVFSTable->asTrackingHistory[i].ui32CoreClockSpeed,
+                                psGpuDVFSTable->asTrackingHistory[i].ui32EstCoreClockSpeed));
+
+               i = (i - 1) % RGX_GPU_FREQ_TRACKING_SIZE;
+
+       } while (i != psGpuDVFSTable->ui32HistoryIndex);
+}
+#endif
+
+static void _RGXMakeTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode, RGXTIMECORR_EVENT eEvent)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb;
+       IMG_UINT32 ui32NewSeqCount = psGpuUtilFWCB->ui32TimeCorrSeqCount + 1;
+       RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(ui32NewSeqCount)];
+
+       /*
+        * The following reads must be done as close together as possible, because
+        * they represent the same current time sampled from different clock sources.
+        */
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       if (OSClockMonotonicns64(&psTimeCorr->ui64OSMonoTimeStamp) != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "_RGXMakeTimeCorrData: System Monotonic Clock not available."));
+               PVR_ASSERT(0);
+       }
+#endif
+       psTimeCorr->ui64CRTimeStamp = RGXReadHWTimerReg(psDevInfo);
+       psTimeCorr->ui64OSTimeStamp = RGXTimeCorrGetClockns64(psDeviceNode);
+       psTimeCorr->ui32CoreClockSpeed = _RGXGetEstimatedGPUClockSpeed(psDevInfo);
+       psTimeCorr->ui64CRDeltaToOSDeltaKNs = RGXTimeCorrGetConversionFactor(psTimeCorr->ui32CoreClockSpeed);
+
+       if (psTimeCorr->ui64CRDeltaToOSDeltaKNs == 0)
+       {
+#if defined(PVRSRV_TIMER_CORRELATION_HISTORY)
+               _DumpTimerCorrelationHistory(psDevInfo);
+#endif
+
+               /* Revert to original clock speed (error already printed) */
+               psTimeCorr->ui32CoreClockSpeed = _RGXGetSystemLayerGPUClockSpeed(psDeviceNode);
+               psTimeCorr->ui64CRDeltaToOSDeltaKNs = RGXTimeCorrGetConversionFactor(psTimeCorr->ui32CoreClockSpeed);
+       }
+
+       /* Make sure the values are written to memory before updating the index of the current entry */
+       OSWriteMemoryBarrier(psTimeCorr);
+
+       /* Update the index of the current entry in the timer correlation array */
+       psGpuUtilFWCB->ui32TimeCorrSeqCount = ui32NewSeqCount;
+
+       PVR_DPF((PVR_DBG_MESSAGE,
+                "Timer correlation data (post %s event): OS %" IMG_UINT64_FMTSPEC " ns, "
+                "CR %" IMG_UINT64_FMTSPEC ", GPU freq. %u Hz (given as %u Hz)",
+                _EventToString(eEvent),
+                psTimeCorr->ui64OSTimeStamp,
+                psTimeCorr->ui64CRTimeStamp,
+                RGXFWIF_ROUND_TO_KHZ(psTimeCorr->ui32CoreClockSpeed),
+                _RGXGetSystemLayerGPUClockSpeed(psDeviceNode)));
+
+       /*
+        * Don't log timing data to the HTB log after a power(-on) event.
+        * Otherwise this will be logged before the HTB partition marker, breaking
+        * the log sync grammar. This data will be automatically repeated when the
+        * partition marker is written.
+        */
+       HTBSyncScale(eEvent != RGXTIMECORR_EVENT_POWER,
+                    psTimeCorr->ui64OSTimeStamp,
+                    psTimeCorr->ui64CRTimeStamp,
+                    psTimeCorr->ui32CoreClockSpeed);
+}
+
+static void _RGXCheckTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                  RGX_GPU_DVFS_TABLE *psGpuDVFSTable)
+{
+#if !defined(NO_HARDWARE) && !defined(VIRTUAL_PLATFORM) && defined(DEBUG)
+#define SCALING_FACTOR (10)
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb;
+       IMG_UINT32 ui32Index = RGXFWIF_TIME_CORR_CURR_INDEX(psGpuUtilFWCB->ui32TimeCorrSeqCount);
+       RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32Index];
+       IMG_UINT64 ui64EstimatedTime, ui64CRTimeStamp, ui64OSTimeStamp;
+       IMG_UINT64 ui64CRTimeDiff, ui64OSTimeDiff;
+       IMG_INT64 i64Diff;
+       IMG_UINT32 ui32Ratio, ui32Remainder;
+
+       /*
+        * The following reads must be done as close together as possible, because
+        * they represent the same current time sampled from different clock sources.
+        */
+       ui64CRTimeStamp = RGXReadHWTimerReg(psDevInfo);
+       ui64OSTimeStamp = RGXTimeCorrGetClockns64(psDeviceNode);
+
+       if ((ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp) < (1 << SCALING_FACTOR))
+       {
+               /*
+                * Less than ~1us has passed since the timer correlation data was generated.
+                * A time frame this short is probably not enough to get an estimate
+                * of how good the timer correlation data was.
+                * Skip calculations for the above reason and to avoid a division by 0 below.
+                */
+               return;
+       }
+
+
+       /* Calculate an estimated timestamp based on the latest timer correlation data */
+       ui64CRTimeDiff = ui64CRTimeStamp - psTimeCorr->ui64CRTimeStamp;
+       ui64OSTimeDiff = RGXFWIF_GET_DELTA_OSTIME_NS(ui64CRTimeDiff,
+                                                    psTimeCorr->ui64CRDeltaToOSDeltaKNs);
+       ui64EstimatedTime = psTimeCorr->ui64OSTimeStamp + ui64OSTimeDiff;
+
+       /* Get difference between estimated timestamp and current timestamp, in ns */
+       i64Diff = ui64EstimatedTime - ui64OSTimeStamp;
+
+       /*
+        * Calculate ratio between estimated time diff and real time diff:
+        * ratio% : 100% = (OSestimate - OStimecorr) : (OSreal - OStimecorr)
+        *
+        * The operands are scaled down (approximately from ns to us) so at least
+        * the divisor fits on 32 bit.
+        */
+       ui32Ratio = OSDivide64(((ui64EstimatedTime - psTimeCorr->ui64OSTimeStamp) * 100ULL) >> SCALING_FACTOR,
+                              (ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp) >> SCALING_FACTOR,
+                              &ui32Remainder);
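+
+       /* Worked example (for illustration): with a real elapsed time of
+        * 1,000,000 ns and an estimated elapsed time of 1,010,000 ns since the
+        * last correlation point, the ratio is roughly
+        * (1,010,000 * 100) / 1,000,000 = 101%, which trips the +/- 1% warning
+        * below and suggests the recorded core clock speed is about 1% lower
+        * than the real one.
+        */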
+
+       PVR_DPF((PVR_DBG_MESSAGE,
+                "Estimated timestamp check: diff %" IMG_INT64_FMTSPECd " ns over "
+                "period %" IMG_UINT64_FMTSPEC " ns, estimated timer speed %u%%",
+                i64Diff,
+                ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp,
+                ui32Ratio));
+
+       /* Warn if the estimated timestamp is not within +/- 1% of the current time */
+       if (ui32Ratio < 99 || ui32Ratio > 101)
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                        "Estimated timestamps generated in the last %" IMG_UINT64_FMTSPEC " ns "
+                        "were %s the real time (increasing at %u%% speed)",
+                        ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp,
+                        i64Diff > 0 ? "ahead of" : "behind",
+                        ui32Ratio));
+
+               /* Higher ratio == higher delta OS == higher delta CR == frequency higher than expected (and vice versa) */
+               PVR_DPF((PVR_DBG_WARNING,
+                        "Current GPU frequency %u Hz (given as %u Hz) is probably %s than expected",
+                        RGXFWIF_ROUND_TO_KHZ(psTimeCorr->ui32CoreClockSpeed),
+                        _RGXGetSystemLayerGPUClockSpeed(psDeviceNode),
+                        i64Diff > 0 ? "lower" : "higher"));
+       }
+#else
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(psGpuDVFSTable);
+#endif
+}
+
+static inline IMG_UINT32 _RGXGPUFreqGetIndex(RGX_GPU_DVFS_TABLE *psGpuDVFSTable, IMG_UINT32 ui32CoreClockSpeed)
+{
+       IMG_UINT32 *paui32GPUFrequencies = psGpuDVFSTable->aui32GPUFrequency;
+       IMG_UINT32 i;
+
+       for (i = 0; i < RGX_GPU_DVFS_TABLE_SIZE; i++)
+       {
+               if (paui32GPUFrequencies[i] == ui32CoreClockSpeed)
+               {
+                       return i;
+               }
+
+               if (paui32GPUFrequencies[i] == 0)
+               {
+                       paui32GPUFrequencies[i] = ui32CoreClockSpeed;
+                       return i;
+               }
+       }
+
+       i--;
+
+       PVR_DPF((PVR_DBG_ERROR, "GPU frequency table in the driver is full! "
+                "Table size should be increased! Overriding last entry (%u) with %u",
+                paui32GPUFrequencies[i], ui32CoreClockSpeed));
+
+       paui32GPUFrequencies[i] = ui32CoreClockSpeed;
+
+       return i;
+}
+
+static void _RGXGPUFreqCalibrationPeriodStart(PVRSRV_DEVICE_NODE *psDeviceNode, RGX_GPU_DVFS_TABLE *psGpuDVFSTable)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       GPU_FREQ_TRACKING_DATA *psTrackingData;
+       IMG_UINT32 ui32CoreClockSpeed, ui32Index;
+
+       IMG_UINT64 ui64CRTimestamp = RGXReadHWTimerReg(psDevInfo);
+       IMG_UINT64 ui64OSTimestamp = RGXTimeCorrGetClockus64(psDeviceNode);
+
+       psGpuDVFSTable->ui64CalibrationCRTimestamp = ui64CRTimestamp;
+       psGpuDVFSTable->ui64CalibrationOSTimestamp = ui64OSTimestamp;
+
+       ui32CoreClockSpeed = _RGXGetSystemLayerGPUClockSpeed(psDeviceNode);
+       ui32Index          = _RGXGPUFreqGetIndex(psGpuDVFSTable, ui32CoreClockSpeed);
+       psTrackingData     = &psGpuDVFSTable->asTrackingData[ui32Index];
+
+       /* Set the time needed to (re)calibrate the GPU frequency */
+       if (psTrackingData->ui32CalibrationCount == 0) /* This frequency has not been seen before */
+       {
+               psTrackingData->ui32EstCoreClockSpeed = ui32CoreClockSpeed;
+               psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_FIRST_CALIBRATION_TIME_US;
+       }
+       else if (psTrackingData->ui32CalibrationCount == 1) /* We calibrated this frequency only once */
+       {
+               psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_TRANSITION_CALIBRATION_TIME_US;
+       }
+       else
+       {
+               psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_PERIODIC_CALIBRATION_TIME_US;
+       }
+
+       /* Update the index to the DVFS table */
+       psGpuDVFSTable->ui32FreqIndex = ui32Index;
+
+#if defined(PVRSRV_TIMER_CORRELATION_HISTORY)
+       /* Update tracking history */
+       {
+               GPU_FREQ_TRACKING_HISTORY *psTrackingHistory;
+
+               psTrackingHistory = &psGpuDVFSTable->asTrackingHistory[psGpuDVFSTable->ui32HistoryIndex];
+               psTrackingHistory->ui32CoreClockSpeed    = ui32CoreClockSpeed;
+               psTrackingHistory->ui32EstCoreClockSpeed = psTrackingData->ui32EstCoreClockSpeed;
+               psTrackingHistory->ui64BeginCRTimestamp  = ui64CRTimestamp;
+               psTrackingHistory->ui64BeginOSTimestamp  = ui64OSTimestamp;
+               psTrackingHistory->ui64EndCRTimestamp    = 0ULL;
+               psTrackingHistory->ui64EndOSTimestamp    = 0ULL;
+       }
+#endif
+}
+
+static void _RGXGPUFreqCalibrationPeriodStop(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                                        RGX_GPU_DVFS_TABLE *psGpuDVFSTable)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       IMG_UINT64 ui64CRTimestamp = RGXReadHWTimerReg(psDevInfo);
+       IMG_UINT64 ui64OSTimestamp = RGXTimeCorrGetClockus64(psDeviceNode);
+
+       psGpuDVFSTable->ui64CalibrationCRTimediff =
+           ui64CRTimestamp - psGpuDVFSTable->ui64CalibrationCRTimestamp;
+       psGpuDVFSTable->ui64CalibrationOSTimediff =
+           ui64OSTimestamp - psGpuDVFSTable->ui64CalibrationOSTimestamp;
+
+       /* Check if the current timer correlation data is good enough */
+       _RGXCheckTimeCorrData(psDeviceNode, psGpuDVFSTable);
+
+#if defined(PVRSRV_TIMER_CORRELATION_HISTORY)
+       /* Update tracking history */
+       {
+               GPU_FREQ_TRACKING_HISTORY *psTrackingHistory;
+
+               psTrackingHistory = &psGpuDVFSTable->asTrackingHistory[psGpuDVFSTable->ui32HistoryIndex];
+               psTrackingHistory->ui64EndCRTimestamp = ui64CRTimestamp;
+               psTrackingHistory->ui64EndOSTimestamp = ui64OSTimestamp;
+       }
+#endif
+}
+
+static void _RGXGPUFreqCalibrationCalculate(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                            RGX_GPU_DVFS_TABLE *psGpuDVFSTable,
+                                            RGXTIMECORR_EVENT   eEvent)
+{
+#if !defined(DISABLE_GPU_FREQUENCY_CALIBRATION)
+       GPU_FREQ_TRACKING_DATA *psTrackingData;
+       IMG_UINT32 ui32EstCoreClockSpeed, ui32PrevCoreClockSpeed;
+       IMG_INT32  i32Diff;
+       IMG_UINT32 ui32Remainder;
+
+       /*
+        * Find out what the GPU frequency was in the last period.
+        * This should return a value very close to the frequency passed by the system layer.
+        */
+       ui32EstCoreClockSpeed =
+           RGXFWIF_GET_GPU_CLOCK_FREQUENCY_HZ(psGpuDVFSTable->ui64CalibrationCRTimediff,
+                                              psGpuDVFSTable->ui64CalibrationOSTimediff,
+                                              ui32Remainder);
+
+       /* Update GPU frequency used by the driver for a given system layer frequency */
+       psTrackingData = &psGpuDVFSTable->asTrackingData[psGpuDVFSTable->ui32FreqIndex];
+
+       ui32PrevCoreClockSpeed = psTrackingData->ui32EstCoreClockSpeed;
+       psTrackingData->ui32EstCoreClockSpeed = ui32EstCoreClockSpeed;
+       psTrackingData->ui32CalibrationCount++;
+
+       i32Diff = (IMG_INT32) (ui32EstCoreClockSpeed - ui32PrevCoreClockSpeed);
+
+       if ((i32Diff < -1000000) || (i32Diff > 1000000))
+       {
+               /* Warn if the frequency changed by more than 1 MHz between recalculations */
+               PVR_DPF((PVR_DBG_WARNING,
+                        "GPU frequency calibration of system layer frequency %u Hz (pre %s event): "
+                        "more than 1 MHz difference between old and new value "
+                        "(%u Hz -> %u Hz over %"  IMG_UINT64_FMTSPEC " us)",
+                        _RGXGetSystemLayerGPUClockSpeed(psDeviceNode),
+                        _EventToString(eEvent),
+                        RGXFWIF_ROUND_TO_KHZ(ui32PrevCoreClockSpeed),
+                        RGXFWIF_ROUND_TO_KHZ(ui32EstCoreClockSpeed),
+                        psGpuDVFSTable->ui64CalibrationOSTimediff));
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_MESSAGE,
+                        "GPU frequency calibration of system layer frequency %u Hz (pre %s event): "
+                        "%u Hz -> %u Hz done over %" IMG_UINT64_FMTSPEC " us",
+                        _RGXGetSystemLayerGPUClockSpeed(psDeviceNode),
+                        _EventToString(eEvent),
+                        RGXFWIF_ROUND_TO_KHZ(ui32PrevCoreClockSpeed),
+                        RGXFWIF_ROUND_TO_KHZ(ui32EstCoreClockSpeed),
+                        psGpuDVFSTable->ui64CalibrationOSTimediff));
+       }
+
+       /* Reset time deltas to avoid recalibrating the same frequency over and over again */
+       psGpuDVFSTable->ui64CalibrationCRTimediff = 0;
+       psGpuDVFSTable->ui64CalibrationOSTimediff = 0;
+
+#if defined(PVRSRV_TIMER_CORRELATION_HISTORY)
+       /* Update tracking history */
+       {
+               GPU_FREQ_TRACKING_HISTORY *psTrackingHistory;
+
+               psTrackingHistory = &psGpuDVFSTable->asTrackingHistory[psGpuDVFSTable->ui32HistoryIndex];
+               psTrackingHistory->ui32EstCoreClockSpeed = ui32EstCoreClockSpeed;
+               psGpuDVFSTable->ui32HistoryIndex =
+                       (psGpuDVFSTable->ui32HistoryIndex + 1) % RGX_GPU_FREQ_TRACKING_SIZE;
+       }
+#endif
+
+#else
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(psGpuDVFSTable);
+       PVR_UNREFERENCED_PARAMETER(eEvent);
+#endif
+}
+
+void RGXTimeCorrBegin(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent)
+{
+       PVRSRV_DEVICE_NODE  *psDeviceNode   = hDevHandle;
+       PVRSRV_RGXDEV_INFO  *psDevInfo      = psDeviceNode->pvDevice;
+       RGX_GPU_DVFS_TABLE  *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+       PVRSRV_VZ_RETN_IF_MODE(GUEST);
+
+       _RGXGPUFreqCalibrationPeriodStart(psDeviceNode, psGpuDVFSTable);
+       _RGXMakeTimeCorrData(psDeviceNode, eEvent);
+}
+
+void RGXTimeCorrEnd(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent)
+{
+       PVRSRV_DEVICE_NODE  *psDeviceNode   = hDevHandle;
+       PVRSRV_RGXDEV_INFO  *psDevInfo      = psDeviceNode->pvDevice;
+       RGX_GPU_DVFS_TABLE  *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+       PVRSRV_VZ_RETN_IF_MODE(GUEST);
+
+       _RGXGPUFreqCalibrationPeriodStop(psDeviceNode, psGpuDVFSTable);
+
+       if (psGpuDVFSTable->ui64CalibrationOSTimediff >= psGpuDVFSTable->ui32CalibrationPeriod)
+       {
+               _RGXGPUFreqCalibrationCalculate(psDeviceNode, psGpuDVFSTable, eEvent);
+       }
+}
+
+void RGXTimeCorrRestartPeriodic(IMG_HANDLE hDevHandle)
+{
+       PVRSRV_DEVICE_NODE     *psDeviceNode   = hDevHandle;
+       PVRSRV_RGXDEV_INFO     *psDevInfo      = psDeviceNode->pvDevice;
+       RGX_GPU_DVFS_TABLE     *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+       IMG_UINT64             ui64TimeNow     = RGXTimeCorrGetClockus64(psDeviceNode);
+       PVRSRV_DEV_POWER_STATE ePowerState = PVRSRV_DEV_POWER_STATE_DEFAULT;
+       PVRSRV_VZ_RETN_IF_MODE(GUEST);
+
+       if (psGpuDVFSTable == NULL)
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "%s: Required data not initialised yet", __func__));
+               return;
+       }
+
+       /* Check if it's the right time to recalibrate the GPU clock frequency */
+       if ((ui64TimeNow - psGpuDVFSTable->ui64CalibrationOSTimestamp) < psGpuDVFSTable->ui32CalibrationPeriod) return;
+
+       /* Try to acquire the powerlock, if not possible then don't wait */
+       if (PVRSRVPowerTryLock(psDeviceNode) != PVRSRV_OK) return;
+
+       /* If the GPU is off then we can't do anything */
+       PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+       if (ePowerState != PVRSRV_DEV_POWER_STATE_ON)
+       {
+               PVRSRVPowerUnlock(psDeviceNode);
+               return;
+       }
+
+       /* All checks passed, we can calibrate and correlate */
+       RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_PERIODIC);
+       RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_PERIODIC);
+
+       PVRSRVPowerUnlock(psDeviceNode);
+}
+
+/*
+       RGXTimeCorrGetClockSource
+*/
+RGXTIMECORR_CLOCK_TYPE RGXTimeCorrGetClockSource(const PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       return ((PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice)->ui32ClockSource;
+}
+
+/*
+       RGXTimeCorrSetClockSource
+*/
+PVRSRV_ERROR RGXTimeCorrSetClockSource(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                       RGXTIMECORR_CLOCK_TYPE eClockType)
+{
+       return _SetClock(psDeviceNode, NULL, eClockType);
+}
+
+PVRSRV_ERROR
+PVRSRVRGXCurrentTime(CONNECTION_DATA    * psConnection,
+                     PVRSRV_DEVICE_NODE * psDeviceNode,
+                     IMG_UINT64         * pui64Time)
+{
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       *pui64Time = RGXTimeCorrGetClockns64(psDeviceNode);
+
+       return PVRSRV_OK;
+}
+
+/******************************************************************************
+ End of file (rgxtimecorr.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxtimecorr.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxtimecorr.h
new file mode 100644 (file)
index 0000000..e1cfff9
--- /dev/null
@@ -0,0 +1,272 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX time correlation and calibration header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX time correlation and calibration routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXTIMECORR_H)
+#define RGXTIMECORR_H
+
+#include "img_types.h"
+#include "device.h"
+#include "osfunc.h"
+#include "connection_server.h"
+
+typedef enum
+{
+       RGXTIMECORR_CLOCK_MONO,
+       RGXTIMECORR_CLOCK_MONO_RAW,
+       RGXTIMECORR_CLOCK_SCHED,
+
+       RGXTIMECORR_CLOCK_LAST
+} RGXTIMECORR_CLOCK_TYPE;
+
+typedef enum
+{
+       RGXTIMECORR_EVENT_POWER,
+       RGXTIMECORR_EVENT_DVFS,
+       RGXTIMECORR_EVENT_PERIODIC,
+       RGXTIMECORR_EVENT_CLOCK_CHANGE
+} RGXTIMECORR_EVENT;
+
+/*
+ * Calibrated GPU frequencies are rounded to the nearest multiple of 1 KHz
+ * before use, to reduce the noise introduced by calculations done with
+ * imperfect operands (correlated timers not sampled at exactly the same
+ * time, GPU CR timer incrementing only once every 256 GPU cycles).
+ * This also helps reduce the variation between consecutive calculations.
+ */
+#define RGXFWIF_CONVERT_TO_KHZ(freq)   (((freq) + 500) / 1000)
+#define RGXFWIF_ROUND_TO_KHZ(freq)    ((((freq) + 500) / 1000) * 1000)
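+
+/*
+ * Worked example (for illustration): a calibrated value of 409,599,700 Hz
+ * gives RGXFWIF_CONVERT_TO_KHZ(409599700) = (409599700 + 500) / 1000
+ * = 409600 KHz, and RGXFWIF_ROUND_TO_KHZ(409599700) = 409,600,000 Hz.
+ */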
+
+/* Constants used in different calculations */
+#define SECONDS_TO_MICROSECONDS          (1000000ULL)
+#define CRTIME_TO_CYCLES_WITH_US_SCALE   (RGX_CRTIME_TICK_IN_CYCLES * SECONDS_TO_MICROSECONDS)
+
+/*
+ * Use this macro to get a more realistic GPU core clock speed than the one
+ * given by the upper layers (used when doing GPU frequency calibration)
+ */
+#define RGXFWIF_GET_GPU_CLOCK_FREQUENCY_HZ(deltacr_us, deltaos_us, remainder) \
+    OSDivide64((deltacr_us) * CRTIME_TO_CYCLES_WITH_US_SCALE, (deltaos_us), &(remainder))
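+
+/*
+ * Worked example (for illustration, taking RGX_CRTIME_TICK_IN_CYCLES as 256,
+ * i.e. one CR tick every 256 GPU cycles as noted above): 40,960 CR ticks
+ * counted over 25,600 us give 40960 * 256000000 / 25600 = 409,600,000 Hz.
+ */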
+
+
+/*!
+******************************************************************************
+
+ @Function    RGXTimeCorrGetConversionFactor
+
+ @Description Generate constant used to convert a GPU time difference into
+              an OS time difference (for more info see rgx_fwif_km.h).
+
+ @Input       ui32ClockSpeed : GPU clock speed
+
+ @Return      0 on failure, conversion factor otherwise
+
+******************************************************************************/
+static inline IMG_UINT64 RGXTimeCorrGetConversionFactor(IMG_UINT32 ui32ClockSpeed)
+{
+       IMG_UINT32 ui32Remainder;
+
+       if (RGXFWIF_CONVERT_TO_KHZ(ui32ClockSpeed) == 0)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: GPU clock frequency %u is too low",
+                                __func__, ui32ClockSpeed));
+
+               return 0;
+       }
+
+       return OSDivide64r64(CRTIME_TO_CYCLES_WITH_US_SCALE << RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT,
+                            RGXFWIF_CONVERT_TO_KHZ(ui32ClockSpeed), &ui32Remainder);
+}
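+
+/*
+ * Worked example (for illustration, again taking RGX_CRTIME_TICK_IN_CYCLES as
+ * 256): for a 409,600,000 Hz core clock the factor is
+ * (256000000 << RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT) / 409600 =
+ * 625 << RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT, i.e. 625 ns per CR tick
+ * held in fixed point (256 cycles at 409.6 MHz take 625 ns).
+ */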
+
+/*!
+******************************************************************************
+
+ @Function    RGXTimeCorrBegin
+
+ @Description Generate new timer correlation data, and start tracking
+              the current GPU frequency.
+
+ @Input       hDevHandle : RGX Device Node
+ @Input       eEvent     : Event associated with the beginning of a timer
+                           correlation period
+
+ @Return      void
+
+******************************************************************************/
+void RGXTimeCorrBegin(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent);
+
+/*!
+******************************************************************************
+
+ @Function    RGXTimeCorrEnd
+
+ @Description Stop tracking the CPU and GPU timers, and if possible
+              recalculate the GPU frequency to a value which makes the timer
+              correlation data more accurate.
+
+ @Input       hDevHandle : RGX Device Node
+ @Input       eEvent     : Event associated with the end of a timer
+                           correlation period
+
+ @Return      void
+
+******************************************************************************/
+void RGXTimeCorrEnd(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent);
+
+/*!
+******************************************************************************
+
+ @Function    RGXTimeCorrRestartPeriodic
+
+ @Description Perform actions from RGXTimeCorrEnd and RGXTimeCorrBegin,
+              but only if enough time has passed since the last timer
+              correlation data was generated.
+
+ @Input       hDevHandle : RGX Device Node
+
+ @Return      void
+
+******************************************************************************/
+void RGXTimeCorrRestartPeriodic(IMG_HANDLE hDevHandle);
+
+/*!
+******************************************************************************
+
+ @Function    RGXTimeCorrGetClockns64
+
+ @Description Returns value of currently selected clock (in ns).
+
+ @Input       psDeviceNode : RGX Device Node
+ @Return      clock value from currently selected clock source
+
+******************************************************************************/
+IMG_UINT64 RGXTimeCorrGetClockns64(const PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+******************************************************************************
+
+ @Function    RGXTimeCorrGetClockus64
+
+ @Description Returns value of currently selected clock (in us).
+
+ @Input       psDeviceNode : RGX Device Node
+ @Return      clock value from currently selected clock source
+
+******************************************************************************/
+IMG_UINT64 RGXTimeCorrGetClockus64(const PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+******************************************************************************
+
+ @Function    RGXTimeCorrGetClockSource
+
+ @Description Returns currently selected clock source
+
+ @Input       psDeviceNode : RGX Device Node
+ @Return      clock source type
+
+******************************************************************************/
+RGXTIMECORR_CLOCK_TYPE RGXTimeCorrGetClockSource(const PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+******************************************************************************
+
+ @Function    RGXTimeCorrSetClockSource
+
+ @Description Sets clock source for correlation data.
+
+ @Input       psDeviceNode : RGX Device Node
+ @Input       eClockType : clock source type
+
+ @Return      error code
+
+******************************************************************************/
+PVRSRV_ERROR RGXTimeCorrSetClockSource(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                       RGXTIMECORR_CLOCK_TYPE eClockType);
+
+/*!
+******************************************************************************
+
+ @Function    RGXTimeCorrInitAppHintCallbacks
+
+ @Description Initialise apphint callbacks for timer correlation
+              related apphints.
+
+ @Input       psDeviceNode : RGX Device Node
+
+ @Return      void
+
+******************************************************************************/
+void RGXTimeCorrInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+******************************************************************************
+
+ @Function    RGXGetTimeCorrData
+
+ @Description Get a number of the most recent time correlation data points
+
+ @Input       psDeviceNode : RGX Device Node
+ @Output      psTimeCorrs  : Output array of RGXFWIF_TIME_CORR elements
+                             for data to be written to
+ @Input       ui32NumOut   : Number of elements to be written out
+
+ @Return      void
+
+******************************************************************************/
+void RGXGetTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                       RGXFWIF_TIME_CORR *psTimeCorrs,
+                                                       IMG_UINT32 ui32NumOut);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVRGXCurrentTime
+@Description    Returns the current state of the device timer
+@Input          psConnection  Connection data (unused).
+@Input          psDeviceNode  Device node.
+@Output         pui64Time     Value of the currently selected clock, in ns.
+@Return         PVRSRV_OK on success.
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVRGXCurrentTime(CONNECTION_DATA    * psConnection,
+                     PVRSRV_DEVICE_NODE * psDeviceNode,
+                     IMG_UINT64         * pui64Time);
+
+#endif /* RGXTIMECORR_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxtimerquery.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxtimerquery.c
new file mode 100644 (file)
index 0000000..d5d11bf
--- /dev/null
@@ -0,0 +1,244 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Timer queries
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX Timer queries
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxtimerquery.h"
+#include "rgxdevice.h"
+#include "rgxtimecorr.h"
+
+#include "rgxfwutils.h"
+#include "pdump_km.h"
+
+PVRSRV_ERROR
+PVRSRVRGXBeginTimerQueryKM(CONNECTION_DATA    * psConnection,
+                           PVRSRV_DEVICE_NODE * psDeviceNode,
+                           IMG_UINT32         ui32QueryId)
+{
+       PVRSRV_RGXDEV_INFO * psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       if (ui32QueryId >= RGX_MAX_TIMER_QUERIES)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+       OSLockAcquire(psDevInfo->hTimerQueryLock);
+#endif
+
+       psDevInfo->bSaveStart = IMG_TRUE;
+       psDevInfo->bSaveEnd   = IMG_TRUE;
+
+       /* clear the stamps, in case there is no Kick */
+       psDevInfo->pui64StartTimeById[ui32QueryId] = 0UL;
+       psDevInfo->pui64EndTimeById[ui32QueryId]   = 0UL;
+       OSWriteMemoryBarrier(&psDevInfo->pui64EndTimeById[ui32QueryId]);
+
+       /* save the active query index */
+       psDevInfo->ui32ActiveQueryId = ui32QueryId;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+       OSLockRelease(psDevInfo->hTimerQueryLock);
+#endif
+
+       return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PVRSRVRGXEndTimerQueryKM(CONNECTION_DATA    * psConnection,
+                         PVRSRV_DEVICE_NODE * psDeviceNode)
+{
+       PVRSRV_RGXDEV_INFO * psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+       OSLockAcquire(psDevInfo->hTimerQueryLock);
+#endif
+
+       /* clear off the flags set by Begin(). Note that _START_TIME is
+        * probably already cleared by Kick()
+        */
+       psDevInfo->bSaveStart = IMG_FALSE;
+       psDevInfo->bSaveEnd   = IMG_FALSE;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+       OSLockRelease(psDevInfo->hTimerQueryLock);
+#endif
+
+       return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PVRSRVRGXQueryTimerKM(CONNECTION_DATA    * psConnection,
+                      PVRSRV_DEVICE_NODE * psDeviceNode,
+                      IMG_UINT32         ui32QueryId,
+                      IMG_UINT64         * pui64StartTime,
+                      IMG_UINT64         * pui64EndTime)
+{
+       PVRSRV_RGXDEV_INFO * psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+       IMG_UINT32         ui32Scheduled;
+       IMG_UINT32         ui32Completed;
+       PVRSRV_ERROR       eError;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       if (ui32QueryId >= RGX_MAX_TIMER_QUERIES)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+       OSLockAcquire(psDevInfo->hTimerQueryLock);
+#endif
+
+       ui32Scheduled = psDevInfo->aui32ScheduledOnId[ui32QueryId];
+       ui32Completed = psDevInfo->pui32CompletedById[ui32QueryId];
+
+       /* If there was no kick since the Begin() on this ID we return zeros, as
+        * Begin() cleared the stamps. If there was no Begin() the returned data is
+        * undefined, but still safe from the Services point of view.
+        */
+       if (ui32Completed >= ui32Scheduled)
+       {
+               * pui64StartTime = psDevInfo->pui64StartTimeById[ui32QueryId];
+               * pui64EndTime   = psDevInfo->pui64EndTimeById[ui32QueryId];
+
+               eError = PVRSRV_OK;
+       }
+       else
+       {
+               eError = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+       }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+       OSLockRelease(psDevInfo->hTimerQueryLock);
+#endif
+       return eError;
+}
+
+
+
+/******************************************************************************
+ NOT BRIDGED/EXPORTED FUNCS
+******************************************************************************/
+/* writes a time stamp command in the client CCB */
+void
+RGXWriteTimestampCommand(void                  ** ppvPtr,
+                         RGXFWIF_CCB_CMD_TYPE     eCmdType,
+                         PRGXFWIF_TIMESTAMP_ADDR  pAddr)
+{
+       RGXFWIF_CCB_CMD_HEADER * psHeader;
+       PRGXFWIF_TIMESTAMP_ADDR * psTimestampAddr;
+
+       psHeader = (RGXFWIF_CCB_CMD_HEADER *) (*ppvPtr);
+
+       PVR_ASSERT(eCmdType == RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP
+                  || eCmdType == RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP);
+
+       psHeader->eCmdType    = eCmdType;
+       psHeader->ui32CmdSize = (sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN  - 1);
+
+       (*ppvPtr) = IMG_OFFSET_ADDR(*ppvPtr, sizeof(RGXFWIF_CCB_CMD_HEADER));
+
+       psTimestampAddr = (PRGXFWIF_TIMESTAMP_ADDR *) *ppvPtr;
+       psTimestampAddr->ui32Addr = pAddr.ui32Addr;
+
+       (*ppvPtr) = IMG_OFFSET_ADDR(*ppvPtr, psHeader->ui32CmdSize);
+}
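+
+/* Illustrative layout of what RGXWriteTimestampCommand() emits into the client
+ * CCB (sizes symbolic; RGXFWIF_FWALLOC_ALIGN and the header size come from the
+ * firmware interface headers):
+ *
+ *   [ RGXFWIF_CCB_CMD_HEADER: eCmdType = PRE/POST_TIMESTAMP,
+ *     ui32CmdSize = sizeof(RGXFWIF_DEV_VIRTADDR) rounded up to
+ *     RGXFWIF_FWALLOC_ALIGN ]
+ *   [ PRGXFWIF_TIMESTAMP_ADDR payload, padded to ui32CmdSize ]
+ *
+ * On return *ppvPtr points just past the padded payload, ready for the next
+ * CCB command.
+ */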
+
+
+void
+RGX_GetTimestampCmdHelper(PVRSRV_RGXDEV_INFO      * psDevInfo,
+                          PRGXFWIF_TIMESTAMP_ADDR * ppPreAddr,
+                          PRGXFWIF_TIMESTAMP_ADDR * ppPostAddr,
+                          PRGXFWIF_UFO_ADDR       * ppUpdate)
+{
+       if (ppPreAddr != NULL)
+       {
+               if (psDevInfo->bSaveStart)
+               {
+                       /* drop the SaveStart on the first Kick */
+                       psDevInfo->bSaveStart = IMG_FALSE;
+
+                       RGXSetFirmwareAddress(ppPreAddr,
+                                             psDevInfo->psStartTimeMemDesc,
+                                             sizeof(IMG_UINT64) * psDevInfo->ui32ActiveQueryId,
+                                             RFW_FWADDR_NOREF_FLAG);
+               }
+               else
+               {
+                       ppPreAddr->ui32Addr = 0;
+               }
+       }
+
+       if (ppPostAddr != NULL && ppUpdate != NULL)
+       {
+               if (psDevInfo->bSaveEnd)
+               {
+                       RGXSetFirmwareAddress(ppPostAddr,
+                                             psDevInfo->psEndTimeMemDesc,
+                                             sizeof(IMG_UINT64) * psDevInfo->ui32ActiveQueryId,
+                                             RFW_FWADDR_NOREF_FLAG);
+
+                       psDevInfo->aui32ScheduledOnId[psDevInfo->ui32ActiveQueryId]++;
+
+                       RGXSetFirmwareAddress(ppUpdate,
+                                             psDevInfo->psCompletedMemDesc,
+                                             sizeof(IMG_UINT32) * psDevInfo->ui32ActiveQueryId,
+                                             RFW_FWADDR_NOREF_FLAG);
+               }
+               else
+               {
+                       ppUpdate->ui32Addr   = 0;
+                       ppPostAddr->ui32Addr = 0;
+               }
+       }
+}
+
+
+/******************************************************************************
+ End of file (rgxtimerquery.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxtimerquery.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxtimerquery.h
new file mode 100644 (file)
index 0000000..8189886
--- /dev/null
@@ -0,0 +1,123 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Timer queries
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX Timer queries functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGX_TIMERQUERIES_H)
+#define RGX_TIMERQUERIES_H
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "device.h"
+#include "rgxdevice.h"
+
+#include "connection_server.h"
+
+/*************************************************************************/ /*!
+@Function       PVRSRVRGXBeginTimerQueryKM
+@Description    Opens a new timer query.
+
+@Input          ui32QueryId an identifier in the range [0, RGX_MAX_TIMER_QUERIES - 1]
+@Return         PVRSRV_OK on success.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVRGXBeginTimerQueryKM(CONNECTION_DATA    * psConnection,
+                           PVRSRV_DEVICE_NODE * psDeviceNode,
+                           IMG_UINT32         ui32QueryId);
+
+
+/*************************************************************************/ /*!
+@Function       PVRSRVRGXEndTimerQueryKM
+@Description    Closes a timer query
+
+                The absence of a ui32QueryId argument reflects the fact that
+                there cannot be overlapping open queries.
+@Return         PVRSRV_OK on success.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVRGXEndTimerQueryKM(CONNECTION_DATA    * psConnection,
+                         PVRSRV_DEVICE_NODE * psDeviceNode);
+
+
+
+/*************************************************************************/ /*!
+@Function       PVRSRVRGXQueryTimerKM
+@Description    Queries the state of the specified timer
+
+@Input          ui32QueryId an identifier in the range [0, RGX_MAX_TIMER_QUERIES - 1]
+@Output         pui64StartTime  Start timestamp of the queried period
+@Output         pui64EndTime    End timestamp of the queried period
+@Return         PVRSRV_OK                         on success.
+                PVRSRV_ERROR_RESOURCE_UNAVAILABLE if the device is still busy with
+                                                  operations from the queried period
+                other error code                  otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVRGXQueryTimerKM(CONNECTION_DATA    * psConnection,
+                      PVRSRV_DEVICE_NODE * psDeviceNode,
+                      IMG_UINT32         ui32QueryId,
+                      IMG_UINT64         * pui64StartTime,
+                      IMG_UINT64         * pui64EndTime);
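+
+/* Illustrative usage sketch only (not generated bridge code), assuming a
+ * server-side caller that already holds a valid connection and device node;
+ * the polling loop on PVRSRV_ERROR_RESOURCE_UNAVAILABLE is an assumption
+ * about how a caller might wait, not a requirement of the API:
+ *
+ *     PVRSRVRGXBeginTimerQueryKM(psConnection, psDeviceNode, 0);
+ *     ... submit the kicks to be measured ...
+ *     PVRSRVRGXEndTimerQueryKM(psConnection, psDeviceNode);
+ *
+ *     do
+ *     {
+ *         eError = PVRSRVRGXQueryTimerKM(psConnection, psDeviceNode, 0,
+ *                                        &ui64Start, &ui64End);
+ *     } while (eError == PVRSRV_ERROR_RESOURCE_UNAVAILABLE);
+ */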
+
+
+
+/******************************************************************************
+ NON BRIDGED/EXPORTED interface
+******************************************************************************/
+
+/* Write a timestamp command into the client CCB (called from the kick helpers) */
+void
+RGXWriteTimestampCommand(void               ** ppvCmd,
+                         RGXFWIF_CCB_CMD_TYPE    eCmdType,
+                         PRGXFWIF_TIMESTAMP_ADDR pAddr);
+
+/* Get the pre/post timestamp and update addresses needed by the kick */
+void
+RGX_GetTimestampCmdHelper(PVRSRV_RGXDEV_INFO      * psDevInfo,
+                          PRGXFWIF_TIMESTAMP_ADDR * ppPreAddr,
+                          PRGXFWIF_TIMESTAMP_ADDR * ppPostAddr,
+                          PRGXFWIF_UFO_ADDR       * ppUpdate);
+
+#endif /* RGX_TIMERQUERIES_H */
+
+/******************************************************************************
+ End of file (rgxtimerquery.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxutils.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxutils.c
new file mode 100644 (file)
index 0000000..866fd01
--- /dev/null
@@ -0,0 +1,221 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific utility routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgx_fwif_km.h"
+#include "pdump_km.h"
+#include "osfunc.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "power.h"
+#include "pvrsrv.h"
+#include "sync_internal.h"
+#include "rgxfwutils.h"
+
+
+PVRSRV_ERROR RGXQueryAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode,
+       const void *pvPrivateData,
+       IMG_UINT32 *pui32State)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_UNREFERENCED_PARAMETER(pvPrivateData);
+
+       if (!psDeviceNode)
+               return PVRSRV_ERROR_INVALID_PARAMS;
+
+       psDevInfo = psDeviceNode->pvDevice;
+       *pui32State = psDevInfo->eActivePMConf;
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXSetAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode,
+       const void *pvPrivateData,
+       IMG_UINT32 ui32State)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+#if !defined(NO_HARDWARE)
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+#endif
+
+       PVR_UNREFERENCED_PARAMETER(pvPrivateData);
+
+       if (!psDeviceNode || !psDeviceNode->pvDevice)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       if (RGX_ACTIVEPM_FORCE_OFF != ui32State)
+       {
+               return PVRSRV_ERROR_NOT_SUPPORTED;
+       }
+
+#if !defined(NO_HARDWARE)
+       psDevInfo = psDeviceNode->pvDevice;
+
+       if (psDevInfo->pvAPMISRData)
+       {
+               psDevInfo->eActivePMConf = RGX_ACTIVEPM_FORCE_OFF;
+               psDevInfo->pvAPMISRData = NULL;
+               eError = PVRSRVSetDeviceDefaultPowerState((PPVRSRV_DEVICE_NODE)psDeviceNode,
+                                                         PVRSRV_DEV_POWER_STATE_ON);
+       }
+#endif
+
+       return eError;
+}
+
+PVRSRV_ERROR RGXQueryPdumpPanicDisable(const PVRSRV_DEVICE_NODE *psDeviceNode,
+       const void *pvPrivateData,
+       IMG_BOOL *pbDisabled)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_UNREFERENCED_PARAMETER(pvPrivateData);
+
+       if (!psDeviceNode || !psDeviceNode->pvDevice)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       psDevInfo = psDeviceNode->pvDevice;
+
+       *pbDisabled = !psDevInfo->bPDPEnabled;
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXSetPdumpPanicDisable(const PVRSRV_DEVICE_NODE *psDeviceNode,
+       const void *pvPrivateData,
+       IMG_BOOL bDisable)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_UNREFERENCED_PARAMETER(pvPrivateData);
+
+       if (!psDeviceNode || !psDeviceNode->pvDevice)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       psDevInfo = psDeviceNode->pvDevice;
+
+       psDevInfo->bPDPEnabled = !bDisable;
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXGetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo,
+                               IMG_UINT32 *pui32DeviceFlags)
+{
+       if (!pui32DeviceFlags || !psDevInfo)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       *pui32DeviceFlags = psDevInfo->ui32DeviceFlags;
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXSetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo,
+                               IMG_UINT32 ui32Config,
+                               IMG_BOOL bSetNotClear)
+{
+       if (!psDevInfo)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       if ((ui32Config & ~RGXKM_DEVICE_STATE_MASK) != 0)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Bits outside of device state mask set (input: 0x%x, mask: 0x%x)",
+                                __func__, ui32Config, RGXKM_DEVICE_STATE_MASK));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       if (bSetNotClear)
+       {
+               psDevInfo->ui32DeviceFlags |= ui32Config;
+       }
+       else
+       {
+               psDevInfo->ui32DeviceFlags &= ~ui32Config;
+       }
+
+       return PVRSRV_OK;
+}
+
+inline const char * RGXStringifyKickTypeDM(RGX_KICK_TYPE_DM eKickTypeDM)
+{
+       PVR_ASSERT(eKickTypeDM < RGX_KICK_TYPE_DM_LAST);
+
+       switch (eKickTypeDM) {
+               case RGX_KICK_TYPE_DM_GP:
+                       return "GP ";
+               case RGX_KICK_TYPE_DM_TDM_2D:
+                       return "TDM/2D ";
+               case RGX_KICK_TYPE_DM_TA:
+                       return "TA ";
+               case RGX_KICK_TYPE_DM_3D:
+                       return "3D ";
+               case RGX_KICK_TYPE_DM_CDM:
+                       return "CDM ";
+               case RGX_KICK_TYPE_DM_RTU:
+                       return "RTU ";
+               case RGX_KICK_TYPE_DM_SHG:
+                       return "SHG ";
+               case RGX_KICK_TYPE_DM_TQ2D:
+                       return "TQ2D ";
+               case RGX_KICK_TYPE_DM_TQ3D:
+                       return "TQ3D ";
+               default:
+                       return "Invalid DM ";
+       }
+}
+
+/******************************************************************************
+ End of file (rgxutils.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxutils.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxutils.h
new file mode 100644 (file)
index 0000000..6709863
--- /dev/null
@@ -0,0 +1,185 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific utility routines declarations
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Inline functions/structures specific to RGX
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+
+/*!
+******************************************************************************
+
+ @Function      RGXQueryAPMState
+
+ @Description   Query the state of the APM configuration
+
+ @Input         psDeviceNode : The device node
+
+ @Input         pvPrivateData: Unused (required for AppHint callback)
+
+ @Output        pui32State   : The APM configuration state
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXQueryAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode,
+       const void *pvPrivateData,
+       IMG_UINT32 *pui32State);
+
+/*!
+******************************************************************************
+
+ @Function      RGXSetAPMState
+
+ @Description   Set the APM configuration state. Currently only 'OFF' is
+                supported
+
+ @Input         psDeviceNode : The device node
+
+ @Input         pvPrivateData: Unused (required for AppHint callback)
+
+ @Input         ui32State    : The requested APM configuration state
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXSetAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode,
+       const void *pvPrivateData,
+       IMG_UINT32 ui32State);
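+
+/* Illustrative sketch only: query the current APM state and then force it
+ * off. RGX_ACTIVEPM_FORCE_OFF is the only value RGXSetAPMState currently
+ * accepts; any other value returns PVRSRV_ERROR_NOT_SUPPORTED.
+ *
+ *     IMG_UINT32 ui32State;
+ *
+ *     eError = RGXQueryAPMState(psDeviceNode, NULL, &ui32State);
+ *     if (eError == PVRSRV_OK && ui32State != RGX_ACTIVEPM_FORCE_OFF)
+ *     {
+ *         eError = RGXSetAPMState(psDeviceNode, NULL, RGX_ACTIVEPM_FORCE_OFF);
+ *     }
+ */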
+
+/*!
+******************************************************************************
+
+ @Function      RGXQueryPdumpPanicDisable
+
+ @Description   Get the PDump Panic Enable configuration state.
+
+ @Input         psDeviceNode : The device node
+
+ @Input         pvPrivateData: Unused (required for AppHint callback)
+
+ @Output        pbDisabled   : Set to IMG_TRUE if PDump Panic is disabled
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXQueryPdumpPanicDisable(const PVRSRV_DEVICE_NODE *psDeviceNode,
+       const void *pvPrivateData,
+       IMG_BOOL *pbDisabled);
+
+/*!
+******************************************************************************
+
+ @Function      RGXSetPdumpPanicDisable
+
+ @Description   Set the PDump Panic Enable flag
+
+ @Input         psDeviceNode : The device node
+
+ @Input         pvPrivateData: Unused (required for AppHint callback)
+
+ @Input         bDisable      : The requested configuration state
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXSetPdumpPanicDisable(const PVRSRV_DEVICE_NODE *psDeviceNode,
+       const void *pvPrivateData,
+       IMG_BOOL bDisable);
+
+/*!
+******************************************************************************
+
+ @Function      RGXGetDeviceFlags
+
+ @Description   Get the device flags for a given device
+
+ @Input         psDevInfo        : The device descriptor to query
+
+ @Output        pui32DeviceFlags : The current state of the device flags
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXGetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo,
+                               IMG_UINT32 *pui32DeviceFlags);
+
+/*!
+******************************************************************************
+
+ @Function      RGXSetDeviceFlags
+
+ @Description   Set the device flags for a given device
+
+ @Input         psDevInfo : The device descriptor to modify
+
+ @Input         ui32Config : The device flags to modify
+
+ @Input         bSetNotClear : Set or clear the specified flags
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXSetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo,
+                               IMG_UINT32 ui32Config,
+                               IMG_BOOL bSetNotClear);
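+
+/* Illustrative sketch only: set and later clear a device flag. The name
+ * ui32FlagToToggle is a placeholder for the example; any bits inside
+ * RGXKM_DEVICE_STATE_MASK are accepted, bits outside it are rejected with
+ * PVRSRV_ERROR_INVALID_PARAMS.
+ *
+ *     eError = RGXSetDeviceFlags(psDevInfo, ui32FlagToToggle, IMG_TRUE);
+ *     ...
+ *     eError = RGXSetDeviceFlags(psDevInfo, ui32FlagToToggle, IMG_FALSE);
+ */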
+
+/*!
+******************************************************************************
+
+ @Function    RGXStringifyKickTypeDM
+
+ @Description Returns the stringified name of the given kick type DM
+
+ @Input       eKickTypeDM : The kick type DM
+
+ @Return      String containing the kick type DM name
+
+******************************************************************************/
+const char* RGXStringifyKickTypeDM(RGX_KICK_TYPE_DM eKickTypeDM);
+
+#define RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(bitmask, eKickTypeDM) (((bitmask) & (eKickTypeDM)) ? RGXStringifyKickTypeDM(eKickTypeDM) : "")
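+
+/* Illustrative use of the macro above (a sketch, not taken from a call site):
+ * building a debug string from a caller-provided bitmask of kick types, e.g.
+ *
+ *     PVR_LOG(("Kicks seen: %s%s",
+ *              RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32KickMask, RGX_KICK_TYPE_DM_TA),
+ *              RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32KickMask, RGX_KICK_TYPE_DM_3D)));
+ *
+ * where ui32KickMask is assumed to hold RGX_KICK_TYPE_DM_* bits.
+ */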
+/******************************************************************************
+ End of file (rgxutils.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxworkest.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxworkest.c
new file mode 100644 (file)
index 0000000..02e0683
--- /dev/null
@@ -0,0 +1,624 @@
+/*************************************************************************/ /*!
+@File           rgxworkest.c
+@Title          RGX Workload Estimation Functionality
+@Codingstyle    IMG
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Kernel mode workload estimation functionality.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxdevice.h"
+#include "rgxworkest.h"
+#include "rgxfwutils.h"
+#include "rgxpdvfs.h"
+#include "rgx_options.h"
+#include "device.h"
+#include "hash.h"
+#include "pvr_debug.h"
+
+#define ROUND_DOWN_TO_NEAREST_1024(number) (((number) >> 10) << 10)
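+/* e.g. ROUND_DOWN_TO_NEAREST_1024(1500) == 1024 and
+ * ROUND_DOWN_TO_NEAREST_1024(1023) == 0; used below to coarsen workload
+ * deadlines so that near-identical deadlines collapse to the same value. */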
+
+static inline IMG_BOOL _WorkEstEnabled(void)
+{
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+       if (psPVRSRVData->sDriverInfo.sKMBuildInfo.ui32BuildOptions &
+           psPVRSRVData->sDriverInfo.sUMBuildInfo.ui32BuildOptions &
+           OPTIONS_WORKLOAD_ESTIMATION_MASK)
+       {
+               return IMG_TRUE;
+       }
+
+       return IMG_FALSE;
+}
+
+static inline IMG_UINT32 _WorkEstDoHash(IMG_UINT32 ui32Input)
+{
+       IMG_UINT32 ui32HashPart;
+
+       /* Hash function borrowed from hash.c */
+       ui32HashPart = ui32Input;
+       ui32HashPart += (ui32HashPart << 12);
+       ui32HashPart ^= (ui32HashPart >> 22);
+       ui32HashPart += (ui32HashPart << 4);
+       ui32HashPart ^= (ui32HashPart >> 9);
+       ui32HashPart += (ui32HashPart << 10);
+       ui32HashPart ^= (ui32HashPart >> 2);
+       ui32HashPart += (ui32HashPart << 7);
+       ui32HashPart ^= (ui32HashPart >> 12);
+
+       return ui32HashPart;
+}
+
+/*! Hash functions for TA/3D workload estimation */
+IMG_BOOL WorkEstHashCompareTA3D(size_t uKeySize, void *pKey1, void *pKey2);
+IMG_UINT32 WorkEstHashFuncTA3D(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen);
+
+/*! Hash functions for compute workload estimation */
+IMG_BOOL WorkEstHashCompareCompute(size_t uKeySize, void *pKey1, void *pKey2);
+IMG_UINT32 WorkEstHashFuncCompute(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen);
+
+/*! Hash functions for TDM/transfer workload estimation */
+IMG_BOOL WorkEstHashCompareTDM(size_t uKeySize, void *pKey1, void *pKey2);
+IMG_UINT32 WorkEstHashFuncTDM(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen);
+
+
+IMG_BOOL WorkEstHashCompareTA3D(size_t uKeySize, void *pKey1, void *pKey2)
+{
+       RGX_WORKLOAD *psWorkload1;
+       RGX_WORKLOAD *psWorkload2;
+       PVR_UNREFERENCED_PARAMETER(uKeySize);
+
+       if (pKey1 && pKey2)
+       {
+               psWorkload1 = *((RGX_WORKLOAD **)pKey1);
+               psWorkload2 = *((RGX_WORKLOAD **)pKey2);
+
+               PVR_ASSERT(psWorkload1);
+               PVR_ASSERT(psWorkload2);
+
+               if (psWorkload1->sTA3D.ui32RenderTargetSize  == psWorkload2->sTA3D.ui32RenderTargetSize &&
+                   psWorkload1->sTA3D.ui32NumberOfDrawCalls == psWorkload2->sTA3D.ui32NumberOfDrawCalls &&
+                   psWorkload1->sTA3D.ui32NumberOfIndices   == psWorkload2->sTA3D.ui32NumberOfIndices &&
+                   psWorkload1->sTA3D.ui32NumberOfMRTs      == psWorkload2->sTA3D.ui32NumberOfMRTs)
+               {
+                       /* This is added to allow this memory to be freed */
+                       *(uintptr_t*)pKey2 = *(uintptr_t*)pKey1;
+                       return IMG_TRUE;
+               }
+       }
+
+       return IMG_FALSE;
+}
+
+IMG_UINT32 WorkEstHashFuncTA3D(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen)
+{
+       RGX_WORKLOAD *psWorkload = *((RGX_WORKLOAD**)pKey);
+       IMG_UINT32 ui32HashKey = 0;
+       PVR_UNREFERENCED_PARAMETER(uHashTabLen);
+       PVR_UNREFERENCED_PARAMETER(uKeySize);
+
+       /* Hash key predicated on multiple render target attributes */
+       ui32HashKey += _WorkEstDoHash(psWorkload->sTA3D.ui32RenderTargetSize);
+       ui32HashKey += _WorkEstDoHash(psWorkload->sTA3D.ui32NumberOfDrawCalls);
+       ui32HashKey += _WorkEstDoHash(psWorkload->sTA3D.ui32NumberOfIndices);
+       ui32HashKey += _WorkEstDoHash(psWorkload->sTA3D.ui32NumberOfMRTs);
+
+       return ui32HashKey;
+}
+
+IMG_BOOL WorkEstHashCompareCompute(size_t uKeySize, void *pKey1, void *pKey2)
+{
+       RGX_WORKLOAD *psWorkload1;
+       RGX_WORKLOAD *psWorkload2;
+       PVR_UNREFERENCED_PARAMETER(uKeySize);
+
+       if (pKey1 && pKey2)
+       {
+               psWorkload1 = *((RGX_WORKLOAD **)pKey1);
+               psWorkload2 = *((RGX_WORKLOAD **)pKey2);
+
+               PVR_ASSERT(psWorkload1);
+               PVR_ASSERT(psWorkload2);
+
+               if (psWorkload1->sCompute.ui32NumberOfWorkgroups == psWorkload2->sCompute.ui32NumberOfWorkgroups &&
+                   psWorkload1->sCompute.ui32NumberOfWorkitems  == psWorkload2->sCompute.ui32NumberOfWorkitems)
+               {
+                       /* This is added to allow this memory to be freed */
+                       *(uintptr_t*)pKey2 = *(uintptr_t*)pKey1;
+                       return IMG_TRUE;
+               }
+       }
+
+       return IMG_FALSE;
+}
+
+IMG_UINT32 WorkEstHashFuncCompute(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen)
+{
+       RGX_WORKLOAD *psWorkload = *((RGX_WORKLOAD**)pKey);
+       IMG_UINT32 ui32HashKey = 0;
+       PVR_UNREFERENCED_PARAMETER(uHashTabLen);
+       PVR_UNREFERENCED_PARAMETER(uKeySize);
+
+       /* Hash key predicated on the compute workload attributes */
+       ui32HashKey += _WorkEstDoHash(psWorkload->sCompute.ui32NumberOfWorkgroups);
+       ui32HashKey += _WorkEstDoHash(psWorkload->sCompute.ui32NumberOfWorkitems);
+       return ui32HashKey;
+}
+
+IMG_BOOL WorkEstHashCompareTDM(size_t uKeySize, void *pKey1, void *pKey2)
+{
+       RGX_WORKLOAD *psWorkload1;
+       RGX_WORKLOAD *psWorkload2;
+       PVR_UNREFERENCED_PARAMETER(uKeySize);
+
+       if (pKey1 && pKey2)
+       {
+               psWorkload1 = *((RGX_WORKLOAD **)pKey1);
+               psWorkload2 = *((RGX_WORKLOAD **)pKey2);
+
+               PVR_ASSERT(psWorkload1);
+               PVR_ASSERT(psWorkload2);
+
+               if (psWorkload1->sTransfer.ui32Characteristic1 == psWorkload2->sTransfer.ui32Characteristic1 &&
+                   psWorkload1->sTransfer.ui32Characteristic2 == psWorkload2->sTransfer.ui32Characteristic2)
+               {
+                       /* This is added to allow this memory to be freed */
+                       *(uintptr_t*)pKey2 = *(uintptr_t*)pKey1;
+                       return IMG_TRUE;
+               }
+       }
+
+       return IMG_FALSE;
+}
+
+IMG_UINT32 WorkEstHashFuncTDM(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen)
+{
+       RGX_WORKLOAD *psWorkload = *((RGX_WORKLOAD**)pKey);
+       IMG_UINT32 ui32HashKey = 0;
+       PVR_UNREFERENCED_PARAMETER(uHashTabLen);
+       PVR_UNREFERENCED_PARAMETER(uKeySize);
+
+       /* Hash key predicated on transfer src/dest attributes */
+       ui32HashKey += _WorkEstDoHash(psWorkload->sTransfer.ui32Characteristic1);
+       ui32HashKey += _WorkEstDoHash(psWorkload->sTransfer.ui32Characteristic2);
+
+       return ui32HashKey;
+}
+
+void WorkEstHashLockCreate(POS_LOCK *ppsHashLock)
+{
+       if (*ppsHashLock == NULL)
+       {
+               OSLockCreate(ppsHashLock);
+       }
+}
+
+void WorkEstHashLockDestroy(POS_LOCK psWorkEstHashLock)
+{
+       if (psWorkEstHashLock != NULL)
+       {
+               OSLockDestroy(psWorkEstHashLock);
+               psWorkEstHashLock = NULL;
+       }
+}
+
+void WorkEstCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       RGXFWIF_WORKEST_FWCCB_CMD *psFwCCBCmd;
+       IMG_UINT8 *psFWCCB = psDevInfo->psWorkEstFirmwareCCB;
+       RGXFWIF_CCB_CTL *psFWCCBCtl = psDevInfo->psWorkEstFirmwareCCBCtl;
+
+       while (psFWCCBCtl->ui32ReadOffset != psFWCCBCtl->ui32WriteOffset)
+       {
+               PVRSRV_ERROR eError;
+
+               /* Point to the next command */
+               psFwCCBCmd = (RGXFWIF_WORKEST_FWCCB_CMD *)((uintptr_t)psFWCCB + psFWCCBCtl->ui32ReadOffset * sizeof(RGXFWIF_WORKEST_FWCCB_CMD));
+
+               eError = WorkEstRetire(psDevInfo, psFwCCBCmd);
+               PVR_LOG_IF_ERROR(eError, "WorkEstCheckFirmwareCCB: WorkEstRetire failed");
+
+               /* Update read offset */
+               psFWCCBCtl->ui32ReadOffset = (psFWCCBCtl->ui32ReadOffset + 1) & psFWCCBCtl->ui32WrapMask;
+       }
+}
+
+PVRSRV_ERROR WorkEstPrepare(PVRSRV_RGXDEV_INFO        *psDevInfo,
+                            WORKEST_HOST_DATA         *psWorkEstHostData,
+                            WORKLOAD_MATCHING_DATA    *psWorkloadMatchingData,
+                            const RGXFWIF_CCB_CMD_TYPE eDMCmdType,
+                            const RGX_WORKLOAD        *psWorkloadCharsIn,
+                            IMG_UINT64                ui64DeadlineInus,
+                            RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData)
+{
+       RGX_WORKLOAD          *psWorkloadCharacteristics;
+       IMG_UINT64            *pui64CyclePrediction;
+       IMG_UINT64            ui64CurrentTime;
+       WORKEST_RETURN_DATA   *psReturnData;
+       IMG_UINT32            ui32ReturnDataWO;
+#if defined(SUPPORT_SOC_TIMER)
+       PVRSRV_DEVICE_CONFIG  *psDevConfig;
+       IMG_UINT64            ui64CurrentSoCTime;
+#endif
+       PVRSRV_ERROR          eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+       if (!_WorkEstEnabled())
+       {
+               /* No error message to avoid excessive messages */
+               return PVRSRV_OK;
+       }
+
+       if (eDMCmdType == RGXFWIF_CCB_CMD_TYPE_NULL)
+       {
+               /* No workload, only fence updates */
+               return PVRSRV_OK;
+       }
+
+#if !defined(PVRSRV_NEED_PVR_DPF)
+       PVR_UNREFERENCED_PARAMETER(eDMCmdType);
+#endif
+
+       /* Validate all required objects required for preparing work estimation */
+       PVR_LOG_RETURN_IF_FALSE(psDevInfo, "device info not available", eError);
+       PVR_LOG_RETURN_IF_FALSE(psWorkEstHostData, "host data not available", eError);
+       PVR_LOG_RETURN_IF_FALSE(psWorkloadMatchingData, "Workload Matching Data not available", eError);
+       PVR_LOG_RETURN_IF_FALSE(psWorkloadMatchingData->psHashLock, "hash lock not available", eError);
+       PVR_LOG_RETURN_IF_FALSE(psWorkloadMatchingData->psHashTable, "hash table not available", eError);
+
+#if defined(SUPPORT_SOC_TIMER)
+       psDevConfig = psDevInfo->psDeviceNode->psDevConfig;
+       PVR_LOG_RETURN_IF_FALSE(psDevConfig->pfnSoCTimerRead, "SoC timer not available", eError);
+       ui64CurrentSoCTime = psDevConfig->pfnSoCTimerRead(psDevConfig->hSysData);
+#endif
+
+       eError = OSClockMonotonicus64(&ui64CurrentTime);
+       PVR_LOG_RETURN_IF_ERROR(eError, "unable to access System Monotonic clock");
+
+       OSLockAcquire(psDevInfo->hWorkEstLock);
+
+       /* Select the next index for the return data and update it (serialised by hWorkEstLock, held above) */
+       ui32ReturnDataWO = psDevInfo->ui32ReturnDataWO;
+       psDevInfo->ui32ReturnDataWO = (ui32ReturnDataWO + 1) & RETURN_DATA_ARRAY_WRAP_MASK;
+
+       /* Index for the return data passed to/from the firmware. */
+       psWorkEstKickData->ui16ReturnDataIndex = ui32ReturnDataWO;
+       if (ui64DeadlineInus > ui64CurrentTime)
+       {
+               /* Round the deadline down so that deadlines with only a minor spread do not flood the FW workload array. */
+#if defined(SUPPORT_SOC_TIMER)
+               IMG_UINT64 ui64TimeDelta = (ui64DeadlineInus - ui64CurrentTime) * SOC_TIMER_FREQ;
+               psWorkEstKickData->ui64Deadline = ROUND_DOWN_TO_NEAREST_1024(ui64CurrentSoCTime + ui64TimeDelta);
+#else
+               psWorkEstKickData->ui64Deadline = ROUND_DOWN_TO_NEAREST_1024(ui64DeadlineInus);
+#endif
+       }
+       else
+       {
+               /* If deadline has already passed, assign zero to suggest full frequency */
+               psWorkEstKickData->ui64Deadline = 0;
+       }
+
+       /* Set up data for the return path to process the workload; the matching data is needed
+          as it holds the hash data, the host data is needed for completion updates */
+       psReturnData = &psDevInfo->asReturnData[ui32ReturnDataWO];
+       psReturnData->psWorkloadMatchingData = psWorkloadMatchingData;
+       psReturnData->psWorkEstHostData = psWorkEstHostData;
+
+       /* The workload characteristic is needed in the return data for the matching
+          of future workloads via the hash. */
+       psWorkloadCharacteristics = &psReturnData->sWorkloadCharacteristics;
+       memcpy(psWorkloadCharacteristics, psWorkloadCharsIn, sizeof(RGX_WORKLOAD));
+
+       OSLockRelease(psDevInfo->hWorkEstLock);
+
+       /* Acquire the lock to access hash */
+       OSLockAcquire(psWorkloadMatchingData->psHashLock);
+
+       /* Check if there is a prediction for this workload */
+       pui64CyclePrediction = (IMG_UINT64*) HASH_Retrieve(psWorkloadMatchingData->psHashTable,
+                                                                                                          (uintptr_t)psWorkloadCharacteristics);
+
+       /* Release lock */
+       OSLockRelease(psWorkloadMatchingData->psHashLock);
+
+       if (pui64CyclePrediction != NULL)
+       {
+               /* Cycle prediction is available, store this prediction */
+               psWorkEstKickData->ui32CyclesPrediction = *pui64CyclePrediction;
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+               if (*pui64CyclePrediction >= IMG_UINT32_MAX)
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "Workload estimate overflow:"
+                                       " %" IMG_UINT64_FMTSPEC, *pui64CyclePrediction));
+               }
+
+               switch (eDMCmdType)
+               {
+                       case RGXFWIF_CCB_CMD_TYPE_GEOM:
+                       case RGXFWIF_CCB_CMD_TYPE_3D:
+                       PVR_DPF((PVR_DBG_MESSAGE, "%s: RT size = %u, draw count = %u, indices = %u, prediction = " IMG_DEVMEM_SIZE_FMTSPEC,
+                                        __func__,
+                                        psWorkloadCharacteristics->sTA3D.ui32RenderTargetSize,
+                                        psWorkloadCharacteristics->sTA3D.ui32NumberOfDrawCalls,
+                                        psWorkloadCharacteristics->sTA3D.ui32NumberOfIndices,
+                                        *pui64CyclePrediction));
+                               break;
+                       case RGXFWIF_CCB_CMD_TYPE_CDM:
+                       PVR_DPF((PVR_DBG_MESSAGE, "%s: Number of workgroups = %u, max workgroup size = %u, prediction = " IMG_DEVMEM_SIZE_FMTSPEC,
+                                        __func__,
+                                        psWorkloadCharacteristics->sCompute.ui32NumberOfWorkgroups,
+                                        psWorkloadCharacteristics->sCompute.ui32NumberOfWorkitems,
+                                        *pui64CyclePrediction));
+                               break;
+                       case RGXFWIF_CCB_CMD_TYPE_TQ_TDM:
+                       PVR_DPF((PVR_DBG_MESSAGE, "%s: Dest size = %u, Pixel format ID = %u, prediction = " IMG_DEVMEM_SIZE_FMTSPEC,
+                                        __func__,
+                                        psWorkloadCharacteristics->sTransfer.ui32Characteristic1,
+                                        psWorkloadCharacteristics->sTransfer.ui32Characteristic2,
+                                        *pui64CyclePrediction));
+                               break;
+                       default:
+                               break;
+               }
+#endif
+       }
+       else
+       {
+               /* There is no prediction */
+               psWorkEstKickData->ui32CyclesPrediction = 0;
+       }
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR WorkEstRetire(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                  RGXFWIF_WORKEST_FWCCB_CMD *psReturnCmd)
+{
+       RGX_WORKLOAD           *psWorkloadCharacteristics;
+       WORKLOAD_MATCHING_DATA *psWorkloadMatchingData;
+       IMG_UINT64             *paui64WorkloadHashData;
+       RGX_WORKLOAD           *pasWorkloadHashKeys;
+       IMG_UINT32             ui32HashArrayWO;
+       IMG_UINT64             *pui64CyclesTaken;
+       WORKEST_RETURN_DATA    *psReturnData;
+       WORKEST_HOST_DATA      *psWorkEstHostData;
+
+       if (!_WorkEstEnabled())
+       {
+               /* No error message to avoid excessive messages */
+               return PVRSRV_OK;
+       }
+
+       PVR_LOG_RETURN_IF_FALSE(psReturnCmd,
+                               "WorkEstRetire: Missing return command",
+                               PVRSRV_ERROR_INVALID_PARAMS);
+
+       if (psReturnCmd->ui16ReturnDataIndex >= RETURN_DATA_ARRAY_SIZE)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "WorkEstRetire: Handle reference out-of-bounds:"
+                       " %u >= %u",
+                       psReturnCmd->ui16ReturnDataIndex,
+                       RETURN_DATA_ARRAY_SIZE));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       OSLockAcquire(psDevInfo->hWorkEstLock);
+
+       /* Retrieve/validate the return data from this completed workload */
+       psReturnData = &psDevInfo->asReturnData[psReturnCmd->ui16ReturnDataIndex];
+       psWorkloadCharacteristics = &psReturnData->sWorkloadCharacteristics;
+       psWorkEstHostData = psReturnData->psWorkEstHostData;
+       PVR_LOG_GOTO_IF_FALSE(psWorkEstHostData,
+                             "WorkEstRetire: Missing host data",
+                             unlock_workest);
+
+       /* Retrieve/validate completed workload matching data */
+       psWorkloadMatchingData = psReturnData->psWorkloadMatchingData;
+       PVR_LOG_GOTO_IF_FALSE(psWorkloadMatchingData,
+                             "WorkEstRetire: Missing matching data",
+                             unlock_workest);
+       PVR_LOG_GOTO_IF_FALSE(psWorkloadMatchingData->psHashTable,
+                             "WorkEstRetire: Missing hash",
+                             unlock_workest);
+       PVR_LOG_GOTO_IF_FALSE(psWorkloadMatchingData->psHashLock,
+                             "WorkEstRetire: Missing hash/lock",
+                             unlock_workest);
+       paui64WorkloadHashData = psWorkloadMatchingData->aui64HashData;
+       pasWorkloadHashKeys = psWorkloadMatchingData->asHashKeys;
+       ui32HashArrayWO = psWorkloadMatchingData->ui32HashArrayWO;
+
+       OSLockRelease(psDevInfo->hWorkEstLock);
+
+       OSLockAcquire(psWorkloadMatchingData->psHashLock);
+
+       /* Update workload prediction by removing old hash entry (if any)
+        * & inserting new hash entry */
+       pui64CyclesTaken = (IMG_UINT64*) HASH_Remove(psWorkloadMatchingData->psHashTable,
+                                                                                                (uintptr_t)psWorkloadCharacteristics);
+
+       if (paui64WorkloadHashData[ui32HashArrayWO] > 0)
+       {
+               /* Out-of-space so remove the oldest hash data before it becomes
+                * overwritten */
+               RGX_WORKLOAD *psWorkloadHashKey = &pasWorkloadHashKeys[ui32HashArrayWO];
+               (void) HASH_Remove(psWorkloadMatchingData->psHashTable, (uintptr_t)psWorkloadHashKey);
+       }
+
+       if (pui64CyclesTaken == NULL)
+       {
+               /* There is no existing entry for this workload characteristics,
+                * store it */
+               paui64WorkloadHashData[ui32HashArrayWO] = psReturnCmd->ui32CyclesTaken;
+               pasWorkloadHashKeys[ui32HashArrayWO] = *psWorkloadCharacteristics;
+       }
+       else
+       {
+               /* Found prior entry for workload characteristics, average with
+                * completed; also reset the old value to 0 so it is known to be
+                * invalid */
+               paui64WorkloadHashData[ui32HashArrayWO] = (*pui64CyclesTaken + psReturnCmd->ui32CyclesTaken)/2;
+               pasWorkloadHashKeys[ui32HashArrayWO] = *psWorkloadCharacteristics;
+               *pui64CyclesTaken = 0;
+       }
+
+       /* Hash insertion should not fail but if it does best we can do is to exit
+        * gracefully and not update the FW received counter */
+       if (IMG_TRUE != HASH_Insert((HASH_TABLE*)psWorkloadMatchingData->psHashTable,
+                                                               (uintptr_t)&pasWorkloadHashKeys[ui32HashArrayWO],
+                                                               (uintptr_t)&paui64WorkloadHashData[ui32HashArrayWO]))
+       {
+               PVR_ASSERT(0);
+               PVR_LOG(("WorkEstRetire: HASH_Insert failed"));
+       }
+
+       psWorkloadMatchingData->ui32HashArrayWO = (ui32HashArrayWO + 1) & WORKLOAD_HASH_WRAP_MASK;
+
+       OSLockRelease(psWorkloadMatchingData->psHashLock);
+
+       /* Update the received counter so that the FW is able to check as to whether
+        * all the workloads connected to a render context are finished.
+        * Note: needs to be done also for *unlock_workest* label below. */
+       psWorkEstHostData->ui32WorkEstCCBReceived++;
+
+       return PVRSRV_OK;
+
+unlock_workest:
+       OSLockRelease(psDevInfo->hWorkEstLock);
+       /* Host data may be missing here (first goto above); avoid a NULL dereference */
+       if (psWorkEstHostData != NULL)
+       {
+               psWorkEstHostData->ui32WorkEstCCBReceived++;
+       }
+
+       return PVRSRV_ERROR_INVALID_PARAMS;
+}
+
+static void _WorkEstInit(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                WORKLOAD_MATCHING_DATA *psWorkloadMatchingData,
+                                                HASH_FUNC *pfnWorkEstHashFunc,
+                                                HASH_KEY_COMP *pfnWorkEstHashCompare)
+{
+       HASH_TABLE *psWorkloadHashTable;
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+
+       /* Create a lock to protect the per-DM hash table */
+       WorkEstHashLockCreate(&psWorkloadMatchingData->psHashLock);
+
+       /* Create hash table for the per-DM workload matching */
+       psWorkloadHashTable = HASH_Create_Extended(WORKLOAD_HASH_SIZE,
+                                                                                          sizeof(RGX_WORKLOAD *),
+                                                                                          pfnWorkEstHashFunc,
+                                                                                          pfnWorkEstHashCompare);
+       psWorkloadMatchingData->psHashTable = psWorkloadHashTable;
+}
+
+static void _WorkEstDeInit(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                  WORKLOAD_MATCHING_DATA *psWorkloadMatchingData)
+{
+       HASH_TABLE        *psWorkloadHashTable;
+       RGX_WORKLOAD      *pasWorkloadHashKeys;
+       RGX_WORKLOAD      *psWorkloadHashKey;
+       IMG_UINT64        *paui64WorkloadCycleData;
+       IMG_UINT32        ui32Itr;
+
+       /* Tear down per-DM hash */
+       pasWorkloadHashKeys = psWorkloadMatchingData->asHashKeys;
+       paui64WorkloadCycleData = psWorkloadMatchingData->aui64HashData;
+       psWorkloadHashTable = psWorkloadMatchingData->psHashTable;
+
+       if (psWorkloadHashTable)
+       {
+               for (ui32Itr = 0; ui32Itr < WORKLOAD_HASH_SIZE; ui32Itr++)
+               {
+                       if (paui64WorkloadCycleData[ui32Itr] > 0)
+                       {
+                               psWorkloadHashKey = &pasWorkloadHashKeys[ui32Itr];
+                               HASH_Remove(psWorkloadHashTable, (uintptr_t)psWorkloadHashKey);
+                       }
+               }
+
+               HASH_Delete(psWorkloadHashTable);
+       }
+
+       /* Remove the hash lock */
+       WorkEstHashLockDestroy(psWorkloadMatchingData->psHashLock);
+
+       return;
+}
+
+void WorkEstInitTA3D(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData)
+{
+       _WorkEstInit(psDevInfo,
+               &psWorkEstData->uWorkloadMatchingData.sTA3D.sDataTA,
+               (HASH_FUNC *)WorkEstHashFuncTA3D,
+               (HASH_KEY_COMP *)WorkEstHashCompareTA3D);
+       _WorkEstInit(psDevInfo,
+               &psWorkEstData->uWorkloadMatchingData.sTA3D.sData3D,
+               (HASH_FUNC *)WorkEstHashFuncTA3D,
+               (HASH_KEY_COMP *)WorkEstHashCompareTA3D);
+}
+
+void WorkEstDeInitTA3D(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData)
+{
+       _WorkEstDeInit(psDevInfo, &psWorkEstData->uWorkloadMatchingData.sTA3D.sDataTA);
+       _WorkEstDeInit(psDevInfo, &psWorkEstData->uWorkloadMatchingData.sTA3D.sData3D);
+}
+
+void WorkEstInitCompute(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData)
+{
+       _WorkEstInit(psDevInfo,
+               &psWorkEstData->uWorkloadMatchingData.sCompute.sDataCDM,
+               (HASH_FUNC *)WorkEstHashFuncCompute,
+               (HASH_KEY_COMP *)WorkEstHashCompareCompute);
+}
+
+void WorkEstDeInitCompute(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData)
+{
+       _WorkEstDeInit(psDevInfo, &psWorkEstData->uWorkloadMatchingData.sCompute.sDataCDM);
+}
+
+void WorkEstInitTDM(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData)
+{
+       _WorkEstInit(psDevInfo,
+               &psWorkEstData->uWorkloadMatchingData.sTransfer.sDataTDM,
+               (HASH_FUNC *)WorkEstHashFuncTDM,
+               (HASH_KEY_COMP *)WorkEstHashCompareTDM);
+}
+
+void WorkEstDeInitTDM(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData)
+{
+       _WorkEstDeInit(psDevInfo, &psWorkEstData->uWorkloadMatchingData.sTransfer.sDataTDM);
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxworkest.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rgxworkest.h
new file mode 100644 (file)
index 0000000..dc01eec
--- /dev/null
@@ -0,0 +1,81 @@
+/*************************************************************************/ /*!
+@File           rgxworkest.h
+@Title          RGX Workload Estimation Functionality
+@Codingstyle    IMG
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the kernel mode workload estimation functionality.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXWORKEST_H
+#define RGXWORKEST_H
+
+#include "img_types.h"
+#include "rgxta3d.h"
+
+
+void WorkEstInitTA3D(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData);
+
+void WorkEstDeInitTA3D(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData);
+
+void WorkEstInitCompute(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData);
+
+void WorkEstDeInitCompute(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData);
+
+void WorkEstInitTDM(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData);
+
+void WorkEstDeInitTDM(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData);
+
+PVRSRV_ERROR WorkEstPrepare(PVRSRV_RGXDEV_INFO        *psDevInfo,
+                            WORKEST_HOST_DATA         *psWorkEstHostData,
+                            WORKLOAD_MATCHING_DATA    *psWorkloadMatchingData,
+                            const RGXFWIF_CCB_CMD_TYPE eDMCmdType,
+                            const RGX_WORKLOAD        *psWorkloadCharsIn,
+                            IMG_UINT64                ui64DeadlineInus,
+                            RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData);
+
+PVRSRV_ERROR WorkEstRetire(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                  RGXFWIF_WORKEST_FWCCB_CMD *psReturnCmd);
+
+void WorkEstHashLockCreate(POS_LOCK *ppsHashLock);
+
+void WorkEstHashLockDestroy(POS_LOCK psHashLock);
+
+void WorkEstCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo);
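+
+/* Illustrative lifecycle sketch (an assumption about typical use by the DM
+ * code, not something prescribed by this header):
+ *
+ *     1. WorkEstInit<DM>()         - create the per-DM hash tables and locks
+ *     2. WorkEstPrepare()          - per kick: look up a cycle prediction for
+ *                                    the workload and fill psWorkEstKickData
+ *     3. the firmware runs the workload and posts a RGXFWIF_WORKEST_FWCCB_CMD
+ *     4. WorkEstCheckFirmwareCCB() - drain that CCB, calling WorkEstRetire()
+ *                                    to fold the measured cycles into the hash
+ *     5. WorkEstDeInit<DM>()       - tear down the hash tables and locks
+ */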
+
+#endif /* RGXWORKEST_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxcompute.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxcompute.c
new file mode 100644 (file)
index 0000000..952940f
--- /dev/null
@@ -0,0 +1,1324 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Compute routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX Compute routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "srvkm.h"
+#include "pdump_km.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxcompute.h"
+#include "rgx_bvnc_defs_km.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "rgxccb.h"
+#include "rgxhwperf.h"
+#include "ospvr_gputrace.h"
+#include "htbuffer.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "rgx_memallocflags.h"
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+
+#include "rgxtimerquery.h"
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "rgxworkest.h"
+
+#define HASH_CLEAN_LIMIT 6
+#endif
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_CMP_UFO_DUMP    0
+
+//#define CMP_CHECKPOINT_DEBUG 1
+
+#if defined(CMP_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+#else
+#define CHKPT_DBG(X)
+#endif
+
+struct _RGX_SERVER_COMPUTE_CONTEXT_ {
+       PVRSRV_DEVICE_NODE                      *psDeviceNode;
+       RGX_SERVER_COMMON_CONTEXT       *psServerCommonContext;
+       DEVMEM_MEMDESC                          *psFWComputeContextMemDesc;
+       DEVMEM_MEMDESC                          *psFWFrameworkMemDesc;
+       DEVMEM_MEMDESC                          *psFWComputeContextStateMemDesc;
+       DLLIST_NODE                                     sListNode;
+       SYNC_ADDR_LIST                          sSyncAddrListFence;
+       SYNC_ADDR_LIST                          sSyncAddrListUpdate;
+       POS_LOCK                                        hLock;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       WORKEST_HOST_DATA                       sWorkEstData;
+#endif
+#if defined(SUPPORT_BUFFER_SYNC)
+       struct pvr_buffer_sync_context *psBufferSyncContext;
+#endif
+};
+
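+/*
+ * PVRSRVRGXCreateComputeContextKM
+ *
+ * Creates a server compute context: allocates the FW compute context and its
+ * suspend state, optionally creates and populates the framework buffer,
+ * allocates the common context (and client CCB), copies the static compute
+ * context state into the FW context and links the new context into the
+ * device's compute context list.
+ */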
+PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA                   *psConnection,
+                                                                                        PVRSRV_DEVICE_NODE                     *psDeviceNode,
+                                                                                        IMG_UINT32                                     ui32Priority,
+                                                                                        IMG_UINT32                                     ui32FrameworkCommandSize,
+                                                                                        IMG_PBYTE                                      pbyFrameworkCommand,
+                                                                                        IMG_HANDLE                                     hMemCtxPrivData,
+                                                                                        IMG_UINT32                                     ui32StaticComputecontextStateSize,
+                                                                                        IMG_PBYTE                                      pStaticComputecontextState,
+                                                                                        IMG_UINT32                                     ui32PackedCCBSizeU88,
+                                                                                        IMG_UINT32                                     ui32ContextFlags,
+                                                                                        IMG_UINT64                                     ui64RobustnessAddress,
+                                                                                        IMG_UINT32                                     ui32MaxDeadlineMS,
+                                                                                        RGX_SERVER_COMPUTE_CONTEXT     **ppsComputeContext)
+{
+       PVRSRV_RGXDEV_INFO                      *psDevInfo = psDeviceNode->pvDevice;
+       DEVMEM_MEMDESC                          *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+       RGX_SERVER_COMPUTE_CONTEXT      *psComputeContext;
+       RGX_COMMON_CONTEXT_INFO         sInfo = {NULL};
+       PVRSRV_ERROR                            eError = PVRSRV_OK;
+       RGXFWIF_FWCOMPUTECONTEXT        *psFWComputeContext;
+       IMG_UINT32                                      ui32CCBAllocSizeLog2, ui32CCBMaxAllocSizeLog2;
+
+       /* Prepare cleanup struct */
+       *ppsComputeContext = NULL;
+
+       psComputeContext = OSAllocZMem(sizeof(*psComputeContext));
+       if (psComputeContext == NULL)
+       {
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       /*
+               Create the FW compute context; this has the CDM common
+               context embedded within it
+        */
+       eError = DevmemFwAllocate(psDevInfo,
+                       sizeof(RGXFWIF_FWCOMPUTECONTEXT),
+                       RGX_FWCOMCTX_ALLOCFLAGS,
+                       "FwComputeContext",
+                       &psComputeContext->psFWComputeContextMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_fwcomputecontext;
+       }
+
+       eError = OSLockCreate(&psComputeContext->hLock);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to create lock (%s)",
+                                __func__,
+                                PVRSRVGetErrorString(eError)));
+               goto fail_createlock;
+       }
+
+       psComputeContext->psDeviceNode = psDeviceNode;
+
+       /*
+               Allocate device memory for the firmware GPU context suspend state.
+               Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+       */
+       PDUMPCOMMENT(psDeviceNode, "Allocate RGX firmware compute context suspend state");
+
+       eError = DevmemFwAllocate(psDevInfo,
+                                                         sizeof(RGXFWIF_COMPUTECTX_STATE),
+                                                         RGX_FWCOMCTX_ALLOCFLAGS,
+                                                         "FwComputeContextState",
+                                                         &psComputeContext->psFWComputeContextStateMemDesc);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to allocate firmware GPU context suspend state (%d)",
+                                __func__,
+                                eError));
+               goto fail_contextsuspendalloc;
+       }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       WorkEstInitCompute(psDevInfo, &psComputeContext->sWorkEstData);
+#endif
+
+       if (ui32FrameworkCommandSize)
+       {
+               /*
+                * Create the FW framework buffer
+                */
+               eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+                               &psComputeContext->psFWFrameworkMemDesc,
+                               ui32FrameworkCommandSize);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                               "%s: Failed to allocate firmware GPU framework state (%d)",
+                                               __func__,
+                                               eError));
+                       goto fail_frameworkcreate;
+               }
+
+               /* Copy the Framework client data into the framework buffer */
+               eError = PVRSRVRGXFrameworkCopyCommand(psDeviceNode,
+                               psComputeContext->psFWFrameworkMemDesc,
+                               pbyFrameworkCommand,
+                               ui32FrameworkCommandSize);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                               "%s: Failed to populate the framework buffer (%s)",
+                                               __func__,
+                                               PVRSRVGetErrorString(eError)));
+                       goto fail_frameworkcopy;
+               }
+
+               sInfo.psFWFrameworkMemDesc = psComputeContext->psFWFrameworkMemDesc;
+       }
+
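+       /* ui32PackedCCBSizeU88 carries the initial and maximum client CCB
+        * allocation sizes as two packed log2 values; a zero field falls back
+        * to the compile-time CDM defaults below. */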
+       ui32CCBAllocSizeLog2 = U32toU8_Unpack1(ui32PackedCCBSizeU88);
+       ui32CCBMaxAllocSizeLog2 = U32toU8_Unpack2(ui32PackedCCBSizeU88);
+       eError = FWCommonContextAllocate(psConnection,
+                                                                        psDeviceNode,
+                                                                        REQ_TYPE_CDM,
+                                                                        RGXFWIF_DM_CDM,
+                                                                        NULL,
+                                                                        psComputeContext->psFWComputeContextMemDesc,
+                                                                        offsetof(RGXFWIF_FWCOMPUTECONTEXT, sCDMContext),
+                                                                        psFWMemContextMemDesc,
+                                                                        psComputeContext->psFWComputeContextStateMemDesc,
+                                                                        ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_CDM_CCB_SIZE_LOG2,
+                                                                        ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_CDM_CCB_MAX_SIZE_LOG2,
+                                                                        ui32ContextFlags,
+                                                                        ui32Priority,
+                                                                        ui32MaxDeadlineMS,
+                                                                        ui64RobustnessAddress,
+                                                                        &sInfo,
+                                                                        &psComputeContext->psServerCommonContext);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_contextalloc;
+       }
+
+       eError = DevmemAcquireCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc,
+                       (void **)&psFWComputeContext);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_acquire_cpu_mapping;
+       }
+
+       OSDeviceMemCopy(&psFWComputeContext->sStaticComputeContextState, pStaticComputecontextState, ui32StaticComputecontextStateSize);
+       DevmemPDumpLoadMem(psComputeContext->psFWComputeContextMemDesc, 0, sizeof(RGXFWIF_FWCOMPUTECONTEXT), PDUMP_FLAGS_CONTINUOUS);
+       DevmemReleaseCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc);
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       psComputeContext->psBufferSyncContext =
+                       pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice,
+                                       "rogue-cdm");
+       if (IS_ERR(psComputeContext->psBufferSyncContext))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: failed to create buffer_sync context (err=%ld)",
+                               __func__, PTR_ERR(psComputeContext->psBufferSyncContext)));
+
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto fail_buffer_sync_context_create;
+       }
+#endif
+
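+       /* Initialise the sync address lists used to build the fence/update UFO
+        * lists for each kick on this context */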
+       SyncAddrListInit(&psComputeContext->sSyncAddrListFence);
+       SyncAddrListInit(&psComputeContext->sSyncAddrListUpdate);
+
+       {
+               PVRSRV_RGXDEV_INFO                      *psDevInfo = psDeviceNode->pvDevice;
+
+               OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock);
+               dllist_add_to_tail(&(psDevInfo->sComputeCtxtListHead), &(psComputeContext->sListNode));
+               OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock);
+       }
+
+       *ppsComputeContext = psComputeContext;
+       return PVRSRV_OK;
+
+#if defined(SUPPORT_BUFFER_SYNC)
+fail_buffer_sync_context_create:
+#endif
+fail_acquire_cpu_mapping:
+       FWCommonContextFree(psComputeContext->psServerCommonContext);
+fail_contextalloc:
+fail_frameworkcopy:
+       if (psComputeContext->psFWFrameworkMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWFrameworkMemDesc);
+       }
+fail_frameworkcreate:
+       DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextStateMemDesc);
+fail_contextsuspendalloc:
+       OSLockDestroy(psComputeContext->hLock);
+fail_createlock:
+       DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextMemDesc);
+fail_fwcomputecontext:
+       OSFreeMem(psComputeContext);
+       return eError;
+}
+
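+/*
+ * PVRSRVRGXDestroyComputeContextKM
+ *
+ * Tears down a server compute context. The FW is first asked to clean up the
+ * common context (which may return RETRY); with workload estimation enabled,
+ * destruction is also deferred until all WorkEst CCB commands have been
+ * received. The context is then unlinked from the device list and its FW
+ * allocations, lock and host structure are freed.
+ */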
+PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
+{
+       PVRSRV_ERROR                            eError = PVRSRV_OK;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       RGXFWIF_FWCOMPUTECONTEXT        *psFWComputeContext;
+       IMG_UINT32 ui32WorkEstCCBSubmitted;
+#endif
+
+       /* Check if the FW has finished with this resource ... */
+       eError = RGXFWRequestCommonContextCleanUp(psComputeContext->psDeviceNode,
+                                                                                         psComputeContext->psServerCommonContext,
+                                                                                         RGXFWIF_DM_CDM,
+                                                                                         PDUMP_FLAGS_NONE);
+
+       if (eError == PVRSRV_ERROR_RETRY)
+       {
+               return eError;
+       }
+       else if (eError != PVRSRV_OK)
+       {
+               PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+               return eError;
+       }
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       /* Destroy the buffer sync context only after RGXFWRequestCommonContextCleanUp();
+        * that call may cause us to return RETRY, and we don't want this destroy to run twice */
+       if (psComputeContext->psBufferSyncContext != NULL)
+       {
+               pvr_buffer_sync_context_destroy(psComputeContext->psBufferSyncContext);
+               psComputeContext->psBufferSyncContext = NULL;
+       }
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       eError = DevmemAcquireCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc,
+                       (void **)&psFWComputeContext);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to map firmware compute context (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+               return eError;
+       }
+
+       ui32WorkEstCCBSubmitted = psFWComputeContext->ui32WorkEstCCBSubmitted;
+
+       DevmemReleaseCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc);
+
+       /* Check if all of the workload estimation CCB commands for this workload are read */
+       if (ui32WorkEstCCBSubmitted != psComputeContext->sWorkEstData.ui32WorkEstCCBReceived)
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                       "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch",
+                       __func__, ui32WorkEstCCBSubmitted,
+                       psComputeContext->sWorkEstData.ui32WorkEstCCBReceived));
+
+               return PVRSRV_ERROR_RETRY;
+       }
+#endif
+
+       /* ... it has so we can free its resources */
+
+       OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock);
+       dllist_remove_node(&(psComputeContext->sListNode));
+       OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       WorkEstDeInitCompute(psDevInfo, &psComputeContext->sWorkEstData);
+#endif
+
+       FWCommonContextFree(psComputeContext->psServerCommonContext);
+       if (psComputeContext->psFWFrameworkMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWFrameworkMemDesc);
+       }
+       DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextStateMemDesc);
+       DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextMemDesc);
+
+       OSLockDestroy(psComputeContext->hLock);
+       OSFreeMem(psComputeContext);
+
+       return PVRSRV_OK;
+}
+
+
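+/*
+ * PVRSRVRGXKickCDMKM
+ *
+ * Kicks a compute (CDM) command: resolves the input fence (and any buffer
+ * sync PMRs) into sync checkpoints, optionally creates an output fence and
+ * timeline update, builds the client CCB command via the command helper and
+ * then schedules a KCCB kick to the firmware.
+ */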
+PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT     *psComputeContext,
+                                                               IMG_UINT32                                      ui32ClientUpdateCount,
+                                                               SYNC_PRIMITIVE_BLOCK            **pauiClientUpdateUFODevVarBlock,
+                                                               IMG_UINT32                                      *paui32ClientUpdateSyncOffset,
+                                                               IMG_UINT32                                      *paui32ClientUpdateValue,
+                                                               PVRSRV_FENCE                            iCheckFence,
+                                                               PVRSRV_TIMELINE                         iUpdateTimeline,
+                                                               PVRSRV_FENCE                            *piUpdateFence,
+                                                               IMG_CHAR                                        pszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],
+                                                               IMG_UINT32                                      ui32CmdSize,
+                                                               IMG_PBYTE                                       pui8DMCmd,
+                                                               IMG_UINT32                                      ui32PDumpFlags,
+                                                               IMG_UINT32                                      ui32ExtJobRef,
+                                                               IMG_UINT32                                      ui32SyncPMRCount,
+                                                               IMG_UINT32                                      *paui32SyncPMRFlags,
+                                                               PMR                                                     **ppsSyncPMRs,
+                                                               IMG_UINT32                                      ui32NumWorkgroups,
+                                                               IMG_UINT32                                      ui32NumWorkitems,
+                                                               IMG_UINT64                                      ui64DeadlineInus)
+{
+       RGXFWIF_KCCB_CMD                sCmpKCCBCmd;
+       RGX_CCB_CMD_HELPER_DATA asCmdHelperData[1];
+       PVRSRV_ERROR                    eError;
+       PVRSRV_ERROR                    eError2;
+       IMG_UINT32                              ui32CDMCmdOffset = 0;
+       PVRSRV_RGXDEV_INFO      *psDevInfo = FWCommonContextGetRGXDevInfo(psComputeContext->psServerCommonContext);
+       RGX_CLIENT_CCB          *psClientCCB = FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext);
+       IMG_UINT32              ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal);
+       IMG_UINT32                              ui32FWCtx;
+       IMG_BOOL                                bCCBStateOpen = IMG_FALSE;
+
+       PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+       PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+       PRGXFWIF_UFO_ADDR       pRMWUFOAddr;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataCompute = {0};
+       IMG_UINT32 ui32CDMWorkloadDataRO = 0;
+       IMG_UINT32 ui32CDMCmdHeaderOffset = 0;
+       IMG_UINT32 ui32CDMCmdOffsetWrapCheck = 0;
+       RGX_WORKLOAD sWorkloadCharacteristics = {0};
+#endif
+
+       IMG_UINT32 ui32IntClientFenceCount = 0;
+       PRGXFWIF_UFO_ADDR *pauiIntFenceUFOAddress = NULL;
+       IMG_UINT32 ui32IntClientUpdateCount = 0;
+       PRGXFWIF_UFO_ADDR *pauiIntUpdateUFOAddress = NULL;
+       IMG_UINT32 *paui32IntUpdateValue = NULL;
+       PVRSRV_FENCE  iUpdateFence = PVRSRV_NO_FENCE;
+       IMG_UINT64 uiCheckFenceUID = 0;
+       IMG_UINT64 uiUpdateFenceUID = 0;
+       PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL;
+       PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+       IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+       IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+       PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL;
+       IMG_UINT32 ui32FenceTimelineUpdateValue = 0;
+       void *pvUpdateFenceFinaliseData = NULL;
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       struct pvr_buffer_sync_append_data *psBufferSyncData = NULL;
+       PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL;
+       IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0;
+       PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+       CMD_COMMON *psComputeCmdCmn = IMG_OFFSET_ADDR(pui8DMCmd, 0);
+
+       if (iUpdateTimeline >= 0 && !piUpdateFence)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       /* Ensure we haven't been given a null ptr to
+        * update values if we have been told we
+        * have updates
+        */
+       if (ui32ClientUpdateCount > 0)
+       {
+               PVR_LOG_RETURN_IF_FALSE(paui32ClientUpdateValue != NULL,
+                                       "paui32ClientUpdateValue NULL but "
+                                               "ui32ClientUpdateCount > 0",
+                                       PVRSRV_ERROR_INVALID_PARAMS);
+       }
+
+       /* Ensure the fence name string is null-terminated (required for safety) */
+       pszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH - 1] = '\0';
+
+       OSLockAcquire(psComputeContext->hLock);
+
+       eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListFence,
+                                                                       0,
+                                                                       NULL,
+                                                                       NULL);
+       if (eError != PVRSRV_OK)
+       {
+               goto err_populate_sync_addr_list;
+       }
+
+       ui32IntClientUpdateCount = ui32ClientUpdateCount;
+
+       eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListUpdate,
+                                                                       ui32ClientUpdateCount,
+                                                                       pauiClientUpdateUFODevVarBlock,
+                                                                       paui32ClientUpdateSyncOffset);
+       if (eError != PVRSRV_OK)
+       {
+               goto err_populate_sync_addr_list;
+       }
+       if (ui32IntClientUpdateCount && !pauiIntUpdateUFOAddress)
+       {
+               pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs;
+       }
+       paui32IntUpdateValue = paui32ClientUpdateValue;
+
+       if (ui32SyncPMRCount != 0)
+       {
+#if defined(SUPPORT_BUFFER_SYNC)
+               int err;
+
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Calling "
+                         "pvr_buffer_sync_resolve_and_create_fences", __func__));
+
+               err = pvr_buffer_sync_resolve_and_create_fences(
+                   psComputeContext->psBufferSyncContext,
+                   psComputeContext->psDeviceNode->hSyncCheckpointContext,
+                   ui32SyncPMRCount,
+                   ppsSyncPMRs,
+                   paui32SyncPMRFlags,
+                   &ui32BufferFenceSyncCheckpointCount,
+                   &apsBufferFenceSyncCheckpoints,
+                   &psBufferUpdateSyncCheckpoint,
+                   &psBufferSyncData
+               );
+
+               if (unlikely(err))
+               {
+                       switch (err)
+                       {
+                               case -EINTR:
+                                       eError = PVRSRV_ERROR_RETRY;
+                                       break;
+                               case -ENOMEM:
+                                       eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                                       break;
+                               default:
+                                       eError = PVRSRV_ERROR_INVALID_PARAMS;
+                                       break;
+                       }
+
+                       if (eError != PVRSRV_ERROR_RETRY)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "%s:   "
+                                       "pvr_buffer_sync_resolve_and_create_fences failed (%d)",
+                                       __func__, eError));
+                       }
+
+                       goto fail_resolve_input_fence;
+               }
+
+               /* Append buffer sync fences */
+               if (ui32BufferFenceSyncCheckpointCount > 0)
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d buffer sync checkpoints "
+                                 "to CDM Fence (&psComputeContext->sSyncAddrListFence=<%p>, "
+                                 "pauiIntFenceUFOAddress=<%p>)...", __func__,
+                                 ui32BufferFenceSyncCheckpointCount,
+                                 (void *) &psComputeContext->sSyncAddrListFence,
+                                 (void *) pauiIntFenceUFOAddress));
+
+                       SyncAddrListAppendAndDeRefCheckpoints(&psComputeContext->sSyncAddrListFence,
+                                                             ui32BufferFenceSyncCheckpointCount,
+                                                             apsBufferFenceSyncCheckpoints);
+                       if (pauiIntFenceUFOAddress == NULL)
+                       {
+                               pauiIntFenceUFOAddress = psComputeContext->sSyncAddrListFence.pasFWAddrs;
+                       }
+                       ui32IntClientFenceCount += ui32BufferFenceSyncCheckpointCount;
+               }
+
+               /* Append the update (from output fence) */
+               if (psBufferUpdateSyncCheckpoint)
+               {
+                       SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListUpdate,
+                                                     1, &psBufferUpdateSyncCheckpoint);
+                       if (pauiIntUpdateUFOAddress == NULL)
+                       {
+                               pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs;
+                       }
+                       ui32IntClientUpdateCount++;
+               }
+#else /* defined(SUPPORT_BUFFER_SYNC) */
+               PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers",
+                       __func__, ui32SyncPMRCount));
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto err_populate_sync_addr_list;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+       }
+
+       CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __func__, iCheckFence, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext));
+       /* Resolve the sync checkpoints that make up the input fence */
+       eError = SyncCheckpointResolveFence(psComputeContext->psDeviceNode->hSyncCheckpointContext,
+                                                                               iCheckFence,
+                                                                               &ui32FenceSyncCheckpointCount,
+                                                                               &apsFenceSyncCheckpoints,
+                                           &uiCheckFenceUID, ui32PDumpFlags);
+       if (eError != PVRSRV_OK)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __func__, eError));
+               goto fail_free_buffer_sync_data;
+       }
+       CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __func__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints));
+#if defined(CMP_CHECKPOINT_DEBUG)
+       if (ui32FenceSyncCheckpointCount > 0)
+       {
+               IMG_UINT32 ii;
+               for (ii=0; ii<ui32FenceSyncCheckpointCount; ii++)
+               {
+                       PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints + ii);
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s:    apsFenceSyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint));
+               }
+       }
+#endif
+       /* Create the output fence (if required) */
+       if (iUpdateTimeline != PVRSRV_NO_TIMELINE)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (iUpdateFence=%d, iUpdateTimeline=%d,  psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>)...", __func__, iUpdateFence, iUpdateTimeline, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext));
+               eError = SyncCheckpointCreateFence(psComputeContext->psDeviceNode,
+                                                  pszUpdateFenceName,
+                                                                                  iUpdateTimeline,
+                                                                                  psComputeContext->psDeviceNode->hSyncCheckpointContext,
+                                                                                  &iUpdateFence,
+                                                                                  &uiUpdateFenceUID,
+                                                                                  &pvUpdateFenceFinaliseData,
+                                                                                  &psUpdateSyncCheckpoint,
+                                                                                  (void*)&psFenceTimelineUpdateSync,
+                                                                                  &ui32FenceTimelineUpdateValue,
+                                                                                  ui32PDumpFlags);
+               if (eError != PVRSRV_OK)
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned error (%d)", __func__, eError));
+                       goto fail_create_output_fence;
+               }
+
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned from SyncCheckpointCreateFence (iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, ui32FenceTimelineUpdateValue=%u)", __func__, iUpdateFence, psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
+
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u, psFenceTimelineUpdateSync=<%p>", __func__, ui32IntClientUpdateCount, (void*)psFenceTimelineUpdateSync));
+               /* Append the sync prim update for the timeline (if required) */
+               if (psFenceTimelineUpdateSync)
+               {
+                       IMG_UINT32 *pui32TimelineUpdateWp = NULL;
+
+                       /* Allocate memory to hold the list of update values (including our timeline update) */
+                       pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+                       if (!pui32IntAllocatedUpdateValues)
+                       {
+                               /* Failed to allocate memory */
+                               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto fail_alloc_update_values_mem;
+                       }
+                       OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+                       /* Copy the update values into the new memory, then append our timeline update value */
+                       if (paui32IntUpdateValue)
+                       {
+                               OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount);
+                       }
+#if defined(CMP_CHECKPOINT_DEBUG)
+                       if (ui32IntClientUpdateCount > 0)
+                       {
+                               IMG_UINT32 iii;
+                               IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+                               CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u:", __func__, ui32IntClientUpdateCount));
+                               for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+                               {
+                                       CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                                       pui32Tmp++;
+                               }
+                       }
+#endif
+                       /* Now set the additional update value */
+                       pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount;
+                       *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
+                       ui32IntClientUpdateCount++;
+                       /* Now make sure paui32ClientUpdateValue points to pui32IntAllocatedUpdateValues */
+                       paui32ClientUpdateValue = pui32IntAllocatedUpdateValues;
+
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: append the timeline sync prim addr <%p> to the compute context update list", __func__,  (void*)psFenceTimelineUpdateSync));
+                       /* Now append the timeline sync prim addr to the compute context update list */
+                       SyncAddrListAppendSyncPrim(&psComputeContext->sSyncAddrListUpdate,
+                                                  psFenceTimelineUpdateSync);
+#if defined(CMP_CHECKPOINT_DEBUG)
+                       if (ui32IntClientUpdateCount > 0)
+                       {
+                               IMG_UINT32 iii;
+                               IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+                               CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u:", __func__, ui32IntClientUpdateCount));
+                               for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+                               {
+                                       CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                                       pui32Tmp++;
+                               }
+                       }
+#endif
+                       /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */
+                       paui32IntUpdateValue = pui32IntAllocatedUpdateValues;
+               }
+       }
+
+       /* Append the checks (from input fence) */
+       if (ui32FenceSyncCheckpointCount > 0)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d sync checkpoints to Compute CDM Fence (&psComputeContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psComputeContext->sSyncAddrListFence));
+#if defined(CMP_CHECKPOINT_DEBUG)
+               if (ui32IntClientUpdateCount > 0)
+               {
+                       IMG_UINT32 iii;
+                       IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress;
+
+                       for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+                       {
+                               CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                               pui32Tmp++;
+                       }
+               }
+#endif
+               SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListFence,
+                                                                         ui32FenceSyncCheckpointCount,
+                                                                         apsFenceSyncCheckpoints);
+               if (!pauiIntFenceUFOAddress)
+               {
+                       pauiIntFenceUFOAddress = psComputeContext->sSyncAddrListFence.pasFWAddrs;
+               }
+               ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
+       }
+#if defined(CMP_CHECKPOINT_DEBUG)
+       if (ui32IntClientUpdateCount > 0)
+       {
+               IMG_UINT32 iii;
+               IMG_UINT32 *pui32Tmp = (IMG_UINT32*)paui32IntUpdateValue;
+
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Dumping %d update values (paui32IntUpdateValue=<%p>)...", __func__, ui32IntClientUpdateCount, (void*)paui32IntUpdateValue));
+               for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: paui32IntUpdateValue[%d] = <%p>", __func__, iii, (void*)pui32Tmp));
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: *paui32IntUpdateValue[%d] = 0x%x", __func__, iii, *pui32Tmp));
+                       pui32Tmp++;
+               }
+       }
+#endif
+
+       if (psUpdateSyncCheckpoint)
+       {
+               /* Append the update (from output fence) */
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append 1 sync checkpoint to Compute CDM Update (&psComputeContext->sSyncAddrListUpdate=<%p>, psUpdateSyncCheckpoint=<%p>)...", __func__, (void*)&psComputeContext->sSyncAddrListUpdate , (void*)psUpdateSyncCheckpoint));
+               SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListUpdate,
+                                                                         1,
+                                                                         &psUpdateSyncCheckpoint);
+               if (!pauiIntUpdateUFOAddress)
+               {
+                       pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs;
+               }
+               ui32IntClientUpdateCount++;
+#if defined(CMP_CHECKPOINT_DEBUG)
+               if (ui32IntClientUpdateCount > 0)
+               {
+                       IMG_UINT32 iii;
+                       IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntUpdateUFOAddress;
+
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress=<%p>, pui32Tmp=<%p>, ui32IntClientUpdateCount=%u", __func__, (void*)pauiIntUpdateUFOAddress, (void*)pui32Tmp, ui32IntClientUpdateCount));
+                       for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+                       {
+                               CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                               pui32Tmp++;
+                       }
+               }
+#endif
+       }
+       CHKPT_DBG((PVR_DBG_ERROR, "%s:   (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount));
+
+#if (ENABLE_CMP_UFO_DUMP == 1)
+               PVR_DPF((PVR_DBG_ERROR, "%s: dumping Compute (CDM) fence/updates syncs...", __func__));
+               {
+                       IMG_UINT32 ii;
+                       PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress;
+                       PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress;
+                       IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue;
+
+                       /* Dump Fence syncs and Update syncs */
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Compute (CDM) fence syncs (&psComputeContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psComputeContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress));
+                       for (ii=0; ii<ui32IntClientFenceCount; ii++)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
+                               psTmpIntFenceUFOAddress++;
+                       }
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Compute (CDM) update syncs (&psComputeContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psComputeContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
+                       for (ii=0; ii<ui32IntClientUpdateCount; ii++)
+                       {
+                               if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
+                               }
+                               else
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue));
+                                       pui32TmpIntUpdateValue++;
+                               }
+                               psTmpIntUpdateUFOAddress++;
+                       }
+               }
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       sWorkloadCharacteristics.sCompute.ui32NumberOfWorkgroups = ui32NumWorkgroups;
+       sWorkloadCharacteristics.sCompute.ui32NumberOfWorkitems  = ui32NumWorkitems;
+
+       /* Prepare workload estimation */
+       WorkEstPrepare(psComputeContext->psDeviceNode->pvDevice,
+                       &psComputeContext->sWorkEstData,
+                       &psComputeContext->sWorkEstData.uWorkloadMatchingData.sCompute.sDataCDM,
+                       RGXFWIF_CCB_CMD_TYPE_CDM,
+                       &sWorkloadCharacteristics,
+                       ui64DeadlineInus,
+                       &sWorkloadKickDataCompute);
+#endif
+
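+       /* Fetch the FW addresses used by the command helper for the pre/post
+        * timestamp writes and the RMW UFO */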
+       RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psComputeContext->psDeviceNode->pvDevice,
+                                 &pPreAddr,
+                                 &pPostAddr,
+                                 &pRMWUFOAddr);
+
+       RGXCmdHelperInitCmdCCB(psDevInfo,
+                              psClientCCB,
+                              0,
+                              ui32IntClientFenceCount,
+                              pauiIntFenceUFOAddress,
+                              NULL,
+                              ui32IntClientUpdateCount,
+                              pauiIntUpdateUFOAddress,
+                              paui32IntUpdateValue,
+                              ui32CmdSize,
+                              pui8DMCmd,
+                              &pPreAddr,
+                              &pPostAddr,
+                              &pRMWUFOAddr,
+                              RGXFWIF_CCB_CMD_TYPE_CDM,
+                              ui32ExtJobRef,
+                              ui32IntJobRef,
+                              ui32PDumpFlags,
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+                              &sWorkloadKickDataCompute,
+#else
+                              NULL,
+#endif
+                              "Compute",
+                              bCCBStateOpen,
+                              asCmdHelperData);
+
+       eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asCmdHelperData), asCmdHelperData);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_cmdaquire;
+       }
+
+
+       /*
+               We should reserve space in the kernel CCB here and fill in the command
+               directly.
+               That way, if there is no space in the kernel CCB, we can return a retry
+               to the services client before committing to any operations.
+       */
+
+       /*
+               We might only be kicking to flush out a padding packet, so only submit
+               the command if the command create was successful.
+       */
+       if (eError == PVRSRV_OK)
+       {
+               /*
+                       All the required resources are ready at this point, we can't fail so
+                       take the required server sync operations and commit all the resources
+               */
+
+               ui32CDMCmdOffset = RGXGetHostWriteOffsetCCB(psClientCCB);
+               RGXCmdHelperReleaseCmdCCB(1, asCmdHelperData, "CDM", FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext).ui32Addr);
+       }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       /* The following is used to determine the offset of the command header containing
+          the workload estimation data so that it can be accessed when the KCCB is read */
+       ui32CDMCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(asCmdHelperData);
+
+       ui32CDMCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext));
+
+       /* This checks if the command would wrap around at the end of the CCB and
+        * therefore would start at an offset of 0 rather than the current command
+        * offset */
+       if (ui32CDMCmdOffset < ui32CDMCmdOffsetWrapCheck)
+       {
+               ui32CDMWorkloadDataRO = ui32CDMCmdOffset;
+       }
+       else
+       {
+               ui32CDMWorkloadDataRO = 0;
+       }
+#endif
+
+       /* Construct the kernel compute CCB command. */
+       sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+       sCmpKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext);
+       sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB);
+       sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB);
+       sCmpKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+       /* Add the Workload data into the KCCB kick */
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       /* Store the offset to the CCCB command header so that it can be referenced
+        * when the KCCB command reaches the FW */
+       sCmpKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui32CDMWorkloadDataRO + ui32CDMCmdHeaderOffset;
+#else
+       sCmpKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+#endif
+
+       ui32FWCtx = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext).ui32Addr;
+
+       if (psComputeCmdCmn)
+       {
+               HTBLOGK(HTB_SF_MAIN_KICK_CDM,
+                               sCmpKCCBCmd.uCmdData.sCmdKickData.psContext,
+                               ui32CDMCmdOffset,
+                               psComputeCmdCmn->ui32FrameNum,
+                               ui32ExtJobRef,
+                               ui32IntJobRef);
+       }
+
+       RGXSRV_HWPERF_ENQ(psComputeContext,
+                         OSGetCurrentClientProcessIDKM(),
+                         ui32FWCtx,
+                         ui32ExtJobRef,
+                         ui32IntJobRef,
+                         RGX_HWPERF_KICK_TYPE_CDM,
+                         iCheckFence,
+                         iUpdateFence,
+                         iUpdateTimeline,
+                         uiCheckFenceUID,
+                         uiUpdateFenceUID,
+                         NO_DEADLINE,
+                         NO_CYCEST);
+
+       /*
+        * Submit the compute command to the firmware.
+        */
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               eError2 = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice,
+                                                                       RGXFWIF_DM_CDM,
+                                                                       &sCmpKCCBCmd,
+                                                                       ui32PDumpFlags);
+               if (eError2 != PVRSRV_ERROR_RETRY)
+               {
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       if (eError2 != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s failed to schedule kernel CCB command (%s)",
+                                __func__,
+                                PVRSRVGetErrorString(eError2)));
+       }
+       else
+       {
+               PVRGpuTraceEnqueueEvent(psComputeContext->psDeviceNode->pvDevice,
+                                       ui32FWCtx, ui32ExtJobRef, ui32IntJobRef,
+                                       RGX_HWPERF_KICK_TYPE_CDM);
+       }
+       /*
+        * Now check eError (which may hold an error returned by our earlier call
+        * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command
+        * first, so we check it now...
+        */
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_cmdaquire;
+       }
+
+#if defined(NO_HARDWARE)
+       /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+       if (psUpdateSyncCheckpoint)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint)));
+               SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
+       }
+       if (psFenceTimelineUpdateSync)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Updating NOHW sync prim<%p> to %d", __func__, (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
+               SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
+       }
+       SyncCheckpointNoHWUpdateTimelines(NULL);
+#endif /* defined(NO_HARDWARE) */
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       if (psBufferSyncData)
+       {
+               pvr_buffer_sync_kick_succeeded(psBufferSyncData);
+       }
+       if (apsBufferFenceSyncCheckpoints)
+       {
+               kfree(apsBufferFenceSyncCheckpoints);
+       }
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+       *piUpdateFence = iUpdateFence;
+
+       if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE))
+       {
+               SyncCheckpointFinaliseFence(psComputeContext->psDeviceNode, iUpdateFence,
+                                           pvUpdateFenceFinaliseData,
+                                                                       psUpdateSyncCheckpoint, pszUpdateFenceName);
+       }
+       /* Drop the references taken on the sync checkpoints in the
+        * resolved input fence */
+       SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+                                                                apsFenceSyncCheckpoints);
+       /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+       if (apsFenceSyncCheckpoints)
+       {
+               SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+       }
+       /* Free memory allocated to hold the internal list of update values */
+       if (pui32IntAllocatedUpdateValues)
+       {
+               OSFreeMem(pui32IntAllocatedUpdateValues);
+               pui32IntAllocatedUpdateValues = NULL;
+       }
+
+       OSLockRelease(psComputeContext->hLock);
+
+       return PVRSRV_OK;
+
+fail_cmdaquire:
+       SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListFence);
+       SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListUpdate);
+fail_alloc_update_values_mem:
+       if (iUpdateFence != PVRSRV_NO_FENCE)
+       {
+               SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
+       }
+fail_create_output_fence:
+       /* Drop the references taken on the sync checkpoints in the
+        * resolved input fence */
+       SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+                                                                apsFenceSyncCheckpoints);
+
+fail_free_buffer_sync_data:
+#if defined(SUPPORT_BUFFER_SYNC)
+       if (psBufferSyncData)
+       {
+               pvr_buffer_sync_kick_failed(psBufferSyncData);
+       }
+       if (apsBufferFenceSyncCheckpoints)
+       {
+               kfree(apsBufferFenceSyncCheckpoints);
+       }
+
+fail_resolve_input_fence:
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+err_populate_sync_addr_list:
+       /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+       if (apsFenceSyncCheckpoints)
+       {
+               SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+       }
+       /* Free memory allocated to hold the internal list of update values */
+       if (pui32IntAllocatedUpdateValues)
+       {
+               OSFreeMem(pui32IntAllocatedUpdateValues);
+               pui32IntAllocatedUpdateValues = NULL;
+       }
+       OSLockRelease(psComputeContext->hLock);
+       return eError;
+}
+
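+/*
+ * PVRSRVRGXFlushComputeDataKM
+ *
+ * Issues an SLC flush (without invalidate) for this compute context via a
+ * KCCB command and waits for the firmware to process the command slot.
+ */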
+PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
+{
+       RGXFWIF_KCCB_CMD sFlushCmd;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_UINT32 ui32kCCBCommandSlot;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice;
+
+#if defined(PDUMP)
+       PDUMPCOMMENTWITHFLAGS(psComputeContext->psDeviceNode,
+                             PDUMP_FLAGS_CONTINUOUS, "Submit Compute flush");
+#endif
+       sFlushCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
+       sFlushCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_FALSE;
+       sFlushCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_TRUE;
+       sFlushCmd.uCmdData.sSLCFlushInvalData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext);
+
+       OSLockAcquire(psComputeContext->hLock);
+
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo,
+                                                                       RGXFWIF_DM_CDM,
+                                                                       &sFlushCmd,
+                                                                       PDUMP_FLAGS_CONTINUOUS,
+                                                                       &ui32kCCBCommandSlot);
+               /* Iterate if we hit a PVRSRV_ERROR_KERNEL_CCB_FULL error */
+               if ((eError != PVRSRV_ERROR_RETRY) &&
+                   (eError != PVRSRV_ERROR_KERNEL_CCB_FULL))
+               {
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       if (eError != PVRSRV_OK)
+       {
+               /* If we hit a temporary KCCB exhaustion, return a RETRY to caller */
+               if (eError == PVRSRV_ERROR_KERNEL_CCB_FULL)
+               {
+                       PVR_DPF((PVR_DBG_WARNING,
+                                "%s: Returning RETRY to caller", __func__));
+                       eError = PVRSRV_ERROR_RETRY;
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Failed to schedule SLC flush command (%s)",
+                                        __func__,
+                                        PVRSRVGetErrorString(eError)));
+               }
+       }
+       else
+       {
+               /* Wait for the SLC flush to complete */
+               eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Compute flush aborted (%s)",
+                                        __func__,
+                                        PVRSRVGetErrorString(eError)));
+               }
+               else if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] &
+                                 RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE))
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "%s: FW poll on a HW operation failed", __func__));
+               }
+       }
+
+       OSLockRelease(psComputeContext->hLock);
+       return eError;
+}
+
+
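+/*
+ * PVRSRVRGXNotifyComputeWriteOffsetUpdateKM
+ *
+ * Notifies the firmware that the client CCB write offset for this context has
+ * been updated. Only supported when the CDM_CONTROL_STREAM_FORMAT feature
+ * value is 2; otherwise PVRSRV_ERROR_NOT_SUPPORTED is returned.
+ */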
+PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT  *psComputeContext)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice;
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, CDM_CONTROL_STREAM_FORMAT) &&
+               2 == RGX_GET_FEATURE_VALUE(psDevInfo, CDM_CONTROL_STREAM_FORMAT))
+       {
+
+               RGXFWIF_KCCB_CMD  sKCCBCmd;
+               PVRSRV_ERROR      eError;
+
+               OSLockAcquire(psComputeContext->hLock);
+
+               /* Schedule the firmware command */
+               sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE;
+               sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext);
+
+               LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+               {
+                       eError = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice,
+                                                                               RGXFWIF_DM_CDM,
+                                                                               &sKCCBCmd,
+                                                                               PDUMP_FLAGS_NONE);
+                       if (eError != PVRSRV_ERROR_RETRY)
+                       {
+                               break;
+                       }
+                       OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+               } END_LOOP_UNTIL_TIMEOUT();
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Failed to schedule the FW command %d (%s)",
+                                       __func__,
+                                       eError,
+                                       PVRSRVGetErrorString(eError)));
+               }
+
+               OSLockRelease(psComputeContext->hLock);
+               return eError;
+       }
+       else
+       {
+               return PVRSRV_ERROR_NOT_SUPPORTED;
+       }
+}
+
+
+PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection,
+                                                  PVRSRV_DEVICE_NODE * psDeviceNode,
+                                                                                                 RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+                                                                                                 IMG_UINT32 ui32Priority)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+       OSLockAcquire(psComputeContext->hLock);
+
+       eError = ContextSetPriority(psComputeContext->psServerCommonContext,
+                                                               psConnection,
+                                                               psComputeContext->psDeviceNode->pvDevice,
+                                                               ui32Priority,
+                                                               RGXFWIF_DM_CDM);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the compute context (%s)", __func__, PVRSRVGetErrorString(eError)));
+       }
+
+       OSLockRelease(psComputeContext->hLock);
+       return eError;
+}
+
+/*
+ * PVRSRVRGXSetComputeContextPropertyKM
+ */
+PVRSRV_ERROR PVRSRVRGXSetComputeContextPropertyKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+                                                  RGX_CONTEXT_PROPERTY eContextProperty,
+                                                  IMG_UINT64 ui64Input,
+                                                  IMG_UINT64 *pui64Output)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       switch (eContextProperty)
+       {
+               case RGX_CONTEXT_PROPERTY_FLAGS:
+               {
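+                       /* Context flags are carried in the lower 32 bits of the
+                        * 64-bit property input.
+                        */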
+                       IMG_UINT32 ui32ContextFlags = (IMG_UINT32)ui64Input;
+
+                       OSLockAcquire(psComputeContext->hLock);
+                       eError = FWCommonContextSetFlags(psComputeContext->psServerCommonContext,
+                                                        ui32ContextFlags);
+                       OSLockRelease(psComputeContext->hLock);
+                       break;
+               }
+
+               default:
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty));
+                       eError = PVRSRV_ERROR_NOT_SUPPORTED;
+               }
+       }
+
+       return eError;
+}
+
+void DumpComputeCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                          DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                          void *pvDumpDebugFile,
+                          IMG_UINT32 ui32VerbLevel)
+{
+       DLLIST_NODE *psNode, *psNext;
+       OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock);
+       dllist_foreach_node(&psDevInfo->sComputeCtxtListHead, psNode, psNext)
+       {
+               RGX_SERVER_COMPUTE_CONTEXT *psCurrentServerComputeCtx =
+                       IMG_CONTAINER_OF(psNode, RGX_SERVER_COMPUTE_CONTEXT, sListNode);
+               DumpFWCommonContextInfo(psCurrentServerComputeCtx->psServerCommonContext,
+                                       pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+       }
+       OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock);
+}
+
+IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       IMG_UINT32 ui32ContextBitMask = 0;
+       DLLIST_NODE *psNode, *psNext;
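+
+       /* Walk every compute context on this device under the list read lock and
+        * flag CDM in the returned kick-type bitmask if any client CCB is stalled.
+        */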
+       OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock);
+       dllist_foreach_node(&psDevInfo->sComputeCtxtListHead, psNode, psNext)
+       {
+               RGX_SERVER_COMPUTE_CONTEXT *psCurrentServerComputeCtx =
+                       IMG_CONTAINER_OF(psNode, RGX_SERVER_COMPUTE_CONTEXT, sListNode);
+
+               if (CheckStalledClientCommonContext(psCurrentServerComputeCtx->psServerCommonContext, RGX_KICK_TYPE_DM_CDM)
+                       == PVRSRV_ERROR_CCCB_STALLED)
+               {
+                       ui32ContextBitMask |= RGX_KICK_TYPE_DM_CDM;
+               }
+       }
+       OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock);
+       return ui32ContextBitMask;
+}
+
+/*
+ * PVRSRVRGXGetLastDeviceErrorKM
+ */
+PVRSRV_ERROR PVRSRVRGXGetLastDeviceErrorKM(CONNECTION_DATA    *psConnection,
+                                           PVRSRV_DEVICE_NODE *psDeviceNode,
+                                           IMG_UINT32         *ui32Error)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       *ui32Error = psDevInfo->eLastDeviceError;
+       psDevInfo->eLastDeviceError = RGX_CONTEXT_RESET_REASON_NONE;
+       return PVRSRV_OK;
+}
+
+/******************************************************************************
+ End of file (rgxcompute.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxcompute.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxcompute.h
new file mode 100644 (file)
index 0000000..0ac6e4a
--- /dev/null
@@ -0,0 +1,173 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX compute functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX compute functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXCOMPUTE_H)
+#define RGXCOMPUTE_H
+
+#include "devicemem.h"
+#include "device.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "connection_server.h"
+
+
+typedef struct _RGX_SERVER_COMPUTE_CONTEXT_ RGX_SERVER_COMPUTE_CONTEXT;
+
+/*!
+*******************************************************************************
+ @Function     PVRSRVRGXCreateComputeContextKM
+
+ @Description
+       Server-side implementation of RGXCreateComputeContext
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA                   *psConnection,
+                                                                                        PVRSRV_DEVICE_NODE                     *psDeviceNode,
+                                                                                        IMG_UINT32                                     ui32Priority,
+                                                                                        IMG_UINT32                                     ui32FrameworkRegisterSize,
+                                                                                        IMG_PBYTE                                      pbyFrameworkRegisters,
+                                                                                        IMG_HANDLE                                     hMemCtxPrivData,
+                                                                                        IMG_UINT32                                     ui32StaticComputecontextStateSize,
+                                                                                        IMG_PBYTE                                      pStaticComputecontextState,
+                                                                                        IMG_UINT32                                     ui32PackedCCBSizeU88,
+                                                                                        IMG_UINT32                                     ui32ContextFlags,
+                                                                                        IMG_UINT64                                     ui64RobustnessAddress,
+                                                                                        IMG_UINT32                                     ui32MaxDeadlineMS,
+                                                                                        RGX_SERVER_COMPUTE_CONTEXT     **ppsComputeContext);
+
+/*!
+*******************************************************************************
+ @Function     PVRSRVRGXDestroyComputeContextKM
+
+ @Description
+       Server-side implementation of RGXDestroyComputeContext
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
+
+
+/*!
+*******************************************************************************
+ @Function     PVRSRVRGXKickCDMKM
+
+ @Description
+       Server-side implementation of RGXKickCDM
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT     *psComputeContext,
+                                                               IMG_UINT32                                      ui32ClientUpdateCount,
+                                                               SYNC_PRIMITIVE_BLOCK            **pauiClientUpdateUFODevVarBlock,
+                                                               IMG_UINT32                                      *paui32ClientUpdateSyncOffset,
+                                                               IMG_UINT32                                      *paui32ClientUpdateValue,
+                                                               PVRSRV_FENCE                            iCheckFence,
+                                                               PVRSRV_TIMELINE                         iUpdateTimeline,
+                                                               PVRSRV_FENCE                            *piUpdateFence,
+                                                               IMG_CHAR                                        pcszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],
+                                                               IMG_UINT32                                      ui32CmdSize,
+                                                               IMG_PBYTE                                       pui8DMCmd,
+                                                               IMG_UINT32                                      ui32PDumpFlags,
+                                                               IMG_UINT32                                      ui32ExtJobRef,
+                                                               IMG_UINT32                                      ui32SyncPMRCount,
+                                                               IMG_UINT32                                      *paui32SyncPMRFlags,
+                                                               PMR                                                     **ppsSyncPMRs,
+                                                               IMG_UINT32                                      ui32NumWorkgroups,
+                                                               IMG_UINT32                                      ui32NumWorkitems,
+                                                               IMG_UINT64                                      ui64DeadlineInus);
+
+/*!
+*******************************************************************************
+ @Function     PVRSRVRGXFlushComputeDataKM
+
+ @Description
+       Server-side implementation of RGXFlushComputeData
+
+ @Input psComputeContext - Compute context to flush
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
+
+/*!
+*******************************************************************************
+ @Function     PVRSRVRGXNotifyComputeWriteOffsetUpdateKM
+
+ @Description
+       Server-side implementation of RGXNotifyComputeWriteOffsetUpdate
+
+ @Input psComputeContext - Compute context to notify
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
+
+PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection,
+                                                                                                 PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                                                 RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+                                                                                                 IMG_UINT32 ui32Priority);
+
+PVRSRV_ERROR PVRSRVRGXSetComputeContextPropertyKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+                                                                                                 RGX_CONTEXT_PROPERTY eContextProperty,
+                                                                                                 IMG_UINT64 ui64Input,
+                                                                                                 IMG_UINT64 *pui64Output);
+
+PVRSRV_ERROR PVRSRVRGXGetLastDeviceErrorKM(CONNECTION_DATA    *psConnection,
+                                           PVRSRV_DEVICE_NODE *psDeviceNode,
+                                           IMG_UINT32         *ui32Error);
+
+/* Debug - Dump debug info of compute contexts on this device */
+void DumpComputeCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                          DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                          void *pvDumpDebugFile,
+                          IMG_UINT32 ui32VerbLevel);
+
+/* Debug/Watchdog - check if client compute contexts are stalled */
+IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+#endif /* RGXCOMPUTE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxdebug.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxdebug.c
new file mode 100644 (file)
index 0000000..86dcb9e
--- /dev/null
@@ -0,0 +1,5785 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX debug information
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX debugging functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+
+#include "img_defs.h"
+#include "rgxdefs_km.h"
+#include "rgxdevice.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "cache_km.h"
+#include "osfunc.h"
+
+#include "rgxdebug.h"
+#include "pvrversion.h"
+#include "pvr_debug.h"
+#include "srvkm.h"
+#include "rgxutils.h"
+#include "tlstream.h"
+#include "rgxfwutils.h"
+#include "pvrsrv.h"
+#include "services_km.h"
+
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "devicemem_utils.h"
+#include "rgx_fwif_km.h"
+#include "rgx_fwif_sf.h"
+#include "rgxfw_log_helper.h"
+#include "fwtrace_string.h"
+#include "rgxfwimageutils.h"
+#include "fwload.h"
+
+#include "rgxta3d.h"
+#include "rgxkicksync.h"
+#include "rgxcompute.h"
+#include "rgxtransfer.h"
+#include "rgxtdmtransfer.h"
+#include "rgxtimecorr.h"
+#include "rgx_options.h"
+#include "rgxinit.h"
+#include "devicemem_history_server.h"
+#include "info_page.h"
+#include "rgx_bvnc_defs_km.h"
+
+#define PVR_DUMP_FIRMWARE_INFO(x)                                                                                                              \
+       PVR_DUMPDEBUG_LOG("FW info: %d.%d @ %8d (%s) build options: 0x%08x",                            \
+                                               PVRVERSION_UNPACK_MAJ((x).ui32DDKVersion),                                              \
+                                               PVRVERSION_UNPACK_MIN((x).ui32DDKVersion),                                              \
+                                               (x).ui32DDKBuild,                                                                                               \
+                                               ((x).ui32BuildOptions & OPTIONS_DEBUG_MASK) ? "debug":"release",\
+                                               (x).ui32BuildOptions);
+
+#define DD_SUMMARY_INDENT  ""
+#define DD_NORMAL_INDENT   "    "
+
+#define RGX_DEBUG_STR_SIZE                     (150U)
+#define MAX_FW_DESCRIPTION_LENGTH      (500U)
+
+#define RGX_CR_BIF_CAT_BASE0                              (0x1200U)
+#define RGX_CR_BIF_CAT_BASE1                              (0x1208U)
+
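+/* Register offset of the Nth BIF catalogue base, derived from the stride
+ * between CAT_BASE0 and CAT_BASE1.
+ */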
+#define RGX_CR_BIF_CAT_BASEN(n) \
+       (RGX_CR_BIF_CAT_BASE0 + \
+       ((RGX_CR_BIF_CAT_BASE1 - RGX_CR_BIF_CAT_BASE0) * (n)))
+
+
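+/* X-macro lists: expanded below to generate the RGXDBG_* enums and the
+ * corresponding name-string tables.
+ */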
+#define RGXDBG_BIF_IDS \
+       X(BIF0)\
+       X(BIF1)\
+       X(TEXAS_BIF)\
+       X(DPX_BIF) \
+       X(FWCORE)
+
+#define RGXDBG_SIDEBAND_TYPES \
+       X(META)\
+       X(TLA)\
+       X(DMA)\
+       X(VDMM)\
+       X(CDM)\
+       X(IPP)\
+       X(PM)\
+       X(TILING)\
+       X(MCU)\
+       X(PDS)\
+       X(PBE)\
+       X(VDMS)\
+       X(IPF)\
+       X(ISP)\
+       X(TPF)\
+       X(USCS)\
+       X(PPP)\
+       X(VCE)\
+       X(TPF_CPF)\
+       X(IPF_CPF)\
+       X(FBCDC)
+
+typedef enum
+{
+#define X(NAME) RGXDBG_##NAME,
+       RGXDBG_BIF_IDS
+#undef X
+} RGXDBG_BIF_ID;
+
+typedef enum
+{
+#define X(NAME) RGXDBG_##NAME,
+       RGXDBG_SIDEBAND_TYPES
+#undef X
+} RGXDBG_SIDEBAND_TYPE;
+
+static const IMG_CHAR *const pszPowStateName[] =
+{
+#define X(NAME)        #NAME,
+       RGXFWIF_POW_STATES
+#undef X
+};
+
+static const IMG_CHAR *const pszBIFNames[] =
+{
+#define X(NAME)        #NAME,
+       RGXDBG_BIF_IDS
+#undef X
+};
+
+typedef struct _IMG_FLAGS2DESC_
+{
+       IMG_UINT32              uiFlag;
+       const IMG_CHAR  *pszLabel;
+} IMG_FLAGS2DESC;
+
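+/* Tables mapping firmware config/state bit flags to the labels printed in the
+ * debug dump.
+ */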
+static const IMG_FLAGS2DESC asCswOpts2Description[] =
+{
+       {RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST, " Fast CSW profile;"},
+       {RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM, " Medium CSW profile;"},
+       {RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW, " Slow CSW profile;"},
+       {RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY, " No Delay CSW profile;"},
+       {RGXFWIF_INICFG_CTXSWITCH_MODE_RAND, " Random Csw enabled;"},
+       {RGXFWIF_INICFG_CTXSWITCH_SRESET_EN, " SoftReset;"},
+};
+
+static const IMG_FLAGS2DESC asMisc2Description[] =
+{
+       {RGXFWIF_INICFG_POW_RASCALDUST, " Power Rascal/Dust;"},
+       {RGXFWIF_INICFG_HWPERF_EN, " HwPerf EN;"},
+       {RGXFWIF_INICFG_FBCDC_V3_1_EN, " FBCDCv3.1;"},
+       {RGXFWIF_INICFG_CHECK_MLIST_EN, " Check MList;"},
+       {RGXFWIF_INICFG_DISABLE_CLKGATING_EN, " ClockGating Off;"},
+       {RGXFWIF_INICFG_REGCONFIG_EN, " Register Config;"},
+       {RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY, " Assert on OOM;"},
+       {RGXFWIF_INICFG_HWP_DISABLE_FILTER, " HWP Filter Off;"},
+       {RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN, " CDM Random kill;"},
+       {RGXFWIF_INICFG_DISABLE_DM_OVERLAP, " DM Overlap Off;"},
+       {RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER, " Assert on HWR;"},
+       {RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED, " Coherent fabric on;"},
+       {RGXFWIF_INICFG_VALIDATE_IRQ, " Validate IRQ;"},
+       {RGXFWIF_INICFG_DISABLE_PDP_EN, " PDUMP Panic off;"},
+       {RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN, " SPU Pow mask change on;"},
+       {RGXFWIF_INICFG_WORKEST, " Workload Estim;"},
+       {RGXFWIF_INICFG_PDVFS, " PDVFS;"},
+       {RGXFWIF_INICFG_CDM_ARBITRATION_TASK_DEMAND, " CDM task demand arbitration;"},
+       {RGXFWIF_INICFG_CDM_ARBITRATION_ROUND_ROBIN, " CDM round-robin arbitration;"},
+       {RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP, " ISP v1 scheduling;"},
+       {RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP, " ISP v2 scheduling;"},
+       {RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER, " Validate SOC&USC timers;"},
+};
+
+static const IMG_FLAGS2DESC asFwOsCfg2Description[] =
+{
+       {RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN, " TDM;"},
+       {RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN, " TA;"},
+       {RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN, " 3D;"},
+       {RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN, " CDM;"},
+       {RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM, " LowPrio TDM;"},
+       {RGXFWIF_INICFG_OS_LOW_PRIO_CS_GEOM, " LowPrio TA;"},
+       {RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D, " LowPrio 3D;"},
+       {RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM, " LowPrio CDM;"},
+};
+
+static const IMG_FLAGS2DESC asHwrState2Description[] =
+{
+       {RGXFWIF_HWR_HARDWARE_OK, " HWR OK;"},
+       {RGXFWIF_HWR_GENERAL_LOCKUP, " General lockup;"},
+       {RGXFWIF_HWR_DM_RUNNING_OK, " DM running ok;"},
+       {RGXFWIF_HWR_DM_STALLING, " DM stalling;"},
+       {RGXFWIF_HWR_FW_FAULT, " FW fault;"},
+       {RGXFWIF_HWR_RESTART_REQUESTED, " Restarting;"},
+};
+
+static const IMG_FLAGS2DESC asDmState2Description[] =
+{
+               {RGXFWIF_DM_STATE_READY_FOR_HWR, " ready for hwr;"},
+               {RGXFWIF_DM_STATE_NEEDS_SKIP, " needs skip;"},
+               {RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP, " needs PR cleanup;"},
+               {RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR, " needs trace clear;"},
+               {RGXFWIF_DM_STATE_GUILTY_LOCKUP, " guilty lockup;"},
+               {RGXFWIF_DM_STATE_INNOCENT_LOCKUP, " innocent lockup;"},
+               {RGXFWIF_DM_STATE_GUILTY_OVERRUNING, " guilty overrunning;"},
+               {RGXFWIF_DM_STATE_INNOCENT_OVERRUNING, " innocent overrunning;"},
+               {RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH, " hard context switching;"},
+               {RGXFWIF_DM_STATE_GPU_ECC_HWR, " GPU ECC hwr;"},
+};
+
+const IMG_CHAR * const gapszMipsPermissionPTFlags[4] =
+{
+       "    ",
+       "XI  ",
+       "RI  ",
+       "RIXI"
+};
+
+const IMG_CHAR * const gapszMipsCoherencyPTFlags[8] =
+{
+       "C",
+       "C",
+       " ",
+       "C",
+       "C",
+       "C",
+       "C",
+       " "
+};
+
+const IMG_CHAR * const gapszMipsDirtyGlobalValidPTFlags[8] =
+{
+       "   ",
+       "  G",
+       " V ",
+       " VG",
+       "D  ",
+       "D G",
+       "DV ",
+       "DVG"
+};
+
+#if !defined(SUPPORT_TRUSTED_DEVICE)
+#if !defined(NO_HARDWARE)
+/* Translation of MIPS exception encoding */
+typedef struct _MIPS_EXCEPTION_ENCODING_
+{
+       const IMG_CHAR *const pszStr;   /* Error type */
+       const IMG_BOOL bIsFatal;        /* Error is fatal or non-fatal */
+} MIPS_EXCEPTION_ENCODING;
+
+static const MIPS_EXCEPTION_ENCODING apsMIPSExcCodes[] =
+{
+       {"Interrupt", IMG_FALSE},
+       {"TLB modified exception", IMG_FALSE},
+       {"TLB exception (load/instruction fetch)", IMG_FALSE},
+       {"TLB exception (store)", IMG_FALSE},
+       {"Address error exception (load/instruction fetch)", IMG_TRUE},
+       {"Address error exception (store)", IMG_TRUE},
+       {"Bus error exception (instruction fetch)", IMG_TRUE},
+       {"Bus error exception (load/store)", IMG_TRUE},
+       {"Syscall exception", IMG_FALSE},
+       {"Breakpoint exception (FW assert)", IMG_FALSE},
+       {"Reserved instruction exception", IMG_TRUE},
+       {"Coprocessor Unusable exception", IMG_FALSE},
+       {"Arithmetic Overflow exception", IMG_FALSE},
+       {"Trap exception", IMG_FALSE},
+       {NULL, IMG_FALSE},
+       {NULL, IMG_FALSE},
+       {"Implementation-Specific Exception 1 (COP2)", IMG_FALSE},
+       {"CorExtend Unusable", IMG_FALSE},
+       {"Coprocessor 2 exceptions", IMG_FALSE},
+       {"TLB Read-Inhibit", IMG_TRUE},
+       {"TLB Execute-Inhibit", IMG_TRUE},
+       {NULL, IMG_FALSE},
+       {NULL, IMG_FALSE},
+       {"Reference to WatchHi/WatchLo address", IMG_FALSE},
+       {"Machine check", IMG_FALSE},
+       {NULL, IMG_FALSE},
+       {"DSP Module State Disabled exception", IMG_FALSE},
+       {NULL, IMG_FALSE},
+       {NULL, IMG_FALSE},
+       {NULL, IMG_FALSE},
+       /* Can only happen in MIPS debug mode */
+       {"Parity error", IMG_FALSE},
+       {NULL, IMG_FALSE}
+};
+
+static IMG_CHAR const *_GetMIPSExcString(IMG_UINT32 ui32ExcCode)
+{
+       if (ui32ExcCode >= sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING))
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                        "Only %lu exceptions available in MIPS, %u is not a valid exception code",
+                        (unsigned long)sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING), ui32ExcCode));
+               return NULL;
+       }
+
+       return apsMIPSExcCodes[ui32ExcCode].pszStr;
+}
+#endif
+#endif /* !defined(SUPPORT_TRUSTED_DEVICE) */
+
+typedef struct _RGXMIPSFW_C0_DEBUG_TBL_ENTRY_
+{
+    IMG_UINT32 ui32Mask;
+    const IMG_CHAR * pszExplanation;
+} RGXMIPSFW_C0_DEBUG_TBL_ENTRY;
+
+#if !defined(SUPPORT_TRUSTED_DEVICE)
+#if !defined(NO_HARDWARE)
+static const RGXMIPSFW_C0_DEBUG_TBL_ENTRY sMIPS_C0_DebugTable[] =
+{
+    { RGXMIPSFW_C0_DEBUG_DSS,      "Debug single-step exception occurred" },
+    { RGXMIPSFW_C0_DEBUG_DBP,      "Debug software breakpoint exception occurred" },
+    { RGXMIPSFW_C0_DEBUG_DDBL,     "Debug data break exception occurred on a load" },
+    { RGXMIPSFW_C0_DEBUG_DDBS,     "Debug data break exception occurred on a store" },
+    { RGXMIPSFW_C0_DEBUG_DIB,      "Debug instruction break exception occurred" },
+    { RGXMIPSFW_C0_DEBUG_DINT,     "Debug interrupt exception occurred" },
+    { RGXMIPSFW_C0_DEBUG_DIBIMPR,  "Imprecise debug instruction break exception occurred" },
+    { RGXMIPSFW_C0_DEBUG_DDBLIMPR, "Imprecise debug data break load exception occurred" },
+    { RGXMIPSFW_C0_DEBUG_DDBSIMPR, "Imprecise debug data break store exception occurred" },
+    { RGXMIPSFW_C0_DEBUG_IEXI,     "Imprecise error exception inhibit controls exception occurred" },
+    { RGXMIPSFW_C0_DEBUG_DBUSEP,   "Data access Bus Error exception pending" },
+    { RGXMIPSFW_C0_DEBUG_CACHEEP,  "Imprecise Cache Error pending" },
+    { RGXMIPSFW_C0_DEBUG_MCHECKP,  "Imprecise Machine Check exception pending" },
+    { RGXMIPSFW_C0_DEBUG_IBUSEP,   "Instruction fetch Bus Error exception pending" },
+    { (IMG_UINT32)RGXMIPSFW_C0_DEBUG_DBD,      "Debug exception occurred in branch delay slot" }
+};
+#endif
+#endif /* !defined(SUPPORT_TRUSTED_DEVICE) */
+
+static const IMG_CHAR * const apszFwOsStateName[RGXFW_CONNECTION_FW_STATE_COUNT] =
+{
+       "offline",
+       "ready",
+       "active",
+       "offloading"
+};
+
+#if defined(PVR_ENABLE_PHR)
+static const IMG_FLAGS2DESC asPHRConfig2Description[] =
+{
+       {BIT_ULL(RGXFWIF_PHR_MODE_OFF), "off"},
+       {BIT_ULL(RGXFWIF_PHR_MODE_RD_RESET), "reset RD hardware"},
+       {BIT_ULL(RGXFWIF_PHR_MODE_FULL_RESET), "full gpu reset "},
+};
+#endif
+
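+/* Poll a META register through the Slave Port until (value & mask) equals the
+ * requested value, giving up after 1000 reads.
+ */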
+static PVRSRV_ERROR
+RGXPollMetaRegThroughSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32RegOffset,
+                        IMG_UINT32 ui32PollValue, IMG_UINT32 ui32Mask)
+{
+       IMG_UINT32 ui32RegValue, ui32NumPolls = 0;
+       PVRSRV_ERROR eError;
+
+       do
+       {
+               eError = RGXReadFWModuleAddr(psDevInfo, ui32RegOffset, &ui32RegValue);
+               if (eError != PVRSRV_OK)
+               {
+                       return eError;
+               }
+       } while (((ui32RegValue & ui32Mask) != ui32PollValue) && (ui32NumPolls++ < 1000));
+
+       return ((ui32RegValue & ui32Mask) == ui32PollValue) ? PVRSRV_OK : PVRSRV_ERROR_RETRY;
+}
+
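+/* Read a META core register indirectly via the TXUXXRXRQ/TXUXXRXDT Slave Port
+ * interface: wait for ready, issue the read request, wait for completion, then
+ * fetch the data word.
+ */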
+static PVRSRV_ERROR
+RGXReadMetaCoreReg(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32RegAddr, IMG_UINT32 *pui32RegVal)
+{
+       PVRSRV_ERROR eError;
+
+       /* Core Read Ready? */
+       eError = RGXPollMetaRegThroughSP(psDevInfo,
+                                        META_CR_TXUXXRXRQ_OFFSET,
+                                        META_CR_TXUXXRXRQ_DREADY_BIT,
+                                                                        META_CR_TXUXXRXRQ_DREADY_BIT);
+       PVR_LOG_RETURN_IF_ERROR(eError, "RGXPollMetaRegThroughSP");
+
+       /* Set the reg we are interested in reading */
+       eError = RGXWriteFWModuleAddr(psDevInfo, META_CR_TXUXXRXRQ_OFFSET,
+                               ui32RegAddr | META_CR_TXUXXRXRQ_RDnWR_BIT);
+       PVR_LOG_RETURN_IF_ERROR(eError, "RGXWriteFWModuleAddr");
+
+       /* Core Read Done? */
+       eError = RGXPollMetaRegThroughSP(psDevInfo,
+                                        META_CR_TXUXXRXRQ_OFFSET,
+                                        META_CR_TXUXXRXRQ_DREADY_BIT,
+                                                                        META_CR_TXUXXRXRQ_DREADY_BIT);
+       PVR_LOG_RETURN_IF_ERROR(eError, "RGXPollMetaRegThroughSP");
+
+       /* Read the value */
+       return RGXReadFWModuleAddr(psDevInfo, META_CR_TXUXXRXDT_OFFSET, pui32RegVal);
+}
+
+#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE)
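+/* Compare the firmware processor's view of a code region (read back word by
+ * word through RGXReadFWModuleAddr) against the host copy, stopping at the
+ * first mismatch.
+ */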
+static PVRSRV_ERROR _ValidateWithFWModule(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                               void *pvDumpDebugFile,
+                                               PVRSRV_RGXDEV_INFO *psDevInfo,
+                                               RGXFWIF_DEV_VIRTADDR *psFWAddr,
+                                               void *pvHostCodeAddr,
+                                               IMG_UINT32 ui32MaxLen,
+                                               const IMG_CHAR *pszDesc,
+                                               IMG_UINT32 ui32StartOffset)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_UINT32 ui32Value = 0;
+       IMG_UINT32 ui32FWCodeDevVAAddr = psFWAddr->ui32Addr + ui32StartOffset;
+       IMG_UINT32 *pui32FWCode = (IMG_PUINT32) ((IMG_PBYTE)pvHostCodeAddr + ui32StartOffset);
+       IMG_UINT32 i;
+
+#if defined(EMULATOR)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+       {
+               return PVRSRV_OK;
+       }
+#endif
+
+       ui32MaxLen -= ui32StartOffset;
+       ui32MaxLen /= sizeof(IMG_UINT32); /* Byte -> 32 bit words */
+
+       for (i = 0; i < ui32MaxLen; i++)
+       {
+               eError = RGXReadFWModuleAddr(psDevInfo, ui32FWCodeDevVAAddr, &ui32Value);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: %s", __func__, PVRSRVGetErrorString(eError)));
+                       return eError;
+               }
+
+#if defined(EMULATOR)
+               if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+#endif
+               {
+                       PVR_DPF((PVR_DBG_VERBOSE, "0x%x: CPU 0x%08x, FW 0x%08x", i * 4, pui32FWCode[i], ui32Value));
+
+                       if (pui32FWCode[i] != ui32Value)
+                       {
+                               PVR_DUMPDEBUG_LOG("%s: Mismatch while validating %s at offset 0x%x: CPU 0x%08x (%p), FW 0x%08x (%x)",
+                                        __func__, pszDesc,
+                                        (i * 4) + ui32StartOffset, pui32FWCode[i], pui32FWCode, ui32Value, ui32FWCodeDevVAAddr);
+                               return PVRSRV_ERROR_FW_IMAGE_MISMATCH;
+                       }
+               }
+
+               ui32FWCodeDevVAAddr += 4;
+       }
+
+       PVR_DUMPDEBUG_LOG("Match between Host and Firmware view of the %s", pszDesc);
+       return PVRSRV_OK;
+}
+#endif
+
+static PVRSRV_ERROR _ValidateFWImage(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                               void *pvDumpDebugFile,
+                                               PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE)
+       PVRSRV_ERROR eError;
+       IMG_UINT32 *pui32HostFWCode = NULL, *pui32HostFWCoremem = NULL;
+       OS_FW_IMAGE *psRGXFW = NULL;
+       const IMG_BYTE *pbRGXFirmware = NULL;
+       IMG_UINT32 *pui32CodeMemoryPointer;
+       RGXFWIF_DEV_VIRTADDR sFWAddr;
+       IMG_UINT32 ui32StartOffset = 0;
+       RGX_LAYER_PARAMS sLayerParams;
+       sLayerParams.psDevInfo = psDevInfo;
+
+#if defined(EMULATOR)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+       {
+               PVR_DUMPDEBUG_LOG("Validation of RISC-V FW code is disabled on emulator");
+               return PVRSRV_OK;
+       }
+#endif
+
+       if (psDevInfo->pvRegsBaseKM == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: RGX registers not mapped yet!", __func__));
+               return PVRSRV_ERROR_BAD_MAPPING;
+       }
+
+       /* Load FW from system for code verification */
+       pui32HostFWCode = OSAllocZMem(psDevInfo->ui32FWCodeSizeInBytes);
+       if (pui32HostFWCode == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to allocate memory for FW code, "
+                               "skipping FW code verification",
+                               __func__));
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       if (psDevInfo->ui32FWCorememCodeSizeInBytes)
+       {
+               pui32HostFWCoremem = OSAllocZMem(psDevInfo->ui32FWCorememCodeSizeInBytes);
+               if (pui32HostFWCoremem == NULL)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Failed to allocate memory for FW coremem code, "
+                                       "skipping FW code verification",
+                                       __func__));
+                       eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                       goto freeHostFWCode;
+               }
+       }
+
+       /* Load FW image */
+       eError = RGXLoadAndGetFWData(psDevInfo->psDeviceNode, &psRGXFW, &pbRGXFirmware);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to load FW image file (%s).",
+                        __func__, PVRSRVGetErrorString(eError)));
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto cleanup_initfw;
+       }
+
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+       {
+               eError = ProcessLDRCommandStream(&sLayerParams, pbRGXFirmware,
+                                               (void*) pui32HostFWCode, NULL,
+                                               (void*) pui32HostFWCoremem, NULL, NULL);
+       }
+       else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+       {
+               eError = ProcessELFCommandStream(&sLayerParams, pbRGXFirmware,
+                                                pui32HostFWCode, NULL,
+                                                NULL, NULL);
+       }
+       else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+       {
+               eError = ProcessELFCommandStream(&sLayerParams, pbRGXFirmware,
+                                                pui32HostFWCode, NULL,
+                                                pui32HostFWCoremem, NULL);
+       }
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to parse FW image file.", __func__));
+               goto cleanup_initfw;
+       }
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+       {
+               eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, (void **)&pui32CodeMemoryPointer);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Error in acquiring MIPS FW code memory area (%s)",
+                                       __func__,
+                                       PVRSRVGetErrorString(eError)));
+                       goto cleanup_initfw;
+               }
+
+               if (OSMemCmp(pui32HostFWCode, pui32CodeMemoryPointer, psDevInfo->ui32FWCodeSizeInBytes) == 0)
+               {
+                       PVR_DUMPDEBUG_LOG("Match between Host and MIPS views of the FW code");
+               }
+               else
+               {
+                       IMG_UINT32 ui32Count = 10; /* Show only the first 10 mismatches */
+                       IMG_UINT32 ui32Offset;
+
+                       PVR_DUMPDEBUG_LOG("Mismatch between Host and MIPS views of the FW code");
+                       for (ui32Offset = 0; (ui32Offset*4 < psDevInfo->ui32FWCodeSizeInBytes) && (ui32Count > 0); ui32Offset++)
+                       {
+                               if (pui32HostFWCode[ui32Offset] != pui32CodeMemoryPointer[ui32Offset])
+                               {
+                                       PVR_DUMPDEBUG_LOG("At %d bytes, code should be 0x%x but it is instead 0x%x",
+                                          ui32Offset*4, pui32HostFWCode[ui32Offset], pui32CodeMemoryPointer[ui32Offset]);
+                                       ui32Count--;
+                               }
+                       }
+               }
+
+               DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc);
+       }
+       else
+       {
+               if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+               {
+                       /* Start checking after the BOOT LOADER config */
+                       sFWAddr.ui32Addr = RGXFW_BOOTLDR_META_ADDR;
+
+                       ui32StartOffset = RGXFW_MAX_BOOTLDR_OFFSET;
+               }
+               else
+               {
+                       /* Use bootloader code remap which is always configured before the FW is started */
+                       sFWAddr.ui32Addr = RGXRISCVFW_BOOTLDR_CODE_BASE;
+               }
+
+               eError = _ValidateWithFWModule(pfnDumpDebugPrintf, pvDumpDebugFile,
+                                               psDevInfo, &sFWAddr,
+                                               pui32HostFWCode, psDevInfo->ui32FWCodeSizeInBytes,
+                                               "FW code", ui32StartOffset);
+               if (eError != PVRSRV_OK)
+               {
+                       goto cleanup_initfw;
+               }
+
+               if (psDevInfo->ui32FWCorememCodeSizeInBytes)
+               {
+                       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+                       {
+                               sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, META_COREMEM_CODE);
+                       }
+                       else
+                       {
+                               sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, RISCV_COREMEM_CODE);
+
+                               /* Core must be halted while issuing abstract commands */
+                               eError = RGXRiscvHalt(psDevInfo);
+                               PVR_GOTO_IF_ERROR(eError, cleanup_initfw);
+                       }
+
+                       eError = _ValidateWithFWModule(pfnDumpDebugPrintf, pvDumpDebugFile,
+                                                       psDevInfo, &sFWAddr,
+                                                       pui32HostFWCoremem, psDevInfo->ui32FWCorememCodeSizeInBytes,
+                                                       "FW coremem code", 0);
+
+                       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+                       {
+                               eError = RGXRiscvResume(psDevInfo);
+                               PVR_GOTO_IF_ERROR(eError, cleanup_initfw);
+                       }
+               }
+       }
+
+cleanup_initfw:
+       if (psRGXFW)
+       {
+               OSUnloadFirmware(psRGXFW);
+       }
+
+       if (pui32HostFWCoremem)
+       {
+               OSFreeMem(pui32HostFWCoremem);
+       }
+freeHostFWCode:
+       if (pui32HostFWCode)
+       {
+               OSFreeMem(pui32HostFWCode);
+       }
+       return eError;
+#else
+       PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf);
+       PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile);
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+       return PVRSRV_OK;
+#endif
+}
+
+#if defined(SUPPORT_FW_VIEW_EXTRA_DEBUG)
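+/* Cross-check the firmware processor's view of the FW code (and coremem code,
+ * if present) against the host CPU mapping straight after the image is loaded.
+ */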
+PVRSRV_ERROR ValidateFWOnLoad(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE)
+       IMG_PBYTE pbCodeMemoryPointer;
+       PVRSRV_ERROR eError;
+       RGXFWIF_DEV_VIRTADDR sFWAddr;
+
+       eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, (void **)&pbCodeMemoryPointer);
+       if (eError != PVRSRV_OK)
+       {
+               return eError;
+       }
+
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+       {
+               sFWAddr.ui32Addr = RGXFW_BOOTLDR_META_ADDR;
+       }
+       else
+       {
+               PVR_ASSERT(RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR));
+               sFWAddr.ui32Addr = RGXRISCVFW_BOOTLDR_CODE_BASE;
+       }
+
+       eError = _ValidateWithFWModule(NULL, NULL, psDevInfo, &sFWAddr, pbCodeMemoryPointer, psDevInfo->ui32FWCodeSizeInBytes, "FW code", 0);
+       if (eError != PVRSRV_OK)
+       {
+               goto releaseFWCodeMapping;
+       }
+
+       if (psDevInfo->ui32FWCorememCodeSizeInBytes)
+       {
+               eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc, (void **)&pbCodeMemoryPointer);
+               if (eError != PVRSRV_OK)
+               {
+                       goto releaseFWCoreCodeMapping;
+               }
+
+               if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+               {
+                       sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, META_COREMEM_CODE);
+               }
+               else
+               {
+                       PVR_ASSERT(RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR));
+                       sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, RISCV_COREMEM_CODE);
+               }
+
+               eError = _ValidateWithFWModule(NULL, NULL, psDevInfo, &sFWAddr, pbCodeMemoryPointer,
+                                               psDevInfo->ui32FWCorememCodeSizeInBytes, "FW coremem code", 0);
+       }
+
+releaseFWCoreCodeMapping:
+       if (psDevInfo->ui32FWCorememCodeSizeInBytes)
+       {
+               DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc);
+       }
+releaseFWCodeMapping:
+       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc);
+
+       return eError;
+#else
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+       return PVRSRV_OK;
+#endif
+}
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function     _RGXDecodePMPC
+
+ @Description
+
+ Return the name for the PM managed Page Catalogues
+
+ @Input ui32PC  - Page Catalogue number
+
+ @Return   void
+
+******************************************************************************/
+static const IMG_CHAR* _RGXDecodePMPC(IMG_UINT32 ui32PC)
+{
+       const IMG_CHAR* pszPMPC = " (-)";
+
+       switch (ui32PC)
+       {
+               case 0x8: pszPMPC = " (PM-VCE0)"; break;
+               case 0x9: pszPMPC = " (PM-TE0)"; break;
+               case 0xA: pszPMPC = " (PM-ZLS0)"; break;
+               case 0xB: pszPMPC = " (PM-ALIST0)"; break;
+               case 0xC: pszPMPC = " (PM-VCE1)"; break;
+               case 0xD: pszPMPC = " (PM-TE1)"; break;
+               case 0xE: pszPMPC = " (PM-ZLS1)"; break;
+               case 0xF: pszPMPC = " (PM-ALIST1)"; break;
+       }
+
+       return pszPMPC;
+}
+
+/*!
+*******************************************************************************
+
+ @Function     _RGXDecodeBIFReqTags
+
+ @Description
+
+ Decode the BIF Tag ID and sideband data fields from BIF_FAULT_BANK_REQ_STATUS regs
+
+ @Input eBankID             - BIF identifier
+ @Input ui32TagID           - Tag ID value
+ @Input ui32TagSB           - Tag Sideband data
+ @Output ppszTagID          - Decoded string from the Tag ID
+ @Output ppszTagSB          - Decoded string from the Tag SB
+ @Output pszScratchBuf      - Buffer provided to the function to generate the debug strings
+ @Input ui32ScratchBufSize  - Size of the provided buffer
+
+ @Return   void
+
+******************************************************************************/
+#include "rgxmhdefs_km.h"
+
+static void _RGXDecodeBIFReqTagsXE(PVRSRV_RGXDEV_INFO  *psDevInfo,
+                                                                  IMG_UINT32   ui32TagID,
+                                                                  IMG_UINT32   ui32TagSB,
+                                                                  IMG_CHAR             **ppszTagID,
+                                                                  IMG_CHAR             **ppszTagSB,
+                                                                  IMG_CHAR             *pszScratchBuf,
+                                                                  IMG_UINT32   ui32ScratchBufSize)
+{
+       /* default to unknown */
+       IMG_CHAR *pszTagID = "-";
+       IMG_CHAR *pszTagSB = "-";
+       IMG_BOOL bNewTagEncoding = IMG_FALSE;
+
+       PVR_ASSERT(ppszTagID != NULL);
+       PVR_ASSERT(ppszTagSB != NULL);
+
+       /* tags updated for all cores (auto & consumer) with branch > 36 or only auto cores with branch = 36 */
+       if ((psDevInfo->sDevFeatureCfg.ui32B > 36) ||
+           (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TILE_REGION_PROTECTION) && (psDevInfo->sDevFeatureCfg.ui32B == 36)))
+       {
+               bNewTagEncoding = IMG_TRUE;
+       }
+
+       switch (ui32TagID)
+       {
+               /* MMU tags */
+               case RGX_MH_TAG_ENCODING_MH_TAG_MMU:
+               case RGX_MH_TAG_ENCODING_MH_TAG_CPU_MMU:
+               case RGX_MH_TAG_ENCODING_MH_TAG_CPU_IFU:
+               case RGX_MH_TAG_ENCODING_MH_TAG_CPU_LSU:
+               {
+                       switch (ui32TagID)
+                       {
+                               case RGX_MH_TAG_ENCODING_MH_TAG_MMU:        pszTagID = "MMU"; break;
+                               case RGX_MH_TAG_ENCODING_MH_TAG_CPU_MMU:        pszTagID = "CPU MMU"; break;
+                               case RGX_MH_TAG_ENCODING_MH_TAG_CPU_IFU:        pszTagID = "CPU IFU"; break;
+                               case RGX_MH_TAG_ENCODING_MH_TAG_CPU_LSU:        pszTagID = "CPU LSU"; break;
+                       }
+                       switch (ui32TagSB)
+                       {
+                               case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PT_REQUEST:             pszTagSB = "PT"; break;
+                               case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PD_REQUEST:             pszTagSB = "PD"; break;
+                               case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PC_REQUEST:             pszTagSB = "PC"; break;
+                               case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PT_REQUEST:  pszTagSB = "PM PT"; break;
+                               case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_REQUEST:  pszTagSB = "PM PD"; break;
+                               case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_REQUEST:  pszTagSB = "PM PC"; break;
+                               case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_WREQUEST: pszTagSB = "PM PD W"; break;
+                               case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_WREQUEST: pszTagSB = "PM PC W"; break;
+                       }
+                       break;
+               }
+
+               /* MIPS */
+               case RGX_MH_TAG_ENCODING_MH_TAG_MIPS:
+               {
+                       pszTagID = "MIPS";
+                       switch (ui32TagSB)
+                       {
+                               case RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_OPCODE_FETCH: pszTagSB = "Opcode"; break;
+                               case RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_DATA_ACCESS:  pszTagSB = "Data"; break;
+                       }
+                       break;
+               }
+
+               /* CDM tags */
+               case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG0:
+               case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG1:
+               case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG2:
+               case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG3:
+               {
+                       switch (ui32TagID)
+                       {
+                               case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG0:       pszTagID = "CDM Stage 0"; break;
+                               case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG1:       pszTagID = "CDM Stage 1"; break;
+                               case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG2:       pszTagID = "CDM Stage 2"; break;
+                               case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG3:       pszTagID = "CDM Stage 3"; break;
+                       }
+                       switch (ui32TagSB)
+                       {
+                               case RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTROL_STREAM: pszTagSB = "Control"; break;
+                               case RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_INDIRECT_DATA:  pszTagSB = "Indirect"; break;
+                               case RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_EVENT_DATA:             pszTagSB = "Event"; break;
+                               case RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTEXT_STATE:  pszTagSB = "Context"; break;
+                       }
+                       break;
+               }
+
+               /* VDM tags */
+               case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG0:
+               case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG1:
+               case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG2:
+               case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG3:
+               case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG4:
+               {
+                       switch (ui32TagID)
+                       {
+                               case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG0:       pszTagID = "VDM Stage 0"; break;
+                               case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG1:       pszTagID = "VDM Stage 1"; break;
+                               case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG2:       pszTagID = "VDM Stage 2"; break;
+                               case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG3:       pszTagID = "VDM Stage 3"; break;
+                               case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG4:       pszTagID = "VDM Stage 4"; break;
+                       }
+                       switch (ui32TagSB)
+                       {
+                               case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTROL:        pszTagSB = "Control"; break;
+                               case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STATE:          pszTagSB = "State"; break;
+                               case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_INDEX:          pszTagSB = "Index"; break;
+                               case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STACK:          pszTagSB = "Stack"; break;
+                               case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTEXT:        pszTagSB = "Context"; break;
+                       }
+                       break;
+               }
+
+               /* PDS */
+               case RGX_MH_TAG_ENCODING_MH_TAG_PDS_0:
+                       pszTagID = "PDS req 0"; break;
+               case RGX_MH_TAG_ENCODING_MH_TAG_PDS_1:
+                       pszTagID = "PDS req 1"; break;
+
+               /* MCU */
+               case RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCA:
+                       pszTagID = "MCU USCA"; break;
+               case RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCB:
+                       pszTagID = "MCU USCB"; break;
+               case RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCC:
+                       pszTagID = "MCU USCC"; break;
+               case RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCD:
+                       pszTagID = "MCU USCD"; break;
+               case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCA:
+                       pszTagID = "MCU PDS USCA"; break;
+               case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCB:
+                       pszTagID = "MCU PDS USCB"; break;
+               case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCC:
+                       pszTagID = "MCU PDS USCC"; break;
+               case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCD:
+                       pszTagID = "MCU PDS USCD"; break;
+               case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDSRW:
+                       pszTagID = "MCU PDS PDSRW"; break;
+
+               /* TCU */
+               case RGX_MH_TAG_ENCODING_MH_TAG_TCU_0:
+                       pszTagID = "TCU req 0"; break;
+               case RGX_MH_TAG_ENCODING_MH_TAG_TCU_1:
+                       pszTagID = "TCU req 1"; break;
+
+               /* FBCDC */
+               case RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_0:
+                       pszTagID = bNewTagEncoding ? "TFBDC_TCU0" : "FBCDC0"; break;
+               case RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_1:
+                       pszTagID = bNewTagEncoding ? "TFBDC_ZLS0" : "FBCDC1"; break;
+               case RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_2:
+                       pszTagID = bNewTagEncoding ? "TFBDC_TCU1" : "FBCDC2"; break;
+               case RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_3:
+                       pszTagID = bNewTagEncoding ? "TFBDC_ZLS1" : "FBCDC3"; break;
+
+               /* USC Shared */
+               case RGX_MH_TAG_ENCODING_MH_TAG_USC:
+                       pszTagID = "USCS"; break;
+
+               /* ISP */
+               case RGX_MH_TAG_ENCODING_MH_TAG_ISP_ZLS:
+                       pszTagID = "ISP0 ZLS"; break;
+               case RGX_MH_TAG_ENCODING_MH_TAG_ISP_DS:
+                       pszTagID = "ISP0 DS"; break;
+
+               /* TPF */
+               case RGX_MH_TAG_ENCODING_MH_TAG_TPF:
+               case RGX_MH_TAG_ENCODING_MH_TAG_TPF_PBCDBIAS:
+               case RGX_MH_TAG_ENCODING_MH_TAG_TPF_SPF:
+               {
+                       switch (ui32TagID)
+                       {
+                               case RGX_MH_TAG_ENCODING_MH_TAG_TPF:           pszTagID = "TPF0"; break;
+                               case RGX_MH_TAG_ENCODING_MH_TAG_TPF_PBCDBIAS:  pszTagID = "TPF0 DBIAS"; break;
+                               case RGX_MH_TAG_ENCODING_MH_TAG_TPF_SPF:       pszTagID = "TPF0 SPF"; break;
+                       }
+                       switch (ui32TagSB)
+                       {
+                               case RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_PDS_STATE:      pszTagSB = "PDS state"; break;
+                               case RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DEPTH_BIAS:     pszTagSB = "Depth bias"; break;
+                               case RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_FLOOR_OFFSET_DATA:      pszTagSB = "Floor offset"; break;
+                               case RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DELTA_DATA:     pszTagSB = "Delta"; break;
+                       }
+                       break;
+               }
+
+               /* IPF */
+               case RGX_MH_TAG_ENCODING_MH_TAG_IPF_CREQ:
+               case RGX_MH_TAG_ENCODING_MH_TAG_IPF_OTHERS:
+               {
+                       switch (ui32TagID)
+                       {
+                               case RGX_MH_TAG_ENCODING_MH_TAG_IPF_CREQ:      pszTagID = "IPF0"; break;
+                               case RGX_MH_TAG_ENCODING_MH_TAG_IPF_OTHERS:    pszTagID = "IPF0"; break;
+                       }
+
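+                       /*
+                        * The IPF sideband is indexed per ISP/IPP pipe: values 0..N-1
+                        * decode as "CReq<pipe>", N..2N-1 as "PReq<pipe>", and the
+                        * remaining values as RReq/DBSC/CPF/Delta (N = NUM_ISP_IPP_PIPES).
+                        */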
+                       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_ISP_IPP_PIPES))
+                       {
+                               if (ui32TagSB < RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_IPP_PIPES))
+                               {
+                                       OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, "CReq%d", ui32TagSB);
+                                       pszTagSB = pszScratchBuf;
+                               }
+                               else if (ui32TagSB < 2 * RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_IPP_PIPES))
+                               {
+                                       ui32TagSB -= RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_IPP_PIPES);
+                                       OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, "PReq%d", ui32TagSB);
+                                       pszTagSB = pszScratchBuf;
+                               }
+                               else
+                               {
+                                       switch (ui32TagSB - 2 * RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_IPP_PIPES))
+                                       {
+                                               case 0: pszTagSB = "RReq"; break;
+                                               case 1: pszTagSB = "DBSC"; break;
+                                               case 2: pszTagSB = "CPF"; break;
+                                               case 3: pszTagSB = "Delta"; break;
+                                       }
+                               }
+                       }
+                       break;
+               }
+
+               /* VDM Stage 5 (temporary) */
+               case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG5:
+                       pszTagID = "VDM Stage 5"; break;
+
+               /* TA */
+               case RGX_MH_TAG_ENCODING_MH_TAG_TA_PPP:
+                       pszTagID = "PPP"; break;
+               case RGX_MH_TAG_ENCODING_MH_TAG_TA_TPWRTC:
+                       pszTagID = "TPW RTC"; break;
+               case RGX_MH_TAG_ENCODING_MH_TAG_TA_TEACRTC:
+                       pszTagID = "TEAC RTC"; break;
+               case RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGRTC:
+                       pszTagID = "PSG RTC"; break;
+               case RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGREGION:
+                       pszTagID = "PSG Region"; break;
+               case RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGSTREAM:
+                       pszTagID = "PSG Stream"; break;
+               case RGX_MH_TAG_ENCODING_MH_TAG_TA_TPW:
+                       pszTagID = "TPW"; break;
+               case RGX_MH_TAG_ENCODING_MH_TAG_TA_TPC:
+                       pszTagID = "TPC"; break;
+
+               /* PM */
+               case RGX_MH_TAG_ENCODING_MH_TAG_PM_ALLOC:
+               {
+                       pszTagID = "PMA";
+                       switch (ui32TagSB)
+                       {
+                               case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAFSTACK:    pszTagSB = "TA Fstack"; break;
+                               case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMLIST:             pszTagSB = "TA MList"; break;
+                               case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DFSTACK:    pszTagSB = "3D Fstack"; break;
+                               case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMLIST:             pszTagSB = "3D MList"; break;
+                               case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX0:              pszTagSB = "Context0"; break;
+                               case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX1:              pszTagSB = "Context1"; break;
+                               case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_MAVP:                pszTagSB = "MAVP"; break;
+                               case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_UFSTACK:             pszTagSB = "UFstack"; break;
+                               case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMMUSTACK:  pszTagSB = "TA MMUstack"; break;
+                               case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMMUSTACK:  pszTagSB = "3D MMUstack"; break;
+                               case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAUFSTACK:   pszTagSB = "TA UFstack"; break;
+                               case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DUFSTACK:   pszTagSB = "3D UFstack"; break;
+                               case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAVFP:               pszTagSB = "TA VFP"; break;
+                       }
+                       break;
+               }
+               case RGX_MH_TAG_ENCODING_MH_TAG_PM_DEALLOC:
+               {
+                       pszTagID = "PMD";
+                       switch (ui32TagSB)
+                       {
+                               case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAFSTACK:    pszTagSB = "TA Fstack"; break;
+                               case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMLIST:             pszTagSB = "TA MList"; break;
+                               case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DFSTACK:    pszTagSB = "3D Fstack"; break;
+                               case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMLIST:             pszTagSB = "3D MList"; break;
+                               case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX0:              pszTagSB = "Context0"; break;
+                               case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX1:              pszTagSB = "Context1"; break;
+                               case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_UFSTACK:             pszTagSB = "UFstack"; break;
+                               case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMMUSTACK:  pszTagSB = "TA MMUstack"; break;
+                               case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMMUSTACK:  pszTagSB = "3D MMUstack"; break;
+                               case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAUFSTACK:   pszTagSB = "TA UFstack"; break;
+                               case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DUFSTACK:   pszTagSB = "3D UFstack"; break;
+                               case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAVFP:               pszTagSB = "TA VFP"; break;
+                               case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DVFP:               pszTagSB = "3D VFP"; break;
+                       }
+                       break;
+               }
+
+               /* TDM */
+               case RGX_MH_TAG_ENCODING_MH_TAG_TDM_DMA:
+               {
+                       pszTagID = "TDM DMA";
+                       switch (ui32TagSB)
+                       {
+                               case RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTL_STREAM: pszTagSB = "Ctl stream"; break;
+                               case RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTX_BUFFER: pszTagSB = "Ctx buffer"; break;
+                               case RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_QUEUE_CTL:  pszTagSB = "Queue ctl"; break;
+                       }
+                       break;
+               }
+               case RGX_MH_TAG_ENCODING_MH_TAG_TDM_CTL:
+               {
+                       pszTagID = "TDM CTL";
+                       switch (ui32TagSB)
+                       {
+                               case RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_FENCE:   pszTagSB = "Fence"; break;
+                               case RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_CONTEXT: pszTagSB = "Context"; break;
+                               case RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_QUEUE:   pszTagSB = "Queue"; break;
+                       }
+                       break;
+               }
+
+               /* PBE */
+               case RGX_MH_TAG_ENCODING_MH_TAG_PBE0:
+                       pszTagID = "PBE0"; break;
+               case RGX_MH_TAG_ENCODING_MH_TAG_PBE1:
+                       pszTagID = "PBE1"; break;
+               case RGX_MH_TAG_ENCODING_MH_TAG_PBE2:
+                       pszTagID = "PBE2"; break;
+               case RGX_MH_TAG_ENCODING_MH_TAG_PBE3:
+                       pszTagID = "PBE3"; break;
+       }
+
+       *ppszTagID = pszTagID;
+       *ppszTagSB = pszTagSB;
+}
+
+/* RISC-V pf tags */
+#define RGX_MH_TAG_ENCODING_MH_TAG_CPU_MMU  (0x00000001U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_CPU_IFU  (0x00000002U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_CPU_LSU  (0x00000003U)
+
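+/*
+ * Decode BIF request tags for the dedicated firmware-core bank. Only devices
+ * with a RISC-V firmware processor report these tags; the sideband is reported
+ * as "RISC-V" and the tag ID selects the requesting CPU unit.
+ */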
+static void _RGXDecodeBIFReqTagsFwcore(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                          IMG_UINT32 ui32TagID,
+                                                                          IMG_UINT32 ui32TagSB,
+                                                                          IMG_CHAR **ppszTagID,
+                                                                          IMG_CHAR **ppszTagSB)
+{
+       /* default to unknown */
+       IMG_CHAR *pszTagID = "-";
+       IMG_CHAR *pszTagSB = "-";
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+       {
+               pszTagSB = "RISC-V";
+
+               switch (ui32TagID)
+               {
+                       case RGX_MH_TAG_ENCODING_MH_TAG_CPU_MMU:        pszTagID = "RISC-V MMU"; break;
+                       case RGX_MH_TAG_ENCODING_MH_TAG_CPU_IFU:        pszTagID = "RISC-V Instruction Fetch Unit"; break;
+                       case RGX_MH_TAG_ENCODING_MH_TAG_CPU_LSU:        pszTagID = "RISC-V Load/Store Unit"; break; /* Or Debug Module System Bus */
+               }
+       }
+
+       *ppszTagID = pszTagID;
+       *ppszTagSB = pszTagSB;
+}
+
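+/*
+ * Decode the BIF request Tag ID and Sideband fields into human readable
+ * strings for the debug dump. For example, on a device without the XE
+ * memory hierarchy, TagID 0x8 with sideband 0x1 decodes to "CDM" /
+ * "Indirect Data" (see the table below).
+ */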
+static void _RGXDecodeBIFReqTags(PVRSRV_RGXDEV_INFO    *psDevInfo,
+                                                                RGXDBG_BIF_ID  eBankID,
+                                                                IMG_UINT32             ui32TagID,
+                                                                IMG_UINT32             ui32TagSB,
+                                                                IMG_CHAR               **ppszTagID,
+                                                                IMG_CHAR               **ppszTagSB,
+                                                                IMG_CHAR               *pszScratchBuf,
+                                                                IMG_UINT32             ui32ScratchBufSize)
+{
+       /* default to unknown */
+       IMG_CHAR *pszTagID = "-";
+       IMG_CHAR *pszTagSB = "-";
+
+       PVR_ASSERT(ppszTagID != NULL);
+       PVR_ASSERT(ppszTagSB != NULL);
+
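+       /*
+        * Devices with the XE memory hierarchy use the per-unit tag tables above:
+        * the firmware-core bank is decoded with the RISC-V table and every other
+        * bank with the XE table. Older devices fall through to the legacy
+        * per-TagID table below.
+        */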
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY))
+       {
+               if (eBankID == RGXDBG_FWCORE)
+               {
+                       _RGXDecodeBIFReqTagsFwcore(psDevInfo, ui32TagID, ui32TagSB, ppszTagID, ppszTagSB);
+               }
+               else
+               {
+                       _RGXDecodeBIFReqTagsXE(psDevInfo, ui32TagID, ui32TagSB, ppszTagID, ppszTagSB, pszScratchBuf, ui32ScratchBufSize);
+               }
+               return;
+       }
+
+       switch (ui32TagID)
+       {
+               case 0x0:
+               {
+                       pszTagID = "MMU";
+                       switch (ui32TagSB)
+                       {
+                               case 0x0: pszTagSB = "Table"; break;
+                               case 0x1: pszTagSB = "Directory"; break;
+                               case 0x2: pszTagSB = "Catalogue"; break;
+                       }
+                       break;
+               }
+               case 0x1:
+               {
+                       pszTagID = "TLA";
+                       switch (ui32TagSB)
+                       {
+                               case 0x0: pszTagSB = "Pixel data"; break;
+                               case 0x1: pszTagSB = "Command stream data"; break;
+                               case 0x2: pszTagSB = "Fence or flush"; break;
+                       }
+                       break;
+               }
+               case 0x2:
+               {
+                       pszTagID = "HOST";
+                       break;
+               }
+               case 0x3:
+               {
+                       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+                       {
+                                       pszTagID = "META";
+                                       switch (ui32TagSB)
+                                       {
+                                               case 0x0: pszTagSB = "DCache - Thread 0"; break;
+                                               case 0x1: pszTagSB = "ICache - Thread 0"; break;
+                                               case 0x2: pszTagSB = "JTag - Thread 0"; break;
+                                               case 0x3: pszTagSB = "Slave bus - Thread 0"; break;
+                                               case 0x4: pszTagSB = "DCache - Thread 1"; break;
+                                               case 0x5: pszTagSB = "ICache - Thread 1"; break;
+                                               case 0x6: pszTagSB = "JTag - Thread 1"; break;
+                                               case 0x7: pszTagSB = "Slave bus - Thread 1"; break;
+                                       }
+                       }
+                       else if (RGX_IS_ERN_SUPPORTED(psDevInfo, 57596))
+                       {
+                               pszTagID = "TCU";
+                       }
+                       else
+                       {
+                               /* Unreachable code */
+                               PVR_ASSERT(IMG_FALSE);
+                       }
+                       break;
+               }
+               case 0x4:
+               {
+                       pszTagID = "USC";
+                       OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+                                  "Cache line %d", (ui32TagSB & 0x3f));
+                       pszTagSB = pszScratchBuf;
+                       break;
+               }
+               case 0x5:
+               {
+                       pszTagID = "PBE";
+                       break;
+               }
+               case 0x6:
+               {
+                       pszTagID = "ISP";
+                       switch (ui32TagSB)
+                       {
+                               case 0x00: pszTagSB = "ZLS"; break;
+                               case 0x20: pszTagSB = "Occlusion Query"; break;
+                       }
+                       break;
+               }
+               case 0x7:
+               {
+                       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING))
+                       {
+                               if (eBankID == RGXDBG_TEXAS_BIF)
+                               {
+                                       pszTagID = "IPF";
+                                       switch (ui32TagSB)
+                                       {
+                                               case 0x0: pszTagSB = "CPF"; break;
+                                               case 0x1: pszTagSB = "DBSC"; break;
+                                               case 0x2:
+                                               case 0x4:
+                                               case 0x6:
+                                               case 0x8: pszTagSB = "Control Stream"; break;
+                                               case 0x3:
+                                               case 0x5:
+                                               case 0x7:
+                                               case 0x9: pszTagSB = "Primitive Block"; break;
+                                       }
+                               }
+                               else
+                               {
+                                       pszTagID = "IPP";
+                                       switch (ui32TagSB)
+                                       {
+                                               case 0x0: pszTagSB = "Macrotile Header"; break;
+                                               case 0x1: pszTagSB = "Region Header"; break;
+                                       }
+                               }
+                       }
+                       else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SIMPLE_INTERNAL_PARAMETER_FORMAT))
+                       {
+                               pszTagID = "IPF";
+                               switch (ui32TagSB)
+                               {
+                                       case 0x0: pszTagSB = "Region Header"; break;
+                                       case 0x1: pszTagSB = "DBSC"; break;
+                                       case 0x2: pszTagSB = "CPF"; break;
+                                       case 0x3: pszTagSB = "Control Stream"; break;
+                                       case 0x4: pszTagSB = "Primitive Block"; break;
+                               }
+                       }
+                       else
+                       {
+                               pszTagID = "IPF";
+                               switch (ui32TagSB)
+                               {
+                                       case 0x0: pszTagSB = "Macrotile Header"; break;
+                                       case 0x1: pszTagSB = "Region Header"; break;
+                                       case 0x2: pszTagSB = "DBSC"; break;
+                                       case 0x3: pszTagSB = "CPF"; break;
+                                       case 0x4:
+                                       case 0x6:
+                                       case 0x8: pszTagSB = "Control Stream"; break;
+                                       case 0x5:
+                                       case 0x7:
+                                       case 0x9: pszTagSB = "Primitive Block"; break;
+                               }
+                       }
+                       break;
+               }
+               case 0x8:
+               {
+                       pszTagID = "CDM";
+                       switch (ui32TagSB)
+                       {
+                               case 0x0: pszTagSB = "Control Stream"; break;
+                               case 0x1: pszTagSB = "Indirect Data"; break;
+                               case 0x2: pszTagSB = "Event Write"; break;
+                               case 0x3: pszTagSB = "Context State"; break;
+                       }
+                       break;
+               }
+               case 0x9:
+               {
+                       pszTagID = "VDM";
+                       switch (ui32TagSB)
+                       {
+                               case 0x0: pszTagSB = "Control Stream"; break;
+                               case 0x1: pszTagSB = "PPP State"; break;
+                               case 0x2: pszTagSB = "Index Data"; break;
+                               case 0x4: pszTagSB = "Call Stack"; break;
+                               case 0x8: pszTagSB = "Context State"; break;
+                       }
+                       break;
+               }
+               case 0xA:
+               {
+                       pszTagID = "PM";
+                       switch (ui32TagSB)
+                       {
+                               case 0x0: pszTagSB = "PMA_TAFSTACK"; break;
+                               case 0x1: pszTagSB = "PMA_TAMLIST"; break;
+                               case 0x2: pszTagSB = "PMA_3DFSTACK"; break;
+                               case 0x3: pszTagSB = "PMA_3DMLIST"; break;
+                               case 0x4: pszTagSB = "PMA_PMCTX0"; break;
+                               case 0x5: pszTagSB = "PMA_PMCTX1"; break;
+                               case 0x6: pszTagSB = "PMA_MAVP"; break;
+                               case 0x7: pszTagSB = "PMA_UFSTACK"; break;
+                               case 0x8: pszTagSB = "PMD_TAFSTACK"; break;
+                               case 0x9: pszTagSB = "PMD_TAMLIST"; break;
+                               case 0xA: pszTagSB = "PMD_3DFSTACK"; break;
+                               case 0xB: pszTagSB = "PMD_3DMLIST"; break;
+                               case 0xC: pszTagSB = "PMD_PMCTX0"; break;
+                               case 0xD: pszTagSB = "PMD_PMCTX1"; break;
+                               case 0xF: pszTagSB = "PMD_UFSTACK"; break;
+                               case 0x10: pszTagSB = "PMA_TAMMUSTACK"; break;
+                               case 0x11: pszTagSB = "PMA_3DMMUSTACK"; break;
+                               case 0x12: pszTagSB = "PMD_TAMMUSTACK"; break;
+                               case 0x13: pszTagSB = "PMD_3DMMUSTACK"; break;
+                               case 0x14: pszTagSB = "PMA_TAUFSTACK"; break;
+                               case 0x15: pszTagSB = "PMA_3DUFSTACK"; break;
+                               case 0x16: pszTagSB = "PMD_TAUFSTACK"; break;
+                               case 0x17: pszTagSB = "PMD_3DUFSTACK"; break;
+                               case 0x18: pszTagSB = "PMA_TAVFP"; break;
+                               case 0x19: pszTagSB = "PMD_3DVFP"; break;
+                               case 0x1A: pszTagSB = "PMD_TAVFP"; break;
+                       }
+                       break;
+               }
+               case 0xB:
+               {
+                       pszTagID = "TA";
+                       switch (ui32TagSB)
+                       {
+                               case 0x1: pszTagSB = "VCE"; break;
+                               case 0x2: pszTagSB = "TPC"; break;
+                               case 0x3: pszTagSB = "TE Control Stream"; break;
+                               case 0x4: pszTagSB = "TE Region Header"; break;
+                               case 0x5: pszTagSB = "TE Render Target Cache"; break;
+                               case 0x6: pszTagSB = "TEAC Render Target Cache"; break;
+                               case 0x7: pszTagSB = "VCE Render Target Cache"; break;
+                               case 0x8: pszTagSB = "PPP Context State"; break;
+                       }
+                       break;
+               }
+               case 0xC:
+               {
+                       pszTagID = "TPF";
+                       switch (ui32TagSB)
+                       {
+                               case 0x0: pszTagSB = "TPF0: Primitive Block"; break;
+                               case 0x1: pszTagSB = "TPF0: Depth Bias"; break;
+                               case 0x2: pszTagSB = "TPF0: Per Primitive IDs"; break;
+                               case 0x3: pszTagSB = "CPF - Tables"; break;
+                               case 0x4: pszTagSB = "TPF1: Primitive Block"; break;
+                               case 0x5: pszTagSB = "TPF1: Depth Bias"; break;
+                               case 0x6: pszTagSB = "TPF1: Per Primitive IDs"; break;
+                               case 0x7: pszTagSB = "CPF - Data: Pipe 0"; break;
+                               case 0x8: pszTagSB = "TPF2: Primitive Block"; break;
+                               case 0x9: pszTagSB = "TPF2: Depth Bias"; break;
+                               case 0xA: pszTagSB = "TPF2: Per Primitive IDs"; break;
+                               case 0xB: pszTagSB = "CPF - Data: Pipe 1"; break;
+                               case 0xC: pszTagSB = "TPF3: Primitive Block"; break;
+                               case 0xD: pszTagSB = "TPF3: Depth Bias"; break;
+                               case 0xE: pszTagSB = "TPF3: Per Primitive IDs"; break;
+                               case 0xF: pszTagSB = "CPF - Data: Pipe 2"; break;
+                       }
+                       break;
+               }
+               case 0xD:
+               {
+                       pszTagID = "PDS";
+                       break;
+               }
+               case 0xE:
+               {
+                       pszTagID = "MCU";
+                       {
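+                               /*
+                                * MCU sideband layout: bits [7:5] burst size, bits [4:2]
+                                * requester group encoding, bits [1:0] bank group.
+                                */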
+                               IMG_UINT32 ui32Burst = (ui32TagSB >> 5) & 0x7;
+                               IMG_UINT32 ui32GroupEnc = (ui32TagSB >> 2) & 0x7;
+                               IMG_UINT32 ui32Group = ui32TagSB & 0x3;
+
+                               IMG_CHAR* pszBurst = "";
+                               IMG_CHAR* pszGroupEnc = "";
+                               IMG_CHAR* pszGroup = "";
+
+                               switch (ui32Burst)
+                               {
+                                       case 0x0:
+                                       case 0x1: pszBurst = "128bit word within the Lower 256bits"; break;
+                                       case 0x2:
+                                       case 0x3: pszBurst = "128bit word within the Upper 256bits"; break;
+                                       case 0x4: pszBurst = "Lower 256bits"; break;
+                                       case 0x5: pszBurst = "Upper 256bits"; break;
+                                       case 0x6: pszBurst = "512 bits"; break;
+                               }
+                               switch (ui32GroupEnc)
+                               {
+                                       case 0x0: pszGroupEnc = "TPUA_USC"; break;
+                                       case 0x1: pszGroupEnc = "TPUB_USC"; break;
+                                       case 0x2: pszGroupEnc = "USCA_USC"; break;
+                                       case 0x3: pszGroupEnc = "USCB_USC"; break;
+                                       case 0x4: pszGroupEnc = "PDS_USC"; break;
+                                       case 0x5:
+                                               if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) &&
+                                                       6 > RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS))
+                                               {
+                                                       pszGroupEnc = "PDSRW";
+                                               } else if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) &&
+                                                       6 == RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS))
+                                               {
+                                                       pszGroupEnc = "UPUC_USC";
+                                               }
+                                               break;
+                                       case 0x6:
+                                               if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) &&
+                                                       6 == RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS))
+                                               {
+                                                       pszGroupEnc = "TPUC_USC";
+                                               }
+                                               break;
+                                       case 0x7:
+                                               if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) &&
+                                                       6 == RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS))
+                                               {
+                                                       pszGroupEnc = "PDSRW";
+                                               }
+                                               break;
+                               }
+                               switch (ui32Group)
+                               {
+                                       case 0x0: pszGroup = "Banks 0-3"; break;
+                                       case 0x1: pszGroup = "Banks 4-7"; break;
+                                       case 0x2: pszGroup = "Banks 8-11"; break;
+                                       case 0x3: pszGroup = "Banks 12-15"; break;
+                               }
+
+                               OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+                                                               "%s, %s, %s", pszBurst, pszGroupEnc, pszGroup);
+                               pszTagSB = pszScratchBuf;
+                       }
+                       break;
+               }
+               case 0xF:
+               {
+                       pszTagID = "FB_CDC";
+
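+                       /*
+                        * The FB_CDC sideband layout depends on the top-level infrastructure:
+                        * with XT_TOP_INFRASTRUCTURE the request origin is in bits [3:0] and
+                        * the MCU sideband in bits [5:4]; otherwise the origin is in bits [4:2]
+                        * and the MCU sideband in bits [1:0].
+                        */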
+                       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE))
+                       {
+                               IMG_UINT32 ui32Req   = (ui32TagSB >> 0) & 0xf;
+                               IMG_UINT32 ui32MCUSB = (ui32TagSB >> 4) & 0x3;
+                               IMG_CHAR* pszReqOrig = "";
+
+                               switch (ui32Req)
+                               {
+                                       case 0x0: pszReqOrig = "FBC Request, originator ZLS"; break;
+                                       case 0x1: pszReqOrig = "FBC Request, originator PBE"; break;
+                                       case 0x2: pszReqOrig = "FBC Request, originator Host"; break;
+                                       case 0x3: pszReqOrig = "FBC Request, originator TLA"; break;
+                                       case 0x4: pszReqOrig = "FBDC Request, originator ZLS"; break;
+                                       case 0x5: pszReqOrig = "FBDC Request, originator MCU"; break;
+                                       case 0x6: pszReqOrig = "FBDC Request, originator Host"; break;
+                                       case 0x7: pszReqOrig = "FBDC Request, originator TLA"; break;
+                                       case 0x8: pszReqOrig = "FBC Request, originator ZLS Requester Fence"; break;
+                                       case 0x9: pszReqOrig = "FBC Request, originator PBE Requester Fence"; break;
+                                       case 0xa: pszReqOrig = "FBC Request, originator Host Requester Fence"; break;
+                                       case 0xb: pszReqOrig = "FBC Request, originator TLA Requester Fence"; break;
+                                       case 0xc: pszReqOrig = "Reserved"; break;
+                                       case 0xd: pszReqOrig = "Reserved"; break;
+                                       case 0xe: pszReqOrig = "FBDC Request, originator FBCDC(Host) Memory Fence"; break;
+                                       case 0xf: pszReqOrig = "FBDC Request, originator FBCDC(TLA) Memory Fence"; break;
+                               }
+                               OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+                                          "%s, MCU sideband 0x%X", pszReqOrig, ui32MCUSB);
+                               pszTagSB = pszScratchBuf;
+                       }
+                       else
+                       {
+                               IMG_UINT32 ui32Req   = (ui32TagSB >> 2) & 0x7;
+                               IMG_UINT32 ui32MCUSB = (ui32TagSB >> 0) & 0x3;
+                               IMG_CHAR* pszReqOrig = "";
+
+                               switch (ui32Req)
+                               {
+                                       case 0x0: pszReqOrig = "FBC Request, originator ZLS";   break;
+                                       case 0x1: pszReqOrig = "FBC Request, originator PBE";   break;
+                                       case 0x2: pszReqOrig = "FBC Request, originator Host";  break;
+                                       case 0x3: pszReqOrig = "FBC Request, originator TLA";   break;
+                                       case 0x4: pszReqOrig = "FBDC Request, originator ZLS";  break;
+                                       case 0x5: pszReqOrig = "FBDC Request, originator MCU";  break;
+                                       case 0x6: pszReqOrig = "FBDC Request, originator Host"; break;
+                                       case 0x7: pszReqOrig = "FBDC Request, originator TLA";  break;
+                               }
+                               OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+                                          "%s, MCU sideband 0x%X", pszReqOrig, ui32MCUSB);
+                               pszTagSB = pszScratchBuf;
+                       }
+                       break;
+               }
+       } /* switch (TagID) */
+
+       *ppszTagID = pszTagID;
+       *ppszTagSB = pszTagSB;
+}
+
+
+
+/*!
+*******************************************************************************
+
+ @Function     _RGXDecodeMMULevel
+
+ @Description
+
+ Return the name for the MMU level that faulted.
+
+ @Input ui32MMULevel    - MMU level
+
+ @Return   IMG_CHAR* to the string describing the MMU level that faulted.
+
+******************************************************************************/
+static const IMG_CHAR* _RGXDecodeMMULevel(IMG_UINT32 ui32MMULevel)
+{
+       const IMG_CHAR* pszMMULevel = "";
+
+       switch (ui32MMULevel)
+       {
+               case 0x0: pszMMULevel = " (Page Table)"; break;
+               case 0x1: pszMMULevel = " (Page Directory)"; break;
+               case 0x2: pszMMULevel = " (Page Catalog)"; break;
+               case 0x3: pszMMULevel = " (Cat Base Reg)"; break;
+       }
+
+       return pszMMULevel;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function     _RGXDecodeMMUReqTags
+
+ @Description
+
+ Decodes the MMU Tag ID and Sideband data fields from RGX_CR_MMU_FAULT_META_STATUS and
+ RGX_CR_MMU_FAULT_STATUS regs.
+
+ @Input ui32TagID           - Tag ID value
+ @Input ui32TagSB           - Tag Sideband data
+ @Input bRead               - Read flag
+ @Output ppszTagID          - Decoded string from the Tag ID
+ @Output ppszTagSB          - Decoded string from the Tag SB
+ @Output pszScratchBuf      - Buffer provided to the function to generate the debug strings
+ @Input ui32ScratchBufSize  - Size of the provided buffer
+
+ @Return   void
+
+******************************************************************************/
+static void _RGXDecodeMMUReqTags(PVRSRV_RGXDEV_INFO    *psDevInfo,
+                                                                IMG_UINT32  ui32TagID,
+                                                                IMG_UINT32  ui32TagSB,
+                                                                IMG_BOOL    bRead,
+                                                                IMG_CHAR    **ppszTagID,
+                                                                IMG_CHAR    **ppszTagSB,
+                                                                IMG_CHAR    *pszScratchBuf,
+                                                                IMG_UINT32  ui32ScratchBufSize)
+{
+       IMG_INT32  i32SideBandType = -1;
+       IMG_CHAR   *pszTagID = "-";
+       IMG_CHAR   *pszTagSB = "-";
+
+       PVR_ASSERT(ppszTagID != NULL);
+       PVR_ASSERT(ppszTagSB != NULL);
+
+
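+       /*
+        * Decode in two steps: first map the Tag ID onto a requester unit and
+        * remember its sideband class in i32SideBandType, then interpret the
+        * sideband value according to that class in the switch further below.
+        */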
+       switch (ui32TagID)
+       {
+               case  0: pszTagID = "META (Jones)"; i32SideBandType = RGXDBG_META; break;
+               case  1: pszTagID = "TLA (Jones)"; i32SideBandType = RGXDBG_TLA; break;
+               case  2: pszTagID = "DMA (Jones)"; i32SideBandType = RGXDBG_DMA; break;
+               case  3: pszTagID = "VDMM (Jones)"; i32SideBandType = RGXDBG_VDMM; break;
+               case  4: pszTagID = "CDM (Jones)"; i32SideBandType = RGXDBG_CDM; break;
+               case  5: pszTagID = "IPP (Jones)"; i32SideBandType = RGXDBG_IPP; break;
+               case  6: pszTagID = "PM (Jones)"; i32SideBandType = RGXDBG_PM; break;
+               case  7: pszTagID = "Tiling (Jones)"; i32SideBandType = RGXDBG_TILING; break;
+               case  8: pszTagID = "MCU (Texas 0)"; i32SideBandType = RGXDBG_MCU; break;
+               case 12: pszTagID = "VDMS (Black Pearl 0)"; i32SideBandType = RGXDBG_VDMS; break;
+               case 13: pszTagID = "IPF (Black Pearl 0)"; i32SideBandType = RGXDBG_IPF; break;
+               case 14: pszTagID = "ISP (Black Pearl 0)"; i32SideBandType = RGXDBG_ISP; break;
+               case 15: pszTagID = "TPF (Black Pearl 0)"; i32SideBandType = RGXDBG_TPF; break;
+               case 16: pszTagID = "USCS (Black Pearl 0)"; i32SideBandType = RGXDBG_USCS; break;
+               case 17: pszTagID = "PPP (Black Pearl 0)"; i32SideBandType = RGXDBG_PPP; break;
+               case 20: pszTagID = "MCU (Texas 1)"; i32SideBandType = RGXDBG_MCU; break;
+               case 24: pszTagID = "MCU (Texas 2)"; i32SideBandType = RGXDBG_MCU; break;
+               case 28: pszTagID = "VDMS (Black Pearl 1)"; i32SideBandType = RGXDBG_VDMS; break;
+               case 29: pszTagID = "IPF (Black Pearl 1)"; i32SideBandType = RGXDBG_IPF; break;
+               case 30: pszTagID = "ISP (Black Pearl 1)"; i32SideBandType = RGXDBG_ISP; break;
+               case 31: pszTagID = "TPF (Black Pearl 1)"; i32SideBandType = RGXDBG_TPF; break;
+               case 32: pszTagID = "USCS (Black Pearl 1)"; i32SideBandType = RGXDBG_USCS; break;
+               case 33: pszTagID = "PPP (Black Pearl 1)"; i32SideBandType = RGXDBG_PPP; break;
+               case 36: pszTagID = "MCU (Texas 3)"; i32SideBandType = RGXDBG_MCU; break;
+               case 40: pszTagID = "MCU (Texas 4)"; i32SideBandType = RGXDBG_MCU; break;
+               case 44: pszTagID = "VDMS (Black Pearl 2)"; i32SideBandType = RGXDBG_VDMS; break;
+               case 45: pszTagID = "IPF (Black Pearl 2)"; i32SideBandType = RGXDBG_IPF; break;
+               case 46: pszTagID = "ISP (Black Pearl 2)"; i32SideBandType = RGXDBG_ISP; break;
+               case 47: pszTagID = "TPF (Black Pearl 2)"; i32SideBandType = RGXDBG_TPF; break;
+               case 48: pszTagID = "USCS (Black Pearl 2)"; i32SideBandType = RGXDBG_USCS; break;
+               case 49: pszTagID = "PPP (Black Pearl 2)"; i32SideBandType = RGXDBG_PPP; break;
+               case 52: pszTagID = "MCU (Texas 5)"; i32SideBandType = RGXDBG_MCU; break;
+               case 56: pszTagID = "MCU (Texas 6)"; i32SideBandType = RGXDBG_MCU; break;
+               case 60: pszTagID = "VDMS (Black Pearl 3)"; i32SideBandType = RGXDBG_VDMS; break;
+               case 61: pszTagID = "IPF (Black Pearl 3)"; i32SideBandType = RGXDBG_IPF; break;
+               case 62: pszTagID = "ISP (Black Pearl 3)"; i32SideBandType = RGXDBG_ISP; break;
+               case 63: pszTagID = "TPF (Black Pearl 3)"; i32SideBandType = RGXDBG_TPF; break;
+               case 64: pszTagID = "USCS (Black Pearl 3)"; i32SideBandType = RGXDBG_USCS; break;
+               case 65: pszTagID = "PPP (Black Pearl 3)"; i32SideBandType = RGXDBG_PPP; break;
+               case 68: pszTagID = "MCU (Texas 7)"; i32SideBandType = RGXDBG_MCU; break;
+       }
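+       /*
+        * Tag IDs not covered by the common table above belong to per-Texas /
+        * per-Black Pearl units whose numbering depends on ERN 50539 and the
+        * FBCDC architecture version, so decode them separately here.
+        */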
+       if (('-' == pszTagID[0]) && ('\0' == pszTagID[1]))
+       {
+
+               if (RGX_IS_ERN_SUPPORTED(psDevInfo, 50539) ||
+                       (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, FBCDC_ARCHITECTURE) && RGX_GET_FEATURE_VALUE(psDevInfo, FBCDC_ARCHITECTURE) >= 3))
+               {
+                       switch (ui32TagID)
+                       {
+                       case 18: pszTagID = "TPF_CPF (Black Pearl 0)"; i32SideBandType = RGXDBG_TPF_CPF; break;
+                       case 19: pszTagID = "IPF_CPF (Black Pearl 0)"; i32SideBandType = RGXDBG_IPF_CPF; break;
+                       case 34: pszTagID = "TPF_CPF (Black Pearl 1)"; i32SideBandType = RGXDBG_TPF_CPF; break;
+                       case 35: pszTagID = "IPF_CPF (Black Pearl 1)"; i32SideBandType = RGXDBG_IPF_CPF; break;
+                       case 50: pszTagID = "TPF_CPF (Black Pearl 2)"; i32SideBandType = RGXDBG_TPF_CPF; break;
+                       case 51: pszTagID = "IPF_CPF (Black Pearl 2)"; i32SideBandType = RGXDBG_IPF_CPF; break;
+                       case 66: pszTagID = "TPF_CPF (Black Pearl 3)"; i32SideBandType = RGXDBG_TPF_CPF; break;
+                       case 67: pszTagID = "IPF_CPF (Black Pearl 3)"; i32SideBandType = RGXDBG_IPF_CPF; break;
+                       }
+
+                       if (RGX_IS_ERN_SUPPORTED(psDevInfo, 50539))
+                       {
+                               switch (ui32TagID)
+                               {
+                               case 9: pszTagID = "PBE (Texas 0)"; i32SideBandType = RGXDBG_PBE; break;
+                               case 10: pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break;
+                               case 11: pszTagID = "FBCDC (Texas 0)"; i32SideBandType = RGXDBG_FBCDC; break;
+                               case 21: pszTagID = "PBE (Texas 1)"; i32SideBandType = RGXDBG_PBE; break;
+                               case 22: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break;
+                               case 23: pszTagID = "FBCDC (Texas 1)"; i32SideBandType = RGXDBG_FBCDC; break;
+                               case 25: pszTagID = "PBE (Texas 2)"; i32SideBandType = RGXDBG_PBE; break;
+                               case 26: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break;
+                               case 27: pszTagID = "FBCDC (Texas 2)"; i32SideBandType = RGXDBG_FBCDC; break;
+                               case 37: pszTagID = "PBE (Texas 3)"; i32SideBandType = RGXDBG_PBE; break;
+                               case 38: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break;
+                               case 39: pszTagID = "FBCDC (Texas 3)"; i32SideBandType = RGXDBG_FBCDC; break;
+                               case 41: pszTagID = "PBE (Texas 4)"; i32SideBandType = RGXDBG_PBE; break;
+                               case 42: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break;
+                               case 43: pszTagID = "FBCDC (Texas 4)"; i32SideBandType = RGXDBG_FBCDC; break;
+                               case 53: pszTagID = "PBE (Texas 5)"; i32SideBandType = RGXDBG_PBE; break;
+                               case 54: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break;
+                               case 55: pszTagID = "FBCDC (Texas 5)"; i32SideBandType = RGXDBG_FBCDC; break;
+                               case 57: pszTagID = "PBE (Texas 6)"; i32SideBandType = RGXDBG_PBE; break;
+                               case 58: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break;
+                               case 59: pszTagID = "FBCDC (Texas 6)"; i32SideBandType = RGXDBG_FBCDC; break;
+                               case 69: pszTagID = "PBE (Texas 7)"; i32SideBandType = RGXDBG_PBE; break;
+                               case 70: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break;
+                               case 71: pszTagID = "FBCDC (Texas 7)"; i32SideBandType = RGXDBG_FBCDC; break;
+                               }
+                       }
+                       else
+                       {
+                               switch (ui32TagID)
+                               {
+                               case 9: pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break;
+                               case 10: pszTagID = "PBE (Texas 0)"; i32SideBandType = RGXDBG_PBE; break;
+                               case 11: pszTagID = "FBCDC (Texas 0)"; i32SideBandType = RGXDBG_FBCDC; break;
+                               case 21: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break;
+                               case 22: pszTagID = "PBE (Texas 1)"; i32SideBandType = RGXDBG_PBE; break;
+                               case 23: pszTagID = "FBCDC (Texas 1)"; i32SideBandType = RGXDBG_FBCDC; break;
+                               case 25: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break;
+                               case 26: pszTagID = "PBE (Texas 2)"; i32SideBandType = RGXDBG_PBE; break;
+                               case 27: pszTagID = "FBCDC (Texas 2)"; i32SideBandType = RGXDBG_FBCDC; break;
+                               case 37: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break;
+                               case 38: pszTagID = "PBE (Texas 3)"; i32SideBandType = RGXDBG_PBE; break;
+                               case 39: pszTagID = "FBCDC (Texas 3)"; i32SideBandType = RGXDBG_FBCDC; break;
+                               case 41: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break;
+                               case 42: pszTagID = "PBE (Texas 4)"; i32SideBandType = RGXDBG_PBE; break;
+                               case 43: pszTagID = "FBCDC (Texas 4)"; i32SideBandType = RGXDBG_FBCDC; break;
+                               case 53: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break;
+                               case 54: pszTagID = "PBE (Texas 5)"; i32SideBandType = RGXDBG_PBE; break;
+                               case 55: pszTagID = "FBCDC (Texas 5)"; i32SideBandType = RGXDBG_FBCDC; break;
+                               case 57: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break;
+                               case 58: pszTagID = "PBE (Texas 6)"; i32SideBandType = RGXDBG_PBE; break;
+                               case 59: pszTagID = "FBCDC (Texas 6)"; i32SideBandType = RGXDBG_FBCDC; break;
+                               case 69: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break;
+                               case 70: pszTagID = "PBE (Texas 7)"; i32SideBandType = RGXDBG_PBE; break;
+                               case 71: pszTagID = "FBCDC (Texas 7)"; i32SideBandType = RGXDBG_FBCDC; break;
+                               }
+                       }
+               }
+               else
+               {
+                       switch (ui32TagID)
+                       {
+                       case 9: pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break;
+                       case 10: pszTagID = "PBE0 (Texas 0)"; i32SideBandType = RGXDBG_PBE; break;
+                       case 11: pszTagID = "PBE1 (Texas 0)"; i32SideBandType = RGXDBG_PBE; break;
+                       case 18: pszTagID = "VCE (Black Pearl 0)"; i32SideBandType = RGXDBG_VCE; break;
+                       case 19: pszTagID = "FBCDC (Black Pearl 0)"; i32SideBandType = RGXDBG_FBCDC; break;
+                       case 21: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break;
+                       case 22: pszTagID = "PBE0 (Texas 1)"; i32SideBandType = RGXDBG_PBE; break;
+                       case 23: pszTagID = "PBE1 (Texas 1)"; i32SideBandType = RGXDBG_PBE; break;
+                       case 25: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break;
+                       case 26: pszTagID = "PBE0 (Texas 2)"; i32SideBandType = RGXDBG_PBE; break;
+                       case 27: pszTagID = "PBE1 (Texas 2)"; i32SideBandType = RGXDBG_PBE; break;
+                       case 34: pszTagID = "VCE (Black Pearl 1)"; i32SideBandType = RGXDBG_VCE; break;
+                       case 35: pszTagID = "FBCDC (Black Pearl 1)"; i32SideBandType = RGXDBG_FBCDC; break;
+                       case 37: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break;
+                       case 38: pszTagID = "PBE0 (Texas 3)"; i32SideBandType = RGXDBG_PBE; break;
+                       case 39: pszTagID = "PBE1 (Texas 3)"; i32SideBandType = RGXDBG_PBE; break;
+                       case 41: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break;
+                       case 42: pszTagID = "PBE0 (Texas 4)"; i32SideBandType = RGXDBG_PBE; break;
+                       case 43: pszTagID = "PBE1 (Texas 4)"; i32SideBandType = RGXDBG_PBE; break;
+                       case 50: pszTagID = "VCE (Black Pearl 2)"; i32SideBandType = RGXDBG_VCE; break;
+                       case 51: pszTagID = "FBCDC (Black Pearl 2)"; i32SideBandType = RGXDBG_FBCDC; break;
+                       case 53: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break;
+                       case 54: pszTagID = "PBE0 (Texas 5)"; i32SideBandType = RGXDBG_PBE; break;
+                       case 55: pszTagID = "PBE1 (Texas 5)"; i32SideBandType = RGXDBG_PBE; break;
+                       case 57: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break;
+                       case 58: pszTagID = "PBE0 (Texas 6)"; i32SideBandType = RGXDBG_PBE; break;
+                       case 59: pszTagID = "PBE1 (Texas 6)"; i32SideBandType = RGXDBG_PBE; break;
+                       case 66: pszTagID = "VCE (Black Pearl 3)"; i32SideBandType = RGXDBG_VCE; break;
+                       case 67: pszTagID = "FBCDC (Black Pearl 3)"; i32SideBandType = RGXDBG_FBCDC; break;
+                       case 69: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break;
+                       case 70: pszTagID = "PBE0 (Texas 7)"; i32SideBandType = RGXDBG_PBE; break;
+                       case 71: pszTagID = "PBE1 (Texas 7)"; i32SideBandType = RGXDBG_PBE; break;
+                       }
+               }
+
+       }
+
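+       /* Now decode the sideband according to the requester class selected above. */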
+       switch (i32SideBandType)
+       {
+               case RGXDBG_META:
+               {
+                       switch (ui32TagSB)
+                       {
+                               case 0x0: pszTagSB = "DCache - Thread 0"; break;
+                               case 0x1: pszTagSB = "ICache - Thread 0"; break;
+                               case 0x2: pszTagSB = "JTag - Thread 0"; break;
+                               case 0x3: pszTagSB = "Slave bus - Thread 0"; break;
+                               case 0x4: pszTagSB = "DCache - Thread 1"; break;
+                               case 0x5: pszTagSB = "ICache - Thread 1"; break;
+                               case 0x6: pszTagSB = "JTag - Thread 1"; break;
+                               case 0x7: pszTagSB = "Slave bus - Thread 1"; break;
+                       }
+                       break;
+               }
+
+               case RGXDBG_TLA:
+               {
+                       switch (ui32TagSB)
+                       {
+                               case 0x0: pszTagSB = "Pixel data"; break;
+                               case 0x1: pszTagSB = "Command stream data"; break;
+                               case 0x2: pszTagSB = "Fence or flush"; break;
+                       }
+                       break;
+               }
+
+               case RGXDBG_VDMM:
+               {
+                       switch (ui32TagSB)
+                       {
+                               case 0x0: pszTagSB = "Control Stream - Read Only"; break;
+                               case 0x1: pszTagSB = "PPP State - Read Only"; break;
+                               case 0x2: pszTagSB = "Indices - Read Only"; break;
+                               case 0x4: pszTagSB = "Call Stack - Read/Write"; break;
+                               case 0x6: pszTagSB = "DrawIndirect - Read Only"; break;
+                               case 0xA: pszTagSB = "Context State - Write Only"; break;
+                       }
+                       break;
+               }
+
+               case RGXDBG_CDM:
+               {
+                       switch (ui32TagSB)
+                       {
+                               case 0x0: pszTagSB = "Control Stream"; break;
+                               case 0x1: pszTagSB = "Indirect Data"; break;
+                               case 0x2: pszTagSB = "Event Write"; break;
+                               case 0x3: pszTagSB = "Context State"; break;
+                       }
+                       break;
+               }
+
+               case RGXDBG_IPP:
+               {
+                       switch (ui32TagSB)
+                       {
+                               case 0x0: pszTagSB = "Macrotile Header"; break;
+                               case 0x1: pszTagSB = "Region Header"; break;
+                       }
+                       break;
+               }
+
+               case RGXDBG_PM:
+               {
+                       switch (ui32TagSB)
+                       {
+                               case 0x0: pszTagSB = "PMA_TAFSTACK"; break;
+                               case 0x1: pszTagSB = "PMA_TAMLIST"; break;
+                               case 0x2: pszTagSB = "PMA_3DFSTACK"; break;
+                               case 0x3: pszTagSB = "PMA_3DMLIST"; break;
+                               case 0x4: pszTagSB = "PMA_PMCTX0"; break;
+                               case 0x5: pszTagSB = "PMA_PMCTX1"; break;
+                               case 0x6: pszTagSB = "PMA_MAVP"; break;
+                               case 0x7: pszTagSB = "PMA_UFSTACK"; break;
+                               case 0x8: pszTagSB = "PMD_TAFSTACK"; break;
+                               case 0x9: pszTagSB = "PMD_TAMLIST"; break;
+                               case 0xA: pszTagSB = "PMD_3DFSTACK"; break;
+                               case 0xB: pszTagSB = "PMD_3DMLIST"; break;
+                               case 0xC: pszTagSB = "PMD_PMCTX0"; break;
+                               case 0xD: pszTagSB = "PMD_PMCTX1"; break;
+                               case 0xF: pszTagSB = "PMD_UFSTACK"; break;
+                               case 0x10: pszTagSB = "PMA_TAMMUSTACK"; break;
+                               case 0x11: pszTagSB = "PMA_3DMMUSTACK"; break;
+                               case 0x12: pszTagSB = "PMD_TAMMUSTACK"; break;
+                               case 0x13: pszTagSB = "PMD_3DMMUSTACK"; break;
+                               case 0x14: pszTagSB = "PMA_TAUFSTACK"; break;
+                               case 0x15: pszTagSB = "PMA_3DUFSTACK"; break;
+                               case 0x16: pszTagSB = "PMD_TAUFSTACK"; break;
+                               case 0x17: pszTagSB = "PMD_3DUFSTACK"; break;
+                               case 0x18: pszTagSB = "PMA_TAVFP"; break;
+                               case 0x19: pszTagSB = "PMD_3DVFP"; break;
+                               case 0x1A: pszTagSB = "PMD_TAVFP"; break;
+                       }
+                       break;
+               }
+
+               case RGXDBG_TILING:
+               {
+                       switch (ui32TagSB)
+                       {
+                               case 0x0: pszTagSB = "PSG Control Stream TP0"; break;
+                               case 0x1: pszTagSB = "TPC TP0"; break;
+                               case 0x2: pszTagSB = "VCE0"; break;
+                               case 0x3: pszTagSB = "VCE1"; break;
+                               case 0x4: pszTagSB = "PSG Control Stream TP1"; break;
+                               case 0x5: pszTagSB = "TPC TP1"; break;
+                               case 0x8: pszTagSB = "PSG Region Header TP0"; break;
+                               case 0xC: pszTagSB = "PSG Region Header TP1"; break;
+                       }
+                       break;
+               }
+
+               case RGXDBG_VDMS:
+               {
+                       switch (ui32TagSB)
+                       {
+                               case 0x0: pszTagSB = "Context State - Write Only"; break;
+                       }
+                       break;
+               }
+
+               case RGXDBG_IPF:
+               {
+                       switch (ui32TagSB)
+                       {
+                               case 0x00:
+                               case 0x20: pszTagSB = "CPF"; break;
+                               case 0x01: pszTagSB = "DBSC"; break;
+                               case 0x02:
+                               case 0x04:
+                               case 0x06:
+                               case 0x08:
+                               case 0x0A:
+                               case 0x0C:
+                               case 0x0E:
+                               case 0x10: pszTagSB = "Control Stream"; break;
+                               case 0x03:
+                               case 0x05:
+                               case 0x07:
+                               case 0x09:
+                               case 0x0B:
+                               case 0x0D:
+                               case 0x0F:
+                               case 0x11: pszTagSB = "Primitive Block"; break;
+                       }
+                       break;
+               }
+
+               case RGXDBG_ISP:
+               {
+                       switch (ui32TagSB)
+                       {
+                               case 0x00: pszTagSB = "ZLS read/write"; break;
+                               case 0x20: pszTagSB = "Occlusion query read/write"; break;
+                       }
+                       break;
+               }
+
+               case RGXDBG_TPF:
+               {
+                       switch (ui32TagSB)
+                       {
+                               case 0x0: pszTagSB = "TPF0: Primitive Block"; break;
+                               case 0x1: pszTagSB = "TPF0: Depth Bias"; break;
+                               case 0x2: pszTagSB = "TPF0: Per Primitive IDs"; break;
+                               case 0x3: pszTagSB = "CPF - Tables"; break;
+                               case 0x4: pszTagSB = "TPF1: Primitive Block"; break;
+                               case 0x5: pszTagSB = "TPF1: Depth Bias"; break;
+                               case 0x6: pszTagSB = "TPF1: Per Primitive IDs"; break;
+                               case 0x7: pszTagSB = "CPF - Data: Pipe 0"; break;
+                               case 0x8: pszTagSB = "TPF2: Primitive Block"; break;
+                               case 0x9: pszTagSB = "TPF2: Depth Bias"; break;
+                               case 0xA: pszTagSB = "TPF2: Per Primitive IDs"; break;
+                               case 0xB: pszTagSB = "CPF - Data: Pipe 1"; break;
+                               case 0xC: pszTagSB = "TPF3: Primitive Block"; break;
+                               case 0xD: pszTagSB = "TPF3: Depth Bias"; break;
+                               case 0xE: pszTagSB = "TPF3: Per Primitive IDs"; break;
+                               case 0xF: pszTagSB = "CPF - Data: Pipe 2"; break;
+                       }
+                       break;
+               }
+
+               case RGXDBG_FBCDC:
+               {
+                       /*
+                        * FBC faults on a 4-cluster phantom do not always set SB
+                        * bit 5, but since FBC is write-only and FBDC is read-only,
+                        * we can set bit 5 if this is a write fault, before decoding.
+                        */
+                       if (bRead == IMG_FALSE)
+                       {
+                               ui32TagSB |= 0x20;
+                       }
+
+                       switch (ui32TagSB)
+                       {
+                               case 0x00: pszTagSB = "FBDC Request, originator ZLS"; break;
+                               case 0x02: pszTagSB = "FBDC Request, originator MCU Dust 0"; break;
+                               case 0x03: pszTagSB = "FBDC Request, originator MCU Dust 1"; break;
+                               case 0x20: pszTagSB = "FBC Request, originator ZLS"; break;
+                               case 0x22: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 0"; break;
+                               case 0x23: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 1"; break;
+                               case 0x24: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 0"; break;
+                               case 0x25: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 1"; break;
+                               case 0x28: pszTagSB = "FBC Request, originator ZLS Fence"; break;
+                               case 0x2a: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 0, Fence"; break;
+                               case 0x2b: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 1, Fence"; break;
+                               case 0x2c: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 0, Fence"; break;
+                               case 0x2d: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 1, Fence"; break;
+                       }
+                       break;
+               }
+
+               case RGXDBG_MCU:
+               {
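+                       /* MCU sideband bits: [7:5] = set number, [4:2] = way number,
+                        * [1:0] = bank group; decoded below.
+                        */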
+                       IMG_UINT32 ui32SetNumber = (ui32TagSB >> 5) & 0x7;
+                       IMG_UINT32 ui32WayNumber = (ui32TagSB >> 2) & 0x7;
+                       IMG_UINT32 ui32Group     = ui32TagSB & 0x3;
+
+                       IMG_CHAR* pszGroup = "";
+
+                       switch (ui32Group)
+                       {
+                               case 0x0: pszGroup = "Banks 0-1"; break;
+                               case 0x1: pszGroup = "Banks 2-3"; break;
+                               case 0x2: pszGroup = "Banks 4-5"; break;
+                               case 0x3: pszGroup = "Banks 6-7"; break;
+                       }
+
+                       OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+                                  "Set=%d, Way=%d, %s", ui32SetNumber, ui32WayNumber, pszGroup);
+                       pszTagSB = pszScratchBuf;
+                       break;
+               }
+
+               default:
+               {
+                       OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, "SB=0x%02x", ui32TagSB);
+                       pszTagSB = pszScratchBuf;
+                       break;
+               }
+       }
+
+       *ppszTagID = pszTagID;
+       *ppszTagSB = pszTagSB;
+}
+
+
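+/* Split an OS timestamp in nanoseconds into whole seconds and the remaining
+ * nanoseconds, for printing in "<seconds>.<nanoseconds>" form.
+ */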
+static void ConvertOSTimestampToSAndNS(IMG_UINT64 ui64OSTimer,
+                                                       IMG_UINT64 *pui64Seconds,
+                                                       IMG_UINT64 *pui64Nanoseconds)
+{
+       IMG_UINT32 ui32Remainder;
+
+       *pui64Seconds = OSDivide64r64(ui64OSTimer, 1000000000, &ui32Remainder);
+       *pui64Nanoseconds = ui64OSTimer - (*pui64Seconds * 1000000000ULL);
+}
+
+
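+/* Indices of the three DevicememHistory queries made per page fault: the page
+ * preceding the fault, the faulting page itself and the page after it
+ * (see _GetDevicememHistoryData).
+ */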
+typedef enum _DEVICEMEM_HISTORY_QUERY_INDEX_
+{
+       DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING,
+       DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED,
+       DEVICEMEM_HISTORY_QUERY_INDEX_NEXT,
+       DEVICEMEM_HISTORY_QUERY_INDEX_COUNT,
+} DEVICEMEM_HISTORY_QUERY_INDEX;
+
+
+/*!
+*******************************************************************************
+
+ @Function     _PrintDevicememHistoryQueryResult
+
+ @Description
+
+ Print details of a single result from a DevicememHistory query
+
+ @Input pfnDumpDebugPrintf       - Debug printf function
+ @Input pvDumpDebugFile          - Optional file identifier to be passed to the
+                                   'printf' function if required
+ @Input psFaultProcessInfo       - The process info derived from the page fault
+ @Input psResult                 - The DevicememHistory result to be printed
+ @Input ui32Index                - The index of the result
+
+ @Return   void
+
+******************************************************************************/
+static void _PrintDevicememHistoryQueryResult(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                               void *pvDumpDebugFile,
+                                               RGXMEM_PROCESS_INFO *psFaultProcessInfo,
+                                               DEVICEMEM_HISTORY_QUERY_OUT_RESULT *psResult,
+                                               IMG_UINT32 ui32Index,
+                                               const IMG_CHAR* pszIndent)
+{
+       IMG_UINT32 ui32Remainder;
+       IMG_UINT64 ui64Seconds, ui64Nanoseconds;
+
+       ConvertOSTimestampToSAndNS(psResult->ui64When,
+                                                       &ui64Seconds,
+                                                       &ui64Nanoseconds);
+
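+       /* Results gathered for firmware faults additionally carry the PID and
+        * name of the client process that owns the allocation, so print those too.
+        */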
+       if (psFaultProcessInfo->uiPID != RGXMEM_SERVER_PID_FIRMWARE)
+       {
+               PVR_DUMPDEBUG_LOG("%s    [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC
+                                       " Size: " IMG_DEVMEM_SIZE_FMTSPEC
+                                       " Operation: %s Modified: %" IMG_UINT64_FMTSPEC
+                                       " us ago (OS time %" IMG_UINT64_FMTSPEC
+                                       ".%09" IMG_UINT64_FMTSPEC " s)",
+                                               pszIndent,
+                                               ui32Index,
+                                               psResult->szString,
+                                               psResult->sBaseDevVAddr.uiAddr,
+                                               psResult->uiSize,
+                                               psResult->bMap ? "Map": "Unmap",
+                                               OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder),
+                                               ui64Seconds,
+                                               ui64Nanoseconds);
+       }
+       else
+       {
+               PVR_DUMPDEBUG_LOG("%s    [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC
+                                       " Size: " IMG_DEVMEM_SIZE_FMTSPEC
+                                       " Operation: %s Modified: %" IMG_UINT64_FMTSPEC
+                                       " us ago (OS time %" IMG_UINT64_FMTSPEC
+                                       ".%09" IMG_UINT64_FMTSPEC
+                                       ") PID: %u (%s)",
+                                               pszIndent,
+                                               ui32Index,
+                                               psResult->szString,
+                                               psResult->sBaseDevVAddr.uiAddr,
+                                               psResult->uiSize,
+                                               psResult->bMap ? "Map": "Unmap",
+                                               OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder),
+                                               ui64Seconds,
+                                               ui64Nanoseconds,
+                                               psResult->sProcessInfo.uiPID,
+                                               psResult->sProcessInfo.szProcessName);
+       }
+
+       if (!psResult->bRange)
+       {
+               PVR_DUMPDEBUG_LOG("%s        Whole allocation was %s", pszIndent, psResult->bMap ? "mapped": "unmapped");
+       }
+       else
+       {
+               PVR_DUMPDEBUG_LOG("%s        Pages %u to %u (" IMG_DEV_VIRTADDR_FMTSPEC "-" IMG_DEV_VIRTADDR_FMTSPEC ") %s%s",
+                                                                               pszIndent,
+                                                                               psResult->ui32StartPage,
+                                                                               psResult->ui32StartPage + psResult->ui32PageCount - 1,
+                                                                               psResult->sMapStartAddr.uiAddr,
+                                                                               psResult->sMapEndAddr.uiAddr,
+                                                                               psResult->bAll ? "(whole allocation) " : "",
+                                                                               psResult->bMap ? "mapped": "unmapped");
+       }
+}
+
+/*!
+*******************************************************************************
+
+ @Function     _PrintDevicememHistoryQueryOut
+
+ @Description
+
+ Print details of all the results from a DevicememHistory query
+
+ @Input pfnDumpDebugPrintf       - Debug printf function
+ @Input pvDumpDebugFile          - Optional file identifier to be passed to the
+                                   'printf' function if required
+ @Input psFaultProcessInfo       - The process info derived from the page fault
+ @Input psQueryOut               - The query results to be printed
+
+ @Return   void
+
+******************************************************************************/
+static void _PrintDevicememHistoryQueryOut(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                               void *pvDumpDebugFile,
+                                               RGXMEM_PROCESS_INFO *psFaultProcessInfo,
+                                               DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut,
+                                               const IMG_CHAR* pszIndent)
+{
+       IMG_UINT32 i;
+
+       if (psQueryOut->ui32NumResults == 0)
+       {
+               PVR_DUMPDEBUG_LOG("%s    No results", pszIndent);
+       }
+       else
+       {
+               for (i = 0; i < psQueryOut->ui32NumResults; i++)
+               {
+                       _PrintDevicememHistoryQueryResult(pfnDumpDebugPrintf, pvDumpDebugFile,
+                                                                       psFaultProcessInfo,
+                                                                       &psQueryOut->sResults[i],
+                                                                       i,
+                                                                       pszIndent);
+               }
+       }
+}
+
+/* table of HW page size field values and their equivalent page sizes in bytes */
+static const unsigned int aui32HWPageSizeTable[][2] =
+{
+       { 0, PVRSRV_4K_PAGE_SIZE },
+       { 1, PVRSRV_16K_PAGE_SIZE },
+       { 2, PVRSRV_64K_PAGE_SIZE },
+       { 3, PVRSRV_256K_PAGE_SIZE },
+       { 4, PVRSRV_1M_PAGE_SIZE },
+       { 5, PVRSRV_2M_PAGE_SIZE }
+};
+
+/*!
+*******************************************************************************
+
+ @Function     _PageSizeHWToBytes
+
+ @Description
+
+ Convert a HW page size value to its size in bytes
+
+ @Input ui32PageSizeHW     - The HW page size value
+
+ @Return   IMG_UINT32      The page size in bytes
+
+******************************************************************************/
+static IMG_UINT32 _PageSizeHWToBytes(IMG_UINT32 ui32PageSizeHW)
+{
+       if (ui32PageSizeHW > 5)
+       {
+               /* This is invalid, so return a default value as we cannot ASSERT in this code! */
+               return PVRSRV_4K_PAGE_SIZE;
+       }
+
+       return aui32HWPageSizeTable[ui32PageSizeHW][1];
+}
+
+/*!
+*******************************************************************************
+
+ @Function     _GetDevicememHistoryData
+
+ @Description
+
+ Get the DevicememHistory results for the given PID and faulting device virtual address.
+ The function will query DevicememHistory for information about the faulting page, as well
+ as the page before and after.
+
+ @Input uiPID              - The process ID to search for allocations belonging to
+ @Input sFaultDevVAddr     - The device address to search for allocations at/before/after
+ @Output asQueryOut        - Storage for the query results
+ @Input ui32PageSizeBytes  - Faulted page size in bytes
+
+ @Return IMG_BOOL          - IMG_TRUE if any results were found for this page fault
+
+******************************************************************************/
+static IMG_BOOL _GetDevicememHistoryData(IMG_PID uiPID, IMG_DEV_VIRTADDR sFaultDevVAddr,
+                                                       DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT],
+                                                       IMG_UINT32 ui32PageSizeBytes)
+{
+       DEVICEMEM_HISTORY_QUERY_IN sQueryIn;
+       IMG_BOOL bAnyHits = IMG_FALSE;
+
+       /* if the page fault originated in the firmware then the allocation may
+        * appear to belong to any PID, because FW allocations are attributed
+        * to the client process creating the allocation, so instruct the
+        * devicemem_history query to search all available PIDs
+        */
+       if (uiPID == RGXMEM_SERVER_PID_FIRMWARE)
+       {
+               sQueryIn.uiPID = DEVICEMEM_HISTORY_PID_ANY;
+       }
+       else
+       {
+               sQueryIn.uiPID = uiPID;
+       }
+
+       /* Query the DevicememHistory for all allocations in the previous page... */
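+       /* (fault address aligned down to a page boundary, minus one page) */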
+       sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) - ui32PageSizeBytes;
+       if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING],
+                                 ui32PageSizeBytes, IMG_TRUE))
+       {
+               bAnyHits = IMG_TRUE;
+       }
+
+       /* Query the DevicememHistory for any record at the exact address... */
+       sQueryIn.sDevVAddr = sFaultDevVAddr;
+       if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED],
+                                 ui32PageSizeBytes, IMG_FALSE))
+       {
+               bAnyHits = IMG_TRUE;
+       }
+       else
+       {
+               /* If not matched then try matching any record in the faulting page... */
+               if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED],
+                                         ui32PageSizeBytes, IMG_TRUE))
+               {
+                       bAnyHits = IMG_TRUE;
+               }
+       }
+
+       /* Query the DevicememHistory for all allocations in the next page... */
+       sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) + ui32PageSizeBytes;
+       if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_NEXT],
+                                 ui32PageSizeBytes, IMG_TRUE))
+       {
+               bAnyHits = IMG_TRUE;
+       }
+
+       return bAnyHits;
+}
+
+/* stored data about one page fault */
+typedef struct _FAULT_INFO_
+{
+       /* the process info of the memory context that page faulted */
+       RGXMEM_PROCESS_INFO sProcessInfo;
+       IMG_DEV_VIRTADDR sFaultDevVAddr;
+       MMU_FAULT_DATA   sMMUFaultData;
+       DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT];
+       /* the CR timer value at the time of the fault, recorded by the FW.
+        * used to differentiate different page faults
+        */
+       IMG_UINT64 ui64CRTimer;
+       /* time when this FAULT_INFO entry was added. used for timing
+        * reference against the map/unmap information
+        */
+       IMG_UINT64 ui64When;
+       IMG_UINT32 ui32FaultInfoFlags;
+} FAULT_INFO;
+
+/* history list of page faults.
+ * Keeps the first `n` page faults and the last `n` page faults, like the FW
+ * HWR log
+ */
+typedef struct _FAULT_INFO_LOG_
+{
+       IMG_UINT32 ui32Head;
+       /* the number of faults in this log need not correspond exactly to
+        * the number of entries in the FW HWINFO log, as that log may also
+        * contain non-page-fault HWRs
+        */
+       FAULT_INFO asFaults[RGXFWIF_HWINFO_MAX];
+} FAULT_INFO_LOG;
+
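+/* ui32FaultInfoFlags bits: FAULT_INFO_PROC_INFO is set once the faulting
+ * process has been identified, FAULT_INFO_DEVMEM_HIST once DevicememHistory
+ * results have been gathered for the fault.
+ */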
+#define FAULT_INFO_PROC_INFO   (0x1U)
+#define FAULT_INFO_DEVMEM_HIST (0x2U)
+
+static FAULT_INFO_LOG gsFaultInfoLog = { 0 };
+
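+/* For faults raised on behalf of the firmware, resolve the client process
+ * name of every DevicememHistory result from its PID, falling back to
+ * "(unknown)" if the PID can no longer be looked up.
+ */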
+static void _FillAppForFWFaults(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                       FAULT_INFO *psInfo,
+                                                       RGXMEM_PROCESS_INFO *psProcInfo)
+{
+       IMG_UINT32 i, j;
+
+       for (i = 0; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++)
+       {
+               for (j = 0; j < DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS; j++)
+               {
+                       IMG_BOOL bFound;
+
+                       RGXMEM_PROCESS_INFO *psResultProcInfo = &psInfo->asQueryOut[i].sResults[j].sProcessInfo;
+                       bFound = RGXPCPIDToProcessInfo(psDevInfo,
+                                                      psResultProcInfo->uiPID,
+                                                      psResultProcInfo);
+                       if (!bFound)
+                       {
+                               OSStringLCopy(psResultProcInfo->szProcessName,
+                                             "(unknown)",
+                                             sizeof(psResultProcInfo->szProcessName));
+                       }
+                       }
+               }
+       }
+}
+
+/*!
+*******************************************************************************
+
+ @Function     _PrintFaultInfo
+
+ @Description
+
+ Print all the details of a page fault from a FAULT_INFO structure
+
+ @Input pfnDumpDebugPrintf   - The debug printf function
+ @Input pvDumpDebugFile      - Optional file identifier to be passed to the
+                               'printf' function if required
+ @Input psInfo               - The page fault occurrence to print
+
+ @Return   void
+
+******************************************************************************/
+static void _PrintFaultInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                       void *pvDumpDebugFile,
+                                       FAULT_INFO *psInfo,
+                                       const IMG_CHAR* pszIndent)
+{
+       IMG_UINT32 i;
+       IMG_UINT64 ui64Seconds, ui64Nanoseconds;
+
+       ConvertOSTimestampToSAndNS(psInfo->ui64When, &ui64Seconds, &ui64Nanoseconds);
+
+       if (BITMASK_HAS(psInfo->ui32FaultInfoFlags, FAULT_INFO_PROC_INFO))
+       {
+               IMG_PID uiPID = (psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE || psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_PM) ?
+                                                       0 : psInfo->sProcessInfo.uiPID;
+
+               PVR_DUMPDEBUG_LOG("%sDevice memory history for page fault address " IMG_DEV_VIRTADDR_FMTSPEC
+                                                       ", PID: %u "
+                                                       "(%s, unregistered: %u) OS time: "
+                                                       "%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC,
+                                       pszIndent,
+                                       psInfo->sFaultDevVAddr.uiAddr,
+                                       uiPID,
+                                       psInfo->sProcessInfo.szProcessName,
+                                       psInfo->sProcessInfo.bUnregistered,
+                                       ui64Seconds,
+                                       ui64Nanoseconds);
+       }
+       else
+       {
+               PVR_DUMPDEBUG_LOG("%sCould not find PID for device memory history on PC of the fault", pszIndent);
+       }
+
+       if (BITMASK_HAS(psInfo->ui32FaultInfoFlags, FAULT_INFO_DEVMEM_HIST))
+       {
+               for (i = DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++)
+               {
+                       const IMG_CHAR *pszWhich = NULL;
+
+                       switch (i)
+                       {
+                               case DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING:
+                                       pszWhich = "Preceding page";
+                                       break;
+                               case DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED:
+                                       pszWhich = "Faulted page";
+                                       break;
+                               case DEVICEMEM_HISTORY_QUERY_INDEX_NEXT:
+                                       pszWhich = "Next page";
+                                       break;
+                       }
+
+                       PVR_DUMPDEBUG_LOG("%s  %s:", pszIndent, pszWhich);
+                       _PrintDevicememHistoryQueryOut(pfnDumpDebugPrintf, pvDumpDebugFile,
+                                                               &psInfo->sProcessInfo,
+                                                               &psInfo->asQueryOut[i],
+                                                               pszIndent);
+               }
+       }
+       else
+       {
+               PVR_DUMPDEBUG_LOG("%s  No matching Devmem History for fault address", pszIndent);
+       }
+}
+
+static void _RecordFaultInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                       FAULT_INFO *psInfo,
+                                       IMG_DEV_VIRTADDR sFaultDevVAddr,
+                                       IMG_DEV_PHYADDR sPCDevPAddr,
+                                       IMG_UINT64 ui64CRTimer,
+                                       IMG_UINT32 ui32PageSizeBytes)
+{
+       IMG_BOOL bFound = IMG_FALSE, bIsPMFault = IMG_FALSE;
+       RGXMEM_PROCESS_INFO sProcessInfo;
+
+       psInfo->ui32FaultInfoFlags = 0;
+       psInfo->sFaultDevVAddr = sFaultDevVAddr;
+       psInfo->ui64CRTimer = ui64CRTimer;
+       psInfo->ui64When = OSClockns64();
+
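+       /* Process and DevicememHistory details are only gathered when the page
+        * fault debug feature is enabled.
+        */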
+       if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED)
+       {
+               /* Check if this is PM fault */
+               if (psInfo->sMMUFaultData.eType == MMU_FAULT_TYPE_PM)
+               {
+                       bIsPMFault = IMG_TRUE;
+                       bFound = IMG_TRUE;
+                       sProcessInfo.uiPID = RGXMEM_SERVER_PID_PM;
+                       OSStringLCopy(sProcessInfo.szProcessName, "PM", sizeof(sProcessInfo.szProcessName));
+                       sProcessInfo.szProcessName[sizeof(sProcessInfo.szProcessName) - 1] = '\0';
+                       sProcessInfo.bUnregistered = IMG_FALSE;
+               }
+               else
+               {
+                       /* look up the process details for the faulting page catalogue */
+                       bFound = RGXPCAddrToProcessInfo(psDevInfo, sPCDevPAddr, &sProcessInfo);
+               }
+
+               if (bFound)
+               {
+                       IMG_BOOL bHits;
+
+                       psInfo->ui32FaultInfoFlags = FAULT_INFO_PROC_INFO;
+                       psInfo->sProcessInfo = sProcessInfo;
+
+                       if (bIsPMFault)
+                       {
+                               bHits = IMG_TRUE;
+                       }
+                       else
+                       {
+                               /* get any DevicememHistory data for the faulting address */
+                               bHits = _GetDevicememHistoryData(sProcessInfo.uiPID,
+                                                                sFaultDevVAddr,
+                                                                psInfo->asQueryOut,
+                                                                ui32PageSizeBytes);
+
+                               if (bHits)
+                               {
+                                       psInfo->ui32FaultInfoFlags |= FAULT_INFO_DEVMEM_HIST;
+
+                                       /* if the page fault was caused by the firmware then get information about
+                                        * which client application created the related allocations.
+                                        *
+                                        * Fill in the process info data for each query result.
+                                        */
+
+                                       if (sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE)
+                                       {
+                                               _FillAppForFWFaults(psDevInfo, psInfo, &sProcessInfo);
+                                       }
+                               }
+                       }
+               }
+       }
+}
+
+/*!
+*******************************************************************************
+
+ @Function     _DumpFaultAddressHostView
+
+ @Description
+
+ Dump the host's view of the MMU fault (the page table data gathered by the
+ host for the faulting address) in human readable form.
+
+ @Input psFaultData          - MMU fault data gathered by the host
+ @Input pfnDumpDebugPrintf   - The debug printf function
+ @Input pvDumpDebugFile      - Optional file identifier to be passed to the
+                               'printf' function if required
+ @Return   void
+
+******************************************************************************/
+static void _DumpFaultAddressHostView(MMU_FAULT_DATA *psFaultData,
+                                       DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                       void *pvDumpDebugFile,
+                                       const IMG_CHAR* pszIndent)
+{
+       MMU_LEVEL eTopLevel;
+       const IMG_CHAR szPageLevel[][4] = {"", "PTE", "PDE", "PCE" };
+       const IMG_CHAR szPageError[][3] = {"", "PT",  "PD",  "PC"  };
+
+       eTopLevel = psFaultData->eTopLevel;
+
+       if (psFaultData->eType == MMU_FAULT_TYPE_UNKNOWN)
+       {
+               PVR_DUMPDEBUG_LOG("%sNo live host MMU data available", pszIndent);
+               return;
+       }
+       else if (psFaultData->eType == MMU_FAULT_TYPE_PM)
+       {
+               PVR_DUMPDEBUG_LOG("%sPM faulted at PC address = 0x%016" IMG_UINT64_FMTSPECx, pszIndent, psFaultData->sLevelData[MMU_LEVEL_0].ui64Address);
+       }
+       else
+       {
+               MMU_LEVEL eCurrLevel;
+               PVR_ASSERT(eTopLevel < MMU_LEVEL_LAST);
+
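+               /* Walk the captured levels from the top of the page table
+                * hierarchy downwards, printing each entry; the walk stops at
+                * the first level with no captured address, reported as an
+                * out-of-bounds index.
+                */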
+               for (eCurrLevel = eTopLevel; eCurrLevel > MMU_LEVEL_0; eCurrLevel--)
+               {
+                       MMU_LEVEL_DATA *psMMULevelData = &psFaultData->sLevelData[eCurrLevel];
+                       if (psMMULevelData->ui64Address)
+                       {
+                               if (psMMULevelData->uiBytesPerEntry == 4)
+                               {
+                                       PVR_DUMPDEBUG_LOG("%s%s for index %d = 0x%08x and is %s",
+                                                               pszIndent,
+                                                               szPageLevel[eCurrLevel],
+                                                               psMMULevelData->ui32Index,
+                                                               (IMG_UINT) psMMULevelData->ui64Address,
+                                                               psMMULevelData->psDebugStr);
+                               }
+                               else
+                               {
+                                       PVR_DUMPDEBUG_LOG("%s%s for index %d = 0x%016" IMG_UINT64_FMTSPECx " and is %s",
+                                                               pszIndent,
+                                                               szPageLevel[eCurrLevel],
+                                                               psMMULevelData->ui32Index,
+                                                               psMMULevelData->ui64Address,
+                                                               psMMULevelData->psDebugStr);
+                               }
+                       }
+                       else
+                       {
+                               PVR_DUMPDEBUG_LOG("%s%s index (%d) out of bounds (%d)",
+                                                       pszIndent,
+                                                       szPageError[eCurrLevel],
+                                                       psMMULevelData->ui32Index,
+                                                       psMMULevelData->ui32NumOfEntries);
+                               break;
+                       }
+               }
+       }
+
+}
+
+/*!
+*******************************************************************************
+
+ @Function     _RGXDumpRGXBIFBank
+
+ @Description
+
+ Dump BIF Bank state in human readable form.
+
+ @Input pfnDumpDebugPrintf   - The debug printf function
+ @Input pvDumpDebugFile      - Optional file identifier to be passed to the
+                               'printf' function if required
+ @Input psDevInfo            - RGX device info
+ @Input eBankID              - BIF identifier
+ @Input ui64MMUStatus        - MMU Status register value
+ @Input ui64ReqStatus        - BIF request Status register value
+ @Return   void
+
+******************************************************************************/
+static void _RGXDumpRGXBIFBank(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                       void *pvDumpDebugFile,
+                                       PVRSRV_RGXDEV_INFO *psDevInfo,
+                                       RGXDBG_BIF_ID eBankID,
+                                       IMG_UINT64 ui64MMUStatus,
+                                       IMG_UINT64 ui64ReqStatus,
+                                       const IMG_CHAR *pszIndent)
+{
+       if (ui64MMUStatus == 0x0)
+       {
+               PVR_DUMPDEBUG_LOG("%s%s - OK", pszIndent, pszBIFNames[eBankID]);
+       }
+       else
+       {
+               IMG_UINT32 ui32PageSize;
+               IMG_UINT32 ui32PC =
+                       (ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >>
+                               RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT;
+
+               /* Bank 0 & 1 share the same fields */
+               PVR_DUMPDEBUG_LOG("%s%s - FAULT:",
+                                                 pszIndent,
+                                                 pszBIFNames[eBankID]);
+
+               /* MMU Status */
+               {
+                       IMG_UINT32 ui32MMUDataType =
+                               (ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK) >>
+                                       RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT;
+
+                       IMG_BOOL bROFault = (ui64MMUStatus & RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN) != 0;
+                       IMG_BOOL bProtFault = (ui64MMUStatus & RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN) != 0;
+
+                       ui32PageSize = (ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK) >>
+                                               RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT;
+
+                       PVR_DUMPDEBUG_LOG("%s  * MMU status (0x%016" IMG_UINT64_FMTSPECx "): PC = %d%s, Page Size = %d%s%s%s.",
+                                               pszIndent,
+                                               ui64MMUStatus,
+                                               ui32PC,
+                                               (ui32PC < 0x8)?"":_RGXDecodePMPC(ui32PC),
+                                               ui32PageSize,
+                                               (bROFault)?", Read Only fault":"",
+                                               (bProtFault)?", PM/META protection fault":"",
+                                               _RGXDecodeMMULevel(ui32MMUDataType));
+               }
+
+               /* Req Status */
+               {
+                       IMG_CHAR *pszTagID;
+                       IMG_CHAR *pszTagSB;
+                       IMG_CHAR aszScratch[RGX_DEBUG_STR_SIZE];
+                       IMG_BOOL bRead;
+                       IMG_UINT32 ui32TagSB, ui32TagID;
+                       IMG_UINT64 ui64Addr;
+
+                       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY))
+                       {
+                               bRead = (ui64ReqStatus & RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_EN) != 0;
+                               ui32TagSB = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_CLRMSK) >>
+                                       RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_SHIFT;
+                               ui32TagID = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_CLRMSK) >>
+                                       RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_SHIFT;
+                       }
+                       else
+                       {
+                               bRead = (ui64ReqStatus & RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN) != 0;
+                               ui32TagSB = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK) >>
+                                       RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT;
+                               ui32TagID = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK) >>
+                                       RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT;
+                       }
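+                       /* The address field is stored right-shifted by the align
+                        * shift, so shift it back up to recover the byte address.
+                        */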
+                       ui64Addr = ((ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK) >>
+                               RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT) <<
+                               RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT;
+
+                       _RGXDecodeBIFReqTags(psDevInfo, eBankID, ui32TagID, ui32TagSB, &pszTagID, &pszTagSB, &aszScratch[0], RGX_DEBUG_STR_SIZE);
+
+                       PVR_DUMPDEBUG_LOG("%s  * Request (0x%016" IMG_UINT64_FMTSPECx
+                                         "): %s (%s), %s " IMG_DEV_VIRTADDR_FMTSPEC ".",
+                                         pszIndent,
+                                         ui64ReqStatus,
+                                         pszTagID,
+                                         pszTagSB,
+                                         (bRead)?"Reading from":"Writing to",
+                                         ui64Addr);
+               }
+       }
+}
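+
+/* These asserts check that the FW core memory fault registers share the
+ * BIF bank 0 request status field layout, so the same decode can be applied
+ * to both.
+ */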
+static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_EN == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_EN),
+                         "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_EN mismatch!");
+static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_CLRMSK == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_CLRMSK),
+                         "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_CLRMSK mismatch!");
+static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_SHIFT == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_SHIFT),
+                         "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_SHIFT mismatch!");
+static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_CLRMSK == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_CLRMSK),
+                         "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_CLRMSK mismatch!");
+static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_SHIFT == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_SHIFT),
+                         "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_SHIFT mismatch!");
+static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_CLRMSK),
+                         "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_CLRMSK mismatch!");
+static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_SHIFT),
+                         "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_SHIFT mismatch!");
+static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSHIFT),
+                         "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSHIFT mismatch!");
+
+/*!
+*******************************************************************************
+
+ @Function     _RGXDumpRGXMMUFaultStatus
+
+ @Description
+
+ Dump MMU Fault status in human readable form.
+
+ @Input pfnDumpDebugPrintf   - The debug printf function
+ @Input pvDumpDebugFile      - Optional file identifier to be passed to the
+                               'printf' function if required
+ @Input psDevInfo            - RGX device info
+ @Input ui64MMUStatus        - MMU Status register value
+ @Input pszMetaOrCore        - string representing call is for META or MMU core
+ @Return   void
+
+******************************************************************************/
+static void _RGXDumpRGXMMUFaultStatus(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                       void *pvDumpDebugFile,
+                                       PVRSRV_RGXDEV_INFO *psDevInfo,
+                                       IMG_UINT64 ui64MMUStatus,
+                                       const IMG_PCHAR pszMetaOrCore,
+                                       const IMG_CHAR *pszIndent)
+{
+       if (ui64MMUStatus == 0x0)
+       {
+               PVR_DUMPDEBUG_LOG("%sMMU (%s) - OK", pszIndent, pszMetaOrCore);
+       }
+       else
+       {
+               IMG_UINT32 ui32PC        = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK) >>
+                                          RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT;
+               IMG_UINT64 ui64Addr      = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK) >>
+                                          RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT) <<  4; /* align shift */
+               IMG_UINT32 ui32Requester = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK) >>
+                                          RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT;
+               IMG_UINT32 ui32SideBand  = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK) >>
+                                          RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT;
+               IMG_UINT32 ui32MMULevel  = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK) >>
+                                          RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT;
+               IMG_BOOL bRead           = (ui64MMUStatus & RGX_CR_MMU_FAULT_STATUS_RNW_EN) != 0;
+               IMG_BOOL bFault          = (ui64MMUStatus & RGX_CR_MMU_FAULT_STATUS_FAULT_EN) != 0;
+               IMG_BOOL bROFault        = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK) >>
+                                           RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT) == 0x2;
+               IMG_BOOL bProtFault      = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK) >>
+                                           RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT) == 0x3;
+               IMG_CHAR aszScratch[RGX_DEBUG_STR_SIZE];
+               IMG_CHAR *pszTagID;
+               IMG_CHAR *pszTagSB;
+
+               _RGXDecodeMMUReqTags(psDevInfo, ui32Requester, ui32SideBand, bRead, &pszTagID, &pszTagSB, aszScratch, RGX_DEBUG_STR_SIZE);
+
+               PVR_DUMPDEBUG_LOG("%sMMU (%s) - FAULT:", pszIndent, pszMetaOrCore);
+               PVR_DUMPDEBUG_LOG("%s  * MMU status (0x%016" IMG_UINT64_FMTSPECx "): PC = %d, %s 0x%010" IMG_UINT64_FMTSPECx ", %s (%s)%s%s%s%s.",
+                                                 pszIndent,
+                                                 ui64MMUStatus,
+                                                 ui32PC,
+                                                 (bRead)?"Reading from":"Writing to",
+                                                 ui64Addr,
+                                                 pszTagID,
+                                                 pszTagSB,
+                                                 (bFault)?", Fault":"",
+                                                 (bROFault)?", Read Only fault":"",
+                                                 (bProtFault)?", PM/META protection fault":"",
+                                                 _RGXDecodeMMULevel(ui32MMULevel));
+
+       }
+}
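+
+/* These asserts check that the META MMU fault status register shares the
+ * field layout of the core MMU fault status register, so one decode path
+ * covers both.
+ */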
+static_assert((RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK),
+                         "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT),
+                         "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK),
+                         "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT),
+                         "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_CLRMSK),
+                         "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_SHIFT),
+                         "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK),
+                         "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT),
+                         "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK),
+                         "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT),
+                         "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_RNW_EN == RGX_CR_MMU_FAULT_STATUS_META_RNW_EN),
+                         "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_FAULT_EN == RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN),
+                         "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK),
+                         "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT),
+                         "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+
+
+
+#if !defined(SUPPORT_TRUSTED_DEVICE)
+#if !defined(NO_HARDWARE)
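+/* Retrieve extra MIPS state by triggering an NMI in the MIPS wrapper:
+ * clear the sync flag in the shared NMI data area, enable and raise the NMI,
+ * wait for the FW NMI handler to save its state, then copy that state out.
+ */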
+static PVRSRV_ERROR _RGXMipsExtraDebug(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_MIPS_STATE *psMIPSState)
+{
+       void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM;
+       IMG_UINT32 ui32RegRead;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_UINT32 *pui32NMIMemoryPointer;
+       IMG_UINT32 volatile *pui32SyncFlag;
+       IMG_DEVMEM_OFFSET_T uiNMIMemoryBootOffset;
+
+       /* Map the FW data area to the kernel */
+       eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc,
+                                                                        (void **)&pui32NMIMemoryPointer);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to acquire NMI shared memory area (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+               goto map_error_fail;
+       }
+
+       /* Calculate offset to the boot/NMI data page */
+       uiNMIMemoryBootOffset = RGXMIPSFW_GET_OFFSET_IN_DWORDS(RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA));
+
+       /* Jump to the NMI shared data area within the page above */
+       pui32NMIMemoryPointer += uiNMIMemoryBootOffset + RGXMIPSFW_GET_OFFSET_IN_DWORDS(RGXMIPSFW_NMI_SHARED_DATA_BASE);
+
+       /* Acquire the NMI operations lock */
+       OSLockAcquire(psDevInfo->hNMILock);
+
+       /* Make sure the synchronisation flag is set to 0 */
+       pui32SyncFlag = &pui32NMIMemoryPointer[RGXMIPSFW_NMI_SYNC_FLAG_OFFSET];
+       *pui32SyncFlag = 0;
+
+       /* Readback performed as a part of memory barrier */
+       OSWriteMemoryBarrier(pui32SyncFlag);
+
+       /* Enable NMI issuing in the MIPS wrapper */
+       OSWriteHWReg64(pvRegsBaseKM,
+                                  RGX_CR_MIPS_WRAPPER_NMI_ENABLE,
+                                  RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_EN);
+       (void) OSReadHWReg64(pvRegsBaseKM, RGX_CR_MIPS_WRAPPER_NMI_ENABLE);
+
+       /* Check the MIPS is not in error state already (e.g. it is booting or an NMI has already been requested) */
+       ui32RegRead = OSReadHWReg32(pvRegsBaseKM,
+                                  RGX_CR_MIPS_EXCEPTION_STATUS);
+       if ((ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN) || (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN))
+       {
+
+               eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE;
+               goto fail;
+       }
+       ui32RegRead = 0;
+
+       /* Issue NMI */
+       OSWriteHWReg32(pvRegsBaseKM,
+                                  RGX_CR_MIPS_WRAPPER_NMI_EVENT,
+                                  RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_EN);
+       (void) OSReadHWReg64(pvRegsBaseKM, RGX_CR_MIPS_WRAPPER_NMI_EVENT);
+
+
+       /* Wait for NMI Taken to be asserted */
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               ui32RegRead = OSReadHWReg32(pvRegsBaseKM,
+                                                                       RGX_CR_MIPS_EXCEPTION_STATUS);
+               if (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN)
+               {
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       if ((ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN) == 0)
+       {
+               eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE;
+               goto fail;
+       }
+       ui32RegRead = 0;
+
+       /* Allow the firmware to proceed */
+       *pui32SyncFlag = 1;
+
+       /* Readback performed as a part of memory barrier */
+       OSWriteMemoryBarrier(pui32SyncFlag);
+
+       /* Wait for the FW to have finished the NMI routine */
+       ui32RegRead = OSReadHWReg32(pvRegsBaseKM,
+                                                               RGX_CR_MIPS_EXCEPTION_STATUS);
+
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               ui32RegRead = OSReadHWReg32(pvRegsBaseKM,
+                                                                       RGX_CR_MIPS_EXCEPTION_STATUS);
+               if (!(ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN))
+               {
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+       if (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN)
+       {
+               eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE;
+               goto fail;
+       }
+       ui32RegRead = 0;
+
+       /* Copy state */
+       OSDeviceMemCopy(psMIPSState, pui32NMIMemoryPointer + RGXMIPSFW_NMI_STATE_OFFSET, sizeof(*psMIPSState));
+
+       --(psMIPSState->ui32ErrorEPC);
+       --(psMIPSState->ui32EPC);
+
+       /* Disable NMI issuing in the MIPS wrapper */
+       OSWriteHWReg32(pvRegsBaseKM,
+                                  RGX_CR_MIPS_WRAPPER_NMI_ENABLE,
+                                  0);
+       (void) OSReadHWReg64(pvRegsBaseKM, RGX_CR_MIPS_WRAPPER_NMI_ENABLE);
+
+fail:
+       /* Release the NMI operations lock */
+       OSLockRelease(psDevInfo->hNMILock);
+       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+map_error_fail:
+       return eError;
+}
+
+/* Print decoded information from cause register */
+static void _RGXMipsDumpCauseDecode(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                    void *pvDumpDebugFile,
+                                    IMG_UINT32 ui32Cause,
+                                    IMG_UINT32 ui32ErrorState)
+{
+#define INDENT "    "
+       const IMG_UINT32 ui32ExcCode = RGXMIPSFW_C0_CAUSE_EXCCODE(ui32Cause);
+       const IMG_CHAR * const pszException = _GetMIPSExcString(ui32ExcCode);
+
+       if (ui32ErrorState == RGXMIPSFW_NMI_ERROR_STATE_SET &&
+           pszException != NULL)
+       {
+               PVR_DUMPDEBUG_LOG(INDENT "Cause exception: %s", pszException);
+       }
+
+       if (ui32Cause & RGXMIPSFW_C0_CAUSE_FDCIPENDING)
+       {
+               PVR_DUMPDEBUG_LOG(INDENT "FDC interrupt pending");
+       }
+
+       if (!(ui32Cause & RGXMIPSFW_C0_CAUSE_IV))
+       {
+               PVR_DUMPDEBUG_LOG(INDENT "Interrupt uses general interrupt vector");
+       }
+
+       if (ui32Cause & RGXMIPSFW_C0_CAUSE_PCIPENDING)
+       {
+               PVR_DUMPDEBUG_LOG(INDENT "Performance Counter Interrupt pending");
+       }
+
+       /* Unusable Coproc exception */
+       if (ui32ExcCode == 11)
+       {
+               PVR_DUMPDEBUG_LOG(INDENT "Unusable Coprocessor: %d", RGXMIPSFW_C0_CAUSE_UNUSABLE_UNIT(ui32Cause));
+       }
+
+#undef INDENT
+}
+
+static IMG_BOOL _IsFWCodeException(IMG_UINT32 ui32ExcCode)
+{
+       if (ui32ExcCode >= sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING))
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                        "Only %lu exceptions available in MIPS, %u is not a valid exception code",
+                        (unsigned long)sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING), ui32ExcCode));
+               return IMG_FALSE;
+       }
+
+       return apsMIPSExcCodes[ui32ExcCode].bIsFatal;
+}
+
+static void _RGXMipsDumpDebugDecode(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                       DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                       void *pvDumpDebugFile,
+                                       IMG_UINT32 ui32Debug,
+                                       IMG_UINT32 ui32DEPC)
+{
+       const IMG_CHAR *pszDException = NULL;
+       IMG_UINT32 i;
+#define INDENT "    "
+
+       if (!(ui32Debug & RGXMIPSFW_C0_DEBUG_DM))
+       {
+               return;
+       }
+
+       PVR_DUMPDEBUG_LOG("DEBUG                        :");
+
+       pszDException = _GetMIPSExcString(RGXMIPSFW_C0_DEBUG_EXCCODE(ui32Debug));
+
+       if (pszDException != NULL)
+       {
+               PVR_DUMPDEBUG_LOG(INDENT "Debug exception: %s", pszDException);
+       }
+
+       for (i = 0; i < ARRAY_SIZE(sMIPS_C0_DebugTable); ++i)
+       {
+               const RGXMIPSFW_C0_DEBUG_TBL_ENTRY * const psDebugEntry = &sMIPS_C0_DebugTable[i];
+
+               if (ui32Debug & psDebugEntry->ui32Mask)
+               {
+                       PVR_DUMPDEBUG_LOG(INDENT "%s", psDebugEntry->pszExplanation);
+               }
+       }
+#undef INDENT
+       PVR_DUMPDEBUG_LOG("DEPC                    :0x%08X", ui32DEPC);
+}
+
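+/* Work out the physical address ranges mapped by the two halves (EntryLo0 and
+ * EntryLo1) of a MIPS TLB entry, taking the remap register output instead when
+ * remap entries are supplied. Invalid halves are given all-ones dummy ranges
+ * so they never match in the overlap checks below.
+ */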
+static inline void _GetMipsTLBPARanges(const RGX_MIPS_TLB_ENTRY *psTLBEntry,
+                                       const RGX_MIPS_REMAP_ENTRY *psRemapEntry0,
+                                       const RGX_MIPS_REMAP_ENTRY *psRemapEntry1,
+                                       IMG_UINT64 *pui64PA0Start,
+                                       IMG_UINT64 *pui64PA0End,
+                                       IMG_UINT64 *pui64PA1Start,
+                                       IMG_UINT64 *pui64PA1End)
+{
+       IMG_BOOL bUseRemapOutput = (psRemapEntry0 != NULL && psRemapEntry1 != NULL) ? IMG_TRUE : IMG_FALSE;
+       IMG_UINT64 ui64PageSize = RGXMIPSFW_TLB_GET_PAGE_SIZE(psTLBEntry->ui32TLBPageMask);
+
+       if ((psTLBEntry->ui32TLBLo0 & RGXMIPSFW_TLB_VALID) == 0)
+       {
+               /* Dummy values to fail the range checks later */
+               *pui64PA0Start = -1ULL;
+               *pui64PA0End   = -1ULL;
+       }
+       else if (bUseRemapOutput)
+       {
+               *pui64PA0Start = (IMG_UINT64)psRemapEntry0->ui32RemapAddrOut << 12;
+               *pui64PA0End   = *pui64PA0Start + ui64PageSize - 1;
+       }
+       else
+       {
+               *pui64PA0Start = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo0);
+               *pui64PA0End   = *pui64PA0Start + ui64PageSize - 1;
+       }
+
+       if ((psTLBEntry->ui32TLBLo1 & RGXMIPSFW_TLB_VALID) == 0)
+       {
+               /* Dummy values to fail the range checks later */
+               *pui64PA1Start = -1ULL;
+               *pui64PA1End   = -1ULL;
+       }
+       else if (bUseRemapOutput)
+       {
+               *pui64PA1Start = (IMG_UINT64)psRemapEntry1->ui32RemapAddrOut << 12;
+               *pui64PA1End   = *pui64PA1Start + ui64PageSize - 1;
+       }
+       else
+       {
+               *pui64PA1Start = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo1);
+               *pui64PA1End   = *pui64PA1Start + ui64PageSize - 1;
+       }
+}
+
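+/*
+       Compares every pair of MIPS TLB entries and logs any physical address
+       ranges that overlap, i.e. duplicate physical mappings.
+*/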
+static void _CheckMipsTLBDuplicatePAs(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                      void *pvDumpDebugFile,
+                                      const RGX_MIPS_TLB_ENTRY *psTLB,
+                                      const RGX_MIPS_REMAP_ENTRY *psRemap)
+{
+       IMG_UINT64 ui64PA0StartI, ui64PA1StartI, ui64PA0StartJ, ui64PA1StartJ;
+       IMG_UINT64 ui64PA0EndI,   ui64PA1EndI,   ui64PA0EndJ,   ui64PA1EndJ;
+       IMG_UINT32 i, j;
+
+#define RANGES_OVERLAP(start0,end0,start1,end1)  ((start0) < (end1) && (start1) < (end0))
+
+       for (i = 0; i < RGXMIPSFW_NUMBER_OF_TLB_ENTRIES; i++)
+       {
+               _GetMipsTLBPARanges(&psTLB[i],
+                                   psRemap ? &psRemap[i] : NULL,
+                                   psRemap ? &psRemap[i + RGXMIPSFW_NUMBER_OF_TLB_ENTRIES] : NULL,
+                                   &ui64PA0StartI, &ui64PA0EndI,
+                                   &ui64PA1StartI, &ui64PA1EndI);
+
+               for (j = i + 1; j < RGXMIPSFW_NUMBER_OF_TLB_ENTRIES; j++)
+               {
+                       _GetMipsTLBPARanges(&psTLB[j],
+                                           psRemap ? &psRemap[j] : NULL,
+                                           psRemap ? &psRemap[j + RGXMIPSFW_NUMBER_OF_TLB_ENTRIES] : NULL,
+                                           &ui64PA0StartJ, &ui64PA0EndJ,
+                                           &ui64PA1StartJ, &ui64PA1EndJ);
+
+                       if (RANGES_OVERLAP(ui64PA0StartI, ui64PA0EndI, ui64PA0StartJ, ui64PA0EndJ) ||
+                           RANGES_OVERLAP(ui64PA0StartI, ui64PA0EndI, ui64PA1StartJ, ui64PA1EndJ) ||
+                           RANGES_OVERLAP(ui64PA1StartI, ui64PA1EndI, ui64PA0StartJ, ui64PA0EndJ) ||
+                           RANGES_OVERLAP(ui64PA1StartI, ui64PA1EndI, ui64PA1StartJ, ui64PA1EndJ)  )
+                       {
+                               PVR_DUMPDEBUG_LOG("Overlap between TLB entry %u and %u", i, j);
+                       }
+               }
+       }
+}
+
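+/* Decodes the remap region size field: returns 1 << (2 * (encoding + 1)) */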
+static inline IMG_UINT32 _GetMIPSRemapRegionSize(IMG_UINT32 ui32RegionSizeEncoding)
+{
+       return 1U << ((ui32RegionSizeEncoding + 1U) << 1U);
+}
+
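+/*
+       Dumps a single MIPS TLB entry: VA, page size and both output PAs with
+       their flags. If remap entries are supplied, their outputs replace the
+       printed PAs when the remap inputs match the TLB outputs; otherwise the
+       remap entries are printed on separate lines.
+*/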
+static inline void _RGXMipsDumpTLBEntry(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                        void *pvDumpDebugFile,
+                                        const RGX_MIPS_TLB_ENTRY *psTLBEntry,
+                                        const RGX_MIPS_REMAP_ENTRY *psRemapEntry0,
+                                        const RGX_MIPS_REMAP_ENTRY *psRemapEntry1,
+                                        IMG_UINT32 ui32Index)
+{
+       IMG_BOOL bDumpRemapEntries = (psRemapEntry0 != NULL && psRemapEntry1 != NULL) ? IMG_TRUE : IMG_FALSE;
+       IMG_UINT64 ui64PA0 = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo0);
+       IMG_UINT64 ui64PA1 = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo1);
+       IMG_UINT64 ui64Remap0AddrOut = 0, ui64Remap1AddrOut = 0;
+       IMG_UINT32 ui32Remap0AddrIn = 0, ui32Remap1AddrIn = 0;
+
+       if (bDumpRemapEntries)
+       {
+               /* RemapAddrIn is always 4k aligned and 32 bits wide */
+               ui32Remap0AddrIn = psRemapEntry0->ui32RemapAddrIn << 12;
+               ui32Remap1AddrIn = psRemapEntry1->ui32RemapAddrIn << 12;
+
+               /* RemapAddrOut is always 4k aligned and 32 or 36 bits wide */
+               ui64Remap0AddrOut = (IMG_UINT64)psRemapEntry0->ui32RemapAddrOut << 12;
+               ui64Remap1AddrOut = (IMG_UINT64)psRemapEntry1->ui32RemapAddrOut << 12;
+
+               /* If the TLB and remap entries match, merge them; otherwise print them separately */
+               if ((IMG_UINT32)ui64PA0 == ui32Remap0AddrIn &&
+                   (IMG_UINT32)ui64PA1 == ui32Remap1AddrIn)
+               {
+                       ui64PA0 = ui64Remap0AddrOut;
+                       ui64PA1 = ui64Remap1AddrOut;
+                       bDumpRemapEntries = IMG_FALSE;
+               }
+       }
+
+       PVR_DUMPDEBUG_LOG("%2u) VA 0x%08X (%3uk) -> PA0 0x%08" IMG_UINT64_FMTSPECx " %s%s%s, "
+                                                  "PA1 0x%08" IMG_UINT64_FMTSPECx " %s%s%s",
+                         ui32Index,
+                         psTLBEntry->ui32TLBHi,
+                         RGXMIPSFW_TLB_GET_PAGE_SIZE(psTLBEntry->ui32TLBPageMask),
+                         ui64PA0,
+                         gapszMipsPermissionPTFlags[RGXMIPSFW_TLB_GET_INHIBIT(psTLBEntry->ui32TLBLo0)],
+                         gapszMipsDirtyGlobalValidPTFlags[RGXMIPSFW_TLB_GET_DGV(psTLBEntry->ui32TLBLo0)],
+                         gapszMipsCoherencyPTFlags[RGXMIPSFW_TLB_GET_COHERENCY(psTLBEntry->ui32TLBLo0)],
+                         ui64PA1,
+                         gapszMipsPermissionPTFlags[RGXMIPSFW_TLB_GET_INHIBIT(psTLBEntry->ui32TLBLo1)],
+                         gapszMipsDirtyGlobalValidPTFlags[RGXMIPSFW_TLB_GET_DGV(psTLBEntry->ui32TLBLo1)],
+                         gapszMipsCoherencyPTFlags[RGXMIPSFW_TLB_GET_COHERENCY(psTLBEntry->ui32TLBLo1)]);
+
+       if (bDumpRemapEntries)
+       {
+               PVR_DUMPDEBUG_LOG("    Remap %2u : IN 0x%08X (%3uk) => OUT 0x%08" IMG_UINT64_FMTSPECx,
+                                 ui32Index,
+                                 ui32Remap0AddrIn,
+                                 _GetMIPSRemapRegionSize(psRemapEntry0->ui32RemapRegionSize),
+                                 ui64Remap0AddrOut);
+
+               PVR_DUMPDEBUG_LOG("    Remap %2u : IN 0x%08X (%3uk) => OUT 0x%08" IMG_UINT64_FMTSPECx,
+                                 ui32Index + RGXMIPSFW_NUMBER_OF_TLB_ENTRIES,
+                                 ui32Remap1AddrIn,
+                                 _GetMIPSRemapRegionSize(psRemapEntry1->ui32RemapRegionSize),
+                                 ui64Remap1AddrOut);
+       }
+}
+
+#endif /* !defined(NO_HARDWARE) */
+#endif /* !defined(SUPPORT_TRUSTED_DEVICE) */
+
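+/*
+       Returns a description string for a fatal RISC-V mcause value, or NULL
+       for non-fatal values; unrecognised values are logged and return NULL.
+*/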
+static inline IMG_CHAR const *_GetRISCVException(IMG_UINT32 ui32Mcause)
+{
+       switch (ui32Mcause)
+       {
+#define X(value, fatal, description) \
+               case value: \
+                       if (fatal) \
+                               return description; \
+                       return NULL;
+
+               RGXRISCVFW_MCAUSE_TABLE
+#undef X
+
+               default:
+                       PVR_DPF((PVR_DBG_WARNING, "Invalid RISC-V FW mcause value 0x%08x", ui32Mcause));
+                       return NULL;
+       }
+}
+
+/*
+       Appends flag description strings to a null-terminated string buffer -
+       each flag description string starts with a space.
+*/
+static void _Flags2Description(IMG_CHAR *psDesc,
+                               IMG_UINT32 ui32DescSize,
+                               const IMG_FLAGS2DESC *psConvTable,
+                               IMG_UINT32 ui32TableSize,
+                               IMG_UINT32 ui32Flags)
+{
+       IMG_UINT32 ui32Idx;
+
+       for (ui32Idx = 0; ui32Idx < ui32TableSize; ui32Idx++)
+       {
+               if ((ui32Flags & psConvTable[ui32Idx].uiFlag) == psConvTable[ui32Idx].uiFlag)
+               {
+                       OSStringLCat(psDesc, psConvTable[ui32Idx].pszLabel, ui32DescSize);
+               }
+       }
+}
+
+/*
+       Writes the FW system flag description strings to an uninitialised buffer.
+*/
+static void _GetFwSysFlagsDescription(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, IMG_UINT32 ui32RawFlags)
+{
+       const IMG_CHAR szCswLabel[] = "Ctx switch options:";
+       size_t uLabelLen = sizeof(szCswLabel) - 1;
+       const size_t uiBytesPerDesc = (ui32DescSize - uLabelLen) / 2U - 1U;
+
+       OSStringLCopy(psDesc, szCswLabel, ui32DescSize);
+
+       _Flags2Description(psDesc, uiBytesPerDesc + uLabelLen, asCswOpts2Description, ARRAY_SIZE(asCswOpts2Description), ui32RawFlags);
+       _Flags2Description(psDesc, ui32DescSize, asMisc2Description, ARRAY_SIZE(asMisc2Description), ui32RawFlags);
+}
+
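+/*
+       Writes the FW OS config flag description strings to an uninitialised buffer.
+*/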
+static void _GetFwOsFlagsDescription(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, IMG_UINT32 ui32RawFlags)
+{
+       const IMG_CHAR szCswLabel[] = "Ctx switch:";
+       size_t uLabelLen = sizeof(szCswLabel) - 1;
+       const size_t uiBytesPerDesc = (ui32DescSize - uLabelLen) / 2U - 1U;
+
+       OSStringLCopy(psDesc, szCswLabel, ui32DescSize);
+
+       _Flags2Description(psDesc, uiBytesPerDesc + uLabelLen, asFwOsCfg2Description, ARRAY_SIZE(asFwOsCfg2Description), ui32RawFlags);
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function     _RGXDumpFWAssert
+
+ @Description
+
+ Dump FW assert strings when a thread asserts.
+
+ @Input pfnDumpDebugPrintf   - The debug printf function
+ @Input pvDumpDebugFile      - Optional file identifier to be passed to the
+                               'printf' function if required
+ @Input psRGXFWIfTraceBufCtl - RGX FW trace buffer
+
+ @Return   void
+
+******************************************************************************/
+static void _RGXDumpFWAssert(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                       void *pvDumpDebugFile,
+                                       const RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl)
+{
+       const IMG_CHAR *pszTraceAssertPath;
+       const IMG_CHAR *pszTraceAssertInfo;
+       IMG_INT32 ui32TraceAssertLine;
+       IMG_UINT32 i;
+
+       for (i = 0; i < RGXFW_THREAD_NUM; i++)
+       {
+               pszTraceAssertPath = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szPath;
+               pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szInfo;
+               ui32TraceAssertLine = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.ui32LineNum;
+
+               /* Print non-empty assert strings */
+               if (*pszTraceAssertInfo)
+               {
+                       PVR_DUMPDEBUG_LOG("FW-T%d Assert: %s (%s:%d)",
+                                         i, pszTraceAssertInfo, pszTraceAssertPath, ui32TraceAssertLine);
+               }
+       }
+}
+
+/*!
+*******************************************************************************
+
+ @Function     _RGXDumpFWFaults
+
+ @Description
+
+ Dump the FW fault information recorded by the firmware.
+
+ @Input pfnDumpDebugPrintf   - The debug printf function
+ @Input pvDumpDebugFile      - Optional file identifier to be passed to the
+                               'printf' function if required
+ @Input psFwSysData          - RGX FW shared system data
+
+ @Return   void
+
+******************************************************************************/
+static void _RGXDumpFWFaults(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                             void *pvDumpDebugFile,
+                             const RGXFWIF_SYSDATA *psFwSysData)
+{
+       if (psFwSysData->ui32FWFaults > 0)
+       {
+               IMG_UINT32      ui32StartFault = psFwSysData->ui32FWFaults - RGXFWIF_FWFAULTINFO_MAX;
+               IMG_UINT32      ui32EndFault   = psFwSysData->ui32FWFaults - 1;
+               IMG_UINT32  ui32Index;
+
+               if (psFwSysData->ui32FWFaults < RGXFWIF_FWFAULTINFO_MAX)
+               {
+                       ui32StartFault = 0;
+               }
+
+               for (ui32Index = ui32StartFault; ui32Index <= ui32EndFault; ui32Index++)
+               {
+                       const RGX_FWFAULTINFO *psFaultInfo = &psFwSysData->sFaultInfo[ui32Index % RGXFWIF_FWFAULTINFO_MAX];
+                       IMG_UINT64 ui64Seconds, ui64Nanoseconds;
+
+                       /* Split the OS timestamp into seconds and nanoseconds */
+                       ConvertOSTimestampToSAndNS(psFaultInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds);
+
+                       PVR_DUMPDEBUG_LOG("FW Fault %d: %s (%s:%d)",
+                                         ui32Index+1, psFaultInfo->sFaultBuf.szInfo,
+                                         psFaultInfo->sFaultBuf.szPath,
+                                         psFaultInfo->sFaultBuf.ui32LineNum);
+                       PVR_DUMPDEBUG_LOG("            Data = 0x%08x, CRTimer = 0x%012"IMG_UINT64_FMTSPECx", OSTimer = %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC,
+                                         psFaultInfo->ui32Data,
+                                         psFaultInfo->ui64CRTimer,
+                                         ui64Seconds, ui64Nanoseconds);
+               }
+       }
+}
+
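+/*
+       Dumps, for each FW thread, the register address and mask it is currently
+       polling on (if any).
+*/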
+static void _RGXDumpFWPoll(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                       void *pvDumpDebugFile,
+                                       const RGXFWIF_SYSDATA *psFwSysData)
+{
+       IMG_UINT32 i;
+       for (i = 0; i < RGXFW_THREAD_NUM; i++)
+       {
+               if (psFwSysData->aui32CrPollAddr[i])
+               {
+                       PVR_DUMPDEBUG_LOG("T%u polling %s (reg:0x%08X mask:0x%08X)",
+                                         i,
+                                         ((psFwSysData->aui32CrPollAddr[i] & RGXFW_POLL_TYPE_SET)?("set"):("unset")),
+                                         psFwSysData->aui32CrPollAddr[i] & ~RGXFW_POLL_TYPE_SET,
+                                         psFwSysData->aui32CrPollMask[i]);
+               }
+       }
+}
+
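+/*
+       Dumps the hardware recovery (HWR) information: per-DM recovery counters
+       followed by the details of each recorded HWR event (lockup type, reset
+       timings, fault status and, for page faults, the host view of the
+       faulting address).
+*/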
+static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                                         void *pvDumpDebugFile,
+                                                         const RGXFWIF_SYSDATA *psFwSysData,
+                                                         const RGXFWIF_HWRINFOBUF *psHWRInfoBuf,
+                                                         PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       IMG_BOOL          bAnyLocked = IMG_FALSE;
+       IMG_UINT32        dm, i;
+       IMG_UINT32        ui32LineSize;
+       IMG_CHAR          *pszLine, *pszTemp;
+       const IMG_CHAR    *apszDmNames[RGXFWIF_DM_MAX] = {"GP", "TDM", "TA", "3D", "CDM", "RAY", "TA2", "TA3", "TA4"};
+       const IMG_CHAR    szMsgHeader[] = "Number of HWR: ";
+       const IMG_CHAR    szMsgFalse[] = "FALSE(";
+       IMG_CHAR          *pszLockupType = "";
+       const IMG_UINT32  ui32MsgHeaderCharCount = ARRAY_SIZE(szMsgHeader) - 1; /* size includes the null */
+       const IMG_UINT32  ui32MsgFalseCharCount = ARRAY_SIZE(szMsgFalse) - 1;
+       IMG_UINT32        ui32HWRRecoveryFlags;
+       IMG_UINT32        ui32ReadIndex;
+
+       if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM)))
+       {
+               apszDmNames[RGXFWIF_DM_TDM] = "2D";
+       }
+
+       for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++)
+       {
+               if (psHWRInfoBuf->aui32HwrDmLockedUpCount[dm] ||
+                   psHWRInfoBuf->aui32HwrDmOverranCount[dm])
+               {
+                       bAnyLocked = IMG_TRUE;
+                       break;
+               }
+       }
+
+       if (!PVRSRV_VZ_MODE_IS(GUEST) && !bAnyLocked && (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_HARDWARE_OK))
+       {
+               /* No HWR situation, print nothing */
+               return;
+       }
+
+       if (PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               IMG_BOOL bAnyHWROccured = IMG_FALSE;
+
+               for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++)
+               {
+                       if (psHWRInfoBuf->aui32HwrDmRecoveredCount[dm] != 0 ||
+                           psHWRInfoBuf->aui32HwrDmLockedUpCount[dm] != 0 ||
+                           psHWRInfoBuf->aui32HwrDmOverranCount[dm] != 0)
+                       {
+                               bAnyHWROccured = IMG_TRUE;
+                               break;
+                       }
+               }
+
+               if (!bAnyHWROccured)
+               {
+                       return;
+               }
+       }
+
+       ui32LineSize = sizeof(IMG_CHAR) * (
+                       ui32MsgHeaderCharCount +
+                       (psDevInfo->sDevFeatureCfg.ui32MAXDMCount*(     4/*DM name + left parenthesis*/ +
+                               10/*UINT32 max num of digits*/ +
+                               1/*slash*/ +
+                               10/*UINT32 max num of digits*/ +
+                               3/*right parenthesis + comma + space*/)) +
+                       ui32MsgFalseCharCount + 1 + (psDevInfo->sDevFeatureCfg.ui32MAXDMCount*6) + 1
+                               /* 'FALSE(' + ')' + (UINT16 max num + comma) per DM + \0 */
+                       );
+
+       pszLine = OSAllocMem(ui32LineSize);
+       if (pszLine == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                       "%s: Out of mem allocating line string (size: %d)",
+                       __func__,
+                       ui32LineSize));
+               return;
+       }
+
+       OSStringLCopy(pszLine, szMsgHeader, ui32LineSize);
+       pszTemp = pszLine + ui32MsgHeaderCharCount;
+
+       for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++)
+       {
+               pszTemp += OSSNPrintf(pszTemp,
+                               4 + 10 + 1 + 10 + 1 + 10 + 1 + 1 + 1 + 1
+                               /* (name + left parenthesis) + UINT32 + slash + UINT32 + plus + UINT32 + right parenthesis + comma + space + \0 */,
+                               "%s(%u/%u+%u), ",
+                               apszDmNames[dm],
+                               psHWRInfoBuf->aui32HwrDmRecoveredCount[dm],
+                               psHWRInfoBuf->aui32HwrDmLockedUpCount[dm],
+                               psHWRInfoBuf->aui32HwrDmOverranCount[dm]);
+       }
+
+       OSStringLCat(pszLine, szMsgFalse, ui32LineSize);
+       pszTemp += ui32MsgFalseCharCount;
+
+       for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++)
+       {
+               pszTemp += OSSNPrintf(pszTemp,
+                               10 + 1 + 1 /* UINT32 max num + comma + \0 */,
+                               (dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount-1 ? "%u," : "%u)"),
+                               psHWRInfoBuf->aui32HwrDmFalseDetectCount[dm]);
+       }
+
+       PVR_DUMPDEBUG_LOG("%s", pszLine);
+
+       OSFreeMem(pszLine);
+
+       /* Print out per HWR info */
+       for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++)
+       {
+               if (dm == RGXFWIF_DM_GP)
+               {
+                       PVR_DUMPDEBUG_LOG("DM %d (GP)", dm);
+               }
+               else
+               {
+                       if (!PVRSRV_VZ_MODE_IS(GUEST))
+                       {
+                               IMG_UINT32 ui32HWRRecoveryFlags = psFwSysData->aui32HWRRecoveryFlags[dm];
+                               IMG_CHAR sPerDmHwrDescription[RGX_DEBUG_STR_SIZE];
+                               sPerDmHwrDescription[0] = '\0';
+
+                               if (ui32HWRRecoveryFlags == RGXFWIF_DM_STATE_WORKING)
+                               {
+                                       OSStringLCopy(sPerDmHwrDescription, " working;", RGX_DEBUG_STR_SIZE);
+                               }
+                               else
+                               {
+                                       _Flags2Description(sPerDmHwrDescription, RGX_DEBUG_STR_SIZE,
+                                               asDmState2Description, ARRAY_SIZE(asDmState2Description),
+                                               ui32HWRRecoveryFlags);
+                               }
+                               PVR_DUMPDEBUG_LOG("DM %d (HWRflags 0x%08x:%s)", dm, ui32HWRRecoveryFlags, sPerDmHwrDescription);
+                       }
+                       else
+                       {
+                               PVR_DUMPDEBUG_LOG("DM %d", dm);
+                       }
+               }
+
+               ui32ReadIndex = 0;
+               for (i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++)
+               {
+                       IMG_BOOL bPMFault = IMG_FALSE;
+                       IMG_UINT32 ui32PC;
+                       IMG_UINT32 ui32PageSize = 0;
+                       IMG_DEV_PHYADDR sPCDevPAddr = { 0 };
+                       const RGX_HWRINFO *psHWRInfo = &psHWRInfoBuf->sHWRInfo[ui32ReadIndex];
+
+                       if ((psHWRInfo->eDM == dm) && (psHWRInfo->ui32HWRNumber != 0))
+                       {
+                               IMG_CHAR aui8RecoveryNum[10+10+1];
+                               IMG_UINT64 ui64Seconds, ui64Nanoseconds;
+                               IMG_BOOL bPageFault = IMG_FALSE;
+                               IMG_DEV_VIRTADDR sFaultDevVAddr;
+
+                               /* Split the OS timestamp into seconds and nanoseconds */
+                               ConvertOSTimestampToSAndNS(psHWRInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds);
+
+                               ui32HWRRecoveryFlags = psHWRInfo->ui32HWRRecoveryFlags;
+                               if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_LOCKUP) { pszLockupType = ", Guilty Lockup"; }
+                               else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_INNOCENT_LOCKUP) { pszLockupType = ", Innocent Lockup"; }
+                               else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_OVERRUNING) { pszLockupType = ", Guilty Overrun"; }
+                               else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_INNOCENT_OVERRUNING) { pszLockupType = ", Innocent Overrun"; }
+                               else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH) { pszLockupType = ", Hard Context Switch"; }
+                               else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GPU_ECC_HWR) { pszLockupType = ", GPU ECC HWR"; }
+
+                               OSSNPrintf(aui8RecoveryNum, sizeof(aui8RecoveryNum), "Recovery %d:", psHWRInfo->ui32HWRNumber);
+                               if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT))
+                               {
+                                       PVR_DUMPDEBUG_LOG("  %s Core = %u, PID = %u, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s",
+                                                  aui8RecoveryNum,
+                                                  psHWRInfo->ui32CoreID,
+                                                  psHWRInfo->ui32PID,
+                                                  psHWRInfo->ui32FrameNum,
+                                                  psHWRInfo->ui32ActiveHWRTData,
+                                                  psHWRInfo->ui32EventStatus,
+                                                  pszLockupType);
+                               }
+                               else
+                               {
+                                       PVR_DUMPDEBUG_LOG("  %s PID = %u, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s",
+                                                  aui8RecoveryNum,
+                                                  psHWRInfo->ui32PID,
+                                                  psHWRInfo->ui32FrameNum,
+                                                  psHWRInfo->ui32ActiveHWRTData,
+                                                  psHWRInfo->ui32EventStatus,
+                                                  pszLockupType);
+                               }
+                               pszTemp = &aui8RecoveryNum[0];
+                               while (*pszTemp != '\0')
+                               {
+                                       *pszTemp++ = ' ';
+                               }
+
+                               /* There's currently no time correlation for the Guest OSes on the Firmware, so there's no point printing OS timestamps on Guests */
+                               if (!PVRSRV_VZ_MODE_IS(GUEST))
+                               {
+                                       PVR_DUMPDEBUG_LOG("  %s CRTimer = 0x%012"IMG_UINT64_FMTSPECX", OSTimer = %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC ", CyclesElapsed = %" IMG_INT64_FMTSPECd,
+                                                                          aui8RecoveryNum,
+                                                                          psHWRInfo->ui64CRTimer,
+                                                                          ui64Seconds,
+                                                                          ui64Nanoseconds,
+                                                                          (psHWRInfo->ui64CRTimer-psHWRInfo->ui64CRTimeOfKick)*256);
+                               }
+                               else
+                               {
+                                       PVR_DUMPDEBUG_LOG("  %s CRTimer = 0x%012"IMG_UINT64_FMTSPECX", CyclesElapsed = %" IMG_INT64_FMTSPECd,
+                                                                          aui8RecoveryNum,
+                                                                          psHWRInfo->ui64CRTimer,
+                                                                          (psHWRInfo->ui64CRTimer-psHWRInfo->ui64CRTimeOfKick)*256);
+                               }
+
+                               if (psHWRInfo->ui64CRTimeHWResetFinish != 0)
+                               {
+                                       if (psHWRInfo->ui64CRTimeFreelistReady != 0)
+                                       {
+                                               /* If ui64CRTimeFreelistReady is less than ui64CRTimeHWResetFinish it means APM kicked in and the time is not valid. */
+                                               if (psHWRInfo->ui64CRTimeHWResetFinish < psHWRInfo->ui64CRTimeFreelistReady)
+                                               {
+                                                       PVR_DUMPDEBUG_LOG("  %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", FreelistReconTimeInCycles = %" IMG_INT64_FMTSPECd ", TotalRecoveryTimeInCycles = %" IMG_INT64_FMTSPECd,
+                                                                                          aui8RecoveryNum,
+                                                                                          (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256,
+                                                                                          (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256,
+                                                                                          (psHWRInfo->ui64CRTimeFreelistReady-psHWRInfo->ui64CRTimeHWResetFinish)*256,
+                                                                                          (psHWRInfo->ui64CRTimeFreelistReady-psHWRInfo->ui64CRTimer)*256);
+                                               }
+                                               else
+                                               {
+                                                       PVR_DUMPDEBUG_LOG("  %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", FreelistReconTimeInCycles = <not_timed>, TotalResetTimeInCycles = %" IMG_INT64_FMTSPECd,
+                                                                                          aui8RecoveryNum,
+                                                                                          (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256,
+                                                                                          (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256,
+                                                                                          (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimer)*256);
+                                               }
+                                       }
+                                       else
+                                       {
+                                               PVR_DUMPDEBUG_LOG("  %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", TotalResetTimeInCycles = %" IMG_INT64_FMTSPECd,
+                                                                                  aui8RecoveryNum,
+                                                                                  (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256,
+                                                                                  (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256,
+                                                                                  (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimer)*256);
+                                       }
+                               }
+
+                               switch (psHWRInfo->eHWRType)
+                               {
+                                       case RGX_HWRTYPE_BIF0FAULT:
+                                       case RGX_HWRTYPE_BIF1FAULT:
+                                       {
+                                               if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)))
+                                               {
+                                                       _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXFWIF_HWRTYPE_BIF_BANK_GET(psHWRInfo->eHWRType),
+                                                                                       psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus,
+                                                                                       psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus,
+                                                                                       DD_NORMAL_INDENT);
+
+                                                       bPageFault = IMG_TRUE;
+                                                       sFaultDevVAddr.uiAddr = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK);
+                                                       ui32PC = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >>
+                                                                       RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT;
+                                                       bPMFault = (ui32PC >= 8);
+                                                       ui32PageSize = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK) >>
+                                                                               RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT;
+                                                       sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sBIFInfo.ui64PCAddress;
+                                               }
+                                       }
+                                       break;
+                                       case RGX_HWRTYPE_TEXASBIF0FAULT:
+                                       {
+                                               if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)))
+                                               {
+                                                       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING))
+                                                       {
+                                                               _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF,
+                                                                                       psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus,
+                                                                                       psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus,
+                                                                                       DD_NORMAL_INDENT);
+
+                                                               bPageFault = IMG_TRUE;
+                                                               sFaultDevVAddr.uiAddr = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK);
+                                                               ui32PC = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >>
+                                                                               RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT;
+                                                               bPMFault = (ui32PC >= 8);
+                                                               ui32PageSize = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK) >>
+                                                                                       RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT;
+                                                               sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sBIFInfo.ui64PCAddress;
+                                                       }
+                                               }
+                                       }
+                                       break;
+
+                                       case RGX_HWRTYPE_ECCFAULT:
+                                       {
+                                               PVR_DUMPDEBUG_LOG("    ECC fault GPU=0x%08x", psHWRInfo->uHWRData.sECCInfo.ui32FaultGPU);
+                                       }
+                                       break;
+
+                                       case RGX_HWRTYPE_MMUFAULT:
+                                       {
+                                               if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+                                               {
+                                                       _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo,
+                                                                                       psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0],
+                                                                                       "Core",
+                                                                                       DD_NORMAL_INDENT);
+
+                                                       bPageFault = IMG_TRUE;
+                                                       sFaultDevVAddr.uiAddr =   psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0];
+                                                       sFaultDevVAddr.uiAddr &=  ~RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK;
+                                                       sFaultDevVAddr.uiAddr >>= RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT;
+                                                       sFaultDevVAddr.uiAddr <<= 4; /* align shift */
+                                                       ui32PC  = (psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK) >>
+                                                                                                          RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT;
+#if defined(SUPPORT_TRUSTED_DEVICE)
+                                                       ui32PC = ui32PC - 1;
+#endif
+                                                       bPMFault = (ui32PC <= 8);
+                                                       sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress;
+                                               }
+                                       }
+                                       break;
+
+                                       case RGX_HWRTYPE_MMUMETAFAULT:
+                                       {
+                                               if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+                                               {
+                                                       _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo,
+                                                                                       psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0],
+                                                                                       "Meta",
+                                                                                       DD_NORMAL_INDENT);
+
+                                                       bPageFault = IMG_TRUE;
+                                                       sFaultDevVAddr.uiAddr =   psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0];
+                                                       sFaultDevVAddr.uiAddr &=  ~RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK;
+                                                       sFaultDevVAddr.uiAddr >>= RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT;
+                                                       sFaultDevVAddr.uiAddr <<= 4; /* align shift */
+                                                       sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress;
+                                               }
+                                       }
+                                       break;
+
+                                       case RGX_HWRTYPE_POLLFAILURE:
+                                       {
+                                               PVR_DUMPDEBUG_LOG("    T%u polling %s (reg:0x%08X mask:0x%08X last:0x%08X)",
+                                                                                 psHWRInfo->uHWRData.sPollInfo.ui32ThreadNum,
+                                                                                 ((psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & RGXFW_POLL_TYPE_SET)?("set"):("unset")),
+                                                                                 psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & ~RGXFW_POLL_TYPE_SET,
+                                                                                 psHWRInfo->uHWRData.sPollInfo.ui32CrPollMask,
+                                                                                 psHWRInfo->uHWRData.sPollInfo.ui32CrPollLastValue);
+                                       }
+                                       break;
+
+                                       case RGX_HWRTYPE_MIPSTLBFAULT:
+                                       {
+                                               if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+                                               {
+                                                       IMG_UINT32 ui32EntryLo = psHWRInfo->uHWRData.sTLBInfo.ui32EntryLo;
+
+                                                       /* This is not exactly what the MMU code does, but the result should be the same */
+                                                       const IMG_UINT32 ui32UnmappedEntry =
+                                                               ((IMG_UINT32)(MMU_BAD_PHYS_ADDR & 0xffffffff) & RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT) | RGXMIPSFW_ENTRYLO_UNCACHED;
+
+                                                       PVR_DUMPDEBUG_LOG("    MIPS TLB fault: BadVA = 0x%08X, EntryLo = 0x%08X"
+                                                                                         " (page PA 0x%" IMG_UINT64_FMTSPECx", V %u)",
+                                                                                         psHWRInfo->uHWRData.sTLBInfo.ui32BadVAddr,
+                                                                                         ui32EntryLo,
+                                                                                         RGXMIPSFW_TLB_GET_PA(ui32EntryLo),
+                                                                                         ui32EntryLo & RGXMIPSFW_TLB_VALID ? 1 : 0);
+
+                                                       if (ui32EntryLo == ui32UnmappedEntry)
+                                                       {
+                                                               PVR_DUMPDEBUG_LOG("    Potential use-after-free detected");
+                                                       }
+                                               }
+                                       }
+                                       break;
+
+                                       case RGX_HWRTYPE_MMURISCVFAULT:
+                                       {
+                                               if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+                                               {
+                                                       _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_FWCORE,
+                                                                                       psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus,
+                                                                                       psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus,
+                                                                                       DD_NORMAL_INDENT);
+
+                                                       bPageFault = IMG_TRUE;
+                                                       bPMFault = IMG_FALSE;
+                                                       sFaultDevVAddr.uiAddr =
+                                                               (psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus &
+                                                                ~RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_CLRMSK);
+                                                       ui32PageSize =
+                                                               (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus &
+                                                                ~RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_CLRMSK) >>
+                                                               RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_SHIFT;
+                                                       sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sBIFInfo.ui64PCAddress;
+                                               }
+                                       }
+                                       break;
+
+                                       case RGX_HWRTYPE_OVERRUN:
+                                       case RGX_HWRTYPE_UNKNOWNFAILURE:
+                                       {
+                                               /* Nothing to dump */
+                                       }
+                                       break;
+
+                                       default:
+                                       {
+                                               PVR_DUMPDEBUG_LOG("    Unknown HWR Info type: 0x%x", psHWRInfo->eHWRType);
+                                       }
+                                       break;
+                               }
+
+                               if (bPageFault)
+                               {
+                                       FAULT_INFO *psInfo;
+
+                                       OSLockAcquire(psDevInfo->hDebugFaultInfoLock);
+
+                                       /* Find the matching Fault Info for this HWRInfo */
+                                       psInfo = &gsFaultInfoLog.asFaults[ui32ReadIndex];
+
+                                       /* If they do not match, we need to update psInfo */
+                                       if ((psInfo->ui64CRTimer != psHWRInfo->ui64CRTimer) ||
+                                               (psInfo->sFaultDevVAddr.uiAddr != sFaultDevVAddr.uiAddr))
+                                       {
+                                               MMU_FAULT_DATA *psFaultData = &psInfo->sMMUFaultData;
+
+                                               psFaultData->eType = MMU_FAULT_TYPE_UNKNOWN;
+
+                                               if (bPMFault)
+                                               {
+                                                       /* PM fault: dump the PC details only */
+                                                       psFaultData->eTopLevel = MMU_LEVEL_0;
+                                                       psFaultData->eType     = MMU_FAULT_TYPE_PM;
+                                                       psFaultData->sLevelData[MMU_LEVEL_0].ui64Address = sPCDevPAddr.uiAddr;
+                                               }
+                                               else
+                                               {
+                                                       RGXCheckFaultAddress(psDevInfo, &sFaultDevVAddr, &sPCDevPAddr, psFaultData);
+                                               }
+
+                                               _RecordFaultInfo(psDevInfo, psInfo,
+                                                                       sFaultDevVAddr, sPCDevPAddr, psHWRInfo->ui64CRTimer,
+                                                                       _PageSizeHWToBytes(ui32PageSize));
+
+                                       }
+
+                                       _DumpFaultAddressHostView(&psInfo->sMMUFaultData, pfnDumpDebugPrintf, pvDumpDebugFile, DD_NORMAL_INDENT);
+
+                                       if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED)
+                                       {
+                                               _PrintFaultInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psInfo, DD_NORMAL_INDENT);
+                                       }
+
+                                       OSLockRelease(psDevInfo->hDebugFaultInfoLock);
+                               }
+
+                       }
+
+                       if (ui32ReadIndex == RGXFWIF_HWINFO_MAX_FIRST - 1)
+                               ui32ReadIndex = psHWRInfoBuf->ui32WriteIndex;
+                       else
+                               ui32ReadIndex = (ui32ReadIndex + 1) - (ui32ReadIndex / RGXFWIF_HWINFO_LAST_INDEX) * RGXFWIF_HWINFO_MAX_LAST;
+               }
+       }
+}
+
+#if !defined(NO_HARDWARE)
+
+/*!
+*******************************************************************************
+
+ @Function     _CheckForPendingPage
+
+ @Description
+
+ Check if the MMU indicates it is blocked on a pending page
+
+ @Input psDevInfo       - RGX device info
+
+ @Return   IMG_BOOL      - IMG_TRUE if there is a pending page
+
+******************************************************************************/
+static INLINE IMG_BOOL _CheckForPendingPage(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       IMG_UINT32 ui32BIFMMUEntry;
+
+       ui32BIFMMUEntry = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_MMU_ENTRY);
+
+       if (ui32BIFMMUEntry & RGX_CR_BIF_MMU_ENTRY_PENDING_EN)
+       {
+               return IMG_TRUE;
+       }
+       else
+       {
+               return IMG_FALSE;
+       }
+}
+
+/*!
+*******************************************************************************
+
+ @Function     _GetPendingPageInfo
+
+ @Description
+
+ Get information about the pending page from the MMU status registers
+
+ @Input psDevInfo       - RGX device info
+ @Output psDevVAddr      - The device virtual address of the pending MMU address translation
+ @Output pui32CatBase    - The page catalog base
+ @Output pui32DataType   - The MMU entry data type
+
+ @Return   void
+
+******************************************************************************/
+static void _GetPendingPageInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_VIRTADDR *psDevVAddr,
+                                                                       IMG_UINT32 *pui32CatBase,
+                                                                       IMG_UINT32 *pui32DataType)
+{
+       IMG_UINT64 ui64BIFMMUEntryStatus;
+
+       ui64BIFMMUEntryStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_MMU_ENTRY_STATUS);
+
+       psDevVAddr->uiAddr = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_CLRMSK);
+
+       *pui32CatBase = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_CLRMSK) >>
+                                                               RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_SHIFT;
+
+       *pui32DataType = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_CLRMSK) >>
+                                                               RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_SHIFT;
+}
+
+#endif
+
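+/*!
+*******************************************************************************
+
+ @Function     RGXDumpRGXDebugSummary
+
+ @Description
+
+ Dump a summary of the RGX debug state: MMU/BIF fault status, pending MMU
+ page requests, firmware health state and reason, and virtualisation state.
+
+ @Input pfnDumpDebugPrintf   - The debug printf function
+ @Input pvDumpDebugFile      - Optional file identifier to be passed to the
+                               'printf' function if required
+ @Input psDevInfo            - RGX device info
+ @Input bRGXPoweredON        - IMG_TRUE if the GPU is powered on
+
+ @Return   void
+
+******************************************************************************/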
+void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                       void *pvDumpDebugFile,
+                                       PVRSRV_RGXDEV_INFO *psDevInfo,
+                                       IMG_BOOL bRGXPoweredON)
+{
+       IMG_CHAR *pszState, *pszReason;
+       const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+       const RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl;
+       IMG_UINT32 ui32OSid;
+       const RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+       /* space for the current clock speed and 3 previous */
+       RGXFWIF_TIME_CORR asTimeCorrs[4];
+       IMG_UINT32 ui32NumClockSpeedChanges;
+
+#if defined(NO_HARDWARE)
+       PVR_UNREFERENCED_PARAMETER(bRGXPoweredON);
+#else
+       if ((bRGXPoweredON) && !PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+               {
+
+                       IMG_UINT64      ui64RegValMMUStatus;
+
+                       ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS);
+                       _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui64RegValMMUStatus, "Core", DD_SUMMARY_INDENT);
+
+                       ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS_META);
+                       _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui64RegValMMUStatus, "Meta", DD_SUMMARY_INDENT);
+               }
+               else
+               {
+                       IMG_UINT64      ui64RegValMMUStatus, ui64RegValREQStatus;
+
+                       ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK0_MMU_STATUS);
+                       ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK0_REQ_STATUS);
+
+                       _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_BIF0, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT);
+
+                       if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, SINGLE_BIF)))
+                       {
+                               ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK1_MMU_STATUS);
+                               ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK1_REQ_STATUS);
+                               _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_BIF1, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT);
+                       }
+
+                       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+                       {
+                               ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS);
+                               ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS);
+                               _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_FWCORE, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT);
+                       }
+
+                       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING))
+                       {
+                               IMG_UINT32  ui32PhantomCnt = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) ?  RGX_REQ_NUM_PHANTOMS(RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS)) : 0;
+
+                               if (ui32PhantomCnt > 1)
+                               {
+                                       IMG_UINT32  ui32Phantom;
+                                       for (ui32Phantom = 0;  ui32Phantom < ui32PhantomCnt;  ui32Phantom++)
+                                       {
+                                               /* This can't be done as it may interfere with the FW... */
+                                               /*OSWriteHWReg64(RGX_CR_TEXAS_INDIRECT, ui32Phantom);*/
+
+                                               ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS);
+                                               ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS);
+
+                                               _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT);
+                                       }
+                               }
+                               else
+                               {
+                                       ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS);
+                                       ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS);
+
+                                       _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT);
+                               }
+                       }
+               }
+
+               if (_CheckForPendingPage(psDevInfo))
+               {
+                       IMG_UINT32 ui32CatBase;
+                       IMG_UINT32 ui32DataType;
+                       IMG_DEV_VIRTADDR sDevVAddr;
+
+                       PVR_DUMPDEBUG_LOG("MMU Pending page: Yes");
+
+                       _GetPendingPageInfo(psDevInfo, &sDevVAddr, &ui32CatBase, &ui32DataType);
+
+                       if (ui32CatBase >= 8)
+                       {
+                               PVR_DUMPDEBUG_LOG("Cannot check address on PM cat base %u", ui32CatBase);
+                       }
+                       else
+                       {
+                               IMG_DEV_PHYADDR sPCDevPAddr;
+                               MMU_FAULT_DATA sFaultData;
+
+                               sPCDevPAddr.uiAddr = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_CAT_BASEN(ui32CatBase));
+
+                               PVR_DUMPDEBUG_LOG("Checking device virtual address " IMG_DEV_VIRTADDR_FMTSPEC
+                                                       " on cat base %u. PC Addr = 0x%" IMG_UINT64_FMTSPECx,
+                                                               sDevVAddr.uiAddr,
+                                                               ui32CatBase,
+                                                               sPCDevPAddr.uiAddr);
+                               RGXCheckFaultAddress(psDevInfo, &sDevVAddr, &sPCDevPAddr, &sFaultData);
+                               _DumpFaultAddressHostView(&sFaultData, pfnDumpDebugPrintf, pvDumpDebugFile, DD_SUMMARY_INDENT);
+                       }
+               }
+       }
+#endif /* NO_HARDWARE */
+
+       /* Firmware state */
+       switch (OSAtomicRead(&psDevInfo->psDeviceNode->eHealthStatus))
+       {
+               case PVRSRV_DEVICE_HEALTH_STATUS_OK:  pszState = "OK";  break;
+               case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING:  pszState = "NOT RESPONDING";  break;
+               case PVRSRV_DEVICE_HEALTH_STATUS_DEAD:  pszState = "DEAD";  break;
+               case PVRSRV_DEVICE_HEALTH_STATUS_FAULT:  pszState = "FAULT";  break;
+               case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED:  pszState = "UNDEFINED";  break;
+               default:  pszState = "UNKNOWN";  break;
+       }
+
+       switch (OSAtomicRead(&psDevInfo->psDeviceNode->eHealthReason))
+       {
+               case PVRSRV_DEVICE_HEALTH_REASON_NONE:  pszReason = "";  break;
+               case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED:  pszReason = " - Asserted";  break;
+               case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING:  pszReason = " - Poll failing";  break;
+               case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS:  pszReason = " - Global Event Object timeouts rising";  break;
+               case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT:  pszReason = " - KCCB offset invalid";  break;
+               case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED:  pszReason = " - KCCB stalled";  break;
+               case PVRSRV_DEVICE_HEALTH_REASON_IDLING:  pszReason = " - Idling";  break;
+               case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING:  pszReason = " - Restarting";  break;
+               case PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS:  pszReason = " - Missing interrupts";  break;
+               default:  pszReason = " - Unknown reason";  break;
+       }
+
+#if !defined(NO_HARDWARE)
+       /* Determine the type of virtualisation support used */
+#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)
+       if (!PVRSRV_VZ_MODE_IS(NATIVE))
+       {
+#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
+#if defined(SUPPORT_AUTOVZ)
+#if defined(SUPPORT_AUTOVZ_HW_REGS)
+               PVR_DUMPDEBUG_LOG("RGX Virtualisation type: AutoVz with HW register support");
+#else
+               PVR_DUMPDEBUG_LOG("RGX Virtualisation type: AutoVz with shared memory");
+#endif /* defined(SUPPORT_AUTOVZ_HW_REGS) */
+#else
+               PVR_DUMPDEBUG_LOG("RGX Virtualisation type: Hypervisor-assisted with static Fw heap allocation");
+#endif /* defined(SUPPORT_AUTOVZ) */
+#else
+               PVR_DUMPDEBUG_LOG("RGX Virtualisation type: Hypervisor-assisted with dynamic Fw heap allocation");
+#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */
+       }
+#endif /* (RGX_NUM_OS_SUPPORTED > 1) */
+
+#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1))
+       if (!PVRSRV_VZ_MODE_IS(NATIVE))
+       {
+               RGXFWIF_CONNECTION_FW_STATE eFwState = KM_GET_FW_CONNECTION(psDevInfo);
+               RGXFWIF_CONNECTION_OS_STATE eOsState = KM_GET_OS_CONNECTION(psDevInfo);
+
+               PVR_DUMPDEBUG_LOG("RGX Virtualisation firmware connection state: %s (Fw=%s; OS=%s)",
+                                                 ((eFwState == RGXFW_CONNECTION_FW_ACTIVE) && (eOsState == RGXFW_CONNECTION_OS_ACTIVE)) ? ("UP") : ("DOWN"),
+                                                 (eFwState < RGXFW_CONNECTION_FW_STATE_COUNT) ? (apszFwOsStateName[eFwState]) : ("invalid"),
+                                                 (eOsState < RGXFW_CONNECTION_OS_STATE_COUNT) ? (apszFwOsStateName[eOsState]) : ("invalid"));
+
+       }
+#endif
+
+#if defined(SUPPORT_AUTOVZ) && defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)
+       if (!PVRSRV_VZ_MODE_IS(NATIVE))
+       {
+               IMG_UINT32 ui32FwAliveTS = KM_GET_FW_ALIVE_TOKEN(psDevInfo);
+               IMG_UINT32 ui32OsAliveTS = KM_GET_OS_ALIVE_TOKEN(psDevInfo);
+
+               PVR_DUMPDEBUG_LOG("RGX Virtualisation watchdog timestamps (in GPU timer ticks): Fw=%u; OS=%u; diff(FW, OS) = %u",
+                                                 ui32FwAliveTS, ui32OsAliveTS, ui32FwAliveTS - ui32OsAliveTS);
+       }
+#endif
+#endif /* !defined(NO_HARDWARE) */
+
+       if (!PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               IMG_CHAR sHwrStateDescription[RGX_DEBUG_STR_SIZE];
+               IMG_BOOL bOsIsolationEnabled = IMG_FALSE;
+
+               if (psFwSysData == NULL)
+               {
+                       /* can't dump any more information */
+                       PVR_DUMPDEBUG_LOG("RGX FW State: %s%s", pszState, pszReason);
+                       return;
+               }
+
+               sHwrStateDescription[0] = '\0';
+
+               _Flags2Description(sHwrStateDescription, RGX_DEBUG_STR_SIZE,
+                       asHwrState2Description, ARRAY_SIZE(asHwrState2Description),
+                       psFwSysData->ui32HWRStateFlags);
+               PVR_DUMPDEBUG_LOG("RGX FW State: %s%s (HWRState 0x%08x:%s)", pszState, pszReason, psFwSysData->ui32HWRStateFlags, sHwrStateDescription);
+               PVR_DUMPDEBUG_LOG("RGX FW Power State: %s (APM %s: %d ok, %d denied, %d non-idle, %d retry, %d other, %d total. Latency: %u ms)",
+                         pszPowStateName[psFwSysData->ePowState],
+                         (psDevInfo->pvAPMISRData)?"enabled":"disabled",
+                         psDevInfo->ui32ActivePMReqOk - psDevInfo->ui32ActivePMReqNonIdle,
+                         psDevInfo->ui32ActivePMReqDenied,
+                         psDevInfo->ui32ActivePMReqNonIdle,
+                         psDevInfo->ui32ActivePMReqRetry,
+                         psDevInfo->ui32ActivePMReqTotal -
+                                                 psDevInfo->ui32ActivePMReqOk -
+                                                 psDevInfo->ui32ActivePMReqDenied -
+                                                 psDevInfo->ui32ActivePMReqRetry -
+                                                 psDevInfo->ui32ActivePMReqNonIdle,
+                         psDevInfo->ui32ActivePMReqTotal,
+                         psRuntimeCfg->ui32ActivePMLatencyms);
+
+               ui32NumClockSpeedChanges = (IMG_UINT32) OSAtomicRead(&psDevInfo->psDeviceNode->iNumClockSpeedChanges);
+               RGXGetTimeCorrData(psDevInfo->psDeviceNode, asTimeCorrs, ARRAY_SIZE(asTimeCorrs));
+
+               PVR_DUMPDEBUG_LOG("RGX DVFS: %u frequency changes. "
+                                 "Current frequency: %u.%03u MHz (sampled at %" IMG_UINT64_FMTSPEC " ns). "
+                                 "FW frequency: %u.%03u MHz.",
+                                 ui32NumClockSpeedChanges,
+                                 asTimeCorrs[0].ui32CoreClockSpeed / 1000000,
+                                 (asTimeCorrs[0].ui32CoreClockSpeed / 1000) % 1000,
+                                 asTimeCorrs[0].ui64OSTimeStamp,
+                                 psRuntimeCfg->ui32CoreClockSpeed / 1000000,
+                                 (psRuntimeCfg->ui32CoreClockSpeed / 1000) % 1000);
+               if (ui32NumClockSpeedChanges > 0)
+               {
+                       PVR_DUMPDEBUG_LOG("          Previous frequencies: %u.%03u, %u.%03u, %u.%03u MHz (Sampled at "
+                                                       "%" IMG_UINT64_FMTSPEC ", %" IMG_UINT64_FMTSPEC ", %" IMG_UINT64_FMTSPEC ")",
+                                                                                               asTimeCorrs[1].ui32CoreClockSpeed / 1000000,
+                                                                                               (asTimeCorrs[1].ui32CoreClockSpeed / 1000) % 1000,
+                                                                                               asTimeCorrs[2].ui32CoreClockSpeed / 1000000,
+                                                                                               (asTimeCorrs[2].ui32CoreClockSpeed / 1000) % 1000,
+                                                                                               asTimeCorrs[3].ui32CoreClockSpeed / 1000000,
+                                                                                               (asTimeCorrs[3].ui32CoreClockSpeed / 1000) % 1000,
+                                                                                               asTimeCorrs[1].ui64OSTimeStamp,
+                                                                                               asTimeCorrs[2].ui64OSTimeStamp,
+                                                                                               asTimeCorrs[3].ui64OSTimeStamp);
+               }
+
+               for (ui32OSid = 0; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++)
+               {
+                       RGXFWIF_OS_RUNTIME_FLAGS sFwRunFlags = psFwSysData->asOsRuntimeFlagsMirror[ui32OSid];
+
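+                       /* MTS scheduling is treated as always enabled when BRN 64502 applies or the
+                        * core has no GPU virtualisation support; otherwise the per-OSid enable bit
+                        * is read from RGX_CR_MTS_SCHEDULE_ENABLE below.
+                        */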
+                       IMG_BOOL bMTSEnabled = (RGX_IS_BRN_SUPPORTED(psDevInfo, 64502) || !RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_VIRTUALISATION)) ?
+                                                                       IMG_TRUE : ((OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE_ENABLE) & BIT(ui32OSid)) != 0);
+
+                       PVR_DUMPDEBUG_LOG("RGX FW OS %u - State: %s; Freelists: %s%s; Priority: %d;%s %s", ui32OSid,
+                                                         apszFwOsStateName[sFwRunFlags.bfOsState],
+                                                         (sFwRunFlags.bfFLOk) ? "Ok" : "Not Ok",
+                                                         (sFwRunFlags.bfFLGrowPending) ? "; Grow Request Pending" : "",
+                                                         psDevInfo->psRGXFWIfRuntimeCfg->aui32OSidPriority[ui32OSid],
+                                                         (sFwRunFlags.bfIsolatedOS) ? " Isolated;" : "",
+                                                         (bMTSEnabled) ? "MTS on;" : "MTS off;"
+                                                        );
+
+                       bOsIsolationEnabled |= sFwRunFlags.bfIsolatedOS;
+               }
+
+#if defined(PVR_ENABLE_PHR)
+               {
+                       IMG_CHAR sPHRConfigDescription[RGX_DEBUG_STR_SIZE];
+
+                       sPHRConfigDescription[0] = '\0';
+                       _Flags2Description(sPHRConfigDescription, RGX_DEBUG_STR_SIZE,
+                                          asPHRConfig2Description, ARRAY_SIZE(asPHRConfig2Description),
+                                          BIT_ULL(psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode));
+
+                       PVR_DUMPDEBUG_LOG("RGX PHR configuration: (%d) %s", psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode, sPHRConfigDescription);
+               }
+#endif
+
+               if (bRGXPoweredON && RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT))
+               {
+                       if (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE_SYSTEM) > 1U)
+                       {
+                               PVR_DUMPDEBUG_LOG("RGX MC Configuration: 0x%X (1:primary, 0:secondary)", psFwSysData->ui32McConfig);
+                       }
+               }
+
+               if (bOsIsolationEnabled)
+               {
+                       PVR_DUMPDEBUG_LOG("RGX Hard Context Switch deadline: %u ms", psDevInfo->psRGXFWIfRuntimeCfg->ui32HCSDeadlineMS);
+               }
+
+               _RGXDumpFWAssert(pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBufCtl);
+               _RGXDumpFWFaults(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData);
+               _RGXDumpFWPoll(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData);
+       }
+       else
+       {
+               PVR_DUMPDEBUG_LOG("RGX FW State: Unavailable under Guest Mode of operation");
+               PVR_DUMPDEBUG_LOG("RGX FW Power State: Unavailable under Guest Mode of operation");
+       }
+
+       _RGXDumpFWHWRInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData, psDevInfo->psRGXFWIfHWRInfoBufCtl, psDevInfo);
+
+#if defined(SUPPORT_RGXFW_STATS_FRAMEWORK)
+       /* Dump all non-zero values in lines of 8... */
+       {
+               IMG_CHAR    pszLine[(9*RGXFWIF_STATS_FRAMEWORK_LINESIZE)+1];
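+               /* Each value is printed as " %08x" (9 characters), with
+                * RGXFWIF_STATS_FRAMEWORK_LINESIZE values per line plus one byte for the
+                * string terminator, which gives the pszLine size above.
+                */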
+               const IMG_UINT32 *pui32FWStatsBuf = psFwSysData->aui32FWStatsBuf;
+               IMG_UINT32  ui32Index1, ui32Index2;
+
+               PVR_DUMPDEBUG_LOG("STATS[START]: RGXFWIF_STATS_FRAMEWORK_MAX=%d", RGXFWIF_STATS_FRAMEWORK_MAX);
+               for (ui32Index1 = 0;  ui32Index1 < RGXFWIF_STATS_FRAMEWORK_MAX;  ui32Index1 += RGXFWIF_STATS_FRAMEWORK_LINESIZE)
+               {
+                       IMG_UINT32  ui32OrOfValues = 0;
+                       IMG_CHAR    *pszBuf = pszLine;
+
+                       /* Print all values in this line and skip if all zero... */
+                       for (ui32Index2 = 0;  ui32Index2 < RGXFWIF_STATS_FRAMEWORK_LINESIZE;  ui32Index2++)
+                       {
+                               ui32OrOfValues |= pui32FWStatsBuf[ui32Index1+ui32Index2];
+                               OSSNPrintf(pszBuf, 9 + 1, " %08x", pui32FWStatsBuf[ui32Index1+ui32Index2]);
+                               pszBuf += 9; /* write over the '\0' */
+                       }
+
+                       if (ui32OrOfValues != 0)
+                       {
+                               PVR_DUMPDEBUG_LOG("STATS[%08x]:%s", ui32Index1, pszLine);
+                       }
+               }
+               PVR_DUMPDEBUG_LOG("STATS[END]");
+       }
+#endif
+}
+
+static void _RGXDumpMetaSPExtraDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                               void *pvDumpDebugFile,
+                                               PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+/* List of extra META Slave Port debug registers */
+#define RGX_META_SP_EXTRA_DEBUG \
+                       X(RGX_CR_META_SP_MSLVCTRL0) \
+                       X(RGX_CR_META_SP_MSLVCTRL1) \
+                       X(RGX_CR_META_SP_MSLVDATAX) \
+                       X(RGX_CR_META_SP_MSLVIRQSTATUS) \
+                       X(RGX_CR_META_SP_MSLVIRQENABLE) \
+                       X(RGX_CR_META_SP_MSLVIRQLEVEL)
+
+       IMG_UINT32 ui32Idx, ui32RegIdx;
+       IMG_UINT32 ui32RegVal;
+       IMG_UINT32 ui32RegAddr;
+
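+       /* RGX_META_SP_EXTRA_DEBUG is expanded twice: with X(A) defined as "A," to build
+        * the register address table and with X(A) defined as "#A," to build the matching
+        * name table, so the two arrays below stay in sync by construction.
+        */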
+       const IMG_UINT32 aui32DebugRegAddr[] = {
+#define X(A) A,
+               RGX_META_SP_EXTRA_DEBUG
+#undef X
+               };
+
+       const IMG_CHAR* apszDebugRegName[] = {
+#define X(A) #A,
+       RGX_META_SP_EXTRA_DEBUG
+#undef X
+       };
+
+       const IMG_UINT32 aui32Debug2RegAddr[] = {0xA28, 0x0A30, 0x0A38};
+
+       PVR_DUMPDEBUG_LOG("META Slave Port extra debug:");
+
+       /* dump first set of Slave Port debug registers */
+       for (ui32Idx = 0; ui32Idx < sizeof(aui32DebugRegAddr)/sizeof(IMG_UINT32); ui32Idx++)
+       {
+               const IMG_CHAR* pszRegName = apszDebugRegName[ui32Idx];
+
+               ui32RegAddr = aui32DebugRegAddr[ui32Idx];
+               ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr);
+               PVR_DUMPDEBUG_LOG("  * %s: 0x%8.8X", pszRegName, ui32RegVal);
+       }
+
+       /* dump second set of Slave Port debug registers */
+       for (ui32Idx = 0; ui32Idx < 4; ui32Idx++)
+       {
+               OSWriteHWReg32(psDevInfo->pvRegsBaseKM, 0xA20, ui32Idx);
+               ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, 0xA20);
+               PVR_DUMPDEBUG_LOG("  * 0xA20[%d]: 0x%8.8X", ui32Idx, ui32RegVal);
+
+       }
+
+       for (ui32RegIdx = 0; ui32RegIdx < sizeof(aui32Debug2RegAddr)/sizeof(IMG_UINT32); ui32RegIdx++)
+       {
+               ui32RegAddr = aui32Debug2RegAddr[ui32RegIdx];
+               for (ui32Idx = 0; ui32Idx < 2; ui32Idx++)
+               {
+                       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr, ui32Idx);
+                       ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr);
+                       PVR_DUMPDEBUG_LOG("  * 0x%X[%d]: 0x%8.8X", ui32RegAddr, ui32Idx, ui32RegVal);
+               }
+       }
+
+}
+
+/*
+ *  Array of all the Firmware Trace log IDs used to convert the trace data.
+ */
+typedef struct _TRACEBUF_LOG_ {
+       RGXFW_LOG_SFids eSFId;
+       const IMG_CHAR  *pszName;
+       const IMG_CHAR  *pszFmt;
+       IMG_UINT32              ui32ArgNum;
+} TRACEBUF_LOG;
+
+static const TRACEBUF_LOG aLogDefinitions[] =
+{
+#define X(a, b, c, d, e) {RGXFW_LOG_CREATESFID(a,b,e), #c, d, e},
+       RGXFW_LOG_SFIDLIST
+#undef X
+};
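+/* Each X(...) entry of RGXFW_LOG_SFIDLIST becomes one {id, name, format, argument count}
+ * row in the table above; it is walked by _FirmwareTraceIntegrityCheck() below.
+ */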
+
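+/* ANDing an SF id with NARGS_MASK discards bits 16..19 (the per-id argument-count field,
+ * as the macro name suggests), so ids that differ only in argument count still compare
+ * equal in the uniqueness check below.
+ */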
+#define NARGS_MASK (~(0xF<<16))
+static IMG_BOOL _FirmwareTraceIntegrityCheck(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                               void *pvDumpDebugFile)
+{
+       const TRACEBUF_LOG *psLogDef = &aLogDefinitions[0];
+       IMG_BOOL bIntegrityOk = IMG_TRUE;
+
+       /*
+        * For every log ID, check the format string and number of arguments is valid.
+        */
+       while (psLogDef->eSFId != RGXFW_SF_LAST)
+       {
+               const TRACEBUF_LOG *psLogDef2;
+               const IMG_CHAR *pszString;
+               IMG_UINT32 ui32Count;
+
+               /*
+                * Check the number of arguments matches the number of '%' in the string and
+                * check that no string uses %s which is not supported as it requires a
+                * pointer to memory that is not going to be valid.
+                */
+               pszString = psLogDef->pszFmt;
+               ui32Count = 0;
+
+               while (*pszString != '\0')
+               {
+                       if (*pszString++ == '%')
+                       {
+                               ui32Count++;
+                               if (*pszString == 's')
+                               {
+                                       bIntegrityOk = IMG_FALSE;
+                                       PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s uses an unsupported format specifier (fmt: %%%c). Please fix.",
+                                                                         psLogDef->pszName, *pszString);
+                               }
+                               else if (*pszString == '%')
+                               {
+                                       /* Double % is a printable % sign and not a format string... */
+                                       ui32Count--;
+                               }
+                       }
+               }
+
+               if (ui32Count != psLogDef->ui32ArgNum)
+               {
+                       bIntegrityOk = IMG_FALSE;
+                       PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has %d format conversions but %d arguments are specified. Please fix.",
+                                         psLogDef->pszName, ui32Count, psLogDef->ui32ArgNum);
+               }
+
+               /* RGXDumpFirmwareTrace() has a hardcoded limit of 20 supported arguments... */
+               if (ui32Count > 20)
+               {
+                       bIntegrityOk = IMG_FALSE;
+                       PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has %d arguments but a maximum of 20 are supported. Please fix.",
+                                         psLogDef->pszName, ui32Count);
+               }
+
+               /* Check the id number is unique (don't take into account the number of arguments) */
+               ui32Count = 0;
+               psLogDef2 = &aLogDefinitions[0];
+
+               while (psLogDef2->eSFId != RGXFW_SF_LAST)
+               {
+                       if ((psLogDef->eSFId & NARGS_MASK) == (psLogDef2->eSFId & NARGS_MASK))
+                       {
+                               ui32Count++;
+                       }
+                       psLogDef2++;
+               }
+
+               if (ui32Count != 1)
+               {
+                       bIntegrityOk = IMG_FALSE;
+                       PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s id %x is not unique, there are %d more. Please fix.",
+                                         psLogDef->pszName, psLogDef->eSFId, ui32Count - 1);
+               }
+
+               /* Move to the next log ID... */
+               psLogDef++;
+       }
+
+       return bIntegrityOk;
+}
+
+typedef struct {
+       IMG_UINT16     ui16Mask;
+       const IMG_CHAR *pszStr;
+} RGXFWT_DEBUG_INFO_MSKSTR; /* pair of bit mask and debug info message string */
+
+
+/*!
+*******************************************************************************
+
+ @Function     RGXPrepareExtraDebugInfo
+
+ @Description
+
+ Prepares a debug info string by decoding the ui16DebugInfo value passed in
+
+ @Input pszBuffer       - pointer to debug info string buffer
+ @Input ui32BufferSize  - size in bytes of the debug info string buffer
+ @Input ui16DebugInfo   - debug info flags to decode
+
+ @Return   void
+
+******************************************************************************/
+static void RGXPrepareExtraDebugInfo(IMG_CHAR *pszBuffer, IMG_UINT32 ui32BufferSize, IMG_UINT16 ui16DebugInfo)
+{
+       const RGXFWT_DEBUG_INFO_MSKSTR aDebugInfoMskStr[] =
+       {
+#define X(a, b) {a, b},
+               RGXFWT_DEBUG_INFO_MSKSTRLIST
+#undef X
+       };
+
+       IMG_UINT32 ui32NumFields = sizeof(aDebugInfoMskStr)/sizeof(RGXFWT_DEBUG_INFO_MSKSTR);
+       IMG_UINT32 i;
+       IMG_BOOL   bHasExtraDebugInfo = IMG_FALSE;
+
+       /* Add prepend string */
+       OSStringLCopy(pszBuffer, RGXFWT_DEBUG_INFO_STR_PREPEND, ui32BufferSize);
+
+       /* Add debug info strings */
+       for (i = 0; i < ui32NumFields; i++)
+       {
+               if (ui16DebugInfo & aDebugInfoMskStr[i].ui16Mask)
+               {
+                       if (bHasExtraDebugInfo)
+                       {
+                               OSStringLCat(pszBuffer, ", ", ui32BufferSize); /* Add comma separator */
+                       }
+                       OSStringLCat(pszBuffer, aDebugInfoMskStr[i].pszStr, ui32BufferSize);
+                       bHasExtraDebugInfo = IMG_TRUE;
+               }
+       }
+
+       /* Add append string */
+       OSStringLCat(pszBuffer, RGXFWT_DEBUG_INFO_STR_APPEND, ui32BufferSize);
+}
+
+void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                               void *pvDumpDebugFile,
+                               PVRSRV_RGXDEV_INFO  *psDevInfo)
+{
+       RGXFWIF_TRACEBUF  *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl;
+       static IMG_BOOL   bIntegrityCheckPassed = IMG_FALSE;
+
+       /* Check that the firmware trace is correctly defined... */
+       if (!bIntegrityCheckPassed)
+       {
+               bIntegrityCheckPassed = _FirmwareTraceIntegrityCheck(pfnDumpDebugPrintf, pvDumpDebugFile);
+               if (!bIntegrityCheckPassed)
+               {
+                       return;
+               }
+       }
+
+       /* Dump FW trace information... */
+       if (psRGXFWIfTraceBufCtl != NULL)
+       {
+               IMG_UINT32  tid;
+               IMG_UINT32  ui32TraceBufSizeInDWords = psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords;
+
+               PVR_DUMPDEBUG_LOG("Device ID: %u", psDevInfo->psDeviceNode->sDevId.ui32InternalID);
+
+               /* Print the log type settings... */
+               if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)
+               {
+                       PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")",
+                                                         ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")),
+                                                         RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType)
+                                                         );
+               }
+               else
+               {
+                       PVR_DUMPDEBUG_LOG("Debug log type: none");
+               }
+
+               /* Print the decoded log for each thread... */
+               for (tid = 0;  tid < RGXFW_THREAD_NUM;  tid++)
+               {
+                       volatile IMG_UINT32  *pui32FWWrapCount = &(psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.ui32LineNum);
+                       volatile IMG_UINT32  *pui32FWTracePtr  = &(psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer);
+                       IMG_UINT32           *pui32TraceBuf    = psRGXFWIfTraceBufCtl->sTraceBuf[tid].pui32TraceBuffer;
+                       IMG_UINT32           ui32HostWrapCount = *pui32FWWrapCount;
+                       IMG_UINT32           ui32HostTracePtr  = *pui32FWTracePtr;
+                       IMG_UINT32           ui32Count         = 0;
+
+                       if (pui32TraceBuf == NULL)
+                       {
+                               /* trace buffer not yet allocated */
+                               continue;
+                       }
+
+                       while (ui32Count < ui32TraceBufSizeInDWords)
+                       {
+                               IMG_UINT32  ui32Data, ui32DataToId;
+
+                               /* Find the first valid log ID, skipping any invalid or padding words... */
+                               do
+                               {
+                                       ui32Data     = pui32TraceBuf[ui32HostTracePtr];
+                                       ui32DataToId = idToStringID(ui32Data, SFs);
+
+                                       /* If an unrecognized id is found it may be inconsistent data or a firmware trace error. */
+                                       if (ui32DataToId == RGXFW_SF_LAST  &&  RGXFW_LOG_VALIDID(ui32Data))
+                                       {
+                                               PVR_DUMPDEBUG_LOG("WARNING: Unrecognized id (%x). From here on the trace might be wrong!", ui32Data);
+                                       }
+
+                                       /* Update the trace pointer... */
+                                       ui32HostTracePtr++;
+                                       if (ui32HostTracePtr >= ui32TraceBufSizeInDWords)
+                                       {
+                                               ui32HostTracePtr = 0;
+                                               ui32HostWrapCount++;
+                                       }
+                                       ui32Count++;
+                               } while ((RGXFW_SF_LAST == ui32DataToId)  &&
+                                        ui32Count < ui32TraceBufSizeInDWords);
+
+                               if (ui32Count < ui32TraceBufSizeInDWords)
+                               {
+                                       IMG_CHAR   szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN] = "%" IMG_UINT64_FMTSPEC ":T%u-%s> ";
+                                       IMG_CHAR   szDebugInfoBuffer[RGXFWT_DEBUG_INFO_STR_MAXLEN] = "";
+                                       IMG_UINT64 ui64Timestamp;
+                                       IMG_UINT16 ui16DebugInfo;
+
+                                       /* If we hit the ASSERT message then this is the end of the log... */
+                                       if (ui32Data == RGXFW_SF_MAIN_ASSERT_FAILED)
+                                       {
+                                               PVR_DUMPDEBUG_LOG("ASSERTION %s failed at %s:%u",
+                                                                                 psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.szInfo,
+                                                                                 psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.szPath,
+                                                                                 psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.ui32LineNum);
+                                               break;
+                                       }
+
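+                                       /* The 64-bit trace word assembled next packs an extra-debug-info
+                                        * bit-field alongside the time value; the CLRMSK/SHIFT pairs below
+                                        * split it into ui16DebugInfo and ui64Timestamp.
+                                        */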
+                                       ui64Timestamp = (IMG_UINT64)(pui32TraceBuf[(ui32HostTracePtr + 0) % ui32TraceBufSizeInDWords]) << 32 |
+                                                       (IMG_UINT64)(pui32TraceBuf[(ui32HostTracePtr + 1) % ui32TraceBufSizeInDWords]);
+
+                                       ui16DebugInfo = (IMG_UINT16) ((ui64Timestamp & ~RGXFWT_TIMESTAMP_DEBUG_INFO_CLRMSK) >> RGXFWT_TIMESTAMP_DEBUG_INFO_SHIFT);
+                                       ui64Timestamp = (ui64Timestamp & ~RGXFWT_TIMESTAMP_TIME_CLRMSK) >> RGXFWT_TIMESTAMP_TIME_SHIFT;
+
+                                       /*
+                                        * Print the trace string and provide up to 20 arguments which the
+                                        * printf function will be able to use. We have already checked
+                                        * that no string uses more than this.
+                                        */
+                                       OSStringLCat(szBuffer, SFs[ui32DataToId].psName, PVR_MAX_DEBUG_MESSAGE_LEN);
+
+                                       /* Check and append any extra debug info available */
+                                       if (ui16DebugInfo)
+                                       {
+                                               /* Prepare debug info string */
+                                               RGXPrepareExtraDebugInfo(szDebugInfoBuffer, RGXFWT_DEBUG_INFO_STR_MAXLEN, ui16DebugInfo);
+
+                                               /* Append debug info string */
+                                               OSStringLCat(szBuffer, szDebugInfoBuffer, PVR_MAX_DEBUG_MESSAGE_LEN);
+                                       }
+
+                                       PVR_DUMPDEBUG_LOG(szBuffer, ui64Timestamp, tid, groups[RGXFW_SF_GID(ui32Data)],
+                                                                         pui32TraceBuf[(ui32HostTracePtr +  2) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr +  3) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr +  4) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr +  5) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr +  6) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr +  7) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr +  8) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr +  9) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr + 10) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr + 11) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr + 12) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr + 13) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr + 14) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr + 15) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr + 16) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr + 17) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr + 18) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr + 19) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr + 20) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr + 21) % ui32TraceBufSizeInDWords]);
+
+                                       /* Update the trace pointer... */
+                                       ui32HostTracePtr = ui32HostTracePtr + 2 + RGXFW_SF_PARAMNUM(ui32Data);
+                                       if (ui32HostTracePtr >= ui32TraceBufSizeInDWords)
+                                       {
+                                               ui32HostTracePtr = ui32HostTracePtr % ui32TraceBufSizeInDWords;
+                                               ui32HostWrapCount++;
+                                       }
+                                       ui32Count = (ui32Count + 2 + RGXFW_SF_PARAMNUM(ui32Data));
+
+                                       /* Has the FW trace buffer overtaken the host pointer during the last line printed??? */
+                                       if ((*pui32FWWrapCount > ui32HostWrapCount) ||
+                                           ((*pui32FWWrapCount == ui32HostWrapCount) && (*pui32FWTracePtr > ui32HostTracePtr)))
+                                       {
+                                               /* Move forward to the oldest entry again... */
+                                               PVR_DUMPDEBUG_LOG(". . .");
+                                               ui32HostWrapCount = *pui32FWWrapCount;
+                                               ui32HostTracePtr  = *pui32FWTracePtr;
+                                       }
+                               }
+                       }
+               }
+       }
+}
+
+#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS)
+void RGXDumpPowerMonitoring(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                               void *pvDumpDebugFile,
+                               PVRSRV_RGXDEV_INFO  *psDevInfo)
+{
+       const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+
+       /* Print the power monitoring counters... */
+       if (psFwSysData != NULL)
+       {
+               const IMG_UINT32 *pui32TraceBuf = psFwSysData->sPowerMonBuf.pui32TraceBuffer;
+               IMG_UINT32 ui32TracePtr = 0; //psFwSysData->sPowerMonBuf.ui32TracePointer;
+               IMG_UINT32 ui32PowerMonBufSizeInDWords = psFwSysData->ui32PowerMonBufSizeInDWords;
+               IMG_UINT32 ui32Count = 0;
+               IMG_UINT64 ui64Timestamp;
+
+               if (pui32TraceBuf == NULL)
+               {
+                       /* power monitoring buffer not yet allocated */
+                       return;
+               }
+
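+               /* The buffer is expected to start with an RGX_CR_TIMER marker word followed
+                * by a 64-bit timestamp split across the next two dwords.
+                */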
+               if (pui32TraceBuf[ui32TracePtr] != RGX_CR_TIMER)
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "Power monitoring data not available."));
+                       return;
+               }
+               ui64Timestamp = (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 1) % ui32PowerMonBufSizeInDWords]) << 32 |
+                                               (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords]);
+
+               /* Update the trace pointer... */
+               ui32TracePtr = (ui32TracePtr + 3) % ui32PowerMonBufSizeInDWords;
+               ui32Count    = (ui32Count    + 3);
+
+               PVR_DPF((PVR_DBG_WARNING, "Dumping power monitoring buffer: CPUVAddr = %p, pointer = 0x%x, size = 0x%x",
+                                pui32TraceBuf,
+                                ui32TracePtr,
+                                ui32PowerMonBufSizeInDWords));
+
+               while (ui32Count < ui32PowerMonBufSizeInDWords)
+               {
+                       /* power monitoring data is (register, value) dword pairs */
+                       PVR_DUMPDEBUG_LOG("%" IMG_UINT64_FMTSPEC ":POWMON  0x%08x 0x%08x  0x%08x 0x%08x",
+                                                         ui64Timestamp,
+                                                         pui32TraceBuf[(ui32TracePtr + 0) % ui32PowerMonBufSizeInDWords],
+                                                         pui32TraceBuf[(ui32TracePtr + 1) % ui32PowerMonBufSizeInDWords],
+                                                         pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords],
+                                                         pui32TraceBuf[(ui32TracePtr + 3) % ui32PowerMonBufSizeInDWords]);
+
+                       if (pui32TraceBuf[(ui32TracePtr + 0) % ui32PowerMonBufSizeInDWords] == RGXFWIF_TIMEDIFF_ID ||
+                               pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords] == RGXFWIF_TIMEDIFF_ID)
+                       {
+                               /* end of buffer */
+                               break;
+                       }
+
+                       /* Update the trace pointer... */
+                       ui32TracePtr = (ui32TracePtr + 4) % ui32PowerMonBufSizeInDWords;
+                       ui32Count    = (ui32Count    + 4);
+               }
+       }
+}
+#endif
+
+static const IMG_CHAR *_RGXGetDebugDevStateString(PVRSRV_DEVICE_STATE eDevState)
+{
+       switch (eDevState)
+       {
+               case PVRSRV_DEVICE_STATE_INIT:
+                       return "Initialising";
+               case PVRSRV_DEVICE_STATE_ACTIVE:
+                       return "Active";
+               case PVRSRV_DEVICE_STATE_DEINIT:
+                       return "De-initialising";
+               case PVRSRV_DEVICE_STATE_BAD:
+                       return "Bad";
+               case PVRSRV_DEVICE_STATE_UNDEFINED:
+                       PVR_ASSERT(!"Device has undefined state");
+                       __fallthrough;
+               default:
+                       return "Unknown";
+       }
+}
+
+static const IMG_CHAR* _RGXGetDebugDevPowerStateString(PVRSRV_DEV_POWER_STATE ePowerState)
+{
+       switch (ePowerState)
+       {
+               case PVRSRV_DEV_POWER_STATE_DEFAULT: return "DEFAULT";
+               case PVRSRV_DEV_POWER_STATE_OFF: return "OFF";
+               case PVRSRV_DEV_POWER_STATE_ON: return "ON";
+               default: return "UNKNOWN";
+       }
+}
+
+/* Helper macros to emit data */
+#define REG32_FMTSPEC   "%-30s: 0x%08X"
+#define REG64_FMTSPEC   "%-30s: 0x%016" IMG_UINT64_FMTSPECx
+#define DDLOG32(R)      PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, RGX_CR_##R));
+#define DDLOG64(R)      PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadHWReg64(pvRegsBaseKM, RGX_CR_##R));
+#define DDLOG32_DPX(R)  PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, DPX_CR_##R));
+#define DDLOG64_DPX(R)  PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadHWReg64(pvRegsBaseKM, DPX_CR_##R));
+#define DDLOGVAL32(S,V) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, S, V);
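+/* For illustration, DDLOG32(CORE_ID) expands (after REG32_FMTSPEC substitution) to:
+ *   PVR_DUMPDEBUG_LOG("%-30s: 0x%08X", "CORE_ID", OSReadHWReg32(pvRegsBaseKM, RGX_CR_CORE_ID));
+ * i.e. each helper prints one named register read from the current register bank.
+ */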
+
+#if !defined(SUPPORT_TRUSTED_DEVICE)
+#if !defined(NO_HARDWARE)
+static void RGXDumpMIPSState(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                                        void *pvDumpDebugFile,
+                                                        PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM;
+       RGX_MIPS_STATE sMIPSState = {0};
+       PVRSRV_ERROR eError;
+
+       eError = _RGXMipsExtraDebug(psDevInfo, &sMIPSState);
+       PVR_DUMPDEBUG_LOG("---- [ MIPS internal state ] ----");
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DUMPDEBUG_LOG("MIPS extra debug not available");
+       }
+       else
+       {
+               DDLOGVAL32("PC", sMIPSState.ui32ErrorEPC);
+               DDLOGVAL32("STATUS_REGISTER", sMIPSState.ui32StatusRegister);
+               DDLOGVAL32("CAUSE_REGISTER", sMIPSState.ui32CauseRegister);
+               _RGXMipsDumpCauseDecode(pfnDumpDebugPrintf, pvDumpDebugFile,
+                                                               sMIPSState.ui32CauseRegister, sMIPSState.ui32ErrorState);
+               DDLOGVAL32("BAD_REGISTER", sMIPSState.ui32BadRegister);
+               DDLOGVAL32("EPC", sMIPSState.ui32EPC);
+               DDLOGVAL32("SP", sMIPSState.ui32SP);
+               DDLOGVAL32("BAD_INSTRUCTION", sMIPSState.ui32BadInstr);
+               _RGXMipsDumpDebugDecode(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile,
+                                                               sMIPSState.ui32Debug, sMIPSState.ui32DEPC);
+
+               {
+                       IMG_UINT32 ui32Idx;
+
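+                       /* BRN 63553 workaround detection: bCheckBRN63553WA is set when the BRN
+                        * applies and ADDR_REMAP5_CONFIG1 reads back with only its mode-enable bit
+                        * set; any valid TLB entry mapping physical address 0x0 is then reported
+                        * below.
+                        */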
+                       IMG_BOOL bCheckBRN63553WA =
+                               RGX_IS_BRN_SUPPORTED(psDevInfo, 63553) &&
+                               (OSReadHWReg32(pvRegsBaseKM, RGX_CR_MIPS_ADDR_REMAP5_CONFIG1) == (0x0 | RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN));
+
+                       IMG_BOOL bUseRemapRanges = RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) > 32;
+
+                       PVR_DUMPDEBUG_LOG("TLB                           :");
+
+                       for (ui32Idx = 0; ui32Idx < ARRAY_SIZE(sMIPSState.asTLB); ui32Idx++)
+                       {
+                               RGX_MIPS_REMAP_ENTRY *psRemapEntry0 = NULL;
+                               RGX_MIPS_REMAP_ENTRY *psRemapEntry1 = NULL;
+
+                               if (bUseRemapRanges)
+                               {
+                                       psRemapEntry0 = &sMIPSState.asRemap[ui32Idx];
+                                       psRemapEntry1 = &sMIPSState.asRemap[ui32Idx+16];
+                               }
+
+                               _RGXMipsDumpTLBEntry(pfnDumpDebugPrintf,
+                                                                    pvDumpDebugFile,
+                                                                    &sMIPSState.asTLB[ui32Idx],
+                                                                    psRemapEntry0,
+                                                                    psRemapEntry1,
+                                                                    ui32Idx);
+
+                               if (bCheckBRN63553WA)
+                               {
+                                       const RGX_MIPS_TLB_ENTRY *psTLBEntry = &sMIPSState.asTLB[ui32Idx];
+
+                                       #define BRN63553_TLB_IS_NUL(X)  (((X) & RGXMIPSFW_TLB_VALID) && (RGXMIPSFW_TLB_GET_PA(X) == 0x0))
+
+                                       if (BRN63553_TLB_IS_NUL(psTLBEntry->ui32TLBLo0) || BRN63553_TLB_IS_NUL(psTLBEntry->ui32TLBLo1))
+                                       {
+                                               PVR_DUMPDEBUG_LOG("BRN63553 WA present with a valid TLB entry mapping address 0x0.");
+                                       }
+                               }
+                       }
+
+                       /* This implicitly also checks for overlaps between memory and regbank addresses */
+                       _CheckMipsTLBDuplicatePAs(pfnDumpDebugPrintf,
+                                                                         pvDumpDebugFile,
+                                                                         sMIPSState.asTLB,
+                                                                         bUseRemapRanges ? sMIPSState.asRemap : NULL);
+
+                       if (bUseRemapRanges)
+                       {
+                               /* Dump unmapped address if it was dumped in FW, otherwise it will be 0 */
+                               if (sMIPSState.ui32UnmappedAddress)
+                               {
+                                       PVR_DUMPDEBUG_LOG("Remap unmapped address => 0x%08X",
+                                                                         sMIPSState.ui32UnmappedAddress);
+                               }
+                       }
+               }
+
+               /* Check FW code corruption in case of known errors */
+               if (_IsFWCodeException(RGXMIPSFW_C0_CAUSE_EXCCODE(sMIPSState.ui32CauseRegister)))
+               {
+                       eError = _ValidateFWImage(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DUMPDEBUG_LOG("Failed to validate any FW code corruption");
+                       }
+               }
+       }
+       PVR_DUMPDEBUG_LOG("--------------------------------");
+}
+#endif
+#endif /* !defined(SUPPORT_TRUSTED_DEVICE) */
+
+static PVRSRV_ERROR RGXDumpRISCVState(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                                                         void *pvDumpDebugFile,
+                                                                         PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM;
+       RGXRISCVFW_STATE sRiscvState;
+       const IMG_CHAR *pszException;
+       PVRSRV_ERROR eError;
+
+       DDLOG64(FWCORE_MEM_CAT_BASE0);
+       DDLOG64(FWCORE_MEM_CAT_BASE1);
+       DDLOG64(FWCORE_MEM_CAT_BASE2);
+       DDLOG64(FWCORE_MEM_CAT_BASE3);
+       DDLOG64(FWCORE_MEM_CAT_BASE4);
+       DDLOG64(FWCORE_MEM_CAT_BASE5);
+       DDLOG64(FWCORE_MEM_CAT_BASE6);
+       DDLOG64(FWCORE_MEM_CAT_BASE7);
+
+       /* Limit dump to what is currently being used */
+       DDLOG64(FWCORE_ADDR_REMAP_CONFIG4);
+       DDLOG64(FWCORE_ADDR_REMAP_CONFIG5);
+       DDLOG64(FWCORE_ADDR_REMAP_CONFIG6);
+       DDLOG64(FWCORE_ADDR_REMAP_CONFIG12);
+       DDLOG64(FWCORE_ADDR_REMAP_CONFIG13);
+       DDLOG64(FWCORE_ADDR_REMAP_CONFIG14);
+
+       DDLOG32(FWCORE_MEM_FAULT_MMU_STATUS);
+       DDLOG64(FWCORE_MEM_FAULT_REQ_STATUS);
+       DDLOG32(FWCORE_MEM_MMU_STATUS);
+       DDLOG32(FWCORE_MEM_READS_EXT_STATUS);
+       DDLOG32(FWCORE_MEM_READS_INT_STATUS);
+
+       PVR_DUMPDEBUG_LOG("---- [ RISC-V internal state ] ----");
+
+#if defined(SUPPORT_VALIDATION) || defined(SUPPORT_RISCV_GDB)
+       if (RGXRiscvIsHalted(psDevInfo))
+       {
+               /* Avoid resuming the RISC-V FW as most operations
+                * on the debug module require a halted core */
+               PVR_DUMPDEBUG_LOG("(skipping as RISC-V found halted)");
+               return PVRSRV_OK;
+       }
+#endif
+
+       eError = RGXRiscvHalt(psDevInfo);
+       PVR_GOTO_IF_ERROR(eError, _RISCVDMError);
+
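+       /* Each X(name, address) entry of RGXRISCVFW_DEBUG_DUMP_REGISTERS expands below into a
+        * debug-module register read into sRiscvState.name followed by a DDLOGVAL32 print,
+        * bailing out to _RISCVDMError if any read fails.
+        */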
+#define X(name, address)                                                                                               \
+       eError = RGXRiscvReadReg(psDevInfo, address, &sRiscvState.name);        \
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXRiscvReadReg", _RISCVDMError);        \
+       DDLOGVAL32(#name, sRiscvState.name);
+
+       RGXRISCVFW_DEBUG_DUMP_REGISTERS
+#undef X
+
+       eError = RGXRiscvResume(psDevInfo);
+       PVR_GOTO_IF_ERROR(eError, _RISCVDMError);
+
+       pszException = _GetRISCVException(sRiscvState.mcause);
+       if (pszException != NULL)
+       {
+               PVR_DUMPDEBUG_LOG("RISC-V FW hit an exception: %s", pszException);
+
+               eError = _ValidateFWImage(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DUMPDEBUG_LOG("Failed to validate any FW code corruption");
+               }
+       }
+
+       return PVRSRV_OK;
+
+_RISCVDMError:
+       PVR_DPF((PVR_DBG_ERROR, "Failed to communicate with the Debug Module"));
+
+       return eError;
+}
+
+PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                                                void *pvDumpDebugFile,
+                                                                PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       IMG_UINT32   ui32Meta = (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) ? RGX_GET_FEATURE_VALUE(psDevInfo, META) : 0;
+       IMG_UINT32   ui32TACycles, ui323DCycles, ui32TAOr3DCycles, ui32TAAnd3DCycles;
+       IMG_UINT32   ui32RegVal;
+       IMG_BOOL     bFirmwarePerf;
+       IMG_BOOL     bS7Infra = RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE);
+       IMG_BOOL     bMulticore = RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT);
+       void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM;
+       PVRSRV_ERROR eError;
+
+       PVR_DUMPDEBUG_LOG("------[ RGX registers ]------");
+       PVR_DUMPDEBUG_LOG("RGX Register Base Address (Linear):   0x%p", psDevInfo->pvRegsBaseKM);
+       PVR_DUMPDEBUG_LOG("RGX Register Base Address (Physical): 0x%08lX", (unsigned long)psDevInfo->sRegsPhysBase.uiAddr);
+
+       /* Check if firmware perf was set at Init time */
+       bFirmwarePerf = (psDevInfo->psRGXFWIfSysInit->eFirmwarePerf != FW_PERF_CONF_NONE);
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBVNC_COREID_REG))
+       {
+               DDLOG64(CORE_ID);
+       }
+       else
+       {
+               DDLOG32(CORE_ID);
+       }
+       DDLOG32(CORE_REVISION);
+       DDLOG32(DESIGNER_REV_FIELD1);
+       DDLOG32(DESIGNER_REV_FIELD2);
+       DDLOG64(CHANGESET_NUMBER);
+       if (ui32Meta)
+       {
+               DDLOG32(META_SP_MSLVIRQSTATUS);
+       }
+
+       if (bMulticore)
+       {
+               DDLOG32(MULTICORE_SYSTEM);
+               DDLOG32(MULTICORE_GPU);
+       }
+
+       DDLOG64(CLK_CTRL);
+       DDLOG64(CLK_STATUS);
+       DDLOG64(CLK_CTRL2);
+       DDLOG64(CLK_STATUS2);
+
+       if (bS7Infra)
+       {
+               DDLOG64(CLK_XTPLUS_CTRL);
+               DDLOG64(CLK_XTPLUS_STATUS);
+       }
+       DDLOG32(EVENT_STATUS);
+       DDLOG64(TIMER);
+       if (bS7Infra)
+       {
+               DDLOG64(MMU_FAULT_STATUS);
+               DDLOG64(MMU_FAULT_STATUS_META);
+       }
+       else
+       {
+               DDLOG32(BIF_FAULT_BANK0_MMU_STATUS);
+               DDLOG64(BIF_FAULT_BANK0_REQ_STATUS);
+               DDLOG32(BIF_FAULT_BANK1_MMU_STATUS);
+               DDLOG64(BIF_FAULT_BANK1_REQ_STATUS);
+       }
+       DDLOG32(BIF_MMU_STATUS);
+       DDLOG32(BIF_MMU_ENTRY);
+       DDLOG64(BIF_MMU_ENTRY_STATUS);
+
+       if (bS7Infra)
+       {
+               DDLOG32(BIF_JONES_OUTSTANDING_READ);
+               DDLOG32(BIF_BLACKPEARL_OUTSTANDING_READ);
+               DDLOG32(BIF_DUST_OUTSTANDING_READ);
+       }
+       else
+       {
+               if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE)))
+               {
+                       DDLOG32(BIF_STATUS_MMU);
+                       DDLOG32(BIF_READS_EXT_STATUS);
+                       DDLOG32(BIF_READS_INT_STATUS);
+               }
+               DDLOG32(BIFPM_STATUS_MMU);
+               DDLOG32(BIFPM_READS_EXT_STATUS);
+               DDLOG32(BIFPM_READS_INT_STATUS);
+       }
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT))
+       {
+               DDLOG64(CONTEXT_MAPPING0);
+               DDLOG64(CONTEXT_MAPPING1);
+               DDLOG64(CONTEXT_MAPPING2);
+               DDLOG64(CONTEXT_MAPPING3);
+               DDLOG64(CONTEXT_MAPPING4);
+       }
+       else
+       {
+               DDLOG64(BIF_CAT_BASE_INDEX);
+               DDLOG64(BIF_CAT_BASE0);
+               DDLOG64(BIF_CAT_BASE1);
+               DDLOG64(BIF_CAT_BASE2);
+               DDLOG64(BIF_CAT_BASE3);
+               DDLOG64(BIF_CAT_BASE4);
+               DDLOG64(BIF_CAT_BASE5);
+               DDLOG64(BIF_CAT_BASE6);
+               DDLOG64(BIF_CAT_BASE7);
+       }
+
+       DDLOG32(BIF_CTRL_INVAL);
+       DDLOG32(BIF_CTRL);
+
+       DDLOG64(BIF_PM_CAT_BASE_VCE0);
+       DDLOG64(BIF_PM_CAT_BASE_TE0);
+       DDLOG64(BIF_PM_CAT_BASE_ALIST0);
+       DDLOG64(BIF_PM_CAT_BASE_VCE1);
+       DDLOG64(BIF_PM_CAT_BASE_TE1);
+       DDLOG64(BIF_PM_CAT_BASE_ALIST1);
+
+       if (bMulticore)
+       {
+               DDLOG32(MULTICORE_GEOMETRY_CTRL_COMMON);
+               DDLOG32(MULTICORE_FRAGMENT_CTRL_COMMON);
+               DDLOG32(MULTICORE_COMPUTE_CTRL_COMMON);
+       }
+
+       DDLOG32(PERF_TA_PHASE);
+       DDLOG32(PERF_TA_CYCLE);
+       DDLOG32(PERF_3D_PHASE);
+       DDLOG32(PERF_3D_CYCLE);
+
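+       /* Cycles where TA and 3D were both active are derived by inclusion-exclusion:
+        * TA_AND_3D = TA + 3D - TA_OR_3D, clamped to zero if the counters are inconsistent.
+        */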
+       ui32TACycles = OSReadHWReg32(pvRegsBaseKM, RGX_CR_PERF_TA_CYCLE);
+       ui323DCycles = OSReadHWReg32(pvRegsBaseKM, RGX_CR_PERF_3D_CYCLE);
+       ui32TAOr3DCycles = OSReadHWReg32(pvRegsBaseKM, RGX_CR_PERF_TA_OR_3D_CYCLE);
+       ui32TAAnd3DCycles = ((ui32TACycles + ui323DCycles) > ui32TAOr3DCycles) ? (ui32TACycles + ui323DCycles - ui32TAOr3DCycles) : 0;
+       DDLOGVAL32("PERF_TA_OR_3D_CYCLE", ui32TAOr3DCycles);
+       DDLOGVAL32("PERF_TA_AND_3D_CYCLE", ui32TAAnd3DCycles);
+
+       DDLOG32(PERF_COMPUTE_PHASE);
+       DDLOG32(PERF_COMPUTE_CYCLE);
+
+       DDLOG32(PM_PARTIAL_RENDER_ENABLE);
+
+       DDLOG32(ISP_RENDER);
+       DDLOG64(TLA_STATUS);
+       DDLOG64(MCU_FENCE);
+
+       DDLOG32(VDM_CONTEXT_STORE_STATUS);
+       DDLOG64(VDM_CONTEXT_STORE_TASK0);
+       DDLOG64(VDM_CONTEXT_STORE_TASK1);
+       DDLOG64(VDM_CONTEXT_STORE_TASK2);
+       DDLOG64(VDM_CONTEXT_RESUME_TASK0);
+       DDLOG64(VDM_CONTEXT_RESUME_TASK1);
+       DDLOG64(VDM_CONTEXT_RESUME_TASK2);
+
+       DDLOG32(ISP_CTL);
+       DDLOG32(ISP_STATUS);
+       DDLOG32(MTS_INTCTX);
+       DDLOG32(MTS_BGCTX);
+       DDLOG32(MTS_BGCTX_COUNTED_SCHEDULE);
+       DDLOG32(MTS_SCHEDULE);
+       DDLOG32(MTS_GPU_INT_STATUS);
+
+       DDLOG32(CDM_CONTEXT_STORE_STATUS);
+       DDLOG64(CDM_CONTEXT_PDS0);
+       DDLOG64(CDM_CONTEXT_PDS1);
+       DDLOG64(CDM_TERMINATE_PDS);
+       DDLOG64(CDM_TERMINATE_PDS1);
+
+       if (RGX_IS_ERN_SUPPORTED(psDevInfo, 47025))
+       {
+               DDLOG64(CDM_CONTEXT_LOAD_PDS0);
+               DDLOG64(CDM_CONTEXT_LOAD_PDS1);
+       }
+
+       if (bS7Infra)
+       {
+               DDLOG32(JONES_IDLE);
+       }
+
+       DDLOG32(SIDEKICK_IDLE);
+
+       if (!bS7Infra)
+       {
+               DDLOG32(SLC_IDLE);
+               DDLOG32(SLC_STATUS0);
+               DDLOG64(SLC_STATUS1);
+
+               if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_BANKS) && RGX_GET_FEATURE_VALUE(psDevInfo, SLC_BANKS))
+               {
+                       DDLOG64(SLC_STATUS2);
+               }
+
+               DDLOG32(SLC_CTRL_BYPASS);
+               DDLOG64(SLC_CTRL_MISC);
+       }
+       else
+       {
+               DDLOG32(SLC3_IDLE);
+               DDLOG64(SLC3_STATUS);
+               DDLOG32(SLC3_FAULT_STOP_STATUS);
+       }
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ROGUEXE) &&
+               RGX_IS_FEATURE_SUPPORTED(psDevInfo, WATCHDOG_TIMER))
+       {
+               DDLOG32(SAFETY_EVENT_STATUS__ROGUEXE);
+               DDLOG32(MTS_SAFETY_EVENT_ENABLE__ROGUEXE);
+       }
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, WATCHDOG_TIMER))
+       {
+               DDLOG32(FWCORE_WDT_CTRL);
+       }
+
+       if (PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0)
+       {
+               DDLOG32(SCRATCH0);
+               DDLOG32(SCRATCH1);
+               DDLOG32(SCRATCH2);
+               DDLOG32(SCRATCH3);
+               DDLOG32(SCRATCH4);
+               DDLOG32(SCRATCH5);
+               DDLOG32(SCRATCH6);
+               DDLOG32(SCRATCH7);
+               DDLOG32(SCRATCH8);
+               DDLOG32(SCRATCH9);
+               DDLOG32(SCRATCH10);
+               DDLOG32(SCRATCH11);
+               DDLOG32(SCRATCH12);
+               DDLOG32(SCRATCH13);
+               DDLOG32(SCRATCH14);
+               DDLOG32(SCRATCH15);
+       }
+
+       if (ui32Meta)
+       {
+               IMG_BOOL bIsT0Enabled = IMG_FALSE, bIsFWFaulted = IMG_FALSE;
+
+               /* Forcing bit 6 of MslvCtrl1 to 0 to avoid internal reg read going through the core */
+               OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL1, 0x0);
+
+               eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T0ENABLE_OFFSET, &ui32RegVal);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError);
+               DDLOGVAL32("T0 TXENABLE", ui32RegVal);
+               if (ui32RegVal & META_CR_TXENABLE_ENABLE_BIT)
+               {
+                       bIsT0Enabled = IMG_TRUE;
+               }
+
+               eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T0STATUS_OFFSET, &ui32RegVal);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError);
+               DDLOGVAL32("T0 TXSTATUS", ui32RegVal);
+
+               /* check for FW fault */
+               if (((ui32RegVal >> 20) & 0x3) == 0x2)
+               {
+                       bIsFWFaulted = IMG_TRUE;
+               }
+
+               eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T0DEFR_OFFSET, &ui32RegVal);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError);
+               DDLOGVAL32("T0 TXDEFR", ui32RegVal);
+
+               eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_PC, &ui32RegVal);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError);
+               DDLOGVAL32("T0 PC", ui32RegVal);
+
+               eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_PCX, &ui32RegVal);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError);
+               DDLOGVAL32("T0 PCX", ui32RegVal);
+
+               eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_SP, &ui32RegVal);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError);
+               DDLOGVAL32("T0 SP", ui32RegVal);
+
+               if ((ui32Meta == MTP218) || (ui32Meta == MTP219))
+               {
+                       eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T1ENABLE_OFFSET, &ui32RegVal);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError);
+                       DDLOGVAL32("T1 TXENABLE", ui32RegVal);
+
+                       eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T1STATUS_OFFSET, &ui32RegVal);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError);
+                       DDLOGVAL32("T1 TXSTATUS", ui32RegVal);
+
+                       eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T1DEFR_OFFSET, &ui32RegVal);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError);
+                       DDLOGVAL32("T1 TXDEFR", ui32RegVal);
+
+                       eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_PC, &ui32RegVal);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError);
+                       DDLOGVAL32("T1 PC", ui32RegVal);
+
+                       eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_PCX, &ui32RegVal);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError);
+                       DDLOGVAL32("T1 PCX", ui32RegVal);
+
+                       eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_SP, &ui32RegVal);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError);
+                       DDLOGVAL32("T1 SP", ui32RegVal);
+               }
+
+               if (bFirmwarePerf)
+               {
+                       eError = RGXReadFWModuleAddr(psDevInfo, META_CR_PERF_COUNT0, &ui32RegVal);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError);
+                       DDLOGVAL32("PERF_COUNT0", ui32RegVal);
+
+                       eError = RGXReadFWModuleAddr(psDevInfo, META_CR_PERF_COUNT1, &ui32RegVal);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError);
+                       DDLOGVAL32("PERF_COUNT1", ui32RegVal);
+               }
+
+               if (bIsT0Enabled && bIsFWFaulted)
+               {
+                       eError = _ValidateFWImage(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DUMPDEBUG_LOG("Failed to check the FW image for code corruption");
+                       }
+               }
+               else if (bIsFWFaulted)
+               {
+                       PVR_DUMPDEBUG_LOG("Skipping FW code memory corruption checking as META is disabled");
+               }
+       }
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+       {
+               DDLOG32(MIPS_ADDR_REMAP1_CONFIG1);
+               DDLOG64(MIPS_ADDR_REMAP1_CONFIG2);
+               DDLOG32(MIPS_ADDR_REMAP2_CONFIG1);
+               DDLOG64(MIPS_ADDR_REMAP2_CONFIG2);
+               DDLOG32(MIPS_ADDR_REMAP3_CONFIG1);
+               DDLOG64(MIPS_ADDR_REMAP3_CONFIG2);
+               DDLOG32(MIPS_ADDR_REMAP4_CONFIG1);
+               DDLOG64(MIPS_ADDR_REMAP4_CONFIG2);
+               DDLOG32(MIPS_ADDR_REMAP5_CONFIG1);
+               DDLOG64(MIPS_ADDR_REMAP5_CONFIG2);
+               DDLOG64(MIPS_WRAPPER_CONFIG);
+               DDLOG32(MIPS_EXCEPTION_STATUS);
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+               PVR_DUMPDEBUG_LOG("MIPS extra debug not available with SUPPORT_TRUSTED_DEVICE.");
+#elif !defined(NO_HARDWARE)
+               RGXDumpMIPSState(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo);
+#endif
+       }
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+       {
+               eError = RGXDumpRISCVState(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo);
+               PVR_RETURN_IF_ERROR(eError);
+       }
+
+       return PVRSRV_OK;
+
+_METASPError:
+       PVR_DUMPDEBUG_LOG("Dump Slave Port debug information");
+       _RGXDumpMetaSPExtraDebugInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo);
+
+       return eError;
+}
+
+#undef REG32_FMTSPEC
+#undef REG64_FMTSPEC
+#undef DDLOG32
+#undef DDLOG64
+#undef DDLOG32_DPX
+#undef DDLOG64_DPX
+#undef DDLOGVAL32
+
+/*!
+*******************************************************************************
+
+ @Function     RGXDebugRequestProcess
+
+ @Description
+
+ This function prints out the debug information for the specified verbosity level
+
+ @Input pfnDumpDebugPrintf  - Optional replacement print function
+ @Input pvDumpDebugFile     - Optional file identifier to be passed to the
+                              'printf' function if required
+ @Input psDevInfo           - RGX device info
+ @Input ui32VerbLevel       - Verbosity level
+
+ @Return   void
+
+******************************************************************************/
+static
+void RGXDebugRequestProcess(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                               void *pvDumpDebugFile,
+                               PVRSRV_RGXDEV_INFO *psDevInfo,
+                               IMG_UINT32 ui32VerbLevel)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+       PVRSRV_DEV_POWER_STATE  ePowerState;
+       IMG_BOOL                bRGXPoweredON;
+       IMG_UINT8               ui8FwOsCount;
+       RGXFWIF_TRACEBUF        *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl;
+       const RGXFWIF_OSDATA    *psFwOsData = psDevInfo->psRGXFWIfFwOsData;
+       IMG_BOOL                bPwrLockAlreadyHeld;
+
+       bPwrLockAlreadyHeld = PVRSRVPwrLockIsLockedByMe(psDeviceNode);
+       if (!bPwrLockAlreadyHeld)
+       {
+               /* Only acquire the power-lock if not already held by the calling context */
+               eError = PVRSRVPowerLock(psDeviceNode);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire lock (%s)",
+                                       __func__,
+                                       PVRSRVGetErrorString(eError)));
+                       return;
+               }
+       }
+
+       ui8FwOsCount = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.sInitOptions.ui8OsCountSupport;
+
+       eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Error retrieving RGX power state. No debug info dumped.",
+                               __func__));
+               goto Exit;
+       }
+
+       if ((PVRSRV_VZ_MODE_IS(NATIVE) && (ui8FwOsCount > 1)) ||
+               (PVRSRV_VZ_MODE_IS(HOST) && (ui8FwOsCount != RGX_NUM_OS_SUPPORTED)))
+       {
+               PVR_DUMPDEBUG_LOG("Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)",
+                                                 (PVRSRV_VZ_MODE_IS(NATIVE)) ? (1) : (RGX_NUM_OS_SUPPORTED), ui8FwOsCount);
+       }
+
+       PVR_DUMPDEBUG_LOG("------[ RGX Device ID:%d Start ]------", psDevInfo->psDeviceNode->sDevId.ui32InternalID);
+
+       bRGXPoweredON = (ePowerState == PVRSRV_DEV_POWER_STATE_ON);
+
+       PVR_DUMPDEBUG_LOG("------[ RGX Info ]------");
+       PVR_DUMPDEBUG_LOG("Device Node (Info): %p (%p)", psDevInfo->psDeviceNode, psDevInfo);
+       PVR_DUMPDEBUG_LOG("RGX BVNC: %d.%d.%d.%d (%s)", psDevInfo->sDevFeatureCfg.ui32B,
+                                                                                          psDevInfo->sDevFeatureCfg.ui32V,
+                                                                                          psDevInfo->sDevFeatureCfg.ui32N,
+                                                                                          psDevInfo->sDevFeatureCfg.ui32C,
+                                                                                          PVR_ARCH_NAME);
+       PVR_DUMPDEBUG_LOG("RGX Device State: %s", _RGXGetDebugDevStateString(psDeviceNode->eDevState));
+       PVR_DUMPDEBUG_LOG("RGX Power State: %s", _RGXGetDebugDevPowerStateString(ePowerState));
+       if (psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)
+       {
+               PVR_DUMP_FIRMWARE_INFO(psDevInfo->psRGXFWIfOsInit->sRGXCompChecks);
+       }
+       else
+       {
+               PVR_DUMPDEBUG_LOG("FW info: UNINITIALIZED");
+       }
+
+       RGXDumpRGXDebugSummary(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, bRGXPoweredON);
+
+       /* Dump out the kernel CCB. */
+       {
+               const RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl;
+
+               if (psKCCBCtl != NULL)
+               {
+                       PVR_DUMPDEBUG_LOG("RGX Kernel CCB WO:0x%X RO:0x%X",
+                                                         psKCCBCtl->ui32WriteOffset,
+                                                         psKCCBCtl->ui32ReadOffset);
+               }
+       }
+
+       /* Dump out the firmware CCB. */
+       {
+               const RGXFWIF_CCB_CTL *psFCCBCtl = psDevInfo->psFirmwareCCBCtl;
+
+               if (psFCCBCtl != NULL)
+               {
+                       PVR_DUMPDEBUG_LOG("RGX Firmware CCB WO:0x%X RO:0x%X",
+                                                          psFCCBCtl->ui32WriteOffset,
+                                                          psFCCBCtl->ui32ReadOffset);
+               }
+       }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       /* Dump out the Workload estimation CCB. */
+       {
+           const RGXFWIF_CCB_CTL *psWorkEstCCBCtl = psDevInfo->psWorkEstFirmwareCCBCtl;
+
+           if (psWorkEstCCBCtl != NULL)
+           {
+               PVR_DUMPDEBUG_LOG("RGX WorkEst CCB WO:0x%X RO:0x%X",
+                                  psWorkEstCCBCtl->ui32WriteOffset,
+                                  psWorkEstCCBCtl->ui32ReadOffset);
+           }
+       }
+#endif
+
+
+       if (psFwOsData != NULL)
+       {
+               /* Dump the KCCB commands executed */
+               PVR_DUMPDEBUG_LOG("RGX Kernel CCB commands executed = %d",
+                                                 psFwOsData->ui32KCCBCmdsExecuted);
+
+#if defined(PVRSRV_STALLED_CCB_ACTION)
+               /* Dump the number of times we have performed a forced UFO update,
+                * and (if non-zero) the timestamp of the most recent occurrence.
+                */
+               PVR_DUMPDEBUG_LOG("RGX SLR: Forced UFO updates requested = %d",
+                                                 psFwOsData->ui32ForcedUpdatesRequested);
+               if (psFwOsData->ui32ForcedUpdatesRequested > 0)
+               {
+                       IMG_UINT8 ui8Idx;
+                       IMG_UINT64 ui64Seconds, ui64Nanoseconds;
+
+                       if (psFwOsData->ui64LastForcedUpdateTime > 0ULL)
+                       {
+                               ConvertOSTimestampToSAndNS(psFwOsData->ui64LastForcedUpdateTime, &ui64Seconds, &ui64Nanoseconds);
+                               PVR_DUMPDEBUG_LOG("RGX SLR: (most recent forced update was around %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC ")",
+                                                                 ui64Seconds, ui64Nanoseconds);
+                       }
+                       else
+                       {
+                               PVR_DUMPDEBUG_LOG("RGX SLR: (unable to force update as fence contained no sync checkpoints)");
+                       }
+                       /* Dump SLR log */
+                       if (psFwOsData->sSLRLogFirst.aszCCBName[0])
+                       {
+                               ConvertOSTimestampToSAndNS(psFwOsData->sSLRLogFirst.ui64Timestamp, &ui64Seconds, &ui64Nanoseconds);
+                               PVR_DUMPDEBUG_LOG("RGX SLR:{%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC
+                                                                 "} Fence found on context 0x%x '%s' has %d UFOs",
+                                                                 ui64Seconds, ui64Nanoseconds,
+                                                                 psFwOsData->sSLRLogFirst.ui32FWCtxAddr,
+                                                                 psFwOsData->sSLRLogFirst.aszCCBName,
+                                                                 psFwOsData->sSLRLogFirst.ui32NumUFOs);
+                       }
+                       for (ui8Idx=0; ui8Idx<PVR_SLR_LOG_ENTRIES;ui8Idx++)
+                       {
+                               if (psFwOsData->sSLRLog[ui8Idx].aszCCBName[0])
+                               {
+                                       ConvertOSTimestampToSAndNS(psFwOsData->sSLRLog[ui8Idx].ui64Timestamp, &ui64Seconds, &ui64Nanoseconds);
+                                       PVR_DUMPDEBUG_LOG("RGX SLR:[%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC
+                                                                         "] Fence found on context 0x%x '%s' has %d UFOs",
+                                                                         ui64Seconds, ui64Nanoseconds,
+                                                                         psFwOsData->sSLRLog[ui8Idx].ui32FWCtxAddr,
+                                                                         psFwOsData->sSLRLog[ui8Idx].aszCCBName,
+                                                                         psFwOsData->sSLRLog[ui8Idx].ui32NumUFOs);
+                               }
+                       }
+               }
+#else
+               PVR_DUMPDEBUG_LOG("RGX SLR: Disabled");
+#endif
+
+               /* Dump the error counts */
+               PVR_DUMPDEBUG_LOG("RGX Errors: WGP:%d, TRP:%d",
+                                                 psDevInfo->sErrorCounts.ui32WGPErrorCount,
+                                                 psDevInfo->sErrorCounts.ui32TRPErrorCount);
+
+               /* Dump the IRQ info for threads or OS IDs */
+#if defined(RGX_FW_IRQ_OS_COUNTERS)
+               /* only Host has access to registers containing IRQ counters */
+               if (!PVRSRV_VZ_MODE_IS(GUEST))
+#endif
+               {
+                       IMG_UINT32 ui32idx;
+
+                       for_each_irq_cnt(ui32idx)
+                       {
+                               IMG_UINT32 ui32IrqCnt;
+
+                               get_irq_cnt_val(ui32IrqCnt, ui32idx, psDevInfo);
+                               if (ui32IrqCnt)
+                               {
+                                       PVR_DUMPDEBUG_LOG(MSG_IRQ_CNT_TYPE "%u: FW IRQ count = %u", ui32idx, ui32IrqCnt);
+#if defined(RGX_FW_IRQ_OS_COUNTERS)
+                                       if (ui32idx == RGXFW_HOST_OS)
+#endif
+                                       {
+                                               PVR_DUMPDEBUG_LOG("Last sampled IRQ count in LISR = %u", psDevInfo->aui32SampleIRQCount[ui32idx]);
+                                       }
+                               }
+                       }
+               }
+       }
+
+       /* Dump the FW Sys config flags on the Host */
+       if (!PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+               IMG_CHAR sFwSysFlagsDescription[MAX_FW_DESCRIPTION_LENGTH];
+
+               if (!psFwSysData)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Fw Sys Data is not mapped into CPU space", __func__));
+                       goto Exit;
+               }
+
+               _GetFwSysFlagsDescription(sFwSysFlagsDescription, MAX_FW_DESCRIPTION_LENGTH, psFwSysData->ui32ConfigFlags);
+               PVR_DUMPDEBUG_LOG("FW System config flags = 0x%08X (%s)", psFwSysData->ui32ConfigFlags, sFwSysFlagsDescription);
+       }
+
+       /* Dump the FW OS config flags */
+       {
+               IMG_CHAR sFwOsFlagsDescription[MAX_FW_DESCRIPTION_LENGTH];
+
+               if (!psFwOsData)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Fw Os Data is not mapped into CPU space", __func__));
+                       goto Exit;
+               }
+
+               _GetFwOsFlagsDescription(sFwOsFlagsDescription, MAX_FW_DESCRIPTION_LENGTH, psFwOsData->ui32FwOsConfigFlags);
+               PVR_DUMPDEBUG_LOG("FW OS config flags = 0x%08X (%s)", psFwOsData->ui32FwOsConfigFlags, sFwOsFlagsDescription);
+       }
+
+       if ((bRGXPoweredON) && !PVRSRV_VZ_MODE_IS(GUEST))
+       {
+
+               eError = RGXDumpRGXRegisters(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: RGXDumpRGXRegisters failed (%s)",
+                                       __func__,
+                                       PVRSRVGetErrorString(eError)));
+               }
+       }
+       else
+       {
+               PVR_DUMPDEBUG_LOG(" (!) %s. No registers dumped", PVRSRV_VZ_MODE_IS(GUEST) ? "Guest Mode of operation" : "RGX power is down");
+       }
+
+       PVR_DUMPDEBUG_LOG("------[ RGX FW Trace Info ]------");
+
+       if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM))
+       {
+               IMG_INT tid;
+               /* Dump FW trace information */
+               if (psRGXFWIfTraceBufCtl != NULL)
+               {
+                       for (tid = 0 ; tid < RGXFW_THREAD_NUM ; tid++)
+                       {
+                               IMG_UINT32      i;
+                               IMG_BOOL        bPrevLineWasZero = IMG_FALSE;
+                               IMG_BOOL        bLineIsAllZeros = IMG_FALSE;
+                               IMG_UINT32      ui32CountLines = 0;
+                               IMG_UINT32      *pui32TraceBuffer;
+                               IMG_CHAR        *pszLine;
+
+                               if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)
+                               {
+                                       PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")",
+                                                                         ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")),
+                                                                         RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType)
+                                                                         );
+                               }
+                               else
+                               {
+                                       PVR_DUMPDEBUG_LOG("Debug log type: none");
+                               }
+
+                               pui32TraceBuffer = psRGXFWIfTraceBufCtl->sTraceBuf[tid].pui32TraceBuffer;
+
+                               /* Skip if trace buffer is not allocated */
+                               if (pui32TraceBuffer == NULL)
+                               {
+                                       PVR_DUMPDEBUG_LOG("RGX FW thread %d: Trace buffer not yet allocated",tid);
+                                       continue;
+                               }
+
+/* Max number of DWords to be printed per line, in debug dump output */
+#define PVR_DD_FW_TRACEBUF_LINESIZE 30U
+                               /* each element in the line is 8 characters plus a space.  The '+ 1' is because of the final trailing '\0'. */
+                               pszLine = OSAllocMem(9 * PVR_DD_FW_TRACEBUF_LINESIZE + 1);
+                               if (pszLine == NULL)
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR,
+                                                       "%s: Out of mem allocating line string (size: %d)",
+                                                       __func__,
+                                                       9 * PVR_DD_FW_TRACEBUF_LINESIZE + 1));
+                                       goto Exit;
+                               }
+
+                               PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace START ]------", tid);
+                               PVR_DUMPDEBUG_LOG("FWT[traceptr]: %X", psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer);
+                               PVR_DUMPDEBUG_LOG("FWT[tracebufsize]: %X", psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords);
+
+                               for (i = 0; i < psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords; i += PVR_DD_FW_TRACEBUF_LINESIZE)
+                               {
+                                       IMG_UINT32 k = 0;
+                                       IMG_UINT32 ui32Line = 0x0;
+                                       IMG_UINT32 ui32LineOffset = i*sizeof(IMG_UINT32);
+                                       IMG_CHAR   *pszBuf = pszLine;
+
+                                       for (k = 0; k < PVR_DD_FW_TRACEBUF_LINESIZE; k++)
+                                       {
+                                               if ((i + k) >= psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords)
+                                               {
+                                                       /* Stop reading when the index goes beyond trace buffer size. This condition is
+                                                        * hit during printing the last line in DD when ui32TraceBufSizeInDWords is not
+                                                        * a multiple of PVR_DD_FW_TRACEBUF_LINESIZE */
+                                                       break;
+                                               }
+
+                                               ui32Line |= pui32TraceBuffer[i + k];
+
+                                               /* append this value to the line; the '+1' accounts for the trailing '\0' added by OSSNPrintf */
+                                               OSSNPrintf(pszBuf, 9 + 1, " %08x", pui32TraceBuffer[i + k]);
+                                               pszBuf += 9; /* write over the '\0' */
+                                       }
+
+                                       bLineIsAllZeros = (ui32Line == 0x0);
+
+                                       if (bLineIsAllZeros)
+                                       {
+                                               if (bPrevLineWasZero)
+                                               {
+                                                       ui32CountLines++;
+                                               }
+                                               else
+                                               {
+                                                       bPrevLineWasZero = IMG_TRUE;
+                                                       ui32CountLines = 1;
+                                                       PVR_DUMPDEBUG_LOG("FWT[%08x]: 00000000 ... 00000000", ui32LineOffset);
+                                               }
+                                       }
+                                       else
+                                       {
+                                               if (bPrevLineWasZero  &&  ui32CountLines > 1)
+                                               {
+                                                       PVR_DUMPDEBUG_LOG("FWT[...]: %d lines were all zero", ui32CountLines);
+                                               }
+                                               bPrevLineWasZero = IMG_FALSE;
+
+                                               PVR_DUMPDEBUG_LOG("FWT[%08x]:%s", ui32LineOffset, pszLine);
+                                       }
+
+                               }
+                               if (bPrevLineWasZero)
+                               {
+                                       PVR_DUMPDEBUG_LOG("FWT[END]: %d lines were all zero", ui32CountLines);
+                               }
+
+                               PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace END ]------", tid);
+
+                               OSFreeMem(pszLine);
+                       }
+               }
+
+               {
+                       if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH))
+                       {
+                               PVR_DUMPDEBUG_LOG("------[ Full CCB Status ]------");
+                       }
+                       else
+                       {
+                               PVR_DUMPDEBUG_LOG("------[ Stalled FWCtxs ]------");
+                       }
+
+                       DumpTransferCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+
+                       DumpRenderCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+
+                       DumpKickSyncCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+
+                       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE))
+                       {
+                               DumpComputeCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+                       }
+                       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM))
+                       {
+                               DumpTDMTransferCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+                       }
+               }
+       }
+
+       PVR_DUMPDEBUG_LOG("------[ RGX Device ID:%d End ]------", psDevInfo->psDeviceNode->sDevId.ui32InternalID);
+
+Exit:
+       if (!bPwrLockAlreadyHeld)
+       {
+               PVRSRVPowerUnlock(psDeviceNode);
+       }
+}
+
+/*!
+ ******************************************************************************
+
+ @Function     RGXDebugRequestNotify
+
+ @Description Dump the debug data for RGX
+
+ ******************************************************************************/
+static void RGXDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDbgRequestHandle,
+               IMG_UINT32 ui32VerbLevel,
+               DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+               void *pvDumpDebugFile)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = hDbgRequestHandle;
+
+       /* Only action the request if the device has fully initialised */
+       if (psDevInfo->bDevInit2Done)
+       {
+               RGXDebugRequestProcess(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui32VerbLevel);
+       }
+}
+
+PVRSRV_ERROR RGXDebugInit(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       return PVRSRVRegisterDeviceDbgRequestNotify(&psDevInfo->hDbgReqNotify,
+                                                       psDevInfo->psDeviceNode,
+                                                       RGXDebugRequestNotify,
+                                                       DEBUG_REQUEST_RGX,
+                                                       psDevInfo);
+}
+
+PVRSRV_ERROR RGXDebugDeinit(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       if (psDevInfo->hDbgReqNotify)
+       {
+               return PVRSRVUnregisterDeviceDbgRequestNotify(psDevInfo->hDbgReqNotify);
+       }
+
+       /* No notifier registered */
+       return PVRSRV_OK;
+}
+
+/******************************************************************************
+ End of file (rgxdebug.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxdebug.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxdebug.h
new file mode 100644 (file)
index 0000000..f163997
--- /dev/null
@@ -0,0 +1,229 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX debug header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX debugging functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXDEBUG_H)
+#define RGXDEBUG_H
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "device.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "rgxdevice.h"
+
+/**
+ * Debug utility for printing the FW IRQ count and the last sampled IRQ count
+ * in the LISR for each RGX FW thread.
+ * It takes a pointer to PVRSRV_RGXDEV_INFO as input.
+ */
+
+#if defined(RGX_FW_IRQ_OS_COUNTERS)
+#define for_each_irq_cnt(ui32idx) \
+       for (ui32idx = 0; ui32idx < RGX_NUM_OS_SUPPORTED; ui32idx++)
+
+#define get_irq_cnt_val(ui32Dest, ui32idx, psRgxDevInfo) \
+       do { \
+               extern const IMG_UINT32 gaui32FwOsIrqCntRegAddr[RGXFW_MAX_NUM_OS]; \
+               ui32Dest = PVRSRV_VZ_MODE_IS(GUEST) ? 0 : OSReadHWReg32((psRgxDevInfo)->pvRegsBaseKM, gaui32FwOsIrqCntRegAddr[ui32idx]); \
+       } while (false)
+
+#define MSG_IRQ_CNT_TYPE "OS"
+
+#else
+
+#define for_each_irq_cnt(ui32idx) \
+       for (ui32idx = 0; ui32idx < RGXFW_THREAD_NUM; ui32idx++)
+
+#define get_irq_cnt_val(ui32Dest, ui32idx, psRgxDevInfo) \
+       ui32Dest = (psRgxDevInfo)->psRGXFWIfFwOsData->aui32InterruptCount[ui32idx]
+
+#define MSG_IRQ_CNT_TYPE "Thread"
+#endif /* RGX_FW_IRQ_OS_COUNTERS */
+
+static inline void RGXDEBUG_PRINT_IRQ_COUNT(PVRSRV_RGXDEV_INFO* psRgxDevInfo)
+{
+#if defined(PVRSRV_NEED_PVR_DPF) && defined(DEBUG)
+       IMG_UINT32 ui32idx;
+
+       for_each_irq_cnt(ui32idx)
+       {
+               IMG_UINT32 ui32IrqCnt;
+
+               get_irq_cnt_val(ui32IrqCnt, ui32idx, psRgxDevInfo);
+
+               PVR_DPF((DBGPRIV_VERBOSE, MSG_IRQ_CNT_TYPE
+                        " %u FW IRQ count = %u", ui32idx, ui32IrqCnt));
+
+#if defined(RGX_FW_IRQ_OS_COUNTERS)
+               if (ui32idx == RGXFW_HOST_OS)
+#endif
+               {
+                       PVR_DPF((DBGPRIV_VERBOSE, "Last sampled IRQ count in LISR = %u",
+                               (psRgxDevInfo)->aui32SampleIRQCount[ui32idx]));
+               }
+       }
+#endif /* PVRSRV_NEED_PVR_DPF */
+}
+
+extern const IMG_CHAR * const gapszMipsPermissionPTFlags[4];
+extern const IMG_CHAR * const gapszMipsCoherencyPTFlags[8];
+extern const IMG_CHAR * const gapszMipsDirtyGlobalValidPTFlags[8];
+/*!
+*******************************************************************************
+
+ @Function     RGXDumpRGXRegisters
+
+ @Description
+
+ Dumps an extensive list of RGX registers required for debugging
+
+ @Input pfnDumpDebugPrintf  - Optional replacement print function
+ @Input pvDumpDebugFile     - Optional file identifier to be passed to the
+                              'printf' function if required
+ @Input psDevInfo           - RGX device info
+
+ @Return PVRSRV_ERROR         PVRSRV_OK on success, error code otherwise
+
+******************************************************************************/
+PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                                                void *pvDumpDebugFile,
+                                                                PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function     RGXDumpFirmwareTrace
+
+ @Description
+
+ Dumps the decoded version of the firmware trace buffer.
+
+ @Input pfnDumpDebugPrintf  - Optional replacement print function
+ @Input pvDumpDebugFile     - Optional file identifier to be passed to the
+                              'printf' function if required
+ @Input psDevInfo           - RGX device info
+
+ @Return   void
+
+******************************************************************************/
+void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                               void *pvDumpDebugFile,
+                               PVRSRV_RGXDEV_INFO  *psDevInfo);
+
+#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS)
+void RGXDumpPowerMonitoring(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                               void *pvDumpDebugFile,
+                               PVRSRV_RGXDEV_INFO  *psDevInfo);
+#endif
+
+#if defined(SUPPORT_FW_VIEW_EXTRA_DEBUG)
+/*!
+*******************************************************************************
+
+ @Function     ValidateFWOnLoad
+
+ @Description  Compare the Firmware image as seen from the CPU point of view
+               against the same memory area as seen from the firmware point
+               of view after first power up.
+
+ @Input        psDevInfo - Device Info
+
+ @Return       PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR ValidateFWOnLoad(PVRSRV_RGXDEV_INFO *psDevInfo);
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function     RGXDumpRGXDebugSummary
+
+ @Description
+
+ Dump a summary in human readable form with the RGX state
+
+ @Input pfnDumpDebugPrintf   - The debug printf function
+ @Input pvDumpDebugFile      - Optional file identifier to be passed to the
+                               'printf' function if required
+ @Input psDevInfo           - RGX device info
+ @Input bRGXPoweredON        - IMG_TRUE if RGX device is on
+
+ @Return   void
+
+******************************************************************************/
+void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                       void *pvDumpDebugFile,
+                                       PVRSRV_RGXDEV_INFO *psDevInfo,
+                                       IMG_BOOL bRGXPoweredON);
+
+/*!
+*******************************************************************************
+
+ @Function RGXDebugInit
+
+ @Description
+
+ Sets up debug requests; calls into PVRSRVRegisterDeviceDbgRequestNotify
+
+ @Input          psDevInfo            RGX device info
+ @Return         PVRSRV_ERROR         PVRSRV_OK on success otherwise an error
+
+******************************************************************************/
+PVRSRV_ERROR RGXDebugInit(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function RGXDebugDeinit
+
+ @Description
+
+ Removes debug requests; calls into PVRSRVUnregisterDeviceDbgRequestNotify
+
+ @Input          psDevInfo            RGX device info
+ @Return         PVRSRV_ERROR         PVRSRV_OK on success otherwise an error
+
+******************************************************************************/
+PVRSRV_ERROR RGXDebugDeinit(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+#endif /* RGXDEBUG_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxdevice.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxdevice.h
new file mode 100644 (file)
index 0000000..4ebbd29
--- /dev/null
@@ -0,0 +1,828 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX device node header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX device node
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXDEVICE_H)
+#define RGXDEVICE_H
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_device_types.h"
+#include "mmu_common.h"
+#include "rgx_fwif_km.h"
+#include "cache_ops.h"
+#include "device.h"
+#include "osfunc.h"
+#include "rgxlayer_impl.h"
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "hash.h"
+#endif
+typedef struct _RGX_SERVER_COMMON_CONTEXT_ RGX_SERVER_COMMON_CONTEXT;
+
+typedef struct {
+       DEVMEM_MEMDESC          *psFWFrameworkMemDesc;
+} RGX_COMMON_CONTEXT_INFO;
+
+
+/*!
+ ******************************************************************************
+ * Device state flags
+ *****************************************************************************/
+#define RGXKM_DEVICE_STATE_ZERO_FREELIST                          (0x1)  /*!< Zeroing the physical pages of reconstructed free lists */
+#define RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN                  (0x2)  /*!< Used to disable the Devices Watchdog logging */
+#define RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN              (0x4)  /*!< Used for validation to inject dust requests every TA/3D kick */
+#define RGXKM_DEVICE_STATE_CCB_GROW_EN                            (0x8)  /*!< Used to indicate CCB grow is permitted */
+#define RGXKM_DEVICE_STATE_ENABLE_SPU_UNITS_POWER_MASK_CHANGE_EN  (0x10) /*!< Used for validation to enable SPU power state mask change */
+#define RGXKM_DEVICE_STATE_MASK                                   (0x1F)
+
+/*!
+ ******************************************************************************
+ * ECC RAM Fault Validation
+ *****************************************************************************/
+#define RGXKM_ECC_ERR_INJ_DISABLE 0
+#define RGXKM_ECC_ERR_INJ_SLC     1
+#define RGXKM_ECC_ERR_INJ_USC     2
+#define RGXKM_ECC_ERR_INJ_TPU     3
+#define RGXKM_ECC_ERR_INJ_RASCAL  4
+#define RGXKM_ECC_ERR_INJ_MARS    5
+
+#define RGXKM_ECC_ERR_INJ_INTERVAL 10U
+
+/*!
+ ******************************************************************************
+ * GPU DVFS Table
+ *****************************************************************************/
+
+#define RGX_GPU_DVFS_TABLE_SIZE                      32
+#define RGX_GPU_DVFS_FIRST_CALIBRATION_TIME_US       25000     /* Time required to calibrate a clock frequency the first time */
+#define RGX_GPU_DVFS_TRANSITION_CALIBRATION_TIME_US  150000    /* Time required for a recalibration after a DVFS transition */
+#define RGX_GPU_DVFS_PERIODIC_CALIBRATION_TIME_US    10000000  /* Time before the next periodic calibration and correlation */
+
+/*!
+ ******************************************************************************
+ * Global flags for driver validation
+ *****************************************************************************/
+#define RGX_VAL_KZ_SIG_CHECK_NOERR_EN            (0x10U)  /*!< Enable KZ signature check. Signatures must match */
+#define RGX_VAL_KZ_SIG_CHECK_ERR_EN              (0x20U)  /*!< Enable KZ signature check. Signatures must not match */
+#define RGX_VAL_SIG_CHECK_ERR_EN                 (0U)     /*!< Not supported on Rogue cores */
+
+typedef struct _GPU_FREQ_TRACKING_DATA_
+{
+       /* Core clock speed estimated by the driver */
+       IMG_UINT32 ui32EstCoreClockSpeed;
+
+       /* Amount of successful calculations of the estimated core clock speed */
+       IMG_UINT32 ui32CalibrationCount;
+} GPU_FREQ_TRACKING_DATA;
+
+#if defined(PVRSRV_TIMER_CORRELATION_HISTORY)
+#define RGX_GPU_FREQ_TRACKING_SIZE 16
+
+typedef struct
+{
+       IMG_UINT64 ui64BeginCRTimestamp;
+       IMG_UINT64 ui64BeginOSTimestamp;
+
+       IMG_UINT64 ui64EndCRTimestamp;
+       IMG_UINT64 ui64EndOSTimestamp;
+
+       IMG_UINT32 ui32EstCoreClockSpeed;
+       IMG_UINT32 ui32CoreClockSpeed;
+} GPU_FREQ_TRACKING_HISTORY;
+#endif
+
+typedef struct _RGX_GPU_DVFS_TABLE_
+{
+       /* Beginning of current calibration period (in us) */
+       IMG_UINT64 ui64CalibrationCRTimestamp;
+       IMG_UINT64 ui64CalibrationOSTimestamp;
+
+       /* Calculated calibration period (in us) */
+       IMG_UINT64 ui64CalibrationCRTimediff;
+       IMG_UINT64 ui64CalibrationOSTimediff;
+
+       /* Current calibration period (in us) */
+       IMG_UINT32 ui32CalibrationPeriod;
+
+       /* System layer frequency table and frequency tracking data */
+       IMG_UINT32 ui32FreqIndex;
+       IMG_UINT32 aui32GPUFrequency[RGX_GPU_DVFS_TABLE_SIZE];
+       GPU_FREQ_TRACKING_DATA asTrackingData[RGX_GPU_DVFS_TABLE_SIZE];
+
+#if defined(PVRSRV_TIMER_CORRELATION_HISTORY)
+       IMG_UINT32 ui32HistoryIndex;
+       GPU_FREQ_TRACKING_HISTORY asTrackingHistory[RGX_GPU_FREQ_TRACKING_SIZE];
+#endif
+} RGX_GPU_DVFS_TABLE;
+
+
+/*!
+ ******************************************************************************
+ * GPU utilisation statistics
+ *****************************************************************************/
+
+typedef struct _RGXFWIF_GPU_UTIL_STATS_
+{
+       IMG_BOOL   bValid;                /* If TRUE, statistics are valid.
+                                            FALSE if the driver couldn't get reliable stats. */
+       IMG_UINT64 ui64GpuStatActive;     /* GPU active statistic */
+       IMG_UINT64 ui64GpuStatBlocked;    /* GPU blocked statistic */
+       IMG_UINT64 ui64GpuStatIdle;       /* GPU idle statistic */
+       IMG_UINT64 ui64GpuStatCumulative; /* Sum of active/blocked/idle stats */
+       IMG_UINT64 ui64TimeStamp;         /* Timestamp of the most recent sample of the GPU stats */
+} RGXFWIF_GPU_UTIL_STATS;
+
+
+typedef struct _RGX_REG_CONFIG_
+{
+       IMG_BOOL               bEnabled;
+       RGXFWIF_REG_CFG_TYPE   eRegCfgTypeToPush;
+       IMG_UINT32             ui32NumRegRecords;
+       POS_LOCK               hLock;
+} RGX_REG_CONFIG;
+
+typedef struct _PVRSRV_STUB_PBDESC_ PVRSRV_STUB_PBDESC;
+
+typedef struct
+{
+       IMG_UINT32                      ui32DustCount1;
+       IMG_UINT32                      ui32DustCount2;
+       IMG_BOOL                        bToggle;
+} RGX_DUST_STATE;
+
+typedef struct _PVRSRV_DEVICE_FEATURE_CONFIG_
+{
+       IMG_UINT64 ui64ErnsBrns;
+       IMG_UINT64 ui64Features;
+       IMG_UINT32 ui32B;
+       IMG_UINT32 ui32V;
+       IMG_UINT32 ui32N;
+       IMG_UINT32 ui32C;
+       IMG_UINT32 ui32FeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX];
+       IMG_UINT32 ui32MAXDMCount;
+       IMG_UINT32 ui32MAXDustCount;
+       IMG_UINT32 ui32SLCSizeInBytes;
+       IMG_PCHAR  pszBVNCString;
+} PVRSRV_DEVICE_FEATURE_CONFIG;
+
+/* This is used to get the value of a specific feature.
+ * Note that it will assert if the feature is disabled or its value is invalid. */
+#define RGX_GET_FEATURE_VALUE(psDevInfo, Feature) \
+                       ( psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] )
+
+/* This is used to check whether a feature that carries a value (e.g. an integer) is available for the currently running BVNC */
+#define RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, Feature) \
+                       ( psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] < RGX_FEATURE_VALUE_DISABLED )
+
+/* This is used to check whether a Boolean feature (i.e. one WITHOUT an integer value) is available for the currently running BVNC */
+#define RGX_IS_FEATURE_SUPPORTED(psDevInfo, Feature) \
+                       BITMASK_HAS(psDevInfo->sDevFeatureCfg.ui64Features, RGX_FEATURE_##Feature##_BIT_MASK)
+
+/* This is used to check if the ERN is available for the currently running BVNC or not */
+#define RGX_IS_ERN_SUPPORTED(psDevInfo, ERN) \
+                       BITMASK_HAS(psDevInfo->sDevFeatureCfg.ui64ErnsBrns, HW_ERN_##ERN##_BIT_MASK)
+
+/* This is used to check if the BRN is available for the currently running BVNC or not */
+#define RGX_IS_BRN_SUPPORTED(psDevInfo, BRN) \
+                       BITMASK_HAS(psDevInfo->sDevFeatureCfg.ui64ErnsBrns, FIX_HW_BRN_##BRN##_BIT_MASK)
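+
+/* Illustrative usage sketch of the checks above, mirroring the register dump
+ * code in rgxdebug.c: gate work on a Boolean feature with
+ * RGX_IS_FEATURE_SUPPORTED() and only read a feature value once
+ * RGX_IS_FEATURE_VALUE_SUPPORTED() has confirmed it is valid, e.g.
+ *
+ *     if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_BANKS) &&
+ *         RGX_GET_FEATURE_VALUE(psDevInfo, SLC_BANKS))
+ *     {
+ *             DDLOG64(SLC_STATUS2);
+ *     }
+ */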
+
+/* there is a corresponding define in rgxapi.h */
+#define RGX_MAX_TIMER_QUERIES 16U
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+/*!
+ * The host maintains a 512-deep cache of submitted workloads per device,
+ * i.e. a global look-up table for TA, 3D and compute (depending on the RGX
+ * hardware support present)
+ */
+
+/*
+ * For the workload estimation return data array, the maximum number of
+ * commands the MTS can have is 255, so 512 (LOG2 = 9) is large enough to
+ * account for all corner cases.
+ */
+#define RETURN_DATA_ARRAY_SIZE_LOG2 (9)
+#define RETURN_DATA_ARRAY_SIZE      ((1U) << RETURN_DATA_ARRAY_SIZE_LOG2)
+#define RETURN_DATA_ARRAY_WRAP_MASK (RETURN_DATA_ARRAY_SIZE - 1)
+
+#define WORKLOAD_HASH_SIZE_LOG2                6
+#define WORKLOAD_HASH_SIZE                     ((1U) << WORKLOAD_HASH_SIZE_LOG2)
+#define WORKLOAD_HASH_WRAP_MASK                (WORKLOAD_HASH_SIZE - 1)
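+
+/* For reference, the values above work out as:
+ *   RETURN_DATA_ARRAY_SIZE = 1U << 9 = 512, RETURN_DATA_ARRAY_WRAP_MASK = 0x1FF
+ *   WORKLOAD_HASH_SIZE     = 1U << 6 = 64,  WORKLOAD_HASH_WRAP_MASK     = 0x3F
+ */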
+
+/*!
+ * Workload characteristics for supported data masters.
+ * All characteristics must match for the workload estimate to be used/updated.
+ */
+typedef union _RGX_WORKLOAD_
+{
+       struct
+       {
+               IMG_UINT32                              ui32RenderTargetSize;
+               IMG_UINT32                              ui32NumberOfDrawCalls;
+               IMG_UINT32                              ui32NumberOfIndices;
+               IMG_UINT32                              ui32NumberOfMRTs;
+       } sTA3D;
+
+       struct
+       {
+               IMG_UINT32                              ui32NumberOfWorkgroups;
+               IMG_UINT32                              ui32NumberOfWorkitems;
+       } sCompute;
+
+       struct
+       {
+               IMG_UINT32                              ui32Characteristic1;
+               IMG_UINT32                              ui32Characteristic2;
+       } sTransfer;
+} RGX_WORKLOAD;
+
+/*!
+ * Host data used to match the return data (actual cycles count) to the
+ * submitted command packet.
+ * The hash table is a per-DM circular buffer containing a key based on the
+ * workload characteristics. On job completion, the driver matches the
+ * returned characteristics against the stored data; if the CB is full,
+ * the oldest workload data is evicted.
+ *
+ * o If the driver finds a match, the existing cycle estimate is averaged with
+ *   the actual cycles used.
+ * o Otherwise a new hash entry is created with the actual cycles for this
+ *   workload.
+ *
+ * Subsequently if a match is found during command submission, the estimate
+ * is passed to the scheduler, e.g. to adjust the GPU frequency if PDVFS is enabled.
+ */
+typedef struct _WORKLOAD_MATCHING_DATA_
+{
+       POS_LOCK                                psHashLock;
+       HASH_TABLE                              *psHashTable;           /*! existing workload cycle estimates for this DM */
+       RGX_WORKLOAD                    asHashKeys[WORKLOAD_HASH_SIZE];
+       IMG_UINT64                              aui64HashData[WORKLOAD_HASH_SIZE];
+       IMG_UINT32                              ui32HashArrayWO;        /*! track the most recent workload estimates */
+} WORKLOAD_MATCHING_DATA;
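+
+/* Pseudocode sketch of the completion-path flow described above (helper names
+ * such as "uiActualCycles" are illustrative only, not driver symbols):
+ *
+ *   key = sWorkloadCharacteristics of the completed command;
+ *   look up key in psHashTable (under psHashLock);
+ *   if found:  stored estimate = average(stored estimate, uiActualCycles);
+ *   else:      insert {key, uiActualCycles}, evicting the oldest entry
+ *              (tracked via ui32HashArrayWO) when the buffer is full;
+ */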
+
+/*!
+ * A generic container for the workload matching data for GPU contexts:
+ * rendering (TA, 3D), compute, etc.
+ */
+typedef struct _WORKEST_HOST_DATA_
+{
+       union
+       {
+               struct
+               {
+                       WORKLOAD_MATCHING_DATA  sDataTA;        /*!< matching data for TA commands */
+                       WORKLOAD_MATCHING_DATA  sData3D;        /*!< matching data for 3D commands */
+               } sTA3D;
+
+               struct
+               {
+                       WORKLOAD_MATCHING_DATA  sDataCDM;       /*!< matching data for CDM commands */
+               } sCompute;
+
+               struct
+               {
+                       WORKLOAD_MATCHING_DATA  sDataTDM;       /*!< matching data for TDM-TQ commands */
+               } sTransfer;
+       } uWorkloadMatchingData;
+
+       /*
+        * This is a per-context property, hence the TA and 3D share the same
+        * per render context counter.
+        */
+       IMG_UINT32                              ui32WorkEstCCBReceived; /*!< Used to ensure all submitted work
+                                                                                                                estimation commands are received
+                                                                                                                by the host before clean up. */
+} WORKEST_HOST_DATA;
+
+/*!
+ * Entries in the list of submitted workloads, used when the completed command
+ * returns data to the host.
+ *
+ * - the matching data is needed as it holds the hash data
+ * - the host data is needed for completion updates, ensuring memory is not
+ *   freed while workload estimates are in-flight.
+ * - the workload characteristic is used in the hash table look-up.
+ */
+typedef struct _WORKEST_RETURN_DATA_
+{
+       WORKEST_HOST_DATA               *psWorkEstHostData;
+       WORKLOAD_MATCHING_DATA  *psWorkloadMatchingData;
+       RGX_WORKLOAD                    sWorkloadCharacteristics;
+} WORKEST_RETURN_DATA;
+#endif
+
+
+typedef struct
+{
+#if defined(PDUMP)
+       IMG_HANDLE      hPdumpPages;
+#endif
+       PG_HANDLE       sPages;
+       IMG_DEV_PHYADDR sPhysAddr;
+} RGX_MIPS_ADDRESS_TRAMPOLINE;
+
+
+/*!
+ ******************************************************************************
+ * RGX Device error counts
+ *****************************************************************************/
+typedef struct _PVRSRV_RGXDEV_ERROR_COUNTS_
+{
+       IMG_UINT32 ui32WGPErrorCount;           /*!< count of the number of WGP checksum errors */
+       IMG_UINT32 ui32TRPErrorCount;           /*!< count of the number of TRP checksum errors */
+} PVRSRV_RGXDEV_ERROR_COUNTS;
+
+/*!
+ ******************************************************************************
+ * RGX Device info
+ *****************************************************************************/
+typedef struct _PVRSRV_RGXDEV_INFO_
+{
+       PVRSRV_DEVICE_NODE              *psDeviceNode;
+
+       PVRSRV_DEVICE_FEATURE_CONFIG    sDevFeatureCfg;
+
+       IMG_BOOL                                bDevInit2Done;
+
+       IMG_BOOL                                bFirmwareInitialised;
+       IMG_BOOL                                bPDPEnabled;
+
+       IMG_HANDLE                              hDbgReqNotify;
+
+       /* Kernel mode linear address of device registers */
+       void __iomem                    *pvRegsBaseKM;
+
+       IMG_HANDLE                              hRegMapping;
+
+       /* System physical address of device registers */
+       IMG_CPU_PHYADDR                 sRegsPhysBase;
+       /* Register region size in bytes */
+       IMG_UINT32                              ui32RegSize;
+
+       PVRSRV_STUB_PBDESC              *psStubPBDescListKM;
+
+       /* Firmware memory context info */
+       DEVMEM_CONTEXT                  *psKernelDevmemCtx;
+       DEVMEM_HEAP                             *psFirmwareMainHeap;
+       DEVMEM_HEAP                             *psFirmwareConfigHeap;
+       MMU_CONTEXT                             *psKernelMMUCtx;
+
+       void                                    *pvDeviceMemoryHeap;
+
+       /* Kernel CCB */
+       DEVMEM_MEMDESC                  *psKernelCCBCtlMemDesc;      /*!< memdesc for Kernel CCB control */
+       RGXFWIF_CCB_CTL                 *psKernelCCBCtl;             /*!< kernel mapping for Kernel CCB control */
+       DEVMEM_MEMDESC                  *psKernelCCBMemDesc;         /*!< memdesc for Kernel CCB */
+       IMG_UINT8                               *psKernelCCB;                /*!< kernel mapping for Kernel CCB */
+       DEVMEM_MEMDESC                  *psKernelCCBRtnSlotsMemDesc; /*!< Return slot array for Kernel CCB commands */
+       IMG_UINT32                              *pui32KernelCCBRtnSlots;     /*!< kernel mapping for return slot array */
+
+       /* Firmware CCB */
+       DEVMEM_MEMDESC                  *psFirmwareCCBCtlMemDesc;   /*!< memdesc for Firmware CCB control */
+       RGXFWIF_CCB_CTL                 *psFirmwareCCBCtl;          /*!< kernel mapping for Firmware CCB control */
+       DEVMEM_MEMDESC                  *psFirmwareCCBMemDesc;      /*!< memdesc for Firmware CCB */
+       IMG_UINT8                               *psFirmwareCCB;             /*!< kernel mapping for Firmware CCB */
+
+       /* Workload Estimation Firmware CCB */
+       DEVMEM_MEMDESC                  *psWorkEstFirmwareCCBCtlMemDesc;   /*!< memdesc for Workload Estimation Firmware CCB control */
+       RGXFWIF_CCB_CTL                 *psWorkEstFirmwareCCBCtl;          /*!< kernel mapping for Workload Estimation Firmware CCB control */
+       DEVMEM_MEMDESC                  *psWorkEstFirmwareCCBMemDesc;      /*!< memdesc for Workload Estimation Firmware CCB */
+       IMG_UINT8                               *psWorkEstFirmwareCCB;             /*!< kernel mapping for Workload Estimation Firmware CCB */
+
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+       /* Counter dumping */
+       DEVMEM_MEMDESC                  *psCounterBufferMemDesc;      /*!< mem desc for counter dumping buffer */
+       POS_LOCK                                hCounterDumpingLock;          /*!< Lock for guarding access to counter dumping buffer */
+#endif
+
+       PVRSRV_MEMALLOCFLAGS_T  uiFWPoisonOnFreeFlag;           /*!< Flag for poisoning FW allocations when freed */
+
+       IMG_BOOL                                bIgnoreHWReportedBVNC;                  /*!< Ignore BVNC reported by HW */
+
+       /*
+               If we don't preallocate the page tables, we must
+               insert newly allocated page tables dynamically.
+       */
+       void                                    *pvMMUContextList;
+
+       IMG_UINT32                              ui32ClkGateStatusReg;
+       IMG_UINT32                              ui32ClkGateStatusMask;
+
+       DEVMEM_MEMDESC                  *psRGXFWCodeMemDesc;
+       IMG_DEV_VIRTADDR                sFWCodeDevVAddrBase;
+       IMG_UINT32                      ui32FWCodeSizeInBytes;
+       DEVMEM_MEMDESC                  *psRGXFWDataMemDesc;
+       IMG_DEV_VIRTADDR                sFWDataDevVAddrBase;
+       RGX_MIPS_ADDRESS_TRAMPOLINE     *psTrampoline;
+
+       DEVMEM_MEMDESC                  *psRGXFWCorememCodeMemDesc;
+       IMG_DEV_VIRTADDR                sFWCorememCodeDevVAddrBase;
+       RGXFWIF_DEV_VIRTADDR            sFWCorememCodeFWAddr;
+       IMG_UINT32                      ui32FWCorememCodeSizeInBytes;
+
+       DEVMEM_MEMDESC                  *psRGXFWIfCorememDataStoreMemDesc;
+       IMG_DEV_VIRTADDR                sFWCorememDataStoreDevVAddrBase;
+       RGXFWIF_DEV_VIRTADDR            sFWCorememDataStoreFWAddr;
+
+       DEVMEM_MEMDESC                  *psRGXFWAlignChecksMemDesc;
+
+#if defined(PDUMP)
+       DEVMEM_MEMDESC                  *psRGXFWSigTAChecksMemDesc;
+       IMG_UINT32                              ui32SigTAChecksSize;
+
+       DEVMEM_MEMDESC                  *psRGXFWSig3DChecksMemDesc;
+       IMG_UINT32                              ui32Sig3DChecksSize;
+
+       DEVMEM_MEMDESC                  *psRGXFWSigTDM2DChecksMemDesc;
+       IMG_UINT32                              ui32SigTDM2DChecksSize;
+
+       IMG_BOOL                                bDumpedKCCBCtlAlready;
+
+       POS_SPINLOCK                    hSyncCheckpointSignalSpinLock;                                          /*!< Guards data shared between an atomic & sleepable-context */
+#endif
+
+       POS_LOCK                                hRGXFWIfBufInitLock;                                                            /*!< trace buffer lock for initialisation phase */
+
+       DEVMEM_MEMDESC                  *psRGXFWIfTraceBufCtlMemDesc;                                           /*!< memdesc of trace buffer control structure */
+       DEVMEM_MEMDESC                  *psRGXFWIfTraceBufferMemDesc[RGXFW_THREAD_NUM];         /*!< memdesc of actual FW trace (log) buffer(s) */
+       RGXFWIF_TRACEBUF                *psRGXFWIfTraceBufCtl;                                                          /*!< structure containing trace control data and actual trace buffer */
+
+       DEVMEM_MEMDESC                  *psRGXFWIfFwSysDataMemDesc;                                                     /*!< memdesc of the firmware-shared system data structure */
+       RGXFWIF_SYSDATA                 *psRGXFWIfFwSysData;                                                            /*!< kernel mapping of the firmware-shared system data structure */
+
+       DEVMEM_MEMDESC                  *psRGXFWIfFwOsDataMemDesc;                                                      /*!< memdesc of the firmware-shared os structure */
+       RGXFWIF_OSDATA                  *psRGXFWIfFwOsData;                                                                     /*!< kernel mapping of the firmware-shared OS data structure */
+
+#if defined(SUPPORT_TBI_INTERFACE)
+       DEVMEM_MEMDESC                  *psRGXFWIfTBIBufferMemDesc;                                                     /*!< memdesc of actual FW TBI buffer */
+       RGXFWIF_DEV_VIRTADDR    sRGXFWIfTBIBuffer;                                                                      /*!< TBI buffer data */
+       IMG_UINT32                              ui32FWIfTBIBufferSize;
+#endif
+
+       DEVMEM_MEMDESC                  *psRGXFWIfHWRInfoBufCtlMemDesc;
+       RGXFWIF_HWRINFOBUF              *psRGXFWIfHWRInfoBufCtl;
+       IMG_UINT32                              ui32ClockSource;
+       IMG_UINT32                              ui32LastClockSource;
+
+       DEVMEM_MEMDESC                  *psRGXFWIfGpuUtilFWCbCtlMemDesc;
+       RGXFWIF_GPU_UTIL_FWCB   *psRGXFWIfGpuUtilFWCb;
+
+       DEVMEM_MEMDESC                  *psRGXFWIfHWPerfBufMemDesc;
+       IMG_BYTE                                *psRGXFWIfHWPerfBuf;
+       IMG_UINT32                              ui32RGXFWIfHWPerfBufSize; /* in bytes */
+
+       DEVMEM_MEMDESC                  *psRGXFWIfRegCfgMemDesc;
+
+       DEVMEM_MEMDESC                  *psRGXFWIfHWPerfCountersMemDesc;
+
+       DEVMEM_MEMDESC                  *psRGXFWIfConnectionCtlMemDesc;
+       RGXFWIF_CONNECTION_CTL  *psRGXFWIfConnectionCtl;
+
+       DEVMEM_MEMDESC                  *psRGXFWHeapGuardPageReserveMemDesc;
+       DEVMEM_MEMDESC                  *psRGXFWIfSysInitMemDesc;
+       RGXFWIF_SYSINIT                 *psRGXFWIfSysInit;
+
+       DEVMEM_MEMDESC                  *psRGXFWIfOsInitMemDesc;
+       RGXFWIF_OSINIT                  *psRGXFWIfOsInit;
+
+       DEVMEM_MEMDESC                  *psRGXFWIfRuntimeCfgMemDesc;
+       RGXFWIF_RUNTIME_CFG             *psRGXFWIfRuntimeCfg;
+
+       /* Additional guest firmware memory context info */
+       DEVMEM_HEAP                             *psGuestFirmwareRawHeap[RGX_NUM_OS_SUPPORTED];
+       DEVMEM_MEMDESC                  *psGuestFirmwareRawMemDesc[RGX_NUM_OS_SUPPORTED];
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       /* Array to store data needed for workload estimation when a workload
+          has finished and its cycle time is returned to the host.      */
+       WORKEST_RETURN_DATA     asReturnData[RETURN_DATA_ARRAY_SIZE];
+       IMG_UINT32              ui32ReturnDataWO;
+       POS_LOCK                hWorkEstLock;
+#endif
+
+#if defined(SUPPORT_PDVFS)
+       /**
+        * Host memdesc and pointer to memory containing core clock rate in Hz.
+        * Firmware updates the memory on changing the core clock rate over GPIO.
+        * Note: Shared memory needs atomic access from Host driver and firmware,
+        * hence size should not be greater than memory transaction granularity.
+        * Currently it is chosen to be 32 bits.
+        */
+       DEVMEM_MEMDESC                  *psRGXFWIFCoreClkRateMemDesc;
+       volatile IMG_UINT32             *pui32RGXFWIFCoreClkRate;
+       /**
+        * Last sampled core clk rate.
+        */
+       volatile IMG_UINT32             ui32CoreClkRateSnapshot;
+#endif
+
+       /*
+          HWPerf data for the RGX device
+        */
+
+       POS_LOCK    hHWPerfLock;  /*! Critical section lock that protects HWPerf code
+                                  *  against duplicate init/deinit from multiple threads
+                                  *  and against loss/freeing of FW & Host resources while
+                                  *  they are in use by another thread, e.g. the MISR. */
+
+       IMG_UINT64  ui64HWPerfFilter; /*! Event filter for FW events (settable by AppHint) */
+       IMG_HANDLE  hHWPerfStream;    /*! TL Stream buffer (L2) for firmware event stream */
+       IMG_UINT32  ui32L2BufMaxPacketSize;/*!< Max allowed packet size in FW HWPerf TL (L2) buffer */
+       IMG_BOOL    bSuspendHWPerfL2DataCopy;  /*! Flag to indicate if copying HWPerf data is suspended */
+
+       IMG_UINT32  ui32HWPerfHostFilter;      /*! Event filter for HWPerfHost stream (settable by AppHint) */
+       POS_LOCK    hLockHWPerfHostStream;     /*! Lock guarding access to HWPerfHost stream from multiple threads */
+       IMG_HANDLE  hHWPerfHostStream;         /*! TL Stream buffer for host only event stream */
+       IMG_UINT32  ui32HWPerfHostBufSize;     /*! Host side buffer size in bytes */
+       IMG_UINT32  ui32HWPerfHostLastOrdinal; /*! Ordinal of the last packet emitted in HWPerfHost TL stream.
+                                               *  Guarded by hLockHWPerfHostStream */
+       IMG_UINT32  ui32HWPerfHostNextOrdinal; /*! Ordinal number for HWPerfHost events. Guarded by hHWPerfHostSpinLock */
+       IMG_UINT8   *pui8DeferredEvents;       /*! List of HWPerfHost events yet to be emitted into the
+                                               *  TL stream. Events generated in atomic context are
+                                               *  deferred here and emitted later, as the emission
+                                               *  code can sleep */
+       IMG_UINT16  ui16DEReadIdx;             /*! Read index in the above deferred events buffer */
+       IMG_UINT16  ui16DEWriteIdx;            /*! Write index in the above deferred events buffer */
+       void        *pvHostHWPerfMISR;         /*! MISR to emit pending/deferred events in HWPerfHost TL stream */
+       POS_SPINLOCK hHWPerfHostSpinLock;      /*! Guards data shared between an atomic & sleepable-context */
+#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
+       IMG_UINT32  ui32DEHighWatermark;       /*! High watermark of deferred events buffer usage.
+                                               *  Protected by hHWPerfHostSpinLock */
+       /* Max number of times deferred emission waited for an atomic-context packet write to finish */
+       IMG_UINT32  ui32WaitForAtomicCtxPktHighWatermark; /*! Protected by hLockHWPerfHostStream */
+       /* Whether a warning has been logged about an atomic-context packet loss (due to waiting too long for the "write" to finish) */
+       IMG_BOOL    bWarnedAtomicCtxPktLost;
+       /* Max number of times deferred emission scheduled out to give the right-ordinal packet a chance to be emitted */
+       IMG_UINT32  ui32WaitForRightOrdPktHighWatermark; /*! Protected by hLockHWPerfHostStream */
+       /* Whether a warning has been logged about packet loss (due to waiting too long for the right ordinal to emit) */
+       IMG_BOOL    bWarnedPktOrdinalBroke;
+#endif
+
+       void        *pvGpuFtraceData;
+
+       /* Poll data for detecting firmware fatal errors */
+       IMG_UINT32                              aui32CrLastPollCount[RGXFW_THREAD_NUM];
+       IMG_UINT32                              ui32KCCBCmdsExecutedLastTime;
+       IMG_BOOL                                bKCCBCmdsWaitingLastTime;
+       IMG_UINT32                              ui32GEOTimeoutsLastTime;
+       IMG_UINT32                              ui32InterruptCountLastTime;
+       IMG_UINT32                              ui32MissingInterruptsLastTime;
+
+       /* Client stall detection */
+       IMG_UINT32                              ui32StalledClientMask;
+
+       IMG_BOOL                                bWorkEstEnabled;
+       IMG_BOOL                                bPDVFSEnabled;
+
+       void                                    *pvLISRData;
+       void                                    *pvMISRData;
+       void                                    *pvAPMISRData;
+       RGX_ACTIVEPM_CONF               eActivePMConf;
+
+       volatile IMG_UINT32             aui32SampleIRQCount[RGXFW_THREAD_NUM];
+
+       DEVMEM_MEMDESC                  *psRGXFaultAddressMemDesc;
+
+       DEVMEM_MEMDESC                  *psSLC3FenceMemDesc;
+
+       /* If we do 10 deferred memory allocations per second, then the ID would wrap around after 13 years */
+       IMG_UINT32                              ui32ZSBufferCurrID;     /*!< ID assigned to the next deferred devmem allocation */
+       IMG_UINT32                              ui32FreelistCurrID;     /*!< ID assigned to the next freelist */
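+       /* Worked example for the wrap-around note above: 2^32 IDs / 10 allocations
+        * per second ~= 4.29e8 seconds ~= 13.6 years before either ID wraps. */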
+
+       POS_LOCK                                hLockZSBuffer;          /*!< Lock to protect simultaneous access to ZSBuffers */
+       DLLIST_NODE                             sZSBufferHead;          /*!< List of on-demand ZSBuffers */
+       POS_LOCK                                hLockFreeList;          /*!< Lock to protect simultaneous access to Freelists */
+       DLLIST_NODE                             sFreeListHead;          /*!< List of growable Freelists */
+       PSYNC_PRIM_CONTEXT              hSyncPrimContext;
+       PVRSRV_CLIENT_SYNC_PRIM *psPowSyncPrim;
+
+       IMG_UINT32                              ui32ActivePMReqOk;
+       IMG_UINT32                              ui32ActivePMReqDenied;
+       IMG_UINT32                              ui32ActivePMReqNonIdle;
+       IMG_UINT32                              ui32ActivePMReqRetry;
+       IMG_UINT32                              ui32ActivePMReqTotal;
+
+       IMG_HANDLE                              hProcessQueuesMISR;
+
+       IMG_UINT32                              ui32DeviceFlags;                /*!< Flags to track general device state */
+
+       /* GPU DVFS Table */
+       RGX_GPU_DVFS_TABLE              *psGpuDVFSTable;
+
+       /* Pointer to function returning the GPU utilisation statistics since the last
+        * time the function was called. Supports different users at the same time.
+        *
+        * psReturnStats [out]: GPU utilisation statistics (active high/active low/idle/blocked)
+        *                      in microseconds since the last time the function was called
+        *                      by a specific user (identified by hGpuUtilUser)
+        *
+        * Returns PVRSRV_OK in case the call completed without errors,
+        * some other value otherwise.
+        */
+       PVRSRV_ERROR (*pfnGetGpuUtilStats) (PVRSRV_DEVICE_NODE *psDeviceNode,
+                                           IMG_HANDLE hGpuUtilUser,
+                                           RGXFWIF_GPU_UTIL_STATS *psReturnStats);
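+
+       /* Illustrative call sequence (editorial sketch, not part of this patch;
+        * hGpuUtilUser is assumed to have been registered beforehand):
+        *
+        *     RGXFWIF_GPU_UTIL_STATS sStats;
+        *     PVRSRV_ERROR eErr = psDevInfo->pfnGetGpuUtilStats(psDevInfo->psDeviceNode,
+        *                                                       hGpuUtilUser,
+        *                                                       &sStats);
+        */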
+
+       /* Pointer to function that checks if the physical GPU IRQ
+        * line has been asserted and clears it if so */
+       IMG_BOOL (*pfnRGXAckIrq) (struct _PVRSRV_RGXDEV_INFO_ *psDevInfo);
+
+       POS_LOCK                                hGPUUtilLock;
+
+       /* Register configuration */
+       RGX_REG_CONFIG                  sRegCongfig;
+
+       IMG_BOOL                                bRGXPowered;
+       DLLIST_NODE                             sMemoryContextList;
+
+       POSWR_LOCK                              hRenderCtxListLock;
+       POSWR_LOCK                              hComputeCtxListLock;
+       POSWR_LOCK                              hTransferCtxListLock;
+       POSWR_LOCK                              hTDMCtxListLock;
+       POSWR_LOCK                              hMemoryCtxListLock;
+       POSWR_LOCK                              hKickSyncCtxListLock;
+
+       /* Linked list of KCCB commands deferred because the KCCB was full.
+        * Access to sKCCBDeferredCommandsListHead and ui32KCCBDeferredCommandsCount
+        * is protected by the hLockKCCBDeferredCommandsList spin lock. */
+       POS_SPINLOCK                    hLockKCCBDeferredCommandsList; /*!< Protects deferred KCCB commands list */
+       DLLIST_NODE                             sKCCBDeferredCommandsListHead;
+       IMG_UINT32                              ui32KCCBDeferredCommandsCount; /*!< No of commands in the deferred list */
+
+       /* Linked lists of contexts on this device */
+       DLLIST_NODE                             sRenderCtxtListHead;
+       DLLIST_NODE                             sComputeCtxtListHead;
+       DLLIST_NODE                             sTransferCtxtListHead;
+       DLLIST_NODE                             sTDMCtxtListHead;
+       DLLIST_NODE                             sKickSyncCtxtListHead;
+
+       DLLIST_NODE                             sCommonCtxtListHead;
+       POSWR_LOCK                              hCommonCtxtListLock;
+       IMG_UINT32                              ui32CommonCtxtCurrentID;        /*!< ID assigned to the next common context */
+
+       POS_LOCK                                hDebugFaultInfoLock;    /*!< Lock to protect the debug fault info list */
+       POS_LOCK                                hMMUCtxUnregLock;               /*!< Lock to protect list of unregistered MMU contexts */
+
+       POS_LOCK                                hNMILock; /*!< Lock to protect NMI operations */
+
+#if defined(SUPPORT_VALIDATION)
+       IMG_UINT32                              ui32ValidationFlags;    /*!< Validation flags for host driver */
+#endif
+       RGX_DUST_STATE                  sDustReqState;
+
+       RGX_LAYER_PARAMS                sLayerParams;
+
+       RGXFWIF_DM                              eBPDM;                                  /*!< Current breakpoint data master */
+       IMG_BOOL                                bBPSet;                                 /*!< A Breakpoint has been set */
+       POS_LOCK                                hBPLock;                                /*!< Lock for break point operations */
+
+       IMG_UINT32                              ui32CoherencyTestsDone;
+
+       ATOMIC_T                                iCCBSubmissionOrdinal; /* Rolling count used to indicate CCB submission order (all CCBs) */
+       POS_LOCK                                hCCBRecoveryLock;      /* Lock to protect pvEarliestStalledClientCCB and ui32OldestSubmissionOrdinal variables */
+       void                                    *pvEarliestStalledClientCCB; /* Will point to cCCB command to unblock in the event of a stall */
+       IMG_UINT32                              ui32OldestSubmissionOrdinal; /* Earliest submission ordinal of CCB entry found so far */
+       IMG_UINT32                              ui32SLRHoldoffCounter;   /* Decremented each time health check is called until zero. SLR only happen when zero. */
+
+       POS_LOCK                                hCCBStallCheckLock; /* Lock used to guard against multiple threads simultaneously checking for stalled CCBs */
+
+#if defined(SUPPORT_FIRMWARE_GCOV)
+       /* Firmware gcov buffer */
+       DEVMEM_MEMDESC                  *psFirmwareGcovBufferMemDesc;      /*!< mem desc for Firmware gcov dumping buffer */
+       IMG_UINT32                              ui32FirmwareGcovSize;
+#endif
+
+#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER)
+       struct
+       {
+               IMG_UINT64 ui64timerGray;
+               IMG_UINT64 ui64timerBinary;
+               IMG_UINT64 *pui64uscTimers;
+       } sRGXTimerValues;
+#endif
+
+#if defined(SUPPORT_VALIDATION)
+       struct
+       {
+               IMG_UINT64 ui64RegVal;
+               struct completion sRegComp;
+       } sFwRegs;
+#endif
+
+       IMG_HANDLE                              hTQCLISharedMem;                /*!< TQ Client Shared Mem PMR */
+       IMG_HANDLE                              hTQUSCSharedMem;                /*!< TQ USC Shared Mem PMR */
+
+#if defined(SUPPORT_VALIDATION)
+       IMG_UINT32                              ui32TestSLRInterval; /* Don't enqueue an update sync checkpoint every nth kick */
+       IMG_UINT32                              ui32TestSLRCount;    /* (used to test SLR operation) */
+       IMG_UINT32                              ui32SLRSkipFWAddr;
+#endif
+
+#if defined(SUPPORT_SECURITY_VALIDATION)
+       DEVMEM_MEMDESC                  *psRGXFWIfSecureBufMemDesc;
+       DEVMEM_MEMDESC                  *psRGXFWIfNonSecureBufMemDesc;
+#endif
+
+       /* Timer Queries */
+       IMG_UINT32                              ui32ActiveQueryId;              /*!< ID of the active timer query */
+       IMG_BOOL                                bSaveStart;                             /*!< save the start time of the next kick on the device */
+       IMG_BOOL                                bSaveEnd;                               /*!< save the end time of the next kick on the device */
+
+       DEVMEM_MEMDESC                  *psStartTimeMemDesc;    /*!< memdesc for Start Times */
+       IMG_UINT64                              *pui64StartTimeById;    /*!< CPU mapping of the above */
+
+       DEVMEM_MEMDESC                  *psEndTimeMemDesc;      /*!< memdesc for End Times */
+       IMG_UINT64                              *pui64EndTimeById;      /*!< CPU mapping of the above */
+
+       IMG_UINT32                              aui32ScheduledOnId[RGX_MAX_TIMER_QUERIES];      /*!< kicks Scheduled on QueryId */
+       DEVMEM_MEMDESC                  *psCompletedMemDesc;    /*!< kicks Completed on QueryId */
+       IMG_UINT32                              *pui32CompletedById;    /*!< CPU mapping of the above */
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+       POS_LOCK                                hTimerQueryLock;                /*!< lock to protect simultaneous access to timer query members */
+#endif
+
+       PVRSRV_RGXDEV_ERROR_COUNTS sErrorCounts;                /*!< struct containing device error counts */
+
+       IMG_UINT32                              ui32HostSafetyEventMask;/*!< mask of the safety events handled by the driver */
+
+       RGX_CONTEXT_RESET_REASON        eLastDeviceError;       /*!< device error reported to client */
+#if defined(SUPPORT_VALIDATION)
+       IMG_UINT32 ui32ECCRAMErrInjModule;
+       IMG_UINT32 ui32ECCRAMErrInjInterval;
+#endif
+
+       IMG_UINT32              ui32Log2Non4KPgSize; /* Page size of Non4k heap in log2 form */
+} PVRSRV_RGXDEV_INFO;
+
+
+
+typedef struct _RGX_TIMING_INFORMATION_
+{
+       /*! GPU default core clock speed in Hz */
+       IMG_UINT32                      ui32CoreClockSpeed;
+
+       /*! Active Power Management: GPU actively requests the host driver to be powered off */
+       IMG_BOOL                        bEnableActivePM;
+
+       /*! Enable the GPU to power off internal Power Islands independently from the host driver */
+       IMG_BOOL                        bEnableRDPowIsland;
+
+       /*! Active Power Management: Delay between the GPU idle and the request to the host */
+       IMG_UINT32                      ui32ActivePMLatencyms;
+
+} RGX_TIMING_INFORMATION;
+
+typedef struct _RGX_DATA_
+{
+       /*! Timing information */
+       RGX_TIMING_INFORMATION  *psRGXTimingInfo;
+} RGX_DATA;
+
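+#if 0
+/* Hypothetical example of how a system layer could populate these structures;
+ * the values below are illustrative only and are not taken from this patch. */
+static RGX_TIMING_INFORMATION gsRGXTimingInfo = {
+       .ui32CoreClockSpeed    = 600000000, /* 600 MHz, example value */
+       .bEnableActivePM       = IMG_TRUE,
+       .bEnableRDPowIsland    = IMG_TRUE,
+       .ui32ActivePMLatencyms = 30,
+};
+static RGX_DATA gsRGXData = { .psRGXTimingInfo = &gsRGXTimingInfo };
+#endif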
+
+/*
+       RGX PDUMP register bank name (prefix)
+*/
+#define RGX_PDUMPREG_NAME              "RGXREG"
+#define RGX_TB_PDUMPREG_NAME   "EMUREG"
+
+#endif /* RGXDEVICE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxfwutils.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxfwutils.c
new file mode 100644 (file)
index 0000000..2e98cd2
--- /dev/null
@@ -0,0 +1,7825 @@
+/*************************************************************************/ /*!
+@File
+@Title          Rogue firmware utility routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Rogue firmware utility routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(__linux__)
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+#include "img_defs.h"
+
+#include "rgxdefs_km.h"
+#include "rgx_fwif_km.h"
+#include "pdump_km.h"
+#include "osfunc.h"
+#include "oskm_apphint.h"
+#include "cache_km.h"
+#include "allocmem.h"
+#include "physheap.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "devicemem_server.h"
+
+#include "pvr_debug.h"
+#include "pvr_notifier.h"
+#include "rgxfwutils.h"
+#include "rgx_options.h"
+#include "rgx_fwif_alignchecks.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgx_pdump_panics.h"
+#include "fwtrace_string.h"
+#include "rgxheapconfig.h"
+#include "pvrsrv.h"
+#include "rgxdebug.h"
+#include "rgxhwperf.h"
+#include "rgxccb.h"
+#include "rgxcompute.h"
+#include "rgxtransfer.h"
+#include "rgxpower.h"
+#include "rgxtdmtransfer.h"
+#if defined(SUPPORT_DISPLAY_CLASS)
+#include "dc_server.h"
+#endif
+#include "rgxmem.h"
+#include "rgxmmudefs_km.h"
+#include "rgxmipsmmuinit.h"
+#include "rgxta3d.h"
+#include "rgxkicksync.h"
+#include "rgxutils.h"
+#include "rgxtimecorr.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_external.h"
+#include "tlstream.h"
+#include "devicemem_server_utils.h"
+#include "htbuffer.h"
+#include "rgx_bvnc_defs_km.h"
+#include "info_page.h"
+
+#include "physmem_lma.h"
+#include "physmem_osmem.h"
+
+#ifdef __linux__
+#include <linux/kernel.h>      /* sprintf */
+#include "rogue_trace_events.h"
+#else
+#include <stdio.h>
+#include <string.h>
+#endif
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "rgxworkest.h"
+#endif
+
+#if defined(SUPPORT_PDVFS)
+#include "rgxpdvfs.h"
+#endif
+
+#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER)
+#include "rgxsoctimer.h"
+#endif
+
+#include "vz_vmm_pvz.h"
+#include "rgx_heaps.h"
+
+/*!
+ ******************************************************************************
+ * HWPERF
+ *****************************************************************************/
+/* Limits for the Firmware L1 HWPERF buffer size, expressed in kilobytes.
+ * The buffer is accessed by the Firmware and host driver. */
+#define RGXFW_HWPERF_L1_SIZE_MIN        (16U)
+#define RGXFW_HWPERF_L1_SIZE_DEFAULT    PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB
+#define RGXFW_HWPERF_L1_SIZE_MAX        (12288U)
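+
+/* Editorial note: the limits above are kilobytes; GetHwPerfBufferSize() below
+ * converts them to bytes with a "<< 10" shift, e.g. 2048 << 10 = 2097152 bytes (2 MB). */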
+
+/* Firmware CCB length */
+#if defined(NO_HARDWARE) && defined(PDUMP)
+#define RGXFWIF_FWCCB_NUMCMDS_LOG2   (10)
+#elif defined(SUPPORT_PDVFS)
+#define RGXFWIF_FWCCB_NUMCMDS_LOG2   (8)
+#else
+#define RGXFWIF_FWCCB_NUMCMDS_LOG2   (5)
+#endif
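+
+/* Worked example (as the _LOG2 suffix suggests): the Firmware CCB holds
+ * 1 << RGXFWIF_FWCCB_NUMCMDS_LOG2 commands, i.e. 32 by default, 256 with
+ * SUPPORT_PDVFS and 1024 for NO_HARDWARE PDUMP builds. */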
+
+#if defined(RGX_FW_IRQ_OS_COUNTERS)
+const IMG_UINT32 gaui32FwOsIrqCntRegAddr[RGXFW_MAX_NUM_OS] = {IRQ_COUNTER_STORAGE_REGS};
+#endif
+
+/*
+ * Maximum length of time a DM can run for before the DM will be marked
+ * as out-of-time. CDM has an increased value due to longer running kernels.
+ *
+ * These deadlines are increased on FPGA, EMU and VP due to the slower
+ * execution time of these platforms. PDUMPS are also included since they
+ * are often run on EMU, FPGA or in CSim.
+ */
+#if defined(FPGA) || defined(EMULATOR) || defined(VIRTUAL_PLATFORM) || defined(PDUMP)
+#define RGXFWIF_MAX_WORKLOAD_DEADLINE_MS     (480000)
+#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (1000000)
+#else
+#define RGXFWIF_MAX_WORKLOAD_DEADLINE_MS     (40000)
+#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (90000)
+#endif
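+
+/* For reference: 40000 ms = 40 s and 90000 ms = 90 s on silicon, versus
+ * 480000 ms = 8 min and 1000000 ms ~= 16.7 min on the slower FPGA/EMU/VP/PDUMP
+ * configurations above. */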
+
+/* Workload Estimation Firmware CCB length */
+#define RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2   (7)
+
+/* Size of memory buffer for firmware gcov data
+ * The actual data size is several hundred kilobytes. The buffer is an order of magnitude larger. */
+#define RGXFWIF_FIRMWARE_GCOV_BUFFER_SIZE (4*1024*1024)
+
+typedef struct
+{
+       RGXFWIF_KCCB_CMD        sKCCBcmd;
+       DLLIST_NODE             sListNode;
+       PDUMP_FLAGS_T           uiPDumpFlags;
+       PVRSRV_RGXDEV_INFO      *psDevInfo;
+} RGX_DEFERRED_KCCB_CMD;
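+
+/* Relationship sketch: each RGX_DEFERRED_KCCB_CMD is linked via sListNode into
+ * psDevInfo->sKCCBDeferredCommandsListHead (guarded by hLockKCCBDeferredCommandsList)
+ * when the kernel CCB is full, so the command can be retried later. */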
+
+#if defined(PDUMP)
+/* ensure PIDs are 32-bit because a 32-bit PDump load is generated for the
+ * PID filter example entries
+ */
+static_assert(sizeof(IMG_PID) == sizeof(IMG_UINT32),
+               "FW PID filtering assumes the IMG_PID type is 32-bits wide as it "
+               "generates WRW commands for loading the PID values");
+#endif
+
+static void RGXFreeFwOsData(PVRSRV_RGXDEV_INFO *psDevInfo);
+static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+#if defined(RGX_FEATURE_SLC_VIVT_BIT_MASK)
+static PVRSRV_ERROR _AllocateSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo, RGXFWIF_SYSINIT* psFwSysInit)
+{
+       PVRSRV_ERROR eError;
+       DEVMEM_MEMDESC** ppsSLC3FenceMemDesc = &psDevInfo->psSLC3FenceMemDesc;
+       IMG_UINT32 ui32CacheLineSize = GET_ROGUE_CACHE_LINE_SIZE(
+                       RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS));
+
+       PVR_DPF_ENTERED;
+
+       eError = DevmemAllocate(psDevInfo->psFirmwareMainHeap,
+                       1,
+                       ui32CacheLineSize,
+                       PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                       PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                       PVRSRV_MEMALLOCFLAG_GPU_UNCACHED |
+                       PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN),
+                       "FwSLC3FenceWA",
+                       ppsSLC3FenceMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF_RETURN_RC(eError);
+       }
+
+       /* We need to map it so the heap for this allocation is set */
+       eError = DevmemMapToDevice(*ppsSLC3FenceMemDesc,
+                       psDevInfo->psFirmwareMainHeap,
+                       &psFwSysInit->sSLC3FenceDevVAddr);
+       if (eError != PVRSRV_OK)
+       {
+               DevmemFree(*ppsSLC3FenceMemDesc);
+               *ppsSLC3FenceMemDesc = NULL;
+       }
+
+       PVR_DPF_RETURN_RC1(eError, *ppsSLC3FenceMemDesc);
+}
+
+static void _FreeSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo)
+{
+       DEVMEM_MEMDESC* psSLC3FenceMemDesc = psDevInfo->psSLC3FenceMemDesc;
+
+       if (psSLC3FenceMemDesc)
+       {
+               DevmemReleaseDevVirtAddr(psSLC3FenceMemDesc);
+               DevmemFree(psSLC3FenceMemDesc);
+       }
+}
+#endif
+
+static void __MTSScheduleWrite(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Value)
+{
+       /* ensure memory is flushed before kicking MTS */
+       OSWriteMemoryBarrier(NULL);
+
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE, ui32Value);
+
+       /* ensure the MTS kick goes through before continuing */
+#if !defined(NO_HARDWARE) && !defined(INTEGRITY_OS)
+       OSWriteMemoryBarrier((IMG_BYTE*) psDevInfo->pvRegsBaseKM + RGX_CR_MTS_SCHEDULE);
+#else
+       OSWriteMemoryBarrier(NULL);
+#endif
+}
+
+/*************************************************************************/ /*!
+@Function       RGXSetupFwAllocation
+
+@Description    Allocates firmware-accessible memory and optionally returns its
+                firmware virtual address and a CPU mapping.
+
+@Input          psDevInfo       Device Info struct
+@Input          uiAllocFlags    Flags determining type of memory allocation
+@Input          ui32Size        Size of memory allocation
+@Input          pszName         Allocation label
+@Input          ppsMemDesc      pointer to the allocation's memory descriptor
+@Input          psFwPtr         Address of the firmware pointer to set
+@Input          ppvCpuPtr       Address of the cpu pointer to set
+@Input          ui32DevVAFlags  Any combination of RFW_FWADDR_*_FLAG
+
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXSetupFwAllocation(PVRSRV_RGXDEV_INFO*  psDevInfo,
+                                                                 PVRSRV_MEMALLOCFLAGS_T uiAllocFlags,
+                                                                 IMG_UINT32           ui32Size,
+                                                                 const IMG_CHAR       *pszName,
+                                                                 DEVMEM_MEMDESC       **ppsMemDesc,
+                                                                 RGXFWIF_DEV_VIRTADDR *psFwPtr,
+                                                                 void                 **ppvCpuPtr,
+                                                                 IMG_UINT32           ui32DevVAFlags)
+{
+       PVRSRV_ERROR eError;
+#if defined(SUPPORT_AUTOVZ)
+       IMG_BOOL bClearByMemset;
+       if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiAllocFlags))
+       {
+               /* Under AutoVz the ZERO_ON_ALLOC flag is avoided as it causes the memory to
+                * be allocated from a different PMR than an allocation without the flag.
+                * When the content of an allocation needs to be recovered from physical memory
+                * on a later driver reboot, the memory then cannot be zeroed but the allocation
+                * addresses must still match.
+                * If the memory requires clearing, perform a memset after the allocation. */
+               uiAllocFlags &= ~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+               bClearByMemset = IMG_TRUE;
+       }
+       else
+       {
+               bClearByMemset = IMG_FALSE;
+       }
+#endif
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode, "Allocate %s", pszName);
+       eError = DevmemFwAllocate(psDevInfo,
+                                                         ui32Size,
+                                                         uiAllocFlags,
+                                                         pszName,
+                                                         ppsMemDesc);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to allocate %u bytes for %s (%u)",
+                                __func__,
+                                ui32Size,
+                                pszName,
+                                eError));
+               goto fail_alloc;
+       }
+
+       if (psFwPtr)
+       {
+               eError = RGXSetFirmwareAddress(psFwPtr, *ppsMemDesc, 0, ui32DevVAFlags);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Failed to acquire firmware virtual address for %s (%u)",
+                                        __func__,
+                                        pszName,
+                                        eError));
+                       goto fail_fwaddr;
+               }
+       }
+
+#if defined(SUPPORT_AUTOVZ)
+       if ((bClearByMemset) || (ppvCpuPtr))
+#else
+       if (ppvCpuPtr)
+#endif
+       {
+               void *pvTempCpuPtr;
+
+               eError = DevmemAcquireCpuVirtAddr(*ppsMemDesc, &pvTempCpuPtr);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Failed to acquire CPU virtual address for %s (%u)",
+                                       __func__,
+                                        pszName,
+                                       eError));
+                       goto fail_cpuva;
+               }
+
+#if defined(SUPPORT_AUTOVZ)
+               if (bClearByMemset)
+               {
+                       if (PVRSRV_CHECK_CPU_WRITE_COMBINE(uiAllocFlags))
+                       {
+                               OSCachedMemSetWMB(pvTempCpuPtr, 0, ui32Size);
+                       }
+                       else
+                       {
+                               OSDeviceMemSet(pvTempCpuPtr, 0, ui32Size);
+                       }
+               }
+               if (ppvCpuPtr)
+#endif
+               {
+                       *ppvCpuPtr = pvTempCpuPtr;
+               }
+#if defined(SUPPORT_AUTOVZ)
+               else
+               {
+                       DevmemReleaseCpuVirtAddr(*ppsMemDesc);
+                       pvTempCpuPtr = NULL;
+               }
+#endif
+       }
+
+       PVR_DPF((PVR_DBG_MESSAGE, "%s: %s set up at Fw VA 0x%x and CPU VA 0x%p with alloc flags 0x%" IMG_UINT64_FMTSPECX,
+                        __func__, pszName,
+                        (psFwPtr)   ? (psFwPtr->ui32Addr) : (0),
+                        (ppvCpuPtr) ? (*ppvCpuPtr)        : (NULL),
+                        uiAllocFlags));
+
+       return eError;
+
+fail_cpuva:
+       if (psFwPtr)
+       {
+               RGXUnsetFirmwareAddress(*ppsMemDesc);
+       }
+fail_fwaddr:
+       DevmemFree(*ppsMemDesc);
+fail_alloc:
+       return eError;
+}
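+
+#if 0
+/* Minimal usage sketch for RGXSetupFwAllocation (illustrative only; it mirrors the
+ * real call sites later in this file, the wrapper name and 4096-byte size are
+ * hypothetical): */
+static PVRSRV_ERROR ExampleSetupFwBuffer(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                         DEVMEM_MEMDESC **ppsMemDesc,
+                                         RGXFWIF_DEV_VIRTADDR *psFwAddr)
+{
+       return RGXSetupFwAllocation(psDevInfo,
+                                   RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS, /* allocation flags */
+                                   4096,                              /* size in bytes */
+                                   "FwExampleBuffer",                 /* allocation label */
+                                   ppsMemDesc,                        /* memdesc out */
+                                   psFwAddr,                          /* firmware VA out */
+                                   NULL,                              /* no CPU mapping required */
+                                   RFW_FWADDR_NOREF_FLAG);
+}
+#endif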
+
+/*************************************************************************/ /*!
+@Function       GetHwPerfBufferSize
+
+@Description    Computes the effective size of the HW Perf Buffer
+@Input          ui32HWPerfFWBufSizeKB       Requested HWPerf FW buffer size in kilobytes (AppHint value)
+@Return         HwPerfBufferSize
+*/ /**************************************************************************/
+static IMG_UINT32 GetHwPerfBufferSize(IMG_UINT32 ui32HWPerfFWBufSizeKB)
+{
+       IMG_UINT32 HwPerfBufferSize;
+
+       /* HWPerf: Determine the size of the FW buffer */
+       if (ui32HWPerfFWBufSizeKB == 0 ||
+                       ui32HWPerfFWBufSizeKB == RGXFW_HWPERF_L1_SIZE_DEFAULT)
+       {
+               /* Under pvrsrvctl a size of 0 implies the AppHint is not set or is
+                * set to zero, so use the default size from the driver constant,
+                * with no logging.
+                */
+               HwPerfBufferSize = RGXFW_HWPERF_L1_SIZE_DEFAULT<<10;
+       }
+       else if (ui32HWPerfFWBufSizeKB > (RGXFW_HWPERF_L1_SIZE_MAX))
+       {
+               /* Size specified as an AppHint but it is too big */
+               PVR_DPF((PVR_DBG_WARNING,
+                               "%s: HWPerfFWBufSizeInKB value (%u) too big, using maximum (%u)",
+                               __func__,
+                               ui32HWPerfFWBufSizeKB, RGXFW_HWPERF_L1_SIZE_MAX));
+               HwPerfBufferSize = RGXFW_HWPERF_L1_SIZE_MAX<<10;
+       }
+       else if (ui32HWPerfFWBufSizeKB > (RGXFW_HWPERF_L1_SIZE_MIN))
+       {
+               /* Size specified in the AppHint HWPerfFWBufSizeInKB */
+               PVR_DPF((PVR_DBG_WARNING,
+                               "%s: Using HWPerf FW buffer size of %u KB",
+                               __func__,
+                               ui32HWPerfFWBufSizeKB));
+               HwPerfBufferSize = ui32HWPerfFWBufSizeKB<<10;
+       }
+       else
+       {
+               /* Size specified as an AppHint but it is too small */
+               PVR_DPF((PVR_DBG_WARNING,
+                               "%s: HWPerfFWBufSizeInKB value (%u) too small, using minimum (%u)",
+                               __func__,
+                               ui32HWPerfFWBufSizeKB, RGXFW_HWPERF_L1_SIZE_MIN));
+               HwPerfBufferSize = RGXFW_HWPERF_L1_SIZE_MIN<<10;
+       }
+
+       return HwPerfBufferSize;
+}
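+
+/* Behaviour sketch for GetHwPerfBufferSize(), derived from the limits above:
+ *   0 or RGXFW_HWPERF_L1_SIZE_DEFAULT -> RGXFW_HWPERF_L1_SIZE_DEFAULT << 10 bytes
+ *   20000 KB (above the maximum)      -> RGXFW_HWPERF_L1_SIZE_MAX << 10 bytes (12288 KB)
+ *   8 KB (below the minimum)          -> RGXFW_HWPERF_L1_SIZE_MIN << 10 bytes (16 KB)
+ */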
+
+#if defined(PDUMP)
+/*!
+*******************************************************************************
+ @Function             RGXFWSetupSignatureChecks
+ @Description
+ @Input                        psDevInfo
+
+ @Return               PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXFWSetupSignatureChecks(PVRSRV_RGXDEV_INFO* psDevInfo,
+                                              DEVMEM_MEMDESC**    ppsSigChecksMemDesc,
+                                              IMG_UINT32          ui32SigChecksBufSize,
+                                              RGXFWIF_SIGBUF_CTL* psSigBufCtl)
+{
+       PVRSRV_ERROR    eError;
+
+       /* Allocate memory for the checks */
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS,
+                                                                 ui32SigChecksBufSize,
+                                                                 "FwSignatureChecks",
+                                                                 ppsSigChecksMemDesc,
+                                                                 &psSigBufCtl->sBuffer,
+                                                                 NULL,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail);
+
+       DevmemPDumpLoadMem(     *ppsSigChecksMemDesc,
+                       0,
+                       ui32SigChecksBufSize,
+                       PDUMP_FLAGS_CONTINUOUS);
+
+       psSigBufCtl->ui32LeftSizeInRegs = ui32SigChecksBufSize / sizeof(IMG_UINT32);
+fail:
+       return eError;
+}
+#endif
+
+
+#if defined(SUPPORT_FIRMWARE_GCOV)
+/*!
+*******************************************************************************
+ @Function             RGXFWSetupFirmwareGcovBuffer
+ @Description
+ @Input                        psDevInfo
+
+ @Return               PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXFWSetupFirmwareGcovBuffer(PVRSRV_RGXDEV_INFO*                   psDevInfo,
+               DEVMEM_MEMDESC**                        ppsBufferMemDesc,
+               IMG_UINT32                                      ui32FirmwareGcovBufferSize,
+               RGXFWIF_FIRMWARE_GCOV_CTL*      psFirmwareGcovCtl,
+               const IMG_CHAR*                         pszBufferName)
+{
+       PVRSRV_ERROR    eError;
+
+       /* Allocate memory for gcov */
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 (RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS |
+                                                                  PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)),
+                                                                 ui32FirmwareGcovBufferSize,
+                                                                 pszBufferName,
+                                                                 ppsBufferMemDesc,
+                                                                 &psFirmwareGcovCtl->sBuffer,
+                                                                 NULL,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_RETURN_IF_ERROR(eError, "RGXSetupFwAllocation");
+
+       psFirmwareGcovCtl->ui32Size = ui32FirmwareGcovBufferSize;
+
+       return PVRSRV_OK;
+}
+#endif
+
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+/*!
+ ******************************************************************************
+ @Function             RGXFWSetupCounterBuffer
+ @Description
+ @Input                        psDevInfo
+
+ @Return               PVRSRV_ERROR
+ *****************************************************************************/
+static PVRSRV_ERROR RGXFWSetupCounterBuffer(PVRSRV_RGXDEV_INFO* psDevInfo,
+               DEVMEM_MEMDESC**                        ppsBufferMemDesc,
+               IMG_UINT32                                      ui32CounterDataBufferSize,
+               RGXFWIF_COUNTER_DUMP_CTL*       psCounterDumpCtl,
+               const IMG_CHAR*                         pszBufferName)
+{
+       PVRSRV_ERROR    eError;
+
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 (RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS |
+                                                                  PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)),
+                                                                 ui32CounterDataBufferSize,
+                                                                 "FwCounterBuffer",
+                                                                 ppsBufferMemDesc,
+                                                                 &psCounterDumpCtl->sBuffer,
+                                                                 NULL,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_RETURN_IF_ERROR(eError, "RGXSetupFwAllocation");
+
+       psCounterDumpCtl->ui32SizeInDwords = ui32CounterDataBufferSize >> 2;
+
+       return PVRSRV_OK;
+}
+#endif
+
+/*!
+ ******************************************************************************
+ @Function      RGXFWSetupAlignChecks
+ @Description   This function allocates and fills the memory needed for the
+                alignment checks of the UM and KM structures shared with the
+                firmware. The format of the data in the memory is as follows:
+                    <number of elements in the KM array>
+                    <array of KM structures' sizes and members' offsets>
+                    <number of elements in the UM array>
+                    <array of UM structures' sizes and members' offsets>
+                The UM array is passed from the user side. Now the firmware is
+                responsible for filling this part of the memory. If that
+                happens, the check of the UM structures will be performed
+                by the host driver on the client's connect.
+                If the macro is not defined, the client driver fills the memory
+                and the firmware checks the alignment of all structures.
+ @Input                        psDeviceNode
+
+ @Return               PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXFWSetupAlignChecks(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                               RGXFWIF_DEV_VIRTADDR    *psAlignChecksDevFW)
+{
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+       IMG_UINT32                      aui32RGXFWAlignChecksKM[] = { RGXFW_ALIGN_CHECKS_INIT_KM };
+       IMG_UINT32                      ui32RGXFWAlignChecksTotal;
+       IMG_UINT32*                     paui32AlignChecks;
+       PVRSRV_ERROR            eError;
+
+       /* In this case we don't know the number of elements in UM array.
+        * We have to assume something so we assume RGXFW_ALIGN_CHECKS_UM_MAX.
+        */
+       ui32RGXFWAlignChecksTotal = sizeof(aui32RGXFWAlignChecksKM)
+                                   + RGXFW_ALIGN_CHECKS_UM_MAX * sizeof(IMG_UINT32)
+                                   + 2 * sizeof(IMG_UINT32);
+
+       /* Allocate memory for the checks */
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS &
+                                                                 RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp),
+                                                                 ui32RGXFWAlignChecksTotal,
+                                                                 "FwAlignmentChecks",
+                                                                 &psDevInfo->psRGXFWAlignChecksMemDesc,
+                                                                 psAlignChecksDevFW,
+                                                                 (void**) &paui32AlignChecks,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail);
+
+       if (!psDeviceNode->bAutoVzFwIsUp)
+       {
+               /* Copy the values */
+               *paui32AlignChecks++ = ARRAY_SIZE(aui32RGXFWAlignChecksKM);
+               OSCachedMemCopy(paui32AlignChecks, &aui32RGXFWAlignChecksKM[0],
+                               sizeof(aui32RGXFWAlignChecksKM));
+               paui32AlignChecks += ARRAY_SIZE(aui32RGXFWAlignChecksKM);
+
+               *paui32AlignChecks = 0;
+       }
+
+       OSWriteMemoryBarrier(paui32AlignChecks);
+
+       DevmemPDumpLoadMem(psDevInfo->psRGXFWAlignChecksMemDesc,
+                                               0,
+                                               ui32RGXFWAlignChecksTotal,
+                                               PDUMP_FLAGS_CONTINUOUS);
+
+       return PVRSRV_OK;
+
+fail:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
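+
+/* Layout sketch of the buffer populated above (one IMG_UINT32 per cell):
+ *   [KM count][KM sizes/offsets ...][UM count = 0][space for RGXFW_ALIGN_CHECKS_UM_MAX UM entries]
+ * The UM portion is only reserved here and is filled in later from the user-mode
+ * side, as described in the function header. */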
+
+static void RGXFWFreeAlignChecks(PVRSRV_RGXDEV_INFO* psDevInfo)
+{
+       if (psDevInfo->psRGXFWAlignChecksMemDesc != NULL)
+       {
+               DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc);
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWAlignChecksMemDesc);
+               psDevInfo->psRGXFWAlignChecksMemDesc = NULL;
+       }
+}
+
+PVRSRV_ERROR RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR        *ppDest,
+                                                  DEVMEM_MEMDESC               *psSrc,
+                                                  IMG_UINT32                   uiExtraOffset,
+                                                  IMG_UINT32                   ui32Flags)
+{
+       PVRSRV_ERROR            eError;
+       IMG_DEV_VIRTADDR        psDevVirtAddr;
+       PVRSRV_DEVICE_NODE      *psDeviceNode;
+       PVRSRV_RGXDEV_INFO      *psDevInfo;
+
+       psDeviceNode = (PVRSRV_DEVICE_NODE *) DevmemGetConnection(psSrc);
+       psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+       {
+               IMG_UINT32          ui32Offset;
+               IMG_BOOL            bCachedInMETA;
+               PVRSRV_MEMALLOCFLAGS_T uiDevFlags;
+               IMG_UINT32          uiGPUCacheMode;
+
+               eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr);
+               PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireDevVirtAddr", failDevVAAcquire);
+
+               /* Convert to an address in META memmap */
+               ui32Offset = psDevVirtAddr.uiAddr + uiExtraOffset - RGX_FIRMWARE_RAW_HEAP_BASE;
+
+               /* Check in the devmem flags whether this memory is cached/uncached */
+               DevmemGetFlags(psSrc, &uiDevFlags);
+
+               /* Honour the META cache flags */
+               bCachedInMETA = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) & uiDevFlags) != 0;
+
+               /* Honour the SLC cache flags */
+               eError = DevmemDeviceCacheMode(psDeviceNode, uiDevFlags, &uiGPUCacheMode);
+               PVR_LOG_GOTO_IF_ERROR(eError, "DevmemDeviceCacheMode", failDevCacheMode);
+
+               ui32Offset += RGXFW_SEGMMU_DATA_BASE_ADDRESS;
+
+               if (bCachedInMETA)
+               {
+                       ui32Offset |= RGXFW_SEGMMU_DATA_META_CACHED;
+               }
+               else
+               {
+                       ui32Offset |= RGXFW_SEGMMU_DATA_META_UNCACHED;
+               }
+
+               if (PVRSRV_CHECK_GPU_CACHED(uiGPUCacheMode))
+               {
+                       ui32Offset |= RGXFW_SEGMMU_DATA_VIVT_SLC_CACHED;
+               }
+               else
+               {
+                       ui32Offset |= RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED;
+               }
+               ppDest->ui32Addr = ui32Offset;
+       }
+       else
+#endif
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+       {
+               eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr);
+               PVR_GOTO_IF_ERROR(eError, failDevVAAcquire);
+
+               ppDest->ui32Addr = (IMG_UINT32)((psDevVirtAddr.uiAddr + uiExtraOffset) & 0xFFFFFFFF);
+       }
+       else
+       {
+               IMG_UINT32      ui32Offset;
+               IMG_BOOL        bCachedInRISCV;
+               PVRSRV_MEMALLOCFLAGS_T  uiDevFlags;
+
+               eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr);
+               PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireDevVirtAddr", failDevVAAcquire);
+
+               /* Convert to an address in RISCV memmap */
+               ui32Offset = psDevVirtAddr.uiAddr + uiExtraOffset - RGX_FIRMWARE_RAW_HEAP_BASE;
+
+               /* Check in the devmem flags whether this memory is cached/uncached */
+               DevmemGetFlags(psSrc, &uiDevFlags);
+
+               /* Honour the RISCV cache flags */
+               bCachedInRISCV = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) & uiDevFlags) != 0;
+
+               if (bCachedInRISCV)
+               {
+                       ui32Offset |= RGXRISCVFW_SHARED_CACHED_DATA_BASE;
+               }
+               else
+               {
+                       ui32Offset |= RGXRISCVFW_SHARED_UNCACHED_DATA_BASE;
+               }
+
+               ppDest->ui32Addr = ui32Offset;
+       }
+
+       if ((ppDest->ui32Addr & 0x3U) != 0)
+       {
+               IMG_CHAR *pszAnnotation;
+               /* It is expected that the annotation returned by DevmemGetAnnotation() is always valid */
+               DevmemGetAnnotation(psSrc, &pszAnnotation);
+
+               PVR_DPF((PVR_DBG_ERROR, "%s: %s @ 0x%x is not aligned to 32 bit",
+                                __func__, pszAnnotation, ppDest->ui32Addr));
+
+               return PVRSRV_ERROR_INVALID_ALIGNMENT;
+       }
+
+       if (ui32Flags & RFW_FWADDR_NOREF_FLAG)
+       {
+               DevmemReleaseDevVirtAddr(psSrc);
+       }
+
+       return PVRSRV_OK;
+
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+failDevCacheMode:
+       DevmemReleaseDevVirtAddr(psSrc);
+#endif
+failDevVAAcquire:
+       return eError;
+}
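+
+/* Address-conversion sketch (summarising the cases above, not additional behaviour):
+ *   META:   ui32Addr = (devVA + offset - RGX_FIRMWARE_RAW_HEAP_BASE)
+ *                      + RGXFW_SEGMMU_DATA_BASE_ADDRESS, OR'ed with the META and
+ *                      SLC cached/uncached segment bits;
+ *   MIPS:   ui32Addr = low 32 bits of (devVA + offset);
+ *   RISC-V: ui32Addr = (devVA + offset - RGX_FIRMWARE_RAW_HEAP_BASE), OR'ed with
+ *                      the shared cached/uncached data base. */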
+
+void RGXSetMetaDMAAddress(RGXFWIF_DMA_ADDR             *psDest,
+               DEVMEM_MEMDESC          *psSrcMemDesc,
+               RGXFWIF_DEV_VIRTADDR    *psSrcFWDevVAddr,
+               IMG_UINT32                      uiOffset)
+{
+       PVRSRV_ERROR            eError;
+       IMG_DEV_VIRTADDR        sDevVirtAddr;
+
+       eError = DevmemAcquireDevVirtAddr(psSrcMemDesc, &sDevVirtAddr);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+       psDest->psDevVirtAddr.uiAddr = sDevVirtAddr.uiAddr;
+       psDest->psDevVirtAddr.uiAddr += uiOffset;
+       psDest->pbyFWAddr.ui32Addr = psSrcFWDevVAddr->ui32Addr;
+
+       DevmemReleaseDevVirtAddr(psSrcMemDesc);
+}
+
+
+void RGXUnsetFirmwareAddress(DEVMEM_MEMDESC *psSrc)
+{
+       DevmemReleaseDevVirtAddr(psSrc);
+}
+
+struct _RGX_SERVER_COMMON_CONTEXT_ {
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       DEVMEM_MEMDESC *psFWCommonContextMemDesc;
+       PRGXFWIF_FWCOMMONCONTEXT sFWCommonContextFWAddr;
+       SERVER_MMU_CONTEXT *psServerMMUContext;
+       DEVMEM_MEMDESC *psFWMemContextMemDesc;
+       DEVMEM_MEMDESC *psFWFrameworkMemDesc;
+       DEVMEM_MEMDESC *psContextStateMemDesc;
+       RGX_CLIENT_CCB *psClientCCB;
+       DEVMEM_MEMDESC *psClientCCBMemDesc;
+       DEVMEM_MEMDESC *psClientCCBCtrlMemDesc;
+       IMG_BOOL bCommonContextMemProvided;
+       IMG_UINT32 ui32ContextID;
+       DLLIST_NODE sListNode;
+       RGX_CONTEXT_RESET_REASON eLastResetReason;
+       IMG_UINT32 ui32LastResetJobRef;
+       IMG_INT32 i32Priority;
+       RGX_CCB_REQUESTOR_TYPE eRequestor;
+};
+
+/*************************************************************************/ /*!
+@Function       _CheckPriority
+@Description    Check if priority is allowed for requestor type
+@Input          psDevInfo    pointer to DevInfo struct
+@Input          i32Priority Requested priority
+@Input          eRequestor   Requestor type specifying data master
+@Return         PVRSRV_ERROR PVRSRV_OK on success
+*/ /**************************************************************************/
+static PVRSRV_ERROR _CheckPriority(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                  IMG_INT32 i32Priority,
+                                                                  RGX_CCB_REQUESTOR_TYPE eRequestor)
+{
+       /* Only one context allowed with real time priority (highest priority) */
+       if (i32Priority == RGX_CTX_PRIORITY_REALTIME)
+       {
+               DLLIST_NODE *psNode, *psNext;
+
+               dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext)
+               {
+                       RGX_SERVER_COMMON_CONTEXT *psThisContext =
+                               IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode);
+
+                       if (psThisContext->i32Priority == RGX_CTX_PRIORITY_REALTIME &&
+                               psThisContext->eRequestor == eRequestor)
+                       {
+                               PVR_LOG(("Only one context with real time priority allowed"));
+                               return PVRSRV_ERROR_INVALID_PARAMS;
+                       }
+               }
+       }
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection,
+               PVRSRV_DEVICE_NODE *psDeviceNode,
+               RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor,
+               RGXFWIF_DM eDM,
+               SERVER_MMU_CONTEXT *psServerMMUContext,
+               DEVMEM_MEMDESC *psAllocatedMemDesc,
+               IMG_UINT32 ui32AllocatedOffset,
+               DEVMEM_MEMDESC *psFWMemContextMemDesc,
+               DEVMEM_MEMDESC *psContextStateMemDesc,
+               IMG_UINT32 ui32CCBAllocSizeLog2,
+               IMG_UINT32 ui32CCBMaxAllocSizeLog2,
+               IMG_UINT32 ui32ContextFlags,
+               IMG_UINT32 ui32Priority,
+               IMG_UINT32 ui32MaxDeadlineMS,
+               IMG_UINT64 ui64RobustnessAddress,
+               RGX_COMMON_CONTEXT_INFO *psInfo,
+               RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
+       RGXFWIF_FWCOMMONCONTEXT *psFWCommonContext;
+       IMG_UINT32 ui32FWCommonContextOffset;
+       IMG_UINT8 *pui8Ptr;
+       IMG_INT32 i32Priority = (IMG_INT32)ui32Priority;
+       PVRSRV_ERROR eError;
+
+       /*
+        * Allocate all the resources that are required
+        */
+       psServerCommonContext = OSAllocMem(sizeof(*psServerCommonContext));
+       if (psServerCommonContext == NULL)
+       {
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto fail_alloc;
+       }
+
+       psServerCommonContext->psDevInfo = psDevInfo;
+       psServerCommonContext->psServerMMUContext = psServerMMUContext;
+
+       if (psAllocatedMemDesc)
+       {
+               PDUMPCOMMENT(psDeviceNode,
+                                        "Using existing MemDesc for Rogue firmware %s context (offset = %d)",
+                               aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+                               ui32AllocatedOffset);
+               ui32FWCommonContextOffset = ui32AllocatedOffset;
+               psServerCommonContext->psFWCommonContextMemDesc = psAllocatedMemDesc;
+               psServerCommonContext->bCommonContextMemProvided = IMG_TRUE;
+       }
+       else
+       {
+               /* Allocate device memory for the firmware context */
+               PDUMPCOMMENT(psDeviceNode,
+                                        "Allocate Rogue firmware %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]);
+               eError = DevmemFwAllocate(psDevInfo,
+                               sizeof(*psFWCommonContext),
+                               RGX_FWCOMCTX_ALLOCFLAGS,
+                               "FwContext",
+                               &psServerCommonContext->psFWCommonContextMemDesc);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to allocate firmware %s context (%s)",
+                                __func__,
+                                aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+                                PVRSRVGetErrorString(eError)));
+                       goto fail_contextalloc;
+               }
+               ui32FWCommonContextOffset = 0;
+               psServerCommonContext->bCommonContextMemProvided = IMG_FALSE;
+       }
+
+       /* Record this context so we can refer to it if the FW needs to tell us it was reset. */
+       psServerCommonContext->eLastResetReason    = RGX_CONTEXT_RESET_REASON_NONE;
+       psServerCommonContext->ui32LastResetJobRef = 0;
+       psServerCommonContext->ui32ContextID       = psDevInfo->ui32CommonCtxtCurrentID++;
+
+       /*
+        * Temporarily map the firmware context to the kernel and initialise it
+        */
+       eError = DevmemAcquireCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc,
+                                         (void **)&pui8Ptr);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed to map firmware %s context to CPU (%s)",
+                        __func__,
+                        aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+                        PVRSRVGetErrorString(eError)));
+               goto fail_cpuvirtacquire;
+       }
+
+       /* Allocate the client CCB */
+       eError = RGXCreateCCB(psDevInfo,
+                       ui32CCBAllocSizeLog2,
+                       ui32CCBMaxAllocSizeLog2,
+                       ui32ContextFlags,
+                       psConnection,
+                       eRGXCCBRequestor,
+                       psServerCommonContext,
+                       &psServerCommonContext->psClientCCB,
+                       &psServerCommonContext->psClientCCBMemDesc,
+                       &psServerCommonContext->psClientCCBCtrlMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: failed to create CCB for %s context (%s)",
+                        __func__,
+                        aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+                        PVRSRVGetErrorString(eError)));
+               goto fail_allocateccb;
+       }
+
+       psFWCommonContext = (RGXFWIF_FWCOMMONCONTEXT *) (pui8Ptr + ui32FWCommonContextOffset);
+       psFWCommonContext->eDM = eDM;
+
+       /* Set the firmware CCB device addresses in the firmware common context */
+       eError = RGXSetFirmwareAddress(&psFWCommonContext->psCCB,
+                       psServerCommonContext->psClientCCBMemDesc,
+                       0, RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", fail_cccbfwaddr);
+
+       eError = RGXSetFirmwareAddress(&psFWCommonContext->psCCBCtl,
+                       psServerCommonContext->psClientCCBCtrlMemDesc,
+                       0, RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", fail_cccbctrlfwaddr);
+
+#if defined(RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA))
+       {
+               RGXSetMetaDMAAddress(&psFWCommonContext->sCCBMetaDMAAddr,
+                               psServerCommonContext->psClientCCBMemDesc,
+                               &psFWCommonContext->psCCB,
+                               0);
+       }
+#endif
+
+       /* Set the memory context device address */
+       psServerCommonContext->psFWMemContextMemDesc = psFWMemContextMemDesc;
+       eError = RGXSetFirmwareAddress(&psFWCommonContext->psFWMemContext,
+                       psFWMemContextMemDesc,
+                       0, RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:3", fail_fwmemctxfwaddr);
+
+       /* Set the framework register updates address */
+       psServerCommonContext->psFWFrameworkMemDesc = psInfo->psFWFrameworkMemDesc;
+       if (psInfo->psFWFrameworkMemDesc != NULL)
+       {
+               eError = RGXSetFirmwareAddress(&psFWCommonContext->psRFCmd,
+                               psInfo->psFWFrameworkMemDesc,
+                               0, RFW_FWADDR_FLAG_NONE);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:4", fail_fwframeworkfwaddr);
+       }
+       else
+       {
+               /* This should never be touched in contexts without a framework
+                * memdesc, but ensure it is zero so we see a clear crash if it
+                * ever is.
+                */
+               psFWCommonContext->psRFCmd.ui32Addr = 0;
+       }
+
+       eError = _CheckPriority(psDevInfo, i32Priority, eRGXCCBRequestor);
+       PVR_LOG_GOTO_IF_ERROR(eError, "_CheckPriority", fail_checkpriority);
+
+       psServerCommonContext->i32Priority = i32Priority;
+       psServerCommonContext->eRequestor = eRGXCCBRequestor;
+
+       psFWCommonContext->i32Priority = i32Priority;
+       psFWCommonContext->ui32PrioritySeqNum = 0;
+       psFWCommonContext->ui32MaxDeadlineMS = MIN(ui32MaxDeadlineMS,
+                                                                                          (eDM == RGXFWIF_DM_CDM ?
+                                                                                               RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS :
+                                                                                               RGXFWIF_MAX_WORKLOAD_DEADLINE_MS));
+       psFWCommonContext->ui64RobustnessAddress = ui64RobustnessAddress;
+
+       /* Store references to the Server Common Context and PID for notifications back from the FW. */
+       psFWCommonContext->ui32ServerCommonContextID = psServerCommonContext->ui32ContextID;
+       psFWCommonContext->ui32PID                   = OSGetCurrentClientProcessIDKM();
+
+       /* Set the firmware GPU context state buffer */
+       psServerCommonContext->psContextStateMemDesc = psContextStateMemDesc;
+       if (psContextStateMemDesc)
+       {
+               eError = RGXSetFirmwareAddress(&psFWCommonContext->psContextState,
+                               psContextStateMemDesc,
+                               0,
+                               RFW_FWADDR_FLAG_NONE);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:5", fail_ctxstatefwaddr);
+       }
+
+       /*
+        * Dump the created context
+        */
+       PDUMPCOMMENT(psDeviceNode,
+                                "Dump %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]);
+       DevmemPDumpLoadMem(psServerCommonContext->psFWCommonContextMemDesc,
+                       ui32FWCommonContextOffset,
+                       sizeof(*psFWCommonContext),
+                       PDUMP_FLAGS_CONTINUOUS);
+
+       /* We've finished the setup so release the CPU mapping */
+       DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc);
+
+       /* Map this allocation into the FW */
+       eError = RGXSetFirmwareAddress(&psServerCommonContext->sFWCommonContextFWAddr,
+                       psServerCommonContext->psFWCommonContextMemDesc,
+                       ui32FWCommonContextOffset,
+                       RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:6", fail_fwcommonctxfwaddr);
+
+#if defined(__linux__)
+       {
+               IMG_UINT32 ui32FWAddr;
+               switch (eDM) {
+               case RGXFWIF_DM_GEOM:
+                       ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t)
+                                       psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, sTAContext));
+                       break;
+               case RGXFWIF_DM_3D:
+                       ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t)
+                                       psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, s3DContext));
+                       break;
+               default:
+                       ui32FWAddr = psServerCommonContext->sFWCommonContextFWAddr.ui32Addr;
+                       break;
+               }
+
+               trace_rogue_create_fw_context(OSGetCurrentClientProcessNameKM(),
+                               aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+                               ui32FWAddr);
+       }
+#endif
+       /* Add the node to the list when finalised */
+       OSWRLockAcquireWrite(psDevInfo->hCommonCtxtListLock);
+       dllist_add_to_tail(&(psDevInfo->sCommonCtxtListHead), &(psServerCommonContext->sListNode));
+       OSWRLockReleaseWrite(psDevInfo->hCommonCtxtListLock);
+
+       *ppsServerCommonContext = psServerCommonContext;
+       return PVRSRV_OK;
+
+fail_fwcommonctxfwaddr:
+       if (psContextStateMemDesc)
+       {
+               RGXUnsetFirmwareAddress(psContextStateMemDesc);
+       }
+fail_ctxstatefwaddr:
+fail_checkpriority:
+       if (psInfo->psFWFrameworkMemDesc != NULL)
+       {
+               RGXUnsetFirmwareAddress(psInfo->psFWFrameworkMemDesc);
+       }
+fail_fwframeworkfwaddr:
+       RGXUnsetFirmwareAddress(psFWMemContextMemDesc);
+fail_fwmemctxfwaddr:
+       RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBCtrlMemDesc);
+fail_cccbctrlfwaddr:
+       RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBMemDesc);
+fail_cccbfwaddr:
+       RGXDestroyCCB(psDevInfo, psServerCommonContext->psClientCCB);
+fail_allocateccb:
+       DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc);
+fail_cpuvirtacquire:
+       if (!psServerCommonContext->bCommonContextMemProvided)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psServerCommonContext->psFWCommonContextMemDesc);
+               psServerCommonContext->psFWCommonContextMemDesc = NULL;
+       }
+fail_contextalloc:
+       OSFreeMem(psServerCommonContext);
+fail_alloc:
+       return eError;
+}
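+/*
+ * Minimal sketch of how a data-master context module is expected to drive the
+ * common-context API above (illustrative only; the elided arguments and the
+ * surrounding setup of psConnection, psDeviceNode, the memdescs and psInfo
+ * are assumed to exist in the real caller):
+ *
+ *     RGX_SERVER_COMMON_CONTEXT *psCtx;
+ *
+ *     eError = FWCommonContextAllocate(psConnection, psDeviceNode,
+ *                                      ... per-DM parameters ..., &psCtx);
+ *     if (eError == PVRSRV_OK)
+ *     {
+ *             PRGXFWIF_FWCOMMONCONTEXT sFWCtx = FWCommonContextGetFWAddress(psCtx);
+ *             RGX_CLIENT_CCB *psCCB = FWCommonContextGetClientCCB(psCtx);
+ *             // ... build commands in psCCB and kick sFWCtx ...
+ *             FWCommonContextFree(psCtx);
+ *     }
+ */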
+
+void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+
+       OSWRLockAcquireWrite(psServerCommonContext->psDevInfo->hCommonCtxtListLock);
+       /* Remove the context from the list of all contexts. */
+       dllist_remove_node(&psServerCommonContext->sListNode);
+       OSWRLockReleaseWrite(psServerCommonContext->psDevInfo->hCommonCtxtListLock);
+
+       /*
+               Unmap the context itself and then all its resources
+       */
+
+       /* Unmap the FW common context */
+       RGXUnsetFirmwareAddress(psServerCommonContext->psFWCommonContextMemDesc);
+       /* Unmap the context state buffer (if there was one) */
+       if (psServerCommonContext->psContextStateMemDesc)
+       {
+               RGXUnsetFirmwareAddress(psServerCommonContext->psContextStateMemDesc);
+       }
+       /* Unmap the framework buffer */
+       if (psServerCommonContext->psFWFrameworkMemDesc)
+       {
+               RGXUnsetFirmwareAddress(psServerCommonContext->psFWFrameworkMemDesc);
+       }
+       /* Unmap client CCB and CCB control */
+       RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBCtrlMemDesc);
+       RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBMemDesc);
+       /* Unmap the memory context */
+       RGXUnsetFirmwareAddress(psServerCommonContext->psFWMemContextMemDesc);
+
+       /* Destroy the client CCB */
+       RGXDestroyCCB(psServerCommonContext->psDevInfo, psServerCommonContext->psClientCCB);
+
+
+       /* Free the FW common context (if there was one) */
+       if (!psServerCommonContext->bCommonContextMemProvided)
+       {
+               DevmemFwUnmapAndFree(psServerCommonContext->psDevInfo,
+                               psServerCommonContext->psFWCommonContextMemDesc);
+               psServerCommonContext->psFWCommonContextMemDesc = NULL;
+       }
+       /* Free the host's representation of the common context */
+       OSFreeMem(psServerCommonContext);
+}
+
+PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+       return psServerCommonContext->sFWCommonContextFWAddr;
+}
+
+RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+       return psServerCommonContext->psClientCCB;
+}
+
+RGX_CONTEXT_RESET_REASON FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+               IMG_UINT32 *pui32LastResetJobRef)
+{
+       RGX_CONTEXT_RESET_REASON eLastResetReason;
+
+       PVR_ASSERT(psServerCommonContext != NULL);
+       PVR_ASSERT(pui32LastResetJobRef != NULL);
+
+       /* Take the most recent reason & job ref and reset for next time... */
+       eLastResetReason      = psServerCommonContext->eLastResetReason;
+       *pui32LastResetJobRef = psServerCommonContext->ui32LastResetJobRef;
+       psServerCommonContext->eLastResetReason = RGX_CONTEXT_RESET_REASON_NONE;
+       psServerCommonContext->ui32LastResetJobRef = 0;
+
+       if (eLastResetReason == RGX_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH)
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                        "A Hard Context Switch was triggered on the GPU to ensure Quality of Service."));
+       }
+
+       return eLastResetReason;
+}
+
+PVRSRV_RGXDEV_INFO* FWCommonContextGetRGXDevInfo(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+       return psServerCommonContext->psDevInfo;
+}
+
+PVRSRV_ERROR RGXGetFWCommonContextAddrFromServerMMUCtx(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                                          SERVER_MMU_CONTEXT *psServerMMUContext,
+                                                                                                          PRGXFWIF_FWCOMMONCONTEXT *psFWCommonContextFWAddr)
+{
+       DLLIST_NODE *psNode, *psNext;
+       dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext)
+       {
+               RGX_SERVER_COMMON_CONTEXT *psThisContext =
+                       IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode);
+
+               if (psThisContext->psServerMMUContext == psServerMMUContext)
+               {
+                       psFWCommonContextFWAddr->ui32Addr = psThisContext->sFWCommonContextFWAddr.ui32Addr;
+                       return PVRSRV_OK;
+               }
+       }
+       return PVRSRV_ERROR_INVALID_PARAMS;
+}
+
+PVRSRV_ERROR FWCommonContextSetFlags(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+                                     IMG_UINT32 ui32ContextFlags)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       if (BITMASK_ANY(ui32ContextFlags, ~RGX_CONTEXT_FLAGS_WRITEABLE_MASK))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Context flag(s) invalid or not writeable (%d)",
+                                __func__, ui32ContextFlags));
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+       }
+       else
+       {
+               RGXSetCCBFlags(psServerCommonContext->psClientCCB,
+                                          ui32ContextFlags);
+       }
+
+       return eError;
+}
+
+/*!
+*******************************************************************************
+ @Function             RGXFreeCCB
+ @Description  Free the kernel or firmware CCB
+ @Input                        psDevInfo
+ @Input                        ppsCCBCtl
+ @Input                        ppsCCBCtlMemDesc
+ @Input                        ppui8CCB
+ @Input                        ppsCCBMemDesc
+******************************************************************************/
+static void RGXFreeCCB(PVRSRV_RGXDEV_INFO      *psDevInfo,
+                                          RGXFWIF_CCB_CTL              **ppsCCBCtl,
+                                          DEVMEM_MEMDESC               **ppsCCBCtlMemDesc,
+                                          IMG_UINT8                    **ppui8CCB,
+                                          DEVMEM_MEMDESC               **ppsCCBMemDesc)
+{
+       if (*ppsCCBMemDesc != NULL)
+       {
+               if (*ppui8CCB != NULL)
+               {
+                       DevmemReleaseCpuVirtAddr(*ppsCCBMemDesc);
+                       *ppui8CCB = NULL;
+               }
+               DevmemFwUnmapAndFree(psDevInfo, *ppsCCBMemDesc);
+               *ppsCCBMemDesc = NULL;
+       }
+       if (*ppsCCBCtlMemDesc != NULL)
+       {
+               if (*ppsCCBCtl != NULL)
+               {
+                       DevmemReleaseCpuVirtAddr(*ppsCCBCtlMemDesc);
+                       *ppsCCBCtl = NULL;
+               }
+               DevmemFwUnmapAndFree(psDevInfo, *ppsCCBCtlMemDesc);
+               *ppsCCBCtlMemDesc = NULL;
+       }
+}
+
+/*!
+*******************************************************************************
+ @Function             RGXFreeCCBReturnSlots
+ @Description  Free the kernel CCB's return slot array and associated mappings
+ @Input                        psDevInfo              Device Info struct
+ @Input                        ppui32CCBRtnSlots      CPU mapping of slot array
+ @Input                        ppsCCBRtnSlotsMemDesc  Slot array's device memdesc
+******************************************************************************/
+static void RGXFreeCCBReturnSlots(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                  IMG_UINT32         **ppui32CCBRtnSlots,
+                                                                 DEVMEM_MEMDESC     **ppsCCBRtnSlotsMemDesc)
+{
+       /* Free the return slot array if allocated */
+       if (*ppsCCBRtnSlotsMemDesc != NULL)
+       {
+               /* Before freeing, ensure the CPU mapping is released as well */
+               if (*ppui32CCBRtnSlots != NULL)
+               {
+                       DevmemReleaseCpuVirtAddr(*ppsCCBRtnSlotsMemDesc);
+                       *ppui32CCBRtnSlots = NULL;
+               }
+               DevmemFwUnmapAndFree(psDevInfo, *ppsCCBRtnSlotsMemDesc);
+               *ppsCCBRtnSlotsMemDesc = NULL;
+       }
+}
+
+/*!
+*******************************************************************************
+ @Function             RGXSetupCCB
+ @Description  Allocate and initialise a circular command buffer
+ @Input                        psDevInfo
+ @Input                        ppsCCBCtl
+ @Input                        ppsCCBCtlMemDesc
+ @Input                        ppui8CCB
+ @Input                        ppsCCBMemDesc
+ @Input                        psCCBCtlFWAddr
+ @Input                        psCCBFWAddr
+ @Input                        ui32NumCmdsLog2
+ @Input                        ui32CmdSize
+ @Input                        uiCCBMemAllocFlags
+ @Input                        pszName
+
+ @Return               PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXSetupCCB(PVRSRV_RGXDEV_INFO     *psDevInfo,
+                                                               RGXFWIF_CCB_CTL         **ppsCCBCtl,
+                                                               DEVMEM_MEMDESC          **ppsCCBCtlMemDesc,
+                                                               IMG_UINT8                       **ppui8CCB,
+                                                               DEVMEM_MEMDESC          **ppsCCBMemDesc,
+                                                               PRGXFWIF_CCB_CTL        *psCCBCtlFWAddr,
+                                                               PRGXFWIF_CCB            *psCCBFWAddr,
+                                                               IMG_UINT32                      ui32NumCmdsLog2,
+                                                               IMG_UINT32                      ui32CmdSize,
+                                                               PVRSRV_MEMALLOCFLAGS_T uiCCBMemAllocFlags,
+                                                               const IMG_CHAR          *pszName)
+{
+       PVRSRV_ERROR            eError;
+       RGXFWIF_CCB_CTL         *psCCBCtl;
+       IMG_UINT32              ui32CCBSize = (1U << ui32NumCmdsLog2);
+       IMG_CHAR                szCCBCtlName[DEVMEM_ANNOTATION_MAX_LEN];
+       IMG_INT32               iStrLen;
+
+       /* Append "Control" to the name for the control struct. */
+       iStrLen = OSSNPrintf(szCCBCtlName, sizeof(szCCBCtlName), "%sControl", pszName);
+       PVR_ASSERT(iStrLen < sizeof(szCCBCtlName));
+
+       if (unlikely(iStrLen < 0))
+       {
+               OSStringLCopy(szCCBCtlName, "FwCCBControl", DEVMEM_ANNOTATION_MAX_LEN);
+       }
+
+       /* Allocate memory for the CCB control.*/
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                                                                 PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                                                                 PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                                                                 PVRSRV_MEMALLOCFLAG_GPU_UNCACHED |
+                                                                 PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+                                                                 PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                                                                 PVRSRV_MEMALLOCFLAG_CPU_UNCACHED |
+                                                                 PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+                                                                 PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+                                                                 PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN),
+                                                                 sizeof(RGXFWIF_CCB_CTL),
+                                                                 szCCBCtlName,
+                                                                 ppsCCBCtlMemDesc,
+                                                                 psCCBCtlFWAddr,
+                                                                 (void**) ppsCCBCtl,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail);
+
+       /*
+        * Allocate memory for the CCB.
+        * (this will reference further command data in non-shared CCBs)
+        */
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 uiCCBMemAllocFlags,
+                                                                 ui32CCBSize * ui32CmdSize,
+                                                                 pszName,
+                                                                 ppsCCBMemDesc,
+                                                                 psCCBFWAddr,
+                                                                 (void**) ppui8CCB,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail);
+
+       /*
+        * Initialise the CCB control.
+        */
+       psCCBCtl = *ppsCCBCtl;
+       psCCBCtl->ui32WriteOffset = 0;
+       psCCBCtl->ui32ReadOffset = 0;
+       psCCBCtl->ui32WrapMask = ui32CCBSize - 1;
+       psCCBCtl->ui32CmdSize = ui32CmdSize;
+
+       /* Pdump the CCB control */
+       PDUMPCOMMENT(psDevInfo->psDeviceNode, "Initialise %s", szCCBCtlName);
+       DevmemPDumpLoadMem(*ppsCCBCtlMemDesc,
+                                          0,
+                                          sizeof(RGXFWIF_CCB_CTL),
+                                          0);
+
+       return PVRSRV_OK;
+
+fail:
+       RGXFreeCCB(psDevInfo,
+                          ppsCCBCtl,
+                          ppsCCBCtlMemDesc,
+                          ppui8CCB,
+                          ppsCCBMemDesc);
+
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
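+/*
+ * Note on the wrap mask initialised above: because the CCB size is a power of
+ * two (1U << ui32NumCmdsLog2), advancing a read/write offset reduces to a
+ * bitwise AND rather than a modulo. A minimal indexing sketch (illustrative
+ * only, not the actual producer/consumer code):
+ *
+ *     psCCBCtl->ui32WriteOffset = (psCCBCtl->ui32WriteOffset + 1) &
+ *                                 psCCBCtl->ui32WrapMask;
+ *     pui8Cmd = pui8CCB + (psCCBCtl->ui32WriteOffset * psCCBCtl->ui32CmdSize);
+ */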
+
+static void RGXSetupFaultReadRegisterRollback(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       PMR *psPMR;
+
+       if (psDevInfo->psRGXFaultAddressMemDesc)
+       {
+               if (DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc, (void **)&psPMR) == PVRSRV_OK)
+               {
+                       PMRUnlockSysPhysAddresses(psPMR);
+               }
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFaultAddressMemDesc);
+               psDevInfo->psRGXFaultAddressMemDesc = NULL;
+       }
+}
+
+static PVRSRV_ERROR RGXSetupFaultReadRegister(PVRSRV_DEVICE_NODE       *psDeviceNode, RGXFWIF_SYSINIT *psFwSysInit)
+{
+       PVRSRV_ERROR            eError = PVRSRV_OK;
+       IMG_UINT32                      *pui32MemoryVirtAddr;
+       IMG_UINT32                      i;
+       size_t                          ui32PageSize = OSGetPageSize();
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+       PMR                                     *psPMR;
+
+       /* Allocate a page of memory to use for page faults on non-blocking memory transactions.
+        * It doesn't need to be zeroed on allocation as it is initialised with the 0xDEADBEE0 pattern below. */
+       psDevInfo->psRGXFaultAddressMemDesc = NULL;
+       eError = DevmemFwAllocateExportable(psDeviceNode,
+                       ui32PageSize,
+                       ui32PageSize,
+                       RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS & ~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+                       "FwExFaultAddress",
+                       &psDevInfo->psRGXFaultAddressMemDesc);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed to allocate mem for fault address (%u)",
+                        __func__, eError));
+               goto failFaultAddressDescAlloc;
+       }
+
+       eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc,
+                                                                         (void **)&pui32MemoryVirtAddr);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed to acquire mem for fault address (%u)",
+                        __func__, eError));
+               goto failFaultAddressDescAqCpuVirt;
+       }
+
+       if (!psDeviceNode->bAutoVzFwIsUp)
+       {
+               /* fill the page with a known pattern when booting the firmware */
+               for (i = 0; i < ui32PageSize/sizeof(IMG_UINT32); i++)
+               {
+                       *(pui32MemoryVirtAddr + i) = 0xDEADBEE0;
+               }
+       }
+
+       OSWriteMemoryBarrier(pui32MemoryVirtAddr);
+
+       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc);
+
+       eError = DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc, (void **)&psPMR);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Error getting PMR for fault address (%u)",
+                        __func__, eError));
+
+               goto failFaultAddressDescGetPMR;
+       }
+       else
+       {
+               IMG_BOOL bValid;
+               IMG_UINT32 ui32Log2PageSize = OSGetPageShift();
+
+               eError = PMRLockSysPhysAddresses(psPMR);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Error locking physical address for fault address MemDesc (%u)",
+                                __func__, eError));
+
+                       goto failFaultAddressDescLockPhys;
+               }
+
+               eError = PMR_DevPhysAddr(psPMR,ui32Log2PageSize, 1, 0, &(psFwSysInit->sFaultPhysAddr), &bValid);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Error getting physical address for fault address MemDesc (%u)",
+                                __func__, eError));
+
+                       goto failFaultAddressDescGetPhys;
+               }
+
+               if (!bValid)
+               {
+                       psFwSysInit->sFaultPhysAddr.uiAddr = 0;
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed getting physical address for fault address MemDesc - invalid page (0x%" IMG_UINT64_FMTSPECX ")",
+                                __func__, psFwSysInit->sFaultPhysAddr.uiAddr));
+
+                       goto failFaultAddressDescGetPhys;
+               }
+       }
+
+       return PVRSRV_OK;
+
+failFaultAddressDescGetPhys:
+       PMRUnlockSysPhysAddresses(psPMR);
+
+failFaultAddressDescLockPhys:
+failFaultAddressDescGetPMR:
+failFaultAddressDescAqCpuVirt:
+       DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFaultAddressMemDesc);
+       psDevInfo->psRGXFaultAddressMemDesc = NULL;
+
+failFaultAddressDescAlloc:
+
+       return eError;
+}
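+/*
+ * The physical address recorded in psFwSysInit->sFaultPhysAddr above backs
+ * page faults on non-blocking memory transactions; the 0xDEADBEE0 fill
+ * pattern makes any data read back from that page easy to recognise when
+ * debugging.
+ */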
+
+#if defined(PDUMP)
+/* Replace the DevPhy address with the one Pdump allocates at pdump_player run time */
+static PVRSRV_ERROR RGXPDumpFaultReadRegister(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       PVRSRV_ERROR eError;
+       PMR *psFWInitPMR, *psFaultAddrPMR;
+       IMG_UINT32 ui32Dstoffset;
+
+       psFWInitPMR = (PMR *)(psDevInfo->psRGXFWIfSysInitMemDesc->psImport->hPMR);
+       ui32Dstoffset = psDevInfo->psRGXFWIfSysInitMemDesc->uiOffset + offsetof(RGXFWIF_SYSINIT, sFaultPhysAddr.uiAddr);
+
+       psFaultAddrPMR = (PMR *)(psDevInfo->psRGXFaultAddressMemDesc->psImport->hPMR);
+
+       eError = PDumpMemLabelToMem64(psFaultAddrPMR,
+                       psFWInitPMR,
+                       0,
+                       ui32Dstoffset,
+                       PDUMP_FLAGS_CONTINUOUS);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Dump of Fault Page Phys address failed(%u)", __func__, eError));
+       }
+       return eError;
+}
+#endif
+
+#if defined(SUPPORT_TBI_INTERFACE)
+/*************************************************************************/ /*!
+@Function       RGXTBIBufferIsInitRequired
+
+@Description    Returns true if the firmware TBI buffer is not yet allocated
+               but might soon be required by the firmware. The TBI buffer is
+               allocated on demand to reduce the RAM footprint on systems
+               that do not need it.
+
+@Input          psDevInfo       RGX device info
+
+@Return                IMG_BOOL        Whether an on-demand allocation is needed
+*/ /**************************************************************************/
+INLINE IMG_BOOL RGXTBIBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       RGXFWIF_TRACEBUF*  psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl;
+
+       /* The firmware expects a TBI buffer only when:
+        *      - Logtype is "tbi" AND
+        *      - at least one LogGroup is configured
+        */
+       if ((psDevInfo->psRGXFWIfTBIBufferMemDesc == NULL)
+                       && (psTraceBufCtl->ui32LogType & ~RGXFWIF_LOG_TYPE_TRACE)
+                       && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK))
+       {
+               return IMG_TRUE;
+       }
+
+       return IMG_FALSE;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXTBIBufferDeinit
+
+@Description    Deinitialises all the allocations and references that are made
+               for the FW tbi buffer
+
+@Input          psDevInfo       RGX device info
+@Return                void
+*/ /**************************************************************************/
+static void RGXTBIBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfTBIBufferMemDesc);
+       psDevInfo->psRGXFWIfTBIBufferMemDesc = NULL;
+       psDevInfo->ui32FWIfTBIBufferSize = 0;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXTBIBufferInitOnDemandResources
+
+@Description    Allocates the firmware TBI buffer required for reading the SF
+               strings and initialises it with the SFs.
+
+@Input          psDevInfo       RGX device info
+
+@Return                PVRSRV_OK       If all went well, PVRSRV_ERROR otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXTBIBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       PVRSRV_ERROR       eError = PVRSRV_OK;
+       IMG_UINT32         i, ui32Len;
+       const IMG_UINT32   ui32FWTBIBufsize = g_ui32SFsCount * sizeof(RGXFW_STID_FMT);
+       RGXFW_STID_FMT     *psFW_SFs = NULL;
+
+       /* Firmware address should not be already set */
+       if (psDevInfo->sRGXFWIfTBIBuffer.ui32Addr)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: FW address for FWTBI is already set. Replacing it with the newly allocated one",
+                        __func__));
+       }
+
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS,
+                                                                 ui32FWTBIBufsize,
+                                                                 "FwTBIBuffer",
+                                                                 &psDevInfo->psRGXFWIfTBIBufferMemDesc,
+                                                                 &psDevInfo->sRGXFWIfTBIBuffer,
+                                                                 (void**)&psFW_SFs,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail);
+
+       /* Copy SFs entries to FW buffer */
+       for (i = 0; i < g_ui32SFsCount; i++)
+       {
+               OSCachedMemCopy(&psFW_SFs[i].ui32Id, &SFs[i].ui32Id, sizeof(SFs[i].ui32Id));
+               ui32Len = OSStringLength(SFs[i].psName);
+               OSCachedMemCopy(psFW_SFs[i].sName, SFs[i].psName, MIN(ui32Len, IMG_SF_STRING_MAX_SIZE - 1));
+       }
+
+       /* flush write buffers for psFW_SFs */
+       OSWriteMemoryBarrier(psFW_SFs);
+
+       /* Set size of TBI buffer */
+       psDevInfo->ui32FWIfTBIBufferSize = ui32FWTBIBufsize;
+
+       /* release CPU mapping */
+       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTBIBufferMemDesc);
+
+       return PVRSRV_OK;
+fail:
+       RGXTBIBufferDeinit(psDevInfo);
+       return eError;
+}
+#endif
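+/*
+ * Hedged sketch of a call site for the on-demand TBI buffer helpers above
+ * (hypothetical placement, guarded the same way as the helpers themselves):
+ *
+ *     #if defined(SUPPORT_TBI_INTERFACE)
+ *     if (RGXTBIBufferIsInitRequired(psDevInfo))
+ *     {
+ *             eError = RGXTBIBufferInitOnDemandResources(psDevInfo);
+ *             PVR_LOG_IF_ERROR(eError, "RGXTBIBufferInitOnDemandResources");
+ *     }
+ *     #endif
+ */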
+
+/*************************************************************************/ /*!
+@Function       RGXTraceBufferIsInitRequired
+
+@Description    Returns true if the firmware trace buffer is not yet allocated
+               but might soon be required by the firmware. The trace buffer is
+               allocated on demand to reduce the RAM footprint on systems that
+               do not need firmware trace.
+
+@Input          psDevInfo       RGX device info
+
+@Return                IMG_BOOL        Whether an on-demand allocation is needed
+*/ /**************************************************************************/
+INLINE IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       RGXFWIF_TRACEBUF*  psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl;
+
+       /* The firmware expects a trace buffer only when:
+        *      - Logtype is "trace" AND
+        *      - at least one LogGroup is configured
+        *      - the Driver Mode is not Guest
+        */
+       if ((psDevInfo->psRGXFWIfTraceBufferMemDesc[0] == NULL)
+               && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)
+               && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)
+               && !PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               return IMG_TRUE;
+       }
+
+       return IMG_FALSE;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXTraceBufferDeinit
+
+@Description    Deinitialises all the allocations and references that are made
+               for the FW trace buffer(s)
+
+@Input          psDevInfo       RGX device info
+@Return                void
+*/ /**************************************************************************/
+static void RGXTraceBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       RGXFWIF_TRACEBUF*  psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl;
+       IMG_UINT32 i;
+
+       for (i = 0; i < RGXFW_THREAD_NUM; i++)
+       {
+               if (psDevInfo->psRGXFWIfTraceBufferMemDesc[i])
+               {
+                       if (psTraceBufCtl->sTraceBuf[i].pui32TraceBuffer != NULL)
+                       {
+                               DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufferMemDesc[i]);
+                               psTraceBufCtl->sTraceBuf[i].pui32TraceBuffer = NULL;
+                       }
+
+                       DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfTraceBufferMemDesc[i]);
+                       psDevInfo->psRGXFWIfTraceBufferMemDesc[i] = NULL;
+               }
+       }
+}
+
+/*************************************************************************/ /*!
+@Function       RGXTraceBufferInitOnDemandResources
+
+@Description    Allocates the firmware trace buffer required for dumping trace
+               info from the firmware.
+
+@Input          psDevInfo       RGX device info
+
+@Return                PVRSRV_OK       If all went well, PVRSRV_ERROR otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO* psDevInfo,
+                                                                                                PVRSRV_MEMALLOCFLAGS_T uiAllocFlags)
+{
+       RGXFWIF_TRACEBUF*  psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl;
+       PVRSRV_ERROR       eError = PVRSRV_OK;
+       IMG_UINT32         ui32FwThreadNum;
+       IMG_UINT32         ui32DefaultTraceBufSize;
+       IMG_DEVMEM_SIZE_T  uiTraceBufSizeInBytes;
+       void               *pvAppHintState = NULL;
+       IMG_CHAR           pszBufferName[] = "FwTraceBuffer_Thread0";
+
+       /* Check AppHint value for module-param FWTraceBufSizeInDWords */
+       OSCreateKMAppHintState(&pvAppHintState);
+       ui32DefaultTraceBufSize = RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS;
+       OSGetKMAppHintUINT32(APPHINT_NO_DEVICE,
+                                                pvAppHintState,
+                                                FWTraceBufSizeInDWords,
+                                                &ui32DefaultTraceBufSize,
+                                                &psTraceBufCtl->ui32TraceBufSizeInDWords);
+       OSFreeKMAppHintState(pvAppHintState);
+       pvAppHintState = NULL;
+
+       uiTraceBufSizeInBytes = psTraceBufCtl->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32);
+
+       for (ui32FwThreadNum = 0; ui32FwThreadNum < RGXFW_THREAD_NUM; ui32FwThreadNum++)
+       {
+#if !defined(SUPPORT_AUTOVZ)
+               /* Ensure allocation API is only called when not already allocated */
+               PVR_ASSERT(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum] == NULL);
+               /* Firmware address should not be already set */
+               PVR_ASSERT(psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32RGXFWIfTraceBuffer.ui32Addr == 0x0);
+#endif
+
+               /* update the firmware thread number in the Trace Buffer's name */
+               pszBufferName[sizeof(pszBufferName) - 2] += ui32FwThreadNum;
+
+               eError = RGXSetupFwAllocation(psDevInfo,
+                                                                         uiAllocFlags,
+                                                                         uiTraceBufSizeInBytes,
+                                                                         pszBufferName,
+                                                                         &psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum],
+                                                                         &psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32RGXFWIfTraceBuffer,
+                                                                         (void**)&psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32TraceBuffer,
+                                                                         RFW_FWADDR_NOREF_FLAG);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail);
+       }
+
+       return PVRSRV_OK;
+
+fail:
+       RGXTraceBufferDeinit(psDevInfo);
+       return eError;
+}
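+/*
+ * As with the TBI buffer, the trace buffers are only allocated when needed.
+ * Hedged sketch of a hypothetical call site (the allocation-flag choice here
+ * is illustrative, reusing a flag set referenced elsewhere in this file):
+ *
+ *     if (RGXTraceBufferIsInitRequired(psDevInfo))
+ *     {
+ *             eError = RGXTraceBufferInitOnDemandResources(psDevInfo,
+ *                                          RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS);
+ *     }
+ */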
+
+#if defined(PDUMP)
+/*************************************************************************/ /*!
+@Function       RGXPDumpLoadFWInitData
+
+@Description    Dumps the initial state of the firmware interface structures
+                into the PDump stream, along with comments describing the
+                run-time configuration options that can be edited at playback
+                time.
+
+@Input          psDevInfo                  RGX device info
+@Input          ui32HWPerfCountersDataSize Size of the HWPerf counters block
+@Input          bEnableSignatureChecks     Whether FW signature checks are enabled
+ */ /*************************************************************************/
+static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                  IMG_UINT32         ui32HWPerfCountersDataSize,
+                                                                  IMG_BOOL           bEnableSignatureChecks)
+{
+       IMG_UINT32 ui32ConfigFlags    = psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags;
+       IMG_UINT32 ui32FwOsCfgFlags   = psDevInfo->psRGXFWIfFwOsData->ui32FwOsConfigFlags;
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode, "Dump RGXFW Init data");
+       if (!bEnableSignatureChecks)
+       {
+               PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                        "(to enable rgxfw signatures place the following line after the RTCONF line)");
+               DevmemPDumpLoadMem(psDevInfo->psRGXFWIfSysInitMemDesc,
+                                                  offsetof(RGXFWIF_SYSINIT, asSigBufCtl),
+                                                  sizeof(RGXFWIF_SIGBUF_CTL)*(RGXFWIF_DM_MAX),
+                                                  PDUMP_FLAGS_CONTINUOUS);
+       }
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Dump initial state of FW runtime configuration");
+       DevmemPDumpLoadMem(psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+                                          0,
+                                          sizeof(RGXFWIF_RUNTIME_CFG),
+                                          PDUMP_FLAGS_CONTINUOUS);
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Dump rgxfw hwperfctl structure");
+       DevmemPDumpLoadZeroMem(psDevInfo->psRGXFWIfHWPerfCountersMemDesc,
+                                                  0,
+                                                  ui32HWPerfCountersDataSize,
+                                                  PDUMP_FLAGS_CONTINUOUS);
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Dump rgxfw trace control structure");
+       DevmemPDumpLoadMem(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+                                          0,
+                                          sizeof(RGXFWIF_TRACEBUF),
+                                          PDUMP_FLAGS_CONTINUOUS);
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Dump firmware system data structure");
+       DevmemPDumpLoadMem(psDevInfo->psRGXFWIfFwSysDataMemDesc,
+                                          0,
+                                          sizeof(RGXFWIF_SYSDATA),
+                                          PDUMP_FLAGS_CONTINUOUS);
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Dump firmware OS data structure");
+       DevmemPDumpLoadMem(psDevInfo->psRGXFWIfFwOsDataMemDesc,
+                                          0,
+                                          sizeof(RGXFWIF_OSDATA),
+                                          PDUMP_FLAGS_CONTINUOUS);
+
+#if defined(SUPPORT_TBI_INTERFACE)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Dump rgx TBI buffer");
+       DevmemPDumpLoadMem(psDevInfo->psRGXFWIfTBIBufferMemDesc,
+                                          0,
+                                          psDevInfo->ui32FWIfTBIBufferSize,
+                                          PDUMP_FLAGS_CONTINUOUS);
+#endif /* defined(SUPPORT_TBI_INTERFACE) */
+
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Dump rgxfw register configuration buffer");
+       DevmemPDumpLoadMem(psDevInfo->psRGXFWIfRegCfgMemDesc,
+                                          0,
+                                          sizeof(RGXFWIF_REG_CFG),
+                                          PDUMP_FLAGS_CONTINUOUS);
+#endif /* defined(SUPPORT_USER_REGISTER_CONFIGURATION) */
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Dump rgxfw system init structure");
+       DevmemPDumpLoadMem(psDevInfo->psRGXFWIfSysInitMemDesc,
+                                          0,
+                                          sizeof(RGXFWIF_SYSINIT),
+                                          PDUMP_FLAGS_CONTINUOUS);
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Dump rgxfw os init structure");
+       DevmemPDumpLoadMem(psDevInfo->psRGXFWIfOsInitMemDesc,
+                                          0,
+                                          sizeof(RGXFWIF_OSINIT),
+                                          PDUMP_FLAGS_CONTINUOUS);
+
+       /* RGXFW Init structure needs to be loaded before we overwrite FaultPhysAddr, else this address patching won't have any effect */
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Overwrite FaultPhysAddr of FwSysInit in pdump with actual physical address");
+       RGXPDumpFaultReadRegister(psDevInfo);
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "RTCONF: run-time configuration");
+
+
+       /* Dump the config options so they can be edited. */
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "(Set the FW system config options here)");
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Ctx Switch Rand mode:                      0x%08x)", RGXFWIF_INICFG_CTXSWITCH_MODE_RAND);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Ctx Switch Soft Reset Enable:              0x%08x)", RGXFWIF_INICFG_CTXSWITCH_SRESET_EN);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Enable HWPerf:                             0x%08x)", RGXFWIF_INICFG_HWPERF_EN);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Enable generic DM Killing Rand mode:       0x%08x)", RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Rascal+Dust Power Island:                  0x%08x)", RGXFWIF_INICFG_POW_RASCALDUST);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( FBCDC Version 3.1 Enable:                  0x%08x)", RGXFWIF_INICFG_FBCDC_V3_1_EN);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Check MList:                               0x%08x)", RGXFWIF_INICFG_CHECK_MLIST_EN);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Disable Auto Clock Gating:                 0x%08x)", RGXFWIF_INICFG_DISABLE_CLKGATING_EN);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Enable register configuration:             0x%08x)", RGXFWIF_INICFG_REGCONFIG_EN);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Assert on TA Out-of-Memory:                0x%08x)", RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Disable HWPerf custom counter filter:      0x%08x)", RGXFWIF_INICFG_HWP_DISABLE_FILTER);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Enable Ctx Switch profile mode: 0x%08x (none=b'000, fast=b'001, medium=b'010, slow=b'011, nodelay=b'100))", RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Disable DM overlap (except TA during SPM): 0x%08x)", RGXFWIF_INICFG_DISABLE_DM_OVERLAP);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Assert on HWR trigger (page fault, lockup, overrun or poll failure): 0x%08x)", RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Enable coherent memory accesses:           0x%08x)", RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Enable IRQ validation:                     0x%08x)", RGXFWIF_INICFG_VALIDATE_IRQ);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( SPU power state mask change Enable:        0x%08x)", RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN);
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Enable Workload Estimation:                0x%08x)", RGXFWIF_INICFG_WORKEST);
+#if defined(SUPPORT_PDVFS)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Enable Proactive DVFS:                     0x%08x)", RGXFWIF_INICFG_PDVFS);
+#endif /* defined(SUPPORT_PDVFS) */
+#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( CDM Arbitration Mode (task demand=b'01, round robin=b'10): 0x%08x)", RGXFWIF_INICFG_CDM_ARBITRATION_MASK);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( ISP Scheduling Mode (v1=b'01, v2=b'10):    0x%08x)", RGXFWIF_INICFG_ISPSCHEDMODE_MASK);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Validate SOC & USC timers:                 0x%08x)", RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER);
+
+       DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfFwSysDataMemDesc,
+                                                       offsetof(RGXFWIF_SYSDATA, ui32ConfigFlags),
+                                                       ui32ConfigFlags,
+                                                       PDUMP_FLAGS_CONTINUOUS);
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Extended FW system config options not used.)");
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "(Set the FW OS config options here)");
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Ctx Switch TDM Enable:                     0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Ctx Switch TA Enable:                      0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Ctx Switch 3D Enable:                      0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Ctx Switch CDM Enable:                     0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Lower Priority Ctx Switch  2D Enable:      0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Lower Priority Ctx Switch  TA Enable:      0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_GEOM);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Lower Priority Ctx Switch  3D Enable:      0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Lower Priority Ctx Switch CDM Enable:      0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM);
+
+       DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfFwOsDataMemDesc,
+                                                         offsetof(RGXFWIF_OSDATA, ui32FwOsConfigFlags),
+                                                         ui32FwOsCfgFlags,
+                                                         PDUMP_FLAGS_CONTINUOUS);
+
+
+#if defined(SUPPORT_SECURITY_VALIDATION)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "(Select one or more security tests here)");
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Read/write FW private data from non-FW contexts: 0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_DATA);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Read/write FW code from non-FW contexts:         0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_CODE);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Execute FW code from non-secure memory:          0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_NONSECURE);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Execute FW code from secure (non-FW) memory:     0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_SECURE);
+
+       DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc,
+                                 offsetof(RGXFWIF_SYSINIT, ui32SecurityTestFlags),
+                                 psDevInfo->psRGXFWIfSysInit->ui32SecurityTestFlags,
+                                 PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( PID filter type: %X=INCLUDE_ALL_EXCEPT, %X=EXCLUDE_ALL_EXCEPT)",
+                                RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT,
+                                RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT);
+
+       DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc,
+                       offsetof(RGXFWIF_SYSINIT, sPIDFilter.eMode),
+                       psDevInfo->psRGXFWIfSysInit->sPIDFilter.eMode,
+                       PDUMP_FLAGS_CONTINUOUS);
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( PID filter PID/OSID list (Up to %u entries. Terminate with a zero PID))",
+                                RGXFWIF_PID_FILTER_MAX_NUM_PIDS);
+       {
+               IMG_UINT32 i;
+
+               /* generate a few WRWs in the pdump stream as an example */
+               for (i = 0; i < MIN(RGXFWIF_PID_FILTER_MAX_NUM_PIDS, 8); i++)
+               {
+                       /*
+                        * Some compilers cannot cope with offsetof() for these fields: the array
+                        * index is a non-const variable, but the expression must be a compile-time
+                        * constant ("expression must have a constant value" is the typical compiler
+                        * output). Hence the manual offset computation below.
+                        */
+                       const IMG_DEVMEM_OFFSET_T uiPIDOff
+                       = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_SYSINIT *)0)->sPIDFilter.asItems[i].uiPID);
+
+                       const IMG_DEVMEM_OFFSET_T uiOSIDOff
+                       = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_SYSINIT *)0)->sPIDFilter.asItems[i].ui32OSID);
+
+                       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                                "(PID and OSID pair %u)", i);
+
+                       PDUMPCOMMENT(psDevInfo->psDeviceNode, "(PID)");
+                       DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc,
+                                                                         uiPIDOff,
+                                                                         0,
+                                                                         PDUMP_FLAGS_CONTINUOUS);
+
+                       PDUMPCOMMENT(psDevInfo->psDeviceNode, "(OSID)");
+                       DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc,
+                                                                         uiOSIDOff,
+                                                                         0,
+                                                                         PDUMP_FLAGS_CONTINUOUS);
+               }
+       }
+
+       /*
+        * Dump the log config so it can be edited.
+        */
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "(Set the log config here)");
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Log Type: set bit 0 for TRACE, reset for TBI)");
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( MAIN Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MAIN);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( MTS Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MTS);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( CLEANUP Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_CLEANUP);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( CSW Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_CSW);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( BIF Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_BIF);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( PM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_PM);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( RTD Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_RTD);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( SPM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_SPM);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( POW Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_POW);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( HWR Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWR);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( HWP Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWP);
+
+#if defined(RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA))
+       {
+               PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                        "( DMA Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DMA);
+       }
+#endif
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( MISC Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MISC);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( DEBUG Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DEBUG);
+       DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+                                                         offsetof(RGXFWIF_TRACEBUF, ui32LogType),
+                                                         psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType,
+                                                         PDUMP_FLAGS_CONTINUOUS);
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Set the HWPerf Filter config here, see \"hwperfbin2jsont -h\"");
+       DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfSysInitMemDesc,
+                                                         offsetof(RGXFWIF_SYSINIT, ui64HWPerfFilter),
+                                                         psDevInfo->psRGXFWIfSysInit->ui64HWPerfFilter,
+                                                         PDUMP_FLAGS_CONTINUOUS);
+
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "(Number of register configurations for types (byte index): pow on(%d), dust change(%d), ta(%d), 3d(%d), cdm(%d), tla(%d), TDM(%d))",
+                                RGXFWIF_REG_CFG_TYPE_PWR_ON,
+                                RGXFWIF_REG_CFG_TYPE_DUST_CHANGE,
+                                RGXFWIF_REG_CFG_TYPE_TA,
+                                RGXFWIF_REG_CFG_TYPE_3D,
+                                RGXFWIF_REG_CFG_TYPE_CDM,
+                                RGXFWIF_REG_CFG_TYPE_TLA,
+                                RGXFWIF_REG_CFG_TYPE_TDM);
+
+       {
+               IMG_UINT32 i;
+
+               /* Write 32 bits in each iteration as required by PDUMP WRW command */
+               for (i = 0; i < RGXFWIF_REG_CFG_TYPE_ALL; i += sizeof(IMG_UINT32))
+               {
+                       DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRegCfgMemDesc,
+                                                                       offsetof(RGXFWIF_REG_CFG, aui8NumRegsType[i]),
+                                                                       0,
+                                                                       PDUMP_FLAGS_CONTINUOUS);
+               }
+       }
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode, "(Set registers here: address, mask, value)");
+       DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc,
+                                                         offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Addr),
+                                                         0,
+                                                         PDUMP_FLAGS_CONTINUOUS);
+       DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc,
+                                                         offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Mask),
+                                                         0,
+                                                         PDUMP_FLAGS_CONTINUOUS);
+       DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc,
+                                                         offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Value),
+                                                         0,
+                                                         PDUMP_FLAGS_CONTINUOUS);
+#endif /* SUPPORT_USER_REGISTER_CONFIGURATION */
+}
+#endif /* defined(PDUMP) */
+
+/*!
+*******************************************************************************
+ @Function    RGXSetupFwGuardPage
+
+ @Description Allocate a Guard Page at the start of a Guest's Main Heap
+
+ @Input       psDevInfo
+
+ @Return      PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXSetupFwGuardPage(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       PVRSRV_ERROR eError;
+
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 (RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS |
+                                                                  PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN)),
+                                                                 OSGetPageSize(),
+                                                                 "FwGuardPage",
+                                                                 &psDevInfo->psRGXFWHeapGuardPageReserveMemDesc,
+                                                                 NULL,
+                                                                 NULL,
+                                                                 RFW_FWADDR_FLAG_NONE);
+
+       return eError;
+}
+
+/*!
+*******************************************************************************
+ @Function    RGXSetupFwSysData
+
+ @Description Sets up all system-wide firmware related data
+
+ @Input       psDeviceNode
+
+ @Return      PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE       *psDeviceNode,
+                                                                         IMG_BOOL                 bEnableSignatureChecks,
+                                                                         IMG_UINT32               ui32SignatureChecksBufSize,
+                                                                         IMG_UINT32               ui32HWPerfFWBufSizeKB,
+                                                                         IMG_UINT64               ui64HWPerfFilter,
+                                                                         IMG_UINT32               ui32ConfigFlags,
+                                                                         IMG_UINT32               ui32ConfigFlagsExt,
+                                                                         IMG_UINT32               ui32LogType,
+                                                                         IMG_UINT32               ui32FilterFlags,
+                                                                         IMG_UINT32               ui32JonesDisableMask,
+                                                                         IMG_UINT32               ui32HWPerfCountersDataSize,
+                                                                         IMG_UINT32               *pui32TPUTrilinearFracMask,
+                                                                         RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf,
+                                                                         FW_PERF_CONF             eFirmwarePerf)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       RGXFWIF_SYSINIT *psFwSysInitScratch = NULL;
+
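+       /* Build the SysInit contents in a local scratch copy first; once fully
+        * populated it is copied into the FW-visible structure with
+        * OSCachedMemCopyWMB() near the end of this function (skipped when the
+        * AutoVz firmware is already up). */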
+       psFwSysInitScratch = OSAllocZMem(sizeof(*psFwSysInitScratch));
+       PVR_LOG_GOTO_IF_NOMEM(psFwSysInitScratch, eError, fail);
+
+       /* Sys Fw init data */
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 (RGX_FWSHAREDMEM_CONFIG_ALLOCFLAGS |
+                                                                  PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)) &
+                                                                  RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp),
+                                                                 sizeof(RGXFWIF_SYSINIT),
+                                                                 "FwSysInitStructure",
+                                                                 &psDevInfo->psRGXFWIfSysInitMemDesc,
+                                                                 NULL,
+                                                                 (void**) &psDevInfo->psRGXFWIfSysInit,
+                                                                 RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "Firmware Sys Init structure allocation", fail);
+
+       /* Setup Fault read register */
+       eError = RGXSetupFaultReadRegister(psDeviceNode, psFwSysInitScratch);
+       PVR_LOG_GOTO_IF_ERROR(eError, "Fault read register setup", fail);
+
+#if defined(SUPPORT_AUTOVZ)
+       psFwSysInitScratch->ui32VzWdgPeriod = PVR_AUTOVZ_WDG_PERIOD_MS;
+#endif
+
+       /* RD Power Island */
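+       /* Enabled when forced on, or when the configuration is left at the default
+        * and the system timing info requests it. */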
+       {
+               RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+               IMG_BOOL bSysEnableRDPowIsland = psRGXData->psRGXTimingInfo->bEnableRDPowIsland;
+               IMG_BOOL bEnableRDPowIsland = ((eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_DEFAULT) && bSysEnableRDPowIsland) ||
+                                                                               (eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_FORCE_ON);
+
+               ui32ConfigFlags |= bEnableRDPowIsland? RGXFWIF_INICFG_POW_RASCALDUST : 0;
+       }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       ui32ConfigFlags |= RGXFWIF_INICFG_WORKEST;
+#if defined(SUPPORT_PDVFS)
+       {
+               RGXFWIF_PDVFS_OPP   *psPDVFSOPPInfo;
+               IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg;
+
+               /* Pro-active DVFS depends on Workload Estimation */
+               psPDVFSOPPInfo = &psFwSysInitScratch->sPDVFSOPPInfo;
+               psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg;
+               PVR_LOG_IF_FALSE(psDVFSDeviceCfg->pasOPPTable, "RGXSetupFwSysData: Missing OPP Table");
+
+               if (psDVFSDeviceCfg->pasOPPTable != NULL)
+               {
+                       if (psDVFSDeviceCfg->ui32OPPTableSize > ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues))
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: OPP Table too large: Size = %u, Maximum size = %lu",
+                                       __func__,
+                                       psDVFSDeviceCfg->ui32OPPTableSize,
+                                       (unsigned long)(ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues))));
+                               eError = PVRSRV_ERROR_INVALID_PARAMS;
+                               goto fail;
+                       }
+
+                       OSDeviceMemCopy(psPDVFSOPPInfo->asOPPValues,
+                                       psDVFSDeviceCfg->pasOPPTable,
+                                       sizeof(psPDVFSOPPInfo->asOPPValues));
+
+                       psPDVFSOPPInfo->ui32MaxOPPPoint = psDVFSDeviceCfg->ui32OPPTableSize - 1;
+
+                       ui32ConfigFlags |= RGXFWIF_INICFG_PDVFS;
+               }
+       }
+#endif /* defined(SUPPORT_PDVFS) */
+#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */
+
+       /* FW trace control structure */
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS &
+                                                                 RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp),
+                                                                 sizeof(RGXFWIF_TRACEBUF),
+                                                                 "FwTraceCtlStruct",
+                                                                 &psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+                                                                 &psFwSysInitScratch->sTraceBufCtl,
+                                                                 (void**) &psDevInfo->psRGXFWIfTraceBufCtl,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail);
+
+       if (!psDeviceNode->bAutoVzFwIsUp)
+       {
+               /* Set initial firmware log type/group(s) */
+               if (ui32LogType & ~RGXFWIF_LOG_TYPE_MASK)
+               {
+                       eError = PVRSRV_ERROR_INVALID_PARAMS;
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Invalid initial log type (0x%X)",
+                                __func__, ui32LogType));
+                       goto fail;
+               }
+               psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType = ui32LogType;
+       }
+
+       /* When PDUMP is enabled, ALWAYS allocate the on-demand trace buffer resource
+        * (irrespective of which loggroup(s) are enabled), given that logtype/loggroups
+        * can be set in logconfig at any point during PDump playback.
+        * Otherwise, allocate only if required. */
+#if !defined(PDUMP)
+#if defined(SUPPORT_AUTOVZ)
+       /* always allocate trace buffer for AutoVz Host drivers to allow
+        * deterministic addresses of all SysData structures */
+       if ((PVRSRV_VZ_MODE_IS(HOST)) || (RGXTraceBufferIsInitRequired(psDevInfo)))
+#else
+       if (RGXTraceBufferIsInitRequired(psDevInfo))
+#endif
+#endif
+       {
+               eError = RGXTraceBufferInitOnDemandResources(psDevInfo,
+                                                                                                        RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS &
+                                                                                                        RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp));
+       }
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXTraceBufferInitOnDemandResources", fail);
+
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS &
+                                                                 RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp),
+                                                                 sizeof(RGXFWIF_SYSDATA),
+                                                                 "FwSysData",
+                                                                 &psDevInfo->psRGXFWIfFwSysDataMemDesc,
+                                                                 &psFwSysInitScratch->sFwSysData,
+                                                                 (void**) &psDevInfo->psRGXFWIfFwSysData,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail);
+
+       /* GPIO validation setup */
+       psFwSysInitScratch->eGPIOValidationMode = RGXFWIF_GPIO_VAL_OFF;
+#if defined(SUPPORT_VALIDATION)
+       {
+               IMG_INT32 ui32AppHintDefault;
+               IMG_INT32 ui32GPIOValidationMode;
+               void      *pvAppHintState = NULL;
+
+               /* Check AppHint for GPIO validation mode */
+               OSCreateKMAppHintState(&pvAppHintState);
+               ui32AppHintDefault = PVRSRV_APPHINT_GPIOVALIDATIONMODE;
+               OSGetKMAppHintUINT32(APPHINT_NO_DEVICE,
+                                                        pvAppHintState,
+                                                        GPIOValidationMode,
+                                                        &ui32AppHintDefault,
+                                                        &ui32GPIOValidationMode);
+               OSFreeKMAppHintState(pvAppHintState);
+               pvAppHintState = NULL;
+
+               if (ui32GPIOValidationMode >= RGXFWIF_GPIO_VAL_LAST)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Invalid GPIO validation mode: %d, only valid if smaller than %d. Disabling GPIO validation.",
+                                __func__,
+                                ui32GPIOValidationMode,
+                                RGXFWIF_GPIO_VAL_LAST));
+               }
+               else
+               {
+                       psFwSysInitScratch->eGPIOValidationMode = (RGXFWIF_GPIO_VAL_MODE) ui32GPIOValidationMode;
+               }
+       }
+#endif
+
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+       eError = RGXFWSetupCounterBuffer(psDevInfo,
+                                                                        &psDevInfo->psCounterBufferMemDesc,
+                                                                        PAGE_SIZE,
+                                                                        &psFwSysInitScratch->sCounterDumpCtl,
+                                                                        "CounterBuffer");
+       PVR_LOG_GOTO_IF_ERROR(eError, "Counter Buffer allocation", fail);
+#endif /* defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) */
+
+#if defined(SUPPORT_VALIDATION)
+       {
+               IMG_UINT32 ui32EnablePollOnChecksumErrorStatus;
+               IMG_UINT32 ui32ApphintDefault = 0;
+               void      *pvAppHintState = NULL;
+
+               /* Check AppHint for polling on GPU Checksum status */
+               OSCreateKMAppHintState(&pvAppHintState);
+               OSGetKMAppHintUINT32(APPHINT_NO_DEVICE,
+                                                        pvAppHintState,
+                                                        EnablePollOnChecksumErrorStatus,
+                                                        &ui32ApphintDefault,
+                                                        &ui32EnablePollOnChecksumErrorStatus);
+               OSFreeKMAppHintState(pvAppHintState);
+               pvAppHintState = NULL;
+
+               switch (ui32EnablePollOnChecksumErrorStatus)
+               {
+                       case 0: /* no checking */ break;
+                       case 3: psDevInfo->ui32ValidationFlags |= RGX_VAL_KZ_SIG_CHECK_NOERR_EN; break;
+                       case 4: psDevInfo->ui32ValidationFlags |= RGX_VAL_KZ_SIG_CHECK_ERR_EN; break;
+                       default:
+                               PVR_DPF((PVR_DBG_WARNING, "Unsupported value in EnablePollOnChecksumErrorStatus (%d)", ui32EnablePollOnChecksumErrorStatus));
+                               break;
+               }
+       }
+#endif /* defined(SUPPORT_VALIDATION) */
+
+#if defined(SUPPORT_FIRMWARE_GCOV)
+       eError = RGXFWSetupFirmwareGcovBuffer(psDevInfo,
+                                                                                 &psDevInfo->psFirmwareGcovBufferMemDesc,
+                                                                                 RGXFWIF_FIRMWARE_GCOV_BUFFER_SIZE,
+                                                                                 &psFwSysInitScratch->sFirmwareGcovCtl,
+                                                                                 "FirmwareGcovBuffer");
+       PVR_LOG_GOTO_IF_ERROR(eError, "Firmware GCOV buffer allocation", fail);
+       psDevInfo->ui32FirmwareGcovSize = RGXFWIF_FIRMWARE_GCOV_BUFFER_SIZE;
+#endif /* defined(SUPPORT_FIRMWARE_GCOV) */
+
+#if defined(PDUMP)
+       /* Require a minimum amount of memory for the signature buffers */
+       if (ui32SignatureChecksBufSize < RGXFW_SIG_BUFFER_SIZE_MIN)
+       {
+               ui32SignatureChecksBufSize = RGXFW_SIG_BUFFER_SIZE_MIN;
+       }
+
+       /* Setup Signature and Checksum Buffers for TDM, GEOM and 3D */
+       eError = RGXFWSetupSignatureChecks(psDevInfo,
+                                                                          &psDevInfo->psRGXFWSigTAChecksMemDesc,
+                                                                          ui32SignatureChecksBufSize,
+                                                                          &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_GEOM]);
+       PVR_LOG_GOTO_IF_ERROR(eError, "TA Signature check setup", fail);
+       psDevInfo->ui32SigTAChecksSize = ui32SignatureChecksBufSize;
+
+       eError = RGXFWSetupSignatureChecks(psDevInfo,
+                                                                          &psDevInfo->psRGXFWSig3DChecksMemDesc,
+                                                                          ui32SignatureChecksBufSize,
+                                                                          &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_3D]);
+       PVR_LOG_GOTO_IF_ERROR(eError, "3D Signature check setup", fail);
+       psDevInfo->ui32Sig3DChecksSize = ui32SignatureChecksBufSize;
+
+       psDevInfo->psRGXFWSigTDM2DChecksMemDesc = NULL;
+       psDevInfo->ui32SigTDM2DChecksSize = 0;
+
+#if defined(RGX_FEATURE_TDM_PDS_CHECKSUM_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TDM_PDS_CHECKSUM))
+       {
+               /* Buffer allocated only when the feature is present, because all known
+                * TDM signature registers depend on this feature being present */
+               eError = RGXFWSetupSignatureChecks(psDevInfo,
+                                                                                  &psDevInfo->psRGXFWSigTDM2DChecksMemDesc,
+                                                                                  ui32SignatureChecksBufSize,
+                                                                                  &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_TDM]);
+               PVR_LOG_GOTO_IF_ERROR(eError, "TDM Signature check setup", fail);
+               psDevInfo->ui32SigTDM2DChecksSize = ui32SignatureChecksBufSize;
+       }
+#endif
+
+       if (!bEnableSignatureChecks)
+       {
+               psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_TDM].sBuffer.ui32Addr = 0x0;
+               psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_GEOM].sBuffer.ui32Addr = 0x0;
+               psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_3D].sBuffer.ui32Addr = 0x0;
+       }
+#endif /* defined(PDUMP) */
+
+       eError = RGXFWSetupAlignChecks(psDeviceNode,
+                                                                  &psFwSysInitScratch->sAlignChecks);
+       PVR_LOG_GOTO_IF_ERROR(eError, "Alignment checks setup", fail);
+
+       psFwSysInitScratch->ui32FilterFlags = ui32FilterFlags;
+
+       /* Fill the remaining bits of the FW init data */
+       psFwSysInitScratch->sPDSExecBase.uiAddr = RGX_PDSCODEDATA_HEAP_BASE;
+       psFwSysInitScratch->sUSCExecBase.uiAddr = RGX_USCCODE_HEAP_BASE;
+       psFwSysInitScratch->sFBCDCStateTableBase.uiAddr = RGX_FBCDC_HEAP_BASE;
+       psFwSysInitScratch->sFBCDCLargeStateTableBase.uiAddr = RGX_FBCDC_LARGE_HEAP_BASE;
+       psFwSysInitScratch->sTextureHeapBase.uiAddr = RGX_TEXTURE_STATE_HEAP_BASE;
+
+#if defined(FIX_HW_BRN_65273_BIT_MASK)
+       if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273))
+       {
+               /* Use the BRN 65273 workaround heap bases instead */
+               psFwSysInitScratch->sPDSExecBase.uiAddr = RGX_PDSCODEDATA_BRN_65273_HEAP_BASE;
+               psFwSysInitScratch->sUSCExecBase.uiAddr = RGX_USCCODE_BRN_65273_HEAP_BASE;
+       }
+#endif
+
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+       {
+               psFwSysInitScratch->ui32JonesDisableMask = ui32JonesDisableMask;
+       }
+#endif
+#if defined(RGX_FEATURE_SLC_VIVT_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT))
+       {
+               eError = _AllocateSLC3Fence(psDevInfo, psFwSysInitScratch);
+               PVR_LOG_GOTO_IF_ERROR(eError, "SLC3Fence memory allocation", fail);
+       }
+#endif
+#if defined(SUPPORT_PDVFS)
+       /* Core clock rate */
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS &
+                                                                 RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp),
+                                                                 sizeof(IMG_UINT32),
+                                                                 "FwPDVFSCoreClkRate",
+                                                                 &psDevInfo->psRGXFWIFCoreClkRateMemDesc,
+                                                                 &psFwSysInitScratch->sCoreClockRate,
+                                                                 (void**) &psDevInfo->pui32RGXFWIFCoreClkRate,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PDVFS core clock rate memory setup", fail);
+#endif
+       {
+       /* Timestamps */
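+       /* Timer query arrays: start/end timestamps and completed-ops counts for up
+        * to RGX_MAX_TIMER_QUERIES queries, shared between the driver and the FW. */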
+       PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags =
+               PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN) |
+               PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+               PVRSRV_MEMALLOCFLAG_GPU_READABLE | /* XXX ?? */
+               PVRSRV_MEMALLOCFLAG_GPU_UNCACHED |
+               PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+               PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+               PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC |
+               PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+               PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+       /* The timer query arrays */
+       PDUMPCOMMENT(psDeviceNode, "Allocate timer query arrays (FW)");
+       eError = DevmemFwAllocate(psDevInfo,
+                                 sizeof(IMG_UINT64) * RGX_MAX_TIMER_QUERIES,
+                                 uiMemAllocFlags |
+                                 PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE,
+                                 "FwStartTimesArray",
+                                 &psDevInfo->psStartTimeMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to allocate start times array",
+                               __func__));
+               goto fail;
+       }
+
+       eError = DevmemAcquireCpuVirtAddr(psDevInfo->psStartTimeMemDesc,
+                                         (void **)& psDevInfo->pui64StartTimeById);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to map start times array",
+                               __func__));
+               goto fail;
+       }
+
+       eError = DevmemFwAllocate(psDevInfo,
+                                 sizeof(IMG_UINT64) * RGX_MAX_TIMER_QUERIES,
+                                 uiMemAllocFlags |
+                                 PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE,
+                                 "FwEndTimesArray",
+                                 & psDevInfo->psEndTimeMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to allocate end times array",
+                               __func__));
+               goto fail;
+       }
+
+       eError = DevmemAcquireCpuVirtAddr(psDevInfo->psEndTimeMemDesc,
+                                         (void **)& psDevInfo->pui64EndTimeById);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to map end times array",
+                               __func__));
+               goto fail;
+       }
+
+       eError = DevmemFwAllocate(psDevInfo,
+                                 sizeof(IMG_UINT32) * RGX_MAX_TIMER_QUERIES,
+                                 uiMemAllocFlags,
+                                 "FwCompletedOpsArray",
+                                 & psDevInfo->psCompletedMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to allocate completed ops array",
+                               __func__));
+               goto fail;
+       }
+
+       eError = DevmemAcquireCpuVirtAddr(psDevInfo->psCompletedMemDesc,
+                                         (void **)& psDevInfo->pui32CompletedById);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to map completed ops array",
+                               __func__));
+               goto fail;
+       }
+       }
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+       eError = OSLockCreate(&psDevInfo->hTimerQueryLock);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to allocate lock for timer query",
+                               __func__));
+               goto fail;
+       }
+#endif
+#if defined(SUPPORT_TBI_INTERFACE)
+#if !defined(PDUMP)
+       /* allocate only if required */
+       if (RGXTBIBufferIsInitRequired(psDevInfo))
+#endif /* !defined(PDUMP) */
+       {
+               /* When PDUMP is enabled, ALWAYS allocate on-demand TBI buffer resource
+                * (irrespective of loggroup(s) enabled), given that logtype/loggroups
+                * can be set during PDump playback in logconfig, at any point of time
+                */
+               eError = RGXTBIBufferInitOnDemandResources(psDevInfo);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXTBIBufferInitOnDemandResources", fail);
+       }
+
+       psFwSysInitScratch->sTBIBuf = psDevInfo->sRGXFWIfTBIBuffer;
+#endif /* defined(SUPPORT_TBI_INTERFACE) */
+
+       /* Allocate shared buffer for GPU utilisation */
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS &
+                                                                 RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp),
+                                                                 sizeof(RGXFWIF_GPU_UTIL_FWCB),
+                                                                 "FwGPUUtilisationBuffer",
+                                                                 &psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc,
+                                                                 &psFwSysInitScratch->sGpuUtilFWCbCtl,
+                                                                 (void**) &psDevInfo->psRGXFWIfGpuUtilFWCb,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "GPU Utilisation Buffer ctl allocation", fail);
+
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS &
+                                                                 RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp),
+                                                                 sizeof(RGXFWIF_RUNTIME_CFG),
+                                                                 "FwRuntimeCfg",
+                                                                 &psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+                                                                 &psFwSysInitScratch->sRuntimeCfg,
+                                                                 (void**) &psDevInfo->psRGXFWIfRuntimeCfg,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "Firmware runtime configuration memory allocation", fail);
+
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS &
+                                                                 RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp),
+                                                                 sizeof(RGXFWIF_REG_CFG),
+                                                                 "FwRegisterConfigStructure",
+                                                                 &psDevInfo->psRGXFWIfRegCfgMemDesc,
+                                                                 &psFwSysInitScratch->sRegCfg,
+                                                                 NULL,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "Firmware register user configuration structure allocation", fail);
+#endif
+
+       psDevInfo->ui32RGXFWIfHWPerfBufSize = GetHwPerfBufferSize(ui32HWPerfFWBufSizeKB);
+       /* Second stage initialisation of HWPerf; hHWPerfLock was created in the
+        * first stage. See the RGXRegisterDevice() call to RGXHWPerfInit(). */
+       if (psDevInfo->ui64HWPerfFilter == 0)
+       {
+               psDevInfo->ui64HWPerfFilter = ui64HWPerfFilter;
+               psFwSysInitScratch->ui64HWPerfFilter = ui64HWPerfFilter;
+       }
+       else
+       {
+               /* The filter has already been modified. This can happen if
+                * pvr/apphint/EnableFTraceGPU was enabled. */
+               psFwSysInitScratch->ui64HWPerfFilter = psDevInfo->ui64HWPerfFilter;
+       }
+
+#if !defined(PDUMP)
+       /* Allocate if HWPerf filter has already been set. This is possible either
+        * by setting a proper AppHint or enabling GPU ftrace events. */
+       if (psDevInfo->ui64HWPerfFilter != 0)
+#endif
+       {
+               /* When PDUMP is enabled, ALWAYS allocate on-demand HWPerf resources
+                * (irrespective of HWPerf enabled or not), given that HWPerf can be
+                * enabled during PDump playback via RTCONF at any point of time. */
+               eError = RGXHWPerfInitOnDemandResources(psDevInfo);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInitOnDemandResources", fail);
+       }
+
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS &
+                                                                 RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp),
+                                                                 ui32HWPerfCountersDataSize,
+                                                                 "FwHWPerfControlStructure",
+                                                                 &psDevInfo->psRGXFWIfHWPerfCountersMemDesc,
+                                                                 &psFwSysInitScratch->sHWPerfCtl,
+                                                                 NULL,
+                                                                 RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "Firmware HW Perf control struct allocation", fail);
+
+       psDevInfo->bPDPEnabled = (ui32ConfigFlags & RGXFWIF_INICFG_DISABLE_PDP_EN)
+                                                         ? IMG_FALSE : IMG_TRUE;
+
+       psFwSysInitScratch->eFirmwarePerf = eFirmwarePerf;
+
+#if defined(PDUMP)
+       /* default: no filter */
+       psFwSysInitScratch->sPIDFilter.eMode = RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT;
+       psFwSysInitScratch->sPIDFilter.asItems[0].uiPID = 0;
+#endif
+
+#if defined(SUPPORT_VALIDATION)
+       {
+               IMG_UINT32 dm;
+
+               /* TPU trilinear rounding mask override */
+               for (dm = 0; dm < RGXFWIF_TPU_DM_LAST; dm++)
+               {
+                       psFwSysInitScratch->aui32TPUTrilinearFracMask[dm] = pui32TPUTrilinearFracMask[dm];
+               }
+       }
+#endif
+
+#if defined(SUPPORT_SECURITY_VALIDATION)
+       {
+               PVRSRV_MEMALLOCFLAGS_T uiFlags = RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS;
+               PVRSRV_SET_PHYS_HEAP_HINT(GPU_SECURE, uiFlags);
+
+               PDUMPCOMMENT(psDeviceNode, "Allocate non-secure buffer for security validation test");
+               eError = DevmemFwAllocateExportable(psDeviceNode,
+                                                                                       OSGetPageSize(),
+                                                                                       OSGetPageSize(),
+                                                                                       RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS,
+                                                                                       "FwExNonSecureBuffer",
+                                                                                       &psDevInfo->psRGXFWIfNonSecureBufMemDesc);
+               PVR_LOG_GOTO_IF_ERROR(eError, "Non-secure buffer allocation", fail);
+
+               eError = RGXSetFirmwareAddress(&psFwSysInitScratch->pbNonSecureBuffer,
+                                                                          psDevInfo->psRGXFWIfNonSecureBufMemDesc,
+                                                                          0, RFW_FWADDR_NOREF_FLAG);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", fail);
+
+               PDUMPCOMMENT(psDeviceNode, "Allocate secure buffer for security validation test");
+               eError = DevmemFwAllocateExportable(psDeviceNode,
+                                                                                       OSGetPageSize(),
+                                                                                       OSGetPageSize(),
+                                                                                       uiFlags,
+                                                                                       "FwExSecureBuffer",
+                                                                                       &psDevInfo->psRGXFWIfSecureBufMemDesc);
+               PVR_LOG_GOTO_IF_ERROR(eError, "Secure buffer allocation", fail);
+
+               eError = RGXSetFirmwareAddress(&psFwSysInitScratch->pbSecureBuffer,
+                                                                          psDevInfo->psRGXFWIfSecureBufMemDesc,
+                                                                          0, RFW_FWADDR_NOREF_FLAG);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", fail);
+       }
+#endif /* SUPPORT_SECURITY_VALIDATION */
+
+#if defined(RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_LOSSY_37_PERCENT) || RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_DELTA_CORRELATION))
+       {
+               psFwSysInitScratch->ui32TFBCCompressionControl =
+                       (ui32ConfigFlagsExt & RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK) >> RGXFWIF_INICFG_EXT_TFBC_CONTROL_SHIFT;
+       }
+#endif
+
+       /* Initialize FW started flag */
+       psFwSysInitScratch->bFirmwareStarted = IMG_FALSE;
+       psFwSysInitScratch->ui32MarkerVal = 1;
+
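+       /* The initial runtime configuration below is only programmed when the
+        * firmware is not already up (AutoVz); otherwise the existing FW data is
+        * kept, matching RGX_AUTOVZ_KEEP_FW_DATA_MASK() used for the allocations
+        * above. */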
+       if (!psDeviceNode->bAutoVzFwIsUp)
+       {
+               IMG_UINT32 ui32OSIndex;
+
+               RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+               RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+
+               /* Info required by the FW to calculate the ActivePM idle timer latency */
+               psFwSysInitScratch->ui32InitialCoreClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+               psFwSysInitScratch->ui32InitialActivePMLatencyms = psRGXData->psRGXTimingInfo->ui32ActivePMLatencyms;
+
+               /* Initialise variable runtime configuration to the system defaults */
+               psRuntimeCfg->ui32CoreClockSpeed = psFwSysInitScratch->ui32InitialCoreClockSpeed;
+               psRuntimeCfg->ui32ActivePMLatencyms = psFwSysInitScratch->ui32InitialActivePMLatencyms;
+               psRuntimeCfg->bActivePMLatencyPersistant = IMG_TRUE;
+               psRuntimeCfg->ui32WdgPeriodUs = RGXFW_SAFETY_WATCHDOG_PERIOD_IN_US;
+               psRuntimeCfg->ui32HCSDeadlineMS = RGX_HCS_DEFAULT_DEADLINE_MS;
+
+               if (PVRSRV_VZ_MODE_IS(NATIVE))
+               {
+                       psRuntimeCfg->aui32OSidPriority[RGXFW_HOST_OS] = 0;
+               }
+               else
+               {
+                       for (ui32OSIndex = 0; ui32OSIndex < RGX_NUM_OS_SUPPORTED; ui32OSIndex++)
+                       {
+                               const IMG_INT32 ai32DefaultOsPriority[RGXFW_MAX_NUM_OS] =
+                                       {RGX_OSID_0_DEFAULT_PRIORITY, RGX_OSID_1_DEFAULT_PRIORITY, RGX_OSID_2_DEFAULT_PRIORITY, RGX_OSID_3_DEFAULT_PRIORITY,
+                                        RGX_OSID_4_DEFAULT_PRIORITY, RGX_OSID_5_DEFAULT_PRIORITY, RGX_OSID_6_DEFAULT_PRIORITY, RGX_OSID_7_DEFAULT_PRIORITY};
+
+                               /* Set up initial priorities between different OSes */
+                               psRuntimeCfg->aui32OSidPriority[ui32OSIndex] = (IMG_UINT32)ai32DefaultOsPriority[ui32OSIndex];
+                       }
+               }
+
+#if defined(PVR_ENABLE_PHR) && defined(PDUMP)
+               psRuntimeCfg->ui32PHRMode = RGXFWIF_PHR_MODE_RD_RESET;
+#else
+               psRuntimeCfg->ui32PHRMode = 0;
+#endif
+
+               /* Initialize the DefaultDustsNumInit Field to Max Dusts */
+               psRuntimeCfg->ui32DefaultDustsNumInit = psDevInfo->sDevFeatureCfg.ui32MAXDustCount;
+
+               /* flush write buffers for psDevInfo->psRGXFWIfRuntimeCfg */
+               OSWriteMemoryBarrier(psDevInfo->psRGXFWIfRuntimeCfg);
+
+               /* Setup FW coremem data */
+               if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc)
+               {
+                       psFwSysInitScratch->sCorememDataStore.pbyFWAddr = psDevInfo->sFWCorememDataStoreFWAddr;
+
+#if defined(RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX)
+                       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA))
+                       {
+                               RGXSetMetaDMAAddress(&psFwSysInitScratch->sCorememDataStore,
+                                               psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+                                               &psFwSysInitScratch->sCorememDataStore.pbyFWAddr,
+                                               0);
+                       }
+#endif
+               }
+
+               psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags    = ui32ConfigFlags    & RGXFWIF_INICFG_ALL;
+               psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlagsExt = ui32ConfigFlagsExt & RGXFWIF_INICFG_EXT_ALL;
+
+               /* Initialise GPU utilisation buffer */
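+               /* Pack the current OS timestamp and the IDLE state into the single
+                * 64-bit word shared with the FW. */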
+               psDevInfo->psRGXFWIfGpuUtilFWCb->ui64LastWord =
+                               RGXFWIF_GPU_UTIL_MAKE_WORD(OSClockns64(),RGXFWIF_GPU_UTIL_STATE_IDLE);
+
+               /* init HWPERF data */
+               psDevInfo->psRGXFWIfFwSysData->ui32HWPerfRIdx = 0;
+               psDevInfo->psRGXFWIfFwSysData->ui32HWPerfWIdx = 0;
+               psDevInfo->psRGXFWIfFwSysData->ui32HWPerfWrapCount = 0;
+               psDevInfo->psRGXFWIfFwSysData->ui32HWPerfSize = psDevInfo->ui32RGXFWIfHWPerfBufSize;
+               psDevInfo->psRGXFWIfFwSysData->ui32HWPerfUt = 0;
+               psDevInfo->psRGXFWIfFwSysData->ui32HWPerfDropCount = 0;
+               psDevInfo->psRGXFWIfFwSysData->ui32FirstDropOrdinal = 0;
+               psDevInfo->psRGXFWIfFwSysData->ui32LastDropOrdinal = 0;
+
+               /* Send through the BVNC feature flags */
+               eError = RGXServerFeatureFlagsToHWPerfFlags(psDevInfo, &psFwSysInitScratch->sBvncKmFeatureFlags);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXServerFeatureFlagsToHWPerfFlags", fail);
+
+               /* populate the real FwSysInit structure with the values stored in the scratch copy */
+               OSCachedMemCopyWMB(psDevInfo->psRGXFWIfSysInit, psFwSysInitScratch, sizeof(RGXFWIF_SYSINIT));
+       }
+
+       OSFreeMem(psFwSysInitScratch);
+
+       return PVRSRV_OK;
+
+fail:
+       if (psFwSysInitScratch)
+       {
+               OSFreeMem(psFwSysInitScratch);
+       }
+
+       RGXFreeFwSysData(psDevInfo);
+
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+/*!
+*******************************************************************************
+ @Function    RGXSetupFwOsData
+
+ @Description Sets up all os-specific firmware related data
+
+ @Input       psDeviceNode
+
+ @Return      PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXSetupFwOsData(PVRSRV_DEVICE_NODE       *psDeviceNode,
+                                                                        IMG_UINT32               ui32KCCBSizeLog2,
+                                                                        IMG_UINT32               ui32HWRDebugDumpLimit,
+                                                                        IMG_UINT32               ui32FwOsCfgFlags)
+{
+       PVRSRV_ERROR       eError;
+       RGXFWIF_OSINIT     sFwOsInitScratch;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       OSCachedMemSet(&sFwOsInitScratch, 0, sizeof(RGXFWIF_OSINIT));
+
+       if (PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               eError = RGXSetupFwGuardPage(psDevInfo);
+               PVR_LOG_GOTO_IF_ERROR(eError, "Setting up firmware heap guard pages", fail);
+       }
+
+       /* Memory tracking the connection state should be non-volatile and
+        * is not cleared on allocation to prevent loss of pre-reset information */
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_CONFIG_ALLOCFLAGS &
+                                                                 ~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+                                                                 sizeof(RGXFWIF_CONNECTION_CTL),
+                                                                 "FwConnectionCtl",
+                                                                 &psDevInfo->psRGXFWIfConnectionCtlMemDesc,
+                                                                 NULL,
+                                                                 (void**) &psDevInfo->psRGXFWIfConnectionCtl,
+                                                                 RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "Firmware Connection Control structure allocation", fail);
+
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_CONFIG_ALLOCFLAGS |
+                                                                 PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED),
+                                                                 sizeof(RGXFWIF_OSINIT),
+                                                                 "FwOsInitStructure",
+                                                                 &psDevInfo->psRGXFWIfOsInitMemDesc,
+                                                                 NULL,
+                                                                 (void**) &psDevInfo->psRGXFWIfOsInit,
+                                                                 RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "Firmware Os Init structure allocation", fail);
+
+       /* init HWR frame info */
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS,
+                                                                 sizeof(RGXFWIF_HWRINFOBUF),
+                                                                 "FwHWRInfoBuffer",
+                                                                 &psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc,
+                                                                 &sFwOsInitScratch.sRGXFWIfHWRInfoBufCtl,
+                                                                 (void**) &psDevInfo->psRGXFWIfHWRInfoBufCtl,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "HWR Info Buffer allocation", fail);
+
+       /* Might be uncached. Be conservative and use a DeviceMemSet */
+       OSDeviceMemSet(psDevInfo->psRGXFWIfHWRInfoBufCtl, 0, sizeof(RGXFWIF_HWRINFOBUF));
+
+       /* Allocate a sync for power management */
+       eError = SyncPrimContextCreate(psDevInfo->psDeviceNode,
+                                      &psDevInfo->hSyncPrimContext);
+       PVR_LOG_GOTO_IF_ERROR(eError, "Sync primitive context allocation", fail);
+
+       eError = SyncPrimAlloc(psDevInfo->hSyncPrimContext, &psDevInfo->psPowSyncPrim, "fw power ack");
+       PVR_LOG_GOTO_IF_ERROR(eError, "Sync primitive allocation", fail);
+
+       /* Set up kernel CCB */
+       eError = RGXSetupCCB(psDevInfo,
+                                                &psDevInfo->psKernelCCBCtl,
+                                                &psDevInfo->psKernelCCBCtlMemDesc,
+                                                &psDevInfo->psKernelCCB,
+                                                &psDevInfo->psKernelCCBMemDesc,
+                                                &sFwOsInitScratch.psKernelCCBCtl,
+                                                &sFwOsInitScratch.psKernelCCB,
+                                                ui32KCCBSizeLog2,
+                                                sizeof(RGXFWIF_KCCB_CMD),
+                                                (RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS |
+                                                PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)),
+                                                "FwKernelCCB");
+       PVR_LOG_GOTO_IF_ERROR(eError, "Kernel CCB allocation", fail);
+
+       /* The KCCB additionally uses a return slot array so the FW can send back a
+        * return code for each command that requires one
+        */
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS,
+                                                                 (1U << ui32KCCBSizeLog2) * sizeof(IMG_UINT32),
+                                                                 "FwKernelCCBRtnSlots",
+                                                                 &psDevInfo->psKernelCCBRtnSlotsMemDesc,
+                                                                 &sFwOsInitScratch.psKernelCCBRtnSlots,
+                                                                 (void**) &psDevInfo->pui32KernelCCBRtnSlots,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "Kernel CCB return slot array allocation", fail);
+
+       /* Set up firmware CCB */
+       eError = RGXSetupCCB(psDevInfo,
+                                                &psDevInfo->psFirmwareCCBCtl,
+                                                &psDevInfo->psFirmwareCCBCtlMemDesc,
+                                                &psDevInfo->psFirmwareCCB,
+                                                &psDevInfo->psFirmwareCCBMemDesc,
+                                                &sFwOsInitScratch.psFirmwareCCBCtl,
+                                                &sFwOsInitScratch.psFirmwareCCB,
+                                                RGXFWIF_FWCCB_NUMCMDS_LOG2,
+                                                sizeof(RGXFWIF_FWCCB_CMD),
+                                                RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS,
+                                                "FwCCB");
+       PVR_LOG_GOTO_IF_ERROR(eError, "Firmware CCB allocation", fail);
+
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS,
+                                                                 sizeof(RGXFWIF_OSDATA),
+                                                                 "FwOsData",
+                                                                 &psDevInfo->psRGXFWIfFwOsDataMemDesc,
+                                                                 &sFwOsInitScratch.sFwOsData,
+                                                                 (void**) &psDevInfo->psRGXFWIfFwOsData,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail);
+
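+       /* Store only the per-OS configuration bits (RGXFWIF_INICFG_OS_ALL) in the FW OS data */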
+       psDevInfo->psRGXFWIfFwOsData->ui32FwOsConfigFlags = ui32FwOsCfgFlags & RGXFWIF_INICFG_OS_ALL;
+
+       eError = SyncPrimGetFirmwareAddr(psDevInfo->psPowSyncPrim, &psDevInfo->psRGXFWIfFwOsData->sPowerSync.ui32Addr);
+       PVR_LOG_GOTO_IF_ERROR(eError, "Get Sync Prim FW address", fail);
+
+       /* flush write buffers for psRGXFWIfFwOsData */
+       OSWriteMemoryBarrier(psDevInfo->psRGXFWIfFwOsData);
+
+       sFwOsInitScratch.ui32HWRDebugDumpLimit = ui32HWRDebugDumpLimit;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       /* Set up Workload Estimation firmware CCB */
+       eError = RGXSetupCCB(psDevInfo,
+                                                &psDevInfo->psWorkEstFirmwareCCBCtl,
+                                                &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc,
+                                                &psDevInfo->psWorkEstFirmwareCCB,
+                                                &psDevInfo->psWorkEstFirmwareCCBMemDesc,
+                                                &sFwOsInitScratch.psWorkEstFirmwareCCBCtl,
+                                                &sFwOsInitScratch.psWorkEstFirmwareCCB,
+                                                RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2,
+                                                sizeof(RGXFWIF_WORKEST_FWCCB_CMD),
+                                                RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS,
+                                                "FwWEstCCB");
+       PVR_LOG_GOTO_IF_ERROR(eError, "Workload Estimation Firmware CCB allocation", fail);
+#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */
+
+       /* Initialise the compatibility check data */
+       RGXFWIF_COMPCHECKS_BVNC_INIT(sFwOsInitScratch.sRGXCompChecks.sFWBVNC);
+       RGXFWIF_COMPCHECKS_BVNC_INIT(sFwOsInitScratch.sRGXCompChecks.sHWBVNC);
+
+       /* populate the real FwOsInit structure with the values stored in the scratch copy */
+       OSCachedMemCopyWMB(psDevInfo->psRGXFWIfOsInit, &sFwOsInitScratch, sizeof(RGXFWIF_OSINIT));
+
+       return PVRSRV_OK;
+
+fail:
+       RGXFreeFwOsData(psDevInfo);
+
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+/*!
+*******************************************************************************
+ @Function    RGXSetupFirmware
+
+ @Description Sets up all firmware-related data
+
+ @Input       psDeviceNode
+
+ @Return      PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE       *psDeviceNode,
+                                                         IMG_BOOL                 bEnableSignatureChecks,
+                                                         IMG_UINT32               ui32SignatureChecksBufSize,
+                                                         IMG_UINT32               ui32HWPerfFWBufSizeKB,
+                                                         IMG_UINT64               ui64HWPerfFilter,
+                                                         IMG_UINT32               ui32ConfigFlags,
+                                                         IMG_UINT32               ui32ConfigFlagsExt,
+                                                         IMG_UINT32               ui32FwOsCfgFlags,
+                                                         IMG_UINT32               ui32LogType,
+                                                         IMG_UINT32               ui32FilterFlags,
+                                                         IMG_UINT32               ui32JonesDisableMask,
+                                                         IMG_UINT32               ui32HWRDebugDumpLimit,
+                                                         IMG_UINT32               ui32HWPerfCountersDataSize,
+                                                         IMG_UINT32               *pui32TPUTrilinearFracMask,
+                                                         RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf,
+                                                         FW_PERF_CONF             eFirmwarePerf,
+                                                         IMG_UINT32               ui32KCCBSizeLog2)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       eError = RGXSetupFwOsData(psDeviceNode,
+                                                         ui32KCCBSizeLog2,
+                                                         ui32HWRDebugDumpLimit,
+                                                         ui32FwOsCfgFlags);
+       PVR_LOG_GOTO_IF_ERROR(eError, "Setting up firmware os data", fail);
+
+       if (PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               /* Guest drivers do not configure system-wide firmware data */
+               psDevInfo->psRGXFWIfSysInit = NULL;
+       }
+       else
+       {
+               /* Native and Host drivers must initialise the firmware's system data */
+               eError = RGXSetupFwSysData(psDeviceNode,
+                                                                  bEnableSignatureChecks,
+                                                                  ui32SignatureChecksBufSize,
+                                                                  ui32HWPerfFWBufSizeKB,
+                                                                  ui64HWPerfFilter,
+                                                                  ui32ConfigFlags,
+                                                                  ui32ConfigFlagsExt,
+                                                                  ui32LogType,
+                                                                  ui32FilterFlags,
+                                                                  ui32JonesDisableMask,
+                                                                  ui32HWPerfCountersDataSize,
+                                                                  pui32TPUTrilinearFracMask,
+                                                                  eRGXRDPowerIslandConf,
+                                                                  eFirmwarePerf);
+               PVR_LOG_GOTO_IF_ERROR(eError, "Setting up firmware system data", fail);
+       }
+
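+       /* All firmware interface structures are now in place */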
+       psDevInfo->bFirmwareInitialised = IMG_TRUE;
+
+#if defined(PDUMP)
+       RGXPDumpLoadFWInitData(psDevInfo,
+                                              ui32HWPerfCountersDataSize,
+                                              bEnableSignatureChecks);
+#endif /* PDUMP */
+
+fail:
+       return eError;
+}
+
+/*!
+*******************************************************************************
+ @Function    RGXFreeFwSysData
+
+ @Description Frees all system-wide firmware-related data
+
+ @Input       psDevInfo
+******************************************************************************/
+static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       psDevInfo->bFirmwareInitialised = IMG_FALSE;
+
+       if (psDevInfo->psRGXFWAlignChecksMemDesc)
+       {
+               RGXFWFreeAlignChecks(psDevInfo);
+       }
+
+#if defined(PDUMP)
+#if defined(RGX_FEATURE_TDM_PDS_CHECKSUM_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TDM_PDS_CHECKSUM) &&
+           psDevInfo->psRGXFWSigTDM2DChecksMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSigTDM2DChecksMemDesc);
+               psDevInfo->psRGXFWSigTDM2DChecksMemDesc = NULL;
+       }
+#endif
+
+       if (psDevInfo->psRGXFWSigTAChecksMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSigTAChecksMemDesc);
+               psDevInfo->psRGXFWSigTAChecksMemDesc = NULL;
+       }
+
+       if (psDevInfo->psRGXFWSig3DChecksMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSig3DChecksMemDesc);
+               psDevInfo->psRGXFWSig3DChecksMemDesc = NULL;
+       }
+#endif
+
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+       if (psDevInfo->psCounterBufferMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psCounterBufferMemDesc);
+               psDevInfo->psCounterBufferMemDesc = NULL;
+       }
+#endif
+
+#if defined(SUPPORT_FIRMWARE_GCOV)
+       if (psDevInfo->psFirmwareGcovBufferMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psFirmwareGcovBufferMemDesc);
+               psDevInfo->psFirmwareGcovBufferMemDesc = NULL;
+       }
+#endif
+
+       RGXSetupFaultReadRegisterRollback(psDevInfo);
+
+       if (psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc)
+       {
+               if (psDevInfo->psRGXFWIfGpuUtilFWCb != NULL)
+               {
+                       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc);
+                       psDevInfo->psRGXFWIfGpuUtilFWCb = NULL;
+               }
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc);
+               psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc = NULL;
+       }
+
+       if (psDevInfo->psRGXFWIfRuntimeCfgMemDesc)
+       {
+               if (psDevInfo->psRGXFWIfRuntimeCfg != NULL)
+               {
+                       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfRuntimeCfgMemDesc);
+                       psDevInfo->psRGXFWIfRuntimeCfg = NULL;
+               }
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfRuntimeCfgMemDesc);
+               psDevInfo->psRGXFWIfRuntimeCfgMemDesc = NULL;
+       }
+
+       if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc)
+       {
+               psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL;
+       }
+
+       if (psDevInfo->psRGXFWIfTraceBufCtlMemDesc)
+       {
+               if (psDevInfo->psRGXFWIfTraceBufCtl != NULL)
+               {
+                       /* first deinit/free the tracebuffer allocation */
+                       RGXTraceBufferDeinit(psDevInfo);
+
+                       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufCtlMemDesc);
+                       psDevInfo->psRGXFWIfTraceBufCtl = NULL;
+               }
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfTraceBufCtlMemDesc);
+               psDevInfo->psRGXFWIfTraceBufCtlMemDesc = NULL;
+       }
+
+       if (psDevInfo->psRGXFWIfFwSysDataMemDesc)
+       {
+               if (psDevInfo->psRGXFWIfFwSysData != NULL)
+               {
+                       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfFwSysDataMemDesc);
+                       psDevInfo->psRGXFWIfFwSysData = NULL;
+               }
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfFwSysDataMemDesc);
+               psDevInfo->psRGXFWIfFwSysDataMemDesc = NULL;
+       }
+
+#if defined(SUPPORT_TBI_INTERFACE)
+       if (psDevInfo->psRGXFWIfTBIBufferMemDesc)
+       {
+               RGXTBIBufferDeinit(psDevInfo);
+       }
+#endif
+
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+       if (psDevInfo->psRGXFWIfRegCfgMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfRegCfgMemDesc);
+               psDevInfo->psRGXFWIfRegCfgMemDesc = NULL;
+       }
+#endif
+       if (psDevInfo->psRGXFWIfHWPerfCountersMemDesc)
+       {
+               RGXUnsetFirmwareAddress(psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
+               psDevInfo->psRGXFWIfHWPerfCountersMemDesc = NULL;
+       }
+
+#if defined(SUPPORT_SECURITY_VALIDATION)
+       if (psDevInfo->psRGXFWIfNonSecureBufMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfNonSecureBufMemDesc);
+               psDevInfo->psRGXFWIfNonSecureBufMemDesc = NULL;
+       }
+
+       if (psDevInfo->psRGXFWIfSecureBufMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfSecureBufMemDesc);
+               psDevInfo->psRGXFWIfSecureBufMemDesc = NULL;
+       }
+#endif
+
+#if defined(RGX_FEATURE_SLC_VIVT_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT))
+       {
+               _FreeSLC3Fence(psDevInfo);
+       }
+#endif
+#if defined(SUPPORT_PDVFS)
+       if (psDevInfo->psRGXFWIFCoreClkRateMemDesc)
+       {
+               if (psDevInfo->pui32RGXFWIFCoreClkRate != NULL)
+               {
+                       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIFCoreClkRateMemDesc);
+                       psDevInfo->pui32RGXFWIFCoreClkRate = NULL;
+               }
+
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIFCoreClkRateMemDesc);
+               psDevInfo->psRGXFWIFCoreClkRateMemDesc = NULL;
+       }
+#endif
+}
+
+/*!
+*******************************************************************************
+ @Function    RGXFreeFwOsData
+
+ @Description Frees all OS-specific firmware-related data
+
+ @Input       psDevInfo
+******************************************************************************/
+static void RGXFreeFwOsData(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       RGXFreeCCBReturnSlots(psDevInfo,
+                             &psDevInfo->pui32KernelCCBRtnSlots,
+                             &psDevInfo->psKernelCCBRtnSlotsMemDesc);
+       RGXFreeCCB(psDevInfo,
+                  &psDevInfo->psKernelCCBCtl,
+                  &psDevInfo->psKernelCCBCtlMemDesc,
+                  &psDevInfo->psKernelCCB,
+                  &psDevInfo->psKernelCCBMemDesc);
+
+       RGXFreeCCB(psDevInfo,
+                  &psDevInfo->psFirmwareCCBCtl,
+                  &psDevInfo->psFirmwareCCBCtlMemDesc,
+                  &psDevInfo->psFirmwareCCB,
+                  &psDevInfo->psFirmwareCCBMemDesc);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       RGXFreeCCB(psDevInfo,
+                  &psDevInfo->psWorkEstFirmwareCCBCtl,
+                  &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc,
+                  &psDevInfo->psWorkEstFirmwareCCB,
+                  &psDevInfo->psWorkEstFirmwareCCBMemDesc);
+#endif
+
+       if (psDevInfo->psPowSyncPrim != NULL)
+       {
+               SyncPrimFree(psDevInfo->psPowSyncPrim);
+               psDevInfo->psPowSyncPrim = NULL;
+       }
+
+       if (psDevInfo->hSyncPrimContext != (IMG_HANDLE) NULL)
+       {
+               SyncPrimContextDestroy(psDevInfo->hSyncPrimContext);
+               psDevInfo->hSyncPrimContext = (IMG_HANDLE) NULL;
+       }
+
+       if (psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc)
+       {
+               if (psDevInfo->psRGXFWIfHWRInfoBufCtl != NULL)
+               {
+                       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc);
+                       psDevInfo->psRGXFWIfHWRInfoBufCtl = NULL;
+               }
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc);
+               psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc = NULL;
+       }
+
+       if (psDevInfo->psRGXFWIfFwOsDataMemDesc)
+       {
+               if (psDevInfo->psRGXFWIfFwOsData != NULL)
+               {
+                       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfFwOsDataMemDesc);
+                       psDevInfo->psRGXFWIfFwOsData = NULL;
+               }
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfFwOsDataMemDesc);
+               psDevInfo->psRGXFWIfFwOsDataMemDesc = NULL;
+       }
+
+       if (psDevInfo->psCompletedMemDesc)
+       {
+               if (psDevInfo->pui32CompletedById)
+               {
+                       DevmemReleaseCpuVirtAddr(psDevInfo->psCompletedMemDesc);
+                       psDevInfo->pui32CompletedById = NULL;
+               }
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psCompletedMemDesc);
+               psDevInfo->psCompletedMemDesc = NULL;
+       }
+       if (psDevInfo->psEndTimeMemDesc)
+       {
+               if (psDevInfo->pui64EndTimeById)
+               {
+                       DevmemReleaseCpuVirtAddr(psDevInfo->psEndTimeMemDesc);
+                       psDevInfo->pui64EndTimeById = NULL;
+               }
+
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psEndTimeMemDesc);
+               psDevInfo->psEndTimeMemDesc = NULL;
+       }
+       if (psDevInfo->psStartTimeMemDesc)
+       {
+               if (psDevInfo->pui64StartTimeById)
+               {
+                       DevmemReleaseCpuVirtAddr(psDevInfo->psStartTimeMemDesc);
+                       psDevInfo->pui64StartTimeById = NULL;
+               }
+
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psStartTimeMemDesc);
+               psDevInfo->psStartTimeMemDesc = NULL;
+       }
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+       if (psDevInfo->hTimerQueryLock)
+       {
+               OSLockDestroy(psDevInfo->hTimerQueryLock);
+               psDevInfo->hTimerQueryLock = NULL;
+       }
+#endif
+
+       if (psDevInfo->psRGXFWHeapGuardPageReserveMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWHeapGuardPageReserveMemDesc);
+       }
+}
+
+/*!
+*******************************************************************************
+ @Function    RGXFreeFirmware
+
+ @Description Frees all the firmware-related allocations
+
+ @Input       psDevInfo
+******************************************************************************/
+void RGXFreeFirmware(PVRSRV_RGXDEV_INFO        *psDevInfo)
+{
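+       /* Release the per-OS data first, then the connection control and init
+        * structures, and finally the system-wide data. */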
+       RGXFreeFwOsData(psDevInfo);
+
+       if (psDevInfo->psRGXFWIfConnectionCtl)
+       {
+               DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfConnectionCtlMemDesc);
+               psDevInfo->psRGXFWIfConnectionCtl = NULL;
+       }
+
+       if (psDevInfo->psRGXFWIfConnectionCtlMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfConnectionCtlMemDesc);
+               psDevInfo->psRGXFWIfConnectionCtlMemDesc = NULL;
+       }
+
+       if (psDevInfo->psRGXFWIfOsInit)
+       {
+               DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfOsInitMemDesc);
+               psDevInfo->psRGXFWIfOsInit = NULL;
+       }
+
+       if (psDevInfo->psRGXFWIfOsInitMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfOsInitMemDesc);
+               psDevInfo->psRGXFWIfOsInitMemDesc = NULL;
+       }
+
+       RGXFreeFwSysData(psDevInfo);
+       if (psDevInfo->psRGXFWIfSysInit)
+       {
+               DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfSysInitMemDesc);
+               psDevInfo->psRGXFWIfSysInit = NULL;
+       }
+
+       if (psDevInfo->psRGXFWIfSysInitMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfSysInitMemDesc);
+               psDevInfo->psRGXFWIfSysInitMemDesc = NULL;
+       }
+}
+
+/******************************************************************************
+ FUNCTION      : RGXAcquireKernelCCBSlot
+
+ PURPOSE       : Attempts to obtain a slot in the Kernel CCB
+
+ PARAMETERS    : psDevInfo   - RGX device info
+                       : psKCCBCtl   - kernel CCB control structure
+                       : pui32Offset - returns the offset of the acquired slot
+
+ RETURNS       : PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXAcquireKernelCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                       const RGXFWIF_CCB_CTL *psKCCBCtl,
+                                                                                       IMG_UINT32              *pui32Offset)
+{
+       IMG_UINT32      ui32OldWriteOffset, ui32NextWriteOffset;
+#if defined(PDUMP)
+       const DEVMEM_MEMDESC *psKCCBCtrlMemDesc = psDevInfo->psKernelCCBCtlMemDesc;
+#endif
+
+       ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset;
+       ui32NextWriteOffset = (ui32OldWriteOffset + 1) & psKCCBCtl->ui32WrapMask;
+
+#if defined(PDUMP)
+       /* Wait for sufficient CCB space to become available */
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, 0,
+                             "Wait for kCCB woff=%u", ui32NextWriteOffset);
+       DevmemPDumpCBP(psKCCBCtrlMemDesc,
+                      offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset),
+                      ui32NextWriteOffset,
+                      1,
+                      (psKCCBCtl->ui32WrapMask + 1));
+#endif
+
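+       /* The CCB is full when advancing the write offset would make it equal to
+        * the read offset; one slot is always left unused to distinguish a full
+        * CCB from an empty one. */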
+       if (ui32NextWriteOffset == psKCCBCtl->ui32ReadOffset)
+       {
+               return PVRSRV_ERROR_KERNEL_CCB_FULL;
+       }
+       *pui32Offset = ui32NextWriteOffset;
+       return PVRSRV_OK;
+}
+
+/******************************************************************************
+ FUNCTION      : RGXPollKernelCCBSlot
+
+ PURPOSE       : Poll for space in Kernel CCB
+
+ PARAMETERS    : psKCCBCtrlMemDesc - memory descriptor of the kernel CCB control structure
+                       : psKCCBCtl         - kernel CCB control structure
+
+ RETURNS       : PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXPollKernelCCBSlot(const DEVMEM_MEMDESC *psKCCBCtrlMemDesc,
+                                                                                const RGXFWIF_CCB_CTL *psKCCBCtl)
+{
+       IMG_UINT32      ui32OldWriteOffset, ui32NextWriteOffset;
+
+       ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset;
+       ui32NextWriteOffset = (ui32OldWriteOffset + 1) & psKCCBCtl->ui32WrapMask;
+
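+       /* Poll until space becomes available in the kernel CCB or the driver is no
+        * longer in a good state, giving up after MAX_HW_TIME_US. */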
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+
+               if (ui32NextWriteOffset != psKCCBCtl->ui32ReadOffset)
+               {
+                       return PVRSRV_OK;
+               }
+
+               /*
+                * The following check doesn't impact performance, since the
+                * CPU has to wait for the GPU anyway (full kernel CCB).
+                */
+               if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+               {
+                       return PVRSRV_ERROR_KERNEL_CCB_FULL;
+               }
+
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       return PVRSRV_ERROR_KERNEL_CCB_FULL;
+}
+
+/******************************************************************************
+ FUNCTION      : RGXGetCmdMemCopySize
+
+ PURPOSE       : Calculates the actual size of the KCCB command to be copied
+
+ PARAMETERS    : eCmdType     Type of KCCB command
+
+ RETURNS       : Actual size of the KCCB command on success, zero for an unknown command type
+******************************************************************************/
+static IMG_UINT32 RGXGetCmdMemCopySize(RGXFWIF_KCCB_CMD_TYPE eCmdType)
+{
+       /* First get the offset of uCmdData inside the RGXFWIF_KCCB_CMD struct.
+        * This accounts for the alignment requirement of the uCmdData union.
+        *
+        * Then add the command-data size for the given command type to obtain the
+        * actual number of bytes that need to be copied.
+        *
+        * NOTE: This relies on uCmdData being the last member of RGXFWIF_KCCB_CMD.
+        */
+       switch (eCmdType)
+       {
+               case RGXFWIF_KCCB_CMD_KICK:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_KICK_DATA);
+               }
+               case RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK_DATA);
+               }
+               case RGXFWIF_KCCB_CMD_MMUCACHE:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_MMUCACHEDATA);
+               }
+#if defined(SUPPORT_USC_BREAKPOINT)
+               case RGXFWIF_KCCB_CMD_BP:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_BPDATA);
+               }
+#endif
+               case RGXFWIF_KCCB_CMD_SLCFLUSHINVAL:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_SLCFLUSHINVALDATA);
+               }
+               case RGXFWIF_KCCB_CMD_CLEANUP:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_CLEANUP_REQUEST);
+               }
+               case RGXFWIF_KCCB_CMD_POW:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_POWER_REQUEST);
+               }
+               case RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE:
+               case RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_ZSBUFFER_BACKING_DATA);
+               }
+               case RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_FREELIST_GS_DATA);
+               }
+               case RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_FREELISTS_RECONSTRUCTION_DATA);
+               }
+               case RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_WRITE_OFFSET_UPDATE_DATA);
+               }
+               case RGXFWIF_KCCB_CMD_FORCE_UPDATE:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA);
+               }
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+               case RGXFWIF_KCCB_CMD_REGCONFIG:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_REGCONFIG_DATA);
+               }
+#endif
+               case RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS);
+               }
+#if defined(SUPPORT_PDVFS)
+               case RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_PDVFS_MAX_FREQ_DATA);
+               }
+#endif
+               case RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_OS_STATE_CHANGE_DATA);
+               }
+               case RGXFWIF_KCCB_CMD_COUNTER_DUMP:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_COUNTER_DUMP_DATA);
+               }
+               case RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CTRL);
+               }
+               case RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS);
+               }
+               case RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CONFIG_DA_BLKS);
+               }
+               case RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CTRL_BLKS);
+               }
+               case RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_CORECLKSPEEDCHANGE_DATA);
+               }
+               case RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE:
+               case RGXFWIF_KCCB_CMD_WDG_CFG:
+               case RGXFWIF_KCCB_CMD_PHR_CFG:
+               case RGXFWIF_KCCB_CMD_HEALTH_CHECK:
+               case RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE:
+               case RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL:
+               {
+                       /* No command specific data */
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData);
+               }
+#if defined(SUPPORT_VALIDATION)
+               case RGXFWIF_KCCB_CMD_RGXREG:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_RGXREG_DATA);
+               }
+               case RGXFWIF_KCCB_CMD_GPUMAP:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_GPUMAP_DATA);
+               }
+#endif
+               default:
+               {
+                       /* Invalid, unused, or newly added command type */
+                       return 0; /* Error */
+               }
+       }
+}
+
+PVRSRV_ERROR RGXWaitForKCCBSlotUpdate(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                      IMG_UINT32 ui32SlotNum,
+                                                                         IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError;
+
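+       /* Block until the FW marks the requested return slot as executed */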
+       eError = PVRSRVWaitForValueKM(
+                     (IMG_UINT32 __iomem *)&psDevInfo->pui32KernelCCBRtnSlots[ui32SlotNum],
+                                 RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED,
+                                 RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED);
+       PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVWaitForValueKM");
+
+#if defined(PDUMP)
+       /* Use the same PDump conditions as RGXSendCommandRaw so that the dumped command and this poll stay in sync */
+       if (PDumpCheckFlagsWrite(psDevInfo->psDeviceNode, ui32PDumpFlags))
+       {
+               PDUMPCOMMENT(psDevInfo->psDeviceNode, "Poll on KCCB slot %u for value %u (mask: 0x%x)", ui32SlotNum,
+                                        RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED, RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED);
+
+               eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBRtnSlotsMemDesc,
+                                                                               ui32SlotNum * sizeof(IMG_UINT32),
+                                                                               RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED,
+                                                                               RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED,
+                                                                               PDUMP_POLL_OPERATOR_EQUAL,
+                                                                               ui32PDumpFlags);
+               PVR_LOG_IF_ERROR(eError, "DevmemPDumpDevmemPol32");
+       }
+#else
+       PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+#endif
+
+       return eError;
+}
+
+static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO  *psDevInfo,
+                                                                         RGXFWIF_KCCB_CMD    *psKCCBCmd,
+                                                                         IMG_UINT32          uiPDumpFlags,
+                                                                         IMG_UINT32          *pui32CmdKCCBSlot)
+{
+       PVRSRV_ERROR            eError;
+       PVRSRV_DEVICE_NODE      *psDeviceNode = psDevInfo->psDeviceNode;
+       RGXFWIF_CCB_CTL         *psKCCBCtl = psDevInfo->psKernelCCBCtl;
+       IMG_UINT8                       *pui8KCCB = psDevInfo->psKernelCCB;
+       IMG_UINT32                      ui32NewWriteOffset;
+       IMG_UINT32                      ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset;
+       IMG_UINT32                      ui32CmdMemCopySize;
+
+#if !defined(PDUMP)
+       PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+#else
+       IMG_BOOL bContCaptureOn = PDumpCheckFlagsWrite(psDeviceNode, PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER); /* client connected or in pdump init phase */
+       IMG_BOOL bPDumpEnabled = PDumpCheckFlagsWrite(psDeviceNode, uiPDumpFlags); /* Are we in capture range or continuous and not in a power transition */
+
+       if (bContCaptureOn)
+       {
+               /* in capture range */
+               if (bPDumpEnabled)
+               {
+                       if (!psDevInfo->bDumpedKCCBCtlAlready)
+                       {
+                               /* entering capture range */
+                               psDevInfo->bDumpedKCCBCtlAlready = IMG_TRUE;
+
+                               /* Wait for the live FW to catch up */
+                               PVR_DPF((PVR_DBG_MESSAGE, "%s: waiting on fw to catch-up, roff: %d, woff: %d",
+                                               __func__,
+                                               psKCCBCtl->ui32ReadOffset, ui32OldWriteOffset));
+                               PVRSRVPollForValueKM(psDeviceNode,
+                                                    (IMG_UINT32 __iomem *)&psKCCBCtl->ui32ReadOffset,
+                                                    ui32OldWriteOffset, 0xFFFFFFFF,
+                                                    POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP);
+
+                               /* Dump Init state of Kernel CCB control (read and write offset) */
+                               PDUMPCOMMENTWITHFLAGS(psDeviceNode, uiPDumpFlags,
+                                               "Initial state of kernel CCB Control, roff: %d, woff: %d",
+                                               psKCCBCtl->ui32ReadOffset, psKCCBCtl->ui32WriteOffset);
+
+                               DevmemPDumpLoadMem(psDevInfo->psKernelCCBCtlMemDesc,
+                                               0,
+                                               sizeof(RGXFWIF_CCB_CTL),
+                                               uiPDumpFlags);
+                       }
+               }
+       }
+#endif
+
+#if defined(SUPPORT_AUTOVZ)
+       if (!((KM_FW_CONNECTION_IS(READY, psDevInfo) && KM_OS_CONNECTION_IS(READY, psDevInfo)) ||
+               (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && KM_OS_CONNECTION_IS(ACTIVE, psDevInfo))) &&
+               !PVRSRV_VZ_MODE_IS(NATIVE))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: The firmware-driver connection is invalid: "
+                                                               "driver state = %u / firmware state = %u; "
+                                                               "expected READY (%u/%u) or ACTIVE (%u/%u);",
+                                                               __func__, KM_GET_OS_CONNECTION(psDevInfo), KM_GET_FW_CONNECTION(psDevInfo),
+                                                               RGXFW_CONNECTION_OS_READY, RGXFW_CONNECTION_FW_READY,
+                                                               RGXFW_CONNECTION_OS_ACTIVE, RGXFW_CONNECTION_FW_ACTIVE));
+               eError = PVRSRV_ERROR_PVZ_OSID_IS_OFFLINE;
+               goto _RGXSendCommandRaw_Exit;
+       }
+#endif
+
+       PVR_ASSERT(sizeof(RGXFWIF_KCCB_CMD) == psKCCBCtl->ui32CmdSize);
+       if (!OSLockIsLocked(psDeviceNode->hPowerLock))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s called without power lock held!",
+                               __func__));
+               PVR_ASSERT(OSLockIsLocked(psDeviceNode->hPowerLock));
+       }
+
+       /* Acquire a slot in the CCB */
+       eError = RGXAcquireKernelCCBSlot(psDevInfo, psKCCBCtl, &ui32NewWriteOffset);
+       if (eError != PVRSRV_OK)
+       {
+               goto _RGXSendCommandRaw_Exit;
+       }
+
+       /* Calculate actual size of command to optimize device mem copy */
+       ui32CmdMemCopySize = RGXGetCmdMemCopySize(psKCCBCmd->eCmdType);
+       PVR_LOG_RETURN_IF_FALSE(ui32CmdMemCopySize != 0, "RGXGetCmdMemCopySize failed", PVRSRV_ERROR_INVALID_CCB_COMMAND);
+
+       /* Copy the command into the CCB */
+       OSCachedMemCopyWMB(&pui8KCCB[ui32OldWriteOffset * psKCCBCtl->ui32CmdSize],
+                       psKCCBCmd, ui32CmdMemCopySize);
+
+       /* If a non-NULL pui32CmdKCCBSlot was passed in, return the kCCB slot in which the command was enqueued */
+       if (pui32CmdKCCBSlot)
+       {
+               *pui32CmdKCCBSlot = ui32OldWriteOffset;
+
+               /* Reset the slot value before the command is made visible to the FW so
+                * that the caller never sees a stale value in the allotted slot */
+               OSWriteDeviceMem32WithWMB(&psDevInfo->pui32KernelCCBRtnSlots[ui32OldWriteOffset],
+                                         RGXFWIF_KCCB_RTN_SLOT_NO_RESPONSE);
+#if defined(PDUMP)
+               PDUMPCOMMENTWITHFLAGS(psDeviceNode, uiPDumpFlags,
+                                                         "Reset kCCB slot number %u", ui32OldWriteOffset);
+               DevmemPDumpLoadMem(psDevInfo->psKernelCCBRtnSlotsMemDesc,
+                                  ui32OldWriteOffset * sizeof(IMG_UINT32),
+                                                  sizeof(IMG_UINT32),
+                                                  uiPDumpFlags);
+#endif
+               PVR_DPF((PVR_DBG_MESSAGE, "%s: Device (%p) KCCB slot %u reset with value %u for command type %x",
+                        __func__, psDevInfo, ui32OldWriteOffset, RGXFWIF_KCCB_RTN_SLOT_NO_RESPONSE, psKCCBCmd->eCmdType));
+       }
+
+       /* Move past the current command */
+       psKCCBCtl->ui32WriteOffset = ui32NewWriteOffset;
+
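+       /* Make sure the command payload and the new write offset are visible to
+        * the firmware before the MTS kick below. */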
+       OSWriteMemoryBarrier(&psKCCBCtl->ui32WriteOffset);
+
+#if defined(PDUMP)
+       if (bContCaptureOn)
+       {
+               /* in capture range */
+               if (bPDumpEnabled)
+               {
+                       /* Dump new Kernel CCB content */
+                       PDUMPCOMMENTWITHFLAGS(psDeviceNode,
+                                       uiPDumpFlags, "Dump kCCB cmd woff = %d",
+                                       ui32OldWriteOffset);
+                       DevmemPDumpLoadMem(psDevInfo->psKernelCCBMemDesc,
+                                       ui32OldWriteOffset * psKCCBCtl->ui32CmdSize,
+                                       ui32CmdMemCopySize,
+                                       uiPDumpFlags);
+
+                       /* Dump new kernel CCB write offset */
+                       PDUMPCOMMENTWITHFLAGS(psDeviceNode,
+                                       uiPDumpFlags, "Dump kCCBCtl woff: %d",
+                                       ui32NewWriteOffset);
+                       DevmemPDumpLoadMem(psDevInfo->psKernelCCBCtlMemDesc,
+                                       offsetof(RGXFWIF_CCB_CTL, ui32WriteOffset),
+                                       sizeof(IMG_UINT32),
+                                       uiPDumpFlags);
+
+                       /* mimic the read-back of the write from above */
+                       DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBCtlMemDesc,
+                                       offsetof(RGXFWIF_CCB_CTL, ui32WriteOffset),
+                                       ui32NewWriteOffset,
+                                       0xFFFFFFFF,
+                                       PDUMP_POLL_OPERATOR_EQUAL,
+                                       uiPDumpFlags);
+               }
+               /* out of capture range */
+               else
+               {
+                       eError = RGXPdumpDrainKCCB(psDevInfo, ui32OldWriteOffset);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXPdumpDrainKCCB", _RGXSendCommandRaw_Exit);
+               }
+       }
+#endif
+
+
+       PDUMPCOMMENTWITHFLAGS(psDeviceNode, uiPDumpFlags, "MTS kick for kernel CCB");
+       /*
+        * Kick the MTS to schedule the firmware.
+        */
+       __MTSScheduleWrite(psDevInfo, RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK);
+
+       PDUMPREG32(psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_MTS_SCHEDULE,
+                  RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK, uiPDumpFlags);
+
+#if defined(SUPPORT_AUTOVZ)
+       RGXUpdateAutoVzWdgToken(psDevInfo);
+#endif
+
+#if defined(NO_HARDWARE)
+       /* Keep the read offset updated because there is no FW to advance it */
+       psKCCBCtl->ui32ReadOffset = psKCCBCtl->ui32WriteOffset;
+#endif
+
+_RGXSendCommandRaw_Exit:
+       return eError;
+}
+
+/******************************************************************************
+ FUNCTION      : _AllocDeferredCommand
+
+ PURPOSE       : Allocate a KCCB command and add it to KCCB deferred list
+
+ PARAMETERS    : psDevInfo     RGX device info
+                       : psKCCBCmd             Firmware command
+                       : uiPDumpFlags  PDump flags
+
+ RETURNS       : PVRSRV_OK on success, PVRSRV_ERROR_RETRY otherwise.
+******************************************************************************/
+static PVRSRV_ERROR _AllocDeferredCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                          RGXFWIF_KCCB_CMD   *psKCCBCmd,
+                                          IMG_UINT32         uiPDumpFlags)
+{
+       RGX_DEFERRED_KCCB_CMD *psDeferredCommand;
+       OS_SPINLOCK_FLAGS uiFlags;
+
+       psDeferredCommand = OSAllocMem(sizeof(*psDeferredCommand));
+
+       if (!psDeferredCommand)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "Deferring a KCCB command failed: allocation failure: requesting retry"));
+               return PVRSRV_ERROR_RETRY;
+       }
+
+       psDeferredCommand->sKCCBcmd = *psKCCBCmd;
+       psDeferredCommand->uiPDumpFlags = uiPDumpFlags;
+       psDeferredCommand->psDevInfo = psDevInfo;
+
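+       /* Append to the tail so that deferred commands are later sent in
+        * submission order */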
+       OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
+       dllist_add_to_tail(&(psDevInfo->sKCCBDeferredCommandsListHead), &(psDeferredCommand->sListNode));
+       psDevInfo->ui32KCCBDeferredCommandsCount++;
+       OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
+
+       return PVRSRV_OK;
+}
+
+/******************************************************************************
+ FUNCTION      : _FreeDeferredCommand
+
+ PURPOSE       : Remove a sent deferred KCCB command from the deferred list and free it
+
+ PARAMETERS    : psNode                        Node in deferred list
+                       : psDeferredKCCBCmd     KCCB Command to free
+
+ RETURNS       : None
+******************************************************************************/
+static void _FreeDeferredCommand(DLLIST_NODE *psNode, RGX_DEFERRED_KCCB_CMD *psDeferredKCCBCmd)
+{
+       dllist_remove_node(psNode);
+       psDeferredKCCBCmd->psDevInfo->ui32KCCBDeferredCommandsCount--;
+       OSFreeMem(psDeferredKCCBCmd);
+}
+
+/******************************************************************************
+ FUNCTION      : RGXSendCommandsFromDeferredList
+
+ PURPOSE       : Try to send the KCCB commands in the deferred list to the KCCB.
+                 Must be called with the PowerLock held.
+
+ PARAMETERS    : psDevInfo     RGX device info
+                       : bPoll         Poll for space in KCCB
+
+ RETURNS       : PVRSRV_OK if all commands in the deferred list are sent to the KCCB,
+                         PVRSRV_ERROR_KERNEL_CCB_FULL otherwise.
+******************************************************************************/
+PVRSRV_ERROR RGXSendCommandsFromDeferredList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bPoll)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       DLLIST_NODE *psNode, *psNext;
+       RGX_DEFERRED_KCCB_CMD *psTempDeferredKCCBCmd;
+       DLLIST_NODE sCommandList;
+       OS_SPINLOCK_FLAGS uiFlags;
+
+       PVR_ASSERT(PVRSRVPwrLockIsLockedByMe(psDevInfo->psDeviceNode));
+
+       /* !!! Important !!!
+        *
+        * Moving the whole sKCCBDeferredCommandsListHead list below to a temporary
+        * list is only valid because all of these operations are also protected by
+        * the power lock. It must be held so that the order of the commands doesn't
+        * get mixed up while we're working on the local list.
+        *
+        * The hLockKCCBDeferredCommandsList spin lock has to be released because
+        * the work done on the local list (sending commands and freeing them in
+        * _FreeDeferredCommand()) must not run in atomic context (inside a section
+        * protected by a spin lock).
+        *
+        * A spin lock is used here instead of a mutex so that the MISR can quickly
+        * check whether the list is empty without any risk of sleeping on the lock.
+        */
+
+       /* Move the whole list to a local list so it can be processed without the lock held */
+       OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
+       dllist_replace_head(&psDevInfo->sKCCBDeferredCommandsListHead, &sCommandList);
+       OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
+
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               if (dllist_is_empty(&sCommandList))
+               {
+                       return PVRSRV_OK;
+               }
+
+               /* For every deferred KCCB command, try to send it */
+               dllist_foreach_node(&sCommandList, psNode, psNext)
+               {
+                       psTempDeferredKCCBCmd = IMG_CONTAINER_OF(psNode, RGX_DEFERRED_KCCB_CMD, sListNode);
+                       eError = RGXSendCommandRaw(psTempDeferredKCCBCmd->psDevInfo,
+                                                  &psTempDeferredKCCBCmd->sKCCBcmd,
+                                                  psTempDeferredKCCBCmd->uiPDumpFlags,
+                                                  NULL /* We surely aren't interested in kCCB slot number of deferred command */);
+                       if (eError != PVRSRV_OK)
+                       {
+                               if (!bPoll)
+                               {
+                                       eError = PVRSRV_ERROR_KERNEL_CCB_FULL;
+                                       goto cleanup_;
+                               }
+                               break;
+                       }
+
+                       _FreeDeferredCommand(psNode, psTempDeferredKCCBCmd);
+               }
+
+               if (bPoll)
+               {
+                       PVRSRV_ERROR eErrPollForKCCBSlot;
+
+                       /* Don't overwrite eError because if RGXPollKernelCCBSlot returns OK and the
+                        * outer loop times-out, we'll still want to return KCCB_FULL to caller
+                        */
+                       eErrPollForKCCBSlot = RGXPollKernelCCBSlot(psDevInfo->psKernelCCBCtlMemDesc,
+                                                                  psDevInfo->psKernelCCBCtl);
+                       if (eErrPollForKCCBSlot == PVRSRV_ERROR_KERNEL_CCB_FULL)
+                       {
+                               eError = PVRSRV_ERROR_KERNEL_CCB_FULL;
+                               goto cleanup_;
+                       }
+               }
+       } END_LOOP_UNTIL_TIMEOUT();
+
+cleanup_:
+       /* If the local list is not empty, put it back at the head of the deferred
+        * list so that the original command order is retained */
+       OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
+       dllist_insert_list_at_head(&psDevInfo->sKCCBDeferredCommandsListHead, &sCommandList);
+       OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
+
+       return eError;
+}
+
+PVRSRV_ERROR RGXSendCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO  *psDevInfo,
+                                                                                 RGXFWIF_KCCB_CMD    *psKCCBCmd,
+                                                                                 IMG_UINT32          uiPDumpFlags,
+                                                                                 IMG_UINT32          *pui32CmdKCCBSlot)
+{
+       IMG_BOOL     bPoll = (pui32CmdKCCBSlot != NULL);
+       PVRSRV_ERROR eError;
+
+       /*
+        * First try to flush all the commands in the deferred list.
+        *
+        * We cannot defer an incoming command if the caller is interested in
+        * knowing the command's kCCB slot: it plans to poll/wait for a
+        * response from the FW just after the command is enqueued, so we must
+        * poll for space to be available.
+        */
+       eError = RGXSendCommandsFromDeferredList(psDevInfo, bPoll);
+       if (eError == PVRSRV_OK)
+       {
+               eError = RGXSendCommandRaw(psDevInfo,
+                                                                  psKCCBCmd,
+                                                                  uiPDumpFlags,
+                                                                  pui32CmdKCCBSlot);
+       }
+
+       /*
+        * If either one of the deferred commands or the command passed as an
+        * argument could not be enqueued because the KCCB is full, insert the
+        * latter into the deferred command list.
+        * The deferred commands will also be flushed eventually by:
+        *  - one more KCCB command sent for any DM
+        *  - RGX_MISRHandler_CheckFWActivePowerState
+        */
+       if (eError == PVRSRV_ERROR_KERNEL_CCB_FULL)
+       {
+               if (pui32CmdKCCBSlot == NULL)
+               {
+                       eError = _AllocDeferredCommand(psDevInfo, psKCCBCmd, uiPDumpFlags);
+               }
+               else
+               {
+                       /* Let the caller retry. Otherwise if we deferred the command and returned OK,
+                        * the caller can end up looking in a stale CCB slot.
+                        */
+                       PVR_DPF((PVR_DBG_WARNING, "%s: Couldn't flush the deferred queue for a command (Type:%d) "
+                                               "- will be retried", __func__, psKCCBCmd->eCmdType));
+               }
+       }
+
+       return eError;
+}
+
+PVRSRV_ERROR RGXSendCommandWithPowLockAndGetKCCBSlot(PVRSRV_RGXDEV_INFO        *psDevInfo,
+                                                                                                        RGXFWIF_KCCB_CMD       *psKCCBCmd,
+                                                                                                        IMG_UINT32                     ui32PDumpFlags,
+                                                                                                        IMG_UINT32         *pui32CmdKCCBSlot)
+{
+       PVRSRV_ERROR            eError;
+       PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+
+       /* Ensure Rogue is powered up before kicking MTS */
+       eError = PVRSRVPowerLock(psDeviceNode);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                               "%s: failed to acquire powerlock (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+
+               goto _PVRSRVPowerLock_Exit;
+       }
+
+       PDUMPPOWCMDSTART(psDeviceNode);
+       eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+                                                                                PVRSRV_DEV_POWER_STATE_ON,
+                                                                                PVRSRV_POWER_FLAGS_NONE);
+       PDUMPPOWCMDEND(psDeviceNode);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition Rogue to ON (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+
+               goto _PVRSRVSetDevicePowerStateKM_Exit;
+       }
+
+       eError = RGXSendCommandAndGetKCCBSlot(psDevInfo,
+                                                                                 psKCCBCmd,
+                                                                                 ui32PDumpFlags,
+                                             pui32CmdKCCBSlot);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: failed to schedule command (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+#if defined(DEBUG)
+               /* PVRSRVDebugRequest must be called without powerlock */
+               PVRSRVPowerUnlock(psDeviceNode);
+               PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+               goto _PVRSRVPowerLock_Exit;
+#endif
+       }
+
+_PVRSRVSetDevicePowerStateKM_Exit:
+       PVRSRVPowerUnlock(psDeviceNode);
+
+_PVRSRVPowerLock_Exit:
+       return eError;
+}
+
+void RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*) hCmdCompHandle;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
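+       /* Defer the queue kick to the MISR so that it runs outside the caller's
+        * context */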
+       OSScheduleMISR(psDevInfo->hProcessQueuesMISR);
+}
+
+#if defined(SUPPORT_VALIDATION)
+PVRSRV_ERROR RGXScheduleRgxRegCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                         IMG_UINT64 ui64RegVal,
+                                                                         IMG_UINT64 ui64Size,
+                                                                         IMG_UINT32 ui32Offset,
+                                                                         IMG_BOOL bWriteOp)
+{
+       RGXFWIF_KCCB_CMD sRgxRegsCmd = {0};
+       IMG_UINT32 ui32kCCBCommandSlot;
+       PVRSRV_ERROR eError;
+
+       sRgxRegsCmd.eCmdType = RGXFWIF_KCCB_CMD_RGXREG;
+       sRgxRegsCmd.uCmdData.sFwRgxData.ui64RegVal = ui64RegVal;
+       sRgxRegsCmd.uCmdData.sFwRgxData.ui32RegWidth = ui64Size;
+       sRgxRegsCmd.uCmdData.sFwRgxData.ui32RegAddr = ui32Offset;
+       sRgxRegsCmd.uCmdData.sFwRgxData.bWriteOp = bWriteOp;
+
+       eError =  RGXScheduleCommandAndGetKCCBSlot(psDevInfo,
+                                                                                          RGXFWIF_DM_GP,
+                                                                                          &sRgxRegsCmd,
+                                                                                          PDUMP_FLAGS_CONTINUOUS,
+                                                                                          &ui32kCCBCommandSlot);
+       PVR_LOG_RETURN_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot");
+
+       if (bWriteOp)
+       {
+               eError = RGXWaitForKCCBSlotUpdate(psDevInfo,
+                                                                                 ui32kCCBCommandSlot,
+                                                 PDUMP_FLAGS_CONTINUOUS);
+               PVR_LOG_RETURN_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate");
+       }
+
+       return eError;
+}
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function     RGX_MISRHandler_ScheduleProcessQueues
+
+ @Description Sends an uncounted kick to all the DMs (the FW will process all
+                               the queues for all the DMs)
+******************************************************************************/
+static void RGX_MISRHandler_ScheduleProcessQueues(void *pvData)
+{
+       PVRSRV_DEVICE_NODE     *psDeviceNode = pvData;
+       PVRSRV_RGXDEV_INFO     *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_ERROR           eError;
+       PVRSRV_DEV_POWER_STATE ePowerState;
+
+       eError = PVRSRVPowerLock(psDeviceNode);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)",
+                               __func__, PVRSRVGetErrorString(eError)));
+               return;
+       }
+
+       /* Check whether it's worth waking up the GPU */
+       eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+
+       if (!PVRSRV_VZ_MODE_IS(GUEST) &&
+               (eError == PVRSRV_OK) && (ePowerState == PVRSRV_DEV_POWER_STATE_OFF))
+       {
+               /* For now, guest drivers will always wake-up the GPU */
+               RGXFWIF_GPU_UTIL_FWCB  *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
+               IMG_BOOL               bGPUHasWorkWaiting;
+
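+               /* Work is considered to be waiting if the last GPU utilisation state
+                * recorded by the FW is BLOCKED */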
+               bGPUHasWorkWaiting =
+                   (RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64LastWord) == RGXFWIF_GPU_UTIL_STATE_BLOCKED);
+
+               if (!bGPUHasWorkWaiting)
+               {
+                       /* all queues are empty, don't wake up the GPU */
+                       PVRSRVPowerUnlock(psDeviceNode);
+                       return;
+               }
+       }
+
+       PDUMPPOWCMDSTART(psDeviceNode);
+       /* wake up the GPU */
+       eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+                                                                                PVRSRV_DEV_POWER_STATE_ON,
+                                                                                PVRSRV_POWER_FLAGS_NONE);
+       PDUMPPOWCMDEND(psDeviceNode);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition Rogue to ON (%s)",
+                               __func__, PVRSRVGetErrorString(eError)));
+
+               PVRSRVPowerUnlock(psDeviceNode);
+               return;
+       }
+
+       /* uncounted kick to the FW */
+       HTBLOGK(HTB_SF_MAIN_KICK_UNCOUNTED);
+       __MTSScheduleWrite(psDevInfo, (RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED);
+
+       PVRSRVPowerUnlock(psDeviceNode);
+}
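+
+/*
+ * Illustrative note (a sketch, not taken from the original sources): the
+ * uncounted kick above packs the target data master into the DM field of the
+ * MTS schedule register and ORs in the non-counted task flag, i.e. the single
+ * expression is equivalent to:
+ *
+ *     IMG_UINT32 ui32MTSVal;
+ *
+ *     ui32MTSVal  = RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK;
+ *     ui32MTSVal |= RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED;
+ *     __MTSScheduleWrite(psDevInfo, ui32MTSVal);
+ *
+ * ui32MTSVal is a local name used only for this illustration.
+ */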
+
+PVRSRV_ERROR RGXInstallProcessQueuesMISR(IMG_HANDLE *phMISR, PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       return OSInstallMISR(phMISR,
+                       RGX_MISRHandler_ScheduleProcessQueues,
+                       psDeviceNode,
+                       "RGX_ScheduleProcessQueues");
+}
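+
+/*
+ * Usage sketch (illustrative assumption, not from the original sources): the
+ * handle returned by RGXInstallProcessQueuesMISR() is expected to be kicked
+ * and torn down with the usual OS-layer MISR helpers, along the lines of:
+ *
+ *     IMG_HANDLE hProcessQueuesMISR;
+ *
+ *     eError = RGXInstallProcessQueuesMISR(&hProcessQueuesMISR, psDeviceNode);
+ *     ...
+ *     OSScheduleMISR(hProcessQueuesMISR);    // defer queue processing
+ *     ...
+ *     OSUninstallMISR(hProcessQueuesMISR);   // on device teardown
+ *
+ * OSScheduleMISR()/OSUninstallMISR() are assumed here to be the counterparts
+ * of OSInstallMISR() in the services OS abstraction layer.
+ */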
+
+PVRSRV_ERROR RGXScheduleCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO  *psDevInfo,
+                                              RGXFWIF_DM          eKCCBType,
+                                              RGXFWIF_KCCB_CMD    *psKCCBCmd,
+                                              IMG_UINT32          ui32PDumpFlags,
+                                              IMG_UINT32          *pui32CmdKCCBSlot)
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 uiMMUSyncUpdate;
+
+       /* Don't send the command/power up request if the device is de-initialising.
+        * The de-init thread could destroy the device whilst the power up
+        * sequence below is accessing the HW registers.
+        */
+       if (unlikely((psDevInfo == NULL) ||
+                    (psDevInfo->psDeviceNode == NULL) ||
+                    (psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT)))
+       {
+               return PVRSRV_ERROR_INVALID_DEVICE;
+       }
+
+#if defined(SUPPORT_VALIDATION)
+       /* For validation, force the core to different dust count states with each kick */
+       if ((eKCCBType == RGXFWIF_DM_GEOM) || (eKCCBType == RGXFWIF_DM_CDM))
+       {
+               if (psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN)
+               {
+                       IMG_UINT32 ui32NumDusts = RGXGetNextDustCount(&psDevInfo->sDustReqState, psDevInfo->sDevFeatureCfg.ui32MAXDustCount);
+                       PVRSRVDeviceGPUUnitsPowerChange(psDevInfo->psDeviceNode, ui32NumDusts);
+               }
+       }
+
+       if (psDevInfo->ui32ECCRAMErrInjModule != RGXKM_ECC_ERR_INJ_DISABLE)
+       {
+               if (psDevInfo->ui32ECCRAMErrInjInterval > 0U)
+               {
+                       --psDevInfo->ui32ECCRAMErrInjInterval;
+               }
+               else
+               {
+                       IMG_UINT64 ui64ECCRegVal = 0U;
+
+                       psDevInfo->ui32ECCRAMErrInjInterval = RGXKM_ECC_ERR_INJ_INTERVAL;
+
+                       if (psDevInfo->ui32ECCRAMErrInjModule == RGXKM_ECC_ERR_INJ_SLC)
+                       {
+                               PVR_LOG(("ECC RAM Error Inject SLC"));
+                               ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_EN;
+                       }
+                       else if (psDevInfo->ui32ECCRAMErrInjModule == RGXKM_ECC_ERR_INJ_USC)
+                       {
+                               PVR_LOG(("ECC RAM Error Inject USC"));
+                               ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_USC_EN;
+                       }
+                       else if (psDevInfo->ui32ECCRAMErrInjModule == RGXKM_ECC_ERR_INJ_TPU)
+                       {
+#if defined(RGX_FEATURE_MAX_TPU_PER_SPU)
+                               PVR_LOG(("ECC RAM Error Inject Swift TPU"));
+                               ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_SWIFT_EN;
+#else
+                               PVR_LOG(("ECC RAM Error Inject TPU MCU L0"));
+                               ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_EN;
+#endif
+                       }
+                       else if (psDevInfo->ui32ECCRAMErrInjModule == RGXKM_ECC_ERR_INJ_RASCAL)
+                       {
+#if defined(RGX_CR_ECC_RAM_ERR_INJ_RASCAL_EN)
+                               PVR_LOG(("ECC RAM Error Inject RASCAL"));
+                               ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_RASCAL_EN;
+#else
+                               PVR_LOG(("ECC RAM Error Inject USC"));
+                               ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_USC_EN;
+#endif
+                       }
+                       else if (psDevInfo->ui32ECCRAMErrInjModule == RGXKM_ECC_ERR_INJ_MARS)
+                       {
+                               PVR_LOG(("ECC RAM Error Inject MARS"));
+                               ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_MARS_EN;
+                       }
+                       else
+                       {
+                               /* Unknown module selection: leave ui64ECCRegVal at 0 so no error is injected */
+                       }
+
+                       OSWriteMemoryBarrier(NULL);
+                       OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_ECC_RAM_ERR_INJ, ui64ECCRegVal);
+                       PDUMPCOMMENT(psDevInfo->psDeviceNode, "Write reg ECC_RAM_ERR_INJ");
+                       PDUMPREG64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_ECC_RAM_ERR_INJ, ui64ECCRegVal, PDUMP_FLAGS_CONTINUOUS);
+                       OSWriteMemoryBarrier(NULL);
+               }
+       }
+#endif
+
+       /* PVRSRVPowerLock guarantees atomicity between commands. This is helpful
+          in a scenario with several applications allocating resources. */
+       eError = PVRSRVPowerLock(psDevInfo->psDeviceNode);
+       if (unlikely(eError != PVRSRV_OK))
+       {
+               PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)",
+                               __func__, PVRSRVGetErrorString(eError)));
+
+               /* If the system is found powered OFF, retry scheduling the command */
+               if (likely(eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF))
+               {
+                       eError = PVRSRV_ERROR_RETRY;
+               }
+
+               goto RGXScheduleCommand_exit;
+       }
+
+       if (unlikely(psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT))
+       {
+               /* If we have the power lock the device is valid but the deinit
+                * thread could be waiting for the lock. */
+               PVRSRVPowerUnlock(psDevInfo->psDeviceNode);
+               return PVRSRV_ERROR_INVALID_DEVICE;
+       }
+
+       /* Ensure device is powered up before sending any commands */
+       PDUMPPOWCMDSTART(psDevInfo->psDeviceNode);
+       eError = PVRSRVSetDevicePowerStateKM(psDevInfo->psDeviceNode,
+                                            PVRSRV_DEV_POWER_STATE_ON,
+                                            PVRSRV_POWER_FLAGS_NONE);
+       PDUMPPOWCMDEND(psDevInfo->psDeviceNode);
+       if (unlikely(eError != PVRSRV_OK))
+       {
+               PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition RGX to ON (%s)",
+                               __func__, PVRSRVGetErrorString(eError)));
+               goto _PVRSRVSetDevicePowerStateKM_Exit;
+       }
+
+       eError = RGXPreKickCacheCommand(psDevInfo, eKCCBType, &uiMMUSyncUpdate);
+       if (unlikely(eError != PVRSRV_OK)) goto _PVRSRVSetDevicePowerStateKM_Exit;
+
+       eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, psKCCBCmd, ui32PDumpFlags, pui32CmdKCCBSlot);
+       if (unlikely(eError != PVRSRV_OK)) goto _PVRSRVSetDevicePowerStateKM_Exit;
+
+_PVRSRVSetDevicePowerStateKM_Exit:
+       PVRSRVPowerUnlock(psDevInfo->psDeviceNode);
+
+RGXScheduleCommand_exit:
+       return eError;
+}
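+
+/*
+ * Usage sketch (illustrative only, mirroring the cleanup path later in this
+ * file): a caller typically schedules a command and then waits for the
+ * firmware to update the returned kernel CCB slot before inspecting the
+ * result:
+ *
+ *     IMG_UINT32 ui32Slot;
+ *     RGXFWIF_KCCB_CMD sCmd = { 0 };
+ *
+ *     sCmd.eCmdType = RGXFWIF_KCCB_CMD_HEALTH_CHECK;
+ *     eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, RGXFWIF_DM_GP,
+ *                                               &sCmd, PDUMP_FLAGS_CONTINUOUS,
+ *                                               &ui32Slot);
+ *     if (eError == PVRSRV_OK)
+ *     {
+ *             eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32Slot,
+ *                                               PDUMP_FLAGS_CONTINUOUS);
+ *     }
+ *
+ * PVRSRV_ERROR_RETRY and PVRSRV_ERROR_KERNEL_CCB_FULL are treated as
+ * retryable by callers such as RGXScheduleCleanupCommand() below.
+ */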
+
+/*
+ * RGXCheckFirmwareCCB
+ */
+void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       RGXFWIF_CCB_CTL *psFWCCBCtl = psDevInfo->psFirmwareCCBCtl;
+       IMG_UINT8 *psFWCCB = psDevInfo->psFirmwareCCB;
+
+#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)
+       PVR_LOG_RETURN_VOID_IF_FALSE(PVRSRV_VZ_MODE_IS(NATIVE) ||
+                                                                (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) &&
+                                                                 KM_OS_CONNECTION_IS(ACTIVE, psDevInfo)),
+                                                                "FW-KM connection is down");
+#endif
+
+       while (psFWCCBCtl->ui32ReadOffset != psFWCCBCtl->ui32WriteOffset)
+       {
+               /* Point to the next command */
+               const RGXFWIF_FWCCB_CMD *psFwCCBCmd = ((RGXFWIF_FWCCB_CMD *)psFWCCB) + psFWCCBCtl->ui32ReadOffset;
+
+               HTBLOGK(HTB_SF_MAIN_FWCCB_CMD, psFwCCBCmd->eCmdType);
+               switch (psFwCCBCmd->eCmdType)
+               {
+               case RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING:
+               {
+                       if (psDevInfo->bPDPEnabled)
+                       {
+                               PDUMP_PANIC(psDevInfo->psDeviceNode, ZSBUFFER_BACKING,
+                                           "Request to add backing to ZSBuffer");
+                       }
+                       RGXProcessRequestZSBufferBacking(psDevInfo,
+                                       psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID);
+                       break;
+               }
+
+               case RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING:
+               {
+                       if (psDevInfo->bPDPEnabled)
+                       {
+                               PDUMP_PANIC(psDevInfo->psDeviceNode, ZSBUFFER_UNBACKING,
+                                           "Request to remove backing from ZSBuffer");
+                       }
+                       RGXProcessRequestZSBufferUnbacking(psDevInfo,
+                                       psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID);
+                       break;
+               }
+
+               case RGXFWIF_FWCCB_CMD_FREELIST_GROW:
+               {
+                       if (psDevInfo->bPDPEnabled)
+                       {
+                               PDUMP_PANIC(psDevInfo->psDeviceNode, FREELIST_GROW,
+                                           "Request to grow the free list");
+                       }
+                       RGXProcessRequestGrow(psDevInfo,
+                                       psFwCCBCmd->uCmdData.sCmdFreeListGS.ui32FreelistID);
+                       break;
+               }
+
+               case RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION:
+               {
+                       if (psDevInfo->bPDPEnabled)
+                       {
+                               PDUMP_PANIC(psDevInfo->psDeviceNode, FREELISTS_RECONSTRUCTION,
+                                           "Request to reconstruct free lists");
+                       }
+
+                       if (PVRSRV_VZ_MODE_IS(GUEST))
+                       {
+                               PVR_DPF((PVR_DBG_MESSAGE, "%s: Freelist reconstruction request (%d) for %d freelists",
+                                               __func__,
+                                               psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32HwrCounter+1,
+                                               psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount));
+                       }
+                       else
+                       {
+                               PVR_ASSERT(psDevInfo->psRGXFWIfHWRInfoBufCtl);
+                               PVR_DPF((PVR_DBG_MESSAGE, "%s: Freelist reconstruction request (%d/%d) for %d freelists",
+                                               __func__,
+                                               psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32HwrCounter+1,
+                                               psDevInfo->psRGXFWIfHWRInfoBufCtl->ui32HwrCounter+1,
+                                               psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount));
+                       }
+
+                       RGXProcessRequestFreelistsReconstruction(psDevInfo,
+                                       psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount,
+                                       psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.aui32FreelistIDs);
+                       break;
+               }
+
+               case RGXFWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION:
+               {
+                       /* Notify client drivers */
+                       /* Client notification of device error will be achieved by
+                        * clients calling UM function RGXGetLastDeviceError() */
+                       psDevInfo->eLastDeviceError = RGX_CONTEXT_RESET_REASON_FW_PAGEFAULT;
+
+                       /* Notify system layer */
+                       {
+                               PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode;
+                               PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig;
+                               const RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA *psCmdFwPagefault =
+                                               &psFwCCBCmd->uCmdData.sCmdFWPagefault;
+
+                               if (psDevConfig->pfnSysDevErrorNotify)
+                               {
+                                       PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0};
+
+                                       sErrorData.eResetReason = RGX_CONTEXT_RESET_REASON_FW_PAGEFAULT;
+                                       sErrorData.uErrData.sFwPFErrData.sFWFaultAddr.uiAddr = psCmdFwPagefault->sFWFaultAddr.uiAddr;
+
+                                       psDevConfig->pfnSysDevErrorNotify(psDevConfig,
+                                                                         &sErrorData);
+                               }
+                       }
+                       break;
+               }
+
+               case RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION:
+               {
+                       DLLIST_NODE *psNode, *psNext;
+                       const RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA *psCmdContextResetNotification =
+                                       &psFwCCBCmd->uCmdData.sCmdContextResetNotification;
+                       RGX_SERVER_COMMON_CONTEXT *psServerCommonContext = NULL;
+                       IMG_UINT32 ui32ErrorPid = 0;
+
+                       OSWRLockAcquireRead(psDevInfo->hCommonCtxtListLock);
+
+                       dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext)
+                       {
+                               RGX_SERVER_COMMON_CONTEXT *psThisContext =
+                                               IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode);
+
+                               /* If the notification applies to all contexts update reset info
+                                * for all contexts, otherwise only do so for the appropriate ID.
+                                */
+                               if (psCmdContextResetNotification->ui32Flags & RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS)
+                               {
+                                       /* Notification applies to all contexts */
+                                       psThisContext->eLastResetReason    = psCmdContextResetNotification->eResetReason;
+                                       psThisContext->ui32LastResetJobRef = psCmdContextResetNotification->ui32ResetJobRef;
+                               }
+                               else
+                               {
+                                       /* Notification applies to one context only */
+                                       if (psThisContext->ui32ContextID == psCmdContextResetNotification->ui32ServerCommonContextID)
+                                       {
+                                               psServerCommonContext = psThisContext;
+                                               psServerCommonContext->eLastResetReason    = psCmdContextResetNotification->eResetReason;
+                                               psServerCommonContext->ui32LastResetJobRef = psCmdContextResetNotification->ui32ResetJobRef;
+                                               ui32ErrorPid = RGXGetPIDFromServerMMUContext(psServerCommonContext->psServerMMUContext);
+                                               break;
+                                       }
+                               }
+                       }
+
+                       if (psCmdContextResetNotification->ui32Flags & RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS)
+                       {
+                               PVR_DPF((PVR_DBG_MESSAGE, "%s: All contexts reset (Reason=%d, JobRef=0x%08x)",
+                                               __func__,
+                                               (IMG_UINT32)(psCmdContextResetNotification->eResetReason),
+                                               psCmdContextResetNotification->ui32ResetJobRef));
+                       }
+                       else
+                       {
+                               PVR_DPF((PVR_DBG_MESSAGE, "%s: Context 0x%p reset (ID=0x%08x, Reason=%d, JobRef=0x%08x)",
+                                               __func__,
+                                               psServerCommonContext,
+                                               psCmdContextResetNotification->ui32ServerCommonContextID,
+                                               (IMG_UINT32)(psCmdContextResetNotification->eResetReason),
+                                               psCmdContextResetNotification->ui32ResetJobRef));
+                       }
+
+                       /* Increment error counter (if appropriate) */
+                       if (psCmdContextResetNotification->eResetReason == RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM)
+                       {
+                               /* Avoid wrapping the error count (which would then
+                                * make it appear we had far fewer errors), by limiting
+                                * it to IMG_UINT32_MAX.
+                                */
+                               if (psDevInfo->sErrorCounts.ui32WGPErrorCount < IMG_UINT32_MAX)
+                               {
+                                       psDevInfo->sErrorCounts.ui32WGPErrorCount++;
+                               }
+                       }
+                       else if (psCmdContextResetNotification->eResetReason == RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM)
+                       {
+                               /* Avoid wrapping the error count (which would then
+                                * make it appear we had far fewer errors), by limiting
+                                * it to IMG_UINT32_MAX.
+                                */
+                               if (psDevInfo->sErrorCounts.ui32TRPErrorCount < IMG_UINT32_MAX)
+                               {
+                                       psDevInfo->sErrorCounts.ui32TRPErrorCount++;
+                               }
+                       }
+                       OSWRLockReleaseRead(psDevInfo->hCommonCtxtListLock);
+
+                       /* Notify system layer */
+                       {
+                               PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode;
+                               PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig;
+
+                               if (psDevConfig->pfnSysDevErrorNotify)
+                               {
+                                       PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0};
+
+                                       sErrorData.eResetReason = psCmdContextResetNotification->eResetReason;
+                                       sErrorData.pid = ui32ErrorPid;
+
+                                       /* Populate error data according to reset reason */
+                                       switch (psCmdContextResetNotification->eResetReason)
+                                       {
+                                               case RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM:
+                                               case RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM:
+                                               {
+                                                       sErrorData.uErrData.sChecksumErrData.ui32ExtJobRef = psCmdContextResetNotification->ui32ResetJobRef;
+                                                       sErrorData.uErrData.sChecksumErrData.eDM = psCmdContextResetNotification->eDM;
+                                                       break;
+                                               }
+                                               default:
+                                               {
+                                                       break;
+                                               }
+                                       }
+
+                                       psDevConfig->pfnSysDevErrorNotify(psDevConfig,
+                                                                         &sErrorData);
+                               }
+                       }
+
+                       /* Notify if a page fault */
+                       if (psCmdContextResetNotification->ui32Flags & RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_PF)
+                       {
+                               DevmemIntPFNotify(psDevInfo->psDeviceNode,
+                                               psCmdContextResetNotification->ui64PCAddress,
+                                               psCmdContextResetNotification->sFaultAddress);
+                       }
+                       break;
+               }
+
+               case RGXFWIF_FWCCB_CMD_DEBUG_DUMP:
+               {
+                       PVRSRV_ERROR eError;
+                       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+                       OSAtomicWrite(&psDevInfo->psDeviceNode->eDebugDumpRequested, PVRSRV_DEVICE_DEBUG_DUMP_CAPTURE);
+                       eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to signal FW Cmd debug dump event, dumping now instead", __func__));
+                               PVRSRVDebugRequest(psDevInfo->psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+                       }
+                       break;
+               }
+
+               case RGXFWIF_FWCCB_CMD_UPDATE_STATS:
+               {
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+                       IMG_PID pidTmp = psFwCCBCmd->uCmdData.sCmdUpdateStatsData.pidOwner;
+                       IMG_INT32 i32AdjustmentValue = psFwCCBCmd->uCmdData.sCmdUpdateStatsData.i32AdjustmentValue;
+
+                       switch (psFwCCBCmd->uCmdData.sCmdUpdateStatsData.eElementToUpdate)
+                       {
+                       case RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS:
+                       {
+                               PVRSRVStatsUpdateRenderContextStats(i32AdjustmentValue,0,0,0,0,0,pidTmp);
+                               break;
+                       }
+                       case RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY:
+                       {
+                               PVRSRVStatsUpdateRenderContextStats(0,i32AdjustmentValue,0,0,0,0,pidTmp);
+                               break;
+                       }
+                       case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES:
+                       {
+                               PVRSRVStatsUpdateRenderContextStats(0,0,i32AdjustmentValue,0,0,0,pidTmp);
+                               break;
+                       }
+                       case RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES:
+                       {
+                               PVRSRVStatsUpdateRenderContextStats(0,0,0,i32AdjustmentValue,0,0,pidTmp);
+                               break;
+                       }
+                       case RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES:
+                       {
+                               PVRSRVStatsUpdateRenderContextStats(0,0,0,0,i32AdjustmentValue,0,pidTmp);
+                               break;
+                       }
+                       case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES:
+                       {
+                               PVRSRVStatsUpdateRenderContextStats(0,0,0,0,0,i32AdjustmentValue,pidTmp);
+                               break;
+                       }
+               }
+#endif
+                       break;
+               }
+               case RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE:
+               {
+#if defined(SUPPORT_PDVFS)
+                       PDVFS_PROCESS_CORE_CLK_RATE_CHANGE(psDevInfo,
+                                       psFwCCBCmd->uCmdData.sCmdCoreClkRateChange.ui32CoreClkRate);
+#endif
+                       break;
+               }
+
+               case RGXFWIF_FWCCB_CMD_REQUEST_GPU_RESTART:
+               {
+                       if (psDevInfo->psRGXFWIfFwSysData != NULL  &&
+                                       psDevInfo->psRGXFWIfFwSysData->ePowState != RGXFWIF_POW_OFF)
+                       {
+                               PVRSRV_ERROR eError;
+
+                               /* Power down... */
+                               eError = PVRSRVSetDeviceSystemPowerState(psDevInfo->psDeviceNode,
+                                               PVRSRV_SYS_POWER_STATE_OFF, PVRSRV_POWER_FLAGS_NONE);
+                               if (eError == PVRSRV_OK)
+                               {
+                                       /* Clear the FW faulted flags... */
+                                       psDevInfo->psRGXFWIfFwSysData->ui32HWRStateFlags &= ~(RGXFWIF_HWR_FW_FAULT|RGXFWIF_HWR_RESTART_REQUESTED);
+
+                                       /* Power back up again... */
+                                       eError = PVRSRVSetDeviceSystemPowerState(psDevInfo->psDeviceNode,
+                                                       PVRSRV_SYS_POWER_STATE_ON, PVRSRV_POWER_FLAGS_NONE);
+
+                                       /* Send a dummy KCCB command to ensure the FW wakes up and checks the queues... */
+                                       if (eError == PVRSRV_OK)
+                                       {
+                                               LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+                                               {
+                                                       eError = RGXFWHealthCheckCmd(psDevInfo);
+                                                       if (eError != PVRSRV_ERROR_RETRY)
+                                                       {
+                                                               break;
+                                                       }
+                                                       OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+                                               } END_LOOP_UNTIL_TIMEOUT();
+                                       }
+                               }
+
+                               /* Notify client drivers and system layer of FW fault */
+                               {
+                                       PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode;
+                                       PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig;
+
+                                       /* Client notification of device error will be achieved by
+                                        * clients calling UM function RGXGetLastDeviceError() */
+                                       psDevInfo->eLastDeviceError = RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR;
+
+                                       /* Notify system layer */
+                                       if (psDevConfig->pfnSysDevErrorNotify)
+                                       {
+                                               PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0};
+
+                                               sErrorData.eResetReason = RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR;
+                                               psDevConfig->pfnSysDevErrorNotify(psDevConfig,
+                                                                                 &sErrorData);
+                                       }
+                               }
+
+                               if (eError != PVRSRV_OK)
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s: Failed firmware restart (%s)",
+                                                       __func__, PVRSRVGetErrorString(eError)));
+                               }
+                       }
+                       break;
+               }
+#if defined(SUPPORT_VALIDATION)
+               case RGXFWIF_FWCCB_CMD_REG_READ:
+               {
+                       psDevInfo->sFwRegs.ui64RegVal = psFwCCBCmd->uCmdData.sCmdRgxRegReadData.ui64RegValue;
+                       complete(&psDevInfo->sFwRegs.sRegComp);
+                       break;
+               }
+#if defined(SUPPORT_SOC_TIMER)
+               case RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS:
+               {
+                       if (psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags & RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER)
+                       {
+                               PVRSRV_ERROR eSOCtimerErr = RGXValidateSOCUSCTimer(psDevInfo,
+                                                                                     PDUMP_NONE,
+                                                                                     psFwCCBCmd->uCmdData.sCmdTimers.ui64timerGray,
+                                                                                     psFwCCBCmd->uCmdData.sCmdTimers.ui64timerBinary,
+                                                                                     psFwCCBCmd->uCmdData.sCmdTimers.aui64uscTimers);
+                               if (PVRSRV_OK == eSOCtimerErr)
+                               {
+                                       PVR_DPF((PVR_DBG_WARNING, "SoC or USC Timers have increased over time"));
+                               }
+                               else
+                               {
+                                       PVR_DPF((PVR_DBG_WARNING, "SoC or USC Timers have NOT increased over time"));
+                               }
+                       }
+                       break;
+               }
+#endif
+#endif
+               default:
+               {
+                       /* unknown command */
+                       PVR_DPF((PVR_DBG_WARNING, "%s: Unknown Command (eCmdType=0x%08x)",
+                                __func__, psFwCCBCmd->eCmdType));
+                       /* Assert on magic value corruption */
+                       PVR_ASSERT((((IMG_UINT32)psFwCCBCmd->eCmdType & RGX_CMD_MAGIC_DWORD_MASK) >> RGX_CMD_MAGIC_DWORD_SHIFT) == RGX_CMD_MAGIC_DWORD);
+               }
+               }
+
+               /* Update read offset */
+               psFWCCBCtl->ui32ReadOffset = (psFWCCBCtl->ui32ReadOffset + 1) & psFWCCBCtl->ui32WrapMask;
+       }
+}
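+
+/*
+ * Illustrative note on the read-offset update above: the firmware CCB is a
+ * power-of-two sized ring, so advancing the read index is a masked increment.
+ * For example, with a hypothetical 16-entry CCB (ui32WrapMask == 15):
+ *
+ *     (15 + 1) & 15  ==  0    wraps from the last entry back to the start
+ *     ( 7 + 1) & 15  ==  8    advances to the next entry
+ */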
+
+/*
+ * PVRSRVRGXFrameworkCopyCommand
+*/
+PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(PVRSRV_DEVICE_NODE *psDeviceNode,
+               DEVMEM_MEMDESC  *psFWFrameworkMemDesc,
+               IMG_PBYTE               pbyGPUFRegisterList,
+               IMG_UINT32              ui32FrameworkRegisterSize)
+{
+       PVRSRV_ERROR    eError;
+       RGXFWIF_RF_REGISTERS    *psRFReg;
+
+       eError = DevmemAcquireCpuVirtAddr(psFWFrameworkMemDesc,
+                       (void **)&psRFReg);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed to map firmware render context state (%u)",
+                        __func__, eError));
+               return eError;
+       }
+
+       OSDeviceMemCopy(psRFReg, pbyGPUFRegisterList, ui32FrameworkRegisterSize);
+
+       /* Release the CPU mapping */
+       DevmemReleaseCpuVirtAddr(psFWFrameworkMemDesc);
+
+       /*
+        * Dump the FW framework buffer
+        */
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDeviceNode, "Dump FWFramework buffer");
+       DevmemPDumpLoadMem(psFWFrameworkMemDesc, 0, ui32FrameworkRegisterSize, PDUMP_FLAGS_CONTINUOUS);
+#else
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+#endif
+
+       return PVRSRV_OK;
+}
+
+/*
+ * PVRSRVRGXFrameworkCreateKM
+*/
+PVRSRV_ERROR PVRSRVRGXFrameworkCreateKM(PVRSRV_DEVICE_NODE     *psDeviceNode,
+               DEVMEM_MEMDESC          **ppsFWFrameworkMemDesc,
+               IMG_UINT32                      ui32FrameworkCommandSize)
+{
+       PVRSRV_ERROR                    eError;
+       PVRSRV_RGXDEV_INFO              *psDevInfo = psDeviceNode->pvDevice;
+
+       /*
+               Allocate device memory for the firmware GPU framework state.
+               Sufficient info to kick one or more DMs should be contained in this buffer
+        */
+       PDUMPCOMMENT(psDeviceNode, "Allocate Rogue firmware framework state");
+
+       eError = DevmemFwAllocate(psDevInfo,
+                       ui32FrameworkCommandSize,
+                       RGX_FWCOMCTX_ALLOCFLAGS,
+                       "FwGPUFrameworkState",
+                       ppsFWFrameworkMemDesc);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed to allocate firmware framework state (%u)",
+                        __func__, eError));
+               return eError;
+       }
+
+       return PVRSRV_OK;
+}
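+
+/*
+ * Usage sketch (an assumption for illustration, not from the original
+ * sources): the two framework helpers above are naturally used back to back,
+ * first allocating the firmware framework state and then copying the
+ * client-supplied register list into it:
+ *
+ *     eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode, &psFWFrameworkMemDesc,
+ *                                         ui32FrameworkCommandSize);
+ *     if (eError == PVRSRV_OK)
+ *     {
+ *             eError = PVRSRVRGXFrameworkCopyCommand(psDeviceNode,
+ *                                                    psFWFrameworkMemDesc,
+ *                                                    pbyGPUFRegisterList,
+ *                                                    ui32FrameworkCommandSize);
+ *     }
+ */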
+
+PVRSRV_ERROR RGXPollForGPCommandCompletion(PVRSRV_DEVICE_NODE  *psDevNode,
+                                                                                               volatile IMG_UINT32     __iomem *pui32LinMemAddr,
+                                                                                               IMG_UINT32                      ui32Value,
+                                                                                               IMG_UINT32                      ui32Mask)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_UINT32 ui32CurrentQueueLength, ui32MaxRetries;
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDevNode->pvDevice;
+       const RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl;
+
+       ui32CurrentQueueLength = (psKCCBCtl->ui32WrapMask+1 +
+                                       psKCCBCtl->ui32WriteOffset -
+                                       psKCCBCtl->ui32ReadOffset) & psKCCBCtl->ui32WrapMask;
+       ui32CurrentQueueLength += psDevInfo->ui32KCCBDeferredCommandsCount;
+
+       for (ui32MaxRetries = ui32CurrentQueueLength + 1;
+                               ui32MaxRetries > 0;
+                               ui32MaxRetries--)
+       {
+
+               /*
+                * The PVRSRVPollForValueKM flags are set to POLL_FLAG_NONE so that the function
+                * does not generate an error message: the poll is expected to time out while work
+                * is still ongoing on the GPU, which may take longer than the timeout period.
+                */
+               eError = PVRSRVPollForValueKM(psDevNode, pui32LinMemAddr, ui32Value, ui32Mask, POLL_FLAG_NONE);
+               if (eError != PVRSRV_ERROR_TIMEOUT)
+               {
+                       break;
+               }
+
+               RGXSendCommandsFromDeferredList(psDevInfo, IMG_FALSE);
+       }
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed! Error(%s) CPU linear address(%p) Expected value(%u)",
+                                       __func__, PVRSRVGetErrorString(eError),
+                                                               pui32LinMemAddr, ui32Value));
+       }
+
+       return eError;
+}
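+
+/*
+ * Illustrative note on the retry budget above: the number of poll attempts is
+ * derived from the current kernel CCB occupancy, computed modulo the ring
+ * size. Assuming a hypothetical 16-slot kernel CCB (ui32WrapMask == 15), a
+ * write offset of 3 and a read offset of 14:
+ *
+ *     (16 + 3 - 14) & 15  ==  5 commands still queued
+ *
+ * to which the deferred-command count is added, plus one extra iteration for
+ * the final poll.
+ */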
+
+PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo,
+               IMG_UINT32 ui32Config,
+               IMG_UINT32 *pui32ConfigState,
+               IMG_BOOL bSetNotClear)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_DEV_POWER_STATE ePowerState;
+       RGXFWIF_KCCB_CMD sStateFlagCmd = { 0 };
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+       RGXFWIF_SYSDATA *psSysData;
+       IMG_UINT32 ui32kCCBCommandSlot;
+       IMG_BOOL bWaitForFwUpdate = IMG_FALSE;
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+       if (!psDevInfo)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+       psDeviceNode = psDevInfo->psDeviceNode;
+       psSysData = psDevInfo->psRGXFWIfFwSysData;
+
+       if (NULL == psSysData)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Fw Sys Data is not mapped into CPU space", __func__));
+               return PVRSRV_ERROR_INVALID_CPU_ADDR;
+       }
+
+       /* apply change and ensure the new data is written to memory
+        * before requesting the FW to read it
+        */
+       ui32Config = ui32Config & RGXFWIF_INICFG_ALL;
+       if (bSetNotClear)
+       {
+               psSysData->ui32ConfigFlags |= ui32Config;
+       }
+       else
+       {
+               psSysData->ui32ConfigFlags &= ~ui32Config;
+       }
+
+       /* return current/new value to caller */
+       if (pui32ConfigState)
+       {
+               *pui32ConfigState = psSysData->ui32ConfigFlags;
+       }
+
+       OSMemoryBarrier(&psSysData->ui32ConfigFlags);
+
+       eError = PVRSRVPowerLock(psDeviceNode);
+       PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPowerLock");
+
+       /* notify FW to update setting */
+       eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+
+       if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF))
+       {
+               /* Ask the FW to update its cached version of the value */
+               sStateFlagCmd.eCmdType = RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL;
+
+               eError = RGXSendCommandAndGetKCCBSlot(psDevInfo,
+                                                                                         &sStateFlagCmd,
+                                                                                         PDUMP_FLAGS_CONTINUOUS,
+                                                                                         &ui32kCCBCommandSlot);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXSendCommandAndGetKCCBSlot", unlock);
+               bWaitForFwUpdate = IMG_TRUE;
+       }
+
+unlock:
+       PVRSRVPowerUnlock(psDeviceNode);
+       if (bWaitForFwUpdate)
+       {
+               /* Wait for the value to be updated as the FW validates
+                * the parameters and modifies the ui32ConfigFlags
+                * accordingly
+                * (for completeness as registered callbacks should also
+                *  not permit invalid transitions)
+                */
+               eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
+               PVR_LOG_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate");
+       }
+       return eError;
+}
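+
+/*
+ * Usage sketch (illustrative; the flag chosen is just an example of an
+ * RGXFWIF_INICFG_* bit that appears elsewhere in this file):
+ *
+ *     IMG_UINT32 ui32NewState;
+ *
+ *     set the bit:
+ *     RGXStateFlagCtrl(psDevInfo, RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER,
+ *                      &ui32NewState, IMG_TRUE);
+ *
+ *     clear it again:
+ *     RGXStateFlagCtrl(psDevInfo, RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER,
+ *                      &ui32NewState, IMG_FALSE);
+ *
+ * ui32NewState receives the resulting ui32ConfigFlags value in both cases.
+ */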
+
+static
+PVRSRV_ERROR RGXScheduleCleanupCommand(PVRSRV_RGXDEV_INFO      *psDevInfo,
+                                                                          RGXFWIF_DM                   eDM,
+                                                                          RGXFWIF_KCCB_CMD             *psKCCBCmd,
+                                                                          RGXFWIF_CLEANUP_TYPE eCleanupType,
+                                                                          IMG_UINT32                   ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32kCCBCommandSlot;
+
+       /* Clean-up commands sent during frame capture intervals must be dumped even when not in capture range... */
+       ui32PDumpFlags |= PDUMP_FLAGS_INTERVAL;
+
+       psKCCBCmd->eCmdType = RGXFWIF_KCCB_CMD_CLEANUP;
+       psKCCBCmd->uCmdData.sCleanupData.eCleanupType = eCleanupType;
+
+       /*
+               Send the cleanup request to the firmware. If the resource is still busy
+               the firmware will tell us and we'll drop out with a retry.
+       */
+       eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo,
+                                                                                         eDM,
+                                                                                         psKCCBCmd,
+                                                                                         ui32PDumpFlags,
+                                                                                         &ui32kCCBCommandSlot);
+       if (eError != PVRSRV_OK)
+       {
+               /* If caller may retry, fail with no error message */
+               if ((eError != PVRSRV_ERROR_RETRY) &&
+                   (eError != PVRSRV_ERROR_KERNEL_CCB_FULL))
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "RGXScheduleCommandAndGetKCCBSlot() failed (%s) in %s()",
+                                PVRSRVGETERRORSTRING(eError), __func__));
+               }
+               goto fail_command;
+       }
+
+       /* Wait for command kCCB slot to be updated by FW */
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags,
+                                                 "Wait for the firmware to reply to the cleanup command");
+       eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot,
+                                                                         ui32PDumpFlags);
+       /*
+               If the firmware hasn't got back to us in a timely manner
+               then bail and let the caller retry the command.
+        */
+       if (eError == PVRSRV_ERROR_TIMEOUT)
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                        "%s: RGXWaitForKCCBSlotUpdate timed out. Dump debug information.",
+                        __func__));
+
+               eError = PVRSRV_ERROR_RETRY;
+#if defined(DEBUG)
+               PVRSRVDebugRequest(psDevInfo->psDeviceNode,
+                               DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+#endif
+               goto fail_poll;
+       }
+       else if (eError != PVRSRV_OK)
+       {
+               goto fail_poll;
+       }
+
+#if defined(PDUMP)
+       /*
+        * The cleanup request to the firmware will tell us if a given resource is busy or not.
+        * If the RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY flag is set, this means that the resource is
+        * still in use. In this case we return a PVRSRV_ERROR_RETRY error to the client drivers
+        * and they will re-issue the cleanup request until it succeeds.
+        *
+        * Since this retry mechanism doesn't work for pdumps, client drivers should ensure
+        * that cleanup requests are only submitted if the resource is unused.
+        * If this is not the case, the following poll will block infinitely, making sure
+        * the issue doesn't go unnoticed.
+        */
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags,
+                       "Cleanup: If this poll fails, the following resource is still in use (DM=%u, type=%u, address=0x%08x), which is incorrect in pdumps",
+                       eDM,
+                       psKCCBCmd->uCmdData.sCleanupData.eCleanupType,
+                       psKCCBCmd->uCmdData.sCleanupData.uCleanupData.psContext.ui32Addr);
+       eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBRtnSlotsMemDesc,
+                                                                       ui32kCCBCommandSlot * sizeof(IMG_UINT32),
+                                                                       0,
+                                                                       RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY,
+                                                                       PDUMP_POLL_OPERATOR_EQUAL,
+                                                                       ui32PDumpFlags);
+       PVR_LOG_IF_ERROR(eError, "DevmemPDumpDevmemPol32");
+#endif
+
+       /*
+               If the command was run but a resource was busy, then the request
+               will need to be retried.
+       */
+       if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY))
+       {
+               if (psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE)
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "%s: FW poll on a HW operation failed", __func__));
+               }
+               eError = PVRSRV_ERROR_RETRY;
+               goto fail_requestbusy;
+       }
+
+       return PVRSRV_OK;
+
+fail_requestbusy:
+fail_poll:
+fail_command:
+       PVR_ASSERT(eError != PVRSRV_OK);
+
+       return eError;
+}
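+
+/*
+ * Usage sketch (illustrative assumption, not from the original sources):
+ * callers of the cleanup helpers below treat PVRSRV_ERROR_RETRY as "resource
+ * still busy" and re-issue the request, e.g. with the timeout loop pattern
+ * used elsewhere in this file:
+ *
+ *     LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ *     {
+ *             eError = RGXFWRequestFreeListCleanUp(psDevInfo, psFWFreeList);
+ *             if (eError != PVRSRV_ERROR_RETRY)
+ *             {
+ *                     break;
+ *             }
+ *             OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ *     } END_LOOP_UNTIL_TIMEOUT();
+ */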
+
+/*
+       RGXFWRequestCommonContextCleanUp
+*/
+PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+               RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+               RGXFWIF_DM eDM,
+               IMG_UINT32 ui32PDumpFlags)
+{
+       RGXFWIF_KCCB_CMD                        sRCCleanUpCmd = {0};
+       PVRSRV_ERROR                            eError;
+       PRGXFWIF_FWCOMMONCONTEXT        psFWCommonContextFWAddr;
+       PVRSRV_RGXDEV_INFO                      *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice;
+
+       /* Force retry if this context's CCB is currently being dumped
+        * as part of the stalled CCB debug */
+       if (psDevInfo->pvEarliestStalledClientCCB == (void*)psServerCommonContext->psClientCCB)
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                        "%s: Forcing retry as psDevInfo->pvEarliestStalledClientCCB = psServerCommonContext->psClientCCB <%p>",
+                        __func__,
+                        (void*)psServerCommonContext->psClientCCB));
+               return PVRSRV_ERROR_RETRY;
+       }
+
+       psFWCommonContextFWAddr = FWCommonContextGetFWAddress(psServerCommonContext);
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDeviceNode, "Common ctx cleanup Request DM%d [context = 0x%08x]",
+                       eDM, psFWCommonContextFWAddr.ui32Addr);
+       PDUMPCOMMENT(psDeviceNode, "Wait for CCB to be empty before common ctx cleanup");
+
+       RGXCCBPDumpDrainCCB(FWCommonContextGetClientCCB(psServerCommonContext), ui32PDumpFlags);
+#endif
+
+       /* Setup our command data, the cleanup call will fill in the rest */
+       sRCCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psContext = psFWCommonContextFWAddr;
+
+       /* Request cleanup of the firmware resource */
+       eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice,
+                                                                          eDM,
+                                                                          &sRCCleanUpCmd,
+                                                                          RGXFWIF_CLEANUP_FWCOMMONCONTEXT,
+                                                                          ui32PDumpFlags);
+
+       if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed to schedule a common context cleanup with error (%u)",
+                        __func__, eError));
+       }
+
+       return eError;
+}
+
+/*
+ * RGXFWRequestHWRTDataCleanUp
+ */
+
+PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                         PRGXFWIF_HWRTDATA psHWRTData)
+{
+       RGXFWIF_KCCB_CMD                        sHWRTDataCleanUpCmd = {0};
+       PVRSRV_ERROR                            eError;
+
+       PDUMPCOMMENT(psDeviceNode, "HW RTData cleanup Request [HWRTData = 0x%08x]", psHWRTData.ui32Addr);
+
+       sHWRTDataCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psHWRTData = psHWRTData;
+
+       eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice,
+                                          RGXFWIF_DM_GP,
+                                          &sHWRTDataCleanUpCmd,
+                                          RGXFWIF_CLEANUP_HWRTDATA,
+                                          PDUMP_FLAGS_NONE);
+
+       if (eError != PVRSRV_OK)
+       {
+               /* If caller may retry, fail with no error message */
+               if ((eError != PVRSRV_ERROR_RETRY) &&
+                   (eError != PVRSRV_ERROR_KERNEL_CCB_FULL))
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to schedule a HWRTData cleanup with error (%u)",
+                                __func__, eError));
+               }
+       }
+
+       return eError;
+}
+
+/*
+       RGXFWRequestFreeListCleanUp
+*/
+PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                PRGXFWIF_FREELIST psFWFreeList)
+{
+       RGXFWIF_KCCB_CMD                        sFLCleanUpCmd = {0};
+       PVRSRV_ERROR                            eError;
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode, "Free list cleanup Request [FreeList = 0x%08x]", psFWFreeList.ui32Addr);
+
+       /* Setup our command data, the cleanup call will fill in the rest */
+       sFLCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psFreelist = psFWFreeList;
+
+       /* Request cleanup of the firmware resource */
+       eError = RGXScheduleCleanupCommand(psDevInfo,
+                                                                          RGXFWIF_DM_GP,
+                                                                          &sFLCleanUpCmd,
+                                                                          RGXFWIF_CLEANUP_FREELIST,
+                                                                          PDUMP_FLAGS_NONE);
+
+       if (eError != PVRSRV_OK)
+       {
+               /* If caller may retry, fail with no error message */
+               if ((eError != PVRSRV_ERROR_RETRY) &&
+                   (eError != PVRSRV_ERROR_KERNEL_CCB_FULL))
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to schedule a free list cleanup with error (%u)",
+                                __func__, eError));
+               }
+       }
+
+       return eError;
+}
+
+/*
+       RGXFWRequestZSBufferCleanUp
+*/
+PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                PRGXFWIF_ZSBUFFER psFWZSBuffer)
+{
+       RGXFWIF_KCCB_CMD                        sZSBufferCleanUpCmd = {0};
+       PVRSRV_ERROR                            eError;
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode, "ZS Buffer cleanup Request [ZS Buffer = 0x%08x]", psFWZSBuffer.ui32Addr);
+
+       /* Setup our command data, the cleanup call will fill in the rest */
+       sZSBufferCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psZSBuffer = psFWZSBuffer;
+
+       /* Request cleanup of the firmware resource */
+       eError = RGXScheduleCleanupCommand(psDevInfo,
+                                                                          RGXFWIF_DM_3D,
+                                                                          &sZSBufferCleanUpCmd,
+                                                                          RGXFWIF_CLEANUP_ZSBUFFER,
+                                                                          PDUMP_FLAGS_NONE);
+
+       if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed to schedule a ZS buffer cleanup with error (%u)",
+                        __func__, eError));
+       }
+
+       return eError;
+}
+
+PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo,
+               IMG_UINT32 ui32HCSDeadlineMs)
+{
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+       psDevInfo->psRGXFWIfRuntimeCfg->ui32HCSDeadlineMS = ui32HCSDeadlineMs;
+       OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->ui32HCSDeadlineMS);
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Updating the Hard Context Switching deadline inside RGXFWIfRuntimeCfg");
+       DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+                                                         offsetof(RGXFWIF_RUNTIME_CFG, ui32HCSDeadlineMS),
+                                                         ui32HCSDeadlineMs,
+                                                         PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXFWHealthCheckCmd(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       RGXFWIF_KCCB_CMD        sCmpKCCBCmd = { 0 };
+
+       sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_HEALTH_CHECK;
+
+       return  RGXScheduleCommand(psDevInfo,
+                                                          RGXFWIF_DM_GP,
+                                                          &sCmpKCCBCmd,
+                                                          PDUMP_FLAGS_CONTINUOUS);
+}
+
+PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32OSid,
+                               RGXFWIF_OS_STATE_CHANGE eOSOnlineState)
+{
+       PVRSRV_ERROR             eError = PVRSRV_OK;
+       RGXFWIF_KCCB_CMD         sOSOnlineStateCmd = { 0 };
+       RGXFWIF_SYSDATA          *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+
+       sOSOnlineStateCmd.eCmdType = RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE;
+       sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.ui32OSid = ui32OSid;
+       sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.eNewOSState = eOSOnlineState;
+
+#if defined(SUPPORT_AUTOVZ)
+       {
+               IMG_BOOL bConnectionDown = IMG_FALSE;
+
+               PVR_UNREFERENCED_PARAMETER(psFwSysData);
+               sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.eNewOSState = RGXFWIF_OS_OFFLINE;
+
+               LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+               {
+                       /* Send the offline command regardless if power lock is held or not.
+                        * Under AutoVz this is done during regular driver deinit, store-to-ram suspend
+                        * or (optionally) from a kernel panic callback. Deinit and suspend operations
+                        * take the lock in the rgx pre/post power functions as expected.
+                        * The kernel panic callback is a last resort way of letting the firmware know that
+                        * the VM is unrecoverable and the vz connection must be disabled. It cannot wait
+                        * on other kernel threads to finish and release the lock. */
+                       eError = RGXSendCommand(psDevInfo,
+                                                                       &sOSOnlineStateCmd,
+                                                                       PDUMP_FLAGS_CONTINUOUS);
+
+                       if (eError != PVRSRV_ERROR_RETRY)
+                       {
+                               break;
+                       }
+
+                       OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+               } END_LOOP_UNTIL_TIMEOUT();
+
+               /* Guests and Host going offline should wait for confirmation
+                * from the Firmware of the state change. If this fails, break
+                * the connection on the OS Driver's end as backup. */
+               if (PVRSRV_VZ_MODE_IS(GUEST) || (ui32OSid == RGXFW_HOST_OS))
+               {
+                       LOOP_UNTIL_TIMEOUT(SECONDS_TO_MICROSECONDS/2)
+                       {
+                               if (KM_FW_CONNECTION_IS(READY, psDevInfo))
+                               {
+                                       bConnectionDown = IMG_TRUE;
+                                       break;
+                               }
+                       } END_LOOP_UNTIL_TIMEOUT();
+
+                       if (!bConnectionDown)
+                       {
+                               KM_SET_OS_CONNECTION(OFFLINE, psDevInfo);
+                       }
+               }
+       }
+#else
+       if (PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               /* no reason for Guests to update their state or any other VM's.
+                * This is the Hypervisor and Host driver's responsibility. */
+               return PVRSRV_OK;
+       }
+       else if (eOSOnlineState == RGXFWIF_OS_ONLINE)
+       {
+               LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+               {
+                       eError = RGXScheduleCommand(psDevInfo,
+                                       RGXFWIF_DM_GP,
+                                       &sOSOnlineStateCmd,
+                                       PDUMP_FLAGS_CONTINUOUS);
+                       if (eError != PVRSRV_ERROR_RETRY) break;
+
+                       OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+               } END_LOOP_UNTIL_TIMEOUT();
+       }
+       else if (psFwSysData)
+       {
+               const volatile RGXFWIF_OS_RUNTIME_FLAGS *psFwRunFlags =
+                        (const volatile RGXFWIF_OS_RUNTIME_FLAGS*) &psFwSysData->asOsRuntimeFlagsMirror[ui32OSid];
+
+               /* Attempt several times until the FW manages to offload the OS */
+               LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+               {
+                       IMG_UINT32 ui32kCCBCommandSlot;
+
+                       /* Send request */
+                       eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo,
+                                                                 RGXFWIF_DM_GP,
+                                                                 &sOSOnlineStateCmd,
+                                                                 PDUMP_FLAGS_CONTINUOUS,
+                                                                 &ui32kCCBCommandSlot);
+                       if (unlikely(eError == PVRSRV_ERROR_RETRY)) continue;
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommand", return_);
+
+                       /* Wait for FW to process the cmd */
+                       eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", return_);
+
+                       /* read the OS state */
+                       OSMemoryBarrier(NULL);
+                       /* check if FW finished offloading the OSID and is stopped */
+                       if (psFwRunFlags->bfOsState == RGXFW_CONNECTION_FW_OFFLINE)
+                       {
+                               eError = PVRSRV_OK;
+                               break;
+                       }
+                       else
+                       {
+                               eError = PVRSRV_ERROR_TIMEOUT;
+                       }
+
+                       OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+               } END_LOOP_UNTIL_TIMEOUT();
+       }
+       else
+       {
+               eError = PVRSRV_ERROR_NOT_INITIALISED;
+       }
+
+return_:
+#endif
+       return eError;
+}
+
+PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo,
+               IMG_UINT32 ui32OSid,
+               IMG_UINT32 ui32Priority)
+{
+       PVRSRV_ERROR eError;
+       RGXFWIF_KCCB_CMD        sOSidPriorityCmd = { 0 };
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+       sOSidPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE;
+       psDevInfo->psRGXFWIfRuntimeCfg->aui32OSidPriority[ui32OSid] = ui32Priority;
+       OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->aui32OSidPriority[ui32OSid]);
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Updating the priority of OSID%u inside RGXFWIfRuntimeCfg", ui32OSid);
+       DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+                                                         offsetof(RGXFWIF_RUNTIME_CFG, aui32OSidPriority) + (ui32OSid * sizeof(ui32Priority)),
+                                                         ui32Priority,
+                                                         PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               eError = RGXScheduleCommand(psDevInfo,
+                               RGXFWIF_DM_GP,
+                               &sOSidPriorityCmd,
+                               PDUMP_FLAGS_CONTINUOUS);
+               if (eError != PVRSRV_ERROR_RETRY)
+               {
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       return eError;
+}
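+
+/* Illustrative usage sketch, not part of the driver: a virtualisation-aware
+ * caller holding a valid psDevInfo could raise the scheduling priority of
+ * guest OSID 1 as below; the priority value is an arbitrary example and the
+ * valid range is defined by the firmware interface headers.
+ *
+ *     eError = RGXFWChangeOSidPriority(psDevInfo, 1, 2);
+ *     if (eError != PVRSRV_OK)
+ *     {
+ *             PVR_DPF((PVR_DBG_ERROR, "RGXFWChangeOSidPriority failed (%u)", eError));
+ *     }
+ */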
+
+PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext,
+               CONNECTION_DATA *psConnection,
+               PVRSRV_RGXDEV_INFO *psDevInfo,
+               IMG_UINT32 ui32Priority,
+               RGXFWIF_DM eDM)
+{
+       IMG_UINT32                              ui32CmdSize;
+       IMG_UINT8                               *pui8CmdPtr;
+       RGXFWIF_KCCB_CMD                sPriorityCmd = { 0 };
+       RGXFWIF_CCB_CMD_HEADER  *psCmdHeader;
+       RGXFWIF_CMD_PRIORITY    *psCmd;
+       PVRSRV_ERROR                    eError;
+       IMG_INT32 i32Priority = (IMG_INT32)ui32Priority;
+       RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psContext);
+
+       eError = _CheckPriority(psDevInfo, i32Priority, psContext->eRequestor);
+       PVR_LOG_GOTO_IF_ERROR(eError, "_CheckPriority", fail_checkpriority);
+
+       /*
+               Get space for command
+        */
+       ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CCB_CMD_HEADER) + sizeof(RGXFWIF_CMD_PRIORITY));
+
+       eError = RGXAcquireCCB(psClientCCB,
+                       ui32CmdSize,
+                       (void **) &pui8CmdPtr,
+                       PDUMP_FLAGS_CONTINUOUS);
+       if (eError != PVRSRV_OK)
+       {
+               if (eError != PVRSRV_ERROR_RETRY)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire space for client CCB", __func__));
+               }
+               goto fail_ccbacquire;
+       }
+
+       /*
+               Write the command header and command
+       */
+       psCmdHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr;
+       psCmdHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PRIORITY;
+       psCmdHeader->ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CMD_PRIORITY));
+       pui8CmdPtr += sizeof(*psCmdHeader);
+
+       psCmd = (RGXFWIF_CMD_PRIORITY *) pui8CmdPtr;
+       psCmd->i32Priority = i32Priority;
+       pui8CmdPtr += sizeof(*psCmd);
+
+       /*
+               We should reserve space in the kernel CCB here and fill in the command
+               directly. That way, if there is no space in the kernel CCB we can return
+               a retry to the Services client before performing any further operations.
+        */
+
+       /*
+               Submit the command
+        */
+       RGXReleaseCCB(psClientCCB,
+                       ui32CmdSize,
+                       PDUMP_FLAGS_CONTINUOUS);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to release space in client CCB", __func__));
+               return eError;
+       }
+
+       /* Construct the priority command. */
+       sPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+       sPriorityCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psContext);
+       sPriorityCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB);
+       sPriorityCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB);
+       sPriorityCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+       sPriorityCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               eError = RGXScheduleCommand(psDevInfo,
+                               eDM,
+                               &sPriorityCmd,
+                               PDUMP_FLAGS_CONTINUOUS);
+               if (eError != PVRSRV_ERROR_RETRY)
+               {
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to submit set priority command with error (%u)",
+                               __func__,
+                               eError));
+               goto fail_cmdacquire;
+       }
+
+       psContext->i32Priority = i32Priority;
+
+       return PVRSRV_OK;
+
+fail_ccbacquire:
+fail_checkpriority:
+fail_cmdacquire:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
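+
+/* Illustrative usage sketch, not part of the driver: a context-management path
+ * that owns a server common context could request a new priority on the 3D
+ * data master as below. psCommonContext, psConnection and ui32NewPriority are
+ * placeholders for whatever handles the real caller holds.
+ *
+ *     eError = ContextSetPriority(psCommonContext, psConnection, psDevInfo,
+ *                                 ui32NewPriority, RGXFWIF_DM_3D);
+ */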
+
+PVRSRV_ERROR RGXFWConfigPHR(PVRSRV_RGXDEV_INFO *psDevInfo,
+                            IMG_UINT32 ui32PHRMode)
+{
+       PVRSRV_ERROR eError;
+       RGXFWIF_KCCB_CMD sCfgPHRCmd = { 0 };
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+       sCfgPHRCmd.eCmdType = RGXFWIF_KCCB_CMD_PHR_CFG;
+       psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode = ui32PHRMode;
+       OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode);
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Updating the Periodic Hardware Reset Mode inside RGXFWIfRuntimeCfg");
+       DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+                                                         offsetof(RGXFWIF_RUNTIME_CFG, ui32PHRMode),
+                                                         ui32PHRMode,
+                                                         PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               eError = RGXScheduleCommand(psDevInfo,
+                                           RGXFWIF_DM_GP,
+                                           &sCfgPHRCmd,
+                                           PDUMP_FLAGS_CONTINUOUS);
+               if (eError != PVRSRV_ERROR_RETRY)
+               {
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       return eError;
+}
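+
+/* Illustrative usage sketch, not part of the driver: a debug or tuning hook
+ * could forward a requested Periodic Hardware Reset mode straight to the
+ * firmware; ui32RequestedPHRMode stands in for whichever mode encoding the
+ * firmware interface defines.
+ *
+ *     eError = RGXFWConfigPHR(psDevInfo, ui32RequestedPHRMode);
+ */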
+
+PVRSRV_ERROR RGXFWConfigWdg(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                       IMG_UINT32 ui32WdgPeriodUs)
+{
+       PVRSRV_ERROR eError;
+       RGXFWIF_KCCB_CMD sCfgWdgCmd = { 0 };
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+       sCfgWdgCmd.eCmdType = RGXFWIF_KCCB_CMD_WDG_CFG;
+       psDevInfo->psRGXFWIfRuntimeCfg->ui32WdgPeriodUs = ui32WdgPeriodUs;
+       OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->ui32WdgPeriodUs);
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Updating the firmware watchdog period inside RGXFWIfRuntimeCfg");
+       DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+                                                         offsetof(RGXFWIF_RUNTIME_CFG, ui32WdgPeriodUs),
+                                                         ui32WdgPeriodUs,
+                                                         PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               eError = RGXScheduleCommand(psDevInfo,
+                                                                       RGXFWIF_DM_GP,
+                                                                       &sCfgWdgCmd,
+                                                                       PDUMP_FLAGS_CONTINUOUS);
+               if (eError != PVRSRV_ERROR_RETRY)
+               {
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       return eError;
+}
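+
+/* Illustrative usage sketch, not part of the driver: the same pattern applies
+ * to the firmware watchdog, whose period is given in microseconds. The one
+ * second value below is only an example.
+ *
+ *     eError = RGXFWConfigWdg(psDevInfo, 1000000);
+ */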
+
+void RGXCheckForStalledClientContexts(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bIgnorePrevious)
+{
+       /* Attempt to detect and deal with any stalled client contexts.
+        * bIgnorePrevious may be set by the caller if they know a context to be
+        * stalled, as otherwise this function will only identify stalled
+        * contexts which have not been previously reported.
+        */
+
+       IMG_UINT32 ui32StalledClientMask = 0;
+
+       if (!(OSTryLockAcquire(psDevInfo->hCCBStallCheckLock)))
+       {
+               PVR_LOG(("RGXCheckForStalledClientContexts: Failed to acquire hCCBStallCheckLock, returning..."));
+               return;
+       }
+
+       ui32StalledClientMask |= CheckForStalledClientTransferCtxt(psDevInfo);
+
+       ui32StalledClientMask |= CheckForStalledClientRenderCtxt(psDevInfo);
+
+       ui32StalledClientMask |= CheckForStalledClientKickSyncCtxt(psDevInfo);
+
+       if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_COMPUTE_BIT_MASK)
+       {
+               ui32StalledClientMask |= CheckForStalledClientComputeCtxt(psDevInfo);
+       }
+
+       /* If at least one DM stalled bit is different than before */
+       if (bIgnorePrevious || (psDevInfo->ui32StalledClientMask != ui32StalledClientMask))
+       {
+               if (ui32StalledClientMask > 0)
+               {
+                       static __maybe_unused const char *pszStalledAction =
+#if defined(PVRSRV_STALLED_CCB_ACTION)
+                                       "force";
+#else
+                                       "warn";
+#endif
+                       /* Print all the stalled DMs */
+                       PVR_LOG(("Possible stalled client RGX contexts detected: %s%s%s%s%s%s%s%s%s",
+                                        RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_GP),
+                                        RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TDM_2D),
+                                        RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TA),
+                                        RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_3D),
+                                        RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_CDM),
+                                        RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_RTU),
+                                        RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_SHG),
+                                        RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TQ2D),
+                                        RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TQ3D)));
+
+                       PVR_LOG(("Trying to identify stalled context...(%s) [%d]",
+                                pszStalledAction, bIgnorePrevious));
+
+                       DumpStalledContextInfo(psDevInfo);
+               }
+               else
+               {
+                       if (psDevInfo->ui32StalledClientMask > 0)
+                       {
+                               /* Indicate there are no stalled DMs */
+                               PVR_LOG(("No further stalled client contexts exist"));
+                       }
+               }
+               psDevInfo->ui32StalledClientMask = ui32StalledClientMask;
+               psDevInfo->pvEarliestStalledClientCCB = NULL;
+       }
+       OSLockRelease(psDevInfo->hCCBStallCheckLock);
+}
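+
+/* Illustrative usage sketch, not part of the driver: the health check below
+ * calls this with bIgnorePrevious = IMG_FALSE so that only newly stalled
+ * contexts are reported, whereas a caller that already knows a particular
+ * context to be stalled may pass IMG_TRUE to force a fresh report.
+ *
+ *     RGXCheckForStalledClientContexts(psDevInfo, IMG_FALSE);
+ */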
+
+/*
+       RGXUpdateHealthStatus
+*/
+PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode,
+                                   IMG_BOOL bCheckAfterTimePassed)
+{
+       const PVRSRV_DATA*           psPVRSRVData = PVRSRVGetPVRSRVData();
+       PVRSRV_DEVICE_HEALTH_STATUS  eNewStatus   = PVRSRV_DEVICE_HEALTH_STATUS_OK;
+       PVRSRV_DEVICE_HEALTH_REASON  eNewReason   = PVRSRV_DEVICE_HEALTH_REASON_NONE;
+       PVRSRV_RGXDEV_INFO*          psDevInfo;
+       const RGXFWIF_TRACEBUF*      psRGXFWIfTraceBufCtl;
+       const RGXFWIF_SYSDATA*       psFwSysData;
+       const RGXFWIF_OSDATA*        psFwOsData;
+       const RGXFWIF_CCB_CTL*       psKCCBCtl;
+       IMG_UINT32                   ui32ThreadCount;
+       IMG_BOOL                     bKCCBCmdsWaiting;
+
+       PVR_ASSERT(psDevNode != NULL);
+       psDevInfo = psDevNode->pvDevice;
+
+       /* If the firmware is not yet initialised or has already deinitialised, stop here */
+       if (psDevInfo  == NULL || !psDevInfo->bFirmwareInitialised || psDevInfo->pvRegsBaseKM == NULL ||
+               psDevInfo->psDeviceNode == NULL || psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT)
+       {
+               return PVRSRV_OK;
+       }
+
+       psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl;
+       psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+       psFwOsData = psDevInfo->psRGXFWIfFwOsData;
+
+       /* If this is a quick update, then start from the currently recorded status values... */
+       if (!bCheckAfterTimePassed)
+       {
+               eNewStatus = OSAtomicRead(&psDevNode->eHealthStatus);
+               eNewReason = OSAtomicRead(&psDevNode->eHealthReason);
+       }
+
+       /* Decrement the SLR holdoff counter (if non-zero) */
+       if (psDevInfo->ui32SLRHoldoffCounter > 0)
+       {
+               psDevInfo->ui32SLRHoldoffCounter--;
+       }
+
+       /* If Rogue is not powered on, just skip ahead and check for stalled client CCBs */
+       if (PVRSRVIsDevicePowered(psDevNode))
+       {
+               if (psRGXFWIfTraceBufCtl != NULL)
+               {
+                       /*
+                          Firmware thread checks...
+                        */
+                       for (ui32ThreadCount = 0; ui32ThreadCount < RGXFW_THREAD_NUM; ui32ThreadCount++)
+                       {
+                               const IMG_CHAR* pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szInfo;
+
+                               /*
+                               Check if the FW has hit an assert...
+                               */
+                               if (*pszTraceAssertInfo != '\0')
+                               {
+                                       PVR_DPF((PVR_DBG_WARNING, "%s: Firmware thread %d has asserted: %s (%s:%d)",
+                                                       __func__, ui32ThreadCount, pszTraceAssertInfo,
+                                                       psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szPath,
+                                                       psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.ui32LineNum));
+                                       eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD;
+                                       eNewReason = PVRSRV_DEVICE_HEALTH_REASON_ASSERTED;
+                                       goto _RGXUpdateHealthStatus_Exit;
+                               }
+
+                               /*
+                                  Check the threads to see if they are in the same poll locations as last time...
+                               */
+                               if (bCheckAfterTimePassed)
+                               {
+                                       if (psFwSysData->aui32CrPollAddr[ui32ThreadCount] != 0  &&
+                                               psFwSysData->aui32CrPollCount[ui32ThreadCount] == psDevInfo->aui32CrLastPollCount[ui32ThreadCount])
+                                       {
+                                               PVR_DPF((PVR_DBG_WARNING, "%s: Firmware stuck on CR poll: T%u polling %s (reg:0x%08X mask:0x%08X)",
+                                                               __func__, ui32ThreadCount,
+                                                               ((psFwSysData->aui32CrPollAddr[ui32ThreadCount] & RGXFW_POLL_TYPE_SET)?("set"):("unset")),
+                                                               psFwSysData->aui32CrPollAddr[ui32ThreadCount] & ~RGXFW_POLL_TYPE_SET,
+                                                               psFwSysData->aui32CrPollMask[ui32ThreadCount]));
+                                               eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING;
+                                               eNewReason = PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING;
+                                               goto _RGXUpdateHealthStatus_Exit;
+                                       }
+                                       psDevInfo->aui32CrLastPollCount[ui32ThreadCount] = psFwSysData->aui32CrPollCount[ui32ThreadCount];
+                               }
+                       }
+
+                       /*
+                       Check if the FW has faulted...
+                       */
+                       if (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_FW_FAULT)
+                       {
+                               PVR_DPF((PVR_DBG_WARNING,
+                                               "%s: Firmware has faulted and needs to restart",
+                                               __func__));
+                               eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_FAULT;
+                               if (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_RESTART_REQUESTED)
+                               {
+                                       eNewReason = PVRSRV_DEVICE_HEALTH_REASON_RESTARTING;
+                               }
+                               else
+                               {
+                                       eNewReason = PVRSRV_DEVICE_HEALTH_REASON_IDLING;
+                               }
+                               goto _RGXUpdateHealthStatus_Exit;
+                       }
+               }
+
+               /*
+                  Event Object Timeouts check...
+               */
+               if (!bCheckAfterTimePassed)
+               {
+                       if (psDevInfo->ui32GEOTimeoutsLastTime > 1 && psPVRSRVData->ui32GEOConsecutiveTimeouts > psDevInfo->ui32GEOTimeoutsLastTime)
+                       {
+                               PVR_DPF((PVR_DBG_WARNING, "%s: Global Event Object Timeouts have risen (from %d to %d)",
+                                               __func__,
+                                               psDevInfo->ui32GEOTimeoutsLastTime, psPVRSRVData->ui32GEOConsecutiveTimeouts));
+                               eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING;
+                               eNewReason = PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS;
+                       }
+                       psDevInfo->ui32GEOTimeoutsLastTime = psPVRSRVData->ui32GEOConsecutiveTimeouts;
+               }
+
+               /*
+                  Check the Kernel CCB pointer is valid. If any commands were waiting last time, then check
+                  that some have executed since then.
+               */
+               bKCCBCmdsWaiting = IMG_FALSE;
+               psKCCBCtl = psDevInfo->psKernelCCBCtl;
+
+               if (psKCCBCtl != NULL)
+               {
+                       if (psKCCBCtl->ui32ReadOffset > psKCCBCtl->ui32WrapMask  ||
+                                       psKCCBCtl->ui32WriteOffset > psKCCBCtl->ui32WrapMask)
+                       {
+                               PVR_DPF((PVR_DBG_WARNING, "%s: KCCB has invalid offset (ROFF=%d WOFF=%d)",
+                                               __func__, psKCCBCtl->ui32ReadOffset, psKCCBCtl->ui32WriteOffset));
+                               eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD;
+                               eNewReason = PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT;
+                       }
+
+                       if (psKCCBCtl->ui32ReadOffset != psKCCBCtl->ui32WriteOffset)
+                       {
+                               bKCCBCmdsWaiting = IMG_TRUE;
+                       }
+               }
+
+               if (bCheckAfterTimePassed && psFwOsData != NULL)
+               {
+                       IMG_UINT32 ui32KCCBCmdsExecuted = psFwOsData->ui32KCCBCmdsExecuted;
+
+                       if (psDevInfo->ui32KCCBCmdsExecutedLastTime == ui32KCCBCmdsExecuted)
+                       {
+                               /*
+                                  If something was waiting last time then the Firmware has stopped processing commands.
+                               */
+                               if (psDevInfo->bKCCBCmdsWaitingLastTime)
+                               {
+                                       PVR_DPF((PVR_DBG_WARNING, "%s: No KCCB commands executed since check!",
+                                                       __func__));
+                                       eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING;
+                                       eNewReason = PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED;
+                               }
+
+                               /*
+                                  If no commands are currently pending and nothing happened since the last poll, then
+                                  schedule a dummy command to ping the firmware so we know it is alive and processing.
+                               */
+                               if (!bKCCBCmdsWaiting)
+                               {
+                                       /* Protect the PDumpLoadMem. RGXScheduleCommand() cannot take the
+                                        * PMR lock itself, because some bridge functions will take the PMR lock
+                                        * before calling RGXScheduleCommand.
+                                        */
+                                       PVRSRV_ERROR eError = RGXFWHealthCheckCmd(psDevNode->pvDevice);
+
+                                       if (eError != PVRSRV_OK)
+                                       {
+                                               PVR_DPF((PVR_DBG_WARNING, "%s: Cannot schedule Health Check command! (0x%x)",
+                                                               __func__, eError));
+                                       }
+                                       else
+                                       {
+                                               bKCCBCmdsWaiting = IMG_TRUE;
+                                       }
+                               }
+                       }
+
+                       psDevInfo->bKCCBCmdsWaitingLastTime     = bKCCBCmdsWaiting;
+                       psDevInfo->ui32KCCBCmdsExecutedLastTime = ui32KCCBCmdsExecuted;
+               }
+       }
+
+       /*
+          Interrupt counts check...
+       */
+       if (bCheckAfterTimePassed  && psFwOsData != NULL)
+       {
+               IMG_UINT32  ui32LISRCount   = 0;
+               IMG_UINT32  ui32FWCount     = 0;
+               IMG_UINT32  ui32MissingInts = 0;
+
+               /* Add up the total number of interrupts issued, sampled/received and missed... */
+#if defined(RGX_FW_IRQ_OS_COUNTERS)
+               /* Only the Host OS has a sample count, so only one counter to check. */
+               ui32LISRCount += psDevInfo->aui32SampleIRQCount[RGXFW_HOST_OS];
+               ui32FWCount   += OSReadHWReg32(psDevInfo->pvRegsBaseKM, gaui32FwOsIrqCntRegAddr[RGXFW_HOST_OS]);
+#else
+               IMG_UINT32  ui32Index;
+
+               for (ui32Index = 0;  ui32Index < RGXFW_THREAD_NUM;  ui32Index++)
+               {
+                       ui32LISRCount += psDevInfo->aui32SampleIRQCount[ui32Index];
+                       ui32FWCount   += psFwOsData->aui32InterruptCount[ui32Index];
+               }
+#endif /* RGX_FW_IRQ_OS_COUNTERS */
+
+               if (ui32LISRCount < ui32FWCount)
+               {
+                       ui32MissingInts = (ui32FWCount-ui32LISRCount);
+               }
+
+               if (ui32LISRCount == psDevInfo->ui32InterruptCountLastTime  &&
+                   ui32MissingInts >= psDevInfo->ui32MissingInterruptsLastTime  &&
+                   psDevInfo->ui32MissingInterruptsLastTime > 1)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: LISR has not received the last %d interrupts",
+                                       __func__, ui32MissingInts));
+                       eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING;
+                       eNewReason = PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS;
+
+                       /* Schedule the MISRs to help mitigate the problems of missing interrupts. */
+                       OSScheduleMISR(psDevInfo->pvMISRData);
+                       if (psDevInfo->pvAPMISRData != NULL)
+                       {
+                               OSScheduleMISR(psDevInfo->pvAPMISRData);
+                       }
+               }
+               psDevInfo->ui32InterruptCountLastTime    = ui32LISRCount;
+               psDevInfo->ui32MissingInterruptsLastTime = ui32MissingInts;
+       }
+
+       /*
+          Stalled CCB check...
+       */
+       if (bCheckAfterTimePassed && (PVRSRV_DEVICE_HEALTH_STATUS_OK == eNewStatus))
+       {
+               RGXCheckForStalledClientContexts(psDevInfo, IMG_FALSE);
+       }
+
+       /* Notify client driver and system layer of any eNewStatus errors */
+       if (eNewStatus > PVRSRV_DEVICE_HEALTH_STATUS_OK)
+       {
+               /* Client notification of device error will be achieved by
+                * clients calling UM function RGXGetLastDeviceError() */
+               psDevInfo->eLastDeviceError = RGX_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR;
+
+               /* Notify system layer */
+               {
+                       PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode;
+                       PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig;
+
+                       if (psDevConfig->pfnSysDevErrorNotify)
+                       {
+                               PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0};
+
+                               sErrorData.eResetReason = RGX_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR;
+                               sErrorData.uErrData.sHostWdgData.ui32Status = (IMG_UINT32)eNewStatus;
+                               sErrorData.uErrData.sHostWdgData.ui32Reason = (IMG_UINT32)eNewReason;
+
+                               psDevConfig->pfnSysDevErrorNotify(psDevConfig,
+                                                                                                 &sErrorData);
+                       }
+               }
+       }
+
+       /*
+          Finished, save the new status...
+       */
+_RGXUpdateHealthStatus_Exit:
+       OSAtomicWrite(&psDevNode->eHealthStatus, eNewStatus);
+       OSAtomicWrite(&psDevNode->eHealthReason, eNewReason);
+       RGXSRV_HWPERF_DEVICE_INFO(psDevInfo, RGX_HWPERF_DEV_INFO_EV_HEALTH, eNewStatus, eNewReason);
+
+       /*
+        * Attempt to service the HWPerf buffer to regularly transport idle/periodic
+        * packets to host buffer.
+        */
+       if (psDevNode->pfnServiceHWPerf != NULL)
+       {
+               PVRSRV_ERROR eError = psDevNode->pfnServiceHWPerf(psDevNode);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "%s: "
+                                       "Error occurred when servicing HWPerf buffer (%d)",
+                                       __func__, eError));
+               }
+       }
+
+       /* Attempt to refresh timer correlation data */
+       RGXTimeCorrRestartPeriodic(psDevNode);
+
+       return PVRSRV_OK;
+} /* RGXUpdateHealthStatus */
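+
+/* Illustrative usage sketch, not part of the driver: a periodic device
+ * watchdog would typically call this with bCheckAfterTimePassed = IMG_TRUE so
+ * that the time-based checks (poll progress, KCCB progress, interrupt counts)
+ * run, while an on-demand status query would pass IMG_FALSE and simply refresh
+ * the currently recorded status.
+ *
+ *     eError = RGXUpdateHealthStatus(psDeviceNode, IMG_TRUE);
+ */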
+
+#if defined(SUPPORT_AUTOVZ)
+void RGXUpdateAutoVzWdgToken(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       if (likely(KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && KM_OS_CONNECTION_IS(ACTIVE, psDevInfo)))
+       {
+               /* read and write back the alive token value to confirm to the
+                * virtualisation watchdog that this connection is healthy */
+               KM_SET_OS_ALIVE_TOKEN(KM_GET_FW_ALIVE_TOKEN(psDevInfo), psDevInfo);
+       }
+}
+
+/*
+       RGXUpdateAutoVzWatchdog
+*/
+void RGXUpdateAutoVzWatchdog(PVRSRV_DEVICE_NODE* psDevNode)
+{
+       if (likely(psDevNode != NULL))
+       {
+               PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice;
+
+               if (unlikely((psDevInfo  == NULL || !psDevInfo->bFirmwareInitialised || !psDevInfo->bRGXPowered ||
+                       psDevInfo->pvRegsBaseKM == NULL || psDevNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT)))
+               {
+                       /* If the firmware is not initialised, stop here */
+                       return;
+               }
+               else
+               {
+                       PVRSRV_ERROR eError = PVRSRVPowerLock(psDevNode);
+                       PVR_LOG_RETURN_VOID_IF_ERROR(eError, "PVRSRVPowerLock");
+
+                       RGXUpdateAutoVzWdgToken(psDevInfo);
+                       PVRSRVPowerUnlock(psDevNode);
+               }
+       }
+}
+#endif /* SUPPORT_AUTOVZ */
+
+PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM)
+{
+       if (psCurrentServerCommonContext == NULL)
+       {
+               /* the context has already been freed so there is nothing to do here */
+               return PVRSRV_OK;
+       }
+
+       return CheckForStalledCCB(psCurrentServerCommonContext->psDevInfo->psDeviceNode,
+                                 psCurrentServerCommonContext->psClientCCB,
+                                 eKickTypeDM);
+}
+
+void DumpFWCommonContextInfo(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext,
+                             DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                             void *pvDumpDebugFile,
+                             IMG_UINT32 ui32VerbLevel)
+{
+       if (psCurrentServerCommonContext == NULL)
+       {
+               /* the context has already been freed so there is nothing to do here */
+               return;
+       }
+
+       if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH))
+       {
+               /* If high verbosity requested, dump whole CCB */
+               DumpCCB(psCurrentServerCommonContext->psDevInfo,
+                       psCurrentServerCommonContext->sFWCommonContextFWAddr,
+                       psCurrentServerCommonContext->psClientCCB,
+                       pfnDumpDebugPrintf,
+                       pvDumpDebugFile);
+       }
+       else
+       {
+               /* Otherwise, only dump first stalled command in the CCB */
+               DumpStalledCCBCommand(psCurrentServerCommonContext->sFWCommonContextFWAddr,
+                                     psCurrentServerCommonContext->psClientCCB,
+                                     pfnDumpDebugPrintf,
+                                     pvDumpDebugFile);
+       }
+}
+
+PVRSRV_ERROR AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl,
+               IMG_UINT32 *pui32NumCleanupCtl,
+               RGXFWIF_DM eDM,
+               IMG_BOOL bKick,
+               RGX_KM_HW_RT_DATASET           *psKMHWRTDataSet,
+               RGX_ZSBUFFER_DATA              *psZSBuffer,
+               RGX_ZSBUFFER_DATA              *psMSAAScratchBuffer)
+{
+       PVRSRV_ERROR eError;
+       PRGXFWIF_CLEANUP_CTL *psCleanupCtlWrite = apsCleanupCtl;
+
+       PVR_ASSERT((eDM == RGXFWIF_DM_GEOM) || (eDM == RGXFWIF_DM_3D));
+       PVR_RETURN_IF_INVALID_PARAM((eDM == RGXFWIF_DM_GEOM) || (eDM == RGXFWIF_DM_3D));
+
+       if (bKick)
+       {
+               if (psKMHWRTDataSet)
+               {
+                       PRGXFWIF_CLEANUP_CTL psCleanupCtl;
+
+                       eError = RGXSetFirmwareAddress(&psCleanupCtl, psKMHWRTDataSet->psHWRTDataFwMemDesc,
+                                       offsetof(RGXFWIF_HWRTDATA, sCleanupState),
+                                       RFW_FWADDR_NOREF_FLAG);
+                       PVR_RETURN_IF_ERROR(eError);
+
+                       *(psCleanupCtlWrite++) = psCleanupCtl;
+               }
+
+               if (eDM == RGXFWIF_DM_3D)
+               {
+                       RGXFWIF_PRBUFFER_TYPE eBufferType;
+                       RGX_ZSBUFFER_DATA *psBuffer = NULL;
+
+                       for (eBufferType = RGXFWIF_PRBUFFER_START; eBufferType < RGXFWIF_PRBUFFER_MAXSUPPORTED; eBufferType++)
+                       {
+                               switch (eBufferType)
+                               {
+                               case RGXFWIF_PRBUFFER_ZSBUFFER:
+                                       psBuffer = psZSBuffer;
+                                       break;
+                               case RGXFWIF_PRBUFFER_MSAABUFFER:
+                                       psBuffer = psMSAAScratchBuffer;
+                                       break;
+                               case RGXFWIF_PRBUFFER_MAXSUPPORTED:
+                                       psBuffer = NULL;
+                                       break;
+                               }
+                               if (psBuffer)
+                               {
+                                       (psCleanupCtlWrite++)->ui32Addr = psBuffer->sZSBufferFWDevVAddr.ui32Addr +
+                                                       offsetof(RGXFWIF_PRBUFFER, sCleanupState);
+                                       psBuffer = NULL;
+                               }
+                       }
+               }
+       }
+
+       *pui32NumCleanupCtl = psCleanupCtlWrite - apsCleanupCtl;
+       PVR_ASSERT(*pui32NumCleanupCtl <= RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS);
+
+       return PVRSRV_OK;
+}
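+
+/* Illustrative usage sketch, not part of the driver: a 3D kick path could
+ * gather the cleanup controls for its render target data set and PR buffers
+ * as below, with apsCleanupCtl sized for the per-kick maximum.
+ *
+ *     PRGXFWIF_CLEANUP_CTL apsCleanupCtl[RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS];
+ *     IMG_UINT32 ui32NumCleanupCtl;
+ *
+ *     eError = AttachKickResourcesCleanupCtls(apsCleanupCtl, &ui32NumCleanupCtl,
+ *                                             RGXFWIF_DM_3D, IMG_TRUE,
+ *                                             psKMHWRTDataSet, psZSBuffer,
+ *                                             psMSAAScratchBuffer);
+ */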
+
+PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode)
+{
+       PVRSRV_RGXDEV_INFO       *psDevInfo;
+       RGXFWIF_HWRINFOBUF       *psHWRInfoBuf;
+       IMG_UINT32               i;
+
+       if (psDevNode->pvDevice == NULL)
+       {
+               return PVRSRV_ERROR_INVALID_DEVINFO;
+       }
+       psDevInfo = psDevNode->pvDevice;
+
+       psHWRInfoBuf = psDevInfo->psRGXFWIfHWRInfoBufCtl;
+
+       for (i = 0 ; i < RGXFWIF_DM_MAX ; i++)
+       {
+               /* Reset the HWR numbers */
+               psHWRInfoBuf->aui32HwrDmLockedUpCount[i] = 0;
+               psHWRInfoBuf->aui32HwrDmFalseDetectCount[i] = 0;
+               psHWRInfoBuf->aui32HwrDmRecoveredCount[i] = 0;
+               psHWRInfoBuf->aui32HwrDmOverranCount[i] = 0;
+       }
+
+       for (i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++)
+       {
+               psHWRInfoBuf->sHWRInfo[i].ui32HWRNumber = 0;
+       }
+
+       psHWRInfoBuf->ui32WriteIndex = 0;
+       psHWRInfoBuf->ui32DDReqCount = 0;
+
+       return PVRSRV_OK;
+}
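+
+/* Illustrative usage sketch, not part of the driver: a debug interface could
+ * clear the accumulated hardware recovery counters before a test run simply by
+ * calling RGXResetHWRLogs(psDevNode) and checking the returned error code.
+ */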
+
+PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR,
+               IMG_DEV_PHYADDR *psPhyAddr,
+               IMG_UINT32 ui32LogicalOffset,
+               IMG_UINT32 ui32Log2PageSize,
+               IMG_UINT32 ui32NumOfPages,
+               IMG_BOOL *bValid)
+{
+       PVRSRV_ERROR eError;
+
+       eError = PMRLockSysPhysAddresses(psPMR);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: PMRLockSysPhysAddresses failed (%u)",
+                        __func__,
+                        eError));
+               return eError;
+       }
+
+       eError = PMR_DevPhysAddr(psPMR,
+                       ui32Log2PageSize,
+                       ui32NumOfPages,
+                       ui32LogicalOffset,
+                       psPhyAddr,
+                       bValid);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: PMR_DevPhysAddr failed (%u)",
+                        __func__,
+                        eError));
+               return eError;
+       }
+
+       eError = PMRUnlockSysPhysAddresses(psPMR);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: PMRUnLockSysPhysAddresses failed (%u)",
+                        __func__,
+                        eError));
+               return eError;
+       }
+
+       return eError;
+}
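+
+/* Illustrative usage sketch, not part of the driver: looking up the device
+ * physical address of the first 4KB page of a PMR. The log2 page size of 12
+ * and the single-page count are example values only.
+ *
+ *     IMG_DEV_PHYADDR sPhyAddr;
+ *     IMG_BOOL bValid;
+ *
+ *     eError = RGXGetPhyAddr(psPMR, &sPhyAddr, 0, 12, 1, &bValid);
+ */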
+
+#if defined(PDUMP)
+PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32WriteOffset)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       if (psDevInfo->bDumpedKCCBCtlAlready)
+       {
+               /* exiting capture range or pdump block */
+               psDevInfo->bDumpedKCCBCtlAlready = IMG_FALSE;
+
+               /* Make sure the previous command is drained in the PDump stream in case we 'jump' over some future commands */
+               PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode,
+                               PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER,
+                               "kCCB(%p): Draining rgxfw_roff (0x%x) == woff (0x%x)",
+                               psDevInfo->psKernelCCBCtl,
+                               ui32WriteOffset,
+                               ui32WriteOffset);
+               eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBCtlMemDesc,
+                               offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset),
+                               ui32WriteOffset,
+                               0xffffffff,
+                               PDUMP_POLL_OPERATOR_EQUAL,
+                               PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: problem pdumping POL for kCCBCtl (%d)", __func__, eError));
+               }
+       }
+
+       return eError;
+}
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function     RGXClientConnectCompatCheck_ClientAgainstFW
+
+ @Description
+
+ Check compatibility of client and firmware (build options)
+ at the connection time.
+
+ @Input psDeviceNode - device node
+ @Input ui32ClientBuildOptions - build options for the client
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+******************************************************************************/
+PVRSRV_ERROR RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32ClientBuildOptions)
+{
+#if !defined(NO_HARDWARE) || defined(PDUMP)
+#if !defined(NO_HARDWARE)
+       IMG_UINT32              ui32BuildOptionsMismatch;
+       IMG_UINT32              ui32BuildOptionsFW;
+#endif
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+#endif
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+#if !defined(NO_HARDWARE)
+       if (psDevInfo == NULL || psDevInfo->psRGXFWIfOsInitMemDesc == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Cannot acquire kernel fw compatibility check info, RGXFWIF_OSINIT structure not allocated.",
+                        __func__));
+               return PVRSRV_ERROR_NOT_INITIALISED;
+       }
+
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               if (*((volatile IMG_BOOL *) &psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated))
+               {
+                       /* No need to wait if the FW has already updated the values */
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+#endif
+
+#if defined(PDUMP)
+       {
+               PVRSRV_ERROR eError;
+
+               PDUMPCOMMENT(psDeviceNode, "Compatibility check: client and FW build options");
+               eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc,
+                               offsetof(RGXFWIF_OSINIT, sRGXCompChecks) +
+                               offsetof(RGXFWIF_COMPCHECKS, ui32BuildOptions),
+                               ui32ClientBuildOptions,
+                               0xffffffff,
+                               PDUMP_POLL_OPERATOR_EQUAL,
+                               PDUMP_FLAGS_CONTINUOUS);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)",
+                                       __func__,
+                                       eError));
+                       return eError;
+               }
+       }
+#endif
+
+#if !defined(NO_HARDWARE)
+       ui32BuildOptionsFW = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.ui32BuildOptions;
+       ui32BuildOptionsMismatch = ui32ClientBuildOptions ^ ui32BuildOptionsFW;
+
+       if (ui32BuildOptionsMismatch != 0)
+       {
+               if ((ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0)
+               {
+                       PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; "
+                                       "extra options present in client: (0x%x). Please check rgx_options.h",
+                                       ui32ClientBuildOptions & ui32BuildOptionsMismatch ));
+               }
+
+               if ((ui32BuildOptionsFW & ui32BuildOptionsMismatch) != 0)
+               {
+                       PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; "
+                                       "extra options present in Firmware: (0x%x). Please check rgx_options.h",
+                                       ui32BuildOptionsFW & ui32BuildOptionsMismatch ));
+               }
+
+               return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware and client build options match. [ OK ]", __func__));
+       }
+#endif
+
+       return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function     RGXFwRawHeapAllocMap
+
+ @Description Register firmware heap for the specified guest OSID
+
+ @Input psDeviceNode - device node
+ @Input ui32OSID     - Guest OSID
+ @Input sDevPAddr    - Heap address
+ @Input ui64DevPSize - Heap size
+
+ @Return   PVRSRV_ERROR - PVRSRV_OK if heap setup was successful.
+
+******************************************************************************/
+PVRSRV_ERROR RGXFwRawHeapAllocMap(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                 IMG_UINT32 ui32OSID,
+                                                                 IMG_DEV_PHYADDR sDevPAddr,
+                                                                 IMG_UINT64 ui64DevPSize)
+{
+       PVRSRV_ERROR eError;
+       IMG_CHAR szRegionRAName[RA_MAX_NAME_LENGTH];
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_MEMALLOCFLAGS_T uiRawFwHeapAllocFlags = (RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS |
+                                                                                                       PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PREMAP0 + ui32OSID));
+       PHYS_HEAP_CONFIG *psFwMainConfig = FindPhysHeapConfig(psDeviceNode->psDevConfig,
+                                                                                                                  PHYS_HEAP_USAGE_FW_MAIN);
+       PHYS_HEAP_CONFIG sFwHeapConfig;
+
+       PVRSRV_VZ_RET_IF_NOT_MODE(HOST, PVRSRV_OK);
+
+       if (psFwMainConfig == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "FW_MAIN heap config not found."));
+               return PVRSRV_ERROR_NOT_SUPPORTED;
+       }
+
+       OSSNPrintf(szRegionRAName, sizeof(szRegionRAName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSID);
+
+       if (!ui64DevPSize ||
+               !sDevPAddr.uiAddr ||
+               ui32OSID >= RGX_NUM_OS_SUPPORTED ||
+               ui64DevPSize != RGX_FIRMWARE_RAW_HEAP_SIZE)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Invalid parameters for %s", szRegionRAName));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       sFwHeapConfig = *psFwMainConfig;
+       sFwHeapConfig.sStartAddr.uiAddr = 0;
+       sFwHeapConfig.sCardBase.uiAddr = sDevPAddr.uiAddr;
+       sFwHeapConfig.uiSize = RGX_FIRMWARE_RAW_HEAP_SIZE;
+       sFwHeapConfig.eType = PHYS_HEAP_TYPE_LMA;
+
+       eError = PhysmemCreateHeapLMA(psDeviceNode, &sFwHeapConfig, szRegionRAName, &psDeviceNode->apsFWPremapPhysHeap[ui32OSID]);
+       PVR_LOG_RETURN_IF_ERROR_VA(eError, "PhysmemCreateHeapLMA:PREMAP [%d]", ui32OSID);
+
+       eError = PhysHeapAcquire(psDeviceNode->apsFWPremapPhysHeap[ui32OSID]);
+       PVR_LOG_RETURN_IF_ERROR_VA(eError, "PhysHeapAcquire:PREMAP [%d]", ui32OSID);
+
+       psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32OSID] = psDeviceNode->apsFWPremapPhysHeap[ui32OSID];
+
+       PDUMPCOMMENT(psDeviceNode, "Allocate and map raw firmware heap for OSID: [%d]", ui32OSID);
+
+#if (RGX_NUM_OS_SUPPORTED > 1)
+       /* don't clear the heap of other guests on allocation */
+       uiRawFwHeapAllocFlags &= (ui32OSID > RGXFW_HOST_OS) ? (~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) : (~0ULL);
+#endif
+
+       /* If the firmware is already powered up, consider the firmware heaps to be pre-mapped. */
+       if (psDeviceNode->bAutoVzFwIsUp)
+       {
+               uiRawFwHeapAllocFlags &= RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp);
+               DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_TRUE);
+       }
+
+       eError = DevmemFwAllocate(psDevInfo,
+                                                         RGX_FIRMWARE_RAW_HEAP_SIZE,
+                                                         uiRawFwHeapAllocFlags,
+                                                         psDevInfo->psGuestFirmwareRawHeap[ui32OSID]->pszName,
+                                                         &psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]);
+       PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate");
+
+       /* Mark this devmem heap as premapped so allocations will not require device mapping. */
+       DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_TRUE);
+
+       if (ui32OSID == RGXFW_HOST_OS)
+       {
+               /* If the Host's raw fw heap is premapped, mark its main & config sub-heaps
+                * accordingly. No memory allocated from these sub-heaps will be individually
+                * mapped into the device's address space, so they can remain marked
+                * permanently as premapped. */
+               DevmemHeapSetPremapStatus(psDevInfo->psFirmwareMainHeap, IMG_TRUE);
+               DevmemHeapSetPremapStatus(psDevInfo->psFirmwareConfigHeap, IMG_TRUE);
+       }
+
+       return eError;
+}
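+
+/* Illustrative usage sketch, not part of the driver: on a virtualised Host the
+ * setup code would map each guest's firmware carveout by OSID; ui32GuestOSID
+ * and sGuestHeapDevPAddr are placeholders. Note the size must be exactly
+ * RGX_FIRMWARE_RAW_HEAP_SIZE or the call is rejected.
+ *
+ *     eError = RGXFwRawHeapAllocMap(psDeviceNode, ui32GuestOSID,
+ *                                   sGuestHeapDevPAddr,
+ *                                   RGX_FIRMWARE_RAW_HEAP_SIZE);
+ */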
+
+/*!
+*******************************************************************************
+
+ @Function     RGXFwRawHeapUnmapFree
+
+ @Description Unregister firmware heap for the specified guest OSID
+
+ @Input psDeviceNode - device node
+ @Input ui32OSID     - Guest OSID
+
+******************************************************************************/
+void RGXFwRawHeapUnmapFree(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                  IMG_UINT32 ui32OSID)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       /* remove the premap status, so the heap can be unmapped and freed */
+       if (psDevInfo->psGuestFirmwareRawHeap[ui32OSID])
+       {
+               DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_FALSE);
+       }
+
+       if (psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID])
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]);
+               psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID] = NULL;
+       }
+}
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvHalt
+
+@Description    Halt the RISC-V FW core (required for certain operations
+                done through the Debug Module)
+
+@Input          psDevInfo       Pointer to device info
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvHalt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode,
+                             PDUMP_FLAGS_CONTINUOUS, "Halt RISC-V FW");
+
+       /* Send halt request (no need to select one or more harts on this RISC-V core) */
+       PDUMPREG32(psDevInfo->psDeviceNode,
+                  RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DMCONTROL,
+                  RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_EN |
+                  RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN,
+                  PDUMP_FLAGS_CONTINUOUS);
+
+       /* Wait until hart is halted */
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   RGX_CR_FWCORE_DMI_DMSTATUS,
+                   RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN,
+                   RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN,
+                   PDUMP_FLAGS_CONTINUOUS,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+
+       /* Clear halt request */
+       PDUMPREG32(psDevInfo->psDeviceNode,
+                  RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DMCONTROL,
+                  RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN,
+                  PDUMP_FLAGS_CONTINUOUS);
+#else
+       IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM;
+
+       /* Send halt request (no need to select one or more harts on this RISC-V core) */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMCONTROL,
+                      RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_EN |
+                      RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN);
+
+       /* Wait until hart is halted */
+       if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                                pui32RegsBase + RGX_CR_FWCORE_DMI_DMSTATUS/sizeof(IMG_UINT32),
+                                RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN,
+                                RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN,
+                                POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Hart not halted (0x%x)",
+                        __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMSTATUS)));
+               return PVRSRV_ERROR_TIMEOUT;
+       }
+
+       /* Clear halt request */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMCONTROL,
+                      RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN);
+#endif
+
+       return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvIsHalted
+
+@Description    Check if the RISC-V FW is halted
+
+@Input          psDevInfo       Pointer to device info
+
+@Return         IMG_BOOL
+******************************************************************************/
+IMG_BOOL RGXRiscvIsHalted(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+#if defined(NO_HARDWARE)
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+       /* Assume the core is always halted in nohw */
+       return IMG_TRUE;
+#else
+
+       return (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMSTATUS) &
+               RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN) != 0U;
+#endif
+}
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvResume
+
+@Description    Resume the RISC-V FW core
+
+@Input          psDevInfo       Pointer to device info
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvResume(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode,
+                             PDUMP_FLAGS_CONTINUOUS, "Resume RISC-V FW");
+
+       /* Send resume request (no need to select one or more harts on this RISC-V core) */
+       PDUMPREG32(psDevInfo->psDeviceNode,
+                  RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DMCONTROL,
+                  RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_EN |
+                  RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN,
+                  PDUMP_FLAGS_CONTINUOUS);
+
+       /* Wait until hart is resumed */
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   RGX_CR_FWCORE_DMI_DMSTATUS,
+                   RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN,
+                   RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN,
+                   PDUMP_FLAGS_CONTINUOUS,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+
+       /* Clear resume request */
+       PDUMPREG32(psDevInfo->psDeviceNode,
+                  RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DMCONTROL,
+                  RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN,
+                  PDUMP_FLAGS_CONTINUOUS);
+#else
+       IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM;
+
+       /* Send resume request (no need to select one or more harts on this RISC-V core) */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMCONTROL,
+                      RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_EN |
+                      RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN);
+
+       /* Wait until hart is resumed */
+       if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                                pui32RegsBase + RGX_CR_FWCORE_DMI_DMSTATUS/sizeof(IMG_UINT32),
+                                RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN,
+                                RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN,
+                                POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Hart not resumed (0x%x)",
+                        __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMSTATUS)));
+               return PVRSRV_ERROR_TIMEOUT;
+       }
+
+       /* Clear resume request */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMCONTROL,
+                      RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN);
+#endif
+
+       return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvCheckAbstractCmdError
+
+@Description    Check for RISC-V abstract command errors and clear them
+
+@Input          psDevInfo    Pointer to GPU device info
+
+@Return         RGXRISCVFW_ABSTRACT_CMD_ERR
+******************************************************************************/
+static RGXRISCVFW_ABSTRACT_CMD_ERR RGXRiscvCheckAbstractCmdError(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       RGXRISCVFW_ABSTRACT_CMD_ERR eCmdErr;
+
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       eCmdErr = RISCV_ABSTRACT_CMD_NO_ERROR;
+
+       /* Check error status */
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   RGX_CR_FWCORE_DMI_ABSTRACTCS,
+                   RISCV_ABSTRACT_CMD_NO_ERROR << RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_SHIFT,
+                   ~RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK,
+                   PDUMP_FLAGS_CONTINUOUS,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+#else
+       void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM;
+
+       /* Check error status */
+       eCmdErr = (OSReadHWReg32(pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS)
+                 & ~RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK)
+                 >> RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_SHIFT;
+
+       if (eCmdErr != RISCV_ABSTRACT_CMD_NO_ERROR)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "RISC-V FW abstract command error %u", eCmdErr));
+
+               /* Clear the error (note CMDERR field is write-1-to-clear) */
+               OSWriteHWReg32(pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS,
+                              ~RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK);
+       }
+#endif
+
+       return eCmdErr;
+}
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvReadReg
+
+@Description    Read a value from the given RISC-V register (GPR or CSR)
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32RegAddr     RISC-V register address
+
+@Output         pui32Value      Read value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvReadReg(PVRSRV_RGXDEV_INFO *psDevInfo,
+                             IMG_UINT32 ui32RegAddr,
+                             IMG_UINT32 *pui32Value)
+{
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+       PVR_UNREFERENCED_PARAMETER(ui32RegAddr);
+       PVR_UNREFERENCED_PARAMETER(pui32Value);
+
+       /* Reading HW registers is not supported in nohw/pdump */
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+#else
+       IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM;
+
+       /* Send abstract register read command */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM,
+                      RGX_CR_FWCORE_DMI_COMMAND,
+                      (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) |
+                      RGXRISCVFW_DMI_COMMAND_READ |
+                      RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT |
+                      ui32RegAddr);
+
+       /* Wait until abstract command is completed */
+       if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                                pui32RegsBase + RGX_CR_FWCORE_DMI_ABSTRACTCS/sizeof(IMG_UINT32),
+                                0U,
+                                RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN,
+                                POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)",
+                        __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS)));
+               return PVRSRV_ERROR_TIMEOUT;
+       }
+
+       if (RGXRiscvCheckAbstractCmdError(psDevInfo) == RISCV_ABSTRACT_CMD_NO_ERROR)
+       {
+               /* Read register value */
+               *pui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA0);
+       }
+       else
+       {
+               *pui32Value = 0U;
+       }
+
+       return PVRSRV_OK;
+#endif
+}
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvPollReg
+
+@Description    Poll for a value from the given RISC-V register (GPR or CSR)
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32RegAddr     RISC-V register address
+@Input          ui32Value       Expected value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvPollReg(PVRSRV_RGXDEV_INFO *psDevInfo,
+                             IMG_UINT32 ui32RegAddr,
+                             IMG_UINT32 ui32Value)
+{
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                             "Poll RISC-V register 0x%x (expected 0x%08x)",
+                             ui32RegAddr, ui32Value);
+
+       /* Send abstract register read command */
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_COMMAND,
+                  (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) |
+                  RGXRISCVFW_DMI_COMMAND_READ |
+                  RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT |
+                  ui32RegAddr,
+                  PDUMP_FLAGS_CONTINUOUS);
+
+       /* Wait until abstract command is completed */
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   RGX_CR_FWCORE_DMI_ABSTRACTCS,
+                   0U,
+                   RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN,
+                   PDUMP_FLAGS_CONTINUOUS,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+
+       RGXRiscvCheckAbstractCmdError(psDevInfo);
+
+       /* Check read value */
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   RGX_CR_FWCORE_DMI_DATA0,
+                   ui32Value,
+                   0xFFFFFFFF,
+                   PDUMP_FLAGS_CONTINUOUS,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+
+       return PVRSRV_OK;
+#else
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+       PVR_UNREFERENCED_PARAMETER(ui32RegAddr);
+       PVR_UNREFERENCED_PARAMETER(ui32Value);
+
+       /* Polling HW registers is currently not required when the driver runs on live hardware */
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+#endif
+}
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvWriteReg
+
+@Description    Write a value to the given RISC-V register (GPR or CSR)
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32RegAddr     RISC-V register address
+@Input          ui32Value       Write value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvWriteReg(PVRSRV_RGXDEV_INFO *psDevInfo,
+                              IMG_UINT32 ui32RegAddr,
+                              IMG_UINT32 ui32Value)
+{
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                             "Write RISC-V register 0x%x (value 0x%08x)",
+                             ui32RegAddr, ui32Value);
+
+       /* Prepare data to be written to register */
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DATA0,
+                  ui32Value, PDUMP_FLAGS_CONTINUOUS);
+
+       /* Send abstract register write command */
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_COMMAND,
+                  (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) |
+                  RGXRISCVFW_DMI_COMMAND_WRITE |
+                  RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT |
+                  ui32RegAddr,
+                  PDUMP_FLAGS_CONTINUOUS);
+
+       /* Wait until abstract command is completed */
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   RGX_CR_FWCORE_DMI_ABSTRACTCS,
+                   0U,
+                   RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN,
+                   PDUMP_FLAGS_CONTINUOUS,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+#else
+       IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM;
+
+       /* Prepare data to be written to register */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA0, ui32Value);
+
+       /* Send abstract register write command */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM,
+                      RGX_CR_FWCORE_DMI_COMMAND,
+                      (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) |
+                      RGXRISCVFW_DMI_COMMAND_WRITE |
+                      RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT |
+                      ui32RegAddr);
+
+       /* Wait until abstract command is completed */
+       if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                                pui32RegsBase + RGX_CR_FWCORE_DMI_ABSTRACTCS/sizeof(IMG_UINT32),
+                                0U,
+                                RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN,
+                                POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)",
+                        __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS)));
+               return PVRSRV_ERROR_TIMEOUT;
+       }
+#endif
+
+       return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvCheckSysBusError
+
+@Description    Check for RISC-V system bus errors and clear them
+
+@Input          psDevInfo    Pointer to GPU device info
+
+@Return         RGXRISCVFW_SYSBUS_ERR
+******************************************************************************/
+static __maybe_unused RGXRISCVFW_SYSBUS_ERR RGXRiscvCheckSysBusError(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       RGXRISCVFW_SYSBUS_ERR eSBError;
+
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       eSBError = RISCV_SYSBUS_NO_ERROR;
+
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   RGX_CR_FWCORE_DMI_SBCS,
+                   RISCV_SYSBUS_NO_ERROR << RGX_CR_FWCORE_DMI_SBCS_SBERROR_SHIFT,
+                   ~RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK,
+                   PDUMP_FLAGS_CONTINUOUS,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+#else
+       void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM;
+
+       eSBError = (OSReadHWReg32(pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBCS)
+                & ~RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK)
+                >> RGX_CR_FWCORE_DMI_SBCS_SBERROR_SHIFT;
+
+       if (eSBError != RISCV_SYSBUS_NO_ERROR)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "RISC-V FW system bus error %u", eSBError));
+
+               /* Clear the error (note SBERROR field is write-1-to-clear) */
+               OSWriteHWReg32(pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBCS,
+                              ~RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK);
+       }
+#endif
+
+       return eSBError;
+}
+
+#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) && !defined(EMULATOR)
+/*!
+*******************************************************************************
+@Function       RGXRiscvReadAbstractMem
+
+@Description    Read a value at the given address in RISC-V memory space
+                using RISC-V abstract memory commands
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32Addr        Address in RISC-V memory space
+
+@Output         pui32Value      Read value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR
+RGXRiscvReadAbstractMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 *pui32Value)
+{
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+       PVR_UNREFERENCED_PARAMETER(ui32Addr);
+       PVR_UNREFERENCED_PARAMETER(pui32Value);
+
+       /* Reading memory is not supported in nohw/pdump */
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+#else
+       IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM;
+
+       /* Prepare read address  */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA1, ui32Addr);
+
+       /* Send abstract memory read command */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM,
+                      RGX_CR_FWCORE_DMI_COMMAND,
+                      (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) |
+                      RGXRISCVFW_DMI_COMMAND_READ |
+                      RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT);
+
+       /* Wait until abstract command is completed */
+       if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                                pui32RegsBase + RGX_CR_FWCORE_DMI_ABSTRACTCS/sizeof(IMG_UINT32),
+                                0U,
+                                RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN,
+                                POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)",
+                        __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS)));
+               return PVRSRV_ERROR_TIMEOUT;
+       }
+
+       if (RGXRiscvCheckAbstractCmdError(psDevInfo) == RISCV_ABSTRACT_CMD_NO_ERROR)
+       {
+               /* Read memory value */
+               *pui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA0);
+       }
+       else
+       {
+               *pui32Value = 0U;
+       }
+
+       return PVRSRV_OK;
+#endif
+}
+#endif /* defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) && !defined(EMULATOR) */
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvPollAbstractMem
+
+@Description    Poll for a value at the given address in RISC-V memory space
+                using RISC-V abstract memory commands
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32Addr        Address in RISC-V memory space
+@Input          ui32Value       Expected value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR
+RGXRiscvPollAbstractMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value)
+{
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                             "Poll RISC-V address 0x%x (expected 0x%08x)",
+                             ui32Addr, ui32Value);
+
+       /* Prepare read address  */
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DATA1,
+                  ui32Addr, PDUMP_FLAGS_CONTINUOUS);
+
+       /* Send abstract memory read command */
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_COMMAND,
+                  (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) |
+                  RGXRISCVFW_DMI_COMMAND_READ |
+                  RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT,
+                  PDUMP_FLAGS_CONTINUOUS);
+
+       /* Wait until abstract command is completed */
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   RGX_CR_FWCORE_DMI_ABSTRACTCS,
+                   0U,
+                   RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN,
+                   PDUMP_FLAGS_CONTINUOUS,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+
+       RGXRiscvCheckAbstractCmdError(psDevInfo);
+
+       /* Check read value */
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   RGX_CR_FWCORE_DMI_DATA0,
+                   ui32Value,
+                   0xFFFFFFFF,
+                   PDUMP_FLAGS_CONTINUOUS,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+
+       return PVRSRV_OK;
+#else
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+       PVR_UNREFERENCED_PARAMETER(ui32Addr);
+       PVR_UNREFERENCED_PARAMETER(ui32Value);
+
+       /* Polling memory is currently not required when the driver runs on live hardware */
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+#endif
+}
+
+#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) && !defined(EMULATOR)
+/*!
+*******************************************************************************
+@Function       RGXRiscvReadSysBusMem
+
+@Description    Read a value at the given address in RISC-V memory space
+                using the RISC-V system bus
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32Addr        Address in RISC-V memory space
+
+@Output         pui32Value      Read value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR
+RGXRiscvReadSysBusMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 *pui32Value)
+{
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+       PVR_UNREFERENCED_PARAMETER(ui32Addr);
+       PVR_UNREFERENCED_PARAMETER(pui32Value);
+
+       /* Reading memory is not supported in nohw/pdump */
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+#else
+       IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM;
+
+       /* Configure system bus to read 32 bit every time a new address is provided */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM,
+                      RGX_CR_FWCORE_DMI_SBCS,
+                      (RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT) |
+                      RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_EN);
+
+       /* Perform read */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBADDRESS0, ui32Addr);
+
+       /* Wait until system bus is idle */
+       if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                                pui32RegsBase + RGX_CR_FWCORE_DMI_SBCS/sizeof(IMG_UINT32),
+                                0U,
+                                RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN,
+                                POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: System Bus did not go idle in time (sbcs = 0x%x)",
+                        __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBCS)));
+               return PVRSRV_ERROR_TIMEOUT;
+       }
+
+       if (RGXRiscvCheckSysBusError(psDevInfo) == RISCV_SYSBUS_NO_ERROR)
+       {
+               /* Read value from debug system bus */
+               *pui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBDATA0);
+       }
+       else
+       {
+               *pui32Value = 0U;
+       }
+
+       return PVRSRV_OK;
+#endif
+}
+#endif /* defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) && !defined(EMULATOR) */
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvPollSysBusMem
+
+@Description    Poll for a value at the given address in RISC-V memory space
+                using the RISC-V system bus
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32Addr        Address in RISC-V memory space
+@Input          ui32Value       Expected value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR
+RGXRiscvPollSysBusMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value)
+{
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                             "Poll RISC-V address 0x%x (expected 0x%08x)",
+                             ui32Addr, ui32Value);
+
+       /* Configure system bus to read 32 bit every time a new address is provided */
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBCS,
+                  (RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT) |
+                  RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_EN,
+                  PDUMP_FLAGS_CONTINUOUS);
+
+       /* Perform read */
+       PDUMPREG32(psDevInfo->psDeviceNode,
+                  RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBADDRESS0,
+                  ui32Addr,
+                  PDUMP_FLAGS_CONTINUOUS);
+
+       /* Wait until system bus is idle */
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   RGX_CR_FWCORE_DMI_SBCS,
+                   0U,
+                   RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN,
+                   PDUMP_FLAGS_CONTINUOUS,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+
+       RGXRiscvCheckSysBusError(psDevInfo);
+
+       /* Check read value */
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   RGX_CR_FWCORE_DMI_SBDATA0,
+                   ui32Value,
+                   0xFFFFFFFF,
+                   PDUMP_FLAGS_CONTINUOUS,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+
+       return PVRSRV_OK;
+#else
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+       PVR_UNREFERENCED_PARAMETER(ui32Addr);
+       PVR_UNREFERENCED_PARAMETER(ui32Value);
+
+       /* Polling memory is currently not required when the driver runs on live hardware */
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+#endif
+}
+
+#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) && !defined(EMULATOR)
+/*!
+*******************************************************************************
+@Function       RGXRiscvReadMem
+
+@Description    Read a value at the given address in RISC-V memory space
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32Addr        Address in RISC-V memory space
+
+@Output         pui32Value      Read value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXRiscvReadMem(PVRSRV_RGXDEV_INFO *psDevInfo,
+                             IMG_UINT32 ui32Addr,
+                             IMG_UINT32 *pui32Value)
+{
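+       /* Addresses inside the FW coremem window are accessed with abstract memory
+        * commands, since the core-local memory is assumed not to be reachable over
+        * the debug system bus; all other addresses are read via the system bus.
+        */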
+       if (ui32Addr >= RGXRISCVFW_COREMEM_BASE && ui32Addr <= RGXRISCVFW_COREMEM_END)
+       {
+               return RGXRiscvReadAbstractMem(psDevInfo, ui32Addr, pui32Value);
+       }
+
+       return RGXRiscvReadSysBusMem(psDevInfo, ui32Addr, pui32Value);
+}
+#endif /* defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) && !defined(EMULATOR) */
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvPollMem
+
+@Description    Poll a value at the given address in RISC-V memory space
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32Addr        Address in RISC-V memory space
+@Input          ui32Value       Expected value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvPollMem(PVRSRV_RGXDEV_INFO *psDevInfo,
+                             IMG_UINT32 ui32Addr,
+                             IMG_UINT32 ui32Value)
+{
+       if (ui32Addr >= RGXRISCVFW_COREMEM_BASE && ui32Addr <= RGXRISCVFW_COREMEM_END)
+       {
+               return RGXRiscvPollAbstractMem(psDevInfo, ui32Addr, ui32Value);
+       }
+
+       return RGXRiscvPollSysBusMem(psDevInfo, ui32Addr, ui32Value);
+}
+
+#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) && !defined(EMULATOR)
+/*!
+*******************************************************************************
+@Function       RGXRiscvWriteAbstractMem
+
+@Description    Write a value at the given address in RISC-V memory space
+                using RISC-V abstract memory commands
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32Addr        Address in RISC-V memory space
+@Input          ui32Value       Write value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR
+RGXRiscvWriteAbstractMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value)
+{
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                             "Write RISC-V address 0x%x (value 0x%08x)",
+                             ui32Addr, ui32Value);
+
+       /* Prepare write address */
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DATA1,
+                  ui32Addr, PDUMP_FLAGS_CONTINUOUS);
+
+       /* Prepare write data */
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DATA0,
+                  ui32Value, PDUMP_FLAGS_CONTINUOUS);
+
+       /* Send abstract memory write command */
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_COMMAND,
+                  (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) |
+                  RGXRISCVFW_DMI_COMMAND_WRITE |
+                  RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT,
+                  PDUMP_FLAGS_CONTINUOUS);
+
+       /* Wait until abstract command is completed */
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   RGX_CR_FWCORE_DMI_ABSTRACTCS,
+                   0U,
+                   RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN,
+                   PDUMP_FLAGS_CONTINUOUS,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+#else
+       IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM;
+
+       /* Prepare write address */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA1, ui32Addr);
+
+       /* Prepare write data */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA0, ui32Value);
+
+       /* Send abstract memory write command */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM,
+                      RGX_CR_FWCORE_DMI_COMMAND,
+                      (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) |
+                      RGXRISCVFW_DMI_COMMAND_WRITE |
+                      RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT);
+
+       /* Wait until abstract command is completed */
+       if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                                pui32RegsBase + RGX_CR_FWCORE_DMI_ABSTRACTCS/sizeof(IMG_UINT32),
+                                0U,
+                                RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN,
+                                POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)",
+                        __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS)));
+               return PVRSRV_ERROR_TIMEOUT;
+       }
+#endif
+
+       return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvWriteSysBusMem
+
+@Description    Write a value at the given address in RISC-V memory space
+                using the RISC-V system bus
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32Addr        Address in RISC-V memory space
+@Input          ui32Value       Write value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR
+RGXRiscvWriteSysBusMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value)
+{
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                             "Write RISC-V address 0x%x (value 0x%08x)",
+                             ui32Addr, ui32Value);
+
+       /* Configure system bus for 32 bit accesses */
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBCS,
+                  RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT,
+                  PDUMP_FLAGS_CONTINUOUS);
+
+       /* Prepare write address */
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBADDRESS0,
+                  ui32Addr, PDUMP_FLAGS_CONTINUOUS);
+
+       /* Prepare write data and initiate write */
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBDATA0,
+                  ui32Value, PDUMP_FLAGS_CONTINUOUS);
+
+       /* Wait until system bus is idle */
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   RGX_CR_FWCORE_DMI_SBCS,
+                   0U,
+                   RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN,
+                   PDUMP_FLAGS_CONTINUOUS,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+#else
+       IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM;
+
+       /* Configure system bus for 32 bit accesses */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM,
+                      RGX_CR_FWCORE_DMI_SBCS,
+                      RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT);
+
+       /* Prepare write address */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBADDRESS0, ui32Addr);
+
+       /* Prepare write data and initiate write */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBDATA0, ui32Value);
+
+       /* Wait until system bus is idle */
+       if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                                pui32RegsBase + RGX_CR_FWCORE_DMI_SBCS/sizeof(IMG_UINT32),
+                                0U,
+                                RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN,
+                                POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: System Bus did not go idle in time (sbcs = 0x%x)",
+                        __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBCS)));
+               return PVRSRV_ERROR_TIMEOUT;
+       }
+#endif
+
+       return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvWriteMem
+
+@Description    Write a value to the given address in RISC-V memory space
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32Addr        Address in RISC-V memory space
+@Input          ui32Value       Write value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXRiscvWriteMem(PVRSRV_RGXDEV_INFO *psDevInfo,
+                              IMG_UINT32 ui32Addr,
+                              IMG_UINT32 ui32Value)
+{
+       if (ui32Addr >= RGXRISCVFW_COREMEM_BASE && ui32Addr <= RGXRISCVFW_COREMEM_END)
+       {
+               return RGXRiscvWriteAbstractMem(psDevInfo, ui32Addr, ui32Value);
+       }
+
+       return RGXRiscvWriteSysBusMem(psDevInfo, ui32Addr, ui32Value);
+}
+#endif /* defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) && !defined(EMULATOR) */
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvDmiOp
+
+@Description    Acquire the powerlock and perform an operation on the RISC-V
+                Debug Module Interface, but only if the GPU is powered on.
+
+@Input          psDevInfo       Pointer to device info
+@InOut          pui64DMI        Encoding of a request for the RISC-V Debug
+                                Module with same format as the 'dmi' register
+                                from the RISC-V debug specification (v0.13+).
+                                On return, this is updated with the result of
+                                the request, encoded the same way.
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvDmiOp(PVRSRV_RGXDEV_INFO *psDevInfo,
+                           IMG_UINT64 *pui64DMI)
+{
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+       PVR_UNREFERENCED_PARAMETER(pui64DMI);
+
+       /* Accessing DM registers is not supported in nohw/pdump */
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+#else
+#define DMI_BASE     RGX_CR_FWCORE_DMI_RESERVED00
+#define DMI_STRIDE  (RGX_CR_FWCORE_DMI_RESERVED01 - RGX_CR_FWCORE_DMI_RESERVED00)
+#define DMI_REG(r)  ((DMI_BASE) + (DMI_STRIDE) * (r))
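+/* The Debug Module registers are exposed through a linear window of FWCORE_DMI
+ * GPU registers, so DMI_REG() maps a DM register index to base + stride * index.
+ */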
+
+#define DMI_OP_SHIFT            0U
+#define DMI_OP_MASK             0x3ULL
+#define DMI_DATA_SHIFT          2U
+#define DMI_DATA_MASK           0x3FFFFFFFCULL
+#define DMI_ADDRESS_SHIFT       34U
+#define DMI_ADDRESS_MASK        0xFC00000000ULL
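+
+/*
+ * Per the masks above, a 'dmi' word carries the op/status in bits [1:0], the
+ * 32-bit data in bits [33:2] and the DM register address in bits [39:34].
+ */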
+
+#define DMI_OP_NOP             0U
+#define DMI_OP_READ            1U
+#define DMI_OP_WRITE           2U
+#define DMI_OP_RESERVED        3U
+
+#define DMI_OP_STATUS_SUCCESS  0U
+#define DMI_OP_STATUS_RESERVED 1U
+#define DMI_OP_STATUS_FAILED   2U
+#define DMI_OP_STATUS_BUSY     3U
+
+       PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+       PVRSRV_DEV_POWER_STATE ePowerState;
+       PVRSRV_ERROR eError;
+       IMG_UINT64 ui64Op, ui64Address, ui64Data;
+
+       ui64Op      = (*pui64DMI & DMI_OP_MASK) >> DMI_OP_SHIFT;
+       ui64Address = (*pui64DMI & DMI_ADDRESS_MASK) >> DMI_ADDRESS_SHIFT;
+       ui64Data    = (*pui64DMI & DMI_DATA_MASK) >> DMI_DATA_SHIFT;
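+
+       /* Note: from this point ui64Op is reused to hold the DMI_OP_STATUS_* code
+        * that is packed back into *pui64DMI on return.
+        */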
+
+       eError = PVRSRVPowerLock(psDeviceNode);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire powerlock (%s)",
+                               __func__, PVRSRVGetErrorString(eError)));
+               ui64Op = DMI_OP_STATUS_FAILED;
+               goto dmiop_update;
+       }
+
+       eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: failed to retrieve RGX power state (%s)",
+                               __func__, PVRSRVGetErrorString(eError)));
+               ui64Op = DMI_OP_STATUS_FAILED;
+               goto dmiop_release_lock;
+       }
+
+       if (ePowerState == PVRSRV_DEV_POWER_STATE_ON)
+       {
+               switch (ui64Op)
+               {
+                       case DMI_OP_NOP:
+                               ui64Op = DMI_OP_STATUS_SUCCESS;
+                               break;
+                       case DMI_OP_WRITE:
+                               OSWriteHWReg32(psDevInfo->pvRegsBaseKM,
+                                               DMI_REG(ui64Address),
+                                               (IMG_UINT32)ui64Data);
+                               ui64Op = DMI_OP_STATUS_SUCCESS;
+                               break;
+                       case DMI_OP_READ:
+                               ui64Data = (IMG_UINT64)OSReadHWReg32(psDevInfo->pvRegsBaseKM,
+                                               DMI_REG(ui64Address));
+                               ui64Op = DMI_OP_STATUS_SUCCESS;
+                               break;
+                       default:
+                               PVR_DPF((PVR_DBG_ERROR, "%s: unknown op %u", __func__, (IMG_UINT32)ui64Op));
+                               ui64Op = DMI_OP_STATUS_FAILED;
+                               break;
+               }
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_WARNING, "%s: Accessing RISC-V Debug Module is not "
+                                       "possible while the GPU is powered off", __func__));
+
+               ui64Op = DMI_OP_STATUS_FAILED;
+       }
+
+dmiop_release_lock:
+       PVRSRVPowerUnlock(psDeviceNode);
+
+dmiop_update:
+       *pui64DMI = (ui64Op << DMI_OP_SHIFT) |
+               (ui64Address << DMI_ADDRESS_SHIFT) |
+               (ui64Data << DMI_DATA_SHIFT);
+
+       return eError;
+#endif
+}
+
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+/*
+       RGXReadMETAAddr
+*/
+static PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 *pui32Value)
+{
+       IMG_UINT8 __iomem  *pui8RegBase = psDevInfo->pvRegsBaseKM;
+       IMG_UINT32 ui32Value;
+
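+       /* META slave port read sequence: wait for the port to go idle, write the
+        * address with the read strobe set, wait for completion, then fetch the
+        * result from MSLVDATAX.
+        */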
+       /* Wait for Slave Port to be Ready */
+       if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                       (IMG_UINT32 __iomem *) (pui8RegBase + RGX_CR_META_SP_MSLVCTRL1),
+                       RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+                       RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+                       POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+       {
+               return PVRSRV_ERROR_TIMEOUT;
+       }
+
+       /* Issue the Read */
+       OSWriteHWReg32(
+                       psDevInfo->pvRegsBaseKM,
+                       RGX_CR_META_SP_MSLVCTRL0,
+                       ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN);
+       (void) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL0);
+
+       /* Wait for Slave Port to be Ready: read complete */
+       if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                       (IMG_UINT32 __iomem *) (pui8RegBase + RGX_CR_META_SP_MSLVCTRL1),
+                       RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+                       RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+                       POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+       {
+               return PVRSRV_ERROR_TIMEOUT;
+       }
+
+       /* Read the value */
+       ui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAX);
+
+       *pui32Value = ui32Value;
+
+       return PVRSRV_OK;
+}
+
+/*
+       RGXWriteMETAAddr
+*/
+static PVRSRV_ERROR RGXWriteMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 ui32Value)
+{
+       IMG_UINT8 __iomem *pui8RegBase = psDevInfo->pvRegsBaseKM;
+
+       /* Wait for Slave Port to be Ready */
+       if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                       (IMG_UINT32 __iomem *)(pui8RegBase + RGX_CR_META_SP_MSLVCTRL1),
+                       RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+                       RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+                       POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+       {
+               return PVRSRV_ERROR_TIMEOUT;
+       }
+
+       /* Issue the Write */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL0, ui32METAAddr);
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAT, ui32Value);
+
+       return PVRSRV_OK;
+}
+#endif
+
+PVRSRV_ERROR RGXReadFWModuleAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 *pui32Value)
+{
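+       /* Dispatch to the accessor for the firmware processor fitted on this core:
+        * META via the slave port, RISC-V via the Debug Module interface.
+        */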
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+       {
+               return RGXReadMETAAddr(psDevInfo, ui32FWAddr, pui32Value);
+       }
+#endif
+
+#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) && !defined(EMULATOR)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+       {
+               return RGXRiscvReadMem(psDevInfo, ui32FWAddr, pui32Value);
+       }
+#endif
+
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+PVRSRV_ERROR RGXWriteFWModuleAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 ui32Value)
+{
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+       {
+               return RGXWriteMETAAddr(psDevInfo, ui32FWAddr, ui32Value);
+       }
+#endif
+
+#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) && !defined(EMULATOR)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+       {
+               return RGXRiscvWriteMem(psDevInfo, ui32FWAddr, ui32Value);
+       }
+#endif
+
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+PVRSRV_ERROR RGXGetFwMapping(PVRSRV_RGXDEV_INFO *psDevInfo,
+                             IMG_UINT32 ui32FwVA,
+                             IMG_CPU_PHYADDR *psCpuPA,
+                             IMG_DEV_PHYADDR *psDevPA,
+                             IMG_UINT64 *pui64RawPTE)
+{
+       PVRSRV_ERROR eError       = PVRSRV_OK;
+       IMG_CPU_PHYADDR sCpuPA    = {0U};
+       IMG_DEV_PHYADDR sDevPA    = {0U};
+       IMG_UINT64 ui64RawPTE     = 0U;
+       MMU_FAULT_DATA sFaultData = {0U};
+       MMU_CONTEXT *psFwMMUCtx   = psDevInfo->psKernelMMUCtx;
+       IMG_UINT32 ui32FwHeapBase = (IMG_UINT32) (RGX_FIRMWARE_RAW_HEAP_BASE & UINT_MAX);
+       IMG_UINT32 ui32FwHeapEnd  = ui32FwHeapBase + (RGX_NUM_OS_SUPPORTED * RGX_FIRMWARE_RAW_HEAP_SIZE);
+       IMG_UINT32 ui32OSID       = (ui32FwVA - ui32FwHeapBase) / RGX_FIRMWARE_RAW_HEAP_SIZE;
+       IMG_UINT32 ui32HeapId;
+       PHYS_HEAP *psPhysHeap;
+
+       /* MIPS uses the same page size as the OS, while others default to 4K pages */
+       IMG_UINT32 ui32FwPageSize = RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) ?
+                                    OSGetPageSize() : BIT(RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT);
+       IMG_UINT32 ui32PageOffset = (ui32FwVA & (ui32FwPageSize - 1));
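+
+       /* Outline: validate that the FW VA lies within the per-OSID firmware heap
+        * window, translate it through the firmware MMU mapping (MIPS TLB or the
+        * GPU MMU for META/RISC-V) and derive the physical address from the
+        * returned page table entry.
+        */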
+
+       PVR_LOG_GOTO_IF_INVALID_PARAM((ui32OSID < RGX_NUM_OS_SUPPORTED),
+                                     eError, ErrorExit);
+
+       PVR_LOG_GOTO_IF_INVALID_PARAM(((psCpuPA != NULL) ||
+                                      (psDevPA != NULL) ||
+                                      (pui64RawPTE != NULL)),
+                                     eError, ErrorExit);
+
+       PVR_LOG_GOTO_IF_INVALID_PARAM(((ui32FwVA >= ui32FwHeapBase) &&
+                                     (ui32FwVA < ui32FwHeapEnd)),
+                                     eError, ErrorExit);
+
+       ui32HeapId = (ui32OSID == RGXFW_HOST_OS) ?
+                     PVRSRV_PHYS_HEAP_FW_MAIN : (PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32OSID);
+       psPhysHeap = psDevInfo->psDeviceNode->apsPhysHeap[ui32HeapId];
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+       {
+               /* MIPS is equipped with a dedicated MMU  */
+               RGXMipsCheckFaultAddress(psFwMMUCtx, ui32FwVA, &sFaultData);
+       }
+       else
+       {
+               IMG_UINT64 ui64FwDataBaseMask;
+               IMG_DEV_VIRTADDR sDevVAddr;
+
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+               if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+               {
+                       ui64FwDataBaseMask = ~(RGXFW_SEGMMU_DATA_META_CACHE_MASK |
+                                                RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK |
+                                                RGXFW_SEGMMU_DATA_BASE_ADDRESS);
+               }
+               else
+#endif
+#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) && !defined(EMULATOR)
+               if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+               {
+                       ui64FwDataBaseMask = ~(RGXRISCVFW_GET_REGION_BASE(0xF));
+               }
+               else
+#endif
+               {
+                       PVR_LOG_GOTO_WITH_ERROR("RGXGetFwMapping", eError, PVRSRV_ERROR_NOT_IMPLEMENTED, ErrorExit);
+               }
+
+               sDevVAddr.uiAddr = (ui32FwVA & ui64FwDataBaseMask) | RGX_FIRMWARE_RAW_HEAP_BASE;
+
+               /* Fw CPU shares a subset of the GPU's VA space */
+               MMU_CheckFaultAddress(psFwMMUCtx, &sDevVAddr, &sFaultData);
+       }
+
+       ui64RawPTE = sFaultData.sLevelData[MMU_LEVEL_1].ui64Address;
+
+       if (eError == PVRSRV_OK)
+       {
+               IMG_BOOL bValidPage = (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ?
+                                      BITMASK_HAS(ui64RawPTE, RGXMIPSFW_TLB_VALID) :
+                                      BITMASK_HAS(ui64RawPTE, RGX_MMUCTRL_PT_DATA_VALID_EN);
+               if (!bValidPage)
+               {
+                       /* don't report invalid pages */
+                       eError = PVRSRV_ERROR_DEVICEMEM_NO_MAPPING;
+               }
+               else
+               {
+                       sDevPA.uiAddr = ui32PageOffset + ((RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ?
+                                       RGXMIPSFW_TLB_GET_PA(ui64RawPTE) :
+                                       (ui64RawPTE & ~RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK));
+
+                       /* Only the Host's Firmware heap is present in the Host's CPU IPA space */
+                       if (ui32OSID == RGXFW_HOST_OS)
+                       {
+                               PhysHeapDevPAddrToCpuPAddr(psPhysHeap, 1, &sCpuPA, &sDevPA);
+                       }
+                       else
+                       {
+                               sCpuPA.uiAddr = 0U;
+                       }
+               }
+       }
+
+       if (psCpuPA != NULL)
+       {
+               *psCpuPA = sCpuPA;
+       }
+
+       if (psDevPA != NULL)
+       {
+               *psDevPA = sDevPA;
+       }
+
+       if (pui64RawPTE != NULL)
+       {
+               *pui64RawPTE = ui64RawPTE;
+       }
+
+ErrorExit:
+       return eError;
+}
+
+/******************************************************************************
+ End of file (rgxfwutils.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxfwutils.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxfwutils.h
new file mode 100644 (file)
index 0000000..d69f92f
--- /dev/null
@@ -0,0 +1,1362 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX firmware utility routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX firmware utility routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXFWUTILS_H
+#define RGXFWUTILS_H
+
+#include "rgx_memallocflags.h"
+#include "log2.h"
+#include "rgxdevice.h"
+#include "rgxccb.h"
+#include "devicemem.h"
+#include "device.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "connection_server.h"
+#include "rgxta3d.h"
+#include "devicemem_utils.h"
+#include "rgxmem.h"
+
+#define RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT   "FwRawOSID%d" /*!< RGX Raw Firmware Heap identifier */
+
+static INLINE PVRSRV_ERROR _SelectDevMemHeap(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                        PVRSRV_MEMALLOCFLAGS_T *puiFlags,
+                                                                                        DEVMEM_HEAP **ppsFwHeap)
+{
+       PVRSRV_PHYS_HEAP ePhysHeap = (PVRSRV_PHYS_HEAP)PVRSRV_GET_PHYS_HEAP_HINT(*puiFlags);
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
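+       /* The physical heap hint encoded in the allocation flags selects which
+        * firmware device-memory heap backs the allocation.
+        */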
+       switch (ePhysHeap)
+       {
+#if defined(SUPPORT_SECURITY_VALIDATION)
+               /* call with GPU_SECURE from RGXSetupFwSysData */
+               case PVRSRV_PHYS_HEAP_GPU_SECURE:
+#endif
+               case PVRSRV_PHYS_HEAP_FW_CODE:
+               case PVRSRV_PHYS_HEAP_FW_PRIV_DATA:
+               case PVRSRV_PHYS_HEAP_FW_MAIN:
+               {
+                       *ppsFwHeap = psDevInfo->psFirmwareMainHeap;
+                       break;
+               }
+               case PVRSRV_PHYS_HEAP_FW_CONFIG:
+               {
+                       *ppsFwHeap = psDevInfo->psFirmwareConfigHeap;
+                       break;
+               }
+               case PVRSRV_PHYS_HEAP_FW_PREMAP0:
+               case PVRSRV_PHYS_HEAP_FW_PREMAP1:
+               case PVRSRV_PHYS_HEAP_FW_PREMAP2:
+               case PVRSRV_PHYS_HEAP_FW_PREMAP3:
+               case PVRSRV_PHYS_HEAP_FW_PREMAP4:
+               case PVRSRV_PHYS_HEAP_FW_PREMAP5:
+               case PVRSRV_PHYS_HEAP_FW_PREMAP6:
+               case PVRSRV_PHYS_HEAP_FW_PREMAP7:
+               {
+                       IMG_UINT32 ui32OSID = ePhysHeap - PVRSRV_PHYS_HEAP_FW_PREMAP0;
+
+                       PVR_LOG_RETURN_IF_INVALID_PARAM(ui32OSID < RGX_NUM_OS_SUPPORTED, "ui32OSID");
+                       *ppsFwHeap = psDevInfo->psGuestFirmwareRawHeap[ui32OSID];
+                       break;
+               }
+               default:
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: invalid phys heap", __func__));
+                       eError = PVRSRV_ERROR_INVALID_PARAMS;
+                       break;
+               }
+       }
+
+       return eError;
+}
+
+/*
+ * Firmware-only allocations (which are initialised by the host) must be aligned to the SLC cache line size.
+ * This is because firmware-only allocations are GPU_CACHE_INCOHERENT, which causes problems
+ * if two allocations share the same cache line; e.g. the initialisation of the second allocation won't
+ * make it into the SLC cache because the line has already been loaded when accessing the content of the first allocation.
+ */
+static INLINE PVRSRV_ERROR DevmemFwAllocate(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                       IMG_DEVMEM_SIZE_T uiSize,
+                                                                                       PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                                                                       const IMG_CHAR *pszText,
+                                                                                       DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+       IMG_DEV_VIRTADDR sTmpDevVAddr;
+       PVRSRV_ERROR eError;
+       DEVMEM_HEAP *psFwHeap;
+       IMG_DEVMEM_ALIGN_T uiAlign;
+
+       PVR_DPF_ENTERED;
+
+       /* Enforce the standard prefix naming scheme callers must follow */
+       PVR_ASSERT((pszText != NULL) && (pszText[0] == 'F') && (pszText[1] == 'w'));
+
+       /* Imported from AppHint: flag to poison allocations when freed */
+       uiFlags |= psDevInfo->uiFWPoisonOnFreeFlag;
+
+       eError = _SelectDevMemHeap(psDevInfo, &uiFlags, &psFwHeap);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF_RETURN_RC(eError);
+       }
+
+#define MIPS_CACHE_LINE_SIZE_IN_BYTES  16
+       uiAlign = (psFwHeap == psDevInfo->psFirmwareConfigHeap) ?
+                               (RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY) :
+/*
+ * Align FW-based allocations for MIPS-based Rogue cores on the cache line boundary (16 bytes) instead of the
+ * SLC line size (64 bytes) to keep memory more compact, reduce wastage and hopefully save some TLB misses.
+ */
+                               (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) ? MIPS_CACHE_LINE_SIZE_IN_BYTES
+                               : GET_ROGUE_CACHE_LINE_SIZE(RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)));
+
+       eError = DevmemAllocateAndMap(psFwHeap,
+                               uiSize,
+                               uiAlign,
+                               uiFlags,
+                               pszText,
+                               ppsMemDescPtr,
+                               &sTmpDevVAddr);
+
+       PVR_DPF_RETURN_RC(eError);
+}
+
+static INLINE PVRSRV_ERROR DevmemFwAllocateExportable(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                                                         IMG_DEVMEM_SIZE_T uiSize,
+                                                                                                         IMG_DEVMEM_ALIGN_T uiAlign,
+                                                                                                         PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                                                                                         const IMG_CHAR *pszText,
+                                                                                                         DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+       IMG_DEV_VIRTADDR sTmpDevVAddr;
+       PVRSRV_ERROR eError;
+       DEVMEM_HEAP *psFwHeap;
+
+       PVR_DPF_ENTERED;
+
+       /* Enforce the standard prefix naming scheme callers must follow */
+       PVR_ASSERT((pszText != NULL) &&
+                       (pszText[0] == 'F') && (pszText[1] == 'w') &&
+                       (pszText[2] == 'E') && (pszText[3] == 'x'));
+
+       /* Imported from AppHint: flag to poison allocations when freed */
+       uiFlags |= psDevInfo->uiFWPoisonOnFreeFlag;
+
+       eError = _SelectDevMemHeap(psDevInfo, &uiFlags, &psFwHeap);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF_RETURN_RC(eError);
+       }
+
+       eError = DevmemAllocateExportable(psDeviceNode,
+                                                                         uiSize,
+                                                                         uiAlign,
+                                                                         RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) ?
+                                                                         ExactLog2(uiAlign) :
+                                                                         DevmemGetHeapLog2PageSize(psFwHeap),
+                                                                         uiFlags,
+                                                                         pszText,
+                                                                         ppsMemDescPtr);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "FW DevmemAllocateExportable failed (%u)", eError));
+               PVR_DPF_RETURN_RC(eError);
+       }
+
+       /*
+               We need to map it so the heap for this allocation
+               is set
+       */
+       eError = DevmemMapToDevice(*ppsMemDescPtr,
+                                                          psDevInfo->psFirmwareMainHeap,
+                                                          &sTmpDevVAddr);
+       if (eError != PVRSRV_OK)
+       {
+               DevmemFree(*ppsMemDescPtr);
+               PVR_DPF((PVR_DBG_ERROR, "FW DevmemMapToDevice failed (%u)", eError));
+       }
+
+       PVR_DPF_RETURN_RC1(eError, *ppsMemDescPtr);
+}
+
+static INLINE PVRSRV_ERROR DevmemFwAllocateSparse(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                               IMG_DEVMEM_SIZE_T uiSize,
+                                                                                               IMG_DEVMEM_SIZE_T uiChunkSize,
+                                                                                               IMG_UINT32 ui32NumPhysChunks,
+                                                                                               IMG_UINT32 ui32NumVirtChunks,
+                                                                                               IMG_UINT32 *pui32MappingTable,
+                                                                                               PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                                                                               const IMG_CHAR *pszText,
+                                                                                               DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+       IMG_DEV_VIRTADDR sTmpDevVAddr;
+       PVRSRV_ERROR eError;
+       DEVMEM_HEAP *psFwHeap;
+       IMG_UINT32 ui32Align;
+
+       PVR_DPF_ENTERED;
+
+       /* Enforce the standard prefix naming scheme callers must follow */
+       PVR_ASSERT((pszText != NULL) && (pszText[0] == 'F') && (pszText[1] == 'w'));
+       ui32Align = GET_ROGUE_CACHE_LINE_SIZE(RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS));
+
+       /* Imported from AppHint: flag to poison allocations when freed */
+       uiFlags |= psDevInfo->uiFWPoisonOnFreeFlag;
+
+       eError = _SelectDevMemHeap(psDevInfo, &uiFlags, &psFwHeap);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF_RETURN_RC(eError);
+       }
+
+       eError = DevmemAllocateSparse(psDevInfo->psDeviceNode,
+                                                               uiSize,
+                                                               uiChunkSize,
+                                                               ui32NumPhysChunks,
+                                                               ui32NumVirtChunks,
+                                                               pui32MappingTable,
+                                                               ui32Align,
+                                                               DevmemGetHeapLog2PageSize(psFwHeap),
+                                                               uiFlags | PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING,
+                                                               pszText,
+                                                               ppsMemDescPtr);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF_RETURN_RC(eError);
+       }
+       /*
+               We need to map it so the heap for this allocation
+               is set
+       */
+       eError = DevmemMapToDevice(*ppsMemDescPtr,
+                                  psFwHeap,
+                                  &sTmpDevVAddr);
+       if (eError != PVRSRV_OK)
+       {
+               DevmemFree(*ppsMemDescPtr);
+               PVR_DPF_RETURN_RC(eError);
+       }
+
+       PVR_DPF_RETURN_RC(eError);
+}
+
+
+static INLINE void DevmemFwUnmapAndFree(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                               DEVMEM_MEMDESC *psMemDesc)
+{
+       PVR_DPF_ENTERED1(psMemDesc);
+
+       DevmemReleaseDevVirtAddr(psMemDesc);
+       DevmemFree(psMemDesc);
+
+       PVR_DPF_RETURN;
+}
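+
+/* Hedged usage sketch (editor addition): typical lifecycle of an exportable
+ * firmware allocation made with DevmemFwAllocateExportable() above and
+ * released with DevmemFwUnmapAndFree(). Note the enforced "FwEx" name prefix.
+ * psMemDesc, uiSize, uiAlign and uiFlags are hypothetical locals.
+ *
+ *   DEVMEM_MEMDESC *psMemDesc = NULL;
+ *
+ *   eError = DevmemFwAllocateExportable(psDeviceNode, uiSize, uiAlign, uiFlags,
+ *                                       "FwExExampleAlloc", &psMemDesc);
+ *   if (eError == PVRSRV_OK)
+ *   {
+ *       ...use the allocation...
+ *       DevmemFwUnmapAndFree(psDevInfo, psMemDesc);
+ *   }
+ */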
+
+/*
+ * This function returns the value of the hardware register RGX_CR_TIMER
+ * which is a timer counting in ticks.
+ */
+
+static INLINE IMG_UINT64 RGXReadHWTimerReg(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       IMG_UINT64 ui64Time = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TIMER);
+
+       /*
+       *  In order to avoid having to issue three 32-bit reads to detect the
+       *  lower 32-bits wrapping, the MSB of the low 32-bit word is duplicated
+       *  in the MSB of the high 32-bit word. If the wrap happens, we just read
+       *  the register again (it will not wrap again so soon).
+       */
+       if ((ui64Time ^ (ui64Time << 32)) & ~RGX_CR_TIMER_BIT31_CLRMSK)
+       {
+               ui64Time = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TIMER);
+       }
+
+       return (ui64Time & ~RGX_CR_TIMER_VALUE_CLRMSK) >> RGX_CR_TIMER_VALUE_SHIFT;
+}
+
+/*
+ * This FW Common Context is only mapped into kernel for initialisation and cleanup purposes.
+ * Otherwise this allocation is only used by the FW.
+ * Therefore the GPU cache doesn't need coherency, and write-combine will
+ * suffice on the CPU side (WC buffer will be flushed at the first kick)
+ */
+#define RGX_FWCOMCTX_ALLOCFLAGS      (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \
+                                      PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)| \
+                                      PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
+                                      PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
+                                      PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | \
+                                      PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+                                      PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+                                      PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \
+                                      PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+                                      PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
+                                      PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN))
+
+#define RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \
+                                         PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
+                                         PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
+                                         PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \
+                                         PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+                                         PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+                                         PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \
+                                         PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+                                         PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
+                                         PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN))
+
+#define RGX_FWSHAREDMEM_CONFIG_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \
+                                           PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
+                                           PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
+                                           PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \
+                                           PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+                                           PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
+                                           PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_CONFIG))
+
+#define RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \
+                                           PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
+                                           PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \
+                                           PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+                                           PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
+                                           PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN))
+
+/* Firmware memory that is not accessible by the CPU. */
+#define RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \
+                                             PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
+                                             PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
+                                             PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \
+                                             PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC)
+
+/* Firmware shared memory that is supposed to be read-only to the CPU.
+ * In reality it is not, because ZERO_ON_ALLOC enforces the CPU_WRITEABLE
+ * flag on the allocations. */
+#define RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \
+                                           PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN) | \
+                                           PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
+                                           PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+                                           PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+                                           PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \
+                                           PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC)
+
+/* Data retained in physical memory from previous boot cycles must not be cleared during allocation */
+#define RGX_AUTOVZ_KEEP_FW_DATA_MASK(bKeepMem) ((bKeepMem) ? (~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) : (~0ULL))
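+
+/* Illustrative sketch (editor addition, not part of the original header):
+ * how the keep-data mask above is intended to combine with the allocation
+ * flags. bKeepFwData is a hypothetical variable; when it is IMG_TRUE the
+ * ZERO_ON_ALLOC flag is masked out so firmware data retained from a previous
+ * boot cycle is not cleared.
+ *
+ *   PVRSRV_MEMALLOCFLAGS_T uiFlags =
+ *       RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS & RGX_AUTOVZ_KEEP_FW_DATA_MASK(bKeepFwData);
+ */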
+
+/******************************************************************************
+ * RGXSetFirmwareAddress Flags
+ *****************************************************************************/
+#define RFW_FWADDR_FLAG_NONE           (0)                     /*!< Void flag */
+#define RFW_FWADDR_NOREF_FLAG          (1U << 0)       /*!< It is safe to immediately release the reference to the pointer,
+                                                                                                 otherwise RGXUnsetFirmwareAddress() must be called when finished. */
+
+IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo);
+PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO* psDevInfo, PVRSRV_MEMALLOCFLAGS_T uiAllocFlags);
+
+#if defined(SUPPORT_TBI_INTERFACE)
+IMG_BOOL RGXTBIBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo);
+PVRSRV_ERROR RGXTBIBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo);
+#endif
+
+PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE       *psDeviceNode,
+                              IMG_BOOL                 bEnableSignatureChecks,
+                              IMG_UINT32               ui32SignatureChecksBufSize,
+                              IMG_UINT32               ui32HWPerfFWBufSizeKB,
+                              IMG_UINT64               ui64HWPerfFilter,
+                              IMG_UINT32               ui32ConfigFlags,
+                              IMG_UINT32               ui32ConfigFlagsExt,
+                              IMG_UINT32               ui32FwOsCfgFlags,
+                              IMG_UINT32               ui32LogType,
+                              IMG_UINT32               ui32FilterFlags,
+                              IMG_UINT32               ui32JonesDisableMask,
+                              IMG_UINT32               ui32HWRDebugDumpLimit,
+                              IMG_UINT32               ui32HWPerfCountersDataSize,
+                              IMG_UINT32               *pui32TPUTrilinearFracMask,
+                              RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf,
+                              FW_PERF_CONF             eFirmwarePerf,
+                              IMG_UINT32               ui32KCCBSizeLog2);
+
+
+
+void RGXFreeFirmware(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*************************************************************************/ /*!
+@Function       RGXSetupFwAllocation
+
+@Description    Allocates firmware memory and optionally returns the firmware
+                virtual address and a kernel CPU mapping for the allocation.
+
+@Input          psDevInfo       Device Info struct
+@Input          uiAllocFlags    Flags determining type of memory allocation
+@Input          ui32Size        Size of memory allocation
+@Input          pszName         Allocation label
+@Input          psFwPtr         Address of the firmware pointer to set
+@Input          ppvCpuPtr       Address of the cpu pointer to set
+@Input          ui32DevVAFlags  Any combination of RFW_FWADDR_*_FLAG
+
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXSetupFwAllocation(PVRSRV_RGXDEV_INFO   *psDevInfo,
+                                                                 PVRSRV_MEMALLOCFLAGS_T       uiAllocFlags,
+                                                                 IMG_UINT32           ui32Size,
+                                                                 const IMG_CHAR       *pszName,
+                                                                 DEVMEM_MEMDESC       **ppsMemDesc,
+                                                                 RGXFWIF_DEV_VIRTADDR *psFwPtr,
+                                                                 void                 **ppvCpuPtr,
+                                                                 IMG_UINT32           ui32DevVAFlags);
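+
+/* Hedged usage sketch (editor addition, not part of the original API): a
+ * caller allocating a zero-initialised shared buffer and receiving both the
+ * firmware virtual address and a kernel CPU mapping. psMemDesc, sFwAddr,
+ * pvCpuVA, ui32Size and the allocation label are hypothetical.
+ *
+ *   DEVMEM_MEMDESC      *psMemDesc = NULL;
+ *   RGXFWIF_DEV_VIRTADDR sFwAddr;
+ *   void                *pvCpuVA   = NULL;
+ *
+ *   eError = RGXSetupFwAllocation(psDevInfo,
+ *                                 RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS,
+ *                                 ui32Size,
+ *                                 "FwExampleBuffer",
+ *                                 &psMemDesc,
+ *                                 &sFwAddr,
+ *                                 &pvCpuVA,
+ *                                 RFW_FWADDR_FLAG_NONE);
+ */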
+
+/*************************************************************************/ /*!
+@Function       RGXSetFirmwareAddress
+
+@Description    Sets a pointer in a firmware data structure.
+
+@Input          ppDest          Address of the pointer to set
+@Input          psSrc           MemDesc describing the pointer
+@Input          ui32Flags       Any combination of RFW_FWADDR_*_FLAG
+
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR        *ppDest,
+                                                                  DEVMEM_MEMDESC               *psSrc,
+                                                                  IMG_UINT32                   uiOffset,
+                                                                  IMG_UINT32                   ui32Flags);
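+
+/* Hedged usage sketch (editor addition): pairing RGXSetFirmwareAddress() with
+ * RGXUnsetFirmwareAddress(). With RFW_FWADDR_FLAG_NONE the reference taken on
+ * the MemDesc must be dropped later via RGXUnsetFirmwareAddress(); with
+ * RFW_FWADDR_NOREF_FLAG the caller may release it immediately. sFwAddr and
+ * psMemDesc are hypothetical locals.
+ *
+ *   RGXFWIF_DEV_VIRTADDR sFwAddr;
+ *
+ *   eError = RGXSetFirmwareAddress(&sFwAddr, psMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+ *   ...use sFwAddr in a firmware data structure...
+ *   RGXUnsetFirmwareAddress(psMemDesc);
+ */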
+
+
+/*************************************************************************/ /*!
+@Function       RGXSetMetaDMAAddress
+
+@Description    Fills a Firmware structure used to setup the Meta DMA with two
+                pointers to the same data, one 40-bit and one 32-bit
+                (pointer in the FW memory space).
+
+@Input          ppDest          Address of the structure to set
+@Input          psSrcMemDesc    MemDesc describing the pointer
+@Input          psSrcFWDevVAddr Firmware memory space pointer
+
+@Return         void
+*/ /**************************************************************************/
+void RGXSetMetaDMAAddress(RGXFWIF_DMA_ADDR             *psDest,
+                                                 DEVMEM_MEMDESC                *psSrcMemDesc,
+                                                 RGXFWIF_DEV_VIRTADDR  *psSrcFWDevVAddr,
+                                                 IMG_UINT32                    uiOffset);
+
+
+/*************************************************************************/ /*!
+@Function       RGXUnsetFirmwareAddress
+
+@Description    Unsets a pointer in a firmware data structure
+
+@Input          psSrc           MemDesc describing the pointer
+
+@Return         void
+*/ /**************************************************************************/
+void RGXUnsetFirmwareAddress(DEVMEM_MEMDESC *psSrc);
+
+/*************************************************************************/ /*!
+@Function       FWCommonContextAllocate
+
+@Description    Allocate a FW common context. This allocates the HW memory
+                for the context, the CCB and wires it all together.
+
+@Input          psConnection            Connection this context is being created on
+@Input          psDeviceNode            Device node to create the FW context on
+                                        (must be RGX device node)
+@Input          eRGXCCBRequestor        RGX_CCB_REQUESTOR_TYPE enum constant which
+                                        represents the requestor of this FWCC
+@Input          eDM                     Data Master type
+@Input          psServerMMUContext      Server MMU memory context.
+@Input          psAllocatedMemDesc      Pointer to pre-allocated MemDesc to use
+                                        as the FW context or NULL if this function
+                                        should allocate it
+@Input          ui32AllocatedOffset     Offset into pre-allocated MemDesc to use
+                                        as the FW context. If psAllocatedMemDesc
+                                        is NULL then this parameter is ignored
+@Input          psFWMemContextMemDesc   MemDesc of the FW memory context this
+                                        common context resides on
+@Input          psContextStateMemDesc   FW context state (context switch) MemDesc
+@Input          ui32CCBAllocSizeLog2    Size of the CCB for this context
+@Input          ui32CCBMaxAllocSizeLog2 Maximum size to which CCB can grow for this context
+@Input          ui32ContextFlags        Flags which specify properties of the context
+@Input          ui32Priority            Priority of the context
+@Input          ui32MaxDeadlineMS       Max deadline limit in MS that the workload can run
+@Input          ui64RobustnessAddress   Address for FW to signal a context reset
+@Input          psInfo                  Structure that contains extra info
+                                        required for the creation of the context
+                                        (elements might change from core to core)
+@Return         PVRSRV_OK if the context was successfully created
+*/ /**************************************************************************/
+PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection,
+                                                                        PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                        RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor,
+                                                                        RGXFWIF_DM eDM,
+                                                                        SERVER_MMU_CONTEXT *psServerMMUContext,
+                                                                        DEVMEM_MEMDESC *psAllocatedMemDesc,
+                                                                        IMG_UINT32 ui32AllocatedOffset,
+                                                                        DEVMEM_MEMDESC *psFWMemContextMemDesc,
+                                                                        DEVMEM_MEMDESC *psContextStateMemDesc,
+                                                                        IMG_UINT32 ui32CCBAllocSizeLog2,
+                                                                        IMG_UINT32 ui32CCBMaxAllocSizeLog2,
+                                                                        IMG_UINT32 ui32ContextFlags,
+                                                                        IMG_UINT32 ui32Priority,
+                                                                        IMG_UINT32 ui32MaxDeadlineMS,
+                                                                        IMG_UINT64 ui64RobustnessAddress,
+                                                                        RGX_COMMON_CONTEXT_INFO *psInfo,
+                                                                        RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext);
+
+void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext);
+
+PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext);
+
+RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext);
+
+RGX_CONTEXT_RESET_REASON FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+                                                           IMG_UINT32 *pui32LastResetJobRef);
+
+PVRSRV_RGXDEV_INFO* FWCommonContextGetRGXDevInfo(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext);
+
+PVRSRV_ERROR RGXGetFWCommonContextAddrFromServerMMUCtx(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                                          SERVER_MMU_CONTEXT *psServerMMUContext,
+                                                                                                          PRGXFWIF_FWCOMMONCONTEXT *psFWCommonContextFWAddr);
+
+PVRSRV_ERROR FWCommonContextSetFlags(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+                                     IMG_UINT32 ui32ContextFlags);
+/*!
+*******************************************************************************
+@Function       RGXScheduleProcessQueuesKM
+
+@Description    Software command complete handler
+                (sends uncounted kicks for all the DMs through the MISR)
+
+@Input          hCmdCompHandle  RGX device node
+
+@Return         None
+******************************************************************************/
+void RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle);
+
+#if defined(SUPPORT_VALIDATION)
+/*!
+*******************************************************************************
+@Function       RGXScheduleRgxRegCommand
+
+@Input          psDevInfo       Device Info struct
+@Input          ui64RegVal      Value to write into FW register
+@Input          ui64Size        Register size
+@Input          ui32Offset      Register Offset
+@Input          bWriteOp        Register Write or Read toggle
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXScheduleRgxRegCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                         IMG_UINT64 ui64RegVal,
+                                                                         IMG_UINT64 ui64Size,
+                                                                         IMG_UINT32 ui32Offset,
+                                                                         IMG_BOOL bWriteOp);
+
+#endif
+
+/*!
+*******************************************************************************
+
+@Function       RGXInstallProcessQueuesMISR
+
+@Description    Installs the MISR to handle Process Queues operations
+
+@Input          phMISR          Pointer to the MISR handler
+@Input          psDeviceNode    RGX Device node
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXInstallProcessQueuesMISR(IMG_HANDLE *phMISR, PVRSRV_DEVICE_NODE *psDeviceNode);
+
+PVRSRV_ERROR RGXSendCommandsFromDeferredList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bPoll);
+
+/*************************************************************************/ /*!
+@Function       RGXSendCommandWithPowLockAndGetKCCBSlot
+
+@Description    Sends a command to a particular DM without honouring
+                pending cache operations but taking the power lock.
+
+@Input          psDevInfo       Device Info
+@Input          psKCCBCmd       The cmd to send.
+@Input          ui32PDumpFlags  Pdump flags
+@Output         pui32CmdKCCBSlot   When non-NULL:
+                                   - Pointer on return contains the kCCB slot
+                                     number in which the command was enqueued.
+                                   - Resets the value of the allotted slot to
+                                     RGXFWIF_KCCB_RTN_SLOT_RST
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXSendCommandWithPowLockAndGetKCCBSlot(PVRSRV_RGXDEV_INFO        *psDevInfo,
+                                                                                                        RGXFWIF_KCCB_CMD       *psKCCBCmd,
+                                                                                                        IMG_UINT32                     ui32PDumpFlags,
+                                                                                                        IMG_UINT32                     *pui32CmdKCCBSlot);
+
+#define RGXSendCommandWithPowLock(psDevInfo, psKCCBCmd, ui32PDumpFlags) \
+  RGXSendCommandWithPowLockAndGetKCCBSlot(psDevInfo, psKCCBCmd, ui32PDumpFlags, NULL)
+
+/*************************************************************************/ /*!
+@Function       RGXSendCommandAndGetKCCBSlot
+
+@Description    Sends a command to a particular DM without honouring
+                pending cache operations or the power lock.
+                The function flushes any deferred KCCB commands first.
+
+@Input          psDevInfo       Device Info
+@Input          psKCCBCmd       The cmd to send.
+@Input          uiPdumpFlags    PDump flags.
+@Output         pui32CmdKCCBSlot   When non-NULL:
+                                   - Pointer on return contains the kCCB slot
+                                     number in which the command was enqueued.
+                                   - Resets the value of the allotted slot to
+                                     RGXFWIF_KCCB_RTN_SLOT_RST
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXSendCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                 RGXFWIF_KCCB_CMD   *psKCCBCmd,
+                                                                                 PDUMP_FLAGS_T      uiPdumpFlags,
+                                                                                 IMG_UINT32         *pui32CmdKCCBSlot);
+
+#define RGXSendCommand(psDevInfo, psKCCBCmd, ui32PDumpFlags) \
+  RGXSendCommandAndGetKCCBSlot(psDevInfo, psKCCBCmd, ui32PDumpFlags, NULL)
+
+/*************************************************************************/ /*!
+@Function       RGXScheduleCommand
+
+@Description    Sends a command to a particular DM and kicks the firmware,
+                but first schedules any commands which have to happen
+                beforehand
+
+@Input          psDevInfo          Device Info
+@Input          eDM                To which DM the cmd is sent.
+@Input          psKCCBCmd          The cmd to send.
+@Input          ui32PDumpFlags     PDump flags
+@Output         pui32CmdKCCBSlot   When non-NULL:
+                                   - Pointer on return contains the kCCB slot
+                                     number in which the command was enqueued.
+                                   - Resets the value of the allotted slot to
+                                     RGXFWIF_KCCB_RTN_SLOT_RST
+@Return                        PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXScheduleCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                         RGXFWIF_DM         eKCCBType,
+                                                                                         RGXFWIF_KCCB_CMD   *psKCCBCmd,
+                                                                                         IMG_UINT32         ui32PDumpFlags,
+                                                                                         IMG_UINT32         *pui32CmdKCCBSlot);
+#define RGXScheduleCommand(psDevInfo, eKCCBType, psKCCBCmd, ui32PDumpFlags) \
+  RGXScheduleCommandAndGetKCCBSlot(psDevInfo, eKCCBType, psKCCBCmd, ui32PDumpFlags, NULL)
+
+/*************************************************************************/ /*!
+@Function       RGXWaitForKCCBSlotUpdate
+
+@Description    Waits until the required kCCB slot value is updated by the FW
+                (signifies command completion). Additionally, dumps a relevant
+                PDump poll command.
+
+@Input          psDevInfo       Device Info
+@Input          ui32SlotNum     The kCCB slot number to wait for an update on
+@Input          ui32PDumpFlags  PDump flags
+
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXWaitForKCCBSlotUpdate(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                      IMG_UINT32 ui32SlotNum,
+                                                                         IMG_UINT32 ui32PDumpFlags);
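+
+/* Hedged usage sketch (editor addition): scheduling a kernel CCB command and
+ * waiting for the firmware to update the returned slot. The command type
+ * RGXFWIF_KCCB_CMD_HEALTH_CHECK, the RGXFWIF_DM_GP data master and the
+ * PDUMP_FLAGS_CONTINUOUS flag are assumed from other firmware interface
+ * headers; the locals are hypothetical.
+ *
+ *   RGXFWIF_KCCB_CMD sKCCBCmd = { 0 };
+ *   IMG_UINT32       ui32Slot;
+ *
+ *   sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_HEALTH_CHECK;
+ *
+ *   eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, RGXFWIF_DM_GP,
+ *                                             &sKCCBCmd, PDUMP_FLAGS_CONTINUOUS,
+ *                                             &ui32Slot);
+ *   if (eError == PVRSRV_OK)
+ *   {
+ *       eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32Slot,
+ *                                         PDUMP_FLAGS_CONTINUOUS);
+ *   }
+ */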
+
+PVRSRV_ERROR RGXFirmwareUnittests(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVRGXFrameworkCopyCommand
+
+@Description    Copy framework command into FW addressable buffer
+
+@param          psDeviceNode
+@param          psFWFrameworkMemDesc
+@param          pbyGPUFRegisterList
+@param          ui32FrameworkRegisterSize
+
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                                  DEVMEM_MEMDESC       *psFWFrameworkMemDesc,
+                                                                                  IMG_PBYTE            pbyGPUFRegisterList,
+                                                                                  IMG_UINT32           ui32FrameworkRegisterSize);
+
+
+/*************************************************************************/ /*!
+@Function       PVRSRVRGXFrameworkCreateKM
+
+@Description    Create FW addressable buffer for framework
+
+@param          psDeviceNode
+@param          ppsFWFrameworkMemDesc
+@param          ui32FrameworkRegisterSize
+
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVRGXFrameworkCreateKM(PVRSRV_DEVICE_NODE * psDeviceNode,
+                                                                               DEVMEM_MEMDESC     ** ppsFWFrameworkMemDesc,
+                                                                               IMG_UINT32         ui32FrameworkRegisterSize);
+
+/*************************************************************************/ /*!
+@Function       RGXPollForGPCommandCompletion
+
+@Description    Polls for completion of a submitted GP command. Poll is done
+                on a value matching a masked read from the address.
+
+@Input          psDevNode       Pointer to device node struct
+@Input          pui32LinMemAddr CPU linear address to poll
+@Input          ui32Value       Required value
+@Input          ui32Mask        Mask
+
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXPollForGPCommandCompletion(PVRSRV_DEVICE_NODE *psDevNode,
+                                                                       volatile IMG_UINT32 __iomem *pui32LinMemAddr,
+                                                                       IMG_UINT32                   ui32Value,
+                                                                       IMG_UINT32                   ui32Mask);
+
+/*************************************************************************/ /*!
+@Function       RGXStateFlagCtrl
+
+@Description    Set and return FW internal state flags.
+
+@Input          psDevInfo       Device Info
+@Input          ui32Config      AppHint config flags
+@Output         pui32State      Current AppHint state flag configuration
+@Input          bSetNotClear    Set or clear the provided config flags
+
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo,
+                               IMG_UINT32 ui32Config,
+                               IMG_UINT32 *pui32State,
+                               IMG_BOOL bSetNotClear);
+
+/*!
+*******************************************************************************
+@Function       RGXFWRequestCommonContextCleanUp
+
+@Description    Schedules a FW common context cleanup. The firmware doesn't
+                block waiting for the resource to become idle but rather
+                notifies the host that the resource is busy.
+
+@Input          psDeviceNode    pointer to device node
+@Input          psServerCommonContext context to be cleaned up
+@Input          eDM             Data master, to which the cleanup command should
+                                be sent
+@Input          ui32PDumpFlags  PDump continuous flag
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                                         RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+                                                                                         RGXFWIF_DM eDM,
+                                                                                         IMG_UINT32 ui32PDumpFlags);
+
+/*!
+*******************************************************************************
+@Function       RGXFWRequestHWRTDataCleanUp
+
+@Description    Schedules a FW HWRTData memory cleanup. The firmware doesn't
+                block waiting for the resource to become idle but rather
+                notifies the host that the resource is busy.
+
+@Input          psDeviceNode    pointer to device node
+@Input          psHWRTData      firmware address of the HWRTData for clean-up
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                                PRGXFWIF_HWRTDATA psHWRTData);
+
+/*!
+*******************************************************************************
+@Function       RGXFWRequestFreeListCleanUp
+
+@Description    Schedules a FW FreeList cleanup. The firmware doesn't block
+                waiting for the resource to become idle but rather notifies the
+                host that the resource is busy.
+
+@Input          psDevInfo       pointer to device info
+@Input          psFWFreeList    firmware address of the FreeList for clean-up
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                PRGXFWIF_FREELIST psFWFreeList);
+
+/*!
+*******************************************************************************
+@Function       RGXFWRequestZSBufferCleanUp
+
+@Description    Schedules a FW ZS Buffer cleanup. The firmware doesn't block
+                waiting for the resource to become idle but rather notifies the
+                host that the resource is busy.
+
+@Input          psDevInfo       pointer to device info
+@Input          psFWZSBuffer    firmware address of the ZS Buffer for clean-up
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                PRGXFWIF_ZSBUFFER psFWZSBuffer);
+
+PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext,
+                                                               CONNECTION_DATA *psConnection,
+                                                               PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                               IMG_UINT32 ui32Priority,
+                                                               RGXFWIF_DM eDM);
+
+/*!
+*******************************************************************************
+@Function       RGXFWSetHCSDeadline
+
+@Description    Requests the Firmware to set a new Hard Context Switch timeout
+                deadline. Context switches that surpass that deadline cause the
+                system to kill the currently running workloads.
+
+@Input          psDevInfo       pointer to device info
+@Input          ui32HCSDeadlineMs  The deadline in milliseconds.
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                               IMG_UINT32 ui32HCSDeadlineMs);
+
+/*!
+*******************************************************************************
+@Function       RGXFWChangeOSidPriority
+
+@Description    Requests the Firmware to change the priority of an operating
+                system. Higher priority number equals higher priority on the
+                scheduling system.
+
+@Input          psDevInfo       pointer to device info
+@Input          ui32OSid        The OSid whose priority is to be altered
+@Input          ui32Priority    The new priority number for the specified OSid
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                        IMG_UINT32 ui32OSid,
+                                                                        IMG_UINT32 ui32Priority);
+
+/*!
+*******************************************************************************
+@Function       RGXFWHealthCheckCmd
+
+@Description    Ping the firmware to check if it is responsive.
+
+@Input          psDevInfo       pointer to device info
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXFWHealthCheckCmd(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+@Function       RGXFWSetFwOsState
+
+@Description    Requests the Firmware to change the online state of a guest OS.
+                This should be initiated by the VMM when a guest VM comes
+                online or goes offline. If offline, the FW offloads any current
+                resource from that OSID. The request is repeated until the FW
+                has had time to free all the resources or has waited for
+                workloads to finish.
+
+@Input          psDevInfo       pointer to device info
+@Input          ui32OSid        The Guest OSid whose state is being altered
+@Input          eOSOnlineState  The new state (Online or Offline)
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                               IMG_UINT32 ui32OSid,
+                                                               RGXFWIF_OS_STATE_CHANGE eOSOnlineState);
+
+#if defined(SUPPORT_AUTOVZ)
+/*!
+*******************************************************************************
+@Function       RGXUpdateAutoVzWdgToken
+
+@Description    If the driver-firmware connection is active, read the
+                firmware's watchdog token and copy its value back into the OS
+                token. This indicates to the firmware that this driver is alive
+                and responsive.
+
+@Input          psDevInfo       pointer to device info
+******************************************************************************/
+void RGXUpdateAutoVzWdgToken(PVRSRV_RGXDEV_INFO *psDevInfo);
+#endif
+
+/*!
+*******************************************************************************
+@Function       RGXFWConfigPHR
+
+@Description    Configure the Periodic Hardware Reset functionality
+
+@Input          psDevInfo       pointer to device info
+@Input          ui32PHRMode     desired PHR mode
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXFWConfigPHR(PVRSRV_RGXDEV_INFO *psDevInfo,
+                            IMG_UINT32 ui32PHRMode);
+
+/*!
+*******************************************************************************
+@Function       RGXFWConfigWdg
+
+@Description    Configure the Safety watchdog trigger period
+
+@Input          psDevInfo        pointer to device info
+@Input          ui32WdgPeriodUs  requested period in microseconds
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXFWConfigWdg(PVRSRV_RGXDEV_INFO *psDevInfo,
+                            IMG_UINT32 ui32WdgPeriod);
+
+/*!
+*******************************************************************************
+@Function       RGXCheckFirmwareCCB
+
+@Description    Processes all commands that are found in the Firmware CCB.
+
+@Input          psDevInfo       pointer to device
+
+@Return         None
+******************************************************************************/
+void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+@Function       RGXCheckForStalledClientContexts
+
+@Description    Checks all client contexts, for the device with device info
+                provided, to see if any are waiting for a fence to signal and
+                optionally force signalling of the fence for the context which
+                has been waiting the longest.
+                This function is called by RGXUpdateHealthStatus() and also
+                may be invoked from other trigger points.
+
+@Input          psDevInfo       pointer to device info
+@Input          bIgnorePrevious If IMG_TRUE, any stalled contexts will be
+                                indicated immediately, rather than only
+                                checking against any previous stalled contexts
+
+@Return         None
+******************************************************************************/
+void RGXCheckForStalledClientContexts(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bIgnorePrevious);
+
+/*!
+*******************************************************************************
+@Function       RGXUpdateHealthStatus
+
+@Description    Tests a number of conditions which might indicate a fatal error
+                has occurred in the firmware. The result is stored in the
+                device node eHealthStatus.
+
+@Input         psDevNode        Pointer to device node structure.
+@Input         bCheckAfterTimePassed  When TRUE, the function will also test
+                                for firmware queues and polls not changing
+                                since the previous test.
+
+                                Note: if not enough time has passed since the
+                                last call, false positives may occur.
+
+@Return        PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode,
+                                   IMG_BOOL bCheckAfterTimePassed);
+
+
+PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM);
+
+#if defined(SUPPORT_AUTOVZ)
+/*!
+*******************************************************************************
+@Function       RGXUpdateAutoVzWatchdog
+
+@Description    Updates AutoVz watchdog that maintains the fw-driver connection
+
+@Input         psDevNode        Pointer to device node structure.
+******************************************************************************/
+void RGXUpdateAutoVzWatchdog(PVRSRV_DEVICE_NODE* psDevNode);
+#endif /* SUPPORT_AUTOVZ */
+
+void DumpFWCommonContextInfo(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext,
+                             DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                             void *pvDumpDebugFile,
+                             IMG_UINT32 ui32VerbLevel);
+
+/*!
+*******************************************************************************
+@Function       AttachKickResourcesCleanupCtls
+
+@Description    Attaches the cleanup structures to a kick command so that
+                submission reference counting can be performed when the
+                firmware processes the command
+
+@Output         apsCleanupCtl   Array of CleanupCtl structure pointers to populate.
+@Output         pui32NumCleanupCtl  Number of CleanupCtl structure pointers written out.
+@Input          eDM             Which data master is the subject of the command.
+@Input          bKick           TRUE if the client originally wanted to kick this DM.
+@Input          psKMHWRTDataSet Optional HW RT dataset cleanup associated with the command.
+@Input          psZSBuffer      Optional ZS buffer associated with the command.
+@Input          psMSAAScratchBuffer Optional MSAA scratch buffer associated with the command.
+
+@Return        PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl,
+                                                                       IMG_UINT32 *pui32NumCleanupCtl,
+                                                                       RGXFWIF_DM eDM,
+                                                                       IMG_BOOL bKick,
+                                                                       RGX_KM_HW_RT_DATASET           *psKMHWRTDataSet,
+                                                                       RGX_ZSBUFFER_DATA              *psZSBuffer,
+                                                                       RGX_ZSBUFFER_DATA              *psMSAAScratchBuffer);
+
+/*!
+*******************************************************************************
+@Function       RGXResetHWRLogs
+
+@Description    Resets the HWR Logs buffer
+                (the hardware recovery count is not reset)
+
+@Input          psDevNode       Pointer to the device
+
+@Return         PVRSRV_ERROR    PVRSRV_OK on success.
+                                Otherwise, a PVRSRV error code
+******************************************************************************/
+PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode);
+
+/*!
+*******************************************************************************
+@Function       RGXGetPhyAddr
+
+@Description    Get the physical address of a PMR at an offset within it
+
+@Input          psPMR           PMR of the allocation
+@Input          ui32LogicalOffset  Logical offset
+
+@Output         psPhyAddr       Physical address of the allocation
+
+@Return         PVRSRV_ERROR    PVRSRV_OK on success.
+                                Otherwise, a PVRSRV error code
+******************************************************************************/
+PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR,
+                                                  IMG_DEV_PHYADDR *psPhyAddr,
+                                                  IMG_UINT32 ui32LogicalOffset,
+                                                  IMG_UINT32 ui32Log2PageSize,
+                                                  IMG_UINT32 ui32NumOfPages,
+                                                  IMG_BOOL *bValid);
+
+#if defined(PDUMP)
+/*!
+*******************************************************************************
+@Function       RGXPdumpDrainKCCB
+
+@Description    Wait for the firmware to execute all the commands in the kCCB
+
+@Input          psDevInfo       Pointer to the device
+@Input          ui32WriteOffset Write offset (Woff) that the poll waits for the read offset (Roff) to reach
+
+@Return         PVRSRV_ERROR    PVRSRV_OK on success.
+                                Otherwise, a PVRSRV error code
+******************************************************************************/
+PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                          IMG_UINT32 ui32WriteOffset);
+#endif /* PDUMP */
+
+/*!
+*******************************************************************************
+@Function       RGXFwRawHeapAllocMap
+
+@Description    Registers a raw firmware physheap and maps it to the device
+
+@Return         PVRSRV_ERROR    PVRSRV_OK on success.
+                                Otherwise, a PVRSRV error code
+******************************************************************************/
+PVRSRV_ERROR RGXFwRawHeapAllocMap(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                 IMG_UINT32 ui32OSID,
+                                                                 IMG_DEV_PHYADDR sDevPAddr,
+                                                                 IMG_UINT64 ui64DevPSize);
+
+/*!
+*******************************************************************************
+@Function       RGXFwRawHeapUnmapFree
+
+@Description    Unmaps a raw firmware physheap from the device and unregisters it
+******************************************************************************/
+void RGXFwRawHeapUnmapFree(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                  IMG_UINT32 ui32OSID);
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvHalt
+
+@Description    Halt the RISC-V FW core (required for certain operations
+                done through the Debug Module)
+
+@Input          psDevInfo       Pointer to device info
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvHalt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvIsHalted
+
+@Description    Check if the RISC-V FW is halted
+
+@Input          psDevInfo       Pointer to device info
+
+@Return         IMG_BOOL
+******************************************************************************/
+IMG_BOOL RGXRiscvIsHalted(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvResume
+
+@Description    Resume the RISC-V FW core
+
+@Input          psDevInfo       Pointer to device info
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvResume(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvReadReg
+
+@Description    Read a value from the given RISC-V register (GPR or CSR)
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32RegAddr     RISC-V register address
+
+@Output         pui32Value      Read value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvReadReg(PVRSRV_RGXDEV_INFO *psDevInfo,
+                             IMG_UINT32 ui32RegAddr,
+                             IMG_UINT32 *pui32Value);
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvPollReg
+
+@Description    Poll for a value from the given RISC-V register (GPR or CSR)
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32RegAddr     RISC-V register address
+@Input          ui32Value       Expected value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvPollReg(PVRSRV_RGXDEV_INFO *psDevInfo,
+                             IMG_UINT32 ui32RegAddr,
+                             IMG_UINT32 ui32Value);
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvWriteReg
+
+@Description    Write a value to the given RISC-V register (GPR or CSR)
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32RegAddr     RISC-V register address
+@Input          ui32Value       Write value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvWriteReg(PVRSRV_RGXDEV_INFO *psDevInfo,
+                              IMG_UINT32 ui32RegAddr,
+                              IMG_UINT32 ui32Value);
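+
+/* Hedged usage sketch (editor addition): a read-modify-write of a RISC-V FW
+ * core register while the core is halted. ui32RegAddr and the bit being set
+ * are hypothetical; register addresses come from the RISC-V debug
+ * specification or the FW core headers.
+ *
+ *   IMG_UINT32 ui32Value;
+ *
+ *   eError = RGXRiscvHalt(psDevInfo);
+ *   eError = RGXRiscvReadReg(psDevInfo, ui32RegAddr, &ui32Value);
+ *   eError = RGXRiscvWriteReg(psDevInfo, ui32RegAddr, ui32Value | 0x1);
+ *   eError = RGXRiscvResume(psDevInfo);
+ */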
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvPollMem
+
+@Description    Poll for a value at the given address in RISC-V memory space
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32Addr        Address in RISC-V memory space
+@Input          ui32Value       Expected value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvPollMem(PVRSRV_RGXDEV_INFO *psDevInfo,
+                             IMG_UINT32 ui32Addr,
+                             IMG_UINT32 ui32Value);
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvDmiOp
+
+@Description    Acquire the powerlock and perform an operation on the RISC-V
+                Debug Module Interface, but only if the GPU is powered on.
+
+@Input          psDevInfo       Pointer to device info
+@InOut          pui64DMI        Encoding of a request for the RISC-V Debug
+                                Module with same format as the 'dmi' register
+                                from the RISC-V debug specification (v0.13+).
+                                On return, this is updated with the result of
+                                the request, encoded the same way.
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvDmiOp(PVRSRV_RGXDEV_INFO *psDevInfo,
+                           IMG_UINT64 *pui64DMI);
+
+/*!
+*******************************************************************************
+@Function       RGXReadFWModuleAddr
+
+@Description    Read a value at the given address in META or RISCV memory space
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32Addr        Address in META or RISC-V memory space
+
+@Output         pui32Value      Read value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXReadFWModuleAddr(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                 IMG_UINT32 ui32Addr,
+                                 IMG_UINT32 *pui32Value);
+
+/*!
+*******************************************************************************
+@Function       RGXWriteFWModuleAddr
+
+@Description    Write a value to the given address in META or RISC-V memory space
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32MemAddr     Address in META or RISC-V memory space
+@Input          ui32Value       Write value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXWriteFWModuleAddr(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                  IMG_UINT32 ui32MemAddr,
+                                  IMG_UINT32 ui32Value);
+
+/*!
+*******************************************************************************
+@Function       RGXGetFwMapping
+
+@Description    Retrieve any of the CPU Physical Address, Device Physical
+                Address or the raw value of the page table entry associated
+                with the firmware virtual address given.
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32FwVA        The Fw VA that needs decoding
+@Output         psCpuPA         Pointer to the resulting CPU PA
+@Output         psDevPA         Pointer to the resulting Dev PA
+@Output         pui64RawPTE     Pointer to the raw Page Table Entry value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXGetFwMapping(PVRSRV_RGXDEV_INFO *psDevInfo,
+                             IMG_UINT32 ui32FwVA,
+                             IMG_CPU_PHYADDR *psCpuPA,
+                             IMG_DEV_PHYADDR *psDevPA,
+                             IMG_UINT64 *pui64RawPTE);
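+
+/* Illustrative call, with caller-declared placeholders (not part of this API):
+ *     IMG_CPU_PHYADDR sCpuPA; IMG_DEV_PHYADDR sDevPA; IMG_UINT64 ui64RawPTE;
+ *     eError = RGXGetFwMapping(psDevInfo, ui32FwVA, &sCpuPA, &sDevPA, &ui64RawPTE);
+ */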
+
+#if defined(SUPPORT_AUTOVZ_HW_REGS) && !defined(SUPPORT_AUTOVZ)
+#error "VZ build configuration error: use of OS scratch registers supported only in AutoVz drivers."
+#endif
+
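+/* The KM_* macros below abstract how the kernel driver tracks the FW/OS
+ * connection state and alive tokens: via OS scratch registers when
+ * SUPPORT_AUTOVZ_HW_REGS is defined, via the shared psRGXFWIfConnectionCtl
+ * structure otherwise, or as fixed values for dynamic-VZ and no-hardware
+ * builds. */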
+#if defined(SUPPORT_AUTOVZ_HW_REGS)
+/* AutoVz with hw support */
+#define KM_GET_FW_CONNECTION(psDevInfo)                                OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH3)
+#define KM_GET_OS_CONNECTION(psDevInfo)                                OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH2)
+#define KM_SET_OS_CONNECTION(val, psDevInfo)           OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH2, RGXFW_CONNECTION_OS_##val)
+
+#define KM_GET_FW_ALIVE_TOKEN(psDevInfo)                       OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH1)
+#define KM_GET_OS_ALIVE_TOKEN(psDevInfo)                       OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH0)
+#define KM_SET_OS_ALIVE_TOKEN(val, psDevInfo)          OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH0, val)
+
+#else
+
+#if defined(SUPPORT_AUTOVZ)
+#define KM_GET_FW_ALIVE_TOKEN(psDevInfo)                       (psDevInfo->psRGXFWIfConnectionCtl->ui32AliveFwToken)
+#define KM_GET_OS_ALIVE_TOKEN(psDevInfo)                       (psDevInfo->psRGXFWIfConnectionCtl->ui32AliveOsToken)
+#define KM_SET_OS_ALIVE_TOKEN(val, psDevInfo)          OSWriteDeviceMem32WithWMB((volatile IMG_UINT32 *) &psDevInfo->psRGXFWIfConnectionCtl->ui32AliveOsToken, val)
+#endif /* defined(SUPPORT_AUTOVZ) */
+
+#if !defined(NO_HARDWARE) && (defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1)))
+/* native, static-vz and AutoVz using shared memory */
+#define KM_GET_FW_CONNECTION(psDevInfo)                        (psDevInfo->psRGXFWIfConnectionCtl->eConnectionFwState)
+#define KM_GET_OS_CONNECTION(psDevInfo)                        (psDevInfo->psRGXFWIfConnectionCtl->eConnectionOsState)
+#define KM_SET_OS_CONNECTION(val, psDevInfo)   OSWriteDeviceMem32WithWMB((void*)&psDevInfo->psRGXFWIfConnectionCtl->eConnectionOsState, RGXFW_CONNECTION_OS_##val)
+#else
+/* dynamic-vz & nohw */
+#define KM_GET_FW_CONNECTION(psDevInfo)                        (RGXFW_CONNECTION_FW_ACTIVE)
+#define KM_GET_OS_CONNECTION(psDevInfo)                        (RGXFW_CONNECTION_OS_ACTIVE)
+#define KM_SET_OS_CONNECTION(val, psDevInfo)
+#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (RGX_NUM_OS_SUPPORTED == 1) */
+#endif /* defined(SUPPORT_AUTOVZ_HW_REGS) */
+
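+/* Raw firmware heaps are indexed starting from the host OSID under AutoVz,
+ * otherwise from the first guest OSID. */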
+#if defined(SUPPORT_AUTOVZ)
+#define RGX_FIRST_RAW_HEAP_OSID                RGXFW_HOST_OS
+#else
+#define RGX_FIRST_RAW_HEAP_OSID                RGXFW_GUEST_OSID_START
+#endif
+
+#define KM_OS_CONNECTION_IS(val, psDevInfo)            (KM_GET_OS_CONNECTION(psDevInfo) == RGXFW_CONNECTION_OS_##val)
+#define KM_FW_CONNECTION_IS(val, psDevInfo)            (KM_GET_FW_CONNECTION(psDevInfo) == RGXFW_CONNECTION_FW_##val)
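+/* Example: KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) compares the firmware
+ * connection state against RGXFW_CONNECTION_FW_ACTIVE. */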
+
+#endif /* RGXFWUTILS_H */
+/******************************************************************************
+ End of file (rgxfwutils.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxhwperf.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxhwperf.c
new file mode 100644 (file)
index 0000000..a6e2dd4
--- /dev/null
@@ -0,0 +1,694 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX HW Performance implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX HW Performance implementation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "rgxdevice.h"
+#include "pvrsrv_error.h"
+#include "pvr_notifier.h"
+#include "osfunc.h"
+#include "allocmem.h"
+
+#include "pvrsrv.h"
+#include "pvrsrv_tlstreams.h"
+#include "pvrsrv_tlcommon.h"
+#include "tlclient.h"
+#include "tlstream.h"
+
+#include "rgxhwperf.h"
+#include "rgxapi_km.h"
+#include "rgxfwutils.h"
+#include "rgxtimecorr.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "pdump_km.h"
+#include "pvrsrv_apphint.h"
+#include "process_stats.h"
+#include "rgx_hwperf_table.h"
+#include "rgxinit.h"
+
+#include "info_page_defs.h"
+
+/* This is defined by default to enable producer callbacks.
+ * Clients of the TL interface can disable the use of the callback
+ * with PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK. */
+#define SUPPORT_TL_PRODUCER_CALLBACK 1
+
+/* Maximum enum value to prevent access to RGX_HWPERF_STREAM_ID2_CLIENT stream */
+#define RGX_HWPERF_MAX_STREAM_ID (RGX_HWPERF_STREAM_ID2_CLIENT)
+
+/* Defines size of buffers returned from acquire/release calls */
+#define FW_STREAM_BUFFER_SIZE (0x80000)
+#define HOST_STREAM_BUFFER_SIZE (0x20000)
+
+/* Must be at least as large as two tl packets of maximum size */
+static_assert(HOST_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1),
+              "HOST_STREAM_BUFFER_SIZE is less than (PVRSRVTL_MAX_PACKET_SIZE<<1)");
+static_assert(FW_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1),
+              "FW_STREAM_BUFFER_SIZE is less than (PVRSRVTL_MAX_PACKET_SIZE<<1)");
+
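+/* Walk the HWPerf packet stream starting at psCurPkt and return the number of
+ * bytes of whole packets to copy: packets are accumulated until at least
+ * ui32BytesExp bytes have been covered or the next packet would no longer fit
+ * within ui32AllowedSize. */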
+static inline IMG_UINT32
+RGXHWPerfGetPackets(IMG_UINT32  ui32BytesExp,
+                    IMG_UINT32  ui32AllowedSize,
+                    RGX_PHWPERF_V2_PACKET_HDR psCurPkt )
+{
+       IMG_UINT32 sizeSum = 0;
+
+       /* Traverse the array to find how many packets will fit in the available space. */
+       while ( sizeSum < ui32BytesExp  &&
+                       sizeSum + RGX_HWPERF_GET_SIZE(psCurPkt) < ui32AllowedSize )
+       {
+               sizeSum += RGX_HWPERF_GET_SIZE(psCurPkt);
+               psCurPkt = RGX_HWPERF_GET_NEXT_PACKET(psCurPkt);
+       }
+
+       return sizeSum;
+}
+
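+/* Mark the FW-to-host (L2) HWPerf data copy as suspended when the host buffer
+ * is full and no reader is connected; the warning below tells the user how to
+ * avoid further event loss. */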
+static inline void
+RGXSuspendHWPerfL2DataCopy(PVRSRV_RGXDEV_INFO* psDeviceInfo,
+                          IMG_BOOL bIsReaderConnected)
+{
+       if (!bIsReaderConnected)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "%s : HWPerf FW events enabled but host buffer for FW events is full "
+                       "and no reader is currently connected, suspending event collection. "
+                       "Connect a reader or restart driver to avoid event loss.", __func__));
+               psDeviceInfo->bSuspendHWPerfL2DataCopy = IMG_TRUE;
+       }
+}
+
+/******************************************************************************
+ * RGX HW Performance Profiling Server API(s)
+ *****************************************************************************/
+
+static IMG_BOOL RGXServerFeatureFlagsToHWPerfFlagsAddBlock(
+       RGX_HWPERF_BVNC_BLOCK   * const psBlocks,
+       IMG_UINT16                              * const pui16Count,
+       const IMG_UINT16                ui16BlockID, /* see RGX_HWPERF_CNTBLK_ID */
+       const IMG_UINT16                ui16NumCounters,
+       const IMG_UINT16                ui16NumBlocks)
+{
+       const IMG_UINT16 ui16Count = *pui16Count;
+
+       if (ui16Count < RGX_HWPERF_MAX_BVNC_BLOCK_LEN)
+       {
+               RGX_HWPERF_BVNC_BLOCK * const psBlock = &psBlocks[ui16Count];
+
+               /* If the GROUP is non-zero, convert from e.g. RGX_CNTBLK_ID_USC0 to RGX_CNTBLK_ID_USC_ALL. The table stores the former and
+               PVRScopeServices expects the latter (each alongside the number of blocks and counters). The conversion could always be
+               moved to PVRScopeServices, but it's less code this way. */
+               psBlock->ui16BlockID            = (ui16BlockID & RGX_CNTBLK_ID_GROUP_MASK) ? (ui16BlockID | RGX_CNTBLK_ID_UNIT_ALL_MASK) : ui16BlockID;
+               if ((ui16BlockID & RGX_CNTBLK_ID_DA_MASK) == RGX_CNTBLK_ID_DA_MASK)
+               {
+                       psBlock->ui16NumCounters        = RGX_CNTBLK_COUNTERS_MAX;
+               }
+               else
+               {
+                       psBlock->ui16NumCounters        = ui16NumCounters;
+               }
+               psBlock->ui16NumBlocks          = ui16NumBlocks;
+
+               *pui16Count = ui16Count + 1;
+               return IMG_TRUE;
+       }
+       return IMG_FALSE;
+}
+
+PVRSRV_ERROR RGXServerFeatureFlagsToHWPerfFlags(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_HWPERF_BVNC *psBVNC)
+{
+       IMG_PCHAR pszBVNC;
+       PVR_LOG_RETURN_IF_FALSE((NULL != psDevInfo), "psDevInfo invalid", PVRSRV_ERROR_INVALID_PARAMS);
+
+       if ((pszBVNC = RGXDevBVNCString(psDevInfo)))
+       {
+               size_t uiStringLength = OSStringNLength(pszBVNC, RGX_HWPERF_MAX_BVNC_LEN - 1);
+               OSStringLCopy(psBVNC->aszBvncString, pszBVNC, uiStringLength + 1);
+               memset(&psBVNC->aszBvncString[uiStringLength], 0, RGX_HWPERF_MAX_BVNC_LEN - uiStringLength);
+       }
+       else
+       {
+               *psBVNC->aszBvncString = 0;
+       }
+
+       psBVNC->ui32BvncKmFeatureFlags = 0x0;
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS))
+       {
+               psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_PERFBUS_FLAG;
+       }
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+       {
+               psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_S7_TOP_INFRASTRUCTURE_FLAG;
+       }
+#endif
+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE))
+       {
+               psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_XT_TOP_INFRASTRUCTURE_FLAG;
+       }
+#endif
+#if defined(RGX_FEATURE_PERF_COUNTER_BATCH_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERF_COUNTER_BATCH))
+       {
+               psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_PERF_COUNTER_BATCH_FLAG;
+       }
+#endif
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ROGUEXE))
+       {
+               psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_ROGUEXE_FLAG;
+       }
+#if defined(RGX_FEATURE_DUST_POWER_ISLAND_S7_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, DUST_POWER_ISLAND_S7))
+       {
+               psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_DUST_POWER_ISLAND_S7_FLAG;
+       }
+#endif
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBE2_IN_XE))
+       {
+               psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_PBE2_IN_XE_FLAG;
+       }
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT))
+       {
+               psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_MULTICORE_FLAG;
+       }
+
+#ifdef SUPPORT_WORKLOAD_ESTIMATION
+       /* Not part of the BVNC feature line, so it doesn't need the feature-supported check */
+       psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION;
+#endif
+
+       /* Define the HW counter block counts. */
+       {
+               RGX_HWPERF_BVNC_BLOCK                                   * const psBlocks        = psBVNC->aBvncBlocks;
+               IMG_UINT16                                                              * const pui16Count      = &psBVNC->ui16BvncBlocks;
+               const RGXFW_HWPERF_CNTBLK_TYPE_MODEL    *asCntBlkTypeModel;
+               const IMG_UINT32                                                ui32CntBlkModelLen      = RGXGetHWPerfBlockConfig(&asCntBlkTypeModel);
+               IMG_UINT32                                                              ui32BlkCfgIdx;
+               size_t                                                                  uiCount;
+               IMG_BOOL                                                                bOk                                     = IMG_TRUE;
+
+               // Initialise to zero blocks
+               *pui16Count = 0;
+
+               // Add all the blocks
+               for (ui32BlkCfgIdx = 0; ui32BlkCfgIdx < ui32CntBlkModelLen; ui32BlkCfgIdx++)
+               {
+                       const RGXFW_HWPERF_CNTBLK_TYPE_MODEL    * const psCntBlkInfo = &asCntBlkTypeModel[ui32BlkCfgIdx];
+                       RGX_HWPERF_CNTBLK_RT_INFO                               sCntBlkRtInfo;
+                       /* psCntBlkInfo->ui8NumUnits gives compile-time info. For BVNC agnosticism, we use this: */
+                       if (psCntBlkInfo->pfnIsBlkPresent(psCntBlkInfo, psDevInfo, &sCntBlkRtInfo))
+                       {
+                               bOk &= RGXServerFeatureFlagsToHWPerfFlagsAddBlock(psBlocks, pui16Count, psCntBlkInfo->ui32CntBlkIdBase, psCntBlkInfo->ui8NumCounters, sCntBlkRtInfo.ui32NumUnits);
+                       }
+               }
+
+               /* If this fails, consider why the static_assert didn't fail, and consider increasing RGX_HWPERF_MAX_BVNC_BLOCK_LEN */
+               PVR_ASSERT(bOk);
+
+               // Zero the remaining entries
+               uiCount = *pui16Count;
+               OSDeviceMemSet(&psBlocks[uiCount], 0, (RGX_HWPERF_MAX_BVNC_BLOCK_LEN - uiCount) * sizeof(*psBlocks));
+       }
+
+       return PVRSRV_OK;
+}
+
+/*
+       PVRSRVRGXConfigMuxHWPerfCountersKM
+ */
+PVRSRV_ERROR PVRSRVRGXConfigMuxHWPerfCountersKM(
+               CONNECTION_DATA               *psConnection,
+               PVRSRV_DEVICE_NODE            *psDeviceNode,
+               IMG_UINT32                     ui32ArrayLen,
+               RGX_HWPERF_CONFIG_MUX_CNTBLK  *psBlockConfigs)
+{
+       PVRSRV_ERROR            eError = PVRSRV_OK;
+       RGXFWIF_KCCB_CMD        sKccbCmd;
+       DEVMEM_MEMDESC*         psFwBlkConfigsMemDesc;
+       RGX_HWPERF_CONFIG_MUX_CNTBLK* psFwArray;
+       IMG_UINT32                      ui32kCCBCommandSlot;
+       PVRSRV_RGXDEV_INFO      *psDevice;
+
+       PVR_LOG_RETURN_IF_FALSE(psDeviceNode != NULL, "psDeviceNode is NULL",
+                               PVRSRV_ERROR_INVALID_PARAMS);
+       psDevice = psDeviceNode->pvDevice;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+       PVR_LOG_RETURN_IF_FALSE(ui32ArrayLen > 0, "ui32ArrayLen is 0",
+                         PVRSRV_ERROR_INVALID_PARAMS);
+       PVR_LOG_RETURN_IF_FALSE(psBlockConfigs != NULL, "psBlockConfigs is NULL",
+                         PVRSRV_ERROR_INVALID_PARAMS);
+
+       PVR_DPF_ENTERED;
+
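+       /* The block configuration is handed to the firmware by allocating a
+        * FW-accessible buffer, copying the configs into it, referencing its FW
+        * address from a KCCB command, waiting for the FW to process that
+        * command, and finally freeing the temporary buffer. */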
+       /* Fill in the command structure with the parameters needed
+        */
+       sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS;
+       sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.ui32NumBlocks = ui32ArrayLen;
+
+       /* used for passing counters config to the Firmware, write-only for the CPU */
+       eError = DevmemFwAllocate(psDevice,
+                                 sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK)*ui32ArrayLen,
+                                 PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                                 PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                                 PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                                 PVRSRV_MEMALLOCFLAG_GPU_UNCACHED |
+                                 PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                                 PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC |
+                                 PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+                                 PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN),
+                                 "FwHWPerfCountersConfigBlock",
+                                 &psFwBlkConfigsMemDesc);
+       PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate");
+
+       eError = RGXSetFirmwareAddress(&sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.sBlockConfigs,
+                             psFwBlkConfigsMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", fail1);
+
+       eError = DevmemAcquireCpuVirtAddr(psFwBlkConfigsMemDesc, (void **)&psFwArray);
+       PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail2);
+
+       OSCachedMemCopyWMB(psFwArray, psBlockConfigs, sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK)*ui32ArrayLen);
+       DevmemPDumpLoadMem(psFwBlkConfigsMemDesc,
+                          0,
+                          sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK)*ui32ArrayLen,
+                          PDUMP_FLAGS_CONTINUOUS);
+
+       /*PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigMuxHWPerfCountersKM parameters set, calling FW"));*/
+
+       /* Ask the FW to carry out the HWPerf configuration command
+        */
+       eError = RGXScheduleCommandAndGetKCCBSlot(psDevice,
+                                                 RGXFWIF_DM_GP,
+                                                 &sKccbCmd,
+                                                 PDUMP_FLAGS_CONTINUOUS,
+                                                 &ui32kCCBCommandSlot);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", fail2);
+
+       /*PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigMuxHWPerfCountersKM command scheduled for FW"));*/
+
+       /* Wait for FW to complete */
+       eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", fail3);
+
+       /* Release temporary memory used for block configuration
+        */
+       RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc);
+       DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc);
+       DevmemFwUnmapAndFree(psDevice, psFwBlkConfigsMemDesc);
+
+       /*PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigMuxHWPerfCountersKM firmware completed"));*/
+
+       PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks configured and ENABLED", ui32ArrayLen));
+
+       PVR_DPF_RETURN_OK;
+
+fail3:
+       DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc);
+fail2:
+       RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc);
+fail1:
+       DevmemFwUnmapAndFree(psDevice, psFwBlkConfigsMemDesc);
+
+       PVR_DPF_RETURN_RC(eError);
+}
+
+
+/*
+       PVRSRVRGXConfigCustomCountersKM
+ */
+PVRSRV_ERROR PVRSRVRGXConfigCustomCountersKM(
+               CONNECTION_DATA             * psConnection,
+               PVRSRV_DEVICE_NODE          * psDeviceNode,
+               IMG_UINT16                    ui16CustomBlockID,
+               IMG_UINT16                    ui16NumCustomCounters,
+               IMG_UINT32                  * pui32CustomCounterIDs)
+{
+       PVRSRV_ERROR            eError = PVRSRV_OK;
+       RGXFWIF_KCCB_CMD        sKccbCmd;
+       DEVMEM_MEMDESC*         psFwSelectCntrsMemDesc = NULL;
+       IMG_UINT32*                     psFwArray;
+       IMG_UINT32                      ui32kCCBCommandSlot;
+       PVRSRV_RGXDEV_INFO      *psDevice = psDeviceNode->pvDevice;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(psDeviceNode);
+
+       PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVRGXConfigCustomCountersKM: configure block %u to read %u counters", ui16CustomBlockID, ui16NumCustomCounters));
+
+       /* Fill in the command structure with the parameters needed */
+       sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS;
+       sKccbCmd.uCmdData.sHWPerfSelectCstmCntrs.ui16NumCounters = ui16NumCustomCounters;
+       sKccbCmd.uCmdData.sHWPerfSelectCstmCntrs.ui16CustomBlock = ui16CustomBlockID;
+
+       if (ui16NumCustomCounters > 0)
+       {
+               PVR_ASSERT(pui32CustomCounterIDs);
+
+               /* used for passing counters config to the Firmware, write-only for the CPU */
+               eError = DevmemFwAllocate(psDevice,
+                                         sizeof(IMG_UINT32) * ui16NumCustomCounters,
+                                         PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                                         PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                                         PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                                         PVRSRV_MEMALLOCFLAG_GPU_UNCACHED |
+                                         PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                                         PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC |
+                                         PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+                                         PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN),
+                                         "FwHWPerfConfigCustomCounters",
+                                         &psFwSelectCntrsMemDesc);
+               PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate");
+
+               eError = RGXSetFirmwareAddress(&sKccbCmd.uCmdData.sHWPerfSelectCstmCntrs.sCustomCounterIDs,
+                                     psFwSelectCntrsMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", fail1);
+
+               eError = DevmemAcquireCpuVirtAddr(psFwSelectCntrsMemDesc, (void **)&psFwArray);
+               PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail2);
+
+               OSCachedMemCopyWMB(psFwArray, pui32CustomCounterIDs, sizeof(IMG_UINT32) * ui16NumCustomCounters);
+               DevmemPDumpLoadMem(psFwSelectCntrsMemDesc,
+                                  0,
+                                  sizeof(IMG_UINT32) * ui16NumCustomCounters,
+                                  PDUMP_FLAGS_CONTINUOUS);
+       }
+
+       /* Push in the KCCB the command to configure the custom counters block */
+       eError = RGXScheduleCommandAndGetKCCBSlot(psDevice,
+                                                 RGXFWIF_DM_GP,
+                                                 &sKccbCmd,
+                                                 PDUMP_FLAGS_CONTINUOUS,
+                                                 &ui32kCCBCommandSlot);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", fail3);
+
+       PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigCustomCountersKM: Command scheduled"));
+
+       /* Wait for FW to complete */
+       eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", fail3);
+
+       PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigCustomCountersKM: FW operation completed"));
+
+       if (ui16NumCustomCounters > 0)
+       {
+               /* Release temporary memory used for block configuration */
+               RGXUnsetFirmwareAddress(psFwSelectCntrsMemDesc);
+               DevmemReleaseCpuVirtAddr(psFwSelectCntrsMemDesc);
+               DevmemFwUnmapAndFree(psDevice, psFwSelectCntrsMemDesc);
+       }
+
+       PVR_DPF((PVR_DBG_MESSAGE, "Readings for %u HWPerf custom counters will be sent with the next HW events", ui16NumCustomCounters));
+
+       PVR_DPF_RETURN_OK;
+
+fail3:
+       if (psFwSelectCntrsMemDesc)
+       {
+               DevmemReleaseCpuVirtAddr(psFwSelectCntrsMemDesc);
+       }
+fail2:
+       if (psFwSelectCntrsMemDesc)
+       {
+               RGXUnsetFirmwareAddress(psFwSelectCntrsMemDesc);
+       }
+fail1:
+       if (psFwSelectCntrsMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevice, psFwSelectCntrsMemDesc);
+       }
+
+       PVR_DPF_RETURN_RC(eError);
+}
+
+/*
+       PVRSRVRGXConfigureHWPerfBlocksKM
+ */
+PVRSRV_ERROR PVRSRVRGXConfigureHWPerfBlocksKM(
+               CONNECTION_DATA          * psConnection,
+               PVRSRV_DEVICE_NODE       * psDeviceNode,
+               IMG_UINT32                 ui32CtrlWord,
+               IMG_UINT32                 ui32ArrayLen,
+               RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs)
+{
+       PVRSRV_ERROR             eError = PVRSRV_OK;
+       RGXFWIF_KCCB_CMD         sKccbCmd;
+       DEVMEM_MEMDESC           *psFwBlkConfigsMemDesc;
+       RGX_HWPERF_CONFIG_CNTBLK *psFwArray;
+       IMG_UINT32               ui32kCCBCommandSlot;
+       PVRSRV_RGXDEV_INFO       *psDevice;
+
+       PVR_LOG_RETURN_IF_FALSE(psDeviceNode != NULL, "psDeviceNode is NULL",
+                               PVRSRV_ERROR_INVALID_PARAMS);
+
+       psDevice = psDeviceNode->pvDevice;
+
+       PVR_UNREFERENCED_PARAMETER(ui32CtrlWord);
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+       PVR_LOG_RETURN_IF_FALSE(ui32ArrayLen > 0, "ui32ArrayLen is 0",
+                               PVRSRV_ERROR_INVALID_PARAMS);
+       PVR_LOG_RETURN_IF_FALSE(psBlockConfigs != NULL, "psBlockConfigs is NULL",
+                               PVRSRV_ERROR_INVALID_PARAMS);
+
+       PVR_DPF_ENTERED;
+
+       /* Fill in the command structure with the parameters needed */
+       sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS;
+       sKccbCmd.uCmdData.sHWPerfCfgDABlks.ui32NumBlocks = ui32ArrayLen;
+
+       /* used for passing counters config to the Firmware, write-only for the CPU */
+       eError = DevmemFwAllocate(psDevice,
+                                 sizeof(RGX_HWPERF_CONFIG_CNTBLK) * ui32ArrayLen,
+                                 PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                                 PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                                 PVRSRV_MEMALLOCFLAG_GPU_UNCACHED |
+                                 PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                                 PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC |
+                                 PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+                                 PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN),
+                                 "FwHWPerfCountersDAConfigBlock",
+                                 &psFwBlkConfigsMemDesc);
+       PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate");
+
+       eError = RGXSetFirmwareAddress(&sKccbCmd.uCmdData.sHWPerfCfgDABlks.sBlockConfigs,
+                                 psFwBlkConfigsMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", fail1);
+
+       eError = DevmemAcquireCpuVirtAddr(psFwBlkConfigsMemDesc, (void **)&psFwArray);
+       PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail2);
+
+       OSCachedMemCopyWMB(psFwArray, psBlockConfigs, sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen);
+       DevmemPDumpLoadMem(psFwBlkConfigsMemDesc,
+                          0,
+                          sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen,
+                          PDUMP_FLAGS_CONTINUOUS);
+
+       /* Ask the FW to carry out the HWPerf configuration command. */
+       eError = RGXScheduleCommandAndGetKCCBSlot(psDevice,
+                                                 RGXFWIF_DM_GP,
+                                                 &sKccbCmd,
+                                                 PDUMP_FLAGS_CONTINUOUS,
+                                                 &ui32kCCBCommandSlot);
+
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", fail2);
+
+       /* Wait for FW to complete */
+       eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", fail3);
+
+       /* Release temporary memory used for block configuration. */
+       RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc);
+       DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc);
+       DevmemFwUnmapAndFree(psDevice, psFwBlkConfigsMemDesc);
+
+       PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks configured and ENABLED",
+                ui32ArrayLen));
+
+       PVR_DPF_RETURN_OK;
+
+fail3:
+       DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc);
+
+fail2:
+       RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc);
+
+fail1:
+       DevmemFwUnmapAndFree(psDevice, psFwBlkConfigsMemDesc);
+
+       PVR_DPF_RETURN_RC(eError);
+}
+
+/******************************************************************************
+ * Currently only implemented on Linux. The feature can be enabled to provide
+ * an interface to 3rd-party kernel modules that wish to access the
+ * HWPerf data. The API is documented in the rgxapi_km.h header and
+ * the rgx_hwperf* headers.
+ *****************************************************************************/
+
+/* Internal HWPerf kernel connection/device data object to track the state
+ * of a client session.
+ */
+typedef struct
+{
+       PVRSRV_DEVICE_NODE* psRgxDevNode;
+       PVRSRV_RGXDEV_INFO* psRgxDevInfo;
+
+       /* TL Open/close state */
+       IMG_HANDLE          hSD[RGX_HWPERF_MAX_STREAM_ID];
+
+       /* TL Acquire/release state */
+       IMG_PBYTE                       pHwpBuf[RGX_HWPERF_MAX_STREAM_ID];                      /*!< buffer returned to user in acquire call */
+       IMG_PBYTE                       pHwpBufEnd[RGX_HWPERF_MAX_STREAM_ID];           /*!< pointer to end of HwpBuf */
+       IMG_PBYTE                       pTlBuf[RGX_HWPERF_MAX_STREAM_ID];                       /*!< buffer obtained via TlAcquireData */
+       IMG_PBYTE                       pTlBufPos[RGX_HWPERF_MAX_STREAM_ID];            /*!< initial position in TlBuf to acquire packets */
+       IMG_PBYTE                       pTlBufRead[RGX_HWPERF_MAX_STREAM_ID];           /*!< pointer to the last packet read */
+       IMG_UINT32                      ui32AcqDataLen[RGX_HWPERF_MAX_STREAM_ID];       /*!< length of acquired TlBuf */
+       IMG_BOOL                        bRelease[RGX_HWPERF_MAX_STREAM_ID];             /*!< used to determine whether or not to release currently held TlBuf */
+
+} RGX_KM_HWPERF_DEVDATA;
+
+PVRSRV_ERROR RGXHWPerfConfigMuxCounters(
+               RGX_HWPERF_CONNECTION           *psHWPerfConnection,
+               IMG_UINT32                                           ui32NumBlocks,
+               RGX_HWPERF_CONFIG_MUX_CNTBLK    *asBlockConfigs)
+{
+       PVRSRV_ERROR           eError = PVRSRV_OK;
+       RGX_KM_HWPERF_DEVDATA* psDevData;
+       RGX_HWPERF_DEVICE *psHWPerfDev;
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+       /* Validate input argument values supplied by the caller */
+       if (!psHWPerfConnection || ui32NumBlocks==0 || !asBlockConfigs)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       if (ui32NumBlocks > RGXFWIF_HWPERF_CTRL_BLKS_MAX)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       psHWPerfDev = psHWPerfConnection->psHWPerfDevList;
+
+       while (psHWPerfDev)
+       {
+               psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData;
+
+               /* Call the internal server API */
+               eError = PVRSRVRGXConfigMuxHWPerfCountersKM(NULL,
+                                                           psDevData->psRgxDevNode,
+                                                           ui32NumBlocks,
+                                                           asBlockConfigs);
+               PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVRGXConfigMuxHWPerfCountersKM");
+
+               psHWPerfDev = psHWPerfDev->psNext;
+       }
+
+       return eError;
+}
+
+
+PVRSRV_ERROR RGXHWPerfConfigureAndEnableCustomCounters(
+               RGX_HWPERF_CONNECTION *psHWPerfConnection,
+               IMG_UINT16              ui16CustomBlockID,
+               IMG_UINT16          ui16NumCustomCounters,
+               IMG_UINT32         *pui32CustomCounterIDs)
+{
+       PVRSRV_ERROR            eError;
+       RGX_HWPERF_DEVICE       *psHWPerfDev;
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+       /* Validate input arguments supplied by the caller */
+       PVR_LOG_RETURN_IF_FALSE((NULL != psHWPerfConnection), "psHWPerfConnection invalid",
+                          PVRSRV_ERROR_INVALID_PARAMS);
+       PVR_LOG_RETURN_IF_FALSE((0 != ui16NumCustomCounters), "ui16NumCustomCounters invalid",
+                                  PVRSRV_ERROR_INVALID_PARAMS);
+       PVR_LOG_RETURN_IF_FALSE((NULL != pui32CustomCounterIDs), "pui32CustomCounterIDs invalid",
+                                  PVRSRV_ERROR_INVALID_PARAMS);
+
+       /* Check the block ID is within range */
+       PVR_LOG_RETURN_IF_FALSE((!(ui16CustomBlockID > RGX_HWPERF_MAX_CUSTOM_BLKS)), "ui16CustomBlockID invalid",
+                                  PVRSRV_ERROR_INVALID_PARAMS);
+
+       /* Check # of counters */
+       PVR_LOG_RETURN_IF_FALSE((!(ui16NumCustomCounters > RGX_HWPERF_MAX_CUSTOM_CNTRS)), "ui16NumCustomCounters invalid",
+                                  PVRSRV_ERROR_INVALID_PARAMS);
+
+       psHWPerfDev = psHWPerfConnection->psHWPerfDevList;
+
+       while (psHWPerfDev)
+       {
+               RGX_KM_HWPERF_DEVDATA *psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData;
+
+               eError = PVRSRVRGXConfigCustomCountersKM(NULL,
+                                                        psDevData->psRgxDevNode,
+                                                        ui16CustomBlockID,
+                                                        ui16NumCustomCounters,
+                                                        pui32CustomCounterIDs);
+               PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVRGXConfigCustomCountersKM");
+
+               psHWPerfDev = psHWPerfDev->psNext;
+       }
+
+       return PVRSRV_OK;
+}
+
+/******************************************************************************
+ End of file (rgxhwperf.c)
+ ******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxhwperf.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxhwperf.h
new file mode 100644 (file)
index 0000000..8819fe4
--- /dev/null
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX HW Performance header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX HWPerf functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXHWPERF_H_
+#define RGXHWPERF_H_
+
+#include "rgxhwperf_common.h"
+
+/******************************************************************************
+ * RGX HW Performance Profiling API(s) Rogue specific
+ *****************************************************************************/
+
+PVRSRV_ERROR PVRSRVRGXConfigMuxHWPerfCountersKM(
+       CONNECTION_DATA               *psConnection,
+       PVRSRV_DEVICE_NODE            *psDeviceNode,
+       IMG_UINT32                     ui32ArrayLen,
+       RGX_HWPERF_CONFIG_MUX_CNTBLK  *psBlockConfigs);
+
+
+PVRSRV_ERROR PVRSRVRGXConfigCustomCountersKM(
+       CONNECTION_DATA    * psConnection,
+       PVRSRV_DEVICE_NODE * psDeviceNode,
+       IMG_UINT16           ui16CustomBlockID,
+       IMG_UINT16           ui16NumCustomCounters,
+       IMG_UINT32         * pui32CustomCounterIDs);
+
+PVRSRV_ERROR PVRSRVRGXConfigureHWPerfBlocksKM(
+       CONNECTION_DATA          * psConnection,
+       PVRSRV_DEVICE_NODE       * psDeviceNode,
+       IMG_UINT32                 ui32CtrlWord,
+       IMG_UINT32                 ui32ArrayLen,
+       RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs);
+
+#endif /* RGXHWPERF_H_ */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxinit.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxinit.c
new file mode 100644 (file)
index 0000000..dd572a5
--- /dev/null
@@ -0,0 +1,5127 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific initialisation routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(__linux__)
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+#include "img_defs.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "pvrsrv_bridge_init.h"
+#include "rgx_bridge_init.h"
+#include "syscommon.h"
+#include "rgx_heaps.h"
+#include "rgxheapconfig.h"
+#include "rgxpower.h"
+#include "tlstream.h"
+#include "pvrsrv_tlstreams.h"
+
+#include "rgxinit.h"
+#include "rgxbvnc.h"
+#include "rgxmulticore.h"
+
+#include "pdump_km.h"
+#include "handle.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "rgxmem.h"
+#include "sync_internal.h"
+#include "pvrsrv_apphint.h"
+#include "oskm_apphint.h"
+#include "rgxfwdbg.h"
+#include "info_page.h"
+
+#include "rgxfwimageutils.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_km.h"
+
+#include "rgxmmuinit.h"
+#include "rgxmipsmmuinit.h"
+#include "physmem.h"
+#include "devicemem_utils.h"
+#include "devicemem_server.h"
+#include "physmem_osmem.h"
+#include "physmem_lma.h"
+
+#include "rgxdebug.h"
+#include "rgxhwperf.h"
+#include "htbserver.h"
+
+#include "rgx_options.h"
+#include "pvrversion.h"
+
+#include "rgx_compat_bvnc.h"
+
+#include "rgx_heaps.h"
+
+#include "rgxta3d.h"
+#include "rgxtimecorr.h"
+#include "rgxshader.h"
+
+#include "rgx_bvnc_defs_km.h"
+#if defined(PDUMP)
+#include "rgxstartstop.h"
+#endif
+
+#include "rgx_fwif_alignchecks.h"
+#include "vmm_pvz_client.h"
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "rgxworkest.h"
+#endif
+
+#if defined(SUPPORT_PDVFS)
+#include "rgxpdvfs.h"
+#endif
+
+#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER)
+#include "rgxsoctimer.h"
+#endif
+
+#if defined(PDUMP) && defined(SUPPORT_SECURITY_VALIDATION)
+#include "pdump_physmem.h"
+#endif
+
+static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
+static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_CHAR **ppszVersionString);
+static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_PUINT32 pui32RGXClockSpeed);
+static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64ResetValue1, IMG_UINT64 ui64ResetValue2);
+static PVRSRV_ERROR RGXPhysMemDeviceHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode);
+static void DevPart2DeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+#if (RGX_NUM_OS_SUPPORTED > 1)
+static PVRSRV_ERROR RGXInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32OSid);
+static void RGXDeInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap);
+#endif
+
+/* Services internal heap identification used in this file only */
+#define RGX_FIRMWARE_MAIN_HEAP_IDENT   "FwMain"   /*!< RGX Main Firmware Heap identifier */
+#define RGX_FIRMWARE_CONFIG_HEAP_IDENT "FwConfig" /*!< RGX Config firmware Heap identifier */
+
+#define RGX_MMU_PAGE_SIZE_4KB   (   4 * 1024)
+#define RGX_MMU_PAGE_SIZE_16KB  (  16 * 1024)
+#define RGX_MMU_PAGE_SIZE_64KB  (  64 * 1024)
+#define RGX_MMU_PAGE_SIZE_256KB ( 256 * 1024)
+#define RGX_MMU_PAGE_SIZE_1MB   (1024 * 1024)
+#define RGX_MMU_PAGE_SIZE_2MB   (2048 * 1024)
+#define RGX_MMU_PAGE_SIZE_MIN RGX_MMU_PAGE_SIZE_4KB
+#define RGX_MMU_PAGE_SIZE_MAX RGX_MMU_PAGE_SIZE_2MB
+
+#define VAR(x) #x
+
+static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo);
+
+#if !defined(NO_HARDWARE)
+/*************************************************************************/ /*!
+@Function       SampleIRQCount
+@Description    Utility function taking snapshots of RGX FW interrupt count.
+@Input          psDevInfo    Device Info structure
+
+@Return         IMG_BOOL     Returns IMG_TRUE if RGX FW IRQ is not equal to
+                             sampled RGX FW IRQ count for any RGX FW thread.
+ */ /**************************************************************************/
+static INLINE IMG_BOOL SampleIRQCount(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       IMG_BOOL bReturnVal = IMG_FALSE;
+       volatile IMG_UINT32 *aui32SampleIrqCount = psDevInfo->aui32SampleIRQCount;
+       IMG_UINT32 ui32IrqCnt;
+
+#if defined(RGX_FW_IRQ_OS_COUNTERS)
+       if (PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               bReturnVal = IMG_TRUE;
+       }
+       else
+       {
+               get_irq_cnt_val(ui32IrqCnt, RGXFW_HOST_OS, psDevInfo);
+
+               if (ui32IrqCnt != aui32SampleIrqCount[RGXFW_THREAD_0])
+               {
+                       aui32SampleIrqCount[RGXFW_THREAD_0] = ui32IrqCnt;
+                       bReturnVal = IMG_TRUE;
+               }
+       }
+#else
+       IMG_UINT32 ui32TID;
+
+       for_each_irq_cnt(ui32TID)
+       {
+               get_irq_cnt_val(ui32IrqCnt, ui32TID, psDevInfo);
+
+               /* treat unhandled interrupts here to align host count with fw count */
+               if (aui32SampleIrqCount[ui32TID] != ui32IrqCnt)
+               {
+                       aui32SampleIrqCount[ui32TID] = ui32IrqCnt;
+                       bReturnVal = IMG_TRUE;
+               }
+       }
+#endif
+
+       return bReturnVal;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXHostSafetyEvents
+@Description    Returns the event status masked to keep only the safety
+                events handled by the Host
+@Input          psDevInfo    Device Info structure
+@Return         IMG_UINT32   Status of Host-handled safety events
+ */ /**************************************************************************/
+static INLINE IMG_UINT32 RGXHostSafetyEvents(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       if (PVRSRV_VZ_MODE_IS(GUEST) || (psDevInfo->ui32HostSafetyEventMask == 0))
+       {
+               return 0;
+       }
+       else
+       {
+               IMG_UINT32 ui32SafetyEventStatus = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE);
+               return (ui32SafetyEventStatus & psDevInfo->ui32HostSafetyEventMask);
+       }
+}
+
+/*************************************************************************/ /*!
+@Function       RGXSafetyEventCheck
+@Description    Clears the Event Status register and checks if any of the
+                safety events need Host handling
+@Input          psDevInfo    Device Info structure
+@Return         IMG_BOOL     Are there any safety events for Host to handle?
+ */ /**************************************************************************/
+static INLINE IMG_BOOL RGXSafetyEventCheck(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       IMG_BOOL bSafetyEvent = IMG_FALSE;
+
+       if (psDevInfo->ui32HostSafetyEventMask != 0)
+       {
+               IMG_UINT32 ui32EventStatus = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_EVENT_STATUS);
+
+               if (BIT_ISSET(ui32EventStatus, RGX_CR_EVENT_STATUS_SAFETY_SHIFT))
+               {
+                       /* clear the safety event */
+                       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_EVENT_CLEAR, RGX_CR_EVENT_CLEAR_SAFETY_EN);
+
+                       /* report if there is anything for the Host to handle */
+                       bSafetyEvent = (RGXHostSafetyEvents(psDevInfo) != 0);
+               }
+       }
+
+       return bSafetyEvent;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXSafetyEventHandler
+@Description    Handles the Safety Events that the Host is responsible for
+@Input          psDevInfo    Device Info structure
+ */ /**************************************************************************/
+static void RGXSafetyEventHandler(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       IMG_UINT32 ui32HostSafetyStatus = RGXHostSafetyEvents(psDevInfo);
+       RGX_CONTEXT_RESET_REASON eResetReason = RGX_CONTEXT_RESET_REASON_NONE;
+
+       if (ui32HostSafetyStatus != 0)
+       {
+               /* clear the safety bus events handled by the Host */
+               OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE, ui32HostSafetyStatus);
+
+               if (BIT_ISSET(ui32HostSafetyStatus, RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_SHIFT))
+               {
+                       IMG_UINT32 ui32FaultFlag;
+                       IMG_UINT32 ui32FaultFW = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FAULT_FW_STATUS);
+                       IMG_UINT32 ui32CorrectedBitOffset = RGX_CR_FAULT_FW_STATUS_CPU_CORRECT_SHIFT -
+                                                                                               RGX_CR_FAULT_FW_STATUS_CPU_DETECT_SHIFT;
+
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Firmware safety fault status: 0x%X", __func__, ui32FaultFW));
+
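+                       /* Bits below ui32CorrectedBitOffset report detected (uncorrected)
+                        * faults; the matching corrected-fault bit sits
+                        * ui32CorrectedBitOffset positions higher in the register. */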
+                       for (ui32FaultFlag = 0; ui32FaultFlag < ui32CorrectedBitOffset; ui32FaultFlag++)
+                       {
+                               if (BIT_ISSET(ui32FaultFW, ui32FaultFlag))
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s: Firmware safety hardware fault detected (0x%lX).",
+                                                __func__, BIT(ui32FaultFlag)));
+                                       eResetReason = RGX_CONTEXT_RESET_REASON_FW_ECC_ERR;
+                               }
+                               else if (BIT_ISSET(ui32FaultFW, ui32FaultFlag + ui32CorrectedBitOffset))
+                               {
+                                       PVR_DPF((PVR_DBG_WARNING, "%s: Firmware safety hardware fault corrected (0x%lX).",
+                                                __func__, BIT(ui32FaultFlag)));
+
+                                       /* Only report this if we haven't detected a more serious error */
+                                       if (eResetReason != RGX_CONTEXT_RESET_REASON_FW_ECC_ERR)
+                                       {
+                                               eResetReason = RGX_CONTEXT_RESET_REASON_FW_ECC_OK;
+                                       }
+                               }
+                       }
+
+                       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FAULT_FW_CLEAR, ui32FaultFW);
+               }
+
+               if (BIT_ISSET(ui32HostSafetyStatus, RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT))
+               {
+                       volatile RGXFWIF_POW_STATE ePowState = psDevInfo->psRGXFWIfFwSysData->ePowState;
+
+                       if (ePowState == RGXFWIF_POW_ON)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "%s: Safety Watchdog Trigger!", __func__));
+
+                               /* Only report this if we haven't detected a more serious error */
+                               if (eResetReason != RGX_CONTEXT_RESET_REASON_FW_ECC_ERR)
+                               {
+                                       eResetReason = RGX_CONTEXT_RESET_REASON_FW_WATCHDOG;
+                               }
+                       }
+               }
+
+               /* Notify client and system layer of any error */
+               if (eResetReason != RGX_CONTEXT_RESET_REASON_NONE)
+               {
+                       PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode;
+                       PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig;
+
+                       /* Client notification of device error will be achieved by
+                        * clients calling UM function RGXGetLastDeviceError() */
+                       psDevInfo->eLastDeviceError = eResetReason;
+
+                       /* Notify system layer of any error */
+                       if (psDevConfig->pfnSysDevErrorNotify)
+                       {
+                               PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0};
+
+                               sErrorData.eResetReason = eResetReason;
+
+                               psDevConfig->pfnSysDevErrorNotify(psDevConfig,
+                                                                 &sErrorData);
+                       }
+               }
+       }
+}
+
+static IMG_BOOL _WaitForInterruptsTimeoutCheck(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+       PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+       IMG_UINT32 ui32idx;
+#endif
+
+       RGXDEBUG_PRINT_IRQ_COUNT(psDevInfo);
+
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+       PVR_DPF((PVR_DBG_ERROR,
+               "Last RGX_LISRHandler State (DevID %u): 0x%08X Clock: %llu",
+                        psDeviceNode->sDevId.ui32InternalID,
+                        psDeviceNode->sLISRExecutionInfo.ui32Status,
+                        psDeviceNode->sLISRExecutionInfo.ui64Clockns));
+
+       for_each_irq_cnt(ui32idx)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               MSG_IRQ_CNT_TYPE " %u: InterruptCountSnapshot: 0x%X",
+                               ui32idx, psDeviceNode->sLISRExecutionInfo.aui32InterruptCountSnapshot[ui32idx]));
+       }
+#else
+       PVR_DPF((PVR_DBG_ERROR, "No further information available. Please enable PVRSRV_DEBUG_LISR_EXECUTION"));
+#endif
+
+       return SampleIRQCount(psDevInfo);
+}
+
+void RGX_WaitForInterruptsTimeout(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       IMG_BOOL bScheduleMISR;
+
+       if (PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               bScheduleMISR = IMG_TRUE;
+       }
+       else
+       {
+               bScheduleMISR = _WaitForInterruptsTimeoutCheck(psDevInfo);
+       }
+
+       if (bScheduleMISR)
+       {
+               OSScheduleMISR(psDevInfo->pvMISRData);
+
+               if (psDevInfo->pvAPMISRData != NULL)
+               {
+                       OSScheduleMISR(psDevInfo->pvAPMISRData);
+               }
+       }
+}
+
+static inline IMG_BOOL RGXAckHwIrq(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                  IMG_UINT32 ui32IRQStatusReg,
+                                                                  IMG_UINT32 ui32IRQStatusEventMsk,
+                                                                  IMG_UINT32 ui32IRQClearReg,
+                                                                  IMG_UINT32 ui32IRQClearMask)
+{
+       IMG_UINT32 ui32IRQStatus = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32IRQStatusReg);
+
+       if (ui32IRQStatus & ui32IRQStatusEventMsk)
+       {
+               /* acknowledge and clear the interrupt */
+               OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32IRQClearReg, ui32IRQClearMask);
+               return IMG_TRUE;
+       }
+       else
+       {
+               /* spurious interrupt */
+               return IMG_FALSE;
+       }
+}
+
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+static IMG_BOOL RGXAckIrqMETA(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       return RGXAckHwIrq(psDevInfo,
+                                          RGX_CR_META_SP_MSLVIRQSTATUS,
+                                          RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN,
+                                          RGX_CR_META_SP_MSLVIRQSTATUS,
+                                          RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK);
+}
+#endif
+
+static IMG_BOOL RGXAckIrqMIPS(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       return RGXAckHwIrq(psDevInfo,
+                                          RGX_CR_MIPS_WRAPPER_IRQ_STATUS,
+                                          RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN,
+                                          RGX_CR_MIPS_WRAPPER_IRQ_CLEAR,
+                                          RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN);
+}
+
+static IMG_BOOL RGXAckIrqDedicated(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       /* The status & clearing registers are available on both the Host and
+        * Guest drivers and are agnostic of the FW CPU type. Due to the
+        * remappings done by the 2nd stage device MMU, all drivers assume
+        * they are accessing register bank 0. */
+       return RGXAckHwIrq(psDevInfo,
+                                          RGX_CR_IRQ_OS0_EVENT_STATUS,
+                                          RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_EN,
+                                          RGX_CR_IRQ_OS0_EVENT_CLEAR,
+                                          RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_EN);
+}
+
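+/* Top-half interrupt handler: acknowledge the hardware IRQ line, check for
+ * safety events, and schedule the main (and, if present, the Active Power
+ * Management) MISR when the FW interrupt counters indicate new work. */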
+static IMG_BOOL RGX_LISRHandler(void *pvData)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = pvData;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       IMG_BOOL bIrqAcknowledged = IMG_FALSE;
+
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+       IMG_UINT32 ui32idx, ui32IrqCnt;
+
+       for_each_irq_cnt(ui32idx)
+       {
+               get_irq_cnt_val(ui32IrqCnt, ui32idx, psDevInfo);
+               UPDATE_LISR_DBG_SNAPSHOT(ui32idx, ui32IrqCnt);
+       }
+       UPDATE_LISR_DBG_STATUS(RGX_LISR_INIT);
+       UPDATE_LISR_DBG_TIMESTAMP();
+#endif
+
+       UPDATE_LISR_DBG_COUNTER();
+
+       if (psDevInfo->bRGXPowered)
+       {
+               IMG_BOOL bSafetyEvent = RGXSafetyEventCheck(psDevInfo);
+
+               if ((psDevInfo->pfnRGXAckIrq == NULL) || psDevInfo->pfnRGXAckIrq(psDevInfo) || bSafetyEvent)
+               {
+                       bIrqAcknowledged = IMG_TRUE;
+
+                       if (SampleIRQCount(psDevInfo) || bSafetyEvent)
+                       {
+                               UPDATE_LISR_DBG_STATUS(RGX_LISR_PROCESSED);
+                               UPDATE_MISR_DBG_COUNTER();
+
+                               OSScheduleMISR(psDevInfo->pvMISRData);
+
+#if defined(SUPPORT_AUTOVZ)
+                               RGXUpdateAutoVzWdgToken(psDevInfo);
+#endif
+                               if (psDevInfo->pvAPMISRData != NULL)
+                               {
+                                       OSScheduleMISR(psDevInfo->pvAPMISRData);
+                               }
+                       }
+                       else
+                       {
+                               UPDATE_LISR_DBG_STATUS(RGX_LISR_FW_IRQ_COUNTER_NOT_UPDATED);
+                       }
+               }
+               else
+               {
+                       UPDATE_LISR_DBG_STATUS(RGX_LISR_NOT_TRIGGERED_BY_HW);
+               }
+       }
+       else
+       {
+               /* AutoVz drivers rebooting while the firmware is active must acknowledge
+                * and clear the hw IRQ line before RGXInit() has finished. */
+               if (!(psDevInfo->psDeviceNode->bAutoVzFwIsUp &&
+                         (psDevInfo->pfnRGXAckIrq != NULL) &&
+                         psDevInfo->pfnRGXAckIrq(psDevInfo)))
+               {
+                       UPDATE_LISR_DBG_STATUS(RGX_LISR_DEVICE_NOT_POWERED);
+               }
+       }
+
+       return bIrqAcknowledged;
+}
+
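+/* Flush any kernel CCB commands that could not be submitted earlier because
+ * the KCCB was full; called from MISR context, holding the power lock only
+ * for the duration of the resubmission. */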
+static void RGX_MISR_ProcessKCCBDeferredList(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       OS_SPINLOCK_FLAGS uiFlags;
+
+       /* First check whether there are pending commands in the deferred KCCB list */
+       OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
+       if (dllist_is_empty(&psDevInfo->sKCCBDeferredCommandsListHead))
+       {
+               OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
+               return;
+       }
+       OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
+
+       /* Hold the power lock to avoid further power transition requests
+          while the deferred KCCB list is being processed */
+       eError = PVRSRVPowerLock(psDeviceNode);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to acquire PowerLock (device: %p, error: %s)",
+                                __func__, psDeviceNode, PVRSRVGetErrorString(eError)));
+               return;
+       }
+
+       /* Try to send the deferred KCCB commands; do not poll from here */
+       eError = RGXSendCommandsFromDeferredList(psDevInfo, IMG_FALSE);
+
+       PVRSRVPowerUnlock(psDeviceNode);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_MESSAGE,
+                                "%s could not flush Deferred KCCB list, KCCB is full.",
+                                __func__));
+       }
+}
+
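+/* Active Power Management MISR: when the FW reports itself IDLE, the host
+ * issues an active power request so the GPU can be powered down; deferred
+ * requests are simply rescheduled. */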
+static void RGX_MISRHandler_CheckFWActivePowerState(void *psDevice)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = psDevice;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       if (psFwSysData->ePowState == RGXFWIF_POW_ON || psFwSysData->ePowState == RGXFWIF_POW_IDLE)
+       {
+               RGX_MISR_ProcessKCCBDeferredList(psDeviceNode);
+       }
+
+       if (psFwSysData->ePowState == RGXFWIF_POW_IDLE)
+       {
+               /* The FW is IDLE and therefore could be shut down */
+               eError = RGXActivePowerRequest(psDeviceNode);
+
+               if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED))
+               {
+                       if (eError != PVRSRV_ERROR_RETRY)
+                       {
+                               PVR_DPF((PVR_DBG_WARNING,
+                                       "%s: Failed RGXActivePowerRequest call (device: %p) with %s",
+                                       __func__, psDeviceNode, PVRSRVGetErrorString(eError)));
+                               PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+                       }
+                       else
+                       {
+                               /* Re-schedule the power down request as it was deferred. */
+                               OSScheduleMISR(psDevInfo->pvAPMISRData);
+                       }
+               }
+       }
+
+}
+
+/* Short aliases to keep the code below concise */
+#define GPU_IDLE       RGXFWIF_GPU_UTIL_STATE_IDLE
+#define GPU_ACTIVE     RGXFWIF_GPU_UTIL_STATE_ACTIVE
+#define GPU_BLOCKED    RGXFWIF_GPU_UTIL_STATE_BLOCKED
+#define MAX_ITERATIONS 64
+
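+/* Sample the GPU utilisation counters shared with the FW. The FW updates the
+ * counters without a host-visible lock, so the loop below re-reads them until
+ * two consecutive reads agree (seqlock-style), giving up after MAX_ITERATIONS;
+ * the whole sample is retried if the returned period looks implausibly short
+ * compared with the time since the caller's previous request. */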
+static PVRSRV_ERROR RGXGetGpuUtilStats(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                       IMG_HANDLE hGpuUtilUser,
+                                       RGXFWIF_GPU_UTIL_STATS *psReturnStats)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       volatile RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
+       RGXFWIF_GPU_UTIL_STATS *psAggregateStats;
+       IMG_UINT64 ui64TimeNow;
+       IMG_UINT32 ui32Attempts;
+       IMG_UINT32 ui32Remainder;
+
+
+       /***** (1) Initialise return stats *****/
+
+       psReturnStats->bValid = IMG_FALSE;
+       psReturnStats->ui64GpuStatIdle       = 0;
+       psReturnStats->ui64GpuStatActive     = 0;
+       psReturnStats->ui64GpuStatBlocked    = 0;
+       psReturnStats->ui64GpuStatCumulative = 0;
+
+       if (hGpuUtilUser == NULL)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+       psAggregateStats = hGpuUtilUser;
+
+
+       /* Try to acquire GPU utilisation counters and repeat if the FW is in the middle of an update */
+       for (ui32Attempts = 0; ui32Attempts < 4; ui32Attempts++)
+       {
+               IMG_UINT64 aui64TmpCounters[RGXFWIF_GPU_UTIL_STATE_NUM] = {0};
+               IMG_UINT64 ui64LastPeriod = 0, ui64LastWord = 0, ui64LastState = 0, ui64LastTime = 0;
+               IMG_UINT32 i = 0;
+
+
+               /***** (2) Get latest data from shared area *****/
+
+               OSLockAcquire(psDevInfo->hGPUUtilLock);
+
+               /*
+                * First attempt at detecting if the FW is in the middle of an update.
+                * This should also help if the FW is in the middle of a 64 bit variable update.
+                */
+               while (((ui64LastWord != psUtilFWCb->ui64LastWord) ||
+                               (aui64TmpCounters[ui64LastState] !=
+                                psUtilFWCb->aui64StatsCounters[ui64LastState])) &&
+                          (i < MAX_ITERATIONS))
+               {
+                       ui64LastWord  = psUtilFWCb->ui64LastWord;
+                       ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(ui64LastWord);
+                       aui64TmpCounters[GPU_IDLE]    = psUtilFWCb->aui64StatsCounters[GPU_IDLE];
+                       aui64TmpCounters[GPU_ACTIVE]  = psUtilFWCb->aui64StatsCounters[GPU_ACTIVE];
+                       aui64TmpCounters[GPU_BLOCKED] = psUtilFWCb->aui64StatsCounters[GPU_BLOCKED];
+                       i++;
+               }
+
+               OSLockRelease(psDevInfo->hGPUUtilLock);
+
+               if (i == MAX_ITERATIONS)
+               {
+                       PVR_DPF((PVR_DBG_WARNING,
+                                "RGXGetGpuUtilStats could not get reliable data after trying %u times", i));
+                       return PVRSRV_ERROR_TIMEOUT;
+               }
+
+
+               /***** (3) Compute return stats *****/
+
+               /* Update temp counters to account for the time since the last update to the shared ones */
+               OSMemoryBarrier(NULL); /* Ensure the current time is read after the loop above */
+               ui64TimeNow    = RGXFWIF_GPU_UTIL_GET_TIME(RGXTimeCorrGetClockns64(psDeviceNode));
+               ui64LastTime   = RGXFWIF_GPU_UTIL_GET_TIME(ui64LastWord);
+               ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime);
+               aui64TmpCounters[ui64LastState] += ui64LastPeriod;
+
+               /* Get statistics for a user since its last request */
+               psReturnStats->ui64GpuStatIdle = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_IDLE],
+                                                                            psAggregateStats->ui64GpuStatIdle);
+               psReturnStats->ui64GpuStatActive = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_ACTIVE],
+                                                                              psAggregateStats->ui64GpuStatActive);
+               psReturnStats->ui64GpuStatBlocked = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_BLOCKED],
+                                                                               psAggregateStats->ui64GpuStatBlocked);
+               psReturnStats->ui64GpuStatCumulative = psReturnStats->ui64GpuStatIdle +
+                                                      psReturnStats->ui64GpuStatActive +
+                                                      psReturnStats->ui64GpuStatBlocked;
+
+               if (psAggregateStats->ui64TimeStamp != 0)
+               {
+                       IMG_UINT64 ui64TimeSinceLastCall = ui64TimeNow - psAggregateStats->ui64TimeStamp;
+                       /* We expect to return at least 75% of the time since the last call in GPU stats */
+                       IMG_UINT64 ui64MinReturnedStats = ui64TimeSinceLastCall - (ui64TimeSinceLastCall / 4);
+
+                       /*
+                        * If the returned stats are substantially lower than the time since
+                        * the last call, then the Host might have read a partial update from the FW.
+                        * If this happens, try sampling the shared counters again.
+                        */
+                       if (psReturnStats->ui64GpuStatCumulative < ui64MinReturnedStats)
+                       {
+                               PVR_DPF((PVR_DBG_MESSAGE,
+                                        "%s: Return stats (%" IMG_UINT64_FMTSPEC ") too low "
+                                        "(call period %" IMG_UINT64_FMTSPEC ")",
+                                        __func__, psReturnStats->ui64GpuStatCumulative, ui64TimeSinceLastCall));
+                               PVR_DPF((PVR_DBG_MESSAGE, "%s: Attempt #%u has failed, trying again",
+                                        __func__, ui32Attempts));
+                               continue;
+                       }
+               }
+
+               break;
+       }
+
+
+       /***** (4) Update aggregate stats for the current user *****/
+
+       psAggregateStats->ui64GpuStatIdle    += psReturnStats->ui64GpuStatIdle;
+       psAggregateStats->ui64GpuStatActive  += psReturnStats->ui64GpuStatActive;
+       psAggregateStats->ui64GpuStatBlocked += psReturnStats->ui64GpuStatBlocked;
+       psAggregateStats->ui64TimeStamp       = ui64TimeNow;
+
+
+       /***** (5) Convert return stats to microseconds *****/
+
+       psReturnStats->ui64GpuStatIdle       = OSDivide64(psReturnStats->ui64GpuStatIdle, 1000, &ui32Remainder);
+       psReturnStats->ui64GpuStatActive     = OSDivide64(psReturnStats->ui64GpuStatActive, 1000, &ui32Remainder);
+       psReturnStats->ui64GpuStatBlocked    = OSDivide64(psReturnStats->ui64GpuStatBlocked, 1000, &ui32Remainder);
+       psReturnStats->ui64GpuStatCumulative = OSDivide64(psReturnStats->ui64GpuStatCumulative, 1000, &ui32Remainder);
+
+       /* Check that the return stats make sense */
+       if (psReturnStats->ui64GpuStatCumulative == 0)
+       {
+               /* We can enter here only if all the RGXFWIF_GPU_UTIL_GET_PERIOD
+                * calculations returned 0. This could happen if the GPU frequency
+                * value is not well calibrated and the FW is updating the GPU state
+                * while the Host is reading it.
+                * When such an event happens frequently, the timers or the aggregate
+                * stats might not be accurate...
+                */
+               PVR_DPF((PVR_DBG_WARNING, "RGXGetGpuUtilStats could not get reliable data."));
+               return PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+       }
+
+       psReturnStats->bValid = IMG_TRUE;
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SORgxGpuUtilStatsRegister(IMG_HANDLE *phGpuUtilUser)
+{
+       RGXFWIF_GPU_UTIL_STATS *psAggregateStats;
+
+       /* NoStats used since this may be called outside of the register/de-register
+        * process calls which track memory use. */
+       psAggregateStats = OSAllocMemNoStats(sizeof(RGXFWIF_GPU_UTIL_STATS));
+       if (psAggregateStats == NULL)
+       {
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       psAggregateStats->ui64GpuStatIdle    = 0;
+       psAggregateStats->ui64GpuStatActive  = 0;
+       psAggregateStats->ui64GpuStatBlocked = 0;
+       psAggregateStats->ui64TimeStamp      = 0;
+
+       /* Not used */
+       psAggregateStats->bValid = IMG_FALSE;
+       psAggregateStats->ui64GpuStatCumulative = 0;
+
+       *phGpuUtilUser = psAggregateStats;
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SORgxGpuUtilStatsUnregister(IMG_HANDLE hGpuUtilUser)
+{
+       RGXFWIF_GPU_UTIL_STATS *psAggregateStats;
+
+       if (hGpuUtilUser == NULL)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       psAggregateStats = hGpuUtilUser;
+       OSFreeMemNoStats(psAggregateStats);
+
+       return PVRSRV_OK;
+}
+
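+/* Illustrative usage sketch (not part of this driver; assumes
+ * psDevInfo->pfnGetGpuUtilStats keeps the RGXGetGpuUtilStats() signature):
+ *
+ *     IMG_HANDLE hUser;
+ *     RGXFWIF_GPU_UTIL_STATS sStats;
+ *
+ *     if (SORgxGpuUtilStatsRegister(&hUser) == PVRSRV_OK)
+ *     {
+ *         if (psDevInfo->pfnGetGpuUtilStats(psDeviceNode, hUser, &sStats) == PVRSRV_OK)
+ *         {
+ *             // sStats.ui64GpuStat{Idle,Active,Blocked} hold microseconds spent
+ *             // in each state since this user's previous query.
+ *         }
+ *         SORgxGpuUtilStatsUnregister(hUser);
+ *     }
+ */
+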
+/*
+       RGX MISR Handler
+*/
+static void RGX_MISRHandler_Main (void *pvData)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = pvData;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       /* Give the HWPerf service a chance to transfer some data from the FW
+        * buffer to the host driver transport layer buffer.
+        */
+       RGXHWPerfDataStoreCB(psDeviceNode);
+
+       /* Inform the other Services devices that we have finished an operation */
+       PVRSRVNotifyCommandCompletion(psDeviceNode);
+
+#if defined(SUPPORT_PDVFS) && defined(RGXFW_META_SUPPORT_2ND_THREAD)
+       /* Normally the firmware CCB only exists for the primary FW thread; when PDVFS
+          is running on the secondary FW thread, its CCB is processed here */
+       RGXPDVFSCheckCoreClkRateChange(psDeviceNode->pvDevice);
+#endif
+
+       /* Handle Safety events if necessary */
+       RGXSafetyEventHandler(psDeviceNode->pvDevice);
+
+       /* Signal the global event object */
+       PVRSRVSignalGlobalEO();
+
+       /* Process the Firmware CCB for pending commands */
+       RGXCheckFirmwareCCB(psDeviceNode->pvDevice);
+
+       /* Calibrate the GPU frequency and recorrelate Host and GPU timers (done every few seconds) */
+       RGXTimeCorrRestartPeriodic(psDeviceNode);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       /* Process Workload Estimation Specific commands from the FW */
+       WorkEstCheckFirmwareCCB(psDeviceNode->pvDevice);
+#endif
+
+       if (psDevInfo->pvAPMISRData == NULL)
+       {
+               RGX_MISR_ProcessKCCBDeferredList(psDeviceNode);
+       }
+}
+#endif /* !defined(NO_HARDWARE) */
+
+
+#if defined(PDUMP)
+static PVRSRV_ERROR RGXPDumpBootldrData(PVRSRV_DEVICE_NODE *psDeviceNode,
+               PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       PMR *psFWDataPMR;
+       RGXMIPSFW_BOOT_DATA *psBootData;
+       IMG_DEV_PHYADDR sTmpAddr;
+       IMG_UINT32 ui32BootConfOffset, ui32ParamOffset, i;
+       PVRSRV_ERROR eError;
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+       psFWDataPMR = (PMR *)(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR);
+       ui32BootConfOffset = RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA);
+       ui32BootConfOffset += RGXMIPSFW_BOOTLDR_CONF_OFFSET;
+
+       /* The physical addresses used by a pdump player will differ from
+        * the ones we have put in the MIPS bootloader configuration data.
+        * We have to tell the pdump player to replace the original values with the real ones.
+        */
+       PDUMPCOMMENT(psDeviceNode, "Pass new boot parameters to the FW");
+
+       /* Rogue Registers physical address */
+       ui32ParamOffset = ui32BootConfOffset + offsetof(RGXMIPSFW_BOOT_DATA, ui64RegBase);
+
+       eError = PDumpRegLabelToMem64(RGX_PDUMPREG_NAME,
+                       0x0,
+                       psFWDataPMR,
+                       ui32ParamOffset,
+                       PDUMP_FLAGS_CONTINUOUS);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXPDumpBootldrData: Dump of Rogue registers phy address failed (%u)", eError));
+               return eError;
+       }
+
+       /* Page Table physical Address */
+       eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sTmpAddr);
+       if (eError !=  PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "RGXPDumpBootldrData: MMU_AcquireBaseAddr failed (%u)",
+                        eError));
+               return eError;
+       }
+
+       eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc,
+                                                                        (void **)&psBootData);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire pointer to FW data (%s)",
+                               __func__, PVRSRVGetErrorString(eError)));
+               return eError;
+       }
+
+       psBootData = IMG_OFFSET_ADDR(psBootData, ui32BootConfOffset);
+
+       for (i = 0; i < psBootData->ui32PTNumPages; i++)
+       {
+               ui32ParamOffset = ui32BootConfOffset +
+                       offsetof(RGXMIPSFW_BOOT_DATA, aui64PTPhyAddr[0])
+                       + i * sizeof(psBootData->aui64PTPhyAddr[0]);
+
+               eError = PDumpPTBaseObjectToMem64(psDeviceNode->psFirmwareMMUDevAttrs->pszMMUPxPDumpMemSpaceName,
+                               psFWDataPMR,
+                               0,
+                               ui32ParamOffset,
+                               PDUMP_FLAGS_CONTINUOUS,
+                               MMU_LEVEL_1,
+                               sTmpAddr.uiAddr,
+                               i << psBootData->ui32PTLog2PageSize);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "RGXPDumpBootldrData: Dump of page tables phy address failed (%u)", eError));
+                       return eError;
+               }
+       }
+
+       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+
+       /* Stack physical address */
+       ui32ParamOffset = ui32BootConfOffset + offsetof(RGXMIPSFW_BOOT_DATA, ui64StackPhyAddr);
+
+       eError = PDumpMemLabelToMem64(psFWDataPMR,
+                       psFWDataPMR,
+                       RGXGetFWImageSectionOffset(NULL, MIPS_STACK),
+                       ui32ParamOffset,
+                       PDUMP_FLAGS_CONTINUOUS);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXPDumpBootldrData: Dump of stack phy address failed (%u)", eError));
+               return eError;
+       }
+
+       return eError;
+}
+#endif /* PDUMP */
+
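+/* Record in psDevInfo->sLayerParams everything the power layer needs for
+ * RGXStart/RGXStop: the kernel MMU page catalogue address for META/RISCV
+ * firmware CPUs, or the register bank and boot/code/data remap addresses for
+ * MIPS, optionally forwarding them to a trusted device. */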
+static PVRSRV_ERROR RGXSetPowerParams(PVRSRV_RGXDEV_INFO   *psDevInfo,
+                                      PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+       /* Save information used on power transitions for later
+        * (when RGXStart and RGXStop are executed)
+        */
+       psDevInfo->sLayerParams.psDevInfo = psDevInfo;
+       psDevInfo->sLayerParams.psDevConfig = psDevConfig;
+#if defined(PDUMP)
+       psDevInfo->sLayerParams.ui32PdumpFlags = PDUMP_FLAGS_CONTINUOUS;
+#endif
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) || defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK)
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) ||
+           RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+       {
+               IMG_DEV_PHYADDR sKernelMMUCtxPCAddr;
+
+               if (psDevInfo->psDeviceNode->bAutoVzFwIsUp)
+               {
+                       /* If AutoVz firmware is up at this stage, the driver initialised it
+                        * during a previous life-cycle. The firmware's memory is already pre-mapped
+                        * and the MMU page tables reside in the predetermined memory carveout.
+                        * The Kernel MMU Context created in this life-cycle is a dummy structure
+                        * that is not used for mapping.
+                        * To program the Device's BIF with the correct PC address, use the base
+                        * address of the carveout reserved for MMU mappings as the Kernel MMU PC Address. */
+#if defined(PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR)
+                       sKernelMMUCtxPCAddr.uiAddr = PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR;
+#else
+                       PHYS_HEAP_CONFIG *psFwHeapCfg = FindPhysHeapConfig(psDevConfig,
+                                                                                                                          PHYS_HEAP_USAGE_FW_MAIN);
+                       eError = (psFwHeapCfg != NULL) ? PVRSRV_OK : PVRSRV_ERROR_PHYSHEAP_CONFIG;
+                       PVR_LOG_RETURN_IF_ERROR(eError, "FindPhysHeapConfig(PHYS_HEAP_USAGE_FW_MAIN)");
+
+                       sKernelMMUCtxPCAddr.uiAddr = psFwHeapCfg->sCardBase.uiAddr +
+                                                                                (RGX_FIRMWARE_RAW_HEAP_SIZE * RGX_NUM_OS_SUPPORTED);
+#endif /* PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR */
+               }
+               else
+               {
+                       eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx,
+                                                    &sKernelMMUCtxPCAddr);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire Kernel MMU Ctx page catalog"));
+                               return eError;
+                       }
+               }
+
+               psDevInfo->sLayerParams.sPCAddr = sKernelMMUCtxPCAddr;
+       }
+       else
+#endif
+       {
+               PMR *psFWCodePMR = (PMR *)(psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR);
+               PMR *psFWDataPMR = (PMR *)(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR);
+               IMG_DEV_PHYADDR sPhyAddr;
+               IMG_BOOL bValid;
+
+#if defined(SUPPORT_ALT_REGBASE)
+               psDevInfo->sLayerParams.sGPURegAddr = psDevConfig->sAltRegsGpuPBase;
+#else
+               /* The physical address of the GPU registers needs to be translated
+                * in case we are in an LMA scenario
+                */
+               PhysHeapCpuPAddrToDevPAddr(psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL],
+                               1,
+                               &sPhyAddr,
+                               &(psDevConfig->sRegsCpuPBase));
+
+               psDevInfo->sLayerParams.sGPURegAddr = sPhyAddr;
+#endif
+
+               /* The register bank must be aligned to 512KB (as per the core integration) to
+                * prevent the FW from accessing incorrect registers */
+               if ((psDevInfo->sLayerParams.sGPURegAddr.uiAddr & 0x7FFFFU) != 0U)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Register bank must be aligned to 512KB, but current address (0x%016"IMG_UINT64_FMTSPECX") is not",
+                                               psDevInfo->sLayerParams.sGPURegAddr.uiAddr));
+                       return PVRSRV_ERROR_INIT_FAILURE;
+               }
+
+               eError = RGXGetPhyAddr(psFWCodePMR,
+                               &sPhyAddr,
+                               RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_CODE),
+                               OSGetPageShift(), /* FW will be using the same page size as the OS */
+                               1,
+                               &bValid);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire FW boot/NMI code address"));
+                       return eError;
+               }
+
+               psDevInfo->sLayerParams.sBootRemapAddr = sPhyAddr;
+
+               eError = RGXGetPhyAddr(psFWDataPMR,
+                               &sPhyAddr,
+                               RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA),
+                               OSGetPageShift(),
+                               1,
+                               &bValid);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire FW boot/NMI data address"));
+                       return eError;
+               }
+
+               psDevInfo->sLayerParams.sDataRemapAddr = sPhyAddr;
+
+               eError = RGXGetPhyAddr(psFWCodePMR,
+                               &sPhyAddr,
+                               RGXGetFWImageSectionOffset(NULL, MIPS_EXCEPTIONS_CODE),
+                               OSGetPageShift(),
+                               1,
+                               &bValid);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire FW exceptions address"));
+                       return eError;
+               }
+
+               psDevInfo->sLayerParams.sCodeRemapAddr = sPhyAddr;
+
+               psDevInfo->sLayerParams.sTrampolineRemapAddr.uiAddr = psDevInfo->psTrampoline->sPhysAddr.uiAddr;
+
+               psDevInfo->sLayerParams.bDevicePA0IsValid = psDevConfig->bDevicePA0IsValid;
+       }
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION)
+       /* Send the information used on power transitions to the trusted device, as
+        * in this setup the driver cannot start/stop the GPU or perform resets
+        */
+       if (psDevConfig->pfnTDSetPowerParams)
+       {
+               PVRSRV_TD_POWER_PARAMS sTDPowerParams;
+
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+               if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+               {
+                       sTDPowerParams.sPCAddr = psDevInfo->sLayerParams.sPCAddr;
+               }
+#endif
+#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK)
+               if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+               {
+                       sTDPowerParams.sPCAddr = psDevInfo->sLayerParams.sPCAddr;
+               }
+#endif
+               if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+               {
+                       sTDPowerParams.sGPURegAddr    = psDevInfo->sLayerParams.sGPURegAddr;
+                       sTDPowerParams.sBootRemapAddr = psDevInfo->sLayerParams.sBootRemapAddr;
+                       sTDPowerParams.sCodeRemapAddr = psDevInfo->sLayerParams.sCodeRemapAddr;
+                       sTDPowerParams.sDataRemapAddr = psDevInfo->sLayerParams.sDataRemapAddr;
+               }
+
+               eError = psDevConfig->pfnTDSetPowerParams(psDevConfig->hSysData,
+                                                                                                 &sTDPowerParams);
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: TDSetPowerParams not implemented!"));
+               eError = PVRSRV_ERROR_NOT_IMPLEMENTED;
+       }
+#endif
+
+       return eError;
+}
+
+/*
+       RGXSystemHasFBCDCVersion31
+*/
+static IMG_BOOL RGXSystemHasFBCDCVersion31(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+#if defined(SUPPORT_VALIDATION)
+       IMG_UINT32 ui32FBCDCVersionOverride = 0;
+#endif
+
+#if defined(FIX_HW_ERN_66622_BIT_MASK)
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       if (RGX_IS_ERN_SUPPORTED(psDevInfo, 66622))
+       {
+#if defined(SUPPORT_VALIDATION)
+               void *pvAppHintState = NULL;
+
+               IMG_UINT32 ui32AppHintDefault;
+
+               OSCreateKMAppHintState(&pvAppHintState);
+               ui32AppHintDefault = PVRSRV_APPHINT_FBCDCVERSIONOVERRIDE;
+               OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, FBCDCVersionOverride,
+                                    &ui32AppHintDefault, &ui32FBCDCVersionOverride);
+               OSFreeKMAppHintState(pvAppHintState);
+
+               if (ui32FBCDCVersionOverride > 0)
+               {
+                       if (ui32FBCDCVersionOverride == 2)
+                       {
+                               return IMG_TRUE;
+                       }
+               }
+               else
+#endif
+               {
+                       if (psDeviceNode->psDevConfig->bHasFBCDCVersion31)
+                       {
+                               return IMG_TRUE;
+                       }
+               }
+       }
+       else
+#endif
+       {
+
+#if defined(SUPPORT_VALIDATION)
+               if (ui32FBCDCVersionOverride == 2)
+               {
+                       PVR_DPF((PVR_DBG_WARNING,
+                                "%s: FBCDCVersionOverride forces FBC3.1 but this core doesn't support it!",
+                                __func__));
+               }
+#endif
+
+#if !defined(NO_HARDWARE)
+               if (psDeviceNode->psDevConfig->bHasFBCDCVersion31)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: System uses FBCDC3.1 but GPU doesn't support it!",
+                                __func__));
+               }
+#endif
+       }
+
+       return IMG_FALSE;
+}
+
+/*
+       RGXDevMMUAttributes
+*/
+static MMU_DEVICEATTRIBS *RGXDevMMUAttributes(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                              IMG_BOOL bKernelMemoryCtx)
+{
+       MMU_DEVICEATTRIBS *psMMUDevAttrs;
+
+       if ((psDeviceNode->pfnCheckDeviceFeature) &&
+               PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, MIPS))
+       {
+               psMMUDevAttrs = bKernelMemoryCtx ?
+                               psDeviceNode->psFirmwareMMUDevAttrs :
+                               psDeviceNode->psMMUDevAttrs;
+       }
+       else
+       {
+               psMMUDevAttrs = psDeviceNode->psMMUDevAttrs;
+       }
+
+       return psMMUDevAttrs;
+}
+
+/*
+ * RGXInitDevPart2
+ */
+PVRSRV_ERROR RGXInitDevPart2(PVRSRV_DEVICE_NODE        *psDeviceNode,
+               IMG_UINT32                      ui32DeviceFlags,
+               IMG_UINT32                      ui32HWPerfHostFilter,
+               RGX_ACTIVEPM_CONF               eActivePMConf)
+{
+       PVRSRV_ERROR                    eError;
+       PVRSRV_RGXDEV_INFO              *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_DEV_POWER_STATE  eDefaultPowerState = PVRSRV_DEV_POWER_STATE_ON;
+       PVRSRV_DEVICE_CONFIG    *psDevConfig = psDeviceNode->psDevConfig;
+
+       /* Assume the system layer has turned power on by this point; this is required before powering the device */
+       psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_ON;
+
+       PDUMPCOMMENT(psDeviceNode, "RGX Initialisation Part 2");
+
+#if defined(PDUMP)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+       {
+               RGXPDumpBootldrData(psDeviceNode, psDevInfo);
+       }
+#endif
+#if defined(TIMING) || defined(DEBUG)
+       OSUserModeAccessToPerfCountersEn();
+#endif
+
+       /* Initialise Device Flags */
+       psDevInfo->ui32DeviceFlags = 0;
+       RGXSetDeviceFlags(psDevInfo, ui32DeviceFlags, IMG_TRUE);
+
+       /* Allocate the DVFS table (it needs to be allocated before the GPU trace
+        * events component is initialised because there is a dependency between them) */
+       psDevInfo->psGpuDVFSTable = OSAllocZMem(sizeof(*(psDevInfo->psGpuDVFSTable)));
+       PVR_LOG_GOTO_IF_NOMEM(psDevInfo->psGpuDVFSTable, eError, ErrorExit);
+
+       if (psDevInfo->ui32HWPerfHostFilter == 0)
+       {
+               RGXHWPerfHostSetEventFilter(psDevInfo, ui32HWPerfHostFilter);
+       }
+
+       /* If HWPerf enabled allocate all resources for the host side buffer. */
+       if (psDevInfo->ui32HWPerfHostFilter != 0)
+       {
+               if (RGXHWPerfHostInitOnDemandResources(psDevInfo) != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "HWPerfHost buffer on demand"
+                               " initialisation failed."));
+               }
+       }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       /* Initialise work estimation lock */
+       eError = OSLockCreate(&psDevInfo->hWorkEstLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(WorkEstLock)", ErrorExit);
+#endif
+
+       /* Initialise lists of ZSBuffers */
+       eError = OSLockCreate(&psDevInfo->hLockZSBuffer);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(LockZSBuffer)", ErrorExit);
+       dllist_init(&psDevInfo->sZSBufferHead);
+       psDevInfo->ui32ZSBufferCurrID = 1;
+
+       /* Initialise lists of growable Freelists */
+       eError = OSLockCreate(&psDevInfo->hLockFreeList);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(LockFreeList)", ErrorExit);
+       dllist_init(&psDevInfo->sFreeListHead);
+       psDevInfo->ui32FreelistCurrID = 1;
+
+       eError = OSLockCreate(&psDevInfo->hDebugFaultInfoLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(DebugFaultInfoLock)", ErrorExit);
+
+       if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED)
+       {
+               eError = OSLockCreate(&psDevInfo->hMMUCtxUnregLock);
+               PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(MMUCtxUnregLock)", ErrorExit);
+       }
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+       {
+               eError = OSLockCreate(&psDevInfo->hNMILock);
+               PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(NMILock)", ErrorExit);
+       }
+
+       /* Setup GPU utilisation stats update callback */
+       eError = OSLockCreate(&psDevInfo->hGPUUtilLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(GPUUtilLock)", ErrorExit);
+#if !defined(NO_HARDWARE)
+       psDevInfo->pfnGetGpuUtilStats = RGXGetGpuUtilStats;
+#endif
+
+       eDefaultPowerState = PVRSRV_DEV_POWER_STATE_ON;
+       psDevInfo->eActivePMConf = eActivePMConf;
+
+       /* Set up the Active Power Management (APM) callback */
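+       /* APM is enabled when the system layer's timing info requests it and the
+        * configuration is RGX_ACTIVEPM_DEFAULT, or when it is forced on; it is
+        * disabled for virtualised (non-NATIVE) drivers and under AutoVz. */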
+#if !defined(NO_HARDWARE)
+       {
+               RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+               IMG_BOOL bSysEnableAPM = psRGXData->psRGXTimingInfo->bEnableActivePM;
+               IMG_BOOL bEnableAPM = ((eActivePMConf == RGX_ACTIVEPM_DEFAULT) && bSysEnableAPM) ||
+                                                          (eActivePMConf == RGX_ACTIVEPM_FORCE_ON);
+
+               if (bEnableAPM && (!PVRSRV_VZ_MODE_IS(NATIVE)))
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "%s: Active Power Management disabled in virtualization mode", __func__));
+                       bEnableAPM = IMG_FALSE;
+               }
+
+#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) && defined(SUPPORT_AUTOVZ)
+               /* The AutoVz driver enables a virtualisation watchdog that is not compatible with APM */
+               PVR_ASSERT(bEnableAPM == IMG_FALSE);
+#endif
+
+               if (bEnableAPM)
+               {
+                       eError = OSInstallMISR(&psDevInfo->pvAPMISRData,
+                                       RGX_MISRHandler_CheckFWActivePowerState,
+                                       psDeviceNode,
+                                       "RGX_CheckFWActivePower");
+                       PVR_LOG_GOTO_IF_ERROR(eError, "OSInstallMISR(APMISR)", ErrorExit);
+
+                       /* Prevent the device from being woken up before there is something to do. */
+                       eDefaultPowerState = PVRSRV_DEV_POWER_STATE_OFF;
+               }
+       }
+#endif
+
+       PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableAPM,
+                                           RGXQueryAPMState,
+                                           RGXSetAPMState,
+                                           psDeviceNode,
+                                           NULL);
+
+       RGXTimeCorrInitAppHintCallbacks(psDeviceNode);
+
+       /* Register the device with the power manager */
+       eError = PVRSRVRegisterPowerDevice(psDeviceNode,
+                       (PVRSRV_VZ_MODE_IS(NATIVE)) ? &RGXPrePowerState : &RGXVzPrePowerState,
+                       (PVRSRV_VZ_MODE_IS(NATIVE)) ? &RGXPostPowerState : &RGXVzPostPowerState,
+                       psDevConfig->pfnPrePowerState, psDevConfig->pfnPostPowerState,
+                       &RGXPreClockSpeedChange, &RGXPostClockSpeedChange,
+                       &RGXForcedIdleRequest, &RGXCancelForcedIdleRequest,
+                       &RGXDustCountChange,
+                       (IMG_HANDLE)psDeviceNode,
+                       PVRSRV_DEV_POWER_STATE_OFF,
+                       eDefaultPowerState);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRegisterPowerDevice", ErrorExit);
+
+       eError = RGXSetPowerParams(psDevInfo, psDevConfig);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetPowerParams", ErrorExit);
+
+#if defined(SUPPORT_VALIDATION)
+       {
+               void *pvAppHintState = NULL;
+
+               IMG_UINT32 ui32AppHintDefault;
+
+               OSCreateKMAppHintState(&pvAppHintState);
+               ui32AppHintDefault = PVRSRV_APPHINT_TESTSLRINTERVAL;
+               OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, TestSLRInterval,
+                                    &ui32AppHintDefault, &psDevInfo->ui32TestSLRInterval);
+               PVR_LOG(("OSGetKMAppHintUINT32(TestSLRInterval) ui32AppHintDefault=%d, psDevInfo->ui32TestSLRInterval=%d",
+                       ui32AppHintDefault, psDevInfo->ui32TestSLRInterval));
+               OSFreeKMAppHintState(pvAppHintState);
+               psDevInfo->ui32TestSLRCount = psDevInfo->ui32TestSLRInterval;
+               psDevInfo->ui32SLRSkipFWAddr = 0;
+
+               ui32AppHintDefault = 0;
+               OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, ECCRAMErrInj, &ui32AppHintDefault, &psDevInfo->ui32ECCRAMErrInjModule);
+               psDevInfo->ui32ECCRAMErrInjInterval = RGXKM_ECC_ERR_INJ_INTERVAL;
+
+#if defined(PDUMP) && defined(SUPPORT_VALIDATION)
+               /* POL on ECC RAM GPU fault events, MARS is FW fault */
+               if (psDevInfo->ui32ECCRAMErrInjModule != RGXKM_ECC_ERR_INJ_DISABLE &&
+                       psDevInfo->ui32ECCRAMErrInjModule != RGXKM_ECC_ERR_INJ_MARS)
+               {
+                       PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_DEINIT, "Verify ECC fault event");
+                       eError = PDUMPREGPOL(psDeviceNode, RGX_PDUMPREG_NAME,
+                                                                RGX_CR_SCRATCH11,
+                                                                1U,
+                                                                0xFFFFFFFF,
+                                                                PDUMP_FLAGS_DEINIT,
+                                                                PDUMP_POLL_OPERATOR_EQUAL);
+               }
+#endif
+       }
+#endif
+
+#if defined(PDUMP)
+#if defined(NO_HARDWARE)
+       PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_DEINIT, "Wait for the FW to signal idle");
+
+       /* Kick the FW once, in case it still needs to detect and set the idle state */
+       PDUMPREG32(psDeviceNode, RGX_PDUMPREG_NAME,
+                          RGX_CR_MTS_SCHEDULE,
+                          RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK,
+                          PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_DEINIT);
+
+       eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfFwSysDataMemDesc,
+                                       offsetof(RGXFWIF_SYSDATA, ePowState),
+                                       RGXFWIF_POW_IDLE,
+                                       0xFFFFFFFFU,
+                                       PDUMP_POLL_OPERATOR_EQUAL,
+                                       PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_DEINIT);
+       PVR_LOG_GOTO_IF_ERROR(eError, "DevmemPDumpDevmemPol32", ErrorExit);
+#endif
+
+       /* Run RGXStop with the correct PDump flags to feed the last-frame deinit buffer */
+       PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_DEINIT,
+                             "RGX deinitialisation commands");
+
+       psDevInfo->sLayerParams.ui32PdumpFlags |= PDUMP_FLAGS_DEINIT | PDUMP_FLAGS_NOHW;
+
+       if (! PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               eError = RGXStop(&psDevInfo->sLayerParams);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXStop", ErrorExit);
+       }
+
+       psDevInfo->sLayerParams.ui32PdumpFlags &= ~(PDUMP_FLAGS_DEINIT | PDUMP_FLAGS_NOHW);
+#endif
+
+#if !defined(NO_HARDWARE)
+       eError = RGXInstallProcessQueuesMISR(&psDevInfo->hProcessQueuesMISR, psDeviceNode);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXInstallProcessQueuesMISR", ErrorExit);
+
+       /* Register RGX to receive notifies when other devices complete some work */
+       PVRSRVRegisterCmdCompleteNotify(&psDeviceNode->hCmdCompNotify, &RGXScheduleProcessQueuesKM, psDeviceNode);
+
+       /* Register the interrupt handlers */
+       eError = OSInstallMISR(&psDevInfo->pvMISRData,
+                       RGX_MISRHandler_Main,
+                       psDeviceNode,
+                       "RGX_Main");
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSInstallMISR(MISR)", ErrorExit);
+
+       /* Register appropriate mechanism for clearing hw interrupts */
+       if ((RGX_IS_FEATURE_SUPPORTED(psDevInfo, IRQ_PER_OS)) && (!PVRSRV_VZ_MODE_IS(NATIVE)))
+       {
+               psDevInfo->pfnRGXAckIrq = RGXAckIrqDedicated;
+       }
+       else if (PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               psDevInfo->pfnRGXAckIrq = NULL;
+       }
+       else
+       {
+               /* native and host drivers must clear the unique GPU physical interrupt */
+               if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+               {
+                       psDevInfo->pfnRGXAckIrq = RGXAckIrqMIPS;
+               }
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+               else if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+               {
+                       psDevInfo->pfnRGXAckIrq = RGXAckIrqMETA;
+               }
+#endif
+#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK)
+               else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+               {
+                       psDevInfo->pfnRGXAckIrq = RGXAckIrqDedicated;
+               }
+#endif
+               else
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: GPU IRQ clearing mechanism not implemented "
+                                                                       "for this architecture.", __func__));
+                       PVR_LOG_GOTO_WITH_ERROR("pfnRGXAckIrq", eError, PVRSRV_ERROR_NOT_IMPLEMENTED, ErrorExit);
+               }
+       }
+
+#if defined(RGX_IRQ_HYPERV_HANDLER)
+       /* The hypervisor receives and acknowledges the GPU irq, then it injects an
+        * irq only into the recipient OS. The KM driver doesn't handle the GPU irq line */
+       psDevInfo->pfnRGXAckIrq = NULL;
+#endif
+
+       eError = SysInstallDeviceLISR(psDevConfig->hSysData,
+                                                                 psDevConfig->ui32IRQ,
+                                                                 PVRSRV_MODNAME,
+                                                                 RGX_LISRHandler,
+                                                                 psDeviceNode,
+                                                                 &psDevInfo->pvLISRData);
+       PVR_LOG_GOTO_IF_ERROR(eError, "SysInstallDeviceLISR", ErrorExit);
+#endif /* !defined(NO_HARDWARE) */
+
+#if defined(PDUMP)
+/* We need to wrap the check for S7_CACHE_HIERARCHY being supported inside
+ * #if defined(RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK)...#endif, as the
+ * RGX_IS_FEATURE_SUPPORTED macro references a bitmask define derived from its
+ * last parameter which will not exist on architectures which do not have this
+ * feature.
+ * Note we check for RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK rather than for
+ * RGX_FEATURE_S7_CACHE_HIERARCHY (which might seem a better choice) as this
+ * means we can build the kernel driver without having to worry about the BVNC
+ * (the BIT_MASK is defined in rgx_bvnc_defs_km.h for all BVNCs for a given
+ *  architecture, whereas the FEATURE is only defined for those BVNCs that
+ *  support it).
+ */
+#if defined(RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK)
+       if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_CACHE_HIERARCHY)))
+#endif
+       {
+               if (!PVRSRVSystemSnoopingOfCPUCache(psDevConfig) &&
+                               !PVRSRVSystemSnoopingOfDeviceCache(psDevConfig))
+               {
+                       PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                             "System has NO cache snooping");
+               }
+               else
+               {
+                       if (PVRSRVSystemSnoopingOfCPUCache(psDevConfig))
+                       {
+                               PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                                     "System has CPU cache snooping");
+                       }
+                       if (PVRSRVSystemSnoopingOfDeviceCache(psDevConfig))
+                       {
+                               PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                                     "System has DEVICE cache snooping");
+                       }
+               }
+       }
+#endif
+
+#if defined(RGX_FEATURE_COMPUTE_ONLY_BIT_MASK)
+       if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE_ONLY))
+#endif
+       {
+               eError = PVRSRVTQLoadShaders(psDeviceNode);
+               PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVTQLoadShaders", ErrorExit);
+       }
+
+       psDevInfo->bDevInit2Done = IMG_TRUE;
+
+       return PVRSRV_OK;
+
+ErrorExit:
+       DevPart2DeInitRGX(psDeviceNode);
+
+       return eError;
+}
+
+#define VZ_RGX_FW_FILENAME_SUFFIX ".vz"
+#define RGX_64K_FW_FILENAME_SUFFIX ".64k"
+#define RGX_FW_FILENAME_MAX_SIZE   ((sizeof(RGX_FW_FILENAME)+ \
+                       RGX_BVNC_STR_SIZE_MAX+sizeof(VZ_RGX_FW_FILENAME_SUFFIX) + sizeof(RGX_64K_FW_FILENAME_SUFFIX)))
+
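+/* Build the two candidate firmware image names tried by RGXLoadAndGetFWData():
+ * "<RGX_FW_FILENAME>.<BVNC>" formatted with RGX_BVNC_STR_FMTSPEC and with the
+ * alternative RGX_BVNC_STRP_FMTSPEC, each with an optional ".vz" suffix for
+ * virtualised drivers and ".64k" for MIPS cores using 64KB OS pages; the plain
+ * RGX_FW_FILENAME is the final fallback. */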
+static void _GetFWFileName(PVRSRV_DEVICE_NODE *psDeviceNode,
+               IMG_CHAR *pszFWFilenameStr,
+               IMG_CHAR *pszFWpFilenameStr)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       const IMG_CHAR * const pszFWFilenameSuffix =
+                       PVRSRV_VZ_MODE_IS(NATIVE) ? "" : VZ_RGX_FW_FILENAME_SUFFIX;
+
+       const IMG_CHAR * const pszFWFilenameSuffix2 =
+                       ((OSGetPageSize() == RGX_MMU_PAGE_SIZE_64KB) &&
+                        RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+                       ? RGX_64K_FW_FILENAME_SUFFIX : "";
+
+       OSSNPrintf(pszFWFilenameStr, RGX_FW_FILENAME_MAX_SIZE,
+                       "%s." RGX_BVNC_STR_FMTSPEC "%s%s",
+                       RGX_FW_FILENAME,
+                       psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V,
+                       psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C,
+                       pszFWFilenameSuffix, pszFWFilenameSuffix2);
+
+       OSSNPrintf(pszFWpFilenameStr, RGX_FW_FILENAME_MAX_SIZE,
+                       "%s." RGX_BVNC_STRP_FMTSPEC "%s%s",
+                       RGX_FW_FILENAME,
+                       psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V,
+                       psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C,
+                       pszFWFilenameSuffix, pszFWFilenameSuffix2);
+}
+
+PVRSRV_ERROR RGXLoadAndGetFWData(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                 OS_FW_IMAGE **ppsRGXFW,
+                                 const IMG_BYTE **ppbFWData)
+{
+       IMG_CHAR aszFWFilenameStr[RGX_FW_FILENAME_MAX_SIZE];
+       IMG_CHAR aszFWpFilenameStr[RGX_FW_FILENAME_MAX_SIZE];
+       IMG_CHAR *pszLoadedFwStr;
+       PVRSRV_ERROR eErr;
+
+       /* Prepare the image filenames to use in the following code */
+       _GetFWFileName(psDeviceNode, aszFWFilenameStr, aszFWpFilenameStr);
+
+       /* Get pointer to Firmware image */
+       pszLoadedFwStr = aszFWFilenameStr;
+       eErr = OSLoadFirmware(psDeviceNode, pszLoadedFwStr, OS_FW_VERIFY_FUNCTION, ppsRGXFW);
+       if (eErr == PVRSRV_ERROR_NOT_FOUND)
+       {
+               pszLoadedFwStr = aszFWpFilenameStr;
+               eErr = OSLoadFirmware(psDeviceNode, pszLoadedFwStr, OS_FW_VERIFY_FUNCTION, ppsRGXFW);
+               if (eErr == PVRSRV_ERROR_NOT_FOUND)
+               {
+                       pszLoadedFwStr = RGX_FW_FILENAME;
+                       eErr = OSLoadFirmware(psDeviceNode, pszLoadedFwStr, OS_FW_VERIFY_FUNCTION, ppsRGXFW);
+                       if (eErr == PVRSRV_ERROR_NOT_FOUND)
+                       {
+                               PVR_DPF((PVR_DBG_FATAL, "All RGX Firmware image loads failed for '%s' (%s)",
+                                               aszFWFilenameStr, PVRSRVGetErrorString(eErr)));
+                       }
+               }
+       }
+
+       if (eErr == PVRSRV_OK)
+       {
+               PVR_LOG(("RGX Firmware image '%s' loaded", pszLoadedFwStr));
+               *ppbFWData = (const IMG_BYTE*)OSFirmwareData(*ppsRGXFW);
+       }
+       else
+       {
+               *ppbFWData = NULL;
+       }
+
+       return eErr;
+
+}
+
+#if defined(PDUMP)
+PVRSRV_ERROR RGXInitHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       return PVRSRV_OK;
+}
+#endif
+
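+/* Create the device memory context used for all firmware allocations and look
+ * up its Main and Config heaps; on VZ Host drivers the per-OSID raw firmware
+ * heaps are also located (and, with static carveouts, pre-mapped), while Guest
+ * drivers mark their FW heaps as pre-mapped. */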
+PVRSRV_ERROR RGXInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       /* Set up the FW memory contexts */
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_ERROR       eError;
+
+#if defined(SUPPORT_AUTOVZ)
+       PHYS_HEAP *psDefaultPhysHeap = psDeviceNode->psMMUPhysHeap;
+
+       if (PVRSRV_VZ_MODE_IS(HOST) && (!psDeviceNode->bAutoVzFwIsUp))
+       {
+               /* Temporarily swap the MMU and default GPU physheap to allow the page
+                * tables of all memory mapped by the FwKernel context to be placed
+                * in a dedicated memory carveout. This should allow the firmware mappings to
+                * persist after a Host kernel crash or driver reset. */
+
+               psDeviceNode->psMMUPhysHeap = psDeviceNode->psFwMMUReservedPhysHeap;
+       }
+#endif
+
+       /* Register callbacks for creation of device memory contexts */
+       psDeviceNode->pfnRegisterMemoryContext = RGXRegisterMemoryContext;
+       psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext;
+
+       /* Create the memory context for the firmware. */
+       eError = DevmemCreateContext(psDeviceNode, DEVMEM_HEAPCFG_META,
+                                    &psDevInfo->psKernelDevmemCtx);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed DevmemCreateContext (%u)",
+                        __func__,
+                        eError));
+               goto failed_to_create_ctx;
+       }
+
+       eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, RGX_FIRMWARE_MAIN_HEAP_IDENT,
+                                     &psDevInfo->psFirmwareMainHeap);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed DevmemFindHeapByName (%u)",
+                        __func__,
+                        eError));
+               goto failed_to_find_heap;
+       }
+
+       eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, RGX_FIRMWARE_CONFIG_HEAP_IDENT,
+                                     &psDevInfo->psFirmwareConfigHeap);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed DevmemFindHeapByName (%u)",
+                        __func__,
+                        eError));
+               goto failed_to_find_heap;
+       }
+
+#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)
+       if (PVRSRV_VZ_MODE_IS(HOST))
+       {
+               IMG_UINT32 ui32OSID;
+               for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++)
+               {
+                       IMG_CHAR szHeapName[RA_MAX_NAME_LENGTH];
+
+                       OSSNPrintf(szHeapName, sizeof(szHeapName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSID);
+                       eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, szHeapName,
+                                                                                 &psDevInfo->psGuestFirmwareRawHeap[ui32OSID]);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "DevmemFindHeapByName", failed_to_find_heap);
+               }
+       }
+#endif
+
+#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
+       if (PVRSRV_VZ_MODE_IS(HOST))
+       {
+               IMG_DEV_PHYADDR sPhysHeapBase;
+               IMG_UINT32 ui32OSID;
+
+               eError = PhysHeapGetDevPAddr(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN], &sPhysHeapBase);
+               PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapGetDevPAddr", failed_to_find_heap);
+
+               for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++)
+               {
+                       IMG_DEV_PHYADDR sRawFwHeapBase = {sPhysHeapBase.uiAddr + (ui32OSID * RGX_FIRMWARE_RAW_HEAP_SIZE)};
+
+                       eError = RGXFwRawHeapAllocMap(psDeviceNode,
+                                                                                 ui32OSID,
+                                                                                 sRawFwHeapBase,
+                                                                                 RGX_FIRMWARE_RAW_HEAP_SIZE);
+                       if (eError != PVRSRV_OK)
+                       {
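+                               /* Unwind the raw heap mappings created so far before reporting the error */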
+                               for (; ui32OSID > RGX_FIRST_RAW_HEAP_OSID; ui32OSID--)
+                               {
+                                       RGXFwRawHeapUnmapFree(psDeviceNode, ui32OSID);
+                               }
+                               PVR_LOG_GOTO_IF_ERROR(eError, "RGXFwRawHeapAllocMap", failed_to_find_heap);
+                       }
+               }
+
+#if defined(SUPPORT_AUTOVZ)
+               /* restore default Px setup */
+               psDeviceNode->psMMUPhysHeap = psDefaultPhysHeap;
+#endif
+       }
+#else
+       if (PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               eError = PvzClientMapDevPhysHeap(psDeviceNode->psDevConfig);
+               PVR_LOG_GOTO_IF_ERROR(eError, "PvzClientMapDevPhysHeap", failed_to_find_heap);
+       }
+#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */
+
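+       /* Mark the firmware heaps as premapped when running as a Guest */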
+       if (PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               DevmemHeapSetPremapStatus(psDevInfo->psFirmwareMainHeap, IMG_TRUE);
+               DevmemHeapSetPremapStatus(psDevInfo->psFirmwareConfigHeap, IMG_TRUE);
+       }
+
+       return eError;
+
+failed_to_find_heap:
+       /*
+        * Clear the mem context create callbacks before destroying the RGX firmware
+        * context to avoid a spurious callback.
+        */
+       psDeviceNode->pfnRegisterMemoryContext = NULL;
+       psDeviceNode->pfnUnregisterMemoryContext = NULL;
+       DevmemDestroyContext(psDevInfo->psKernelDevmemCtx);
+       psDevInfo->psKernelDevmemCtx = NULL;
+failed_to_create_ctx:
+       return eError;
+}
+
+void RGXDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_ERROR       eError;
+
+#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
+       if (PVRSRV_VZ_MODE_IS(HOST))
+       {
+#if defined(SUPPORT_AUTOVZ)
+               PHYS_HEAP *psDefaultPhysHeap = psDeviceNode->psMMUPhysHeap;
+
+               psDeviceNode->psMMUPhysHeap = psDeviceNode->psFwMMUReservedPhysHeap;
+
+               if (!psDeviceNode->bAutoVzFwIsUp)
+#endif
+               {
+                       IMG_UINT32 ui32OSID;
+
+                       for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++)
+                       {
+                               RGXFwRawHeapUnmapFree(psDeviceNode, ui32OSID);
+                       }
+               }
+#if defined(SUPPORT_AUTOVZ)
+               psDeviceNode->psMMUPhysHeap = psDefaultPhysHeap;
+#endif
+       }
+#else
+       if (PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               (void) PvzClientUnmapDevPhysHeap(psDeviceNode->psDevConfig);
+
+               if (psDevInfo->psFirmwareMainHeap)
+               {
+                       DevmemHeapSetPremapStatus(psDevInfo->psFirmwareMainHeap, IMG_FALSE);
+               }
+               if (psDevInfo->psFirmwareConfigHeap)
+               {
+                       DevmemHeapSetPremapStatus(psDevInfo->psFirmwareConfigHeap, IMG_FALSE);
+               }
+       }
+#endif
+
+       /*
+        * Clear the mem context create callbacks before destroying the RGX firmware
+        * context to avoid a spurious callback.
+        */
+       psDeviceNode->pfnRegisterMemoryContext = NULL;
+       psDeviceNode->pfnUnregisterMemoryContext = NULL;
+
+       if (psDevInfo->psKernelDevmemCtx)
+       {
+               eError = DevmemDestroyContext(psDevInfo->psKernelDevmemCtx);
+               PVR_ASSERT(eError == PVRSRV_OK);
+       }
+}
+
+static PVRSRV_ERROR RGXAlignmentCheck(PVRSRV_DEVICE_NODE *psDevNode,
+                                      IMG_UINT32 ui32AlignChecksSizeUM,
+                                      IMG_UINT32 aui32AlignChecksUM[])
+{
+       static const IMG_UINT32 aui32AlignChecksKM[] = {RGXFW_ALIGN_CHECKS_INIT_KM};
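+       /* Offset to the UM portion of the FW alignment-check buffer: past the KM entries plus one extra word */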
+       IMG_UINT32 ui32UMChecksOffset = ARRAY_SIZE(aui32AlignChecksKM) + 1;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice;
+       IMG_UINT32 i, *paui32FWAlignChecks;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       /* Skip the alignment check if the driver is a Guest,
+        * since there is no firmware to check against. */
+       PVRSRV_VZ_RET_IF_MODE(GUEST, eError);
+
+       if (psDevInfo->psRGXFWAlignChecksMemDesc == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: FW Alignment Check Mem Descriptor is NULL",
+                        __func__));
+               return PVRSRV_ERROR_ALIGNMENT_ARRAY_NOT_AVAILABLE;
+       }
+
+       eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc,
+                                         (void **) &paui32FWAlignChecks);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed to acquire kernel address for alignment checks (%u)",
+                        __func__,
+                        eError));
+               return eError;
+       }
+
+       paui32FWAlignChecks += ui32UMChecksOffset;
+       if (*paui32FWAlignChecks++ != ui32AlignChecksSizeUM)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Mismatching sizes of RGXFW_ALIGN_CHECKS_INIT"
+                        " array between UM(%d) and FW(%d)",
+                        __func__,
+                        ui32AlignChecksSizeUM,
+                        *paui32FWAlignChecks));
+               eError = PVRSRV_ERROR_INVALID_ALIGNMENT;
+               goto return_;
+       }
+
+       for (i = 0; i < ui32AlignChecksSizeUM; i++)
+       {
+               if (aui32AlignChecksUM[i] != paui32FWAlignChecks[i])
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: size/offset mismatch in RGXFW_ALIGN_CHECKS_INIT[%d]"
+                                       " between UM(%d) and FW(%d)",
+                                       __func__, i, aui32AlignChecksUM[i], paui32FWAlignChecks[i]));
+                       eError = PVRSRV_ERROR_INVALID_ALIGNMENT;
+               }
+       }
+
+       if (eError == PVRSRV_ERROR_INVALID_ALIGNMENT)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Check for FW/KM structure"
+                               " alignment failed.", __func__));
+       }
+
+return_:
+
+       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc);
+
+       return eError;
+}
+
+static
+PVRSRV_ERROR RGXAllocateFWMemoryRegion(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                       IMG_DEVMEM_SIZE_T ui32Size,
+                                       PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags,
+                                       const IMG_PCHAR pszText,
+                                       DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_DEVMEM_LOG2ALIGN_T uiLog2Align = OSGetPageShift();
+#if defined(SUPPORT_MIPS_CONTIGUOUS_FW_MEMORY)
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+#endif
+
+#if defined(SUPPORT_MIPS_CONTIGUOUS_FW_MEMORY)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+       {
+               uiLog2Align = RGXMIPSFW_LOG2_PAGE_SIZE_64K;
+       }
+#endif
+
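+       /* Request write-combined, zero-on-alloc memory; the result is filtered through the AutoVz keep-FW-data mask */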
+       uiMemAllocFlags = (uiMemAllocFlags |
+                                          PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC |
+                                          PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) &
+                          RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp);
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(SUPPORT_SECURITY_VALIDATION)
+       uiMemAllocFlags &= PVRSRV_MEMALLOCFLAGS_TDFWMASK;
+#endif
+
+       PDUMPCOMMENT(psDeviceNode, "Allocate FW %s memory", pszText);
+
+       eError = DevmemFwAllocateExportable(psDeviceNode,
+                                                                               ui32Size,
+                                                                               1ULL << uiLog2Align,
+                                                                               uiMemAllocFlags,
+                                                                               pszText,
+                                                                               ppsMemDescPtr);
+
+       return eError;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function     RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver
+
+ @Description
+
+ Validate the FW build options against KM driver build options (KM build options only)
+
+ The following check is redundant, because the next check covers the same bits:
+ if client and server are build-compatible, and client and firmware are
+ build-compatible, then server and firmware are build-compatible as well.
+
+ The check is kept so that the error messages remain clear if an incompatibility occurs.
+
+ @Input psFwOsInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(RGXFWIF_OSINIT *psFwOsInit)
+{
+#if !defined(NO_HARDWARE)
+       IMG_UINT32                      ui32BuildOptions, ui32BuildOptionsFWKMPart, ui32BuildOptionsMismatch;
+
+       if (psFwOsInit == NULL)
+               return PVRSRV_ERROR_INVALID_PARAMS;
+
+       ui32BuildOptions = (RGX_BUILD_OPTIONS_KM & RGX_BUILD_OPTIONS_MASK_FW);
+
+       ui32BuildOptionsFWKMPart = psFwOsInit->sRGXCompChecks.ui32BuildOptions & RGX_BUILD_OPTIONS_MASK_FW;
+
+       /* Check if the FW is missing support for any features required by the driver */
+       if (~ui32BuildOptionsFWKMPart & ui32BuildOptions)
+       {
+               ui32BuildOptionsMismatch = ui32BuildOptions ^ ui32BuildOptionsFWKMPart;
+#if !defined(PVRSRV_STRICT_COMPAT_CHECK)
+               /* Mask out the debug flag option, as combinations of debug vs release between UM and KM are supported */
+               ui32BuildOptionsMismatch &= OPTIONS_STRICT;
+#endif
+               if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
+               {
+                       PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and KM driver build options; "
+                                       "extra options present in the KM driver: (0x%x). Please check rgx_options.h",
+                                       ui32BuildOptions & ui32BuildOptionsMismatch ));
+                       return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+               }
+
+               if ( (ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch) != 0)
+               {
+                       PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware-side and KM driver build options; "
+                                       "extra options present in Firmware: (0x%x). Please check rgx_options.h",
+                                       ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch ));
+                       return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+               }
+               PVR_DPF((PVR_DBG_WARNING, "RGXDevInitCompatCheck: Firmware and KM driver build options differ."));
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware and KM driver build options match. [ OK ]"));
+       }
+#endif
+
+       return PVRSRV_OK;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function     RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver
+
+ @Description
+
+ Validate FW DDK version against driver DDK version
+
+ @Input psDevInfo - device info
+ @Input psFwOsInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+               RGXFWIF_OSINIT *psFwOsInit)
+{
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+       IMG_UINT32                      ui32DDKVersion;
+       PVRSRV_ERROR            eError;
+
+       ui32DDKVersion = PVRVERSION_PACK(PVRVERSION_MAJ, PVRVERSION_MIN);
+#endif
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                    "Compatibility check: KM driver and FW DDK version");
+       eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc,
+                       offsetof(RGXFWIF_OSINIT, sRGXCompChecks) +
+                       offsetof(RGXFWIF_COMPCHECKS, ui32DDKVersion),
+                       ui32DDKVersion,
+                       0xffffffff,
+                       PDUMP_POLL_OPERATOR_EQUAL,
+                       PDUMP_FLAGS_CONTINUOUS);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError));
+               return eError;
+       }
+#endif
+
+#if !defined(NO_HARDWARE)
+       if (psFwOsInit == NULL)
+               return PVRSRV_ERROR_INVALID_PARAMS;
+
+       if (psFwOsInit->sRGXCompChecks.ui32DDKVersion != ui32DDKVersion)
+       {
+               PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible driver DDK version (%u.%u) / Firmware DDK version (%u.%u).",
+                               PVRVERSION_MAJ, PVRVERSION_MIN,
+                               PVRVERSION_UNPACK_MAJ(psFwOsInit->sRGXCompChecks.ui32DDKVersion),
+                               PVRVERSION_UNPACK_MIN(psFwOsInit->sRGXCompChecks.ui32DDKVersion)));
+               eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH;
+               PVR_DBG_BREAK;
+               return eError;
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK version (%u.%u) and Firmware DDK version (%u.%u) match. [ OK ]",
+                               PVRVERSION_MAJ, PVRVERSION_MIN,
+                               PVRVERSION_MAJ, PVRVERSION_MIN));
+       }
+#endif
+
+       return PVRSRV_OK;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function     RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver
+
+ @Description
+
+ Validate FW DDK build against driver DDK build
+
+ @Input psDevInfo - device info
+ @Input psFwOsInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+               RGXFWIF_OSINIT *psFwOsInit)
+{
+       PVRSRV_ERROR            eError = PVRSRV_OK;
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+       IMG_UINT32                      ui32DDKBuild;
+
+       ui32DDKBuild = PVRVERSION_BUILD;
+#endif
+
+#if defined(PDUMP) && defined(PVRSRV_STRICT_COMPAT_CHECK)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                    "Compatibility check: KM driver and FW DDK build");
+       eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc,
+                       offsetof(RGXFWIF_OSINIT, sRGXCompChecks) +
+                       offsetof(RGXFWIF_COMPCHECKS, ui32DDKBuild),
+                       ui32DDKBuild,
+                       0xffffffff,
+                       PDUMP_POLL_OPERATOR_EQUAL,
+                       PDUMP_FLAGS_CONTINUOUS);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError));
+               return eError;
+       }
+#endif
+
+#if !defined(NO_HARDWARE)
+       if (psFwOsInit == NULL)
+               return PVRSRV_ERROR_INVALID_PARAMS;
+
+       if (psFwOsInit->sRGXCompChecks.ui32DDKBuild != ui32DDKBuild)
+       {
+               PVR_LOG(("(WARN) RGXDevInitCompatCheck: Different driver DDK build version (%d) / Firmware DDK build version (%d).",
+                               ui32DDKBuild, psFwOsInit->sRGXCompChecks.ui32DDKBuild));
+#if defined(PVRSRV_STRICT_COMPAT_CHECK)
+               eError = PVRSRV_ERROR_DDK_BUILD_MISMATCH;
+               PVR_DBG_BREAK;
+               return eError;
+#endif
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK build version (%d) and Firmware DDK build version (%d) match. [ OK ]",
+                               ui32DDKBuild, psFwOsInit->sRGXCompChecks.ui32DDKBuild));
+       }
+#endif
+       return eError;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function     RGXDevInitCompatCheck_BVNC_FWAgainstDriver
+
+ @Description
+
+ Validate FW BVNC against driver BVNC
+
+ @Input psDevInfo - device info
+ @Input psFwOsInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+               RGXFWIF_OSINIT *psFwOsInit)
+{
+#if !defined(NO_HARDWARE)
+       IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleBVNC;
+#endif
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+       RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sBVNC);
+       PVRSRV_ERROR                            eError;
+
+       sBVNC.ui64BVNC = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B,
+                                       psDevInfo->sDevFeatureCfg.ui32V,
+                                       psDevInfo->sDevFeatureCfg.ui32N,
+                                       psDevInfo->sDevFeatureCfg.ui32C);
+#endif
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                    "Compatibility check: KM driver and FW BVNC (struct version)");
+       eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc,
+                       offsetof(RGXFWIF_OSINIT, sRGXCompChecks) +
+                       offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+                       offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32LayoutVersion),
+                       sBVNC.ui32LayoutVersion,
+                       0xffffffff,
+                       PDUMP_POLL_OPERATOR_EQUAL,
+                       PDUMP_FLAGS_CONTINUOUS);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError));
+       }
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                    "Compatibility check: KM driver and FW BVNC (BVNC part - Lower 32 bits)");
+       eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc,
+                       offsetof(RGXFWIF_OSINIT, sRGXCompChecks) +
+                       offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+                       offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC),
+                       (IMG_UINT32)sBVNC.ui64BVNC,
+                       0xffffffff,
+                       PDUMP_POLL_OPERATOR_EQUAL,
+                       PDUMP_FLAGS_CONTINUOUS);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError));
+       }
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                    "Compatibility check: KM driver and FW BVNC (BVNC part - Higher 32 bits)");
+       eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc,
+                       offsetof(RGXFWIF_OSINIT, sRGXCompChecks) +
+                       offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+                       offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC) +
+                       sizeof(IMG_UINT32),
+                       (IMG_UINT32)(sBVNC.ui64BVNC >> 32),
+                       0xffffffff,
+                       PDUMP_POLL_OPERATOR_EQUAL,
+                       PDUMP_FLAGS_CONTINUOUS);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError));
+       }
+#endif
+
+#if !defined(NO_HARDWARE)
+       if (psFwOsInit == NULL)
+               return PVRSRV_ERROR_INVALID_PARAMS;
+
+       RGX_BVNC_EQUAL(sBVNC, psFwOsInit->sRGXCompChecks.sFWBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleBVNC);
+
+       if (!bCompatibleAll)
+       {
+               if (!bCompatibleVersion)
+               {
+                       PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of driver (%u) and firmware (%u).",
+                                       __func__,
+                                       sBVNC.ui32LayoutVersion,
+                                       psFwOsInit->sRGXCompChecks.sFWBVNC.ui32LayoutVersion));
+                       eError = PVRSRV_ERROR_BVNC_MISMATCH;
+                       return eError;
+               }
+
+               if (!bCompatibleBVNC)
+               {
+                       PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in KM driver BVNC (%u.%u.%u.%u) and Firmware BVNC (%u.%u.%u.%u)",
+                                       RGX_BVNC_PACKED_EXTR_B(sBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_V(sBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_N(sBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_C(sBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_B(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_V(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_N(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_C(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC)));
+                       eError = PVRSRV_ERROR_BVNC_MISMATCH;
+                       return eError;
+               }
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware BVNC and KM driver BVNC match. [ OK ]"));
+       }
+#endif
+       return PVRSRV_OK;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function     RGXDevInitCompatCheck_BVNC_HWAgainstDriver
+
+ @Description
+
+ Validate HW BVNC against driver BVNC
+
+ @Input psDevInfo - device info
+ @Input psFwOsInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_HWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+               RGXFWIF_OSINIT *psFwOsInit)
+{
+#if defined(PDUMP) || !defined(NO_HARDWARE)
+       IMG_UINT64 ui64MaskBVNC = RGX_BVNC_PACK_MASK_B |
+                                 RGX_BVNC_PACK_MASK_V |
+                                 RGX_BVNC_PACK_MASK_N |
+                                 RGX_BVNC_PACK_MASK_C;
+
+       PVRSRV_ERROR                            eError;
+       RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sSWBVNC);
+#endif
+
+#if defined(PDUMP)
+       PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+#endif
+
+#if !defined(NO_HARDWARE)
+       RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sHWBVNC);
+       IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleBVNC;
+#endif
+
+       if (psDevInfo->bIgnoreHWReportedBVNC)
+       {
+               PVR_LOG(("BVNC compatibility checks between driver and HW are disabled (AppHint override)"));
+               return PVRSRV_OK;
+       }
+
+#if defined(PDUMP) || !defined(NO_HARDWARE)
+#if defined(COMPAT_BVNC_MASK_V)
+       ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_V;
+#endif
+#if defined(COMPAT_BVNC_MASK_N)
+       ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_N;
+#endif
+#if defined(COMPAT_BVNC_MASK_C)
+       ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_C;
+#endif
+
+       sSWBVNC.ui64BVNC = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B,
+                                                                       psDevInfo->sDevFeatureCfg.ui32V,
+                                                                       psDevInfo->sDevFeatureCfg.ui32N,
+                                                                       psDevInfo->sDevFeatureCfg.ui32C);
+
+#if defined(FIX_HW_BRN_38344_BIT_MASK)
+       if (RGX_IS_BRN_SUPPORTED(psDevInfo, 38344) && (psDevInfo->sDevFeatureCfg.ui32C >= 10))
+       {
+               ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_C;
+       }
+#endif
+       if (ui64MaskBVNC != (RGX_BVNC_PACK_MASK_B | RGX_BVNC_PACK_MASK_V | RGX_BVNC_PACK_MASK_N | RGX_BVNC_PACK_MASK_C))
+       {
+               PVR_LOG(("Compatibility checks: Ignoring fields: '%s%s%s%s' of HW BVNC.",
+                               ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_B))?("B"):("")),
+                               ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_V))?("V"):("")),
+                               ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_N))?("N"):("")),
+                               ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_C))?("C"):(""))));
+       }
+#endif
+
+#if defined(PDUMP)
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags,
+                             "Compatibility check: Layout version of compchecks struct");
+       eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc,
+                       offsetof(RGXFWIF_OSINIT, sRGXCompChecks) +
+                       offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+                       offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32LayoutVersion),
+                       sSWBVNC.ui32LayoutVersion,
+                       0xffffffff,
+                       PDUMP_POLL_OPERATOR_EQUAL,
+                       ui32PDumpFlags);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError));
+               return eError;
+       }
+
+       PDUMPCOM(psDevInfo->psDeviceNode, ui32PDumpFlags, "BVNC compatibility check started");
+       if (ui64MaskBVNC & (RGX_BVNC_PACK_MASK_B | RGX_BVNC_PACK_MASK_N | RGX_BVNC_PACK_MASK_C))
+       {
+               PDUMPIF(psDevInfo->psDeviceNode, "DISABLE_HWBNC_CHECK", ui32PDumpFlags);
+               PDUMPELSE(psDevInfo->psDeviceNode, "DISABLE_HWBNC_CHECK", ui32PDumpFlags);
+               PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags,
+                                     "Compatibility check: HW BNC and FW BNC (Lower 32 bits)");
+               eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc,
+                               offsetof(RGXFWIF_OSINIT, sRGXCompChecks) +
+                               offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+                               offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC),
+                               (IMG_UINT32)sSWBVNC.ui64BVNC ,
+                               (IMG_UINT32)(ui64MaskBVNC & ~RGX_BVNC_PACK_MASK_V),
+                               PDUMP_POLL_OPERATOR_EQUAL,
+                               ui32PDumpFlags);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError));
+                       return eError;
+               }
+
+               PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags,
+                                     "Compatibility check: HW BNC and FW BNC (Higher 32 bits)");
+               eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc,
+                               offsetof(RGXFWIF_OSINIT, sRGXCompChecks) +
+                               offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+                               offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC) +
+                               sizeof(IMG_UINT32),
+                               (IMG_UINT32)(sSWBVNC.ui64BVNC >> 32),
+                               (IMG_UINT32)((ui64MaskBVNC & ~RGX_BVNC_PACK_MASK_V) >> 32),
+                               PDUMP_POLL_OPERATOR_EQUAL,
+                               ui32PDumpFlags);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError));
+                       return eError;
+               }
+
+               PDUMPFI(psDevInfo->psDeviceNode, "DISABLE_HWBNC_CHECK", ui32PDumpFlags);
+       }
+       if (ui64MaskBVNC & RGX_BVNC_PACK_MASK_V)
+       {
+               PDUMPIF(psDevInfo->psDeviceNode, "DISABLE_HWV_CHECK", ui32PDumpFlags);
+               PDUMPELSE(psDevInfo->psDeviceNode, "DISABLE_HWV_CHECK", ui32PDumpFlags);
+
+               PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags,
+                                     "Compatibility check: HW V and FW V");
+               eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc,
+                                       offsetof(RGXFWIF_OSINIT, sRGXCompChecks) +
+                                       offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+                                       offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC) +
+                                       ((RGX_BVNC_PACK_SHIFT_V >= 32) ? sizeof(IMG_UINT32) : 0),
+                                       (IMG_UINT32)(sSWBVNC.ui64BVNC >> ((RGX_BVNC_PACK_SHIFT_V >= 32) ? 32 : 0)),
+                                       RGX_BVNC_PACK_MASK_V >> ((RGX_BVNC_PACK_SHIFT_V >= 32) ? 32 : 0),
+                                       PDUMP_POLL_OPERATOR_EQUAL,
+                                       ui32PDumpFlags);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError));
+                       return eError;
+               }
+               PDUMPFI(psDevInfo->psDeviceNode, "DISABLE_HWV_CHECK", ui32PDumpFlags);
+       }
+       PDUMPCOM(psDevInfo->psDeviceNode, ui32PDumpFlags, "BVNC compatibility check finished");
+#endif
+
+#if !defined(NO_HARDWARE)
+       if (psFwOsInit == NULL)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       sHWBVNC = psFwOsInit->sRGXCompChecks.sHWBVNC;
+
+       sHWBVNC.ui64BVNC &= ui64MaskBVNC;
+       sSWBVNC.ui64BVNC &= ui64MaskBVNC;
+
+       RGX_BVNC_EQUAL(sSWBVNC, sHWBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleBVNC);
+
+       if (!bCompatibleAll)
+       {
+               if (!bCompatibleVersion)
+               {
+                       PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of HW (%d) and FW (%d).",
+                                       __func__,
+                                       sHWBVNC.ui32LayoutVersion,
+                                       sSWBVNC.ui32LayoutVersion));
+                       eError = PVRSRV_ERROR_BVNC_MISMATCH;
+                       return eError;
+               }
+
+               if (!bCompatibleBVNC)
+               {
+                       PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible HW BVNC (%d.%d.%d.%d) and FW BVNC (%d.%d.%d.%d).",
+                                       RGX_BVNC_PACKED_EXTR_B(sHWBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_V(sHWBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_N(sHWBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_C(sHWBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_B(sSWBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_V(sSWBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_N(sSWBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_C(sSWBVNC.ui64BVNC)));
+                       eError = PVRSRV_ERROR_BVNC_MISMATCH;
+                       return eError;
+               }
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: HW BVNC (%d.%d.%d.%d) and FW BVNC (%d.%d.%d.%d) match. [ OK ]",
+                               RGX_BVNC_PACKED_EXTR_B(sHWBVNC.ui64BVNC),
+                               RGX_BVNC_PACKED_EXTR_V(sHWBVNC.ui64BVNC),
+                               RGX_BVNC_PACKED_EXTR_N(sHWBVNC.ui64BVNC),
+                               RGX_BVNC_PACKED_EXTR_C(sHWBVNC.ui64BVNC),
+                               RGX_BVNC_PACKED_EXTR_B(sSWBVNC.ui64BVNC),
+                               RGX_BVNC_PACKED_EXTR_V(sSWBVNC.ui64BVNC),
+                               RGX_BVNC_PACKED_EXTR_N(sSWBVNC.ui64BVNC),
+                               RGX_BVNC_PACKED_EXTR_C(sSWBVNC.ui64BVNC)));
+       }
+#endif
+
+       return PVRSRV_OK;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function     RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver
+
+ @Description
+
+ Validate the HW firmware processor (META/MIPS/RISCV) version against the version expected by the driver
+
+ @Input psDevInfo - device info
+ @Input psFwOsInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+               RGXFWIF_OSINIT *psFwOsInit)
+{
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+       PVRSRV_ERROR            eError;
+#endif
+#if defined(PDUMP)
+       PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+#endif
+       IMG_UINT32      ui32FWCoreIDValue = 0;
+       IMG_CHAR *pcRGXFW_PROCESSOR = NULL;
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+       {
+               ui32FWCoreIDValue = RGXMIPSFW_CORE_ID_VALUE;
+               pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_MIPS;
+       }
+       else
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+       {
+               switch (RGX_GET_FEATURE_VALUE(psDevInfo, META))
+               {
+               case MTP218: ui32FWCoreIDValue = RGX_CR_META_MTP218_CORE_ID_VALUE; break;
+               case MTP219: ui32FWCoreIDValue = RGX_CR_META_MTP219_CORE_ID_VALUE; break;
+               case LTP218: ui32FWCoreIDValue = RGX_CR_META_LTP218_CORE_ID_VALUE; break;
+               case LTP217: ui32FWCoreIDValue = RGX_CR_META_LTP217_CORE_ID_VALUE; break;
+               default:
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Undefined FW_CORE_ID_VALUE", __func__));
+                       PVR_ASSERT(0);
+               }
+               pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_META;
+       }
+       else
+#endif
+#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+       {
+               ui32FWCoreIDValue = RGXRISCVFW_CORE_ID_VALUE;
+               pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_RISCV;
+       }
+       else
+#endif
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Undefined FW_CORE_ID_VALUE", __func__));
+               PVR_ASSERT(0);
+       }
+
+#if defined(PDUMP)
+       PDUMPIF(psDevInfo->psDeviceNode, "DISABLE_HWMETA_CHECK", ui32PDumpFlags);
+       PDUMPELSE(psDevInfo->psDeviceNode, "DISABLE_HWMETA_CHECK", ui32PDumpFlags);
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags,
+                             "Compatibility check: KM driver and HW FW Processor version");
+       eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc,
+                       offsetof(RGXFWIF_OSINIT, sRGXCompChecks) +
+                       offsetof(RGXFWIF_COMPCHECKS, ui32FWProcessorVersion),
+                       ui32FWCoreIDValue,
+                       0xffffffff,
+                       PDUMP_POLL_OPERATOR_EQUAL,
+                       ui32PDumpFlags);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError));
+               return eError;
+       }
+       PDUMPFI(psDevInfo->psDeviceNode, "DISABLE_HWMETA_CHECK", ui32PDumpFlags);
+#endif
+
+#if !defined(NO_HARDWARE)
+       if (psFwOsInit == NULL)
+               return PVRSRV_ERROR_INVALID_PARAMS;
+
+       if (psFwOsInit->sRGXCompChecks.ui32FWProcessorVersion != ui32FWCoreIDValue)
+       {
+               PVR_LOG(("RGXDevInitCompatCheck: Incompatible driver %s version (%d) / HW %s version (%d).",
+                                pcRGXFW_PROCESSOR,
+                                ui32FWCoreIDValue,
+                                pcRGXFW_PROCESSOR,
+                                psFwOsInit->sRGXCompChecks.ui32FWProcessorVersion));
+               eError = PVRSRV_ERROR_FWPROCESSOR_MISMATCH;
+               PVR_DBG_BREAK;
+               return eError;
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Compatible driver %s version (%d) / HW %s version (%d) [OK].",
+                                pcRGXFW_PROCESSOR,
+                                ui32FWCoreIDValue,
+                                pcRGXFW_PROCESSOR,
+                                psFwOsInit->sRGXCompChecks.ui32FWProcessorVersion));
+       }
+#endif
+       return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function     RGXDevInitCompatCheck
+
+ @Description
+
+ Check compatibility of host driver and firmware (DDK and build options)
+ for RGX devices at services/device initialisation
+
+ @Input psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR            eError;
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+#if !defined(NO_HARDWARE)
+       IMG_UINT32                      ui32RegValue;
+       IMG_UINT8                       ui8FwOsCount;
+       IMG_UINT32                      ui32FwTimeout = MAX_HW_TIME_US;
+
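+       /* Poll until the firmware marks the compatibility checks as updated, or the timeout expires */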
+       LOOP_UNTIL_TIMEOUT(ui32FwTimeout)
+       {
+               if (*((volatile IMG_BOOL *)&psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated))
+               {
+                       /* No need to wait if the FW has already updated the values */
+                       break;
+               }
+               OSWaitus(ui32FwTimeout/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       ui32RegValue = 0;
+
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+       if ((!PVRSRV_VZ_MODE_IS(GUEST)) &&
+               RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+       {
+               eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T0ENABLE_OFFSET, &ui32RegValue);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_LOG(("%s: Reading RGX META register failed. Is the GPU correctly powered up? (%u)",
+                                       __func__, eError));
+                       goto chk_exit;
+               }
+
+               if (!(ui32RegValue & META_CR_TXENABLE_ENABLE_BIT))
+               {
+                       eError = PVRSRV_ERROR_META_THREAD0_NOT_ENABLED;
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: RGX META is not running. Is the GPU correctly powered up? %d (%u)",
+                                       __func__, psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated, eError));
+                       goto chk_exit;
+               }
+       }
+#endif
+
+       if (!*((volatile IMG_BOOL *)&psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated))
+       {
+               eError = PVRSRV_ERROR_TIMEOUT;
+               PVR_DPF((PVR_DBG_ERROR, "%s: GPU Firmware not responding: failed to supply compatibility info (%u)",
+                               __func__, eError));
+               if (PVRSRV_VZ_MODE_IS(GUEST))
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Potential causes: firmware not initialised or the current Guest driver's "
+                                                                       "OsConfig initialisation data was not accepted by the firmware", __func__));
+               }
+               goto chk_exit;
+       }
+
+       ui8FwOsCount = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.sInitOptions.ui8OsCountSupport;
+       if ((PVRSRV_VZ_MODE_IS(NATIVE) && (ui8FwOsCount > 1)) ||
+               (PVRSRV_VZ_MODE_IS(HOST) && (ui8FwOsCount != RGX_NUM_OS_SUPPORTED)))
+       {
+               PVR_DPF((PVR_DBG_WARNING, "%s: Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)",
+                               __func__, (PVRSRV_VZ_MODE_IS(NATIVE)) ? (1) : (RGX_NUM_OS_SUPPORTED), ui8FwOsCount));
+       }
+#endif /* !defined(NO_HARDWARE) */
+
+       eError = RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(psDevInfo->psRGXFWIfOsInit);
+       if (eError != PVRSRV_OK)
+       {
+               goto chk_exit;
+       }
+
+       eError = RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit);
+       if (eError != PVRSRV_OK)
+       {
+               goto chk_exit;
+       }
+
+       eError = RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit);
+       if (eError != PVRSRV_OK)
+       {
+               goto chk_exit;
+       }
+
+       if (!PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               eError = RGXDevInitCompatCheck_BVNC_FWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit);
+               if (eError != PVRSRV_OK)
+               {
+                       goto chk_exit;
+               }
+
+               eError = RGXDevInitCompatCheck_BVNC_HWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit);
+               if (eError != PVRSRV_OK)
+               {
+                       goto chk_exit;
+               }
+       }
+
+       eError = RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit);
+       if (eError != PVRSRV_OK)
+       {
+               goto chk_exit;
+       }
+
+       eError = PVRSRV_OK;
+chk_exit:
+
+       return eError;
+}
+
+/**************************************************************************/ /*!
+@Function       RGXSoftReset
+@Description    Resets some modules of the RGX device
+@Input          psDeviceNode           Device node
+@Input          ui64ResetValue1 A mask for which each bit set corresponds
+                                to a module to reset (via the SOFT_RESET
+                                register).
+@Input          ui64ResetValue2 A mask for which each bit set corresponds
+                                to a module to reset (via the SOFT_RESET2
+                                register).
+@Return         PVRSRV_ERROR
+ */ /***************************************************************************/
+static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode,
+               IMG_UINT64  ui64ResetValue1,
+               IMG_UINT64  ui64ResetValue2)
+{
+       PVRSRV_RGXDEV_INFO        *psDevInfo;
+       IMG_BOOL        bSoftReset = IMG_FALSE;
+       IMG_UINT64      ui64SoftResetMask = 0;
+
+       PVR_ASSERT(psDeviceNode != NULL);
+       PVR_ASSERT(psDeviceNode->pvDevice != NULL);
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+       /* the device info */
+       psDevInfo = psDeviceNode->pvDevice;
+#if defined(RGX_CR_SOFT_RESET__PBE2_XE__MASKFULL)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBE2_IN_XE))
+       {
+               ui64SoftResetMask = RGX_CR_SOFT_RESET__PBE2_XE__MASKFULL;
+       }
+       else
+#endif
+       {
+               ui64SoftResetMask = RGX_CR_SOFT_RESET_MASKFULL;
+       }
+
+#if defined(RGX_CR_SOFT_RESET2_MASKFULL)
+       if ((RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) &&
+                       ((ui64ResetValue2 & RGX_CR_SOFT_RESET2_MASKFULL) != ui64ResetValue2))
+       {
+               bSoftReset = IMG_TRUE;
+       }
+#endif
+
+       if (((ui64ResetValue1 & ui64SoftResetMask) != ui64ResetValue1) || bSoftReset)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       /* Set in soft-reset */
+       OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, ui64ResetValue1);
+
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+       {
+               OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2, ui64ResetValue2);
+       }
+#endif
+
+       /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
+       (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET);
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+       {
+               (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2);
+       }
+#endif
+
+       /* Take the modules out of reset... */
+       OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, 0);
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+       {
+               OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2, 0);
+       }
+#endif
+
+       /* ...and fence again */
+       (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET);
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+       {
+               (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2);
+       }
+#endif
+
+       return PVRSRV_OK;
+}
+
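+/* Sentinel object: RGXFreeTrampoline leaves psTrampoline pointing here rather than at NULL */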
+static const RGX_MIPS_ADDRESS_TRAMPOLINE sNullTrampoline;
+
+static void RGXFreeTrampoline(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       DevPhysMemFree(psDeviceNode,
+#if defined(PDUMP)
+                       psDevInfo->psTrampoline->hPdumpPages,
+#endif
+                       &psDevInfo->psTrampoline->sPages);
+
+       if (psDevInfo->psTrampoline != &sNullTrampoline)
+       {
+               OSFreeMem(psDevInfo->psTrampoline);
+       }
+       psDevInfo->psTrampoline = (RGX_MIPS_ADDRESS_TRAMPOLINE *)&sNullTrampoline;
+}
+
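+/* True when two equally-sized ranges starting at x and y overlap */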
+#define RANGES_OVERLAP(x,y,size) (x < (y+size) && y < (x+size))
+#define TRAMPOLINE_ALLOC_MAX_RETRIES (3)
+
+static PVRSRV_ERROR RGXAllocTrampoline(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR eError;
+       IMG_INT32 i, j;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       RGX_MIPS_ADDRESS_TRAMPOLINE *pasTrampoline[TRAMPOLINE_ALLOC_MAX_RETRIES];
+
+       PDUMPCOMMENT(psDeviceNode, "Allocate pages for trampoline");
+
+       /* Retry the allocation of the trampoline block (16KB), retaining any
+        * previous allocation that overlaps the target range until we get one
+        * that does not overlap it.
+        * At most 3 tries are needed: each attempt is a physically contiguous
+        * block (not individual pages), and at most two disjoint blocks of this
+        * size can intersect the fixed target window.
+        * The unused allocations are freed only at the end, once the desired
+        * range is obtained, to prevent the allocator from returning the same
+        * bad range repeatedly.
+        */
+       for (i = 0; i < TRAMPOLINE_ALLOC_MAX_RETRIES; i++)
+       {
+               pasTrampoline[i] = OSAllocMem(sizeof(RGX_MIPS_ADDRESS_TRAMPOLINE));
+               eError = DevPhysMemAlloc(psDeviceNode,
+                               RGXMIPSFW_TRAMPOLINE_SIZE,
+                               RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE,
+                               0,         // (init) u8Value
+                               IMG_FALSE, // bInitPage,
+#if defined(PDUMP)
+                               psDeviceNode->psFirmwareMMUDevAttrs->pszMMUPxPDumpMemSpaceName,
+                               "TrampolineRegion",
+                               &pasTrampoline[i]->hPdumpPages,
+#endif
+                               &pasTrampoline[i]->sPages,
+                               &pasTrampoline[i]->sPhysAddr);
+               if (PVRSRV_OK != eError)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s failed (%u)",
+                                __func__,
+                                eError));
+                       goto fail;
+               }
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+               /* Set the persistent uiOSid value so that we free from the correct
+                * base arena when unloading the driver and freeing the trampoline.
+                */
+               pasTrampoline[i]->sPages.uiOSid = 0;    /* Firmware global arena */
+#endif
+
+               if (!RANGES_OVERLAP(pasTrampoline[i]->sPhysAddr.uiAddr,
+                               RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR,
+                               RGXMIPSFW_TRAMPOLINE_SIZE))
+               {
+                       break;
+               }
+       }
+       if (TRAMPOLINE_ALLOC_MAX_RETRIES == i)
+       {
+               /* Failed to find a physical allocation after 3 attempts */
+               eError = PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES;
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s failed to allocate non-overlapping pages (%u)",
+                               __func__, eError));
+               /* Fall through, clean up and return error. */
+       }
+       else
+       {
+               /* Remember the last physical block allocated; it will not be freed */
+               psDevInfo->psTrampoline = pasTrampoline[i];
+       }
+
+fail:
+       /* free all unused allocations */
+       for (j = 0; j < i; j++)
+       {
+               DevPhysMemFree(psDeviceNode,
+#if defined(PDUMP)
+                               pasTrampoline[j]->hPdumpPages,
+#endif
+                               &pasTrampoline[j]->sPages);
+               OSFreeMem(pasTrampoline[j]);
+       }
+
+       return eError;
+}
+
+#undef RANGES_OVERLAP
+
+
+PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE   *psDeviceNode,
+                                  IMG_DEVMEM_SIZE_T    uiFWCodeLen,
+                                  IMG_DEVMEM_SIZE_T    uiFWDataLen,
+                                  IMG_DEVMEM_SIZE_T    uiFWCorememCodeLen,
+                                  IMG_DEVMEM_SIZE_T    uiFWCorememDataLen)
+{
+       PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags;
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_ERROR        eError;
+       IMG_DEVMEM_SIZE_T       uiDummyLen;
+       DEVMEM_MEMDESC          *psDummyMemDesc = NULL;
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) &&
+               (RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) == 32))
+       {
+               eError = RGXAllocTrampoline(psDeviceNode);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "Failed to allocate trampoline region (%u)",
+                                       eError));
+                       goto failTrampolineMemDescAlloc;
+               }
+       }
+
+       /*
+        * Set up Allocation for FW code section
+        */
+       uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                         PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                         PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                         PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+                         PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                         PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+                         PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+                         PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+                         PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_CODE);
+
+       eError = RGXAllocateFWMemoryRegion(psDeviceNode,
+                       uiFWCodeLen,
+                       uiMemAllocFlags,
+                       "FwExCodeRegion",
+                       &psDevInfo->psRGXFWCodeMemDesc);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "Failed to allocate fw code mem (%u)",
+                        eError));
+               goto failFWCodeMemDescAlloc;
+       }
+
+       eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc,
+                                         &psDevInfo->sFWCodeDevVAddrBase);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "Failed to acquire devVAddr for fw code mem (%u)",
+                        eError));
+               goto failFWCodeMemDescAqDevVirt;
+       }
+
+       if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) || (PVRSRV_VZ_MODE_IS(GUEST))))
+       {
+               /*
+                * The FW code must be the first allocation in the firmware heap, otherwise
+                * the bootloader will not work (the FW will not be able to find the bootloader).
+                */
+               PVR_ASSERT(psDevInfo->sFWCodeDevVAddrBase.uiAddr == RGX_FIRMWARE_RAW_HEAP_BASE);
+       }
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+       {
+               /*
+                * Allocate dummy pages so that the data segment allocation gets the same
+                * device virtual address as the one specified in the MIPS firmware linker script
+                */
+               uiDummyLen = RGXGetFWImageSectionMaxSize(NULL, MIPS_CODE) +
+                               RGXGetFWImageSectionMaxSize(NULL, MIPS_EXCEPTIONS_CODE) +
+                               RGXGetFWImageSectionMaxSize(NULL, MIPS_BOOT_CODE) -
+                               uiFWCodeLen; /* code actual size */
+
+               if (uiDummyLen > 0)
+               {
+                       eError = DevmemFwAllocateExportable(psDeviceNode,
+                                       uiDummyLen,
+                                       OSGetPageSize(),
+                                       uiMemAllocFlags,
+                                       "FwExDummyPages",
+                                       &psDummyMemDesc);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                        "Failed to allocate fw dummy mem (%u)",
+                                        eError));
+                               goto failDummyMemDescAlloc;
+                       }
+               }
+       }
+
+       /*
+        * Set up Allocation for FW data section
+        */
+       uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                       PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                       PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                       PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+                       PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                       PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+                       PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+                       PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+                       PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PRIV_DATA);
+
+       eError = RGXAllocateFWMemoryRegion(psDeviceNode,
+                       uiFWDataLen,
+                       uiMemAllocFlags,
+                       "FwExDataRegion",
+                       &psDevInfo->psRGXFWDataMemDesc);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "Failed to allocate fw data mem (%u)",
+                        eError));
+               goto failFWDataMemDescAlloc;
+       }
+
+       eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWDataMemDesc,
+                                         &psDevInfo->sFWDataDevVAddrBase);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "Failed to acquire devVAddr for fw data mem (%u)",
+                        eError));
+               goto failFWDataMemDescAqDevVirt;
+       }
+
+       if (uiFWCorememCodeLen != 0)
+       {
+               /*
+                * Set up Allocation for FW coremem code section
+                */
+               uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                               PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+                               PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                               PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+                               PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                               PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+                               PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+                               PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_CODE);
+
+               eError = RGXAllocateFWMemoryRegion(psDeviceNode,
+                               uiFWCorememCodeLen,
+                               uiMemAllocFlags,
+                               "FwExCorememCodeRegion",
+                               &psDevInfo->psRGXFWCorememCodeMemDesc);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "Failed to allocate fw coremem code mem, size: %"  IMG_INT64_FMTSPECd ", flags: %" PVRSRV_MEMALLOCFLAGS_FMTSPEC " (%u)",
+                                uiFWCorememCodeLen, uiMemAllocFlags, eError));
+                       goto failFWCorememCodeMemDescAlloc;
+               }
+
+               eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc,
+                                                 &psDevInfo->sFWCorememCodeDevVAddrBase);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "Failed to acquire devVAddr for fw coremem mem code (%u)",
+                                eError));
+                       goto failFWCorememCodeMemDescAqDevVirt;
+               }
+
+               eError = RGXSetFirmwareAddress(&psDevInfo->sFWCorememCodeFWAddr,
+                                     psDevInfo->psRGXFWCorememCodeMemDesc,
+                                     0, RFW_FWADDR_NOREF_FLAG);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", failFWCorememCodeMemDescFwAddr);
+       }
+       else
+       {
+               psDevInfo->sFWCorememCodeDevVAddrBase.uiAddr = 0;
+               psDevInfo->sFWCorememCodeFWAddr.ui32Addr = 0;
+       }
+
+       if (uiFWCorememDataLen != 0)
+       {
+               /*
+                * Set up Allocation for FW coremem data section
+                */
+               uiMemAllocFlags = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                               PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+                               PVRSRV_MEMALLOCFLAG_GPU_READABLE  |
+                               PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                               PVRSRV_MEMALLOCFLAG_CPU_READABLE  |
+                               PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                               PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+                               PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+                               PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PRIV_DATA))
+                               & RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp);
+
+               eError = RGXAllocateFWMemoryRegion(psDeviceNode,
+                               uiFWCorememDataLen,
+                               uiMemAllocFlags,
+                               "FwExCorememDataRegion",
+                               &psDevInfo->psRGXFWIfCorememDataStoreMemDesc);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "Failed to allocate fw coremem data mem, "
+                                "size: %"  IMG_INT64_FMTSPECd ", flags: %" PVRSRV_MEMALLOCFLAGS_FMTSPEC " (%u)",
+                                uiFWCorememDataLen,
+                                uiMemAllocFlags,
+                                eError));
+                       goto failFWCorememDataMemDescAlloc;
+               }
+
+               eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+                               &psDevInfo->sFWCorememDataStoreDevVAddrBase);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "Failed to acquire devVAddr for fw coremem mem data (%u)",
+                                eError));
+                       goto failFWCorememDataMemDescAqDevVirt;
+               }
+
+               eError = RGXSetFirmwareAddress(&psDevInfo->sFWCorememDataStoreFWAddr,
+                               psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+                               0, RFW_FWADDR_NOREF_FLAG);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", failFWCorememDataMemDescFwAddr);
+       }
+       else
+       {
+               psDevInfo->sFWCorememDataStoreDevVAddrBase.uiAddr = 0;
+               psDevInfo->sFWCorememDataStoreFWAddr.ui32Addr = 0;
+       }
+
+       /* Free Dummy Pages */
+       if (psDummyMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDummyMemDesc);
+       }
+
+       return PVRSRV_OK;
+
+failFWCorememDataMemDescFwAddr:
+failFWCorememDataMemDescAqDevVirt:
+       if (uiFWCorememDataLen != 0)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfCorememDataStoreMemDesc);
+               psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL;
+       }
+failFWCorememDataMemDescAlloc:
+failFWCorememCodeMemDescFwAddr:
+failFWCorememCodeMemDescAqDevVirt:
+       if (uiFWCorememCodeLen != 0)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCorememCodeMemDesc);
+               psDevInfo->psRGXFWCorememCodeMemDesc = NULL;
+       }
+failFWCorememCodeMemDescAlloc:
+failFWDataMemDescAqDevVirt:
+       DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWDataMemDesc);
+       psDevInfo->psRGXFWDataMemDesc = NULL;
+failFWDataMemDescAlloc:
+       if (psDummyMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDummyMemDesc);
+       }
+failDummyMemDescAlloc:
+failFWCodeMemDescAqDevVirt:
+       DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCodeMemDesc);
+       psDevInfo->psRGXFWCodeMemDesc = NULL;
+failFWCodeMemDescAlloc:
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) &&
+               (RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) == 32))
+       {
+               RGXFreeTrampoline(psDeviceNode);
+       }
+failTrampolineMemDescAlloc:
+       return eError;
+}
+
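+/*
+ * Note on the error paths above: the fail* labels form a reverse-order unwind,
+ * so a failure at any step frees only what was set up before it. A minimal
+ * sketch of the idiom (names are illustrative, not driver APIs):
+ *
+ *     eError = AllocA(&a);
+ *     if (eError != PVRSRV_OK) goto fail_a;
+ *     eError = AllocB(&b);
+ *     if (eError != PVRSRV_OK) goto fail_b;
+ *     return PVRSRV_OK;
+ * fail_b:
+ *     FreeA(a);
+ * fail_a:
+ *     return eError;
+ */
+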
+/*
+       AppHint parameter interface
+ */
+static
+PVRSRV_ERROR RGXFWTraceQueryFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                   const void *psPrivate,
+                                   IMG_UINT32 *pui32Value)
+{
+       PVRSRV_ERROR eResult;
+
+       eResult = PVRSRVRGXFWDebugQueryFWLogKM(NULL, psDeviceNode, pui32Value);
+       *pui32Value &= RGXFWIF_LOG_TYPE_GROUP_MASK;
+       return eResult;
+}
+
+static
+PVRSRV_ERROR RGXFWTraceQueryLogType(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                   const void *psPrivate,
+                                   IMG_UINT32 *pui32Value)
+{
+       PVRSRV_ERROR eResult;
+
+       eResult = PVRSRVRGXFWDebugQueryFWLogKM(NULL, psDeviceNode, pui32Value);
+       if (PVRSRV_OK == eResult)
+       {
+               if (*pui32Value & RGXFWIF_LOG_TYPE_TRACE)
+               {
+                       *pui32Value = 0; /* Trace */
+               }
+               else
+               {
+                       *pui32Value = 1; /* TBI */
+               }
+       }
+       return eResult;
+}
+
+static
+PVRSRV_ERROR RGXFWTraceSetFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                 const void *psPrivate,
+                                 IMG_UINT32 ui32Value)
+{
+       PVRSRV_ERROR eResult;
+       IMG_UINT32 ui32RGXFWLogType;
+
+       eResult = RGXFWTraceQueryLogType(psDeviceNode, NULL, &ui32RGXFWLogType);
+       if (PVRSRV_OK == eResult)
+       {
+               if (0 == ui32RGXFWLogType)
+               {
+                       BITMASK_SET(ui32Value, RGXFWIF_LOG_TYPE_TRACE);
+               }
+               eResult = PVRSRVRGXFWDebugSetFWLogKM(NULL, psDeviceNode, ui32Value);
+       }
+       return eResult;
+}
+
+static
+PVRSRV_ERROR RGXFWTraceSetLogType(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                  const void *psPrivate,
+                                  IMG_UINT32 ui32Value)
+{
+       PVRSRV_ERROR eResult;
+       IMG_UINT32 ui32RGXFWLogType = ui32Value;
+
+       eResult = RGXFWTraceQueryFilter(psDeviceNode, NULL, &ui32RGXFWLogType);
+       if (PVRSRV_OK != eResult)
+       {
+               return eResult;
+       }
+
+       /* 0 - trace, 1 - tbi */
+       if (0 == ui32Value)
+       {
+               BITMASK_SET(ui32RGXFWLogType, RGXFWIF_LOG_TYPE_TRACE);
+       }
+#if defined(SUPPORT_TBI_INTERFACE)
+       else if (1 == ui32Value)
+       {
+               BITMASK_UNSET(ui32RGXFWLogType, RGXFWIF_LOG_TYPE_TRACE);
+       }
+#endif
+       else
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Invalid parameter %u specified to set FW log type AppHint.",
+                        __func__, ui32Value));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       eResult = PVRSRVRGXFWDebugSetFWLogKM(NULL, psDeviceNode, ui32RGXFWLogType);
+       return eResult;
+}
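+
+/*
+ * The four handlers above back two AppHints: EnableLogGroup (the log group
+ * filter) and FirmwareLogType (0 = trace, 1 = TBI). RGXFWTraceSetFilter
+ * re-applies the TRACE bit when the current log type is trace, and
+ * RGXFWTraceSetLogType keeps the existing group filter while toggling the
+ * TRACE bit. Both pairs are registered against the device in RGXInitFirmware()
+ * below.
+ */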
+
+#if defined(DEBUG)
+static
+PVRSRV_ERROR RGXQueryFWPoisonOnFree(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                       const void *psPrivate,
+                                                                       IMG_BOOL *pbValue)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+
+       *pbValue = (PVRSRV_MEMALLOCFLAG_POISON_ON_FREE == psDevInfo->uiFWPoisonOnFreeFlag)
+               ? IMG_TRUE
+               : IMG_FALSE;
+       return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR RGXSetFWPoisonOnFree(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                       const void *psPrivate,
+                                                                       IMG_BOOL bValue)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+       psDevInfo->uiFWPoisonOnFreeFlag = bValue
+                       ? PVRSRV_MEMALLOCFLAG_POISON_ON_FREE
+                       : 0ULL;
+
+       return PVRSRV_OK;
+}
+#endif
+
+/*
+ * RGXInitFirmware
+ */
+PVRSRV_ERROR
+RGXInitFirmware(PVRSRV_DEVICE_NODE       *psDeviceNode,
+               IMG_BOOL                 bEnableSignatureChecks,
+               IMG_UINT32               ui32SignatureChecksBufSize,
+               IMG_UINT32               ui32HWPerfFWBufSizeKB,
+               IMG_UINT64               ui64HWPerfFilter,
+               IMG_UINT32               ui32ConfigFlags,
+               IMG_UINT32               ui32LogType,
+               IMG_UINT32               ui32FilterFlags,
+               IMG_UINT32               ui32JonesDisableMask,
+               IMG_UINT32               ui32HWRDebugDumpLimit,
+               IMG_UINT32               ui32HWPerfCountersDataSize,
+               IMG_UINT32               *pui32TPUTrilinearFracMask,
+               RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf,
+               FW_PERF_CONF             eFirmwarePerf,
+               IMG_UINT32               ui32KCCBSizeLog2,
+               IMG_UINT32               ui32ConfigFlagsExt,
+               IMG_UINT32               ui32FwOsCfgFlags)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+#if defined(DEBUG)
+       void *pvAppHintState = NULL;
+       IMG_UINT32 ui32AppHintDefault;
+       IMG_BOOL bEnableFWPoisonOnFree = IMG_FALSE;
+#endif
+
+       eError = RGXSetupFirmware(psDeviceNode,
+                       bEnableSignatureChecks,
+                       ui32SignatureChecksBufSize,
+                       ui32HWPerfFWBufSizeKB,
+                       ui64HWPerfFilter,
+                       ui32ConfigFlags,
+                       ui32ConfigFlagsExt,
+                       ui32FwOsCfgFlags,
+                       ui32LogType,
+                       ui32FilterFlags,
+                       ui32JonesDisableMask,
+                       ui32HWRDebugDumpLimit,
+                       ui32HWPerfCountersDataSize,
+                       pui32TPUTrilinearFracMask,
+                       eRGXRDPowerIslandingConf,
+                       eFirmwarePerf,
+                       ui32KCCBSizeLog2);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "PVRSRVRGXInitFirmwareKM: RGXSetupFirmware failed (%u)",
+                        eError));
+               goto failed_init_firmware;
+       }
+
+       if (!PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableLogGroup,
+                                                   RGXFWTraceQueryFilter,
+                                                   RGXFWTraceSetFilter,
+                                                   psDeviceNode,
+                                                   NULL);
+               PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_FirmwareLogType,
+                                                   RGXFWTraceQueryLogType,
+                                                   RGXFWTraceSetLogType,
+                                                   psDeviceNode,
+                                                   NULL);
+       }
+
+#if defined(DEBUG)
+       OSCreateKMAppHintState(&pvAppHintState);
+
+       ui32AppHintDefault = PVRSRV_APPHINT_ENABLEFWPOISONONFREE;
+       OSGetKMAppHintBOOL(psDeviceNode,
+                       pvAppHintState,
+                       EnableFWPoisonOnFree,
+                       &ui32AppHintDefault,
+                       &bEnableFWPoisonOnFree);
+
+       OSFreeKMAppHintState(pvAppHintState);
+
+       PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_EnableFWPoisonOnFree,
+                                          RGXQueryFWPoisonOnFree,
+                                          RGXSetFWPoisonOnFree,
+                                          psDeviceNode,
+                                          NULL);
+
+       psDevInfo->uiFWPoisonOnFreeFlag = bEnableFWPoisonOnFree
+                       ? PVRSRV_MEMALLOCFLAG_POISON_ON_FREE
+                       : 0ULL;
+#else
+       psDevInfo->uiFWPoisonOnFreeFlag = 0ULL;
+#endif
+
+       psDevInfo->ui32ClockSource = PVRSRV_APPHINT_TIMECORRCLOCK;
+       psDevInfo->ui32LastClockSource = PVRSRV_APPHINT_TIMECORRCLOCK;
+
+       return PVRSRV_OK;
+
+failed_init_firmware:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+/* See device.h for function declaration */
+static PVRSRV_ERROR RGXAllocUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                        DEVMEM_MEMDESC **psMemDesc,
+                                                                        IMG_UINT32 *puiSyncPrimVAddr,
+                                                                        IMG_UINT32 *puiSyncPrimBlockSize)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       PVRSRV_ERROR eError;
+       RGXFWIF_DEV_VIRTADDR pFirmwareAddr;
+       IMG_DEVMEM_SIZE_T uiUFOBlockSize = sizeof(IMG_UINT32);
+       IMG_DEVMEM_ALIGN_T ui32UFOBlockAlign = sizeof(IMG_UINT32);
+       IMG_UINT32 ui32CoherencyFlag = 0;
+
+       psDevInfo = psDeviceNode->pvDevice;
+
+       /* Size and align are 'expanded' because we request an Exportalign allocation */
+       eError = DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap),
+                       &uiUFOBlockSize,
+                       &ui32UFOBlockAlign);
+       if (eError != PVRSRV_OK)
+       {
+               goto e0;
+       }
+
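+       /*
+        * Choose the cache mode for the UFO block: request a fully coherent
+        * mapping only when snooping is available in both directions (device
+        * cache and CPU cache), otherwise fall back to an uncached mapping.
+        */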
+       if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig) &&
+               PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig))
+       {
+               ui32CoherencyFlag = PVRSRV_MEMALLOCFLAG_CACHE_COHERENT;
+       }
+       else
+       {
+               ui32CoherencyFlag = PVRSRV_MEMALLOCFLAG_UNCACHED;
+       }
+
+       eError = DevmemFwAllocateExportable(psDeviceNode,
+                                                                               uiUFOBlockSize,
+                                                                               ui32UFOBlockAlign,
+                                                                               PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN) |
+                                                                               PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                                                                               PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+                                                                               PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+                                                                               PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                                                                               PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                                                                               PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+                                                                               PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                                                                               ui32CoherencyFlag,
+                                                                               "FwExUFOBlock",
+                                                                               psMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               goto e0;
+       }
+
+       eError = RGXSetFirmwareAddress(&pFirmwareAddr, *psMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+       PVR_GOTO_IF_ERROR(eError, e1);
+
+       *puiSyncPrimVAddr = pFirmwareAddr.ui32Addr;
+       *puiSyncPrimBlockSize = TRUNCATE_64BITS_TO_32BITS(uiUFOBlockSize);
+
+       return PVRSRV_OK;
+
+e1:
+       DevmemFwUnmapAndFree(psDevInfo, *psMemDesc);
+e0:
+       return eError;
+}
+
+/* See device.h for function declaration */
+static void RGXFreeUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                       DEVMEM_MEMDESC *psMemDesc)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       /*
+               If the system has snooping of the device cache then the UFO block
+               might be in the cache, so we need to flush it out before freeing
+               the memory.
+
+               When the device is being shut down/destroyed we don't care any more:
+               several data structures needed to issue a flush have already been
+               destroyed.
+        */
+       if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig) &&
+               psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_DEINIT)
+       {
+               RGXFWIF_KCCB_CMD sFlushInvalCmd;
+               PVRSRV_ERROR eError;
+               IMG_UINT32 ui32kCCBCommandSlot;
+
+               /* Schedule the SLC flush command ... */
+#if defined(PDUMP)
+               PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                     "Submit SLC flush and invalidate");
+#endif
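+               /* Request a flush plus invalidate of the whole SLC rather than a
+                * single DM context: bInval asks for the invalidate and the context
+                * address is left at zero. */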
+               sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
+               sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_TRUE;
+               sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE;
+               sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext.ui32Addr = 0;
+
+               eError = RGXSendCommandWithPowLockAndGetKCCBSlot(psDevInfo,
+                                                                                                                &sFlushInvalCmd,
+                                                                                                                PDUMP_FLAGS_CONTINUOUS,
+                                                                                                                &ui32kCCBCommandSlot);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to schedule SLC flush command with error (%u)",
+                                __func__,
+                                eError));
+               }
+               else
+               {
+                       /* Wait for the SLC flush to complete */
+                       eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: SLC flush and invalidate aborted with error (%u)",
+                                        __func__,
+                                        eError));
+                       }
+                       else if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] &
+                                                         RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE))
+                       {
+                               PVR_DPF((PVR_DBG_WARNING, "%s: FW poll on a HW operation failed", __func__));
+                       }
+               }
+       }
+
+       RGXUnsetFirmwareAddress(psMemDesc);
+       DevmemFwUnmapAndFree(psDevInfo, psMemDesc);
+}
+
+static void DevPart2DeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice;
+
+       psDevInfo->bDevInit2Done = IMG_FALSE;
+
+#if defined(RGX_FEATURE_COMPUTE_ONLY_BIT_MASK)
+       if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE_ONLY))
+#endif
+       {
+               if ((psDevInfo->hTQUSCSharedMem != NULL) &&
+                   (psDevInfo->hTQCLISharedMem != NULL))
+               {
+                       PVRSRVTQUnloadShaders(psDeviceNode);
+               }
+       }
+
+#if !defined(NO_HARDWARE)
+       if (psDevInfo->pvLISRData != NULL)
+       {
+               (void) SysUninstallDeviceLISR(psDevInfo->pvLISRData);
+       }
+       if (psDevInfo->pvMISRData != NULL)
+       {
+               (void) OSUninstallMISR(psDevInfo->pvMISRData);
+       }
+       if (psDevInfo->hProcessQueuesMISR != NULL)
+       {
+               (void) OSUninstallMISR(psDevInfo->hProcessQueuesMISR);
+       }
+       if (psDevInfo->pvAPMISRData != NULL)
+       {
+               (void) OSUninstallMISR(psDevInfo->pvAPMISRData);
+       }
+       if (psDeviceNode->hCmdCompNotify != NULL)
+       {
+               /* Cancel notifications to this device */
+               PVRSRVUnregisterCmdCompleteNotify(psDeviceNode->hCmdCompNotify);
+               psDeviceNode->hCmdCompNotify = NULL;
+       }
+#endif /* !NO_HARDWARE */
+
+       /* Remove the device from the power manager */
+       PVRSRVRemovePowerDevice(psDeviceNode);
+
+       psDevInfo->pfnGetGpuUtilStats = NULL;
+       if (psDevInfo->hGPUUtilLock != NULL)
+       {
+               OSLockDestroy(psDevInfo->hGPUUtilLock);
+       }
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) &&
+               (psDevInfo->hNMILock != NULL))
+       {
+               OSLockDestroy(psDevInfo->hNMILock);
+       }
+
+       if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) &&
+               (psDevInfo->hMMUCtxUnregLock != NULL))
+       {
+               OSLockDestroy(psDevInfo->hMMUCtxUnregLock);
+       }
+
+       if (psDevInfo->hDebugFaultInfoLock != NULL)
+       {
+               OSLockDestroy(psDevInfo->hDebugFaultInfoLock);
+       }
+
+       /* De-init Freelists/ZBuffers... */
+       if (psDevInfo->hLockFreeList != NULL)
+       {
+               OSLockDestroy(psDevInfo->hLockFreeList);
+       }
+
+       if (psDevInfo->hLockZSBuffer != NULL)
+       {
+               OSLockDestroy(psDevInfo->hLockZSBuffer);
+       }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       /* De-init work estimation lock */
+       if (psDevInfo->hWorkEstLock != NULL)
+       {
+               OSLockDestroy(psDevInfo->hWorkEstLock);
+       }
+#endif
+
+       /* Free DVFS Table */
+       if (psDevInfo->psGpuDVFSTable != NULL)
+       {
+               OSFreeMem(psDevInfo->psGpuDVFSTable);
+               psDevInfo->psGpuDVFSTable = NULL;
+       }
+}
+
+/*
+       DevDeInitRGX
+ */
+PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_RGXDEV_INFO              *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice;
+       PVRSRV_ERROR                    eError;
+       DEVICE_MEMORY_INFO              *psDevMemoryInfo;
+       IMG_UINT32              ui32Temp=0;
+
+       if (!psDevInfo)
+       {
+               /* Can happen if DevInitRGX failed */
+               PVR_DPF((PVR_DBG_ERROR, "DevDeInitRGX: Null DevInfo"));
+               return PVRSRV_OK;
+       }
+
+       if (psDevInfo->psRGXFWIfOsInit)
+       {
+               KM_SET_OS_CONNECTION(OFFLINE, psDevInfo);
+       }
+
+       DeviceDepBridgeDeInit(psDevInfo);
+
+#if defined(PDUMP)
+       DevmemIntFreeDefBackingPage(psDeviceNode,
+                                                               &psDeviceNode->sDummyPage,
+                                                               DUMMY_PAGE);
+       DevmemIntFreeDefBackingPage(psDeviceNode,
+                                                               &psDeviceNode->sDevZeroPage,
+                                                               DEV_ZERO_PAGE);
+#endif
+
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+       if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+       {
+               OSAtomicWrite(&psDeviceNode->sDummyPage.atRefCounter, 0);
+               PVR_UNREFERENCED_PARAMETER(ui32Temp);
+       }
+       else
+#endif
+       {
+               /*Delete the Dummy page related info */
+               ui32Temp = (IMG_UINT32)OSAtomicRead(&psDeviceNode->sDummyPage.atRefCounter);
+               if (0 != ui32Temp)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Dummy page reference counter is non zero (%u)",
+                                __func__,
+                                ui32Temp));
+                       PVR_ASSERT(0);
+               }
+       }
+
+       /* Delete the Zero page related info */
+       ui32Temp = (IMG_UINT32)OSAtomicRead(&psDeviceNode->sDevZeroPage.atRefCounter);
+       if (0 != ui32Temp)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Zero page reference counter is non zero (%u)",
+                        __func__,
+                        ui32Temp));
+       }
+
+#if defined(PDUMP)
+       if (NULL != psDeviceNode->sDummyPage.hPdumpPg)
+       {
+               PDUMPCOMMENT(psDeviceNode, "Error dummy page handle is still active");
+       }
+
+       if (NULL != psDeviceNode->sDevZeroPage.hPdumpPg)
+       {
+               PDUMPCOMMENT(psDeviceNode, "Error Zero page handle is still active");
+       }
+#endif
+
+       /* The lock type needs to be a dispatch type here because it can be acquired from the MISR (Z-buffer) path */
+       OSLockDestroy(psDeviceNode->sDummyPage.psPgLock);
+
+       /* Destroy the zero page lock */
+       OSLockDestroy(psDeviceNode->sDevZeroPage.psPgLock);
+
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+       OSLockDestroy(psDevInfo->hCounterDumpingLock);
+#endif
+
+       RGXDeInitMultiCoreInfo(psDeviceNode);
+
+       /* Unregister debug request notifiers first as they could depend on anything. */
+
+       RGXDebugDeinit(psDevInfo);
+
+       /* De-initialise in reverse order, so stage 2 init is undone first. */
+       if (psDevInfo->bDevInit2Done)
+       {
+               DevPart2DeInitRGX(psDeviceNode);
+       }
+
+       /* Unregister MMU related stuff */
+       eError = RGXMMUInit_Unregister(psDeviceNode);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "DevDeInitRGX: Failed RGXMMUInit_Unregister (0x%x)",
+                        eError));
+       }
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+       {
+               /* Unregister MMU related stuff */
+               eError = RGXMipsMMUInit_Unregister(psDeviceNode);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "DevDeInitRGX: Failed RGXMipsMMUInit_Unregister (0x%x)",
+                                eError));
+               }
+       }
+
+       /* UnMap Regs */
+       if (psDevInfo->pvRegsBaseKM != NULL)
+       {
+#if !defined(NO_HARDWARE)
+               OSUnMapPhysToLin((void __force *) psDevInfo->pvRegsBaseKM,
+                                                psDevInfo->ui32RegSize);
+#endif /* !NO_HARDWARE */
+               psDevInfo->pvRegsBaseKM = NULL;
+       }
+
+#if 0 /* not required at this time */
+       if (psDevInfo->hTimer)
+       {
+               eError = OSRemoveTimer(psDevInfo->hTimer);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "DevDeInitRGX: Failed to remove timer"));
+                       return eError;
+               }
+               psDevInfo->hTimer = NULL;
+       }
+#endif
+
+       psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+
+       RGXDeInitHeaps(psDevMemoryInfo);
+
+       if (psDevInfo->psRGXFWCodeMemDesc)
+       {
+               /* Free fw code */
+               PDUMPCOMMENT(psDeviceNode, "Freeing FW code memory");
+               DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc);
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCodeMemDesc);
+               psDevInfo->psRGXFWCodeMemDesc = NULL;
+       }
+       else if (!PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               PVR_DPF((PVR_DBG_WARNING, "No firmware code memory to free"));
+       }
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) &&
+               (RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) == 32))
+       {
+               if (psDevInfo->psTrampoline->sPages.u.pvHandle)
+               {
+                       /* Free trampoline region */
+                       PDUMPCOMMENT(psDeviceNode, "Freeing trampoline memory");
+                       RGXFreeTrampoline(psDeviceNode);
+               }
+       }
+
+       if (psDevInfo->psRGXFWDataMemDesc)
+       {
+               /* Free fw data */
+               PDUMPCOMMENT(psDeviceNode, "Freeing FW data memory");
+               DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWDataMemDesc);
+               psDevInfo->psRGXFWDataMemDesc = NULL;
+       }
+       else if (!PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               PVR_DPF((PVR_DBG_WARNING, "No firmware data memory to free"));
+       }
+
+       if (psDevInfo->psRGXFWCorememCodeMemDesc)
+       {
+               /* Free fw core mem code */
+               PDUMPCOMMENT(psDeviceNode, "Freeing FW coremem code memory");
+               DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc);
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCorememCodeMemDesc);
+               psDevInfo->psRGXFWCorememCodeMemDesc = NULL;
+       }
+
+       if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc)
+       {
+               /* Free fw core mem data */
+               PDUMPCOMMENT(psDeviceNode, "Freeing FW coremem data store memory");
+               DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc);
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfCorememDataStoreMemDesc);
+               psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL;
+       }
+
+       /*
+          Free the firmware allocations.
+        */
+       RGXFreeFirmware(psDevInfo);
+
+       /* De-initialise non-device specific (TL) users of RGX device memory */
+       RGXHWPerfDeinit(psDevInfo);
+       RGXHWPerfHostDeInit(psDevInfo);
+       eError = HTBDeInit();
+       PVR_LOG_IF_ERROR(eError, "HTBDeInit");
+
+       RGXDeInitDestroyFWKernelMemoryContext(psDeviceNode);
+
+       /* destroy the stalled CCB locks */
+       OSLockDestroy(psDevInfo->hCCBRecoveryLock);
+       OSLockDestroy(psDevInfo->hCCBStallCheckLock);
+
+       /* destroy the context list locks */
+       OSLockDestroy(psDevInfo->sRegCongfig.hLock);
+       OSLockDestroy(psDevInfo->hBPLock);
+       OSLockDestroy(psDevInfo->hRGXFWIfBufInitLock);
+       OSWRLockDestroy(psDevInfo->hRenderCtxListLock);
+       OSWRLockDestroy(psDevInfo->hComputeCtxListLock);
+       OSWRLockDestroy(psDevInfo->hTransferCtxListLock);
+       OSWRLockDestroy(psDevInfo->hTDMCtxListLock);
+       OSWRLockDestroy(psDevInfo->hKickSyncCtxListLock);
+       OSWRLockDestroy(psDevInfo->hMemoryCtxListLock);
+       OSSpinLockDestroy(psDevInfo->hLockKCCBDeferredCommandsList);
+       OSWRLockDestroy(psDevInfo->hCommonCtxtListLock);
+
+       /* Free device BVNC string */
+       if (NULL != psDevInfo->sDevFeatureCfg.pszBVNCString)
+       {
+               OSFreeMem(psDevInfo->sDevFeatureCfg.pszBVNCString);
+       }
+
+       /* DeAllocate devinfo */
+       OSFreeMem(psDevInfo);
+
+       psDeviceNode->pvDevice = NULL;
+
+       return PVRSRV_OK;
+}
+
+#if defined(PDUMP)
+static
+PVRSRV_ERROR RGXResetPDump(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)(psDeviceNode->pvDevice);
+
+       psDevInfo->bDumpedKCCBCtlAlready = IMG_FALSE;
+
+       return PVRSRV_OK;
+}
+#endif /* PDUMP */
+
+/* Takes a log2 page size parameter and calculates a suitable page size
+ * for the RGX heaps. Returns 0 if the parameter is invalid. */
+static INLINE IMG_UINT32 RGXHeapDerivePageSize(IMG_UINT32 uiLog2PageSize)
+{
+       IMG_BOOL bFound = IMG_FALSE;
+
+       /* OS page shift must be at least RGX_HEAP_4KB_PAGE_SHIFT,
+        * at most RGX_HEAP_2MB_PAGE_SHIFT, non-zero and a power of two */
+       if (uiLog2PageSize == 0U ||
+           (uiLog2PageSize < RGX_HEAP_4KB_PAGE_SHIFT) ||
+           (uiLog2PageSize > RGX_HEAP_2MB_PAGE_SHIFT))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Provided incompatible log2 page size %u",
+                               __func__,
+                               uiLog2PageSize));
+               PVR_ASSERT(0);
+               return 0;
+       }
+
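+       /*
+        * Walk down from the given shift until it matches one of the supported RGX
+        * page sizes. Illustrative example: a 16KB OS page (shift 14) is used as-is,
+        * while a 32KB OS page (shift 15) has no RGX equivalent and falls back to
+        * 16KB.
+        */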
+       do
+       {
+               switch (uiLog2PageSize)
+               {
+                       case RGX_HEAP_4KB_PAGE_SHIFT:
+                       case RGX_HEAP_16KB_PAGE_SHIFT:
+                       case RGX_HEAP_64KB_PAGE_SHIFT:
+                       case RGX_HEAP_256KB_PAGE_SHIFT:
+                       case RGX_HEAP_1MB_PAGE_SHIFT:
+                       case RGX_HEAP_2MB_PAGE_SHIFT:
+                               /* All good, RGX page size equals given page size
+                                * => use it as default for heaps */
+                               bFound = IMG_TRUE;
+                               break;
+                       default:
+                               /* We have to fall back to a smaller device
+                                * page size than given page size because there
+                                * is no exact match for any supported size. */
+                               uiLog2PageSize -= 1U;
+                               break;
+               }
+       } while (!bFound);
+
+       return uiLog2PageSize;
+}
+
+/* First 16-bits define possible types */
+#define HEAP_INST_VALUE_MASK     (0xFFFF)
+#define HEAP_INST_DEFAULT_VALUE  (1U)  /* Indicates either that the heap is always instantiated by default (pfn = NULL)
+                                             OR
+                                             that this is the default configuration of a heap which also has a BRN alternative */
+#define HEAP_INST_BRN_DEP_VALUE  (2U)  /* The inclusion of this heap is dependent on the BRN being present */
+#define HEAP_INST_FEAT_DEP_VALUE (3U)  /* The inclusion of this heap is dependent on the feature being present */
+#define HEAP_INST_BRN_ALT_VALUE  (4U)  /* This entry is a possible alternative to the default, determined by a BRN */
+#define HEAP_INST_FEAT_ALT_VALUE (5U)  /* This entry is a possible alternative to the default, determined by a feature define */
+
+/* Latter 16-bits define other flags we may need */
+#define HEAP_INST_NON4K_FLAG     (1 << 16U) /* This is a possible NON4K Entry and we should use the device
+                                               NON4K size when instantiating */
+
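+/*
+ * The low 16 bits select exactly one instantiation rule and the upper bits add
+ * modifiers, e.g. a NON4K general heap entry uses
+ * (HEAP_INST_DEFAULT_VALUE | HEAP_INST_NON4K_FLAG), and a presence callback
+ * tests the rule with (ui32HeapInstanceFlags & HEAP_INST_VALUE_MASK).
+ */
+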
+typedef struct RGX_HEAP_INFO_TAG RGX_HEAP_INFO; // Forward declaration
+typedef IMG_BOOL (*PFN_IS_PRESENT)(PVRSRV_RGXDEV_INFO*, const RGX_HEAP_INFO*);
+
+struct RGX_HEAP_INFO_TAG
+{
+       IMG_CHAR           *pszName;
+       IMG_UINT64         ui64HeapBase;
+       IMG_DEVMEM_SIZE_T  uiHeapLength;
+       IMG_DEVMEM_SIZE_T  uiHeapReservedRegionLength;
+       IMG_UINT32         ui32Log2ImportAlignment;
+       PFN_IS_PRESENT     pfnIsHeapPresent;
+       IMG_UINT32         ui32HeapInstanceFlags;
+};
+
+/* Feature Present function prototypes */
+
+static IMG_BOOL BRN65273IsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo)
+{
+#if defined(FIX_HW_BRN_65273_BIT_MASK)
+       if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273))
+       {
+               return (((pksHeapInfo->ui32HeapInstanceFlags & HEAP_INST_VALUE_MASK) == HEAP_INST_BRN_ALT_VALUE) ||
+                       ((pksHeapInfo->ui32HeapInstanceFlags & HEAP_INST_VALUE_MASK) == HEAP_INST_BRN_DEP_VALUE)) ?
+                       IMG_TRUE : IMG_FALSE;
+       }
+       else
+#else
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+#endif
+       {
+               return ((pksHeapInfo->ui32HeapInstanceFlags & HEAP_INST_VALUE_MASK) == HEAP_INST_DEFAULT_VALUE) ? IMG_TRUE : IMG_FALSE;
+       }
+}
+
+static IMG_BOOL BRN63142IsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo)
+{
+       PVR_UNREFERENCED_PARAMETER(pksHeapInfo);
+
+#if defined(FIX_HW_BRN_63142_BIT_MASK)
+       if (RGX_IS_BRN_SUPPORTED(psDevInfo, 63142))
+       {
+               PVR_ASSERT((pksHeapInfo->ui64HeapBase & IMG_UINT64_C(0x3FFFFFFFF)) +
+                           pksHeapInfo->uiHeapLength == IMG_UINT64_C(0x400000000));
+
+               return IMG_TRUE;
+       }
+#else
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+#endif
+
+       return IMG_FALSE;
+}
+
+static IMG_BOOL FBCDescriptorIsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo)
+{
+       PVR_UNREFERENCED_PARAMETER(pksHeapInfo);
+
+       if (RGX_GET_FEATURE_VALUE(psDevInfo, FBC_MAX_DEFAULT_DESCRIPTORS))
+       {
+               return IMG_TRUE;
+       }
+
+       return IMG_FALSE;
+}
+
+static IMG_BOOL FBCLargeDescriptorIsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo)
+{
+       PVR_UNREFERENCED_PARAMETER(pksHeapInfo);
+
+       if (RGX_GET_FEATURE_VALUE(psDevInfo, FBC_MAX_LARGE_DESCRIPTORS))
+       {
+               return IMG_TRUE;
+       }
+
+       return IMG_FALSE;
+}
+
+static IMG_BOOL TextureStateIsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo)
+{
+       PVR_UNREFERENCED_PARAMETER(pksHeapInfo);
+#if defined(RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, BINDLESS_IMAGE_AND_TEXTURE_STATE))
+       {
+               return IMG_TRUE;
+       }
+#else
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+#endif
+       return IMG_FALSE;
+}
+
+static IMG_BOOL SignalSnoopingIsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo)
+{
+       PVR_UNREFERENCED_PARAMETER(pksHeapInfo);
+
+#if defined(RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SIGNAL_SNOOPING))
+       {
+               return IMG_TRUE;
+       }
+#else
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+#endif
+
+       return IMG_FALSE;
+}
+
+static IMG_BOOL FWBRN65101IsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo)
+{
+       /* Used to determine the correct table row to instantiate as a heap by comparing
+        * the heap size and base required at run time against the current table entry
+        */
+       IMG_UINT64 ui64MainSubHeapSize;
+
+       /* MIPS Firmware must reserve some space in its Host/Native heap for GPU memory mappings */
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && (!PVRSRV_VZ_MODE_IS(GUEST)))
+       {
+#if defined(FIX_HW_BRN_65101_BIT_MASK)
+               if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65101))
+               {
+                       ui64MainSubHeapSize = RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_BRN65101;
+               }
+               else
+#endif
+               {
+                       ui64MainSubHeapSize = RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_NORMAL;
+               }
+       }
+       else
+       {
+               ui64MainSubHeapSize = RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE;
+       }
+
+       /* Determine if we should include this entry based upon previous checks */
+       return (pksHeapInfo->uiHeapLength == ui64MainSubHeapSize &&
+               pksHeapInfo->ui64HeapBase == RGX_FIRMWARE_MAIN_HEAP_BASE) ?
+               IMG_TRUE : IMG_FALSE;
+}
+
+static IMG_BOOL FWVZConfigPresent(PVRSRV_RGXDEV_INFO* psDevInfo, const RGX_HEAP_INFO* pksHeapInfo)
+{
+       /* Used to determine the correct table row to instantiate as a heap by comparing
+        * the heap base required at run time against the current table entry
+        */
+
+       /* Determine if we should include this entry based upon previous checks */
+       return (pksHeapInfo->ui64HeapBase == RGX_FIRMWARE_CONFIG_HEAP_BASE) ? IMG_TRUE : IMG_FALSE;
+}
+
+/* Blueprint array. Note: not all heaps are available to clients */
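+/*
+ * Several heaps appear twice below, as a default row and a BRN_65273 alternative
+ * row sharing the same presence callback; BRN65273IsPresent() accepts exactly one
+ * of the two, so only one of them is instantiated on a given device. The BRN_DEP
+ * rows are instantiated only when the BRN applies.
+ */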
+
+static const RGX_HEAP_INFO gasRGXHeapLayoutApp[] =
+{
+       /* Name                             HeapBase                                 HeapLength                               HeapReservedRegionLength                     Log2ImportAlignment pfnPresent                   HeapInstanceFlags   */
+       {RGX_GENERAL_SVM_HEAP_IDENT,        RGX_GENERAL_SVM_HEAP_BASE,               RGX_GENERAL_SVM_HEAP_SIZE,               0,                                           0,                  NULL,                        HEAP_INST_DEFAULT_VALUE },
+       {RGX_GENERAL_HEAP_IDENT,            RGX_GENERAL_HEAP_BASE,                   RGX_GENERAL_HEAP_SIZE,                   (1 * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY), 0,                  BRN65273IsPresent,           HEAP_INST_DEFAULT_VALUE },
+       {RGX_GENERAL_HEAP_IDENT,            RGX_GENERAL_BRN_65273_HEAP_BASE,         RGX_GENERAL_BRN_65273_HEAP_SIZE,         (1 * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY), 0,                  BRN65273IsPresent,           HEAP_INST_BRN_ALT_VALUE },
+       {RGX_GENERAL_NON4K_HEAP_IDENT,      RGX_GENERAL_NON4K_HEAP_BASE,             RGX_GENERAL_NON4K_HEAP_SIZE,             0,                                           0,                  BRN65273IsPresent,           HEAP_INST_DEFAULT_VALUE | HEAP_INST_NON4K_FLAG },
+       {RGX_GENERAL_NON4K_HEAP_IDENT,      RGX_GENERAL_NON4K_BRN_65273_HEAP_BASE,   RGX_GENERAL_NON4K_BRN_65273_HEAP_SIZE,   0,                                           0,                  BRN65273IsPresent,           HEAP_INST_BRN_ALT_VALUE | HEAP_INST_NON4K_FLAG },
+       {RGX_PDSCODEDATA_HEAP_IDENT,        RGX_PDSCODEDATA_HEAP_BASE,               RGX_PDSCODEDATA_HEAP_SIZE,               (1 * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY), 0,                  BRN65273IsPresent,           HEAP_INST_DEFAULT_VALUE },
+       {RGX_PDSCODEDATA_HEAP_IDENT,        RGX_PDSCODEDATA_BRN_65273_HEAP_BASE,     RGX_PDSCODEDATA_BRN_65273_HEAP_SIZE,     (1 * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY), 0,                  BRN65273IsPresent,           HEAP_INST_BRN_ALT_VALUE },
+       {RGX_RGNHDR_BRN_63142_HEAP_IDENT,   RGX_RGNHDR_BRN_63142_HEAP_BASE,          RGX_RGNHDR_BRN_63142_HEAP_SIZE,          0,                                           0,                  BRN63142IsPresent,           HEAP_INST_BRN_DEP_VALUE },
+       {RGX_USCCODE_HEAP_IDENT,            RGX_USCCODE_HEAP_BASE,                   RGX_USCCODE_HEAP_SIZE,                   (1 * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY), 0,                  BRN65273IsPresent,           HEAP_INST_DEFAULT_VALUE },
+       {RGX_USCCODE_HEAP_IDENT,            RGX_USCCODE_BRN_65273_HEAP_BASE,         RGX_USCCODE_BRN_65273_HEAP_SIZE,         (1 * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY), 0,                  BRN65273IsPresent,           HEAP_INST_BRN_ALT_VALUE },
+       {RGX_TQ3DPARAMETERS_HEAP_IDENT,     RGX_TQ3DPARAMETERS_HEAP_BASE,            RGX_TQ3DPARAMETERS_HEAP_SIZE,            0,                                           0,                  BRN65273IsPresent,           HEAP_INST_DEFAULT_VALUE },
+       {RGX_TQ3DPARAMETERS_HEAP_IDENT,     RGX_TQ3DPARAMETERS_BRN_65273_HEAP_BASE,  RGX_TQ3DPARAMETERS_BRN_65273_HEAP_SIZE,  0,                                           0,                  BRN65273IsPresent,           HEAP_INST_BRN_ALT_VALUE },
+       {RGX_VK_CAPT_REPLAY_HEAP_IDENT,     RGX_VK_CAPT_REPLAY_HEAP_BASE,            RGX_VK_CAPT_REPLAY_HEAP_SIZE,            0,                                           0,                  NULL,                        HEAP_INST_DEFAULT_VALUE },
+       {RGX_SIGNALS_HEAP_IDENT,            RGX_SIGNALS_HEAP_BASE,                   RGX_SIGNALS_HEAP_SIZE,                   0,                                           0,                  SignalSnoopingIsPresent,     HEAP_INST_FEAT_DEP_VALUE},
+       {RGX_FBCDC_HEAP_IDENT,              RGX_FBCDC_HEAP_BASE,                     RGX_FBCDC_HEAP_SIZE,                     0,                                           0,                  FBCDescriptorIsPresent,      HEAP_INST_FEAT_DEP_VALUE},
+       {RGX_FBCDC_LARGE_HEAP_IDENT,        RGX_FBCDC_LARGE_HEAP_BASE,               RGX_FBCDC_LARGE_HEAP_SIZE,               0,                                           0,                  FBCLargeDescriptorIsPresent, HEAP_INST_FEAT_DEP_VALUE},
+       {RGX_CMP_MISSION_RMW_HEAP_IDENT,    RGX_CMP_MISSION_RMW_HEAP_BASE,           RGX_CMP_MISSION_RMW_HEAP_SIZE,           0,                                           0,                  NULL,                        HEAP_INST_DEFAULT_VALUE },
+       {RGX_CMP_SAFETY_RMW_HEAP_IDENT,     RGX_CMP_SAFETY_RMW_HEAP_BASE,            RGX_CMP_SAFETY_RMW_HEAP_SIZE,            0,                                           0,                  NULL,                        HEAP_INST_DEFAULT_VALUE },
+       {RGX_TEXTURE_STATE_HEAP_IDENT,      RGX_TEXTURE_STATE_HEAP_BASE,             RGX_TEXTURE_STATE_HEAP_SIZE,             0,                                           0,                  TextureStateIsPresent,       HEAP_INST_FEAT_DEP_VALUE},
+       {RGX_VISIBILITY_TEST_HEAP_IDENT,    RGX_VISIBILITY_TEST_HEAP_BASE,           RGX_VISIBILITY_TEST_HEAP_SIZE,           0,                                           0,                  BRN65273IsPresent,           HEAP_INST_DEFAULT_VALUE },
+       {RGX_VISIBILITY_TEST_HEAP_IDENT,    RGX_VISIBILITY_TEST_BRN_65273_HEAP_BASE, RGX_VISIBILITY_TEST_BRN_65273_HEAP_SIZE, 0,                                           0,                  BRN65273IsPresent,           HEAP_INST_BRN_ALT_VALUE },
+       {RGX_MMU_INIA_BRN_65273_HEAP_IDENT, RGX_MMU_INIA_BRN_65273_HEAP_BASE,        RGX_MMU_INIA_BRN_65273_HEAP_SIZE,        0,                                           0,                  BRN65273IsPresent,           HEAP_INST_BRN_DEP_VALUE },
+       {RGX_MMU_INIB_BRN_65273_HEAP_IDENT, RGX_MMU_INIB_BRN_65273_HEAP_BASE,        RGX_MMU_INIB_BRN_65273_HEAP_SIZE,        0,                                           0,                  BRN65273IsPresent,           HEAP_INST_BRN_DEP_VALUE }
+};
+
+static const RGX_HEAP_INFO gasRGXHeapLayoutFW[] =
+{
+       /* Name                          HeapBase                             HeapLength                                 HeapReservedRegionLength Log2ImportAlignment pfnIsHeapPresent     HeapInstanceFlags*/
+       {RGX_FIRMWARE_MAIN_HEAP_IDENT,   RGX_FIRMWARE_MAIN_HEAP_BASE,    RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE,            0,                       0,                  FWBRN65101IsPresent, HEAP_INST_DEFAULT_VALUE},
+       {RGX_FIRMWARE_MAIN_HEAP_IDENT,   RGX_FIRMWARE_MAIN_HEAP_BASE,    RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_NORMAL,   0,                       0,                  FWBRN65101IsPresent, HEAP_INST_DEFAULT_VALUE},
+       {RGX_FIRMWARE_MAIN_HEAP_IDENT,   RGX_FIRMWARE_MAIN_HEAP_BASE,    RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_BRN65101, 0,                       0,                  FWBRN65101IsPresent, HEAP_INST_BRN_ALT_VALUE},
+       {RGX_FIRMWARE_CONFIG_HEAP_IDENT, RGX_FIRMWARE_CONFIG_HEAP_BASE,  RGX_FIRMWARE_CONFIG_HEAP_SIZE,                  0,                       0,                  FWVZConfigPresent,   HEAP_INST_DEFAULT_VALUE},
+};
+
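+/*
+ * Only one of the three RGX_FIRMWARE_MAIN_HEAP_IDENT rows above is instantiated:
+ * FWBRN65101IsPresent() accepts the row whose length matches the main sub-heap
+ * size the device needs (MIPS with or without BRN65101, or the default size).
+ */
+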
+/* Generic counting method. */
+static void _CountRequiredHeaps(PVRSRV_RGXDEV_INFO  *psDevInfo,
+                                   const RGX_HEAP_INFO  pksHeapInfo[],
+                                   IMG_UINT32           ui32HeapListSize,
+                                   IMG_UINT32*          ui32HeapCount)
+{
+       IMG_UINT32 i;
+
+       /* Loop over the rows in the heap data array, using the callback to decide
+        * whether to include each heap
+        */
+       for (i = 0; i < ui32HeapListSize; i++)
+       {
+               const RGX_HEAP_INFO *psHeapInfo = &pksHeapInfo[i];
+
+               if (psHeapInfo->pfnIsHeapPresent)
+               {
+                       if (!psHeapInfo->pfnIsHeapPresent(psDevInfo, psHeapInfo))
+                       {
+                               /* We don't need to create this heap */
+                               continue;
+                       }
+               }
+
+               (*ui32HeapCount)++;
+       }
+}
+
+/* Generic heap instantiator */
+static void _InstantiateRequiredHeaps(PVRSRV_RGXDEV_INFO     *psDevInfo,
+                                         const RGX_HEAP_INFO     pksHeapInfo[],
+                                         IMG_UINT32              ui32HeapListSize,
+                                         DEVMEM_HEAP_BLUEPRINT **psDeviceMemoryHeapCursor)
+{
+       IMG_UINT32 i;
+       /* We now have a list of the heaps to include and so we should loop over this
+        * list and instantiate.
+        */
+       for (i = 0; i < ui32HeapListSize; i++)
+       {
+               IMG_UINT32 ui32Log2RgxDefaultPageShift = RGXHeapDerivePageSize(OSGetPageShift());
+               IMG_UINT32 ui32Log2DataPageSize = 0;
+
+               const RGX_HEAP_INFO *psHeapInfo = &pksHeapInfo[i];
+
+               if (psHeapInfo->pfnIsHeapPresent)
+               {
+                       if (!psHeapInfo->pfnIsHeapPresent(psDevInfo, psHeapInfo))
+                       {
+                               /* We don't need to create this heap */
+                               continue;
+                       }
+               }
+
+               if (psHeapInfo->ui32HeapInstanceFlags & HEAP_INST_NON4K_FLAG)
+               {
+                       ui32Log2DataPageSize = psDevInfo->ui32Log2Non4KPgSize;
+               }
+               else
+               {
+                       ui32Log2DataPageSize = ui32Log2RgxDefaultPageShift;
+               }
+
+               HeapCfgBlueprintInit(psHeapInfo->pszName,
+                                        psHeapInfo->ui64HeapBase,
+                                        psHeapInfo->uiHeapLength,
+                                        psHeapInfo->uiHeapReservedRegionLength,
+                                        ui32Log2DataPageSize,
+                                        psHeapInfo->ui32Log2ImportAlignment,
+                                        *psDeviceMemoryHeapCursor);
+
+               (*psDeviceMemoryHeapCursor)++;
+       }
+}
+
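+/*
+ * RGXInitHeaps() below drives these helpers as a two-pass setup: first
+ * _CountRequiredHeaps() sizes the blueprint array, then
+ * _InstantiateRequiredHeaps() fills it in using the same presence callbacks,
+ * so both passes agree on which table rows apply to the device.
+ */
+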
+static PVRSRV_ERROR RGXInitHeaps(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                    DEVICE_MEMORY_INFO *psNewMemoryInfo)
+{
+       PVRSRV_ERROR eError;
+       DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor;
+
+       IMG_UINT32 ui32HeapListSize = ARRAY_SIZE(gasRGXHeapLayoutApp);
+       IMG_UINT32 ui32FWHeapListSize = ARRAY_SIZE(gasRGXHeapLayoutFW);
+       IMG_UINT32 ui32CountedHeapSize;
+
+       IMG_UINT32 ui32HeapCount = 0;
+       IMG_UINT32 ui32FWHeapCount = 0;
+
+       /* Count heaps required for the app heaps */
+       _CountRequiredHeaps(psDevInfo,
+                               gasRGXHeapLayoutApp,
+                               ui32HeapListSize,
+                               &ui32HeapCount);
+
+       /* Count heaps required for the FW heaps */
+       _CountRequiredHeaps(psDevInfo,
+                               gasRGXHeapLayoutFW,
+                               ui32FWHeapListSize,
+                               &ui32FWHeapCount);
+
+       ui32CountedHeapSize = (ui32HeapCount + ui32FWHeapCount + RGX_NUM_OS_SUPPORTED);
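+       /* The blueprint array is over-allocated by RGX_NUM_OS_SUPPORTED slots so the
+        * raw guest firmware heaps created further down (VZ host only) fit without a
+        * reallocation; the assert after instantiation accounts for this slack. */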
+
+       psNewMemoryInfo->psDeviceMemoryHeap = OSAllocMem(sizeof(DEVMEM_HEAP_BLUEPRINT) * ui32CountedHeapSize);
+       PVR_LOG_GOTO_IF_NOMEM(psNewMemoryInfo->psDeviceMemoryHeap, eError, e0);
+
+       /* Initialise the heaps */
+       psDeviceMemoryHeapCursor = psNewMemoryInfo->psDeviceMemoryHeap;
+
+       /* Instantiate App Heaps */
+       _InstantiateRequiredHeaps(psDevInfo,
+                                     gasRGXHeapLayoutApp,
+                                     ui32HeapListSize,
+                                     &psDeviceMemoryHeapCursor);
+
+       /* Instantiate FW Heaps */
+       _InstantiateRequiredHeaps(psDevInfo,
+                                     gasRGXHeapLayoutFW,
+                                     ui32FWHeapListSize,
+                                     &psDeviceMemoryHeapCursor);
+
+       /* set the heap count */
+       psNewMemoryInfo->ui32HeapCount = (IMG_UINT32)(psDeviceMemoryHeapCursor - psNewMemoryInfo->psDeviceMemoryHeap);
+
+       /* Check we have allocated the correct # of heaps, minus any VZ heaps as these
+        * have not been created at this point
+        */
+       PVR_ASSERT(psNewMemoryInfo->ui32HeapCount == (ui32CountedHeapSize - RGX_NUM_OS_SUPPORTED));
+
+       /*
+          In the new heap setup we initialise two configurations:
+               1 - One is for the firmware only (index 1 in the array)
+                       a. This primarily contains the firmware heaps.
+                       b. It also gains additional guest OSID firmware heap(s),
+                               but only if the number of supported firmware OSIDs is greater than 1.
+               2 - The other is for clients only (index 0 in the array)
+                       a. This contains all the other client heaps.
+        */
+       psNewMemoryInfo->uiNumHeapConfigs = 2;
+       psNewMemoryInfo->psDeviceMemoryHeapConfigArray = OSAllocMem(sizeof(DEVMEM_HEAP_CONFIG) * psNewMemoryInfo->uiNumHeapConfigs);
+       PVR_LOG_GOTO_IF_NOMEM(psNewMemoryInfo->psDeviceMemoryHeapConfigArray, eError, e1);
+
+       psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].pszName = "Default Heap Configuration";
+       psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].uiNumHeaps = psNewMemoryInfo->ui32HeapCount - RGX_FIRMWARE_NUMBER_OF_FW_HEAPS;
+       psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].psHeapBlueprintArray = psNewMemoryInfo->psDeviceMemoryHeap;
+
+       psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].pszName = "Firmware Heap Configuration";
+       psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps = RGX_FIRMWARE_NUMBER_OF_FW_HEAPS;
+       psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].psHeapBlueprintArray = psDeviceMemoryHeapCursor - RGX_FIRMWARE_NUMBER_OF_FW_HEAPS;
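+       /* The firmware heaps were instantiated last, so the cursor currently points
+        * just past them; stepping back RGX_FIRMWARE_NUMBER_OF_FW_HEAPS entries gives
+        * the start of the firmware blueprints used by config index 1. */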
+
+#if (RGX_NUM_OS_SUPPORTED > 1)
+       if (PVRSRV_VZ_MODE_IS(HOST))
+       {
+               IMG_UINT32 ui32OSid;
+
+               /* Create additional raw firmware heaps */
+               for (ui32OSid = RGX_FIRST_RAW_HEAP_OSID; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++)
+               {
+                       if (RGXInitFwRawHeap(psDeviceMemoryHeapCursor, ui32OSid) != PVRSRV_OK)
+                       {
+                               /* if any allocation fails, free previously allocated heaps and abandon initialisation */
+                               for (; ui32OSid > RGX_FIRST_RAW_HEAP_OSID; ui32OSid--)
+                               {
+                                       RGXDeInitFwRawHeap(psDeviceMemoryHeapCursor);
+                                       psDeviceMemoryHeapCursor--;
+                               }
+                               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto e1;
+                       }
+
+                       /* Append additional firmware heaps to host driver firmware context heap configuration */
+                       psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps += 1;
+
+                       /* advance to the next heap */
+                       psDeviceMemoryHeapCursor++;
+               }
+       }
+#endif /* (RGX_NUM_OS_SUPPORTED > 1) */
+
+       return PVRSRV_OK;
+e1:
+       OSFreeMem(psNewMemoryInfo->psDeviceMemoryHeap);
+e0:
+       return eError;
+}
+
+static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo)
+{
+#if (RGX_NUM_OS_SUPPORTED > 1)
+       if (PVRSRV_VZ_MODE_IS(HOST))
+       {
+               IMG_UINT32 ui32OSid;
+               DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor = psDevMemoryInfo->psDeviceMemoryHeap;
+
+               /* Delete all guest firmware heaps */
+               for (ui32OSid = RGX_FIRST_RAW_HEAP_OSID; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++)
+               {
+                       RGXDeInitFwRawHeap(psDeviceMemoryHeapCursor);
+                       psDeviceMemoryHeapCursor++;
+               }
+       }
+#endif /* (RGX_NUM_OS_SUPPORTED > 1) */
+
+       OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeapConfigArray);
+       OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeap);
+}
+
+static PVRSRV_ERROR RGXPhysMemDeviceHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+       PHYS_HEAP_CONFIG *psFwMainConfig = FindPhysHeapConfig(psDeviceNode->psDevConfig,
+                                                                                                                  PHYS_HEAP_USAGE_FW_MAIN);
+
+#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)
+       /* VZ heap validation */
+       if (PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               PVR_LOG_RETURN_IF_FALSE(psFwMainConfig != NULL,
+                                                               "FW Main heap is required for VZ Guest.",
+                                                               PVRSRV_ERROR_PHYSHEAP_CONFIG);
+       }
+#endif
+
+       if (psFwMainConfig != NULL)
+       {
+               /* Check FW_MAIN for multiple usage flags. Because FW_MAIN is divided
+                  into subheaps, shared usage with other heaps is not allowed.  */
+               PVR_LOG_RETURN_IF_FALSE(psFwMainConfig->ui32UsageFlags == PHYS_HEAP_USAGE_FW_MAIN,
+                                                               "FW Main phys heap config specified with more than one usage. FW Main must be FW Main only.",
+                                                               PVRSRV_ERROR_PHYSHEAP_CONFIG);
+       }
+
+       if (psFwMainConfig == NULL)
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap not set", __func__));
+       }
+       else if (psFwMainConfig->eType == PHYS_HEAP_TYPE_UMA)
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap uses OS System memory (UMA)", __func__));
+       }
+       else /* PHYS_HEAP_TYPE_LMA or PHYS_HEAP_TYPE_DMA */
+       {
+               IMG_UINT64 uFwMainSubHeapSize;
+               PHYS_HEAP_CONFIG sFwHeapConfig;
+
+               /* MIPS Firmware must reserve some space in its Host/Native heap for GPU memory mappings */
+               if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && (!PVRSRV_VZ_MODE_IS(GUEST)))
+               {
+#if defined(FIX_HW_BRN_65101_BIT_MASK)
+                       if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65101))
+                       {
+                               uFwMainSubHeapSize = RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_BRN65101;
+                       }
+                       else
+#endif
+                       {
+                               uFwMainSubHeapSize = RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_NORMAL;
+                       }
+               }
+               else
+               {
+                       uFwMainSubHeapSize = RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE;
+               }
+
+               PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap uses local memory managed by the driver (LMA)", __func__));
+
+               PVR_LOG_GOTO_IF_FALSE(psFwMainConfig->uiSize >= RGX_FIRMWARE_RAW_HEAP_SIZE,
+                                                         "Invalid firmware physical heap size.", ErrorDeinit);
+
+               /* Now we construct RAs to manage the FW heaps */
+
+#if defined(SUPPORT_AUTOVZ)
+               if (PVRSRV_VZ_MODE_IS(HOST))
+               {
+                       /* 1 MB can hold the maximum amount of page tables for the memory shared between the firmware and all KM drivers:
+                        *  MAX(RAW_HEAP_SIZE) = 32 MB; MAX(NUMBER_OS) = 8; Total shared memory = 256 MB;
+                        *  MMU objects required: 65536 PTEs; 16 PDEs; 1 PCE; */
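+                       /* A rough sanity check on that figure, assuming 4 KB MMU pages and
+                        * 8-byte page table entries: 256 MB / 4 KB = 65536 PTEs -> 512 KB of
+                        * PTE storage, plus 16 PDEs and 1 PCE, fits comfortably within 1 MB. */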
+                       IMG_UINT64 uMaxFwMmuPageTableSize = 1 * 1024 * 1024;
+
+                       sFwHeapConfig = *psFwMainConfig;
+
+                       /* By default the firmware MMU's page tables are allocated from the same carveout memory as the firmware heap.
+                        * If a different base address is specified for this reserved range, use the overriding define instead. */
+#if defined(PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR)
+                       sFwHeapConfig.sStartAddr.uiAddr = PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR;
+                       sFwHeapConfig.sCardBase.uiAddr = PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR;
+#else
+                       sFwHeapConfig.sStartAddr.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE * RGX_NUM_OS_SUPPORTED;
+                       sFwHeapConfig.sCardBase.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE * RGX_NUM_OS_SUPPORTED;
+#endif
+
+                       sFwHeapConfig.uiSize = uMaxFwMmuPageTableSize;
+                       sFwHeapConfig.ui32UsageFlags = 0;
+
+                       eError = PhysmemCreateHeapLMA(psDeviceNode, &sFwHeapConfig, "Fw MMU subheap",
+                                                     &psDeviceNode->psFwMMUReservedPhysHeap);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateHeapLMA:MMU", ErrorDeinit);
+               }
+#endif
+
+               /* Subheap layout: Main + (optional MIPS reserved range) + Config */
+               sFwHeapConfig = *psFwMainConfig;
+               sFwHeapConfig.uiSize = uFwMainSubHeapSize;
+               sFwHeapConfig.ui32UsageFlags = PHYS_HEAP_USAGE_FW_MAIN;
+
+               eError = PhysmemCreateHeapLMA(psDeviceNode, &sFwHeapConfig, "Fw Main subheap", &psDeviceNode->psFWMainPhysHeap);
+               PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateHeapLMA:MAIN", ErrorDeinit);
+
+               sFwHeapConfig = *psFwMainConfig;
+               sFwHeapConfig.sStartAddr.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE;
+               sFwHeapConfig.sCardBase.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE;
+               sFwHeapConfig.uiSize = RGX_FIRMWARE_CONFIG_HEAP_SIZE;
+               sFwHeapConfig.ui32UsageFlags = PHYS_HEAP_USAGE_FW_CONFIG;
+
+               eError = PhysmemCreateHeapLMA(psDeviceNode, &sFwHeapConfig, "Fw Cfg subheap", &psDeviceNode->psFWCfgPhysHeap);
+               PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateHeapLMA:CFG", ErrorDeinit);
+       }
+
+       /* Acquire FW heaps */
+       eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_MAIN, psDeviceNode,
+                                                                                 &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_MAIN", ErrorDeinit);
+
+       eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_CONFIG, psDeviceNode,
+                                                                                 &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CONFIG]);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_CONFIG", ErrorDeinit);
+
+       eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_CODE, psDeviceNode,
+                                                                                 &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CODE]);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_CODE", ErrorDeinit);
+
+       eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_PRIV_DATA, psDeviceNode,
+                                                                                 &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PRIV_DATA]);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_DATA", ErrorDeinit);
+
+       return eError;
+
+ErrorDeinit:
+       PVR_ASSERT(IMG_FALSE);
+       PVRSRVPhysMemHeapsDeinit(psDeviceNode);
+
+       return eError;
+}
+
+static void _ReadNon4KHeapPageSize(IMG_UINT32 *pui32Log2Non4KPgSize)
+{
+       void *pvAppHintState = NULL;
+       IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE;
+       IMG_UINT32 ui32GeneralNon4KHeapPageSize;
+
+       /* Get the page size for the dummy page from the NON4K heap apphint */
+       OSCreateKMAppHintState(&pvAppHintState);
+       OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState,
+                                GeneralNon4KHeapPageSize, &ui32AppHintDefault,
+                         &ui32GeneralNon4KHeapPageSize);
+       *pui32Log2Non4KPgSize = ExactLog2(ui32GeneralNon4KHeapPageSize);
+       OSFreeKMAppHintState(pvAppHintState);
+}
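+
+/* For illustration: with the apphint left at its default or set to e.g. 16384,
+ * ExactLog2() yields 14 and the non-4K heap (and the dummy/zero pages sized
+ * from it in RGXRegisterDevice below) use 16 KB pages. The use of ExactLog2()
+ * implies the apphint value is expected to be an exact power of two. */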
+
+/* RGXRegisterDevice
+ *
+ * NOTE: No PDUMP statements are allowed until Part 2 of the device initialisation
+ * is reached.
+ */
+PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR eError;
+       DEVICE_MEMORY_INFO *psDevMemoryInfo;
+       PVRSRV_RGXDEV_INFO      *psDevInfo;
+       void *pvAppHintState = NULL;
+       IMG_UINT32 ui32AppHintDefault = HWPERF_HOST_TL_STREAM_SIZE_DEFAULT, ui32HWPerfHostBufSizeKB;
+
+       OSCreateKMAppHintState(&pvAppHintState);
+       OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, HWPerfHostBufSizeInKB,
+                            &ui32AppHintDefault, &ui32HWPerfHostBufSizeKB);
+       OSFreeKMAppHintState(pvAppHintState);
+       pvAppHintState = NULL;
+
+       /*********************
+        * Device node setup *
+        *********************/
+       /* Setup static data and callbacks on the device agnostic device node */
+#if defined(PDUMP)
+       psDeviceNode->sDevId.pszPDumpRegName    = RGX_PDUMPREG_NAME;
+       psDeviceNode->sDevId.pszPDumpDevName    = PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL]);
+       psDeviceNode->pfnPDumpInitDevice = &RGXResetPDump;
+#endif /* PDUMP */
+
+       OSAtomicWrite(&psDeviceNode->eHealthStatus, PVRSRV_DEVICE_HEALTH_STATUS_OK);
+       OSAtomicWrite(&psDeviceNode->eHealthReason, PVRSRV_DEVICE_HEALTH_REASON_NONE);
+
+       /* Configure MMU specific stuff */
+       RGXMMUInit_Register(psDeviceNode);
+
+       psDeviceNode->pfnDevSLCFlushRange = NULL;
+       psDeviceNode->pfnInvalFBSCTable = NULL;
+
+       psDeviceNode->pfnValidateOrTweakPhysAddrs = NULL;
+
+       psDeviceNode->pfnMMUCacheInvalidate = RGXMMUCacheInvalidate;
+
+       psDeviceNode->pfnMMUCacheInvalidateKick = RGXMMUCacheInvalidateKick;
+
+       psDeviceNode->pfnInitDeviceCompatCheck  = &RGXDevInitCompatCheck;
+
+       /* Register callbacks for creation of device memory contexts */
+       psDeviceNode->pfnRegisterMemoryContext = RGXRegisterMemoryContext;
+       psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext;
+
+       /* Register callbacks for Unified Fence Objects */
+       psDeviceNode->pfnAllocUFOBlock = RGXAllocUFOBlock;
+       psDeviceNode->pfnFreeUFOBlock = RGXFreeUFOBlock;
+
+       /* Register callback for checking the device's health */
+       psDeviceNode->pfnUpdateHealthStatus = PVRSRV_VZ_MODE_IS(GUEST) ? NULL : RGXUpdateHealthStatus;
+
+#if defined(SUPPORT_AUTOVZ)
+       /* Register callback for updating the virtualization watchdog */
+       psDeviceNode->pfnUpdateAutoVzWatchdog = RGXUpdateAutoVzWatchdog;
+#endif
+
+       /* Register method to service the FW HWPerf buffer */
+       psDeviceNode->pfnServiceHWPerf = RGXHWPerfDataStoreCB;
+
+       /* Register callback for getting the device version information string */
+       psDeviceNode->pfnDeviceVersionString = RGXDevVersionString;
+
+       /* Register callback for getting the device clock speed */
+       psDeviceNode->pfnDeviceClockSpeed = RGXDevClockSpeed;
+
+       /* Register callback for soft resetting some device modules */
+       psDeviceNode->pfnSoftReset = RGXSoftReset;
+
+       /* Register callback for resetting the HWR logs */
+       psDeviceNode->pfnResetHWRLogs = RGXResetHWRLogs;
+
+       /* Register callback for verifying the BVNC */
+       psDeviceNode->pfnVerifyBVNC = RGXVerifyBVNC;
+
+       /* Register callback for checking alignment of UM structures */
+       psDeviceNode->pfnAlignmentCheck = RGXAlignmentCheck;
+
+       /* Register callback for checking the supported features and getting the
+        * corresponding values */
+       psDeviceNode->pfnCheckDeviceFeature = RGXBvncCheckFeatureSupported;
+       psDeviceNode->pfnGetDeviceFeatureValue = RGXBvncGetSupportedFeatureValue;
+
+       /* Callback for checking if system layer supports FBC 3.1 */
+       psDeviceNode->pfnHasFBCDCVersion31 = RGXSystemHasFBCDCVersion31;
+
+       /* Callback for getting the MMU device attributes */
+       psDeviceNode->pfnGetMMUDeviceAttributes = RGXDevMMUAttributes;
+
+       /* Register callback for initialising device-specific physical memory heaps */
+       psDeviceNode->pfnPhysMemDeviceHeapsInit = RGXPhysMemDeviceHeapsInit;
+
+       /* Set up required support for dummy page */
+       OSAtomicWrite(&(psDeviceNode->sDummyPage.atRefCounter), 0);
+       OSAtomicWrite(&(psDeviceNode->sDevZeroPage.atRefCounter), 0);
+
+       /* Set the order to 0 */
+       psDeviceNode->sDummyPage.sPageHandle.uiOrder = 0;
+       psDeviceNode->sDevZeroPage.sPageHandle.uiOrder = 0;
+
+       /* Set the log2 size of the Dummy page to zero */
+       psDeviceNode->sDummyPage.ui32Log2PgSize = 0;
+
+       /* Set the log2 size of the Zero page to zero */
+       psDeviceNode->sDevZeroPage.ui32Log2PgSize = 0;
+
+       /* Set the Dummy page phys addr */
+       psDeviceNode->sDummyPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR;
+
+       /* Set the Zero page phys addr */
+       psDeviceNode->sDevZeroPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR;
+
+       /* The lock can be acquired from MISR (Z-buffer) path */
+       eError = OSLockCreate(&psDeviceNode->sDummyPage.psPgLock);
+       if (PVRSRV_OK != eError)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create dummy page lock", __func__));
+               return eError;
+       }
+
+       /* Create the lock for zero page */
+       eError = OSLockCreate(&psDeviceNode->sDevZeroPage.psPgLock);
+       if (PVRSRV_OK != eError)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create Zero page lock", __func__));
+               goto free_dummy_page;
+       }
+#if defined(PDUMP)
+       psDeviceNode->sDummyPage.hPdumpPg = NULL;
+       psDeviceNode->sDevZeroPage.hPdumpPg = NULL;
+#endif
+
+       /*********************
+        * Device info setup *
+        *********************/
+       /* Allocate device control block */
+       psDevInfo = OSAllocZMem(sizeof(*psDevInfo));
+       if (psDevInfo == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "DevInitRGXPart1 : Failed to alloc memory for DevInfo"));
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+       /* Default psTrampoline to point to null struct */
+       psDevInfo->psTrampoline = (RGX_MIPS_ADDRESS_TRAMPOLINE *)&sNullTrampoline;
+
+       /* create locks for the context lists stored in the DevInfo structure.
+        * these lists are modified on context create/destroy and read by the
+        * watchdog thread
+        */
+
+       eError = OSWRLockCreate(&(psDevInfo->hRenderCtxListLock));
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create render context list lock", __func__));
+               goto e0;
+       }
+
+       eError = OSWRLockCreate(&(psDevInfo->hComputeCtxListLock));
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create compute context list lock", __func__));
+               goto e1;
+       }
+
+       eError = OSWRLockCreate(&(psDevInfo->hTransferCtxListLock));
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create transfer context list lock", __func__));
+               goto e2;
+       }
+
+       eError = OSWRLockCreate(&(psDevInfo->hTDMCtxListLock));
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create TDM context list lock", __func__));
+               goto e3;
+       }
+
+       eError = OSWRLockCreate(&(psDevInfo->hKickSyncCtxListLock));
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create kick sync context list lock", __func__));
+               goto e4;
+       }
+
+       eError = OSWRLockCreate(&(psDevInfo->hMemoryCtxListLock));
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create memory context list lock", __func__));
+               goto e5;
+       }
+
+       eError = OSSpinLockCreate(&psDevInfo->hLockKCCBDeferredCommandsList);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create KCCB deferred commands list lock", __func__));
+               goto e6;
+       }
+       dllist_init(&(psDevInfo->sKCCBDeferredCommandsListHead));
+
+       dllist_init(&(psDevInfo->sRenderCtxtListHead));
+       dllist_init(&(psDevInfo->sComputeCtxtListHead));
+       dllist_init(&(psDevInfo->sTransferCtxtListHead));
+       dllist_init(&(psDevInfo->sTDMCtxtListHead));
+       dllist_init(&(psDevInfo->sKickSyncCtxtListHead));
+
+       dllist_init(&(psDevInfo->sCommonCtxtListHead));
+       psDevInfo->ui32CommonCtxtCurrentID = 1;
+
+
+       eError = OSWRLockCreate(&psDevInfo->hCommonCtxtListLock);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create common context list lock", __func__));
+               goto e7;
+       }
+
+       eError = OSLockCreate(&psDevInfo->sRegCongfig.hLock);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create register configuration lock", __func__));
+               goto e8;
+       }
+
+       eError = OSLockCreate(&psDevInfo->hBPLock);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for break points", __func__));
+               goto e9;
+       }
+
+       eError = OSLockCreate(&psDevInfo->hRGXFWIfBufInitLock);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for trace buffers", __func__));
+               goto e10;
+       }
+
+       eError = OSLockCreate(&psDevInfo->hCCBStallCheckLock);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create stalled CCB checking lock", __func__));
+               goto e11;
+       }
+       eError = OSLockCreate(&psDevInfo->hCCBRecoveryLock);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create stalled CCB recovery lock", __func__));
+               goto e12;
+       }
+
+       dllist_init(&psDevInfo->sMemoryContextList);
+
+       /* initialise ui32SLRHoldoffCounter */
+       if (RGX_INITIAL_SLR_HOLDOFF_PERIOD_MS > DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT)
+       {
+               psDevInfo->ui32SLRHoldoffCounter = RGX_INITIAL_SLR_HOLDOFF_PERIOD_MS / DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT;
+       }
+       else
+       {
+               psDevInfo->ui32SLRHoldoffCounter = 0;
+       }
+
+       /* Setup static data and callbacks on the device specific device info */
+       psDevInfo->psDeviceNode         = psDeviceNode;
+
+       psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+       psDevInfo->pvDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
+
+       /*
+        * Map RGX Registers
+        */
+       psDevInfo->ui32RegSize = psDeviceNode->psDevConfig->ui32RegsSize;
+       psDevInfo->sRegsPhysBase = psDeviceNode->psDevConfig->sRegsCpuPBase;
+
+#if !defined(NO_HARDWARE)
+       psDevInfo->pvRegsBaseKM = (void __iomem *) OSMapPhysToLin(psDeviceNode->psDevConfig->sRegsCpuPBase,
+                       psDeviceNode->psDevConfig->ui32RegsSize,
+                       PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+
+       if (psDevInfo->pvRegsBaseKM == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed to create RGX register mapping",
+                        __func__));
+               eError = PVRSRV_ERROR_BAD_MAPPING;
+               goto e13;
+       }
+#endif
+
+       psDeviceNode->pvDevice = psDevInfo;
+
+       eError = RGXBvncInitialiseConfiguration(psDeviceNode);
+       if (PVRSRV_OK != eError)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Unsupported HW device detected by driver",
+                        __func__));
+               goto e14;
+       }
+
+       _ReadNon4KHeapPageSize(&psDevInfo->ui32Log2Non4KPgSize);
+
+       /* Set the zero & dummy page sizes as needed for the heap with the largest page size */
+       psDeviceNode->sDevZeroPage.ui32Log2PgSize = psDevInfo->ui32Log2Non4KPgSize;
+       psDeviceNode->sDummyPage.ui32Log2PgSize = psDevInfo->ui32Log2Non4KPgSize;
+
+       eError = RGXInitHeaps(psDevInfo, psDevMemoryInfo);
+       if (eError != PVRSRV_OK)
+       {
+               goto e14;
+       }
+
+       eError = RGXHWPerfInit(psDevInfo);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInit", e14);
+
+       eError = RGXHWPerfHostInit(psDeviceNode->pvDevice, ui32HWPerfHostBufSizeKB);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfHostInit", ErrorDeInitHWPerfFw);
+
+#if defined(SUPPORT_VALIDATION)
+       /* This completion will be signaled by the ISR when processing
+        * the answer CCB command carrying an RGX Register read value */
+       init_completion(&psDevInfo->sFwRegs.sRegComp);
+       psDevInfo->sFwRegs.ui64RegVal = 0;
+
+#if defined(SUPPORT_SOC_TIMER)
+       {
+               IMG_BOOL bAppHintDefault = IMG_FALSE;
+               IMG_BOOL bInitSocTimer;
+               void *pvAppHintState = NULL;
+
+               OSCreateKMAppHintState(&pvAppHintState);
+               OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, ValidateSOCUSCTimer, &bAppHintDefault, &bInitSocTimer);
+               OSFreeKMAppHintState(pvAppHintState);
+
+               if (bInitSocTimer)
+               {
+                       eError = RGXInitSOCUSCTimer(psDeviceNode);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXInitSOCUSCTimer", ErrorDeInitHWPerfHost);
+               }
+       }
+#endif
+#endif
+
+       /* Register callback for dumping debug info */
+       eError = RGXDebugInit(psDevInfo);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXDebugInit", ErrorDeInitHWPerfHost);
+
+       /* Register callback for fw mmu init */
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+       {
+               psDeviceNode->pfnFwMMUInit = RGXMipsMMUInit_Register;
+       }
+
+       /* The device shared-virtual-memory heap address-space size is stored here for faster
+          look-up without having to walk the device heap configuration structures during
+          client device connection  (i.e. this size is relative to a zero-based offset) */
+#if defined(FIX_HW_BRN_65273_BIT_MASK)
+       if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273))
+       {
+               psDeviceNode->ui64GeneralSVMHeapTopVA = 0;
+       }
+       else
+#endif
+       {
+               psDeviceNode->ui64GeneralSVMHeapTopVA = RGX_GENERAL_SVM_HEAP_BASE + RGX_GENERAL_SVM_HEAP_SIZE;
+       }
+
+       if (NULL != psDeviceNode->psDevConfig->pfnSysDevFeatureDepInit)
+       {
+               psDeviceNode->psDevConfig->pfnSysDevFeatureDepInit(psDeviceNode->psDevConfig,
+                               psDevInfo->sDevFeatureCfg.ui64Features);
+       }
+
+       psDeviceNode->bHasSystemDMA = psDeviceNode->psDevConfig->bHasDma;
+
+       /* Initialise the device dependent bridges */
+       eError = DeviceDepBridgeInit(psDevInfo);
+       PVR_LOG_IF_ERROR(eError, "DeviceDepBridgeInit");
+
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+       eError = OSLockCreate(&psDevInfo->hCounterDumpingLock);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for counter sampling.", __func__));
+               goto ErrorDeInitDeviceDepBridge;
+       }
+#endif
+
+       /* Initialise error counters */
+       memset(&psDevInfo->sErrorCounts, 0, sizeof(PVRSRV_RGXDEV_ERROR_COUNTS));
+
+       return PVRSRV_OK;
+
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+ErrorDeInitDeviceDepBridge:
+       DeviceDepBridgeDeInit(psDevInfo);
+#endif
+
+ErrorDeInitHWPerfHost:
+       RGXHWPerfHostDeInit(psDevInfo);
+
+ErrorDeInitHWPerfFw:
+       RGXHWPerfDeinit(psDevInfo);
+
+e14:
+#if !defined(NO_HARDWARE)
+       OSUnMapPhysToLin((void __force *) psDevInfo->pvRegsBaseKM,
+                       psDevInfo->ui32RegSize);
+
+e13:
+#endif /* !NO_HARDWARE */
+       OSLockDestroy(psDevInfo->hCCBRecoveryLock);
+e12:
+       OSLockDestroy(psDevInfo->hCCBStallCheckLock);
+e11:
+       OSLockDestroy(psDevInfo->hRGXFWIfBufInitLock);
+e10:
+       OSLockDestroy(psDevInfo->hBPLock);
+e9:
+       OSLockDestroy(psDevInfo->sRegCongfig.hLock);
+e8:
+       OSWRLockDestroy(psDevInfo->hCommonCtxtListLock);
+e7:
+       OSSpinLockDestroy(psDevInfo->hLockKCCBDeferredCommandsList);
+e6:
+       OSWRLockDestroy(psDevInfo->hMemoryCtxListLock);
+e5:
+       OSWRLockDestroy(psDevInfo->hKickSyncCtxListLock);
+e4:
+       OSWRLockDestroy(psDevInfo->hTDMCtxListLock);
+e3:
+       OSWRLockDestroy(psDevInfo->hTransferCtxListLock);
+e2:
+       OSWRLockDestroy(psDevInfo->hComputeCtxListLock);
+e1:
+       OSWRLockDestroy(psDevInfo->hRenderCtxListLock);
+e0:
+       OSFreeMem(psDevInfo);
+
+       /* Destroy the zero page lock created above */
+       OSLockDestroy(psDeviceNode->sDevZeroPage.psPgLock);
+
+free_dummy_page:
+       /* Destroy the dummy page lock created above */
+       OSLockDestroy(psDeviceNode->sDummyPage.psPgLock);
+
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+IMG_PCHAR RGXDevBVNCString(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       IMG_PCHAR psz = psDevInfo->sDevFeatureCfg.pszBVNCString;
+       if (NULL == psz)
+       {
+               IMG_CHAR pszBVNCInfo[RGX_HWPERF_MAX_BVNC_LEN];
+               size_t uiBVNCStringSize;
+               size_t uiStringLength;
+
+               uiStringLength = OSSNPrintf(pszBVNCInfo, RGX_HWPERF_MAX_BVNC_LEN, "%d.%d.%d.%d",
+                               psDevInfo->sDevFeatureCfg.ui32B,
+                               psDevInfo->sDevFeatureCfg.ui32V,
+                               psDevInfo->sDevFeatureCfg.ui32N,
+                               psDevInfo->sDevFeatureCfg.ui32C);
+               PVR_ASSERT(uiStringLength < RGX_HWPERF_MAX_BVNC_LEN);
+
+               uiBVNCStringSize = (uiStringLength + 1) * sizeof(IMG_CHAR);
+               psz = OSAllocMem(uiBVNCStringSize);
+               if (NULL != psz)
+               {
+                       OSCachedMemCopy(psz, pszBVNCInfo, uiBVNCStringSize);
+                       psDevInfo->sDevFeatureCfg.pszBVNCString = psz;
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_MESSAGE,
+                                       "%s: Allocating memory for BVNC Info string failed",
+                                       __func__));
+               }
+       }
+
+       return psz;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXDevVersionString
+@Description    Gets the version string for the given device node and returns
+                a pointer to it in ppszVersionString. It is then the
+                responsibility of the caller to free this memory.
+@Input          psDeviceNode            Device node from which to obtain the
+                                        version string
+@Output                ppszVersionString       Contains the version string upon return
+@Return         PVRSRV_ERROR
+ */ /**************************************************************************/
+static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode,
+               IMG_CHAR **ppszVersionString)
+{
+#if defined(NO_HARDWARE) || defined(EMULATOR)
+       const IMG_CHAR szFormatString[] = "GPU variant BVNC: %s (SW)";
+#else
+       const IMG_CHAR szFormatString[] = "GPU variant BVNC: %s (HW)";
+#endif
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       IMG_PCHAR pszBVNC;
+       size_t uiStringLength;
+
+       if (psDeviceNode == NULL || ppszVersionString == NULL)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+       pszBVNC = RGXDevBVNCString(psDevInfo);
+
+       if (NULL == pszBVNC)
+       {
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       uiStringLength = OSStringLength(pszBVNC);
+       uiStringLength += (sizeof(szFormatString) - 2); /* sizeof includes the null, -2 for "%s" */
+       *ppszVersionString = OSAllocMem(uiStringLength * sizeof(IMG_CHAR));
+       if (*ppszVersionString == NULL)
+       {
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       OSSNPrintf(*ppszVersionString, uiStringLength, szFormatString,
+               pszBVNC);
+
+       return PVRSRV_OK;
+}
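+
+/* A minimal caller sketch (illustrative only; it assumes OSFreeMem() is the
+ * matching release for the OSAllocMem() allocation made above, as used
+ * elsewhere in this file):
+ *
+ *     IMG_CHAR *pszVersion = NULL;
+ *
+ *     if (RGXDevVersionString(psDeviceNode, &pszVersion) == PVRSRV_OK)
+ *     {
+ *         PVR_DPF((PVR_DBG_MESSAGE, "%s", pszVersion));
+ *         OSFreeMem(pszVersion);
+ *     }
+ */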
+
+/**************************************************************************/ /*!
+@Function       RGXDevClockSpeed
+@Description    Gets the clock speed for the given device node and returns
+                it in pui32RGXClockSpeed.
+@Input          psDeviceNode           Device node
+@Output         pui32RGXClockSpeed  Variable for storing the clock speed
+@Return         PVRSRV_ERROR
+ */ /***************************************************************************/
+static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode,
+               IMG_PUINT32  pui32RGXClockSpeed)
+{
+       RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+
+       /* get clock speed */
+       *pui32RGXClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+
+       return PVRSRV_OK;
+}
+
+#if (RGX_NUM_OS_SUPPORTED > 1)
+/*!
+ *******************************************************************************
+
+ @Function             RGXInitFwRawHeap
+
+ @Description  Initialise the firmware raw heap blueprint for the given guest OSID
+ ******************************************************************************/
+static PVRSRV_ERROR RGXInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32OSid)
+{
+       IMG_UINT32 uiStringLength;
+       IMG_UINT32 uiStringLengthMax = 32;
+
+       IMG_UINT32 ui32Log2RgxDefaultPageShift = RGXHeapDerivePageSize(OSGetPageShift());
+
+       uiStringLength = MIN(sizeof(RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT), uiStringLengthMax + 1);
+
+       /* Start by allocating memory for this OSID heap identification string */
+       psDevMemHeap->pszName = OSAllocMem(uiStringLength * sizeof(IMG_CHAR));
+       if (psDevMemHeap->pszName == NULL)
+       {
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       /* Append the OSID number to the RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT string */
+       OSSNPrintf((IMG_CHAR *)psDevMemHeap->pszName, uiStringLength, RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSid);
+
+       /* Use the common blueprint template support function to initialise the heap */
+       HeapCfgBlueprintInit(psDevMemHeap->pszName,
+                                RGX_FIRMWARE_RAW_HEAP_BASE + (ui32OSid * RGX_FIRMWARE_RAW_HEAP_SIZE),
+                                RGX_FIRMWARE_RAW_HEAP_SIZE,
+                                0,
+                                ui32Log2RgxDefaultPageShift,
+                                0,
+                                psDevMemHeap);
+
+       return PVRSRV_OK;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function             RGXDeInitFwRawHeap
+
+ @Description  Free the heap identification string allocated by RGXInitFwRawHeap
+ ******************************************************************************/
+static void RGXDeInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap)
+{
+       IMG_UINT64 uiBase = RGX_FIRMWARE_RAW_HEAP_BASE + RGX_FIRMWARE_RAW_HEAP_SIZE;
+       IMG_UINT64 uiSpan = uiBase + ((RGX_NUM_OS_SUPPORTED - 1) * RGX_FIRMWARE_RAW_HEAP_SIZE);
+
+       /* Safe to do as the guest firmware heaps are last in the list */
+       if (psDevMemHeap->sHeapBaseAddr.uiAddr >= uiBase &&
+           psDevMemHeap->sHeapBaseAddr.uiAddr < uiSpan)
+       {
+               void *pszName = (void*)psDevMemHeap->pszName;
+               OSFreeMem(pszName);
+       }
+}
+#endif /* (RGX_NUM_OS_SUPPORTED > 1) */
+
+/******************************************************************************
+ End of file (rgxinit.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxinit.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxinit.h
new file mode 100644 (file)
index 0000000..6cc8c8b
--- /dev/null
@@ -0,0 +1,281 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX initialisation header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX initialisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXINIT_H)
+#define RGXINIT_H
+
+#include "connection_server.h"
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgx_bridge.h"
+#include "fwload.h"
+
+#if defined(__linux__)
+#define OS_FW_VERIFY_FUNCTION OSVerifyFirmware
+#else
+#define OS_FW_VERIFY_FUNCTION NULL
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function     RGXInitDevPart2
+
+ @Description
+
+ Second part of server-side RGX initialisation
+
+ @Input psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXInitDevPart2 (PVRSRV_DEVICE_NODE       *psDeviceNode,
+                                                         IMG_UINT32                    ui32DeviceFlags,
+                                                         IMG_UINT32                    ui32HWPerfHostFilter,
+                                                         RGX_ACTIVEPM_CONF             eActivePMConf);
+
+PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE   *psDeviceNode,
+                                  IMG_DEVMEM_SIZE_T    ui32FWCodeLen,
+                                  IMG_DEVMEM_SIZE_T    ui32FWDataLen,
+                                  IMG_DEVMEM_SIZE_T    uiFWCorememCodeLen,
+                                  IMG_DEVMEM_SIZE_T    uiFWCorememDataLen);
+
+
+/*!
+*******************************************************************************
+
+ @Function     RGXInitFirmware
+
+ @Description
+
+ Server-side RGX firmware initialisation
+
+ @Input psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR
+RGXInitFirmware(PVRSRV_DEVICE_NODE       *psDeviceNode,
+                IMG_BOOL                 bEnableSignatureChecks,
+                IMG_UINT32               ui32SignatureChecksBufSize,
+                IMG_UINT32               ui32HWPerfFWBufSizeKB,
+                IMG_UINT64               ui64HWPerfFilter,
+                IMG_UINT32               ui32ConfigFlags,
+                IMG_UINT32               ui32LogType,
+                IMG_UINT32               ui32FilterFlags,
+                IMG_UINT32               ui32JonesDisableMask,
+                IMG_UINT32               ui32HWRDebugDumpLimit,
+                IMG_UINT32               ui32HWPerfCountersDataSize,
+                IMG_UINT32               *pui32TPUTrilinearFracMask,
+                RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf,
+                FW_PERF_CONF             eFirmwarePerf,
+                IMG_UINT32               ui32KCCBSizeLog2,
+                IMG_UINT32               ui32ConfigFlagsExt,
+                IMG_UINT32               ui32FwOsCfgFlags);
+
+
+/*!
+*******************************************************************************
+
+ @Function     RGXLoadAndGetFWData
+
+ @Description
+
+ Load FW and return pointer to FW data.
+
+ @Input psDeviceNode - device node
+
+ @Input ppsRGXFW - fw pointer
+
+ @Output ppbFWData - pointer to FW data (NULL if an error occurred)
+
+ @Return PVRSRV_ERROR - PVRSRV_OK on success
+                        PVRSRV_ERROR_NOT_READY if filesystem is not ready
+                        PVRSRV_ERROR_NOT_FOUND if no suitable FW image found
+                        PVRSRV_ERROR_OUT_OF_MEMORY if unable to alloc memory for FW image
+                        PVRSRV_ERROR_NOT_AUTHENTICATED if FW image failed verification
+
+******************************************************************************/
+PVRSRV_ERROR RGXLoadAndGetFWData(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                 OS_FW_IMAGE **ppsRGXFW,
+                                 const IMG_BYTE **ppbFWData);
+
+#if defined(PDUMP)
+/*!
+*******************************************************************************
+
+ @Function     RGXInitHWPerfCounters
+
+ @Description
+
+ Initialisation of the performance counters
+
+ @Input psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXInitHWPerfCounters(PVRSRV_DEVICE_NODE  *psDeviceNode);
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function     RGXRegisterDevice
+
+ @Description
+
+ Registers the device with the system
+
+ @Input:   psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+*******************************************************************************
+
+ @Function     RGXDevBVNCString
+
+ @Description
+
+ Returns the Device BVNC string. It will allocate and fill it first, if necessary.
+
+ @Input:   psDevInfo - device info (must not be null)
+
+ @Return   IMG_PCHAR - pointer to BVNC string
+
+******************************************************************************/
+IMG_PCHAR RGXDevBVNCString(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function     DevDeInitRGX
+
+ @Description
+
+ Reset and deinitialise Chip
+
+ @Input psDeviceNode - device info. structure
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+
+#if !defined(NO_HARDWARE)
+
+void RGX_WaitForInterruptsTimeout(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function     SORgxGpuUtilStatsRegister
+
+ @Description  SO Interface function called from the OS layer implementation.
+               Initialise data used to compute GPU utilisation statistics
+               for a particular user (identified by the handle passed as
+               argument). This function must be called only once for each
+               different user/handle.
+
+ @Input        phGpuUtilUser - Pointer to handle used to identify a user of
+                               RGXGetGpuUtilStats
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR SORgxGpuUtilStatsRegister(IMG_HANDLE *phGpuUtilUser);
+
+
+/*!
+*******************************************************************************
+
+ @Function     SORgxGpuUtilStatsUnregister
+
+ @Description  SO Interface function called from the OS layer implementation.
+               Free data previously used to compute GPU utilisation statistics
+               for a particular user (identified by the handle passed as
+               argument).
+
+ @Input        hGpuUtilUser - Handle used to identify a user of
+                              RGXGetGpuUtilStats
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR SORgxGpuUtilStatsUnregister(IMG_HANDLE hGpuUtilUser);
+#endif /* !defined(NO_HARDWARE) */
+
+/*!
+ *******************************************************************************
+
+ @Function      RGXInitCreateFWKernelMemoryContext
+
+ @Description   Called to perform initialisation during firmware kernel context
+                creation.
+
+ @Input         psDeviceNode  device node
+ ******************************************************************************/
+PVRSRV_ERROR RGXInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+ *******************************************************************************
+
+ @Function      RGXDeInitDestroyFWKernelMemoryContext
+
+ @Description   Called to perform deinitialisation during firmware kernel
+                context destruction.
+
+ @Input         psDeviceNode  device node
+ ******************************************************************************/
+void RGXDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+#endif /* RGXINIT_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxlayer.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxlayer.h
new file mode 100644 (file)
index 0000000..431a7b6
--- /dev/null
@@ -0,0 +1,812 @@
+/*************************************************************************/ /*!
+@File
+@Title          Header for Services abstraction layer
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declaration of an interface layer used to abstract code that
+                can be compiled outside of the DDK, potentially in a
+                completely different OS.
+                All the headers included by this file must also be copied to
+                the alternative source tree.
+                All the functions declared here must have a DDK implementation
+                inside the DDK source tree (e.g. rgxlayer_impl.h/.c) and
+                another different implementation in case they are used outside
+                of the DDK.
+                All of the functions accept as a first parameter a
+                "const void *hPrivate" argument. It should be used to pass
+                around any implementation specific data required.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXLAYER_H)
+#define RGXLAYER_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "img_elf.h"
+#include "pvrsrv_error.h" /* includes pvrsrv_errors.h */
+#include "pvrsrv_firmware_boot.h"
+#include "rgx_bvnc_defs_km.h"
+#include "rgx_fw_info.h"
+#include "rgx_fwif_shared.h" /* includes rgx_common.h and mem_types.h */
+#include "rgx_meta.h"
+#include "rgx_mips.h"
+#include "rgx_riscv.h"
+
+#include "rgxdefs_km.h"
+/* includes:
+ * rgx_cr_defs_km.h,
+ * RGX_BVNC_CORE_KM_HEADER (rgxcore_km_B.V.N.C.h),
+ * RGX_BNC_CONFIG_KM_HEADER (rgxconfig_km_B.V.N.C.h)
+ */
+
+
+/*!
+*******************************************************************************
+
+ @Function       RGXMemCopy
+
+ @Description    MemCopy implementation
+
+ @Input          hPrivate   : Implementation specific data
+ @Input          pvDst      : Pointer to the destination
+ @Input          pvSrc      : Pointer to the source location
+ @Input          uiSize     : The amount of memory to copy in bytes
+
+ @Return         void
+
+******************************************************************************/
+void RGXMemCopy(const void *hPrivate,
+                void *pvDst,
+                void *pvSrc,
+                size_t uiSize);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXMemSet
+
+ @Description    MemSet implementation
+
+ @Input          hPrivate   : Implementation specific data
+ @Input          pvDst      : Pointer to the start of the memory region
+ @Input          ui8Value   : The value to be written
+ @Input          uiSize     : The number of bytes to be set to ui8Value
+
+ @Return         void
+
+******************************************************************************/
+void RGXMemSet(const void *hPrivate,
+               void *pvDst,
+               IMG_UINT8 ui8Value,
+               size_t uiSize);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXCommentLog
+
+ @Description    Generic log function used for debugging or other purposes
+
+ @Input          hPrivate   : Implementation specific data
+ @Input          pszString  : Message to be printed
+ @Input          ...        : Variadic arguments
+
+ @Return         void
+
+******************************************************************************/
+__printf(2, 3)
+void RGXCommentLog(const void *hPrivate,
+                   const IMG_CHAR *pszString,
+                   ...);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXErrorLog
+
+ @Description    Generic error log function used for debugging or other purposes
+
+ @Input          hPrivate   : Implementation specific data
+ @Input          pszString  : Message to be printed
+ @Input          ...        : Variadic arguments
+
+ @Return         void
+
+******************************************************************************/
+__printf(2, 3)
+void RGXErrorLog(const void *hPrivate,
+                 const IMG_CHAR *pszString,
+                 ...);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXGetOSPageSize
+
+ @Description    Return the page size used by the OS
+
+ @Input          hPrivate   : Implementation specific data
+
+ @Return         IMG_UINT32
+
+******************************************************************************/
+
+IMG_UINT32 RGXGetOSPageSize(const void *hPrivate);
+
+/* This is used to check whether the device has a specific feature.
+ * Should be used instead of calling RGXDeviceHasFeature directly.  */
+#define RGX_DEVICE_HAS_FEATURE(hPrivate, Feature) \
+                       RGXDeviceHasFeature(hPrivate, RGX_FEATURE_##Feature##_BIT_MASK)
+
+/* This is used to check if a specific feature with value is enabled.
+ * Should be used instead of calling RGXDeviceGetFeatureValue.  */
+#define RGX_DEVICE_HAS_FEATURE_VALUE(hPrivate, Feature) \
+                       (RGXDeviceGetFeatureValue(hPrivate, RGX_FEATURE_##Feature##_IDX) >= 0)
+
+/* This is used to get the value of a specific feature from hPrivate.
+ * Should be used instead of calling RGXDeviceGetFeatureValue.  */
+#define RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, Feature) \
+                       RGXDeviceGetFeatureValue(hPrivate, RGX_FEATURE_##Feature##_IDX)
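+
+/* Usage sketch (illustrative; MIPS and META are assumed to be feature names
+ * defined with the RGX_FEATURE_..._BIT_MASK / RGX_FEATURE_..._IDX conventions
+ * in rgx_bvnc_defs_km.h):
+ *
+ *     if (RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS))
+ *     {
+ *         // MIPS firmware processor present
+ *     }
+ *
+ *     if (RGX_DEVICE_HAS_FEATURE_VALUE(hPrivate, META))
+ *     {
+ *         IMG_INT32 i32MetaVersion = RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, META);
+ *     }
+ */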
+
+/*!
+*******************************************************************************
+
+ @Function       RGXDeviceGetFeatureValue
+
+ @Description    Checks if a device has a particular feature with values
+
+ @Input          hPrivate     : Implementation specific data
+ @Input          ui64Feature  : Feature with values to check
+
+ @Return         Value >= 0 if the given feature is available, -1 otherwise
+
+******************************************************************************/
+IMG_INT32 RGXDeviceGetFeatureValue(const void *hPrivate, IMG_UINT64 ui64Feature);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXDeviceHasFeature
+
+ @Description    Checks if a device has a particular feature
+
+ @Input          hPrivate     : Implementation specific data
+ @Input          ui64Feature  : Feature to check
+
+ @Return         IMG_TRUE if the given feature is available, IMG_FALSE otherwise
+
+******************************************************************************/
+IMG_BOOL RGXDeviceHasFeature(const void *hPrivate, IMG_UINT64 ui64Feature);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXGetFWCorememSize
+
+ @Description    Get the FW coremem size
+
+ @Input          hPrivate   : Implementation specific data
+
+ @Return         FW coremem size
+
+******************************************************************************/
+IMG_UINT32 RGXGetFWCorememSize(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function      RGXWriteReg32/64
+
+ @Description   Write a value to a 32/64 bit RGX register
+
+ @Input         hPrivate         : Implementation specific data
+ @Input         ui32RegAddr      : Register offset inside the register bank
+ @Input         ui32/64RegValue  : New register value
+
+ @Return        void
+
+******************************************************************************/
+void RGXWriteReg32(const void *hPrivate,
+                   IMG_UINT32 ui32RegAddr,
+                   IMG_UINT32 ui32RegValue);
+
+void RGXWriteReg64(const void *hPrivate,
+                   IMG_UINT32 ui32RegAddr,
+                   IMG_UINT64 ui64RegValue);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXReadReg32/64
+
+ @Description    Read a 32/64 bit RGX register
+
+ @Input          hPrivate     : Implementation specific data
+ @Input          ui32RegAddr  : Register offset inside the register bank
+
+ @Return         Register value
+
+******************************************************************************/
+IMG_UINT32 RGXReadReg32(const void *hPrivate,
+                        IMG_UINT32 ui32RegAddr);
+
+IMG_UINT64 RGXReadReg64(const void *hPrivate,
+                        IMG_UINT32 ui32RegAddr);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXReadModifyWriteReg64
+
+ @Description    Read-modify-write a 64 bit RGX register
+
+ @Input          hPrivate         : Implementation specific data.
+ @Input          ui32RegAddr      : Register offset inside the register bank.
+ @Input          ui64RegValue     : New register value.
+ @Input          ui64RegKeepMask  : Keep the bits set in the mask.
+
+ @Return         Always returns PVRSRV_OK
+
+******************************************************************************/
+IMG_UINT32 RGXReadModifyWriteReg64(const void *hPrivate,
+                                   IMG_UINT32 ui32RegAddr,
+                                   IMG_UINT64 ui64RegValue,
+                                   IMG_UINT64 ui64RegKeepMask);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXPollReg32/64
+
+ @Description    Poll on a 32/64 bit RGX register until some bits are set/unset
+
+ @Input          hPrivate         : Implementation specific data
+ @Input          ui32RegAddr      : Register offset inside the register bank
+ @Input          ui32/64RegValue  : Value expected from the register
+ @Input          ui32/64RegMask   : Only the bits set in this mask will be
+                                    checked against uiRegValue
+
+ @Return         PVRSRV_OK if the poll succeeds,
+                 PVRSRV_ERROR_TIMEOUT if the poll takes too long
+
+******************************************************************************/
+PVRSRV_ERROR RGXPollReg32(const void *hPrivate,
+                          IMG_UINT32 ui32RegAddr,
+                          IMG_UINT32 ui32RegValue,
+                          IMG_UINT32 ui32RegMask);
+
+PVRSRV_ERROR RGXPollReg64(const void *hPrivate,
+                          IMG_UINT32 ui32RegAddr,
+                          IMG_UINT64 ui64RegValue,
+                          IMG_UINT64 ui64RegMask);
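+
+/* Example (illustrative): to wait for a single status bit to clear, i.e. poll
+ * until (reg & mask) == 0, pass 0 as the expected value and the bit as the mask:
+ *
+ *     eError = RGXPollReg32(hPrivate, STATUS_REG_OFFSET, 0, STATUS_BIT_MASK);
+ *
+ * STATUS_REG_OFFSET and STATUS_BIT_MASK are placeholder names here, not real
+ * register definitions. */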
+
+/*!
+*******************************************************************************
+
+ @Function       RGXWaitCycles
+
+ @Description    Wait for a number of GPU cycles and/or microseconds
+
+ @Input          hPrivate    : Implementation specific data
+ @Input          ui32Cycles  : Number of GPU cycles to wait for in PDumps.
+                               It can also be used when running driver-live,
+                               if desired, in which case the next parameter
+                               is ignored
+ @Input          ui32WaitUs  : Number of microseconds to wait for when running
+                               driver-live
+
+ @Return         void
+
+******************************************************************************/
+void RGXWaitCycles(const void *hPrivate,
+                   IMG_UINT32 ui32Cycles,
+                   IMG_UINT32 ui32WaitUs);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXAcquireKernelMMUPC
+
+ @Description     Acquire the Kernel MMU Page Catalogue device physical address
+
+ @Input           hPrivate  : Implementation specific data
+ @Input           psPCAddr  : Returned page catalog address
+
+ @Return          void
+
+******************************************************************************/
+void RGXAcquireKernelMMUPC(const void *hPrivate, IMG_DEV_PHYADDR *psPCAddr);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXWriteKernelMMUPC32/64
+
+ @Description     Write the Kernel MMU Page Catalogue to the 32/64 bit
+                  RGX register passed as argument.
+                  In a driver-live scenario without PDump these functions
+                  are the same as RGXWriteReg32/64 and they don't need
+                  to be reimplemented.
+
+ @Input           hPrivate        : Implementation specific data
+ @Input           ui32PCReg       : Register offset inside the register bank
+ @Input           ui32AlignShift  : PC register alignshift
+ @Input           ui32Shift       : PC register shift
+ @Input           ui32/64PCVal    : Page catalog value (aligned and shifted)
+
+ @Return          void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXWriteKernelMMUPC64(const void *hPrivate,
+                           IMG_UINT32 ui32PCReg,
+                           IMG_UINT32 ui32PCRegAlignShift,
+                           IMG_UINT32 ui32PCRegShift,
+                           IMG_UINT64 ui64PCVal);
+
+void RGXWriteKernelMMUPC32(const void *hPrivate,
+                           IMG_UINT32 ui32PCReg,
+                           IMG_UINT32 ui32PCRegAlignShift,
+                           IMG_UINT32 ui32PCRegShift,
+                           IMG_UINT32 ui32PCVal);
+#else  /* defined(PDUMP) */
+
+#define RGXWriteKernelMMUPC64(priv, pcreg, alignshift, shift, pcval) \
+       RGXWriteReg64(priv, pcreg, pcval)
+
+#define RGXWriteKernelMMUPC32(priv, pcreg, alignshift, shift, pcval) \
+       RGXWriteReg32(priv, pcreg, pcval)
+
+#endif /* defined(PDUMP) */
+
+
+
+/*!
+*******************************************************************************
+
+ @Function        RGXAcquireGPURegsAddr
+
+ @Description     Acquire the GPU registers base device physical address
+
+ @Input           hPrivate       : Implementation specific data
+ @Input           psGPURegsAddr  : Returned GPU registers base address
+
+ @Return          void
+
+******************************************************************************/
+void RGXAcquireGPURegsAddr(const void *hPrivate, IMG_DEV_PHYADDR *psGPURegsAddr);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXMIPSWrapperConfig
+
+ @Description     Write GPU register bank transaction ID and MIPS boot mode
+                  to the MIPS wrapper config register (passed as argument).
+                  In a driver-live scenario without PDump this is the same as
+                  RGXWriteReg64 and it doesn't need to be reimplemented.
+
+ @Input           hPrivate          : Implementation specific data
+ @Input           ui32RegAddr       : Register offset inside the register bank
+ @Input           ui64GPURegsAddr   : GPU registers base address
+ @Input           ui32GPURegsAlign  : Register bank transactions alignment
+ @Input           ui32BootMode      : Mips BOOT ISA mode
+
+ @Return          void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXMIPSWrapperConfig(const void *hPrivate,
+                          IMG_UINT32 ui32RegAddr,
+                          IMG_UINT64 ui64GPURegsAddr,
+                          IMG_UINT32 ui32GPURegsAlign,
+                          IMG_UINT32 ui32BootMode);
+#else
+#define RGXMIPSWrapperConfig(priv, regaddr, gpuregsaddr, gpuregsalign, bootmode) \
+       RGXWriteReg64(priv, regaddr, ((gpuregsaddr) >> (gpuregsalign)) | (bootmode))
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function        RGXAcquireBootRemapAddr
+
+ @Description     Acquire the device physical address of the MIPS bootloader
+                  accessed through remap region
+
+ @Input           hPrivate         : Implementation specific data
+ @Output          psBootRemapAddr  : Base address of the remapped bootloader
+
+ @Return          void
+
+******************************************************************************/
+void RGXAcquireBootRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psBootRemapAddr);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXBootRemapConfig
+
+ @Description     Configure the bootloader remap registers passed as arguments.
+                  In a driver-live scenario without PDump this is the same as
+                  two RGXWriteReg64 and it doesn't need to be reimplemented.
+
+ @Input           hPrivate             : Implementation specific data
+ @Input           ui32Config1RegAddr   : Remap config1 register offset
+ @Input           ui64Config1RegValue  : Remap config1 register value
+ @Input           ui32Config2RegAddr   : Remap config2 register offset
+ @Input           ui64Config2PhyAddr   : Output remapped aligned physical address
+ @Input           ui64Config2PhyMask   : Mask for the output physical address
+ @Input           ui64Config2Settings  : Extra settings for this remap region
+
+ @Return          void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXBootRemapConfig(const void *hPrivate,
+                        IMG_UINT32 ui32Config1RegAddr,
+                        IMG_UINT64 ui64Config1RegValue,
+                        IMG_UINT32 ui32Config2RegAddr,
+                        IMG_UINT64 ui64Config2PhyAddr,
+                        IMG_UINT64 ui64Config2PhyMask,
+                        IMG_UINT64 ui64Config2Settings);
+#else
+#define RGXBootRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \
+               RGXWriteReg64(priv, c1reg, (c1val)); \
+               RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \
+       } while (0)
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function        RGXAcquireCodeRemapAddr
+
+ @Description     Acquire the device physical address of the MIPS code
+                  accessed through remap region
+
+ @Input           hPrivate         : Implementation specific data
+ @Output          psCodeRemapAddr  : Base address of the remapped code
+
+ @Return          void
+
+******************************************************************************/
+void RGXAcquireCodeRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psCodeRemapAddr);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXCodeRemapConfig
+
+ @Description     Configure the code remap registers passed as arguments.
+                  In a driver-live scenario without PDump this is the same as
+                  two RGXWriteReg64 and it doesn't need to be reimplemented.
+
+ @Input           hPrivate             : Implementation specific data
+ @Input           ui32Config1RegAddr   : Remap config1 register offset
+ @Input           ui64Config1RegValue  : Remap config1 register value
+ @Input           ui32Config2RegAddr   : Remap config2 register offset
+ @Input           ui64Config2PhyAddr   : Output remapped aligned physical address
+ @Input           ui64Config2PhyMask   : Mask for the output physical address
+ @Input           ui64Config2Settings  : Extra settings for this remap region
+
+ @Return          void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXCodeRemapConfig(const void *hPrivate,
+                        IMG_UINT32 ui32Config1RegAddr,
+                        IMG_UINT64 ui64Config1RegValue,
+                        IMG_UINT32 ui32Config2RegAddr,
+                        IMG_UINT64 ui64Config2PhyAddr,
+                        IMG_UINT64 ui64Config2PhyMask,
+                        IMG_UINT64 ui64Config2Settings);
+#else
+#define RGXCodeRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \
+               RGXWriteReg64(priv, c1reg, (c1val)); \
+               RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \
+       } while (0)
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function        RGXAcquireDataRemapAddr
+
+ @Description     Acquire the device physical address of the MIPS data
+                  accessed through remap region
+
+ @Input           hPrivate         : Implementation specific data
+ @Output          psDataRemapAddr  : Base address of the remapped data
+
+ @Return          void
+
+******************************************************************************/
+void RGXAcquireDataRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psDataRemapAddr);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXDataRemapConfig
+
+ @Description     Configure the data remap registers passed as arguments.
+                  In a driver-live scenario without PDump this is the same as
+                  two RGXWriteReg64 and it doesn't need to be reimplemented.
+
+ @Input           hPrivate             : Implementation specific data
+ @Input           ui32Config1RegAddr   : Remap config1 register offset
+ @Input           ui64Config1RegValue  : Remap config1 register value
+ @Input           ui32Config2RegAddr   : Remap config2 register offset
+ @Input           ui64Config2PhyAddr   : Output remapped aligned physical address
+ @Input           ui64Config2PhyMask   : Mask for the output physical address
+ @Input           ui64Config2Settings  : Extra settings for this remap region
+
+ @Return          void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXDataRemapConfig(const void *hPrivate,
+                        IMG_UINT32 ui32Config1RegAddr,
+                        IMG_UINT64 ui64Config1RegValue,
+                        IMG_UINT32 ui32Config2RegAddr,
+                        IMG_UINT64 ui64Config2PhyAddr,
+                        IMG_UINT64 ui64Config2PhyMask,
+                        IMG_UINT64 ui64Config2Settings);
+#else
+#define RGXDataRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \
+               RGXWriteReg64(priv, c1reg, (c1val)); \
+               RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \
+       } while (0)
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function        RGXAcquireTrampolineRemapAddr
+
+ @Description     Acquire the device physical address of the MIPS trampoline
+                  accessed through remap region
+
+ @Input           hPrivate             : Implementation specific data
+ @Output          psTrampolineRemapAddr: Base address of the remapped trampoline
+
+ @Return          void
+
+******************************************************************************/
+void RGXAcquireTrampolineRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psTrampolineRemapAddr);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXTrampolineRemapConfig
+
+ @Description     Configure the trampoline remap registers passed as arguments.
+                  In a driver-live scenario without PDump this is the same as
+                  two RGXWriteReg64 and it doesn't need to be reimplemented.
+
+ @Input           hPrivate             : Implementation specific data
+ @Input           ui32Config1RegAddr   : Remap config1 register offset
+ @Input           ui64Config1RegValue  : Remap config1 register value
+ @Input           ui32Config2RegAddr   : Remap config2 register offset
+ @Input           ui64Config2PhyAddr   : Output remapped aligned physical address
+ @Input           ui64Config2PhyMask   : Mask for the output physical address
+ @Input           ui64Config2Settings  : Extra settings for this remap region
+
+ @Return          void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXTrampolineRemapConfig(const void *hPrivate,
+                              IMG_UINT32 ui32Config1RegAddr,
+                              IMG_UINT64 ui64Config1RegValue,
+                              IMG_UINT32 ui32Config2RegAddr,
+                              IMG_UINT64 ui64Config2PhyAddr,
+                              IMG_UINT64 ui64Config2PhyMask,
+                              IMG_UINT64 ui64Config2Settings);
+#else
+#define RGXTrampolineRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \
+               RGXWriteReg64(priv, c1reg, (c1val)); \
+               RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \
+       } while (0)
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function        RGXDoFWSlaveBoot
+
+ @Description     Returns whether or not a FW Slave Boot is required
+                  while powering on
+
+ @Input           hPrivate       : Implementation specific data
+
+ @Return          IMG_TRUE if a FW Slave Boot is required, IMG_FALSE otherwise
+
+******************************************************************************/
+IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXFabricCoherencyTest
+
+ @Description    Performs a coherency test
+
+ @Input          hPrivate         : Implementation specific data
+
+ @Return         PVRSRV_OK if the test succeeds,
+                 PVRSRV_ERROR_INIT_FAILURE if the test fails at some point
+
+******************************************************************************/
+PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate);
+
+/* This is used to check if a specific ERN/BRN is enabled from hPrivate.
+ * Should be used instead of calling RGXDeviceHasErnBrn directly. */
+#define RGX_DEVICE_HAS_ERN(hPrivate, ERN) \
+                       RGXDeviceHasErnBrn(hPrivate, HW_ERN_##ERN##_BIT_MASK)
+
+#define RGX_DEVICE_HAS_BRN(hPrivate, BRN) \
+                       RGXDeviceHasErnBrn(hPrivate, FIX_HW_BRN_##BRN##_BIT_MASK)
+
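+/* Usage sketch (illustrative only; the BRN number below is a placeholder and
+ * not a real workaround identifier):
+ *
+ *   if (RGX_DEVICE_HAS_BRN(hPrivate, 12345))
+ *   {
+ *       apply the workaround for BRN 12345 here
+ *   }
+ *
+ * The macro expands to RGXDeviceHasErnBrn(hPrivate, FIX_HW_BRN_12345_BIT_MASK),
+ * so the corresponding bit-mask define must exist for the BRN being checked.
+ */
+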
+/*!
+*******************************************************************************
+
+ @Function       RGXDeviceHasErnBrn
+
+ @Description    Checks if the device has a particular erratum (BRN) or
+                 enhancement (ERN)
+
+ @Input          hPrivate     : Implementation specific data
+ @Input          ui64ErnsBrns : Flags to check
+
+ @Return         IMG_TRUE if the given erratum/enhancement applies to the
+                 device, IMG_FALSE otherwise
+
+******************************************************************************/
+IMG_BOOL RGXDeviceHasErnBrn(const void *hPrivate, IMG_UINT64 ui64ErnsBrns);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXGetDeviceSLCBanks
+
+ @Description    Returns the number of SLC banks used by the device
+
+ @Input          hPrivate    : Implementation specific data
+
+ @Return         Number of SLC banks
+
+******************************************************************************/
+IMG_UINT32 RGXGetDeviceSLCBanks(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXGetDeviceCacheLineSize
+
+ @Description    Returns the device cache line size
+
+ @Input          hPrivate    : Implementation specific data
+
+ @Return         Cache line size
+
+******************************************************************************/
+IMG_UINT32 RGXGetDeviceCacheLineSize(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXGetDevicePhysBusWidth
+
+ @Description    Returns the device physical bus width
+
+ @Input          hPrivate    : Implementation specific data
+
+ @Return         Physical bus width
+
+******************************************************************************/
+IMG_UINT32 RGXGetDevicePhysBusWidth(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXDevicePA0IsValid
+
+ @Description    Returns true if the device physical address 0x0 is a valid
+                 address and can be accessed by the GPU.
+
+ @Input          hPrivate    : Implementation specific data
+
+ @Return         IMG_TRUE if device physical address 0x0 is a valid address,
+                 IMG_FALSE otherwise
+
+******************************************************************************/
+IMG_BOOL RGXDevicePA0IsValid(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXAcquireBootCodeAddr
+
+ @Description     Acquire the device virtual address of the RISCV boot code
+
+ @Input           hPrivate         : Implementation specific data
+ @Output          psBootCodeAddr   : Boot code base address
+
+ @Return          void
+
+******************************************************************************/
+void RGXAcquireBootCodeAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootCodeAddr);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXAcquireBootDataAddr
+
+ @Description     Acquire the device virtual address of the RISCV boot data
+
+ @Input           hPrivate         : Implementation specific data
+ @Output          psBootDataAddr   : Boot data base address
+
+ @Return          void
+
+******************************************************************************/
+void RGXAcquireBootDataAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootDataAddr);
+
+/*!
+*******************************************************************************
+
+ @Function      RGXDeviceAckIrq
+
+ @Description   Checks the implementation specific IRQ status register,
+                clearing it if necessary and returning the IRQ status.
+
+ @Input          hPrivate    : Implementation specific data
+
+ @Return         IRQ status
+
+******************************************************************************/
+IMG_BOOL RGXDeviceAckIrq(const void *hPrivate);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* RGXLAYER_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxlayer_impl.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxlayer_impl.c
new file mode 100644 (file)
index 0000000..c82b04c
--- /dev/null
@@ -0,0 +1,1320 @@
+/*************************************************************************/ /*!
+@File
+@Title          DDK implementation of the Services abstraction layer
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    DDK implementation of the Services abstraction layer
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxlayer_impl.h"
+#include "osfunc.h"
+#include "pdump_km.h"
+#include "rgxfwutils.h"
+#include "rgxfwimageutils.h"
+#include "devicemem.h"
+#include "cache_km.h"
+#include "pmr.h"
+
+#if defined(PDUMP)
+#include <stdarg.h>
+#endif
+
+void RGXMemCopy(const void *hPrivate,
+               void *pvDst,
+               void *pvSrc,
+               size_t uiSize)
+{
+       PVR_UNREFERENCED_PARAMETER(hPrivate);
+       OSDeviceMemCopy(pvDst, pvSrc, uiSize);
+}
+
+void RGXMemSet(const void *hPrivate,
+               void *pvDst,
+               IMG_UINT8 ui8Value,
+               size_t uiSize)
+{
+       PVR_UNREFERENCED_PARAMETER(hPrivate);
+       OSDeviceMemSet(pvDst, ui8Value, uiSize);
+}
+
+void RGXCommentLog(const void *hPrivate,
+               const IMG_CHAR *pszString,
+               ...)
+{
+#if defined(PDUMP)
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       va_list argList;
+       va_start(argList, pszString);
+
+       PVR_ASSERT(hPrivate != NULL);
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+
+       PDumpCommentWithFlagsVA(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, pszString, argList);
+       va_end(argList);
+#else
+       PVR_UNREFERENCED_PARAMETER(hPrivate);
+       PVR_UNREFERENCED_PARAMETER(pszString);
+#endif
+}
+
+void RGXErrorLog(const void *hPrivate,
+               const IMG_CHAR *pszString,
+               ...)
+{
+       IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+       va_list argList;
+
+       PVR_UNREFERENCED_PARAMETER(hPrivate);
+
+       va_start(argList, pszString);
+       vsnprintf(szBuffer, sizeof(szBuffer), pszString, argList);
+       va_end(argList);
+
+       PVR_DPF((PVR_DBG_ERROR, "%s", szBuffer));
+}
+
+IMG_UINT32 RGXGetOSPageSize(const void *hPrivate)
+{
+       PVR_UNREFERENCED_PARAMETER(hPrivate);
+       return OSGetPageSize();
+}
+
+IMG_UINT32 RGXGetFWCorememSize(const void *hPrivate)
+{
+#if defined(RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX)
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       IMG_UINT32 ui32CorememSize = 0;
+
+       PVR_ASSERT(hPrivate != NULL);
+
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE))
+       {
+               ui32CorememSize = RGX_GET_FEATURE_VALUE(psDevInfo, META_COREMEM_SIZE);
+       }
+
+       return ui32CorememSize;
+#else
+       PVR_UNREFERENCED_PARAMETER(hPrivate);
+
+       return 0U;
+#endif
+}
+
+void RGXWriteReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue)
+{
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       void __iomem *pvRegsBase;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+       pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+       if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
+#endif
+       {
+               OSWriteHWReg32(pvRegsBase, ui32RegAddr, ui32RegValue);
+       }
+
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME,
+                  ui32RegAddr, ui32RegValue, psParams->ui32PdumpFlags);
+}
+
+void RGXWriteReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT64 ui64RegValue)
+{
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       void __iomem *pvRegsBase;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+       pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+       if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
+#endif
+       {
+               OSWriteHWReg64(pvRegsBase, ui32RegAddr, ui64RegValue);
+       }
+
+       PDUMPREG64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME,
+                  ui32RegAddr, ui64RegValue, psParams->ui32PdumpFlags);
+}
+
+IMG_UINT32 RGXReadReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr)
+{
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       void __iomem *pvRegsBase;
+       IMG_UINT32 ui32RegValue;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+       pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+       if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)
+       {
+               ui32RegValue = IMG_UINT32_MAX;
+       }
+       else
+#endif
+       {
+               ui32RegValue = OSReadHWReg32(pvRegsBase, ui32RegAddr);
+       }
+
+       PDUMPREGREAD32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME,
+                      ui32RegAddr, psParams->ui32PdumpFlags);
+
+       return ui32RegValue;
+}
+
+IMG_UINT64 RGXReadReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr)
+{
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       void __iomem *pvRegsBase;
+       IMG_UINT64 ui64RegValue;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+       pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+       if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)
+       {
+               ui64RegValue = IMG_UINT64_MAX;
+       }
+       else
+#endif
+       {
+               ui64RegValue = OSReadHWReg64(pvRegsBase, ui32RegAddr);
+       }
+
+       PDUMPREGREAD64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME,
+                      ui32RegAddr, PDUMP_FLAGS_CONTINUOUS);
+
+       return ui64RegValue;
+}
+
+IMG_UINT32 RGXReadModifyWriteReg64(const void *hPrivate,
+               IMG_UINT32 ui32RegAddr,
+               IMG_UINT64 uiRegValueNew,
+               IMG_UINT64 uiRegKeepMask)
+{
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       void __iomem *pvRegsBase;
+#if defined(PDUMP)
+       PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+#endif
+
+       PVR_ASSERT(hPrivate != NULL);
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+       pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+       /* only use the new values for bits we update according to the keep mask */
+       uiRegValueNew &= ~uiRegKeepMask;
+
+#if defined(PDUMP)
+
+       PDUMP_BLKSTART(ui32PDumpFlags);
+
+       /* Store register offset to temp PDump variable */
+       PDumpRegRead64ToInternalVar(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME,
+                                   ":SYSMEM:$1", ui32RegAddr, ui32PDumpFlags);
+
+       /* Keep the bits set in the mask */
+       PDumpWriteVarANDValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1",
+                               uiRegKeepMask, ui32PDumpFlags);
+
+       /* OR the new values */
+       PDumpWriteVarORValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1",
+                              uiRegValueNew, ui32PDumpFlags);
+
+       /* Do the actual register write */
+       PDumpInternalVarToReg64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME,
+                               ui32RegAddr, ":SYSMEM:$1", ui32PDumpFlags);
+
+       PDUMP_BLKEND(ui32PDumpFlags);
+
+       if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
+#endif
+
+       {
+               IMG_UINT64 uiRegValue = OSReadHWReg64(pvRegsBase, ui32RegAddr);
+               uiRegValue &= uiRegKeepMask;
+               OSWriteHWReg64(pvRegsBase, ui32RegAddr, uiRegValue | uiRegValueNew);
+       }
+
+       return PVRSRV_OK;
+}
+
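+/* Usage sketch (illustrative only; HYPOTHETICAL_REG_OFFSET and
+ * HYPOTHETICAL_FIELD_CLRMSK are placeholders, not real RGX defines): update a
+ * single field of a 64-bit register while leaving every other bit unchanged.
+ *
+ *   RGXReadModifyWriteReg64(hPrivate,
+ *                           HYPOTHETICAL_REG_OFFSET,
+ *                           ui64NewFieldValue,
+ *                           HYPOTHETICAL_FIELD_CLRMSK);
+ *
+ * Bits set in the keep mask are read back from the register and preserved;
+ * only the bits cleared in the mask are taken from the new value, so passing
+ * a field's _CLRMSK define updates just that field.
+ */
+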
+PVRSRV_ERROR RGXPollReg32(const void *hPrivate,
+               IMG_UINT32 ui32RegAddr,
+               IMG_UINT32 ui32RegValue,
+               IMG_UINT32 ui32RegMask)
+{
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       void __iomem *pvRegsBase;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+       pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+       if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
+#endif
+       {
+               if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                               (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr),
+                               ui32RegValue,
+                               ui32RegMask,
+                               POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "RGXPollReg32: Poll for Reg (0x%x) failed", ui32RegAddr));
+                       return PVRSRV_ERROR_TIMEOUT;
+               }
+       }
+
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                       RGX_PDUMPREG_NAME,
+                       ui32RegAddr,
+                       ui32RegValue,
+                       ui32RegMask,
+                       psParams->ui32PdumpFlags,
+                       PDUMP_POLL_OPERATOR_EQUAL);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXPollReg64(const void *hPrivate,
+               IMG_UINT32 ui32RegAddr,
+               IMG_UINT64 ui64RegValue,
+               IMG_UINT64 ui64RegMask)
+{
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       void __iomem *pvRegsBase;
+
+       /* Split lower and upper words */
+       IMG_UINT32 ui32UpperValue = (IMG_UINT32) (ui64RegValue >> 32);
+       IMG_UINT32 ui32LowerValue = (IMG_UINT32) (ui64RegValue);
+       IMG_UINT32 ui32UpperMask = (IMG_UINT32) (ui64RegMask >> 32);
+       IMG_UINT32 ui32LowerMask = (IMG_UINT32) (ui64RegMask);
+
+       PVR_ASSERT(hPrivate != NULL);
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+       pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+       if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
+#endif
+       {
+               if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                               (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr + 4),
+                               ui32UpperValue,
+                               ui32UpperMask,
+                               POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for upper part of Reg (0x%x) failed", ui32RegAddr));
+                       return PVRSRV_ERROR_TIMEOUT;
+               }
+
+               if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                               (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr),
+                               ui32LowerValue,
+                               ui32LowerMask,
+                               POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for lower part of Reg (0x%x) failed", ui32RegAddr));
+                       return PVRSRV_ERROR_TIMEOUT;
+               }
+       }
+
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                       RGX_PDUMPREG_NAME,
+                       ui32RegAddr + 4,
+                       ui32UpperValue,
+                       ui32UpperMask,
+                       psParams->ui32PdumpFlags,
+                       PDUMP_POLL_OPERATOR_EQUAL);
+
+
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                       RGX_PDUMPREG_NAME,
+                       ui32RegAddr,
+                       ui32LowerValue,
+                       ui32LowerMask,
+                       psParams->ui32PdumpFlags,
+                       PDUMP_POLL_OPERATOR_EQUAL);
+
+       return PVRSRV_OK;
+}
+
+void RGXWaitCycles(const void *hPrivate, IMG_UINT32 ui32Cycles, IMG_UINT32 ui32TimeUs)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+       OSWaitus(ui32TimeUs);
+       PDUMPIDLWITHFLAGS(psDevInfo->psDeviceNode, ui32Cycles, PDUMP_FLAGS_CONTINUOUS);
+}
+
+void RGXAcquireKernelMMUPC(const void *hPrivate, IMG_DEV_PHYADDR *psPCAddr)
+{
+       PVR_ASSERT(hPrivate != NULL);
+       *psPCAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sPCAddr;
+}
+
+#if defined(PDUMP)
+void RGXWriteKernelMMUPC64(const void *hPrivate,
+               IMG_UINT32 ui32PCReg,
+               IMG_UINT32 ui32PCRegAlignShift,
+               IMG_UINT32 ui32PCRegShift,
+               IMG_UINT64 ui64PCVal)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+       /* Write the cat-base address */
+       OSWriteHWReg64(psDevInfo->pvRegsBaseKM, ui32PCReg, ui64PCVal);
+
+       /* Pdump catbase address */
+       MMU_PDumpWritePageCatBase(psDevInfo->psKernelMMUCtx,
+                       RGX_PDUMPREG_NAME,
+                       ui32PCReg,
+                       8,
+                       ui32PCRegAlignShift,
+                       ui32PCRegShift,
+                       PDUMP_FLAGS_CONTINUOUS);
+}
+
+void RGXWriteKernelMMUPC32(const void *hPrivate,
+               IMG_UINT32 ui32PCReg,
+               IMG_UINT32 ui32PCRegAlignShift,
+               IMG_UINT32 ui32PCRegShift,
+               IMG_UINT32 ui32PCVal)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+       /* Write the cat-base address */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32PCReg, ui32PCVal);
+
+       /* Pdump catbase address */
+       MMU_PDumpWritePageCatBase(psDevInfo->psKernelMMUCtx,
+                       RGX_PDUMPREG_NAME,
+                       ui32PCReg,
+                       4,
+                       ui32PCRegAlignShift,
+                       ui32PCRegShift,
+                       PDUMP_FLAGS_CONTINUOUS);
+}
+#endif /* defined(PDUMP) */
+
+void RGXAcquireGPURegsAddr(const void *hPrivate, IMG_DEV_PHYADDR *psGPURegsAddr)
+{
+       PVR_ASSERT(hPrivate != NULL);
+       *psGPURegsAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sGPURegAddr;
+}
+
+#if defined(PDUMP)
+void RGXMIPSWrapperConfig(const void *hPrivate,
+               IMG_UINT32 ui32RegAddr,
+               IMG_UINT64 ui64GPURegsAddr,
+               IMG_UINT32 ui32GPURegsAlign,
+               IMG_UINT32 ui32BootMode)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+       OSWriteHWReg64(psDevInfo->pvRegsBaseKM,
+                       ui32RegAddr,
+                       (ui64GPURegsAddr >> ui32GPURegsAlign) | ui32BootMode);
+
+       PDUMP_BLKSTART(ui32PDumpFlags);
+
+       /* Store register offset to temp PDump variable */
+       PDumpRegLabelToInternalVar(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME,
+                                  ui32RegAddr, ":SYSMEM:$1", ui32PDumpFlags);
+
+       /* Align register transactions identifier */
+       PDumpWriteVarSHRValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1",
+                               ui32GPURegsAlign, ui32PDumpFlags);
+
+       /* Enable micromips instruction encoding */
+       PDumpWriteVarORValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1",
+                              ui32BootMode, ui32PDumpFlags);
+
+       /* Do the actual register write */
+       PDumpInternalVarToReg64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME,
+                               ui32RegAddr, ":SYSMEM:$1", ui32PDumpFlags);
+
+       PDUMP_BLKEND(ui32PDumpFlags);
+}
+#endif
+
+void RGXAcquireBootRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psBootRemapAddr)
+{
+       PVR_ASSERT(hPrivate != NULL);
+       *psBootRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sBootRemapAddr;
+}
+
+void RGXAcquireCodeRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psCodeRemapAddr)
+{
+       PVR_ASSERT(hPrivate != NULL);
+       *psCodeRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sCodeRemapAddr;
+}
+
+void RGXAcquireDataRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psDataRemapAddr)
+{
+       PVR_ASSERT(hPrivate != NULL);
+       *psDataRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sDataRemapAddr;
+}
+
+void RGXAcquireTrampolineRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psTrampolineRemapAddr)
+{
+       PVR_ASSERT(hPrivate != NULL);
+       *psTrampolineRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sTrampolineRemapAddr;
+}
+
+#if defined(PDUMP)
+static inline
+void RGXWriteRemapConfig2Reg(void __iomem *pvRegs,
+               PMR *psPMR,
+               IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+               IMG_UINT32 ui32RegAddr,
+               IMG_UINT64 ui64PhyAddr,
+               IMG_UINT64 ui64PhyMask,
+               IMG_UINT64 ui64Settings)
+{
+       PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+       PVRSRV_DEVICE_NODE *psDevNode;
+
+       PVR_ASSERT(psPMR != NULL);
+       psDevNode = PMR_DeviceNode(psPMR);
+
+       OSWriteHWReg64(pvRegs, ui32RegAddr, (ui64PhyAddr & ui64PhyMask) | ui64Settings);
+
+       PDUMP_BLKSTART(ui32PDumpFlags);
+
+       /* Store memory offset to temp PDump variable */
+       PDumpMemLabelToInternalVar64(":SYSMEM:$1", psPMR,
+                                    uiLogicalOffset, ui32PDumpFlags);
+
+       /* Keep only the relevant bits of the output physical address */
+       PDumpWriteVarANDValueOp(psDevNode, ":SYSMEM:$1", ui64PhyMask, ui32PDumpFlags);
+
+       /* Extra settings for this remapped region */
+       PDumpWriteVarORValueOp(psDevNode, ":SYSMEM:$1", ui64Settings, ui32PDumpFlags);
+
+       /* Do the actual register write */
+       PDumpInternalVarToReg64(psDevNode, RGX_PDUMPREG_NAME, ui32RegAddr,
+                               ":SYSMEM:$1", ui32PDumpFlags);
+
+       PDUMP_BLKEND(ui32PDumpFlags);
+}
+
+void RGXBootRemapConfig(const void *hPrivate,
+               IMG_UINT32 ui32Config1RegAddr,
+               IMG_UINT64 ui64Config1RegValue,
+               IMG_UINT32 ui32Config2RegAddr,
+               IMG_UINT64 ui64Config2PhyAddr,
+               IMG_UINT64 ui64Config2PhyMask,
+               IMG_UINT64 ui64Config2Settings)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       IMG_UINT32 ui32BootRemapMemOffset = RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_CODE);
+
+       PVR_ASSERT(hPrivate != NULL);
+       psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+       /* Write remap config1 register */
+       RGXWriteReg64(hPrivate,
+                       ui32Config1RegAddr,
+                       ui64Config1RegValue);
+
+       /* Write remap config2 register */
+       RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM,
+                       psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR,
+                       psDevInfo->psRGXFWCodeMemDesc->uiOffset + ui32BootRemapMemOffset,
+                       ui32Config2RegAddr,
+                       ui64Config2PhyAddr,
+                       ui64Config2PhyMask,
+                       ui64Config2Settings);
+}
+
+void RGXCodeRemapConfig(const void *hPrivate,
+               IMG_UINT32 ui32Config1RegAddr,
+               IMG_UINT64 ui64Config1RegValue,
+               IMG_UINT32 ui32Config2RegAddr,
+               IMG_UINT64 ui64Config2PhyAddr,
+               IMG_UINT64 ui64Config2PhyMask,
+               IMG_UINT64 ui64Config2Settings)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       IMG_UINT32 ui32CodeRemapMemOffset = RGXGetFWImageSectionOffset(NULL, MIPS_EXCEPTIONS_CODE);
+
+       PVR_ASSERT(hPrivate != NULL);
+       psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+       /* Write remap config1 register */
+       RGXWriteReg64(hPrivate,
+                       ui32Config1RegAddr,
+                       ui64Config1RegValue);
+
+       /* Write remap config2 register */
+       RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM,
+                       psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR,
+                       psDevInfo->psRGXFWCodeMemDesc->uiOffset + ui32CodeRemapMemOffset,
+                       ui32Config2RegAddr,
+                       ui64Config2PhyAddr,
+                       ui64Config2PhyMask,
+                       ui64Config2Settings);
+}
+
+void RGXDataRemapConfig(const void *hPrivate,
+               IMG_UINT32 ui32Config1RegAddr,
+               IMG_UINT64 ui64Config1RegValue,
+               IMG_UINT32 ui32Config2RegAddr,
+               IMG_UINT64 ui64Config2PhyAddr,
+               IMG_UINT64 ui64Config2PhyMask,
+               IMG_UINT64 ui64Config2Settings)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       IMG_UINT32 ui32DataRemapMemOffset = RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA);
+
+       PVR_ASSERT(hPrivate != NULL);
+       psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+       /* Write remap config1 register */
+       RGXWriteReg64(hPrivate,
+                       ui32Config1RegAddr,
+                       ui64Config1RegValue);
+
+       /* Write remap config2 register */
+       RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM,
+                       psDevInfo->psRGXFWDataMemDesc->psImport->hPMR,
+                       psDevInfo->psRGXFWDataMemDesc->uiOffset + ui32DataRemapMemOffset,
+                       ui32Config2RegAddr,
+                       ui64Config2PhyAddr,
+                       ui64Config2PhyMask,
+                       ui64Config2Settings);
+}
+
+void RGXTrampolineRemapConfig(const void *hPrivate,
+               IMG_UINT32 ui32Config1RegAddr,
+               IMG_UINT64 ui64Config1RegValue,
+               IMG_UINT32 ui32Config2RegAddr,
+               IMG_UINT64 ui64Config2PhyAddr,
+               IMG_UINT64 ui64Config2PhyMask,
+               IMG_UINT64 ui64Config2Settings)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+       /* write the register for real, without PDump */
+       OSWriteHWReg64(psDevInfo->pvRegsBaseKM,
+                       ui32Config1RegAddr,
+                       ui64Config1RegValue);
+
+       PDUMP_BLKSTART(ui32PDumpFlags);
+
+       /* Store the memory address in a PDump variable */
+       PDumpPhysHandleToInternalVar64(psDevInfo->psDeviceNode, ":SYSMEM:$1",
+                       psDevInfo->psTrampoline->hPdumpPages,
+                       ui32PDumpFlags);
+
+       /* Keep only the relevant bits of the input physical address */
+       PDumpWriteVarANDValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1",
+                       ~RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_CLRMSK,
+                       ui32PDumpFlags);
+
+       /* Enable bit */
+       PDumpWriteVarORValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1",
+                       RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN,
+                       ui32PDumpFlags);
+
+       /* Do the PDump register write */
+       PDumpInternalVarToReg64(psDevInfo->psDeviceNode,
+                       RGX_PDUMPREG_NAME,
+                       ui32Config1RegAddr,
+                       ":SYSMEM:$1",
+                       ui32PDumpFlags);
+
+       PDUMP_BLKEND(ui32PDumpFlags);
+
+       /* this can be written directly */
+       RGXWriteReg64(hPrivate,
+                       ui32Config2RegAddr,
+                       (ui64Config2PhyAddr & ui64Config2PhyMask) | ui64Config2Settings);
+}
+#endif
+
+#define MAX_NUM_COHERENCY_TESTS  (10)
+IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       PVRSRV_DEVICE_CONFIG *psDevConfig;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+       if (psDevInfo->ui32CoherencyTestsDone >= MAX_NUM_COHERENCY_TESTS)
+       {
+               return IMG_FALSE;
+       }
+
+       psDevConfig = ((RGX_LAYER_PARAMS*)hPrivate)->psDevConfig;
+
+       return PVRSRVSystemSnoopingOfCPUCache(psDevConfig);
+}
+
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+static PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       /* Wait for Slave Port to be Ready */
+       eError = RGXPollReg32(hPrivate,
+                       RGX_CR_META_SP_MSLVCTRL1,
+                       RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+                       RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+       if (eError != PVRSRV_OK) return eError;
+
+       /* Issue a Write */
+       RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr);
+       RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT, ui32RegValue);
+
+       return eError;
+}
+#endif
+
+extern void do_invalid_range(unsigned long start, unsigned long len);
+/*
+ * The fabric coherency test is performed when platform supports fabric coherency
+ * either in the form of ACE-lite or Full-ACE. This test is done quite early
+ * with the firmware processor quiescent and makes exclusive use of the slave
+ * port interface for reading/writing through the device memory hierarchy. The
+ * rationale for the test is to ensure that what the CPU writes to its dcache
+ * is visible to the GPU via coherency snoop miss/hit and vice-versa without
+ * any intervening cache maintenance by the writing agent.
+ */
+PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate)
+{
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       IMG_UINT32 *pui32FabricCohTestBufferCpuVA;
+       DEVMEM_MEMDESC *psFabricCohTestBufferMemDesc;
+       RGXFWIF_DEV_VIRTADDR sFabricCohTestBufferDevVA;
+       IMG_DEVMEM_SIZE_T uiFabricCohTestBlockSize = sizeof(IMG_UINT64);
+       IMG_DEVMEM_ALIGN_T uiFabricCohTestBlockAlign = sizeof(IMG_UINT64);
+       IMG_UINT32 ui32SLCCTRL = 0;
+       IMG_UINT32 ui32OddEven;
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+       IMG_BOOL   bFeatureS7 = RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE);
+#endif
+       IMG_UINT32 ui32TestType;
+       IMG_UINT32 ui32OddEvenSeed = 1;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_BOOL bFullTestPassed = IMG_TRUE;
+       IMG_BOOL bExit = IMG_FALSE;
+#if defined(DEBUG)
+       IMG_BOOL bSubTestPassed = IMG_FALSE;
+#endif
+
+       PVR_ASSERT(hPrivate != NULL);
+       psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+       PVR_LOG(("Starting fabric coherency test ....."));
+
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+       if (bFeatureS7)
+       {
+               IMG_UINT64 ui64SegOutAddrTopUncached = RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_UNCACHED(MMU_CONTEXT_MAPPING_FWIF);
+
+               /* Configure META to use SLC force-linefill for the bootloader segment */
+               RGXWriteMetaRegThroughSP(hPrivate, META_CR_MMCU_SEGMENTn_OUTA1(6),
+                               (ui64SegOutAddrTopUncached | RGXFW_BOOTLDR_DEVV_ADDR) >> 32);
+       }
+       else
+#endif
+       {
+               /* Bypass the SLC when IO coherency is enabled */
+               ui32SLCCTRL = RGXReadReg32(hPrivate, RGX_CR_SLC_CTRL_BYPASS);
+               RGXWriteReg32(hPrivate,
+                               RGX_CR_SLC_CTRL_BYPASS,
+                               ui32SLCCTRL | RGX_CR_SLC_CTRL_BYPASS_BYP_CC_EN);
+       }
+
+       /* Size and align are 'expanded' because we request an export align allocation */
+       eError = DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap),
+                       &uiFabricCohTestBlockSize,
+                       &uiFabricCohTestBlockAlign);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "DevmemExportalignAdjustSizeAndAlign() error: %s, exiting",
+                               PVRSRVGetErrorString(eError)));
+               goto e0;
+       }
+
+       /* Allocate, acquire cpu address and set firmware address */
+       eError = DevmemFwAllocateExportable(psDevInfo->psDeviceNode,
+                       uiFabricCohTestBlockSize,
+                       uiFabricCohTestBlockAlign,
+                       PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                       PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+                       PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+                       PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT |
+                       PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT |
+                       PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                       PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                       PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+                       PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                       PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN),
+                       "FwExFabricCoherencyTestBuffer",
+                       &psFabricCohTestBufferMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "DevmemFwAllocateExportable() error: %s, exiting",
+                               PVRSRVGetErrorString(eError)));
+               goto e0;
+       }
+
+       eError = DevmemAcquireCpuVirtAddr(psFabricCohTestBufferMemDesc, (void **) &pui32FabricCohTestBufferCpuVA);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "DevmemAcquireCpuVirtAddr() error: %s, exiting",
+                               PVRSRVGetErrorString(eError)));
+               goto e1;
+       }
+
+       /* Create a FW address which is uncached in the Meta DCache and in the SLC
+        * using the Meta bootloader segment.
+        * This segment is the only one configured correctly out of reset
+        * (when this test is meant to be executed).
+        */
+       eError = RGXSetFirmwareAddress(&sFabricCohTestBufferDevVA,
+                       psFabricCohTestBufferMemDesc,
+                       0,
+                       RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", e2);
+
+       /* Undo most of the FW mappings done by RGXSetFirmwareAddress */
+       sFabricCohTestBufferDevVA.ui32Addr &= ~RGXFW_SEGMMU_DATA_META_CACHE_MASK;
+       sFabricCohTestBufferDevVA.ui32Addr &= ~RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK;
+       sFabricCohTestBufferDevVA.ui32Addr -= RGXFW_SEGMMU_DATA_BASE_ADDRESS;
+
+       /* Map the buffer in the bootloader segment as uncached */
+       sFabricCohTestBufferDevVA.ui32Addr |= RGXFW_BOOTLDR_META_ADDR;
+       sFabricCohTestBufferDevVA.ui32Addr |= RGXFW_SEGMMU_DATA_META_UNCACHED;
+
+       for (ui32TestType = 0; ui32TestType < 4 && bExit == IMG_FALSE; ui32TestType++)
+       {
+               IMG_CPU_PHYADDR sCpuPhyAddr;
+               IMG_BOOL bValid;
+               PMR *psPMR;
+
+               /* Acquire underlying PMR CpuPA in preparation for cache maintenance */
+               (void) DevmemLocalGetImportHandle(psFabricCohTestBufferMemDesc, (void**)&psPMR);
+               eError = PMR_CpuPhysAddr(psPMR, OSGetPageShift(), 1, 0, &sCpuPhyAddr, &bValid);
+               if (eError != PVRSRV_OK || bValid == IMG_FALSE)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "PMR_CpuPhysAddr error: %s, exiting",
+                                       PVRSRVGetErrorString(eError)));
+                       bExit = IMG_TRUE;
+                       continue;
+               }
+
+               /* Here we do two passes [runs], mostly to account for the effects of using
+                  a different seed value (i.e. ui32OddEvenSeed) for the writes and reads */
+               for (ui32OddEven = 1; ui32OddEven < 3 && bExit == IMG_FALSE; ui32OddEven++)
+               {
+                       IMG_UINT32 i;
+
+#if defined(DEBUG)
+                       switch (ui32TestType)
+                       {
+                       case 0:
+                               PVR_LOG(("CPU:Write/GPU:Read Snoop Miss Test: starting [run #%u]", ui32OddEven));
+                               break;
+                       case 1:
+                               PVR_LOG(("GPU:Write/CPU:Read Snoop Miss Test: starting [run #%u]", ui32OddEven));
+                               break;
+                       case 2:
+                               PVR_LOG(("CPU:Write/GPU:Read Snoop Hit  Test: starting [run #%u]", ui32OddEven));
+                               break;
+                       case 3:
+                               PVR_LOG(("GPU:Write/CPU:Read Snoop Hit  Test: starting [run #%u]", ui32OddEven));
+                               break;
+                       default:
+                               PVR_LOG(("Internal error, exiting test"));
+                               eError = PVRSRV_ERROR_INIT_FAILURE;
+                               bExit = IMG_TRUE;
+                               continue;
+                       }
+#endif
+
+                       for (i = 0; i < 2 && bExit == IMG_FALSE; i++)
+                       {
+                               IMG_UINT32 ui32FWAddr;
+                               IMG_UINT32 ui32FWValue;
+                               IMG_UINT32 ui32FWValue2;
+                               IMG_CPU_PHYADDR sCpuPhyAddrStart;
+                               IMG_CPU_PHYADDR sCpuPhyAddrEnd;
+                               IMG_UINT32 ui32LastFWValue = ~0;
+                               IMG_UINT32 ui32Offset = i * sizeof(IMG_UINT32);
+
+                               /* Calculate next address and seed value to write/read from slave-port */
+                               ui32FWAddr = sFabricCohTestBufferDevVA.ui32Addr + ui32Offset;
+                               sCpuPhyAddrStart.uiAddr = sCpuPhyAddr.uiAddr + ui32Offset;
+                               sCpuPhyAddrEnd.uiAddr = sCpuPhyAddrStart.uiAddr;
+                               ui32OddEvenSeed += 1;
+
+                               if (ui32TestType & 0x1)
+                               {
+                                       ui32FWValue = i + ui32OddEvenSeed;
+
+                                       switch (ui32TestType)
+                                       {
+                                       case 1:
+                                       case 3:
+                                               /* Clean the dcache to ensure there is no stale dirty data in it that could
+                                                  drain over what we are about to write via the slave-port: if such data
+                                                  drained from the CPU dcache after the GPU write, it would corrupt the
+                                                  value we are going to read back via the CPU */
+                                               sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32);
+                                               CacheOpExec(psDevInfo->psDeviceNode,
+                                                               (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset,
+                                                               (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32),
+                                                               sCpuPhyAddrStart,
+                                                               sCpuPhyAddrEnd,
+                                                               PVRSRV_CACHE_OP_CLEAN);
+                                               break;
+                                       }
+
+                                       /* Write the value using the RGX slave-port interface */
+                                       eError = RGXWriteFWModuleAddr(psDevInfo, ui32FWAddr, ui32FWValue);
+                                       if (eError != PVRSRV_OK)
+                                       {
+                                               PVR_DPF((PVR_DBG_ERROR,
+                                                               "RGXWriteFWModuleAddr error: %s, exiting",
+                                                               PVRSRVGetErrorString(eError)));
+                                               bExit = IMG_TRUE;
+                                               continue;
+                                       }
+
+                                       /* Read back value using RGX slave-port interface, this is used
+                                          as a sort of memory barrier for the above write */
+                                       eError = RGXReadFWModuleAddr(psDevInfo, ui32FWAddr, &ui32FWValue2);
+                                       if (eError != PVRSRV_OK)
+                                       {
+                                               PVR_DPF((PVR_DBG_ERROR,
+                                                               "RGXReadFWModuleAddr error: %s, exiting",
+                                                               PVRSRVGetErrorString(eError)));
+                                               bExit = IMG_TRUE;
+                                               continue;
+                                       }
+                                       else if (ui32FWValue != ui32FWValue2)
+                                       {
+                                               /* Fatal error, we should abort */
+                                               PVR_DPF((PVR_DBG_ERROR,
+                                                               "At Offset: %d, RAW via SlavePort failed: expected: %x, got: %x",
+                                                               i,
+                                                               ui32FWValue,
+                                                               ui32FWValue2));
+                                               eError = PVRSRV_ERROR_INIT_FAILURE;
+                                               bExit = IMG_TRUE;
+                                               continue;
+                                       }
+
+                                       if (! PVRSRVSystemSnoopingOfDeviceCache(psDevInfo->psDeviceNode->psDevConfig))
+                                       {
+                                               /* Invalidate the dcache so that any data prefetched by the CPU from this
+                                                  memory region is discarded before we read (i.e. the next read must miss
+                                                  in the cache). If the system snoops the device cache, any CPU prefetch
+                                                  already reflects the most recent data written by the GPU to this
+                                                  location, i.e. prefetching is coherent, so no CPU dcache flush is needed */
+                                               sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32);
+                                               CacheOpExec(psDevInfo->psDeviceNode,
+                                                               (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset,
+                                                               (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32),
+                                                               sCpuPhyAddrStart,
+                                                               sCpuPhyAddrEnd,
+                                                               PVRSRV_CACHE_OP_INVALIDATE);
+                                               do_invalid_range(0x0, 0x200000);
+                                       }
+                               }
+                               else
+                               {
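+                                       /* Even test types (0 and 2): the CPU writes the seeded value and it is
+                                          read back through the RGX slave-port (GPU side) for comparison below */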
+                                       IMG_UINT32 ui32RAWCpuValue;
+
+                                       /* Ensures line is in dcache */
+                                       ui32FWValue = IMG_UINT32_MAX;
+
+                                       /* Dirty allocation in dcache */
+                                       ui32RAWCpuValue = i + ui32OddEvenSeed;
+                                       pui32FabricCohTestBufferCpuVA[i] = i + ui32OddEvenSeed;
+
+                                       /* Flush any pending CPU store buffering on LMA */
+                                       OSWriteMemoryBarrier(&pui32FabricCohTestBufferCpuVA[i]);
+
+                                       switch (ui32TestType)
+                                       {
+                                       case 0:
+                                               /* Flush dcache to force subsequent incoming CPU-bound snoop to miss so
+                                                  memory is coherent before the SlavePort reads */
+                                               sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32);
+                                               CacheOpExec(psDevInfo->psDeviceNode,
+                                                               (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset,
+                                                               (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32),
+                                                               sCpuPhyAddrStart,
+                                                               sCpuPhyAddrEnd,
+                                                               PVRSRV_CACHE_OP_FLUSH);
+                                               break;
+                                       }
+
+                                       /* Read back value using RGX slave-port interface */
+                                       eError = RGXReadFWModuleAddr(psDevInfo, ui32FWAddr, &ui32FWValue);
+                                       if (eError != PVRSRV_OK)
+                                       {
+                                               PVR_DPF((PVR_DBG_ERROR,
+                                                               "RGXReadFWModuleAddr error: %s, exiting",
+                                                               PVRSRVGetErrorString(eError)));
+                                               bExit = IMG_TRUE;
+                                               continue;
+                                       }
+
+                                       /* We are being mostly paranoid here, just to account for CPU RAW operations */
+                                       sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32);
+                                       CacheOpExec(psDevInfo->psDeviceNode,
+                                                       (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset,
+                                                       (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32),
+                                                       sCpuPhyAddrStart,
+                                                       sCpuPhyAddrEnd,
+                                                       PVRSRV_CACHE_OP_FLUSH);
+                                       if (pui32FabricCohTestBufferCpuVA[i] != ui32RAWCpuValue)
+                                       {
+                                               /* Fatal error, we should abort */
+                                               PVR_DPF((PVR_DBG_ERROR,
+                                                               "At Offset: %d, RAW by CPU failed: expected: %x, got: %x",
+                                                               i,
+                                                               ui32RAWCpuValue,
+                                                               pui32FabricCohTestBufferCpuVA[i]));
+                                               eError = PVRSRV_ERROR_INIT_FAILURE;
+                                               bExit = IMG_TRUE;
+                                               continue;
+                                       }
+                               }
+
+                               /* Compare to see if sub-test passed */
+                               if (pui32FabricCohTestBufferCpuVA[i] == ui32FWValue)
+                               {
+#if defined(DEBUG)
+                                       bSubTestPassed = IMG_TRUE;
+#endif
+                               }
+                               else
+                               {
+                                       bFullTestPassed = IMG_FALSE;
+                                       eError = PVRSRV_ERROR_INIT_FAILURE;
+#if defined(DEBUG)
+                                       bSubTestPassed = IMG_FALSE;
+#endif
+                                       if (ui32LastFWValue != ui32FWValue)
+                                       {
+#if defined(DEBUG)
+                                               PVR_LOG(("At Offset: %d, Expected: %x, Got: %x",
+                                                               i,
+                                                               (ui32TestType & 0x1) ? ui32FWValue : pui32FabricCohTestBufferCpuVA[i],
+                                                                               (ui32TestType & 0x1) ? pui32FabricCohTestBufferCpuVA[i] : ui32FWValue));
+#endif
+                                       }
+                                       else
+                                       {
+                                               PVR_DPF((PVR_DBG_ERROR,
+                                                               "test encountered unexpected error, exiting"));
+                                               eError = PVRSRV_ERROR_INIT_FAILURE;
+                                               bExit = IMG_TRUE;
+                                               continue;
+                                       }
+                               }
+
+                               ui32LastFWValue = (ui32TestType & 0x1) ? ui32FWValue : pui32FabricCohTestBufferCpuVA[i];
+                       }
+
+#if defined(DEBUG)
+                       if (bExit)
+                       {
+                               continue;
+                       }
+
+                       switch (ui32TestType)
+                       {
+                       case 0:
+                               PVR_LOG(("CPU:Write/GPU:Read Snoop Miss Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED"));
+                               break;
+                       case 1:
+                               PVR_LOG(("GPU:Write/CPU:Read Snoop Miss Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED"));
+                               break;
+                       case 2:
+                               PVR_LOG(("CPU:Write/GPU:Read Snoop Hit  Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED"));
+                               break;
+                       case 3:
+                               PVR_LOG(("GPU:Write/CPU:Read Snoop Hit  Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED"));
+                               break;
+                       default:
+                               PVR_LOG(("Internal error, exiting test"));
+                               bExit = IMG_TRUE;
+                               continue;
+                       }
+#endif
+               }
+       }
+
+       RGXUnsetFirmwareAddress(psFabricCohTestBufferMemDesc);
+e2:
+       DevmemReleaseCpuVirtAddr(psFabricCohTestBufferMemDesc);
+e1:
+       DevmemFwUnmapAndFree(psDevInfo, psFabricCohTestBufferMemDesc);
+
+e0:
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+       if (bFeatureS7)
+       {
+               /* Restore bootloader segment settings */
+               IMG_UINT64 ui64SegOutAddrTopCached   = RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED(MMU_CONTEXT_MAPPING_FWIF);
+               RGXWriteMetaRegThroughSP(hPrivate, META_CR_MMCU_SEGMENTn_OUTA1(6),
+                               (ui64SegOutAddrTopCached | RGXFW_BOOTLDR_DEVV_ADDR) >> 32);
+       }
+       else
+#endif
+       {
+               /* Restore SLC bypass settings */
+               RGXWriteReg32(hPrivate, RGX_CR_SLC_CTRL_BYPASS, ui32SLCCTRL);
+       }
+
+       bFullTestPassed = bExit ? IMG_FALSE : bFullTestPassed;
+       if (bFullTestPassed)
+       {
+               PVR_LOG(("fabric coherency test: PASSED"));
+               psDevInfo->ui32CoherencyTestsDone = MAX_NUM_COHERENCY_TESTS + 1;
+       }
+       else
+       {
+               PVR_LOG(("fabric coherency test: FAILED"));
+               psDevInfo->ui32CoherencyTestsDone++;
+       }
+
+       return eError;
+#else
+       PVR_UNREFERENCED_PARAMETER(hPrivate);
+
+       return PVRSRV_OK;
+#endif
+}
+
+IMG_INT32 RGXDeviceGetFeatureValue(const void *hPrivate, IMG_UINT64 ui64Feature)
+{
+       IMG_INT32 i32Ret = -1;
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+
+       PVR_ASSERT(hPrivate != NULL);
+
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+       psDeviceNode = psDevInfo->psDeviceNode;
+
+       if (psDeviceNode->pfnGetDeviceFeatureValue != NULL)
+       {
+               i32Ret = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode, ui64Feature);
+       }
+
+       return i32Ret;
+}
+
+IMG_BOOL RGXDeviceHasFeature(const void *hPrivate, IMG_UINT64 ui64Feature)
+{
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_ASSERT(hPrivate != NULL);
+
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+
+       return (psDevInfo->sDevFeatureCfg.ui64Features & ui64Feature) != 0;
+}
+
+IMG_BOOL RGXDeviceHasErnBrn(const void *hPrivate, IMG_UINT64 ui64ErnsBrns)
+{
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+
+       return (psDevInfo->sDevFeatureCfg.ui64ErnsBrns & ui64ErnsBrns) != 0;
+}
+
+IMG_UINT32 RGXGetDeviceSLCBanks(const void *hPrivate)
+{
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+
+       if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_BANKS))
+       {
+               return 0;
+       }
+       return RGX_GET_FEATURE_VALUE(psDevInfo, SLC_BANKS);
+}
+
+IMG_UINT32 RGXGetDeviceCacheLineSize(const void *hPrivate)
+{
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+
+       if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_CACHE_LINE_SIZE_BITS))
+       {
+               return 0;
+       }
+       return RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS);
+}
+
+IMG_UINT32 RGXGetDevicePhysBusWidth(const void *hPrivate)
+{
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+
+       if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, PHYS_BUS_WIDTH))
+       {
+               return 0;
+       }
+       return RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH);
+}
+
+IMG_BOOL RGXDevicePA0IsValid(const void *hPrivate)
+{
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+
+       return psDevInfo->sLayerParams.bDevicePA0IsValid;
+}
+
+void RGXAcquireBootCodeAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootCodeAddr)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+       *psBootCodeAddr = psDevInfo->sFWCodeDevVAddrBase;
+}
+
+void RGXAcquireBootDataAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootDataAddr)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+       *psBootDataAddr = psDevInfo->sFWDataDevVAddrBase;
+}
+
+IMG_BOOL RGXDeviceAckIrq(const void *hPrivate)
+{
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+
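+       /* With no platform-specific ack hook registered, report the IRQ as acknowledged */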
+       return (psDevInfo->pfnRGXAckIrq != NULL) ?
+                       psDevInfo->pfnRGXAckIrq(psDevInfo) : IMG_TRUE;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxlayer_impl.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxlayer_impl.h
new file mode 100644
index 0000000..4d7c0f0
--- /dev/null
@@ -0,0 +1,67 @@
+/*************************************************************************/ /*!
+@File
+@Title          Header for DDK implementation of the Services abstraction layer
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for DDK implementation of the Services abstraction layer
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXLAYER_IMPL_H)
+#define RGXLAYER_IMPL_H
+
+#include "rgxlayer.h"
+#include "device_connection.h"
+
+typedef struct _RGX_LAYER_PARAMS_
+{
+       void *psDevInfo;
+       void *psDevConfig;
+#if defined(PDUMP)
+       IMG_UINT32 ui32PdumpFlags;
+#endif
+
+       IMG_DEV_PHYADDR sPCAddr;
+       IMG_DEV_PHYADDR sGPURegAddr;
+       IMG_DEV_PHYADDR sBootRemapAddr;
+       IMG_DEV_PHYADDR sCodeRemapAddr;
+       IMG_DEV_PHYADDR sDataRemapAddr;
+       IMG_DEV_PHYADDR sTrampolineRemapAddr;
+       IMG_BOOL bDevicePA0IsValid;
+} RGX_LAYER_PARAMS;
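+
+/*
+ * Illustrative usage sketch (not part of the DDK API contract): callers fill
+ * in an RGX_LAYER_PARAMS and pass its address as the opaque hPrivate handle
+ * consumed by the rgxlayer helpers, which cast it back, e.g.:
+ *
+ *     RGX_LAYER_PARAMS sParams = { 0 };
+ *     sParams.psDevInfo   = psDevInfo;     // caller-supplied device info
+ *     sParams.psDevConfig = psDevConfig;   // caller-supplied device config
+ *     bSupported = RGXDeviceHasFeature(&sParams, ui64FeatureMask);
+ */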
+
+#endif /* RGXLAYER_IMPL_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxmipsmmuinit.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxmipsmmuinit.c
new file mode 100644
index 0000000..0e6c0ab
--- /dev/null
@@ -0,0 +1,1045 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific initialisation routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific MMU initialisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "rgxmipsmmuinit.h"
+
+#include "device.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "mmu_common.h"
+#include "pdump_mmu.h"
+#include "rgxheapconfig.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "rgx_memallocflags.h"
+#include "pdump_km.h"
+#include "rgxdevice.h"
+#include "log2.h"
+
+/*
+ * Bits of PT, PD and PC not involving addresses
+ */
+
+/* Currently there is no page directory for MIPS MMU */
+#define RGX_MIPS_MMUCTRL_PDE_PROTMASK       0
+/* Currently there is no page catalog for MIPS MMU */
+#define RGX_MIPS_MMUCTRL_PCE_PROTMASK       0
+
+
+static MMU_PxE_CONFIG sRGXMMUPCEConfig;
+static MMU_DEVVADDR_CONFIG sRGXMMUTopLevelDevVAddrConfig;
+
+
+/*
+ *
+ *  Configuration for heaps with 4kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_4KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_4KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_4KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig4KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 16kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_16KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_16KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_16KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig16KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 64kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_64KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_64KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_64KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig64KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 256kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_256KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_256KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig256KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 1MB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_1MBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_1MBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_1MBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig1MB;
+
+
+/*
+ *
+ *  Configuration for heaps with 2MB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_2MBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_2MBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_2MBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig2MB;
+
+
+/* Forward declaration of protection bits derivation functions, for
+   the following structure */
+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags);
+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags);
+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags);
+
+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
+                                                                                  const MMU_PxE_CONFIG **ppsMMUPDEConfig,
+                                                                                  const MMU_PxE_CONFIG **ppsMMUPTEConfig,
+                                                                                  const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+                                                                                  IMG_HANDLE *phPriv);
+
+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv);
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize);
+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize);
+
+static MMU_DEVICEATTRIBS sRGXMMUDeviceAttributes;
+
+/* Cached policy */
+static IMG_UINT32 gui32CachedPolicy;
+
+static PVRSRV_ERROR RGXCheckTrampolineAddrs(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+                                            MMU_DEVICEATTRIBS *psDevAttrs,
+                                            IMG_UINT64 *pui64Addr);
+
+PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       IMG_BOOL bPhysBusAbove32Bit = IMG_FALSE;
+
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, PHYS_BUS_WIDTH))
+       {
+               bPhysBusAbove32Bit = RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) > 32;
+       }
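+       /* bPhysBusAbove32Bit selects the wider EntryLo PFN mask and the matching
+          cached policy in the PTE configurations set up below */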
+
+       sRGXMMUDeviceAttributes.pszMMUPxPDumpMemSpaceName =
+               PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]);
+
+       /*
+        * Setup sRGXMMUPCEConfig, no PC in MIPS MMU currently
+        */
+       sRGXMMUPCEConfig.uiBytesPerEntry = 0; /* No PC entries on MIPS */
+       sRGXMMUPCEConfig.uiAddrMask = 0; /* Mask to get significant address bits of PC entry */
+
+       sRGXMMUPCEConfig.uiAddrShift = 0; /* Shift this many bits to get PD address in PC entry */
+       sRGXMMUPCEConfig.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K; /* Alignment of PD AND PC */
+
+       sRGXMMUPCEConfig.uiProtMask = RGX_MIPS_MMUCTRL_PCE_PROTMASK; /* Mask to get the status bits of the PC */
+       sRGXMMUPCEConfig.uiProtShift = 0; /* Shift this many bits to have status bits starting with bit 0 */
+
+       sRGXMMUPCEConfig.uiValidEnMask = RGX_MIPS_MMUCTRL_PC_DATA_VALID_EN; /* Mask to get entry valid bit of the PC */
+       sRGXMMUPCEConfig.uiValidEnShift = RGX_MIPS_MMUCTRL_PC_DATA_VALID_SHIFT; /* Shift this many bits to have entry valid bit starting with bit 0 */
+
+       /*
+        *  Setup sRGXMMUTopLevelDevVAddrConfig
+        */
+       sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask = 0; /* Get the PC address bits from a 40 bit virt. address (in a 64bit UINT) */
+       sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift = 0;
+       sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPC = 0;
+
+       sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask = 0; /* Get the PD address bits from a 40 bit virt. address (in a 64bit UINT) */
+       sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift = 0;
+       sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPD = 0;
+
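+       /* Total PT entries across all OSes: (RGX_NUM_OS_SUPPORTED * per-OS page
+          table size in bytes) divided by the size of one PT entry */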
+       sRGXMMUTopLevelDevVAddrConfig.uiPTIndexMask = IMG_UINT64_C(0xfffffff000); /* Get the PT address bits from a 40 bit virt. address (in a 64bit UINT) */
+       sRGXMMUTopLevelDevVAddrConfig.uiPTIndexShift = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K;
+       sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPT = (RGX_NUM_OS_SUPPORTED << RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K) >> RGXMIPSFW_LOG2_PTE_ENTRY_SIZE;
+
+/*
+ *
+ *  Configuration for heaps with 4kB Data-Page size
+ *
+ */
+
+       /*
+        * Setup sRGXMMUPDEConfig_4KBDP. No PD in MIPS MMU currently
+        */
+       sRGXMMUPDEConfig_4KBDP.uiBytesPerEntry = 0;
+
+       /* No PD used for MIPS */
+       sRGXMMUPDEConfig_4KBDP.uiAddrMask = 0;
+       sRGXMMUPDEConfig_4KBDP.uiAddrShift = 0;
+       sRGXMMUPDEConfig_4KBDP.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K;
+
+       sRGXMMUPDEConfig_4KBDP.uiVarCtrlMask = IMG_UINT64_C(0x0);
+       sRGXMMUPDEConfig_4KBDP.uiVarCtrlShift = 0;
+
+       sRGXMMUPDEConfig_4KBDP.uiProtMask = RGX_MIPS_MMUCTRL_PDE_PROTMASK;
+       sRGXMMUPDEConfig_4KBDP.uiProtShift = 0;
+
+       sRGXMMUPDEConfig_4KBDP.uiValidEnMask = RGX_MIPS_MMUCTRL_PD_DATA_VALID_EN;
+       sRGXMMUPDEConfig_4KBDP.uiValidEnShift = RGX_MIPS_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+       /*
+        * Setup sRGXMMUPTEConfig_4KBDP.
+        */
+       sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry = 1 << RGXMIPSFW_LOG2_PTE_ENTRY_SIZE;
+
+
+       if (bPhysBusAbove32Bit)
+       {
+               sRGXMMUPTEConfig_4KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT;
+               gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY_ABOVE_32BIT;
+       }
+       else
+       {
+               sRGXMMUPTEConfig_4KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK;
+               gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY;
+       }
+
+       sRGXMMUPTEConfig_4KBDP.uiAddrShift = RGXMIPSFW_ENTRYLO_PFN_SHIFT;
+       sRGXMMUPTEConfig_4KBDP.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K;
+
+       sRGXMMUPTEConfig_4KBDP.uiProtMask = RGXMIPSFW_ENTRYLO_DVG | ~RGXMIPSFW_ENTRYLO_CACHE_POLICY_CLRMSK |
+                                           RGXMIPSFW_ENTRYLO_READ_INHIBIT_EN | RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_EN;
+       sRGXMMUPTEConfig_4KBDP.uiProtShift = 0;
+
+       sRGXMMUPTEConfig_4KBDP.uiValidEnMask = RGXMIPSFW_ENTRYLO_VALID_EN;
+       sRGXMMUPTEConfig_4KBDP.uiValidEnShift = RGXMIPSFW_ENTRYLO_VALID_SHIFT;
+
+       /*
+        * Setup sRGXMMUDevVAddrConfig_4KBDP
+        */
+       sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask = 0;
+       sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift = 0;
+       sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPC = 0;
+
+
+       sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask = 0;
+       sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift = 0;
+       sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPD = 0;
+
+       sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask = ~RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_CLRMSK;
+       sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift = RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_SHIFT;
+       sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT = (RGX_NUM_OS_SUPPORTED << RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K) >> RGXMIPSFW_LOG2_PTE_ENTRY_SIZE;
+
+
+       sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000000fff);
+       sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetShift = 0;
+       sRGXMMUDevVAddrConfig_4KBDP.uiOffsetInBytes = RGX_FIRMWARE_RAW_HEAP_BASE & IMG_UINT64_C(0x00ffffffff);
+
+       /*
+        * Setup gsPageSizeConfig4KB
+        */
+       gsPageSizeConfig4KB.psPDEConfig = &sRGXMMUPDEConfig_4KBDP;
+       gsPageSizeConfig4KB.psPTEConfig = &sRGXMMUPTEConfig_4KBDP;
+       gsPageSizeConfig4KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_4KBDP;
+       gsPageSizeConfig4KB.uiRefCount = 0;
+       gsPageSizeConfig4KB.uiMaxRefCount = 0;
+
+
+/*
+ *
+ *  Configuration for heaps with 16kB Data-Page size
+ *
+ */
+
+       /*
+        * Setup sRGXMMUPDEConfig_16KBDP
+        */
+       sRGXMMUPDEConfig_16KBDP.uiBytesPerEntry = 0;
+
+       sRGXMMUPDEConfig_16KBDP.uiAddrMask = 0;
+       sRGXMMUPDEConfig_16KBDP.uiAddrShift = 0; /* These are for a page directory ENTRY, meaning the address of a PT cropped to suit the PD */
+       sRGXMMUPDEConfig_16KBDP.uiAddrLog2Align = 0; /* Alignment of the page tables NOT directories */
+
+       sRGXMMUPDEConfig_16KBDP.uiVarCtrlMask = 0;
+       sRGXMMUPDEConfig_16KBDP.uiVarCtrlShift = 0;
+
+       sRGXMMUPDEConfig_16KBDP.uiProtMask = 0;
+       sRGXMMUPDEConfig_16KBDP.uiProtShift = 0;
+
+       sRGXMMUPDEConfig_16KBDP.uiValidEnMask = 0;
+       sRGXMMUPDEConfig_16KBDP.uiValidEnShift = 0;
+
+       /*
+        * Setup sRGXMMUPTEConfig_16KBDP. Not supported yet
+        */
+       sRGXMMUPTEConfig_16KBDP.uiBytesPerEntry = 0;
+
+       sRGXMMUPTEConfig_16KBDP.uiAddrMask = 0;
+       sRGXMMUPTEConfig_16KBDP.uiAddrShift = 0; /* These are for a page table ENTRY, meaning the address of a PAGE cropped to suit the PD */
+       sRGXMMUPTEConfig_16KBDP.uiAddrLog2Align = 0; /* Alignment of the pages NOT tables */
+
+       sRGXMMUPTEConfig_16KBDP.uiProtMask = 0;
+       sRGXMMUPTEConfig_16KBDP.uiProtShift = 0;
+
+       sRGXMMUPTEConfig_16KBDP.uiValidEnMask = 0;
+       sRGXMMUPTEConfig_16KBDP.uiValidEnShift = 0;
+
+       /*
+        * Setup sRGXMMUDevVAddrConfig_16KBDP
+        */
+       sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask = 0;
+       sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift = 0;
+       sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPC = 0;
+
+       sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask = 0;
+       sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift = 0;
+       sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPD = 0;
+
+       sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask = 0;
+       sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift = 0;
+       sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPT = 0;
+
+       sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetMask = 0;
+       sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetShift = 0;
+       sRGXMMUDevVAddrConfig_16KBDP.uiOffsetInBytes = 0;
+
+       /*
+        * Setup gsPageSizeConfig16KB
+        */
+       gsPageSizeConfig16KB.psPDEConfig = &sRGXMMUPDEConfig_16KBDP;
+       gsPageSizeConfig16KB.psPTEConfig = &sRGXMMUPTEConfig_16KBDP;
+       gsPageSizeConfig16KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_16KBDP;
+       gsPageSizeConfig16KB.uiRefCount = 0;
+       gsPageSizeConfig16KB.uiMaxRefCount = 0;
+
+
+/*
+ *
+ *  Configuration for heaps with 64kB Data-Page size. Not supported yet
+ *
+ */
+
+       /*
+        * Setup sRGXMMUPDEConfig_64KBDP
+        */
+       sRGXMMUPDEConfig_64KBDP.uiBytesPerEntry = 0;
+
+       sRGXMMUPDEConfig_64KBDP.uiAddrMask = 0;
+       sRGXMMUPDEConfig_64KBDP.uiAddrShift = 0;
+       sRGXMMUPDEConfig_64KBDP.uiAddrLog2Align = 0;
+
+       sRGXMMUPDEConfig_64KBDP.uiVarCtrlMask = 0;
+       sRGXMMUPDEConfig_64KBDP.uiVarCtrlShift = 0;
+
+       sRGXMMUPDEConfig_64KBDP.uiProtMask = 0;
+       sRGXMMUPDEConfig_64KBDP.uiProtShift = 0;
+
+       sRGXMMUPDEConfig_64KBDP.uiValidEnMask = 0;
+       sRGXMMUPDEConfig_64KBDP.uiValidEnShift = 0;
+
+       /*
+        * Setup sRGXMMUPTEConfig_64KBDP.
+        *
+        */
+       sRGXMMUPTEConfig_64KBDP.uiBytesPerEntry = 1 << RGXMIPSFW_LOG2_PTE_ENTRY_SIZE;
+
+       if (bPhysBusAbove32Bit)
+       {
+               sRGXMMUPTEConfig_64KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT;
+               gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY_ABOVE_32BIT;
+       }
+       else
+       {
+               sRGXMMUPTEConfig_64KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK;
+               gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY;
+       }
+
+       /* Even while using 64K pages, MIPS still aligns addresses to 4K */
+       sRGXMMUPTEConfig_64KBDP.uiAddrShift = RGXMIPSFW_ENTRYLO_PFN_SHIFT;
+       sRGXMMUPTEConfig_64KBDP.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K;
+
+       sRGXMMUPTEConfig_64KBDP.uiProtMask = RGXMIPSFW_ENTRYLO_DVG | ~RGXMIPSFW_ENTRYLO_CACHE_POLICY_CLRMSK |
+                                            RGXMIPSFW_ENTRYLO_READ_INHIBIT_EN | RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_EN;
+       sRGXMMUPTEConfig_64KBDP.uiProtShift = 0;
+
+       sRGXMMUPTEConfig_64KBDP.uiValidEnMask = RGXMIPSFW_ENTRYLO_VALID_EN;
+       sRGXMMUPTEConfig_64KBDP.uiValidEnShift = RGXMIPSFW_ENTRYLO_VALID_SHIFT;
+
+       /*
+        * Setup sRGXMMUDevVAddrConfig_64KBDP.
+        */
+       sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask = 0;
+       sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift = 0;
+       sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPC = 0;
+
+       sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask = 0;
+       sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift = 0;
+       sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPD = 0;
+
+       sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask = IMG_UINT64_C(0x00ffff0000);
+       sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_64K;
+       sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPT = (RGX_NUM_OS_SUPPORTED << RGXMIPSFW_LOG2_PAGETABLE_SIZE_64K) >> RGXMIPSFW_LOG2_PTE_ENTRY_SIZE;
+
+       sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000000ffff);
+       sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetShift = 0;
+       sRGXMMUDevVAddrConfig_64KBDP.uiOffsetInBytes = RGX_FIRMWARE_RAW_HEAP_BASE & IMG_UINT64_C(0x00ffffffff);
+
+       /*
+        * Setup gsPageSizeConfig64KB.
+        */
+       gsPageSizeConfig64KB.psPDEConfig = &sRGXMMUPDEConfig_64KBDP;
+       gsPageSizeConfig64KB.psPTEConfig = &sRGXMMUPTEConfig_64KBDP;
+       gsPageSizeConfig64KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_64KBDP;
+       gsPageSizeConfig64KB.uiRefCount = 0;
+       gsPageSizeConfig64KB.uiMaxRefCount = 0;
+
+
+/*
+ *
+ *  Configuration for heaps with 256kB Data-Page size. Not supported yet
+ *
+ */
+
+       /*
+        * Setup sRGXMMUPDEConfig_256KBDP
+        */
+       sRGXMMUPDEConfig_256KBDP.uiBytesPerEntry = 0;
+
+       sRGXMMUPDEConfig_256KBDP.uiAddrMask = 0;
+       sRGXMMUPDEConfig_256KBDP.uiAddrShift = 0;
+       sRGXMMUPDEConfig_256KBDP.uiAddrLog2Align = 0;
+
+       sRGXMMUPDEConfig_256KBDP.uiVarCtrlMask = 0;
+       sRGXMMUPDEConfig_256KBDP.uiVarCtrlShift = 0;
+
+       sRGXMMUPDEConfig_256KBDP.uiProtMask = 0;
+       sRGXMMUPDEConfig_256KBDP.uiProtShift = 0;
+
+       sRGXMMUPDEConfig_256KBDP.uiValidEnMask = 0;
+       sRGXMMUPDEConfig_256KBDP.uiValidEnShift = 0;
+
+       /*
+        * Setup MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP
+        */
+       sRGXMMUPTEConfig_256KBDP.uiBytesPerEntry = 0;
+
+       sRGXMMUPTEConfig_256KBDP.uiAddrMask = 0;
+       sRGXMMUPTEConfig_256KBDP.uiAddrShift = 0;
+       sRGXMMUPTEConfig_256KBDP.uiAddrLog2Align = 0;
+
+       sRGXMMUPTEConfig_256KBDP.uiProtMask = 0;
+       sRGXMMUPTEConfig_256KBDP.uiProtShift = 0;
+
+       sRGXMMUPTEConfig_256KBDP.uiValidEnMask = 0;
+       sRGXMMUPTEConfig_256KBDP.uiValidEnShift = 0;
+
+       /*
+        * Setup sRGXMMUDevVAddrConfig_256KBDP
+        */
+       sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask = 0;
+       sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift = 0;
+       sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPC = 0;
+
+       sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask = 0;
+       sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift = 0;
+       sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPD = 0;
+
+       sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask = 0;
+       sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift = 0;
+       sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPT = 0;
+
+       sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetMask = 0;
+       sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetShift = 0;
+       sRGXMMUDevVAddrConfig_256KBDP.uiOffsetInBytes = 0;
+
+       /*
+        * Setup gsPageSizeConfig256KB
+        */
+       gsPageSizeConfig256KB.psPDEConfig = &sRGXMMUPDEConfig_256KBDP;
+       gsPageSizeConfig256KB.psPTEConfig = &sRGXMMUPTEConfig_256KBDP;
+       gsPageSizeConfig256KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_256KBDP;
+       gsPageSizeConfig256KB.uiRefCount = 0;
+       gsPageSizeConfig256KB.uiMaxRefCount = 0;
+
+       /*
+        * Setup sRGXMMUPDEConfig_1MBDP.  Not supported yet
+        */
+       sRGXMMUPDEConfig_1MBDP.uiBytesPerEntry = 0;
+
+       sRGXMMUPDEConfig_1MBDP.uiAddrMask = 0;
+       sRGXMMUPDEConfig_1MBDP.uiAddrShift = 0;
+       sRGXMMUPDEConfig_1MBDP.uiAddrLog2Align = 0;
+
+       sRGXMMUPDEConfig_1MBDP.uiVarCtrlMask = 0;
+       sRGXMMUPDEConfig_1MBDP.uiVarCtrlShift = 0;
+
+       sRGXMMUPDEConfig_1MBDP.uiProtMask = 0;
+       sRGXMMUPDEConfig_1MBDP.uiProtShift = 0;
+
+       sRGXMMUPDEConfig_1MBDP.uiValidEnMask = 0;
+       sRGXMMUPDEConfig_1MBDP.uiValidEnShift = 0;
+
+       /*
+        * Setup sRGXMMUPTEConfig_1MBDP
+        */
+       sRGXMMUPTEConfig_1MBDP.uiBytesPerEntry = 8;
+
+       sRGXMMUPTEConfig_1MBDP.uiAddrMask = 0;
+       sRGXMMUPTEConfig_1MBDP.uiAddrShift = 0;
+       sRGXMMUPTEConfig_1MBDP.uiAddrLog2Align = 0;
+
+       sRGXMMUPTEConfig_1MBDP.uiProtMask = 0;
+       sRGXMMUPTEConfig_1MBDP.uiProtShift = 0;
+
+       sRGXMMUPTEConfig_1MBDP.uiValidEnMask = 0;
+       sRGXMMUPTEConfig_1MBDP.uiValidEnShift = 0;
+
+       /*
+        * Setup sRGXMMUDevVAddrConfig_1MBDP
+        */
+       sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask = 0;
+       sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift = 0;
+       sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPC = 0;
+
+       sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask = 0;
+       sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift = 0;
+       sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPD = 0;
+
+       sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask = 0;
+       sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift = 0;
+       sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPT = 0;
+
+       sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetMask = 0;
+       sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetShift = 0;
+       sRGXMMUDevVAddrConfig_1MBDP.uiOffsetInBytes = 0;
+
+       /*
+        * Setup gsPageSizeConfig1MB
+        */
+       gsPageSizeConfig1MB.psPDEConfig = &sRGXMMUPDEConfig_1MBDP;
+       gsPageSizeConfig1MB.psPTEConfig = &sRGXMMUPTEConfig_1MBDP;
+       gsPageSizeConfig1MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_1MBDP;
+       gsPageSizeConfig1MB.uiRefCount = 0;
+       gsPageSizeConfig1MB.uiMaxRefCount = 0;
+
+       /*
+        * Setup sRGXMMUPDEConfig_2MBDP. Not supported yet
+        */
+       sRGXMMUPDEConfig_2MBDP.uiBytesPerEntry = 0;
+
+       sRGXMMUPDEConfig_2MBDP.uiAddrMask = 0;
+       sRGXMMUPDEConfig_2MBDP.uiAddrShift = 0;
+       sRGXMMUPDEConfig_2MBDP.uiAddrLog2Align = 0;
+
+       sRGXMMUPDEConfig_2MBDP.uiVarCtrlMask = 0;
+       sRGXMMUPDEConfig_2MBDP.uiVarCtrlShift = 0;
+
+       sRGXMMUPDEConfig_2MBDP.uiProtMask = 0;
+       sRGXMMUPDEConfig_2MBDP.uiProtShift = 0;
+
+       sRGXMMUPDEConfig_2MBDP.uiValidEnMask = 0;
+       sRGXMMUPDEConfig_2MBDP.uiValidEnShift = 0;
+
+       /*
+        * Setup sRGXMMUPTEConfig_2MBDP
+        */
+       sRGXMMUPTEConfig_2MBDP.uiBytesPerEntry = 0;
+
+       sRGXMMUPTEConfig_2MBDP.uiAddrMask = 0;
+       sRGXMMUPTEConfig_2MBDP.uiAddrShift = 0;
+       sRGXMMUPTEConfig_2MBDP.uiAddrLog2Align = 0;
+
+       sRGXMMUPTEConfig_2MBDP.uiProtMask = 0;
+       sRGXMMUPTEConfig_2MBDP.uiProtShift = 0;
+
+       sRGXMMUPTEConfig_2MBDP.uiValidEnMask = 0;
+       sRGXMMUPTEConfig_2MBDP.uiValidEnShift = 0;
+
+       /*
+        * Setup sRGXMMUDevVAddrConfig_2MBDP
+        */
+       sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask = 0;
+       sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift = 0;
+       sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPC = 0;
+
+       sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask = 0;
+       sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift = 0;
+       sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPD = 0;
+
+       sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask = 0;
+       sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift = 0;
+       sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPT = 0;
+
+       sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetMask = 0;
+       sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetShift = 0;
+       sRGXMMUDevVAddrConfig_2MBDP.uiOffsetInBytes = 0;
+
+       /*
+        * Setup gsPageSizeConfig2MB
+        */
+       gsPageSizeConfig2MB.psPDEConfig = &sRGXMMUPDEConfig_2MBDP;
+       gsPageSizeConfig2MB.psPTEConfig = &sRGXMMUPTEConfig_2MBDP;
+       gsPageSizeConfig2MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_2MBDP;
+       gsPageSizeConfig2MB.uiRefCount = 0;
+       gsPageSizeConfig2MB.uiMaxRefCount = 0;
+
+       /*
+        * Setup sRGXMMUDeviceAttributes
+        */
+       sRGXMMUDeviceAttributes.eMMUType = PDUMP_MMU_TYPE_MIPS_MICROAPTIV;
+       sRGXMMUDeviceAttributes.eTopLevel = MMU_LEVEL_1;
+
+       /*
+        * The page table is held in one or more large physically contiguous pages,
+        * each at most as big as the page table itself.
+        * To calculate its alignment/page size, calculate the log2 size of the page
+        * table taking into account all OSes, then round that down to a valid MIPS
+        * log2 page size (12, 14, 16 for a 4K, 16K, 64K page size).
+        */
+       sRGXMMUDeviceAttributes.ui32BaseAlign =
+               (CeilLog2(RGX_NUM_OS_SUPPORTED) + RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K) & ~1U;
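+       /* The '& ~1U' rounds the exponent down to an even value so that it maps
+          onto a valid MIPS log2 page size (12, 14 or 16) */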
+
+       /* 256K alignment might be too hard to achieve, fall back to 64K */
+       sRGXMMUDeviceAttributes.ui32BaseAlign =
+               MIN(sRGXMMUDeviceAttributes.ui32BaseAlign, RGXMIPSFW_LOG2_PAGE_SIZE_64K);
+
+
+
+       /* The base configuration is set to 4kB pages*/
+       sRGXMMUDeviceAttributes.psBaseConfig = &sRGXMMUPTEConfig_4KBDP;
+       sRGXMMUDeviceAttributes.psTopLevelDevVAddrConfig = &sRGXMMUTopLevelDevVAddrConfig;
+
+       /* Functions for deriving page table/dir/cat protection bits */
+       sRGXMMUDeviceAttributes.pfnDerivePCEProt8 = RGXDerivePCEProt8;
+       sRGXMMUDeviceAttributes.pfnDerivePCEProt4 = RGXDerivePCEProt4;
+       sRGXMMUDeviceAttributes.pfnDerivePDEProt8 = RGXDerivePDEProt8;
+       sRGXMMUDeviceAttributes.pfnDerivePDEProt4 = RGXDerivePDEProt4;
+       sRGXMMUDeviceAttributes.pfnDerivePTEProt8 = RGXDerivePTEProt8;
+       sRGXMMUDeviceAttributes.pfnDerivePTEProt4 = RGXDerivePTEProt4;
+
+       /* Functions for establishing configurations for PDE/PTE/DEVVADDR
+          on per-heap basis */
+       sRGXMMUDeviceAttributes.pfnGetPageSizeConfiguration = RGXGetPageSizeConfigCB;
+       sRGXMMUDeviceAttributes.pfnPutPageSizeConfiguration = RGXPutPageSizeConfigCB;
+
+       sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE4 = RGXGetPageSizeFromPDE4;
+       sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE8 = RGXGetPageSizeFromPDE8;
+
+       psDeviceNode->psFirmwareMMUDevAttrs = &sRGXMMUDeviceAttributes;
+
+       psDeviceNode->pfnValidateOrTweakPhysAddrs = RGXCheckTrampolineAddrs;
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXCheckTrampolineAddrs(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+                                            MMU_DEVICEATTRIBS *psDevAttrs,
+                                            IMG_UINT64 *pui64Addr)
+{
+       if (PVRSRV_IS_FEATURE_SUPPORTED(psDevNode, MIPS))
+       {
+               /*
+                * If mapping for the MIPS FW context, check for sensitive PAs
+                */
+               if (psDevAttrs == psDevNode->psFirmwareMMUDevAttrs)
+               {
+                       PVRSRV_RGXDEV_INFO *psDevice = (PVRSRV_RGXDEV_INFO *)psDevNode->pvDevice;
+
+                       if ((RGX_GET_FEATURE_VALUE(psDevice, PHYS_BUS_WIDTH) == 32) &&
+                                RGXMIPSFW_SENSITIVE_ADDR(*pui64Addr))
+                       {
+                               *pui64Addr = psDevice->psTrampoline->sPhysAddr.uiAddr + RGXMIPSFW_TRAMPOLINE_OFFSET(*pui64Addr);
+                       }
+                       /* FIX_HW_BRN_63553 is mainlined for all MIPS cores */
+                       else if (*pui64Addr == 0x0 && !psDevice->sLayerParams.bDevicePA0IsValid)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "%s attempt to map addr 0x0 in the FW but 0x0 is not considered valid.", __func__));
+                               return PVRSRV_ERROR_MMU_FAILED_TO_MAP_PAGE_TABLE;
+                       }
+               }
+       }
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXMipsMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR eError;
+
+       eError = PVRSRV_OK;
+
+#if defined(PDUMP)
+       psDeviceNode->pfnMMUGetContextID = NULL;
+#endif
+
+       psDeviceNode->psFirmwareMMUDevAttrs = NULL;
+
+#if defined(DEBUG)
+       PVR_DPF((PVR_DBG_MESSAGE, "Variable Page Size Heap Stats:"));
+       PVR_DPF((PVR_DBG_MESSAGE, "Max 4K page heaps: %d",
+                        gsPageSizeConfig4KB.uiMaxRefCount));
+       PVR_DPF((PVR_DBG_VERBOSE, "Current 4K page heaps (should be 0): %d",
+                        gsPageSizeConfig4KB.uiRefCount));
+       PVR_DPF((PVR_DBG_MESSAGE, "Max 16K page heaps: %d",
+                        gsPageSizeConfig16KB.uiMaxRefCount));
+       PVR_DPF((PVR_DBG_VERBOSE, "Current 16K page heaps (should be 0): %d",
+                        gsPageSizeConfig16KB.uiRefCount));
+       PVR_DPF((PVR_DBG_MESSAGE, "Max 64K page heaps: %d",
+                        gsPageSizeConfig64KB.uiMaxRefCount));
+       PVR_DPF((PVR_DBG_VERBOSE, "Current 64K page heaps (should be 0): %d",
+                        gsPageSizeConfig64KB.uiRefCount));
+       PVR_DPF((PVR_DBG_MESSAGE, "Max 256K page heaps: %d",
+                        gsPageSizeConfig256KB.uiMaxRefCount));
+       PVR_DPF((PVR_DBG_VERBOSE, "Current 256K page heaps (should be 0): %d",
+                        gsPageSizeConfig256KB.uiRefCount));
+       PVR_DPF((PVR_DBG_MESSAGE, "Max 1M page heaps: %d",
+                        gsPageSizeConfig1MB.uiMaxRefCount));
+       PVR_DPF((PVR_DBG_VERBOSE, "Current 1M page heaps (should be 0): %d",
+                        gsPageSizeConfig1MB.uiRefCount));
+       PVR_DPF((PVR_DBG_MESSAGE, "Max 2M page heaps: %d",
+                        gsPageSizeConfig2MB.uiMaxRefCount));
+       PVR_DPF((PVR_DBG_VERBOSE, "Current 2M page heaps (should be 0): %d",
+                        gsPageSizeConfig2MB.uiRefCount));
+#endif
+       if (gsPageSizeConfig4KB.uiRefCount > 0 ||
+               gsPageSizeConfig16KB.uiRefCount > 0 ||
+               gsPageSizeConfig64KB.uiRefCount > 0 ||
+               gsPageSizeConfig256KB.uiRefCount > 0 ||
+               gsPageSizeConfig1MB.uiRefCount > 0 ||
+               gsPageSizeConfig2MB.uiRefCount > 0
+               )
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXMMUInit_Unregister: Unbalanced MMU API Usage (Internal error)"));
+       }
+
+       return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePCEProt4
+@Description    calculate the PCE protection flags based on a 4 byte entry
+@Return         IMG_UINT32 PCE protection bits (always 0: no Page Catalog on MIPS MMU)
+*/ /**************************************************************************/
+static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags)
+{
+       PVR_DPF((PVR_DBG_ERROR, "Page Catalog not supported on MIPS MMU"));
+       return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePCEProt8
+@Description    calculate the PCE protection flags based on an 8 byte entry
+@Return         IMG_UINT64 PCE protection bits (always 0: no Page Catalog on MIPS MMU)
+*/ /**************************************************************************/
+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+       PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+       PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+
+       PVR_DPF((PVR_DBG_ERROR, "Page Catalog not supported on MIPS MMU"));
+       return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePDEProt4
+@Description    derive the PDE protection flags based on a 4 byte entry
+@Return         IMG_UINT32 PDE protection bits (always 0: no Page Directory on MIPS MMU)
+*/ /**************************************************************************/
+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+       PVR_DPF((PVR_DBG_ERROR, "Page Directory not supported on MIPS MMU"));
+       return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePDEProt8
+@Description    derive the PDE protection flags based on an 8 byte entry
+
+@Input          uiLog2DataPageSize The log2 of the required page size.
+                               E.g, for 4KiB pages, this parameter must be 12.
+                               For 2MiB pages, it must be set to 21.
+
+@Return         IMG_UINT64 PDE protection bits (always 0: no Page Directory on MIPS MMU)
+*/ /**************************************************************************/
+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+       PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+       PVR_DPF((PVR_DBG_ERROR, "Page Directory not supported on MIPS MMU"));
+       return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePTEProt4
+@Description    calculate the PTE protection flags based on a 4 byte entry
+@Return         IMG_UINT32 Derived PTE protection bits
+*/ /**************************************************************************/
+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags)
+{
+       IMG_UINT32 ui32MMUFlags = 0;
+
+       if (((MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE) & uiProtFlags) == (MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE))
+       {
+               /* read/write */
+               ui32MMUFlags |= RGXMIPSFW_ENTRYLO_DIRTY_EN;
+       }
+       else if (MMU_PROTFLAGS_READABLE & uiProtFlags)
+       {
+               /* read only */
+       }
+       else if (MMU_PROTFLAGS_WRITEABLE & uiProtFlags)
+       {
+               /* write only */
+               ui32MMUFlags |= RGXMIPSFW_ENTRYLO_READ_INHIBIT_EN;
+       }
+       else if ((MMU_PROTFLAGS_INVALID & uiProtFlags) == 0)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt4: neither read nor write specified..."));
+       }
+
+       /* cache coherency */
+       if (MMU_PROTFLAGS_CACHE_COHERENT & uiProtFlags)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt4: cache coherency not supported for MIPS caches"));
+       }
+
+       /* cache setup */
+       if ((MMU_PROTFLAGS_CACHED & uiProtFlags) == 0)
+       {
+               ui32MMUFlags |= RGXMIPSFW_ENTRYLO_UNCACHED;
+       }
+       else
+       {
+               ui32MMUFlags |= gui32CachedPolicy <<
+                               RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT;
+       }
+
+       if ((uiProtFlags & MMU_PROTFLAGS_INVALID) == 0)
+       {
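+               /* Valid mappings also set the MIPS Global bit so the TLB entry matches
+                  regardless of ASID */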
+               ui32MMUFlags |= RGXMIPSFW_ENTRYLO_VALID_EN;
+               ui32MMUFlags |= RGXMIPSFW_ENTRYLO_GLOBAL_EN;
+       }
+
+       if (MMU_PROTFLAGS_DEVICE(PMMETA_PROTECT) & uiProtFlags)
+       {
+               /* PVR_DPF((PVR_DBG_WARNING, "RGXDerivePTEProt4: PMMETA Protect not existent for MIPS, option discarded")); */
+       }
+
+       return ui32MMUFlags;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePTEProt8
+@Description    calculate the PTE protection flags based on an 8 byte entry
+@Return         64-bit PTE protection bits
+*/ /**************************************************************************/
+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+       PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+       PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+
+       PVR_DPF((PVR_DBG_ERROR, "8-byte PTE not supported on this device"));
+
+       return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXGetPageSizeConfigCB
+@Description    Set up configuration for variable sized data pages.
+                               RGXPutPageSizeConfigCB has to be called to ensure correct
+                               refcounting.
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
+                                                                                  const MMU_PxE_CONFIG **ppsMMUPDEConfig,
+                                                                                  const MMU_PxE_CONFIG **ppsMMUPTEConfig,
+                                                                                  const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+                                                                                  IMG_HANDLE *phPriv)
+{
+       MMU_PAGESIZECONFIG *psPageSizeConfig;
+
+       switch (uiLog2DataPageSize)
+       {
+       case RGXMIPSFW_LOG2_PAGE_SIZE_64K:
+               psPageSizeConfig = &gsPageSizeConfig64KB;
+               break;
+       case RGXMIPSFW_LOG2_PAGE_SIZE_4K:
+               psPageSizeConfig = &gsPageSizeConfig4KB;
+               break;
+       default:
+               PVR_DPF((PVR_DBG_ERROR,
+                                "RGXGetPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
+                                uiLog2DataPageSize));
+               *phPriv = NULL;
+               return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+       }
+
+       /* Refer caller's pointers to the data */
+       *ppsMMUPDEConfig = psPageSizeConfig->psPDEConfig;
+       *ppsMMUPTEConfig = psPageSizeConfig->psPTEConfig;
+       *ppsMMUDevVAddrConfig = psPageSizeConfig->psDevVAddrConfig;
+
+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
+       /* Increment ref-count - not that we're allocating anything here
+          (I'm using static structs), but one day we might, so we want
+          the Get/Put code to be balanced properly */
+       psPageSizeConfig->uiRefCount++;
+
+       /* This is purely for debug statistics */
+       psPageSizeConfig->uiMaxRefCount = MAX(psPageSizeConfig->uiMaxRefCount,
+                                                                                 psPageSizeConfig->uiRefCount);
+#endif
+
+       *phPriv = (IMG_HANDLE)(uintptr_t)uiLog2DataPageSize;
+       PVR_ASSERT (uiLog2DataPageSize == (IMG_UINT32)(uintptr_t)*phPriv);
+
+       return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXPutPageSizeConfigCB
+@Description    Tells this code that the MMU module is done with the
+                               configurations set in RGXGetPageSizeConfigCB.  This can
+                               be a no-op.
+                               Called after RGXGetPageSizeConfigCB.
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv)
+{
+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
+       MMU_PAGESIZECONFIG *psPageSizeConfig;
+       IMG_UINT32 uiLog2DataPageSize;
+
+       uiLog2DataPageSize = (IMG_UINT32)(uintptr_t) hPriv;
+
+       switch (uiLog2DataPageSize)
+       {
+       case RGXMIPSFW_LOG2_PAGE_SIZE_64K:
+               psPageSizeConfig = &gsPageSizeConfig64KB;
+               break;
+       case RGXMIPSFW_LOG2_PAGE_SIZE_4K:
+               psPageSizeConfig = &gsPageSizeConfig4KB;
+               break;
+       default:
+               PVR_DPF((PVR_DBG_ERROR,
+                                "RGXPutPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
+                                uiLog2DataPageSize));
+               return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+       }
+
+       /* Ref-count here is not especially useful, but it's an extra
+          check that the API is being used correctly */
+       psPageSizeConfig->uiRefCount--;
+#else
+       PVR_UNREFERENCED_PARAMETER(hPriv);
+#endif
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize)
+{
+       PVR_UNREFERENCED_PARAMETER(ui32PDE);
+       PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize);
+       PVR_DPF((PVR_DBG_ERROR, "PDE not supported on MIPS"));
+       return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+}
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize)
+{
+       PVR_UNREFERENCED_PARAMETER(ui64PDE);
+       PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize);
+       PVR_DPF((PVR_DBG_ERROR, "PDE not supported on MIPS"));
+       return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+}
+
+void RGXMipsCheckFaultAddress(MMU_CONTEXT *psFwMMUCtx,
+                              IMG_UINT32 ui32FwVA,
+                              MMU_FAULT_DATA *psOutFaultData)
+{
+       IMG_UINT32 *pui32PageTable = NULL;
+       PVRSRV_ERROR eError = MMU_AcquireCPUBaseAddr(psFwMMUCtx, (void**) &pui32PageTable);
+       MMU_LEVEL_DATA *psMMULevelData;
+       IMG_UINT32 ui32FwHeapBase = (IMG_UINT32) (RGX_FIRMWARE_RAW_HEAP_BASE & UINT_MAX);
+       IMG_UINT32 ui32PageSize = OSGetPageSize();
+
+       /* MIPS Firmware CPU must use the same page size as the Host */
+       IMG_UINT32 ui32PTEIndex = ((ui32FwVA & ~(ui32PageSize - 1)) - ui32FwHeapBase) / ui32PageSize;
+
+       psOutFaultData->eTopLevel = MMU_LEVEL_1;
+       psOutFaultData->eType = MMU_FAULT_TYPE_NON_PM;
+
+       psMMULevelData = &psOutFaultData->sLevelData[MMU_LEVEL_1];
+       psMMULevelData->uiBytesPerEntry = 1 << RGXMIPSFW_LOG2_PTE_ENTRY_SIZE;
+       psMMULevelData->ui32Index = ui32PTEIndex;
+       psMMULevelData->ui32NumOfEntries = RGX_FIRMWARE_RAW_HEAP_SIZE / ui32PageSize;
+
+       if ((eError == PVRSRV_OK) && (pui32PageTable != NULL))
+       {
+               psMMULevelData->ui64Address = pui32PageTable[ui32PTEIndex];
+       }
+       else
+       {
+               psMMULevelData->ui64Address = 0U;
+       }
+
+       psMMULevelData->psDebugStr = BITMASK_HAS(psMMULevelData->ui64Address,
+                                                RGXMIPSFW_TLB_VALID) ?
+                                    ("valid") : ("not valid");
+}
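+
+/* Worked example, illustrative only: assuming a 4kB host page size and a
+ * page-aligned RGX_FIRMWARE_RAW_HEAP_BASE, a faulting firmware VA of
+ * ui32FwHeapBase + 0x6123 gives
+ *
+ *   ui32PTEIndex = (((ui32FwHeapBase + 0x6123) & ~0xFFFU)
+ *                    - ui32FwHeapBase) / 0x1000 = 0x6000 / 0x1000 = 6
+ *
+ * so the seventh page table entry of the firmware heap is reported above.
+ */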
+
+
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxmipsmmuinit.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxmipsmmuinit.h
new file mode 100644 (file)
index 0000000..b2b3940
--- /dev/null
@@ -0,0 +1,97 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific initialisation routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific MMU initialisation for the MIPS firmware
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* NB: this file is not to be included arbitrarily.  It exists solely
+   for the linkage between rgxinit.c and rgxmipsmmuinit.c, the former
+   being otherwise cluttered by the contents of the latter */
+
+#ifndef SRVKM_RGXMIPSMMUINIT_H
+#define SRVKM_RGXMIPSMMUINIT_H
+
+#include "device.h"
+#include "img_types.h"
+#include "mmu_common.h"
+#include "img_defs.h"
+#include "rgx_mips.h"
+
+/*
+ * Labelling of fields within the virtual address. No PD or PC is currently
+ * used for the MIPS MMU.
+ */
+/*
+Page Table entry #
+*/
+#define RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_SHIFT        (12U)
+#define RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_CLRMSK       (IMG_UINT64_C(0XFFFFFFFF00000FFF))
+
+
+/* PC entries related definitions */
+/* No PC is currently used for MIPS MMU */
+#define RGX_MIPS_MMUCTRL_PC_DATA_VALID_EN            (0U)
+#define RGX_MIPS_MMUCTRL_PC_DATA_VALID_SHIFT         (0U)
+#define RGX_MIPS_MMUCTRL_PC_DATA_VALID_CLRMSK        (0U)
+
+#define RGX_MIPS_MMUCTRL_PC_DATA_READ_ONLY_SHIFT     (0U)
+#define RGX_MIPS_MMUCTRL_PC_DATA_READ_ONLY_CLRMSK    (0U)
+#define RGX_MIPS_MMUCTRL_PC_DATA_READ_ONLY_EN        (0U)
+
+/* PD entries related definitions */
+/* No PD is currently used for MIPS MMU */
+#define RGX_MIPS_MMUCTRL_PD_DATA_VALID_EN            (0U)
+#define RGX_MIPS_MMUCTRL_PD_DATA_VALID_SHIFT         (0U)
+#define RGX_MIPS_MMUCTRL_PD_DATA_VALID_CLRMSK        (0U)
+
+#define RGX_MIPS_MMUCTRL_PD_DATA_READ_ONLY_SHIFT     (0U)
+#define RGX_MIPS_MMUCTRL_PD_DATA_READ_ONLY_CLRMSK    (0U)
+#define RGX_MIPS_MMUCTRL_PD_DATA_READ_ONLY_EN        (0U)
+
+
+PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode);
+PVRSRV_ERROR RGXMipsMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+void RGXMipsCheckFaultAddress(MMU_CONTEXT *psFwMMUCtx,
+                              IMG_UINT32 ui32FwVA,
+                              MMU_FAULT_DATA *psOutFaultData);
+
+#endif /* #ifndef SRVKM_RGXMIPSMMUINIT_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxmmuinit.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxmmuinit.c
new file mode 100644 (file)
index 0000000..629e7ab
--- /dev/null
@@ -0,0 +1,1079 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific initialisation routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific MMU initialisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+#include "rgxmmuinit.h"
+#include "rgxmmudefs_km.h"
+
+#include "device.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "mmu_common.h"
+#include "pdump_mmu.h"
+
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "rgx_memallocflags.h"
+#include "rgx_heaps.h"
+#include "pdump_km.h"
+
+
+/* useful macros */
+/* units represented in a bitfield */
+#define UNITS_IN_BITFIELD(Mask, Shift) ((Mask >> Shift) + 1)
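+
+/* Worked example, illustrative only: the 16kB data-page configuration set up
+ * below uses uiPTIndexMask = 0x00001fc000 and uiPTIndexShift = 14, so
+ *
+ *   UNITS_IN_BITFIELD(0x00001fc000, 14) = 0x7f + 1 = 128
+ *
+ * i.e. 128 PT entries per page table, spanning 128 * 16kB = 2MB of device
+ * virtual address space, as for the other data-page sizes. */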
+
+
+/*
+ * Bits of PT, PD and PC not involving addresses
+ */
+
+#define RGX_MMUCTRL_PTE_PROTMASK       (RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN | \
+               RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN | \
+               RGX_MMUCTRL_PT_DATA_PM_SRC_EN | \
+               RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN | \
+               RGX_MMUCTRL_PT_DATA_CC_EN | \
+               RGX_MMUCTRL_PT_DATA_READ_ONLY_EN | \
+               RGX_MMUCTRL_PT_DATA_VALID_EN)
+
+#define RGX_MMUCTRL_PDE_PROTMASK       (RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN | \
+               ~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK | \
+               RGX_MMUCTRL_PD_DATA_VALID_EN)
+
+#define RGX_MMUCTRL_PCE_PROTMASK       (RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN | \
+               RGX_MMUCTRL_PC_DATA_VALID_EN)
+
+
+
+static MMU_PxE_CONFIG sRGXMMUPCEConfig;
+static MMU_DEVVADDR_CONFIG sRGXMMUTopLevelDevVAddrConfig;
+
+
+/*
+ *
+ *  Configuration for heaps with 4kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_4KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_4KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_4KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig4KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 16kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_16KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_16KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_16KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig16KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 64kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_64KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_64KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_64KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig64KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 256kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_256KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_256KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig256KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 1MB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_1MBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_1MBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_1MBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig1MB;
+
+
+/*
+ *
+ *  Configuration for heaps with 2MB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_2MBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_2MBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_2MBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig2MB;
+
+
+/* Forward declaration of protection bits derivation functions, for
+   the following structure */
+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags);
+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags);
+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags);
+
+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
+               const MMU_PxE_CONFIG **ppsMMUPDEConfig,
+               const MMU_PxE_CONFIG **ppsMMUPTEConfig,
+               const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+               IMG_HANDLE *phPriv);
+
+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv);
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize);
+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize);
+
+static MMU_DEVICEATTRIBS sRGXMMUDeviceAttributes;
+
+PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       /* Setup of Px Entries:
+        *
+        *
+        * PAGE TABLE (8 Byte):
+        *
+        * | 62              | 61...40         | 39...12 (varies) | 11...6          | 5             | 4      | 3               | 2               | 1         | 0     |
+        * | PM/Meta protect | VP Page (39:18) | Physical Page    | VP Page (17:12) | Entry Pending | PM src | SLC Bypass Ctrl | Cache Coherency | Read Only | Valid |
+        *
+        *
+        * PAGE DIRECTORY (8 Byte):
+        *
+        *  | 40            | 39...5  (varies)        | 4          | 3...1     | 0     |
+        *  | Entry Pending | Page Table base address | (reserved) | Page Size | Valid |
+        *
+        *
+        * PAGE CATALOGUE (4 Byte):
+        *
+        *  | 31...4                      | 3...2      | 1             | 0     |
+        *  | Page Directory base address | (reserved) | Entry Pending | Valid |
+        *
+        */
+
+
+       /* Example of how to get the PD address from a PC entry.
+        * The same procedure applies to PD and PT entries when retrieving the PT and page addresses:
+        *
+        * 1) sRGXMMUPCEConfig.uiAddrMask applied to PC entry with '&':
+        *  | 31...4   | 3...2      | 1             | 0     |
+        *  | PD Addr  | 0          | 0             | 0     |
+        *
+        * 2) sRGXMMUPCEConfig.uiAddrShift applied with '>>':
+        *  | 27...0   |
+        *  | PD Addr  |
+        *
+        * 3) sRGXMMUPCEConfig.uiAddrLog2Align applied with '<<':
+        *  | 39...0   |
+        *  | PD Addr  |
+        *
+        */
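+
+       /* Illustrative sketch only (ui32PCE is a hypothetical raw PC entry,
+        * nothing is executed here): steps 1) to 3) above amount to
+        *
+        *   ui64PDPhysAddr =
+        *       (IMG_UINT64)((ui32PCE & sRGXMMUPCEConfig.uiAddrMask)
+        *                        >> sRGXMMUPCEConfig.uiAddrShift)
+        *                        << sRGXMMUPCEConfig.uiAddrLog2Align;
+        */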
+
+
+       sRGXMMUDeviceAttributes.pszMMUPxPDumpMemSpaceName =
+                       PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL]);
+
+       /*
+        * Setup sRGXMMUPCEConfig
+        */
+       sRGXMMUPCEConfig.uiBytesPerEntry = 4;     /* 32 bit entries */
+       sRGXMMUPCEConfig.uiAddrMask = 0xfffffff0; /* Mask to get significant address bits of PC entry i.e. the address of the PD */
+
+       sRGXMMUPCEConfig.uiAddrShift = 4;         /* Shift this many bits to get PD address */
+       sRGXMMUPCEConfig.uiAddrLog2Align = 12;    /* Alignment of PD physical addresses. */
+
+       sRGXMMUPCEConfig.uiProtMask = RGX_MMUCTRL_PCE_PROTMASK; /* Mask to get the status bits (pending | valid)*/
+       sRGXMMUPCEConfig.uiProtShift = 0;                       /* Shift this many bits to get the status bits */
+
+       sRGXMMUPCEConfig.uiValidEnMask = RGX_MMUCTRL_PC_DATA_VALID_EN;     /* Mask to get entry valid bit of the PC */
+       sRGXMMUPCEConfig.uiValidEnShift = RGX_MMUCTRL_PC_DATA_VALID_SHIFT; /* Shift this many bits to get entry valid bit */
+
+       /*
+        *  Setup sRGXMMUTopLevelDevVAddrConfig
+        */
+       sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; /* Mask to get PC index applied to a 40 bit virt. device address */
+       sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;  /* Shift a 40 bit virt. device address by this amount to get the PC index */
+       sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask,
+                       sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift));
+
+       sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; /* Mask to get PD index applied to a 40 bit virt. device address */
+       sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;  /* Shift a 40 bit virt. device address by this amount to get the PD index */
+       sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask,
+                       sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift));
+
+       /*
+        *
+        *  Configuration for heaps with 4kB Data-Page size
+        *
+        */
+
+       /*
+        * Setup sRGXMMUPDEConfig_4KBDP
+        */
+       sRGXMMUPDEConfig_4KBDP.uiBytesPerEntry = 8;
+
+       sRGXMMUPDEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+       sRGXMMUPDEConfig_4KBDP.uiAddrShift = 12;
+       sRGXMMUPDEConfig_4KBDP.uiAddrLog2Align = 12;
+
+       sRGXMMUPDEConfig_4KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+       sRGXMMUPDEConfig_4KBDP.uiVarCtrlShift = 1;
+
+       sRGXMMUPDEConfig_4KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+       sRGXMMUPDEConfig_4KBDP.uiProtShift = 0;
+
+       sRGXMMUPDEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+       sRGXMMUPDEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+       /*
+        * Setup sRGXMMUPTEConfig_4KBDP
+        */
+       sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry = 8;
+
+       sRGXMMUPTEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffff000);
+       sRGXMMUPTEConfig_4KBDP.uiAddrShift = 12;
+       sRGXMMUPTEConfig_4KBDP.uiAddrLog2Align = 12; /* Alignment of the physical addresses of the pages NOT PTs */
+
+       sRGXMMUPTEConfig_4KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+       sRGXMMUPTEConfig_4KBDP.uiProtShift = 0;
+
+       sRGXMMUPTEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+       sRGXMMUPTEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+       /*
+        * Setup sRGXMMUDevVAddrConfig_4KBDP
+        */
+       sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+       sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+       sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask,
+                       sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift));
+
+       sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+       sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+       sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask,
+                       sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift));
+
+       sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask = ~RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK;
+       sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift = RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT;
+       sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask,
+                       sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift));
+
+       sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000000fff);
+       sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetShift = 0;
+       sRGXMMUDevVAddrConfig_4KBDP.uiOffsetInBytes = 0;
+
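+       /* Illustrative sketch only (ui64DevVA is hypothetical, nothing is
+        * executed here): with the 4kB configuration above, a 40-bit device
+        * virtual address is split into its indices and page offset as
+        *
+        *   uiPCIdx  = (ui64DevVA & uiPCIndexMask) >> uiPCIndexShift;
+        *   uiPDIdx  = (ui64DevVA & uiPDIndexMask) >> uiPDIndexShift;
+        *   uiPTIdx  = (ui64DevVA & uiPTIndexMask) >> uiPTIndexShift;
+        *   uiOffset =  ui64DevVA & uiPageOffsetMask;
+        */
+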
+       /*
+        * Setup gsPageSizeConfig4KB
+        */
+       gsPageSizeConfig4KB.psPDEConfig = &sRGXMMUPDEConfig_4KBDP;
+       gsPageSizeConfig4KB.psPTEConfig = &sRGXMMUPTEConfig_4KBDP;
+       gsPageSizeConfig4KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_4KBDP;
+       gsPageSizeConfig4KB.uiRefCount = 0;
+       gsPageSizeConfig4KB.uiMaxRefCount = 0;
+
+
+       /*
+        *
+        *  Configuration for heaps with 16kB Data-Page size
+        *
+        */
+
+       /*
+        * Setup sRGXMMUPDEConfig_16KBDP
+        */
+       sRGXMMUPDEConfig_16KBDP.uiBytesPerEntry = 8;
+
+       sRGXMMUPDEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+       sRGXMMUPDEConfig_16KBDP.uiAddrShift = 10;
+       sRGXMMUPDEConfig_16KBDP.uiAddrLog2Align = 10;
+
+       sRGXMMUPDEConfig_16KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+       sRGXMMUPDEConfig_16KBDP.uiVarCtrlShift = 1;
+
+       sRGXMMUPDEConfig_16KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+       sRGXMMUPDEConfig_16KBDP.uiProtShift = 0;
+
+       sRGXMMUPDEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+       sRGXMMUPDEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+       /*
+        * Setup sRGXMMUPTEConfig_16KBDP
+        */
+       sRGXMMUPTEConfig_16KBDP.uiBytesPerEntry = 8;
+
+       sRGXMMUPTEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xffffffc000);
+       sRGXMMUPTEConfig_16KBDP.uiAddrShift = 14;
+       sRGXMMUPTEConfig_16KBDP.uiAddrLog2Align = 14;
+
+       sRGXMMUPTEConfig_16KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+       sRGXMMUPTEConfig_16KBDP.uiProtShift = 0;
+
+       sRGXMMUPTEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+       sRGXMMUPTEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+       /*
+        * Setup sRGXMMUDevVAddrConfig_16KBDP
+        */
+       sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+       sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+       sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask,
+                       sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+       sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+       sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask,
+                       sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001fc000);
+       sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift = 14;
+       sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask,
+                       sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift));
+
+       sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000003fff);
+       sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetShift = 0;
+       sRGXMMUDevVAddrConfig_16KBDP.uiOffsetInBytes = 0;
+
+       /*
+        * Setup gsPageSizeConfig16KB
+        */
+       gsPageSizeConfig16KB.psPDEConfig = &sRGXMMUPDEConfig_16KBDP;
+       gsPageSizeConfig16KB.psPTEConfig = &sRGXMMUPTEConfig_16KBDP;
+       gsPageSizeConfig16KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_16KBDP;
+       gsPageSizeConfig16KB.uiRefCount = 0;
+       gsPageSizeConfig16KB.uiMaxRefCount = 0;
+
+
+       /*
+        *
+        *  Configuration for heaps with 64kB Data-Page size
+        *
+        */
+
+       /*
+        * Setup sRGXMMUPDEConfig_64KBDP
+        */
+       sRGXMMUPDEConfig_64KBDP.uiBytesPerEntry = 8;
+
+       sRGXMMUPDEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+       sRGXMMUPDEConfig_64KBDP.uiAddrShift = 8;
+       sRGXMMUPDEConfig_64KBDP.uiAddrLog2Align = 8;
+
+       sRGXMMUPDEConfig_64KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+       sRGXMMUPDEConfig_64KBDP.uiVarCtrlShift = 1;
+
+       sRGXMMUPDEConfig_64KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+       sRGXMMUPDEConfig_64KBDP.uiProtShift = 0;
+
+       sRGXMMUPDEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+       sRGXMMUPDEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+       /*
+        * Setup sRGXMMUPTEConfig_64KBDP
+        */
+       sRGXMMUPTEConfig_64KBDP.uiBytesPerEntry = 8;
+
+       sRGXMMUPTEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xffffff0000);
+       sRGXMMUPTEConfig_64KBDP.uiAddrShift = 16;
+       sRGXMMUPTEConfig_64KBDP.uiAddrLog2Align = 16;
+
+       sRGXMMUPTEConfig_64KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+       sRGXMMUPTEConfig_64KBDP.uiProtShift = 0;
+
+       sRGXMMUPTEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+       sRGXMMUPTEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+       /*
+        * Setup sRGXMMUDevVAddrConfig_64KBDP
+        */
+       sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+       sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+       sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask,
+                       sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+       sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+       sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask,
+                       sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001f0000);
+       sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift = 16;
+       sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask,
+                       sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000000ffff);
+       sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetShift = 0;
+       sRGXMMUDevVAddrConfig_64KBDP.uiOffsetInBytes = 0;
+
+       /*
+        * Setup gsPageSizeConfig64KB
+        */
+       gsPageSizeConfig64KB.psPDEConfig = &sRGXMMUPDEConfig_64KBDP;
+       gsPageSizeConfig64KB.psPTEConfig = &sRGXMMUPTEConfig_64KBDP;
+       gsPageSizeConfig64KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_64KBDP;
+       gsPageSizeConfig64KB.uiRefCount = 0;
+       gsPageSizeConfig64KB.uiMaxRefCount = 0;
+
+
+       /*
+        *
+        *  Configuration for heaps with 256kB Data-Page size
+        *
+        */
+
+       /*
+        * Setup sRGXMMUPDEConfig_256KBDP
+        */
+       sRGXMMUPDEConfig_256KBDP.uiBytesPerEntry = 8;
+
+       sRGXMMUPDEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+       sRGXMMUPDEConfig_256KBDP.uiAddrShift = 6;
+       sRGXMMUPDEConfig_256KBDP.uiAddrLog2Align = 6;
+
+       sRGXMMUPDEConfig_256KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+       sRGXMMUPDEConfig_256KBDP.uiVarCtrlShift = 1;
+
+       sRGXMMUPDEConfig_256KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+       sRGXMMUPDEConfig_256KBDP.uiProtShift = 0;
+
+       sRGXMMUPDEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+       sRGXMMUPDEConfig_256KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+       /*
+        * Setup MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP
+        */
+       sRGXMMUPTEConfig_256KBDP.uiBytesPerEntry = 8;
+
+       sRGXMMUPTEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffc0000);
+       sRGXMMUPTEConfig_256KBDP.uiAddrShift = 18;
+       sRGXMMUPTEConfig_256KBDP.uiAddrLog2Align = 18;
+
+       sRGXMMUPTEConfig_256KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+       sRGXMMUPTEConfig_256KBDP.uiProtShift = 0;
+
+       sRGXMMUPTEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+       sRGXMMUPTEConfig_256KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+       /*
+        * Setup sRGXMMUDevVAddrConfig_256KBDP
+        */
+       sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+       sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+       sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask,
+                       sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+       sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+       sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask,
+                       sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001c0000);
+       sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift = 18;
+       sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask,
+                       sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000003ffff);
+       sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetShift = 0;
+       sRGXMMUDevVAddrConfig_256KBDP.uiOffsetInBytes = 0;
+
+       /*
+        * Setup gsPageSizeConfig256KB
+        */
+       gsPageSizeConfig256KB.psPDEConfig = &sRGXMMUPDEConfig_256KBDP;
+       gsPageSizeConfig256KB.psPTEConfig = &sRGXMMUPTEConfig_256KBDP;
+       gsPageSizeConfig256KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_256KBDP;
+       gsPageSizeConfig256KB.uiRefCount = 0;
+       gsPageSizeConfig256KB.uiMaxRefCount = 0;
+
+       /*
+        * Setup sRGXMMUPDEConfig_1MBDP
+        */
+       sRGXMMUPDEConfig_1MBDP.uiBytesPerEntry = 8;
+
+       sRGXMMUPDEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+       /*
+        * The hardware requires that page tables be 64-byte (1<<6) aligned,
+        * even if they contain fewer entries.
+        */
+       sRGXMMUPDEConfig_1MBDP.uiAddrShift = 6;
+       sRGXMMUPDEConfig_1MBDP.uiAddrLog2Align = 6;
+
+       sRGXMMUPDEConfig_1MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+       sRGXMMUPDEConfig_1MBDP.uiVarCtrlShift = 1;
+
+       sRGXMMUPDEConfig_1MBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+       sRGXMMUPDEConfig_1MBDP.uiProtShift = 0;
+
+       sRGXMMUPDEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+       sRGXMMUPDEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+       /*
+        * Setup sRGXMMUPTEConfig_1MBDP
+        */
+       sRGXMMUPTEConfig_1MBDP.uiBytesPerEntry = 8;
+
+       sRGXMMUPTEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffff00000);
+       sRGXMMUPTEConfig_1MBDP.uiAddrShift = 20;
+       sRGXMMUPTEConfig_1MBDP.uiAddrLog2Align = 20;
+
+       sRGXMMUPTEConfig_1MBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+       sRGXMMUPTEConfig_1MBDP.uiProtShift = 0;
+
+       sRGXMMUPTEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+       sRGXMMUPTEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+       /*
+        * Setup sRGXMMUDevVAddrConfig_1MBDP
+        */
+       sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+       sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+       sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask,
+                       sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+       sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+       sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask,
+                       sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000100000);
+       sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift = 20;
+       sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask,
+                       sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00000fffff);
+       sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetShift = 0;
+       sRGXMMUDevVAddrConfig_1MBDP.uiOffsetInBytes = 0;
+
+       /*
+        * Setup gsPageSizeConfig1MB
+        */
+       gsPageSizeConfig1MB.psPDEConfig = &sRGXMMUPDEConfig_1MBDP;
+       gsPageSizeConfig1MB.psPTEConfig = &sRGXMMUPTEConfig_1MBDP;
+       gsPageSizeConfig1MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_1MBDP;
+       gsPageSizeConfig1MB.uiRefCount = 0;
+       gsPageSizeConfig1MB.uiMaxRefCount = 0;
+
+       /*
+        * Setup sRGXMMUPDEConfig_2MBDP
+        */
+       sRGXMMUPDEConfig_2MBDP.uiBytesPerEntry = 8;
+
+       sRGXMMUPDEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+       /*
+        * The hardware requires that page tables be 64-byte (1<<6) aligned,
+        * even if they contain fewer entries.
+        */
+       sRGXMMUPDEConfig_2MBDP.uiAddrShift = 6;
+       sRGXMMUPDEConfig_2MBDP.uiAddrLog2Align = 6;
+
+       sRGXMMUPDEConfig_2MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+       sRGXMMUPDEConfig_2MBDP.uiVarCtrlShift = 1;
+
+       sRGXMMUPDEConfig_2MBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+       sRGXMMUPDEConfig_2MBDP.uiProtShift = 0;
+
+       sRGXMMUPDEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+       sRGXMMUPDEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+       /*
+        * Setup sRGXMMUPTEConfig_2MBDP
+        */
+       sRGXMMUPTEConfig_2MBDP.uiBytesPerEntry = 8;
+
+       sRGXMMUPTEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xffffe00000);
+       sRGXMMUPTEConfig_2MBDP.uiAddrShift = 21;
+       sRGXMMUPTEConfig_2MBDP.uiAddrLog2Align = 21;
+
+       sRGXMMUPTEConfig_2MBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+       sRGXMMUPTEConfig_2MBDP.uiProtShift = 0;
+
+       sRGXMMUPTEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+       sRGXMMUPTEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+       /*
+        * Setup sRGXMMUDevVAddrConfig_2MBDP
+        */
+       sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+       sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+       sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask,
+                       sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+       sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+       sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask,
+                       sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000000000);
+       sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift = 21;
+       sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask,
+                       sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00001fffff);
+       sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetShift = 0;
+       sRGXMMUDevVAddrConfig_2MBDP.uiOffsetInBytes = 0;
+
+       /*
+        * Setup gsPageSizeConfig2MB
+        */
+       gsPageSizeConfig2MB.psPDEConfig = &sRGXMMUPDEConfig_2MBDP;
+       gsPageSizeConfig2MB.psPTEConfig = &sRGXMMUPTEConfig_2MBDP;
+       gsPageSizeConfig2MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_2MBDP;
+       gsPageSizeConfig2MB.uiRefCount = 0;
+       gsPageSizeConfig2MB.uiMaxRefCount = 0;
+
+       /*
+        * Setup sRGXMMUDeviceAttributes
+        */
+       sRGXMMUDeviceAttributes.eMMUType = PDUMP_MMU_TYPE_VARPAGE_40BIT;
+       sRGXMMUDeviceAttributes.eTopLevel = MMU_LEVEL_3;
+       sRGXMMUDeviceAttributes.ui32BaseAlign = RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT;
+       sRGXMMUDeviceAttributes.psBaseConfig = &sRGXMMUPCEConfig;
+       sRGXMMUDeviceAttributes.psTopLevelDevVAddrConfig = &sRGXMMUTopLevelDevVAddrConfig;
+
+       /* Functions for deriving page table/dir/cat protection bits */
+       sRGXMMUDeviceAttributes.pfnDerivePCEProt8 = RGXDerivePCEProt8;
+       sRGXMMUDeviceAttributes.pfnDerivePCEProt4 = RGXDerivePCEProt4;
+       sRGXMMUDeviceAttributes.pfnDerivePDEProt8 = RGXDerivePDEProt8;
+       sRGXMMUDeviceAttributes.pfnDerivePDEProt4 = RGXDerivePDEProt4;
+       sRGXMMUDeviceAttributes.pfnDerivePTEProt8 = RGXDerivePTEProt8;
+       sRGXMMUDeviceAttributes.pfnDerivePTEProt4 = RGXDerivePTEProt4;
+
+       /* Functions for establishing configurations for PDE/PTE/DEVVADDR
+          on per-heap basis */
+       sRGXMMUDeviceAttributes.pfnGetPageSizeConfiguration = RGXGetPageSizeConfigCB;
+       sRGXMMUDeviceAttributes.pfnPutPageSizeConfiguration = RGXPutPageSizeConfigCB;
+
+       sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE4 = RGXGetPageSizeFromPDE4;
+       sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE8 = RGXGetPageSizeFromPDE8;
+       sRGXMMUDeviceAttributes.pfnGetPageSizeFromVirtAddr = NULL;
+
+       psDeviceNode->psMMUDevAttrs = &sRGXMMUDeviceAttributes;
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR eError;
+
+       eError = PVRSRV_OK;
+
+#if defined(PDUMP)
+       psDeviceNode->pfnMMUGetContextID = NULL;
+#endif
+
+       psDeviceNode->psMMUDevAttrs = NULL;
+
+#if defined(DEBUG)
+       PVR_DPF((PVR_DBG_MESSAGE, "Variable Page Size Heap Stats:"));
+       PVR_DPF((PVR_DBG_MESSAGE, "Max 4K page heaps: %d",
+                       gsPageSizeConfig4KB.uiMaxRefCount));
+       PVR_DPF((PVR_DBG_VERBOSE, "Current 4K page heaps (should be 0): %d",
+                       gsPageSizeConfig4KB.uiRefCount));
+       PVR_DPF((PVR_DBG_MESSAGE, "Max 16K page heaps: %d",
+                       gsPageSizeConfig16KB.uiMaxRefCount));
+       PVR_DPF((PVR_DBG_VERBOSE, "Current 16K page heaps (should be 0): %d",
+                       gsPageSizeConfig16KB.uiRefCount));
+       PVR_DPF((PVR_DBG_MESSAGE, "Max 64K page heaps: %d",
+                       gsPageSizeConfig64KB.uiMaxRefCount));
+       PVR_DPF((PVR_DBG_VERBOSE, "Current 64K page heaps (should be 0): %d",
+                       gsPageSizeConfig64KB.uiRefCount));
+       PVR_DPF((PVR_DBG_MESSAGE, "Max 256K page heaps: %d",
+                       gsPageSizeConfig256KB.uiMaxRefCount));
+       PVR_DPF((PVR_DBG_VERBOSE, "Current 256K page heaps (should be 0): %d",
+                       gsPageSizeConfig256KB.uiRefCount));
+       PVR_DPF((PVR_DBG_MESSAGE, "Max 1M page heaps: %d",
+                       gsPageSizeConfig1MB.uiMaxRefCount));
+       PVR_DPF((PVR_DBG_VERBOSE, "Current 1M page heaps (should be 0): %d",
+                       gsPageSizeConfig1MB.uiRefCount));
+       PVR_DPF((PVR_DBG_MESSAGE, "Max 2M page heaps: %d",
+                       gsPageSizeConfig2MB.uiMaxRefCount));
+       PVR_DPF((PVR_DBG_VERBOSE, "Current 2M page heaps (should be 0): %d",
+                       gsPageSizeConfig2MB.uiRefCount));
+#endif
+       if (gsPageSizeConfig4KB.uiRefCount > 0 ||
+                       gsPageSizeConfig16KB.uiRefCount > 0 ||
+                       gsPageSizeConfig64KB.uiRefCount > 0 ||
+                       gsPageSizeConfig256KB.uiRefCount > 0 ||
+                       gsPageSizeConfig1MB.uiRefCount > 0 ||
+                       gsPageSizeConfig2MB.uiRefCount > 0
+       )
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXMMUInit_Unregister: Unbalanced MMU API Usage (Internal error)"));
+       }
+
+       return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePCEProt4
+@Description    calculate the PCE protection flags based on a 4 byte entry
+@Return         32-bit PCE protection bits
+ */ /**************************************************************************/
+static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags)
+{
+       return (uiProtFlags & MMU_PROTFLAGS_INVALID)?0:RGX_MMUCTRL_PC_DATA_VALID_EN;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePCEProt8
+@Description    calculate the PCE protection flags based on an 8 byte entry
+@Return         64-bit PCE protection bits
+ */ /**************************************************************************/
+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+       PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+       PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+
+       PVR_DPF((PVR_DBG_ERROR, "8-byte PCE not supported on this device"));
+       return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePDEProt4
+@Description    derive the PDE protection flags based on a 4 byte entry
+@Return         32-bit PDE protection bits
+ */ /**************************************************************************/
+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+       PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device"));
+       return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePDEProt8
+@Description    derive the PDE protection flags based on an 8 byte entry
+
+@Input          uiLog2DataPageSize The log2 of the required page size.
+                E.g., for 4KiB pages, this parameter must be 12.
+                For 2MiB pages, it must be set to 21.
+
+@Return         64-bit PDE protection bits
+ */ /**************************************************************************/
+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+       IMG_UINT64 ret_value = 0; /* 0 means invalid */
+
+       if (!(uiProtFlags & MMU_PROTFLAGS_INVALID)) /* if not invalid */
+       {
+               switch (uiLog2DataPageSize)
+               {
+               case RGX_HEAP_4KB_PAGE_SHIFT:
+                       ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB;
+                       break;
+               case RGX_HEAP_16KB_PAGE_SHIFT:
+                       ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB;
+                       break;
+               case RGX_HEAP_64KB_PAGE_SHIFT:
+                       ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB;
+                       break;
+               case RGX_HEAP_256KB_PAGE_SHIFT:
+                       ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB;
+                       break;
+               case RGX_HEAP_1MB_PAGE_SHIFT:
+                       ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB;
+                       break;
+               case RGX_HEAP_2MB_PAGE_SHIFT:
+                       ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB;
+                       break;
+               default:
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s:%d: in function<%s>: Invalid parameter log2_page_size. Expected {12, 14, 16, 18, 20, 21}. Got [%u]",
+                                       __FILE__, __LINE__, __func__, uiLog2DataPageSize));
+               }
+       }
+       return ret_value;
+}
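+
+/* Illustrative sketch only: per the @Input note above, a valid 4kB mapping,
+ * i.e. uiProtFlags without MMU_PROTFLAGS_INVALID and uiLog2DataPageSize of
+ * 12 (RGX_HEAP_4KB_PAGE_SHIFT), yields
+ *
+ *   RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB
+ *
+ * while any uiProtFlags carrying MMU_PROTFLAGS_INVALID yields 0 (an invalid
+ * PDE) regardless of the page size.
+ */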
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePTEProt4
+@Description    calculate the PTE protection flags based on a 4 byte entry
+@Return         32-bit PTE protection bits
+ */ /**************************************************************************/
+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+       PVR_DPF((PVR_DBG_ERROR, "4-byte PTE not supported on this device"));
+
+       return 0;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePTEProt8
+@Description    calculate the PTE protection flags based on an 8 byte entry
+@Return         64-bit PTE protection bits
+ */ /**************************************************************************/
+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+       IMG_UINT64 ui64MMUFlags=0;
+
+       PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+
+       if (((MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE) & uiProtFlags) == (MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE))
+       {
+               /* read/write */
+       }
+       else if (MMU_PROTFLAGS_READABLE & uiProtFlags)
+       {
+               /* read only */
+               ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_READ_ONLY_EN;
+       }
+       else if (MMU_PROTFLAGS_WRITEABLE & uiProtFlags)
+       {
+               /* write only */
+               PVR_DPF((PVR_DBG_WARNING, "RGXDerivePTEProt8: write-only is not possible on this device"));
+       }
+       else if ((MMU_PROTFLAGS_INVALID & uiProtFlags) == 0)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt8: neither read nor write specified..."));
+       }
+
+       /* cache coherency */
+       if (MMU_PROTFLAGS_CACHE_COHERENT & uiProtFlags)
+       {
+               ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_CC_EN;
+       }
+
+       /* cache setup */
+       if ((MMU_PROTFLAGS_CACHED & uiProtFlags) == 0)
+       {
+               ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN;
+       }
+
+       if ((uiProtFlags & MMU_PROTFLAGS_INVALID) == 0)
+       {
+               ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_VALID_EN;
+       }
+
+       if (MMU_PROTFLAGS_DEVICE(PMMETA_PROTECT) & uiProtFlags)
+       {
+               ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN;
+       }
+
+       return ui64MMUFlags;
+}
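+
+/* Illustrative sketch only, hypothetical flag combinations: following the
+ * logic above, a readable, writeable, SLC-cached mapping,
+ *
+ *   RGXDerivePTEProt8(MMU_PROTFLAGS_READABLE | MMU_PROTFLAGS_WRITEABLE |
+ *                     MMU_PROTFLAGS_CACHED, uiLog2DataPageSize)
+ *
+ * returns just RGX_MMUCTRL_PT_DATA_VALID_EN, whereas a read-only, uncached
+ * mapping additionally sets RGX_MMUCTRL_PT_DATA_READ_ONLY_EN and
+ * RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN.
+ */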
+
+
+/*************************************************************************/ /*!
+@Function       RGXGetPageSizeConfigCB
+@Description    Set up configuration for variable sized data pages.
+                RGXPutPageSizeConfigCB has to be called to ensure correct
+                refcounting.
+@Return         PVRSRV_ERROR
+ */ /**************************************************************************/
+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
+               const MMU_PxE_CONFIG **ppsMMUPDEConfig,
+               const MMU_PxE_CONFIG **ppsMMUPTEConfig,
+               const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+               IMG_HANDLE *phPriv)
+{
+       MMU_PAGESIZECONFIG *psPageSizeConfig;
+
+       switch (uiLog2DataPageSize)
+       {
+       case RGX_HEAP_4KB_PAGE_SHIFT:
+               psPageSizeConfig = &gsPageSizeConfig4KB;
+               break;
+       case RGX_HEAP_16KB_PAGE_SHIFT:
+               psPageSizeConfig = &gsPageSizeConfig16KB;
+               break;
+       case RGX_HEAP_64KB_PAGE_SHIFT:
+               psPageSizeConfig = &gsPageSizeConfig64KB;
+               break;
+       case RGX_HEAP_256KB_PAGE_SHIFT:
+               psPageSizeConfig = &gsPageSizeConfig256KB;
+               break;
+       case RGX_HEAP_1MB_PAGE_SHIFT:
+               psPageSizeConfig = &gsPageSizeConfig1MB;
+               break;
+       case RGX_HEAP_2MB_PAGE_SHIFT:
+               psPageSizeConfig = &gsPageSizeConfig2MB;
+               break;
+       default:
+               PVR_DPF((PVR_DBG_ERROR,
+                               "RGXGetPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
+                               uiLog2DataPageSize));
+               *phPriv = NULL;
+               return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+       }
+
+       /* Refer caller's pointers to the data */
+       *ppsMMUPDEConfig = psPageSizeConfig->psPDEConfig;
+       *ppsMMUPTEConfig = psPageSizeConfig->psPTEConfig;
+       *ppsMMUDevVAddrConfig = psPageSizeConfig->psDevVAddrConfig;
+
+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
+       /* Increment ref-count - not that we're allocating anything here
+       (I'm using static structs), but one day we might, so we want
+       the Get/Put code to be balanced properly */
+       psPageSizeConfig->uiRefCount++;
+
+       /* This is purely for debug statistics */
+       psPageSizeConfig->uiMaxRefCount = MAX(psPageSizeConfig->uiMaxRefCount,
+                       psPageSizeConfig->uiRefCount);
+#endif
+
+       *phPriv = (IMG_HANDLE)(uintptr_t)uiLog2DataPageSize;
+       PVR_ASSERT (uiLog2DataPageSize == (IMG_UINT32)(uintptr_t)*phPriv);
+
+       return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXPutPageSizeConfigCB
+@Description    Tells this code that the MMU module is done with the
+                configurations set in RGXGetPageSizeConfigCB.  This can
+                be a no-op.
+                Called after RGXGetPageSizeConfigCB.
+@Return         PVRSRV_ERROR
+ */ /**************************************************************************/
+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv)
+{
+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
+       MMU_PAGESIZECONFIG *psPageSizeConfig;
+       IMG_UINT32 uiLog2DataPageSize;
+
+       uiLog2DataPageSize = (IMG_UINT32)(uintptr_t) hPriv;
+
+       switch (uiLog2DataPageSize)
+       {
+       case RGX_HEAP_4KB_PAGE_SHIFT:
+               psPageSizeConfig = &gsPageSizeConfig4KB;
+               break;
+       case RGX_HEAP_16KB_PAGE_SHIFT:
+               psPageSizeConfig = &gsPageSizeConfig16KB;
+               break;
+       case RGX_HEAP_64KB_PAGE_SHIFT:
+               psPageSizeConfig = &gsPageSizeConfig64KB;
+               break;
+       case RGX_HEAP_256KB_PAGE_SHIFT:
+               psPageSizeConfig = &gsPageSizeConfig256KB;
+               break;
+       case RGX_HEAP_1MB_PAGE_SHIFT:
+               psPageSizeConfig = &gsPageSizeConfig1MB;
+               break;
+       case RGX_HEAP_2MB_PAGE_SHIFT:
+               psPageSizeConfig = &gsPageSizeConfig2MB;
+               break;
+       default:
+               PVR_DPF((PVR_DBG_ERROR,
+                               "RGXPutPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
+                               uiLog2DataPageSize));
+               return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+       }
+
+       /* Ref-count here is not especially useful, but it's an extra
+          check that the API is being used correctly */
+       psPageSizeConfig->uiRefCount--;
+#else
+       PVR_UNREFERENCED_PARAMETER(hPriv);
+#endif
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize)
+{
+       PVR_UNREFERENCED_PARAMETER(ui32PDE);
+       PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize);
+       PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device"));
+       return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+}
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize)
+{
+       switch (ui64PDE & (~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK))
+       {
+       case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB:
+               *pui32Log2PageSize = RGX_HEAP_4KB_PAGE_SHIFT;
+               break;
+       case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB:
+               *pui32Log2PageSize = RGX_HEAP_16KB_PAGE_SHIFT;
+               break;
+       case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB:
+               *pui32Log2PageSize = RGX_HEAP_64KB_PAGE_SHIFT;
+               break;
+       case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB:
+               *pui32Log2PageSize = RGX_HEAP_256KB_PAGE_SHIFT;
+               break;
+       case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB:
+               *pui32Log2PageSize = RGX_HEAP_1MB_PAGE_SHIFT;
+               break;
+       case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB:
+               *pui32Log2PageSize = RGX_HEAP_2MB_PAGE_SHIFT;
+               break;
+       default:
+               return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+       }
+       return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxmmuinit.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxmmuinit.h
new file mode 100644 (file)
index 0000000..0591628
--- /dev/null
@@ -0,0 +1,60 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific initialisation routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific MMU initialisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* NB: this file is not to be included arbitrarily.  It exists solely
+   for the linkage between rgxinit.c and rgxmmuinit.c, the former
+   being otherwise cluttered by the contents of the latter */
+
+#ifndef SRVKM_RGXMMUINIT_H
+#define SRVKM_RGXMMUINIT_H
+
+#include "device.h"
+#include "img_types.h"
+#include "mmu_common.h"
+#include "img_defs.h"
+
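+/* Set up and tear down the device-specific MMU configuration for the given device node */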
+PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode);
+PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+
+#endif /* #ifndef SRVKM_RGXMMUINIT_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxmulticore.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxmulticore.c
new file mode 100644 (file)
index 0000000..a888e70
--- /dev/null
@@ -0,0 +1,224 @@
+/*************************************************************************/ /*!
+@File           rgxmulticore.c
+@Title          Functions related to multicore devices
+@Codingstyle    IMG
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Kernel mode multicore device information handling.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxdevice.h"
+#include "rgxdefs_km.h"
+#include "pdump_km.h"
+#include "rgxmulticore.h"
+#include "multicore_defs.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+
+/*
+ * Check that the register defines match our hardcoded definitions.
+ * Rogue has these; Volcanic does not.
+ */
+#if ((RGX_MULTICORE_CAPABILITY_FRAGMENT_EN != RGX_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_EN) || \
+     (RGX_MULTICORE_CAPABILITY_GEOMETRY_EN != RGX_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_EN) || \
+     (RGX_MULTICORE_CAPABILITY_COMPUTE_EN  != RGX_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_EN) || \
+     (RGX_MULTICORE_CAPABILITY_PRIMARY_EN  != RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_EN) || \
+     (RGX_MULTICORE_ID_CLRMSK              != RGX_CR_MULTICORE_GPU_ID_CLRMSK))
+#error "Rogue definitions for RGX_CR_MULTICORE_GPU register have changed"
+#endif
+
+
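+/* Forward declaration: registered below as the device node's pfnGetMultiCoreInfo callback */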
+static PVRSRV_ERROR RGXGetMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                        IMG_UINT32 ui32CapsSize,
+                                        IMG_UINT32 *pui32NumCores,
+                                        IMG_UINT64 *pui64Caps);
+
+
+/*
+ * RGXGetMultiCoreInfo:
+ * Return multicore information to clients.
+ * Returns PVRSRV_ERROR_NOT_SUPPORTED on cores without multicore support.
+ */
+static PVRSRV_ERROR RGXGetMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                 IMG_UINT32 ui32CapsSize,
+                                 IMG_UINT32 *pui32NumCores,
+                                 IMG_UINT64 *pui64Caps)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       if (psDeviceNode->ui32MultiCoreNumCores == 0)
+       {
+               /* MULTICORE not supported on this device */
+               eError = PVRSRV_ERROR_NOT_SUPPORTED;
+       }
+       else
+       {
+               *pui32NumCores = psDeviceNode->ui32MultiCoreNumCores;
+               if (ui32CapsSize > 0)
+               {
+                       if (ui32CapsSize < psDeviceNode->ui32MultiCoreNumCores)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "Multicore caps buffer too small"));
+                               eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+                       }
+                       else
+                       {
+                               IMG_UINT32 i;
+
+                               for (i = 0; i < psDeviceNode->ui32MultiCoreNumCores; ++i)
+                               {
+                                       pui64Caps[i] = psDeviceNode->pui64MultiCoreCapabilities[i];
+                               }
+                       }
+               }
+       }
+
+       return eError;
+}
+
+
+
+/*
+ * RGXInitMultiCoreInfo:
+ * Read multicore HW registers and fill in data structure for clients.
+ * Returns PVRSRV_ERROR_NOT_SUPPORTED on cores without multicore support.
+ */
+PVRSRV_ERROR RGXInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       if (psDeviceNode->pfnGetMultiCoreInfo != NULL)
+       {
+               /* we only set this up once */
+               return PVRSRV_OK;
+       }
+
+       /* defaults for non-multicore devices */
+       psDeviceNode->ui32MultiCoreNumCores = 0;
+       psDeviceNode->ui32MultiCorePrimaryId = (IMG_UINT32)(-1);
+       psDeviceNode->pui64MultiCoreCapabilities = NULL;
+       psDeviceNode->pfnGetMultiCoreInfo = NULL;
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT))
+       {
+               IMG_UINT32 ui32MulticoreRegBankOffset = (1 << RGX_GET_FEATURE_VALUE(psDevInfo, XPU_MAX_REGBANKS_ADDR_WIDTH));
+               IMG_UINT32 ui32MulticoreGPUReg = RGX_CR_MULTICORE_GPU;
+               IMG_UINT32 ui32NumCores;
+               IMG_UINT32 i;
+
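+               /* Read the number of cores reported by the MULTICORE_SYSTEM register */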
+               ui32NumCores = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE_SYSTEM);
+#if !defined(NO_HARDWARE)
+               /* check that the number of cores reported is in-bounds */
+               if (ui32NumCores > (RGX_CR_MULTICORE_SYSTEM_MASKFULL >> RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_SHIFT))
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "invalid return (%u) read from MULTICORE_SYSTEM", ui32NumCores));
+                       return PVRSRV_ERROR_DEVICE_REGISTER_FAILED;
+               }
+#else
+               /* for nohw set to max so clients can allocate enough memory for all pdump runs on any config */
+               ui32NumCores = RGX_MULTICORE_MAX_NOHW_CORES;
+#endif
+               PVR_DPF((PVR_DBG_MESSAGE, "Multicore system has %u cores", ui32NumCores));
+               PDUMPCOMMENT(psDeviceNode, "RGX Multicore has %d cores\n", ui32NumCores);
+
+               /* allocate storage for capabilities */
+               psDeviceNode->pui64MultiCoreCapabilities = OSAllocMem(ui32NumCores * sizeof(psDeviceNode->pui64MultiCoreCapabilities[0]));
+               if (psDeviceNode->pui64MultiCoreCapabilities == NULL)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Failed to alloc memory for Multicore info", __func__));
+                       return PVRSRV_ERROR_OUT_OF_MEMORY;
+               }
+
+               psDeviceNode->ui32MultiCoreNumCores = ui32NumCores;
+
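+               /* Read (or, for no-hardware builds, synthesise) each core's capability word */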
+               for (i = 0; i < ui32NumCores; ++i)
+               {
+       #if !defined(NO_HARDWARE)
+                       psDeviceNode->pui64MultiCoreCapabilities[i] =
+                                                       OSReadHWReg64(psDevInfo->pvRegsBaseKM, ui32MulticoreGPUReg) & RGX_CR_MULTICORE_GPU_MASKFULL;
+       #else
+                       /* emulation for what we think caps are */
+                       psDeviceNode->pui64MultiCoreCapabilities[i] =
+                                                          i | ((i == 0) ? (RGX_MULTICORE_CAPABILITY_PRIMARY_EN
+                                                                                         | RGX_MULTICORE_CAPABILITY_GEOMETRY_EN) : 0)
+                                                          | RGX_MULTICORE_CAPABILITY_COMPUTE_EN
+                                                          | RGX_MULTICORE_CAPABILITY_FRAGMENT_EN;
+       #endif
+                       PVR_DPF((PVR_DBG_MESSAGE, "Core %d has capabilities value 0x%x", i, (IMG_UINT32)psDeviceNode->pui64MultiCoreCapabilities[i] ));
+                       PDUMPCOMMENT(psDeviceNode, "\tCore %d has caps 0x%08x\n", i,
+                                    (IMG_UINT32)psDeviceNode->pui64MultiCoreCapabilities[i]);
+
+                       if (psDeviceNode->pui64MultiCoreCapabilities[i] & RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_EN)
+                       {
+                               psDeviceNode->ui32MultiCorePrimaryId = (psDeviceNode->pui64MultiCoreCapabilities[i]
+                                                                                                               & ~RGX_CR_MULTICORE_GPU_ID_CLRMSK)
+                                                                                                               >> RGX_CR_MULTICORE_GPU_ID_SHIFT;
+                       }
+
+                       ui32MulticoreGPUReg += ui32MulticoreRegBankOffset;
+               }
+
+               /* Register callback to return info about multicore setup to client bridge */
+               psDeviceNode->pfnGetMultiCoreInfo = RGXGetMultiCoreInfo;
+       }
+       else
+       {
+               /* MULTICORE not supported on this device */
+               eError = PVRSRV_ERROR_NOT_SUPPORTED;
+       }
+
+       return eError;
+}
+
+
+/*
+ * RGXDeInitMultiCoreInfo:
+ * Release resources and clear the MultiCore values in the DeviceNode.
+ */
+void RGXDeInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       if (psDeviceNode->pui64MultiCoreCapabilities != NULL)
+       {
+               OSFreeMem(psDeviceNode->pui64MultiCoreCapabilities);
+               psDeviceNode->pui64MultiCoreCapabilities = NULL;
+               psDeviceNode->ui32MultiCoreNumCores = 0;
+               psDeviceNode->ui32MultiCorePrimaryId = (IMG_UINT32)(-1);
+       }
+       psDeviceNode->pfnGetMultiCoreInfo = NULL;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxpdump.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxpdump.c
new file mode 100644 (file)
index 0000000..750281d
--- /dev/null
@@ -0,0 +1,708 @@
+/*************************************************************************/ /*!
+@File           rgxpdump.c
+@Title          Device specific pdump routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific pdump functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(PDUMP)
+#include "pvrsrv.h"
+#include "devicemem_pdump.h"
+#include "rgxpdump.h"
+#include "rgx_bvnc_defs_km.h"
+#include "pdumpdesc.h"
+
+/*
+ * There are two different sets of functions, one for META/RISCV and one for
+ * MIPS, because the PDump player does not yet implement support for the
+ * MIPS MMU. So for MIPS builds we cannot use DevmemPDumpSaveToFileVirtual;
+ * we have to use DevmemPDumpSaveToFile instead.
+ */
+static PVRSRV_ERROR _FWDumpSignatureBufferKM(CONNECTION_DATA * psConnection,
+                                             PVRSRV_DEVICE_NODE *psDeviceNode,
+                                             IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       PDUMPIF(psDeviceNode, "DISABLE_SIGNATURE_BUFFER_DUMP", ui32PDumpFlags);
+       PDUMPELSE(psDeviceNode, "DISABLE_SIGNATURE_BUFFER_DUMP", ui32PDumpFlags);
+
+#if defined(SUPPORT_FIRMWARE_GCOV)
+       /* Gcov */
+       PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Gcov Buffer");
+       DevmemPDumpSaveToFileVirtual(psDevInfo->psFirmwareGcovBufferMemDesc,
+                                                                        0,
+                                                                        psDevInfo->ui32FirmwareGcovSize,
+                                                                        "firmware_gcov.img",
+                                                                        0,
+                                                                        ui32PDumpFlags);
+#endif
+       /* TA signatures */
+       PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump TA signatures and checksums Buffer");
+       DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigTAChecksMemDesc,
+                                                                0,
+                                                                psDevInfo->ui32SigTAChecksSize,
+                                                                "out.tasig",
+                                                                0,
+                                                                ui32PDumpFlags);
+
+       /* 3D signatures */
+       PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump 3D signatures and checksums Buffer");
+       DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSig3DChecksMemDesc,
+                                                                0,
+                                                                psDevInfo->ui32Sig3DChecksSize,
+                                                                "out.3dsig",
+                                                                0,
+                                                                ui32PDumpFlags);
+
+#if defined(RGX_FEATURE_TDM_PDS_CHECKSUM_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TDM_PDS_CHECKSUM))
+       {
+               /* TDM signatures */
+               PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump TDM signatures and checksums Buffer");
+               DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigTDM2DChecksMemDesc,
+                                                                        0,
+                                                                        psDevInfo->ui32SigTDM2DChecksSize,
+                                                                        "out.tdmsig",
+                                                                        0,
+                                                                        ui32PDumpFlags);
+       }
+#endif
+
+       PDUMPFI(psDeviceNode, "DISABLE_SIGNATURE_BUFFER_DUMP", ui32PDumpFlags);
+
+       return PVRSRV_OK;
+}
+static PVRSRV_ERROR _FWDumpTraceBufferKM(CONNECTION_DATA * psConnection,
+                                                                                PVRSRV_DEVICE_NODE     *psDeviceNode,
+                                                                                IMG_UINT32                     ui32PDumpFlags)
+{
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+       IMG_UINT32      ui32ThreadNum, ui32Size, ui32OutFileOffset;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+       /* Dump trace buffers */
+       PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump trace buffers");
+       for (ui32ThreadNum = 0, ui32OutFileOffset = 0; ui32ThreadNum < RGXFW_THREAD_NUM; ui32ThreadNum++)
+       {
+               /*
+                * Some compilers cannot cope with the use of offsetof() below - the specific problem being the use of
+                * a non-const variable in the expression, which needs to be const. The typical compiler error produced is
+                * "expression must have a constant value".
+                */
+               const IMG_DEVMEM_OFFSET_T uiTraceBufThreadNumOff
+               = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_TRACEBUF *)0)->sTraceBuf[ui32ThreadNum]);
+
+               /* First, dump the trace pointer (ui32TracePointer) */
+               ui32Size = sizeof(IMG_UINT32);
+               DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+                                                               uiTraceBufThreadNumOff,
+                                                               ui32Size,
+                                                               "out.trace",
+                                                               ui32OutFileOffset,
+                                                               ui32PDumpFlags);
+               ui32OutFileOffset += ui32Size;
+
+               /* next, dump size of trace buffer in DWords */
+               ui32Size = sizeof(IMG_UINT32);
+               DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+                                                               offsetof(RGXFWIF_TRACEBUF, ui32TraceBufSizeInDWords),
+                                                               ui32Size,
+                                                               "out.trace",
+                                                               ui32OutFileOffset,
+                                                               ui32PDumpFlags);
+               ui32OutFileOffset += ui32Size;
+
+               /* trace buffer */
+               ui32Size = psDevInfo->psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32);
+               PVR_ASSERT(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum]);
+               DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum],
+                                                               0, /* 0 offset in the trace buffer mem desc */
+                                                               ui32Size,
+                                                               "out.trace",
+                                                               ui32OutFileOffset,
+                                                               ui32PDumpFlags);
+               ui32OutFileOffset += ui32Size;
+
+               /* assert info buffer */
+               ui32Size = RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR)
+                               + RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR)
+                               + sizeof(IMG_UINT32);
+               DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+                                                               offsetof(RGXFWIF_TRACEBUF, sTraceBuf) /* move to first element of sTraceBuf */
+                                                                       + ui32ThreadNum * sizeof(RGXFWIF_TRACEBUF_SPACE) /* skip required number of sTraceBuf elements */
+                                                                       + offsetof(RGXFWIF_TRACEBUF_SPACE, sAssertBuf), /* offset into its sAssertBuf, to be pdumped */
+                                                               ui32Size,
+                                                               "out.trace",
+                                                               ui32OutFileOffset,
+                                                               ui32PDumpFlags);
+               ui32OutFileOffset += ui32Size;
+       }
+
+       /* FW HWPerf buffer is always allocated when PDUMP is defined, irrespective of HWPerf events being enabled/disabled */
+       PVR_ASSERT(psDevInfo->psRGXFWIfHWPerfBufMemDesc);
+
+       /* Dump hwperf buffer */
+       PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump HWPerf Buffer");
+       DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfHWPerfBufMemDesc,
+                                                                0,
+                                                                psDevInfo->ui32RGXFWIfHWPerfBufSize,
+                                                                "out.hwperf",
+                                                                0,
+                                                                ui32PDumpFlags);
+
+       return PVRSRV_OK;
+
+}
+
+
+static PVRSRV_ERROR _MipsDumpSignatureBufferKM(CONNECTION_DATA * psConnection,
+                                               PVRSRV_DEVICE_NODE *psDeviceNode,
+                                               IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       /* TA signatures */
+       PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump TA signatures and checksums Buffer");
+
+       DevmemPDumpSaveToFile(psDevInfo->psRGXFWSigTAChecksMemDesc,
+                                                                0,
+                                                                psDevInfo->ui32SigTAChecksSize,
+                                                                "out.tasig",
+                                                                0);
+
+       /* 3D signatures */
+       PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump 3D signatures and checksums Buffer");
+       DevmemPDumpSaveToFile(psDevInfo->psRGXFWSig3DChecksMemDesc,
+                                                                0,
+                                                                psDevInfo->ui32Sig3DChecksSize,
+                                                                "out.3dsig",
+                                                                0);
+
+#if defined(RGX_FEATURE_TDM_PDS_CHECKSUM_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TDM_PDS_CHECKSUM))
+       {
+               /* TDM signatures */
+               PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump TDM signatures and checksums Buffer");
+               DevmemPDumpSaveToFile(psDevInfo->psRGXFWSigTDM2DChecksMemDesc,
+                                                                        0,
+                                                                        psDevInfo->ui32SigTDM2DChecksSize,
+                                                                        "out.tdmsig",
+                                                                        0);
+       }
+#endif
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _MipsDumpTraceBufferKM(CONNECTION_DATA *psConnection,
+                                           PVRSRV_DEVICE_NODE *psDeviceNode,
+                                           IMG_UINT32 ui32PDumpFlags)
+{
+       IMG_UINT32              ui32ThreadNum, ui32Size, ui32OutFileOffset;
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+       /* Dump trace buffers */
+       PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump trace buffers");
+       for (ui32ThreadNum = 0, ui32OutFileOffset = 0; ui32ThreadNum < RGXFW_THREAD_NUM; ui32ThreadNum++)
+       {
+               /*
+                * Some compilers cannot cope with the use of offsetof() below - the specific problem being the use of
+                * a non-const variable in the expression, which needs to be const. The typical compiler error produced is
+                * "expression must have a constant value".
+                */
+               const IMG_DEVMEM_OFFSET_T uiTraceBufOff
+               = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_TRACEBUF *)0)->sTraceBuf[ui32ThreadNum]);
+
+               /* Same again... */
+               const IMG_DEVMEM_OFFSET_T uiTraceBufSpaceAssertBufOff
+               = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_TRACEBUF_SPACE *)0)->sAssertBuf);
+
+               /* First, dump the trace pointer (ui32TracePointer) */
+               ui32Size = sizeof(IMG_UINT32);
+               DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+                                                               uiTraceBufOff,
+                                                               ui32Size,
+                                                               "out.trace",
+                                                               ui32OutFileOffset);
+               ui32OutFileOffset += ui32Size;
+
+               /* next, dump size of trace buffer in DWords */
+               ui32Size = sizeof(IMG_UINT32);
+               DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+                                                               offsetof(RGXFWIF_TRACEBUF, ui32TraceBufSizeInDWords),
+                                                               ui32Size,
+                                                               "out.trace",
+                                                               ui32OutFileOffset);
+               ui32OutFileOffset += ui32Size;
+
+               /* trace buffer */
+               ui32Size = psDevInfo->psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32);
+               PVR_ASSERT(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum]);
+               DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum],
+                                                               0, /* 0 offset in the trace buffer mem desc */
+                                                               ui32Size,
+                                                               "out.trace",
+                                                               ui32OutFileOffset);
+               ui32OutFileOffset += ui32Size;
+
+               /* assert info buffer */
+               ui32Size = RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR)
+                               + RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR)
+                               + sizeof(IMG_UINT32);
+               DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+                                                               uiTraceBufOff + uiTraceBufSpaceAssertBufOff,
+                                                               ui32Size,
+                                                               "out.trace",
+                                                               ui32OutFileOffset);
+               ui32OutFileOffset += ui32Size;
+       }
+
+       /* Dump hwperf buffer */
+       PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump HWPerf Buffer");
+       DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfHWPerfBufMemDesc,
+                                                                0,
+                                                                psDevInfo->ui32RGXFWIfHWPerfBufSize,
+                                                                "out.hwperf",
+                                                                0);
+
+       return PVRSRV_OK;
+
+}
+
+
+/*
+ * PVRSRVPDumpSignatureBufferKM
+ */
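+/* Dispatch to the MIPS or META/RISCV variant depending on the device's firmware processor */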
+PVRSRV_ERROR PVRSRVPDumpSignatureBufferKM(CONNECTION_DATA * psConnection,
+                                          PVRSRV_DEVICE_NODE   *psDeviceNode,
+                                          IMG_UINT32                   ui32PDumpFlags)
+{
+       if ((psDeviceNode->pfnCheckDeviceFeature) &&
+                PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, MIPS))
+       {
+               return _MipsDumpSignatureBufferKM(psConnection,
+                                                                                 psDeviceNode,
+                                                                                 ui32PDumpFlags);
+       }
+       else
+       {
+               return _FWDumpSignatureBufferKM(psConnection,
+                                                                               psDeviceNode,
+                                                                               ui32PDumpFlags);
+       }
+}
+
+
+#if defined(SUPPORT_VALIDATION)
+PVRSRV_ERROR PVRSRVPDumpComputeCRCSignatureCheckKM(CONNECTION_DATA * psConnection,
+                                                   PVRSRV_DEVICE_NODE * psDeviceNode,
+                                                   IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+       PVRSRV_ERROR eError;
+
+       if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE)))
+       {
+               return PVRSRV_ERROR_NOT_SUPPORTED;
+       }
+
+       /*
+        * Add a PDUMP POLL on the KZ signature check status.
+        */
+       if (psDevInfo->ui32ValidationFlags & RGX_VAL_KZ_SIG_CHECK_NOERR_EN)
+       {
+               PDUMPCOMMENT(psDeviceNode, "Verify KZ Signature: match required");
+               eError = PDUMPREGPOL(psDeviceNode,
+                                    RGX_PDUMPREG_NAME,
+                                    RGX_CR_SCRATCH11,
+                                    1U,
+                                    0xFFFFFFFF,
+                                    ui32PDumpFlags,
+                                    PDUMP_POLL_OPERATOR_EQUAL);
+       }
+       else if (psDevInfo->ui32ValidationFlags & RGX_VAL_KZ_SIG_CHECK_ERR_EN)
+       {
+               PDUMPCOMMENT(psDeviceNode, "Verify KZ Signature: mismatch required");
+               eError = PDUMPREGPOL(psDeviceNode,
+                                    RGX_PDUMPREG_NAME,
+                                    RGX_CR_SCRATCH11,
+                                    2U,
+                                    0xFFFFFFFF,
+                                    ui32PDumpFlags,
+                                    PDUMP_POLL_OPERATOR_EQUAL);
+       }
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       return PVRSRV_OK;
+}
+#endif
+
+PVRSRV_ERROR PVRSRVPDumpCRCSignatureCheckKM(CONNECTION_DATA * psConnection,
+                                            PVRSRV_DEVICE_NODE * psDeviceNode,
+                                            IMG_UINT32 ui32PDumpFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+
+       return PVRSRV_OK;
+}
+
+
+/*
+ * PVRSRVPDumpValCheckPreCommandKM
+ */
+PVRSRV_ERROR PVRSRVPDumpValCheckPreCommandKM(CONNECTION_DATA * psConnection,
+                                             PVRSRV_DEVICE_NODE * psDeviceNode,
+                                             IMG_UINT32 ui32PDumpFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * PVRSRVPDumpValCheckPostCommandKM
+ */
+PVRSRV_ERROR PVRSRVPDumpValCheckPostCommandKM(CONNECTION_DATA * psConnection,
+                                              PVRSRV_DEVICE_NODE * psDeviceNode,
+                                              IMG_UINT32 ui32PDumpFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR PVRSRVPDumpTraceBufferKM(CONNECTION_DATA *psConnection,
+                                      PVRSRV_DEVICE_NODE *psDeviceNode,
+                                      IMG_UINT32 ui32PDumpFlags)
+{
+       if ((psDeviceNode->pfnCheckDeviceFeature) &&
+                PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, MIPS))
+       {
+               return _MipsDumpTraceBufferKM(psConnection, psDeviceNode, ui32PDumpFlags);
+       }
+       else
+       {
+               return _FWDumpTraceBufferKM(psConnection, psDeviceNode, ui32PDumpFlags);
+       }
+}
+
+PVRSRV_ERROR RGXPDumpPrepareOutputImageDescriptorHdr(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                       IMG_UINT32 ui32HeaderSize,
+                                                                       IMG_UINT32 ui32DataSize,
+                                                                       IMG_UINT32 ui32LogicalWidth,
+                                                                       IMG_UINT32 ui32LogicalHeight,
+                                                                       IMG_UINT32 ui32PhysicalWidth,
+                                                                       IMG_UINT32 ui32PhysicalHeight,
+                                                                       PDUMP_PIXEL_FORMAT ePixFmt,
+                                                                       IMG_MEMLAYOUT eMemLayout,
+                                                                       IMG_FB_COMPRESSION eFBCompression,
+                                                                       const IMG_UINT32 *paui32FBCClearColour,
+                                                                       PDUMP_FBC_SWIZZLE eFBCSwizzle,
+                                                                       IMG_PBYTE pbyPDumpImageHdr)
+{
+       IMG_PUINT32 pui32Word;
+       IMG_UINT32 ui32HeaderDataSize;
+
+#if defined(RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK) || defined(RGX_FEATURE_TFBC_DELTA_CORRELATION_BIT_MASK) || defined(RGX_FEATURE_TFBC_NATIVE_YUV10_BIT_MASK)
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       IMG_UINT32 ui32TFBCControl = (psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlagsExt & RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK) >>
+                                                                                                                                                                         RGXFWIF_INICFG_EXT_TFBC_CONTROL_SHIFT;
+#endif
+
+       /* Validate parameters */
+       if (((IMAGE_HEADER_SIZE & ~(HEADER_WORD1_SIZE_CLRMSK >> HEADER_WORD1_SIZE_SHIFT)) != 0) ||
+               ((IMAGE_HEADER_VERSION & ~(HEADER_WORD1_VERSION_CLRMSK >> HEADER_WORD1_VERSION_SHIFT)) != 0))
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       memset(pbyPDumpImageHdr, 0, IMAGE_HEADER_SIZE);
+
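+       /* Words 0-2 hold the header type, header size/version and data size; words 3-7 describe the image geometry and pixel format */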
+       pui32Word = IMG_OFFSET_ADDR(pbyPDumpImageHdr, 0);
+       pui32Word[0] = (IMAGE_HEADER_TYPE << HEADER_WORD0_TYPE_SHIFT);
+       pui32Word[1] = (IMAGE_HEADER_SIZE << HEADER_WORD1_SIZE_SHIFT) |
+                                  (IMAGE_HEADER_VERSION << HEADER_WORD1_VERSION_SHIFT);
+
+       ui32HeaderDataSize = ui32DataSize;
+       if (eFBCompression != IMG_FB_COMPRESSION_NONE)
+       {
+               ui32HeaderDataSize += ui32HeaderSize;
+       }
+       pui32Word[2] = ui32HeaderDataSize << HEADER_WORD2_DATA_SIZE_SHIFT;
+
+       pui32Word[3] = ui32LogicalWidth << IMAGE_HEADER_WORD3_LOGICAL_WIDTH_SHIFT;
+       pui32Word[4] = ui32LogicalHeight << IMAGE_HEADER_WORD4_LOGICAL_HEIGHT_SHIFT;
+
+       pui32Word[5] = ePixFmt << IMAGE_HEADER_WORD5_FORMAT_SHIFT;
+
+       pui32Word[6] = ui32PhysicalWidth << IMAGE_HEADER_WORD6_PHYSICAL_WIDTH_SHIFT;
+       pui32Word[7] = ui32PhysicalHeight << IMAGE_HEADER_WORD7_PHYSICAL_HEIGHT_SHIFT;
+
+       pui32Word[8] = IMAGE_HEADER_WORD8_STRIDE_POSITIVE | IMAGE_HEADER_WORD8_BIFTYPE_NONE;
+
+       switch (eMemLayout)
+       {
+       case IMG_MEMLAYOUT_STRIDED:
+               pui32Word[8] |= IMAGE_HEADER_WORD8_TWIDDLING_STRIDED;
+               break;
+       case IMG_MEMLAYOUT_TWIDDLED:
+               pui32Word[8] |= IMAGE_HEADER_WORD8_TWIDDLING_NTWIDDLE;
+               break;
+       default:
+               PVR_DPF((PVR_DBG_ERROR, "Unsupported memory layout - %d", eMemLayout));
+               return PVRSRV_ERROR_UNSUPPORTED_MEMORY_LAYOUT;
+       }
+
+       pui32Word[9] = 0;
+       if (eFBCompression != IMG_FB_COMPRESSION_NONE)
+       {
+               switch (PVRSRV_GET_DEVICE_FEATURE_VALUE(psDeviceNode, FBCDC_ALGORITHM))
+               {
+               case 1:
+                       pui32Word[9] |= IMAGE_HEADER_WORD9_FBCCOMPAT_BASE;
+                       break;
+               case 2:
+                       pui32Word[9] |= IMAGE_HEADER_WORD9_FBCCOMPAT_V2;
+                       break;
+               case 3:
+                       pui32Word[9] |= IMAGE_HEADER_WORD9_FBCCOMPAT_V3_0_LAYOUT2;
+                       break;
+               case 4:
+                       pui32Word[9] |= IMAGE_HEADER_WORD9_FBCCOMPAT_V4;
+
+                       if (eFBCompression == IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8  ||
+                               eFBCompression == IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4 ||
+                               eFBCompression == IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2)
+                       {
+                               pui32Word[9] |= IMAGE_HEADER_WORD9_LOSSY_ON;
+                       }
+
+                       pui32Word[9] |= (eFBCSwizzle << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) & IMAGE_HEADER_WORD9_SWIZZLE_CLRMSK;
+
+                       break;
+               case 50:
+                       pui32Word[9] |= IMAGE_HEADER_WORD9_FBCCOMPAT_TFBC;
+
+                       if (eFBCompression == IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8  ||
+                               eFBCompression == IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4 ||
+                               eFBCompression == IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2)
+                       {
+                               pui32Word[9] |= IMAGE_HEADER_WORD9_LOSSY_25;
+                       }
+
+                       if (eFBCompression == IMG_FB_COMPRESSION_DIRECT_LOSSY37_8x8  ||
+                               eFBCompression == IMG_FB_COMPRESSION_DIRECT_LOSSY37_16x4 ||
+                               eFBCompression == IMG_FB_COMPRESSION_DIRECT_LOSSY37_32x2)
+                       {
+                               pui32Word[9] |= IMAGE_HEADER_WORD9_LOSSY_37;
+                       }
+
+                       if (eFBCompression == IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8  ||
+                               eFBCompression == IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4 ||
+                               eFBCompression == IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2)
+                       {
+                               pui32Word[9] |= IMAGE_HEADER_WORD9_LOSSY_50;
+                       }
+
+                       if (eFBCompression == IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8  ||
+                               eFBCompression == IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4 ||
+                               eFBCompression == IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2)
+                       {
+                               pui32Word[9] |= IMAGE_HEADER_WORD9_LOSSY_75;
+                       }
+
+                       break;
+               default:
+                       PVR_DPF((PVR_DBG_ERROR, "Unsupported algorithm - %d",
+                                       PVRSRV_GET_DEVICE_FEATURE_VALUE(psDeviceNode, FBCDC_ALGORITHM)));
+                       return PVRSRV_ERROR_NOT_ENABLED;
+               }
+       }
+
+       switch (GET_FBCDC_BLOCK_TYPE(eFBCompression))
+       {
+       case IMG_FB_COMPRESSION_NONE:
+               break;
+       case IMG_FB_COMPRESSION_DIRECT_8x8:
+               pui32Word[8] |= IMAGE_HEADER_WORD8_FBCTYPE_8X8;
+               pui32Word[9] |= IMAGE_HEADER_WORD9_FBCDECOR_ENABLE;
+               break;
+       case IMG_FB_COMPRESSION_DIRECT_16x4:
+               pui32Word[8] |= IMAGE_HEADER_WORD8_FBCTYPE_16x4;
+               pui32Word[9] |= IMAGE_HEADER_WORD9_FBCDECOR_ENABLE;
+               break;
+       case IMG_FB_COMPRESSION_DIRECT_32x2:
+               pui32Word[9] |= IMAGE_HEADER_WORD9_FBCDECOR_ENABLE;
+               break;
+       default:
+               PVR_DPF((PVR_DBG_ERROR, "Unsupported compression mode - %d", eFBCompression));
+               return PVRSRV_ERROR_UNSUPPORTED_FB_COMPRESSION_MODE;
+       }
+
+       pui32Word[10] = paui32FBCClearColour[0];
+       pui32Word[11] = paui32FBCClearColour[1];
+       pui32Word[12] = paui32FBCClearColour[2];
+       pui32Word[13] = paui32FBCClearColour[3];
+
+#if defined(RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_LOSSY_37_PERCENT))
+       {
+               /* Should match current value of RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP */
+               IMG_UINT32 ui32TFBCGroup  = (ui32TFBCControl & ~RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_CLRMSK) >>
+                                                                                                               RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_SHIFT;
+               switch (ui32TFBCGroup)
+               {
+                       case RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_GROUP_0:
+                               pui32Word[14] = IMAGE_HEADER_WORD14_TFBC_GROUP_25_50_75;
+                               break;
+                       case RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_GROUP_1:
+                               pui32Word[14] = IMAGE_HEADER_WORD14_TFBC_GROUP_25_37_50;
+                               break;
+               }
+       }
+       else
+#endif
+       {
+               pui32Word[14] = IMAGE_HEADER_WORD14_TFBC_GROUP_25_50_75;
+       }
+
+#if defined(RGX_FEATURE_TFBC_DELTA_CORRELATION_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_DELTA_CORRELATION))
+       {
+               /* Should match current value of RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME */
+               IMG_UINT32 ui32TFBCScheme = (ui32TFBCControl & ~RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_CLRMSK) >>
+                                                                                                               RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_SHIFT;
+               switch (ui32TFBCScheme)
+               {
+                       case RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_DEFAULT:
+                               pui32Word[14] |= IMAGE_HEADER_WORD14_COMP_SCHEME_ALL;
+                               break;
+                       case RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_TFBC_DELTA_STANDARD_AND_CORRELATION:
+                               pui32Word[14] |= IMAGE_HEADER_WORD14_COMP_SCHEME_D_STD_CORR;
+                               break;
+                       case RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_TFBC_DELTA_STANDARD:
+                               pui32Word[14] |= IMAGE_HEADER_WORD14_COMP_SCHEME_D_STD_ONLY;
+                               break;
+                       default:
+                               PVR_DPF((PVR_DBG_ERROR, "Unsupported TFBC compression control scheme - %d", ui32TFBCScheme));
+                               return PVRSRV_ERROR_UNSUPPORTED_FB_COMPRESSION_MODE;
+               }
+       }
+       else
+#endif
+       {
+               /* Should always be set to 2 ("TFBC delta standard only") on cores without this feature */
+               pui32Word[14] |= IMAGE_HEADER_WORD14_COMP_SCHEME_D_STD_ONLY;
+       }
+
+#if defined(RGX_FEATURE_TFBC_NATIVE_YUV10_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_NATIVE_YUV10))
+       {
+               IMG_UINT32 ui32TFBCOverrideYUV10 = (ui32TFBCControl & RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_EN);
+
+               if (ui32TFBCOverrideYUV10)
+               {
+                       pui32Word[14] |= IMAGE_HEADER_WORD14_YUV10_OPTIMAL_FMT_8_EN;
+               }
+       }
+#endif
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXPDumpPrepareOutputDataDescriptorHdr(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                       IMG_UINT32 ui32HeaderType,
+                                                                       IMG_UINT32 ui32DataSize,
+                                                                       IMG_UINT32 ui32ElementType,
+                                                                       IMG_UINT32 ui32ElementCount,
+                                                                       IMG_PBYTE pbyPDumpDataHdr)
+{
+       IMG_PUINT32 pui32Word;
+
+       /* Validate parameters */
+       if (((DATA_HEADER_SIZE & ~(HEADER_WORD1_SIZE_CLRMSK >> HEADER_WORD1_SIZE_SHIFT)) != 0) ||
+               ((DATA_HEADER_VERSION & ~(HEADER_WORD1_VERSION_CLRMSK >> HEADER_WORD1_VERSION_SHIFT)) != 0))
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       pui32Word = IMG_OFFSET_ADDR(pbyPDumpDataHdr, 0);
+
+       if (ui32HeaderType == DATA_HEADER_TYPE)
+       {
+               pui32Word[0] = (ui32HeaderType << HEADER_WORD0_TYPE_SHIFT);
+               pui32Word[1] = (DATA_HEADER_SIZE << HEADER_WORD1_SIZE_SHIFT) |
+                       (DATA_HEADER_VERSION << HEADER_WORD1_VERSION_SHIFT);
+               pui32Word[2] = ui32DataSize << HEADER_WORD2_DATA_SIZE_SHIFT;
+
+               pui32Word[3] = ui32ElementType << DATA_HEADER_WORD3_ELEMENT_TYPE_SHIFT;
+               pui32Word[4] = ui32ElementCount << DATA_HEADER_WORD4_ELEMENT_COUNT_SHIFT;
+       }
+
+       if (ui32HeaderType == IBIN_HEADER_TYPE)
+       {
+               pui32Word[0] = (ui32HeaderType << HEADER_WORD0_TYPE_SHIFT);
+               pui32Word[1] = (IBIN_HEADER_SIZE << HEADER_WORD1_SIZE_SHIFT) |
+                       (IBIN_HEADER_VERSION << HEADER_WORD1_VERSION_SHIFT);
+               pui32Word[2] = ui32DataSize << HEADER_WORD2_DATA_SIZE_SHIFT;
+       }
+
+       return PVRSRV_OK;
+}
+#endif /* PDUMP */
+
+/******************************************************************************
+ End of file (rgxpdump.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxpower.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxpower.c
new file mode 100644 (file)
index 0000000..1a2a09e
--- /dev/null
@@ -0,0 +1,1628 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific power routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific power management functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(__linux__)
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+#include "rgxpower.h"
+#include "rgxinit.h"
+#include "rgx_fwif_km.h"
+#include "rgxfwutils.h"
+#include "pdump_km.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+#include "rgxdebug.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "rgxtimecorr.h"
+#include "devicemem_utils.h"
+#include "htbserver.h"
+#include "rgxstartstop.h"
+#include "rgxfwimageutils.h"
+#include "sync.h"
+#include "rgxdefs_km.h"
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+#if defined(SUPPORT_LINUX_DVFS)
+#include "pvr_dvfs_device.h"
+#endif
+#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP)
+#include "oskm_apphint.h"
+#endif
+
+static PVRSRV_ERROR RGXFWNotifyHostTimeout(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       RGXFWIF_KCCB_CMD sCmd;
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32CmdKCCBSlot;
+
+       /* Send the Timeout notification to the FW */
+       sCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
+       sCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ;
+       sCmd.uCmdData.sPowData.uPowerReqData.ePowRequestType = RGXFWIF_POWER_HOST_TIMEOUT;
+
+       eError = RGXSendCommandAndGetKCCBSlot(psDevInfo,
+                                             &sCmd,
+                                             PDUMP_FLAGS_NONE,
+                                             &ui32CmdKCCBSlot);
+
+       return eError;
+}
+
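+/* Fold the time elapsed since the last firmware utilisation update into the per-state counters, under the GPU utilisation lock */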
+static void _RGXUpdateGPUUtilStats(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb;
+       IMG_UINT64 *paui64StatsCounters;
+       IMG_UINT64 ui64LastPeriod;
+       IMG_UINT64 ui64LastState;
+       IMG_UINT64 ui64LastTime;
+       IMG_UINT64 ui64TimeNow;
+
+       psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
+       paui64StatsCounters = &psUtilFWCb->aui64StatsCounters[0];
+
+       OSLockAcquire(psDevInfo->hGPUUtilLock);
+
+       ui64TimeNow = RGXFWIF_GPU_UTIL_GET_TIME(RGXTimeCorrGetClockns64(psDevInfo->psDeviceNode));
+
+       /* Update counters to account for the time since the last update */
+       ui64LastState  = RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64LastWord);
+       ui64LastTime   = RGXFWIF_GPU_UTIL_GET_TIME(psUtilFWCb->ui64LastWord);
+       ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime);
+       paui64StatsCounters[ui64LastState] += ui64LastPeriod;
+
+       /* Update state and time of the latest update */
+       psUtilFWCb->ui64LastWord = RGXFWIF_GPU_UTIL_MAKE_WORD(ui64TimeNow, ui64LastState);
+
+       OSLockRelease(psDevInfo->hGPUUtilLock);
+}
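+
+/* Note on the bookkeeping above (illustrative description; the authoritative
+ * layout is whatever the RGXFWIF_GPU_UTIL_* macros define): the FW and the
+ * driver share a single 64-bit "last word" that packs the timestamp in the
+ * upper bits and the GPU state in the lowest bits, so one 64-bit store
+ * publishes both values consistently. Conceptually:
+ *
+ *     word  = (time & ~STATE_MASK) | (state & STATE_MASK);
+ *     state =  word &  STATE_MASK;
+ *     time  =  word & ~STATE_MASK;
+ *
+ * _RGXUpdateGPUUtilStats() charges the period elapsed since the last word was
+ * written to the counter of the state recorded in that word, then re-stamps
+ * the word with the current time while keeping the state unchanged.
+ */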
+
+static INLINE PVRSRV_ERROR RGXDoStop(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR eError;
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION)
+       PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig;
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+       if (psDevConfig->pfnTDRGXStop == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXPrePowerState: TDRGXStop not implemented!"));
+               return PVRSRV_ERROR_NOT_IMPLEMENTED;
+       }
+
+       eError = psDevConfig->pfnTDRGXStop(psDevConfig->hSysData);
+#else
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+       eError = RGXStop(&psDevInfo->sLayerParams);
+#endif
+
+       return eError;
+}
+
+/*
+       RGXPrePowerState
+*/
+PVRSRV_ERROR RGXPrePowerState(IMG_HANDLE                               hDevHandle,
+                              PVRSRV_DEV_POWER_STATE   eNewPowerState,
+                              PVRSRV_DEV_POWER_STATE   eCurrentPowerState,
+                              PVRSRV_POWER_FLAGS               ePwrFlags)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVRSRV_DEVICE_NODE    *psDeviceNode = hDevHandle;
+
+       if ((eNewPowerState != eCurrentPowerState) &&
+           (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON))
+       {
+               PVRSRV_RGXDEV_INFO    *psDevInfo = psDeviceNode->pvDevice;
+               RGXFWIF_KCCB_CMD      sPowCmd;
+               IMG_UINT32            ui32CmdKCCBSlot;
+
+               const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+
+               /* Send the Power off request to the FW */
+               sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
+               sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_OFF_REQ;
+               sPowCmd.uCmdData.sPowData.uPowerReqData.bForced = BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED);
+
+               eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim",
+                                       __func__));
+                       return eError;
+               }
+
+               eError = RGXSendCommandAndGetKCCBSlot(psDevInfo,
+                                                     &sPowCmd,
+                                                     PDUMP_FLAGS_NONE,
+                                                     &ui32CmdKCCBSlot);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Failed to send Power off request",
+                                       __func__));
+                       return eError;
+               }
+
+               /* Wait for the firmware to complete processing. PVRSRVWaitForValueKM cannot be used here
+                  as it relies on the EventObject which is signalled in this MISR */
+               eError = RGXPollForGPCommandCompletion(psDeviceNode,
+                                                                 psDevInfo->psPowSyncPrim->pui32LinAddr,
+                                                                 0x1, 0xFFFFFFFF);
+
+               /* Check the Power state after the answer */
+               if (eError == PVRSRV_OK)
+               {
+                       /* Finally, de-initialise some registers. */
+                       if (psFwSysData->ePowState == RGXFWIF_POW_OFF)
+                       {
+#if !defined(NO_HARDWARE)
+                               IMG_UINT32 ui32idx;
+
+                               /* Driver takes the VZ Fw-KM connection down, preventing the
+                                * firmware from submitting further interrupts */
+                               KM_SET_OS_CONNECTION(OFFLINE, psDevInfo);
+
+#if defined(RGX_FW_IRQ_OS_COUNTERS)
+                               ui32idx = RGXFW_HOST_OS;
+#else
+                               for_each_irq_cnt(ui32idx)
+#endif /* RGX_FW_IRQ_OS_COUNTERS */
+                               {
+                                       IMG_UINT32 ui32IrqCnt;
+
+                                       get_irq_cnt_val(ui32IrqCnt, ui32idx, psDevInfo);
+
+                                       /* Wait for any pending FW-processor-to-host interrupts to be processed. */
+                                       eError = PVRSRVPollForValueKM(psDeviceNode,
+                                                                     (IMG_UINT32 __iomem *)&psDevInfo->aui32SampleIRQCount[ui32idx],
+                                                                     ui32IrqCnt,
+                                                                     0xffffffff,
+                                                                     POLL_FLAG_LOG_ERROR);
+
+                                       if (eError != PVRSRV_OK)
+                                       {
+                                               PVR_DPF((PVR_DBG_ERROR,
+                                                               "%s: Wait for pending interrupts failed (DevID %u)." MSG_IRQ_CNT_TYPE " %u Host: %u, FW: %u",
+                                                               __func__,
+                                                               psDeviceNode->sDevId.ui32InternalID,
+                                                               ui32idx,
+                                                               psDevInfo->aui32SampleIRQCount[ui32idx],
+                                                               ui32IrqCnt));
+
+                                               RGX_WaitForInterruptsTimeout(psDevInfo);
+                                       }
+                               }
+#endif /* NO_HARDWARE */
+
+                               /* Update GPU frequency and timer correlation related data */
+                               RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_POWER);
+
+                               /* Update GPU state counters */
+                               _RGXUpdateGPUUtilStats(psDevInfo);
+
+#if defined(SUPPORT_LINUX_DVFS)
+                               eError = SuspendDVFS(psDeviceNode);
+                               if (eError != PVRSRV_OK)
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s: Failed to suspend DVFS", __func__));
+                                       return eError;
+                               }
+#endif
+
+                               psDevInfo->bRGXPowered = IMG_FALSE;
+
+                               eError = RGXDoStop(psDeviceNode);
+                               if (eError != PVRSRV_OK)
+                               {
+                                       /* Power-down failures are logged but treated as successful, since the power was removed anyway. */
+                                       PVR_DPF((PVR_DBG_WARNING, "%s: RGXDoStop failed (%s)",
+                                                       __func__, PVRSRVGetErrorString(eError)));
+                                       psDevInfo->ui32ActivePMReqNonIdle++;
+                                       eError = PVRSRV_OK;
+                               }
+                       }
+                       else
+                       {
+                               /* the sync was updated but the pow state isn't off -> the FW denied the transition */
+                               eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED;
+
+                               if (BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED))
+                               {       /* It is an error for a forced request to be denied */
+                                       PVR_DPF((PVR_DBG_ERROR,
+                                                        "%s: Failure to power off during a forced power off. FW: %d",
+                                                        __func__, psFwSysData->ePowState));
+                               }
+                       }
+               }
+               else if (eError == PVRSRV_ERROR_TIMEOUT)
+               {
+                       /* timeout waiting for the FW to ack the request: return timeout */
+                       PVR_DPF((PVR_DBG_WARNING,
+                                        "%s: Timeout waiting for power off ack from the FW",
+                                        __func__));
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Error waiting for power off ack from the FW (%s)",
+                                        __func__, PVRSRVGetErrorString(eError)));
+                       eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE;
+               }
+       }
+
+       return eError;
+}
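+
+/* Power-off sequence summary (descriptive note only): RGXPrePowerState() sends
+ * a RGXFWIF_POW_OFF_REQ command to the FW, waits on the power sync prim for the
+ * FW to acknowledge it, and verifies that the FW power state is actually OFF.
+ * It then takes the Fw-KM connection offline, waits for outstanding
+ * FW-to-host interrupts, closes the timer correlation period, updates the
+ * utilisation counters, suspends DVFS where supported and finally calls
+ * RGXDoStop() to stop the core.
+ */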
+
+#if defined(SUPPORT_AUTOVZ)
+static PVRSRV_ERROR _RGXWaitForGuestsToDisconnect(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_ERROR eError = PVRSRV_ERROR_TIMEOUT;
+       IMG_UINT32 ui32FwTimeout = (20 * SECONDS_TO_MICROSECONDS);
+
+       LOOP_UNTIL_TIMEOUT(ui32FwTimeout)
+       {
+               IMG_UINT32 ui32OSid;
+               IMG_BOOL bGuestOnline = IMG_FALSE;
+
+               for (ui32OSid = RGXFW_GUEST_OSID_START;
+                        ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++)
+               {
+                       RGXFWIF_CONNECTION_FW_STATE eGuestState = (RGXFWIF_CONNECTION_FW_STATE)
+                                       psDevInfo->psRGXFWIfFwSysData->asOsRuntimeFlagsMirror[ui32OSid].bfOsState;
+
+                       if ((eGuestState == RGXFW_CONNECTION_FW_ACTIVE) ||
+                               (eGuestState == RGXFW_CONNECTION_FW_OFFLOADING))
+                       {
+                               bGuestOnline = IMG_TRUE;
+                               PVR_DPF((PVR_DBG_WARNING, "%s: Guest OS %u still online.", __func__, ui32OSid));
+                       }
+               }
+
+               if (!bGuestOnline)
+               {
+                       /* Allow Guests to finish reading Connection state registers before disconnecting. */
+                       OSSleepms(100);
+
+                       PVR_DPF((PVR_DBG_WARNING, "%s: All Guest connections are down. "
+                                                                         "Host can power down the GPU.", __func__));
+                       eError = PVRSRV_OK;
+                       break;
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "%s: Waiting for Guests to disconnect "
+                                                                         "before powering down GPU.", __func__));
+
+                       if (PVRSRVPwrLockIsLockedByMe(psDeviceNode))
+                       {
+                               /* Don't wait with the power lock held as this prevents the vz
+                                * watchdog thread from keeping the fw-km connection alive. */
+                               PVRSRVPowerUnlock(psDeviceNode);
+                       }
+               }
+
+               OSSleepms(10);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       if (!PVRSRVPwrLockIsLockedByMe(psDeviceNode))
+       {
+               /* Take back power lock after waiting for Guests */
+               eError = PVRSRVPowerLock(psDeviceNode);
+       }
+
+       return eError;
+}
+#endif /* defined(SUPPORT_AUTOVZ) */
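+
+/* A minimal sketch of the polling pattern used above, assuming the driver's
+ * LOOP_UNTIL_TIMEOUT()/END_LOOP_UNTIL_TIMEOUT() macros behave like a bounded
+ * retry loop over a microsecond budget (the exact expansion lives in the
+ * services headers and may differ):
+ *
+ *     for (elapsed_us = 0; elapsed_us < timeout_us; elapsed_us += step_us)
+ *     {
+ *         if (condition_met())
+ *             break;
+ *     }
+ *
+ * _RGXWaitForGuestsToDisconnect() additionally drops the power lock while it
+ * waits, so the vz watchdog thread can keep the fw-km connection alive, and
+ * re-acquires it before returning.
+ */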
+
+/*
+       RGXVzPrePowerState
+*/
+PVRSRV_ERROR RGXVzPrePowerState(IMG_HANDLE                             hDevHandle,
+                                PVRSRV_DEV_POWER_STATE eNewPowerState,
+                                PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+                                PVRSRV_POWER_FLAGS             ePwrFlags)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+
+       PVR_LOG_RETURN_IF_FALSE((eNewPowerState != eCurrentPowerState), "no power change", eError);
+
+       if (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON)
+       {
+               /* powering down */
+#if defined(SUPPORT_AUTOVZ)
+               if (PVRSRV_VZ_MODE_IS(HOST) && (!psDeviceNode->bAutoVzFwIsUp))
+               {
+                       /* The Host must ensure all Guest drivers have disconnected from the GPU before powering it down.
+                        * Guest drivers regularly access hardware registers during runtime. If an attempt is made to
+                        * access a GPU register while the GPU is down, the SoC might lock up. */
+                       eError = _RGXWaitForGuestsToDisconnect(psDeviceNode);
+                       PVR_LOG_RETURN_IF_ERROR(eError, "_RGXWaitForGuestsToDisconnect");
+
+                       /* Temporarily restore all power callbacks used by the driver to fully power down the GPU.
+                        * Under AutoVz, power transition requests (e.g. on driver deinitialisation and unloading)
+                        * are generally ignored and the GPU power state is unaffected. Special power requests like
+                        * those triggered by Suspend/Resume calls must reinstate the callbacks when needed. */
+                       PVRSRVSetPowerCallbacks(psDeviceNode, psDeviceNode->psPowerDev,
+                                                                       &RGXVzPrePowerState, &RGXVzPostPowerState,
+                                                                       psDeviceNode->psDevConfig->pfnPrePowerState,
+                                                                       psDeviceNode->psDevConfig->pfnPostPowerState,
+                                                                       &RGXForcedIdleRequest, &RGXCancelForcedIdleRequest);
+               }
+               else
+               {
+                       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+                       if (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) &&
+                               KM_OS_CONNECTION_IS(ACTIVE, psDevInfo))
+                       {
+                               eError = RGXFWSetFwOsState(psDevInfo, 0, RGXFWIF_OS_OFFLINE);
+                               PVR_LOG_RETURN_IF_ERROR(eError, "RGXFWSetFwOsState");
+                       }
+               }
+#endif
+               PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering down: bAutoVzFwIsUp = %s",
+                                                               __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST",
+                                                               psDeviceNode->bAutoVzFwIsUp ? "TRUE" : "FALSE"));
+       }
+       else if (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON)
+       {
+               /* powering up */
+               PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering up: bAutoVzFwIsUp = %s",
+                                                               __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST",
+                                                               psDeviceNode->bAutoVzFwIsUp ? "TRUE" : "FALSE"));
+
+       }
+
+       if (!(PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp)))
+       {
+               /* call regular device power function */
+               eError = RGXPrePowerState(hDevHandle, eNewPowerState, eCurrentPowerState, ePwrFlags);
+       }
+
+       return eError;
+}
+
+/*
+       RGXVzPostPowerState
+*/
+PVRSRV_ERROR RGXVzPostPowerState(IMG_HANDLE                            hDevHandle,
+                                 PVRSRV_DEV_POWER_STATE        eNewPowerState,
+                                 PVRSRV_DEV_POWER_STATE        eCurrentPowerState,
+                                 PVRSRV_POWER_FLAGS            ePwrFlags)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       PVR_LOG_RETURN_IF_FALSE((eNewPowerState != eCurrentPowerState), "no power change", eError);
+
+       if (!(PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp)))
+       {
+               /* call regular device power function */
+               eError = RGXPostPowerState(hDevHandle, eNewPowerState, eCurrentPowerState, ePwrFlags);
+       }
+
+       if (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON)
+       {
+               /* powering down */
+               PVR_LOG_RETURN_IF_FALSE((!psDeviceNode->bAutoVzFwIsUp), "AutoVz Fw active, power not changed", eError);
+               PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering down: bAutoVzFwIsUp = %s",
+                                                               __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST",
+                                                               psDeviceNode->bAutoVzFwIsUp ? "TRUE" : "FALSE"));
+
+#if !defined(SUPPORT_AUTOVZ_HW_REGS)
+               /* The connection states must be reset on a GPU power cycle. If the states are kept
+                * in hardware scratch registers, they will be cleared on power down. When using shared
+                * memory the connection data must be explicitly cleared by the driver. */
+               OSCachedMemSetWMB(psDevInfo->psRGXFWIfConnectionCtl, 0, sizeof(RGXFWIF_CONNECTION_CTL));
+#endif /* !defined(SUPPORT_AUTOVZ_HW_REGS) */
+
+               if (PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp))
+               {
+#if defined(SUPPORT_AUTOVZ)
+                       /* AutoVz Guests attempting to suspend have updated their connections earlier in RGXVzPrePowerState.
+                        * Skip this redundant register write, as the Host could have powered down the GPU by now. */
+                       if (psDeviceNode->bAutoVzFwIsUp)
+#endif
+                       {
+                               /* Take the VZ connection down to prevent firmware from submitting further interrupts */
+                               KM_SET_OS_CONNECTION(OFFLINE, psDevInfo);
+                       }
+                       /* Power transition callbacks were not executed, update RGXPowered flag here */
+                       psDevInfo->bRGXPowered = IMG_FALSE;
+               }
+       }
+       else if (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON)
+       {
+               /* powering up */
+               IMG_UINT32 ui32FwTimeout = (3 * SECONDS_TO_MICROSECONDS);
+               volatile IMG_BOOL *pbUpdatedFlag = &psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated;
+
+               PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering up: bAutoVzFwIsUp = %s",
+                                                               __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST",
+                                                               psDeviceNode->bAutoVzFwIsUp ? "TRUE" : "FALSE"));
+               if (PVRSRV_VZ_MODE_IS(GUEST))
+               {
+                       /* Guests don't execute the power transition callbacks, so update their RGXPowered flag here */
+                       psDevInfo->bRGXPowered = IMG_TRUE;
+
+#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
+                       /* Guest drivers expect the firmware to have set its end of the
+                        * connection to Ready state by now. Poll indefinitely otherwise. */
+                       if (!KM_FW_CONNECTION_IS(READY, psDevInfo))
+                       {
+                               PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is not in Ready state. Waiting for Firmware ...", __func__));
+                       }
+                       while (!KM_FW_CONNECTION_IS(READY, psDevInfo))
+                       {
+                               OSSleepms(10);
+                       }
+                       PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is Ready. Initialisation proceeding.", __func__));
+#endif /* RGX_VZ_STATIC_CARVEOUT_FW_HEAPS */
+
+                       /* Guests can only access the register holding the connection states,
+                        * after the GPU is confirmed to be powered up */
+                       KM_SET_OS_CONNECTION(READY, psDevInfo);
+
+                       OSWriteDeviceMem32WithWMB(pbUpdatedFlag, IMG_FALSE);
+
+                       /* Kick an initial dummy command to make the firmware initialise all
+                        * its internal guest OS data structures and compatibility information.
+                        * Use the lower-level RGXSendCommandAndGetKCCBSlot() for the job, to make
+                        * sure only 1 KCCB command is issued to the firmware.
+                        * The default RGXFWHealthCheckCmd() prefaces each HealthCheck command with
+                        * a pre-kick cache command which can interfere with the FW-KM init handshake. */
+                       {
+                               RGXFWIF_KCCB_CMD sCmpKCCBCmd;
+                               sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_HEALTH_CHECK;
+
+                               eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, &sCmpKCCBCmd, PDUMP_FLAGS_CONTINUOUS, NULL);
+                               PVR_LOG_RETURN_IF_ERROR(eError, "RGXSendCommandAndGetKCCBSlot()");
+                       }
+               }
+               else
+               {
+                       KM_SET_OS_CONNECTION(READY, psDevInfo);
+
+                       /* Disable power callbacks that should not be run on virtualised drivers after the GPU
+                        * is fully initialised: system layer pre/post functions and driver idle requests.
+                        * The original device RGX Pre/Post functions are called from this Vz wrapper. */
+                       PVRSRVSetPowerCallbacks(psDeviceNode, psDeviceNode->psPowerDev,
+                                                                       &RGXVzPrePowerState, &RGXVzPostPowerState,
+                                                                       NULL, NULL, NULL, NULL);
+
+#if defined(SUPPORT_AUTOVZ)
+                       /* During first-time boot the flag is set here, while subsequent reboots will already
+                        * have set it earlier in RGXInit. Set to true from this point onwards in any case. */
+                       psDeviceNode->bAutoVzFwIsUp = IMG_TRUE;
+#endif
+               }
+
+               /* Wait for the firmware to accept and enable the connection with this OS by setting its state to Active */
+               while (!KM_FW_CONNECTION_IS(ACTIVE, psDevInfo))
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is not in Active state. Waiting for Firmware ...", __func__));
+                       OSSleepms(100);
+               }
+               PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is Active. Initialisation proceeding.", __func__));
+
+               /* poll on the Firmware supplying the compatibility data */
+               LOOP_UNTIL_TIMEOUT(ui32FwTimeout)
+               {
+                       if (*pbUpdatedFlag)
+                       {
+                               break;
+                       }
+                       OSSleepms(10);
+               } END_LOOP_UNTIL_TIMEOUT();
+
+               PVR_LOG_RETURN_IF_FALSE(*pbUpdatedFlag, "Firmware did not respond with compatibility data", PVRSRV_ERROR_TIMEOUT);
+
+               KM_SET_OS_CONNECTION(ACTIVE, psDevInfo);
+       }
+
+       return PVRSRV_OK;
+}
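+
+/* Power-up handshake summary (descriptive note only): on power up the KM first
+ * marks its end of the shared connection READY (Guests additionally wait for
+ * the FW end to reach READY when static carveout FW heaps are used), then
+ * waits for the FW to move its end to ACTIVE, and finally marks its own end
+ * ACTIVE once the FW has supplied the compatibility data. The
+ * KM_SET_OS_CONNECTION() and KM_FW_CONNECTION_IS() helpers update and inspect
+ * the two ends of this connection state.
+ */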
+
+#if defined(TRACK_FW_BOOT)
+static INLINE void RGXCheckFWBootStage(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       FW_BOOT_STAGE eStage;
+
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+       {
+               /* Boot stage temporarily stored to the register below */
+               eStage = OSReadHWReg32(psDevInfo->pvRegsBaseKM,
+                                      RGX_FW_BOOT_STAGE_REGISTER);
+       }
+       else
+#endif
+#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+       {
+               eStage = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_SCRATCH14);
+       }
+       else
+#endif
+       {
+               IMG_BYTE *pbBootData;
+
+               if (PVRSRV_OK != DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc,
+                                                         (void**)&pbBootData))
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Could not acquire pointer to FW boot stage", __func__));
+                       eStage = FW_BOOT_STAGE_NOT_AVAILABLE;
+               }
+               else
+               {
+                       pbBootData += RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA);
+
+                       eStage = *(FW_BOOT_STAGE*)&pbBootData[RGXMIPSFW_BOOT_STAGE_OFFSET];
+
+                       if (eStage == FW_BOOT_STAGE_TLB_INIT_FAILURE)
+                       {
+                               RGXMIPSFW_BOOT_DATA *psBootData =
+                                       (RGXMIPSFW_BOOT_DATA*) (pbBootData + RGXMIPSFW_BOOTLDR_CONF_OFFSET);
+
+                               PVR_LOG(("MIPS TLB could not be initialised. Boot data info:"
+                                                " num PT pages %u, log2 PT page size %u, PT page addresses"
+                                                " %"IMG_UINT64_FMTSPECx " %"IMG_UINT64_FMTSPECx
+                                                " %"IMG_UINT64_FMTSPECx " %"IMG_UINT64_FMTSPECx,
+                                                psBootData->ui32PTNumPages,
+                                                psBootData->ui32PTLog2PageSize,
+                                                psBootData->aui64PTPhyAddr[0U],
+                                                psBootData->aui64PTPhyAddr[1U],
+                                                psBootData->aui64PTPhyAddr[2U],
+                                                psBootData->aui64PTPhyAddr[3U]));
+                       }
+
+                       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+               }
+       }
+
+       PVR_LOG(("%s: FW reached boot stage %i/%i.",
+                __func__, eStage, FW_BOOT_INIT_DONE));
+}
+#endif
+
+static INLINE PVRSRV_ERROR RGXDoStart(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR eError;
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION)
+       PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig;
+
+       if (psDevConfig->pfnTDRGXStart == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: TDRGXStart not implemented!"));
+               return PVRSRV_ERROR_NOT_IMPLEMENTED;
+       }
+
+       eError = psDevConfig->pfnTDRGXStart(psDevConfig->hSysData);
+#else
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       eError = RGXStart(&psDevInfo->sLayerParams);
+#endif
+
+       return eError;
+}
+
+
+#if defined(NO_HARDWARE) && defined(PDUMP)
+
+#if 0
+#include "emu_cr_defs.h"
+#else
+#define EMU_CR_SYSTEM_IRQ_STATUS                          (0x00E0U)
+/* The IRQ field is officially defined as bits [8..0], but here we split out the old, deprecated single-IRQ bit. */
+#define EMU_CR_SYSTEM_IRQ_STATUS_IRQ_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFFFFFE01))
+#define EMU_CR_SYSTEM_IRQ_STATUS_OLD_IRQ_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#endif
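+
+/* For reference, these are "clear" masks; the IRQ-status poll in
+ * _ValidateIrqs() below uses the inverted IRQ mask as both value and mask:
+ *     ~EMU_CR_SYSTEM_IRQ_STATUS_IRQ_CLRMSK     == 0x1FE  (IRQ bits [8..1])
+ *     ~EMU_CR_SYSTEM_IRQ_STATUS_OLD_IRQ_CLRMSK == 0x1    (legacy single IRQ, bit 0)
+ */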
+
+static PVRSRV_ERROR
+_ValidateIrqs(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       IMG_UINT32 ui32OSid;
+       PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+
+       /* Check if the Validation IRQ flag is set */
+       if ((psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags & RGXFWIF_INICFG_VALIDATE_IRQ) == 0)
+       {
+               return PVRSRV_OK;
+       }
+
+       PDUMPIF(psDevInfo->psDeviceNode, "IMG_PVR_TESTBENCH", ui32PDumpFlags);
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags,
+                             "Poll for TB irq status to be set (irqs signalled)...");
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_TB_PDUMPREG_NAME,
+                   EMU_CR_SYSTEM_IRQ_STATUS,
+                   ~EMU_CR_SYSTEM_IRQ_STATUS_IRQ_CLRMSK,
+                   ~EMU_CR_SYSTEM_IRQ_STATUS_IRQ_CLRMSK,
+                   ui32PDumpFlags,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags,
+                             "... and then clear them");
+       for (ui32OSid = 0; ui32OSid < RGXFW_MAX_NUM_OS; ui32OSid++)
+       {
+               PDUMPREG32(psDevInfo->psDeviceNode,
+                          RGX_PDUMPREG_NAME,
+                          RGX_CR_IRQ_OS0_EVENT_CLEAR + ui32OSid * 0x10000,
+                          RGX_CR_IRQ_OS0_EVENT_CLEAR_MASKFULL,
+                          ui32PDumpFlags);
+       }
+
+       PDUMPFI(psDevInfo->psDeviceNode, "IMG_PVR_TESTBENCH", ui32PDumpFlags);
+
+       /* Poll on all the interrupt status registers for all OSes */
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags,
+                             "Validate Interrupt lines.");
+
+       for (ui32OSid = 0; ui32OSid < RGXFW_MAX_NUM_OS; ui32OSid++)
+       {
+               PDUMPREGPOL(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME,
+                           RGX_CR_IRQ_OS0_EVENT_STATUS + ui32OSid * 0x10000,
+                           0x0,
+                           0xFFFFFFFF,
+                           ui32PDumpFlags,
+                           PDUMP_POLL_OPERATOR_EQUAL);
+       }
+
+       return PVRSRV_OK;
+}
+#endif /* defined(NO_HARDWARE) && defined(PDUMP) */
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE)
+/*
+ * To validate the MTS unit we do the following:
+ *  - Immediately after firmware loading for each OSID
+ *    - Write the OSid to a memory location shared with FW
+ *    - Kick the register of that OSid
+ *         (Uncounted, DM 0)
+ *    - FW clears the memory location if OSid matches
+ *    - Host checks that memory location is cleared
+ *
+ *  See firmware/devices/rgx/rgxfw_bg.c
+ */
+static PVRSRV_ERROR RGXVirtualisationPowerupSidebandTest(PVRSRV_DEVICE_NODE     *psDeviceNode,
+                                                                                                                RGXFWIF_SYSINIT *psFwSysInit,
+                                                                                                                PVRSRV_RGXDEV_INFO      *psDevInfo)
+{
+       IMG_UINT32 ui32ScheduleRegister;
+       IMG_UINT32 ui32OSid;
+       IMG_UINT32 ui32KickType;
+       IMG_UINT32 ui32OsRegBanksMapped = (psDeviceNode->psDevConfig->ui32RegsSize / RGX_VIRTUALISATION_REG_SIZE_PER_OS);
+
+       /* Nothing to do if the device does not support GPU_VIRTUALISATION */
+       if (!PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, GPU_VIRTUALISATION))
+       {
+               return PVRSRV_OK;
+       }
+
+       PVR_DPF((PVR_DBG_MESSAGE, "Testing per-os kick registers:"));
+
+       ui32OsRegBanksMapped = MIN(ui32OsRegBanksMapped, GPUVIRT_VALIDATION_NUM_OS);
+
+       if (ui32OsRegBanksMapped != RGXFW_MAX_NUM_OS)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "The register bank mapped into kernel VA does not cover all OSes' registers:"));
+               PVR_DPF((PVR_DBG_WARNING, "Maximum OS count = %d / Per-os register banks mapped = %d", RGXFW_MAX_NUM_OS, ui32OsRegBanksMapped));
+               PVR_DPF((PVR_DBG_WARNING, "Only the first %d MTS registers will be tested", ui32OsRegBanksMapped));
+       }
+
+       ui32KickType = RGX_CR_MTS_SCHEDULE_DM_DM0 | RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED;
+
+       for (ui32OSid = 0; ui32OSid < ui32OsRegBanksMapped; ui32OSid++)
+       {
+               /* set Test field */
+               psFwSysInit->ui32OSKickTest = (ui32OSid << RGXFWIF_KICK_TEST_OSID_SHIFT) | RGXFWIF_KICK_TEST_ENABLED_BIT;
+
+#if defined(PDUMP)
+               DevmemPDumpLoadMem(psDevInfo->psRGXFWIfSysInitMemDesc,
+                                                  offsetof(RGXFWIF_SYSINIT, ui32OSKickTest),
+                                                  sizeof(psFwSysInit->ui32OSKickTest),
+                                                  PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+               /* Force a read-back to memory to avoid posted writes on certain buses */
+               OSWriteMemoryBarrier(&psFwSysInit->ui32OSKickTest);
+
+               /* kick register */
+               ui32ScheduleRegister = RGX_CR_MTS_SCHEDULE + (ui32OSid * RGX_VIRTUALISATION_REG_SIZE_PER_OS);
+               PVR_DPF((PVR_DBG_MESSAGE, "  Testing OS: %u, Kick Reg: %X",
+                                ui32OSid,
+                                ui32ScheduleRegister));
+               OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32ScheduleRegister, ui32KickType);
+               OSMemoryBarrier((IMG_BYTE*) psDevInfo->pvRegsBaseKM + ui32ScheduleRegister);
+
+#if defined(PDUMP)
+               PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "VZ sideband test, kicking MTS register %u", ui32OSid);
+
+               PDUMPREG32(psDeviceNode, RGX_PDUMPREG_NAME,
+                               ui32ScheduleRegister, ui32KickType, PDUMP_FLAGS_CONTINUOUS);
+
+               DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfSysInitMemDesc,
+                                                          offsetof(RGXFWIF_SYSINIT, ui32OSKickTest),
+                                                          0,
+                                                          0xFFFFFFFF,
+                                                          PDUMP_POLL_OPERATOR_EQUAL,
+                                                          PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+               /* Wait for the test enable bit to be cleared by the firmware */
+               if (PVRSRVPollForValueKM(psDeviceNode,
+                                                                (IMG_UINT32 *)&psFwSysInit->ui32OSKickTest,
+                                                                0,
+                                                                RGXFWIF_KICK_TEST_ENABLED_BIT,
+                                                                POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP) != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "Testing OS %u kick register failed: firmware did not clear test location (contents: 0x%X)",
+                                        ui32OSid,
+                                        psFwSysInit->ui32OSKickTest));
+
+                       return PVRSRV_ERROR_TIMEOUT;
+               }
+
+               /* Check that the value is what we expect */
+               if (psFwSysInit->ui32OSKickTest != 0)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "Testing OS %u kick register failed: firmware wrote 0x%X to test location",
+                                        ui32OSid,
+                                        psFwSysInit->ui32OSKickTest));
+                       return PVRSRV_ERROR_INIT_FAILURE;
+               }
+
+               PVR_DPF((PVR_DBG_MESSAGE, "    PASS"));
+       }
+
+       PVR_LOG(("MTS passed sideband tests"));
+       return PVRSRV_OK;
+}
+#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE) */
+
+#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP)
+#define SCRATCH_VALUE  (0x12345678U)
+
+static void RGXRiscvDebugModuleTest(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       void *pvAppHintState = NULL;
+       IMG_UINT32 ui32AppHintDefault = 0;
+       IMG_BOOL bRunRiscvDmiTest;
+
+       IMG_UINT32 *pui32FWCode = NULL;
+       PVRSRV_ERROR eError;
+
+       OSCreateKMAppHintState(&pvAppHintState);
+       OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, RiscvDmiTest,
+                          &ui32AppHintDefault, &bRunRiscvDmiTest);
+       OSFreeKMAppHintState(pvAppHintState);
+
+       if (bRunRiscvDmiTest == IMG_FALSE)
+       {
+               return;
+       }
+
+       eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, (void **)&pui32FWCode);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Error acquiring FW code memory pointer (%s)",
+                        __func__,
+                        PVRSRVGetErrorString(eError)));
+       }
+
+       PDumpIfKM(psDevInfo->psDeviceNode, "ENABLE_RISCV_DMI_TEST", PDUMP_FLAGS_CONTINUOUS);
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "DMI_TEST BEGIN");
+
+       RGXRiscvHalt(psDevInfo);
+
+       /*
+        * Test RISC-V register reads/writes.
+        * RGXRiscv[Write/Poll]Reg are used to access internal RISC-V registers
+        * via debug module.
+        */
+
+       /* Write RISC-V mscratch register */
+       RGXRiscvWriteReg(psDevInfo, RGXRISCVFW_MSCRATCH_ADDR, SCRATCH_VALUE);
+       /* Read RISC-V misa register (compare against default standard value) */
+       RGXRiscvPollReg(psDevInfo,  RGXRISCVFW_MISA_ADDR,     RGXRISCVFW_MISA_VALUE);
+       /* Read RISC-V mscratch register (compare against previously written value) */
+       RGXRiscvPollReg(psDevInfo,  RGXRISCVFW_MSCRATCH_ADDR, SCRATCH_VALUE);
+
+       /*
+        * Test RISC-V memory reads/writes.
+        * RGXRiscv[Write/Poll]Mem are used to access system memory via debug module
+        * (from RISC-V point of view).
+        */
+
+       if (pui32FWCode != NULL)
+       {
+               IMG_UINT32 ui32Tmp;
+
+               /* Acquire pointer to FW code (bootloader) */
+               pui32FWCode += RGXGetFWImageSectionOffset(NULL, RISCV_UNCACHED_CODE) / sizeof(IMG_UINT32);
+               /* Save FW code at address (bootloader) */
+               ui32Tmp = *pui32FWCode;
+
+               /* Write FW code at address (bootloader) */
+               RGXWriteFWModuleAddr(psDevInfo, RGXRISCVFW_BOOTLDR_CODE_BASE,     SCRATCH_VALUE);
+               /* Read FW code at address (bootloader + 4) (compare against value read from Host) */
+               RGXRiscvPollMem(psDevInfo,  RGXRISCVFW_BOOTLDR_CODE_BASE + 4, *(pui32FWCode + 1));
+               /* Read FW code at address (bootloader) (compare against previously written value) */
+               RGXRiscvPollMem(psDevInfo,  RGXRISCVFW_BOOTLDR_CODE_BASE,     SCRATCH_VALUE);
+               /* Restore FW code at address (bootloader) */
+               RGXWriteFWModuleAddr(psDevInfo, RGXRISCVFW_BOOTLDR_CODE_BASE,     ui32Tmp);
+
+               DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc);
+       }
+
+       /*
+        * Test GPU register reads/writes.
+        * RGXRiscv[Write/Poll]Mem are used to access GPU registers via debug module
+        * (from RISC-V point of view).
+        * Note that system memory and GPU register accesses both use the same
+        * debug module interface, targeting different address ranges.
+        */
+
+       /* Write SCRATCH0 from the Host */
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_SCRATCH0,
+                  SCRATCH_VALUE, PDUMP_FLAGS_CONTINUOUS);
+       /* Read SCRATCH0 */
+       RGXRiscvPollMem(psDevInfo,  RGXRISCVFW_SOCIF_BASE | RGX_CR_SCRATCH0, SCRATCH_VALUE);
+       /* Write SCRATCH0 */
+       RGXWriteFWModuleAddr(psDevInfo, RGXRISCVFW_SOCIF_BASE | RGX_CR_SCRATCH0, ~SCRATCH_VALUE);
+       /* Read SCRATCH0 from the Host */
+       PDUMPREGPOL(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_SCRATCH0,
+                   ~SCRATCH_VALUE, 0xFFFFFFFFU,
+                   PDUMP_FLAGS_CONTINUOUS, PDUMP_POLL_OPERATOR_EQUAL);
+
+       RGXRiscvResume(psDevInfo);
+
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "DMI_TEST END");
+       PDumpFiKM(psDevInfo->psDeviceNode, "ENABLE_RISCV_DMI_TEST", PDUMP_FLAGS_CONTINUOUS);
+}
+#endif
+
+/*
+       RGXPostPowerState
+*/
+PVRSRV_ERROR RGXPostPowerState(IMG_HANDLE                              hDevHandle,
+                               PVRSRV_DEV_POWER_STATE  eNewPowerState,
+                               PVRSRV_DEV_POWER_STATE  eCurrentPowerState,
+                               PVRSRV_POWER_FLAGS              ePwrFlags)
+{
+       PVRSRV_DEVICE_NODE       *psDeviceNode = hDevHandle;
+       PVRSRV_RGXDEV_INFO       *psDevInfo = psDeviceNode->pvDevice;
+
+       if ((eNewPowerState != eCurrentPowerState) &&
+           (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON))
+       {
+               PVRSRV_ERROR             eError;
+
+               if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF)
+               {
+                       /* Update timer correlation related data */
+                       RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_POWER);
+
+                       /* Update GPU state counters */
+                       _RGXUpdateGPUUtilStats(psDevInfo);
+
+                       eError = RGXDoStart(psDeviceNode);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: RGXDoStart failed"));
+                               return eError;
+                       }
+
+                       OSMemoryBarrier(NULL);
+
+                       /*
+                        * Check whether the FW has started by polling on bFirmwareStarted flag
+                        */
+                       if (PVRSRVPollForValueKM(psDeviceNode,
+                                                (IMG_UINT32 __iomem *)&psDevInfo->psRGXFWIfSysInit->bFirmwareStarted,
+                                                IMG_TRUE,
+                                                0xFFFFFFFF,
+                                                POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP) != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: Polling for 'FW started' flag failed."));
+                               eError = PVRSRV_ERROR_TIMEOUT;
+
+#if defined(TRACK_FW_BOOT)
+                               RGXCheckFWBootStage(psDevInfo);
+#endif
+
+                               /*
+                                * When the bFirmwareStarted poll fails, the following debug dump may provide
+                                * useful information, but it is potentially dangerous if the reason the
+                                * firmware did not boot is that the GPU power is not ON. However, since we
+                                * have reached this point with the System Layer returning without errors,
+                                * we assume the GPU power is indeed ON.
+                                */
+                               RGXDumpRGXDebugSummary(NULL, NULL, psDeviceNode->pvDevice, IMG_TRUE);
+                               RGXDumpRGXRegisters(NULL, NULL, psDeviceNode->pvDevice);
+
+                               return eError;
+                       }
+
+#if defined(PDUMP)
+                       PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "Wait for the Firmware to start.");
+                       eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfSysInitMemDesc,
+                                                       offsetof(RGXFWIF_SYSINIT, bFirmwareStarted),
+                                                       IMG_TRUE,
+                                                       0xFFFFFFFFU,
+                                                       PDUMP_POLL_OPERATOR_EQUAL,
+                                                       PDUMP_FLAGS_CONTINUOUS);
+
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                        "RGXPostPowerState: problem pdumping POL for psRGXFWIfSysInitMemDesc (%d)",
+                                        eError));
+                               return eError;
+                       }
+
+#if defined(NO_HARDWARE) && defined(PDUMP)
+                       eError = _ValidateIrqs(psDevInfo);
+                       if (eError != PVRSRV_OK)
+                       {
+                               return eError;
+                       }
+#endif
+#endif
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE)
+                       eError = RGXVirtualisationPowerupSidebandTest(psDeviceNode, psDevInfo->psRGXFWIfSysInit, psDevInfo);
+                       if (eError != PVRSRV_OK)
+                       {
+                               return eError;
+                       }
+#endif
+
+#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP)
+                       RGXRiscvDebugModuleTest(psDevInfo);
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+                       SetFirmwareStartTime(psDevInfo->psRGXFWIfSysInit->ui32FirmwareStartedTimeStamp);
+#endif
+
+                       HTBSyncPartitionMarker(psDevInfo->psRGXFWIfSysInit->ui32MarkerVal);
+
+                       psDevInfo->bRGXPowered = IMG_TRUE;
+
+#if defined(SUPPORT_LINUX_DVFS)
+                       eError = ResumeDVFS(psDeviceNode);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: Failed to resume DVFS"));
+                               return eError;
+                       }
+#endif
+               }
+       }
+
+       PDUMPCOMMENT(psDeviceNode,
+                    "RGXPostPowerState: Current state: %d, New state: %d",
+                    eCurrentPowerState, eNewPowerState);
+
+       return PVRSRV_OK;
+}
+
+/*
+       RGXPreClockSpeedChange
+*/
+PVRSRV_ERROR RGXPreClockSpeedChange(IMG_HANDLE                         hDevHandle,
+                                    PVRSRV_DEV_POWER_STATE     eCurrentPowerState)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+       const PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       const RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData;
+       const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+       PVR_UNREFERENCED_PARAMETER(psRGXData);
+
+       PVR_DPF((PVR_DBG_MESSAGE, "RGXPreClockSpeedChange: RGX clock speed was %uHz",
+                       psRGXData->psRGXTimingInfo->ui32CoreClockSpeed));
+
+       if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF) &&
+           (psFwSysData->ePowState != RGXFWIF_POW_OFF))
+       {
+               /* Update GPU frequency and timer correlation related data */
+               RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_DVFS);
+       }
+
+       return eError;
+}
+
+/*
+       RGXPostClockSpeedChange
+*/
+PVRSRV_ERROR RGXPostClockSpeedChange(IMG_HANDLE                                hDevHandle,
+                                     PVRSRV_DEV_POWER_STATE    eCurrentPowerState)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       const RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData;
+       const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_UINT32 ui32NewClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+       /* Update runtime configuration with the new value */
+       OSWriteDeviceMem32WithWMB(&psDevInfo->psRGXFWIfRuntimeCfg->ui32CoreClockSpeed,
+                                 ui32NewClockSpeed);
+
+       if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF) &&
+           (psFwSysData->ePowState != RGXFWIF_POW_OFF))
+       {
+               RGXFWIF_KCCB_CMD sCOREClkSpeedChangeCmd;
+               IMG_UINT32 ui32CmdKCCBSlot;
+
+               RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_DVFS);
+
+               sCOREClkSpeedChangeCmd.eCmdType = RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE;
+               sCOREClkSpeedChangeCmd.uCmdData.sCoreClkSpeedChangeData.ui32NewClockSpeed = ui32NewClockSpeed;
+
+               PDUMPCOMMENT(psDeviceNode, "Scheduling CORE clock speed change command");
+
+               PDUMPPOWCMDSTART(psDeviceNode);
+               eError = RGXSendCommandAndGetKCCBSlot(psDeviceNode->pvDevice,
+                                                     &sCOREClkSpeedChangeCmd,
+                                                     PDUMP_FLAGS_NONE,
+                                                     &ui32CmdKCCBSlot);
+               PDUMPPOWCMDEND(psDeviceNode);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PDUMPCOMMENT(psDeviceNode, "Scheduling CORE clock speed change command failed");
+                       PVR_DPF((PVR_DBG_ERROR, "RGXPostClockSpeedChange: Scheduling KCCB command failed. Error:%u", eError));
+                       return eError;
+               }
+
+               PVR_DPF((PVR_DBG_MESSAGE, "RGXPostClockSpeedChange: RGX clock speed changed to %uHz",
+                               psRGXData->psRGXTimingInfo->ui32CoreClockSpeed));
+       }
+
+       return eError;
+}
+
+/*!
+ ******************************************************************************
+
+ @Function     RGXDustCountChange
+
+ @Description
+
+       Changes the number of DUSTs
+
+ @Input           hDevHandle : RGX Device Node
+ @Input           ui32NumberOfDusts : Number of DUSTs to transition to
+
+ @Return   PVRSRV_ERROR :
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXDustCountChange(IMG_HANDLE             hDevHandle,
+                                IMG_UINT32             ui32NumberOfDusts)
+{
+
+       PVRSRV_DEVICE_NODE      *psDeviceNode = hDevHandle;
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_ERROR            eError;
+       RGXFWIF_KCCB_CMD        sDustCountChange;
+       IMG_UINT32                      ui32MaxAvailableDusts = psDevInfo->sDevFeatureCfg.ui32MAXDustCount;
+       IMG_UINT32                      ui32CmdKCCBSlot;
+       RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+       if (ui32NumberOfDusts > ui32MaxAvailableDusts)
+       {
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Invalid number of DUSTs (%u) while expecting value within <0,%u>. Error:%u",
+                               __func__,
+                               ui32NumberOfDusts,
+                               ui32MaxAvailableDusts,
+                               eError));
+               return eError;
+       }
+
+       psRuntimeCfg->ui32DefaultDustsNumInit = ui32NumberOfDusts;
+       OSWriteMemoryBarrier(&psRuntimeCfg->ui32DefaultDustsNumInit);
+
+#if !defined(NO_HARDWARE)
+       {
+               const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+
+               if (psFwSysData->ePowState == RGXFWIF_POW_OFF)
+               {
+                       return PVRSRV_OK;
+               }
+
+               if (psFwSysData->ePowState != RGXFWIF_POW_FORCED_IDLE)
+               {
+                       eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED;
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Attempt to change dust count when not IDLE",
+                                        __func__));
+                       return eError;
+               }
+       }
+#endif
+
+       eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim",
+                               __func__));
+               return eError;
+       }
+
+       sDustCountChange.eCmdType = RGXFWIF_KCCB_CMD_POW;
+       sDustCountChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_NUM_UNITS_CHANGE;
+       sDustCountChange.uCmdData.sPowData.uPowerReqData.ui32NumOfDusts = ui32NumberOfDusts;
+
+       PDUMPCOMMENT(psDeviceNode,
+                    "Scheduling command to change Dust Count to %u",
+                    ui32NumberOfDusts);
+       eError = RGXSendCommandAndGetKCCBSlot(psDeviceNode->pvDevice,
+                                             &sDustCountChange,
+                                             PDUMP_FLAGS_NONE,
+                                             &ui32CmdKCCBSlot);
+
+       if (eError != PVRSRV_OK)
+       {
+               PDUMPCOMMENT(psDeviceNode,
+                            "Scheduling command to change Dust Count failed. Error:%u",
+                            eError);
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Scheduling KCCB to change Dust Count failed. Error:%u",
+                                __func__, eError));
+               return eError;
+       }
+
+       /* Wait for the firmware to answer. */
+       eError = RGXPollForGPCommandCompletion(psDeviceNode,
+                                     psDevInfo->psPowSyncPrim->pui32LinAddr,
+                                                                 0x1, 0xFFFFFFFF);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Timeout waiting for idle request", __func__));
+               return eError;
+       }
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDeviceNode,
+                    "RGXDustCountChange: Poll for Kernel SyncPrim [0x%p] on DM %d",
+                    psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP);
+
+       SyncPrimPDumpPol(psDevInfo->psPowSyncPrim,
+                        1,
+                        0xffffffff,
+                        PDUMP_POLL_OPERATOR_EQUAL,
+                        0);
+#endif
+
+       return PVRSRV_OK;
+}
+
+/*
+ @Function     RGXAPMLatencyChange
+*/
+PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE            hDevHandle,
+                                 IMG_UINT32            ui32ActivePMLatencyms,
+                                 IMG_BOOL              bActivePMLatencyPersistant)
+{
+
+       PVRSRV_DEVICE_NODE      *psDeviceNode = hDevHandle;
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_ERROR            eError;
+       RGXFWIF_RUNTIME_CFG     *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+       IMG_UINT32                      ui32CmdKCCBSlot;
+       PVRSRV_DEV_POWER_STATE  ePowerState;
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+       eError = PVRSRVPowerLock(psDeviceNode);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXAPMLatencyChange: Failed to acquire power lock"));
+               return eError;
+       }
+
+       /* Update runtime configuration with the new values and ensure the
+        * new APM latency is written to memory before requesting the FW to
+        * read it
+        */
+       psRuntimeCfg->ui32ActivePMLatencyms = ui32ActivePMLatencyms;
+       psRuntimeCfg->bActivePMLatencyPersistant = bActivePMLatencyPersistant;
+       OSWriteMemoryBarrier(&psRuntimeCfg->bActivePMLatencyPersistant);
+
+       eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+
+       if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF))
+       {
+               RGXFWIF_KCCB_CMD        sActivePMLatencyChange;
+               sActivePMLatencyChange.eCmdType = RGXFWIF_KCCB_CMD_POW;
+               sActivePMLatencyChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_APM_LATENCY_CHANGE;
+
+               PDUMPCOMMENT(psDeviceNode,
+                            "Scheduling command to change APM latency to %u",
+                            ui32ActivePMLatencyms);
+               eError = RGXSendCommandAndGetKCCBSlot(psDeviceNode->pvDevice,
+                                                     &sActivePMLatencyChange,
+                                                     PDUMP_FLAGS_NONE,
+                                                     &ui32CmdKCCBSlot);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PDUMPCOMMENT(psDeviceNode,
+                                   "Scheduling command to change APM latency failed. Error:%u",
+                                   eError);
+                       PVR_DPF((PVR_DBG_ERROR, "RGXAPMLatencyChange: Scheduling KCCB to change APM latency failed. Error:%u", eError));
+                       goto ErrorExit;
+               }
+       }
+
+ErrorExit:
+       PVRSRVPowerUnlock(psDeviceNode);
+
+       return eError;
+}
+
+/*
+       RGXActivePowerRequest
+*/
+PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVRSRV_DEVICE_NODE      *psDeviceNode = hDevHandle;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+
+       psDevInfo->ui32ActivePMReqTotal++;
+
+       /* Take the power lock to stop further requests racing with the FW
+        * hand-shake from now on (previous kicks up to this point are detected
+        * by the FW). PVRSRVPowerLock is replaced with PVRSRVPowerTryLock to
+        * avoid a potential deadlock between PDumpWriteLock and PowerLock
+        * during 'DriverLive + PDUMP=1 + EnableAPM=1'.
+        */
+       eError = PVRSRVPowerTryLock(psDeviceNode);
+       if (eError != PVRSRV_OK)
+       {
+               if (eError != PVRSRV_ERROR_RETRY)
+               {
+                       PVR_LOG_ERROR(eError, "PVRSRVPowerTryLock");
+               }
+               else
+               {
+                       psDevInfo->ui32ActivePMReqRetry++;
+               }
+               goto _RGXActivePowerRequest_PowerLock_failed;
+       }
+
+       /* Check again for IDLE once we have the power lock */
+       if (psFwSysData->ePowState == RGXFWIF_POW_IDLE)
+       {
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+               SetFirmwareHandshakeIdleTime(RGXReadHWTimerReg(psDevInfo)-psFwSysData->ui64StartIdleTime);
+#endif
+
+               PDUMPPOWCMDSTART(psDeviceNode);
+               eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+                                                    PVRSRV_DEV_POWER_STATE_OFF,
+                                                    PVRSRV_POWER_FLAGS_NONE);
+               PDUMPPOWCMDEND(psDeviceNode);
+
+               if (eError == PVRSRV_OK)
+               {
+                       psDevInfo->ui32ActivePMReqOk++;
+               }
+               else if (eError == PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED)
+               {
+                       psDevInfo->ui32ActivePMReqDenied++;
+               }
+       }
+       else
+       {
+               psDevInfo->ui32ActivePMReqNonIdle++;
+       }
+
+       PVRSRVPowerUnlock(psDeviceNode);
+
+_RGXActivePowerRequest_PowerLock_failed:
+
+       return eError;
+}
+/*
+       RGXForcedIdleRequest
+*/
+
+#define RGX_FORCED_IDLE_RETRY_COUNT 10
+
+PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted)
+{
+       PVRSRV_DEVICE_NODE    *psDeviceNode = hDevHandle;
+       PVRSRV_RGXDEV_INFO    *psDevInfo = psDeviceNode->pvDevice;
+       RGXFWIF_KCCB_CMD      sPowCmd;
+       PVRSRV_ERROR          eError;
+       IMG_UINT32            ui32RetryCount = 0;
+       IMG_UINT32            ui32CmdKCCBSlot;
+#if !defined(NO_HARDWARE)
+       const RGXFWIF_SYSDATA *psFwSysData;
+#endif
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+#if !defined(NO_HARDWARE)
+       psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+
+       /* Firmware already forced idle */
+       if (psFwSysData->ePowState == RGXFWIF_POW_FORCED_IDLE)
+       {
+               return PVRSRV_OK;
+       }
+
+       /* Firmware is not powered. Sometimes this is permitted, for instance when forcing idle in order to power down. */
+       if (psFwSysData->ePowState == RGXFWIF_POW_OFF)
+       {
+               return (bDeviceOffPermitted) ? PVRSRV_OK : PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED;
+       }
+#endif
+
+       eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim",
+                               __func__));
+               return eError;
+       }
+       sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
+       sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ;
+       sPowCmd.uCmdData.sPowData.uPowerReqData.ePowRequestType = RGXFWIF_POWER_FORCE_IDLE;
+
+       PDUMPCOMMENT(psDeviceNode,
+                    "RGXForcedIdleRequest: Sending forced idle command");
+
+       /* Send one forced IDLE command to GP */
+       eError = RGXSendCommandAndGetKCCBSlot(psDevInfo,
+                                             &sPowCmd,
+                                             PDUMP_FLAGS_NONE,
+                                             &ui32CmdKCCBSlot);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to send idle request", __func__));
+               return eError;
+       }
+
+       /* Wait for GPU to finish current workload */
+       do {
+               eError = RGXPollForGPCommandCompletion(psDeviceNode,
+                                                      psDevInfo->psPowSyncPrim->pui32LinAddr,
+                                                      0x1, 0xFFFFFFFF);
+               if ((eError == PVRSRV_OK) || (ui32RetryCount == RGX_FORCED_IDLE_RETRY_COUNT))
+               {
+                       break;
+               }
+               ui32RetryCount++;
+               PVR_DPF((PVR_DBG_WARNING,
+                               "%s: Request timeout. Retry %d of %d",
+                                __func__, ui32RetryCount, RGX_FORCED_IDLE_RETRY_COUNT));
+       } while (IMG_TRUE);
+
+       if (eError != PVRSRV_OK)
+       {
+               RGXFWNotifyHostTimeout(psDevInfo);
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Idle request failed. Firmware potentially left in forced idle state",
+                                __func__));
+               return eError;
+       }
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDeviceNode,
+                    "RGXForcedIdleRequest: Poll for Kernel SyncPrim [0x%p] on DM %d",
+                    psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP);
+
+       SyncPrimPDumpPol(psDevInfo->psPowSyncPrim,
+                        1,
+                        0xffffffff,
+                        PDUMP_POLL_OPERATOR_EQUAL,
+                        0);
+#endif
+
+#if !defined(NO_HARDWARE)
+       /* Check the firmware state for idleness */
+       if (psFwSysData->ePowState != RGXFWIF_POW_FORCED_IDLE)
+       {
+               return PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED;
+       }
+#endif
+
+       return PVRSRV_OK;
+}
+
+/*
+       RGXCancelForcedIdleRequest
+*/
+PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle)
+{
+       PVRSRV_DEVICE_NODE      *psDeviceNode = hDevHandle;
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+       RGXFWIF_KCCB_CMD        sPowCmd;
+       PVRSRV_ERROR            eError = PVRSRV_OK;
+       IMG_UINT32                      ui32CmdKCCBSlot;
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+       eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim",
+                               __func__));
+               goto ErrorExit;
+       }
+
+       /* Send the IDLE request to the FW */
+       sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
+       sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ;
+       sPowCmd.uCmdData.sPowData.uPowerReqData.ePowRequestType = RGXFWIF_POWER_CANCEL_FORCED_IDLE;
+
+       PDUMPCOMMENT(psDeviceNode,
+                    "RGXCancelForcedIdleRequest: Sending cancel forced idle command");
+
+       /* Send cancel forced IDLE command to GP */
+       eError = RGXSendCommandAndGetKCCBSlot(psDevInfo,
+                                             &sPowCmd,
+                                             PDUMP_FLAGS_NONE,
+                                             &ui32CmdKCCBSlot);
+
+       if (eError != PVRSRV_OK)
+       {
+               PDUMPCOMMENT(psDeviceNode,
+                            "RGXCancelForcedIdleRequest: Failed to send cancel IDLE request for DM%d",
+                            RGXFWIF_DM_GP);
+               goto ErrorExit;
+       }
+
+       /* Wait for the firmware to answer. */
+       eError = RGXPollForGPCommandCompletion(psDeviceNode,
+                                              psDevInfo->psPowSyncPrim->pui32LinAddr,
+                                              1, 0xFFFFFFFF);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Timeout waiting for cancel idle request", __func__));
+               goto ErrorExit;
+       }
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDeviceNode,
+                    "RGXCancelForcedIdleRequest: Poll for Kernel SyncPrim [0x%p] on DM %d",
+                    psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP);
+
+       SyncPrimPDumpPol(psDevInfo->psPowSyncPrim,
+                        1,
+                        0xffffffff,
+                        PDUMP_POLL_OPERATOR_EQUAL,
+                        0);
+#endif
+
+       return eError;
+
+ErrorExit:
+       PVR_DPF((PVR_DBG_ERROR, "%s: Firmware potentially left in forced idle state", __func__));
+       return eError;
+}
+
+/*!
+ ******************************************************************************
+
+ @Function     RGXGetNextDustCount
+
+ @Description
+
+       Calculate a sequence of dust counts to achieve full transition coverage.
+       We increment two counts of dusts and switch up and down between them.
+       It contains a few redundant transitions. If two dusts exist, the
+       output transitions should be as follows.
+
+       0->1, 0<-1, 0->2, 0<-2, (0->1)
+       1->1, 1->2, 1<-2, (1->2)
+       2->2, (2->0),
+       0->0. Repeat.
+
+       Redundant transitions in brackets.
+
+ @Input                psDustReqState : Counter state used to calculate next dust count
+ @Input                ui32DustCount : Number of dusts in the core
+
+ @Return       IMG_UINT32 : Next dust count in the sequence
+
+ ******************************************************************************/
+IMG_UINT32 RGXGetNextDustCount(RGX_DUST_STATE *psDustReqState, IMG_UINT32 ui32DustCount)
+{
+       if (psDustReqState->bToggle)
+       {
+               psDustReqState->ui32DustCount2++;
+       }
+
+       if (psDustReqState->ui32DustCount2 > ui32DustCount)
+       {
+               psDustReqState->ui32DustCount1++;
+               psDustReqState->ui32DustCount2 = psDustReqState->ui32DustCount1;
+       }
+
+       if (psDustReqState->ui32DustCount1 > ui32DustCount)
+       {
+               psDustReqState->ui32DustCount1 = 0;
+               psDustReqState->ui32DustCount2 = 0;
+       }
+
+       psDustReqState->bToggle = !psDustReqState->bToggle;
+
+       return (psDustReqState->bToggle) ? psDustReqState->ui32DustCount1 : psDustReqState->ui32DustCount2;
+}
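+
+/* A minimal standalone sketch of the sequence produced above, for
+ * illustration only: it mirrors the toggle/counter logic of
+ * RGXGetNextDustCount with plain C types (assuming IMG_UINT32/IMG_BOOL map
+ * to unsigned/bool) and, for a 2-dust core, prints
+ * 0 1 0 2 0 1 1 2 1 2 2 0 0 ... which yields the transitions listed in the
+ * comment block above, redundant ones included.
+ */
+#if 0 /* illustrative sketch, not built as part of the driver */
+#include <stdbool.h>
+#include <stdio.h>
+
+struct dust_state { unsigned count1, count2; bool toggle; };
+
+static unsigned next_dust_count(struct dust_state *s, unsigned dust_count)
+{
+       if (s->toggle)
+       {
+               s->count2++;
+       }
+       if (s->count2 > dust_count)
+       {
+               s->count1++;
+               s->count2 = s->count1;
+       }
+       if (s->count1 > dust_count)
+       {
+               s->count1 = 0;
+               s->count2 = 0;
+       }
+       s->toggle = !s->toggle;
+       return s->toggle ? s->count1 : s->count2;
+}
+
+int main(void)
+{
+       struct dust_state s = { 0, 0, false };
+       int i;
+
+       for (i = 0; i < 13; i++)
+       {
+               printf("%u ", next_dust_count(&s, 2));
+       }
+       printf("\n");
+       return 0;
+}
+#endif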
+
+/******************************************************************************
+ End of file (rgxpower.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxpower.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxpower.h
new file mode 100644 (file)
index 0000000..a6cd3f2
--- /dev/null
@@ -0,0 +1,286 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX power header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX power
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXPOWER_H)
+#define RGXPOWER_H
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "servicesext.h"
+#include "rgxdevice.h"
+
+
+/*!
+******************************************************************************
+
+ @Function     RGXPrePowerState
+
+ @Description
+
+ Does necessary preparation before a power state transition.
+
+ @Input           hDevHandle : RGX Device Node
+ @Input           eNewPowerState : New power state
+ @Input           eCurrentPowerState : Current power state
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPrePowerState(IMG_HANDLE                               hDevHandle,
+                                                         PVRSRV_DEV_POWER_STATE        eNewPowerState,
+                                                         PVRSRV_DEV_POWER_STATE        eCurrentPowerState,
+                                                         PVRSRV_POWER_FLAGS            ePwrFlags);
+
+/*!
+******************************************************************************
+
+ @Function     RGXPostPowerState
+
+ @Description
+
+ Does necessary processing after a power state transition.
+
+ @Input           hDevHandle : RGX Device Node
+ @Input           eNewPowerState : New power state
+ @Input           eCurrentPowerState : Current power state
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPostPowerState(IMG_HANDLE                              hDevHandle,
+                                                          PVRSRV_DEV_POWER_STATE       eNewPowerState,
+                                                          PVRSRV_DEV_POWER_STATE       eCurrentPowerState,
+                                                          PVRSRV_POWER_FLAGS           ePwrFlags);
+
+/*!
+******************************************************************************
+
+ @Function     RGXVzPrePowerState
+
+ @Description
+
+ Does necessary preparation before a power state transition on a VZ driver.
+
+ @Input           hDevHandle : RGX Device Node
+ @Input           eNewPowerState : New power state
+ @Input           eCurrentPowerState : Current power state
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXVzPrePowerState(IMG_HANDLE                             hDevHandle,
+                                                               PVRSRV_DEV_POWER_STATE  eNewPowerState,
+                                                               PVRSRV_DEV_POWER_STATE  eCurrentPowerState,
+                                                               PVRSRV_POWER_FLAGS              ePwrFlags);
+
+/*!
+******************************************************************************
+
+ @Function     RGXVzPostPowerState
+
+ @Description
+
+ Does necessary processing after a power state transition on a VZ driver.
+
+ @Input           hDevHandle : RGX Device Node
+ @Input           eNewPowerState : New power state
+ @Input           eCurrentPowerState : Current power state
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXVzPostPowerState(IMG_HANDLE                            hDevHandle,
+                                                                PVRSRV_DEV_POWER_STATE eNewPowerState,
+                                                                PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+                                                                PVRSRV_POWER_FLAGS             ePwrFlags);
+
+/*!
+******************************************************************************
+
+ @Function     RGXPreClockSpeedChange
+
+ @Description
+
+       Does processing required before an RGX clock speed change.
+
+ @Input           hDevHandle : RGX Device Node
+ @Input           eCurrentPowerState : Power state of the device
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPreClockSpeedChange(IMG_HANDLE                         hDevHandle,
+                                                                       PVRSRV_DEV_POWER_STATE  eCurrentPowerState);
+
+/*!
+******************************************************************************
+
+ @Function     RGXPostClockSpeedChange
+
+ @Description
+
+       Does processing required after an RGX clock speed change.
+
+ @Input           hDevHandle : RGX Device Node
+ @Input           eCurrentPowerState : Power state of the device
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPostClockSpeedChange(IMG_HANDLE                                hDevHandle,
+                                                                        PVRSRV_DEV_POWER_STATE eCurrentPowerState);
+
+
+/*!
+******************************************************************************
+
+ @Function     RGXDustCountChange
+
+ @Description Change of number of DUSTs
+
+ @Input           hDevHandle : RGX Device Node
+ @Input           ui32NumberOfDusts : Number of DUSTs to make transition to
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXDustCountChange(IMG_HANDLE hDevHandle,
+                                                               IMG_UINT32 ui32NumberOfDusts);
+
+/*!
+******************************************************************************
+
+ @Function     RGXAPMLatencyChange
+
+ @Description
+
+       Changes the wait duration used before firmware indicates IDLE.
+       Reducing this value will cause the firmware to shut off faster and
+       more often but may increase bubbles in GPU scheduling due to the added
+       power management activity. If bActivePMLatencyPersistant is NOT set,
+       the APM latency returns to the system default on power up.
+
+ @Input           hDevHandle : RGX Device Node
+ @Input           ui32ActivePMLatencyms : Number of milliseconds to wait
+ @Input           bActivePMLatencyPersistant : Set to keep the new latency across power cycles
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE                            hDevHandle,
+                               IMG_UINT32                              ui32ActivePMLatencyms,
+                               IMG_BOOL                                bActivePMLatencyPersistant);
+
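+/* A hypothetical call site, for illustration only: lower the APM latency to
+ * 20 ms for the current power cycle (non-persistent, so the system default
+ * is restored on the next power up). It assumes hDevHandle is a valid RGX
+ * device node handle owned by the caller; error handling is reduced to a
+ * log message.
+ */
+#if 0 /* illustrative sketch only */
+static void ExampleLowerAPMLatency(IMG_HANDLE hDevHandle)
+{
+       PVRSRV_ERROR eError;
+
+       eError = RGXAPMLatencyChange(hDevHandle, 20, IMG_FALSE);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "ExampleLowerAPMLatency: failed (%d)", eError));
+       }
+}
+#endif
+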
+/*!
+******************************************************************************
+
+ @Function     RGXActivePowerRequest
+
+ @Description Initiate a handshake with the FW to power off the GPU
+
+ @Input           hDevHandle : RGX Device Node
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle);
+
+/*!
+******************************************************************************
+
+ @Function     RGXForcedIdleRequest
+
+ @Description Initiate a handshake with the FW to idle the GPU
+
+ @Input           hDevHandle : RGX Device Node
+
+ @Input    bDeviceOffPermitted : Set to indicate that the device being off is
+                                 not an error.
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted);
+
+/*!
+******************************************************************************
+
+ @Function     RGXCancelForcedIdleRequest
+
+ @Description Send a request to cancel idle to the firmware.
+
+ @Input           hDevHandle : RGX Device Node
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle);
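+
+/* A hypothetical sketch of the forced-idle bracket, for illustration only:
+ * force the GPU to idle around an operation that requires it and then cancel
+ * the forced idle. It assumes the caller already holds the device power lock;
+ * error handling is reduced to an early return.
+ */
+#if 0 /* illustrative sketch only */
+static PVRSRV_ERROR ExampleWithForcedIdle(IMG_HANDLE hDevHandle)
+{
+       PVRSRV_ERROR eError;
+
+       /* The device already being off is acceptable here, so pass IMG_TRUE */
+       eError = RGXForcedIdleRequest(hDevHandle, IMG_TRUE);
+       if (eError != PVRSRV_OK)
+       {
+               return eError;
+       }
+
+       /* ... perform the operation that requires an idle GPU ... */
+
+       return RGXCancelForcedIdleRequest(hDevHandle);
+}
+#endif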
+
+/*!
+******************************************************************************
+
+ @Function     RGXGetNextDustCount
+
+ @Description
+
+       Calculate a sequence of dust counts to achieve full transition coverage.
+       We increment two counts of dusts and switch up and down between them.
+       It contains a few redundant transitions. If two dusts exist, the
+       output transitions should be as follows.
+
+       0->1, 0<-1, 0->2, 0<-2, (0->1)
+       1->1, 1->2, 1<-2, (1->2)
+       2->2, (2->0),
+       0->0. Repeat.
+
+       Redundant transitions in brackets.
+
+ @Input                psDustState : Counter state used to calculate the next dust count
+ @Input                ui32DustCount : Number of dusts in the core
+
+ @Return       IMG_UINT32 : Next dust count in the sequence
+
+******************************************************************************/
+IMG_UINT32 RGXGetNextDustCount(RGX_DUST_STATE *psDustState, IMG_UINT32 ui32DustCount);
+
+#endif /* RGXPOWER_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxsrvinit.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxsrvinit.c
new file mode 100644 (file)
index 0000000..6025614
--- /dev/null
@@ -0,0 +1,1643 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services initialisation routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "srvinit.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+#include "km_apphint_defs.h"
+#include "htbuffer_types.h"
+#include "htbuffer_init.h"
+
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+
+#include "rgx_fwif_km.h"
+#include "pdump_km.h"
+
+#include "rgxinit.h"
+#include "rgxmulticore.h"
+
+#include "rgx_compat_bvnc.h"
+
+#include "osfunc.h"
+
+#include "rgxdefs_km.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "virt_validation_defs.h"
+#endif
+
+#include "rgx_fwif_hwperf.h"
+#include "rgx_hwperf_table.h"
+
+#include "fwload.h"
+#include "rgxlayer_impl.h"
+#include "rgxfwimageutils.h"
+#include "rgxfwutils.h"
+
+#include "rgx_hwperf.h"
+#include "rgx_bvnc_defs_km.h"
+
+#include "rgxdevice.h"
+
+#include "pvrsrv.h"
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+#include "rgxdevice.h"
+#include "pvrsrv_device.h"
+#endif
+
+#define DRIVER_MODE_HOST               0          /* AppHint value for host driver mode */
+
+#define        HW_PERF_FILTER_DEFAULT         0x00000000 /* Default to no HWPerf */
+#define HW_PERF_FILTER_DEFAULT_ALL_ON  0xFFFFFFFF /* All events */
+
+/* Kernel CCB size */
+
+#if !defined(PVRSRV_RGX_LOG2_KERNEL_CCB_MIN_SIZE)
+#define PVRSRV_RGX_LOG2_KERNEL_CCB_MIN_SIZE 4
+#endif
+#if !defined(PVRSRV_RGX_LOG2_KERNEL_CCB_MAX_SIZE)
+#define PVRSRV_RGX_LOG2_KERNEL_CCB_MAX_SIZE 16
+#endif
+
+#if PVRSRV_APPHINT_KCCB_SIZE_LOG2 < PVRSRV_RGX_LOG2_KERNEL_CCB_MIN_SIZE
+#error PVRSRV_APPHINT_KCCB_SIZE_LOG2 is too low.
+#elif PVRSRV_APPHINT_KCCB_SIZE_LOG2 > PVRSRV_RGX_LOG2_KERNEL_CCB_MAX_SIZE
+#error PVRSRV_APPHINT_KCCB_SIZE_LOG2 is too high.
+#endif
+
+#if defined(SUPPORT_VALIDATION)
+#include "pvrsrv_apphint.h"
+#endif
+
+#include "os_srvinit_param.h"
+#if !defined(__linux__)
+/*!
+*******************************************************************************
+ * AppHint mnemonic data type helper tables
+******************************************************************************/
+/* apphint map of name vs. enable flag */
+static SRV_INIT_PARAM_UINT32_LOOKUP htb_loggroup_tbl[] = {
+#define X(a, b) { #b, HTB_LOG_GROUP_FLAG(a) },
+       HTB_LOG_SFGROUPLIST
+#undef X
+};
+/* apphint map of arg vs. OpMode */
+static SRV_INIT_PARAM_UINT32_LOOKUP htb_opmode_tbl[] = {
+       { "droplatest", HTB_OPMODE_DROPLATEST},
+       { "dropoldest", HTB_OPMODE_DROPOLDEST},
+       /* HTB should never be started in HTB_OPMODE_BLOCK
+        * as this can lead to deadlocks
+        */
+};
+
+static SRV_INIT_PARAM_UINT32_LOOKUP fwt_logtype_tbl[] = {
+       { "trace", 0},
+       { "none", 0}
+#if defined(SUPPORT_TBI_INTERFACE)
+       , { "tbi", 1}
+#endif
+};
+
+static SRV_INIT_PARAM_UINT32_LOOKUP timecorr_clk_tbl[] = {
+       { "mono", 0 },
+       { "mono_raw", 1 },
+       { "sched", 2 }
+};
+
+static SRV_INIT_PARAM_UINT32_LOOKUP fwt_loggroup_tbl[] = { RGXFWIF_LOG_GROUP_NAME_VALUE_MAP };
+
+/*
+ * Services AppHints initialisation
+ */
+#define X(a, b, c, d, e) SrvInitParamInit ## b(a, d, e)
+APPHINT_LIST_ALL
+#undef X
+#endif /* !defined(__linux__) */
+
+/*
+ * Container for all the apphints used by this module
+ */
+typedef struct _RGX_SRVINIT_APPHINTS_
+{
+       IMG_UINT32 ui32DriverMode;
+       IMG_BOOL   bGPUUnitsPowerChange;
+       IMG_BOOL   bEnableSignatureChecks;
+       IMG_UINT32 ui32SignatureChecksBufSize;
+
+       IMG_BOOL   bAssertOnOutOfMem;
+#if defined(SUPPORT_VALIDATION)
+       IMG_BOOL   bValidateIrq;
+       IMG_BOOL   bValidateSOCUSCTimer;
+#endif
+       IMG_BOOL   bAssertOnHWRTrigger;
+#if defined(SUPPORT_VALIDATION)
+       IMG_UINT32 aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_LAST];
+       IMG_UINT32 ui32FBCDCVersionOverride;
+       IMG_UINT32 ui32TFBCCompressionControlGroup;
+       IMG_UINT32 ui32TFBCCompressionControlScheme;
+       IMG_BOOL   bTFBCCompressionControlYUVFormat;
+#endif
+       IMG_BOOL   bCheckMlist;
+       IMG_BOOL   bDisableClockGating;
+       IMG_BOOL   bDisableDMOverlap;
+       IMG_BOOL   bDisableFEDLogging;
+       IMG_BOOL   bDisablePDP;
+       IMG_BOOL   bEnableCDMKillRand;
+       IMG_BOOL   bEnableRandomCsw;
+       IMG_BOOL   bEnableSoftResetCsw;
+       IMG_BOOL   bFilteringMode;
+       IMG_BOOL   bHWPerfDisableCustomCounterFilter;
+       IMG_BOOL   bZeroFreelist;
+       IMG_UINT32 ui32EnableFWContextSwitch;
+       IMG_UINT32 ui32FWContextSwitchProfile;
+
+       IMG_UINT32 ui32HWPerfFWBufSize;
+       IMG_UINT32 ui32HWPerfHostBufSize;
+       IMG_UINT32 ui32HWPerfFilter0;
+       IMG_UINT32 ui32HWPerfFilter1;
+       IMG_UINT32 ui32HWPerfHostFilter;
+       IMG_UINT32 ui32TimeCorrClock;
+       IMG_UINT32 ui32HWRDebugDumpLimit;
+       IMG_UINT32 ui32JonesDisableMask;
+       IMG_UINT32 ui32LogType;
+       IMG_UINT32 ui32TruncateMode;
+       IMG_UINT32 ui32KCCBSizeLog2;
+       FW_PERF_CONF eFirmwarePerf;
+       RGX_ACTIVEPM_CONF eRGXActivePMConf;
+       RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf;
+
+       IMG_BOOL   bEnableTrustedDeviceAceConfig;
+       IMG_UINT32 ui32FWContextSwitchCrossDM;
+#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__)
+       IMG_UINT32 ui32PhysMemTestPasses;
+#endif
+} RGX_SRVINIT_APPHINTS;
+
+/*!
+*******************************************************************************
+
+ @Function      GetApphints
+
+ @Description   Read init time apphints and initialise internal variables
+
+ @Input         psDevInfo : Device info
+ @Input         psHints   : Pointer to apphints container
+
+ @Return        void
+
+******************************************************************************/
+static INLINE void GetApphints(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_SRVINIT_APPHINTS *psHints)
+{
+       void *pvParamState = SrvInitParamOpen();
+       IMG_UINT32 ui32ParamTemp;
+       IMG_BOOL bS7TopInfra = IMG_FALSE, bE42290 = IMG_FALSE, bTPUFiltermodeCtrl = IMG_FALSE;
+       IMG_BOOL bE42606 = IMG_FALSE;
+#if defined(EMULATOR)
+       IMG_BOOL bAXIACELite = IMG_FALSE;
+#endif
+
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+       {
+               bS7TopInfra = IMG_TRUE;
+       }
+#endif
+#if defined(RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TPU_FILTERING_MODE_CONTROL))
+       {
+               bTPUFiltermodeCtrl = IMG_TRUE;
+       }
+#endif
+#if defined(HW_ERN_42290_BIT_MASK)
+       if (RGX_IS_ERN_SUPPORTED(psDevInfo, 42290))
+       {
+               bE42290 = IMG_TRUE;
+       }
+#endif
+#if defined(HW_ERN_42606_BIT_MASK)
+       if (RGX_IS_ERN_SUPPORTED(psDevInfo, 42606))
+       {
+               bE42606 = IMG_TRUE;
+       }
+#endif
+#if defined(HW_FEATURE_AXI_ACELITE_BIT_MASK) && defined(EMULATOR)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACELITE))
+       {
+               bAXIACELite = IMG_TRUE;
+       }
+#endif
+
+       /*
+        * NB AppHints initialised to a default value via SrvInitParamInit* macros above
+        */
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,      pvParamState,    DriverMode,                         psHints->ui32DriverMode);
+       SrvInitParamGetBOOL(psDevInfo->psDeviceNode,    pvParamState,    GPUUnitsPowerChange,          psHints->bGPUUnitsPowerChange);
+       SrvInitParamGetBOOL(INITPARAM_NO_DEVICE,        pvParamState,    EnableSignatureChecks,      psHints->bEnableSignatureChecks);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,      pvParamState,    SignatureChecksBufSize, psHints->ui32SignatureChecksBufSize);
+
+       SrvInitParamGetBOOL(psDevInfo->psDeviceNode,    pvParamState,    AssertOutOfMemory,               psHints->bAssertOnOutOfMem);
+       SrvInitParamGetBOOL(psDevInfo->psDeviceNode,    pvParamState,    AssertOnHWRTrigger,            psHints->bAssertOnHWRTrigger);
+       SrvInitParamGetBOOL(psDevInfo->psDeviceNode,    pvParamState,    CheckMList,                            psHints->bCheckMlist);
+       SrvInitParamGetBOOL(INITPARAM_NO_DEVICE,        pvParamState,    DisableClockGating,            psHints->bDisableClockGating);
+       SrvInitParamGetBOOL(INITPARAM_NO_DEVICE,        pvParamState,    DisableDMOverlap,                psHints->bDisableDMOverlap);
+       SrvInitParamGetBOOL(psDevInfo->psDeviceNode,    pvParamState,    DisableFEDLogging,              psHints->bDisableFEDLogging);
+       SrvInitParamGetUINT32(psDevInfo->psDeviceNode,  pvParamState,    EnableAPM,                                    ui32ParamTemp);
+       psHints->eRGXActivePMConf = ui32ParamTemp;
+       SrvInitParamGetBOOL(INITPARAM_NO_DEVICE,        pvParamState,    EnableCDMKillingRandMode,       psHints->bEnableCDMKillRand);
+       SrvInitParamGetBOOL(INITPARAM_NO_DEVICE,        pvParamState,    EnableRandomContextSwitch,        psHints->bEnableRandomCsw);
+       SrvInitParamGetBOOL(INITPARAM_NO_DEVICE,        pvParamState,    EnableSoftResetContextSwitch,  psHints->bEnableSoftResetCsw);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,      pvParamState,    EnableFWContextSwitch,   psHints->ui32EnableFWContextSwitch);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,      pvParamState,    EnableRDPowerIsland,                          ui32ParamTemp);
+       psHints->eRGXRDPowerIslandConf = ui32ParamTemp;
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,      pvParamState,    FirmwarePerf,                                 ui32ParamTemp);
+       psHints->eFirmwarePerf = ui32ParamTemp;
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,      pvParamState,    FWContextSwitchProfile, psHints->ui32FWContextSwitchProfile);
+       SrvInitParamGetBOOL(INITPARAM_NO_DEVICE,        pvParamState,
+               HWPerfDisableCustomCounterFilter, psHints->bHWPerfDisableCustomCounterFilter);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,      pvParamState,    HWPerfHostBufSizeInKB,       psHints->ui32HWPerfHostBufSize);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,      pvParamState,    HWPerfFWBufSizeInKB,           psHints->ui32HWPerfFWBufSize);
+       SrvInitParamGetUINT32(psDevInfo->psDeviceNode,  pvParamState,    KernelCCBSizeLog2,                psHints->ui32KCCBSizeLog2);
+
+       if (psHints->ui32KCCBSizeLog2 < PVRSRV_RGX_LOG2_KERNEL_CCB_MIN_SIZE)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "KCCB size %u is too low, setting to %u",
+                        psHints->ui32KCCBSizeLog2, PVRSRV_RGX_LOG2_KERNEL_CCB_MIN_SIZE));
+               psHints->ui32KCCBSizeLog2 = PVRSRV_RGX_LOG2_KERNEL_CCB_MIN_SIZE;
+       }
+       else if (psHints->ui32KCCBSizeLog2 > PVRSRV_RGX_LOG2_KERNEL_CCB_MAX_SIZE)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "KCCB size %u is too high, setting to %u",
+                        psHints->ui32KCCBSizeLog2, PVRSRV_RGX_LOG2_KERNEL_CCB_MAX_SIZE));
+               psHints->ui32KCCBSizeLog2 = PVRSRV_RGX_LOG2_KERNEL_CCB_MAX_SIZE;
+       }
+
+#if defined(SUPPORT_VALIDATION)
+       if (psHints->ui32KCCBSizeLog2 != PVRSRV_APPHINT_KCCB_SIZE_LOG2)
+       {
+               PVR_LOG(("KernelCCBSizeLog2 set to %u", psHints->ui32KCCBSizeLog2));
+       }
+#endif
+
+#if defined(__linux__)
+       /* name changes */
+       {
+               IMG_UINT64 ui64Tmp;
+               SrvInitParamGetBOOL(psDevInfo->psDeviceNode,    pvParamState,    DisablePDumpPanic, psHints->bDisablePDP);
+               SrvInitParamGetUINT64(psDevInfo->psDeviceNode,  pvParamState,    HWPerfFWFilter,                 ui64Tmp);
+               psHints->ui32HWPerfFilter0 = (IMG_UINT32)(ui64Tmp & 0xffffffffllu);
+               psHints->ui32HWPerfFilter1 = (IMG_UINT32)((ui64Tmp >> 32) & 0xffffffffllu);
+       }
+#else
+       SrvInitParamUnreferenced(DisablePDumpPanic);
+       SrvInitParamUnreferenced(HWPerfFWFilter);
+       SrvInitParamUnreferenced(RGXBVNC);
+#endif
+       SrvInitParamGetUINT32(psDevInfo->psDeviceNode,      pvParamState,    HWPerfHostFilter, psHints->ui32HWPerfHostFilter);
+       SrvInitParamGetUINT32List(psDevInfo->psDeviceNode,  pvParamState,    TimeCorrClock,       psHints->ui32TimeCorrClock);
+       SrvInitParamGetUINT32(psDevInfo->psDeviceNode,      pvParamState,    HWRDebugDumpLimit,                ui32ParamTemp);
+       psHints->ui32HWRDebugDumpLimit = MIN(ui32ParamTemp, RGXFWIF_HWR_DEBUG_DUMP_ALL);
+
+       if (bS7TopInfra)
+       {
+       #define RGX_CR_JONES_FIX_MT_ORDER_ISP_TE_CLRMSK (0XFFFFFFCFU)
+       #define RGX_CR_JONES_FIX_MT_ORDER_ISP_EN        (0X00000020U)
+       #define RGX_CR_JONES_FIX_MT_ORDER_TE_EN         (0X00000010U)
+
+               SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,      pvParamState,    JonesDisableMask,                 ui32ParamTemp);
+               if (((ui32ParamTemp & ~RGX_CR_JONES_FIX_MT_ORDER_ISP_TE_CLRMSK) == RGX_CR_JONES_FIX_MT_ORDER_ISP_EN) ||
+                       ((ui32ParamTemp & ~RGX_CR_JONES_FIX_MT_ORDER_ISP_TE_CLRMSK) == RGX_CR_JONES_FIX_MT_ORDER_TE_EN))
+               {
+                       ui32ParamTemp |= (RGX_CR_JONES_FIX_MT_ORDER_TE_EN |
+                                                         RGX_CR_JONES_FIX_MT_ORDER_ISP_EN);
+                       PVR_DPF((PVR_DBG_WARNING, "Tile reordering mode requires both TE and ISP enabled. Forcing JonesDisableMask = %d",
+                                       ui32ParamTemp));
+               }
+               psHints->ui32JonesDisableMask = ui32ParamTemp;
+       }
+
+       if ((bE42290) && (bTPUFiltermodeCtrl))
+       {
+               SrvInitParamGetBOOL(INITPARAM_NO_DEVICE,        pvParamState,    NewFilteringMode,       psHints->bFilteringMode);
+       }
+
+       if (bE42606)
+       {
+               SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,      pvParamState,    TruncateMode,         psHints->ui32TruncateMode);
+       }
+#if defined(EMULATOR)
+       if (bAXIACELite)
+       {
+               SrvInitParamGetBOOL(INITPARAM_NO_DEVICE,        pvParamState,    EnableTrustedDeviceAceConfig, psHints->bEnableTrustedDeviceAceConfig);
+       }
+#endif
+
+       SrvInitParamGetBOOL(psDevInfo->psDeviceNode,        pvParamState,    ZeroFreelist,                        psHints->bZeroFreelist);
+
+#if defined(__linux__)
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,          pvParamState,    FWContextSwitchCrossDM, psHints->ui32FWContextSwitchCrossDM);
+#else
+       SrvInitParamUnreferenced(FWContextSwitchCrossDM);
+#endif
+
+#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__)
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,          pvParamState,    PhysMemTestPasses,           psHints->ui32PhysMemTestPasses);
+#endif
+
+#if defined(SUPPORT_VALIDATION)
+       /* Apphints for TPU trilinear frac masking */
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,  pvParamState,  TPUTrilinearFracMaskPDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_PDM]);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,  pvParamState,  TPUTrilinearFracMaskVDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_VDM]);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,  pvParamState,  TPUTrilinearFracMaskCDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_CDM]);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,  pvParamState,  TPUTrilinearFracMaskTDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_TDM]);
+       SrvInitParamGetBOOL(INITPARAM_NO_DEVICE,    pvParamState,  ValidateIrq,                                              psHints->bValidateIrq);
+       SrvInitParamGetBOOL(INITPARAM_NO_DEVICE,    pvParamState,  ValidateSOCUSCTimer,                              psHints->bValidateSOCUSCTimer);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,  pvParamState,  FBCDCVersionOverride,                         psHints->ui32FBCDCVersionOverride);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,  pvParamState,  TFBCCompressionControlGroup,           psHints->ui32TFBCCompressionControlGroup);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,  pvParamState,  TFBCCompressionControlScheme,         psHints->ui32TFBCCompressionControlScheme);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,  pvParamState,  TFBCCompressionControlYUVFormat,      psHints->bTFBCCompressionControlYUVFormat);
+#endif
+
+       /*
+        * FW logs apphints
+        */
+       {
+               IMG_UINT32 ui32LogGroup, ui32TraceOrTBI;
+
+               SrvInitParamGetUINT32BitField(psDevInfo->psDeviceNode,  pvParamState,    EnableLogGroup,      ui32LogGroup);
+               SrvInitParamGetUINT32List(psDevInfo->psDeviceNode,      pvParamState,    FirmwareLogType,   ui32TraceOrTBI);
+
+               /* Defaulting to TRACE */
+               BITMASK_SET(ui32LogGroup, RGXFWIF_LOG_TYPE_TRACE);
+
+#if defined(SUPPORT_TBI_INTERFACE)
+               if (ui32TraceOrTBI == 1 /* TBI */)
+               {
+                       if ((ui32LogGroup & RGXFWIF_LOG_TYPE_GROUP_MASK) == 0)
+                       {
+                               /* No groups configured - defaulting to MAIN group */
+                               BITMASK_SET(ui32LogGroup, RGXFWIF_LOG_TYPE_GROUP_MAIN);
+                       }
+                       BITMASK_UNSET(ui32LogGroup, RGXFWIF_LOG_TYPE_TRACE);
+               }
+#endif
+               psHints->ui32LogType = ui32LogGroup;
+       }
+
+       SrvInitParamClose(pvParamState);
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function      GetFWConfigFlags
+
+ @Description   Initialise and return FW config flags
+
+ @Input         psDeviceNode          : Device node
+ @Input         psHints               : Apphints container
+ @Input         pui32FWConfigFlags    : Pointer to FW config flags
+ @Input         pui32FWConfigFlagsExt : Pointer to extended FW config flags
+ @Input         pui32FwOsCfgFlags     : Pointer to FW OS config flags
+
+ @Return        void
+
+******************************************************************************/
+static INLINE void GetFWConfigFlags(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                    RGX_SRVINIT_APPHINTS *psHints,
+                                    IMG_UINT32 *pui32FWConfigFlags,
+                                    IMG_UINT32 *pui32FWConfigFlagsExt,
+                                    IMG_UINT32 *pui32FwOsCfgFlags)
+{
+#if defined(SUPPORT_VALIDATION)
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+#endif
+       IMG_UINT32 ui32FWConfigFlags = 0;
+       IMG_UINT32 ui32FWConfigFlagsExt = 0;
+
+       if (PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               ui32FWConfigFlags = 0;
+               ui32FWConfigFlagsExt = 0;
+       }
+       else
+       {
+               ui32FWConfigFlags |= psHints->bAssertOnOutOfMem ? RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY : 0;
+               ui32FWConfigFlags |= psHints->bAssertOnHWRTrigger ? RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER : 0;
+               ui32FWConfigFlags |= psHints->bCheckMlist ? RGXFWIF_INICFG_CHECK_MLIST_EN : 0;
+               ui32FWConfigFlags |= psHints->bDisableClockGating ? RGXFWIF_INICFG_DISABLE_CLKGATING_EN : 0;
+               ui32FWConfigFlags |= psHints->bDisableDMOverlap ? RGXFWIF_INICFG_DISABLE_DM_OVERLAP : 0;
+               ui32FWConfigFlags |= psHints->bDisablePDP ? RGXFWIF_INICFG_DISABLE_PDP_EN : 0;
+               ui32FWConfigFlags |= psHints->bEnableCDMKillRand ? RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN : 0;
+               ui32FWConfigFlags |= psHints->bEnableRandomCsw ? RGXFWIF_INICFG_CTXSWITCH_MODE_RAND : 0;
+               ui32FWConfigFlags |= psHints->bEnableSoftResetCsw ? RGXFWIF_INICFG_CTXSWITCH_SRESET_EN : 0;
+               ui32FWConfigFlags |= (psHints->ui32HWPerfFilter0 != 0 || psHints->ui32HWPerfFilter1 != 0) ? RGXFWIF_INICFG_HWPERF_EN : 0;
+               ui32FWConfigFlags |= psHints->bHWPerfDisableCustomCounterFilter ? RGXFWIF_INICFG_HWP_DISABLE_FILTER : 0;
+               ui32FWConfigFlags |= (psHints->ui32FWContextSwitchProfile << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) & RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK;
+
+#if defined(SUPPORT_VALIDATION)
+#if defined(NO_HARDWARE) && defined(PDUMP)
+               ui32FWConfigFlags |= psHints->bValidateIrq ? RGXFWIF_INICFG_VALIDATE_IRQ : 0;
+#endif
+
+               if (psHints->ui32FBCDCVersionOverride > 0)
+               {
+                       ui32FWConfigFlags |= (psHints->ui32FBCDCVersionOverride == 2) ? RGXFWIF_INICFG_FBCDC_V3_1_EN : 0;
+               }
+               else
+#endif /* defined(SUPPORT_VALIDATION) */
+               {
+                       ui32FWConfigFlags |= psDeviceNode->pfnHasFBCDCVersion31(psDeviceNode) ? RGXFWIF_INICFG_FBCDC_V3_1_EN : 0;
+               }
+
+#if defined(SUPPORT_VALIDATION)
+               ui32FWConfigFlags |= psHints->bValidateSOCUSCTimer ? RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER : 0;
+
+               if ((ui32FWConfigFlags & RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER) &&
+                   ((psHints->eRGXActivePMConf != 0) || (psHints->eRGXRDPowerIslandConf != 0)))
+               {
+                       psHints->eRGXActivePMConf = 0;
+                       psHints->eRGXRDPowerIslandConf = 0;
+                       PVR_DPF((PVR_DBG_WARNING, "SoC/USC Timer test needs to run with both EnableAPM and EnableRDPowerIsland disabled.\n"
+                                "Overriding current value for both with new value 0."));
+               }
+
+               if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_LOSSY_37_PERCENT) ||
+                   RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_DELTA_CORRELATION) ||
+                   RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_NATIVE_YUV10))
+               {
+                       ui32FWConfigFlagsExt |=
+                               ((((psHints->ui32TFBCCompressionControlGroup  << RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_SHIFT) &
+                                                                                                                           ~RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_CLRMSK) |
+                                 ((psHints->ui32TFBCCompressionControlScheme << RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_SHIFT) &
+                                                                                                                           ~RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_CLRMSK) |
+                                 ((psHints->bTFBCCompressionControlYUVFormat) ? RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_EN : 0))
+                               << RGXFWIF_INICFG_EXT_TFBC_CONTROL_SHIFT) & RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK;
+               }
+#endif
+       }
+
+       *pui32FWConfigFlags    = ui32FWConfigFlags;
+       *pui32FWConfigFlagsExt = ui32FWConfigFlagsExt;
+       *pui32FwOsCfgFlags     = psHints->ui32FWContextSwitchCrossDM |
+                                (psHints->ui32EnableFWContextSwitch & ~RGXFWIF_INICFG_OS_CTXSWITCH_CLRMSK);
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function      GetFilterFlags
+
+ @Description   Initialise and return filter flags
+
+ @Input         psHints : Apphints container
+
+ @Return        IMG_UINT32 : Filter flags
+
+******************************************************************************/
+static INLINE IMG_UINT32 GetFilterFlags(RGX_SRVINIT_APPHINTS *psHints)
+{
+       IMG_UINT32 ui32FilterFlags = 0;
+
+       ui32FilterFlags |= psHints->bFilteringMode ? RGXFWIF_FILTCFG_NEW_FILTER_MODE : 0;
+       if (psHints->ui32TruncateMode == 2)
+       {
+               ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_INT;
+       }
+       else if (psHints->ui32TruncateMode == 3)
+       {
+               ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_HALF;
+       }
+
+       return ui32FilterFlags;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function      InitDeviceFlags
+
+ @Description   Initialise and return device flags
+
+ @Input         psHints          : Apphints container
+ @Input         pui32DeviceFlags : Pointer to device flags
+
+ @Return        void
+
+******************************************************************************/
+static INLINE void InitDeviceFlags(RGX_SRVINIT_APPHINTS *psHints,
+                                  IMG_UINT32 *pui32DeviceFlags)
+{
+       IMG_UINT32 ui32DeviceFlags = 0;
+
+       ui32DeviceFlags |= psHints->bGPUUnitsPowerChange ? RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN : 0;
+       ui32DeviceFlags |= psHints->bZeroFreelist ? RGXKM_DEVICE_STATE_ZERO_FREELIST : 0;
+       ui32DeviceFlags |= psHints->bDisableFEDLogging ? RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN : 0;
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+       BITMASK_SET(ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN);
+#endif
+
+       *pui32DeviceFlags = ui32DeviceFlags;
+}
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION)
+/*!
+*******************************************************************************
+
+ @Function      RGXTDProcessFWImage
+
+ @Description   Fetch and send data used by the trusted device to complete
+                the FW image setup
+
+ @Input         psDeviceNode : Device node
+ @Input         psRGXFW      : Firmware blob
+ @Input         puFWParams   : Parameters used by the FW at boot time
+
+ @Return        PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXTDProcessFWImage(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                        OS_FW_IMAGE *psRGXFW,
+                                        PVRSRV_FW_BOOT_PARAMS *puFWParams)
+{
+       PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_TD_FW_PARAMS sTDFWParams;
+       RGX_LAYER_PARAMS sLayerParams;
+       PVRSRV_ERROR eError;
+
+       if (psDevConfig->pfnTDSendFWImage == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: TDSendFWImage not implemented!", __func__));
+               return PVRSRV_ERROR_NOT_IMPLEMENTED;
+       }
+
+       sLayerParams.psDevInfo = psDevInfo;
+
+       sTDFWParams.pvFirmware       = OSFirmwareData(psRGXFW);
+       sTDFWParams.ui32FirmwareSize = OSFirmwareSize(psRGXFW);
+
+#if defined(RGX_FEATURE_META_IDX)
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+       {
+               sTDFWParams.uFWP.sMeta = puFWParams->sMeta;
+       }
+       else
+#endif
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+       {
+               sTDFWParams.uFWP.sMips = puFWParams->sMips;
+
+               if (sTDFWParams.uFWP.sMips.ui32FWPageTableNumPages > TD_MAX_NUM_MIPS_PAGETABLE_PAGES)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Number of page table pages %u greater "
+                                        "than what is allowed by the TD interface (%u), FW might "
+                                        "not work properly!", __func__,
+                                        puFWParams->sMips.ui32FWPageTableNumPages,
+                                        TD_MAX_NUM_MIPS_PAGETABLE_PAGES));
+               }
+       }
+       else
+       {
+               sTDFWParams.uFWP.sRISCV = puFWParams->sRISCV;
+       }
+
+       eError = psDevConfig->pfnTDSendFWImage(psDevConfig->hSysData, &sTDFWParams);
+
+       return eError;
+}
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function      RGXAcquireMipsBootldrData
+
+ @Description   Acquire MIPS bootloader data parameters
+
+ @Input         psDeviceNode : Device node
+ @Input         puFWParams   : FW boot parameters
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR RGXAcquireMipsBootldrData(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                              PVRSRV_FW_BOOT_PARAMS *puFWParams)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*) psDeviceNode->pvDevice;
+       MMU_DEVICEATTRIBS *psFWMMUDevAttrs = psDevInfo->psDeviceNode->psFirmwareMMUDevAttrs;
+       IMG_DEV_PHYADDR sAddr;
+       IMG_UINT32 ui32PTSize, i;
+       PVRSRV_ERROR eError;
+       IMG_BOOL bValid;
+
+       /* Rogue Registers physical address */
+#if defined(SUPPORT_ALT_REGBASE)
+       puFWParams->sMips.sGPURegAddr = psDeviceNode->psDevConfig->sAltRegsGpuPBase;
+#else
+       PhysHeapCpuPAddrToDevPAddr(psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL],
+                                  1,
+                                  &puFWParams->sMips.sGPURegAddr,
+                                  &(psDeviceNode->psDevConfig->sRegsCpuPBase));
+#endif
+
+       /* MIPS Page Table physical address */
+       MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sAddr);
+
+       /* MIPS Page Table allocation is contiguous. Pass one or more addresses
+        * to the FW depending on the Page Table size and alignment. */
+
+       ui32PTSize = (psFWMMUDevAttrs->psTopLevelDevVAddrConfig->uiNumEntriesPT)
+                    << RGXMIPSFW_LOG2_PTE_ENTRY_SIZE;
+       ui32PTSize = PVR_ALIGN(ui32PTSize, 1U << psFWMMUDevAttrs->ui32BaseAlign);
+
+       puFWParams->sMips.ui32FWPageTableLog2PageSize = psFWMMUDevAttrs->ui32BaseAlign;
+       puFWParams->sMips.ui32FWPageTableNumPages = ui32PTSize >> psFWMMUDevAttrs->ui32BaseAlign;
+
+       if (puFWParams->sMips.ui32FWPageTableNumPages > 4U)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Page table cannot be mapped by the FW "
+                        "(size 0x%x, log2 page size %u, %u pages)",
+                        __func__, ui32PTSize, puFWParams->sMips.ui32FWPageTableLog2PageSize,
+                        puFWParams->sMips.ui32FWPageTableNumPages));
+               return PVRSRV_ERROR_INIT_FAILURE;
+       }
+
+       /* Confirm page alignment fits in 64-bits */
+       if (psFWMMUDevAttrs->ui32BaseAlign > 63)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Invalid page alignment "
+                        "(psFWMMUDevAttrs->ui32BaseAlign = %u)",
+                        __func__, psFWMMUDevAttrs->ui32BaseAlign));
+               return PVRSRV_ERROR_INIT_FAILURE;
+       }
+
+       for (i = 0; i < puFWParams->sMips.ui32FWPageTableNumPages; i++)
+       {
+               puFWParams->sMips.asFWPageTableAddr[i].uiAddr =
+                   sAddr.uiAddr + i * (1ULL << psFWMMUDevAttrs->ui32BaseAlign);
+       }
+
+       /* MIPS Stack Pointer Physical Address */
+       eError = RGXGetPhyAddr(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR,
+                              &puFWParams->sMips.sFWStackAddr,
+                              RGXGetFWImageSectionOffset(NULL, MIPS_STACK),
+                              OSGetPageShift(),
+                              1,
+                              &bValid);
+
+       return eError;
+}
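+
+/* A standalone arithmetic sketch of the page table splitting above, for
+ * illustration only and using hypothetical values (512 page table entries,
+ * 8-byte PTEs, 4 KB base alignment); the real values come from
+ * psFWMMUDevAttrs. It prints the aligned page table size, the log2 page size
+ * and the number of pages passed to the firmware.
+ */
+#if 0 /* illustrative sketch, not built as part of the driver */
+#include <stdint.h>
+#include <stdio.h>
+
+#define EXAMPLE_ALIGN(v, a) (((v) + ((a) - 1U)) & ~((uint32_t)(a) - 1U))
+
+int main(void)
+{
+       uint32_t ui32NumEntriesPT = 512;  /* hypothetical uiNumEntriesPT */
+       uint32_t ui32Log2PTESize  = 3;    /* hypothetical 8-byte PTE */
+       uint32_t ui32BaseAlign    = 12;   /* hypothetical 4 KB alignment */
+       uint32_t ui32PTSize, ui32NumPages;
+
+       ui32PTSize   = ui32NumEntriesPT << ui32Log2PTESize;           /* 4096 */
+       ui32PTSize   = EXAMPLE_ALIGN(ui32PTSize, 1U << ui32BaseAlign); /* 4096 */
+       ui32NumPages = ui32PTSize >> ui32BaseAlign;                    /* 1 page */
+
+       printf("PT size %u bytes, log2 page size %u, %u page(s)\n",
+              ui32PTSize, ui32BaseAlign, ui32NumPages);
+       return 0;
+}
+#endif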
+
+/*!
+*******************************************************************************
+
+ @Function      InitFirmware
+
+ @Description   Allocate, initialise and pdump Firmware code and data memory
+
+ @Input         psDeviceNode : Device Node
+ @Input         psHints      : Apphints
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                 RGX_SRVINIT_APPHINTS *psHints)
+{
+       OS_FW_IMAGE       *psRGXFW = NULL;
+       const IMG_BYTE    *pbRGXFirmware = NULL;
+
+       /* FW code memory */
+       IMG_DEVMEM_SIZE_T uiFWCodeAllocSize;
+       void              *pvFWCodeHostAddr;
+
+       /* FW data memory */
+       IMG_DEVMEM_SIZE_T uiFWDataAllocSize;
+       void              *pvFWDataHostAddr;
+
+       /* FW coremem code memory */
+       IMG_DEVMEM_SIZE_T uiFWCorememCodeAllocSize;
+       void              *pvFWCorememCodeHostAddr = NULL;
+
+       /* FW coremem data memory */
+       IMG_DEVMEM_SIZE_T uiFWCorememDataAllocSize;
+       void              *pvFWCorememDataHostAddr = NULL;
+
+       PVRSRV_FW_BOOT_PARAMS uFWParams;
+       RGX_LAYER_PARAMS sLayerParams;
+       PVRSRV_ERROR eError;
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
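+       /* With a trusted device, FW data lives in secure memory when the FW
+        * processor is META or RISCV, or MIPS with a physical bus wider than
+        * 32 bits; in that case no CPU mapping of the data allocations is taken. */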
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION)
+       IMG_BOOL bUseSecureFWData =
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+                                   RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) ||
+#endif
+#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK)
+                                   RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR) ||
+#endif
+                                   (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) &&
+                                    RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) > 32);
+#endif
+
+       /*
+        * Get pointer to Firmware image
+        */
+       eError = RGXLoadAndGetFWData(psDeviceNode, &psRGXFW, &pbRGXFirmware);
+
+       if (eError != PVRSRV_OK)
+       {
+               /* Error or confirmation message generated in RGXLoadAndGetFWData */
+               goto fw_load_fail;
+       }
+
+       sLayerParams.psDevInfo = psDevInfo;
+
+       /*
+        * Allocate Firmware memory
+        */
+
+       eError = RGXGetFWImageAllocSize(&sLayerParams,
+                                       pbRGXFirmware,
+                                       OSFirmwareSize(psRGXFW),
+                                       &uiFWCodeAllocSize,
+                                       &uiFWDataAllocSize,
+                                       &uiFWCorememCodeAllocSize,
+                                       &uiFWCorememDataAllocSize);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                       "%s: RGXGetFWImageAllocSize failed",
+                       __func__));
+               goto cleanup_initfw;
+       }
+
+       psDevInfo->ui32FWCodeSizeInBytes = uiFWCodeAllocSize;
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && defined(RGX_FEATURE_META_DMA_BIT_MASK)
+       /* Disable META core memory allocation unless the META DMA is available */
+       if (!RGX_DEVICE_HAS_FEATURE(&sLayerParams, META_DMA))
+       {
+               uiFWCorememCodeAllocSize = 0;
+               uiFWCorememDataAllocSize = 0;
+       }
+#endif
+
+       psDevInfo->ui32FWCorememCodeSizeInBytes = uiFWCorememCodeAllocSize;
+
+       eError = RGXInitAllocFWImgMem(psDeviceNode,
+                                     uiFWCodeAllocSize,
+                                     uiFWDataAllocSize,
+                                     uiFWCorememCodeAllocSize,
+                                     uiFWCorememDataAllocSize);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                       "%s: RGXInitAllocFWImgMem failed (%d)",
+                       __func__,
+                       eError));
+               goto cleanup_initfw;
+       }
+
+       /*
+        * Acquire pointers to Firmware allocations
+        */
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION)
+       eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, &pvFWCodeHostAddr);
+       PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", cleanup_initfw);
+
+#else
+       /* We can't get a pointer to a secure FW allocation from within the DDK */
+       pvFWCodeHostAddr = NULL;
+#endif
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION)
+       if (bUseSecureFWData)
+       {
+               /* We can't get a pointer to a secure FW allocation from within the DDK */
+               pvFWDataHostAddr = NULL;
+       }
+       else
+#endif
+       {
+               eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc, &pvFWDataHostAddr);
+               PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_code);
+       }
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION)
+       if (uiFWCorememCodeAllocSize)
+       {
+               eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc, &pvFWCorememCodeHostAddr);
+               PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_data);
+       }
+#else
+       /* We can't get a pointer to a secure FW allocation from within the DDK */
+       pvFWCorememCodeHostAddr = NULL;
+#endif
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION)
+       if (bUseSecureFWData)
+       {
+               pvFWCorememDataHostAddr = NULL;
+       }
+       else
+#endif
+       if (uiFWCorememDataAllocSize)
+       {
+               eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc, &pvFWCorememDataHostAddr);
+               PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_corememcode);
+       }
+
+       /*
+        * Prepare FW boot parameters
+        */
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+       {
+               eError = RGXAcquireMipsBootldrData(psDeviceNode, &uFWParams);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: RGXAcquireMipsBootldrData failed (%d)",
+                                        __func__, eError));
+                       goto release_fw_allocations;
+               }
+       }
+       else
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+       {
+               uFWParams.sMeta.sFWCodeDevVAddr = psDevInfo->sFWCodeDevVAddrBase;
+               uFWParams.sMeta.sFWDataDevVAddr = psDevInfo->sFWDataDevVAddrBase;
+               uFWParams.sMeta.sFWCorememCodeDevVAddr = psDevInfo->sFWCorememCodeDevVAddrBase;
+               uFWParams.sMeta.sFWCorememCodeFWAddr = psDevInfo->sFWCorememCodeFWAddr;
+               uFWParams.sMeta.uiFWCorememCodeSize = uiFWCorememCodeAllocSize;
+               uFWParams.sMeta.sFWCorememDataDevVAddr = psDevInfo->sFWCorememDataStoreDevVAddrBase;
+               uFWParams.sMeta.sFWCorememDataFWAddr = psDevInfo->sFWCorememDataStoreFWAddr;
+#if defined(RGXFW_META_SUPPORT_2ND_THREAD)
+               uFWParams.sMeta.ui32NumThreads = 2;
+#else
+               uFWParams.sMeta.ui32NumThreads = 1;
+#endif
+       }
+       else
+#endif
+       {
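+               /* With MIPS and META ruled out, the firmware processor is RISCV;
+                * only the coremem code/data addresses and sizes are required. */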
+               uFWParams.sRISCV.sFWCorememCodeDevVAddr = psDevInfo->sFWCorememCodeDevVAddrBase;
+               uFWParams.sRISCV.sFWCorememCodeFWAddr   = psDevInfo->sFWCorememCodeFWAddr;
+               uFWParams.sRISCV.uiFWCorememCodeSize    = uiFWCorememCodeAllocSize;
+
+               uFWParams.sRISCV.sFWCorememDataDevVAddr = psDevInfo->sFWCorememDataStoreDevVAddrBase;
+               uFWParams.sRISCV.sFWCorememDataFWAddr   = psDevInfo->sFWCorememDataStoreFWAddr;
+               uFWParams.sRISCV.uiFWCorememDataSize    = uiFWCorememDataAllocSize;
+       }
+
+
+       /*
+        * Process the Firmware image and setup code and data segments.
+        *
+        * When the trusted device is enabled and the FW code lives
+        * in secure memory we will only setup the data segments here,
+        * while the code segments will be loaded to secure memory
+        * by the trusted device.
+        */
+       if (!psDeviceNode->bAutoVzFwIsUp)
+       {
+               eError = RGXProcessFWImage(&sLayerParams,
+                                                                  pbRGXFirmware,
+                                                                  pvFWCodeHostAddr,
+                                                                  pvFWDataHostAddr,
+                                                                  pvFWCorememCodeHostAddr,
+                                                                  pvFWCorememDataHostAddr,
+                                                                  &uFWParams);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: RGXProcessFWImage failed (%d)",
+                                        __func__, eError));
+                       goto release_fw_allocations;
+               }
+       }
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION)
+       RGXTDProcessFWImage(psDeviceNode, psRGXFW, &uFWParams);
+#endif
+
+
+       /*
+        * PDump Firmware allocations
+        */
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION)
+       PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                             "Dump firmware code image");
+       DevmemPDumpLoadMem(psDevInfo->psRGXFWCodeMemDesc,
+                          0,
+                          uiFWCodeAllocSize,
+                          PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION)
+       if (!bUseSecureFWData)
+#endif
+       {
+               PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                     "Dump firmware data image");
+               DevmemPDumpLoadMem(psDevInfo->psRGXFWDataMemDesc,
+                                  0,
+                                  uiFWDataAllocSize,
+                                  PDUMP_FLAGS_CONTINUOUS);
+       }
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION)
+       if (uiFWCorememCodeAllocSize)
+       {
+               PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                     "Dump firmware coremem code image");
+               DevmemPDumpLoadMem(psDevInfo->psRGXFWCorememCodeMemDesc,
+                                                  0,
+                                                  uiFWCorememCodeAllocSize,
+                                                  PDUMP_FLAGS_CONTINUOUS);
+       }
+#endif
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION)
+       if (!bUseSecureFWData && uiFWCorememDataAllocSize)
+#else
+       if (uiFWCorememDataAllocSize)
+#endif
+       {
+               PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                     "Dump firmware coremem data store image");
+               DevmemPDumpLoadMem(psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+                                                  0,
+                                                  uiFWCorememDataAllocSize,
+                                                  PDUMP_FLAGS_CONTINUOUS);
+       }
+
+       /*
+        * Release Firmware allocations and clean up
+        */
+
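+       /* The release labels below fall through in reverse order of acquisition,
+        * so each error path releases exactly the mappings taken so far. */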
+release_fw_allocations:
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION)
+       if (!bUseSecureFWData && uiFWCorememDataAllocSize)
+#else
+       if (uiFWCorememDataAllocSize)
+#endif
+       {
+               DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc);
+       }
+release_corememcode:
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION)
+       if (uiFWCorememCodeAllocSize)
+       {
+               DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc);
+       }
+#endif
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION)
+release_data:
+#endif
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION)
+       if (!bUseSecureFWData)
+#endif
+       {
+               DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+       }
+
+release_code:
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION)
+       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc);
+#endif
+cleanup_initfw:
+       OSUnloadFirmware(psRGXFW);
+fw_load_fail:
+
+       return eError;
+}
+
+
+#if defined(PDUMP)
+/*!
+*******************************************************************************
+
+ @Function      InitialiseHWPerfCounters
+
+ @Description   Initialise the hardware performance counter configuration and
+                dump it out to pdump, so that it can be modified at a later
+                point.
+
+ @Input         psDeviceNode
+ @Input         pvDevice
+ @Input         psHWPerfDataMemDesc
+ @Input         psHWPerfInitDataInt
+
+ @Return        void
+
+******************************************************************************/
+
+static void InitialiseHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     void *pvDevice,
+                                     DEVMEM_MEMDESC *psHWPerfDataMemDesc,
+                                     RGXFWIF_HWPERF_CTL *psHWPerfInitDataInt)
+{
+       RGXFWIF_HWPERF_CTL_BLK *psHWPerfInitBlkData;
+       RGXFWIF_HWPERF_DA_BLK *psHWPerfInitDABlkData;
+       IMG_UINT32 ui32CntBlkModelLen;
+       const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *asCntBlkTypeModel;
+       const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc;
+       IMG_UINT32 ui32BlockID, ui32BlkCfgIdx, ui32CounterIdx;
+       RGX_HWPERF_CNTBLK_RT_INFO sCntBlkRtInfo;
+
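+       /* RGXGetHWPerfBlockConfig() returns the number of counter block type
+        * descriptors and points asCntBlkTypeModel at the block type model table. */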
+       ui32CntBlkModelLen = RGXGetHWPerfBlockConfig(&asCntBlkTypeModel);
+
+       PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                             "HWPerf Counter Config starts here.");
+
+       for (ui32BlkCfgIdx = 0; ui32BlkCfgIdx < ui32CntBlkModelLen; ui32BlkCfgIdx++)
+       {
+               IMG_UINT32 uiUnit;
+               IMG_BOOL bDirect;
+
+               /* Skip this block type if the core does not have any of these
+                * counter blocks due to core type/BVNC features. */
+               psBlkTypeDesc = &asCntBlkTypeModel[ui32BlkCfgIdx];
+               if (psBlkTypeDesc->pfnIsBlkPresent(psBlkTypeDesc, pvDevice, &sCntBlkRtInfo) == IMG_FALSE)
+               {
+                       continue;
+               }
+
+               /* Program all counters in one block so that counters already on may
+                * be configured off and vice versa. */
+               for (ui32BlockID = psBlkTypeDesc->ui32CntBlkIdBase;
+                        ui32BlockID < psBlkTypeDesc->ui32CntBlkIdBase+sCntBlkRtInfo.ui32NumUnits;
+                        ui32BlockID++)
+               {
+                       PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                             "Unit %d Block : %s",
+                                             ui32BlockID-psBlkTypeDesc->ui32CntBlkIdBase,
+                                             psBlkTypeDesc->pszBlockNameComment);
+
+                       /* Get the block configuration store to update from the global store
+                        * of block configuration. This is used to remember the configuration
+                        * across reconfigurations and core power-on events when APM is active.
+                        * For RGX_FEATURE_HWPERF_OCEANIC layout we have a different
+                        * structure type to decode the HWPerf block. This is indicated by
+                        * the RGX_CNTBLK_ID_DA_MASK bit being set in the block-ID value. */
+
+                       bDirect = (psBlkTypeDesc->ui32IndirectReg == 0U);
+                       uiUnit = ui32BlockID - psBlkTypeDesc->ui32CntBlkIdBase;
+
+                       if ((ui32BlockID & RGX_CNTBLK_ID_DA_MASK) == RGX_CNTBLK_ID_DA_MASK)
+                       {
+                               psHWPerfInitDABlkData = rgxfw_hwperf_get_da_block_ctl(ui32BlockID, psHWPerfInitDataInt);
+
+                               PVR_ASSERT(psHWPerfInitDABlkData);
+
+                               psHWPerfInitDABlkData->eBlockID = ui32BlockID;
+                               PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                             "eBlockID: The Block ID for the layout block. See RGX_HWPERF_CNTBLK_ID for further information.");
+                               DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+                                                       (size_t)&(psHWPerfInitDABlkData->eBlockID) - (size_t)(psHWPerfInitDataInt),
+                                                       psHWPerfInitDABlkData->eBlockID,
+                                                       PDUMP_FLAGS_CONTINUOUS);
+
+                               psHWPerfInitDABlkData->uiEnabled = 0U;
+                               PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                             "uiEnabled: Set to 0x1 if the block needs to be enabled during playback.");
+                               DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+                                                       (size_t)&(psHWPerfInitDABlkData->uiEnabled) - (size_t)(psHWPerfInitDataInt),
+                                                       psHWPerfInitDABlkData->uiEnabled,
+                                                       PDUMP_FLAGS_CONTINUOUS);
+
+                               psHWPerfInitDABlkData->uiNumCounters = 0U;
+                               PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                             "uiNumCounters (X): Specifies the number of valid counters"
+                                             " [0..%d] which follow.", RGX_CNTBLK_COUNTERS_MAX);
+                               DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+                                                       (size_t)&(psHWPerfInitDABlkData->uiNumCounters) - (size_t)(psHWPerfInitDataInt),
+                                                       psHWPerfInitDABlkData->uiNumCounters,
+                                                       PDUMP_FLAGS_CONTINUOUS);
+
+                               for (ui32CounterIdx = 0; ui32CounterIdx < RGX_CNTBLK_COUNTERS_MAX; ui32CounterIdx++)
+                               {
+                                       psHWPerfInitDABlkData->aui32Counters[ui32CounterIdx] = IMG_UINT32_C(0x00000000);
+
+                                       if (bDirect)
+                                       {
+                                               PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                                             "%s_COUNTER_%d",
+                                                             psBlkTypeDesc->pszBlockNameComment,
+                                                             ui32CounterIdx);
+                                       }
+                                       else
+                                       {
+                                               PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                                             "%s%d_COUNTER_%d",
+                                                             psBlkTypeDesc->pszBlockNameComment,
+                                                             uiUnit, ui32CounterIdx);
+                                       }
+
+                                       DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+                                                       (size_t)&(psHWPerfInitDABlkData->aui32Counters[ui32CounterIdx]) - (size_t)(psHWPerfInitDataInt),
+                                                       psHWPerfInitDABlkData->aui32Counters[ui32CounterIdx],
+                                                       PDUMP_FLAGS_CONTINUOUS);
+                               }
+                       }
+                       else
+                       {
+                               psHWPerfInitBlkData = rgxfw_hwperf_get_block_ctl(ui32BlockID, psHWPerfInitDataInt);
+                               /* Assert to check for HWPerf block mis-configuration */
+                               PVR_ASSERT(psHWPerfInitBlkData);
+
+                               psHWPerfInitBlkData->bValid = IMG_TRUE;
+                               PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                             "bValid: This specifies if the layout block is valid for the given BVNC.");
+                               DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+                                                       (size_t)&(psHWPerfInitBlkData->bValid) - (size_t)(psHWPerfInitDataInt),
+                                                       psHWPerfInitBlkData->bValid,
+                                                       PDUMP_FLAGS_CONTINUOUS);
+
+                               psHWPerfInitBlkData->bEnabled = IMG_FALSE;
+                               PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                             "bEnabled: Set to 0x1 if the block needs to be enabled during playback.");
+                               DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+                                                       (size_t)&(psHWPerfInitBlkData->bEnabled) - (size_t)(psHWPerfInitDataInt),
+                                                       psHWPerfInitBlkData->bEnabled,
+                                                       PDUMP_FLAGS_CONTINUOUS);
+
+                               psHWPerfInitBlkData->eBlockID = ui32BlockID;
+                               PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                             "eBlockID: The Block ID for the layout block. See RGX_HWPERF_CNTBLK_ID for further information.");
+                               DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+                                                       (size_t)&(psHWPerfInitBlkData->eBlockID) - (size_t)(psHWPerfInitDataInt),
+                                                       psHWPerfInitBlkData->eBlockID,
+                                                       PDUMP_FLAGS_CONTINUOUS);
+
+                               psHWPerfInitBlkData->uiCounterMask = 0x00;
+                               PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                             "uiCounterMask: Bitmask for selecting the counters that need to be configured. (Bit 0 - counter0, bit 1 - counter1 and so on.)");
+                               DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+                                                       (size_t)&(psHWPerfInitBlkData->uiCounterMask) - (size_t)(psHWPerfInitDataInt),
+                                                       psHWPerfInitBlkData->uiCounterMask,
+                                                       PDUMP_FLAGS_CONTINUOUS);
+
+                               for (ui32CounterIdx = RGX_CNTBLK_COUNTER0_ID; ui32CounterIdx < psBlkTypeDesc->ui8NumCounters; ui32CounterIdx++)
+                               {
+                                       psHWPerfInitBlkData->aui64CounterCfg[ui32CounterIdx] = IMG_UINT64_C(0x0000000000000000);
+
+                                       PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                                     "%s_COUNTER_%d", psBlkTypeDesc->pszBlockNameComment, ui32CounterIdx);
+                                       DevmemPDumpLoadMemValue64(psHWPerfDataMemDesc,
+                                                       (size_t)&(psHWPerfInitBlkData->aui64CounterCfg[ui32CounterIdx]) - (size_t)(psHWPerfInitDataInt),
+                                                       psHWPerfInitBlkData->aui64CounterCfg[ui32CounterIdx],
+                                                       PDUMP_FLAGS_CONTINUOUS);
+
+                               }
+                       }
+               }
+       }
+}
+/*!
+*******************************************************************************
+
+ @Function      InitialiseCustomCounters
+
+ @Description   Initialise the custom counter selection and dump it out to
+                pdump, so that it can be modified at a later point.
+
+ @Input         psDeviceNode
+ @Input         psHWPerfDataMemDesc
+
+ @Return        void
+
+******************************************************************************/
+
+static void InitialiseCustomCounters(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     DEVMEM_MEMDESC *psHWPerfDataMemDesc)
+{
+       IMG_UINT32 ui32CustomBlock, ui32CounterID;
+
+       PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                             "ui32SelectedCountersBlockMask - The Bitmask of the custom counters that are to be selected");
+       DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+                                               offsetof(RGXFWIF_HWPERF_CTL, ui32SelectedCountersBlockMask),
+                                               0,
+                                               PDUMP_FLAGS_CONTINUOUS);
+
+       for (ui32CustomBlock = 0; ui32CustomBlock < RGX_HWPERF_MAX_CUSTOM_BLKS; ui32CustomBlock++)
+       {
+               /*
+                * Some compilers cannot cope with the use of offsetof() below - the specific problem being the use of
+                * a non-const variable in the expression, which it needs to be const. Typical compiler error produced is
+                * "expression must have a constant value".
+                */
+               const IMG_DEVMEM_OFFSET_T uiOffsetOfCustomBlockSelectedCounters
+               = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_HWPERF_CTL *)0)->SelCntr[ui32CustomBlock].ui32NumSelectedCounters);
+
+               PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                     "ui32NumSelectedCounters - The Number of counters selected for this Custom Block: %d",ui32CustomBlock );
+               DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+                                       uiOffsetOfCustomBlockSelectedCounters,
+                                       0,
+                                       PDUMP_FLAGS_CONTINUOUS);
+
+               for (ui32CounterID = 0; ui32CounterID < RGX_HWPERF_MAX_CUSTOM_CNTRS; ui32CounterID++ )
+               {
+                       const IMG_DEVMEM_OFFSET_T uiOffsetOfCustomBlockSelectedCounterIDs
+                       = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_HWPERF_CTL *)0)->SelCntr[ui32CustomBlock].aui32SelectedCountersIDs[ui32CounterID]);
+
+                       PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                             "CUSTOMBLK_%d_COUNTERID_%d",ui32CustomBlock, ui32CounterID);
+                       DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+                                       uiOffsetOfCustomBlockSelectedCounterIDs,
+                                       0,
+                                       PDUMP_FLAGS_CONTINUOUS);
+               }
+       }
+}
+
+/*!
+*******************************************************************************
+
+ @Function      InitialiseAllCounters
+
+ @Description   Initialise HWPerf and custom counters
+
+ @Input         psDeviceNode : Device Node
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR InitialiseAllCounters(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+       RGXFWIF_HWPERF_CTL *psHWPerfInitData;
+       PVRSRV_ERROR eError;
+
+       eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfHWPerfCountersMemDesc, (void **)&psHWPerfInitData);
+       PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", failHWPerfCountersMemDescAqCpuVirt);
+
+       InitialiseHWPerfCounters(psDeviceNode, psDevInfo, psDevInfo->psRGXFWIfHWPerfCountersMemDesc, psHWPerfInitData);
+       InitialiseCustomCounters(psDeviceNode, psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
+
+failHWPerfCountersMemDescAqCpuVirt:
+       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
+
+       return eError;
+}
+#endif /* PDUMP */
+
+/*
+ * _ParseHTBAppHints:
+ *
+ * Generate necessary references to the globally visible AppHints which are
+ * declared in the above #include "km_apphint_defs.h".
+ * Without these local references some compiler tool-chains will treat
+ * unreferenced declarations as fatal errors. This function duplicates the
+ * HTB-specific apphint references which are made in htbserver.c:HTBInit().
+ * However, it makes absolutely *NO* use of these hints.
+ */
+static void
+_ParseHTBAppHints(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       void *pvParamState = NULL;
+       IMG_UINT32 ui32LogType;
+       IMG_BOOL bAnyLogGroupConfigured;
+       IMG_UINT32 ui32BufferSize;
+       IMG_UINT32 ui32OpMode;
+
+       /* Services initialisation parameters */
+       pvParamState = SrvInitParamOpen();
+       if (pvParamState == NULL)
+               return;
+
+       SrvInitParamGetUINT32BitField(INITPARAM_NO_DEVICE,  pvParamState,  EnableHTBLogGroup,   ui32LogType);
+       bAnyLogGroupConfigured = ui32LogType ? IMG_TRUE : IMG_FALSE;
+       SrvInitParamGetUINT32List(INITPARAM_NO_DEVICE,      pvParamState,  HTBOperationMode,     ui32OpMode);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,          pvParamState,  HTBufferSizeInKB, ui32BufferSize);
+
+       SrvInitParamClose(pvParamState);
+}
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+static PVRSRV_ERROR RGXValidateTDHeap(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                         PVRSRV_PHYS_HEAP ePhysHeap,
+                                                                         PHYS_HEAP_USAGE_FLAGS ui32RequiredFlags)
+{
+       PHYS_HEAP *psHeap = psDeviceNode->apsPhysHeap[ePhysHeap];
+       PHYS_HEAP_USAGE_FLAGS ui32HeapFlags = PhysHeapGetFlags(psHeap);
+       PHYS_HEAP_USAGE_FLAGS ui32InvalidFlags = ~(PHYS_HEAP_USAGE_FW_PRIV_DATA | PHYS_HEAP_USAGE_FW_CODE
+                                                                                          | PHYS_HEAP_USAGE_GPU_SECURE);
+
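+       /* A trusted-device heap may only carry the FW_PRIV_DATA, FW_CODE or
+        * GPU_SECURE usage flags; anything else is rejected below. */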
+       PVR_LOG_RETURN_IF_FALSE_VA((ui32HeapFlags & ui32RequiredFlags) != 0,
+                                                          PVRSRV_ERROR_NOT_SUPPORTED,
+                                                          "TD heap is missing required flags. flags: 0x%x / required:0x%x",
+                                                          ui32HeapFlags,
+                                                          ui32RequiredFlags);
+
+       PVR_LOG_RETURN_IF_FALSE_VA((ui32HeapFlags & ui32InvalidFlags) == 0,
+                                                          PVRSRV_ERROR_NOT_SUPPORTED,
+                                                          "TD heap uses invalid flags. flags: 0x%x / invalid:0x%x",
+                                                          ui32HeapFlags,
+                                                          ui32InvalidFlags);
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXValidateTDHeaps(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR eError;
+
+       eError = RGXValidateTDHeap(psDeviceNode, PVRSRV_PHYS_HEAP_FW_PRIV_DATA, PHYS_HEAP_USAGE_FW_PRIV_DATA);
+       PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeap:FW_PRIV_DATA");
+
+       eError = RGXValidateTDHeap(psDeviceNode, PVRSRV_PHYS_HEAP_FW_CODE, PHYS_HEAP_USAGE_FW_CODE);
+       PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeap:FW_CODE");
+
+       eError = RGXValidateTDHeap(psDeviceNode, PVRSRV_PHYS_HEAP_GPU_SECURE, PHYS_HEAP_USAGE_GPU_SECURE);
+       PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeap:GPU_SECURE");
+
+       return PVRSRV_OK;
+}
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function      RGXInit
+
+ @Description   RGX Initialisation
+
+ @Input         psDeviceNode
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR eError;
+
+       /* Services initialisation parameters */
+       RGX_SRVINIT_APPHINTS sApphints = {0};
+       IMG_UINT32 ui32FWConfigFlags, ui32FWConfigFlagsExt, ui32FwOsCfgFlags;
+       IMG_UINT32 ui32DeviceFlags;
+
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+       RGX_LAYER_PARAMS sLayerParams;
+
+       PDUMPCOMMENT(psDeviceNode, "RGX Initialisation Part 1");
+
+       PDUMPCOMMENT(psDeviceNode, "Device Name: %s",
+                    psDeviceNode->psDevConfig->pszName);
+       PDUMPCOMMENT(psDeviceNode, "Device ID: %u (%d)",
+                    psDeviceNode->sDevId.ui32InternalID,
+                    psDeviceNode->sDevId.i32OsDeviceID);
+
+       if (psDeviceNode->psDevConfig->pszVersion)
+       {
+               PDUMPCOMMENT(psDeviceNode, "Device Version: %s",
+                            psDeviceNode->psDevConfig->pszVersion);
+       }
+
+       /* pdump info about the core */
+       PDUMPCOMMENT(psDeviceNode,
+                    "RGX Version Information (KM): %d.%d.%d.%d",
+                    psDevInfo->sDevFeatureCfg.ui32B,
+                    psDevInfo->sDevFeatureCfg.ui32V,
+                    psDevInfo->sDevFeatureCfg.ui32N,
+                    psDevInfo->sDevFeatureCfg.ui32C);
+
+       RGXInitMultiCoreInfo(psDeviceNode);
+
+#if defined(PDUMP)
+       eError = DevmemIntAllocDefBackingPage(psDeviceNode,
+                                             &psDeviceNode->sDummyPage,
+                                             PVR_DUMMY_PAGE_INIT_VALUE,
+                                             DUMMY_PAGE,
+                                             IMG_TRUE);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate dummy page.", __func__));
+               goto cleanup;
+       }
+
+       eError = DevmemIntAllocDefBackingPage(psDeviceNode,
+                                             &psDeviceNode->sDevZeroPage,
+                                             PVR_ZERO_PAGE_INIT_VALUE,
+                                             DEV_ZERO_PAGE,
+                                             IMG_TRUE);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate Zero page.", __func__));
+               goto cleanup;
+       }
+#endif /* defined(PDUMP) */
+
+       sLayerParams.psDevInfo = psDevInfo;
+#if defined(SUPPORT_TRUSTED_DEVICE)
+       eError = RGXValidateTDHeaps(psDeviceNode);
+       PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeaps");
+#endif
+
+#if defined(SUPPORT_AUTOVZ)
+       if (PVRSRV_VZ_MODE_IS(HOST))
+       {
+               /* The RGX_CR_MTS_DM0_INTERRUPT_ENABLE register is always set by the firmware during initialisation
+                * and it provides a good method of determining if the firmware has been booted previously */
+               psDeviceNode->bAutoVzFwIsUp = (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_DM0_INTERRUPT_ENABLE) != 0);
+
+               PVR_LOG(("AutoVz startup check: firmware is %s;",
+                               (psDeviceNode->bAutoVzFwIsUp) ? "already running" : "powered down"));
+       }
+       else if (PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               /* Guest assumes the firmware is always available */
+               psDeviceNode->bAutoVzFwIsUp = IMG_TRUE;
+       }
+       else
+#endif
+       {
+               /* Firmware does not follow the AutoVz life-cycle */
+               psDeviceNode->bAutoVzFwIsUp = IMG_FALSE;
+       }
+
+       if (PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp))
+       {
+               /* set the device power state here as the regular power
+                * callbacks will not be executed on this driver */
+               psDevInfo->bRGXPowered = IMG_TRUE;
+       }
+
+       /* Set which HW Safety Events will be handled by the driver */
+#if defined(RGX_FEATURE_WATCHDOG_TIMER_BIT_MASK)
+       psDevInfo->ui32HostSafetyEventMask |= RGX_IS_FEATURE_SUPPORTED(psDevInfo, WATCHDOG_TIMER) ?
+                                                                                 RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN : 0;
+#endif
+#if defined(RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX)
+       psDevInfo->ui32HostSafetyEventMask |= (RGX_DEVICE_HAS_FEATURE_VALUE(&sLayerParams, ECC_RAMS)
+                                                                                  && (RGX_DEVICE_GET_FEATURE_VALUE(&sLayerParams, ECC_RAMS) > 0)) ?
+                                                                                 RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN : 0;
+#endif
+
+       /* Services initialisation parameters */
+       _ParseHTBAppHints(psDeviceNode);
+       GetApphints(psDevInfo, &sApphints);
+       InitDeviceFlags(&sApphints, &ui32DeviceFlags);
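+       /* The device flags assembled above are consumed by RGXInitDevPart2()
+        * during the second stage of initialisation below. */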
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#if defined(EMULATOR)
+       if ((sApphints.bEnableTrustedDeviceAceConfig) &&
+               (RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACELITE)))
+       {
+               SetTrustedDeviceAceEnabled();
+       }
+#endif
+#endif
+
+       eError = RGXInitCreateFWKernelMemoryContext(psDeviceNode);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create FW kernel memory context (%u)",
+                        __func__, eError));
+               goto cleanup;
+       }
+
+       if (!PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               eError = InitFirmware(psDeviceNode, &sApphints);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: InitFirmware failed (%d)",
+                                        __func__, eError));
+                       goto cleanup;
+               }
+       }
+
+       /*
+        * Setup Firmware initialisation data
+        */
+
+       GetFWConfigFlags(psDeviceNode, &sApphints, &ui32FWConfigFlags, &ui32FWConfigFlagsExt, &ui32FwOsCfgFlags);
+
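+       /* The two 32-bit HWPerf filter apphints are combined into the single
+        * 64-bit filter value expected by the firmware. */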
+       eError = RGXInitFirmware(psDeviceNode,
+                                sApphints.bEnableSignatureChecks,
+                                sApphints.ui32SignatureChecksBufSize,
+                                sApphints.ui32HWPerfFWBufSize,
+                                (IMG_UINT64)sApphints.ui32HWPerfFilter0 |
+                                ((IMG_UINT64)sApphints.ui32HWPerfFilter1 << 32),
+                                ui32FWConfigFlags,
+                                sApphints.ui32LogType,
+                                GetFilterFlags(&sApphints),
+                                sApphints.ui32JonesDisableMask,
+                                sApphints.ui32HWRDebugDumpLimit,
+                                sizeof(RGXFWIF_HWPERF_CTL),
+#if defined(SUPPORT_VALIDATION)
+                                &sApphints.aui32TPUTrilinearFracMask[0],
+#else
+                                NULL,
+#endif
+                                sApphints.eRGXRDPowerIslandConf,
+                                sApphints.eFirmwarePerf,
+                                sApphints.ui32KCCBSizeLog2,
+                                ui32FWConfigFlagsExt,
+                                ui32FwOsCfgFlags);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: RGXInitFirmware failed (%d)",
+                                __func__,
+                                eError));
+               goto cleanup;
+       }
+
+#if defined(PDUMP)
+       if (!PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               eError = InitialiseAllCounters(psDeviceNode);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: InitialiseAllCounters failed (%d)",
+                                        __func__, eError));
+                       goto cleanup;
+               }
+       }
+#endif
+
+       /*
+        * Perform second stage of RGX initialisation
+        */
+       eError = RGXInitDevPart2(psDeviceNode,
+                                ui32DeviceFlags,
+                                sApphints.ui32HWPerfHostFilter,
+                                sApphints.eRGXActivePMConf);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: RGXInitDevPart2 failed (%d)",
+                                __func__, eError));
+               goto cleanup;
+       }
+
+#if defined(SUPPORT_VALIDATION)
+       PVRSRVAppHintDumpState(psDeviceNode);
+#endif
+
+       eError = PVRSRV_OK;
+
+cleanup:
+       return eError;
+}
+
+/******************************************************************************
+ End of file (rgxsrvinit.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxstartstop.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxstartstop.c
new file mode 100644 (file)
index 0000000..2d213e3
--- /dev/null
@@ -0,0 +1,1331 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific start/stop routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific start/stop routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* The routines implemented here are built on top of an abstraction layer to
+ * hide DDK/OS-specific details in case they are used outside of the DDK
+ * (e.g. when trusted device is enabled).
+ * Any new dependency should be added to rgxlayer.h.
+ * Any new code should be built on top of the existing abstraction layer,
+ * which should be extended when necessary. */
+#include "rgxstartstop.h"
+
+#if defined(SUPPORT_SHARED_SLC)
+#include "rgxapi_km.h"
+#endif
+
+#include "rgxdevice.h"
+#include "km/rgxdefs_km.h"
+
+#define SOC_FEATURE_STRICT_SAME_ADDRESS_WRITE_ORDERING
+
+
+/*!
+*******************************************************************************
+
+ @Function      RGXEnableClocks
+
+ @Description   Enable RGX Clocks
+
+ @Input         hPrivate  : Implementation specific data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXEnableClocks(const void *hPrivate)
+{
+       RGXCommentLog(hPrivate, "RGX clock: use default (automatic clock gating)");
+}
+
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+static PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue)
+{
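+       /* META registers are reached indirectly through the slave port: wait for
+        * MSLVCTRL1 to report ready/idle, then write the target address to
+        * MSLVCTRL0 and the data to MSLVDATAT. */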
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       /* Wait for Slave Port to be Ready */
+       eError = RGXPollReg32(hPrivate,
+                             RGX_CR_META_SP_MSLVCTRL1,
+                             RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+                             RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+       if (eError != PVRSRV_OK) return eError;
+
+       /* Issue a Write */
+       RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr);
+       (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0); /* Fence write */
+       RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT, ui32RegValue);
+       (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT); /* Fence write */
+
+       return eError;
+}
+
+static PVRSRV_ERROR RGXReadMetaRegThroughSP(const void *hPrivate,
+                                            IMG_UINT32 ui32RegAddr,
+                                            IMG_UINT32* ui32RegValue)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       /* Wait for Slave Port to be Ready */
+       eError = RGXPollReg32(hPrivate,
+                             RGX_CR_META_SP_MSLVCTRL1,
+                             RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+                             RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+       if (eError != PVRSRV_OK) return eError;
+
+       /* Issue a Read */
+       RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN);
+       (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0); /* Fence write */
+
+       /* Wait for Slave Port to be Ready */
+       eError = RGXPollReg32(hPrivate,
+                             RGX_CR_META_SP_MSLVCTRL1,
+                             RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+                             RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+       if (eError != PVRSRV_OK) return eError;
+
+#if !defined(NO_HARDWARE)
+       *ui32RegValue = RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAX);
+#else
+       *ui32RegValue = 0xFFFFFFFF;
+#endif
+
+       return eError;
+}
+
+static PVRSRV_ERROR RGXWriteMetaCoreRegThoughSP(const void *hPrivate,
+                                                IMG_UINT32 ui32CoreReg,
+                                                IMG_UINT32 ui32Value)
+{
+       IMG_UINT32 i = 0;
+
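+       /* Core register write protocol: place the value in TXUXXRXDT, issue the
+        * request through TXUXXRXRQ with the RDnWR bit cleared, then poll for
+        * DREADY with a bounded number of retries. */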
+       RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXUXXRXDT_OFFSET, ui32Value);
+       RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXUXXRXRQ_OFFSET, ui32CoreReg & ~META_CR_TXUXXRXRQ_RDnWR_BIT);
+
+       do
+       {
+               RGXReadMetaRegThroughSP(hPrivate, META_CR_TXUXXRXRQ_OFFSET, &ui32Value);
+       } while (((ui32Value & META_CR_TXUXXRXRQ_DREADY_BIT) != META_CR_TXUXXRXRQ_DREADY_BIT) && (i++ < 1000));
+
+       if ((ui32Value & META_CR_TXUXXRXRQ_DREADY_BIT) != META_CR_TXUXXRXRQ_DREADY_BIT)
+       {
+               RGXCommentLog(hPrivate, "RGXWriteMetaCoreRegThoughSP: Timeout");
+               return PVRSRV_ERROR_TIMEOUT;
+       }
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXStartFirmware(const void *hPrivate)
+{
+       PVRSRV_ERROR eError;
+
+       /* Give privilege to debug and slave port */
+       RGXWriteMetaRegThroughSP(hPrivate, META_CR_SYSC_JTAG_THREAD, META_CR_SYSC_JTAG_THREAD_PRIV_EN);
+
+       /* Point Meta to the bootloader address, global (uncached) range */
+       eError = RGXWriteMetaCoreRegThoughSP(hPrivate,
+                                            PC_ACCESS(0),
+                                            RGXFW_BOOTLDR_META_ADDR | META_MEM_GLOBAL_RANGE_BIT);
+
+       if (eError != PVRSRV_OK)
+       {
+               RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Slave boot Start failed!");
+               return eError;
+       }
+
+       /* Enable minim encoding */
+       RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXPRIVEXT, META_CR_TXPRIVEXT_MINIM_EN);
+
+       /* Enable Meta thread */
+       RGXWriteMetaRegThroughSP(hPrivate, META_CR_T0ENABLE_OFFSET, META_CR_TXENABLE_ENABLE_BIT);
+
+       return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function      RGXInitMetaProcWrapper
+
+ @Description   Configures the hardware wrapper of the META processor
+
+ @Input         hPrivate  : Implementation specific data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXInitMetaProcWrapper(const void *hPrivate)
+{
+       IMG_UINT64 ui64GartenConfig;
+
+       /* Set Garten IDLE to META idle and Set the Garten Wrapper BIF Fence address */
+
+       /* Garten IDLE bit controlled by META */
+       ui64GartenConfig = RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META;
+
+       /* The fence addr is set at the fw init sequence */
+
+       if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE))
+       {
+               /* Set PC = 0 for fences */
+               ui64GartenConfig &= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_CLRMSK;
+               ui64GartenConfig |= (IMG_UINT64)MMU_CONTEXT_MAPPING_FWPRIV
+                                   << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_SHIFT;
+
+       }
+       else
+       {
+               /* Set PC = 0 for fences */
+               ui64GartenConfig &= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK;
+               ui64GartenConfig |= (IMG_UINT64)MMU_CONTEXT_MAPPING_FWPRIV
+                                   << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT;
+
+               /* Set SLC DM=META */
+               ui64GartenConfig |= ((IMG_UINT64) RGXFW_SEGMMU_META_BIFDM_ID) << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT;
+       }
+
+       RGXCommentLog(hPrivate, "RGXStart: Configure META wrapper");
+       RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, ui64GartenConfig);
+}
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function      RGXInitMipsProcWrapper
+
+ @Description   Configures the hardware wrapper of the MIPS processor
+
+ @Input         hPrivate  : Implementation specific data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXInitMipsProcWrapper(const void *hPrivate)
+{
+       IMG_DEV_PHYADDR sPhyAddr;
+       IMG_UINT64 ui64RemapSettings = RGXMIPSFW_BOOT_REMAP_LOG2_SEGMENT_SIZE; /* Same for all remap registers */
+
+       RGXCommentLog(hPrivate, "RGXStart: Configure MIPS wrapper");
+
+       /*
+        * MIPS wrapper (registers transaction ID and ISA mode) setup
+        */
+
+       RGXCommentLog(hPrivate, "RGXStart: Write wrapper config register");
+
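+       /* Cores with a physical bus wider than 32 bits program the wrapper with
+        * the MIPS virtual base of the register bank directly; otherwise the GPU
+        * register physical address is acquired and set via RGXMIPSWrapperConfig(). */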
+       if (RGXGetDevicePhysBusWidth(hPrivate) > 32)
+       {
+               RGXWriteReg32(hPrivate,
+                             RGX_CR_MIPS_WRAPPER_CONFIG,
+                             (RGXMIPSFW_REGISTERS_VIRTUAL_BASE >>
+                             RGXMIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN) |
+                             RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS);
+       }
+       else
+       {
+               RGXAcquireGPURegsAddr(hPrivate, &sPhyAddr);
+
+               RGXMIPSWrapperConfig(hPrivate,
+                                    RGX_CR_MIPS_WRAPPER_CONFIG,
+                                    sPhyAddr.uiAddr,
+                                    RGXMIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN,
+                                    RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS);
+       }
+
+       /*
+        * Boot remap setup
+        */
+
+       RGXAcquireBootRemapAddr(hPrivate, &sPhyAddr);
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+       /* Do not mark accesses to a FW code remap region as DRM accesses */
+       ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK;
+#endif
+
+#if defined(MIPS_FW_CODE_OSID)
+       ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK;
+       ui64RemapSettings |= MIPS_FW_CODE_OSID << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT;
+#endif
+
+       RGXCommentLog(hPrivate, "RGXStart: Write boot remap registers");
+       RGXBootRemapConfig(hPrivate,
+                          RGX_CR_MIPS_ADDR_REMAP1_CONFIG1,
+                          RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN | RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN,
+                          RGX_CR_MIPS_ADDR_REMAP1_CONFIG2,
+                          sPhyAddr.uiAddr,
+                          ~RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK,
+                          ui64RemapSettings);
+
+#if defined(FIX_HW_BRN_63553_BIT_MASK)
+       if (RGX_DEVICE_HAS_BRN(hPrivate, 63553))
+       {
+               IMG_BOOL bPhysBusAbove32Bit = RGXGetDevicePhysBusWidth(hPrivate) > 32;
+               IMG_BOOL bDevicePA0IsValid  = RGXDevicePA0IsValid(hPrivate);
+
+               /* WA always required on 36 bit cores, to avoid continuous unmapped memory accesses to address 0x0 */
+               if (bPhysBusAbove32Bit || !bDevicePA0IsValid)
+               {
+                       RGXCodeRemapConfig(hPrivate,
+                                       RGX_CR_MIPS_ADDR_REMAP5_CONFIG1,
+                                       0x0 | RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN,
+                                       RGX_CR_MIPS_ADDR_REMAP5_CONFIG2,
+                                       sPhyAddr.uiAddr,
+                                       ~RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_CLRMSK,
+                                       ui64RemapSettings);
+               }
+       }
+#endif
+
+       /*
+        * Data remap setup
+        */
+
+       RGXAcquireDataRemapAddr(hPrivate, &sPhyAddr);
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+       if (RGXGetDevicePhysBusWidth(hPrivate) > 32)
+       {
+               /* Remapped private data in secure memory */
+               ui64RemapSettings |= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_EN;
+       }
+       else
+       {
+               /* Remapped data in non-secure memory */
+               ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK;
+       }
+#endif
+
+#if defined(MIPS_FW_CODE_OSID)
+       ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK;
+#endif
+
+       RGXCommentLog(hPrivate, "RGXStart: Write data remap registers");
+       RGXDataRemapConfig(hPrivate,
+                          RGX_CR_MIPS_ADDR_REMAP2_CONFIG1,
+                          RGXMIPSFW_DATA_REMAP_PHYS_ADDR_IN | RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_EN,
+                          RGX_CR_MIPS_ADDR_REMAP2_CONFIG2,
+                          sPhyAddr.uiAddr,
+                          ~RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_CLRMSK,
+                          ui64RemapSettings);
+
+       /*
+        * Code remap setup
+        */
+
+       RGXAcquireCodeRemapAddr(hPrivate, &sPhyAddr);
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+       /* Do not mark accesses to a FW code remap region as DRM accesses */
+       ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK;
+#endif
+
+#if defined(MIPS_FW_CODE_OSID)
+       ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK;
+       ui64RemapSettings |= MIPS_FW_CODE_OSID << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT;
+#endif
+
+       RGXCommentLog(hPrivate, "RGXStart: Write exceptions remap registers");
+       RGXCodeRemapConfig(hPrivate,
+                          RGX_CR_MIPS_ADDR_REMAP3_CONFIG1,
+                          RGXMIPSFW_CODE_REMAP_PHYS_ADDR_IN | RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_EN,
+                          RGX_CR_MIPS_ADDR_REMAP3_CONFIG2,
+                          sPhyAddr.uiAddr,
+                          ~RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_CLRMSK,
+                          ui64RemapSettings);
+
+       if (RGXGetDevicePhysBusWidth(hPrivate) == 32)
+       {
+               /*
+                * Trampoline remap setup
+                */
+
+               RGXAcquireTrampolineRemapAddr(hPrivate, &sPhyAddr);
+               ui64RemapSettings = RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE;
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+               /* Remapped data in non-secure memory */
+               ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK;
+#endif
+
+#if defined(MIPS_FW_CODE_OSID)
+               ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK;
+#endif
+
+               RGXCommentLog(hPrivate, "RGXStart: Write trampoline remap registers");
+               RGXTrampolineRemapConfig(hPrivate,
+                                        RGX_CR_MIPS_ADDR_REMAP4_CONFIG1,
+                                        sPhyAddr.uiAddr | RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN,
+                                        RGX_CR_MIPS_ADDR_REMAP4_CONFIG2,
+                                        RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR,
+                                        ~RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_CLRMSK,
+                                        ui64RemapSettings);
+       }
+
+       /* Garten IDLE bit controlled by MIPS */
+       RGXCommentLog(hPrivate, "RGXStart: Set GARTEN_IDLE type to MIPS");
+       RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META);
+
+       /* Turn on the EJTAG probe (only useful when the driver is live) */
+       RGXWriteReg32(hPrivate, RGX_CR_MIPS_DEBUG_CONFIG, 0);
+}
+
+
+#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK)
+/*!
+*******************************************************************************
+
+ @Function      RGXInitRiscvProcWrapper
+
+ @Description   Configures the hardware wrapper of the RISCV processor
+
+ @Input         hPrivate  : Implementation specific data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXInitRiscvProcWrapper(const void *hPrivate)
+{
+       IMG_DEV_VIRTADDR sTmp;
+
+       RGXCommentLog(hPrivate, "RGXStart: Configure RISCV wrapper");
+
+       RGXCommentLog(hPrivate, "RGXStart: Write boot code remap");
+       RGXAcquireBootCodeAddr(hPrivate, &sTmp);
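+       /* The remap value packs the boot code device virtual address, the remap
+        * region size (the FW raw heap), the MMU context used for the FW mapping
+        * and the instruction fetch enable.
+        */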
+       RGXWriteReg64(hPrivate,
+                     RGXRISCVFW_BOOTLDR_CODE_REMAP,
+                     sTmp.uiAddr |
+                     (IMG_UINT64) (RGX_FIRMWARE_RAW_HEAP_SIZE >> FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT)
+                       << RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT |
+                     (IMG_UINT64) MMU_CONTEXT_MAPPING_FWPRIV << FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT |
+                     RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN);
+
+       RGXCommentLog(hPrivate, "RGXStart: Write boot data remap");
+       RGXAcquireBootDataAddr(hPrivate, &sTmp);
+       RGXWriteReg64(hPrivate,
+                     RGXRISCVFW_BOOTLDR_DATA_REMAP,
+                     sTmp.uiAddr |
+                     (IMG_UINT64) (RGX_FIRMWARE_RAW_HEAP_SIZE >> FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT)
+                       << RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT |
+                     (IMG_UINT64) MMU_CONTEXT_MAPPING_FWPRIV << FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT |
+#if defined(SUPPORT_TRUSTED_DEVICE)
+                     RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_EN |
+#endif
+                     RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN);
+
+       /* Garten IDLE bit controlled by RISCV */
+       RGXCommentLog(hPrivate, "RGXStart: Set GARTEN_IDLE type to RISCV");
+       RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META);
+}
+#endif
+
+
+/*!
+*******************************************************************************
+
+ @Function      __RGXInitSLC
+
+ @Description   Initialise RGX SLC
+
+ @Input         hPrivate  : Implementation specific data
+
+ @Return        void
+
+******************************************************************************/
+static void __RGXInitSLC(const void *hPrivate)
+{
+#if defined(RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK)
+       if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_CACHE_HIERARCHY))
+       {
+               IMG_UINT32 ui32Reg;
+               IMG_UINT32 ui32RegVal;
+
+               /*
+                * SLC control
+                */
+               ui32Reg = RGX_CR_SLC3_CTRL_MISC;
+               ui32RegVal = RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SCRAMBLE_PVR_HASH |
+                   RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_EN;
+               RGXWriteReg32(hPrivate, ui32Reg, ui32RegVal);
+
+               /*
+                * SLC scramble bits
+                */
+               {
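+                   /* The scramble seed values and the number of SCRAMBLE
+                    * registers written depend on the number of SLC banks.
+                    */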
+                   IMG_UINT32 i;
+                   IMG_UINT32 ui32Count=0;
+                   IMG_UINT32 ui32SLCBanks = RGXGetDeviceSLCBanks(hPrivate);
+                   IMG_UINT64 aui64ScrambleValues[4];
+                   IMG_UINT32 aui32ScrambleRegs[] = {
+                       RGX_CR_SLC3_SCRAMBLE,
+                       RGX_CR_SLC3_SCRAMBLE2,
+                       RGX_CR_SLC3_SCRAMBLE3,
+                       RGX_CR_SLC3_SCRAMBLE4
+                   };
+
+                   if (2 == ui32SLCBanks)
+                   {
+                       aui64ScrambleValues[0] = IMG_UINT64_C(0x6965a99a55696a6a);
+                       aui64ScrambleValues[1] = IMG_UINT64_C(0x6aa9aa66959aaa9a);
+                       aui64ScrambleValues[2] = IMG_UINT64_C(0x9a5665965a99a566);
+                       aui64ScrambleValues[3] = IMG_UINT64_C(0x5aa69596aa66669a);
+                       ui32Count = 4;
+                   }
+                   else if (4 == ui32SLCBanks)
+                   {
+                       aui64ScrambleValues[0] = IMG_UINT64_C(0xc6788d722dd29ce4);
+                       aui64ScrambleValues[1] = IMG_UINT64_C(0x7272e4e11b279372);
+                       aui64ScrambleValues[2] = IMG_UINT64_C(0x87d872d26c6c4be1);
+                       aui64ScrambleValues[3] = IMG_UINT64_C(0xe1b4878d4b36e478);
+                       ui32Count = 4;
+
+                   }
+                   else if (8 == ui32SLCBanks)
+                   {
+                       aui64ScrambleValues[0] = IMG_UINT64_C(0x859d6569e8fac688);
+                       aui64ScrambleValues[1] = IMG_UINT64_C(0xf285e1eae4299d33);
+                       aui64ScrambleValues[2] = IMG_UINT64_C(0x1e1af2be3c0aa447);
+                       ui32Count = 3;
+                   }
+
+                   for (i = 0; i < ui32Count; i++)
+                   {
+                       IMG_UINT32 ui32Reg = aui32ScrambleRegs[i];
+                       IMG_UINT64 ui64Value = aui64ScrambleValues[i];
+                       RGXWriteReg64(hPrivate, ui32Reg, ui64Value);
+                   }
+               }
+
+               {
+                       /* Disable the forced SLC coherency which the hardware enables for compatibility with older pdumps */
+                       RGXCommentLog(hPrivate, "Disable forced SLC coherency");
+                       RGXWriteReg64(hPrivate, RGX_CR_GARTEN_SLC, 0);
+               }
+       }
+       else
+#endif
+       {
+               IMG_UINT32 ui32Reg;
+               IMG_UINT32 ui32RegVal;
+               IMG_UINT64 ui64RegVal;
+
+               /*
+                * SLC Bypass control
+                */
+               ui32Reg = RGX_CR_SLC_CTRL_BYPASS;
+               ui64RegVal = 0;
+
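+               /* Collect the requester types that should bypass the SLC; the bits
+                * are applied further down with a single read-modify-write, and only
+                * if at least one bypass was requested.
+                */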
+#if defined(RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_EN)
+               if ((RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, SLC_SIZE_IN_KILOBYTES) == 8)  ||
+                   RGX_DEVICE_HAS_BRN(hPrivate, 61450))
+               {
+                       RGXCommentLog(hPrivate, "Bypass SLC for IPF_OBJ and IPF_CPF");
+                       ui64RegVal |= (IMG_UINT64) RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_EN |
+                                               (IMG_UINT64) RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_EN;
+               }
+#endif
+
+               if (ui64RegVal != 0)
+               {
+                       RGXReadModifyWriteReg64(hPrivate, ui32Reg, ui64RegVal, ~ui64RegVal);
+               }
+
+               /*
+                * SLC Misc control.
+                *
+                * Note: This is a 64bit register and we set only the lower 32bits leaving the top
+                *       32bits (RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS) unchanged from the HW default.
+                */
+               ui32Reg = RGX_CR_SLC_CTRL_MISC;
+               ui32RegVal = RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1;
+
+#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1)
+               ui32RegVal |= RGXReadReg32(hPrivate, ui32Reg) & RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN;
+#endif
+
+#if defined(FIX_HW_BRN_60084_BIT_MASK)
+               if (RGX_DEVICE_HAS_BRN(hPrivate, 60084))
+               {
+#if !defined(SOC_FEATURE_STRICT_SAME_ADDRESS_WRITE_ORDERING)
+                       ui32RegVal |= RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN;
+#else
+                       if (RGX_DEVICE_HAS_ERN(hPrivate, 61389))
+                       {
+                               ui32RegVal |= RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN;
+                       }
+#endif
+               }
+#endif
+
+#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1)
+               /* Bypass burst combiner if SLC line size is smaller than 1024 bits */
+               if (RGXGetDeviceCacheLineSize(hPrivate) < 1024)
+               {
+                       ui32RegVal |= RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN;
+               }
+#endif
+
+               RGXWriteReg32(hPrivate, ui32Reg, ui32RegVal);
+       }
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function      RGXInitBIF
+
+ @Description   Initialise RGX BIF
+
+ @Input         hPrivate : Implementation specific data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXInitBIF(const void *hPrivate)
+{
+       if (!RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS))
+       {
+               IMG_DEV_PHYADDR sPCAddr;
+
+               /*
+                * Acquire the address of the Kernel Page Catalogue.
+                */
+               RGXAcquireKernelMMUPC(hPrivate, &sPCAddr);
+
+               /*
+                * Write the kernel catalogue base.
+                */
+               RGXCommentLog(hPrivate, "RGX firmware MMU Page Catalogue");
+
+#if defined(RGX_FEATURE_SLC_VIVT_BIT_MASK)
+               if (!RGX_DEVICE_HAS_FEATURE(hPrivate, SLC_VIVT))
+               {
+                       /* Write the cat-base address */
+                       RGXWriteKernelMMUPC64(hPrivate,
+                                             BIF_CAT_BASEx(MMU_CONTEXT_MAPPING_FWPRIV),
+                                             RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT,
+                                             RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT,
+                                             ((sPCAddr.uiAddr
+                                             >> RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT)
+                                             << RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT)
+                                             & ~RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK);
+
+#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK)
+                       if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR))
+                       {
+                               /* Keep catbase registers in sync */
+                               RGXWriteKernelMMUPC64(hPrivate,
+                                                     FWCORE_MEM_CAT_BASEx(MMU_CONTEXT_MAPPING_FWPRIV),
+                                                     RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT,
+                                                     RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT,
+                                                     ((sPCAddr.uiAddr
+                                                     >> RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT)
+                                                     << RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT)
+                                                     & ~RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_CLRMSK);
+                       }
+#endif
+
+                       /*
+                        * Trusted Firmware boot
+                        */
+#if defined(SUPPORT_TRUSTED_DEVICE)
+                       RGXCommentLog(hPrivate, "RGXInitBIF: Trusted Device enabled");
+                       RGXWriteReg32(hPrivate, RGX_CR_BIF_TRUST, RGX_CR_BIF_TRUST_ENABLE_EN);
+#endif
+               }
+               else
+#endif /* defined(RGX_FEATURE_SLC_VIVT_BIT_MASK) */
+               {
+#if defined(RGX_CR_MMU_CBASE_MAPPING) // FIXME_OCEANIC
+                       IMG_UINT32 uiPCAddr;
+                       uiPCAddr = (((sPCAddr.uiAddr >> RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT)
+                                    << RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT)
+                                   & ~RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK);
+
+                       /* Set the mapping context */
+                       RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, MMU_CONTEXT_MAPPING_FWPRIV);
+                       (void)RGXReadReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT); /* Fence write */
+
+                       /* Write the cat-base address */
+                       RGXWriteKernelMMUPC32(hPrivate,
+                                             RGX_CR_MMU_CBASE_MAPPING,
+                                             RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT,
+                                             RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT,
+                                             uiPCAddr);
+
+#if (MMU_CONTEXT_MAPPING_FWIF != MMU_CONTEXT_MAPPING_FWPRIV)
+                       /* Set up a different MMU context ID mapping to the same PC used above */
+                       RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, MMU_CONTEXT_MAPPING_FWIF);
+                       (void)RGXReadReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT); /* Fence write */
+
+                       RGXWriteKernelMMUPC32(hPrivate,
+                                             RGX_CR_MMU_CBASE_MAPPING,
+                                             RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT,
+                                             RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT,
+                                             uiPCAddr);
+#endif
+#endif
+               }
+       }
+       else
+       {
+               /*
+                * Trusted Firmware boot
+                */
+#if defined(SUPPORT_TRUSTED_DEVICE)
+               RGXCommentLog(hPrivate, "RGXInitBIF: Trusted Device enabled");
+               RGXWriteReg32(hPrivate, RGX_CR_BIF_TRUST, RGX_CR_BIF_TRUST_ENABLE_EN);
+#endif
+       }
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function      RGXAXIACELiteInit
+
+ @Description   Initialise AXI-ACE Lite interface
+
+ @Input         hPrivate : Implementation specific data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXAXIACELiteInit(const void *hPrivate)
+{
+       IMG_UINT32 ui32RegAddr;
+       IMG_UINT64 ui64RegVal;
+
+       ui32RegAddr = RGX_CR_AXI_ACE_LITE_CONFIGURATION;
+
+       /* Setup AXI-ACE config. Set everything to outer cache */
+       ui64RegVal = (3U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT) |
+                    (3U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT) |
+                    (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT)  |
+                    (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT) |
+                    (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT) |
+                    (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT) |
+                    (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT) |
+                    (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT);
+
+#if defined(FIX_HW_BRN_42321_BIT_MASK)
+       if (RGX_DEVICE_HAS_BRN(hPrivate, 42321))
+       {
+               ui64RegVal |= (((IMG_UINT64) 1) << RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_SHIFT);
+       }
+#endif
+
+#if defined(FIX_HW_BRN_68186_BIT_MASK)
+       if (RGX_DEVICE_HAS_BRN(hPrivate, 68186))
+       {
+               /* The default value of reg_enable_fence_out is zero. Force it to 1 to allow core_clk < mem_clk */
+               ui64RegVal |= (IMG_UINT64)1 << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_SHIFT;
+       }
+#endif
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && defined(RGX_FEATURE_SLC_VIVT_BIT_MASK)
+       if (RGX_DEVICE_HAS_FEATURE(hPrivate, SLC_VIVT))
+       {
+               RGXCommentLog(hPrivate, "OSID 0 and 1 are trusted");
+               ui64RegVal |= IMG_UINT64_C(0xFC)
+                     << RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_SHIFT;
+       }
+#endif
+
+       RGXCommentLog(hPrivate, "Init AXI-ACE interface");
+       RGXWriteReg64(hPrivate, ui32RegAddr, ui64RegVal);
+}
+
+PVRSRV_ERROR RGXStart(const void *hPrivate)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
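+       /* Assume a MIPS FW processor by default; overridden below for cores with
+        * a RISCV or META FW processor.
+        */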
+       IMG_CHAR *pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_MIPS;
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+       IMG_BOOL bDoFWSlaveBoot = IMG_FALSE;
+       IMG_BOOL bMetaFW = IMG_FALSE;
+#endif
+
+#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK)
+       if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR))
+       {
+               pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_RISCV;
+       }
+       else
+#endif
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+       if (RGX_DEVICE_HAS_FEATURE_VALUE(hPrivate, META))
+       {
+               pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_META;
+               bMetaFW = IMG_TRUE;
+               bDoFWSlaveBoot = RGXDoFWSlaveBoot(hPrivate);
+       }
+#endif
+
+       if (RGX_DEVICE_HAS_FEATURE(hPrivate, SYS_BUS_SECURE_RESET))
+       {
+               /* Disable the default sys_bus_secure protection to perform minimal setup */
+               RGXCommentLog(hPrivate, "RGXStart: Disable sys_bus_secure");
+               RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, 0);
+               (void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE); /* Fence write */
+       }
+
+#if defined(SUPPORT_SHARED_SLC)
+       /* When the SLC is shared, the SLC reset is performed by the System layer when calling
+        * RGXInitSLC (before any device uses it), therefore mask out the SLC bit to avoid
+        * soft_resetting it here.
+        */
+#define RGX_CR_SOFT_RESET_ALL  (RGX_CR_SOFT_RESET_MASKFULL ^ RGX_CR_SOFT_RESET_SLC_EN)
+       RGXCommentLog(hPrivate, "RGXStart: Shared SLC (don't reset SLC as part of RGX reset)");
+#else
+#define RGX_CR_SOFT_RESET_ALL  (RGX_CR_SOFT_RESET_MASKFULL)
+#endif
+
+#if defined(RGX_S7_SOFT_RESET_DUSTS)
+       if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE))
+       {
+               /* Set RGX in soft-reset */
+               RGXCommentLog(hPrivate, "RGXStart: soft reset assert step 1");
+               RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_DUSTS);
+
+               /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
+               (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+               (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET2);
+
+               RGXCommentLog(hPrivate, "RGXStart: soft reset assert step 2");
+               RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_JONES_ALL | RGX_S7_SOFT_RESET_DUSTS);
+               RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET2, RGX_S7_SOFT_RESET2);
+
+               (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+               (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET2);
+
+               /* Take everything out of reset but the FW processor */
+               RGXCommentLog(hPrivate, "RGXStart: soft reset de-assert step 1 excluding %s", pcRGXFW_PROCESSOR);
+               RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_DUSTS | RGX_CR_SOFT_RESET_GARTEN_EN);
+               RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET2, 0x0);
+
+               (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+               (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET2);
+
+               RGXCommentLog(hPrivate, "RGXStart: soft reset de-assert step 2 excluding %s", pcRGXFW_PROCESSOR);
+               RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN);
+
+               (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+       }
+       else
+#endif
+       {
+               /* Set RGX in soft-reset */
+               RGXCommentLog(hPrivate, "RGXStart: soft reset everything");
+               RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_ALL);
+
+               /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
+               (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+
+               /* Take Rascal and Dust out of reset */
+               RGXCommentLog(hPrivate, "RGXStart: Rascal and Dust out of reset");
+               RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_ALL ^ RGX_CR_SOFT_RESET_RASCALDUSTS_EN);
+
+               (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+
+               /* Take everything out of reset but the FW processor */
+               RGXCommentLog(hPrivate, "RGXStart: Take everything out of reset but %s", pcRGXFW_PROCESSOR);
+
+#if defined(RGX_FEATURE_XE_ARCHITECTURE) && (RGX_FEATURE_XE_ARCHITECTURE > 1)
+               RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_CPU_EN);
+#else
+               RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN);
+#endif
+
+               (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+       }
+
+       /* Enable clocks */
+       RGXEnableClocks(hPrivate);
+
+       /*
+        * Initialise SLC.
+        */
+#if !defined(SUPPORT_SHARED_SLC)
+       __RGXInitSLC(hPrivate);
+#endif
+
+       if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, ECC_RAMS) > 0)
+       {
+               RGXCommentLog(hPrivate, "RGXStart: Enable safety events");
+               RGXWriteReg32(hPrivate, RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE,
+                                       RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL);
+       }
+
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+       if (bMetaFW)
+       {
+               if (bDoFWSlaveBoot)
+               {
+                       /* Configure META to Slave boot */
+                       RGXCommentLog(hPrivate, "RGXStart: META Slave boot");
+                       RGXWriteReg32(hPrivate, RGX_CR_META_BOOT, 0);
+
+               }
+               else
+               {
+                       /* Configure META to Master boot */
+                       RGXCommentLog(hPrivate, "RGXStart: META Master boot");
+                       RGXWriteReg32(hPrivate, RGX_CR_META_BOOT, RGX_CR_META_BOOT_MODE_EN);
+               }
+       }
+#endif
+
+       /*
+        * Initialise Firmware wrapper
+        */
+#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK)
+       if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR))
+       {
+               RGXInitRiscvProcWrapper(hPrivate);
+       }
+       else
+#endif
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+       if (bMetaFW)
+       {
+               RGXInitMetaProcWrapper(hPrivate);
+       }
+       else
+#endif
+       {
+               RGXInitMipsProcWrapper(hPrivate);
+       }
+
+       if (RGX_DEVICE_HAS_FEATURE(hPrivate, AXI_ACELITE))
+       {
+               /* We must initialise the AXI-ACE interface before the first BIF transaction */
+               RGXAXIACELiteInit(hPrivate);
+       }
+
+       /*
+        * Initialise BIF.
+        */
+       RGXInitBIF(hPrivate);
+
+       RGXCommentLog(hPrivate, "RGXStart: Take %s out of reset", pcRGXFW_PROCESSOR);
+
+       /* Need to wait for at least 16 cycles before taking the FW processor out of reset ... */
+       RGXWaitCycles(hPrivate, 32, 3);
+
+       RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, 0x0);
+       (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+
+       /* ... and afterwards */
+       RGXWaitCycles(hPrivate, 32, 3);
+
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+       if (bMetaFW && bDoFWSlaveBoot)
+       {
+               eError = RGXFabricCoherencyTest(hPrivate);
+               if (eError != PVRSRV_OK) return eError;
+
+               RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Slave boot Start");
+               eError = RGXStartFirmware(hPrivate);
+               if (eError != PVRSRV_OK) return eError;
+       }
+       else
+#endif
+       {
+               RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Master boot Start");
+
+#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK)
+               if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR))
+               {
+                       /* Bring Debug Module out of reset */
+                       RGXWriteReg32(hPrivate, RGX_CR_FWCORE_DMI_DMCONTROL, RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN);
+
+                       /* Boot the FW */
+                       RGXWriteReg32(hPrivate, RGX_CR_FWCORE_BOOT, 1);
+                       RGXWaitCycles(hPrivate, 32, 3);
+               }
+#endif
+       }
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(SUPPORT_SECURITY_VALIDATION)
+       RGXCommentLog(hPrivate, "RGXStart: Enable sys_bus_secure");
+       RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, RGX_CR_SYS_BUS_SECURE_ENABLE_EN);
+       (void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE); /* Fence write */
+#endif
+
+       return eError;
+}
+
+PVRSRV_ERROR RGXStop(const void *hPrivate)
+{
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) || defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK)
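+       /* Work out which FW processor is present: META is implied when the core
+        * has neither a MIPS nor a RISCV FW processor.
+        */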
+       IMG_BOOL bMipsFW = RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS);
+       IMG_BOOL bRiscvFW = RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR);
+       IMG_BOOL bMetaFW = !bMipsFW && !bRiscvFW;
+#endif
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       PVR_ASSERT(hPrivate != NULL);
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+
+       RGXDeviceAckIrq(hPrivate);
+
+       /* Wait for Sidekick/Jones to signal IDLE, except for the Garten Wrapper.
+        * For LAYOUT_MARS = 1, SIDEKICK would have been powered down by the FW.
+        */
+#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1)
+       if (!(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0))
+       {
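+               /* SOCIF and HOSTIF are also excluded from the poll below, as the
+                * host's own register accesses keep them busy.
+                */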
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+               if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE))
+               {
+                       eError = RGXPollReg32(hPrivate,
+                                       RGX_CR_JONES_IDLE,
+                                       RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN),
+                                       RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN));
+               }
+               else
+#endif
+               {
+                       eError = RGXPollReg32(hPrivate,
+                                       RGX_CR_SIDEKICK_IDLE,
+                                       RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN),
+                                       RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN));
+               }
+
+               if (eError != PVRSRV_OK) return eError;
+       }
+#endif
+
+       if (!(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0))
+       {
+#if !defined(SUPPORT_SHARED_SLC)
+               /*
+                * Wait for the SLC to signal IDLE.
+                * For LAYOUT_MARS = 1, the SLC would have been powered down by the FW.
+                */
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+               if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE))
+               {
+                       eError = RGXPollReg32(hPrivate,
+                                       RGX_CR_SLC3_IDLE,
+                                       RGX_CR_SLC3_IDLE_MASKFULL,
+                                       RGX_CR_SLC3_IDLE_MASKFULL);
+               }
+               else
+#endif
+               {
+                       eError = RGXPollReg32(hPrivate,
+                                       RGX_CR_SLC_IDLE,
+                                       RGX_CR_SLC_IDLE_MASKFULL,
+                                       RGX_CR_SLC_IDLE_MASKFULL);
+               }
+#endif /* SUPPORT_SHARED_SLC */
+               if (eError != PVRSRV_OK) return eError;
+       }
+
+       /* Unset MTS DM association with threads */
+       RGXWriteReg32(hPrivate,
+                     RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC,
+                     RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK
+                     & RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL);
+       RGXWriteReg32(hPrivate,
+                     RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC,
+                     RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK
+                     & RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL);
+#if defined(RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC) // FIXME_OCEANIC
+       RGXWriteReg32(hPrivate,
+                     RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC,
+                     RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK
+                     & RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL);
+       RGXWriteReg32(hPrivate,
+                     RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC,
+                     RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK
+                     & RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL);
+#endif
+
+#if defined(PDUMP) && defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+       if (bMetaFW)
+       {
+               /* Disabling threads is only required for pdumps to stop the fw gracefully */
+
+               /* Disable thread 0 */
+               eError = RGXWriteMetaRegThroughSP(hPrivate,
+                                                 META_CR_T0ENABLE_OFFSET,
+                                                 ~META_CR_TXENABLE_ENABLE_BIT);
+               if (eError != PVRSRV_OK) return eError;
+
+               /* Disable thread 1 */
+               eError = RGXWriteMetaRegThroughSP(hPrivate,
+                                                 META_CR_T1ENABLE_OFFSET,
+                                                 ~META_CR_TXENABLE_ENABLE_BIT);
+               if (eError != PVRSRV_OK) return eError;
+
+               /* Clear down any irq raised by META (done after disabling the FW
+                * threads to avoid a race condition).
+                * This is only really needed for PDumps, but we do it anyway when the driver is live.
+                */
+               RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS, 0x0);
+               (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS); /* Fence write */
+
+               /* Wait for the Slave Port to finish all the transactions */
+               eError = RGXPollReg32(hPrivate,
+                                     RGX_CR_META_SP_MSLVCTRL1,
+                                     RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+                                     RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+               if (eError != PVRSRV_OK) return eError;
+       }
+#endif
+
+#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1)
+       /* Extra idle checks: wait for the BIF MMU status registers to report no outstanding activity */
+       eError = RGXPollReg32(hPrivate,
+                             RGX_CR_BIF_STATUS_MMU,
+                             0,
+                             RGX_CR_BIF_STATUS_MMU_MASKFULL);
+       if (eError != PVRSRV_OK) return eError;
+
+       eError = RGXPollReg32(hPrivate,
+                             RGX_CR_BIFPM_STATUS_MMU,
+                             0,
+                             RGX_CR_BIFPM_STATUS_MMU_MASKFULL);
+       if (eError != PVRSRV_OK) return eError;
+#endif
+
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+       if (!RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE) &&
+           !RGX_DEVICE_HAS_FEATURE(hPrivate, XT_TOP_INFRASTRUCTURE))
+#endif
+       {
+               eError = RGXPollReg32(hPrivate,
+                                     RGX_CR_BIF_READS_EXT_STATUS,
+                                     0,
+                                     RGX_CR_BIF_READS_EXT_STATUS_MASKFULL);
+               if (eError != PVRSRV_OK) return eError;
+       }
+
+#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1)
+       eError = RGXPollReg32(hPrivate,
+                             RGX_CR_BIFPM_READS_EXT_STATUS,
+                             0,
+                             RGX_CR_BIFPM_READS_EXT_STATUS_MASKFULL);
+       if (eError != PVRSRV_OK) return eError;
+#endif
+
+       {
+               IMG_UINT64 ui64SLCMask = RGX_CR_SLC_STATUS1_MASKFULL;
+               eError = RGXPollReg64(hPrivate,
+                                     RGX_CR_SLC_STATUS1,
+                                     0,
+                                     ui64SLCMask);
+               if (eError != PVRSRV_OK) return eError;
+       }
+
+#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1)
+       if (4 == RGXGetDeviceSLCBanks(hPrivate))
+       {
+               eError = RGXPollReg64(hPrivate,
+                                     RGX_CR_SLC_STATUS2,
+                                     0,
+                                     RGX_CR_SLC_STATUS2_MASKFULL);
+               if (eError != PVRSRV_OK) return eError;
+       }
+#endif
+
+       if (!(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0))
+       {
+#if !defined(SUPPORT_SHARED_SLC)
+               /*
+                * Wait for the SLC to signal IDLE.
+                * For LAYOUT_MARS = 1, the SLC would have been powered down by the FW.
+                */
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+               if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE))
+               {
+                       eError = RGXPollReg32(hPrivate,
+                                       RGX_CR_SLC3_IDLE,
+                                       RGX_CR_SLC3_IDLE_MASKFULL,
+                                       RGX_CR_SLC3_IDLE_MASKFULL);
+               }
+               else
+#endif
+               {
+                       eError = RGXPollReg32(hPrivate,
+                                       RGX_CR_SLC_IDLE,
+                                       RGX_CR_SLC_IDLE_MASKFULL,
+                                       RGX_CR_SLC_IDLE_MASKFULL);
+               }
+#endif /* SUPPORT_SHARED_SLC */
+               if (eError != PVRSRV_OK) return eError;
+       }
+
+       /* Wait for Sidekick/Jones to signal IDLE, except for the Garten Wrapper.
+        * For LAYOUT_MARS = 1, SIDEKICK would have been powered down by the FW.
+        */
+#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1)
+       if (!(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0))
+       {
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
+               if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE))
+               {
+#if defined(RGX_FEATURE_FASTRENDER_DM_BIT_MASK)
+                       if (!RGX_DEVICE_HAS_FEATURE(hPrivate, FASTRENDER_DM))
+                       {
+                               eError = RGXPollReg32(hPrivate,
+                                               RGX_CR_JONES_IDLE,
+                                               RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN),
+                                               RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN));
+                       }
+#endif
+               }
+               else
+#endif
+               {
+                       eError = RGXPollReg32(hPrivate,
+                                       RGX_CR_SIDEKICK_IDLE,
+                                       RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN),
+                                       RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN));
+               }
+
+               if (eError != PVRSRV_OK) return eError;
+       }
+#endif
+
+#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
+       if (bMetaFW)
+       {
+               IMG_UINT32 ui32RegValue;
+
+               eError = RGXReadMetaRegThroughSP(hPrivate,
+                                                META_CR_TxVECINT_BHALT,
+                                                &ui32RegValue);
+               if (eError != PVRSRV_OK) return eError;
+
+               if ((ui32RegValue & 0xFFFFFFFFU) == 0x0)
+               {
+                       /* Wait for Sidekick/Jones to signal IDLE including
+                        * the Garten Wrapper if there is no debugger attached
+                        * (TxVECINT_BHALT = 0x0) */
+                       if (!RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE))
+                       {
+                               eError = RGXPollReg32(hPrivate,
+                                                     RGX_CR_SIDEKICK_IDLE,
+                                                     RGX_CR_SIDEKICK_IDLE_GARTEN_EN,
+                                                     RGX_CR_SIDEKICK_IDLE_GARTEN_EN);
+                               if (eError != PVRSRV_OK) return eError;
+                       }
+                       else
+                       {
+                               eError = RGXPollReg32(hPrivate,
+                                                     RGX_CR_JONES_IDLE,
+                                                     RGX_CR_JONES_IDLE_GARTEN_EN,
+                                                     RGX_CR_JONES_IDLE_GARTEN_EN);
+                               if (eError != PVRSRV_OK) return eError;
+                       }
+               }
+       }
+       else
+#endif
+       {
+               if (PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0)
+               {
+                       /* As the FW core has been moved from SIDEKICK to the new MARS domain, check
+                        * the idle bits for the CPU & System Arbiter, excluding SOCIF, which will
+                        * never be idle while the Host is polling this register.
+                        */
+                       eError = RGXPollReg32(hPrivate,
+                                       RGX_CR_MARS_IDLE,
+                                       RGX_CR_MARS_IDLE_CPU_EN | RGX_CR_MARS_IDLE_MH_SYSARB0_EN,
+                                       RGX_CR_MARS_IDLE_CPU_EN | RGX_CR_MARS_IDLE_MH_SYSARB0_EN);
+                       if (eError != PVRSRV_OK) return eError;
+               }
+#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1)
+               else
+               {
+                       eError = RGXPollReg32(hPrivate,
+                                       RGX_CR_SIDEKICK_IDLE,
+                                       RGX_CR_SIDEKICK_IDLE_GARTEN_EN,
+                                       RGX_CR_SIDEKICK_IDLE_GARTEN_EN);
+                       if (eError != PVRSRV_OK) return eError;
+               }
+#endif
+       }
+
+       return eError;
+}
+
+
+/*
+ * RGXInitSLC
+ */
+#if defined(SUPPORT_SHARED_SLC)
+PVRSRV_ERROR RGXInitSLC(IMG_HANDLE hDevHandle)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       void *pvPowerParams;
+
+       if (psDeviceNode == NULL)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+       psDevInfo = psDeviceNode->pvDevice;
+       pvPowerParams = &psDevInfo->sLayerParams;
+
+       /* reset the SLC */
+       RGXCommentLog(pvPowerParams, "RGXInitSLC: soft reset SLC");
+       RGXWriteReg64(pvPowerParams, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_SLC_EN);
+
+       /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
+       (void) RGXReadReg64(pvPowerParams, RGX_CR_SOFT_RESET);
+
+       /* Take everything out of reset */
+       RGXWriteReg64(pvPowerParams, RGX_CR_SOFT_RESET, 0x0);
+
+       __RGXInitSLC(pvPowerParams);
+
+       return PVRSRV_OK;
+}
+#endif
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxta3d.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxta3d.c
new file mode 100644 (file)
index 0000000..3b43bab
--- /dev/null
@@ -0,0 +1,5426 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX TA/3D routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX TA/3D routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+/* for the offsetof macro */
+#if defined(__linux__)
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+#include "pdump_km.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxta3d.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "ri_server.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "rgx_memallocflags.h"
+#include "rgxccb.h"
+#include "rgxhwperf.h"
+#include "ospvr_gputrace.h"
+#include "rgxsyncutils.h"
+#include "htbuffer.h"
+
+#include "rgxdefs_km.h"
+#include "rgx_fwif_km.h"
+#include "physmem.h"
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "process_stats.h"
+
+#include "rgxtimerquery.h"
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+
+#if defined(SUPPORT_PDVFS)
+#include "rgxpdvfs.h"
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "rgxworkest.h"
+
+#define HASH_CLEAN_LIMIT 6
+#endif
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_TA3D_UFO_DUMP   0
+
+//#define TA3D_CHECKPOINT_DEBUG
+
+#if defined(TA3D_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+static INLINE
+void _DebugSyncValues(const IMG_CHAR *pszFunction,
+               const IMG_UINT32 *pui32UpdateValues,
+               const IMG_UINT32 ui32Count)
+{
+       IMG_UINT32 i;
+       IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32UpdateValues;
+
+       for (i = 0; i < ui32Count; i++)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", pszFunction, i, (void*)pui32Tmp, *pui32Tmp));
+               pui32Tmp++;
+       }
+}
+
+static INLINE
+void _DebugSyncCheckpoints(const IMG_CHAR *pszFunction,
+               const IMG_CHAR *pszDMName,
+               const PSYNC_CHECKPOINT *apsSyncCheckpoints,
+               const IMG_UINT32 ui32Count)
+{
+       IMG_UINT32 i;
+
+       for (i = 0; i < ui32Count; i++)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFence%sSyncCheckpoints[%d]=<%p>", pszFunction, pszDMName, i, *(apsSyncCheckpoints + i)));
+       }
+}
+
+#else
+#define CHKPT_DBG(X)
+#endif
+
+/* define the number of commands required to be set up by the CCB helper */
+/* 1 command for the TA */
+#define CCB_CMD_HELPER_NUM_TA_COMMANDS 1
+/* Up to 3 commands for the 3D (partial render fence, partial render, and render) */
+#define CCB_CMD_HELPER_NUM_3D_COMMANDS 3
+
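+/* Fetch the cycle prediction from a workload estimation data structure,
+ * or NO_CYCEST when workload estimation support is compiled out.
+ */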
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#define WORKEST_CYCLES_PREDICTION_GET(x) ((x).ui32CyclesPrediction)
+#else
+#define WORKEST_CYCLES_PREDICTION_GET(x) (NO_CYCEST)
+#endif
+
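+/* Per-render-context state for the TA (geometry) data master */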
+typedef struct {
+       DEVMEM_MEMDESC                          *psContextStateMemDesc;
+       RGX_SERVER_COMMON_CONTEXT       *psServerCommonContext;
+       IMG_UINT32                                      ui32Priority;
+} RGX_SERVER_RC_TA_DATA;
+
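+/* Per-render-context state for the 3D data master */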
+typedef struct {
+       DEVMEM_MEMDESC                          *psContextStateMemDesc;
+       RGX_SERVER_COMMON_CONTEXT       *psServerCommonContext;
+       IMG_UINT32                                      ui32Priority;
+} RGX_SERVER_RC_3D_DATA;
+
+struct _RGX_SERVER_RENDER_CONTEXT_ {
+       /* This lock protects usage of the render context.
+        * It ensures only one kick is being prepared and/or submitted on
+        * this render context at any time.
+        */
+       POS_LOCK                                hLock;
+       RGX_CCB_CMD_HELPER_DATA asTACmdHelperData[CCB_CMD_HELPER_NUM_TA_COMMANDS];
+       RGX_CCB_CMD_HELPER_DATA as3DCmdHelperData[CCB_CMD_HELPER_NUM_3D_COMMANDS];
+       PVRSRV_DEVICE_NODE                      *psDeviceNode;
+       DEVMEM_MEMDESC                          *psFWRenderContextMemDesc;
+       DEVMEM_MEMDESC                          *psFWFrameworkMemDesc;
+       RGX_SERVER_RC_TA_DATA           sTAData;
+       RGX_SERVER_RC_3D_DATA           s3DData;
+       IMG_UINT32                                      ui32CleanupStatus;
+#define RC_CLEANUP_TA_COMPLETE         (1 << 0)
+#define RC_CLEANUP_3D_COMPLETE         (1 << 1)
+       DLLIST_NODE                                     sListNode;
+       SYNC_ADDR_LIST                          sSyncAddrListTAFence;
+       SYNC_ADDR_LIST                          sSyncAddrListTAUpdate;
+       SYNC_ADDR_LIST                          sSyncAddrList3DFence;
+       SYNC_ADDR_LIST                          sSyncAddrList3DUpdate;
+       ATOMIC_T                                        hIntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       WORKEST_HOST_DATA                       sWorkEstData;
+#endif
+#if defined(SUPPORT_BUFFER_SYNC)
+       struct pvr_buffer_sync_context *psBufferSyncContext;
+#endif
+};
+
+
+/*
+       Static functions used by render context code
+*/
+
+static
+PVRSRV_ERROR _DestroyTAContext(RGX_SERVER_RC_TA_DATA *psTAData,
+               PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR eError;
+
+       /* Check if the FW has finished with this resource ... */
+       eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+                       psTAData->psServerCommonContext,
+                       RGXFWIF_DM_GEOM,
+                       PDUMP_FLAGS_CONTINUOUS);
+       if (eError == PVRSRV_ERROR_RETRY)
+       {
+               return eError;
+       }
+       else if (eError != PVRSRV_OK)
+       {
+               PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+               return eError;
+       }
+
+       /* ... it has so we can free its resources */
+#if defined(DEBUG)
+       /* Log the number of TA context stores which occurred */
+       {
+               RGXFWIF_TACTX_STATE     *psFWTAState;
+
+               eError = DevmemAcquireCpuVirtAddr(psTAData->psContextStateMemDesc,
+                               (void**)&psFWTAState);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Failed to map firmware render context state (%s)",
+                                       __func__, PVRSRVGetErrorString(eError)));
+               }
+               else
+               {
+                       /* Release the CPU virt addr */
+                       DevmemReleaseCpuVirtAddr(psTAData->psContextStateMemDesc);
+               }
+       }
+#endif
+       FWCommonContextFree(psTAData->psServerCommonContext);
+       DevmemFwUnmapAndFree(psDeviceNode->pvDevice, psTAData->psContextStateMemDesc);
+       psTAData->psServerCommonContext = NULL;
+       return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR _Destroy3DContext(RGX_SERVER_RC_3D_DATA *ps3DData,
+               PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR eError;
+
+       /* Check if the FW has finished with this resource ... */
+       eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+                       ps3DData->psServerCommonContext,
+                       RGXFWIF_DM_3D,
+                       PDUMP_FLAGS_CONTINUOUS);
+       if (eError == PVRSRV_ERROR_RETRY)
+       {
+               return eError;
+       }
+       else if (eError != PVRSRV_OK)
+       {
+               PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+               return eError;
+       }
+
+       /* ... it has so we can free its resources */
+#if defined(DEBUG)
+       /* Log the number of 3D context stores which occurred */
+       {
+               RGXFWIF_3DCTX_STATE     *psFW3DState;
+
+               eError = DevmemAcquireCpuVirtAddr(ps3DData->psContextStateMemDesc,
+                               (void**)&psFW3DState);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Failed to map firmware render context state (%s)",
+                                       __func__, PVRSRVGetErrorString(eError)));
+               }
+               else
+               {
+                       /* Release the CPU virt addr */
+                       DevmemReleaseCpuVirtAddr(ps3DData->psContextStateMemDesc);
+               }
+       }
+#endif
+
+       FWCommonContextFree(ps3DData->psServerCommonContext);
+       DevmemFwUnmapAndFree(psDeviceNode->pvDevice, ps3DData->psContextStateMemDesc);
+       ps3DData->psServerCommonContext = NULL;
+       return PVRSRV_OK;
+}
+
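+/* Dump the page list of a single PMR node belonging to a freelist memory block */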
+static void _RGXDumpPMRPageList(DLLIST_NODE *psNode)
+{
+       RGX_PMR_NODE *psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock);
+       PVRSRV_ERROR                    eError;
+
+       eError = PMRDumpPageList(psPMRNode->psPMR,
+                       RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "Error (%s) printing pmr %p",
+                               PVRSRVGetErrorString(eError),
+                               psPMRNode->psPMR));
+       }
+}
+
+IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList)
+{
+       DLLIST_NODE *psNode, *psNext;
+
+       PVR_LOG(("Freelist FWAddr 0x%08x, ID = %d, CheckSum 0x%016" IMG_UINT64_FMTSPECx,
+                       psFreeList->sFreeListFWDevVAddr.ui32Addr,
+                       psFreeList->ui32FreelistID,
+                       psFreeList->ui64FreelistChecksum));
+
+       /* Dump Init FreeList page list */
+       PVR_LOG(("  Initial Memory block"));
+       dllist_foreach_node(&psFreeList->sMemoryBlockInitHead, psNode, psNext)
+       {
+               _RGXDumpPMRPageList(psNode);
+       }
+
+       /* Dump Grow FreeList page list */
+       PVR_LOG(("  Grow Memory blocks"));
+       dllist_foreach_node(&psFreeList->sMemoryBlockHead, psNode, psNext)
+       {
+               _RGXDumpPMRPageList(psNode);
+       }
+
+       return IMG_TRUE;
+}
+
+static void _CheckFreelist(RGX_FREELIST *psFreeList,
+               IMG_UINT32 ui32NumOfPagesToCheck,
+               IMG_UINT64 ui64ExpectedCheckSum,
+               IMG_UINT64 *pui64CalculatedCheckSum)
+{
+#if defined(NO_HARDWARE)
+       /* No checksum needed as we have all information in the pdumps */
+       PVR_UNREFERENCED_PARAMETER(psFreeList);
+       PVR_UNREFERENCED_PARAMETER(ui32NumOfPagesToCheck);
+       PVR_UNREFERENCED_PARAMETER(ui64ExpectedCheckSum);
+       *pui64CalculatedCheckSum = 0;
+#else
+       PVRSRV_ERROR eError;
+       size_t uiNumBytes;
+       IMG_UINT8* pui8Buffer;
+       IMG_UINT32* pui32Buffer;
+       IMG_UINT32 ui32CheckSumAdd = 0;
+       IMG_UINT32 ui32CheckSumXor = 0;
+       IMG_UINT32 ui32Entry;
+       IMG_UINT32 ui32Entry2;
+       IMG_BOOL bFreelistBad = IMG_FALSE;
+
+       *pui64CalculatedCheckSum = 0;
+
+       PVR_ASSERT(ui32NumOfPagesToCheck <= (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages));
+
+       /* Allocate Buffer of the size of the freelist */
+       pui8Buffer = OSAllocMem(ui32NumOfPagesToCheck * sizeof(IMG_UINT32));
+       if (pui8Buffer == NULL)
+       {
+               PVR_LOG(("%s: Failed to allocate buffer to check freelist %p!",
+                               __func__, psFreeList));
+               PVR_ASSERT(0);
+               return;
+       }
+
+       /* Copy freelist content into Buffer */
+       eError = PMR_ReadBytes(psFreeList->psFreeListPMR,
+                       psFreeList->uiFreeListPMROffset +
+                       (((psFreeList->ui32MaxFLPages -
+                                       psFreeList->ui32CurrentFLPages -
+                                       psFreeList->ui32ReadyFLPages) * sizeof(IMG_UINT32)) &
+                                       ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1)),
+                                       pui8Buffer,
+                                       ui32NumOfPagesToCheck * sizeof(IMG_UINT32),
+                                       &uiNumBytes);
+       if (eError != PVRSRV_OK)
+       {
+               OSFreeMem(pui8Buffer);
+               PVR_LOG(("%s: Failed to get freelist data for freelist %p!",
+                               __func__, psFreeList));
+               PVR_ASSERT(0);
+               return;
+       }
+
+       PVR_ASSERT(uiNumBytes == ui32NumOfPagesToCheck * sizeof(IMG_UINT32));
+
+       /* Generate checksum (skipping the first page if not allocated) */
+       pui32Buffer = (IMG_UINT32 *)pui8Buffer;
+       ui32Entry = ((psFreeList->ui32GrowFLPages == 0  &&  psFreeList->ui32CurrentFLPages > 1) ? 1 : 0);
+       for (/*ui32Entry*/ ; ui32Entry < ui32NumOfPagesToCheck; ui32Entry++)
+       {
+               ui32CheckSumAdd += pui32Buffer[ui32Entry];
+               ui32CheckSumXor ^= pui32Buffer[ui32Entry];
+
+               /* Check for double entries */
+               for (ui32Entry2 = ui32Entry+1; ui32Entry2 < ui32NumOfPagesToCheck; ui32Entry2++)
+               {
+                       if (pui32Buffer[ui32Entry] == pui32Buffer[ui32Entry2])
+                       {
+                               PVR_LOG(("%s: Freelist consistency failure: FW addr: 0x%08X, Double entry found 0x%08x on idx: %d and %d of %d",
+                                               __func__,
+                                               psFreeList->sFreeListFWDevVAddr.ui32Addr,
+                                               pui32Buffer[ui32Entry2],
+                                               ui32Entry,
+                                               ui32Entry2,
+                                               psFreeList->ui32CurrentFLPages));
+                               bFreelistBad = IMG_TRUE;
+                               break;
+                       }
+               }
+       }
+
+       OSFreeMem(pui8Buffer);
+
+       /* Check the calculated checksum against the expected checksum... */
+       *pui64CalculatedCheckSum = ((IMG_UINT64)ui32CheckSumXor << 32) | ui32CheckSumAdd;
+
+       if (ui64ExpectedCheckSum != 0  &&  ui64ExpectedCheckSum != *pui64CalculatedCheckSum)
+       {
+               PVR_LOG(("%s: Checksum mismatch for freelist %p! Expected 0x%016" IMG_UINT64_FMTSPECx " calculated 0x%016" IMG_UINT64_FMTSPECx,
+                               __func__, psFreeList,
+                               ui64ExpectedCheckSum, *pui64CalculatedCheckSum));
+               bFreelistBad = IMG_TRUE;
+       }
+
+       if (bFreelistBad)
+       {
+               PVR_LOG(("%s: Sleeping forever!", __func__));
+               PVR_ASSERT(!bFreelistBad);
+       }
+#endif
+}
+
+
+/*
+ *  Function to work out the number of freelist pages to reserve for growing
+ *  within the FW without having to wait for the host to progress a grow
+ *  request.
+ *
+ *  The number of pages must be a multiple of 4 to align the PM addresses
+ *  for the initial freelist allocation and also be less than the grow size.
+ *
+ *  If the threshold or grow size means less than 4 pages, then the feature
+ *  is not used.
+ */
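+/*
+ *  Worked example (illustrative only; assumes a 10% grow threshold and that
+ *  RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE is 16 bytes, i.e. 4 entries):
+ *  with ui32FLPages = 1000 the raw figure is (1000 * 10) / 100 = 100 pages,
+ *  already a multiple of 4, so 100 ready pages are reserved (clamped to
+ *  ui32GrowFLPages if that is smaller). With ui32FLPages = 30 the raw figure
+ *  of 3 is rounded down to 0 and the feature is not used.
+ */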
+static IMG_UINT32 _CalculateFreelistReadyPages(RGX_FREELIST *psFreeList,
+               IMG_UINT32  ui32FLPages)
+{
+       IMG_UINT32  ui32ReadyFLPages = ((ui32FLPages * psFreeList->ui32GrowThreshold) / 100) &
+                       ~((RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE/sizeof(IMG_UINT32))-1);
+
+       if (ui32ReadyFLPages > psFreeList->ui32GrowFLPages)
+       {
+               ui32ReadyFLPages = psFreeList->ui32GrowFLPages;
+       }
+
+       return ui32ReadyFLPages;
+}
+
+
+PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList,
+               IMG_UINT32 ui32NumPages,
+               PDLLIST_NODE pListHeader)
+{
+       RGX_PMR_NODE    *psPMRNode;
+       IMG_DEVMEM_SIZE_T uiSize;
+       IMG_UINT32  ui32MappingTable = 0;
+       IMG_DEVMEM_OFFSET_T uiOffset;
+       IMG_DEVMEM_SIZE_T uiLength;
+       IMG_DEVMEM_SIZE_T uiStartPage;
+       PVRSRV_ERROR eError;
+       static const IMG_CHAR szAllocName[] = "Free List";
+
+       /* Are we allowed to grow? */
+       if (psFreeList->ui32MaxFLPages - (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) < ui32NumPages)
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                               "Freelist [0x%p]: grow by %u pages denied. "
+                               "Max PB size reached (current pages %u+%u/%u)",
+                               psFreeList,
+                               ui32NumPages,
+                               psFreeList->ui32CurrentFLPages,
+                               psFreeList->ui32ReadyFLPages,
+                               psFreeList->ui32MaxFLPages));
+               return PVRSRV_ERROR_PBSIZE_ALREADY_MAX;
+       }
+
+       /* Allocate kernel memory block structure */
+       psPMRNode = OSAllocMem(sizeof(*psPMRNode));
+       if (psPMRNode == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: failed to allocate host data structure",
+                               __func__));
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto ErrorAllocHost;
+       }
+
+       /*
+        * Lock protects simultaneous manipulation of:
+        * - the memory block list
+        * - the freelist's ui32CurrentFLPages
+        */
+       OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
+
+
+       /*
+        *  The PM never takes the last page in a freelist, so if this block
+        *  of pages is the first one and there is no ability to grow, then
+        *  we can skip allocating one 4K page for the lowest entry.
+        */
+       if (OSGetPageSize() > RGX_BIF_PM_PHYSICAL_PAGE_SIZE)
+       {
+               /*
+                * The allocation size will be rounded up to the OS page size,
+                * so any attempt to trim it here would be undone later.
+                */
+               psPMRNode->bFirstPageMissing = IMG_FALSE;
+       }
+       else
+       {
+               psPMRNode->bFirstPageMissing = (psFreeList->ui32GrowFLPages == 0  &&  ui32NumPages > 1);
+       }
+
+       psPMRNode->ui32NumPages = ui32NumPages;
+       psPMRNode->psFreeList = psFreeList;
+
+       /* Allocate Memory Block */
+       PDUMPCOMMENT(psFreeList->psDevInfo->psDeviceNode, "Allocate PB Block (Pages %08X)", ui32NumPages);
+       uiSize = (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE;
+       if (psPMRNode->bFirstPageMissing)
+       {
+               uiSize -= RGX_BIF_PM_PHYSICAL_PAGE_SIZE;
+       }
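+       /*
+        * Illustrative sizing (hypothetical values; assumes both the OS page and
+        * RGX_BIF_PM_PHYSICAL_PAGE_SIZE are 4KB): a non-growable freelist
+        * created with 64 pages sets bFirstPageMissing, so instead of
+        * 64 * 4KB = 256KB only 63 * 4KB = 252KB of physical memory is
+        * allocated, because the PM never takes the last (lowest) page.
+        */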
+       eError = PhysmemNewRamBackedPMR(psFreeList->psConnection,
+                       psFreeList->psDevInfo->psDeviceNode,
+                       uiSize,
+                       uiSize,
+                       1,
+                       1,
+                       &ui32MappingTable,
+                       RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
+                       PVRSRV_MEMALLOCFLAG_GPU_READABLE,
+                       sizeof(szAllocName),
+                       szAllocName,
+                       psFreeList->ownerPid,
+                       &psPMRNode->psPMR,
+                       PDUMP_NONE,
+                       NULL);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to allocate PB block of size: 0x%016" IMG_UINT64_FMTSPECX,
+                               __func__,
+                               (IMG_UINT64)uiSize));
+               goto ErrorBlockAlloc;
+       }
+
+       /* Zero the physical pages pointed to by the PMR */
+       if (psFreeList->psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_ZERO_FREELIST)
+       {
+               eError = PMRZeroingPMR(psPMRNode->psPMR, RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Failed to zero PMR %p of freelist %p (%s)",
+                                       __func__,
+                                       psPMRNode->psPMR,
+                                       psFreeList,
+                                       PVRSRVGetErrorString(eError)));
+                       PVR_ASSERT(0);
+               }
+       }
+
+       uiLength = psPMRNode->ui32NumPages * sizeof(IMG_UINT32);
+       uiStartPage = (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages - psPMRNode->ui32NumPages);
+       uiOffset = psFreeList->uiFreeListPMROffset + ((uiStartPage * sizeof(IMG_UINT32)) & ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1));
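+       /*
+        * Illustrative offsets (hypothetical values; assumes
+        * RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE is 16 bytes): with
+        * ui32MaxFLPages = 1024, ui32CurrentFLPages = 0 and a grow of 256 pages,
+        * the new entries start at page 768, i.e. byte offset
+        * 768 * sizeof(IMG_UINT32) = 3072 from uiFreeListPMROffset, which is
+        * already 16-byte aligned so the mask leaves it unchanged. Successive
+        * grows therefore fill the freelist from its highest entries downwards.
+        */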
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+
+       eError = RIWritePMREntryWithOwnerKM(psPMRNode->psPMR,
+                       psFreeList->ownerPid);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: call to RIWritePMREntryWithOwnerKM failed (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+       }
+
+       /* Attach RI information */
+       eError = RIWriteMEMDESCEntryKM(psPMRNode->psPMR,
+                       OSStringNLength(szAllocName, DEVMEM_ANNOTATION_MAX_LEN),
+                       szAllocName,
+                       0,
+                       uiSize,
+                       IMG_FALSE,
+                       IMG_FALSE,
+                       &psPMRNode->hRIHandle);
+       PVR_LOG_IF_ERROR(eError, "RIWriteMEMDESCEntryKM");
+
+#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
+
+       /* Write the freelist with the memory block's physical addresses */
+       eError = PMRWritePMPageList(
+                       /* Target PMR, offset, and length */
+                       psFreeList->psFreeListPMR,
+                       (psPMRNode->bFirstPageMissing ? uiOffset + sizeof(IMG_UINT32) : uiOffset),
+                       (psPMRNode->bFirstPageMissing ? uiLength - sizeof(IMG_UINT32) : uiLength),
+                       /* Referenced PMR, and "page" granularity */
+                       psPMRNode->psPMR,
+                       RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
+                       &psPMRNode->psPageList);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to write pages of Node %p",
+                               __func__,
+                               psPMRNode));
+               goto ErrorPopulateFreelist;
+       }
+
+#if defined(SUPPORT_SHADOW_FREELISTS)
+       /* Copy freelist memory to shadow freelist */
+       {
+               const IMG_UINT32 ui32FLMaxSize = psFreeList->ui32MaxFLPages * sizeof(IMG_UINT32);
+               const IMG_UINT32 ui32MapSize = ui32FLMaxSize * 2;
+               const IMG_UINT32 ui32CopyOffset = uiOffset - psFreeList->uiFreeListPMROffset;
+               IMG_BYTE *pFLMapAddr;
+               size_t uiNumBytes;
+               PVRSRV_ERROR res;
+               IMG_HANDLE hMapHandle;
+
+               /* Map both the FL and the shadow FL */
+               res = PMRAcquireKernelMappingData(psFreeList->psFreeListPMR, psFreeList->uiFreeListPMROffset, ui32MapSize,
+                               (void**) &pFLMapAddr, &uiNumBytes, &hMapHandle);
+               if (res != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Failed to map freelist (ID=%d)",
+                                       __func__,
+                                       psFreeList->ui32FreelistID));
+                       goto ErrorPopulateFreelist;
+               }
+
+               /* Copy only the newly added memory */
+               OSCachedMemCopy(pFLMapAddr + ui32FLMaxSize + ui32CopyOffset, pFLMapAddr + ui32CopyOffset , uiLength);
+               OSWriteMemoryBarrier(pFLMapAddr);
+
+#if defined(PDUMP)
+               PDUMPCOMMENT(psFreeList->psDevInfo->psDeviceNode, "Initialize shadow freelist");
+
+               /* Translate memcpy to pdump */
+               {
+                       IMG_DEVMEM_OFFSET_T uiCurrOffset;
+
+                       for (uiCurrOffset = uiOffset; (uiCurrOffset - uiOffset) < uiLength; uiCurrOffset += sizeof(IMG_UINT32))
+                       {
+                               PMRPDumpCopyMem32(psFreeList->psFreeListPMR,
+                                               uiCurrOffset + ui32FLMaxSize,
+                                               psFreeList->psFreeListPMR,
+                                               uiCurrOffset,
+                                               ":SYSMEM:$1",
+                                               PDUMP_FLAGS_CONTINUOUS);
+                       }
+               }
+#endif
+
+
+               res = PMRReleaseKernelMappingData(psFreeList->psFreeListPMR, hMapHandle);
+
+               if (res != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Failed to release freelist mapping (ID=%d)",
+                                       __func__,
+                                       psFreeList->ui32FreelistID));
+                       goto ErrorPopulateFreelist;
+               }
+       }
+#endif
+
+       /* It must be added to the tail, otherwise the freelist population won't work */
+       dllist_add_to_head(pListHeader, &psPMRNode->sMemoryBlock);
+
+       /* Update number of available pages */
+       psFreeList->ui32CurrentFLPages += ui32NumPages;
+
+       /* Update statistics (needs to happen before the ReadyFL calculation to also count those pages) */
+       if (psFreeList->ui32NumHighPages < psFreeList->ui32CurrentFLPages)
+       {
+               psFreeList->ui32NumHighPages = psFreeList->ui32CurrentFLPages;
+       }
+
+       /* Reserve a number of ready pages so the FW can handle OOM quickly and asynchronously request a grow. */
+       psFreeList->ui32ReadyFLPages    = _CalculateFreelistReadyPages(psFreeList, psFreeList->ui32CurrentFLPages);
+       psFreeList->ui32CurrentFLPages -= psFreeList->ui32ReadyFLPages;
+
+       if (psFreeList->bCheckFreelist)
+       {
+               /*
+                *  We can only calculate the freelist checksum when the list is full
+                *  (e.g. at initial creation time). At other times the checksum cannot
+                *  be calculated and has to be disabled for this freelist.
+                */
+               if ((psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) == ui32NumPages)
+               {
+                       _CheckFreelist(psFreeList, ui32NumPages, 0, &psFreeList->ui64FreelistChecksum);
+               }
+               else
+               {
+                       psFreeList->ui64FreelistChecksum = 0;
+               }
+       }
+       OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+       PVR_DPF((PVR_DBG_MESSAGE,
+                       "Freelist [%p]: %s %u pages (pages=%u+%u/%u checksum=0x%016" IMG_UINT64_FMTSPECx "%s)",
+                       psFreeList,
+                       ((psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) == ui32NumPages ? "Create initial" : "Grow by"),
+                       ui32NumPages,
+                       psFreeList->ui32CurrentFLPages,
+                       psFreeList->ui32ReadyFLPages,
+                       psFreeList->ui32MaxFLPages,
+                       psFreeList->ui64FreelistChecksum,
+                       (psPMRNode->bFirstPageMissing ? " - lowest page not allocated" : "")));
+
+       return PVRSRV_OK;
+
+       /* Error handling */
+ErrorPopulateFreelist:
+       PMRUnrefPMR(psPMRNode->psPMR);
+
+ErrorBlockAlloc:
+       OSFreeMem(psPMRNode);
+       OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+ErrorAllocHost:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+
+}
+
+static PVRSRV_ERROR RGXShrinkFreeList(PDLLIST_NODE pListHeader,
+               RGX_FREELIST *psFreeList)
+{
+       DLLIST_NODE *psNode;
+       RGX_PMR_NODE *psPMRNode;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_UINT32 ui32OldValue;
+
+       /*
+        * Lock protects simultaneous manipulation of:
+        * - the memory block list
+        * - the freelist's ui32CurrentFLPages value
+        */
+       PVR_ASSERT(pListHeader);
+       PVR_ASSERT(psFreeList);
+       PVR_ASSERT(psFreeList->psDevInfo);
+       PVR_ASSERT(psFreeList->psDevInfo->hLockFreeList);
+
+       OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
+
+       /* Get node from head of list and remove it */
+       psNode = dllist_get_next_node(pListHeader);
+       if (psNode)
+       {
+               dllist_remove_node(psNode);
+
+               psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock);
+               PVR_ASSERT(psPMRNode);
+               PVR_ASSERT(psPMRNode->psPMR);
+               PVR_ASSERT(psPMRNode->psFreeList);
+
+               /* Remove the block from the freelist */
+
+               /* Unwrite Freelist with Memory Block physical addresses */
+               eError = PMRUnwritePMPageList(psPMRNode->psPageList);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Failed to unwrite pages of Node %p",
+                                       __func__,
+                                       psPMRNode));
+                       PVR_ASSERT(IMG_FALSE);
+               }
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+
+               if (psPMRNode->hRIHandle)
+               {
+                       PVRSRV_ERROR eError;
+
+                       eError = RIDeleteMEMDESCEntryKM(psPMRNode->hRIHandle);
+                       PVR_LOG_IF_ERROR(eError, "RIDeleteMEMDESCEntryKM");
+               }
+
+#endif  /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
+
+               /* Free PMR (We should be the only one that holds a ref on the PMR) */
+               eError = PMRUnrefPMR(psPMRNode->psPMR);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Failed to free PB block %p (%s)",
+                                       __func__,
+                                       psPMRNode->psPMR,
+                                       PVRSRVGetErrorString(eError)));
+                       PVR_ASSERT(IMG_FALSE);
+               }
+
+               /* update available pages in freelist */
+               ui32OldValue = psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages;
+
+               /*
+                * Deallocated pages are deducted from the ReadyPages bank first;
+                * once that is exhausted, the remainder is deducted from the
+                * CurrentPages bank.
+                */
+               if (psPMRNode->ui32NumPages > psFreeList->ui32ReadyFLPages)
+               {
+                       psFreeList->ui32CurrentFLPages -= psPMRNode->ui32NumPages - psFreeList->ui32ReadyFLPages;
+                       psFreeList->ui32ReadyFLPages = 0;
+               }
+               else
+               {
+                       psFreeList->ui32ReadyFLPages -= psPMRNode->ui32NumPages;
+               }
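+               /*
+                * Example with hypothetical values: shrinking a block of 10 pages
+                * while ui32ReadyFLPages is 4 empties the ready bank and removes
+                * the remaining 6 pages from ui32CurrentFLPages; shrinking a block
+                * of 3 pages would leave 1 ready page and not touch
+                * ui32CurrentFLPages at all.
+                */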
+
+               /* check underflow */
+               PVR_ASSERT(ui32OldValue > (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages));
+
+               PVR_DPF((PVR_DBG_MESSAGE, "Freelist [%p]: shrink by %u pages (current pages %u/%u)",
+                               psFreeList,
+                               psPMRNode->ui32NumPages,
+                               psFreeList->ui32CurrentFLPages,
+                               psFreeList->ui32MaxFLPages));
+
+               OSFreeMem(psPMRNode);
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                               "Freelist [0x%p]: shrink denied. PB already at initial PB size (%u pages)",
+                               psFreeList,
+                               psFreeList->ui32InitFLPages));
+               eError = PVRSRV_ERROR_PBSIZE_ALREADY_MIN;
+       }
+
+       OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+       return eError;
+}
+
+static RGX_FREELIST *FindFreeList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FreelistID)
+{
+       DLLIST_NODE *psNode, *psNext;
+       RGX_FREELIST *psFreeList = NULL;
+
+       OSLockAcquire(psDevInfo->hLockFreeList);
+
+       dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext)
+       {
+               RGX_FREELIST *psThisFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode);
+
+               if (psThisFreeList->ui32FreelistID == ui32FreelistID)
+               {
+                       psFreeList = psThisFreeList;
+                       break;
+               }
+       }
+
+       OSLockRelease(psDevInfo->hLockFreeList);
+       return psFreeList;
+}
+
+void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo,
+                           IMG_UINT32 ui32FreelistID)
+{
+       RGX_FREELIST *psFreeList = NULL;
+       RGXFWIF_KCCB_CMD s3DCCBCmd;
+       IMG_UINT32 ui32GrowValue;
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(psDevInfo);
+
+       psFreeList = FindFreeList(psDevInfo, ui32FreelistID);
+
+       if (psFreeList)
+       {
+               /* Since the FW made the request, it has already consumed the ready pages; update the host struct accordingly */
+               psFreeList->ui32CurrentFLPages += psFreeList->ui32ReadyFLPages;
+               psFreeList->ui32ReadyFLPages = 0;
+
+               /* Try to grow the freelist */
+               eError = RGXGrowFreeList(psFreeList,
+                               psFreeList->ui32GrowFLPages,
+                               &psFreeList->sMemoryBlockHead);
+
+               if (eError == PVRSRV_OK)
+               {
+                       /* Grow successful, report the grow size back to the FW */
+                       ui32GrowValue = psFreeList->ui32GrowFLPages;
+
+                       psFreeList->ui32NumGrowReqByFW++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+                       /* Update Stats */
+                       PVRSRVStatsUpdateFreelistStats(0,
+                                       1, /* Add 1 to the appropriate counter (Requests by FW) */
+                                       psFreeList->ui32InitFLPages,
+                                       psFreeList->ui32NumHighPages,
+                                       psFreeList->ownerPid);
+
+#endif
+
+               }
+               else
+               {
+                       /* Grow failed */
+                       ui32GrowValue = 0;
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "Grow for FreeList %p failed (%s)",
+                                       psFreeList,
+                                       PVRSRVGetErrorString(eError)));
+               }
+
+               /* send feedback */
+               s3DCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE;
+               s3DCCBCmd.uCmdData.sFreeListGSData.sFreeListFWDevVAddr.ui32Addr = psFreeList->sFreeListFWDevVAddr.ui32Addr;
+               s3DCCBCmd.uCmdData.sFreeListGSData.ui32DeltaPages = ui32GrowValue;
+               s3DCCBCmd.uCmdData.sFreeListGSData.ui32NewPages = psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages;
+               s3DCCBCmd.uCmdData.sFreeListGSData.ui32ReadyPages = psFreeList->ui32ReadyFLPages;
+
+
+               LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+               {
+                       eError = RGXScheduleCommand(psDevInfo,
+                                       RGXFWIF_DM_3D,
+                                       &s3DCCBCmd,
+                                       PDUMP_FLAGS_NONE);
+                       if (eError != PVRSRV_ERROR_RETRY)
+                       {
+                               break;
+                       }
+                       OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+               } END_LOOP_UNTIL_TIMEOUT();
+               /* The kernel CCB should never fill up, as the FW processes commands right away */
+
+               PVR_ASSERT(eError == PVRSRV_OK);
+       }
+       else
+       {
+               /* Should never happen */
+               PVR_DPF((PVR_DBG_ERROR,
+                               "FreeList Lookup for FreeList ID 0x%08x failed (Populate)",
+                               ui32FreelistID));
+               PVR_ASSERT(IMG_FALSE);
+       }
+}
+
+static void _RGXFreeListReconstruction(PDLLIST_NODE psNode)
+{
+
+       PVRSRV_RGXDEV_INFO              *psDevInfo;
+       RGX_FREELIST                    *psFreeList;
+       RGX_PMR_NODE                    *psPMRNode;
+       PVRSRV_ERROR                    eError;
+       IMG_DEVMEM_OFFSET_T             uiOffset;
+       IMG_DEVMEM_SIZE_T               uiLength;
+       IMG_UINT32                              ui32StartPage;
+
+       psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock);
+       psFreeList = psPMRNode->psFreeList;
+       PVR_ASSERT(psFreeList);
+       psDevInfo = psFreeList->psDevInfo;
+       PVR_ASSERT(psDevInfo);
+
+       uiLength = psPMRNode->ui32NumPages * sizeof(IMG_UINT32);
+       ui32StartPage = (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages - psPMRNode->ui32NumPages);
+       uiOffset = psFreeList->uiFreeListPMROffset + ((ui32StartPage * sizeof(IMG_UINT32)) & ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1));
+
+       PMRUnwritePMPageList(psPMRNode->psPageList);
+       psPMRNode->psPageList = NULL;
+       eError = PMRWritePMPageList(
+                       /* Target PMR, offset, and length */
+                       psFreeList->psFreeListPMR,
+                       (psPMRNode->bFirstPageMissing ? uiOffset + sizeof(IMG_UINT32) : uiOffset),
+                       (psPMRNode->bFirstPageMissing ? uiLength - sizeof(IMG_UINT32) : uiLength),
+                       /* Referenced PMR, and "page" granularity */
+                       psPMRNode->psPMR,
+                       RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
+                       &psPMRNode->psPageList);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Error (%s) writing FL 0x%08x",
+                               __func__,
+                               PVRSRVGetErrorString(eError),
+                               (IMG_UINT32)psFreeList->ui32FreelistID));
+       }
+
+       /* Zero the physical pages pointed to by the reconstructed freelist */
+       if (psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_ZERO_FREELIST)
+       {
+               eError = PMRZeroingPMR(psPMRNode->psPMR, RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Failed to zero PMR %p of freelist %p (%s)",
+                                       __func__,
+                                       psPMRNode->psPMR,
+                                       psFreeList,
+                                       PVRSRVGetErrorString(eError)));
+                       PVR_ASSERT(0);
+               }
+       }
+
+
+       psFreeList->ui32CurrentFLPages += psPMRNode->ui32NumPages;
+}
+
+
+static PVRSRV_ERROR RGXReconstructFreeList(RGX_FREELIST *psFreeList)
+{
+       IMG_UINT32        ui32OriginalFLPages;
+       DLLIST_NODE       *psNode, *psNext;
+       RGXFWIF_FREELIST  *psFWFreeList;
+       PVRSRV_ERROR      eError;
+
+       //PVR_DPF((PVR_DBG_ERROR, "FreeList RECONSTRUCTION: Reconstructing freelist %p (ID=%u)", psFreeList, psFreeList->ui32FreelistID));
+
+       /* Do the FreeList Reconstruction */
+       ui32OriginalFLPages            = psFreeList->ui32CurrentFLPages;
+       psFreeList->ui32CurrentFLPages = 0;
+
+       /* Reconstructing Init FreeList pages */
+       dllist_foreach_node(&psFreeList->sMemoryBlockInitHead, psNode, psNext)
+       {
+               _RGXFreeListReconstruction(psNode);
+       }
+
+       /* Reconstructing Grow FreeList pages */
+       dllist_foreach_node(&psFreeList->sMemoryBlockHead, psNode, psNext)
+       {
+               _RGXFreeListReconstruction(psNode);
+       }
+
+       /* Ready pages are allocated but kept hidden until OOM occurs. */
+       psFreeList->ui32CurrentFLPages -= psFreeList->ui32ReadyFLPages;
+       if (psFreeList->ui32CurrentFLPages != ui32OriginalFLPages)
+       {
+               PVR_ASSERT(psFreeList->ui32CurrentFLPages == ui32OriginalFLPages);
+               return PVRSRV_ERROR_FREELIST_RECONSTRUCTION_FAILED;
+       }
+
+       /* Reset the firmware freelist structure */
+       eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList);
+       if (eError != PVRSRV_OK)
+       {
+               return eError;
+       }
+
+       psFWFreeList->ui32CurrentStackTop       = psFWFreeList->ui32CurrentPages - 1;
+       psFWFreeList->ui32AllocatedPageCount    = 0;
+       psFWFreeList->ui32AllocatedMMUPageCount = 0;
+
+       DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc);
+
+       /* Check the Freelist checksum if required (as the list is fully populated) */
+       if (psFreeList->bCheckFreelist)
+       {
+               IMG_UINT64  ui64CheckSum;
+
+               _CheckFreelist(psFreeList, psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages, psFreeList->ui64FreelistChecksum, &ui64CheckSum);
+       }
+
+       return eError;
+}
+
+
+void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                              IMG_UINT32 ui32FreelistsCount,
+                                              const IMG_UINT32 *paui32Freelists)
+{
+       PVRSRV_ERROR      eError = PVRSRV_OK;
+       DLLIST_NODE       *psNode, *psNext;
+       IMG_UINT32        ui32Loop;
+       RGXFWIF_KCCB_CMD  sTACCBCmd;
+#if !defined(SUPPORT_SHADOW_FREELISTS)
+       DLLIST_NODE       *psNodeHWRTData, *psNextHWRTData;
+       RGX_KM_HW_RT_DATASET *psKMHWRTDataSet;
+       RGXFWIF_HWRTDATA     *psHWRTData;
+#endif
+       IMG_UINT32        ui32FinalFreelistsCount = 0;
+       IMG_UINT32        aui32FinalFreelists[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT * 2]; /* Worst-case is double what we are sent */
+
+       PVR_ASSERT(psDevInfo != NULL);
+       PVR_ASSERT(ui32FreelistsCount <= RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT);
+       if (ui32FreelistsCount > RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT)
+       {
+               ui32FreelistsCount = RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT;
+       }
+
+       //PVR_DPF((PVR_DBG_ERROR, "FreeList RECONSTRUCTION: %u freelist(s) requested for reconstruction", ui32FreelistsCount));
+
+       /*
+        *  Initialise the response command (in case we don't find a freelist ID).
+        *  Also copy the list to the 'final' freelist array.
+        */
+       sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE;
+       sTACCBCmd.uCmdData.sFreeListsReconstructionData.ui32FreelistsCount = ui32FreelistsCount;
+
+       for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++)
+       {
+               sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] = paui32Freelists[ui32Loop] |
+                               RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG;
+               aui32FinalFreelists[ui32Loop] = paui32Freelists[ui32Loop];
+       }
+
+       ui32FinalFreelistsCount = ui32FreelistsCount;
+
+       /*
+        *  The list of freelists we have been given for reconstruction will
+        *  consist of local and global freelists (maybe MMU as well). Any
+        *  local freelists should have their global list specified as well.
+        *  There may be cases where the global freelist is not given (in
+        *  cases of partial setups before a poll failure for example). To
+        *  handle that we must first ensure every local freelist has a global
+        *  freelist specified, otherwise we add that to the 'final' list.
+        *  This final list of freelists is created in a first pass.
+        *
+        *  Even with the global freelists listed, there may be other local
+        *  freelists not listed, which are going to have their global freelist
+        *  reconstructed. Therefore we have to find those freelists as well,
+        *  meaning we must iterate the entire list of freelists to find which
+        *  ones must be reconstructed. This is the second pass.
+        */
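+       /*
+        *  Illustrative scenario (hypothetical IDs): if the FW requests {L1},
+        *  where local freelist L1 references global freelist G1 that is not in
+        *  the request, the first pass appends G1 to the final list. If another
+        *  local freelist L2 also references G1, the second pass reconstructs L2
+        *  as well, because its global freelist is about to be reconstructed.
+        */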
+       OSLockAcquire(psDevInfo->hLockFreeList);
+       dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext)
+       {
+               RGX_FREELIST  *psFreeList   = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode);
+               IMG_BOOL      bInList       = IMG_FALSE;
+               IMG_BOOL      bGlobalInList = IMG_FALSE;
+
+               /* Check if this local freelist is in the list and ensure its global is too. */
+               if (psFreeList->ui32FreelistGlobalID != 0)
+               {
+                       for (ui32Loop = 0; ui32Loop < ui32FinalFreelistsCount; ui32Loop++)
+                       {
+                               if (aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistID)
+                               {
+                                       bInList = IMG_TRUE;
+                               }
+                               if (aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistGlobalID)
+                               {
+                                       bGlobalInList = IMG_TRUE;
+                               }
+                       }
+
+                       if (bInList  &&  !bGlobalInList)
+                       {
+                               aui32FinalFreelists[ui32FinalFreelistsCount] = psFreeList->ui32FreelistGlobalID;
+                               ui32FinalFreelistsCount++;
+                       }
+               }
+       }
+       dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext)
+       {
+               RGX_FREELIST  *psFreeList  = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode);
+               IMG_BOOL      bReconstruct = IMG_FALSE;
+
+               /*
+                *  Check if this freelist needs to be reconstructed (was it requested
+                *  or is its global freelist going to be reconstructed)...
+                */
+               for (ui32Loop = 0; ui32Loop < ui32FinalFreelistsCount; ui32Loop++)
+               {
+                       if (aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistID  ||
+                           aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistGlobalID)
+                       {
+                               bReconstruct = IMG_TRUE;
+                               break;
+                       }
+               }
+
+               if (bReconstruct)
+               {
+                       eError = RGXReconstructFreeList(psFreeList);
+                       if (eError == PVRSRV_OK)
+                       {
+#if !defined(SUPPORT_SHADOW_FREELISTS)
+                               /* Mark all HWRTDatas of reconstructing local freelists as HWR (applies to TA/3D work not finished yet) */
+                               dllist_foreach_node(&psFreeList->sNodeHWRTDataHead, psNodeHWRTData, psNextHWRTData)
+                               {
+                                       psKMHWRTDataSet = IMG_CONTAINER_OF(psNodeHWRTData, RGX_KM_HW_RT_DATASET, sNodeHWRTData);
+                                       eError = DevmemAcquireCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc, (void **)&psHWRTData);
+                                       if (eError != PVRSRV_OK)
+                                       {
+                                               PVR_DPF((PVR_DBG_ERROR,
+                                                                       "DevmemAcquireCpuVirtAddr failed during reconstruction of FreeList, FwMemDesc(%p), psHWRTData(%p)",
+                                                                       psKMHWRTDataSet->psHWRTDataFwMemDesc,
+                                                                       psHWRTData));
+                                               continue;
+                                       }
+
+                                       psHWRTData->eState = RGXFWIF_RTDATA_STATE_HWR;
+                                       psHWRTData->ui32HWRTDataFlags &= ~HWRTDATA_HAS_LAST_TA;
+
+                                       DevmemReleaseCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc);
+                               }
+#endif
+
+                               /* Update the response for this freelist if it was specifically requested for reconstruction. */
+                               for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++)
+                               {
+                                       if (paui32Freelists[ui32Loop] == psFreeList->ui32FreelistID)
+                                       {
+                                               /* Reconstruction of this requested freelist was successful... */
+                                               sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] &= ~RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG;
+                                               break;
+                                       }
+                               }
+                       }
+                       else
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                               "Reconstruction of FreeList %p failed (%s)",
+                                               psFreeList,
+                                               PVRSRVGetErrorString(eError)));
+                       }
+               }
+       }
+       OSLockRelease(psDevInfo->hLockFreeList);
+
+       /* Check that all freelists were found and reconstructed... */
+       for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++)
+       {
+               PVR_ASSERT((sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] &
+                           RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG) == 0);
+       }
+
+       /* send feedback */
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               eError = RGXScheduleCommand(psDevInfo,
+                               RGXFWIF_DM_GEOM,
+                               &sTACCBCmd,
+                               PDUMP_FLAGS_NONE);
+               if (eError != PVRSRV_ERROR_RETRY)
+               {
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       /* The kernel CCB should never fill up, as the FW processes commands right away */
+       PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+/* Create a single HWRTData instance */
+static PVRSRV_ERROR RGXCreateHWRTData_aux(
+               CONNECTION_DATA      *psConnection,
+               PVRSRV_DEVICE_NODE      *psDeviceNode,
+               IMG_DEV_VIRTADDR        psVHeapTableDevVAddr,
+               IMG_DEV_VIRTADDR                psPMMListDevVAddr, /* per-HWRTData */
+               RGX_FREELIST                    *apsFreeLists[RGXFW_MAX_FREELISTS],
+               IMG_DEV_VIRTADDR                sTailPtrsDevVAddr,
+               IMG_DEV_VIRTADDR        sMacrotileArrayDevVAddr, /* per-HWRTData */
+               IMG_DEV_VIRTADDR        sRgnHeaderDevVAddr, /* per-HWRTData */
+               IMG_DEV_VIRTADDR        sRTCDevVAddr,
+               IMG_UINT16                      ui16MaxRTs,
+               RGX_HWRTDATA_COMMON_COOKIE      *psHWRTDataCommonCookie,
+               RGX_KM_HW_RT_DATASET **ppsKMHWRTDataSet) /* per-HWRTData */
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       IMG_UINT32 ui32Loop;
+
+       /* KM cookie storing all the FW/HW data */
+       RGX_KM_HW_RT_DATASET *psKMHWRTDataSet;
+
+       /* local pointers for memory descriptors of FW allocations */
+       DEVMEM_MEMDESC *psHWRTDataFwMemDesc = NULL;
+       DEVMEM_MEMDESC *psRTArrayFwMemDesc = NULL;
+       DEVMEM_MEMDESC *psRendersAccArrayFwMemDesc = NULL;
+
+       /* local pointer for CPU-mapped [FW]HWRTData */
+       RGXFWIF_HWRTDATA *psHWRTData = NULL;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       /* Prepare the HW RT DataSet struct */
+       psKMHWRTDataSet = OSAllocZMem(sizeof(*psKMHWRTDataSet));
+       if (psKMHWRTDataSet == NULL)
+       {
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto AllocError;
+       }
+
+       *ppsKMHWRTDataSet = psKMHWRTDataSet;
+       psKMHWRTDataSet->psDeviceNode = psDeviceNode;
+
+       psKMHWRTDataSet->psHWRTDataCommonCookie = psHWRTDataCommonCookie;
+
+       psDevInfo = psDeviceNode->pvDevice;
+
+       /*
+        * This FW RT-Data is only mapped into kernel for initialisation.
+        * Otherwise this allocation is only used by the FW.
+        * Therefore the GPU cache doesn't need coherency, and write-combine will
+        * suffice on the CPU side (WC buffer will be flushed at the first TA-kick)
+        */
+       eError = DevmemFwAllocate(psDevInfo,
+                       sizeof(RGXFWIF_HWRTDATA),
+                       RGX_FWCOMCTX_ALLOCFLAGS,
+                       "FwHWRTData",
+                       &psHWRTDataFwMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: DevmemAllocate for RGX_FWIF_HWRTDATA failed",
+                               __func__));
+               goto FWRTDataAllocateError;
+       }
+
+       psKMHWRTDataSet->psHWRTDataFwMemDesc = psHWRTDataFwMemDesc;
+       eError = RGXSetFirmwareAddress( &psKMHWRTDataSet->sHWRTDataFwAddr,
+                                                       psHWRTDataFwMemDesc,
+                                                       0,
+                                                       RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", FWRTDataFwAddrError);
+
+       eError = DevmemAcquireCpuVirtAddr(psHWRTDataFwMemDesc,
+                                                                         (void **)&psHWRTData);
+       PVR_LOG_GOTO_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWRTDataCpuMapError);
+
+       psHWRTData->psVHeapTableDevVAddr = psVHeapTableDevVAddr;
+
+       psHWRTData->sHWRTDataCommonFwAddr = psHWRTDataCommonCookie->sHWRTDataCommonFwAddr;
+
+       psHWRTData->psPMMListDevVAddr = psPMMListDevVAddr;
+
+       psHWRTData->sTailPtrsDevVAddr     = sTailPtrsDevVAddr;
+       psHWRTData->sMacrotileArrayDevVAddr = sMacrotileArrayDevVAddr;
+       psHWRTData->sRgnHeaderDevVAddr          = sRgnHeaderDevVAddr;
+       psHWRTData->sRTCDevVAddr                        = sRTCDevVAddr;
+
+       OSLockAcquire(psDevInfo->hLockFreeList);
+       for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++)
+       {
+               psKMHWRTDataSet->apsFreeLists[ui32Loop] = apsFreeLists[ui32Loop];
+               psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount++;
+               psHWRTData->apsFreeLists[ui32Loop].ui32Addr = psKMHWRTDataSet->apsFreeLists[ui32Loop]->sFreeListFWDevVAddr.ui32Addr;
+               /* Invalid initial snapshot value; the snapshot is always taken during the
+                * first kick and hence the value gets replaced then anyway, so it is safe
+                * to set it to 0.
+                */
+               psHWRTData->aui32FreeListHWRSnapshot[ui32Loop] = 0;
+       }
+#if !defined(SUPPORT_SHADOW_FREELISTS)
+       dllist_add_to_tail(&apsFreeLists[RGXFW_LOCAL_FREELIST]->sNodeHWRTDataHead, &(psKMHWRTDataSet->sNodeHWRTData));
+#endif
+       OSLockRelease(psDevInfo->hLockFreeList);
+
+       {
+               RGXFWIF_RTA_CTL *psRTACtl = &psHWRTData->sRTACtl;
+
+               psRTACtl->ui32RenderTargetIndex = 0;
+               psRTACtl->ui32ActiveRenderTargets = 0;
+               psRTACtl->sValidRenderTargets.ui32Addr = 0;
+               psRTACtl->sRTANumPartialRenders.ui32Addr = 0;
+               psRTACtl->ui32MaxRTs = (IMG_UINT32) ui16MaxRTs;
+
+               if (ui16MaxRTs > 1)
+               {
+                       PDUMPCOMMENT(psDeviceNode, "Allocate memory for shadow render target cache");
+                       eError = DevmemFwAllocate(psDevInfo,
+                                       ui16MaxRTs * sizeof(IMG_UINT32),
+                                       PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                                       PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+                                       PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                                       PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                                       PVRSRV_MEMALLOCFLAG_GPU_UNCACHED |
+                                       PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+                                       PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN),
+                                       "FwShadowRTCache",
+                                       &psRTArrayFwMemDesc);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                               "%s: Failed to allocate %u entries for render target array (%s)",
+                                               __func__,
+                                               ui16MaxRTs, PVRSRVGetErrorString(eError)));
+                               goto FWAllocateRTArryError;
+                       }
+
+                       psKMHWRTDataSet->psRTArrayFwMemDesc = psRTArrayFwMemDesc;
+                       eError = RGXSetFirmwareAddress(&psRTACtl->sValidRenderTargets,
+                                       psRTArrayFwMemDesc,
+                                       0,
+                                       RFW_FWADDR_FLAG_NONE);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", FWAllocateRTArryFwAddrError);
+
+                       PDUMPCOMMENT(psDeviceNode, "Allocate memory for tracking renders accumulation");
+                       eError = DevmemFwAllocate(psDevInfo,
+                                       ui16MaxRTs * sizeof(IMG_UINT32),
+                                       PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                                       PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+                                       PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                                       PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                                       PVRSRV_MEMALLOCFLAG_GPU_UNCACHED |
+                                       PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+                                       PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN),
+                                       "FwRendersAccumulation",
+                                       &psRendersAccArrayFwMemDesc);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                               "%s: Failed to allocate %u entries for render target array (%s) (renders accumulation)",
+                                               __func__,
+                                               ui16MaxRTs, PVRSRVGetErrorString(eError)));
+                               goto FWAllocateRTAccArryError;
+                       }
+                       psKMHWRTDataSet->psRendersAccArrayFwMemDesc = psRendersAccArrayFwMemDesc;
+                       eError = RGXSetFirmwareAddress(&psRTACtl->sRTANumPartialRenders,
+                                       psRendersAccArrayFwMemDesc,
+                                       0,
+                                       RFW_FWADDR_FLAG_NONE);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:3", FWAllocRTAccArryFwAddrError);
+               }
+       }
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDeviceNode, "Dump HWRTData 0x%08X", psKMHWRTDataSet->sHWRTDataFwAddr.ui32Addr);
+       DevmemPDumpLoadMem(psKMHWRTDataSet->psHWRTDataFwMemDesc, 0, sizeof(*psHWRTData), PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+       DevmemReleaseCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc);
+       return PVRSRV_OK;
+
+FWAllocRTAccArryFwAddrError:
+       DevmemFwUnmapAndFree(psDevInfo, psRendersAccArrayFwMemDesc);
+FWAllocateRTAccArryError:
+       RGXUnsetFirmwareAddress(psKMHWRTDataSet->psRTArrayFwMemDesc);
+FWAllocateRTArryFwAddrError:
+       DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psRTArrayFwMemDesc);
+FWAllocateRTArryError:
+       OSLockAcquire(psDevInfo->hLockFreeList);
+       for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++)
+       {
+               PVR_ASSERT(psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount > 0);
+               psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount--;
+       }
+       OSLockRelease(psDevInfo->hLockFreeList);
+       DevmemReleaseCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc);
+FWRTDataCpuMapError:
+       RGXUnsetFirmwareAddress(psKMHWRTDataSet->psHWRTDataFwMemDesc);
+FWRTDataFwAddrError:
+       DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psHWRTDataFwMemDesc);
+FWRTDataAllocateError:
+       *ppsKMHWRTDataSet = NULL;
+       OSFreeMem(psKMHWRTDataSet);
+
+AllocError:
+       return eError;
+}
+
+static void RGXDestroyHWRTData_aux(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       IMG_UINT32 ui32Loop;
+
+       if (psKMHWRTDataSet == NULL)
+       {
+               return;
+       }
+
+       psDevInfo = psKMHWRTDataSet->psDeviceNode->pvDevice;
+
+       if (psKMHWRTDataSet->psRTArrayFwMemDesc)
+       {
+               RGXUnsetFirmwareAddress(psKMHWRTDataSet->psRTArrayFwMemDesc);
+               DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psRTArrayFwMemDesc);
+       }
+
+       if (psKMHWRTDataSet->psRendersAccArrayFwMemDesc)
+       {
+               RGXUnsetFirmwareAddress(psKMHWRTDataSet->psRendersAccArrayFwMemDesc);
+               DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psRendersAccArrayFwMemDesc);
+       }
+
+       /* Decrease freelist refcount */
+       OSLockAcquire(psDevInfo->hLockFreeList);
+       for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++)
+       {
+               PVR_ASSERT(psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount > 0);
+               psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount--;
+       }
+#if !defined(SUPPORT_SHADOW_FREELISTS)
+       dllist_remove_node(&psKMHWRTDataSet->sNodeHWRTData);
+#endif
+       OSLockRelease(psDevInfo->hLockFreeList);
+
+       /* Freeing the memory has to happen _after_ removing the HWRTData from the freelist
+        * otherwise we risk traversing the freelist to find a pointer from a freed data structure */
+       RGXUnsetFirmwareAddress(psKMHWRTDataSet->psHWRTDataFwMemDesc);
+       DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psHWRTDataFwMemDesc);
+
+       OSFreeMem(psKMHWRTDataSet);
+}
+
+/* Create set of HWRTData(s) and bind it with a shared FW HWRTDataCommon */
+PVRSRV_ERROR RGXCreateHWRTDataSet(CONNECTION_DATA      *psConnection,
+               PVRSRV_DEVICE_NODE      *psDeviceNode,
+               IMG_DEV_VIRTADDR        asVHeapTableDevVAddr[RGXMKIF_NUM_GEOMDATAS],
+               IMG_DEV_VIRTADDR                asPMMListDevVAddr[RGXMKIF_NUM_RTDATAS],
+               RGX_FREELIST                    *apsFreeLists[RGXMKIF_NUM_RTDATA_FREELISTS],
+               IMG_UINT32           ui32ScreenPixelMax,
+               IMG_UINT64           ui64MultiSampleCtl,
+               IMG_UINT64           ui64FlippedMultiSampleCtl,
+               IMG_UINT32           ui32TPCStride,
+               IMG_DEV_VIRTADDR                asTailPtrsDevVAddr[RGXMKIF_NUM_GEOMDATAS],
+               IMG_UINT32           ui32TPCSize,
+               IMG_UINT32           ui32TEScreen,
+               IMG_UINT32           ui32TEAA,
+               IMG_UINT32           ui32TEMTILE1,
+               IMG_UINT32           ui32TEMTILE2,
+               IMG_UINT32           ui32MTileStride,
+               IMG_UINT32                 ui32ISPMergeLowerX,
+               IMG_UINT32                 ui32ISPMergeLowerY,
+               IMG_UINT32                 ui32ISPMergeUpperX,
+               IMG_UINT32                 ui32ISPMergeUpperY,
+               IMG_UINT32                 ui32ISPMergeScaleX,
+               IMG_UINT32                 ui32ISPMergeScaleY,
+               IMG_DEV_VIRTADDR        asMacrotileArrayDevVAddr[RGXMKIF_NUM_RTDATAS],
+               IMG_DEV_VIRTADDR        asRgnHeaderDevVAddr[RGXMKIF_NUM_RTDATAS],
+               IMG_DEV_VIRTADDR        asRTCDevVAddr[RGXMKIF_NUM_GEOMDATAS],
+               IMG_UINT32                      uiRgnHeaderSize,
+               IMG_UINT32                      ui32ISPMtileSize,
+               IMG_UINT16                      ui16MaxRTs,
+               RGX_KM_HW_RT_DATASET *pasKMHWRTDataSet[RGXMKIF_NUM_RTDATAS])
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32RTDataID;
+       PVRSRV_RGXDEV_INFO              *psDevInfo = psDeviceNode->pvDevice;
+
+       RGX_HWRTDATA_COMMON_COOKIE      *psHWRTDataCommonCookie;
+       RGXFWIF_HWRTDATA_COMMON         *psHWRTDataCommon;
+       DEVMEM_MEMDESC                          *psHWRTDataCommonFwMemDesc;
+       RGXFWIF_DEV_VIRTADDR            sHWRTDataCommonFwAddr;
+
+       /* Prepare KM cleanup object for HWRTDataCommon FW object */
+       psHWRTDataCommonCookie = OSAllocZMem(sizeof(*psHWRTDataCommonCookie));
+       if (psHWRTDataCommonCookie == NULL)
+       {
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto err_HWRTDataCommonCookieAlloc;
+       }
+
+       /*
+        * This FW common context is only mapped into kernel for initialisation.
+        * Otherwise this allocation is only used by the FW.
+        * Therefore the GPU cache doesn't need coherency, and write-combine will
+        * suffice on the CPU side (WC buffer will be flushed at the first TA-kick)
+        */
+       eError = DevmemFwAllocate(psDevInfo,
+                       sizeof(RGXFWIF_HWRTDATA_COMMON),
+                       RGX_FWCOMCTX_ALLOCFLAGS,
+                       "FwHWRTDataCommon",
+                       &psHWRTDataCommonFwMemDesc);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: DevmemAllocate for FwHWRTDataCommon failed", __func__));
+               goto err_HWRTDataCommonAlloc;
+       }
+       eError = RGXSetFirmwareAddress(&sHWRTDataCommonFwAddr, psHWRTDataCommonFwMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", err_HWRTDataCommonFwAddr);
+
+       eError = DevmemAcquireCpuVirtAddr(psHWRTDataCommonFwMemDesc, (void **)&psHWRTDataCommon);
+       PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", err_HWRTDataCommonVA);
+
+       psHWRTDataCommon->bTACachesNeedZeroing = IMG_FALSE;
+       psHWRTDataCommon->ui32ScreenPixelMax    = ui32ScreenPixelMax;
+       psHWRTDataCommon->ui64MultiSampleCtl = ui64MultiSampleCtl;
+       psHWRTDataCommon->ui64FlippedMultiSampleCtl = ui64FlippedMultiSampleCtl;
+       psHWRTDataCommon->ui32TPCStride         = ui32TPCStride;
+       psHWRTDataCommon->ui32TPCSize           = ui32TPCSize;
+       psHWRTDataCommon->ui32TEScreen          = ui32TEScreen;
+       psHWRTDataCommon->ui32TEAA              = ui32TEAA;
+       psHWRTDataCommon->ui32TEMTILE1          = ui32TEMTILE1;
+       psHWRTDataCommon->ui32TEMTILE2          = ui32TEMTILE2;
+       psHWRTDataCommon->ui32MTileStride       = ui32MTileStride;
+       psHWRTDataCommon->ui32ISPMergeLowerX = ui32ISPMergeLowerX;
+       psHWRTDataCommon->ui32ISPMergeLowerY = ui32ISPMergeLowerY;
+       psHWRTDataCommon->ui32ISPMergeUpperX = ui32ISPMergeUpperX;
+       psHWRTDataCommon->ui32ISPMergeUpperY = ui32ISPMergeUpperY;
+       psHWRTDataCommon->ui32ISPMergeScaleX = ui32ISPMergeScaleX;
+       psHWRTDataCommon->ui32ISPMergeScaleY = ui32ISPMergeScaleY;
+       psHWRTDataCommon->uiRgnHeaderSize                       = uiRgnHeaderSize;
+       psHWRTDataCommon->ui32ISPMtileSize              = ui32ISPMtileSize;
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDeviceNode, "Dump HWRTDataCommon");
+       DevmemPDumpLoadMem(psHWRTDataCommonFwMemDesc, 0, sizeof(*psHWRTDataCommon), PDUMP_FLAGS_CONTINUOUS);
+#endif
+       DevmemReleaseCpuVirtAddr(psHWRTDataCommonFwMemDesc);
+
+       psHWRTDataCommonCookie->ui32RefCount = 0;
+       psHWRTDataCommonCookie->psHWRTDataCommonFwMemDesc = psHWRTDataCommonFwMemDesc;
+       psHWRTDataCommonCookie->sHWRTDataCommonFwAddr = sHWRTDataCommonFwAddr;
+
+       /*
+        * Here we create a set of HWRTData(s); the number of elements in the
+        * set equals RGXMKIF_NUM_RTDATAS.
+        */
+
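+       /*
+        * Illustrative mapping (hypothetical counts): if RGXMKIF_NUM_RTDATAS
+        * were 4 and RGXMKIF_NUM_GEOMDATAS were 2, RTData instances 0..3 would
+        * take the per-geometry inputs (VHeap table, tail pointers, RTC, free
+        * list block) of geometry data 0, 1, 0, 1 respectively via the modulo
+        * indexing below, while the per-RTData inputs (PM MList, macrotile
+        * array, region headers) are indexed directly by ui32RTDataID.
+        */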
+       for (ui32RTDataID = 0; ui32RTDataID < RGXMKIF_NUM_RTDATAS; ui32RTDataID++)
+       {
+               eError = RGXCreateHWRTData_aux(
+                       psConnection,
+                       psDeviceNode,
+                       asVHeapTableDevVAddr[ui32RTDataID % RGXMKIF_NUM_GEOMDATAS],
+                       asPMMListDevVAddr[ui32RTDataID],
+                       &apsFreeLists[(ui32RTDataID % RGXMKIF_NUM_GEOMDATAS) * RGXFW_MAX_FREELISTS],
+                       asTailPtrsDevVAddr[ui32RTDataID % RGXMKIF_NUM_GEOMDATAS],
+                       asMacrotileArrayDevVAddr[ui32RTDataID],
+                       asRgnHeaderDevVAddr[ui32RTDataID],
+                       asRTCDevVAddr[ui32RTDataID % RGXMKIF_NUM_GEOMDATAS],
+                       ui16MaxRTs,
+                       psHWRTDataCommonCookie,
+                       &pasKMHWRTDataSet[ui32RTDataID]);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Failed to create HWRTData [slot %u] (%s)",
+                                       __func__,
+                                       ui32RTDataID,
+                                       PVRSRVGetErrorString(eError)));
+                       goto err_HWRTDataAlloc;
+               }
+               psHWRTDataCommonCookie->ui32RefCount += 1;
+       }
+
+       return PVRSRV_OK;
+
+err_HWRTDataAlloc:
+       PVR_DPF((PVR_DBG_WARNING, "%s: err_HWRTDataAlloc %u",
+                        __func__, psHWRTDataCommonCookie->ui32RefCount));
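+       /* Tear down, in reverse order, any HWRTData instances that were
+          successfully created before the failure. */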
+       if (pasKMHWRTDataSet)
+       {
+               for (ui32RTDataID = psHWRTDataCommonCookie->ui32RefCount; ui32RTDataID > 0; ui32RTDataID--)
+               {
+                       if (pasKMHWRTDataSet[ui32RTDataID-1] != NULL)
+                       {
+                               RGXDestroyHWRTData_aux(pasKMHWRTDataSet[ui32RTDataID-1]);
+                               pasKMHWRTDataSet[ui32RTDataID-1] = NULL;
+                       }
+               }
+       }
+err_HWRTDataCommonVA:
+       RGXUnsetFirmwareAddress(psHWRTDataCommonFwMemDesc);
+err_HWRTDataCommonFwAddr:
+       DevmemFwUnmapAndFree(psDevInfo, psHWRTDataCommonFwMemDesc);
+err_HWRTDataCommonAlloc:
+       OSFreeMem(psHWRTDataCommonCookie);
+err_HWRTDataCommonCookieAlloc:
+
+       return eError;
+}
+
+/* Destroy a single instance of HWRTData.
+   Additionally, destroy the HWRTDataCommon and HWRTDataCommonCookie objects
+   when this is the last HWRTData within the corresponding set of HWRTDatas.
+*/
+PVRSRV_ERROR RGXDestroyHWRTDataSet(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       PVRSRV_DEVICE_NODE *psDevNode;
+       PVRSRV_ERROR eError;
+       PRGXFWIF_HWRTDATA psHWRTData;
+       RGX_HWRTDATA_COMMON_COOKIE *psCommonCookie;
+
+       PVR_ASSERT(psKMHWRTDataSet);
+
+       psDevNode = psKMHWRTDataSet->psDeviceNode;
+       psDevInfo = psDevNode->pvDevice;
+
+       eError = RGXSetFirmwareAddress(&psHWRTData,
+                                      psKMHWRTDataSet->psHWRTDataFwMemDesc, 0,
+                                      RFW_FWADDR_NOREF_FLAG);
+       PVR_RETURN_IF_ERROR(eError);
+
+       /* Cleanup HWRTData */
+       eError = RGXFWRequestHWRTDataCleanUp(psDevNode, psHWRTData);
+       if (eError != PVRSRV_OK)
+       {
+               return eError;
+       }
+
+       psCommonCookie = psKMHWRTDataSet->psHWRTDataCommonCookie;
+
+       RGXDestroyHWRTData_aux(psKMHWRTDataSet);
+
+       /* We've got past potential PVRSRV_ERROR_RETRY events, so we are sure
+          that the HWRTDATA instance will be destroyed during this call.
+          Consequently, we decrease the ref count for HWRTDataCommonCookie.
+
+          NOTE: This ref count does not require locks or atomics.
+          -------------------------------------------------------
+            HWRTDatas bound into one pair are always destroyed sequentially,
+            within a single loop on the Client side.
+            The Common/Cookie objects always belong to only one pair of
+            HWRTDatas, and ref count is used to ensure that the Common/Cookie
+            objects will be destroyed after destruction of all HWRTDatas
+            within a single pair.
+       */
+       psCommonCookie->ui32RefCount--;
+
+       /* When ref count for HWRTDataCommonCookie hits ZERO
+        * we have to destroy the HWRTDataCommon [FW object] and the cookie
+        * [KM object] afterwards. */
+       if (psCommonCookie->ui32RefCount == 0)
+       {
+               RGXUnsetFirmwareAddress(psCommonCookie->psHWRTDataCommonFwMemDesc);
+
+               /* We don't need to flush the SLC before freeing.
+                * FW RequestCleanUp has already done that for HWRTData, so we're fine
+                * now. */
+
+               DevmemFwUnmapAndFree(psDevNode->pvDevice,
+                                    psCommonCookie->psHWRTDataCommonFwMemDesc);
+               OSFreeMem(psCommonCookie);
+       }
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA      *psConnection,
+               PVRSRV_DEVICE_NODE      *psDeviceNode,
+               IMG_HANDLE                      hMemCtxPrivData,
+               IMG_UINT32                      ui32MaxFLPages,
+               IMG_UINT32                      ui32InitFLPages,
+               IMG_UINT32                      ui32GrowFLPages,
+               IMG_UINT32           ui32GrowParamThreshold,
+               RGX_FREELIST                    *psGlobalFreeList,
+               IMG_BOOL                                bCheckFreelist,
+               IMG_DEV_VIRTADDR                sFreeListDevVAddr,
+               PMR                                     *psFreeListPMR,
+               IMG_DEVMEM_OFFSET_T     uiFreeListPMROffset,
+               RGX_FREELIST                    **ppsFreeList)
+{
+       PVRSRV_ERROR                            eError;
+       RGXFWIF_FREELIST                        *psFWFreeList;
+       DEVMEM_MEMDESC                          *psFWFreelistMemDesc;
+       RGX_FREELIST                            *psFreeList;
+       PVRSRV_RGXDEV_INFO                      *psDevInfo = psDeviceNode->pvDevice;
+
+       if (OSGetPageShift() > RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT)
+       {
+               IMG_UINT32 ui32Size, ui32NewInitFLPages, ui32NewMaxFLPages, ui32NewGrowFLPages;
+
+               /* Round up number of FL pages to the next multiple of the OS page size */
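+               /* This path is taken when the OS page size is larger than the PM
+                  physical page size, so that freelist allocations cover whole OS pages. */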
+
+               ui32Size = ui32InitFLPages << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
+               ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize());
+               ui32NewInitFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
+
+               ui32Size = ui32GrowFLPages << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
+               ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize());
+               ui32NewGrowFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
+
+               ui32Size = ui32MaxFLPages << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
+               ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize());
+               ui32NewMaxFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
+
+               PVR_DPF((PVR_DBG_WARNING, "%s: Increased number of PB pages: Init %u -> %u, Grow %u -> %u, Max %u -> %u",
+                                __func__, ui32InitFLPages, ui32NewInitFLPages, ui32GrowFLPages, ui32NewGrowFLPages, ui32MaxFLPages, ui32NewMaxFLPages));
+
+               ui32InitFLPages = ui32NewInitFLPages;
+               ui32GrowFLPages = ui32NewGrowFLPages;
+               ui32MaxFLPages = ui32NewMaxFLPages;
+       }
+
+       /* Allocate kernel freelist struct */
+       psFreeList = OSAllocZMem(sizeof(*psFreeList));
+       if (psFreeList == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: failed to allocate host data structure",
+                               __func__));
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto ErrorAllocHost;
+       }
+
+       /*
+        * This FW FreeList context is only mapped into kernel for initialisation
+        * and reconstruction (at other times it is not mapped and only used by the
+        * FW).
+        * Therefore the GPU cache doesn't need coherency, and write-combine will
+        * suffice on the CPU side (WC buffer will be flushed at the first TA-kick)
+        */
+       eError = DevmemFwAllocate(psDevInfo,
+                       sizeof(*psFWFreeList),
+                       PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                       PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+                       PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+                       PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                       PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                       PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+                       PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+                       PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                       PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC |
+                       PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+                       PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN),
+                       "FwFreeList",
+                       &psFWFreelistMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: DevmemFwAllocate for RGXFWIF_FREELIST failed",
+                               __func__));
+               goto FWFreeListAlloc;
+       }
+
+       /* Initialise host data structures */
+       psFreeList->psDevInfo = psDevInfo;
+       psFreeList->psConnection = psConnection;
+       psFreeList->psFreeListPMR = psFreeListPMR;
+       psFreeList->uiFreeListPMROffset = uiFreeListPMROffset;
+       psFreeList->psFWFreelistMemDesc = psFWFreelistMemDesc;
+       eError = RGXSetFirmwareAddress(&psFreeList->sFreeListFWDevVAddr, psFWFreelistMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", ErrorSetFwAddr);
+
+       /* psFreeList->ui32FreelistID set below with lock... */
+       psFreeList->ui32FreelistGlobalID = (psGlobalFreeList ? psGlobalFreeList->ui32FreelistID : 0);
+       psFreeList->ui32MaxFLPages = ui32MaxFLPages;
+       psFreeList->ui32InitFLPages = ui32InitFLPages;
+       psFreeList->ui32GrowFLPages = ui32GrowFLPages;
+       psFreeList->ui32CurrentFLPages = 0;
+       psFreeList->ui32ReadyFLPages = 0;
+       psFreeList->ui32GrowThreshold = ui32GrowParamThreshold;
+       psFreeList->ui64FreelistChecksum = 0;
+       psFreeList->ui32RefCount = 0;
+       psFreeList->bCheckFreelist = bCheckFreelist;
+       dllist_init(&psFreeList->sMemoryBlockHead);
+       dllist_init(&psFreeList->sMemoryBlockInitHead);
+#if !defined(SUPPORT_SHADOW_FREELISTS)
+       dllist_init(&psFreeList->sNodeHWRTDataHead);
+#endif
+       psFreeList->ownerPid = OSGetCurrentClientProcessIDKM();
+
+
+       /* Add to list of freelists */
+       OSLockAcquire(psDevInfo->hLockFreeList);
+       psFreeList->ui32FreelistID = psDevInfo->ui32FreelistCurrID++;
+       dllist_add_to_tail(&psDevInfo->sFreeListHead, &psFreeList->sNode);
+       OSLockRelease(psDevInfo->hLockFreeList);
+
+
+       /* Initialise FW data structure */
+       eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList);
+       PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", FWFreeListCpuMap);
+
+       {
+               const IMG_UINT32 ui32ReadyPages = _CalculateFreelistReadyPages(psFreeList, ui32InitFLPages);
+
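+               /* Part of the initial pages is handed to the FW as 'ready' rather than
+                  'current' (see _CalculateFreelistReadyPages()), so the initial stack top
+                  only covers the current pages. */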
+               psFWFreeList->ui32MaxPages = ui32MaxFLPages;
+               psFWFreeList->ui32CurrentPages = ui32InitFLPages - ui32ReadyPages;
+               psFWFreeList->ui32GrowPages = ui32GrowFLPages;
+               psFWFreeList->ui32CurrentStackTop = psFWFreeList->ui32CurrentPages - 1;
+               psFWFreeList->psFreeListDevVAddr = sFreeListDevVAddr;
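+               /* The list is consumed from the end of the maximum-sized freelist:
+                  skip the (Max - Current) unused 32-bit entries and align the result
+                  down to RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE. */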
+               psFWFreeList->ui64CurrentDevVAddr = (sFreeListDevVAddr.uiAddr +
+                               ((ui32MaxFLPages - psFWFreeList->ui32CurrentPages) * sizeof(IMG_UINT32))) &
+                                               ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1);
+               psFWFreeList->ui32FreeListID = psFreeList->ui32FreelistID;
+               psFWFreeList->bGrowPending = IMG_FALSE;
+               psFWFreeList->ui32ReadyPages = ui32ReadyPages;
+
+#if defined(SUPPORT_SHADOW_FREELISTS)
+               /* Get the FW Memory Context address... */
+               eError = RGXSetFirmwareAddress(&psFWFreeList->psFWMemContext,
+                                              RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData),
+                                              0, RFW_FWADDR_NOREF_FLAG);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: RGXSetFirmwareAddress for RGXFWIF_FWMEMCONTEXT failed",
+                                       __func__));
+                       DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc);
+                       goto FWFreeListCpuMap;
+               }
+#else
+               PVR_UNREFERENCED_PARAMETER(hMemCtxPrivData);
+#endif
+       }
+
+       PVR_DPF((PVR_DBG_MESSAGE,
+                       "Freelist %p created: Max pages 0x%08x, Init pages 0x%08x, "
+                       "Max FL base address 0x%016" IMG_UINT64_FMTSPECx ", "
+                       "Init FL base address 0x%016" IMG_UINT64_FMTSPECx,
+                       psFreeList,
+                       ui32MaxFLPages,
+                       ui32InitFLPages,
+                       sFreeListDevVAddr.uiAddr,
+                       psFWFreeList->ui64CurrentDevVAddr));
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDeviceNode, "Dump FW FreeList");
+       DevmemPDumpLoadMem(psFreeList->psFWFreelistMemDesc, 0, sizeof(*psFWFreeList), PDUMP_FLAGS_CONTINUOUS);
+
+       /*
+        * Separate dump of the Freelist's number of Pages and stack pointer.
+        * This makes it easy to modify the PB size in the out2.txt files.
+        */
+       PDUMPCOMMENT(psDeviceNode, "FreeList TotalPages");
+       DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc,
+                       offsetof(RGXFWIF_FREELIST, ui32CurrentPages),
+                       psFWFreeList->ui32CurrentPages,
+                       PDUMP_FLAGS_CONTINUOUS);
+       PDUMPCOMMENT(psDeviceNode, "FreeList StackPointer");
+       DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc,
+                       offsetof(RGXFWIF_FREELIST, ui32CurrentStackTop),
+                       psFWFreeList->ui32CurrentStackTop,
+                       PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+       DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc);
+
+
+       /* Add initial PB block */
+       eError = RGXGrowFreeList(psFreeList,
+                       ui32InitFLPages,
+                       &psFreeList->sMemoryBlockInitHead);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: failed to allocate initial memory block for free list 0x%016" IMG_UINT64_FMTSPECx " (%d)",
+                               __func__,
+                               sFreeListDevVAddr.uiAddr,
+                               eError));
+               goto FWFreeListCpuMap;
+       }
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+       /* Update Stats */
+       PVRSRVStatsUpdateFreelistStats(1, /* Add 1 to the appropriate counter (Requests by App)*/
+                       0,
+                       psFreeList->ui32InitFLPages,
+                       psFreeList->ui32NumHighPages,
+                       psFreeList->ownerPid);
+
+#endif
+
+       /* return values */
+       *ppsFreeList = psFreeList;
+
+       return PVRSRV_OK;
+
+       /* Error handling */
+
+FWFreeListCpuMap:
+       /* Remove freelist from list */
+       OSLockAcquire(psDevInfo->hLockFreeList);
+       dllist_remove_node(&psFreeList->sNode);
+       OSLockRelease(psDevInfo->hLockFreeList);
+       RGXUnsetFirmwareAddress(psFWFreelistMemDesc);
+
+ErrorSetFwAddr:
+       DevmemFwUnmapAndFree(psDevInfo, psFWFreelistMemDesc);
+
+FWFreeListAlloc:
+       OSFreeMem(psFreeList);
+
+ErrorAllocHost:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+
+/*
+       RGXDestroyFreeList
+ */
+PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList)
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32RefCount;
+
+       PVR_ASSERT(psFreeList);
+
+       OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
+       ui32RefCount = psFreeList->ui32RefCount;
+       OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+       if (ui32RefCount != 0)
+       {
+               /* Freelist still busy */
+               return PVRSRV_ERROR_RETRY;
+       }
+
+       /* Freelist is not in use => start firmware cleanup */
+       eError = RGXFWRequestFreeListCleanUp(psFreeList->psDevInfo,
+                       psFreeList->sFreeListFWDevVAddr);
+       if (eError != PVRSRV_OK)
+       {
+               /* Can happen if the firmware took too long to handle the cleanup request,
+                * or if SLC-flushes didn't go through (due to some GPU lockup) */
+               return eError;
+       }
+
+       /* Remove FreeList from linked list before we destroy it... */
+       OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
+       dllist_remove_node(&psFreeList->sNode);
+#if !defined(SUPPORT_SHADOW_FREELISTS)
+       /* Confirm all HWRTData nodes are freed before releasing freelist */
+       PVR_ASSERT(dllist_is_empty(&psFreeList->sNodeHWRTDataHead));
+#endif
+       OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+       if (psFreeList->bCheckFreelist)
+       {
+               RGXFWIF_FREELIST  *psFWFreeList;
+               IMG_UINT64        ui32CurrentStackTop;
+               IMG_UINT64        ui64CheckSum;
+
+               /* Get the current stack pointer for this free list */
+               DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList);
+               ui32CurrentStackTop = psFWFreeList->ui32CurrentStackTop;
+               DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc);
+
+               if (ui32CurrentStackTop == psFreeList->ui32CurrentFLPages-1)
+               {
+                       /* Do consistency tests (as the list is fully populated) */
+                       _CheckFreelist(psFreeList, psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages, psFreeList->ui64FreelistChecksum, &ui64CheckSum);
+               }
+               else
+               {
+                       /* Check for duplicate pages, but don't check the checksum as the list is not fully populated */
+                       _CheckFreelist(psFreeList, ui32CurrentStackTop+1, 0, &ui64CheckSum);
+               }
+       }
+
+       /* Destroy FW structures */
+       RGXUnsetFirmwareAddress(psFreeList->psFWFreelistMemDesc);
+       DevmemFwUnmapAndFree(psFreeList->psDevInfo, psFreeList->psFWFreelistMemDesc);
+
+       /* Remove grow shrink blocks */
+       while (!dllist_is_empty(&psFreeList->sMemoryBlockHead))
+       {
+               eError = RGXShrinkFreeList(&psFreeList->sMemoryBlockHead, psFreeList);
+               PVR_ASSERT(eError == PVRSRV_OK);
+       }
+
+       /* Remove initial PB block */
+       eError = RGXShrinkFreeList(&psFreeList->sMemoryBlockInitHead, psFreeList);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+       /* consistency checks */
+       PVR_ASSERT(dllist_is_empty(&psFreeList->sMemoryBlockInitHead));
+       PVR_ASSERT(psFreeList->ui32CurrentFLPages == 0);
+
+       /* free Freelist */
+       OSFreeMem(psFreeList);
+
+       return eError;
+}
+
+
+/*
+       RGXCreateZSBuffer
+ */
+PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection,
+               PVRSRV_DEVICE_NODE      *psDeviceNode,
+               DEVMEMINT_RESERVATION   *psReservation,
+               PMR                                             *psPMR,
+               PVRSRV_MEMALLOCFLAGS_T  uiMapFlags,
+               RGX_ZSBUFFER_DATA **ppsZSBuffer)
+{
+       PVRSRV_ERROR                            eError;
+       PVRSRV_RGXDEV_INFO                      *psDevInfo = psDeviceNode->pvDevice;
+       RGXFWIF_PRBUFFER                        *psFWZSBuffer;
+       RGX_ZSBUFFER_DATA                       *psZSBuffer;
+       DEVMEM_MEMDESC                          *psFWZSBufferMemDesc;
+       IMG_BOOL                                        bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiMapFlags) ? IMG_TRUE : IMG_FALSE;
+
+       /* Allocate host data structure */
+       psZSBuffer = OSAllocZMem(sizeof(*psZSBuffer));
+       if (psZSBuffer == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to allocate cleanup data structure for ZS-Buffer",
+                               __func__));
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto ErrorAllocCleanup;
+       }
+
+       /* Populate Host data */
+       psZSBuffer->psDevInfo = psDevInfo;
+       psZSBuffer->psReservation = psReservation;
+       psZSBuffer->psPMR = psPMR;
+       psZSBuffer->uiMapFlags = uiMapFlags;
+       psZSBuffer->ui32RefCount = 0;
+       psZSBuffer->bOnDemand = bOnDemand;
+       if (bOnDemand)
+       {
+               /* psZSBuffer->ui32ZSBufferID set below with lock... */
+               psZSBuffer->psMapping = NULL;
+
+               OSLockAcquire(psDevInfo->hLockZSBuffer);
+               psZSBuffer->ui32ZSBufferID = psDevInfo->ui32ZSBufferCurrID++;
+               dllist_add_to_tail(&psDevInfo->sZSBufferHead, &psZSBuffer->sNode);
+               OSLockRelease(psDevInfo->hLockZSBuffer);
+       }
+
+       /* Allocate firmware memory for ZS-Buffer. */
+       PDUMPCOMMENT(psDeviceNode, "Allocate firmware ZS-Buffer data structure");
+       eError = DevmemFwAllocate(psDevInfo,
+                       sizeof(*psFWZSBuffer),
+                       PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                       PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+                       PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+                       PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                       PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                       PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+                       PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+                       PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                       PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC |
+                       PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+                       PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN),
+                       "FwZSBuffer",
+                       &psFWZSBufferMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to allocate firmware ZS-Buffer (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+               goto ErrorAllocFWZSBuffer;
+       }
+       psZSBuffer->psFWZSBufferMemDesc = psFWZSBufferMemDesc;
+
+       /* Temporarily map the firmware render context to the kernel. */
+       eError = DevmemAcquireCpuVirtAddr(psFWZSBufferMemDesc,
+                       (void **)&psFWZSBuffer);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to map firmware ZS-Buffer (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+               goto ErrorAcquireFWZSBuffer;
+       }
+
+       /* Populate FW ZS-Buffer data structure */
+       psFWZSBuffer->bOnDemand = bOnDemand;
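+       /* On-demand ZS-Buffers start out unbacked; physical backing is attached
+        * later via RGXBackingZSBuffer (see RGXProcessRequestZSBufferBacking). */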
+       psFWZSBuffer->eState = (bOnDemand) ? RGXFWIF_PRBUFFER_UNBACKED : RGXFWIF_PRBUFFER_BACKED;
+       psFWZSBuffer->ui32BufferID = psZSBuffer->ui32ZSBufferID;
+
+       /* Get firmware address of ZS-Buffer. */
+       eError = RGXSetFirmwareAddress(&psZSBuffer->sZSBufferFWDevVAddr, psFWZSBufferMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", ErrorSetFwAddr);
+
+       /* Dump the ZS-Buffer and the memory content */
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDeviceNode, "Dump firmware ZS-Buffer");
+       DevmemPDumpLoadMem(psFWZSBufferMemDesc, 0, sizeof(*psFWZSBuffer), PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+       /* Release address acquired above. */
+       DevmemReleaseCpuVirtAddr(psFWZSBufferMemDesc);
+
+
+       /* define return value */
+       *ppsZSBuffer = psZSBuffer;
+
+       PVR_DPF((PVR_DBG_MESSAGE, "ZS-Buffer [%p] created (%s)",
+                       psZSBuffer,
+                       (bOnDemand) ? "On-Demand": "Up-front"));
+
+       psZSBuffer->owner = OSGetCurrentClientProcessIDKM();
+
+       return PVRSRV_OK;
+
+       /* error handling */
+
+ErrorSetFwAddr:
+       DevmemReleaseCpuVirtAddr(psFWZSBufferMemDesc);
+ErrorAcquireFWZSBuffer:
+       DevmemFwUnmapAndFree(psDevInfo, psFWZSBufferMemDesc);
+
+ErrorAllocFWZSBuffer:
+       OSFreeMem(psZSBuffer);
+
+ErrorAllocCleanup:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+
+/*
+       RGXDestroyZSBuffer
+ */
+PVRSRV_ERROR RGXDestroyZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer)
+{
+       POS_LOCK hLockZSBuffer;
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(psZSBuffer);
+       hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer;
+
+       /* Request ZS Buffer cleanup */
+       eError = RGXFWRequestZSBufferCleanUp(psZSBuffer->psDevInfo,
+                       psZSBuffer->sZSBufferFWDevVAddr);
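+       /* Only tear down the FW object and the host structure once the FW has
+        * acknowledged the cleanup; on error (including PVRSRV_ERROR_RETRY)
+        * nothing is freed and the error is returned to the caller. */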
+       if (eError == PVRSRV_OK)
+       {
+               /* Free the firmware render context. */
+               RGXUnsetFirmwareAddress(psZSBuffer->psFWZSBufferMemDesc);
+               DevmemFwUnmapAndFree(psZSBuffer->psDevInfo, psZSBuffer->psFWZSBufferMemDesc);
+
+               /* Remove Deferred Allocation from list */
+               if (psZSBuffer->bOnDemand)
+               {
+                       OSLockAcquire(hLockZSBuffer);
+                       PVR_ASSERT(dllist_node_is_in_list(&psZSBuffer->sNode));
+                       dllist_remove_node(&psZSBuffer->sNode);
+                       OSLockRelease(hLockZSBuffer);
+               }
+
+               PVR_ASSERT(psZSBuffer->ui32RefCount == 0);
+
+               PVR_DPF((PVR_DBG_MESSAGE, "ZS-Buffer [%p] destroyed", psZSBuffer));
+
+               /* Free ZS-Buffer host data structure */
+               OSFreeMem(psZSBuffer);
+
+       }
+
+       return eError;
+}
+
+PVRSRV_ERROR
+RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer)
+{
+       POS_LOCK hLockZSBuffer;
+       PVRSRV_ERROR eError;
+
+       if (!psZSBuffer)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       if (!psZSBuffer->bOnDemand)
+       {
+               /* Only deferred allocations can be populated */
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       PVR_DPF((PVR_DBG_MESSAGE,
+                       "ZS Buffer [%p, ID=0x%08x]: Physical backing requested",
+                       psZSBuffer,
+                       psZSBuffer->ui32ZSBufferID));
+       hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer;
+
+       OSLockAcquire(hLockZSBuffer);
+
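+       /* The PMR is mapped only on the first backing request; subsequent
+        * requests simply take another reference. */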
+       if (psZSBuffer->ui32RefCount == 0)
+       {
+               if (psZSBuffer->bOnDemand)
+               {
+                       IMG_HANDLE hDevmemHeap;
+
+                       PVR_ASSERT(psZSBuffer->psMapping == NULL);
+
+                       /* Get Heap */
+                       eError = DevmemServerGetHeapHandle(psZSBuffer->psReservation, &hDevmemHeap);
+                       if (unlikely(hDevmemHeap == (IMG_HANDLE)NULL))
+                       {
+                               OSLockRelease(hLockZSBuffer);
+                               return PVRSRV_ERROR_INVALID_HEAP;
+                       }
+
+                       eError = DevmemIntMapPMR(hDevmemHeap,
+                                       psZSBuffer->psReservation,
+                                       psZSBuffer->psPMR,
+                                       psZSBuffer->uiMapFlags,
+                                       &psZSBuffer->psMapping);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                               "Unable to populate ZS Buffer [%p, ID=0x%08x] (%s)",
+                                               psZSBuffer,
+                                               psZSBuffer->ui32ZSBufferID,
+                                               PVRSRVGetErrorString(eError)));
+                               OSLockRelease(hLockZSBuffer);
+                               return eError;
+
+                       }
+                       PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing acquired",
+                                       psZSBuffer,
+                                       psZSBuffer->ui32ZSBufferID));
+               }
+       }
+
+       /* Increase refcount */
+       psZSBuffer->ui32RefCount++;
+
+       OSLockRelease(hLockZSBuffer);
+
+       return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer,
+               RGX_POPULATION **ppsPopulation)
+{
+       RGX_POPULATION *psPopulation;
+       PVRSRV_ERROR eError;
+
+       psZSBuffer->ui32NumReqByApp++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+       PVRSRVStatsUpdateZSBufferStats(1, 0, psZSBuffer->owner);
+#endif
+
+       /* Do the backing */
+       eError = RGXBackingZSBuffer(psZSBuffer);
+       if (eError != PVRSRV_OK)
+       {
+               goto OnErrorBacking;
+       }
+
+       /* Create the handle to the backing */
+       psPopulation = OSAllocMem(sizeof(*psPopulation));
+       if (psPopulation == NULL)
+       {
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto OnErrorAlloc;
+       }
+
+       psPopulation->psZSBuffer = psZSBuffer;
+
+       /* return value */
+       *ppsPopulation = psPopulation;
+
+       return PVRSRV_OK;
+
+OnErrorAlloc:
+       RGXUnbackingZSBuffer(psZSBuffer);
+
+OnErrorBacking:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+PVRSRV_ERROR
+RGXUnbackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer)
+{
+       POS_LOCK hLockZSBuffer;
+       PVRSRV_ERROR eError;
+
+       if (!psZSBuffer)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       PVR_ASSERT(psZSBuffer->ui32RefCount);
+
+       PVR_DPF((PVR_DBG_MESSAGE,
+                       "ZS Buffer [%p, ID=0x%08x]: Physical backing removal requested",
+                       psZSBuffer,
+                       psZSBuffer->ui32ZSBufferID));
+
+       hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer;
+
+       OSLockAcquire(hLockZSBuffer);
+
+       if (psZSBuffer->bOnDemand)
+       {
+               if (psZSBuffer->ui32RefCount == 1)
+               {
+                       PVR_ASSERT(psZSBuffer->psMapping);
+
+                       eError = DevmemIntUnmapPMR(psZSBuffer->psMapping);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                               "Unable to unpopulate ZS Buffer [%p, ID=0x%08x] (%s)",
+                                               psZSBuffer,
+                                               psZSBuffer->ui32ZSBufferID,
+                                               PVRSRVGetErrorString(eError)));
+                               OSLockRelease(hLockZSBuffer);
+                               return eError;
+                       }
+
+                       PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing removed",
+                                       psZSBuffer,
+                                       psZSBuffer->ui32ZSBufferID));
+               }
+       }
+
+       /* Decrease refcount */
+       psZSBuffer->ui32RefCount--;
+
+       OSLockRelease(hLockZSBuffer);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+RGXUnpopulateZSBufferKM(RGX_POPULATION *psPopulation)
+{
+       PVRSRV_ERROR eError;
+
+       if (!psPopulation)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       eError = RGXUnbackingZSBuffer(psPopulation->psZSBuffer);
+       if (eError != PVRSRV_OK)
+       {
+               return eError;
+       }
+
+       OSFreeMem(psPopulation);
+
+       return PVRSRV_OK;
+}
+
+static RGX_ZSBUFFER_DATA *FindZSBuffer(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32ZSBufferID)
+{
+       DLLIST_NODE *psNode, *psNext;
+       RGX_ZSBUFFER_DATA *psZSBuffer = NULL;
+
+       OSLockAcquire(psDevInfo->hLockZSBuffer);
+
+       dllist_foreach_node(&psDevInfo->sZSBufferHead, psNode, psNext)
+       {
+               RGX_ZSBUFFER_DATA *psThisZSBuffer = IMG_CONTAINER_OF(psNode, RGX_ZSBUFFER_DATA, sNode);
+
+               if (psThisZSBuffer->ui32ZSBufferID == ui32ZSBufferID)
+               {
+                       psZSBuffer = psThisZSBuffer;
+                       break;
+               }
+       }
+
+       OSLockRelease(psDevInfo->hLockZSBuffer);
+       return psZSBuffer;
+}
+
+void RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+               IMG_UINT32 ui32ZSBufferID)
+{
+       RGX_ZSBUFFER_DATA *psZSBuffer;
+       RGXFWIF_KCCB_CMD sTACCBCmd;
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(psDevInfo);
+
+       /* scan all deferred allocations */
+       psZSBuffer = FindZSBuffer(psDevInfo, ui32ZSBufferID);
+
+       if (psZSBuffer)
+       {
+               IMG_BOOL bBackingDone = IMG_TRUE;
+
+               /* Populate ZLS */
+               eError = RGXBackingZSBuffer(psZSBuffer);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "Populating ZS-Buffer (ID = 0x%08x) failed (%s)",
+                                       ui32ZSBufferID,
+                                       PVRSRVGetErrorString(eError)));
+                       bBackingDone = IMG_FALSE;
+               }
+
+               /* send confirmation */
+               sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE;
+               sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr;
+               sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = bBackingDone;
+
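+               /* Retry the kernel CCB submission for up to MAX_HW_TIME_US while the
+                * FW returns PVRSRV_ERROR_RETRY. */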
+               LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+               {
+                       eError = RGXScheduleCommand(psDevInfo,
+                                       RGXFWIF_DM_GEOM,
+                                       &sTACCBCmd,
+                                       PDUMP_FLAGS_NONE);
+                       if (eError != PVRSRV_ERROR_RETRY)
+                       {
+                               break;
+                       }
+                       OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+               } END_LOOP_UNTIL_TIMEOUT();
+
+               /* Kernel CCB should never fill up, as the FW is processing them right away  */
+               PVR_ASSERT(eError == PVRSRV_OK);
+
+               psZSBuffer->ui32NumReqByFW++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+               PVRSRVStatsUpdateZSBufferStats(0, 1, psZSBuffer->owner);
+#endif
+
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (Populate)",
+                               ui32ZSBufferID));
+       }
+}
+
+void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+               IMG_UINT32 ui32ZSBufferID)
+{
+       RGX_ZSBUFFER_DATA *psZSBuffer;
+       RGXFWIF_KCCB_CMD sTACCBCmd;
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(psDevInfo);
+
+       /* scan all deferred allocations */
+       psZSBuffer = FindZSBuffer(psDevInfo, ui32ZSBufferID);
+
+       if (psZSBuffer)
+       {
+               /* Unpopulate ZLS */
+               eError = RGXUnbackingZSBuffer(psZSBuffer);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "UnPopulating ZS-Buffer (ID = 0x%08x) failed (%s)",
+                                       ui32ZSBufferID,
+                                       PVRSRVGetErrorString(eError)));
+                       PVR_ASSERT(IMG_FALSE);
+               }
+
+               /* send confirmation */
+               sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE;
+               sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr;
+               sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = IMG_TRUE;
+
+               LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+               {
+                       eError = RGXScheduleCommand(psDevInfo,
+                                       RGXFWIF_DM_GEOM,
+                                       &sTACCBCmd,
+                                       PDUMP_FLAGS_NONE);
+                       if (eError != PVRSRV_ERROR_RETRY)
+                       {
+                               break;
+                       }
+                       OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+               } END_LOOP_UNTIL_TIMEOUT();
+
+               /* Kernel CCB should never fill up, as the FW is processing them right away  */
+               PVR_ASSERT(eError == PVRSRV_OK);
+
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (UnPopulate)",
+                               ui32ZSBufferID));
+       }
+}
+
+static
+PVRSRV_ERROR _CreateTAContext(CONNECTION_DATA *psConnection,
+               PVRSRV_DEVICE_NODE *psDeviceNode,
+               DEVMEM_MEMDESC *psAllocatedMemDesc,
+               IMG_UINT32 ui32AllocatedOffset,
+               DEVMEM_MEMDESC *psFWMemContextMemDesc,
+               IMG_DEV_VIRTADDR sVDMCallStackAddr,
+               IMG_UINT32 ui32CallStackDepth,
+               IMG_UINT32 ui32Priority,
+               IMG_UINT32 ui32MaxDeadlineMS,
+               IMG_UINT64 ui64RobustnessAddress,
+               RGX_COMMON_CONTEXT_INFO *psInfo,
+               RGX_SERVER_RC_TA_DATA *psTAData,
+               IMG_UINT32 ui32CCBAllocSizeLog2,
+               IMG_UINT32 ui32CCBMaxAllocSizeLog2,
+               IMG_UINT32 ui32ContextFlags)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       RGXFWIF_TACTX_STATE *psContextState;
+       IMG_UINT32 uiCoreIdx;
+       PVRSRV_ERROR eError;
+       /*
+               Allocate device memory for the firmware GPU context suspend state.
+               Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+        */
+       PDUMPCOMMENT(psDeviceNode, "Allocate RGX firmware TA context suspend state");
+
+       eError = DevmemFwAllocate(psDevInfo,
+                       sizeof(RGXFWIF_TACTX_STATE),
+                       RGX_FWCOMCTX_ALLOCFLAGS,
+                       "FwTAContextState",
+                       &psTAData->psContextStateMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to allocate firmware GPU context suspend state (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+               goto fail_tacontextsuspendalloc;
+       }
+
+       eError = DevmemAcquireCpuVirtAddr(psTAData->psContextStateMemDesc,
+                       (void **)&psContextState);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to map firmware render context state (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+               goto fail_suspendcpuvirtacquire;
+       }
+
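+       /* Each geometry core gets its own slice of the VDM call stack buffer,
+        * offset by ui32CallStackDepth 64-bit entries per core. */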
+       for (uiCoreIdx = 0; uiCoreIdx < RGX_NUM_GEOM_CORES; uiCoreIdx++)
+       {
+               psContextState->asGeomCore[uiCoreIdx].uTAReg_VDM_CALL_STACK_POINTER_Init =
+                       sVDMCallStackAddr.uiAddr + (uiCoreIdx * ui32CallStackDepth * sizeof(IMG_UINT64));
+       }
+
+       DevmemReleaseCpuVirtAddr(psTAData->psContextStateMemDesc);
+
+       eError = FWCommonContextAllocate(psConnection,
+                       psDeviceNode,
+                       REQ_TYPE_TA,
+                       RGXFWIF_DM_GEOM,
+                       NULL,
+                       psAllocatedMemDesc,
+                       ui32AllocatedOffset,
+                       psFWMemContextMemDesc,
+                       psTAData->psContextStateMemDesc,
+                       ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TA_CCB_SIZE_LOG2,
+                       ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TA_CCB_MAX_SIZE_LOG2,
+                       ui32ContextFlags,
+                       ui32Priority,
+                       ui32MaxDeadlineMS,
+                       ui64RobustnessAddress,
+                       psInfo,
+                       &psTAData->psServerCommonContext);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to init TA fw common context (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+               goto fail_tacommoncontext;
+       }
+
+       /*
+        * Dump the FW TA context suspend state buffer
+        */
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDeviceNode, "Dump the TA context suspend state buffer");
+       DevmemPDumpLoadMem(psTAData->psContextStateMemDesc,
+                       0,
+                       sizeof(RGXFWIF_TACTX_STATE),
+                       PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+       psTAData->ui32Priority = ui32Priority;
+       return PVRSRV_OK;
+
+fail_tacommoncontext:
+fail_suspendcpuvirtacquire:
+       DevmemFwUnmapAndFree(psDevInfo, psTAData->psContextStateMemDesc);
+fail_tacontextsuspendalloc:
+       PVR_ASSERT(eError != PVRSRV_OK);
+
+       return eError;
+}
+
+static
+PVRSRV_ERROR _Create3DContext(CONNECTION_DATA *psConnection,
+               PVRSRV_DEVICE_NODE *psDeviceNode,
+               DEVMEM_MEMDESC *psAllocatedMemDesc,
+               IMG_UINT32 ui32AllocatedOffset,
+               DEVMEM_MEMDESC *psFWMemContextMemDesc,
+               IMG_UINT32 ui32Priority,
+               IMG_UINT32 ui32MaxDeadlineMS,
+               IMG_UINT64 ui64RobustnessAddress,
+               RGX_COMMON_CONTEXT_INFO *psInfo,
+               RGX_SERVER_RC_3D_DATA *ps3DData,
+               IMG_UINT32 ui32CCBAllocSizeLog2,
+               IMG_UINT32 ui32CCBMaxAllocSizeLog2,
+               IMG_UINT32 ui32ContextFlags)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_ERROR eError;
+       IMG_UINT        uiNumISPStoreRegs;
+       IMG_UINT        ui3DRegISPStateStoreSize = 0;
+
+       /*
+               Allocate device memory for the firmware GPU context suspend state.
+               Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+        */
+       PDUMPCOMMENT(psDeviceNode, "Allocate RGX firmware 3D context suspend state");
+
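+       /* Work out how many ISP store registers need to be preserved: the raster
+        * pipe count (scaled by the core count on multicore parts) on devices with
+        * XE_MEMORY_HIERARCHY, otherwise the ISP IPP pipe count. */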
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY))
+       {
+               uiNumISPStoreRegs = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode,
+                               RGX_FEATURE_NUM_RASTER_PIPES_IDX);
+               if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT))
+               {
+                       uiNumISPStoreRegs *= (1U + psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode,
+                                       RGX_FEATURE_XPU_MAX_SLAVES_IDX));
+               }
+       }
+       else
+       {
+               uiNumISPStoreRegs = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode,
+                               RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX);
+       }
+
+       /* Calculate the size of the 3DCTX ISP state (the CS buffer) */
+       ui3DRegISPStateStoreSize = sizeof(RGXFWIF_3DCTX_STATE) +
+                       uiNumISPStoreRegs * sizeof(((RGXFWIF_3DCTX_STATE *)0)->au3DReg_ISP_STORE[0]);
+
+       eError = DevmemFwAllocate(psDevInfo,
+                       ui3DRegISPStateStoreSize,
+                       RGX_FWCOMCTX_ALLOCFLAGS,
+                       "Fw3DContextState",
+                       &ps3DData->psContextStateMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to allocate firmware GPU context suspend state (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+               goto fail_3dcontextsuspendalloc;
+       }
+
+       eError = FWCommonContextAllocate(psConnection,
+                       psDeviceNode,
+                       REQ_TYPE_3D,
+                       RGXFWIF_DM_3D,
+                       NULL,
+                       psAllocatedMemDesc,
+                       ui32AllocatedOffset,
+                       psFWMemContextMemDesc,
+                       ps3DData->psContextStateMemDesc,
+                       ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_3D_CCB_SIZE_LOG2,
+                       ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_3D_CCB_MAX_SIZE_LOG2,
+                       ui32ContextFlags,
+                       ui32Priority,
+                       ui32MaxDeadlineMS,
+                       ui64RobustnessAddress,
+                       psInfo,
+                       &ps3DData->psServerCommonContext);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to init 3D fw common context (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+               goto fail_3dcommoncontext;
+       }
+
+       /*
+        * Dump the FW 3D context suspend state buffer
+        */
+       PDUMPCOMMENT(psDeviceNode, "Dump the 3D context suspend state buffer");
+       DevmemPDumpLoadMem(ps3DData->psContextStateMemDesc,
+                       0,
+                       sizeof(RGXFWIF_3DCTX_STATE),
+                       PDUMP_FLAGS_CONTINUOUS);
+
+       ps3DData->ui32Priority = ui32Priority;
+       return PVRSRV_OK;
+
+fail_3dcommoncontext:
+       DevmemFwUnmapAndFree(psDevInfo, ps3DData->psContextStateMemDesc);
+fail_3dcontextsuspendalloc:
+       PVR_ASSERT(eError != PVRSRV_OK);
+
+       return eError;
+}
+
+
+/*
+ * PVRSRVRGXCreateRenderContextKM
+ */
+PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA                            *psConnection,
+               PVRSRV_DEVICE_NODE                      *psDeviceNode,
+               IMG_UINT32                                      ui32Priority,
+               IMG_DEV_VIRTADDR                        sVDMCallStackAddr,
+               IMG_UINT32                                      ui32CallStackDepth,
+               IMG_UINT32                                      ui32FrameworkRegisterSize,
+               IMG_PBYTE                                       pabyFrameworkRegisters,
+               IMG_HANDLE                                      hMemCtxPrivData,
+               IMG_UINT32                                      ui32StaticRenderContextStateSize,
+               IMG_PBYTE                                       pStaticRenderContextState,
+               IMG_UINT32                                      ui32PackedCCBSizeU8888,
+               IMG_UINT32                                      ui32ContextFlags,
+               IMG_UINT64                                      ui64RobustnessAddress,
+               IMG_UINT32                                      ui32MaxTADeadlineMS,
+               IMG_UINT32                                      ui32Max3DDeadlineMS,
+               RGX_SERVER_RENDER_CONTEXT       **ppsRenderContext)
+{
+       PVRSRV_ERROR                            eError;
+       PVRSRV_RGXDEV_INFO                      *psDevInfo = psDeviceNode->pvDevice;
+       RGX_SERVER_RENDER_CONTEXT       *psRenderContext;
+       DEVMEM_MEMDESC                          *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+       RGX_COMMON_CONTEXT_INFO         sInfo = {NULL};
+       RGXFWIF_FWRENDERCONTEXT         *psFWRenderContext;
+
+       *ppsRenderContext = NULL;
+
+       if (ui32StaticRenderContextStateSize > RGXFWIF_STATIC_RENDERCONTEXT_SIZE)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       psRenderContext = OSAllocZMem(sizeof(*psRenderContext));
+       if (psRenderContext == NULL)
+       {
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       eError = OSLockCreate(&psRenderContext->hLock);
+
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_lock;
+       }
+
+       psRenderContext->psDeviceNode = psDeviceNode;
+
+       /*
+               Create the FW render context, this has the TA and 3D FW common
+               contexts embedded within it
+        */
+       eError = DevmemFwAllocate(psDevInfo,
+                       sizeof(RGXFWIF_FWRENDERCONTEXT),
+                       RGX_FWCOMCTX_ALLOCFLAGS,
+                       "FwRenderContext",
+                       &psRenderContext->psFWRenderContextMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_fwrendercontext;
+       }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       WorkEstInitTA3D(psDevInfo, &psRenderContext->sWorkEstData);
+#endif
+
+       if (ui32FrameworkRegisterSize)
+       {
+               /*
+                * Create the FW framework buffer
+                */
+               eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+                               &psRenderContext->psFWFrameworkMemDesc,
+                               ui32FrameworkRegisterSize);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                               "%s: Failed to allocate firmware GPU framework state (%s)",
+                                               __func__,
+                                               PVRSRVGetErrorString(eError)));
+                       goto fail_frameworkcreate;
+               }
+
+               /* Copy the Framework client data into the framework buffer */
+               eError = PVRSRVRGXFrameworkCopyCommand(psDeviceNode,
+                               psRenderContext->psFWFrameworkMemDesc,
+                               pabyFrameworkRegisters,
+                               ui32FrameworkRegisterSize);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                               "%s: Failed to populate the framework buffer (%s)",
+                                               __func__,
+                                               PVRSRVGetErrorString(eError)));
+                       goto fail_frameworkcopy;
+               }
+
+               sInfo.psFWFrameworkMemDesc = psRenderContext->psFWFrameworkMemDesc;
+       }
+
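+       /* ui32PackedCCBSizeU8888 carries the log2 CCB alloc/max sizes for the TA
+        * and 3D contexts, one byte each; a value of zero selects the defaults. */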
+       eError = _Create3DContext(psConnection,
+                       psDeviceNode,
+                       psRenderContext->psFWRenderContextMemDesc,
+                       offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext),
+                       psFWMemContextMemDesc,
+                       ui32Priority,
+                       ui32Max3DDeadlineMS,
+                       ui64RobustnessAddress,
+                       &sInfo,
+                       &psRenderContext->s3DData,
+                       U32toU8_Unpack3(ui32PackedCCBSizeU8888),
+                       U32toU8_Unpack4(ui32PackedCCBSizeU8888),
+                       ui32ContextFlags);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_3dcontext;
+       }
+
+       eError = _CreateTAContext(psConnection,
+                       psDeviceNode,
+                       psRenderContext->psFWRenderContextMemDesc,
+                       offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext),
+                       psFWMemContextMemDesc,
+                       sVDMCallStackAddr,
+                       ui32CallStackDepth,
+                       ui32Priority,
+                       ui32MaxTADeadlineMS,
+                       ui64RobustnessAddress,
+                       &sInfo,
+                       &psRenderContext->sTAData,
+                       U32toU8_Unpack1(ui32PackedCCBSizeU8888),
+                       U32toU8_Unpack2(ui32PackedCCBSizeU8888),
+                       ui32ContextFlags);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_tacontext;
+       }
+
+       eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc,
+                       (void **)&psFWRenderContext);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_acquire_cpu_mapping;
+       }
+
+       /* Copy the static render context data */
+       OSDeviceMemCopy(&psFWRenderContext->sStaticRenderContextState, pStaticRenderContextState, ui32StaticRenderContextStateSize);
+       DevmemPDumpLoadMem(psRenderContext->psFWRenderContextMemDesc, 0, sizeof(RGXFWIF_FWRENDERCONTEXT), PDUMP_FLAGS_CONTINUOUS);
+       DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc);
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       psRenderContext->psBufferSyncContext =
+                       pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice,
+                                       "rogue-ta3d");
+       if (IS_ERR(psRenderContext->psBufferSyncContext))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: failed to create buffer_sync context (err=%ld)",
+                               __func__, PTR_ERR(psRenderContext->psBufferSyncContext)));
+
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto fail_buffer_sync_context_create;
+       }
+#endif
+
+       SyncAddrListInit(&psRenderContext->sSyncAddrListTAFence);
+       SyncAddrListInit(&psRenderContext->sSyncAddrListTAUpdate);
+       SyncAddrListInit(&psRenderContext->sSyncAddrList3DFence);
+       SyncAddrListInit(&psRenderContext->sSyncAddrList3DUpdate);
+
+       {
+               PVRSRV_RGXDEV_INFO                      *psDevInfo = psDeviceNode->pvDevice;
+
+               OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
+               dllist_add_to_tail(&(psDevInfo->sRenderCtxtListHead), &(psRenderContext->sListNode));
+               OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);
+       }
+
+       *ppsRenderContext = psRenderContext;
+       return PVRSRV_OK;
+
+#if defined(SUPPORT_BUFFER_SYNC)
+fail_buffer_sync_context_create:
+#endif
+fail_acquire_cpu_mapping:
+       _DestroyTAContext(&psRenderContext->sTAData,
+                       psDeviceNode);
+fail_tacontext:
+       _Destroy3DContext(&psRenderContext->s3DData,
+                       psRenderContext->psDeviceNode);
+fail_3dcontext:
+fail_frameworkcopy:
+       if (psRenderContext->psFWFrameworkMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWFrameworkMemDesc);
+       }
+fail_frameworkcreate:
+       DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWRenderContextMemDesc);
+fail_fwrendercontext:
+       OSLockDestroy(psRenderContext->hLock);
+fail_lock:
+       OSFreeMem(psRenderContext);
+       PVR_ASSERT(eError != PVRSRV_OK);
+
+       return eError;
+}
+
+/*
+ * PVRSRVRGXDestroyRenderContextKM
+ */
+PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext)
+{
+       PVRSRV_ERROR                            eError;
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psRenderContext->psDeviceNode->pvDevice;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       RGXFWIF_FWRENDERCONTEXT *psFWRenderContext;
+       IMG_UINT32 ui32WorkEstCCBSubmitted;
+#endif
+
+       /* Remove the node from the list before calling destroy; if destroy
+        * succeeds it will invalidate the node. The node must be re-added
+        * if destroy fails.
+        */
+       OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
+       dllist_remove_node(&(psRenderContext->sListNode));
+       OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       /* Check psBufferSyncContext has not been destroyed already (by a previous
+        * call to this function which then later returned PVRSRV_ERROR_RETRY)
+        */
+       if (psRenderContext->psBufferSyncContext != NULL)
+       {
+               pvr_buffer_sync_context_destroy(psRenderContext->psBufferSyncContext);
+               psRenderContext->psBufferSyncContext = NULL;
+       }
+#endif
+
+       /* Cleanup the TA if we haven't already */
+       if ((psRenderContext->ui32CleanupStatus & RC_CLEANUP_TA_COMPLETE) == 0)
+       {
+               eError = _DestroyTAContext(&psRenderContext->sTAData,
+                               psRenderContext->psDeviceNode);
+               if (eError == PVRSRV_OK)
+               {
+                       psRenderContext->ui32CleanupStatus |= RC_CLEANUP_TA_COMPLETE;
+               }
+               else
+               {
+                       goto e0;
+               }
+       }
+
+       /* Cleanup the 3D if we haven't already */
+       if ((psRenderContext->ui32CleanupStatus & RC_CLEANUP_3D_COMPLETE) == 0)
+       {
+               eError = _Destroy3DContext(&psRenderContext->s3DData,
+                               psRenderContext->psDeviceNode);
+               if (eError == PVRSRV_OK)
+               {
+                       psRenderContext->ui32CleanupStatus |= RC_CLEANUP_3D_COMPLETE;
+               }
+               else
+               {
+                       goto e0;
+               }
+       }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc,
+                       (void **)&psFWRenderContext);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to map firmware render context (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+               goto e0;
+       }
+
+       ui32WorkEstCCBSubmitted = psFWRenderContext->ui32WorkEstCCBSubmitted;
+
+       DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc);
+
+       /* Check whether all of the workload estimation CCB commands for this workload have been read */
+       if (ui32WorkEstCCBSubmitted != psRenderContext->sWorkEstData.ui32WorkEstCCBReceived)
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                       "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch",
+                       __func__, ui32WorkEstCCBSubmitted,
+                       psRenderContext->sWorkEstData.ui32WorkEstCCBReceived));
+
+               eError = PVRSRV_ERROR_RETRY;
+               goto e0;
+       }
+#endif
+
+       /* Only when both the TA and 3D contexts have been cleaned up can we
+        * free the shared resources.
+        */
+       if (psRenderContext->ui32CleanupStatus == (RC_CLEANUP_3D_COMPLETE | RC_CLEANUP_TA_COMPLETE))
+       {
+               if (psRenderContext->psFWFrameworkMemDesc)
+               {
+                       /* Free the framework buffer */
+                       DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWFrameworkMemDesc);
+               }
+
+               /* Free the firmware render context */
+               DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWRenderContextMemDesc);
+
+               SyncAddrListDeinit(&psRenderContext->sSyncAddrListTAFence);
+               SyncAddrListDeinit(&psRenderContext->sSyncAddrListTAUpdate);
+               SyncAddrListDeinit(&psRenderContext->sSyncAddrList3DFence);
+               SyncAddrListDeinit(&psRenderContext->sSyncAddrList3DUpdate);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+               WorkEstDeInitTA3D(psDevInfo, &psRenderContext->sWorkEstData);
+#endif
+
+               OSLockDestroy(psRenderContext->hLock);
+
+               OSFreeMem(psRenderContext);
+       }
+
+       return PVRSRV_OK;
+
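+/* Failure/retry path: put the context back on the device's render context
+ * list so that the cleanup can be retried by a later call.
+ */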
+e0:
+       OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
+       dllist_add_to_tail(&(psDevInfo->sRenderCtxtListHead), &(psRenderContext->sListNode));
+       OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);
+       return eError;
+}
+
+
+
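+/* Debug-only helper: dumps every TA/3D fence and update sync prepared for a
+ * kick. UFO addresses with bit 0 set are reported as sync checkpoints
+ * (PVRSRV_SYNC_CHECKPOINT_SIGNALLED); all others are sync prims printed with
+ * their check/update value.
+ */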
+#if (ENABLE_TA3D_UFO_DUMP == 1)
+static void DumpUfoList(IMG_UINT32 ui32ClientTAFenceCount,
+                        IMG_UINT32 ui32ClientTAUpdateCount,
+                        IMG_UINT32 ui32Client3DFenceCount,
+                        IMG_UINT32 ui32Client3DUpdateCount,
+                        PRGXFWIF_UFO_ADDR *pauiClientTAFenceUFOAddress,
+                        IMG_UINT32 *paui32ClientTAFenceValue,
+                        PRGXFWIF_UFO_ADDR *pauiClientTAUpdateUFOAddress,
+                        IMG_UINT32 *paui32ClientTAUpdateValue,
+                        PRGXFWIF_UFO_ADDR *pauiClient3DFenceUFOAddress,
+                        IMG_UINT32 *paui32Client3DFenceValue,
+                        PRGXFWIF_UFO_ADDR *pauiClient3DUpdateUFOAddress,
+                        IMG_UINT32 *paui32Client3DUpdateValue)
+{
+       IMG_UINT32 i;
+
+       PVR_DPF((PVR_DBG_ERROR, "%s: ~~~ After populating sync prims ~~~",
+                        __func__));
+
+       /* Dump Fence syncs, Update syncs and PR Update syncs */
+       PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TA fence syncs:",
+               __func__, ui32ClientTAFenceCount));
+       for (i = 0; i < ui32ClientTAFenceCount; i++)
+       {
+               if (BITMASK_HAS(pauiClientTAFenceUFOAddress->ui32Addr, 1))
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s:   %d/%d<%p>. FWAddr=0x%x,"
+                               " CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
+                               __func__, i + 1, ui32ClientTAFenceCount,
+                               (void *) pauiClientTAFenceUFOAddress,
+                               pauiClientTAFenceUFOAddress->ui32Addr));
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)",
+                               __func__, i + 1, ui32ClientTAFenceCount,
+                               (void *) pauiClientTAFenceUFOAddress,
+                               pauiClientTAFenceUFOAddress->ui32Addr,
+                               *paui32ClientTAFenceValue,
+                               *paui32ClientTAFenceValue));
+                       paui32ClientTAFenceValue++;
+               }
+               pauiClientTAFenceUFOAddress++;
+       }
+
+       PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TA update syncs:",
+                        __func__, ui32ClientTAUpdateCount));
+       for (i = 0; i < ui32ClientTAUpdateCount; i++)
+       {
+               if (BITMASK_HAS(pauiClientTAUpdateUFOAddress->ui32Addr, 1))
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s:   %d/%d<%p>. FWAddr=0x%x,"
+                               " UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
+                               __func__, i + 1, ui32ClientTAUpdateCount,
+                               (void *) pauiClientTAUpdateUFOAddress,
+                               pauiClientTAUpdateUFOAddress->ui32Addr));
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d(0x%x)",
+                               __func__, i + 1, ui32ClientTAUpdateCount,
+                               (void *) pauiClientTAUpdateUFOAddress,
+                               pauiClientTAUpdateUFOAddress->ui32Addr,
+                               *paui32ClientTAUpdateValue,
+                               *paui32ClientTAUpdateValue));
+                       paui32ClientTAUpdateValue++;
+               }
+               pauiClientTAUpdateUFOAddress++;
+       }
+
+       PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d 3D fence syncs:",
+                        __func__, ui32Client3DFenceCount));
+       for (i = 0; i < ui32Client3DFenceCount; i++)
+       {
+               if (BITMASK_HAS(pauiClient3DFenceUFOAddress->ui32Addr, 1))
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s:   %d/%d<%p>. FWAddr=0x%x,"
+                               " CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
+                               __func__, i + 1, ui32Client3DFenceCount,
+                               (void *) pauiClient3DFenceUFOAddress,
+                               pauiClient3DFenceUFOAddress->ui32Addr));
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)",
+                               __func__, i + 1, ui32Client3DFenceCount,
+                               (void *) pauiClient3DFenceUFOAddress,
+                               pauiClient3DFenceUFOAddress->ui32Addr,
+                               *paui32Client3DFenceValue,
+                               *paui32Client3DFenceValue));
+                       paui32Client3DFenceValue++;
+               }
+               pauiClient3DFenceUFOAddress++;
+       }
+
+       PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d 3D update syncs:",
+                        __func__, ui32Client3DUpdateCount));
+       for (i = 0; i < ui32Client3DUpdateCount; i++)
+       {
+               if (BITMASK_HAS(pauiClient3DUpdateUFOAddress->ui32Addr, 1))
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s:   %d/%d<%p>. FWAddr=0x%x,"
+                               " UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
+                               __func__, i + 1, ui32Client3DUpdateCount,
+                               (void *) pauiClient3DUpdateUFOAddress,
+                               pauiClient3DUpdateUFOAddress->ui32Addr));
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d(0x%x)",
+                               __func__, i + 1, ui32Client3DUpdateCount,
+                               (void *) pauiClient3DUpdateUFOAddress,
+                               pauiClient3DUpdateUFOAddress->ui32Addr,
+                               *paui32Client3DUpdateValue,
+                               *paui32Client3DUpdateValue));
+                       paui32Client3DUpdateValue++;
+               }
+               pauiClient3DUpdateUFOAddress++;
+       }
+}
+#endif /* (ENABLE_TA3D_UFO_DUMP == 1) */
+
+/*
+ * PVRSRVRGXKickTA3DKM
+ */
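+/* Builds and submits the TA and/or 3D (and partial render) commands for the
+ * given render context: input fences and buffer sync objects are resolved,
+ * any requested output fences are created, the CCB commands are sized and
+ * checked for space, and the kicks are then submitted to the firmware.
+ */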
+PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT     *psRenderContext,
+               IMG_UINT32                                      ui32ClientTAFenceCount,
+               SYNC_PRIMITIVE_BLOCK            **apsClientTAFenceSyncPrimBlock,
+               IMG_UINT32                                      *paui32ClientTAFenceSyncOffset,
+               IMG_UINT32                                      *paui32ClientTAFenceValue,
+               IMG_UINT32                                      ui32ClientTAUpdateCount,
+               SYNC_PRIMITIVE_BLOCK            **apsClientTAUpdateSyncPrimBlock,
+               IMG_UINT32                                      *paui32ClientTAUpdateSyncOffset,
+               IMG_UINT32                                      *paui32ClientTAUpdateValue,
+               IMG_UINT32                                      ui32Client3DUpdateCount,
+               SYNC_PRIMITIVE_BLOCK            **apsClient3DUpdateSyncPrimBlock,
+               IMG_UINT32                                      *paui32Client3DUpdateSyncOffset,
+               IMG_UINT32                                      *paui32Client3DUpdateValue,
+               SYNC_PRIMITIVE_BLOCK            *psPRFenceSyncPrimBlock,
+               IMG_UINT32                                      ui32PRFenceSyncOffset,
+               IMG_UINT32                                      ui32PRFenceValue,
+               PVRSRV_FENCE                            iCheckTAFence,
+               PVRSRV_TIMELINE                 iUpdateTATimeline,
+               PVRSRV_FENCE                            *piUpdateTAFence,
+               IMG_CHAR                                        szFenceNameTA[PVRSRV_SYNC_NAME_LENGTH],
+               PVRSRV_FENCE                            iCheck3DFence,
+               PVRSRV_TIMELINE                 iUpdate3DTimeline,
+               PVRSRV_FENCE                            *piUpdate3DFence,
+               IMG_CHAR                                        szFenceName3D[PVRSRV_SYNC_NAME_LENGTH],
+               IMG_UINT32                                      ui32TACmdSize,
+               IMG_PBYTE                                       pui8TADMCmd,
+               IMG_UINT32                                      ui323DPRCmdSize,
+               IMG_PBYTE                                       pui83DPRDMCmd,
+               IMG_UINT32                                      ui323DCmdSize,
+               IMG_PBYTE                                       pui83DDMCmd,
+               IMG_UINT32                                      ui32ExtJobRef,
+               IMG_BOOL                                        bKickTA,
+               IMG_BOOL                                        bKickPR,
+               IMG_BOOL                                        bKick3D,
+               IMG_BOOL                                        bAbort,
+               IMG_UINT32                                      ui32PDumpFlags,
+               RGX_KM_HW_RT_DATASET            *psKMHWRTDataSet,
+               RGX_ZSBUFFER_DATA               *psZSBuffer,
+               RGX_ZSBUFFER_DATA               *psMSAAScratchBuffer,
+               IMG_UINT32                      ui32SyncPMRCount,
+               IMG_UINT32                      *paui32SyncPMRFlags,
+               PMR                             **ppsSyncPMRs,
+               IMG_UINT32                      ui32RenderTargetSize,
+               IMG_UINT32                      ui32NumberOfDrawCalls,
+               IMG_UINT32                      ui32NumberOfIndices,
+               IMG_UINT32                      ui32NumberOfMRTs,
+               IMG_UINT64                      ui64DeadlineInus)
+{
+       /* per-context helper structures */
+       RGX_CCB_CMD_HELPER_DATA *pasTACmdHelperData = psRenderContext->asTACmdHelperData;
+       RGX_CCB_CMD_HELPER_DATA *pas3DCmdHelperData = psRenderContext->as3DCmdHelperData;
+
+       IMG_UINT32                              ui32TACmdCount=0;
+       IMG_UINT32                              ui323DCmdCount=0;
+       IMG_UINT32                              ui32TACmdOffset=0;
+       IMG_UINT32                              ui323DCmdOffset=0;
+       RGXFWIF_UFO                             sPRUFO;
+       IMG_UINT32                              i;
+       PVRSRV_ERROR                    eError = PVRSRV_OK;
+       PVRSRV_ERROR                    eError2 = PVRSRV_OK;
+
+       PVRSRV_RGXDEV_INFO      *psDevInfo = FWCommonContextGetRGXDevInfo(psRenderContext->s3DData.psServerCommonContext);
+       IMG_UINT32              ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal);
+       IMG_BOOL                bCCBStateOpen = IMG_FALSE;
+
+       IMG_UINT32                              ui32ClientPRUpdateCount = 0;
+       PRGXFWIF_UFO_ADDR               *pauiClientPRUpdateUFOAddress = NULL;
+       IMG_UINT32                              *paui32ClientPRUpdateValue = NULL;
+
+       PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+       PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+       PRGXFWIF_UFO_ADDR       pRMWUFOAddr;
+
+       PRGXFWIF_UFO_ADDR               *pauiClientTAFenceUFOAddress = NULL;
+       PRGXFWIF_UFO_ADDR               *pauiClientTAUpdateUFOAddress = NULL;
+       PRGXFWIF_UFO_ADDR               *pauiClient3DFenceUFOAddress = NULL;
+       PRGXFWIF_UFO_ADDR               *pauiClient3DUpdateUFOAddress = NULL;
+       PRGXFWIF_UFO_ADDR               uiPRFenceUFOAddress;
+
+       IMG_UINT64               uiCheckTAFenceUID = 0;
+       IMG_UINT64               uiCheck3DFenceUID = 0;
+       IMG_UINT64               uiUpdateTAFenceUID = 0;
+       IMG_UINT64               uiUpdate3DFenceUID = 0;
+
+       IMG_BOOL bUseCombined3DAnd3DPR = bKickPR && bKick3D && !pui83DPRDMCmd;
+
+       RGXFWIF_KCCB_CMD_KICK_DATA      sTACmdKickData;
+       RGXFWIF_KCCB_CMD_KICK_DATA      s3DCmdKickData;
+       IMG_BOOL bUseSingleFWCommand = bKickTA && (bKickPR || bKick3D);
+
+       IMG_UINT32 ui32TACmdSizeTmp = 0, ui323DCmdSizeTmp = 0;
+
+       IMG_BOOL bTAFenceOnSyncCheckpointsOnly = IMG_FALSE;
+
+       PVRSRV_FENCE    iUpdateTAFence = PVRSRV_NO_FENCE;
+       PVRSRV_FENCE    iUpdate3DFence = PVRSRV_NO_FENCE;
+
+       IMG_BOOL b3DFenceOnSyncCheckpointsOnly = IMG_FALSE;
+       IMG_UINT32 ui32TAFenceTimelineUpdateValue = 0;
+       IMG_UINT32 ui323DFenceTimelineUpdateValue = 0;
+
+       /*
+        * Counts of the TA and 3D update values (these may differ from the
+        * number of TA and 3D updates later, as sync checkpoints do not need to
+        * specify a value)
+        */
+       IMG_UINT32 ui32ClientPRUpdateValueCount = 0;
+       IMG_UINT32 ui32ClientTAUpdateValueCount = ui32ClientTAUpdateCount;
+       IMG_UINT32 ui32Client3DUpdateValueCount = ui32Client3DUpdateCount;
+       PSYNC_CHECKPOINT *apsFenceTASyncCheckpoints = NULL;                             /*!< TA fence checkpoints */
+       PSYNC_CHECKPOINT *apsFence3DSyncCheckpoints = NULL;                             /*!< 3D fence checkpoints */
+       IMG_UINT32 ui32FenceTASyncCheckpointCount = 0;
+       IMG_UINT32 ui32Fence3DSyncCheckpointCount = 0;
+       PSYNC_CHECKPOINT psUpdateTASyncCheckpoint = NULL;                               /*!< TA update checkpoint (output) */
+       PSYNC_CHECKPOINT psUpdate3DSyncCheckpoint = NULL;                               /*!< 3D update checkpoint (output) */
+       PVRSRV_CLIENT_SYNC_PRIM *psTAFenceTimelineUpdateSync = NULL;
+       PVRSRV_CLIENT_SYNC_PRIM *ps3DFenceTimelineUpdateSync = NULL;
+       void *pvTAUpdateFenceFinaliseData = NULL;
+       void *pv3DUpdateFenceFinaliseData = NULL;
+
+       RGX_SYNC_DATA sTASyncData = {NULL};             /*!< Contains internal update syncs for TA */
+       RGX_SYNC_DATA s3DSyncData = {NULL};             /*!< Contains internal update syncs for 3D */
+
+       IMG_BOOL bTestSLRAdd3DCheck = IMG_FALSE;
+#if defined(SUPPORT_VALIDATION)
+       PVRSRV_FENCE hTestSLRTmpFence = PVRSRV_NO_FENCE;
+       PSYNC_CHECKPOINT psDummySyncCheckpoint = NULL;
+#endif
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL;
+       IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0;
+       PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL;
+       struct pvr_buffer_sync_append_data *psBufferSyncData = NULL;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataTA = {0};
+       RGXFWIF_WORKEST_KICK_DATA sWorkloadKickData3D = {0};
+       IMG_UINT32 ui32TACommandOffset = 0;
+       IMG_UINT32 ui323DCommandOffset = 0;
+       IMG_UINT32 ui32TACmdHeaderOffset = 0;
+       IMG_UINT32 ui323DCmdHeaderOffset = 0;
+       IMG_UINT32 ui323DFullRenderCommandOffset = 0;
+       IMG_UINT32 ui32TACmdOffsetWrapCheck = 0;
+       IMG_UINT32 ui323DCmdOffsetWrapCheck = 0;
+       RGX_WORKLOAD sWorkloadCharacteristics = {0};
+#endif
+
+       IMG_UINT32 ui32TAFenceCount, ui323DFenceCount;
+       IMG_UINT32 ui32TAUpdateCount, ui323DUpdateCount;
+       IMG_UINT32 ui32PRUpdateCount;
+
+       IMG_PID uiCurrentProcess = OSGetCurrentClientProcessIDKM();
+
+       IMG_UINT32 ui32Client3DFenceCount = 0;
+
+       /* Ensure we haven't been given a NULL pointer for the TA fence values
+        * when a non-zero TA sync prim fence count has been supplied.
+        */
+       if (ui32ClientTAFenceCount > 0)
+       {
+               PVR_LOG_RETURN_IF_FALSE(paui32ClientTAFenceValue != NULL,
+                                       "paui32ClientTAFenceValue NULL but "
+                                       "ui32ClientTAFenceCount > 0",
+                                       PVRSRV_ERROR_INVALID_PARAMS);
+       }
+       /* Ensure we haven't been given a NULL pointer for the TA update values
+        * when a non-zero TA update count has been supplied.
+        */
+       if (ui32ClientTAUpdateCount > 0)
+       {
+               PVR_LOG_RETURN_IF_FALSE(paui32ClientTAUpdateValue != NULL,
+                                       "paui32ClientTAUpdateValue NULL but "
+                                       "ui32ClientTAUpdateCount > 0",
+                                       PVRSRV_ERROR_INVALID_PARAMS);
+       }
+       /* Ensure we haven't been given a NULL pointer for the 3D update values
+        * when a non-zero 3D update count has been supplied.
+        */
+       if (ui32Client3DUpdateCount > 0)
+       {
+               PVR_LOG_RETURN_IF_FALSE(paui32Client3DUpdateValue != NULL,
+                                       "paui32Client3DUpdateValue NULL but "
+                                       "ui32Client3DUpdateCount > 0",
+                                       PVRSRV_ERROR_INVALID_PARAMS);
+       }
+
+       /* Write FW addresses into CMD SHARED BLOCKs */
+       {
+               CMDTA3D_SHARED *psGeomCmdShared = (CMDTA3D_SHARED *)pui8TADMCmd;
+               CMDTA3D_SHARED *ps3DCmdShared = (CMDTA3D_SHARED *)pui83DDMCmd;
+               CMDTA3D_SHARED *psPR3DCmdShared = (CMDTA3D_SHARED *)pui83DPRDMCmd;
+
+               if (psKMHWRTDataSet == NULL)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "KMHWRTDataSet is a null-pointer"));
+                       return PVRSRV_ERROR_INVALID_PARAMS;
+               }
+
+               /* Write the FW addresses for the TA command */
+               if (psGeomCmdShared != NULL)
+               {
+                       psGeomCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr;
+
+                       if (psZSBuffer != NULL)
+                       {
+                               psGeomCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = psZSBuffer->sZSBufferFWDevVAddr;
+                       }
+                       if (psMSAAScratchBuffer != NULL)
+                       {
+                               psGeomCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = psMSAAScratchBuffer->sZSBufferFWDevVAddr;
+                       }
+               }
+
+               /* Write the FW addresses for the 3D command */
+               if (ps3DCmdShared != NULL)
+               {
+                       ps3DCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr;
+
+                       if (psZSBuffer != NULL)
+                       {
+                               ps3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = psZSBuffer->sZSBufferFWDevVAddr;
+                       }
+                       if (psMSAAScratchBuffer != NULL)
+                       {
+                               ps3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = psMSAAScratchBuffer->sZSBufferFWDevVAddr;
+                       }
+               }
+
+               /* Write the FW addresses for the PR3D command */
+               if (psPR3DCmdShared != NULL)
+               {
+                       psPR3DCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr;
+
+                       if (psZSBuffer != NULL)
+                       {
+                               psPR3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = psZSBuffer->sZSBufferFWDevVAddr;
+                       }
+                       if (psMSAAScratchBuffer != NULL)
+                       {
+                               psPR3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = psMSAAScratchBuffer->sZSBufferFWDevVAddr;
+                       }
+               }
+       }
+
+       if (unlikely(iUpdateTATimeline >= 0 && !piUpdateTAFence))
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+       if (unlikely(iUpdate3DTimeline >= 0 && !piUpdate3DFence))
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       CHKPT_DBG((PVR_DBG_ERROR,
+                          "%s: ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d, "
+                          "ui32Client3DFenceCount=%d, ui32Client3DUpdateCount=%d",
+                          __func__,
+                          ui32ClientTAFenceCount, ui32ClientTAUpdateCount,
+                          ui32Client3DFenceCount, ui32Client3DUpdateCount));
+
+
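+       /* Fetch the FW addresses for the optional pre/post workload timestamps
+        * and the RMW UFO. Below, the TA takes the pre-timestamp when kicked
+        * (otherwise the 3D does), and the 3D takes the post-timestamp and RMW
+        * update when kicked (otherwise the TA does).
+        */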
+       RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psRenderContext->psDeviceNode->pvDevice,
+                                 &pPreAddr,
+                                 &pPostAddr,
+                                 &pRMWUFOAddr);
+
+       /* Double-check we have a PR kick if there are client fences */
+       if (unlikely(!bKickPR && ui32Client3DFenceCount != 0))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: 3D fence passed without a PR kick",
+                       __func__));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       /* Ensure the fence name strings are null-terminated (required for safety) */
+       szFenceNameTA[PVRSRV_SYNC_NAME_LENGTH-1] = '\0';
+       szFenceName3D[PVRSRV_SYNC_NAME_LENGTH-1] = '\0';
+
+       OSLockAcquire(psRenderContext->hLock);
+
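+       /* Start from the client-supplied sync counts; these totals are grown
+        * below as buffer sync and input fence checkpoints are resolved, and
+        * are then used to size the CCB command helpers.
+        */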
+       ui32TAFenceCount = ui32ClientTAFenceCount;
+       ui323DFenceCount = ui32Client3DFenceCount;
+       ui32TAUpdateCount = ui32ClientTAUpdateCount;
+       ui323DUpdateCount = ui32Client3DUpdateCount;
+       ui32PRUpdateCount = ui32ClientPRUpdateCount;
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       if (ui32SyncPMRCount)
+       {
+               int err;
+
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Calling"
+                         " pvr_buffer_sync_resolve_and_create_fences", __func__));
+
+               err = pvr_buffer_sync_resolve_and_create_fences(
+                   psRenderContext->psBufferSyncContext,
+                   psRenderContext->psDeviceNode->hSyncCheckpointContext,
+                   ui32SyncPMRCount,
+                   ppsSyncPMRs,
+                   paui32SyncPMRFlags,
+                   &ui32BufferFenceSyncCheckpointCount,
+                   &apsBufferFenceSyncCheckpoints,
+                   &psBufferUpdateSyncCheckpoint,
+                   &psBufferSyncData
+               );
+
+               if (unlikely(err))
+               {
+                       switch (err)
+                       {
+                               case -EINTR:
+                                       eError = PVRSRV_ERROR_RETRY;
+                                       break;
+                               case -ENOMEM:
+                                       eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                                       break;
+                               default:
+                                       eError = PVRSRV_ERROR_INVALID_PARAMS;
+                                       break;
+                       }
+
+                       if (eError != PVRSRV_ERROR_RETRY)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "%s:   "
+                                       "pvr_buffer_sync_resolve_and_create_fences failed (%d)",
+                                       __func__, eError));
+                       }
+                       OSLockRelease(psRenderContext->hLock);
+
+                       return eError;
+               }
+
+#if !defined(SUPPORT_STRIP_RENDERING)
+               if (bKickTA)
+               {
+                       ui32TAFenceCount += ui32BufferFenceSyncCheckpointCount;
+               }
+               else
+               {
+                       ui323DFenceCount += ui32BufferFenceSyncCheckpointCount;
+               }
+#else /* !defined(SUPPORT_STRIP_RENDERING) */
+               ui323DFenceCount += ui32BufferFenceSyncCheckpointCount;
+
+               PVR_UNREFERENCED_PARAMETER(bTAFenceOnSyncCheckpointsOnly);
+#endif /* !defined(SUPPORT_STRIP_RENDERING) */
+
+               if (psBufferUpdateSyncCheckpoint != NULL)
+               {
+                       if (bKick3D)
+                       {
+                               ui323DUpdateCount++;
+                       }
+                       else
+                       {
+                               ui32PRUpdateCount++;
+                       }
+               }
+       }
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
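+/* UPDATE_FENCE_CHECKPOINT_COUNT is the number of update checkpoints reserved
+ * below for each requested output fence; only values of 1 or 2 are supported.
+ */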
+#if !defined(UPDATE_FENCE_CHECKPOINT_COUNT) || (UPDATE_FENCE_CHECKPOINT_COUNT != 1 && UPDATE_FENCE_CHECKPOINT_COUNT != 2)
+#error "Invalid value for UPDATE_FENCE_CHECKPOINT_COUNT. Must be either 1 or 2."
+#endif /* !defined(UPDATE_FENCE_CHECKPOINT_COUNT) || (UPDATE_FENCE_CHECKPOINT_COUNT != 1 && UPDATE_FENCE_CHECKPOINT_COUNT != 2) */
+
+       if (iCheckTAFence != PVRSRV_NO_FENCE)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence[TA]"
+                         " (iCheckFence=%d),"
+                         " psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>...",
+                         __func__, iCheckTAFence,
+                         (void *) psRenderContext->psDeviceNode->hSyncCheckpointContext));
+
+               /* Resolve the sync checkpoints that make up the input fence */
+               eError = SyncCheckpointResolveFence(
+                   psRenderContext->psDeviceNode->hSyncCheckpointContext,
+                   iCheckTAFence,
+                   &ui32FenceTASyncCheckpointCount,
+                   &apsFenceTASyncCheckpoints,
+                   &uiCheckTAFenceUID,
+                   ui32PDumpFlags);
+               if (unlikely(eError != PVRSRV_OK))
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)",
+                                 __func__, eError));
+                       goto fail_resolve_input_ta_fence;
+               }
+
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d "
+                         "checkpoints (apsFenceSyncCheckpoints=<%p>)",
+                         __func__, iCheckTAFence, ui32FenceTASyncCheckpointCount,
+                         (void *) apsFenceTASyncCheckpoints));
+
+#if defined(TA3D_CHECKPOINT_DEBUG)
+               if (apsFenceTASyncCheckpoints)
+               {
+                       _DebugSyncCheckpoints(__func__, "TA", apsFenceTASyncCheckpoints,
+                                             ui32FenceTASyncCheckpointCount);
+               }
+#endif /* defined(TA3D_CHECKPOINT_DEBUG) */
+       }
+
+       if (iCheck3DFence != PVRSRV_NO_FENCE)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence[3D]"
+                         " (iCheckFence=%d), "
+                         "psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>...",
+                         __func__, iCheck3DFence,
+                         (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext));
+
+               /* Resolve the sync checkpoints that make up the input fence */
+               eError = SyncCheckpointResolveFence(
+                   psRenderContext->psDeviceNode->hSyncCheckpointContext,
+                   iCheck3DFence,
+                   &ui32Fence3DSyncCheckpointCount,
+                   &apsFence3DSyncCheckpoints,
+                   &uiCheck3DFenceUID,
+                   ui32PDumpFlags);
+               if (unlikely(eError != PVRSRV_OK))
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)",
+                                 __func__, eError));
+                       goto fail_resolve_input_3d_fence;
+               }
+
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d "
+                         "checkpoints (apsFenceSyncCheckpoints=<%p>)",
+                         __func__, iCheck3DFence, ui32Fence3DSyncCheckpointCount,
+                         (void*)apsFence3DSyncCheckpoints));
+
+#if defined(TA3D_CHECKPOINT_DEBUG)
+               if (apsFence3DSyncCheckpoints)
+               {
+                       _DebugSyncCheckpoints(__func__, "3D", apsFence3DSyncCheckpoints,
+                                             ui32Fence3DSyncCheckpointCount);
+               }
+#endif /* defined(TA3D_CHECKPOINT_DEBUG) */
+       }
+
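+       /* If native fences/timelines are in use, grow the fence and update
+        * totals: resolved input fence checkpoints are checked by the TA and/or
+        * 3D (3D checkpoints created by other processes are also checked by the
+        * TA), and room is reserved for the output fence update checkpoints.
+        */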
+       if (iCheckTAFence >= 0 || iUpdateTATimeline >= 0 ||
+           iCheck3DFence >= 0 || iUpdate3DTimeline >= 0)
+       {
+               IMG_UINT32 i;
+
+               if (bKickTA)
+               {
+                       ui32TAFenceCount += ui32FenceTASyncCheckpointCount;
+
+                       for (i = 0; i < ui32Fence3DSyncCheckpointCount; i++)
+                       {
+                               if (SyncCheckpointGetCreator(apsFence3DSyncCheckpoints[i]) !=
+                                   uiCurrentProcess)
+                               {
+                                       ui32TAFenceCount++;
+                               }
+                       }
+               }
+
+               if (bKick3D)
+               {
+                       ui323DFenceCount += ui32Fence3DSyncCheckpointCount;
+               }
+
+               ui32TAUpdateCount += iUpdateTATimeline != PVRSRV_NO_TIMELINE ?
+                               UPDATE_FENCE_CHECKPOINT_COUNT : 0;
+               ui323DUpdateCount += iUpdate3DTimeline != PVRSRV_NO_TIMELINE ?
+                               UPDATE_FENCE_CHECKPOINT_COUNT : 0;
+               ui32PRUpdateCount += iUpdate3DTimeline != PVRSRV_NO_TIMELINE && !bKick3D ?
+                               UPDATE_FENCE_CHECKPOINT_COUNT : 0;
+       }
+
+#if defined(SUPPORT_VALIDATION)
+       /* Check if TestingSLR is adding an extra sync checkpoint to the
+        * 3D fence check (which we won't signal)
+        */
+       if ((psDevInfo->ui32TestSLRInterval > 0) &&
+           (--psDevInfo->ui32TestSLRCount == 0))
+       {
+               bTestSLRAdd3DCheck = IMG_TRUE;
+               psDevInfo->ui32TestSLRCount = psDevInfo->ui32TestSLRInterval;
+       }
+
+       if (bTestSLRAdd3DCheck)
+       {
+               if (iUpdate3DTimeline == PVRSRV_NO_TIMELINE)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Would append additional SLR checkpoint "
+                                "to 3D fence but no update 3D timeline provided", __func__));
+               }
+               else
+               {
+                       SyncCheckpointAlloc(psRenderContext->psDeviceNode->hSyncCheckpointContext,
+                                           iUpdate3DTimeline,
+                                           hTestSLRTmpFence,
+                                           "TestSLRCheck",
+                                           &psDummySyncCheckpoint);
+                       PVR_DPF((PVR_DBG_WARNING, "%s: Appending additional SLR checkpoint to 3D fence "
+                                                 "checkpoints (psDummySyncCheckpoint=<%p>)",
+                                                 __func__, (void*)psDummySyncCheckpoint));
+                       SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DFence,
+                                                     1,
+                                                     &psDummySyncCheckpoint);
+                       if (!pauiClient3DFenceUFOAddress)
+                       {
+                               pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs;
+                       }
+
+                       if (ui32Client3DFenceCount == 0)
+                       {
+                               b3DFenceOnSyncCheckpointsOnly = IMG_TRUE;
+                       }
+                       ui323DFenceCount++;
+               }
+       }
+#endif /* defined(SUPPORT_VALIDATION) */
+
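+       /* First pass: size the TA, PR fence, PR update and 3D CCB commands via
+        * the command helpers so that CCB space can be checked before anything
+        * is committed.
+        */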
+       if (bKickTA)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   calling RGXCmdHelperInitCmdCCB(),"
+                         " ui32TAFenceCount=%d, ui32TAUpdateCount=%d",
+                         __func__, ui32TAFenceCount, ui32TAUpdateCount));
+
+               RGXCmdHelperInitCmdCCB_CommandSize(
+                   psDevInfo,
+                   0,
+                   ui32TAFenceCount,
+                   ui32TAUpdateCount,
+                   ui32TACmdSize,
+                   &pPreAddr,
+                   (bKick3D ? NULL : &pPostAddr),
+                   (bKick3D ? NULL : &pRMWUFOAddr),
+                   pasTACmdHelperData
+               );
+       }
+
+       if (bKickPR)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   calling RGXCmdHelperInitCmdCCB(),"
+                         " ui323DFenceCount=%d", __func__,
+                         ui323DFenceCount));
+
+               RGXCmdHelperInitCmdCCB_CommandSize(
+                   psDevInfo,
+                   0,
+                   ui323DFenceCount,
+                   0,
+                   sizeof(sPRUFO),
+                   NULL,
+                   NULL,
+                   NULL,
+                   &pas3DCmdHelperData[ui323DCmdCount++]
+               );
+       }
+
+       if (bKickPR && !bUseCombined3DAnd3DPR)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   calling RGXCmdHelperInitCmdCCB(),"
+                         " ui32PRUpdateCount=%d", __func__,
+                         ui32PRUpdateCount));
+
+               RGXCmdHelperInitCmdCCB_CommandSize(
+                   psDevInfo,
+                   0,
+                   0,
+                   ui32PRUpdateCount,
+                   /* If the client has not provided a 3DPR command, the regular 3D
+                    * command should be used instead */
+                   pui83DPRDMCmd ? ui323DPRCmdSize : ui323DCmdSize,
+                   NULL,
+                   NULL,
+                   NULL,
+                   &pas3DCmdHelperData[ui323DCmdCount++]
+               );
+       }
+
+       if (bKick3D || bAbort)
+       {
+               if (!bKickTA)
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s:   calling RGXCmdHelperInitCmdCCB(),"
+                                 " ui323DFenceCount=%d", __func__,
+                                 ui323DFenceCount));
+               }
+
+               RGXCmdHelperInitCmdCCB_CommandSize(
+                   psDevInfo,
+                   0,
+                   bKickTA ? 0 : ui323DFenceCount,
+                   ui323DUpdateCount,
+                   ui323DCmdSize,
+                   (bKickTA ? NULL : &pPreAddr),
+                   &pPostAddr,
+                   &pRMWUFOAddr,
+                   &pas3DCmdHelperData[ui323DCmdCount++]
+               );
+       }
+
+       if (bKickTA)
+       {
+               ui32TACmdSizeTmp = RGXCmdHelperGetCommandSize(1, pasTACmdHelperData);
+
+               eError = RGXCheckSpaceCCB(
+                   FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext),
+                   ui32TACmdSizeTmp
+               );
+               if (eError != PVRSRV_OK)
+               {
+                       goto err_not_enough_space;
+               }
+       }
+
+       if (ui323DCmdCount > 0)
+       {
+               ui323DCmdSizeTmp = RGXCmdHelperGetCommandSize(ui323DCmdCount, pas3DCmdHelperData);
+
+               eError = RGXCheckSpaceCCB(
+                   FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext),
+                   ui323DCmdSizeTmp
+               );
+               if (eError != PVRSRV_OK)
+               {
+                       goto err_not_enough_space;
+               }
+       }
+
+       /* Reset the 3D command count used above when sizing the command helpers */
+       ui323DCmdCount = 0;
+
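+       /* Populate the per-context sync address lists with the FW addresses of
+        * the client-supplied sync prims; checkpoints resolved from fences and
+        * buffer sync objects are appended to the same lists further below.
+        */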
+       CHKPT_DBG((PVR_DBG_ERROR,
+                          "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrListTAFence, %d fences)...",
+                          __func__, ui32ClientTAFenceCount));
+       eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrListTAFence,
+                       ui32ClientTAFenceCount,
+                       apsClientTAFenceSyncPrimBlock,
+                       paui32ClientTAFenceSyncOffset);
+       if (unlikely(eError != PVRSRV_OK))
+       {
+               goto err_populate_sync_addr_list_ta_fence;
+       }
+
+       if (ui32ClientTAFenceCount)
+       {
+               pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs;
+       }
+
+       CHKPT_DBG((PVR_DBG_ERROR,
+                          "%s: pauiClientTAFenceUFOAddress=<%p> ",
+                          __func__, (void*)pauiClientTAFenceUFOAddress));
+
+       CHKPT_DBG((PVR_DBG_ERROR,
+                          "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrListTAUpdate, %d updates)...",
+                          __func__, ui32ClientTAUpdateCount));
+       eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrListTAUpdate,
+                       ui32ClientTAUpdateCount,
+                       apsClientTAUpdateSyncPrimBlock,
+                       paui32ClientTAUpdateSyncOffset);
+       if (unlikely(eError != PVRSRV_OK))
+       {
+               goto err_populate_sync_addr_list_ta_update;
+       }
+
+       if (ui32ClientTAUpdateCount)
+       {
+               pauiClientTAUpdateUFOAddress = psRenderContext->sSyncAddrListTAUpdate.pasFWAddrs;
+       }
+       CHKPT_DBG((PVR_DBG_ERROR,
+                          "%s: pauiClientTAUpdateUFOAddress=<%p> ",
+                          __func__, (void*)pauiClientTAUpdateUFOAddress));
+
+       CHKPT_DBG((PVR_DBG_ERROR,
+                          "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrList3DFence, %d fences)...",
+                          __func__, ui32Client3DFenceCount));
+       eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrList3DFence,
+                       ui32Client3DFenceCount,
+                       NULL,
+                       NULL);
+       if (unlikely(eError != PVRSRV_OK))
+       {
+               goto err_populate_sync_addr_list_3d_fence;
+       }
+
+       if (ui32Client3DFenceCount)
+       {
+               pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs;
+       }
+       CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClient3DFenceUFOAddress=<%p> ",
+                          __func__, (void*)pauiClient3DFenceUFOAddress));
+
+       CHKPT_DBG((PVR_DBG_ERROR,
+                          "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrList3DUpdate, %d updates)...",
+                          __func__, ui32Client3DUpdateCount));
+       eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrList3DUpdate,
+                       ui32Client3DUpdateCount,
+                       apsClient3DUpdateSyncPrimBlock,
+                       paui32Client3DUpdateSyncOffset);
+       if (unlikely(eError != PVRSRV_OK))
+       {
+               goto err_populate_sync_addr_list_3d_update;
+       }
+
+       if (ui32Client3DUpdateCount || (iUpdate3DTimeline != PVRSRV_NO_TIMELINE && piUpdate3DFence && bKick3D))
+       {
+               pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+       }
+       CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClient3DUpdateUFOAddress=<%p> ",
+                          __func__, (void*)pauiClient3DUpdateUFOAddress));
+
+       eError = SyncPrimitiveBlockToFWAddr(psPRFenceSyncPrimBlock, ui32PRFenceSyncOffset, &uiPRFenceUFOAddress);
+
+       if (unlikely(eError != PVRSRV_OK))
+       {
+               goto err_pr_fence_address;
+       }
+
+#if (ENABLE_TA3D_UFO_DUMP == 1)
+       DumpUfoList(ui32ClientTAFenceCount, ui32ClientTAUpdateCount,
+                   ui32Client3DFenceCount + (bTestSLRAdd3DCheck ? 1 : 0),
+                   ui32Client3DUpdateCount,
+                   pauiClientTAFenceUFOAddress, paui32ClientTAFenceValue,
+                   pauiClientTAUpdateUFOAddress, paui32ClientTAUpdateValue,
+                   pauiClient3DFenceUFOAddress, NULL,
+                   pauiClient3DUpdateUFOAddress, paui32Client3DUpdateValue);
+#endif /* (ENABLE_TA3D_UFO_DUMP == 1) */
+
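+       /* Attach any buffer sync checkpoints resolved earlier: fence checkpoints
+        * go to the TA fence list (or the 3D fence list when there is no TA
+        * kick), and the single buffer update checkpoint goes to the 3D updates
+        * when kicking 3D, otherwise to the PR update.
+        */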
+       if (ui32SyncPMRCount)
+       {
+#if defined(SUPPORT_BUFFER_SYNC)
+#if !defined(SUPPORT_STRIP_RENDERING)
+               /* Append buffer sync fences to TA fences */
+               if (ui32BufferFenceSyncCheckpointCount > 0 && bKickTA)
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR,
+                                          "%s:   Append %d buffer sync checkpoints to TA Fence "
+                                          "(&psRenderContext->sSyncAddrListTAFence=<%p>, "
+                                          "pauiClientTAFenceUFOAddress=<%p>)...",
+                                          __func__,
+                                          ui32BufferFenceSyncCheckpointCount,
+                                          (void*)&psRenderContext->sSyncAddrListTAFence ,
+                                          (void*)pauiClientTAFenceUFOAddress));
+                       SyncAddrListAppendAndDeRefCheckpoints(&psRenderContext->sSyncAddrListTAFence,
+                                       ui32BufferFenceSyncCheckpointCount,
+                                       apsBufferFenceSyncCheckpoints);
+                       if (!pauiClientTAFenceUFOAddress)
+                       {
+                               pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs;
+                       }
+                       if (ui32ClientTAFenceCount == 0)
+                       {
+                               bTAFenceOnSyncCheckpointsOnly = IMG_TRUE;
+                       }
+                       ui32ClientTAFenceCount += ui32BufferFenceSyncCheckpointCount;
+               }
+               else
+#endif
+               /* Append buffer sync fences to 3D fences */
+               if (ui32BufferFenceSyncCheckpointCount > 0)
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR,
+                                          "%s:   Append %d buffer sync checkpoints to 3D Fence "
+                                          "(&psRenderContext->sSyncAddrList3DFence=<%p>, "
+                                          "pauiClient3DFenceUFOAddress=<%p>)...",
+                                          __func__,
+                                          ui32BufferFenceSyncCheckpointCount,
+                                          (void*)&psRenderContext->sSyncAddrList3DFence,
+                                          (void*)pauiClient3DFenceUFOAddress));
+                       SyncAddrListAppendAndDeRefCheckpoints(&psRenderContext->sSyncAddrList3DFence,
+                                       ui32BufferFenceSyncCheckpointCount,
+                                       apsBufferFenceSyncCheckpoints);
+                       if (!pauiClient3DFenceUFOAddress)
+                       {
+                               pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs;
+                       }
+                       if (ui32Client3DFenceCount == 0)
+                       {
+                               b3DFenceOnSyncCheckpointsOnly = IMG_TRUE;
+                       }
+                       ui32Client3DFenceCount += ui32BufferFenceSyncCheckpointCount;
+               }
+
+               if (psBufferUpdateSyncCheckpoint)
+               {
+                       /* If we have a 3D kick, append the update to the 3D updates;
+                        * otherwise append it to the PR update. */
+                       if (bKick3D)
+                       {
+                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                  "%s:   Append 1 buffer sync checkpoint<%p> to 3D Update"
+                                                  " (&psRenderContext->sSyncAddrList3DUpdate=<%p>,"
+                                                  " pauiClient3DUpdateUFOAddress=<%p>)...",
+                                                  __func__,
+                                                  (void*)psBufferUpdateSyncCheckpoint,
+                                                  (void*)&psRenderContext->sSyncAddrList3DUpdate,
+                                                  (void*)pauiClient3DUpdateUFOAddress));
+                               /* Append buffer sync update to 3D updates */
+                               SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate,
+                                               1,
+                                               &psBufferUpdateSyncCheckpoint);
+                               if (!pauiClient3DUpdateUFOAddress)
+                               {
+                                       pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+                               }
+                               ui32Client3DUpdateCount++;
+                       }
+                       else
+                       {
+                               CHKPT_DBG((PVR_DBG_ERROR,
+                                          "%s:   Append 1 buffer sync checkpoint<%p> to PR Update"
+                                                  " (&psRenderContext->sSyncAddrList3DUpdate=<%p>,"
+                                                  " pauiClientPRUpdateUFOAddress=<%p>)...",
+                                          __func__,
+                                                  (void*)psBufferUpdateSyncCheckpoint,
+                                                  (void*)&psRenderContext->sSyncAddrList3DUpdate,
+                                                  (void*)pauiClientPRUpdateUFOAddress));
+                               /* Attach the update to the 3D updates (used for the PR) */
+                               SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate,
+                                               1,
+                                               &psBufferUpdateSyncCheckpoint);
+                               if (!pauiClientPRUpdateUFOAddress)
+                               {
+                                       pauiClientPRUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+                               }
+                               ui32ClientPRUpdateCount++;
+                       }
+               }
+               CHKPT_DBG((PVR_DBG_ERROR,
+                                  "%s:   (after buffer_sync) ui32ClientTAFenceCount=%d, "
+                                  "ui32ClientTAUpdateCount=%d, ui32Client3DFenceCount=%d, "
+                                  "ui32Client3DUpdateCount=%d, ui32ClientPRUpdateCount=%d,",
+                                  __func__, ui32ClientTAFenceCount, ui32ClientTAUpdateCount,
+                                  ui32Client3DFenceCount, ui32Client3DUpdateCount,
+                                  ui32ClientPRUpdateCount));
+
+#else /* defined(SUPPORT_BUFFER_SYNC) */
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Buffer sync not supported but got %u buffers",
+                                __func__, ui32SyncPMRCount));
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto err_no_buffer_sync_invalid_params;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+       }
+
+       /*
+        * The hardware requires a PR to be submitted if there is a TA (otherwise
+        * it can wedge if we run out of PB space with no PR to run)
+        *
+        * If we only have a TA, attach native checks to the TA and updates to the PR
+        * If we have a TA and 3D, attach checks to TA, updates to 3D
+        * If we only have a 3D, attach checks and updates to the 3D
+        *
+        * Note that 'updates' includes the cleanup syncs for 'check' fence FDs, in
+        * addition to the update fence FD (if supplied)
+        *
+        * Currently, the client driver never kicks only the 3D, so we don't
+        * support that case for the time being.
+        */
+       if (iCheckTAFence >= 0 || iUpdateTATimeline >= 0 ||
+           iCheck3DFence >= 0 || iUpdate3DTimeline >= 0)
+       {
+               PRGXFWIF_UFO_ADDR       *pauiClientTAIntUpdateUFOAddress = NULL;
+               PRGXFWIF_UFO_ADDR       *pauiClient3DIntUpdateUFOAddress = NULL;
+
+               CHKPT_DBG((PVR_DBG_ERROR,
+                                  "%s: [TA] iCheckFence = %d, iUpdateTimeline = %d",
+                                  __func__, iCheckTAFence, iUpdateTATimeline));
+               CHKPT_DBG((PVR_DBG_ERROR,
+                                  "%s: [3D] iCheckFence = %d, iUpdateTimeline = %d",
+                                  __func__, iCheck3DFence, iUpdate3DTimeline));
+
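+               /* Create the requested TA/3D output fences and their sync
+                * checkpoints, then append the corresponding timeline sync prim
+                * updates to the TA/3D update lists.
+                */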
+               {
+                       /* Create the output fence for TA (if required) */
+                       if (iUpdateTATimeline != PVRSRV_NO_TIMELINE)
+                       {
+                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                  "%s: calling SyncCheckpointCreateFence[TA] "
+                                                  "(iUpdateFence=%d, iUpdateTimeline=%d, "
+                                                  "psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>)",
+                                                  __func__, iUpdateTAFence, iUpdateTATimeline,
+                                                  (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext));
+                               eError = SyncCheckpointCreateFence(psRenderContext->psDeviceNode,
+                                               szFenceNameTA,
+                                               iUpdateTATimeline,
+                                               psRenderContext->psDeviceNode->hSyncCheckpointContext,
+                                               &iUpdateTAFence,
+                                               &uiUpdateTAFenceUID,
+                                               &pvTAUpdateFenceFinaliseData,
+                                               &psUpdateTASyncCheckpoint,
+                                               (void*)&psTAFenceTimelineUpdateSync,
+                                               &ui32TAFenceTimelineUpdateValue,
+                                               ui32PDumpFlags);
+                               if (unlikely(eError != PVRSRV_OK))
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR,
+                                                       "%s:   SyncCheckpointCreateFence[TA] failed (%s)",
+                                                       __func__,
+                                                       PVRSRVGetErrorString(eError)));
+                                       goto fail_create_ta_fence;
+                               }
+
+                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                  "%s: returned from SyncCheckpointCreateFence[TA] "
+                                                  "(iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, "
+                                                  "ui32FenceTimelineUpdateValue=0x%x)",
+                                                  __func__, iUpdateTAFence,
+                                                  (void*)psTAFenceTimelineUpdateSync,
+                                                  ui32TAFenceTimelineUpdateValue));
+
+                               /* Store the FW address of the update sync checkpoint in pauiClientTAIntUpdateUFOAddress */
+                               pauiClientTAIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdateTASyncCheckpoint);
+                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                  "%s: pauiClientIntUpdateUFOAddress[TA]->ui32Addr=0x%x",
+                                                  __func__, pauiClientTAIntUpdateUFOAddress->ui32Addr));
+                       }
+
+                       /* Append the sync prim update for the TA timeline (if required) */
+                       if (psTAFenceTimelineUpdateSync)
+                       {
+                               sTASyncData.ui32ClientUpdateCount = ui32ClientTAUpdateCount;
+                               sTASyncData.ui32ClientUpdateValueCount = ui32ClientTAUpdateValueCount;
+                               sTASyncData.ui32ClientPRUpdateValueCount = (bKick3D) ? 0 : ui32ClientPRUpdateValueCount;
+                               sTASyncData.paui32ClientUpdateValue = paui32ClientTAUpdateValue;
+
+                               eError = RGXSyncAppendTimelineUpdate(ui32TAFenceTimelineUpdateValue,
+                                               &psRenderContext->sSyncAddrListTAUpdate,
+                                               (bKick3D) ? NULL : &psRenderContext->sSyncAddrList3DUpdate,
+                                               psTAFenceTimelineUpdateSync,
+                                               &sTASyncData,
+                                               bKick3D);
+                               if (unlikely(eError != PVRSRV_OK))
+                               {
+                                       goto fail_alloc_update_values_mem_TA;
+                               }
+
+                               paui32ClientTAUpdateValue = sTASyncData.paui32ClientUpdateValue;
+                               ui32ClientTAUpdateValueCount = sTASyncData.ui32ClientUpdateValueCount;
+                               pauiClientTAUpdateUFOAddress = sTASyncData.pauiClientUpdateUFOAddress;
+                               ui32ClientTAUpdateCount = sTASyncData.ui32ClientUpdateCount;
+                       }
+
+                       /* Create the output fence for 3D (if required) */
+                       if (iUpdate3DTimeline != PVRSRV_NO_TIMELINE)
+                       {
+                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                  "%s: calling SyncCheckpointCreateFence[3D] "
+                                                  "(iUpdateFence=%d, iUpdateTimeline=%d, "
+                                                  "psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>)",
+                                                  __func__, iUpdate3DFence, iUpdate3DTimeline,
+                                                  (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext));
+                               eError = SyncCheckpointCreateFence(psRenderContext->psDeviceNode,
+                                               szFenceName3D,
+                                               iUpdate3DTimeline,
+                                               psRenderContext->psDeviceNode->hSyncCheckpointContext,
+                                               &iUpdate3DFence,
+                                               &uiUpdate3DFenceUID,
+                                               &pv3DUpdateFenceFinaliseData,
+                                               &psUpdate3DSyncCheckpoint,
+                                               (void*)&ps3DFenceTimelineUpdateSync,
+                                               &ui323DFenceTimelineUpdateValue,
+                                               ui32PDumpFlags);
+                               if (unlikely(eError != PVRSRV_OK))
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR,
+                                                       "%s:   SyncCheckpointCreateFence[3D] failed (%s)",
+                                                       __func__,
+                                                       PVRSRVGetErrorString(eError)));
+                                       goto fail_create_3d_fence;
+                               }
+
+                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                  "%s: returned from SyncCheckpointCreateFence[3D] "
+                                                  "(iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, "
+                                                  "ui32FenceTimelineUpdateValue=0x%x)",
+                                                  __func__, iUpdate3DFence,
+                                                  (void*)ps3DFenceTimelineUpdateSync,
+                                                  ui323DFenceTimelineUpdateValue));
+
+                               /* Store the FW address of the update sync checkpoint in pauiClient3DIntUpdateUFOAddress */
+                               pauiClient3DIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdate3DSyncCheckpoint);
+                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                  "%s: pauiClientIntUpdateUFOAddress[3D]->ui32Addr=0x%x",
+                                                  __func__, pauiClient3DIntUpdateUFOAddress->ui32Addr));
+                       }
+
+                       /* Append the sync prim update for the 3D timeline (if required) */
+                       if (ps3DFenceTimelineUpdateSync)
+                       {
+                               s3DSyncData.ui32ClientUpdateCount = ui32Client3DUpdateCount;
+                               s3DSyncData.ui32ClientUpdateValueCount = ui32Client3DUpdateValueCount;
+                               s3DSyncData.ui32ClientPRUpdateValueCount = ui32ClientPRUpdateValueCount;
+                               s3DSyncData.paui32ClientUpdateValue = paui32Client3DUpdateValue;
+
+                               eError = RGXSyncAppendTimelineUpdate(ui323DFenceTimelineUpdateValue,
+                                               &psRenderContext->sSyncAddrList3DUpdate,
+                                               &psRenderContext->sSyncAddrList3DUpdate,        /*!< PR update: is this required? */
+                                               ps3DFenceTimelineUpdateSync,
+                                               &s3DSyncData,
+                                               bKick3D);
+                               if (unlikely(eError != PVRSRV_OK))
+                               {
+                                       goto fail_alloc_update_values_mem_3D;
+                               }
+
+                               paui32Client3DUpdateValue = s3DSyncData.paui32ClientUpdateValue;
+                               ui32Client3DUpdateValueCount = s3DSyncData.ui32ClientUpdateValueCount;
+                               pauiClient3DUpdateUFOAddress = s3DSyncData.pauiClientUpdateUFOAddress;
+                               ui32Client3DUpdateCount = s3DSyncData.ui32ClientUpdateCount;
+
+                               if (!bKick3D)
+                               {
+                                       paui32ClientPRUpdateValue = s3DSyncData.paui32ClientPRUpdateValue;
+                                       ui32ClientPRUpdateValueCount = s3DSyncData.ui32ClientPRUpdateValueCount;
+                                       pauiClientPRUpdateUFOAddress = s3DSyncData.pauiClientPRUpdateUFOAddress;
+                                       ui32ClientPRUpdateCount = s3DSyncData.ui32ClientPRUpdateCount;
+                               }
+                       }
+
+                       /*
+                        * The hardware requires a PR to be submitted if there is a TA OOM.
+                        * If we only have a TA, attach native checks and updates to the TA
+                        * and 3D updates to the PR.
+                        * If we have a TA and 3D, attach the native TA checks and updates
+                        * to the TA and similarly for the 3D.
+                        * Note that 'updates' includes the cleanup syncs for 'check' fence
+                        * FDs, in addition to the update fence FD (if supplied).
+                        * Currently, the client driver never kicks only the 3D, so we don't
+                        * support that for the time being.
+                        */
+
+                       {
+                               if (bKickTA)
+                               {
+                                       /* Attach checks and updates to TA */
+
+                                       /* Checks (from input fence) */
+                                       if (ui32FenceTASyncCheckpointCount > 0)
+                                       {
+                                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                                  "%s:   Append %d sync checkpoints to TA Fence (apsFenceSyncCheckpoints=<%p>)...",
+                                                                  __func__,
+                                                                  ui32FenceTASyncCheckpointCount,
+                                                                  (void*)apsFenceTASyncCheckpoints));
+                                               SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAFence,
+                                                               ui32FenceTASyncCheckpointCount,
+                                                               apsFenceTASyncCheckpoints);
+                                               if (!pauiClientTAFenceUFOAddress)
+                                               {
+                                                       pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs;
+                                               }
+                                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                                  "%s:   {ui32ClientTAFenceCount was %d, now %d}",
+                                                                  __func__, ui32ClientTAFenceCount,
+                                                                  ui32ClientTAFenceCount + ui32FenceTASyncCheckpointCount));
+                                               if (ui32ClientTAFenceCount == 0)
+                                               {
+                                                       bTAFenceOnSyncCheckpointsOnly = IMG_TRUE;
+                                               }
+                                               ui32ClientTAFenceCount += ui32FenceTASyncCheckpointCount;
+                                       }
+                                       CHKPT_DBG((PVR_DBG_ERROR,
+                                                          "%s:   {ui32ClientTAFenceCount now %d}",
+                                                          __func__, ui32ClientTAFenceCount));
+
+                                       if (psUpdateTASyncCheckpoint)
+                                       {
+                                               /* Update (from output fence) */
+                                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                                  "%s:   Append 1 sync checkpoint<%p> (ID=%d) to TA Update...",
+                                                                  __func__, (void*)psUpdateTASyncCheckpoint,
+                                                                  SyncCheckpointGetId(psUpdateTASyncCheckpoint)));
+                                               SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAUpdate,
+                                                               1,
+                                                               &psUpdateTASyncCheckpoint);
+                                               if (!pauiClientTAUpdateUFOAddress)
+                                               {
+                                                       pauiClientTAUpdateUFOAddress = psRenderContext->sSyncAddrListTAUpdate.pasFWAddrs;
+                                               }
+                                               ui32ClientTAUpdateCount++;
+                                       }
+
+                                       if (!bKick3D && psUpdate3DSyncCheckpoint)
+                                       {
+                                               /* Attach update to the 3D (used for PR) Updates */
+                                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                                  "%s:   Append 1 sync checkpoint<%p> (ID=%d) to 3D(PR) Update...",
+                                                                  __func__, (void*)psUpdate3DSyncCheckpoint,
+                                                                  SyncCheckpointGetId(psUpdate3DSyncCheckpoint)));
+                                               SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate,
+                                                               1,
+                                                               &psUpdate3DSyncCheckpoint);
+                                               if (!pauiClientPRUpdateUFOAddress)
+                                               {
+                                                       pauiClientPRUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+                                               }
+                                               ui32ClientPRUpdateCount++;
+                                       }
+                               }
+
+                               if (bKick3D)
+                               {
+                                       /* Attach checks and updates to the 3D */
+
+                                       /* Checks (from input fence) */
+                                       if (ui32Fence3DSyncCheckpointCount > 0)
+                                       {
+                                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                                  "%s:   Append %d sync checkpoints to 3D Fence...",
+                                                                  __func__, ui32Fence3DSyncCheckpointCount));
+                                               SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DFence,
+                                                               ui32Fence3DSyncCheckpointCount,
+                                                               apsFence3DSyncCheckpoints);
+                                               if (!pauiClient3DFenceUFOAddress)
+                                               {
+                                                       pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs;
+                                               }
+                                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                                  "%s:   {ui32Client3DFenceCount was %d, now %d}",
+                                                                  __func__, ui32Client3DFenceCount,
+                                                                  ui32Client3DFenceCount + ui32Fence3DSyncCheckpointCount));
+                                               if (ui32Client3DFenceCount == 0)
+                                               {
+                                                       b3DFenceOnSyncCheckpointsOnly = IMG_TRUE;
+                                               }
+                                               ui32Client3DFenceCount += ui32Fence3DSyncCheckpointCount;
+                                       }
+                                       CHKPT_DBG((PVR_DBG_ERROR,
+                                                          "%s:   {ui32Client3DFenceCount now %d}",
+                                                          __func__, ui32Client3DFenceCount));
+
+                                       if (psUpdate3DSyncCheckpoint)
+                                       {
+                                               /* Update (from output fence) */
+                                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                                  "%s:   Append 1 sync checkpoint<%p> (ID=%d) to 3D Update...",
+                                                                  __func__, (void*)psUpdate3DSyncCheckpoint,
+                                                                  SyncCheckpointGetId(psUpdate3DSyncCheckpoint)));
+                                               SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate,
+                                                               1,
+                                                               &psUpdate3DSyncCheckpoint);
+                                               if (!pauiClient3DUpdateUFOAddress)
+                                               {
+                                                       pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+                                               }
+                                               ui32Client3DUpdateCount++;
+                                       }
+                               }
+
+                               /*
+                                * Relocate sync checkpoints from the 3D fence that are
+                                * external to the current process, to the TA fence.
+                                * This avoids a sync lockup when dependent renders are
+                                * submitted out-of-order and a PR must be scheduled.
+                                */
+                               if (bKickTA)
+                               {
+                                       /* Search for external timeline dependencies */
+                                       CHKPT_DBG((PVR_DBG_ERROR,
+                                                          "%s: Checking 3D fence for external sync points (%d)...",
+                                                          __func__, ui32Fence3DSyncCheckpointCount));
+
+                                       for (i=0; i<ui32Fence3DSyncCheckpointCount; i++)
+                                       {
+                                               /* Check to see if the checkpoint is on a TL owned by
+                                                * another process.
+                                                */
+                                               if (SyncCheckpointGetCreator(apsFence3DSyncCheckpoints[i]) != uiCurrentProcess)
+                                               {
+                                                       /* 3D Sync point represents cross process
+                                                        * dependency, copy sync point to TA command fence. */
+                                                       CHKPT_DBG((PVR_DBG_ERROR,
+                                                                          "%s:   Append 1 sync checkpoint<%p> (ID=%d) to TA Fence...",
+                                                                          __func__, (void*)apsFence3DSyncCheckpoints[i],
+                                                                          SyncCheckpointGetId(apsFence3DSyncCheckpoints[i])));
+
+                                                       SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAFence,
+                                                                                                                 1,
+                                                                                                                 &apsFence3DSyncCheckpoints[i]);
+
+                                                       if (!pauiClientTAFenceUFOAddress)
+                                                       {
+                                                               pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs;
+                                                       }
+
+                                                       CHKPT_DBG((PVR_DBG_ERROR,
+                                                                          "%s:   {ui32ClientTAFenceCount was %d, now %d}",
+                                                                          __func__,
+                                                                          ui32ClientTAFenceCount,
+                                                                          ui32ClientTAFenceCount + 1));
+
+                                                       if (ui32ClientTAFenceCount == 0)
+                                                       {
+                                                               bTAFenceOnSyncCheckpointsOnly = IMG_TRUE;
+                                                       }
+
+                                                       ui32ClientTAFenceCount++;
+                                               }
+                                       }
+                               }
+
+                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                  "%s:   (after pvr_sync) ui32ClientTAFenceCount=%d, "
+                                                  "ui32ClientTAUpdateCount=%d, ui32Client3DFenceCount=%d, "
+                                                  "ui32Client3DUpdateCount=%d, ui32ClientPRUpdateCount=%d",
+                                                  __func__,
+                                                  ui32ClientTAFenceCount, ui32ClientTAUpdateCount,
+                                                  ui32Client3DFenceCount, ui32Client3DUpdateCount,
+                                                  ui32ClientPRUpdateCount));
+                       }
+               }
+
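+               /* Sanity checks: every non-zero fence/update count must have a matching
+                * UFO address array (and a value array where one is expected). */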
+               if (ui32ClientTAFenceCount)
+               {
+                       PVR_ASSERT(pauiClientTAFenceUFOAddress);
+                       if (!bTAFenceOnSyncCheckpointsOnly)
+                       {
+                               PVR_ASSERT(paui32ClientTAFenceValue);
+                       }
+               }
+               if (ui32ClientTAUpdateCount)
+               {
+                       PVR_ASSERT(pauiClientTAUpdateUFOAddress);
+                       if (ui32ClientTAUpdateValueCount>0)
+                       {
+                               PVR_ASSERT(paui32ClientTAUpdateValue);
+                       }
+               }
+               if (ui32Client3DFenceCount)
+               {
+                       PVR_ASSERT(pauiClient3DFenceUFOAddress);
+                       PVR_ASSERT(b3DFenceOnSyncCheckpointsOnly);
+               }
+               if (ui32Client3DUpdateCount)
+               {
+                       PVR_ASSERT(pauiClient3DUpdateUFOAddress);
+                       if (ui32Client3DUpdateValueCount>0)
+                       {
+                               PVR_ASSERT(paui32Client3DUpdateValue);
+                       }
+               }
+               if (ui32ClientPRUpdateCount)
+               {
+                       PVR_ASSERT(pauiClientPRUpdateUFOAddress);
+                       if (ui32ClientPRUpdateValueCount>0)
+                       {
+                               PVR_ASSERT(paui32ClientPRUpdateValue);
+                       }
+               }
+
+       }
+
+       CHKPT_DBG((PVR_DBG_ERROR,
+                          "%s: ui32ClientTAFenceCount=%d, pauiClientTAFenceUFOAddress=<%p>",
+                          __func__,
+                          ui32ClientTAFenceCount,
+                          (void*)pauiClientTAFenceUFOAddress));
+       CHKPT_DBG((PVR_DBG_ERROR,
+                          "%s: ui32ClientTAUpdateCount=%d, pauiClientTAUpdateUFOAddress=<%p>",
+                          __func__,
+                          ui32ClientTAUpdateCount,
+                          (void*)pauiClientTAUpdateUFOAddress));
+#if (ENABLE_TA3D_UFO_DUMP == 1)
+       DumpUfoList(ui32ClientTAFenceCount, ui32ClientTAUpdateCount,
+                   ui32Client3DFenceCount + (bTestSLRAdd3DCheck ? 1 : 0),
+                               ui32Client3DUpdateCount,
+                   pauiClientTAFenceUFOAddress, paui32ClientTAFenceValue,
+                   pauiClientTAUpdateUFOAddress, paui32ClientTAUpdateValue,
+                   pauiClient3DFenceUFOAddress, NULL,
+                   pauiClient3DUpdateUFOAddress, paui32Client3DUpdateValue);
+#endif /* (ENABLE_TA3D_UFO_DUMP == 1) */
+
+       /* Command size check */
+       if (ui32TAFenceCount != ui32ClientTAFenceCount)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "TA pre-calculated number of fences"
+                       " is different than the actual number (%u != %u)",
+                       ui32TAFenceCount, ui32ClientTAFenceCount));
+       }
+       if (ui32TAUpdateCount != ui32ClientTAUpdateCount)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "TA pre-calculated number of updates"
+                       " is different than the actual number (%u != %u)",
+                       ui32TAUpdateCount, ui32ClientTAUpdateCount));
+       }
+       if (!bTestSLRAdd3DCheck && (ui323DFenceCount != ui32Client3DFenceCount))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "3D pre-calculated number of fences"
+                       " is different than the actual number (%u != %u)",
+                       ui323DFenceCount, ui32Client3DFenceCount));
+       }
+       if (ui323DUpdateCount != ui32Client3DUpdateCount)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "3D pre-calculated number of updates"
+                       " is different than the actual number (%u != %u)",
+                       ui323DUpdateCount, ui32Client3DUpdateCount));
+       }
+       if (ui32PRUpdateCount != ui32ClientPRUpdateCount)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "PR pre-calculated number of updates"
+                       " is different than the actual number (%u != %u)",
+                       ui32PRUpdateCount, ui32ClientPRUpdateCount));
+       }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
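+       /* Record the render workload characteristics (render target size, draw calls,
+          indices and MRTs); these are fed to WorkEstPrepare below for workload estimation. */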
+       if (bKickTA || bKick3D || bAbort)
+       {
+               sWorkloadCharacteristics.sTA3D.ui32RenderTargetSize  = ui32RenderTargetSize;
+               sWorkloadCharacteristics.sTA3D.ui32NumberOfDrawCalls = ui32NumberOfDrawCalls;
+               sWorkloadCharacteristics.sTA3D.ui32NumberOfIndices   = ui32NumberOfIndices;
+               sWorkloadCharacteristics.sTA3D.ui32NumberOfMRTs      = ui32NumberOfMRTs;
+       }
+#endif
+
+       /* Init and acquire space for the TA command if required */
+       if (bKickTA)
+       {
+               RGX_SERVER_RC_TA_DATA *psTAData = &psRenderContext->sTAData;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+               /* Prepare workload estimation */
+               WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice,
+                               &psRenderContext->sWorkEstData,
+                               &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sDataTA,
+                               RGXFWIF_CCB_CMD_TYPE_GEOM,
+                               &sWorkloadCharacteristics,
+                               ui64DeadlineInus,
+                               &sWorkloadKickDataTA);
+#endif
+
+               /* Init the TA command helper */
+               CHKPT_DBG((PVR_DBG_ERROR,
+                                  "%s:   calling RGXCmdHelperInitCmdCCB(), ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d",
+                                  __func__, ui32ClientTAFenceCount, ui32ClientTAUpdateCount));
+               RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(psTAData->psServerCommonContext),
+                               ui32ClientTAFenceCount,
+                               pauiClientTAFenceUFOAddress,
+                               paui32ClientTAFenceValue,
+                               ui32ClientTAUpdateCount,
+                               pauiClientTAUpdateUFOAddress,
+                               paui32ClientTAUpdateValue,
+                               ui32TACmdSize,
+                               pui8TADMCmd,
+                       &pPreAddr,
+                       (bKick3D ? NULL : &pPostAddr),
+                       (bKick3D ? NULL : &pRMWUFOAddr),
+                               RGXFWIF_CCB_CMD_TYPE_GEOM,
+                               ui32ExtJobRef,
+                               ui32IntJobRef,
+                               ui32PDumpFlags,
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+                               &sWorkloadKickDataTA,
+#else
+                               NULL,
+#endif
+                               "TA",
+                               bCCBStateOpen,
+                               pasTACmdHelperData);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+               /* The following is used to determine the offset of the command header containing
+                  the workload estimation data so that it can be accessed when the KCCB is read */
+               ui32TACmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(pasTACmdHelperData);
+#endif
+
+               eError = RGXCmdHelperAcquireCmdCCB(CCB_CMD_HELPER_NUM_TA_COMMANDS, pasTACmdHelperData);
+               if (unlikely(eError != PVRSRV_OK))
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d",
+                                          __func__, eError));
+                       goto fail_taacquirecmd;
+               }
+               else
+               {
+                       ui32TACmdCount++;
+               }
+       }
+
+       /* Init the PR fence command (and the 3D-PR command) if required */
+       if (bKickPR)
+       {
+               RGX_SERVER_RC_3D_DATA *ps3DData = &psRenderContext->s3DData;
+
+               /*
+                       The command helper doesn't know about the PR fence so create
+                       the command with all the fences against it and later create
+                       the PR command itself which _must_ come after the PR fence.
+               */
+               sPRUFO.puiAddrUFO = uiPRFenceUFOAddress;
+               sPRUFO.ui32Value = ui32PRFenceValue;
+
+               /* Init the PR fence command helper */
+               CHKPT_DBG((PVR_DBG_ERROR,
+                                  "%s:   calling RGXCmdHelperInitCmdCCB(), ui32Client3DFenceCount=%d",
+                                  __func__, ui32Client3DFenceCount));
+               RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext),
+                               ui32Client3DFenceCount + (bTestSLRAdd3DCheck ? 1 : 0),
+                               pauiClient3DFenceUFOAddress,
+                               NULL,
+                               0,
+                               NULL,
+                               NULL,
+                               sizeof(sPRUFO),
+                               (IMG_UINT8*) &sPRUFO,
+                               NULL,
+                               NULL,
+                               NULL,
+                               RGXFWIF_CCB_CMD_TYPE_FENCE_PR,
+                               ui32ExtJobRef,
+                               ui32IntJobRef,
+                               ui32PDumpFlags,
+                               NULL,
+                               "3D-PR-Fence",
+                               bCCBStateOpen,
+                               &pas3DCmdHelperData[ui323DCmdCount++]);
+
+               /* Init the 3D PR command helper */
+               /*
+                       Updates for Android (the fence sync and the timeline sync prim) are provided in the PR-update
+                       if no 3D is present. This ensures the timeline update cannot happen out of order with any
+                       other 3D already in flight for the same timeline (PR-updates are done in the 3D client CCB).
+                       Such an out-of-order timeline sync prim update could happen if we attached it to the TA update.
+                */
+               if (ui32ClientPRUpdateCount)
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR,
+                                          "%s: Line %d, ui32ClientPRUpdateCount=%d, "
+                                          "pauiClientPRUpdateUFOAddress=0x%x, "
+                                          "ui32ClientPRUpdateValueCount=%d, "
+                                          "paui32ClientPRUpdateValue=0x%x",
+                                          __func__, __LINE__, ui32ClientPRUpdateCount,
+                                          pauiClientPRUpdateUFOAddress->ui32Addr,
+                                          ui32ClientPRUpdateValueCount,
+                                          (ui32ClientPRUpdateValueCount == 0) ? PVRSRV_SYNC_CHECKPOINT_SIGNALLED : *paui32ClientPRUpdateValue));
+               }
+
+               if (!bUseCombined3DAnd3DPR)
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR,
+                                          "%s:   calling RGXCmdHelperInitCmdCCB(), ui32ClientPRUpdateCount=%d",
+                                          __func__, ui32ClientPRUpdateCount));
+                       RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext),
+                                       0,
+                                       NULL,
+                                       NULL,
+                                       ui32ClientPRUpdateCount,
+                                       pauiClientPRUpdateUFOAddress,
+                                       paui32ClientPRUpdateValue,
+                                       pui83DPRDMCmd ? ui323DPRCmdSize : ui323DCmdSize, // If the client has not provided a 3DPR command, the regular 3D command should be used instead
+                                       pui83DPRDMCmd ? pui83DPRDMCmd : pui83DDMCmd,
+                                       NULL,
+                                       NULL,
+                                       NULL,
+                                       RGXFWIF_CCB_CMD_TYPE_3D_PR,
+                                       ui32ExtJobRef,
+                                       ui32IntJobRef,
+                                       ui32PDumpFlags,
+                                       NULL,
+                                       "3D-PR",
+                                       bCCBStateOpen,
+                                       &pas3DCmdHelperData[ui323DCmdCount++]);
+               }
+       }
+
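+       /* Init the 3D command (or an ABORT command when this kick is an abort),
+          optionally preparing workload estimation data for it. */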
+       if (bKick3D || bAbort)
+       {
+               RGX_SERVER_RC_3D_DATA *ps3DData = &psRenderContext->s3DData;
+               const RGXFWIF_CCB_CMD_TYPE e3DCmdType = bAbort ? RGXFWIF_CCB_CMD_TYPE_ABORT : RGXFWIF_CCB_CMD_TYPE_3D;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+               /* Prepare workload estimation */
+               WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice,
+                               &psRenderContext->sWorkEstData,
+                               &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sData3D,
+                               e3DCmdType,
+                               &sWorkloadCharacteristics,
+                               ui64DeadlineInus,
+                               &sWorkloadKickData3D);
+#endif
+
+               /* Init the 3D command helper */
+               RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext),
+                               bKickTA ? 0 : ui32Client3DFenceCount, /* For a kick with a TA, the 3D fences are added before the PR command instead */
+                               bKickTA ? NULL : pauiClient3DFenceUFOAddress,
+                               NULL,
+                               ui32Client3DUpdateCount,
+                               pauiClient3DUpdateUFOAddress,
+                               paui32Client3DUpdateValue,
+                               ui323DCmdSize,
+                               pui83DDMCmd,
+                               (bKickTA ? NULL : &pPreAddr),
+                               &pPostAddr,
+                               &pRMWUFOAddr,
+                               e3DCmdType,
+                               ui32ExtJobRef,
+                               ui32IntJobRef,
+                               ui32PDumpFlags,
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+                               &sWorkloadKickData3D,
+#else
+                               NULL,
+#endif
+                               "3D",
+                               bCCBStateOpen,
+                               &pas3DCmdHelperData[ui323DCmdCount++]);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+               /* The following are used to determine the offset of the command header containing the workload estimation
+                  data so that it can be accessed when the KCCB is read */
+               ui323DCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(&pas3DCmdHelperData[ui323DCmdCount - 1]);
+               ui323DFullRenderCommandOffset = RGXCmdHelperGetCommandOffset(pas3DCmdHelperData, ui323DCmdCount - 1);
+#endif
+       }
+
+       /* Protect against array overflow in RGXCmdHelperAcquireCmdCCB() */
+       if (unlikely(ui323DCmdCount > CCB_CMD_HELPER_NUM_3D_COMMANDS))
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d", __func__, eError));
+               goto fail_3dcmdinit;
+       }
+
+       if (ui323DCmdCount)
+       {
+               PVR_ASSERT(bKickPR || bKick3D);
+
+               /* Acquire space for all the 3D command(s) */
+               eError = RGXCmdHelperAcquireCmdCCB(ui323DCmdCount, pas3DCmdHelperData);
+               if (unlikely(eError != PVRSRV_OK))
+               {
+                       /* If RGXCmdHelperAcquireCmdCCB fails we skip the scheduling
+                        * of a new TA command with the same Write offset in Kernel CCB.
+                        */
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d", __func__, eError));
+                       goto fail_3dacquirecmd;
+               }
+       }
+
+       /*
+               We should acquire the space in the kernel CCB here because, after this
+               point, we release the commands, which performs operations on server
+               syncs that cannot be undone.
+       */
+
+       /*
+               Everything is ready to go now, release the commands
+       */
+       if (ui32TACmdCount)
+       {
+               ui32TACmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext));
+               RGXCmdHelperReleaseCmdCCB(ui32TACmdCount,
+                               pasTACmdHelperData,
+                               "TA",
+                               FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+               ui32TACmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext));
+
+               /* This checks whether the command would wrap around at the end of the CCB and therefore would start at an
+                  offset of 0 rather than the current command offset */
+               if (ui32TACmdOffset < ui32TACmdOffsetWrapCheck)
+               {
+                       ui32TACommandOffset = ui32TACmdOffset;
+               }
+               else
+               {
+                       ui32TACommandOffset = 0;
+               }
+#endif
+       }
+
+       if (ui323DCmdCount)
+       {
+               ui323DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext));
+               RGXCmdHelperReleaseCmdCCB(ui323DCmdCount,
+                               pas3DCmdHelperData,
+                               "3D",
+                               FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext).ui32Addr);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+               ui323DCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext));
+
+               if (ui323DCmdOffset < ui323DCmdOffsetWrapCheck)
+               {
+                       ui323DCommandOffset = ui323DCmdOffset;
+               }
+               else
+               {
+                       ui323DCommandOffset = 0;
+               }
+#endif
+       }
+
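+       /* Kick the TA: populate the kernel CCB kick data, attach the cleanup controls,
+          then schedule the command on the GEOM DM (unless a single combined TA/3D
+          firmware command is used, in which case the 3D kick below carries this data). */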
+       if (ui32TACmdCount)
+       {
+               IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr;
+               RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext);
+               CMDTA3D_SHARED *psGeomCmdShared = IMG_OFFSET_ADDR(pui8TADMCmd, 0);
+
+               sTACmdKickData.psContext = FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext);
+               sTACmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB);
+               sTACmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB);
+
+               /* Add the Workload data into the KCCB kick */
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+               /* Store the offset to the CCCB command header so that it can be referenced when the KCCB command reaches the FW */
+               sTACmdKickData.ui32WorkEstCmdHeaderOffset = ui32TACommandOffset + ui32TACmdHeaderOffset;
+#else
+               sTACmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+#endif
+
+               eError = AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &sTACmdKickData.apsCleanupCtl,
+                               &sTACmdKickData.ui32NumCleanupCtl,
+                               RGXFWIF_DM_GEOM,
+                               bKickTA,
+                               psKMHWRTDataSet,
+                               psZSBuffer,
+                               psMSAAScratchBuffer);
+               if (unlikely(eError != PVRSRV_OK))
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d",
+                                          __func__, eError));
+                       goto fail_taattachcleanupctls;
+               }
+
+               if (psGeomCmdShared)
+               {
+                       HTBLOGK(HTB_SF_MAIN_KICK_TA,
+                                       sTACmdKickData.psContext,
+                                       ui32TACmdOffset,
+                                       psGeomCmdShared->sCmn.ui32FrameNum,
+                                       ui32ExtJobRef,
+                                       ui32IntJobRef
+                       );
+               }
+
+               RGXSRV_HWPERF_ENQ(psRenderContext,
+                                 OSGetCurrentClientProcessIDKM(),
+                                 ui32FWCtx,
+                                 ui32ExtJobRef,
+                                 ui32IntJobRef,
+                                 RGX_HWPERF_KICK_TYPE_TA,
+                                 iCheckTAFence,
+                                 iUpdateTAFence,
+                                 iUpdateTATimeline,
+                                 uiCheckTAFenceUID,
+                                 uiUpdateTAFenceUID,
+                                 ui64DeadlineInus,
+                                 WORKEST_CYCLES_PREDICTION_GET(sWorkloadKickDataTA));
+
+               if (!bUseSingleFWCommand)
+               {
+                       /* Construct the kernel TA CCB command. */
+                       RGXFWIF_KCCB_CMD sTAKCCBCmd;
+                       sTAKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+                       sTAKCCBCmd.uCmdData.sCmdKickData = sTACmdKickData;
+
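+                       /* Retry while RGXScheduleCommand returns PVRSRV_ERROR_RETRY,
+                        * waiting briefly between attempts, up to MAX_HW_TIME_US. */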
+                       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+                       {
+                               eError2 = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice,
+                                               RGXFWIF_DM_GEOM,
+                                               &sTAKCCBCmd,
+                                               ui32PDumpFlags);
+                               if (eError2 != PVRSRV_ERROR_RETRY)
+                               {
+                                       break;
+                               }
+                               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+                       } END_LOOP_UNTIL_TIMEOUT();
+               }
+
+               if (eError2 != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXKickTA3DKM failed to schedule kernel CCB command. (0x%x)", eError2));
+                       if (eError == PVRSRV_OK)
+                       {
+                               eError = eError2;
+                       }
+                       goto fail_taacquirecmd;
+               }
+
+               PVRGpuTraceEnqueueEvent(psRenderContext->psDeviceNode->pvDevice,
+                                       ui32FWCtx, ui32ExtJobRef, ui32IntJobRef,
+                                       RGX_HWPERF_KICK_TYPE_TA3D);
+       }
+
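+       /* Kick the 3D: populate the kernel CCB kick data, attach the cleanup controls
+          for the HWRT data set, ZS buffer and MSAA scratch buffer, then schedule the
+          command (or the combined TA/3D command) on the 3D DM. */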
+       if (ui323DCmdCount)
+       {
+               RGXFWIF_KCCB_CMD s3DKCCBCmd = { 0 };
+               IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext).ui32Addr;
+               RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext);
+               CMDTA3D_SHARED *ps3DCmdShared = IMG_OFFSET_ADDR(pui83DDMCmd, 0);
+
+               s3DCmdKickData.psContext = FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext);
+               s3DCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB);
+               s3DCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB);
+
+               /* Add the Workload data into the KCCB kick */
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+               /* Store the offset to the CCCB command header so that it can be referenced when the KCCB command reaches the FW */
+               s3DCmdKickData.ui32WorkEstCmdHeaderOffset = ui323DCommandOffset + ui323DCmdHeaderOffset + ui323DFullRenderCommandOffset;
+#else
+               s3DCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+#endif
+
+               eError = AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &s3DCmdKickData.apsCleanupCtl,
+                               &s3DCmdKickData.ui32NumCleanupCtl,
+                               RGXFWIF_DM_3D,
+                               bKick3D,
+                               psKMHWRTDataSet,
+                               psZSBuffer,
+                               psMSAAScratchBuffer);
+               if (unlikely(eError != PVRSRV_OK))
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d",
+                                          __func__, eError));
+                       goto fail_3dattachcleanupctls;
+               }
+
+               if (ps3DCmdShared)
+               {
+                       HTBLOGK(HTB_SF_MAIN_KICK_3D,
+                                       s3DCmdKickData.psContext,
+                                       ui323DCmdOffset,
+                                       ps3DCmdShared->sCmn.ui32FrameNum,
+                                       ui32ExtJobRef,
+                                       ui32IntJobRef
+                                       );
+               }
+
+               RGXSRV_HWPERF_ENQ(psRenderContext,
+                                 OSGetCurrentClientProcessIDKM(),
+                                 ui32FWCtx,
+                                 ui32ExtJobRef,
+                                 ui32IntJobRef,
+                                 RGX_HWPERF_KICK_TYPE_3D,
+                                 iCheck3DFence,
+                                 iUpdate3DFence,
+                                 iUpdate3DTimeline,
+                                 uiCheck3DFenceUID,
+                                 uiUpdate3DFenceUID,
+                                 ui64DeadlineInus,
+                                 WORKEST_CYCLES_PREDICTION_GET(sWorkloadKickData3D));
+
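+               /* A combined TA/3D command carries both kick data structures in a single
+                * KCCB entry; otherwise a standalone 3D kick command is constructed. */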
+               if (bUseSingleFWCommand)
+               {
+                       /* Construct the kernel TA/3D CCB command. */
+                       s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK;
+                       s3DKCCBCmd.uCmdData.sCombinedTA3DCmdKickData.sTACmdKickData = sTACmdKickData;
+                       s3DKCCBCmd.uCmdData.sCombinedTA3DCmdKickData.s3DCmdKickData = s3DCmdKickData;
+               }
+               else
+               {
+                       /* Construct the kernel 3D CCB command. */
+                       s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+                       s3DKCCBCmd.uCmdData.sCmdKickData = s3DCmdKickData;
+               }
+
+               LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+               {
+                       eError2 = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice,
+                                       RGXFWIF_DM_3D,
+                                       &s3DKCCBCmd,
+                                       ui32PDumpFlags);
+                       if (eError2 != PVRSRV_ERROR_RETRY)
+                       {
+                               break;
+                       }
+                       OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+               } END_LOOP_UNTIL_TIMEOUT();
+       }
+
+       if (eError2 != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXKickTA3DKM failed to schedule kernel CCB command. (0x%x)", eError2));
+               if (eError == PVRSRV_OK)
+               {
+                       eError = eError2;
+               }
+       }
+
+       /*
+        * Now check eError (which may hold an error returned by our earlier calls
+        * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command
+        * first, so we check it now.
+        */
+       if (unlikely(eError != PVRSRV_OK ))
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d",
+                                  __func__, eError));
+               goto fail_3dacquirecmd;
+       }
+
+#if defined(NO_HARDWARE)
+       /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+       if (psUpdateTASyncCheckpoint)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR,
+                                  "%s:   Signalling NOHW sync checkpoint [TA] <%p>, ID:%d, FwAddr=0x%x",
+                                  __func__, (void*)psUpdateTASyncCheckpoint,
+                                  SyncCheckpointGetId(psUpdateTASyncCheckpoint),
+                                  SyncCheckpointGetFirmwareAddr(psUpdateTASyncCheckpoint)));
+               SyncCheckpointSignalNoHW(psUpdateTASyncCheckpoint);
+       }
+       if (psTAFenceTimelineUpdateSync)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR,
+                                  "%s:   Updating NOHW sync prim [TA] <%p> to %d",
+                                  __func__, (void*)psTAFenceTimelineUpdateSync,
+                                  ui32TAFenceTimelineUpdateValue));
+               SyncPrimNoHwUpdate(psTAFenceTimelineUpdateSync, ui32TAFenceTimelineUpdateValue);
+       }
+
+       if (psUpdate3DSyncCheckpoint)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR,
+                                  "%s:   Signalling NOHW sync checkpoint [3D] <%p>, ID:%d, FwAddr=0x%x",
+                                  __func__, (void*)psUpdate3DSyncCheckpoint,
+                                  SyncCheckpointGetId(psUpdate3DSyncCheckpoint),
+                                  SyncCheckpointGetFirmwareAddr(psUpdate3DSyncCheckpoint)));
+               SyncCheckpointSignalNoHW(psUpdate3DSyncCheckpoint);
+       }
+       if (ps3DFenceTimelineUpdateSync)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR,
+                                  "%s:   Updating NOHW sync prim [3D] <%p> to %d",
+                                  __func__, (void*)ps3DFenceTimelineUpdateSync,
+                                  ui323DFenceTimelineUpdateValue));
+               SyncPrimNoHwUpdate(ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue);
+       }
+       SyncCheckpointNoHWUpdateTimelines(NULL);
+
+#endif /* defined(NO_HARDWARE) */
+
+#if defined(SUPPORT_BUFFER_SYNC)
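+       /* On success, notify buffer sync that the kick succeeded and free the
+          buffer fence checkpoint array. */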
+       if (psBufferSyncData)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR,
+                                  "%s:   calling pvr_buffer_sync_kick_succeeded(psBufferSyncData=<%p>)...",
+                                  __func__, (void*)psBufferSyncData));
+               pvr_buffer_sync_kick_succeeded(psBufferSyncData);
+       }
+       if (apsBufferFenceSyncCheckpoints)
+       {
+               kfree(apsBufferFenceSyncCheckpoints);
+       }
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
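+       /* Return the output fences created above to the caller. */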
+       if (piUpdateTAFence)
+       {
+               *piUpdateTAFence = iUpdateTAFence;
+       }
+       if (piUpdate3DFence)
+       {
+               *piUpdate3DFence = iUpdate3DFence;
+       }
+
+       /* Drop the references taken on the sync checkpoints in the
+        * resolved input fence.
+        * NOTE: 3D fence is always submitted, either via 3D or TA(PR).
+        */
+       if (bKickTA)
+       {
+               SyncAddrListDeRefCheckpoints(ui32FenceTASyncCheckpointCount, apsFenceTASyncCheckpoints);
+       }
+       SyncAddrListDeRefCheckpoints(ui32Fence3DSyncCheckpointCount, apsFence3DSyncCheckpoints);
+
+       if (pvTAUpdateFenceFinaliseData && (iUpdateTAFence != PVRSRV_NO_FENCE))
+       {
+               SyncCheckpointFinaliseFence(psRenderContext->psDeviceNode, iUpdateTAFence,
+                                                                       pvTAUpdateFenceFinaliseData,
+                                                                       psUpdateTASyncCheckpoint, szFenceNameTA);
+       }
+       if (pv3DUpdateFenceFinaliseData && (iUpdate3DFence != PVRSRV_NO_FENCE))
+       {
+               SyncCheckpointFinaliseFence(psRenderContext->psDeviceNode, iUpdate3DFence,
+                                                                       pv3DUpdateFenceFinaliseData,
+                                                                       psUpdate3DSyncCheckpoint, szFenceName3D);
+       }
+
+       /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+       if (apsFenceTASyncCheckpoints)
+       {
+               SyncCheckpointFreeCheckpointListMem(apsFenceTASyncCheckpoints);
+       }
+       if (apsFence3DSyncCheckpoints)
+       {
+               SyncCheckpointFreeCheckpointListMem(apsFence3DSyncCheckpoints);
+       }
+
+       if (sTASyncData.paui32ClientUpdateValue)
+       {
+               OSFreeMem(sTASyncData.paui32ClientUpdateValue);
+       }
+       if (s3DSyncData.paui32ClientUpdateValue)
+       {
+               OSFreeMem(s3DSyncData.paui32ClientUpdateValue);
+       }
+
+#if defined(SUPPORT_VALIDATION)
+       if (bTestSLRAdd3DCheck)
+       {
+               SyncCheckpointFree(psDummySyncCheckpoint);
+       }
+#endif
+       OSLockRelease(psRenderContext->hLock);
+
+       return PVRSRV_OK;
+
+fail_3dattachcleanupctls:
+fail_taattachcleanupctls:
+fail_3dacquirecmd:
+fail_3dcmdinit:
+fail_taacquirecmd:
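+       /* Roll back any sync checkpoints that were appended to the render context's
+          TA/3D fence and update address lists above. */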
+       SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrListTAFence);
+       SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrListTAUpdate);
+       SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrList3DFence);
+       SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrList3DUpdate);
+       /* Where a TA-only kick (i.e. no 3D) is submitted, the PR update will make use of the unused 3DUpdate list.
+        * If this has happened, performing a rollback on pauiClientPRUpdateUFOAddress will simply repeat what
+        * has already been done for the sSyncAddrList3DUpdate above and result in a double decrement of the
+        * sync checkpoint's hEnqueuedCCBCount, so we need to check before rolling back the PRUpdate.
+        */
+       if (pauiClientPRUpdateUFOAddress && (pauiClientPRUpdateUFOAddress != psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs))
+       {
+               SyncCheckpointRollbackFromUFO(psRenderContext->psDeviceNode, pauiClientPRUpdateUFOAddress->ui32Addr);
+       }
+
+fail_alloc_update_values_mem_3D:
+       if (iUpdate3DFence != PVRSRV_NO_FENCE)
+       {
+               SyncCheckpointRollbackFenceData(iUpdate3DFence, pv3DUpdateFenceFinaliseData);
+       }
+fail_create_3d_fence:
+fail_alloc_update_values_mem_TA:
+       if (iUpdateTAFence != PVRSRV_NO_FENCE)
+       {
+               SyncCheckpointRollbackFenceData(iUpdateTAFence, pvTAUpdateFenceFinaliseData);
+       }
+fail_create_ta_fence:
+#if !defined(SUPPORT_BUFFER_SYNC)
+err_no_buffer_sync_invalid_params:
+#endif /* !defined(SUPPORT_BUFFER_SYNC) */
+err_pr_fence_address:
+err_populate_sync_addr_list_3d_update:
+err_populate_sync_addr_list_3d_fence:
+err_populate_sync_addr_list_ta_update:
+err_populate_sync_addr_list_ta_fence:
+err_not_enough_space:
+       /* Drop the references taken on the sync checkpoints in the
+        * resolved input fence.
+        * NOTE: 3D fence is always submitted, either via 3D or TA(PR).
+        */
+#if defined(SUPPORT_BUFFER_SYNC)
+       SyncAddrListDeRefCheckpoints(ui32BufferFenceSyncCheckpointCount,
+                                    apsBufferFenceSyncCheckpoints);
+#endif
+       SyncAddrListDeRefCheckpoints(ui32Fence3DSyncCheckpointCount, apsFence3DSyncCheckpoints);
+fail_resolve_input_3d_fence:
+       if (bKickTA)
+       {
+               SyncAddrListDeRefCheckpoints(ui32FenceTASyncCheckpointCount, apsFenceTASyncCheckpoints);
+       }
+fail_resolve_input_ta_fence:
+       /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+       if (apsFenceTASyncCheckpoints)
+       {
+               SyncCheckpointFreeCheckpointListMem(apsFenceTASyncCheckpoints);
+       }
+       if (apsFence3DSyncCheckpoints)
+       {
+               SyncCheckpointFreeCheckpointListMem(apsFence3DSyncCheckpoints);
+       }
+       if (sTASyncData.paui32ClientUpdateValue)
+       {
+               OSFreeMem(sTASyncData.paui32ClientUpdateValue);
+       }
+       if (s3DSyncData.paui32ClientUpdateValue)
+       {
+               OSFreeMem(s3DSyncData.paui32ClientUpdateValue);
+       }
+#if defined(SUPPORT_VALIDATION)
+       if (bTestSLRAdd3DCheck)
+       {
+               SyncCheckpointFree(psDummySyncCheckpoint);
+       }
+#endif
+#if defined(SUPPORT_BUFFER_SYNC)
+       if (psBufferSyncData)
+       {
+               pvr_buffer_sync_kick_failed(psBufferSyncData);
+       }
+       if (apsBufferFenceSyncCheckpoints)
+       {
+               kfree(apsBufferFenceSyncCheckpoints);
+       }
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+       PVR_ASSERT(eError != PVRSRV_OK);
+       OSLockRelease(psRenderContext->hLock);
+       return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection,
+               PVRSRV_DEVICE_NODE * psDeviceNode,
+               RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+               IMG_UINT32 ui32Priority)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+       OSLockAcquire(psRenderContext->hLock);
+
+       if (psRenderContext->sTAData.ui32Priority != ui32Priority)
+       {
+               eError = ContextSetPriority(psRenderContext->sTAData.psServerCommonContext,
+                               psConnection,
+                               psRenderContext->psDeviceNode->pvDevice,
+                               ui32Priority,
+                               RGXFWIF_DM_GEOM);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Failed to set the priority of the TA part of the rendercontext (%s)",
+                                        __func__, PVRSRVGetErrorString(eError)));
+                       goto fail_tacontext;
+               }
+               psRenderContext->sTAData.ui32Priority = ui32Priority;
+       }
+
+       if (psRenderContext->s3DData.ui32Priority != ui32Priority)
+       {
+               eError = ContextSetPriority(psRenderContext->s3DData.psServerCommonContext,
+                               psConnection,
+                               psRenderContext->psDeviceNode->pvDevice,
+                               ui32Priority,
+                               RGXFWIF_DM_3D);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Failed to set the priority of the 3D part of the rendercontext (%s)",
+                                        __func__, PVRSRVGetErrorString(eError)));
+                       goto fail_3dcontext;
+               }
+               psRenderContext->s3DData.ui32Priority = ui32Priority;
+       }
+
+       OSLockRelease(psRenderContext->hLock);
+       return PVRSRV_OK;
+
+fail_3dcontext:
+fail_tacontext:
+       OSLockRelease(psRenderContext->hLock);
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+
+PVRSRV_ERROR PVRSRVRGXSetRenderContextPropertyKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+                                                 RGX_CONTEXT_PROPERTY eContextProperty,
+                                                 IMG_UINT64 ui64Input,
+                                                 IMG_UINT64 *pui64Output)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       switch (eContextProperty)
+       {
+               case RGX_CONTEXT_PROPERTY_FLAGS:
+               {
+                       IMG_UINT32 ui32ContextFlags = (IMG_UINT32)ui64Input;
+
+                       OSLockAcquire(psRenderContext->hLock);
+                       eError = FWCommonContextSetFlags(psRenderContext->sTAData.psServerCommonContext,
+                                                        ui32ContextFlags);
+                       if (eError == PVRSRV_OK)
+                       {
+                               eError = FWCommonContextSetFlags(psRenderContext->s3DData.psServerCommonContext,
+                                                            ui32ContextFlags);
+                       }
+                       OSLockRelease(psRenderContext->hLock);
+                       break;
+               }
+
+               default:
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty));
+                       eError = PVRSRV_ERROR_NOT_SUPPORTED;
+               }
+       }
+
+       return eError;
+}
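RGX_CONTEXT_PROPERTY_FLAGS is the only property handled above; it is applied to both the TA and 3D common contexts under the render context lock. A minimal usage sketch follows (hypothetical caller, not bridge code; the helper name is invented for illustration):

/* Hypothetical use of the property interface; sketch only. */
static PVRSRV_ERROR _SetRenderCtxFlagsSketch(RGX_SERVER_RENDER_CONTEXT *psRenderContext,
                                             IMG_UINT32 ui32ContextFlags)
{
	IMG_UINT64 ui64Unused = 0;

	/* The output parameter is unused for the FLAGS property */
	return PVRSRVRGXSetRenderContextPropertyKM(psRenderContext,
	                                           RGX_CONTEXT_PROPERTY_FLAGS,
	                                           (IMG_UINT64)ui32ContextFlags,
	                                           &ui64Unused);
}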
+
+
+void DumpRenderCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                         DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                         void *pvDumpDebugFile,
+                         IMG_UINT32 ui32VerbLevel)
+{
+       DLLIST_NODE *psNode, *psNext;
+       OSWRLockAcquireRead(psDevInfo->hRenderCtxListLock);
+       dllist_foreach_node(&psDevInfo->sRenderCtxtListHead, psNode, psNext)
+       {
+               RGX_SERVER_RENDER_CONTEXT *psCurrentServerRenderCtx =
+                               IMG_CONTAINER_OF(psNode, RGX_SERVER_RENDER_CONTEXT, sListNode);
+
+               DumpFWCommonContextInfo(psCurrentServerRenderCtx->sTAData.psServerCommonContext,
+                                       pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+               DumpFWCommonContextInfo(psCurrentServerRenderCtx->s3DData.psServerCommonContext,
+                                       pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+       }
+       OSWRLockReleaseRead(psDevInfo->hRenderCtxListLock);
+}
+
+IMG_UINT32 CheckForStalledClientRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       DLLIST_NODE *psNode, *psNext;
+       IMG_UINT32 ui32ContextBitMask = 0;
+
+       OSWRLockAcquireRead(psDevInfo->hRenderCtxListLock);
+
+       dllist_foreach_node(&psDevInfo->sRenderCtxtListHead, psNode, psNext)
+       {
+               RGX_SERVER_RENDER_CONTEXT *psCurrentServerRenderCtx =
+                               IMG_CONTAINER_OF(psNode, RGX_SERVER_RENDER_CONTEXT, sListNode);
+               if (NULL != psCurrentServerRenderCtx->sTAData.psServerCommonContext)
+               {
+                       if (CheckStalledClientCommonContext(psCurrentServerRenderCtx->sTAData.psServerCommonContext, RGX_KICK_TYPE_DM_TA) == PVRSRV_ERROR_CCCB_STALLED)
+                       {
+                               ui32ContextBitMask |= RGX_KICK_TYPE_DM_TA;
+                       }
+               }
+
+               if (NULL != psCurrentServerRenderCtx->s3DData.psServerCommonContext)
+               {
+                       if (CheckStalledClientCommonContext(psCurrentServerRenderCtx->s3DData.psServerCommonContext, RGX_KICK_TYPE_DM_3D) == PVRSRV_ERROR_CCCB_STALLED)
+                       {
+                               ui32ContextBitMask |= RGX_KICK_TYPE_DM_3D;
+                       }
+               }
+       }
+
+       OSWRLockReleaseRead(psDevInfo->hRenderCtxListLock);
+       return ui32ContextBitMask;
+}
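The value returned above is a bitmask of RGX_KICK_TYPE_DM_* flags, one bit per data master whose client CCB appears stalled. A hypothetical watchdog-style consumer (not part of this file, names invented for illustration) might use it as follows:

/* Hypothetical consumer of the stalled-context bitmask; sketch only. */
static void _ReportStalledRenderCtxtsSketch(PVRSRV_RGXDEV_INFO *psDevInfo)
{
	IMG_UINT32 ui32Stalled = CheckForStalledClientRenderCtxt(psDevInfo);

	if (ui32Stalled & RGX_KICK_TYPE_DM_TA)
	{
		PVR_DPF((PVR_DBG_WARNING, "%s: TA client CCB appears stalled", __func__));
	}
	if (ui32Stalled & RGX_KICK_TYPE_DM_3D)
	{
		PVR_DPF((PVR_DBG_WARNING, "%s: 3D client CCB appears stalled", __func__));
	}
}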
+
+/*
+ * RGXRenderContextStalledKM
+ */
+PVRSRV_ERROR RGXRenderContextStalledKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext)
+{
+       RGXCheckForStalledClientContexts((PVRSRV_RGXDEV_INFO *) psRenderContext->psDeviceNode->pvDevice, IMG_TRUE);
+       return PVRSRV_OK;
+}
+
+/******************************************************************************
+ End of file (rgxta3d.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxta3d.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxta3d.h
new file mode 100644 (file)
index 0000000..89a5b22
--- /dev/null
@@ -0,0 +1,502 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX TA and 3D Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX TA and 3D Functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXTA3D_H
+#define RGXTA3D_H
+
+#include "devicemem.h"
+#include "devicemem_server.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgx_fwif_shared.h"
+#include "rgx_fwif_resetframework.h"
+#include "sync_server.h"
+#include "connection_server.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+
+typedef struct _RGX_SERVER_RENDER_CONTEXT_ RGX_SERVER_RENDER_CONTEXT;
+typedef struct _RGX_FREELIST_ RGX_FREELIST;
+typedef struct _RGX_PMR_NODE_ RGX_PMR_NODE;
+
+/*****************************************************************************
+ * The Design of Data Storage System for Render Targets                      *
+ * ====================================================                      *
+ *   Relevant for                                                            *
+ *     understanding RGXCreateHWRTDataSet & RGXDestroyHWRTDataSet            *
+ *                                                                           *
+ *                                                                           *
+ *        +=========================================+                        *
+ *        |           RenderTargetDataSet           |                        *
+ *        +---------------|---------|---------------+                        *
+ *                        |         |                                        *
+ *                        V         V                                        *
+ *  +- - - - - - - - - - - - +   +- - - - - - - - - - - - +                  *
+ *  | KM_HW_RT_DATA_HANDLE_0 |   | KM_HW_RT_DATA_HANDLE_1 |                  *
+ *  +- - -|- - - - - - - - - +   +- - - - - - - - - | - - +                  *
+ *        |                                         |                        *
+ *        |                                         |           [UM]Client   *
+ *  ------|-----------------------------------------|----------------------- *
+ *        |                                         |               Bridge   *
+ *  ------|-----------------------------------------|----------------------- *
+ *        |                                         |           [KM]Server   *
+ *        |                                         |                        *
+ *        | KM-ptr                                  | KM-ptr                 *
+ *        V                                         V                        *
+ *  +====================+           +====================+                  *
+ *  |  KM_HW_RT_DATA_0   |           |   KM_HW_RT_DATA_1  |                  *
+ *  +-----|------------|-+           +-|------------|-----+                  *
+ *        |            |               |            |                        *
+ *        |            |               |            |                        *
+ *        |            |               |            |                        *
+ *        |            |               |            |                        *
+ *        |            | KM-ptr        | KM-ptr     |                        *
+ *        |            V               V            |                        *
+ *        |      +==========================+       |                        *
+ *        |      | HW_RT_DATA_COMMON_COOKIE |       |                        *
+ *        |      +--------------------------+       |                        *
+ *        |                   |                     |                        *
+ *        |                   |                     |                        *
+ *  ------|-------------------|---------------------|----------------------- *
+ *        |                   |                     |         [FW]Firmware   *
+ *        |                   |                     |                        *
+ *        | FW-addr           |                     | FW-addr                *
+ *        V                   |                     V                        *
+ *  +===============+         |           +===============+                  *
+ *  | HW_RT_DATA_0  |         |           | HW_RT_DATA_1  |                  *
+ *  +------------|--+         |           +--|------------+                  *
+ *               |            |              |                               *
+ *               | FW-addr    | FW-addr      | FW-addr                       *
+ *               V            V              V                               *
+ *        +=========================================+                        *
+ *        |           HW_RT_DATA_COMMON             |                        *
+ *        +-----------------------------------------+                        *
+ *                                                                           *
+ *****************************************************************************/
+
+typedef struct _RGX_HWRTDATA_COMMON_COOKIE_
+{
+       DEVMEM_MEMDESC                  *psHWRTDataCommonFwMemDesc;
+       RGXFWIF_DEV_VIRTADDR    sHWRTDataCommonFwAddr;
+       IMG_UINT32                              ui32RefCount;
+
+} RGX_HWRTDATA_COMMON_COOKIE;
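Tying the diagram above to the structure just defined: each KM_HW_RT_DATA in the set holds a reference on the shared cookie, and the cookie's firmware memory is only released when the last KM_HW_RT_DATA goes away. The helpers below are a minimal sketch of that lifecycle, assuming the caller holds whatever lock protects the set; the real logic lives in RGXCreateHWRTDataSet/RGXDestroyHWRTDataSet and these names are invented for illustration.

/* Sketch only: reference management for the shared cookie, external locking assumed. */
static void _CommonCookieAcquireSketch(RGX_HWRTDATA_COMMON_COOKIE *psCookie)
{
	/* One reference per KM_HW_RT_DATA pointing at the cookie */
	psCookie->ui32RefCount++;
}

static IMG_BOOL _CommonCookieReleaseSketch(RGX_HWRTDATA_COMMON_COOKIE *psCookie)
{
	/* Returns IMG_TRUE when the caller should free psHWRTDataCommonFwMemDesc */
	return (--psCookie->ui32RefCount == 0) ? IMG_TRUE : IMG_FALSE;
}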
+
+typedef struct _RGX_KM_HW_RT_DATASET_
+{
+       RGX_HWRTDATA_COMMON_COOKIE *psHWRTDataCommonCookie;
+
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+       RGXFWIF_DEV_VIRTADDR sHWRTDataFwAddr;
+
+       DEVMEM_MEMDESC *psHWRTDataFwMemDesc;
+       DEVMEM_MEMDESC *psRTArrayFwMemDesc;
+       DEVMEM_MEMDESC *psRendersAccArrayFwMemDesc;
+
+       RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS];
+#if !defined(SUPPORT_SHADOW_FREELISTS)
+       DLLIST_NODE                     sNodeHWRTData;
+#endif
+
+} RGX_KM_HW_RT_DATASET;
+
+struct _RGX_FREELIST_ {
+       PVRSRV_RGXDEV_INFO              *psDevInfo;
+       CONNECTION_DATA                 *psConnection;
+
+       /* Free list PMR */
+       PMR                                             *psFreeListPMR;
+       IMG_DEVMEM_OFFSET_T             uiFreeListPMROffset;
+
+       /* Freelist config */
+       IMG_UINT32                              ui32MaxFLPages;
+       IMG_UINT32                              ui32InitFLPages;
+       IMG_UINT32                              ui32CurrentFLPages;
+       IMG_UINT32                              ui32GrowFLPages;
+       IMG_UINT32                              ui32ReadyFLPages;
+	IMG_UINT32				ui32GrowThreshold;		/* Percentage of FL memory used that should trigger a new grow request (see the sketch after this struct) */
+       IMG_UINT32                              ui32FreelistID;
+       IMG_UINT32                              ui32FreelistGlobalID;   /* related global freelist for this freelist */
+       IMG_UINT64                              ui64FreelistChecksum;   /* checksum over freelist content */
+       IMG_BOOL                                bCheckFreelist;                 /* freelist check enabled */
+       IMG_UINT32                              ui32RefCount;                   /* freelist reference counting */
+
+       IMG_UINT32                              ui32NumGrowReqByApp;    /* Total number of grow requests by Application */
+       IMG_UINT32                              ui32NumGrowReqByFW;             /* Total Number of grow requests by Firmware */
+       IMG_UINT32                              ui32NumHighPages;               /* High Mark of pages in the freelist */
+
+       IMG_PID                                 ownerPid;                               /* Pid of the owner of the list */
+
+       /* Memory Blocks */
+       DLLIST_NODE                             sMemoryBlockHead;
+       DLLIST_NODE                             sMemoryBlockInitHead;
+       DLLIST_NODE                             sNode;
+#if !defined(SUPPORT_SHADOW_FREELISTS)
+       /* HWRTData nodes linked to local freelist */
+       DLLIST_NODE                             sNodeHWRTDataHead;
+#endif
+
+       /* FW data structures */
+       DEVMEM_MEMDESC                  *psFWFreelistMemDesc;
+       RGXFWIF_DEV_VIRTADDR    sFreeListFWDevVAddr;
+};
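Given the field comments above, a plausible reading of ui32GrowThreshold is sketched below: once the in-use fraction of the currently backed pages crosses the threshold percentage, and the list has not already reached ui32MaxFLPages, a grow of ui32GrowFLPages pages would be requested. This is an assumed interpretation for illustration only; the driver's actual check is implemented elsewhere and the helper name is invented.

/* Illustrative check only (assumed semantics of ui32GrowThreshold). */
static IMG_BOOL _FreeListNeedsGrowSketch(const RGX_FREELIST *psFreeList,
                                         IMG_UINT32 ui32PagesInUse)
{
	IMG_UINT32 ui32UsedPercent;

	if (psFreeList->ui32CurrentFLPages == 0 ||
	    psFreeList->ui32CurrentFLPages >= psFreeList->ui32MaxFLPages)
	{
		return IMG_FALSE;
	}

	ui32UsedPercent = (ui32PagesInUse * 100U) / psFreeList->ui32CurrentFLPages;

	return (ui32UsedPercent >= psFreeList->ui32GrowThreshold) ? IMG_TRUE : IMG_FALSE;
}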
+
+struct _RGX_PMR_NODE_ {
+       RGX_FREELIST                    *psFreeList;
+       PMR                                             *psPMR;
+       PMR_PAGELIST                    *psPageList;
+       DLLIST_NODE                             sMemoryBlock;
+       IMG_UINT32                              ui32NumPages;
+       IMG_BOOL                                bFirstPageMissing;
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+       RI_HANDLE                               hRIHandle;
+#endif
+};
+
+typedef struct {
+       PVRSRV_RGXDEV_INFO              *psDevInfo;
+       DEVMEM_MEMDESC                  *psFWZSBufferMemDesc;
+       RGXFWIF_DEV_VIRTADDR    sZSBufferFWDevVAddr;
+
+       DEVMEMINT_RESERVATION   *psReservation;
+       PMR                                             *psPMR;
+       DEVMEMINT_MAPPING               *psMapping;
+       PVRSRV_MEMALLOCFLAGS_T  uiMapFlags;
+       IMG_UINT32                              ui32ZSBufferID;
+       IMG_UINT32                              ui32RefCount;
+       IMG_BOOL                                bOnDemand;
+
+	IMG_UINT32				ui32NumReqByApp;		/* Number of Backing Requests from Application */
+	IMG_UINT32				ui32NumReqByFW;			/* Number of Backing Requests from Firmware */
+
+       IMG_PID                                 owner;
+
+       DLLIST_NODE     sNode;
+} RGX_ZSBUFFER_DATA;
+
+typedef struct {
+       RGX_ZSBUFFER_DATA               *psZSBuffer;
+} RGX_POPULATION;
+
+/* Dump the physical pages of a freelist */
+IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList);
+
+
+/* Create set of HWRTData(s) */
+PVRSRV_ERROR RGXCreateHWRTDataSet(CONNECTION_DATA      *psConnection,
+                                                          PVRSRV_DEVICE_NODE   *psDeviceNode,
+                                                          IMG_DEV_VIRTADDR             asVHeapTableDevVAddr[RGXMKIF_NUM_GEOMDATAS],
+                                                          IMG_DEV_VIRTADDR             psPMMListDevVAddr[RGXMKIF_NUM_RTDATAS],
+                                                          RGX_FREELIST                 *apsFreeLists[RGXMKIF_NUM_RTDATA_FREELISTS],
+                                                          IMG_UINT32                   ui32ScreenPixelMax,
+                                                          IMG_UINT64                   ui64MultiSampleCtl,
+                                                          IMG_UINT64                   ui64FlippedMultiSampleCtl,
+                                                          IMG_UINT32                   ui32TPCStride,
+                                                          IMG_DEV_VIRTADDR             asTailPtrsDevVAddr[RGXMKIF_NUM_GEOMDATAS],
+                                                          IMG_UINT32                   ui32TPCSize,
+                                                          IMG_UINT32                   ui32TEScreen,
+                                                          IMG_UINT32                   ui32TEAA,
+                                                          IMG_UINT32                   ui32TEMTILE1,
+                                                          IMG_UINT32                   ui32TEMTILE2,
+                                                          IMG_UINT32                   ui32MTileStride,
+                                                          IMG_UINT32                   ui32ISPMergeLowerX,
+                                                          IMG_UINT32                   ui32ISPMergeLowerY,
+                                                          IMG_UINT32                   ui32ISPMergeUpperX,
+                                                          IMG_UINT32                   ui32ISPMergeUpperY,
+                                                          IMG_UINT32                   ui32ISPMergeScaleX,
+                                                          IMG_UINT32                   ui32ISPMergeScaleY,
+                                                          IMG_DEV_VIRTADDR             sMacrotileArrayDevVAddr[RGXMKIF_NUM_RTDATAS],
+                                                          IMG_DEV_VIRTADDR             sRgnHeaderDevVAddr[RGXMKIF_NUM_RTDATAS],
+                                                          IMG_DEV_VIRTADDR             asRTCDevVAddr[RGXMKIF_NUM_GEOMDATAS],
+                                                          IMG_UINT32                   uiRgnHeaderSize,
+                                                          IMG_UINT32                   ui32ISPMtileSize,
+                                                          IMG_UINT16                   ui16MaxRTs,
+                                                          RGX_KM_HW_RT_DATASET *pasKMHWRTDataSet[RGXMKIF_NUM_RTDATAS]);
+
+/* Destroy HWRTDataSet */
+PVRSRV_ERROR RGXDestroyHWRTDataSet(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet);
+
+/*
+       RGXCreateZSBufferKM
+*/
+PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA                       *psConnection,
+                                 PVRSRV_DEVICE_NODE                    *psDeviceNode,
+                                 DEVMEMINT_RESERVATION         *psReservation,
+                                 PMR                                           *psPMR,
+                                 PVRSRV_MEMALLOCFLAGS_T                uiMapFlags,
+                                 RGX_ZSBUFFER_DATA                     **ppsZSBuffer);
+
+/*
+       RGXDestroyZSBufferKM
+*/
+PVRSRV_ERROR RGXDestroyZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer);
+
+
+/*
+ * RGXBackingZSBuffer()
+ *
+ * Backs ZS-Buffer with physical pages
+ */
+PVRSRV_ERROR
+RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer);
+
+/*
+ * RGXPopulateZSBufferKM()
+ *
+ * Backs ZS-Buffer with physical pages (called by Bridge calls)
+ */
+PVRSRV_ERROR RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer,
+                                                                  RGX_POPULATION **ppsPopulation);
+
+/*
+ * RGXUnbackingZSBuffer()
+ *
+ * Frees ZS-Buffer's physical pages
+ */
+PVRSRV_ERROR RGXUnbackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer);
+
+/*
+ * RGXUnpopulateZSBufferKM()
+ *
+ * Frees ZS-Buffer's physical pages (called by Bridge calls)
+ */
+PVRSRV_ERROR RGXUnpopulateZSBufferKM(RGX_POPULATION *psPopulation);
+
+/*
+       RGXProcessRequestZSBufferBacking
+*/
+void RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                         IMG_UINT32 ui32ZSBufferID);
+
+/*
+       RGXProcessRequestZSBufferUnbacking
+*/
+void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                               IMG_UINT32 ui32ZSBufferID);
+
+/*
+       RGXGrowFreeList
+*/
+PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList,
+                             IMG_UINT32 ui32NumPages,
+                             PDLLIST_NODE pListHeader);
+
+/* Create free list */
+PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA         *psConnection,
+                                                          PVRSRV_DEVICE_NODE   *psDeviceNode,
+                                                          IMG_HANDLE                   hMemCtxPrivData,
+                                                          IMG_UINT32                   ui32MaxFLPages,
+                                                          IMG_UINT32                   ui32InitFLPages,
+                                                          IMG_UINT32                   ui32GrowFLPages,
+                                                          IMG_UINT32                   ui32GrowParamThreshold,
+                                                          RGX_FREELIST                 *psGlobalFreeList,
+                                                          IMG_BOOL                             bCheckFreelist,
+                                                          IMG_DEV_VIRTADDR             sFreeListDevVAddr,
+                                                          PMR                                  *psFreeListPMR,
+                                                          IMG_DEVMEM_OFFSET_T  uiFreeListPMROffset,
+                                                          RGX_FREELIST                 **ppsFreeList);
+
+/* Destroy free list */
+PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList);
+
+/*
+       RGXProcessRequestGrow
+*/
+void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                  IMG_UINT32 ui32FreelistID);
+
+
+/* Reconstruct free list after Hardware Recovery */
+void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                         IMG_UINT32 ui32FreelistsCount,
+                                                                                         const IMG_UINT32 *paui32Freelists);
+
+/*!
+*******************************************************************************
+
+ @Function     PVRSRVRGXCreateRenderContextKM
+
+ @Description
+       Server-side implementation of RGXCreateRenderContext
+
+ @Input psConnection - connection data
+ @Input psDeviceNode - device node
+ @Input ui32Priority - context priority
+ @Input sVDMCallStackAddr - VDM call stack device virtual address
+ @Input ui32CallStackDepth - VDM call stack depth
+ @Input ui32FrameworkCommandSize - framework command size
+ @Input pabyFrameworkCommand - ptr to framework command
+ @Input hMemCtxPrivData - memory context private data
+ @Input ui32StaticRenderContextStateSize - size of fixed render state
+ @Input pStaticRenderContextState - ptr to fixed render state buffer
+ @Input ui32PackedCCBSizeU8888 - four packed log2 CCB sizes (a byte-packing sketch follows the prototype below):
+               ui8TACCBAllocSizeLog2 - TA CCB size
+               ui8TACCBMaxAllocSizeLog2 - maximum size to which TA CCB can grow
+               ui83DCCBAllocSizeLog2 - 3D CCB size
+               ui83DCCBMaxAllocSizeLog2 - maximum size to which 3D CCB can grow
+ @Input ui32ContextFlags - flags which specify properties of the context
+ @Output ppsRenderContext - created server-side render context
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA                            *psConnection,
+                                                                                       PVRSRV_DEVICE_NODE                      *psDeviceNode,
+                                                                                       IMG_UINT32                                      ui32Priority,
+                                                                                       IMG_DEV_VIRTADDR                        sVDMCallStackAddr,
+                                                                                       IMG_UINT32                                      ui32CallStackDepth,
+                                                                                       IMG_UINT32                                      ui32FrameworkCommandSize,
+                                                                                       IMG_PBYTE                                       pabyFrameworkCommand,
+                                                                                       IMG_HANDLE                                      hMemCtxPrivData,
+                                                                                       IMG_UINT32                                      ui32StaticRenderContextStateSize,
+                                                                                       IMG_PBYTE                                       pStaticRenderContextState,
+                                                                                       IMG_UINT32                                      ui32PackedCCBSizeU8888,
+                                                                                       IMG_UINT32                                      ui32ContextFlags,
+                                                                                       IMG_UINT64                                      ui64RobustnessAddress,
+                                                                                       IMG_UINT32                                      ui32MaxTADeadlineMS,
+                                                                                       IMG_UINT32                                      ui32Max3DDeadlineMS,
+                                                                                       RGX_SERVER_RENDER_CONTEXT       **ppsRenderContext);
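The four log2 sizes above travel in a single 32-bit word. The helper below is a hypothetical sketch of such a packing, assuming one byte per field in ascending byte order, analogous to the U88 variant that rgxtdmtransfer.c unpacks with U32toU8_Unpack1/U32toU8_Unpack2; the driver's real packing macros and their byte order are defined elsewhere, so treat the layout here as an assumption.

/* Hypothetical packing sketch; the byte order is assumed, not the documented layout. */
static inline IMG_UINT32 _PackCCBSizeU8888Sketch(IMG_UINT8 ui8TALog2,
                                                 IMG_UINT8 ui8TAMaxLog2,
                                                 IMG_UINT8 ui83DLog2,
                                                 IMG_UINT8 ui83DMaxLog2)
{
	return ((IMG_UINT32)ui8TALog2)           |
	       ((IMG_UINT32)ui8TAMaxLog2  <<  8) |
	       ((IMG_UINT32)ui83DLog2     << 16) |
	       ((IMG_UINT32)ui83DMaxLog2  << 24);
}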
+
+
+/*!
+*******************************************************************************
+
+ @Function     PVRSRVRGXDestroyRenderContextKM
+
+ @Description
+       Server-side implementation of RGXDestroyRenderContext
+
+ @Input psRenderContext - render context to destroy
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext);
+
+
+/*!
+*******************************************************************************
+
+ @Function     PVRSRVRGXKickTA3DKM
+
+ @Description
+       Server-side implementation of RGXKickTA3D
+
+ @Input psKMHWRTDataSet - HW RT data set associated with the kick (or NULL)
+ @Input psZSBuffer - ZS-buffer associated with the kick (or NULL)
+ @Input psMSAAScratchBuffer - MSAA scratch buffer associated with the kick (or NULL)
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT     *psRenderContext,
+                                                                IMG_UINT32                                     ui32ClientTAFenceCount,
+                                                                SYNC_PRIMITIVE_BLOCK           **apsClientTAFenceSyncPrimBlock,
+                                                                IMG_UINT32                                     *paui32ClientTAFenceSyncOffset,
+                                                                IMG_UINT32                                     *paui32ClientTAFenceValue,
+                                                                IMG_UINT32                                     ui32ClientTAUpdateCount,
+                                                                SYNC_PRIMITIVE_BLOCK           **apsClientUpdateSyncPrimBlock,
+                                                                IMG_UINT32                                     *paui32ClientUpdateSyncOffset,
+                                                                IMG_UINT32                                     *paui32ClientTAUpdateValue,
+                                                                IMG_UINT32                                     ui32Client3DUpdateCount,
+                                                                SYNC_PRIMITIVE_BLOCK           **apsClient3DUpdateSyncPrimBlock,
+                                                                IMG_UINT32                                     *paui32Client3DUpdateSyncOffset,
+                                                                IMG_UINT32                                     *paui32Client3DUpdateValue,
+                                                                SYNC_PRIMITIVE_BLOCK           *psPRSyncPrimBlock,
+                                                                IMG_UINT32                                     ui32PRSyncOffset,
+                                                                IMG_UINT32                                     ui32PRFenceValue,
+                                                                PVRSRV_FENCE                           iCheckFence,
+                                                                PVRSRV_TIMELINE                        iUpdateTimeline,
+                                                                PVRSRV_FENCE                           *piUpdateFence,
+                                                                IMG_CHAR                                       szFenceName[PVRSRV_SYNC_NAME_LENGTH],
+                                                                PVRSRV_FENCE                           iCheckFence3D,
+                                                                PVRSRV_TIMELINE                        iUpdateTimeline3D,
+                                                                PVRSRV_FENCE                           *piUpdateFence3D,
+                                                                IMG_CHAR                                       szFenceName3D[PVRSRV_SYNC_NAME_LENGTH],
+                                                                IMG_UINT32                                     ui32TACmdSize,
+                                                                IMG_PBYTE                                      pui8TADMCmd,
+                                                                IMG_UINT32                                     ui323DPRCmdSize,
+                                                                IMG_PBYTE                                      pui83DPRDMCmd,
+                                                                IMG_UINT32                                     ui323DCmdSize,
+                                                                IMG_PBYTE                                      pui83DDMCmd,
+                                                                IMG_UINT32                                     ui32ExtJobRef,
+                                                                IMG_BOOL                                       bKickTA,
+                                                                IMG_BOOL                                       bKickPR,
+                                                                IMG_BOOL                                       bKick3D,
+                                                                IMG_BOOL                                       bAbort,
+                                                                IMG_UINT32                                     ui32PDumpFlags,
+                                                                RGX_KM_HW_RT_DATASET           *psKMHWRTDataSet,
+                                                                RGX_ZSBUFFER_DATA                      *psZSBuffer,
+                                                                RGX_ZSBUFFER_DATA                      *psMSAAScratchBuffer,
+                                                                IMG_UINT32                                     ui32SyncPMRCount,
+                                                                IMG_UINT32                                     *paui32SyncPMRFlags,
+                                                                PMR                                            **ppsSyncPMRs,
+                                                                IMG_UINT32                                     ui32RenderTargetSize,
+                                                                IMG_UINT32                                     ui32NumberOfDrawCalls,
+                                                                IMG_UINT32                                     ui32NumberOfIndices,
+                                                                IMG_UINT32                                     ui32NumberOfMRTs,
+                                                                IMG_UINT64                                     ui64DeadlineInus);
+
+
+PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection,
+                                                 PVRSRV_DEVICE_NODE * psDevNode,
+                                                 RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+                                                 IMG_UINT32 ui32Priority);
+
+PVRSRV_ERROR PVRSRVRGXSetRenderContextPropertyKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+                                                                                                RGX_CONTEXT_PROPERTY eContextProperty,
+                                                                                                IMG_UINT64 ui64Input,
+                                                                                                IMG_UINT64 *pui64Output);
+
+/* Debug - Dump debug info of render contexts on this device */
+void DumpRenderCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                         DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                         void *pvDumpDebugFile,
+                         IMG_UINT32 ui32VerbLevel);
+
+/* Debug/Watchdog - check if client contexts are stalled */
+IMG_UINT32 CheckForStalledClientRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+PVRSRV_ERROR RGXRenderContextStalledKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext);
+
+#endif /* RGXTA3D_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxtdmtransfer.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxtdmtransfer.c
new file mode 100644 (file)
index 0000000..f341464
--- /dev/null
@@ -0,0 +1,1329 @@
+/*************************************************************************/ /*!
+@File           rgxtdmtransfer.c
+@Title          Device specific TDM transfer queue routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pdump_km.h"
+#include "rgxdevice.h"
+#include "rgxccb.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxtdmtransfer.h"
+#include "rgx_tq_shared.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgx_memallocflags.h"
+#include "rgxhwperf.h"
+#include "ospvr_gputrace.h"
+#include "htbuffer.h"
+#include "rgxshader.h"
+
+#include "pdump_km.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "rgxworkest.h"
+#endif
+
+#include "rgxtimerquery.h"
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_TDM_UFO_DUMP    0
+
+//#define TDM_CHECKPOINT_DEBUG 1
+
+#if defined(TDM_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+#else
+#define CHKPT_DBG(X)
+#endif
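CHKPT_DBG compiles away unless TDM_CHECKPOINT_DEBUG is defined. The double parentheses at each call site let a full PVR_DPF argument list pass through the single macro parameter; a call would look like the following (message text and variable chosen for illustration only):

/* Example call site for the conditional checkpoint-debug macro above. */
CHKPT_DBG((PVR_DBG_MESSAGE, "%s: resolved %u fence sync checkpoints",
           __func__, ui32FenceSyncCheckpointCount));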
+
+typedef struct {
+       RGX_SERVER_COMMON_CONTEXT * psServerCommonContext;
+       IMG_UINT32                  ui32Priority;
+#if defined(SUPPORT_BUFFER_SYNC)
+       struct pvr_buffer_sync_context *psBufferSyncContext;
+#endif
+} RGX_SERVER_TQ_TDM_DATA;
+
+
+struct _RGX_SERVER_TQ_TDM_CONTEXT_ {
+       PVRSRV_DEVICE_NODE      *psDeviceNode;
+       DEVMEM_MEMDESC          *psFWFrameworkMemDesc;
+       DEVMEM_MEMDESC          *psFWTransferContextMemDesc;
+       IMG_UINT32              ui32Flags;
+       RGX_SERVER_TQ_TDM_DATA  sTDMData;
+       DLLIST_NODE             sListNode;
+       SYNC_ADDR_LIST          sSyncAddrListFence;
+       SYNC_ADDR_LIST          sSyncAddrListUpdate;
+       POS_LOCK                hLock;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       WORKEST_HOST_DATA       sWorkEstData;
+#endif
+};
+
+static PVRSRV_ERROR _CreateTDMTransferContext(
+               CONNECTION_DATA         * psConnection,
+               PVRSRV_DEVICE_NODE      * psDeviceNode,
+               DEVMEM_MEMDESC          * psAllocatedMemDesc,
+               IMG_UINT32                ui32AllocatedOffset,
+               DEVMEM_MEMDESC          * psFWMemContextMemDesc,
+               IMG_UINT32                ui32Priority,
+               RGX_COMMON_CONTEXT_INFO * psInfo,
+               RGX_SERVER_TQ_TDM_DATA  * psTDMData,
+               IMG_UINT32                                ui32CCBAllocSizeLog2,
+               IMG_UINT32                                ui32CCBMaxAllocSizeLog2,
+               IMG_UINT32                                ui32ContextFlags,
+               IMG_UINT64                ui64RobustnessAddress)
+{
+       PVRSRV_ERROR eError;
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       psTDMData->psBufferSyncContext =
+                       pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice,
+                                                      "rogue-tdm");
+       if (IS_ERR(psTDMData->psBufferSyncContext))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: failed to create buffer_sync context (err=%ld)",
+                               __func__, PTR_ERR(psTDMData->psBufferSyncContext)));
+
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto fail_buffer_sync_context_create;
+       }
+#endif
+
+       eError = FWCommonContextAllocate(
+                       psConnection,
+                       psDeviceNode,
+                       REQ_TYPE_TQ_TDM,
+                       RGXFWIF_DM_TDM,
+                       NULL,
+                       psAllocatedMemDesc,
+                       ui32AllocatedOffset,
+                       psFWMemContextMemDesc,
+                       NULL,
+                       ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TDM_CCB_SIZE_LOG2,
+                       ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TDM_CCB_MAX_SIZE_LOG2,
+                       ui32ContextFlags,
+                       ui32Priority,
+                       UINT_MAX, /* max deadline MS */
+                       ui64RobustnessAddress,
+                       psInfo,
+                       &psTDMData->psServerCommonContext);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_contextalloc;
+       }
+
+       psTDMData->ui32Priority = ui32Priority;
+       return PVRSRV_OK;
+
+fail_contextalloc:
+#if defined(SUPPORT_BUFFER_SYNC)
+       pvr_buffer_sync_context_destroy(psTDMData->psBufferSyncContext);
+       psTDMData->psBufferSyncContext = NULL;
+fail_buffer_sync_context_create:
+#endif
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+
+static PVRSRV_ERROR _DestroyTDMTransferContext(
+               RGX_SERVER_TQ_TDM_DATA  * psTDMData,
+               PVRSRV_DEVICE_NODE      * psDeviceNode)
+{
+       PVRSRV_ERROR eError;
+
+       /* Check if the FW has finished with this resource ... */
+       eError = RGXFWRequestCommonContextCleanUp(
+                       psDeviceNode,
+                       psTDMData->psServerCommonContext,
+                       RGXFWIF_DM_TDM,
+                       PDUMP_FLAGS_CONTINUOUS);
+       if (eError == PVRSRV_ERROR_RETRY)
+       {
+               return eError;
+       }
+       else if (eError != PVRSRV_OK)
+       {
+               PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+               return eError;
+       }
+
+	/* ... it has, so we can free its resources */
+       FWCommonContextFree(psTDMData->psServerCommonContext);
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       pvr_buffer_sync_context_destroy(psTDMData->psBufferSyncContext);
+       psTDMData->psBufferSyncContext = NULL;
+#endif
+
+       return PVRSRV_OK;
+}
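The PVRSRV_ERROR_RETRY path above reflects the deferred-cleanup contract: the firmware may still own the common context, so destruction has to be re-attempted later rather than forced. In the driver the error is simply propagated upwards (PVRSRVRGXTDMDestroyTransferContextKM below re-adds the context to the device list on failure). The loop below is only a sketch of what honouring that contract looks like from a caller's point of view; the retry count, the lack of back-off, and the helper name are all assumptions.

/* Caller-side sketch of the RETRY contract; illustrative only. */
static PVRSRV_ERROR _DestroyTDMWithRetrySketch(RGX_SERVER_TQ_TDM_DATA *psTDMData,
                                               PVRSRV_DEVICE_NODE *psDeviceNode,
                                               IMG_UINT32 ui32MaxAttempts)
{
	PVRSRV_ERROR eError = PVRSRV_ERROR_RETRY;
	IMG_UINT32 ui32Attempt;

	for (ui32Attempt = 0;
	     (ui32Attempt < ui32MaxAttempts) && (eError == PVRSRV_ERROR_RETRY);
	     ui32Attempt++)
	{
		eError = _DestroyTDMTransferContext(psTDMData, psDeviceNode);
	}

	return eError;
}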
+
+/*
+ * PVRSRVRGXTDMCreateTransferContextKM
+ */
+PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM(
+               CONNECTION_DATA            * psConnection,
+               PVRSRV_DEVICE_NODE         * psDeviceNode,
+               IMG_UINT32                   ui32Priority,
+               IMG_UINT32                   ui32FrameworkCommandSize,
+               IMG_PBYTE                    pabyFrameworkCommand,
+               IMG_HANDLE                   hMemCtxPrivData,
+               IMG_UINT32                                       ui32PackedCCBSizeU88,
+               IMG_UINT32                   ui32ContextFlags,
+               IMG_UINT64                   ui64RobustnessAddress,
+               RGX_SERVER_TQ_TDM_CONTEXT ** ppsTransferContext)
+{
+       RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext;
+
+       DEVMEM_MEMDESC          * psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+       PVRSRV_RGXDEV_INFO      * psDevInfo = psDeviceNode->pvDevice;
+       RGX_COMMON_CONTEXT_INFO   sInfo = {NULL};
+       PVRSRV_ERROR              eError = PVRSRV_OK;
+
+       /* Allocate the server side structure */
+       *ppsTransferContext = NULL;
+       psTransferContext = OSAllocZMem(sizeof(*psTransferContext));
+       if (psTransferContext == NULL)
+       {
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       /*
+		Create the FW transfer context; this has the TDM common
+		context embedded within it.
+        */
+       eError = DevmemFwAllocate(psDevInfo,
+                       sizeof(RGXFWIF_FWTDMCONTEXT),
+                       RGX_FWCOMCTX_ALLOCFLAGS,
+                       "FwTransferContext",
+                       &psTransferContext->psFWTransferContextMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_fwtransfercontext;
+       }
+
+       eError = OSLockCreate(&psTransferContext->hLock);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+               goto fail_lockcreate;
+       }
+
+       psTransferContext->psDeviceNode = psDeviceNode;
+
+       if (ui32FrameworkCommandSize)
+       {
+               /*
+                * Create the FW framework buffer
+                */
+               eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+                               &psTransferContext->psFWFrameworkMemDesc,
+                               ui32FrameworkCommandSize);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Failed to allocate firmware GPU framework state (%s)",
+                                       __func__,
+                                       PVRSRVGetErrorString(eError)));
+                       goto fail_frameworkcreate;
+               }
+
+               /* Copy the Framework client data into the framework buffer */
+               eError = PVRSRVRGXFrameworkCopyCommand(psDeviceNode,
+                               psTransferContext->psFWFrameworkMemDesc,
+                               pabyFrameworkCommand,
+                               ui32FrameworkCommandSize);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Failed to populate the framework buffer (%s)",
+                                       __func__,
+                                       PVRSRVGetErrorString(eError)));
+                       goto fail_frameworkcopy;
+               }
+
+               sInfo.psFWFrameworkMemDesc = psTransferContext->psFWFrameworkMemDesc;
+       }
+
+       eError = _CreateTDMTransferContext(psConnection,
+                                          psDeviceNode,
+                                          psTransferContext->psFWTransferContextMemDesc,
+                                          offsetof(RGXFWIF_FWTDMCONTEXT, sTDMContext),
+                                          psFWMemContextMemDesc,
+                                          ui32Priority,
+                                          &sInfo,
+                                          &psTransferContext->sTDMData,
+                                                                          U32toU8_Unpack1(ui32PackedCCBSizeU88),
+                                                                          U32toU8_Unpack2(ui32PackedCCBSizeU88),
+                                          ui32ContextFlags,
+                                          ui64RobustnessAddress);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_tdmtransfercontext;
+       }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM))
+       {
+               WorkEstInitTDM(psDevInfo, &psTransferContext->sWorkEstData);
+       }
+#endif
+
+       SyncAddrListInit(&psTransferContext->sSyncAddrListFence);
+       SyncAddrListInit(&psTransferContext->sSyncAddrListUpdate);
+
+       OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock);
+       dllist_add_to_tail(&(psDevInfo->sTDMCtxtListHead), &(psTransferContext->sListNode));
+       OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock);
+       *ppsTransferContext = psTransferContext;
+
+       return PVRSRV_OK;
+
+fail_tdmtransfercontext:
+fail_frameworkcopy:
+       if (psTransferContext->psFWFrameworkMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc);
+       }
+fail_frameworkcreate:
+       OSLockDestroy(psTransferContext->hLock);
+fail_lockcreate:
+       DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWTransferContextMemDesc);
+fail_fwtransfercontext:
+       OSFreeMem(psTransferContext);
+       PVR_ASSERT(eError != PVRSRV_OK);
+       *ppsTransferContext = NULL;
+       return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXTDMGetSharedMemoryKM(
+       CONNECTION_DATA           * psConnection,
+       PVRSRV_DEVICE_NODE        * psDeviceNode,
+       PMR                      ** ppsCLIPMRMem,
+       PMR                      ** ppsUSCPMRMem)
+{
+       PVRSRVTQAcquireShaders(psDeviceNode, ppsCLIPMRMem, ppsUSCPMRMem);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVRGXTDMReleaseSharedMemoryKM(PMR * psPMRMem)
+{
+       PVR_UNREFERENCED_PARAMETER(psPMRMem);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVRGXTDMDestroyTransferContextKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psTransferContext->psDeviceNode->pvDevice;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       RGXFWIF_FWTDMCONTEXT    *psFWTransferContext;
+       IMG_UINT32 ui32WorkEstCCBSubmitted;
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM))
+       {
+               eError = DevmemAcquireCpuVirtAddr(psTransferContext->psFWTransferContextMemDesc,
+                               (void **)&psFWTransferContext);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Failed to map firmware transfer context (%s)",
+                                       __func__,
+                                       PVRSRVGetErrorString(eError)));
+                       return eError;
+               }
+
+               ui32WorkEstCCBSubmitted = psFWTransferContext->ui32WorkEstCCBSubmitted;
+
+               DevmemReleaseCpuVirtAddr(psTransferContext->psFWTransferContextMemDesc);
+
+               /* Check if all of the workload estimation CCB commands for this workload are read */
+               if (ui32WorkEstCCBSubmitted != psTransferContext->sWorkEstData.ui32WorkEstCCBReceived)
+               {
+                       PVR_DPF((PVR_DBG_WARNING,
+                                       "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch",
+                                       __func__, ui32WorkEstCCBSubmitted,
+                                       psTransferContext->sWorkEstData.ui32WorkEstCCBReceived));
+
+                       return PVRSRV_ERROR_RETRY;
+               }
+       }
+#endif
+
+
+	/* Remove the node from the list before calling destroy: if destroy succeeds
+	 * it will invalidate the node, so it must be re-added to the list if destroy
+	 * fails.
+	 */
+       OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock);
+       dllist_remove_node(&(psTransferContext->sListNode));
+       OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock);
+
+
+       eError = _DestroyTDMTransferContext(&psTransferContext->sTDMData,
+                                           psTransferContext->psDeviceNode);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_destroyTDM;
+       }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM))
+       {
+               WorkEstDeInitTDM(psDevInfo, &psTransferContext->sWorkEstData);
+       }
+#endif
+
+       if (psTransferContext->psFWFrameworkMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc);
+       }
+
+       SyncAddrListDeinit(&psTransferContext->sSyncAddrListFence);
+       SyncAddrListDeinit(&psTransferContext->sSyncAddrListUpdate);
+
+       DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWTransferContextMemDesc);
+
+       OSLockDestroy(psTransferContext->hLock);
+
+       OSFreeMem(psTransferContext);
+
+       return PVRSRV_OK;
+
+fail_destroyTDM:
+
+       OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock);
+       dllist_add_to_tail(&(psDevInfo->sTDMCtxtListHead), &(psTransferContext->sListNode));
+       OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock);
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+
+/*
+ * PVRSRVRGXTDMSubmitTransferKM
+ */
+PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM(
+               RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext,
+               IMG_UINT32                  ui32PDumpFlags,
+               IMG_UINT32                  ui32ClientUpdateCount,
+               SYNC_PRIMITIVE_BLOCK     ** pauiClientUpdateUFODevVarBlock,
+               IMG_UINT32                * paui32ClientUpdateSyncOffset,
+               IMG_UINT32                * paui32ClientUpdateValue,
+               PVRSRV_FENCE                iCheckFence,
+               PVRSRV_TIMELINE             iUpdateTimeline,
+               PVRSRV_FENCE              * piUpdateFence,
+               IMG_CHAR                    szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],
+               IMG_UINT32                  ui32FWCommandSize,
+               IMG_UINT8                 * pui8FWCommand,
+               IMG_UINT32                  ui32ExtJobRef,
+               IMG_UINT32                  ui32SyncPMRCount,
+               IMG_UINT32                * paui32SyncPMRFlags,
+               PMR                      ** ppsSyncPMRs,
+               IMG_UINT32                  ui32TDMCharacteristic1,
+               IMG_UINT32                  ui32TDMCharacteristic2,
+               IMG_UINT64                  ui64DeadlineInus)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode;
+       RGX_CCB_CMD_HELPER_DATA *psCmdHelper;
+       PRGXFWIF_UFO_ADDR * pauiIntFenceUFOAddress   = NULL;
+       PRGXFWIF_UFO_ADDR * pauiIntUpdateUFOAddress  = NULL;
+       IMG_UINT32          ui32IntClientFenceCount  = 0;
+       IMG_UINT32        * paui32IntUpdateValue     = paui32ClientUpdateValue;
+       IMG_UINT32          ui32IntClientUpdateCount = ui32ClientUpdateCount;
+       PVRSRV_ERROR eError;
+       PVRSRV_ERROR eError2;
+       PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE;
+       PVRSRV_RGXDEV_INFO  *psDevInfo = FWCommonContextGetRGXDevInfo(psTransferContext->sTDMData.psServerCommonContext);
+       RGX_CLIENT_CCB      *psClientCCB = FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext);
+       IMG_UINT32          ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal);
+
+       IMG_UINT32 ui32CmdOffset = 0;
+       IMG_BOOL bCCBStateOpen;
+
+       PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+       PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+       PRGXFWIF_UFO_ADDR       pRMWUFOAddr;
+
+       IMG_UINT64               uiCheckFenceUID = 0;
+       IMG_UINT64               uiUpdateFenceUID = 0;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataTransfer = {0};
+       IMG_UINT32 ui32TDMWorkloadDataRO = 0;
+       IMG_UINT32 ui32TDMCmdHeaderOffset = 0;
+       IMG_UINT32 ui32TDMCmdOffsetWrapCheck = 0;
+       RGX_WORKLOAD sWorkloadCharacteristics = {0};
+#endif
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       struct pvr_buffer_sync_append_data *psBufferSyncData = NULL;
+       PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL;
+       IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0;
+       PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL;
+#endif
+
+       PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL;
+       PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+       IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+       IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+       PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL;
+       IMG_UINT32 ui32FenceTimelineUpdateValue = 0;
+       void *pvUpdateFenceFinaliseData = NULL;
+
+       if (iUpdateTimeline >= 0 && !piUpdateFence)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+#if !defined(SUPPORT_WORKLOAD_ESTIMATION)
+       PVR_UNREFERENCED_PARAMETER(ui32TDMCharacteristic1);
+       PVR_UNREFERENCED_PARAMETER(ui32TDMCharacteristic2);
+       PVR_UNREFERENCED_PARAMETER(ui64DeadlineInus);
+#endif
+
+       /* Ensure we haven't been given a null ptr to
+        * update values if we have been told we
+        * have updates
+        */
+       if (ui32ClientUpdateCount > 0)
+       {
+               PVR_LOG_RETURN_IF_FALSE(paui32ClientUpdateValue != NULL,
+                                       "paui32ClientUpdateValue NULL but "
+                                       "ui32ClientUpdateCount > 0",
+                                       PVRSRV_ERROR_INVALID_PARAMS);
+       }
+
+       /* Ensure the string is null-terminated (Required for safety) */
+       szUpdateFenceName[31] = '\0';
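+       /* The hard-coded index assumes PVRSRV_SYNC_NAME_LENGTH is 32, matching the
+        * szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH] parameter declared above. */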
+
+       if (ui32SyncPMRCount != 0)
+       {
+               if (!ppsSyncPMRs)
+               {
+                       return PVRSRV_ERROR_INVALID_PARAMS;
+               }
+       }
+
+       OSLockAcquire(psTransferContext->hLock);
+
+       /* We can't allocate the required amount of stack space on all consumer architectures */
+       psCmdHelper = OSAllocMem(sizeof(RGX_CCB_CMD_HELPER_DATA));
+       if (psCmdHelper == NULL)
+       {
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto fail_allochelper;
+       }
+
+
+       /*
+               Init the command helper commands for all the prepares
+       */
+       {
+               IMG_CHAR *pszCommandName;
+               RGXFWIF_CCB_CMD_TYPE eType;
+#if defined(SUPPORT_BUFFER_SYNC)
+               struct pvr_buffer_sync_context *psBufferSyncContext;
+#endif
+
+               pszCommandName = "TQ-TDM";
+
+               if (ui32FWCommandSize == 0)
+               {
+                       /* A NULL CMD for TDM is used to append updates to a not yet finished
+                        * FW command. bCCBStateOpen is used, in case a capture range is entered
+                        * on this command, so that the CCB is not drained up to the Roff for
+                        * this command but only up to the finished command prior to it.
+                        */
+                       bCCBStateOpen = IMG_TRUE;
+                       eType = RGXFWIF_CCB_CMD_TYPE_NULL;
+               }
+               else
+               {
+                       bCCBStateOpen = IMG_FALSE;
+                       eType = RGXFWIF_CCB_CMD_TYPE_TQ_TDM;
+               }
+#if defined(SUPPORT_BUFFER_SYNC)
+               psBufferSyncContext = psTransferContext->sTDMData.psBufferSyncContext;
+#endif
+
+               eError = SyncAddrListPopulate(&psTransferContext->sSyncAddrListFence,
+                                             0,
+                                             NULL,
+                                             NULL);
+               if (eError != PVRSRV_OK)
+               {
+                       goto fail_populate_sync_addr_list;
+               }
+
+               eError = SyncAddrListPopulate(&psTransferContext->sSyncAddrListUpdate,
+                                             ui32ClientUpdateCount,
+                                             pauiClientUpdateUFODevVarBlock,
+                                             paui32ClientUpdateSyncOffset);
+               if (eError != PVRSRV_OK)
+               {
+                       goto fail_populate_sync_addr_list;
+               }
+               paui32IntUpdateValue    = paui32ClientUpdateValue;
+               pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs;
+
+
+               if (ui32SyncPMRCount)
+               {
+#if defined(SUPPORT_BUFFER_SYNC)
+                       int err;
+
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s:   Calling pvr_buffer_sync_resolve_and_create_fences", __func__));
+                       err = pvr_buffer_sync_resolve_and_create_fences(psBufferSyncContext,
+                                                                       psTransferContext->psDeviceNode->hSyncCheckpointContext,
+                                                                       ui32SyncPMRCount,
+                                                                       ppsSyncPMRs,
+                                                                       paui32SyncPMRFlags,
+                                                                       &ui32BufferFenceSyncCheckpointCount,
+                                                                       &apsBufferFenceSyncCheckpoints,
+                                                                       &psBufferUpdateSyncCheckpoint,
+                                                                       &psBufferSyncData);
+                       if (err)
+                       {
+                               switch (err)
+                               {
+                                       case -EINTR:
+                                               eError = PVRSRV_ERROR_RETRY;
+                                               break;
+                                       case -ENOMEM:
+                                               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                                               break;
+                                       default:
+                                               eError = PVRSRV_ERROR_INVALID_PARAMS;
+                                               break;
+                               }
+
+                               if (eError != PVRSRV_ERROR_RETRY)
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s:   pvr_buffer_sync_resolve_and_create_fences failed (%s)", __func__, PVRSRVGetErrorString(eError)));
+                               }
+                               goto fail_resolve_input_fence;
+                       }
+
+                       /* Append buffer sync fences */
+                       if (ui32BufferFenceSyncCheckpointCount > 0)
+                       {
+                               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d buffer sync checkpoints to TQ Fence (&psTransferContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>)...", __func__, ui32BufferFenceSyncCheckpointCount, (void*)&psTransferContext->sSyncAddrListFence , (void*)pauiIntFenceUFOAddress));
+                               SyncAddrListAppendAndDeRefCheckpoints(&psTransferContext->sSyncAddrListFence,
+                                                                     ui32BufferFenceSyncCheckpointCount,
+                                                                     apsBufferFenceSyncCheckpoints);
+                               if (!pauiIntFenceUFOAddress)
+                               {
+                                       pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs;
+                               }
+                               ui32IntClientFenceCount += ui32BufferFenceSyncCheckpointCount;
+                       }
+
+                       if (psBufferUpdateSyncCheckpoint)
+                       {
+                               /* Append the update (from output fence) */
+                               SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListUpdate,
+                                                             1,
+                                                             &psBufferUpdateSyncCheckpoint);
+                               if (!pauiIntUpdateUFOAddress)
+                               {
+                                       pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs;
+                               }
+                               ui32IntClientUpdateCount++;
+                       }
+#else /* defined(SUPPORT_BUFFER_SYNC) */
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", __func__, ui32SyncPMRCount));
+                       eError = PVRSRV_ERROR_INVALID_PARAMS;
+                       goto fail_populate_sync_addr_list;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+               }
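+
+               /* At this point any buffer sync fences attached to the supplied PMRs have
+                * been resolved into sync checkpoints: the checks were appended to the
+                * fence list and a single update checkpoint (if any) to the update list.
+                */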
+
+               /* Resolve the sync checkpoints that make up the input fence */
+               eError = SyncCheckpointResolveFence(psTransferContext->psDeviceNode->hSyncCheckpointContext,
+                                                   iCheckFence,
+                                                   &ui32FenceSyncCheckpointCount,
+                                                   &apsFenceSyncCheckpoints,
+                                                   &uiCheckFenceUID,
+                                                   ui32PDumpFlags);
+               if (eError != PVRSRV_OK)
+               {
+                       goto fail_resolve_input_fence;
+               }
+#if defined(TDM_CHECKPOINT_DEBUG)
+               {
+                       IMG_UINT32 ii;
+                       for (ii=0; ii<32; ii++)
+                       {
+                               PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints +  ii);
+                               CHKPT_DBG((PVR_DBG_ERROR, "%s:    apsFenceSyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint)); //psFenceSyncCheckpoints[ii]));
+                       }
+               }
+#endif
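+               /* Note: the debug-only dump above walks a fixed 32 entries of
+                * apsFenceSyncCheckpoints regardless of ui32FenceSyncCheckpointCount. */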
+               /* Create the output fence (if required) */
+               if (iUpdateTimeline != PVRSRV_NO_TIMELINE)
+               {
+                       eError = SyncCheckpointCreateFence(psTransferContext->psDeviceNode,
+                                                          szUpdateFenceName,
+                                                          iUpdateTimeline,
+                                                          psTransferContext->psDeviceNode->hSyncCheckpointContext,
+                                                          &iUpdateFence,
+                                                          &uiUpdateFenceUID,
+                                                          &pvUpdateFenceFinaliseData,
+                                                          &psUpdateSyncCheckpoint,
+                                                          (void*)&psFenceTimelineUpdateSync,
+                                                          &ui32FenceTimelineUpdateValue,
+                                                          ui32PDumpFlags);
+                       if (eError != PVRSRV_OK)
+                       {
+                               goto fail_create_output_fence;
+                       }
+
+                       /* Append the sync prim update for the timeline (if required) */
+                       if (psFenceTimelineUpdateSync)
+                       {
+                               IMG_UINT32 *pui32TimelineUpdateWp = NULL;
+
+                               /* Allocate memory to hold the list of update values (including our timeline update) */
+                               pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+                               if (!pui32IntAllocatedUpdateValues)
+                               {
+                                       /* Failed to allocate memory */
+                                       eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                                       goto fail_alloc_update_values_mem;
+                               }
+                               OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+                               /* Copy the update values into the new memory, then append our timeline update value */
+                               if (paui32IntUpdateValue)
+                               {
+                                       OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount);
+                               }
+                               /* Now set the additional update value */
+                               pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount;
+                               *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
+                               ui32IntClientUpdateCount++;
+#if defined(TDM_CHECKPOINT_DEBUG)
+                               {
+                                       IMG_UINT32 iii;
+                                       IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+                                       for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+                                       {
+                                               CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                                               pui32Tmp++;
+                                       }
+                               }
+#endif
+                               /* Now append the timeline sync prim addr to the transfer context update list */
+                               SyncAddrListAppendSyncPrim(&psTransferContext->sSyncAddrListUpdate,
+                                                          psFenceTimelineUpdateSync);
+#if defined(TDM_CHECKPOINT_DEBUG)
+                               {
+                                       IMG_UINT32 iii;
+                                       IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+                                       for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+                                       {
+                                               CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                                               pui32Tmp++;
+                                       }
+                               }
+#endif
+                               /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */
+                               paui32IntUpdateValue = pui32IntAllocatedUpdateValues;
+                       }
+               }
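+
+               /* At this point the update list holds the client updates, any buffer sync
+                * update appended above and, when an update timeline was supplied, the
+                * timeline sync prim; in that case paui32IntUpdateValue now points at the
+                * reallocated pui32IntAllocatedUpdateValues array. The checkpoint for the
+                * output fence itself is appended further below.
+                */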
+
+               if (ui32FenceSyncCheckpointCount)
+               {
+                       /* Append the checks (from input fence) */
+                       if (ui32FenceSyncCheckpointCount > 0)
+                       {
+                               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d sync checkpoints to TQ Fence (&psTransferContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psTransferContext->sSyncAddrListFence));
+#if defined(TDM_CHECKPOINT_DEBUG)
+                               {
+                                       IMG_UINT32 iii;
+                                       IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress;
+
+                                       for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+                                       {
+                                               CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                                               pui32Tmp++;
+                                       }
+                               }
+#endif
+                               SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListFence,
+                                                             ui32FenceSyncCheckpointCount,
+                                                             apsFenceSyncCheckpoints);
+                               if (!pauiIntFenceUFOAddress)
+                               {
+                                       pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs;
+                               }
+                               ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
+                       }
+#if defined(TDM_CHECKPOINT_DEBUG)
+                       {
+                               IMG_UINT32 iii;
+                               IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+                               for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+                               {
+                                       CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                                       pui32Tmp++;
+                               }
+                       }
+#endif
+               }
+               if (psUpdateSyncCheckpoint)
+               {
+                       /* Append the update (from output fence) */
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append 1 sync checkpoint to TQ Update (&psTransferContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)&psTransferContext->sSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress));
+                       SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListUpdate,
+                                                     1,
+                                                     &psUpdateSyncCheckpoint);
+                       if (!pauiIntUpdateUFOAddress)
+                       {
+                               pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs;
+                       }
+                       ui32IntClientUpdateCount++;
+#if defined(TDM_CHECKPOINT_DEBUG)
+                       {
+                               IMG_UINT32 iii;
+                               IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+                               for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+                               {
+                                       CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                                       pui32Tmp++;
+                               }
+                       }
+#endif
+               }
+
+#if (ENABLE_TDM_UFO_DUMP == 1)
+               PVR_DPF((PVR_DBG_ERROR, "%s: dumping TDM fence/updates syncs...", __func__));
+               {
+                       IMG_UINT32 ii;
+                       PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress;
+                       PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress;
+                       IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue;
+
+                       /* Dump Fence syncs and Update syncs */
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TDM fence syncs (&psTransferContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psTransferContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress));
+                       for (ii=0; ii<ui32IntClientFenceCount; ii++)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
+                               psTmpIntFenceUFOAddress++;
+                       }
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TDM update syncs (&psTransferContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psTransferContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
+                       for (ii=0; ii<ui32IntClientUpdateCount; ii++)
+                       {
+                               if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
+                               }
+                               else
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue));
+                                       pui32TmpIntUpdateValue++;
+                               }
+                               psTmpIntUpdateUFOAddress++;
+                       }
+               }
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+               if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM))
+               {
+                       sWorkloadCharacteristics.sTransfer.ui32Characteristic1 = ui32TDMCharacteristic1;
+                       sWorkloadCharacteristics.sTransfer.ui32Characteristic2 = ui32TDMCharacteristic2;
+
+                       /* Prepare workload estimation */
+                       WorkEstPrepare(psDeviceNode->pvDevice,
+                                       &psTransferContext->sWorkEstData,
+                                       &psTransferContext->sWorkEstData.uWorkloadMatchingData.sTransfer.sDataTDM,
+                                       eType,
+                                       &sWorkloadCharacteristics,
+                                       ui64DeadlineInus,
+                                       &sWorkloadKickDataTransfer);
+               }
+#endif
+               RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psTransferContext->psDeviceNode->pvDevice,
+                                         &pPreAddr,
+                                         &pPostAddr,
+                                         &pRMWUFOAddr);
+               /*
+                       Create the command helper data for this command
+               */
+               RGXCmdHelperInitCmdCCB(psDevInfo,
+                                      psClientCCB,
+                                      0,
+                                      ui32IntClientFenceCount,
+                                      pauiIntFenceUFOAddress,
+                                      NULL,
+                                      ui32IntClientUpdateCount,
+                                      pauiIntUpdateUFOAddress,
+                                      paui32IntUpdateValue,
+                                      ui32FWCommandSize,
+                                      pui8FWCommand,
+                                      &pPreAddr,
+                                      &pPostAddr,
+                                      &pRMWUFOAddr,
+                                      eType,
+                                      ui32ExtJobRef,
+                                      ui32IntJobRef,
+                                      ui32PDumpFlags,
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+                                      &sWorkloadKickDataTransfer,
+#else /* SUPPORT_WORKLOAD_ESTIMATION */
+                                      NULL,
+#endif /* SUPPORT_WORKLOAD_ESTIMATION */
+                                      pszCommandName,
+                                      bCCBStateOpen,
+                                      psCmdHelper);
+       }
+
+       /*
+               Acquire space for all the commands in one go
+       */
+
+       eError = RGXCmdHelperAcquireCmdCCB(1, psCmdHelper);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_3dcmdacquire;
+       }
+
+
+       /*
+               We should acquire the kernel CCB(s) space here as the schedule could fail
+               and we would have to roll back all the syncs
+       */
+
+       /*
+               Only do the command helper release (which takes the server sync
+               operations) if the acquire succeeded
+       */
+       ui32CmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext));
+       RGXCmdHelperReleaseCmdCCB(1,
+                                 psCmdHelper,
+                                 "TQ_TDM",
+                                 FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext).ui32Addr);
+
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM))
+       {
+               /* The following is used to determine the offset of the command header containing
+                  the workload estimation data so that it can be accessed when the KCCB is read */
+               ui32TDMCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(psCmdHelper);
+
+               ui32TDMCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext));
+
+               /* This checks if the command would wrap around at the end of the CCB and
+                * therefore would start at an offset of 0 rather than the current command
+                * offset */
+               if (ui32CmdOffset < ui32TDMCmdOffsetWrapCheck)
+               {
+                       ui32TDMWorkloadDataRO = ui32CmdOffset;
+               }
+               else
+               {
+                       ui32TDMWorkloadDataRO = 0;
+               }
+       }
+#endif
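+       /* Illustrative example (hypothetical offsets, not taken from this driver): if
+        * the write offset before the release was 0xF80 and the release wrapped the
+        * client CCB so it is now 0x080, the check above fails and the workload data
+        * is taken to start at offset 0; otherwise the pre-release offset is used.
+        */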
+
+       /*
+               Even if we failed to acquire the client CCB space we might still need
+               to kick the HW to process a padding packet to release space for us next
+               time round
+       */
+       {
+               RGXFWIF_KCCB_CMD sTDMKCCBCmd;
+               IMG_UINT32 ui32FWAddr = FWCommonContextGetFWAddress(
+                               psTransferContext->sTDMData.psServerCommonContext).ui32Addr;
+
+               /* Construct the kernel TDM CCB command. */
+               sTDMKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+               sTDMKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext);
+               sTDMKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB);
+               sTDMKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB);
+               sTDMKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+               /* Add the Workload data into the KCCB kick */
+               sTDMKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+               if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM))
+               {
+                       /* Store the offset to the client CCB command header so that it can be
+                        * referenced when the KCCB command reaches the FW */
+                       sTDMKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui32TDMWorkloadDataRO + ui32TDMCmdHeaderOffset;
+               }
+#endif
+
+               /* HTBLOGK(HTB_SF_MAIN_KICK_TDM, */
+               /*              s3DKCCBCmd.uCmdData.sCmdKickData.psContext, */
+               /*              ui323DCmdOffset); */
+               RGXSRV_HWPERF_ENQ(psTransferContext,
+                                 OSGetCurrentClientProcessIDKM(),
+                                 FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext).ui32Addr,
+                                 ui32ExtJobRef,
+                                 ui32IntJobRef,
+                                 RGX_HWPERF_KICK_TYPE_TQTDM,
+                                 iCheckFence,
+                                 iUpdateFence,
+                                 iUpdateTimeline,
+                                 uiCheckFenceUID,
+                                 uiUpdateFenceUID,
+                                 NO_DEADLINE,
+                                 NO_CYCEST);
+
+               LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+               {
+                       eError2 = RGXScheduleCommand(psDeviceNode->pvDevice,
+                                                    RGXFWIF_DM_TDM,
+                                                    & sTDMKCCBCmd,
+                                                    ui32PDumpFlags);
+                       if (eError2 != PVRSRV_ERROR_RETRY)
+                       {
+                               break;
+                       }
+                       OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+               } END_LOOP_UNTIL_TIMEOUT();
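+
+               /* RGXScheduleCommand is retried for as long as it reports
+                * PVRSRV_ERROR_RETRY, waiting MAX_HW_TIME_US/WAIT_TRY_COUNT microseconds
+                * between attempts, so the submission attempt is bounded by roughly
+                * MAX_HW_TIME_US in total. */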
+
+               if (eError2 != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXTDMSubmitTransferKM failed to schedule kernel CCB command. (0x%x)", eError2));
+                       if (eError == PVRSRV_OK)
+                       {
+                               eError = eError2;
+                       }
+                       goto fail_2dcmdacquire;
+               }
+
+               PVRGpuTraceEnqueueEvent(psDeviceNode->pvDevice, ui32FWAddr, ui32ExtJobRef,
+                                       ui32IntJobRef, RGX_HWPERF_KICK_TYPE_TQTDM);
+       }
+
+       /*
+        * Now check eError (which may hold an error from our earlier call to
+        * RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first,
+        * so we check it now...
+        */
+       if (eError != PVRSRV_OK )
+       {
+               goto fail_2dcmdacquire;
+       }
+
+#if defined(NO_HARDWARE)
+       /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+       if (psUpdateSyncCheckpoint)
+       {
+               SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
+       }
+       if (psFenceTimelineUpdateSync)
+       {
+               SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
+       }
+       SyncCheckpointNoHWUpdateTimelines(NULL);
+#endif /* defined(NO_HARDWARE) */
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       if (psBufferSyncData)
+       {
+               pvr_buffer_sync_kick_succeeded(psBufferSyncData);
+       }
+       if (apsBufferFenceSyncCheckpoints)
+       {
+               kfree(apsBufferFenceSyncCheckpoints);
+       }
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+       *piUpdateFence = iUpdateFence;
+       if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE))
+       {
+               SyncCheckpointFinaliseFence(psDeviceNode, iUpdateFence, pvUpdateFenceFinaliseData,
+                                           psUpdateSyncCheckpoint, szUpdateFenceName);
+       }
+
+       OSFreeMem(psCmdHelper);
+
+       /* Drop the references taken on the sync checkpoints in the
+        * resolved input fence */
+       SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+                                    apsFenceSyncCheckpoints);
+       /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+       if (apsFenceSyncCheckpoints)
+       {
+               SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+       }
+       /* Free memory allocated to hold the internal list of update values */
+       if (pui32IntAllocatedUpdateValues)
+       {
+               OSFreeMem(pui32IntAllocatedUpdateValues);
+               pui32IntAllocatedUpdateValues = NULL;
+       }
+
+       OSLockRelease(psTransferContext->hLock);
+       return PVRSRV_OK;
+
+/*
+       No resources are created in this function so there is nothing to free
+       unless we had to merge syncs.
+       If we fail after the client CCB acquire there is still nothing to do
+       as only the client CCB release will modify the client CCB
+*/
+fail_2dcmdacquire:
+fail_3dcmdacquire:
+
+       SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, &psTransferContext->sSyncAddrListFence);
+       SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, &psTransferContext->sSyncAddrListUpdate);
+fail_alloc_update_values_mem:
+
+/* fail_pdumpcheck: */
+/* fail_cmdtype: */
+
+       if (iUpdateFence != PVRSRV_NO_FENCE)
+       {
+               SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
+       }
+fail_create_output_fence:
+       /* Drop the references taken on the sync checkpoints in the
+        * resolved input fence */
+       SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+                                    apsFenceSyncCheckpoints);
+
+fail_resolve_input_fence:
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       if (psBufferSyncData)
+       {
+               pvr_buffer_sync_kick_failed(psBufferSyncData);
+       }
+       if (apsBufferFenceSyncCheckpoints)
+       {
+               kfree(apsBufferFenceSyncCheckpoints);
+       }
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+fail_populate_sync_addr_list:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       OSFreeMem(psCmdHelper);
+fail_allochelper:
+
+       if (apsFenceSyncCheckpoints)
+       {
+               SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+       }
+       OSLockRelease(psTransferContext->hLock);
+       return eError;
+}
+
+
+PVRSRV_ERROR PVRSRVRGXTDMNotifyWriteOffsetUpdateKM(
+               RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+               IMG_UINT32                 ui32PDumpFlags)
+{
+       RGXFWIF_KCCB_CMD  sKCCBCmd;
+       PVRSRV_ERROR      eError;
+
+       OSLockAcquire(psTransferContext->hLock);
+
+       /* Schedule the firmware command */
+       sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE;
+       sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext);
+
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               eError = RGXScheduleCommand(psTransferContext->psDeviceNode->pvDevice,
+                                           RGXFWIF_DM_TDM,
+                                           &sKCCBCmd,
+                                           ui32PDumpFlags);
+               if (eError != PVRSRV_ERROR_RETRY)
+               {
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to schedule the FW command %d (%s)",
+                               __func__, eError, PVRSRVGetErrorString(eError)));
+       }
+
+       OSLockRelease(psTransferContext->hLock);
+       return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
+                                                      PVRSRV_DEVICE_NODE * psDeviceNode,
+                                                      RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+                                                      IMG_UINT32 ui32Priority)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+       OSLockAcquire(psTransferContext->hLock);
+
+       if (psTransferContext->sTDMData.ui32Priority != ui32Priority)
+       {
+               eError = ContextSetPriority(psTransferContext->sTDMData.psServerCommonContext,
+                                           psConnection,
+                                           psTransferContext->psDeviceNode->pvDevice,
+                                           ui32Priority,
+                                           RGXFWIF_DM_TDM);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority (%s)", __func__, PVRSRVGetErrorString(eError)));
+
+                       OSLockRelease(psTransferContext->hLock);
+                       return eError;
+               }
+       }
+
+       OSLockRelease(psTransferContext->hLock);
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPropertyKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+                                                                                                         RGX_CONTEXT_PROPERTY eContextProperty,
+                                                                                                         IMG_UINT64 ui64Input,
+                                                                                                         IMG_UINT64 *pui64Output)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       switch (eContextProperty)
+       {
+               case RGX_CONTEXT_PROPERTY_FLAGS:
+               {
+                       IMG_UINT32 ui32ContextFlags = (IMG_UINT32)ui64Input;
+
+                       OSLockAcquire(psTransferContext->hLock);
+                       eError = FWCommonContextSetFlags(psTransferContext->sTDMData.psServerCommonContext,
+                                                        ui32ContextFlags);
+                       OSLockRelease(psTransferContext->hLock);
+                       break;
+               }
+
+               default:
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty));
+                       eError = PVRSRV_ERROR_NOT_SUPPORTED;
+               }
+       }
+
+       return eError;
+}
+
+void DumpTDMTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                              DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                              void *pvDumpDebugFile,
+                              IMG_UINT32 ui32VerbLevel)
+{
+       DLLIST_NODE *psNode, *psNext;
+
+       OSWRLockAcquireRead(psDevInfo->hTDMCtxListLock);
+
+       dllist_foreach_node(&psDevInfo->sTDMCtxtListHead, psNode, psNext)
+       {
+               RGX_SERVER_TQ_TDM_CONTEXT *psCurrentServerTransferCtx =
+                               IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_TDM_CONTEXT, sListNode);
+
+               DumpFWCommonContextInfo(psCurrentServerTransferCtx->sTDMData.psServerCommonContext,
+                                       pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+       }
+
+       OSWRLockReleaseRead(psDevInfo->hTDMCtxListLock);
+}
+
+
+IMG_UINT32 CheckForStalledClientTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       DLLIST_NODE *psNode, *psNext;
+       IMG_UINT32 ui32ContextBitMask = 0;
+
+       OSWRLockAcquireRead(psDevInfo->hTDMCtxListLock);
+
+       dllist_foreach_node(&psDevInfo->sTDMCtxtListHead, psNode, psNext)
+       {
+               RGX_SERVER_TQ_TDM_CONTEXT *psCurrentServerTransferCtx =
+                               IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_TDM_CONTEXT, sListNode);
+
+               if (CheckStalledClientCommonContext(
+                               psCurrentServerTransferCtx->sTDMData.psServerCommonContext, RGX_KICK_TYPE_DM_TDM_2D)
+                               == PVRSRV_ERROR_CCCB_STALLED) {
+                       ui32ContextBitMask = RGX_KICK_TYPE_DM_TDM_2D;
+               }
+       }
+
+       OSWRLockReleaseRead(psDevInfo->hTDMCtxListLock);
+       return ui32ContextBitMask;
+}
+
+/**************************************************************************//**
+ End of file (rgxtdmtransfer.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxtransfer.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxtransfer.c
new file mode 100644 (file)
index 0000000..91b3b8d
--- /dev/null
@@ -0,0 +1,1805 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific transfer queue routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pdump_km.h"
+#include "rgxdevice.h"
+#include "rgxccb.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxtransfer.h"
+#include "rgx_tq_shared.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgx_memallocflags.h"
+#include "rgxhwperf.h"
+#include "ospvr_gputrace.h"
+#include "htbuffer.h"
+#include "rgxshader.h"
+
+#include "pdump_km.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "rgx_bvnc_defs_km.h"
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+
+#include "rgxtimerquery.h"
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_TQ_UFO_DUMP     0
+
+//#define TRANSFER_CHECKPOINT_DEBUG 1
+
+#if defined(TRANSFER_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+#else
+#define CHKPT_DBG(X)
+#endif
+
+typedef struct {
+       DEVMEM_MEMDESC                          *psFWContextStateMemDesc;
+       RGX_SERVER_COMMON_CONTEXT       *psServerCommonContext;
+       IMG_UINT32                                      ui32Priority;
+#if defined(SUPPORT_BUFFER_SYNC)
+       struct pvr_buffer_sync_context *psBufferSyncContext;
+#endif
+} RGX_SERVER_TQ_3D_DATA;
+
+typedef struct {
+       RGX_SERVER_COMMON_CONTEXT       *psServerCommonContext;
+       IMG_UINT32                                      ui32Priority;
+#if defined(SUPPORT_BUFFER_SYNC)
+       struct pvr_buffer_sync_context *psBufferSyncContext;
+#endif
+} RGX_SERVER_TQ_2D_DATA;
+
+struct _RGX_SERVER_TQ_CONTEXT_ {
+       PVRSRV_DEVICE_NODE                      *psDeviceNode;
+       DEVMEM_MEMDESC                          *psFWFrameworkMemDesc;
+       DEVMEM_MEMDESC              *psFWTransferContextMemDesc;
+       IMG_UINT32                                      ui32Flags;
+#define RGX_SERVER_TQ_CONTEXT_FLAGS_2D         (1<<0)
+#define RGX_SERVER_TQ_CONTEXT_FLAGS_3D         (1<<1)
+       RGX_SERVER_TQ_3D_DATA           s3DData;
+       RGX_SERVER_TQ_2D_DATA           s2DData;
+       DLLIST_NODE                                     sListNode;
+       ATOMIC_T                        hIntJobRef;
+       IMG_UINT32                      ui32PDumpFlags;
+       /* per-prepare sync address lists */
+       SYNC_ADDR_LIST                  asSyncAddrListFence[TQ_MAX_PREPARES_PER_SUBMIT];
+       SYNC_ADDR_LIST                  asSyncAddrListUpdate[TQ_MAX_PREPARES_PER_SUBMIT];
+       POS_LOCK                                hLock;
+};
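+
+/* A single TQ server context can carry both a 2D (TLA) and a 3D stream; the
+ * RGX_SERVER_TQ_CONTEXT_FLAGS_2D/_3D bits in ui32Flags record which of the two
+ * have been created, and s2DData/s3DData hold their per-stream state. */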
+
+/*
+       Static functions used by transfer context code
+*/
+static PVRSRV_ERROR _Create3DTransferContext(CONNECTION_DATA *psConnection,
+                                                                                        PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                                        DEVMEM_MEMDESC *psAllocatedMemDesc,
+                                                                                        IMG_UINT32 ui32AllocatedOffset,
+                                                                                        DEVMEM_MEMDESC *psFWMemContextMemDesc,
+                                                                                        IMG_UINT32 ui32Priority,
+                                                                                        RGX_COMMON_CONTEXT_INFO *psInfo,
+                                                                                        RGX_SERVER_TQ_3D_DATA *ps3DData,
+                                                                                        IMG_UINT32 ui32CCBAllocSizeLog2,
+                                                                                        IMG_UINT32 ui32CCBMaxAllocSizeLog2,
+                                                                                        IMG_UINT32 ui32ContextFlags,
+                                                                                        IMG_UINT64 ui64RobustnessAddress)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_ERROR eError;
+       IMG_UINT        ui3DRegISPStateStoreSize = 0;
+       IMG_UINT        uiNumISPStoreRegs = 1; /* default value 1 expected */
+       /*
+               Allocate device memory for the firmware GPU context suspend state.
+               Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+       */
+       PDUMPCOMMENT(psDeviceNode, "Allocate RGX firmware TQ/3D context suspend state");
+
+       if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY))
+       {
+               uiNumISPStoreRegs = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode,
+                                                                                                       RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX);
+       }
+
+       /* Calculate the size of the 3DCTX ISP state */
+       ui3DRegISPStateStoreSize = sizeof(RGXFWIF_3DCTX_STATE) +
+                       uiNumISPStoreRegs * sizeof(((RGXFWIF_3DCTX_STATE *)0)->au3DReg_ISP_STORE[0]);
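+       /* For example (illustrative value, not from this file): a core reporting
+        * RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX == 8 allocates sizeof(RGXFWIF_3DCTX_STATE)
+        * plus 8 * sizeof(au3DReg_ISP_STORE[0]), while a core with XE_MEMORY_HIERARCHY
+        * keeps the default single ISP store register slot.
+        */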
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       ps3DData->psBufferSyncContext =
+               pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice,
+                                                                          "rogue-tq3d");
+       if (IS_ERR(ps3DData->psBufferSyncContext))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: failed to create buffer_sync context (err=%ld)",
+                                __func__, PTR_ERR(ps3DData->psBufferSyncContext)));
+
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto fail_buffer_sync_context_create;
+       }
+#endif
+
+       eError = DevmemFwAllocate(psDevInfo,
+                                                       ui3DRegISPStateStoreSize,
+                                                       RGX_FWCOMCTX_ALLOCFLAGS,
+                                                       "FwTQ3DContext",
+                                                       &ps3DData->psFWContextStateMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_contextswitchstate;
+       }
+
+       eError = FWCommonContextAllocate(psConnection,
+                                        psDeviceNode,
+                                        REQ_TYPE_TQ_3D,
+                                        RGXFWIF_DM_3D,
+                                                                        NULL,
+                                                                        psAllocatedMemDesc,
+                                                                        ui32AllocatedOffset,
+                                        psFWMemContextMemDesc,
+                                        ps3DData->psFWContextStateMemDesc,
+                                        ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TQ3D_CCB_SIZE_LOG2,
+                                        ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TQ3D_CCB_MAX_SIZE_LOG2,
+                                        ui32ContextFlags,
+                                        ui32Priority,
+                                        UINT_MAX, /* max deadline MS */
+                                        ui64RobustnessAddress,
+                                        psInfo,
+                                        &ps3DData->psServerCommonContext);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_contextalloc;
+       }
+
+
+       PDUMPCOMMENT(psDeviceNode, "Dump 3D context suspend state buffer");
+       DevmemPDumpLoadMem(ps3DData->psFWContextStateMemDesc, 0, sizeof(RGXFWIF_3DCTX_STATE), PDUMP_FLAGS_CONTINUOUS);
+
+       ps3DData->ui32Priority = ui32Priority;
+       return PVRSRV_OK;
+
+fail_contextalloc:
+       DevmemFwUnmapAndFree(psDevInfo, ps3DData->psFWContextStateMemDesc);
+fail_contextswitchstate:
+#if defined(SUPPORT_BUFFER_SYNC)
+       pvr_buffer_sync_context_destroy(ps3DData->psBufferSyncContext);
+       ps3DData->psBufferSyncContext = NULL;
+fail_buffer_sync_context_create:
+#endif
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+static PVRSRV_ERROR _Create2DTransferContext(CONNECTION_DATA *psConnection,
+                                                                                        PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                                        DEVMEM_MEMDESC *psFWMemContextMemDesc,
+                                                                                        IMG_UINT32 ui32Priority,
+                                                                                        RGX_COMMON_CONTEXT_INFO *psInfo,
+                                                                                        RGX_SERVER_TQ_2D_DATA *ps2DData,
+                                                                                        IMG_UINT32 ui32CCBAllocSizeLog2,
+                                                                                        IMG_UINT32 ui32CCBMaxAllocSizeLog2,
+                                                                                        IMG_UINT32 ui32ContextFlags,
+                                                                                        IMG_UINT64 ui64RobustnessAddress)
+{
+       PVRSRV_ERROR eError;
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       ps2DData->psBufferSyncContext =
+               pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice,
+                                                                          "rogue-tqtla");
+       if (IS_ERR(ps2DData->psBufferSyncContext))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: failed to create buffer_sync context (err=%ld)",
+                                __func__, PTR_ERR(ps2DData->psBufferSyncContext)));
+
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto fail_buffer_sync_context_create;
+       }
+#endif
+
+       eError = FWCommonContextAllocate(psConnection,
+                                        psDeviceNode,
+                                        REQ_TYPE_TQ_2D,
+                                        RGXFWIF_DM_2D,
+                                                                        NULL,
+                                        NULL,
+                                        0,
+                                        psFWMemContextMemDesc,
+                                        NULL,
+                                        ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TQ2D_CCB_SIZE_LOG2,
+                                        ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TQ2D_CCB_MAX_SIZE_LOG2,
+                                        ui32ContextFlags,
+                                        ui32Priority,
+                                        UINT_MAX, /* max deadline MS */
+                                        ui64RobustnessAddress,
+                                        psInfo,
+                                        &ps2DData->psServerCommonContext);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_contextalloc;
+       }
+
+       ps2DData->ui32Priority = ui32Priority;
+       return PVRSRV_OK;
+
+fail_contextalloc:
+#if defined(SUPPORT_BUFFER_SYNC)
+       pvr_buffer_sync_context_destroy(ps2DData->psBufferSyncContext);
+       ps2DData->psBufferSyncContext = NULL;
+fail_buffer_sync_context_create:
+#endif
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+
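+/*
+ * _Destroy2DTransferContext
+ *
+ * Asks the firmware to clean up the 2D common context. PVRSRV_ERROR_RETRY is
+ * returned unchanged so the caller can try again while the firmware still
+ * holds the context; on success the common context and the buffer-sync
+ * context are freed.
+ */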
+static PVRSRV_ERROR _Destroy2DTransferContext(RGX_SERVER_TQ_2D_DATA *ps2DData,
+                                                                                         PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                                         IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError;
+
+       /* Check if the FW has finished with this resource ... */
+       eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+                                                                                         ps2DData->psServerCommonContext,
+                                                                                         RGXFWIF_DM_2D,
+                                                                                         ui32PDumpFlags);
+       if (eError == PVRSRV_ERROR_RETRY)
+       {
+               return eError;
+       }
+       else if (eError != PVRSRV_OK)
+       {
+               PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+                                __func__,
+                                PVRSRVGetErrorString(eError)));
+               return eError;
+       }
+
+       /* ... it has, so we can free its resources */
+       FWCommonContextFree(ps2DData->psServerCommonContext);
+       ps2DData->psServerCommonContext = NULL;
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       pvr_buffer_sync_context_destroy(ps2DData->psBufferSyncContext);
+       ps2DData->psBufferSyncContext = NULL;
+#endif
+
+       return PVRSRV_OK;
+}
+#endif /* #if defined(RGX_FEATURE_TLA_BIT_MASK) */
+
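+/*
+ * _Destroy3DTransferContext
+ *
+ * Asks the firmware to clean up the 3D common context (returning
+ * PVRSRV_ERROR_RETRY unchanged if it is still in use), then frees the
+ * firmware context suspend-state buffer, the common context and the
+ * buffer-sync context.
+ */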
+static PVRSRV_ERROR _Destroy3DTransferContext(RGX_SERVER_TQ_3D_DATA *ps3DData,
+                                                                                         PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                                         IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError;
+
+       /* Check if the FW has finished with this resource ... */
+       eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+                                                                                         ps3DData->psServerCommonContext,
+                                                                                         RGXFWIF_DM_3D,
+                                                                                         ui32PDumpFlags);
+       if (eError == PVRSRV_ERROR_RETRY)
+       {
+               return eError;
+       }
+       else if (eError != PVRSRV_OK)
+       {
+               PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+                                __func__,
+                                PVRSRVGetErrorString(eError)));
+               return eError;
+       }
+
+       /* ... it has, so we can free its resources */
+       DevmemFwUnmapAndFree(psDeviceNode->pvDevice, ps3DData->psFWContextStateMemDesc);
+       FWCommonContextFree(ps3DData->psServerCommonContext);
+       ps3DData->psServerCommonContext = NULL;
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       pvr_buffer_sync_context_destroy(ps3DData->psBufferSyncContext);
+       ps3DData->psBufferSyncContext = NULL;
+#endif
+
+       return PVRSRV_OK;
+}
+
+
+/*
+ * PVRSRVRGXCreateTransferContextKM
+ */
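+/*
+ * Creation order: server-side structure, FW transfer context buffer, lock,
+ * optional framework command buffer, then the 3D context (always) and the
+ * 2D context (only when the TLA feature is present). On success the context
+ * is added to the device's transfer context list; on failure the error
+ * labels unwind the above in reverse order.
+ */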
+PVRSRV_ERROR PVRSRVRGXCreateTransferContextKM(CONNECTION_DATA          *psConnection,
+                                                                                  PVRSRV_DEVICE_NODE           *psDeviceNode,
+                                                                                  IMG_UINT32                           ui32Priority,
+                                                                                  IMG_UINT32                           ui32FrameworkCommandSize,
+                                                                                  IMG_PBYTE                            pabyFrameworkCommand,
+                                                                                  IMG_HANDLE                           hMemCtxPrivData,
+                                                                                  IMG_UINT32                           ui32PackedCCBSizeU8888,
+                                                                                  IMG_UINT32                           ui32ContextFlags,
+                                                                                  IMG_UINT64                           ui64RobustnessAddress,
+                                                                                  RGX_SERVER_TQ_CONTEXT        **ppsTransferContext,
+                                                                                  PMR                                          **ppsCLIPMRMem,
+                                                                                  PMR                                          **ppsUSCPMRMem)
+{
+       RGX_SERVER_TQ_CONTEXT   *psTransferContext;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       DEVMEM_MEMDESC                  *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+       RGX_COMMON_CONTEXT_INFO sInfo = {NULL};
+       PVRSRV_ERROR                    eError = PVRSRV_OK;
+
+       /* Allocate the server side structure */
+       *ppsTransferContext = NULL;
+       psTransferContext = OSAllocZMem(sizeof(*psTransferContext));
+       if (psTransferContext == NULL)
+       {
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       /*
+               Create the FW transfer context, this has the TQ common
+               context embedded within it
+        */
+       eError = DevmemFwAllocate(psDevInfo,
+                       sizeof(RGXFWIF_FWTRANSFERCONTEXT),
+                       RGX_FWCOMCTX_ALLOCFLAGS,
+                       "FwTransferContext",
+                       &psTransferContext->psFWTransferContextMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_fwtransfercontext;
+       }
+
+       eError = OSLockCreate(&psTransferContext->hLock);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)",
+                                                                       __func__,
+                                                                       PVRSRVGetErrorString(eError)));
+               goto fail_createlock;
+       }
+
+       psTransferContext->psDeviceNode = psDeviceNode;
+
+       if (ui32FrameworkCommandSize)
+       {
+               /*
+                * Create the FW framework buffer
+                */
+               eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+                               &psTransferContext->psFWFrameworkMemDesc,
+                               ui32FrameworkCommandSize);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                               "%s: Failed to allocate firmware GPU framework state (%s)",
+                                               __func__,
+                                               PVRSRVGetErrorString(eError)));
+                       goto fail_frameworkcreate;
+               }
+
+               /* Copy the Framework client data into the framework buffer */
+               eError = PVRSRVRGXFrameworkCopyCommand(psDeviceNode,
+                               psTransferContext->psFWFrameworkMemDesc,
+                               pabyFrameworkCommand,
+                               ui32FrameworkCommandSize);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                               "%s: Failed to populate the framework buffer (%s)",
+                                               __func__,
+                               PVRSRVGetErrorString(eError)));
+                       goto fail_frameworkcopy;
+               }
+
+               sInfo.psFWFrameworkMemDesc = psTransferContext->psFWFrameworkMemDesc;
+       }
+
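+       /*
+        * ui32PackedCCBSizeU8888 is assumed to pack four log2 CCB sizes into a
+        * single 32-bit word, one per byte (U32toU8_Unpack1 presumably taking
+        * the least significant byte): fields 1/2 feed the 2D alloc/max sizes
+        * and fields 3/4 the 3D alloc/max sizes. A zero field makes the create
+        * helpers fall back to their RGX_TQ*_CCB_*SIZE_LOG2 defaults.
+        */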
+       eError = _Create3DTransferContext(psConnection,
+                                                                         psDeviceNode,
+                                                                         psTransferContext->psFWTransferContextMemDesc,
+                                                                         offsetof(RGXFWIF_FWTRANSFERCONTEXT, sTQContext),
+                                                                         psFWMemContextMemDesc,
+                                                                         ui32Priority,
+                                                                         &sInfo,
+                                                                         &psTransferContext->s3DData,
+                                                                         U32toU8_Unpack3(ui32PackedCCBSizeU8888),
+                                                                         U32toU8_Unpack4(ui32PackedCCBSizeU8888),
+                                                                         ui32ContextFlags,
+                                                                         ui64RobustnessAddress);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_3dtransfercontext;
+       }
+       psTransferContext->ui32Flags |= RGX_SERVER_TQ_CONTEXT_FLAGS_3D;
+
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))
+       {
+               eError = _Create2DTransferContext(psConnection,
+                                                                                 psDeviceNode,
+                                                                                 psFWMemContextMemDesc,
+                                                                                 ui32Priority,
+                                                                                 &sInfo,
+                                                                                 &psTransferContext->s2DData,
+                                                                                 U32toU8_Unpack1(ui32PackedCCBSizeU8888),
+                                                                                 U32toU8_Unpack2(ui32PackedCCBSizeU8888),
+                                                                                 ui32ContextFlags,
+                                                                                 ui64RobustnessAddress);
+               if (eError != PVRSRV_OK)
+               {
+                       goto fail_2dtransfercontext;
+               }
+               psTransferContext->ui32Flags |= RGX_SERVER_TQ_CONTEXT_FLAGS_2D;
+       }
+#endif
+
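+       /* Hand the TQ shader PMRs back to the caller; PVRSRVTQAcquireShaders is
+        * assumed to populate both the CLI and USC out-parameters.
+        */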
+       PVRSRVTQAcquireShaders(psDeviceNode, ppsCLIPMRMem, ppsUSCPMRMem);
+
+       OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock);
+       dllist_add_to_tail(&(psDevInfo->sTransferCtxtListHead), &(psTransferContext->sListNode));
+       OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock);
+       *ppsTransferContext = psTransferContext;
+
+       return PVRSRV_OK;
+
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+fail_2dtransfercontext:
+       _Destroy3DTransferContext(&psTransferContext->s3DData,
+                                                         psTransferContext->psDeviceNode,
+                                                         psTransferContext->ui32PDumpFlags);
+#endif
+fail_3dtransfercontext:
+fail_frameworkcopy:
+       if (psTransferContext->psFWFrameworkMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc);
+       }
+fail_frameworkcreate:
+       OSLockDestroy(psTransferContext->hLock);
+fail_createlock:
+       DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWTransferContextMemDesc);
+fail_fwtransfercontext:
+       OSFreeMem(psTransferContext);
+       PVR_ASSERT(eError != PVRSRV_OK);
+       *ppsTransferContext = NULL;
+       return eError;
+}
+
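+/*
+ * PVRSRVRGXDestroyTransferContextKM
+ *
+ * Removes the context from the device's transfer context list, destroys the
+ * 2D sub-context (when present and TLA is supported) and then the 3D
+ * sub-context. If either destroy fails, including with PVRSRV_ERROR_RETRY
+ * while the firmware still holds the context, the node is re-added to the
+ * list and the error is returned so the caller can retry. Otherwise the
+ * per-prepare sync address lists, framework buffer, FW transfer context,
+ * lock and the context structure itself are freed.
+ */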
+PVRSRV_ERROR PVRSRVRGXDestroyTransferContextKM(RGX_SERVER_TQ_CONTEXT *psTransferContext)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psTransferContext->psDeviceNode->pvDevice;
+       IMG_UINT32 i;
+
+       /* Remove the node from the list before calling destroy: a successful
+        * destroy will invalidate the node, and it must be re-added to the
+        * list if destroy fails.
+        */
+       OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock);
+       dllist_remove_node(&(psTransferContext->sListNode));
+       OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock);
+
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+       if ((psTransferContext->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D) &&
+                       (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
+       {
+               eError = _Destroy2DTransferContext(&psTransferContext->s2DData,
+                                                                                  psTransferContext->psDeviceNode,
+                                                                                  PDUMP_FLAGS_CONTINUOUS);
+               if (eError != PVRSRV_OK)
+               {
+                       goto fail_destroy2d;
+               }
+               /* We've freed the 2D context, don't try to free it again */
+               psTransferContext->ui32Flags &= ~RGX_SERVER_TQ_CONTEXT_FLAGS_2D;
+       }
+#endif
+
+       if (psTransferContext->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D)
+       {
+               eError = _Destroy3DTransferContext(&psTransferContext->s3DData,
+                                                                                  psTransferContext->psDeviceNode,
+                                                                                  PDUMP_FLAGS_CONTINUOUS);
+               if (eError != PVRSRV_OK)
+               {
+                       goto fail_destroy3d;
+               }
+               /* We've freed the 3D context, don't try to free it again */
+               psTransferContext->ui32Flags &= ~RGX_SERVER_TQ_CONTEXT_FLAGS_3D;
+       }
+
+       /* free any resources within the per-prepare UFO address stores */
+       for (i = 0; i < TQ_MAX_PREPARES_PER_SUBMIT; i++)
+       {
+               SyncAddrListDeinit(&psTransferContext->asSyncAddrListFence[i]);
+               SyncAddrListDeinit(&psTransferContext->asSyncAddrListUpdate[i]);
+       }
+
+       if (psTransferContext->psFWFrameworkMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc);
+       }
+
+       DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWTransferContextMemDesc);
+
+       OSLockDestroy(psTransferContext->hLock);
+
+       OSFreeMem(psTransferContext);
+
+       return PVRSRV_OK;
+
+fail_destroy3d:
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+fail_destroy2d:
+#endif
+       OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock);
+       dllist_add_to_tail(&(psDevInfo->sTransferCtxtListHead), &(psTransferContext->sListNode));
+       OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock);
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+/*
+ * PVRSRVRGXSubmitTransferKM
+ */
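+/*
+ * Submits a batch of transfer prepares on a transfer context: the input
+ * check fence is resolved into sync checkpoints, each prepare is routed to
+ * the 3D queue or (when TLA is supported) the 2D queue, CCB command helpers
+ * are initialised per prepare, CCB space is acquired and released for each
+ * queue with work, and kernel CCB kick commands are then scheduled. Output
+ * fences, when requested, are created on the supplied 2D/3D update timelines
+ * and attached to the last command of the matching type.
+ */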
+PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT   *psTransferContext,
+                                                                          IMG_UINT32                           ui32PrepareCount,
+                                                                          IMG_UINT32                           *paui32ClientUpdateCount,
+                                                                          SYNC_PRIMITIVE_BLOCK         ***papauiClientUpdateUFODevVarBlock,
+                                                                          IMG_UINT32                           **papaui32ClientUpdateSyncOffset,
+                                                                          IMG_UINT32                           **papaui32ClientUpdateValue,
+                                                                          PVRSRV_FENCE                         iCheckFence,
+                                                                          PVRSRV_TIMELINE                      i2DUpdateTimeline,
+                                                                          PVRSRV_FENCE                         *pi2DUpdateFence,
+                                                                          PVRSRV_TIMELINE                      i3DUpdateTimeline,
+                                                                          PVRSRV_FENCE                         *pi3DUpdateFence,
+                                                                          IMG_CHAR                                     szFenceName[32],
+                                                                          IMG_UINT32                           *paui32FWCommandSize,
+                                                                          IMG_UINT8                            **papaui8FWCommand,
+                                                                          IMG_UINT32                           *pui32TQPrepareFlags,
+                                                                          IMG_UINT32                           ui32ExtJobRef,
+                                                                          IMG_UINT32                           ui32SyncPMRCount,
+                                                                          IMG_UINT32                           *paui32SyncPMRFlags,
+                                                                          PMR                                          **ppsSyncPMRs)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       RGX_CCB_CMD_HELPER_DATA *pas3DCmdHelper;
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+       RGX_CCB_CMD_HELPER_DATA *pas2DCmdHelper;
+#endif
+       IMG_UINT32 ui323DCmdCount = 0;
+       IMG_UINT32 ui323DCmdLast = 0;
+       IMG_UINT32 ui323DCmdOffset = 0;
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+       IMG_UINT32 ui322DCmdCount = 0;
+       IMG_UINT32 ui322DCmdLast = 0;
+       IMG_UINT32 ui322DCmdOffset = 0;
+#endif
+       IMG_UINT32 ui32PDumpFlags = PDUMP_FLAGS_NONE;
+       IMG_UINT32 i;
+       IMG_UINT64 uiCheckFenceUID = 0;
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+       IMG_UINT64 ui2DUpdateFenceUID = 0;
+#endif
+       IMG_UINT64 ui3DUpdateFenceUID = 0;
+
+       PSYNC_CHECKPOINT ps3DUpdateSyncCheckpoint = NULL;
+       PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+       IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+       IMG_UINT32 *pui323DIntAllocatedUpdateValues = NULL;
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+       PSYNC_CHECKPOINT ps2DUpdateSyncCheckpoint = NULL;
+       IMG_UINT32 *pui322DIntAllocatedUpdateValues = NULL;
+       PVRSRV_CLIENT_SYNC_PRIM *ps2DFenceTimelineUpdateSync = NULL;
+       IMG_UINT32 ui322DFenceTimelineUpdateValue = 0;
+       void *pv2DUpdateFenceFinaliseData = NULL;
+#endif
+       PVRSRV_CLIENT_SYNC_PRIM *ps3DFenceTimelineUpdateSync = NULL;
+       IMG_UINT32 ui323DFenceTimelineUpdateValue = 0;
+       void *pv3DUpdateFenceFinaliseData = NULL;
+#if defined(SUPPORT_BUFFER_SYNC)
+       PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL;
+       struct pvr_buffer_sync_append_data *psBufferSyncData = NULL;
+       PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL;
+       IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVRSRV_ERROR eError2;
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+       PVRSRV_FENCE i2DUpdateFence = PVRSRV_NO_FENCE;
+#endif
+       PVRSRV_FENCE i3DUpdateFence = PVRSRV_NO_FENCE;
+       IMG_UINT32   ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal);
+       IMG_UINT32   ui32PreparesDone = 0;
+
+       PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+       PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+       PRGXFWIF_UFO_ADDR       pRMWUFOAddr;
+
+#if !defined(RGX_FEATURE_TLA_BIT_MASK)
+       PVR_UNREFERENCED_PARAMETER(i2DUpdateTimeline);
+       PVR_UNREFERENCED_PARAMETER(pi2DUpdateFence);
+#endif
+
+       RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psDeviceNode->pvDevice,
+                                 &pPreAddr,
+                                 &pPostAddr,
+                                 &pRMWUFOAddr);
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+       if (i2DUpdateTimeline != PVRSRV_NO_TIMELINE && !pi2DUpdateFence)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+#endif
+       if (i3DUpdateTimeline != PVRSRV_NO_TIMELINE && !pi3DUpdateFence)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       /* Validate sync prim fence/update value ptrs
+        * for each prepare.
+        */
+       {
+               IMG_UINT32 ui32Prepare;
+               IMG_UINT32 *pui32UpdateCount = paui32ClientUpdateCount;
+               IMG_UINT32 **papui32UpdateValue = papaui32ClientUpdateValue;
+
+               /* Check that we have not been given a null ptr for
+                * update count parameters.
+                */
+               PVR_LOG_RETURN_IF_FALSE((paui32ClientUpdateCount != NULL),
+                                       "paui32ClientUpdateCount NULL",
+                                       PVRSRV_ERROR_INVALID_PARAMS);
+
+               for (ui32Prepare=0; ui32Prepare<ui32PrepareCount; ui32Prepare++)
+               {
+                       /* Ensure we haven't been given a null ptr to
+                        * update values if we have been told we
+                        * have updates for this prepare
+                        */
+                       if (*pui32UpdateCount > 0)
+                       {
+                               PVR_LOG_RETURN_IF_FALSE(*papui32UpdateValue != NULL,
+                                                       "paui32ClientUpdateValue NULL but "
+                                                       "ui32ClientUpdateCount > 0",
+                                                       PVRSRV_ERROR_INVALID_PARAMS);
+                       }
+                       /* Advance local ptr to update values ptr for next prepare. */
+                       papui32UpdateValue++;
+                       /* Advance local ptr to update count for next prepare. */
+                       pui32UpdateCount++;
+               }
+       }
+
+       /* Ensure the string is null-terminated (Required for safety) */
+       szFenceName[31] = '\0';
+
+       if ((ui32PrepareCount == 0) || (ui32PrepareCount > TQ_MAX_PREPARES_PER_SUBMIT))
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       if (ui32SyncPMRCount != 0)
+       {
+               if (!ppsSyncPMRs)
+               {
+                       return PVRSRV_ERROR_INVALID_PARAMS;
+               }
+
+#if defined(SUPPORT_BUFFER_SYNC)
+               /* PMR sync is valid only when there is no batching */
+               if (ui32PrepareCount != 1)
+#endif
+               {
+                       return PVRSRV_ERROR_INVALID_PARAMS;
+               }
+       }
+
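+       /* Serialise against other submissions on this transfer context; the
+        * lock is held for the remainder of the submission path.
+        */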
+       OSLockAcquire(psTransferContext->hLock);
+
+       /* We can't allocate the required amount of stack space on all consumer architectures */
+       pas3DCmdHelper = OSAllocMem(sizeof(*pas3DCmdHelper) * ui32PrepareCount);
+       if (pas3DCmdHelper == NULL)
+       {
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto fail_alloc3dhelper;
+       }
+
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+       pas2DCmdHelper = OSAllocMem(sizeof(*pas2DCmdHelper) * ui32PrepareCount);
+       if (pas2DCmdHelper == NULL)
+       {
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto fail_alloc2dhelper;
+       }
+#endif
+
+       if (iCheckFence != PVRSRV_NO_FENCE)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psDeviceNode->hSyncCheckpointContext=<%p>...", __func__, iCheckFence, (void*)psDeviceNode->hSyncCheckpointContext));
+               /* Resolve the sync checkpoints that make up the input fence */
+               eError = SyncCheckpointResolveFence(psDeviceNode->hSyncCheckpointContext,
+                                                                                       iCheckFence,
+                                                                                       &ui32FenceSyncCheckpointCount,
+                                                                                       &apsFenceSyncCheckpoints,
+                                                                                       &uiCheckFenceUID,
+                                                                                       ui32PDumpFlags);
+               if (eError != PVRSRV_OK)
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __func__, eError));
+                       goto fail_resolve_fencesync_input_fence;
+               }
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __func__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints));
+#if defined(TRANSFER_CHECKPOINT_DEBUG)
+               if (ui32FenceSyncCheckpointCount > 0)
+               {
+                       IMG_UINT32 ii;
+                       for (ii=0; ii<ui32FenceSyncCheckpointCount; ii++)
+                       {
+                               PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints + ii);
+                               CHKPT_DBG((PVR_DBG_ERROR, "%s:    apsFenceSyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint));
+                       }
+               }
+#endif
+       }
+       /*
+               Ensure we do the right thing for server syncs which cross call boundaries
+       */
+       for (i=0;i<ui32PrepareCount;i++)
+       {
+               if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 3D))
+               {
+                       ui323DCmdLast++;
+               }
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+               else if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 2D) &&
+                               (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
+               {
+                       ui322DCmdLast++;
+               }
+#endif
+       }
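+       /*
+        * ui323DCmdLast and ui322DCmdLast now hold the number of prepares of
+        * each type; they are compared against the running counts below so the
+        * output fence update is only attached to the final prepare of a type.
+        */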
+
+       /*
+               Init the command helper commands for all the prepares
+       */
+       for (i=0;i<ui32PrepareCount;i++)
+       {
+               RGX_CLIENT_CCB *psClientCCB;
+               RGX_SERVER_COMMON_CONTEXT *psServerCommonCtx;
+               IMG_CHAR *pszCommandName;
+               RGX_CCB_CMD_HELPER_DATA *psCmdHelper;
+               RGXFWIF_CCB_CMD_TYPE eType;
+               PRGXFWIF_UFO_ADDR *pauiIntFenceUFOAddress = NULL;
+               PRGXFWIF_UFO_ADDR *pauiIntUpdateUFOAddress = NULL;
+               SYNC_ADDR_LIST *psSyncAddrListFence = &psTransferContext->asSyncAddrListFence[i];
+               SYNC_ADDR_LIST *psSyncAddrListUpdate = &psTransferContext->asSyncAddrListUpdate[i];
+               IMG_UINT32 ui32IntClientFenceCount = 0U;
+               IMG_UINT32 ui32IntClientUpdateCount = paui32ClientUpdateCount[i];
+               IMG_UINT32 *paui32IntUpdateValue = papaui32ClientUpdateValue[i];
+#if defined(SUPPORT_BUFFER_SYNC)
+               struct pvr_buffer_sync_context *psBufferSyncContext;
+#endif
+
+               PVRSRV_FENCE *piUpdateFence = NULL;
+               PVRSRV_TIMELINE iUpdateTimeline = PVRSRV_NO_TIMELINE;
+               void **ppvUpdateFenceFinaliseData = NULL;
+               PSYNC_CHECKPOINT * ppsUpdateSyncCheckpoint = NULL;
+               PVRSRV_CLIENT_SYNC_PRIM **ppsFenceTimelineUpdateSync = NULL;
+               IMG_UINT32 *pui32FenceTimelineUpdateValue = NULL;
+               IMG_UINT32 **ppui32IntAllocatedUpdateValues = NULL;
+               IMG_BOOL bCheckFence = IMG_FALSE;
+               IMG_BOOL bUpdateFence = IMG_FALSE;
+               IMG_UINT64 *puiUpdateFenceUID = NULL;
+
+               IMG_BOOL bCCBStateOpen = IMG_FALSE;
+
+               if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 3D))
+               {
+                       psServerCommonCtx = psTransferContext->s3DData.psServerCommonContext;
+                       psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx);
+                       pszCommandName = "TQ-3D";
+                       psCmdHelper = &pas3DCmdHelper[ui323DCmdCount++];
+                       eType = RGXFWIF_CCB_CMD_TYPE_TQ_3D;
+#if defined(SUPPORT_BUFFER_SYNC)
+                       psBufferSyncContext = psTransferContext->s3DData.psBufferSyncContext;
+#endif
+                       bCheckFence = ui323DCmdCount == 1;
+                       bUpdateFence = ui323DCmdCount == ui323DCmdLast
+                               && i3DUpdateTimeline != PVRSRV_NO_TIMELINE;
+
+                       if (bUpdateFence)
+                       {
+                               piUpdateFence = &i3DUpdateFence;
+                               iUpdateTimeline = i3DUpdateTimeline;
+                               ppvUpdateFenceFinaliseData = &pv3DUpdateFenceFinaliseData;
+                               ppsUpdateSyncCheckpoint = &ps3DUpdateSyncCheckpoint;
+                               ppsFenceTimelineUpdateSync = &ps3DFenceTimelineUpdateSync;
+                               pui32FenceTimelineUpdateValue = &ui323DFenceTimelineUpdateValue;
+                               ppui32IntAllocatedUpdateValues = &pui323DIntAllocatedUpdateValues;
+                               puiUpdateFenceUID = &ui3DUpdateFenceUID;
+                       }
+               }
+               else
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+               if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 2D) &&
+                               (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
+               {
+                       psServerCommonCtx = psTransferContext->s2DData.psServerCommonContext;
+                       psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx);
+                       pszCommandName = "TQ-2D";
+                       psCmdHelper = &pas2DCmdHelper[ui322DCmdCount++];
+                       eType = RGXFWIF_CCB_CMD_TYPE_TQ_2D;
+#if defined(SUPPORT_BUFFER_SYNC)
+                       psBufferSyncContext = psTransferContext->s2DData.psBufferSyncContext;
+#endif
+                       bCheckFence = ui322DCmdCount == 1;
+                       bUpdateFence = ui322DCmdCount == ui322DCmdLast
+                               && i2DUpdateTimeline != PVRSRV_NO_TIMELINE;
+
+                       if (bUpdateFence)
+                       {
+                               piUpdateFence = &i2DUpdateFence;
+                               iUpdateTimeline = i2DUpdateTimeline;
+                               ppvUpdateFenceFinaliseData = &pv2DUpdateFenceFinaliseData;
+                               ppsUpdateSyncCheckpoint = &ps2DUpdateSyncCheckpoint;
+                               ppsFenceTimelineUpdateSync = &ps2DFenceTimelineUpdateSync;
+                               pui32FenceTimelineUpdateValue = &ui322DFenceTimelineUpdateValue;
+                               ppui32IntAllocatedUpdateValues = &pui322DIntAllocatedUpdateValues;
+                               puiUpdateFenceUID = &ui2DUpdateFenceUID;
+                       }
+               }
+               else
+#endif
+               {
+                       eError = PVRSRV_ERROR_INVALID_PARAMS;
+                       goto fail_prepare_loop;
+               }
+
+               if (i == 0)
+               {
+                       ui32PDumpFlags = ((pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_PDUMPCONTINUOUS) != 0) ? PDUMP_FLAGS_CONTINUOUS : PDUMP_FLAGS_NONE;
+                       PDUMPCOMMENTWITHFLAGS(psDeviceNode, ui32PDumpFlags,
+                                       "%s Command Server Submit on FWCtx %08x", pszCommandName, FWCommonContextGetFWAddress(psServerCommonCtx).ui32Addr);
+                       psTransferContext->ui32PDumpFlags |= ui32PDumpFlags;
+               }
+               else
+               {
+                       IMG_UINT32 ui32NewPDumpFlags = ((pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_PDUMPCONTINUOUS) != 0) ? PDUMP_FLAGS_CONTINUOUS : PDUMP_FLAGS_NONE;
+                       if (ui32NewPDumpFlags != ui32PDumpFlags)
+                       {
+                               eError = PVRSRV_ERROR_INVALID_PARAMS;
+                               PVR_DPF((PVR_DBG_ERROR, "%s: Mixing of continuous and non-continuous commands in a batch is not permitted", __func__));
+                               goto fail_prepare_loop;
+                       }
+               }
+
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncAddrListPopulate(psTransferContext->sSyncAddrListFence, %d fences)", __func__, ui32IntClientFenceCount));
+               eError = SyncAddrListPopulate(psSyncAddrListFence,
+                                                                               0,
+                                                                               NULL,
+                                                                               NULL);
+               if (eError != PVRSRV_OK)
+               {
+                       goto fail_prepare_loop;
+               }
+
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncAddrListPopulate(psTransferContext->asSyncAddrListUpdate[], %d updates)", __func__, ui32IntClientUpdateCount));
+               eError = SyncAddrListPopulate(psSyncAddrListUpdate,
+                                                                               ui32IntClientUpdateCount,
+                                                                               papauiClientUpdateUFODevVarBlock[i],
+                                                                               papaui32ClientUpdateSyncOffset[i]);
+               if (eError != PVRSRV_OK)
+               {
+                       goto fail_prepare_loop;
+               }
+               if (!pauiIntUpdateUFOAddress)
+               {
+                       pauiIntUpdateUFOAddress = psSyncAddrListUpdate->pasFWAddrs;
+               }
+
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   (after sync prims) ui32IntClientUpdateCount=%d", __func__, ui32IntClientUpdateCount));
+               if (ui32SyncPMRCount)
+               {
+#if defined(SUPPORT_BUFFER_SYNC)
+                       int err;
+
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s:   Calling pvr_buffer_sync_resolve_and_create_fences", __func__));
+                       err = pvr_buffer_sync_resolve_and_create_fences(psBufferSyncContext,
+                                                                       psTransferContext->psDeviceNode->hSyncCheckpointContext,
+                                                                       ui32SyncPMRCount,
+                                                                       ppsSyncPMRs,
+                                                                       paui32SyncPMRFlags,
+                                                                       &ui32BufferFenceSyncCheckpointCount,
+                                                                       &apsBufferFenceSyncCheckpoints,
+                                                                       &psBufferUpdateSyncCheckpoint,
+                                                                       &psBufferSyncData);
+                       if (err)
+                       {
+                               switch (err)
+                               {
+                                       case -EINTR:
+                                               eError = PVRSRV_ERROR_RETRY;
+                                               break;
+                                       case -ENOMEM:
+                                               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                                               break;
+                                       default:
+                                               eError = PVRSRV_ERROR_INVALID_PARAMS;
+                                               break;
+                               }
+
+                               if (eError != PVRSRV_ERROR_RETRY)
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s:   pvr_buffer_sync_resolve_and_create_fences failed (%s)", __func__, PVRSRVGetErrorString(eError)));
+                               }
+                               goto fail_resolve_buffersync_input_fence;
+                       }
+
+                       /* Append buffer sync fences */
+                       if (ui32BufferFenceSyncCheckpointCount > 0)
+                       {
+                               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d buffer sync checkpoints to TQ Fence (psSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>)...", __func__, ui32BufferFenceSyncCheckpointCount, (void*)psSyncAddrListFence , (void*)pauiIntFenceUFOAddress));
+                               SyncAddrListAppendAndDeRefCheckpoints(psSyncAddrListFence,
+                                                                                                         ui32BufferFenceSyncCheckpointCount,
+                                                                                                         apsBufferFenceSyncCheckpoints);
+                               if (!pauiIntFenceUFOAddress)
+                               {
+                                       pauiIntFenceUFOAddress = psSyncAddrListFence->pasFWAddrs;
+                               }
+                               ui32IntClientFenceCount += ui32BufferFenceSyncCheckpointCount;
+                       }
+
+                       if (psBufferUpdateSyncCheckpoint)
+                       {
+                               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append 1 buffer sync checkpoint<%p> to TQ Update (&psTransferContext->asSyncAddrListUpdate[i]=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)psBufferUpdateSyncCheckpoint, (void*)psSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress));
+                               /* Append the update (from output fence) */
+                               SyncAddrListAppendCheckpoints(psSyncAddrListUpdate,
+                                                                                         1,
+                                                                                         &psBufferUpdateSyncCheckpoint);
+                               if (!pauiIntUpdateUFOAddress)
+                               {
+                                       pauiIntUpdateUFOAddress = psSyncAddrListUpdate->pasFWAddrs;
+                               }
+                               ui32IntClientUpdateCount++;
+                       }
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s:   (after buffer_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount));
+#else /* defined(SUPPORT_BUFFER_SYNC) */
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", __func__, ui32SyncPMRCount));
+                       PVR_DPF((PVR_DBG_ERROR, "%s:   <--EXIT(%d)", __func__, PVRSRV_ERROR_INVALID_PARAMS));
+                       OSLockRelease(psTransferContext->hLock);
+                       return PVRSRV_ERROR_INVALID_PARAMS;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+               }
+
+               /* Create the output fence (if required) */
+               if (bUpdateFence)
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (piUpdateFence=%p, iUpdateTimeline=%d, psTransferContext->psDeviceNode->hSyncCheckpointContext=<%p>)", __func__, piUpdateFence, iUpdateTimeline, (void*)psDeviceNode->hSyncCheckpointContext));
+                       eError = SyncCheckpointCreateFence(psDeviceNode,
+                                                          szFenceName,
+                                                          iUpdateTimeline,
+                                                          psDeviceNode->hSyncCheckpointContext,
+                                                          piUpdateFence,
+                                                          puiUpdateFenceUID,
+                                                          ppvUpdateFenceFinaliseData,
+                                                          ppsUpdateSyncCheckpoint,
+                                                          (void*)ppsFenceTimelineUpdateSync,
+                                                          pui32FenceTimelineUpdateValue,
+                                                          ui32PDumpFlags);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                               "%s:   SyncCheckpointCreateFence failed (%s)",
+                                               __func__,
+                                               PVRSRVGetErrorString(eError)));
+                               goto fail_prepare_loop;
+                       }
+
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: returned from SyncCheckpointCreateFence (piUpdateFence=%p)", __func__, piUpdateFence));
+
+                       /* Append the sync prim update for the timeline (if required) */
+                       if (*ppsFenceTimelineUpdateSync)
+                       {
+                               IMG_UINT32 *pui32TimelineUpdateWp = NULL;
+
+                               /* Allocate memory to hold the list of update values (including our timeline update) */
+                               *ppui32IntAllocatedUpdateValues = OSAllocMem(sizeof(**ppui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+                               if (!*ppui32IntAllocatedUpdateValues)
+                               {
+                                       /* Failed to allocate memory */
+                                       eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                                       goto fail_prepare_loop;
+                               }
+                               OSCachedMemSet(*ppui32IntAllocatedUpdateValues, 0xbb, sizeof(**ppui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+#if defined(SUPPORT_BUFFER_SYNC)
+                               if (psBufferUpdateSyncCheckpoint)
+                               {
+                                       /* Copy the update values into the new memory, then append our timeline update value */
+                                       OSCachedMemCopy(*ppui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(**ppui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount-1));
+                                       pui32TimelineUpdateWp = *ppui32IntAllocatedUpdateValues + (ui32IntClientUpdateCount-1);
+                               }
+                               else
+#endif
+                               {
+                                       /* Copy the update values into the new memory, then append our timeline update value */
+                                       OSCachedMemCopy(*ppui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(**ppui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount);
+                                       pui32TimelineUpdateWp = *ppui32IntAllocatedUpdateValues + ui32IntClientUpdateCount;
+                               }
+                               CHKPT_DBG((PVR_DBG_ERROR, "%s: Appending the additional update value 0x%x", __func__, *pui32FenceTimelineUpdateValue));
+                               /* Now set the additional update value */
+                               *pui32TimelineUpdateWp = *pui32FenceTimelineUpdateValue;
+#if defined(TRANSFER_CHECKPOINT_DEBUG)
+                               if (ui32IntClientUpdateCount > 0)
+                               {
+                                       IMG_UINT32 iii;
+                                       IMG_UINT32 *pui32Tmp = (IMG_UINT32*)*ppui32IntAllocatedUpdateValues;
+
+                                       for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+                                       {
+                                               CHKPT_DBG((PVR_DBG_ERROR, "%s: *ppui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                                               pui32Tmp++;
+                                       }
+                               }
+#endif
+                               /* Now append the timeline sync prim addr to the transfer context update list */
+                               SyncAddrListAppendSyncPrim(psSyncAddrListUpdate,
+                                                          *ppsFenceTimelineUpdateSync);
+                               ui32IntClientUpdateCount++;
+#if defined(TRANSFER_CHECKPOINT_DEBUG)
+                               if (ui32IntClientUpdateCount > 0)
+                               {
+                                       IMG_UINT32 iii;
+                                       IMG_UINT32 *pui32Tmp = (IMG_UINT32*)*ppui32IntAllocatedUpdateValues;
+
+                                       for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+                                       {
+                                               CHKPT_DBG((PVR_DBG_ERROR, "%s: *ppui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                                               pui32Tmp++;
+                                       }
+                               }
+#endif
+                               /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */
+                               CHKPT_DBG((PVR_DBG_ERROR, "%s: set paui32IntUpdateValue<%p> to point to *ppui32IntAllocatedUpdateValues<%p>", __func__, (void*)paui32IntUpdateValue, (void*)*ppui32IntAllocatedUpdateValues));
+                               paui32IntUpdateValue = *ppui32IntAllocatedUpdateValues;
+                       }
+               }
+
+               if (bCheckFence && ui32FenceSyncCheckpointCount)
+               {
+                       /* Append the checks (from input fence) */
+                       if (ui32FenceSyncCheckpointCount > 0)
+                       {
+                               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d sync checkpoints to TQ Fence (psSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)psSyncAddrListFence));
+                               SyncAddrListAppendCheckpoints(psSyncAddrListFence,
+                                                                                         ui32FenceSyncCheckpointCount,
+                                                                                         apsFenceSyncCheckpoints);
+                               if (!pauiIntFenceUFOAddress)
+                               {
+                                       pauiIntFenceUFOAddress = psSyncAddrListFence->pasFWAddrs;
+                               }
+                               ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
+                       }
+#if defined(TRANSFER_CHECKPOINT_DEBUG)
+                       if (ui32IntClientFenceCount > 0)
+                       {
+                               IMG_UINT32 iii;
+                               IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress;
+
+                               for (iii=0; iii<ui32IntClientFenceCount; iii++)
+                               {
+                                       CHKPT_DBG((PVR_DBG_ERROR, "%s: psSyncAddrListFence->pasFWAddrs[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                                       pui32Tmp++;
+                               }
+                       }
+#endif
+               }
+               if (bUpdateFence && *ppsUpdateSyncCheckpoint)
+               {
+                       /* Append the update (from output fence) */
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append 1 sync checkpoint to TQ Update (psSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)&psTransferContext->asSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress));
+                       SyncAddrListAppendCheckpoints(psSyncAddrListUpdate,
+                                                                                 1,
+                                                                                 ppsUpdateSyncCheckpoint);
+                       if (!pauiIntUpdateUFOAddress)
+                       {
+                               pauiIntUpdateUFOAddress = psSyncAddrListUpdate->pasFWAddrs;
+                       }
+                       ui32IntClientUpdateCount++;
+#if defined(TRANSFER_CHECKPOINT_DEBUG)
+                       {
+                               IMG_UINT32 iii;
+                               IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntUpdateUFOAddress;
+
+                               for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+                               {
+                                       CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                                       pui32Tmp++;
+                               }
+                       }
+#endif
+               }
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount));
+
+#if (ENABLE_TQ_UFO_DUMP == 1)
+               PVR_DPF((PVR_DBG_ERROR, "%s: dumping TQ fence/updates syncs...", __func__));
+               {
+                       IMG_UINT32 ii;
+                       PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress;
+                       PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress;
+                       IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue;
+
+                       /* Dump Fence syncs and Update syncs */
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TQ fence syncs (&psTransferContext->asSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psTransferContext->asSyncAddrListFence, (void*)pauiIntFenceUFOAddress));
+                       for (ii=0; ii<ui32IntClientFenceCount; ii++)
+                       {
+                               PVR_ASSERT(psTmpIntFenceUFOAddress->ui32Addr & 0x1);
+                               PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
+
+                               psTmpIntFenceUFOAddress++;
+                       }
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TQ update syncs (&psTransferContext->asSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psTransferContext->asSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
+                       for (ii=0; ii<ui32IntClientUpdateCount; ii++)
+                       {
+                               if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
+                               }
+                               else
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue));
+                                       pui32TmpIntUpdateValue++;
+                               }
+                               psTmpIntUpdateUFOAddress++;
+                       }
+               }
+#endif
+
+               ui32PreparesDone++;
+
+               /*
+                       Create the command helper data for this command
+               */
+               RGXCmdHelperInitCmdCCB(psDevInfo,
+                                      psClientCCB,
+                                      0,
+                                      ui32IntClientFenceCount,
+                                      pauiIntFenceUFOAddress,
+                                      NULL, /* fence value */
+                                      ui32IntClientUpdateCount,
+                                      pauiIntUpdateUFOAddress,
+                                      paui32IntUpdateValue,
+                                      paui32FWCommandSize[i],
+                                      papaui8FWCommand[i],
+                                      &pPreAddr,
+                                      &pPostAddr,
+                                      &pRMWUFOAddr,
+                                      eType,
+                                      ui32ExtJobRef,
+                                      ui32IntJobRef,
+                                      ui32PDumpFlags,
+                                      NULL,
+                                      pszCommandName,
+                                      bCCBStateOpen,
+                                      psCmdHelper);
+       }
+
+       /*
+               Acquire space for all the commands in one go
+       */
+       if (ui323DCmdCount)
+       {
+               eError = RGXCmdHelperAcquireCmdCCB(ui323DCmdCount,
+                                                                                  &pas3DCmdHelper[0]);
+               if (eError != PVRSRV_OK)
+               {
+                       goto fail_cmdacquire;
+               }
+       }
+
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+       if ((ui322DCmdCount) && (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
+       {
+               eError = RGXCmdHelperAcquireCmdCCB(ui322DCmdCount,
+                                                                                  &pas2DCmdHelper[0]);
+               if (eError != PVRSRV_OK)
+               {
+                       goto fail_cmdacquire;
+               }
+       }
+#endif
+
+       /*
+               We should acquire the kernel CCB(s) space here, as the schedule could
+               fail and we would then have to roll back all the syncs.
+       */
+
+       if (ui323DCmdCount)
+       {
+               ui323DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s3DData.psServerCommonContext));
+               RGXCmdHelperReleaseCmdCCB(ui323DCmdCount,
+                                                                 &pas3DCmdHelper[0],
+                                                                 "TQ_3D",
+                                                                 FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext).ui32Addr);
+       }
+
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+       if ((ui322DCmdCount) && (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
+       {
+               ui322DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s2DData.psServerCommonContext));
+               RGXCmdHelperReleaseCmdCCB(ui322DCmdCount,
+                                                                 &pas2DCmdHelper[0],
+                                                                 "TQ_2D",
+                                                                 FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext).ui32Addr);
+       }
+#endif
+
+       if (ui323DCmdCount)
+       {
+               RGXFWIF_KCCB_CMD s3DKCCBCmd;
+               IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext).ui32Addr;
+               RGX_CLIENT_CCB *ps3DTQCCB = FWCommonContextGetClientCCB(psTransferContext->s3DData.psServerCommonContext);
+
+               /* Take one of the helper data structs and extract the common cmd struct;
+                * this is used to obtain the frame number. Each command should share the
+                * same frame number, so we can just use the first.
+                */
+               RGX_CCB_CMD_HELPER_DATA *psCmdHelper = &pas3DCmdHelper[0];
+               CMD_COMMON *psTransferCmdCmn = IMG_OFFSET_ADDR(psCmdHelper->pui8DMCmd, 0);
+
+               /* Construct the kernel 3D CCB command. */
+               s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+               s3DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext);
+               s3DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(ps3DTQCCB);
+               s3DKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(ps3DTQCCB);
+               s3DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+               s3DKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+
+               HTBLOGK(HTB_SF_MAIN_KICK_3D,
+                               s3DKCCBCmd.uCmdData.sCmdKickData.psContext,
+                               ui323DCmdOffset,
+                               psTransferCmdCmn->ui32FrameNum,
+                               ui32ExtJobRef,
+                               ui32IntJobRef
+                               );
+
+               RGXSRV_HWPERF_ENQ(psTransferContext,
+                                 OSGetCurrentClientProcessIDKM(),
+                                 ui32FWCtx,
+                                 ui32ExtJobRef,
+                                 ui32IntJobRef,
+                                 RGX_HWPERF_KICK_TYPE_TQ3D,
+                                 iCheckFence,
+                                 i3DUpdateFence,
+                                 i3DUpdateTimeline,
+                                 uiCheckFenceUID,
+                                 ui3DUpdateFenceUID,
+                                 NO_DEADLINE,
+                                 NO_CYCEST);
+
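+               /* Submit the 3D kick to the firmware kernel CCB, retrying while
+                * RGXScheduleCommand reports PVRSRV_ERROR_RETRY, for up to MAX_HW_TIME_US. */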
+               LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+               {
+                       eError2 = RGXScheduleCommand(psDevInfo,
+                                                                               RGXFWIF_DM_3D,
+                                                                               &s3DKCCBCmd,
+                                                                               ui32PDumpFlags);
+                       if (eError2 != PVRSRV_ERROR_RETRY)
+                       {
+                               break;
+                       }
+                       OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+               } END_LOOP_UNTIL_TIMEOUT();
+
+               if (eError2 != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSubmitTransferKM failed to schedule kernel CCB command. (0x%x)", eError2));
+                       if (eError == PVRSRV_OK)
+                       {
+                               eError = eError2;
+                       }
+                       goto fail_cmdacquire;
+               }
+
+               PVRGpuTraceEnqueueEvent(psDeviceNode, ui32FWCtx, ui32ExtJobRef,
+                                       ui32IntJobRef, RGX_HWPERF_KICK_TYPE_TQ3D);
+       }
+
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+       if ((ui322DCmdCount) && (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
+       {
+               RGXFWIF_KCCB_CMD s2DKCCBCmd;
+               IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext).ui32Addr;
+               RGX_CLIENT_CCB *ps2DTQCCB = FWCommonContextGetClientCCB(psTransferContext->s2DData.psServerCommonContext);
+
+               /* Take one of the helper data structs and extract the common cmd struct;
+                * this is used to obtain the frame number. Each command should share the
+                * same frame number, so we can just use the first.
+                */
+               RGX_CCB_CMD_HELPER_DATA *psCmdHelper = &pas2DCmdHelper[0];
+               CMD_COMMON *psTransferCmdCmn = IMG_OFFSET_ADDR(psCmdHelper->pui8DMCmd, 0);
+
+               /* Construct the kernel 2D CCB command. */
+               s2DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+               s2DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext);
+               s2DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(ps2DTQCCB);
+               s2DKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(ps2DTQCCB);
+               s2DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+               HTBLOGK(HTB_SF_MAIN_KICK_2D,
+                               s2DKCCBCmd.uCmdData.sCmdKickData.psContext,
+                               ui322DCmdOffset,
+                               psTransferCmdCmn->ui32FrameNum,
+                               ui32ExtJobRef,
+                               ui32IntJobRef);
+
+               RGXSRV_HWPERF_ENQ(psTransferContext,
+                                 OSGetCurrentClientProcessIDKM(),
+                                 ui32FWCtx,
+                                 ui32ExtJobRef,
+                                 ui32IntJobRef,
+                                 RGX_HWPERF_KICK_TYPE_TQ2D,
+                                 iCheckFence,
+                                 i2DUpdateFence,
+                                 i2DUpdateTimeline,
+                                 uiCheckFenceUID,
+                                 ui2DUpdateFenceUID,
+                                 NO_DEADLINE,
+                                 NO_CYCEST);
+
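+               /* As for the 3D path above: retry the 2D kick while RGXScheduleCommand
+                * reports PVRSRV_ERROR_RETRY, for up to MAX_HW_TIME_US. */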
+               LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+               {
+                       eError2 = RGXScheduleCommand(psDevInfo,
+                                                                               RGXFWIF_DM_2D,
+                                                                               &s2DKCCBCmd,
+                                                                               ui32PDumpFlags);
+                       if (eError2 != PVRSRV_ERROR_RETRY)
+                       {
+                               break;
+                       }
+                       OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+               } END_LOOP_UNTIL_TIMEOUT();
+
+               if (eError2 != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSubmitTransferKM failed to schedule kernel CCB command. (0x%x)", eError2));
+                       if (eError == PVRSRV_OK)
+                       {
+                               eError = eError2;
+                       }
+                       goto fail_cmdacquire;
+               }
+
+               PVRGpuTraceEnqueueEvent(psDeviceNode, ui32FWCtx, ui32ExtJobRef,
+                                       ui32IntJobRef, RGX_HWPERF_KICK_TYPE_TQ2D);
+       }
+#endif
+
+       /*
+        * Now check eError (which may hold an error from our earlier calls to
+        * RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command
+        * first, so we check it now.
+        */
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_cmdacquire;
+       }
+
+#if defined(NO_HARDWARE)
+       /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+       if (ps2DUpdateSyncCheckpoint)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Signalling TLA NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)ps2DUpdateSyncCheckpoint, SyncCheckpointGetId(ps2DUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(ps2DUpdateSyncCheckpoint)));
+               SyncCheckpointSignalNoHW(ps2DUpdateSyncCheckpoint);
+       }
+       if (ps2DFenceTimelineUpdateSync)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Updating TLA NOHW sync prim<%p> to %d", __func__, (void*)ps2DFenceTimelineUpdateSync, ui322DFenceTimelineUpdateValue));
+               SyncPrimNoHwUpdate(ps2DFenceTimelineUpdateSync, ui322DFenceTimelineUpdateValue);
+       }
+#endif
+       if (ps3DUpdateSyncCheckpoint)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Signalling TQ3D NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)ps3DUpdateSyncCheckpoint, SyncCheckpointGetId(ps3DUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(ps3DUpdateSyncCheckpoint)));
+               SyncCheckpointSignalNoHW(ps3DUpdateSyncCheckpoint);
+       }
+       if (ps3DFenceTimelineUpdateSync)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Updating TQ3D NOHW sync prim<%p> to %d", __func__, (void*)ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue));
+               SyncPrimNoHwUpdate(ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue);
+       }
+       SyncCheckpointNoHWUpdateTimelines(NULL);
+#endif /* defined(NO_HARDWARE) */
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       if (psBufferSyncData)
+       {
+               pvr_buffer_sync_kick_succeeded(psBufferSyncData);
+       }
+       if (apsBufferFenceSyncCheckpoints)
+       {
+               kfree(apsBufferFenceSyncCheckpoints);
+       }
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+       if (pi2DUpdateFence)
+       {
+               *pi2DUpdateFence = i2DUpdateFence;
+       }
+#endif
+       if (pi3DUpdateFence)
+       {
+               *pi3DUpdateFence = i3DUpdateFence;
+       }
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+       if (pv2DUpdateFenceFinaliseData && (i2DUpdateFence != PVRSRV_NO_FENCE))
+       {
+               SyncCheckpointFinaliseFence(psDeviceNode, i2DUpdateFence, pv2DUpdateFenceFinaliseData,
+                                           ps2DUpdateSyncCheckpoint, szFenceName);
+       }
+#endif
+       if (pv3DUpdateFenceFinaliseData && (i3DUpdateFence != PVRSRV_NO_FENCE))
+       {
+               SyncCheckpointFinaliseFence(psDeviceNode, i3DUpdateFence, pv3DUpdateFenceFinaliseData,
+                                           ps3DUpdateSyncCheckpoint, szFenceName);
+       }
+
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+       OSFreeMem(pas2DCmdHelper);
+#endif
+       OSFreeMem(pas3DCmdHelper);
+
+       /* Drop the references taken on the sync checkpoints in the
+        * resolved input fence */
+       SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+                                                                apsFenceSyncCheckpoints);
+       /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+       if (apsFenceSyncCheckpoints)
+       {
+               SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+       }
+       /* Free memory allocated to hold the internal list of update values */
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+       if (pui322DIntAllocatedUpdateValues)
+       {
+               OSFreeMem(pui322DIntAllocatedUpdateValues);
+               pui322DIntAllocatedUpdateValues = NULL;
+       }
+#endif
+       if (pui323DIntAllocatedUpdateValues)
+       {
+               OSFreeMem(pui323DIntAllocatedUpdateValues);
+               pui323DIntAllocatedUpdateValues = NULL;
+       }
+
+       OSLockRelease(psTransferContext->hLock);
+       return PVRSRV_OK;
+
+/*
+       No resources are created in this function so there is nothing to free
+       unless we had to merge syncs.
+       If we fail after the client CCB acquire there is still nothing to do,
+       as only the client CCB release will modify the client CCB.
+*/
+fail_cmdacquire:
+fail_prepare_loop:
+
+       PVR_ASSERT(eError != PVRSRV_OK);
+
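+       /* Roll back any sync checkpoints already appended to the per-prepare
+        * fence/update address lists before the failure occurred. */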
+       for (i = 0; i < ui32PreparesDone; i++)
+       {
+               SyncAddrListRollbackCheckpoints(psDeviceNode, &psTransferContext->asSyncAddrListFence[i]);
+               SyncAddrListRollbackCheckpoints(psDeviceNode, &psTransferContext->asSyncAddrListUpdate[i]);
+       }
+#if defined(SUPPORT_BUFFER_SYNC)
+       if (ui32PreparesDone > 0)
+       {
+               /* Prevent duplicate rollback in case of buffer sync. */
+               psBufferUpdateSyncCheckpoint = NULL;
+       }
+#endif
+
+       /* Free memory allocated to hold the internal list of update values */
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+       if (pui322DIntAllocatedUpdateValues)
+       {
+               OSFreeMem(pui322DIntAllocatedUpdateValues);
+               pui322DIntAllocatedUpdateValues = NULL;
+       }
+#endif
+       if (pui323DIntAllocatedUpdateValues)
+       {
+               OSFreeMem(pui323DIntAllocatedUpdateValues);
+               pui323DIntAllocatedUpdateValues = NULL;
+       }
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+       if (i2DUpdateFence != PVRSRV_NO_FENCE)
+       {
+               SyncCheckpointRollbackFenceData(i2DUpdateFence, pv2DUpdateFenceFinaliseData);
+       }
+#endif
+       if (i3DUpdateFence != PVRSRV_NO_FENCE)
+       {
+               SyncCheckpointRollbackFenceData(i3DUpdateFence, pv3DUpdateFenceFinaliseData);
+       }
+#if defined(SUPPORT_BUFFER_SYNC)
+       if (psBufferUpdateSyncCheckpoint)
+       {
+               SyncAddrListRollbackCheckpoints(psDeviceNode, &psTransferContext->asSyncAddrListUpdate[0]);
+       }
+       if (psBufferSyncData)
+       {
+               pvr_buffer_sync_kick_failed(psBufferSyncData);
+       }
+       if (apsBufferFenceSyncCheckpoints)
+       {
+               kfree(apsBufferFenceSyncCheckpoints);
+       }
+fail_resolve_buffersync_input_fence:
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+       /* Drop the references taken on the sync checkpoints in the
+        * resolved input fence */
+       SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+                                                                apsFenceSyncCheckpoints);
+       /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+       if (apsFenceSyncCheckpoints)
+       {
+               SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+       }
+fail_resolve_fencesync_input_fence:
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+       OSFreeMem(pas2DCmdHelper);
+fail_alloc2dhelper:
+#endif
+       OSFreeMem(pas3DCmdHelper);
+fail_alloc3dhelper:
+
+       OSLockRelease(psTransferContext->hLock);
+       return eError;
+}
+
+
+PVRSRV_ERROR PVRSRVRGXSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
+                                                   PVRSRV_DEVICE_NODE *psDevNode,
+                                                   RGX_SERVER_TQ_CONTEXT *psTransferContext,
+                                                   IMG_UINT32 ui32Priority)
+{
+       PVRSRV_ERROR eError;
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice;
+#endif
+       PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+       OSLockAcquire(psTransferContext->hLock);
+
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+       if ((psTransferContext->s2DData.ui32Priority != ui32Priority) &&
+                       (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
+       {
+               eError = ContextSetPriority(psTransferContext->s2DData.psServerCommonContext,
+                                                                       psConnection,
+                                                                       psTransferContext->psDeviceNode->pvDevice,
+                                                                       ui32Priority,
+                                                                       RGXFWIF_DM_2D);
+               if (eError != PVRSRV_OK)
+               {
+                       if (eError != PVRSRV_ERROR_RETRY)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the 2D part of the transfer context (%s)", __func__, PVRSRVGetErrorString(eError)));
+                       }
+                       goto fail_2dcontext;
+               }
+               psTransferContext->s2DData.ui32Priority = ui32Priority;
+       }
+#endif
+
+       if (psTransferContext->s3DData.ui32Priority != ui32Priority)
+       {
+               eError = ContextSetPriority(psTransferContext->s3DData.psServerCommonContext,
+                                                                       psConnection,
+                                                                       psTransferContext->psDeviceNode->pvDevice,
+                                                                       ui32Priority,
+                                                                       RGXFWIF_DM_3D);
+               if (eError != PVRSRV_OK)
+               {
+                       if (eError != PVRSRV_ERROR_RETRY)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the 3D part of the transfer context (%s)", __func__, PVRSRVGetErrorString(eError)));
+                       }
+                       goto fail_3dcontext;
+               }
+               psTransferContext->s3DData.ui32Priority = ui32Priority;
+       }
+
+       OSLockRelease(psTransferContext->hLock);
+       return PVRSRV_OK;
+
+fail_3dcontext:
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+
+fail_2dcontext:
+#endif
+       OSLockRelease(psTransferContext->hLock);
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXSetTransferContextPropertyKM(RGX_SERVER_TQ_CONTEXT *psTransferContext,
+                                                                                                  RGX_CONTEXT_PROPERTY eContextProperty,
+                                                                                                  IMG_UINT64 ui64Input,
+                                                                                                  IMG_UINT64 *pui64Output)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       switch (eContextProperty)
+       {
+               case RGX_CONTEXT_PROPERTY_FLAGS:
+               {
+                       IMG_UINT32 ui32ContextFlags = (IMG_UINT32)ui64Input;
+
+                       OSLockAcquire(psTransferContext->hLock);
+                       eError = FWCommonContextSetFlags(psTransferContext->s2DData.psServerCommonContext,
+                                                        ui32ContextFlags);
+                       if (eError == PVRSRV_OK)
+                       {
+                               eError = FWCommonContextSetFlags(psTransferContext->s3DData.psServerCommonContext,
+                                                            ui32ContextFlags);
+                       }
+                       OSLockRelease(psTransferContext->hLock);
+                       break;
+               }
+
+               default:
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty));
+                       eError = PVRSRV_ERROR_NOT_SUPPORTED;
+               }
+       }
+
+       return eError;
+}
+
+void DumpTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                           DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                           void *pvDumpDebugFile,
+                           IMG_UINT32 ui32VerbLevel)
+{
+       DLLIST_NODE *psNode, *psNext;
+
+       OSWRLockAcquireRead(psDevInfo->hTransferCtxListLock);
+
+       dllist_foreach_node(&psDevInfo->sTransferCtxtListHead, psNode, psNext)
+       {
+               RGX_SERVER_TQ_CONTEXT *psCurrentServerTransferCtx =
+                       IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_CONTEXT, sListNode);
+
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+               if ((psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D) &&
+                               (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
+               {
+                       DumpFWCommonContextInfo(psCurrentServerTransferCtx->s2DData.psServerCommonContext,
+                                               pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+               }
+#endif
+
+               if (psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D)
+               {
+                       DumpFWCommonContextInfo(psCurrentServerTransferCtx->s3DData.psServerCommonContext,
+                                               pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+               }
+       }
+
+       OSWRLockReleaseRead(psDevInfo->hTransferCtxListLock);
+}
+
+IMG_UINT32 CheckForStalledClientTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       DLLIST_NODE *psNode, *psNext;
+       IMG_UINT32 ui32ContextBitMask = 0;
+
+       OSWRLockAcquireRead(psDevInfo->hTransferCtxListLock);
+
+       dllist_foreach_node(&psDevInfo->sTransferCtxtListHead, psNode, psNext)
+       {
+               RGX_SERVER_TQ_CONTEXT *psCurrentServerTransferCtx =
+                       IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_CONTEXT, sListNode);
+
+#if defined(RGX_FEATURE_TLA_BIT_MASK)
+               if ((psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D) &&
+                               (NULL != psCurrentServerTransferCtx->s2DData.psServerCommonContext) &&
+                               (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
+               {
+                       if (CheckStalledClientCommonContext(psCurrentServerTransferCtx->s2DData.psServerCommonContext, RGX_KICK_TYPE_DM_TQ2D) == PVRSRV_ERROR_CCCB_STALLED)
+                       {
+                               ui32ContextBitMask |= RGX_KICK_TYPE_DM_TQ2D;
+                       }
+               }
+#endif
+
+               if ((psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D) && (NULL != psCurrentServerTransferCtx->s3DData.psServerCommonContext))
+               {
+                       if ((CheckStalledClientCommonContext(psCurrentServerTransferCtx->s3DData.psServerCommonContext, RGX_KICK_TYPE_DM_TQ3D) == PVRSRV_ERROR_CCCB_STALLED))
+                       {
+                               ui32ContextBitMask |= RGX_KICK_TYPE_DM_TQ3D;
+                       }
+               }
+       }
+
+       OSWRLockReleaseRead(psDevInfo->hTransferCtxListLock);
+       return ui32ContextBitMask;
+}
+
+/**************************************************************************//**
+ End of file (rgxtransfer.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxtransfer.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/rogue/rgxtransfer.h
new file mode 100644 (file)
index 0000000..cbc5b73
--- /dev/null
@@ -0,0 +1,153 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Transfer queue Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX Transfer queue Functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXTRANSFER_H)
+#define RGXTRANSFER_H
+
+#include "devicemem.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+
+#include "sync_server.h"
+#include "connection_server.h"
+
+typedef struct _RGX_SERVER_TQ_CONTEXT_ RGX_SERVER_TQ_CONTEXT;
+
+/*!
+*******************************************************************************
+
+ @Function     PVRSRVRGXCreateTransferContextKM
+
+ @Description
+       Server-side implementation of RGXCreateTransferContext
+
+ @Input psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXCreateTransferContextKM(CONNECTION_DATA          *psConnection,
+                                                                                  PVRSRV_DEVICE_NODE           *psDeviceNode,
+                                                                                  IMG_UINT32                           ui32Priority,
+                                                                                  IMG_UINT32                           ui32FrameworkCommandSize,
+                                                                                  IMG_PBYTE                            pabyFrameworkCommand,
+                                                                                  IMG_HANDLE                           hMemCtxPrivData,
+                                                                                  IMG_UINT32                           ui32PackedCCBSizeU8888,
+                                                                                  IMG_UINT32                           ui32ContextFlags,
+                                                                                  IMG_UINT64                           ui64RobustnessAddress,
+                                                                                  RGX_SERVER_TQ_CONTEXT        **ppsTransferContext,
+                                                                                  PMR                                          **ppsCLIPMRMem,
+                                                                                  PMR                                          **ppsUSCPMRMem);
+
+
+/*!
+*******************************************************************************
+
+ @Function     PVRSRVRGXDestroyTransferContextKM
+
+ @Description
+       Server-side implementation of RGXDestroyTransferContext
+
+ @Input psTransferContext - Transfer context
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDestroyTransferContextKM(RGX_SERVER_TQ_CONTEXT *psTransferContext);
+
+/*!
+*******************************************************************************
+
+ @Function     PVRSRVRGXSubmitTransferKM
+
+ @Description
+       Schedules one or more 2D or 3D HW commands on the firmware
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT   *psTransferContext,
+                                                                       IMG_UINT32                              ui32PrepareCount,
+                                                                       IMG_UINT32                              *paui32ClientUpdateCount,
+                                                                       SYNC_PRIMITIVE_BLOCK            ***papauiClientUpdateUFODevVarBlock,
+                                                                       IMG_UINT32                              **papaui32ClientUpdateSyncOffset,
+                                                                       IMG_UINT32                              **papaui32ClientUpdateValue,
+                                                                       PVRSRV_FENCE                    iCheckFence,
+                                                                       PVRSRV_TIMELINE                 i2DUpdateTimeline,
+                                                                       PVRSRV_FENCE                    *pi2DUpdateFence,
+                                                                       PVRSRV_TIMELINE                 i3DUpdateTimeline,
+                                                                       PVRSRV_FENCE                    *pi3DUpdateFence,
+                                                                       IMG_CHAR                                szFenceName[32],
+                                                                       IMG_UINT32                              *paui32FWCommandSize,
+                                                                       IMG_UINT8                               **papaui8FWCommand,
+                                                                       IMG_UINT32                              *pui32TQPrepareFlags,
+                                                                       IMG_UINT32                              ui32ExtJobRef,
+                                                                       IMG_UINT32                              ui32SyncPMRCount,
+                                                                       IMG_UINT32                              *paui32SyncPMRFlags,
+                                                                       PMR                                             **ppsSyncPMRs);
+
+PVRSRV_ERROR PVRSRVRGXSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
+                                                   PVRSRV_DEVICE_NODE *psDevNode,
+                                                   RGX_SERVER_TQ_CONTEXT *psTransferContext,
+                                                   IMG_UINT32 ui32Priority);
+
+PVRSRV_ERROR PVRSRVRGXSetTransferContextPropertyKM(RGX_SERVER_TQ_CONTEXT *psTransferContext,
+                                                                                                  RGX_CONTEXT_PROPERTY eContextProperty,
+                                                                                                  IMG_UINT64 ui64Input,
+                                                                                                  IMG_UINT64 *pui64Output);
+
+/* Debug - Dump debug info of transfer contexts on this device */
+void DumpTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                           DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                           void *pvDumpDebugFile,
+                           IMG_UINT32 ui32VerbLevel);
+
+/* Debug/Watchdog - check if client transfer contexts are stalled */
+IMG_UINT32 CheckForStalledClientTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+#endif /* RGXTRANSFER_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxcompute.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxcompute.c
new file mode 100644 (file)
index 0000000..12e6e77
--- /dev/null
@@ -0,0 +1,1418 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Compute routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX Compute routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "srvkm.h"
+#include "pdump_km.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxcompute.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "rgxccb.h"
+#include "rgxhwperf.h"
+#include "ospvr_gputrace.h"
+#include "htbuffer.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "rgx_memallocflags.h"
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+
+#include "rgxtimerquery.h"
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "rgxworkest.h"
+
+#define HASH_CLEAN_LIMIT 6
+#endif
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_CMP_UFO_DUMP    0
+
+//#define CMP_CHECKPOINT_DEBUG 1
+
+#if defined(CMP_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+#else
+#define CHKPT_DBG(X)
+#endif
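+
+/* Define CMP_CHECKPOINT_DEBUG above to turn CHKPT_DBG() into PVR_DPF() output;
+ * otherwise CHKPT_DBG() compiles away to nothing. */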
+
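+/* Per-context compute (CDM) data: the FW context suspend-state buffer, the
+ * server common context it belongs to, and the context's current priority. */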
+typedef struct {
+       DEVMEM_MEMDESC                          *psContextStateMemDesc;
+       RGX_SERVER_COMMON_CONTEXT       *psServerCommonContext;
+       IMG_UINT32                                      ui32Priority;
+} RGX_SERVER_CC_CMP_DATA;
+
+struct _RGX_SERVER_COMPUTE_CONTEXT_ {
+       PVRSRV_DEVICE_NODE                      *psDeviceNode;
+       //RGX_SERVER_COMMON_CONTEXT     *psServerCommonContext;
+       //DEVMEM_MEMDESC                                *psFWComputeContextStateMemDesc;
+       DEVMEM_MEMDESC                          *psFWComputeContextMemDesc;
+       DEVMEM_MEMDESC                          *psFWFrameworkMemDesc;
+       RGX_SERVER_CC_CMP_DATA          sComputeData;
+       DLLIST_NODE                                     sListNode;
+       SYNC_ADDR_LIST                          sSyncAddrListFence;
+       SYNC_ADDR_LIST                          sSyncAddrListUpdate;
+       POS_LOCK                                        hLock;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       WORKEST_HOST_DATA                       sWorkEstData;
+#endif
+#if defined(SUPPORT_BUFFER_SYNC)
+       struct pvr_buffer_sync_context *psBufferSyncContext;
+#endif
+};
+
+static
+PVRSRV_ERROR _CreateComputeContext(CONNECTION_DATA *psConnection,
+                                                                  PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                  DEVMEM_MEMDESC *psAllocatedMemDesc,
+                                                                  IMG_UINT32 ui32AllocatedOffset,
+                                                                  SERVER_MMU_CONTEXT *psServerMMUContext,
+                                                                  DEVMEM_MEMDESC *psFWMemContextMemDesc,
+                                                                  IMG_UINT32 ui32PackedCCBSizeU88,
+                                                                  IMG_UINT32 ui32ContextFlags,
+                                                                  IMG_UINT32 ui32Priority,
+                                                                  IMG_UINT64 ui64RobustnessAddress,
+                                                                  IMG_UINT32 ui32MaxDeadlineMS,
+                                                                  RGX_COMMON_CONTEXT_INFO *psInfo,
+                                                                  RGX_SERVER_CC_CMP_DATA *psComputeData)
+{
+       IMG_UINT32 ui32CCBAllocSizeLog2, ui32CCBMaxAllocSizeLog2;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_ERROR eError;
+
+       /*
+               Allocate device memory for the firmware GPU context suspend state.
+               Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+        */
+       PDUMPCOMMENT(psDeviceNode, "Allocate RGX firmware compute context suspend state");
+
+       eError = DevmemFwAllocate(psDevInfo,
+                                                         sizeof(RGXFWIF_COMPUTECTX_STATE),
+                                                         RGX_FWCOMCTX_ALLOCFLAGS,
+                                                         "FwComputeContextState",
+                                                         &psComputeData->psContextStateMemDesc);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to allocate firmware GPU context suspend state (%d)",
+                                __func__,
+                                eError));
+               goto fail_contextsuspendalloc;
+       }
+
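+       /* ui32PackedCCBSizeU88 packs two log2 sizes into one value: the client CCB
+        * allocation size and the maximum client CCB size. A zero byte falls back
+        * to the RGX_CDM_CCB_*_SIZE_LOG2 defaults below. */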
+       ui32CCBAllocSizeLog2 = U32toU8_Unpack1(ui32PackedCCBSizeU88);
+       ui32CCBMaxAllocSizeLog2 = U32toU8_Unpack2(ui32PackedCCBSizeU88);
+       eError = FWCommonContextAllocate(psConnection,
+                                                                        psDeviceNode,
+                                                                        REQ_TYPE_CDM,
+                                                                        RGXFWIF_DM_CDM,
+                                                                        psServerMMUContext,
+                                                                        psAllocatedMemDesc,
+                                                                        ui32AllocatedOffset,
+                                                                        psFWMemContextMemDesc,
+                                                                        psComputeData->psContextStateMemDesc,
+                                                                        ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_CDM_CCB_SIZE_LOG2,
+                                                                        ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_CDM_CCB_MAX_SIZE_LOG2,
+                                                                        ui32ContextFlags,
+                                                                        ui32Priority,
+                                                                        ui32MaxDeadlineMS,
+                                                                        ui64RobustnessAddress,
+                                                                        psInfo,
+                                                                        &psComputeData->psServerCommonContext);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to init Compute fw common context (%s)",
+                                __func__,
+                                PVRSRVGetErrorString(eError)));
+               goto fail_computecommoncontext;
+       }
+
+       /*
+        * Dump the FW compute context suspend state buffer
+        */
+       PDUMPCOMMENT(psDeviceNode, "Dump the compute context suspend state buffer");
+       DevmemPDumpLoadMem(psComputeData->psContextStateMemDesc,
+                                          0,
+                                          sizeof(RGXFWIF_COMPUTECTX_STATE),
+                                          PDUMP_FLAGS_CONTINUOUS);
+
+       psComputeData->ui32Priority = ui32Priority;
+       return PVRSRV_OK;
+
+fail_computecommoncontext:
+       DevmemFree(psComputeData->psContextStateMemDesc);
+fail_contextsuspendalloc:
+       PVR_ASSERT(eError != PVRSRV_OK);
+
+       return eError;
+}
+
+static
+PVRSRV_ERROR _DestroyComputeContext(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
+{
+       RGX_SERVER_CC_CMP_DATA *psComputeData = &psComputeContext->sComputeData;
+       PVRSRV_DEVICE_NODE *psDeviceNode = psComputeContext->psDeviceNode;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_ERROR eError;
+
+       /* Check if the FW has finished with this resource ... */
+       eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+                                                                                         psComputeData->psServerCommonContext,
+                                                                                         RGXFWIF_DM_CDM,
+                                                                                         PDUMP_FLAGS_NONE);
+       if (eError == PVRSRV_ERROR_RETRY)
+       {
+               return eError;
+       }
+       else if (eError != PVRSRV_OK)
+       {
+               PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+                                __func__,
+                               PVRSRVGetErrorString(eError)));
+               return eError;
+       }
+
+       /* ... it has so we can free its resources */
+
+       /* Remove from node list before freeing. */
+       OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock);
+       dllist_remove_node(&(psComputeContext->sListNode));
+       OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock);
+
+       FWCommonContextFree(psComputeData->psServerCommonContext);
+       DevmemFwUnmapAndFree(psDeviceNode->pvDevice, psComputeData->psContextStateMemDesc);
+       psComputeData->psServerCommonContext = NULL;
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA                   *psConnection,
+                                                                                        PVRSRV_DEVICE_NODE                     *psDeviceNode,
+                                                                                        IMG_UINT32                                     ui32Priority,
+                                                                                        IMG_UINT32                                     ui32FrameworkCommandSize,
+                                                                                        IMG_PBYTE                                      pabyFrameworkCommand,
+                                                                                        IMG_HANDLE                                     hMemCtxPrivData,
+                                                                                        IMG_UINT32                                     ui32StaticComputeContextStateSize,
+                                                                                        IMG_PBYTE                                      pStaticComputeContextState,
+                                                                                        IMG_UINT32                                     ui32PackedCCBSizeU88,
+                                                                                        IMG_UINT32                                     ui32ContextFlags,
+                                                                                        IMG_UINT64                                     ui64RobustnessAddress,
+                                                                                        IMG_UINT32                                     ui32MaxDeadlineMS,
+                                                                                        RGX_SERVER_COMPUTE_CONTEXT     **ppsComputeContext)
+{
+       DEVMEM_MEMDESC                          *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+       PVRSRV_RGXDEV_INFO                      *psDevInfo = psDeviceNode->pvDevice;
+       RGX_SERVER_COMPUTE_CONTEXT      *psComputeContext;
+       RGX_COMMON_CONTEXT_INFO         sInfo = {NULL};
+       PVRSRV_ERROR                            eError = PVRSRV_OK;
+       RGXFWIF_FWCOMPUTECONTEXT        *psFWComputeContext;
+
+       /* Prepare cleanup struct */
+       *ppsComputeContext = NULL;
+
+       if (ui32StaticComputeContextStateSize > RGXFWIF_STATIC_COMPUTECONTEXT_SIZE)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       psComputeContext = OSAllocZMem(sizeof(*psComputeContext));
+       if (psComputeContext == NULL)
+       {
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       /*
+               Create the FW compute context, this has the CDM common
+               context embedded within it
+        */
+       eError = DevmemFwAllocate(psDevInfo,
+                       sizeof(RGXFWIF_FWCOMPUTECONTEXT),
+                       RGX_FWCOMCTX_ALLOCFLAGS,
+                       "FwComputeContext",
+                       &psComputeContext->psFWComputeContextMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_fwcomputecontext;
+       }
+
+       eError = OSLockCreate(&psComputeContext->hLock);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to create lock (%s)",
+                                __func__,
+                                PVRSRVGetErrorString(eError)));
+               goto fail_createlock;
+       }
+
+       psComputeContext->psDeviceNode = psDeviceNode;
+
+       if (ui32FrameworkCommandSize)
+       {
+               eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+                               &psComputeContext->psFWFrameworkMemDesc,
+                               ui32FrameworkCommandSize);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Failed to allocate firmware GPU framework state (%s)",
+                                       __func__,
+                                       PVRSRVGetErrorString(eError)));
+                       goto fail_frameworkcreate;
+               }
+
+               /* Copy the Framework client data into the framework buffer */
+               eError = PVRSRVRGXFrameworkCopyCommand(psDeviceNode,
+                               psComputeContext->psFWFrameworkMemDesc,
+                               pabyFrameworkCommand,
+                               ui32FrameworkCommandSize);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Failed to populate the framework buffer (%s)",
+                                       __func__,
+                                       PVRSRVGetErrorString(eError)));
+                       goto fail_frameworkcopy;
+               }
+               sInfo.psFWFrameworkMemDesc = psComputeContext->psFWFrameworkMemDesc;
+       }
+
+       eError = _CreateComputeContext(psConnection,
+                                                                        psDeviceNode,
+                                                                        psComputeContext->psFWComputeContextMemDesc,
+                                                                        offsetof(RGXFWIF_FWCOMPUTECONTEXT, sCDMContext),
+                                                                        hMemCtxPrivData,
+                                                                        psFWMemContextMemDesc,
+                                                                        ui32PackedCCBSizeU88,
+                                                                        ui32ContextFlags,
+                                                                        ui32Priority,
+                                                                        ui64RobustnessAddress,
+                                                                        ui32MaxDeadlineMS,
+                                                                        &sInfo,
+                                                                        &psComputeContext->sComputeData);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_computecontext;
+       }
+
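+       /* Map the FW compute context so the client-supplied static context state
+        * can be copied into it (and PDUMPed) below. */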
+       eError = DevmemAcquireCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc,
+                       (void **)&psFWComputeContext);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_acquire_cpu_mapping;
+       }
+
+       OSDeviceMemCopy(&psFWComputeContext->sStaticComputeContextState, pStaticComputeContextState, ui32StaticComputeContextStateSize);
+       DevmemPDumpLoadMem(psComputeContext->psFWComputeContextMemDesc, 0, sizeof(RGXFWIF_FWCOMPUTECONTEXT), PDUMP_FLAGS_CONTINUOUS);
+       DevmemReleaseCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc);
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       psComputeContext->psBufferSyncContext =
+                       pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice,
+                                       "rogue-cdm");
+       if (IS_ERR(psComputeContext->psBufferSyncContext))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: failed to create buffer_sync context (err=%ld)",
+                               __func__, PTR_ERR(psComputeContext->psBufferSyncContext)));
+
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto fail_buffer_sync_context_create;
+       }
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       WorkEstInitCompute(psDevInfo, &psComputeContext->sWorkEstData);
+#endif
+
+       SyncAddrListInit(&psComputeContext->sSyncAddrListFence);
+       SyncAddrListInit(&psComputeContext->sSyncAddrListUpdate);
+
+       OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock);
+       dllist_add_to_tail(&(psDevInfo->sComputeCtxtListHead), &(psComputeContext->sListNode));
+       OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock);
+
+       *ppsComputeContext = psComputeContext;
+       return PVRSRV_OK;
+
+#if defined(SUPPORT_BUFFER_SYNC)
+fail_buffer_sync_context_create:
+#endif
+fail_acquire_cpu_mapping:
+       FWCommonContextFree(psComputeContext->sComputeData.psServerCommonContext);
+fail_frameworkcopy:
+fail_computecontext:
+       if (psComputeContext->psFWFrameworkMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWFrameworkMemDesc);
+       }
+fail_frameworkcreate:
+       OSLockDestroy(psComputeContext->hLock);
+fail_createlock:
+       DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextMemDesc);
+fail_fwcomputecontext:
+       OSFreeMem(psComputeContext);
+       return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
+{
+       PVRSRV_ERROR                            eError = PVRSRV_OK;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       RGXFWIF_FWCOMPUTECONTEXT        *psFWComputeContext;
+       IMG_UINT32 ui32WorkEstCCBSubmitted;
+
+       eError = DevmemAcquireCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc,
+                       (void **)&psFWComputeContext);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to map firmware compute context (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+               return eError;
+       }
+
+       ui32WorkEstCCBSubmitted = psFWComputeContext->ui32WorkEstCCBSubmitted;
+
+       DevmemReleaseCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc);
+
+       /* Check if all of the workload estimation CCB commands for this workload are read */
+       if (ui32WorkEstCCBSubmitted != psComputeContext->sWorkEstData.ui32WorkEstCCBReceived)
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                       "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch",
+                       __func__, ui32WorkEstCCBSubmitted,
+                       psComputeContext->sWorkEstData.ui32WorkEstCCBReceived));
+
+               return PVRSRV_ERROR_RETRY;
+       }
+#endif
+
+       eError = _DestroyComputeContext(psComputeContext);
+       if (eError != PVRSRV_OK)
+       {
+               return eError;
+       }
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       /* Destroy this only after RGXFWRequestCommonContextCleanUp() has succeeded:
+        * that call might return RETRY and we don't want to call this twice. */
+       if (psComputeContext->psBufferSyncContext != NULL)
+       {
+               pvr_buffer_sync_context_destroy(psComputeContext->psBufferSyncContext);
+               psComputeContext->psBufferSyncContext = NULL;
+       }
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       WorkEstDeInitCompute(psDevInfo, &psComputeContext->sWorkEstData);
+#endif
+
+       if (psComputeContext->psFWFrameworkMemDesc != NULL)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWFrameworkMemDesc);
+       }
+       DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextMemDesc);
+
+       OSLockDestroy(psComputeContext->hLock);
+       OSFreeMem(psComputeContext);
+
+       return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT     *psComputeContext,
+                                                               IMG_UINT32                                      ui32ClientUpdateCount,
+                                                               SYNC_PRIMITIVE_BLOCK            **pauiClientUpdateUFODevVarBlock,
+                                                               IMG_UINT32                                      *paui32ClientUpdateSyncOffset,
+                                                               IMG_UINT32                                      *paui32ClientUpdateValue,
+                                                               PVRSRV_FENCE                            iCheckFence,
+                                                               PVRSRV_TIMELINE                         iUpdateTimeline,
+                                                               PVRSRV_FENCE                            *piUpdateFence,
+                                                               IMG_CHAR                                        pszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],
+                                                               IMG_UINT32                                      ui32CmdSize,
+                                                               IMG_PBYTE                                       pui8DMCmd,
+                                                               IMG_UINT32                                      ui32PDumpFlags,
+                                                               IMG_UINT32                                      ui32ExtJobRef,
+                                                               IMG_UINT32                                      ui32SyncPMRCount,
+                                                               IMG_UINT32                                      *paui32SyncPMRFlags,
+                                                               PMR                                                     **ppsSyncPMRs,
+                                                               IMG_UINT32                                      ui32NumWorkgroups,
+                                                               IMG_UINT32                                      ui32NumWorkitems,
+                                                               IMG_UINT64                                      ui64DeadlineInus)
+{
+       RGXFWIF_KCCB_CMD                sCmpKCCBCmd;
+       RGX_CCB_CMD_HELPER_DATA asCmdHelperData[1];
+       PVRSRV_ERROR                    eError;
+       PVRSRV_ERROR                    eError2;
+       IMG_UINT32                              ui32CDMCmdOffset = 0;
+       PVRSRV_RGXDEV_INFO      *psDevInfo = FWCommonContextGetRGXDevInfo(psComputeContext->sComputeData.psServerCommonContext);
+       RGX_CLIENT_CCB          *psClientCCB = FWCommonContextGetClientCCB(psComputeContext->sComputeData.psServerCommonContext);
+       IMG_UINT32              ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal);
+       IMG_UINT32                              ui32FWCtx;
+       IMG_BOOL                                bCCBStateOpen = IMG_FALSE;
+
+       PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+       PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+       PRGXFWIF_UFO_ADDR       pRMWUFOAddr;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataCompute = {0};
+       IMG_UINT32 ui32CDMWorkloadDataRO = 0;
+       IMG_UINT32 ui32CDMCmdHeaderOffset = 0;
+       IMG_UINT32 ui32CDMCmdOffsetWrapCheck = 0;
+       RGX_WORKLOAD sWorkloadCharacteristics = {0};
+#endif
+
+       IMG_UINT64                              ui64FBSCEntryMask;
+       IMG_UINT32 ui32IntClientFenceCount = 0;
+       PRGXFWIF_UFO_ADDR *pauiIntFenceUFOAddress = NULL;
+       IMG_UINT32 ui32IntClientUpdateCount = 0;
+       PRGXFWIF_UFO_ADDR *pauiIntUpdateUFOAddress = NULL;
+       IMG_UINT32 *paui32IntUpdateValue = NULL;
+       PVRSRV_FENCE  iUpdateFence = PVRSRV_NO_FENCE;
+       IMG_UINT64 uiCheckFenceUID = 0;
+       IMG_UINT64 uiUpdateFenceUID = 0;
+       PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL;
+       PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+       IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+       IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+       PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL;
+       IMG_UINT32 ui32FenceTimelineUpdateValue = 0;
+       void *pvUpdateFenceFinaliseData = NULL;
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       struct pvr_buffer_sync_append_data *psBufferSyncData = NULL;
+       PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL;
+       IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0;
+       PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+       CMD_COMMON *psComputeCmdCmn = IMG_OFFSET_ADDR(pui8DMCmd, 0);
+
+       if (iUpdateTimeline >= 0 && !piUpdateFence)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       /* Ensure we haven't been given a null ptr to
+        * update values if we have been told we
+        * have updates
+        */
+       if (ui32ClientUpdateCount > 0)
+       {
+               PVR_LOG_RETURN_IF_FALSE(paui32ClientUpdateValue != NULL,
+                                       "paui32ClientUpdateValue NULL but "
+                                       "ui32ClientUpdateCount > 0",
+                                       PVRSRV_ERROR_INVALID_PARAMS);
+       }
+
+       /* Ensure the update fence name is null-terminated (required for safety) */
+       pszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH - 1] = '\0';
+
+       OSLockAcquire(psComputeContext->hLock);
+
+       eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListFence,
+                                                                       0,
+                                                                       NULL,
+                                                                       NULL);
+       if (eError != PVRSRV_OK)
+       {
+               goto err_populate_sync_addr_list;
+       }
+
+       ui32IntClientUpdateCount = ui32ClientUpdateCount;
+
+       eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListUpdate,
+                                                                       ui32ClientUpdateCount,
+                                                                       pauiClientUpdateUFODevVarBlock,
+                                                                       paui32ClientUpdateSyncOffset);
+       if (eError != PVRSRV_OK)
+       {
+               goto err_populate_sync_addr_list;
+       }
+       if (ui32IntClientUpdateCount && !pauiIntUpdateUFOAddress)
+       {
+               pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs;
+       }
+       paui32IntUpdateValue = paui32ClientUpdateValue;
+
+       if (ui32SyncPMRCount != 0)
+       {
+#if defined(SUPPORT_BUFFER_SYNC)
+               int err;
+
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Calling "
+                         "pvr_buffer_sync_resolve_and_create_fences", __func__));
+
+               err = pvr_buffer_sync_resolve_and_create_fences(
+                   psComputeContext->psBufferSyncContext,
+                   psComputeContext->psDeviceNode->hSyncCheckpointContext,
+                   ui32SyncPMRCount,
+                   ppsSyncPMRs,
+                   paui32SyncPMRFlags,
+                   &ui32BufferFenceSyncCheckpointCount,
+                   &apsBufferFenceSyncCheckpoints,
+                   &psBufferUpdateSyncCheckpoint,
+                   &psBufferSyncData
+               );
+
+               if (unlikely(err))
+               {
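+                       /* Map the errno returned by the buffer-sync layer onto a services error code */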
+                       switch (err)
+                       {
+                               case -EINTR:
+                                       eError = PVRSRV_ERROR_RETRY;
+                                       break;
+                               case -ENOMEM:
+                                       eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                                       break;
+                               default:
+                                       eError = PVRSRV_ERROR_INVALID_PARAMS;
+                                       break;
+                       }
+
+                       if (eError != PVRSRV_ERROR_RETRY)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "%s:   "
+                                       "pvr_buffer_sync_resolve_and_create_fences failed (%d)",
+                                       __func__, eError));
+                       }
+
+                       goto fail_resolve_input_fence;
+               }
+
+               /* Append buffer sync fences */
+               if (ui32BufferFenceSyncCheckpointCount > 0)
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d buffer sync checkpoints "
+                                 "to CDM Fence (&psComputeContext->sSyncAddrListFence=<%p>, "
+                                 "pauiIntFenceUFOAddress=<%p>)...", __func__,
+                                 ui32BufferFenceSyncCheckpointCount,
+                                 (void *) &psComputeContext->sSyncAddrListFence,
+                                 (void *) pauiIntFenceUFOAddress));
+
+                       SyncAddrListAppendAndDeRefCheckpoints(&psComputeContext->sSyncAddrListFence,
+                                                             ui32BufferFenceSyncCheckpointCount,
+                                                             apsBufferFenceSyncCheckpoints);
+                       if (pauiIntFenceUFOAddress == NULL)
+                       {
+                               pauiIntFenceUFOAddress = psComputeContext->sSyncAddrListFence.pasFWAddrs;
+                       }
+                       ui32IntClientFenceCount += ui32BufferFenceSyncCheckpointCount;
+               }
+
+               /* Append the update (from output fence) */
+               if (psBufferUpdateSyncCheckpoint)
+               {
+                       SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListUpdate,
+                                                     1, &psBufferUpdateSyncCheckpoint);
+                       if (pauiIntUpdateUFOAddress == NULL)
+                       {
+                               pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs;
+                       }
+                       ui32IntClientUpdateCount++;
+               }
+#else /* defined(SUPPORT_BUFFER_SYNC) */
+               PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers",
+                       __func__, ui32SyncPMRCount));
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto err_populate_sync_addr_list;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+       }
+
+       CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __func__, iCheckFence, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext));
+       /* Resolve the sync checkpoints that make up the input fence */
+       eError = SyncCheckpointResolveFence(psComputeContext->psDeviceNode->hSyncCheckpointContext,
+                                                                               iCheckFence,
+                                                                               &ui32FenceSyncCheckpointCount,
+                                                                               &apsFenceSyncCheckpoints,
+                                           &uiCheckFenceUID, ui32PDumpFlags);
+       if (eError != PVRSRV_OK)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __func__, eError));
+               goto fail_free_buffer_sync_data;
+       }
+       CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __func__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints));
+#if defined(CMP_CHECKPOINT_DEBUG)
+       if (ui32FenceSyncCheckpointCount > 0)
+       {
+               IMG_UINT32 ii;
+               for (ii=0; ii<ui32FenceSyncCheckpointCount; ii++)
+               {
+                       PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints + ii);
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s:    apsFenceSyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint));
+               }
+       }
+#endif
+       /* Create the output fence (if required) */
+       if (iUpdateTimeline != PVRSRV_NO_TIMELINE)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (iUpdateFence=%d, iUpdateTimeline=%d,  psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>)...", __func__, iUpdateFence, iUpdateTimeline, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext));
+               eError = SyncCheckpointCreateFence(psComputeContext->psDeviceNode,
+                                                  pszUpdateFenceName,
+                                                                                  iUpdateTimeline,
+                                                                                  psComputeContext->psDeviceNode->hSyncCheckpointContext,
+                                                                                  &iUpdateFence,
+                                                                                  &uiUpdateFenceUID,
+                                                                                  &pvUpdateFenceFinaliseData,
+                                                                                  &psUpdateSyncCheckpoint,
+                                                                                  (void*)&psFenceTimelineUpdateSync,
+                                                                                  &ui32FenceTimelineUpdateValue,
+                                                                                  ui32PDumpFlags);
+               if (eError != PVRSRV_OK)
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned error (%d)", __func__, eError));
+                       goto fail_create_output_fence;
+               }
+
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned from SyncCheckpointCreateFence (iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, ui32FenceTimelineUpdateValue=%u)", __func__, iUpdateFence, psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
+
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u, psFenceTimelineUpdateSync=<%p>", __func__, ui32IntClientUpdateCount, (void*)psFenceTimelineUpdateSync));
+               /* Append the sync prim update for the timeline (if required) */
+               if (psFenceTimelineUpdateSync)
+               {
+                       IMG_UINT32 *pui32TimelineUpdateWp = NULL;
+
+                       /* Allocate memory to hold the list of update values (including our timeline update) */
+                       pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+                       if (!pui32IntAllocatedUpdateValues)
+                       {
+                               /* Failed to allocate memory */
+                               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto fail_alloc_update_values_mem;
+                       }
+                       OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+                       /* Copy the update values into the new memory, then append our timeline update value */
+                       if (paui32IntUpdateValue)
+                       {
+                               OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount);
+                       }
+#if defined(CMP_CHECKPOINT_DEBUG)
+                       if (ui32IntClientUpdateCount > 0)
+                       {
+                               IMG_UINT32 iii;
+                               IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+                               CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u:", __func__, ui32IntClientUpdateCount));
+                               for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+                               {
+                                       CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                                       pui32Tmp++;
+                               }
+                       }
+#endif
+                       /* Now set the additional update value */
+                       pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount;
+                       *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
+                       ui32IntClientUpdateCount++;
+                       /* Now make sure paui32ClientUpdateValue points to pui32IntAllocatedUpdateValues */
+                       paui32ClientUpdateValue = pui32IntAllocatedUpdateValues;
+
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: append the timeline sync prim addr <%p> to the compute context update list", __func__,  (void*)psFenceTimelineUpdateSync));
+                       /* Now append the timeline sync prim addr to the compute context update list */
+                       SyncAddrListAppendSyncPrim(&psComputeContext->sSyncAddrListUpdate,
+                                                  psFenceTimelineUpdateSync);
+#if defined(CMP_CHECKPOINT_DEBUG)
+                       if (ui32IntClientUpdateCount > 0)
+                       {
+                               IMG_UINT32 iii;
+                               IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+                               CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u:", __func__, ui32IntClientUpdateCount));
+                               for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+                               {
+                                       CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                                       pui32Tmp++;
+                               }
+                       }
+#endif
+                       /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */
+                       paui32IntUpdateValue = pui32IntAllocatedUpdateValues;
+               }
+       }
+
+       /* Append the checks (from input fence) */
+       if (ui32FenceSyncCheckpointCount > 0)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d sync checkpoints to Compute CDM Fence (&psComputeContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psComputeContext->sSyncAddrListFence));
+#if defined(CMP_CHECKPOINT_DEBUG)
+               if (ui32IntClientUpdateCount > 0)
+               {
+                       IMG_UINT32 iii;
+                       IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress;
+
+                       for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+                       {
+                               CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                               pui32Tmp++;
+                       }
+               }
+#endif
+               SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListFence,
+                                                                         ui32FenceSyncCheckpointCount,
+                                                                         apsFenceSyncCheckpoints);
+               if (!pauiIntFenceUFOAddress)
+               {
+                       pauiIntFenceUFOAddress = psComputeContext->sSyncAddrListFence.pasFWAddrs;
+               }
+               ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
+       }
+#if defined(CMP_CHECKPOINT_DEBUG)
+       if (ui32IntClientUpdateCount > 0)
+       {
+               IMG_UINT32 iii;
+               IMG_UINT32 *pui32Tmp = (IMG_UINT32*)paui32IntUpdateValue;
+
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Dumping %d update values (paui32IntUpdateValue=<%p>)...", __func__, ui32IntClientUpdateCount, (void*)paui32IntUpdateValue));
+               for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: paui32IntUpdateValue[%d] = <%p>", __func__, iii, (void*)pui32Tmp));
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: *paui32IntUpdateValue[%d] = 0x%x", __func__, iii, *pui32Tmp));
+                       pui32Tmp++;
+               }
+       }
+#endif
+
+       if (psUpdateSyncCheckpoint)
+       {
+               /* Append the update (from output fence) */
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append 1 sync checkpoint to Compute CDM Update (&psComputeContext->sSyncAddrListUpdate=<%p>, psUpdateSyncCheckpoint=<%p>)...", __func__, (void*)&psComputeContext->sSyncAddrListUpdate , (void*)psUpdateSyncCheckpoint));
+               SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListUpdate,
+                                                                         1,
+                                                                         &psUpdateSyncCheckpoint);
+               if (!pauiIntUpdateUFOAddress)
+               {
+                       pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs;
+               }
+               ui32IntClientUpdateCount++;
+#if defined(CMP_CHECKPOINT_DEBUG)
+               if (ui32IntClientUpdateCount > 0)
+               {
+                       IMG_UINT32 iii;
+                       IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntUpdateUFOAddress;
+
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress=<%p>, pui32Tmp=<%p>, ui32IntClientUpdateCount=%u", __func__, (void*)pauiIntUpdateUFOAddress, (void*)pui32Tmp, ui32IntClientUpdateCount));
+                       for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+                       {
+                               CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                               pui32Tmp++;
+                       }
+               }
+#endif
+       }
+       CHKPT_DBG((PVR_DBG_ERROR, "%s:   (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount));
+
+#if (ENABLE_CMP_UFO_DUMP == 1)
+               PVR_DPF((PVR_DBG_ERROR, "%s: dumping Compute (CDM) fence/updates syncs...", __func__));
+               {
+                       IMG_UINT32 ii;
+                       PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress;
+                       PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress;
+                       IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue;
+
+                       /* Dump Fence syncs and Update syncs */
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Compute (CDM) fence syncs (&psComputeContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psComputeContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress));
+                       for (ii=0; ii<ui32IntClientFenceCount; ii++)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
+                               psTmpIntFenceUFOAddress++;
+                       }
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Compute (CDM) update syncs (&psComputeContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psComputeContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
+                       for (ii=0; ii<ui32IntClientUpdateCount; ii++)
+                       {
+                               if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
+                               }
+                               else
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue));
+                                       pui32TmpIntUpdateValue++;
+                               }
+                               psTmpIntUpdateUFOAddress++;
+                       }
+               }
+#endif
+       RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psComputeContext->psDeviceNode->pvDevice,
+                                 &pPreAddr,
+                                 &pPostAddr,
+                                 &pRMWUFOAddr);
+       /*
+        * Extract the FBSC entries from the MMU context for the deferred FBSC invalidate
+        * command; in other words, read the current mask and clear it afterwards.
+        * The FBSC entry mask must be extracted from the MMU ctx and updated just before
+        * the kick starts, as it must be ready at the time of context activation.
+        */
+       {
+               eError = RGXExtractFBSCEntryMaskFromMMUContext(psComputeContext->psDeviceNode,
+                                                                                                          FWCommonContextGetServerMMUCtx(psComputeContext->sComputeData.psServerCommonContext),
+                                                                                                          &ui64FBSCEntryMask);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "Failed to extract FBSC Entry Mask (%d)", eError));
+                       goto fail_cmdinvalfbsc;
+               }
+       }
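+       /* The extracted ui64FBSCEntryMask is handed to RGXCmdHelperInitCmdCCB() below so
+        * that the deferred FBSC invalidate is issued as part of this kick. */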
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       sWorkloadCharacteristics.sCompute.ui32NumberOfWorkgroups = ui32NumWorkgroups;
+       sWorkloadCharacteristics.sCompute.ui32NumberOfWorkitems  = ui32NumWorkitems;
+
+       /* Prepare workload estimation */
+       WorkEstPrepare(psComputeContext->psDeviceNode->pvDevice,
+                       &psComputeContext->sWorkEstData,
+                       &psComputeContext->sWorkEstData.uWorkloadMatchingData.sCompute.sDataCDM,
+                       RGXFWIF_CCB_CMD_TYPE_CDM,
+                       &sWorkloadCharacteristics,
+                       ui64DeadlineInus,
+                       &sWorkloadKickDataCompute);
+#endif
+
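+       /* Populate the CCB command helper with the resolved fence/update UFOs, the client
+        * DM command and (when enabled) the workload estimation kick data; the command is
+        * acquired and then released against the client CCB below. */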
+       RGXCmdHelperInitCmdCCB(psDevInfo,
+                              psClientCCB,
+                              ui64FBSCEntryMask,
+                              ui32IntClientFenceCount,
+                              pauiIntFenceUFOAddress,
+                              NULL,
+                              ui32IntClientUpdateCount,
+                              pauiIntUpdateUFOAddress,
+                              paui32IntUpdateValue,
+                              ui32CmdSize,
+                              pui8DMCmd,
+                           &pPreAddr,
+                           &pPostAddr,
+                           &pRMWUFOAddr,
+                              RGXFWIF_CCB_CMD_TYPE_CDM,
+                              ui32ExtJobRef,
+                              ui32IntJobRef,
+                              ui32PDumpFlags,
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+                              &sWorkloadKickDataCompute,
+#else
+                              NULL,
+#endif
+                              "Compute",
+                              bCCBStateOpen,
+                              asCmdHelperData);
+
+       eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asCmdHelperData), asCmdHelperData);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_cmdacquire;
+       }
+
+
+       /*
+               We should reserve space in the kernel CCB here and fill in the command
+               directly.
+               This is so that, if there isn't space in the kernel CCB, we can return
+               with retry to the services client before we take any operations.
+       */
+
+       /*
+               We might only be kicking to flush out a padding packet, so only submit
+               the command if the create was successful.
+       */
+       if (eError == PVRSRV_OK)
+       {
+               /*
+                       All the required resources are ready at this point, we can't fail so
+                       take the required server sync operations and commit all the resources
+               */
+
+               ui32CDMCmdOffset = RGXGetHostWriteOffsetCCB(psClientCCB);
+               RGXCmdHelperReleaseCmdCCB(1, asCmdHelperData, "CDM", FWCommonContextGetFWAddress(psComputeContext->sComputeData.psServerCommonContext).ui32Addr);
+       }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       /* The following is used to determine the offset of the command header containing
+          the workload estimation data so that it can be accessed when the KCCB is read */
+       ui32CDMCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(asCmdHelperData);
+
+       ui32CDMCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psComputeContext->sComputeData.psServerCommonContext));
+
+       /* This checks if the command would wrap around at the end of the CCB and
+        * therefore would start at an offset of 0 rather than the current command
+        * offset */
+       if (ui32CDMCmdOffset < ui32CDMCmdOffsetWrapCheck)
+       {
+               ui32CDMWorkloadDataRO = ui32CDMCmdOffset;
+       }
+       else
+       {
+               ui32CDMWorkloadDataRO = 0;
+       }
+#endif
+
+       /* Construct the kernel compute CCB command. */
+       sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+       sCmpKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psComputeContext->sComputeData.psServerCommonContext);
+       sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB);
+       sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB);
+       sCmpKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+       /* Add the Workload data into the KCCB kick */
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       /* Store the offset to the CCCB command header so that it can be referenced
+        * when the KCCB command reaches the FW */
+       sCmpKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui32CDMWorkloadDataRO + ui32CDMCmdHeaderOffset;
+#else
+       sCmpKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+#endif
+
+       ui32FWCtx = FWCommonContextGetFWAddress(psComputeContext->sComputeData.psServerCommonContext).ui32Addr;
+
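+       /* Log the CDM kick to the host trace buffer when a common command header is present */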
+       if (psComputeCmdCmn)
+       {
+               HTBLOGK(HTB_SF_MAIN_KICK_CDM,
+                               sCmpKCCBCmd.uCmdData.sCmdKickData.psContext,
+                               ui32CDMCmdOffset,
+                               psComputeCmdCmn->ui32FrameNum,
+                               ui32ExtJobRef,
+                               ui32IntJobRef
+                               );
+       }
+
+       RGXSRV_HWPERF_ENQ(psComputeContext, OSGetCurrentClientProcessIDKM(),
+                         ui32FWCtx, ui32ExtJobRef, ui32IntJobRef,
+                         RGX_HWPERF_KICK_TYPE_CDM,
+                         iCheckFence,
+                         iUpdateFence,
+                         iUpdateTimeline,
+                         uiCheckFenceUID,
+                         uiUpdateFenceUID,
+                         NO_DEADLINE,
+                         NO_CYCEST);
+
+       /*
+        * Submit the compute command to the firmware.
+        */
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               eError2 = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice,
+                                                                       RGXFWIF_DM_CDM,
+                                                                       &sCmpKCCBCmd,
+                                                                       ui32PDumpFlags);
+               if (eError2 != PVRSRV_ERROR_RETRY)
+               {
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       if (eError2 != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s failed to schedule kernel CCB command (%s)",
+                                __func__,
+                                PVRSRVGetErrorString(eError2)));
+               if (eError == PVRSRV_OK)
+               {
+                       eError = eError2;
+               }
+       }
+       else
+       {
+               PVRGpuTraceEnqueueEvent(psComputeContext->psDeviceNode->pvDevice,
+                                       ui32FWCtx, ui32ExtJobRef, ui32IntJobRef,
+                                       RGX_HWPERF_KICK_TYPE_CDM);
+       }
+       /*
+        * Now check eError (which may hold an error from our earlier call to
+        * RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command
+        * first, so we check it now.
+        */
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_cmdacquire;
+       }
+
+#if defined(NO_HARDWARE)
+       /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+       if (psUpdateSyncCheckpoint)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint)));
+               SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
+       }
+       if (psFenceTimelineUpdateSync)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Updating NOHW sync prim<%p> to %d", __func__, (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
+               SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
+       }
+       SyncCheckpointNoHWUpdateTimelines(NULL);
+#endif /* defined(NO_HARDWARE) */
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       if (psBufferSyncData)
+       {
+               pvr_buffer_sync_kick_succeeded(psBufferSyncData);
+       }
+       if (apsBufferFenceSyncCheckpoints)
+       {
+               kfree(apsBufferFenceSyncCheckpoints);
+       }
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+       *piUpdateFence = iUpdateFence;
+
+       if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE))
+       {
+               SyncCheckpointFinaliseFence(psComputeContext->psDeviceNode, iUpdateFence,
+                                           pvUpdateFenceFinaliseData,
+                                                                       psUpdateSyncCheckpoint, pszUpdateFenceName);
+       }
+       /* Drop the references taken on the sync checkpoints in the
+        * resolved input fence */
+       SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+                                                                apsFenceSyncCheckpoints);
+       /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+       if (apsFenceSyncCheckpoints)
+       {
+               SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+       }
+       /* Free memory allocated to hold the internal list of update values */
+       if (pui32IntAllocatedUpdateValues)
+       {
+               OSFreeMem(pui32IntAllocatedUpdateValues);
+               pui32IntAllocatedUpdateValues = NULL;
+       }
+
+       OSLockRelease(psComputeContext->hLock);
+
+       return PVRSRV_OK;
+
+fail_cmdacquire:
+fail_cmdinvalfbsc:
+       SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListFence);
+       SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListUpdate);
+fail_alloc_update_values_mem:
+       if (iUpdateFence != PVRSRV_NO_FENCE)
+       {
+               SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
+       }
+fail_create_output_fence:
+       /* Drop the references taken on the sync checkpoints in the
+        * resolved input fence */
+       SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+                                                                apsFenceSyncCheckpoints);
+
+fail_free_buffer_sync_data:
+#if defined(SUPPORT_BUFFER_SYNC)
+       if (psBufferSyncData)
+       {
+               pvr_buffer_sync_kick_failed(psBufferSyncData);
+       }
+       if (apsBufferFenceSyncCheckpoints)
+       {
+               kfree(apsBufferFenceSyncCheckpoints);
+       }
+
+fail_resolve_input_fence:
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+err_populate_sync_addr_list:
+       /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+       if (apsFenceSyncCheckpoints)
+       {
+               SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+       }
+       /* Free memory allocated to hold the internal list of update values */
+       if (pui32IntAllocatedUpdateValues)
+       {
+               OSFreeMem(pui32IntAllocatedUpdateValues);
+               pui32IntAllocatedUpdateValues = NULL;
+       }
+       OSLockRelease(psComputeContext->hLock);
+       return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
+{
+       RGXFWIF_KCCB_CMD sFlushCmd;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_UINT32 ui32kCCBCommandSlot;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice;
+
+#if defined(PDUMP)
+       PDUMPCOMMENTWITHFLAGS(psComputeContext->psDeviceNode,
+                             PDUMP_FLAGS_CONTINUOUS, "Submit Compute flush");
+#endif
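+       /* Build an SLC flush request (flush only, no invalidate) scoped to this context's DM */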
+       sFlushCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
+       sFlushCmd.uCmdData.sSLCFlushInvalData.ui64Size = 0;
+       sFlushCmd.uCmdData.sSLCFlushInvalData.ui64Address = 0;
+       sFlushCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_FALSE;
+       sFlushCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_TRUE;
+       sFlushCmd.uCmdData.sSLCFlushInvalData.psContext = FWCommonContextGetFWAddress(psComputeContext->sComputeData.psServerCommonContext);
+
+       OSLockAcquire(psComputeContext->hLock);
+
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo,
+                                                                       RGXFWIF_DM_CDM,
+                                                                       &sFlushCmd,
+                                                                       PDUMP_FLAGS_CONTINUOUS,
+                                                                       &ui32kCCBCommandSlot);
+               /* Keep retrying if we hit PVRSRV_ERROR_RETRY or PVRSRV_ERROR_KERNEL_CCB_FULL */
+               if ((eError != PVRSRV_ERROR_RETRY) &&
+                   (eError != PVRSRV_ERROR_KERNEL_CCB_FULL))
+               {
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       if (eError != PVRSRV_OK)
+       {
+               /* If we hit a temporary KCCB exhaustion, return a RETRY to caller */
+               if (eError == PVRSRV_ERROR_KERNEL_CCB_FULL)
+               {
+                       PVR_DPF((PVR_DBG_WARNING,
+                                "%s: Returning RETRY to caller", __func__));
+                       eError = PVRSRV_ERROR_RETRY;
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Failed to schedule SLC flush command (%s)",
+                                        __func__,
+                                        PVRSRVGetErrorString(eError)));
+               }
+       }
+       else
+       {
+               /* Wait for the SLC flush to complete */
+               eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Compute flush aborted (%s)",
+                                        __func__,
+                                        PVRSRVGetErrorString(eError)));
+               }
+               else if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] &
+                                 RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE))
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "%s: FW poll on a HW operation failed", __func__));
+               }
+       }
+
+       OSLockRelease(psComputeContext->hLock);
+       return eError;
+}
+
+
+PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT  *psComputeContext)
+{
+       RGXFWIF_KCCB_CMD  sKCCBCmd;
+       PVRSRV_ERROR      eError;
+
+       OSLockAcquire(psComputeContext->hLock);
+
+       /* Schedule the firmware command */
+       sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE;
+       sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psComputeContext->sComputeData.psServerCommonContext);
+
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               eError = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice,
+                                                                       RGXFWIF_DM_CDM,
+                                                                       &sKCCBCmd,
+                                                                       PDUMP_FLAGS_NONE);
+               if (eError != PVRSRV_ERROR_RETRY)
+               {
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to schedule the FW command %d (%s)",
+                               __func__,
+                               eError,
+                               PVRSRVGetErrorString(eError)));
+       }
+
+       OSLockRelease(psComputeContext->hLock);
+
+       return eError;
+}
+
+
+PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection,
+                                                  PVRSRV_DEVICE_NODE * psDeviceNode,
+                                                                                                 RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+                                                                                                 IMG_UINT32 ui32Priority)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+       OSLockAcquire(psComputeContext->hLock);
+
+       eError = ContextSetPriority(psComputeContext->sComputeData.psServerCommonContext,
+                                                               psConnection,
+                                                               psComputeContext->psDeviceNode->pvDevice,
+                                                               ui32Priority,
+                                                               RGXFWIF_DM_CDM);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the compute context (%s)", __func__, PVRSRVGetErrorString(eError)));
+       }
+
+       OSLockRelease(psComputeContext->hLock);
+       return eError;
+}
+
+/*
+ * PVRSRVRGXSetComputeContextPropertyKM
+ */
+PVRSRV_ERROR PVRSRVRGXSetComputeContextPropertyKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+                                                  RGX_CONTEXT_PROPERTY eContextProperty,
+                                                  IMG_UINT64 ui64Input,
+                                                  IMG_UINT64 *pui64Output)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       switch (eContextProperty)
+       {
+               case RGX_CONTEXT_PROPERTY_FLAGS:
+               {
+                       IMG_UINT32 ui32ContextFlags = (IMG_UINT32)ui64Input;
+
+                       OSLockAcquire(psComputeContext->hLock);
+                       eError = FWCommonContextSetFlags(psComputeContext->sComputeData.psServerCommonContext,
+                                                        ui32ContextFlags);
+                       OSLockRelease(psComputeContext->hLock);
+                       break;
+               }
+
+               default:
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty));
+                       eError = PVRSRV_ERROR_NOT_SUPPORTED;
+               }
+       }
+
+       return eError;
+}
+
+void DumpComputeCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                          DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                          void *pvDumpDebugFile,
+                          IMG_UINT32 ui32VerbLevel)
+{
+       DLLIST_NODE *psNode, *psNext;
+       OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock);
+       dllist_foreach_node(&psDevInfo->sComputeCtxtListHead, psNode, psNext)
+       {
+               RGX_SERVER_COMPUTE_CONTEXT *psCurrentServerComputeCtx =
+                       IMG_CONTAINER_OF(psNode, RGX_SERVER_COMPUTE_CONTEXT, sListNode);
+               DumpFWCommonContextInfo(psCurrentServerComputeCtx->sComputeData.psServerCommonContext,
+                                       pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+       }
+       OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock);
+}
+
+IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       IMG_UINT32 ui32ContextBitMask = 0;
+       DLLIST_NODE *psNode, *psNext;
+       OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock);
+       dllist_foreach_node(&psDevInfo->sComputeCtxtListHead, psNode, psNext)
+       {
+               RGX_SERVER_COMPUTE_CONTEXT *psCurrentServerComputeCtx =
+                       IMG_CONTAINER_OF(psNode, RGX_SERVER_COMPUTE_CONTEXT, sListNode);
+
+               if (CheckStalledClientCommonContext(psCurrentServerComputeCtx->sComputeData.psServerCommonContext, RGX_KICK_TYPE_DM_CDM)
+                       == PVRSRV_ERROR_CCCB_STALLED)
+               {
+                       ui32ContextBitMask |= RGX_KICK_TYPE_DM_CDM;
+               }
+       }
+       OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock);
+       return ui32ContextBitMask;
+}
+
+/*
+ * PVRSRVRGXGetLastDeviceErrorKM
+ */
+PVRSRV_ERROR PVRSRVRGXGetLastDeviceErrorKM(CONNECTION_DATA    *psConnection,
+                                           PVRSRV_DEVICE_NODE *psDeviceNode,
+                                           IMG_UINT32         *ui32Error)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       *ui32Error = psDevInfo->eLastDeviceError;
+       psDevInfo->eLastDeviceError = RGX_CONTEXT_RESET_REASON_NONE;
+       return PVRSRV_OK;
+}
+
+/******************************************************************************
+ End of file (rgxcompute.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxcompute.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxcompute.h
new file mode 100644 (file)
index 0000000..35d7dc1
--- /dev/null
@@ -0,0 +1,182 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX compute functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX compute functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXCOMPUTE_H)
+#define RGXCOMPUTE_H
+
+#include "devicemem.h"
+#include "device.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "connection_server.h"
+
+
+typedef struct _RGX_SERVER_COMPUTE_CONTEXT_ RGX_SERVER_COMPUTE_CONTEXT;
+
+/*!
+*******************************************************************************
+ @Function     PVRSRVRGXCreateComputeContextKM
+
+ @Description
+       Creates an RGX compute context for submitting commands to the CDM.
+
+ @Input psConnection - Connection requesting the context
+ @Input psDeviceNode - Services-managed device
+ @Input ui32Priority - Scheduling priority for commands on this context
+ @Input ui32FrameworkCommandSize - Size (in bytes) of the framework command
+ @Input pabyFrameworkCommand - Framework command data
+ @Input hMemCtxPrivData - Memory context private data
+ @Input ui32StaticComputeContextStateSize - Size (in bytes) of the static compute context state
+ @Input pStaticComputeContextState - Static compute context state data
+ @Input ui32PackedCCBSizeU88 - Packed CCB size. The first byte contains the
+               log2 CCB size and the second byte the log2 maximum CCB size.
+ @Input ui32ContextFlags - Flags for context creation
+ @Input ui64RobustnessAddress - Robustness address for this context
+ @Input ui32MaxDeadlineMS - Maximum deadline (in milliseconds) for work on this context
+ @Output ppsComputeContext - Handle to the created compute context
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA                   *psConnection,
+                                                                                        PVRSRV_DEVICE_NODE                     *psDeviceNode,
+                                                                                        IMG_UINT32                                     ui32Priority,
+                                                                                        IMG_UINT32                                     ui32FrameworkCommandSize,
+                                                                                        IMG_PBYTE                                      pabyFrameworkCommand,
+                                                                                        IMG_HANDLE                                     hMemCtxPrivData,
+                                                                                        IMG_UINT32                                     ui32StaticComputeContextStateSize,
+                                                                                        IMG_PBYTE                                      pStaticComputeContextState,
+                                                                                        IMG_UINT32                                     ui32PackedCCBSizeU88,
+                                                                                        IMG_UINT32                                     ui32ContextFlags,
+                                                                                        IMG_UINT64                                     ui64RobustnessAddress,
+                                                                                        IMG_UINT32                                     ui32MaxDeadlineMS,
+                                                                                        RGX_SERVER_COMPUTE_CONTEXT     **ppsComputeContext);
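+
+/* Illustrative sketch only (not part of the bridge API): assuming the "first byte"
+ * of the U88 value described above is the least significant one, a caller could
+ * pack it as
+ *
+ *     ui32PackedCCBSizeU88 = (ui32Log2MaxCCBSize << 8) | ui32Log2CCBSize;
+ *
+ * e.g. 0x0F0C requests a 2^12 (4KB) CCB with a 2^15 (32KB) maximum. The variable
+ * names here are hypothetical.
+ */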
+
+/*!
+*******************************************************************************
+ @Function     PVRSRVRGXDestroyComputeContextKM
+
+ @Description
+       Server-side implementation of RGXDestroyComputeContext
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
+
+
+/*!
+*******************************************************************************
+ @Function     PVRSRVRGXKickCDMKM
+
+ @Description
+       Server-side implementation of RGXKickCDM
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT     *psComputeContext,
+                                                               IMG_UINT32                                      ui32ClientUpdateCount,
+                                                               SYNC_PRIMITIVE_BLOCK            **pauiClientUpdateUFODevVarBlock,
+                                                               IMG_UINT32                                      *paui32ClientUpdateSyncOffset,
+                                                               IMG_UINT32                                      *paui32ClientUpdateValue,
+                                                               PVRSRV_FENCE                            iCheckFence,
+                                                               PVRSRV_TIMELINE                         iUpdateTimeline,
+                                                               PVRSRV_FENCE                            *piUpdateFence,
+                                                               IMG_CHAR                                        pcszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],
+                                                               IMG_UINT32                                      ui32CmdSize,
+                                                               IMG_PBYTE                                       pui8DMCmd,
+                                                               IMG_UINT32                                      ui32PDumpFlags,
+                                                               IMG_UINT32                                      ui32ExtJobRef,
+                                                               IMG_UINT32                                      ui32SyncPMRCount,
+                                                               IMG_UINT32                                      *paui32SyncPMRFlags,
+                                                               PMR                                                     **ppsSyncPMRs,
+                                                               IMG_UINT32                                      ui32NumWorkgroups,
+                                                               IMG_UINT32                                      ui32NumWorkitems,
+                                                               IMG_UINT64                                      ui64DeadlineInus);
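+
+/* Note: ui32SyncPMRCount/ppsSyncPMRs are only honoured when the driver is built with
+ * SUPPORT_BUFFER_SYNC; otherwise a non-zero ui32SyncPMRCount fails the kick with
+ * PVRSRV_ERROR_INVALID_PARAMS (see PVRSRVRGXKickCDMKM in rgxcompute.c). */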
+
+/*!
+*******************************************************************************
+ @Function     PVRSRVRGXFlushComputeDataKM
+
+ @Description
+       Server-side implementation of RGXFlushComputeData
+
+ @Input psComputeContext - Compute context to flush
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
+
+/*!
+*******************************************************************************
+
+ @Function         PVRSRVRGXNotifyComputeWriteOffsetUpdateKM
+ @Description   Server-side implementation of RGXNotifyComputeWriteOffsetUpdate
+
+ @Input         psComputeContext - Compute context to notify of the write offset update
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
+
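+/* Set the priority of the given server-side compute context */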
+PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection,
+                                                                                                 PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                                                 RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+                                                                                                 IMG_UINT32 ui32Priority);
+
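+/* Query or modify a property of the given server-side compute context */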
+PVRSRV_ERROR PVRSRVRGXSetComputeContextPropertyKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+                                                                                                 RGX_CONTEXT_PROPERTY eContextProperty,
+                                                                                                 IMG_UINT64 ui64Input,
+                                                                                                 IMG_UINT64 *pui64Output);
+
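+/* Retrieve the last device error recorded for this device */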
+PVRSRV_ERROR PVRSRVRGXGetLastDeviceErrorKM(CONNECTION_DATA    *psConnection,
+                                           PVRSRV_DEVICE_NODE *psDeviceNode,
+                                           IMG_UINT32         *ui32Error);
+
+/* Debug - Dump debug info of compute contexts on this device */
+void DumpComputeCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                          DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                          void *pvDumpDebugFile,
+                          IMG_UINT32 ui32VerbLevel);
+
+/* Debug/Watchdog - check if client compute contexts are stalled */
+IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+#endif /* RGXCOMPUTE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxdebug.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxdebug.c
new file mode 100644 (file)
index 0000000..3f0a353
--- /dev/null
@@ -0,0 +1,3809 @@
+/*************************************************************************/ /*!
+@File
+@Title          Rgx debug information
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX debugging functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+
+#include "img_defs.h"
+#include "rgxdefs_km.h"
+#include "rgxdevice.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "cache_km.h"
+#include "osfunc.h"
+
+#include "rgxdebug.h"
+#include "pvrversion.h"
+#include "pvr_debug.h"
+#include "srvkm.h"
+#include "rgxutils.h"
+#include "tlstream.h"
+#include "rgxfwutils.h"
+#include "pvrsrv.h"
+#include "services_km.h"
+
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "devicemem_utils.h"
+#include "rgx_fwif_km.h"
+#include "rgx_fwif_sf.h"
+#include "rgxfw_log_helper.h"
+#include "fwtrace_string.h"
+#include "rgxfwimageutils.h"
+#include "fwload.h"
+
+#include "rgxta3d.h"
+#include "rgxkicksync.h"
+#include "rgxcompute.h"
+#include "rgxtdmtransfer.h"
+#include "rgxtimecorr.h"
+#include "rgx_options.h"
+#include "rgxinit.h"
+#include "rgxlayer_impl.h"
+#include "devicemem_history_server.h"
+#include "info_page.h"
+
+#define PVR_DUMP_FIRMWARE_INFO(x)                                                                                                              \
+       PVR_DUMPDEBUG_LOG("FW info: %d.%d @ %8d (%s) build options: 0x%08x",                            \
+                                               PVRVERSION_UNPACK_MAJ((x).ui32DDKVersion),                                              \
+                                               PVRVERSION_UNPACK_MIN((x).ui32DDKVersion),                                              \
+                                               (x).ui32DDKBuild,                                                                                               \
+                                               ((x).ui32BuildOptions & OPTIONS_DEBUG_MASK) ? "debug":"release",\
+                                               (x).ui32BuildOptions);
+
+#define DD_SUMMARY_INDENT  ""
+#define DD_NORMAL_INDENT   "    "
+
+#define RGX_DEBUG_STR_SIZE                     (150U)
+#define MAX_FW_DESCRIPTION_LENGTH      (600U)
+
+
+#define RGX_TEXAS_BIF0_ID                              (0)
+#define RGX_TEXAS_BIF1_ID                              (1)
+
+/*
+ *  The first 7 or 8 cat bases are memory contexts used for PM
+ *  or firmware. The rest are application contexts. The numbering
+ *  is zero-based.
+ */
+#if defined(SUPPORT_TRUSTED_DEVICE)
+#define MAX_RESERVED_FW_MMU_CONTEXT            (7)
+#else
+#define MAX_RESERVED_FW_MMU_CONTEXT            (6)
+#endif
+
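+/* Human-readable names for the firmware power states, expanded from RGXFWIF_POW_STATES */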
+static const IMG_CHAR *const pszPowStateName[] =
+{
+#define X(NAME)        #NAME,
+       RGXFWIF_POW_STATES
+#undef X
+};
+
+typedef struct _IMG_FLAGS2DESC_
+{
+       IMG_UINT32              uiFlag;
+       const IMG_CHAR  *pszLabel;
+} IMG_FLAGS2DESC;
+
+static const IMG_CHAR * const apszFwOsStateName[RGXFW_CONNECTION_FW_STATE_COUNT] =
+{
+       "offline",
+       "ready",
+       "active",
+       "offloading"
+};
+
+#if defined(PVR_ENABLE_PHR)
+static const IMG_FLAGS2DESC asPHRConfig2Description[] =
+{
+       {BIT_ULL(RGXFWIF_PHR_MODE_OFF), "off"},
+       {BIT_ULL(RGXFWIF_PHR_MODE_RD_RESET), "reset RD hardware"},
+	{BIT_ULL(RGXFWIF_PHR_MODE_FULL_RESET), "full gpu reset"},
+};
+#endif
+
+#if !defined(NO_HARDWARE)
+static PVRSRV_ERROR
+RGXPollMetaRegThroughSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32RegOffset,
+                        IMG_UINT32 ui32PollValue, IMG_UINT32 ui32Mask)
+{
+       IMG_UINT32 ui32RegValue, ui32NumPolls = 0;
+       PVRSRV_ERROR eError;
+
+       do
+       {
+               eError = RGXReadFWModuleAddr(psDevInfo, ui32RegOffset, &ui32RegValue);
+               if (eError != PVRSRV_OK)
+               {
+                       return eError;
+               }
+       } while (((ui32RegValue & ui32Mask) != ui32PollValue) && (ui32NumPolls++ < 1000));
+
+       return ((ui32RegValue & ui32Mask) == ui32PollValue) ? PVRSRV_OK : PVRSRV_ERROR_RETRY;
+}
+
+static PVRSRV_ERROR
+RGXReadMetaCoreReg(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32RegAddr, IMG_UINT32 *pui32RegVal)
+{
+       PVRSRV_ERROR eError;
+
+       /* Core Read Ready? */
+       eError = RGXPollMetaRegThroughSP(psDevInfo,
+                                        META_CR_TXUXXRXRQ_OFFSET,
+                                        META_CR_TXUXXRXRQ_DREADY_BIT,
+                                                                        META_CR_TXUXXRXRQ_DREADY_BIT);
+       PVR_LOG_RETURN_IF_ERROR(eError, "RGXPollMetaRegThroughSP");
+
+       /* Set the reg we are interested in reading */
+       eError = RGXWriteFWModuleAddr(psDevInfo, META_CR_TXUXXRXRQ_OFFSET,
+                               ui32RegAddr | META_CR_TXUXXRXRQ_RDnWR_BIT);
+       PVR_LOG_RETURN_IF_ERROR(eError, "RGXWriteFWModuleAddr");
+
+       /* Core Read Done? */
+       eError = RGXPollMetaRegThroughSP(psDevInfo,
+                                        META_CR_TXUXXRXRQ_OFFSET,
+                                        META_CR_TXUXXRXRQ_DREADY_BIT,
+                                                                        META_CR_TXUXXRXRQ_DREADY_BIT);
+       PVR_LOG_RETURN_IF_ERROR(eError, "RGXPollMetaRegThroughSP");
+
+       /* Read the value */
+       return RGXReadFWModuleAddr(psDevInfo, META_CR_TXUXXRXDT_OFFSET, pui32RegVal);
+}
+#endif
+
+#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE)
+static PVRSRV_ERROR _ValidateWithFWModule(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                               void *pvDumpDebugFile,
+                                               PVRSRV_RGXDEV_INFO *psDevInfo,
+                                               RGXFWIF_DEV_VIRTADDR *psFWAddr,
+                                               void *pvHostCodeAddr,
+                                               IMG_UINT32 ui32MaxLen,
+                                               const IMG_CHAR *pszDesc,
+                                               IMG_UINT32 ui32StartOffset)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_UINT32 ui32Value = 0;
+       IMG_UINT32 ui32FWCodeDevVAAddr = psFWAddr->ui32Addr + ui32StartOffset;
+       IMG_UINT32 *pui32FWCode = (IMG_PUINT32) ((IMG_PBYTE)pvHostCodeAddr + ui32StartOffset);
+       IMG_UINT32 i;
+
+#if defined(EMULATOR)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+       {
+               return PVRSRV_OK;
+       }
+#endif
+
+       ui32MaxLen -= ui32StartOffset;
+       ui32MaxLen /= sizeof(IMG_UINT32); /* Byte -> 32 bit words */
+
+       for (i = 0; i < ui32MaxLen; i++)
+       {
+               eError = RGXReadFWModuleAddr(psDevInfo, ui32FWCodeDevVAAddr, &ui32Value);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: %s", __func__, PVRSRVGetErrorString(eError)));
+                       return eError;
+               }
+
+#if defined(EMULATOR)
+               if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+		/* Check if this is a PM fault */
+               {
+                       PVR_DPF((PVR_DBG_VERBOSE, "0x%x: CPU 0x%08x, FW 0x%08x", i * 4, pui32FWCode[i], ui32Value));
+
+                       if (pui32FWCode[i] != ui32Value)
+                       {
+                               PVR_DUMPDEBUG_LOG("%s: Mismatch while validating %s at offset 0x%x: CPU 0x%08x (%p), FW 0x%08x (%x)",
+                                        __func__, pszDesc,
+                                        (i * 4) + ui32StartOffset, pui32FWCode[i], pui32FWCode, ui32Value, ui32FWCodeDevVAAddr);
+                               return PVRSRV_ERROR_FW_IMAGE_MISMATCH;
+                       }
+               }
+
+               ui32FWCodeDevVAAddr += 4;
+       }
+
+       PVR_DUMPDEBUG_LOG("Match between Host and Firmware view of the %s", pszDesc);
+       return PVRSRV_OK;
+}
+#endif
+
+#if !defined(NO_HARDWARE)
+static PVRSRV_ERROR _ValidateFWImage(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                               void *pvDumpDebugFile,
+                                               PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE)
+       PVRSRV_ERROR eError;
+       IMG_UINT32 *pui32HostFWCode = NULL, *pui32HostFWCoremem = NULL;
+       OS_FW_IMAGE *psRGXFW = NULL;
+       const IMG_BYTE *pbRGXFirmware = NULL;
+       RGXFWIF_DEV_VIRTADDR sFWAddr;
+       IMG_UINT32 ui32StartOffset = 0;
+       RGX_LAYER_PARAMS sLayerParams;
+       sLayerParams.psDevInfo = psDevInfo;
+
+#if defined(EMULATOR)
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+       {
+               PVR_DUMPDEBUG_LOG("Validation of RISC-V FW code is disabled on emulator");
+               return PVRSRV_OK;
+       }
+#endif
+
+       if (psDevInfo->pvRegsBaseKM == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: RGX registers not mapped yet!", __func__));
+               return PVRSRV_ERROR_BAD_MAPPING;
+       }
+
+       /* Load FW from system for code verification */
+       pui32HostFWCode = OSAllocZMem(psDevInfo->ui32FWCodeSizeInBytes);
+       if (pui32HostFWCode == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to allocate memory for FW code. "
+				"Skipping FW code verification",
+                               __func__));
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       if (psDevInfo->ui32FWCorememCodeSizeInBytes)
+       {
+               pui32HostFWCoremem = OSAllocZMem(psDevInfo->ui32FWCorememCodeSizeInBytes);
+               if (pui32HostFWCoremem == NULL)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+					"%s: Failed to allocate memory for FW core code. "
+					"Skipping FW code verification",
+                                       __func__));
+                       eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                       goto freeHostFWCode;
+               }
+       }
+
+       /* Load FW image */
+       eError = RGXLoadAndGetFWData(psDevInfo->psDeviceNode, &psRGXFW, &pbRGXFirmware);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to load FW image file (%s).",
+                        __func__, PVRSRVGetErrorString(eError)));
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto cleanup_initfw;
+       }
+
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+       {
+               eError = ProcessLDRCommandStream(&sLayerParams, pbRGXFirmware,
+                                               (void*) pui32HostFWCode, NULL,
+                                               (void*) pui32HostFWCoremem, NULL, NULL);
+       }
+       else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+       {
+               eError = ProcessELFCommandStream(&sLayerParams, pbRGXFirmware,
+                                                pui32HostFWCode, NULL,
+                                                pui32HostFWCoremem, NULL);
+       }
+
+       if (eError != PVRSRV_OK)
+       {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to parse FW image file.", __func__));
+               goto cleanup_initfw;
+       }
+
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+       {
+		/* Start checking after the boot loader config */
+               sFWAddr.ui32Addr = RGXFW_BOOTLDR_META_ADDR;
+
+               ui32StartOffset = RGXFW_MAX_BOOTLDR_OFFSET;
+       }
+       else
+       {
+               /* Use bootloader code remap which is always configured before the FW is started */
+               sFWAddr.ui32Addr = RGXRISCVFW_BOOTLDR_CODE_BASE;
+       }
+
+       eError = _ValidateWithFWModule(pfnDumpDebugPrintf, pvDumpDebugFile,
+                                       psDevInfo, &sFWAddr,
+                                       pui32HostFWCode, psDevInfo->ui32FWCodeSizeInBytes,
+                                       "FW code", ui32StartOffset);
+       if (eError != PVRSRV_OK)
+       {
+               goto cleanup_initfw;
+       }
+
+       if (psDevInfo->ui32FWCorememCodeSizeInBytes)
+       {
+               if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+               {
+                       sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, META_COREMEM_CODE);
+               }
+               else
+               {
+                       sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, RISCV_COREMEM_CODE);
+
+                       /* Core must be halted while issuing abstract commands */
+                       eError = RGXRiscvHalt(psDevInfo);
+                       PVR_GOTO_IF_ERROR(eError, cleanup_initfw);
+               }
+
+               eError = _ValidateWithFWModule(pfnDumpDebugPrintf, pvDumpDebugFile,
+                                               psDevInfo, &sFWAddr,
+                                               pui32HostFWCoremem, psDevInfo->ui32FWCorememCodeSizeInBytes,
+                                               "FW coremem code", 0);
+
+               if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+               {
+                       eError = RGXRiscvResume(psDevInfo);
+                       PVR_GOTO_IF_ERROR(eError, cleanup_initfw);
+               }
+       }
+
+cleanup_initfw:
+       if (psRGXFW)
+       {
+               OSUnloadFirmware(psRGXFW);
+       }
+
+       if (pui32HostFWCoremem)
+       {
+               OSFreeMem(pui32HostFWCoremem);
+       }
+freeHostFWCode:
+       if (pui32HostFWCode)
+       {
+               OSFreeMem(pui32HostFWCode);
+       }
+       return eError;
+#else
+       PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf);
+       PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile);
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+       return PVRSRV_OK;
+#endif
+}
+#endif /* !defined(NO_HARDWARE) */
+
+#if defined(SUPPORT_FW_VIEW_EXTRA_DEBUG)
+PVRSRV_ERROR ValidateFWOnLoad(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE)
+       IMG_PBYTE pbCodeMemoryPointer;
+       PVRSRV_ERROR eError;
+       RGXFWIF_DEV_VIRTADDR sFWAddr;
+
+       eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, (void **)&pbCodeMemoryPointer);
+       if (eError != PVRSRV_OK)
+       {
+               return eError;
+       }
+
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+       {
+               sFWAddr.ui32Addr = RGXFW_BOOTLDR_META_ADDR;
+       }
+       else
+       {
+               PVR_ASSERT(RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR));
+               sFWAddr.ui32Addr = RGXRISCVFW_BOOTLDR_CODE_BASE;
+	}
+
+       eError = _ValidateWithFWModule(NULL, NULL, psDevInfo, &sFWAddr, pbCodeMemoryPointer, psDevInfo->ui32FWCodeSizeInBytes, "FW code", 0);
+       if (eError != PVRSRV_OK)
+       {
+               goto releaseFWCodeMapping;
+       }
+
+       if (psDevInfo->ui32FWCorememCodeSizeInBytes)
+       {
+               eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc, (void **)&pbCodeMemoryPointer);
+		if (eError != PVRSRV_OK)
+		{
+			/* The coremem mapping was not acquired, so only release the FW code mapping */
+			goto releaseFWCodeMapping;
+		}
+
+               if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+               {
+                       sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, META_COREMEM_CODE);
+               }
+               else
+               {
+                       PVR_ASSERT(RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR));
+                       sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, RISCV_COREMEM_CODE);
+               }
+
+               eError = _ValidateWithFWModule(NULL, NULL, psDevInfo, &sFWAddr, pbCodeMemoryPointer,
+                                               psDevInfo->ui32FWCorememCodeSizeInBytes, "FW coremem code", 0);
+       }
+
+releaseFWCoreCodeMapping:
+       if (psDevInfo->ui32FWCorememCodeSizeInBytes)
+       {
+               DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc);
+       }
+releaseFWCodeMapping:
+       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc);
+
+       return eError;
+#else
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+       return PVRSRV_OK;
+#endif
+}
+#endif
+
+
+/*!
+*******************************************************************************
+
+ @Function     _RGXDecodeMMULevel
+
+ @Description
+
+ Return the name for the MMU level that faulted.
+
+ @Input ui32MMULevel    - MMU level
+
+ @Return   IMG_CHAR* to the string describing the MMU level that faulted.
+
+******************************************************************************/
+static const IMG_CHAR* _RGXDecodeMMULevel(IMG_UINT32 ui32MMULevel)
+{
+       const IMG_CHAR* pszMMULevel = "";
+
+       switch (ui32MMULevel)
+       {
+               case 0x0: pszMMULevel = " (Page Table)"; break;
+               case 0x1: pszMMULevel = " (Page Directory)"; break;
+               case 0x2: pszMMULevel = " (Page Catalog)"; break;
+               case 0x3: pszMMULevel = " (Cat Base Reg)"; break;
+       }
+
+       return pszMMULevel;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function     _RGXDecodeMMUReqTags
+
+ @Description
+
+ Decodes the MMU Tag ID and Sideband data fields from RGX_CR_MMU_FAULT_META_STATUS and
+ RGX_CR_MMU_FAULT_STATUS regs.
+
+ @Input psDevInfo           - RGX device info
+ @Input ui32TagID           - Tag ID value
+ @Input ui32BIFModule       - BIF module
+ @Input bRead               - Read flag
+ @Input bWriteBack          - Write Back flag
+ @Input bFBMFault           - FBM fault flag
+ @Output ppszTagID          - Decoded string from the Tag ID
+ @Output ppszTagSB          - Decoded string from the Tag SB
+ @Input pszScratchBuf       - Buffer provided to the function to generate the debug strings
+ @Input ui32ScratchBufSize  - Size of the provided buffer
+
+ @Return   void
+
+******************************************************************************/
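+/* Tag ID ranges used when decoding the Texas and Jones BIF requestor tags */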
+#define RGX_TEXAS_BIF0_MCU_L1_TAG_LAST__SERIES8                (12)
+#define RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_FIRST__SERIES8  (15)
+#define RGX_TEXAS_BIF0_MCU_L1_TAG_LAST__ALBIORIX               (6)
+#define RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_FIRST__ALBIORIX (9)
+#define RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_LAST    (33)
+#define RGX_TEXAS_BIF0_TAG_RTU_RAC_FIRST       (41)
+#define RGX_TEXAS_BIF0_TAG_RTU_RAC_LAST                (48)
+#define RGX_TEXAS_BIF0_TAG_LAST                                (51)
+
+#define RGX_TEXAS_BIF1_TAG_LAST                                (26)
+
+#define RGX_JONES_BIF_IPP_TAG                          (0)
+#define RGX_JONES_BIF_DCE_TAG_FIRST                    (1)
+#define RGX_JONES_BIF_DCE_TAG_LAST                     (14)
+#define RGX_JONES_BIF_TDM_TAG_FIRST                    (15)
+#define RGX_JONES_BIF_TDM_TAG_LAST                     (19)
+#define RGX_JONES_BIF_PM_TAG                           (20)
+#define RGX_JONES_BIF_CDM_TAG_FIRST                    (21)
+#define RGX_JONES_BIF_CDM_TAG_LAST                     (31)
+#define RGX_JONES_BIF_META_TAG                         (32)
+#define RGX_JONES_BIF_META_DMA_TAG                     (33)
+#define RGX_JONES_BIF_TE_TAG_FIRST                     (34)
+#define RGX_JONES_BIF_TE_TAG_LAST                      (47)
+#define RGX_JONES_BIF_RTU_TAG_FIRST                    (48)
+#define RGX_JONES_BIF_RTU_TAG_LAST                     (53)
+#define RGX_JONES_BIF_RPM_TAG                          (54)
+#define RGX_JONES_BIF_TAG_LAST                         (54)
+
+
+/* The MCU L1 requestors are common to all Texas BIFs so put them
+ * in their own function. */
+static INLINE void _RGXDecodeMMUReqMCULevel1(PVRSRV_RGXDEV_INFO    *psDevInfo,
+                                                                                        IMG_UINT32  ui32TagID,
+                                                                                        IMG_CHAR    **ppszTagSB)
+{
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE))
+       {
+               switch (ui32TagID)
+               {
+                       case  0: *ppszTagSB = "IP0 PDS"; break;
+                       case  1: *ppszTagSB = "IP0 Global"; break;
+                       case  2: *ppszTagSB = "IP1 PDS"; break;
+                       case  3: *ppszTagSB = "IP1 Global"; break;
+                       case  4: *ppszTagSB = "IP2 PDS"; break;
+                       case  5: *ppszTagSB = "IP2 Global"; break;
+               }
+       }
+       else
+       {
+               switch (ui32TagID)
+               {
+                       case  0: *ppszTagSB = "IP0 PDS"; break;
+                       case  1: *ppszTagSB = "IP0 Global"; break;
+                       case  2: *ppszTagSB = "IP0 BSC"; break;
+                       case  3: *ppszTagSB = "IP0 Constants"; break;
+
+                       case  4: *ppszTagSB = "IP1 PDS"; break;
+                       case  5: *ppszTagSB = "IP1 Global"; break;
+                       case  6: *ppszTagSB = "IP1 BSC"; break;
+                       case  7: *ppszTagSB = "IP1 Constants"; break;
+
+                       case  8: *ppszTagSB = "IP2 PDS"; break;
+                       case  9: *ppszTagSB = "IP2 Global"; break;
+                       case 10: *ppszTagSB = "IP2 BSC"; break;
+                       case 11: *ppszTagSB = "IP2 Constants"; break;
+               }
+       }
+}
+
+static void _RGXDecodeMMUReqTags(PVRSRV_RGXDEV_INFO    *psDevInfo,
+                                                                IMG_UINT32  ui32TagID,
+                                                                IMG_UINT32  ui32BIFModule,
+                                                                IMG_BOOL    bRead,
+                                                                IMG_BOOL    bWriteBack,
+                                                                IMG_BOOL    bFBMFault,
+                                                                IMG_CHAR    **ppszTagID,
+                                                                IMG_CHAR    **ppszTagSB,
+                                                                IMG_CHAR    *pszScratchBuf,
+                                                                IMG_UINT32  ui32ScratchBufSize)
+{
+       IMG_UINT32 ui32BIFsPerSPU = 2;
+       IMG_CHAR   *pszTagID = "-";
+       IMG_CHAR   *pszTagSB = "-";
+
+       PVR_ASSERT(ppszTagID != NULL);
+       PVR_ASSERT(ppszTagSB != NULL);
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE))
+       {
+               ui32BIFsPerSPU = 4;
+       }
+
+       if (bFBMFault)
+       {
+               pszTagID = "FBM";
+               if (bWriteBack)
+               {
+                       pszTagSB = "Header/state cache request";
+               }
+       }
+       else if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_SPU) &&
+                ui32BIFModule <  RGX_GET_FEATURE_VALUE(psDevInfo, NUM_SPU)*ui32BIFsPerSPU)
+       {
+               if ((ui32BIFModule % ui32BIFsPerSPU) == 0)
+               {
+                       IMG_UINT32 ui32Tag_RGX_TEXAS_BIF0_MCU_L1_TAG_LAST =
+                               (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE))
+                               ? RGX_TEXAS_BIF0_MCU_L1_TAG_LAST__ALBIORIX
+                               : RGX_TEXAS_BIF0_MCU_L1_TAG_LAST__SERIES8;
+                       IMG_UINT32 ui32Tag_RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_FIRST =
+                               (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE))
+                               ? RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_FIRST__ALBIORIX
+                               : RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_FIRST__SERIES8;
+
+                       /* Texas 0 BIF */
+                       if (ui32TagID < ui32Tag_RGX_TEXAS_BIF0_MCU_L1_TAG_LAST)
+                       {
+                               pszTagID = "MCU L1";
+                               _RGXDecodeMMUReqMCULevel1(psDevInfo, ui32TagID, &pszTagSB);
+                       }
+                       else if (ui32TagID < ui32Tag_RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_FIRST)
+                       {
+                               if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE))
+                               {
+                                       switch (ui32TagID)
+                                       {
+                                               case 6: pszTagID = "TCU L1"; break;
+                                               case 7:
+                                               case 8: pszTagID = "PBE0"; break;
+                                       }
+                               }
+                               else
+                               {
+                                       switch (ui32TagID)
+                                       {
+                                               case 12: pszTagID = "TCU L1"; break;
+                                               case 13:
+                                               case 14: pszTagID = "PBE0"; break;
+                                       }
+                               }
+                       }
+                       else if (ui32TagID <= RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_LAST)
+                       {
+                               pszTagID = "IPF ID Array";
+                       }
+                       else if (ui32TagID < RGX_TEXAS_BIF0_TAG_RTU_RAC_FIRST)
+                       {
+                               switch (ui32TagID)
+                               {
+                                       case 34: pszTagID = "IPF_CPF"; break;
+                                       case 35: pszTagID = "PPP"; break;
+                                       case 36:
+                                       case 37: pszTagID = "ISP0 ID Array"; break;
+                                       case 38:
+                                       case 39: pszTagID = "ISP2 ID Array"; break;
+                                       case 40: pszTagID = "VCE RTC"; break;
+                               }
+                       }
+                       else if (ui32TagID <= RGX_TEXAS_BIF0_TAG_RTU_RAC_LAST)
+                       {
+                               pszTagID = "RTU RAC";
+                       }
+                       else if (ui32TagID <= RGX_TEXAS_BIF0_TAG_LAST)
+                       {
+                               switch (ui32TagID)
+                               {
+                                       case 49: pszTagID = "VCE AMC"; break;
+                                       case 50:
+                                       case 51: pszTagID = "SHF"; break;
+                               }
+                       }
+                       else
+                       {
+                               PVR_DPF((PVR_DBG_WARNING, "%s: Unidentified Texas BIF Tag ID: %d", __func__, ui32TagID));
+                       }
+               }
+               else if ((ui32BIFModule % ui32BIFsPerSPU) == 1)
+               {
+                       IMG_UINT32 ui32Tag_RGX_TEXAS_BIF0_MCU_L1_TAG_LAST =
+                               (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE))
+                               ? RGX_TEXAS_BIF0_MCU_L1_TAG_LAST__ALBIORIX
+                               : RGX_TEXAS_BIF0_MCU_L1_TAG_LAST__SERIES8;
+
+                       /* Texas 1 BIF */
+                       if (ui32TagID < ui32Tag_RGX_TEXAS_BIF0_MCU_L1_TAG_LAST)
+                       {
+                               pszTagID = "MCU L1";
+                               _RGXDecodeMMUReqMCULevel1(psDevInfo, ui32TagID, &pszTagSB);
+                       }
+                       else if (ui32TagID <= RGX_TEXAS_BIF1_TAG_LAST)
+                       {
+                               switch (ui32TagID)
+                               {
+                                       /** Albiorix/NUM_TPU_PER_SPU > 1 **/
+                                       case 6:
+                                       case 7:  pszTagID = "BSC"; break;
+                                       /** All cores **/
+                                       case 12: pszTagID = "TCU L1"; break;
+                                       case 13: pszTagID = "TPF"; break;
+                                       case 14: pszTagID = "TPF CPF"; break;
+                                       case 15:
+                                       case 16: pszTagID = "PBE1"; break;
+                                       case 17: pszTagID = "PDSRW cache"; break;
+                                       case 18: pszTagID = "PDS"; break;
+                                       case 19:
+                                       case 20: pszTagID = "ISP1 ID Array"; break;
+                                       case 21: pszTagID = "USC L2"; break;
+                                       case 22: pszTagID = "VDM L2"; break;
+                                       case 23: pszTagID = "RTU FBA L2"; break;
+                                       case 24: pszTagID = "RTU SHR L2"; break;
+                                       case 25: pszTagID = "RTU SHG L2"; break;
+                                       case 26: pszTagID = "RTU TUL L2"; break;
+                               }
+                       }
+                       else
+                       {
+                               PVR_DPF((PVR_DBG_WARNING, "%s: Unidentified Texas BIF Tag ID: %d", __func__, ui32TagID));
+                       }
+               }
+       }
+       else if (ui32BIFModule == RGX_GET_FEATURE_VALUE(psDevInfo, NUM_SPU)*ui32BIFsPerSPU)
+       {
+               /* Jones BIF */
+
+               if ((ui32TagID >= RGX_JONES_BIF_DCE_TAG_FIRST) && (ui32TagID <= RGX_JONES_BIF_DCE_TAG_LAST))
+               {
+                       pszTagID = "DCE";
+               }
+               else if ((ui32TagID >= RGX_JONES_BIF_TDM_TAG_FIRST) && (ui32TagID <= RGX_JONES_BIF_TDM_TAG_LAST))
+               {
+                       pszTagID = "TDM";
+               }
+               else if ((ui32TagID >= RGX_JONES_BIF_CDM_TAG_FIRST) && (ui32TagID <= RGX_JONES_BIF_CDM_TAG_LAST))
+               {
+                       pszTagID = "CDM";
+               }
+               else if ((ui32TagID >= RGX_JONES_BIF_TE_TAG_FIRST) && (ui32TagID <= RGX_JONES_BIF_TE_TAG_LAST))
+               {
+                       pszTagID = "Tiling Engine (TE3)";
+               }
+               else if ((ui32TagID >= RGX_JONES_BIF_RTU_TAG_FIRST) && (ui32TagID <= RGX_JONES_BIF_RTU_TAG_LAST))
+               {
+                       pszTagID = "RTU";
+               }
+               else if (ui32TagID <= RGX_JONES_BIF_TAG_LAST)
+               {
+                       switch (ui32TagID)
+                       {
+                               case RGX_JONES_BIF_IPP_TAG:             pszTagID = "IPP"; break;
+                               case RGX_JONES_BIF_PM_TAG:              pszTagID = "PM"; break;
+                               case RGX_JONES_BIF_META_TAG:    pszTagID = "META"; break;
+                               case RGX_JONES_BIF_META_DMA_TAG:pszTagID = "META DMA"; break;
+                               case RGX_JONES_BIF_RPM_TAG:             pszTagID = "RPM"; break;
+                       }
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "%s: Unidentified Jones BIF Tag ID: %d", __func__, ui32TagID));
+               }
+       }
+       else if (bWriteBack)
+       {
+               pszTagID = "";
+               pszTagSB = "Writeback of dirty cacheline";
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_WARNING, "%s: Unidentified BIF Module: %d", __func__, ui32BIFModule));
+       }
+
+       *ppszTagID = pszTagID;
+       *ppszTagSB = pszTagSB;
+}
+
+
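+/* Split an OS timestamp in nanoseconds into whole seconds plus remaining nanoseconds */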
+static void ConvertOSTimestampToSAndNS(IMG_UINT64 ui64OSTimer,
+                                                       IMG_UINT64 *pui64Seconds,
+                                                       IMG_UINT64 *pui64Nanoseconds)
+{
+       IMG_UINT32 ui32Remainder;
+
+       *pui64Seconds = OSDivide64r64(ui64OSTimer, 1000000000, &ui32Remainder);
+       *pui64Nanoseconds = ui64OSTimer - (*pui64Seconds * 1000000000ULL);
+}
+
+
+typedef enum _DEVICEMEM_HISTORY_QUERY_INDEX_
+{
+       DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING,
+       DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED,
+       DEVICEMEM_HISTORY_QUERY_INDEX_NEXT,
+       DEVICEMEM_HISTORY_QUERY_INDEX_COUNT,
+} DEVICEMEM_HISTORY_QUERY_INDEX;
+
+
+/*!
+*******************************************************************************
+
+ @Function     _PrintDevicememHistoryQueryResult
+
+ @Description
+
+ Print details of a single result from a DevicememHistory query
+
+ @Input pfnDumpDebugPrintf       - Debug printf function
+ @Input pvDumpDebugFile          - Optional file identifier to be passed to the
+                                   'printf' function if required
+ @Input psFaultProcessInfo       - The process info derived from the page fault
+ @Input psResult                 - The DevicememHistory result to be printed
+ @Input ui32Index                - The index of the result
+ @Input pszIndent                - Indentation string prefixed to each printed line
+
+ @Return   void
+
+******************************************************************************/
+static void _PrintDevicememHistoryQueryResult(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                               void *pvDumpDebugFile,
+                                               RGXMEM_PROCESS_INFO *psFaultProcessInfo,
+                                               DEVICEMEM_HISTORY_QUERY_OUT_RESULT *psResult,
+                                               IMG_UINT32 ui32Index,
+                                               const IMG_CHAR* pszIndent)
+{
+       IMG_UINT32 ui32Remainder;
+       IMG_UINT64 ui64Seconds, ui64Nanoseconds;
+
+       ConvertOSTimestampToSAndNS(psResult->ui64When,
+                                                       &ui64Seconds,
+                                                       &ui64Nanoseconds);
+
+       if (psFaultProcessInfo->uiPID != RGXMEM_SERVER_PID_FIRMWARE)
+       {
+               PVR_DUMPDEBUG_LOG("%s    [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC
+                                       " Size: " IMG_DEVMEM_SIZE_FMTSPEC
+                                       " Operation: %s Modified: %" IMG_UINT64_FMTSPEC
+                                       " us ago (OS time %" IMG_UINT64_FMTSPEC
+                                       ".%09" IMG_UINT64_FMTSPEC " s)",
+                                               pszIndent,
+                                               ui32Index,
+                                               psResult->szString,
+                                               psResult->sBaseDevVAddr.uiAddr,
+                                               psResult->uiSize,
+                                               psResult->bMap ? "Map": "Unmap",
+                                               OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder),
+                                               ui64Seconds,
+                                               ui64Nanoseconds);
+       }
+       else
+       {
+               PVR_DUMPDEBUG_LOG("%s    [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC
+                                       " Size: " IMG_DEVMEM_SIZE_FMTSPEC
+                                       " Operation: %s Modified: %" IMG_UINT64_FMTSPEC
+                                       " us ago (OS time %" IMG_UINT64_FMTSPEC
+                                       ".%09" IMG_UINT64_FMTSPEC
+                                       ") PID: %u (%s)",
+                                               pszIndent,
+                                               ui32Index,
+                                               psResult->szString,
+                                               psResult->sBaseDevVAddr.uiAddr,
+                                               psResult->uiSize,
+                                               psResult->bMap ? "Map": "Unmap",
+                                               OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder),
+                                               ui64Seconds,
+                                               ui64Nanoseconds,
+                                               psResult->sProcessInfo.uiPID,
+                                               psResult->sProcessInfo.szProcessName);
+       }
+
+       if (!psResult->bRange)
+       {
+               PVR_DUMPDEBUG_LOG("%s        Whole allocation was %s", pszIndent, psResult->bMap ? "mapped": "unmapped");
+       }
+       else
+       {
+               PVR_DUMPDEBUG_LOG("%s        Pages %u to %u (" IMG_DEV_VIRTADDR_FMTSPEC "-" IMG_DEV_VIRTADDR_FMTSPEC ") %s%s",
+                                                                               pszIndent,
+                                                                               psResult->ui32StartPage,
+                                                                               psResult->ui32StartPage + psResult->ui32PageCount - 1,
+                                                                               psResult->sMapStartAddr.uiAddr,
+                                                                               psResult->sMapEndAddr.uiAddr,
+                                                                               psResult->bAll ? "(whole allocation) " : "",
+                                                                               psResult->bMap ? "mapped": "unmapped");
+       }
+}
+
+/*!
+*******************************************************************************
+
+ @Function     _PrintDevicememHistoryQueryOut
+
+ @Description
+
+ Print details of all the results from a DevicememHistory query
+
+ @Input pfnDumpDebugPrintf       - Debug printf function
+ @Input pvDumpDebugFile          - Optional file identifier to be passed to the
+                                   'printf' function if required
+ @Input psFaultProcessInfo       - The process info derived from the page fault
+ @Input psQueryOut               - Storage for the query results
+ @Input pszIndent                - Indentation string prefixed to each printed line
+
+ @Return   void
+
+******************************************************************************/
+static void _PrintDevicememHistoryQueryOut(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                               void *pvDumpDebugFile,
+                                               RGXMEM_PROCESS_INFO *psFaultProcessInfo,
+                                               DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut,
+                                               const IMG_CHAR* pszIndent)
+{
+       IMG_UINT32 i;
+
+       if (psQueryOut->ui32NumResults == 0)
+       {
+               PVR_DUMPDEBUG_LOG("%s    No results", pszIndent);
+       }
+       else
+       {
+               for (i = 0; i < psQueryOut->ui32NumResults; i++)
+               {
+                       _PrintDevicememHistoryQueryResult(pfnDumpDebugPrintf, pvDumpDebugFile,
+                                                                       psFaultProcessInfo,
+                                                                       &psQueryOut->sResults[i],
+                                                                       i,
+                                                                       pszIndent);
+               }
+       }
+}
+
+/* Table of HW page size field values and the equivalent page sizes in bytes */
+static const unsigned int aui32HWPageSizeTable[][2] =
+{
+       { 0, PVRSRV_4K_PAGE_SIZE },
+       { 1, PVRSRV_16K_PAGE_SIZE },
+       { 2, PVRSRV_64K_PAGE_SIZE },
+       { 3, PVRSRV_256K_PAGE_SIZE },
+       { 4, PVRSRV_1M_PAGE_SIZE },
+       { 5, PVRSRV_2M_PAGE_SIZE }
+};
+
+/*!
+*******************************************************************************
+
+ @Function     _PageSizeHWToBytes
+
+ @Description
+
+ Convert a HW page size value to its size in bytes
+
+ @Input ui32PageSizeHW     - The HW page size value
+
+ @Return   IMG_UINT32      The page size in bytes
+
+******************************************************************************/
+static IMG_UINT32 _PageSizeHWToBytes(IMG_UINT32 ui32PageSizeHW)
+{
+       if (ui32PageSizeHW > 5)
+       {
+               /* This is invalid, so return a default value as we cannot ASSERT in this code! */
+               return PVRSRV_4K_PAGE_SIZE;
+       }
+
+       return aui32HWPageSizeTable[ui32PageSizeHW][1];
+}
+
+/*!
+*******************************************************************************
+
+ @Function     _GetDevicememHistoryData
+
+ @Description
+
+ Get the DevicememHistory results for the given PID and faulting device virtual address.
+ The function will query DevicememHistory for information about the faulting page, as well
+ as the page before and after.
+
+ @Input uiPID              - The process ID to search for allocations belonging to
+ @Input sFaultDevVAddr     - The device address to search for allocations at/before/after
+ @Input asQueryOut         - Storage for the query results
+ @Input ui32PageSizeBytes  - Faulted page size in bytes
+
+ @Return IMG_BOOL          - IMG_TRUE if any results were found for this page fault
+
+******************************************************************************/
+static IMG_BOOL _GetDevicememHistoryData(IMG_PID uiPID, IMG_DEV_VIRTADDR sFaultDevVAddr,
+                                                       DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT],
+                                                       IMG_UINT32 ui32PageSizeBytes)
+{
+       DEVICEMEM_HISTORY_QUERY_IN sQueryIn;
+       IMG_BOOL bAnyHits = IMG_FALSE;
+
+       /* if the page fault originated in the firmware then the allocation may
+        * appear to belong to any PID, because FW allocations are attributed
+        * to the client process creating the allocation, so instruct the
+        * devicemem_history query to search all available PIDs
+        */
+       if (uiPID == RGXMEM_SERVER_PID_FIRMWARE)
+       {
+               sQueryIn.uiPID = DEVICEMEM_HISTORY_PID_ANY;
+       }
+       else
+       {
+               sQueryIn.uiPID = uiPID;
+       }
+
+       /* Query the DevicememHistory for all allocations in the previous page... */
+       sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) - ui32PageSizeBytes;
+       if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING],
+                                 ui32PageSizeBytes, IMG_TRUE))
+       {
+               bAnyHits = IMG_TRUE;
+       }
+
+       /* Query the DevicememHistory for any record at the exact address... */
+       sQueryIn.sDevVAddr = sFaultDevVAddr;
+       if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED],
+                                 ui32PageSizeBytes, IMG_FALSE))
+       {
+               bAnyHits = IMG_TRUE;
+       }
+       else
+       {
+               /* If not matched then try matching any record in the faulting page... */
+               if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED],
+                                         ui32PageSizeBytes, IMG_TRUE))
+               {
+                       bAnyHits = IMG_TRUE;
+               }
+       }
+
+       /* Query the DevicememHistory for all allocations in the next page... */
+       sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) + ui32PageSizeBytes;
+       if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_NEXT],
+                                 ui32PageSizeBytes, IMG_TRUE))
+       {
+               bAnyHits = IMG_TRUE;
+       }
+
+       return bAnyHits;
+}
+
+/* stored data about one page fault */
+typedef struct _FAULT_INFO_
+{
+       /* the process info of the memory context that page faulted */
+       RGXMEM_PROCESS_INFO sProcessInfo;
+       IMG_DEV_VIRTADDR sFaultDevVAddr;
+       MMU_FAULT_DATA   sMMUFaultData;
+       DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT];
+	/* The CR timer value at the time of the fault, recorded by the FW.
+	 * Used to differentiate between different page faults.
+        */
+       IMG_UINT64 ui64CRTimer;
+	/* Time when this FAULT_INFO entry was added. Used as a timing
+	 * reference against the map/unmap information.
+        */
+       IMG_UINT64 ui64When;
+       IMG_UINT32 ui32FaultInfoFlags;
+} FAULT_INFO;
+
+/* history list of page faults.
+ * Keeps the first `n` page faults and the last `n` page faults, like the FW
+ * HWR log
+ */
+typedef struct _FAULT_INFO_LOG_
+{
+       IMG_UINT32 ui32Head;
+       /* the number of faults in this log need not correspond exactly to
+        * the HWINFO number of the FW, as the FW HWINFO log may contain
+        * non-page fault HWRs
+        */
+       FAULT_INFO asFaults[RGXFWIF_HWINFO_MAX];
+} FAULT_INFO_LOG;
+
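+/* Flags recording which parts of a FAULT_INFO entry have been populated */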
+#define FAULT_INFO_PROC_INFO   (0x1U)
+#define FAULT_INFO_DEVMEM_HIST (0x2U)
+
+static FAULT_INFO_LOG gsFaultInfoLog = { 0 };
+
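+/* For firmware page faults, fill in the client process info for each devicemem history result */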
+static void _FillAppForFWFaults(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                       FAULT_INFO *psInfo,
+                                                       RGXMEM_PROCESS_INFO *psProcInfo)
+{
+       IMG_UINT32 i, j;
+
+       for (i = 0; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++)
+       {
+               for (j = 0; j < DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS; j++)
+               {
+                       IMG_BOOL bFound;
+
+			RGXMEM_PROCESS_INFO *psResultProcInfo = &psInfo->asQueryOut[i].sResults[j].sProcessInfo;
+			bFound = RGXPCPIDToProcessInfo(psDevInfo,
+								psResultProcInfo->uiPID,
+								psResultProcInfo);
+			if (!bFound)
+			{
+				OSStringLCopy(psResultProcInfo->szProcessName,
+								"(unknown)",
+								sizeof(psResultProcInfo->szProcessName));
+                       }
+               }
+       }
+}
+
+/*!
+*******************************************************************************
+
+ @Function     _PrintFaultInfo
+
+ @Description
+
+ Print all the details of a page fault from a FAULT_INFO structure
+
+ @Input pfnDumpDebugPrintf   - The debug printf function
+ @Input pvDumpDebugFile      - Optional file identifier to be passed to the
+                               'printf' function if required
+ @Input psInfo               - The page fault occurrence to print
+
+ @Return   void
+
+******************************************************************************/
+static void _PrintFaultInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                       void *pvDumpDebugFile,
+                                       FAULT_INFO *psInfo,
+                                       const IMG_CHAR* pszIndent)
+{
+       IMG_UINT32 i;
+       IMG_UINT64 ui64Seconds, ui64Nanoseconds;
+
+       ConvertOSTimestampToSAndNS(psInfo->ui64When, &ui64Seconds, &ui64Nanoseconds);
+
+       if (BITMASK_HAS(psInfo->ui32FaultInfoFlags, FAULT_INFO_PROC_INFO))
+       {
+               IMG_PID uiPID = (psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE || psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_PM) ?
+                                                       0 : psInfo->sProcessInfo.uiPID;
+
+               PVR_DUMPDEBUG_LOG("%sDevice memory history for page fault address " IMG_DEV_VIRTADDR_FMTSPEC
+                                                       ", CRTimer: 0x%016" IMG_UINT64_FMTSPECX
+                                                       ", PID: %u (%s, unregistered: %u) OS time: "
+                                                       "%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC,
+                                       pszIndent,
+                                       psInfo->sFaultDevVAddr.uiAddr,
+                                       psInfo->ui64CRTimer,
+                                       uiPID,
+                                       psInfo->sProcessInfo.szProcessName,
+                                       psInfo->sProcessInfo.bUnregistered,
+                                       ui64Seconds,
+                                       ui64Nanoseconds);
+       }
+       else
+       {
+               PVR_DUMPDEBUG_LOG("%sCould not find PID for device memory history on PC of the fault", pszIndent);
+       }
+
+       if (BITMASK_HAS(psInfo->ui32FaultInfoFlags, FAULT_INFO_DEVMEM_HIST))
+       {
+               for (i = DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++)
+               {
+                       const IMG_CHAR *pszWhich = NULL;
+
+                       switch (i)
+                       {
+                               case DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING:
+                                       pszWhich = "Preceding page";
+                                       break;
+                               case DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED:
+                                       pszWhich = "Faulted page";
+                                       break;
+                               case DEVICEMEM_HISTORY_QUERY_INDEX_NEXT:
+                                       pszWhich = "Next page";
+                                       break;
+                       }
+
+                       PVR_DUMPDEBUG_LOG("%s  %s:", pszIndent, pszWhich);
+                       _PrintDevicememHistoryQueryOut(pfnDumpDebugPrintf, pvDumpDebugFile,
+                                                               &psInfo->sProcessInfo,
+                                                               &psInfo->asQueryOut[i],
+                                                               pszIndent);
+               }
+       }
+       else
+       {
+               PVR_DUMPDEBUG_LOG("%s  No matching Devmem History for fault address", pszIndent);
+       }
+}
+
+static void _RecordFaultInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                       FAULT_INFO *psInfo,
+                                       IMG_DEV_VIRTADDR sFaultDevVAddr,
+                                       IMG_DEV_PHYADDR sPCDevPAddr,
+                                       IMG_UINT64 ui64CRTimer,
+                                       IMG_UINT32 ui32PageSizeBytes)
+{
+       IMG_BOOL bFound = IMG_FALSE, bIsPMFault = IMG_FALSE;
+       RGXMEM_PROCESS_INFO sProcessInfo;
+
+       psInfo->ui32FaultInfoFlags = 0;
+       psInfo->sFaultDevVAddr = sFaultDevVAddr;
+       psInfo->ui64CRTimer = ui64CRTimer;
+       psInfo->ui64When = OSClockns64();
+
+       if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED)
+       {
+               /* Check if this is PM fault */
+               if (psInfo->sMMUFaultData.eType == MMU_FAULT_TYPE_PM)
+               {
+                       bIsPMFault = IMG_TRUE;
+                       bFound = IMG_TRUE;
+                       sProcessInfo.uiPID = RGXMEM_SERVER_PID_PM;
+                       OSStringLCopy(sProcessInfo.szProcessName, "PM", sizeof(sProcessInfo.szProcessName));
+                       sProcessInfo.szProcessName[sizeof(sProcessInfo.szProcessName) - 1] = '\0';
+                       sProcessInfo.bUnregistered = IMG_FALSE;
+               }
+               else
+               {
+                       /* look up the process details for the faulting page catalogue */
+                       bFound = RGXPCAddrToProcessInfo(psDevInfo, sPCDevPAddr, &sProcessInfo);
+               }
+
+               if (bFound)
+               {
+                       IMG_BOOL bHits;
+
+                       psInfo->ui32FaultInfoFlags = FAULT_INFO_PROC_INFO;
+                       psInfo->sProcessInfo = sProcessInfo;
+
+                       if (bIsPMFault)
+                       {
+                               bHits = IMG_TRUE;
+                       }
+                       else
+                       {
+                               /* get any DevicememHistory data for the faulting address */
+                               bHits = _GetDevicememHistoryData(sProcessInfo.uiPID,
+                                                                sFaultDevVAddr,
+                                                                psInfo->asQueryOut,
+                                                                ui32PageSizeBytes);
+
+                               if (bHits)
+                               {
+                                       psInfo->ui32FaultInfoFlags |= FAULT_INFO_DEVMEM_HIST;
+
+                                       /* if the page fault was caused by the firmware then get information about
+                                        * which client application created the related allocations.
+                                        *
+                                        * Fill in the process info data for each query result.
+                                        */
+
+                                       if (sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE)
+                                       {
+                                               _FillAppForFWFaults(psDevInfo, psInfo, &sProcessInfo);
+                                       }
+                               }
+                       }
+               }
+       }
+}
+
+/*!
+*******************************************************************************
+
+ @Function     _DumpFaultAddressHostView
+
+ @Description
+
+ Dump the host's view of the faulting device virtual address (the MMU page
+ table walk) in human readable form.
+
+ @Input psFaultData          - MMU fault data captured by the host
+ @Input pfnDumpDebugPrintf   - The debug printf function
+ @Input pvDumpDebugFile      - Optional file identifier to be passed to the
+                               'printf' function if required
+ @Input pszIndent            - Indentation string prefixed to each line
+ @Return   void
+
+******************************************************************************/
+static void _DumpFaultAddressHostView(MMU_FAULT_DATA *psFaultData,
+                                       DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                       void *pvDumpDebugFile,
+                                       const IMG_CHAR* pszIndent)
+{
+       MMU_LEVEL eTopLevel;
+       const IMG_CHAR szPageLevel[][4] = {"", "PTE", "PDE", "PCE" };
+       const IMG_CHAR szPageError[][3] = {"", "PT",  "PD",  "PC"  };
+
+       eTopLevel = psFaultData->eTopLevel;
+
+       if (psFaultData->eType == MMU_FAULT_TYPE_UNKNOWN)
+       {
+               PVR_DUMPDEBUG_LOG("%sNo live host MMU data available", pszIndent);
+               return;
+       }
+       else if (psFaultData->eType == MMU_FAULT_TYPE_PM)
+       {
+               PVR_DUMPDEBUG_LOG("%sPM faulted at PC address = 0x%016" IMG_UINT64_FMTSPECx, pszIndent, psFaultData->sLevelData[MMU_LEVEL_0].ui64Address);
+       }
+       else
+       {
+               MMU_LEVEL eCurrLevel;
+               PVR_ASSERT(eTopLevel < MMU_LEVEL_LAST);
+
+               for (eCurrLevel = eTopLevel; eCurrLevel > MMU_LEVEL_0; eCurrLevel--)
+               {
+                       MMU_LEVEL_DATA *psMMULevelData = &psFaultData->sLevelData[eCurrLevel];
+                       if (psMMULevelData->ui64Address)
+                       {
+                               if (psMMULevelData->uiBytesPerEntry == 4)
+                               {
+                                       PVR_DUMPDEBUG_LOG("%s%s for index %d = 0x%08x and is %s",
+                                                               pszIndent,
+                                                               szPageLevel[eCurrLevel],
+                                                               psMMULevelData->ui32Index,
+                                                               (IMG_UINT) psMMULevelData->ui64Address,
+                                                               psMMULevelData->psDebugStr);
+                               }
+                               else
+                               {
+                                       PVR_DUMPDEBUG_LOG("%s%s for index %d = 0x%016" IMG_UINT64_FMTSPECx " and is %s",
+                                                               pszIndent,
+                                                               szPageLevel[eCurrLevel],
+                                                               psMMULevelData->ui32Index,
+                                                               psMMULevelData->ui64Address,
+                                                               psMMULevelData->psDebugStr);
+                               }
+                       }
+                       else
+                       {
+                               PVR_DUMPDEBUG_LOG("%s%s index (%d) out of bounds (%d)",
+                                                       pszIndent,
+                                                       szPageError[eCurrLevel],
+                                                       psMMULevelData->ui32Index,
+                                                       psMMULevelData->ui32NumOfEntries);
+                               break;
+                       }
+               }
+       }
+
+}
+
+/*!
+*******************************************************************************
+
+ @Function     _RGXDumpRGXMMUFaultStatus
+
+ @Description
+
+ Dump MMU Fault status in human readable form.
+
+ @Input pfnDumpDebugPrintf   - The debug printf function
+ @Input pvDumpDebugFile      - Optional file identifier to be passed to the
+                               'printf' function if required
+ @Input psDevInfo            - RGX device info
+ @Input aui64MMUStatus       - Array of MMU fault status register values
+                               (FAULT_STATUS1/2 or their META equivalents)
+ @Input pszMetaOrCore        - String indicating whether the call is for the
+                               META/RISC-V MMU or the core MMU
+ @Input pszIndent            - Indentation string prefixed to each line
+ @Return   void
+
+******************************************************************************/
+static void _RGXDumpRGXMMUFaultStatus(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                       void *pvDumpDebugFile,
+                                       PVRSRV_RGXDEV_INFO *psDevInfo,
+                                       const IMG_UINT64 aui64MMUStatus[],
+                                       const IMG_PCHAR pszMetaOrCore,
+                                       const IMG_CHAR *pszIndent)
+{
+       if (aui64MMUStatus[0] == 0x0)
+       {
+               PVR_DUMPDEBUG_LOG("%sMMU (%s) - OK", pszIndent, pszMetaOrCore);
+       }
+       else
+       {
+               IMG_UINT32 ui32PC        = (aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS1_CONTEXT_CLRMSK) >>
+                                          RGX_CR_MMU_FAULT_STATUS1_CONTEXT_SHIFT;
+               IMG_UINT64 ui64Addr      = ((aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS1_ADDRESS_CLRMSK) >>
+                                          RGX_CR_MMU_FAULT_STATUS1_ADDRESS_SHIFT) <<  4; /* align shift */
+               IMG_UINT32 ui32Requester = (aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS1_REQ_ID_CLRMSK) >>
+                                          RGX_CR_MMU_FAULT_STATUS1_REQ_ID_SHIFT;
+               IMG_UINT32 ui32MMULevel  = (aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS1_LEVEL_CLRMSK) >>
+                                          RGX_CR_MMU_FAULT_STATUS1_LEVEL_SHIFT;
+               IMG_BOOL bRead           = (aui64MMUStatus[0] & RGX_CR_MMU_FAULT_STATUS1_RNW_EN) != 0;
+               IMG_BOOL bFault          = (aui64MMUStatus[0] & RGX_CR_MMU_FAULT_STATUS1_FAULT_EN) != 0;
+               IMG_BOOL bROFault        = ((aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS1_TYPE_CLRMSK) >>
+                                           RGX_CR_MMU_FAULT_STATUS1_TYPE_SHIFT) == 0x2;
+               IMG_BOOL bProtFault      = ((aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS1_TYPE_CLRMSK) >>
+                                           RGX_CR_MMU_FAULT_STATUS1_TYPE_SHIFT) == 0x3;
+               IMG_UINT32 ui32BIFModule;
+               IMG_BOOL bWriteBack, bFBMFault;
+               IMG_CHAR aszScratch[RGX_DEBUG_STR_SIZE];
+               IMG_CHAR *pszTagID = NULL;
+               IMG_CHAR *pszTagSB = NULL;
+               const IMG_PCHAR pszMetaOrRiscv = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) ? "META" : "RISCV";
+
+               if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE))
+               {
+                       ui32BIFModule = (aui64MMUStatus[1] & ~RGX_CR_MMU_FAULT_STATUS2__ALBTOP__BIF_ID_CLRMSK) >>
+                                                                               RGX_CR_MMU_FAULT_STATUS2__ALBTOP__BIF_ID_SHIFT;
+                       bWriteBack    = (aui64MMUStatus[1] & RGX_CR_MMU_FAULT_STATUS2__ALBTOP__WRITEBACK_EN) != 0;
+                       bFBMFault     = (aui64MMUStatus[1] & RGX_CR_MMU_FAULT_STATUS2__ALBTOP__FBM_FAULT_EN) != 0;
+               }
+               else
+               {
+                       ui32BIFModule = (aui64MMUStatus[1] & ~RGX_CR_MMU_FAULT_STATUS2_BIF_ID_CLRMSK) >>
+                                                                               RGX_CR_MMU_FAULT_STATUS2_BIF_ID_SHIFT;
+                       bWriteBack    = (aui64MMUStatus[1] & RGX_CR_MMU_FAULT_STATUS2_WRITEBACK_EN) != 0;
+                       bFBMFault     = (aui64MMUStatus[1] & RGX_CR_MMU_FAULT_STATUS2_FBM_FAULT_EN) != 0;
+               }
+
+               if (strcmp(pszMetaOrCore, "Core") != 0)
+               {
+                       ui32PC          = (aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK) >>
+                                                               RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT;
+                       ui64Addr        = ((aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK) >>
+                                                               RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT) <<  4; /* align shift */
+                       ui32Requester = (aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK) >>
+                                                               RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT;
+                       ui32MMULevel  = (aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK) >>
+                                                               RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT;
+                       bRead           = (aui64MMUStatus[0] & RGX_CR_MMU_FAULT_STATUS_META_RNW_EN) != 0;
+                       bFault      = (aui64MMUStatus[0] & RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN) != 0;
+                       bROFault    = ((aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK) >>
+                                                               RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT) == 0x2;
+                       bProtFault  = ((aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK) >>
+                                                               RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT) == 0x3;
+               }
+               else
+               {
+                       _RGXDecodeMMUReqTags(psDevInfo, ui32Requester, ui32BIFModule, bRead, bWriteBack, bFBMFault, &pszTagID, &pszTagSB, aszScratch, RGX_DEBUG_STR_SIZE);
+               }
+
+               PVR_DUMPDEBUG_LOG("%sMMU (%s) - FAULT:", pszIndent, pszMetaOrCore);
+               PVR_DUMPDEBUG_LOG("%s  * MMU status (0x%016" IMG_UINT64_FMTSPECX " | 0x%08" IMG_UINT64_FMTSPECX "): PC = %d, %s 0x%010" IMG_UINT64_FMTSPECX ", %s(%s)%s%s%s%s.",
+                                                 pszIndent,
+                                                 aui64MMUStatus[0],
+                                                 aui64MMUStatus[1],
+                                                 ui32PC,
+                                                 (bRead)?"Reading from":"Writing to",
+                                                 ui64Addr,
+                                                 (pszTagID)? pszTagID : pszMetaOrRiscv,
+                                                 (pszTagSB)? pszTagSB : "-",
+                                                 (bFault)?", Fault":"",
+                                                 (bROFault)?", Read Only fault":"",
+                                                 (bProtFault)?", PM/FW core protection fault":"",
+                                                 _RGXDecodeMMULevel(ui32MMULevel));
+
+       }
+}
+
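+/* The fault decode in _RGXDumpRGXMMUFaultStatus applies the
+ * RGX_CR_MMU_FAULT_STATUS1 field definitions to whichever status register the
+ * caller passes in; the asserts below check that the core and META fault
+ * status register layouts really are identical, so that decode is safe. */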
+static_assert((RGX_CR_MMU_FAULT_STATUS1_CONTEXT_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK),
+                         "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS1_CONTEXT_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT),
+                         "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS1_ADDRESS_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK),
+                         "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS1_ADDRESS_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT),
+                         "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS1_REQ_ID_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK),
+                         "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS1_REQ_ID_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT),
+                         "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS1_LEVEL_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK),
+                         "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS1_LEVEL_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT),
+                         "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS1_RNW_EN == RGX_CR_MMU_FAULT_STATUS_META_RNW_EN),
+                         "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS1_FAULT_EN == RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN),
+                         "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS1_TYPE_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK),
+                         "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS1_TYPE_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT),
+                         "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+
+
+static const IMG_FLAGS2DESC asCswOpts2Description[] =
+{
+       {RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST, " Fast CSW profile;"},
+       {RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM, " Medium CSW profile;"},
+       {RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW, " Slow CSW profile;"},
+       {RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY, " No Delay CSW profile;"},
+       {RGXFWIF_INICFG_CTXSWITCH_MODE_RAND, " Random Csw enabled;"},
+       {RGXFWIF_INICFG_CTXSWITCH_SRESET_EN, " SoftReset;"},
+};
+
+static const IMG_FLAGS2DESC asMisc2Description[] =
+{
+       {RGXFWIF_INICFG_POW_RASCALDUST, " Power Rascal/Dust;"},
+       {RGXFWIF_INICFG_HWPERF_EN, " HwPerf EN;"},
+       {RGXFWIF_INICFG_FBCDC_V3_1_EN, " FBCDCv3.1;"},
+       {RGXFWIF_INICFG_CHECK_MLIST_EN, " Check MList;"},
+       {RGXFWIF_INICFG_DISABLE_CLKGATING_EN, " ClockGating Off;"},
+       {RGXFWIF_INICFG_REGCONFIG_EN, " Register Config;"},
+       {RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY, " Assert on OOM;"},
+       {RGXFWIF_INICFG_HWP_DISABLE_FILTER, " HWP Filter Off;"},
+       {RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN, " CDM Random kill;"},
+       {RGXFWIF_INICFG_DISABLE_DM_OVERLAP, " DM Overlap Off;"},
+       {RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER, " Assert on HWR;"},
+       {RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED, " Coherent fabric on;"},
+       {RGXFWIF_INICFG_VALIDATE_IRQ, " Validate IRQ;"},
+       {RGXFWIF_INICFG_DISABLE_PDP_EN, " PDUMP Panic off;"},
+       {RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN, " SPU Pow mask change on;"},
+       {RGXFWIF_INICFG_WORKEST, " Workload Estim;"},
+       {RGXFWIF_INICFG_PDVFS, " PDVFS;"},
+       {RGXFWIF_INICFG_CDM_ARBITRATION_TASK_DEMAND, " CDM task demand arbitration;"},
+       {RGXFWIF_INICFG_CDM_ARBITRATION_ROUND_ROBIN, " CDM round-robin arbitration;"},
+       {RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP, " ISP v1 scheduling;"},
+       {RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP, " ISP v2 scheduling;"},
+       {RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER, " Validate SOC&USC timers;"}
+};
+
+static const IMG_FLAGS2DESC asFwOsCfg2Description[] =
+{
+       {RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN, " TDM;"},
+       {RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN, " GEOM;"},
+       {RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN, " 3D;"},
+       {RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN, " CDM;"},
+       {RGXFWIF_INICFG_OS_CTXSWITCH_RDM_EN, " RDM;"},
+       {RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM, " LowPrio TDM;"},
+       {RGXFWIF_INICFG_OS_LOW_PRIO_CS_GEOM, " LowPrio GEOM;"},
+       {RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D, " LowPrio 3D;"},
+       {RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM, " LowPrio CDM;"},
+       {RGXFWIF_INICFG_OS_LOW_PRIO_CS_RDM, " LowPrio RDM;"},
+};
+
+static const IMG_FLAGS2DESC asHwrState2Description[] =
+{
+       {RGXFWIF_HWR_HARDWARE_OK, " HWR OK;"},
+       {RGXFWIF_HWR_RESET_IN_PROGRESS, " Reset ongoing;"},
+       {RGXFWIF_HWR_GENERAL_LOCKUP, " General lockup;"},
+       {RGXFWIF_HWR_DM_RUNNING_OK, " DM running ok;"},
+       {RGXFWIF_HWR_DM_STALLING, " DM stalling;"},
+       {RGXFWIF_HWR_FW_FAULT, " FW Fault;"},
+       {RGXFWIF_HWR_RESTART_REQUESTED, " Restart requested;"},
+};
+
+static const IMG_FLAGS2DESC asDmState2Description[] =
+{
+       {RGXFWIF_DM_STATE_READY_FOR_HWR, " ready for hwr;"},
+       {RGXFWIF_DM_STATE_NEEDS_SKIP, " needs skip;"},
+       {RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP, " needs PR cleanup;"},
+       {RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR, " needs trace clear;"},
+       {RGXFWIF_DM_STATE_GUILTY_LOCKUP, " guilty lockup;"},
+       {RGXFWIF_DM_STATE_INNOCENT_LOCKUP, " innocent lockup;"},
+       {RGXFWIF_DM_STATE_GUILTY_OVERRUNING, " guilty overrunning;"},
+       {RGXFWIF_DM_STATE_INNOCENT_OVERRUNING, " innocent overrunning;"},
+       {RGXFWIF_DM_STATE_GPU_ECC_HWR, " GPU ECC hwr;"},
+};
+
+static const IMG_FLAGS2DESC asHWErrorState[] =
+{
+       {RGX_HW_ERR_NA, "N/A"},
+       {RGX_HW_ERR_PRIMID_FAILURE_DURING_DMKILL, "Primitive ID failure during DM kill."},
+};
+
+#if !defined(NO_HARDWARE)
+static inline IMG_CHAR const *_GetRISCVException(IMG_UINT32 ui32Mcause)
+{
+       switch (ui32Mcause)
+       {
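+               /*
+                * RGXRISCVFW_MCAUSE_TABLE is an X-macro list of
+                * (mcause value, fatal flag, description) entries; expanding it here
+                * generates one case per known mcause. Fatal causes return their
+                * description string, non-fatal ones return NULL.
+                */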
+#define X(value, fatal, description) \
+               case value: \
+                       if (fatal) \
+                               return description; \
+                       return NULL;
+
+               RGXRISCVFW_MCAUSE_TABLE
+#undef X
+
+               default:
+                       PVR_DPF((PVR_DBG_WARNING, "Invalid RISC-V FW mcause value 0x%08x", ui32Mcause));
+                       return NULL;
+       }
+}
+#endif // !defined(NO_HARDWARE)
+
+/*
+       Appends flags strings to a null-terminated string buffer - each flag
+       description string starts with a space.
+*/
+static void _Flags2Description(IMG_CHAR *psDesc,
+                               IMG_UINT32 ui32DescSize,
+                               const IMG_FLAGS2DESC *psConvTable,
+                               IMG_UINT32 ui32TableSize,
+                               IMG_UINT32 ui32Flags)
+{
+       IMG_UINT32 ui32Idx;
+
+       for (ui32Idx = 0; ui32Idx < ui32TableSize; ui32Idx++)
+       {
+               if ((ui32Flags & psConvTable[ui32Idx].uiFlag) == psConvTable[ui32Idx].uiFlag)
+               {
+                       OSStringLCat(psDesc, psConvTable[ui32Idx].pszLabel, ui32DescSize);
+               }
+       }
+}
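+
+/*
+       Illustrative example (not part of the driver logic): with
+       ui32Flags == (RGXFWIF_INICFG_HWPERF_EN | RGXFWIF_INICFG_PDVFS) and the
+       asMisc2Description table above, the call would append
+       " HwPerf EN; PDVFS;" to psDesc.
+*/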
+
+/*
+ *  Translate ID code to descriptive string.
+ *  Returns on the first match.
+ */
+static void _ID2Description(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, const IMG_FLAGS2DESC *psConvTable, IMG_UINT32 ui32TableSize, IMG_UINT32 ui32ID)
+{
+       IMG_UINT32 ui32Idx;
+
+       for (ui32Idx = 0; ui32Idx < ui32TableSize; ui32Idx++)
+       {
+               if (ui32ID == psConvTable[ui32Idx].uiFlag)
+               {
+                       OSStringLCopy(psDesc, psConvTable[ui32Idx].pszLabel, ui32DescSize);
+                       return;
+               }
+       }
+}
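+
+/*
+       Illustrative example: passing ui32ID == RGX_HW_ERR_PRIMID_FAILURE_DURING_DMKILL
+       together with the asHWErrorState table above copies
+       "Primitive ID failure during DM kill." into psDesc.
+*/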
+
+/*
+       Writes flags strings to an uninitialised buffer.
+*/
+static void _GetFwSysFlagsDescription(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, IMG_UINT32 ui32RawFlags)
+{
+       const IMG_CHAR szCswLabel[] = "Ctx switch options:";
+       size_t uLabelLen = sizeof(szCswLabel) - 1;
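+       /* Split the buffer (minus the label) roughly in half: the first half is
+        * reserved for the context switch options, the misc flags appended
+        * afterwards may use whatever space remains. */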
+       const size_t uiBytesPerDesc = (ui32DescSize - uLabelLen) / 2U - 1U;
+
+       OSStringLCopy(psDesc, szCswLabel, ui32DescSize);
+
+       _Flags2Description(psDesc, uiBytesPerDesc + uLabelLen, asCswOpts2Description, ARRAY_SIZE(asCswOpts2Description), ui32RawFlags);
+       _Flags2Description(psDesc, ui32DescSize, asMisc2Description, ARRAY_SIZE(asMisc2Description), ui32RawFlags);
+}
+
+static void _GetFwOsFlagsDescription(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, IMG_UINT32 ui32RawFlags)
+{
+       const IMG_CHAR szCswLabel[] = "Ctx switch:";
+       size_t uLabelLen = sizeof(szCswLabel) - 1;
+       const size_t uiBytesPerDesc = (ui32DescSize - uLabelLen) / 2U - 1U;
+
+       OSStringLCopy(psDesc, szCswLabel, ui32DescSize);
+
+       _Flags2Description(psDesc, uiBytesPerDesc + uLabelLen, asFwOsCfg2Description, ARRAY_SIZE(asFwOsCfg2Description), ui32RawFlags);
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function     _RGXDumpFWAssert
+
+ @Description
+
+ Dump FW assert strings when a thread asserts.
+
+ @Input pfnDumpDebugPrintf   - The debug printf function
+ @Input pvDumpDebugFile      - Optional file identifier to be passed to the
+                               'printf' function if required
+ @Input psRGXFWIfTraceBufCtl - RGX FW trace buffer
+
+ @Return   void
+
+******************************************************************************/
+static void _RGXDumpFWAssert(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                       void *pvDumpDebugFile,
+                                       const RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl)
+{
+       const IMG_CHAR *pszTraceAssertPath;
+       const IMG_CHAR *pszTraceAssertInfo;
+       IMG_INT32 ui32TraceAssertLine;
+       IMG_UINT32 i;
+
+       for (i = 0; i < RGXFW_THREAD_NUM; i++)
+       {
+               pszTraceAssertPath = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szPath;
+               pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szInfo;
+               ui32TraceAssertLine = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.ui32LineNum;
+
+               /* print non-null assert strings */
+               if (*pszTraceAssertInfo)
+               {
+                       PVR_DUMPDEBUG_LOG("FW-T%d Assert: %s (%s:%d)",
+                                         i, pszTraceAssertInfo, pszTraceAssertPath, ui32TraceAssertLine);
+               }
+       }
+}
+
+/*!
+*******************************************************************************
+
+ @Function     _RGXDumpFWFaults
+
+ @Description
+
+ Dump FW fault information recorded by the firmware.
+
+ @Input pfnDumpDebugPrintf   - The debug printf function
+ @Input pvDumpDebugFile      - Optional file identifier to be passed to the
+                               'printf' function if required
+ @Input psFwSysData       - RGX FW shared system data
+
+ @Return   void
+
+******************************************************************************/
+static void _RGXDumpFWFaults(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                             void *pvDumpDebugFile,
+                             const RGXFWIF_SYSDATA *psFwSysData)
+{
+       if (psFwSysData->ui32FWFaults > 0)
+       {
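+               /* The firmware records the last RGXFWIF_FWFAULTINFO_MAX faults in a
+                * circular buffer; print them oldest first. */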
+               IMG_UINT32      ui32StartFault = psFwSysData->ui32FWFaults - RGXFWIF_FWFAULTINFO_MAX;
+               IMG_UINT32      ui32EndFault   = psFwSysData->ui32FWFaults - 1;
+               IMG_UINT32  ui32Index;
+
+               if (psFwSysData->ui32FWFaults < RGXFWIF_FWFAULTINFO_MAX)
+               {
+                       ui32StartFault = 0;
+               }
+
+               for (ui32Index = ui32StartFault; ui32Index <= ui32EndFault; ui32Index++)
+               {
+                       const RGX_FWFAULTINFO *psFaultInfo = &psFwSysData->sFaultInfo[ui32Index % RGXFWIF_FWFAULTINFO_MAX];
+                       IMG_UINT64 ui64Seconds, ui64Nanoseconds;
+
+                       /* Split OS timestamp into seconds and nanoseconds */
+                       ConvertOSTimestampToSAndNS(psFaultInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds);
+
+                       PVR_DUMPDEBUG_LOG("FW Fault %d: %s (%s:%d)",
+                                         ui32Index+1, psFaultInfo->sFaultBuf.szInfo,
+                                         psFaultInfo->sFaultBuf.szPath,
+                                         psFaultInfo->sFaultBuf.ui32LineNum);
+                       PVR_DUMPDEBUG_LOG("            Data = 0x%08x, CRTimer = 0x%012"IMG_UINT64_FMTSPECX", OSTimer = %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC,
+                                         psFaultInfo->ui32Data,
+                                         psFaultInfo->ui64CRTimer,
+                                         ui64Seconds, ui64Nanoseconds);
+               }
+       }
+}
+
+static void _RGXDumpFWPoll(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                       void *pvDumpDebugFile,
+                                       const RGXFWIF_SYSDATA *psFwSysData)
+{
+       IMG_UINT32 i;
+       for (i = 0; i < RGXFW_THREAD_NUM; i++)
+       {
+               if (psFwSysData->aui32CrPollAddr[i])
+               {
+                       PVR_DUMPDEBUG_LOG("T%u polling %s (reg:0x%08X mask:0x%08X)",
+                                         i,
+                                         ((psFwSysData->aui32CrPollAddr[i] & RGXFW_POLL_TYPE_SET)?("set"):("unset")),
+                                         psFwSysData->aui32CrPollAddr[i] & ~RGXFW_POLL_TYPE_SET,
+                                         psFwSysData->aui32CrPollMask[i]);
+               }
+       }
+
+}
+
+static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                                         void *pvDumpDebugFile,
+                                                         const RGXFWIF_SYSDATA *psFwSysData,
+                                                         const RGXFWIF_HWRINFOBUF *psHWRInfoBuf,
+                                                         PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       IMG_BOOL          bAnyLocked = IMG_FALSE;
+       IMG_UINT32        dm, i;
+       IMG_UINT32        ui32LineSize;
+       IMG_CHAR          *pszLine, *pszTemp;
+       const IMG_CHAR    *apszDmNames[RGXFWIF_DM_MAX] = {"GP", "TDM", "GEOM", "3D", "CDM", "RAY", "GEOM2", "GEOM3", "GEOM4"};
+       const IMG_CHAR    szMsgHeader[] = "Number of HWR: ";
+       const IMG_CHAR    szMsgFalse[] = "FALSE(";
+       IMG_CHAR          *pszLockupType = "";
+       const IMG_UINT32  ui32MsgHeaderCharCount = ARRAY_SIZE(szMsgHeader) - 1; /* size includes the null */
+       const IMG_UINT32  ui32MsgFalseCharCount = ARRAY_SIZE(szMsgFalse) - 1;
+       IMG_UINT32        ui32HWRRecoveryFlags;
+       IMG_UINT32        ui32ReadIndex;
+
+       for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++)
+       {
+               if (psHWRInfoBuf->aui32HwrDmLockedUpCount[dm] ||
+                   psHWRInfoBuf->aui32HwrDmOverranCount[dm])
+               {
+                       bAnyLocked = IMG_TRUE;
+                       break;
+               }
+       }
+
+       if (!PVRSRV_VZ_MODE_IS(GUEST) && !bAnyLocked && (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_HARDWARE_OK))
+       {
+               /* No HWR situation, print nothing */
+               return;
+       }
+
+       if (PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               IMG_BOOL bAnyHWROccurred = IMG_FALSE;
+
+               for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++)
+               {
+                       if (psHWRInfoBuf->aui32HwrDmRecoveredCount[dm] != 0 ||
+                               psHWRInfoBuf->aui32HwrDmLockedUpCount[dm] != 0 ||
+                               psHWRInfoBuf->aui32HwrDmOverranCount[dm] !=0)
+                               {
+                                       bAnyHWROccurred = IMG_TRUE;
+                                       break;
+                               }
+               }
+
+               if (!bAnyHWROccurred)
+               {
+                       return;
+               }
+       }
+
+       ui32LineSize = sizeof(IMG_CHAR) * (
+                       ui32MsgHeaderCharCount +
+                       (psDevInfo->sDevFeatureCfg.ui32MAXDMCount*(     4/*DM name + left parenthesis*/ +
+                               10/*UINT32 max num of digits*/ +
+                               1/*slash*/ +
+                               10/*UINT32 max num of digits*/ +
+                               3/*right parenthesis + comma + space*/)) +
+                       ui32MsgFalseCharCount + 1 + (psDevInfo->sDevFeatureCfg.ui32MAXDMCount*6) + 1
+                               /* 'FALSE(' + ')' + (UINT16 max num + comma) per DM + \0 */
+                       );
+
+       pszLine = OSAllocMem(ui32LineSize);
+       if (pszLine == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                       "%s: Out of mem allocating line string (size: %d)",
+                       __func__,
+                       ui32LineSize));
+               return;
+       }
+
+       OSStringLCopy(pszLine, szMsgHeader, ui32LineSize);
+       pszTemp = pszLine + ui32MsgHeaderCharCount;
+
+       for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++)
+       {
+               pszTemp += OSSNPrintf(pszTemp,
+                               4 + 10 + 1 + 10 + 1 + 10 + 1 + 1 + 1 + 1
+                               /* (name + left parenthesis) + UINT32 + slash + UINT32 + plus + UINT32 + right parenthesis + comma + space + \0 */,
+                               "%s(%u/%u+%u), ",
+                               apszDmNames[dm],
+                               psHWRInfoBuf->aui32HwrDmRecoveredCount[dm],
+                               psHWRInfoBuf->aui32HwrDmLockedUpCount[dm],
+                               psHWRInfoBuf->aui32HwrDmOverranCount[dm]);
+       }
+
+       OSStringLCat(pszLine, szMsgFalse, ui32LineSize);
+       pszTemp += ui32MsgFalseCharCount;
+
+       for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++)
+       {
+               pszTemp += OSSNPrintf(pszTemp,
+                               10 + 1 + 1 /* UINT32 max num + comma + \0 */,
+                               (dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount-1 ? "%u," : "%u)"),
+                               psHWRInfoBuf->aui32HwrDmFalseDetectCount[dm]);
+       }
+
+       PVR_DUMPDEBUG_LOG("%s", pszLine);
+
+       OSFreeMem(pszLine);
+
+       /* Print out per HWR info */
+       for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++)
+       {
+               if (dm == RGXFWIF_DM_GP)
+               {
+                       PVR_DUMPDEBUG_LOG("DM %d (GP)", dm);
+               }
+               else
+               {
+                       if (!PVRSRV_VZ_MODE_IS(GUEST))
+                       {
+                               IMG_UINT32 ui32HWRRecoveryFlags = psFwSysData->aui32HWRRecoveryFlags[dm];
+                               IMG_CHAR sPerDmHwrDescription[RGX_DEBUG_STR_SIZE];
+                               sPerDmHwrDescription[0] = '\0';
+
+                               if (ui32HWRRecoveryFlags == RGXFWIF_DM_STATE_WORKING)
+                               {
+                                       OSStringLCopy(sPerDmHwrDescription, " working;", RGX_DEBUG_STR_SIZE);
+                               }
+                               else
+                               {
+                                       _Flags2Description(sPerDmHwrDescription, RGX_DEBUG_STR_SIZE,
+                                               asDmState2Description, ARRAY_SIZE(asDmState2Description),
+                                               ui32HWRRecoveryFlags);
+                               }
+                               PVR_DUMPDEBUG_LOG("DM %d (HWRflags 0x%08x:%s)", dm, ui32HWRRecoveryFlags, sPerDmHwrDescription);
+                       }
+                       else
+                       {
+                               PVR_DUMPDEBUG_LOG("DM %d", dm);
+                       }
+               }
+
+               ui32ReadIndex = 0;
+               for (i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++)
+               {
+                       IMG_BOOL bPMFault = IMG_FALSE;
+                       IMG_UINT32 ui32PC;
+                       IMG_UINT32 ui32PageSize = 0;
+                       IMG_DEV_PHYADDR sPCDevPAddr = { 0 };
+                       const RGX_HWRINFO *psHWRInfo = &psHWRInfoBuf->sHWRInfo[ui32ReadIndex];
+
+                       if ((psHWRInfo->eDM == dm) && (psHWRInfo->ui32HWRNumber != 0))
+                       {
+                               IMG_CHAR aui8RecoveryNum[10+10+1];
+                               IMG_UINT64 ui64Seconds, ui64Nanoseconds;
+                               IMG_BOOL bPageFault = IMG_FALSE;
+                               IMG_DEV_VIRTADDR sFaultDevVAddr;
+
+                               /* Split OS timestamp into seconds and nanoseconds */
+                               ConvertOSTimestampToSAndNS(psHWRInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds);
+
+                               ui32HWRRecoveryFlags = psHWRInfo->ui32HWRRecoveryFlags;
+                               if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_LOCKUP) { pszLockupType = ", Guilty Lockup"; }
+                               else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_INNOCENT_LOCKUP) { pszLockupType = ", Innocent Lockup"; }
+                               else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_OVERRUNING) { pszLockupType = ", Guilty Overrun"; }
+                               else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_INNOCENT_OVERRUNING) { pszLockupType = ", Innocent Overrun"; }
+                               else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH) { pszLockupType = ", Hard Context Switch"; }
+                               else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GPU_ECC_HWR) { pszLockupType = ", GPU ECC HWR"; }
+
+                               OSSNPrintf(aui8RecoveryNum, sizeof(aui8RecoveryNum), "Recovery %d:", psHWRInfo->ui32HWRNumber);
+                               if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT))
+                               {
+                                       PVR_DUMPDEBUG_LOG("  %s Core = %u, PID = %u, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s",
+                                                  aui8RecoveryNum,
+                                                  psHWRInfo->ui32CoreID,
+                                                  psHWRInfo->ui32PID,
+                                                  psHWRInfo->ui32FrameNum,
+                                                  psHWRInfo->ui32ActiveHWRTData,
+                                                  psHWRInfo->ui32EventStatus,
+                                                  pszLockupType);
+                               }
+                               else
+                               {
+                                       PVR_DUMPDEBUG_LOG("  %s PID = %u, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s",
+                                                  aui8RecoveryNum,
+                                                  psHWRInfo->ui32PID,
+                                                  psHWRInfo->ui32FrameNum,
+                                                  psHWRInfo->ui32ActiveHWRTData,
+                                                  psHWRInfo->ui32EventStatus,
+                                                  pszLockupType);
+                               }
+
+                               if (psHWRInfo->eHWErrorCode != RGX_HW_ERR_NA)
+                               {
+                                       IMG_CHAR sHWDebugInfo[RGX_DEBUG_STR_SIZE] = "";
+
+                                       _ID2Description(sHWDebugInfo, RGX_DEBUG_STR_SIZE, asHWErrorState, ARRAY_SIZE(asHWErrorState),
+                                               psHWRInfo->eHWErrorCode);
+                                       PVR_DUMPDEBUG_LOG("  HW error code = 0x%X: %s",
+                                                                         psHWRInfo->eHWErrorCode, sHWDebugInfo);
+                               }
+
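+                               /* Replace the "Recovery N:" label with spaces so the
+                                * remaining lines of this record line up under the first. */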
+                               pszTemp = &aui8RecoveryNum[0];
+                               while (*pszTemp != '\0')
+                               {
+                                       *pszTemp++ = ' ';
+                               }
+
+                               /* There's currently no time correlation for the Guest OSes on the Firmware so there's no point printing OS Timestamps on Guests */
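+                               /* Note: the CR timer is assumed to tick once every 256 GPU
+                                * clock cycles, hence the x256 conversions to cycles below. */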
+                               if (!PVRSRV_VZ_MODE_IS(GUEST))
+                               {
+                                       PVR_DUMPDEBUG_LOG("  %s CRTimer = 0x%012"IMG_UINT64_FMTSPECX", OSTimer = %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC ", CyclesElapsed = %" IMG_INT64_FMTSPECd,
+                                                                          aui8RecoveryNum,
+                                                                          psHWRInfo->ui64CRTimer,
+                                                                          ui64Seconds,
+                                                                          ui64Nanoseconds,
+                                                                          (psHWRInfo->ui64CRTimer-psHWRInfo->ui64CRTimeOfKick)*256);
+                               }
+                               else
+                               {
+                                       PVR_DUMPDEBUG_LOG("  %s CRTimer = 0x%012"IMG_UINT64_FMTSPECX", CyclesElapsed = %" IMG_INT64_FMTSPECd,
+                                                                          aui8RecoveryNum,
+                                                                          psHWRInfo->ui64CRTimer,
+                                                                          (psHWRInfo->ui64CRTimer-psHWRInfo->ui64CRTimeOfKick)*256);
+                               }
+
+                               if (psHWRInfo->ui64CRTimeHWResetFinish != 0)
+                               {
+                                       if (psHWRInfo->ui64CRTimeFreelistReady != 0)
+                                       {
+                                               /* If ui64CRTimeFreelistReady is less than ui64CRTimeHWResetFinish it means APM kicked in and the time is not valid. */
+                                               if (psHWRInfo->ui64CRTimeHWResetFinish < psHWRInfo->ui64CRTimeFreelistReady)
+                                               {
+                                                       PVR_DUMPDEBUG_LOG("  %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", FreelistReconTimeInCycles = %" IMG_INT64_FMTSPECd ", TotalRecoveryTimeInCycles = %" IMG_INT64_FMTSPECd,
+                                                                                          aui8RecoveryNum,
+                                                                                          (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256,
+                                                                                          (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256,
+                                                                                          (psHWRInfo->ui64CRTimeFreelistReady-psHWRInfo->ui64CRTimeHWResetFinish)*256,
+                                                                                          (psHWRInfo->ui64CRTimeFreelistReady-psHWRInfo->ui64CRTimer)*256);
+                                               }
+                                               else
+                                               {
+                                                       PVR_DUMPDEBUG_LOG("  %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", FreelistReconTimeInCycles = <not_timed>, TotalResetTimeInCycles = %" IMG_INT64_FMTSPECd,
+                                                                                          aui8RecoveryNum,
+                                                                                          (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256,
+                                                                                          (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256,
+                                                                                          (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimer)*256);
+                                               }
+                                       }
+                                       else
+                                       {
+                                               PVR_DUMPDEBUG_LOG("  %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", TotalResetTimeInCycles = %" IMG_INT64_FMTSPECd,
+                                                                                  aui8RecoveryNum,
+                                                                                  (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256,
+                                                                                  (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256,
+                                                                                  (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimer)*256);
+                                       }
+                               }
+
+                               switch (psHWRInfo->eHWRType)
+                               {
+                                       case RGX_HWRTYPE_ECCFAULT:
+                                       {
+                                               PVR_DUMPDEBUG_LOG("    ECC fault GPU=0x%08x", psHWRInfo->uHWRData.sECCInfo.ui32FaultGPU);
+                                       }
+                                       break;
+
+                                       case RGX_HWRTYPE_MMUFAULT:
+                                       {
+                                               _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo,
+                                                                               &psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0],
+                                                                               "Core",
+                                                                               DD_NORMAL_INDENT);
+
+                                               bPageFault = IMG_TRUE;
+                                               sFaultDevVAddr.uiAddr =   psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0];
+                                               sFaultDevVAddr.uiAddr &=  ~RGX_CR_MMU_FAULT_STATUS1_ADDRESS_CLRMSK;
+                                               sFaultDevVAddr.uiAddr >>= RGX_CR_MMU_FAULT_STATUS1_ADDRESS_SHIFT;
+                                               sFaultDevVAddr.uiAddr <<= 4; /* align shift */
+                                               ui32PC  = (psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS1_CONTEXT_CLRMSK) >>
+                                                                                                  RGX_CR_MMU_FAULT_STATUS1_CONTEXT_SHIFT;
+#if defined(SUPPORT_TRUSTED_DEVICE)
+                                               ui32PC = ui32PC - 1;
+#endif
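+                                               /* Low page catalogue IDs are assumed to be reserved for
+                                                * firmware/PM use, so a fault on one of them is handled
+                                                * as a PM fault rather than a client fault. */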
+                                               bPMFault = (ui32PC <= 8);
+                                               sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress;
+
+                                       }
+                                       break;
+
+                                       case RGX_HWRTYPE_MMUMETAFAULT:
+                                       {
+                                               const IMG_PCHAR pszMetaOrRiscv = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) ? "Meta" : "RiscV";
+
+                                               _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo,
+                                                                                       &psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0],
+                                                                                       pszMetaOrRiscv,
+                                                                                       DD_NORMAL_INDENT);
+
+                                               bPageFault = IMG_TRUE;
+                                               sFaultDevVAddr.uiAddr =   psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0];
+                                               sFaultDevVAddr.uiAddr &=  ~RGX_CR_MMU_FAULT_STATUS1_ADDRESS_CLRMSK;
+                                               sFaultDevVAddr.uiAddr >>= RGX_CR_MMU_FAULT_STATUS1_ADDRESS_SHIFT;
+                                               sFaultDevVAddr.uiAddr <<= 4; /* align shift */
+                                               sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress;
+                                       }
+                                       break;
+
+                                       case RGX_HWRTYPE_POLLFAILURE:
+                                       {
+                                               PVR_DUMPDEBUG_LOG("    T%u polling %s (reg:0x%08X mask:0x%08X last:0x%08X)",
+                                                                                 psHWRInfo->uHWRData.sPollInfo.ui32ThreadNum,
+                                                                                 ((psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & RGXFW_POLL_TYPE_SET)?("set"):("unset")),
+                                                                                 psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & ~RGXFW_POLL_TYPE_SET,
+                                                                                 psHWRInfo->uHWRData.sPollInfo.ui32CrPollMask,
+                                                                                 psHWRInfo->uHWRData.sPollInfo.ui32CrPollLastValue);
+                                       }
+                                       break;
+
+                                       case RGX_HWRTYPE_OVERRUN:
+                                       case RGX_HWRTYPE_UNKNOWNFAILURE:
+                                       {
+                                               /* Nothing to dump */
+                                       }
+                                       break;
+
+                                       default:
+                                       {
+                                               PVR_DUMPDEBUG_LOG("    Unknown HWR Info type: 0x%x", psHWRInfo->eHWRType);
+                                       }
+                                       break;
+                               }
+
+                               if (bPageFault)
+                               {
+
+                                       FAULT_INFO *psInfo;
+
+                                       OSLockAcquire(psDevInfo->hDebugFaultInfoLock);
+
+                                       /* Find the matching Fault Info for this HWRInfo */
+                                       psInfo = &gsFaultInfoLog.asFaults[ui32ReadIndex];
+
+                                       /* if they do not match, we need to update the psInfo */
+                                       if ((psInfo->ui64CRTimer != psHWRInfo->ui64CRTimer) ||
+                                               (psInfo->sFaultDevVAddr.uiAddr != sFaultDevVAddr.uiAddr))
+                                       {
+                                               MMU_FAULT_DATA *psFaultData = &psInfo->sMMUFaultData;
+
+                                               psFaultData->eType = MMU_FAULT_TYPE_UNKNOWN;
+
+                                               if (bPMFault)
+                                               {
+                                                       /* PM fault: dump the PC details only */
+                                                       psFaultData->eTopLevel = MMU_LEVEL_0;
+                                                       psFaultData->eType     = MMU_FAULT_TYPE_PM;
+                                                       psFaultData->sLevelData[MMU_LEVEL_0].ui64Address = sPCDevPAddr.uiAddr;
+                                               }
+                                               else
+                                               {
+                                                       RGXCheckFaultAddress(psDevInfo, &sFaultDevVAddr, &sPCDevPAddr, psFaultData);
+                                               }
+
+                                               _RecordFaultInfo(psDevInfo, psInfo,
+                                                                       sFaultDevVAddr, sPCDevPAddr, psHWRInfo->ui64CRTimer,
+                                                                       _PageSizeHWToBytes(ui32PageSize));
+
+                                       }
+
+                                       _DumpFaultAddressHostView(&psInfo->sMMUFaultData, pfnDumpDebugPrintf, pvDumpDebugFile, DD_NORMAL_INDENT);
+
+                                       if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED)
+                                       {
+                                               _PrintFaultInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psInfo, DD_NORMAL_INDENT);
+                                       }
+
+                                       OSLockRelease(psDevInfo->hDebugFaultInfoLock);
+                               }
+
+                       }
+
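+                       /* The HWR history keeps the first RGXFWIF_HWINFO_MAX_FIRST records
+                        * permanently and the rest in a circular section: after the last
+                        * persistent entry, continue from the firmware's current write
+                        * index, and wrap within the circular section once the final array
+                        * slot has been read. */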
+                       if (ui32ReadIndex == RGXFWIF_HWINFO_MAX_FIRST - 1)
+                               ui32ReadIndex = psHWRInfoBuf->ui32WriteIndex;
+                       else
+                               ui32ReadIndex = (ui32ReadIndex + 1) - (ui32ReadIndex / RGXFWIF_HWINFO_LAST_INDEX) * RGXFWIF_HWINFO_MAX_LAST;
+               }
+       }
+}
+
+#if !defined(NO_HARDWARE)
+
+/*!
+*******************************************************************************
+
+ @Function     _CheckForPendingPage
+
+ @Description
+
+ Check if the MMU indicates it is blocked on a pending page.
+ MMU version 4 and later do not support pending pages, so IMG_FALSE is
+ returned for those cores.
+
+ @Input psDevInfo       - RGX device info
+
+ @Return   IMG_BOOL      - IMG_TRUE if there is a pending page
+
+******************************************************************************/
+static INLINE IMG_BOOL _CheckForPendingPage(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       /* MMU4 doesn't support pending pages */
+       return (RGX_GET_FEATURE_VALUE(psDevInfo, MMU_VERSION) < 4) &&
+                  (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_ENTRY) & RGX_CR_MMU_ENTRY_PENDING_EN);
+}
+
+/*!
+*******************************************************************************
+
+ @Function     _GetPendingPageInfo
+
+ @Description
+
+ Get information about the pending page from the MMU status registers
+
+ @Input psDevInfo       - RGX device info
+ @Output psDevVAddr      - The device virtual address of the pending MMU address translation
+ @Output pui32CatBase    - The page catalog base
+
+ @Return   void
+
+******************************************************************************/
+static void _GetPendingPageInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_VIRTADDR *psDevVAddr,
+                                                               IMG_UINT32 *pui32CatBase)
+{
+       IMG_UINT64 ui64BIFMMUEntryStatus;
+
+       ui64BIFMMUEntryStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_ENTRY_STATUS);
+
+       psDevVAddr->uiAddr = (ui64BIFMMUEntryStatus & ~RGX_CR_MMU_ENTRY_STATUS_ADDRESS_CLRMSK);
+
+       *pui32CatBase = (ui64BIFMMUEntryStatus & ~RGX_CR_MMU_ENTRY_STATUS_CONTEXT_ID_CLRMSK) >>
+                                                               RGX_CR_MMU_ENTRY_STATUS_CONTEXT_ID_SHIFT;
+}
+
+#endif
+
+void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                       void *pvDumpDebugFile,
+                                       PVRSRV_RGXDEV_INFO *psDevInfo,
+                                       IMG_BOOL bRGXPoweredON)
+{
+       IMG_CHAR *pszState, *pszReason;
+       const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+       const RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl;
+       IMG_UINT32 ui32OSid;
+       const RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+       /* space for the current clock speed and 3 previous */
+       RGXFWIF_TIME_CORR asTimeCorrs[4];
+       IMG_UINT32 ui32NumClockSpeedChanges;
+
+#if defined(NO_HARDWARE)
+       PVR_UNREFERENCED_PARAMETER(bRGXPoweredON);
+#else
+       if ((bRGXPoweredON) && !PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               IMG_UINT64      aui64RegValMMUStatus[2];
+               const IMG_PCHAR pszMetaOrRiscv = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) ? "Meta" : "RiscV";
+
+               aui64RegValMMUStatus[0] = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS1);
+               aui64RegValMMUStatus[1] = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS2);
+               _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, &aui64RegValMMUStatus[0], "Core", DD_SUMMARY_INDENT);
+
+               aui64RegValMMUStatus[0] = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS_META);
+               _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, &aui64RegValMMUStatus[0], pszMetaOrRiscv, DD_SUMMARY_INDENT);
+
+               if (_CheckForPendingPage(psDevInfo))
+               {
+                       IMG_UINT32 ui32CatBase;
+                       IMG_DEV_VIRTADDR sDevVAddr;
+
+                       PVR_DUMPDEBUG_LOG("MMU Pending page: Yes");
+
+                       _GetPendingPageInfo(psDevInfo, &sDevVAddr, &ui32CatBase);
+
+                       if (ui32CatBase <= MAX_RESERVED_FW_MMU_CONTEXT)
+                       {
+                               PVR_DUMPDEBUG_LOG("Cannot check address on PM cat base %u", ui32CatBase);
+                       }
+                       else
+                       {
+                               IMG_UINT64 ui64CBaseMapping;
+                               IMG_DEV_PHYADDR sPCDevPAddr;
+                               MMU_FAULT_DATA sFaultData;
+                               IMG_BOOL bIsValid;
+
+                               OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, ui32CatBase);
+
+                               ui64CBaseMapping = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_CBASE_MAPPING);
+                               sPCDevPAddr.uiAddr = (((ui64CBaseMapping & ~RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK)
+                                                                                       >> RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT)
+                                                                                       << RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT);
+                               bIsValid = !(ui64CBaseMapping & RGX_CR_MMU_CBASE_MAPPING_INVALID_EN);
+
+                               PVR_DUMPDEBUG_LOG("Checking device virtual address " IMG_DEV_VIRTADDR_FMTSPEC
+                                                       " on cat base %u. PC Addr = 0x%llX is %s",
+                                                               (unsigned long long) sDevVAddr.uiAddr,
+                                                               ui32CatBase,
+                                                               (unsigned long long) sPCDevPAddr.uiAddr,
+                                                               bIsValid ? "valid":"invalid");
+                               RGXCheckFaultAddress(psDevInfo, &sDevVAddr, &sPCDevPAddr, &sFaultData);
+                               _DumpFaultAddressHostView(&sFaultData, pfnDumpDebugPrintf, pvDumpDebugFile, DD_SUMMARY_INDENT);
+                       }
+               }
+       }
+#endif /* NO_HARDWARE */
+
+       /* Firmware state */
+       switch (OSAtomicRead(&psDevInfo->psDeviceNode->eHealthStatus))
+       {
+               case PVRSRV_DEVICE_HEALTH_STATUS_OK:  pszState = "OK";  break;
+               case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING:  pszState = "NOT RESPONDING";  break;
+               case PVRSRV_DEVICE_HEALTH_STATUS_DEAD:  pszState = "DEAD";  break;
+               case PVRSRV_DEVICE_HEALTH_STATUS_FAULT:  pszState = "FAULT";  break;
+               case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED:  pszState = "UNDEFINED";  break;
+               default:  pszState = "UNKNOWN";  break;
+       }
+
+       switch (OSAtomicRead(&psDevInfo->psDeviceNode->eHealthReason))
+       {
+               case PVRSRV_DEVICE_HEALTH_REASON_NONE:  pszReason = "";  break;
+               case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED:  pszReason = " - Asserted";  break;
+               case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING:  pszReason = " - Poll failing";  break;
+               case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS:  pszReason = " - Global Event Object timeouts rising";  break;
+               case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT:  pszReason = " - KCCB offset invalid";  break;
+               case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED:  pszReason = " - KCCB stalled";  break;
+               case PVRSRV_DEVICE_HEALTH_REASON_IDLING:  pszReason = " - Idling";  break;
+               case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING:  pszReason = " - Restarting";  break;
+               case PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS:  pszReason = " - Missing interrupts";  break;
+               default:  pszReason = " - Unknown reason";  break;
+       }
+
+#if !defined(NO_HARDWARE)
+       /* Determine the type of virtualisation support used */
+#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)
+       if (!PVRSRV_VZ_MODE_IS(NATIVE))
+       {
+#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
+#if defined(SUPPORT_AUTOVZ)
+#if defined(SUPPORT_AUTOVZ_HW_REGS)
+               PVR_DUMPDEBUG_LOG("RGX Virtualisation type: AutoVz with HW register support");
+#else
+               PVR_DUMPDEBUG_LOG("RGX Virtualisation type: AutoVz with shared memory");
+#endif /* defined(SUPPORT_AUTOVZ_HW_REGS) */
+#else
+               PVR_DUMPDEBUG_LOG("RGX Virtualisation type: Hypervisor-assisted with static Fw heap allocation");
+#endif /* defined(SUPPORT_AUTOVZ) */
+#else
+               PVR_DUMPDEBUG_LOG("RGX Virtualisation type: Hypervisor-assisted with dynamic Fw heap allocation");
+#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */
+       }
+#endif /* (RGX_NUM_OS_SUPPORTED > 1) */
+
+#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1))
+       if (!PVRSRV_VZ_MODE_IS(NATIVE))
+       {
+               RGXFWIF_CONNECTION_FW_STATE eFwState = KM_GET_FW_CONNECTION(psDevInfo);
+               RGXFWIF_CONNECTION_OS_STATE eOsState = KM_GET_OS_CONNECTION(psDevInfo);
+
+               PVR_DUMPDEBUG_LOG("RGX Virtualisation firmware connection state: %s (Fw=%s; OS=%s)",
+                                                 ((eFwState == RGXFW_CONNECTION_FW_ACTIVE) && (eOsState == RGXFW_CONNECTION_OS_ACTIVE)) ? ("UP") : ("DOWN"),
+                                                 (eFwState < RGXFW_CONNECTION_FW_STATE_COUNT) ? (apszFwOsStateName[eFwState]) : ("invalid"),
+                                                 (eOsState < RGXFW_CONNECTION_OS_STATE_COUNT) ? (apszFwOsStateName[eOsState]) : ("invalid"));
+
+       }
+#endif
+
+#if defined(SUPPORT_AUTOVZ) && defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)
+       if (!PVRSRV_VZ_MODE_IS(NATIVE))
+       {
+               IMG_UINT32 ui32FwAliveTS = KM_GET_FW_ALIVE_TOKEN(psDevInfo);
+               IMG_UINT32 ui32OsAliveTS = KM_GET_OS_ALIVE_TOKEN(psDevInfo);
+
+               PVR_DUMPDEBUG_LOG("RGX Virtualisation watchdog timestamps (in GPU timer ticks): Fw=%u; OS=%u; diff(FW, OS) = %u",
+                                                 ui32FwAliveTS, ui32OsAliveTS, ui32FwAliveTS - ui32OsAliveTS);
+       }
+#endif
+#endif /* !defined(NO_HARDWARE) */
+
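+       /* Detailed FW state, power management, DVFS and per-OS information is only
+        * available to the host/native driver; guest drivers print placeholders below.
+        */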
+       if (!PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               IMG_CHAR sHwrStateDescription[RGX_DEBUG_STR_SIZE];
+               IMG_BOOL bOsIsolationEnabled = IMG_FALSE;
+
+               if (psFwSysData == NULL)
+               {
+                       /* can't dump any more information */
+                       PVR_DUMPDEBUG_LOG("RGX FW State: %s%s", pszState, pszReason);
+                       return;
+               }
+
+               sHwrStateDescription[0] = '\0';
+
+               _Flags2Description(sHwrStateDescription, RGX_DEBUG_STR_SIZE,
+                       asHwrState2Description, ARRAY_SIZE(asHwrState2Description),
+                       psFwSysData->ui32HWRStateFlags);
+               PVR_DUMPDEBUG_LOG("RGX FW State: %s%s (HWRState 0x%08x:%s)", pszState, pszReason, psFwSysData->ui32HWRStateFlags, sHwrStateDescription);
+               PVR_DUMPDEBUG_LOG("RGX FW Power State: %s (APM %s: %d ok, %d denied, %d non-idle, %d retry, %d other, %d total. Latency: %u ms)",
+                         pszPowStateName[psFwSysData->ePowState],
+                         (psDevInfo->pvAPMISRData)?"enabled":"disabled",
+                         psDevInfo->ui32ActivePMReqOk - psDevInfo->ui32ActivePMReqNonIdle,
+                         psDevInfo->ui32ActivePMReqDenied,
+                         psDevInfo->ui32ActivePMReqNonIdle,
+                         psDevInfo->ui32ActivePMReqRetry,
+                         psDevInfo->ui32ActivePMReqTotal -
+                                                 psDevInfo->ui32ActivePMReqOk -
+                                                 psDevInfo->ui32ActivePMReqDenied -
+                                                 psDevInfo->ui32ActivePMReqRetry -
+                                                 psDevInfo->ui32ActivePMReqNonIdle,
+                         psDevInfo->ui32ActivePMReqTotal,
+                         psRuntimeCfg->ui32ActivePMLatencyms);
+
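+               /* Report DVFS activity: the number of core clock frequency changes and
+                * the current plus three previous frequencies with their sample times.
+                */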
+               ui32NumClockSpeedChanges = (IMG_UINT32) OSAtomicRead(&psDevInfo->psDeviceNode->iNumClockSpeedChanges);
+               RGXGetTimeCorrData(psDevInfo->psDeviceNode, asTimeCorrs, ARRAY_SIZE(asTimeCorrs));
+
+               PVR_DUMPDEBUG_LOG("RGX DVFS: %u frequency changes. "
+                                 "Current frequency: %u.%03u MHz (sampled at %" IMG_UINT64_FMTSPEC " ns). "
+                                 "FW frequency: %u.%03u MHz.",
+                                 ui32NumClockSpeedChanges,
+                                 asTimeCorrs[0].ui32CoreClockSpeed / 1000000,
+                                 (asTimeCorrs[0].ui32CoreClockSpeed / 1000) % 1000,
+                                 asTimeCorrs[0].ui64OSTimeStamp,
+                                 psRuntimeCfg->ui32CoreClockSpeed / 1000000,
+                                 (psRuntimeCfg->ui32CoreClockSpeed / 1000) % 1000);
+               if (ui32NumClockSpeedChanges > 0)
+               {
+                       PVR_DUMPDEBUG_LOG("          Previous frequencies: %u.%03u, %u.%03u, %u.%03u MHz (Sampled at "
+                                                       "%" IMG_UINT64_FMTSPEC ", %" IMG_UINT64_FMTSPEC ", %" IMG_UINT64_FMTSPEC ")",
+                                                                                               asTimeCorrs[1].ui32CoreClockSpeed / 1000000,
+                                                                                               (asTimeCorrs[1].ui32CoreClockSpeed / 1000) % 1000,
+                                                                                               asTimeCorrs[2].ui32CoreClockSpeed / 1000000,
+                                                                                               (asTimeCorrs[2].ui32CoreClockSpeed / 1000) % 1000,
+                                                                                               asTimeCorrs[3].ui32CoreClockSpeed / 1000000,
+                                                                                               (asTimeCorrs[3].ui32CoreClockSpeed / 1000) % 1000,
+                                                                                               asTimeCorrs[1].ui64OSTimeStamp,
+                                                                                               asTimeCorrs[2].ui64OSTimeStamp,
+                                                                                               asTimeCorrs[3].ui64OSTimeStamp);
+               }
+
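+               /* Per-OS runtime state as mirrored by the FW: OS state, freelist health,
+                * scheduling priority, isolation flag and MTS scheduling enable.
+                */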
+               for (ui32OSid = 0; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++)
+               {
+                       RGXFWIF_OS_RUNTIME_FLAGS sFwRunFlags = psFwSysData->asOsRuntimeFlagsMirror[ui32OSid];
+
+                       IMG_BOOL bMTSEnabled = (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_VIRTUALISATION)) ?
+                                                                       IMG_TRUE : ((OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE_ENABLE) & BIT(ui32OSid)) != 0);
+
+                       PVR_DUMPDEBUG_LOG("RGX FW OS %u - State: %s; Freelists: %s%s; Priority: %d;%s %s", ui32OSid,
+                                                         apszFwOsStateName[sFwRunFlags.bfOsState],
+                                                         (sFwRunFlags.bfFLOk) ? "Ok" : "Not Ok",
+                                                         (sFwRunFlags.bfFLGrowPending) ? "; Grow Request Pending" : "",
+                                                         psDevInfo->psRGXFWIfRuntimeCfg->aui32OSidPriority[ui32OSid],
+                                                         (sFwRunFlags.bfIsolatedOS) ? " Isolated;" : "",
+                                                         (bMTSEnabled) ? "MTS on;" : "MTS off;"
+                                                        );
+
+                       bOsIsolationEnabled |= sFwRunFlags.bfIsolatedOS;
+               }
+
+#if defined(PVR_ENABLE_PHR)
+               {
+                       IMG_CHAR sPHRConfigDescription[RGX_DEBUG_STR_SIZE];
+
+                       sPHRConfigDescription[0] = '\0';
+                       _Flags2Description(sPHRConfigDescription, RGX_DEBUG_STR_SIZE,
+                                          asPHRConfig2Description, ARRAY_SIZE(asPHRConfig2Description),
+                                          BIT_ULL(psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode));
+
+                       PVR_DUMPDEBUG_LOG("RGX PHR configuration: (%d) %s", psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode, sPHRConfigDescription);
+               }
+#endif
+
+               if (bOsIsolationEnabled)
+               {
+                       PVR_DUMPDEBUG_LOG("RGX Hard Context Switch deadline: %u ms", psDevInfo->psRGXFWIfRuntimeCfg->ui32HCSDeadlineMS);
+               }
+
+               _RGXDumpFWAssert(pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBufCtl);
+               _RGXDumpFWFaults(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData);
+               _RGXDumpFWPoll(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData);
+       }
+       else
+       {
+               PVR_DUMPDEBUG_LOG("RGX FW State: Unavailable under Guest Mode of operation");
+               PVR_DUMPDEBUG_LOG("RGX FW Power State: Unavailable under Guest Mode of operation");
+       }
+
+       _RGXDumpFWHWRInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData, psDevInfo->psRGXFWIfHWRInfoBufCtl, psDevInfo);
+
+#if defined(SUPPORT_RGXFW_STATS_FRAMEWORK)
+       /* Dump all non-zero values in lines of 8... */
+       {
+               IMG_CHAR    pszLine[(9*RGXFWIF_STATS_FRAMEWORK_LINESIZE)+1];
+               const IMG_UINT32 *pui32FWStatsBuf = psFwSysData->aui32FWStatsBuf;
+               IMG_UINT32  ui32Index1, ui32Index2;
+
+               PVR_DUMPDEBUG_LOG("STATS[START]: RGXFWIF_STATS_FRAMEWORK_MAX=%d", RGXFWIF_STATS_FRAMEWORK_MAX);
+               for (ui32Index1 = 0;  ui32Index1 < RGXFWIF_STATS_FRAMEWORK_MAX;  ui32Index1 += RGXFWIF_STATS_FRAMEWORK_LINESIZE)
+               {
+                       IMG_UINT32  ui32OrOfValues = 0;
+                       IMG_CHAR    *pszBuf = pszLine;
+
+                       /* Print all values in this line and skip if all zero... */
+                       for (ui32Index2 = 0;  ui32Index2 < RGXFWIF_STATS_FRAMEWORK_LINESIZE;  ui32Index2++)
+                       {
+                               ui32OrOfValues |= pui32FWStatsBuf[ui32Index1+ui32Index2];
+                               OSSNPrintf(pszBuf, 9 + 1, " %08x", pui32FWStatsBuf[ui32Index1+ui32Index2]);
+                               pszBuf += 9; /* write over the '\0' */
+                       }
+
+                       if (ui32OrOfValues != 0)
+                       {
+                               PVR_DUMPDEBUG_LOG("STATS[%08x]:%s", ui32Index1, pszLine);
+                       }
+               }
+               PVR_DUMPDEBUG_LOG("STATS[END]");
+       }
+#endif
+}
+
+#if !defined(NO_HARDWARE)
+static void _RGXDumpMetaSPExtraDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                               void *pvDumpDebugFile,
+                                               PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+/* List of extra META Slave Port debug registers */
+/* Order in these two initialisers must match */
+#define RGX_META_SP_EXTRA_DEBUG \
+                       X(RGX_CR_META_SP_MSLVCTRL0) \
+                       X(RGX_CR_META_SP_MSLVCTRL1) \
+                       X(RGX_CR_META_SP_MSLVDATAX) \
+                       X(RGX_CR_META_SP_MSLVIRQSTATUS) \
+                       X(RGX_CR_META_SP_MSLVIRQENABLE) \
+                       X(RGX_CR_META_SP_MSLVIRQLEVEL)
+
+#define RGX_META_SP_EXTRA_DEBUG__UNPACKED_ACCESSES \
+                       X(RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES) \
+                       X(RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES) \
+                       X(RGX_CR_META_SP_MSLVDATAX__META_REGISTER_UNPACKED_ACCESSES) \
+                       X(RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES) \
+                       X(RGX_CR_META_SP_MSLVIRQENABLE__META_REGISTER_UNPACKED_ACCESSES) \
+                       X(RGX_CR_META_SP_MSLVIRQLEVEL__META_REGISTER_UNPACKED_ACCESSES)
+
+       IMG_UINT32 ui32Idx, ui32RegIdx;
+       IMG_UINT32 ui32RegVal;
+       IMG_UINT32 ui32RegAddr;
+
+       const IMG_UINT32* pui32DebugRegAddr;
+       const IMG_UINT32 aui32DebugRegAddr[] = {
+#define X(A) A,
+               RGX_META_SP_EXTRA_DEBUG
+#undef X
+               };
+       const IMG_UINT32 aui32DebugRegAddrUA[] = {
+#define X(A) A,
+               RGX_META_SP_EXTRA_DEBUG__UNPACKED_ACCESSES
+#undef X
+               };
+
+       const IMG_CHAR* apszDebugRegName[] = {
+#define X(A) #A,
+       RGX_META_SP_EXTRA_DEBUG
+#undef X
+       };
+
+       const IMG_UINT32 aui32Debug2RegAddr[] = {0xA28, 0x0A30, 0x0A38};
+
+       PVR_DUMPDEBUG_LOG("META Slave Port extra debug:");
+
+       /* The register offsets depend on whether META_REGISTER_UNPACKED_ACCESSES is supported, but the names in apszDebugRegName are shared by both variants */
+       PVR_ASSERT(sizeof(aui32DebugRegAddrUA) == sizeof(aui32DebugRegAddr));
+       pui32DebugRegAddr = RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES) ?
+                                                       aui32DebugRegAddrUA : aui32DebugRegAddr;
+
+       /* dump first set of Slave Port debug registers */
+       for (ui32Idx = 0; ui32Idx < sizeof(aui32DebugRegAddr)/sizeof(IMG_UINT32); ui32Idx++)
+       {
+               const IMG_CHAR* pszRegName = apszDebugRegName[ui32Idx];
+
+               ui32RegAddr = pui32DebugRegAddr[ui32Idx];
+               ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr);
+               PVR_DUMPDEBUG_LOG("  * %s: 0x%8.8X", pszRegName, ui32RegVal);
+       }
+
+       /* dump second set of Slave Port debug registers */
+       for (ui32Idx = 0; ui32Idx < 4; ui32Idx++)
+       {
+               OSWriteHWReg32(psDevInfo->pvRegsBaseKM, 0xA20, ui32Idx);
+               ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, 0xA20);
+               PVR_DUMPDEBUG_LOG("  * 0xA20[%d]: 0x%8.8X", ui32Idx, ui32RegVal);
+       }
+
+       for (ui32RegIdx = 0; ui32RegIdx < sizeof(aui32Debug2RegAddr)/sizeof(IMG_UINT32); ui32RegIdx++)
+       {
+               ui32RegAddr = aui32Debug2RegAddr[ui32RegIdx];
+               for (ui32Idx = 0; ui32Idx < 2; ui32Idx++)
+               {
+                       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr, ui32Idx);
+                       ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr);
+                       PVR_DUMPDEBUG_LOG("  * 0x%X[%d]: 0x%8.8X", ui32RegAddr, ui32Idx, ui32RegVal);
+               }
+       }
+
+}
+#endif /* !defined(NO_HARDWARE) */
+
+/*
+ *  Array of all the Firmware Trace log IDs used to convert the trace data.
+ */
+typedef struct _TRACEBUF_LOG_ {
+       RGXFW_LOG_SFids eSFId;
+       const IMG_CHAR  *pszName;
+       const IMG_CHAR  *pszFmt;
+       IMG_UINT32              ui32ArgNum;
+} TRACEBUF_LOG;
+
+static const TRACEBUF_LOG aLogDefinitions[] =
+{
+#define X(a, b, c, d, e) {RGXFW_LOG_CREATESFID(a,b,e), #c, d, e},
+       RGXFW_LOG_SFIDLIST
+#undef X
+};
+
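+/* Mask used to ignore the argument-count field encoded in an SF ID when
+ * comparing IDs for uniqueness.
+ */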
+#define NARGS_MASK ~(0xF<<16)
+static IMG_BOOL _FirmwareTraceIntegrityCheck(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                               void *pvDumpDebugFile)
+{
+       const TRACEBUF_LOG *psLogDef = &aLogDefinitions[0];
+       IMG_BOOL bIntegrityOk = IMG_TRUE;
+
+       /*
+        * For every log ID, check that the format string and number of arguments are valid.
+        */
+       while (psLogDef->eSFId != RGXFW_SF_LAST)
+       {
+               const TRACEBUF_LOG *psLogDef2;
+               const IMG_CHAR *pszString;
+               IMG_UINT32 ui32Count;
+
+               /*
+                * Check that the number of arguments matches the number of '%' conversions
+                * in the string, and that no string uses %s, which is not supported because
+                * it would require a pointer to memory that will not be valid.
+                */
+               pszString = psLogDef->pszFmt;
+               ui32Count = 0;
+
+               while (*pszString != '\0')
+               {
+                       if (*pszString++ == '%')
+                       {
+                               ui32Count++;
+                               if (*pszString == 's')
+                               {
+                                       bIntegrityOk = IMG_FALSE;
+                                       PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s uses an unsupported format specifier (fmt: %%%c). Please fix.",
+                                                                         psLogDef->pszName, *pszString);
+                               }
+                               else if (*pszString == '%')
+                               {
+                                       /* Double % is a printable % sign and not a format string... */
+                                       ui32Count--;
+                               }
+                       }
+               }
+
+               if (ui32Count != psLogDef->ui32ArgNum)
+               {
+                       bIntegrityOk = IMG_FALSE;
+                       PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has %d arguments but only %d are specified. Please fix.",
+                                         psLogDef->pszName, ui32Count, psLogDef->ui32ArgNum);
+               }
+
+               /* RGXDumpFirmwareTrace() has a hardcoded limit of supporting up to 20 arguments... */
+               if (ui32Count > 20)
+               {
+                       bIntegrityOk = IMG_FALSE;
+                       PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has %d arguments but a maximum of 20 are supported. Please fix.",
+                                         psLogDef->pszName, ui32Count);
+               }
+
+               /* Check the id number is unique (don't take into account the number of arguments) */
+               ui32Count = 0;
+               psLogDef2 = &aLogDefinitions[0];
+
+               while (psLogDef2->eSFId != RGXFW_SF_LAST)
+               {
+                       if ((psLogDef->eSFId & NARGS_MASK) == (psLogDef2->eSFId & NARGS_MASK))
+                       {
+                               ui32Count++;
+                       }
+                       psLogDef2++;
+               }
+
+               if (ui32Count != 1)
+               {
+                       bIntegrityOk = IMG_FALSE;
+                       PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s id %x is not unique, there are %d more. Please fix.",
+                                         psLogDef->pszName, psLogDef->eSFId, ui32Count - 1);
+               }
+
+               /* Move to the next log ID... */
+               psLogDef++;
+       }
+
+       return bIntegrityOk;
+}
+
+typedef struct {
+       IMG_UINT16     ui16Mask;
+       const IMG_CHAR *pszStr;
+} RGXFWT_DEBUG_INFO_MSKSTR; /* pair of bit mask and debug info message string */
+
+
+/*!
+*******************************************************************************
+
+ @Function     RGXPrepareExtraDebugInfo
+
+ @Description
+
+ Prepares a debug info string by decoding the ui16DebugInfo value passed in
+
+ @Input pszBuffer       - pointer to debug info string buffer
+ @Input ui32BufferSize  - size of the debug info string buffer
+ @Input ui16DebugInfo   - debug info value to decode
+
+ @Return   void
+
+******************************************************************************/
+static void RGXPrepareExtraDebugInfo(IMG_CHAR *pszBuffer, IMG_UINT32 ui32BufferSize, IMG_UINT16 ui16DebugInfo)
+{
+       const RGXFWT_DEBUG_INFO_MSKSTR aDebugInfoMskStr[] =
+       {
+#define X(a, b) {a, b},
+               RGXFWT_DEBUG_INFO_MSKSTRLIST
+#undef X
+       };
+
+       IMG_UINT32 ui32NumFields = sizeof(aDebugInfoMskStr)/sizeof(RGXFWT_DEBUG_INFO_MSKSTR);
+       IMG_UINT32 i;
+       IMG_BOOL   bHasExtraDebugInfo = IMG_FALSE;
+
+       /* Add prepend string */
+       OSStringLCopy(pszBuffer, RGXFWT_DEBUG_INFO_STR_PREPEND, ui32BufferSize);
+
+       /* Add debug info strings */
+       for (i = 0; i < ui32NumFields; i++)
+       {
+               if (ui16DebugInfo & aDebugInfoMskStr[i].ui16Mask)
+               {
+                       if (bHasExtraDebugInfo)
+                       {
+                               OSStringLCat(pszBuffer, ", ", ui32BufferSize); /* Add comma separator */
+                       }
+                       OSStringLCat(pszBuffer, aDebugInfoMskStr[i].pszStr, ui32BufferSize);
+                       bHasExtraDebugInfo = IMG_TRUE;
+               }
+       }
+
+       /* Add append string */
+       OSStringLCat(pszBuffer, RGXFWT_DEBUG_INFO_STR_APPEND, ui32BufferSize);
+}
+
+void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                               void *pvDumpDebugFile,
+                               PVRSRV_RGXDEV_INFO  *psDevInfo)
+{
+       RGXFWIF_TRACEBUF  *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl;
+       static IMG_BOOL   bIntegrityCheckPassed = IMG_FALSE;
+
+       /* Check that the firmware trace is correctly defined... */
+       if (!bIntegrityCheckPassed)
+       {
+               bIntegrityCheckPassed = _FirmwareTraceIntegrityCheck(pfnDumpDebugPrintf, pvDumpDebugFile);
+               if (!bIntegrityCheckPassed)
+               {
+                       return;
+               }
+       }
+
+       /* Dump FW trace information... */
+       if (psRGXFWIfTraceBufCtl != NULL)
+       {
+               IMG_UINT32  tid;
+               IMG_UINT32  ui32TraceBufSizeInDWords = psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords;
+
+               /* Print the log type settings... */
+               if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)
+               {
+                       PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")",
+                                                         ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")),
+                                                         RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType)
+                                                         );
+               }
+               else
+               {
+                       PVR_DUMPDEBUG_LOG("Debug log type: none");
+               }
+
+               /* Print the decoded log for each thread... */
+               for (tid = 0;  tid < RGXFW_THREAD_NUM;  tid++)
+               {
+                       volatile IMG_UINT32  *pui32FWWrapCount = &(psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.ui32LineNum);
+                       volatile IMG_UINT32  *pui32FWTracePtr  = &(psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer);
+                       IMG_UINT32           *pui32TraceBuf    = psRGXFWIfTraceBufCtl->sTraceBuf[tid].pui32TraceBuffer;
+                       IMG_UINT32           ui32HostWrapCount = *pui32FWWrapCount;
+                       IMG_UINT32           ui32HostTracePtr  = *pui32FWTracePtr;
+                       IMG_UINT32           ui32Count         = 0;
+
+                       if (pui32TraceBuf == NULL)
+                       {
+                               /* trace buffer not yet allocated */
+                               continue;
+                       }
+
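+                       /* Walk the circular trace buffer, decoding one entry (log ID,
+                        * 64-bit timestamp and arguments) per iteration until the whole
+                        * buffer has been consumed or an assert entry ends the log.
+                        */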
+                       while (ui32Count < ui32TraceBufSizeInDWords)
+                       {
+                               IMG_UINT32  ui32Data, ui32DataToId;
+
+                               /* Find the first valid log ID, skipping any invalid or padding entries... */
+                               do
+                               {
+                                       ui32Data     = pui32TraceBuf[ui32HostTracePtr];
+                                       ui32DataToId = idToStringID(ui32Data, SFs);
+
+                                       /* If an unrecognized id is found it may be inconsistent data or a firmware trace error. */
+                                       if (ui32DataToId == RGXFW_SF_LAST  &&  RGXFW_LOG_VALIDID(ui32Data))
+                                       {
+                                               PVR_DUMPDEBUG_LOG("WARNING: Unrecognized id (%x). From here on the trace might be wrong!", ui32Data);
+                                       }
+
+                                       /* Update the trace pointer... */
+                                       ui32HostTracePtr++;
+                                       if (ui32HostTracePtr >= ui32TraceBufSizeInDWords)
+                                       {
+                                               ui32HostTracePtr = 0;
+                                               ui32HostWrapCount++;
+                                       }
+                                       ui32Count++;
+                               } while ((RGXFW_SF_LAST == ui32DataToId)  &&
+                                        ui32Count < ui32TraceBufSizeInDWords);
+
+                               if (ui32Count < ui32TraceBufSizeInDWords)
+                               {
+                                       IMG_CHAR   szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN] = "%" IMG_UINT64_FMTSPEC ":T%u-%s> ";
+                                       IMG_CHAR   szDebugInfoBuffer[RGXFWT_DEBUG_INFO_STR_MAXLEN] = "";
+                                       IMG_UINT64 ui64Timestamp;
+                                       IMG_UINT16 ui16DebugInfo;
+
+                                       /* If we hit the ASSERT message then this is the end of the log... */
+                                       if (ui32Data == RGXFW_SF_MAIN_ASSERT_FAILED)
+                                       {
+                                               PVR_DUMPDEBUG_LOG("ASSERTION %s failed at %s:%u",
+                                                                                 psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.szInfo,
+                                                                                 psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.szPath,
+                                                                                 psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.ui32LineNum);
+                                               break;
+                                       }
+
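+                                       /* The entry header holds a 64-bit timestamp split across
+                                        * two dwords; its top bits carry extra debug info flags.
+                                        */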
+                                       ui64Timestamp = (IMG_UINT64)(pui32TraceBuf[(ui32HostTracePtr + 0) % ui32TraceBufSizeInDWords]) << 32 |
+                                                       (IMG_UINT64)(pui32TraceBuf[(ui32HostTracePtr + 1) % ui32TraceBufSizeInDWords]);
+
+                                       ui16DebugInfo = (IMG_UINT16) ((ui64Timestamp & ~RGXFWT_TIMESTAMP_DEBUG_INFO_CLRMSK) >> RGXFWT_TIMESTAMP_DEBUG_INFO_SHIFT);
+                                       ui64Timestamp = (ui64Timestamp & ~RGXFWT_TIMESTAMP_TIME_CLRMSK) >> RGXFWT_TIMESTAMP_TIME_SHIFT;
+
+                                       /*
+                                        * Print the trace string and provide up to 20 arguments which
+                                        * the printf function will be able to use. We have already checked
+                                        * that no string uses more than this.
+                                        */
+                                       OSStringLCat(szBuffer, SFs[ui32DataToId].psName, PVR_MAX_DEBUG_MESSAGE_LEN);
+
+                                       /* Check and append any extra debug info available */
+                                       if (ui16DebugInfo)
+                                       {
+                                               /* Prepare debug info string */
+                                               RGXPrepareExtraDebugInfo(szDebugInfoBuffer, RGXFWT_DEBUG_INFO_STR_MAXLEN, ui16DebugInfo);
+
+                                               /* Append debug info string */
+                                               OSStringLCat(szBuffer, szDebugInfoBuffer, PVR_MAX_DEBUG_MESSAGE_LEN);
+                                       }
+
+                                       PVR_DUMPDEBUG_LOG(szBuffer, ui64Timestamp, tid, groups[RGXFW_SF_GID(ui32Data)],
+                                                                         pui32TraceBuf[(ui32HostTracePtr +  2) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr +  3) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr +  4) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr +  5) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr +  6) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr +  7) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr +  8) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr +  9) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr + 10) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr + 11) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr + 12) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr + 13) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr + 14) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr + 15) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr + 16) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr + 17) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr + 18) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr + 19) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr + 20) % ui32TraceBufSizeInDWords],
+                                                                         pui32TraceBuf[(ui32HostTracePtr + 21) % ui32TraceBufSizeInDWords]);
+
+                                       /* Update the trace pointer... */
+                                       ui32HostTracePtr = ui32HostTracePtr + 2 + RGXFW_SF_PARAMNUM(ui32Data);
+                                       if (ui32HostTracePtr >= ui32TraceBufSizeInDWords)
+                                       {
+                                               ui32HostTracePtr = ui32HostTracePtr % ui32TraceBufSizeInDWords;
+                                               ui32HostWrapCount++;
+                                       }
+                                       ui32Count = (ui32Count + 2 + RGXFW_SF_PARAMNUM(ui32Data));
+
+                                       /* Has the FW trace buffer overtaken the host pointer while the last line was being printed? */
+                                       if ((*pui32FWWrapCount > ui32HostWrapCount) ||
+                                           ((*pui32FWWrapCount == ui32HostWrapCount) && (*pui32FWTracePtr > ui32HostTracePtr)))
+                                       {
+                                               /* Move forward to the oldest entry again... */
+                                               PVR_DUMPDEBUG_LOG(". . .");
+                                               ui32HostWrapCount = *pui32FWWrapCount;
+                                               ui32HostTracePtr  = *pui32FWTracePtr;
+                                       }
+                               }
+                       }
+               }
+       }
+}
+
+#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS)
+void RGXDumpPowerMonitoring(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                               void *pvDumpDebugFile,
+                               PVRSRV_RGXDEV_INFO  *psDevInfo)
+{
+       const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+
+       /* Print the power monitoring counters... */
+       if (psFwSysData != NULL)
+       {
+               const IMG_UINT32 *pui32TraceBuf = psFwSysData->sPowerMonBuf.pui32TraceBuffer;
+               IMG_UINT32 ui32TracePtr = 0; //psFwSysData->sPowerMonBuf.ui32TracePointer;
+               IMG_UINT32 ui32PowerMonBufSizeInDWords = psFwSysData->ui32PowerMonBufSizeInDWords;
+               IMG_UINT32 ui32Count = 0;
+               IMG_UINT64 ui64Timestamp;
+
+               if (pui32TraceBuf == NULL)
+               {
+                       /* power monitoring buffer not yet allocated */
+                       return;
+               }
+
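+               /* A valid power monitoring sample starts with the RGX_CR_TIMER register
+                * ID followed by a 64-bit timestamp, then (register, value) dword pairs.
+                */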
+               if (pui32TraceBuf[ui32TracePtr] != RGX_CR_TIMER)
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "Power monitoring data not available."));
+                       return;
+               }
+               ui64Timestamp = (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 1) % ui32PowerMonBufSizeInDWords]) << 32 |
+                                               (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords]);
+
+               /* Update the trace pointer... */
+               ui32TracePtr = (ui32TracePtr + 3) % ui32PowerMonBufSizeInDWords;
+               ui32Count    = (ui32Count    + 3);
+
+               PVR_DPF((PVR_DBG_WARNING, "Dumping power monitoring buffer: CPUVAddr = %p, pointer = 0x%x, size = 0x%x",
+                                pui32TraceBuf,
+                                ui32TracePtr,
+                                ui32PowerMonBufSizeInDWords));
+
+               while (ui32Count < ui32PowerMonBufSizeInDWords)
+               {
+                       /* power monitoring data is (register, value) dword pairs */
+                       PVR_DUMPDEBUG_LOG("%" IMG_UINT64_FMTSPEC ":POWMON  0x%08x 0x%08x  0x%08x 0x%08x",
+                                                         ui64Timestamp,
+                                                         pui32TraceBuf[(ui32TracePtr + 0) % ui32PowerMonBufSizeInDWords],
+                                                         pui32TraceBuf[(ui32TracePtr + 1) % ui32PowerMonBufSizeInDWords],
+                                                         pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords],
+                                                         pui32TraceBuf[(ui32TracePtr + 3) % ui32PowerMonBufSizeInDWords]);
+
+                       if (pui32TraceBuf[(ui32TracePtr + 0) % ui32PowerMonBufSizeInDWords] == RGXFWIF_TIMEDIFF_ID ||
+                               pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords] == RGXFWIF_TIMEDIFF_ID)
+                       {
+                               /* end of buffer */
+                               break;
+                       }
+
+                       /* Update the trace pointer... */
+                       ui32TracePtr = (ui32TracePtr + 4) % ui32PowerMonBufSizeInDWords;
+                       ui32Count    = (ui32Count    + 4);
+               }
+       }
+}
+#endif
+
+static const IMG_CHAR *_RGXGetDebugDevStateString(PVRSRV_DEVICE_STATE eDevState)
+{
+       switch (eDevState)
+       {
+               case PVRSRV_DEVICE_STATE_INIT:
+                       return "Initialising";
+               case PVRSRV_DEVICE_STATE_ACTIVE:
+                       return "Active";
+               case PVRSRV_DEVICE_STATE_DEINIT:
+                       return "De-initialising";
+               case PVRSRV_DEVICE_STATE_BAD:
+                       return "Bad";
+               case PVRSRV_DEVICE_STATE_UNDEFINED:
+                       PVR_ASSERT(!"Device has undefined state");
+                       __fallthrough;
+               default:
+                       return "Unknown";
+       }
+}
+
+static const IMG_CHAR* _RGXGetDebugDevPowerStateString(PVRSRV_DEV_POWER_STATE ePowerState)
+{
+       switch (ePowerState)
+       {
+               case PVRSRV_DEV_POWER_STATE_DEFAULT: return "DEFAULT";
+               case PVRSRV_DEV_POWER_STATE_OFF: return "OFF";
+               case PVRSRV_DEV_POWER_STATE_ON: return "ON";
+               default: return "UNKNOWN";
+       }
+}
+
+/* Helper macros to emit data */
+#define REG32_FMTSPEC   "%-30s: 0x%08X"
+#define REG64_FMTSPEC   "%-30s: 0x%016" IMG_UINT64_FMTSPECX
+#define DDLOG32(R)      PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, RGX_CR_##R));
+#define DDLOG64(R)      PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadHWReg64(pvRegsBaseKM, RGX_CR_##R));
+#define DDLOG32_DPX(R)  PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, DPX_CR_##R));
+#define DDLOG64_DPX(R)  PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadHWReg64(pvRegsBaseKM, DPX_CR_##R));
+#define DDLOGVAL32(S,V) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, S, V);
+#define DDLOG32UNPACKED(R)      PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, RGX_CR_##R##__META_REGISTER_UNPACKED_ACCESSES));
+
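+/* Dump the state of the RISC-V firmware core: the address remap configuration
+ * registers currently in use and, through the debug module, the core's internal
+ * registers. The core is halted for the register reads and resumed afterwards.
+ */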
+#if !defined(NO_HARDWARE)
+static PVRSRV_ERROR RGXDumpRISCVState(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                                                         void *pvDumpDebugFile,
+                                                                         PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM;
+       RGXRISCVFW_STATE sRiscvState;
+       const IMG_CHAR *pszException;
+       PVRSRV_ERROR eError;
+
+       /* Limit dump to what is currently being used */
+       DDLOG64(FWCORE_ADDR_REMAP_CONFIG4);
+       DDLOG64(FWCORE_ADDR_REMAP_CONFIG5);
+       DDLOG64(FWCORE_ADDR_REMAP_CONFIG6);
+       DDLOG64(FWCORE_ADDR_REMAP_CONFIG12);
+       DDLOG64(FWCORE_ADDR_REMAP_CONFIG13);
+       DDLOG64(FWCORE_ADDR_REMAP_CONFIG14);
+
+       PVR_DUMPDEBUG_LOG("---- [ RISC-V internal state ] ----");
+
+#if defined(SUPPORT_VALIDATION) || defined(SUPPORT_RISCV_GDB)
+       if (RGXRiscvIsHalted(psDevInfo))
+       {
+               /* Avoid resuming the RISC-V FW as most operations
+                * on the debug module require a halted core */
+               PVR_DUMPDEBUG_LOG("(skipping as RISC-V found halted)");
+               return PVRSRV_OK;
+       }
+#endif
+
+       eError = RGXRiscvHalt(psDevInfo);
+       PVR_GOTO_IF_ERROR(eError, _RISCVDMError);
+
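+       /* Read and print every register named in RGXRISCVFW_DEBUG_DUMP_REGISTERS
+        * via the debug module.
+        */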
+#define X(name, address)                                                                                               \
+       eError = RGXRiscvReadReg(psDevInfo, address, &sRiscvState.name);        \
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXRiscvReadReg", _RISCVDMError);        \
+       DDLOGVAL32(#name, sRiscvState.name);
+
+       RGXRISCVFW_DEBUG_DUMP_REGISTERS
+#undef X
+
+       eError = RGXRiscvResume(psDevInfo);
+       PVR_GOTO_IF_ERROR(eError, _RISCVDMError);
+
+       pszException = _GetRISCVException(sRiscvState.mcause);
+       if (pszException != NULL)
+       {
+               PVR_DUMPDEBUG_LOG("RISC-V FW hit an exception: %s", pszException);
+
+               eError = _ValidateFWImage(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DUMPDEBUG_LOG("Failed to validate the FW image for code corruption");
+               }
+       }
+
+       return PVRSRV_OK;
+
+_RISCVDMError:
+       PVR_DPF((PVR_DBG_ERROR, "Failed to communicate with the Debug Module"));
+
+       return eError;
+}
+#endif
+
+PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                                                void *pvDumpDebugFile,
+                                                                PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+#if !defined(NO_HARDWARE)
+       IMG_UINT32   ui32Meta = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) ? RGX_GET_FEATURE_VALUE(psDevInfo, META) : 0;
+       IMG_UINT32   ui32RegVal;
+       PVRSRV_ERROR eError;
+       IMG_BOOL     bFirmwarePerf;
+#endif
+       IMG_BOOL     bMulticore = RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT);
+       void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM;
+
+#if !defined(NO_HARDWARE)
+       /* Check if firmware perf was set at Init time */
+       bFirmwarePerf = (psDevInfo->psRGXFWIfSysInit->eFirmwarePerf != FW_PERF_CONF_NONE);
+#endif
+
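+       /* Dump general GPU status: core/multicore IDs, clocks, MMU and SLC fault
+        * status, BIF/FBCDC state, performance phase counters, MTS and CDM context
+        * registers, idle status and the scratch registers.
+        */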
+       DDLOG64(CORE_ID);
+
+       if (bMulticore)
+       {
+               DDLOG64(MULTICORE);
+               DDLOG32(MULTICORE_SYSTEM);
+               DDLOG32(MULTICORE_DOMAIN);
+       }
+       DDLOG32(EVENT_STATUS);
+       DDLOG64(TIMER);
+       DDLOG64(CLK_CTRL0);
+       DDLOG64(CLK_STATUS0);
+       DDLOG64(CLK_CTRL1);
+       DDLOG64(CLK_STATUS1);
+       DDLOG64(MMU_FAULT_STATUS1);
+       DDLOG64(MMU_FAULT_STATUS2);
+       DDLOG64(MMU_FAULT_STATUS_PM);
+       DDLOG64(MMU_FAULT_STATUS_META);
+       DDLOG64(SLC_STATUS1);
+       DDLOG64(SLC_STATUS2);
+       DDLOG64(SLC_STATUS_DEBUG);
+       DDLOG64(MMU_STATUS);
+       DDLOG32(BIF_PFS);
+       DDLOG32(BIF_TEXAS0_PFS);
+       DDLOG32(BIF_TEXAS1_PFS);
+       DDLOG32(BIF_OUTSTANDING_READ);
+       DDLOG32(BIF_TEXAS0_OUTSTANDING_READ);
+       DDLOG32(BIF_TEXAS1_OUTSTANDING_READ);
+       DDLOG32(FBCDC_IDLE);
+       DDLOG32(FBCDC_STATUS);
+       DDLOG32(SPU_ENABLE);
+
+       DDLOG64(CONTEXT_MAPPING0);
+       DDLOG64(CONTEXT_MAPPING2);
+       DDLOG64(CONTEXT_MAPPING3);
+       DDLOG64(CONTEXT_MAPPING4);
+
+       if (bMulticore)
+       {
+#if !defined(RGX_CR_MULTICORE_AXI)
+#define RGX_CR_MULTICORE_AXI                              (0x2508U)
+#define RGX_CR_MULTICORE_AXI_ERROR                        (0x2510U)
+#endif
+               DDLOG32(MULTICORE_AXI);
+               DDLOG32(MULTICORE_AXI_ERROR);
+               DDLOG32(MULTICORE_TDM_CTRL_COMMON);
+               DDLOG32(MULTICORE_FRAGMENT_CTRL_COMMON);
+               DDLOG32(MULTICORE_COMPUTE_CTRL_COMMON);
+       }
+
+       DDLOG32(PERF_PHASE_2D);
+       DDLOG32(PERF_CYCLE_2D_TOTAL);
+       DDLOG32(PERF_PHASE_GEOM);
+       DDLOG32(PERF_CYCLE_GEOM_TOTAL);
+       DDLOG32(PERF_PHASE_FRAG);
+       DDLOG32(PERF_CYCLE_FRAG_TOTAL);
+       DDLOG32(PERF_CYCLE_GEOM_OR_FRAG_TOTAL);
+       DDLOG32(PERF_CYCLE_GEOM_AND_FRAG_TOTAL);
+       DDLOG32(PERF_PHASE_COMP);
+       DDLOG32(PERF_CYCLE_COMP_TOTAL);
+       DDLOG32(PM_PARTIAL_RENDER_ENABLE);
+
+       DDLOG32(ISP_RENDER);
+       DDLOG32(ISP_CTL);
+
+       DDLOG32(MTS_INTCTX);
+       DDLOG32(MTS_BGCTX);
+       DDLOG32(MTS_BGCTX_COUNTED_SCHEDULE);
+       DDLOG32(MTS_SCHEDULE);
+       DDLOG32(MTS_GPU_INT_STATUS);
+
+       DDLOG32(CDM_CONTEXT_STORE_STATUS);
+       DDLOG64(CDM_CONTEXT_PDS0);
+       DDLOG64(CDM_CONTEXT_PDS1);
+       DDLOG64(CDM_TERMINATE_PDS);
+       DDLOG64(CDM_TERMINATE_PDS1);
+       DDLOG64(CDM_CONTEXT_LOAD_PDS0);
+       DDLOG64(CDM_CONTEXT_LOAD_PDS1);
+
+       DDLOG32(JONES_IDLE);
+       DDLOG32(SLC_IDLE);
+       DDLOG32(SLC_FAULT_STOP_STATUS);
+
+       DDLOG64(SCRATCH0);
+       DDLOG64(SCRATCH1);
+       DDLOG64(SCRATCH2);
+       DDLOG64(SCRATCH3);
+       DDLOG64(SCRATCH4);
+       DDLOG64(SCRATCH5);
+       DDLOG64(SCRATCH6);
+       DDLOG64(SCRATCH7);
+       DDLOG64(SCRATCH8);
+       DDLOG64(SCRATCH9);
+       DDLOG64(SCRATCH10);
+       DDLOG64(SCRATCH11);
+       DDLOG64(SCRATCH12);
+       DDLOG64(SCRATCH13);
+       DDLOG64(SCRATCH14);
+       DDLOG64(SCRATCH15);
+       DDLOG32(IRQ_OS0_EVENT_STATUS);
+
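+       /* If a META firmware processor is present, dump its slave port IRQ status
+        * and thread state (TXENABLE, TXSTATUS, TXDEFR, PC, PCX, SP), then validate
+        * the FW image if thread 0 is enabled and a fault has been detected.
+        */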
+#if !defined(NO_HARDWARE)
+       if (ui32Meta)
+       {
+               IMG_BOOL bIsT0Enabled = IMG_FALSE, bIsFWFaulted = IMG_FALSE;
+               if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES))
+               {
+                       DDLOG32UNPACKED(META_SP_MSLVIRQSTATUS);
+               }
+               else
+               {
+                       DDLOG32(META_SP_MSLVIRQSTATUS);
+               }
+
+               eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T0ENABLE_OFFSET, &ui32RegVal);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError);
+               DDLOGVAL32("T0 TXENABLE", ui32RegVal);
+               if (ui32RegVal & META_CR_TXENABLE_ENABLE_BIT)
+               {
+                       bIsT0Enabled = IMG_TRUE;
+               }
+
+               eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T0STATUS_OFFSET, &ui32RegVal);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError);
+               DDLOGVAL32("T0 TXSTATUS", ui32RegVal);
+
+               /* check for FW fault */
+               if (((ui32RegVal >> 20) & 0x3) == 0x2)
+               {
+                       bIsFWFaulted = IMG_TRUE;
+               }
+
+               eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T0DEFR_OFFSET, &ui32RegVal);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError);
+               DDLOGVAL32("T0 TXDEFR", ui32RegVal);
+
+               eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_PC, &ui32RegVal);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError);
+               DDLOGVAL32("T0 PC", ui32RegVal);
+
+               eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_PCX, &ui32RegVal);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError);
+               DDLOGVAL32("T0 PCX", ui32RegVal);
+
+               eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_SP, &ui32RegVal);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError);
+               DDLOGVAL32("T0 SP", ui32RegVal);
+
+               if ((ui32Meta == MTP218) || (ui32Meta == MTP219))
+               {
+                       eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T1ENABLE_OFFSET, &ui32RegVal);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError);
+                       DDLOGVAL32("T1 TXENABLE", ui32RegVal);
+
+                       eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T1STATUS_OFFSET, &ui32RegVal);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError);
+                       DDLOGVAL32("T1 TXSTATUS", ui32RegVal);
+
+                       eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T1DEFR_OFFSET, &ui32RegVal);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError);
+                       DDLOGVAL32("T1 TXDEFR", ui32RegVal);
+
+                       eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_PC, &ui32RegVal);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError);
+                       DDLOGVAL32("T1 PC", ui32RegVal);
+
+                       eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_PCX, &ui32RegVal);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError);
+                       DDLOGVAL32("T1 PCX", ui32RegVal);
+
+                       eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_SP, &ui32RegVal);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError);
+                       DDLOGVAL32("T1 SP", ui32RegVal);
+               }
+
+               if (bFirmwarePerf)
+               {
+                       eError = RGXReadFWModuleAddr(psDevInfo, META_CR_PERF_COUNT0, &ui32RegVal);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError);
+                       DDLOGVAL32("META_CR_PERF_COUNT0", ui32RegVal);
+
+                       eError = RGXReadFWModuleAddr(psDevInfo, META_CR_PERF_COUNT1, &ui32RegVal);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError);
+                       DDLOGVAL32("META_CR_PERF_COUNT1", ui32RegVal);
+               }
+
+               if (bIsT0Enabled && bIsFWFaulted)
+               {
+                       eError = _ValidateFWImage(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DUMPDEBUG_LOG("Failed to validate the FW image for code corruption");
+                       }
+               }
+               else if (bIsFWFaulted)
+               {
+                       PVR_DUMPDEBUG_LOG("Skipping FW code memory corruption checking as META is disabled");
+               }
+       }
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+       {
+               eError = RGXDumpRISCVState(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo);
+               PVR_RETURN_IF_ERROR(eError);
+       }
+#endif
+
+       return PVRSRV_OK;
+
+#if !defined(NO_HARDWARE)
+_METASPError:
+       PVR_DUMPDEBUG_LOG("Dump Slave Port debug information");
+       _RGXDumpMetaSPExtraDebugInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo);
+
+       return eError;
+#endif
+}
+
+#undef REG32_FMTSPEC
+#undef REG64_FMTSPEC
+#undef DDLOG32
+#undef DDLOG64
+#undef DDLOG32_DPX
+#undef DDLOG64_DPX
+#undef DDLOGVAL32
+#undef DDLOG32UNPACKED
+
+/*!
+*******************************************************************************
+
+ @Function     RGXDebugRequestProcess
+
+ @Description
+
+ This function will print out the debug information for the specified level of verbosity
+
+ @Input pfnDumpDebugPrintf  - Optional replacement print function
+ @Input pvDumpDebugFile     - Optional file identifier to be passed to the
+                              'printf' function if required
+ @Input psDevInfo           - RGX device info
+ @Input ui32VerbLevel       - Verbosity level
+
+ @Return   void
+
+******************************************************************************/
+static
+void RGXDebugRequestProcess(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                               void *pvDumpDebugFile,
+                               PVRSRV_RGXDEV_INFO *psDevInfo,
+                               IMG_UINT32 ui32VerbLevel)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+       PVRSRV_DEV_POWER_STATE  ePowerState;
+       IMG_BOOL                bRGXPoweredON;
+       IMG_UINT8               ui8FwOsCount;
+       RGXFWIF_TRACEBUF        *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl;
+       const RGXFWIF_OSDATA    *psFwOsData = psDevInfo->psRGXFWIfFwOsData;
+       IMG_BOOL                bPwrLockAlreadyHeld;
+
+       bPwrLockAlreadyHeld = PVRSRVPwrLockIsLockedByMe(psDeviceNode);
+       if (!bPwrLockAlreadyHeld)
+       {
+               /* Only acquire the power-lock if not already held by the calling context */
+               eError = PVRSRVPowerLock(psDeviceNode);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire lock (%s)",
+                                       __func__,
+                                       PVRSRVGetErrorString(eError)));
+                       return;
+               }
+       }
+
+       ui8FwOsCount = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.sInitOptions.ui8OsCountSupport;
+
+       eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Error retrieving RGX power state. No debug info dumped.",
+                               __func__));
+               goto Exit;
+       }
+
+       if ((PVRSRV_VZ_MODE_IS(NATIVE) && (ui8FwOsCount > 1)) ||
+               (PVRSRV_VZ_MODE_IS(HOST) && (ui8FwOsCount != RGX_NUM_OS_SUPPORTED)))
+       {
+               PVR_DUMPDEBUG_LOG("Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)",
+                                                 (PVRSRV_VZ_MODE_IS(NATIVE)) ? (1) : (RGX_NUM_OS_SUPPORTED), ui8FwOsCount);
+       }
+
+       PVR_DUMPDEBUG_LOG("------[ RGX Device ID:%d Start ]------", psDevInfo->psDeviceNode->sDevId.ui32InternalID);
+
+       bRGXPoweredON = (ePowerState == PVRSRV_DEV_POWER_STATE_ON);
+
+       PVR_DUMPDEBUG_LOG("------[ RGX Info ]------");
+       PVR_DUMPDEBUG_LOG("Device Node (Info): %p (%p)", psDevInfo->psDeviceNode, psDevInfo);
+       PVR_DUMPDEBUG_LOG("RGX BVNC: %d.%d.%d.%d (%s)", psDevInfo->sDevFeatureCfg.ui32B,
+                                                                                          psDevInfo->sDevFeatureCfg.ui32V,
+                                                                                          psDevInfo->sDevFeatureCfg.ui32N,
+                                                                                          psDevInfo->sDevFeatureCfg.ui32C,
+                                                                                          PVR_ARCH_NAME);
+       PVR_DUMPDEBUG_LOG("RGX Device State: %s", _RGXGetDebugDevStateString(psDeviceNode->eDevState));
+       PVR_DUMPDEBUG_LOG("RGX Power State: %s", _RGXGetDebugDevPowerStateString(ePowerState));
+       if (psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)
+       {
+               PVR_DUMP_FIRMWARE_INFO(psDevInfo->psRGXFWIfOsInit->sRGXCompChecks);
+       }
+       else
+       {
+               PVR_DUMPDEBUG_LOG("FW info: UNINITIALIZED");
+       }
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TILE_REGION_PROTECTION))
+       {
+#if defined(SUPPORT_TRP)
+               PVR_DUMPDEBUG_LOG("TRP: HW support - Yes; SW enabled");
+#else
+               PVR_DUMPDEBUG_LOG("TRP: HW support - Yes; SW disabled");
+#endif
+       }
+       else
+       {
+               PVR_DUMPDEBUG_LOG("TRP: HW support - No");
+       }
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, WORKGROUP_PROTECTION))
+       {
+#if defined(SUPPORT_WGP)
+               PVR_DUMPDEBUG_LOG("WGP: HW support - Yes; SW enabled");
+#else
+               PVR_DUMPDEBUG_LOG("WGP: HW support - Yes; SW disabled");
+#endif
+       }
+       else
+       {
+               PVR_DUMPDEBUG_LOG("WGP: HW support - No");
+       }
+
+       RGXDumpRGXDebugSummary(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, bRGXPoweredON);
+
+       /* Dump out the kernel CCB. */
+       {
+               const RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl;
+
+               if (psKCCBCtl != NULL)
+               {
+                       PVR_DUMPDEBUG_LOG("RGX Kernel CCB WO:0x%X RO:0x%X",
+                                                         psKCCBCtl->ui32WriteOffset,
+                                                         psKCCBCtl->ui32ReadOffset);
+               }
+       }
+
+       /* Dump out the firmware CCB. */
+       {
+               const RGXFWIF_CCB_CTL *psFCCBCtl = psDevInfo->psFirmwareCCBCtl;
+
+               if (psFCCBCtl != NULL)
+               {
+                       PVR_DUMPDEBUG_LOG("RGX Firmware CCB WO:0x%X RO:0x%X",
+                                                          psFCCBCtl->ui32WriteOffset,
+                                                          psFCCBCtl->ui32ReadOffset);
+               }
+       }
+
+       if (psFwOsData != NULL)
+       {
+               IMG_UINT32 ui32TID;
+
+               /* Dump the KCCB commands executed */
+               PVR_DUMPDEBUG_LOG("RGX Kernel CCB commands executed = %d",
+                                                 psFwOsData->ui32KCCBCmdsExecuted);
+
+#if defined(PVRSRV_STALLED_CCB_ACTION)
+               /* Dump the number of times we have performed a forced UFO update,
+                * and (if non-zero) the timestamp of the most recent occurrence.
+                */
+               PVR_DUMPDEBUG_LOG("RGX SLR: Forced UFO updates requested = %d",
+                                                 psFwOsData->ui32ForcedUpdatesRequested);
+               if (psFwOsData->ui32ForcedUpdatesRequested > 0)
+               {
+                       IMG_UINT8 ui8Idx;
+                       IMG_UINT64 ui64Seconds, ui64Nanoseconds;
+
+                       if (psFwOsData->ui64LastForcedUpdateTime > 0ULL)
+                       {
+                               ConvertOSTimestampToSAndNS(psFwOsData->ui64LastForcedUpdateTime, &ui64Seconds, &ui64Nanoseconds);
+                               PVR_DUMPDEBUG_LOG("RGX SLR: (most recent forced update was around %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC ")",
+                                                                 ui64Seconds, ui64Nanoseconds);
+                       }
+                       else
+                       {
+                               PVR_DUMPDEBUG_LOG("RGX SLR: (unable to force update as fence contained no sync checkpoints)");
+                       }
+                       /* Dump SLR log */
+                       if (psFwOsData->sSLRLogFirst.aszCCBName[0])
+                       {
+                               ConvertOSTimestampToSAndNS(psFwOsData->sSLRLogFirst.ui64Timestamp, &ui64Seconds, &ui64Nanoseconds);
+                               PVR_DUMPDEBUG_LOG("RGX SLR:{%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC
+                                                                 "} Fence found on context 0x%x '%s' has %d UFOs",
+                                                                 ui64Seconds, ui64Nanoseconds,
+                                                                 psFwOsData->sSLRLogFirst.ui32FWCtxAddr,
+                                                                 psFwOsData->sSLRLogFirst.aszCCBName,
+                                                                 psFwOsData->sSLRLogFirst.ui32NumUFOs);
+                       }
+                       for (ui8Idx = 0; ui8Idx < PVR_SLR_LOG_ENTRIES; ui8Idx++)
+                       {
+                               if (psFwOsData->sSLRLog[ui8Idx].aszCCBName[0])
+                               {
+                                       ConvertOSTimestampToSAndNS(psFwOsData->sSLRLog[ui8Idx].ui64Timestamp, &ui64Seconds, &ui64Nanoseconds);
+                                       PVR_DUMPDEBUG_LOG("RGX SLR:[%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC
+                                                                         "] Fence found on context 0x%x '%s' has %d UFOs",
+                                                                         ui64Seconds, ui64Nanoseconds,
+                                                                         psFwOsData->sSLRLog[ui8Idx].ui32FWCtxAddr,
+                                                                         psFwOsData->sSLRLog[ui8Idx].aszCCBName,
+                                                                         psFwOsData->sSLRLog[ui8Idx].ui32NumUFOs);
+                               }
+                       }
+               }
+#else
+               PVR_DUMPDEBUG_LOG("RGX SLR: Disabled");
+#endif
+
+               /* Dump the error counts */
+               PVR_DUMPDEBUG_LOG("RGX Errors: WGP:%d, TRP:%d",
+                                                 psDevInfo->sErrorCounts.ui32WGPErrorCount,
+                                                 psDevInfo->sErrorCounts.ui32TRPErrorCount);
+
+               for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++)
+               {
+                       /* Dump the IRQ info for threads */
+                       PVR_DUMPDEBUG_LOG("RGX FW thread %u: FW IRQ count = %u, Last sampled IRQ count in LISR = %u",
+                                                         ui32TID,
+                                                         psFwOsData->aui32InterruptCount[ui32TID],
+                                                         psDevInfo->aui32SampleIRQCount[ui32TID]);
+               }
+       }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       /* Dump out the Workload estimation CCB. */
+       {
+               const RGXFWIF_CCB_CTL *psWorkEstCCBCtl = psDevInfo->psWorkEstFirmwareCCBCtl;
+
+               if (psWorkEstCCBCtl != NULL)
+               {
+                       PVR_DUMPDEBUG_LOG("RGX WorkEst CCB WO:0x%X RO:0x%X",
+                                                         psWorkEstCCBCtl->ui32WriteOffset,
+                                                         psWorkEstCCBCtl->ui32ReadOffset);
+               }
+       }
+#endif
+
+       /* Dump the FW Sys config flags on the Host */
+       if (!PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+               IMG_CHAR sFwSysFlagsDescription[MAX_FW_DESCRIPTION_LENGTH];
+
+               if (!psFwSysData)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Fw Sys Data is not mapped into CPU space", __func__));
+                       goto Exit;
+               }
+
+               _GetFwSysFlagsDescription(sFwSysFlagsDescription, MAX_FW_DESCRIPTION_LENGTH, psFwSysData->ui32ConfigFlags);
+               PVR_DUMPDEBUG_LOG("FW System config flags = 0x%08X (%s)", psFwSysData->ui32ConfigFlags, sFwSysFlagsDescription);
+       }
+
+       /* Dump the FW OS config flags */
+       {
+               IMG_CHAR sFwOsFlagsDescription[MAX_FW_DESCRIPTION_LENGTH];
+
+               if (!psFwOsData)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Fw Os Data is not mapped into CPU space", __func__));
+                       goto Exit;
+               }
+
+               _GetFwOsFlagsDescription(sFwOsFlagsDescription, MAX_FW_DESCRIPTION_LENGTH, psFwOsData->ui32FwOsConfigFlags);
+               PVR_DUMPDEBUG_LOG("FW OS config flags = 0x%08X (%s)", psFwOsData->ui32FwOsConfigFlags, sFwOsFlagsDescription);
+       }
+
+       if ((bRGXPoweredON) && !PVRSRV_VZ_MODE_IS(GUEST))
+       {
+
+               PVR_DUMPDEBUG_LOG("------[ RGX registers ]------");
+               PVR_DUMPDEBUG_LOG("RGX Register Base Address (Linear):   0x%p", psDevInfo->pvRegsBaseKM);
+               PVR_DUMPDEBUG_LOG("RGX Register Base Address (Physical): 0x%08lX", (unsigned long)psDevInfo->sRegsPhysBase.uiAddr);
+
+               if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+               {
+                       /* Forcing bit 6 of MslvCtrl1 to 0 to avoid internal reg read going through the core */
+                       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES))
+                       {
+                               OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES, 0x0);
+                       }
+                       else
+                       {
+                               OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL1, 0x0);
+                       }
+               }
+
+               eError = RGXDumpRGXRegisters(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: RGXDumpRGXRegisters failed (%s)",
+                                       __func__,
+                                       PVRSRVGetErrorString(eError)));
+#if defined(SUPPORT_FW_VIEW_EXTRA_DEBUG)
+                       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+                       {
+                               PVR_DUMPDEBUG_LOG("Dump Slave Port debug information");
+                               _RGXDumpMetaSPExtraDebugInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo);
+                       }
+#endif
+               }
+       }
+       else
+       {
+               PVR_DUMPDEBUG_LOG(" (!) %s. No registers dumped", PVRSRV_VZ_MODE_IS(GUEST) ? "Guest Mode of operation" : "RGX power is down");
+       }
+
+       PVR_DUMPDEBUG_LOG("------[ RGX FW Trace Info ]------");
+
+       if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM))
+       {
+               IMG_INT tid;
+               /* Dump FW trace information */
+               if (psRGXFWIfTraceBufCtl != NULL)
+               {
+                       for (tid = 0 ; tid < RGXFW_THREAD_NUM ; tid++)
+                       {
+                               IMG_UINT32      i;
+                               IMG_BOOL        bPrevLineWasZero = IMG_FALSE;
+                               IMG_BOOL        bLineIsAllZeros = IMG_FALSE;
+                               IMG_UINT32      ui32CountLines = 0;
+                               IMG_UINT32      *pui32TraceBuffer;
+                               IMG_CHAR        *pszLine;
+
+                               if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)
+                               {
+                                       PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")",
+                                                                         ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")),
+                                                                         RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType)
+                                                                         );
+                               }
+                               else
+                               {
+                                       PVR_DUMPDEBUG_LOG("Debug log type: none");
+                               }
+
+                               pui32TraceBuffer = psRGXFWIfTraceBufCtl->sTraceBuf[tid].pui32TraceBuffer;
+
+                               /* Skip if trace buffer is not allocated */
+                               if (pui32TraceBuffer == NULL)
+                               {
+                                       PVR_DUMPDEBUG_LOG("RGX FW thread %d: Trace buffer not yet allocated",tid);
+                                       continue;
+                               }
+
+/* Max number of DWords to be printed per line, in debug dump output */
+#define PVR_DD_FW_TRACEBUF_LINESIZE 30U
+                               /* each element in the line is 8 characters plus a space.  The '+ 1' is because of the final trailing '\0'. */
+                               pszLine = OSAllocMem(9 * PVR_DD_FW_TRACEBUF_LINESIZE + 1);
+                               if (pszLine == NULL)
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR,
+                                                       "%s: Out of mem allocating line string (size: %d)",
+                                                       __func__,
+                                                       9 * PVR_DD_FW_TRACEBUF_LINESIZE + 1));
+                                       goto Exit;
+                               }
+
+                               PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace START ]------", tid);
+                               PVR_DUMPDEBUG_LOG("FWT[traceptr]: %X", psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer);
+                               PVR_DUMPDEBUG_LOG("FWT[tracebufsize]: %X", psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords);
+
+                               for (i = 0; i < psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords; i += PVR_DD_FW_TRACEBUF_LINESIZE)
+                               {
+                                       IMG_UINT32 k = 0;
+                                       IMG_UINT32 ui32Line = 0x0;
+                                       IMG_UINT32 ui32LineOffset = i*sizeof(IMG_UINT32);
+                                       IMG_CHAR   *pszBuf = pszLine;
+
+                                       for (k = 0; k < PVR_DD_FW_TRACEBUF_LINESIZE; k++)
+                                       {
+                                               if ((i + k) >= psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords)
+                                               {
+                                                       /* Stop reading when the index goes beyond trace buffer size. This condition is
+                                                        * hit during printing the last line in DD when ui32TraceBufSizeInDWords is not
+                                                        * a multiple of PVR_DD_FW_TRACEBUF_LINESIZE */
+                                                       break;
+                                               }
+
+                                               ui32Line |= pui32TraceBuffer[i + k];
+
+                                               /* prepare the line to print it. The '+1' is because of the trailing '\0' added */
+                                               OSSNPrintf(pszBuf, 9 + 1, " %08x", pui32TraceBuffer[i + k]);
+                                               pszBuf += 9; /* write over the '\0' */
+                                       }
+
+                                       bLineIsAllZeros = (ui32Line == 0x0);
+
+                                       if (bLineIsAllZeros)
+                                       {
+                                               if (bPrevLineWasZero)
+                                               {
+                                                       ui32CountLines++;
+                                               }
+                                               else
+                                               {
+                                                       bPrevLineWasZero = IMG_TRUE;
+                                                       ui32CountLines = 1;
+                                                       PVR_DUMPDEBUG_LOG("FWT[%08x]: 00000000 ... 00000000", ui32LineOffset);
+                                               }
+                                       }
+                                       else
+                                       {
+                                               if (bPrevLineWasZero  &&  ui32CountLines > 1)
+                                               {
+                                                       PVR_DUMPDEBUG_LOG("FWT[...]: %d lines were all zero", ui32CountLines);
+                                               }
+                                               bPrevLineWasZero = IMG_FALSE;
+
+                                               PVR_DUMPDEBUG_LOG("FWT[%08x]:%s", ui32LineOffset, pszLine);
+                                       }
+
+                               }
+                               if (bPrevLineWasZero)
+                               {
+                                       PVR_DUMPDEBUG_LOG("FWT[END]: %d lines were all zero", ui32CountLines);
+                               }
+
+                               PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace END ]------", tid);
+
+                               OSFreeMem(pszLine);
+                       }
+               }
+
+               {
+                       if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH))
+                       {
+                               PVR_DUMPDEBUG_LOG("------[ Full CCB Status ]------");
+                       }
+                       else
+                       {
+                               PVR_DUMPDEBUG_LOG("------[ Stalled FWCtxs ]------");
+                       }
+
+                       DumpRenderCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+                       DumpComputeCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+
+                       DumpTDMTransferCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+
+                       DumpKickSyncCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+               }
+       }
+
+       PVR_DUMPDEBUG_LOG("------[ RGX Device ID:%d End ]------", psDevInfo->psDeviceNode->sDevId.ui32InternalID);
+
+Exit:
+       if (!bPwrLockAlreadyHeld)
+       {
+               PVRSRVPowerUnlock(psDeviceNode);
+       }
+}
+
+/*!
+ ******************************************************************************
+
+ @Function     RGXDebugRequestNotify
+
+ @Description Dump the debug data for RGX
+
+ ******************************************************************************/
+static void RGXDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDbgRequestHandle,
+               IMG_UINT32 ui32VerbLevel,
+               DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+               void *pvDumpDebugFile)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = hDbgRequestHandle;
+
+       /* Only action the request if we've fully init'ed */
+       if (psDevInfo->bDevInit2Done)
+       {
+               RGXDebugRequestProcess(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui32VerbLevel);
+       }
+}
+
+PVRSRV_ERROR RGXDebugInit(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       return PVRSRVRegisterDeviceDbgRequestNotify(&psDevInfo->hDbgReqNotify,
+                                                       psDevInfo->psDeviceNode,
+                                                       RGXDebugRequestNotify,
+                                                       DEBUG_REQUEST_RGX,
+                                                       psDevInfo);
+}
+
+PVRSRV_ERROR RGXDebugDeinit(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       if (psDevInfo->hDbgReqNotify)
+       {
+               return PVRSRVUnregisterDeviceDbgRequestNotify(psDevInfo->hDbgReqNotify);
+       }
+
+       /* No notifier registered */
+       return PVRSRV_OK;
+}
+
+/******************************************************************************
+ End of file (rgxdebug.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxdebug.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxdebug.h
new file mode 100644 (file)
index 0000000..d253d89
--- /dev/null
@@ -0,0 +1,191 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX debug header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX debugging functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXDEBUG_H)
+#define RGXDEBUG_H
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "device.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "rgxdevice.h"
+
+/**
+ * Debug utility macro for printing FW IRQ count and Last sampled IRQ count in
+ * LISR for each RGX FW thread.
+ * Macro takes pointer to PVRSRV_RGXDEV_INFO as input.
+ */
+#define RGXDEBUG_PRINT_IRQ_COUNT(psRgxDevInfo) \
+       do \
+       { \
+               IMG_UINT32 ui32TID; \
+               for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++) \
+               { \
+                       PVR_DPF((DBGPRIV_VERBOSE, \
+                                       "RGX FW thread %u: FW IRQ count = %u, Last sampled IRQ count in LISR = %u", \
+                                       ui32TID, \
+                                       (psRgxDevInfo)->psRGXFWIfFwOsData->aui32InterruptCount[ui32TID], \
+                                       (psRgxDevInfo)->aui32SampleIRQCount[ui32TID])); \
+               } \
+       } while (0)
+
+/*!
+*******************************************************************************
+
+ @Function     RGXDumpRGXRegisters
+
+ @Description
+
+ Dumps an extensive list of RGX registers required for debugging
+
+ @Input pfnDumpDebugPrintf  - Optional replacement print function
+ @Input pvDumpDebugFile     - Optional file identifier to be passed to the
+                              'printf' function if required
+ @Input psDevInfo           - RGX device info
+
+ @Return PVRSRV_ERROR         PVRSRV_OK on success, error code otherwise
+
+******************************************************************************/
+PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                                                void *pvDumpDebugFile,
+                                                                PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function     RGXDumpFirmwareTrace
+
+ @Description Dumps the decoded version of the firmware trace buffer.
+
+ @Input pfnDumpDebugPrintf  - Optional replacement print function
+ @Input pvDumpDebugFile     - Optional file identifier to be passed to the
+                              'printf' function if required
+ @Input psDevInfo           - RGX device info
+
+ @Return   void
+
+******************************************************************************/
+void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                               void *pvDumpDebugFile,
+                               PVRSRV_RGXDEV_INFO  *psDevInfo);
+
+#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS)
+void RGXDumpPowerMonitoring(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                               void *pvDumpDebugFile,
+                               PVRSRV_RGXDEV_INFO  *psDevInfo);
+#endif
+
+#if defined(SUPPORT_FW_VIEW_EXTRA_DEBUG)
+/*!
+*******************************************************************************
+
+ @Function     ValidateFWOnLoad
+
+ @Description  Compare the Firmware image as seen from the CPU point of view
+               against the same memory area as seen from the firmware point
+               of view after first power up.
+
+ @Input        psDevInfo - Device Info
+
+ @Return       PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR ValidateFWOnLoad(PVRSRV_RGXDEV_INFO *psDevInfo);
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function     RGXDumpRGXDebugSummary
+
+ @Description
+
+ Dump a summary in human readable form with the RGX state
+
+ @Input pfnDumpDebugPrintf   - The debug printf function
+ @Input pvDumpDebugFile      - Optional file identifier to be passed to the
+                               'printf' function if required
+ @Input psDevInfo           - RGX device info
+ @Input bRGXPoweredON        - IMG_TRUE if RGX device is on
+
+ @Return   void
+
+******************************************************************************/
+void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                       void *pvDumpDebugFile,
+                                       PVRSRV_RGXDEV_INFO *psDevInfo,
+                                       IMG_BOOL bRGXPoweredON);
+
+/*!
+*******************************************************************************
+
+ @Function RGXDebugInit
+
+ @Description
+
+ Setup debug requests, calls into PVRSRVRegisterDeviceDbgRequestNotify
+
+ @Input          psDevInfo            RGX device info
+ @Return         PVRSRV_ERROR         PVRSRV_OK on success otherwise an error
+
+******************************************************************************/
+PVRSRV_ERROR RGXDebugInit(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function RGXDebugDeinit
+
+ @Description
+
+ Remove debug requests, calls into PVRSRVUnregisterDeviceDbgRequestNotify
+
+ @Input          psDevInfo            RGX device info
+ @Return         PVRSRV_ERROR         PVRSRV_OK on success otherwise an error
+
+******************************************************************************/
+PVRSRV_ERROR RGXDebugDeinit(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+#endif /* RGXDEBUG_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxdevice.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxdevice.h
new file mode 100644 (file)
index 0000000..90c8540
--- /dev/null
@@ -0,0 +1,832 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX device node header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX device node
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXDEVICE_H)
+#define RGXDEVICE_H
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_device_types.h"
+#include "mmu_common.h"
+#include "rgx_fwif_km.h"
+#include "cache_ops.h"
+#include "device.h"
+#include "osfunc.h"
+#include "rgxlayer_impl.h"
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "hash.h"
+#endif
+typedef struct _RGX_SERVER_COMMON_CONTEXT_ RGX_SERVER_COMMON_CONTEXT;
+
+typedef struct {
+       DEVMEM_MEMDESC          *psFWFrameworkMemDesc;
+} RGX_COMMON_CONTEXT_INFO;
+
+
+/*!
+ ******************************************************************************
+ * Device state flags
+ *****************************************************************************/
+#define RGXKM_DEVICE_STATE_ZERO_FREELIST                          (0x1)  /*!< Zeroing the physical pages of reconstructed free lists */
+#define RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN                  (0x2)  /*!< Used to disable the Devices Watchdog logging */
+#define RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN              (0x4)  /*!< Used for validation to inject SPU power state mask change every DM kick */
+#define RGXKM_DEVICE_STATE_CCB_GROW_EN                            (0x8)  /*!< Used to indicate CCB grow is permitted */
+#define RGXKM_DEVICE_STATE_ENABLE_SPU_UNITS_POWER_MASK_CHANGE_EN  (0x10) /*!< Used for validation to enable SPU power state mask change */
+#define RGXKM_DEVICE_STATE_MASK                                   (0x1F)
+
+/*!
+ ******************************************************************************
+ * GPU DVFS Table
+ *****************************************************************************/
+
+#define RGX_GPU_DVFS_TABLE_SIZE                      32
+#define RGX_GPU_DVFS_FIRST_CALIBRATION_TIME_US       25000     /* Time required to calibrate a clock frequency the first time */
+#define RGX_GPU_DVFS_TRANSITION_CALIBRATION_TIME_US  150000    /* Time required for a recalibration after a DVFS transition */
+#define RGX_GPU_DVFS_PERIODIC_CALIBRATION_TIME_US    10000000  /* Time before the next periodic calibration and correlation */
+
+
+/*!
+ ******************************************************************************
+ * Global flags for driver validation
+ *****************************************************************************/
+#define RGX_VAL_LS_EN                             (0x1U)  /*!< Enable dual lockstep firmware */
+#define RGX_VAL_FBDC_SIG_CHECK_NOERR_EN           (0x2U)  /*!< Enable FBDC signature check. Signatures must match */
+#define RGX_VAL_FBDC_SIG_CHECK_ERR_EN             (0x4U)  /*!< Enable FBDC signature check. Signatures must not match */
+#define RGX_VAL_GPUSTATEPIN_EN                    (0x8U)  /*!< Enable GPU state pin check */
+#define RGX_VAL_KZ_SIG_CHECK_NOERR_EN            (0x10U)  /*!< Enable KZ signature check. Signatures must match */
+#define RGX_VAL_KZ_SIG_CHECK_ERR_EN              (0x20U)  /*!< Enable KZ signature check. Signatures must not match */
+#define RGX_VAL_SIG_CHECK_ERR_EN                 (RGX_VAL_FBDC_SIG_CHECK_ERR_EN)
+
+typedef struct _GPU_FREQ_TRACKING_DATA_
+{
+       /* Core clock speed estimated by the driver */
+       IMG_UINT32 ui32EstCoreClockSpeed;
+
+       /* Amount of successful calculations of the estimated core clock speed */
+       IMG_UINT32 ui32CalibrationCount;
+} GPU_FREQ_TRACKING_DATA;
+
+#if defined(PVRSRV_TIMER_CORRELATION_HISTORY)
+#define RGX_GPU_FREQ_TRACKING_SIZE 16
+
+typedef struct
+{
+       IMG_UINT64 ui64BeginCRTimestamp;
+       IMG_UINT64 ui64BeginOSTimestamp;
+
+       IMG_UINT64 ui64EndCRTimestamp;
+       IMG_UINT64 ui64EndOSTimestamp;
+
+       IMG_UINT32 ui32EstCoreClockSpeed;
+       IMG_UINT32 ui32CoreClockSpeed;
+} GPU_FREQ_TRACKING_HISTORY;
+#endif
+
+typedef struct _RGX_GPU_DVFS_TABLE_
+{
+       /* Beginning of current calibration period (in us) */
+       IMG_UINT64 ui64CalibrationCRTimestamp;
+       IMG_UINT64 ui64CalibrationOSTimestamp;
+
+       /* Calculated calibration period (in us) */
+       IMG_UINT64 ui64CalibrationCRTimediff;
+       IMG_UINT64 ui64CalibrationOSTimediff;
+
+       /* Current calibration period (in us) */
+       IMG_UINT32 ui32CalibrationPeriod;
+
+       /* System layer frequency table and frequency tracking data */
+       IMG_UINT32 ui32FreqIndex;
+       IMG_UINT32 aui32GPUFrequency[RGX_GPU_DVFS_TABLE_SIZE];
+       GPU_FREQ_TRACKING_DATA asTrackingData[RGX_GPU_DVFS_TABLE_SIZE];
+
+#if defined(PVRSRV_TIMER_CORRELATION_HISTORY)
+       IMG_UINT32 ui32HistoryIndex;
+       GPU_FREQ_TRACKING_HISTORY asTrackingHistory[RGX_GPU_FREQ_TRACKING_SIZE];
+#endif
+} RGX_GPU_DVFS_TABLE;
+
+
+/*!
+ ******************************************************************************
+ * GPU utilisation statistics
+ *****************************************************************************/
+
+typedef struct _RGXFWIF_GPU_UTIL_STATS_
+{
+       IMG_BOOL   bValid;                /* If TRUE, statistics are valid.
+                                            FALSE if the driver couldn't get reliable stats. */
+       IMG_UINT64 ui64GpuStatActive;     /* GPU active statistic */
+       IMG_UINT64 ui64GpuStatBlocked;    /* GPU blocked statistic */
+       IMG_UINT64 ui64GpuStatIdle;       /* GPU idle statistic */
+       IMG_UINT64 ui64GpuStatCumulative; /* Sum of active/blocked/idle stats */
+       IMG_UINT64 ui64TimeStamp;         /* Timestamp of the most recent sample of the GPU stats */
+} RGXFWIF_GPU_UTIL_STATS;
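+
+/* Illustrative sketch (excluded from the build): given that
+ * ui64GpuStatCumulative is the sum of the active/blocked/idle counters, an
+ * approximate GPU load percentage can be derived from a valid sample as shown
+ * below. The helper name is hypothetical and not part of the driver API.
+ */
+#if 0
+static IMG_UINT32 ExampleGpuActivePercent(const RGXFWIF_GPU_UTIL_STATS *psStats)
+{
+       /* Reject unreliable samples and avoid a division by zero */
+       if (!psStats->bValid || psStats->ui64GpuStatCumulative == 0)
+       {
+               return 0;
+       }
+
+       return (IMG_UINT32)((psStats->ui64GpuStatActive * 100ULL) /
+                           psStats->ui64GpuStatCumulative);
+}
+#endif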
+
+
+typedef struct _RGX_REG_CONFIG_
+{
+       IMG_BOOL               bEnabled;
+       RGXFWIF_REG_CFG_TYPE   eRegCfgTypeToPush;
+       IMG_UINT32             ui32NumRegRecords;
+       POS_LOCK               hLock;
+} RGX_REG_CONFIG;
+
+typedef struct _PVRSRV_STUB_PBDESC_ PVRSRV_STUB_PBDESC;
+
+#if defined(SUPPORT_VALIDATION)
+/**
+ * Structure containing information for calculating next SPU power domain state.
+ */
+typedef struct _RGX_POWER_DOMAIN_STATE_
+{
+       /**
+        * Total number of power units in the core.
+        */
+       IMG_UINT32 ui32PowUnitsCount;
+       /**
+        * Current power domain state
+        */
+       IMG_UINT32 ui32CurrentState;
+       /**
+        * Stores last transition that happened for each power domain state.
+        */
+       IMG_UINT32 *paui32LastTransition;
+} RGX_POWER_DOMAIN_STATE;
+#endif
+
+typedef struct _PVRSRV_DEVICE_FEATURE_CONFIG_
+{
+       IMG_UINT64 ui64ErnsBrns;
+       IMG_UINT64 ui64Features;
+       IMG_UINT32 ui32B;
+       IMG_UINT32 ui32V;
+       IMG_UINT32 ui32N;
+       IMG_UINT32 ui32C;
+       IMG_UINT32 ui32FeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX];
+       IMG_UINT32 ui32MAXDMCount;
+       IMG_UINT32 ui32MAXPowUnitCount;
+       IMG_UINT32 ui32MAXRACCount;
+       IMG_UINT32 ui32SLCSizeInBytes;
+       IMG_PCHAR  pszBVNCString;
+} PVRSRV_DEVICE_FEATURE_CONFIG;
+
+/* This is used to get the value of a specific feature.
+ * Note that it will assert if the feature is disabled or value is invalid. */
+#define RGX_GET_FEATURE_VALUE(psDevInfo, Feature) \
+                       ( psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] )
+
+/* This is used to check if the feature value (e.g. with an integer value) is available for the currently running BVNC or not */
+#define RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, Feature) \
+                       ( psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] < RGX_FEATURE_VALUE_DISABLED )
+
+/* This is used to check if the Boolean feature (e.g. WITHOUT an integer value) is available for the currently running BVNC or not */
+#define RGX_IS_FEATURE_SUPPORTED(psDevInfo, Feature) \
+                       BITMASK_HAS(psDevInfo->sDevFeatureCfg.ui64Features, RGX_FEATURE_##Feature##_BIT_MASK)
+
+/* This is used to check if the ERN is available for the currently running BVNC or not */
+#define RGX_IS_ERN_SUPPORTED(psDevInfo, ERN) \
+                       BITMASK_HAS(psDevInfo->sDevFeatureCfg.ui64ErnsBrns, HW_ERN_##ERN##_BIT_MASK)
+
+/* This is used to check if the BRN is available for the currently running BVNC or not */
+#define RGX_IS_BRN_SUPPORTED(psDevInfo, BRN) \
+                       BITMASK_HAS(psDevInfo->sDevFeatureCfg.ui64ErnsBrns, FIX_HW_BRN_##BRN##_BIT_MASK)
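+
+/* Illustrative sketch (excluded from the build) of how the query macros above
+ * are typically used, mirroring the checks made in rgxdebug.c. It assumes the
+ * full PVRSRV_RGXDEV_INFO definition found later in this header; the function
+ * name is hypothetical.
+ */
+#if 0
+static void ExampleFeatureQueries(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       /* Boolean feature: simply present or absent for the running BVNC */
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TILE_REGION_PROTECTION))
+       {
+               /* ... TRP-specific handling ... */
+       }
+
+       /* Valued feature: check availability before reading the value,
+        * as RGX_GET_FEATURE_VALUE asserts when the feature is disabled */
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+       {
+               IMG_UINT32 ui32Meta = RGX_GET_FEATURE_VALUE(psDevInfo, META);
+               (void)ui32Meta;
+       }
+}
+#endif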
+
+/* there is a corresponding define in rgxapi.h */
+#define RGX_MAX_TIMER_QUERIES 16U
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+/*!
+ * The host maintains a 512-deep cache of submitted workloads per device,
+ * i.e. a global look-up table for TA, 3D and compute (depending on the RGX
+ * hardware support present)
+ */
+
+/*
+ * For the workload estimation return data array, the maximum number of commands
+ * the MTS can have is 255; therefore 512 (LOG2 = 9) is large enough to account
+ * for all corner cases.
+ */
+#define RETURN_DATA_ARRAY_SIZE_LOG2 (9)
+#define RETURN_DATA_ARRAY_SIZE      ((1U) << RETURN_DATA_ARRAY_SIZE_LOG2)
+#define RETURN_DATA_ARRAY_WRAP_MASK (RETURN_DATA_ARRAY_SIZE - 1)
+
+#define WORKLOAD_HASH_SIZE_LOG2                6
+#define WORKLOAD_HASH_SIZE                     ((1U) << WORKLOAD_HASH_SIZE_LOG2)
+#define WORKLOAD_HASH_WRAP_MASK                (WORKLOAD_HASH_SIZE - 1)
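+
+/* Illustrative sketch (excluded from the build): because the sizes above are
+ * powers of two, circular-buffer indices wrap with a mask rather than a modulo,
+ * e.g. write offset 511 advances back to 0 for the 512-entry return data array.
+ * The helper name is hypothetical.
+ */
+#if 0
+static IMG_UINT32 ExampleAdvanceReturnDataWO(IMG_UINT32 ui32WriteOffset)
+{
+       return (ui32WriteOffset + 1) & RETURN_DATA_ARRAY_WRAP_MASK;
+}
+#endif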
+
+/*!
+ * Workload characteristics for supported data masters.
+ * All characteristics must match for the workload estimate to be used/updated.
+ */
+typedef union _RGX_WORKLOAD_
+{
+       struct
+       {
+               IMG_UINT32                              ui32RenderTargetSize;
+               IMG_UINT32                              ui32NumberOfDrawCalls;
+               IMG_UINT32                              ui32NumberOfIndices;
+               IMG_UINT32                              ui32NumberOfMRTs;
+       } sTA3D;
+
+       struct
+       {
+               IMG_UINT32                              ui32NumberOfWorkgroups;
+               IMG_UINT32                              ui32NumberOfWorkitems;
+       } sCompute;
+
+       struct
+       {
+               IMG_UINT32                              ui32Characteristic1;
+               IMG_UINT32                              ui32Characteristic2;
+       } sTransfer;
+} RGX_WORKLOAD;
+
+/*!
+ * Host data used to match the return data (actual cycles count) to the
+ * submitted command packet.
+ * The hash table is a per-DM circular buffer keyed on the workload
+ * characteristics. On job completion, the oldest workload data is evicted if
+ * the CB is full, and the driver matches the returned characteristics against
+ * the stored entries.
+ *
+ * o If the driver finds a match the existing cycle estimate is averaged with
+ *   the actual cycles used.
+ * o Otherwise a new hash entry is created with the actual cycles for this
+ *   workload.
+ *
+ * Subsequently if a match is found during command submission, the estimate
+ * is passed to the scheduler, e.g. to adjust the GPU frequency if PDVFS is enabled.
+ */
+typedef struct _WORKLOAD_MATCHING_DATA_
+{
+       POS_LOCK                                psHashLock;
+       HASH_TABLE                              *psHashTable;           /*! existing workload cycle estimates for this DM */
+       RGX_WORKLOAD                    asHashKeys[WORKLOAD_HASH_SIZE];
+       IMG_UINT64                              aui64HashData[WORKLOAD_HASH_SIZE];
+       IMG_UINT32                              ui32HashArrayWO;        /*! track the most recent workload estimates */
+} WORKLOAD_MATCHING_DATA;
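+
+/* Illustrative sketch (excluded from the build) of the match-or-insert scheme
+ * described above, simplified to a linear scan over the key array; the driver
+ * itself performs the look-up through psHashTable. Only the TA/3D key fields
+ * are compared here and the function name is hypothetical.
+ */
+#if 0
+static void ExampleWorkloadReturn(WORKLOAD_MATCHING_DATA *psMatchingData,
+                                  const RGX_WORKLOAD *psCharacteristics,
+                                  IMG_UINT64 ui64ActualCycles)
+{
+       IMG_UINT32 i;
+
+       for (i = 0; i < WORKLOAD_HASH_SIZE; i++)
+       {
+               const RGX_WORKLOAD *psKey = &psMatchingData->asHashKeys[i];
+
+               if (psKey->sTA3D.ui32RenderTargetSize  == psCharacteristics->sTA3D.ui32RenderTargetSize  &&
+                   psKey->sTA3D.ui32NumberOfDrawCalls == psCharacteristics->sTA3D.ui32NumberOfDrawCalls &&
+                   psKey->sTA3D.ui32NumberOfIndices   == psCharacteristics->sTA3D.ui32NumberOfIndices   &&
+                   psKey->sTA3D.ui32NumberOfMRTs      == psCharacteristics->sTA3D.ui32NumberOfMRTs)
+               {
+                       /* Match: average the stored estimate with the actual cycle count */
+                       psMatchingData->aui64HashData[i] =
+                               (psMatchingData->aui64HashData[i] + ui64ActualCycles) / 2;
+                       return;
+               }
+       }
+
+       /* No match: store the new workload, evicting the oldest entry if full */
+       psMatchingData->asHashKeys[psMatchingData->ui32HashArrayWO] = *psCharacteristics;
+       psMatchingData->aui64HashData[psMatchingData->ui32HashArrayWO] = ui64ActualCycles;
+       psMatchingData->ui32HashArrayWO =
+               (psMatchingData->ui32HashArrayWO + 1) & WORKLOAD_HASH_WRAP_MASK;
+}
+#endif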
+
+/*!
+ * A generic container for the workload matching data for GPU contexts:
+ * rendering (TA, 3D), compute, etc.
+ */
+typedef struct _WORKEST_HOST_DATA_
+{
+       union
+       {
+               struct
+               {
+                       WORKLOAD_MATCHING_DATA  sDataTA;        /*!< matching data for TA commands */
+                       WORKLOAD_MATCHING_DATA  sData3D;        /*!< matching data for 3D commands */
+               } sTA3D;
+
+               struct
+               {
+                       WORKLOAD_MATCHING_DATA  sDataCDM;       /*!< matching data for CDM commands */
+               } sCompute;
+
+               struct
+               {
+                       WORKLOAD_MATCHING_DATA  sDataTDM;       /*!< matching data for TDM-TQ commands */
+               } sTransfer;
+       } uWorkloadMatchingData;
+
+       /*
+        * This is a per-context property, hence the TA and 3D share the same
+        * per render context counter.
+        */
+       IMG_UINT32                              ui32WorkEstCCBReceived; /*!< Used to ensure all submitted work
+                                                                                                                estimation commands are received
+                                                                                                                by the host before clean up. */
+} WORKEST_HOST_DATA;
+
+/*!
+ * Entries in the list of submitted workloads, used when the completed command
+ * returns data to the host.
+ *
+ * - the matching data is needed as it holds the hash data
+ * - the host data is needed for completion updates, ensuring memory is not
+ *   freed while workload estimates are in-flight.
+ * - the workload characteristic is used in the hash table look-up.
+ */
+typedef struct _WORKEST_RETURN_DATA_
+{
+       WORKEST_HOST_DATA               *psWorkEstHostData;
+       WORKLOAD_MATCHING_DATA  *psWorkloadMatchingData;
+       RGX_WORKLOAD                    sWorkloadCharacteristics;
+} WORKEST_RETURN_DATA;
+#endif
+
+
+#define RGX_MAX_NUM_MMU_PAGE_SIZE_RANGES    4
+
+
+/*!
+ ******************************************************************************
+ * RGX Device error counts
+ *****************************************************************************/
+typedef struct _PVRSRV_RGXDEV_ERROR_COUNTS_
+{
+       IMG_UINT32 ui32WGPErrorCount;           /*!< count of the number of WGP checksum errors */
+       IMG_UINT32 ui32TRPErrorCount;           /*!< count of the number of TRP checksum errors */
+} PVRSRV_RGXDEV_ERROR_COUNTS;
+
+/*!
+ ******************************************************************************
+ * RGX Device info
+ *****************************************************************************/
+typedef struct _PVRSRV_RGXDEV_INFO_
+{
+       PVRSRV_DEVICE_NODE              *psDeviceNode;
+
+       PVRSRV_DEVICE_FEATURE_CONFIG    sDevFeatureCfg;
+
+       IMG_BOOL                                bDevInit2Done;
+
+       IMG_BOOL                                bFirmwareInitialised;
+       IMG_BOOL                                bPDPEnabled;
+
+       IMG_HANDLE                              hDbgReqNotify;
+
+       /* Kernel mode linear address of device registers */
+       void __iomem                    *pvRegsBaseKM;
+
+       IMG_HANDLE                              hRegMapping;
+
+       /* System physical address of device registers */
+       IMG_CPU_PHYADDR                 sRegsPhysBase;
+       /* Register region size in bytes */
+       IMG_UINT32                              ui32RegSize;
+
+       PVRSRV_STUB_PBDESC              *psStubPBDescListKM;
+
+       /* Firmware memory context info */
+       DEVMEM_CONTEXT                  *psKernelDevmemCtx;
+       DEVMEM_HEAP                             *psFirmwareMainHeap;
+       DEVMEM_HEAP                             *psFirmwareConfigHeap;
+       MMU_CONTEXT                             *psKernelMMUCtx;
+
+       void                                    *pvDeviceMemoryHeap;
+
+       /* Kernel CCB */
+       DEVMEM_MEMDESC                  *psKernelCCBCtlMemDesc;      /*!< memdesc for Kernel CCB control */
+       RGXFWIF_CCB_CTL                 *psKernelCCBCtl;             /*!< kernel mapping for Kernel CCB control */
+       DEVMEM_MEMDESC                  *psKernelCCBMemDesc;         /*!< memdesc for Kernel CCB */
+       IMG_UINT8                               *psKernelCCB;                /*!< kernel mapping for Kernel CCB */
+       DEVMEM_MEMDESC                  *psKernelCCBRtnSlotsMemDesc; /*!< Return slot array for Kernel CCB commands */
+       IMG_UINT32                              *pui32KernelCCBRtnSlots;     /*!< kernel mapping for return slot array */
+
+       /* Firmware CCB */
+       DEVMEM_MEMDESC                  *psFirmwareCCBCtlMemDesc;   /*!< memdesc for Firmware CCB control */
+       RGXFWIF_CCB_CTL                 *psFirmwareCCBCtl;          /*!< kernel mapping for Firmware CCB control */
+       DEVMEM_MEMDESC                  *psFirmwareCCBMemDesc;      /*!< memdesc for Firmware CCB */
+       IMG_UINT8                               *psFirmwareCCB;             /*!< kernel mapping for Firmware CCB */
+
+       /* Workload Estimation Firmware CCB */
+       DEVMEM_MEMDESC                  *psWorkEstFirmwareCCBCtlMemDesc;   /*!< memdesc for Workload Estimation Firmware CCB control */
+       RGXFWIF_CCB_CTL                 *psWorkEstFirmwareCCBCtl;          /*!< kernel mapping for Workload Estimation Firmware CCB control */
+       DEVMEM_MEMDESC                  *psWorkEstFirmwareCCBMemDesc;      /*!< memdesc for Workload Estimation Firmware CCB */
+       IMG_UINT8                               *psWorkEstFirmwareCCB;             /*!< kernel mapping for Workload Estimation Firmware CCB */
+
+       PVRSRV_MEMALLOCFLAGS_T  uiFWPoisonOnFreeFlag;           /*!< Flag for poisoning FW allocations when freed */
+
+       IMG_BOOL                                bIgnoreHWReportedBVNC;                  /*!< Ignore BVNC reported by HW */
+
+       /*
+               If we don't preallocate the page tables, we must
+               insert newly allocated page tables dynamically.
+       */
+       void                                    *pvMMUContextList;
+
+       IMG_UINT32                              ui32ClkGateStatusReg;
+       IMG_UINT32                              ui32ClkGateStatusMask;
+
+       DEVMEM_MEMDESC                  *psRGXFWCodeMemDesc;
+       IMG_DEV_VIRTADDR                sFWCodeDevVAddrBase;
+       IMG_UINT32                      ui32FWCodeSizeInBytes;
+       DEVMEM_MEMDESC                  *psRGXFWDataMemDesc;
+       IMG_DEV_VIRTADDR                sFWDataDevVAddrBase;
+
+       DEVMEM_MEMDESC                  *psRGXFWCorememCodeMemDesc;
+       IMG_DEV_VIRTADDR                sFWCorememCodeDevVAddrBase;
+       RGXFWIF_DEV_VIRTADDR            sFWCorememCodeFWAddr;
+       IMG_UINT32                      ui32FWCorememCodeSizeInBytes;
+
+       DEVMEM_MEMDESC                  *psRGXFWIfCorememDataStoreMemDesc;
+       IMG_DEV_VIRTADDR                sFWCorememDataStoreDevVAddrBase;
+       RGXFWIF_DEV_VIRTADDR            sFWCorememDataStoreFWAddr;
+
+       DEVMEM_MEMDESC                  *psRGXFWAlignChecksMemDesc;
+
+#if defined(PDUMP)
+       DEVMEM_MEMDESC                  *psRGXFWSigTAChecksMemDesc;
+       IMG_UINT32                              ui32SigTAChecksSize;
+
+       DEVMEM_MEMDESC                  *psRGXFWSig3DChecksMemDesc;
+       IMG_UINT32                              ui32Sig3DChecksSize;
+
+       DEVMEM_MEMDESC                  *psRGXFWSigCDMChecksMemDesc;
+       IMG_UINT32                              ui32SigCDMChecksSize;
+
+       DEVMEM_MEMDESC                  *psRGXFWSigTDMChecksMemDesc;
+       IMG_UINT32                              ui32SigTDMChecksSize;
+
+       DEVMEM_MEMDESC                  *psRGXFWSigRDMChecksMemDesc;
+       IMG_UINT32                              ui32SigRDMChecksSize;
+
+
+#if defined(SUPPORT_VALIDATION)
+       DEVMEM_MEMDESC                  *psRGXFWValidationSigMemDesc;
+       IMG_UINT32                              ui32ValidationSigSize;
+#endif
+
+       IMG_BOOL                                bDumpedKCCBCtlAlready;
+
+       POS_SPINLOCK                    hSyncCheckpointSignalSpinLock;                                          /*!< Guards data shared between an atomic & sleepable-context */
+#endif
+
+       POS_LOCK                                hRGXFWIfBufInitLock;                                                            /*!< trace buffer lock for initialisation phase */
+
+       DEVMEM_MEMDESC                  *psRGXFWIfTraceBufCtlMemDesc;                                           /*!< memdesc of trace buffer control structure */
+       DEVMEM_MEMDESC                  *psRGXFWIfTraceBufferMemDesc[RGXFW_THREAD_NUM];         /*!< memdesc of actual FW trace (log) buffer(s) */
+       DEVMEM_MEMDESC                  *psRGXFWIfPowMonBufferMemDesc;                                          /*!< memdesc of FW power monitoring data */
+       RGXFWIF_TRACEBUF                *psRGXFWIfTraceBufCtl;                                                          /*!< structure containing trace control data and actual trace buffer */
+
+       DEVMEM_MEMDESC                  *psRGXFWIfFwSysDataMemDesc;                                                     /*!< memdesc of the firmware-shared system data structure */
+       RGXFWIF_SYSDATA                 *psRGXFWIfFwSysData;                                                            /*!< kernel mapping of the firmware-shared system data structure */
+
+       DEVMEM_MEMDESC                  *psRGXFWIfFwOsDataMemDesc;                                                      /*!< memdesc of the firmware-shared os structure */
+       RGXFWIF_OSDATA                  *psRGXFWIfFwOsData;                                                                     /*!< kernel mapping of the firmware-shared os data structure */
+
+#if defined(SUPPORT_TBI_INTERFACE)
+       DEVMEM_MEMDESC                  *psRGXFWIfTBIBufferMemDesc;                                                     /*!< memdesc of actual FW TBI buffer */
+       RGXFWIF_DEV_VIRTADDR    sRGXFWIfTBIBuffer;                                                                      /*!< TBI buffer data */
+       IMG_UINT32                              ui32FWIfTBIBufferSize;
+#endif
+
+       DEVMEM_MEMDESC                  *psRGXFWIfHWRInfoBufCtlMemDesc;
+       RGXFWIF_HWRINFOBUF              *psRGXFWIfHWRInfoBufCtl;
+       IMG_UINT32                              ui32ClockSource;
+       IMG_UINT32                              ui32LastClockSource;
+
+       DEVMEM_MEMDESC                  *psRGXFWIfGpuUtilFWCbCtlMemDesc;
+       RGXFWIF_GPU_UTIL_FWCB   *psRGXFWIfGpuUtilFWCb;
+
+       DEVMEM_MEMDESC                  *psRGXFWIfHWPerfBufMemDesc;
+       IMG_BYTE                                *psRGXFWIfHWPerfBuf;
+       IMG_UINT32                              ui32RGXFWIfHWPerfBufSize; /* in bytes */
+
+       DEVMEM_MEMDESC                  *psRGXFWIfRegCfgMemDesc;
+
+       DEVMEM_MEMDESC                  *psRGXFWIfHWPerfCountersMemDesc;
+
+       DEVMEM_MEMDESC                  *psRGXFWIfConnectionCtlMemDesc;
+       RGXFWIF_CONNECTION_CTL  *psRGXFWIfConnectionCtl;
+
+       DEVMEM_MEMDESC                  *psRGXFWHeapGuardPageReserveMemDesc;
+       DEVMEM_MEMDESC                  *psRGXFWIfSysInitMemDesc;
+       RGXFWIF_SYSINIT                 *psRGXFWIfSysInit;
+
+       DEVMEM_MEMDESC                  *psRGXFWIfOsInitMemDesc;
+       RGXFWIF_OSINIT                  *psRGXFWIfOsInit;
+
+       DEVMEM_MEMDESC                  *psRGXFWIfRuntimeCfgMemDesc;
+       RGXFWIF_RUNTIME_CFG             *psRGXFWIfRuntimeCfg;
+
+       /* Additional guest firmware memory context info */
+       DEVMEM_HEAP                             *psGuestFirmwareRawHeap[RGX_NUM_OS_SUPPORTED];
+       DEVMEM_MEMDESC                  *psGuestFirmwareRawMemDesc[RGX_NUM_OS_SUPPORTED];
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       /* Array to store data needed for workload estimation when a workload
+          has finished and its cycle time is returned to the host.      */
+       WORKEST_RETURN_DATA     asReturnData[RETURN_DATA_ARRAY_SIZE];
+       IMG_UINT32              ui32ReturnDataWO;
+       POS_LOCK                hWorkEstLock;
+#endif
+
+#if defined(SUPPORT_PDVFS)
+       /**
+        * Host memdesc and pointer to memory containing core clock rate in Hz.
+        * Firmware (PDVFS) updates the memory on changing the core clock rate over
+        * GPIO.
+        * Note: Shared memory needs atomic access from Host driver and firmware,
+        * hence size should not be greater than memory transaction granularity.
+        * Currently it is chosen to be 32 bits.
+        */
+       DEVMEM_MEMDESC                  *psRGXFWIFCoreClkRateMemDesc;
+       volatile IMG_UINT32             *pui32RGXFWIFCoreClkRate;
+       /**
+        * Last sampled core clk rate.
+        */
+       volatile IMG_UINT32             ui32CoreClkRateSnapshot;
+#endif
+
+       /*
+          HWPerf data for the RGX device
+        */
+
+       POS_LOCK    hHWPerfLock;  /*! Critical section lock that protects HWPerf code
+                                  *  from duplicate init/deinit across multiple threads
+                                  *  and from loss/freeing of FW & Host resources while
+                                  *  in use in another thread, e.g. the MISR. */
+
+       IMG_UINT64  ui64HWPerfFilter; /*! Event filter for FW events (settable by AppHint) */
+       IMG_HANDLE  hHWPerfStream;    /*! TL Stream buffer (L2) for firmware event stream */
+       IMG_UINT32  ui32L2BufMaxPacketSize;/*!< Max allowed packet size in FW HWPerf TL (L2) buffer */
+       IMG_BOOL    bSuspendHWPerfL2DataCopy;  /*! Flag to indicate if copying HWPerf data is suspended */
+
+       IMG_UINT32  ui32HWPerfHostFilter;      /*! Event filter for HWPerfHost stream (settable by AppHint) */
+       POS_LOCK    hLockHWPerfHostStream;     /*! Lock guarding access to HWPerfHost stream from multiple threads */
+       IMG_HANDLE  hHWPerfHostStream;         /*! TL Stream buffer for host only event stream */
+       IMG_UINT32  ui32HWPerfHostBufSize;     /*! Host side buffer size in bytes */
+       IMG_UINT32  ui32HWPerfHostLastOrdinal; /*! Ordinal of the last packet emitted in HWPerfHost TL stream.
+                                               *  Guarded by hLockHWPerfHostStream */
+       IMG_UINT32  ui32HWPerfHostNextOrdinal; /*! Ordinal number for HWPerfHost events. Guarded by hHWPerfHostSpinLock */
+       IMG_UINT8   *pui8DeferredEvents;       /*! List of HWPerfHost events yet to be emitted in the TL stream.
+                                               *  Events generated from atomic context have their emission
+                                               *  deferred, as the emission code can sleep */
+       IMG_UINT16  ui16DEReadIdx;             /*! Read index in the above deferred events buffer */
+       IMG_UINT16  ui16DEWriteIdx;            /*! Write index in the above deferred events buffer */
+       void        *pvHostHWPerfMISR;         /*! MISR to emit pending/deferred events in HWPerfHost TL stream */
+       POS_SPINLOCK hHWPerfHostSpinLock;      /*! Guards data shared between an atomic & sleepable-context */
+#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
+       IMG_UINT32  ui32DEHighWatermark;       /*! High watermark of deferred events buffer usage. Protected by
+                                               *  hHWPerfHostSpinLock */
+       /* Max number of times DeferredEmission waited for an atomic-context to finish a packet write */
+       IMG_UINT32  ui32WaitForAtomicCtxPktHighWatermark; /*! Protected by hLockHWPerfHostStream */
+       /* Whether a warning has been logged about an atomic-context packet loss (due to too long a wait for the write to finish) */
+       IMG_BOOL    bWarnedAtomicCtxPktLost;
+       /* Max number of times DeferredEmission scheduled out to give the right-ordinal packet a chance to be emitted */
+       IMG_UINT32  ui32WaitForRightOrdPktHighWatermark; /*! Protected by hLockHWPerfHostStream */
+       /* Whether a warning has been logged about a packet loss (due to too long a wait for the right ordinal to emit) */
+       IMG_BOOL    bWarnedPktOrdinalBroke;
+#endif
+
+       void        *pvGpuFtraceData;
+
+       /* Poll data for detecting firmware fatal errors */
+       IMG_UINT32                              aui32CrLastPollCount[RGXFW_THREAD_NUM];
+       IMG_UINT32                              ui32KCCBCmdsExecutedLastTime;
+       IMG_BOOL                                bKCCBCmdsWaitingLastTime;
+       IMG_UINT32                              ui32GEOTimeoutsLastTime;
+       IMG_UINT32                              ui32InterruptCountLastTime;
+       IMG_UINT32                              ui32MissingInterruptsLastTime;
+
+       /* Client stall detection */
+       IMG_UINT32                              ui32StalledClientMask;
+
+       IMG_BOOL                                bWorkEstEnabled;
+       IMG_BOOL                                bPDVFSEnabled;
+
+       void                                    *pvLISRData;
+       void                                    *pvMISRData;
+       void                                    *pvAPMISRData;
+       RGX_ACTIVEPM_CONF               eActivePMConf;
+
+       volatile IMG_UINT32             aui32SampleIRQCount[RGXFW_THREAD_NUM];
+
+       DEVMEM_MEMDESC                  *psRGXFaultAddressMemDesc;
+
+       DEVMEM_MEMDESC                  *psSLC3FenceMemDesc;
+
+       /* If we do 10 deferred memory allocations per second, then the ID would wrap around after 13 years */
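+       /* (The 13 years follows from the IDs being 32-bit: 2^32 IDs at 10 per
+        *  second is roughly 4.3e8 seconds, i.e. about 13.6 years.) */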
+       IMG_UINT32                              ui32ZSBufferCurrID;     /*!< ID assigned to the next deferred devmem allocation */
+       IMG_UINT32                              ui32FreelistCurrID;     /*!< ID assigned to the next freelist */
+
+       POS_LOCK                                hLockZSBuffer;          /*!< Lock to protect simultaneous access to ZSBuffers */
+       DLLIST_NODE                             sZSBufferHead;          /*!< List of on-demand ZSBuffers */
+       POS_LOCK                                hLockFreeList;          /*!< Lock to protect simultaneous access to Freelists */
+       DLLIST_NODE                             sFreeListHead;          /*!< List of growable Freelists */
+       PSYNC_PRIM_CONTEXT              hSyncPrimContext;
+       PVRSRV_CLIENT_SYNC_PRIM *psPowSyncPrim;
+
+       IMG_UINT32                              ui32ActivePMReqOk;
+       IMG_UINT32                              ui32ActivePMReqDenied;
+       IMG_UINT32                              ui32ActivePMReqNonIdle;
+       IMG_UINT32                              ui32ActivePMReqRetry;
+       IMG_UINT32                              ui32ActivePMReqTotal;
+
+       IMG_HANDLE                              hProcessQueuesMISR;
+
+       IMG_UINT32                              ui32DeviceFlags;                /*!< Flags to track general device state */
+
+       /* GPU DVFS Table */
+       RGX_GPU_DVFS_TABLE              *psGpuDVFSTable;
+
+       /* Pointer to function returning the GPU utilisation statistics since the last
+        * time the function was called. Supports different users at the same time.
+        *
+        * psReturnStats [out]: GPU utilisation statistics (active high/active low/idle/blocked)
+        *                      in microseconds since the last time the function was called
+        *                      by a specific user (identified by hGpuUtilUser)
+        *
+        * Returns PVRSRV_OK in case the call completed without errors,
+        * some other value otherwise.
+        */
+       PVRSRV_ERROR (*pfnGetGpuUtilStats) (PVRSRV_DEVICE_NODE *psDeviceNode,
+                                           IMG_HANDLE hGpuUtilUser,
+                                           RGXFWIF_GPU_UTIL_STATS *psReturnStats);
+
+       /* Pointer to function that checks if the physical GPU IRQ
+        * line has been asserted and clears it if so */
+       IMG_BOOL (*pfnRGXAckIrq) (struct _PVRSRV_RGXDEV_INFO_ *psDevInfo);
+
+       POS_LOCK                                hGPUUtilLock;
+
+       /* Register configuration */
+       RGX_REG_CONFIG                  sRegCongfig;
+
+       IMG_BOOL                                bRGXPowered;
+       DLLIST_NODE                             sMemoryContextList;
+
+       POSWR_LOCK                              hRenderCtxListLock;
+       POSWR_LOCK                              hComputeCtxListLock;
+       POSWR_LOCK                              hTransferCtxListLock;
+       POSWR_LOCK                              hTDMCtxListLock;
+       POSWR_LOCK                              hMemoryCtxListLock;
+       POSWR_LOCK                              hKickSyncCtxListLock;
+
+       /* Linked list of deferred KCCB commands due to a full KCCB.
+        * Access to members sKCCBDeferredCommandsListHead and ui32KCCBDeferredCommandsCount
+        * are protected by the hLockKCCBDeferredCommandsList spin lock. */
+       POS_SPINLOCK                    hLockKCCBDeferredCommandsList; /*!< Protects deferred KCCB commands list */
+       DLLIST_NODE                             sKCCBDeferredCommandsListHead;
+       IMG_UINT32                              ui32KCCBDeferredCommandsCount; /*!< No of commands in the deferred list */
+
+       /* Linked lists of contexts on this device */
+       DLLIST_NODE                             sRenderCtxtListHead;
+       DLLIST_NODE                             sComputeCtxtListHead;
+       DLLIST_NODE                             sTDMCtxtListHead;
+       DLLIST_NODE                             sKickSyncCtxtListHead;
+
+       DLLIST_NODE                             sCommonCtxtListHead;
+       POSWR_LOCK                              hCommonCtxtListLock;
+       IMG_UINT32                              ui32CommonCtxtCurrentID;        /*!< ID assigned to the next common context */
+
+       POS_LOCK                                hDebugFaultInfoLock;    /*!< Lock to protect the debug fault info list */
+       POS_LOCK                                hMMUCtxUnregLock;               /*!< Lock to protect list of unregistered MMU contexts */
+
+#if defined(SUPPORT_VALIDATION)
+       RGX_POWER_DOMAIN_STATE  sPowerDomainState;              /*!< Power island sequence */
+       IMG_UINT32                              ui32PowDomainKickInterval;      /*!< Power island transition interval */
+       IMG_UINT32                              ui32ValidationFlags;    /*!< Validation flags for host driver */
+#endif
+       IMG_UINT32                              ui32AvailablePowUnitsMask;
+       IMG_UINT32                              ui32AvailableRACMask;
+
+       RGX_LAYER_PARAMS                sLayerParams;
+
+       RGXFWIF_DM                              eBPDM;                                  /*!< Current breakpoint data master */
+       IMG_BOOL                                bBPSet;                                 /*!< A Breakpoint has been set */
+       POS_LOCK                                hBPLock;                                /*!< Lock for break point operations */
+
+       IMG_UINT32                              ui32CoherencyTestsDone;
+
+       ATOMIC_T                                iCCBSubmissionOrdinal; /* Rolling count used to indicate CCB submission order (all CCBs) */
+       POS_LOCK                                hCCBRecoveryLock;      /* Lock to protect pvEarliestStalledClientCCB and ui32OldestSubmissionOrdinal variables */
+       void                                    *pvEarliestStalledClientCCB; /* Will point to cCCB command to unblock in the event of a stall */
+       IMG_UINT32                              ui32OldestSubmissionOrdinal; /* Earliest submission ordinal of CCB entry found so far */
+       IMG_UINT32                              ui32SLRHoldoffCounter;   /* Decremented each time the health check is called, until zero. SLR only happens when zero. */
+
+       POS_LOCK                                hCCBStallCheckLock; /* Lock used to guard against multiple threads simultaneously checking for stalled CCBs */
+
+#if defined(SUPPORT_FIRMWARE_GCOV)
+       /* Firmware gcov buffer */
+       DEVMEM_MEMDESC                  *psFirmwareGcovBufferMemDesc;      /*!< mem desc for Firmware gcov dumping buffer */
+       IMG_UINT32                              ui32FirmwareGcovSize;
+#endif
+       /* Value to store for each page size range config register in MMU4 */
+       IMG_UINT64                              aui64MMUPageSizeRangeValue[RGX_MAX_NUM_MMU_PAGE_SIZE_RANGES];
+
+#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER)
+       struct
+       {
+               IMG_UINT64 ui64timerGray;
+               IMG_UINT64 ui64timerBinary;
+               IMG_UINT64 *pui64uscTimers;
+       } sRGXTimerValues;
+#endif
+
+#if defined(SUPPORT_VALIDATION)
+       struct
+       {
+               IMG_UINT64 ui64RegVal;
+               struct completion sRegComp;
+       } sFwRegs;
+#endif
+
+       IMG_HANDLE                              hTQCLISharedMem;                /*!< TQ Client Shared Mem PMR */
+       IMG_HANDLE                              hTQUSCSharedMem;                /*!< TQ USC Shared Mem PMR */
+
+#if defined(SUPPORT_VALIDATION)
+       IMG_UINT32                              ui32TestSLRInterval; /* Skip enqueueing an update sync checkpoint on every Nth kick */
+       IMG_UINT32                              ui32TestSLRCount;    /* (used to test SLR operation) */
+       IMG_UINT32                              ui32SLRSkipFWAddr;
+#endif
+
+#if defined(SUPPORT_SECURITY_VALIDATION)
+       DEVMEM_MEMDESC                  *psRGXFWIfSecureBufMemDesc;
+       DEVMEM_MEMDESC                  *psRGXFWIfNonSecureBufMemDesc;
+#endif
+
+       /* Timer Queries */
+       IMG_UINT32                              ui32ActiveQueryId;              /*!< id of the active timer query */
+       IMG_BOOL                                bSaveStart;                             /*!< save the start time of the next kick on the device */
+       IMG_BOOL                                bSaveEnd;                               /*!< save the end time of the next kick on the device */
+
+       DEVMEM_MEMDESC                  *psStartTimeMemDesc;    /*!< memdesc for Start Times */
+       IMG_UINT64                              *pui64StartTimeById;    /*!< CPU mapping of the above */
+
+       DEVMEM_MEMDESC                  *psEndTimeMemDesc;      /*!< memdesc for End Times */
+       IMG_UINT64                              *pui64EndTimeById;      /*!< CPU mapping of the above */
+
+       IMG_UINT32                              aui32ScheduledOnId[RGX_MAX_TIMER_QUERIES];      /*!< kicks Scheduled on QueryId */
+       DEVMEM_MEMDESC                  *psCompletedMemDesc;    /*!< kicks Completed on QueryId */
+       IMG_UINT32                              *pui32CompletedById;    /*!< CPU mapping of the above */
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+       POS_LOCK                                hTimerQueryLock;                /*!< lock to protect simultaneous access to timer query members */
+#endif
+
+       PVRSRV_RGXDEV_ERROR_COUNTS sErrorCounts;                /*!< struct containing device error counts */
+
+       IMG_UINT32                              ui32HostSafetyEventMask;/*!< mask of the safety events handled by the driver */
+
+       RGX_CONTEXT_RESET_REASON        eLastDeviceError;       /*!< device error reported to client */
+
+       IMG_UINT32              ui32Log2Non4KPgSize; /* Page size of Non4k heap in log2 form */
+} PVRSRV_RGXDEV_INFO;
+
+
+
+typedef struct _RGX_TIMING_INFORMATION_
+{
+       /*! GPU default core clock speed in Hz */
+       IMG_UINT32                      ui32CoreClockSpeed;
+
+       /*! Active Power Management: GPU actively requests the host driver to be powered off */
+       IMG_BOOL                        bEnableActivePM;
+
+       /*! Enable the GPU to power off internal Power Islands independently from the host driver */
+       IMG_BOOL                        bEnableRDPowIsland;
+
+       /*! Active Power Management: Delay between the GPU idle and the request to the host */
+       IMG_UINT32                      ui32ActivePMLatencyms;
+
+} RGX_TIMING_INFORMATION;
+
+typedef struct _RGX_DATA_
+{
+       /*! Timing information */
+       RGX_TIMING_INFORMATION  *psRGXTimingInfo;
+} RGX_DATA;
+
+
+/*
+       RGX PDUMP register bank name (prefix)
+*/
+#define RGX_PDUMPREG_NAME              "RGXREG"
+
+#endif /* RGXDEVICE_H */
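The RGX_TIMING_INFORMATION and RGX_DATA structures above carry the platform's timing description into the driver. A minimal sketch of how platform code might populate them is shown below; the clock rate, the active-PM latency and the variable names are illustrative assumptions, not values defined by this header.

/* Minimal sketch (assumed values, hypothetical names) of platform code
 * describing GPU timing to the RGX driver via RGX_DATA. */
static RGX_TIMING_INFORMATION gsRGXTimingInfo = {
	.ui32CoreClockSpeed    = 594000000, /* assumed core clock in Hz */
	.bEnableActivePM       = IMG_TRUE,  /* GPU may request to be powered off */
	.bEnableRDPowIsland    = IMG_TRUE,  /* allow internal power islands */
	.ui32ActivePMLatencyms = 30,        /* assumed GPU-idle to power-request delay */
};

static RGX_DATA gsRGXData = {
	.psRGXTimingInfo = &gsRGXTimingInfo,
};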
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxfwutils.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxfwutils.c
new file mode 100644 (file)
index 0000000..935e6ea
--- /dev/null
@@ -0,0 +1,8160 @@
+/*************************************************************************/ /*!
+@File
+@Title          Rogue firmware utility routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Rogue firmware utility routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(__linux__)
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+#include "img_defs.h"
+
+#include "rgxdefs_km.h"
+#include "rgx_fwif_km.h"
+#include "pdump_km.h"
+#include "osfunc.h"
+#if defined(__linux__)
+#include "km_apphint.h"
+#endif
+#include "cache_km.h"
+#include "allocmem.h"
+#include "physheap.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "devicemem_server.h"
+
+#include "pvr_debug.h"
+#include "pvr_notifier.h"
+#include "rgxfwutils.h"
+#include "rgx_options.h"
+#include "rgx_fwif_alignchecks.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgx_pdump_panics.h"
+#include "fwtrace_string.h"
+#include "rgxheapconfig.h"
+#include "pvrsrv.h"
+#include "rgxdebug.h"
+#include "rgxhwperf.h"
+#include "rgxccb.h"
+#include "rgxcompute.h"
+#include "rgxtdmtransfer.h"
+#include "rgxpower.h"
+#if defined(SUPPORT_DISPLAY_CLASS)
+#include "dc_server.h"
+#endif
+#include "rgxmem.h"
+#include "rgxmmudefs_km.h"
+#include "rgxta3d.h"
+#include "rgxkicksync.h"
+#include "rgxutils.h"
+#include "rgxtimecorr.h"
+#include "rgxfwimageutils.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_external.h"
+#include "tlstream.h"
+#include "devicemem_server_utils.h"
+#include "htbuffer.h"
+#include "info_page.h"
+
+#include "physmem_lma.h"
+#include "physmem_osmem.h"
+#include "oskm_apphint.h"
+
+#ifdef __linux__
+#include <linux/kernel.h>      /* sprintf */
+#include "rogue_trace_events.h"
+#else
+#include <stdio.h>
+#endif
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "rgxworkest.h"
+#endif
+
+#if defined(SUPPORT_PDVFS)
+#include "rgxpdvfs.h"
+#endif
+
+#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER)
+#include "rgxsoctimer.h"
+#endif
+
+#include "vz_vmm_pvz.h"
+#include "rgx_heaps.h"
+
+/*!
+ ******************************************************************************
+ * HWPERF
+ *****************************************************************************/
+/* Size bounds for the Firmware L1 HWPerf buffer, expressed in KB (the default
+ * corresponds to 2MB). The buffer is accessed by both the Firmware and the
+ * host driver. */
+#define RGXFW_HWPERF_L1_SIZE_MIN        (16U)
+#define RGXFW_HWPERF_L1_SIZE_DEFAULT    PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB
+#define RGXFW_HWPERF_L1_SIZE_MAX        (12288U)
+#if defined(DEBUG)
+/* Catch the use of auto-increment when meta_registers_unpacked_accesses feature is
+ * present in case we ever use it. No WA exists so it must not be used */
+#define CHECK_HWBRN_68777(v) \
+       do { \
+               PVR_ASSERT(((v) & RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_EN) == 0); \
+       } while (0)
+#else
+#define CHECK_HWBRN_68777(v)
+#endif
+
+/* Firmware CCB length */
+#if defined(NO_HARDWARE) && defined(PDUMP)
+#define RGXFWIF_FWCCB_NUMCMDS_LOG2   (10)
+#elif defined(SUPPORT_PDVFS) || defined(SUPPORT_WORKLOAD_ESTIMATION)
+#define RGXFWIF_FWCCB_NUMCMDS_LOG2   (8)
+#else
+#define RGXFWIF_FWCCB_NUMCMDS_LOG2   (5)
+#endif
+
+/*
+ * Maximum length of time a DM can run for before the DM will be marked
+ * as out-of-time. CDM has an increased value due to longer running kernels.
+ *
+ * These deadlines are increased on FPGA, EMU and VP due to the slower
+ * execution time of these platforms. PDUMPS are also included since they
+ * are often run on EMU, FPGA or in CSim.
+ */
+#if defined(FPGA) || defined(EMULATOR) || defined(VIRTUAL_PLATFORM) || defined(PDUMP)
+#define RGXFWIF_MAX_WORKLOAD_DEADLINE_MS     (480000)
+#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (1000000)
+#else
+#define RGXFWIF_MAX_WORKLOAD_DEADLINE_MS     (30000)
+#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (90000)
+#endif
+
+/* Workload Estimation Firmware CCB length */
+#define RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2   (7)
+
+/* Size of memory buffer for firmware gcov data
+ * The actual data size is several hundred kilobytes. The buffer is an order of magnitude larger. */
+#define RGXFWIF_FIRMWARE_GCOV_BUFFER_SIZE (4*1024*1024)
+
+typedef struct
+{
+       RGXFWIF_KCCB_CMD        sKCCBcmd;
+       DLLIST_NODE             sListNode;
+       PDUMP_FLAGS_T           uiPDumpFlags;
+       PVRSRV_RGXDEV_INFO      *psDevInfo;
+} RGX_DEFERRED_KCCB_CMD;
+
+#if defined(PDUMP)
+/* ensure PIDs are 32-bit because a 32-bit PDump load is generated for the
+ * PID filter example entries
+ */
+static_assert(sizeof(IMG_PID) == sizeof(IMG_UINT32),
+               "FW PID filtering assumes the IMG_PID type is 32-bits wide as it "
+               "generates WRW commands for loading the PID values");
+#endif
+
+static void RGXFreeFwOsData(PVRSRV_RGXDEV_INFO *psDevInfo);
+static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+static PVRSRV_ERROR _AllocateSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo, RGXFWIF_SYSINIT* psFwSysInit)
+{
+       PVRSRV_ERROR eError;
+       DEVMEM_MEMDESC** ppsSLC3FenceMemDesc = &psDevInfo->psSLC3FenceMemDesc;
+       IMG_UINT32 ui32CacheLineSize = GET_ROGUE_CACHE_LINE_SIZE(
+                       RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS));
+
+       PVR_DPF_ENTERED;
+
+       eError = DevmemAllocate(psDevInfo->psFirmwareMainHeap,
+                       1,
+                       ui32CacheLineSize,
+                       PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                       PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                       PVRSRV_MEMALLOCFLAG_GPU_UNCACHED |
+                       PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN),
+                       "FwSLC3FenceWA",
+                       ppsSLC3FenceMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF_RETURN_RC(eError);
+       }
+
+       /* We need to map it so the heap for this allocation is set */
+       eError = DevmemMapToDevice(*ppsSLC3FenceMemDesc,
+                                                          psDevInfo->psFirmwareMainHeap,
+                                                          &psFwSysInit->sSLC3FenceDevVAddr);
+       if (eError != PVRSRV_OK)
+       {
+               DevmemFree(*ppsSLC3FenceMemDesc);
+               *ppsSLC3FenceMemDesc = NULL;
+       }
+
+       PVR_DPF_RETURN_RC1(eError, *ppsSLC3FenceMemDesc);
+}
+
+static void _FreeSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo)
+{
+       DEVMEM_MEMDESC* psSLC3FenceMemDesc = psDevInfo->psSLC3FenceMemDesc;
+
+       if (psSLC3FenceMemDesc)
+       {
+               DevmemReleaseDevVirtAddr(psSLC3FenceMemDesc);
+               DevmemFree(psSLC3FenceMemDesc);
+       }
+}
+
+static void __MTSScheduleWrite(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Value)
+{
+       /* ensure memory is flushed before kicking MTS */
+       OSWriteMemoryBarrier(NULL);
+
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE, ui32Value);
+
+       /* ensure the MTS kick goes through before continuing */
+#if !defined(NO_HARDWARE)
+       OSWriteMemoryBarrier((IMG_BYTE*) psDevInfo->pvRegsBaseKM + RGX_CR_MTS_SCHEDULE);
+#else
+       OSWriteMemoryBarrier(NULL);
+#endif
+}
+
+/*************************************************************************/ /*!
+@Function       RGXSetupFwAllocation
+
+@Description    Allocates firmware-accessible memory and optionally returns
+                its firmware virtual address and a CPU mapping.
+
+@Input          psDevInfo       Device Info struct
+@Input          uiAllocFlags    Flags determining type of memory allocation
+@Input          ui32Size        Size of memory allocation
+@Input          pszName         Allocation label
+@Output         ppsMemDesc      Pointer to the allocation's memory descriptor
+@Output         psFwPtr         Address of the firmware pointer to set
+@Output         ppvCpuPtr       Address of the CPU pointer to set
+@Input          ui32DevVAFlags  Any combination of  RFW_FWADDR_*_FLAG
+
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXSetupFwAllocation(PVRSRV_RGXDEV_INFO*  psDevInfo,
+                                                                 PVRSRV_MEMALLOCFLAGS_T uiAllocFlags,
+                                                                 IMG_UINT32           ui32Size,
+                                                                 const IMG_CHAR       *pszName,
+                                                                 DEVMEM_MEMDESC       **ppsMemDesc,
+                                                                 RGXFWIF_DEV_VIRTADDR *psFwPtr,
+                                                                 void                 **ppvCpuPtr,
+                                                                 IMG_UINT32           ui32DevVAFlags)
+{
+       PVRSRV_ERROR eError;
+#if defined(SUPPORT_AUTOVZ)
+       IMG_BOOL bClearByMemset;
+       if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiAllocFlags))
+       {
+               /* Under AutoVz the ZERO_ON_ALLOC flag is avoided as it causes the memory to
+                * be allocated from a different PMR than an allocation without the flag.
+                * When the content of an allocation needs to be recovered from physical memory
+                * on a later driver reboot, the memory then cannot be zeroed but the allocation
+                * addresses must still match.
+                * If the memory requires clearing, perform a memset after the allocation. */
+               uiAllocFlags &= ~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+               bClearByMemset = IMG_TRUE;
+       }
+       else
+       {
+               bClearByMemset = IMG_FALSE;
+       }
+#endif
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode, "Allocate %s", pszName);
+       eError = DevmemFwAllocate(psDevInfo,
+                                                         ui32Size,
+                                                         uiAllocFlags,
+                                                         pszName,
+                                                         ppsMemDesc);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to allocate %u bytes for %s (%u)",
+                                __func__,
+                                ui32Size,
+                                pszName,
+                                eError));
+               goto fail_alloc;
+       }
+
+       if (psFwPtr)
+       {
+               eError = RGXSetFirmwareAddress(psFwPtr, *ppsMemDesc, 0, ui32DevVAFlags);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Failed to acquire firmware virtual address for %s (%u)",
+                                        __func__,
+                                        pszName,
+                                        eError));
+                       goto fail_fwaddr;
+               }
+       }
+
+#if defined(SUPPORT_AUTOVZ)
+       if ((bClearByMemset) || (ppvCpuPtr))
+#else
+       if (ppvCpuPtr)
+#endif
+       {
+               void *pvTempCpuPtr;
+
+               eError = DevmemAcquireCpuVirtAddr(*ppsMemDesc, &pvTempCpuPtr);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Failed to acquire CPU virtual address for %s (%u)",
+                                       __func__,
+                                        pszName,
+                                       eError));
+                       goto fail_cpuva;
+               }
+
+#if defined(SUPPORT_AUTOVZ)
+               if (bClearByMemset)
+               {
+                       if (PVRSRV_CHECK_CPU_WRITE_COMBINE(uiAllocFlags))
+                       {
+                               OSCachedMemSetWMB(pvTempCpuPtr, 0, ui32Size);
+                       }
+                       else
+                       {
+                               OSDeviceMemSet(pvTempCpuPtr, 0, ui32Size);
+                       }
+               }
+               if (ppvCpuPtr)
+#endif
+               {
+                       *ppvCpuPtr = pvTempCpuPtr;
+               }
+#if defined(SUPPORT_AUTOVZ)
+               else
+               {
+                       DevmemReleaseCpuVirtAddr(*ppsMemDesc);
+                       pvTempCpuPtr = NULL;
+               }
+#endif
+       }
+
+       PVR_DPF((PVR_DBG_MESSAGE, "%s: %s set up at Fw VA 0x%x and CPU VA 0x%p with alloc flags 0x%" IMG_UINT64_FMTSPECX,
+                        __func__, pszName,
+                        (psFwPtr)   ? (psFwPtr->ui32Addr) : (0),
+                        (ppvCpuPtr) ? (*ppvCpuPtr)        : (NULL),
+                        uiAllocFlags));
+
+       return eError;
+
+fail_cpuva:
+       if (psFwPtr)
+       {
+               RGXUnsetFirmwareAddress(*ppsMemDesc);
+       }
+fail_fwaddr:
+       DevmemFree(*ppsMemDesc);
+fail_alloc:
+       return eError;
+}
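+/* Typical usage (illustrative sketch): allocate a small shared structure and
+ * obtain both its firmware virtual address and a CPU mapping. The flag and
+ * macro names are the ones used elsewhere in this file;
+ * SExample/psMemDesc/sFwAddr/psCpuPtr are hypothetical.
+ *
+ *     eError = RGXSetupFwAllocation(psDevInfo,
+ *                                   RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS,
+ *                                   sizeof(SExample),
+ *                                   "FwExample",
+ *                                   &psMemDesc,
+ *                                   &sFwAddr,
+ *                                   (void **)&psCpuPtr,
+ *                                   RFW_FWADDR_NOREF_FLAG);
+ */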
+
+/*************************************************************************/ /*!
+@Function       GetHwPerfBufferSize
+
+@Description    Computes the effective size of the HWPerf FW (L1) buffer
+@Input          ui32HWPerfFWBufSizeKB       Requested buffer size in KB (AppHint value)
+@Return         Effective HWPerf buffer size in bytes
+*/ /**************************************************************************/
+static IMG_UINT32 GetHwPerfBufferSize(IMG_UINT32 ui32HWPerfFWBufSizeKB)
+{
+       IMG_UINT32 HwPerfBufferSize;
+
+       /* HWPerf: Determine the size of the FW buffer */
+       if (ui32HWPerfFWBufSizeKB == 0 ||
+                       ui32HWPerfFWBufSizeKB == RGXFW_HWPERF_L1_SIZE_DEFAULT)
+       {
+               /* Under pvrsrvctl, a size of 0 implies the AppHint is not set or is
+                * set to zero; use the default size from the driver constant, with
+                * no logging.
+                */
+               HwPerfBufferSize = RGXFW_HWPERF_L1_SIZE_DEFAULT<<10;
+       }
+       else if (ui32HWPerfFWBufSizeKB > (RGXFW_HWPERF_L1_SIZE_MAX))
+       {
+               /* Size specified as an AppHint but it is too big */
+               PVR_DPF((PVR_DBG_WARNING,
+                               "%s: HWPerfFWBufSizeInKB value (%u) too big, using maximum (%u)",
+                               __func__,
+                               ui32HWPerfFWBufSizeKB, RGXFW_HWPERF_L1_SIZE_MAX));
+               HwPerfBufferSize = RGXFW_HWPERF_L1_SIZE_MAX<<10;
+       }
+       else if (ui32HWPerfFWBufSizeKB > (RGXFW_HWPERF_L1_SIZE_MIN))
+       {
+               /* Size specified via the HWPerfFWBufSizeInKB AppHint */
+               PVR_DPF((PVR_DBG_WARNING,
+                               "%s: Using HWPerf FW buffer size of %u KB",
+                               __func__,
+                               ui32HWPerfFWBufSizeKB));
+               HwPerfBufferSize = ui32HWPerfFWBufSizeKB<<10;
+       }
+       else
+       {
+               /* Size specified as an AppHint but it is too small */
+               PVR_DPF((PVR_DBG_WARNING,
+                               "%s: HWPerfFWBufSizeInKB value (%u) too small, using minimum (%u)",
+                               __func__,
+                               ui32HWPerfFWBufSizeKB, RGXFW_HWPERF_L1_SIZE_MIN));
+               HwPerfBufferSize = RGXFW_HWPERF_L1_SIZE_MIN<<10;
+       }
+
+       return HwPerfBufferSize;
+}
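+/* Worked examples of the clamping above (AppHint values in KB, results in
+ * bytes): 0 or RGXFW_HWPERF_L1_SIZE_DEFAULT gives the default size; anything
+ * above RGXFW_HWPERF_L1_SIZE_MAX (12288) is clamped to 12288 << 10; a value
+ * between the limits, e.g. 64, becomes 64 << 10; anything at or below
+ * RGXFW_HWPERF_L1_SIZE_MIN (16) falls back to 16 << 10. */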
+
+#if defined(PDUMP)
+/*!
+*******************************************************************************
+ @Function             RGXFWSetupSignatureChecks
+ @Description          Allocates the buffer used for firmware signature checks
+                       and initialises the signature buffer control structure.
+ @Input                        psDevInfo
+
+ @Return               PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXFWSetupSignatureChecks(PVRSRV_RGXDEV_INFO* psDevInfo,
+                                              DEVMEM_MEMDESC**    ppsSigChecksMemDesc,
+                                              IMG_UINT32          ui32SigChecksBufSize,
+                                              RGXFWIF_SIGBUF_CTL* psSigBufCtl)
+{
+       PVRSRV_ERROR    eError;
+
+       /* Allocate memory for the checks */
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS,
+                                                                 ui32SigChecksBufSize,
+                                                                 "FwSignatureChecks",
+                                                                 ppsSigChecksMemDesc,
+                                                                 &psSigBufCtl->sBuffer,
+                                                                 NULL,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail);
+
+       DevmemPDumpLoadMem(     *ppsSigChecksMemDesc,
+                       0,
+                       ui32SigChecksBufSize,
+                       PDUMP_FLAGS_CONTINUOUS);
+
+       psSigBufCtl->ui32LeftSizeInRegs = ui32SigChecksBufSize / sizeof(IMG_UINT32);
+fail:
+       return eError;
+}
+#endif
+
+
+#if defined(SUPPORT_FIRMWARE_GCOV)
+/*!
+*******************************************************************************
+ @Function             RGXFWSetupFirmwareGcovBuffer
+ @Description          Allocates the buffer used for firmware gcov data and
+                       initialises the gcov control structure.
+ @Input                        psDevInfo
+
+ @Return               PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXFWSetupFirmwareGcovBuffer(PVRSRV_RGXDEV_INFO*                   psDevInfo,
+               DEVMEM_MEMDESC**                        ppsBufferMemDesc,
+               IMG_UINT32                                      ui32FirmwareGcovBufferSize,
+               RGXFWIF_FIRMWARE_GCOV_CTL*      psFirmwareGcovCtl,
+               const IMG_CHAR*                         pszBufferName)
+{
+       PVRSRV_ERROR    eError;
+
+       /* Allocate memory for gcov */
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 (RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS |
+                                                                  PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)),
+                                                                 ui32FirmwareGcovBufferSize,
+                                                                 pszBufferName,
+                                                                 ppsBufferMemDesc,
+                                                                 &psFirmwareGcovCtl->sBuffer,
+                                                                 NULL,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_RETURN_IF_ERROR(eError, "RGXSetupFwAllocation");
+
+       psFirmwareGcovCtl->ui32Size = ui32FirmwareGcovBufferSize;
+
+       return PVRSRV_OK;
+}
+#endif
+
+/*!
+*******************************************************************************
+ @Function      RGXFWSetupAlignChecks
+ @Description   This function allocates and fills the memory needed for the
+                alignment checks of the UM and KM structures shared with the
+                firmware. The format of the data in the memory is as follows:
+                    <number of elements in the KM array>
+                    <array of KM structures' sizes and members' offsets>
+                    <number of elements in the UM array>
+                    <array of UM structures' sizes and members' offsets>
+                The UM array is passed from the user side. The firmware is
+                then responsible for filling this part of the memory; if that
+                happens, the host driver checks the UM structures on the
+                client's connect.
+                If the macro is not defined, the client driver fills the memory
+                and the firmware checks the alignment of all structures.
+ @Input                        psDeviceNode
+
+ @Return               PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXFWSetupAlignChecks(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                               RGXFWIF_DEV_VIRTADDR    *psAlignChecksDevFW)
+{
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+       IMG_UINT32                      aui32RGXFWAlignChecksKM[] = { RGXFW_ALIGN_CHECKS_INIT_KM };
+       IMG_UINT32                      ui32RGXFWAlignChecksTotal;
+       IMG_UINT32*                     paui32AlignChecks;
+       PVRSRV_ERROR            eError;
+
+       /* The number of elements in the UM array is not known at this point,
+        * so assume the maximum, RGXFW_ALIGN_CHECKS_UM_MAX.
+        */
+       ui32RGXFWAlignChecksTotal = sizeof(aui32RGXFWAlignChecksKM)
+                                   + RGXFW_ALIGN_CHECKS_UM_MAX * sizeof(IMG_UINT32)
+                                   + 2 * sizeof(IMG_UINT32);
+
+       /* Allocate memory for the checks */
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS &
+                                                                 RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp),
+                                                                 ui32RGXFWAlignChecksTotal,
+                                                                 "FwAlignmentChecks",
+                                                                 &psDevInfo->psRGXFWAlignChecksMemDesc,
+                                                                 psAlignChecksDevFW,
+                                                                 (void**) &paui32AlignChecks,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail);
+
+       if (!psDeviceNode->bAutoVzFwIsUp)
+       {
+               /* Copy the values */
+               *paui32AlignChecks++ = ARRAY_SIZE(aui32RGXFWAlignChecksKM);
+               OSCachedMemCopy(paui32AlignChecks, &aui32RGXFWAlignChecksKM[0],
+                               sizeof(aui32RGXFWAlignChecksKM));
+               paui32AlignChecks += ARRAY_SIZE(aui32RGXFWAlignChecksKM);
+
+               *paui32AlignChecks = 0;
+       }
+
+       OSWriteMemoryBarrier(paui32AlignChecks);
+
+       DevmemPDumpLoadMem(     psDevInfo->psRGXFWAlignChecksMemDesc,
+                                               0,
+                                               ui32RGXFWAlignChecksTotal,
+                                               PDUMP_FLAGS_CONTINUOUS);
+
+       return PVRSRV_OK;
+
+fail:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
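+/* Illustrative layout of the alignment-check buffer written above, viewed as
+ * a sequence of IMG_UINT32 words:
+ *
+ *     [KM count] [KM entry 0] ... [KM entry N-1] [UM count = 0] [UM slots...]
+ *
+ * The host writes the KM count and entries and zeroes the UM count; the UM
+ * entries (up to RGXFW_ALIGN_CHECKS_UM_MAX) are provided from the user side
+ * later. */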
+
+static void RGXFWFreeAlignChecks(PVRSRV_RGXDEV_INFO* psDevInfo)
+{
+       if (psDevInfo->psRGXFWAlignChecksMemDesc != NULL)
+       {
+               DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc);
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWAlignChecksMemDesc);
+               psDevInfo->psRGXFWAlignChecksMemDesc = NULL;
+       }
+}
+
+PVRSRV_ERROR RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR        *ppDest,
+                                                  DEVMEM_MEMDESC               *psSrc,
+                                                  IMG_UINT32                   uiExtraOffset,
+                                                  IMG_UINT32                   ui32Flags)
+{
+       PVRSRV_ERROR            eError;
+       IMG_DEV_VIRTADDR        psDevVirtAddr;
+       PVRSRV_DEVICE_NODE      *psDeviceNode;
+       PVRSRV_RGXDEV_INFO      *psDevInfo;
+
+       psDeviceNode = (PVRSRV_DEVICE_NODE *) DevmemGetConnection(psSrc);
+       psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+       {
+               IMG_UINT32          ui32Offset;
+               IMG_BOOL            bCachedInMETA;
+               PVRSRV_MEMALLOCFLAGS_T uiDevFlags;
+               IMG_UINT32          uiGPUCacheMode;
+
+               eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr);
+               PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireDevVirtAddr", failDevVAAcquire);
+
+               /* Convert to an address in META memmap */
+               ui32Offset = psDevVirtAddr.uiAddr + uiExtraOffset - RGX_FIRMWARE_RAW_HEAP_BASE;
+
+               /* Check in the devmem flags whether this memory is cached/uncached */
+               DevmemGetFlags(psSrc, &uiDevFlags);
+
+               /* Honour the META cache flags */
+               bCachedInMETA = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) & uiDevFlags) != 0;
+
+               /* Honour the SLC cache flags */
+               eError = DevmemDeviceCacheMode(psDeviceNode, uiDevFlags, &uiGPUCacheMode);
+               PVR_LOG_GOTO_IF_ERROR(eError, "DevmemDeviceCacheMode", failDevCacheMode);
+
+               /*
+                * Choose Meta virtual address based on Meta and SLC cacheability.
+                */
+               ui32Offset += RGXFW_SEGMMU_DATA_BASE_ADDRESS;
+
+               if (bCachedInMETA)
+               {
+                       ui32Offset |= RGXFW_SEGMMU_DATA_META_CACHED;
+               }
+               else
+               {
+                       ui32Offset |= RGXFW_SEGMMU_DATA_META_UNCACHED;
+               }
+
+               if (PVRSRV_CHECK_GPU_CACHED(uiGPUCacheMode))
+               {
+                       ui32Offset |= RGXFW_SEGMMU_DATA_VIVT_SLC_CACHED;
+               }
+               else
+               {
+                       ui32Offset |= RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED;
+               }
+
+               ppDest->ui32Addr = ui32Offset;
+       }
+       else
+       {
+               IMG_UINT32      ui32Offset;
+               IMG_BOOL        bCachedInRISCV;
+               PVRSRV_MEMALLOCFLAGS_T uiDevFlags;
+
+               eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr);
+               PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireDevVirtAddr", failDevVAAcquire);
+
+               /* Convert to an address in RISCV memmap */
+               ui32Offset = psDevVirtAddr.uiAddr + uiExtraOffset - RGX_FIRMWARE_RAW_HEAP_BASE;
+
+               /* Check in the devmem flags whether this memory is cached/uncached */
+               DevmemGetFlags(psSrc, &uiDevFlags);
+
+               /* Honour the RISCV cache flags */
+               bCachedInRISCV = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) & uiDevFlags) != 0;
+
+               if (bCachedInRISCV)
+               {
+                       ui32Offset |= RGXRISCVFW_SHARED_CACHED_DATA_BASE;
+               }
+               else
+               {
+                       ui32Offset |= RGXRISCVFW_SHARED_UNCACHED_DATA_BASE;
+               }
+
+               ppDest->ui32Addr = ui32Offset;
+       }
+
+       if ((ppDest->ui32Addr & 0x3U) != 0)
+       {
+               IMG_CHAR *pszAnnotation;
+               /* It is expected that the annotation returned by DevmemGetAnnotation() is always valid */
+               DevmemGetAnnotation(psSrc, &pszAnnotation);
+
+               PVR_DPF((PVR_DBG_ERROR, "%s: %s @ 0x%x is not aligned to 32 bit",
+                                __func__, pszAnnotation, ppDest->ui32Addr));
+
+               return PVRSRV_ERROR_INVALID_ALIGNMENT;
+       }
+
+       if (ui32Flags & RFW_FWADDR_NOREF_FLAG)
+       {
+               DevmemReleaseDevVirtAddr(psSrc);
+       }
+
+       return PVRSRV_OK;
+
+failDevCacheMode:
+       DevmemReleaseDevVirtAddr(psSrc);
+failDevVAAcquire:
+       return eError;
+}
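+/* Example of the META address composition above (illustrative offset): for a
+ * META firmware, a cached-in-META, SLC-uncached allocation whose device VA is
+ * RGX_FIRMWARE_RAW_HEAP_BASE + 0x1000 (with no extra offset) yields
+ * (0x1000 + RGXFW_SEGMMU_DATA_BASE_ADDRESS)
+ *     | RGXFW_SEGMMU_DATA_META_CACHED | RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED. */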
+
+void RGXSetMetaDMAAddress(RGXFWIF_DMA_ADDR             *psDest,
+                                                 DEVMEM_MEMDESC                *psSrcMemDesc,
+                                                 RGXFWIF_DEV_VIRTADDR  *psSrcFWDevVAddr,
+                                                 IMG_UINT32                    uiOffset)
+{
+       PVRSRV_ERROR            eError;
+       IMG_DEV_VIRTADDR        sDevVirtAddr;
+
+       eError = DevmemAcquireDevVirtAddr(psSrcMemDesc, &sDevVirtAddr);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+       psDest->psDevVirtAddr.uiAddr = sDevVirtAddr.uiAddr;
+       psDest->psDevVirtAddr.uiAddr += uiOffset;
+       psDest->pbyFWAddr.ui32Addr = psSrcFWDevVAddr->ui32Addr;
+
+       DevmemReleaseDevVirtAddr(psSrcMemDesc);
+}
+
+
+void RGXUnsetFirmwareAddress(DEVMEM_MEMDESC *psSrc)
+{
+       DevmemReleaseDevVirtAddr(psSrc);
+}
+
+PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       /* Wait for Slave Port to be Ready */
+       psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES))
+       {
+               eError = RGXPollReg32(hPrivate,
+                                                         RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES,
+                                                         RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN
+                                                         | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN,
+                                                         RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN
+                                                         | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN);
+               if (eError == PVRSRV_OK)
+               {
+                       /* Issue a Write */
+                       CHECK_HWBRN_68777(ui32RegAddr);
+                       RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES, ui32RegAddr);
+                       (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES); /* Fence write */
+                       RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT__META_REGISTER_UNPACKED_ACCESSES, ui32RegValue);
+                       (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT__META_REGISTER_UNPACKED_ACCESSES); /* Fence write */
+               }
+       }
+       else
+       {
+               eError = RGXPollReg32(hPrivate,
+                                                         RGX_CR_META_SP_MSLVCTRL1,
+                                                         RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+                                                         RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+               if (eError == PVRSRV_OK)
+               {
+                       /* Issue a Write */
+                       RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr);
+                       (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0); /* Fence write */
+                       RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT, ui32RegValue);
+                       (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT); /* Fence write */
+               }
+       }
+
+       return eError;
+}
+
+PVRSRV_ERROR RGXReadMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32* ui32RegValue)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       /* Wait for Slave Port to be Ready */
+       psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES))
+       {
+               eError = RGXPollReg32(hPrivate,
+                                                         RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES,
+                                                         RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN
+                                                         | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN,
+                                                         RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN
+                                                         | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN);
+               if (eError == PVRSRV_OK)
+               {
+                       /* Issue a Read */
+                       CHECK_HWBRN_68777(ui32RegAddr);
+                       RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES,
+                                                                       ui32RegAddr | RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES__RD_EN);
+                       (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES); /* Fence write */
+
+                       /* Wait for Slave Port to be Ready */
+                       eError = RGXPollReg32(hPrivate,
+                                                                 RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES,
+                                                                 RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN
+                                                                 | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN,
+                                                                 RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN
+                                                                 | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN);
+                       if (eError != PVRSRV_OK) return eError;
+               }
+#if !defined(NO_HARDWARE)
+               *ui32RegValue = RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAX__META_REGISTER_UNPACKED_ACCESSES);
+#else
+               *ui32RegValue = 0xFFFFFFFF;
+#endif
+       }
+       else
+       {
+               eError = RGXPollReg32(hPrivate,
+                                                         RGX_CR_META_SP_MSLVCTRL1,
+                                                         RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+                                                         RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+               if (eError == PVRSRV_OK)
+               {
+                       /* Issue a Read */
+                       RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN);
+                       (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0); /* Fence write */
+
+                       /* Wait for Slave Port to be Ready */
+                       eError = RGXPollReg32(hPrivate,
+                                                                 RGX_CR_META_SP_MSLVCTRL1,
+                                                                 RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+                                                                 RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+                       if (eError != PVRSRV_OK) return eError;
+               }
+#if !defined(NO_HARDWARE)
+               *ui32RegValue = RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAX);
+#else
+               *ui32RegValue = 0xFFFFFFFF;
+#endif
+       }
+
+       return eError;
+}
+
+
+struct _RGX_SERVER_COMMON_CONTEXT_ {
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       DEVMEM_MEMDESC *psFWCommonContextMemDesc;
+       PRGXFWIF_FWCOMMONCONTEXT sFWCommonContextFWAddr;
+       SERVER_MMU_CONTEXT *psServerMMUContext;
+       DEVMEM_MEMDESC *psFWMemContextMemDesc;
+       DEVMEM_MEMDESC *psFWFrameworkMemDesc;
+       DEVMEM_MEMDESC *psContextStateMemDesc;
+       RGX_CLIENT_CCB *psClientCCB;
+       DEVMEM_MEMDESC *psClientCCBMemDesc;
+       DEVMEM_MEMDESC *psClientCCBCtrlMemDesc;
+       IMG_BOOL bCommonContextMemProvided;
+       IMG_UINT32 ui32ContextID;
+       DLLIST_NODE sListNode;
+       RGX_CONTEXT_RESET_REASON eLastResetReason;
+       IMG_UINT32 ui32LastResetJobRef;
+       IMG_INT32 i32Priority;
+       RGX_CCB_REQUESTOR_TYPE eRequestor;
+};
+
+/*************************************************************************/ /*!
+@Function       _CheckPriority
+@Description    Check if priority is allowed for requestor type
+@Input          psDevInfo    pointer to DevInfo struct
+@Input          i32Priority Requested priority
+@Input          eRequestor   Requestor type specifying data master
+@Return         PVRSRV_ERROR PVRSRV_OK on success
+*/ /**************************************************************************/
+static PVRSRV_ERROR _CheckPriority(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                  IMG_INT32 i32Priority,
+                                                                  RGX_CCB_REQUESTOR_TYPE eRequestor)
+{
+       /* Only one context allowed with real time priority (highest priority) */
+       if (i32Priority == RGX_CTX_PRIORITY_REALTIME)
+       {
+               DLLIST_NODE *psNode, *psNext;
+
+               dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext)
+               {
+                       RGX_SERVER_COMMON_CONTEXT *psThisContext =
+                               IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode);
+
+                       if (psThisContext->i32Priority == RGX_CTX_PRIORITY_REALTIME &&
+                               psThisContext->eRequestor == eRequestor)
+                       {
+                               PVR_LOG(("Only one context with real time priority allowed"));
+                               return PVRSRV_ERROR_INVALID_PARAMS;
+                       }
+               }
+       }
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection,
+                                                                        PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                        RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor,
+                                                                        RGXFWIF_DM eDM,
+                                                                        SERVER_MMU_CONTEXT *psServerMMUContext,
+                                                                        DEVMEM_MEMDESC *psAllocatedMemDesc,
+                                                                        IMG_UINT32 ui32AllocatedOffset,
+                                                                        DEVMEM_MEMDESC *psFWMemContextMemDesc,
+                                                                        DEVMEM_MEMDESC *psContextStateMemDesc,
+                                                                        IMG_UINT32 ui32CCBAllocSize,
+                                                                        IMG_UINT32 ui32CCBMaxAllocSize,
+                                                                        IMG_UINT32 ui32ContextFlags,
+                                                                        IMG_UINT32 ui32Priority,
+                                                                        IMG_UINT32 ui32MaxDeadlineMS,
+                                                                        IMG_UINT64 ui64RobustnessAddress,
+                                                                        RGX_COMMON_CONTEXT_INFO *psInfo,
+                                                                        RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
+       RGXFWIF_FWCOMMONCONTEXT *psFWCommonContext;
+       IMG_UINT32 ui32FWCommonContextOffset;
+       IMG_UINT8 *pui8Ptr;
+       IMG_INT32 i32Priority = (IMG_INT32)ui32Priority;
+       PVRSRV_ERROR eError;
+
+       /*
+        * Allocate all the resources that are required
+        */
+       psServerCommonContext = OSAllocMem(sizeof(*psServerCommonContext));
+       if (psServerCommonContext == NULL)
+       {
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto fail_alloc;
+       }
+
+       psServerCommonContext->psDevInfo = psDevInfo;
+       psServerCommonContext->psServerMMUContext = psServerMMUContext;
+
+       if (psAllocatedMemDesc)
+       {
+               PDUMPCOMMENT(psDeviceNode,
+                                        "Using existing MemDesc for Rogue firmware %s context (offset = %d)",
+                                        aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+                                        ui32AllocatedOffset);
+               ui32FWCommonContextOffset = ui32AllocatedOffset;
+               psServerCommonContext->psFWCommonContextMemDesc = psAllocatedMemDesc;
+               psServerCommonContext->bCommonContextMemProvided = IMG_TRUE;
+       }
+       else
+       {
+               /* Allocate device memory for the firmware context */
+               PDUMPCOMMENT(psDeviceNode,
+                                        "Allocate Rogue firmware %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]);
+               eError = DevmemFwAllocate(psDevInfo,
+                                                               sizeof(*psFWCommonContext),
+                                                               RGX_FWCOMCTX_ALLOCFLAGS,
+                                                               "FwContext",
+                                                               &psServerCommonContext->psFWCommonContextMemDesc);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to allocate firmware %s context (%s)",
+                                __func__,
+                                aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+                                PVRSRVGetErrorString(eError)));
+                       goto fail_contextalloc;
+               }
+               ui32FWCommonContextOffset = 0;
+               psServerCommonContext->bCommonContextMemProvided = IMG_FALSE;
+       }
+
+       /* Record this context so we can refer to it if the FW needs to tell us it was reset. */
+       psServerCommonContext->eLastResetReason    = RGX_CONTEXT_RESET_REASON_NONE;
+       psServerCommonContext->ui32LastResetJobRef = 0;
+       psServerCommonContext->ui32ContextID       = psDevInfo->ui32CommonCtxtCurrentID++;
+
+       /*
+        * Temporarily map the firmware context to the kernel and initialise it
+        */
+       eError = DevmemAcquireCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc,
+                                         (void **)&pui8Ptr);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed to map firmware %s context to CPU (%s)",
+                        __func__,
+                        aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+                        PVRSRVGetErrorString(eError)));
+               goto fail_cpuvirtacquire;
+       }
+
+       /* Allocate the client CCB */
+       eError = RGXCreateCCB(psDevInfo,
+                                                 ui32CCBAllocSize,
+                                                 ui32CCBMaxAllocSize,
+                                                 ui32ContextFlags,
+                                                 psConnection,
+                                                 eRGXCCBRequestor,
+                                                 psServerCommonContext,
+                                                 &psServerCommonContext->psClientCCB,
+                                                 &psServerCommonContext->psClientCCBMemDesc,
+                                                 &psServerCommonContext->psClientCCBCtrlMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: failed to create CCB for %s context (%s)",
+                        __func__,
+                        aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+                        PVRSRVGetErrorString(eError)));
+               goto fail_allocateccb;
+       }
+
+       psFWCommonContext = (RGXFWIF_FWCOMMONCONTEXT *) (pui8Ptr + ui32FWCommonContextOffset);
+       psFWCommonContext->eDM = eDM;
+
+       /* Set the firmware CCB device addresses in the firmware common context */
+       eError = RGXSetFirmwareAddress(&psFWCommonContext->psCCB,
+                                                 psServerCommonContext->psClientCCBMemDesc,
+                                                 0, RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", fail_cccbfwaddr);
+
+       eError = RGXSetFirmwareAddress(&psFWCommonContext->psCCBCtl,
+                                                 psServerCommonContext->psClientCCBCtrlMemDesc,
+                                                 0, RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", fail_cccbctrlfwaddr);
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA))
+       {
+               RGXSetMetaDMAAddress(&psFWCommonContext->sCCBMetaDMAAddr,
+                                                        psServerCommonContext->psClientCCBMemDesc,
+                                                        &psFWCommonContext->psCCB,
+                                                        0);
+       }
+
+       /* Set the memory context device address */
+       psServerCommonContext->psFWMemContextMemDesc = psFWMemContextMemDesc;
+       eError = RGXSetFirmwareAddress(&psFWCommonContext->psFWMemContext,
+                                                 psFWMemContextMemDesc,
+                                                 0, RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:3", fail_fwmemctxfwaddr);
+
+       /* Set the framework register updates address */
+       psServerCommonContext->psFWFrameworkMemDesc = psInfo->psFWFrameworkMemDesc;
+       if (psInfo->psFWFrameworkMemDesc != NULL)
+       {
+               eError = RGXSetFirmwareAddress(&psFWCommonContext->psRFCmd,
+                               psInfo->psFWFrameworkMemDesc,
+                               0, RFW_FWADDR_FLAG_NONE);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:4", fail_fwframeworkfwadd);
+       }
+       else
+       {
+               /* This should never be touched in contexts without a framework
+                * memdesc, but ensure it is zero so that any use of it crashes
+                * visibly.
+                */
+               psFWCommonContext->psRFCmd.ui32Addr = 0;
+       }
+
+       eError = _CheckPriority(psDevInfo, i32Priority, eRGXCCBRequestor);
+       PVR_LOG_GOTO_IF_ERROR(eError, "_CheckPriority", fail_checkpriority);
+
+       psServerCommonContext->i32Priority = i32Priority;
+       psServerCommonContext->eRequestor = eRGXCCBRequestor;
+
+       /* Store the FWMemContext device virtual address in the server MMU
+        * context for use in the schedule-command path */
+       RGXSetFWMemContextDevVirtAddr(psServerMMUContext, psFWCommonContext->psFWMemContext);
+
+       psFWCommonContext->i32Priority = i32Priority;
+       psFWCommonContext->ui32PrioritySeqNum = 0;
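+       /* Clamp the requested deadline to the firmware's per-DM maximum
+        * (the compute DM has its own, separate limit). */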
+       psFWCommonContext->ui32MaxDeadlineMS = MIN(ui32MaxDeadlineMS,
+                                                                                          (eDM == RGXFWIF_DM_CDM ?
+                                                                                               RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS :
+                                                                                               RGXFWIF_MAX_WORKLOAD_DEADLINE_MS));
+       psFWCommonContext->ui64RobustnessAddress = ui64RobustnessAddress;
+
+       /* Store references to the Server Common Context and PID for notifications back from the FW. */
+       psFWCommonContext->ui32ServerCommonContextID = psServerCommonContext->ui32ContextID;
+       psFWCommonContext->ui32PID                   = OSGetCurrentClientProcessIDKM();
+
+       /* Set the firmware GPU context state buffer */
+       psServerCommonContext->psContextStateMemDesc = psContextStateMemDesc;
+       if (psContextStateMemDesc)
+       {
+               eError = RGXSetFirmwareAddress(&psFWCommonContext->psContextState,
+                                                         psContextStateMemDesc,
+                                                         0,
+                                                         RFW_FWADDR_FLAG_NONE);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:5", fail_ctxstatefwaddr);
+       }
+
+       /*
+        * Dump the created context
+        */
+       PDUMPCOMMENT(psDeviceNode,
+                                "Dump %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]);
+       DevmemPDumpLoadMem(psServerCommonContext->psFWCommonContextMemDesc,
+                                          ui32FWCommonContextOffset,
+                                          sizeof(*psFWCommonContext),
+                                          PDUMP_FLAGS_CONTINUOUS);
+
+       /* We've finished the setup so release the CPU mapping */
+       DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc);
+
+       /* Map this allocation into the FW */
+       eError = RGXSetFirmwareAddress(&psServerCommonContext->sFWCommonContextFWAddr,
+                                                 psServerCommonContext->psFWCommonContextMemDesc,
+                                                 ui32FWCommonContextOffset,
+                                                 RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:6", fail_fwcommonctxfwaddr);
+
+#if defined(__linux__)
+       {
+               IMG_UINT32 ui32FWAddr;
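+               /* For the GEOM and 3D DMs the trace event records the address
+                * of the enclosing render context; other DMs record the common
+                * context address directly. */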
+               switch (eDM) {
+                       case RGXFWIF_DM_GEOM:
+                               ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t)
+                                               psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, sTAContext));
+                               break;
+                       case RGXFWIF_DM_3D:
+                               ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t)
+                                               psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, s3DContext));
+                               break;
+                       default:
+                               ui32FWAddr = psServerCommonContext->sFWCommonContextFWAddr.ui32Addr;
+                               break;
+               }
+
+               trace_rogue_create_fw_context(OSGetCurrentClientProcessNameKM(),
+                                                                         aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+                                                                         ui32FWAddr);
+       }
+#endif
+       /* Add the node to the list now that the context is finalised */
+       OSWRLockAcquireWrite(psDevInfo->hCommonCtxtListLock);
+       dllist_add_to_tail(&(psDevInfo->sCommonCtxtListHead), &(psServerCommonContext->sListNode));
+       OSWRLockReleaseWrite(psDevInfo->hCommonCtxtListLock);
+
+       *ppsServerCommonContext = psServerCommonContext;
+       return PVRSRV_OK;
+
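+/* Error paths: undo the successful steps above in reverse order. */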
+fail_fwcommonctxfwaddr:
+       if (psContextStateMemDesc)
+       {
+               RGXUnsetFirmwareAddress(psContextStateMemDesc);
+       }
+fail_ctxstatefwaddr:
+fail_checkpriority:
+       if (psInfo->psFWFrameworkMemDesc != NULL)
+       {
+               RGXUnsetFirmwareAddress(psInfo->psFWFrameworkMemDesc);
+       }
+fail_fwframeworkfwadd:
+       RGXUnsetFirmwareAddress(psFWMemContextMemDesc);
+fail_fwmemctxfwaddr:
+       RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBCtrlMemDesc);
+fail_cccbctrlfwaddr:
+       RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBMemDesc);
+fail_cccbfwaddr:
+       RGXDestroyCCB(psDevInfo, psServerCommonContext->psClientCCB);
+fail_allocateccb:
+       DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc);
+fail_cpuvirtacquire:
+       RGXUnsetFirmwareAddress(psServerCommonContext->psFWCommonContextMemDesc);
+       if (!psServerCommonContext->bCommonContextMemProvided)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psServerCommonContext->psFWCommonContextMemDesc);
+               psServerCommonContext->psFWCommonContextMemDesc = NULL;
+       }
+fail_contextalloc:
+       OSFreeMem(psServerCommonContext);
+fail_alloc:
+       return eError;
+}
+
+void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+       OSWRLockAcquireWrite(psServerCommonContext->psDevInfo->hCommonCtxtListLock);
+       /* Remove the context from the list of all contexts. */
+       dllist_remove_node(&psServerCommonContext->sListNode);
+       OSWRLockReleaseWrite(psServerCommonContext->psDevInfo->hCommonCtxtListLock);
+
+       /*
+               Unmap the context itself and then all its resources
+       */
+
+       /* Unmap the FW common context */
+       RGXUnsetFirmwareAddress(psServerCommonContext->psFWCommonContextMemDesc);
+       /* Unmap the context state buffer (if there was one) */
+       if (psServerCommonContext->psContextStateMemDesc)
+       {
+               RGXUnsetFirmwareAddress(psServerCommonContext->psContextStateMemDesc);
+       }
+       /* Unmap the framework buffer */
+       if (psServerCommonContext->psFWFrameworkMemDesc != NULL)
+       {
+               RGXUnsetFirmwareAddress(psServerCommonContext->psFWFrameworkMemDesc);
+       }
+       /* Unmap client CCB and CCB control */
+       RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBCtrlMemDesc);
+       RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBMemDesc);
+       /* Unmap the memory context */
+       RGXUnsetFirmwareAddress(psServerCommonContext->psFWMemContextMemDesc);
+
+       /* Destroy the client CCB */
+       RGXDestroyCCB(psServerCommonContext->psDevInfo, psServerCommonContext->psClientCCB);
+
+       /* Free the FW common context (if there was one) */
+       if (!psServerCommonContext->bCommonContextMemProvided)
+       {
+               DevmemFwUnmapAndFree(psServerCommonContext->psDevInfo,
+                                               psServerCommonContext->psFWCommonContextMemDesc);
+               psServerCommonContext->psFWCommonContextMemDesc = NULL;
+       }
+       /* Free the host's representation of the common context */
+       OSFreeMem(psServerCommonContext);
+}
+
+PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+       return psServerCommonContext->sFWCommonContextFWAddr;
+}
+
+RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+       return psServerCommonContext->psClientCCB;
+}
+
+SERVER_MMU_CONTEXT *FWCommonContextGetServerMMUCtx(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+       return psServerCommonContext->psServerMMUContext;
+}
+
+RGX_CONTEXT_RESET_REASON FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+                                                               IMG_UINT32 *pui32LastResetJobRef)
+{
+       RGX_CONTEXT_RESET_REASON eLastResetReason;
+
+       PVR_ASSERT(psServerCommonContext != NULL);
+       PVR_ASSERT(pui32LastResetJobRef != NULL);
+
+       /* Take the most recent reason & job ref and reset for next time... */
+       eLastResetReason      = psServerCommonContext->eLastResetReason;
+       *pui32LastResetJobRef = psServerCommonContext->ui32LastResetJobRef;
+       psServerCommonContext->eLastResetReason = RGX_CONTEXT_RESET_REASON_NONE;
+       psServerCommonContext->ui32LastResetJobRef = 0;
+
+       if (eLastResetReason == RGX_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH)
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                        "A Hard Context Switch was triggered on the GPU to ensure Quality of Service."));
+       }
+
+       return eLastResetReason;
+}
+
+PVRSRV_RGXDEV_INFO* FWCommonContextGetRGXDevInfo(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+       return psServerCommonContext->psDevInfo;
+}
+
+PVRSRV_ERROR RGXGetFWCommonContextAddrFromServerMMUCtx(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                                          SERVER_MMU_CONTEXT *psServerMMUContext,
+                                                                                                          PRGXFWIF_FWCOMMONCONTEXT *psFWCommonContextFWAddr)
+{
+       DLLIST_NODE *psNode, *psNext;
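+       /* Walk the device's list of common contexts and return the FW address
+        * of the first context created on the given server MMU context. */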
+       dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext)
+       {
+               RGX_SERVER_COMMON_CONTEXT *psThisContext =
+                       IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode);
+
+               if (psThisContext->psServerMMUContext == psServerMMUContext)
+               {
+                       psFWCommonContextFWAddr->ui32Addr = psThisContext->sFWCommonContextFWAddr.ui32Addr;
+                       return PVRSRV_OK;
+               }
+       }
+       return PVRSRV_ERROR_INVALID_PARAMS;
+}
+
+PVRSRV_ERROR FWCommonContextSetFlags(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+                                     IMG_UINT32 ui32ContextFlags)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       if (BITMASK_ANY(ui32ContextFlags, ~RGX_CONTEXT_FLAGS_WRITEABLE_MASK))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Context flag(s) invalid or not writeable (%d)",
+                        __func__, ui32ContextFlags));
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+       }
+       else
+       {
+               RGXSetCCBFlags(psServerCommonContext->psClientCCB,
+                              ui32ContextFlags);
+       }
+
+       return eError;
+}
+
+/*!
+*******************************************************************************
+ @Function             RGXFreeCCB
+ @Description  Free the kernel or firmware CCB
+ @Input                        psDevInfo
+ @Input                        ppsCCBCtl
+ @Input                        ppsCCBCtlMemDesc
+ @Input                        ppui8CCB
+ @Input                        ppsCCBMemDesc
+******************************************************************************/
+static void RGXFreeCCB(PVRSRV_RGXDEV_INFO      *psDevInfo,
+                                          RGXFWIF_CCB_CTL              **ppsCCBCtl,
+                                          DEVMEM_MEMDESC               **ppsCCBCtlMemDesc,
+                                          IMG_UINT8                    **ppui8CCB,
+                                          DEVMEM_MEMDESC               **ppsCCBMemDesc)
+{
+       if (*ppsCCBMemDesc != NULL)
+       {
+               if (*ppui8CCB != NULL)
+               {
+                       DevmemReleaseCpuVirtAddr(*ppsCCBMemDesc);
+                       *ppui8CCB = NULL;
+               }
+               DevmemFwUnmapAndFree(psDevInfo, *ppsCCBMemDesc);
+               *ppsCCBMemDesc = NULL;
+       }
+       if (*ppsCCBCtlMemDesc != NULL)
+       {
+               if (*ppsCCBCtl != NULL)
+               {
+                       DevmemReleaseCpuVirtAddr(*ppsCCBCtlMemDesc);
+                       *ppsCCBCtl = NULL;
+               }
+               DevmemFwUnmapAndFree(psDevInfo, *ppsCCBCtlMemDesc);
+               *ppsCCBCtlMemDesc = NULL;
+       }
+}
+
+/*!
+*******************************************************************************
+ @Function             RGXFreeCCBReturnSlots
+ @Description  Free the kernel CCB's return slot array and associated mappings
+ @Input                        psDevInfo              Device Info struct
+ @Input                        ppui32CCBRtnSlots      CPU mapping of slot array
+ @Input                        ppsCCBRtnSlotsMemDesc  Slot array's device memdesc
+******************************************************************************/
+static void RGXFreeCCBReturnSlots(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                  IMG_UINT32         **ppui32CCBRtnSlots,
+                                                                 DEVMEM_MEMDESC     **ppsCCBRtnSlotsMemDesc)
+{
+       /* Free the return slot array if allocated */
+       if (*ppsCCBRtnSlotsMemDesc != NULL)
+       {
+               /* Before freeing, ensure the CPU mapping is also released */
+               if (*ppui32CCBRtnSlots != NULL)
+               {
+                       DevmemReleaseCpuVirtAddr(*ppsCCBRtnSlotsMemDesc);
+                       *ppui32CCBRtnSlots = NULL;
+               }
+               DevmemFwUnmapAndFree(psDevInfo, *ppsCCBRtnSlotsMemDesc);
+               *ppsCCBRtnSlotsMemDesc = NULL;
+       }
+}
+
+/*!
+*******************************************************************************
+ @Function             RGXSetupCCB
+ @Description  Allocate and initialise a circular command buffer
+ @Input                        psDevInfo
+ @Input                        ppsCCBCtl
+ @Input                        ppsCCBCtlMemDesc
+ @Input                        ppui8CCB
+ @Input                        ppsCCBMemDesc
+ @Input                        psCCBCtlFWAddr
+ @Input                        psCCBFWAddr
+ @Input                        ui32NumCmdsLog2
+ @Input                        ui32CmdSize
+ @Input                        uiCCBMemAllocFlags
+ @Input                        pszName
+
+ @Return               PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXSetupCCB(PVRSRV_RGXDEV_INFO     *psDevInfo,
+                                                               RGXFWIF_CCB_CTL         **ppsCCBCtl,
+                                                               DEVMEM_MEMDESC          **ppsCCBCtlMemDesc,
+                                                               IMG_UINT8                       **ppui8CCB,
+                                                               DEVMEM_MEMDESC          **ppsCCBMemDesc,
+                                                               PRGXFWIF_CCB_CTL        *psCCBCtlFWAddr,
+                                                               PRGXFWIF_CCB            *psCCBFWAddr,
+                                                               IMG_UINT32                      ui32NumCmdsLog2,
+                                                               IMG_UINT32                      ui32CmdSize,
+                                                               PVRSRV_MEMALLOCFLAGS_T uiCCBMemAllocFlags,
+                                                               const IMG_CHAR          *pszName)
+{
+       PVRSRV_ERROR            eError;
+       RGXFWIF_CCB_CTL         *psCCBCtl;
+       IMG_UINT32              ui32CCBSize = (1U << ui32NumCmdsLog2);
+       IMG_CHAR                szCCBCtlName[DEVMEM_ANNOTATION_MAX_LEN];
+       IMG_INT32               iStrLen;
+
+       /* Append "Control" to the name for the control struct. */
+       iStrLen = OSSNPrintf(szCCBCtlName, sizeof(szCCBCtlName), "%sControl", pszName);
+       PVR_ASSERT(iStrLen < sizeof(szCCBCtlName));
+
+       if (unlikely(iStrLen < 0))
+       {
+               OSStringLCopy(szCCBCtlName, "FwCCBControl", DEVMEM_ANNOTATION_MAX_LEN);
+       }
+
+       /* Allocate memory for the CCB control. */
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS,
+                                                                 sizeof(RGXFWIF_CCB_CTL),
+                                                                 szCCBCtlName,
+                                                                 ppsCCBCtlMemDesc,
+                                                                 psCCBCtlFWAddr,
+                                                                 (void**) ppsCCBCtl,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail);
+
+       /*
+        * Allocate memory for the CCB.
+        * (this will reference further command data in non-shared CCBs)
+        */
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 uiCCBMemAllocFlags,
+                                                                 ui32CCBSize * ui32CmdSize,
+                                                                 pszName,
+                                                                 ppsCCBMemDesc,
+                                                                 psCCBFWAddr,
+                                                                 (void**) ppui8CCB,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail);
+
+       /*
+        * Initialise the CCB control.
+        */
+       psCCBCtl = *ppsCCBCtl;
+       psCCBCtl->ui32WriteOffset = 0;
+       psCCBCtl->ui32ReadOffset = 0;
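+       /* The CCB holds 2^ui32NumCmdsLog2 commands, so offsets wrap with a
+        * simple mask (e.g. ui32NumCmdsLog2 = 5 gives 32 slots and a wrap
+        * mask of 0x1F). */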
+       psCCBCtl->ui32WrapMask = ui32CCBSize - 1;
+       psCCBCtl->ui32CmdSize = ui32CmdSize;
+
+       /* Pdump the CCB control */
+       PDUMPCOMMENT(psDevInfo->psDeviceNode, "Initialise %s", szCCBCtlName);
+       DevmemPDumpLoadMem(*ppsCCBCtlMemDesc,
+                                          0,
+                                          sizeof(RGXFWIF_CCB_CTL),
+                                          0);
+
+       return PVRSRV_OK;
+
+fail:
+       RGXFreeCCB(psDevInfo,
+                          ppsCCBCtl,
+                          ppsCCBCtlMemDesc,
+                          ppui8CCB,
+                          ppsCCBMemDesc);
+
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+#if defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK)
+static void RGXSetupFaultReadRegisterRollback(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       PMR *psPMR;
+
+       /* Run-time check feature support */
+       if (PVRSRV_IS_FEATURE_SUPPORTED(psDevInfo->psDeviceNode, SLC_FAULT_ACCESS_ADDR_PHYS))
+       {
+               if (psDevInfo->psRGXFaultAddressMemDesc)
+               {
+                       if (DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc, (void **)&psPMR) == PVRSRV_OK)
+                       {
+                               PMRUnlockSysPhysAddresses(psPMR);
+                       }
+                       DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFaultAddressMemDesc);
+                       psDevInfo->psRGXFaultAddressMemDesc = NULL;
+               }
+       }
+}
+
+static PVRSRV_ERROR RGXSetupFaultReadRegister(PVRSRV_DEVICE_NODE *psDeviceNode, RGXFWIF_SYSINIT *psFwSysInit)
+{
+       PVRSRV_ERROR            eError = PVRSRV_OK;
+       IMG_UINT32                      *pui32MemoryVirtAddr;
+       IMG_UINT32                      i;
+       size_t                          ui32PageSize = OSGetPageSize();
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+       PMR                                     *psPMR;
+
+       /* Run-time check feature support */
+       if (!PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, SLC_FAULT_ACCESS_ADDR_PHYS))
+       {
+               return PVRSRV_OK;
+       }
+
+       /* Allocate page of memory to use for page faults on non-blocking memory transactions.
+        * Doesn't need to be cleared as it is initialised with the 0xDEADBEEF pattern below. */
+       psDevInfo->psRGXFaultAddressMemDesc = NULL;
+       eError = DevmemFwAllocateExportable(psDeviceNode,
+                       ui32PageSize,
+                       ui32PageSize,
+                       RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS & ~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+                       "FwExFaultAddress",
+                       &psDevInfo->psRGXFaultAddressMemDesc);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed to allocate mem for fault address (%u)",
+                        __func__, eError));
+               goto failFaultAddressDescAlloc;
+       }
+
+       eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc,
+                                                                         (void **)&pui32MemoryVirtAddr);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed to acquire mem for fault address (%u)",
+                        __func__, eError));
+               goto failFaultAddressDescAqCpuVirt;
+       }
+
+       if (!psDeviceNode->bAutoVzFwIsUp)
+       {
+               /* fill the page with a known pattern when booting the firmware */
+               for (i = 0; i < ui32PageSize/sizeof(IMG_UINT32); i++)
+               {
+                       *(pui32MemoryVirtAddr + i) = 0xDEADBEEF;
+               }
+       }
+
+       OSWriteMemoryBarrier(pui32MemoryVirtAddr);
+
+       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc);
+
+       eError = DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc, (void **)&psPMR);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Error getting PMR for fault address (%u)",
+                        __func__, eError));
+
+               goto failFaultAddressDescGetPMR;
+       }
+       else
+       {
+               IMG_BOOL bValid;
+               IMG_UINT32 ui32Log2PageSize = OSGetPageShift();
+
+               eError = PMRLockSysPhysAddresses(psPMR);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Error locking physical address for fault address MemDesc (%u)",
+                                __func__, eError));
+
+                       goto failFaultAddressDescLockPhys;
+               }
+
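+               /* Look up the device physical address of the single fault page
+                * and hand it to the firmware via the sys init structure. */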
+               eError = PMR_DevPhysAddr(psPMR, ui32Log2PageSize, 1, 0, &(psFwSysInit->sFaultPhysAddr), &bValid);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Error getting physical address for fault address MemDesc (%u)",
+                                __func__, eError));
+
+                       goto failFaultAddressDescGetPhys;
+               }
+
+               if (!bValid)
+               {
+                       psFwSysInit->sFaultPhysAddr.uiAddr = 0;
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed getting physical address for fault address MemDesc - invalid page (0x%" IMG_UINT64_FMTSPECX ")",
+                                __func__, psFwSysInit->sFaultPhysAddr.uiAddr));
+
+                       goto failFaultAddressDescGetPhys;
+               }
+       }
+
+       return PVRSRV_OK;
+
+failFaultAddressDescGetPhys:
+       PMRUnlockSysPhysAddresses(psPMR);
+
+failFaultAddressDescLockPhys:
+failFaultAddressDescGetPMR:
+failFaultAddressDescAqCpuVirt:
+       DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFaultAddressMemDesc);
+       psDevInfo->psRGXFaultAddressMemDesc = NULL;
+
+failFaultAddressDescAlloc:
+
+       return eError;
+}
+
+#if defined(PDUMP)
+/* Replace the device physical address with the one the PDump player allocates at run time */
+static PVRSRV_ERROR RGXPDumpFaultReadRegister(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       PVRSRV_ERROR eError;
+       PMR *psFWInitPMR, *psFaultAddrPMR;
+       IMG_UINT32 ui32Dstoffset;
+
+       /* Run-time check feature support */
+       if (!PVRSRV_IS_FEATURE_SUPPORTED(psDevInfo->psDeviceNode, SLC_FAULT_ACCESS_ADDR_PHYS))
+       {
+               return PVRSRV_OK;
+       }
+
+       psFWInitPMR = (PMR *)(psDevInfo->psRGXFWIfSysInitMemDesc->psImport->hPMR);
+       ui32Dstoffset = psDevInfo->psRGXFWIfSysInitMemDesc->uiOffset + offsetof(RGXFWIF_SYSINIT, sFaultPhysAddr.uiAddr);
+
+       psFaultAddrPMR = (PMR *)(psDevInfo->psRGXFaultAddressMemDesc->psImport->hPMR);
+
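+       /* Patch the 64-bit sFaultPhysAddr.uiAddr field of the FW sys init
+        * structure with the address the PDump player assigns to the fault
+        * page at playback time. */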
+       eError = PDumpMemLabelToMem64(psFaultAddrPMR,
+                               psFWInitPMR,
+                               0,
+                               ui32Dstoffset,
+                               PDUMP_FLAGS_CONTINUOUS);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Dump of Fault Page Phys address failed(%u)", __func__, eError));
+       }
+       return eError;
+}
+#endif
+#endif /* defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK) */
+
+#if defined(SUPPORT_TBI_INTERFACE)
+/*************************************************************************/ /*!
+@Function       RGXTBIBufferIsInitRequired
+
+@Description    Returns true if the firmware TBI buffer is not yet allocated
+               but might soon be required by the firmware. The TBI buffer is
+               allocated on demand to reduce the RAM footprint on systems
+               that do not need it.
+
+@Input          psDevInfo       RGX device info
+
+@Return                IMG_BOOL        Whether an on-demand allocation is needed
+*/ /**************************************************************************/
+INLINE IMG_BOOL RGXTBIBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       RGXFWIF_TRACEBUF*  psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl;
+
+       /* The firmware expects a tbi buffer only when:
+        *      - Logtype is "tbi"
+        */
+       if ((psDevInfo->psRGXFWIfTBIBufferMemDesc == NULL)
+                && (psTraceBufCtl->ui32LogType & ~RGXFWIF_LOG_TYPE_TRACE)
+                && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK))
+       {
+               return IMG_TRUE;
+       }
+
+       return IMG_FALSE;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXTBIBufferDeinit
+
+@Description    Deinitialises all the allocations and references that are made
+               for the FW TBI buffer
+
+@Input          psDevInfo      RGX device info
+@Return                void
+*/ /**************************************************************************/
+static void RGXTBIBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfTBIBufferMemDesc);
+       psDevInfo->psRGXFWIfTBIBufferMemDesc = NULL;
+       psDevInfo->ui32FWIfTBIBufferSize = 0;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXTBIBufferInitOnDemandResources
+
+@Description    Allocates the firmware TBI buffer required for reading the SF
+               strings and initialises it with the SF entries.
+
+@Input          psDevInfo       RGX device info
+
+@Return                PVRSRV_OK       If all went well, PVRSRV_ERROR otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXTBIBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       PVRSRV_ERROR       eError = PVRSRV_OK;
+       IMG_UINT32         i, ui32Len;
+       const IMG_UINT32   ui32FWTBIBufsize = g_ui32SFsCount * sizeof(RGXFW_STID_FMT);
+       RGXFW_STID_FMT     *psFW_SFs = NULL;
+
+       /* The firmware address should not already be set */
+       if (psDevInfo->sRGXFWIfTBIBuffer.ui32Addr)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: FW address for FWTBI is already set. Replacing it with the newly allocated one",
+                        __func__));
+       }
+
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS,
+                                                                 ui32FWTBIBufsize,
+                                                                 "FwTBIBuffer",
+                                                                 &psDevInfo->psRGXFWIfTBIBufferMemDesc,
+                                                                 &psDevInfo->sRGXFWIfTBIBuffer,
+                                                                 (void**)&psFW_SFs,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail);
+
+       /* Copy SFs entries to FW buffer */
+       for (i = 0; i < g_ui32SFsCount; i++)
+       {
+               OSDeviceMemCopy(&psFW_SFs[i].ui32Id, &SFs[i].ui32Id, sizeof(SFs[i].ui32Id));
+               ui32Len = OSStringLength(SFs[i].psName);
+               OSDeviceMemCopy(psFW_SFs[i].sName, SFs[i].psName, MIN(ui32Len, IMG_SF_STRING_MAX_SIZE - 1));
+       }
+
+       /* flush write buffers for psFW_SFs */
+       OSWriteMemoryBarrier(psFW_SFs);
+
+       /* Set size of TBI buffer */
+       psDevInfo->ui32FWIfTBIBufferSize = ui32FWTBIBufsize;
+
+       /* release CPU mapping */
+       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTBIBufferMemDesc);
+
+       return PVRSRV_OK;
+fail:
+       RGXTBIBufferDeinit(psDevInfo);
+       return eError;
+}
+#endif
+
+/*************************************************************************/ /*!
+@Function       RGXTraceBufferIsInitRequired
+
+@Description    Returns true if the firmware trace buffer is not yet allocated
+               but might soon be required by the firmware. The trace buffer is
+               allocated on demand to reduce the RAM footprint on systems that
+               do not need firmware tracing.
+
+@Input          psDevInfo       RGX device info
+
+@Return                IMG_BOOL        Whether an on-demand allocation is needed
+*/ /**************************************************************************/
+INLINE IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       RGXFWIF_TRACEBUF*  psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl;
+
+       /* The firmware expects a trace buffer only when:
+        *      - Logtype is "trace" AND
+        *      - at least one LogGroup is configured
+        *      - the Driver Mode is not Guest
+        */
+       if ((psDevInfo->psRGXFWIfTraceBufferMemDesc[0] == NULL)
+               && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)
+               && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)
+               && !PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               return IMG_TRUE;
+       }
+
+       return IMG_FALSE;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXTraceBufferDeinit
+
+@Description    Deinitialises all the allocations and references that are made
+               for the FW trace buffer(s)
+
+@Input          psDevInfo      RGX device info
+@Return                void
+*/ /**************************************************************************/
+static void RGXTraceBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       RGXFWIF_TRACEBUF*  psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl;
+       IMG_UINT32 i;
+
+       for (i = 0; i < RGXFW_THREAD_NUM; i++)
+       {
+               if (psDevInfo->psRGXFWIfTraceBufferMemDesc[i])
+               {
+                       if (psTraceBufCtl->sTraceBuf[i].pui32TraceBuffer != NULL)
+                       {
+                               DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufferMemDesc[i]);
+                               psTraceBufCtl->sTraceBuf[i].pui32TraceBuffer = NULL;
+                       }
+
+                       DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfTraceBufferMemDesc[i]);
+                       psDevInfo->psRGXFWIfTraceBufferMemDesc[i] = NULL;
+               }
+       }
+}
+
+/*************************************************************************/ /*!
+@Function       RGXTraceBufferInitOnDemandResources
+
+@Description    Allocates the firmware trace buffer required for dumping trace
+               info from the firmware.
+
+@Input          psDevInfo       RGX device info
+
+@Return                PVRSRV_OK       If all went well, PVRSRV_ERROR otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO* psDevInfo,
+                                                                                                PVRSRV_MEMALLOCFLAGS_T uiAllocFlags)
+{
+       RGXFWIF_TRACEBUF*  psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl;
+       PVRSRV_ERROR       eError = PVRSRV_OK;
+       IMG_UINT32         ui32FwThreadNum;
+       IMG_UINT32         ui32DefaultTraceBufSize;
+       IMG_DEVMEM_SIZE_T  uiTraceBufSizeInBytes;
+       void               *pvAppHintState = NULL;
+       IMG_CHAR           pszBufferName[] = "FwTraceBuffer_Thread0";
+
+       /* Check AppHint value for module-param FWTraceBufSizeInDWords */
+       OSCreateKMAppHintState(&pvAppHintState);
+       ui32DefaultTraceBufSize = RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS;
+       OSGetKMAppHintUINT32(APPHINT_NO_DEVICE,
+                                                pvAppHintState,
+                                                FWTraceBufSizeInDWords,
+                                                &ui32DefaultTraceBufSize,
+                                                &psTraceBufCtl->ui32TraceBufSizeInDWords);
+       OSFreeKMAppHintState(pvAppHintState);
+       pvAppHintState = NULL;
+
+       uiTraceBufSizeInBytes = psTraceBufCtl->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32);
+
+       for (ui32FwThreadNum = 0; ui32FwThreadNum < RGXFW_THREAD_NUM; ui32FwThreadNum++)
+       {
+#if !defined(SUPPORT_AUTOVZ)
+               /* Ensure allocation API is only called when not already allocated */
+               PVR_ASSERT(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum] == NULL);
+               /* Firmware address should not be already set */
+               PVR_ASSERT(psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32RGXFWIfTraceBuffer.ui32Addr == 0x0);
+#endif
+
+               /* update the firmware thread number in the Trace Buffer's name */
+               pszBufferName[sizeof(pszBufferName) - 2] += ui32FwThreadNum;
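+               /* e.g. for firmware thread 1 this yields "FwTraceBuffer_Thread1" */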
+
+               eError = RGXSetupFwAllocation(psDevInfo,
+                                                                         uiAllocFlags,
+                                                                         uiTraceBufSizeInBytes,
+                                                                         pszBufferName,
+                                                                         &psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum],
+                                                                         &psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32RGXFWIfTraceBuffer,
+                                                                         (void**)&psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32TraceBuffer,
+                                                                         RFW_FWADDR_NOREF_FLAG);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail);
+       }
+
+       return PVRSRV_OK;
+
+fail:
+       RGXTraceBufferDeinit(psDevInfo);
+       return eError;
+}
+
+#if defined(SUPPORT_POWMON_COMPONENT) && defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS)
+/*************************************************************************/ /*!
+@Function       RGXPowmonBufferIsInitRequired
+
+@Description    Returns true if the power monitoring buffer is not yet allocated
+               but might soon be required by the firmware. The powmon buffer is
+               allocated on demand to reduce the RAM footprint on systems that
+               do not need power monitoring.
+
+@Input          psDevInfo       RGX device info
+
+@Return                IMG_BOOL        Whether an on-demand allocation is needed
+*/ /**************************************************************************/
+INLINE IMG_BOOL RGXPowmonBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       /* The firmware expects a power monitoring buffer only when:
+        *      - Single-shot power counters are enabled with RGX_HWPERF_PWR_EST_REQUEST
+        *      - the Driver Mode is not Guest
+        */
+       if ((psDevInfo->psRGXFWIfPowMonBufferMemDesc == NULL)
+               && (psDevInfo->ui64HWPerfFilter & RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_PWR_EST_REQUEST))
+               && !PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               return IMG_TRUE;
+       }
+
+       return IMG_FALSE;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXPowmonBufferDeinit
+
+@Description    Deinitialises all the allocations and references that are made
+               for the FW power monitoring buffer
+
+@Input          psDevInfo      RGX device info
+@Return                void
+*/ /**************************************************************************/
+static void RGXPowmonBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+
+       if (psDevInfo->psRGXFWIfPowMonBufferMemDesc)
+       {
+               if (psFwSysData->sPowerMonBuf.pui32TraceBuffer != NULL)
+               {
+                       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfPowMonBufferMemDesc);
+                       psFwSysData->sPowerMonBuf.pui32TraceBuffer = NULL;
+               }
+
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfPowMonBufferMemDesc);
+               psDevInfo->psRGXFWIfPowMonBufferMemDesc = NULL;
+       }
+}
+
+/*************************************************************************/ /*!
+@Function       RGXPowmonBufferInitOnDemandResources
+
+@Description    Allocates the power monitoring buffer.
+
+@Input          psDevInfo      RGX device info
+
+@Return                    PVRSRV_OK   If all went well, PVRSRV_ERROR otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXPowmonBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       RGXFWIF_SYSDATA    *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+       PVRSRV_ERROR       eError = PVRSRV_OK;
+
+#define POWER_MON_BUF_SIZE     (8192UL)
+       /* Ensure allocation API is only called when not already allocated */
+       PVR_ASSERT(psDevInfo->psRGXFWIfPowMonBufferMemDesc == NULL);
+
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS,
+                                                                 POWER_MON_BUF_SIZE,
+                                                                 "FwPowMonBuffer",
+                                                                 &psDevInfo->psRGXFWIfPowMonBufferMemDesc,
+                                                                 &psFwSysData->sPowerMonBuf.pui32RGXFWIfTraceBuffer,
+                                                                 (void **)&psFwSysData->sPowerMonBuf.pui32TraceBuffer,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "Power Monitoring Buffer allocation", fail);
+
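+       /* Record the buffer size in DWORDs (8192 bytes >> 2 = 2048 DWORDs) */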
+       psFwSysData->ui32PowerMonBufSizeInDWords = POWER_MON_BUF_SIZE >> 2;
+       OSWriteMemoryBarrier(psFwSysData->sPowerMonBuf.pui32TraceBuffer);
+
+       return PVRSRV_OK;
+fail:
+       RGXPowmonBufferDeinit(psDevInfo);
+       return eError;
+}
+#endif
+
+#if defined(PDUMP)
+/*************************************************************************/ /*!
+@Function       RGXPDumpLoadFWInitData
+
+@Description    Dumps the initial state of the firmware interface structures
+                (runtime configuration, trace control, system/OS data and the
+                init structures) and the run-time configurable options into
+                the PDump stream.
+
+@Input          psDevInfo RGX device info
+ */ /*************************************************************************/
+static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                  IMG_UINT32         ui32HWPerfCountersDataSize,
+                                                                  IMG_UINT32         ui32RenderKillingCtl,
+                                                                  IMG_UINT32         ui32CDMTDMKillingCtl,
+                                                                  IMG_BOOL           bEnableSignatureChecks)
+{
+       IMG_UINT32 ui32ConfigFlags    = psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags;
+       IMG_UINT32 ui32FwOsCfgFlags   = psDevInfo->psRGXFWIfFwOsData->ui32FwOsConfigFlags;
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode, "Dump RGXFW Init data");
+       if (!bEnableSignatureChecks)
+       {
+               PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                        "(to enable rgxfw signatures place the following line after the RTCONF line)");
+               DevmemPDumpLoadMem(psDevInfo->psRGXFWIfSysInitMemDesc,
+                                                  offsetof(RGXFWIF_SYSINIT, asSigBufCtl),
+                                                  sizeof(RGXFWIF_SIGBUF_CTL)*(RGXFWIF_DM_MAX),
+                                                  PDUMP_FLAGS_CONTINUOUS);
+       }
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Dump initial state of FW runtime configuration");
+       DevmemPDumpLoadMem(psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+                                          0,
+                                          sizeof(RGXFWIF_RUNTIME_CFG),
+                                          PDUMP_FLAGS_CONTINUOUS);
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Dump rgxfw hwperfctl structure");
+       DevmemPDumpLoadZeroMem (psDevInfo->psRGXFWIfHWPerfCountersMemDesc,
+                                                       0,
+                                                       ui32HWPerfCountersDataSize,
+                                                       PDUMP_FLAGS_CONTINUOUS);
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Dump rgxfw trace control structure");
+       DevmemPDumpLoadMem(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+                                          0,
+                                          sizeof(RGXFWIF_TRACEBUF),
+                                          PDUMP_FLAGS_CONTINUOUS);
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Dump firmware system data structure");
+       DevmemPDumpLoadMem(psDevInfo->psRGXFWIfFwSysDataMemDesc,
+                                          0,
+                                          sizeof(RGXFWIF_SYSDATA),
+                                          PDUMP_FLAGS_CONTINUOUS);
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Dump firmware OS data structure");
+       DevmemPDumpLoadMem(psDevInfo->psRGXFWIfFwOsDataMemDesc,
+                                          0,
+                                          sizeof(RGXFWIF_OSDATA),
+                                          PDUMP_FLAGS_CONTINUOUS);
+
+#if defined(SUPPORT_TBI_INTERFACE)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Dump rgx TBI buffer");
+       DevmemPDumpLoadMem(psDevInfo->psRGXFWIfTBIBufferMemDesc,
+                                          0,
+                                          psDevInfo->ui32FWIfTBIBufferSize,
+                                          PDUMP_FLAGS_CONTINUOUS);
+#endif /* defined(SUPPORT_TBI_INTERFACE) */
+
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Dump rgxfw register configuration buffer");
+       DevmemPDumpLoadMem(psDevInfo->psRGXFWIfRegCfgMemDesc,
+                                          0,
+                                          sizeof(RGXFWIF_REG_CFG),
+                                          PDUMP_FLAGS_CONTINUOUS);
+#endif /* defined(SUPPORT_USER_REGISTER_CONFIGURATION) */
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Dump rgxfw system init structure");
+       DevmemPDumpLoadMem(psDevInfo->psRGXFWIfSysInitMemDesc,
+                                          0,
+                                          sizeof(RGXFWIF_SYSINIT),
+                                          PDUMP_FLAGS_CONTINUOUS);
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Dump rgxfw os init structure");
+       DevmemPDumpLoadMem(psDevInfo->psRGXFWIfOsInitMemDesc,
+                                          0,
+                                          sizeof(RGXFWIF_OSINIT),
+                                          PDUMP_FLAGS_CONTINUOUS);
+
+#if defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK)
+       /* RGXFW Init structure needs to be loaded before we overwrite FaultPhysAddr, else this address patching won't have any effect */
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Overwrite FaultPhysAddr of FwSysInit in pdump with actual physical address");
+       RGXPDumpFaultReadRegister(psDevInfo);
+#endif /* defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK) */
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "RTCONF: run-time configuration");
+
+       /* Dump the config options so they can be edited. */
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "(Set the FW system config options here)");
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Ctx Switch Rand mode:                      0x%08x)", RGXFWIF_INICFG_CTXSWITCH_MODE_RAND);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Ctx Switch Soft Reset Enable:              0x%08x)", RGXFWIF_INICFG_CTXSWITCH_SRESET_EN);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Enable HWPerf:                             0x%08x)", RGXFWIF_INICFG_HWPERF_EN);
+#if defined(SUPPORT_VALIDATION)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Enable generic DM Killing Rand mode:       0x%08x)", RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN);
+#endif /* defined(SUPPORT_VALIDATION) */
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Rascal+Dust Power Island:                  0x%08x)", RGXFWIF_INICFG_POW_RASCALDUST);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( FBCDC Version 3.1 Enable:                  0x%08x)", RGXFWIF_INICFG_FBCDC_V3_1_EN);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Check MList:                               0x%08x)", RGXFWIF_INICFG_CHECK_MLIST_EN);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Disable Auto Clock Gating:                 0x%08x)", RGXFWIF_INICFG_DISABLE_CLKGATING_EN);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Enable register configuration:             0x%08x)", RGXFWIF_INICFG_REGCONFIG_EN);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Assert on TA Out-of-Memory:                0x%08x)", RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Disable HWPerf counter filter:             0x%08x)", RGXFWIF_INICFG_HWP_DISABLE_FILTER);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Enable Ctx Switch profile mode: 0x%08x (none=d'0, fast=d'1, medium=d'2, slow=d'3, nodelay=d'4))", RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Disable DM overlap (except TA during SPM): 0x%08x)", RGXFWIF_INICFG_DISABLE_DM_OVERLAP);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Assert on HWR trigger (page fault, lockup, overrun or poll failure): 0x%08x)", RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Enable coherent memory accesses:           0x%08x)", RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Enable IRQ validation:                     0x%08x)", RGXFWIF_INICFG_VALIDATE_IRQ);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( SPU power state mask change Enable:        0x%08x)", RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN);
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Enable Workload Estimation:                0x%08x)", RGXFWIF_INICFG_WORKEST);
+#if defined(SUPPORT_PDVFS)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Enable Proactive DVFS:                     0x%08x)", RGXFWIF_INICFG_PDVFS);
+#endif /* defined(SUPPORT_PDVFS) */
+#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( CDM Arbitration Mode (task demand=b'01, round robin=b'10): 0x%08x)", RGXFWIF_INICFG_CDM_ARBITRATION_MASK);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( ISP Scheduling Mode (v1=b'01, v2=b'10):    0x%08x)", RGXFWIF_INICFG_ISPSCHEDMODE_MASK);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Validate SOC & USC timers:                 0x%08x)", RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER);
+
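+       /* Emit the chosen config flags value into the PDump script; the comment list
+        * above documents each bit so the value can be edited at playback time. */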
+       DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfFwSysDataMemDesc,
+                                                       offsetof(RGXFWIF_SYSDATA, ui32ConfigFlags),
+                                                       ui32ConfigFlags,
+                                                       PDUMP_FLAGS_CONTINUOUS);
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Extended FW system config options not used.)");
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "(Set the FW OS config options here)");
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Ctx Switch TDM Enable:                0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Ctx Switch GEOM Enable:               0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Ctx Switch 3D Enable:                 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Ctx Switch CDM Enable:                0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Ctx Switch RDM Enable:                0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_RDM_EN);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Lower Priority Ctx Switch TDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Lower Priority Ctx Switch GEOM Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_GEOM);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Lower Priority Ctx Switch  3D Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Lower Priority Ctx Switch CDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Lower Priority Ctx Switch RDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_RDM);
+
+       DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfFwOsDataMemDesc,
+                                                         offsetof(RGXFWIF_OSDATA, ui32FwOsConfigFlags),
+                                                         ui32FwOsCfgFlags,
+                                                         PDUMP_FLAGS_CONTINUOUS);
+
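+       /* Record the initial power units state mask in the PDump stream. When the mask
+        * may change at runtime (SUPPORT_VALIDATION builds with POWER_ISLAND_VERSION 1),
+        * the write is wrapped in PDUMPIF/PDUMPELSE so that playback can instead read
+        * the live mask back from RGX_CR_SPU_ENABLE. */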
+       {
+               PDUMP_FLAGS_T      ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+               IMG_UINT32         ui32AllPowUnitsMask = (1 << psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount) - 1;
+#if defined(SUPPORT_VALIDATION)
+               IMG_BOOL           bRunTimeUpdate = (RGX_GET_FEATURE_VALUE(psDevInfo, POWER_ISLAND_VERSION) == 1);
+#else
+               IMG_BOOL           bRunTimeUpdate = IMG_FALSE;
+#endif
+               IMG_UINT32         ui32DstOffset = psDevInfo->psRGXFWIfRuntimeCfgMemDesc->uiOffset + offsetof(RGXFWIF_RUNTIME_CFG, ui32PowUnitsStateMask);
+               IMG_CHAR           aszPowUnitsMaskRegVar[] = ":SYSMEM:$1";
+               IMG_CHAR           aszPowUnitsEnable[] = "RUNTIME_POW_UNITS_MASK";
+               PMR                *psPMR = (PMR *)(psDevInfo->psRGXFWIfRuntimeCfgMemDesc->psImport->hPMR);
+
+
+               if (bRunTimeUpdate)
+               {
+                       PDUMPIF(psDevInfo->psDeviceNode, aszPowUnitsEnable, ui32PDumpFlags);
+               }
+
+               PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags,
+                                     "Load initial value power units mask in FW runtime configuration");
+               DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+                                                                 ui32DstOffset,
+                                                                 psDevInfo->psRGXFWIfRuntimeCfg->ui32PowUnitsStateMask,
+                                                                 ui32PDumpFlags);
+
+               if (bRunTimeUpdate)
+               {
+                       PDUMPELSE(psDevInfo->psDeviceNode, aszPowUnitsEnable, ui32PDumpFlags);
+                       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, "Read initial SPU mask value from HW registers");
+                       PDumpRegRead32ToInternalVar(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_SPU_ENABLE, aszPowUnitsMaskRegVar, ui32PDumpFlags);
+                       PDumpWriteVarANDValueOp(psDevInfo->psDeviceNode, aszPowUnitsMaskRegVar, ui32AllPowUnitsMask, ui32PDumpFlags);
+                       PDumpInternalVarToMemLabel(psPMR, ui32DstOffset, aszPowUnitsMaskRegVar, ui32PDumpFlags);
+                       PDUMPFI(psDevInfo->psDeviceNode, aszPowUnitsEnable, ui32PDumpFlags);
+               }
+       }
+
+#if defined(SUPPORT_SECURITY_VALIDATION)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "(Select one or more security tests here)");
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Read/write FW private data from non-FW contexts: 0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_DATA);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Read/write FW code from non-FW contexts:         0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_CODE);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Execute FW code from non-secure memory:          0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_NONSECURE);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Execute FW code from secure (non-FW) memory:     0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_SECURE);
+
+       DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc,
+                                                         offsetof(RGXFWIF_SYSINIT, ui32SecurityTestFlags),
+                                                         psDevInfo->psRGXFWIfSysInit->ui32SecurityTestFlags,
+                                                         PDUMP_FLAGS_CONTINUOUS);
+#endif /* defined(SUPPORT_SECURITY_VALIDATION) */
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( PID filter type: %X=INCLUDE_ALL_EXCEPT, %X=EXCLUDE_ALL_EXCEPT)",
+                                RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT,
+                                RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT);
+
+       DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc,
+                                                         offsetof(RGXFWIF_SYSINIT, sPIDFilter.eMode),
+                                                         psDevInfo->psRGXFWIfSysInit->sPIDFilter.eMode,
+                                                         PDUMP_FLAGS_CONTINUOUS);
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( PID filter PID/OSID list (Up to %u entries. Terminate with a zero PID))",
+                                RGXFWIF_PID_FILTER_MAX_NUM_PIDS);
+       {
+               IMG_UINT32 i;
+
+               /* generate a few WRWs in the pdump stream as an example */
+               for (i = 0; i < MIN(RGXFWIF_PID_FILTER_MAX_NUM_PIDS, 8); i++)
+               {
+                       /*
+                        * Some compilers cannot cope with the uses of offsetof() below - the specific problem being the use of
+                        * a non-const variable in the expression, which needs to be a compile-time constant. Typical compiler output is
+                        * "expression must have a constant value".
+                        */
+                       const IMG_DEVMEM_OFFSET_T uiPIDOff
+                       = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_SYSINIT *)0)->sPIDFilter.asItems[i].uiPID);
+
+                       const IMG_DEVMEM_OFFSET_T uiOSIDOff
+                       = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_SYSINIT *)0)->sPIDFilter.asItems[i].ui32OSID);
+
+                       PDUMPCOMMENT(psDevInfo->psDeviceNode, "(PID and OSID pair %u)", i);
+
+                       PDUMPCOMMENT(psDevInfo->psDeviceNode, "(PID)");
+                       DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc,
+                                                                         uiPIDOff,
+                                                                         0,
+                                                                         PDUMP_FLAGS_CONTINUOUS);
+
+                       PDUMPCOMMENT(psDevInfo->psDeviceNode, "(OSID)");
+                       DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc,
+                                                                         uiOSIDOff,
+                                                                         0,
+                                                                         PDUMP_FLAGS_CONTINUOUS);
+               }
+       }
+#if defined(SUPPORT_VALIDATION)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode, "(Set the FW GEOM/3D Killing Control.)");
+       DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfFwSysDataMemDesc,
+                                                         offsetof(RGXFWIF_SYSDATA, ui32RenderKillingCtl),
+                                                         ui32RenderKillingCtl,
+                                                         PDUMP_FLAGS_CONTINUOUS);
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode, "(Set the FW CDM/TDM Killing Control.)");
+       DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfFwSysDataMemDesc,
+                                                         offsetof(RGXFWIF_SYSDATA, ui32CDMTDMKillingCtl),
+                                                         ui32CDMTDMKillingCtl,
+                                                         PDUMP_FLAGS_CONTINUOUS);
+#endif /* defined(SUPPORT_VALIDATION) */
+       /*
+        * Dump the log config so it can be edited.
+        */
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "(Set the log config here)");
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( Log Type: set bit 0 for TRACE, reset for TBI)");
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( MAIN Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MAIN);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( MTS Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MTS);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( CLEANUP Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_CLEANUP);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( CSW Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_CSW);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( BIF Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_BIF);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( PM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_PM);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( RTD Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_RTD);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( SPM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_SPM);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( POW Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_POW);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( HWR Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWR);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( HWP Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWP);
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA))
+       {
+               PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                        "( DMA Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DMA);
+       }
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( MISC Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MISC);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "( DEBUG Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DEBUG);
+       DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+                                                         offsetof(RGXFWIF_TRACEBUF, ui32LogType),
+                                                         psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType,
+                                                         PDUMP_FLAGS_CONTINUOUS);
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Set the HWPerf Filter config here, see \"hwperfbin2jsont -h\"");
+       DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfSysInitMemDesc,
+                                                         offsetof(RGXFWIF_SYSINIT, ui64HWPerfFilter),
+                                                         psDevInfo->psRGXFWIfSysInit->ui64HWPerfFilter,
+                                                         PDUMP_FLAGS_CONTINUOUS);
+
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "(Number of registers configurations for types(byte index): pow on(%d), dust change(%d), ta(%d), 3d(%d), cdm(%d), TDM(%d))",
+                                RGXFWIF_REG_CFG_TYPE_PWR_ON,
+                                RGXFWIF_REG_CFG_TYPE_DUST_CHANGE,
+                                RGXFWIF_REG_CFG_TYPE_TA,
+                                RGXFWIF_REG_CFG_TYPE_3D,
+                                RGXFWIF_REG_CFG_TYPE_CDM,
+                                RGXFWIF_REG_CFG_TYPE_TDM);
+
+       {
+               IMG_UINT32 i;
+
+               /* Write 32 bits in each iteration as required by PDUMP WRW command */
+               for (i = 0; i < RGXFWIF_REG_CFG_TYPE_ALL; i += sizeof(IMG_UINT32))
+               {
+                       DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRegCfgMemDesc,
+                                                                       offsetof(RGXFWIF_REG_CFG, aui8NumRegsType[i]),
+                                                                       0,
+                                                                       PDUMP_FLAGS_CONTINUOUS);
+               }
+       }
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode, "(Set registers here: address, mask, value)");
+       DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc,
+                                                         offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Addr),
+                                                         0,
+                                                         PDUMP_FLAGS_CONTINUOUS);
+       DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc,
+                                                         offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Mask),
+                                                         0,
+                                                         PDUMP_FLAGS_CONTINUOUS);
+       DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc,
+                                                         offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Value),
+                                                         0,
+                                                         PDUMP_FLAGS_CONTINUOUS);
+#endif /* SUPPORT_USER_REGISTER_CONFIGURATION */
+}
+#endif /* defined(PDUMP) */
+
+/*!
+*******************************************************************************
+ @Function    RGXSetupFwGuardPage
+
+ @Description Allocate a Guard Page at the start of a Guest's Main Heap
+
+ @Input       psDevInfo
+
+ @Return      PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXSetupFwGuardPage(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       PVRSRV_ERROR eError;
+
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 (RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS |
+                                                                  PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN)),
+                                                                 OSGetPageSize(),
+                                                                 "FwGuardPage",
+                                                                 &psDevInfo->psRGXFWHeapGuardPageReserveMemDesc,
+                                                                 NULL,
+                                                                 NULL,
+                                                                 RFW_FWADDR_FLAG_NONE);
+
+       return eError;
+}
+
+/*!
+*******************************************************************************
+ @Function    RGXSetupFwSysData
+
+ @Description Sets up all system-wide firmware-related data
+
+ @Input       psDeviceNode
+
+ @Return      PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE       *psDeviceNode,
+                                                                         IMG_BOOL                 bEnableSignatureChecks,
+                                                                         IMG_UINT32               ui32SignatureChecksBufSize,
+                                                                         IMG_UINT32               ui32HWPerfFWBufSizeKB,
+                                                                         IMG_UINT64               ui64HWPerfFilter,
+                                                                         IMG_UINT32               ui32ConfigFlags,
+                                                                         IMG_UINT32               ui32ConfigFlagsExt,
+                                                                         IMG_UINT32               ui32LogType,
+                                                                         IMG_UINT32               ui32FilterFlags,
+                                                                         IMG_UINT32               ui32JonesDisableMask,
+                                                                         IMG_UINT32               ui32HWPerfCountersDataSize,
+                                                                         IMG_UINT32               ui32RenderKillingCtl,
+                                                                         IMG_UINT32               ui32CDMTDMKillingCtl,
+                                                                         IMG_UINT32               *pui32TPUTrilinearFracMask,
+                                                                         IMG_UINT32               *pui32USRMNumRegions,
+                                                                         IMG_UINT64               *pui64UVBRMNumRegions,
+                                                                         RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf,
+                                                                         FW_PERF_CONF             eFirmwarePerf,
+                                                                         IMG_UINT32               ui32AvailablePowUnitsMask,
+                                                                         IMG_UINT32               ui32AvailableRACMask)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       IMG_UINT32         ui32AllPowUnitsMask = (1 << psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount) - 1;
+       IMG_UINT32                      ui32AllRACMask = (1 << psDevInfo->sDevFeatureCfg.ui32MAXRACCount) - 1;
+       RGXFWIF_SYSINIT    *psFwSysInitScratch = NULL;
+#if defined(SUPPORT_VALIDATION)
+       /* Create AppHint reference handle for use in SUPPORT_VALIDATION case.
+        * This is freed on exit from this routine.
+        */
+       IMG_UINT32 ui32ApphintDefault = 0;
+       void *pvAppHintState = NULL;
+       OSCreateKMAppHintState(&pvAppHintState);
+#endif /* defined(SUPPORT_VALIDATION) */
+
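+       /* The SysInit contents are assembled in this host-side scratch buffer and only
+        * copied into the firmware-visible allocation once fully populated (see the
+        * OSCachedMemCopyWMB call towards the end of this function). */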
+       psFwSysInitScratch = OSAllocZMem(sizeof(*psFwSysInitScratch));
+       PVR_LOG_GOTO_IF_NOMEM(psFwSysInitScratch, eError, fail);
+
+       /* Sys Fw init data */
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                     (RGX_FWSHAREDMEM_CONFIG_ALLOCFLAGS |
+                                     PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)) &
+                                     RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp),
+                                     sizeof(RGXFWIF_SYSINIT),
+                                     "FwSysInitStructure",
+                                     &psDevInfo->psRGXFWIfSysInitMemDesc,
+                                     NULL,
+                                     (void**) &psDevInfo->psRGXFWIfSysInit,
+                                     RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "Firmware Sys Init structure allocation", fail);
+
+#if defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK)
+       /* Setup Fault read register */
+       eError = RGXSetupFaultReadRegister(psDeviceNode, psFwSysInitScratch);
+       PVR_LOG_GOTO_IF_ERROR(eError, "Fault read register setup", fail);
+#endif
+
+#if defined(SUPPORT_AUTOVZ)
+       psFwSysInitScratch->ui32VzWdgPeriod = PVR_AUTOVZ_WDG_PERIOD_MS;
+#endif
+
+       /* RD Power Island */
+       {
+               RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+               IMG_BOOL bSysEnableRDPowIsland = psRGXData->psRGXTimingInfo->bEnableRDPowIsland;
+               IMG_BOOL bEnableRDPowIsland = ((eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_DEFAULT) && bSysEnableRDPowIsland) ||
+                                              (eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_FORCE_ON);
+
+               ui32ConfigFlags |= bEnableRDPowIsland? RGXFWIF_INICFG_POW_RASCALDUST : 0;
+       }
+
+       /* Make sure to inform firmware if the device supports fullace fabric coherency */
+       ui32ConfigFlags |= (PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) &&
+                           PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) ?
+                          RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED : 0;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       ui32ConfigFlags |= RGXFWIF_INICFG_WORKEST;
+#if defined(SUPPORT_PDVFS)
+       {
+               RGXFWIF_PDVFS_OPP   *psPDVFSOPPInfo;
+               IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg;
+
+               /* Pro-active DVFS depends on Workload Estimation */
+               psPDVFSOPPInfo = &psFwSysInitScratch->sPDVFSOPPInfo;
+               psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg;
+               PVR_LOG_IF_FALSE(psDVFSDeviceCfg->pasOPPTable, "RGXSetupFwSysData: Missing OPP Table");
+
+               if (psDVFSDeviceCfg->pasOPPTable != NULL)
+               {
+                       if (psDVFSDeviceCfg->ui32OPPTableSize > ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues))
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: OPP Table too large: Size = %u, Maximum size = %lu",
+                                        __func__,
+                                        psDVFSDeviceCfg->ui32OPPTableSize,
+                                        (unsigned long)(ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues))));
+                               eError = PVRSRV_ERROR_INVALID_PARAMS;
+                               goto fail;
+                       }
+
+                       OSDeviceMemCopy(psPDVFSOPPInfo->asOPPValues,
+                                       psDVFSDeviceCfg->pasOPPTable,
+                                       sizeof(psPDVFSOPPInfo->asOPPValues));
+
+                       psPDVFSOPPInfo->ui32MaxOPPPoint = psDVFSDeviceCfg->ui32OPPTableSize - 1;
+
+                       ui32ConfigFlags |= RGXFWIF_INICFG_PDVFS;
+               }
+       }
+#endif /* defined(SUPPORT_PDVFS) */
+#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */
+
+       /* FW trace control structure */
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS &
+                                                                 RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp),
+                                                                 sizeof(RGXFWIF_TRACEBUF),
+                                                                 "FwTraceCtlStruct",
+                                                                 &psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+                                                                 &psFwSysInitScratch->sTraceBufCtl,
+                                                                 (void**) &psDevInfo->psRGXFWIfTraceBufCtl,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail);
+
+       if (!psDeviceNode->bAutoVzFwIsUp)
+       {
+               /* Set initial firmware log type/group(s) */
+               if (ui32LogType & ~RGXFWIF_LOG_TYPE_MASK)
+               {
+                       eError = PVRSRV_ERROR_INVALID_PARAMS;
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Invalid initial log type (0x%X)",
+                                __func__, ui32LogType));
+                       goto fail;
+               }
+               psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType = ui32LogType;
+       }
+
+       /* When PDUMP is enabled, ALWAYS allocate on-demand trace buffer resource
+        * (irrespective of loggroup(s) enabled), given that logtype/loggroups can
+        * be set during PDump playback in logconfig at any point in time.
+        * Otherwise, allocate only if required. */
+#if !defined(PDUMP)
+#if defined(SUPPORT_AUTOVZ)
+       /* always allocate trace buffer for AutoVz Host drivers to allow
+        * deterministic addresses of all SysData structures */
+       if ((PVRSRV_VZ_MODE_IS(HOST)) || (RGXTraceBufferIsInitRequired(psDevInfo)))
+#else
+       if (RGXTraceBufferIsInitRequired(psDevInfo))
+#endif
+#endif
+       {
+               eError = RGXTraceBufferInitOnDemandResources(psDevInfo,
+                                                                                                        RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS &
+                                                                                                        RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp));
+       }
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXTraceBufferInitOnDemandResources", fail);
+
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                     RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS &
+                                     RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp),
+                                     sizeof(RGXFWIF_SYSDATA),
+                                     "FwSysData",
+                                     &psDevInfo->psRGXFWIfFwSysDataMemDesc,
+                                     &psFwSysInitScratch->sFwSysData,
+                                     (void**) &psDevInfo->psRGXFWIfFwSysData,
+                                     RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail);
+
+       /* GPIO validation setup */
+       psFwSysInitScratch->eGPIOValidationMode = RGXFWIF_GPIO_VAL_OFF;
+#if defined(SUPPORT_VALIDATION)
+       {
+               IMG_INT32 ui32GPIOValidationMode;
+               ui32ApphintDefault = PVRSRV_APPHINT_GPIOVALIDATIONMODE;
+               /* Check AppHint for GPIO validation mode */
+               OSGetKMAppHintUINT32(APPHINT_NO_DEVICE,
+                                    pvAppHintState,
+                                    GPIOValidationMode,
+                                    &ui32ApphintDefault,
+                                    &ui32GPIOValidationMode);
+
+               if (ui32GPIOValidationMode >= RGXFWIF_GPIO_VAL_LAST)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Invalid GPIO validation mode: %d, only valid if smaller than %d. Disabling GPIO validation.",
+                               __func__,
+                               ui32GPIOValidationMode,
+                               RGXFWIF_GPIO_VAL_LAST));
+               }
+               else
+               {
+                       psFwSysInitScratch->eGPIOValidationMode = (RGXFWIF_GPIO_VAL_MODE) ui32GPIOValidationMode;
+               }
+       }
+
+       /* if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_STATE_PIN)) */
+       {
+               IMG_BOOL bGPUStatePin;
+               IMG_BOOL bApphintDefault = IMG_FALSE;
+               /* Check AppHint for GPU state pin */
+               OSGetKMAppHintBOOL(APPHINT_NO_DEVICE,
+                                  pvAppHintState,
+                                  GPUStatePin,
+                                  &bApphintDefault,
+                                  &bGPUStatePin);
+
+               psDevInfo->ui32ValidationFlags |= (bGPUStatePin) ? RGX_VAL_GPUSTATEPIN_EN : 0;
+       }
+
+       {
+               IMG_UINT32 ui32EnablePollOnChecksumErrorStatus;
+               ui32ApphintDefault = 0;
+
+               /* Check AppHint for polling on GPU Checksum status */
+               OSGetKMAppHintUINT32(APPHINT_NO_DEVICE,
+                                    pvAppHintState,
+                                    EnablePollOnChecksumErrorStatus,
+                                    &ui32ApphintDefault,
+                                    &ui32EnablePollOnChecksumErrorStatus);
+
+               switch (ui32EnablePollOnChecksumErrorStatus)
+               {
+                       case 0: /* no checking */ break;
+                       case 1: psDevInfo->ui32ValidationFlags |= RGX_VAL_FBDC_SIG_CHECK_NOERR_EN; break;
+                       case 2: psDevInfo->ui32ValidationFlags |= RGX_VAL_FBDC_SIG_CHECK_ERR_EN; break;
+                       case 3: psDevInfo->ui32ValidationFlags |= RGX_VAL_KZ_SIG_CHECK_NOERR_EN; break;
+                       case 4: psDevInfo->ui32ValidationFlags |= RGX_VAL_KZ_SIG_CHECK_ERR_EN; break;
+                       default:
+                               PVR_DPF((PVR_DBG_WARNING, "Unsupported value in EnablePollOnChecksumErrorStatus (%d)", ui32EnablePollOnChecksumErrorStatus));
+                               break;
+               }
+       }
+
+       /* Check AppHint for power island transition interval */
+       ui32ApphintDefault = 0;
+       OSGetKMAppHintUINT32(APPHINT_NO_DEVICE,
+                            pvAppHintState,
+                            PowerDomainKickInterval,
+                            &ui32ApphintDefault,
+                            &psDevInfo->ui32PowDomainKickInterval);
+
+#if defined(SUPPORT_RAY_TRACING)
+       {
+               IMG_UINT64 ui64RCEDisableMask;
+               IMG_UINT64 ui64ApphintDefault = PVRSRV_APPHINT_RCEDISABLEMASK;
+               OSGetKMAppHintUINT64(APPHINT_NO_DEVICE,
+                                    pvAppHintState,
+                                    RCEDisableMask,
+                                    &ui64ApphintDefault,
+                                    &ui64RCEDisableMask);
+               psFwSysInitScratch->ui64RCEDisableMask = ui64RCEDisableMask;
+
+       }
+#endif
+
+#endif /* defined(SUPPORT_VALIDATION) */
+
+#if defined(SUPPORT_FIRMWARE_GCOV)
+       eError = RGXFWSetupFirmwareGcovBuffer(psDevInfo,
+                                             &psDevInfo->psFirmwareGcovBufferMemDesc,
+                                             RGXFWIF_FIRMWARE_GCOV_BUFFER_SIZE,
+                                             &psFwSysInitScratch->sFirmwareGcovCtl,
+                                             "FirmwareGcovBuffer");
+       PVR_LOG_GOTO_IF_ERROR(eError, "Firmware GCOV buffer allocation", fail);
+       psDevInfo->ui32FirmwareGcovSize = RGXFWIF_FIRMWARE_GCOV_BUFFER_SIZE;
+#endif /* defined(SUPPORT_FIRMWARE_GCOV) */
+
+#if defined(PDUMP)
+       /* Require a minimum amount of memory for the signature buffers */
+       if (ui32SignatureChecksBufSize < RGXFW_SIG_BUFFER_SIZE_MIN)
+       {
+               ui32SignatureChecksBufSize = RGXFW_SIG_BUFFER_SIZE_MIN;
+       }
+
+       /* Setup Signature and Checksum Buffers for TDM, GEOM, 3D and CDM */
+       eError = RGXFWSetupSignatureChecks(psDevInfo,
+                                          &psDevInfo->psRGXFWSigTDMChecksMemDesc,
+                                          ui32SignatureChecksBufSize,
+                                          &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_TDM]);
+       PVR_LOG_GOTO_IF_ERROR(eError, "TDM Signature check setup", fail);
+       psDevInfo->ui32SigTDMChecksSize = ui32SignatureChecksBufSize;
+
+       eError = RGXFWSetupSignatureChecks(psDevInfo,
+                                          &psDevInfo->psRGXFWSigTAChecksMemDesc,
+                                          ui32SignatureChecksBufSize,
+                                          &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_GEOM]);
+       PVR_LOG_GOTO_IF_ERROR(eError, "GEOM Signature check setup", fail);
+       psDevInfo->ui32SigTAChecksSize = ui32SignatureChecksBufSize;
+
+       eError = RGXFWSetupSignatureChecks(psDevInfo,
+                                          &psDevInfo->psRGXFWSig3DChecksMemDesc,
+                                          ui32SignatureChecksBufSize,
+                                          &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_3D]);
+       PVR_LOG_GOTO_IF_ERROR(eError, "3D Signature check setup", fail);
+       psDevInfo->ui32Sig3DChecksSize = ui32SignatureChecksBufSize;
+
+       eError = RGXFWSetupSignatureChecks(psDevInfo,
+                                          &psDevInfo->psRGXFWSigCDMChecksMemDesc,
+                                          ui32SignatureChecksBufSize,
+                                          &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_CDM]);
+       PVR_LOG_GOTO_IF_ERROR(eError, "CDM Signature check setup", fail);
+       psDevInfo->ui32SigCDMChecksSize = ui32SignatureChecksBufSize;
+
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) &&
+               RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) > 1)
+       {
+               eError = RGXFWSetupSignatureChecks(psDevInfo,
+                                                  &psDevInfo->psRGXFWSigRDMChecksMemDesc,
+                                                  ui32SignatureChecksBufSize,
+                                                  &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_RAY]);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RDM Signature check setup", fail);
+               psDevInfo->ui32SigRDMChecksSize = ui32SignatureChecksBufSize;
+       }
+
+#if defined(SUPPORT_VALIDATION)
+       eError = RGXFWSetupSignatureChecks(psDevInfo,
+                                          &psDevInfo->psRGXFWValidationSigMemDesc,
+                                          ui32SignatureChecksBufSize,
+                                          &psFwSysInitScratch->asValidationSigBufCtl[RGXFWIF_DM_3D]);
+       psFwSysInitScratch->asValidationSigBufCtl[RGXFWIF_DM_CDM] = psFwSysInitScratch->asValidationSigBufCtl[RGXFWIF_DM_3D];
+       PVR_LOG_GOTO_IF_ERROR(eError, "FBCDC/TRP/WGP Signature check setup", fail);
+       psDevInfo->ui32ValidationSigSize = ui32SignatureChecksBufSize;
+#endif
+#endif
+
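+       /* With signature checks disabled, clear the signature buffer firmware addresses
+        * so the firmware sees no buffers to populate. */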
+       if (!bEnableSignatureChecks)
+       {
+               psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_TDM].sBuffer.ui32Addr = 0x0;
+               psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_GEOM].sBuffer.ui32Addr = 0x0;
+               psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_3D].sBuffer.ui32Addr = 0x0;
+               psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_CDM].sBuffer.ui32Addr = 0x0;
+               psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_RAY].sBuffer.ui32Addr = 0x0;
+       }
+
+       eError = RGXFWSetupAlignChecks(psDeviceNode,
+                                      &psFwSysInitScratch->sAlignChecks);
+       PVR_LOG_GOTO_IF_ERROR(eError, "Alignment checks setup", fail);
+
+       psFwSysInitScratch->ui32FilterFlags = ui32FilterFlags;
+
+       /* Fill in the remaining fields of the FW init data */
+       psFwSysInitScratch->sPDSExecBase.uiAddr = RGX_PDSCODEDATA_HEAP_BASE;
+       psFwSysInitScratch->sUSCExecBase.uiAddr = RGX_USCCODE_HEAP_BASE;
+       psFwSysInitScratch->sFBCDCStateTableBase.uiAddr = RGX_FBCDC_HEAP_BASE;
+       psFwSysInitScratch->sFBCDCLargeStateTableBase.uiAddr = RGX_FBCDC_LARGE_HEAP_BASE;
+       psFwSysInitScratch->sTextureHeapBase.uiAddr = RGX_TEXTURE_STATE_HEAP_BASE;
+       psFwSysInitScratch->sPDSIndirectHeapBase.uiAddr = RGX_PDS_INDIRECT_STATE_HEAP_BASE;
+
+       psFwSysInitScratch->ui32JonesDisableMask = ui32JonesDisableMask;
+
+       eError = _AllocateSLC3Fence(psDevInfo, psFwSysInitScratch);
+       PVR_LOG_GOTO_IF_ERROR(eError, "SLC3Fence memory allocation", fail);
+
+#if defined(SUPPORT_PDVFS)
+       /* Core clock rate */
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                     RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS &
+                                     RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp),
+                                     sizeof(IMG_UINT32),
+                                     "FwPDVFSCoreClkRate",
+                                     &psDevInfo->psRGXFWIFCoreClkRateMemDesc,
+                                     &psFwSysInitScratch->sCoreClockRate,
+                                     (void**) &psDevInfo->pui32RGXFWIFCoreClkRate,
+                                     RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PDVFS core clock rate memory setup", fail);
+#endif
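+       /* Timer query arrays: per-query start/end timestamps and completed-op counts
+        * shared with the firmware, mapped CPU-uncached (write-combined) so updates
+        * are visible to the host without explicit cache maintenance. */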
+       {
+       /* Timestamps */
+       PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags =
+               PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN) |
+               PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+               PVRSRV_MEMALLOCFLAG_GPU_READABLE | /* XXX: confirm whether GPU read access is actually required here */
+               PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+               PVRSRV_MEMALLOCFLAG_GPU_UNCACHED |
+               PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+               PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC |
+               PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+               PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+       /* The timer query arrays */
+       PDUMPCOMMENT(psDeviceNode, "Allocate timer query arrays (FW)");
+       eError = DevmemFwAllocate(psDevInfo,
+                                 sizeof(IMG_UINT64) * RGX_MAX_TIMER_QUERIES,
+                                 uiMemAllocFlags |
+                                 PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE,
+                                 "FwStartTimesArray",
+                                 & psDevInfo->psStartTimeMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to allocate start times array",
+                               __func__));
+               goto fail;
+       }
+
+       eError = DevmemAcquireCpuVirtAddr(psDevInfo->psStartTimeMemDesc,
+                                         (void **)& psDevInfo->pui64StartTimeById);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to map start times array",
+                               __func__));
+               goto fail;
+       }
+
+       eError = DevmemFwAllocate(psDevInfo,
+                                 sizeof(IMG_UINT64) * RGX_MAX_TIMER_QUERIES,
+                                 uiMemAllocFlags |
+                                 PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE,
+                                 "FwEndTimesArray",
+                                 & psDevInfo->psEndTimeMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to allocate end times array",
+                               __func__));
+               goto fail;
+       }
+
+       eError = DevmemAcquireCpuVirtAddr(psDevInfo->psEndTimeMemDesc,
+                                         (void **)& psDevInfo->pui64EndTimeById);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to map end times array",
+                               __func__));
+               goto fail;
+       }
+
+       eError = DevmemFwAllocate(psDevInfo,
+                                 sizeof(IMG_UINT32) * RGX_MAX_TIMER_QUERIES,
+                                 uiMemAllocFlags,
+                                 "FwCompletedOpsArray",
+                                 & psDevInfo->psCompletedMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to allocate completed ops array",
+                               __func__));
+               goto fail;
+       }
+
+       eError = DevmemAcquireCpuVirtAddr(psDevInfo->psCompletedMemDesc,
+                                         (void **)& psDevInfo->pui32CompletedById);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to map completed ops array",
+                               __func__));
+               goto fail;
+       }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+       eError = OSLockCreate(&psDevInfo->hTimerQueryLock);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to create lock for timer query",
+                               __func__));
+               goto fail;
+       }
+#endif
+       }
+#if defined(SUPPORT_TBI_INTERFACE)
+#if !defined(PDUMP)
+       /* allocate only if required */
+       if (RGXTBIBufferIsInitRequired(psDevInfo))
+#endif /* !defined(PDUMP) */
+       {
+               /* When PDUMP is enabled, ALWAYS allocate on-demand TBI buffer resource
+                * (irrespective of loggroup(s) enabled), given that logtype/loggroups
+                * can be set during PDump playback in logconfig, at any point of time
+                */
+               eError = RGXTBIBufferInitOnDemandResources(psDevInfo);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXTBIBufferInitOnDemandResources", fail);
+       }
+
+       psFwSysInitScratch->sTBIBuf = psDevInfo->sRGXFWIfTBIBuffer;
+#endif /* defined(SUPPORT_TBI_INTERFACE) */
+
+       /* Allocate shared buffer for GPU utilisation */
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS &
+                                                                 RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp),
+                                                                 sizeof(RGXFWIF_GPU_UTIL_FWCB),
+                                                                 "FwGPUUtilisationBuffer",
+                                                                 &psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc,
+                                                                 &psFwSysInitScratch->sGpuUtilFWCbCtl,
+                                                                 (void**) &psDevInfo->psRGXFWIfGpuUtilFWCb,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "GPU Utilisation Buffer ctl allocation", fail);
+
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS &
+                                                                 RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp),
+                                                                 sizeof(RGXFWIF_RUNTIME_CFG),
+                                                                 "FwRuntimeCfg",
+                                                                 &psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+                                                                 &psFwSysInitScratch->sRuntimeCfg,
+                                                                 (void**) &psDevInfo->psRGXFWIfRuntimeCfg,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "Firmware runtime configuration memory allocation", fail);
+
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS &
+                                                                 RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp),
+                                                                 sizeof(RGXFWIF_REG_CFG),
+                                                                 "FwRegisterConfigStructure",
+                                                                 &psDevInfo->psRGXFWIfRegCfgMemDesc,
+                                                                 &psFwSysInitScratch->sRegCfg,
+                                                                 NULL,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "Firmware register user configuration structure allocation", fail);
+#endif
+
+       psDevInfo->ui32RGXFWIfHWPerfBufSize = GetHwPerfBufferSize(ui32HWPerfFWBufSizeKB);
+       /* Second stage initialisation of HWPerf; hHWPerfLock was created in the
+        * first stage. See the RGXRegisterDevice() call to RGXHWPerfInit(). */
+       if (psDevInfo->ui64HWPerfFilter == 0)
+       {
+               psDevInfo->ui64HWPerfFilter = ui64HWPerfFilter;
+               psFwSysInitScratch->ui64HWPerfFilter = ui64HWPerfFilter;
+       }
+       else
+       {
+               /* The filter has already been modified. This can happen if
+                * pvr/apphint/EnableFTraceGPU was enabled. */
+               psFwSysInitScratch->ui64HWPerfFilter = psDevInfo->ui64HWPerfFilter;
+       }
+
+#if !defined(PDUMP)
+       /* Allocate if HWPerf filter has already been set. This is possible either
+        * by setting a proper AppHint or enabling GPU ftrace events. */
+       if (psDevInfo->ui64HWPerfFilter != 0)
+#endif
+       {
+               /* When PDUMP is enabled, ALWAYS allocate on-demand HWPerf resources
+                * (irrespective of HWPerf enabled or not), given that HWPerf can be
+                * enabled during PDump playback via RTCONF at any point of time. */
+               eError = RGXHWPerfInitOnDemandResources(psDevInfo);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInitOnDemandResources", fail);
+#if defined(SUPPORT_POWMON_COMPONENT) && defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS)
+               if (RGXPowmonBufferIsInitRequired(psDevInfo))
+               {
+                       /* Allocate power monitoring log buffer if enabled */
+                       eError = RGXPowmonBufferInitOnDemandResources(psDevInfo);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXPowmonBufferInitOnDemandResources", fail);
+               }
+#endif
+       }
+
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS &
+                                                                 RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp),
+                                                                 ui32HWPerfCountersDataSize,
+                                                                 "FwHWPerfControlStructure",
+                                                                 &psDevInfo->psRGXFWIfHWPerfCountersMemDesc,
+                                                                 &psFwSysInitScratch->sHWPerfCtl,
+                                                                 NULL,
+                                                                 RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "Firmware HW Perf control struct allocation", fail);
+
+       psDevInfo->bPDPEnabled = (ui32ConfigFlags & RGXFWIF_INICFG_DISABLE_PDP_EN)
+                                                         ? IMG_FALSE : IMG_TRUE;
+
+       psFwSysInitScratch->eFirmwarePerf = eFirmwarePerf;
+
+#if defined(PDUMP)
+       /* default: no filter */
+       psFwSysInitScratch->sPIDFilter.eMode = RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT;
+       psFwSysInitScratch->sPIDFilter.asItems[0].uiPID = 0;
+#endif
+
+#if defined(SUPPORT_VALIDATION)
+       {
+               IMG_UINT32 dm;
+
+               /* TPU trilinear rounding mask override */
+               for (dm = 0; dm < RGXFWIF_TPU_DM_LAST; dm++)
+               {
+                       psFwSysInitScratch->aui32TPUTrilinearFracMask[dm] = pui32TPUTrilinearFracMask[dm];
+               }
+
+               /* USRM Config override */
+               for (dm = 0; dm < RGXFWIF_USRM_DM_LAST; dm++)
+               {
+                       psFwSysInitScratch->aui32USRMNumRegions[dm] = pui32USRMNumRegions[dm];
+               }
+
+               /* UVBRM Config override */
+               for (dm = 0; dm < RGXFWIF_UVBRM_DM_LAST; dm++)
+               {
+                       psFwSysInitScratch->aui64UVBRMNumRegions[dm] = pui64UVBRMNumRegions[dm];
+               }
+       }
+#endif
+
+#if defined(SUPPORT_SECURITY_VALIDATION)
+       {
+               PVRSRV_MEMALLOCFLAGS_T uiFlags = RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS;
+               PVRSRV_SET_PHYS_HEAP_HINT(GPU_SECURE, uiFlags);
+
+               PDUMPCOMMENT(psDeviceNode, "Allocate non-secure buffer for security validation test");
+               eError = DevmemFwAllocateExportable(psDeviceNode,
+                                                                                       OSGetPageSize(),
+                                                                                       OSGetPageSize(),
+                                                                                       RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS,
+                                                                                       "FwExNonSecureBuffer",
+                                                                                       &psDevInfo->psRGXFWIfNonSecureBufMemDesc);
+               PVR_LOG_GOTO_IF_ERROR(eError, "Non-secure buffer allocation", fail);
+
+               eError = RGXSetFirmwareAddress(&psFwSysInitScratch->pbNonSecureBuffer,
+                                                                          psDevInfo->psRGXFWIfNonSecureBufMemDesc,
+                                                                          0, RFW_FWADDR_NOREF_FLAG);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", fail);
+
+               PDUMPCOMMENT(psDeviceNode, "Allocate secure buffer for security validation test");
+               eError = DevmemFwAllocateExportable(psDeviceNode,
+                                                                                       OSGetPageSize(),
+                                                                                       OSGetPageSize(),
+                                                                                       uiFlags,
+                                                                                       "FwExSecureBuffer",
+                                                                                       &psDevInfo->psRGXFWIfSecureBufMemDesc);
+               PVR_LOG_GOTO_IF_ERROR(eError, "Secure buffer allocation", fail);
+
+               eError = RGXSetFirmwareAddress(&psFwSysInitScratch->pbSecureBuffer,
+                                                                          psDevInfo->psRGXFWIfSecureBufMemDesc,
+                                                                          0, RFW_FWADDR_NOREF_FLAG);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", fail);
+       }
+#endif /* SUPPORT_SECURITY_VALIDATION */
+
+       /* Initialize FW started flag */
+       psFwSysInitScratch->bFirmwareStarted = IMG_FALSE;
+       psFwSysInitScratch->ui32MarkerVal = 1;
+
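+       /* The remaining setup writes firmware-visible runtime state, so it is skipped
+        * when an AutoVz firmware instance is already running and that state must be
+        * preserved. */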
+       if (!psDeviceNode->bAutoVzFwIsUp)
+       {
+               IMG_UINT32 ui32OSIndex;
+
+               RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+               RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+
+               /* Info required by the FW to calculate the ActivePM idle timer latency */
+               psFwSysInitScratch->ui32InitialCoreClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+               psFwSysInitScratch->ui32InitialActivePMLatencyms = psRGXData->psRGXTimingInfo->ui32ActivePMLatencyms;
+
+               /* Initialise variable runtime configuration to the system defaults */
+               psRuntimeCfg->ui32CoreClockSpeed = psFwSysInitScratch->ui32InitialCoreClockSpeed;
+               psRuntimeCfg->ui32ActivePMLatencyms = psFwSysInitScratch->ui32InitialActivePMLatencyms;
+               psRuntimeCfg->bActivePMLatencyPersistant = IMG_TRUE;
+               psRuntimeCfg->ui32WdgPeriodUs = RGXFW_SAFETY_WATCHDOG_PERIOD_IN_US;
+               psRuntimeCfg->ui32HCSDeadlineMS = RGX_HCS_DEFAULT_DEADLINE_MS;
+
+               if (PVRSRV_VZ_MODE_IS(NATIVE))
+               {
+                       psRuntimeCfg->aui32OSidPriority[RGXFW_HOST_OS] = 0;
+               }
+               else
+               {
+                       for (ui32OSIndex = 0; ui32OSIndex < RGX_NUM_OS_SUPPORTED; ui32OSIndex++)
+                       {
+                               const IMG_INT32 ai32DefaultOsPriority[RGXFW_MAX_NUM_OS] =
+                                       {RGX_OSID_0_DEFAULT_PRIORITY, RGX_OSID_1_DEFAULT_PRIORITY, RGX_OSID_2_DEFAULT_PRIORITY, RGX_OSID_3_DEFAULT_PRIORITY,
+                                        RGX_OSID_4_DEFAULT_PRIORITY, RGX_OSID_5_DEFAULT_PRIORITY, RGX_OSID_6_DEFAULT_PRIORITY, RGX_OSID_7_DEFAULT_PRIORITY};
+
+                               /* Set up initial priorities between different OSes */
+                               psRuntimeCfg->aui32OSidPriority[ui32OSIndex] = (IMG_UINT32)ai32DefaultOsPriority[ui32OSIndex];
+                       }
+               }
+
+#if defined(PVR_ENABLE_PHR) && defined(PDUMP)
+               psRuntimeCfg->ui32PHRMode = RGXFWIF_PHR_MODE_RD_RESET;
+#else
+               psRuntimeCfg->ui32PHRMode = 0;
+#endif
+
+               /* Validate the power units mask and initialize to number of units to power up */
+               if ((ui32AvailablePowUnitsMask & ui32AllPowUnitsMask) == 0)
+               {
+                       eError = PVRSRV_ERROR_INVALID_SPU_MASK;
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Invalid power units mask (All=0x%X, Non Fused=0x%X). At least one power unit must be powered up.",
+                                __func__,
+                                ui32AllPowUnitsMask,
+                                ui32AvailablePowUnitsMask));
+                       goto fail;
+               }
+               psRuntimeCfg->ui32PowUnitsStateMask = ui32AvailablePowUnitsMask & ui32AllPowUnitsMask;
+
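+               /* Initialise the RAC state mask in the same way from the available RAC mask */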
+               psRuntimeCfg->ui32RACStateMask = ui32AvailableRACMask & ui32AllRACMask;
+
+               /* flush write buffers for psDevInfo->psRGXFWIfRuntimeCfg */
+               OSWriteMemoryBarrier(psDevInfo->psRGXFWIfRuntimeCfg);
+
+               /* Setup FW coremem data */
+               if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE))
+               {
+                       psFwSysInitScratch->sCorememDataStore.pbyFWAddr = psDevInfo->sFWCorememDataStoreFWAddr;
+
+                       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA))
+                       {
+                               RGXSetMetaDMAAddress(&psFwSysInitScratch->sCorememDataStore,
+                                                                        psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+                                                                        &psFwSysInitScratch->sCorememDataStore.pbyFWAddr,
+                                                                        0);
+                       }
+               }
+
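+               /* Record the config flags, masked to the bits the firmware recognises */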
+               psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags    = ui32ConfigFlags    & RGXFWIF_INICFG_ALL;
+               psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlagsExt = ui32ConfigFlagsExt & RGXFWIF_INICFG_EXT_ALL;
+#if defined(SUPPORT_VALIDATION)
+               psDevInfo->psRGXFWIfFwSysData->ui32RenderKillingCtl     = ui32RenderKillingCtl;
+               psDevInfo->psRGXFWIfFwSysData->ui32CDMTDMKillingCtl     = ui32CDMTDMKillingCtl;
+#else
+               PVR_UNREFERENCED_PARAMETER(ui32RenderKillingCtl);
+               PVR_UNREFERENCED_PARAMETER(ui32CDMTDMKillingCtl);
+#endif
+
+               /* Initialise GPU utilisation buffer */
+               psDevInfo->psRGXFWIfGpuUtilFWCb->ui64LastWord =
+                   RGXFWIF_GPU_UTIL_MAKE_WORD(OSClockns64(),RGXFWIF_GPU_UTIL_STATE_IDLE);
+
+               /* init HWPERF data */
+               psDevInfo->psRGXFWIfFwSysData->ui32HWPerfRIdx = 0;
+               psDevInfo->psRGXFWIfFwSysData->ui32HWPerfWIdx = 0;
+               psDevInfo->psRGXFWIfFwSysData->ui32HWPerfWrapCount = 0;
+               psDevInfo->psRGXFWIfFwSysData->ui32HWPerfSize = psDevInfo->ui32RGXFWIfHWPerfBufSize;
+               psDevInfo->psRGXFWIfFwSysData->ui32HWPerfUt = 0;
+               psDevInfo->psRGXFWIfFwSysData->ui32HWPerfDropCount = 0;
+               psDevInfo->psRGXFWIfFwSysData->ui32FirstDropOrdinal = 0;
+               psDevInfo->psRGXFWIfFwSysData->ui32LastDropOrdinal = 0;
+
+               /* flush write buffers for psRGXFWIfFwSysData */
+               OSWriteMemoryBarrier(psDevInfo->psRGXFWIfFwSysData);
+
+               /* Send through the BVNC feature flags */
+               eError = RGXServerFeatureFlagsToHWPerfFlags(psDevInfo, &psFwSysInitScratch->sBvncKmFeatureFlags);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXServerFeatureFlagsToHWPerfFlags", fail);
+
+               /* populate the real FwSysInit structure with the values stored in the scratch copy */
+               OSCachedMemCopyWMB(psDevInfo->psRGXFWIfSysInit, psFwSysInitScratch, sizeof(RGXFWIF_SYSINIT));
+       }
+
+       OSFreeMem(psFwSysInitScratch);
+
+#if defined(SUPPORT_VALIDATION)
+       OSFreeKMAppHintState(pvAppHintState);
+#endif
+
+       return PVRSRV_OK;
+
+fail:
+       if (psFwSysInitScratch)
+       {
+               OSFreeMem(psFwSysInitScratch);
+       }
+
+       RGXFreeFwSysData(psDevInfo);
+
+       PVR_ASSERT(eError != PVRSRV_OK);
+#if defined(SUPPORT_VALIDATION)
+       OSFreeKMAppHintState(pvAppHintState);
+#endif
+       return eError;
+}
+
+/*!
+*******************************************************************************
+ @Function    RGXSetupFwOsData
+
+ @Description Sets up all OS-specific firmware-related data
+
+ @Input       psDeviceNode
+
+ @Return      PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXSetupFwOsData(PVRSRV_DEVICE_NODE       *psDeviceNode,
+                                                                        IMG_UINT32               ui32KCCBSizeLog2,
+                                                                        IMG_UINT32               ui32HWRDebugDumpLimit,
+                                                                        IMG_UINT32               ui32FwOsCfgFlags)
+{
+       PVRSRV_ERROR       eError;
+       RGXFWIF_OSINIT     sFwOsInitScratch;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       OSCachedMemSet(&sFwOsInitScratch, 0, sizeof(RGXFWIF_OSINIT));
+
+       if (PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               eError = RGXSetupFwGuardPage(psDevInfo);
+               PVR_LOG_GOTO_IF_ERROR(eError, "Setting up firmware heap guard pages", fail);
+       }
+
+       /* Memory tracking the connection state should be non-volatile and
+        * is not cleared on allocation to prevent loss of pre-reset information */
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_CONFIG_ALLOCFLAGS &
+                                                                 ~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+                                                                 sizeof(RGXFWIF_CONNECTION_CTL),
+                                                                 "FwConnectionCtl",
+                                                                 &psDevInfo->psRGXFWIfConnectionCtlMemDesc,
+                                                                 NULL,
+                                                                 (void**) &psDevInfo->psRGXFWIfConnectionCtl,
+                                                                 RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "Firmware Connection Control structure allocation", fail);
+
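+       /* Allocate the firmware OS init structure, mapped as firmware-cached memory */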
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_CONFIG_ALLOCFLAGS |
+                                                                 PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED),
+                                                                 sizeof(RGXFWIF_OSINIT),
+                                                                 "FwOsInitStructure",
+                                                                 &psDevInfo->psRGXFWIfOsInitMemDesc,
+                                                                 NULL,
+                                                                 (void**) &psDevInfo->psRGXFWIfOsInit,
+                                                                 RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "Firmware Os Init structure allocation", fail);
+
+       /* init HWR frame info */
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS,
+                                                                 sizeof(RGXFWIF_HWRINFOBUF),
+                                                                 "FwHWRInfoBuffer",
+                                                                 &psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc,
+                                                                 &sFwOsInitScratch.sRGXFWIfHWRInfoBufCtl,
+                                                                 (void**) &psDevInfo->psRGXFWIfHWRInfoBufCtl,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "HWR Info Buffer allocation", fail);
+
+       OSCachedMemSetWMB(psDevInfo->psRGXFWIfHWRInfoBufCtl, 0, sizeof(RGXFWIF_HWRINFOBUF));
+
+       /* Allocate a sync for power management */
+       eError = SyncPrimContextCreate(psDevInfo->psDeviceNode,
+                                      &psDevInfo->hSyncPrimContext);
+       PVR_LOG_GOTO_IF_ERROR(eError, "Sync primitive context allocation", fail);
+
+       eError = SyncPrimAlloc(psDevInfo->hSyncPrimContext, &psDevInfo->psPowSyncPrim, "fw power ack");
+       PVR_LOG_GOTO_IF_ERROR(eError, "Sync primitive allocation", fail);
+
+       /* Set up kernel CCB */
+       eError = RGXSetupCCB(psDevInfo,
+                                                &psDevInfo->psKernelCCBCtl,
+                                                &psDevInfo->psKernelCCBCtlMemDesc,
+                                                &psDevInfo->psKernelCCB,
+                                                &psDevInfo->psKernelCCBMemDesc,
+                                                &sFwOsInitScratch.psKernelCCBCtl,
+                                                &sFwOsInitScratch.psKernelCCB,
+                                                ui32KCCBSizeLog2,
+                                                sizeof(RGXFWIF_KCCB_CMD),
+                                                (RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS |
+                                                PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)),
+                                                "FwKernelCCB");
+       PVR_LOG_GOTO_IF_ERROR(eError, "Kernel CCB allocation", fail);
+
+       /* The KCCB additionally uses a return slot array so that the FW can send
+        * back a return code for each command that requires one
+        */
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS,
+                                                                 (1U << ui32KCCBSizeLog2) * sizeof(IMG_UINT32),
+                                                                 "FwKernelCCBRtnSlots",
+                                                                 &psDevInfo->psKernelCCBRtnSlotsMemDesc,
+                                                                 &sFwOsInitScratch.psKernelCCBRtnSlots,
+                                                                 (void**) &psDevInfo->pui32KernelCCBRtnSlots,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "Kernel CCB return slot array allocation", fail);
+
+       /* Set up firmware CCB */
+       eError = RGXSetupCCB(psDevInfo,
+                                                &psDevInfo->psFirmwareCCBCtl,
+                                                &psDevInfo->psFirmwareCCBCtlMemDesc,
+                                                &psDevInfo->psFirmwareCCB,
+                                                &psDevInfo->psFirmwareCCBMemDesc,
+                                                &sFwOsInitScratch.psFirmwareCCBCtl,
+                                                &sFwOsInitScratch.psFirmwareCCB,
+                                                RGXFWIF_FWCCB_NUMCMDS_LOG2,
+                                                sizeof(RGXFWIF_FWCCB_CMD),
+                                                RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS,
+                                                "FwCCB");
+       PVR_LOG_GOTO_IF_ERROR(eError, "Firmware CCB allocation", fail);
+
+       eError = RGXSetupFwAllocation(psDevInfo,
+                                                                 RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS,
+                                                                 sizeof(RGXFWIF_OSDATA),
+                                                                 "FwOsData",
+                                                                 &psDevInfo->psRGXFWIfFwOsDataMemDesc,
+                                                                 &sFwOsInitScratch.sFwOsData,
+                                                                 (void**) &psDevInfo->psRGXFWIfFwOsData,
+                                                                 RFW_FWADDR_NOREF_FLAG);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail);
+
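+       /* Store the per-OS config flags, masked to the bits understood by the firmware */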
+       psDevInfo->psRGXFWIfFwOsData->ui32FwOsConfigFlags = ui32FwOsCfgFlags & RGXFWIF_INICFG_OS_ALL;
+
+       eError = SyncPrimGetFirmwareAddr(psDevInfo->psPowSyncPrim, &psDevInfo->psRGXFWIfFwOsData->sPowerSync.ui32Addr);
+       PVR_LOG_GOTO_IF_ERROR(eError, "Get Sync Prim FW address", fail);
+
+       /* flush write buffers for psRGXFWIfFwOsData */
+       OSWriteMemoryBarrier(psDevInfo->psRGXFWIfFwOsData);
+
+       sFwOsInitScratch.ui32HWRDebugDumpLimit = ui32HWRDebugDumpLimit;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       /* Set up Workload Estimation firmware CCB */
+       eError = RGXSetupCCB(psDevInfo,
+                                                &psDevInfo->psWorkEstFirmwareCCBCtl,
+                                                &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc,
+                                                &psDevInfo->psWorkEstFirmwareCCB,
+                                                &psDevInfo->psWorkEstFirmwareCCBMemDesc,
+                                                &sFwOsInitScratch.psWorkEstFirmwareCCBCtl,
+                                                &sFwOsInitScratch.psWorkEstFirmwareCCB,
+                                                RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2,
+                                                sizeof(RGXFWIF_WORKEST_FWCCB_CMD),
+                                                RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS,
+                                                "FwWEstCCB");
+       PVR_LOG_GOTO_IF_ERROR(eError, "Workload Estimation Firmware CCB allocation", fail);
+#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */
+
+       /* Initialise the compatibility check data */
+       RGXFWIF_COMPCHECKS_BVNC_INIT(sFwOsInitScratch.sRGXCompChecks.sFWBVNC);
+       RGXFWIF_COMPCHECKS_BVNC_INIT(sFwOsInitScratch.sRGXCompChecks.sHWBVNC);
+
+       /* populate the real FwOsInit structure with the values stored in the scratch copy */
+       OSCachedMemCopyWMB(psDevInfo->psRGXFWIfOsInit, &sFwOsInitScratch, sizeof(RGXFWIF_OSINIT));
+
+       return PVRSRV_OK;
+
+fail:
+       RGXFreeFwOsData(psDevInfo);
+
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+/*!
+*******************************************************************************
+ @Function    RGXSetupFirmware
+
+ @Description Sets up all firmware-related data
+
+ @Input       psDeviceNode
+
+ @Return      PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE       *psDeviceNode,
+                                                         IMG_BOOL                 bEnableSignatureChecks,
+                                                         IMG_UINT32               ui32SignatureChecksBufSize,
+                                                         IMG_UINT32               ui32HWPerfFWBufSizeKB,
+                                                         IMG_UINT64               ui64HWPerfFilter,
+                                                         IMG_UINT32               ui32ConfigFlags,
+                                                         IMG_UINT32               ui32ConfigFlagsExt,
+                                                         IMG_UINT32               ui32FwOsCfgFlags,
+                                                         IMG_UINT32               ui32LogType,
+                                                         IMG_UINT32               ui32FilterFlags,
+                                                         IMG_UINT32               ui32JonesDisableMask,
+                                                         IMG_UINT32               ui32HWRDebugDumpLimit,
+                                                         IMG_UINT32               ui32HWPerfCountersDataSize,
+                                                         IMG_UINT32               ui32RenderKillingCtl,
+                                                         IMG_UINT32               ui32CDMTDMKillingCtl,
+                                                         IMG_UINT32               *pui32TPUTrilinearFracMask,
+                                                         IMG_UINT32               *pui32USRMNumRegions,
+                                                         IMG_UINT64               *pui64UVBRMNumRegions,
+                                                         RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf,
+                                                         FW_PERF_CONF             eFirmwarePerf,
+                                                         IMG_UINT32               ui32KCCBSizeLog2,
+                                                         IMG_UINT32               ui32AvailablePowUnitsMask,
+                                                         IMG_UINT32               ui32AvailableRACMask)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       eError = RGXSetupFwOsData(psDeviceNode,
+                                                         ui32KCCBSizeLog2,
+                                                         ui32HWRDebugDumpLimit,
+                                                         ui32FwOsCfgFlags);
+       PVR_LOG_GOTO_IF_ERROR(eError, "Setting up firmware os data", fail);
+
+       if (PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               /* Guest drivers do not configure system-wide firmware data */
+               psDevInfo->psRGXFWIfSysInit = NULL;
+       }
+       else
+       {
+               /* Native and Host drivers must initialise the firmware's system data */
+               eError = RGXSetupFwSysData(psDeviceNode,
+                                                                  bEnableSignatureChecks,
+                                                                  ui32SignatureChecksBufSize,
+                                                                  ui32HWPerfFWBufSizeKB,
+                                                                  ui64HWPerfFilter,
+                                                                  ui32ConfigFlags,
+                                                                  ui32ConfigFlagsExt,
+                                                                  ui32LogType,
+                                                                  ui32FilterFlags,
+                                                                  ui32JonesDisableMask,
+                                                                  ui32HWPerfCountersDataSize,
+                                                                  ui32RenderKillingCtl,
+                                                                  ui32CDMTDMKillingCtl,
+                                                                  pui32TPUTrilinearFracMask,
+                                                                  pui32USRMNumRegions,
+                                                                  pui64UVBRMNumRegions,
+                                                                  eRGXRDPowerIslandConf,
+                                                                  eFirmwarePerf,
+                                                                  ui32AvailablePowUnitsMask,
+                                                                  ui32AvailableRACMask);
+               PVR_LOG_GOTO_IF_ERROR(eError, "Setting up firmware system data", fail);
+       }
+
+       psDevInfo->bFirmwareInitialised = IMG_TRUE;
+
+#if defined(PDUMP)
+       RGXPDumpLoadFWInitData(psDevInfo,
+                                                  ui32HWPerfCountersDataSize,
+                                                  ui32RenderKillingCtl,
+                                                  ui32CDMTDMKillingCtl,
+                                                  bEnableSignatureChecks);
+#endif /* PDUMP */
+
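+       /* NOTE: the success path intentionally falls through the 'fail' label with eError == PVRSRV_OK */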
+fail:
+       return eError;
+}
+
+/*!
+*******************************************************************************
+ @Function    RGXFreeFwSysData
+
+ @Description Frees all system-wide firmware-related data
+
+ @Input       psDevInfo
+******************************************************************************/
+static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
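+       /* Mark the firmware as no longer initialised before tearing down its system data */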
+       psDevInfo->bFirmwareInitialised = IMG_FALSE;
+
+       if (psDevInfo->psRGXFWAlignChecksMemDesc)
+       {
+               RGXFWFreeAlignChecks(psDevInfo);
+       }
+
+#if defined(PDUMP)
+       if (psDevInfo->psRGXFWSigTDMChecksMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSigTDMChecksMemDesc);
+               psDevInfo->psRGXFWSigTDMChecksMemDesc = NULL;
+       }
+
+       if (psDevInfo->psRGXFWSigTAChecksMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSigTAChecksMemDesc);
+               psDevInfo->psRGXFWSigTAChecksMemDesc = NULL;
+       }
+
+       if (psDevInfo->psRGXFWSig3DChecksMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSig3DChecksMemDesc);
+               psDevInfo->psRGXFWSig3DChecksMemDesc = NULL;
+       }
+
+       if (psDevInfo->psRGXFWSigCDMChecksMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSigCDMChecksMemDesc);
+               psDevInfo->psRGXFWSigCDMChecksMemDesc = NULL;
+       }
+
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) &&
+               RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) > 1 &&
+          psDevInfo->psRGXFWSigRDMChecksMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSigRDMChecksMemDesc);
+               psDevInfo->psRGXFWSigRDMChecksMemDesc = NULL;
+       }
+
+#if defined(SUPPORT_VALIDATION)
+       if (psDevInfo->psRGXFWValidationSigMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWValidationSigMemDesc);
+               psDevInfo->psRGXFWValidationSigMemDesc = NULL;
+       }
+#endif
+#endif
+
+#if defined(SUPPORT_FIRMWARE_GCOV)
+       if (psDevInfo->psFirmwareGcovBufferMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psFirmwareGcovBufferMemDesc);
+               psDevInfo->psFirmwareGcovBufferMemDesc = NULL;
+       }
+#endif
+
+#if defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK)
+       RGXSetupFaultReadRegisterRollback(psDevInfo);
+#endif
+
+       if (psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc)
+       {
+               if (psDevInfo->psRGXFWIfGpuUtilFWCb != NULL)
+               {
+                       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc);
+                       psDevInfo->psRGXFWIfGpuUtilFWCb = NULL;
+               }
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc);
+               psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc = NULL;
+       }
+
+       if (psDevInfo->psRGXFWIfRuntimeCfgMemDesc)
+       {
+               if (psDevInfo->psRGXFWIfRuntimeCfg != NULL)
+               {
+                       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfRuntimeCfgMemDesc);
+                       psDevInfo->psRGXFWIfRuntimeCfg = NULL;
+               }
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfRuntimeCfgMemDesc);
+               psDevInfo->psRGXFWIfRuntimeCfgMemDesc = NULL;
+       }
+
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE))
+       {
+               if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc)
+               {
+                       DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfCorememDataStoreMemDesc);
+                       psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL;
+               }
+       }
+
+       if (psDevInfo->psRGXFWIfTraceBufCtlMemDesc)
+       {
+               if (psDevInfo->psRGXFWIfTraceBufCtl != NULL)
+               {
+                       /* first deinit/free the tracebuffer allocation */
+                       RGXTraceBufferDeinit(psDevInfo);
+
+#if defined(SUPPORT_POWMON_COMPONENT) && defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS)
+                       /* second free the powmon log buffer if used */
+                       RGXPowmonBufferDeinit(psDevInfo);
+#endif
+
+                       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufCtlMemDesc);
+                       psDevInfo->psRGXFWIfTraceBufCtl = NULL;
+               }
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfTraceBufCtlMemDesc);
+               psDevInfo->psRGXFWIfTraceBufCtlMemDesc = NULL;
+       }
+
+       if (psDevInfo->psRGXFWIfFwSysDataMemDesc)
+       {
+               if (psDevInfo->psRGXFWIfFwSysData != NULL)
+               {
+                       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfFwSysDataMemDesc);
+                       psDevInfo->psRGXFWIfFwSysData = NULL;
+               }
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfFwSysDataMemDesc);
+               psDevInfo->psRGXFWIfFwSysDataMemDesc = NULL;
+       }
+
+#if defined(SUPPORT_TBI_INTERFACE)
+       if (psDevInfo->psRGXFWIfTBIBufferMemDesc)
+       {
+               RGXTBIBufferDeinit(psDevInfo);
+       }
+#endif
+
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+       if (psDevInfo->psRGXFWIfRegCfgMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfRegCfgMemDesc);
+               psDevInfo->psRGXFWIfRegCfgMemDesc = NULL;
+       }
+#endif
+       if (psDevInfo->psRGXFWIfHWPerfCountersMemDesc)
+       {
+               RGXUnsetFirmwareAddress(psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
+               psDevInfo->psRGXFWIfHWPerfCountersMemDesc = NULL;
+       }
+
+#if defined(SUPPORT_SECURITY_VALIDATION)
+       if (psDevInfo->psRGXFWIfNonSecureBufMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfNonSecureBufMemDesc);
+               psDevInfo->psRGXFWIfNonSecureBufMemDesc = NULL;
+       }
+
+       if (psDevInfo->psRGXFWIfSecureBufMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfSecureBufMemDesc);
+               psDevInfo->psRGXFWIfSecureBufMemDesc = NULL;
+       }
+#endif
+
+       /* Free the SLC3 fence object */
+       _FreeSLC3Fence(psDevInfo);
+
+#if defined(SUPPORT_PDVFS)
+       if (psDevInfo->psRGXFWIFCoreClkRateMemDesc)
+       {
+               if (psDevInfo->pui32RGXFWIFCoreClkRate != NULL)
+               {
+                       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIFCoreClkRateMemDesc);
+                       psDevInfo->pui32RGXFWIFCoreClkRate = NULL;
+               }
+
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIFCoreClkRateMemDesc);
+               psDevInfo->psRGXFWIFCoreClkRateMemDesc = NULL;
+       }
+#endif
+}
+
+/*!
+*******************************************************************************
+ @Function    RGXFreeFwOsData
+
+ @Description Frees all OS-specific firmware-related data
+
+ @Input       psDevInfo
+******************************************************************************/
+static void RGXFreeFwOsData(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       RGXFreeCCBReturnSlots(psDevInfo,
+                             &psDevInfo->pui32KernelCCBRtnSlots,
+                             &psDevInfo->psKernelCCBRtnSlotsMemDesc);
+       RGXFreeCCB(psDevInfo,
+                  &psDevInfo->psKernelCCBCtl,
+                  &psDevInfo->psKernelCCBCtlMemDesc,
+                  &psDevInfo->psKernelCCB,
+                  &psDevInfo->psKernelCCBMemDesc);
+
+       RGXFreeCCB(psDevInfo,
+                  &psDevInfo->psFirmwareCCBCtl,
+                  &psDevInfo->psFirmwareCCBCtlMemDesc,
+                  &psDevInfo->psFirmwareCCB,
+                  &psDevInfo->psFirmwareCCBMemDesc);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       RGXFreeCCB(psDevInfo,
+                  &psDevInfo->psWorkEstFirmwareCCBCtl,
+                  &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc,
+                  &psDevInfo->psWorkEstFirmwareCCB,
+                  &psDevInfo->psWorkEstFirmwareCCBMemDesc);
+#endif
+
+       if (psDevInfo->psPowSyncPrim != NULL)
+       {
+               SyncPrimFree(psDevInfo->psPowSyncPrim);
+               psDevInfo->psPowSyncPrim = NULL;
+       }
+
+       if (psDevInfo->hSyncPrimContext != (IMG_HANDLE) NULL)
+       {
+               SyncPrimContextDestroy(psDevInfo->hSyncPrimContext);
+               psDevInfo->hSyncPrimContext = (IMG_HANDLE) NULL;
+       }
+
+       if (psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc)
+       {
+               if (psDevInfo->psRGXFWIfHWRInfoBufCtl != NULL)
+               {
+                       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc);
+                       psDevInfo->psRGXFWIfHWRInfoBufCtl = NULL;
+               }
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc);
+               psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc = NULL;
+       }
+
+       if (psDevInfo->psRGXFWIfFwOsDataMemDesc)
+       {
+               if (psDevInfo->psRGXFWIfFwOsData != NULL)
+               {
+                       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfFwOsDataMemDesc);
+                       psDevInfo->psRGXFWIfFwOsData = NULL;
+               }
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfFwOsDataMemDesc);
+               psDevInfo->psRGXFWIfFwOsDataMemDesc = NULL;
+       }
+
+       if (psDevInfo->psCompletedMemDesc)
+       {
+               if (psDevInfo->pui32CompletedById)
+               {
+                       DevmemReleaseCpuVirtAddr(psDevInfo->psCompletedMemDesc);
+                       psDevInfo->pui32CompletedById = NULL;
+               }
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psCompletedMemDesc);
+               psDevInfo->psCompletedMemDesc = NULL;
+       }
+       if (psDevInfo->psEndTimeMemDesc)
+       {
+               if (psDevInfo->pui64EndTimeById)
+               {
+                       DevmemReleaseCpuVirtAddr(psDevInfo->psEndTimeMemDesc);
+                       psDevInfo->pui64EndTimeById = NULL;
+               }
+
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psEndTimeMemDesc);
+               psDevInfo->psEndTimeMemDesc = NULL;
+       }
+       if (psDevInfo->psStartTimeMemDesc)
+       {
+               if (psDevInfo->pui64StartTimeById)
+               {
+                       DevmemReleaseCpuVirtAddr(psDevInfo->psStartTimeMemDesc);
+                       psDevInfo->pui64StartTimeById = NULL;
+               }
+
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psStartTimeMemDesc);
+               psDevInfo->psStartTimeMemDesc = NULL;
+       }
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+       if (psDevInfo->hTimerQueryLock)
+       {
+               OSLockDestroy(psDevInfo->hTimerQueryLock);
+               psDevInfo->hTimerQueryLock = NULL;
+       }
+#endif
+
+       if (psDevInfo->psRGXFWHeapGuardPageReserveMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWHeapGuardPageReserveMemDesc);
+       }
+}
+
+/*!
+*******************************************************************************
+ @Function    RGXFreeFirmware
+
+ @Description Frees all the firmware-related allocations
+
+ @Input       psDevInfo
+******************************************************************************/
+void RGXFreeFirmware(PVRSRV_RGXDEV_INFO        *psDevInfo)
+{
+       RGXFreeFwOsData(psDevInfo);
+
+       if (psDevInfo->psRGXFWIfConnectionCtl)
+       {
+               DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfConnectionCtlMemDesc);
+               psDevInfo->psRGXFWIfConnectionCtl = NULL;
+       }
+
+       if (psDevInfo->psRGXFWIfConnectionCtlMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfConnectionCtlMemDesc);
+               psDevInfo->psRGXFWIfConnectionCtlMemDesc = NULL;
+       }
+
+       if (psDevInfo->psRGXFWIfOsInit)
+       {
+               DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfOsInitMemDesc);
+               psDevInfo->psRGXFWIfOsInit = NULL;
+       }
+
+       if (psDevInfo->psRGXFWIfOsInitMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfOsInitMemDesc);
+               psDevInfo->psRGXFWIfOsInitMemDesc = NULL;
+       }
+
+       RGXFreeFwSysData(psDevInfo);
+       if (psDevInfo->psRGXFWIfSysInit)
+       {
+               DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfSysInitMemDesc);
+               psDevInfo->psRGXFWIfSysInit = NULL;
+       }
+
+       if (psDevInfo->psRGXFWIfSysInitMemDesc)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfSysInitMemDesc);
+               psDevInfo->psRGXFWIfSysInitMemDesc = NULL;
+       }
+}
+
+/******************************************************************************
+ FUNCTION      : RGXAcquireKernelCCBSlot
+
+ PURPOSE       : Attempts to obtain a slot in the Kernel CCB
+
+ PARAMETERS    : psDevInfo     RGX device info
+                       : psKCCBCtl     Kernel CCB control structure
+                       : pui32Offset   Receives the write offset of the acquired slot
+
+ RETURNS       : PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXAcquireKernelCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                       const RGXFWIF_CCB_CTL *psKCCBCtl,
+                                                                                       IMG_UINT32              *pui32Offset)
+{
+       IMG_UINT32      ui32OldWriteOffset, ui32NextWriteOffset;
+#if defined(PDUMP)
+       const DEVMEM_MEMDESC *psKCCBCtrlMemDesc = psDevInfo->psKernelCCBCtlMemDesc;
+#endif
+
+       ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset;
+       ui32NextWriteOffset = (ui32OldWriteOffset + 1) & psKCCBCtl->ui32WrapMask;
+
+#if defined(PDUMP)
+       /* Wait for sufficient CCB space to become available */
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, 0,
+                             "Wait for kCCB woff=%u", ui32NextWriteOffset);
+       DevmemPDumpCBP(psKCCBCtrlMemDesc,
+                      offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset),
+                      ui32NextWriteOffset,
+                      1,
+                      (psKCCBCtl->ui32WrapMask + 1));
+#endif
+
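+       /* The kernel CCB is full when advancing the write offset would make it
+        * equal to the read offset; one slot is always left empty so that a
+        * full CCB can be distinguished from an empty one.
+        */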
+       if (ui32NextWriteOffset == psKCCBCtl->ui32ReadOffset)
+       {
+               return PVRSRV_ERROR_KERNEL_CCB_FULL;
+       }
+       *pui32Offset = ui32NextWriteOffset;
+       return PVRSRV_OK;
+}
+
+/******************************************************************************
+ FUNCTION      : RGXPollKernelCCBSlot
+
+ PURPOSE       : Poll for space in Kernel CCB
+
+ PARAMETERS    : psKCCBCtrlMemDesc     Kernel CCB control memory descriptor
+                       : psKCCBCtl             Kernel CCB control structure
+
+ RETURNS       : PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXPollKernelCCBSlot(const DEVMEM_MEMDESC *psKCCBCtrlMemDesc,
+                                                                                const RGXFWIF_CCB_CTL *psKCCBCtl)
+{
+       IMG_UINT32      ui32OldWriteOffset, ui32NextWriteOffset;
+
+       ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset;
+       ui32NextWriteOffset = (ui32OldWriteOffset + 1) & psKCCBCtl->ui32WrapMask;
+
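+       /* Poll until the firmware frees a slot by advancing the read offset, or the timeout expires */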
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+
+               if (ui32NextWriteOffset != psKCCBCtl->ui32ReadOffset)
+               {
+                       return PVRSRV_OK;
+               }
+
+               /*
+                * The following check doesn't impact performance, since the
+                * CPU has to wait for the GPU anyway (full kernel CCB).
+                */
+               if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+               {
+                       return PVRSRV_ERROR_KERNEL_CCB_FULL;
+               }
+
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       return PVRSRV_ERROR_KERNEL_CCB_FULL;
+}
+
+/******************************************************************************
+ FUNCTION      : RGXGetCmdMemCopySize
+
+ PURPOSE       : Calculates the actual size of the KCCB command being used
+
+ PARAMETERS    : eCmdType     Type of KCCB command
+
+ RETURNS       : Actual size of the KCCB command on success, zero otherwise
+******************************************************************************/
+static IMG_UINT32 RGXGetCmdMemCopySize(RGXFWIF_KCCB_CMD_TYPE eCmdType)
+{
+       /* First get the offset of uCmdData inside the RGXFWIF_KCCB_CMD struct.
+        * This accounts for the alignment requirement of the uCmdData union.
+        *
+        * Then add the command-data size for the given command type to obtain
+        * the actual number of bytes that need to be copied.
+        *
+        * NOTE: uCmdData must remain the last member of the RGXFWIF_KCCB_CMD struct.
+        */
+       switch (eCmdType)
+       {
+               case RGXFWIF_KCCB_CMD_KICK:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_KICK_DATA);
+               }
+               case RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK_DATA);
+               }
+               case RGXFWIF_KCCB_CMD_MMUCACHE:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_MMUCACHEDATA);
+               }
+#if defined(SUPPORT_USC_BREAKPOINT)
+               case RGXFWIF_KCCB_CMD_BP:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_BPDATA);
+               }
+#endif
+               case RGXFWIF_KCCB_CMD_SLCFLUSHINVAL:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_SLCFLUSHINVALDATA);
+               }
+               case RGXFWIF_KCCB_CMD_CLEANUP:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_CLEANUP_REQUEST);
+               }
+               case RGXFWIF_KCCB_CMD_POW:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_POWER_REQUEST);
+               }
+               case RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE:
+               case RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_ZSBUFFER_BACKING_DATA);
+               }
+               case RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_FREELIST_GS_DATA);
+               }
+               case RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_FREELISTS_RECONSTRUCTION_DATA);
+               }
+               case RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_WRITE_OFFSET_UPDATE_DATA);
+               }
+               case RGXFWIF_KCCB_CMD_FORCE_UPDATE:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA);
+               }
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+               case RGXFWIF_KCCB_CMD_REGCONFIG:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_REGCONFIG_DATA);
+               }
+#endif
+#if defined(SUPPORT_PDVFS)
+               case RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_PDVFS_MAX_FREQ_DATA);
+               }
+#endif
+               case RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_OS_STATE_CHANGE_DATA);
+               }
+               case RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CTRL);
+               }
+               case RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS);
+               }
+               case RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CTRL_BLKS);
+               }
+               case RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_CORECLKSPEEDCHANGE_DATA);
+               }
+               case RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE:
+               case RGXFWIF_KCCB_CMD_WDG_CFG:
+               case RGXFWIF_KCCB_CMD_HEALTH_CHECK:
+               case RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL:
+               case RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT:
+               {
+                       /* No command specific data */
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData);
+               }
+               case RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_DEV_VIRTADDR);
+               }
+#if defined(SUPPORT_VALIDATION)
+               case RGXFWIF_KCCB_CMD_RGXREG:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_RGXREG_DATA);
+               }
+               case RGXFWIF_KCCB_CMD_GPUMAP:
+               {
+                       return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_GPUMAP_DATA);
+               }
+#endif
+               default:
+               {
+                       /* Invalid, unused, or newly added command type */
+                       return 0; /* Error */
+               }
+       }
+}
+
+PVRSRV_ERROR RGXWaitForKCCBSlotUpdate(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                      IMG_UINT32 ui32SlotNum,
+                                                                         IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError;
+
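+       /* Block until the firmware marks the given return slot as 'command executed' */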
+       eError = PVRSRVWaitForValueKM(
+                     (IMG_UINT32 __iomem *)&psDevInfo->pui32KernelCCBRtnSlots[ui32SlotNum],
+                                 RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED,
+                                 RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED);
+       PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVWaitForValueKM");
+
+#if defined(PDUMP)
+       /* PDump conditions must match those in RGXSendCommandRaw so that the actual command and this poll stay in step */
+       if (PDumpCheckFlagsWrite(psDevInfo->psDeviceNode, ui32PDumpFlags))
+       {
+               PDUMPCOMMENT(psDevInfo->psDeviceNode, "Poll on KCCB slot %u for value %u (mask: 0x%x)", ui32SlotNum,
+                                        RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED, RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED);
+
+               eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBRtnSlotsMemDesc,
+                                                                               ui32SlotNum * sizeof(IMG_UINT32),
+                                                                               RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED,
+                                                                               RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED,
+                                                                               PDUMP_POLL_OPERATOR_EQUAL,
+                                                                               ui32PDumpFlags);
+               PVR_LOG_IF_ERROR(eError, "DevmemPDumpDevmemPol32");
+
+       }
+#else
+       PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+#endif
+
+       return eError;
+}
+
+static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO  *psDevInfo,
+                                                                         RGXFWIF_KCCB_CMD    *psKCCBCmd,
+                                                                         IMG_UINT32          uiPDumpFlags,
+                                                                         IMG_UINT32          *pui32CmdKCCBSlot)
+{
+       PVRSRV_ERROR            eError;
+       PVRSRV_DEVICE_NODE      *psDeviceNode = psDevInfo->psDeviceNode;
+       RGXFWIF_CCB_CTL         *psKCCBCtl = psDevInfo->psKernelCCBCtl;
+       IMG_UINT8                       *pui8KCCB = psDevInfo->psKernelCCB;
+       IMG_UINT32                      ui32NewWriteOffset;
+       IMG_UINT32                      ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset;
+       IMG_UINT32                      ui32CmdMemCopySize;
+
+#if !defined(PDUMP)
+       PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+#else
+       IMG_BOOL bContCaptureOn = PDumpCheckFlagsWrite(psDeviceNode, PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER); /* client connected or in pdump init phase */
+       IMG_BOOL bPDumpEnabled = PDumpCheckFlagsWrite(psDeviceNode, uiPDumpFlags); /* Are we in capture range or continuous and not in a power transition */
+
+       if (bContCaptureOn)
+       {
+               /* in capture range */
+               if (bPDumpEnabled)
+               {
+                       if (!psDevInfo->bDumpedKCCBCtlAlready)
+                       {
+                               /* entering capture range */
+                               psDevInfo->bDumpedKCCBCtlAlready = IMG_TRUE;
+
+                               /* Wait for the live FW to catch up */
+                               PVR_DPF((PVR_DBG_MESSAGE, "%s: waiting on fw to catch-up, roff: %d, woff: %d",
+                                               __func__,
+                                               psKCCBCtl->ui32ReadOffset, ui32OldWriteOffset));
+                               PVRSRVPollForValueKM(psDeviceNode,
+                                                    (IMG_UINT32 __iomem *)&psKCCBCtl->ui32ReadOffset,
+                                                    ui32OldWriteOffset, 0xFFFFFFFF,
+                                                    POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP);
+
+                               /* Dump Init state of Kernel CCB control (read and write offset) */
+                               PDUMPCOMMENTWITHFLAGS(psDeviceNode, uiPDumpFlags,
+                                               "Initial state of kernel CCB Control, roff: %d, woff: %d",
+                                               psKCCBCtl->ui32ReadOffset, psKCCBCtl->ui32WriteOffset);
+
+                               DevmemPDumpLoadMem(psDevInfo->psKernelCCBCtlMemDesc,
+                                               0,
+                                               sizeof(RGXFWIF_CCB_CTL),
+                                               uiPDumpFlags);
+                       }
+               }
+       }
+#endif
+
+#if defined(SUPPORT_AUTOVZ)
+       if (!((KM_FW_CONNECTION_IS(READY, psDevInfo) && KM_OS_CONNECTION_IS(READY, psDevInfo)) ||
+               (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && KM_OS_CONNECTION_IS(ACTIVE, psDevInfo))) &&
+               !PVRSRV_VZ_MODE_IS(NATIVE))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: The firmware-driver connection is invalid: "
+                                                               "driver state = %u / firmware state = %u; "
+                                                               "expected READY (%u/%u) or ACTIVE (%u/%u);",
+                                                               __func__, KM_GET_OS_CONNECTION(psDevInfo), KM_GET_FW_CONNECTION(psDevInfo),
+                                                               RGXFW_CONNECTION_OS_READY, RGXFW_CONNECTION_FW_READY,
+                                                               RGXFW_CONNECTION_OS_ACTIVE, RGXFW_CONNECTION_FW_ACTIVE));
+               eError = PVRSRV_ERROR_PVZ_OSID_IS_OFFLINE;
+               goto _RGXSendCommandRaw_Exit;
+       }
+#endif
+
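+       /* Sanity check: the command size recorded in the kernel CCB control structure must match the host-side command structure */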
+       PVR_ASSERT(sizeof(RGXFWIF_KCCB_CMD) == psKCCBCtl->ui32CmdSize);
+       if (!OSLockIsLocked(psDeviceNode->hPowerLock))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s called without power lock held!",
+                               __func__));
+               PVR_ASSERT(OSLockIsLocked(psDeviceNode->hPowerLock));
+       }
+
+       /* Acquire a slot in the CCB */
+       eError = RGXAcquireKernelCCBSlot(psDevInfo, psKCCBCtl, &ui32NewWriteOffset);
+       if (eError != PVRSRV_OK)
+       {
+               goto _RGXSendCommandRaw_Exit;
+       }
+
+       /* Calculate actual size of command to optimize device mem copy */
+       ui32CmdMemCopySize = RGXGetCmdMemCopySize(psKCCBCmd->eCmdType);
+       PVR_LOG_RETURN_IF_FALSE(ui32CmdMemCopySize != 0, "RGXGetCmdMemCopySize failed", PVRSRV_ERROR_INVALID_CCB_COMMAND);
+
+       /* Copy the command into the CCB */
+       OSCachedMemCopyWMB(&pui8KCCB[ui32OldWriteOffset * psKCCBCtl->ui32CmdSize],
+                       psKCCBCmd, ui32CmdMemCopySize);
+
+       /* If non-NULL pui32CmdKCCBSlot passed-in, return the kCCB slot in which the command was enqueued */
+       if (pui32CmdKCCBSlot)
+       {
+               *pui32CmdKCCBSlot = ui32OldWriteOffset;
+
+               /* Each such command enqueue must reset the slot value first so that the
+                * caller never sees a stale value in the allotted slot */
+               OSWriteDeviceMem32WithWMB(&psDevInfo->pui32KernelCCBRtnSlots[ui32OldWriteOffset],
+                                         RGXFWIF_KCCB_RTN_SLOT_NO_RESPONSE);
+#if defined(PDUMP)
+               PDUMPCOMMENTWITHFLAGS(psDeviceNode, uiPDumpFlags,
+                                                         "Reset kCCB slot number %u", ui32OldWriteOffset);
+               DevmemPDumpLoadMem(psDevInfo->psKernelCCBRtnSlotsMemDesc,
+                                  ui32OldWriteOffset * sizeof(IMG_UINT32),
+                                                  sizeof(IMG_UINT32),
+                                                  uiPDumpFlags);
+#endif
+               PVR_DPF((PVR_DBG_MESSAGE, "%s: Device (%p) KCCB slot %u reset with value %u for command type %x",
+                        __func__, psDevInfo, ui32OldWriteOffset, RGXFWIF_KCCB_RTN_SLOT_NO_RESPONSE, psKCCBCmd->eCmdType));
+       }
+
+       /* Move past the current command */
+       psKCCBCtl->ui32WriteOffset = ui32NewWriteOffset;
+       OSWriteMemoryBarrier(&psKCCBCtl->ui32WriteOffset);
+
+#if defined(PDUMP)
+       if (bContCaptureOn)
+       {
+               /* in capture range */
+               if (bPDumpEnabled)
+               {
+                       /* Dump new Kernel CCB content */
+                       PDUMPCOMMENTWITHFLAGS(psDeviceNode,
+                                       uiPDumpFlags, "Dump kCCB cmd woff = %d",
+                                       ui32OldWriteOffset);
+                       DevmemPDumpLoadMem(psDevInfo->psKernelCCBMemDesc,
+                                       ui32OldWriteOffset * psKCCBCtl->ui32CmdSize,
+                                       ui32CmdMemCopySize,
+                                       uiPDumpFlags);
+
+                       /* Dump new kernel CCB write offset */
+                       PDUMPCOMMENTWITHFLAGS(psDeviceNode,
+                                       uiPDumpFlags, "Dump kCCBCtl woff: %d",
+                                       ui32NewWriteOffset);
+                       DevmemPDumpLoadMem(psDevInfo->psKernelCCBCtlMemDesc,
+                                       offsetof(RGXFWIF_CCB_CTL, ui32WriteOffset),
+                                       sizeof(IMG_UINT32),
+                                       uiPDumpFlags);
+
+                       /* mimic the read-back of the write from above */
+                       DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBCtlMemDesc,
+                                       offsetof(RGXFWIF_CCB_CTL, ui32WriteOffset),
+                                       ui32NewWriteOffset,
+                                       0xFFFFFFFF,
+                                       PDUMP_POLL_OPERATOR_EQUAL,
+                                       uiPDumpFlags);
+               }
+               /* out of capture range */
+               else
+               {
+                       eError = RGXPdumpDrainKCCB(psDevInfo, ui32OldWriteOffset);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXPdumpDrainKCCB", _RGXSendCommandRaw_Exit);
+               }
+       }
+#endif
+
+
+       PDUMPCOMMENTWITHFLAGS(psDeviceNode, uiPDumpFlags, "MTS kick for kernel CCB");
+       /*
+        * Kick the MTS to schedule the firmware.
+        */
+       __MTSScheduleWrite(psDevInfo, RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK);
+
+       PDUMPREG32(psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_MTS_SCHEDULE,
+                  RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK, uiPDumpFlags);
+
+#if defined(SUPPORT_AUTOVZ)
+       RGXUpdateAutoVzWdgToken(psDevInfo);
+#endif
+
+#if defined(NO_HARDWARE)
+       /* keep the read offset updated because the FW isn't there to update it */
+       psKCCBCtl->ui32ReadOffset = psKCCBCtl->ui32WriteOffset;
+#endif
+
+_RGXSendCommandRaw_Exit:
+       return eError;
+}
+
+/******************************************************************************
+ FUNCTION      : _AllocDeferredCommand
+
+ PURPOSE       : Allocate a KCCB command and add it to KCCB deferred list
+
+ PARAMETERS    : psDevInfo     RGX device info
+                       : eKCCBType             Firmware Command type
+                       : psKCCBCmd             Firmware Command
+                       : uiPDumpFlags  Pdump flags
+
+ RETURNS       : PVRSRV_OK on success, PVRSRV_ERROR_RETRY otherwise.
+******************************************************************************/
+static PVRSRV_ERROR _AllocDeferredCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                          RGXFWIF_KCCB_CMD   *psKCCBCmd,
+                                          IMG_UINT32         uiPDumpFlags)
+{
+       RGX_DEFERRED_KCCB_CMD *psDeferredCommand;
+       OS_SPINLOCK_FLAGS uiFlags;
+
+       psDeferredCommand = OSAllocMem(sizeof(*psDeferredCommand));
+
+       if (!psDeferredCommand)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "Deferring a KCCB command failed: allocation failure: requesting retry"));
+               return PVRSRV_ERROR_RETRY;
+       }
+
+       psDeferredCommand->sKCCBcmd = *psKCCBCmd;
+       psDeferredCommand->uiPDumpFlags = uiPDumpFlags;
+       psDeferredCommand->psDevInfo = psDevInfo;
+
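+       /* Append to the tail of the deferred list under the spinlock so the original submission order is preserved */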
+       OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
+       dllist_add_to_tail(&(psDevInfo->sKCCBDeferredCommandsListHead), &(psDeferredCommand->sListNode));
+       psDevInfo->ui32KCCBDeferredCommandsCount++;
+       OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
+
+       return PVRSRV_OK;
+}
+
+/******************************************************************************
+ FUNCTION      : _FreeDeferredCommand
+
+ PURPOSE       : Remove a sent deferred KCCB command from the deferred list and free it
+
+ PARAMETERS    : psNode                        Node in deferred list
+                       : psDeferredKCCBCmd     KCCB Command to free
+
+ RETURNS       : None
+******************************************************************************/
+static void _FreeDeferredCommand(DLLIST_NODE *psNode, RGX_DEFERRED_KCCB_CMD *psDeferredKCCBCmd)
+{
+       dllist_remove_node(psNode);
+       psDeferredKCCBCmd->psDevInfo->ui32KCCBDeferredCommandsCount--;
+       OSFreeMem(psDeferredKCCBCmd);
+}
+
+/******************************************************************************
+ FUNCTION      : RGXSendCommandsFromDeferredList
+
+ PURPOSE       : Try to send the KCCB commands in the deferred list to the KCCB.
+                 Must be called with the power lock held.
+
+ PARAMETERS    : psDevInfo     RGX device info
+                       : bPoll         Poll for space in KCCB
+
+ RETURNS       : PVRSRV_OK if all commands in the deferred list were sent to the KCCB,
+                         PVRSRV_ERROR_KERNEL_CCB_FULL otherwise.
+******************************************************************************/
+PVRSRV_ERROR RGXSendCommandsFromDeferredList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bPoll)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       DLLIST_NODE *psNode, *psNext;
+       RGX_DEFERRED_KCCB_CMD *psTempDeferredKCCBCmd;
+       DLLIST_NODE sCommandList;
+       OS_SPINLOCK_FLAGS uiFlags;
+
+       PVR_ASSERT(PVRSRVPwrLockIsLockedByMe(psDevInfo->psDeviceNode));
+
+       /* !!! Important !!!
+        *
+        * Moving the whole sKCCBDeferredCommandsListHead list below to a
+        * temporary list is only valid because all of these operations are
+        * also protected by the power lock. The power lock must be held so
+        * that the order of the commands doesn't get mixed up while we're
+        * operating on the local list.
+        *
+        * The hLockKCCBDeferredCommandsList spin lock has to be released
+        * because _FreeDeferredCommand() frees memory, which cannot be done
+        * in atomic context (i.e. inside a section protected by a spin lock).
+        *
+        * A spin lock is used here instead of a mutex so that the MISR can
+        * quickly check whether the list is empty without the risk of going
+        * to sleep on a lock.
+        */
+
+       /* move the whole list to a local list so it can be processed without lock */
+       OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
+       dllist_replace_head(&psDevInfo->sKCCBDeferredCommandsListHead, &sCommandList);
+       OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
+
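+       /* Keep draining the local list until it is empty or the overall HW timeout expires */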
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               if (dllist_is_empty(&sCommandList))
+               {
+                       return PVRSRV_OK;
+               }
+
+               /* For every deferred KCCB command, try to send it */
+               dllist_foreach_node(&sCommandList, psNode, psNext)
+               {
+                       psTempDeferredKCCBCmd = IMG_CONTAINER_OF(psNode, RGX_DEFERRED_KCCB_CMD, sListNode);
+                       eError = RGXSendCommandRaw(psTempDeferredKCCBCmd->psDevInfo,
+                                                  &psTempDeferredKCCBCmd->sKCCBcmd,
+                                                  psTempDeferredKCCBCmd->uiPDumpFlags,
+                                                  NULL /* the kCCB slot number of a deferred command is of no interest */);
+                       if (eError != PVRSRV_OK)
+                       {
+                               if (!bPoll)
+                               {
+                                       eError = PVRSRV_ERROR_KERNEL_CCB_FULL;
+                                       goto cleanup_;
+                               }
+                               break;
+                       }
+
+                       _FreeDeferredCommand(psNode, psTempDeferredKCCBCmd);
+               }
+
+               if (bPoll)
+               {
+                       PVRSRV_ERROR eErrPollForKCCBSlot;
+
+                       /* Don't overwrite eError: if RGXPollKernelCCBSlot returns OK but the
+                        * outer loop times out, we still want to return KCCB_FULL to the caller.
+                        */
+                       eErrPollForKCCBSlot = RGXPollKernelCCBSlot(psDevInfo->psKernelCCBCtlMemDesc,
+                                                                  psDevInfo->psKernelCCBCtl);
+                       if (eErrPollForKCCBSlot == PVRSRV_ERROR_KERNEL_CCB_FULL)
+                       {
+                               eError = PVRSRV_ERROR_KERNEL_CCB_FULL;
+                               goto cleanup_;
+                       }
+               }
+       } END_LOOP_UNTIL_TIMEOUT();
+
+cleanup_:
+       /* If the local list is not empty, put it back at the head of the deferred
+        * list so that the original order of commands is retained */
+       OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
+       dllist_insert_list_at_head(&psDevInfo->sKCCBDeferredCommandsListHead, &sCommandList);
+       OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
+
+       return eError;
+}
+
+PVRSRV_ERROR RGXSendCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO  *psDevInfo,
+                                                                                 RGXFWIF_KCCB_CMD    *psKCCBCmd,
+                                                                                 IMG_UINT32          uiPDumpFlags,
+                                                                                 IMG_UINT32          *pui32CmdKCCBSlot)
+{
+       IMG_BOOL     bPoll = (pui32CmdKCCBSlot != NULL);
+       PVRSRV_ERROR eError;
+
+       /*
+        * First try to flush all the commands on the deferred list.
+        *
+        * We cannot defer an incoming command if the caller is interested in
+        * knowing the command's kCCB slot: it plans to poll/wait for a
+        * response from the FW just after the command is enqueued, so we must
+        * poll for space to be available.
+        */
+       eError = RGXSendCommandsFromDeferredList(psDevInfo, bPoll);
+       if (eError == PVRSRV_OK)
+       {
+               eError = RGXSendCommandRaw(psDevInfo,
+                                                                  psKCCBCmd,
+                                                                  uiPDumpFlags,
+                                                                  pui32CmdKCCBSlot);
+       }
+
+       /*
+        * If the KCCB is full and we therefore fail to enqueue either one of the
+        * deferred commands or the command passed as argument, insert the latter
+        * into the deferred commands list.
+        * The deferred commands will also be flushed eventually by:
+        *  - one more KCCB command sent for any DM
+        *  - RGX_MISRHandler_CheckFWActivePowerState
+        */
+       if (eError == PVRSRV_ERROR_KERNEL_CCB_FULL)
+       {
+               if (pui32CmdKCCBSlot == NULL)
+               {
+                       eError = _AllocDeferredCommand(psDevInfo, psKCCBCmd, uiPDumpFlags);
+               }
+               else
+               {
+                       /* Let the caller retry. Otherwise if we deferred the command and returned OK,
+                        * the caller can end up looking in a stale CCB slot.
+                        */
+                       PVR_DPF((PVR_DBG_WARNING, "%s: Couldn't flush the deferred queue for a command (Type:%d) "
+                                               "- will be retried", __func__, psKCCBCmd->eCmdType));
+               }
+       }
+
+       return eError;
+}
+
+PVRSRV_ERROR RGXSendCommandWithPowLockAndGetKCCBSlot(PVRSRV_RGXDEV_INFO        *psDevInfo,
+                                                                                                        RGXFWIF_KCCB_CMD       *psKCCBCmd,
+                                                                                                        IMG_UINT32                     ui32PDumpFlags,
+                                                                                                        IMG_UINT32         *pui32CmdKCCBSlot)
+{
+       PVRSRV_ERROR            eError;
+       PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+
+       /* Ensure Rogue is powered up before kicking MTS */
+       eError = PVRSRVPowerLock(psDeviceNode);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                               "%s: failed to acquire powerlock (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+
+               goto _PVRSRVPowerLock_Exit;
+       }
+
+       PDUMPPOWCMDSTART(psDeviceNode);
+       eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+                                                                                PVRSRV_DEV_POWER_STATE_ON,
+                                                                                PVRSRV_POWER_FLAGS_NONE);
+       PDUMPPOWCMDEND(psDeviceNode);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition Rogue to ON (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+
+               goto _PVRSRVSetDevicePowerStateKM_Exit;
+       }
+
+       eError = RGXSendCommandAndGetKCCBSlot(psDevInfo,
+                                                                                 psKCCBCmd,
+                                                                                 ui32PDumpFlags,
+                                             pui32CmdKCCBSlot);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: failed to schedule command (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+#if defined(DEBUG)
+               /* PVRSRVDebugRequest must be called without powerlock */
+               PVRSRVPowerUnlock(psDeviceNode);
+               PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+               goto _PVRSRVPowerLock_Exit;
+#endif
+       }
+
+_PVRSRVSetDevicePowerStateKM_Exit:
+       PVRSRVPowerUnlock(psDeviceNode);
+
+_PVRSRVPowerLock_Exit:
+       return eError;
+}
+
+void RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*) hCmdCompHandle;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
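+       /* Defer the actual queue processing to MISR context (RGX_MISRHandler_ScheduleProcessQueues) */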
+       OSScheduleMISR(psDevInfo->hProcessQueuesMISR);
+}
+
+#if defined(SUPPORT_VALIDATION)
+PVRSRV_ERROR RGXScheduleRgxRegCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                         IMG_UINT64 ui64RegVal,
+                                                                         IMG_UINT64 ui64Size,
+                                                                         IMG_UINT32 ui32Offset,
+                                                                         IMG_BOOL bWriteOp)
+{
+       RGXFWIF_KCCB_CMD sRgxRegsCmd = {0};
+       IMG_UINT32 ui32kCCBCommandSlot;
+       PVRSRV_ERROR eError;
+
+       sRgxRegsCmd.eCmdType = RGXFWIF_KCCB_CMD_RGXREG;
+       sRgxRegsCmd.uCmdData.sFwRgxData.ui64RegVal = ui64RegVal;
+       sRgxRegsCmd.uCmdData.sFwRgxData.ui32RegWidth = ui64Size;
+       sRgxRegsCmd.uCmdData.sFwRgxData.ui32RegAddr = ui32Offset;
+       sRgxRegsCmd.uCmdData.sFwRgxData.bWriteOp = bWriteOp;
+
+       eError =  RGXScheduleCommandAndGetKCCBSlot(psDevInfo,
+                                                                                          RGXFWIF_DM_GP,
+                                                                                          &sRgxRegsCmd,
+                                                                                          PDUMP_FLAGS_CONTINUOUS,
+                                                                                          &ui32kCCBCommandSlot);
+       PVR_LOG_RETURN_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot");
+
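+       /* For register writes, wait for the FW to acknowledge the command via its kCCB return slot */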
+       if (bWriteOp)
+       {
+               eError = RGXWaitForKCCBSlotUpdate(psDevInfo,
+                                                                                 ui32kCCBCommandSlot,
+                                                 PDUMP_FLAGS_CONTINUOUS);
+               PVR_LOG_RETURN_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate");
+       }
+
+       return eError;
+}
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function     RGX_MISRHandler_ScheduleProcessQueues
+
+ @Description - Sends an uncounted kick to all the DMs (the FW will process all
+                               the queues for all the DMs)
+******************************************************************************/
+static void RGX_MISRHandler_ScheduleProcessQueues(void *pvData)
+{
+       PVRSRV_DEVICE_NODE     *psDeviceNode = pvData;
+       PVRSRV_RGXDEV_INFO     *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_ERROR           eError;
+       PVRSRV_DEV_POWER_STATE ePowerState;
+
+       eError = PVRSRVPowerLock(psDeviceNode);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)",
+                               __func__, PVRSRVGetErrorString(eError)));
+               return;
+       }
+
+       /* Check whether it's worth waking up the GPU */
+       eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+
+       if (!PVRSRV_VZ_MODE_IS(GUEST) &&
+               (eError == PVRSRV_OK) && (ePowerState == PVRSRV_DEV_POWER_STATE_OFF))
+       {
+               /* For now, guest drivers will always wake up the GPU */
+               RGXFWIF_GPU_UTIL_FWCB  *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
+               IMG_BOOL               bGPUHasWorkWaiting;
+
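+               /* The FW reports a BLOCKED utilisation state when work is waiting while the GPU is powered down */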
+               bGPUHasWorkWaiting =
+                   (RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64LastWord) == RGXFWIF_GPU_UTIL_STATE_BLOCKED);
+
+               if (!bGPUHasWorkWaiting)
+               {
+                       /* all queues are empty, don't wake up the GPU */
+                       PVRSRVPowerUnlock(psDeviceNode);
+                       return;
+               }
+       }
+
+       PDUMPPOWCMDSTART(psDeviceNode);
+       /* wake up the GPU */
+       eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+                                                                                PVRSRV_DEV_POWER_STATE_ON,
+                                                                                PVRSRV_POWER_FLAGS_NONE);
+       PDUMPPOWCMDEND(psDeviceNode);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition Rogue to ON (%s)",
+                               __func__, PVRSRVGetErrorString(eError)));
+
+               PVRSRVPowerUnlock(psDeviceNode);
+               return;
+       }
+
+       /* uncounted kick to the FW */
+       HTBLOGK(HTB_SF_MAIN_KICK_UNCOUNTED);
+       __MTSScheduleWrite(psDevInfo, (RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED);
+
+       PVRSRVPowerUnlock(psDeviceNode);
+}
+
+PVRSRV_ERROR RGXInstallProcessQueuesMISR(IMG_HANDLE *phMISR, PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       return OSInstallMISR(phMISR,
+                       RGX_MISRHandler_ScheduleProcessQueues,
+                       psDeviceNode,
+                       "RGX_ScheduleProcessQueues");
+}
+
+PVRSRV_ERROR RGXScheduleCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO  *psDevInfo,
+                                              RGXFWIF_DM          eKCCBType,
+                                              RGXFWIF_KCCB_CMD    *psKCCBCmd,
+                                              IMG_UINT32          ui32PDumpFlags,
+                                              IMG_UINT32          *pui32CmdKCCBSlot)
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 uiMMUSyncUpdate;
+#if defined(SUPPORT_VALIDATION)
+       static IMG_UINT32 ui32PowDomainFrameCounter;
+#endif
+
+       /* Don't send the command/power up request if the device is de-initialising.
+        * The de-init thread could destroy the device whilst the power up
+        * sequence below is accessing the HW registers.
+        */
+       if (unlikely((psDevInfo == NULL) ||
+                    (psDevInfo->psDeviceNode == NULL) ||
+                    (psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT)))
+       {
+               return PVRSRV_ERROR_INVALID_DEVICE;
+       }
+
+       /* PVRSRVPowerLock guarantees atomicity between commands. This is helpful
+          in a scenario with several applications allocating resources. */
+       eError = PVRSRVPowerLock(psDevInfo->psDeviceNode);
+       if (unlikely(eError != PVRSRV_OK))
+       {
+               PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)",
+                               __func__, PVRSRVGetErrorString(eError)));
+
+               /* If the system is found powered OFF, retry scheduling the command */
+               if (likely(eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF))
+               {
+                       eError = PVRSRV_ERROR_RETRY;
+               }
+
+               goto RGXScheduleCommand_exit;
+       }
+
+       if (unlikely(psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT))
+       {
+               /* If we have the power lock the device is valid but the deinit
+                * thread could be waiting for the lock. */
+               PVRSRVPowerUnlock(psDevInfo->psDeviceNode);
+               return PVRSRV_ERROR_INVALID_DEVICE;
+       }
+
+       /* Ensure device is powered up before sending any commands */
+       PDUMPPOWCMDSTART(psDevInfo->psDeviceNode);
+       eError = PVRSRVSetDevicePowerStateKM(psDevInfo->psDeviceNode,
+                                            PVRSRV_DEV_POWER_STATE_ON,
+                                            PVRSRV_POWER_FLAGS_NONE);
+       PDUMPPOWCMDEND(psDevInfo->psDeviceNode);
+       if (unlikely(eError != PVRSRV_OK))
+       {
+               PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition RGX to ON (%s)",
+                               __func__, PVRSRVGetErrorString(eError)));
+               goto _PVRSRVSetDevicePowerStateKM_Exit;
+       }
+
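+       /* Issue any cache maintenance required for this DM before the new command is sent */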
+       eError = RGXPreKickCacheCommand(psDevInfo, eKCCBType, &uiMMUSyncUpdate);
+       if (unlikely(eError != PVRSRV_OK)) goto _PVRSRVSetDevicePowerStateKM_Exit;
+
+       eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, psKCCBCmd, ui32PDumpFlags, pui32CmdKCCBSlot);
+       if (unlikely(eError != PVRSRV_OK)) goto _PVRSRVSetDevicePowerStateKM_Exit;
+
+_PVRSRVSetDevicePowerStateKM_Exit:
+       PVRSRVPowerUnlock(psDevInfo->psDeviceNode);
+
+#if defined(SUPPORT_VALIDATION)
+       /**
+        * For validation, force the core to different powered units between
+        * DM kicks. PVRSRVDeviceGPUUnitsPowerChange acquires the power lock, hence
+        * ensure that this is done after the power lock is released.
+        */
+       if ((eError == PVRSRV_OK) && (eKCCBType != RGXFWIF_DM_GP))
+       {
+               IMG_BOOL bInsertPowerDomainTransition =
+                       (psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN);
+               if (psDevInfo->ui32PowDomainKickInterval > 0)
+               {
+                       if (eKCCBType == RGXFWIF_DM_3D)
+                       {
+                               /* Insert a power domain transition every N '3D' frames */
+                               ui32PowDomainFrameCounter++;
+                               if ((ui32PowDomainFrameCounter % psDevInfo->ui32PowDomainKickInterval) == 0)
+                               {
+                                       bInsertPowerDomainTransition = IMG_TRUE;
+                               }
+                       }
+               }
+
+               if (bInsertPowerDomainTransition)
+               {
+                       IMG_UINT32 ui32PowerDomainState;
+                       IMG_BOOL bIsValid;
+                       do {
+                               ui32PowerDomainState = RGXPowerDomainGetNextState(&psDevInfo->sPowerDomainState);
+                               bIsValid = ui32PowerDomainState &&
+                                                  ((ui32PowerDomainState & ~(psDevInfo->ui32AvailablePowUnitsMask)) == 0);
+                       } while (!bIsValid);
+
+                       eError = PVRSRVDeviceGPUUnitsPowerChange(psDevInfo->psDeviceNode, ui32PowerDomainState);
+                       if (eError != PVRSRV_OK)
+                               goto RGXScheduleCommand_exit;
+               }
+       }
+#endif
+
+RGXScheduleCommand_exit:
+       return eError;
+}
+
+/*
+ * RGXCheckFirmwareCCB
+ */
+void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       RGXFWIF_CCB_CTL *psFWCCBCtl = psDevInfo->psFirmwareCCBCtl;
+       IMG_UINT8 *psFWCCB = psDevInfo->psFirmwareCCB;
+
+#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)
+       PVR_LOG_RETURN_VOID_IF_FALSE(PVRSRV_VZ_MODE_IS(NATIVE) ||
+                                                                (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) &&
+                                                                KM_OS_CONNECTION_IS(ACTIVE, psDevInfo)),
+                                                                "FW-KM connection is down");
+#endif
+
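+       /* Process every command the FW has written since the last read, advancing the read offset as we go */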
+       while (psFWCCBCtl->ui32ReadOffset != psFWCCBCtl->ui32WriteOffset)
+       {
+               /* Point to the next command */
+               const RGXFWIF_FWCCB_CMD *psFwCCBCmd = ((RGXFWIF_FWCCB_CMD *)psFWCCB) + psFWCCBCtl->ui32ReadOffset;
+
+               HTBLOGK(HTB_SF_MAIN_FWCCB_CMD, psFwCCBCmd->eCmdType);
+               switch (psFwCCBCmd->eCmdType)
+               {
+                       case RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING:
+                       {
+                               if (psDevInfo->bPDPEnabled)
+                               {
+                                       PDUMP_PANIC(psDevInfo->psDeviceNode, ZSBUFFER_BACKING, "Request to add backing to ZSBuffer");
+                               }
+                               RGXProcessRequestZSBufferBacking(psDevInfo,
+                                       psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID);
+                               break;
+                       }
+
+                       case RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING:
+                       {
+                               if (psDevInfo->bPDPEnabled)
+                               {
+                                       PDUMP_PANIC(psDevInfo->psDeviceNode, ZSBUFFER_UNBACKING, "Request to remove backing from ZSBuffer");
+                               }
+                               RGXProcessRequestZSBufferUnbacking(psDevInfo,
+                                       psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID);
+                               break;
+                       }
+
+                       case RGXFWIF_FWCCB_CMD_FREELIST_GROW:
+                       {
+                               if (psDevInfo->bPDPEnabled)
+                               {
+                                       PDUMP_PANIC(psDevInfo->psDeviceNode, FREELIST_GROW, "Request to grow the free list");
+                               }
+                               RGXProcessRequestGrow(psDevInfo,
+                                       psFwCCBCmd->uCmdData.sCmdFreeListGS.ui32FreelistID);
+                               break;
+                       }
+
+                       case RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION:
+                       {
+                               if (psDevInfo->bPDPEnabled)
+                               {
+                                       PDUMP_PANIC(psDevInfo->psDeviceNode, FREELISTS_RECONSTRUCTION, "Request to reconstruct free lists");
+                               }
+
+                               if (PVRSRV_VZ_MODE_IS(GUEST))
+                               {
+                                       PVR_DPF((PVR_DBG_MESSAGE, "%s: Freelist reconstruction request (%d) for %d freelists",
+                                                       __func__,
+                                                       psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32HwrCounter+1,
+                                                       psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount));
+                               }
+                               else
+                               {
+                                       PVR_DPF((PVR_DBG_MESSAGE, "%s: Freelist reconstruction request (%d/%d) for %d freelists",
+                                                       __func__,
+                                                       psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32HwrCounter+1,
+                                                       psDevInfo->psRGXFWIfHWRInfoBufCtl->ui32HwrCounter+1,
+                                                       psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount));
+                               }
+
+                               RGXProcessRequestFreelistsReconstruction(psDevInfo,
+                                       psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount,
+                                       psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.aui32FreelistIDs);
+                               break;
+                       }
+
+                       case RGXFWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION:
+                       {
+                               /* Notify client drivers */
+                               /* Client notification of device error will be achieved by
+                                * clients calling UM function RGXGetLastDeviceError() */
+                               psDevInfo->eLastDeviceError = RGX_CONTEXT_RESET_REASON_FW_PAGEFAULT;
+
+                               /* Notify system layer */
+                               {
+                                       PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode;
+                                       PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig;
+                                       const RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA *psCmdFwPagefault =
+                                                       &psFwCCBCmd->uCmdData.sCmdFWPagefault;
+
+                                       if (psDevConfig->pfnSysDevErrorNotify)
+                                       {
+                                               PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0};
+
+                                               sErrorData.eResetReason = RGX_CONTEXT_RESET_REASON_FW_PAGEFAULT;
+                                               sErrorData.uErrData.sFwPFErrData.sFWFaultAddr.uiAddr = psCmdFwPagefault->sFWFaultAddr.uiAddr;
+
+                                               psDevConfig->pfnSysDevErrorNotify(psDevConfig,
+                                                                                                                 &sErrorData);
+                                       }
+                               }
+                               break;
+                       }
+
+                       case RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION:
+                       {
+                               DLLIST_NODE *psNode, *psNext;
+                               const RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA *psCmdContextResetNotification =
+                                               &psFwCCBCmd->uCmdData.sCmdContextResetNotification;
+                               RGX_SERVER_COMMON_CONTEXT *psServerCommonContext = NULL;
+                               IMG_UINT32 ui32ErrorPid = 0;
+
+                               OSWRLockAcquireRead(psDevInfo->hCommonCtxtListLock);
+                               dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext)
+                               {
+                                       RGX_SERVER_COMMON_CONTEXT *psThisContext =
+                                               IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode);
+
+                                       /* If the notification applies to all contexts update reset info
+                                        * for all contexts, otherwise only do so for the appropriate ID.
+                                        */
+                                       if (psCmdContextResetNotification->ui32Flags & RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS)
+                                       {
+                                               /* Notification applies to all contexts */
+                                               psThisContext->eLastResetReason    = psCmdContextResetNotification->eResetReason;
+                                               psThisContext->ui32LastResetJobRef = psCmdContextResetNotification->ui32ResetJobRef;
+                                       }
+                                       else
+                                       {
+                                               /* Notification applies to one context only */
+                                               if (psThisContext->ui32ContextID == psCmdContextResetNotification->ui32ServerCommonContextID)
+                                               {
+                                                       psServerCommonContext = psThisContext;
+                                                       psServerCommonContext->eLastResetReason    = psCmdContextResetNotification->eResetReason;
+                                                       psServerCommonContext->ui32LastResetJobRef = psCmdContextResetNotification->ui32ResetJobRef;
+                                                       ui32ErrorPid = RGXGetPIDFromServerMMUContext(psServerCommonContext->psServerMMUContext);
+                                                       break;
+                                               }
+                                       }
+                               }
+
+                               if (psCmdContextResetNotification->ui32Flags & RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS)
+                               {
+                                       PVR_DPF((PVR_DBG_MESSAGE, "%s: All contexts reset (Reason=%d, JobRef=0x%08x)",
+                                                       __func__,
+                                                       (IMG_UINT32)(psCmdContextResetNotification->eResetReason),
+                                                       psCmdContextResetNotification->ui32ResetJobRef));
+                               }
+                               else
+                               {
+                                       PVR_DPF((PVR_DBG_MESSAGE, "%s: Context 0x%p reset (ID=0x%08x, Reason=%d, JobRef=0x%08x)",
+                                                       __func__,
+                                                       psServerCommonContext,
+                                                       psCmdContextResetNotification->ui32ServerCommonContextID,
+                                                       (IMG_UINT32)(psCmdContextResetNotification->eResetReason),
+                                                       psCmdContextResetNotification->ui32ResetJobRef));
+                               }
+
+                               /* Increment error counter (if appropriate) */
+                               if (psCmdContextResetNotification->eResetReason == RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM)
+                               {
+                                       /* Avoid wrapping the error count (which would then
+                                        * make it appear we had far fewer errors), by limiting
+                                        * it to IMG_UINT32_MAX.
+                                        */
+                                       if (psDevInfo->sErrorCounts.ui32WGPErrorCount < IMG_UINT32_MAX)
+                                       {
+                                               psDevInfo->sErrorCounts.ui32WGPErrorCount++;
+                                       }
+                               }
+                               else if (psCmdContextResetNotification->eResetReason == RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM)
+                               {
+                                       /* Avoid wrapping the error count (which would then
+                                        * make it appear we had far fewer errors), by limiting
+                                        * it to IMG_UINT32_MAX.
+                                        */
+                                       if (psDevInfo->sErrorCounts.ui32TRPErrorCount < IMG_UINT32_MAX)
+                                       {
+                                               psDevInfo->sErrorCounts.ui32TRPErrorCount++;
+                                       }
+                               }
+                               OSWRLockReleaseRead(psDevInfo->hCommonCtxtListLock);
+
+                               /* Notify system layer */
+                               {
+                                       PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode;
+                                       PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig;
+
+                                       if (psDevConfig->pfnSysDevErrorNotify)
+                                       {
+                                               PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0};
+
+                                               sErrorData.eResetReason = psCmdContextResetNotification->eResetReason;
+                                               sErrorData.pid = ui32ErrorPid;
+
+                                               /* Populate error data according to reset reason */
+                                               switch (psCmdContextResetNotification->eResetReason)
+                                               {
+                                                       case RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM:
+                                                       case RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM:
+                                                       {
+                                                               sErrorData.uErrData.sChecksumErrData.ui32ExtJobRef = psCmdContextResetNotification->ui32ResetJobRef;
+                                                               sErrorData.uErrData.sChecksumErrData.eDM = psCmdContextResetNotification->eDM;
+                                                               break;
+                                                       }
+                                                       default:
+                                                       {
+                                                               break;
+                                                       }
+                                               }
+
+                                               psDevConfig->pfnSysDevErrorNotify(psDevConfig,
+                                                                                 &sErrorData);
+                                       }
+                               }
+
+                               /* Notify if a page fault */
+                               if (psCmdContextResetNotification->ui32Flags & RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_PF)
+                               {
+                                       DevmemIntPFNotify(psDevInfo->psDeviceNode,
+                                                       psCmdContextResetNotification->ui64PCAddress,
+                                                       psCmdContextResetNotification->sFaultAddress);
+                               }
+                               break;
+                       }
+
+                       case RGXFWIF_FWCCB_CMD_DEBUG_DUMP:
+                       {
+                               PVRSRV_ERROR eError;
+                               PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+                               OSAtomicWrite(&psDevInfo->psDeviceNode->eDebugDumpRequested, PVRSRV_DEVICE_DEBUG_DUMP_CAPTURE);
+                               eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj);
+                               if (eError != PVRSRV_OK)
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s: Failed to signal FW Cmd debug dump event, dumping now instead", __func__));
+                                       PVRSRVDebugRequest(psDevInfo->psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+                               }
+                               break;
+                       }
+
+                       case RGXFWIF_FWCCB_CMD_UPDATE_STATS:
+                       {
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+                               IMG_PID pidTmp = psFwCCBCmd->uCmdData.sCmdUpdateStatsData.pidOwner;
+                               IMG_INT32 i32AdjustmentValue = psFwCCBCmd->uCmdData.sCmdUpdateStatsData.i32AdjustmentValue;
+
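+                               /* Positional arguments map to: partial renders, out-of-memory, TA stores, 3D stores, CDM stores, TDM stores */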
+                               switch (psFwCCBCmd->uCmdData.sCmdUpdateStatsData.eElementToUpdate)
+                               {
+                                       case RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS:
+                                       {
+                                               PVRSRVStatsUpdateRenderContextStats(i32AdjustmentValue,0,0,0,0,0,pidTmp);
+                                               break;
+                                       }
+                                       case RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY:
+                                       {
+                                               PVRSRVStatsUpdateRenderContextStats(0,i32AdjustmentValue,0,0,0,0,pidTmp);
+                                               break;
+                                       }
+                                       case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES:
+                                       {
+                                               PVRSRVStatsUpdateRenderContextStats(0,0,i32AdjustmentValue,0,0,0,pidTmp);
+                                               break;
+                                       }
+                                       case RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES:
+                                       {
+                                               PVRSRVStatsUpdateRenderContextStats(0,0,0,i32AdjustmentValue,0,0,pidTmp);
+                                               break;
+                                       }
+                                       case RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES:
+                                       {
+                                               PVRSRVStatsUpdateRenderContextStats(0,0,0,0,i32AdjustmentValue,0,pidTmp);
+                                               break;
+                                       }
+                                       case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES:
+                                       {
+                                               PVRSRVStatsUpdateRenderContextStats(0,0,0,0,0,i32AdjustmentValue,pidTmp);
+                                               break;
+                                       }
+                               }
+#endif
+                               break;
+                       }
+#if defined(SUPPORT_PDVFS)
+                       case RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE:
+                       {
+                               PDVFS_PROCESS_CORE_CLK_RATE_CHANGE(psDevInfo,
+                                                                                         psFwCCBCmd->uCmdData.sCmdCoreClkRateChange.ui32CoreClkRate);
+                               break;
+                       }
+#endif
+                       case RGXFWIF_FWCCB_CMD_REQUEST_GPU_RESTART:
+                       {
+                               if (psDevInfo->psRGXFWIfFwSysData != NULL  &&
+                                   psDevInfo->psRGXFWIfFwSysData->ePowState != RGXFWIF_POW_OFF)
+                               {
+                                       PVRSRV_ERROR eError;
+
+                                       /* Power down... */
+                                       eError = PVRSRVSetDeviceSystemPowerState(psDevInfo->psDeviceNode,
+                                                                                                                        PVRSRV_SYS_POWER_STATE_OFF,
+                                                                                                                        PVRSRV_POWER_FLAGS_NONE);
+                                       if (eError == PVRSRV_OK)
+                                       {
+                                               /* Clear the FW faulted flags... */
+                                               psDevInfo->psRGXFWIfFwSysData->ui32HWRStateFlags &= ~(RGXFWIF_HWR_FW_FAULT|RGXFWIF_HWR_RESTART_REQUESTED);
+                                               OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfFwSysData->ui32HWRStateFlags);
+
+                                               /* Power back up again... */
+                                               eError = PVRSRVSetDeviceSystemPowerState(psDevInfo->psDeviceNode,
+                                                                                                                                PVRSRV_SYS_POWER_STATE_ON,
+                                                                                                                                PVRSRV_POWER_FLAGS_NONE);
+
+                                               /* Send a dummy KCCB command to ensure the FW wakes up and checks the queues... */
+                                               if (eError == PVRSRV_OK)
+                                               {
+                                                       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+                                                       {
+                                                               eError = RGXFWHealthCheckCmd(psDevInfo);
+                                                               if (eError != PVRSRV_ERROR_RETRY)
+                                                               {
+                                                                       break;
+                                                               }
+                                                               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+                                                       } END_LOOP_UNTIL_TIMEOUT();
+                                               }
+                                       }
+
+                                       /* Notify client drivers and system layer of FW fault */
+                                       {
+                                               PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode;
+                                               PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig;
+
+                                               /* Client notification of device error will be achieved by
+                                                * clients calling UM function RGXGetLastDeviceError() */
+                                               psDevInfo->eLastDeviceError = RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR;
+
+                                               /* Notify system layer */
+                                               if (psDevConfig->pfnSysDevErrorNotify)
+                                               {
+                                                       PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0};
+
+                                                       sErrorData.eResetReason = RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR;
+                                                       psDevConfig->pfnSysDevErrorNotify(psDevConfig,
+                                                                                         &sErrorData);
+                                               }
+                                       }
+
+                                       if (eError != PVRSRV_OK)
+                                       {
+                                               PVR_DPF((PVR_DBG_ERROR, "%s: Failed firmware restart (%s)",
+                                                                __func__, PVRSRVGetErrorString(eError)));
+                                       }
+                               }
+                               break;
+                       }
+#if defined(SUPPORT_VALIDATION)
+                       case RGXFWIF_FWCCB_CMD_REG_READ:
+                       {
+                               psDevInfo->sFwRegs.ui64RegVal = psFwCCBCmd->uCmdData.sCmdRgxRegReadData.ui64RegValue;
+                               complete(&psDevInfo->sFwRegs.sRegComp);
+                               break;
+                       }
+#if defined(SUPPORT_SOC_TIMER)
+                       case RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS:
+                       {
+                               if (psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags & RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER)
+                               {
+                                       PVRSRV_ERROR eSOCtimerErr = RGXValidateSOCUSCTimer(psDevInfo,
+                                                                                             PDUMP_NONE,
+                                                                                             psFwCCBCmd->uCmdData.sCmdTimers.ui64timerGray,
+                                                                                             psFwCCBCmd->uCmdData.sCmdTimers.ui64timerBinary,
+                                                                                             psFwCCBCmd->uCmdData.sCmdTimers.aui64uscTimers);
+                                       if (PVRSRV_OK == eSOCtimerErr)
+                                       {
+                                               PVR_DPF((PVR_DBG_WARNING, "SoC or USC Timers have increased over time"));
+                                       }
+                                       else
+                                       {
+                                               PVR_DPF((PVR_DBG_WARNING, "SoC or USC Timers have NOT increased over time"));
+                                       }
+                               }
+                               break;
+                       }
+#endif
+#endif
+                       default:
+                       {
+                               /* unknown command */
+                               PVR_DPF((PVR_DBG_WARNING, "%s: Unknown Command (eCmdType=0x%08x)",
+                                        __func__, psFwCCBCmd->eCmdType));
+                               /* Assert on magic value corruption */
+                               PVR_ASSERT((((IMG_UINT32)psFwCCBCmd->eCmdType & RGX_CMD_MAGIC_DWORD_MASK) >> RGX_CMD_MAGIC_DWORD_SHIFT) == RGX_CMD_MAGIC_DWORD);
+                       }
+               }
+
+               /* Update read offset */
+               psFWCCBCtl->ui32ReadOffset = (psFWCCBCtl->ui32ReadOffset + 1) & psFWCCBCtl->ui32WrapMask;
+       }
+}
+
+/*
+ * PVRSRVRGXFrameworkCopyCommand
+*/
+PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(PVRSRV_DEVICE_NODE *psDeviceNode,
+               DEVMEM_MEMDESC  *psFWFrameworkMemDesc,
+               IMG_PBYTE               pbyGPUFRegisterList,
+               IMG_UINT32              ui32FrameworkRegisterSize)
+{
+       PVRSRV_ERROR    eError;
+       RGXFWIF_RF_REGISTERS    *psRFReg;
+
+       eError = DevmemAcquireCpuVirtAddr(psFWFrameworkMemDesc,
+                       (void **)&psRFReg);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to map firmware render context state (%u)",
+                               __func__, eError));
+               return eError;
+       }
+
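+       /* Copy the client-supplied register list into the FW framework state buffer */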
+       OSDeviceMemCopy(psRFReg, pbyGPUFRegisterList, ui32FrameworkRegisterSize);
+
+       /* Release the CPU mapping */
+       DevmemReleaseCpuVirtAddr(psFWFrameworkMemDesc);
+
+       /*
+        * Dump the FW framework buffer
+        */
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDeviceNode, "Dump FWFramework buffer");
+       DevmemPDumpLoadMem(psFWFrameworkMemDesc, 0, ui32FrameworkRegisterSize, PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+       return PVRSRV_OK;
+}
+
+/*
+ * PVRSRVRGXFrameworkCreateKM
+*/
+PVRSRV_ERROR PVRSRVRGXFrameworkCreateKM(PVRSRV_DEVICE_NODE     *psDeviceNode,
+               DEVMEM_MEMDESC          **ppsFWFrameworkMemDesc,
+               IMG_UINT32              ui32FrameworkCommandSize)
+{
+       PVRSRV_ERROR                    eError;
+       PVRSRV_RGXDEV_INFO              *psDevInfo = psDeviceNode->pvDevice;
+
+       /*
+               Allocate device memory for the firmware GPU framework state.
+               This buffer should contain sufficient information to kick one or more DMs.
+        */
+       PDUMPCOMMENT(psDevInfo->psDeviceNode, "Allocate Volcanic firmware framework state");
+
+       eError = DevmemFwAllocate(psDevInfo,
+                       ui32FrameworkCommandSize,
+                       RGX_FWCOMCTX_ALLOCFLAGS,
+                       "FwGPUFrameworkState",
+                       ppsFWFrameworkMemDesc);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to allocate firmware framework state (%u)",
+                               __func__, eError));
+               return eError;
+       }
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXPollForGPCommandCompletion(PVRSRV_DEVICE_NODE  *psDevNode,
+                                                                                               volatile IMG_UINT32     __iomem *pui32LinMemAddr,
+                                                                                               IMG_UINT32                      ui32Value,
+                                                                                               IMG_UINT32                      ui32Mask)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_UINT32 ui32CurrentQueueLength, ui32MaxRetries;
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDevNode->pvDevice;
+       const RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl;
+
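+       /* Upper bound on commands ahead of us: entries in the circular KCCB plus
+        * any still deferred; retry the poll once per outstanding command */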
+       ui32CurrentQueueLength = (psKCCBCtl->ui32WrapMask+1 +
+                                       psKCCBCtl->ui32WriteOffset -
+                                       psKCCBCtl->ui32ReadOffset) & psKCCBCtl->ui32WrapMask;
+       ui32CurrentQueueLength += psDevInfo->ui32KCCBDeferredCommandsCount;
+
+       for (ui32MaxRetries = ui32CurrentQueueLength + 1;
+                               ui32MaxRetries > 0;
+                               ui32MaxRetries--)
+       {
+
+               /*
+                * PVRSRVPollForValueKM is called with POLL_FLAG_NONE so that it does not
+                * generate an error message. The poll is expected to time out here, as
+                * ongoing work on the GPU may take longer than the timeout period.
+                */
+               eError = PVRSRVPollForValueKM(psDevNode, pui32LinMemAddr, ui32Value, ui32Mask, POLL_FLAG_NONE);
+               if (eError != PVRSRV_ERROR_TIMEOUT)
+               {
+                       break;
+               }
+
+               RGXSendCommandsFromDeferredList(psDevInfo, IMG_FALSE);
+       }
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed! Error(%s) CPU linear address(%p) Expected value(%u)",
+                        __func__, PVRSRVGetErrorString(eError),
+                        pui32LinMemAddr, ui32Value));
+       }
+
+       return eError;
+}
+
+PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo,
+                               IMG_UINT32 ui32Config,
+                               IMG_UINT32 *pui32ConfigState,
+                               IMG_BOOL bSetNotClear)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_DEV_POWER_STATE ePowerState;
+       RGXFWIF_KCCB_CMD sStateFlagCmd = { 0 };
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+       RGXFWIF_SYSDATA *psFwSysData;
+       IMG_UINT32 ui32kCCBCommandSlot;
+       IMG_BOOL bWaitForFwUpdate = IMG_FALSE;
+
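+       /* Changing FW config flags is not supported when running as a virtualisation guest */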
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+       if (!psDevInfo)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+       psDeviceNode = psDevInfo->psDeviceNode;
+       psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+
+       if (NULL == psFwSysData)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Fw Sys Config is not mapped into CPU space",
+                        __func__));
+               return PVRSRV_ERROR_INVALID_CPU_ADDR;
+       }
+
+       /* apply change and ensure the new data is written to memory
+        * before requesting the FW to read it
+        */
+       ui32Config = ui32Config & RGXFWIF_INICFG_ALL;
+       if (bSetNotClear)
+       {
+               psFwSysData->ui32ConfigFlags |= ui32Config;
+       }
+       else
+       {
+               psFwSysData->ui32ConfigFlags &= ~ui32Config;
+       }
+       OSWriteMemoryBarrier(&psFwSysData->ui32ConfigFlags);
+
+       /* return current/new value to caller */
+       if (pui32ConfigState)
+       {
+               *pui32ConfigState = psFwSysData->ui32ConfigFlags;
+       }
+
+       OSMemoryBarrier(&psFwSysData->ui32ConfigFlags);
+
+       eError = PVRSRVPowerLock(psDeviceNode);
+       PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPowerLock");
+
+       /* notify FW to update setting */
+       eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+
+       if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF))
+       {
+               /* Ask the FW to update its cached version of the value */
+               sStateFlagCmd.eCmdType = RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL;
+
+               eError = RGXSendCommandAndGetKCCBSlot(psDevInfo,
+                                                                                         &sStateFlagCmd,
+                                                                                         PDUMP_FLAGS_CONTINUOUS,
+                                                                                         &ui32kCCBCommandSlot);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXSendCommandAndGetKCCBSlot", unlock);
+               bWaitForFwUpdate = IMG_TRUE;
+       }
+
+unlock:
+       PVRSRVPowerUnlock(psDeviceNode);
+       if (bWaitForFwUpdate)
+       {
+               /* Wait for the value to be updated as the FW validates
+                * the parameters and modifies the ui32ConfigFlags
+                * accordingly
+                * (for completeness as registered callbacks should also
+                *  not permit invalid transitions)
+                */
+               eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
+               PVR_LOG_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate");
+       }
+       return eError;
+}
+
+static
+PVRSRV_ERROR RGXScheduleCleanupCommand(PVRSRV_RGXDEV_INFO      *psDevInfo,
+                                                                          RGXFWIF_DM                   eDM,
+                                                                          RGXFWIF_KCCB_CMD             *psKCCBCmd,
+                                                                          RGXFWIF_CLEANUP_TYPE eCleanupType,
+                                                                          IMG_UINT32                   ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32kCCBCommandSlot;
+
+       /* Clean-up commands sent during frame capture intervals must be dumped even when not in capture range... */
+       ui32PDumpFlags |= PDUMP_FLAGS_INTERVAL;
+
+       psKCCBCmd->eCmdType = RGXFWIF_KCCB_CMD_CLEANUP;
+       psKCCBCmd->uCmdData.sCleanupData.eCleanupType = eCleanupType;
+
+       /*
+               Send the cleanup request to the firmware. If the resource is still busy
+               the firmware will tell us and we'll drop out with a retry.
+       */
+       eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo,
+                                                                                         eDM,
+                                                                                         psKCCBCmd,
+                                                                                         ui32PDumpFlags,
+                                                                                         &ui32kCCBCommandSlot);
+       if (eError != PVRSRV_OK)
+       {
+               /* If caller may retry, fail with no error message */
+               if ((eError != PVRSRV_ERROR_RETRY) &&
+                   (eError != PVRSRV_ERROR_KERNEL_CCB_FULL))
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "RGXScheduleCommandAndGetKCCBSlot() failed (%s) in %s()",
+                                PVRSRVGETERRORSTRING(eError), __func__));
+               }
+               goto fail_command;
+       }
+
+       /* Wait for command kCCB slot to be updated by FW */
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags,
+                                                 "Wait for the firmware to reply to the cleanup command");
+       eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot,
+                                                                         ui32PDumpFlags);
+       /*
+               If the firmware hasn't got back to us in a timely manner
+               then bail and let the caller retry the command.
+        */
+       if (eError == PVRSRV_ERROR_TIMEOUT)
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                        "%s: RGXWaitForKCCBSlotUpdate timed out. Dump debug information.",
+                        __func__));
+
+               eError = PVRSRV_ERROR_RETRY;
+#if defined(DEBUG)
+               PVRSRVDebugRequest(psDevInfo->psDeviceNode,
+                               DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+#endif
+               goto fail_poll;
+       }
+       else if (eError != PVRSRV_OK)
+       {
+               goto fail_poll;
+       }
+
+#if defined(PDUMP)
+       /*
+        * The cleanup request to the firmware will tell us if a given resource is busy or not.
+        * If the RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY flag is set, this means that the resource is
+        * still in use. In this case we return a PVRSRV_ERROR_RETRY error to the client drivers
+        * and they will re-issue the cleanup request until it succeeds.
+        *
+        * Since this retry mechanism doesn't work for pdumps, client drivers should ensure
+        * that cleanup requests are only submitted if the resource is unused.
+        * If this is not the case, the following poll will block indefinitely, making sure
+        * the issue doesn't go unnoticed.
+        */
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags,
+                                       "Cleanup: If this poll fails, the following resource is still in use (DM=%u, type=%u, address=0x%08x), which is incorrect in pdumps",
+                                       eDM,
+                                       psKCCBCmd->uCmdData.sCleanupData.eCleanupType,
+                                       psKCCBCmd->uCmdData.sCleanupData.uCleanupData.psContext.ui32Addr);
+       eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBRtnSlotsMemDesc,
+                                                                       ui32kCCBCommandSlot * sizeof(IMG_UINT32),
+                                                                       0,
+                                                                       RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY,
+                                                                       PDUMP_POLL_OPERATOR_EQUAL,
+                                                                       ui32PDumpFlags);
+       PVR_LOG_IF_ERROR(eError, "DevmemPDumpDevmemPol32");
+#endif
+
+       /*
+               If the command was run but a resource was busy, then the request
+               will need to be retried.
+       */
+       if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY))
+       {
+               if (psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE)
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "%s: FW poll on a HW operation failed", __func__));
+               }
+               eError = PVRSRV_ERROR_RETRY;
+               goto fail_requestbusy;
+       }
+
+       return PVRSRV_OK;
+
+fail_requestbusy:
+fail_poll:
+fail_command:
+       PVR_ASSERT(eError != PVRSRV_OK);
+
+       return eError;
+}
+
+/*
+       RGXFWRequestCommonContextCleanUp
+*/
+PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                                         RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+                                                                                         RGXFWIF_DM eDM,
+                                                                                         IMG_UINT32 ui32PDumpFlags)
+{
+       RGXFWIF_KCCB_CMD                        sRCCleanUpCmd = {0};
+       PVRSRV_ERROR                            eError;
+       PRGXFWIF_FWCOMMONCONTEXT        psFWCommonContextFWAddr;
+       PVRSRV_RGXDEV_INFO                      *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice;
+
+       /* Force retry if this context's CCB is currently being dumped
+        * as part of the stalled CCB debug */
+       if (psDevInfo->pvEarliestStalledClientCCB == (void*)psServerCommonContext->psClientCCB)
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                        "%s: Forcing retry as psDevInfo->pvEarliestStalledClientCCB = psServerCommonContext->psClientCCB <%p>",
+                        __func__,
+                        (void*)psServerCommonContext->psClientCCB));
+               return PVRSRV_ERROR_RETRY;
+       }
+
+       psFWCommonContextFWAddr = FWCommonContextGetFWAddress(psServerCommonContext);
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode, "Common ctx cleanup Request DM%d [context = 0x%08x]",
+                    eDM, psFWCommonContextFWAddr.ui32Addr);
+       PDUMPCOMMENT(psDevInfo->psDeviceNode, "Wait for CCB to be empty before common ctx cleanup");
+
+       RGXCCBPDumpDrainCCB(FWCommonContextGetClientCCB(psServerCommonContext), ui32PDumpFlags);
+#endif
+
+       /* Set up our command data; the cleanup call will fill in the rest */
+       sRCCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psContext = psFWCommonContextFWAddr;
+
+       /* Request cleanup of the firmware resource */
+       eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice,
+                                                                          eDM,
+                                                                          &sRCCleanUpCmd,
+                                                                          RGXFWIF_CLEANUP_FWCOMMONCONTEXT,
+                                                                          ui32PDumpFlags);
+
+       if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed to schedule a common context cleanup with error (%u)",
+                        __func__, eError));
+       }
+
+       return eError;
+}
+
+/*
+ * RGXFWRequestHWRTDataCleanUp
+ */
+
+PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                         PRGXFWIF_HWRTDATA psHWRTData)
+{
+       RGXFWIF_KCCB_CMD                        sHWRTDataCleanUpCmd = {0};
+       PVRSRV_ERROR                            eError;
+
+       PDUMPCOMMENT(psDeviceNode, "HW RTData cleanup Request [HWRTData = 0x%08x]", psHWRTData.ui32Addr);
+
+       sHWRTDataCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psHWRTData = psHWRTData;
+
+       eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice,
+                                          RGXFWIF_DM_GP,
+                                          &sHWRTDataCleanUpCmd,
+                                          RGXFWIF_CLEANUP_HWRTDATA,
+                                          PDUMP_FLAGS_NONE);
+
+       if (eError != PVRSRV_OK)
+       {
+               /* If caller may retry, fail with no error message */
+               if ((eError != PVRSRV_ERROR_RETRY) &&
+                   (eError != PVRSRV_ERROR_KERNEL_CCB_FULL))
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to schedule a HWRTData cleanup with error (%u)",
+                                __func__, eError));
+               }
+       }
+
+       return eError;
+}
+
+/*
+       RGXFWRequestFreeListCleanUp
+*/
+PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                PRGXFWIF_FREELIST psFWFreeList)
+{
+       RGXFWIF_KCCB_CMD                        sFLCleanUpCmd = {0};
+       PVRSRV_ERROR                            eError;
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode, "Free list cleanup Request [FreeList = 0x%08x]", psFWFreeList.ui32Addr);
+
+       /* Set up our command data; the cleanup call will fill in the rest */
+       sFLCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psFreelist = psFWFreeList;
+
+       /* Request cleanup of the firmware resource */
+       eError = RGXScheduleCleanupCommand(psDevInfo,
+                                                                          RGXFWIF_DM_GP,
+                                                                          &sFLCleanUpCmd,
+                                                                          RGXFWIF_CLEANUP_FREELIST,
+                                                                          PDUMP_FLAGS_NONE);
+
+       if (eError != PVRSRV_OK)
+       {
+               /* If caller may retry, fail with no error message */
+               if ((eError != PVRSRV_ERROR_RETRY) &&
+                   (eError != PVRSRV_ERROR_KERNEL_CCB_FULL))
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to schedule a free list cleanup with error (%u)",
+                                __func__, eError));
+               }
+       }
+
+       return eError;
+}
+
+/*
+       RGXFWRequestZSBufferCleanUp
+*/
+PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                PRGXFWIF_ZSBUFFER psFWZSBuffer)
+{
+       RGXFWIF_KCCB_CMD                        sZSBufferCleanUpCmd = {0};
+       PVRSRV_ERROR                            eError;
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode, "ZS Buffer cleanup Request [ZS Buffer = 0x%08x]", psFWZSBuffer.ui32Addr);
+
+       /* Set up our command data; the cleanup call will fill in the rest */
+       sZSBufferCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psZSBuffer = psFWZSBuffer;
+
+       /* Request cleanup of the firmware resource */
+       eError = RGXScheduleCleanupCommand(psDevInfo,
+                                                                          RGXFWIF_DM_3D,
+                                                                          &sZSBufferCleanUpCmd,
+                                                                          RGXFWIF_CLEANUP_ZSBUFFER,
+                                                                          PDUMP_FLAGS_NONE);
+
+       if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed to schedule a ZS buffer cleanup with error (%u)",
+                        __func__, eError));
+       }
+
+       return eError;
+}
+
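+/*
+       RGXFWSetHCSDeadline
+       Write the Hard Context Switch deadline (in ms) into the firmware runtime
+       configuration. Not supported for Guest drivers.
+*/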
+PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                               IMG_UINT32 ui32HCSDeadlineMs)
+{
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+       psDevInfo->psRGXFWIfRuntimeCfg->ui32HCSDeadlineMS = ui32HCSDeadlineMs;
+       OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->ui32HCSDeadlineMS);
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Updating the Hard Context Switching deadline inside RGXFWIfRuntimeCfg");
+       DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+                                                         offsetof(RGXFWIF_RUNTIME_CFG, ui32HCSDeadlineMS),
+                                                         ui32HCSDeadlineMs,
+                                                         PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+       return PVRSRV_OK;
+}
+
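+/*
+       RGXFWHealthCheckCmd
+       Schedule a health check command on the general purpose DM, used by the
+       health monitoring code to confirm the firmware is still processing the
+       kernel CCB.
+*/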
+PVRSRV_ERROR RGXFWHealthCheckCmd(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       RGXFWIF_KCCB_CMD        sCmpKCCBCmd;
+
+       sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_HEALTH_CHECK;
+
+       return  RGXScheduleCommand(psDevInfo,
+                                                          RGXFWIF_DM_GP,
+                                                          &sCmpKCCBCmd,
+                                                          PDUMP_FLAGS_CONTINUOUS);
+}
+
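+/*
+       RGXFWSetFwOsState
+       Request that the firmware brings the given OSID online or offline; offline
+       requests wait for the firmware to confirm the state change where possible.
+*/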
+PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                               IMG_UINT32 ui32OSid,
+                                                               RGXFWIF_OS_STATE_CHANGE eOSOnlineState)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       RGXFWIF_KCCB_CMD sOSOnlineStateCmd = { 0 };
+       const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+
+       sOSOnlineStateCmd.eCmdType = RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE;
+       sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.ui32OSid = ui32OSid;
+       sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.eNewOSState = eOSOnlineState;
+
+#if defined(SUPPORT_AUTOVZ)
+       {
+               IMG_BOOL bConnectionDown = IMG_FALSE;
+
+               PVR_UNREFERENCED_PARAMETER(psFwSysData);
+               sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.eNewOSState = RGXFWIF_OS_OFFLINE;
+
+               LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+               {
+                       /* Send the offline command regardless of whether the power lock is held.
+                        * Under AutoVz this is done during regular driver deinit, store-to-ram suspend
+                        * or (optionally) from a kernel panic callback. Deinit and suspend operations
+                        * take the lock in the rgx pre/post power functions as expected.
+                        * The kernel panic callback is a last resort way of letting the firmware know that
+                        * the VM is unrecoverable and the vz connection must be disabled. It cannot wait
+                        * on other kernel threads to finish and release the lock. */
+                       eError = RGXSendCommand(psDevInfo,
+                                                                       &sOSOnlineStateCmd,
+                                                                       PDUMP_FLAGS_CONTINUOUS);
+
+                       if (eError != PVRSRV_ERROR_RETRY)
+                       {
+                               break;
+                       }
+
+                       OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+               } END_LOOP_UNTIL_TIMEOUT();
+
+               /* Guests and Host going offline should wait for confirmation
+                * from the Firmware of the state change. If this fails, break
+                * the connection on the OS Driver's end as backup. */
+               if (PVRSRV_VZ_MODE_IS(GUEST) || (ui32OSid == RGXFW_HOST_OS))
+               {
+                       LOOP_UNTIL_TIMEOUT(SECONDS_TO_MICROSECONDS/2)
+                       {
+                               if (KM_FW_CONNECTION_IS(READY, psDevInfo))
+                               {
+                                       bConnectionDown = IMG_TRUE;
+                                       break;
+                               }
+                       } END_LOOP_UNTIL_TIMEOUT();
+
+                       if (!bConnectionDown)
+                       {
+                               KM_SET_OS_CONNECTION(OFFLINE, psDevInfo);
+                       }
+               }
+       }
+#else
+       if (PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               /* There is no reason for Guests to update their own state or that of any
+                * other VM; this is the Hypervisor and Host driver's responsibility. */
+               return PVRSRV_OK;
+       }
+       else if (eOSOnlineState == RGXFWIF_OS_ONLINE)
+       {
+               LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+               {
+                       eError = RGXScheduleCommand(psDevInfo,
+                                                                               RGXFWIF_DM_GP,
+                                                                               &sOSOnlineStateCmd,
+                                                                               PDUMP_FLAGS_CONTINUOUS);
+                       if (eError != PVRSRV_ERROR_RETRY) break;
+
+                       OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+               } END_LOOP_UNTIL_TIMEOUT();
+       }
+       else if (psFwSysData)
+       {
+               const volatile RGXFWIF_OS_RUNTIME_FLAGS *psFwRunFlags =
+                        (const volatile RGXFWIF_OS_RUNTIME_FLAGS*) &psFwSysData->asOsRuntimeFlagsMirror[ui32OSid];
+
+               /* Attempt several times until the FW manages to offload the OS */
+               LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+               {
+                       IMG_UINT32 ui32kCCBCommandSlot;
+
+                       /* Send request */
+                       eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo,
+                                                                                                         RGXFWIF_DM_GP,
+                                                                                                         &sOSOnlineStateCmd,
+                                                                                                         PDUMP_FLAGS_CONTINUOUS,
+                                                                                                         &ui32kCCBCommandSlot);
+                       if (unlikely(eError == PVRSRV_ERROR_RETRY)) continue;
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommand", return_);
+
+                       /* Wait for FW to process the cmd */
+                       eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", return_);
+
+                       /* read the OS state */
+                       OSMemoryBarrier(NULL);
+                       /* check if FW finished offloading the OSID and is stopped */
+                       if (psFwRunFlags->bfOsState == RGXFW_CONNECTION_FW_OFFLINE)
+                       {
+                               eError = PVRSRV_OK;
+                               break;
+                       }
+                       else
+                       {
+                               eError = PVRSRV_ERROR_TIMEOUT;
+                       }
+
+                       OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+               } END_LOOP_UNTIL_TIMEOUT();
+       }
+       else
+       {
+               eError = PVRSRV_ERROR_NOT_INITIALISED;
+       }
+
+return_:
+#endif
+       return eError;
+}
+
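+/*
+       RGXFWChangeOSidPriority
+       Update the priority of the given OSID in the firmware runtime configuration
+       and notify the firmware via a kernel CCB command. Not supported for Guests.
+*/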
+PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                               IMG_UINT32 ui32OSid,
+                                                               IMG_UINT32 ui32Priority)
+{
+       PVRSRV_ERROR eError;
+       RGXFWIF_KCCB_CMD        sOSidPriorityCmd = { 0 };
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+       sOSidPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE;
+       psDevInfo->psRGXFWIfRuntimeCfg->aui32OSidPriority[ui32OSid] = ui32Priority;
+       OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->aui32OSidPriority[ui32OSid]);
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Updating the priority of OSID%u inside RGXFWIfRuntimeCfg", ui32OSid);
+       DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+                                                         offsetof(RGXFWIF_RUNTIME_CFG, aui32OSidPriority) + (ui32OSid * sizeof(ui32Priority)),
+                                                         ui32Priority,
+                                                         PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               eError = RGXScheduleCommand(psDevInfo,
+                                                                       RGXFWIF_DM_GP,
+                                                                       &sOSidPriorityCmd,
+                                                                       PDUMP_FLAGS_CONTINUOUS);
+               if (eError != PVRSRV_ERROR_RETRY)
+               {
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       return eError;
+}
+
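+/*
+       ContextSetPriority
+       Validate the requested priority, write a priority command into the context's
+       client CCB and kick the firmware so the new priority takes effect.
+*/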
+PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext,
+                                                               CONNECTION_DATA *psConnection,
+                                                               PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                               IMG_UINT32 ui32Priority,
+                                                               RGXFWIF_DM eDM)
+{
+       IMG_UINT32                              ui32CmdSize;
+       IMG_UINT8                               *pui8CmdPtr;
+       RGXFWIF_KCCB_CMD                sPriorityCmd = { 0 };
+       RGXFWIF_CCB_CMD_HEADER  *psCmdHeader;
+       RGXFWIF_CMD_PRIORITY    *psCmd;
+       PVRSRV_ERROR                    eError;
+       IMG_INT32 i32Priority = (IMG_INT32)ui32Priority;
+       RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psContext);
+
+       eError = _CheckPriority(psDevInfo, i32Priority, psContext->eRequestor);
+       PVR_LOG_GOTO_IF_ERROR(eError, "_CheckPriority", fail_checkpriority);
+
+       /*
+               Get space for command
+       */
+       ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CCB_CMD_HEADER) + sizeof(RGXFWIF_CMD_PRIORITY));
+
+       eError = RGXAcquireCCB(psClientCCB,
+                                                  ui32CmdSize,
+                                                  (void **) &pui8CmdPtr,
+                                                  PDUMP_FLAGS_CONTINUOUS);
+       if (eError != PVRSRV_OK)
+       {
+               if (eError != PVRSRV_ERROR_RETRY)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire space for client CCB", __func__));
+               }
+               goto fail_ccbacquire;
+       }
+
+       /*
+               Write the command header and command
+       */
+       psCmdHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr;
+       psCmdHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PRIORITY;
+       psCmdHeader->ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CMD_PRIORITY));
+       pui8CmdPtr += sizeof(*psCmdHeader);
+
+       psCmd = (RGXFWIF_CMD_PRIORITY *) pui8CmdPtr;
+       psCmd->i32Priority = i32Priority;
+       pui8CmdPtr += sizeof(*psCmd);
+
+       /*
+               We should reserve space in the kernel CCB here and fill in the command
+               directly.
+               That way, if there is no space in the kernel CCB, we can return a retry
+               to the services client before any operations have been committed.
+       */
+
+       /*
+               Submit the command
+       */
+       RGXReleaseCCB(psClientCCB,
+                                 ui32CmdSize,
+                                 PDUMP_FLAGS_CONTINUOUS);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to release space in client CCB", __func__));
+               return eError;
+       }
+
+       /* Construct the priority command. */
+       sPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+       sPriorityCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psContext);
+       sPriorityCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB);
+       sPriorityCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB);
+       sPriorityCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+       sPriorityCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               eError = RGXScheduleCommand(psDevInfo,
+                                                                       eDM,
+                                                                       &sPriorityCmd,
+                                                                       PDUMP_FLAGS_CONTINUOUS);
+               if (eError != PVRSRV_ERROR_RETRY)
+               {
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to submit set priority command with error (%u)",
+                               __func__,
+                               eError));
+       }
+
+       psContext->i32Priority = i32Priority;
+
+       return PVRSRV_OK;
+
+fail_ccbacquire:
+fail_checkpriority:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
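+/*
+       RGXFWConfigPHR
+       Update the Periodic Hardware Reset mode in the firmware runtime configuration
+       and notify the firmware via a kernel CCB command. Not supported for Guests.
+*/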
+PVRSRV_ERROR RGXFWConfigPHR(PVRSRV_RGXDEV_INFO *psDevInfo,
+                            IMG_UINT32 ui32PHRMode)
+{
+       PVRSRV_ERROR eError;
+       RGXFWIF_KCCB_CMD sCfgPHRCmd = { 0 };
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+       sCfgPHRCmd.eCmdType = RGXFWIF_KCCB_CMD_PHR_CFG;
+       psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode = ui32PHRMode;
+       OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode);
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Updating the Periodic Hardware Reset Mode inside RGXFWIfRuntimeCfg");
+       DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+                                                         offsetof(RGXFWIF_RUNTIME_CFG, ui32PHRMode),
+                                                         ui32PHRMode,
+                                                         PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               eError = RGXScheduleCommand(psDevInfo,
+                                           RGXFWIF_DM_GP,
+                                           &sCfgPHRCmd,
+                                           PDUMP_FLAGS_CONTINUOUS);
+               if (eError != PVRSRV_ERROR_RETRY)
+               {
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       return eError;
+}
+
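+/*
+       RGXFWConfigWdg
+       Update the firmware watchdog period (in microseconds) in the runtime
+       configuration and notify the firmware via a kernel CCB command.
+*/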
+PVRSRV_ERROR RGXFWConfigWdg(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                       IMG_UINT32 ui32WdgPeriodUs)
+{
+       PVRSRV_ERROR eError;
+       RGXFWIF_KCCB_CMD sCfgWdgCmd = { 0 };
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+       sCfgWdgCmd.eCmdType = RGXFWIF_KCCB_CMD_WDG_CFG;
+       psDevInfo->psRGXFWIfRuntimeCfg->ui32WdgPeriodUs = ui32WdgPeriodUs;
+       OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->ui32WdgPeriodUs);
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                                "Updating the firmware watchdog period inside RGXFWIfRuntimeCfg");
+       DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+                                                         offsetof(RGXFWIF_RUNTIME_CFG, ui32WdgPeriodUs),
+                                                         ui32WdgPeriodUs,
+                                                         PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               eError = RGXScheduleCommand(psDevInfo,
+                                                                       RGXFWIF_DM_GP,
+                                                                       &sCfgWdgCmd,
+                                                                       PDUMP_FLAGS_CONTINUOUS);
+               if (eError != PVRSRV_ERROR_RETRY)
+               {
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       return eError;
+}
+
+
+void RGXCheckForStalledClientContexts(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bIgnorePrevious)
+{
+       /* Attempt to detect and deal with any stalled client contexts.
+        * bIgnorePrevious may be set by the caller if they know a context to be
+        * stalled, as otherwise this function will only identify stalled
+        * contexts which have not been previously reported.
+        */
+
+       IMG_UINT32 ui32StalledClientMask = 0;
+
+       if (!(OSTryLockAcquire(psDevInfo->hCCBStallCheckLock)))
+       {
+               PVR_LOG(("RGXCheckForStalledClientContexts: Failed to acquire hCCBStallCheckLock, returning..."));
+               return;
+       }
+
+       ui32StalledClientMask |= CheckForStalledClientTDMTransferCtxt(psDevInfo);
+
+       ui32StalledClientMask |= CheckForStalledClientRenderCtxt(psDevInfo);
+
+       ui32StalledClientMask |= CheckForStalledClientComputeCtxt(psDevInfo);
+
+       ui32StalledClientMask |= CheckForStalledClientKickSyncCtxt(psDevInfo);
+
+       /* If at least one DM stalled bit is different than before */
+       if (bIgnorePrevious || (psDevInfo->ui32StalledClientMask != ui32StalledClientMask))
+       {
+               if (ui32StalledClientMask > 0)
+               {
+                       static __maybe_unused const char *pszStalledAction =
+#if defined(PVRSRV_STALLED_CCB_ACTION)
+                                       "force";
+#else
+                                       "warn";
+#endif
+                       /* Print all the stalled DMs */
+                       PVR_LOG(("Possible stalled client RGX contexts detected: %s%s%s%s%s%s%s",
+                                        RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_GP),
+                                        RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TDM_2D),
+                                        RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TA),
+                                        RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_3D),
+                                        RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_CDM),
+                                        RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TQ2D),
+                                        RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TQ3D)));
+
+                       PVR_LOG(("Trying to identify stalled context...(%s) [%d]",
+                                pszStalledAction, bIgnorePrevious));
+
+                       DumpStalledContextInfo(psDevInfo);
+               }
+               else
+               {
+                       if (psDevInfo->ui32StalledClientMask > 0)
+                       {
+                               /* Indicate there are no stalled DMs */
+                               PVR_LOG(("No further stalled client contexts exist"));
+                       }
+               }
+               psDevInfo->ui32StalledClientMask = ui32StalledClientMask;
+               psDevInfo->pvEarliestStalledClientCCB = NULL;
+       }
+       OSLockRelease(psDevInfo->hCCBStallCheckLock);
+}
+
+/*
+       RGXUpdateHealthStatus
+*/
+PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode,
+                                   IMG_BOOL bCheckAfterTimePassed)
+{
+       const PVRSRV_DATA*           psPVRSRVData = PVRSRVGetPVRSRVData();
+       PVRSRV_DEVICE_HEALTH_STATUS  eNewStatus   = PVRSRV_DEVICE_HEALTH_STATUS_OK;
+       PVRSRV_DEVICE_HEALTH_REASON  eNewReason   = PVRSRV_DEVICE_HEALTH_REASON_NONE;
+       PVRSRV_RGXDEV_INFO*          psDevInfo;
+       const RGXFWIF_TRACEBUF*      psRGXFWIfTraceBufCtl;
+       const RGXFWIF_SYSDATA*       psFwSysData;
+       const RGXFWIF_OSDATA*        psFwOsData;
+       const RGXFWIF_CCB_CTL*       psKCCBCtl;
+       IMG_UINT32                   ui32ThreadCount;
+       IMG_BOOL                     bKCCBCmdsWaiting;
+
+       PVR_ASSERT(psDevNode != NULL);
+       psDevInfo = psDevNode->pvDevice;
+
+       /* If the firmware is not yet initialised or has already deinitialised, stop here */
+       if (psDevInfo  == NULL || !psDevInfo->bFirmwareInitialised || psDevInfo->pvRegsBaseKM == NULL ||
+               psDevInfo->psDeviceNode == NULL || psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT)
+       {
+               return PVRSRV_OK;
+       }
+
+       psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl;
+       psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+       psFwOsData = psDevInfo->psRGXFWIfFwOsData;
+
+       /* If this is a quick update, carry over the currently recorded health values... */
+       if (!bCheckAfterTimePassed)
+       {
+               eNewStatus = OSAtomicRead(&psDevNode->eHealthStatus);
+               eNewReason = OSAtomicRead(&psDevNode->eHealthReason);
+       }
+
+       /* Decrement the SLR holdoff counter (if non-zero) */
+       if (psDevInfo->ui32SLRHoldoffCounter > 0)
+       {
+               psDevInfo->ui32SLRHoldoffCounter--;
+       }
+
+       /* If Rogue is not powered on, just skip ahead and check for stalled client CCBs */
+       if (PVRSRVIsDevicePowered(psDevNode))
+       {
+               /*
+                  Firmware thread checks...
+               */
+               if (psRGXFWIfTraceBufCtl != NULL)
+               {
+                       for (ui32ThreadCount = 0; ui32ThreadCount < RGXFW_THREAD_NUM; ui32ThreadCount++)
+                       {
+                               const IMG_CHAR* pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szInfo;
+
+                               /*
+                               Check if the FW has hit an assert...
+                               */
+                               if (*pszTraceAssertInfo != '\0')
+                               {
+                                       PVR_DPF((PVR_DBG_WARNING, "%s: Firmware thread %d has asserted: %s (%s:%d)",
+                                                       __func__, ui32ThreadCount, pszTraceAssertInfo,
+                                                       psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szPath,
+                                                       psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.ui32LineNum));
+                                       eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD;
+                                       eNewReason = PVRSRV_DEVICE_HEALTH_REASON_ASSERTED;
+                                       goto _RGXUpdateHealthStatus_Exit;
+                               }
+
+                               /*
+                                  Check the threads to see if they are in the same poll locations as last time...
+                               */
+                               if (bCheckAfterTimePassed)
+                               {
+                                       if (psFwSysData->aui32CrPollAddr[ui32ThreadCount] != 0  &&
+                                               psFwSysData->aui32CrPollCount[ui32ThreadCount] == psDevInfo->aui32CrLastPollCount[ui32ThreadCount])
+                                       {
+                                               PVR_DPF((PVR_DBG_WARNING, "%s: Firmware stuck on CR poll: T%u polling %s (reg:0x%08X mask:0x%08X)",
+                                                               __func__, ui32ThreadCount,
+                                                               ((psFwSysData->aui32CrPollAddr[ui32ThreadCount] & RGXFW_POLL_TYPE_SET)?("set"):("unset")),
+                                                               psFwSysData->aui32CrPollAddr[ui32ThreadCount] & ~RGXFW_POLL_TYPE_SET,
+                                                               psFwSysData->aui32CrPollMask[ui32ThreadCount]));
+                                               eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING;
+                                               eNewReason = PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING;
+                                               goto _RGXUpdateHealthStatus_Exit;
+                                       }
+                                       psDevInfo->aui32CrLastPollCount[ui32ThreadCount] = psFwSysData->aui32CrPollCount[ui32ThreadCount];
+                               }
+                       }
+
+                       /*
+                       Check if the FW has faulted...
+                       */
+                       if (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_FW_FAULT)
+                       {
+                               PVR_DPF((PVR_DBG_WARNING,
+                                               "%s: Firmware has faulted and needs to restart",
+                                               __func__));
+                               eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_FAULT;
+                               if (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_RESTART_REQUESTED)
+                               {
+                                       eNewReason = PVRSRV_DEVICE_HEALTH_REASON_RESTARTING;
+                               }
+                               else
+                               {
+                                       eNewReason = PVRSRV_DEVICE_HEALTH_REASON_IDLING;
+                               }
+                               goto _RGXUpdateHealthStatus_Exit;
+                       }
+               }
+
+               /*
+                  Event Object Timeouts check...
+               */
+               if (!bCheckAfterTimePassed)
+               {
+                       if (psDevInfo->ui32GEOTimeoutsLastTime > 1 && psPVRSRVData->ui32GEOConsecutiveTimeouts > psDevInfo->ui32GEOTimeoutsLastTime)
+                       {
+                               PVR_DPF((PVR_DBG_WARNING, "%s: Global Event Object Timeouts have risen (from %d to %d)",
+                                        __func__,
+                                        psDevInfo->ui32GEOTimeoutsLastTime, psPVRSRVData->ui32GEOConsecutiveTimeouts));
+                               eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING;
+                               eNewReason = PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS;
+                       }
+                       psDevInfo->ui32GEOTimeoutsLastTime = psPVRSRVData->ui32GEOConsecutiveTimeouts;
+               }
+
+               /*
+                  Check the Kernel CCB pointer is valid. If any commands were waiting last time, then check
+                  that some have executed since then.
+               */
+               bKCCBCmdsWaiting = IMG_FALSE;
+               psKCCBCtl = psDevInfo->psKernelCCBCtl;
+
+               if (psKCCBCtl != NULL)
+               {
+                       if (psKCCBCtl->ui32ReadOffset > psKCCBCtl->ui32WrapMask  ||
+                               psKCCBCtl->ui32WriteOffset > psKCCBCtl->ui32WrapMask)
+                       {
+                               PVR_DPF((PVR_DBG_WARNING, "%s: KCCB has invalid offset (ROFF=%d WOFF=%d)",
+                                               __func__, psKCCBCtl->ui32ReadOffset, psKCCBCtl->ui32WriteOffset));
+                               eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD;
+                               eNewReason = PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT;
+                       }
+
+                       if (psKCCBCtl->ui32ReadOffset != psKCCBCtl->ui32WriteOffset)
+                       {
+                               bKCCBCmdsWaiting = IMG_TRUE;
+                       }
+               }
+
+               if (bCheckAfterTimePassed && psFwOsData != NULL)
+               {
+                       IMG_UINT32 ui32KCCBCmdsExecuted = psFwOsData->ui32KCCBCmdsExecuted;
+
+                       if (psDevInfo->ui32KCCBCmdsExecutedLastTime == ui32KCCBCmdsExecuted)
+                       {
+                               /*
+                                  If something was waiting last time then the Firmware has stopped processing commands.
+                               */
+                               if (psDevInfo->bKCCBCmdsWaitingLastTime)
+                               {
+                                       PVR_DPF((PVR_DBG_WARNING, "%s: No KCCB commands executed since check!",
+                                                       __func__));
+                                       eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING;
+                                       eNewReason = PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED;
+                               }
+
+                               /*
+                                  If no commands are currently pending and nothing happened since the last poll, then
+                                  schedule a dummy command to ping the firmware so we know it is alive and processing.
+                               */
+                               if (!bKCCBCmdsWaiting)
+                               {
+                                       PVRSRV_ERROR eError = RGXFWHealthCheckCmd(psDevNode->pvDevice);
+
+                                       if (eError != PVRSRV_OK)
+                                       {
+                                               PVR_DPF((PVR_DBG_WARNING, "%s: Cannot schedule Health Check command! (0x%x)",
+                                                               __func__, eError));
+                                       }
+                                       else
+                                       {
+                                               bKCCBCmdsWaiting = IMG_TRUE;
+                                       }
+                               }
+                       }
+
+                       psDevInfo->bKCCBCmdsWaitingLastTime     = bKCCBCmdsWaiting;
+                       psDevInfo->ui32KCCBCmdsExecutedLastTime = ui32KCCBCmdsExecuted;
+               }
+       }
+
+       /*
+          Interrupt counts check...
+       */
+       if (bCheckAfterTimePassed  &&  psFwOsData != NULL)
+       {
+               IMG_UINT32  ui32LISRCount   = 0;
+               IMG_UINT32  ui32FWCount     = 0;
+               IMG_UINT32  ui32MissingInts = 0;
+               IMG_UINT32  ui32Index;
+
+               /* Add up the total number of interrupts issued, sampled/received and missed... */
+               for (ui32Index = 0;  ui32Index < RGXFW_THREAD_NUM;  ui32Index++)
+               {
+                       ui32LISRCount += psDevInfo->aui32SampleIRQCount[ui32Index];
+                       ui32FWCount   += psFwOsData->aui32InterruptCount[ui32Index];
+               }
+
+               if (ui32LISRCount < ui32FWCount)
+               {
+                       ui32MissingInts = (ui32FWCount-ui32LISRCount);
+               }
+
+               if (ui32LISRCount == psDevInfo->ui32InterruptCountLastTime  &&
+                   ui32MissingInts >= psDevInfo->ui32MissingInterruptsLastTime  &&
+                   psDevInfo->ui32MissingInterruptsLastTime > 1)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: LISR has not received the last %d interrupts",
+                                       __func__, ui32MissingInts));
+                       eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING;
+                       eNewReason = PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS;
+
+                       /* Schedule the MISRs to help mitigate the problems of missing interrupts. */
+                       OSScheduleMISR(psDevInfo->pvMISRData);
+                       if (psDevInfo->pvAPMISRData != NULL)
+                       {
+                               OSScheduleMISR(psDevInfo->pvAPMISRData);
+                       }
+               }
+               psDevInfo->ui32InterruptCountLastTime    = ui32LISRCount;
+               psDevInfo->ui32MissingInterruptsLastTime = ui32MissingInts;
+       }
+
+       /*
+          Stalled CCB check...
+       */
+       if (bCheckAfterTimePassed && (PVRSRV_DEVICE_HEALTH_STATUS_OK == eNewStatus))
+       {
+               RGXCheckForStalledClientContexts(psDevInfo, IMG_FALSE);
+       }
+
+       /* Notify client driver and system layer of any eNewStatus errors */
+       if (eNewStatus > PVRSRV_DEVICE_HEALTH_STATUS_OK)
+       {
+               /* Client notification of device error will be achieved by
+                * clients calling UM function RGXGetLastDeviceError() */
+               psDevInfo->eLastDeviceError = RGX_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR;
+
+               /* Notify system layer */
+               {
+                       PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode;
+                       PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig;
+
+                       if (psDevConfig->pfnSysDevErrorNotify)
+                       {
+                               PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0};
+
+                               sErrorData.eResetReason = RGX_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR;
+                               sErrorData.uErrData.sHostWdgData.ui32Status = (IMG_UINT32)eNewStatus;
+                               sErrorData.uErrData.sHostWdgData.ui32Reason = (IMG_UINT32)eNewReason;
+
+                               psDevConfig->pfnSysDevErrorNotify(psDevConfig,
+                                                                 &sErrorData);
+                       }
+               }
+       }
+
+       /*
+          Finished, save the new status...
+       */
+_RGXUpdateHealthStatus_Exit:
+       OSAtomicWrite(&psDevNode->eHealthStatus, eNewStatus);
+       OSAtomicWrite(&psDevNode->eHealthReason, eNewReason);
+       RGXSRV_HWPERF_DEVICE_INFO(psDevInfo, RGX_HWPERF_DEV_INFO_EV_HEALTH, eNewStatus, eNewReason);
+
+       /*
+        * Attempt to service the HWPerf buffer to regularly transport idle/periodic
+        * packets to host buffer.
+        */
+       if (psDevNode->pfnServiceHWPerf != NULL)
+       {
+               PVRSRV_ERROR eError = psDevNode->pfnServiceHWPerf(psDevNode);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "%s: "
+                                "Error occurred when servicing HWPerf buffer (%d)",
+                                __func__, eError));
+               }
+       }
+
+       /* Attempt to refresh timer correlation data */
+       RGXTimeCorrRestartPeriodic(psDevNode);
+
+       return PVRSRV_OK;
+} /* RGXUpdateHealthStatus */
+
+#if defined(SUPPORT_AUTOVZ)
+void RGXUpdateAutoVzWdgToken(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       if (likely(KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && KM_OS_CONNECTION_IS(ACTIVE, psDevInfo)))
+       {
+               /* read and write back the alive token value to confirm to the
+                * virtualisation watchdog that this connection is healthy */
+               KM_SET_OS_ALIVE_TOKEN(KM_GET_FW_ALIVE_TOKEN(psDevInfo), psDevInfo);
+       }
+}
+
+/*
+       RGXUpdateAutoVzWatchdog
+*/
+void RGXUpdateAutoVzWatchdog(PVRSRV_DEVICE_NODE* psDevNode)
+{
+       if (likely(psDevNode != NULL))
+       {
+               PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice;
+
+               if (unlikely((psDevInfo  == NULL || !psDevInfo->bFirmwareInitialised || !psDevInfo->bRGXPowered ||
+                       psDevInfo->pvRegsBaseKM == NULL || psDevNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT)))
+               {
+                       /* If the firmware is not initialised, stop here */
+                       return;
+               }
+               else
+               {
+                       PVRSRV_ERROR eError = PVRSRVPowerLock(psDevNode);
+                       PVR_LOG_RETURN_VOID_IF_ERROR(eError, "PVRSRVPowerLock");
+
+                       RGXUpdateAutoVzWdgToken(psDevInfo);
+                       PVRSRVPowerUnlock(psDevNode);
+               }
+       }
+}
+#endif /* SUPPORT_AUTOVZ */
+
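+/*
+       CheckStalledClientCommonContext
+       Check a single common context's client CCB for a stalled command. Returns
+       PVRSRV_OK if the context has already been freed.
+*/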
+PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM)
+{
+       if (psCurrentServerCommonContext == NULL)
+       {
+               /* the context has already been freed so there is nothing to do here */
+               return PVRSRV_OK;
+       }
+
+       return CheckForStalledCCB(psCurrentServerCommonContext->psDevInfo->psDeviceNode,
+                                 psCurrentServerCommonContext->psClientCCB,
+                                 eKickTypeDM);
+}
+
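+/*
+       DumpFWCommonContextInfo
+       Dump debug information for a common context's client CCB: the whole CCB at
+       high verbosity, otherwise only the first stalled command.
+*/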
+void DumpFWCommonContextInfo(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext,
+                             DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                             void *pvDumpDebugFile,
+                             IMG_UINT32 ui32VerbLevel)
+{
+       if (psCurrentServerCommonContext == NULL)
+       {
+               /* the context has already been freed so there is nothing to do here */
+               return;
+       }
+
+       if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH))
+       {
+               /* If high verbosity requested, dump whole CCB */
+               DumpCCB(psCurrentServerCommonContext->psDevInfo,
+                       psCurrentServerCommonContext->sFWCommonContextFWAddr,
+                       psCurrentServerCommonContext->psClientCCB,
+                       pfnDumpDebugPrintf,
+                       pvDumpDebugFile);
+       }
+       else
+       {
+               /* Otherwise, only dump first stalled command in the CCB */
+               DumpStalledCCBCommand(psCurrentServerCommonContext->sFWCommonContextFWAddr,
+                                     psCurrentServerCommonContext->psClientCCB,
+                                     pfnDumpDebugPrintf,
+                                     pvDumpDebugFile);
+       }
+}
+
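+/*
+       AttachKickResourcesCleanupCtls
+       Collect the firmware cleanup-control addresses (HWRTData set, ZS buffer and
+       MSAA scratch buffer) referenced by a GEOM/3D kick and report how many were
+       attached.
+*/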
+PVRSRV_ERROR AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl,
+                                                                       IMG_UINT32 *pui32NumCleanupCtl,
+                                                                       RGXFWIF_DM eDM,
+                                                                       IMG_BOOL bKick,
+                                                                       RGX_KM_HW_RT_DATASET           *psKMHWRTDataSet,
+                                                                       RGX_ZSBUFFER_DATA              *psZSBuffer,
+                                                                       RGX_ZSBUFFER_DATA              *psMSAAScratchBuffer)
+{
+       PVRSRV_ERROR eError;
+       PRGXFWIF_CLEANUP_CTL *psCleanupCtlWrite = apsCleanupCtl;
+
+       PVR_ASSERT((eDM == RGXFWIF_DM_GEOM) || (eDM == RGXFWIF_DM_3D));
+       PVR_RETURN_IF_INVALID_PARAM((eDM == RGXFWIF_DM_GEOM) || (eDM == RGXFWIF_DM_3D));
+
+       if (bKick)
+       {
+               if (psKMHWRTDataSet)
+               {
+                       PRGXFWIF_CLEANUP_CTL psCleanupCtl;
+
+                       eError = RGXSetFirmwareAddress(&psCleanupCtl, psKMHWRTDataSet->psHWRTDataFwMemDesc,
+                                       offsetof(RGXFWIF_HWRTDATA, sCleanupState),
+                                       RFW_FWADDR_NOREF_FLAG);
+                       PVR_RETURN_IF_ERROR(eError);
+
+                       *(psCleanupCtlWrite++) = psCleanupCtl;
+               }
+
+               if (eDM == RGXFWIF_DM_3D)
+               {
+                       RGXFWIF_PRBUFFER_TYPE eBufferType;
+                       RGX_ZSBUFFER_DATA *psBuffer = NULL;
+
+                       for (eBufferType = RGXFWIF_PRBUFFER_START; eBufferType < RGXFWIF_PRBUFFER_MAXSUPPORTED; eBufferType++)
+                       {
+                               switch (eBufferType)
+                               {
+                               case RGXFWIF_PRBUFFER_ZSBUFFER:
+                                       psBuffer = psZSBuffer;
+                                       break;
+                               case RGXFWIF_PRBUFFER_MSAABUFFER:
+                                       psBuffer = psMSAAScratchBuffer;
+                                       break;
+                               case RGXFWIF_PRBUFFER_MAXSUPPORTED:
+                                       psBuffer = NULL;
+                                       break;
+                               }
+                               if (psBuffer)
+                               {
+                                       (psCleanupCtlWrite++)->ui32Addr = psBuffer->sZSBufferFWDevVAddr.ui32Addr +
+                                                                       offsetof(RGXFWIF_PRBUFFER, sCleanupState);
+                                       psBuffer = NULL;
+                               }
+                       }
+               }
+       }
+
+       *pui32NumCleanupCtl = psCleanupCtlWrite - apsCleanupCtl;
+       PVR_ASSERT(*pui32NumCleanupCtl <= RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS);
+
+       return PVRSRV_OK;
+}
+
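+/*
+       RGXResetHWRLogs
+       Clear the firmware hardware-recovery (HWR) counters and history entries for
+       all DMs.
+*/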
+PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode)
+{
+       PVRSRV_RGXDEV_INFO       *psDevInfo;
+       RGXFWIF_HWRINFOBUF       *psHWRInfoBuf;
+       IMG_UINT32               i;
+
+       if (psDevNode->pvDevice == NULL)
+       {
+               return PVRSRV_ERROR_INVALID_DEVINFO;
+       }
+       psDevInfo = psDevNode->pvDevice;
+
+       psHWRInfoBuf = psDevInfo->psRGXFWIfHWRInfoBufCtl;
+
+       for (i = 0 ; i < psDevInfo->sDevFeatureCfg.ui32MAXDMCount ; i++)
+       {
+               /* Reset the HWR numbers */
+               psHWRInfoBuf->aui32HwrDmLockedUpCount[i] = 0;
+               psHWRInfoBuf->aui32HwrDmFalseDetectCount[i] = 0;
+               psHWRInfoBuf->aui32HwrDmRecoveredCount[i] = 0;
+               psHWRInfoBuf->aui32HwrDmOverranCount[i] = 0;
+       }
+
+       for (i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++)
+       {
+               psHWRInfoBuf->sHWRInfo[i].ui32HWRNumber = 0;
+       }
+
+       psHWRInfoBuf->ui32WriteIndex = 0;
+       psHWRInfoBuf->ui32DDReqCount = 0;
+
+       OSWriteMemoryBarrier(&psHWRInfoBuf->ui32DDReqCount);
+
+       return PVRSRV_OK;
+}
+
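+/*
+       RGXGetPhyAddr
+       Look up the device physical address backing a PMR at the given logical
+       offset, locking the PMR's physical addresses around the query.
+*/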
+PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR,
+                                                  IMG_DEV_PHYADDR *psPhyAddr,
+                                                  IMG_UINT32 ui32LogicalOffset,
+                                                  IMG_UINT32 ui32Log2PageSize,
+                                                  IMG_UINT32 ui32NumOfPages,
+                                                  IMG_BOOL *bValid)
+{
+
+       PVRSRV_ERROR eError;
+
+       eError = PMRLockSysPhysAddresses(psPMR);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: PMRLockSysPhysAddresses failed (%u)",
+                        __func__,
+                        eError));
+               return eError;
+       }
+
+       eError = PMR_DevPhysAddr(psPMR,
+                                                                ui32Log2PageSize,
+                                                                ui32NumOfPages,
+                                                                ui32LogicalOffset,
+                                                                psPhyAddr,
+                                                                bValid);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: PMR_DevPhysAddr failed (%u)",
+                        __func__,
+                        eError));
+               return eError;
+       }
+
+
+       eError = PMRUnlockSysPhysAddresses(psPMR);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: PMRUnLockSysPhysAddresses failed (%u)",
+                        __func__,
+                        eError));
+               return eError;
+       }
+
+       return eError;
+}
+
+#if defined(PDUMP)
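+/*
+       RGXPdumpDrainKCCB
+       When exiting a capture range or pdump block, emit a pdump poll that waits
+       for the firmware read offset to reach the given kernel CCB write offset.
+*/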
+PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32WriteOffset)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+       if (psDevInfo->bDumpedKCCBCtlAlready)
+       {
+               /* exiting capture range or pdump block */
+               psDevInfo->bDumpedKCCBCtlAlready = IMG_FALSE;
+
+               /* Make sure the previous command is drained in the pdump in case we 'jump' over some future commands */
+               PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode,
+                              PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER,
+                              "kCCB(%p): Draining rgxfw_roff (0x%x) == woff (0x%x)",
+                              psDevInfo->psKernelCCBCtl,
+                              ui32WriteOffset,
+                              ui32WriteOffset);
+               eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBCtlMemDesc,
+                               offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset),
+                               ui32WriteOffset,
+                               0xffffffff,
+                               PDUMP_POLL_OPERATOR_EQUAL,
+                               PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: problem pdumping POL for kCCBCtl (%d)", __func__, eError));
+               }
+       }
+
+       return eError;
+
+}
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function     RGXClientConnectCompatCheck_ClientAgainstFW
+
+ @Description
+
+ Check compatibility of client and firmware (build options)
+ at the connection time.
+
+ @Input psDeviceNode - device node
+ @Input ui32ClientBuildOptions - build options for the client
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+******************************************************************************/
+PVRSRV_ERROR RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32ClientBuildOptions)
+{
+#if !defined(NO_HARDWARE) || defined(PDUMP)
+#if !defined(NO_HARDWARE)
+       IMG_UINT32              ui32BuildOptionsMismatch;
+       IMG_UINT32              ui32BuildOptionsFW;
+#endif
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+#endif
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+#if !defined(NO_HARDWARE)
+       if (psDevInfo == NULL || psDevInfo->psRGXFWIfOsInitMemDesc == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Cannot acquire kernel fw compatibility check info, RGXFWIF_OSINIT structure not allocated.",
+                        __func__));
+               return PVRSRV_ERROR_NOT_INITIALISED;
+       }
+
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               if (*((volatile IMG_BOOL *) &psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated))
+               {
+                       /* No need to wait if the FW has already updated the values */
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+#endif
+
+#if defined(PDUMP)
+       {
+               PVRSRV_ERROR eError;
+
+               PDUMPCOMMENT(psDeviceNode, "Compatibility check: client and FW build options");
+               eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc,
+                               offsetof(RGXFWIF_OSINIT, sRGXCompChecks) +
+                               offsetof(RGXFWIF_COMPCHECKS, ui32BuildOptions),
+                               ui32ClientBuildOptions,
+                               0xffffffff,
+                               PDUMP_POLL_OPERATOR_EQUAL,
+                               PDUMP_FLAGS_CONTINUOUS);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)",
+                                       __func__,
+                                       eError));
+                       return eError;
+               }
+       }
+#endif
+
+#if !defined(NO_HARDWARE)
+       ui32BuildOptionsFW = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.ui32BuildOptions;
+       ui32BuildOptionsMismatch = ui32ClientBuildOptions ^ ui32BuildOptionsFW;
+
+       if (ui32BuildOptionsMismatch != 0)
+       {
+               if ((ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0)
+               {
+                       PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; "
+                       "extra options present in client: (0x%x). Please check rgx_options.h",
+                       ui32ClientBuildOptions & ui32BuildOptionsMismatch ));
+               }
+
+               if ((ui32BuildOptionsFW & ui32BuildOptionsMismatch) != 0)
+               {
+                       PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; "
+                       "extra options present in Firmware: (0x%x). Please check rgx_options.h",
+                       ui32BuildOptionsFW & ui32BuildOptionsMismatch ));
+               }
+
+               return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware and client build options match. [ OK ]", __func__));
+       }
+#endif
+
+       return PVRSRV_OK;
+}
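+
+/*
+ * Worked example (illustrative values, not from the driver): the mismatch test
+ * above is a plain XOR of the two build-option bitmasks. With hypothetical
+ * values ui32ClientBuildOptions = 0x0000000B and ui32BuildOptionsFW = 0x00000003,
+ * the XOR yields 0x00000008; that bit is set only on the client side, so the
+ * check logs "extra options present in client: (0x8)" and returns
+ * PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH.
+ */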
+
+/*!
+*******************************************************************************
+
+ @Function     RGXFwRawHeapAllocMap
+
+ @Description Register firmware heap for the specified guest OSID
+
+ @Input psDeviceNode - device node
+ @Input ui32OSID     - Guest OSID
+ @Input sDevPAddr    - Heap address
+ @Input ui64DevPSize - Heap size
+
+ @Return   PVRSRV_ERROR - PVRSRV_OK if heap setup was successful.
+
+******************************************************************************/
+PVRSRV_ERROR RGXFwRawHeapAllocMap(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                 IMG_UINT32 ui32OSID,
+                                                                 IMG_DEV_PHYADDR sDevPAddr,
+                                                                 IMG_UINT64 ui64DevPSize)
+{
+       PVRSRV_ERROR eError;
+       IMG_CHAR szRegionRAName[RA_MAX_NAME_LENGTH];
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_MEMALLOCFLAGS_T uiRawFwHeapAllocFlags = (RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS |
+                                                                                                       PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PREMAP0 + ui32OSID));
+       PHYS_HEAP_CONFIG *psFwMainConfig = FindPhysHeapConfig(psDeviceNode->psDevConfig,
+                                                                                                                  PHYS_HEAP_USAGE_FW_MAIN);
+       PHYS_HEAP_CONFIG sFwHeapConfig;
+
+       PVRSRV_VZ_RET_IF_NOT_MODE(HOST, PVRSRV_OK);
+
+       if (psFwMainConfig == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "FW_MAIN heap config not found."));
+               return PVRSRV_ERROR_NOT_SUPPORTED;
+       }
+
+       OSSNPrintf(szRegionRAName, sizeof(szRegionRAName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSID);
+
+       if (!ui64DevPSize ||
+               !sDevPAddr.uiAddr ||
+               ui32OSID >= RGX_NUM_OS_SUPPORTED ||
+               ui64DevPSize != RGX_FIRMWARE_RAW_HEAP_SIZE)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Invalid parameters for %s", szRegionRAName));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       sFwHeapConfig = *psFwMainConfig;
+       sFwHeapConfig.sStartAddr.uiAddr = 0;
+       sFwHeapConfig.sCardBase.uiAddr = sDevPAddr.uiAddr;
+       sFwHeapConfig.uiSize = RGX_FIRMWARE_RAW_HEAP_SIZE;
+
+       eError = PhysmemCreateHeapLMA(psDeviceNode, &sFwHeapConfig, szRegionRAName, &psDeviceNode->apsFWPremapPhysHeap[ui32OSID]);
+       PVR_LOG_RETURN_IF_ERROR_VA(eError, "PhysmemCreateHeapLMA:PREMAP [%d]", ui32OSID);
+
+       eError = PhysHeapAcquire(psDeviceNode->apsFWPremapPhysHeap[ui32OSID]);
+       PVR_LOG_RETURN_IF_ERROR_VA(eError, "PhysHeapAcquire:PREMAP [%d]", ui32OSID);
+
+       psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32OSID] = psDeviceNode->apsFWPremapPhysHeap[ui32OSID];
+
+       PDUMPCOMMENT(psDeviceNode, "Allocate and map raw firmware heap for OSID: [%d]", ui32OSID);
+
+#if (RGX_NUM_OS_SUPPORTED > 1)
+       /* don't clear the heap of other guests on allocation */
+       uiRawFwHeapAllocFlags &= (ui32OSID > RGXFW_HOST_OS) ? (~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) : (~0ULL);
+#endif
+
+       /* if the firmware is already powered up, treat the firmware heaps as pre-mapped. */
+       if (psDeviceNode->bAutoVzFwIsUp)
+       {
+               uiRawFwHeapAllocFlags &= RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp);
+               DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_TRUE);
+       }
+
+       eError = DevmemFwAllocate(psDevInfo,
+                                                         RGX_FIRMWARE_RAW_HEAP_SIZE,
+                                                         uiRawFwHeapAllocFlags,
+                                                         psDevInfo->psGuestFirmwareRawHeap[ui32OSID]->pszName,
+                                                         &psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]);
+       PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate");
+
+       /* Mark this devmem heap as premapped so allocations will not require device mapping. */
+       DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_TRUE);
+
+       if (ui32OSID == RGXFW_HOST_OS)
+       {
+               /* if the Host's raw fw heap is premapped, mark its main & config sub-heaps accordingly
+                * No memory allocated from these sub-heaps will be individually mapped into the device's
+                * address space so they can remain marked permanently as premapped. */
+               DevmemHeapSetPremapStatus(psDevInfo->psFirmwareMainHeap, IMG_TRUE);
+               DevmemHeapSetPremapStatus(psDevInfo->psFirmwareConfigHeap, IMG_TRUE);
+       }
+
+       return eError;
+}
+
+/*!
+*******************************************************************************
+
+ @Function     RGXFwRawHeapUnmapFree
+
+ @Description Unregister firmware heap for the specified guest OSID
+
+ @Input psDeviceNode - device node
+ @Input ui32OSID     - Guest OSID
+
+******************************************************************************/
+void RGXFwRawHeapUnmapFree(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                  IMG_UINT32 ui32OSID)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       /* remove the premap status, so the heap can be unmapped and freed */
+       if (psDevInfo->psGuestFirmwareRawHeap[ui32OSID])
+       {
+               DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_FALSE);
+       }
+
+       if (psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID])
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]);
+               psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID] = NULL;
+       }
+}
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvHalt
+
+@Description    Halt the RISC-V FW core (required for certain operations
+                performed through the Debug Module)
+
+@Input          psDevInfo       Pointer to device info
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvHalt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode,
+                             PDUMP_FLAGS_CONTINUOUS, "Halt RISC-V FW");
+
+       /* Send halt request (no need to select one or more harts on this RISC-V core) */
+       PDUMPREG32(psDevInfo->psDeviceNode,
+                  RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DMCONTROL,
+                  RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_EN |
+                  RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN,
+                  PDUMP_FLAGS_CONTINUOUS);
+
+       /* Wait until hart is halted */
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   RGX_CR_FWCORE_DMI_DMSTATUS,
+                   RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN,
+                   RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN,
+                   PDUMP_FLAGS_CONTINUOUS,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+
+       /* Clear halt request */
+       PDUMPREG32(psDevInfo->psDeviceNode,
+                  RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DMCONTROL,
+                  RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN,
+                  PDUMP_FLAGS_CONTINUOUS);
+#else
+       IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM;
+
+       /* Send halt request (no need to select one or more harts on this RISC-V core) */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMCONTROL,
+                      RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_EN |
+                      RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN);
+
+       /* Wait until hart is halted */
+       if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                                pui32RegsBase + RGX_CR_FWCORE_DMI_DMSTATUS/sizeof(IMG_UINT32),
+                                RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN,
+                                RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN,
+                                POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Hart not halted (0x%x)",
+                        __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMSTATUS)));
+               return PVRSRV_ERROR_TIMEOUT;
+       }
+
+       /* Clear halt request */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMCONTROL,
+                      RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN);
+#endif
+
+       return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvIsHalted
+
+@Description    Check if the RISC-V FW is halted
+
+@Input          psDevInfo       Pointer to device info
+
+@Return         IMG_BOOL
+******************************************************************************/
+IMG_BOOL RGXRiscvIsHalted(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+#if defined(NO_HARDWARE)
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+       /* Assume the core is always halted in nohw */
+       return IMG_TRUE;
+#else
+       IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM;
+
+       return (OSReadHWReg32(pui32RegsBase, RGX_CR_FWCORE_DMI_DMSTATUS) &
+               RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN) != 0U;
+#endif
+}
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvResume
+
+@Description    Resume the RISC-V FW core
+
+@Input          psDevInfo       Pointer to device info
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvResume(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode,
+                             PDUMP_FLAGS_CONTINUOUS, "Resume RISC-V FW");
+
+       /* Send resume request (no need to select one or more harts on this RISC-V core) */
+       PDUMPREG32(psDevInfo->psDeviceNode,
+                  RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DMCONTROL,
+                  RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_EN |
+                  RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN,
+                  PDUMP_FLAGS_CONTINUOUS);
+
+       /* Wait until hart is resumed */
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   RGX_CR_FWCORE_DMI_DMSTATUS,
+                   RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN,
+                   RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN,
+                   PDUMP_FLAGS_CONTINUOUS,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+
+       /* Clear resume request */
+       PDUMPREG32(psDevInfo->psDeviceNode,
+                  RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DMCONTROL,
+                  RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN,
+                  PDUMP_FLAGS_CONTINUOUS);
+#else
+       IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM;
+
+       /* Send resume request (no need to select one or more harts on this RISC-V core) */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMCONTROL,
+                      RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_EN |
+                      RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN);
+
+       /* Wait until hart is resumed */
+       if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                                pui32RegsBase + RGX_CR_FWCORE_DMI_DMSTATUS/sizeof(IMG_UINT32),
+                                RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN,
+                                RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN,
+                                POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Hart not resumed (0x%x)",
+                        __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMSTATUS)));
+               return PVRSRV_ERROR_TIMEOUT;
+       }
+
+       /* Clear resume request */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMCONTROL,
+                      RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN);
+#endif
+
+       return PVRSRV_OK;
+}
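+
+/*
+ * Illustrative sketch (assumption, not part of the driver): RGXRiscvHalt() and
+ * RGXRiscvResume() are expected to bracket Debug Module operations that need
+ * the FW core stopped, e.g.:
+ *
+ *     if (RGXRiscvHalt(psDevInfo) == PVRSRV_OK)
+ *     {
+ *         // ... access core registers/memory through the Debug Module ...
+ *         (void) RGXRiscvResume(psDevInfo);
+ *     }
+ */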
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvCheckAbstractCmdError
+
+@Description    Check for RISC-V abstract command errors and clear them
+
+@Input          psDevInfo    Pointer to GPU device info
+
+@Return         RGXRISCVFW_ABSTRACT_CMD_ERR
+******************************************************************************/
+static RGXRISCVFW_ABSTRACT_CMD_ERR RGXRiscvCheckAbstractCmdError(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       RGXRISCVFW_ABSTRACT_CMD_ERR eCmdErr;
+
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       eCmdErr = RISCV_ABSTRACT_CMD_NO_ERROR;
+
+       /* Check error status */
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   RGX_CR_FWCORE_DMI_ABSTRACTCS,
+                   RISCV_ABSTRACT_CMD_NO_ERROR << RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_SHIFT,
+                   ~RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK,
+                   PDUMP_FLAGS_CONTINUOUS,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+#else
+       void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM;
+
+       /* Check error status */
+       eCmdErr = (OSReadHWReg32(pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS)
+                 & ~RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK)
+                 >> RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_SHIFT;
+
+       if (eCmdErr != RISCV_ABSTRACT_CMD_NO_ERROR)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "RISC-V FW abstract command error %u", eCmdErr));
+
+               /* Clear the error (note CMDERR field is write-1-to-clear) */
+               OSWriteHWReg32(pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS,
+                              ~RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK);
+       }
+#endif
+
+       return eCmdErr;
+}
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvReadReg
+
+@Description    Read a value from the given RISC-V register (GPR or CSR)
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32RegAddr     RISC-V register address
+
+@Output         pui32Value      Read value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvReadReg(PVRSRV_RGXDEV_INFO *psDevInfo,
+                             IMG_UINT32 ui32RegAddr,
+                             IMG_UINT32 *pui32Value)
+{
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+       PVR_UNREFERENCED_PARAMETER(ui32RegAddr);
+       PVR_UNREFERENCED_PARAMETER(pui32Value);
+
+       /* Reading HW registers is not supported in nohw/pdump */
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+#else
+       IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM;
+
+       /* Send abstract register read command */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM,
+                      RGX_CR_FWCORE_DMI_COMMAND,
+                      (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) |
+                      RGXRISCVFW_DMI_COMMAND_READ |
+                      RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT |
+                      ui32RegAddr);
+
+       /* Wait until abstract command is completed */
+       if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                                pui32RegsBase + RGX_CR_FWCORE_DMI_ABSTRACTCS/sizeof(IMG_UINT32),
+                                0U,
+                                RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN,
+                                POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)",
+                        __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS)));
+               return PVRSRV_ERROR_TIMEOUT;
+       }
+
+       if (RGXRiscvCheckAbstractCmdError(psDevInfo) == RISCV_ABSTRACT_CMD_NO_ERROR)
+       {
+               /* Read register value */
+               *pui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA0);
+       }
+       else
+       {
+               *pui32Value = 0U;
+       }
+
+       return PVRSRV_OK;
+#endif
+}
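+
+/*
+ * Illustrative sketch (assumption, not part of the driver): reading a core
+ * register over the abstract command interface once the core is halted.
+ * ui32SomeRegAddr is a hypothetical GPR/CSR index.
+ *
+ *     IMG_UINT32 ui32RegValue;
+ *     if (RGXRiscvReadReg(psDevInfo, ui32SomeRegAddr, &ui32RegValue) == PVRSRV_OK)
+ *     {
+ *         PVR_LOG(("FW register 0x%x = 0x%08x", ui32SomeRegAddr, ui32RegValue));
+ *     }
+ */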
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvPollReg
+
+@Description    Poll for a value from the given RISC-V register (GPR or CSR)
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32RegAddr     RISC-V register address
+@Input          ui32Value       Expected value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvPollReg(PVRSRV_RGXDEV_INFO *psDevInfo,
+                             IMG_UINT32 ui32RegAddr,
+                             IMG_UINT32 ui32Value)
+{
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                             "Poll RISC-V register 0x%x (expected 0x%08x)",
+                             ui32RegAddr, ui32Value);
+
+       /* Send abstract register read command */
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_COMMAND,
+                  (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) |
+                  RGXRISCVFW_DMI_COMMAND_READ |
+                  RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT |
+                  ui32RegAddr,
+                  PDUMP_FLAGS_CONTINUOUS);
+
+       /* Wait until abstract command is completed */
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   RGX_CR_FWCORE_DMI_ABSTRACTCS,
+                   0U,
+                   RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN,
+                   PDUMP_FLAGS_CONTINUOUS,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+
+       RGXRiscvCheckAbstractCmdError(psDevInfo);
+
+       /* Check read value */
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   RGX_CR_FWCORE_DMI_DATA0,
+                   ui32Value,
+                   0xFFFFFFFF,
+                   PDUMP_FLAGS_CONTINUOUS,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+
+       return PVRSRV_OK;
+#else
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+       PVR_UNREFERENCED_PARAMETER(ui32RegAddr);
+       PVR_UNREFERENCED_PARAMETER(ui32Value);
+
+       /* Polling HW registers is currently not required in a live driver build */
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+#endif
+}
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvWriteReg
+
+@Description    Write a value to the given RISC-V register (GPR or CSR)
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32RegAddr     RISC-V register address
+@Input          ui32Value       Write value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvWriteReg(PVRSRV_RGXDEV_INFO *psDevInfo,
+                              IMG_UINT32 ui32RegAddr,
+                              IMG_UINT32 ui32Value)
+{
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                             "Write RISC-V register 0x%x (value 0x%08x)",
+                             ui32RegAddr, ui32Value);
+
+       /* Prepare data to be written to register */
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DATA0,
+                  ui32Value, PDUMP_FLAGS_CONTINUOUS);
+
+       /* Send abstract register write command */
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_COMMAND,
+                  (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) |
+                  RGXRISCVFW_DMI_COMMAND_WRITE |
+                  RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT |
+                  ui32RegAddr,
+                  PDUMP_FLAGS_CONTINUOUS);
+
+       /* Wait until abstract command is completed */
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   RGX_CR_FWCORE_DMI_ABSTRACTCS,
+                   0U,
+                   RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN,
+                   PDUMP_FLAGS_CONTINUOUS,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+#else
+       IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM;
+
+       /* Prepare data to be written to register */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA0, ui32Value);
+
+       /* Send abstract register write command */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM,
+                      RGX_CR_FWCORE_DMI_COMMAND,
+                      (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) |
+                      RGXRISCVFW_DMI_COMMAND_WRITE |
+                      RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT |
+                      ui32RegAddr);
+
+       /* Wait until abstract command is completed */
+       if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                                pui32RegsBase + RGX_CR_FWCORE_DMI_ABSTRACTCS/sizeof(IMG_UINT32),
+                                0U,
+                                RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN,
+                                POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)",
+                        __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS)));
+               return PVRSRV_ERROR_TIMEOUT;
+       }
+#endif
+
+       return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvCheckSysBusError
+
+@Description    Check for RISC-V system bus errors and clear them
+
+@Input          psDevInfo    Pointer to GPU device info
+
+@Return         RGXRISCVFW_SYSBUS_ERR
+******************************************************************************/
+static __maybe_unused RGXRISCVFW_SYSBUS_ERR RGXRiscvCheckSysBusError(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       RGXRISCVFW_SYSBUS_ERR eSBError;
+
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       eSBError = RISCV_SYSBUS_NO_ERROR;
+
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   RGX_CR_FWCORE_DMI_SBCS,
+                   RISCV_SYSBUS_NO_ERROR << RGX_CR_FWCORE_DMI_SBCS_SBERROR_SHIFT,
+                   ~RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK,
+                   PDUMP_FLAGS_CONTINUOUS,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+#else
+       void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM;
+
+       eSBError = (OSReadHWReg32(pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBCS)
+                & ~RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK)
+                >> RGX_CR_FWCORE_DMI_SBCS_SBERROR_SHIFT;
+
+       if (eSBError != RISCV_SYSBUS_NO_ERROR)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "RISC-V FW system bus error %u", eSBError));
+
+               /* Clear the error (note SBERROR field is write-1-to-clear) */
+               OSWriteHWReg32(pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBCS,
+                              ~RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK);
+       }
+#endif
+
+       return eSBError;
+}
+
+#if !defined(EMULATOR)
+/*!
+*******************************************************************************
+@Function       RGXRiscvReadAbstractMem
+
+@Description    Read a value at the given address in RISC-V memory space
+                using RISC-V abstract memory commands
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32Addr        Address in RISC-V memory space
+
+@Output         pui32Value      Read value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR
+RGXRiscvReadAbstractMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 *pui32Value)
+{
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+       PVR_UNREFERENCED_PARAMETER(ui32Addr);
+       PVR_UNREFERENCED_PARAMETER(pui32Value);
+
+       /* Reading memory is not supported in nohw/pdump */
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+#else
+       IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM;
+
+       /* Prepare read address  */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA1, ui32Addr);
+
+       /* Send abstract memory read command */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM,
+                      RGX_CR_FWCORE_DMI_COMMAND,
+                      (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) |
+                      RGXRISCVFW_DMI_COMMAND_READ |
+                      RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT);
+
+       /* Wait until abstract command is completed */
+       if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                                pui32RegsBase + RGX_CR_FWCORE_DMI_ABSTRACTCS/sizeof(IMG_UINT32),
+                                0U,
+                                RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN,
+                                POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)",
+                        __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS)));
+               return PVRSRV_ERROR_TIMEOUT;
+       }
+
+       if (RGXRiscvCheckAbstractCmdError(psDevInfo) == RISCV_ABSTRACT_CMD_NO_ERROR)
+       {
+               /* Read memory value */
+               *pui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA0);
+       }
+       else
+       {
+               *pui32Value = 0U;
+       }
+
+       return PVRSRV_OK;
+#endif
+}
+#endif /* !defined(EMULATOR) */
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvPollAbstractMem
+
+@Description    Poll for a value at the given address in RISC-V memory space
+                using RISC-V abstract memory commands
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32Addr        Address in RISC-V memory space
+@Input          ui32Value       Expected value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR
+RGXRiscvPollAbstractMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value)
+{
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode,
+                             PDUMP_FLAGS_CONTINUOUS,
+                             "Poll RISC-V address 0x%x (expected 0x%08x)",
+                             ui32Addr, ui32Value);
+
+       /* Prepare read address  */
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DATA1,
+                  ui32Addr, PDUMP_FLAGS_CONTINUOUS);
+
+       /* Send abstract memory read command */
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_COMMAND,
+                  (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) |
+                  RGXRISCVFW_DMI_COMMAND_READ |
+                  RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT,
+                  PDUMP_FLAGS_CONTINUOUS);
+
+       /* Wait until abstract command is completed */
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   RGX_CR_FWCORE_DMI_ABSTRACTCS,
+                   0U,
+                   RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN,
+                   PDUMP_FLAGS_CONTINUOUS,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+
+       RGXRiscvCheckAbstractCmdError(psDevInfo);
+
+       /* Check read value */
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   RGX_CR_FWCORE_DMI_DATA0,
+                   ui32Value,
+                   0xFFFFFFFF,
+                   PDUMP_FLAGS_CONTINUOUS,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+
+       return PVRSRV_OK;
+#else
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+       PVR_UNREFERENCED_PARAMETER(ui32Addr);
+       PVR_UNREFERENCED_PARAMETER(ui32Value);
+
+       /* Polling memory is currently not required in a live driver build */
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+#endif
+}
+
+#if !defined(EMULATOR)
+/*!
+*******************************************************************************
+@Function       RGXRiscvReadSysBusMem
+
+@Description    Read a value at the given address in RISC-V memory space
+                using the RISC-V system bus
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32Addr        Address in RISC-V memory space
+
+@Output         pui32Value      Read value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR
+RGXRiscvReadSysBusMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 *pui32Value)
+{
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+       PVR_UNREFERENCED_PARAMETER(ui32Addr);
+       PVR_UNREFERENCED_PARAMETER(pui32Value);
+
+       /* Reading memory is not supported in nohw/pdump */
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+#else
+       IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM;
+
+       /* Configure system bus to read 32 bit every time a new address is provided */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM,
+                      RGX_CR_FWCORE_DMI_SBCS,
+                      (RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT) |
+                      RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_EN);
+
+       /* Perform read */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBADDRESS0, ui32Addr);
+
+       /* Wait until system bus is idle */
+       if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                                pui32RegsBase + RGX_CR_FWCORE_DMI_SBCS/sizeof(IMG_UINT32),
+                                0U,
+                                RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN,
+                                POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: System Bus did not go idle in time (sbcs = 0x%x)",
+                        __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBCS)));
+               return PVRSRV_ERROR_TIMEOUT;
+       }
+
+       if (RGXRiscvCheckSysBusError(psDevInfo) == RISCV_SYSBUS_NO_ERROR)
+       {
+               /* Read value from debug system bus */
+               *pui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBDATA0);
+       }
+       else
+       {
+               *pui32Value = 0U;
+       }
+
+       return PVRSRV_OK;
+#endif
+}
+#endif /* !defined(EMULATOR) */
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvPollSysBusMem
+
+@Description    Poll for a value at the given address in RISC-V memory space
+                using the RISC-V system bus
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32Addr        Address in RISC-V memory space
+@Input          ui32Value       Expected value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR
+RGXRiscvPollSysBusMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value)
+{
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                             "Poll RISC-V address 0x%x (expected 0x%08x)",
+                             ui32Addr, ui32Value);
+
+       /* Configure system bus to read 32 bit every time a new address is provided */
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBCS,
+                  (RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT) |
+                  RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_EN,
+                  PDUMP_FLAGS_CONTINUOUS);
+
+       /* Perform read */
+       PDUMPREG32(psDevInfo->psDeviceNode,
+                  RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBADDRESS0,
+                  ui32Addr,
+                  PDUMP_FLAGS_CONTINUOUS);
+
+       /* Wait until system bus is idle */
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   RGX_CR_FWCORE_DMI_SBCS,
+                   0U,
+                   RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN,
+                   PDUMP_FLAGS_CONTINUOUS,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+
+       RGXRiscvCheckSysBusError(psDevInfo);
+
+       /* Check read value */
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   RGX_CR_FWCORE_DMI_SBDATA0,
+                   ui32Value,
+                   0xFFFFFFFF,
+                   PDUMP_FLAGS_CONTINUOUS,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+
+       return PVRSRV_OK;
+#else
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+       PVR_UNREFERENCED_PARAMETER(ui32Addr);
+       PVR_UNREFERENCED_PARAMETER(ui32Value);
+
+       /* Polling memory is currently not required in a live driver build */
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+#endif
+}
+
+#if !defined(EMULATOR)
+/*!
+*******************************************************************************
+@Function       RGXRiscvReadMem
+
+@Description    Read a value at the given address in RISC-V memory space
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32Addr        Address in RISC-V memory space
+
+@Output         pui32Value      Read value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXRiscvReadMem(PVRSRV_RGXDEV_INFO *psDevInfo,
+                             IMG_UINT32 ui32Addr,
+                             IMG_UINT32 *pui32Value)
+{
+       if (ui32Addr >= RGXRISCVFW_COREMEM_BASE && ui32Addr <= RGXRISCVFW_COREMEM_END)
+       {
+               return RGXRiscvReadAbstractMem(psDevInfo, ui32Addr, pui32Value);
+       }
+
+       return RGXRiscvReadSysBusMem(psDevInfo, ui32Addr, pui32Value);
+}
+#endif /* !defined(EMULATOR) */
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvPollMem
+
+@Description    Poll a value at the given address in RISC-V memory space
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32Addr        Address in RISC-V memory space
+@Input          ui32Value       Expected value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvPollMem(PVRSRV_RGXDEV_INFO *psDevInfo,
+                             IMG_UINT32 ui32Addr,
+                             IMG_UINT32 ui32Value)
+{
+       if (ui32Addr >= RGXRISCVFW_COREMEM_BASE && ui32Addr <= RGXRISCVFW_COREMEM_END)
+       {
+               return RGXRiscvPollAbstractMem(psDevInfo, ui32Addr, ui32Value);
+       }
+
+       return RGXRiscvPollSysBusMem(psDevInfo, ui32Addr, ui32Value);
+}
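+
+/*
+ * Illustrative note (assumption, not part of the driver): RGXRiscvReadMem() and
+ * RGXRiscvPollMem() dispatch on the target address: anything inside the
+ * core-coupled memory window [RGXRISCVFW_COREMEM_BASE, RGXRISCVFW_COREMEM_END]
+ * goes through abstract memory commands, anything else through the Debug
+ * Module system bus, e.g.:
+ *
+ *     IMG_UINT32 ui32Word;
+ *     (void) RGXRiscvReadMem(psDevInfo, RGXRISCVFW_COREMEM_BASE, &ui32Word);
+ */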
+
+#if !defined(EMULATOR)
+/*!
+*******************************************************************************
+@Function       RGXRiscvWriteAbstractMem
+
+@Description    Write a value at the given address in RISC-V memory space
+                using RISC-V abstract memory commands
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32Addr        Address in RISC-V memory space
+@Input          ui32Value       Write value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR
+RGXRiscvWriteAbstractMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value)
+{
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                             "Write RISC-V address 0x%x (value 0x%08x)",
+                             ui32Addr, ui32Value);
+
+       /* Prepare write address */
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DATA1,
+                  ui32Addr, PDUMP_FLAGS_CONTINUOUS);
+
+       /* Prepare write data */
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DATA0,
+                  ui32Value, PDUMP_FLAGS_CONTINUOUS);
+
+       /* Send abstract memory write command */
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_COMMAND,
+                  (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) |
+                  RGXRISCVFW_DMI_COMMAND_WRITE |
+                  RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT,
+                  PDUMP_FLAGS_CONTINUOUS);
+
+       /* Wait until abstract command is completed */
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   RGX_CR_FWCORE_DMI_ABSTRACTCS,
+                   0U,
+                   RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN,
+                   PDUMP_FLAGS_CONTINUOUS,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+#else
+       IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM;
+
+       /* Prepare write address */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA1, ui32Addr);
+
+       /* Prepare write data */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA0, ui32Value);
+
+       /* Send abstract memory write command */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM,
+                      RGX_CR_FWCORE_DMI_COMMAND,
+                      (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) |
+                      RGXRISCVFW_DMI_COMMAND_WRITE |
+                      RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT);
+
+       /* Wait until abstract command is completed */
+       if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                                pui32RegsBase + RGX_CR_FWCORE_DMI_ABSTRACTCS/sizeof(IMG_UINT32),
+                                0U,
+                                RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN,
+                                POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)",
+                        __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS)));
+               return PVRSRV_ERROR_TIMEOUT;
+       }
+#endif
+
+       return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvWriteSysBusMem
+
+@Description    Write a value at the given address in RISC-V memory space
+                using the RISC-V system bus
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32Addr        Address in RISC-V memory space
+@Input          ui32Value       Write value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR
+RGXRiscvWriteSysBusMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value)
+{
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                             "Write RISC-V address 0x%x (value 0x%08x)",
+                             ui32Addr, ui32Value);
+
+       /* Configure system bus for 32 bit accesses */
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBCS,
+                  RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT,
+                  PDUMP_FLAGS_CONTINUOUS);
+
+       /* Prepare write address */
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBADDRESS0,
+                  ui32Addr, PDUMP_FLAGS_CONTINUOUS);
+
+       /* Prepare write data and initiate write */
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBDATA0,
+                  ui32Value, PDUMP_FLAGS_CONTINUOUS);
+
+       /* Wait until system bus is idle */
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   RGX_CR_FWCORE_DMI_SBCS,
+                   0U,
+                   RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN,
+                   PDUMP_FLAGS_CONTINUOUS,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+#else
+       IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM;
+
+       /* Configure system bus for 32 bit accesses */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM,
+                      RGX_CR_FWCORE_DMI_SBCS,
+                      RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT);
+
+       /* Prepare write address */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBADDRESS0, ui32Addr);
+
+       /* Prepare write data and initiate write */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBDATA0, ui32Value);
+
+       /* Wait until system bus is idle */
+       if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                                pui32RegsBase + RGX_CR_FWCORE_DMI_SBCS/sizeof(IMG_UINT32),
+                                0U,
+                                RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN,
+                                POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: System Bus did not go idle in time (sbcs = 0x%x)",
+                        __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBCS)));
+               return PVRSRV_ERROR_TIMEOUT;
+       }
+#endif
+
+       return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvWriteMem
+
+@Description    Write a value to the given address in RISC-V memory space
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32Addr        Address in RISC-V memory space
+@Input          ui32Value       Write value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXRiscvWriteMem(PVRSRV_RGXDEV_INFO *psDevInfo,
+                              IMG_UINT32 ui32Addr,
+                              IMG_UINT32 ui32Value)
+{
+       if (ui32Addr >= RGXRISCVFW_COREMEM_BASE && ui32Addr <= RGXRISCVFW_COREMEM_END)
+       {
+               return RGXRiscvWriteAbstractMem(psDevInfo, ui32Addr, ui32Value);
+       }
+
+       return RGXRiscvWriteSysBusMem(psDevInfo, ui32Addr, ui32Value);
+}
+#endif /* !defined(EMULATOR) */
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvDmiOp
+
+@Description    Acquire the powerlock and perform an operation on the RISC-V
+                Debug Module Interface, but only if the GPU is powered on.
+
+@Input          psDevInfo       Pointer to device info
+@InOut          pui64DMI        Encoding of a request for the RISC-V Debug
+                                Module with same format as the 'dmi' register
+                                from the RISC-V debug specification (v0.13+).
+                                On return, this is updated with the result of
+                                the request, encoded the same way.
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvDmiOp(PVRSRV_RGXDEV_INFO *psDevInfo,
+                           IMG_UINT64 *pui64DMI)
+{
+#if defined(NO_HARDWARE) && defined(PDUMP)
+       PVR_UNREFERENCED_PARAMETER(psDevInfo);
+       PVR_UNREFERENCED_PARAMETER(pui64DMI);
+
+       /* Accessing DM registers is not supported in nohw/pdump */
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+#else
+#define DMI_BASE     RGX_CR_FWCORE_DMI_RESERVED00
+#define DMI_STRIDE  (RGX_CR_FWCORE_DMI_RESERVED01 - RGX_CR_FWCORE_DMI_RESERVED00)
+#define DMI_REG(r)  ((DMI_BASE) + (DMI_STRIDE) * (r))
+
+#define DMI_OP_SHIFT            0U
+#define DMI_OP_MASK             0x3ULL
+#define DMI_DATA_SHIFT          2U
+#define DMI_DATA_MASK           0x3FFFFFFFCULL
+#define DMI_ADDRESS_SHIFT       34U
+#define DMI_ADDRESS_MASK        0xFC00000000ULL
+
+#define DMI_OP_NOP             0U
+#define DMI_OP_READ            1U
+#define DMI_OP_WRITE           2U
+#define DMI_OP_RESERVED        3U
+
+#define DMI_OP_STATUS_SUCCESS  0U
+#define DMI_OP_STATUS_RESERVED 1U
+#define DMI_OP_STATUS_FAILED   2U
+#define DMI_OP_STATUS_BUSY     3U
+
+       PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+       PVRSRV_DEV_POWER_STATE ePowerState;
+       PVRSRV_ERROR eError;
+       IMG_UINT64 ui64Op, ui64Address, ui64Data;
+
+       ui64Op      = (*pui64DMI & DMI_OP_MASK) >> DMI_OP_SHIFT;
+       ui64Address = (*pui64DMI & DMI_ADDRESS_MASK) >> DMI_ADDRESS_SHIFT;
+       ui64Data    = (*pui64DMI & DMI_DATA_MASK) >> DMI_DATA_SHIFT;
+
+       eError = PVRSRVPowerLock(psDeviceNode);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire powerlock (%s)",
+                               __func__, PVRSRVGetErrorString(eError)));
+               ui64Op = DMI_OP_STATUS_FAILED;
+               goto dmiop_update;
+       }
+
+       eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: failed to retrieve RGX power state (%s)",
+                               __func__, PVRSRVGetErrorString(eError)));
+               ui64Op = DMI_OP_STATUS_FAILED;
+               goto dmiop_release_lock;
+       }
+
+       if (ePowerState == PVRSRV_DEV_POWER_STATE_ON)
+       {
+               switch (ui64Op)
+               {
+                       case DMI_OP_NOP:
+                               ui64Op = DMI_OP_STATUS_SUCCESS;
+                               break;
+                       case DMI_OP_WRITE:
+                               OSWriteHWReg32(psDevInfo->pvRegsBaseKM,
+                                               DMI_REG(ui64Address),
+                                               (IMG_UINT32)ui64Data);
+                               ui64Op = DMI_OP_STATUS_SUCCESS;
+                               break;
+                       case DMI_OP_READ:
+                               ui64Data = (IMG_UINT64)OSReadHWReg32(psDevInfo->pvRegsBaseKM,
+                                               DMI_REG(ui64Address));
+                               ui64Op = DMI_OP_STATUS_SUCCESS;
+                               break;
+                       default:
+                               PVR_DPF((PVR_DBG_ERROR, "%s: unknown op %u", __func__, (IMG_UINT32)ui64Op));
+                               ui64Op = DMI_OP_STATUS_FAILED;
+                               break;
+               }
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_WARNING, "%s: Accessing RISC-V Debug Module is not "
+                                       "possible while the GPU is powered off", __func__));
+
+               ui64Op = DMI_OP_STATUS_FAILED;
+       }
+
+dmiop_release_lock:
+       PVRSRVPowerUnlock(psDeviceNode);
+
+dmiop_update:
+       *pui64DMI = (ui64Op << DMI_OP_SHIFT) |
+               (ui64Address << DMI_ADDRESS_SHIFT) |
+               (ui64Data << DMI_DATA_SHIFT);
+
+       return eError;
+#endif
+}
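+
+/*
+ * Illustrative sketch (assumption, not part of the driver): a caller packs the
+ * request into the 64-bit 'dmi' word using the layout defined above
+ * (op in bits [1:0], data in bits [33:2], address in bits [39:34]), e.g. to
+ * read Debug Module register 0x11 (dmstatus in the RISC-V debug spec):
+ *
+ *     IMG_UINT64 ui64DMI = ((IMG_UINT64)0x11 << 34) | 1ULL;  // op = read
+ *     if (RGXRiscvDmiOp(psDevInfo, &ui64DMI) == PVRSRV_OK &&
+ *         (ui64DMI & 0x3) == 0)                              // status: success
+ *     {
+ *         IMG_UINT32 ui32Value = (IMG_UINT32)((ui64DMI >> 2) & 0xFFFFFFFFULL);
+ *         // ui32Value now holds the register contents
+ *     }
+ */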
+
+/*
+       RGXReadMETAAddr
+*/
+static PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 *pui32Value)
+{
+       IMG_UINT8 __iomem  *pui8RegBase = psDevInfo->pvRegsBaseKM;
+       IMG_UINT32 ui32PollValue;
+       IMG_UINT32 ui32PollMask;
+       IMG_UINT32 ui32PollRegOffset;
+       IMG_UINT32 ui32ReadOffset;
+       IMG_UINT32 ui32WriteOffset;
+       IMG_UINT32 ui32WriteValue;
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES))
+       {
+               ui32PollValue = RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN
+                                               | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN;
+               ui32PollMask = RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN
+                                               | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN;
+               ui32PollRegOffset = RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES;
+               ui32WriteOffset = RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES;
+               ui32WriteValue = ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES__RD_EN;
+               CHECK_HWBRN_68777(ui32WriteValue);
+               ui32ReadOffset = RGX_CR_META_SP_MSLVDATAX__META_REGISTER_UNPACKED_ACCESSES;
+       }
+       else
+       {
+               ui32PollValue = RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN;
+               ui32PollMask = RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN;
+               ui32PollRegOffset = RGX_CR_META_SP_MSLVCTRL1;
+               ui32WriteOffset = RGX_CR_META_SP_MSLVCTRL0;
+               ui32WriteValue = ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN;
+               ui32ReadOffset = RGX_CR_META_SP_MSLVDATAX;
+       }
+
+       /* Wait for Slave Port to be Ready */
+       if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                       (IMG_UINT32 __iomem *) (pui8RegBase + ui32PollRegOffset),
+                       ui32PollValue,
+                       ui32PollMask,
+                       POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+       {
+               return PVRSRV_ERROR_TIMEOUT;
+       }
+
+       /* Issue the Read */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32WriteOffset, ui32WriteValue);
+       (void)OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32WriteOffset);
+
+       /* Wait for Slave Port to be Ready: read complete */
+       if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                       (IMG_UINT32 __iomem *) (pui8RegBase + ui32PollRegOffset),
+                       ui32PollValue,
+                       ui32PollMask,
+                       POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+       {
+               return PVRSRV_ERROR_TIMEOUT;
+       }
+
+       /* Read the value */
+       *pui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32ReadOffset);
+
+       return PVRSRV_OK;
+}
+
+/*
+       RGXWriteMETAAddr
+*/
+static PVRSRV_ERROR RGXWriteMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 ui32Value)
+{
+       IMG_UINT8 __iomem *pui8RegBase = psDevInfo->pvRegsBaseKM;
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES))
+       {
+               /* Wait for Slave Port to be Ready */
+               if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                               (IMG_UINT32 __iomem *)(pui8RegBase + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES),
+                               RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN
+                               | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN,
+                               RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN
+                               | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN,
+                               POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+               {
+                       return PVRSRV_ERROR_TIMEOUT;
+               }
+
+               /* Issue the Write */
+               CHECK_HWBRN_68777(ui32METAAddr);
+               OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES, ui32METAAddr);
+               OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAT__META_REGISTER_UNPACKED_ACCESSES, ui32Value);
+       }
+       else
+       {
+               /* Wait for Slave Port to be Ready */
+               if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                               (IMG_UINT32 __iomem *)(pui8RegBase + RGX_CR_META_SP_MSLVCTRL1),
+                               RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+                               RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+                               POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+               {
+                       return PVRSRV_ERROR_TIMEOUT;
+               }
+
+               /* Issue the Write */
+               OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL0, ui32METAAddr);
+               (void) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL0); /* Fence write */
+               OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAT, ui32Value);
+               (void) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAT); /* Fence write */
+       }
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXReadFWModuleAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 *pui32Value)
+{
+       PVRSRV_ERROR eError;
+
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+       {
+               eError = RGXReadMETAAddr(psDevInfo, ui32FWAddr, pui32Value);
+       }
+#if !defined(EMULATOR)
+       else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+       {
+               eError = RGXRiscvReadMem(psDevInfo, ui32FWAddr, pui32Value);
+       }
+#endif
+       else
+       {
+               eError = PVRSRV_ERROR_NOT_SUPPORTED;
+       }
+
+       return eError;
+}
+
+PVRSRV_ERROR RGXWriteFWModuleAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 ui32Value)
+{
+       PVRSRV_ERROR eError;
+
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+       {
+               eError = RGXWriteMETAAddr(psDevInfo, ui32FWAddr, ui32Value);
+       }
+#if !defined(EMULATOR)
+       else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+       {
+               eError = RGXRiscvWriteMem(psDevInfo, ui32FWAddr, ui32Value);
+       }
+#endif
+       else
+       {
+               eError = PVRSRV_ERROR_NOT_SUPPORTED;
+       }
+
+       return eError;
+
+}
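+
+/*
+ * Illustrative sketch (not part of the driver): how a caller might use the
+ * RGXReadFWModuleAddr/RGXWriteFWModuleAddr pair above to patch a single
+ * 32-bit word in the firmware processor's address space. ui32FWAddr and the
+ * bit being set are placeholders chosen purely for the example.
+ *
+ *     IMG_UINT32 ui32Word;
+ *     PVRSRV_ERROR eErr = RGXReadFWModuleAddr(psDevInfo, ui32FWAddr, &ui32Word);
+ *     if (eErr == PVRSRV_OK)
+ *     {
+ *         ui32Word |= 0x1U;    // example: set an assumed control bit
+ *         eErr = RGXWriteFWModuleAddr(psDevInfo, ui32FWAddr, ui32Word);
+ *     }
+ */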
+
+PVRSRV_ERROR RGXGetFwMapping(PVRSRV_RGXDEV_INFO *psDevInfo,
+                             IMG_UINT32 ui32FwVA,
+                             IMG_CPU_PHYADDR *psCpuPA,
+                             IMG_DEV_PHYADDR *psDevPA,
+                             IMG_UINT64 *pui64RawPTE)
+{
+       PVRSRV_ERROR eError       = PVRSRV_OK;
+       IMG_CPU_PHYADDR sCpuPA    = {0U};
+       IMG_DEV_PHYADDR sDevPA    = {0U};
+       IMG_UINT64 ui64RawPTE     = 0U;
+       MMU_FAULT_DATA sFaultData = {0U};
+       MMU_CONTEXT *psFwMMUCtx   = psDevInfo->psKernelMMUCtx;
+       IMG_UINT32 ui32FwHeapBase = (IMG_UINT32) (RGX_FIRMWARE_RAW_HEAP_BASE & UINT_MAX);
+       IMG_UINT32 ui32FwHeapEnd  = ui32FwHeapBase + (RGX_NUM_OS_SUPPORTED * RGX_FIRMWARE_RAW_HEAP_SIZE);
+       IMG_UINT32 ui32OSID       = (ui32FwVA - ui32FwHeapBase) / RGX_FIRMWARE_RAW_HEAP_SIZE;
+       IMG_UINT32 ui32HeapId;
+       PHYS_HEAP *psPhysHeap;
+       IMG_UINT64 ui64FwDataBaseMask;
+       IMG_DEV_VIRTADDR sDevVAddr;
+
+       /* default to 4K pages */
+       IMG_UINT32 ui32FwPageSize = BIT(RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT);
+       IMG_UINT32 ui32PageOffset = (ui32FwVA & (ui32FwPageSize - 1));
+
+       PVR_LOG_GOTO_IF_INVALID_PARAM((ui32OSID < RGX_NUM_OS_SUPPORTED),
+                                     eError, ErrorExit);
+
+       PVR_LOG_GOTO_IF_INVALID_PARAM(((psCpuPA != NULL) ||
+                                      (psDevPA != NULL) ||
+                                      (pui64RawPTE != NULL)),
+                                     eError, ErrorExit);
+
+       PVR_LOG_GOTO_IF_INVALID_PARAM(((ui32FwVA >= ui32FwHeapBase) &&
+                                     (ui32FwVA < ui32FwHeapEnd)),
+                                     eError, ErrorExit);
+
+       ui32HeapId = (ui32OSID == RGXFW_HOST_OS) ?
+                     PVRSRV_PHYS_HEAP_FW_MAIN : (PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32OSID);
+       psPhysHeap = psDevInfo->psDeviceNode->apsPhysHeap[ui32HeapId];
+
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+       {
+               ui64FwDataBaseMask = ~(RGXFW_SEGMMU_DATA_META_CACHE_MASK |
+                                        RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK |
+                                        RGXFW_SEGMMU_DATA_BASE_ADDRESS);
+       }
+       else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+       {
+               ui64FwDataBaseMask = ~(RGXRISCVFW_GET_REGION_BASE(0xF));
+       }
+       else
+       {
+               PVR_LOG_GOTO_WITH_ERROR("RGXGetFwMapping", eError, PVRSRV_ERROR_NOT_IMPLEMENTED, ErrorExit);
+       }
+
+       sDevVAddr.uiAddr = (ui32FwVA & ui64FwDataBaseMask) | RGX_FIRMWARE_RAW_HEAP_BASE;
+
+       /* Fw CPU shares a subset of the GPU's VA space */
+       MMU_CheckFaultAddress(psFwMMUCtx, &sDevVAddr, &sFaultData);
+
+       ui64RawPTE = sFaultData.sLevelData[MMU_LEVEL_1].ui64Address;
+
+       if (eError == PVRSRV_OK)
+       {
+               if (!BITMASK_HAS(ui64RawPTE, RGX_MMUCTRL_PT_DATA_VALID_EN))
+               {
+                       /* don't report invalid pages */
+                       eError = PVRSRV_ERROR_DEVICEMEM_NO_MAPPING;
+               }
+               else
+               {
+                       sDevPA.uiAddr = ui32PageOffset + (ui64RawPTE & ~RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK);
+
+                       /* Only the Host's Firmware heap is present in the Host's CPU IPA space */
+                       if (ui32OSID == RGXFW_HOST_OS)
+                       {
+                               PhysHeapDevPAddrToCpuPAddr(psPhysHeap, 1, &sCpuPA, &sDevPA);
+                       }
+                       else
+                       {
+                               sCpuPA.uiAddr = 0U;
+                       }
+               }
+       }
+
+       if (psCpuPA != NULL)
+       {
+               *psCpuPA = sCpuPA;
+       }
+
+       if (psDevPA != NULL)
+       {
+               *psDevPA = sDevPA;
+       }
+
+       if (pui64RawPTE != NULL)
+       {
+               *pui64RawPTE = ui64RawPTE;
+       }
+
+ErrorExit:
+       return eError;
+}
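+
+/*
+ * Illustrative sketch (not part of the driver): resolving a firmware virtual
+ * address with RGXGetFwMapping above. Only the outputs of interest need to be
+ * non-NULL; ui32FwVA is a placeholder assumed to lie inside the firmware heap.
+ *
+ *     IMG_DEV_PHYADDR sDevPA;
+ *     IMG_UINT64 ui64PTE;
+ *     PVRSRV_ERROR eErr = RGXGetFwMapping(psDevInfo, ui32FwVA,
+ *                                         NULL, &sDevPA, &ui64PTE);
+ */
+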
+/******************************************************************************
+ End of file (rgxfwutils.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxfwutils.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxfwutils.h
new file mode 100644 (file)
index 0000000..dbe723b
--- /dev/null
@@ -0,0 +1,1371 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX firmware utility routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX firmware utility routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXFWUTILS_H
+#define RGXFWUTILS_H
+
+#include "rgx_memallocflags.h"
+#include "rgxdevice.h"
+#include "rgxccb.h"
+#include "devicemem.h"
+#include "device.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "connection_server.h"
+#include "rgxta3d.h"
+#include "devicemem_utils.h"
+#include "rgxmem.h"
+
+#define RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT   "FwRawOSID%d" /*!< RGX Raw Firmware Heap identifier */
+
+static INLINE PVRSRV_ERROR _SelectDevMemHeap(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                        PVRSRV_MEMALLOCFLAGS_T *puiFlags,
+                                                                                        DEVMEM_HEAP **ppsFwHeap)
+{
+       PVRSRV_PHYS_HEAP ePhysHeap = (PVRSRV_PHYS_HEAP)PVRSRV_GET_PHYS_HEAP_HINT(*puiFlags);
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       switch (ePhysHeap)
+       {
+#if defined(SUPPORT_SECURITY_VALIDATION)
+               /* call with GPU_SECURE from RGXSetupFwSysData */
+               case PVRSRV_PHYS_HEAP_GPU_SECURE:
+#endif
+               case PVRSRV_PHYS_HEAP_FW_CODE:
+               case PVRSRV_PHYS_HEAP_FW_PRIV_DATA:
+               case PVRSRV_PHYS_HEAP_FW_MAIN:
+               {
+                       *ppsFwHeap = psDevInfo->psFirmwareMainHeap;
+                       break;
+               }
+               case PVRSRV_PHYS_HEAP_FW_CONFIG:
+               {
+                       *ppsFwHeap = psDevInfo->psFirmwareConfigHeap;
+                       break;
+               }
+               case PVRSRV_PHYS_HEAP_FW_PREMAP0:
+               case PVRSRV_PHYS_HEAP_FW_PREMAP1:
+               case PVRSRV_PHYS_HEAP_FW_PREMAP2:
+               case PVRSRV_PHYS_HEAP_FW_PREMAP3:
+               case PVRSRV_PHYS_HEAP_FW_PREMAP4:
+               case PVRSRV_PHYS_HEAP_FW_PREMAP5:
+               case PVRSRV_PHYS_HEAP_FW_PREMAP6:
+               case PVRSRV_PHYS_HEAP_FW_PREMAP7:
+               {
+                       IMG_UINT32 ui32OSID = ePhysHeap - PVRSRV_PHYS_HEAP_FW_PREMAP0;
+
+                       PVR_LOG_RETURN_IF_INVALID_PARAM(ui32OSID < RGX_NUM_OS_SUPPORTED, "ui32OSID");
+                       *ppsFwHeap = psDevInfo->psGuestFirmwareRawHeap[ui32OSID];
+                       break;
+               }
+               default:
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: invalid phys heap", __func__));
+                       eError = PVRSRV_ERROR_INVALID_PARAMS;
+                       break;
+               }
+       }
+
+       return eError;
+}
+
+/*
+ * Firmware-only allocations (which are initialised by the host) must be aligned to the SLC cache line size.
+ * This is because firmware-only allocations are GPU_CACHE_INCOHERENT, which causes problems
+ * if two allocations share the same cache line; e.g. the initialisation of the second allocation won't
+ * make it into the SLC cache because the line has already been loaded when accessing the content of the first allocation.
+ */
+static INLINE PVRSRV_ERROR DevmemFwAllocate(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                       IMG_DEVMEM_SIZE_T uiSize,
+                                                                                       PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                                                                       const IMG_CHAR *pszText,
+                                                                                       DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+       IMG_DEV_VIRTADDR sTmpDevVAddr;
+       PVRSRV_ERROR eError;
+       DEVMEM_HEAP *psFwHeap;
+       IMG_DEVMEM_ALIGN_T uiAlign;
+
+       PVR_DPF_ENTERED;
+
+       /* Enforce the standard prefix naming scheme callers must follow */
+       PVR_ASSERT((pszText != NULL) && (pszText[0] == 'F') && (pszText[1] == 'w'));
+
+       /* Imported from AppHint: flag to poison allocations when freed */
+       uiFlags |= psDevInfo->uiFWPoisonOnFreeFlag;
+
+       eError = _SelectDevMemHeap(psDevInfo, &uiFlags, &psFwHeap);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF_RETURN_RC(eError);
+       }
+
+       uiAlign = (psFwHeap == psDevInfo->psFirmwareConfigHeap) ?
+                               (RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY) :
+                               (GET_ROGUE_CACHE_LINE_SIZE(RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)));
+
+       eError = DevmemAllocateAndMap(psFwHeap,
+                               uiSize,
+                               uiAlign,
+                               uiFlags,
+                               pszText,
+                               ppsMemDescPtr,
+                               &sTmpDevVAddr);
+
+       PVR_DPF_RETURN_RC(eError);
+}
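+
+/*
+ * Illustrative sketch (not part of the driver): a typical DevmemFwAllocate
+ * call. The allocation label must start with "Fw" (enforced by the assert
+ * above); the size and the flags (RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS, defined
+ * later in this header) are placeholders for the example.
+ *
+ *     DEVMEM_MEMDESC *psMemDesc;
+ *     PVRSRV_ERROR eErr = DevmemFwAllocate(psDevInfo,
+ *                                          sizeof(RGXFWIF_DEV_VIRTADDR),
+ *                                          RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS,
+ *                                          "FwExampleBuffer",
+ *                                          &psMemDesc);
+ *     // ... use the allocation, then release it with DevmemFwUnmapAndFree()
+ */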
+
+static INLINE PVRSRV_ERROR DevmemFwAllocateExportable(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                                                         IMG_DEVMEM_SIZE_T uiSize,
+                                                                                                         IMG_DEVMEM_ALIGN_T uiAlign,
+                                                                                                         PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                                                                                         const IMG_CHAR *pszText,
+                                                                                                         DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+       IMG_DEV_VIRTADDR sTmpDevVAddr;
+       PVRSRV_ERROR eError;
+       DEVMEM_HEAP *psFwHeap;
+
+       PVR_DPF_ENTERED;
+
+       /* Enforce the standard prefix naming scheme callers must follow */
+       PVR_ASSERT((pszText != NULL) &&
+                       (pszText[0] == 'F') && (pszText[1] == 'w') &&
+                       (pszText[2] == 'E') && (pszText[3] == 'x'));
+
+       /* Imported from AppHint: flag to poison allocations when freed */
+       uiFlags |= psDevInfo->uiFWPoisonOnFreeFlag;
+
+       eError = _SelectDevMemHeap(psDevInfo, &uiFlags, &psFwHeap);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF_RETURN_RC(eError);
+       }
+
+       eError = DevmemAllocateExportable(psDeviceNode,
+                                                                         uiSize,
+                                                                         uiAlign,
+                                                                         DevmemGetHeapLog2PageSize(psFwHeap),
+                                                                         uiFlags,
+                                                                         pszText,
+                                                                         ppsMemDescPtr);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "FW DevmemAllocateExportable failed (%u)", eError));
+               PVR_DPF_RETURN_RC(eError);
+       }
+
+       /*
+               We need to map it so the heap for this allocation
+               is set
+       */
+       eError = DevmemMapToDevice(*ppsMemDescPtr,
+                                                          psDevInfo->psFirmwareMainHeap,
+                                                          &sTmpDevVAddr);
+       if (eError != PVRSRV_OK)
+       {
+               DevmemFree(*ppsMemDescPtr);
+               PVR_DPF((PVR_DBG_ERROR, "FW DevmemMapToDevice failed (%u)", eError));
+       }
+
+       PVR_DPF_RETURN_RC1(eError, *ppsMemDescPtr);
+}
+
+static INLINE PVRSRV_ERROR DevmemFwAllocateSparse(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                               IMG_DEVMEM_SIZE_T uiSize,
+                                                                                               IMG_DEVMEM_SIZE_T uiChunkSize,
+                                                                                               IMG_UINT32 ui32NumPhysChunks,
+                                                                                               IMG_UINT32 ui32NumVirtChunks,
+                                                                                               IMG_UINT32 *pui32MappingTable,
+                                                                                               PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                                                                               const IMG_CHAR *pszText,
+                                                                                               DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+       IMG_DEV_VIRTADDR sTmpDevVAddr;
+       PVRSRV_ERROR eError;
+       DEVMEM_HEAP *psFwHeap;
+       IMG_UINT32 ui32Align;
+
+       PVR_DPF_ENTERED;
+
+       /* Enforce the standard prefix naming scheme callers must follow */
+       PVR_ASSERT((pszText != NULL) && (pszText[0] == 'F') && (pszText[1] == 'w'));
+       ui32Align = GET_ROGUE_CACHE_LINE_SIZE(RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS));
+
+       /* Imported from AppHint: flag to poison allocations when freed */
+       uiFlags |= psDevInfo->uiFWPoisonOnFreeFlag;
+
+       eError = _SelectDevMemHeap(psDevInfo, &uiFlags, &psFwHeap);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF_RETURN_RC(eError);
+       }
+
+       eError = DevmemAllocateSparse(psDevInfo->psDeviceNode,
+                                                               uiSize,
+                                                               uiChunkSize,
+                                                               ui32NumPhysChunks,
+                                                               ui32NumVirtChunks,
+                                                               pui32MappingTable,
+                                                               ui32Align,
+                                                               DevmemGetHeapLog2PageSize(psFwHeap),
+                                                               uiFlags | PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING,
+                                                               pszText,
+                                                               ppsMemDescPtr);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF_RETURN_RC(eError);
+       }
+       /*
+               We need to map it so the heap for this allocation
+               is set
+       */
+       eError = DevmemMapToDevice(*ppsMemDescPtr,
+                                  psFwHeap,
+                                  &sTmpDevVAddr);
+       if (eError != PVRSRV_OK)
+       {
+               DevmemFree(*ppsMemDescPtr);
+               PVR_DPF_RETURN_RC(eError);
+       }
+
+       PVR_DPF_RETURN_RC(eError);
+}
+
+
+static INLINE void DevmemFwUnmapAndFree(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                               DEVMEM_MEMDESC *psMemDesc)
+{
+       PVR_DPF_ENTERED1(psMemDesc);
+
+       DevmemReleaseDevVirtAddr(psMemDesc);
+       DevmemFree(psMemDesc);
+
+       PVR_DPF_RETURN;
+}
+
+
+/*
+ * This function returns the value of the hardware register RGX_CR_TIMER
+ * which is a timer counting in ticks.
+ */
+
+static INLINE IMG_UINT64 RGXReadHWTimerReg(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       IMG_UINT64 ui64Time = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TIMER);
+
+       /*
+       *  In order to avoid having to issue three 32-bit reads to detect the
+       *  lower 32-bits wrapping, the MSB of the low 32-bit word is duplicated
+       *  in the MSB of the high 32-bit word. If the wrap happens, we just read
+       *  the register again (it will not wrap again so soon).
+       */
+       if ((ui64Time ^ (ui64Time << 32)) & ~RGX_CR_TIMER_BIT31_CLRMSK)
+       {
+               ui64Time = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TIMER);
+       }
+
+       return (ui64Time & ~RGX_CR_TIMER_VALUE_CLRMSK) >> RGX_CR_TIMER_VALUE_SHIFT;
+}
+
+/*
+ * This FW Common Context is only mapped into the kernel for initialisation and cleanup purposes.
+ * Otherwise this allocation is only used by the FW.
+ * Therefore the GPU cache doesn't need coherency, and write-combine will
+ * suffice on the CPU side (WC buffer will be flushed at the first kick)
+ */
+#define RGX_FWCOMCTX_ALLOCFLAGS      (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \
+                                      PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)| \
+                                      PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
+                                      PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
+                                      PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | \
+                                      PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+                                      PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+                                      PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \
+                                      PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+                                      PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
+                                      PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN))
+
+#define RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \
+                                         PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
+                                         PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
+                                         PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \
+                                         PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+                                         PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+                                         PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \
+                                         PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+                                         PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
+                                         PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN))
+
+#define RGX_FWSHAREDMEM_CONFIG_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \
+                                           PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
+                                           PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
+                                           PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \
+                                           PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+                                           PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
+                                           PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_CONFIG))
+
+#define RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \
+                                           PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
+                                           PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \
+                                           PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+                                           PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
+                                           PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN))
+
+/* Firmware memory that is not accessible by the CPU. */
+#define RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \
+                                             PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
+                                             PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
+                                             PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \
+                                             PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC)
+
+/* Firmware shared memory that is supposed to be read-only to the CPU.
+ * In reality it isn't, because ZERO_ON_ALLOC enforces the CPU_WRITEABLE
+ * flag on the allocations. */
+#define RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \
+                                           PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN) | \
+                                           PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
+                                           PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+                                           PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+                                           PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \
+                                           PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \
+                                           PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC)
+
+/* Data kept in physical memory from previous boot cycles must not be cleared during allocation. */
+#define RGX_AUTOVZ_KEEP_FW_DATA_MASK(bKeepMem) ((bKeepMem) ? (~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) : (~0ULL))
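+
+/* Illustrative usage (not part of the driver): the mask is intended to be
+ * ANDed into an allocation's flags, dropping ZERO_ON_ALLOC when firmware data
+ * from a previous boot must be preserved, e.g.
+ *     uiFlags &= RGX_AUTOVZ_KEEP_FW_DATA_MASK(bKeepMem);
+ */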
+
+/******************************************************************************
+ * RGXSetFirmwareAddress Flags
+ *****************************************************************************/
+#define RFW_FWADDR_FLAG_NONE           (0)                     /*!< Void flag */
+#define RFW_FWADDR_NOREF_FLAG          (1U << 0)       /*!< It is safe to immediately release the reference to the pointer,
+                                                                                                 otherwise RGXUnsetFirmwareAddress() must be called when finished. */
+
+IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo);
+PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO* psDevInfo, PVRSRV_MEMALLOCFLAGS_T uiAllocFlags);
+
+#if defined(SUPPORT_POWMON_COMPONENT) && defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS)
+IMG_BOOL RGXPowmonBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo);
+PVRSRV_ERROR RGXPowmonBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo);
+#endif
+
+#if defined(SUPPORT_TBI_INTERFACE)
+IMG_BOOL RGXTBIBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo);
+PVRSRV_ERROR RGXTBIBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo);
+#endif
+
+PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE       *psDeviceNode,
+                              IMG_BOOL                 bEnableSignatureChecks,
+                              IMG_UINT32               ui32SignatureChecksBufSize,
+                              IMG_UINT32               ui32HWPerfFWBufSizeKB,
+                              IMG_UINT64               ui64HWPerfFilter,
+                              IMG_UINT32               ui32ConfigFlags,
+                              IMG_UINT32               ui32ConfigFlagsExt,
+                              IMG_UINT32               ui32FwOsCfgFlags,
+                              IMG_UINT32               ui32LogType,
+                              IMG_UINT32               ui32FilterFlags,
+                              IMG_UINT32               ui32JonesDisableMask,
+                              IMG_UINT32               ui32HWRDebugDumpLimit,
+                              IMG_UINT32               ui32HWPerfCountersDataSize,
+                              IMG_UINT32               ui32RenderKillingCtl,
+                              IMG_UINT32               ui32CDMTDMKillingCtl,
+                              IMG_UINT32               *pui32TPUTrilinearFracMask,
+                              IMG_UINT32               *pui32USRMNumRegions,
+                              IMG_UINT64               *pui64UVBRMNumRegions,
+                              RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf,
+                              FW_PERF_CONF             eFirmwarePerf,
+                              IMG_UINT32               ui32KCCBSizeLog2,
+                              IMG_UINT32               ui32AvailableSPUMask,
+                                                         IMG_UINT32               ui32AvailableRACMask);
+
+
+
+void RGXFreeFirmware(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*************************************************************************/ /*!
+@Function       RGXSetupFwAllocation
+
+@Description    Allocates a block of firmware memory and optionally returns
+                its firmware virtual address and a kernel CPU mapping.
+
+@Input          psDevInfo       Device Info struct
+@Input          uiAllocFlags    Flags determining type of memory allocation
+@Input          ui32Size        Size of memory allocation
+@Input          pszName         Allocation label
+@Input          psFwPtr         Address of the firmware pointer to set
+@Input          ppvCpuPtr       Address of the cpu pointer to set
+@Input          ui32DevVAFlags  Any combination of  RFW_FWADDR_*_FLAG
+
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXSetupFwAllocation(PVRSRV_RGXDEV_INFO   *psDevInfo,
+                                                                 PVRSRV_MEMALLOCFLAGS_T uiAllocFlags,
+                                                                 IMG_UINT32           ui32Size,
+                                                                 const IMG_CHAR       *pszName,
+                                                                 DEVMEM_MEMDESC       **ppsMemDesc,
+                                                                 RGXFWIF_DEV_VIRTADDR *psFwPtr,
+                                                                 void                 **ppvCpuPtr,
+                                                                 IMG_UINT32           ui32DevVAFlags);
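+
+/*
+ * Illustrative sketch (not part of the driver): allocating a small
+ * firmware-shared buffer with RGXSetupFwAllocation and receiving both the
+ * firmware virtual address and a kernel CPU mapping. The size, flags and
+ * label below are placeholders for the example.
+ *
+ *     DEVMEM_MEMDESC *psMemDesc;
+ *     RGXFWIF_DEV_VIRTADDR sFwAddr;
+ *     void *pvCpuPtr;
+ *     PVRSRV_ERROR eErr = RGXSetupFwAllocation(psDevInfo,
+ *                                              RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS,
+ *                                              ui32Size,
+ *                                              "FwExampleAlloc",
+ *                                              &psMemDesc,
+ *                                              &sFwAddr,
+ *                                              &pvCpuPtr,
+ *                                              RFW_FWADDR_NOREF_FLAG);
+ */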
+
+/*************************************************************************/ /*!
+@Function       RGXSetFirmwareAddress
+
+@Description    Sets a pointer in a firmware data structure.
+
+@Input          ppDest          Address of the pointer to set
+@Input          psSrc           MemDesc describing the pointer
+@Input          uiOffset        Offset into the MemDesc added to the firmware address
+@Input          ui32Flags       Any combination of RFW_FWADDR_*_FLAG
+
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR        *ppDest,
+                                                                  DEVMEM_MEMDESC               *psSrc,
+                                                                  IMG_UINT32                   uiOffset,
+                                                                  IMG_UINT32                   ui32Flags);
+
+
+/*************************************************************************/ /*!
+@Function       RGXSetMetaDMAAddress
+
+@Description    Fills a Firmware structure used to set up the Meta DMA with two
+                pointers to the same data, one 40-bit and one 32-bit (the
+                latter being a pointer in the FW memory space).
+
+@Input          psDest          Address of the structure to set
+@Input          psSrcMemDesc    MemDesc describing the pointer
+@Input          psSrcFWDevVAddr Firmware memory space pointer
+@Input          uiOffset        Offset into the MemDesc
+
+@Return         void
+*/ /**************************************************************************/
+void RGXSetMetaDMAAddress(RGXFWIF_DMA_ADDR             *psDest,
+                                                 DEVMEM_MEMDESC                *psSrcMemDesc,
+                                                 RGXFWIF_DEV_VIRTADDR  *psSrcFWDevVAddr,
+                                                 IMG_UINT32                    uiOffset);
+
+
+/*************************************************************************/ /*!
+@Function       RGXUnsetFirmwareAddress
+
+@Description    Unsets a pointer in a firmware data structure
+
+@Input          psSrc           MemDesc describing the pointer
+
+@Return         void
+*/ /**************************************************************************/
+void RGXUnsetFirmwareAddress(DEVMEM_MEMDESC *psSrc);
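+
+/*
+ * Illustrative sketch (not part of the driver): the usual pairing of
+ * RGXSetFirmwareAddress and RGXUnsetFirmwareAddress. When RFW_FWADDR_NOREF_FLAG
+ * is not passed, the reference taken on the MemDesc must be dropped with
+ * RGXUnsetFirmwareAddress once the firmware pointer is no longer needed.
+ *
+ *     RGXFWIF_DEV_VIRTADDR sFwAddr;
+ *     PVRSRV_ERROR eErr = RGXSetFirmwareAddress(&sFwAddr, psMemDesc,
+ *                                               0, RFW_FWADDR_FLAG_NONE);
+ *     // ... hand sFwAddr to the firmware ...
+ *     RGXUnsetFirmwareAddress(psMemDesc);
+ */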
+
+PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue);
+PVRSRV_ERROR RGXReadMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32* ui32RegValue);
+
+/*************************************************************************/ /*!
+@Function       FWCommonContextAllocate
+
+@Description    Allocate a FW common context. This allocates the HW memory
+                for the context, the CCB and wires it all together.
+
+@Input          psConnection            Connection this context is being created on
+@Input          psDeviceNode            Device node to create the FW context on
+                                        (must be RGX device node)
+@Input          eRGXCCBRequestor        RGX_CCB_REQUESTOR_TYPE enum constant
+                                        which represents the requestor of this FWCC
+@Input          eDM                     Data Master type
+@Input          psServerMMUContext      Server MMU memory context.
+@Input          psAllocatedMemDesc      Pointer to pre-allocated MemDesc to use
+                                        as the FW context or NULL if this function
+                                        should allocate it
+@Input          ui32AllocatedOffset     Offset into pre-allocated MemDesc to use
+                                        as the FW context. If psAllocatedMemDesc
+                                        is NULL then this parameter is ignored
+@Input          psFWMemContextMemDesc   MemDesc of the FW memory context this
+                                        common context resides on
+@Input          psContextStateMemDesc   FW context state (context switch) MemDesc
+@Input          ui32CCBAllocSizeLog2    Size of the CCB for this context
+@Input          ui32CCBMaxAllocSizeLog2 Maximum size to which CCB can grow for this context
+@Input          ui32ContextFlags        Flags which specify properties of the context
+@Input          ui32Priority            Priority of the context
+@Input          ui32MaxDeadlineMS       Max deadline limit in MS that the workload can run
+@Input          ui64RobustnessAddress   Address for FW to signal a context reset
+@Input          psInfo                  Structure that contains extra info
+                                        required for the creation of the context
+                                        (elements might change from core to core)
+@Return         PVRSRV_OK if the context was successfully created
+*/ /**************************************************************************/
+PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection,
+                                                                        PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                        RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor,
+                                                                        RGXFWIF_DM eDM,
+                                                                        SERVER_MMU_CONTEXT *psServerMMUContext,
+                                                                        DEVMEM_MEMDESC *psAllocatedMemDesc,
+                                                                        IMG_UINT32 ui32AllocatedOffset,
+                                                                        DEVMEM_MEMDESC *psFWMemContextMemDesc,
+                                                                        DEVMEM_MEMDESC *psContextStateMemDesc,
+                                                                        IMG_UINT32 ui32CCBAllocSizeLog2,
+                                                                        IMG_UINT32 ui32CCBMaxAllocSizeLog2,
+                                                                        IMG_UINT32 ui32ContextFlags,
+                                                                        IMG_UINT32 ui32Priority,
+                                                                        IMG_UINT32 ui32MaxDeadlineMS,
+                                                                        IMG_UINT64 ui64RobustnessAddress,
+                                                                        RGX_COMMON_CONTEXT_INFO *psInfo,
+                                                                        RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext);
+
+void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext);
+
+PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext);
+
+RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext);
+
+SERVER_MMU_CONTEXT *FWCommonContextGetServerMMUCtx(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext);
+
+RGX_CONTEXT_RESET_REASON FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+                                                           IMG_UINT32 *pui32LastResetJobRef);
+
+PVRSRV_RGXDEV_INFO* FWCommonContextGetRGXDevInfo(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext);
+
+PVRSRV_ERROR RGXGetFWCommonContextAddrFromServerMMUCtx(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                                          SERVER_MMU_CONTEXT *psServerMMUContext,
+                                                                                                          PRGXFWIF_FWCOMMONCONTEXT *psFWCommonContextFWAddr);
+
+PVRSRV_ERROR FWCommonContextSetFlags(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+                                     IMG_UINT32 ui32ContextFlags);
+
+/*!
+*******************************************************************************
+@Function       RGXScheduleProcessQueuesKM
+
+@Description    Software command complete handler
+                (sends uncounted kicks for all the DMs through the MISR)
+
+@Input          hCmdCompHandle  RGX device node
+
+@Return         None
+******************************************************************************/
+void RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle);
+
+#if defined(SUPPORT_VALIDATION)
+/*!
+*******************************************************************************
+@Function       RGXScheduleRgxRegCommand
+
+@Input          psDevInfo       Device Info struct
+@Input          ui64RegVal      Value to write into FW register
+@Input          ui64Size        Register size
+@Input          ui32Offset      Register Offset
+@Input          bWriteOp        Register Write or Read toggle
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXScheduleRgxRegCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                         IMG_UINT64 ui64RegVal,
+                                                                         IMG_UINT64 ui64Size,
+                                                                         IMG_UINT32 ui32Offset,
+                                                                         IMG_BOOL bWriteOp);
+
+#endif
+
+/*!
+*******************************************************************************
+
+@Function       RGXInstallProcessQueuesMISR
+
+@Description    Installs the MISR to handle Process Queues operations
+
+@Input          phMISR          Pointer to the MISR handler
+@Input          psDeviceNode    RGX Device node
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXInstallProcessQueuesMISR(IMG_HANDLE *phMISR, PVRSRV_DEVICE_NODE *psDeviceNode);
+
+PVRSRV_ERROR RGXSendCommandsFromDeferredList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bPoll);
+
+/*************************************************************************/ /*!
+@Function       RGXSendCommandWithPowLockAndGetKCCBSlot
+
+@Description    Sends a command to a particular DM without honouring
+                pending cache operations but taking the power lock.
+
+@Input          psDevInfo       Device Info
+@Input          psKCCBCmd       The cmd to send.
+@Input          ui32PDumpFlags  Pdump flags
+@Output         pui32CmdKCCBSlot   When non-NULL:
+                                   - Pointer on return contains the kCCB slot
+                                     number in which the command was enqueued.
+                                   - Resets the value of the allotted slot to
+                                     RGXFWIF_KCCB_RTN_SLOT_RST
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXSendCommandWithPowLockAndGetKCCBSlot(PVRSRV_RGXDEV_INFO        *psDevInfo,
+                                                                                                        RGXFWIF_KCCB_CMD       *psKCCBCmd,
+                                                                                                        IMG_UINT32                     ui32PDumpFlags,
+                                                                                                        IMG_UINT32                     *pui32CmdKCCBSlot);
+
+#define RGXSendCommandWithPowLock(psDevInfo, psKCCBCmd, ui32PDumpFlags) \
+  RGXSendCommandWithPowLockAndGetKCCBSlot(psDevInfo, psKCCBCmd, ui32PDumpFlags, NULL)
+
+/*************************************************************************/ /*!
+@Function       RGXSendCommandAndGetKCCBSlot
+
+@Description    Sends a command to a particular DM without honouring
+                pending cache operations or the power lock.
+                The function flushes any deferred KCCB commands first.
+
+@Input          psDevInfo       Device Info
+@Input          psKCCBCmd       The cmd to send.
+@Input          uiPdumpFlags    PDump flags.
+@Output         pui32CmdKCCBSlot   When non-NULL:
+                                   - Pointer on return contains the kCCB slot
+                                     number in which the command was enqueued.
+                                   - Resets the value of the allotted slot to
+                                     RGXFWIF_KCCB_RTN_SLOT_RST
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXSendCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                 RGXFWIF_KCCB_CMD   *psKCCBCmd,
+                                                                                 PDUMP_FLAGS_T      uiPdumpFlags,
+                                                                                 IMG_UINT32         *pui32CmdKCCBSlot);
+
+#define RGXSendCommand(psDevInfo, psKCCBCmd, ui32PDumpFlags) \
+  RGXSendCommandAndGetKCCBSlot(psDevInfo, psKCCBCmd, ui32PDumpFlags, NULL)
+
+/*************************************************************************/ /*!
+@Function       RGXScheduleCommandAndGetKCCBSlot
+
+@Description    Sends a command to a particular DM and kicks the firmware, but
+                first schedules any commands which have to happen beforehand.
+
+@Input          psDevInfo           Device Info
+@Input          eDM                 To which DM the cmd is sent.
+@Input          psKCCBCmd           The cmd to send.
+@Input          ui32PDumpFlags      PDump flags
+@Output         pui32CmdKCCBSlot    When non-NULL:
+                                    - Pointer on return contains the kCCB slot
+                                      number in which the command was enqueued.
+                                    - Resets the value of the allotted slot to
+                                      RGXFWIF_KCCB_RTN_SLOT_RST
+
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXScheduleCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                               RGXFWIF_DM         eKCCBType,
+                                                               RGXFWIF_KCCB_CMD   *psKCCBCmd,
+                                                               IMG_UINT32         ui32PDumpFlags,
+                                                               IMG_UINT32         *pui32CmdKCCBSlot);
+#define RGXScheduleCommand(psDevInfo, eKCCBType, psKCCBCmd, ui32PDumpFlags) \
+  RGXScheduleCommandAndGetKCCBSlot(psDevInfo, eKCCBType, psKCCBCmd, ui32PDumpFlags, NULL)
+
+/*************************************************************************/ /*!
+@Function       RGXWaitForKCCBSlotUpdate
+
+@Description    Waits until the required kCCB slot value is updated by the FW
+                (signifies command completion). Additionally, dumps a relevant
+                PDump poll command.
+
+@Input          psDevInfo       Device Info
+@Input          ui32SlotNum     The kCCB slot number to wait for an update on
+@Input          ui32PDumpFlags  PDump flags
+
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXWaitForKCCBSlotUpdate(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                      IMG_UINT32 ui32SlotNum,
+                                                                         IMG_UINT32 ui32PDumpFlags);
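+
+/*
+ * Illustrative sketch (not part of the driver): submitting a kernel CCB
+ * command and waiting for the firmware to acknowledge it via the returned
+ * slot. The command itself (psKCCBCmd) is assumed to have been populated
+ * elsewhere; RGXFWIF_DM_GP and PDUMP_FLAGS_CONTINUOUS are names assumed here
+ * purely for illustration.
+ *
+ *     IMG_UINT32 ui32Slot;
+ *     PVRSRV_ERROR eErr = RGXScheduleCommandAndGetKCCBSlot(psDevInfo,
+ *                                                          RGXFWIF_DM_GP,
+ *                                                          psKCCBCmd,
+ *                                                          PDUMP_FLAGS_CONTINUOUS,
+ *                                                          &ui32Slot);
+ *     if (eErr == PVRSRV_OK)
+ *     {
+ *         eErr = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32Slot,
+ *                                         PDUMP_FLAGS_CONTINUOUS);
+ *     }
+ */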
+
+PVRSRV_ERROR RGXFirmwareUnittests(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVRGXFrameworkCopyCommand
+
+@Description    Copy framework command into FW addressable buffer
+
+@param          psDeviceNode
+@param          psFWFrameworkMemDesc
+@param          pbyGPUFRegisterList
+@param          ui32FrameworkRegisterSize
+
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                                  DEVMEM_MEMDESC *psFWFrameworkMemDesc,
+                                                                                  IMG_PBYTE pbyGPUFRegisterList,
+                                                                                  IMG_UINT32 ui32FrameworkRegisterSize);
+
+
+/*************************************************************************/ /*!
+@Function       PVRSRVRGXFrameworkCreateKM
+
+@Description    Create FW addressable buffer for framework
+
+@param          psDeviceNode
+@param          ppsFWFrameworkMemDesc
+@param          ui32FrameworkRegisterSize
+
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVRGXFrameworkCreateKM(PVRSRV_DEVICE_NODE * psDeviceNode,
+                                                       DEVMEM_MEMDESC ** ppsFWFrameworkMemDesc,
+                                                       IMG_UINT32 ui32FrameworkRegisterSize);
+
+
+/*************************************************************************/ /*!
+@Function       RGXPollForGPCommandCompletion
+
+@Description    Polls for completion of a submitted GP command. Poll is done
+                on a value matching a masked read from the address.
+
+@Input          psDevNode       Pointer to device node struct
+@Input          pui32LinMemAddr CPU linear address to poll
+@Input          ui32Value       Required value
+@Input          ui32Mask        Mask
+
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXPollForGPCommandCompletion(PVRSRV_DEVICE_NODE *psDevNode,
+                                                                       volatile IMG_UINT32 __iomem *pui32LinMemAddr,
+                                                                       IMG_UINT32                   ui32Value,
+                                                                       IMG_UINT32                   ui32Mask);
+
+/*************************************************************************/ /*!
+@Function       RGXStateFlagCtrl
+
+@Description    Set and return FW internal state flags.
+
+@Input          psDevInfo       Device Info
+@Input          ui32Config      AppHint config flags
+@Output         pui32State      Current AppHint state flag configuration
+@Input          bSetNotClear    Set or clear the provided config flags
+
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo,
+                               IMG_UINT32 ui32Config,
+                               IMG_UINT32 *pui32State,
+                               IMG_BOOL bSetNotClear);
+
+/*!
+*******************************************************************************
+@Function       RGXFWRequestCommonContextCleanUp
+
+@Description    Schedules a FW common context cleanup. The firmware doesn't
+                block waiting for the resource to become idle but rather
+                notifies the host that the resource is busy.
+
+@Input          psDeviceNode    pointer to device node
+@Input          psServerCommonContext context to be cleaned up
+@Input          eDM             Data master, to which the cleanup command should
+                                be sent
+@Input          ui32PDumpFlags  PDump continuous flag
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                                         RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+                                                                                         RGXFWIF_DM eDM,
+                                                                                         IMG_UINT32 ui32PDumpFlags);
+
+/*!
+*******************************************************************************
+@Function       RGXFWRequestHWRTDataCleanUp
+
+@Description    Schedules a FW HWRTData memory cleanup. The firmware doesn't
+                block waiting for the resource to become idle but rather
+                notifies the host that the resource is busy.
+
+@Input          psDeviceNode    pointer to device node
+@Input          psHWRTData      firmware address of the HWRTData for clean-up
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                                PRGXFWIF_HWRTDATA psHWRTData);
+
+/*!
+*******************************************************************************
+@Function       RGXFWRequestFreeListCleanUp
+
+@Description    Schedules a FW FreeList cleanup. The firmware doesn't block
+                waiting for the resource to become idle but rather notifies the
+                host that the resource is busy.
+
+@Input          psDeviceNode    pointer to device node
+@Input          psFWFreeList    firmware address of the FreeList for clean-up
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDeviceNode,
+                                                                                PRGXFWIF_FREELIST psFWFreeList);
+
+/*!
+*******************************************************************************
+@Function       RGXFWRequestZSBufferCleanUp
+
+@Description    Schedules a FW ZS Buffer cleanup. The firmware doesn't block
+                waiting for the resource to become idle but rather notifies the
+                host that the resource is busy.
+
+@Input          psDevInfo       pointer to device info
+@Input          psFWZSBuffer    firmware address of the ZS Buffer for clean-up
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                PRGXFWIF_ZSBUFFER psFWZSBuffer);
+
+PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext,
+                                                               CONNECTION_DATA *psConnection,
+                                                               PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                               IMG_UINT32 ui32Priority,
+                                                               RGXFWIF_DM eDM);
+
+/*!
+*******************************************************************************
+@Function       RGXFWSetHCSDeadline
+
+@Description    Requests the Firmware to set a new Hard Context Switch timeout
+                deadline. Context switches that surpass that deadline cause the
+                system to kill the currently running workloads.
+
+@Input          psDevInfo       pointer to device info
+@Input          ui32HCSDeadlineMs  The deadline in milliseconds.
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                               IMG_UINT32 ui32HCSDeadlineMs);
+
+/*!
+*******************************************************************************
+@Function       RGXFWChangeOSidPriority
+
+@Description    Requests the Firmware to change the priority of an operating
+                system. Higher priority number equals higher priority on the
+                scheduling system.
+
+@Input          psDevInfo       pointer to device info
+@Input          ui32OSid        The OSid whose priority is to be altered
+@Input          ui32Priority    The new priority number for the specified OSid
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                        IMG_UINT32 ui32OSid,
+                                                                        IMG_UINT32 ui32Priority);
+
+/*!
+*******************************************************************************
+@Function       RGXFWHealthCheckCmd
+
+@Description    Ping the firmware to check if it is responsive.
+
+@Input          psDevInfo       pointer to device info
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXFWHealthCheckCmd(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+@Function       RGXFWSetFwOsState
+
+@Description    Requests the Firmware to change the guest OS Online states.
+                This should be initiated by the VMM when a guest VM comes
+                online or goes offline. If offline, the FW offloads any current
+                resource from that OSID. The request is repeated until the FW
+                has had time to free all the resources or has waited for
+                workloads to finish.
+
+@Input          psDevInfo       pointer to device info
+@Input          ui32OSid        The Guest OSid whose state is being altered
+@Input          eOSOnlineState  The new state (Online or Offline)
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                               IMG_UINT32 ui32OSid,
+                                                               RGXFWIF_OS_STATE_CHANGE eOSOnlineState);
+
+#if defined(SUPPORT_AUTOVZ)
+/*!
+*******************************************************************************
+@Function       RGXUpdateAutoVzWdgToken
+
+@Description    If the driver-firmware connection is active, read the
+                firmware's watchdog token and copy its value back into the OS
+                token. This indicates to the firmware that this driver is alive
+                and responsive.
+
+@Input          psDevInfo       pointer to device info
+******************************************************************************/
+void RGXUpdateAutoVzWdgToken(PVRSRV_RGXDEV_INFO *psDevInfo);
+#endif
+
+/*!
+*******************************************************************************
+@Function       RGXFWConfigPHR
+
+@Description    Configure the Periodic Hardware Reset functionality
+
+@Input          psDevInfo       pointer to device info
+@Input          ui32PHRMode     desired PHR mode
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXFWConfigPHR(PVRSRV_RGXDEV_INFO *psDevInfo,
+                            IMG_UINT32 ui32PHRMode);
+/*!
+*******************************************************************************
+@Function       RGXFWConfigWdg
+
+@Description    Configure the Safety watchdog trigger period
+
+@Input          psDevInfo        pointer to device info
+@Input          ui32WdgPeriod    requested watchdog trigger period in microseconds
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXFWConfigWdg(PVRSRV_RGXDEV_INFO *psDevInfo,
+                            IMG_UINT32 ui32WdgPeriod);
+
+/*!
+*******************************************************************************
+@Function       RGXCheckFirmwareCCB
+
+@Description    Processes all commands that are found in the Firmware CCB.
+
+@Input          psDevInfo       pointer to device info
+
+@Return         None
+******************************************************************************/
+void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+@Function       RGXCheckForStalledClientContexts
+
+@Description    Checks all client contexts of the device described by the
+                given device info to see if any are waiting for a fence to
+                signal, and optionally forces signalling of the fence for the
+                context that has been waiting the longest.
+                This function is called by RGXUpdateHealthStatus() and also
+                may be invoked from other trigger points.
+
+@Input          psDevInfo       pointer to device info
+@Input          bIgnorePrevious If IMG_TRUE, any stalled contexts will be
+                                indicated immediately, rather than only
+                                checking against any previous stalled contexts
+
+@Return         None
+******************************************************************************/
+void RGXCheckForStalledClientContexts(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bIgnorePrevious);
+
+/*!
+*******************************************************************************
+@Function       RGXUpdateHealthStatus
+
+@Description    Tests a number of conditions which might indicate a fatal error
+                has occurred in the firmware. The result is stored in the
+                device node eHealthStatus.
+
+@Input         psDevNode        Pointer to device node structure.
+@Input         bCheckAfterTimePassed  When TRUE, the function will also test
+                                for firmware queues and polls not changing
+                                since the previous test.
+
+                                Note: if not enough time has passed since the
+                                last call, false positives may occur.
+
+@Return        PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode,
+                                   IMG_BOOL bCheckAfterTimePassed);
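+/* Illustrative usage sketch (not part of the driver): a periodic device
+ * health-check callback; psDevNode is assumed valid.
+ *
+ *     if (RGXUpdateHealthStatus(psDevNode, IMG_TRUE) != PVRSRV_OK)
+ *     {
+ *         PVR_DPF((PVR_DBG_ERROR, "Failed to update device health status"));
+ *     }
+ */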
+
+#if defined(SUPPORT_AUTOVZ)
+/*!
+*******************************************************************************
+@Function       RGXUpdateAutoVzWatchdog
+
+@Description    Updates AutoVz watchdog that maintains the fw-driver connection
+
+@Input         psDevNode        Pointer to device node structure.
+******************************************************************************/
+void RGXUpdateAutoVzWatchdog(PVRSRV_DEVICE_NODE* psDevNode);
+#endif /* SUPPORT_AUTOVZ */
+
+PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM);
+
+void DumpFWCommonContextInfo(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext,
+                             DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                             void *pvDumpDebugFile,
+                             IMG_UINT32 ui32VerbLevel);
+
+/*!
+*******************************************************************************
+@Function       AttachKickResourcesCleanupCtls
+
+@Description    Attaches the cleanup structures to a kick command so that
+                submission reference counting can be performed when the
+                firmware processes the command
+
+@Output         apsCleanupCtl       Array of CleanupCtl structure pointers to populate.
+@Output         pui32NumCleanupCtl  Number of CleanupCtl structure pointers written out.
+@Input          eDM                 Which data master is the subject of the command.
+@Input          bKick               TRUE if the client originally wanted to kick this DM.
+@Input          psKMHWRTDataSet     Optional HW RTData set associated with the command.
+@Input          psZSBuffer          Optional ZSBuffer associated with the command.
+@Input          psMSAAScratchBuffer Optional MSAA scratch ZSBuffer associated with the command.
+
+@Return        PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl,
+                                                                       IMG_UINT32 *pui32NumCleanupCtl,
+                                                                       RGXFWIF_DM eDM,
+                                                                       IMG_BOOL bKick,
+                                                                       RGX_KM_HW_RT_DATASET           *psKMHWRTDataSet,
+                                                                       RGX_ZSBUFFER_DATA              *psZSBuffer,
+                                                                       RGX_ZSBUFFER_DATA              *psMSAAScratchBuffer);
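+/* Illustrative usage sketch (not part of the driver); the array size, variable
+ * names and the eDM value are chosen for illustration only:
+ *
+ *     PRGXFWIF_CLEANUP_CTL apsCleanupCtl[4];
+ *     IMG_UINT32 ui32NumCleanupCtl = 0;
+ *
+ *     eError = AttachKickResourcesCleanupCtls(apsCleanupCtl, &ui32NumCleanupCtl,
+ *                                             eDM, IMG_TRUE,
+ *                                             psKMHWRTDataSet, psZSBuffer,
+ *                                             NULL);
+ */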
+
+/*!
+*******************************************************************************
+@Function       RGXResetHWRLogs
+
+@Description    Resets the HWR Logs buffer
+                (the hardware recovery count is not reset)
+
+@Input          psDevNode       Pointer to the device node
+
+@Return         PVRSRV_ERROR    PVRSRV_OK on success.
+                                Otherwise, a PVRSRV error code
+******************************************************************************/
+PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode);
+
+/*!
+*******************************************************************************
+@Function       RGXGetPhyAddr
+
+@Description    Get the physical address of a PMR at an offset within it
+
+@Input          psPMR              PMR of the allocation
+@Input          ui32LogicalOffset  Logical offset within the PMR
+@Input          ui32Log2PageSize   Log2 page size of the allocation
+@Input          ui32NumOfPages     Number of pages to look up
+
+@Output         psPhyAddr          Physical address of the allocation
+@Output         bValid             Whether the returned address is valid
+
+@Return         PVRSRV_ERROR    PVRSRV_OK on success.
+                                Otherwise, a PVRSRV error code
+******************************************************************************/
+PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR,
+                                                  IMG_DEV_PHYADDR *psPhyAddr,
+                                                  IMG_UINT32 ui32LogicalOffset,
+                                                  IMG_UINT32 ui32Log2PageSize,
+                                                  IMG_UINT32 ui32NumOfPages,
+                                                  IMG_BOOL *bValid);
+
+#if defined(PDUMP)
+/*!
+*******************************************************************************
+@Function       RGXPdumpDrainKCCB
+
+@Description    Wait for the firmware to execute all the commands in the kCCB
+
+@Input          psDevInfo       Pointer to the device info
+@Input          ui32WriteOffset The kCCB write offset (Woff) that the firmware
+                                read offset (Roff) must be polled (POL) to reach
+
+@Return         PVRSRV_ERROR    PVRSRV_OK on success.
+                                Otherwise, a PVRSRV error code
+******************************************************************************/
+PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                          IMG_UINT32 ui32WriteOffset);
+#endif /* PDUMP */
+
+/*!
+*******************************************************************************
+@Function       RGXFwRawHeapAllocMap
+
+@Description    Registers a raw firmware physheap and maps it to the device
+
+@Return         PVRSRV_ERROR    PVRSRV_OK on success.
+                                Otherwise, a PVRSRV error code
+******************************************************************************/
+PVRSRV_ERROR RGXFwRawHeapAllocMap(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                 IMG_UINT32 ui32OSID,
+                                                                 IMG_DEV_PHYADDR sDevPAddr,
+                                                                 IMG_UINT64 ui64DevPSize);
+
+/*!
+*******************************************************************************
+@Function       RGXFwRawHeapUnmapFree
+
+@Description    Unmaps a raw firmware physheap from the device and unregisters it
+
+******************************************************************************/
+void RGXFwRawHeapUnmapFree(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                  IMG_UINT32 ui32OSID);
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvHalt
+
+@Description    Halt the RISC-V FW core (required for certain operations
+                done through Debug Module)
+
+@Input          psDevInfo       Pointer to device info
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvHalt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvIsHalted
+
+@Description    Check if the RISC-V FW is halted
+
+@Input          psDevInfo       Pointer to device info
+
+@Return         IMG_BOOL
+******************************************************************************/
+IMG_BOOL RGXRiscvIsHalted(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvResume
+
+@Description    Resume the RISC-V FW core
+
+@Input          psDevInfo       Pointer to device info
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvResume(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvReadReg
+
+@Description    Read a value from the given RISC-V register (GPR or CSR)
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32RegAddr     RISC-V register address
+
+@Output         pui32Value      Read value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvReadReg(PVRSRV_RGXDEV_INFO *psDevInfo,
+                             IMG_UINT32 ui32RegAddr,
+                             IMG_UINT32 *pui32Value);
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvPollReg
+
+@Description    Poll for a value from the given RISC-V register (GPR or CSR)
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32RegAddr     RISC-V register address
+@Input          ui32Value       Expected value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvPollReg(PVRSRV_RGXDEV_INFO *psDevInfo,
+                             IMG_UINT32 ui32RegAddr,
+                             IMG_UINT32 ui32Value);
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvWriteReg
+
+@Description    Write a value to the given RISC-V register (GPR or CSR)
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32RegAddr     RISC-V register address
+@Input          ui32Value       Write value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvWriteReg(PVRSRV_RGXDEV_INFO *psDevInfo,
+                              IMG_UINT32 ui32RegAddr,
+                              IMG_UINT32 ui32Value);
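+/* Illustrative usage sketch (not part of the driver): reading one RISC-V
+ * register through the Debug Module while the FW core is halted; ui32RegAddr
+ * is assumed to be supplied by the caller.
+ *
+ *     IMG_UINT32 ui32Value;
+ *
+ *     if (RGXRiscvHalt(psDevInfo) == PVRSRV_OK &&
+ *         RGXRiscvReadReg(psDevInfo, ui32RegAddr, &ui32Value) == PVRSRV_OK)
+ *     {
+ *         (void) RGXRiscvResume(psDevInfo);
+ *     }
+ */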
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvPollMem
+
+@Description    Poll for a value at the given address in RISC-V memory space
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32Addr        Address in RISC-V memory space
+@Input          ui32Value       Expected value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvPollMem(PVRSRV_RGXDEV_INFO *psDevInfo,
+                             IMG_UINT32 ui32Addr,
+                             IMG_UINT32 ui32Value);
+
+/*!
+*******************************************************************************
+@Function       RGXRiscvDmiOp
+
+@Description    Acquire the powerlock and perform an operation on the RISC-V
+                Debug Module Interface, but only if the GPU is powered on.
+
+@Input          psDevInfo       Pointer to device info
+@InOut          pui64DMI        Encoding of a request for the RISC-V Debug
+                                Module with same format as the 'dmi' register
+                                from the RISC-V debug specification (v0.13+).
+                                On return, this is updated with the result of
+                                the request, encoded the same way.
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXRiscvDmiOp(PVRSRV_RGXDEV_INFO *psDevInfo,
+                           IMG_UINT64 *pui64DMI);
+
+/*!
+*******************************************************************************
+@Function       RGXReadFWModuleAddr
+
+@Description    Read a value at the given address in META or RISC-V memory space
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32Addr        Address in META or RISC-V memory space
+
+@Output         pui32Value      Read value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXReadFWModuleAddr(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                 IMG_UINT32 ui32Addr,
+                                 IMG_UINT32 *pui32Value);
+
+/*!
+*******************************************************************************
+@Function       RGXWriteFWModuleAddr
+
+@Description    Write a value to the given address in META or RISC-V memory space
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32MemAddr     Address in META or RISC-V memory space
+@Input          ui32Value       Write value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXWriteFWModuleAddr(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                  IMG_UINT32 ui32MemAddr,
+                                  IMG_UINT32 ui32Value);
+
+/*!
+*******************************************************************************
+@Function       RGXGetFwMapping
+
+@Description    Retrieve any of the CPU Physical Address, Device Physical
+                Address or the raw value of the page table entry associated
+                with the firmware virtual address given.
+
+@Input          psDevInfo       Pointer to device info
+@Input          ui32FwVA        The Fw VA that needs decoding
+@Output         psCpuPA         Pointer to the resulting CPU PA
+@Output         psDevPA         Pointer to the resulting Dev PA
+@Output         pui64RawPTE     Pointer to the raw Page Table Entry value
+
+@Return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXGetFwMapping(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                    IMG_UINT32 ui32FwVA,
+                                    IMG_CPU_PHYADDR *psCpuPA,
+                                    IMG_DEV_PHYADDR *psDevPA,
+                                    IMG_UINT64 *pui64RawPTE);
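+/* Illustrative usage sketch (not part of the driver): decoding a firmware
+ * virtual address; ui32FwVA is assumed to be a valid FW VA supplied by the
+ * caller.
+ *
+ *     IMG_CPU_PHYADDR sCpuPA;
+ *     IMG_DEV_PHYADDR sDevPA;
+ *     IMG_UINT64      ui64RawPTE;
+ *
+ *     eError = RGXGetFwMapping(psDevInfo, ui32FwVA, &sCpuPA, &sDevPA, &ui64RawPTE);
+ */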
+
+#if defined(SUPPORT_AUTOVZ_HW_REGS) && !defined(SUPPORT_AUTOVZ)
+#error "VZ build configuration error: use of OS scratch registers supported only in AutoVz drivers."
+#endif
+
+#if defined(SUPPORT_AUTOVZ_HW_REGS)
+/* AutoVz with hw support */
+#define KM_GET_FW_CONNECTION(psDevInfo)                                OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH3)
+#define KM_GET_OS_CONNECTION(psDevInfo)                                OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH2)
+#define KM_SET_OS_CONNECTION(val, psDevInfo)           OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH2, RGXFW_CONNECTION_OS_##val)
+
+#define KM_GET_FW_ALIVE_TOKEN(psDevInfo)                       OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH1)
+#define KM_GET_OS_ALIVE_TOKEN(psDevInfo)                       OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH0)
+#define KM_SET_OS_ALIVE_TOKEN(val, psDevInfo)          OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH0, val)
+#else
+
+#if defined(SUPPORT_AUTOVZ)
+#define KM_GET_FW_ALIVE_TOKEN(psDevInfo)                       (psDevInfo->psRGXFWIfConnectionCtl->ui32AliveFwToken)
+#define KM_GET_OS_ALIVE_TOKEN(psDevInfo)                       (psDevInfo->psRGXFWIfConnectionCtl->ui32AliveOsToken)
+#define KM_SET_OS_ALIVE_TOKEN(val, psDevInfo)          OSWriteDeviceMem32WithWMB(&psDevInfo->psRGXFWIfConnectionCtl->ui32AliveOsToken, val)
+#endif /* defined(SUPPORT_AUTOVZ) */
+
+#if !defined(NO_HARDWARE) && (defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1)))
+/* native, static-vz and AutoVz using shared memory */
+#define KM_GET_FW_CONNECTION(psDevInfo)                        (psDevInfo->psRGXFWIfConnectionCtl->eConnectionFwState)
+#define KM_GET_OS_CONNECTION(psDevInfo)                        (psDevInfo->psRGXFWIfConnectionCtl->eConnectionOsState)
+#define KM_SET_OS_CONNECTION(val, psDevInfo)   OSWriteDeviceMem32WithWMB(&psDevInfo->psRGXFWIfConnectionCtl->eConnectionOsState, RGXFW_CONNECTION_OS_##val)
+#else
+/* dynamic-vz & nohw */
+#define KM_GET_FW_CONNECTION(psDevInfo)                        (RGXFW_CONNECTION_FW_ACTIVE)
+#define KM_GET_OS_CONNECTION(psDevInfo)                        (RGXFW_CONNECTION_OS_ACTIVE)
+#define KM_SET_OS_CONNECTION(val, psDevInfo)
+#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (RGX_NUM_OS_SUPPORTED == 1) */
+#endif /* defined(SUPPORT_AUTOVZ_HW_REGS) */
+
+#if defined(SUPPORT_AUTOVZ)
+#define RGX_FIRST_RAW_HEAP_OSID                RGXFW_HOST_OS
+#else
+#define RGX_FIRST_RAW_HEAP_OSID                RGXFW_GUEST_OSID_START
+#endif
+
+#define KM_OS_CONNECTION_IS(val, psDevInfo)            (KM_GET_OS_CONNECTION(psDevInfo) == RGXFW_CONNECTION_OS_##val)
+#define KM_FW_CONNECTION_IS(val, psDevInfo)            (KM_GET_FW_CONNECTION(psDevInfo) == RGXFW_CONNECTION_FW_##val)
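+/* Illustrative usage sketch (not part of the driver): the connection macros
+ * are typically used to gate work on the driver/firmware handshake, e.g.
+ *
+ *     if (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) &&
+ *         KM_OS_CONNECTION_IS(ACTIVE, psDevInfo))
+ *     {
+ *         handshake complete - both sides are active
+ *     }
+ */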
+
+#endif /* RGXFWUTILS_H */
+/******************************************************************************
+ End of file (rgxfwutils.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxhwperf.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxhwperf.c
new file mode 100644 (file)
index 0000000..2c0a53c
--- /dev/null
@@ -0,0 +1,415 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX HW Performance implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX HW Performance implementation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "rgxdevice.h"
+#include "pvrsrv_error.h"
+#include "pvr_notifier.h"
+#include "osfunc.h"
+#include "allocmem.h"
+
+#include "pvrsrv.h"
+#include "pvrsrv_tlstreams.h"
+#include "pvrsrv_tlcommon.h"
+#include "tlclient.h"
+#include "tlstream.h"
+
+#include "rgxhwperf.h"
+#include "rgxapi_km.h"
+#include "rgxfwutils.h"
+#include "rgxtimecorr.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "pdump_km.h"
+#include "pvrsrv_apphint.h"
+#include "process_stats.h"
+#include "rgx_hwperf_table.h"
+#include "rgxinit.h"
+
+#include "info_page_defs.h"
+
+/* Maximum enum value to prevent access to RGX_HWPERF_STREAM_ID2_CLIENT stream */
+#define RGX_HWPERF_MAX_STREAM_ID (RGX_HWPERF_STREAM_ID2_CLIENT)
+
+
+IMG_INTERNAL /*static inline*/ IMG_UINT32 RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **);
+
+static IMG_BOOL RGXServerFeatureFlagsToHWPerfFlagsAddBlock(
+       RGX_HWPERF_BVNC_BLOCK   * const psBlocks,
+       IMG_UINT16                              * const pui16Count,
+       const IMG_UINT16                ui16BlockID, /* see RGX_HWPERF_CNTBLK_ID */
+       const IMG_UINT16                ui16NumCounters,
+       const IMG_UINT16                ui16NumBlocks)
+{
+       const IMG_UINT16 ui16Count = *pui16Count;
+
+       if (ui16Count < RGX_HWPERF_MAX_BVNC_BLOCK_LEN)
+       {
+               RGX_HWPERF_BVNC_BLOCK * const psBlock = &psBlocks[ui16Count];
+
+               /* If the GROUP is non-zero, convert from e.g. RGX_CNTBLK_ID_USC0 to
+                * RGX_CNTBLK_ID_USC_ALL. The table stores the former (plus the
+                * number of blocks and counters) but PVRScopeServices expects the
+                * latter (plus the number of blocks and counters). The conversion
+                * could always be moved to PVRScopeServices, but it's less code this
+                * way.
+                * For SLC0 we generate a single SLCBANK_ALL which has NUM_MEMBUS
+                * instances.
+                * This replaces the SLC0 .. SLC3 entries.
+                */
+               if ((ui16BlockID == RGX_CNTBLK_ID_SLCBANK0) || (ui16BlockID & RGX_CNTBLK_ID_GROUP_MASK))
+               {
+                       psBlock->ui16BlockID    = ui16BlockID | RGX_CNTBLK_ID_UNIT_ALL_MASK;
+               }
+               else
+               {
+                       psBlock->ui16BlockID    = ui16BlockID;
+               }
+               psBlock->ui16NumCounters        = ui16NumCounters;
+               psBlock->ui16NumBlocks          = ui16NumBlocks;
+
+               *pui16Count = ui16Count + 1;
+               return IMG_TRUE;
+       }
+       return IMG_FALSE;
+}
+
+PVRSRV_ERROR RGXServerFeatureFlagsToHWPerfFlags(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_HWPERF_BVNC *psBVNC)
+{
+       IMG_PCHAR pszBVNC;
+       PVR_LOG_RETURN_IF_FALSE((NULL != psDevInfo), "psDevInfo invalid", PVRSRV_ERROR_INVALID_PARAMS);
+
+       if ((pszBVNC = RGXDevBVNCString(psDevInfo)))
+       {
+               size_t uiStringLength = OSStringNLength(pszBVNC, RGX_HWPERF_MAX_BVNC_LEN - 1);
+               OSStringLCopy(psBVNC->aszBvncString, pszBVNC, uiStringLength + 1);
+               memset(&psBVNC->aszBvncString[uiStringLength], 0, RGX_HWPERF_MAX_BVNC_LEN - uiStringLength);
+       }
+       else
+       {
+               *psBVNC->aszBvncString = 0;
+       }
+
+       psBVNC->ui32BvncKmFeatureFlags = 0x0;
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS))
+       {
+               psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_PERFBUS_FLAG;
+       }
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT))
+       {
+               psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_MULTICORE_FLAG;
+       }
+
+       /* Determine whether the new RAY_TRACING feature is supported. This is
+        * established solely by checking that the RAY_TRACING_ARCH feature
+        * value is present. */
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH))
+       {
+               psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_RAYTRACING_FLAG;
+       }
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, CATURIX_TOP_INFRASTRUCTURE))
+       {
+               psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_CXT_TOP_INFRASTRUCTURE_FLAG;
+       }
+
+#ifdef SUPPORT_WORKLOAD_ESTIMATION
+       /* Not a part of BVNC feature line and so doesn't need the feature supported check */
+       psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION;
+#endif
+
+       /* Define the HW counter block counts. */
+       {
+               RGX_HWPERF_BVNC_BLOCK                                   * const psBlocks        = psBVNC->aBvncBlocks;
+               IMG_UINT16                                                              * const pui16Count      = &psBVNC->ui16BvncBlocks;
+               const RGXFW_HWPERF_CNTBLK_TYPE_MODEL    *asCntBlkTypeModel;
+               const IMG_UINT32                                                ui32CntBlkModelLen      = RGXGetHWPerfBlockConfig(&asCntBlkTypeModel);
+               IMG_UINT32                                                              ui32BlkCfgIdx;
+               size_t                                                                  uiCount;
+               IMG_BOOL                                                                bOk                                     = IMG_TRUE;
+
+               // Initialise to zero blocks
+               *pui16Count = 0;
+
+               // Add all the blocks
+               for (ui32BlkCfgIdx = 0; ui32BlkCfgIdx < ui32CntBlkModelLen; ui32BlkCfgIdx++)
+               {
+                       const RGXFW_HWPERF_CNTBLK_TYPE_MODEL    * const psCntBlkInfo = &asCntBlkTypeModel[ui32BlkCfgIdx];
+                       RGX_HWPERF_CNTBLK_RT_INFO                               sCntBlkRtInfo;
+                       /* psCntBlkInfo->uiNumUnits gives compile-time info. For BVNC agnosticism, we use this: */
+                       if (psCntBlkInfo->pfnIsBlkPresent(psCntBlkInfo, psDevInfo, &sCntBlkRtInfo))
+                       {
+                               IMG_UINT32      uiNumUnits;
+
+                               switch (psCntBlkInfo->uiCntBlkIdBase)
+                               {
+                                       case RGX_CNTBLK_ID_SLCBANK0:
+                                               /* Generate the SLCBANK_ALL block for SLC0..SLC3
+                                                * we have to special-case this as the iteration will
+                                                * generate entries starting at SLC0 and we need to
+                                                * defer until we are processing the last 'present'
+                                                * entry.
+                                                * The SLC_BLKID_ALL is keyed from SLC0. Need to access
+                                                * the NUM_MEMBUS feature to see how many are physically
+                                                * present.
+                                                * For CXT_TOP_INFRASTRUCTURE systems we present a
+                                                * singleton SLCBANK - this provides accumulated counts
+                                                * for all registers within the physical SLCBANK instances.
+                                                */
+                                               if (psBVNC->ui32BvncKmFeatureFlags &
+                                                   RGX_HWPERF_FEATURE_CXT_TOP_INFRASTRUCTURE_FLAG)
+                                               {
+                                                       uiNumUnits = 1U;
+                                               }
+                                               else
+                                               {
+                                                       uiNumUnits = RGX_GET_FEATURE_VALUE(psDevInfo,
+                                                                                          NUM_MEMBUS);
+                                               }
+                                               break;
+                                       case RGX_CNTBLK_ID_SLCBANK1:
+                                       case RGX_CNTBLK_ID_SLCBANK2:
+                                       case RGX_CNTBLK_ID_SLCBANK3:
+                                               /* These are contained within SLCBANK_ALL block */
+                                               continue;
+                                       default:
+                                               uiNumUnits = sCntBlkRtInfo.uiNumUnits;
+                                               break;
+                               }
+                               bOk &= RGXServerFeatureFlagsToHWPerfFlagsAddBlock(psBlocks, pui16Count, psCntBlkInfo->uiCntBlkIdBase, RGX_CNTBLK_COUNTERS_MAX, uiNumUnits);
+                       }
+               }
+
+               /* If this fails, consider why the static_assert didn't fail, and consider increasing RGX_HWPERF_MAX_BVNC_BLOCK_LEN */
+               PVR_ASSERT(bOk);
+
+               // Zero the remaining entries
+               uiCount = *pui16Count;
+               OSDeviceMemSet(&psBlocks[uiCount], 0, (RGX_HWPERF_MAX_BVNC_BLOCK_LEN - uiCount) * sizeof(*psBlocks));
+       }
+
+       return PVRSRV_OK;
+}
+
+/*
+       PVRSRVRGXConfigureHWPerfBlocksKM
+ */
+PVRSRV_ERROR PVRSRVRGXConfigureHWPerfBlocksKM(
+               CONNECTION_DATA          * psConnection,
+               PVRSRV_DEVICE_NODE       * psDeviceNode,
+               IMG_UINT32                 ui32CtrlWord,
+               IMG_UINT32                 ui32ArrayLen,
+               RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs)
+{
+       PVRSRV_ERROR            eError = PVRSRV_OK;
+       RGXFWIF_KCCB_CMD        sKccbCmd;
+       DEVMEM_MEMDESC*         psFwBlkConfigsMemDesc;
+       RGX_HWPERF_CONFIG_CNTBLK* psFwArray;
+       IMG_UINT32                      ui32kCCBCommandSlot;
+       PVRSRV_RGXDEV_INFO      *psDevice;
+
+       PVR_LOG_RETURN_IF_FALSE(psDeviceNode != NULL, "psDeviceNode is NULL",
+                               PVRSRV_ERROR_INVALID_PARAMS);
+
+       psDevice = psDeviceNode->pvDevice;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+       PVR_LOG_RETURN_IF_FALSE(ui32ArrayLen > 0, "ui32ArrayLen is 0",
+                               PVRSRV_ERROR_INVALID_PARAMS);
+       PVR_LOG_RETURN_IF_FALSE(psBlockConfigs != NULL, "psBlockConfigs is NULL",
+                               PVRSRV_ERROR_INVALID_PARAMS);
+
+       PVR_DPF_ENTERED;
+
+       /* Fill in the command structure with the parameters needed
+        */
+       sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS;
+       sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.ui32CtrlWord = ui32CtrlWord;
+       sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.ui32NumBlocks = ui32ArrayLen;
+
+       /* used for passing counters config to the Firmware, write-only for the CPU */
+       eError = DevmemFwAllocate(psDevice,
+                                 sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen,
+                                 PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                                 PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                                 PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                                 PVRSRV_MEMALLOCFLAG_GPU_UNCACHED |
+                                 PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                                 PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC |
+                                 PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+                                 PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN),
+                                 "FwHWPerfCountersConfigBlock",
+                                 &psFwBlkConfigsMemDesc);
+       PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate");
+
+       eError = RGXSetFirmwareAddress(&sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.sBlockConfigs,
+                             psFwBlkConfigsMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", fail1);
+
+       eError = DevmemAcquireCpuVirtAddr(psFwBlkConfigsMemDesc, (void **)&psFwArray);
+       PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail2);
+
+       OSCachedMemCopyWMB(psFwArray, psBlockConfigs, sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen);
+       DevmemPDumpLoadMem(psFwBlkConfigsMemDesc,
+                          0,
+                          sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen,
+                          PDUMP_FLAGS_CONTINUOUS);
+
+       /* Ask the FW to carry out the HWPerf configuration command
+        */
+       eError = RGXScheduleCommandAndGetKCCBSlot(psDevice,
+                                                                                         RGXFWIF_DM_GP,
+                                                                                         &sKccbCmd,
+                                                                                         PDUMP_FLAGS_CONTINUOUS,
+                                                                                         &ui32kCCBCommandSlot);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", fail2);
+
+       /* Wait for FW to complete */
+       eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", fail3);
+
+       /* Release temporary memory used for block configuration
+        */
+       RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc);
+       DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc);
+       DevmemFwUnmapAndFree(psDevice, psFwBlkConfigsMemDesc);
+
+       PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks configured and ENABLED", ui32ArrayLen));
+
+       PVR_DPF_RETURN_OK;
+
+fail3:
+       DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc);
+fail2:
+       RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc);
+fail1:
+       DevmemFwUnmapAndFree(psDevice, psFwBlkConfigsMemDesc);
+
+       PVR_DPF_RETURN_RC(eError);
+}
+
+/******************************************************************************
+ * Currently only implemented on Linux. Feature can be enabled to provide
+ * an interface to 3rd-party kernel modules that wish to access the
+ * HWPerf data. The API is documented in the rgxapi_km.h header and
+ * the rgx_hwperf* headers.
+ *****************************************************************************/
+
+/* Internal HWPerf kernel connection/device data object to track the state
+ * of a client session.
+ */
+typedef struct
+{
+       PVRSRV_DEVICE_NODE* psRgxDevNode;
+       PVRSRV_RGXDEV_INFO* psRgxDevInfo;
+
+       /* TL Open/close state */
+       IMG_HANDLE          hSD[RGX_HWPERF_MAX_STREAM_ID];
+
+       /* TL Acquire/release state */
+       IMG_PBYTE                       pHwpBuf[RGX_HWPERF_MAX_STREAM_ID];                      /*!< buffer returned to user in acquire call */
+       IMG_PBYTE                       pHwpBufEnd[RGX_HWPERF_MAX_STREAM_ID];           /*!< pointer to end of HwpBuf */
+       IMG_PBYTE                       pTlBuf[RGX_HWPERF_MAX_STREAM_ID];                       /*!< buffer obtained via TlAcquireData */
+       IMG_PBYTE                       pTlBufPos[RGX_HWPERF_MAX_STREAM_ID];            /*!< initial position in TlBuf to acquire packets */
+       IMG_PBYTE                       pTlBufRead[RGX_HWPERF_MAX_STREAM_ID];           /*!< pointer to the last packet read */
+       IMG_UINT32                      ui32AcqDataLen[RGX_HWPERF_MAX_STREAM_ID];       /*!< length of acquired TlBuf */
+       IMG_BOOL                        bRelease[RGX_HWPERF_MAX_STREAM_ID];             /*!< used to determine whether or not to release currently held TlBuf */
+
+
+} RGX_KM_HWPERF_DEVDATA;
+
+PVRSRV_ERROR RGXHWPerfConfigureCounters(
+               RGX_HWPERF_CONNECTION *psHWPerfConnection,
+               IMG_UINT32                                      ui32CtrlWord,
+               IMG_UINT32                                      ui32NumBlocks,
+               RGX_HWPERF_CONFIG_CNTBLK*       asBlockConfigs)
+{
+       PVRSRV_ERROR           eError;
+       RGX_KM_HWPERF_DEVDATA* psDevData;
+       RGX_HWPERF_DEVICE *psHWPerfDev;
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+       /* Validate input argument values supplied by the caller */
+       if (!psHWPerfConnection || ui32NumBlocks==0 || !asBlockConfigs)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       if (ui32NumBlocks > RGXFWIF_HWPERF_CTRL_BLKS_MAX)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       psHWPerfDev = psHWPerfConnection->psHWPerfDevList;
+
+       while (psHWPerfDev)
+       {
+               psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData;
+
+               /* Call the internal server API */
+               eError = PVRSRVRGXConfigureHWPerfBlocksKM(NULL,
+                                                         psDevData->psRgxDevNode,
+                                                         ui32CtrlWord,
+                                                         ui32NumBlocks,
+                                                         asBlockConfigs);
+
+               PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVRGXConfigureHWPerfBlocksKM");
+
+               psHWPerfDev = psHWPerfDev->psNext;
+       }
+
+       return PVRSRV_OK;
+}
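+/* Illustrative usage sketch (not part of the driver): a kernel-mode client
+ * enabling one counter block on every device of an open HWPerf connection.
+ * The zero control word and the zero-initialised block configuration are
+ * placeholders; a real caller fills in the block ID and counter settings.
+ *
+ *     RGX_HWPERF_CONFIG_CNTBLK asBlkCfg[1] = {0};
+ *
+ *     eError = RGXHWPerfConfigureCounters(psHWPerfConnection, 0, 1, asBlkCfg);
+ */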
+
+/******************************************************************************
+ End of file (rgxhwperf.c)
+ ******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxhwperf.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxhwperf.h
new file mode 100644 (file)
index 0000000..9bf9d45
--- /dev/null
@@ -0,0 +1,60 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX HW Performance header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX HWPerf functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXHWPERF_H_
+#define RGXHWPERF_H_
+
+#include "rgxhwperf_common.h"
+
+/******************************************************************************
+ * RGX HW Performance Profiling API(s) - Volcanic specific
+ *****************************************************************************/
+
+PVRSRV_ERROR PVRSRVRGXConfigureHWPerfBlocksKM(
+       CONNECTION_DATA       * psConnection,
+       PVRSRV_DEVICE_NODE    * psDeviceNode,
+       IMG_UINT32            ui32CtrlWord,
+       IMG_UINT32            ui32ArrayLen,
+       RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs);
+
+#endif /* RGXHWPERF_H_ */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxinit.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxinit.c
new file mode 100644 (file)
index 0000000..4e8980b
--- /dev/null
@@ -0,0 +1,4597 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific initialisation routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(__linux__)
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+#include "log2.h"
+#include "img_defs.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "pvrsrv_bridge_init.h"
+#include "rgx_bridge_init.h"
+#include "syscommon.h"
+#include "rgx_heaps.h"
+#include "rgxheapconfig.h"
+#include "rgxdefs_km.h"
+#include "rgxpower.h"
+#include "tlstream.h"
+#include "pvrsrv_tlstreams.h"
+
+#include "rgxinit.h"
+#include "rgxbvnc.h"
+#include "rgxmulticore.h"
+
+#include "pdump_km.h"
+#include "handle.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "rgxmem.h"
+#include "sync_internal.h"
+#include "pvrsrv_apphint.h"
+#include "oskm_apphint.h"
+#include "rgxfwdbg.h"
+#include "info_page.h"
+
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_km.h"
+
+#include "rgxmmuinit.h"
+#include "devicemem_utils.h"
+#include "devicemem_server.h"
+#include "physmem_osmem.h"
+#include "physmem_lma.h"
+
+#include "rgxdebug.h"
+#include "rgxhwperf.h"
+#include "htbserver.h"
+
+#include "rgx_options.h"
+#include "pvrversion.h"
+
+#include "rgx_compat_bvnc.h"
+
+#include "rgx_heaps.h"
+
+#include "rgxta3d.h"
+#include "rgxtimecorr.h"
+#include "rgxshader.h"
+
+#if defined(PDUMP)
+#include "rgxstartstop.h"
+#endif
+
+#include "rgx_fwif_alignchecks.h"
+#include "vmm_pvz_client.h"
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "rgxworkest.h"
+#endif
+#if defined(SUPPORT_PDVFS)
+#include "rgxpdvfs.h"
+#endif
+
+#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER)
+#include "rgxsoctimer.h"
+#endif
+
+#if defined(PDUMP) && defined(SUPPORT_SECURITY_VALIDATION)
+#include "pdump_physmem.h"
+#endif
+
+static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
+static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_CHAR **ppszVersionString);
+static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_PUINT32 pui32RGXClockSpeed);
+static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64ResetValue, IMG_UINT64 ui64SPUResetValue);
+static PVRSRV_ERROR RGXPhysMemDeviceHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode);
+static void DevPart2DeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+#if (RGX_NUM_OS_SUPPORTED > 1)
+static PVRSRV_ERROR RGXInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32OSid);
+static void RGXDeInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap);
+#endif
+
+/* Services internal heap identification used in this file only */
+#define RGX_FIRMWARE_MAIN_HEAP_IDENT   "FwMain"   /*!< RGX Main Firmware Heap identifier */
+#define RGX_FIRMWARE_CONFIG_HEAP_IDENT "FwConfig" /*!< RGX Config firmware Heap identifier */
+
+#define RGX_MMU_PAGE_SIZE_4KB   (   4 * 1024)
+#define RGX_MMU_PAGE_SIZE_16KB  (  16 * 1024)
+#define RGX_MMU_PAGE_SIZE_64KB  (  64 * 1024)
+#define RGX_MMU_PAGE_SIZE_256KB ( 256 * 1024)
+#define RGX_MMU_PAGE_SIZE_1MB   (1024 * 1024)
+#define RGX_MMU_PAGE_SIZE_2MB   (2048 * 1024)
+#define RGX_MMU_PAGE_SIZE_MIN RGX_MMU_PAGE_SIZE_4KB
+#define RGX_MMU_PAGE_SIZE_MAX RGX_MMU_PAGE_SIZE_2MB
+
+#define VAR(x) #x
+
+#define MAX_BVNC_LEN (12)
+#define RGXBVNC_BUFFER_SIZE (((PVRSRV_MAX_DEVICES)*(MAX_BVNC_LEN))+1)
+
+static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo);
+
+#if !defined(NO_HARDWARE)
+/*************************************************************************/ /*!
+@Function       SampleIRQCount
+@Description    Utility function taking snapshots of RGX FW interrupt count.
+@Input          paui32Input  A pointer to RGX FW IRQ count array.
+                             Size of the array should be equal to RGX FW thread
+                             count.
+@Input          paui32Output A pointer to array containing sampled RGX FW
+                             IRQ counts
+@Return         IMG_BOOL     Returns IMG_TRUE, if RGX FW IRQ is not equal to
+                             sampled RGX FW IRQ count for any RGX FW thread.
+*/ /**************************************************************************/
+static INLINE IMG_BOOL SampleIRQCount(const volatile IMG_UINT32 *paui32Input,
+                                                                         volatile IMG_UINT32 *paui32Output)
+{
+       IMG_UINT32 ui32TID;
+       IMG_BOOL bReturnVal = IMG_FALSE;
+
+       for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++)
+       {
+               if (paui32Output[ui32TID] != paui32Input[ui32TID])
+               {
+                       /**
+                        * we are handling any unhandled interrupts here so align the host
+                        * count with the FW count
+                        */
+
+                       /* Sample the current count from the FW _after_ we've cleared the interrupt. */
+                       paui32Output[ui32TID] = paui32Input[ui32TID];
+                       bReturnVal = IMG_TRUE;
+               }
+       }
+
+       return bReturnVal;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXHostSafetyEvents
+@Description    Returns the event status masked to keep only the safety
+                events handled by the Host
+@Input          psDevInfo    Device Info structure
+@Return         IMG_UINT32   Status of Host-handled safety events
+ */ /**************************************************************************/
+static INLINE IMG_UINT32 RGXHostSafetyEvents(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       if (PVRSRV_VZ_MODE_IS(GUEST) || (psDevInfo->ui32HostSafetyEventMask == 0))
+       {
+               return 0;
+       }
+       else
+       {
+               IMG_UINT32 ui32EventStatus = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_EVENT_STATUS);
+
+               return (ui32EventStatus & psDevInfo->ui32HostSafetyEventMask);
+       }
+}
+
+/*************************************************************************/ /*!
+@Function       RGXSafetyEventHandler
+@Description    Handles the Safety Events that the Host is responsible for
+@Input          psDevInfo    Device Info structure
+ */ /**************************************************************************/
+static void RGXSafetyEventHandler(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       IMG_UINT32 ui32HostSafetyStatus = RGXHostSafetyEvents(psDevInfo);
+       RGX_CONTEXT_RESET_REASON eResetReason = RGX_CONTEXT_RESET_REASON_NONE;
+
+       if (ui32HostSafetyStatus != 0)
+       {
+               /* clear the safety bus events handled by the Host */
+               OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_EVENT_CLEAR, ui32HostSafetyStatus);
+
+               if (BIT_ISSET(ui32HostSafetyStatus, RGX_CR_EVENT_STATUS_FAULT_FW_SHIFT))
+               {
+                       IMG_UINT32 ui32FaultFlag;
+                       IMG_UINT32 ui32FaultFW = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FAULT_FW_STATUS);
+                       IMG_UINT32 ui32CorrectedBitOffset = RGX_CR_FAULT_FW_STATUS_MMU_CORRECT_SHIFT -
+                                                                                               RGX_CR_FAULT_FW_STATUS_MMU_DETECT_SHIFT;
+
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Firmware safety fault status: 0x%X", __func__, ui32FaultFW));
+
+                       for (ui32FaultFlag = 0; ui32FaultFlag < ui32CorrectedBitOffset; ui32FaultFlag++)
+                       {
+                               if (BIT_ISSET(ui32FaultFW, ui32FaultFlag))
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s: Firmware safety hardware fault detected (0x%lX).",
+                                                __func__, BIT(ui32FaultFlag)));
+                                       eResetReason = RGX_CONTEXT_RESET_REASON_FW_ECC_ERR;
+                               }
+                               else if (BIT_ISSET(ui32FaultFW, ui32FaultFlag + ui32CorrectedBitOffset))
+                               {
+                                       PVR_DPF((PVR_DBG_WARNING, "%s: Firmware safety hardware fault corrected.(0x%lX).",
+                                                __func__, BIT(ui32FaultFlag)));
+
+                                       /* Only report this if we haven't detected a more serious error */
+                                       if (eResetReason != RGX_CONTEXT_RESET_REASON_FW_ECC_ERR)
+                                       {
+                                               eResetReason = RGX_CONTEXT_RESET_REASON_FW_ECC_OK;
+                                       }
+                               }
+                       }
+
+                       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FAULT_FW_CLEAR, ui32FaultFW);
+               }
+
+               if (BIT_ISSET(ui32HostSafetyStatus, RGX_CR_EVENT_STATUS_WDT_TIMEOUT_SHIFT))
+               {
+                       volatile RGXFWIF_POW_STATE ePowState = psDevInfo->psRGXFWIfFwSysData->ePowState;
+
+                       if (ePowState == RGXFWIF_POW_ON)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "%s: Safety Watchdog Trigger !", __func__));
+
+                               /* Only report this if we haven't detected a more serious error */
+                               if (eResetReason != RGX_CONTEXT_RESET_REASON_FW_ECC_ERR)
+                               {
+                                       eResetReason = RGX_CONTEXT_RESET_REASON_FW_WATCHDOG;
+                               }
+                       }
+               }
+
+               /* Notify client and system layer of any error */
+               if (eResetReason != RGX_CONTEXT_RESET_REASON_NONE)
+               {
+                       PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode;
+                       PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig;
+
+                       /* Client notification of device error will be achieved by
+                        * clients calling UM function RGXGetLastDeviceError() */
+                       psDevInfo->eLastDeviceError = eResetReason;
+
+                       /* Notify system layer of any error */
+                       if (psDevConfig->pfnSysDevErrorNotify)
+                       {
+                               PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0};
+
+                               sErrorData.eResetReason = eResetReason;
+
+                               psDevConfig->pfnSysDevErrorNotify(psDevConfig,
+                                                                 &sErrorData);
+                       }
+               }
+       }
+}
+
+static IMG_BOOL _WaitForInterruptsTimeoutCheck(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+       PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+       IMG_UINT32 ui32TID;
+#endif
+
+       RGXDEBUG_PRINT_IRQ_COUNT(psDevInfo);
+
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+       PVR_DPF((PVR_DBG_ERROR,
+               "Last RGX_LISRHandler State (DevID %u): 0x%08X Clock: %llu",
+                        psDeviceNode->sDevId.ui32InternalID,
+                        psDeviceNode->sLISRExecutionInfo.ui32Status,
+                        psDeviceNode->sLISRExecutionInfo.ui64Clockns));
+
+       for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "RGX FW thread %u: InterruptCountSnapshot: 0x%X",
+                                ui32TID, psDeviceNode->sLISRExecutionInfo.aui32InterruptCountSnapshot[ui32TID]));
+       }
+#else
+       PVR_DPF((PVR_DBG_ERROR, "No further information available. Please enable PVRSRV_DEBUG_LISR_EXECUTION"));
+#endif
+
+       return SampleIRQCount(psDevInfo->psRGXFWIfFwOsData->aui32InterruptCount,
+                                                 psDevInfo->aui32SampleIRQCount);
+}
+
+void RGX_WaitForInterruptsTimeout(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       IMG_BOOL bScheduleMISR;
+
+       if (PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               bScheduleMISR = IMG_TRUE;
+       }
+       else
+       {
+               bScheduleMISR = _WaitForInterruptsTimeoutCheck(psDevInfo);
+       }
+
+       if (bScheduleMISR)
+       {
+               OSScheduleMISR(psDevInfo->pvMISRData);
+
+               if (psDevInfo->pvAPMISRData != NULL)
+               {
+                       OSScheduleMISR(psDevInfo->pvAPMISRData);
+               }
+       }
+}
+
+static inline IMG_BOOL RGXAckHwIrq(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                  IMG_UINT32 ui32IRQStatusReg,
+                                                                  IMG_UINT32 ui32IRQStatusEventMsk,
+                                                                  IMG_UINT32 ui32IRQClearReg,
+                                                                  IMG_UINT32 ui32IRQClearMask)
+{
+       IMG_UINT32 ui32IRQStatus = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32IRQStatusReg);
+
+       /* clear only the pending bit of the thread that triggered this interrupt */
+       ui32IRQClearMask &= ui32IRQStatus;
+
+       if (ui32IRQStatus & ui32IRQStatusEventMsk)
+       {
+               /* acknowledge and clear the interrupt */
+               OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32IRQClearReg, ui32IRQClearMask);
+               return IMG_TRUE;
+       }
+       else
+       {
+               /* spurious interrupt */
+               return IMG_FALSE;
+       }
+}
+
+static IMG_BOOL RGXAckIrqDedicated(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       /* Status & clearing registers are available on both Host and Guests
+        * and are agnostic of the FW CPU type. Due to the remappings done by
+        * the 2nd stage device MMU, all drivers assume they are accessing
+        * register bank 0. */
+       return RGXAckHwIrq(psDevInfo,
+                                          RGX_CR_IRQ_OS0_EVENT_STATUS,
+                                          ~RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_CLRMSK,
+                                          RGX_CR_IRQ_OS0_EVENT_CLEAR,
+                                          ~RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_CLRMSK);
+}
+
+static IMG_BOOL RGX_LISRHandler(void *pvData)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = pvData;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       const RGXFWIF_OSDATA *psFwOsData = psDevInfo->psRGXFWIfFwOsData;
+       IMG_BOOL bIrqAcknowledged = IMG_FALSE;
+
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+       IMG_UINT32 ui32TID;
+
+       for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++)
+       {
+               UPDATE_LISR_DBG_SNAPSHOT(ui32TID, psFwOsData->aui32InterruptCount[ui32TID]);
+       }
+
+       UPDATE_LISR_DBG_STATUS(RGX_LISR_INIT);
+       UPDATE_LISR_DBG_TIMESTAMP();
+#endif
+
+       UPDATE_LISR_DBG_COUNTER();
+
+       if (psDevInfo->bRGXPowered)
+       {
+               IMG_BOOL bSafetyEvent = (RGXHostSafetyEvents(psDevInfo) != 0);
+
+               if ((psDevInfo->pfnRGXAckIrq == NULL) || psDevInfo->pfnRGXAckIrq(psDevInfo) || bSafetyEvent)
+               {
+                       bIrqAcknowledged = IMG_TRUE;
+
+                       if (SampleIRQCount(psFwOsData->aui32InterruptCount,
+                                                          psDevInfo->aui32SampleIRQCount) || bSafetyEvent)
+                       {
+                               UPDATE_LISR_DBG_STATUS(RGX_LISR_PROCESSED);
+                               UPDATE_MISR_DBG_COUNTER();
+
+                               OSScheduleMISR(psDevInfo->pvMISRData);
+
+#if defined(SUPPORT_AUTOVZ)
+                               RGXUpdateAutoVzWdgToken(psDevInfo);
+#endif
+                               if (psDevInfo->pvAPMISRData != NULL)
+                               {
+                                       OSScheduleMISR(psDevInfo->pvAPMISRData);
+                               }
+                       }
+                       else
+                       {
+                               UPDATE_LISR_DBG_STATUS(RGX_LISR_FW_IRQ_COUNTER_NOT_UPDATED);
+                       }
+               }
+               else
+               {
+                       UPDATE_LISR_DBG_STATUS(RGX_LISR_NOT_TRIGGERED_BY_HW);
+               }
+       }
+       else
+       {
+               /* AutoVz drivers rebooting while the firmware is active must acknowledge
+                * and clear the HW IRQ line before RGXInit() has finished. */
+               if (!(psDevInfo->psDeviceNode->bAutoVzFwIsUp &&
+                         (psDevInfo->pfnRGXAckIrq != NULL) &&
+                         psDevInfo->pfnRGXAckIrq(psDevInfo)))
+               {
+                       UPDATE_LISR_DBG_STATUS(RGX_LISR_DEVICE_NOT_POWERED);
+               }
+       }
+
+       return bIrqAcknowledged;
+}
+
+static void RGX_MISR_ProcessKCCBDeferredList(PVRSRV_DEVICE_NODE        *psDeviceNode)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       OS_SPINLOCK_FLAGS uiFlags;
+
+       /* First check whether there are pending commands in Deferred KCCB List */
+       OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
+       if (dllist_is_empty(&psDevInfo->sKCCBDeferredCommandsListHead))
+       {
+               OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
+               return;
+       }
+       OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
+
+       /* Take the power lock to avoid further power transition requests
+        * while the KCCB deferred list is being processed */
+       eError = PVRSRVPowerLock(psDeviceNode);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to acquire PowerLock (device: %p, error: %s)",
+                                __func__, psDeviceNode, PVRSRVGetErrorString(eError)));
+               return;
+       }
+
+       /* Try to send deferred KCCB commands. Do not poll from here. */
+       eError = RGXSendCommandsFromDeferredList(psDevInfo, IMG_FALSE);
+
+       PVRSRVPowerUnlock(psDeviceNode);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_MESSAGE,
+                                "%s could not flush Deferred KCCB list, KCCB is full.",
+                                __func__));
+       }
+}
+
+static void RGX_MISRHandler_CheckFWActivePowerState(void *psDevice)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = psDevice;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       if (psFwSysData->ePowState == RGXFWIF_POW_ON || psFwSysData->ePowState == RGXFWIF_POW_IDLE)
+       {
+               RGX_MISR_ProcessKCCBDeferredList(psDeviceNode);
+       }
+
+       if (psFwSysData->ePowState == RGXFWIF_POW_IDLE)
+       {
+               /* The FW is IDLE and therefore could be shut down */
+               eError = RGXActivePowerRequest(psDeviceNode);
+
+               if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED))
+               {
+                       if (eError != PVRSRV_ERROR_RETRY)
+                       {
+                               PVR_DPF((PVR_DBG_WARNING,
+                                       "%s: Failed RGXActivePowerRequest call (device: %p) with %s",
+                                       __func__, psDeviceNode, PVRSRVGetErrorString(eError)));
+                               PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+                       }
+                       else
+                       {
+                               /* Re-schedule the power down request as it was deferred. */
+                               OSScheduleMISR(psDevInfo->pvAPMISRData);
+                       }
+               }
+       }
+
+}
+
+/* Shorter aliases to keep the code below compact */
+#define GPU_IDLE       RGXFWIF_GPU_UTIL_STATE_IDLE
+#define GPU_ACTIVE     RGXFWIF_GPU_UTIL_STATE_ACTIVE
+#define GPU_BLOCKED    RGXFWIF_GPU_UTIL_STATE_BLOCKED
+#define MAX_ITERATIONS 64
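+
+/* The FW updates the shared utilisation word and counters without taking a lock;
+ * RGXGetGpuUtilStats() below re-reads them (up to MAX_ITERATIONS times per attempt)
+ * until it observes a self-consistent snapshot. */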
+
+static PVRSRV_ERROR RGXGetGpuUtilStats(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                       IMG_HANDLE hGpuUtilUser,
+                                       RGXFWIF_GPU_UTIL_STATS *psReturnStats)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       volatile RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
+       RGXFWIF_GPU_UTIL_STATS *psAggregateStats;
+       IMG_UINT64 ui64TimeNow;
+       IMG_UINT32 ui32Attempts;
+       IMG_UINT32 ui32Remainder;
+
+
+       /***** (1) Initialise return stats *****/
+
+       psReturnStats->bValid = IMG_FALSE;
+       psReturnStats->ui64GpuStatIdle       = 0;
+       psReturnStats->ui64GpuStatActive     = 0;
+       psReturnStats->ui64GpuStatBlocked    = 0;
+       psReturnStats->ui64GpuStatCumulative = 0;
+
+       if (hGpuUtilUser == NULL)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+       psAggregateStats = hGpuUtilUser;
+
+
+       /* Try to acquire GPU utilisation counters and repeat if the FW is in the middle of an update */
+       for (ui32Attempts = 0; ui32Attempts < 4; ui32Attempts++)
+       {
+               IMG_UINT64 aui64TmpCounters[RGXFWIF_GPU_UTIL_STATE_NUM] = {0};
+               IMG_UINT64 ui64LastPeriod = 0, ui64LastWord = 0, ui64LastState = 0, ui64LastTime = 0;
+               IMG_UINT32 i = 0;
+
+
+               /***** (2) Get latest data from shared area *****/
+
+               OSLockAcquire(psDevInfo->hGPUUtilLock);
+
+               /*
+                * First attempt at detecting if the FW is in the middle of an update.
+                * This should also help if the FW is in the middle of a 64 bit variable update.
+                */
+               while (((ui64LastWord != psUtilFWCb->ui64LastWord) ||
+                               (aui64TmpCounters[ui64LastState] !=
+                                psUtilFWCb->aui64StatsCounters[ui64LastState])) &&
+                          (i < MAX_ITERATIONS))
+               {
+                       ui64LastWord  = psUtilFWCb->ui64LastWord;
+                       ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(ui64LastWord);
+                       aui64TmpCounters[GPU_IDLE]    = psUtilFWCb->aui64StatsCounters[GPU_IDLE];
+                       aui64TmpCounters[GPU_ACTIVE]  = psUtilFWCb->aui64StatsCounters[GPU_ACTIVE];
+                       aui64TmpCounters[GPU_BLOCKED] = psUtilFWCb->aui64StatsCounters[GPU_BLOCKED];
+                       i++;
+               }
+
+               OSLockRelease(psDevInfo->hGPUUtilLock);
+
+               if (i == MAX_ITERATIONS)
+               {
+                       PVR_DPF((PVR_DBG_WARNING,
+                                "RGXGetGpuUtilStats could not get reliable data after trying %u times", i));
+                       return PVRSRV_ERROR_TIMEOUT;
+               }
+
+
+               /***** (3) Compute return stats *****/
+
+               /* Update temp counters to account for the time since the last update to the shared ones */
+               OSMemoryBarrier(NULL); /* Ensure the current time is read after the loop above */
+               ui64TimeNow    = RGXFWIF_GPU_UTIL_GET_TIME(RGXTimeCorrGetClockns64(psDeviceNode));
+               ui64LastTime   = RGXFWIF_GPU_UTIL_GET_TIME(ui64LastWord);
+               ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime);
+               aui64TmpCounters[ui64LastState] += ui64LastPeriod;
+
+               /* Get statistics for a user since its last request */
+               psReturnStats->ui64GpuStatIdle = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_IDLE],
+                                                                            psAggregateStats->ui64GpuStatIdle);
+               psReturnStats->ui64GpuStatActive = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_ACTIVE],
+                                                                              psAggregateStats->ui64GpuStatActive);
+               psReturnStats->ui64GpuStatBlocked = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_BLOCKED],
+                                                                               psAggregateStats->ui64GpuStatBlocked);
+               psReturnStats->ui64GpuStatCumulative = psReturnStats->ui64GpuStatIdle +
+                                                      psReturnStats->ui64GpuStatActive +
+                                                      psReturnStats->ui64GpuStatBlocked;
+
+               if (psAggregateStats->ui64TimeStamp != 0)
+               {
+                       IMG_UINT64 ui64TimeSinceLastCall = ui64TimeNow - psAggregateStats->ui64TimeStamp;
+                       /* We expect to return at least 75% of the time since the last call in GPU stats */
+                       IMG_UINT64 ui64MinReturnedStats = ui64TimeSinceLastCall - (ui64TimeSinceLastCall / 4);
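+                       /* e.g. if 100us have elapsed since the last call, at least 75us of
+                        * combined idle/active/blocked time is expected (illustrative figures) */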
+
+                       /*
+                        * If the returned stats are substantially lower than the time since
+                        * the last call, then the Host might have read a partial update from the FW.
+                        * If this happens, try sampling the shared counters again.
+                        */
+                       if (psReturnStats->ui64GpuStatCumulative < ui64MinReturnedStats)
+                       {
+                               PVR_DPF((PVR_DBG_MESSAGE,
+                                        "%s: Return stats (%" IMG_UINT64_FMTSPEC ") too low "
+                                        "(call period %" IMG_UINT64_FMTSPEC ")",
+                                        __func__, psReturnStats->ui64GpuStatCumulative, ui64TimeSinceLastCall));
+                               PVR_DPF((PVR_DBG_MESSAGE, "%s: Attempt #%u has failed, trying again",
+                                        __func__, ui32Attempts));
+                               continue;
+                       }
+               }
+
+               break;
+       }
+
+
+       /***** (4) Update aggregate stats for the current user *****/
+
+       psAggregateStats->ui64GpuStatIdle    += psReturnStats->ui64GpuStatIdle;
+       psAggregateStats->ui64GpuStatActive  += psReturnStats->ui64GpuStatActive;
+       psAggregateStats->ui64GpuStatBlocked += psReturnStats->ui64GpuStatBlocked;
+       psAggregateStats->ui64TimeStamp       = ui64TimeNow;
+
+
+       /***** (5) Convert return stats to microseconds *****/
+
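+       /* The counters are accumulated in nanoseconds (see RGXTimeCorrGetClockns64 above),
+        * so dividing by 1000 yields microseconds */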
+       psReturnStats->ui64GpuStatIdle       = OSDivide64(psReturnStats->ui64GpuStatIdle, 1000, &ui32Remainder);
+       psReturnStats->ui64GpuStatActive     = OSDivide64(psReturnStats->ui64GpuStatActive, 1000, &ui32Remainder);
+       psReturnStats->ui64GpuStatBlocked    = OSDivide64(psReturnStats->ui64GpuStatBlocked, 1000, &ui32Remainder);
+       psReturnStats->ui64GpuStatCumulative = OSDivide64(psReturnStats->ui64GpuStatCumulative, 1000, &ui32Remainder);
+
+       /* Check that the return stats make sense */
+       if (psReturnStats->ui64GpuStatCumulative == 0)
+       {
+               /* We can enter here only if all the RGXFWIF_GPU_UTIL_GET_PERIOD
+                * returned 0. This could happen if the GPU frequency value
+                * is not well calibrated and the FW is updating the GPU state
+                * while the Host is reading it.
+                * When such an event happens frequently, timers or the aggregate
+                * stats might not be accurate...
+                */
+               PVR_DPF((PVR_DBG_WARNING, "RGXGetGpuUtilStats could not get reliable data."));
+               return PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+       }
+
+       psReturnStats->bValid = IMG_TRUE;
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SORgxGpuUtilStatsRegister(IMG_HANDLE *phGpuUtilUser)
+{
+       RGXFWIF_GPU_UTIL_STATS *psAggregateStats;
+
+       /* NoStats used since this may be called outside of the register/de-register
+        * process calls which track memory use. */
+       psAggregateStats = OSAllocMemNoStats(sizeof(RGXFWIF_GPU_UTIL_STATS));
+       if (psAggregateStats == NULL)
+       {
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       psAggregateStats->ui64GpuStatIdle    = 0;
+       psAggregateStats->ui64GpuStatActive  = 0;
+       psAggregateStats->ui64GpuStatBlocked = 0;
+       psAggregateStats->ui64TimeStamp      = 0;
+
+       /* Not used */
+       psAggregateStats->bValid = IMG_FALSE;
+       psAggregateStats->ui64GpuStatCumulative = 0;
+
+       *phGpuUtilUser = psAggregateStats;
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SORgxGpuUtilStatsUnregister(IMG_HANDLE hGpuUtilUser)
+{
+       RGXFWIF_GPU_UTIL_STATS *psAggregateStats;
+
+       if (hGpuUtilUser == NULL)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       psAggregateStats = hGpuUtilUser;
+       OSFreeMemNoStats(psAggregateStats);
+
+       return PVRSRV_OK;
+}
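+
+/* Illustrative usage of the GPU utilisation interface (a sketch only, not driver
+ * code): a caller registers once and then periodically samples the per-user deltas
+ * returned by the stats callback installed in RGXInitDevPart2().
+ *
+ *     IMG_HANDLE hUser;
+ *     RGXFWIF_GPU_UTIL_STATS sStats;
+ *
+ *     SORgxGpuUtilStatsRegister(&hUser);
+ *     ...
+ *     if ((psDevInfo->pfnGetGpuUtilStats(psDeviceNode, hUser, &sStats) == PVRSRV_OK) &&
+ *         sStats.bValid)
+ *     {
+ *             // ui64GpuStatActive / ui64GpuStatCumulative approximates the active
+ *             // ratio (values in microseconds) since the previous call for this user
+ *     }
+ *     ...
+ *     SORgxGpuUtilStatsUnregister(hUser);
+ */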
+
+/*
+       RGX MISR Handler
+*/
+static void RGX_MISRHandler_Main (void *pvData)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = pvData;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       /* Give the HWPerf service a chance to transfer some data from the FW
+        * buffer to the host driver transport layer buffer.
+        */
+       RGXHWPerfDataStoreCB(psDeviceNode);
+
+       /* Inform the other devices managed by Services that we have finished an operation */
+       PVRSRVNotifyCommandCompletion(psDeviceNode);
+
+#if defined(SUPPORT_PDVFS) && defined(RGXFW_META_SUPPORT_2ND_THREAD)
+       /*
+        * The Firmware CCB only exists for the primary FW thread. The only case in
+        * which a non-primary FW thread needs to communicate with the host driver is
+        * PDVFS running on a non-primary thread, which is handled directly by the
+        * call below.
+        */
+       RGXPDVFSCheckCoreClkRateChange(psDeviceNode->pvDevice);
+#endif
+
+       /* Handle Safety events if necessary */
+       RGXSafetyEventHandler(psDeviceNode->pvDevice);
+
+       /* Signal the global event object */
+       PVRSRVSignalGlobalEO();
+
+       /* Process the Firmware CCB for pending commands */
+       RGXCheckFirmwareCCB(psDeviceNode->pvDevice);
+
+       /* Calibrate the GPU frequency and recorrelate Host and GPU timers (done every few seconds) */
+       RGXTimeCorrRestartPeriodic(psDeviceNode);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       /* Process Workload Estimation Specific commands from the FW */
+       WorkEstCheckFirmwareCCB(psDeviceNode->pvDevice);
+#endif
+
+       if (psDevInfo->pvAPMISRData == NULL)
+       {
+               RGX_MISR_ProcessKCCBDeferredList(psDeviceNode);
+       }
+}
+#endif /* !defined(NO_HARDWARE) */
+
+static PVRSRV_ERROR RGXSetPowerParams(PVRSRV_RGXDEV_INFO   *psDevInfo,
+                                      PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       /* Save information used on power transitions for later
+        * (when RGXStart and RGXStop are executed)
+        */
+       psDevInfo->sLayerParams.psDevInfo = psDevInfo;
+       psDevInfo->sLayerParams.psDevConfig = psDevConfig;
+#if defined(PDUMP)
+       psDevInfo->sLayerParams.ui32PdumpFlags = PDUMP_FLAGS_CONTINUOUS;
+#endif
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) ||
+           RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+       {
+               IMG_DEV_PHYADDR sKernelMMUCtxPCAddr;
+
+               if (psDevInfo->psDeviceNode->bAutoVzFwIsUp)
+               {
+                       /* If AutoVz firmware is up at this stage, the driver initialised it
+                        * during a previous life-cycle. The firmware's memory is already pre-mapped
+                        * and the MMU page tables reside in the predetermined memory carveout.
+                        * The Kernel MMU Context created in this life-cycle is a dummy structure
+                        * that is not used for mapping.
+                        * To program the Device's BIF with the correct PC address, use the base
+                        * address of the carveout reserved for MMU mappings as Kernel MMU PC Address */
+#if defined(PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR)
+                       sKernelMMUCtxPCAddr.uiAddr = PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR;
+#else
+                       PHYS_HEAP_CONFIG *psFwHeapCfg = FindPhysHeapConfig(psDevConfig,
+                                                                                                                          PHYS_HEAP_USAGE_FW_MAIN);
+                       eError = (psFwHeapCfg != NULL) ? PVRSRV_OK : PVRSRV_ERROR_PHYSHEAP_CONFIG;
+                       PVR_LOG_RETURN_IF_ERROR(eError, "FindPhysHeapConfig(PHYS_HEAP_USAGE_FW_MAIN)");
+
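+                       /* The MMU page-table carveout is assumed to sit immediately after the
+                        * per-OSID firmware raw heaps, hence the fixed offset of
+                        * RGX_FIRMWARE_RAW_HEAP_SIZE * RGX_NUM_OS_SUPPORTED from the FW_MAIN base */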
+                       sKernelMMUCtxPCAddr.uiAddr = psFwHeapCfg->sCardBase.uiAddr +
+                                                                                (RGX_FIRMWARE_RAW_HEAP_SIZE * RGX_NUM_OS_SUPPORTED);
+#endif /* PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR */
+               }
+               else
+               {
+                       eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx,
+                                                    &sKernelMMUCtxPCAddr);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire Kernel MMU Ctx page catalog"));
+                               return eError;
+                       }
+               }
+
+               psDevInfo->sLayerParams.sPCAddr = sKernelMMUCtxPCAddr;
+       }
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION)
+       /* Send information used on power transitions to the trusted device as
+        * in this setup the driver cannot start/stop the GPU and perform resets
+        */
+       if (psDevConfig->pfnTDSetPowerParams)
+       {
+               PVRSRV_TD_POWER_PARAMS sTDPowerParams;
+
+               if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) ||
+                   RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+               {
+                       sTDPowerParams.sPCAddr = psDevInfo->sLayerParams.sPCAddr;
+               }
+
+               eError = psDevConfig->pfnTDSetPowerParams(psDevConfig->hSysData,
+                                                                                                 &sTDPowerParams);
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: TDSetPowerParams not implemented!"));
+               eError = PVRSRV_ERROR_NOT_IMPLEMENTED;
+       }
+#endif
+
+       return eError;
+}
+
+/*
+       RGXSystemGetFabricCoherency
+*/
+PVRSRV_ERROR RGXSystemGetFabricCoherency(PVRSRV_DEVICE_CONFIG *psDevConfig,
+                                                                                IMG_CPU_PHYADDR sRegsCpuPBase,
+                                                                                IMG_UINT32 ui32RegsSize,
+                                                                                PVRSRV_DEVICE_FABRIC_TYPE *peDevFabricType,
+                                                                                PVRSRV_DEVICE_SNOOP_MODE *peCacheSnoopingMode)
+{
+       IMG_CHAR *aszLabels[] = {"none", "acelite", "fullace", "unknown"};
+       PVRSRV_DEVICE_SNOOP_MODE eAppHintCacheSnoopingMode;
+       PVRSRV_DEVICE_SNOOP_MODE eDeviceCacheSnoopingMode;
+       IMG_UINT32 ui32AppHintFabricCoherency;
+       IMG_UINT32 ui32DeviceFabricCoherency;
+       void *pvAppHintState = NULL;
+       IMG_UINT32 ui32AppHintDefault;
+#if !defined(NO_HARDWARE)
+       void *pvRegsBaseKM;
+       IMG_BOOL bPowerDown = IMG_TRUE;
+       PVRSRV_ERROR eError;
+#endif
+
+       if (!sRegsCpuPBase.uiAddr || !ui32RegsSize)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "RGXSystemGetFabricCoherency: Invalid RGX register base/size parameters"));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+#if !defined(NO_HARDWARE)
+       pvRegsBaseKM = OSMapPhysToLin(sRegsCpuPBase, ui32RegsSize, PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+       if (!pvRegsBaseKM)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "RGXSystemGetFabricCoherency: Failed to create RGX register mapping"));
+               return PVRSRV_ERROR_BAD_MAPPING;
+       }
+
+       if (psDevConfig->psDevNode != NULL)
+       {
+               bPowerDown = (psDevConfig->psDevNode->eCurrentSysPowerState == PVRSRV_SYS_POWER_STATE_OFF);
+       }
+
+       /* Power-up the device as required to read the registers */
+       if (bPowerDown)
+       {
+               eError = PVRSRVSetSystemPowerState(psDevConfig, PVRSRV_SYS_POWER_STATE_ON);
+               PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState ON");
+       }
+
+       /* AXI support within the SoC, bitfield COHERENCY_SUPPORT [1 .. 0]
+               value NO_COHERENCY        0x0 {SoC does not support any form of Coherency}
+               value ACE_LITE_COHERENCY  0x1 {SoC supports ACE-Lite or I/O Coherency}
+               value FULL_ACE_COHERENCY  0x2 {SoC supports full ACE or 2-Way Coherency} */
+       ui32DeviceFabricCoherency = OSReadHWReg32(pvRegsBaseKM, RGX_CR_SOC_AXI);
+       PVR_LOG(("AXI fabric coherency (RGX_CR_SOC_AXI): 0x%x", ui32DeviceFabricCoherency));
+#if defined(DEBUG)
+       if (ui32DeviceFabricCoherency & ~((IMG_UINT32)RGX_CR_SOC_AXI_MASKFULL))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Invalid RGX_CR_SOC_AXI value.", __func__));
+               return PVRSRV_ERROR_INVALID_DEVICE;
+       }
+#endif
+       ui32DeviceFabricCoherency &= ~((IMG_UINT32)RGX_CR_SOC_AXI_COHERENCY_SUPPORT_CLRMSK);
+       ui32DeviceFabricCoherency >>= RGX_CR_SOC_AXI_COHERENCY_SUPPORT_SHIFT;
+
+       if (bPowerDown)
+       {
+               eError = PVRSRVSetSystemPowerState(psDevConfig, PVRSRV_SYS_POWER_STATE_OFF);
+               PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState OFF");
+       }
+
+       /* UnMap Regs */
+       OSUnMapPhysToLin(pvRegsBaseKM, ui32RegsSize);
+
+       switch (ui32DeviceFabricCoherency)
+       {
+       case RGX_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY:
+               eDeviceCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CROSS;
+               *peDevFabricType = PVRSRV_DEVICE_FABRIC_FULLACE;
+               break;
+
+       case RGX_CR_SOC_AXI_COHERENCY_SUPPORT_ACE_LITE_COHERENCY:
+               eDeviceCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CPU_ONLY;
+               *peDevFabricType = PVRSRV_DEVICE_FABRIC_ACELITE;
+               break;
+
+       case RGX_CR_SOC_AXI_COHERENCY_SUPPORT_NO_COHERENCY:
+       default:
+               eDeviceCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE;
+               *peDevFabricType = PVRSRV_DEVICE_FABRIC_NONE;
+               break;
+       }
+#else /* !defined(NO_HARDWARE) */
+#if defined(RGX_FEATURE_GPU_CPU_COHERENCY)
+       *peDevFabricType = PVRSRV_DEVICE_FABRIC_FULLACE;
+       eDeviceCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CROSS;
+       ui32DeviceFabricCoherency = RGX_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY;
+#else
+       *peDevFabricType = PVRSRV_DEVICE_FABRIC_ACELITE;
+       eDeviceCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CPU_ONLY;
+       ui32DeviceFabricCoherency = RGX_CR_SOC_AXI_COHERENCY_SUPPORT_ACE_LITE_COHERENCY;
+#endif
+#endif /* !defined(NO_HARDWARE) */
+
+       OSCreateKMAppHintState(&pvAppHintState);
+       ui32AppHintDefault = RGX_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY;
+       OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, FabricCoherencyOverride,
+                                                &ui32AppHintDefault, &ui32AppHintFabricCoherency);
+       OSFreeKMAppHintState(pvAppHintState);
+
+#if defined(SUPPORT_SECURITY_VALIDATION)
+       /* Temporarily disable coherency */
+       ui32AppHintFabricCoherency = RGX_CR_SOC_AXI_COHERENCY_SUPPORT_NO_COHERENCY;
+#endif
+
+       /* Suppress invalid AppHint value */
+       switch (ui32AppHintFabricCoherency)
+       {
+       case RGX_CR_SOC_AXI_COHERENCY_SUPPORT_NO_COHERENCY:
+               eAppHintCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE;
+               break;
+
+       case RGX_CR_SOC_AXI_COHERENCY_SUPPORT_ACE_LITE_COHERENCY:
+               eAppHintCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CPU_ONLY;
+               break;
+
+       case RGX_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY:
+               eAppHintCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CROSS;
+               break;
+
+       default:
+               PVR_DPF((PVR_DBG_ERROR,
+                               "Invalid FabricCoherencyOverride AppHint %d, ignoring",
+                               ui32AppHintFabricCoherency));
+               eAppHintCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CROSS;
+               ui32AppHintFabricCoherency = RGX_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY;
+               break;
+       }
+
+       if (ui32AppHintFabricCoherency < ui32DeviceFabricCoherency)
+       {
+               PVR_LOG(("Downgrading device fabric coherency from %s to %s",
+                               aszLabels[ui32DeviceFabricCoherency],
+                               aszLabels[ui32AppHintFabricCoherency]));
+               eDeviceCacheSnoopingMode = eAppHintCacheSnoopingMode;
+       }
+       else if (ui32AppHintFabricCoherency > ui32DeviceFabricCoherency)
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                               "Cannot upgrade device fabric coherency from %s to %s, not supported by device!",
+                               aszLabels[ui32DeviceFabricCoherency],
+                               aszLabels[ui32AppHintFabricCoherency]));
+
+               /* Override the requested app-hint with the actual value being used */
+               ui32AppHintFabricCoherency = ui32DeviceFabricCoherency;
+       }
+
+       *peCacheSnoopingMode = eDeviceCacheSnoopingMode;
+       return PVRSRV_OK;
+}
+
+/*
+       RGXSystemHasFBCDCVersion31
+*/
+static IMG_BOOL RGXSystemHasFBCDCVersion31(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+#if defined(SUPPORT_VALIDATION)
+       IMG_UINT32 ui32FBCDCVersionOverride = 0;
+#endif
+
+       {
+
+#if defined(SUPPORT_VALIDATION)
+               if (ui32FBCDCVersionOverride == 2)
+               {
+                       PVR_DPF((PVR_DBG_WARNING,
+                                "%s: FBCDCVersionOverride forces FBC3.1 but this core doesn't support it!",
+                                __func__));
+               }
+#endif
+
+#if !defined(NO_HARDWARE)
+               if (psDeviceNode->psDevConfig->bHasFBCDCVersion31)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: System uses FBCDC3.1 but GPU doesn't support it!",
+                                __func__));
+               }
+#endif
+       }
+
+       return IMG_FALSE;
+}
+
+/*
+       RGXDevMMUAttributes
+*/
+static MMU_DEVICEATTRIBS *RGXDevMMUAttributes(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                              IMG_BOOL bKernelMemoryCtx)
+{
+       MMU_DEVICEATTRIBS *psMMUDevAttrs = NULL;
+
+       /* bKernelMemoryCtx is only used for rogue cores */
+       PVR_UNREFERENCED_PARAMETER(bKernelMemoryCtx);
+
+       if (psDeviceNode->pfnCheckDeviceFeature)
+       {
+               psMMUDevAttrs = psDeviceNode->psMMUDevAttrs;
+       }
+
+       return psMMUDevAttrs;
+}
+
+/*
+ * RGXInitDevPart2
+ */
+PVRSRV_ERROR RGXInitDevPart2 (PVRSRV_DEVICE_NODE       *psDeviceNode,
+                                                         IMG_UINT32                    ui32DeviceFlags,
+                                                         IMG_UINT32                    ui32HWPerfHostFilter,
+                                                         RGX_ACTIVEPM_CONF             eActivePMConf,
+                                                         IMG_UINT32                    ui32AvailablePowUnitsMask,
+                                                         IMG_UINT32                    ui32AvailableRACMask)
+{
+       PVRSRV_ERROR                    eError;
+       PVRSRV_RGXDEV_INFO              *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_DEV_POWER_STATE  eDefaultPowerState = PVRSRV_DEV_POWER_STATE_ON;
+       PVRSRV_DEVICE_CONFIG    *psDevConfig = psDeviceNode->psDevConfig;
+       IMG_UINT32                      ui32AllPowUnitsMask = (1 << psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount) - 1;
+       IMG_UINT32                      ui32AllRACMask = (1 << psDevInfo->sDevFeatureCfg.ui32MAXRACCount) - 1;
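+       /* e.g. a core with 4 power units gives ui32AllPowUnitsMask = (1 << 4) - 1 = 0xF
+        * (illustrative value) */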
+
+       /* Assume system layer has turned power on by this point, required before powering device */
+       psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_ON;
+
+       PDUMPCOMMENT(psDeviceNode, "RGX Initialisation Part 2");
+
+#if defined(TIMING) || defined(DEBUG)
+       OSUserModeAccessToPerfCountersEn();
+#endif
+
+       /* Initialise Device Flags */
+       psDevInfo->ui32DeviceFlags = 0;
+       RGXSetDeviceFlags(psDevInfo, ui32DeviceFlags, IMG_TRUE);
+
+       /* Allocate DVFS Table (needs to be allocated before GPU trace events
+        *  component is initialised because there is a dependency between them) */
+       psDevInfo->psGpuDVFSTable = OSAllocZMem(sizeof(*(psDevInfo->psGpuDVFSTable)));
+       PVR_LOG_GOTO_IF_NOMEM(psDevInfo->psGpuDVFSTable, eError, ErrorExit);
+
+
+       if (psDevInfo->ui32HWPerfHostFilter == 0)
+       {
+               RGXHWPerfHostSetEventFilter(psDevInfo, ui32HWPerfHostFilter);
+       }
+
+       /* If HWPerf enabled allocate all resources for the host side buffer. */
+       if (psDevInfo->ui32HWPerfHostFilter != 0)
+       {
+               if (RGXHWPerfHostInitOnDemandResources(psDevInfo) != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "HWPerfHost buffer on demand"
+                               " initialisation failed."));
+               }
+       }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       /* Initialise work estimation lock */
+       eError = OSLockCreate(&psDevInfo->hWorkEstLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(WorkEstLock)", ErrorExit);
+#endif
+
+       /* Initialise lists of ZSBuffers */
+       eError = OSLockCreate(&psDevInfo->hLockZSBuffer);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(LockZSBuffer)", ErrorExit);
+       dllist_init(&psDevInfo->sZSBufferHead);
+       psDevInfo->ui32ZSBufferCurrID = 1;
+
+       /* Initialise lists of growable Freelists */
+       eError = OSLockCreate(&psDevInfo->hLockFreeList);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(LockFreeList)", ErrorExit);
+       dllist_init(&psDevInfo->sFreeListHead);
+       psDevInfo->ui32FreelistCurrID = 1;
+
+       eError = OSLockCreate(&psDevInfo->hDebugFaultInfoLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(DebugFaultInfoLock)", ErrorExit);
+
+       if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED)
+       {
+               eError = OSLockCreate(&psDevInfo->hMMUCtxUnregLock);
+               PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(MMUCtxUnregLock)", ErrorExit);
+       }
+
+       /* Setup GPU utilisation stats update callback */
+       eError = OSLockCreate(&psDevInfo->hGPUUtilLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(GPUUtilLock)", ErrorExit);
+#if !defined(NO_HARDWARE)
+       psDevInfo->pfnGetGpuUtilStats = RGXGetGpuUtilStats;
+#endif
+
+       eDefaultPowerState = PVRSRV_DEV_POWER_STATE_ON;
+       psDevInfo->eActivePMConf = eActivePMConf;
+
+       /* Validate the SPU mask and initialise the set of SPUs to power up */
+       if ((ui32AvailablePowUnitsMask & ui32AllPowUnitsMask) == 0)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Invalid SPU mask (All=0x%X, Non Fused=0x%X). At least one SPU must be powered up.",
+                        __func__,
+                        ui32AllPowUnitsMask,
+                        ui32AvailablePowUnitsMask));
+               PVR_LOG_GOTO_WITH_ERROR("ui32AvailablePowUnitsMask", eError, PVRSRV_ERROR_INVALID_SPU_MASK, ErrorExit);
+       }
+
+       psDevInfo->ui32AvailablePowUnitsMask = ui32AvailablePowUnitsMask & ui32AllPowUnitsMask;
+
+       psDevInfo->ui32AvailableRACMask = ui32AvailableRACMask & ui32AllRACMask;
+
+#if !defined(NO_HARDWARE)
+       /* set-up the Active Power Mgmt callback */
+       {
+               RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+               IMG_BOOL bSysEnableAPM = psRGXData->psRGXTimingInfo->bEnableActivePM;
+               IMG_BOOL bEnableAPM = ((eActivePMConf == RGX_ACTIVEPM_DEFAULT) && bSysEnableAPM) ||
+                                                          (eActivePMConf == RGX_ACTIVEPM_FORCE_ON);
+
+               if (bEnableAPM && (!PVRSRV_VZ_MODE_IS(NATIVE)))
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "%s: Active Power Management disabled in virtualization mode", __func__));
+                       bEnableAPM = IMG_FALSE;
+               }
+
+#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) && defined(SUPPORT_AUTOVZ)
+               /* The AutoVz driver enables a virtualisation watchdog that is not compatible with APM */
+               PVR_ASSERT(bEnableAPM == IMG_FALSE);
+#endif
+
+               if (bEnableAPM)
+               {
+                       eError = OSInstallMISR(&psDevInfo->pvAPMISRData,
+                                       RGX_MISRHandler_CheckFWActivePowerState,
+                                       psDeviceNode,
+                                       "RGX_CheckFWActivePower");
+                       PVR_LOG_GOTO_IF_ERROR(eError, "OSInstallMISR(APMISR)", ErrorExit);
+
+                       /* Prevent the device from being woken up before there is something to do. */
+                       eDefaultPowerState = PVRSRV_DEV_POWER_STATE_OFF;
+               }
+       }
+#endif
+
+       PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableAPM,
+                                           RGXQueryAPMState,
+                                           RGXSetAPMState,
+                                           psDeviceNode,
+                                           NULL);
+
+       RGXTimeCorrInitAppHintCallbacks(psDeviceNode);
+
+       /* Register the device with the power manager */
+       eError = PVRSRVRegisterPowerDevice(psDeviceNode,
+                                                                          (PVRSRV_VZ_MODE_IS(NATIVE)) ? &RGXPrePowerState : &RGXVzPrePowerState,
+                                                                          (PVRSRV_VZ_MODE_IS(NATIVE)) ? &RGXPostPowerState : &RGXVzPostPowerState,
+                                                                          psDevConfig->pfnPrePowerState, psDevConfig->pfnPostPowerState,
+                                                                          &RGXPreClockSpeedChange, &RGXPostClockSpeedChange,
+                                                                          &RGXForcedIdleRequest, &RGXCancelForcedIdleRequest,
+                                                                          &RGXPowUnitsStateMaskChange,
+                                                                          (IMG_HANDLE)psDeviceNode,
+                                                                          PVRSRV_DEV_POWER_STATE_OFF,
+                                                                          eDefaultPowerState);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRegisterPowerDevice", ErrorExit);
+
+       eError = RGXSetPowerParams(psDevInfo, psDevConfig);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetPowerParams", ErrorExit);
+
+#if defined(SUPPORT_VALIDATION)
+       {
+               void *pvAppHintState = NULL;
+
+               IMG_UINT32 ui32AppHintDefault;
+
+               OSCreateKMAppHintState(&pvAppHintState);
+               ui32AppHintDefault = PVRSRV_APPHINT_TESTSLRINTERVAL;
+               OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, TestSLRInterval,
+                                    &ui32AppHintDefault, &psDevInfo->ui32TestSLRInterval);
+               PVR_LOG(("OSGetKMAppHintUINT32(TestSLRInterval) ui32AppHintDefault=%d, psDevInfo->ui32TestSLRInterval=%d",
+                       ui32AppHintDefault, psDevInfo->ui32TestSLRInterval));
+               OSFreeKMAppHintState(pvAppHintState);
+               psDevInfo->ui32TestSLRCount = psDevInfo->ui32TestSLRInterval;
+               psDevInfo->ui32SLRSkipFWAddr = 0;
+       }
+#endif
+
+#if defined(PDUMP)
+#if defined(NO_HARDWARE)
+       PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_DEINIT, "Wait for the FW to signal idle");
+
+       /* Kick the FW once, in case it still needs to detect and set the idle state */
+       PDUMPREG32(psDeviceNode, RGX_PDUMPREG_NAME,
+                          RGX_CR_MTS_SCHEDULE,
+                          RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK,
+                          PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_DEINIT);
+
+       eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfFwSysDataMemDesc,
+                                       offsetof(RGXFWIF_SYSDATA, ePowState),
+                                       RGXFWIF_POW_IDLE,
+                                       0xFFFFFFFFU,
+                                       PDUMP_POLL_OPERATOR_EQUAL,
+                                       PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_DEINIT);
+       PVR_LOG_GOTO_IF_ERROR(eError, "DevmemPDumpDevmemPol32", ErrorExit);
+#endif
+
+       /* Run RGXStop with the correct PDump flags to feed the last-frame deinit buffer */
+       PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_DEINIT,
+                             "RGX deinitialisation commands");
+
+       psDevInfo->sLayerParams.ui32PdumpFlags |= PDUMP_FLAGS_DEINIT | PDUMP_FLAGS_NOHW;
+
+       if (! PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               eError = RGXStop(&psDevInfo->sLayerParams);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXStop", ErrorExit);
+       }
+
+       psDevInfo->sLayerParams.ui32PdumpFlags &= ~(PDUMP_FLAGS_DEINIT | PDUMP_FLAGS_NOHW);
+#endif
+
+#if !defined(NO_HARDWARE)
+       eError = RGXInstallProcessQueuesMISR(&psDevInfo->hProcessQueuesMISR, psDeviceNode);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXInstallProcessQueuesMISR", ErrorExit);
+
+       /* Register RGX to receive notifies when other devices complete some work */
+       PVRSRVRegisterCmdCompleteNotify(&psDeviceNode->hCmdCompNotify, &RGXScheduleProcessQueuesKM, psDeviceNode);
+
+       /* Register the interrupt handlers */
+       eError = OSInstallMISR(&psDevInfo->pvMISRData,
+                       RGX_MISRHandler_Main,
+                       psDeviceNode,
+                       "RGX_Main");
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSInstallMISR(MISR)", ErrorExit);
+
+       /* Only the HOST_IRQ bus is supported on Volcanic for IRQ delivery */
+       psDevInfo->pfnRGXAckIrq = RGXAckIrqDedicated;
+
+       eError = SysInstallDeviceLISR(psDevConfig->hSysData,
+                                                                 psDevConfig->ui32IRQ,
+                                                                 PVRSRV_MODNAME,
+                                                                 RGX_LISRHandler,
+                                                                 psDeviceNode,
+                                                                 &psDevInfo->pvLISRData);
+       PVR_LOG_GOTO_IF_ERROR(eError, "SysInstallDeviceLISR", ErrorExit);
+#endif /* !defined(NO_HARDWARE) */
+
+#if defined(PDUMP)
+/* We need to wrap the check for S7_CACHE_HIERARCHY being supported inside
+ * #if defined(RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK)...#endif, as the
+ * RGX_IS_FEATURE_SUPPORTED macro references a bitmask define derived from its
+ * last parameter which will not exist on architectures which do not have this
+ * feature.
+ * Note we check for RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK rather than for
+ * RGX_FEATURE_S7_CACHE_HIERARCHY (which might seem a better choice) as this
+ * means we can build the kernel driver without having to worry about the BVNC
+ * (the BIT_MASK is defined in rgx_bvnc_defs_km.h for all BVNCs for a given
+ *  architecture, whereas the FEATURE is only defined for those BVNCs that
+ *  support it).
+ */
+#if defined(RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK)
+       if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_CACHE_HIERARCHY)))
+#endif
+       {
+               if (!PVRSRVSystemSnoopingOfCPUCache(psDevConfig) &&
+                               !PVRSRVSystemSnoopingOfDeviceCache(psDevConfig))
+               {
+                       PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                             "System has NO cache snooping");
+               }
+               else
+               {
+                       if (PVRSRVSystemSnoopingOfCPUCache(psDevConfig))
+                       {
+                               PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                                     "System has CPU cache snooping");
+                       }
+                       if (PVRSRVSystemSnoopingOfDeviceCache(psDevConfig))
+                       {
+                               PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                                     "System has DEVICE cache snooping");
+                       }
+               }
+       }
+#endif
+
+       eError = PVRSRVTQLoadShaders(psDeviceNode);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVTQLoadShaders", ErrorExit);
+
+       psDevInfo->bDevInit2Done = IMG_TRUE;
+
+       return PVRSRV_OK;
+
+ErrorExit:
+       DevPart2DeInitRGX(psDeviceNode);
+
+       return eError;
+}
+
+#define VZ_RGX_FW_FILENAME_SUFFIX ".vz"
+#define RGX_FW_FILENAME_MAX_SIZE   ((sizeof(RGX_FW_FILENAME)+ \
+                       RGX_BVNC_STR_SIZE_MAX+sizeof(VZ_RGX_FW_FILENAME_SUFFIX)))
+
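+/* _GetFWFileName() builds two candidate firmware image names from the device BVNC,
+ * using RGX_BVNC_STR_FMTSPEC and RGX_BVNC_STRP_FMTSPEC respectively, and appends
+ * ".vz" when not running as a native driver. RGXLoadAndGetFWData() then tries both
+ * in turn before falling back to the plain RGX_FW_FILENAME. */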
+static void _GetFWFileName(PVRSRV_DEVICE_NODE *psDeviceNode,
+               IMG_CHAR *pszFWFilenameStr,
+               IMG_CHAR *pszFWpFilenameStr)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       const IMG_CHAR * const pszFWFilenameSuffix =
+                       PVRSRV_VZ_MODE_IS(NATIVE) ? "" : VZ_RGX_FW_FILENAME_SUFFIX;
+
+       OSSNPrintf(pszFWFilenameStr, RGX_FW_FILENAME_MAX_SIZE,
+                       "%s." RGX_BVNC_STR_FMTSPEC "%s",
+                       RGX_FW_FILENAME,
+                       psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V,
+                       psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C,
+                       pszFWFilenameSuffix);
+
+       OSSNPrintf(pszFWpFilenameStr, RGX_FW_FILENAME_MAX_SIZE,
+                       "%s." RGX_BVNC_STRP_FMTSPEC "%s",
+                       RGX_FW_FILENAME,
+                       psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V,
+                       psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C,
+                       pszFWFilenameSuffix);
+}
+
+PVRSRV_ERROR RGXLoadAndGetFWData(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                 OS_FW_IMAGE **ppsRGXFW,
+                                 const IMG_BYTE **ppbFWData)
+{
+       IMG_CHAR aszFWFilenameStr[RGX_FW_FILENAME_MAX_SIZE];
+       IMG_CHAR aszFWpFilenameStr[RGX_FW_FILENAME_MAX_SIZE];
+       IMG_CHAR *pszLoadedFwStr;
+       PVRSRV_ERROR eErr;
+
+       /* Prepare the image filenames to use in the following code */
+       _GetFWFileName(psDeviceNode, aszFWFilenameStr, aszFWpFilenameStr);
+
+       /* Get pointer to Firmware image */
+       pszLoadedFwStr = aszFWFilenameStr;
+       eErr = OSLoadFirmware(psDeviceNode, pszLoadedFwStr, OS_FW_VERIFY_FUNCTION, ppsRGXFW);
+       if (eErr == PVRSRV_ERROR_NOT_FOUND)
+       {
+               pszLoadedFwStr = aszFWpFilenameStr;
+               eErr = OSLoadFirmware(psDeviceNode, pszLoadedFwStr, OS_FW_VERIFY_FUNCTION, ppsRGXFW);
+               if (eErr == PVRSRV_ERROR_NOT_FOUND)
+               {
+                       pszLoadedFwStr = RGX_FW_FILENAME;
+                       eErr = OSLoadFirmware(psDeviceNode, pszLoadedFwStr, OS_FW_VERIFY_FUNCTION, ppsRGXFW);
+                       if (eErr == PVRSRV_ERROR_NOT_FOUND)
+                       {
+                               PVR_DPF((PVR_DBG_FATAL, "All RGX Firmware image loads failed for '%s' (%s)",
+                                               aszFWFilenameStr, PVRSRVGetErrorString(eErr)));
+                       }
+               }
+       }
+
+       if (eErr == PVRSRV_OK)
+       {
+               PVR_LOG(("RGX Firmware image '%s' loaded", pszLoadedFwStr));
+               *ppbFWData = (const IMG_BYTE*)OSFirmwareData(*ppsRGXFW);
+       }
+       else
+       {
+               *ppbFWData = NULL;
+       }
+
+       return eErr;
+
+}
+
+#if defined(PDUMP)
+PVRSRV_ERROR RGXInitHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       RGXFWIF_KCCB_CMD sKccbCmd;
+       PVRSRV_ERROR eError;
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+       /* Fill in the command structure with the parameters needed */
+       sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT;
+
+       eError = RGXSendCommandWithPowLock(psDeviceNode->pvDevice,
+                                          &sKccbCmd,
+                                          PDUMP_FLAGS_CONTINUOUS);
+
+       return eError;
+}
+#endif
+
+PVRSRV_ERROR RGXInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       /* Set up the FW memory contexts */
+       PVRSRV_RGXDEV_INFO   *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig;
+       PVRSRV_ERROR eError;
+
+#if defined(SUPPORT_AUTOVZ)
+       PHYS_HEAP *psDefaultPhysHeap = psDeviceNode->psMMUPhysHeap;
+
+       if (PVRSRV_VZ_MODE_IS(HOST) && (!psDeviceNode->bAutoVzFwIsUp))
+       {
+               /* Temporarily swap the MMU and default GPU physheap to allow the page
+                * tables of all memory mapped by the FwKernel context to be placed
+                * in a dedicated memory carveout. This should allow the firmware mappings to
+                * persist after a Host kernel crash or driver reset. */
+
+               psDeviceNode->psMMUPhysHeap = psDeviceNode->psFwMMUReservedPhysHeap;
+       }
+#endif
+
+       /* Set the device fabric coherency before FW context creation */
+       eError = RGXSystemGetFabricCoherency(psDevConfig,
+                                                                                psDevConfig->sRegsCpuPBase,
+                                                                                psDevConfig->ui32RegsSize,
+                                                                                &psDeviceNode->eDevFabricType,
+                                                                                &psDevConfig->eCacheSnoopingMode);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed RGXSystemGetFabricCoherency (%u)",
+                        __func__,
+                        eError));
+               goto failed_to_create_ctx;
+       }
+
+       /* Register callbacks for creation of device memory contexts */
+       psDeviceNode->pfnRegisterMemoryContext = RGXRegisterMemoryContext;
+       psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext;
+
+       /* Create the memory context for the firmware. */
+       eError = DevmemCreateContext(psDeviceNode, DEVMEM_HEAPCFG_META,
+                                    &psDevInfo->psKernelDevmemCtx);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed DevmemCreateContext (%u)",
+                        __func__,
+                        eError));
+               goto failed_to_create_ctx;
+       }
+
+       eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, RGX_FIRMWARE_MAIN_HEAP_IDENT,
+                                     &psDevInfo->psFirmwareMainHeap);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed DevmemFindHeapByName (%u)",
+                        __func__,
+                        eError));
+               goto failed_to_find_heap;
+       }
+
+       eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, RGX_FIRMWARE_CONFIG_HEAP_IDENT,
+                                     &psDevInfo->psFirmwareConfigHeap);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed DevmemFindHeapByName (%u)",
+                        __func__,
+                        eError));
+               goto failed_to_find_heap;
+       }
+
+#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)
+       if (PVRSRV_VZ_MODE_IS(HOST))
+       {
+               IMG_UINT32 ui32OSID;
+               for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++)
+               {
+                       IMG_CHAR szHeapName[RA_MAX_NAME_LENGTH];
+
+                       OSSNPrintf(szHeapName, sizeof(szHeapName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSID);
+                       eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, szHeapName,
+                                                                                 &psDevInfo->psGuestFirmwareRawHeap[ui32OSID]);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "DevmemFindHeapByName", failed_to_find_heap);
+               }
+       }
+#endif
+
+#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
+       if (PVRSRV_VZ_MODE_IS(HOST))
+       {
+               IMG_DEV_PHYADDR sPhysHeapBase;
+               IMG_UINT32 ui32OSID;
+
+               eError = PhysHeapGetDevPAddr(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN], &sPhysHeapBase);
+               PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapGetDevPAddr", failed_to_find_heap);
+
+               for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++)
+               {
+                       IMG_DEV_PHYADDR sRawFwHeapBase = {sPhysHeapBase.uiAddr + (ui32OSID * RGX_FIRMWARE_RAW_HEAP_SIZE)};
+
+                       eError = RGXFwRawHeapAllocMap(psDeviceNode,
+                                                                                 ui32OSID,
+                                                                                 sRawFwHeapBase,
+                                                                                 RGX_FIRMWARE_RAW_HEAP_SIZE);
+                       if (eError != PVRSRV_OK)
+                       {
+                               for (; ui32OSID > RGX_FIRST_RAW_HEAP_OSID; ui32OSID--)
+                               {
+                                       RGXFwRawHeapUnmapFree(psDeviceNode, ui32OSID);
+                               }
+                               PVR_LOG_GOTO_IF_ERROR(eError, "RGXFwRawHeapAllocMap", failed_to_find_heap);
+                       }
+               }
+
+#if defined(SUPPORT_AUTOVZ)
+               /* restore default Px setup */
+               psDeviceNode->psMMUPhysHeap = psDefaultPhysHeap;
+#endif
+       }
+#else
+       if (PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               eError = PvzClientMapDevPhysHeap(psDeviceNode->psDevConfig);
+               PVR_LOG_GOTO_IF_ERROR(eError, "PvzClientMapDevPhysHeap", failed_to_find_heap);
+       }
+#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */
+
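+       /* Guest firmware heaps are mapped on the guest's behalf by the host; flag
+        * them as pre-mapped so no local device mappings are attempted. */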
+       if (PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               DevmemHeapSetPremapStatus(psDevInfo->psFirmwareMainHeap, IMG_TRUE);
+               DevmemHeapSetPremapStatus(psDevInfo->psFirmwareConfigHeap, IMG_TRUE);
+       }
+
+       return eError;
+
+failed_to_find_heap:
+       /*
+        * Clear the mem context create callbacks before destroying the RGX firmware
+        * context to avoid a spurious callback.
+        */
+       psDeviceNode->pfnRegisterMemoryContext = NULL;
+       psDeviceNode->pfnUnregisterMemoryContext = NULL;
+       DevmemDestroyContext(psDevInfo->psKernelDevmemCtx);
+       psDevInfo->psKernelDevmemCtx = NULL;
+failed_to_create_ctx:
+       return eError;
+}
+
+void RGXDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_ERROR       eError;
+
+#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
+       if (PVRSRV_VZ_MODE_IS(HOST))
+       {
+#if defined(SUPPORT_AUTOVZ)
+               PHYS_HEAP *psDefaultPhysHeap = psDeviceNode->psMMUPhysHeap;
+
+               psDeviceNode->psMMUPhysHeap = psDeviceNode->psFwMMUReservedPhysHeap;
+
+               if (!psDeviceNode->bAutoVzFwIsUp)
+#endif
+               {
+                       IMG_UINT32 ui32OSID;
+
+                       for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++)
+                       {
+                               RGXFwRawHeapUnmapFree(psDeviceNode, ui32OSID);
+                       }
+               }
+#if defined(SUPPORT_AUTOVZ)
+               psDeviceNode->psMMUPhysHeap = psDefaultPhysHeap;
+#endif
+       }
+#else
+       if (PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               (void) PvzClientUnmapDevPhysHeap(psDeviceNode->psDevConfig);
+
+               if (psDevInfo->psFirmwareMainHeap)
+               {
+                       DevmemHeapSetPremapStatus(psDevInfo->psFirmwareMainHeap, IMG_FALSE);
+               }
+               if (psDevInfo->psFirmwareConfigHeap)
+               {
+                       DevmemHeapSetPremapStatus(psDevInfo->psFirmwareConfigHeap, IMG_FALSE);
+               }
+       }
+#endif
+
+       /*
+        * Clear the mem context create callbacks before destroying the RGX firmware
+        * context to avoid a spurious callback.
+        */
+       psDeviceNode->pfnRegisterMemoryContext = NULL;
+       psDeviceNode->pfnUnregisterMemoryContext = NULL;
+
+       if (psDevInfo->psKernelDevmemCtx)
+       {
+               eError = DevmemDestroyContext(psDevInfo->psKernelDevmemCtx);
+               PVR_ASSERT(eError == PVRSRV_OK);
+       }
+}
+
+static PVRSRV_ERROR RGXAlignmentCheck(PVRSRV_DEVICE_NODE *psDevNode,
+                                      IMG_UINT32 ui32AlignChecksSizeUM,
+                                      IMG_UINT32 aui32AlignChecksUM[])
+{
+       static const IMG_UINT32 aui32AlignChecksKM[] = {RGXFW_ALIGN_CHECKS_INIT_KM};
+       IMG_UINT32 ui32UMChecksOffset = ARRAY_SIZE(aui32AlignChecksKM) + 1;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice;
+       IMG_UINT32 i, *paui32FWAlignChecks;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       /* Skip the alignment check if the driver is guest
+          since there is no firmware to check against */
+       PVRSRV_VZ_RET_IF_MODE(GUEST, eError);
+
+       if (psDevInfo->psRGXFWAlignChecksMemDesc == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: FW Alignment Check Mem Descriptor is NULL",
+                        __func__));
+               return PVRSRV_ERROR_ALIGNMENT_ARRAY_NOT_AVAILABLE;
+       }
+
+       eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc,
+                                         (void **) &paui32FWAlignChecks);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed to acquire kernel address for alignment checks (%u)",
+                        __func__,
+                        eError));
+               return eError;
+       }
+
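+       /* Skip the KM count word plus the KM check entries so the pointer lands
+        * on the UM count that follows them. */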
+       paui32FWAlignChecks += ui32UMChecksOffset;
+       if (*paui32FWAlignChecks++ != ui32AlignChecksSizeUM)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Mismatching sizes of RGXFW_ALIGN_CHECKS_INIT"
+                        " array between UM(%d) and FW(%d)",
+                        __func__,
+                        ui32AlignChecksSizeUM,
+                        *paui32FWAlignChecks));
+               eError = PVRSRV_ERROR_INVALID_ALIGNMENT;
+               goto return_;
+       }
+
+       for (i = 0; i < ui32AlignChecksSizeUM; i++)
+       {
+               if (aui32AlignChecksUM[i] != paui32FWAlignChecks[i])
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: size/offset mismatch in RGXFW_ALIGN_CHECKS_INIT[%d]"
+                                       " between UM(%d) and FW(%d)",
+                                       __func__, i, aui32AlignChecksUM[i], paui32FWAlignChecks[i]));
+                       eError = PVRSRV_ERROR_INVALID_ALIGNMENT;
+               }
+       }
+
+       if (eError == PVRSRV_ERROR_INVALID_ALIGNMENT)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Check for FW/KM structure"
+                               " alignment failed.", __func__));
+       }
+
+return_:
+
+       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc);
+
+       return eError;
+}
+
+static
+PVRSRV_ERROR RGXAllocateFWMemoryRegion(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                       IMG_DEVMEM_SIZE_T ui32Size,
+                                       PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags,
+                                       const IMG_PCHAR pszText,
+                                       DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_DEVMEM_LOG2ALIGN_T uiLog2Align = OSGetPageShift();
+
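+       /* Request write-combined, zeroed allocations; when an AutoVz firmware is
+        * already running, RGX_AUTOVZ_KEEP_FW_DATA_MASK relaxes the zero-on-alloc
+        * request so the live firmware data is preserved. */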
+       uiMemAllocFlags = (uiMemAllocFlags |
+                                          PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC |
+                                          PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) &
+                          RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp);
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(SUPPORT_SECURITY_VALIDATION)
+       uiMemAllocFlags &= PVRSRV_MEMALLOCFLAGS_TDFWMASK;
+#endif
+
+       PVR_UNREFERENCED_PARAMETER(uiLog2Align);
+
+       PDUMPCOMMENT(psDeviceNode, "Allocate FW %s memory", pszText);
+
+       eError = DevmemFwAllocate(psDeviceNode->pvDevice,
+                                 ui32Size,
+                                 uiMemAllocFlags,
+                                 pszText,
+                                 ppsMemDescPtr);
+
+       return eError;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function     RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver
+
+ @Description
+
+ Validate the FW build options against KM driver build options (KM build options only)
+
+ The following check is redundant because the next check covers the same bits:
+ if client and server are build-compatible, and client and firmware are
+ build-compatible, then server and firmware are build-compatible as well.
+
+ The check is kept so that any incompatibility produces a clearer error message.
+
+ @Input psFwOsInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(RGXFWIF_OSINIT *psFwOsInit)
+{
+#if !defined(NO_HARDWARE)
+       IMG_UINT32                      ui32BuildOptions, ui32BuildOptionsFWKMPart, ui32BuildOptionsMismatch;
+
+       if (psFwOsInit == NULL)
+               return PVRSRV_ERROR_INVALID_PARAMS;
+
+       ui32BuildOptions = (RGX_BUILD_OPTIONS_KM & RGX_BUILD_OPTIONS_MASK_FW);
+
+       ui32BuildOptionsFWKMPart = psFwOsInit->sRGXCompChecks.ui32BuildOptions & RGX_BUILD_OPTIONS_MASK_FW;
+
+       /* Check if the FW is missing support for any features required by the driver */
+       if (~ui32BuildOptionsFWKMPart & ui32BuildOptions)
+       {
+               ui32BuildOptionsMismatch = ui32BuildOptions ^ ui32BuildOptionsFWKMPart;
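+               /* The XOR leaves only the differing bits; each side is then checked
+                * for options the other is missing. */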
+#if !defined(PVRSRV_STRICT_COMPAT_CHECK)
+               /* Mask the debug flag option out, as combinations of debug vs release builds are supported between UM and KM */
+               ui32BuildOptionsMismatch &= ~OPTIONS_DEBUG_MASK;
+#endif
+               if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
+               {
+                       PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and KM driver build options; "
+                                       "extra options present in the KM driver: (0x%x). Please check rgx_options.h",
+                                       ui32BuildOptions & ui32BuildOptionsMismatch ));
+                       return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+               }
+
+               if ( (ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch) != 0)
+               {
+                       PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware-side and KM driver build options; "
+                                       "extra options present in Firmware: (0x%x). Please check rgx_options.h",
+                                       ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch ));
+                       return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+               }
+               PVR_DPF((PVR_DBG_WARNING, "RGXDevInitCompatCheck: Firmware and KM driver build options differ."));
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware and KM driver build options match. [ OK ]"));
+       }
+#endif
+
+       return PVRSRV_OK;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function     RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver
+
+ @Description
+
+ Validate FW DDK version against driver DDK version
+
+ @Input psDevInfo - device info
+ @Input psFwOsInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                                                                                       RGXFWIF_OSINIT *psFwOsInit)
+{
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+       IMG_UINT32                      ui32DDKVersion;
+       PVRSRV_ERROR            eError;
+
+       ui32DDKVersion = PVRVERSION_PACK(PVRVERSION_MAJ, PVRVERSION_MIN);
+#endif
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                    "Compatibility check: KM driver and FW DDK version");
+       eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc,
+                                                                                               offsetof(RGXFWIF_OSINIT, sRGXCompChecks) +
+                                                                                               offsetof(RGXFWIF_COMPCHECKS, ui32DDKVersion),
+                                                                                               ui32DDKVersion,
+                                                                                               0xffffffff,
+                                                                                               PDUMP_POLL_OPERATOR_EQUAL,
+                                                                                               PDUMP_FLAGS_CONTINUOUS);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError));
+               return eError;
+       }
+#endif
+
+#if !defined(NO_HARDWARE)
+       if (psFwOsInit == NULL)
+               return PVRSRV_ERROR_INVALID_PARAMS;
+
+       if (psFwOsInit->sRGXCompChecks.ui32DDKVersion != ui32DDKVersion)
+       {
+               PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible driver DDK version (%u.%u) / Firmware DDK revision (%u.%u).",
+                               PVRVERSION_MAJ, PVRVERSION_MIN,
+                               PVRVERSION_UNPACK_MAJ(psFwOsInit->sRGXCompChecks.ui32DDKVersion),
+                               PVRVERSION_UNPACK_MIN(psFwOsInit->sRGXCompChecks.ui32DDKVersion)));
+               eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH;
+               PVR_DBG_BREAK;
+               return eError;
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK version (%u.%u) and Firmware DDK revision (%u.%u) match. [ OK ]",
+                               PVRVERSION_MAJ, PVRVERSION_MIN,
+                               PVRVERSION_MAJ, PVRVERSION_MIN));
+       }
+#endif
+
+       return PVRSRV_OK;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function     RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver
+
+ @Description
+
+ Validate FW DDK build against driver DDK build
+
+ @Input psDevInfo - device info
+ @Input psFwOsInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                                                                                       RGXFWIF_OSINIT *psFwOsInit)
+{
+       PVRSRV_ERROR            eError = PVRSRV_OK;
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+       IMG_UINT32                      ui32DDKBuild;
+
+       ui32DDKBuild = PVRVERSION_BUILD;
+#endif
+
+#if defined(PDUMP) && defined(PVRSRV_STRICT_COMPAT_CHECK)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                    "Compatibility check: KM driver and FW DDK build");
+       eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc,
+                                                                                               offsetof(RGXFWIF_OSINIT, sRGXCompChecks) +
+                                                                                               offsetof(RGXFWIF_COMPCHECKS, ui32DDKBuild),
+                                                                                               ui32DDKBuild,
+                                                                                               0xffffffff,
+                                                                                               PDUMP_POLL_OPERATOR_EQUAL,
+                                                                                               PDUMP_FLAGS_CONTINUOUS);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError));
+               return eError;
+       }
+#endif
+
+#if !defined(NO_HARDWARE)
+       if (psFwOsInit == NULL)
+               return PVRSRV_ERROR_INVALID_PARAMS;
+
+       if (psFwOsInit->sRGXCompChecks.ui32DDKBuild != ui32DDKBuild)
+       {
+               PVR_LOG(("(WARN) RGXDevInitCompatCheck: Different driver DDK build version (%d) / Firmware DDK build version (%d).",
+                               ui32DDKBuild, psFwOsInit->sRGXCompChecks.ui32DDKBuild));
+#if defined(PVRSRV_STRICT_COMPAT_CHECK)
+               eError = PVRSRV_ERROR_DDK_BUILD_MISMATCH;
+               PVR_DBG_BREAK;
+               return eError;
+#endif
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK build version (%d) and Firmware DDK build version (%d) match. [ OK ]",
+                               ui32DDKBuild, psFwOsInit->sRGXCompChecks.ui32DDKBuild));
+       }
+#endif
+       return eError;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function     RGXDevInitCompatCheck_BVNC_FWAgainstDriver
+
+ @Description
+
+ Validate FW BVNC against driver BVNC
+
+ @Input psDevInfo - device info
+ @Input psFwOsInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                                                                                       RGXFWIF_OSINIT *psFwOsInit)
+{
+#if !defined(NO_HARDWARE)
+       IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleBVNC;
+#endif
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+       RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sBVNC);
+       PVRSRV_ERROR                            eError;
+
+       sBVNC.ui64BVNC = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B,
+                                       psDevInfo->sDevFeatureCfg.ui32V,
+                                       psDevInfo->sDevFeatureCfg.ui32N,
+                                       psDevInfo->sDevFeatureCfg.ui32C);
+#endif
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                    "Compatibility check: KM driver and FW BVNC (struct version)");
+       eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc,
+                                                                                       offsetof(RGXFWIF_OSINIT, sRGXCompChecks) +
+                                                                                       offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+                                                                                       offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32LayoutVersion),
+                                                                                       sBVNC.ui32LayoutVersion,
+                                                                                       0xffffffff,
+                                                                                       PDUMP_POLL_OPERATOR_EQUAL,
+                                                                                       PDUMP_FLAGS_CONTINUOUS);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError));
+       }
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                    "Compatibility check: KM driver and FW BVNC (BNC part - lower 32 bits)");
+       eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc,
+                                                                                       offsetof(RGXFWIF_OSINIT, sRGXCompChecks) +
+                                                                                       offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+                                                                                       offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC),
+                                                                                       (IMG_UINT32)sBVNC.ui64BVNC,
+                                                                                       0xffffffff,
+                                                                                       PDUMP_POLL_OPERATOR_EQUAL,
+                                                                                       PDUMP_FLAGS_CONTINUOUS);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError));
+       }
+
+       PDUMPCOMMENT(psDevInfo->psDeviceNode,
+                    "Compatibility check: KM driver and FW BVNC (BNC part - Higher 32 bits)");
+       eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc,
+                                                                                       offsetof(RGXFWIF_OSINIT, sRGXCompChecks) +
+                                                                                       offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+                                                                                       offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC) +
+                                                                                       sizeof(IMG_UINT32),
+                                                                                       (IMG_UINT32)(sBVNC.ui64BVNC >> 32),
+                                                                                       0xffffffff,
+                                                                                       PDUMP_POLL_OPERATOR_EQUAL,
+                                                                                       PDUMP_FLAGS_CONTINUOUS);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError));
+       }
+#endif
+
+#if !defined(NO_HARDWARE)
+       if (psFwOsInit == NULL)
+               return PVRSRV_ERROR_INVALID_PARAMS;
+
+       RGX_BVNC_EQUAL(sBVNC, psFwOsInit->sRGXCompChecks.sFWBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleBVNC);
+
+       if (PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               bCompatibleAll = IMG_TRUE;
+       }
+
+       if (!bCompatibleAll)
+       {
+               if (!bCompatibleVersion)
+               {
+                       PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of driver (%u) and firmware (%u).",
+                                       __func__,
+                                       sBVNC.ui32LayoutVersion,
+                                       psFwOsInit->sRGXCompChecks.sFWBVNC.ui32LayoutVersion));
+                       eError = PVRSRV_ERROR_BVNC_MISMATCH;
+                       return eError;
+               }
+
+               if (!bCompatibleBVNC)
+               {
+                       PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in KM driver BVNC (%u.%u.%u.%u) and Firmware BVNC (%u.%u.%u.%u)",
+                                       RGX_BVNC_PACKED_EXTR_B(sBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_V(sBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_N(sBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_C(sBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_B(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_V(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_N(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_C(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC)));
+                       eError = PVRSRV_ERROR_BVNC_MISMATCH;
+                       return eError;
+               }
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware BVNC and KM driver BVNC match. [ OK ]"));
+       }
+#endif
+       return PVRSRV_OK;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function     RGXDevInitCompatCheck_BVNC_HWAgainstDriver
+
+ @Description
+
+ Validate HW BVNC against driver BVNC
+
+ @Input psDevInfo - device info
+ @Input psFwOsInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_HWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                                                                       RGXFWIF_OSINIT *psFwOsInit)
+{
+#if defined(PDUMP) || !defined(NO_HARDWARE)
+       IMG_UINT64 ui64MaskBVNC = RGX_BVNC_PACK_MASK_B |
+                                 RGX_BVNC_PACK_MASK_V |
+                                 RGX_BVNC_PACK_MASK_N |
+                                 RGX_BVNC_PACK_MASK_C;
+
+       PVRSRV_ERROR                            eError;
+       RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sSWBVNC);
+#endif
+
+#if defined(PDUMP)
+       PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+#endif
+
+#if !defined(NO_HARDWARE)
+       RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sHWBVNC);
+       IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleBVNC;
+#endif
+
+       if (psDevInfo->bIgnoreHWReportedBVNC)
+       {
+               PVR_LOG(("BVNC compatibility checks between driver and HW are disabled (AppHint override)"));
+               return PVRSRV_OK;
+       }
+
+#if defined(PDUMP) || !defined(NO_HARDWARE)
+#if defined(COMPAT_BVNC_MASK_B)
+       ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_B;
+#endif
+#if defined(COMPAT_BVNC_MASK_V)
+       ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_V;
+#endif
+#if defined(COMPAT_BVNC_MASK_N)
+       ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_N;
+#endif
+#if defined(COMPAT_BVNC_MASK_C)
+       ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_C;
+#endif
+
+       sSWBVNC.ui64BVNC = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B,
+                                                                       psDevInfo->sDevFeatureCfg.ui32V,
+                                                                       psDevInfo->sDevFeatureCfg.ui32N,
+                                                                       psDevInfo->sDevFeatureCfg.ui32C);
+
+       if (ui64MaskBVNC != (RGX_BVNC_PACK_MASK_B | RGX_BVNC_PACK_MASK_V | RGX_BVNC_PACK_MASK_N | RGX_BVNC_PACK_MASK_C))
+       {
+               PVR_LOG(("Compatibility checks: Ignoring fields: '%s%s%s%s' of HW BVNC.",
+                               ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_B))?("B"):("")),
+                               ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_V))?("V"):("")),
+                               ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_N))?("N"):("")),
+                               ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_C))?("C"):(""))));
+       }
+#endif
+
+#if defined(PDUMP)
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags,
+                             "Compatibility check: Layout version of compchecks struct");
+       eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc,
+                                                                                       offsetof(RGXFWIF_OSINIT, sRGXCompChecks) +
+                                                                                       offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+                                                                                       offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32LayoutVersion),
+                                                                                       sSWBVNC.ui32LayoutVersion,
+                                                                                       0xffffffff,
+                                                                                       PDUMP_POLL_OPERATOR_EQUAL,
+                                                                                       ui32PDumpFlags);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError));
+               return eError;
+       }
+
+       PDUMPCOM(psDevInfo->psDeviceNode, ui32PDumpFlags, "BVNC compatibility check started");
+       if (ui64MaskBVNC & (RGX_BVNC_PACK_MASK_B | RGX_BVNC_PACK_MASK_N | RGX_BVNC_PACK_MASK_C))
+       {
+               PDUMPIF(psDevInfo->psDeviceNode, "DISABLE_HWBNC_CHECK", ui32PDumpFlags);
+               PDUMPELSE(psDevInfo->psDeviceNode, "DISABLE_HWBNC_CHECK", ui32PDumpFlags);
+               PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags,
+                                     "Compatibility check: HW BNC and FW BNC (Lower 32 bits)");
+               eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc,
+                                                                                               offsetof(RGXFWIF_OSINIT, sRGXCompChecks) +
+                                                                                               offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+                                                                                               offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC),
+                                                                                               (IMG_UINT32)sSWBVNC.ui64BVNC ,
+                                                                                               (IMG_UINT32)(ui64MaskBVNC & ~RGX_BVNC_PACK_MASK_V),
+                                                                                               PDUMP_POLL_OPERATOR_EQUAL,
+                                                                                               ui32PDumpFlags);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError));
+                       return eError;
+               }
+
+               PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags,
+                                     "Compatibility check: HW BNC and FW BNC (Higher 32 bits)");
+               eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc,
+                                                                                               offsetof(RGXFWIF_OSINIT, sRGXCompChecks) +
+                                                                                               offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+                                                                                               offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC) +
+                                                                                               sizeof(IMG_UINT32),
+                                                                                               (IMG_UINT32)(sSWBVNC.ui64BVNC >> 32),
+                                                                                               (IMG_UINT32)((ui64MaskBVNC & ~RGX_BVNC_PACK_MASK_V) >> 32),
+                                                                                               PDUMP_POLL_OPERATOR_EQUAL,
+                                                                                               ui32PDumpFlags);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError));
+                       return eError;
+               }
+
+               PDUMPFI(psDevInfo->psDeviceNode, "DISABLE_HWBNC_CHECK", ui32PDumpFlags);
+       }
+       if (ui64MaskBVNC & RGX_BVNC_PACK_MASK_V)
+       {
+               PDUMPIF(psDevInfo->psDeviceNode, "DISABLE_HWV_CHECK", ui32PDumpFlags);
+               PDUMPELSE(psDevInfo->psDeviceNode, "DISABLE_HWV_CHECK", ui32PDumpFlags);
+
+               PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags,
+                                     "Compatibility check: HW V and FW V");
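+               /* The V field may sit in either 32-bit half of the packed BVNC;
+                * poll the half that actually contains it. */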
+               eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc,
+                                       offsetof(RGXFWIF_OSINIT, sRGXCompChecks) +
+                                       offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+                                       offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC) +
+                                       ((RGX_BVNC_PACK_SHIFT_V >= 32) ? sizeof(IMG_UINT32) : 0),
+                                       (IMG_UINT32)(sSWBVNC.ui64BVNC >> ((RGX_BVNC_PACK_SHIFT_V >= 32) ? 32 : 0)),
+                                       RGX_BVNC_PACK_MASK_V >> ((RGX_BVNC_PACK_SHIFT_V >= 32) ? 32 : 0),
+                                       PDUMP_POLL_OPERATOR_EQUAL,
+                                       ui32PDumpFlags);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError));
+                       return eError;
+               }
+               PDUMPFI(psDevInfo->psDeviceNode, "DISABLE_HWV_CHECK", ui32PDumpFlags);
+       }
+       PDUMPCOM(psDevInfo->psDeviceNode, ui32PDumpFlags, "BVNC compatibility check finished");
+#endif
+
+#if !defined(NO_HARDWARE)
+       if (psFwOsInit == NULL)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       sHWBVNC = psFwOsInit->sRGXCompChecks.sHWBVNC;
+
+       sHWBVNC.ui64BVNC &= ui64MaskBVNC;
+       sSWBVNC.ui64BVNC &= ui64MaskBVNC;
+
+       RGX_BVNC_EQUAL(sSWBVNC, sHWBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleBVNC);
+
+       if (PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               bCompatibleAll = IMG_TRUE;
+       }
+
+       if (!bCompatibleAll)
+       {
+               if (!bCompatibleVersion)
+               {
+                       PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of HW (%d) and FW (%d).",
+                                       __func__,
+                                       sHWBVNC.ui32LayoutVersion,
+                                       sSWBVNC.ui32LayoutVersion));
+                       eError = PVRSRV_ERROR_BVNC_MISMATCH;
+                       return eError;
+               }
+
+               if (!bCompatibleBVNC)
+               {
+                       PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible HW BVNC (%d.%d.%d.%d) and FW BVNC (%d.%d.%d.%d).",
+                                       RGX_BVNC_PACKED_EXTR_B(sHWBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_V(sHWBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_N(sHWBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_C(sHWBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_B(sSWBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_V(sSWBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_N(sSWBVNC.ui64BVNC),
+                                       RGX_BVNC_PACKED_EXTR_C(sSWBVNC.ui64BVNC)));
+                       eError = PVRSRV_ERROR_BVNC_MISMATCH;
+                       return eError;
+               }
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: HW BVNC (%d.%d.%d.%d) and FW BVNC (%d.%d.%d.%d) match. [ OK ]",
+                               RGX_BVNC_PACKED_EXTR_B(sHWBVNC.ui64BVNC),
+                               RGX_BVNC_PACKED_EXTR_V(sHWBVNC.ui64BVNC),
+                               RGX_BVNC_PACKED_EXTR_N(sHWBVNC.ui64BVNC),
+                               RGX_BVNC_PACKED_EXTR_C(sHWBVNC.ui64BVNC),
+                               RGX_BVNC_PACKED_EXTR_B(sSWBVNC.ui64BVNC),
+                               RGX_BVNC_PACKED_EXTR_V(sSWBVNC.ui64BVNC),
+                               RGX_BVNC_PACKED_EXTR_N(sSWBVNC.ui64BVNC),
+                               RGX_BVNC_PACKED_EXTR_C(sSWBVNC.ui64BVNC)));
+       }
+#endif
+
+       return PVRSRV_OK;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function     RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver
+
+ @Description
+
+ Validate the HW firmware processor (META or RISC-V) version against the version expected by the driver
+
+ @Input psDevInfo - device info
+ @Input psFwOsInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                       RGXFWIF_OSINIT *psFwOsInit)
+{
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+       PVRSRV_ERROR            eError;
+#endif
+
+#if defined(PDUMP)
+       PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+#endif
+
+       IMG_UINT32      ui32FWCoreIDValue = 0;
+       IMG_CHAR *pcRGXFW_PROCESSOR = NULL;
+
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+       {
+               switch (RGX_GET_FEATURE_VALUE(psDevInfo, META))
+               {
+               case MTP218: ui32FWCoreIDValue = RGX_CR_META_MTP218_CORE_ID_VALUE; break;
+               case MTP219: ui32FWCoreIDValue = RGX_CR_META_MTP219_CORE_ID_VALUE; break;
+               case LTP218: ui32FWCoreIDValue = RGX_CR_META_LTP218_CORE_ID_VALUE; break;
+               case LTP217: ui32FWCoreIDValue = RGX_CR_META_LTP217_CORE_ID_VALUE; break;
+               default:
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Undefined FW_CORE_ID_VALUE", __func__));
+                       PVR_ASSERT(0);
+               }
+               pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_META;
+       }
+       else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+       {
+               ui32FWCoreIDValue = RGXRISCVFW_CORE_ID_VALUE;
+               pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_RISCV;
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Undefined FW_CORE_ID_VALUE", __func__));
+               PVR_ASSERT(0);
+       }
+
+#if defined(PDUMP)
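+       /* The poll is wrapped in a PDUMPIF/PDUMPELSE/PDUMPFI block so playback can
+        * skip it when the DISABLE_HWMETA_CHECK flag is set. */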
+       PDUMPIF(psDevInfo->psDeviceNode, "DISABLE_HWMETA_CHECK", ui32PDumpFlags);
+       PDUMPELSE(psDevInfo->psDeviceNode, "DISABLE_HWMETA_CHECK", ui32PDumpFlags);
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags,
+                             "Compatibility check: KM driver and HW FW Processor version");
+       eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc,
+                                       offsetof(RGXFWIF_OSINIT, sRGXCompChecks) +
+                                       offsetof(RGXFWIF_COMPCHECKS, ui32FWProcessorVersion),
+                                       ui32FWCoreIDValue,
+                                       0xffffffff,
+                                       PDUMP_POLL_OPERATOR_EQUAL,
+                                       ui32PDumpFlags);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError));
+               return eError;
+       }
+       PDUMPFI(psDevInfo->psDeviceNode, "DISABLE_HWMETA_CHECK", ui32PDumpFlags);
+#endif
+
+#if !defined(NO_HARDWARE)
+       if (psFwOsInit == NULL)
+               return PVRSRV_ERROR_INVALID_PARAMS;
+
+       if (psFwOsInit->sRGXCompChecks.ui32FWProcessorVersion != ui32FWCoreIDValue)
+       {
+               PVR_LOG(("RGXDevInitCompatCheck: Incompatible driver %s version (%d) / HW %s version (%d).",
+                                pcRGXFW_PROCESSOR,
+                                ui32FWCoreIDValue,
+                                pcRGXFW_PROCESSOR,
+                                psFwOsInit->sRGXCompChecks.ui32FWProcessorVersion));
+               eError = PVRSRV_ERROR_FWPROCESSOR_MISMATCH;
+               PVR_DBG_BREAK;
+               return eError;
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Compatible driver %s version (%d) / HW %s version (%d) [OK].",
+                                pcRGXFW_PROCESSOR,
+                                ui32FWCoreIDValue,
+                                pcRGXFW_PROCESSOR,
+                                psFwOsInit->sRGXCompChecks.ui32FWProcessorVersion));
+       }
+#endif
+       return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function     RGXDevInitCompatCheck
+
+ @Description
+
+ Check compatibility of host driver and firmware (DDK and build options)
+ for RGX devices at services/device initialisation
+
+ @Input psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR            eError;
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+#if !defined(NO_HARDWARE)
+       IMG_UINT32                      ui32RegValue;
+       IMG_UINT8                       ui8FwOsCount;
+       IMG_UINT32                      ui32FwTimeout = MAX_HW_TIME_US;
+
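+       /* Poll until the firmware flags its compatibility data as updated or the
+        * timeout expires. */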
+       LOOP_UNTIL_TIMEOUT(ui32FwTimeout)
+       {
+               if (*((volatile IMG_BOOL *)&psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated))
+               {
+                       /* No need to wait if the FW has already updated the values */
+                       break;
+               }
+               OSWaitus(ui32FwTimeout/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       ui32RegValue = 0;
+
+       if ((!PVRSRV_VZ_MODE_IS(GUEST)) &&
+               RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+       {
+               eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T0ENABLE_OFFSET, &ui32RegValue);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_LOG(("%s: Reading RGX META register failed. Is the GPU correctly powered up? (%u)",
+                                       __func__, eError));
+                       goto chk_exit;
+               }
+
+               if (!(ui32RegValue & META_CR_TXENABLE_ENABLE_BIT))
+               {
+                       eError = PVRSRV_ERROR_META_THREAD0_NOT_ENABLED;
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: RGX META is not running. Is the GPU correctly powered up? %d (%u)",
+                                       __func__, psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated, eError));
+                       goto chk_exit;
+               }
+       }
+
+       if (!*((volatile IMG_BOOL *)&psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated))
+       {
+               eError = PVRSRV_ERROR_TIMEOUT;
+               PVR_DPF((PVR_DBG_ERROR, "%s: GPU Firmware not responding: failed to supply compatibility info (%u)",
+                               __func__, eError));
+               if (PVRSRV_VZ_MODE_IS(GUEST))
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Potential causes: firmware not initialised or the current Guest driver's "
+                                                                       "OsConfig initialisation data was not accepted by the firmware", __func__));
+               }
+               goto chk_exit;
+       }
+
+       ui8FwOsCount = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.sInitOptions.ui8OsCountSupport;
+       if ((PVRSRV_VZ_MODE_IS(NATIVE) && (ui8FwOsCount > 1)) ||
+               (PVRSRV_VZ_MODE_IS(HOST) && (ui8FwOsCount != RGX_NUM_OS_SUPPORTED)))
+       {
+               PVR_DPF((PVR_DBG_WARNING, "%s: Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)",
+                               __func__, (PVRSRV_VZ_MODE_IS(NATIVE)) ? (1) : (RGX_NUM_OS_SUPPORTED), ui8FwOsCount));
+       }
+#endif /* !defined(NO_HARDWARE) */
+
+       eError = RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(psDevInfo->psRGXFWIfOsInit);
+       if (eError != PVRSRV_OK)
+       {
+               goto chk_exit;
+       }
+
+       eError = RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit);
+       if (eError != PVRSRV_OK)
+       {
+               goto chk_exit;
+       }
+
+       eError = RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit);
+       if (eError != PVRSRV_OK)
+       {
+               goto chk_exit;
+       }
+
+       if (!PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               eError = RGXDevInitCompatCheck_BVNC_FWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit);
+               if (eError != PVRSRV_OK)
+               {
+                       goto chk_exit;
+               }
+
+               eError = RGXDevInitCompatCheck_BVNC_HWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit);
+               if (eError != PVRSRV_OK)
+               {
+                       goto chk_exit;
+               }
+       }
+
+       eError = RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit);
+       if (eError != PVRSRV_OK)
+       {
+               goto chk_exit;
+       }
+
+       eError = PVRSRV_OK;
+chk_exit:
+
+       return eError;
+}
+
+static void _RGXSoftResetToggle(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                IMG_UINT64  ui64ResetValue,
+                                IMG_UINT64  ui64SPUResetValue)
+{
+       OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, ui64ResetValue);
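+       /* The SPU soft-reset register is only written on cores reporting
+        * POWER_ISLAND_VERSION 1. */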
+       if (RGX_GET_FEATURE_VALUE(psDevInfo, POWER_ISLAND_VERSION) == 1)
+       {
+               OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET_SPU, ui64SPUResetValue);
+       }
+
+       /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
+       (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET);
+       if (RGX_GET_FEATURE_VALUE(psDevInfo, POWER_ISLAND_VERSION) == 1)
+       {
+               (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET_SPU);
+       }
+}
+
+/**************************************************************************/ /*!
+@Function       RGXSoftReset
+@Description    Resets some modules of the RGX device
+@Input          psDeviceNode           Device node
+@Input          ui64ResetValue  A mask in which each set bit selects a
+                                module to reset (via the SOFT_RESET
+                                register).
+@Input          ui64SPUResetValue A mask in which each set bit selects a
+                                module to reset (via the SOFT_RESET_SPU
+                                register).
+@Return         PVRSRV_ERROR
+*/ /***************************************************************************/
+static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                 IMG_UINT64  ui64ResetValue,
+                                 IMG_UINT64  ui64SPUResetValue)
+{
+       PVRSRV_RGXDEV_INFO        *psDevInfo;
+
+       PVR_ASSERT(psDeviceNode != NULL);
+       PVR_ASSERT(psDeviceNode->pvDevice != NULL);
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+       if (((ui64ResetValue & RGX_CR_SOFT_RESET_MASKFULL) != ui64ResetValue)
+               || (ui64SPUResetValue & RGX_CR_SOFT_RESET_SPU_MASKFULL) != ui64SPUResetValue)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       /* The device info */
+       psDevInfo = psDeviceNode->pvDevice;
+
+       /* Set in soft-reset */
+       _RGXSoftResetToggle(psDevInfo, ui64ResetValue, ui64SPUResetValue);
+
+       /* Take the modules out of reset... */
+       _RGXSoftResetToggle(psDevInfo, 0, 0);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE   *psDeviceNode,
+                                  IMG_DEVMEM_SIZE_T    uiFWCodeLen,
+                                  IMG_DEVMEM_SIZE_T    uiFWDataLen,
+                                  IMG_DEVMEM_SIZE_T    uiFWCorememCodeLen,
+                                  IMG_DEVMEM_SIZE_T    uiFWCorememDataLen)
+{
+       PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags;
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_ERROR        eError;
+
+       /*
+        * Set up Allocation for FW code section
+        */
+       uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                         PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                         PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                         PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+                         PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                         PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+                         PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+                         PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+                         PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_CODE);
+
+       eError = RGXAllocateFWMemoryRegion(psDeviceNode,
+                                          uiFWCodeLen,
+                                          uiMemAllocFlags,
+                                          "FwCodeRegion",
+                                          &psDevInfo->psRGXFWCodeMemDesc);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "Failed to allocate fw code mem (%u)",
+                        eError));
+               goto failFWCodeMemDescAlloc;
+       }
+
+       eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc,
+                                         &psDevInfo->sFWCodeDevVAddrBase);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "Failed to acquire devVAddr for fw code mem (%u)",
+                        eError));
+               goto failFWCodeMemDescAqDevVirt;
+       }
+
+       /*
+        * The FW code must be the first allocation in the firmware heap, otherwise
+        * the bootloader will not work (the FW will not be able to find the bootloader).
+        */
+       PVR_ASSERT(psDevInfo->sFWCodeDevVAddrBase.uiAddr == RGX_FIRMWARE_MAIN_HEAP_BASE);
+
+       /*
+        * Set up Allocation for FW data section
+        */
+       uiMemAllocFlags = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                                          PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                                          PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                                          PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+                                          PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                                          PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+                                          PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+                                          PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+                                          PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC |
+                                          PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+                                          PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PRIV_DATA)) &
+                                          RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp);
+
+       eError = RGXAllocateFWMemoryRegion(psDeviceNode,
+                                          uiFWDataLen,
+                                          uiMemAllocFlags,
+                                          "FwDataRegion",
+                                          &psDevInfo->psRGXFWDataMemDesc);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "Failed to allocate fw data mem (%u)",
+                        eError));
+               goto failFWDataMemDescAlloc;
+       }
+
+       eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWDataMemDesc,
+                                         &psDevInfo->sFWDataDevVAddrBase);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "Failed to acquire devVAddr for fw data mem (%u)",
+                        eError));
+               goto failFWDataMemDescAqDevVirt;
+       }
+
+       if (uiFWCorememCodeLen != 0)
+       {
+               /*
+                * Set up Allocation for FW coremem code section
+                */
+               uiMemAllocFlags = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                                                  PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+                                                  PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                                                  PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+                                                  PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                                                  PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+                                                  PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+                                                  PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC |
+                                                  PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+                                                  PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_CODE)) &
+                                                  RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp);
+
+               eError = RGXAllocateFWMemoryRegion(psDeviceNode,
+                                                  uiFWCorememCodeLen,
+                                                  uiMemAllocFlags,
+                                                  "FwCorememCodeRegion",
+                                                  &psDevInfo->psRGXFWCorememCodeMemDesc);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "Failed to allocate fw coremem code mem, size: %"  IMG_INT64_FMTSPECd ", flags: %" PVRSRV_MEMALLOCFLAGS_FMTSPEC " (%u)",
+                                uiFWCorememCodeLen, uiMemAllocFlags, eError));
+                       goto failFWCorememMemDescAlloc;
+               }
+
+               eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc,
+                                                 &psDevInfo->sFWCorememCodeDevVAddrBase);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "Failed to acquire devVAddr for fw coremem mem code (%u)",
+                                eError));
+                       goto failFWCorememCodeMemDescAqDevVirt;
+               }
+
+               eError = RGXSetFirmwareAddress(&psDevInfo->sFWCorememCodeFWAddr,
+                                     psDevInfo->psRGXFWCorememCodeMemDesc,
+                                     0, RFW_FWADDR_NOREF_FLAG);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", failFWCorememCodeMemDescFwAddr);
+       }
+       else
+       {
+               psDevInfo->sFWCorememCodeDevVAddrBase.uiAddr = 0;
+               psDevInfo->sFWCorememCodeFWAddr.ui32Addr = 0;
+       }
+
+       if (uiFWCorememDataLen != 0)
+       {
+               /*
+                * Set up Allocation for FW coremem data section
+                */
+               uiMemAllocFlags = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                               PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+                               PVRSRV_MEMALLOCFLAG_GPU_READABLE  |
+                               PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                               PVRSRV_MEMALLOCFLAG_CPU_READABLE  |
+                               PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                               PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+                               PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+                               PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PRIV_DATA)) &
+                               RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp);
+
+               eError = RGXAllocateFWMemoryRegion(psDeviceNode,
+                               uiFWCorememDataLen,
+                               uiMemAllocFlags,
+                               "FwCorememDataRegion",
+                               &psDevInfo->psRGXFWIfCorememDataStoreMemDesc);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "Failed to allocate fw coremem data mem, "
+                                "size: %"  IMG_INT64_FMTSPECd ", flags: %" PVRSRV_MEMALLOCFLAGS_FMTSPEC " (%u)",
+                                uiFWCorememDataLen,
+                                uiMemAllocFlags,
+                                eError));
+                       goto failFWCorememDataMemDescAlloc;
+               }
+
+               eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+                               &psDevInfo->sFWCorememDataStoreDevVAddrBase);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "Failed to acquire devVAddr for fw coremem data (%u)",
+                                eError));
+                       goto failFWCorememDataMemDescAqDevVirt;
+               }
+
+               eError = RGXSetFirmwareAddress(&psDevInfo->sFWCorememDataStoreFWAddr,
+                               psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+                               0, RFW_FWADDR_NOREF_FLAG);
+               PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", failFWCorememDataMemDescFwAddr);
+       }
+       else
+       {
+               psDevInfo->sFWCorememDataStoreDevVAddrBase.uiAddr = 0;
+               psDevInfo->sFWCorememDataStoreFWAddr.ui32Addr = 0;
+       }
+
+       return PVRSRV_OK;
+
+failFWCorememDataMemDescFwAddr:
+failFWCorememDataMemDescAqDevVirt:
+       if (uiFWCorememDataLen != 0)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfCorememDataStoreMemDesc);
+               psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL;
+       }
+failFWCorememDataMemDescAlloc:
+failFWCorememCodeMemDescFwAddr:
+failFWCorememCodeMemDescAqDevVirt:
+       if (uiFWCorememCodeLen != 0)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCorememCodeMemDesc);
+               psDevInfo->psRGXFWCorememCodeMemDesc = NULL;
+       }
+failFWCorememMemDescAlloc:
+failFWDataMemDescAqDevVirt:
+       DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWDataMemDesc);
+       psDevInfo->psRGXFWDataMemDesc = NULL;
+failFWDataMemDescAlloc:
+failFWCodeMemDescAqDevVirt:
+       DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCodeMemDesc);
+       psDevInfo->psRGXFWCodeMemDesc = NULL;
+failFWCodeMemDescAlloc:
+       return eError;
+}
+
+/*
+       AppHint parameter interface
+ */
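+/* The handlers below pair a query and a set callback per AppHint:
+ * APPHINT_ID_EnableLogGroup exposes only the group-filter bits of the FW log
+ * word, while APPHINT_ID_FirmwareLogType selects between trace (0) and
+ * TBI (1) output. Both routes go through PVRSRVRGXFWDebugQueryFWLogKM /
+ * PVRSRVRGXFWDebugSetFWLogKM, so the two hints operate on the same underlying
+ * firmware log state.
+ */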
+static
+PVRSRV_ERROR RGXFWTraceQueryFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                   const void *psPrivate,
+                                   IMG_UINT32 *pui32Value)
+{
+       PVRSRV_ERROR eResult;
+
+       eResult = PVRSRVRGXFWDebugQueryFWLogKM(NULL, psDeviceNode, pui32Value);
+       *pui32Value &= RGXFWIF_LOG_TYPE_GROUP_MASK;
+       return eResult;
+}
+
+static
+PVRSRV_ERROR RGXFWTraceQueryLogType(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                   const void *psPrivate,
+                                   IMG_UINT32 *pui32Value)
+{
+       PVRSRV_ERROR eResult;
+
+       eResult = PVRSRVRGXFWDebugQueryFWLogKM(NULL, psDeviceNode, pui32Value);
+       if (PVRSRV_OK == eResult)
+       {
+               if (*pui32Value & RGXFWIF_LOG_TYPE_TRACE)
+               {
+                       *pui32Value = 0; /* Trace */
+               }
+               else
+               {
+                       *pui32Value = 1; /* TBI */
+               }
+       }
+       return eResult;
+}
+
+static
+PVRSRV_ERROR RGXFWTraceSetFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                 const void *psPrivate,
+                                 IMG_UINT32 ui32Value)
+{
+       PVRSRV_ERROR eResult;
+       IMG_UINT32 ui32RGXFWLogType;
+
+       eResult = RGXFWTraceQueryLogType(psDeviceNode, NULL, &ui32RGXFWLogType);
+       if (PVRSRV_OK == eResult)
+       {
+               if (0 == ui32RGXFWLogType)
+               {
+                       BITMASK_SET(ui32Value, RGXFWIF_LOG_TYPE_TRACE);
+               }
+               eResult = PVRSRVRGXFWDebugSetFWLogKM(NULL, psDeviceNode, ui32Value);
+       }
+       return eResult;
+}
+
+static
+PVRSRV_ERROR RGXFWTraceSetLogType(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                  const void *psPrivate,
+                                  IMG_UINT32 ui32Value)
+{
+       PVRSRV_ERROR eResult;
+       IMG_UINT32 ui32RGXFWLogType = ui32Value;
+
+       eResult = RGXFWTraceQueryFilter(psDeviceNode, NULL, &ui32RGXFWLogType);
+       if (PVRSRV_OK != eResult)
+       {
+               return eResult;
+       }
+
+       /* 0 - trace, 1 - tbi */
+       if (0 == ui32Value)
+       {
+               BITMASK_SET(ui32RGXFWLogType, RGXFWIF_LOG_TYPE_TRACE);
+       }
+#if defined(SUPPORT_TBI_INTERFACE)
+       else if (1 == ui32Value)
+       {
+               BITMASK_UNSET(ui32RGXFWLogType, RGXFWIF_LOG_TYPE_TRACE);
+       }
+#endif
+       else
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Invalid parameter %u specified to set FW log type AppHint.",
+                        __func__, ui32Value));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       eResult = PVRSRVRGXFWDebugSetFWLogKM(NULL, psDeviceNode, ui32RGXFWLogType);
+       return eResult;
+}
+
+#if defined(DEBUG)
+static
+PVRSRV_ERROR RGXQueryFWPoisonOnFree(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                       const void *psPrivate,
+                                                                       IMG_BOOL *pbValue)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+
+       *pbValue = (PVRSRV_MEMALLOCFLAG_POISON_ON_FREE == psDevInfo->uiFWPoisonOnFreeFlag)
+               ? IMG_TRUE
+               : IMG_FALSE;
+       return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR RGXSetFWPoisonOnFree(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                       const void *psPrivate,
+                                                                       IMG_BOOL bValue)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+       psDevInfo->uiFWPoisonOnFreeFlag = bValue
+                       ? PVRSRV_MEMALLOCFLAG_POISON_ON_FREE
+                       : 0ULL;
+
+       return PVRSRV_OK;
+}
+#endif
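+
+/* Debug builds only: the handlers above let the EnableFWPoisonOnFree AppHint
+ * be queried and toggled at run time by switching uiFWPoisonOnFreeFlag
+ * between PVRSRV_MEMALLOCFLAG_POISON_ON_FREE and 0; the flag is expected to
+ * be folded into later firmware allocation flags elsewhere in the driver.
+ */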
+
+/*
+ * RGXInitFirmware
+ */
+PVRSRV_ERROR
+RGXInitFirmware(PVRSRV_DEVICE_NODE       *psDeviceNode,
+                IMG_BOOL                 bEnableSignatureChecks,
+                IMG_UINT32               ui32SignatureChecksBufSize,
+                IMG_UINT32               ui32HWPerfFWBufSizeKB,
+                IMG_UINT64               ui64HWPerfFilter,
+                IMG_UINT32               ui32ConfigFlags,
+                IMG_UINT32               ui32LogType,
+                IMG_UINT32               ui32FilterFlags,
+                IMG_UINT32               ui32JonesDisableMask,
+                IMG_UINT32               ui32HWRDebugDumpLimit,
+                IMG_UINT32               ui32RenderKillingCtl,
+                IMG_UINT32               ui32CDMTDMKillingCtl,
+                IMG_UINT32               *pui32TPUTrilinearFracMask,
+                IMG_UINT32               *pui32USRMNumRegions,
+                IMG_UINT64               *pui64UVBRMNumRegions,
+                IMG_UINT32               ui32HWPerfCountersDataSize,
+                RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf,
+                FW_PERF_CONF             eFirmwarePerf,
+                IMG_UINT32               ui32KCCBSizeLog2,
+                IMG_UINT32               ui32ConfigFlagsExt,
+                IMG_UINT32               ui32AvailablePowUnitsMask,
+                IMG_UINT32               ui32AvailableRACMask,
+                IMG_UINT32               ui32FwOsCfgFlags)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+#if defined(DEBUG)
+       IMG_BOOL bEnableFWPoisonOnFree = IMG_FALSE;
+#endif
+
+       eError = RGXSetupFirmware(psDeviceNode,
+                                 bEnableSignatureChecks,
+                                 ui32SignatureChecksBufSize,
+                                 ui32HWPerfFWBufSizeKB,
+                                 ui64HWPerfFilter,
+                                 ui32ConfigFlags,
+                                 ui32ConfigFlagsExt,
+                                 ui32FwOsCfgFlags,
+                                 ui32LogType,
+                                 ui32FilterFlags,
+                                 ui32JonesDisableMask,
+                                 ui32HWRDebugDumpLimit,
+                                 ui32HWPerfCountersDataSize,
+                                 ui32RenderKillingCtl,
+                                 ui32CDMTDMKillingCtl,
+                                 pui32TPUTrilinearFracMask,
+                                 pui32USRMNumRegions,
+                                 pui64UVBRMNumRegions,
+                                 eRGXRDPowerIslandingConf,
+                                 eFirmwarePerf,
+                                 ui32KCCBSizeLog2,
+                                 ui32AvailablePowUnitsMask,
+                                 ui32AvailableRACMask);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "PVRSRVRGXInitFirmwareKM: RGXSetupFirmware failed (%u)",
+                        eError));
+               goto failed_init_firmware;
+       }
+
+       if (!PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableLogGroup,
+                                                   RGXFWTraceQueryFilter,
+                                                   RGXFWTraceSetFilter,
+                                                   psDeviceNode,
+                                                   NULL);
+               PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_FirmwareLogType,
+                                                   RGXFWTraceQueryLogType,
+                                                   RGXFWTraceSetLogType,
+                                                   psDeviceNode,
+                                                   NULL);
+       }
+
+#if defined(DEBUG)
+       bEnableFWPoisonOnFree = PVRSRV_APPHINT_ENABLEFWPOISONONFREE;
+
+       PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_EnableFWPoisonOnFree,
+                                          RGXQueryFWPoisonOnFree,
+                                          RGXSetFWPoisonOnFree,
+                                          psDeviceNode,
+                                          NULL);
+
+       psDevInfo->uiFWPoisonOnFreeFlag = bEnableFWPoisonOnFree
+                       ? PVRSRV_MEMALLOCFLAG_POISON_ON_FREE
+                       : 0ULL;
+#else
+       psDevInfo->uiFWPoisonOnFreeFlag = 0ULL;
+#endif
+
+       return PVRSRV_OK;
+
+failed_init_firmware:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+/* See device.h for function declaration */
+static PVRSRV_ERROR RGXAllocUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                        DEVMEM_MEMDESC **psMemDesc,
+                                                                        IMG_UINT32 *puiSyncPrimVAddr,
+                                                                        IMG_UINT32 *puiSyncPrimBlockSize)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       PVRSRV_ERROR eError;
+       RGXFWIF_DEV_VIRTADDR pFirmwareAddr;
+       IMG_DEVMEM_SIZE_T uiUFOBlockSize = sizeof(IMG_UINT32);
+       IMG_DEVMEM_ALIGN_T ui32UFOBlockAlign = sizeof(IMG_UINT32);
+       IMG_UINT32 ui32CoherencyFlag = 0;
+
+       psDevInfo = psDeviceNode->pvDevice;
+
+       /* Size and align are 'expanded' because we request an Exportalign allocation */
+       eError = DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap),
+                                                                               &uiUFOBlockSize,
+                                                                               &ui32UFOBlockAlign);
+
+       if (eError != PVRSRV_OK)
+       {
+               goto e0;
+       }
+
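+       /* Use a cache coherent mapping for the UFO block only when snooping works
+        * in both directions (the device snoops the CPU cache and the CPU snoops
+        * the device cache); otherwise fall back to an uncached mapping so both
+        * agents observe sync updates without extra cache maintenance.
+        */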
+       if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig) &&
+               PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig))
+       {
+               ui32CoherencyFlag = PVRSRV_MEMALLOCFLAG_CACHE_COHERENT;
+       }
+       else
+       {
+               ui32CoherencyFlag = PVRSRV_MEMALLOCFLAG_UNCACHED;
+       }
+
+       eError = DevmemFwAllocateExportable(psDeviceNode,
+                                                                               uiUFOBlockSize,
+                                                                               ui32UFOBlockAlign,
+                                                                               PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN) |
+                                                                               PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                                                                               PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+                                                                               PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+                                                                               PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                                                                               PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                                                                               PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+                                                                               PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                                                                               ui32CoherencyFlag,
+                                                                               "FwExUFOBlock",
+                                                                               psMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               goto e0;
+       }
+
+       eError = RGXSetFirmwareAddress(&pFirmwareAddr, *psMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+       PVR_GOTO_IF_ERROR(eError, e1);
+
+       *puiSyncPrimVAddr = pFirmwareAddr.ui32Addr;
+       *puiSyncPrimBlockSize = TRUNCATE_64BITS_TO_32BITS(uiUFOBlockSize);
+
+       return PVRSRV_OK;
+
+e1:
+       DevmemFwUnmapAndFree(psDevInfo, *psMemDesc);
+e0:
+       return eError;
+}
+
+/* See device.h for function declaration */
+static void RGXFreeUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                       DEVMEM_MEMDESC *psMemDesc)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       /*
+               If the system has snooping of the device cache then the UFO block
+               might be in the cache so we need to flush it out before freeing
+               the memory
+
+               When the device is being shut down/destroyed we no longer care:
+               several of the data structures needed to issue a flush have
+               already been destroyed.
+        */
+       if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig) &&
+               psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_DEINIT)
+       {
+               RGXFWIF_KCCB_CMD sFlushInvalCmd;
+               PVRSRV_ERROR eError;
+               IMG_UINT32 ui32kCCBCommandSlot;
+
+               /* Schedule the SLC flush command ... */
+#if defined(PDUMP)
+               PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "Submit SLC flush and invalidate");
+#endif
+               sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
+               sFlushInvalCmd.uCmdData.sSLCFlushInvalData.ui64Size = 0;
+               sFlushInvalCmd.uCmdData.sSLCFlushInvalData.ui64Address = 0;
+               sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_TRUE;
+               sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE;
+               sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext.ui32Addr = 0;
+
+               eError = RGXSendCommandWithPowLockAndGetKCCBSlot(psDevInfo,
+                                                                                                                &sFlushInvalCmd,
+                                                                                                                PDUMP_FLAGS_CONTINUOUS,
+                                                                                                                &ui32kCCBCommandSlot);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to schedule SLC flush command with error (%u)",
+                                __func__,
+                                eError));
+               }
+               else
+               {
+                       /* Wait for the SLC flush to complete */
+                       eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: SLC flush and invalidate aborted with error (%u)",
+                                        __func__,
+                                        eError));
+                       }
+                       else if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] &
+                                                         RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE))
+                       {
+                               PVR_DPF((PVR_DBG_WARNING, "%s: FW poll on a HW operation failed", __func__));
+                       }
+               }
+       }
+
+       RGXUnsetFirmwareAddress(psMemDesc);
+       DevmemFwUnmapAndFree(psDevInfo, psMemDesc);
+}
+
+static void DevPart2DeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice;
+
+       psDevInfo->bDevInit2Done = IMG_FALSE;
+
+       if ((psDevInfo->hTQUSCSharedMem != NULL) &&
+           (psDevInfo->hTQCLISharedMem != NULL))
+       {
+               PVRSRVTQUnloadShaders(psDeviceNode);
+       }
+
+#if !defined(NO_HARDWARE)
+       if (psDevInfo->pvLISRData != NULL)
+       {
+               (void) SysUninstallDeviceLISR(psDevInfo->pvLISRData);
+       }
+       if (psDevInfo->pvMISRData != NULL)
+       {
+               (void) OSUninstallMISR(psDevInfo->pvMISRData);
+       }
+       if (psDevInfo->hProcessQueuesMISR != NULL)
+       {
+               (void) OSUninstallMISR(psDevInfo->hProcessQueuesMISR);
+       }
+       if (psDevInfo->pvAPMISRData != NULL)
+       {
+               (void) OSUninstallMISR(psDevInfo->pvAPMISRData);
+       }
+       if (psDeviceNode->hCmdCompNotify != NULL)
+       {
+               PVRSRVUnregisterCmdCompleteNotify(psDeviceNode->hCmdCompNotify);
+               psDeviceNode->hCmdCompNotify = NULL;
+       }
+#endif /* !NO_HARDWARE */
+
+       /* Remove the device from the power manager */
+       PVRSRVRemovePowerDevice(psDeviceNode);
+
+       psDevInfo->pfnGetGpuUtilStats = NULL;
+       if (psDevInfo->hGPUUtilLock != NULL)
+       {
+               OSLockDestroy(psDevInfo->hGPUUtilLock);
+       }
+
+       if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) &&
+               (psDevInfo->hMMUCtxUnregLock != NULL))
+       {
+               OSLockDestroy(psDevInfo->hMMUCtxUnregLock);
+       }
+
+       if (psDevInfo->hDebugFaultInfoLock != NULL)
+       {
+               OSLockDestroy(psDevInfo->hDebugFaultInfoLock);
+       }
+
+       /* De-init Freelists/ZBuffers... */
+       if (psDevInfo->hLockFreeList != NULL)
+       {
+               OSLockDestroy(psDevInfo->hLockFreeList);
+       }
+
+       if (psDevInfo->hLockZSBuffer != NULL)
+       {
+               OSLockDestroy(psDevInfo->hLockZSBuffer);
+       }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       /* De-init work estimation lock */
+       if (psDevInfo->hWorkEstLock != NULL)
+       {
+               OSLockDestroy(psDevInfo->hWorkEstLock);
+       }
+#endif
+
+       /* Free DVFS Table */
+       if (psDevInfo->psGpuDVFSTable != NULL)
+       {
+               OSFreeMem(psDevInfo->psGpuDVFSTable);
+               psDevInfo->psGpuDVFSTable = NULL;
+       }
+}
+
+/*
+       DevDeInitRGX
+ */
+PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_RGXDEV_INFO              *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice;
+       PVRSRV_ERROR                    eError;
+       DEVICE_MEMORY_INFO              *psDevMemoryInfo;
+       IMG_UINT32              ui32Temp = 0;
+
+       if (!psDevInfo)
+       {
+               /* Can happen if DevInitRGX failed */
+               PVR_DPF((PVR_DBG_ERROR, "DevDeInitRGX: Null DevInfo"));
+               return PVRSRV_OK;
+       }
+
+       if (psDevInfo->psRGXFWIfOsInit)
+       {
+               KM_SET_OS_CONNECTION(OFFLINE, psDevInfo);
+       }
+
+       DeviceDepBridgeDeInit(psDevInfo);
+
+#if defined(PDUMP)
+       DevmemIntFreeDefBackingPage(psDeviceNode,
+                                                               &psDeviceNode->sDummyPage,
+                                                               DUMMY_PAGE);
+       DevmemIntFreeDefBackingPage(psDeviceNode,
+                                                               &psDeviceNode->sDevZeroPage,
+                                                               DEV_ZERO_PAGE);
+#endif
+
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+       if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+       {
+               OSAtomicWrite(&psDeviceNode->sDummyPage.atRefCounter, 0);
+               PVR_UNREFERENCED_PARAMETER(ui32Temp);
+       }
+       else
+#endif
+       {
+               /* Delete the Dummy page related info */
+               ui32Temp = (IMG_UINT32)OSAtomicRead(&psDeviceNode->sDummyPage.atRefCounter);
+               if (0 != ui32Temp)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Dummy page reference counter is non zero (%u)",
+                                __func__,
+                                ui32Temp));
+                       PVR_ASSERT(0);
+               }
+       }
+
+       /* Delete the Zero page related info */
+       ui32Temp = (IMG_UINT32)OSAtomicRead(&psDeviceNode->sDevZeroPage.atRefCounter);
+       if (0 != ui32Temp)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Zero page reference counter is non zero (%u)",
+                        __func__,
+                        ui32Temp));
+       }
+
+#if defined(PDUMP)
+       if (NULL != psDeviceNode->sDummyPage.hPdumpPg)
+       {
+               PDUMPCOMMENT(psDeviceNode, "Error dummy page handle is still active");
+       }
+
+       if (NULL != psDeviceNode->sDevZeroPage.hPdumpPg)
+       {
+               PDUMPCOMMENT(psDeviceNode, "Error Zero page handle is still active");
+       }
+#endif
+
+       /* The lock type needs to be a dispatch type here because it can be acquired from the MISR (Z-buffer) path */
+       OSLockDestroy(psDeviceNode->sDummyPage.psPgLock);
+
+       /* Destroy the zero page lock */
+       OSLockDestroy(psDeviceNode->sDevZeroPage.psPgLock);
+
+       /* Unregister debug request notifiers first as they could depend on anything. */
+
+       RGXDebugDeinit(psDevInfo);
+
+       /*
+        * De-initialise in reverse order, so stage 2 init is undone first.
+        */
+       if (psDevInfo->bDevInit2Done)
+       {
+               DevPart2DeInitRGX(psDeviceNode);
+       }
+
+       /* Unregister MMU related stuff */
+       eError = RGXMMUInit_Unregister(psDeviceNode);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "DevDeInitRGX: Failed RGXMMUInit_Unregister (0x%x)",
+                        eError));
+       }
+
+       /* UnMap Regs */
+       if (psDevInfo->pvRegsBaseKM != NULL)
+       {
+#if !defined(NO_HARDWARE)
+               OSUnMapPhysToLin((void __force *) psDevInfo->pvRegsBaseKM,
+                                                psDevInfo->ui32RegSize);
+#endif /* !NO_HARDWARE */
+               psDevInfo->pvRegsBaseKM = NULL;
+       }
+
+#if 0 /* not required at this time */
+       if (psDevInfo->hTimer)
+       {
+               eError = OSRemoveTimer(psDevInfo->hTimer);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "DevDeInitRGX: Failed to remove timer"));
+                       return eError;
+               }
+               psDevInfo->hTimer = NULL;
+       }
+#endif
+
+       psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+
+       RGXDeInitHeaps(psDevMemoryInfo);
+
+       if (psDevInfo->psRGXFWCodeMemDesc)
+       {
+               /* Free fw code */
+               PDUMPCOMMENT(psDeviceNode, "Freeing FW code memory");
+               DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc);
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCodeMemDesc);
+               psDevInfo->psRGXFWCodeMemDesc = NULL;
+       }
+       if (psDevInfo->psRGXFWDataMemDesc)
+       {
+               /* Free fw data */
+               PDUMPCOMMENT(psDeviceNode, "Freeing FW data memory");
+               DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWDataMemDesc);
+               psDevInfo->psRGXFWDataMemDesc = NULL;
+       }
+       if (psDevInfo->psRGXFWCorememCodeMemDesc)
+       {
+               /* Free fw core mem code */
+               PDUMPCOMMENT(psDeviceNode, "Freeing FW coremem code memory");
+               DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc);
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCorememCodeMemDesc);
+               psDevInfo->psRGXFWCorememCodeMemDesc = NULL;
+       }
+
+       if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc)
+       {
+               /* Free fw core mem data */
+               PDUMPCOMMENT(psDeviceNode, "Freeing FW coremem data store memory");
+               DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc);
+               DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfCorememDataStoreMemDesc);
+               psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL;
+       }
+
+       /*
+          Free the firmware allocations.
+        */
+       RGXFreeFirmware(psDevInfo);
+
+#if defined(SUPPORT_VALIDATION)
+       RGXPowerDomainDeInitState(&psDevInfo->sPowerDomainState);
+#endif
+
+       RGXDeInitMultiCoreInfo(psDeviceNode);
+
+       /* De-initialise non-device specific (TL) users of RGX device memory */
+       RGXHWPerfDeinit(psDevInfo);
+
+       RGXDeInitDestroyFWKernelMemoryContext(psDeviceNode);
+
+       RGXHWPerfHostDeInit(psDevInfo);
+       eError = HTBDeInit();
+       PVR_LOG_IF_ERROR(eError, "HTBDeInit");
+
+       /* destroy the stalled CCB locks */
+       OSLockDestroy(psDevInfo->hCCBRecoveryLock);
+       OSLockDestroy(psDevInfo->hCCBStallCheckLock);
+
+       /* destroy the context list locks */
+       OSLockDestroy(psDevInfo->sRegCongfig.hLock);
+       OSLockDestroy(psDevInfo->hBPLock);
+       OSLockDestroy(psDevInfo->hRGXFWIfBufInitLock);
+       OSWRLockDestroy(psDevInfo->hRenderCtxListLock);
+       OSWRLockDestroy(psDevInfo->hComputeCtxListLock);
+       OSWRLockDestroy(psDevInfo->hTransferCtxListLock);
+       OSWRLockDestroy(psDevInfo->hTDMCtxListLock);
+       OSWRLockDestroy(psDevInfo->hKickSyncCtxListLock);
+       OSWRLockDestroy(psDevInfo->hMemoryCtxListLock);
+       OSSpinLockDestroy(psDevInfo->hLockKCCBDeferredCommandsList);
+       OSWRLockDestroy(psDevInfo->hCommonCtxtListLock);
+
+       /* Free device BVNC string */
+       if (NULL != psDevInfo->sDevFeatureCfg.pszBVNCString)
+       {
+               OSFreeMem(psDevInfo->sDevFeatureCfg.pszBVNCString);
+       }
+
+#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER)
+       if (NULL != psDevInfo->sRGXTimerValues.pui64uscTimers)
+       {
+               OSFreeMem(psDevInfo->sRGXTimerValues.pui64uscTimers);
+               psDevInfo->sRGXTimerValues.pui64uscTimers = NULL;
+       }
+#endif
+
+       /* DeAllocate devinfo */
+       OSFreeMem(psDevInfo);
+
+       psDeviceNode->pvDevice = NULL;
+
+       return PVRSRV_OK;
+}
+
+#if defined(PDUMP)
+static
+PVRSRV_ERROR RGXResetPDump(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)(psDeviceNode->pvDevice);
+
+       psDevInfo->bDumpedKCCBCtlAlready = IMG_FALSE;
+
+       return PVRSRV_OK;
+}
+#endif /* PDUMP */
+
+/* Takes a log2 page size parameter and calculates a suitable page size
+ * for the RGX heaps. Returns 0 if the parameter is invalid. */
+static INLINE IMG_UINT32 RGXHeapDerivePageSize(IMG_UINT32 uiLog2PageSize)
+{
+       IMG_BOOL bFound = IMG_FALSE;
+
+       /* The OS page shift must be non-zero, at least RGX_HEAP_4KB_PAGE_SHIFT
+        * and at most RGX_HEAP_2MB_PAGE_SHIFT (i.e. the page size is a power of two) */
+       if (uiLog2PageSize == 0U ||
+           (uiLog2PageSize < RGX_HEAP_4KB_PAGE_SHIFT) ||
+           (uiLog2PageSize > RGX_HEAP_2MB_PAGE_SHIFT))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Provided incompatible log2 page size %u",
+                               __func__,
+                               uiLog2PageSize));
+               PVR_ASSERT(0);
+               return 0;
+       }
+
+       do
+       {
+               switch (uiLog2PageSize)
+               {
+                       case RGX_HEAP_4KB_PAGE_SHIFT:
+                       case RGX_HEAP_16KB_PAGE_SHIFT:
+                       case RGX_HEAP_64KB_PAGE_SHIFT:
+                       case RGX_HEAP_256KB_PAGE_SHIFT:
+                       case RGX_HEAP_1MB_PAGE_SHIFT:
+                       case RGX_HEAP_2MB_PAGE_SHIFT:
+                               /* All good, RGX page size equals given page size
+                                * => use it as default for heaps */
+                               bFound = IMG_TRUE;
+                               break;
+                       default:
+                               /* Fall back to a smaller device page size than
+                                * the given one because there is no exact match
+                                * among the supported sizes. */
+                               uiLog2PageSize -= 1U;
+                               break;
+               }
+       } while (!bFound);
+
+       return uiLog2PageSize;
+}
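+
+/* Illustration of the fallback above: an OS page shift that exactly matches
+ * one of the RGX_HEAP_*_PAGE_SHIFT values is used as-is, whereas a shift that
+ * falls between two supported values is decremented until it reaches the next
+ * smaller supported shift, so the heaps use the nearest smaller RGX page size.
+ */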
+
+/* First 16-bits define possible types */
+#define HEAP_INST_VALUE_MASK     (0xFFFF)
+#define HEAP_INST_DEFAULT_VALUE  (1U)  /* Indicates either that the heap is always instantiated by default (pfn = NULL)
+                                             OR
+                                             that this is the default configuration of a heap which also has a BRN alternative */
+#define HEAP_INST_BRN_DEP_VALUE  (2U)  /* The inclusion of this heap depends on the BRN being present */
+#define HEAP_INST_FEAT_DEP_VALUE (3U)  /* The inclusion of this heap depends on the feature being present */
+#define HEAP_INST_BRN_ALT_VALUE  (4U)  /* This entry is a possible alternative to the default, selected by a BRN */
+#define HEAP_INST_FEAT_ALT_VALUE (5U)  /* This entry is a possible alternative to the default, selected by a feature define */
+
+/* Latter 16-bits define other flags we may need */
+#define HEAP_INST_NON4K_FLAG     (1 << 16U) /* This is a possible NON4K Entry and we should use the device
+                                               NON4K size when instantiating */
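+
+/* Example composition: the general NON4K heap entry below uses
+ * (HEAP_INST_DEFAULT_VALUE | HEAP_INST_NON4K_FLAG); the instantiation type can
+ * be recovered with (ui32HeapInstanceFlags & HEAP_INST_VALUE_MASK) and the
+ * NON4K behaviour is tested directly against the flag bit.
+ */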
+
+typedef struct RGX_HEAP_INFO_TAG RGX_HEAP_INFO; // Forward declaration
+typedef IMG_BOOL (*PFN_IS_PRESENT)(PVRSRV_RGXDEV_INFO*, const RGX_HEAP_INFO*);
+
+struct RGX_HEAP_INFO_TAG
+{
+       IMG_CHAR           *pszName;
+       IMG_UINT64         ui64HeapBase;
+       IMG_DEVMEM_SIZE_T  uiHeapLength;
+       IMG_DEVMEM_SIZE_T  uiHeapReservedRegionLength;
+       IMG_UINT32         ui32Log2ImportAlignment;
+       PFN_IS_PRESENT     pfnIsHeapPresent;
+       IMG_UINT32         ui32HeapInstanceFlags;
+};
+
+/* Feature Present function prototypes */
+
+/* FW Feature Present function prototypes */
+
+static IMG_BOOL FWVZConfigPresent(PVRSRV_RGXDEV_INFO* psDevInfo, const RGX_HEAP_INFO* pksHeapInfo)
+{
+       /* Used to determine the correct table row to instantiate as a heap by comparing
+        * the heap base required at run time against the current table entry.
+        */
+
+       /* Determine whether this entry should be included */
+       return (pksHeapInfo->ui64HeapBase == RGX_FIRMWARE_CONFIG_HEAP_BASE) ? IMG_TRUE : IMG_FALSE;
+}
+
+static IMG_BOOL FWVZMainHeapPresent(PVRSRV_RGXDEV_INFO* psDevInfo, const RGX_HEAP_INFO* pksHeapInfo)
+{
+       /* Used to determine the correct table row to instantiate as a heap by comparing
+        * the heap base required at run time against the current table entry.
+        */
+       return (pksHeapInfo->ui64HeapBase == RGX_FIRMWARE_MAIN_HEAP_BASE) ? IMG_TRUE : IMG_FALSE;
+}
+
+static const RGX_HEAP_INFO gasRGXHeapLayoutApp[] =
+{
+       /* Name                             HeapBase                          HeapLength                        HeapReservedRegionLength                     Log2ImportAlignment pfnIsHeapPresent HeapInstanceFlags   */
+       {RGX_GENERAL_SVM_HEAP_IDENT,        RGX_GENERAL_SVM_HEAP_BASE,        RGX_GENERAL_SVM_HEAP_SIZE,        0,                                           0,                  NULL,            HEAP_INST_DEFAULT_VALUE},
+       {RGX_GENERAL_HEAP_IDENT,            RGX_GENERAL_HEAP_BASE,            RGX_GENERAL_HEAP_SIZE,            (1 * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY), 0,                  NULL,            HEAP_INST_DEFAULT_VALUE},
+       {RGX_GENERAL_NON4K_HEAP_IDENT,      RGX_GENERAL_NON4K_HEAP_BASE,      RGX_GENERAL_NON4K_HEAP_SIZE,      0,                                           0,                  NULL,            HEAP_INST_DEFAULT_VALUE | HEAP_INST_NON4K_FLAG},
+       {RGX_PDSCODEDATA_HEAP_IDENT,        RGX_PDSCODEDATA_HEAP_BASE,        RGX_PDSCODEDATA_HEAP_SIZE,        (1 * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY), 0,                  NULL,            HEAP_INST_DEFAULT_VALUE},
+       {RGX_USCCODE_HEAP_IDENT,            RGX_USCCODE_HEAP_BASE,            RGX_USCCODE_HEAP_SIZE,            (1 * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY), 0,                  NULL,            HEAP_INST_DEFAULT_VALUE},
+       {RGX_VK_CAPT_REPLAY_HEAP_IDENT,     RGX_VK_CAPT_REPLAY_HEAP_BASE,     RGX_VK_CAPT_REPLAY_HEAP_SIZE,     0,                                           0,                  NULL,            HEAP_INST_DEFAULT_VALUE},
+       {RGX_SIGNALS_HEAP_IDENT,            RGX_SIGNALS_HEAP_BASE,            RGX_SIGNALS_HEAP_SIZE,            0,                                           0,                  NULL,            HEAP_INST_DEFAULT_VALUE},
+       {RGX_COMPONENT_CTRL_HEAP_IDENT,     RGX_COMPONENT_CTRL_HEAP_BASE,     RGX_COMPONENT_CTRL_HEAP_SIZE,     0,                                           0,                  NULL,            HEAP_INST_DEFAULT_VALUE},
+       {RGX_FBCDC_HEAP_IDENT,              RGX_FBCDC_HEAP_BASE,              RGX_FBCDC_HEAP_SIZE,              0,                                           0,                  NULL,            HEAP_INST_DEFAULT_VALUE},
+       {RGX_FBCDC_LARGE_HEAP_IDENT,        RGX_FBCDC_LARGE_HEAP_BASE,        RGX_FBCDC_LARGE_HEAP_SIZE,        0,                                           0,                  NULL,            HEAP_INST_DEFAULT_VALUE},
+       {RGX_PDS_INDIRECT_STATE_HEAP_IDENT, RGX_PDS_INDIRECT_STATE_HEAP_BASE, RGX_PDS_INDIRECT_STATE_HEAP_SIZE, 0,                                           0,                  NULL,            HEAP_INST_DEFAULT_VALUE},
+       {RGX_CMP_MISSION_RMW_HEAP_IDENT,    RGX_CMP_MISSION_RMW_HEAP_BASE,    RGX_CMP_MISSION_RMW_HEAP_SIZE,    0,                                           0,                  NULL,            HEAP_INST_DEFAULT_VALUE},
+       {RGX_CMP_SAFETY_RMW_HEAP_IDENT,     RGX_CMP_SAFETY_RMW_HEAP_BASE,     RGX_CMP_SAFETY_RMW_HEAP_SIZE,     0,                                           0,                  NULL,            HEAP_INST_DEFAULT_VALUE},
+       {RGX_TEXTURE_STATE_HEAP_IDENT,      RGX_TEXTURE_STATE_HEAP_BASE,      RGX_TEXTURE_STATE_HEAP_SIZE,      0,                                           0,                  NULL,            HEAP_INST_DEFAULT_VALUE},
+       {RGX_VISIBILITY_TEST_HEAP_IDENT,    RGX_VISIBILITY_TEST_HEAP_BASE,    RGX_VISIBILITY_TEST_HEAP_SIZE,    0,                                           0,                  NULL,            HEAP_INST_DEFAULT_VALUE}
+};
+
+static const RGX_HEAP_INFO gasRGXHeapLayoutFW[] =
+{
+       /* Name                          HeapBase                             HeapLength                        HeapReservedRegionLength Log2ImportAlignment pfnPresent           HeapInstanceFlags*/
+       {RGX_FIRMWARE_MAIN_HEAP_IDENT,   RGX_FIRMWARE_MAIN_HEAP_BASE,    RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE, 0,                       0,                  FWVZMainHeapPresent, HEAP_INST_DEFAULT_VALUE},
+       {RGX_FIRMWARE_CONFIG_HEAP_IDENT, RGX_FIRMWARE_CONFIG_HEAP_BASE,  RGX_FIRMWARE_CONFIG_HEAP_SIZE,       0,                       0,                  FWVZConfigPresent,   HEAP_INST_DEFAULT_VALUE}
+};
+
+/* Generic counting method. */
+static void _CountRequiredHeaps(PVRSRV_RGXDEV_INFO  *psDevInfo,
+                                   const RGX_HEAP_INFO  pksHeapInfo[],
+                                   IMG_UINT32           ui32HeapListSize,
+                                   IMG_UINT32*          ui32HeapCount)
+{
+       IMG_UINT32 i;
+
+       /* Loop over rows in the heap data array using callback to decide if we
+        * should include the heap
+        */
+       for (i = 0; i < ui32HeapListSize; i++)
+       {
+               const RGX_HEAP_INFO *psHeapInfo = &pksHeapInfo[i];
+
+               if (psHeapInfo->pfnIsHeapPresent)
+               {
+                       if (!psHeapInfo->pfnIsHeapPresent(psDevInfo, psHeapInfo))
+                       {
+                               /* We don't need to create this heap */
+                               continue;
+                       }
+               }
+
+               (*ui32HeapCount)++;
+       }
+}
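+
+/* _CountRequiredHeaps and _InstantiateRequiredHeaps walk the same tables with
+ * the same pfnIsHeapPresent filtering: the counting pass lets the caller size
+ * a single blueprint allocation up front, and the instantiation pass then
+ * fills it through the supplied cursor.
+ */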
+/* Generic heap instantiator */
+static void _InstantiateRequiredHeaps(PVRSRV_RGXDEV_INFO     *psDevInfo,
+                                         const RGX_HEAP_INFO     pksHeapInfo[],
+                                         IMG_UINT32              ui32HeapListSize,
+                                         DEVMEM_HEAP_BLUEPRINT **psDeviceMemoryHeapCursor)
+{
+       IMG_UINT32 i;
+       /* We now have a list of the heaps to include and so we should loop over this
+        * list and instantiate.
+        */
+       for (i = 0; i < ui32HeapListSize; i++)
+       {
+               IMG_UINT32 ui32Log2RgxDefaultPageShift = RGXHeapDerivePageSize(OSGetPageShift());
+               IMG_UINT32 ui32Log2DataPageSize = 0;
+
+               const RGX_HEAP_INFO *psHeapInfo = &pksHeapInfo[i];
+
+               if (psHeapInfo->pfnIsHeapPresent)
+               {
+                       if (!psHeapInfo->pfnIsHeapPresent(psDevInfo, psHeapInfo))
+                       {
+                               /* We don't need to create this heap */
+                               continue;
+                       }
+               }
+
+               if (psHeapInfo->ui32HeapInstanceFlags & HEAP_INST_NON4K_FLAG)
+               {
+                       ui32Log2DataPageSize = psDevInfo->ui32Log2Non4KPgSize;
+               }
+               else
+               {
+                       ui32Log2DataPageSize = ui32Log2RgxDefaultPageShift;
+               }
+
+               HeapCfgBlueprintInit(psHeapInfo->pszName,
+                                        psHeapInfo->ui64HeapBase,
+                                        psHeapInfo->uiHeapLength,
+                                        psHeapInfo->uiHeapReservedRegionLength,
+                                        ui32Log2DataPageSize,
+                                        psHeapInfo->ui32Log2ImportAlignment,
+                                        *psDeviceMemoryHeapCursor);
+
+               (*psDeviceMemoryHeapCursor)++;
+       }
+}
+
+static PVRSRV_ERROR RGXInitHeaps(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                    DEVICE_MEMORY_INFO *psNewMemoryInfo)
+{
+       PVRSRV_ERROR eError;
+       DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor;
+
+       IMG_UINT32 ui32HeapListSize = ARRAY_SIZE(gasRGXHeapLayoutApp);
+       IMG_UINT32 ui32FWHeapListSize = ARRAY_SIZE(gasRGXHeapLayoutFW);
+       IMG_UINT32 ui32CountedHeapSize;
+
+       IMG_UINT32 ui32HeapCount = 0;
+       IMG_UINT32 ui32FWHeapCount = 0;
+
+       /* Count heaps required for the app heaps */
+       _CountRequiredHeaps(psDevInfo,
+                               gasRGXHeapLayoutApp,
+                               ui32HeapListSize,
+                               &ui32HeapCount);
+
+       /* Count heaps required for the FW heaps */
+       _CountRequiredHeaps(psDevInfo,
+                               gasRGXHeapLayoutFW,
+                               ui32FWHeapListSize,
+                               &ui32FWHeapCount);
+
+       ui32CountedHeapSize = (ui32HeapCount + ui32FWHeapCount + RGX_NUM_OS_SUPPORTED);
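+       /* RGX_NUM_OS_SUPPORTED extra blueprint slots are reserved here for the
+        * guest raw firmware heaps that a VZ host may append further down; those
+        * are not counted yet, hence the adjusted assert after instantiation.
+        */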
+
+       psNewMemoryInfo->psDeviceMemoryHeap = OSAllocMem(sizeof(DEVMEM_HEAP_BLUEPRINT) * ui32CountedHeapSize);
+       PVR_LOG_GOTO_IF_NOMEM(psNewMemoryInfo->psDeviceMemoryHeap, eError, e0);
+
+       /* Initialise the heaps */
+       psDeviceMemoryHeapCursor = psNewMemoryInfo->psDeviceMemoryHeap;
+
+       /* Instantiate App Heaps */
+       _InstantiateRequiredHeaps(psDevInfo,
+                                     gasRGXHeapLayoutApp,
+                                     ui32HeapListSize,
+                                     &psDeviceMemoryHeapCursor);
+
+       /* Instantiate FW Heaps */
+       _InstantiateRequiredHeaps(psDevInfo,
+                                     gasRGXHeapLayoutFW,
+                                     ui32FWHeapListSize,
+                                     &psDeviceMemoryHeapCursor);
+
+       /* set the heap count */
+       psNewMemoryInfo->ui32HeapCount = (IMG_UINT32)(psDeviceMemoryHeapCursor - psNewMemoryInfo->psDeviceMemoryHeap);
+
+       /* Check we have allocated the correct # of heaps, minus any VZ heaps as these
+        * have not been created at this point
+        */
+       PVR_ASSERT(psNewMemoryInfo->ui32HeapCount == (ui32CountedHeapSize - RGX_NUM_OS_SUPPORTED));
+
+       /*
+          In the new heap setup we initialise two configurations:
+               1 - One is for the firmware only (index 1 in the array)
+                       a. It primarily contains the firmware heaps.
+                       b. It also gains additional guest OSID firmware heap(s),
+                               but only if the number of supported firmware OSIDs is > 1.
+               2 - The other is for clients only (index 0 in the array)
+                       a. It contains all the other, client-visible heaps.
+        */
+       psNewMemoryInfo->uiNumHeapConfigs = 2;
+       psNewMemoryInfo->psDeviceMemoryHeapConfigArray = OSAllocMem(sizeof(DEVMEM_HEAP_CONFIG) * psNewMemoryInfo->uiNumHeapConfigs);
+       PVR_LOG_GOTO_IF_NOMEM(psNewMemoryInfo->psDeviceMemoryHeapConfigArray, eError, e1);
+
+       psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].pszName = "Default Heap Configuration";
+       psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].uiNumHeaps = psNewMemoryInfo->ui32HeapCount - RGX_FIRMWARE_NUMBER_OF_FW_HEAPS;
+       psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].psHeapBlueprintArray = psNewMemoryInfo->psDeviceMemoryHeap;
+
+       psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].pszName = "Firmware Heap Configuration";
+       psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps = RGX_FIRMWARE_NUMBER_OF_FW_HEAPS;
+       psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].psHeapBlueprintArray = psDeviceMemoryHeapCursor - RGX_FIRMWARE_NUMBER_OF_FW_HEAPS;
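+       /* The firmware configuration reuses the tail of the blueprint array: the
+        * FW heaps were instantiated last, so stepping the cursor back by
+        * RGX_FIRMWARE_NUMBER_OF_FW_HEAPS lands on them, while the client
+        * configuration starts at the beginning of the same array.
+        */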
+
+       if (RGX_GET_FEATURE_VALUE(psDevInfo, MMU_VERSION) >= 4)
+       {
+               IMG_UINT32 i;
+               const IMG_UINT32 ui32GeneralNon4KHeapPageSize = (1 << psDevInfo->ui32Log2Non4KPgSize);
+               const IMG_UINT32 ui32RgxDefaultPageSize = (1 << RGXHeapDerivePageSize(OSGetPageShift()));
+
+               /*
+                * Initialise all MMU Page Size Range Config registers to the default page
+                * size used by the OS, leaving their address ranges at 0.
+                */
+               for (i = 0; i < ARRAY_SIZE(psDevInfo->aui64MMUPageSizeRangeValue); ++i)
+               {
+                       psDevInfo->aui64MMUPageSizeRangeValue[i] =
+                                       RGXMMUInit_GetConfigRangeValue(ui32RgxDefaultPageSize,
+                                                                                                  0,
+                                                                                                  (1 << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT));
+               }
+
+               /* set the last MMU config range covering the entire virtual memory to the OS's page size */
+               psDevInfo->aui64MMUPageSizeRangeValue[RGX_MAX_NUM_MMU_PAGE_SIZE_RANGES - 1] =
+                               RGXMMUInit_GetConfigRangeValue(ui32RgxDefaultPageSize, 0, (1ULL << 40));
+
+               /*
+                * If the Non4K heap has a different page size than the OS's page size
+                * (used as default for all other heaps), configure one MMU config range
+                * for the Non4K heap
+                */
+               if (ui32GeneralNon4KHeapPageSize != ui32RgxDefaultPageSize)
+               {
+                       psDevInfo->aui64MMUPageSizeRangeValue[0] =
+                                       RGXMMUInit_GetConfigRangeValue(ui32GeneralNon4KHeapPageSize,
+                                                                                                  RGX_GENERAL_NON4K_HEAP_BASE,
+                                                                                                  RGX_GENERAL_NON4K_HEAP_SIZE);
+               }
+       }
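+       /* When the Non4K page size differs from the default, range 0 is programmed
+        * with the general NON4K heap base/size at the Non4K page size, while the
+        * last range remains a whole-address-space catch-all at the OS-derived
+        * default; precedence between overlapping ranges is a property of the MMU
+        * hardware and is not visible here.
+        */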
+
+#if (RGX_NUM_OS_SUPPORTED > 1)
+       if (PVRSRV_VZ_MODE_IS(HOST))
+       {
+               IMG_UINT32 ui32OSid;
+
+               /* Create additional raw firmware heaps */
+               for (ui32OSid = RGX_FIRST_RAW_HEAP_OSID; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++)
+               {
+                       if (RGXInitFwRawHeap(psDeviceMemoryHeapCursor, ui32OSid) != PVRSRV_OK)
+                       {
+                               /* if any allocation fails, free previously allocated heaps and abandon initialisation */
+                               for (; ui32OSid > RGX_FIRST_RAW_HEAP_OSID; ui32OSid--)
+                               {
+                                       RGXDeInitFwRawHeap(psDeviceMemoryHeapCursor);
+                                       psDeviceMemoryHeapCursor--;
+                               }
+                               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto e1;
+                       }
+
+                       /* Append additional firmware heaps to host driver firmware context heap configuration */
+                       psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps += 1;
+
+                       /* advance to the next heap */
+                       psDeviceMemoryHeapCursor++;
+               }
+       }
+#endif /* (RGX_NUM_OS_SUPPORTED > 1) */
+
+       return PVRSRV_OK;
+e1:
+       OSFreeMem(psNewMemoryInfo->psDeviceMemoryHeap);
+e0:
+       return eError;
+}
+
+static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo)
+{
+#if (RGX_NUM_OS_SUPPORTED > 1)
+       if (PVRSRV_VZ_MODE_IS(HOST))
+       {
+               IMG_UINT32 ui32OSid;
+               DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor = psDevMemoryInfo->psDeviceMemoryHeap;
+
+               /* Delete all guest firmware heaps */
+               for (ui32OSid = RGX_FIRST_RAW_HEAP_OSID; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++)
+               {
+                       RGXDeInitFwRawHeap(psDeviceMemoryHeapCursor);
+                       psDeviceMemoryHeapCursor++;
+               }
+       }
+#endif /* (RGX_NUM_OS_SUPPORTED > 1) */
+
+       OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeapConfigArray);
+       OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeap);
+}
+
+static PVRSRV_ERROR RGXPhysMemDeviceHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PHYS_HEAP_CONFIG *psFwMainConfig = FindPhysHeapConfig(psDeviceNode->psDevConfig,
+                                                                                                                  PHYS_HEAP_USAGE_FW_MAIN);
+
+#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)
+       /* VZ heap validation */
+       if (PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               PVR_LOG_RETURN_IF_FALSE(psFwMainConfig != NULL,
+                                                               "FW Main heap is required for VZ Guest.",
+                                                               PVRSRV_ERROR_PHYSHEAP_CONFIG);
+       }
+#endif
+
+       if (psFwMainConfig != NULL)
+       {
+               /* Check FW_MAIN for multiple usage flags. Because FW_MAIN is divided
+                  into subheaps, shared usage with other heaps is not allowed.  */
+               PVR_LOG_RETURN_IF_FALSE(psFwMainConfig->ui32UsageFlags == PHYS_HEAP_USAGE_FW_MAIN,
+                                                               "FW Main phys heap config specified with more than one usage. FW Main must be FW Main only.",
+                                                               PVRSRV_ERROR_PHYSHEAP_CONFIG);
+       }
+
+       if (psFwMainConfig == NULL)
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap not set", __func__));
+       }
+       else if (psFwMainConfig->eType == PHYS_HEAP_TYPE_UMA)
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap uses OS System memory (UMA)", __func__));
+       }
+       else /* PHYS_HEAP_TYPE_LMA or PHYS_HEAP_TYPE_DMA */
+       {
+               PHYS_HEAP_CONFIG sFwHeapConfig;
+
+               PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap uses local memory managed by the driver (LMA)", __func__));
+
+               PVR_LOG_GOTO_IF_FALSE(psFwMainConfig->uiSize >= RGX_FIRMWARE_RAW_HEAP_SIZE,
+                                                         "Invalid firmware physical heap size.", ErrorDeinit);
+
+#if defined(SUPPORT_AUTOVZ)
+               if (PVRSRV_VZ_MODE_IS(HOST))
+               {
+                       /* 1 Mb can hold the maximum amount of page tables for the memory shared between the firmware and all KM drivers:
+                        *  MAX(RAW_HEAP_SIZE) = 32 Mb; MAX(NUMBER_OS) = 8; Total shared memory = 256 Mb;
+                        *  MMU objects required: 65536 PTEs; 16 PDEs; 1 PCE; */
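+                       /* Rough arithmetic behind the 1 Mb figure (assuming a 4 KB MMU
+                        * granule and 8-byte table entries, stated purely for
+                        * illustration): 256 Mb / 4 KB = 65536 PTEs ~ 512 KB of tables,
+                        * leaving ample headroom for the 16 PDEs and the single PCE. */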
+                       IMG_UINT64 uMaxFwMmuPageTableSize = 1 * 1024 * 1024;
+
+                       sFwHeapConfig = *psFwMainConfig;
+
+                       /* By default the firmware MMU's page tables are allocated from the same carveout memory as the firmware heap.
+                        * If a different base address is specified for this reserved range, use the overriding define instead. */
+#if defined(PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR)
+                       sFwHeapConfig.sStartAddr.uiAddr = PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR;
+                       sFwHeapConfig.sCardBase.uiAddr = PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR;
+#else
+                       sFwHeapConfig.sStartAddr.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE * RGX_NUM_OS_SUPPORTED;
+                       sFwHeapConfig.sCardBase.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE * RGX_NUM_OS_SUPPORTED;
+#endif
+
+                       sFwHeapConfig.uiSize = uMaxFwMmuPageTableSize;
+                       sFwHeapConfig.ui32UsageFlags = 0;
+
+                       eError = PhysmemCreateHeapLMA(psDeviceNode, &sFwHeapConfig, "Fw MMU subheap",
+                                                     &psDeviceNode->psFwMMUReservedPhysHeap);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateHeapLMA:MMU", ErrorDeinit);
+               }
+#endif
+
+               /* Subheap layout: Main + (optional MIPS reserved range) + Config */
+               sFwHeapConfig = *psFwMainConfig;
+               sFwHeapConfig.uiSize = RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE;
+               sFwHeapConfig.ui32UsageFlags = PHYS_HEAP_USAGE_FW_MAIN;
+
+               eError = PhysmemCreateHeapLMA(psDeviceNode, &sFwHeapConfig, "Fw Main subheap", &psDeviceNode->psFWMainPhysHeap);
+               PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateHeapLMA:MAIN", ErrorDeinit);
+
+               sFwHeapConfig = *psFwMainConfig;
+               sFwHeapConfig.sStartAddr.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE;
+               sFwHeapConfig.sCardBase.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE;
+               sFwHeapConfig.uiSize = RGX_FIRMWARE_CONFIG_HEAP_SIZE;
+               sFwHeapConfig.ui32UsageFlags = PHYS_HEAP_USAGE_FW_CONFIG;
+
+               eError = PhysmemCreateHeapLMA(psDeviceNode, &sFwHeapConfig, "Fw Cfg subheap", &psDeviceNode->psFWCfgPhysHeap);
+               PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateHeapLMA:CFG", ErrorDeinit);
+       }
+
+       /* Acquire FW heaps */
+       eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_MAIN, psDeviceNode,
+                                                                                 &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_MAIN", ErrorDeinit);
+
+       eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_CONFIG, psDeviceNode,
+                                                                                 &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CONFIG]);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_CONFIG", ErrorDeinit);
+
+       eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_CODE, psDeviceNode,
+                                                                                 &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CODE]);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_CODE", ErrorDeinit);
+
+       eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_PRIV_DATA, psDeviceNode,
+                                                                                 &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PRIV_DATA]);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_DATA", ErrorDeinit);
+
+       return eError;
+
+ErrorDeinit:
+       PVR_ASSERT(IMG_FALSE);
+       PVRSRVPhysMemHeapsDeinit(psDeviceNode);
+
+       return eError;
+}
+
+static void _ReadNon4KHeapPageSize(IMG_UINT32 *pui32Log2Non4KPgSize)
+{
+       void *pvAppHintState = NULL;
+       IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE;
+       IMG_UINT32 ui32GeneralNon4KHeapPageSize;
+
+       /* Get the NON4K heap page size from the GeneralNon4KHeapPageSize AppHint */
+       OSCreateKMAppHintState(&pvAppHintState);
+       OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState,
+                            GeneralNon4KHeapPageSize, &ui32AppHintDefault, &ui32GeneralNon4KHeapPageSize);
+       *pui32Log2Non4KPgSize = ExactLog2(ui32GeneralNon4KHeapPageSize);
+       OSFreeKMAppHintState(pvAppHintState);
+}
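+
+/* The AppHint holds a page size in bytes; ExactLog2 converts it to the log2
+ * form returned through pui32Log2Non4KPgSize, which the caller is expected to
+ * store as psDevInfo->ui32Log2Non4KPgSize for use by the NON4K heap blueprint
+ * and the MMU page-size-range setup above.
+ */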
+
+/* RGXRegisterDevice
+ *
+ * WARNING!
+ *
+ * No PDUMP statements are allowed until Part 2 of the device initialisation
+ * is reached.
+ */
+PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR eError;
+       DEVICE_MEMORY_INFO *psDevMemoryInfo;
+       PVRSRV_RGXDEV_INFO      *psDevInfo;
+       void *pvAppHintState = NULL;
+       IMG_UINT32 ui32AppHintDefault = HWPERF_HOST_TL_STREAM_SIZE_DEFAULT, ui32HWPerfHostBufSizeKB;
+
+       OSCreateKMAppHintState(&pvAppHintState);
+       OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, HWPerfHostBufSizeInKB,
+                            &ui32AppHintDefault, &ui32HWPerfHostBufSizeKB);
+       OSFreeKMAppHintState(pvAppHintState);
+       pvAppHintState = NULL;
+
+       /*********************
+        * Device node setup *
+        *********************/
+       /* Setup static data and callbacks on the device agnostic device node */
+#if defined(PDUMP)
+       psDeviceNode->sDevId.pszPDumpRegName    = RGX_PDUMPREG_NAME;
+       psDeviceNode->sDevId.pszPDumpDevName    = PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL]);
+       psDeviceNode->pfnPDumpInitDevice = &RGXResetPDump;
+       psDeviceNode->ui64FBCClearColour = RGX_FBC_CC_DEFAULT;
+
+#endif /* PDUMP */
+
+       OSAtomicWrite(&psDeviceNode->eHealthStatus, PVRSRV_DEVICE_HEALTH_STATUS_OK);
+       OSAtomicWrite(&psDeviceNode->eHealthReason, PVRSRV_DEVICE_HEALTH_REASON_NONE);
+
+       psDeviceNode->pfnDevSLCFlushRange = RGXSLCFlushRange;
+       psDeviceNode->pfnInvalFBSCTable = RGXInvalidateFBSCTable;
+
+       psDeviceNode->pfnValidateOrTweakPhysAddrs = NULL;
+
+       psDeviceNode->pfnMMUCacheInvalidate = RGXMMUCacheInvalidate;
+
+       psDeviceNode->pfnMMUCacheInvalidateKick = RGXMMUCacheInvalidateKick;
+
+       psDeviceNode->pfnInitDeviceCompatCheck  = &RGXDevInitCompatCheck;
+
+       /* Register callbacks for creation of device memory contexts */
+       psDeviceNode->pfnRegisterMemoryContext = RGXRegisterMemoryContext;
+       psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext;
+
+       /* Register callbacks for Unified Fence Objects */
+       psDeviceNode->pfnAllocUFOBlock = RGXAllocUFOBlock;
+       psDeviceNode->pfnFreeUFOBlock = RGXFreeUFOBlock;
+
+       /* Register callback for checking the device's health */
+       psDeviceNode->pfnUpdateHealthStatus = PVRSRV_VZ_MODE_IS(GUEST) ? NULL : RGXUpdateHealthStatus;
+
+#if defined(SUPPORT_AUTOVZ)
+       /* Register callback for updating the virtualization watchdog */
+       psDeviceNode->pfnUpdateAutoVzWatchdog = RGXUpdateAutoVzWatchdog;
+#endif
+
+       /* Register method to service the FW HWPerf buffer */
+       psDeviceNode->pfnServiceHWPerf = RGXHWPerfDataStoreCB;
+
+       /* Register callback for getting the device version information string */
+       psDeviceNode->pfnDeviceVersionString = RGXDevVersionString;
+
+       /* Register callback for getting the device clock speed */
+       psDeviceNode->pfnDeviceClockSpeed = RGXDevClockSpeed;
+
+       /* Register callback for soft resetting some device modules */
+       psDeviceNode->pfnSoftReset = RGXSoftReset;
+
+       /* Register callback for resetting the HWR logs */
+       psDeviceNode->pfnResetHWRLogs = RGXResetHWRLogs;
+
+       /* Register callback for verifying the device BVNC */
+       psDeviceNode->pfnVerifyBVNC = RGXVerifyBVNC;
+
+       /* Register callback for checking alignment of UM structures */
+       psDeviceNode->pfnAlignmentCheck = RGXAlignmentCheck;
+
+       /* Register callback for checking the supported features and getting the
+        * corresponding values */
+       psDeviceNode->pfnCheckDeviceFeature = RGXBvncCheckFeatureSupported;
+       psDeviceNode->pfnGetDeviceFeatureValue = RGXBvncGetSupportedFeatureValue;
+
+       /* Callback for checking if system layer supports FBC 3.1 */
+       psDeviceNode->pfnHasFBCDCVersion31 = NULL;
+
+       /* Callback for getting the MMU device attributes */
+       psDeviceNode->pfnGetMMUDeviceAttributes = RGXDevMMUAttributes;
+
+       /* Register callback for initialising device-specific physical memory heaps */
+       psDeviceNode->pfnPhysMemDeviceHeapsInit = RGXPhysMemDeviceHeapsInit;
+
+       /* Set up required support for dummy page */
+       OSAtomicWrite(&(psDeviceNode->sDummyPage.atRefCounter), 0);
+       OSAtomicWrite(&(psDeviceNode->sDevZeroPage.atRefCounter), 0);
+
+       /* Set the order to 0 */
+       psDeviceNode->sDummyPage.sPageHandle.uiOrder = 0;
+       psDeviceNode->sDevZeroPage.sPageHandle.uiOrder = 0;
+
+       /* Set the size of the Dummy page to zero */
+       psDeviceNode->sDummyPage.ui32Log2PgSize = 0;
+
+       /* Set the size of the Zero page to zero */
+       psDeviceNode->sDevZeroPage.ui32Log2PgSize = 0;
+
+       /* Set the Dummy page phys addr */
+       psDeviceNode->sDummyPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR;
+
+       /* Set the Zero page phys addr */
+       psDeviceNode->sDevZeroPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR;
+
+       /* The lock can be acquired from MISR (Z-buffer) path */
+       eError = OSLockCreate(&psDeviceNode->sDummyPage.psPgLock);
+       if (PVRSRV_OK != eError)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create dummy page lock", __func__));
+               return eError;
+       }
+
+       /* Create the lock for zero page */
+       eError = OSLockCreate(&psDeviceNode->sDevZeroPage.psPgLock);
+       if (PVRSRV_OK != eError)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create Zero page lock", __func__));
+               goto free_dummy_page;
+       }
+#if defined(PDUMP)
+       psDeviceNode->sDummyPage.hPdumpPg = NULL;
+       psDeviceNode->sDevZeroPage.hPdumpPg = NULL;
+#endif
+
+       psDeviceNode->pfnHasFBCDCVersion31 = RGXSystemHasFBCDCVersion31;
+
+       /* The device shared-virtual-memory heap address-space size is stored here for faster
+          look-up without having to walk the device heap configuration structures during
+          client device connection  (i.e. this size is relative to a zero-based offset) */
+       psDeviceNode->ui64GeneralSVMHeapTopVA = RGX_GENERAL_SVM_HEAP_BASE + RGX_GENERAL_SVM_HEAP_SIZE;
+
+       /*********************
+        * Device info setup *
+        *********************/
+       /* Allocate device control block */
+       psDevInfo = OSAllocZMem(sizeof(*psDevInfo));
+       if (psDevInfo == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed to alloc memory for DevInfo", __func__));
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       /* create locks for the context lists stored in the DevInfo structure.
+        * these lists are modified on context create/destroy and read by the
+        * watchdog thread
+        */
+
+       eError = OSWRLockCreate(&(psDevInfo->hRenderCtxListLock));
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create render context list lock", __func__));
+               goto e0;
+       }
+
+       eError = OSWRLockCreate(&(psDevInfo->hComputeCtxListLock));
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create compute context list lock", __func__));
+               goto e1;
+       }
+
+       eError = OSWRLockCreate(&(psDevInfo->hTransferCtxListLock));
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create transfer context list lock", __func__));
+               goto e2;
+       }
+
+       eError = OSWRLockCreate(&(psDevInfo->hTDMCtxListLock));
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create TDM context list lock", __func__));
+               goto e3;
+       }
+
+       eError = OSWRLockCreate(&(psDevInfo->hKickSyncCtxListLock));
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create kick sync context list lock", __func__));
+               goto e4;
+       }
+
+       eError = OSWRLockCreate(&(psDevInfo->hMemoryCtxListLock));
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create memory context list lock", __func__));
+               goto e5;
+       }
+
+       eError = OSSpinLockCreate(&psDevInfo->hLockKCCBDeferredCommandsList);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create KCCB deferred commands list lock", __func__));
+               goto e6;
+       }
+       dllist_init(&(psDevInfo->sKCCBDeferredCommandsListHead));
+
+       dllist_init(&(psDevInfo->sRenderCtxtListHead));
+       dllist_init(&(psDevInfo->sComputeCtxtListHead));
+       dllist_init(&(psDevInfo->sTDMCtxtListHead));
+       dllist_init(&(psDevInfo->sKickSyncCtxtListHead));
+
+       dllist_init(&(psDevInfo->sCommonCtxtListHead));
+       psDevInfo->ui32CommonCtxtCurrentID = 1;
+
+       eError = OSWRLockCreate(&psDevInfo->hCommonCtxtListLock);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create common context list lock", __func__));
+               goto e7;
+       }
+
+       eError = OSLockCreate(&psDevInfo->sRegCongfig.hLock);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create register configuration lock", __func__));
+               goto e8;
+       }
+
+       eError = OSLockCreate(&psDevInfo->hBPLock);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for break points", __func__));
+               goto e9;
+       }
+
+       eError = OSLockCreate(&psDevInfo->hRGXFWIfBufInitLock);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for trace buffers", __func__));
+               goto e10;
+       }
+
+       eError = OSLockCreate(&psDevInfo->hCCBStallCheckLock);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create stalled CCB checking lock", __func__));
+               goto e11;
+       }
+       eError = OSLockCreate(&psDevInfo->hCCBRecoveryLock);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create stalled CCB recovery lock", __func__));
+               goto e12;
+       }
+
+       dllist_init(&psDevInfo->sMemoryContextList);
+
+       /* initialise ui32SLRHoldoffCounter */
+       if (RGX_INITIAL_SLR_HOLDOFF_PERIOD_MS > DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT)
+       {
+               psDevInfo->ui32SLRHoldoffCounter = RGX_INITIAL_SLR_HOLDOFF_PERIOD_MS / DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT;
+       }
+       else
+       {
+               psDevInfo->ui32SLRHoldoffCounter = 0;
+       }
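+       /* Worked example (illustrative values only): a 500 ms holdoff period with a
+        * 100 ms watchdog power-on sleep timeout gives an initial counter of 5; a
+        * holdoff shorter than one watchdog period leaves the counter at 0, i.e. no holdoff. */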
+
+       /* Setup static data and callbacks on the device specific device info */
+       psDevInfo->psDeviceNode         = psDeviceNode;
+
+       psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+       psDevInfo->pvDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
+
+       /*
+        * Map RGX Registers
+        */
+       psDevInfo->ui32RegSize = psDeviceNode->psDevConfig->ui32RegsSize;
+       psDevInfo->sRegsPhysBase = psDeviceNode->psDevConfig->sRegsCpuPBase;
+
+#if !defined(NO_HARDWARE)
+       psDevInfo->pvRegsBaseKM = (void __iomem *) OSMapPhysToLin(psDeviceNode->psDevConfig->sRegsCpuPBase,
+                                                                                               psDeviceNode->psDevConfig->ui32RegsSize,
+                                                                                    PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+
+       if (psDevInfo->pvRegsBaseKM == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "PVRSRVRGXInitDevPart2KM: Failed to create RGX register mapping"));
+               eError = PVRSRV_ERROR_BAD_MAPPING;
+               goto e13;
+       }
+#else
+       psDevInfo->pvRegsBaseKM = NULL;
+#endif /* !NO_HARDWARE */
+
+       psDeviceNode->pvDevice = psDevInfo;
+
+       eError = RGXBvncInitialiseConfiguration(psDeviceNode);
+       if (PVRSRV_OK != eError)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Unsupported HW device detected by driver",
+                        __func__));
+               goto e14;
+       }
+
+       _ReadNon4KHeapPageSize(&psDevInfo->ui32Log2Non4KPgSize);
+
+       /* Set the zero & dummy page sizes as needed for the heap with the largest page size */
+       psDeviceNode->sDevZeroPage.ui32Log2PgSize = psDevInfo->ui32Log2Non4KPgSize;
+       psDeviceNode->sDummyPage.ui32Log2PgSize = psDevInfo->ui32Log2Non4KPgSize;
+
+       /* Configure MMU specific stuff */
+       RGXMMUInit_Register(psDeviceNode);
+
+       eError = RGXInitHeaps(psDevInfo, psDevMemoryInfo);
+       if (eError != PVRSRV_OK)
+       {
+               goto e14;
+       }
+
+       eError = RGXHWPerfInit(psDevInfo);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInit", e14);
+
+       eError = RGXHWPerfHostInit(psDeviceNode->pvDevice, ui32HWPerfHostBufSizeKB);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfHostInit", ErrorDeInitHWPerfFw);
+
+#if defined(SUPPORT_VALIDATION)
+       eError = RGXPowerDomainInitState(&psDevInfo->sPowerDomainState,
+                                                                               psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount);
+       if (eError != PVRSRV_OK)
+       {
+               goto ErrorDeInitHWPerfHost;
+       }
+
+       /* This completion will be signaled by the ISR when processing
+        * the answer CCB command carrying an RGX Register read value */
+       init_completion(&psDevInfo->sFwRegs.sRegComp);
+       psDevInfo->sFwRegs.ui64RegVal = 0;
+
+#if defined(SUPPORT_SOC_TIMER)
+       {
+               IMG_BOOL bAppHintDefault = IMG_FALSE;
+               IMG_BOOL bInitSocTimer;
+               void *pvAppHintState = NULL;
+
+               OSCreateKMAppHintState(&pvAppHintState);
+               OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, ValidateSOCUSCTimer, &bAppHintDefault, &bInitSocTimer);
+               OSFreeKMAppHintState(pvAppHintState);
+
+               if (bInitSocTimer)
+               {
+                       eError = RGXInitSOCUSCTimer(psDeviceNode);
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXInitSOCUSCTimer", e16);
+               }
+       }
+#endif
+#endif
+
+       /* Register callback for dumping debug info */
+       eError = RGXDebugInit(psDevInfo);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXDebugInit", e16);
+
+       /* Initialise the device dependent bridges */
+       eError = DeviceDepBridgeInit(psDevInfo);
+       PVR_LOG_IF_ERROR(eError, "DeviceDepBridgeInit");
+
+       /* Initialise error counters */
+       memset(&psDevInfo->sErrorCounts, 0, sizeof(PVRSRV_RGXDEV_ERROR_COUNTS));
+
+       return PVRSRV_OK;
+
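+/* The labels below unwind, in reverse order of creation, everything set up above:
+ * power-domain state (validation builds only), HWPerf host and FW resources, the
+ * register mapping, the context-list and miscellaneous locks, the DevInfo
+ * allocation and finally the zero/dummy page locks. */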
+e16:
+#if defined(SUPPORT_VALIDATION)
+       RGXPowerDomainDeInitState(&psDevInfo->sPowerDomainState);
+ErrorDeInitHWPerfHost:
+#endif
+       RGXHWPerfHostDeInit(psDevInfo);
+ErrorDeInitHWPerfFw:
+       RGXHWPerfDeinit(psDevInfo);
+e14:
+#if !defined(NO_HARDWARE)
+       OSUnMapPhysToLin((void __force *) psDevInfo->pvRegsBaseKM,
+                                                        psDevInfo->ui32RegSize);
+
+e13:
+#endif /* !NO_HARDWARE */
+       OSLockDestroy(psDevInfo->hCCBRecoveryLock);
+e12:
+       OSLockDestroy(psDevInfo->hCCBStallCheckLock);
+e11:
+       OSLockDestroy(psDevInfo->hRGXFWIfBufInitLock);
+e10:
+       OSLockDestroy(psDevInfo->hBPLock);
+e9:
+       OSLockDestroy(psDevInfo->sRegCongfig.hLock);
+e8:
+       OSWRLockDestroy(psDevInfo->hCommonCtxtListLock);
+e7:
+       OSSpinLockDestroy(psDevInfo->hLockKCCBDeferredCommandsList);
+e6:
+       OSWRLockDestroy(psDevInfo->hMemoryCtxListLock);
+e5:
+       OSWRLockDestroy(psDevInfo->hKickSyncCtxListLock);
+e4:
+       OSWRLockDestroy(psDevInfo->hTDMCtxListLock);
+e3:
+       OSWRLockDestroy(psDevInfo->hTransferCtxListLock);
+e2:
+       OSWRLockDestroy(psDevInfo->hComputeCtxListLock);
+e1:
+       OSWRLockDestroy(psDevInfo->hRenderCtxListLock);
+e0:
+       OSFreeMem(psDevInfo);
+
+       /* Destroy the zero page lock created above */
+       OSLockDestroy(psDeviceNode->sDevZeroPage.psPgLock);
+
+free_dummy_page:
+       /* Destroy the dummy page lock created above */
+       OSLockDestroy(psDeviceNode->sDummyPage.psPgLock);
+
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+IMG_PCHAR RGXDevBVNCString(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       IMG_PCHAR psz = psDevInfo->sDevFeatureCfg.pszBVNCString;
+       if (NULL == psz)
+       {
+               IMG_CHAR pszBVNCInfo[RGX_HWPERF_MAX_BVNC_LEN];
+               size_t uiBVNCStringSize;
+               size_t uiStringLength;
+
+               uiStringLength = OSSNPrintf(pszBVNCInfo, RGX_HWPERF_MAX_BVNC_LEN, "%d.%d.%d.%d",
+                               psDevInfo->sDevFeatureCfg.ui32B,
+                               psDevInfo->sDevFeatureCfg.ui32V,
+                               psDevInfo->sDevFeatureCfg.ui32N,
+                               psDevInfo->sDevFeatureCfg.ui32C);
+               PVR_ASSERT(uiStringLength < RGX_HWPERF_MAX_BVNC_LEN);
+
+               uiBVNCStringSize = (uiStringLength + 1) * sizeof(IMG_CHAR);
+               psz = OSAllocMem(uiBVNCStringSize);
+               if (NULL != psz)
+               {
+                       OSCachedMemCopy(psz, pszBVNCInfo, uiBVNCStringSize);
+                       psDevInfo->sDevFeatureCfg.pszBVNCString = psz;
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_MESSAGE,
+                                       "%s: Allocating memory for BVNC Info string failed",
+                                       __func__));
+               }
+       }
+
+       return psz;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXDevVersionString
+@Description    Gets the version string for the given device node and returns
+                a pointer to it in ppszVersionString. It is then the
+                responsibility of the caller to free this memory.
+@Input          psDeviceNode            Device node from which to obtain the
+                                        version string
+@Output                ppszVersionString       Contains the version string upon return
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                       IMG_CHAR **ppszVersionString)
+{
+#if defined(COMPAT_BVNC_MASK_B) || defined(COMPAT_BVNC_MASK_V) || defined(COMPAT_BVNC_MASK_N) || defined(COMPAT_BVNC_MASK_C) || defined(NO_HARDWARE) || defined(EMULATOR)
+       const IMG_CHAR szFormatString[] = "GPU variant BVNC: %s (SW)";
+#else
+       const IMG_CHAR szFormatString[] = "GPU variant BVNC: %s (HW)";
+#endif
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       IMG_PCHAR pszBVNC;
+       size_t uiStringLength;
+
+       if (psDeviceNode == NULL || ppszVersionString == NULL)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+       pszBVNC = RGXDevBVNCString(psDevInfo);
+
+       if (NULL == pszBVNC)
+       {
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       uiStringLength = OSStringLength(pszBVNC);
+       uiStringLength += (sizeof(szFormatString) - 2); /* sizeof includes the null, -2 for "%s" */
+       *ppszVersionString = OSAllocMem(uiStringLength * sizeof(IMG_CHAR));
+       if (*ppszVersionString == NULL)
+       {
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       OSSNPrintf(*ppszVersionString, uiStringLength, szFormatString,
+               pszBVNC);
+
+       return PVRSRV_OK;
+}
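+/* Illustrative caller sketch (not part of the driver): the returned string is
+ * heap-allocated and owned by the caller, e.g.
+ *
+ *     IMG_CHAR *pszVersion = NULL;
+ *     if (psDeviceNode->pfnDeviceVersionString(psDeviceNode, &pszVersion) == PVRSRV_OK)
+ *     {
+ *             PVR_LOG(("%s", pszVersion));
+ *             OSFreeMem(pszVersion);
+ *     }
+ */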
+
+/**************************************************************************/ /*!
+@Function       RGXDevClockSpeed
+@Description    Gets the clock speed for the given device node and returns
+                it in pui32RGXClockSpeed.
+@Input          psDeviceNode           Device node
+@Output         pui32RGXClockSpeed  Variable for storing the clock speed
+@Return         PVRSRV_ERROR
+*/ /***************************************************************************/
+static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                       IMG_PUINT32  pui32RGXClockSpeed)
+{
+       RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+
+       /* get clock speed */
+       *pui32RGXClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+
+       return PVRSRV_OK;
+}
+
+#if (RGX_NUM_OS_SUPPORTED > 1)
+/*!
+ *******************************************************************************
+
+ @Function             RGXInitFwRawHeap
+
+ @Description  Called to initialise the firmware raw heap blueprint for the given OSID
+ ******************************************************************************/
+static PVRSRV_ERROR RGXInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32OSid)
+{
+       IMG_UINT32 uiStringLength;
+       IMG_UINT32 uiStringLengthMax = 32;
+
+       IMG_UINT32 ui32Log2RgxDefaultPageShift = RGXHeapDerivePageSize(OSGetPageShift());
+
+       uiStringLength = MIN(sizeof(RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT), uiStringLengthMax + 1);
+
+       /* Start by allocating memory for this OSID heap identification string */
+       psDevMemHeap->pszName = OSAllocMem(uiStringLength * sizeof(IMG_CHAR));
+       if (psDevMemHeap->pszName == NULL)
+       {
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       /* Append the OSID number to the RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT string */
+       OSSNPrintf((IMG_CHAR *)psDevMemHeap->pszName, uiStringLength, RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSid);
+
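+       /* Each OSID's raw firmware heap occupies a fixed-size slot at
+        * RGX_FIRMWARE_RAW_HEAP_BASE + ui32OSid * RGX_FIRMWARE_RAW_HEAP_SIZE; for
+        * illustration only, with a (hypothetical) 32 MB raw heap size the OSID 2
+        * heap would start 64 MB above the base. */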
+       /* Use the common blueprint template support function to initialise the heap */
+       HeapCfgBlueprintInit(psDevMemHeap->pszName,
+                                RGX_FIRMWARE_RAW_HEAP_BASE + (ui32OSid * RGX_FIRMWARE_RAW_HEAP_SIZE),
+                                RGX_FIRMWARE_RAW_HEAP_SIZE,
+                                0,
+                                ui32Log2RgxDefaultPageShift,
+                                0,
+                                psDevMemHeap);
+
+       return PVRSRV_OK;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function             RGXDeInitFwRawHeap
+
+ @Description  Called to release the resources allocated by RGXInitFwRawHeap
+ ******************************************************************************/
+static void RGXDeInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap)
+{
+       IMG_UINT64 uiBase = RGX_FIRMWARE_RAW_HEAP_BASE + RGX_FIRMWARE_RAW_HEAP_SIZE;
+       IMG_UINT64 uiSpan = uiBase + ((RGX_NUM_OS_SUPPORTED - 1) * RGX_FIRMWARE_RAW_HEAP_SIZE);
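+       /* Addresses in [uiBase, uiSpan) belong to the guest OSID heaps
+        * (OSIDs 1..RGX_NUM_OS_SUPPORTED-1), whose identification strings were
+        * allocated in RGXInitFwRawHeap() and so must be freed here. */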
+
+       /* Safe to do as the guest firmware heaps are last in the list */
+       if (psDevMemHeap->sHeapBaseAddr.uiAddr >= uiBase &&
+           psDevMemHeap->sHeapBaseAddr.uiAddr < uiSpan)
+       {
+               void *pszName = (void*)psDevMemHeap->pszName;
+               OSFreeMem(pszName);
+       }
+}
+#endif /* (RGX_NUM_OS_SUPPORTED > 1) */
+
+/******************************************************************************
+ End of file (rgxinit.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxinit.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxinit.h
new file mode 100644 (file)
index 0000000..87d8509
--- /dev/null
@@ -0,0 +1,308 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX initialisation header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX initialisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXINIT_H)
+#define RGXINIT_H
+
+#include "connection_server.h"
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgx_bridge.h"
+#include "fwload.h"
+
+#if defined(__linux__)
+#define OS_FW_VERIFY_FUNCTION OSVerifyFirmware
+#else
+#define OS_FW_VERIFY_FUNCTION NULL
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function     RGXInitDevPart2
+
+ @Description
+
+ Second part of server-side RGX initialisation
+
+ @Input psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXInitDevPart2 (PVRSRV_DEVICE_NODE       *psDeviceNode,
+                                                         IMG_UINT32                    ui32DeviceFlags,
+                                                         IMG_UINT32                    ui32HWPerfHostFilter,
+                                                         RGX_ACTIVEPM_CONF             eActivePMConf,
+                                                         IMG_UINT32                    ui32AvailableSPUMask,
+                                                         IMG_UINT32                    ui32AvailableRACMask);
+
+PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE   *psDeviceNode,
+                                  IMG_DEVMEM_SIZE_T    ui32FWCodeLen,
+                                  IMG_DEVMEM_SIZE_T    ui32FWDataLen,
+                                  IMG_DEVMEM_SIZE_T    uiFWCorememCodeLen,
+                                  IMG_DEVMEM_SIZE_T    uiFWCorememDataLen);
+
+
+/*!
+*******************************************************************************
+
+ @Function     RGXInitFirmware
+
+ @Description
+
+ Server-side RGX firmware initialisation
+
+ @Input psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR
+RGXInitFirmware(PVRSRV_DEVICE_NODE       *psDeviceNode,
+                IMG_BOOL                 bEnableSignatureChecks,
+                IMG_UINT32               ui32SignatureChecksBufSize,
+                IMG_UINT32               ui32HWPerfFWBufSizeKB,
+                IMG_UINT64               ui64HWPerfFilter,
+                IMG_UINT32               ui32ConfigFlags,
+                IMG_UINT32               ui32LogType,
+                IMG_UINT32               ui32FilterFlags,
+                IMG_UINT32               ui32JonesDisableMask,
+                IMG_UINT32               ui32HWRDebugDumpLimit,
+                IMG_UINT32                              ui32RenderKillingCtl,
+                IMG_UINT32                              ui32CDMTDMKillingCtl,
+                IMG_UINT32                              *pui32TPUTrilinearFracMask,
+                IMG_UINT32                              *pui32USRMNumRegions,
+                IMG_UINT64                              *pui64UVBRMNumRegions,
+                IMG_UINT32               ui32HWPerfCountersDataSize,
+                RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf,
+                FW_PERF_CONF             eFirmwarePerf,
+                IMG_UINT32               ui32KCCBSizeLog2,
+                IMG_UINT32               ui32ConfigFlagsExt,
+                IMG_UINT32               ui32AvailableSPUMask,
+                IMG_UINT32               ui32AvailableRACMask,
+                IMG_UINT32               ui32FwOsCfgFlags);
+
+
+/*!
+*******************************************************************************
+
+ @Function     RGXLoadAndGetFWData
+
+ @Description
+
+ Load FW and return pointer to FW data.
+
+ @Input psDeviceNode - device node
+
+ @Input ppsRGXFW - fw pointer
+
+ @Output ppbFWData - pointer to FW data (NULL if an error occurred)
+
+ @Return PVRSRV_ERROR - PVRSRV_OK on success
+                        PVRSRV_ERROR_NOT_READY if filesystem is not ready
+                        PVRSRV_ERROR_NOT_FOUND if no suitable FW image found
+                        PVRSRV_ERROR_OUT_OF_MEMORY if unable to alloc memory for FW image
+                        PVRSRV_ERROR_NOT_AUTHENTICATED if FW image failed verification
+
+******************************************************************************/
+PVRSRV_ERROR RGXLoadAndGetFWData(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                 OS_FW_IMAGE **ppsRGXFW,
+                                 const IMG_BYTE **ppbFWData);
+#if defined(PDUMP)
+/*!
+*******************************************************************************
+
+ @Function     RGXInitHWPerfCounters
+
+ @Description
+
+ Initialisation of the performance counters
+
+ @Input psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXInitHWPerfCounters(PVRSRV_DEVICE_NODE  *psDeviceNode);
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function     RGXRegisterDevice
+
+ @Description
+
+ Registers the device with the system
+
+ @Input:       psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+*******************************************************************************
+
+ @Function     RGXDevBVNCString
+
+ @Description
+
+ Returns the Device BVNC string. It will allocate and fill it first, if necessary.
+
+ @Input:   psDevInfo - device info (must not be null)
+
+ @Return   IMG_PCHAR - pointer to BVNC string
+
+******************************************************************************/
+IMG_PCHAR RGXDevBVNCString(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function     DevDeInitRGX
+
+ @Description
+
+ Reset and deinitialise Chip
+
+ @Input psDeviceNode - device info. structure
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+
+#if !defined(NO_HARDWARE)
+
+void RGX_WaitForInterruptsTimeout(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function     SORgxGpuUtilStatsRegister
+
+ @Description  SO Interface function called from the OS layer implementation.
+               Initialise data used to compute GPU utilisation statistics
+               for a particular user (identified by the handle passed as
+               argument). This function must be called only once for each
+               different user/handle.
+
+ @Input        phGpuUtilUser - Pointer to handle used to identify a user of
+                               RGXGetGpuUtilStats
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR SORgxGpuUtilStatsRegister(IMG_HANDLE *phGpuUtilUser);
+
+
+/*!
+*******************************************************************************
+
+ @Function     SORgxGpuUtilStatsUnregister
+
+ @Description  SO Interface function called from the OS layer implementation.
+               Free data previously used to compute GPU utilisation statistics
+               for a particular user (identified by the handle passed as
+               argument).
+
+ @Input        hGpuUtilUser - Handle used to identify a user of
+                              RGXGetGpuUtilStats
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR SORgxGpuUtilStatsUnregister(IMG_HANDLE hGpuUtilUser);
+#endif /* !defined(NO_HARDWARE) */
+
+/*!
+************************************************************************************
+ @Function             RGXSystemGetFabricCoherency
+
+ @Description  Get the system fabric coherency for the device by reading default
+                               configuration from device register, subject to AppHint overrides.
+
+ @Input                        psDeviceConfig          : Device configuration
+                               sRegsCpuPBase           : Device register CPU physical address base
+                               ui32RegsSize            : Device register size
+                               peDevFabricType         : Device memory bus fabric type
+                               peCacheSnoopingMode : Fabric coherency override
+
+ @Return               PVRSRV_ERROR
+************************************************************************************/
+PVRSRV_ERROR RGXSystemGetFabricCoherency(PVRSRV_DEVICE_CONFIG *psDeviceConfig,
+                                                                                IMG_CPU_PHYADDR sRegsCpuPBase,
+                                                                                IMG_UINT32 ui32RegsSize,
+                                                                                PVRSRV_DEVICE_FABRIC_TYPE *peDevFabricType,
+                                                                                PVRSRV_DEVICE_SNOOP_MODE *peCacheSnoopingMode);
+
+/*!
+ *******************************************************************************
+
+ @Function      RGXInitCreateFWKernelMemoryContext
+
+ @Description   Called to perform initialisation during firmware kernel context
+                creation.
+
+ @Input         psDeviceNode  device node
+ ******************************************************************************/
+PVRSRV_ERROR RGXInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+ *******************************************************************************
+
+ @Function      RGXDeInitDestroyFWKernelMemoryContext
+
+ @Description   Called to perform deinitialisation during firmware kernel
+                context destruction.
+
+ @Input         psDeviceNode  device node
+ ******************************************************************************/
+void RGXDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+#endif /* RGXINIT_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxlayer.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxlayer.h
new file mode 100644 (file)
index 0000000..42fdf1e
--- /dev/null
@@ -0,0 +1,510 @@
+/*************************************************************************/ /*!
+@File
+@Title          Header for Services abstraction layer
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declaration of an interface layer used to abstract code that
+                can be compiled outside of the DDK, potentially in a
+                completely different OS.
+                All the headers included by this file must also be copied to
+                the alternative source tree.
+                All the functions declared here must have a DDK implementation
+                inside the DDK source tree (e.g. rgxlayer_impl.h/.c) and
+                another different implementation in case they are used outside
+                of the DDK.
+                All of the functions accept as a first parameter a
+                "const void *hPrivate" argument. It should be used to pass
+                around any implementation specific data required.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXLAYER_H)
+#define RGXLAYER_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "img_elf.h"
+#include "pvrsrv_error.h" /* includes pvrsrv_errors.h */
+#include "pvrsrv_device.h"
+#include "rgx_bvnc_defs_km.h"
+#include "rgx_fw_info.h"
+#include "rgx_fwif_shared.h" /* includes rgx_common.h and mem_types.h */
+#include "rgx_meta.h"
+#include "rgx_riscv.h"
+
+#include "rgxdefs_km.h"
+/* includes:
+ * rgx_cr_defs_km.h,
+ * RGX_BVNC_CORE_KM_HEADER (rgxcore_km_B.V.N.C.h),
+ * RGX_BNC_CONFIG_KM_HEADER (rgxconfig_km_B.V.N.C.h)
+ */
+
+
+/*!
+*******************************************************************************
+
+ @Function       RGXMemCopy
+
+ @Description    MemCopy implementation
+
+ @Input          hPrivate   : Implementation specific data
+ @Input          pvDst      : Pointer to the destination
+ @Input          pvSrc      : Pointer to the source location
+ @Input          uiSize     : The amount of memory to copy in bytes
+
+ @Return         void
+
+******************************************************************************/
+void RGXMemCopy(const void *hPrivate,
+                void *pvDst,
+                void *pvSrc,
+                size_t uiSize);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXMemSet
+
+ @Description    MemSet implementation
+
+ @Input          hPrivate   : Implementation specific data
+ @Input          pvDst      : Pointer to the start of the memory region
+ @Input          ui8Value   : The value to be written
+ @Input          uiSize     : The number of bytes to be set to ui8Value
+
+ @Return         void
+
+******************************************************************************/
+void RGXMemSet(const void *hPrivate,
+               void *pvDst,
+               IMG_UINT8 ui8Value,
+               size_t uiSize);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXCommentLog
+
+ @Description    Generic log function used for debugging or other purposes
+
+ @Input          hPrivate   : Implementation specific data
+ @Input          pszString  : Message to be printed
+ @Input          ...        : Variadic arguments
+
+ @Return         void
+
+******************************************************************************/
+__printf(2, 3)
+void RGXCommentLog(const void *hPrivate,
+                   const IMG_CHAR *pszString,
+                   ...);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXErrorLog
+
+ @Description    Generic error log function used for debugging or other purposes
+
+ @Input          hPrivate   : Implementation specific data
+ @Input          pszString  : Message to be printed
+ @Input          ...        : Variadic arguments
+
+ @Return         void
+
+******************************************************************************/
+__printf(2, 3)
+void RGXErrorLog(const void *hPrivate,
+                 const IMG_CHAR *pszString,
+                 ...);
+
+/* This is used to check if a specific feature is enabled.
+ * Should be used instead of calling RGXDeviceHasFeature.  */
+#define RGX_DEVICE_HAS_FEATURE(hPrivate, Feature) \
+                       RGXDeviceHasFeature(hPrivate, RGX_FEATURE_##Feature##_BIT_MASK)
+
+/* This is used to check if a specific feature with an associated value is present.
+ * Should be used instead of calling RGXDeviceGetFeatureValue directly.  */
+#define RGX_DEVICE_HAS_FEATURE_VALUE(hPrivate, Feature) \
+                       (RGXDeviceGetFeatureValue(hPrivate, RGX_FEATURE_##Feature##_IDX) >= 0)
+
+/* This is used to get the value of a specific feature from hPrivate.
+ * Should be used instead of calling RGXDeviceGetFeatureValue.  */
+#define RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, Feature) \
+                       RGXDeviceGetFeatureValue(hPrivate, RGX_FEATURE_##Feature##_IDX)
+
+/* This is used to check if a specific ERN applies to the device.
+ * Should be used instead of calling RGXDeviceHasErnBrn directly.  */
+#define RGX_DEVICE_HAS_ERN(hPrivate, FixNum) \
+                       RGXDeviceHasErnBrn(hPrivate, HW_##FixNum##_BIT_MASK)
+
+/* This is used to check if a specific BRN applies to the device.
+ * Should be used instead of calling RGXDeviceHasErnBrn directly.  */
+#define RGX_DEVICE_HAS_BRN(hPrivate, FixNum) \
+                       RGXDeviceHasErnBrn(hPrivate, FIX_HW_##FixNum##_BIT_MASK)
+
+#define CLK_CTRL_FORCE_ON(X, Module) \
+                       X = (((X) & RGX_CR_##Module##_CLRMSK) | RGX_CR_##Module##_ON)
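+/* Illustrative usage of the helpers above (the feature/BRN names below are
+ * examples only, not a statement of what any particular core supports):
+ *
+ *     if (RGX_DEVICE_HAS_FEATURE(hPrivate, COMPUTE)) { ... }
+ *     if (RGX_DEVICE_HAS_BRN(hPrivate, 12345))       { ... apply workaround ... }
+ *     ui32Value = RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, SLC_SIZE_IN_KILOBYTES);
+ */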
+/*!
+*******************************************************************************
+
+ @Function       RGXDeviceGetFeatureValue
+
+ @Description    Gets the value associated with a given device feature
+
+ @Input          hPrivate     : Implementation specific data
+ @Input          ui64Feature  : Feature whose value is requested
+
+ @Return         Value >= 0 if the given feature is available, -1 otherwise
+
+******************************************************************************/
+IMG_INT32 RGXDeviceGetFeatureValue(const void *hPrivate, IMG_UINT64 ui64Feature);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXDeviceHasFeature
+
+ @Description    Checks if a device has a particular feature
+
+ @Input          hPrivate     : Implementation specific data
+ @Input          ui64Feature  : Feature to check
+
+ @Return         IMG_TRUE if the given feature is available, IMG_FALSE otherwise
+
+******************************************************************************/
+IMG_BOOL RGXDeviceHasFeature(const void *hPrivate, IMG_UINT64 ui64Feature);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXDeviceHasErnBrn
+
+ @Description    Checks if a device is affected by a particular erratum (BRN)
+                 or enhancement (ERN)
+
+ @Input          hPrivate     : Implementation specific data
+ @Input          ui64ErnsBrns : Flags to check
+
+ @Return         IMG_TRUE if the given ERN/BRN applies to the device, IMG_FALSE otherwise
+
+******************************************************************************/
+IMG_BOOL RGXDeviceHasErnBrn(const void *hPrivate, IMG_UINT64 ui64ErnsBrns);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXGetFWCorememSize
+
+ @Description    Get the FW coremem size
+
+ @Input          hPrivate   : Implementation specific data
+
+ @Return         FW coremem size
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_UINT32 RGXGetFWCorememSize(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function      RGXWriteReg32/64
+
+ @Description   Write a value to a 32/64 bit RGX register
+
+ @Input         hPrivate         : Implementation specific data
+ @Input         ui32RegAddr      : Register offset inside the register bank
+ @Input         ui32/64RegValue  : New register value
+
+ @Return        void
+
+******************************************************************************/
+void RGXWriteReg32(const void *hPrivate,
+                   IMG_UINT32 ui32RegAddr,
+                   IMG_UINT32 ui32RegValue);
+
+void RGXWriteReg64(const void *hPrivate,
+                   IMG_UINT32 ui32RegAddr,
+                   IMG_UINT64 ui64RegValue);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXReadReg32/64
+
+ @Description    Read a 32/64 bit RGX register
+
+ @Input          hPrivate     : Implementation specific data
+ @Input          ui32RegAddr  : Register offset inside the register bank
+
+ @Return         Register value
+
+******************************************************************************/
+IMG_UINT32 RGXReadReg32(const void *hPrivate,
+                        IMG_UINT32 ui32RegAddr);
+
+IMG_UINT64 RGXReadReg64(const void *hPrivate,
+                        IMG_UINT32 ui32RegAddr);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXReadModifyWriteReg64
+
+ @Description    Read-modify-write a 64 bit RGX register
+
+ @Input          hPrivate         : Implementation specific data.
+ @Input          ui32RegAddr      : Register offset inside the register bank.
+ @Input          ui64RegValue     : New register value.
+ @Input          ui64RegKeepMask  : Keep the bits set in the mask.
+
+ @Return         Always returns PVRSRV_OK
+
+******************************************************************************/
+IMG_UINT32 RGXReadModifyWriteReg64(const void *hPrivate,
+                                   IMG_UINT32 ui32RegAddr,
+                                   IMG_UINT64 ui64RegValue,
+                                   IMG_UINT64 ui64RegKeepMask);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXPollReg32/64
+
+ @Description    Poll on a 32/64 bit RGX register until some bits are set/unset
+
+ @Input          hPrivate         : Implementation specific data
+ @Input          ui32RegAddr      : Register offset inside the register bank
+ @Input          ui32/64RegValue  : Value expected from the register
+ @Input          ui32/64RegMask   : Only the bits set in this mask will be
+                                    checked against uiRegValue
+
+ @Return         PVRSRV_OK if the poll succeeds,
+                 PVRSRV_ERROR_TIMEOUT if the poll takes too long
+
+******************************************************************************/
+PVRSRV_ERROR RGXPollReg32(const void *hPrivate,
+                          IMG_UINT32 ui32RegAddr,
+                          IMG_UINT32 ui32RegValue,
+                          IMG_UINT32 ui32RegMask);
+
+PVRSRV_ERROR RGXPollReg64(const void *hPrivate,
+                          IMG_UINT32 ui32RegAddr,
+                          IMG_UINT64 ui64RegValue,
+                          IMG_UINT64 ui64RegMask);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXWaitCycles
+
+ @Description    Wait for a number of GPU cycles and/or microseconds
+
+ @Input          hPrivate    : Implementation specific data
+ @Input          ui32Cycles  : Number of GPU cycles to wait for in pdumps,
+                               it can also be used when running driver-live
+                               if desired (ignoring the next parameter)
+ @Input          ui32WaitUs  : Number of microseconds to wait for when running
+                               driver-live
+
+ @Return         void
+
+******************************************************************************/
+void RGXWaitCycles(const void *hPrivate,
+                   IMG_UINT32 ui32Cycles,
+                   IMG_UINT32 ui32WaitUs);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXAcquireKernelMMUPC
+
+ @Description     Acquire the Kernel MMU Page Catalogue device physical address
+
+ @Input           hPrivate  : Implementation specific data
+ @Input           psPCAddr  : Returned page catalog address
+
+ @Return          void
+
+******************************************************************************/
+void RGXAcquireKernelMMUPC(const void *hPrivate, IMG_DEV_PHYADDR *psPCAddr);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXWriteKernelMMUPC32/64
+
+ @Description     Write the Kernel MMU Page Catalogue to the 32/64 bit
+                  RGX register passed as argument.
+                  In a driver-live scenario without PDump these functions
+                  are the same as RGXWriteReg32/64 and they don't need
+                  to be reimplemented.
+
+ @Input           hPrivate        : Implementation specific data
+ @Input           ui32PCReg       : Register offset inside the register bank
+ @Input           ui32AlignShift  : PC register alignshift
+ @Input           ui32Shift       : PC register shift
+ @Input           ui32/64PCVal    : Page catalog value (aligned and shifted)
+
+ @Return          void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXWriteKernelMMUPC32(const void *hPrivate,
+                           IMG_UINT32 ui32PCReg,
+                           IMG_UINT32 ui32PCRegAlignShift,
+                           IMG_UINT32 ui32PCRegShift,
+                           IMG_UINT32 ui32PCVal);
+
+#else  /* defined(PDUMP) */
+#define RGXWriteKernelMMUPC32(priv, pcreg, alignshift, shift, pcval) \
+       RGXWriteReg32(priv, pcreg, pcval)
+#endif /* defined(PDUMP) */
+
+/*!
+*******************************************************************************
+
+ @Function        RGXDoFWSlaveBoot
+
+ @Description     Returns whether or not a FW Slave Boot is required
+                  while powering on
+
+ @Input           hPrivate       : Implementation specific data
+
+ @Return          IMG_BOOL
+
+******************************************************************************/
+IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXFabricCoherencyTest
+
+ @Description    Performs fabric coherency test
+
+ @Input          hPrivate         : Implementation specific data
+
+ @Return         PVRSRV_OK if the test succeeds,
+                 PVRSRV_ERROR_INIT_FAILURE if the test fails at some point
+
+******************************************************************************/
+PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXGetDeviceSLCBanks
+
+ @Description    Returns the number of SLC banks used by the device
+
+ @Input          hPrivate    : Implementation specific data
+
+ @Return         Number of SLC banks
+
+******************************************************************************/
+IMG_UINT32 RGXGetDeviceSLCBanks(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function       RGXGetDeviceCacheLineSize
+
+ @Description    Returns the device cache line size
+
+ @Input          hPrivate    : Implementation specific data
+
+ @Return         Cache line size
+
+******************************************************************************/
+IMG_UINT32 RGXGetDeviceCacheLineSize(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXAcquireBootCodeAddr
+
+ @Description     Acquire the device virtual address of the RISCV boot code
+
+ @Input           hPrivate         : Implementation specific data
+ @Output          psBootCodeAddr   : Boot code base address
+
+ @Return          void
+
+******************************************************************************/
+void RGXAcquireBootCodeAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootCodeAddr);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXAcquireBootDataAddr
+
+ @Description     Acquire the device virtual address of the RISCV boot data
+
+ @Input           hPrivate         : Implementation specific data
+ @Output          psBootDataAddr   : Boot data base address
+
+ @Return          void
+
+******************************************************************************/
+void RGXAcquireBootDataAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootDataAddr);
+
+/*!
+*******************************************************************************
+
+ @Function      RGXDeviceAckIrq
+
+ @Description   Checks the implementation specific IRQ status register,
+                clearing it if necessary and returning the IRQ status.
+
+ @Input          hPrivate    : Implementation specific data
+
+ @Return        IRQ status
+
+******************************************************************************/
+IMG_BOOL RGXDeviceAckIrq(const void *hPrivate);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* RGXLAYER_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxlayer_impl.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxlayer_impl.c
new file mode 100644 (file)
index 0000000..4778142
--- /dev/null
@@ -0,0 +1,993 @@
+/*************************************************************************/ /*!
+@File
+@Title          DDK implementation of the Services abstraction layer
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    DDK implementation of the Services abstraction layer
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxlayer_impl.h"
+#include "osfunc.h"
+#include "pdump_km.h"
+#include "rgxfwutils.h"
+#include "cache_km.h"
+
+#if defined(PDUMP)
+#include <stdarg.h>
+#endif
+
+void RGXMemCopy(const void *hPrivate,
+                void *pvDst,
+                void *pvSrc,
+                size_t uiSize)
+{
+       PVR_UNREFERENCED_PARAMETER(hPrivate);
+       OSDeviceMemCopy(pvDst, pvSrc, uiSize);
+}
+
+void RGXMemSet(const void *hPrivate,
+               void *pvDst,
+               IMG_UINT8 ui8Value,
+               size_t uiSize)
+{
+       PVR_UNREFERENCED_PARAMETER(hPrivate);
+       OSDeviceMemSet(pvDst, ui8Value, uiSize);
+}
+
+void RGXCommentLog(const void *hPrivate,
+                   const IMG_CHAR *pszString,
+                   ...)
+{
+#if defined(PDUMP)
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       va_list argList;
+       va_start(argList, pszString);
+
+       PVR_ASSERT(hPrivate != NULL);
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+
+       PDumpCommentWithFlagsVA(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, pszString, argList);
+       va_end(argList);
+#else
+       PVR_UNREFERENCED_PARAMETER(hPrivate);
+       PVR_UNREFERENCED_PARAMETER(pszString);
+#endif
+}
+
+void RGXErrorLog(const void *hPrivate,
+                 const IMG_CHAR *pszString,
+                 ...)
+{
+       IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+       va_list argList;
+
+       PVR_UNREFERENCED_PARAMETER(hPrivate);
+
+       va_start(argList, pszString);
+       vsnprintf(szBuffer, sizeof(szBuffer), pszString, argList);
+       va_end(argList);
+
+       PVR_DPF((PVR_DBG_ERROR, "%s", szBuffer));
+}
+
+IMG_INT32 RGXDeviceGetFeatureValue(const void *hPrivate, IMG_UINT64 ui64Feature)
+{
+       IMG_INT32 i32Ret = -1;
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+
+       PVR_ASSERT(hPrivate != NULL);
+
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+       psDeviceNode = psDevInfo->psDeviceNode;
+
+       if ((psDeviceNode->pfnGetDeviceFeatureValue))
+       {
+               i32Ret = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode, ui64Feature);
+       }
+
+       return i32Ret;
+}
+
+IMG_BOOL RGXDeviceHasFeature(const void *hPrivate, IMG_UINT64 ui64Feature)
+{
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_ASSERT(hPrivate != NULL);
+
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+
+       return (psDevInfo->sDevFeatureCfg.ui64Features & ui64Feature) != 0;
+}
+
+IMG_UINT32 RGXGetFWCorememSize(const void *hPrivate)
+{
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_ASSERT(hPrivate != NULL);
+
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE))
+       {
+               return RGX_GET_FEATURE_VALUE(psDevInfo, META_COREMEM_SIZE);
+       }
+       return 0;
+}
+
+void RGXWriteReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue)
+{
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       void __iomem *pvRegsBase;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+       pvRegsBase = psDevInfo->pvRegsBaseKM;
+
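+       /* When PDump capture runs in NOHW mode the physical register write below is
+          skipped; the PDUMPREG32 call still records the write in the PDump script. */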
+#if defined(PDUMP)
+       if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
+#endif
+       {
+               OSWriteHWReg32(pvRegsBase, ui32RegAddr, ui32RegValue);
+       }
+
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME,
+                  ui32RegAddr, ui32RegValue, psParams->ui32PdumpFlags);
+}
+
+void RGXWriteReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT64 ui64RegValue)
+{
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       void __iomem *pvRegsBase;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+       pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+       if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
+#endif
+       {
+               OSWriteHWReg64(pvRegsBase, ui32RegAddr, ui64RegValue);
+       }
+
+       PDUMPREG64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME,
+                  ui32RegAddr, ui64RegValue, psParams->ui32PdumpFlags);
+}
+
+IMG_UINT32 RGXReadReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr)
+{
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       void __iomem *pvRegsBase;
+       IMG_UINT32 ui32RegValue;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+       pvRegsBase = psDevInfo->pvRegsBaseKM;
+
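+       /* In NOHW PDump capture there is no hardware to read, so a placeholder value
+          (all ones) is returned and only the register read is recorded in the script. */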
+#if defined(PDUMP)
+       if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)
+       {
+               ui32RegValue = IMG_UINT32_MAX;
+       }
+       else
+#endif
+       {
+               ui32RegValue = OSReadHWReg32(pvRegsBase, ui32RegAddr);
+       }
+
+       PDUMPREGREAD32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME,
+                      ui32RegAddr, psParams->ui32PdumpFlags);
+
+       return ui32RegValue;
+}
+
+IMG_UINT64 RGXReadReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr)
+{
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       void __iomem *pvRegsBase;
+       IMG_UINT64 ui64RegValue;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+       pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+       if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)
+       {
+               ui64RegValue = IMG_UINT64_MAX;
+       }
+       else
+#endif
+       {
+               ui64RegValue = OSReadHWReg64(pvRegsBase, ui32RegAddr);
+       }
+
+       PDUMPREGREAD64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME,
+                      ui32RegAddr, PDUMP_FLAGS_CONTINUOUS);
+
+       return ui64RegValue;
+}
+
+IMG_UINT32 RGXReadModifyWriteReg64(const void *hPrivate,
+                                   IMG_UINT32 ui32RegAddr,
+                                   IMG_UINT64 uiRegValueNew,
+                                   IMG_UINT64 uiRegKeepMask)
+{
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       void __iomem *pvRegsBase;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+       pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+       /* only use the new values for bits we update according to the keep mask */
+       uiRegValueNew &= ~uiRegKeepMask;
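+       /* Illustrative example: with uiRegKeepMask = 0xFF, a current register value of
+          0x1122334455667788 and uiRegValueNew = 0xAAAAAAAAAAAAAAAA, the masked new
+          value becomes 0xAAAAAAAAAAAAAA00 and the final write below is
+          0xAAAAAAAAAAAAAA88: the low byte is kept, every other bit is replaced. */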
+
+#if defined(PDUMP)
+       /* Read the current register value into a temporary PDump internal variable */
+       PDumpRegRead64ToInternalVar(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME,
+                                   ":SYSMEM:$1", ui32RegAddr, PDUMP_FLAGS_CONTINUOUS);
+
+       /* Keep the bits set in the mask */
+       PDumpWriteVarANDValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1",
+                               uiRegKeepMask, PDUMP_FLAGS_CONTINUOUS);
+
+       /* OR the new values */
+       PDumpWriteVarORValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1",
+                              uiRegValueNew, PDUMP_FLAGS_CONTINUOUS);
+
+       /* Do the actual register write */
+       PDumpInternalVarToReg64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME,
+                               ui32RegAddr, ":SYSMEM:$1", 0);
+
+       if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
+#endif
+
+       {
+               IMG_UINT64 uiRegValue = OSReadHWReg64(pvRegsBase, ui32RegAddr);
+               uiRegValue &= uiRegKeepMask;
+               OSWriteHWReg64(pvRegsBase, ui32RegAddr, uiRegValue | uiRegValueNew);
+       }
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXPollReg32(const void *hPrivate,
+                          IMG_UINT32 ui32RegAddr,
+                          IMG_UINT32 ui32RegValue,
+                          IMG_UINT32 ui32RegMask)
+{
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       void __iomem *pvRegsBase;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+       pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+       if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
+#endif
+       {
+               if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                                        (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr),
+                                        ui32RegValue,
+                                        ui32RegMask,
+                                        POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "RGXPollReg32: Poll for Reg (0x%x) failed", ui32RegAddr));
+                       return PVRSRV_ERROR_TIMEOUT;
+               }
+       }
+
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   ui32RegAddr,
+                   ui32RegValue,
+                   ui32RegMask,
+                   psParams->ui32PdumpFlags,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXPollReg64(const void *hPrivate,
+                          IMG_UINT32 ui32RegAddr,
+                          IMG_UINT64 ui64RegValue,
+                          IMG_UINT64 ui64RegMask)
+{
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       void __iomem *pvRegsBase;
+
+       /* Split lower and upper words */
+       IMG_UINT32 ui32UpperValue = (IMG_UINT32) (ui64RegValue >> 32);
+       IMG_UINT32 ui32LowerValue = (IMG_UINT32) (ui64RegValue);
+       IMG_UINT32 ui32UpperMask = (IMG_UINT32) (ui64RegMask >> 32);
+       IMG_UINT32 ui32LowerMask = (IMG_UINT32) (ui64RegMask);
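+       /* The 64-bit poll is carried out as two 32-bit polls: the lower word at
+          ui32RegAddr and the upper word at ui32RegAddr + 4, since the poll helper
+          operates on 32-bit values. */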
+
+       PVR_ASSERT(hPrivate != NULL);
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+       pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+       if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
+#endif
+       {
+               if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                                        (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr + 4),
+                                        ui32UpperValue,
+                                        ui32UpperMask,
+                                        POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for upper part of Reg (0x%x) failed", ui32RegAddr));
+                       return PVRSRV_ERROR_TIMEOUT;
+               }
+
+               if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+                                        (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr),
+                                        ui32LowerValue,
+                                        ui32LowerMask,
+                                        POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for lower part of Reg (0x%x) failed", ui32RegAddr));
+                       return PVRSRV_ERROR_TIMEOUT;
+               }
+       }
+
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   ui32RegAddr + 4,
+                   ui32UpperValue,
+                   ui32UpperMask,
+                   psParams->ui32PdumpFlags,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+
+
+       PDUMPREGPOL(psDevInfo->psDeviceNode,
+                   RGX_PDUMPREG_NAME,
+                   ui32RegAddr,
+                   ui32LowerValue,
+                   ui32LowerMask,
+                   psParams->ui32PdumpFlags,
+                   PDUMP_POLL_OPERATOR_EQUAL);
+
+       return PVRSRV_OK;
+}
+
+void RGXWaitCycles(const void *hPrivate, IMG_UINT32 ui32Cycles, IMG_UINT32 ui32TimeUs)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
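+       /* On real hardware this simply waits for the requested number of microseconds;
+          the cycle count is only used by the PDump IDL command emitted below. */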
+       OSWaitus(ui32TimeUs);
+       PDUMPIDLWITHFLAGS(psDevInfo->psDeviceNode, ui32Cycles, PDUMP_FLAGS_CONTINUOUS);
+}
+
+void RGXAcquireKernelMMUPC(const void *hPrivate, IMG_DEV_PHYADDR *psPCAddr)
+{
+       PVR_ASSERT(hPrivate != NULL);
+       *psPCAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sPCAddr;
+}
+
+#if defined(PDUMP)
+void RGXWriteKernelMMUPC32(const void *hPrivate,
+                           IMG_UINT32 ui32PCReg,
+                           IMG_UINT32 ui32PCRegAlignShift,
+                           IMG_UINT32 ui32PCRegShift,
+                           IMG_UINT32 ui32PCVal)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+       /* Write the cat-base address */
+       OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32PCReg, ui32PCVal);
+
+       /* Pdump catbase address */
+       MMU_PDumpWritePageCatBase(psDevInfo->psKernelMMUCtx,
+                                 RGX_PDUMPREG_NAME,
+                                 ui32PCReg,
+                                 4,
+                                 ui32PCRegAlignShift,
+                                 ui32PCRegShift,
+                                 PDUMP_FLAGS_CONTINUOUS);
+}
+#endif /* defined(PDUMP) */
+
+#define MAX_NUM_COHERENCY_TESTS  (10)
+IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
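+       /* Slave boot is only reported as usable while the fabric coherency test
+          budget below has not been exhausted; after that IMG_FALSE is returned
+          unconditionally. */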
+
+       if (psDevInfo->ui32CoherencyTestsDone >= MAX_NUM_COHERENCY_TESTS)
+       {
+               return IMG_FALSE;
+       }
+
+       psDeviceNode = psDevInfo->psDeviceNode;
+#if !defined(NO_HARDWARE)
+       return (PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) &&
+               PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig));
+#else
+       return IMG_FALSE;
+#endif
+}
+
+/*
+ * The fabric coherency test is performed when the platform supports fabric
+ * coherency, either as ACE-Lite or full ACE. The test runs early, while the
+ * firmware processor is quiescent, and makes exclusive use of the slave port
+ * interface for reading and writing through the device memory hierarchy. Its
+ * purpose is to verify that what the CPU writes to its dcache is visible to
+ * the GPU via coherency snoop miss/hit, and vice versa, without any
+ * intervening cache maintenance by the writing agent.
+ */
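+/*
+ * The tests below cover both directions (CPU write / GPU read via the slave
+ * port, and GPU write / CPU read) in snoop-miss (SLC bypassed) and snoop-hit
+ * (SLC as the region of coherence) configurations; as noted in the function
+ * body, only the snoop-hit variants are currently executed.
+ */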
+PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       IMG_UINT32 *pui32FabricCohTestBufferCpuVA = NULL;
+       IMG_UINT32 *pui32FabricCohCcTestBufferCpuVA = NULL;
+       IMG_UINT32 *pui32FabricCohNcTestBufferCpuVA = NULL;
+       DEVMEM_MEMDESC *psFabricCohTestBufferMemDesc = NULL;
+       DEVMEM_MEMDESC *psFabricCohCcTestBufferMemDesc = NULL;
+       DEVMEM_MEMDESC *psFabricCohNcTestBufferMemDesc = NULL;
+       RGXFWIF_DEV_VIRTADDR sFabricCohCcTestBufferDevVA;
+       RGXFWIF_DEV_VIRTADDR sFabricCohNcTestBufferDevVA;
+       RGXFWIF_DEV_VIRTADDR *psFabricCohTestBufferDevVA = NULL;
+       IMG_DEVMEM_SIZE_T uiFabricCohTestBlockSize = sizeof(IMG_UINT64);
+       IMG_DEVMEM_ALIGN_T uiFabricCohTestBlockAlign = sizeof(IMG_UINT64);
+       IMG_UINT64 ui64SegOutAddrTopCached = 0;
+       IMG_UINT64 ui64SegOutAddrTopUncached = 0;
+       IMG_UINT32 ui32OddEven;
+       IMG_UINT32 ui32OddEvenSeed = 1;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_BOOL bFullTestPassed = IMG_TRUE;
+       IMG_BOOL bExit = IMG_FALSE;
+#if defined(DEBUG)
+       IMG_BOOL bSubTestPassed = IMG_FALSE;
+#endif
+       enum TEST_TYPE {
+               CPU_WRITE_GPU_READ_SM=0, GPU_WRITE_CPU_READ_SM,
+               CPU_WRITE_GPU_READ_SH,   GPU_WRITE_CPU_READ_SH
+       } eTestType;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+       PVR_LOG(("Starting fabric coherency test ....."));
+
+       /* Size and align are 'expanded' because we request an export align allocation */
+       eError = DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap),
+                                                    &uiFabricCohTestBlockSize,
+                                                    &uiFabricCohTestBlockAlign);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "DevmemExportalignAdjustSizeAndAlign() error: %s, exiting",
+                               PVRSRVGetErrorString(eError)));
+               goto e0;
+       }
+
+       /* Allocate, acquire cpu address and set firmware address for cc=1 buffer */
+       eError = DevmemFwAllocateExportable(psDevInfo->psDeviceNode,
+                                                                               uiFabricCohTestBlockSize,
+                                                                               uiFabricCohTestBlockAlign,
+                                                                               PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                                                                               PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+                                                                               PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+                                                                               PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT |
+                                                                               PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT |
+                                                                               PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                                                                               PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                                                                               PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+                                                                               PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                                                                               PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN),
+                                                                               "FwExFabricCoherencyCcTestBuffer",
+                                                                               &psFabricCohCcTestBufferMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "DevmemFwAllocateExportable() error: %s, exiting",
+                               PVRSRVGetErrorString(eError)));
+               goto e0;
+       }
+
+       eError = DevmemAcquireCpuVirtAddr(psFabricCohCcTestBufferMemDesc, (void **) &pui32FabricCohCcTestBufferCpuVA);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "DevmemAcquireCpuVirtAddr() error: %s, exiting",
+                               PVRSRVGetErrorString(eError)));
+               goto e1;
+       }
+
+       /* Create a FW address which is uncached in the Meta DCache and in the SLC using the Meta bootloader segment.
+          This segment is the only one configured correctly out of reset (when this test is meant to be executed) */
+       eError = RGXSetFirmwareAddress(&sFabricCohCcTestBufferDevVA,
+                                                 psFabricCohCcTestBufferMemDesc,
+                                                 0,
+                                                 RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", e2);
+
+       /* Undo most of the FW mappings done by RGXSetFirmwareAddress */
+       sFabricCohCcTestBufferDevVA.ui32Addr &= ~RGXFW_SEGMMU_DATA_META_CACHE_MASK;
+       sFabricCohCcTestBufferDevVA.ui32Addr &= ~RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK;
+       sFabricCohCcTestBufferDevVA.ui32Addr -= RGXFW_SEGMMU_DATA_BASE_ADDRESS;
+
+       /* Map the buffer in the bootloader segment as uncached */
+       sFabricCohCcTestBufferDevVA.ui32Addr |= RGXFW_BOOTLDR_META_ADDR;
+       sFabricCohCcTestBufferDevVA.ui32Addr |= RGXFW_SEGMMU_DATA_META_UNCACHED;
+
+       /* Allocate, acquire cpu address and set firmware address for cc=0 buffer  */
+       eError = DevmemFwAllocateExportable(psDevInfo->psDeviceNode,
+                                                                               uiFabricCohTestBlockSize,
+                                                                               uiFabricCohTestBlockAlign,
+                                                                               PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                                                                               PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+                                                                               PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+                                                                               PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+                                                                               PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT |
+                                                                               PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                                                                               PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                                                                               PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+                                                                               PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                                                                               PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN),
+                                                                               "FwExFabricCoherencyNcTestBuffer",
+                                                                               &psFabricCohNcTestBufferMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "DevmemFwAllocateExportable() error: %s, exiting",
+                               PVRSRVGetErrorString(eError)));
+               goto e3;
+       }
+
+       eError = DevmemAcquireCpuVirtAddr(psFabricCohNcTestBufferMemDesc, (void **) &pui32FabricCohNcTestBufferCpuVA);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "DevmemAcquireCpuVirtAddr() error: %s, exiting",
+                               PVRSRVGetErrorString(eError)));
+               goto e4;
+       }
+
+       eError = RGXSetFirmwareAddress(&sFabricCohNcTestBufferDevVA,
+                                                 psFabricCohNcTestBufferMemDesc,
+                                                 0,
+                                                 RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", e5);
+
+       /* Undo most of the FW mappings done by RGXSetFirmwareAddress */
+       sFabricCohNcTestBufferDevVA.ui32Addr &= ~RGXFW_SEGMMU_DATA_META_CACHE_MASK;
+       sFabricCohNcTestBufferDevVA.ui32Addr &= ~RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK;
+       sFabricCohNcTestBufferDevVA.ui32Addr -= RGXFW_SEGMMU_DATA_BASE_ADDRESS;
+
+       /* Map the buffer in the bootloader segment as uncached */
+       sFabricCohNcTestBufferDevVA.ui32Addr |= RGXFW_BOOTLDR_META_ADDR;
+       sFabricCohNcTestBufferDevVA.ui32Addr |= RGXFW_SEGMMU_DATA_META_UNCACHED;
+
+       /* Obtain the META segment addresses corresponding to cached and uncached windows into SLC */
+       ui64SegOutAddrTopCached   = RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED(MMU_CONTEXT_MAPPING_FWIF);
+       ui64SegOutAddrTopUncached = RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_UNCACHED(MMU_CONTEXT_MAPPING_FWIF);
+
+       /* At the top level we perform snoop-miss tests (to verify the slave port) and snoop-hit tests (to verify ACE).
+          NOTE: the snoop-miss tests are currently skipped because Services forces all firmware allocations to be coherent */
+       for (eTestType = CPU_WRITE_GPU_READ_SH; eTestType <= GPU_WRITE_CPU_READ_SH && bExit == IMG_FALSE; eTestType++)
+       {
+               IMG_CPU_PHYADDR sCpuPhyAddr;
+               IMG_BOOL bValid;
+               PMR *psPMR;
+
+               if (eTestType == CPU_WRITE_GPU_READ_SM)
+               {
+                       /* All snoop-miss tests must bypass the SLC: here memory is the region of coherence,
+                          so configure META to use the SLC-bypass cache policy for the bootloader segment.
+                          Note this cannot be done on a cache-coherent (i.e. CC=1) VA, because issuing a
+                          non-coherent request into the bus fabric for a VA that is cache-coherent in the
+                          SLC violates the ACE standard; the non-coherent buffer is used instead */
+                       RGXWriteMetaRegThroughSP(hPrivate, META_CR_MMCU_SEGMENTn_OUTA1(6),
+                                                                       (ui64SegOutAddrTopUncached |  RGXFW_BOOTLDR_DEVV_ADDR) >> 32);
+                       pui32FabricCohTestBufferCpuVA = pui32FabricCohNcTestBufferCpuVA;
+                       psFabricCohTestBufferMemDesc = psFabricCohNcTestBufferMemDesc;
+                       psFabricCohTestBufferDevVA = &sFabricCohNcTestBufferDevVA;
+               }
+               else if (eTestType == CPU_WRITE_GPU_READ_SH)
+               {
+                       /* All snoop-hit tests must use the SLC: here the SLC is the region of coherence,
+                          so configure META not to bypass the SLC for the bootloader segment */
+                       RGXWriteMetaRegThroughSP(hPrivate, META_CR_MMCU_SEGMENTn_OUTA1(6),
+                                                                       (ui64SegOutAddrTopCached |  RGXFW_BOOTLDR_DEVV_ADDR) >> 32);
+                       pui32FabricCohTestBufferCpuVA = pui32FabricCohCcTestBufferCpuVA;
+                       psFabricCohTestBufferMemDesc = psFabricCohCcTestBufferMemDesc;
+                       psFabricCohTestBufferDevVA = &sFabricCohCcTestBufferDevVA;
+               }
+
+               if (eTestType == GPU_WRITE_CPU_READ_SH &&
+                       !PVRSRVSystemSnoopingOfDeviceCache(psDevInfo->psDeviceNode->psDevConfig))
+               {
+                       /* Cannot perform this test if there is no snooping of device cache */
+                       continue;
+               }
+
+               /* Acquire underlying PMR CpuPA in preparation for cache maintenance */
+               (void) DevmemLocalGetImportHandle(psFabricCohTestBufferMemDesc, (void**)&psPMR);
+               eError = PMR_CpuPhysAddr(psPMR, OSGetPageShift(), 1, 0, &sCpuPhyAddr, &bValid);
+               if (eError != PVRSRV_OK || bValid == IMG_FALSE)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "PMR_CpuPhysAddr error: %s, exiting",
+                                       PVRSRVGetErrorString(eError)));
+                       bExit = IMG_TRUE;
+                       continue;
+               }
+
+               /* Here we do two passes mostly to account for the effects of using a different
+                  seed (i.e. ui32OddEvenSeed) value to read and write */
+               for (ui32OddEven = 1; ui32OddEven < 3 && bExit == IMG_FALSE; ui32OddEven++)
+               {
+                       IMG_UINT32 i;
+
+                       /* Do multiple sub-dword cache line tests */
+                       for (i = 0; i < 2 && bExit == IMG_FALSE; i++)
+                       {
+                               IMG_UINT32 ui32FWAddr;
+                               IMG_UINT32 ui32FWValue;
+                               IMG_UINT32 ui32FWValue2;
+                               IMG_UINT32 ui32LastFWValue = ~0;
+                               IMG_UINT32 ui32Offset = i * sizeof(IMG_UINT32);
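+                               /* The test block was requested as sizeof(IMG_UINT64) bytes, so the two
+                                  iterations of 'i' exercise its low and high 32-bit words in turn. */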
+
+                               /* Calculate next address and seed value to write/read from slave-port */
+                               ui32FWAddr = psFabricCohTestBufferDevVA->ui32Addr + ui32Offset;
+                               ui32OddEvenSeed += 1;
+
+                               if (eTestType == GPU_WRITE_CPU_READ_SM || eTestType == GPU_WRITE_CPU_READ_SH)
+                               {
+                                       /* Clean the dcache first: a stale dirty line covering this location could
+                                          drain from the CPU dcache after the slave-port write below and overwrite
+                                          it, corrupting the value we later read back via the CPU */
+                                       CacheOpValExec(psPMR, 0, ui32Offset, sizeof(IMG_UINT32), PVRSRV_CACHE_OP_CLEAN);
+
+                                       /* Calculate a new value to write */
+                                       ui32FWValue = i + ui32OddEvenSeed;
+
+                                       /* Write the value using the RGX slave-port interface */
+                                       eError = RGXWriteFWModuleAddr(psDevInfo, ui32FWAddr, ui32FWValue);
+                                       if (eError != PVRSRV_OK)
+                                       {
+                                               PVR_DPF((PVR_DBG_ERROR,
+                                                               "RGXWriteFWModuleAddr error: %s, exiting",
+                                                                PVRSRVGetErrorString(eError)));
+                                               bExit = IMG_TRUE;
+                                               continue;
+                                       }
+
+                                       /* Read back value using RGX slave-port interface, this is used
+                                          as a sort of memory barrier for the above write */
+                                       eError = RGXReadFWModuleAddr(psDevInfo, ui32FWAddr, &ui32FWValue2);
+                                       if (eError != PVRSRV_OK)
+                                       {
+                                               PVR_DPF((PVR_DBG_ERROR,
+                                                               "RGXReadFWModuleAddr error: %s, exiting",
+                                                                PVRSRVGetErrorString(eError)));
+                                               bExit = IMG_TRUE;
+                                               continue;
+                                       }
+                                       else if (ui32FWValue != ui32FWValue2)
+                                       {
+                                               //IMG_UINT32 ui32FWValue3;
+                                               //RGXReadFWModuleAddr(psDevInfo, 0xC1F00000, &ui32FWValue3);
+
+                                               /* Fatal error, we should abort */
+                                               PVR_DPF((PVR_DBG_ERROR,
+                                                               "At Offset: %d, RAW via SlavePort failed: expected: %x, got: %x",
+                                                               i,
+                                                               ui32FWValue,
+                                                               ui32FWValue2));
+                                               eError = PVRSRV_ERROR_INIT_FAILURE;
+                                               bExit = IMG_TRUE;
+                                               continue;
+                                       }
+
+                                       if (!PVRSRVSystemSnoopingOfDeviceCache(psDevInfo->psDeviceNode->psDevConfig))
+                                       {
+                                               /* Invalidate the dcache so that any data the CPU prefetched from this memory
+                                                  region is discarded before we read (i.e. the next read must miss in the cache).
+                                                  If the device cache is snooped, CPU prefetches already reflect the most recent
+                                                  value written by the GPU to this location, so no CPU cache maintenance is
+                                                  needed in that case */
+                                               CacheOpValExec(psPMR, 0, ui32Offset, sizeof(IMG_UINT32), PVRSRV_CACHE_OP_INVALIDATE);
+                                       }
+                               }
+                               else
+                               {
+                                       IMG_UINT32 ui32RAWCpuValue;
+
+                                       /* Ensures line is in dcache */
+                                       ui32FWValue = IMG_UINT32_MAX;
+
+                                       /* Dirty allocation in dcache */
+                                       ui32RAWCpuValue = i + ui32OddEvenSeed;
+                                       pui32FabricCohTestBufferCpuVA[i] = i + ui32OddEvenSeed;
+
+                                       /* Flush possible cpu store-buffer(ing) on LMA */
+                                       OSWriteMemoryBarrier(&pui32FabricCohTestBufferCpuVA[i]);
+
+                                       switch (eTestType)
+                                       {
+                                       case CPU_WRITE_GPU_READ_SM:
+                                               /* Flush dcache to force subsequent incoming CPU-bound snoop to miss so
+                                                  memory is coherent before the SlavePort reads */
+                                               CacheOpValExec(psPMR, 0, ui32Offset, sizeof(IMG_UINT32), PVRSRV_CACHE_OP_FLUSH);
+                                               break;
+                                       default:
+                                               break;
+                                       }
+
+                                       /* Read back value using RGX slave-port interface */
+                                       eError = RGXReadFWModuleAddr(psDevInfo, ui32FWAddr, &ui32FWValue);
+                                       if (eError != PVRSRV_OK)
+                                       {
+                                               PVR_DPF((PVR_DBG_ERROR,
+                                                               "RGXReadFWModuleAddr error: %s, exiting",
+                                                               PVRSRVGetErrorString(eError)));
+                                               bExit = IMG_TRUE;
+                                               continue;
+                                       }
+
+                                       /* Being mostly paranoid here, verify that CPU RAW operation is valid
+                                          after the above slave port read */
+                                       CacheOpValExec(psPMR, 0, ui32Offset, sizeof(IMG_UINT32), PVRSRV_CACHE_OP_INVALIDATE);
+                                       if (pui32FabricCohTestBufferCpuVA[i] != ui32RAWCpuValue)
+                                       {
+                                               /* Fatal error, we should abort */
+                                               PVR_DPF((PVR_DBG_ERROR,
+                                                               "At Offset: %d, RAW by CPU failed: expected: %x, got: %x",
+                                                               i,
+                                                               ui32RAWCpuValue,
+                                                               pui32FabricCohTestBufferCpuVA[i]));
+                                               eError = PVRSRV_ERROR_INIT_FAILURE;
+                                               bExit = IMG_TRUE;
+                                               continue;
+                                       }
+                               }
+
+                               /* Compare to see if sub-test passed */
+                               if (pui32FabricCohTestBufferCpuVA[i] == ui32FWValue)
+                               {
+#if defined(DEBUG)
+                                       bSubTestPassed = IMG_TRUE;
+#endif
+                               }
+                               else
+                               {
+                                       bFullTestPassed = IMG_FALSE;
+                                       eError = PVRSRV_ERROR_INIT_FAILURE;
+#if defined(DEBUG)
+                                       bSubTestPassed = IMG_FALSE;
+#endif
+                                       if (ui32LastFWValue != ui32FWValue)
+                                       {
+#if defined(DEBUG)
+                                               PVR_LOG(("At Offset: %d, Expected: %x, Got: %x",
+                                                                i,
+                                                                (eTestType & 0x1) ? ui32FWValue : pui32FabricCohTestBufferCpuVA[i],
+                                                                (eTestType & 0x1) ? pui32FabricCohTestBufferCpuVA[i] : ui32FWValue));
+#endif
+                                       }
+                                       else
+                                       {
+                                               PVR_DPF((PVR_DBG_ERROR,
+                                                               "test encountered unexpected error, exiting"));
+                                               eError = PVRSRV_ERROR_INIT_FAILURE;
+                                               bExit = IMG_TRUE;
+                                               continue;
+                                       }
+                               }
+
+                               ui32LastFWValue = (eTestType & 0x1) ? ui32FWValue : pui32FabricCohTestBufferCpuVA[i];
+                       }
+
+#if defined(DEBUG)
+                       bSubTestPassed = bExit ? IMG_FALSE : bSubTestPassed;
+                       switch (eTestType)
+                       {
+                       case CPU_WRITE_GPU_READ_SM:
+                               PVR_LOG(("CPU:Write/GPU:Read Snoop Miss Test: completed [run #%u]: %s",
+                                                ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED"));
+                               break;
+                       case GPU_WRITE_CPU_READ_SM:
+                               PVR_LOG(("GPU:Write/CPU:Read Snoop Miss Test: completed [run #%u]: %s",
+                                                ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED"));
+                               break;
+                       case CPU_WRITE_GPU_READ_SH:
+                               PVR_LOG(("CPU:Write/GPU:Read Snoop Hit Test: completed [run #%u]: %s",
+                                                ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED"));
+                               break;
+                       case GPU_WRITE_CPU_READ_SH:
+                               PVR_LOG(("GPU:Write/CPU:Read Snoop Hit Test: completed [run #%u]: %s",
+                                                ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED"));
+                               break;
+                       default:
+                               PVR_LOG(("Internal error, exiting test"));
+                               eError = PVRSRV_ERROR_INIT_FAILURE;
+                               bExit = IMG_TRUE;
+                               continue;
+                       }
+#endif
+               }
+       }
+
+       /* Release and free NC/CC test buffers, unwinding in reverse allocation
+          order (the NC buffer was allocated last) so that each error label above
+          only tears down what had been set up at that point */
+       RGXUnsetFirmwareAddress(psFabricCohNcTestBufferMemDesc);
+e5:
+       DevmemReleaseCpuVirtAddr(psFabricCohNcTestBufferMemDesc);
+e4:
+       DevmemFwUnmapAndFree(psDevInfo, psFabricCohNcTestBufferMemDesc);
+
+e3:
+       RGXUnsetFirmwareAddress(psFabricCohCcTestBufferMemDesc);
+e2:
+       DevmemReleaseCpuVirtAddr(psFabricCohCcTestBufferMemDesc);
+e1:
+       DevmemFwUnmapAndFree(psDevInfo, psFabricCohCcTestBufferMemDesc);
+
+e0:
+       /* Restore bootloader segment settings */
+       RGXWriteMetaRegThroughSP(hPrivate, META_CR_MMCU_SEGMENTn_OUTA1(6),
+                                (ui64SegOutAddrTopCached | RGXFW_BOOTLDR_DEVV_ADDR) >> 32);
+
+       bFullTestPassed = bExit ? IMG_FALSE : bFullTestPassed;
+       if (bFullTestPassed)
+       {
+               PVR_LOG(("fabric coherency test: PASSED"));
+               psDevInfo->ui32CoherencyTestsDone = MAX_NUM_COHERENCY_TESTS + 1;
+       }
+       else
+       {
+               PVR_LOG(("fabric coherency test: FAILED"));
+               psDevInfo->ui32CoherencyTestsDone++;
+       }
+
+       return eError;
+}
+
+IMG_BOOL RGXDeviceHasErnBrn(const void *hPrivate, IMG_UINT64 ui64ErnsBrns)
+{
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+
+       return (psDevInfo->sDevFeatureCfg.ui64ErnsBrns & ui64ErnsBrns) != 0;
+}
+
+IMG_UINT32 RGXGetDeviceSLCBanks(const void *hPrivate)
+{
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+
+       if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_BANKS))
+       {
+               return 0;
+       }
+       return RGX_GET_FEATURE_VALUE(psDevInfo, SLC_BANKS);
+}
+
+IMG_UINT32 RGXGetDeviceCacheLineSize(const void *hPrivate)
+{
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+
+       if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_CACHE_LINE_SIZE_BITS))
+       {
+               return 0;
+       }
+       return RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS);
+}
+
+void RGXAcquireBootCodeAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootCodeAddr)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+       *psBootCodeAddr = psDevInfo->sFWCodeDevVAddrBase;
+}
+
+void RGXAcquireBootDataAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootDataAddr)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+       *psBootDataAddr = psDevInfo->sFWDataDevVAddrBase;
+}
+
+IMG_BOOL RGXDeviceAckIrq(const void *hPrivate)
+{
+       RGX_LAYER_PARAMS *psParams;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+
+       PVR_ASSERT(hPrivate != NULL);
+       psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       psDevInfo = psParams->psDevInfo;
+
+       return (psDevInfo->pfnRGXAckIrq != NULL) ?
+                       psDevInfo->pfnRGXAckIrq(psDevInfo) : IMG_TRUE;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxlayer_impl.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxlayer_impl.h
new file mode 100644 (file)
index 0000000..b1ea6f0
--- /dev/null
@@ -0,0 +1,61 @@
+/*************************************************************************/ /*!
+@File
+@Title          Header for DDK implementation of the Services abstraction layer
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for DDK implementation of the Services abstraction layer
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXLAYER_IMPL_H)
+#define RGXLAYER_IMPL_H
+
+#include "rgxlayer.h"
+#include "device_connection.h"
+
+typedef struct _RGX_LAYER_PARAMS_
+{
+       void *psDevInfo;
+       void *psDevConfig;
+#if defined(PDUMP)
+       IMG_UINT32 ui32PdumpFlags;
+#endif
+
+       IMG_DEV_PHYADDR sPCAddr;
+} RGX_LAYER_PARAMS;
+
+#endif /* RGXLAYER_IMPL_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxmmuinit.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxmmuinit.c
new file mode 100644 (file)
index 0000000..8d13954
--- /dev/null
@@ -0,0 +1,1272 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific initialisation routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific MMU initialisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+#include "rgxmmuinit.h"
+#include "rgxmmudefs_km.h"
+
+#include "rgxdevice.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "mmu_common.h"
+#include "pdump_mmu.h"
+
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "rgx_memallocflags.h"
+#include "rgx_heaps.h"
+#include "pdump_km.h"
+
+
+/* useful macros */
+/* units represented in a bitfield */
+#define UNITS_IN_BITFIELD(Mask, Shift) (((Mask) >> (Shift)) + 1)
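+/* Illustrative example (hypothetical field): for a bitfield occupying bits 39..30,
+   Mask = 0xFFC0000000 and Shift = 30, so UNITS_IN_BITFIELD yields 0x3FF + 1 = 1024
+   entries. */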
+
+
+/*
+ * Bits of PT, PD and PC not involving addresses
+ */
+
+
+
+/* protection bits for MMU_VERSION <= 3 */
+#define RGX_MMUCTRL_PTE_PROTMASK       (RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN | \
+               ~RGX_MMUCTRL_PT_DATA_AXCACHE_CLRMSK | \
+               RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN | \
+               RGX_MMUCTRL_PT_DATA_PM_SRC_EN | \
+               RGX_MMUCTRL_PT_DATA_CC_EN | \
+               RGX_MMUCTRL_PT_DATA_READ_ONLY_EN | \
+               RGX_MMUCTRL_PT_DATA_VALID_EN)
+
+#define RGX_MMUCTRL_PDE_PROTMASK       (RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN | \
+               ~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK | \
+               RGX_MMUCTRL_PD_DATA_VALID_EN)
+
+#define RGX_MMUCTRL_PCE_PROTMASK       (RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN | \
+               RGX_MMUCTRL_PC_DATA_VALID_EN)
+
+
+/*
+ * protection bits for MMU_VERSION >= 4
+ * MMU4 has no PENDING or PAGE_SIZE fields in PxE
+ */
+#define RGX_MMU4CTRL_PTE_PROTMASK      (RGX_MMUCTRL_PTE_PROTMASK & ~RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN)
+
+#define RGX_MMU4CTRL_PDE_PROTMASK      (RGX_MMUCTRL_PD_DATA_VALID_EN)
+
+#define RGX_MMU4CTRL_PCE_PROTMASK      (RGX_MMUCTRL_PC_DATA_VALID_EN)
+
+
+
+
+static MMU_PxE_CONFIG sRGXMMUPCEConfig;
+static MMU_DEVVADDR_CONFIG sRGXMMUTopLevelDevVAddrConfig;
+
+
+/*
+ *
+ *  Configuration for heaps with 4kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_4KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_4KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_4KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig4KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 16kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_16KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_16KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_16KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig16KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 64kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_64KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_64KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_64KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig64KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 256kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_256KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_256KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig256KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 1MB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_1MBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_1MBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_1MBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig1MB;
+
+
+/*
+ *
+ *  Configuration for heaps with 2MB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_2MBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_2MBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_2MBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig2MB;
+
+
+/* Forward declaration of protection bits derivation functions, for
+   the following structure */
+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags);
+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags);
+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags);
+
+/* protection bits derivation functions for MMUv4 */
+static IMG_UINT64 RGXMMU4DerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static PVRSRV_ERROR RGXMMU4GetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize);
+static PVRSRV_ERROR RGXMMU4GetPageSizeFromVirtAddr(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+                                                                                                  IMG_DEV_VIRTADDR sDevVAddr,
+                                                                                                  IMG_UINT32 *pui32Log2PageSize);
+
+
+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
+               const MMU_PxE_CONFIG **ppsMMUPDEConfig,
+               const MMU_PxE_CONFIG **ppsMMUPTEConfig,
+               const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+               IMG_HANDLE *phPriv);
+
+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv);
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize);
+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize);
+
+static MMU_DEVICEATTRIBS sRGXMMUDeviceAttributes;
+
+PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       IMG_BOOL bHaveMMU4 = (RGX_GET_FEATURE_VALUE(psDevInfo, MMU_VERSION) >= 4);
+
+       /* Setup of Px Entries:
+        *
+        *
+        * PAGE TABLE (8 Byte):
+        *
+        * | 62              | 61...40         | 39...12 (varies) | 11...6          | 5             | 4      | 3               | 2               | 1         | 0     |
+        * | PM/Meta protect | VP Page (39:18) | Physical Page    | VP Page (17:12) | Entry Pending | PM src | SLC Bypass Ctrl | Cache Coherency | Read Only | Valid |
+        *
+        *
+        * PAGE DIRECTORY (8 Byte):
+        *
+        *  | 40            | 39...5  (varies)        | 4          | 3...1     | 0     |
+        *  | Entry Pending | Page Table base address | (reserved) | Page Size | Valid |
+        *
+        *
+        * PAGE CATALOGUE (4 Byte):
+        *
+        *  | 31...4                      | 3...2      | 1             | 0     |
+        *  | Page Directory base address | (reserved) | Entry Pending | Valid |
+        *
+        */
+
+
+       /* Example of how to get the PD address from a PC entry.
+        * The procedure is the same for PD and PT entries to retrieve PT and Page addresses:
+        *
+        * 1) sRGXMMUPCEConfig.uiAddrMask applied to PC entry with '&':
+        *  | 31...4   | 3...2      | 1             | 0     |
+        *  | PD Addr  | 0          | 0             | 0     |
+        *
+        * 2) sRGXMMUPCEConfig.uiAddrShift applied with '>>':
+        *  | 27...0   |
+        *  | PD Addr  |
+        *
+        * 3) sRGXMMUPCEConfig.uiAddrLog2Align applied with '<<':
+        *  | 39...0   |
+        *  | PD Addr  |
+        *
+        */
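+
+       /* Worked example with illustrative values (assuming the sRGXMMUPCEConfig
+        * settings below): for a PC entry value of 0x00ABCDE1 (valid bit set):
+        *
+        *  1) 0x00ABCDE1 & uiAddrMask (0xfffffff0)  = 0x00ABCDE0
+        *  2) 0x00ABCDE0 >> uiAddrShift (4)         = 0x000ABCDE
+        *  3) 0x000ABCDE << uiAddrLog2Align (12)    = 0x0ABCDE000
+        *
+        * i.e. a 4kB-aligned 40-bit physical address for the PD.
+        */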
+
+
+       sRGXMMUDeviceAttributes.pszMMUPxPDumpMemSpaceName =
+                       PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL]);
+
+       /*
+        * Setup sRGXMMUPCEConfig
+        */
+       sRGXMMUPCEConfig.uiBytesPerEntry = 4;     /* 32 bit entries */
+       sRGXMMUPCEConfig.uiAddrMask = 0xfffffff0; /* Mask to get significant address bits of PC entry i.e. the address of the PD */
+
+       sRGXMMUPCEConfig.uiAddrShift = 4;         /* Shift this many bits to get PD address */
+       sRGXMMUPCEConfig.uiAddrLog2Align = 12;    /* Alignment of PD physical addresses. */
+
+       sRGXMMUPCEConfig.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PCE_PROTMASK : RGX_MMUCTRL_PCE_PROTMASK; /* Mask to get the status bits */
+       sRGXMMUPCEConfig.uiProtShift = 0;                       /* Shift this many bits to get the status bits */
+
+       sRGXMMUPCEConfig.uiValidEnMask = RGX_MMUCTRL_PC_DATA_VALID_EN;     /* Mask to get entry valid bit of the PC */
+       sRGXMMUPCEConfig.uiValidEnShift = RGX_MMUCTRL_PC_DATA_VALID_SHIFT; /* Shift this many bits to get entry valid bit */
+
+       /*
+        *  Setup sRGXMMUTopLevelDevVAddrConfig
+        */
+       sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; /* Mask to get PC index applied to a 40 bit virt. device address */
+       sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;  /* Shift a 40 bit virt. device address by this amount to get the PC index */
+       sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask,
+                       sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift));
+
+       sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; /* Mask to get PD index applied to a 40 bit virt. device address */
+       sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;  /* Shift a 40 bit virt. device address by this amount to get the PD index */
+       sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask,
+                       sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift));
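+
+       /* Note: UNITS_IN_BITFIELD() is assumed here to yield the number of
+        * distinct index values the masked bitfield can take, i.e.
+        * (mask >> shift) + 1, so uiNumEntriesPC/uiNumEntriesPD above are the
+        * entry counts of the Page Catalogue and Page Directory respectively.
+        */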
+
+       /*
+        *
+        *  Configuration for heaps with 4kB Data-Page size
+        *
+        */
+
+       /*
+        * Setup sRGXMMUPDEConfig_4KBDP
+        */
+       sRGXMMUPDEConfig_4KBDP.uiBytesPerEntry = 8;
+
+       sRGXMMUPDEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+       sRGXMMUPDEConfig_4KBDP.uiAddrShift = 12;
+       sRGXMMUPDEConfig_4KBDP.uiAddrLog2Align = 12;
+
+       sRGXMMUPDEConfig_4KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+       sRGXMMUPDEConfig_4KBDP.uiVarCtrlShift = 1;
+
+       sRGXMMUPDEConfig_4KBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PDE_PROTMASK : RGX_MMUCTRL_PDE_PROTMASK;
+       sRGXMMUPDEConfig_4KBDP.uiProtShift = 0;
+
+       sRGXMMUPDEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+       sRGXMMUPDEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+       /*
+        * Setup sRGXMMUPTEConfig_4KBDP
+        */
+       sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry = 8;
+
+       sRGXMMUPTEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffff000);
+       sRGXMMUPTEConfig_4KBDP.uiAddrShift = 12;
+       sRGXMMUPTEConfig_4KBDP.uiAddrLog2Align = 12; /* Alignment of the physical addresses of the pages NOT PTs */
+
+       sRGXMMUPTEConfig_4KBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PTE_PROTMASK : RGX_MMUCTRL_PTE_PROTMASK;
+       sRGXMMUPTEConfig_4KBDP.uiProtShift = 0;
+
+       sRGXMMUPTEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+       sRGXMMUPTEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+       /*
+        * Setup sRGXMMUDevVAddrConfig_4KBDP
+        */
+       sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+       sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+       sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask,
+                       sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift));
+
+       sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+       sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+       sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask,
+                       sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift));
+
+       sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask = ~RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK;
+       sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift = RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT;
+       sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask,
+                       sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift));
+
+       sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000000fff);
+       sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetShift = 0;
+       sRGXMMUDevVAddrConfig_4KBDP.uiOffsetInBytes = 0;
+
+       /*
+        * Setup gsPageSizeConfig4KB
+        */
+       gsPageSizeConfig4KB.psPDEConfig = &sRGXMMUPDEConfig_4KBDP;
+       gsPageSizeConfig4KB.psPTEConfig = &sRGXMMUPTEConfig_4KBDP;
+       gsPageSizeConfig4KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_4KBDP;
+       gsPageSizeConfig4KB.uiRefCount = 0;
+       gsPageSizeConfig4KB.uiMaxRefCount = 0;
+
+
+       /*
+        *
+        *  Configuration for heaps with 16kB Data-Page size
+        *
+        */
+
+       /*
+        * Setup sRGXMMUPDEConfig_16KBDP
+        */
+       sRGXMMUPDEConfig_16KBDP.uiBytesPerEntry = 8;
+
+       sRGXMMUPDEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+       sRGXMMUPDEConfig_16KBDP.uiAddrShift = 10;
+       sRGXMMUPDEConfig_16KBDP.uiAddrLog2Align = 10;
+
+       sRGXMMUPDEConfig_16KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+       sRGXMMUPDEConfig_16KBDP.uiVarCtrlShift = 1;
+
+       sRGXMMUPDEConfig_16KBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PDE_PROTMASK : RGX_MMUCTRL_PDE_PROTMASK;
+       sRGXMMUPDEConfig_16KBDP.uiProtShift = 0;
+
+       sRGXMMUPDEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+       sRGXMMUPDEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+       /*
+        * Setup sRGXMMUPTEConfig_16KBDP
+        */
+       sRGXMMUPTEConfig_16KBDP.uiBytesPerEntry = 8;
+
+       sRGXMMUPTEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xffffffc000);
+       sRGXMMUPTEConfig_16KBDP.uiAddrShift = 14;
+       sRGXMMUPTEConfig_16KBDP.uiAddrLog2Align = 14;
+
+       sRGXMMUPTEConfig_16KBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PTE_PROTMASK : RGX_MMUCTRL_PTE_PROTMASK;
+       sRGXMMUPTEConfig_16KBDP.uiProtShift = 0;
+
+       sRGXMMUPTEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+       sRGXMMUPTEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+       /*
+        * Setup sRGXMMUDevVAddrConfig_16KBDP
+        */
+       sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+       sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+       sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask,
+                       sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+       sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+       sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask,
+                       sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001fc000);
+       sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift = 14;
+       sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask,
+                       sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift));
+
+       sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000003fff);
+       sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetShift = 0;
+       sRGXMMUDevVAddrConfig_16KBDP.uiOffsetInBytes = 0;
+
+       /*
+        * Setup gsPageSizeConfig16KB
+        */
+       gsPageSizeConfig16KB.psPDEConfig = &sRGXMMUPDEConfig_16KBDP;
+       gsPageSizeConfig16KB.psPTEConfig = &sRGXMMUPTEConfig_16KBDP;
+       gsPageSizeConfig16KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_16KBDP;
+       gsPageSizeConfig16KB.uiRefCount = 0;
+       gsPageSizeConfig16KB.uiMaxRefCount = 0;
+
+
+       /*
+        *
+        *  Configuration for heaps with 64kB Data-Page size
+        *
+        */
+
+       /*
+        * Setup sRGXMMUPDEConfig_64KBDP
+        */
+       sRGXMMUPDEConfig_64KBDP.uiBytesPerEntry = 8;
+
+       sRGXMMUPDEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+       sRGXMMUPDEConfig_64KBDP.uiAddrShift = 8;
+       sRGXMMUPDEConfig_64KBDP.uiAddrLog2Align = 8;
+
+       sRGXMMUPDEConfig_64KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+       sRGXMMUPDEConfig_64KBDP.uiVarCtrlShift = 1;
+
+       sRGXMMUPDEConfig_64KBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PDE_PROTMASK : RGX_MMUCTRL_PDE_PROTMASK;
+       sRGXMMUPDEConfig_64KBDP.uiProtShift = 0;
+
+       sRGXMMUPDEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+       sRGXMMUPDEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+       /*
+        * Setup sRGXMMUPTEConfig_64KBDP
+        */
+       sRGXMMUPTEConfig_64KBDP.uiBytesPerEntry = 8;
+
+       sRGXMMUPTEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xffffff0000);
+       sRGXMMUPTEConfig_64KBDP.uiAddrShift = 16;
+       sRGXMMUPTEConfig_64KBDP.uiAddrLog2Align = 16;
+
+       sRGXMMUPTEConfig_64KBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PTE_PROTMASK : RGX_MMUCTRL_PTE_PROTMASK;
+       sRGXMMUPTEConfig_64KBDP.uiProtShift = 0;
+
+       sRGXMMUPTEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+       sRGXMMUPTEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+       /*
+        * Setup sRGXMMUDevVAddrConfig_64KBDP
+        */
+       sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+       sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+       sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask,
+                       sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+       sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+       sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask,
+                       sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001f0000);
+       sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift = 16;
+       sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask,
+                       sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000000ffff);
+       sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetShift = 0;
+       sRGXMMUDevVAddrConfig_64KBDP.uiOffsetInBytes = 0;
+
+       /*
+        * Setup gsPageSizeConfig64KB
+        */
+       gsPageSizeConfig64KB.psPDEConfig = &sRGXMMUPDEConfig_64KBDP;
+       gsPageSizeConfig64KB.psPTEConfig = &sRGXMMUPTEConfig_64KBDP;
+       gsPageSizeConfig64KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_64KBDP;
+       gsPageSizeConfig64KB.uiRefCount = 0;
+       gsPageSizeConfig64KB.uiMaxRefCount = 0;
+
+
+       /*
+        *
+        *  Configuration for heaps with 256kB Data-Page size
+        *
+        */
+
+       /*
+        * Setup sRGXMMUPDEConfig_256KBDP
+        */
+       sRGXMMUPDEConfig_256KBDP.uiBytesPerEntry = 8;
+
+       sRGXMMUPDEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+       sRGXMMUPDEConfig_256KBDP.uiAddrShift = 6;
+       sRGXMMUPDEConfig_256KBDP.uiAddrLog2Align = 6;
+
+       sRGXMMUPDEConfig_256KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+       sRGXMMUPDEConfig_256KBDP.uiVarCtrlShift = 1;
+
+       sRGXMMUPDEConfig_256KBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PDE_PROTMASK : RGX_MMUCTRL_PDE_PROTMASK;
+       sRGXMMUPDEConfig_256KBDP.uiProtShift = 0;
+
+       sRGXMMUPDEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+       sRGXMMUPDEConfig_256KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+       /*
+        * Setup MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP
+        */
+       sRGXMMUPTEConfig_256KBDP.uiBytesPerEntry = 8;
+
+       sRGXMMUPTEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffc0000);
+       sRGXMMUPTEConfig_256KBDP.uiAddrShift = 18;
+       sRGXMMUPTEConfig_256KBDP.uiAddrLog2Align = 18;
+
+       sRGXMMUPTEConfig_256KBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PTE_PROTMASK : RGX_MMUCTRL_PTE_PROTMASK;
+       sRGXMMUPTEConfig_256KBDP.uiProtShift = 0;
+
+       sRGXMMUPTEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+       sRGXMMUPTEConfig_256KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+       /*
+        * Setup sRGXMMUDevVAddrConfig_256KBDP
+        */
+       sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+       sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+       sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask,
+                       sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+       sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+       sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask,
+                       sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001c0000);
+       sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift = 18;
+       sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask,
+                       sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000003ffff);
+       sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetShift = 0;
+       sRGXMMUDevVAddrConfig_256KBDP.uiOffsetInBytes = 0;
+
+       /*
+        * Setup gsPageSizeConfig256KB
+        */
+       gsPageSizeConfig256KB.psPDEConfig = &sRGXMMUPDEConfig_256KBDP;
+       gsPageSizeConfig256KB.psPTEConfig = &sRGXMMUPTEConfig_256KBDP;
+       gsPageSizeConfig256KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_256KBDP;
+       gsPageSizeConfig256KB.uiRefCount = 0;
+       gsPageSizeConfig256KB.uiMaxRefCount = 0;
+
+       /*
+        * Setup sRGXMMUPDEConfig_1MBDP
+        */
+       sRGXMMUPDEConfig_1MBDP.uiBytesPerEntry = 8;
+
+       sRGXMMUPDEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+       /*
+        * The hardware requires that page tables be 1<<6 = 64 byte aligned even
+        * if they contain fewer entries.
+        */
+       sRGXMMUPDEConfig_1MBDP.uiAddrShift = 6;
+       sRGXMMUPDEConfig_1MBDP.uiAddrLog2Align = 6;
+
+       sRGXMMUPDEConfig_1MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+       sRGXMMUPDEConfig_1MBDP.uiVarCtrlShift = 1;
+
+       sRGXMMUPDEConfig_1MBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PDE_PROTMASK : RGX_MMUCTRL_PDE_PROTMASK;
+       sRGXMMUPDEConfig_1MBDP.uiProtShift = 0;
+
+       sRGXMMUPDEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+       sRGXMMUPDEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+       /*
+        * Setup sRGXMMUPTEConfig_1MBDP
+        */
+       sRGXMMUPTEConfig_1MBDP.uiBytesPerEntry = 8;
+
+       sRGXMMUPTEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffff00000);
+       sRGXMMUPTEConfig_1MBDP.uiAddrShift = 20;
+       sRGXMMUPTEConfig_1MBDP.uiAddrLog2Align = 20;
+
+       sRGXMMUPTEConfig_1MBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PTE_PROTMASK : RGX_MMUCTRL_PTE_PROTMASK;
+       sRGXMMUPTEConfig_1MBDP.uiProtShift = 0;
+
+       sRGXMMUPTEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+       sRGXMMUPTEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+       /*
+        * Setup sRGXMMUDevVAddrConfig_1MBDP
+        */
+       sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+       sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+       sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask,
+                       sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+       sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+       sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask,
+                       sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000100000);
+       sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift = 20;
+       sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask,
+                       sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00000fffff);
+       sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetShift = 0;
+       sRGXMMUDevVAddrConfig_1MBDP.uiOffsetInBytes = 0;
+
+       /*
+        * Setup gsPageSizeConfig1MB
+        */
+       gsPageSizeConfig1MB.psPDEConfig = &sRGXMMUPDEConfig_1MBDP;
+       gsPageSizeConfig1MB.psPTEConfig = &sRGXMMUPTEConfig_1MBDP;
+       gsPageSizeConfig1MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_1MBDP;
+       gsPageSizeConfig1MB.uiRefCount = 0;
+       gsPageSizeConfig1MB.uiMaxRefCount = 0;
+
+       /*
+        * Setup sRGXMMUPDEConfig_2MBDP
+        */
+       sRGXMMUPDEConfig_2MBDP.uiBytesPerEntry = 8;
+
+       sRGXMMUPDEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+       /*
+        * The hardware requires that page tables be 1<<6 = 64 byte aligned even
+        * if they contain fewer entries.
+        */
+       sRGXMMUPDEConfig_2MBDP.uiAddrShift = 6;
+       sRGXMMUPDEConfig_2MBDP.uiAddrLog2Align = 6;
+
+       sRGXMMUPDEConfig_2MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+       sRGXMMUPDEConfig_2MBDP.uiVarCtrlShift = 1;
+
+       sRGXMMUPDEConfig_2MBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PDE_PROTMASK : RGX_MMUCTRL_PDE_PROTMASK;
+       sRGXMMUPDEConfig_2MBDP.uiProtShift = 0;
+
+       sRGXMMUPDEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+       sRGXMMUPDEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+       /*
+        * Setup sRGXMMUPTEConfig_2MBDP
+        */
+       sRGXMMUPTEConfig_2MBDP.uiBytesPerEntry = 8;
+
+       sRGXMMUPTEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xffffe00000);
+       sRGXMMUPTEConfig_2MBDP.uiAddrShift = 21;
+       sRGXMMUPTEConfig_2MBDP.uiAddrLog2Align = 21;
+
+       sRGXMMUPTEConfig_2MBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PTE_PROTMASK : RGX_MMUCTRL_PTE_PROTMASK;
+       sRGXMMUPTEConfig_2MBDP.uiProtShift = 0;
+
+       sRGXMMUPTEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+       sRGXMMUPTEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+       /*
+        * Setup sRGXMMUDevVAddrConfig_2MBDP
+        */
+       sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+       sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+       sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask,
+                       sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+       sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+       sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask,
+                       sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000000000);
+       sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift = 21;
+       sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask,
+                       sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift));
+
+
+       sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00001fffff);
+       sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetShift = 0;
+       sRGXMMUDevVAddrConfig_2MBDP.uiOffsetInBytes = 0;
+
+       /*
+        * Setup gsPageSizeConfig2MB
+        */
+       gsPageSizeConfig2MB.psPDEConfig = &sRGXMMUPDEConfig_2MBDP;
+       gsPageSizeConfig2MB.psPTEConfig = &sRGXMMUPTEConfig_2MBDP;
+       gsPageSizeConfig2MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_2MBDP;
+       gsPageSizeConfig2MB.uiRefCount = 0;
+       gsPageSizeConfig2MB.uiMaxRefCount = 0;
+
+       /*
+        * Setup sRGXMMUDeviceAttributes
+        */
+       sRGXMMUDeviceAttributes.eMMUType = PDUMP_MMU_TYPE_VARPAGE_40BIT;
+       sRGXMMUDeviceAttributes.eTopLevel = MMU_LEVEL_3;
+       sRGXMMUDeviceAttributes.ui32BaseAlign = RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT;
+       sRGXMMUDeviceAttributes.psBaseConfig = &sRGXMMUPCEConfig;
+       sRGXMMUDeviceAttributes.psTopLevelDevVAddrConfig = &sRGXMMUTopLevelDevVAddrConfig;
+
+       /* Functions for deriving page table/dir/cat protection bits */
+       sRGXMMUDeviceAttributes.pfnDerivePCEProt8 = RGXDerivePCEProt8;
+       sRGXMMUDeviceAttributes.pfnDerivePCEProt4 = RGXDerivePCEProt4;
+       sRGXMMUDeviceAttributes.pfnDerivePDEProt8 = RGXDerivePDEProt8;
+       sRGXMMUDeviceAttributes.pfnDerivePDEProt4 = RGXDerivePDEProt4;
+       sRGXMMUDeviceAttributes.pfnDerivePTEProt8 = RGXDerivePTEProt8;
+       sRGXMMUDeviceAttributes.pfnDerivePTEProt4 = RGXDerivePTEProt4;
+
+       /* Functions for establishing configurations for PDE/PTE/DEVVADDR
+          on a per-heap basis */
+       sRGXMMUDeviceAttributes.pfnGetPageSizeConfiguration = RGXGetPageSizeConfigCB;
+       sRGXMMUDeviceAttributes.pfnPutPageSizeConfiguration = RGXPutPageSizeConfigCB;
+
+       sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE4 = RGXGetPageSizeFromPDE4;
+       sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE8 = RGXGetPageSizeFromPDE8;
+       sRGXMMUDeviceAttributes.pfnGetPageSizeFromVirtAddr = NULL;
+
+       if (bHaveMMU4)
+       {
+               /* override some of these functions for MMU4 as page size is not stored in PD entries */
+               sRGXMMUDeviceAttributes.pfnDerivePDEProt8 = RGXMMU4DerivePDEProt8;
+               sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE8 = RGXMMU4GetPageSizeFromPDE8;
+               sRGXMMUDeviceAttributes.pfnGetPageSizeFromVirtAddr = RGXMMU4GetPageSizeFromVirtAddr;
+       }
+
+       psDeviceNode->psMMUDevAttrs = &sRGXMMUDeviceAttributes;
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR eError;
+
+       eError = PVRSRV_OK;
+
+#if defined(PDUMP)
+       psDeviceNode->pfnMMUGetContextID = NULL;
+#endif
+
+       psDeviceNode->psMMUDevAttrs = NULL;
+
+#if defined(DEBUG)
+       PVR_DPF((PVR_DBG_MESSAGE, "Variable Page Size Heap Stats:"));
+       PVR_DPF((PVR_DBG_MESSAGE, "Max 4K page heaps: %d",
+                       gsPageSizeConfig4KB.uiMaxRefCount));
+       PVR_DPF((PVR_DBG_VERBOSE, "Current 4K page heaps (should be 0): %d",
+                       gsPageSizeConfig4KB.uiRefCount));
+       PVR_DPF((PVR_DBG_MESSAGE, "Max 16K page heaps: %d",
+                       gsPageSizeConfig16KB.uiMaxRefCount));
+       PVR_DPF((PVR_DBG_VERBOSE, "Current 16K page heaps (should be 0): %d",
+                       gsPageSizeConfig16KB.uiRefCount));
+       PVR_DPF((PVR_DBG_MESSAGE, "Max 64K page heaps: %d",
+                       gsPageSizeConfig64KB.uiMaxRefCount));
+       PVR_DPF((PVR_DBG_VERBOSE, "Current 64K page heaps (should be 0): %d",
+                       gsPageSizeConfig64KB.uiRefCount));
+       PVR_DPF((PVR_DBG_MESSAGE, "Max 256K page heaps: %d",
+                       gsPageSizeConfig256KB.uiMaxRefCount));
+       PVR_DPF((PVR_DBG_VERBOSE, "Current 256K page heaps (should be 0): %d",
+                       gsPageSizeConfig256KB.uiRefCount));
+       PVR_DPF((PVR_DBG_MESSAGE, "Max 1M page heaps: %d",
+                       gsPageSizeConfig1MB.uiMaxRefCount));
+       PVR_DPF((PVR_DBG_VERBOSE, "Current 1M page heaps (should be 0): %d",
+                       gsPageSizeConfig1MB.uiRefCount));
+       PVR_DPF((PVR_DBG_MESSAGE, "Max 2M page heaps: %d",
+                       gsPageSizeConfig2MB.uiMaxRefCount));
+       PVR_DPF((PVR_DBG_VERBOSE, "Current 2M page heaps (should be 0): %d",
+                       gsPageSizeConfig2MB.uiRefCount));
+#endif
+       if (gsPageSizeConfig4KB.uiRefCount > 0 ||
+                       gsPageSizeConfig16KB.uiRefCount > 0 ||
+                       gsPageSizeConfig64KB.uiRefCount > 0 ||
+                       gsPageSizeConfig256KB.uiRefCount > 0 ||
+                       gsPageSizeConfig1MB.uiRefCount > 0 ||
+                       gsPageSizeConfig2MB.uiRefCount > 0
+       )
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXMMUInit_Unregister: Unbalanced MMU API Usage (Internal error)"));
+       }
+
+       return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXMMUInit_GetConfigRangeValue
+@Description    Helper Function
+                               For a given virtual address range and page size, return the
+                               value to load into an MMU_PAGE_SIZE_RANGE config register.
+@Return         64-bit register value
+*/ /**************************************************************************/
+IMG_UINT64 RGXMMUInit_GetConfigRangeValue(IMG_UINT32 ui32DataPageSize, IMG_UINT64 ui64BaseAddress, IMG_UINT64 ui64RangeSize)
+{
+       /* end address of range is inclusive */
+       IMG_UINT64 ui64EndAddress = ui64BaseAddress + ui64RangeSize - (1 << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT);
+       IMG_UINT64 ui64RegValue = 0;
+
+       switch (ui32DataPageSize)
+       {
+               case 16*1024:
+                       ui64RegValue = 1;
+                       break;
+               case 64*1024:
+                       ui64RegValue = 2;
+                       break;
+               case 256*1024:
+                       ui64RegValue = 3;
+                       break;
+               case 1024*1024:
+                       ui64RegValue = 4;
+                       break;
+               case 2*1024*1024:
+                       ui64RegValue = 5;
+                       break;
+               case 4*1024:
+                       /* fall through */
+               default:
+                       /* anything we don't support, use 4K */
+                       break;
+       }
+
+       /* check that the range is defined by valid 40 bit virtual addresses */
+       PVR_ASSERT((ui64BaseAddress & ~((1ULL << 40) - 1)) == 0);
+       PVR_ASSERT((ui64EndAddress  & ~((1ULL << 40) - 1)) == 0);
+
+       /* the range config register addresses are in 2MB chunks so check 21 lsb are zero */
+       PVR_ASSERT((ui64BaseAddress & ((1 << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSHIFT) - 1)) == 0);
+       PVR_ASSERT((ui64EndAddress  & ((1 << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT)  - 1)) == 0);
+
+       ui64BaseAddress >>= RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSHIFT;
+       ui64EndAddress  >>= RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT;
+
+       ui64RegValue = (ui64RegValue << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_SHIFT) |
+                                  (ui64EndAddress  << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_SHIFT) |
+                                  (ui64BaseAddress << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_SHIFT);
+       return ui64RegValue;
+}
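+
+/* Illustrative usage (a sketch, not a statement of the actual call sites): the
+ * value returned here is in the same format that RGXMMU4GetPageSizeFromVirtAddr()
+ * below decodes from psDevInfo->aui64MMUPageSizeRangeValue[]. For example, for a
+ * heap of 64kB data pages (ui64HeapBase and ui64HeapSize are placeholder names,
+ * both 2MB aligned):
+ *
+ *   IMG_UINT64 ui64Range = RGXMMUInit_GetConfigRangeValue(64 * 1024,
+ *                                                         ui64HeapBase,
+ *                                                         ui64HeapSize);
+ */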
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePCEProt4
+@Description    calculate the PCE protection flags based on a 4 byte entry
+@Return         IMG_UINT32 The PCE protection bits
+ */ /**************************************************************************/
+static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags)
+{
+       return (uiProtFlags & MMU_PROTFLAGS_INVALID)?0:RGX_MMUCTRL_PC_DATA_VALID_EN;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePCEProt8
+@Description    calculate the PCE protection flags based on an 8 byte entry
+@Return         IMG_UINT64 Always 0 (8-byte PCE entries are not supported)
+ */ /**************************************************************************/
+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+       PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+       PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+
+       PVR_DPF((PVR_DBG_ERROR, "8-byte PCE not supported on this device"));
+       return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePDEProt4
+@Description    derive the PDE protection flags based on a 4 byte entry
+@Return         IMG_UINT32 Always 0 (4-byte PDE entries are not supported)
+ */ /**************************************************************************/
+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+       PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device"));
+       return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePDEProt8
+@Description    derive the PDE protection flags based on an 8 byte entry
+
+@Input          uiLog2DataPageSize The log2 of the required page size.
+                E.g, for 4KiB pages, this parameter must be 12.
+                For 2MiB pages, it must be set to 21.
+
+@Return         IMG_UINT64 The PDE protection bits (0 if invalid)
+ */ /**************************************************************************/
+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+       IMG_UINT64 ret_value = 0; /* 0 means invalid */
+
+       if (!(uiProtFlags & MMU_PROTFLAGS_INVALID)) /* if not invalid */
+       {
+               switch (uiLog2DataPageSize)
+               {
+               case RGX_HEAP_4KB_PAGE_SHIFT:
+                       ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB;
+                       break;
+               case RGX_HEAP_16KB_PAGE_SHIFT:
+                       ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB;
+                       break;
+               case RGX_HEAP_64KB_PAGE_SHIFT:
+                       ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB;
+                       break;
+               case RGX_HEAP_256KB_PAGE_SHIFT:
+                       ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB;
+                       break;
+               case RGX_HEAP_1MB_PAGE_SHIFT:
+                       ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB;
+                       break;
+               case RGX_HEAP_2MB_PAGE_SHIFT:
+                       ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB;
+                       break;
+               default:
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s:%d: in function<%s>: Invalid parameter log2_page_size. Expected {12, 14, 16, 18, 20, 21}. Got [%u]",
+                                       __FILE__, __LINE__, __func__, uiLog2DataPageSize));
+               }
+       }
+       return ret_value;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePTEProt4
+@Description    calculate the PTE protection flags based on a 4 byte entry
+@Return         IMG_UINT32 Always 0 (4-byte PTE entries are not supported)
+ */ /**************************************************************************/
+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+       PVR_DPF((PVR_DBG_ERROR, "4-byte PTE not supported on this device"));
+
+       return 0;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePTEProt8
+@Description    calculate the PTE protection flags based on an 8 byte entry
+@Return         IMG_UINT64 The PTE protection bits
+ */ /**************************************************************************/
+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+       IMG_UINT64 ui64MMUFlags=0;
+
+       PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+
+       if (((MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE) & uiProtFlags) == (MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE))
+       {
+               /* read/write */
+       }
+       else if (MMU_PROTFLAGS_READABLE & uiProtFlags)
+       {
+               /* read only */
+               ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_READ_ONLY_EN;
+       }
+       else if (MMU_PROTFLAGS_WRITEABLE & uiProtFlags)
+       {
+               /* write only */
+               PVR_DPF((PVR_DBG_WARNING, "RGXDerivePTEProt8: write-only is not possible on this device"));
+       }
+       else if ((MMU_PROTFLAGS_INVALID & uiProtFlags) == 0)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt8: neither read nor write specified..."));
+       }
+
+       /* cache coherency */
+       if (MMU_PROTFLAGS_CACHE_COHERENT & uiProtFlags)
+       {
+               ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_CC_EN;
+       }
+
+       if ((uiProtFlags & MMU_PROTFLAGS_INVALID) == 0)
+       {
+               ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_VALID_EN;
+       }
+
+       if (MMU_PROTFLAGS_DEVICE(PMMETA_PROTECT) & uiProtFlags)
+       {
+               ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN;
+       }
+
+       /*
+        * Always enable caching on the fabric-level cache irrespective of the
+        * type of cache-coherent interconnect and the memory cache attributes.
+        * This needs to be updated if a selective caching policy is ever
+        * implemented based on the cache attributes requested by the caller
+        * and on the cache-coherent interconnect.
+        */
+       ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_AXCACHE_WBRWALLOC;
+
+       return ui64MMUFlags;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXGetPageSizeConfigCB
+@Description    Set up configuration for variable sized data pages.
+                RGXPutPageSizeConfigCB has to be called to ensure correct
+                refcounting.
+@Return         PVRSRV_ERROR
+ */ /**************************************************************************/
+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
+               const MMU_PxE_CONFIG **ppsMMUPDEConfig,
+               const MMU_PxE_CONFIG **ppsMMUPTEConfig,
+               const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+               IMG_HANDLE *phPriv)
+{
+       MMU_PAGESIZECONFIG *psPageSizeConfig;
+
+       switch (uiLog2DataPageSize)
+       {
+       case RGX_HEAP_4KB_PAGE_SHIFT:
+               psPageSizeConfig = &gsPageSizeConfig4KB;
+               break;
+       case RGX_HEAP_16KB_PAGE_SHIFT:
+               psPageSizeConfig = &gsPageSizeConfig16KB;
+               break;
+       case RGX_HEAP_64KB_PAGE_SHIFT:
+               psPageSizeConfig = &gsPageSizeConfig64KB;
+               break;
+       case RGX_HEAP_256KB_PAGE_SHIFT:
+               psPageSizeConfig = &gsPageSizeConfig256KB;
+               break;
+       case RGX_HEAP_1MB_PAGE_SHIFT:
+               psPageSizeConfig = &gsPageSizeConfig1MB;
+               break;
+       case RGX_HEAP_2MB_PAGE_SHIFT:
+               psPageSizeConfig = &gsPageSizeConfig2MB;
+               break;
+       default:
+               PVR_DPF((PVR_DBG_ERROR,
+                               "RGXGetPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
+                               uiLog2DataPageSize));
+               *phPriv = NULL;
+               return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+       }
+
+       /* Point the caller's pointers at the configuration data */
+       *ppsMMUPDEConfig = psPageSizeConfig->psPDEConfig;
+       *ppsMMUPTEConfig = psPageSizeConfig->psPTEConfig;
+       *ppsMMUDevVAddrConfig = psPageSizeConfig->psDevVAddrConfig;
+
+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
+       /* Increment ref-count - not that we're allocating anything here
+          (these are static structs), but one day we might, so we want
+          the Get/Put code to be balanced properly */
+       psPageSizeConfig->uiRefCount++;
+
+       /* This is purely for debug statistics */
+       psPageSizeConfig->uiMaxRefCount = MAX(psPageSizeConfig->uiMaxRefCount,
+                       psPageSizeConfig->uiRefCount);
+#endif
+
+       *phPriv = (IMG_HANDLE)(uintptr_t)uiLog2DataPageSize;
+       PVR_ASSERT (uiLog2DataPageSize == (IMG_UINT32)(uintptr_t)*phPriv);
+
+       return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXPutPageSizeConfigCB
+@Description    Tells this code that the MMU module is done with the
+                configurations set in RGXGetPageSizeConfigCB. This can
+                be a no-op.
+                Called after RGXGetPageSizeConfigCB.
+@Return         PVRSRV_ERROR
+ */ /**************************************************************************/
+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv)
+{
+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
+       MMU_PAGESIZECONFIG *psPageSizeConfig;
+       IMG_UINT32 uiLog2DataPageSize;
+
+       uiLog2DataPageSize = (IMG_UINT32)(uintptr_t) hPriv;
+
+       switch (uiLog2DataPageSize)
+       {
+       case RGX_HEAP_4KB_PAGE_SHIFT:
+               psPageSizeConfig = &gsPageSizeConfig4KB;
+               break;
+       case RGX_HEAP_16KB_PAGE_SHIFT:
+               psPageSizeConfig = &gsPageSizeConfig16KB;
+               break;
+       case RGX_HEAP_64KB_PAGE_SHIFT:
+               psPageSizeConfig = &gsPageSizeConfig64KB;
+               break;
+       case RGX_HEAP_256KB_PAGE_SHIFT:
+               psPageSizeConfig = &gsPageSizeConfig256KB;
+               break;
+       case RGX_HEAP_1MB_PAGE_SHIFT:
+               psPageSizeConfig = &gsPageSizeConfig1MB;
+               break;
+       case RGX_HEAP_2MB_PAGE_SHIFT:
+               psPageSizeConfig = &gsPageSizeConfig2MB;
+               break;
+       default:
+               PVR_DPF((PVR_DBG_ERROR,
+                               "RGXPutPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
+                               uiLog2DataPageSize));
+               return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+       }
+
+       /* Ref-count here is not especially useful, but it's an extra
+          check that the API is being used correctly */
+       psPageSizeConfig->uiRefCount--;
+#else
+       PVR_UNREFERENCED_PARAMETER(hPriv);
+#endif
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize)
+{
+       PVR_UNREFERENCED_PARAMETER(ui32PDE);
+       PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize);
+       PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device"));
+       return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+}
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize)
+{
+       IMG_UINT64 ui64PageSizeBits = ui64PDE & (~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK);
+
+       switch (ui64PageSizeBits)
+       {
+       case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB:
+               *pui32Log2PageSize = RGX_HEAP_4KB_PAGE_SHIFT;
+               break;
+       case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB:
+               *pui32Log2PageSize = RGX_HEAP_16KB_PAGE_SHIFT;
+               break;
+       case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB:
+               *pui32Log2PageSize = RGX_HEAP_64KB_PAGE_SHIFT;
+               break;
+       case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB:
+               *pui32Log2PageSize = RGX_HEAP_256KB_PAGE_SHIFT;
+               break;
+       case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB:
+               *pui32Log2PageSize = RGX_HEAP_1MB_PAGE_SHIFT;
+               break;
+       case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB:
+               *pui32Log2PageSize = RGX_HEAP_2MB_PAGE_SHIFT;
+               break;
+       default:
+               PVR_DPF((PVR_DBG_ERROR,
+                               "RGXGetPageSizeFromPDE8: Invalid page size bitfield %" IMG_UINT64_FMTSPECx " in PDE",
+                               ui64PageSizeBits));
+
+               return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+       }
+       return PVRSRV_OK;
+}
+
+
+
+
+/*************************************************************************/ /*!
+@Function       RGXMMU4DerivePDEProt8
+@Description    derive the PDE protection flags based on an 8 byte entry
+
+@Input          uiLog2DataPageSize: ignored as MMU4 doesn't put page size in PD entries.
+
+@Return         IMG_UINT64 The PDE protection bits (0 if invalid)
+*/ /**************************************************************************/
+static IMG_UINT64 RGXMMU4DerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+       IMG_UINT64 ret_value = 0; /* 0 means invalid */
+       PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+
+       if (!(uiProtFlags & MMU_PROTFLAGS_INVALID)) /* if not invalid */
+       {
+               /* Page size comes from the range config registers; the page-size bits in PD entries are reserved */
+               ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN;
+       }
+       return ret_value;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXMMU4GetPageSizeFromPDE8
+@Description    The upper layers should ensure that this function is never
+                called, as page sizes are not stored in PD entries for MMU4.
+*/ /**************************************************************************/
+static PVRSRV_ERROR RGXMMU4GetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize)
+{
+       PVR_UNREFERENCED_PARAMETER(ui64PDE);
+       PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize);
+
+       PVR_ASSERT(0 && "RGXMMU4GetPageSizeFromPDE8 called in error. MMU4 does not store page sizes in PDT.");
+       return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXMMU4GetPageSizeFromVirtAddr
+@Description    Get page size by walking through range config registers
+                looking for a match against the virtual address.
+*/ /**************************************************************************/
+static PVRSRV_ERROR RGXMMU4GetPageSizeFromVirtAddr(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+                                                                                                  IMG_DEV_VIRTADDR sDevVAddr,
+                                                                                                  IMG_UINT32 *pui32Log2PageSize)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_UINT32 i;
+
+       /* assume default of 4KB page size */
+       *pui32Log2PageSize = 12;
+
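+       /* The PAGE_SIZE field decode below mirrors the encoding produced by
+        * RGXMMUInit_GetConfigRangeValue(): 0 = 4KB, 1 = 16KB, 2 = 64KB,
+        * 3 = 256KB, 4 = 1MB, 5 = 2MB.
+        */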
+       /* Loop through the range registers looking for the given target address */
+       for (i = 0; i < ARRAY_SIZE(psDevInfo->aui64MMUPageSizeRangeValue); ++i)
+       {
+               IMG_UINT64 ui64RangeVal = psDevInfo->aui64MMUPageSizeRangeValue[i];
+
+               if (ui64RangeVal != 0)
+               {
+                       /* The end address in the register is inclusive, so add 1 to make it exclusive */
+                       IMG_UINT64 ui64Base = ((ui64RangeVal & ~RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_CLRMSK)
+                                                                                               >> RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_SHIFT)
+                                                                                               << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSHIFT;
+                       IMG_UINT64 ui64End  = (((ui64RangeVal & ~RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_CLRMSK)
+                                                                                               >> RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_SHIFT) + 1)
+                                                                                               << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT;
+
+                       if ((sDevVAddr.uiAddr >= ui64Base) && (sDevVAddr.uiAddr < ui64End))
+                       {
+                               IMG_UINT32 ui32PageSizeField = (IMG_UINT32)((ui64RangeVal & ~RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_CLRMSK)
+                                                                                                               >> RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_SHIFT);
+                               if (ui32PageSizeField < 5)
+                               {
+                                       *pui32Log2PageSize = (ui32PageSizeField << 1) + 12;   /* 12 (4K), 14 (16K), 16 (64K), 18 (256K), 20 (1MB) */
+                               }
+                               else if (ui32PageSizeField == 5)
+                               {
+                                       *pui32Log2PageSize = 21;    /* 2MB */
+                               }
+                               else
+                               {
+                                       eError = PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+                               }
+                               break;
+                       }
+               }
+       }
+
+       return eError;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxmmuinit.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxmmuinit.h
new file mode 100644 (file)
index 0000000..6e0c52e
--- /dev/null
@@ -0,0 +1,61 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific initialisation routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific MMU initialisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* NB: this file is not to be included arbitrarily.  It exists solely
+   for the linkage between rgxinit.c and rgxmmuinit.c, the former
+   being otherwise cluttered by the contents of the latter */
+
+#ifndef SRVKM_RGXMMUINIT_H
+#define SRVKM_RGXMMUINIT_H
+
+#include "device.h"
+#include "img_types.h"
+#include "mmu_common.h"
+#include "img_defs.h"
+
+PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode);
+PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+IMG_UINT64 RGXMMUInit_GetConfigRangeValue(IMG_UINT32 ui32DataPageSize, IMG_UINT64 ui64BaseAddress, IMG_UINT64 ui64RangeSize);
+
+#endif /* #ifndef SRVKM_RGXMMUINIT_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxmulticore.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxmulticore.c
new file mode 100644 (file)
index 0000000..ab6fe58
--- /dev/null
@@ -0,0 +1,216 @@
+/*************************************************************************/ /*!
+@File           rgxmulticore.c
+@Title          Functions related to multicore devices
+@Codingstyle    IMG
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Kernel mode multicore device information handling.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxdevice.h"
+#include "rgxdefs_km.h"
+#include "pdump_km.h"
+#include "rgxmulticore.h"
+#include "multicore_defs.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+
+
+
+static PVRSRV_ERROR RGXGetMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                        IMG_UINT32 ui32CapsSize,
+                                        IMG_UINT32 *pui32NumCores,
+                                        IMG_UINT64 *pui64Caps);
+
+
+/*
+ * RGXGetMultiCoreInfo:
+ * Returns multicore information to clients.
+ * Returns not-supported on cores without multicore support.
+ */
+static PVRSRV_ERROR RGXGetMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                 IMG_UINT32 ui32CapsSize,
+                                 IMG_UINT32 *pui32NumCores,
+                                 IMG_UINT64 *pui64Caps)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       if (psDeviceNode->ui32MultiCoreNumCores == 0)
+       {
+               /* MULTICORE not supported on this device */
+               eError = PVRSRV_ERROR_NOT_SUPPORTED;
+       }
+       else
+       {
+               *pui32NumCores = psDeviceNode->ui32MultiCoreNumCores;
+               if (ui32CapsSize > 0)
+               {
+                       if (ui32CapsSize < psDeviceNode->ui32MultiCoreNumCores)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "Multicore caps buffer too small"));
+                               eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+                       }
+                       else
+                       {
+                               IMG_UINT32 i;
+
+                               for (i = 0; i < psDeviceNode->ui32MultiCoreNumCores; ++i)
+                               {
+                                       pui64Caps[i] = psDeviceNode->pui64MultiCoreCapabilities[i];
+                               }
+                       }
+               }
+       }
+
+       return eError;
+}
+
+
+
+/*
+ * RGXInitMultiCoreInfo:
+ * Reads the multicore HW registers and fills in the data structure for clients.
+ * Returns not-supported on cores without multicore support.
+ */
+PVRSRV_ERROR RGXInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       if (psDeviceNode->pfnGetMultiCoreInfo != NULL)
+       {
+               /* we only set this up once */
+               return PVRSRV_OK;
+       }
+
+       /* defaults for non-multicore devices */
+       psDeviceNode->ui32MultiCoreNumCores = 0;
+       psDeviceNode->ui32MultiCorePrimaryId = (IMG_UINT32)(-1);
+       psDeviceNode->pui64MultiCoreCapabilities = NULL;
+       psDeviceNode->pfnGetMultiCoreInfo = NULL;
+
+       if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT))
+       {
+               IMG_UINT32 ui32MulticoreInfo;
+               IMG_UINT32 ui32PrimaryCoreIds;
+               IMG_UINT32 ui32PrimaryId;
+               IMG_UINT32 ui32TotalCores;
+               IMG_UINT32 ui32NumCores;
+               IMG_UINT32 id, i;
+
+               ui32NumCores = (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE_DOMAIN)
+                                                           & ~RGX_CR_MULTICORE_DOMAIN_GPU_COUNT_CLRMSK)
+                                                           >> RGX_CR_MULTICORE_DOMAIN_GPU_COUNT_SHIFT;
+
+               ui32TotalCores = (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE_SYSTEM)
+                                                           & ~RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_CLRMSK)
+                                                           >> RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_SHIFT;
+               ui32MulticoreInfo = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE);
+#if defined(NO_HARDWARE)
+               /* override to defaults if no hardware */
+               ui32NumCores = 8; /* RGX_MULTICORE_MAX_NOHW_CORES */
+               ui32TotalCores = RGX_MULTICORE_MAX_NOHW_CORES;
+               ui32MulticoreInfo = 0;  /* primary id 0 with 7 secondaries */
+#endif
+               /* ID for this primary is in this register */
+               ui32PrimaryId = (ui32MulticoreInfo & ~RGX_CR_MULTICORE_ID_CLRMSK) >> RGX_CR_MULTICORE_ID_SHIFT;
+
+               /* allocate storage for capabilities */
+               psDeviceNode->pui64MultiCoreCapabilities = OSAllocMem(ui32NumCores * sizeof(psDeviceNode->pui64MultiCoreCapabilities[0]));
+               if (psDeviceNode->pui64MultiCoreCapabilities == NULL)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Failed to alloc memory for Multicore info", __func__));
+                       return PVRSRV_ERROR_OUT_OF_MEMORY;
+               }
+
+               ui32PrimaryCoreIds = (ui32MulticoreInfo & ~RGX_CR_MULTICORE_PRIMARY_CORE_ID_CLRMSK)
+                                                       >> RGX_CR_MULTICORE_PRIMARY_CORE_ID_SHIFT;
+
+               psDeviceNode->ui32MultiCorePrimaryId = ui32PrimaryId;
+               psDeviceNode->ui32MultiCoreNumCores = ui32NumCores;
+
+               PVR_DPF((PVR_DBG_MESSAGE, "Multicore domain has %d cores with primary id %u\n", ui32NumCores, ui32PrimaryId));
+               PDUMPCOMMENT(psDeviceNode,
+                            "RGX Multicore domain has %d cores with primary id %u\n",
+                            ui32NumCores, ui32PrimaryId);
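+               /* The PRIMARY_CORE_ID field of RGX_CR_MULTICORE packs one 3-bit primary
+                * core id per core in the system. The loop below walks these fields and
+                * records every core whose primary id matches this device's primary id. */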
+               for (i = 0, id = 0; id < ui32TotalCores; ++id)
+               {
+                       if ((ui32PrimaryCoreIds & 0x7) == ui32PrimaryId)
+                       {
+                               /* currently all cores are identical so have the same capabilities */
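+                               /* Each 64-bit capability word is the core id OR'd with the
+                                * PRIMARY/GEOMETRY/COMPUTE/FRAGMENT capability flags. */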
+                               psDeviceNode->pui64MultiCoreCapabilities[i] = id
+                                                   | ((id == ui32PrimaryId) ? RGX_MULTICORE_CAPABILITY_PRIMARY_EN : 0)
+                                                   | RGX_MULTICORE_CAPABILITY_GEOMETRY_EN
+                                                   | RGX_MULTICORE_CAPABILITY_COMPUTE_EN
+                                                   | RGX_MULTICORE_CAPABILITY_FRAGMENT_EN;
+                               PDUMPCOMMENT(psDeviceNode, "\tCore %u has caps 0x%08x", id,
+                                            (IMG_UINT32)psDeviceNode->pui64MultiCoreCapabilities[i]);
+                               PVR_DPF((PVR_DBG_MESSAGE, "Core %u has caps 0x%08x", id, (IMG_UINT32)psDeviceNode->pui64MultiCoreCapabilities[i]));
+                               ++i;
+                       }
+                       ui32PrimaryCoreIds >>= 3;
+               }
+
+               /* Register callback to return info about multicore setup to client bridge */
+               psDeviceNode->pfnGetMultiCoreInfo = RGXGetMultiCoreInfo;
+       }
+       else
+       {
+               /* MULTICORE not supported on this device */
+               eError = PVRSRV_ERROR_NOT_SUPPORTED;
+       }
+
+       return eError;
+}
+
+
+/*
+ * RGXDeInitMultiCoreInfo:
+ * Releases resources and clears the MultiCore values in the DeviceNode.
+ */
+void RGXDeInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       if (psDeviceNode->pui64MultiCoreCapabilities != NULL)
+       {
+               OSFreeMem(psDeviceNode->pui64MultiCoreCapabilities);
+               psDeviceNode->pui64MultiCoreCapabilities = NULL;
+               psDeviceNode->ui32MultiCoreNumCores = 0;
+               psDeviceNode->ui32MultiCorePrimaryId = (IMG_UINT32)(-1);
+       }
+       psDeviceNode->pfnGetMultiCoreInfo = NULL;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxpdump.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxpdump.c
new file mode 100644 (file)
index 0000000..bca6a9b
--- /dev/null
@@ -0,0 +1,642 @@
+/*************************************************************************/ /*!
+@File           rgxpdump.c
+@Title          Device specific pdump routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific pdump functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(PDUMP)
+#include "pvrsrv.h"
+#include "devicemem_pdump.h"
+#include "rgxpdump.h"
+#include "pdumpdesc.h"
+#if defined(SUPPORT_VALIDATION)
+#include "validation_soc.h"
+#include "rgxtbdefs.h"
+#endif
+
+/*
+ * There are two different sets of functions, one for META/RISCV and one for
+ * MIPS, because the PDump player does not yet implement support for the
+ * MIPS MMU. For MIPS builds we therefore cannot use
+ * DevmemPDumpSaveToFileVirtual and have to use DevmemPDumpSaveToFile instead.
+ */
+static PVRSRV_ERROR _FWDumpSignatureBufferKM(CONNECTION_DATA * psConnection,
+                                             PVRSRV_DEVICE_NODE *psDeviceNode,
+                                             IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       PDUMPIF(psDeviceNode, "DISABLE_SIGNATURE_BUFFER_DUMP", ui32PDumpFlags);
+       PDUMPELSE(psDeviceNode, "DISABLE_SIGNATURE_BUFFER_DUMP", ui32PDumpFlags);
+
+#if defined(SUPPORT_FIRMWARE_GCOV)
+       /* Gcov */
+       PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Gcov Buffer");
+       DevmemPDumpSaveToFileVirtual(psDevInfo->psFirmwareGcovBufferMemDesc,
+                                                                        0,
+                                                                        psDevInfo->ui32FirmwareGcovSize,
+                                                                        "firmware_gcov.img",
+                                                                        0,
+                                                                        ui32PDumpFlags);
+#endif
+       /* TDM signatures */
+       PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump TDM signatures and checksums Buffer");
+       DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigTDMChecksMemDesc,
+                                                                0,
+                                                                psDevInfo->ui32SigTDMChecksSize,
+                                                                "out.2dsig",
+                                                                0,
+                                                                ui32PDumpFlags);
+
+       /* TA signatures */
+       PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump TA signatures and checksums Buffer");
+       DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigTAChecksMemDesc,
+                                                                0,
+                                                                psDevInfo->ui32SigTAChecksSize,
+                                                                "out.tasig",
+                                                                0,
+                                                                ui32PDumpFlags);
+
+       /* 3D signatures */
+       PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump 3D signatures and checksums Buffer");
+       DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSig3DChecksMemDesc,
+                                                                0,
+                                                                psDevInfo->ui32Sig3DChecksSize,
+                                                                "out.3dsig",
+                                                                0,
+                                                                ui32PDumpFlags);
+       /* CDM signatures */
+       PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump CDM signatures and checksums Buffer");
+       DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigCDMChecksMemDesc,
+                                                                0,
+                                                                psDevInfo->ui32SigCDMChecksSize,
+                                                                "out.cdmsig",
+                                                                0,
+                                                                ui32PDumpFlags);
+
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) &&
+               RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) > 1)
+       {
+               /* RDM signatures */
+               PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump RDM signatures and checksums Buffer");
+               DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigRDMChecksMemDesc,
+                                                                       0,
+                                                                       psDevInfo->ui32SigRDMChecksSize,
+                                                                       "out.rdmsig",
+                                                                       0,
+                                                                       ui32PDumpFlags);
+       }
+
+       PDUMPFI(psDeviceNode, "DISABLE_SIGNATURE_BUFFER_DUMP", ui32PDumpFlags);
+
+#if defined(SUPPORT_VALIDATION) && (defined(SUPPORT_TRP) || defined(SUPPORT_WGP) || defined(SUPPORT_FBCDC_SIGNATURE_CHECK))
+       /*
+        *  Validation signatures buffer
+        */
+       PDUMPIF(psDeviceNode, "DISABLE_VALIDATION_CHECKSUM_BUFFER_DUMP", ui32PDumpFlags);
+       PDUMPELSE(psDeviceNode, "DISABLE_VALIDATION_CHECKSUM_BUFFER_DUMP", ui32PDumpFlags);
+
+       PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump validation signatures buffer");
+       DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWValidationSigMemDesc,
+                                                                0,
+                                                                psDevInfo->ui32ValidationSigSize,
+                                                                "out.trpsig",
+                                                                0,
+                                                                ui32PDumpFlags);
+
+       PDUMPFI(psDeviceNode, "DISABLE_VALIDATION_CHECKSUM_BUFFER_DUMP", ui32PDumpFlags);
+#endif
+
+       return PVRSRV_OK;
+}
+static PVRSRV_ERROR _FWDumpTraceBufferKM(CONNECTION_DATA * psConnection,
+                                                                                PVRSRV_DEVICE_NODE     *psDeviceNode,
+                                                                                IMG_UINT32                     ui32PDumpFlags)
+{
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+       IMG_UINT32      ui32ThreadNum, ui32Size, ui32OutFileOffset;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+       /* Dump trace buffers */
+       PDUMPIF(psDeviceNode, "ENABLE_TRACEBUF", ui32PDumpFlags);
+       PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump trace buffers");
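+       /* For each FW thread the records below are appended to out.trace in order:
+        * the trace pointer, the trace buffer size in DWords, the trace buffer
+        * contents and finally the assert info buffer. */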
+       for (ui32ThreadNum = 0, ui32OutFileOffset = 0; ui32ThreadNum < RGXFW_THREAD_NUM; ui32ThreadNum++)
+       {
+               /*
+                * Some compilers cannot cope with the use of offsetof() below - the specific problem is the use of
+                * a non-const variable in the expression, which they require to be constant. The typical compiler
+                * error produced is "expression must have a constant value".
+                */
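+               /* Equivalent of offsetof(RGXFWIF_TRACEBUF, sTraceBuf[ui32ThreadNum]),
+                * computed manually by taking the member address on a NULL base pointer. */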
+               const IMG_DEVMEM_OFFSET_T uiTraceBufThreadNumOff
+               = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_TRACEBUF *)0)->sTraceBuf[ui32ThreadNum]);
+
+               /* first, dump the trace pointer (ui32TracePointer) */
+               ui32Size = sizeof(IMG_UINT32);
+               DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+                                                               uiTraceBufThreadNumOff,
+                                                               ui32Size,
+                                                               "out.trace",
+                                                               ui32OutFileOffset,
+                                                               ui32PDumpFlags);
+               ui32OutFileOffset += ui32Size;
+
+               /* next, dump size of trace buffer in DWords */
+               ui32Size = sizeof(IMG_UINT32);
+               DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+                                                               offsetof(RGXFWIF_TRACEBUF, ui32TraceBufSizeInDWords),
+                                                               ui32Size,
+                                                               "out.trace",
+                                                               ui32OutFileOffset,
+                                                               ui32PDumpFlags);
+               ui32OutFileOffset += ui32Size;
+
+               /* trace buffer */
+               ui32Size = psDevInfo->psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32);
+               PVR_ASSERT(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum]);
+               DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum],
+                                                               0, /* 0 offset in the trace buffer mem desc */
+                                                               ui32Size,
+                                                               "out.trace",
+                                                               ui32OutFileOffset,
+                                                               ui32PDumpFlags);
+               ui32OutFileOffset += ui32Size;
+
+               /* assert info buffer */
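+               /* The size below covers the two RGXFW_TRACE_BUFFER_ASSERT_SIZE character
+                * arrays of sAssertBuf plus its trailing 32-bit field (presumably the
+                * assert line number). */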
+               ui32Size = RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR)
+                               + RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR)
+                               + sizeof(IMG_UINT32);
+               DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+                                                               offsetof(RGXFWIF_TRACEBUF, sTraceBuf) /* move to first element of sTraceBuf */
+                                                                       + ui32ThreadNum * sizeof(RGXFWIF_TRACEBUF_SPACE) /* skip required number of sTraceBuf elements */
+                                                                       + offsetof(RGXFWIF_TRACEBUF_SPACE, sAssertBuf), /* offset into its sAssertBuf, to be pdumped */
+                                                               ui32Size,
+                                                               "out.trace",
+                                                               ui32OutFileOffset,
+                                                               ui32PDumpFlags);
+               ui32OutFileOffset += ui32Size;
+       }
+       PDUMPFI(psDeviceNode, "ENABLE_TRACEBUF", ui32PDumpFlags);
+
+       /* FW HWPerf buffer is always allocated when PDUMP is defined, irrespective of HWPerf events being enabled/disabled */
+       PVR_ASSERT(psDevInfo->psRGXFWIfHWPerfBufMemDesc);
+
+       /* Dump hwperf buffer */
+       PDUMPIF(psDeviceNode, "ENABLE_HWPERF", ui32PDumpFlags);
+       PDumpCommentWithFlags(psDeviceNode, ui32PDumpFlags, "** Dump HWPerf Buffer");
+       DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfHWPerfBufMemDesc,
+                                                                0,
+                                                                psDevInfo->ui32RGXFWIfHWPerfBufSize,
+                                                                "out.hwperf",
+                                                                0,
+                                                                ui32PDumpFlags);
+       PDUMPFI(psDeviceNode, "ENABLE_HWPERF", ui32PDumpFlags);
+
+       return PVRSRV_OK;
+
+}
+
+
+/*
+ * PVRSRVPDumpSignatureBufferKM
+ */
+PVRSRV_ERROR PVRSRVPDumpSignatureBufferKM(CONNECTION_DATA * psConnection,
+                                          PVRSRV_DEVICE_NODE   *psDeviceNode,
+                                          IMG_UINT32                   ui32PDumpFlags)
+{
+       if (psDeviceNode->pfnCheckDeviceFeature)
+       {
+               return _FWDumpSignatureBufferKM(psConnection,
+                                                                               psDeviceNode,
+                                                                               ui32PDumpFlags);
+       }
+
+       return PVRSRV_OK;
+}
+
+#if defined(SUPPORT_VALIDATION)
+PVRSRV_ERROR PVRSRVPDumpComputeCRCSignatureCheckKM(CONNECTION_DATA * psConnection,
+                                                   PVRSRV_DEVICE_NODE * psDeviceNode,
+                                                   IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+       PVRSRV_ERROR eError;
+
+       if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE)))
+       {
+               return PVRSRV_ERROR_NOT_SUPPORTED;
+       }
+
+       /*
+        * Add a PDUMP POLL on the KZ signature check status.
+        */
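+       /* The polls below expect RGX_CR_SCRATCH11 (presumably written by the FW with
+        * the check outcome) to read 1 for a signature match or 2 for a mismatch. */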
+       if (psDevInfo->ui32ValidationFlags & RGX_VAL_KZ_SIG_CHECK_NOERR_EN)
+       {
+               PDUMPCOMMENT(psDeviceNode, "Verify KZ Signature: match required");
+               eError = PDUMPREGPOL(psDeviceNode,
+                                    RGX_PDUMPREG_NAME,
+                                    RGX_CR_SCRATCH11,
+                                    1U,
+                                    0xFFFFFFFF,
+                                    ui32PDumpFlags,
+                                    PDUMP_POLL_OPERATOR_EQUAL);
+       }
+       else if (psDevInfo->ui32ValidationFlags & RGX_VAL_KZ_SIG_CHECK_ERR_EN)
+       {
+               PDUMPCOMMENT(psDeviceNode, "Verify KZ Signature: mismatch required");
+               eError = PDUMPREGPOL(psDeviceNode,
+                                    RGX_PDUMPREG_NAME,
+                                    RGX_CR_SCRATCH11,
+                                    2U,
+                                    0xFFFFFFFF,
+                                    ui32PDumpFlags,
+                                    PDUMP_POLL_OPERATOR_EQUAL);
+       }
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       return PVRSRV_OK;
+}
+#endif
+
+PVRSRV_ERROR PVRSRVPDumpCRCSignatureCheckKM(CONNECTION_DATA * psConnection,
+                                            PVRSRV_DEVICE_NODE * psDeviceNode,
+                                            IMG_UINT32 ui32PDumpFlags)
+{
+#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_FBCDC_SIGNATURE_CHECK)
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+       PVRSRV_ERROR eError;
+
+       /*
+        * Add a PDUMP POLL on the FBC/FBDC signature check status.
+        */
+       if (psDevInfo->ui32ValidationFlags & RGX_VAL_FBDC_SIG_CHECK_NOERR_EN)
+       {
+               PDUMPCOMMENT(psDeviceNode, "Verify FBCDC Signature: match required");
+               eError = PDUMPREGPOL(psDeviceNode,
+                                    RGX_PDUMPREG_NAME,
+                                    RGX_CR_FBCDC_STATUS,
+                                    0,
+                                    0xFFFFFFFF,
+                                    ui32PDumpFlags,
+                                    PDUMP_POLL_OPERATOR_EQUAL);
+
+               eError = PDUMPREGPOL(psDeviceNode,
+                                    RGX_PDUMPREG_NAME,
+                                    RGX_CR_FBCDC_SIGNATURE_STATUS,
+                                    0,
+                                    0xFFFFFFFF,
+                                    ui32PDumpFlags,
+                                    PDUMP_POLL_OPERATOR_EQUAL);
+       }
+       else if (psDevInfo->ui32ValidationFlags & RGX_VAL_FBDC_SIG_CHECK_ERR_EN)
+       {
+               static char pszVar1[] = ":SYSMEM:$2";
+               static char pszVar2[] = ":SYSMEM:$3";
+               char *pszLoopCondition;
+
+               /*
+                * Do:
+                *  v1 = [RGX_CR_FBCDC_STATUS]
+                *  v2 = [RGX_CR_FBCDC_SIGNATURE_STATUS]
+                * While (v1 OR v2) == 0
+                */
+               PDUMPCOMMENT(psDeviceNode, "Verify FBCDC Signature: mismatch required");
+               eError = PDumpInternalValCondStr(&pszLoopCondition,
+                                    pszVar1,
+                                    0,
+                                    0xFFFFFFFF,
+                                    ui32PDumpFlags,
+                                    PDUMP_POLL_OPERATOR_EQUAL);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "%s: Unable to write pdump verification sequence (%d)", __func__, eError));
+               }
+               else
+               {
+                       eError = PDumpStartDoLoopKM(psDeviceNode, ui32PDumpFlags);
+
+                       eError = PDumpRegRead32ToInternalVar(psDeviceNode,
+                                                                RGX_PDUMPREG_NAME,
+                                                                RGX_CR_FBCDC_STATUS,
+                                                                pszVar1,
+                                                                ui32PDumpFlags);
+
+                       eError = PDumpRegRead32ToInternalVar(psDeviceNode,
+                                                                RGX_PDUMPREG_NAME,
+                                                                RGX_CR_FBCDC_SIGNATURE_STATUS,
+                                                                pszVar2,
+                                                                ui32PDumpFlags);
+
+                       eError = PDumpWriteVarORVarOp(psDeviceNode, pszVar1, pszVar2, ui32PDumpFlags);
+                       eError = PDumpEndDoWhileLoopKM(psDeviceNode, pszLoopCondition, ui32PDumpFlags);
+                       OSFreeMem(pszLoopCondition);
+               }
+       }
+#else
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+#endif /* SUPPORT_VALIDATION && SUPPORT_FBCDC_SIGNATURE_CHECK */
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       return PVRSRV_OK;
+}
+
+
+/*
+ * PVRSRVPDumpValCheckPreCommandKM
+ */
+PVRSRV_ERROR PVRSRVPDumpValCheckPreCommandKM(CONNECTION_DATA * psConnection,
+                                             PVRSRV_DEVICE_NODE * psDeviceNode,
+                                             IMG_UINT32 ui32PDumpFlags)
+{
+#if defined(SUPPORT_VALIDATION)
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+       PVRSRV_ERROR eError;
+
+       //if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_STATE_PIN) &&
+       if    (psDevInfo->ui32ValidationFlags & RGX_VAL_GPUSTATEPIN_EN)
+       {
+               /*
+                * Add a PDUMP POLL on the GPU_STATE inactive status.
+                */
+               PDUMPCOMMENT(psDeviceNode, "Verify GPU system status: INACTIVE");
+               eError = PDUMPREGPOL(psDeviceNode,
+                                    RGX_TB_PDUMPREG_NAME,
+                                    RGX_TB_SYSTEM_STATUS,
+                                    0,
+                                    ~RGX_TB_SYSTEM_STATUS_GPU_STATE_CLRMSK,
+                                    ui32PDumpFlags,
+                                    PDUMP_POLL_OPERATOR_EQUAL);
+       }
+#else
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+#endif
+
+       return PVRSRV_OK;
+}
+
+/*
+ * PVRSRVPDumpValCheckPostCommandKM
+ */
+PVRSRV_ERROR PVRSRVPDumpValCheckPostCommandKM(CONNECTION_DATA * psConnection,
+                                              PVRSRV_DEVICE_NODE * psDeviceNode,
+                                              IMG_UINT32 ui32PDumpFlags)
+{
+#if defined(SUPPORT_VALIDATION)
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+       PVRSRV_ERROR eError;
+
+       //if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_STATE_PIN) &&
+       if    (psDevInfo->ui32ValidationFlags & RGX_VAL_GPUSTATEPIN_EN)
+       {
+               /*
+                * Add a PDUMP POLL on the GPU_STATE active status.
+                */
+               PDUMPCOMMENT(psDeviceNode, "Verify GPU system status: ACTIVE");
+               eError = PDUMPREGPOL(psDeviceNode,
+                                    RGX_TB_PDUMPREG_NAME,
+                                    RGX_TB_SYSTEM_STATUS,
+                                    0,
+                                    ~RGX_TB_SYSTEM_STATUS_GPU_STATE_CLRMSK,
+                                    ui32PDumpFlags,
+                                    PDUMP_POLL_OPERATOR_NOTEQUAL);
+       }
+#else
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+#endif
+
+       return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR PVRSRVPDumpTraceBufferKM(CONNECTION_DATA *psConnection,
+                                      PVRSRV_DEVICE_NODE *psDeviceNode,
+                                      IMG_UINT32 ui32PDumpFlags)
+{
+       if (psDeviceNode->pfnCheckDeviceFeature)
+       {
+               return _FWDumpTraceBufferKM(psConnection, psDeviceNode, ui32PDumpFlags);
+       }
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXPDumpPrepareOutputImageDescriptorHdr(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                       IMG_UINT32 ui32HeaderSize,
+                                                                       IMG_UINT32 ui32DataSize,
+                                                                       IMG_UINT32 ui32LogicalWidth,
+                                                                       IMG_UINT32 ui32LogicalHeight,
+                                                                       IMG_UINT32 ui32PhysicalWidth,
+                                                                       IMG_UINT32 ui32PhysicalHeight,
+                                                                       PDUMP_PIXEL_FORMAT ePixFmt,
+                                                                       IMG_MEMLAYOUT eMemLayout,
+                                                                       IMG_FB_COMPRESSION eFBCompression,
+                                                                       const IMG_UINT32 *paui32FBCClearColour,
+                                                                       PDUMP_FBC_SWIZZLE eFBCSwizzle,
+                                                                       IMG_PBYTE abyPDumpDesc)
+{
+       IMG_PUINT32 pui32Word;
+       IMG_UINT32 ui32HeaderDataSize;
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+       /* Validate parameters */
+       if (((IMAGE_HEADER_SIZE & ~(HEADER_WORD1_SIZE_CLRMSK >> HEADER_WORD1_SIZE_SHIFT)) != 0) ||
+               ((IMAGE_HEADER_VERSION & ~(HEADER_WORD1_VERSION_CLRMSK >> HEADER_WORD1_VERSION_SHIFT)) != 0))
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       memset(abyPDumpDesc, 0, IMAGE_HEADER_SIZE);
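+       /* Header layout as assembled below: word 0 type, word 1 size/version,
+        * word 2 data size, words 3-4 logical width/height, word 5 pixel format,
+        * words 6-7 physical width/height, word 8 stride/memory layout/FBC type,
+        * word 9 FBC compatibility flags, words 10-13 FBC clear colour values. */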
+
+       pui32Word = IMG_OFFSET_ADDR(abyPDumpDesc, 0);
+       pui32Word[0] = (IMAGE_HEADER_TYPE << HEADER_WORD0_TYPE_SHIFT);
+       pui32Word[1] = (IMAGE_HEADER_SIZE << HEADER_WORD1_SIZE_SHIFT) |
+                                  (IMAGE_HEADER_VERSION << HEADER_WORD1_VERSION_SHIFT);
+
+       ui32HeaderDataSize = ui32DataSize;
+       if (eFBCompression != IMG_FB_COMPRESSION_NONE)
+       {
+               ui32HeaderDataSize += ui32HeaderSize;
+       }
+       pui32Word[2] = ui32HeaderDataSize << HEADER_WORD2_DATA_SIZE_SHIFT;
+
+       pui32Word[3] = ui32LogicalWidth << IMAGE_HEADER_WORD3_LOGICAL_WIDTH_SHIFT;
+       pui32Word[4] = ui32LogicalHeight << IMAGE_HEADER_WORD4_LOGICAL_HEIGHT_SHIFT;
+
+       pui32Word[5] = ePixFmt << IMAGE_HEADER_WORD5_FORMAT_SHIFT;
+
+       pui32Word[6] = ui32PhysicalWidth << IMAGE_HEADER_WORD6_PHYSICAL_WIDTH_SHIFT;
+       pui32Word[7] = ui32PhysicalHeight << IMAGE_HEADER_WORD7_PHYSICAL_HEIGHT_SHIFT;
+
+       pui32Word[8] = IMAGE_HEADER_WORD8_STRIDE_POSITIVE | IMAGE_HEADER_WORD8_BIFTYPE_NONE;
+
+       switch (eMemLayout)
+       {
+       case IMG_MEMLAYOUT_STRIDED:
+               pui32Word[8] |= IMAGE_HEADER_WORD8_TWIDDLING_STRIDED;
+               break;
+       case IMG_MEMLAYOUT_TWIDDLED:
+               pui32Word[8] |= IMAGE_HEADER_WORD8_TWIDDLING_ZTWIDDLE;
+               break;
+       default:
+               PVR_DPF((PVR_DBG_ERROR, "Unsupported memory layout - %d", eMemLayout));
+               return PVRSRV_ERROR_UNSUPPORTED_MEMORY_LAYOUT;
+       }
+
+       switch (eFBCompression)
+       {
+       case IMG_FB_COMPRESSION_NONE:
+               break;
+       case IMG_FB_COMPRESSION_DIRECT_PACKED_8x8:
+       case IMG_FB_COMPRESSION_DIRECT_8x8:
+       case IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8:
+               pui32Word[8] |= IMAGE_HEADER_WORD8_FBCTYPE_8X8;
+               break;
+       case IMG_FB_COMPRESSION_DIRECT_16x4:
+       case IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4:
+               pui32Word[8] |= IMAGE_HEADER_WORD8_FBCTYPE_16x4;
+               break;
+       case IMG_FB_COMPRESSION_DIRECT_32x2:
+       case IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2:
+               /* Services Client guards against unsupported FEATURE_FB_CDC_32x2.
+                * We should never pass through the UM/KM bridge on cores lacking the feature.
+                */
+               pui32Word[8] |= IMAGE_HEADER_WORD8_FBCTYPE_32x2;
+               break;
+       default:
+               PVR_DPF((PVR_DBG_ERROR, "Unsupported compression mode - %d", eFBCompression));
+               return PVRSRV_ERROR_UNSUPPORTED_FB_COMPRESSION_MODE;
+       }
+
+       pui32Word[9] = 0;
+
+       if (eFBCompression != IMG_FB_COMPRESSION_NONE)
+       {
+               if ((RGX_GET_FEATURE_VALUE(psDevInfo, FBCDC) == 4) || (RGX_GET_FEATURE_VALUE(psDevInfo, FBCDC) == 5))
+               {
+                       pui32Word[9] |= IMAGE_HEADER_WORD9_FBCCOMPAT_V4;
+
+                       if (eFBCompression == IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8  ||
+                               eFBCompression == IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4 ||
+                               eFBCompression == IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2)
+                       {
+                               pui32Word[9] |= IMAGE_HEADER_WORD9_LOSSY_ON;
+                       }
+
+                       pui32Word[9] |= (eFBCSwizzle << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) & IMAGE_HEADER_WORD9_SWIZZLE_CLRMSK;
+               }
+               else /* 3 or 3.1 */
+               {
+                       IMG_BOOL bIsFBC31 = (psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags &
+                                            RGXFWIF_INICFG_FBCDC_V3_1_EN) != 0;
+
+                       if (bIsFBC31)
+                       {
+                               pui32Word[9] |= IMAGE_HEADER_WORD9_FBCCOMPAT_V3_1_LAYOUT2;
+                       }
+                       else
+                       {
+                               pui32Word[9] |= IMAGE_HEADER_WORD9_FBCCOMPAT_V3_0_LAYOUT2;
+                       }
+               }
+
+               pui32Word[9] |= IMAGE_HEADER_WORD9_FBCDECOR_ENABLE;
+       }
+
+       pui32Word[10] = paui32FBCClearColour[0];
+       pui32Word[11] = paui32FBCClearColour[1];
+       pui32Word[12] = (IMG_UINT32) (psDeviceNode->ui64FBCClearColour & 0xFFFFFFFF);
+       pui32Word[13] = (IMG_UINT32) (psDeviceNode->ui64FBCClearColour >> 32);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXPDumpPrepareOutputDataDescriptorHdr(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                       IMG_UINT32 ui32HeaderType,
+                                                                       IMG_UINT32 ui32DataSize,
+                                                                       IMG_UINT32 ui32ElementType,
+                                                                       IMG_UINT32 ui32ElementCount,
+                                                                       IMG_PBYTE pbyPDumpDataHdr)
+{
+       IMG_PUINT32 pui32Word;
+
+       /* Validate parameters */
+       if (((DATA_HEADER_SIZE & ~(HEADER_WORD1_SIZE_CLRMSK >> HEADER_WORD1_SIZE_SHIFT)) != 0) ||
+               ((DATA_HEADER_VERSION & ~(HEADER_WORD1_VERSION_CLRMSK >> HEADER_WORD1_VERSION_SHIFT)) != 0))
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       pui32Word = IMG_OFFSET_ADDR(pbyPDumpDataHdr, 0);
+
+       if (ui32HeaderType == DATA_HEADER_TYPE)
+       {
+               pui32Word[0] = (ui32HeaderType << HEADER_WORD0_TYPE_SHIFT);
+               pui32Word[1] = (DATA_HEADER_SIZE << HEADER_WORD1_SIZE_SHIFT) |
+                       (DATA_HEADER_VERSION << HEADER_WORD1_VERSION_SHIFT);
+               pui32Word[2] = ui32DataSize << HEADER_WORD2_DATA_SIZE_SHIFT;
+
+               pui32Word[3] = ui32ElementType << DATA_HEADER_WORD3_ELEMENT_TYPE_SHIFT;
+               pui32Word[4] = ui32ElementCount << DATA_HEADER_WORD4_ELEMENT_COUNT_SHIFT;
+       }
+
+       if (ui32HeaderType == IBIN_HEADER_TYPE)
+       {
+               pui32Word[0] = (ui32HeaderType << HEADER_WORD0_TYPE_SHIFT);
+               pui32Word[1] = (IBIN_HEADER_SIZE << HEADER_WORD1_SIZE_SHIFT) |
+                       (IBIN_HEADER_VERSION << HEADER_WORD1_VERSION_SHIFT);
+               pui32Word[2] = ui32DataSize << HEADER_WORD2_DATA_SIZE_SHIFT;
+       }
+
+       return PVRSRV_OK;
+}
+#endif /* PDUMP */
+
+/******************************************************************************
+ End of file (rgxpdump.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxpower.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxpower.c
new file mode 100644 (file)
index 0000000..94f79ec
--- /dev/null
@@ -0,0 +1,1547 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific power routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(__linux__)
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+#include "rgxpower.h"
+#include "rgxinit.h"
+#include "rgx_fwif_km.h"
+#include "rgxfwutils.h"
+#include "pdump_km.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+#include "rgxdebug.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "rgxtimecorr.h"
+#include "devicemem_utils.h"
+#include "htbserver.h"
+#include "rgxstartstop.h"
+#include "rgxfwimageutils.h"
+#include "sync.h"
+#include "rgxdefs_km.h"
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+#if defined(SUPPORT_LINUX_DVFS)
+#include "pvr_dvfs_device.h"
+#endif
+#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP)
+#include "validation_soc.h"
+#include "oskm_apphint.h"
+#endif
+
+static PVRSRV_ERROR RGXFWNotifyHostTimeout(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       RGXFWIF_KCCB_CMD sCmd;
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32CmdKCCBSlot;
+
+       /* Send the Timeout notification to the FW */
+       sCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
+       sCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ;
+       sCmd.uCmdData.sPowData.uPowerReqData.ePowRequestType = RGXFWIF_POWER_HOST_TIMEOUT;
+
+       eError = RGXSendCommandAndGetKCCBSlot(psDevInfo,
+                                             &sCmd,
+                                             PDUMP_FLAGS_NONE,
+                                             &ui32CmdKCCBSlot);
+
+       return eError;
+}
+
+static void _RGXUpdateGPUUtilStats(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb;
+       IMG_UINT64 *paui64StatsCounters;
+       IMG_UINT64 ui64LastPeriod;
+       IMG_UINT64 ui64LastState;
+       IMG_UINT64 ui64LastTime;
+       IMG_UINT64 ui64TimeNow;
+
+       psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
+       paui64StatsCounters = &psUtilFWCb->aui64StatsCounters[0];
+
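+       /* ui64LastWord packs the time of the last update together with the GPU state,
+        * as decoded by the RGXFWIF_GPU_UTIL_GET_TIME/GET_STATE macros. */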
+       OSLockAcquire(psDevInfo->hGPUUtilLock);
+
+       ui64TimeNow = RGXFWIF_GPU_UTIL_GET_TIME(RGXTimeCorrGetClockns64(psDevInfo->psDeviceNode));
+
+       /* Update counters to account for the time since the last update */
+       ui64LastState  = RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64LastWord);
+       ui64LastTime   = RGXFWIF_GPU_UTIL_GET_TIME(psUtilFWCb->ui64LastWord);
+       ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime);
+       paui64StatsCounters[ui64LastState] += ui64LastPeriod;
+
+       /* Update state and time of the latest update */
+       psUtilFWCb->ui64LastWord = RGXFWIF_GPU_UTIL_MAKE_WORD(ui64TimeNow, ui64LastState);
+
+       OSLockRelease(psDevInfo->hGPUUtilLock);
+}
+
+static INLINE PVRSRV_ERROR RGXDoStop(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR eError;
+
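+       /* On SUPPORT_TRUSTED_DEVICE builds (without NO_HARDWARE or security validation)
+        * the stop sequence is delegated to the system layer via pfnTDRGXStop;
+        * otherwise RGXStop is called with the device's layer parameters. */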
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION)
+       PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig;
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+       if (psDevConfig->pfnTDRGXStop == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXPrePowerState: TDRGXStop not implemented!"));
+               return PVRSRV_ERROR_NOT_IMPLEMENTED;
+       }
+
+       eError = psDevConfig->pfnTDRGXStop(psDevConfig->hSysData);
+#else
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+       eError = RGXStop(&psDevInfo->sLayerParams);
+#endif
+
+       return eError;
+}
+
+/*
+       RGXPrePowerState
+*/
+PVRSRV_ERROR RGXPrePowerState(IMG_HANDLE                               hDevHandle,
+                              PVRSRV_DEV_POWER_STATE   eNewPowerState,
+                              PVRSRV_DEV_POWER_STATE   eCurrentPowerState,
+                              PVRSRV_POWER_FLAGS               ePwrFlags)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVRSRV_DEVICE_NODE    *psDeviceNode = hDevHandle;
+
+       if ((eNewPowerState != eCurrentPowerState) &&
+           (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON))
+       {
+               PVRSRV_RGXDEV_INFO    *psDevInfo = psDeviceNode->pvDevice;
+               RGXFWIF_KCCB_CMD      sPowCmd;
+               IMG_UINT32            ui32CmdKCCBSlot;
+
+               const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+
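+               /* Power-off handshake: clear the power sync prim, send an
+                * RGXFWIF_POW_OFF_REQ command to the FW, then poll the sync prim
+                * until it reads 1, which indicates the FW has processed the request. */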
+               /* Send the Power off request to the FW */
+               sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
+               sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_OFF_REQ;
+               sPowCmd.uCmdData.sPowData.uPowerReqData.bForced = BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED);
+
+               eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim",
+                                       __func__));
+                       return eError;
+               }
+
+               eError = RGXSendCommandAndGetKCCBSlot(psDevInfo,
+                                                     &sPowCmd,
+                                                     PDUMP_FLAGS_NONE,
+                                                     &ui32CmdKCCBSlot);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Failed to send Power off request",
+                                       __func__));
+                       return eError;
+               }
+
+               /* Wait for the firmware to complete processing. We cannot use PVRSRVWaitForValueKM here as it
+                * relies on the EventObject, which is signalled in this MISR. */
+               eError = RGXPollForGPCommandCompletion(psDeviceNode,
+                                                                 psDevInfo->psPowSyncPrim->pui32LinAddr,
+                                                                 0x1, 0xFFFFFFFF);
+
+               /* Check the Power state after the answer */
+               if (eError == PVRSRV_OK)
+               {
+                       /* Finally, de-initialise some registers. */
+                       if (psFwSysData->ePowState == RGXFWIF_POW_OFF)
+                       {
+#if !defined(NO_HARDWARE)
+                               IMG_UINT32 ui32TID;
+                               const RGXFWIF_OSDATA *psFwOsData = psDevInfo->psRGXFWIfFwOsData;
+
+                               /* Driver takes the VZ Fw-KM connection down, preventing the
+                               * firmware from submitting further interrupts */
+                               KM_SET_OS_CONNECTION(OFFLINE, psDevInfo);
+
+                               for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++)
+                               {
+                                       /* Wait for the pending FW processor to host interrupts to come back. */
+                                       eError = PVRSRVPollForValueKM(psDeviceNode,
+                                                                                         (IMG_UINT32 __iomem *)&psDevInfo->aui32SampleIRQCount[ui32TID],
+                                                                                         psFwOsData->aui32InterruptCount[ui32TID],
+                                                                                         0xffffffff,
+                                                                                         POLL_FLAG_LOG_ERROR);
+
+                                       if (eError != PVRSRV_OK)
+                                       {
+                                               PVR_DPF((PVR_DBG_ERROR,
+                                                               "%s: Wait for pending interrupts failed (DevID %u). Thread %u: Host: %u, FW: %u",
+                                                               __func__,
+                                                               psDeviceNode->sDevId.ui32InternalID,
+                                                               ui32TID,
+                                                               psDevInfo->aui32SampleIRQCount[ui32TID],
+                                                               psFwOsData->aui32InterruptCount[ui32TID]));
+
+                                               RGX_WaitForInterruptsTimeout(psDevInfo);
+                                               break;
+                                       }
+                               }
+#endif /* NO_HARDWARE */
+
+                               /* Update GPU frequency and timer correlation related data */
+                               RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_POWER);
+
+                               /* Update GPU state counters */
+                               _RGXUpdateGPUUtilStats(psDevInfo);
+
+#if defined(SUPPORT_LINUX_DVFS)
+                               eError = SuspendDVFS(psDeviceNode);
+                               if (eError != PVRSRV_OK)
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s: Failed to suspend DVFS", __func__));
+                                       return eError;
+                               }
+#endif
+
+                               psDevInfo->bRGXPowered = IMG_FALSE;
+
+                               eError = RGXDoStop(psDeviceNode);
+                               if (eError != PVRSRV_OK)
+                               {
+                                       /* Power-down failures are logged but treated as successful, since the power has been removed anyway. */
+                                       PVR_DPF((PVR_DBG_WARNING, "%s: RGXDoStop failed (%s)",
+                                                       __func__, PVRSRVGetErrorString(eError)));
+                                       psDevInfo->ui32ActivePMReqNonIdle++;
+                                       eError = PVRSRV_OK;
+                               }
+                       }
+                       else
+                       {
+                               /* the sync was updated but the pow state isn't off -> the FW denied the transition */
+                               eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED;
+
+                               if (BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED))
+                               {       /* It is an error for a forced request to be denied */
+                                       PVR_DPF((PVR_DBG_ERROR,
+                                                        "%s: Failure to power off during a forced power off. FW: %d",
+                                                        __func__, psFwSysData->ePowState));
+                               }
+                       }
+               }
+               else if (eError == PVRSRV_ERROR_TIMEOUT)
+               {
+                       /* timeout waiting for the FW to ack the request: return timeout */
+                       PVR_DPF((PVR_DBG_WARNING,
+                                        "%s: Timeout waiting for powoff ack from the FW",
+                                        __func__));
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Error waiting for powoff ack from the FW (%s)",
+                                        __func__, PVRSRVGetErrorString(eError)));
+                       eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE;
+               }
+       }
+
+       return eError;
+}
+
+#if defined(SUPPORT_AUTOVZ)
+static PVRSRV_ERROR _RGXWaitForGuestsToDisconnect(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_ERROR eError = PVRSRV_ERROR_TIMEOUT;
+       IMG_UINT32 ui32FwTimeout = (20 * SECONDS_TO_MICROSECONDS);
+
+       LOOP_UNTIL_TIMEOUT(ui32FwTimeout)
+       {
+               IMG_UINT32 ui32OSid;
+               IMG_BOOL bGuestOnline = IMG_FALSE;
+
+               for (ui32OSid = RGXFW_GUEST_OSID_START;
+                        ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++)
+               {
+                       RGXFWIF_CONNECTION_FW_STATE eGuestState = (RGXFWIF_CONNECTION_FW_STATE)
+                                       psDevInfo->psRGXFWIfFwSysData->asOsRuntimeFlagsMirror[ui32OSid].bfOsState;
+
+                       if ((eGuestState == RGXFW_CONNECTION_FW_ACTIVE) ||
+                               (eGuestState == RGXFW_CONNECTION_FW_OFFLOADING))
+                       {
+                               bGuestOnline = IMG_TRUE;
+                               PVR_DPF((PVR_DBG_WARNING, "%s: Guest OS %u still online.", __func__, ui32OSid));
+                       }
+               }
+
+               if (!bGuestOnline)
+               {
+                       /* Allow Guests to finish reading Connection state registers before disconnecting. */
+                       OSSleepms(100);
+
+                       PVR_DPF((PVR_DBG_WARNING, "%s: All Guest connections are down. "
+                                                                         "Host can power down the GPU.", __func__));
+                       eError = PVRSRV_OK;
+                       break;
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "%s: Waiting for Guests to disconnect "
+                                                                         "before powering down GPU.", __func__));
+
+                       if (PVRSRVPwrLockIsLockedByMe(psDeviceNode))
+                       {
+                               /* Don't wait with the power lock held as this prevents the vz
+                                * watchdog thread from keeping the fw-km connection alive. */
+                               PVRSRVPowerUnlock(psDeviceNode);
+                       }
+               }
+
+               OSSleepms(10);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       if (!PVRSRVPwrLockIsLockedByMe(psDeviceNode))
+       {
+               /* Take back power lock after waiting for Guests */
+               eError = PVRSRVPowerLock(psDeviceNode);
+       }
+
+       return eError;
+}
+#endif /* defined(SUPPORT_AUTOVZ) */
+
+/*
+       RGXVzPrePowerState
+*/
+PVRSRV_ERROR RGXVzPrePowerState(IMG_HANDLE                             hDevHandle,
+                                PVRSRV_DEV_POWER_STATE eNewPowerState,
+                                PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+                                PVRSRV_POWER_FLAGS             ePwrFlags)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+
+       PVR_LOG_RETURN_IF_FALSE((eNewPowerState != eCurrentPowerState), "no power change", eError);
+
+       if (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON)
+       {
+               /* powering down */
+#if defined(SUPPORT_AUTOVZ)
+               if (PVRSRV_VZ_MODE_IS(HOST) && (!psDeviceNode->bAutoVzFwIsUp))
+               {
+                       /* The Host must ensure all Guest drivers have disconnected from the GPU before powering it down.
+                        * Guest drivers regularly access hardware registers during runtime. If an attempt is made to
+                        * access a GPU register while the GPU is down, the SoC might lock up. */
+                       eError = _RGXWaitForGuestsToDisconnect(psDeviceNode);
+                       PVR_LOG_RETURN_IF_ERROR(eError, "_RGXWaitForGuestsToDisconnect");
+
+                       /* Temporarily restore all power callbacks used by the driver to fully power down the GPU.
+                        * Under AutoVz, power transitions requests (e.g. on driver deinitialisation and unloading)
+                        * are generally ignored and the GPU power state is unaffected. Special power requests like
+                        * those triggered by Suspend/Resume calls must reinstate the callbacks when needed. */
+                       PVRSRVSetPowerCallbacks(psDeviceNode, psDeviceNode->psPowerDev,
+                                                                       &RGXVzPrePowerState, &RGXVzPostPowerState,
+                                                                       psDeviceNode->psDevConfig->pfnPrePowerState,
+                                                                       psDeviceNode->psDevConfig->pfnPostPowerState,
+                                                                       &RGXForcedIdleRequest, &RGXCancelForcedIdleRequest);
+               }
+               else
+               {
+                       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+                       if (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) &&
+                               KM_OS_CONNECTION_IS(ACTIVE, psDevInfo))
+                       {
+                               eError = RGXFWSetFwOsState(psDevInfo, 0, RGXFWIF_OS_OFFLINE);
+                               PVR_LOG_RETURN_IF_ERROR(eError, "RGXFWSetFwOsState");
+                       }
+               }
+#endif
+               PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering down: bAutoVzFwIsUp = %s",
+                                                               __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST",
+                                                               psDeviceNode->bAutoVzFwIsUp ? "TRUE" : "FALSE"));
+       }
+       else if (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON)
+       {
+               /* powering up */
+               PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering up: bAutoVzFwIsUp = %s",
+                                                               __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST",
+                                                               psDeviceNode->bAutoVzFwIsUp ? "TRUE" : "FALSE"));
+
+       }
+
+       if (!(PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp)))
+       {
+               /* call regular device power function */
+               eError = RGXPrePowerState(hDevHandle, eNewPowerState, eCurrentPowerState, ePwrFlags);
+       }
+
+       return eError;
+}
+
+/*
+       RGXVzPostPowerState
+*/
+PVRSRV_ERROR RGXVzPostPowerState(IMG_HANDLE                            hDevHandle,
+                                 PVRSRV_DEV_POWER_STATE        eNewPowerState,
+                                 PVRSRV_DEV_POWER_STATE        eCurrentPowerState,
+                                 PVRSRV_POWER_FLAGS            ePwrFlags)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       PVR_LOG_RETURN_IF_FALSE((eNewPowerState != eCurrentPowerState), "no power change", eError);
+
+       if (!(PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp)))
+       {
+               /* call regular device power function */
+               eError = RGXPostPowerState(hDevHandle, eNewPowerState, eCurrentPowerState, ePwrFlags);
+       }
+
+       if (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON)
+       {
+               /* powering down */
+               PVR_LOG_RETURN_IF_FALSE((!psDeviceNode->bAutoVzFwIsUp), "AutoVz Fw active, power not changed", eError);
+               PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering down: bAutoVzFwIsUp = %s",
+                                                               __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST",
+                                                               psDeviceNode->bAutoVzFwIsUp ? "TRUE" : "FALSE"));
+
+#if !defined(SUPPORT_AUTOVZ_HW_REGS)
+               /* The connection states must be reset on a GPU power cycle. If the states are kept
+                * in hardware scratch registers, they will be cleared on power down. When using shared
+                * memory the connection data must be explicitly cleared by the driver. */
+               OSCachedMemSetWMB(psDevInfo->psRGXFWIfConnectionCtl, 0, sizeof(RGXFWIF_CONNECTION_CTL));
+#endif /* !defined(SUPPORT_AUTOVZ_HW_REGS) */
+
+               if (PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp))
+               {
+#if defined(SUPPORT_AUTOVZ)
+                       /* AutoVz Guests attempting to suspend have updated their connections earlier in RGXVzPrePowerState.
+                        * Skip this redundant register write, as the Host could have powered down the GPU by now. */
+                       if (psDeviceNode->bAutoVzFwIsUp)
+#endif
+                       {
+                               /* Take the VZ connection down to prevent firmware from submitting further interrupts */
+                               KM_SET_OS_CONNECTION(OFFLINE, psDevInfo);
+                       }
+                       /* Power transition callbacks were not executed, update RGXPowered flag here */
+                       psDevInfo->bRGXPowered = IMG_FALSE;
+               }
+       }
+       else if (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON)
+       {
+               /* powering up */
+               IMG_UINT32 ui32FwTimeout = (3 * SECONDS_TO_MICROSECONDS);
+               volatile IMG_BOOL *pbUpdatedFlag = &psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated;
+
+               PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering up: bAutoVzFwIsUp = %s",
+                                                               __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST",
+                                                               psDeviceNode->bAutoVzFwIsUp ? "TRUE" : "FALSE"));
+               if (PVRSRV_VZ_MODE_IS(GUEST))
+               {
+                       /* Guests don't execute the power transition callbacks, so update their RGXPowered flag here */
+                       psDevInfo->bRGXPowered = IMG_TRUE;
+
+#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
+                       /* Guest drivers expect the firmware to have set its end of the
+                        * connection to Ready state by now. Poll indefinitely otherwise. */
+                       if (!KM_FW_CONNECTION_IS(READY, psDevInfo))
+                       {
+                               PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is not in Ready state. Waiting for Firmware ...", __func__));
+                       }
+                       while (!KM_FW_CONNECTION_IS(READY, psDevInfo))
+                       {
+                               OSSleepms(10);
+                       }
+                       PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is Ready. Initialisation proceeding.", __func__));
+#endif /* RGX_VZ_STATIC_CARVEOUT_FW_HEAPS */
+
+                       /* Guests can only access the register holding the connection states,
+                        * after the GPU is confirmed to be powered up */
+                       KM_SET_OS_CONNECTION(READY, psDevInfo);
+
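+                       /* Clear the compatibility-checks updated flag before kicking the
+                        * firmware, so the poll further down can detect when the firmware
+                        * publishes fresh compatibility data. */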
+                       OSWriteDeviceMem32(pbUpdatedFlag, IMG_FALSE);
+
+                       /* Kick an initial dummy command to make the firmware initialise all
+                        * its internal guest OS data structures and compatibility information.
+                        * Use the lower-level RGXSendCommandAndGetKCCBSlot() for the job, to make
+                        * sure only 1 KCCB command is issued to the firmware.
+                        * The default RGXFWHealthCheckCmd() prefaces each HealthCheck command with
+                        * a pre-kick cache command which can interfere with the FW-KM init handshake. */
+                       {
+                               RGXFWIF_KCCB_CMD sCmpKCCBCmd;
+                               sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_HEALTH_CHECK;
+
+                               eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, &sCmpKCCBCmd, PDUMP_FLAGS_CONTINUOUS, NULL);
+                               PVR_LOG_RETURN_IF_ERROR(eError, "RGXSendCommandAndGetKCCBSlot()");
+                       }
+               }
+               else
+               {
+                       KM_SET_OS_CONNECTION(READY, psDevInfo);
+
+                       /* Disable power callbacks that should not be run on virtualised drivers after the GPU
+                        * is fully initialised: system layer pre/post functions and driver idle requests.
+                        * The original device RGX Pre/Post functions are called from this Vz wrapper. */
+                       PVRSRVSetPowerCallbacks(psDeviceNode, psDeviceNode->psPowerDev,
+                                                                       &RGXVzPrePowerState, &RGXVzPostPowerState,
+                                                                       NULL, NULL, NULL, NULL);
+
+#if defined(SUPPORT_AUTOVZ)
+                       /* During first-time boot the flag is set here, while subsequent reboots will already
+                        * have set it earlier in RGXInit. Set to true from this point onwards in any case. */
+                       psDeviceNode->bAutoVzFwIsUp = IMG_TRUE;
+#endif
+               }
+
+               /* Wait for the firmware to accept and enable the connection with this OS by setting its state to Active */
+               while (!KM_FW_CONNECTION_IS(ACTIVE, psDevInfo))
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is not in Active state. Waiting for Firmware ...", __func__));
+                       OSSleepms(100);
+               }
+               PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is Active. Initialisation proceeding.", __func__));
+
+               /* poll on the Firmware supplying the compatibility data */
+               LOOP_UNTIL_TIMEOUT(ui32FwTimeout)
+               {
+                       if (*pbUpdatedFlag)
+                       {
+                               break;
+                       }
+                       OSSleepms(10);
+               } END_LOOP_UNTIL_TIMEOUT();
+
+               PVR_LOG_RETURN_IF_FALSE(*pbUpdatedFlag, "Firmware did not respond with compatibility data", PVRSRV_ERROR_TIMEOUT);
+
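+               /* The firmware connection is active and the compatibility data has been
+                * received; mark the OS side of the connection as active as well. */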
+               KM_SET_OS_CONNECTION(ACTIVE, psDevInfo);
+       }
+
+       return PVRSRV_OK;
+}
+
+#if defined(TRACK_FW_BOOT)
+static INLINE void RGXCheckFWBootStage(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       FW_BOOT_STAGE eStage;
+
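+       /* The firmware reports its boot stage through a processor-specific scratch
+        * register: RGX_FW_BOOT_STAGE_REGISTER on META cores, RGX_CR_SCRATCH14 on
+        * RISC-V cores. */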
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+       {
+               /* Boot stage temporarily stored to the register below */
+               eStage = OSReadHWReg32(psDevInfo->pvRegsBaseKM,
+                                      RGX_FW_BOOT_STAGE_REGISTER);
+       }
+       else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
+       {
+               eStage = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_SCRATCH14);
+       }
+       else
+       {
+               return;
+       }
+
+       PVR_LOG(("%s: FW reached boot stage %i/%i.",
+                __func__, eStage, FW_BOOT_INIT_DONE));
+}
+#endif
+
+static INLINE PVRSRV_ERROR RGXDoStart(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR eError;
+
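+       /* On trusted-device builds (without NO_HARDWARE or security validation) the
+        * GPU start sequence is delegated to the system layer via pfnTDRGXStart;
+        * otherwise RGXStart() drives the power-on sequence directly. */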
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION)
+       PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig;
+
+       if (psDevConfig->pfnTDRGXStart == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: TDRGXStart not implemented!", __func__));
+               return PVRSRV_ERROR_NOT_IMPLEMENTED;
+       }
+
+       eError = psDevConfig->pfnTDRGXStart(psDevConfig->hSysData);
+#else
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       eError = RGXStart(&psDevInfo->sLayerParams);
+#endif
+
+       return eError;
+}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE)
+/*
+ * To validate the MTS unit we do the following:
+ *  - Immediately after firmware loading for each OSID
+ *    - Write the OSid to a memory location shared with FW
+ *    - Kick the register of that OSid
+ *         (Uncounted, DM 0)
+ *    - FW clears the memory location if OSid matches
+ *    - Host checks that memory location is cleared
+ *
+ *  See firmware/devices/rgx/rgxfw_bg.c
+ */
+static PVRSRV_ERROR RGXVirtualisationPowerupSidebandTest(PVRSRV_DEVICE_NODE     *psDeviceNode,
+                                                                                                                RGXFWIF_SYSINIT *psFwSysInit,
+                                                                                                                PVRSRV_RGXDEV_INFO      *psDevInfo)
+{
+       IMG_UINT32 ui32ScheduleRegister;
+       IMG_UINT32 ui32OSid;
+       IMG_UINT32 ui32KickType;
+       IMG_UINT32 ui32OsRegBanksMapped = (psDeviceNode->psDevConfig->ui32RegsSize / RGX_VIRTUALISATION_REG_SIZE_PER_OS);
+
+       /* Nothing to do if device does not support GPU_VIRTUALISATION */
+       if (!PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, GPU_VIRTUALISATION))
+       {
+               return PVRSRV_OK;
+       }
+
+       PVR_DPF((PVR_DBG_MESSAGE, "Testing per-os kick registers:"));
+
+       ui32OsRegBanksMapped = MIN(ui32OsRegBanksMapped, GPUVIRT_VALIDATION_NUM_OS);
+
+       if (ui32OsRegBanksMapped != RGXFW_MAX_NUM_OS)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "The register bank mapped into kernel VA does not cover all OSes' registers:"));
+               PVR_DPF((PVR_DBG_WARNING, "Maximum OS count = %d / Per-OS register banks mapped = %d", RGXFW_MAX_NUM_OS, ui32OsRegBanksMapped));
+               PVR_DPF((PVR_DBG_WARNING, "Only the first %d MTS registers will be tested", ui32OsRegBanksMapped));
+       }
+
+       ui32KickType = RGX_CR_MTS_SCHEDULE_DM_DM0 | RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED;
+
+       for (ui32OSid = 0; ui32OSid < ui32OsRegBanksMapped; ui32OSid++)
+       {
+               /* set Test field */
+               psFwSysInit->ui32OSKickTest = (ui32OSid << RGXFWIF_KICK_TEST_OSID_SHIFT) | RGXFWIF_KICK_TEST_ENABLED_BIT;
+
+#if defined(PDUMP)
+               DevmemPDumpLoadMem(psDevInfo->psRGXFWIfSysInitMemDesc,
+                                                  offsetof(RGXFWIF_SYSINIT, ui32OSKickTest),
+                                                  sizeof(psFwSysInit->ui32OSKickTest),
+                                                  PDUMP_FLAGS_CONTINUOUS);
+#endif
+
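+               /* Ensure the test word is visible to the firmware before the kick
+                * register is written below. */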
+               OSWriteMemoryBarrier(&psFwSysInit->ui32OSKickTest);
+
+               /* kick register */
+               ui32ScheduleRegister = RGX_CR_MTS_SCHEDULE + (ui32OSid * RGX_VIRTUALISATION_REG_SIZE_PER_OS);
+               PVR_DPF((PVR_DBG_MESSAGE, "  Testing OS: %u, Kick Reg: %X",
+                                ui32OSid,
+                                ui32ScheduleRegister));
+               OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32ScheduleRegister, ui32KickType);
+               OSMemoryBarrier((IMG_BYTE*) psDevInfo->pvRegsBaseKM + ui32ScheduleRegister);
+
+#if defined(PDUMP)
+               PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "VZ sideband test, kicking MTS register %u", ui32OSid);
+
+               PDUMPREG32(psDeviceNode, RGX_PDUMPREG_NAME,
+                               ui32ScheduleRegister, ui32KickType, PDUMP_FLAGS_CONTINUOUS);
+
+               DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfSysInitMemDesc,
+                                                          offsetof(RGXFWIF_SYSINIT, ui32OSKickTest),
+                                                          0,
+                                                          0xFFFFFFFF,
+                                                          PDUMP_POLL_OPERATOR_EQUAL,
+                                                          PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+               /* Wait for the firmware to clear the test enable bit */
+               if (PVRSRVPollForValueKM(psDeviceNode,
+                                                                (IMG_UINT32 *)&psFwSysInit->ui32OSKickTest,
+                                                                0,
+                                                                RGXFWIF_KICK_TEST_ENABLED_BIT,
+                                                                POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP) != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "Testing OS %u kick register failed: firmware did not clear test location (contents: 0x%X)",
+                                        ui32OSid,
+                                        psFwSysInit->ui32OSKickTest));
+
+                       return PVRSRV_ERROR_TIMEOUT;
+               }
+
+               /* Check that the value is what we expect */
+               if (psFwSysInit->ui32OSKickTest != 0)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "Testing OS %u kick register failed: firmware wrote 0x%X to test location",
+                                        ui32OSid,
+                                        psFwSysInit->ui32OSKickTest));
+                       return PVRSRV_ERROR_INIT_FAILURE;
+               }
+
+               PVR_DPF((PVR_DBG_MESSAGE, "    PASS"));
+       }
+
+       PVR_LOG(("MTS passed sideband tests"));
+       return PVRSRV_OK;
+}
+#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE) */
+
+#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP)
+#define SCRATCH_VALUE  (0x12345678U)
+
+static void RGXRiscvDebugModuleTest(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       void *pvAppHintState = NULL;
+       IMG_UINT32 ui32AppHintDefault = 0;
+       IMG_BOOL bRunRiscvDmiTest;
+
+       IMG_UINT32 *pui32FWCode = NULL;
+       PVRSRV_ERROR eError;
+
+       OSCreateKMAppHintState(&pvAppHintState);
+       OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, RiscvDmiTest,
+                          &ui32AppHintDefault, &bRunRiscvDmiTest);
+       OSFreeKMAppHintState(pvAppHintState);
+
+       if (bRunRiscvDmiTest == IMG_FALSE)
+       {
+               return;
+       }
+
+       eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, (void **)&pui32FWCode);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Error acquiring FW code memory pointer (%s)",
+                        __func__,
+                        PVRSRVGetErrorString(eError)));
+       }
+
+       PDumpIfKM(psDevInfo->psDeviceNode, "ENABLE_RISCV_DMI_TEST", PDUMP_FLAGS_CONTINUOUS);
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "DMI_TEST BEGIN");
+
+       RGXRiscvHalt(psDevInfo);
+
+       /*
+        * Test RISC-V register reads/writes.
+        * RGXRiscv[Write/Poll]Reg are used to access internal RISC-V registers
+        * via debug module.
+        */
+
+       /* Write RISC-V mscratch register */
+       RGXRiscvWriteReg(psDevInfo, RGXRISCVFW_MSCRATCH_ADDR, SCRATCH_VALUE);
+       /* Read RISC-V misa register (compare against default standard value) */
+       RGXRiscvPollReg(psDevInfo,  RGXRISCVFW_MISA_ADDR,     RGXRISCVFW_MISA_VALUE);
+       /* Read RISC-V mscratch register (compare against previously written value) */
+       RGXRiscvPollReg(psDevInfo,  RGXRISCVFW_MSCRATCH_ADDR, SCRATCH_VALUE);
+
+       /*
+        * Test RISC-V memory reads/writes.
+        * RGXRiscv[Write/Poll]Mem are used to access system memory via debug module
+        * (from RISC-V point of view).
+        */
+
+       if (pui32FWCode != NULL)
+       {
+               IMG_UINT32 ui32Tmp;
+
+               /* Acquire pointer to FW code (bootloader) */
+               pui32FWCode += RGXGetFWImageSectionOffset(NULL, RISCV_UNCACHED_CODE) / sizeof(IMG_UINT32);
+               /* Save FW code at address (bootloader) */
+               ui32Tmp = *pui32FWCode;
+
+               /* Write FW code at address (bootloader) */
+               RGXWriteFWModuleAddr(psDevInfo, RGXRISCVFW_BOOTLDR_CODE_BASE,     SCRATCH_VALUE);
+               /* Read FW code at address (bootloader + 4) (compare against value read from Host) */
+               RGXRiscvPollMem(psDevInfo,  RGXRISCVFW_BOOTLDR_CODE_BASE + 4, *(pui32FWCode + 1));
+               /* Read FW code at address (bootloader) (compare against previously written value) */
+               RGXRiscvPollMem(psDevInfo,  RGXRISCVFW_BOOTLDR_CODE_BASE,     SCRATCH_VALUE);
+               /* Restore FW code at address (bootloader) */
+               RGXWriteFWModuleAddr(psDevInfo, RGXRISCVFW_BOOTLDR_CODE_BASE,     ui32Tmp);
+
+               DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc);
+       }
+
+       /*
+        * Test GPU register reads/writes.
+        * RGXRiscv[Write/Poll]Mem are used to access GPU registers via debug module
+        * (from RISC-V point of view).
+        * Note that system memory and GPU register accesses both use the same
+        * debug module interface, targeting different address ranges.
+        */
+
+       /* Write SCRATCH0 from the Host */
+       PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_SCRATCH0, SCRATCH_VALUE, PDUMP_FLAGS_CONTINUOUS);
+       /* Read SCRATCH0 */
+       RGXRiscvPollMem(psDevInfo,  RGXRISCVFW_SOCIF_BASE | RGX_CR_SCRATCH0, SCRATCH_VALUE);
+       /* Write SCRATCH0 */
+       RGXWriteFWModuleAddr(psDevInfo, RGXRISCVFW_SOCIF_BASE | RGX_CR_SCRATCH0, ~SCRATCH_VALUE);
+       /* Read SCRATCH0 from the Host */
+       PDUMPREGPOL(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_SCRATCH0, ~SCRATCH_VALUE, 0xFFFFFFFFU,
+                   PDUMP_FLAGS_CONTINUOUS, PDUMP_POLL_OPERATOR_EQUAL);
+
+       RGXRiscvResume(psDevInfo);
+
+       PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "DMI_TEST END");
+       PDumpFiKM(psDevInfo->psDeviceNode, "ENABLE_RISCV_DMI_TEST", PDUMP_FLAGS_CONTINUOUS);
+}
+#endif
+
+/*
+       RGXPostPowerState
+*/
+PVRSRV_ERROR RGXPostPowerState(IMG_HANDLE                              hDevHandle,
+                               PVRSRV_DEV_POWER_STATE  eNewPowerState,
+                               PVRSRV_DEV_POWER_STATE  eCurrentPowerState,
+                               PVRSRV_POWER_FLAGS              ePwrFlags)
+{
+       PVRSRV_DEVICE_NODE       *psDeviceNode = hDevHandle;
+       PVRSRV_RGXDEV_INFO       *psDevInfo = psDeviceNode->pvDevice;
+
+       if ((eNewPowerState != eCurrentPowerState) &&
+           (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON))
+       {
+               PVRSRV_ERROR             eError;
+
+               if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF)
+               {
+                       /* Update timer correlation related data */
+                       RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_POWER);
+
+                       /* Update GPU state counters */
+                       _RGXUpdateGPUUtilStats(psDevInfo);
+
+                       eError = RGXDoStart(psDeviceNode);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: RGXDoStart failed"));
+                               return eError;
+                       }
+
+                       OSMemoryBarrier(NULL);
+
+                       /*
+                        * Check whether the FW has started by polling on bFirmwareStarted flag
+                        */
+                       if (PVRSRVPollForValueKM(psDeviceNode,
+                                                (IMG_UINT32 __iomem *)&psDevInfo->psRGXFWIfSysInit->bFirmwareStarted,
+                                                IMG_TRUE,
+                                                0xFFFFFFFF,
+                                                POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP) != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: Polling for 'FW started' flag failed."));
+                               eError = PVRSRV_ERROR_TIMEOUT;
+
+#if defined(TRACK_FW_BOOT)
+                               RGXCheckFWBootStage(psDevInfo);
+#endif
+
+                               /*
+                                * When the bFirmwareStarted poll fails, the debug dump below may provide
+                                * useful information, but it is potentially dangerous if the reason the
+                                * firmware did not boot is that the GPU power is not actually on. However,
+                                * since the System Layer returned without errors to reach this point, we
+                                * assume the GPU power is indeed on.
+                                */
+                               RGXDumpRGXDebugSummary(NULL, NULL, psDeviceNode->pvDevice, IMG_TRUE);
+                               RGXDumpRGXRegisters(NULL, NULL, psDeviceNode->pvDevice);
+
+                               return eError;
+                       }
+
+#if defined(PDUMP)
+                       PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "Wait for the Firmware to start.");
+                       eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfSysInitMemDesc,
+                                                       offsetof(RGXFWIF_SYSINIT, bFirmwareStarted),
+                                                       IMG_TRUE,
+                                                       0xFFFFFFFFU,
+                                                       PDUMP_POLL_OPERATOR_EQUAL,
+                                                       PDUMP_FLAGS_CONTINUOUS);
+
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                        "RGXPostPowerState: problem pdumping POL for psRGXFWIfSysInitMemDesc (%d)",
+                                        eError));
+                               return eError;
+                       }
+
+#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE)
+                       /* Check if the Validation IRQ flag is set */
+                       if ((psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags & RGXFWIF_INICFG_VALIDATE_IRQ) != 0)
+                       {
+                               eError = PVRSRVValidateIrqs(psDeviceNode);
+                               if (eError != PVRSRV_OK)
+                               {
+                                       return eError;
+                               }
+                       }
+#endif /* defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) */
+
+#endif /* defined(PDUMP) */
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE)
+                       eError = RGXVirtualisationPowerupSidebandTest(psDeviceNode, psDevInfo->psRGXFWIfSysInit, psDevInfo);
+                       if (eError != PVRSRV_OK)
+                       {
+                               return eError;
+                       }
+#endif
+
+#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP)
+                       RGXRiscvDebugModuleTest(psDevInfo);
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+                       SetFirmwareStartTime(psDevInfo->psRGXFWIfSysInit->ui32FirmwareStartedTimeStamp);
+#endif
+
+                       HTBSyncPartitionMarker(psDevInfo->psRGXFWIfSysInit->ui32MarkerVal);
+
+                       psDevInfo->bRGXPowered = IMG_TRUE;
+
+#if defined(SUPPORT_LINUX_DVFS)
+                       eError = ResumeDVFS(psDeviceNode);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: Failed to resume DVFS"));
+                               return eError;
+                       }
+#endif
+               }
+       }
+
+       PDUMPCOMMENT(psDeviceNode,
+                    "RGXPostPowerState: Current state: %d, New state: %d",
+                    eCurrentPowerState, eNewPowerState);
+
+       return PVRSRV_OK;
+}
+
+/*
+       RGXPreClockSpeedChange
+*/
+PVRSRV_ERROR RGXPreClockSpeedChange(IMG_HANDLE                         hDevHandle,
+                                    PVRSRV_DEV_POWER_STATE     eCurrentPowerState)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+       const PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       const RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData;
+       const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
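+       /* psRGXData is referenced only by the debug message below, which may be
+        * compiled out in some builds; the macro below keeps those builds
+        * warning-free. */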
+       PVR_UNREFERENCED_PARAMETER(psRGXData);
+
+       PVR_DPF((PVR_DBG_MESSAGE, "RGXPreClockSpeedChange: RGX clock speed was %uHz",
+                       psRGXData->psRGXTimingInfo->ui32CoreClockSpeed));
+
+       if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF) &&
+           (psFwSysData->ePowState != RGXFWIF_POW_OFF))
+       {
+               /* Update GPU frequency and timer correlation related data */
+               RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_DVFS);
+       }
+
+       return eError;
+}
+
+/*
+       RGXPostClockSpeedChange
+*/
+PVRSRV_ERROR RGXPostClockSpeedChange(IMG_HANDLE                                hDevHandle,
+                                     PVRSRV_DEV_POWER_STATE    eCurrentPowerState)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+       const PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       const RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData;
+       const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_UINT32 ui32NewClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+       /* Update runtime configuration with the new value */
+       OSWriteDeviceMem32WithWMB(&psDevInfo->psRGXFWIfRuntimeCfg->ui32CoreClockSpeed,
+                                 ui32NewClockSpeed);
+
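+       /* Only notify the firmware if the GPU is currently powered; otherwise the
+        * updated runtime configuration is expected to be picked up on the next
+        * power-up. */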
+       if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF) &&
+           (psFwSysData->ePowState != RGXFWIF_POW_OFF))
+       {
+               RGXFWIF_KCCB_CMD sCOREClkSpeedChangeCmd;
+               IMG_UINT32 ui32CmdKCCBSlot;
+
+               RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_DVFS);
+
+               sCOREClkSpeedChangeCmd.eCmdType = RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE;
+               sCOREClkSpeedChangeCmd.uCmdData.sCoreClkSpeedChangeData.ui32NewClockSpeed = ui32NewClockSpeed;
+
+               PDUMPCOMMENT(psDeviceNode, "Scheduling CORE clock speed change command");
+
+               PDUMPPOWCMDSTART(psDeviceNode);
+               eError = RGXSendCommandAndGetKCCBSlot(psDeviceNode->pvDevice,
+                                                     &sCOREClkSpeedChangeCmd,
+                                                     PDUMP_FLAGS_NONE,
+                                                     &ui32CmdKCCBSlot);
+               PDUMPPOWCMDEND(psDeviceNode);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PDUMPCOMMENT(psDeviceNode, "Scheduling CORE clock speed change command failed");
+                       PVR_DPF((PVR_DBG_ERROR, "RGXPostClockSpeedChange: Scheduling KCCB command failed. Error:%u", eError));
+                       return eError;
+               }
+
+               PVR_DPF((PVR_DBG_MESSAGE, "RGXPostClockSpeedChange: RGX clock speed changed to %uHz",
+                               psRGXData->psRGXTimingInfo->ui32CoreClockSpeed));
+       }
+
+       return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXPowUnitsStateMaskChange
+@Description    Changes power state of power units/islands
+@Input          hDevHandle         RGX Device Node.
+@Input          ui32PowUnitsStateMask   Mask containing power state of PUs.
+                                        Each bit corresponds to an PU.
+                                        Each bit corresponds to a PU.
+                                        '1' indicates ON and '0' indicates OFF.
+                                        Value must be non-zero.
+@Return         PVRSRV_ERROR.
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXPowUnitsStateMaskChange(IMG_HANDLE hDevHandle, IMG_UINT32 ui32PowUnitsStateMask)
+{
+
+       PVRSRV_DEVICE_NODE      *psDeviceNode = hDevHandle;
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_ERROR            eError;
+       RGXFWIF_KCCB_CMD        sPowUnitsStateMaskChange;
+       IMG_UINT32 ui32PowUnitsMask = psDevInfo->ui32AvailablePowUnitsMask;
+       IMG_UINT32                      ui32CmdKCCBSlot;
+       RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+       /**
+        * Validate the input. At least one PU must be powered on and the requested
+        * PUs must be a subset of the full PU mask.
+        */
+       if ((ui32PowUnitsStateMask == 0) || (ui32PowUnitsStateMask & ~ui32PowUnitsMask))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Invalid Power Units mask requested (0x%X). Value must be non-zero and a subset of the 0x%X mask",
+                               __func__,
+                               ui32PowUnitsStateMask,
+                               ui32PowUnitsMask));
+               return PVRSRV_ERROR_INVALID_SPU_MASK;
+       }
+
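+       /* The requested mask is recorded in the runtime configuration and is also
+        * carried in the KCCB power command issued below. */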
+       psRuntimeCfg->ui32PowUnitsStateMask = ui32PowUnitsStateMask;
+       OSWriteMemoryBarrier(&psRuntimeCfg->ui32PowUnitsStateMask);
+
+#if !defined(NO_HARDWARE)
+       {
+               const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+
+               if (psFwSysData->ePowState == RGXFWIF_POW_OFF)
+               {
+                       return PVRSRV_OK;
+               }
+
+               if (psFwSysData->ePowState != RGXFWIF_POW_FORCED_IDLE)
+               {
+                       eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED;
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Power units state cannot be changed when the firmware is not IDLE",
+                                        __func__));
+                       return eError;
+               }
+       }
+#endif
+
+       eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim",
+                               __func__));
+               return eError;
+       }
+
+       sPowUnitsStateMaskChange.eCmdType = RGXFWIF_KCCB_CMD_POW;
+       sPowUnitsStateMaskChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_NUM_UNITS_CHANGE;
+       sPowUnitsStateMaskChange.uCmdData.sPowData.uPowerReqData.ui32PowUnitsStateMask = ui32PowUnitsStateMask;
+
+       PDUMPCOMMENT(psDeviceNode,
+                    "Scheduling command to change power units state to 0x%X",
+                    ui32PowUnitsStateMask);
+       eError = RGXSendCommandAndGetKCCBSlot(psDeviceNode->pvDevice,
+                                             &sPowUnitsStateMaskChange,
+                                             PDUMP_FLAGS_NONE,
+                                             &ui32CmdKCCBSlot);
+
+       if (eError != PVRSRV_OK)
+       {
+               PDUMPCOMMENT(psDeviceNode,
+                            "Scheduling command to change power units state failed. Error:%u",
+                            eError);
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Scheduling KCCB command to change power units state failed. Error:%u",
+                                __func__, eError));
+               return eError;
+       }
+
+       /* Wait for the firmware to answer. */
+       eError = RGXPollForGPCommandCompletion(psDeviceNode,
+                                     psDevInfo->psPowSyncPrim->pui32LinAddr,
+                                                                 0x1, 0xFFFFFFFF);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Timeout waiting for idle request", __func__));
+               return eError;
+       }
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDeviceNode,
+                    "%s: Poll for Kernel SyncPrim [0x%p] on DM %d",
+                    __func__, psDevInfo->psPowSyncPrim->pui32LinAddr,
+                    RGXFWIF_DM_GP);
+
+       SyncPrimPDumpPol(psDevInfo->psPowSyncPrim,
+                                       1,
+                                       0xffffffff,
+                                       PDUMP_POLL_OPERATOR_EQUAL,
+                                       0);
+#endif
+
+       return PVRSRV_OK;
+}
+
+/*
+ @Function     RGXAPMLatencyChange
+*/
+PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE            hDevHandle,
+                                 IMG_UINT32            ui32ActivePMLatencyms,
+                                 IMG_BOOL              bActivePMLatencyPersistant)
+{
+
+       PVRSRV_DEVICE_NODE      *psDeviceNode = hDevHandle;
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_ERROR            eError;
+       RGXFWIF_RUNTIME_CFG     *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+       IMG_UINT32                      ui32CmdKCCBSlot;
+       PVRSRV_DEV_POWER_STATE  ePowerState;
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+       eError = PVRSRVPowerLock(psDeviceNode);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "RGXAPMLatencyChange: Failed to acquire power lock"));
+               return eError;
+       }
+
+       /* Update runtime configuration with the new values and ensure the
+        * new APM latency is written to memory before requesting the FW to
+        * read it
+        */
+       psRuntimeCfg->ui32ActivePMLatencyms = ui32ActivePMLatencyms;
+       psRuntimeCfg->bActivePMLatencyPersistant = bActivePMLatencyPersistant;
+       OSWriteMemoryBarrier(&psRuntimeCfg->bActivePMLatencyPersistant);
+
+       eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+
+       if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF))
+       {
+               RGXFWIF_KCCB_CMD        sActivePMLatencyChange;
+               sActivePMLatencyChange.eCmdType = RGXFWIF_KCCB_CMD_POW;
+               sActivePMLatencyChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_APM_LATENCY_CHANGE;
+
+               PDUMPCOMMENT(psDeviceNode,
+                            "Scheduling command to change APM latency to %u",
+                            ui32ActivePMLatencyms);
+               eError = RGXSendCommandAndGetKCCBSlot(psDeviceNode->pvDevice,
+                                                     &sActivePMLatencyChange,
+                                                     PDUMP_FLAGS_NONE,
+                                                     &ui32CmdKCCBSlot);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PDUMPCOMMENT(psDeviceNode,
+                                    "Scheduling command to change APM latency failed. Error:%u",
+                                    eError);
+                       PVR_DPF((PVR_DBG_ERROR, "RGXAPMLatencyChange: Scheduling KCCB to change APM latency failed. Error:%u", eError));
+                       goto ErrorExit;
+               }
+       }
+
+ErrorExit:
+       PVRSRVPowerUnlock(psDeviceNode);
+
+       return eError;
+}
+
+/*
+       RGXActivePowerRequest
+*/
+PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+
+       psDevInfo->ui32ActivePMReqTotal++;
+
+       /* Take the power lock to stop further requests from racing with the FW
+        * hand-shake from now on (kicks issued before this point are detected by
+        * the FW). PVRSRVPowerLock is replaced with PVRSRVPowerTryLock to avoid a
+        * potential deadlock between PDumpWriteLock and PowerLock
+        * during 'DriverLive + PDUMP=1 + EnableAPM=1'.
+        */
+       eError = PVRSRVPowerTryLock(psDeviceNode);
+       if (eError != PVRSRV_OK)
+       {
+               if (eError != PVRSRV_ERROR_RETRY)
+               {
+                       PVR_LOG_ERROR(eError, "PVRSRVPowerTryLock");
+               }
+               else
+               {
+                       psDevInfo->ui32ActivePMReqRetry++;
+               }
+               goto _RGXActivePowerRequest_PowerLock_failed;
+       }
+
+       /* Check again for IDLE once we have the power lock */
+       if (psFwSysData->ePowState == RGXFWIF_POW_IDLE)
+       {
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+               SetFirmwareHandshakeIdleTime(RGXReadHWTimerReg(psDevInfo)-psFwSysData->ui64StartIdleTime);
+#endif
+
+               PDUMPPOWCMDSTART(psDeviceNode);
+               eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+                                                    PVRSRV_DEV_POWER_STATE_OFF,
+                                                    PVRSRV_POWER_FLAGS_NONE);
+               PDUMPPOWCMDEND(psDeviceNode);
+
+               if (eError == PVRSRV_OK)
+               {
+                       psDevInfo->ui32ActivePMReqOk++;
+               }
+               else if (eError == PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED)
+               {
+                       psDevInfo->ui32ActivePMReqDenied++;
+               }
+       }
+       else
+       {
+               psDevInfo->ui32ActivePMReqNonIdle++;
+       }
+
+       PVRSRVPowerUnlock(psDeviceNode);
+
+_RGXActivePowerRequest_PowerLock_failed:
+
+       return eError;
+}
+/*
+       RGXForcedIdleRequest
+*/
+
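+/* Maximum number of times the GP command completion poll in
+ * RGXForcedIdleRequest() is retried before the request is treated as failed. */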
+#define RGX_FORCED_IDLE_RETRY_COUNT 10
+
+PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted)
+{
+       PVRSRV_DEVICE_NODE    *psDeviceNode = hDevHandle;
+       PVRSRV_RGXDEV_INFO    *psDevInfo = psDeviceNode->pvDevice;
+       RGXFWIF_KCCB_CMD      sPowCmd;
+       PVRSRV_ERROR          eError;
+       IMG_UINT32            ui32RetryCount = 0;
+       IMG_UINT32            ui32CmdKCCBSlot;
+#if !defined(NO_HARDWARE)
+       const RGXFWIF_SYSDATA *psFwSysData;
+#endif
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+#if !defined(NO_HARDWARE)
+       psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+
+       /* Firmware already forced idle */
+       if (psFwSysData->ePowState == RGXFWIF_POW_FORCED_IDLE)
+       {
+               return PVRSRV_OK;
+       }
+
+       /* Firmware is not powered. Sometimes this is permitted, for instance when forcing idle in order to power down. */
+       if (psFwSysData->ePowState == RGXFWIF_POW_OFF)
+       {
+               return (bDeviceOffPermitted) ? PVRSRV_OK : PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED;
+       }
+#endif
+
+       eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim",
+                               __func__));
+               return eError;
+       }
+       sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
+       sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ;
+       sPowCmd.uCmdData.sPowData.uPowerReqData.ePowRequestType = RGXFWIF_POWER_FORCE_IDLE;
+
+       PDUMPCOMMENT(psDeviceNode,
+                    "RGXForcedIdleRequest: Sending forced idle command");
+
+       /* Send one forced IDLE command to GP */
+       eError = RGXSendCommandAndGetKCCBSlot(psDevInfo,
+                                             &sPowCmd,
+                                             PDUMP_FLAGS_NONE,
+                                             &ui32CmdKCCBSlot);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to send idle request", __func__));
+               return eError;
+       }
+
+       /* Wait for GPU to finish current workload */
+       do {
+               eError = RGXPollForGPCommandCompletion(psDeviceNode,
+                                             psDevInfo->psPowSyncPrim->pui32LinAddr,
+                                                                         0x1, 0xFFFFFFFF);
+               if ((eError == PVRSRV_OK) || (ui32RetryCount == RGX_FORCED_IDLE_RETRY_COUNT))
+               {
+                       break;
+               }
+               ui32RetryCount++;
+               PVR_DPF((PVR_DBG_WARNING,
+                               "%s: Request timeout. Retry %d of %d",
+                                __func__, ui32RetryCount, RGX_FORCED_IDLE_RETRY_COUNT));
+       } while (IMG_TRUE);
+
+       if (eError != PVRSRV_OK)
+       {
+               RGXFWNotifyHostTimeout(psDevInfo);
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Idle request failed. Firmware potentially left in forced idle state",
+                                __func__));
+               return eError;
+       }
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDeviceNode,
+                    "RGXForcedIdleRequest: Poll for Kernel SyncPrim [0x%p] on DM %d",
+                    psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP);
+
+       SyncPrimPDumpPol(psDevInfo->psPowSyncPrim,
+                                       1,
+                                       0xffffffff,
+                                       PDUMP_POLL_OPERATOR_EQUAL,
+                                       0);
+#endif
+
+#if !defined(NO_HARDWARE)
+       /* Check the firmware state for idleness */
+       if (psFwSysData->ePowState != RGXFWIF_POW_FORCED_IDLE)
+       {
+               return PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED;
+       }
+#endif
+
+       return PVRSRV_OK;
+}
+
+/*
+       RGXCancelForcedIdleRequest
+*/
+PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle)
+{
+       PVRSRV_DEVICE_NODE      *psDeviceNode = hDevHandle;
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
+       RGXFWIF_KCCB_CMD        sPowCmd;
+       PVRSRV_ERROR            eError = PVRSRV_OK;
+       IMG_UINT32                      ui32CmdKCCBSlot;
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+       eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim",
+                               __func__));
+               goto ErrorExit;
+       }
+
+       /* Send the IDLE request to the FW */
+       sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
+       sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ;
+       sPowCmd.uCmdData.sPowData.uPowerReqData.ePowRequestType = RGXFWIF_POWER_CANCEL_FORCED_IDLE;
+
+       PDUMPCOMMENT(psDeviceNode,
+                    "RGXCancelForcedIdleRequest: Sending cancel forced idle command");
+
+       /* Send cancel forced IDLE command to GP */
+       eError = RGXSendCommandAndGetKCCBSlot(psDevInfo,
+                                             &sPowCmd,
+                                             PDUMP_FLAGS_NONE,
+                                             &ui32CmdKCCBSlot);
+
+       if (eError != PVRSRV_OK)
+       {
+               PDUMPCOMMENT(psDeviceNode,
+                            "RGXCancelForcedIdleRequest: Failed to send cancel IDLE request for DM%d",
+                            RGXFWIF_DM_GP);
+               goto ErrorExit;
+       }
+
+       /* Wait for the firmware to answer. */
+       eError = RGXPollForGPCommandCompletion(psDeviceNode,
+                                     psDevInfo->psPowSyncPrim->pui32LinAddr,
+                                                                 1, 0xFFFFFFFF);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Timeout waiting for cancel idle request", __func__));
+               goto ErrorExit;
+       }
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDeviceNode,
+                    "RGXCancelForcedIdleRequest: Poll for Kernel SyncPrim [0x%p] on DM %d",
+                    psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP);
+
+       SyncPrimPDumpPol(psDevInfo->psPowSyncPrim,
+                                       1,
+                                       0xffffffff,
+                                       PDUMP_POLL_OPERATOR_EQUAL,
+                                       0);
+#endif
+
+       return eError;
+
+ErrorExit:
+       PVR_DPF((PVR_DBG_ERROR, "%s: Firmware potentially left in forced idle state", __func__));
+       return eError;
+}
+
+#if defined(SUPPORT_VALIDATION)
+#define RGX_POWER_DOMAIN_STATE_INVALID (0xFFFFFFFF)
+
+PVRSRV_ERROR RGXPowerDomainInitState(RGX_POWER_DOMAIN_STATE *psState,
+                                                                               IMG_UINT32 ui32MaxPowUnitsCount)
+{
+       /*
+        * Total power domain states = 2^(Max power unit count)
+        */
+       IMG_UINT32 ui32TotalStates = 1 << ui32MaxPowUnitsCount;
+       IMG_UINT32 i;
+
+       /**
+        * Allocate memory for storing the last transition for each power domain
+        * state.
+        */
+       psState->paui32LastTransition = OSAllocMem(ui32TotalStates *
+                                                                                          sizeof(*psState->paui32LastTransition));
+
+       if (!psState->paui32LastTransition)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: failed to allocate memory", __func__));
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       /**
+        * Initialize last transition of each state to invalid
+        */
+       for (i=0; i<ui32TotalStates; i++)
+       {
+               psState->paui32LastTransition[i] = RGX_POWER_DOMAIN_STATE_INVALID;
+       }
+
+       psState->ui32PowUnitsCount = ui32MaxPowUnitsCount;
+       psState->ui32CurrentState = RGX_POWER_DOMAIN_STATE_INVALID;
+
+       return PVRSRV_OK;
+}
+
+void RGXPowerDomainDeInitState(RGX_POWER_DOMAIN_STATE *psState)
+{
+       psState->ui32PowUnitsCount = 0;
+
+       if (psState->paui32LastTransition)
+       {
+               OSFreeMem(psState->paui32LastTransition);
+       }
+}
+
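+/* Validation helper: walks the power domain state space one step at a time.
+ * The first call returns the all-off state; subsequent calls either revisit
+ * the current state once or advance the last transition recorded for that
+ * state, wrapping modulo the total number of states. */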
+IMG_UINT32 RGXPowerDomainGetNextState(RGX_POWER_DOMAIN_STATE *psState)
+{
+       IMG_UINT32 ui32NextState, ui32CurrentState = psState->ui32CurrentState;
+       IMG_UINT32 ui32TotalStates = 1 << psState->ui32PowUnitsCount;
+
+       if (ui32CurrentState == RGX_POWER_DOMAIN_STATE_INVALID)
+       {
+               /**
+                * Start with all units powered off.
+                */
+               ui32NextState = 0;
+       }
+       else if (psState->paui32LastTransition[ui32CurrentState] == RGX_POWER_DOMAIN_STATE_INVALID)
+       {
+               ui32NextState = ui32CurrentState;
+               psState->paui32LastTransition[ui32CurrentState] = ui32CurrentState;
+       }
+       else
+       {
+               ui32NextState = (psState->paui32LastTransition[ui32CurrentState] + 1) % ui32TotalStates;
+               psState->paui32LastTransition[ui32CurrentState] = ui32NextState;
+       }
+
+       psState->ui32CurrentState = ui32NextState;
+       return ui32NextState;
+}
+#endif
+/******************************************************************************
+ End of file (rgxpower.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxpower.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxpower.h
new file mode 100644 (file)
index 0000000..e92938d
--- /dev/null
@@ -0,0 +1,272 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX power header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX power
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXPOWER_H)
+#define RGXPOWER_H
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "servicesext.h"
+#include "rgxdevice.h"
+
+
+/*!
+******************************************************************************
+
+ @Function     RGXPrePowerState
+
+ @Description
+
+ Does the necessary preparation before a power state transition.
+
+ @Input           hDevHandle : RGX Device Node
+ @Input           eNewPowerState : New power state
+ @Input           eCurrentPowerState : Current power state
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPrePowerState(IMG_HANDLE                               hDevHandle,
+                                                         PVRSRV_DEV_POWER_STATE        eNewPowerState,
+                                                         PVRSRV_DEV_POWER_STATE        eCurrentPowerState,
+                                                         PVRSRV_POWER_FLAGS            ePwrFlags);
+
+/*!
+******************************************************************************
+
+ @Function     RGXPostPowerState
+
+ @Description
+
+ Does the necessary processing after a power state transition.
+
+ @Input           hDevHandle : RGX Device Node
+ @Input           eNewPowerState : New power state
+ @Input           eCurrentPowerState : Current power state
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPostPowerState(IMG_HANDLE                              hDevHandle,
+                                                          PVRSRV_DEV_POWER_STATE       eNewPowerState,
+                                                          PVRSRV_DEV_POWER_STATE       eCurrentPowerState,
+                                                          PVRSRV_POWER_FLAGS           ePwrFlags);
+
+/*!
+******************************************************************************
+
+ @Function     RGXVzPrePowerState
+
+ @Description
+
+ Does the necessary preparation before a power state transition on a VZ driver.
+
+ @Input           hDevHandle : RGX Device Node
+ @Input           eNewPowerState : New power state
+ @Input           eCurrentPowerState : Current power state
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXVzPrePowerState(IMG_HANDLE                             hDevHandle,
+                                                               PVRSRV_DEV_POWER_STATE  eNewPowerState,
+                                                               PVRSRV_DEV_POWER_STATE  eCurrentPowerState,
+                                                               PVRSRV_POWER_FLAGS              ePwrFlags);
+
+/*!
+******************************************************************************
+
+ @Function     RGXVzPostPowerState
+
+ @Description
+
+ Does the necessary processing after a power state transition on a VZ driver.
+
+ @Input           hDevHandle : RGX Device Node
+ @Input           eNewPowerState : New power state
+ @Input           eCurrentPowerState : Current power state
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXVzPostPowerState(IMG_HANDLE                            hDevHandle,
+                                                                PVRSRV_DEV_POWER_STATE eNewPowerState,
+                                                                PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+                                                                PVRSRV_POWER_FLAGS             ePwrFlags);
+
+/*!
+******************************************************************************
+
+ @Function     RGXPreClockSpeedChange
+
+ @Description
+
+       Does processing required before an RGX clock speed change.
+
+ @Input           hDevHandle : RGX Device Node
+ @Input           eCurrentPowerState : Power state of the device
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPreClockSpeedChange(IMG_HANDLE                         hDevHandle,
+                                                                       PVRSRV_DEV_POWER_STATE  eCurrentPowerState);
+
+/*!
+******************************************************************************
+
+ @Function     RGXPostClockSpeedChange
+
+ @Description
+
+       Does processing required after an RGX clock speed change.
+
+ @Input           hDevHandle : RGX Device Node
+ @Input           eCurrentPowerState : Power state of the device
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPostClockSpeedChange(IMG_HANDLE                                hDevHandle,
+                                                                        PVRSRV_DEV_POWER_STATE eCurrentPowerState);
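+/*
+ * Illustrative usage sketch (not from the original sources): a DVFS sequence
+ * would normally bracket the platform clock change with these two calls,
+ * passing the device's current power state. Error handling is elided and the
+ * power state value shown is an assumption for the example.
+ *
+ *     eError = RGXPreClockSpeedChange(hDevHandle, PVRSRV_DEV_POWER_STATE_ON);
+ *     if (eError == PVRSRV_OK)
+ *     {
+ *         // apply the new core clock rate in the platform clock framework
+ *         eError = RGXPostClockSpeedChange(hDevHandle, PVRSRV_DEV_POWER_STATE_ON);
+ *     }
+ */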
+
+
+/*!
+******************************************************************************
+
+ @Function     RGXPowUnitsStateMaskChange
+
+ @Description Changes power state of SPUs
+
+ @Input           hDevHandle              RGX Device Node.
+ @Input           ui32PowUnitsStateMask   Mask containing power state of SPUs.
+                                   Each bit corresponds to an SPU.
+                                   Bit position corresponds to SPU number
+                                   i.e. Bit0 is SPU0, Bit1 is SPU1 etc.
+                                   '1' indicates ON and '0' indicates OFF.
+                                   Value must be non-zero.
+ @Return   PVRSRV_ERROR.
+
+******************************************************************************/
+PVRSRV_ERROR RGXPowUnitsStateMaskChange(IMG_HANDLE hDevHandle,
+                                                                               IMG_UINT32 ui32PowUnitsStateMask);
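+/*
+ * Example (illustrative only), based on the mask layout described above:
+ * powering on SPU0 and SPU2 while leaving the remaining SPUs off means
+ * setting bits 0 and 2; the mask must be non-zero.
+ *
+ *     IMG_UINT32 ui32Mask = (1U << 0) | (1U << 2);   // == 0x5
+ *     eError = RGXPowUnitsStateMaskChange(hDevHandle, ui32Mask);
+ */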
+
+/*!
+******************************************************************************
+
+ @Function     RGXAPMLatencyChange
+
+ @Description
+
+       Changes the wait duration used before firmware indicates IDLE.
+       Reducing this value will cause the firmware to shut off faster and
+       more often but may increase bubbles in GPU scheduling due to the added
+       power management activity. If bActivePMLatencyPersistant is NOT set, the
+       APM latency will revert to the system default on power up.
+
+ @Input           hDevHandle : RGX Device Node
+ @Input           ui32ActivePMLatencyms : Number of milliseconds to wait
+ @Input           bActivePMLatencyPersistant : Set to ensure new value is not reset
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE                            hDevHandle,
+                               IMG_UINT32                              ui32ActivePMLatencyms,
+                               IMG_BOOL                                bActivePMLatencyPersistant);
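+/*
+ * Example (illustrative only): reduce the APM latency to 10 ms for the
+ * current power cycle; with the persistent flag set to IMG_FALSE the value
+ * reverts to the system default on the next power up, as described above.
+ *
+ *     eError = RGXAPMLatencyChange(hDevHandle, 10, IMG_FALSE);
+ */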
+
+/*!
+******************************************************************************
+
+ @Function     RGXActivePowerRequest
+
+ @Description Initiate a handshake with the FW to power off the GPU
+
+ @Input           hDevHandle : RGX Device Node
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle);
+
+/*!
+******************************************************************************
+
+ @Function     RGXForcedIdleRequest
+
+ @Description Initiate a handshake with the FW to idle the GPU
+
+ @Input           hDevHandle : RGX Device Node
+
+ @Input    bDeviceOffPermitted : Set to indicate device state being off is not
+                                 erroneous.
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted);
+
+/*!
+******************************************************************************
+
+ @Function     RGXCancelForcedIdleRequest
+
+ @Description Send a request to cancel idle to the firmware.
+
+ @Input           hDevHandle : RGX Device Node
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle);
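+/*
+ * Illustrative sequence (not from the original sources): callers that need
+ * the GPU quiescent typically bracket their work with a forced-idle request
+ * and a matching cancel. Error handling is elided for brevity.
+ *
+ *     eError = RGXForcedIdleRequest(hDevHandle, IMG_FALSE);
+ *     if (eError == PVRSRV_OK)
+ *     {
+ *         // ... work that requires an idle GPU ...
+ *         eError = RGXCancelForcedIdleRequest(hDevHandle);
+ *     }
+ */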
+
+
+#if defined(SUPPORT_VALIDATION)
+PVRSRV_ERROR RGXPowerDomainInitState(RGX_POWER_DOMAIN_STATE *psState,
+                                                                        IMG_UINT32 ui32MaxPowUnitsCount);
+
+void RGXPowerDomainDeInitState(RGX_POWER_DOMAIN_STATE *psState);
+
+IMG_UINT32 RGXPowerDomainGetNextState(RGX_POWER_DOMAIN_STATE *psState);
+#endif
+#endif /* RGXPOWER_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxray.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxray.c
new file mode 100644 (file)
index 0000000..a4ebb9e
--- /dev/null
@@ -0,0 +1,767 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Ray routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX Ray routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "srvkm.h"
+#include "pdump_km.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxray.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "rgxccb.h"
+#include "rgxhwperf.h"
+#include "ospvr_gputrace.h"
+#include "htbuffer.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "rgx_memallocflags.h"
+
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+
+#include "rgxtimerquery.h"
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "rgxworkest.h"
+#endif
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_CMP_UFO_DUMP    0
+
+//#define CMP_CHECKPOINT_DEBUG 1
+
+#if defined(CMP_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+#else
+#define CHKPT_DBG(X)
+#endif
+
+struct _RGX_SERVER_RAY_CONTEXT_ {
+       PVRSRV_DEVICE_NODE                      *psDeviceNode;
+       DEVMEM_MEMDESC                          *psFWRayContextMemDesc;
+       DEVMEM_MEMDESC                          *psContextStateMemDesc;
+       POS_LOCK                                        hLock;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       WORKEST_HOST_DATA                       sWorkEstData;
+#endif
+       RGX_SERVER_COMMON_CONTEXT       *psServerCommonContext;
+       SYNC_ADDR_LIST                          sSyncAddrListFence;
+       SYNC_ADDR_LIST                          sSyncAddrListUpdate;
+};
+
+PVRSRV_ERROR PVRSRVRGXCreateRayContextKM(CONNECTION_DATA                       *psConnection,
+                                                                                        PVRSRV_DEVICE_NODE             *psDeviceNode,
+                                                                                        IMG_UINT32                             ui32Priority,
+                                                                                        IMG_HANDLE                             hMemCtxPrivData,
+                                                                                        IMG_UINT32                             ui32ContextFlags,
+                                                                                        IMG_UINT32                             ui32StaticRayContextStateSize,
+                                                                                        IMG_PBYTE                              pStaticRayContextState,
+                                                                                        IMG_UINT64                             ui64RobustnessAddress,
+                                                                                        IMG_UINT32                             ui32MaxDeadlineMS,
+                                                                                        RGX_SERVER_RAY_CONTEXT **ppsRayContext)
+{
+       DEVMEM_MEMDESC                          *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+       PVRSRV_RGXDEV_INFO                      *psDevInfo = psDeviceNode->pvDevice;
+       RGX_SERVER_RAY_CONTEXT          *psRayContext;
+       RGXFWIF_FWRAYCONTEXT            *psFWRayContext;
+       RGX_COMMON_CONTEXT_INFO         sInfo = {NULL};
+       PVRSRV_ERROR                            eError;
+
+       *ppsRayContext = NULL;
+
+       psRayContext = OSAllocZMem(sizeof(*psRayContext));
+       if (psRayContext == NULL)
+       {
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       psRayContext->psDeviceNode = psDeviceNode;
+	/*
+		Create the FW ray context; it has the RDM common
+		context embedded within it
+	 */
+       eError = DevmemFwAllocate(psDevInfo,
+                       sizeof(RGXFWIF_FWRAYCONTEXT),
+                       RGX_FWCOMCTX_ALLOCFLAGS,
+                       "FwRayContext",
+                       &psRayContext->psFWRayContextMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_fwraycontext;
+       }
+
+       eError = OSLockCreate(&psRayContext->hLock);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to create lock (%s)",
+                                __func__,
+                                PVRSRVGetErrorString(eError)));
+               goto fail_createlock;
+       }
+
+       PDUMPCOMMENT(psDeviceNode, "Allocate RGX firmware ray context suspend state");
+
+	eError = DevmemFwAllocate(psDevInfo,
+	                          sizeof(RGXFWIF_COMPUTECTX_STATE),
+	                          RGX_FWCOMCTX_ALLOCFLAGS,
+	                          "FwRayContextState",
+	                          &psRayContext->psContextStateMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate FW ray context state (%s)", __func__, PVRSRVGetErrorString(eError)));
+		goto fail_raycommoncontext;
+	}
+
+       eError = FWCommonContextAllocate(psConnection,
+                                                                        psDeviceNode,
+                                                                        REQ_TYPE_RAY,
+                                                                        RGXFWIF_DM_RAY,
+                                                                        hMemCtxPrivData,
+                                                                        psRayContext->psFWRayContextMemDesc,
+                                                                        offsetof(RGXFWIF_FWRAYCONTEXT, sRDMContext),
+                                                                        psFWMemContextMemDesc,
+                                                                        psRayContext->psContextStateMemDesc,
+                                                                        RGX_RDM_CCB_SIZE_LOG2,
+                                                                        RGX_RDM_CCB_MAX_SIZE_LOG2,
+                                                                        ui32ContextFlags,
+                                                                        ui32Priority,
+                                                                        ui32MaxDeadlineMS,
+                                                                        ui64RobustnessAddress,
+                                                                        &sInfo,
+                                                                        &psRayContext->psServerCommonContext);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to init Ray fw common context (%s)",
+                                __func__,
+                                PVRSRVGetErrorString(eError)));
+               goto fail_raycommoncontext;
+       }
+
+       eError = DevmemAcquireCpuVirtAddr(psRayContext->psFWRayContextMemDesc,
+                       (void **)&psFWRayContext);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_acquire_cpu_mapping;
+       }
+
+       OSDeviceMemCopy(&psFWRayContext->sStaticRayContextState, pStaticRayContextState, ui32StaticRayContextStateSize);
+	DevmemPDumpLoadMem(psRayContext->psFWRayContextMemDesc, 0, sizeof(RGXFWIF_FWRAYCONTEXT), PDUMP_FLAGS_CONTINUOUS);
+       DevmemReleaseCpuVirtAddr(psRayContext->psFWRayContextMemDesc);
+
+       SyncAddrListInit(&psRayContext->sSyncAddrListFence);
+       SyncAddrListInit(&psRayContext->sSyncAddrListUpdate);
+
+       *ppsRayContext = psRayContext;
+
+       return PVRSRV_OK;
+fail_acquire_cpu_mapping:
+fail_raycommoncontext:
+       OSLockDestroy(psRayContext->hLock);
+fail_createlock:
+       DevmemFwUnmapAndFree(psDevInfo, psRayContext->psFWRayContextMemDesc);
+fail_fwraycontext:
+       OSFreeMem(psRayContext);
+
+       return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXDestroyRayContextKM(RGX_SERVER_RAY_CONTEXT *psRayContext)
+{
+
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psRayContext->psDeviceNode->pvDevice;
+
+
+       /* Check if the FW has finished with this resource ... */
+       eError = RGXFWRequestCommonContextCleanUp(psRayContext->psDeviceNode,
+                                                                                         psRayContext->psServerCommonContext,
+                                                                                         RGXFWIF_DM_RAY,
+                                                                                         PDUMP_FLAGS_NONE);
+       if (eError == PVRSRV_ERROR_RETRY)
+       {
+               return eError;
+       }
+       else if (eError != PVRSRV_OK)
+       {
+               PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+                                __func__,
+                               PVRSRVGetErrorString(eError)));
+               return eError;
+       }
+
+       /* ... it has so we can free its resources */
+       FWCommonContextFree(psRayContext->psServerCommonContext);
+       DevmemFwUnmapAndFree(psDevInfo, psRayContext->psContextStateMemDesc);
+       psRayContext->psServerCommonContext = NULL;
+
+
+       DevmemFwUnmapAndFree(psDevInfo, psRayContext->psFWRayContextMemDesc);
+
+       OSLockDestroy(psRayContext->hLock);
+       OSFreeMem(psRayContext);
+
+       return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR PVRSRVRGXKickRDMKM(RGX_SERVER_RAY_CONTEXT *psRayContext,
+                                                               IMG_UINT32                              ui32ClientUpdateCount,
+                                                               SYNC_PRIMITIVE_BLOCK    **pauiClientUpdateUFODevVarBlock,
+                                                               IMG_UINT32                              *paui32ClientUpdateSyncOffset,
+                                                               IMG_UINT32                              *paui32ClientUpdateValue,
+                                                               PVRSRV_FENCE                    iCheckFence,
+                                                               PVRSRV_TIMELINE                 iUpdateTimeline,
+                                                               PVRSRV_FENCE                    *piUpdateFence,
+                                                               IMG_CHAR                                pcszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],
+                                                               IMG_UINT32                              ui32CmdSize,
+                                                               IMG_PBYTE                               pui8DMCmd,
+                                                               IMG_UINT32                              ui32PDumpFlags,
+                                                               IMG_UINT32                              ui32ExtJobRef)
+{
+
+       RGXFWIF_KCCB_CMD                sRayKCCBCmd;
+       RGX_CCB_CMD_HELPER_DATA asCmdHelperData[1];
+       PVRSRV_ERROR                    eError, eError2;
+       IMG_UINT32                              ui32FWCtx;
+
+       PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+       PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+       PRGXFWIF_UFO_ADDR       pRMWUFOAddr;
+       PVRSRV_RGXDEV_INFO      *psDevInfo;
+       RGX_CLIENT_CCB          *psClientCCB;
+       IMG_UINT32              ui32IntJobRef;
+
+       IMG_BOOL                                bCCBStateOpen = IMG_FALSE;
+       IMG_UINT64 ui64FBSCEntryMask;
+       IMG_UINT32 ui32IntClientFenceCount = 0;
+       PRGXFWIF_UFO_ADDR *pauiIntFenceUFOAddress = NULL;
+       IMG_UINT32 ui32IntClientUpdateCount = 0;
+       PRGXFWIF_UFO_ADDR *pauiIntUpdateUFOAddress = NULL;
+       IMG_UINT32 *paui32IntUpdateValue = NULL;
+       PVRSRV_FENCE  iUpdateFence = PVRSRV_NO_FENCE;
+       IMG_UINT64               uiCheckFenceUID = 0;
+       IMG_UINT64               uiUpdateFenceUID = 0;
+       PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL;
+       PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+       IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+       IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+       PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL;
+       IMG_UINT32 ui32FenceTimelineUpdateValue = 0;
+       void *pvUpdateFenceFinaliseData = NULL;
+
+       psDevInfo = FWCommonContextGetRGXDevInfo(psRayContext->psServerCommonContext);
+       psClientCCB = FWCommonContextGetClientCCB(psRayContext->psServerCommonContext);
+       ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal);
+
+       if (iUpdateTimeline >= 0 && !piUpdateFence)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       /* Ensure we haven't been given a null ptr to
+        * update values if we have been told we
+        * have updates
+        */
+       if (ui32ClientUpdateCount > 0)
+       {
+               PVR_LOG_RETURN_IF_FALSE(paui32ClientUpdateValue != NULL,
+                                       "paui32ClientUpdateValue NULL but "
+                                       "ui32ClientUpdateCount > 0",
+                                       PVRSRV_ERROR_INVALID_PARAMS);
+       }
+
+       /* Ensure the string is null-terminated (Required for safety) */
+       pcszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH-1] = '\0';
+
+       OSLockAcquire(psRayContext->hLock);
+
+       eError = SyncAddrListPopulate(&psRayContext->sSyncAddrListFence,
+                                                                       0,
+                                                                       NULL,
+                                                                       NULL);
+       if (eError != PVRSRV_OK)
+       {
+               goto err_populate_sync_addr_list;
+       }
+
+       ui32IntClientUpdateCount = ui32ClientUpdateCount;
+
+       eError = SyncAddrListPopulate(&psRayContext->sSyncAddrListUpdate,
+                                                                       ui32ClientUpdateCount,
+                                                                       pauiClientUpdateUFODevVarBlock,
+                                                                       paui32ClientUpdateSyncOffset);
+       if (eError != PVRSRV_OK)
+       {
+               goto err_populate_sync_addr_list;
+       }
+       if (ui32IntClientUpdateCount && !pauiIntUpdateUFOAddress)
+       {
+               pauiIntUpdateUFOAddress = psRayContext->sSyncAddrListUpdate.pasFWAddrs;
+       }
+       paui32IntUpdateValue = paui32ClientUpdateValue;
+
+       CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psRayContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __func__, iCheckFence, (void*)psRayContext->psDeviceNode->hSyncCheckpointContext));
+       /* Resolve the sync checkpoints that make up the input fence */
+       eError = SyncCheckpointResolveFence(psRayContext->psDeviceNode->hSyncCheckpointContext,
+                                                                               iCheckFence,
+                                                                               &ui32FenceSyncCheckpointCount,
+                                                                               &apsFenceSyncCheckpoints,
+                                           &uiCheckFenceUID, ui32PDumpFlags);
+       if (eError != PVRSRV_OK)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __func__, eError));
+               goto fail_resolve_input_fence;
+       }
+       CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __func__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints));
+#if defined(CMP_CHECKPOINT_DEBUG)
+       if (ui32FenceSyncCheckpointCount > 0)
+       {
+               IMG_UINT32 ii;
+               for (ii=0; ii<ui32FenceSyncCheckpointCount; ii++)
+               {
+                       PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints + ii);
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s:    apsFenceSyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint));
+               }
+       }
+#endif
+       /* Create the output fence (if required) */
+       if (iUpdateTimeline != PVRSRV_NO_TIMELINE)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (iUpdateFence=%d, iUpdateTimeline=%d,  psRayContext->psDeviceNode->hSyncCheckpointContext=<%p>)...", __func__, iUpdateFence, iUpdateTimeline, (void*)psRayContext->psDeviceNode->hSyncCheckpointContext));
+               eError = SyncCheckpointCreateFence(psRayContext->psDeviceNode,
+                                                  pcszUpdateFenceName,
+                                                                                  iUpdateTimeline,
+                                                                                  psRayContext->psDeviceNode->hSyncCheckpointContext,
+                                                                                  &iUpdateFence,
+                                                                                  &uiUpdateFenceUID,
+                                                                                  &pvUpdateFenceFinaliseData,
+                                                                                  &psUpdateSyncCheckpoint,
+                                                                                  (void*)&psFenceTimelineUpdateSync,
+                                                                                  &ui32FenceTimelineUpdateValue,
+                                                                                  ui32PDumpFlags);
+               if (eError != PVRSRV_OK)
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned error (%d)", __func__, eError));
+                       goto fail_create_output_fence;
+               }
+
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned from SyncCheckpointCreateFence (iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, ui32FenceTimelineUpdateValue=%u)", __func__, iUpdateFence, psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
+
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u, psFenceTimelineUpdateSync=<%p>", __func__, ui32IntClientUpdateCount, (void*)psFenceTimelineUpdateSync));
+               /* Append the sync prim update for the timeline (if required) */
+               if (psFenceTimelineUpdateSync)
+               {
+                       IMG_UINT32 *pui32TimelineUpdateWp = NULL;
+
+                       /* Allocate memory to hold the list of update values (including our timeline update) */
+                       pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+                       if (!pui32IntAllocatedUpdateValues)
+                       {
+                               /* Failed to allocate memory */
+                               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto fail_alloc_update_values_mem;
+                       }
+                       OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+                       /* Copy the update values into the new memory, then append our timeline update value */
+                       OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount);
+#if defined(CMP_CHECKPOINT_DEBUG)
+                       if (ui32IntClientUpdateCount > 0)
+                       {
+                               IMG_UINT32 iii;
+                               IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+                               CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u:", __func__, ui32IntClientUpdateCount));
+                               for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+                               {
+                                       CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                                       pui32Tmp++;
+                               }
+                       }
+#endif
+                       /* Now set the additional update value */
+                       pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount;
+                       *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
+                       ui32IntClientUpdateCount++;
+                       /* Now make sure paui32ClientUpdateValue points to pui32IntAllocatedUpdateValues */
+                       paui32ClientUpdateValue = pui32IntAllocatedUpdateValues;
+
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: append the timeline sync prim addr <%p> to the compute context update list", __func__,  (void*)psFenceTimelineUpdateSync));
+			/* Now append the timeline sync prim addr to the ray context update list */
+                       SyncAddrListAppendSyncPrim(&psRayContext->sSyncAddrListUpdate,
+                                                  psFenceTimelineUpdateSync);
+#if defined(CMP_CHECKPOINT_DEBUG)
+                       if (ui32IntClientUpdateCount > 0)
+                       {
+                               IMG_UINT32 iii;
+                               IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+                               CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u:", __func__, ui32IntClientUpdateCount));
+                               for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+                               {
+                                       CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                                       pui32Tmp++;
+                               }
+                       }
+#endif
+                       /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */
+                       paui32IntUpdateValue = pui32IntAllocatedUpdateValues;
+               }
+       }
+
+       /* Append the checks (from input fence) */
+       if (ui32FenceSyncCheckpointCount > 0)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d sync checkpoints to Ray RDM Fence (&psRayContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psRayContext->sSyncAddrListFence));
+#if defined(CMP_CHECKPOINT_DEBUG)
+               if (ui32IntClientUpdateCount > 0)
+               {
+                       IMG_UINT32 iii;
+                       IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress;
+
+                       for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+                       {
+                               CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                               pui32Tmp++;
+                       }
+               }
+#endif
+               SyncAddrListAppendCheckpoints(&psRayContext->sSyncAddrListFence,
+                                                                         ui32FenceSyncCheckpointCount,
+                                                                         apsFenceSyncCheckpoints);
+               if (!pauiIntFenceUFOAddress)
+               {
+                       pauiIntFenceUFOAddress = psRayContext->sSyncAddrListFence.pasFWAddrs;
+               }
+               ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
+       }
+#if defined(CMP_CHECKPOINT_DEBUG)
+       if (ui32IntClientUpdateCount > 0)
+       {
+               IMG_UINT32 iii;
+               IMG_UINT32 *pui32Tmp = (IMG_UINT32*)paui32IntUpdateValue;
+
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Dumping %d update values (paui32IntUpdateValue=<%p>)...", __func__, ui32IntClientUpdateCount, (void*)paui32IntUpdateValue));
+               for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: paui32IntUpdateValue[%d] = <%p>", __func__, iii, (void*)pui32Tmp));
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: *paui32IntUpdateValue[%d] = 0x%x", __func__, iii, *pui32Tmp));
+                       pui32Tmp++;
+               }
+       }
+#endif
+
+       if (psUpdateSyncCheckpoint)
+       {
+               /* Append the update (from output fence) */
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append 1 sync checkpoint to Ray RDM Update (&psRayContext->sSyncAddrListUpdate=<%p>, psUpdateSyncCheckpoint=<%p>)...", __func__, (void*)&psRayContext->sSyncAddrListUpdate , (void*)psUpdateSyncCheckpoint));
+               SyncAddrListAppendCheckpoints(&psRayContext->sSyncAddrListUpdate,
+                                                                         1,
+                                                                         &psUpdateSyncCheckpoint);
+               if (!pauiIntUpdateUFOAddress)
+               {
+                       pauiIntUpdateUFOAddress = psRayContext->sSyncAddrListUpdate.pasFWAddrs;
+               }
+               ui32IntClientUpdateCount++;
+#if defined(CMP_CHECKPOINT_DEBUG)
+               if (ui32IntClientUpdateCount > 0)
+               {
+                       IMG_UINT32 iii;
+                       IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntUpdateUFOAddress;
+
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress=<%p>, pui32Tmp=<%p>, ui32IntClientUpdateCount=%u", __func__, (void*)pauiIntUpdateUFOAddress, (void*)pui32Tmp, ui32IntClientUpdateCount));
+                       for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+                       {
+                               CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                               pui32Tmp++;
+                       }
+               }
+#endif
+       }
+       CHKPT_DBG((PVR_DBG_ERROR, "%s:   (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount));
+
+#if (ENABLE_CMP_UFO_DUMP == 1)
+               PVR_DPF((PVR_DBG_ERROR, "%s: dumping Ray (RDM) fence/updates syncs...", __func__));
+               {
+                       IMG_UINT32 ii;
+                       PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress;
+                       PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress;
+                       IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue;
+
+                       /* Dump Fence syncs and Update syncs */
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Ray (RDM) fence syncs (&psRayContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psRayContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress));
+                       for (ii=0; ii<ui32IntClientFenceCount; ii++)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
+                               psTmpIntFenceUFOAddress++;
+                       }
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Ray (RDM) update syncs (&psRayContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psRayContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
+                       for (ii=0; ii<ui32IntClientUpdateCount; ii++)
+                       {
+                               if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
+                               }
+                               else
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue));
+                                       pui32TmpIntUpdateValue++;
+                               }
+                               psTmpIntUpdateUFOAddress++;
+                       }
+               }
+#endif
+
+       RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psRayContext->psDeviceNode->pvDevice,
+                                 &pPreAddr,
+                                 &pPostAddr,
+                                 &pRMWUFOAddr);
+
+       /*
+       * Extract the FBSC entries from MMU Context for the deferred FBSC invalidate command,
+       * in other words, take the value and set it to zero afterwards.
+       * FBSC Entry Mask must be extracted from MMU ctx and updated just before the kick starts
+       * as it must be ready at the time of context activation.
+       */
+       {
+               eError = RGXExtractFBSCEntryMaskFromMMUContext(psRayContext->psDeviceNode,
+                                                                                                               FWCommonContextGetServerMMUCtx(psRayContext->psServerCommonContext),
+                                                                                                               &ui64FBSCEntryMask);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "Failed to extract FBSC Entry Mask (%d)", eError));
+                       goto fail_invalfbsc;
+               }
+       }
+
+       RGXCmdHelperInitCmdCCB(psDevInfo,
+                              psClientCCB,
+                              ui64FBSCEntryMask,
+                              ui32IntClientFenceCount,
+                              pauiIntFenceUFOAddress,
+                              NULL,
+                              ui32IntClientUpdateCount,
+                              pauiIntUpdateUFOAddress,
+                              paui32IntUpdateValue,
+                              ui32CmdSize,
+                              pui8DMCmd,
+                           &pPreAddr,
+                           &pPostAddr,
+                           &pRMWUFOAddr,
+                              RGXFWIF_CCB_CMD_TYPE_RAY,
+                              ui32ExtJobRef,
+                              ui32IntJobRef,
+                              ui32PDumpFlags,
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+                              NULL,
+#else
+                              NULL,
+#endif
+                              "Ray",
+                              bCCBStateOpen,
+                              asCmdHelperData);
+       eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asCmdHelperData), asCmdHelperData);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_cmdaquire;
+       }
+
+       if (eError == PVRSRV_OK)
+       {
+               /*
+                       All the required resources are ready at this point, we can't fail so
+                       take the required server sync operations and commit all the resources
+               */
+               RGXCmdHelperReleaseCmdCCB(1, asCmdHelperData, "RDM", FWCommonContextGetFWAddress(psRayContext->psServerCommonContext).ui32Addr);
+       }
+
+	/* Construct the kernel ray (RDM) CCB command. */
+       sRayKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+       sRayKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psRayContext->psServerCommonContext);
+       sRayKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB);
+       sRayKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB);
+       sRayKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+       sRayKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+
+       ui32FWCtx = FWCommonContextGetFWAddress(psRayContext->psServerCommonContext).ui32Addr;
+
+       RGXSRV_HWPERF_ENQ(psRayContext,
+                         OSGetCurrentClientProcessIDKM(),
+                         ui32FWCtx,
+                         ui32ExtJobRef,
+                         ui32IntJobRef,
+                         RGX_HWPERF_KICK_TYPE_RS,
+                         iCheckFence,
+                         iUpdateFence,
+                         iUpdateTimeline,
+                         uiCheckFenceUID,
+                         uiUpdateFenceUID,
+                         NO_DEADLINE,
+                         NO_CYCEST);
+
+       /*
+	 * Submit the ray command to the firmware.
+        */
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               eError2 = RGXScheduleCommand(psRayContext->psDeviceNode->pvDevice,
+                                                                       RGXFWIF_DM_RAY,
+                                                                       &sRayKCCBCmd,
+                                                                       ui32PDumpFlags);
+               if (eError2 != PVRSRV_ERROR_RETRY)
+               {
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       if (eError2 != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s failed to schedule kernel CCB command (%s)",
+                                __func__,
+                                PVRSRVGetErrorString(eError2)));
+               if (eError == PVRSRV_OK)
+               {
+                       eError = eError2;
+               }
+       }
+       else
+       {
+               PVRGpuTraceEnqueueEvent(psRayContext->psDeviceNode->pvDevice,
+                                       ui32FWCtx, ui32ExtJobRef, ui32IntJobRef,
+                                       RGX_HWPERF_KICK_TYPE_RS);
+       }
+       /*
+        * Now check eError (which may have returned an error from our earlier call
+        * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first
+        * so we check it now...
+        */
+       if (eError != PVRSRV_OK )
+       {
+               goto fail_cmdaquire;
+       }
+
+#if defined(NO_HARDWARE)
+       /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+       if (psUpdateSyncCheckpoint)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint)));
+               SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
+       }
+       if (psFenceTimelineUpdateSync)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Updating NOHW sync prim<%p> to %d", __func__, (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
+               SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
+       }
+       SyncCheckpointNoHWUpdateTimelines(NULL);
+#endif /* defined(NO_HARDWARE) */
+
+       *piUpdateFence = iUpdateFence;
+
+       if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE))
+       {
+               SyncCheckpointFinaliseFence(psRayContext->psDeviceNode, iUpdateFence,
+                                           pvUpdateFenceFinaliseData,
+                                                                       psUpdateSyncCheckpoint, pcszUpdateFenceName);
+       }
+       /* Drop the references taken on the sync checkpoints in the
+        * resolved input fence */
+       SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+                                                                apsFenceSyncCheckpoints);
+       /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+       if (apsFenceSyncCheckpoints)
+       {
+               SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+       }
+       /* Free memory allocated to hold the internal list of update values */
+       if (pui32IntAllocatedUpdateValues)
+       {
+               OSFreeMem(pui32IntAllocatedUpdateValues);
+               pui32IntAllocatedUpdateValues = NULL;
+       }
+       OSLockRelease(psRayContext->hLock);
+
+       return PVRSRV_OK;
+
+fail_cmdaquire:
+fail_invalfbsc:
+       SyncAddrListRollbackCheckpoints(psRayContext->psDeviceNode, &psRayContext->sSyncAddrListFence);
+       SyncAddrListRollbackCheckpoints(psRayContext->psDeviceNode, &psRayContext->sSyncAddrListUpdate);
+fail_alloc_update_values_mem:
+       if (iUpdateFence != PVRSRV_NO_FENCE)
+       {
+               SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
+       }
+fail_create_output_fence:
+       /* Drop the references taken on the sync checkpoints in the
+        * resolved input fence */
+       SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+                                                                apsFenceSyncCheckpoints);
+fail_resolve_input_fence:
+
+err_populate_sync_addr_list:
+       /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+       if (apsFenceSyncCheckpoints)
+       {
+               SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+       }
+       /* Free memory allocated to hold the internal list of update values */
+       if (pui32IntAllocatedUpdateValues)
+       {
+               OSFreeMem(pui32IntAllocatedUpdateValues);
+               pui32IntAllocatedUpdateValues = NULL;
+       }
+
+       OSLockRelease(psRayContext->hLock);
+       return eError;
+}
+
+/******************************************************************************
+ End of file (rgxray.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxray.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxray.h
new file mode 100644 (file)
index 0000000..3855a42
--- /dev/null
@@ -0,0 +1,113 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX ray functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX ray functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXRAY_H_)
+#define RGXRAY_H_
+
+#include "devicemem.h"
+#include "device.h"
+#include "rgxfwutils.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "connection_server.h"
+
+
+typedef struct _RGX_SERVER_RAY_CONTEXT_ RGX_SERVER_RAY_CONTEXT;
+
+/*!
+*******************************************************************************
+ @Function     PVRSRVRGXCreateRayContextKM
+
+ @Description
+	Server-side implementation of RGXCreateRayContext
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXCreateRayContextKM(CONNECTION_DATA               *psConnection,
+                                                                                PVRSRV_DEVICE_NODE             *psDeviceNode,
+                                                                                IMG_UINT32                             ui32Priority,
+                                                                                IMG_HANDLE                             hMemCtxPrivData,
+                                                                                IMG_UINT32                             ui32ContextFlags,
+                                                                                IMG_UINT32                             ui32StaticRayContextStateSize,
+                                                                                IMG_PBYTE                              pStaticRayContextState,
+                                                                                IMG_UINT64                             ui64RobustnessAddress,
+                                                                                IMG_UINT32                             ui32MaxDeadlineMS,
+                                                                                RGX_SERVER_RAY_CONTEXT **ppsRayContext);
+
+/*!
+*******************************************************************************
+ @Function     PVRSRVRGXDestroyRayContextKM
+
+ @Description
+       Server-side implementation of RGXDestroyRayContext
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDestroyRayContextKM(RGX_SERVER_RAY_CONTEXT *psRayContext);
+
+
+/*!
+*******************************************************************************
+ @Function     PVRSRVRGXKickRDMKM
+
+ @Description
+       Server-side implementation of RGXKickRDM
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXKickRDMKM(RGX_SERVER_RAY_CONTEXT *psRayContext,
+                                                               IMG_UINT32                              ui32ClientUpdateCount,
+                                                               SYNC_PRIMITIVE_BLOCK    **pauiClientUpdateUFODevVarBlock,
+                                                               IMG_UINT32                              *paui32ClientUpdateSyncOffset,
+                                                               IMG_UINT32                              *paui32ClientUpdateValue,
+                                                               PVRSRV_FENCE                    iCheckFence,
+                                                               PVRSRV_TIMELINE                 iUpdateTimeline,
+                                                               PVRSRV_FENCE                    *piUpdateFence,
+                                                               IMG_CHAR                                pcszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],
+                                                               IMG_UINT32                              ui32CmdSize,
+                                                               IMG_PBYTE                               pui8DMCmd,
+                                                               IMG_UINT32                              ui32PDumpFlags,
+                                                               IMG_UINT32                              ui32ExtJobRef);
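+/*
+ * Typical server-side lifecycle (illustrative): a ray context is created with
+ * PVRSRVRGXCreateRayContextKM, used for one or more PVRSRVRGXKickRDMKM
+ * submissions, and finally released with PVRSRVRGXDestroyRayContextKM; the
+ * destroy call returns PVRSRV_ERROR_RETRY while the firmware is still
+ * finishing with the context and should be retried in that case.
+ */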
+
+
+#endif /* RGXRAY_H_ */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxsrvinit.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxsrvinit.c
new file mode 100644 (file)
index 0000000..a4f4d1c
--- /dev/null
@@ -0,0 +1,1537 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services initialisation routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "srvinit.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+#include "km_apphint_defs.h"
+
+#include "htbuffer_types.h"
+#include "htbuffer_init.h"
+
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+
+#include "rgx_fwif_km.h"
+#include "pdump_km.h"
+#include "rgx_compat_bvnc.h"
+
+#include "rgxdefs_km.h"
+#include "pvrsrv.h"
+
+#include "rgxinit.h"
+#include "rgxmulticore.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "virt_validation_defs.h"
+#endif
+
+#include "rgx_fwif_hwperf.h"
+#include "rgx_hwperf_table.h"
+
+static const RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] =
+{
+#define X(a, b, c, d, e, f, g)  {a, b, 0xFF, d, e, f, NULL}
+RGX_CNT_BLK_TYPE_MODEL_DIRECT_LIST,
+RGX_CNT_BLK_TYPE_MODEL_INDIRECT_LIST
+#undef X
+};
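+/*
+ * Note (illustrative): the X() definition above expands every entry of the
+ * two RGX_CNT_BLK_TYPE_MODEL_*_LIST lists into an initialiser of the form
+ * {a, b, 0xFF, d, e, f, NULL}, i.e. the third and seventh list arguments are
+ * overridden with 0xFF and NULL respectively for this server-init table.
+ */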
+
+#include "fwload.h"
+#include "rgxlayer_impl.h"
+#include "rgxfwimageutils.h"
+#include "rgxfwutils.h"
+
+#include "rgx_bvnc_defs_km.h"
+
+#include "rgxdevice.h"
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+#include "rgxdevice.h"
+#include "pvrsrv_device.h"
+#endif
+
+#define DRIVER_MODE_HOST               0          /* AppHint value for host driver mode */
+
+#define        HW_PERF_FILTER_DEFAULT         0x00000000 /* Default to no HWPerf */
+#define HW_PERF_FILTER_DEFAULT_ALL_ON  0xFFFFFFFF /* All events */
+#define AVAIL_POW_UNITS_MASK_DEFAULT   (PVRSRV_APPHINT_HWVALAVAILABLESPUMASK)
+#define AVAIL_RAC_MASK_DEFAULT         (PVRSRV_APPHINT_HWVALAVAILABLERACMASK)
+
+/* Kernel CCB size */
+
+#if !defined(PVRSRV_RGX_LOG2_KERNEL_CCB_MIN_SIZE)
+#define PVRSRV_RGX_LOG2_KERNEL_CCB_MIN_SIZE 4
+#endif
+#if !defined(PVRSRV_RGX_LOG2_KERNEL_CCB_MAX_SIZE)
+#define PVRSRV_RGX_LOG2_KERNEL_CCB_MAX_SIZE 16
+#endif
+
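+/* PVRSRV_APPHINT_KCCB_SIZE_LOG2 is validated against these bounds at compile
+ * time here; the run-time KernelCCBSizeLog2 apphint is clamped to the same
+ * range in GetApphints() below.
+ */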
+#if PVRSRV_APPHINT_KCCB_SIZE_LOG2 < PVRSRV_RGX_LOG2_KERNEL_CCB_MIN_SIZE
+#error PVRSRV_APPHINT_KCCB_SIZE_LOG2 is too low.
+#elif PVRSRV_APPHINT_KCCB_SIZE_LOG2 > PVRSRV_RGX_LOG2_KERNEL_CCB_MAX_SIZE
+#error PVRSRV_APPHINT_KCCB_SIZE_LOG2 is too high.
+#endif
+
+#if defined(SUPPORT_VALIDATION)
+#include "pvrsrv_apphint.h"
+#endif
+
+#include "os_srvinit_param.h"
+
+#if defined(__linux__)
+#include "km_apphint.h"
+#else
+/*!
+*******************************************************************************
+ * AppHint mnemonic data type helper tables
+******************************************************************************/
+/* apphint map of name vs. enable flag */
+static SRV_INIT_PARAM_UINT32_LOOKUP htb_loggroup_tbl[] = {
+#define X(a, b) { #b, HTB_LOG_GROUP_FLAG(a) },
+       HTB_LOG_SFGROUPLIST
+#undef X
+};
+/* apphint map of arg vs. OpMode */
+static SRV_INIT_PARAM_UINT32_LOOKUP htb_opmode_tbl[] = {
+       { "droplatest", HTB_OPMODE_DROPLATEST},
+       { "dropoldest", HTB_OPMODE_DROPOLDEST},
+       /* HTB should never be started in HTB_OPMODE_BLOCK
+        * as this can lead to deadlocks
+        */
+};
+
+static SRV_INIT_PARAM_UINT32_LOOKUP fwt_logtype_tbl[] = {
+       { "trace", 0},
+       { "none", 0}
+#if defined(SUPPORT_TBI_INTERFACE)
+       , { "tbi", 1}
+#endif
+};
+
+static SRV_INIT_PARAM_UINT32_LOOKUP timecorr_clk_tbl[] = {
+       { "mono", 0 },
+       { "mono_raw", 1 },
+       { "sched", 2 }
+};
+
+static SRV_INIT_PARAM_UINT32_LOOKUP fwt_loggroup_tbl[] = { RGXFWIF_LOG_GROUP_NAME_VALUE_MAP };
+
+/*
+ * Services AppHints initialisation
+ */
+#define X(a, b, c, d, e) SrvInitParamInit ## b(a, d, e)
+APPHINT_LIST_ALL
+#undef X
+#endif /* defined(__linux__) */
+
+/*
+ * Container for all the apphints used by this module
+ */
+typedef struct _RGX_SRVINIT_APPHINTS_
+{
+       IMG_UINT32 ui32DriverMode;
+       IMG_BOOL   bEnableSignatureChecks;
+       IMG_UINT32 ui32SignatureChecksBufSize;
+
+       IMG_BOOL   bAssertOnOutOfMem;
+       IMG_BOOL   bAssertOnHWRTrigger;
+#if defined(SUPPORT_VALIDATION)
+       IMG_UINT32 ui32RenderKillingCtl;
+       IMG_UINT32 ui32CDMTDMKillingCtl;
+       IMG_BOOL   bValidateIrq;
+       IMG_BOOL   bValidateSOCUSCTimer;
+       IMG_UINT32 ui32AvailablePowUnitsMask;
+       IMG_UINT32 ui32AvailableRACMask;
+       IMG_BOOL   bInjectPowUnitsStateMaskChange;
+       IMG_BOOL   bEnablePowUnitsStateMaskChange;
+       IMG_UINT32 ui32FBCDCVersionOverride;
+       IMG_UINT32 aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_LAST];
+       IMG_UINT32 aui32USRMNumRegions[RGXFWIF_USRM_DM_LAST];
+       IMG_UINT64 aui64UVBRMNumRegions[RGXFWIF_UVBRM_DM_LAST];
+#endif
+       IMG_BOOL   bCheckMlist;
+       IMG_BOOL   bDisableClockGating;
+       IMG_BOOL   bDisableDMOverlap;
+       IMG_BOOL   bDisableFEDLogging;
+       IMG_BOOL   bDisablePDP;
+       IMG_BOOL   bEnableDMKillRand;
+       IMG_BOOL   bEnableRandomCsw;
+       IMG_BOOL   bEnableSoftResetCsw;
+       IMG_BOOL   bFilteringMode;
+       IMG_BOOL   bHWPerfDisableCounterFilter;
+       IMG_BOOL   bZeroFreelist;
+       IMG_UINT32 ui32EnableFWContextSwitch;
+       IMG_UINT32 ui32FWContextSwitchProfile;
+       IMG_UINT32 ui32ISPSchedulingLatencyMode;
+       IMG_UINT32 ui32HWPerfFWBufSize;
+       IMG_UINT32 ui32HWPerfHostBufSize;
+       IMG_UINT32 ui32HWPerfFilter0;
+       IMG_UINT32 ui32HWPerfFilter1;
+       IMG_UINT32 ui32HWPerfHostFilter;
+       IMG_UINT32 ui32TimeCorrClock;
+       IMG_UINT32 ui32HWRDebugDumpLimit;
+       IMG_UINT32 ui32JonesDisableMask;
+       IMG_UINT32 ui32LogType;
+       IMG_UINT32 ui32TruncateMode;
+       IMG_UINT32 ui32KCCBSizeLog2;
+       IMG_UINT32 ui32CDMArbitrationMode;
+       FW_PERF_CONF eFirmwarePerf;
+       RGX_ACTIVEPM_CONF eRGXActivePMConf;
+       RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf;
+
+       IMG_BOOL   bEnableTrustedDeviceAceConfig;
+       IMG_UINT32 ui32FWContextSwitchCrossDM;
+#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__)
+       IMG_UINT32 ui32PhysMemTestPasses;
+#endif
+} RGX_SRVINIT_APPHINTS;
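+
+/* An RGX_SRVINIT_APPHINTS instance is populated once by GetApphints() and then
+ * passed to the configuration helpers below (GetFWConfigFlags, GetFilterFlags,
+ * InitDeviceFlags, InitFirmware).
+ */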
+
+/*!
+*******************************************************************************
+
+ @Function      GetApphints
+
+ @Description   Read init time apphints and initialise internal variables
+
+ @Input         psDevInfo : Device info
+ @Input         psHints   : Pointer to apphints container
+
+ @Return        void
+
+******************************************************************************/
+static INLINE void GetApphints(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_SRVINIT_APPHINTS *psHints)
+{
+       void *pvParamState = SrvInitParamOpen();
+       IMG_UINT32 ui32ParamTemp;
+
+       /*
+        * NB AppHints initialised to a default value via SrvInitParamInit* macros above
+        */
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,        pvParamState,    DriverMode,                         psHints->ui32DriverMode);
+       SrvInitParamGetBOOL(INITPARAM_NO_DEVICE,          pvParamState,    EnableSignatureChecks,      psHints->bEnableSignatureChecks);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,        pvParamState,    SignatureChecksBufSize, psHints->ui32SignatureChecksBufSize);
+
+       SrvInitParamGetBOOL(psDevInfo->psDeviceNode,      pvParamState,    AssertOutOfMemory,               psHints->bAssertOnOutOfMem);
+       SrvInitParamGetBOOL(psDevInfo->psDeviceNode,      pvParamState,    AssertOnHWRTrigger,            psHints->bAssertOnHWRTrigger);
+       SrvInitParamGetBOOL(psDevInfo->psDeviceNode,      pvParamState,    CheckMList,                            psHints->bCheckMlist);
+       SrvInitParamGetBOOL(INITPARAM_NO_DEVICE,          pvParamState,    DisableClockGating,            psHints->bDisableClockGating);
+       SrvInitParamGetBOOL(INITPARAM_NO_DEVICE,          pvParamState,    DisableDMOverlap,                psHints->bDisableDMOverlap);
+       SrvInitParamGetBOOL(psDevInfo->psDeviceNode,      pvParamState,    DisableFEDLogging,              psHints->bDisableFEDLogging);
+       SrvInitParamGetUINT32(psDevInfo->psDeviceNode,    pvParamState,    EnableAPM,                                    ui32ParamTemp);
+       psHints->eRGXActivePMConf = ui32ParamTemp;
+       SrvInitParamGetBOOL(INITPARAM_NO_DEVICE,          pvParamState,    EnableGenericDMKillingRandMode,  psHints->bEnableDMKillRand);
+       SrvInitParamGetBOOL(INITPARAM_NO_DEVICE,          pvParamState,    EnableRandomContextSwitch,        psHints->bEnableRandomCsw);
+       SrvInitParamGetBOOL(INITPARAM_NO_DEVICE,          pvParamState,    EnableSoftResetContextSwitch,  psHints->bEnableSoftResetCsw);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,        pvParamState,    EnableFWContextSwitch,   psHints->ui32EnableFWContextSwitch);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,        pvParamState,    EnableRDPowerIsland,                          ui32ParamTemp);
+       psHints->eRGXRDPowerIslandConf = ui32ParamTemp;
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,        pvParamState,    FirmwarePerf,                                 ui32ParamTemp);
+       psHints->eFirmwarePerf = ui32ParamTemp;
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,        pvParamState,    FWContextSwitchProfile, psHints->ui32FWContextSwitchProfile);
+       SrvInitParamGetBOOL(INITPARAM_NO_DEVICE,          pvParamState,
+               HWPerfDisableCounterFilter, psHints->bHWPerfDisableCounterFilter);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,        pvParamState,    HWPerfHostBufSizeInKB,       psHints->ui32HWPerfHostBufSize);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,        pvParamState,    HWPerfFWBufSizeInKB,           psHints->ui32HWPerfFWBufSize);
+       SrvInitParamGetUINT32(psDevInfo->psDeviceNode,    pvParamState,    KernelCCBSizeLog2,                psHints->ui32KCCBSizeLog2);
+
+       if (psHints->ui32KCCBSizeLog2 < PVRSRV_RGX_LOG2_KERNEL_CCB_MIN_SIZE)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "KCCB size %u is too low, setting to %u",
+                        psHints->ui32KCCBSizeLog2, PVRSRV_RGX_LOG2_KERNEL_CCB_MIN_SIZE));
+               psHints->ui32KCCBSizeLog2 = PVRSRV_RGX_LOG2_KERNEL_CCB_MIN_SIZE;
+       }
+       else if (psHints->ui32KCCBSizeLog2 > PVRSRV_RGX_LOG2_KERNEL_CCB_MAX_SIZE)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "KCCB size %u is too high, setting to %u",
+                        psHints->ui32KCCBSizeLog2, PVRSRV_RGX_LOG2_KERNEL_CCB_MAX_SIZE));
+               psHints->ui32KCCBSizeLog2 = PVRSRV_RGX_LOG2_KERNEL_CCB_MAX_SIZE;
+       }
+
+#if defined(SUPPORT_VALIDATION)
+       if (psHints->ui32KCCBSizeLog2 != PVRSRV_APPHINT_KCCB_SIZE_LOG2)
+       {
+               PVR_LOG(("KernelCCBSizeLog2 set to %u", psHints->ui32KCCBSizeLog2));
+       }
+#endif
+
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,        pvParamState,
+               ISPSchedulingLatencyMode, psHints->ui32ISPSchedulingLatencyMode);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,        pvParamState,     CDMArbitrationOverride,    psHints->ui32CDMArbitrationMode);
+#if defined(__linux__)
+       /* name changes */
+       {
+               IMG_UINT64 ui64Tmp;
+               SrvInitParamGetBOOL(psDevInfo->psDeviceNode,    pvParamState,    DisablePDumpPanic,                 psHints->bDisablePDP);
+               SrvInitParamGetUINT64(psDevInfo->psDeviceNode,  pvParamState,    HWPerfFWFilter,                                 ui64Tmp);
+               psHints->ui32HWPerfFilter0 = (IMG_UINT32)(ui64Tmp & 0xffffffffllu);
+               psHints->ui32HWPerfFilter1 = (IMG_UINT32)((ui64Tmp >> 32) & 0xffffffffllu);
+       }
+#else
+       SrvInitParamUnreferenced(DisablePDumpPanic);
+       SrvInitParamUnreferenced(HWPerfFWFilter);
+       SrvInitParamUnreferenced(RGXBVNC);
+#endif
+       SrvInitParamGetUINT32(psDevInfo->psDeviceNode,      pvParamState,    HWPerfHostFilter,             psHints->ui32HWPerfHostFilter);
+       SrvInitParamGetUINT32List(psDevInfo->psDeviceNode,  pvParamState,    TimeCorrClock,                   psHints->ui32TimeCorrClock);
+       SrvInitParamGetUINT32(psDevInfo->psDeviceNode,      pvParamState,    HWRDebugDumpLimit,                            ui32ParamTemp);
+       psHints->ui32HWRDebugDumpLimit = MIN(ui32ParamTemp, RGXFWIF_HWR_DEBUG_DUMP_ALL);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,          pvParamState,    JonesDisableMask,                             ui32ParamTemp);
+       psHints->ui32JonesDisableMask = ui32ParamTemp & RGX_CR_JONES_FIX__ROGUE3__DISABLE_CLRMSK;
+
+       SrvInitParamGetBOOL(INITPARAM_NO_DEVICE,            pvParamState,    NewFilteringMode,                   psHints->bFilteringMode);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,          pvParamState,    TruncateMode,                     psHints->ui32TruncateMode);
+
+       SrvInitParamGetBOOL(psDevInfo->psDeviceNode,        pvParamState,    ZeroFreelist,                        psHints->bZeroFreelist);
+#if defined(__linux__)
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,          pvParamState,    FWContextSwitchCrossDM, psHints->ui32FWContextSwitchCrossDM);
+#else
+       SrvInitParamUnreferenced(FWContextSwitchCrossDM);
+#endif
+
+#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__)
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,          pvParamState,    PhysMemTestPasses,           psHints->ui32PhysMemTestPasses);
+#endif
+
+#if defined(SUPPORT_VALIDATION)
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,        pvParamState,  KillingCtl,                              psHints->ui32RenderKillingCtl);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,        pvParamState,  CDMTDMKillingCtl,                        psHints->ui32CDMTDMKillingCtl);
+       SrvInitParamGetBOOL(INITPARAM_NO_DEVICE,          pvParamState,  ValidateIrq,                                     psHints->bValidateIrq);
+       SrvInitParamGetBOOL(INITPARAM_NO_DEVICE,          pvParamState,  ValidateSOCUSCTimer,                     psHints->bValidateSOCUSCTimer);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,        pvParamState,  HWValAvailableSPUMask,              psHints->ui32AvailablePowUnitsMask);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,        pvParamState,  HWValAvailableRACMask,                   psHints->ui32AvailableRACMask);
+       SrvInitParamGetBOOL(psDevInfo->psDeviceNode,      pvParamState,  GPUUnitsPowerChange,           psHints->bInjectPowUnitsStateMaskChange);
+       SrvInitParamGetBOOL(INITPARAM_NO_DEVICE,          pvParamState,  HWValEnableSPUPowerMaskChange, psHints->bEnablePowUnitsStateMaskChange);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,        pvParamState,  FBCDCVersionOverride,                psHints->ui32FBCDCVersionOverride);
+
+       /* Apphints for Unified Store virtual partitioning. */
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,        pvParamState,  USRMNumRegionsVDM,   psHints->aui32USRMNumRegions[RGXFWIF_USRM_DM_VDM]);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,        pvParamState,  USRMNumRegionsDDM,   psHints->aui32USRMNumRegions[RGXFWIF_USRM_DM_DDM]);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,        pvParamState,  USRMNumRegionsCDM,   psHints->aui32USRMNumRegions[RGXFWIF_USRM_DM_CDM]);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,        pvParamState,  USRMNumRegionsPDM,   psHints->aui32USRMNumRegions[RGXFWIF_USRM_DM_PDM]);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,        pvParamState,  USRMNumRegionsTDM,   psHints->aui32USRMNumRegions[RGXFWIF_USRM_DM_TDM]);
+
+       /* Apphints for UVB virtual partitioning. */
+       SrvInitParamGetUINT64(INITPARAM_NO_DEVICE,        pvParamState,  UVBRMNumRegionsVDM, psHints->aui64UVBRMNumRegions[RGXFWIF_UVBRM_DM_VDM]);
+       SrvInitParamGetUINT64(INITPARAM_NO_DEVICE,        pvParamState,  UVBRMNumRegionsDDM, psHints->aui64UVBRMNumRegions[RGXFWIF_UVBRM_DM_DDM]);
+
+       /* Apphints for TPU trilinear frac masking */
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,        pvParamState,  TPUTrilinearFracMaskPDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_PDM]);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,        pvParamState,  TPUTrilinearFracMaskVDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_VDM]);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,        pvParamState,  TPUTrilinearFracMaskCDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_CDM]);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,        pvParamState,  TPUTrilinearFracMaskTDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_TDM]);
+#endif
+
+       /*
+        * FW logs apphints
+        */
+       {
+               IMG_UINT32 ui32LogGroup, ui32TraceOrTBI;
+
+               SrvInitParamGetUINT32BitField(psDevInfo->psDeviceNode,  pvParamState,    EnableLogGroup,    ui32LogGroup);
+               SrvInitParamGetUINT32List(psDevInfo->psDeviceNode,      pvParamState,    FirmwareLogType, ui32TraceOrTBI);
+
+               /* Defaulting to TRACE */
+               BITMASK_SET(ui32LogGroup, RGXFWIF_LOG_TYPE_TRACE);
+
+#if defined(SUPPORT_TBI_INTERFACE)
+               if (ui32TraceOrTBI == 1 /* TBI */)
+               {
+                       if ((ui32LogGroup & RGXFWIF_LOG_TYPE_GROUP_MASK) == 0)
+                       {
+                               /* No groups configured - defaulting to MAIN group */
+                               BITMASK_SET(ui32LogGroup, RGXFWIF_LOG_TYPE_GROUP_MAIN);
+                       }
+                       BITMASK_UNSET(ui32LogGroup, RGXFWIF_LOG_TYPE_TRACE);
+               }
+#endif
+
+               psHints->ui32LogType = ui32LogGroup;
+       }
+
+       SrvInitParamGetBOOL(INITPARAM_NO_DEVICE,  pvParamState,  EnableTrustedDeviceAceConfig,  psHints->bEnableTrustedDeviceAceConfig);
+
+       SrvInitParamClose(pvParamState);
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function      GetFWConfigFlags
+
+ @Description   Initialise and return FW config flags
+
+ @Input         psDeviceNode       : Device node
+ @Input         psHints            : Apphints container
+ @Input         pui32FWConfigFlags : Pointer to config flags
+ @Input         pui32FWConfigFlagsExt : Pointer to extended config flags
+ @Input         pui32FwOsCfgFlags  : Pointer to FW OS config flags
+
+ @Return        void
+
+******************************************************************************/
+static INLINE void GetFWConfigFlags(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                    RGX_SRVINIT_APPHINTS *psHints,
+                                    IMG_UINT32 *pui32FWConfigFlags,
+                                    IMG_UINT32 *pui32FWConfigFlagsExt,
+                                    IMG_UINT32 *pui32FwOsCfgFlags)
+{
+       IMG_UINT32 ui32FWConfigFlags = 0;
+       IMG_UINT32 ui32FWConfigFlagsExt = 0;
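+	/* OS config flags: OR the cross-DM context switch setting with the
+	 * OS context-switch field of the EnableFWContextSwitch hint
+	 * (& ~CLRMSK keeps only that field).
+	 */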
+       IMG_UINT32 ui32FwOsCfgFlags = psHints->ui32FWContextSwitchCrossDM |
+                                     (psHints->ui32EnableFWContextSwitch & ~RGXFWIF_INICFG_OS_CTXSWITCH_CLRMSK);
+
+       if (PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               ui32FWConfigFlags = 0;
+               ui32FWConfigFlagsExt = 0;
+       }
+       else
+       {
+               ui32FWConfigFlags |= psHints->bAssertOnOutOfMem ? RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY : 0;
+               ui32FWConfigFlags |= psHints->bAssertOnHWRTrigger ? RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER : 0;
+               ui32FWConfigFlags |= psHints->bCheckMlist ? RGXFWIF_INICFG_CHECK_MLIST_EN : 0;
+               ui32FWConfigFlags |= psHints->bDisableClockGating ? RGXFWIF_INICFG_DISABLE_CLKGATING_EN : 0;
+               ui32FWConfigFlags |= psHints->bDisableDMOverlap ? RGXFWIF_INICFG_DISABLE_DM_OVERLAP : 0;
+               ui32FWConfigFlags |= psHints->bDisablePDP ? RGXFWIF_INICFG_DISABLE_PDP_EN : 0;
+               ui32FWConfigFlags |= psHints->bEnableDMKillRand ? RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN : 0;
+               ui32FWConfigFlags |= psHints->bEnableRandomCsw ? RGXFWIF_INICFG_CTXSWITCH_MODE_RAND : 0;
+               ui32FWConfigFlags |= psHints->bEnableSoftResetCsw ? RGXFWIF_INICFG_CTXSWITCH_SRESET_EN : 0;
+               ui32FWConfigFlags |= (psHints->ui32HWPerfFilter0 != 0 || psHints->ui32HWPerfFilter1 != 0) ? RGXFWIF_INICFG_HWPERF_EN : 0;
+               ui32FWConfigFlags |= (psHints->ui32ISPSchedulingLatencyMode << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT) & RGXFWIF_INICFG_ISPSCHEDMODE_MASK;
+#if defined(SUPPORT_VALIDATION)
+#if defined(NO_HARDWARE) && defined(PDUMP)
+               ui32FWConfigFlags |= psHints->bValidateIrq ? RGXFWIF_INICFG_VALIDATE_IRQ : 0;
+#endif
+#endif
+               ui32FWConfigFlags |= psHints->bHWPerfDisableCounterFilter ? RGXFWIF_INICFG_HWP_DISABLE_FILTER : 0;
+               ui32FWConfigFlags |= (psHints->ui32FWContextSwitchProfile << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) & RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK;
+
+#if defined(SUPPORT_VALIDATION)
+               ui32FWConfigFlags |= psHints->bEnablePowUnitsStateMaskChange ? RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN : 0;
+               ui32FWConfigFlags |= psHints->bValidateSOCUSCTimer ? RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER : 0;
+
+               if ((ui32FWConfigFlags & RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER) &&
+                   ((psHints->eRGXActivePMConf != 0) || (psHints->eRGXRDPowerIslandConf != 0)))
+               {
+                       psHints->eRGXActivePMConf = 0;
+                       psHints->eRGXRDPowerIslandConf = 0;
+                       PVR_DPF((PVR_DBG_WARNING, "SoC/USC Timer test needs to run with both EnableAPM and EnableRDPowerIsland disabled.\n"
+                                "Overriding current value for both with new value 0."));
+               }
+#endif
+               ui32FWConfigFlags |= psDeviceNode->pfnHasFBCDCVersion31(psDeviceNode) ? RGXFWIF_INICFG_FBCDC_V3_1_EN : 0;
+               ui32FWConfigFlags |= (psHints->ui32CDMArbitrationMode << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) & RGXFWIF_INICFG_CDM_ARBITRATION_MASK;
+       }
+
+       if ((ui32FwOsCfgFlags & RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN) &&
+               ((ui32FWConfigFlags & RGXFWIF_INICFG_ISPSCHEDMODE_MASK) == RGXFWIF_INICFG_ISPSCHEDMODE_NONE))
+       {
+               ui32FwOsCfgFlags &= ~RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN;
+               PVR_DPF((PVR_DBG_WARNING, "ISPSchedulingLatencyMode=0 implies context switching is inoperable on DM_3D.\n"
+                                "Overriding current value EnableFWContextSwitch=0x%x with new value 0x%x",
+                                psHints->ui32EnableFWContextSwitch,
+                                ui32FwOsCfgFlags & RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL));
+       }
+
+       *pui32FWConfigFlags    = ui32FWConfigFlags;
+       *pui32FWConfigFlagsExt = ui32FWConfigFlagsExt;
+       *pui32FwOsCfgFlags     = ui32FwOsCfgFlags;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function      GetFilterFlags
+
+ @Description   Initialise and return filter flags
+
+ @Input         psHints : Apphints container
+
+ @Return        IMG_UINT32 : Filter flags
+
+******************************************************************************/
+static INLINE IMG_UINT32 GetFilterFlags(RGX_SRVINIT_APPHINTS *psHints)
+{
+       IMG_UINT32 ui32FilterFlags = 0;
+
+       ui32FilterFlags |= psHints->bFilteringMode ? RGXFWIF_FILTCFG_NEW_FILTER_MODE : 0;
+       if (psHints->ui32TruncateMode == 2)
+       {
+               ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_INT;
+       }
+       else if (psHints->ui32TruncateMode == 3)
+       {
+               ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_HALF;
+       }
+
+       return ui32FilterFlags;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function      InitDeviceFlags
+
+ @Description   Initialise and return device flags
+
+ @Input         psHints          : Apphints container
+ @Input         pui32DeviceFlags : Pointer to device flags
+
+ @Return        void
+
+******************************************************************************/
+static INLINE void InitDeviceFlags(RGX_SRVINIT_APPHINTS *psHints,
+                                  IMG_UINT32 *pui32DeviceFlags)
+{
+       IMG_UINT32 ui32DeviceFlags = 0;
+
+#if defined(SUPPORT_VALIDATION)
+	ui32DeviceFlags |= psHints->bInjectPowUnitsStateMaskChange ? RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN : 0;
+#endif
+       ui32DeviceFlags |= psHints->bZeroFreelist ? RGXKM_DEVICE_STATE_ZERO_FREELIST : 0;
+       ui32DeviceFlags |= psHints->bDisableFEDLogging ? RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN : 0;
+#if defined(SUPPORT_VALIDATION)
+       ui32DeviceFlags |= psHints->bEnablePowUnitsStateMaskChange ? RGXKM_DEVICE_STATE_ENABLE_SPU_UNITS_POWER_MASK_CHANGE_EN : 0;
+#endif
+#if defined(PVRSRV_ENABLE_CCCB_GROW)
+       BITMASK_SET(ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN);
+#endif
+
+       *pui32DeviceFlags = ui32DeviceFlags;
+}
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION)
+/*!
+*******************************************************************************
+
+ @Function      RGXTDProcessFWImage
+
+ @Description   Fetch and send data used by the trusted device to complete
+                the FW image setup
+
+ @Input         psDeviceNode : Device node
+ @Input         psRGXFW      : Firmware blob
+ @Input         puFWParams   : Parameters used by the FW at boot time
+
+ @Return        PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXTDProcessFWImage(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                        OS_FW_IMAGE *psRGXFW,
+                                        PVRSRV_FW_BOOT_PARAMS *puFWParams)
+{
+       PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_TD_FW_PARAMS sTDFWParams;
+       PVRSRV_ERROR eError;
+
+       if (psDevConfig->pfnTDSendFWImage == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: TDSendFWImage not implemented!", __func__));
+               return PVRSRV_ERROR_NOT_IMPLEMENTED;
+       }
+
+       sTDFWParams.pvFirmware       = OSFirmwareData(psRGXFW);
+       sTDFWParams.ui32FirmwareSize = OSFirmwareSize(psRGXFW);
+
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+       {
+               sTDFWParams.uFWP.sMeta = puFWParams->sMeta;
+       }
+       else
+       {
+               sTDFWParams.uFWP.sRISCV = puFWParams->sRISCV;
+       }
+
+       eError = psDevConfig->pfnTDSendFWImage(psDevConfig->hSysData, &sTDFWParams);
+
+       return eError;
+}
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function      InitFirmware
+
+ @Description   Allocate, initialise and pdump Firmware code and data memory
+
+ @Input         psDeviceNode : Device Node
+ @Input         psHints      : Apphints
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                 RGX_SRVINIT_APPHINTS *psHints)
+{
+       OS_FW_IMAGE       *psRGXFW = NULL;
+       const IMG_BYTE    *pbRGXFirmware = NULL;
+
+       /* FW code memory */
+       IMG_DEVMEM_SIZE_T uiFWCodeAllocSize;
+       void              *pvFWCodeHostAddr;
+
+       /* FW data memory */
+       IMG_DEVMEM_SIZE_T uiFWDataAllocSize;
+       void              *pvFWDataHostAddr;
+
+       /* FW coremem code memory */
+       IMG_DEVMEM_SIZE_T uiFWCorememCodeAllocSize;
+       void              *pvFWCorememCodeHostAddr = NULL;
+
+       /* FW coremem data memory */
+       IMG_DEVMEM_SIZE_T uiFWCorememDataAllocSize;
+       void              *pvFWCorememDataHostAddr = NULL;
+
+       PVRSRV_FW_BOOT_PARAMS uFWParams;
+       RGX_LAYER_PARAMS sLayerParams;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+       /*
+        * Get pointer to Firmware image
+        */
+       eError = RGXLoadAndGetFWData(psDeviceNode, &psRGXFW, &pbRGXFirmware);
+
+       if (eError != PVRSRV_OK)
+       {
+               /* Error or confirmation message generated in RGXLoadAndGetFWData */
+               goto fw_load_fail;
+       }
+
+       sLayerParams.psDevInfo = psDevInfo;
+
+       /*
+        * Allocate Firmware memory
+        */
+
+       eError = RGXGetFWImageAllocSize(&sLayerParams,
+                                       pbRGXFirmware,
+                                       OSFirmwareSize(psRGXFW),
+                                       &uiFWCodeAllocSize,
+                                       &uiFWDataAllocSize,
+                                       &uiFWCorememCodeAllocSize,
+                                       &uiFWCorememDataAllocSize);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                       "%s: RGXGetFWImageAllocSize failed",
+                       __func__));
+               goto cleanup_initfw;
+       }
+
+       psDevInfo->ui32FWCodeSizeInBytes = uiFWCodeAllocSize;
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+       if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA))
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                       "%s: META DMA not available, disabling core memory code/data",
+                       __func__));
+               uiFWCorememCodeAllocSize = 0;
+               uiFWCorememDataAllocSize = 0;
+       }
+#endif
+
+       psDevInfo->ui32FWCorememCodeSizeInBytes = uiFWCorememCodeAllocSize;
+
+       eError = RGXInitAllocFWImgMem(psDeviceNode,
+                                     uiFWCodeAllocSize,
+                                     uiFWDataAllocSize,
+                                     uiFWCorememCodeAllocSize,
+                                     uiFWCorememDataAllocSize);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                       "%s: PVRSRVRGXInitAllocFWImgMem failed (%d)",
+                       __func__,
+                       eError));
+               goto cleanup_initfw;
+       }
+
+	/*
+	 * Acquire CPU pointers to the Firmware allocations. On trusted-device
+	 * builds (without NO_HARDWARE or SUPPORT_SECURITY_VALIDATION) the FW
+	 * allocations live in secure memory which the DDK cannot map, so the
+	 * host pointers are left NULL.
+	 */
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION)
+       eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, &pvFWCodeHostAddr);
+       PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", cleanup_initfw);
+
+#else
+       /* We can't get a pointer to a secure FW allocation from within the DDK */
+       pvFWCodeHostAddr = NULL;
+#endif
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION)
+       eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc, &pvFWDataHostAddr);
+       PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_code);
+
+#else
+       /* We can't get a pointer to a secure FW allocation from within the DDK */
+       pvFWDataHostAddr = NULL;
+#endif
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION)
+       if (uiFWCorememCodeAllocSize != 0)
+       {
+               eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc, &pvFWCorememCodeHostAddr);
+               PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_data);
+       }
+       else
+       {
+               pvFWCorememCodeHostAddr = NULL;
+       }
+#else
+       /* We can't get a pointer to a secure FW allocation from within the DDK */
+       pvFWCorememCodeHostAddr = NULL;
+#endif
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION)
+       if (uiFWCorememDataAllocSize != 0)
+       {
+               eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc, &pvFWCorememDataHostAddr);
+               PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_corememcode);
+       }
+       else
+#endif
+       {
+               pvFWCorememDataHostAddr = NULL;
+       }
+
+       /*
+        * Prepare FW boot parameters
+        */
+
+       if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+       {
+               uFWParams.sMeta.sFWCodeDevVAddr =  psDevInfo->sFWCodeDevVAddrBase;
+               uFWParams.sMeta.sFWDataDevVAddr =  psDevInfo->sFWDataDevVAddrBase;
+               uFWParams.sMeta.sFWCorememCodeDevVAddr =  psDevInfo->sFWCorememCodeDevVAddrBase;
+               uFWParams.sMeta.sFWCorememCodeFWAddr = psDevInfo->sFWCorememCodeFWAddr;
+               uFWParams.sMeta.uiFWCorememCodeSize = uiFWCorememCodeAllocSize;
+               uFWParams.sMeta.sFWCorememDataDevVAddr = psDevInfo->sFWCorememDataStoreDevVAddrBase;
+               uFWParams.sMeta.sFWCorememDataFWAddr = psDevInfo->sFWCorememDataStoreFWAddr;
+#if defined(RGXFW_META_SUPPORT_2ND_THREAD)
+               uFWParams.sMeta.ui32NumThreads = 2;
+#else
+               uFWParams.sMeta.ui32NumThreads = 1;
+#endif
+       }
+       else
+       {
+               uFWParams.sRISCV.sFWCorememCodeDevVAddr = psDevInfo->sFWCorememCodeDevVAddrBase;
+               uFWParams.sRISCV.sFWCorememCodeFWAddr   = psDevInfo->sFWCorememCodeFWAddr;
+               uFWParams.sRISCV.uiFWCorememCodeSize    = uiFWCorememCodeAllocSize;
+
+               uFWParams.sRISCV.sFWCorememDataDevVAddr = psDevInfo->sFWCorememDataStoreDevVAddrBase;
+               uFWParams.sRISCV.sFWCorememDataFWAddr   = psDevInfo->sFWCorememDataStoreFWAddr;
+               uFWParams.sRISCV.uiFWCorememDataSize    = uiFWCorememDataAllocSize;
+       }
+
+
+       /*
+        * Process the Firmware image and setup code and data segments.
+        *
+        * When the trusted device is enabled and the FW code lives
+        * in secure memory we will only setup the data segments here,
+        * while the code segments will be loaded to secure memory
+        * by the trusted device.
+        */
+       if (!psDeviceNode->bAutoVzFwIsUp)
+       {
+               eError = RGXProcessFWImage(&sLayerParams,
+                                                                  pbRGXFirmware,
+                                                                  pvFWCodeHostAddr,
+                                                                  pvFWDataHostAddr,
+                                                                  pvFWCorememCodeHostAddr,
+                                                                  pvFWCorememDataHostAddr,
+                                                                  &uFWParams);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: RGXProcessFWImage failed (%d)",
+                                __func__,
+                                eError));
+                       goto release_corememdata;
+               }
+       }
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION)
+       if (psRGXFW)
+       {
+               RGXTDProcessFWImage(psDeviceNode, psRGXFW, &uFWParams);
+       }
+#endif
+
+
+       /*
+        * PDump Firmware allocations
+        */
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION)
+       PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                             "Dump firmware code image");
+       DevmemPDumpLoadMem(psDevInfo->psRGXFWCodeMemDesc,
+                          0,
+                          uiFWCodeAllocSize,
+                          PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+       PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                             "Dump firmware data image");
+       DevmemPDumpLoadMem(psDevInfo->psRGXFWDataMemDesc,
+                          0,
+                          uiFWDataAllocSize,
+                          PDUMP_FLAGS_CONTINUOUS);
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION)
+       if (uiFWCorememCodeAllocSize != 0)
+       {
+               PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                     "Dump firmware coremem code image");
+               DevmemPDumpLoadMem(psDevInfo->psRGXFWCorememCodeMemDesc,
+                                                  0,
+                                                  uiFWCorememCodeAllocSize,
+                                                  PDUMP_FLAGS_CONTINUOUS);
+       }
+#endif
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION)
+       if (uiFWCorememDataAllocSize != 0)
+       {
+               PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                     "Dump firmware coremem data store image");
+               DevmemPDumpLoadMem(psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+                                                  0,
+                                                  uiFWCorememDataAllocSize,
+                                                  PDUMP_FLAGS_CONTINUOUS);
+       }
+#endif
+
+       /*
+        * Release Firmware allocations and clean up
+        */
+release_corememdata:
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION)
+	if (uiFWCorememDataAllocSize != 0)
+       {
+               DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc);
+       }
+
+release_corememcode:
+       if (uiFWCorememCodeAllocSize != 0)
+       {
+               DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc);
+       }
+#endif
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION)
+release_data:
+       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+#endif
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION)
+release_code:
+       DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc);
+#endif
+cleanup_initfw:
+       OSUnloadFirmware(psRGXFW);
+fw_load_fail:
+
+       return eError;
+}
+
+IMG_INTERNAL static inline IMG_UINT32 RGXHWPerfMaxDefinedBlks(PVRSRV_RGXDEV_INFO *);
+IMG_INTERNAL /*static inline*/ IMG_UINT32 RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **);
+
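+/* Return the counter-block type model table defined above together with the
+ * number of entries it contains.
+ */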
+IMG_INTERNAL /*static inline*/ IMG_UINT32
+RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **ppsModel)
+{
+    *ppsModel = gasCntBlkTypeModel;
+    return ARRAY_SIZE(gasCntBlkTypeModel);
+}
+
+/*!
+*******************************************************************************
+ @Function    RGXHWPerfMaxDefinedBlks
+
+ @Description Return the number of valid block-IDs for the given device node
+
+ @Input       (PVRSRV_RGXDEV_INFO *)   psDevInfo   device info to query
+
+ @Returns     (IMG_UINT32)             Number of block-IDs (RGX_CNTBLK_ID)
+                                       valid for this device.
+******************************************************************************/
+IMG_INTERNAL static inline IMG_UINT32
+RGXHWPerfMaxDefinedBlks(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       RGX_HWPERF_CNTBLK_RT_INFO sRtInfo;
+       IMG_UINT32  uiRetVal;
+       const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *psHWPBlkConfig;
+       IMG_UINT32  uiNumArrayEls, ui;
+
+       uiRetVal = RGX_CNTBLK_ID_DIRECT_LAST;
+
+       uiNumArrayEls = RGXGetHWPerfBlockConfig(&psHWPBlkConfig);
+
+       if (psHWPBlkConfig == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Unexpected NULL Config Block", __func__));
+               return 0;
+       }
+       PVR_ASSERT(uiNumArrayEls > 0);
+
+       /* Iterate over each block-ID and find the number of instances of each
+        * block which are present for this device type. We only query the
+        * Indirect blocks as their presence varies according to GPU. All direct
+        * blocks have an entry - but they may not be physically present.
+        */
+       for (ui = RGX_CNTBLK_ID_DIRECT_LAST; ui < uiNumArrayEls; ui++)
+       {
+               if (rgx_hwperf_blk_present(&psHWPBlkConfig[ui], (void *)psDevInfo, &sRtInfo))
+               {
+                       uiRetVal += sRtInfo.uiNumUnits;
+                       PVR_DPF((PVR_DBG_VERBOSE, "%s: Block %u, NumUnits %u, Total %u",
+                               __func__, ui, sRtInfo.uiNumUnits, uiRetVal));
+               }
+#ifdef DEBUG
+               else
+               {
+                       if (psHWPBlkConfig[ui].uiCntBlkIdBase == RGX_CNTBLK_ID_RAC0)
+                       {
+                               if (PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode,
+                                   RAY_TRACING_ARCH) > 2U)
+                               {
+                                       PVR_DPF((PVR_DBG_WARNING, "%s: Block %u *NOT* present",
+                                               __func__, ui));
+                               }
+                       }
+                       else
+                       {
+                               PVR_DPF((PVR_DBG_WARNING, "%s: Block %u *NOT* present",
+                                       __func__, ui));
+                       }
+               }
+#endif
+       }
+
+       PVR_DPF((PVR_DBG_VERBOSE, "%s: Num Units = %u", __func__, uiRetVal));
+
+       return uiRetVal;
+}
+
+/*!
+*******************************************************************************
+
+ @Function      InitialiseHWPerfCounters
+
+ @Description   Initialise the hardware performance counter configuration and
+                dump it out to PDump so that it can be modified at a later
+                point.
+
+ @Input         psDeviceNode        : Device node
+ @Input         pvDevice            : Device info
+ @Input         psHWPerfDataMemDesc : HWPerf control memory descriptor
+ @Input         psHWPerfInitDataInt : HWPerf control structure
+
+ @Return        void
+
+******************************************************************************/
+
+static void InitialiseHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     void *pvDevice,
+                                     DEVMEM_MEMDESC *psHWPerfDataMemDesc,
+                                     RGXFWIF_HWPERF_CTL *psHWPerfInitDataInt)
+{
+       RGXFWIF_HWPERF_CTL_BLK *psHWPerfInitBlkData;
+       IMG_UINT32 ui32CntBlkModelLen;
+       const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *asCntBlkTypeModel;
+       const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc;
+       IMG_UINT32 ui32BlockID, ui32BlkCfgIdx, ui32CounterIdx;
+       RGX_HWPERF_CNTBLK_RT_INFO sCntBlkRtInfo;
+       IMG_UINT32 uiUnit;
+       IMG_BOOL bDirect;
+
+       ui32CntBlkModelLen = RGXGetHWPerfBlockConfig(&asCntBlkTypeModel);
+
+       PVR_DPF((PVR_DBG_VERBOSE, "%s: #BlockConfig entries = %d", __func__, ui32CntBlkModelLen));
+
+       /* Initialise the number of blocks in the RGXFWIF_HWPERF_CTL structure.
+        * This allows Firmware to validate that it has been correctly configured.
+        */
+       psHWPerfInitDataInt->ui32NumBlocks = RGXHWPerfMaxDefinedBlks(pvDevice);
+
+       PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+           "HWPerf Block count = %u.",
+           psHWPerfInitDataInt->ui32NumBlocks);
+#if defined(PDUMP)
+       /* Ensure that we record the BVNC specific ui32NumBlocks in the PDUMP data
+        * so that when we playback we have the correct value present.
+        */
+       DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+           (size_t)&(psHWPerfInitDataInt->ui32NumBlocks) - (size_t)(psHWPerfInitDataInt),
+           psHWPerfInitDataInt->ui32NumBlocks, PDUMP_FLAGS_CONTINUOUS);
+#endif /* defined(PDUMP) */
+
+       PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+           "HWPerf Counter config starts here.");
+
+	/* Simply iterate over all the RGXFWIF_HWPERF_CTL blocks in order */
+       psHWPerfInitBlkData = &psHWPerfInitDataInt->sBlkCfg[0];
+
+       for (ui32BlkCfgIdx = 0; ui32BlkCfgIdx < ui32CntBlkModelLen;
+            ui32BlkCfgIdx++, psHWPerfInitBlkData++)
+       {
+               IMG_BOOL bSingleton;
+
+               /* Exit early if this core does not have any of these counter blocks
+                * due to core type/BVNC features.... */
+               psBlkTypeDesc = &asCntBlkTypeModel[ui32BlkCfgIdx];
+
+               if (psBlkTypeDesc == NULL)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Unexpected NULL - Index %d / %d",
+                           __func__, ui32BlkCfgIdx, ui32CntBlkModelLen));
+                       continue;
+               }
+
+               PVR_DPF((PVR_DBG_VERBOSE,
+                       "%s: CfgIdx = %u, InitBlkData @ 0x%p, BlkTypeDesc @ 0x%p",
+                       __func__, ui32BlkCfgIdx, psHWPerfInitBlkData, psBlkTypeDesc));
+
+               if (psBlkTypeDesc->pfnIsBlkPresent(psBlkTypeDesc, pvDevice, &sCntBlkRtInfo) == IMG_FALSE)
+               {
+                       PVR_DPF((PVR_DBG_VERBOSE, "%s: %s [ID 0x%x] NOT present", __func__,
+                           psBlkTypeDesc->pszBlockNameComment,
+                           psBlkTypeDesc->uiCntBlkIdBase ));
+                       /* Block isn't present, but has an entry in the table. Populate
+                        * the Init data so that we can track the block later.
+                        */
+                       psHWPerfInitBlkData->uiBlockID = psBlkTypeDesc->uiCntBlkIdBase;
+                       continue;
+               }
+#ifdef DEBUG
+               else
+               {
+                       PVR_DPF((PVR_DBG_VERBOSE, "%s: %s has %d %s", __func__,
+                               psBlkTypeDesc->pszBlockNameComment, sCntBlkRtInfo.uiNumUnits,
+                           (sCntBlkRtInfo.uiNumUnits > 1) ? "units" : "unit"));
+               }
+#endif /* DEBUG */
+
+               /* Program all counters in one block so those already on may
+                * be configured off and vice-versa. */
+               bDirect = psBlkTypeDesc->uiIndirectReg == 0;
+
+               /* Set if there is only one instance of this block-ID present */
+               bSingleton = sCntBlkRtInfo.uiNumUnits == 1;
+
+               for (ui32BlockID = psBlkTypeDesc->uiCntBlkIdBase, uiUnit = 0;
+                    ui32BlockID < psBlkTypeDesc->uiCntBlkIdBase+sCntBlkRtInfo.uiNumUnits;
+                    ui32BlockID++, uiUnit++)
+               {
+
+                       if (bDirect)
+                       {
+                               PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                                   "Block : %s", psBlkTypeDesc->pszBlockNameComment);
+                       }
+                       else
+                       {
+                               PDUMPCOMMENTWITHFLAGS(psDeviceNode,
+                                   PDUMP_FLAGS_CONTINUOUS,
+                                   "Unit %d Block : %s%d",
+                                   ui32BlockID-psBlkTypeDesc->uiCntBlkIdBase,
+                                   psBlkTypeDesc->pszBlockNameComment, uiUnit);
+                       }
+
+                       psHWPerfInitBlkData->uiBlockID = ui32BlockID;
+                       PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                           "uiBlockID: The Block ID for the layout block. See RGX_CNTBLK_ID for further information.");
+#if defined(PDUMP)
+                       DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+                                                       (size_t)&(psHWPerfInitBlkData->uiBlockID) - (size_t)(psHWPerfInitDataInt),
+                                                       psHWPerfInitBlkData->uiBlockID,
+                                                       PDUMP_FLAGS_CONTINUOUS);
+#endif /* PDUMP */
+
+                       psHWPerfInitBlkData->uiNumCounters = 0;
+                       PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                           "uiNumCounters (X): Specifies the number of valid counters"
+                           " [0..%d] which follow.", RGX_CNTBLK_COUNTERS_MAX);
+#if defined(PDUMP)
+                       DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+                           (size_t)&(psHWPerfInitBlkData->uiNumCounters) - (size_t)(psHWPerfInitDataInt),
+                           psHWPerfInitBlkData->uiNumCounters,
+                           PDUMP_FLAGS_CONTINUOUS);
+#endif /* PDUMP */
+
+                       psHWPerfInitBlkData->uiEnabled = 0;
+                       PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                           "uiEnabled: Set to 0x1 if the block needs to be enabled during playback.");
+#if defined(PDUMP)
+                       DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+                           (size_t)&(psHWPerfInitBlkData->uiEnabled) - (size_t)(psHWPerfInitDataInt),
+                           psHWPerfInitBlkData->uiEnabled,
+                           PDUMP_FLAGS_CONTINUOUS);
+#endif /* PDUMP */
+
+                       for (ui32CounterIdx = 0; ui32CounterIdx < RGX_CNTBLK_COUNTERS_MAX; ui32CounterIdx++)
+                       {
+                               psHWPerfInitBlkData->aui32CounterCfg[ui32CounterIdx] = IMG_UINT32_C(0x00000000);
+
+                               if (bDirect)
+                               {
+                                       PDUMPCOMMENTWITHFLAGS(psDeviceNode,
+                                           PDUMP_FLAGS_CONTINUOUS,
+                                           "%s_COUNTER_%d",
+                                           psBlkTypeDesc->pszBlockNameComment, ui32CounterIdx);
+                               }
+                               else
+                               {
+                                       PDUMPCOMMENTWITHFLAGS(psDeviceNode,
+                                           PDUMP_FLAGS_CONTINUOUS,
+                                           "%s%d_COUNTER_%d",
+                                           psBlkTypeDesc->pszBlockNameComment,
+                                           uiUnit, ui32CounterIdx);
+                               }
+#if defined(PDUMP)
+                               DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+                                   (size_t)&(psHWPerfInitBlkData->aui32CounterCfg[ui32CounterIdx]) - (size_t)(psHWPerfInitDataInt),
+                                   psHWPerfInitBlkData->aui32CounterCfg[ui32CounterIdx],
+                                   PDUMP_FLAGS_CONTINUOUS);
+#endif /* PDUMP */
+
+                       }
+
+                       /* Update our block reference for indirect units which have more
+                        * than a single unit present. Only increment if we have more than
+                        * one unit left to process as the external loop counter will be
+                        * incremented after final unit is processed.
+                        */
+                       if (!bSingleton && (uiUnit < (sCntBlkRtInfo.uiNumUnits - 1)))
+                       {
+                               psHWPerfInitBlkData++;
+                       }
+               }
+       }
+       PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                             "HWPerf Counter config finishes here.");
+}
+
+/*!
+*******************************************************************************
+
+ @Function      InitialiseAllCounters
+
+ @Description   Initialise HWPerf and custom counters
+
+ @Input         psDeviceNode : Device Node
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR InitialiseAllCounters(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+       RGXFWIF_HWPERF_CTL *psHWPerfInitData;
+       PVRSRV_ERROR eError;
+
+       eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfHWPerfCountersMemDesc, (void **)&psHWPerfInitData);
+       PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", failHWPerfCountersMemDescAqCpuVirt);
+
+	InitialiseHWPerfCounters(psDeviceNode, psDevInfo, psDevInfo->psRGXFWIfHWPerfCountersMemDesc, psHWPerfInitData);
+
+	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
+
+failHWPerfCountersMemDescAqCpuVirt:
+	return eError;
+}
+
+/*
+ * _ParseHTBAppHints:
+ *
+ * Generate necessary references to the globally visible AppHints which are
+ * declared in the above #include "km_apphint_defs.h"
+ * Without these local references some compiler tool-chains will treat
+ * unreferenced declarations as fatal errors. This function duplicates the
+ * HTB_specific apphint references which are made in htbserver.c:HTBInit()
+ * However, it makes absolutely *NO* use of these hints.
+ */
+static void
+_ParseHTBAppHints(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       void *pvParamState = NULL;
+       IMG_UINT32 ui32LogType;
+       IMG_BOOL bAnyLogGroupConfigured;
+       IMG_UINT32 ui32BufferSize;
+       IMG_UINT32 ui32OpMode;
+
+       /* Services initialisation parameters */
+       pvParamState = SrvInitParamOpen();
+       if (pvParamState == NULL)
+               return;
+
+       SrvInitParamGetUINT32BitField(INITPARAM_NO_DEVICE,  pvParamState,    EnableHTBLogGroup,    ui32LogType);
+       bAnyLogGroupConfigured = ui32LogType ? IMG_TRUE : IMG_FALSE;
+       SrvInitParamGetUINT32List(INITPARAM_NO_DEVICE,      pvParamState,    HTBOperationMode,      ui32OpMode);
+       SrvInitParamGetUINT32(INITPARAM_NO_DEVICE,          pvParamState,    HTBufferSizeInKB,  ui32BufferSize);
+
+       SrvInitParamClose(pvParamState);
+}
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+static PVRSRV_ERROR RGXValidateTDHeap(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                         PVRSRV_PHYS_HEAP ePhysHeap,
+                                                                         PHYS_HEAP_USAGE_FLAGS ui32RequiredFlags)
+{
+       PHYS_HEAP *psHeap = psDeviceNode->apsPhysHeap[ePhysHeap];
+       PHYS_HEAP_USAGE_FLAGS ui32HeapFlags = PhysHeapGetFlags(psHeap);
+       PHYS_HEAP_USAGE_FLAGS ui32InvalidFlags = ~(PHYS_HEAP_USAGE_FW_PRIV_DATA | PHYS_HEAP_USAGE_FW_CODE
+                                                                                          | PHYS_HEAP_USAGE_GPU_SECURE);
+
+       PVR_LOG_RETURN_IF_FALSE_VA((ui32HeapFlags & ui32RequiredFlags) != 0,
+                                                          PVRSRV_ERROR_NOT_SUPPORTED,
+                                                          "TD heap is missing required flags. flags: 0x%x / required:0x%x",
+                                                          ui32HeapFlags,
+                                                          ui32RequiredFlags);
+
+       PVR_LOG_RETURN_IF_FALSE_VA((ui32HeapFlags & ui32InvalidFlags) == 0,
+                                                          PVRSRV_ERROR_NOT_SUPPORTED,
+                                                          "TD heap uses invalid flags. flags: 0x%x / invalid:0x%x",
+                                                          ui32HeapFlags,
+                                                          ui32InvalidFlags);
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXValidateTDHeaps(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR eError;
+
+       eError = RGXValidateTDHeap(psDeviceNode, PVRSRV_PHYS_HEAP_FW_PRIV_DATA, PHYS_HEAP_USAGE_FW_PRIV_DATA);
+       PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeap:FW_PRIV_DATA");
+
+       eError = RGXValidateTDHeap(psDeviceNode, PVRSRV_PHYS_HEAP_FW_CODE, PHYS_HEAP_USAGE_FW_CODE);
+       PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeap:FW_CODE");
+
+       eError = RGXValidateTDHeap(psDeviceNode, PVRSRV_PHYS_HEAP_GPU_SECURE, PHYS_HEAP_USAGE_GPU_SECURE);
+       PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeap:GPU_SECURE");
+
+       return PVRSRV_OK;
+}
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function      RGXInit
+
+ @Description   RGX Initialisation
+
+ @Input         psDeviceNode
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR eError;
+
+       /* Services initialisation parameters */
+       RGX_SRVINIT_APPHINTS sApphints = {0};
+       IMG_UINT32 ui32FWConfigFlags, ui32FWConfigFlagsExt, ui32FwOsCfgFlags;
+       IMG_UINT32 ui32DeviceFlags;
+       IMG_UINT32 ui32AvailablePowUnitsMask, ui32AvailableRACMask;
+
+       PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+       /* Number of HWPerf Block-IDs (RGX_CNTBLK_ID) which are available */
+       IMG_UINT32 ui32NumHWPerfBlocks;
+
+       /* Size of the RGXFWIF_HWPERF_CTL_BLK structure - varies by BVNC */
+       IMG_UINT32 ui32HWPerfBlkSize;
+       RGX_LAYER_PARAMS sLayerParams;
+
+       PDUMPCOMMENT(psDeviceNode, "RGX Initialisation Part 1");
+
+       PDUMPCOMMENT(psDeviceNode, "Device Name: %s",
+                    psDeviceNode->psDevConfig->pszName);
+       PDUMPCOMMENT(psDeviceNode, "Device ID: %u (%d)",
+                    psDeviceNode->sDevId.ui32InternalID,
+                    psDeviceNode->sDevId.i32OsDeviceID);
+
+       if (psDeviceNode->psDevConfig->pszVersion)
+       {
+               PDUMPCOMMENT(psDeviceNode, "Device Version: %s",
+                            psDeviceNode->psDevConfig->pszVersion);
+       }
+
+       /* pdump info about the core */
+       PDUMPCOMMENT(psDeviceNode,
+                    "RGX Version Information (KM): %d.%d.%d.%d",
+                    psDevInfo->sDevFeatureCfg.ui32B,
+                    psDevInfo->sDevFeatureCfg.ui32V,
+                    psDevInfo->sDevFeatureCfg.ui32N,
+                    psDevInfo->sDevFeatureCfg.ui32C);
+
+       RGXInitMultiCoreInfo(psDeviceNode);
+
+#if defined(PDUMP)
+       eError = DevmemIntAllocDefBackingPage(psDeviceNode,
+                                             &psDeviceNode->sDummyPage,
+                                             PVR_DUMMY_PAGE_INIT_VALUE,
+                                             DUMMY_PAGE,
+                                             IMG_TRUE);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate dummy page.", __func__));
+               goto cleanup;
+       }
+       eError = DevmemIntAllocDefBackingPage(psDeviceNode,
+                                             &psDeviceNode->sDevZeroPage,
+                                             PVR_ZERO_PAGE_INIT_VALUE,
+                                             DEV_ZERO_PAGE,
+                                             IMG_TRUE);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate Zero page.", __func__));
+               goto cleanup;
+       }
+#endif
+
+       sLayerParams.psDevInfo = psDevInfo;
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+       eError = RGXValidateTDHeaps(psDeviceNode);
+       PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeaps");
+#endif
+
+#if defined(SUPPORT_AUTOVZ)
+       if (PVRSRV_VZ_MODE_IS(HOST))
+       {
+               /* The RGX_CR_MTS_DM0_INTERRUPT_ENABLE register is always set by the firmware during initialisation,
+                * so it provides a reliable way of determining whether the firmware has already been booted */
+               psDeviceNode->bAutoVzFwIsUp = (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_DM0_INTERRUPT_ENABLE) != 0);
+
+               PVR_LOG(("AutoVz startup check: firmware is %s;",
+                               (psDeviceNode->bAutoVzFwIsUp) ? "already running" : "powered down"));
+       }
+       else if (PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               /* Guest assumes the firmware is always available */
+               psDeviceNode->bAutoVzFwIsUp = IMG_TRUE;
+       }
+       else
+#endif
+       {
+               /* Firmware does not follow the AutoVz life-cycle */
+               psDeviceNode->bAutoVzFwIsUp = IMG_FALSE;
+       }
+
+       if (PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp))
+       {
+               /* set the device power state here as the regular power
+                * callbacks will not be executed on this driver */
+               psDevInfo->bRGXPowered = IMG_TRUE;
+       }
+
+       /* Set which HW Safety Events will be handled by the driver */
+       psDevInfo->ui32HostSafetyEventMask |= RGX_IS_FEATURE_SUPPORTED(psDevInfo, WATCHDOG_TIMER) ?
+                                                                                 RGX_CR_EVENT_STATUS_WDT_TIMEOUT_EN : 0;
+       psDevInfo->ui32HostSafetyEventMask |= (RGX_DEVICE_HAS_FEATURE_VALUE(&sLayerParams, ECC_RAMS)
+                                                                                  && (RGX_DEVICE_GET_FEATURE_VALUE(&sLayerParams, ECC_RAMS) > 0)) ?
+                                                                                 RGX_CR_EVENT_STATUS_FAULT_FW_EN : 0;
+
+#if defined(PDUMP)
+       PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
+                             "Register defs revision: %d", RGX_CR_DEFS_KM_REVISION);
+#endif
+
+       ui32NumHWPerfBlocks = RGXHWPerfMaxDefinedBlks((void *)psDevInfo);
+
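+       /* RGXFWIF_HWPERF_CTL is assumed to embed one RGXFWIF_HWPERF_CTL_BLK,
+        * hence only (ui32NumHWPerfBlocks - 1) additional block entries are
+        * added to the allocation size below. */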
+       ui32HWPerfBlkSize = sizeof(RGXFWIF_HWPERF_CTL) +
+               (ui32NumHWPerfBlocks - 1) * sizeof(RGXFWIF_HWPERF_CTL_BLK);
+
+       /* Services initialisation parameters */
+       _ParseHTBAppHints(psDeviceNode);
+       GetApphints(psDevInfo, &sApphints);
+       InitDeviceFlags(&sApphints, &ui32DeviceFlags);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#if defined(EMULATOR)
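+       /* On the emulator, optionally switch the trusted-device ACE configuration
+        * on (driven by an AppHint) when the core exposes an AXI ACE interface. */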
+       if ((sApphints.bEnableTrustedDeviceAceConfig) &&
+               (RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACE)))
+       {
+               SetTrustedDeviceAceEnabled();
+       }
+#endif
+#endif
+
+       eError = RGXInitCreateFWKernelMemoryContext(psDeviceNode);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create FW kernel memory context (%u)",
+                        __func__, eError));
+               goto cleanup;
+       }
+
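+       /* The firmware image is only loaded when not running as a virtualisation
+        * guest; a guest driver relies on the host having brought the firmware up. */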
+       if (!PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               eError = InitFirmware(psDeviceNode, &sApphints);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s: InitFirmware failed (%d)",
+                               __func__,
+                               eError));
+                       goto cleanup;
+               }
+       }
+
+       /*
+        * Setup Firmware initialisation data
+        */
+
+       GetFWConfigFlags(psDeviceNode, &sApphints, &ui32FWConfigFlags, &ui32FWConfigFlagsExt, &ui32FwOsCfgFlags);
+
+#if defined(SUPPORT_VALIDATION)
+       ui32AvailablePowUnitsMask = sApphints.ui32AvailablePowUnitsMask;
+       ui32AvailableRACMask = sApphints.ui32AvailableRACMask;
+#else
+       ui32AvailablePowUnitsMask = AVAIL_POW_UNITS_MASK_DEFAULT;
+       ui32AvailableRACMask = AVAIL_RAC_MASK_DEFAULT;
+#endif
+
+       eError = RGXInitFirmware(psDeviceNode,
+                                sApphints.bEnableSignatureChecks,
+                                sApphints.ui32SignatureChecksBufSize,
+                                sApphints.ui32HWPerfFWBufSize,
+                                (IMG_UINT64)sApphints.ui32HWPerfFilter0 |
+                                ((IMG_UINT64)sApphints.ui32HWPerfFilter1 << 32),
+                                ui32FWConfigFlags,
+                                sApphints.ui32LogType,
+                                GetFilterFlags(&sApphints),
+                                sApphints.ui32JonesDisableMask,
+                                sApphints.ui32HWRDebugDumpLimit,
+#if defined(SUPPORT_VALIDATION)
+                                sApphints.ui32RenderKillingCtl,
+                                sApphints.ui32CDMTDMKillingCtl,
+                                &sApphints.aui32TPUTrilinearFracMask[0],
+                                &sApphints.aui32USRMNumRegions[0],
+                                (IMG_PUINT64)&sApphints.aui64UVBRMNumRegions[0],
+#else
+                                0, 0,
+                                NULL, NULL, NULL,
+#endif
+                                ui32HWPerfBlkSize,
+                                sApphints.eRGXRDPowerIslandConf,
+                                sApphints.eFirmwarePerf,
+                                sApphints.ui32KCCBSizeLog2,
+                                ui32FWConfigFlagsExt,
+                                ui32AvailablePowUnitsMask,
+                                                        ui32AvailableRACMask,
+                                ui32FwOsCfgFlags);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVRGXInitFirmware failed (%d)",
+                       __func__,
+                       eError));
+               goto cleanup;
+       }
+
+       if (!PVRSRV_VZ_MODE_IS(GUEST))
+       {
+               eError = InitialiseAllCounters(psDeviceNode);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s: InitialiseAllCounters failed (%d)",
+                               __func__,
+                               eError));
+                       goto cleanup;
+               }
+       }
+
+       /*
+        * Perform second stage of RGX initialisation
+        */
+       eError = RGXInitDevPart2(psDeviceNode,
+                                ui32DeviceFlags,
+                                sApphints.ui32HWPerfHostFilter,
+                                sApphints.eRGXActivePMConf,
+                                ui32AvailablePowUnitsMask,
+                                                        ui32AvailableRACMask);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                       "%s: PVRSRVRGXInitDevPart2KM failed (%d)",
+                       __func__,
+                       eError));
+               goto cleanup;
+       }
+
+#if defined(SUPPORT_VALIDATION)
+       PVRSRVAppHintDumpState(psDeviceNode);
+#endif
+
+       eError = PVRSRV_OK;
+
+cleanup:
+       return eError;
+}
+
+/******************************************************************************
+ End of file (rgxsrvinit.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxstartstop.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxstartstop.c
new file mode 100644 (file)
index 0000000..30b4a89
--- /dev/null
@@ -0,0 +1,871 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific start/stop routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific start/stop routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* The routines implemented here are built on top of an abstraction layer to
+ * hide DDK/OS-specific details in case they are used outside of the DDK
+ * (e.g. when trusted device is enabled).
+ * Any new dependency should be added to rgxlayer.h.
+ * Any new code should be built on top of the existing abstraction layer,
+ * which should be extended when necessary. */
+#include "rgxstartstop.h"
+#include "rgxfwutils.h"
+
+/*
+ * Specific fields for RGX_CR_IDLE must not be polled in pdumps
+ * (technical reasons)
+ */
+#define CR_IDLE_UNSELECTED_MASK ((~RGX_CR_SLC_IDLE_ACE_CONVERTERS_CLRMSK) | \
+                                                                (~RGX_CR_SLC_IDLE_OWDB_CLRMSK) |               \
+                                                                (RGX_CR_SLC_IDLE_FBCDC_ARB_EN))
+
+static PVRSRV_ERROR RGXWriteMetaCoreRegThoughSP(const void *hPrivate,
+                                                IMG_UINT32 ui32CoreReg,
+                                                IMG_UINT32 ui32Value)
+{
+       IMG_UINT32 i = 0;
+
+       RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXUXXRXDT_OFFSET, ui32Value);
+       RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXUXXRXRQ_OFFSET, ui32CoreReg & ~META_CR_TXUXXRXRQ_RDnWR_BIT);
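+       /* Clearing the RDnWR bit selects a write (rather than a read) of the core
+        * register; the data word was staged in TXUXXRXDT above. */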
+
+       do
+       {
+               RGXReadMetaRegThroughSP(hPrivate, META_CR_TXUXXRXRQ_OFFSET, &ui32Value);
+       } while (((ui32Value & META_CR_TXUXXRXRQ_DREADY_BIT) != META_CR_TXUXXRXRQ_DREADY_BIT) && (i++ < 1000));
+
+       if ((ui32Value & META_CR_TXUXXRXRQ_DREADY_BIT) != META_CR_TXUXXRXRQ_DREADY_BIT)
+       {
+               /* DREADY was not raised within the poll budget */
+               RGXCommentLog(hPrivate, "RGXWriteMetaCoreRegThoughSP: Timeout");
+               return PVRSRV_ERROR_TIMEOUT;
+       }
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXStartFirmware(const void *hPrivate)
+{
+       PVRSRV_ERROR eError;
+
+       /* Give privilege to debug and slave port */
+       RGXWriteMetaRegThroughSP(hPrivate, META_CR_SYSC_JTAG_THREAD, META_CR_SYSC_JTAG_THREAD_PRIV_EN);
+
+       /* Point Meta to the bootloader address, global (uncached) range */
+       eError = RGXWriteMetaCoreRegThoughSP(hPrivate,
+                                            PC_ACCESS(0),
+                                            RGXFW_BOOTLDR_META_ADDR | META_MEM_GLOBAL_RANGE_BIT);
+
+       if (eError != PVRSRV_OK)
+       {
+               RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Slave boot Start failed!");
+               return eError;
+       }
+
+       /* Enable minim encoding */
+       RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXPRIVEXT, META_CR_TXPRIVEXT_MINIM_EN);
+
+       /* Enable Meta thread */
+       RGXWriteMetaRegThroughSP(hPrivate, META_CR_T0ENABLE_OFFSET, META_CR_TXENABLE_ENABLE_BIT);
+
+       return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function      RGXInitMetaProcWrapper
+
+ @Description   Configures the hardware wrapper of the META processor
+
+ @Input         hPrivate  : Implementation specific data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXInitMetaProcWrapper(const void *hPrivate)
+{
+       IMG_UINT64 ui64GartenConfig;
+
+       /* Garten IDLE bit controlled by META */
+       ui64GartenConfig = RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META;
+
+       RGXCommentLog(hPrivate, "RGXStart: Configure META wrapper");
+       RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, ui64GartenConfig);
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function      RGXInitRiscvProcWrapper
+
+ @Description   Configures the hardware wrapper of the RISCV processor
+
+ @Input         hPrivate  : Implementation specific data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXInitRiscvProcWrapper(const void *hPrivate)
+{
+       IMG_DEV_VIRTADDR sTmp;
+
+       RGXCommentLog(hPrivate, "RGXStart: Configure RISCV wrapper");
+
+       RGXCommentLog(hPrivate, "RGXStart: Write boot code remap");
+       RGXAcquireBootCodeAddr(hPrivate, &sTmp);
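+       /* The remap entry packs the firmware boot code device-virtual address, the
+        * remap window size, the MMU context and the fetch-enable bit into a
+        * single 64-bit register. */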
+       RGXWriteReg64(hPrivate,
+                     RGXRISCVFW_BOOTLDR_CODE_REMAP,
+                     sTmp.uiAddr |
+                     (IMG_UINT64) (RGX_FIRMWARE_RAW_HEAP_SIZE >> FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT)
+                       << RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT |
+                     (IMG_UINT64) MMU_CONTEXT_MAPPING_FWPRIV << FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT |
+                     RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN);
+
+       RGXCommentLog(hPrivate, "RGXStart: Write boot data remap");
+       RGXAcquireBootDataAddr(hPrivate, &sTmp);
+       RGXWriteReg64(hPrivate,
+                     RGXRISCVFW_BOOTLDR_DATA_REMAP,
+                     sTmp.uiAddr |
+                     (IMG_UINT64) (RGX_FIRMWARE_RAW_HEAP_SIZE >> FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT)
+                       << RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT |
+                     (IMG_UINT64) MMU_CONTEXT_MAPPING_FWPRIV << FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT |
+                     RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN);
+
+       /* Garten IDLE bit controlled by RISCV */
+       RGXCommentLog(hPrivate, "RGXStart: Set GARTEN_IDLE type to RISCV");
+       RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META);
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function      RGXInitBIF
+
+ @Description   Initialise RGX BIF
+
+ @Input         hPrivate : Implementation specific data
+
+ @Return        void
+
+******************************************************************************/
+static void RGXInitBIF(const void *hPrivate)
+{
+       IMG_DEV_PHYADDR sPCAddr;
+       IMG_UINT32 uiPCAddr;
+
+       /*
+        * Acquire the address of the Kernel Page Catalogue.
+        */
+       RGXAcquireKernelMMUPC(hPrivate, &sPCAddr);
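+       /* Align the PC physical address down and pack it into the BASE_ADDR field
+        * of the MMU_CBASE_MAPPING register. */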
+       uiPCAddr = (((sPCAddr.uiAddr >> RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT)
+                    << RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT)
+                   & ~RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK);
+
+       /*
+        * Write the kernel catalogue base.
+        */
+       RGXCommentLog(hPrivate, "RGX firmware MMU Page Catalogue");
+
+
+       /* Set the mapping context */
+       RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, MMU_CONTEXT_MAPPING_FWPRIV);
+       (void)RGXReadReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT); /* Fence write */
+
+       /* Write the cat-base address */
+       RGXWriteKernelMMUPC32(hPrivate,
+                             RGX_CR_MMU_CBASE_MAPPING,
+                             RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT,
+                             RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT,
+                             uiPCAddr);
+
+#if (MMU_CONTEXT_MAPPING_FWIF != MMU_CONTEXT_MAPPING_FWPRIV)
+       /* Set-up different MMU ID mapping to the same PC used above */
+       RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, MMU_CONTEXT_MAPPING_FWIF);
+       (void)RGXReadReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT); /* Fence write */
+
+       RGXWriteKernelMMUPC32(hPrivate,
+                             RGX_CR_MMU_CBASE_MAPPING,
+                             RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT,
+                             RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT,
+                             uiPCAddr);
+#endif
+}
+
+
+/**************************************************************************/ /*!
+@Function       RGXInitMMURangeRegisters
+@Description    Initialises MMU range registers for Non4K pages.
+@Input          hPrivate           Implementation specific data
+@Return         void
+ */ /**************************************************************************/
+static void RGXInitMMURangeRegisters(const void *hPrivate)
+{
+       RGX_LAYER_PARAMS *psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psParams->psDevInfo;
+       IMG_UINT32 ui32RegAddr = RGX_CR_MMU_PAGE_SIZE_RANGE_ONE;
+       IMG_UINT32 i;
+
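+       /* Each 64-bit range register describes one Non4K page-size range; the
+        * registers are contiguous, hence the sizeof(IMG_UINT64) address stride. */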
+       for (i = 0; i < ARRAY_SIZE(psDevInfo->aui64MMUPageSizeRangeValue); ++i, ui32RegAddr += sizeof(IMG_UINT64))
+       {
+               RGXWriteReg64(hPrivate, ui32RegAddr, psDevInfo->aui64MMUPageSizeRangeValue[i]);
+       }
+}
+
+
+/**************************************************************************/ /*!
+@Function       RGXInitAXIACE
+@Description    Initialises AXI ACE registers
+@Input          hPrivate           Implementation specific data
+@Return         void
+ */ /**************************************************************************/
+static void RGXInitAXIACE(const void *hPrivate)
+{
+       IMG_UINT64 ui64RegVal;
+
+       /**
+        * The configuration below only applies to RGX cores that support the
+        * ACE/ACE-lite protocol and are connected to an ACE coherent interconnect.
+        */
+
+       /**
+        * Configure AxDomain and AxCache for MMU transactions.
+        * AxDomain is set to non-shareable (0x0).
+        */
+       ui64RegVal = RGX_CR_ACE_CTRL_MMU_AWCACHE_WRITE_BACK_WRITE_ALLOCATE |
+                                RGX_CR_ACE_CTRL_MMU_ARCACHE_WRITE_BACK_READ_ALLOCATE;
+
+       /**
+        * Configure AxCache for PM/MMU transactions.
+        * Set to the same value (i.e. WBRWALLOC caching, see rgxmmunit.c:RGXDerivePTEProt8)
+        * as used for non-coherent PTEs.
+        */
+       ui64RegVal |= (IMG_UINT64_C(0xF)) << RGX_CR_ACE_CTRL_PM_MMU_AXCACHE_SHIFT;
+
+       /**
+        * Configure AxDomain for non MMU transactions.
+        */
+       ui64RegVal |= (IMG_UINT64)(RGX_CR_ACE_CTRL_COH_DOMAIN_OUTER_SHAREABLE |
+                                                          RGX_CR_ACE_CTRL_NON_COH_DOMAIN_NON_SHAREABLE);
+
+       RGXCommentLog(hPrivate, "Init AXI-ACE interface");
+       RGXWriteReg64(hPrivate, RGX_CR_ACE_CTRL, ui64RegVal);
+}
+
+static void RGXMercerSoftResetSet(const void *hPrivate, IMG_UINT64 ui32MercerFlags)
+{
+       RGXWriteReg64(hPrivate, RGX_CR_MERCER_SOFT_RESET, ui32MercerFlags & RGX_CR_MERCER_SOFT_RESET_MASKFULL);
+
+       /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
+       (void) RGXReadReg64(hPrivate, RGX_CR_MERCER_SOFT_RESET);
+}
+
+static void RGXSPUSoftResetAssert(const void *hPrivate)
+{
+       /* Assert Mercer0 */
+       RGXMercerSoftResetSet(hPrivate, RGX_CR_MERCER0_SOFT_RESET_SPU_EN);
+       /* Assert Mercer1 */
+       RGXMercerSoftResetSet(hPrivate, RGX_CR_MERCER0_SOFT_RESET_SPU_EN | RGX_CR_MERCER1_SOFT_RESET_SPU_EN);
+       /* Assert Mercer2 */
+       RGXMercerSoftResetSet(hPrivate, RGX_CR_MERCER0_SOFT_RESET_SPU_EN | RGX_CR_MERCER1_SOFT_RESET_SPU_EN | RGX_CR_MERCER2_SOFT_RESET_SPU_EN);
+
+       RGXWriteReg32(hPrivate, RGX_CR_SWIFT_SOFT_RESET, RGX_CR_SWIFT_SOFT_RESET_MASKFULL);
+       /* Fence the previous write */
+       (void) RGXReadReg32(hPrivate, RGX_CR_SWIFT_SOFT_RESET);
+
+       RGXWriteReg32(hPrivate, RGX_CR_TEXAS_SOFT_RESET, RGX_CR_TEXAS_SOFT_RESET_MASKFULL);
+       /* Fence the previous write */
+       (void) RGXReadReg32(hPrivate, RGX_CR_TEXAS_SOFT_RESET);
+}
+
+static void RGXSPUSoftResetDeAssert(const void *hPrivate)
+{
+       RGXWriteReg32(hPrivate, RGX_CR_TEXAS_SOFT_RESET, 0);
+       /* Fence the previous write */
+       (void) RGXReadReg32(hPrivate, RGX_CR_TEXAS_SOFT_RESET);
+
+
+       RGXWriteReg32(hPrivate, RGX_CR_SWIFT_SOFT_RESET, 0);
+       /* Fence the previous write */
+       (void) RGXReadReg32(hPrivate, RGX_CR_SWIFT_SOFT_RESET);
+
+       /* Deassert Mercer2 */
+       RGXMercerSoftResetSet(hPrivate, RGX_CR_MERCER0_SOFT_RESET_SPU_EN | RGX_CR_MERCER1_SOFT_RESET_SPU_EN);
+       /* Deassert Mercer1 */
+       RGXMercerSoftResetSet(hPrivate, RGX_CR_MERCER0_SOFT_RESET_SPU_EN);
+       /* Deassert Mercer0 */
+       RGXMercerSoftResetSet(hPrivate, 0);
+}
+
+static void RGXResetSequence(const void *hPrivate, const IMG_CHAR *pcRGXFW_PROCESSOR)
+{
+       /* Set RGX in soft-reset */
+       RGXCommentLog(hPrivate, "RGXStart: soft reset assert step 1");
+       RGXSPUSoftResetAssert(hPrivate);
+
+       RGXCommentLog(hPrivate, "RGXStart: soft reset assert step 2");
+       RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_SOFT_RESET_JONES_ALL);
+
+       /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
+       (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+
+       RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_SOFT_RESET_JONES_ALL | RGX_SOFT_RESET_EXTRA);
+
+       (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+
+       /* Take everything out of reset but the FW processor */
+       RGXCommentLog(hPrivate, "RGXStart: soft reset de-assert step 1 excluding %s", pcRGXFW_PROCESSOR);
+       RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_SOFT_RESET_EXTRA | RGX_CR_SOFT_RESET_GARTEN_EN);
+
+       (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+
+       RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN);
+
+       (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+
+       RGXCommentLog(hPrivate, "RGXStart: soft reset de-assert step 2 excluding %s", pcRGXFW_PROCESSOR);
+       RGXSPUSoftResetDeAssert(hPrivate);
+
+       (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+}
+
+static void DeassertMetaReset(const void *hPrivate)
+{
+       /* Need to wait for at least 16 cycles before taking the FW processor out of reset ... */
+       RGXWaitCycles(hPrivate, 32, 3);
+
+       RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, 0x0);
+       (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+
+       /* ... and afterwards */
+       RGXWaitCycles(hPrivate, 32, 3);
+}
+
+static PVRSRV_ERROR InitJonesECCRAM(const void *hPrivate)
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32Value;
+       IMG_BOOL bMetaFW = RGX_DEVICE_HAS_FEATURE_VALUE(hPrivate, META);
+       IMG_UINT32 ui32Mask;
+
+       if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, ECC_RAMS) == 0)
+       {
+               return PVRSRV_ERROR_NOT_SUPPORTED;
+       }
+
+       if (bMetaFW)
+       {
+               /* META must be taken out of reset (without booting) during Coremem initialisation. */
+               RGXWriteReg32(hPrivate, RGX_CR_META_BOOT, 0);
+               DeassertMetaReset(hPrivate);
+       }
+
+       /* Clocks must be set to "on" during RAM initialisation. */
+       RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL0, RGX_CR_CLK_CTRL0_ALL_ON);
+       RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL1, RGX_CR_CLK_CTRL1_ALL_ON);
+
+       if (bMetaFW)
+       {
+               RGXWriteMetaRegThroughSP(hPrivate, META_CR_SYSC_JTAG_THREAD, META_CR_SYSC_JTAG_THREAD_PRIV_EN);
+               RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXCLKCTRL, META_CR_TXCLKCTRL_ALL_ON);
+               RGXReadMetaRegThroughSP(hPrivate, META_CR_TXCLKCTRL, &ui32Value);
+       }
+
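+       /* Kick initialisation of the Jones RAM banks (excluding Garten when no META
+        * core is present) and wait for the matching status bits to report done. */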
+       ui32Mask = bMetaFW ?
+               RGX_CR_JONES_RAM_INIT_KICK_MASKFULL
+               : RGX_CR_JONES_RAM_INIT_KICK_MASKFULL & ~RGX_CR_JONES_RAM_INIT_KICK_GARTEN_EN;
+       RGXWriteReg64(hPrivate, RGX_CR_JONES_RAM_INIT_KICK, ui32Mask);
+       eError = RGXPollReg64(hPrivate, RGX_CR_JONES_RAM_STATUS, ui32Mask, ui32Mask);
+
+       if (bMetaFW)
+       {
+               RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXCLKCTRL, META_CR_TXCLKCTRL_ALL_AUTO);
+               RGXReadMetaRegThroughSP(hPrivate, META_CR_TXCLKCTRL, &ui32Value);
+       }
+
+       RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL0, RGX_CR_CLK_CTRL0_ALL_AUTO);
+       RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL1, RGX_CR_CLK_CTRL1_ALL_AUTO);
+
+       if (bMetaFW)
+       {
+               RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN);
+               RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+       }
+
+       return eError;
+}
+
+PVRSRV_ERROR RGXStart(const void *hPrivate)
+{
+       RGX_LAYER_PARAMS *psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psParams->psDevInfo;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_BOOL bDoFWSlaveBoot = IMG_FALSE;
+       IMG_CHAR *pcRGXFW_PROCESSOR;
+       IMG_BOOL bMetaFW = IMG_FALSE;
+
+       if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR))
+       {
+               pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_RISCV;
+               bMetaFW = IMG_FALSE;
+               bDoFWSlaveBoot = IMG_FALSE;
+       }
+       else
+       {
+               pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_META;
+               bMetaFW = IMG_TRUE;
+               bDoFWSlaveBoot = RGXDoFWSlaveBoot(hPrivate);
+       }
+
+       /* Disable the default sys_bus_secure protection to perform minimal setup */
+       RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, 0);
+       (void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE);
+
+       /* Only bypass HMMU if the module is present */
+       if (RGXDeviceHasFeature(hPrivate, RGX_FEATURE_HYPERVISOR_MMU_BIT_MASK))
+       {
+               if (PVRSRV_VZ_MODE_IS(NATIVE))
+               {
+                       /* Always set HMMU in bypass mode */
+                       RGXWriteReg32(hPrivate, RGX_CR_HMMU_BYPASS, RGX_CR_HMMU_BYPASS_MASKFULL);
+                       (void) RGXReadReg32(hPrivate, RGX_CR_HMMU_BYPASS);
+               }
+#if defined(PVRSRV_VZ_BYPASS_HMMU)
+               if (PVRSRV_VZ_MODE_IS(HOST))
+               {
+                       /* Also set HMMU in bypass mode */
+                       RGXWriteReg32(hPrivate, RGX_CR_HMMU_BYPASS, RGX_CR_HMMU_BYPASS_MASKFULL);
+                       (void) RGXReadReg32(hPrivate, RGX_CR_HMMU_BYPASS);
+               }
+#endif
+       }
+
+#if defined(SUPPORT_VALIDATION)
+#if !defined(RGX_CR_FIRMWARE_PROCESSOR_LS)
+#define RGX_CR_FIRMWARE_PROCESSOR_LS                      (0x01A0U)
+#define RGX_CR_FIRMWARE_PROCESSOR_LS_ENABLE_EN            (0x00000001U)
+#endif
+       {
+               if (psDevInfo->ui32ValidationFlags & RGX_VAL_LS_EN)
+               {
+                       /* Set the dual LS mode */
+                       RGXWriteReg32(hPrivate, RGX_CR_FIRMWARE_PROCESSOR_LS, RGX_CR_FIRMWARE_PROCESSOR_LS_ENABLE_EN);
+                       (void) RGXReadReg32(hPrivate, RGX_CR_FIRMWARE_PROCESSOR_LS);
+               }
+       }
+#endif
+
+       /*!
+        * Start series8 FW init sequence
+        */
+       RGXResetSequence(hPrivate, pcRGXFW_PROCESSOR);
+
+       if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, ECC_RAMS) > 0)
+       {
+               RGXCommentLog(hPrivate, "RGXStart: Init Jones ECC RAM");
+               eError = InitJonesECCRAM(hPrivate);
+               if (eError != PVRSRV_OK)
+               {
+                       return eError;
+               }
+       }
+
+       if (RGX_DEVICE_HAS_BRN(hPrivate, BRN_66927))
+       {
+               IMG_UINT64 ui64ClockCtrl;
+
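+               /* BRN_66927 workaround: force the MCU_L0, PM, FBDC, PIXEL and
+                * GEO_VERTEX clocks on instead of leaving them in automatic gating. */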
+               ui64ClockCtrl = RGXReadReg64(hPrivate, RGX_CR_CLK_CTRL0);
+               CLK_CTRL_FORCE_ON(ui64ClockCtrl, CLK_CTRL0_MCU_L0);
+               CLK_CTRL_FORCE_ON(ui64ClockCtrl, CLK_CTRL0_PM);
+               CLK_CTRL_FORCE_ON(ui64ClockCtrl, CLK_CTRL0_FBDC);
+               RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL0, ui64ClockCtrl);
+
+               ui64ClockCtrl = RGXReadReg64(hPrivate, RGX_CR_CLK_CTRL1);
+               CLK_CTRL_FORCE_ON(ui64ClockCtrl, CLK_CTRL1_PIXEL);
+               CLK_CTRL_FORCE_ON(ui64ClockCtrl, CLK_CTRL1_GEO_VERTEX);
+               RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL1, ui64ClockCtrl);
+       }
+
+       if (bMetaFW)
+       {
+               if (bDoFWSlaveBoot)
+               {
+                       /* Configure META to Slave boot */
+                       RGXCommentLog(hPrivate, "RGXStart: META Slave boot");
+                       RGXWriteReg32(hPrivate, RGX_CR_META_BOOT, 0);
+               }
+               else
+               {
+                       /* Configure META to Master boot */
+                       RGXCommentLog(hPrivate, "RGXStart: META Master boot");
+                       RGXWriteReg32(hPrivate, RGX_CR_META_BOOT, RGX_CR_META_BOOT_MODE_EN);
+               }
+       }
+
+       /*
+        * Initialise Firmware wrapper
+        */
+       if (bMetaFW)
+       {
+               RGXInitMetaProcWrapper(hPrivate);
+       }
+       else
+       {
+               RGXInitRiscvProcWrapper(hPrivate);
+       }
+
+       if (RGX_GET_FEATURE_VALUE(psDevInfo, MMU_VERSION) >= 4)
+       {
+               /* Initialise the MMU range-based config registers for Non4K pages. */
+               RGXInitMMURangeRegisters(hPrivate);
+       }
+
+       RGXInitAXIACE(hPrivate);
+       /*
+        * Initialise BIF.
+        */
+       RGXInitBIF(hPrivate);
+
+       RGXCommentLog(hPrivate, "RGXStart: Take %s out of reset", pcRGXFW_PROCESSOR);
+       DeassertMetaReset(hPrivate);
+
+       if (bMetaFW)
+       {
+               if (bDoFWSlaveBoot)
+               {
+                       eError = RGXFabricCoherencyTest(hPrivate);
+                       if (eError != PVRSRV_OK) return eError;
+
+                       RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Slave boot Start");
+                       eError = RGXStartFirmware(hPrivate);
+                       if (eError != PVRSRV_OK) return eError;
+               }
+               else
+               {
+                       RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Master boot Start");
+               }
+       }
+       else
+       {
+               /* Bring Debug Module out of reset */
+               RGXWriteReg32(hPrivate, RGX_CR_FWCORE_DMI_DMCONTROL, RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN);
+
+               /* Boot the FW */
+               RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Master boot Start");
+               RGXWriteReg32(hPrivate, RGX_CR_FWCORE_BOOT, 1);
+               RGXWaitCycles(hPrivate, 32, 3);
+       }
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(SUPPORT_SECURITY_VALIDATION)
+       RGXCommentLog(hPrivate, "RGXStart: Enable sys_bus_secure");
+       RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, RGX_CR_SYS_BUS_SECURE_ENABLE_EN);
+       (void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE); /* Fence write */
+#endif
+
+       /*!
+        * End series8 FW init sequence
+        */
+
+       return eError;
+}
+
+PVRSRV_ERROR RGXStop(const void *hPrivate)
+{
+       RGX_LAYER_PARAMS *psParams = (RGX_LAYER_PARAMS*)hPrivate;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psParams->psDevInfo;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_BOOL bMetaFW = RGX_DEVICE_HAS_FEATURE_VALUE(hPrivate, META);
+       IMG_UINT32 ui32JonesIdleMask = RGX_CR_JONES_IDLE_MASKFULL^RGX_CR_JONES_IDLE_AXI2IMG_EN;
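+       /* AXI2IMG is not required to be idle, so it is excluded from the Jones
+        * idle checks below. */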
+
+       RGXDeviceAckIrq(hPrivate);
+
+#if defined(SUPPORT_VALIDATION) && !defined(TC_MEMORY_CONFIG)
+#if !defined(RGX_CR_POWER_EVENT)
+#define RGX_CR_POWER_EVENT                                (0x0038U)
+#define RGX_CR_POWER_EVENT_GPU_MASK_CLRMSK                (IMG_UINT64_C(0x00FFFFFFFFFFFFFF))
+#define RGX_CR_POWER_EVENT_GPU_ID_CLRMSK                  (IMG_UINT64_C(0xFFFFFFFFFFFFFF1F))
+#define RGX_CR_POWER_EVENT_DOMAIN_SPU0_SHIFT              (9U)
+#define RGX_CR_POWER_EVENT_DOMAIN_CLUSTER0_SHIFT          (8U)
+#define RGX_CR_POWER_EVENT_DOMAIN_CLUSTER_CLUSTER0_SHIFT  (32U)
+#define RGX_CR_POWER_EVENT_TYPE_SHIFT                     (0U)
+#define RGX_CR_POWER_EVENT_TYPE_POWER_DOWN                (0x00000000U)
+#define RGX_CR_POWER_EVENT_REQ_EN                         (0x00000002U)
+#endif
+
+       /* Power off any enabled SPUs */
+       if (BITMASK_HAS(psDevInfo->ui32DeviceFlags, RGXKM_DEVICE_STATE_ENABLE_SPU_UNITS_POWER_MASK_CHANGE_EN))
+       {
+               if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, POWER_ISLAND_VERSION) == 3)
+               {
+                       IMG_UINT64 ui64PowUnitOffMask;
+                       IMG_UINT64 ui64RegVal;
+
+                       ui64PowUnitOffMask = (1 << RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, NUM_CLUSTERS)) -1;
+                       ui64RegVal = (~RGX_CR_POWER_EVENT_GPU_MASK_CLRMSK) | // GPU_MASK specifies all cores
+                                    (~RGX_CR_POWER_EVENT_GPU_ID_CLRMSK) | // GPU_ID all set means use the GPU_MASK
+                                    (ui64PowUnitOffMask << RGX_CR_POWER_EVENT_DOMAIN_CLUSTER_CLUSTER0_SHIFT) |
+                                    RGX_CR_POWER_EVENT_TYPE_POWER_DOWN;
+
+                       RGXWriteReg64(hPrivate,
+                                     RGX_CR_POWER_EVENT,
+                                     ui64RegVal);
+
+                       RGXWriteReg64(hPrivate,
+                                     RGX_CR_POWER_EVENT,
+                                     ui64RegVal | RGX_CR_POWER_EVENT_REQ_EN);
+               }
+               else if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, POWER_ISLAND_VERSION) == 2)
+               {
+                       IMG_UINT64 ui64PowUnitOffMask;
+                       IMG_UINT64 ui64RegVal;
+
+                       ui64PowUnitOffMask = (1 << RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, NUM_CLUSTERS)) -1;
+                       ui64RegVal = (~RGX_CR_POWER_EVENT_GPU_MASK_CLRMSK) | // GPU_MASK specifies all cores
+                                    (~RGX_CR_POWER_EVENT_GPU_ID_CLRMSK) | // GPU_ID all set means use the GPU_MASK
+                                    (ui64PowUnitOffMask << RGX_CR_POWER_EVENT_DOMAIN_CLUSTER0_SHIFT) |
+                                    RGX_CR_POWER_EVENT_TYPE_POWER_DOWN;
+
+                       RGXWriteReg64(hPrivate,
+                                     RGX_CR_POWER_EVENT,
+                                     ui64RegVal);
+
+                       RGXWriteReg64(hPrivate,
+                                     RGX_CR_POWER_EVENT,
+                                     ui64RegVal | RGX_CR_POWER_EVENT_REQ_EN);
+               }
+               else
+               {
+                       IMG_UINT32 ui32PowUnitOffMask;
+                       IMG_UINT32 ui32RegVal;
+
+                       ui32PowUnitOffMask = (1 << RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, NUM_SPU)) -1;
+                       ui32RegVal = (ui32PowUnitOffMask << RGX_CR_POWER_EVENT_DOMAIN_SPU0_SHIFT) |
+                                    RGX_CR_POWER_EVENT_TYPE_POWER_DOWN;
+
+                       RGXWriteReg32(hPrivate,
+                                     RGX_CR_POWER_EVENT,
+                                     ui32RegVal);
+
+                       RGXWriteReg32(hPrivate,
+                                     RGX_CR_POWER_EVENT,
+                                     ui32RegVal | RGX_CR_POWER_EVENT_REQ_EN);
+               }
+
+               /* Poll on complete */
+               eError = RGXPollReg32(hPrivate,
+                                     RGX_CR_EVENT_STATUS,
+                                     RGX_CR_EVENT_STATUS_POWER_COMPLETE_EN,
+                                     RGX_CR_EVENT_STATUS_POWER_COMPLETE_EN);
+               if (eError != PVRSRV_OK) return eError;
+
+               /* Update the SPU_ENABLE mask */
+               if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, POWER_ISLAND_VERSION) == 1)
+               {
+                       RGXWriteReg32(hPrivate, RGX_CR_SPU_ENABLE, 0);
+               }
+               RGXWriteReg32(hPrivate, 0xF020, 0);
+       }
+#endif
+
+       /* Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper */
+       if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) ||
+           RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) < 2)
+       {
+               ui32JonesIdleMask ^= (RGX_CR_JONES_IDLE_ASC_EN|RGX_CR_JONES_IDLE_RCE_EN);
+       }
+
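+       /* The XOR below removes the GARTEN and SOCIF bits from both the poll mask
+        * and the expected value, so those units are not required to be idle yet. */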
+       eError = RGXPollReg32(hPrivate,
+                             RGX_CR_JONES_IDLE,
+                             ui32JonesIdleMask^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN),
+                             ui32JonesIdleMask^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN));
+
+       if (eError != PVRSRV_OK) return eError;
+
+
+       /* Wait for SLC to signal IDLE */
+       eError = RGXPollReg32(hPrivate,
+                             RGX_CR_SLC_IDLE,
+                             RGX_CR_SLC_IDLE_MASKFULL^(CR_IDLE_UNSELECTED_MASK),
+                             RGX_CR_SLC_IDLE_MASKFULL^(CR_IDLE_UNSELECTED_MASK));
+       if (eError != PVRSRV_OK) return eError;
+
+
+       /* Unset MTS DM association with threads */
+       RGXWriteReg32(hPrivate,
+                     RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC,
+                     RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK
+                     & RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL);
+       RGXWriteReg32(hPrivate,
+                     RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC,
+                     RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK
+                     & RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL);
+       RGXWriteReg32(hPrivate,
+                     RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC,
+                     RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK
+                     & RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL);
+       RGXWriteReg32(hPrivate,
+                     RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC,
+                     RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK
+                     & RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL);
+
+
+#if defined(PDUMP)
+       if (bMetaFW)
+       {
+               /* Disabling threads is only required for pdumps to stop the fw gracefully */
+
+               /* Disable thread 0 */
+               eError = RGXWriteMetaRegThroughSP(hPrivate,
+                                                 META_CR_T0ENABLE_OFFSET,
+                                                 ~META_CR_TXENABLE_ENABLE_BIT);
+               if (eError != PVRSRV_OK) return eError;
+
+               /* Disable thread 1 */
+               eError = RGXWriteMetaRegThroughSP(hPrivate,
+                                                 META_CR_T1ENABLE_OFFSET,
+                                                 ~META_CR_TXENABLE_ENABLE_BIT);
+               if (eError != PVRSRV_OK) return eError;
+
+               /* Wait for the Slave Port to finish all the transactions */
+               if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES))
+               {
+                       /* Clear down any irq raised by META (done after disabling the FW
+                        * threads to avoid a race condition).
+                        * This is only really needed for PDumps, but we do it driver-live anyway.
+                        */
+                       RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES, 0x0);
+                       (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES); /* Fence write */
+
+                       eError = RGXPollReg32(hPrivate,
+                                                                 RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES,
+                                                                 RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN
+                                                                 | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN,
+                                                                 RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN
+                                                                 | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN);
+               }
+               else
+               {
+                       /* Clear down any irq raised by META (done after disabling the FW
+                        * threads to avoid a race condition).
+                        * This is only really needed for PDumps, but we do it driver-live anyway.
+                        */
+                       RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS, 0x0);
+                       (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS); /* Fence write */
+
+                       eError = RGXPollReg32(hPrivate,
+                                                                 RGX_CR_META_SP_MSLVCTRL1,
+                                                                 RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+                                                                 RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+               }
+               if (eError != PVRSRV_OK) return eError;
+       }
+#endif
+
+
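+       /* Wait for all outstanding SLC bus reads and writes to drain. */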
+       eError = RGXPollReg64(hPrivate,
+                             RGX_CR_SLC_STATUS1,
+                             0,
+                             (~RGX_CR_SLC_STATUS1_BUS0_OUTSTANDING_READS_CLRMSK |
+                              ~RGX_CR_SLC_STATUS1_BUS1_OUTSTANDING_READS_CLRMSK |
+                              ~RGX_CR_SLC_STATUS1_BUS0_OUTSTANDING_WRITES_CLRMSK |
+                              ~RGX_CR_SLC_STATUS1_BUS1_OUTSTANDING_WRITES_CLRMSK));
+       if (eError != PVRSRV_OK) return eError;
+
+       eError = RGXPollReg64(hPrivate,
+                             RGX_CR_SLC_STATUS2,
+                             0,
+                             (~RGX_CR_SLC_STATUS2_BUS2_OUTSTANDING_READS_CLRMSK |
+                              ~RGX_CR_SLC_STATUS2_BUS3_OUTSTANDING_READS_CLRMSK |
+                              ~RGX_CR_SLC_STATUS2_BUS2_OUTSTANDING_WRITES_CLRMSK |
+                              ~RGX_CR_SLC_STATUS2_BUS3_OUTSTANDING_WRITES_CLRMSK));
+       if (eError != PVRSRV_OK) return eError;
+
+
+       /* Wait for SLC to signal IDLE */
+       eError = RGXPollReg32(hPrivate,
+                             RGX_CR_SLC_IDLE,
+                             RGX_CR_SLC_IDLE_MASKFULL^(CR_IDLE_UNSELECTED_MASK),
+                             RGX_CR_SLC_IDLE_MASKFULL^(CR_IDLE_UNSELECTED_MASK));
+       if (eError != PVRSRV_OK) return eError;
+
+
+       /* Wait for Jones to signal IDLE except for the Garten Wrapper */
+       eError = RGXPollReg32(hPrivate,
+                             RGX_CR_JONES_IDLE,
+                             ui32JonesIdleMask^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN),
+                             ui32JonesIdleMask^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN));
+
+       if (eError != PVRSRV_OK) return eError;
+
+
+       if (bMetaFW)
+       {
+               IMG_UINT32 ui32RegValue;
+
+               eError = RGXReadMetaRegThroughSP(hPrivate,
+                                                META_CR_TxVECINT_BHALT,
+                                                &ui32RegValue);
+               if (eError != PVRSRV_OK) return eError;
+
+               if ((ui32RegValue & 0xFFFFFFFFU) == 0x0)
+               {
+                       /* Wait for Sidekick/Jones to signal IDLE including
+                        * the Garten Wrapper if there is no debugger attached
+                        * (TxVECINT_BHALT = 0x0) */
+                       eError = RGXPollReg32(hPrivate,
+                                             RGX_CR_JONES_IDLE,
+                                             ui32JonesIdleMask^RGX_CR_JONES_IDLE_SOCIF_EN,
+                                             ui32JonesIdleMask^RGX_CR_JONES_IDLE_SOCIF_EN);
+                       if (eError != PVRSRV_OK) return eError;
+               }
+       }
+       else
+       {
+               eError = RGXPollReg32(hPrivate,
+                                     RGX_CR_JONES_IDLE,
+                                     ui32JonesIdleMask^RGX_CR_JONES_IDLE_SOCIF_EN,
+                                     ui32JonesIdleMask^RGX_CR_JONES_IDLE_SOCIF_EN);
+               if (eError != PVRSRV_OK) return eError;
+       }
+
+       return eError;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxta3d.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxta3d.c
new file mode 100644 (file)
index 0000000..290bc7f
--- /dev/null
@@ -0,0 +1,5446 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX TA/3D routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX TA/3D routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+/* for the offsetof macro */
+#if defined(__linux__)
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+#include "pdump_km.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxta3d.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "rgx_memallocflags.h"
+#include "rgxccb.h"
+#include "rgxhwperf.h"
+#include "ospvr_gputrace.h"
+#include "rgxsyncutils.h"
+#include "htbuffer.h"
+
+#include "rgxdefs_km.h"
+#include "rgx_fwif_km.h"
+#include "physmem.h"
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "process_stats.h"
+
+#include "rgxpmdefs.h"
+
+#include "rgxtimerquery.h"
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+
+#if defined(SUPPORT_PDVFS)
+#include "rgxpdvfs.h"
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "rgxworkest.h"
+
+#define HASH_CLEAN_LIMIT 6
+#endif
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_TA3D_UFO_DUMP   0
+
+//#define TA3D_CHECKPOINT_DEBUG
+
+#if defined(TA3D_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+static INLINE
+void _DebugSyncValues(const IMG_CHAR *pszFunction,
+                                         const IMG_UINT32 *pui32UpdateValues,
+                                         const IMG_UINT32 ui32Count)
+{
+       IMG_UINT32 i;
+       IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32UpdateValues;
+
+       for (i = 0; i < ui32Count; i++)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", pszFunction, i, (void*)pui32Tmp, *pui32Tmp));
+               pui32Tmp++;
+       }
+}
+
+static INLINE
+void _DebugSyncCheckpoints(const IMG_CHAR *pszFunction,
+                                                  const IMG_CHAR *pszDMName,
+                                                  const PSYNC_CHECKPOINT *apsSyncCheckpoints,
+                                                  const IMG_UINT32 ui32Count)
+{
+       IMG_UINT32 i;
+
+       for (i = 0; i < ui32Count; i++)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFence%sSyncCheckpoints[%d]=<%p>", pszFunction, pszDMName, i, *(apsSyncCheckpoints + i)));
+       }
+}
+
+#else
+#define CHKPT_DBG(X)
+#endif
+
+/* define the number of commands required to be set up by the CCB helper */
+/* 1 command for the TA */
+#define CCB_CMD_HELPER_NUM_TA_COMMANDS 1
+/* Up to 3 commands for the 3D (partial render fence, partial render, and render) */
+#define CCB_CMD_HELPER_NUM_3D_COMMANDS 3
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#define WORKEST_CYCLES_PREDICTION_GET(x) ((x).ui32CyclesPrediction)
+#else
+#define WORKEST_CYCLES_PREDICTION_GET(x) (NO_CYCEST)
+#endif
+
+typedef struct {
+       DEVMEM_MEMDESC                          *psContextStateMemDesc;
+       RGX_SERVER_COMMON_CONTEXT       *psServerCommonContext;
+       IMG_UINT32                                      ui32Priority;
+} RGX_SERVER_RC_TA_DATA;
+
+typedef struct {
+       DEVMEM_MEMDESC                          *psContextStateMemDesc;
+       RGX_SERVER_COMMON_CONTEXT       *psServerCommonContext;
+       IMG_UINT32                                      ui32Priority;
+} RGX_SERVER_RC_3D_DATA;
+
+struct _RGX_SERVER_RENDER_CONTEXT_ {
+       /* This lock protects usage of the render context.
+        * It ensures that only one kick is being prepared and/or submitted on
+        * this render context at any time.
+        */
+       POS_LOCK                                hLock;
+       RGX_CCB_CMD_HELPER_DATA asTACmdHelperData[CCB_CMD_HELPER_NUM_TA_COMMANDS];
+       RGX_CCB_CMD_HELPER_DATA as3DCmdHelperData[CCB_CMD_HELPER_NUM_3D_COMMANDS];
+       PVRSRV_DEVICE_NODE                      *psDeviceNode;
+       DEVMEM_MEMDESC                          *psFWRenderContextMemDesc;
+       DEVMEM_MEMDESC                          *psFWFrameworkMemDesc;
+       RGX_SERVER_RC_TA_DATA           sTAData;
+       RGX_SERVER_RC_3D_DATA           s3DData;
+       IMG_UINT32                                      ui32CleanupStatus;
+#define RC_CLEANUP_TA_COMPLETE         (1 << 0)
+#define RC_CLEANUP_3D_COMPLETE         (1 << 1)
+       DLLIST_NODE                                     sListNode;
+       SYNC_ADDR_LIST                          sSyncAddrListTAFence;
+       SYNC_ADDR_LIST                          sSyncAddrListTAUpdate;
+       SYNC_ADDR_LIST                          sSyncAddrList3DFence;
+       SYNC_ADDR_LIST                          sSyncAddrList3DUpdate;
+       ATOMIC_T                                        hIntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       WORKEST_HOST_DATA                       sWorkEstData;
+#endif
+#if defined(SUPPORT_BUFFER_SYNC)
+       struct pvr_buffer_sync_context *psBufferSyncContext;
+#endif
+};
+
+
+/*
+       Static functions used by render context code
+*/
+
+static
+PVRSRV_ERROR _DestroyTAContext(RGX_SERVER_RC_TA_DATA *psTAData,
+                                                          PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR eError;
+
+       /* Check if the FW has finished with this resource ... */
+       eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+                                                                                         psTAData->psServerCommonContext,
+                                                                                         RGXFWIF_DM_GEOM,
+                                                                                         PDUMP_FLAGS_CONTINUOUS);
+       if (eError == PVRSRV_ERROR_RETRY)
+       {
+               return eError;
+       }
+       else if (eError != PVRSRV_OK)
+       {
+               PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+               return eError;
+       }
+
+       /* ... it has so we can free its resources */
+       FWCommonContextFree(psTAData->psServerCommonContext);
+       DevmemFwUnmapAndFree(psDeviceNode->pvDevice, psTAData->psContextStateMemDesc);
+       psTAData->psServerCommonContext = NULL;
+       return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR _Destroy3DContext(RGX_SERVER_RC_3D_DATA *ps3DData,
+                                                          PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR eError;
+
+       /* Check if the FW has finished with this resource ... */
+       eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+                                                                                         ps3DData->psServerCommonContext,
+                                                                                         RGXFWIF_DM_3D,
+                                                                                         PDUMP_FLAGS_CONTINUOUS);
+       if (eError == PVRSRV_ERROR_RETRY)
+       {
+               return eError;
+       }
+       else if (eError != PVRSRV_OK)
+       {
+               PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+               return eError;
+       }
+
+       /* ... it has so we can free its resources */
+       FWCommonContextFree(ps3DData->psServerCommonContext);
+       DevmemFwUnmapAndFree(psDeviceNode->pvDevice, ps3DData->psContextStateMemDesc);
+       ps3DData->psServerCommonContext = NULL;
+       return PVRSRV_OK;
+}
+
+static void _RGXDumpPMRPageList(DLLIST_NODE *psNode)
+{
+       RGX_PMR_NODE *psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock);
+       PVRSRV_ERROR                    eError;
+
+       eError = PMRDumpPageList(psPMRNode->psPMR,
+                                                       RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "Error (%s) printing pmr %p",
+                               PVRSRVGetErrorString(eError),
+                               psPMRNode->psPMR));
+       }
+}
+
+IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList)
+{
+       DLLIST_NODE *psNode, *psNext;
+
+       PVR_LOG(("Freelist FWAddr 0x%08x, ID = %d, CheckSum 0x%016" IMG_UINT64_FMTSPECx,
+                               psFreeList->sFreeListFWDevVAddr.ui32Addr,
+                               psFreeList->ui32FreelistID,
+                               psFreeList->ui64FreelistChecksum));
+
+       /* Dump Init FreeList page list */
+       PVR_LOG(("  Initial Memory block"));
+       dllist_foreach_node(&psFreeList->sMemoryBlockInitHead, psNode, psNext)
+       {
+               _RGXDumpPMRPageList(psNode);
+       }
+
+       /* Dump Grow FreeList page list */
+       PVR_LOG(("  Grow Memory blocks"));
+       dllist_foreach_node(&psFreeList->sMemoryBlockHead, psNode, psNext)
+       {
+               _RGXDumpPMRPageList(psNode);
+       }
+
+       return IMG_TRUE;
+}
+
+static void _CheckFreelist(RGX_FREELIST *psFreeList,
+                                                  IMG_UINT32 ui32NumOfPagesToCheck,
+                                                  IMG_UINT64 ui64ExpectedCheckSum,
+                                                  IMG_UINT64 *pui64CalculatedCheckSum)
+{
+#if defined(NO_HARDWARE)
+       /* No checksum needed as we have all information in the pdumps */
+       PVR_UNREFERENCED_PARAMETER(psFreeList);
+       PVR_UNREFERENCED_PARAMETER(ui32NumOfPagesToCheck);
+       PVR_UNREFERENCED_PARAMETER(ui64ExpectedCheckSum);
+       *pui64CalculatedCheckSum = 0;
+#else
+       PVRSRV_ERROR eError;
+       size_t uiNumBytes;
+       IMG_UINT8* pui8Buffer;
+       IMG_UINT32* pui32Buffer;
+       IMG_UINT32 ui32CheckSumAdd = 0;
+       IMG_UINT32 ui32CheckSumXor = 0;
+       IMG_UINT32 ui32Entry;
+       IMG_UINT32 ui32Entry2;
+       IMG_BOOL bFreelistBad = IMG_FALSE;
+
+       *pui64CalculatedCheckSum = 0;
+
+       PVR_ASSERT(ui32NumOfPagesToCheck <= (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages));
+
+       /* Allocate a buffer large enough for the freelist entries being checked */
+       pui8Buffer = OSAllocMem(ui32NumOfPagesToCheck * sizeof(IMG_UINT32));
+       if (pui8Buffer == NULL)
+       {
+               PVR_LOG(("%s: Failed to allocate buffer to check freelist %p!",
+                               __func__, psFreeList));
+               PVR_ASSERT(0);
+               return;
+       }
+
+       /* Copy freelist content into Buffer */
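+       /* The in-use entries occupy the upper end of the freelist table, so the
+        * read starts (ui32MaxFLPages - ui32CurrentFLPages - ui32ReadyFLPages)
+        * entries above the PMR offset, with the byte offset aligned down to
+        * RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE.
+        */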
+       eError = PMR_ReadBytes(psFreeList->psFreeListPMR,
+                                       psFreeList->uiFreeListPMROffset +
+                                       (((psFreeList->ui32MaxFLPages -
+                                          psFreeList->ui32CurrentFLPages - psFreeList->ui32ReadyFLPages) * sizeof(IMG_UINT32)) &
+                                        ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1)),
+                                        pui8Buffer,
+                                        ui32NumOfPagesToCheck * sizeof(IMG_UINT32),
+                                       &uiNumBytes);
+       if (eError != PVRSRV_OK)
+       {
+               OSFreeMem(pui8Buffer);
+               PVR_LOG(("%s: Failed to get freelist data for freelist %p!",
+                               __func__, psFreeList));
+               PVR_ASSERT(0);
+               return;
+       }
+
+       PVR_ASSERT(uiNumBytes == ui32NumOfPagesToCheck * sizeof(IMG_UINT32));
+
+       /* Generate checksum (skipping the first page if not allocated) */
+       pui32Buffer = (IMG_UINT32 *)pui8Buffer;
+       ui32Entry = ((psFreeList->ui32GrowFLPages == 0  &&  psFreeList->ui32CurrentFLPages > 1) ? 1 : 0);
+       for (/*ui32Entry*/ ; ui32Entry < ui32NumOfPagesToCheck; ui32Entry++)
+       {
+               ui32CheckSumAdd += pui32Buffer[ui32Entry];
+               ui32CheckSumXor ^= pui32Buffer[ui32Entry];
+
+               /* Check for double entries */
+               for (ui32Entry2 = ui32Entry+1; ui32Entry2 < ui32NumOfPagesToCheck; ui32Entry2++)
+               {
+                       if (pui32Buffer[ui32Entry] == pui32Buffer[ui32Entry2])
+                       {
+                               PVR_LOG(("%s: Freelist consistency failure: FW addr: 0x%08X, Double entry found 0x%08x on idx: %d and %d of %d",
+                                               __func__,
+                                               psFreeList->sFreeListFWDevVAddr.ui32Addr,
+                                               pui32Buffer[ui32Entry2],
+                                               ui32Entry,
+                                               ui32Entry2,
+                                               psFreeList->ui32CurrentFLPages));
+                               bFreelistBad = IMG_TRUE;
+                               break;
+                       }
+               }
+       }
+
+       OSFreeMem(pui8Buffer);
+
+       /* Check the calculated checksum against the expected checksum... */
+       *pui64CalculatedCheckSum = ((IMG_UINT64)ui32CheckSumXor << 32) | ui32CheckSumAdd;
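+       /* The 64-bit checksum packs the XOR of all entries into the upper 32 bits
+        * and their wrapping 32-bit sum into the lower 32 bits, so both must
+        * match for the comparison below to pass.
+        */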
+
+       if (ui64ExpectedCheckSum != 0  &&  ui64ExpectedCheckSum != *pui64CalculatedCheckSum)
+       {
+               PVR_LOG(("%s: Checksum mismatch for freelist %p! Expected 0x%016llx calculated 0x%016llx",
+                       __func__, psFreeList, ui64ExpectedCheckSum, *pui64CalculatedCheckSum));
+               bFreelistBad = IMG_TRUE;
+       }
+
+       if (bFreelistBad)
+       {
+               PVR_LOG(("%s: Sleeping forever!", __func__));
+               PVR_ASSERT(!bFreelistBad);
+       }
+#endif
+}
+
+
+/*
+ *  Function to work out the number of freelist pages to reserve for growing
+ *  within the FW without having to wait for the host to progress a grow
+ *  request.
+ *
+ *  The number of pages must be a multiple of 4 to align the PM addresses
+ *  for the initial freelist allocation and also be less than the grow size.
+ *
+ *  If the threshold or grow size works out to fewer than 4 pages, the
+ *  feature is not used.
+ */
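+/*
+ *  Worked example (illustrative numbers only): with a grow threshold of 50%
+ *  and a 4-entry alignment granule, a freelist of 1000 pages gives
+ *  (1000 * 50) / 100 = 500, already a multiple of 4, which is then clamped
+ *  to ui32GrowFLPages if that is smaller; a freelist of 10 pages gives 5,
+ *  rounded down to 4, and anything below 4 rounds to 0, disabling the
+ *  feature as noted above.
+ */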
+static IMG_UINT32 _CalculateFreelistReadyPages(RGX_FREELIST *psFreeList,
+                                               IMG_UINT32  ui32FLPages)
+{
+       IMG_UINT32  ui32ReadyFLPages = ((ui32FLPages * psFreeList->ui32GrowThreshold) / 100) &
+                                      ~((RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE/sizeof(IMG_UINT32))-1);
+
+       if (ui32ReadyFLPages > psFreeList->ui32GrowFLPages)
+       {
+               ui32ReadyFLPages = psFreeList->ui32GrowFLPages;
+       }
+
+       return ui32ReadyFLPages;
+}
+
+
+PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList,
+                             IMG_UINT32 ui32NumPages,
+                             PDLLIST_NODE pListHeader)
+{
+       RGX_PMR_NODE    *psPMRNode;
+       IMG_DEVMEM_SIZE_T uiSize;
+       IMG_UINT32  ui32MappingTable = 0;
+       IMG_DEVMEM_OFFSET_T uiOffset;
+       IMG_DEVMEM_SIZE_T uiLength;
+       IMG_DEVMEM_SIZE_T uistartPage;
+       PVRSRV_ERROR eError;
+       static const IMG_CHAR szAllocName[] = "Free List";
+
+       /* Are we allowed to grow ? */
+       if (psFreeList->ui32MaxFLPages - (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) < ui32NumPages)
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                               "Freelist [0x%p]: grow by %u pages denied. "
+                               "Max PB size reached (current pages %u+%u/%u)",
+                               psFreeList,
+                               ui32NumPages,
+                               psFreeList->ui32CurrentFLPages,
+                               psFreeList->ui32ReadyFLPages,
+                               psFreeList->ui32MaxFLPages));
+               return PVRSRV_ERROR_PBSIZE_ALREADY_MAX;
+       }
+
+       /* Allocate kernel memory block structure */
+       psPMRNode = OSAllocMem(sizeof(*psPMRNode));
+       if (psPMRNode == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: failed to allocate host data structure",
+                               __func__));
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto ErrorAllocHost;
+       }
+
+       /*
+        * Lock protects simultaneous manipulation of:
+        * - the memory block list
+        * - the freelist's ui32CurrentFLPages
+        */
+       OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
+
+
+       /*
+        *  The PM never takes the last page in a freelist, so if this block
+        *  of pages is the first one and there is no ability to grow, then
+        *  we can skip allocating one 4K page for the lowest entry.
+        */
+       if (OSGetPageSize() > RGX_BIF_PM_PHYSICAL_PAGE_SIZE)
+       {
+               /*
+                * The allocation size will be rounded up to the OS page size,
+                * so any adjustment made here would simply be undone later.
+                */
+               psPMRNode->bFirstPageMissing = IMG_FALSE;
+       }
+       else
+       {
+               psPMRNode->bFirstPageMissing = (psFreeList->ui32GrowFLPages == 0  &&  ui32NumPages > 1);
+       }
+
+       psPMRNode->ui32NumPages = ui32NumPages;
+       psPMRNode->psFreeList = psFreeList;
+
+       /* Allocate Memory Block */
+       PDUMPCOMMENT(psFreeList->psDevInfo->psDeviceNode, "Allocate PB Block (Pages %08X)", ui32NumPages);
+       uiSize = (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE;
+       if (psPMRNode->bFirstPageMissing)
+       {
+               uiSize -= RGX_BIF_PM_PHYSICAL_PAGE_SIZE;
+       }
+       eError = PhysmemNewRamBackedPMR(psFreeList->psConnection,
+                       psFreeList->psDevInfo->psDeviceNode,
+                       uiSize,
+                       uiSize,
+                       1,
+                       1,
+                       &ui32MappingTable,
+                       RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
+                       PVRSRV_MEMALLOCFLAG_GPU_READABLE,
+                       sizeof(szAllocName),
+                       szAllocName,
+                       psFreeList->ownerPid,
+                       &psPMRNode->psPMR,
+                       PDUMP_NONE,
+                       NULL);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to allocate PB block of size: 0x%016" IMG_UINT64_FMTSPECX,
+                                __func__,
+                                (IMG_UINT64)uiSize));
+               goto ErrorBlockAlloc;
+       }
+
+       /* Zeroing physical pages pointed by the PMR */
+       if (psFreeList->psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_ZERO_FREELIST)
+       {
+               eError = PMRZeroingPMR(psPMRNode->psPMR, RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Failed to zero PMR %p of freelist %p (%s)",
+                                       __func__,
+                                       psPMRNode->psPMR,
+                                       psFreeList,
+                                       PVRSRVGetErrorString(eError)));
+                       PVR_ASSERT(0);
+               }
+       }
+
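+       /* The new entries are written immediately below the region currently
+        * covered by ui32CurrentFLPages: the block's first entry lands at index
+        * (ui32MaxFLPages - ui32CurrentFLPages - ui32NumPages) and the byte
+        * offset is aligned down to the freelist base-address granule.
+        */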
+       uiLength = psPMRNode->ui32NumPages * sizeof(IMG_UINT32);
+       uistartPage = (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages - psPMRNode->ui32NumPages);
+       uiOffset = psFreeList->uiFreeListPMROffset + ((uistartPage * sizeof(IMG_UINT32)) & ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1));
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+
+       eError = RIWritePMREntryWithOwnerKM(psPMRNode->psPMR,
+                                           psFreeList->ownerPid);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: call to RIWritePMREntryWithOwnerKM failed (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+       }
+
+        /* Attach RI information */
+       eError = RIWriteMEMDESCEntryKM(psPMRNode->psPMR,
+                                      OSStringNLength(szAllocName, DEVMEM_ANNOTATION_MAX_LEN),
+                                      szAllocName,
+                                      0,
+                                      uiSize,
+                                      IMG_FALSE,
+                                      IMG_FALSE,
+                                      &psPMRNode->hRIHandle);
+       PVR_LOG_IF_ERROR(eError, "RIWriteMEMDESCEntryKM");
+
+#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
+
+       /* write Freelist with Memory Block physical addresses */
+       eError = PMRWritePMPageList(
+                                               /* Target PMR, offset, and length */
+                                               psFreeList->psFreeListPMR,
+                                               (psPMRNode->bFirstPageMissing ? uiOffset + sizeof(IMG_UINT32) : uiOffset),
+                                               (psPMRNode->bFirstPageMissing ? uiLength - sizeof(IMG_UINT32) : uiLength),
+                                               /* Referenced PMR, and "page" granularity */
+                                               psPMRNode->psPMR,
+                                               RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
+                                               &psPMRNode->psPageList);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to write pages of Node %p",
+                                __func__,
+                                psPMRNode));
+               goto ErrorPopulateFreelist;
+       }
+
+#if defined(SUPPORT_SHADOW_FREELISTS)
+       /* Copy freelist memory to shadow freelist */
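+       /* The shadow copy lives directly after the live freelist within the same
+        * PMR (hence the mapping of twice the maximum freelist size below); only
+        * the region just written needs to be duplicated.
+        */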
+       {
+               const IMG_UINT32 ui32FLMaxSize = psFreeList->ui32MaxFLPages * sizeof(IMG_UINT32);
+               const IMG_UINT32 ui32MapSize = ui32FLMaxSize * 2;
+               const IMG_UINT32 ui32CopyOffset = uiOffset - psFreeList->uiFreeListPMROffset;
+               IMG_BYTE *pFLMapAddr;
+               size_t uiNumBytes;
+               PVRSRV_ERROR res;
+               IMG_HANDLE hMapHandle;
+
+               /* Map both the FL and the shadow FL */
+               res = PMRAcquireKernelMappingData(psFreeList->psFreeListPMR, psFreeList->uiFreeListPMROffset, ui32MapSize,
+                                                 (void**) &pFLMapAddr, &uiNumBytes, &hMapHandle);
+               if (res != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to map freelist (ID=%d)",
+                                __func__,
+                                psFreeList->ui32FreelistID));
+                       goto ErrorPopulateFreelist;
+               }
+
+               /* Copy only the newly added memory */
+               OSCachedMemCopy(pFLMapAddr + ui32FLMaxSize + ui32CopyOffset, pFLMapAddr + ui32CopyOffset , uiLength);
+               OSWriteMemoryBarrier(pFLMapAddr);
+
+#if defined(PDUMP)
+               PDUMPCOMMENT(psFreeList->psDevInfo->psDeviceNode, "Initialize shadow freelist");
+
+               /* Translate memcpy to pdump */
+               {
+                       IMG_DEVMEM_OFFSET_T uiCurrOffset;
+
+                       for (uiCurrOffset = uiOffset; (uiCurrOffset - uiOffset) < uiLength; uiCurrOffset += sizeof(IMG_UINT32))
+                       {
+                               PMRPDumpCopyMem32(psFreeList->psFreeListPMR,
+                                                 uiCurrOffset + ui32FLMaxSize,
+                                                 psFreeList->psFreeListPMR,
+                                                 uiCurrOffset,
+                                                 ":SYSMEM:$1",
+                                                 PDUMP_FLAGS_CONTINUOUS);
+                       }
+               }
+#endif
+
+
+               res = PMRReleaseKernelMappingData(psFreeList->psFreeListPMR, hMapHandle);
+
+               if (res != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Failed to release freelist mapping (ID=%d)",
+                                __func__,
+                                psFreeList->ui32FreelistID));
+                       goto ErrorPopulateFreelist;
+               }
+       }
+#endif
+
+       /* The block must be added to the head of the list, otherwise the freelist population won't work */
+       dllist_add_to_head(pListHeader, &psPMRNode->sMemoryBlock);
+
+       /* Update number of available pages */
+       psFreeList->ui32CurrentFLPages += ui32NumPages;
+
+       /* Update statistics (needs to happen before the ReadyFL calculation to also count those pages) */
+       if (psFreeList->ui32NumHighPages < psFreeList->ui32CurrentFLPages)
+       {
+               psFreeList->ui32NumHighPages = psFreeList->ui32CurrentFLPages;
+       }
+
+       /* Reserve a number of ready pages to allow the FW to process OOM quickly and asynchronously request a grow. */
+       psFreeList->ui32ReadyFLPages    = _CalculateFreelistReadyPages(psFreeList, psFreeList->ui32CurrentFLPages);
+       psFreeList->ui32CurrentFLPages -= psFreeList->ui32ReadyFLPages;
+
+       if (psFreeList->bCheckFreelist)
+       {
+               /*
+                *  We can only calculate the freelist checksum when the list is full
+                *  (e.g. at initial creation time). At other times the checksum cannot
+                *  be calculated and has to be disabled for this freelist.
+                */
+               if ((psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) == ui32NumPages)
+               {
+                       _CheckFreelist(psFreeList, ui32NumPages, 0, &psFreeList->ui64FreelistChecksum);
+               }
+               else
+               {
+                       psFreeList->ui64FreelistChecksum = 0;
+               }
+       }
+       OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+       PVR_DPF((PVR_DBG_MESSAGE, "Freelist [%p]: %s %u pages (pages=%u+%u/%u checksum=0x%016" IMG_UINT64_FMTSPECx "%s)",
+                       psFreeList,
+               ((psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) == ui32NumPages ? "Create initial" : "Grow by"),
+                       ui32NumPages,
+                       psFreeList->ui32CurrentFLPages,
+                       psFreeList->ui32ReadyFLPages,
+                       psFreeList->ui32MaxFLPages,
+                       psFreeList->ui64FreelistChecksum,
+                       (psPMRNode->bFirstPageMissing ? " - lowest page not allocated" : "")));
+
+       return PVRSRV_OK;
+
+       /* Error handling */
+ErrorPopulateFreelist:
+       PMRUnrefPMR(psPMRNode->psPMR);
+
+ErrorBlockAlloc:
+       OSFreeMem(psPMRNode);
+       OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+ErrorAllocHost:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+
+}
+
+static PVRSRV_ERROR RGXShrinkFreeList(PDLLIST_NODE pListHeader,
+                                                                               RGX_FREELIST *psFreeList)
+{
+       DLLIST_NODE *psNode;
+       RGX_PMR_NODE *psPMRNode;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_UINT32 ui32OldValue;
+
+       /*
+        * Lock protects simultaneous manipulation of:
+        * - the memory block list
+        * - the freelist's ui32CurrentFLPages value
+        */
+       PVR_ASSERT(pListHeader);
+       PVR_ASSERT(psFreeList);
+       PVR_ASSERT(psFreeList->psDevInfo);
+       PVR_ASSERT(psFreeList->psDevInfo->hLockFreeList);
+
+       OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
+
+       /* Get node from head of list and remove it */
+       psNode = dllist_get_next_node(pListHeader);
+       if (psNode)
+       {
+               dllist_remove_node(psNode);
+
+               psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock);
+               PVR_ASSERT(psPMRNode);
+               PVR_ASSERT(psPMRNode->psPMR);
+               PVR_ASSERT(psPMRNode->psFreeList);
+
+               /* remove block from freelist list */
+
+               /* Unwrite Freelist with Memory Block physical addresses */
+               eError = PMRUnwritePMPageList(psPMRNode->psPageList);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Failed to unwrite pages of Node %p",
+                                        __func__,
+                                        psPMRNode));
+                       PVR_ASSERT(IMG_FALSE);
+               }
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+
+               if (psPMRNode->hRIHandle)
+               {
+                       PVRSRV_ERROR eError;
+
+                       eError = RIDeleteMEMDESCEntryKM(psPMRNode->hRIHandle);
+                       PVR_LOG_IF_ERROR(eError, "RIDeleteMEMDESCEntryKM");
+               }
+
+#endif  /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
+
+               /* Free PMR (We should be the only one that holds a ref on the PMR) */
+               eError = PMRUnrefPMR(psPMRNode->psPMR);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Failed to free PB block %p (%s)",
+                                        __func__,
+                                        psPMRNode->psPMR,
+                                        PVRSRVGetErrorString(eError)));
+                       PVR_ASSERT(IMG_FALSE);
+               }
+
+               /* update available pages in freelist */
+               ui32OldValue = psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages;
+
+               /*
+                * Deallocated pages are deducted from the ReadyPages bank first;
+                * once that bank is empty, the remainder is deducted from the
+                * CurrentPages bank.
+                */
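+               /* Illustrative numbers only: with ReadyFLPages = 3 and
+                * CurrentFLPages = 20, removing an 8-page block leaves Ready = 0
+                * and Current = 20 - (8 - 3) = 15, whereas removing a 2-page
+                * block leaves Ready = 1 and Current unchanged.
+                */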
+               if (psPMRNode->ui32NumPages > psFreeList->ui32ReadyFLPages)
+               {
+                       psFreeList->ui32CurrentFLPages -= psPMRNode->ui32NumPages - psFreeList->ui32ReadyFLPages;
+                       psFreeList->ui32ReadyFLPages = 0;
+               }
+               else
+               {
+                       psFreeList->ui32ReadyFLPages -= psPMRNode->ui32NumPages;
+               }
+
+               /* check underflow */
+               PVR_ASSERT(ui32OldValue > (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages));
+
+               PVR_DPF((PVR_DBG_MESSAGE, "Freelist [%p]: shrink by %u pages (current pages %u/%u)",
+                                                               psFreeList,
+                                                               psPMRNode->ui32NumPages,
+                                                               psFreeList->ui32CurrentFLPages,
+                                                               psFreeList->ui32MaxFLPages));
+
+               OSFreeMem(psPMRNode);
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                               "Freelist [0x%p]: shrink denied. PB already at initial PB size (%u pages)",
+                                                               psFreeList,
+                                                               psFreeList->ui32InitFLPages));
+               eError = PVRSRV_ERROR_PBSIZE_ALREADY_MIN;
+       }
+
+       OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+       return eError;
+}
+
+static RGX_FREELIST *FindFreeList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FreelistID)
+{
+       DLLIST_NODE *psNode, *psNext;
+       RGX_FREELIST *psFreeList = NULL;
+
+       OSLockAcquire(psDevInfo->hLockFreeList);
+
+       dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext)
+       {
+               RGX_FREELIST *psThisFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode);
+
+               if (psThisFreeList->ui32FreelistID == ui32FreelistID)
+               {
+                       psFreeList = psThisFreeList;
+                       break;
+               }
+       }
+
+       OSLockRelease(psDevInfo->hLockFreeList);
+       return psFreeList;
+}
+
+void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo,
+                           IMG_UINT32 ui32FreelistID)
+{
+       RGX_FREELIST *psFreeList = NULL;
+       RGXFWIF_KCCB_CMD s3DCCBCmd;
+       IMG_UINT32 ui32GrowValue;
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(psDevInfo);
+
+       psFreeList = FindFreeList(psDevInfo, ui32FreelistID);
+       if (psFreeList == NULL)
+       {
+               /* Should never happen */
+               PVR_DPF((PVR_DBG_ERROR,
+                        "FreeList Lookup for FreeList ID 0x%08x failed (Populate)",
+                        ui32FreelistID));
+               PVR_ASSERT(IMG_FALSE);
+
+               return;
+       }
+
+       /* Since the FW made the request, it has already consumed the ready pages; update the host struct to match */
+       psFreeList->ui32CurrentFLPages += psFreeList->ui32ReadyFLPages;
+       psFreeList->ui32ReadyFLPages = 0;
+
+
+       /* Try to grow the freelist */
+       eError = RGXGrowFreeList(psFreeList,
+                                psFreeList->ui32GrowFLPages,
+                                &psFreeList->sMemoryBlockHead);
+
+       if (eError == PVRSRV_OK)
+       {
+               /* Grow successful, return size of grow size */
+               ui32GrowValue = psFreeList->ui32GrowFLPages;
+
+               psFreeList->ui32NumGrowReqByFW++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+               /* Update Stats */
+               PVRSRVStatsUpdateFreelistStats(0,
+                                              1, /* Add 1 to the appropriate counter (Requests by FW) */
+                                              psFreeList->ui32InitFLPages,
+                                              psFreeList->ui32NumHighPages,
+                                              psFreeList->ownerPid);
+
+#endif
+
+       }
+       else
+       {
+               /* Grow failed */
+               ui32GrowValue = 0;
+               PVR_DPF((PVR_DBG_ERROR,
+                        "Grow for FreeList %p failed (%s)",
+                        psFreeList,
+                        PVRSRVGetErrorString(eError)));
+       }
+
+       /* send feedback */
+       s3DCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE;
+       s3DCCBCmd.uCmdData.sFreeListGSData.sFreeListFWDevVAddr.ui32Addr = psFreeList->sFreeListFWDevVAddr.ui32Addr;
+       s3DCCBCmd.uCmdData.sFreeListGSData.ui32DeltaPages = ui32GrowValue;
+       s3DCCBCmd.uCmdData.sFreeListGSData.ui32NewPages = psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages;
+       s3DCCBCmd.uCmdData.sFreeListGSData.ui32ReadyPages = psFreeList->ui32ReadyFLPages;
+
+
+       PVR_DPF((PVR_DBG_MESSAGE, "Freelist [%p]: Grow pages=%u, new pages=%u, ready pages=%u, counter=%d",
+                psFreeList,
+                ui32GrowValue,
+                s3DCCBCmd.uCmdData.sFreeListGSData.ui32NewPages,
+                s3DCCBCmd.uCmdData.sFreeListGSData.ui32ReadyPages,
+                psFreeList->ui32NumGrowReqByFW));
+
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               eError = RGXScheduleCommand(psDevInfo,
+                                           RGXFWIF_DM_3D,
+                                           &s3DCCBCmd,
+                                           PDUMP_FLAGS_NONE);
+               if (eError != PVRSRV_ERROR_RETRY)
+               {
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+       /* Kernel CCB should never fill up, as the FW is processing them right away  */
+
+       PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+static void _RGXFreeListReconstruction(PDLLIST_NODE psNode)
+{
+
+       PVRSRV_RGXDEV_INFO              *psDevInfo;
+       RGX_FREELIST                    *psFreeList;
+       RGX_PMR_NODE                    *psPMRNode;
+       PVRSRV_ERROR                    eError;
+       IMG_DEVMEM_OFFSET_T             uiOffset;
+       IMG_DEVMEM_SIZE_T               uiLength;
+       IMG_UINT32                              ui32StartPage;
+
+       psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock);
+       psFreeList = psPMRNode->psFreeList;
+       PVR_ASSERT(psFreeList);
+       psDevInfo = psFreeList->psDevInfo;
+       PVR_ASSERT(psDevInfo);
+
+       uiLength = psPMRNode->ui32NumPages * sizeof(IMG_UINT32);
+       ui32StartPage = (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages - psPMRNode->ui32NumPages);
+       uiOffset = psFreeList->uiFreeListPMROffset + ((ui32StartPage * sizeof(IMG_UINT32)) & ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1));
+
+       PMRUnwritePMPageList(psPMRNode->psPageList);
+       psPMRNode->psPageList = NULL;
+       eError = PMRWritePMPageList(
+                                               /* Target PMR, offset, and length */
+                                               psFreeList->psFreeListPMR,
+                                               (psPMRNode->bFirstPageMissing ? uiOffset + sizeof(IMG_UINT32) : uiOffset),
+                                               (psPMRNode->bFirstPageMissing ? uiLength - sizeof(IMG_UINT32) : uiLength),
+                                               /* Referenced PMR, and "page" granularity */
+                                               psPMRNode->psPMR,
+                                               RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
+                                               &psPMRNode->psPageList);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Error (%s) writing FL 0x%08x",
+                               __func__,
+                               PVRSRVGetErrorString(eError),
+                               (IMG_UINT32)psFreeList->ui32FreelistID));
+       }
+
+       /* Zeroing physical pages pointed by the reconstructed freelist */
+       if (psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_ZERO_FREELIST)
+       {
+               eError = PMRZeroingPMR(psPMRNode->psPMR, RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Failed to zero PMR %p of freelist %p (%s)",
+                                       __func__,
+                                       psPMRNode->psPMR,
+                                       psFreeList,
+                                       PVRSRVGetErrorString(eError)));
+                       PVR_ASSERT(0);
+               }
+       }
+
+       psFreeList->ui32CurrentFLPages += psPMRNode->ui32NumPages;
+}
+
+
+static PVRSRV_ERROR RGXReconstructFreeList(RGX_FREELIST *psFreeList)
+{
+       IMG_UINT32        ui32OriginalFLPages;
+       DLLIST_NODE       *psNode, *psNext;
+       PVRSRV_ERROR      eError;
+#if !defined(PM_INTERACTIVE_MODE)
+       IMG_DEV_VIRTADDR  sFreeListBaseDevVAddr;
+#endif
+
+       //PVR_DPF((PVR_DBG_ERROR, "FreeList RECONSTRUCTION: Reconstructing freelist %p (ID=%u)", psFreeList, psFreeList->ui32FreelistID));
+
+       /* Do the FreeList Reconstruction */
+       ui32OriginalFLPages            = psFreeList->ui32CurrentFLPages;
+       psFreeList->ui32CurrentFLPages = 0;
+
+       /* Reconstructing Init FreeList pages */
+       dllist_foreach_node(&psFreeList->sMemoryBlockInitHead, psNode, psNext)
+       {
+               _RGXFreeListReconstruction(psNode);
+       }
+
+       /* Reconstructing Grow FreeList pages */
+       dllist_foreach_node(&psFreeList->sMemoryBlockHead, psNode, psNext)
+       {
+               _RGXFreeListReconstruction(psNode);
+       }
+
+       /* Ready pages are allocated but kept hidden until OOM occurs. */
+       psFreeList->ui32CurrentFLPages -= psFreeList->ui32ReadyFLPages;
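+       /* The per-block page counts summed above include the ready pages, so
+        * after removing them the total should equal the count held before
+        * reconstruction; any mismatch is treated as a failed reconstruction.
+        */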
+       if (psFreeList->ui32CurrentFLPages != ui32OriginalFLPages)
+       {
+               PVR_ASSERT(psFreeList->ui32CurrentFLPages == ui32OriginalFLPages);
+               return PVRSRV_ERROR_FREELIST_RECONSTRUCTION_FAILED;
+       }
+
+       {
+               RGXFWIF_FREELIST  *psFWFreeList;
+
+               /* Update firmware freelist structure */
+               eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList);
+               if (eError != PVRSRV_OK)
+               {
+                       return eError;
+               }
+
+#if defined(PM_INTERACTIVE_MODE)
+               psFWFreeList->ui32CurrentStackTop       = psFWFreeList->ui32CurrentPages - 1;
+               psFWFreeList->ui32AllocatedPageCount    = 0;
+               psFWFreeList->ui32AllocatedMMUPageCount = 0;
+#else
+               sFreeListBaseDevVAddr                          = psFWFreeList->sFreeListBaseDevVAddr;
+               psFWFreeList->bUpdatePending                   = IMG_FALSE;
+               psFWFreeList->ui32UpdateNewPages               = 0;
+               psFWFreeList->ui32UpdateNewReadyPages          = 0;
+               psFWFreeList->sFreeListLastGrowDevVAddr.uiAddr = 0;
+#endif
+
+               DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc);
+       }
+
+#if !defined(PM_INTERACTIVE_MODE)
+       /* Reset freelist state buffer */
+       {
+               RGX_PM_FREELISTSTATE_BUFFER             sFLState;
+               size_t uiNbBytes;
+               IMG_DEV_VIRTADDR sFLBaseAddr;
+
+               eError = PMR_ReadBytes(psFreeList->psFreeListStatePMR, psFreeList->uiFreeListStatePMROffset, (IMG_UINT8*)&sFLState, sizeof(sFLState), &uiNbBytes);
+
+               if (eError != PVRSRV_OK)
+               {
+                       return eError;
+               }
+
+               PVR_ASSERT(uiNbBytes == sizeof(sFLState));
+
+               sFLBaseAddr.uiAddr = (sFreeListBaseDevVAddr.uiAddr +
+                                     ((psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages) * sizeof(IMG_UINT32))) &
+                                     ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1);
+               /* Note: Freelist base address is packed shifted down. */
+               RGX_PM_FREELISTSTATE_BUFFER_SET_BASE_ADDR(sFLState, sFLBaseAddr.uiAddr >> RGX_PM_FREELISTSTATE_BASE_ADDR_ALIGNSHIFT);
+               RGX_PM_FREELISTSTATE_BUFFER_SET_STACK_PTR(sFLState, psFreeList->ui32CurrentFLPages - 1);
+               RGX_PM_FREELISTSTATE_BUFFER_SET_PAGE_STATUS(sFLState, 0);
+               RGX_PM_FREELISTSTATE_BUFFER_SET_MMUPAGE_STATUS(sFLState, 0);
+
+               eError = PMR_WriteBytes(psFreeList->psFreeListStatePMR, psFreeList->uiFreeListStatePMROffset, (IMG_UINT8*)&sFLState, sizeof(sFLState), &uiNbBytes);
+
+               if (eError != PVRSRV_OK)
+               {
+                       return eError;
+               }
+
+               PVR_ASSERT(uiNbBytes == sizeof(sFLState));
+       }
+#endif
+
+       /* Check the Freelist checksum if required (as the list is fully populated) */
+       if (psFreeList->bCheckFreelist)
+       {
+               IMG_UINT64  ui64CheckSum;
+
+               _CheckFreelist(psFreeList, psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages, psFreeList->ui64FreelistChecksum, &ui64CheckSum);
+       }
+
+       return eError;
+}
+
+
+void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                              IMG_UINT32 ui32FreelistsCount,
+                                              const IMG_UINT32 *paui32Freelists)
+{
+       PVRSRV_ERROR      eError = PVRSRV_OK;
+       DLLIST_NODE       *psNode, *psNext;
+       IMG_UINT32        ui32Loop;
+       RGXFWIF_KCCB_CMD  sTACCBCmd;
+#if !defined(SUPPORT_SHADOW_FREELISTS)
+       DLLIST_NODE       *psNodeHWRTData, *psNextHWRTData;
+       RGX_KM_HW_RT_DATASET *psKMHWRTDataSet;
+       RGXFWIF_HWRTDATA     *psHWRTData;
+#endif
+       IMG_UINT32        ui32FinalFreelistsCount = 0;
+       IMG_UINT32        aui32FinalFreelists[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT * 2]; /* Worst-case is double what we are sent */
+
+       PVR_ASSERT(psDevInfo != NULL);
+       PVR_ASSERT(ui32FreelistsCount <= RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT);
+       if (ui32FreelistsCount > RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT)
+       {
+               ui32FreelistsCount = RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT;
+       }
+
+       //PVR_DPF((PVR_DBG_ERROR, "FreeList RECONSTRUCTION: %u freelist(s) requested for reconstruction", ui32FreelistsCount));
+
+       /*
+        *  Initialise the response command (in case we don't find a freelist ID).
+        *  Also copy the list to the 'final' freelist array.
+        */
+       sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE;
+       sTACCBCmd.uCmdData.sFreeListsReconstructionData.ui32FreelistsCount = ui32FreelistsCount;
+
+       for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++)
+       {
+               sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] = paui32Freelists[ui32Loop] |
+                               RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG;
+               aui32FinalFreelists[ui32Loop] = paui32Freelists[ui32Loop];
+       }
+
+       ui32FinalFreelistsCount = ui32FreelistsCount;
+
+       /*
+        *  The list of freelists we have been given for reconstruction will
+        *  consist of local and global freelists (maybe MMU as well). Any
+        *  local freelists should have their global list specified as well.
+        *  There may be cases where the global freelist is not given (in
+        *  cases of partial setups before a poll failure for example). To
+        *  handle that we must first ensure every local freelist has a global
+        *  freelist specified, otherwise we add that to the 'final' list.
+        *  This final list of freelists is created in a first pass.
+        *
+        *  Even with the global freelists listed, there may be other local
+        *  freelists not listed, which are going to have their global freelist
+        *  reconstructed. Therefore we have to find those freelists as well
+        *  meaning we will have to iterate the entire list of freelists to
+        *  find which must be reconstructed. This is the second pass.
+        */
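+       /*
+        *  Illustrative example (hypothetical IDs): if the FW requests local
+        *  freelist 5 whose global freelist is 2 but does not list 2 itself,
+        *  the first pass appends 2 to the final array; the second pass then
+        *  also reconstructs any other local freelist whose global ID is 2,
+        *  even though it was never explicitly requested.
+        */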
+       OSLockAcquire(psDevInfo->hLockFreeList);
+       dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext)
+       {
+               RGX_FREELIST  *psFreeList   = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode);
+               IMG_BOOL      bInList       = IMG_FALSE;
+               IMG_BOOL      bGlobalInList = IMG_FALSE;
+
+               /* Check if this local freelist is in the list and ensure its global is too. */
+               if (psFreeList->ui32FreelistGlobalID != 0)
+               {
+                       for (ui32Loop = 0; ui32Loop < ui32FinalFreelistsCount; ui32Loop++)
+                       {
+                               if (aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistID)
+                               {
+                                       bInList = IMG_TRUE;
+                               }
+                               if (aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistGlobalID)
+                               {
+                                       bGlobalInList = IMG_TRUE;
+                               }
+                       }
+
+                       if (bInList  &&  !bGlobalInList)
+                       {
+                               aui32FinalFreelists[ui32FinalFreelistsCount] = psFreeList->ui32FreelistGlobalID;
+                               ui32FinalFreelistsCount++;
+                       }
+               }
+       }
+       dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext)
+       {
+               RGX_FREELIST  *psFreeList  = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode);
+               IMG_BOOL      bReconstruct = IMG_FALSE;
+
+               /*
+                *  Check if this freelist needs to be reconstructed (was it requested
+                *  or is its global freelist going to be reconstructed)...
+                */
+               for (ui32Loop = 0; ui32Loop < ui32FinalFreelistsCount; ui32Loop++)
+               {
+                       if (aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistID  ||
+                           aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistGlobalID)
+                       {
+                               bReconstruct = IMG_TRUE;
+                               break;
+                       }
+               }
+
+               if (bReconstruct)
+               {
+                       eError = RGXReconstructFreeList(psFreeList);
+                       if (eError == PVRSRV_OK)
+                       {
+#if !defined(SUPPORT_SHADOW_FREELISTS)
+                               /* Mark every HWRTData of the local freelists being reconstructed as HWR (applies to TA/3D work that has not finished yet) */
+                               dllist_foreach_node(&psFreeList->sNodeHWRTDataHead, psNodeHWRTData, psNextHWRTData)
+                               {
+                                       psKMHWRTDataSet = IMG_CONTAINER_OF(psNodeHWRTData, RGX_KM_HW_RT_DATASET, sNodeHWRTData);
+                                       eError = DevmemAcquireCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc, (void **)&psHWRTData);
+                                       if (eError != PVRSRV_OK)
+                                       {
+                                               PVR_DPF((PVR_DBG_ERROR,
+                                                                       "Devmem AcquireCpuVirtAddr Failed during Reconstructing of FreeList, FwMemDesc(%p),psHWRTData(%p)",
+                                                                       psKMHWRTDataSet->psHWRTDataFwMemDesc,
+                                                                       psHWRTData));
+                                               continue;
+                                       }
+
+                                       psHWRTData->eState = RGXFWIF_RTDATA_STATE_HWR;
+                                       psHWRTData->ui32HWRTDataFlags &= ~HWRTDATA_HAS_LAST_TA;
+
+                                       DevmemReleaseCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc);
+                               }
+#endif
+
+                               /* Update the response for this freelist if it was specifically requested for reconstruction. */
+                               for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++)
+                               {
+                                       if (paui32Freelists[ui32Loop] == psFreeList->ui32FreelistID)
+                                       {
+                                               /* Reconstruction of this requested freelist was successful... */
+                                               sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] &= ~RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG;
+                                               break;
+                                       }
+                               }
+                       }
+                       else
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                               "Reconstructing of FreeList %p failed (%s)",
+                                               psFreeList,
+                                               PVRSRVGetErrorString(eError)));
+                       }
+               }
+       }
+       OSLockRelease(psDevInfo->hLockFreeList);
+
+       /* Check that all freelists were found and reconstructed... */
+       for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++)
+       {
+               PVR_ASSERT((sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] &
+                           RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG) == 0);
+       }
+
+       /* send feedback */
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               eError = RGXScheduleCommand(psDevInfo,
+                                           RGXFWIF_DM_GEOM,
+                                           &sTACCBCmd,
+                                           PDUMP_FLAGS_NONE);
+               if (eError != PVRSRV_ERROR_RETRY)
+               {
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       /* Kernel CCB should never fill up, as the FW is processing them right away  */
+       PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+/* Create HWRTDataSet */
+static PVRSRV_ERROR RGXCreateHWRTData_aux(CONNECTION_DATA  *psConnection,
+                                  PVRSRV_DEVICE_NODE       *psDeviceNode,
+                                  IMG_DEV_VIRTADDR          psVHeapTableDevVAddr,
+                                  IMG_DEV_VIRTADDR          sPMDataDevVAddr, /* per-HWRTData */
+                                  IMG_DEV_VIRTADDR          sPMSecureDataDevVAddr, /* per-HWRTData */
+                                  RGX_FREELIST            *apsFreeLists[RGXFW_MAX_FREELISTS],
+                                  IMG_DEV_VIRTADDR          sTailPtrsDevVAddr,
+                                  IMG_UINT16                ui16MaxRTs,
+                                  RGX_HWRTDATA_COMMON_COOKIE   *psHWRTDataCommonCookie,
+                                  RGX_KM_HW_RT_DATASET    **ppsKMHWRTDataSet) /* per-HWRTData */
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       IMG_UINT32 ui32Loop;
+
+       /* KM cookie storing all the FW/HW data */
+       RGX_KM_HW_RT_DATASET *psKMHWRTDataSet;
+
+       /* local pointers for memory descriptors of FW allocations */
+       DEVMEM_MEMDESC *psHWRTDataFwMemDesc = NULL;
+       DEVMEM_MEMDESC *psRTArrayFwMemDesc = NULL;
+       DEVMEM_MEMDESC *psRendersAccArrayFwMemDesc = NULL;
+
+       /* local pointer for CPU-mapped [FW]HWRTData */
+       RGXFWIF_HWRTDATA *psHWRTData = NULL;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       /* Prepare the HW RT DataSet struct */
+       psKMHWRTDataSet = OSAllocZMem(sizeof(*psKMHWRTDataSet));
+       if (psKMHWRTDataSet == NULL)
+       {
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto AllocError;
+       }
+
+       *ppsKMHWRTDataSet = psKMHWRTDataSet;
+       psKMHWRTDataSet->psDeviceNode = psDeviceNode;
+
+       psKMHWRTDataSet->psHWRTDataCommonCookie = psHWRTDataCommonCookie;
+
+       psDevInfo = psDeviceNode->pvDevice;
+
+       /*
+        * This FW RT-Data is only mapped into kernel for initialisation.
+        * Otherwise this allocation is only used by the FW.
+        * Therefore the GPU cache doesn't need coherency and write-combine will
+        * suffice on the CPU side. (WC buffer will be flushed at the first TA-kick)
+        */
+       eError = DevmemFwAllocate(      psDevInfo,
+                                                               sizeof(RGXFWIF_HWRTDATA),
+                                                               RGX_FWCOMCTX_ALLOCFLAGS,
+                                                               "FwHwRTData",
+                                                               &psHWRTDataFwMemDesc    );
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: DevmemAllocate for RGX_FWIF_HWRTDATA failed",
+                               __func__));
+               goto FWRTDataAllocateError;
+       }
+
+       psKMHWRTDataSet->psHWRTDataFwMemDesc = psHWRTDataFwMemDesc;
+       eError = RGXSetFirmwareAddress( &psKMHWRTDataSet->sHWRTDataFwAddr,
+                                                       psHWRTDataFwMemDesc,
+                                                       0,
+                                                       RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", FWRTDataFwAddrError);
+
+       eError = DevmemAcquireCpuVirtAddr(psHWRTDataFwMemDesc,
+                                                                         (void **)&psHWRTData);
+       PVR_LOG_GOTO_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWRTDataCpuMapError);
+
+#if defined(PM_INTERACTIVE_MODE)
+       psHWRTData->psVHeapTableDevVAddr = psVHeapTableDevVAddr;
+#endif
+
+       psHWRTData->sHWRTDataCommonFwAddr = psHWRTDataCommonCookie->sHWRTDataCommonFwAddr;
+
+       psHWRTData->sPMSecureRenderStateDevVAddr = sPMSecureDataDevVAddr;
+
+#if defined(PM_INTERACTIVE_MODE)
+       psHWRTData->sPMMListDevVAddr = sPMDataDevVAddr;
+#else
+       psHWRTData->sPMRenderStateDevVAddr = sPMDataDevVAddr;
+#endif
+
+       psHWRTData->sTailPtrsDevVAddr     = sTailPtrsDevVAddr;
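+       /* Take a reference on every freelist attached to this HWRTData set; the
+        * matching decrements are in RGXDestroyHWRTData_aux() and in the error
+        * path below.
+        */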
+       OSLockAcquire(psDevInfo->hLockFreeList);
+       for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++)
+       {
+               psKMHWRTDataSet->apsFreeLists[ui32Loop] = apsFreeLists[ui32Loop];
+               psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount++;
+               psHWRTData->apsFreeLists[ui32Loop].ui32Addr = psKMHWRTDataSet->apsFreeLists[ui32Loop]->sFreeListFWDevVAddr.ui32Addr;
+               /* Invalid initial snapshot value; the snapshot is always taken on
+                * the first kick and replaces this value, so it is safe to set it
+                * to 0.
+                */
+               psHWRTData->aui32FreeListHWRSnapshot[ui32Loop] = 0;
+               psHWRTData->bRenderStateNeedsReset = IMG_FALSE;
+       }
+#if !defined(SUPPORT_SHADOW_FREELISTS)
+       dllist_add_to_tail(&apsFreeLists[RGXFW_LOCAL_FREELIST]->sNodeHWRTDataHead, &(psKMHWRTDataSet->sNodeHWRTData));
+#endif
+       OSLockRelease(psDevInfo->hLockFreeList);
+
+       {
+               RGXFWIF_RTA_CTL *psRTACtl = &psHWRTData->sRTACtl;
+
+               psRTACtl->ui32RenderTargetIndex = 0;
+               psRTACtl->ui32ActiveRenderTargets = 0;
+               psRTACtl->sValidRenderTargets.ui32Addr = 0;
+               psRTACtl->sRTANumPartialRenders.ui32Addr = 0;
+               psRTACtl->ui32MaxRTs = (IMG_UINT32) ui16MaxRTs;
+
+               if (ui16MaxRTs > 1)
+               {
+                       /* Allocate memory for the checks */
+                       PDUMPCOMMENT(psDeviceNode, "Allocate memory for shadow render target cache");
+                       eError = DevmemFwAllocate(      psDevInfo,
+                                                                               ui16MaxRTs * sizeof(IMG_UINT32),
+                                                                               PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                                                                               PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+                                                                               PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                                                                               PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                                                                               PVRSRV_MEMALLOCFLAG_GPU_UNCACHED |
+                                                                               PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+                                                                               PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN),
+                                                                               "FwShadowRTCache",
+                                                                               &psRTArrayFwMemDesc);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                               "%s: Failed to allocate render target array of %u entries (%s)",
+                                               __func__,
+                                               ui16MaxRTs, PVRSRVGetErrorString(eError)));
+                               goto FWAllocateRTArryError;
+                       }
+
+                       psKMHWRTDataSet->psRTArrayFwMemDesc = psRTArrayFwMemDesc;
+
+                       eError = RGXSetFirmwareAddress( &psRTACtl->sValidRenderTargets,
+                                                                       psRTArrayFwMemDesc,
+                                                                       0,
+                                                                       RFW_FWADDR_FLAG_NONE    );
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", FWAllocateRTArryFwAddrError);
+
+                       /* Allocate memory for the checks */
+                       PDUMPCOMMENT(psDeviceNode, "Allocate memory for tracking renders accumulation");
+                       eError = DevmemFwAllocate(psDevInfo,
+                                                 ui16MaxRTs * sizeof(IMG_UINT32),
+                                                         PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                                                         PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+                                                         PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                                                         PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                                                         PVRSRV_MEMALLOCFLAG_GPU_UNCACHED |
+                                                         PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+                                                         PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN),
+                                                         "FwRendersAccumulation",
+                                                         &psRendersAccArrayFwMemDesc);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                               "%s: Failed to allocate render target array of %u entries (%s) (renders accumulation)",
+                                               __func__,
+                                               ui16MaxRTs, PVRSRVGetErrorString(eError)));
+                               goto FWAllocateRTAccArryError;
+                       }
+
+                       psKMHWRTDataSet->psRendersAccArrayFwMemDesc = psRendersAccArrayFwMemDesc;
+
+                       eError = RGXSetFirmwareAddress( &psRTACtl->sRTANumPartialRenders,
+                                                                       psRendersAccArrayFwMemDesc,
+                                                                       0,
+                                                                       RFW_FWADDR_FLAG_NONE    );
+                       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:3", FWAllocRTAccArryFwAddrError);
+               }
+       }
+
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDeviceNode, "Dump HWRTData 0x%08X", psKMHWRTDataSet->sHWRTDataFwAddr.ui32Addr);
+       DevmemPDumpLoadMem(psKMHWRTDataSet->psHWRTDataFwMemDesc, 0, sizeof(*psHWRTData), PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+       DevmemReleaseCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc);
+       return PVRSRV_OK;
+
+FWAllocRTAccArryFwAddrError:
+       DevmemFwUnmapAndFree(psDevInfo, psRendersAccArrayFwMemDesc);
+FWAllocateRTAccArryError:
+       RGXUnsetFirmwareAddress(psRTArrayFwMemDesc);
+FWAllocateRTArryFwAddrError:
+       DevmemFwUnmapAndFree(psDevInfo, psRTArrayFwMemDesc);
+FWAllocateRTArryError:
+       OSLockAcquire(psDevInfo->hLockFreeList);
+       for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++)
+       {
+               PVR_ASSERT(psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount > 0);
+               psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount--;
+       }
+       OSLockRelease(psDevInfo->hLockFreeList);
+       DevmemReleaseCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc);
+FWRTDataCpuMapError:
+       RGXUnsetFirmwareAddress(psKMHWRTDataSet->psHWRTDataFwMemDesc);
+FWRTDataFwAddrError:
+       DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psHWRTDataFwMemDesc);
+FWRTDataAllocateError:
+       *ppsKMHWRTDataSet = NULL;
+       OSFreeMem(psKMHWRTDataSet);
+
+AllocError:
+       return eError;
+}
+
+/* Destroy HWRTDataSet */
+static PVRSRV_ERROR RGXDestroyHWRTData_aux(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       IMG_UINT32 ui32Loop;
+
+       PVR_ASSERT(psKMHWRTDataSet);
+
+       psDevInfo = psKMHWRTDataSet->psDeviceNode->pvDevice;
+
+       if (psKMHWRTDataSet->psRTArrayFwMemDesc)
+       {
+               RGXUnsetFirmwareAddress(psKMHWRTDataSet->psRTArrayFwMemDesc);
+               DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psRTArrayFwMemDesc);
+       }
+
+       if (psKMHWRTDataSet->psRendersAccArrayFwMemDesc)
+       {
+               RGXUnsetFirmwareAddress(psKMHWRTDataSet->psRendersAccArrayFwMemDesc);
+               DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psRendersAccArrayFwMemDesc);
+       }
+
+       /* Decrease freelist refcount */
+       OSLockAcquire(psDevInfo->hLockFreeList);
+       for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++)
+       {
+               PVR_ASSERT(psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount > 0);
+               psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount--;
+       }
+#if !defined(SUPPORT_SHADOW_FREELISTS)
+       dllist_remove_node(&psKMHWRTDataSet->sNodeHWRTData);
+#endif
+       OSLockRelease(psDevInfo->hLockFreeList);
+
+       /* Freeing the memory has to happen _after_ removing the HWRTData from the freelist,
+        * otherwise we risk a freelist traversal finding a pointer to a freed data structure */
+       RGXUnsetFirmwareAddress(psKMHWRTDataSet->psHWRTDataFwMemDesc);
+       DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psHWRTDataFwMemDesc);
+
+       OSFreeMem(psKMHWRTDataSet);
+
+       return PVRSRV_OK;
+}
+
+/* Create set of HWRTData(s) and bind it with a shared FW HWRTDataCommon */
+PVRSRV_ERROR RGXCreateHWRTDataSet(CONNECTION_DATA      *psConnection,
+               PVRSRV_DEVICE_NODE      *psDeviceNode,
+               IMG_DEV_VIRTADDR        psVHeapTableDevVAddr,
+               IMG_DEV_VIRTADDR                asPMDataDevVAddr[RGXMKIF_NUM_RTDATAS],
+               IMG_DEV_VIRTADDR        asPMSecureDataDevVAddr[RGXMKIF_NUM_RTDATAS],
+               RGX_FREELIST               *apsFreeLists[RGXMKIF_NUM_RTDATA_FREELISTS],
+               IMG_UINT32                ui32ScreenPixelMax,
+               IMG_UINT64                ui64PPPMultiSampleCtl,
+               IMG_UINT32                ui32TEStride,
+               IMG_DEV_VIRTADDR          asTailPtrsDevVAddr[RGXMKIF_NUM_GEOMDATAS],
+               IMG_UINT32                ui32TPCSize,
+               IMG_UINT32                ui32TEScreen,
+               IMG_UINT32                ui32TEAA,
+               IMG_UINT32                ui32TEMTILE1,
+               IMG_UINT32                ui32TEMTILE2,
+               IMG_UINT32                ui32RgnStride,
+               IMG_UINT32                ui32ISPMergeLowerX,
+               IMG_UINT32                ui32ISPMergeLowerY,
+               IMG_UINT32                ui32ISPMergeUpperX,
+               IMG_UINT32                ui32ISPMergeUpperY,
+               IMG_UINT32                ui32ISPMergeScaleX,
+               IMG_UINT32                ui32ISPMergeScaleY,
+               IMG_UINT16                ui16MaxRTs,
+               RGX_KM_HW_RT_DATASET     *pasKMHWRTDataSet[RGXMKIF_NUM_RTDATAS])
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32RTDataID;
+       PVRSRV_RGXDEV_INFO              *psDevInfo = psDeviceNode->pvDevice;
+
+       RGX_HWRTDATA_COMMON_COOKIE      *psHWRTDataCommonCookie;
+       RGXFWIF_HWRTDATA_COMMON         *psHWRTDataCommon;
+       DEVMEM_MEMDESC                          *psHWRTDataCommonFwMemDesc;
+       RGXFWIF_DEV_VIRTADDR            sHWRTDataCommonFwAddr;
+
+       /* Prepare KM cleanup object for HWRTDataCommon FW object */
+       psHWRTDataCommonCookie = OSAllocZMem(sizeof(*psHWRTDataCommonCookie));
+       if (psHWRTDataCommonCookie == NULL)
+       {
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto err_HWRTDataCommonCookieAlloc;
+       }
+
+       /*
+        * This FW common context is only mapped into the kernel for initialisation.
+        * Otherwise this allocation is only used by the FW.
+        * Therefore the GPU cache doesn't need coherency, and write-combine will
+        * suffice on the CPU side (WC buffer will be flushed at the first TA-kick)
+        */
+       eError = DevmemFwAllocate(psDevInfo,
+                       sizeof(RGXFWIF_HWRTDATA_COMMON),
+                       RGX_FWCOMCTX_ALLOCFLAGS,
+                       "FwHWRTDataCommon",
+                       &psHWRTDataCommonFwMemDesc);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: DevmemFwAllocate for FwHWRTDataCommon failed", __func__));
+               goto err_HWRTDataCommonAlloc;
+       }
+       eError = RGXSetFirmwareAddress(&sHWRTDataCommonFwAddr, psHWRTDataCommonFwMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", err_HWRTDataCommonFwAddr);
+
+       eError = DevmemAcquireCpuVirtAddr(psHWRTDataCommonFwMemDesc, (void **)&psHWRTDataCommon);
+       PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", err_HWRTDataCommonVA);
+
+       psHWRTDataCommon->ui32ScreenPixelMax    = ui32ScreenPixelMax;
+       psHWRTDataCommon->ui64PPPMultiSampleCtl = ui64PPPMultiSampleCtl;
+       psHWRTDataCommon->ui32TEStride          = ui32TEStride;
+       psHWRTDataCommon->ui32TPCSize           = ui32TPCSize;
+       psHWRTDataCommon->ui32TEScreen          = ui32TEScreen;
+       psHWRTDataCommon->ui32TEAA              = ui32TEAA;
+       psHWRTDataCommon->ui32TEMTILE1          = ui32TEMTILE1;
+       psHWRTDataCommon->ui32TEMTILE2          = ui32TEMTILE2;
+       psHWRTDataCommon->ui32RgnStride         = ui32RgnStride; /* Region stride in Bytes */
+       psHWRTDataCommon->ui32ISPMergeLowerX    = ui32ISPMergeLowerX;
+       psHWRTDataCommon->ui32ISPMergeLowerY    = ui32ISPMergeLowerY;
+       psHWRTDataCommon->ui32ISPMergeUpperX    = ui32ISPMergeUpperX;
+       psHWRTDataCommon->ui32ISPMergeUpperY    = ui32ISPMergeUpperY;
+       psHWRTDataCommon->ui32ISPMergeScaleX    = ui32ISPMergeScaleX;
+       psHWRTDataCommon->ui32ISPMergeScaleY    = ui32ISPMergeScaleY;
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDeviceNode, "Dump HWRTDataCommon");
+       DevmemPDumpLoadMem(psHWRTDataCommonFwMemDesc, 0, sizeof(*psHWRTDataCommon), PDUMP_FLAGS_CONTINUOUS);
+#endif
+       DevmemReleaseCpuVirtAddr(psHWRTDataCommonFwMemDesc);
+
+       psHWRTDataCommonCookie->ui32RefCount = 0;
+       psHWRTDataCommonCookie->psHWRTDataCommonFwMemDesc = psHWRTDataCommonFwMemDesc;
+       psHWRTDataCommonCookie->sHWRTDataCommonFwAddr = sHWRTDataCommonFwAddr;
+
+       /* Create a set of HWRTData(s); the number of elements in the set
+          equals RGXMKIF_NUM_RTDATAS.
+       */
+
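+       /* RT data slots that map to the same geom data index
+        * (ui32RTDataID % RGXMKIF_NUM_GEOMDATAS) share the same group of
+        * RGXFW_MAX_FREELISTS free lists and the same tail pointer buffer. */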
+       for (ui32RTDataID = 0; ui32RTDataID < RGXMKIF_NUM_RTDATAS; ui32RTDataID++)
+       {
+               eError = RGXCreateHWRTData_aux(
+                       psConnection,
+                       psDeviceNode,
+                       psVHeapTableDevVAddr,
+                       asPMDataDevVAddr[ui32RTDataID],
+                       asPMSecureDataDevVAddr[ui32RTDataID],
+                       &apsFreeLists[(ui32RTDataID % RGXMKIF_NUM_GEOMDATAS) * RGXFW_MAX_FREELISTS],
+                       asTailPtrsDevVAddr[ui32RTDataID % RGXMKIF_NUM_GEOMDATAS],
+                       ui16MaxRTs,
+                       psHWRTDataCommonCookie,
+                       &pasKMHWRTDataSet[ui32RTDataID]);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Failed to create HWRTData [slot %u] (%s)",
+                                       __func__,
+                                       ui32RTDataID,
+                                       PVRSRVGetErrorString(eError)));
+                       goto err_HWRTDataAlloc;
+               }
+               psHWRTDataCommonCookie->ui32RefCount += 1;
+       }
+
+       return PVRSRV_OK;
+
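+       /* Error unwind: destroy only the HWRTDatas created so far; the common
+        * cookie's ref count records how many slots were successfully created. */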
+err_HWRTDataAlloc:
+       PVR_DPF((PVR_DBG_WARNING, "%s: err_HWRTDataAlloc %u",
+                        __func__, psHWRTDataCommonCookie->ui32RefCount));
+       if (pasKMHWRTDataSet)
+       {
+               for (ui32RTDataID = psHWRTDataCommonCookie->ui32RefCount; ui32RTDataID > 0; ui32RTDataID--)
+               {
+                       if (pasKMHWRTDataSet[ui32RTDataID-1] != NULL)
+                       {
+                               RGXDestroyHWRTData_aux(pasKMHWRTDataSet[ui32RTDataID-1]);
+                               pasKMHWRTDataSet[ui32RTDataID-1] = NULL;
+                       }
+               }
+       }
+err_HWRTDataCommonVA:
+       RGXUnsetFirmwareAddress(psHWRTDataCommonFwMemDesc);
+err_HWRTDataCommonFwAddr:
+       DevmemFwUnmapAndFree(psDevInfo, psHWRTDataCommonFwMemDesc);
+err_HWRTDataCommonAlloc:
+       OSFreeMem(psHWRTDataCommonCookie);
+err_HWRTDataCommonCookieAlloc:
+
+       return eError;
+}
+
+/* Destroy a single instance of HWRTData.
+   Additionally, destroy the HWRTDataCommon{Cookie} objects
+   when it is the last HWRTData within a corresponding set of HWRTDatas.
+*/
+PVRSRV_ERROR RGXDestroyHWRTDataSet(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo;
+       PVRSRV_DEVICE_NODE *psDevNode;
+       PVRSRV_ERROR eError;
+       PRGXFWIF_HWRTDATA psHWRTData;
+       RGX_HWRTDATA_COMMON_COOKIE *psCommonCookie;
+
+       PVR_ASSERT(psKMHWRTDataSet);
+
+       psDevNode = psKMHWRTDataSet->psDeviceNode;
+       psDevInfo = psDevNode->pvDevice;
+
+       eError = RGXSetFirmwareAddress(&psHWRTData,
+                                      psKMHWRTDataSet->psHWRTDataFwMemDesc, 0,
+                                      RFW_FWADDR_NOREF_FLAG);
+       PVR_RETURN_IF_ERROR(eError);
+
+       /* Cleanup HWRTData */
+       eError = RGXFWRequestHWRTDataCleanUp(psDevNode, psHWRTData);
+       if (eError != PVRSRV_OK)
+       {
+               return eError;
+       }
+
+       psCommonCookie = psKMHWRTDataSet->psHWRTDataCommonCookie;
+
+       RGXDestroyHWRTData_aux(psKMHWRTDataSet);
+
+       /* We've got past potential PVRSRV_ERROR_RETRY events, so we are sure
+          that the HWRTDATA instance will be destroyed during this call.
+          Consequently, we decrease the ref count for HWRTDataCommonCookie.
+
+          NOTE: This ref count does not require locks or atomics.
+          -------------------------------------------------------
+            HWRTDatas bound into one pair are always destroyed sequentially,
+            within a single loop on the Client side.
+            The Common/Cookie objects always belong to only one pair of
+            HWRTDatas, and ref count is used to ensure that the Common/Cookie
+            objects will be destroyed after destruction of all HWRTDatas
+            within a single pair.
+       */
+       psCommonCookie->ui32RefCount--;
+
+       /* When ref count for HWRTDataCommonCookie hits ZERO
+        * we have to destroy the HWRTDataCommon [FW object] and the cookie
+        * [KM object] afterwards. */
+       if (psCommonCookie->ui32RefCount == 0)
+       {
+               RGXUnsetFirmwareAddress(psCommonCookie->psHWRTDataCommonFwMemDesc);
+
+               /* We don't need to flush the SLC before freeing.
+                * FW RequestCleanUp has already done that for HWRTData, so we're fine
+                * now. */
+
+               DevmemFwUnmapAndFree(psDevNode->pvDevice,
+                                    psCommonCookie->psHWRTDataCommonFwMemDesc);
+               OSFreeMem(psCommonCookie);
+       }
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA      *psConnection,
+                               PVRSRV_DEVICE_NODE      *psDeviceNode,
+                               IMG_HANDLE                      hMemCtxPrivData,
+                               IMG_UINT32                      ui32MaxFLPages,
+                               IMG_UINT32                      ui32InitFLPages,
+                               IMG_UINT32                      ui32GrowFLPages,
+                               IMG_UINT32           ui32GrowParamThreshold,
+                               RGX_FREELIST                    *psGlobalFreeList,
+                               IMG_BOOL                                bCheckFreelist,
+                               IMG_DEV_VIRTADDR                sFreeListBaseDevVAddr,
+                               IMG_DEV_VIRTADDR                sFreeListStateDevVAddr,
+                               PMR                                     *psFreeListPMR,
+                               IMG_DEVMEM_OFFSET_T     uiFreeListPMROffset,
+                               PMR                                     *psFreeListStatePMR,
+                               IMG_DEVMEM_OFFSET_T     uiFreeListStatePMROffset,
+                               RGX_FREELIST                    **ppsFreeList)
+{
+       PVRSRV_ERROR                            eError;
+       RGXFWIF_FREELIST                        *psFWFreeList;
+       DEVMEM_MEMDESC                          *psFWFreelistMemDesc;
+       RGX_FREELIST                            *psFreeList;
+       PVRSRV_RGXDEV_INFO                      *psDevInfo = psDeviceNode->pvDevice;
+
+       if (OSGetPageShift() > RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT)
+       {
+               IMG_UINT32 ui32Size, ui32NewInitFLPages, ui32NewMaxFLPages, ui32NewGrowFLPages;
+
+               /* Round up number of FL pages to the next multiple of the OS page size */
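+               /* Illustrative example (assuming a 4KB PM physical page, i.e.
+                * RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT == 12, and a 16KB OS page):
+                * an initial request of 5 FL pages (20KB) is aligned up to 32KB,
+                * i.e. 8 FL pages. */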
+
+               ui32Size = ui32InitFLPages << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
+               ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize());
+               ui32NewInitFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
+
+               ui32Size = ui32GrowFLPages << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
+               ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize());
+               ui32NewGrowFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
+
+               ui32Size = ui32MaxFLPages << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
+               ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize());
+               ui32NewMaxFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
+
+               PVR_DPF((PVR_DBG_WARNING, "%s: Increased number of PB pages: Init %u -> %u, Grow %u -> %u, Max %u -> %u",
+                                __func__, ui32InitFLPages, ui32NewInitFLPages, ui32GrowFLPages, ui32NewGrowFLPages, ui32MaxFLPages, ui32NewMaxFLPages));
+
+               ui32InitFLPages = ui32NewInitFLPages;
+               ui32GrowFLPages = ui32NewGrowFLPages;
+               ui32MaxFLPages = ui32NewMaxFLPages;
+       }
+
+       /* Allocate kernel freelist struct */
+       psFreeList = OSAllocZMem(sizeof(*psFreeList));
+       if (psFreeList == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: failed to allocate host data structure",
+                               __func__));
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto ErrorAllocHost;
+       }
+
+       /*
+        * This FW FreeList context is only mapped into the kernel for initialisation
+        * and reconstruction (at other times it is not mapped and only used by the
+        * FW).
+        * Therefore the GPU cache doesn't need coherency, and write-combine will
+        * suffice on the CPU side (WC buffer will be flushed at the first TA-kick)
+        */
+       eError = DevmemFwAllocate(psDevInfo,
+                                                       sizeof(*psFWFreeList),
+                                                       PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                                                       PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+                                                       PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+                                                       PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                                                       PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                                                       PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+                                                       PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+                                                       PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                                                       PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC |
+                                                       PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+                                                       PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN),
+                                                       "FwFreeList",
+                                                       &psFWFreelistMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: DevmemFwAllocate for RGXFWIF_FREELIST failed",
+                               __func__));
+               goto FWFreeListAlloc;
+       }
+
+       /* Initialise host data structures */
+       psFreeList->psDevInfo = psDevInfo;
+       psFreeList->psConnection = psConnection;
+       psFreeList->psFreeListPMR = psFreeListPMR;
+       psFreeList->uiFreeListPMROffset = uiFreeListPMROffset;
+       psFreeList->psFreeListStatePMR = psFreeListStatePMR;
+       psFreeList->uiFreeListStatePMROffset = uiFreeListStatePMROffset;
+       psFreeList->psFWFreelistMemDesc = psFWFreelistMemDesc;
+       eError = RGXSetFirmwareAddress(&psFreeList->sFreeListFWDevVAddr, psFWFreelistMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", ErrorSetFwAddr);
+
+       /* psFreeList->ui32FreelistID set below with lock... */
+       psFreeList->ui32FreelistGlobalID = (psGlobalFreeList ? psGlobalFreeList->ui32FreelistID : 0);
+       psFreeList->ui32MaxFLPages = ui32MaxFLPages;
+       psFreeList->ui32InitFLPages = ui32InitFLPages;
+       psFreeList->ui32GrowFLPages = ui32GrowFLPages;
+       psFreeList->ui32CurrentFLPages = 0;
+       psFreeList->ui32ReadyFLPages = 0;
+       psFreeList->ui32GrowThreshold = ui32GrowParamThreshold;
+       psFreeList->ui64FreelistChecksum = 0;
+       psFreeList->ui32RefCount = 0;
+       psFreeList->bCheckFreelist = bCheckFreelist;
+       dllist_init(&psFreeList->sMemoryBlockHead);
+       dllist_init(&psFreeList->sMemoryBlockInitHead);
+#if !defined(SUPPORT_SHADOW_FREELISTS)
+       dllist_init(&psFreeList->sNodeHWRTDataHead);
+#endif
+       psFreeList->ownerPid = OSGetCurrentClientProcessIDKM();
+
+
+       /* Add to list of freelists */
+       OSLockAcquire(psDevInfo->hLockFreeList);
+       psFreeList->ui32FreelistID = psDevInfo->ui32FreelistCurrID++;
+       dllist_add_to_tail(&psDevInfo->sFreeListHead, &psFreeList->sNode);
+       OSLockRelease(psDevInfo->hLockFreeList);
+
+
+       /* Initialise FW data structure */
+       eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList);
+       PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", FWFreeListCpuMap);
+
+       {
+               const IMG_UINT32 ui32ReadyPages = _CalculateFreelistReadyPages(psFreeList, ui32InitFLPages);
+
+               psFWFreeList->ui32MaxPages = ui32MaxFLPages;
+               psFWFreeList->ui32CurrentPages = ui32InitFLPages - ui32ReadyPages;
+               psFWFreeList->ui32GrowPages = ui32GrowFLPages;
+               psFWFreeList->bUpdatePending = IMG_FALSE;
+               psFWFreeList->ui32UpdateNewPages = 0;
+               psFWFreeList->ui32UpdateNewReadyPages = 0;
+               psFWFreeList->sFreeListBaseDevVAddr = sFreeListBaseDevVAddr;
+#if defined(PM_INTERACTIVE_MODE)
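+               /* Interactive PM mode: initialise the freelist stack state, i.e. the
+                * index of the current stack top and the current freelist device
+                * virtual address (base address plus (max - current) 32-bit entries,
+                * aligned down to RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE). */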
+               psFWFreeList->ui32CurrentStackTop = psFWFreeList->ui32CurrentPages - 1;
+               psFWFreeList->ui64CurrentDevVAddr = (sFreeListBaseDevVAddr.uiAddr +
+                                                ((ui32MaxFLPages - psFWFreeList->ui32CurrentPages) * sizeof(IMG_UINT32))) &
+                                                       ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1);
+#endif
+               psFWFreeList->ui32FreeListID = psFreeList->ui32FreelistID;
+               psFWFreeList->bGrowPending = IMG_FALSE;
+               psFWFreeList->ui32ReadyPages = ui32ReadyPages;
+
+#if defined(SUPPORT_SHADOW_FREELISTS)
+               /* Get the FW Memory Context address... */
+               eError = RGXSetFirmwareAddress(&psFWFreeList->psFWMemContext,
+                                              RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData),
+                                              0, RFW_FWADDR_NOREF_FLAG);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: RGXSetFirmwareAddress for RGXFWIF_FWMEMCONTEXT failed",
+                                       __func__));
+                       DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc);
+                       goto FWFreeListCpuMap;
+               }
+#else
+               PVR_UNREFERENCED_PARAMETER(hMemCtxPrivData);
+#endif
+
+               /*
+                * Only the PM state buffer address is needed; it contains the PM
+                * state, including the freelist base address.
+                *
+                * Access to the physical PMR will be used to update the contents of the
+                * PM state buffer when PB growth occurs following OOM.
+                */
+               psFWFreeList->sFreeListLastGrowDevVAddr.uiAddr = 0;
+               psFWFreeList->sFreeListStateDevVAddr = sFreeListStateDevVAddr;
+       }
+
+       PVR_DPF((PVR_DBG_MESSAGE,
+                       "Freelist [%p]: Created: Max pages 0x%08x, Init pages 0x%08x, FL base address 0x%016" IMG_UINT64_FMTSPECx ", Current FL base address 0x%016" IMG_UINT64_FMTSPECx ", Current pages %u",
+                       psFreeList,
+                       ui32MaxFLPages,
+                       ui32InitFLPages,
+                       sFreeListBaseDevVAddr.uiAddr,
+                       (sFreeListBaseDevVAddr.uiAddr +
+                ((ui32MaxFLPages - psFWFreeList->ui32CurrentPages) * sizeof(IMG_UINT32))) &
+                       ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1),
+                       psFWFreeList->ui32CurrentPages - 1));
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDeviceNode, "Dump FW FreeList");
+       DevmemPDumpLoadMem(psFreeList->psFWFreelistMemDesc, 0, sizeof(*psFWFreeList), PDUMP_FLAGS_CONTINUOUS);
+
+#if defined(PM_INTERACTIVE_MODE)
+       /*
+        * Separate dump of the Freelist's number of pages and stack pointer.
+        * This makes it easy to modify the PB size in the out2.txt files.
+        */
+       PDUMPCOMMENT(psDeviceNode, "FreeList TotalPages");
+       DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc,
+                                                       offsetof(RGXFWIF_FREELIST, ui32CurrentPages),
+                                                       psFWFreeList->ui32CurrentPages,
+                                                       PDUMP_FLAGS_CONTINUOUS);
+       PDUMPCOMMENT(psDeviceNode, "FreeList StackPointer");
+       DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc,
+                                                       offsetof(RGXFWIF_FREELIST, ui32CurrentStackTop),
+                                                       psFWFreeList->ui32CurrentStackTop,
+                                                       PDUMP_FLAGS_CONTINUOUS);
+#endif
+#endif
+       DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc);
+
+       /* Add initial PB block */
+       eError = RGXGrowFreeList(psFreeList,
+                                ui32InitFLPages,
+                                &psFreeList->sMemoryBlockInitHead);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: failed to allocate initial memory block for free list 0x%016" IMG_UINT64_FMTSPECx " (%s)",
+                               __func__,
+                               sFreeListBaseDevVAddr.uiAddr,
+                               PVRSRVGetErrorString(eError)));
+               goto FWFreeListCpuMap;
+       }
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+       /* Update Stats */
+       PVRSRVStatsUpdateFreelistStats(1, /* Add 1 to the appropriate counter (Requests by App)*/
+                                      0,
+                                      psFreeList->ui32InitFLPages,
+                                      psFreeList->ui32NumHighPages,
+                                      psFreeList->ownerPid);
+
+#endif
+
+       /* return values */
+       *ppsFreeList = psFreeList;
+
+       return PVRSRV_OK;
+
+       /* Error handling */
+
+FWFreeListCpuMap:
+       /* Remove freelists from list  */
+       OSLockAcquire(psDevInfo->hLockFreeList);
+       dllist_remove_node(&psFreeList->sNode);
+       OSLockRelease(psDevInfo->hLockFreeList);
+       RGXUnsetFirmwareAddress(psFWFreelistMemDesc);
+
+ErrorSetFwAddr:
+       DevmemFwUnmapAndFree(psDevInfo, psFWFreelistMemDesc);
+
+FWFreeListAlloc:
+       OSFreeMem(psFreeList);
+
+ErrorAllocHost:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+
+/*
+       RGXDestroyFreeList
+*/
+PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList)
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32RefCount;
+
+       PVR_ASSERT(psFreeList);
+
+       OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
+       ui32RefCount = psFreeList->ui32RefCount;
+       OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+       if (ui32RefCount != 0)
+       {
+               /* Freelist still busy */
+               return PVRSRV_ERROR_RETRY;
+       }
+
+       /* Freelist is not in use => start firmware cleanup */
+       eError = RGXFWRequestFreeListCleanUp(psFreeList->psDevInfo,
+                                                                                psFreeList->sFreeListFWDevVAddr);
+       if (eError != PVRSRV_OK)
+       {
+               /* Can happen if the firmware took too long to handle the cleanup request,
+                * or if SLC flushes didn't go through (due to a GPU lockup) */
+               return eError;
+       }
+
+       /* Remove FreeList from linked list before we destroy it... */
+       OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
+       dllist_remove_node(&psFreeList->sNode);
+#if !defined(SUPPORT_SHADOW_FREELISTS)
+       /* Confirm all HWRTData nodes are freed before releasing freelist */
+       PVR_ASSERT(dllist_is_empty(&psFreeList->sNodeHWRTDataHead));
+#endif
+       OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+#if defined(PM_INTERACTIVE_MODE)
+       if (psFreeList->bCheckFreelist)
+       {
+               RGXFWIF_FREELIST  *psFWFreeList;
+               IMG_UINT64        ui32CurrentStackTop;
+               IMG_UINT64        ui64CheckSum;
+
+               /* Get the current stack pointer for this free list */
+               DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList);
+               ui32CurrentStackTop = psFWFreeList->ui32CurrentStackTop;
+               DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc);
+
+               if (ui32CurrentStackTop == psFreeList->ui32CurrentFLPages-1)
+               {
+                       /* Do consistency tests (as the list is fully populated) */
+                       _CheckFreelist(psFreeList, psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages, psFreeList->ui64FreelistChecksum, &ui64CheckSum);
+               }
+               else
+               {
+                       /* Check for duplicate pages, but don't check the checksum as the list is not fully populated */
+                       _CheckFreelist(psFreeList, ui32CurrentStackTop+1, 0, &ui64CheckSum);
+               }
+       }
+#endif
+
+       /* Destroy FW structures */
+       RGXUnsetFirmwareAddress(psFreeList->psFWFreelistMemDesc);
+       DevmemFwUnmapAndFree(psFreeList->psDevInfo, psFreeList->psFWFreelistMemDesc);
+
+       /* Remove grow shrink blocks */
+       while (!dllist_is_empty(&psFreeList->sMemoryBlockHead))
+       {
+               eError = RGXShrinkFreeList(&psFreeList->sMemoryBlockHead, psFreeList);
+               PVR_ASSERT(eError == PVRSRV_OK);
+       }
+
+       /* Remove initial PB block */
+       eError = RGXShrinkFreeList(&psFreeList->sMemoryBlockInitHead, psFreeList);
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+       /* consistency checks */
+       PVR_ASSERT(dllist_is_empty(&psFreeList->sMemoryBlockInitHead));
+       PVR_ASSERT(psFreeList->ui32CurrentFLPages == 0);
+
+       /* free Freelist */
+       OSFreeMem(psFreeList);
+
+       return eError;
+}
+
+
+/*
+       RGXCreateZSBuffer
+*/
+PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection,
+                                 PVRSRV_DEVICE_NODE    *psDeviceNode,
+                                 DEVMEMINT_RESERVATION *psReservation,
+                                 PMR                                   *psPMR,
+                                 PVRSRV_MEMALLOCFLAGS_T                uiMapFlags,
+                                 RGX_ZSBUFFER_DATA **ppsZSBuffer)
+{
+       PVRSRV_ERROR                            eError;
+       PVRSRV_RGXDEV_INFO                      *psDevInfo = psDeviceNode->pvDevice;
+       RGXFWIF_PRBUFFER                        *psFWZSBuffer;
+       RGX_ZSBUFFER_DATA                       *psZSBuffer;
+       DEVMEM_MEMDESC                          *psFWZSBufferMemDesc;
+       IMG_BOOL                                        bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiMapFlags) ? IMG_TRUE : IMG_FALSE;
+
+       /* Allocate host data structure */
+       psZSBuffer = OSAllocZMem(sizeof(*psZSBuffer));
+       if (psZSBuffer == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to allocate cleanup data structure for ZS-Buffer",
+                               __func__));
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto ErrorAllocCleanup;
+       }
+
+       /* Populate Host data */
+       psZSBuffer->psDevInfo = psDevInfo;
+       psZSBuffer->psReservation = psReservation;
+       psZSBuffer->psPMR = psPMR;
+       psZSBuffer->uiMapFlags = uiMapFlags;
+       psZSBuffer->ui32RefCount = 0;
+       psZSBuffer->bOnDemand = bOnDemand;
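+       /* On-demand ZS-Buffers are added to the device's ZS-Buffer list under a
+        * unique ID so the firmware can later request backing/unbacking by ID
+        * (see RGXProcessRequestZSBufferBacking/Unbacking below). */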
+       if (bOnDemand)
+       {
+               /* psZSBuffer->ui32ZSBufferID set below with lock... */
+               psZSBuffer->psMapping = NULL;
+
+               OSLockAcquire(psDevInfo->hLockZSBuffer);
+               psZSBuffer->ui32ZSBufferID = psDevInfo->ui32ZSBufferCurrID++;
+               dllist_add_to_tail(&psDevInfo->sZSBufferHead, &psZSBuffer->sNode);
+               OSLockRelease(psDevInfo->hLockZSBuffer);
+       }
+
+       /* Allocate firmware memory for ZS-Buffer. */
+       PDUMPCOMMENT(psDeviceNode, "Allocate firmware ZS-Buffer data structure");
+       eError = DevmemFwAllocate(psDevInfo,
+                                                       sizeof(*psFWZSBuffer),
+                                                       PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                                                       PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+                                                       PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+                                                       PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                                                       PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                                                       PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+                                                       PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+                                                       PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                                                       PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC |
+                                                       PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+                                                       PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN),
+                                                       "FwZSBuffer",
+                                                       &psFWZSBufferMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to allocate firmware ZS-Buffer (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+               goto ErrorAllocFWZSBuffer;
+       }
+       psZSBuffer->psFWZSBufferMemDesc = psFWZSBufferMemDesc;
+
+       /* Temporarily map the firmware ZS-Buffer structure to the kernel. */
+       eError = DevmemAcquireCpuVirtAddr(psFWZSBufferMemDesc,
+                                         (void **)&psFWZSBuffer);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to map firmware ZS-Buffer (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+               goto ErrorAcquireFWZSBuffer;
+       }
+
+       /* Populate FW ZS-Buffer data structure */
+       psFWZSBuffer->bOnDemand = bOnDemand;
+       psFWZSBuffer->eState = (bOnDemand) ? RGXFWIF_PRBUFFER_UNBACKED : RGXFWIF_PRBUFFER_BACKED;
+       psFWZSBuffer->ui32BufferID = psZSBuffer->ui32ZSBufferID;
+
+       /* Get firmware address of ZS-Buffer. */
+       eError = RGXSetFirmwareAddress(&psZSBuffer->sZSBufferFWDevVAddr, psFWZSBufferMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+       PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", ErrorSetFwAddr);
+
+       /* Dump the ZS-Buffer and the memory content */
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDeviceNode, "Dump firmware ZS-Buffer");
+       DevmemPDumpLoadMem(psFWZSBufferMemDesc, 0, sizeof(*psFWZSBuffer), PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+       /* Release address acquired above. */
+       DevmemReleaseCpuVirtAddr(psFWZSBufferMemDesc);
+
+
+       /* define return value */
+       *ppsZSBuffer = psZSBuffer;
+
+       PVR_DPF((PVR_DBG_MESSAGE, "ZS-Buffer [%p] created (%s)",
+                                                       psZSBuffer,
+                                                       (bOnDemand) ? "On-Demand" : "Up-front"));
+
+       psZSBuffer->owner = OSGetCurrentClientProcessIDKM();
+
+       return PVRSRV_OK;
+
+       /* error handling */
+
+ErrorSetFwAddr:
+       DevmemReleaseCpuVirtAddr(psFWZSBufferMemDesc);
+ErrorAcquireFWZSBuffer:
+       DevmemFwUnmapAndFree(psDevInfo, psFWZSBufferMemDesc);
+
+ErrorAllocFWZSBuffer:
+       OSFreeMem(psZSBuffer);
+
+ErrorAllocCleanup:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+
+/*
+       RGXDestroyZSBuffer
+*/
+PVRSRV_ERROR RGXDestroyZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer)
+{
+       POS_LOCK hLockZSBuffer;
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(psZSBuffer);
+       hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer;
+
+       /* Request ZS Buffer cleanup */
+       eError = RGXFWRequestZSBufferCleanUp(psZSBuffer->psDevInfo,
+                                                                               psZSBuffer->sZSBufferFWDevVAddr);
+       if (eError == PVRSRV_OK)
+       {
+               /* Free the firmware ZS-Buffer structure. */
+               RGXUnsetFirmwareAddress(psZSBuffer->psFWZSBufferMemDesc);
+               DevmemFwUnmapAndFree(psZSBuffer->psDevInfo, psZSBuffer->psFWZSBufferMemDesc);
+
+               /* Remove Deferred Allocation from list */
+               if (psZSBuffer->bOnDemand)
+               {
+                       OSLockAcquire(hLockZSBuffer);
+                       PVR_ASSERT(dllist_node_is_in_list(&psZSBuffer->sNode));
+                       dllist_remove_node(&psZSBuffer->sNode);
+                       OSLockRelease(hLockZSBuffer);
+               }
+
+               PVR_ASSERT(psZSBuffer->ui32RefCount == 0);
+
+               PVR_DPF((PVR_DBG_MESSAGE, "ZS-Buffer [%p] destroyed", psZSBuffer));
+
+               /* Free ZS-Buffer host data structure */
+               OSFreeMem(psZSBuffer);
+
+       }
+
+       return eError;
+}
+
+PVRSRV_ERROR
+RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer)
+{
+       POS_LOCK hLockZSBuffer;
+       PVRSRV_ERROR eError;
+
+       if (!psZSBuffer)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       if (!psZSBuffer->bOnDemand)
+       {
+               /* Only deferred allocations can be populated */
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       PVR_DPF((PVR_DBG_MESSAGE,
+                       "ZS Buffer [%p, ID=0x%08x]: Physical backing requested",
+                                                               psZSBuffer,
+                                                               psZSBuffer->ui32ZSBufferID));
+       hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer;
+
+       OSLockAcquire(hLockZSBuffer);
+
+       if (psZSBuffer->ui32RefCount == 0)
+       {
+               if (psZSBuffer->bOnDemand)
+               {
+                       IMG_HANDLE hDevmemHeap = (IMG_HANDLE)NULL;
+
+                       PVR_ASSERT(psZSBuffer->psMapping == NULL);
+
+                       /* Get Heap */
+                       eError = DevmemServerGetHeapHandle(psZSBuffer->psReservation, &hDevmemHeap);
+                       PVR_ASSERT(psZSBuffer->psMapping == NULL);
+                       if (unlikely(hDevmemHeap == (IMG_HANDLE)NULL))
+                       {
+                               OSLockRelease(hLockZSBuffer);
+                               return PVRSRV_ERROR_INVALID_HEAP;
+                       }
+
+                       eError = DevmemIntMapPMR(hDevmemHeap,
+                                                                       psZSBuffer->psReservation,
+                                                                       psZSBuffer->psPMR,
+                                                                       psZSBuffer->uiMapFlags,
+                                                                       &psZSBuffer->psMapping);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                               "Unable to populate ZS Buffer [%p, ID=0x%08x] (%s)",
+                                               psZSBuffer,
+                                               psZSBuffer->ui32ZSBufferID,
+                                               PVRSRVGetErrorString(eError)));
+                               OSLockRelease(hLockZSBuffer);
+                               return eError;
+
+                       }
+                       PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing acquired",
+                                                                               psZSBuffer,
+                                                                               psZSBuffer->ui32ZSBufferID));
+               }
+       }
+
+       /* Increase refcount */
+       psZSBuffer->ui32RefCount++;
+
+       OSLockRelease(hLockZSBuffer);
+
+       return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer,
+                                       RGX_POPULATION **ppsPopulation)
+{
+       RGX_POPULATION *psPopulation;
+       PVRSRV_ERROR eError;
+
+       psZSBuffer->ui32NumReqByApp++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+       PVRSRVStatsUpdateZSBufferStats(1, 0, psZSBuffer->owner);
+#endif
+
+       /* Do the backing */
+       eError = RGXBackingZSBuffer(psZSBuffer);
+       if (eError != PVRSRV_OK)
+       {
+               goto OnErrorBacking;
+       }
+
+       /* Create the handle to the backing */
+       psPopulation = OSAllocMem(sizeof(*psPopulation));
+       if (psPopulation == NULL)
+       {
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto OnErrorAlloc;
+       }
+
+       psPopulation->psZSBuffer = psZSBuffer;
+
+       /* return value */
+       *ppsPopulation = psPopulation;
+
+       return PVRSRV_OK;
+
+OnErrorAlloc:
+       RGXUnbackingZSBuffer(psZSBuffer);
+
+OnErrorBacking:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+PVRSRV_ERROR
+RGXUnbackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer)
+{
+       POS_LOCK hLockZSBuffer;
+       PVRSRV_ERROR eError;
+
+       if (!psZSBuffer)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       PVR_ASSERT(psZSBuffer->ui32RefCount);
+
+       PVR_DPF((PVR_DBG_MESSAGE,
+                       "ZS Buffer [%p, ID=0x%08x]: Physical backing removal requested",
+                                                               psZSBuffer,
+                                                               psZSBuffer->ui32ZSBufferID));
+
+       hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer;
+
+       OSLockAcquire(hLockZSBuffer);
+
+       if (psZSBuffer->bOnDemand)
+       {
+               if (psZSBuffer->ui32RefCount == 1)
+               {
+                       PVR_ASSERT(psZSBuffer->psMapping);
+
+                       eError = DevmemIntUnmapPMR(psZSBuffer->psMapping);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                               "Unable to unpopulate ZS Buffer [%p, ID=0x%08x] (%s)",
+                                               psZSBuffer,
+                                               psZSBuffer->ui32ZSBufferID,
+                                               PVRSRVGetErrorString(eError)));
+                               OSLockRelease(hLockZSBuffer);
+                               return eError;
+                       }
+
+                       PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing removed",
+                                                                               psZSBuffer,
+                                                                               psZSBuffer->ui32ZSBufferID));
+               }
+       }
+
+       /* Decrease refcount */
+       psZSBuffer->ui32RefCount--;
+
+       OSLockRelease(hLockZSBuffer);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+RGXUnpopulateZSBufferKM(RGX_POPULATION *psPopulation)
+{
+       PVRSRV_ERROR eError;
+
+       if (!psPopulation)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       eError = RGXUnbackingZSBuffer(psPopulation->psZSBuffer);
+       if (eError != PVRSRV_OK)
+       {
+               return eError;
+       }
+
+       OSFreeMem(psPopulation);
+
+       return PVRSRV_OK;
+}
+
+static RGX_ZSBUFFER_DATA *FindZSBuffer(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32ZSBufferID)
+{
+       DLLIST_NODE *psNode, *psNext;
+       RGX_ZSBUFFER_DATA *psZSBuffer = NULL;
+
+       OSLockAcquire(psDevInfo->hLockZSBuffer);
+
+       dllist_foreach_node(&psDevInfo->sZSBufferHead, psNode, psNext)
+       {
+               RGX_ZSBUFFER_DATA *psThisZSBuffer = IMG_CONTAINER_OF(psNode, RGX_ZSBUFFER_DATA, sNode);
+
+               if (psThisZSBuffer->ui32ZSBufferID == ui32ZSBufferID)
+               {
+                       psZSBuffer = psThisZSBuffer;
+                       break;
+               }
+       }
+
+       OSLockRelease(psDevInfo->hLockZSBuffer);
+       return psZSBuffer;
+}
+
+void RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                         IMG_UINT32 ui32ZSBufferID)
+{
+       IMG_BOOL bBackingDone = IMG_TRUE;
+       RGX_ZSBUFFER_DATA *psZSBuffer;
+       RGXFWIF_KCCB_CMD sTACCBCmd;
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(psDevInfo);
+
+       /* scan all deferred allocations */
+       psZSBuffer = FindZSBuffer(psDevInfo, ui32ZSBufferID);
+
+       if (psZSBuffer == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (Populate)",
+                        ui32ZSBufferID));
+
+               return;
+       }
+
+       /* Populate ZLS */
+       eError = RGXBackingZSBuffer(psZSBuffer);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "Populating ZS-Buffer failed with error %u (ID = 0x%08x)",
+                        eError, ui32ZSBufferID));
+               bBackingDone = IMG_FALSE;
+       }
+
+       /* send confirmation */
+       sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE;
+       sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr;
+       sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = bBackingDone;
+
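+       /* Submit the backing update to the GEOM kernel CCB, retrying while
+        * RGXScheduleCommand returns PVRSRV_ERROR_RETRY (CCB temporarily full),
+        * waiting MAX_HW_TIME_US/WAIT_TRY_COUNT between attempts until the
+        * LOOP_UNTIL_TIMEOUT window expires. */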
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               eError = RGXScheduleCommand(psDevInfo,
+                                                                       RGXFWIF_DM_GEOM,
+                                                                       &sTACCBCmd,
+                                                                       PDUMP_FLAGS_NONE);
+               if (eError != PVRSRV_ERROR_RETRY)
+               {
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       /* Kernel CCB should never fill up, as the FW is processing them right away  */
+       PVR_ASSERT(eError == PVRSRV_OK);
+
+       psZSBuffer->ui32NumReqByFW++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+       PVRSRVStatsUpdateZSBufferStats(0, 1, psZSBuffer->owner);
+#endif
+}
+
+void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                       IMG_UINT32 ui32ZSBufferID)
+{
+       RGX_ZSBUFFER_DATA *psZSBuffer;
+       RGXFWIF_KCCB_CMD sTACCBCmd;
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(psDevInfo);
+
+       /* scan all deferred allocations */
+       psZSBuffer = FindZSBuffer(psDevInfo, ui32ZSBufferID);
+
+       if (psZSBuffer == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (UnPopulate)",
+                        ui32ZSBufferID));
+
+               return;
+       }
+
+       /* Unpopulate ZLS */
+       eError = RGXUnbackingZSBuffer(psZSBuffer);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "UnPopulating ZS-Buffer failed with error %u (ID = 0x%08x)",
+                        eError, ui32ZSBufferID));
+               PVR_ASSERT(IMG_FALSE);
+       }
+
+       /* send confirmation */
+       sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE;
+       sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr;
+       sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = IMG_TRUE;
+
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               eError = RGXScheduleCommand(psDevInfo,
+                                                                       RGXFWIF_DM_GEOM,
+                                                                       &sTACCBCmd,
+                                                                       PDUMP_FLAGS_NONE);
+               if (eError != PVRSRV_ERROR_RETRY)
+               {
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       /* Kernel CCB should never fill up, as the FW is processing them right away */
+       PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+static
+PVRSRV_ERROR _CreateTAContext(CONNECTION_DATA *psConnection,
+                                                         PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                         SERVER_MMU_CONTEXT *psServerMMUContext,
+                                                         DEVMEM_MEMDESC *psAllocatedMemDesc,
+                                                         IMG_UINT32 ui32AllocatedOffset,
+                                                         DEVMEM_MEMDESC *psFWMemContextMemDesc,
+                                                         IMG_UINT32 ui32Priority,
+                                                         IMG_UINT32 ui32MaxDeadlineMS,
+                                                         IMG_UINT64 ui64RobustnessAddress,
+                                                         RGX_COMMON_CONTEXT_INFO *psInfo,
+                                                         RGX_SERVER_RC_TA_DATA *psTAData,
+                                                         IMG_UINT32 ui32CCBAllocSizeLog2,
+                                                         IMG_UINT32 ui32CCBMaxAllocSizeLog2,
+                                                         IMG_UINT32 ui32ContextFlags)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_ERROR eError;
+
+       /*
+               Allocate device memory for the firmware GPU context suspend state.
+               Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+       */
+       PDUMPCOMMENT(psDeviceNode, "Allocate RGX firmware TA context suspend state");
+
+       eError = DevmemFwAllocate(psDevInfo,
+                                                         sizeof(RGXFWIF_TACTX_STATE),
+                                                         RGX_FWCOMCTX_ALLOCFLAGS,
+                                                         "FwTAContextState",
+                                                         &psTAData->psContextStateMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to allocate firmware GPU context suspend state (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+               goto fail_tacontextsuspendalloc;
+       }
+
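+       /* Create the TA firmware common context; when the client supplies no CCB
+        * size overrides (zero), the build-time defaults RGX_TA_CCB_SIZE_LOG2 and
+        * RGX_TA_CCB_MAX_SIZE_LOG2 are used. */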
+       eError = FWCommonContextAllocate(psConnection,
+                                                                        psDeviceNode,
+                                                                        REQ_TYPE_TA,
+                                                                        RGXFWIF_DM_GEOM,
+                                                                        psServerMMUContext,
+                                                                        psAllocatedMemDesc,
+                                                                        ui32AllocatedOffset,
+                                                                        psFWMemContextMemDesc,
+                                                                        psTAData->psContextStateMemDesc,
+                                                                        ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TA_CCB_SIZE_LOG2,
+                                                                        ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TA_CCB_MAX_SIZE_LOG2,
+                                                                        ui32ContextFlags,
+                                                                        ui32Priority,
+                                                                        ui32MaxDeadlineMS,
+                                                                        ui64RobustnessAddress,
+                                                                        psInfo,
+                                                                        &psTAData->psServerCommonContext);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to init TA fw common context (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+               goto fail_tacommoncontext;
+       }
+
+       /*
+        * Dump the FW TA context suspend state buffer
+        */
+#if defined(PDUMP)
+       PDUMPCOMMENT(psDeviceNode, "Dump the TA context suspend state buffer");
+       DevmemPDumpLoadMem(psTAData->psContextStateMemDesc,
+                                          0,
+                                          sizeof(RGXFWIF_TACTX_STATE),
+                                          PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+       psTAData->ui32Priority = ui32Priority;
+       return PVRSRV_OK;
+
+fail_tacommoncontext:
+       DevmemFree(psTAData->psContextStateMemDesc);
+fail_tacontextsuspendalloc:
+       PVR_ASSERT(eError != PVRSRV_OK);
+
+       return eError;
+}
+
+static
+PVRSRV_ERROR _Create3DContext(CONNECTION_DATA *psConnection,
+                                                         PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                         SERVER_MMU_CONTEXT *psServerMMUContext,
+                                                         DEVMEM_MEMDESC *psAllocatedMemDesc,
+                                                         IMG_UINT32 ui32AllocatedOffset,
+                                                         DEVMEM_MEMDESC *psFWMemContextMemDesc,
+                                                         IMG_UINT32 ui32Priority,
+                                                         IMG_UINT32 ui32MaxDeadlineMS,
+                                                         IMG_UINT64 ui64RobustnessAddress,
+                                                         RGX_COMMON_CONTEXT_INFO *psInfo,
+                                                         RGX_SERVER_RC_3D_DATA *ps3DData,
+                                                         IMG_UINT32 ui32CCBAllocSizeLog2,
+                                                         IMG_UINT32 ui32CCBMaxAllocSizeLog2,
+                                                         IMG_UINT32 ui32ContextFlags)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       PVRSRV_ERROR eError;
+       IMG_UINT        uiNumISPStoreRegs = RGXFWIF_IPP_RESUME_REG_COUNT; /* default 1 register for IPP_resume */
+       IMG_UINT        ui3DRegISPStateStoreSize = 0;
+
+       /*
+               Allocate device memory for the firmware GPU context suspend state.
+               Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+       */
+       PDUMPCOMMENT(psDeviceNode, "Allocate RGX firmware 3D context suspend state");
+
+       uiNumISPStoreRegs += (RGX_GET_FEATURE_VALUE(psDevInfo, NUM_SPU) *
+                                                               RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_PER_SPU) *
+                                                               RGXFWIF_PIPE_COUNT_PER_ISP);
+
+
+       if (uiNumISPStoreRegs > (RGXFWIF_ISP_PIPE_COUNT_MAX + RGXFWIF_IPP_RESUME_REG_COUNT))
+       {
+               return PVRSRV_ERROR_NOT_SUPPORTED;
+       }
+
+       /* Size of the CS buffer: the base 3DCTX ISP state structure plus one
+        * au3DReg_ISP_STORE entry for each ISP store register */
+       ui3DRegISPStateStoreSize = sizeof(RGXFWIF_3DCTX_STATE) +
+                       (uiNumISPStoreRegs * sizeof(((RGXFWIF_3DCTX_STATE *)0)->au3DReg_ISP_STORE[0]));
+
+       eError = DevmemFwAllocate(psDevInfo,
+                                                         ui3DRegISPStateStoreSize,
+                                                         RGX_FWCOMCTX_ALLOCFLAGS,
+                                                         "Fw3DContextState",
+                                                         &ps3DData->psContextStateMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to allocate firmware GPU context suspend state (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+               goto fail_3dcontextsuspendalloc;
+       }
+
+       eError = FWCommonContextAllocate(psConnection,
+                                                                        psDeviceNode,
+                                                                        REQ_TYPE_3D,
+                                                                        RGXFWIF_DM_3D,
+                                                                        psServerMMUContext,
+                                                                        psAllocatedMemDesc,
+                                                                        ui32AllocatedOffset,
+                                                                        psFWMemContextMemDesc,
+                                                                        ps3DData->psContextStateMemDesc,
+                                                                        ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_3D_CCB_SIZE_LOG2,
+                                                                        ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_3D_CCB_MAX_SIZE_LOG2,
+                                                                        ui32ContextFlags,
+                                                                        ui32Priority,
+                                                                        ui32MaxDeadlineMS,
+                                                                        ui64RobustnessAddress,
+                                                                        psInfo,
+                                                                        &ps3DData->psServerCommonContext);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to init 3D fw common context (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+               goto fail_3dcommoncontext;
+       }
+
+       /*
+        * Dump the FW 3D context suspend state buffer
+        */
+       PDUMPCOMMENT(psDeviceNode, "Dump the 3D context suspend state buffer");
+       DevmemPDumpLoadMem(ps3DData->psContextStateMemDesc,
+                                          0,
+                                          sizeof(RGXFWIF_3DCTX_STATE),
+                                          PDUMP_FLAGS_CONTINUOUS);
+
+       ps3DData->ui32Priority = ui32Priority;
+       return PVRSRV_OK;
+
+fail_3dcommoncontext:
+       DevmemFree(ps3DData->psContextStateMemDesc);
+fail_3dcontextsuspendalloc:
+       PVR_ASSERT(eError != PVRSRV_OK);
+
+       return eError;
+}
+
+
+/*
+ * PVRSRVRGXCreateRenderContextKM
+ */
+PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA                            *psConnection,
+                                                                                       PVRSRV_DEVICE_NODE                      *psDeviceNode,
+                                                                                       IMG_UINT32                                      ui32Priority,
+                                                                                       IMG_UINT32                                      ui32FrameworkRegisterSize,
+                                                                                       IMG_PBYTE                                       pabyFrameworkRegisters,
+                                                                                       IMG_HANDLE                                      hMemCtxPrivData,
+                                                                                       IMG_UINT32                                      ui32StaticRenderContextStateSize,
+                                                                                       IMG_PBYTE                                       pStaticRenderContextState,
+                                                                                       IMG_UINT32                                      ui32PackedCCBSizeU8888,
+                                                                                       IMG_UINT32                                      ui32ContextFlags,
+                                                                                       IMG_UINT64                                      ui64RobustnessAddress,
+                                                                                       IMG_UINT32                                      ui32MaxTADeadlineMS,
+                                                                                       IMG_UINT32                                      ui32Max3DDeadlineMS,
+                                                                                       RGX_SERVER_RENDER_CONTEXT       **ppsRenderContext)
+{
+       PVRSRV_ERROR                            eError;
+       PVRSRV_RGXDEV_INFO                      *psDevInfo = psDeviceNode->pvDevice;
+       RGX_SERVER_RENDER_CONTEXT       *psRenderContext;
+       DEVMEM_MEMDESC                          *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+       RGX_COMMON_CONTEXT_INFO         sInfo = {NULL};
+       RGXFWIF_FWRENDERCONTEXT         *psFWRenderContext;
+
+       *ppsRenderContext = NULL;
+
+       if (ui32StaticRenderContextStateSize > RGXFWIF_STATIC_RENDERCONTEXT_SIZE)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       psRenderContext = OSAllocZMem(sizeof(*psRenderContext));
+       if (psRenderContext == NULL)
+       {
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       eError = OSLockCreate(&psRenderContext->hLock);
+
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_lock;
+       }
+
+       psRenderContext->psDeviceNode = psDeviceNode;
+
+       /*
+               Create the FW render context; this has the TA and 3D FW common
+               contexts embedded within it.
+       */
+       eError = DevmemFwAllocate(psDevInfo,
+                                                         sizeof(RGXFWIF_FWRENDERCONTEXT),
+                                                         RGX_FWCOMCTX_ALLOCFLAGS,
+                                                         "FwRenderContext",
+                                                         &psRenderContext->psFWRenderContextMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_fwrendercontext;
+       }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
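+       /* Workload estimation bookkeeping for this render context; the matching
+        * WorkEstDeInitTA3D() call is made in PVRSRVRGXDestroyRenderContextKM once
+        * both the TA and 3D contexts have been cleaned up.
+        */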
+       WorkEstInitTA3D(psDevInfo, &psRenderContext->sWorkEstData);
+#endif
+
+       if (ui32FrameworkRegisterSize)
+       {
+               /*
+                * Create the FW framework buffer
+                */
+               eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+                               &psRenderContext->psFWFrameworkMemDesc,
+                               ui32FrameworkRegisterSize);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Failed to allocate firmware GPU framework state (%s)",
+                                       __func__,
+                                       PVRSRVGetErrorString(eError)));
+                       goto fail_frameworkcreate;
+               }
+
+               /* Copy the Framework client data into the framework buffer */
+               eError = PVRSRVRGXFrameworkCopyCommand(psDeviceNode,
+                               psRenderContext->psFWFrameworkMemDesc,
+                               pabyFrameworkRegisters,
+                               ui32FrameworkRegisterSize);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Failed to populate the framework buffer (%s)",
+                                       __func__,
+                                       PVRSRVGetErrorString(eError)));
+                       goto fail_frameworkcopy;
+               }
+               sInfo.psFWFrameworkMemDesc = psRenderContext->psFWFrameworkMemDesc;
+       }
+
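+       /* ui32PackedCCBSizeU8888 packs four log2 client CCB sizes: Unpack3/Unpack4
+        * provide the 3D CCB allocation/maximum size below, and Unpack1/Unpack2 the
+        * TA CCB allocation/maximum size.
+        */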
+       eError = _Create3DContext(psConnection,
+                                                         psDeviceNode,
+                                                         hMemCtxPrivData,
+                                                         psRenderContext->psFWRenderContextMemDesc,
+                                                         offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext),
+                                                         psFWMemContextMemDesc,
+                                                         ui32Priority,
+                                                         ui32Max3DDeadlineMS,
+                                                         ui64RobustnessAddress,
+                                                         &sInfo,
+                                                         &psRenderContext->s3DData,
+                                                         U32toU8_Unpack3(ui32PackedCCBSizeU8888),
+                                                         U32toU8_Unpack4(ui32PackedCCBSizeU8888),
+                                                         ui32ContextFlags);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_3dcontext;
+       }
+
+       eError = _CreateTAContext(psConnection,
+                                                         psDeviceNode,
+                                                         hMemCtxPrivData,
+                                                         psRenderContext->psFWRenderContextMemDesc,
+                                                         offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext),
+                                                         psFWMemContextMemDesc,
+                                                         ui32Priority,
+                                                         ui32MaxTADeadlineMS,
+                                                         ui64RobustnessAddress,
+                                                         &sInfo,
+                                                         &psRenderContext->sTAData,
+                                                         U32toU8_Unpack1(ui32PackedCCBSizeU8888),
+                                                         U32toU8_Unpack2(ui32PackedCCBSizeU8888),
+                                                         ui32ContextFlags);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_tacontext;
+       }
+
+       eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc,
+                       (void **)&psFWRenderContext);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_acquire_cpu_mapping;
+       }
+
+       OSDeviceMemCopy(&psFWRenderContext->sStaticRenderContextState, pStaticRenderContextState, ui32StaticRenderContextStateSize);
+       DevmemPDumpLoadMem(psRenderContext->psFWRenderContextMemDesc, 0, sizeof(RGXFWIF_FWRENDERCONTEXT), PDUMP_FLAGS_CONTINUOUS);
+       DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc);
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       psRenderContext->psBufferSyncContext =
+               pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice,
+                                                                          "rogue-ta3d");
+       if (IS_ERR(psRenderContext->psBufferSyncContext))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: failed to create buffer_sync context (err=%ld)",
+                                __func__, PTR_ERR(psRenderContext->psBufferSyncContext)));
+
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto fail_buffer_sync_context_create;
+       }
+#endif
+
+       SyncAddrListInit(&psRenderContext->sSyncAddrListTAFence);
+       SyncAddrListInit(&psRenderContext->sSyncAddrListTAUpdate);
+       SyncAddrListInit(&psRenderContext->sSyncAddrList3DFence);
+       SyncAddrListInit(&psRenderContext->sSyncAddrList3DUpdate);
+
+       {
+               PVRSRV_RGXDEV_INFO                      *psDevInfo = psDeviceNode->pvDevice;
+
+               OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
+               dllist_add_to_tail(&(psDevInfo->sRenderCtxtListHead), &(psRenderContext->sListNode));
+               OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);
+       }
+
+       *ppsRenderContext = psRenderContext;
+       return PVRSRV_OK;
+
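+       /* Error unwind: each label below releases what was set up before the
+        * corresponding failure point, in reverse order of construction (note that
+        * fail_acquire_cpu_mapping also tears down the TA context created above).
+        */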
+#if defined(SUPPORT_BUFFER_SYNC)
+fail_buffer_sync_context_create:
+#endif
+fail_acquire_cpu_mapping:
+       _DestroyTAContext(&psRenderContext->sTAData,
+                         psDeviceNode);
+fail_tacontext:
+       _Destroy3DContext(&psRenderContext->s3DData,
+                         psRenderContext->psDeviceNode);
+fail_3dcontext:
+fail_frameworkcopy:
+       if (psRenderContext->psFWFrameworkMemDesc != NULL)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWFrameworkMemDesc);
+       }
+fail_frameworkcreate:
+       DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWRenderContextMemDesc);
+fail_fwrendercontext:
+       OSLockDestroy(psRenderContext->hLock);
+fail_lock:
+       OSFreeMem(psRenderContext);
+       PVR_ASSERT(eError != PVRSRV_OK);
+
+       return eError;
+}
+
+/*
+ * PVRSRVRGXDestroyRenderContextKM
+ */
+PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext)
+{
+       PVRSRV_ERROR                            eError;
+       PVRSRV_RGXDEV_INFO      *psDevInfo = psRenderContext->psDeviceNode->pvDevice;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       RGXFWIF_FWRENDERCONTEXT *psFWRenderContext;
+       IMG_UINT32 ui32WorkEstCCBSubmitted;
+#endif
+
+       /* Remove the node from the list before calling destroy: if destroy
+        * succeeds it invalidates the node, so the node must only be re-added
+        * if destroy fails.
+        */
+       OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
+       dllist_remove_node(&(psRenderContext->sListNode));
+       OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       /* Check psBufferSyncContext has not been destroyed already (by a previous
+        * call to this function which then later returned PVRSRV_ERROR_RETRY)
+        */
+       if (psRenderContext->psBufferSyncContext != NULL)
+       {
+               pvr_buffer_sync_context_destroy(psRenderContext->psBufferSyncContext);
+               psRenderContext->psBufferSyncContext = NULL;
+       }
+#endif
+
+       /* Cleanup the TA if we haven't already */
+       if ((psRenderContext->ui32CleanupStatus & RC_CLEANUP_TA_COMPLETE) == 0)
+       {
+               eError = _DestroyTAContext(&psRenderContext->sTAData,
+                                                                  psRenderContext->psDeviceNode);
+               if (eError == PVRSRV_OK)
+               {
+                       psRenderContext->ui32CleanupStatus |= RC_CLEANUP_TA_COMPLETE;
+               }
+               else
+               {
+                       goto e0;
+               }
+       }
+
+       /* Cleanup the 3D if we haven't already */
+       if ((psRenderContext->ui32CleanupStatus & RC_CLEANUP_3D_COMPLETE) == 0)
+       {
+               eError = _Destroy3DContext(&psRenderContext->s3DData,
+                                                                  psRenderContext->psDeviceNode);
+               if (eError == PVRSRV_OK)
+               {
+                       psRenderContext->ui32CleanupStatus |= RC_CLEANUP_3D_COMPLETE;
+               }
+               else
+               {
+                       goto e0;
+               }
+       }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc,
+                       (void **)&psFWRenderContext);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to map firmware render context (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+               goto e0;
+       }
+
+       ui32WorkEstCCBSubmitted = psFWRenderContext->ui32WorkEstCCBSubmitted;
+
+       DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc);
+
+       /* Check if all of the workload estimation CCB commands for this workload are read */
+       if (ui32WorkEstCCBSubmitted != psRenderContext->sWorkEstData.ui32WorkEstCCBReceived)
+       {
+
+               PVR_DPF((PVR_DBG_WARNING,
+                       "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch",
+                       __func__, ui32WorkEstCCBSubmitted,
+                       psRenderContext->sWorkEstData.ui32WorkEstCCBReceived));
+
+               eError = PVRSRV_ERROR_RETRY;
+               goto e0;
+       }
+#endif
+
+       /*
+               Only if both TA and 3D contexts have been cleaned up can we
+               free the shared resources
+       */
+       if (psRenderContext->ui32CleanupStatus == (RC_CLEANUP_3D_COMPLETE | RC_CLEANUP_TA_COMPLETE))
+       {
+               if (psRenderContext->psFWFrameworkMemDesc != NULL)
+               {
+                       /* Free the framework buffer */
+                       DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWFrameworkMemDesc);
+               }
+
+               /* Free the firmware render context */
+               DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWRenderContextMemDesc);
+
+               SyncAddrListDeinit(&psRenderContext->sSyncAddrListTAFence);
+               SyncAddrListDeinit(&psRenderContext->sSyncAddrListTAUpdate);
+               SyncAddrListDeinit(&psRenderContext->sSyncAddrList3DFence);
+               SyncAddrListDeinit(&psRenderContext->sSyncAddrList3DUpdate);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+               WorkEstDeInitTA3D(psDevInfo, &psRenderContext->sWorkEstData);
+#endif
+               OSLockDestroy(psRenderContext->hLock);
+
+               OSFreeMem(psRenderContext);
+       }
+
+       return PVRSRV_OK;
+
+e0:
+       OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
+       dllist_add_to_tail(&(psDevInfo->sRenderCtxtListHead), &(psRenderContext->sListNode));
+       OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);
+       return eError;
+}
+
+
+#if (ENABLE_TA3D_UFO_DUMP == 1)
+static void DumpUfoList(IMG_UINT32 ui32ClientTAFenceCount,
+                        IMG_UINT32 ui32ClientTAUpdateCount,
+                        IMG_UINT32 ui32Client3DFenceCount,
+                        IMG_UINT32 ui32Client3DUpdateCount,
+                        PRGXFWIF_UFO_ADDR *pauiClientTAFenceUFOAddress,
+                        IMG_UINT32 *paui32ClientTAFenceValue,
+                        PRGXFWIF_UFO_ADDR *pauiClientTAUpdateUFOAddress,
+                        IMG_UINT32 *paui32ClientTAUpdateValue,
+                        PRGXFWIF_UFO_ADDR *pauiClient3DFenceUFOAddress,
+                        IMG_UINT32 *paui32Client3DFenceValue,
+                        PRGXFWIF_UFO_ADDR *pauiClient3DUpdateUFOAddress,
+                        IMG_UINT32 *paui32Client3DUpdateValue)
+{
+       IMG_UINT32 i;
+
+       PVR_DPF((PVR_DBG_ERROR, "%s: ~~~ After populating sync prims ~~~",
+                        __func__));
+
+       /* Dump Fence syncs, Update syncs and PR Update syncs */
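+       /* A set low bit in a UFO FW address appears to mark a sync checkpoint:
+        * those entries have no explicit value to print (hence the
+        * PVRSRV_SYNC_CHECKPOINT_SIGNALLED text), while the remaining entries are
+        * sync prims whose value is read from the corresponding value array.
+        */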
+       PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TA fence syncs:",
+               __func__, ui32ClientTAFenceCount));
+       for (i = 0; i < ui32ClientTAFenceCount; i++)
+       {
+               if (BITMASK_HAS(pauiClientTAFenceUFOAddress->ui32Addr, 1))
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s:   %d/%d<%p>. FWAddr=0x%x,"
+                               " CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
+                               __func__, i + 1, ui32ClientTAFenceCount,
+                               (void *) pauiClientTAFenceUFOAddress,
+                               pauiClientTAFenceUFOAddress->ui32Addr));
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)",
+                               __func__, i + 1, ui32ClientTAFenceCount,
+                               (void *) pauiClientTAFenceUFOAddress,
+                               pauiClientTAFenceUFOAddress->ui32Addr,
+                               *paui32ClientTAFenceValue,
+                               *paui32ClientTAFenceValue));
+                       paui32ClientTAFenceValue++;
+               }
+               pauiClientTAFenceUFOAddress++;
+       }
+
+       PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TA update syncs:",
+                        __func__, ui32ClientTAUpdateCount));
+       for (i = 0; i < ui32ClientTAUpdateCount; i++)
+       {
+               if (BITMASK_HAS(pauiClientTAUpdateUFOAddress->ui32Addr, 1))
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s:   %d/%d<%p>. FWAddr=0x%x,"
+                               " UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
+                               __func__, i + 1, ui32ClientTAUpdateCount,
+                               (void *) pauiClientTAUpdateUFOAddress,
+                               pauiClientTAUpdateUFOAddress->ui32Addr));
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d(0x%x)",
+                               __func__, i + 1, ui32ClientTAUpdateCount,
+                               (void *) pauiClientTAUpdateUFOAddress,
+                               pauiClientTAUpdateUFOAddress->ui32Addr,
+                               *paui32ClientTAUpdateValue,
+                               *paui32ClientTAUpdateValue));
+                       paui32ClientTAUpdateValue++;
+               }
+               pauiClientTAUpdateUFOAddress++;
+       }
+
+       PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d 3D fence syncs:",
+                        __func__, ui32Client3DFenceCount));
+       for (i = 0; i < ui32Client3DFenceCount; i++)
+       {
+               if (BITMASK_HAS(pauiClient3DFenceUFOAddress->ui32Addr, 1))
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s:   %d/%d<%p>. FWAddr=0x%x,"
+                               " CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
+                               __func__, i + 1, ui32Client3DFenceCount,
+                               (void *) pauiClient3DFenceUFOAddress,
+                               pauiClient3DFenceUFOAddress->ui32Addr));
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)",
+                               __func__, i + 1, ui32Client3DFenceCount,
+                               (void *) pauiClient3DFenceUFOAddress,
+                               pauiClient3DFenceUFOAddress->ui32Addr,
+                               *paui32Client3DFenceValue,
+                               *paui32Client3DFenceValue));
+                       paui32Client3DFenceValue++;
+               }
+               pauiClient3DFenceUFOAddress++;
+       }
+
+       PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d 3D update syncs:",
+                        __func__, ui32Client3DUpdateCount));
+       for (i = 0; i < ui32Client3DUpdateCount; i++)
+       {
+               if (BITMASK_HAS(pauiClient3DUpdateUFOAddress->ui32Addr, 1))
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s:   %d/%d<%p>. FWAddr=0x%x,"
+                               " UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
+                               __func__, i + 1, ui32Client3DUpdateCount,
+                               (void *) pauiClient3DUpdateUFOAddress,
+                               pauiClient3DUpdateUFOAddress->ui32Addr));
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d(0x%x)",
+                               __func__, i + 1, ui32Client3DUpdateCount,
+                               (void *) pauiClient3DUpdateUFOAddress,
+                               pauiClient3DUpdateUFOAddress->ui32Addr,
+                               *paui32Client3DUpdateValue,
+                               *paui32Client3DUpdateValue));
+                       paui32Client3DUpdateValue++;
+               }
+               pauiClient3DUpdateUFOAddress++;
+       }
+}
+#endif /* (ENABLE_TA3D_UFO_DUMP == 1) */
+
+/*
+ * PVRSRVRGXKickTA3DKM
+ */
+PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT     *psRenderContext,
+                                                                IMG_UINT32                                     ui32ClientTAFenceCount,
+                                                                SYNC_PRIMITIVE_BLOCK           **apsClientTAFenceSyncPrimBlock,
+                                                                IMG_UINT32                                     *paui32ClientTAFenceSyncOffset,
+                                                                IMG_UINT32                                     *paui32ClientTAFenceValue,
+                                                                IMG_UINT32                                     ui32ClientTAUpdateCount,
+                                                                SYNC_PRIMITIVE_BLOCK           **apsClientTAUpdateSyncPrimBlock,
+                                                                IMG_UINT32                                     *paui32ClientTAUpdateSyncOffset,
+                                                                IMG_UINT32                                     *paui32ClientTAUpdateValue,
+                                                                IMG_UINT32                                     ui32Client3DUpdateCount,
+                                                                SYNC_PRIMITIVE_BLOCK           **apsClient3DUpdateSyncPrimBlock,
+                                                                IMG_UINT32                                     *paui32Client3DUpdateSyncOffset,
+                                                                IMG_UINT32                                     *paui32Client3DUpdateValue,
+                                                                SYNC_PRIMITIVE_BLOCK           *psPRFenceSyncPrimBlock,
+                                                                IMG_UINT32                                     ui32PRFenceSyncOffset,
+                                                                IMG_UINT32                                     ui32PRFenceValue,
+                                                                PVRSRV_FENCE                           iCheckTAFence,
+                                                                PVRSRV_TIMELINE                        iUpdateTATimeline,
+                                                                PVRSRV_FENCE                           *piUpdateTAFence,
+                                                                IMG_CHAR                                       szFenceNameTA[PVRSRV_SYNC_NAME_LENGTH],
+                                                                PVRSRV_FENCE                           iCheck3DFence,
+                                                                PVRSRV_TIMELINE                        iUpdate3DTimeline,
+                                                                PVRSRV_FENCE                           *piUpdate3DFence,
+                                                                IMG_CHAR                                       szFenceName3D[PVRSRV_SYNC_NAME_LENGTH],
+                                                                IMG_UINT32                                     ui32TACmdSize,
+                                                                IMG_PBYTE                                      pui8TADMCmd,
+                                                                IMG_UINT32                                     ui323DPRCmdSize,
+                                                                IMG_PBYTE                                      pui83DPRDMCmd,
+                                                                IMG_UINT32                                     ui323DCmdSize,
+                                                                IMG_PBYTE                                      pui83DDMCmd,
+                                                                IMG_UINT32                                     ui32ExtJobRef,
+                                                                IMG_BOOL                                       bKickTA,
+                                                                IMG_BOOL                                       bKickPR,
+                                                                IMG_BOOL                                       bKick3D,
+                                                                IMG_BOOL                                       bAbort,
+                                                                IMG_UINT32                                     ui32PDumpFlags,
+                                                                RGX_KM_HW_RT_DATASET           *psKMHWRTDataSet,
+                                                                RGX_ZSBUFFER_DATA                      *psZSBuffer,
+                                                                RGX_ZSBUFFER_DATA                      *psMSAAScratchBuffer,
+                                                                IMG_UINT32                                     ui32SyncPMRCount,
+                                                                IMG_UINT32                                     *paui32SyncPMRFlags,
+                                                                PMR                                            **ppsSyncPMRs,
+                                                                IMG_UINT32                                     ui32RenderTargetSize,
+                                                                IMG_UINT32                                     ui32NumberOfDrawCalls,
+                                                                IMG_UINT32                                     ui32NumberOfIndices,
+                                                                IMG_UINT32                                     ui32NumberOfMRTs,
+                                                                IMG_UINT64                                     ui64DeadlineInus)
+{
+       /* per-context helper structures */
+       RGX_CCB_CMD_HELPER_DATA *pasTACmdHelperData = psRenderContext->asTACmdHelperData;
+       RGX_CCB_CMD_HELPER_DATA *pas3DCmdHelperData = psRenderContext->as3DCmdHelperData;
+
+       IMG_UINT64                              ui64FBSCEntryMask;
+
+       IMG_UINT32                              ui32TACmdCount=0;
+       IMG_UINT32                              ui323DCmdCount=0;
+       IMG_UINT32                              ui32TACmdOffset=0;
+       IMG_UINT32                              ui323DCmdOffset=0;
+       RGXFWIF_UFO                             sPRUFO;
+       IMG_UINT32                              i;
+       PVRSRV_ERROR                    eError = PVRSRV_OK;
+       PVRSRV_ERROR                    eError2 = PVRSRV_OK;
+
+       PVRSRV_RGXDEV_INFO      *psDevInfo = FWCommonContextGetRGXDevInfo(psRenderContext->s3DData.psServerCommonContext);
+       IMG_UINT32              ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal);
+       IMG_BOOL                bCCBStateOpen = IMG_FALSE;
+
+       IMG_UINT32                              ui32ClientPRUpdateCount = 0;
+       PRGXFWIF_UFO_ADDR               *pauiClientPRUpdateUFOAddress = NULL;
+       IMG_UINT32                              *paui32ClientPRUpdateValue = NULL;
+
+       PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+       PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+       PRGXFWIF_UFO_ADDR       pRMWUFOAddr;
+
+       PRGXFWIF_UFO_ADDR               *pauiClientTAFenceUFOAddress = NULL;
+       PRGXFWIF_UFO_ADDR               *pauiClientTAUpdateUFOAddress = NULL;
+       PRGXFWIF_UFO_ADDR               *pauiClient3DFenceUFOAddress = NULL;
+       PRGXFWIF_UFO_ADDR               *pauiClient3DUpdateUFOAddress = NULL;
+       PRGXFWIF_UFO_ADDR               uiPRFenceUFOAddress;
+
+       IMG_UINT64               uiCheckTAFenceUID = 0;
+       IMG_UINT64               uiCheck3DFenceUID = 0;
+       IMG_UINT64               uiUpdateTAFenceUID = 0;
+       IMG_UINT64               uiUpdate3DFenceUID = 0;
+
+       IMG_BOOL bUseCombined3DAnd3DPR = bKickPR && bKick3D && !pui83DPRDMCmd;
+
+       IMG_UINT32 ui32TACmdSizeTmp = 0, ui323DCmdSizeTmp = 0;
+
+       IMG_BOOL bTAFenceOnSyncCheckpointsOnly = IMG_FALSE;
+
+       RGXFWIF_KCCB_CMD_KICK_DATA      sTACmdKickData;
+       RGXFWIF_KCCB_CMD_KICK_DATA      s3DCmdKickData;
+       IMG_BOOL bUseSingleFWCommand = bKickTA && (bKickPR || bKick3D);
+
+       PVRSRV_FENCE    iUpdateTAFence = PVRSRV_NO_FENCE;
+       PVRSRV_FENCE    iUpdate3DFence = PVRSRV_NO_FENCE;
+
+       IMG_BOOL b3DFenceOnSyncCheckpointsOnly = IMG_FALSE;
+       IMG_UINT32 ui32TAFenceTimelineUpdateValue = 0;
+       IMG_UINT32 ui323DFenceTimelineUpdateValue = 0;
+
+       /*
+        * Count of the number of TA and 3D update values (may differ from number of
+        * TA and 3D updates later, as sync checkpoints do not need to specify a value)
+        */
+       IMG_UINT32 ui32ClientPRUpdateValueCount = 0;
+       IMG_UINT32 ui32ClientTAUpdateValueCount = ui32ClientTAUpdateCount;
+       IMG_UINT32 ui32Client3DUpdateValueCount = ui32Client3DUpdateCount;
+       PSYNC_CHECKPOINT *apsFenceTASyncCheckpoints = NULL;                             /*!< TA fence checkpoints */
+       PSYNC_CHECKPOINT *apsFence3DSyncCheckpoints = NULL;                             /*!< 3D fence checkpoints */
+       IMG_UINT32 ui32FenceTASyncCheckpointCount = 0;
+       IMG_UINT32 ui32Fence3DSyncCheckpointCount = 0;
+       PSYNC_CHECKPOINT psUpdateTASyncCheckpoint = NULL;                               /*!< TA update checkpoint (output) */
+       PSYNC_CHECKPOINT psUpdate3DSyncCheckpoint = NULL;                               /*!< 3D update checkpoint (output) */
+       PVRSRV_CLIENT_SYNC_PRIM *psTAFenceTimelineUpdateSync = NULL;
+       PVRSRV_CLIENT_SYNC_PRIM *ps3DFenceTimelineUpdateSync = NULL;
+       void *pvTAUpdateFenceFinaliseData = NULL;
+       void *pv3DUpdateFenceFinaliseData = NULL;
+
+       RGX_SYNC_DATA sTASyncData = {NULL};             /*!< Contains internal update syncs for TA */
+       RGX_SYNC_DATA s3DSyncData = {NULL};             /*!< Contains internal update syncs for 3D */
+
+       IMG_BOOL bTestSLRAdd3DCheck = IMG_FALSE;
+#if defined(SUPPORT_VALIDATION)
+       PVRSRV_FENCE hTestSLRTmpFence = PVRSRV_NO_FENCE;
+       PSYNC_CHECKPOINT psDummySyncCheckpoint = NULL;
+#endif
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL;
+       IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0;
+       PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL;
+       struct pvr_buffer_sync_append_data *psBufferSyncData = NULL;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataTA = {0};
+       RGXFWIF_WORKEST_KICK_DATA sWorkloadKickData3D = {0};
+       IMG_UINT32 ui32TACommandOffset = 0;
+       IMG_UINT32 ui323DCommandOffset = 0;
+       IMG_UINT32 ui32TACmdHeaderOffset = 0;
+       IMG_UINT32 ui323DCmdHeaderOffset = 0;
+       IMG_UINT32 ui323DFullRenderCommandOffset = 0;
+       IMG_UINT32 ui32TACmdOffsetWrapCheck = 0;
+       IMG_UINT32 ui323DCmdOffsetWrapCheck = 0;
+       RGX_WORKLOAD sWorkloadCharacteristics = {0};
+#endif
+
+       IMG_UINT32 ui32TAFenceCount, ui323DFenceCount;
+       IMG_UINT32 ui32TAUpdateCount, ui323DUpdateCount;
+       IMG_UINT32 ui32PRUpdateCount;
+
+       IMG_PID uiCurrentProcess = OSGetCurrentClientProcessIDKM();
+
+       IMG_UINT32 ui32Client3DFenceCount = 0;
+
+       /* Ensure we haven't been given a null ptr to
+        * TA fence values if we have been told we
+        * have TA sync prim fences
+        */
+       if (ui32ClientTAFenceCount > 0)
+       {
+               PVR_LOG_RETURN_IF_FALSE(paui32ClientTAFenceValue != NULL,
+                                       "paui32ClientTAFenceValue NULL but "
+                                       "ui32ClientTAFenceCount > 0",
+                                       PVRSRV_ERROR_INVALID_PARAMS);
+       }
+       /* Ensure we haven't been given a null ptr to
+        * TA update values if we have been told we
+        * have TA updates
+        */
+       if (ui32ClientTAUpdateCount > 0)
+       {
+               PVR_LOG_RETURN_IF_FALSE(paui32ClientTAUpdateValue != NULL,
+                                       "paui32ClientTAUpdateValue NULL but "
+                                       "ui32ClientTAUpdateCount > 0",
+                                       PVRSRV_ERROR_INVALID_PARAMS);
+       }
+       /* Ensure we haven't been given a null ptr to
+        * 3D update values if we have been told we
+        * have 3D updates
+        */
+       if (ui32Client3DUpdateCount > 0)
+       {
+               PVR_LOG_RETURN_IF_FALSE(paui32Client3DUpdateValue != NULL,
+                                       "paui32Client3DUpdateValue NULL but "
+                                       "ui32Client3DUpdateCount > 0",
+                                       PVRSRV_ERROR_INVALID_PARAMS);
+       }
+
+       /* Write FW addresses into CMD SHARED BLOCKs */
+       {
+               CMDTA3D_SHARED *psGeomCmdShared = (CMDTA3D_SHARED *)pui8TADMCmd;
+               CMDTA3D_SHARED *ps3DCmdShared = (CMDTA3D_SHARED *)pui83DDMCmd;
+               CMDTA3D_SHARED *psPR3DCmdShared = (CMDTA3D_SHARED *)pui83DPRDMCmd;
+
+               if (psKMHWRTDataSet == NULL)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "KMHWRTDataSet is a null-pointer"));
+                       return PVRSRV_ERROR_INVALID_PARAMS;
+               }
+
+               /* Write FW address for TA CMD */
+               if (psGeomCmdShared != NULL)
+               {
+                       psGeomCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr;
+
+                       if (psZSBuffer != NULL)
+                       {
+                               psGeomCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = psZSBuffer->sZSBufferFWDevVAddr;
+                       }
+                       if (psMSAAScratchBuffer != NULL)
+                       {
+                               psGeomCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = psMSAAScratchBuffer->sZSBufferFWDevVAddr;
+                       }
+               }
+
+               /* Write FW address for 3D CMD */
+               if (ps3DCmdShared != NULL)
+               {
+                       ps3DCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr;
+
+                       if (psZSBuffer != NULL)
+                       {
+                               ps3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = psZSBuffer->sZSBufferFWDevVAddr;
+                       }
+                       if (psMSAAScratchBuffer != NULL)
+                       {
+                               ps3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = psMSAAScratchBuffer->sZSBufferFWDevVAddr;
+                       }
+               }
+
+               /* Write FW address for PR3D CMD */
+               if (psPR3DCmdShared != NULL)
+               {
+                       psPR3DCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr;
+
+                       if (psZSBuffer != NULL)
+                       {
+                               psPR3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = psZSBuffer->sZSBufferFWDevVAddr;
+                       }
+                       if (psMSAAScratchBuffer != NULL)
+                       {
+                               psPR3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = psMSAAScratchBuffer->sZSBufferFWDevVAddr;
+                       }
+               }
+       }
+
+       if (unlikely(iUpdateTATimeline >= 0 && !piUpdateTAFence))
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+       if (unlikely(iUpdate3DTimeline >= 0 && !piUpdate3DFence))
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       CHKPT_DBG((PVR_DBG_ERROR,
+                          "%s: ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d, "
+                          "ui32Client3DFenceCount=%d, ui32Client3DUpdateCount=%d",
+                          __func__,
+                          ui32ClientTAFenceCount, ui32ClientTAUpdateCount,
+                          ui32Client3DFenceCount, ui32Client3DUpdateCount));
+
+       RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psRenderContext->psDeviceNode->pvDevice,
+                                 &pPreAddr,
+                                 &pPostAddr,
+                                 &pRMWUFOAddr);
+
+       /* Double-check we have a PR kick if there are client fences */
+       if (unlikely(!bKickPR && ui32Client3DFenceCount != 0))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: 3D fence passed without a PR kick",
+                       __func__));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       /* Ensure the string is null-terminated (Required for safety) */
+       szFenceNameTA[PVRSRV_SYNC_NAME_LENGTH-1] = '\0';
+       szFenceName3D[PVRSRV_SYNC_NAME_LENGTH-1] = '\0';
+
+       OSLockAcquire(psRenderContext->hLock);
+
+       ui32TAFenceCount = ui32ClientTAFenceCount;
+       ui323DFenceCount = ui32Client3DFenceCount;
+       ui32TAUpdateCount = ui32ClientTAUpdateCount;
+       ui323DUpdateCount = ui32Client3DUpdateCount;
+       ui32PRUpdateCount = ui32ClientPRUpdateCount;
+
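+       /* When sync PMRs are supplied, resolve them into buffer-sync checkpoints:
+        * the fence checkpoints are counted against the TA fence (or the 3D fence
+        * when the TA is not kicked, or always with SUPPORT_STRIP_RENDERING), and
+        * any update checkpoint against the 3D or PR updates.
+        */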
+#if defined(SUPPORT_BUFFER_SYNC)
+       if (ui32SyncPMRCount)
+       {
+               int err;
+
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Calling"
+                         " pvr_buffer_sync_resolve_and_create_fences", __func__));
+
+               err = pvr_buffer_sync_resolve_and_create_fences(
+                   psRenderContext->psBufferSyncContext,
+                   psRenderContext->psDeviceNode->hSyncCheckpointContext,
+                   ui32SyncPMRCount,
+                   ppsSyncPMRs,
+                   paui32SyncPMRFlags,
+                   &ui32BufferFenceSyncCheckpointCount,
+                   &apsBufferFenceSyncCheckpoints,
+                   &psBufferUpdateSyncCheckpoint,
+                   &psBufferSyncData
+               );
+
+               if (unlikely(err))
+               {
+                       switch (err)
+                       {
+                               case -EINTR:
+                                       eError = PVRSRV_ERROR_RETRY;
+                                       break;
+                               case -ENOMEM:
+                                       eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                                       break;
+                               default:
+                                       eError = PVRSRV_ERROR_INVALID_PARAMS;
+                                       break;
+                       }
+
+                       if (eError != PVRSRV_ERROR_RETRY)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "%s:   "
+                                       "pvr_buffer_sync_resolve_and_create_fences failed (%d)",
+                                       __func__, eError));
+                       }
+                       OSLockRelease(psRenderContext->hLock);
+                       return eError;
+               }
+
+#if !defined(SUPPORT_STRIP_RENDERING)
+               if (bKickTA)
+               {
+                       ui32TAFenceCount += ui32BufferFenceSyncCheckpointCount;
+               }
+               else
+               {
+                       ui323DFenceCount += ui32BufferFenceSyncCheckpointCount;
+               }
+#else /* !defined(SUPPORT_STRIP_RENDERING) */
+               ui323DFenceCount += ui32BufferFenceSyncCheckpointCount;
+
+               PVR_UNREFERENCED_PARAMETER(bTAFenceOnSyncCheckpointsOnly);
+#endif /* !defined(SUPPORT_STRIP_RENDERING) */
+
+               if (psBufferUpdateSyncCheckpoint != NULL)
+               {
+                       if (bKick3D)
+                       {
+                               ui323DUpdateCount++;
+                       }
+                       else
+                       {
+                               ui32PRUpdateCount++;
+                       }
+               }
+       }
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+#if !defined(UPDATE_FENCE_CHECKPOINT_COUNT) || UPDATE_FENCE_CHECKPOINT_COUNT != 1 && UPDATE_FENCE_CHECKPOINT_COUNT != 2
+#error "Invalid value for UPDATE_FENCE_CHECKPOINT_COUNT. Must be either 1 or 2."
+#endif /* !defined(UPDATE_FENCE_CHECKPOINT_COUNT) || UPDATE_FENCE_CHECKPOINT_COUNT != 1 && UPDATE_FENCE_CHECKPOINT_COUNT != 2 */
+
+       if (iCheckTAFence != PVRSRV_NO_FENCE)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence[TA]"
+                         " (iCheckFence=%d),"
+                         " psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>...",
+                         __func__, iCheckTAFence,
+                         (void *) psRenderContext->psDeviceNode->hSyncCheckpointContext));
+
+               /* Resolve the sync checkpoints that make up the input fence */
+               eError = SyncCheckpointResolveFence(
+                   psRenderContext->psDeviceNode->hSyncCheckpointContext,
+                   iCheckTAFence,
+                   &ui32FenceTASyncCheckpointCount,
+                   &apsFenceTASyncCheckpoints,
+                   &uiCheckTAFenceUID,
+                   ui32PDumpFlags
+               );
+               if (unlikely(eError != PVRSRV_OK))
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)",
+                                 __func__, eError));
+                       goto fail_resolve_input_ta_fence;
+               }
+
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d "
+                         "checkpoints (apsFenceSyncCheckpoints=<%p>)",
+                         __func__, iCheckTAFence, ui32FenceTASyncCheckpointCount,
+                         (void *) apsFenceTASyncCheckpoints));
+
+#if defined(TA3D_CHECKPOINT_DEBUG)
+               if (apsFenceTASyncCheckpoints)
+               {
+                       _DebugSyncCheckpoints(__func__, "TA", apsFenceTASyncCheckpoints,
+                                             ui32FenceTASyncCheckpointCount);
+               }
+#endif /* defined(TA3D_CHECKPOINT_DEBUG) */
+       }
+
+       if (iCheck3DFence != PVRSRV_NO_FENCE)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence[3D]"
+                         " (iCheckFence=%d), "
+                         "psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>...",
+                         __func__, iCheck3DFence,
+                         (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext));
+
+               /* Resolve the sync checkpoints that make up the input fence */
+               eError = SyncCheckpointResolveFence(
+                   psRenderContext->psDeviceNode->hSyncCheckpointContext,
+                   iCheck3DFence,
+                   &ui32Fence3DSyncCheckpointCount,
+                   &apsFence3DSyncCheckpoints,
+                   &uiCheck3DFenceUID,
+                   ui32PDumpFlags
+               );
+               if (unlikely(eError != PVRSRV_OK))
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)",
+                                 __func__, eError));
+                       goto fail_resolve_input_3d_fence;
+               }
+
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d "
+                         "checkpoints (apsFenceSyncCheckpoints=<%p>)",
+                         __func__, iCheck3DFence, ui32Fence3DSyncCheckpointCount,
+                         (void*)apsFence3DSyncCheckpoints));
+
+#if defined(TA3D_CHECKPOINT_DEBUG)
+               if (apsFence3DSyncCheckpoints)
+               {
+                       _DebugSyncCheckpoints(__func__, "3D", apsFence3DSyncCheckpoints,
+                                             ui32Fence3DSyncCheckpointCount);
+               }
+#endif /* defined(TA3D_CHECKPOINT_DEBUG) */
+       }
+
+       if (iCheckTAFence >= 0 || iUpdateTATimeline >= 0 ||
+           iCheck3DFence >= 0 || iUpdate3DTimeline >= 0)
+       {
+               IMG_UINT32 i;
+
+               if (bKickTA)
+               {
+                       ui32TAFenceCount += ui32FenceTASyncCheckpointCount;
+
+                       for (i = 0; i < ui32Fence3DSyncCheckpointCount; i++)
+                       {
+                               if (SyncCheckpointGetCreator(apsFence3DSyncCheckpoints[i]) !=
+                                   uiCurrentProcess)
+                               {
+                                       ui32TAFenceCount++;
+                               }
+                       }
+               }
+
+               if (bKick3D)
+               {
+                       ui323DFenceCount += ui32Fence3DSyncCheckpointCount;
+               }
+
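+               /* Each requested output timeline reserves UPDATE_FENCE_CHECKPOINT_COUNT
+                * update slots on the corresponding command; the PR command also
+                * reserves slots for the 3D timeline when no 3D kick is requested.
+                */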
+               ui32TAUpdateCount += iUpdateTATimeline != PVRSRV_NO_TIMELINE ?
+                               UPDATE_FENCE_CHECKPOINT_COUNT : 0;
+               ui323DUpdateCount += iUpdate3DTimeline != PVRSRV_NO_TIMELINE ?
+                               UPDATE_FENCE_CHECKPOINT_COUNT : 0;
+               ui32PRUpdateCount += iUpdate3DTimeline != PVRSRV_NO_TIMELINE && !bKick3D ?
+                               UPDATE_FENCE_CHECKPOINT_COUNT : 0;
+       }
+
+#if defined(SUPPORT_VALIDATION)
+       /* Check if TestingSLR is adding an extra sync checkpoint to the
+        * 3D fence check (which we won't signal)
+        */
+       if ((psDevInfo->ui32TestSLRInterval > 0) &&
+           (--psDevInfo->ui32TestSLRCount == 0))
+       {
+               bTestSLRAdd3DCheck = IMG_TRUE;
+               psDevInfo->ui32TestSLRCount = psDevInfo->ui32TestSLRInterval;
+       }
+
+       if ((bTestSLRAdd3DCheck) && (iUpdate3DTimeline != PVRSRV_NO_TIMELINE))
+       {
+               if (iUpdate3DTimeline == PVRSRV_NO_TIMELINE)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Would append additional SLR checkpoint "
+                                "to 3D fence but no update 3D timeline provided", __func__));
+               }
+               else
+               {
+                       SyncCheckpointAlloc(psRenderContext->psDeviceNode->hSyncCheckpointContext,
+                                           iUpdate3DTimeline,
+                                           hTestSLRTmpFence,
+                                           "TestSLRCheck",
+                                           &psDummySyncCheckpoint);
+                       PVR_DPF((PVR_DBG_WARNING, "%s: Appending additional SLR checkpoint to 3D fence "
+                                                 "checkpoints (psDummySyncCheckpoint=<%p>)",
+                                                 __func__, (void*)psDummySyncCheckpoint));
+                       SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DFence,
+                                                     1,
+                                                     &psDummySyncCheckpoint);
+                       if (!pauiClient3DFenceUFOAddress)
+                       {
+                               pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs;
+                       }
+
+                       if (ui32Client3DFenceCount == 0)
+                       {
+                               b3DFenceOnSyncCheckpointsOnly = IMG_TRUE;
+                       }
+                       ui323DFenceCount++;
+               }
+       }
+#endif /* defined(SUPPORT_VALIDATION) */
+
+       /*
+        * Extract the FBSC entry mask from the MMU context for the deferred FBSC
+        * invalidate command; in other words, read the value and clear it afterwards.
+        * The FBSC entry mask must be extracted from the MMU context and updated just
+        * before the kick starts, as it must be ready at the time of context activation.
+        *
+        * NOTE: We use sTAData to get the ServerCommonContext, which gives us the
+        *       ServerMMUCtx. Should s3DData be used in some cases? Under the
+        *       assumption that sTAData and s3DData share the same
+        *       psServerCommonContext, the answer is no.
+        *
+        *       The ui64FBSCEntryMask filled by the following call gets cleared
+        *       after the first KICK command and is ignored in the context of
+        *       partial renders.
+        */
+       eError = RGXExtractFBSCEntryMaskFromMMUContext(
+           psRenderContext->psDeviceNode,
+           FWCommonContextGetServerMMUCtx(psRenderContext->sTAData.psServerCommonContext),
+           &ui64FBSCEntryMask
+       );
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Failed to extract FBSC Entry Mask (%d)", eError));
+               goto fail_tacmdinvalfbsc;
+       }
+
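+       /* First pass: compute the size of each CCB command so that the available
+        * client CCB space can be checked before anything is written.
+        */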
+       if (bKickTA)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   calling RGXCmdHelperInitCmdCCB(),"
+                         " ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d",
+                         __func__, ui32TAFenceCount, ui32TAUpdateCount));
+
+               RGXCmdHelperInitCmdCCB_CommandSize(
+                   psDevInfo,
+                       ui64FBSCEntryMask,
+                   ui32TAFenceCount,
+                   ui32TAUpdateCount,
+                   ui32TACmdSize,
+                   &pPreAddr,
+                   (bKick3D ? NULL : &pPostAddr),
+                   (bKick3D ? NULL : &pRMWUFOAddr),
+                   pasTACmdHelperData
+               );
+
+               /* Clear the mask as we don't want to invalidate the FBSC multiple times
+                * with the same value of ui64FBSCEntryMask.
+                */
+               ui64FBSCEntryMask = 0;
+       }
+
+       if (bKickPR)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   calling RGXCmdHelperInitCmdCCB(),"
+                         " ui32Client3DFenceCount=%d", __func__,
+                         ui323DFenceCount));
+
+               RGXCmdHelperInitCmdCCB_CommandSize(
+                   psDevInfo,
+                       0, /* empty ui64FBSCEntryMask: it is assumed that PRs should
+                       * not invalidate the FBSC */
+                   ui323DFenceCount,
+                   0,
+                   sizeof(sPRUFO),
+                       NULL,
+                       NULL,
+                       NULL,
+                   &pas3DCmdHelperData[ui323DCmdCount++]
+               );
+       }
+
+       if (bKickPR && !bUseCombined3DAnd3DPR)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s:   calling RGXCmdHelperInitCmdCCB(),"
+                         " ui32PRUpdateCount=%d", __func__,
+                         ui32PRUpdateCount));
+
+               RGXCmdHelperInitCmdCCB_CommandSize(
+                   psDevInfo,
+                       0, /* empty ui64FBSCEntryMask: it is assumed that PRs should
+                       * not invalidate the FBSC */
+                   0,
+                   ui32PRUpdateCount,
+                   /* if the client has not provided a 3DPR command, the regular 3D
+                    * command should be used instead */
+                   pui83DPRDMCmd ? ui323DPRCmdSize : ui323DCmdSize,
+                       NULL,
+                       NULL,
+                       NULL,
+                   &pas3DCmdHelperData[ui323DCmdCount++]
+               );
+       }
+
+       if (bKick3D || bAbort)
+       {
+               if (!bKickTA)
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s:   calling RGXCmdHelperInitCmdCCB(),"
+                         " ui32Client3DFenceCount=%d", __func__,
+                         ui323DFenceCount));
+               }
+
+               RGXCmdHelperInitCmdCCB_CommandSize(
+                   psDevInfo,
+                       ui64FBSCEntryMask, /* equals: [a] 0 if 3D is preceded by TA
+                                       *         [b] value from the MMU ctx otherwise */
+                       bKickTA ? 0 : ui323DFenceCount,
+                   ui323DUpdateCount,
+                   ui323DCmdSize,
+                       (bKickTA ? NULL : &pPreAddr),
+                       &pPostAddr,
+                       &pRMWUFOAddr,
+                   &pas3DCmdHelperData[ui323DCmdCount++]
+               );
+       }
+
+       if (bKickTA)
+       {
+               ui32TACmdSizeTmp = RGXCmdHelperGetCommandSize(1, pasTACmdHelperData);
+
+               eError = RGXCheckSpaceCCB(
+                   FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext),
+                   ui32TACmdSizeTmp
+               );
+               if (eError != PVRSRV_OK)
+               {
+                       goto err_not_enough_space;
+               }
+       }
+
+       if (ui323DCmdCount > 0)
+       {
+               ui323DCmdSizeTmp = RGXCmdHelperGetCommandSize(ui323DCmdCount, pas3DCmdHelperData);
+
+               eError = RGXCheckSpaceCCB(
+                   FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext),
+                   ui323DCmdSizeTmp
+               );
+               if (eError != PVRSRV_OK)
+               {
+                       goto err_not_enough_space;
+               }
+       }
+
+       /* Reset the 3D command count; it is incremented again below as the 3D
+        * command helper data is initialised and acquired.
+        */
+       ui323DCmdCount = 0;
+
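+       /* Resolve the client-supplied sync prim blocks and offsets into FW UFO
+        * addresses for the TA and 3D fence/update lists.
+        */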
+       CHKPT_DBG((PVR_DBG_ERROR,
+                          "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrListTAFence, %d fences)...",
+                          __func__, ui32ClientTAFenceCount));
+       eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrListTAFence,
+                                                                               ui32ClientTAFenceCount,
+                                                                               apsClientTAFenceSyncPrimBlock,
+                                                                               paui32ClientTAFenceSyncOffset);
+       if (unlikely(eError != PVRSRV_OK))
+       {
+               goto err_populate_sync_addr_list_ta_fence;
+       }
+
+       if (ui32ClientTAFenceCount)
+       {
+               pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs;
+       }
+
+       CHKPT_DBG((PVR_DBG_ERROR,
+                          "%s: pauiClientTAFenceUFOAddress=<%p> ",
+                          __func__, (void*)pauiClientTAFenceUFOAddress));
+
+       CHKPT_DBG((PVR_DBG_ERROR,
+                          "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrListTAUpdate, %d updates)...",
+                          __func__, ui32ClientTAUpdateCount));
+       eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrListTAUpdate,
+                                                                               ui32ClientTAUpdateCount,
+                                                                               apsClientTAUpdateSyncPrimBlock,
+                                                                               paui32ClientTAUpdateSyncOffset);
+       if (unlikely(eError != PVRSRV_OK))
+       {
+               goto err_populate_sync_addr_list_ta_update;
+       }
+
+       if (ui32ClientTAUpdateCount)
+       {
+               pauiClientTAUpdateUFOAddress = psRenderContext->sSyncAddrListTAUpdate.pasFWAddrs;
+       }
+       CHKPT_DBG((PVR_DBG_ERROR,
+                          "%s: pauiClientTAUpdateUFOAddress=<%p> ",
+                          __func__, (void*)pauiClientTAUpdateUFOAddress));
+
+       CHKPT_DBG((PVR_DBG_ERROR,
+                          "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrList3DFence, %d fences)...",
+                          __func__, ui32Client3DFenceCount));
+       eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrList3DFence,
+                                                                               ui32Client3DFenceCount,
+                                                                               NULL,
+                                                                               NULL);
+       if (unlikely(eError != PVRSRV_OK))
+       {
+               goto err_populate_sync_addr_list_3d_fence;
+       }
+
+       CHKPT_DBG((PVR_DBG_ERROR,
+                          "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrList3DUpdate, %d updates)...",
+                          __func__, ui32Client3DUpdateCount));
+       eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrList3DUpdate,
+                                                                               ui32Client3DUpdateCount,
+                                                                               apsClient3DUpdateSyncPrimBlock,
+                                                                               paui32Client3DUpdateSyncOffset);
+       if (unlikely(eError != PVRSRV_OK))
+       {
+               goto err_populate_sync_addr_list_3d_update;
+       }
+
+       if (ui32Client3DUpdateCount || (iUpdate3DTimeline != PVRSRV_NO_TIMELINE && piUpdate3DFence && bKick3D))
+       {
+               pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+       }
+       CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClient3DUpdateUFOAddress=<%p> ",
+                          __func__, (void*)pauiClient3DUpdateUFOAddress));
+
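+       /* Resolve the PR fence sync prim block and offset into its FW UFO address. */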
+       eError = SyncPrimitiveBlockToFWAddr(psPRFenceSyncPrimBlock,
+                                                                       ui32PRFenceSyncOffset,
+                                                                       &uiPRFenceUFOAddress);
+
+       if (unlikely(eError != PVRSRV_OK))
+       {
+               goto err_pr_fence_address;
+       }
+
+#if (ENABLE_TA3D_UFO_DUMP == 1)
+       DumpUfoList(ui32ClientTAFenceCount, ui32ClientTAUpdateCount,
+                   ui32Client3DFenceCount + (bTestSLRAdd3DCheck ? 1 : 0),
+                   ui32Client3DUpdateCount,
+                   pauiClientTAFenceUFOAddress, paui32ClientTAFenceValue,
+                   pauiClientTAUpdateUFOAddress, paui32ClientTAUpdateValue,
+                   pauiClient3DFenceUFOAddress, NULL,
+                   pauiClient3DUpdateUFOAddress, paui32Client3DUpdateValue);
+#endif /* (ENABLE_TA3D_UFO_DUMP == 1) */
+
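+       /* Merge any buffer sync (sync PMR) fences and updates into the TA/3D/PR
+        * sync lists.
+        */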
+       if (ui32SyncPMRCount)
+       {
+#if defined(SUPPORT_BUFFER_SYNC)
+#if !defined(SUPPORT_STRIP_RENDERING)
+               /* Append buffer sync fences to TA fences */
+               if (ui32BufferFenceSyncCheckpointCount > 0 && bKickTA)
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR,
+                                          "%s:   Append %d buffer sync checkpoints to TA Fence "
+                                          "(&psRenderContext->sSyncAddrListTAFence=<%p>, "
+                                          "pauiClientTAFenceUFOAddress=<%p>)...",
+                                          __func__,
+                                          ui32BufferFenceSyncCheckpointCount,
+                                          (void*)&psRenderContext->sSyncAddrListTAFence,
+                                          (void*)pauiClientTAFenceUFOAddress));
+                       SyncAddrListAppendAndDeRefCheckpoints(&psRenderContext->sSyncAddrListTAFence,
+                                                             ui32BufferFenceSyncCheckpointCount,
+                                                             apsBufferFenceSyncCheckpoints);
+                       if (!pauiClientTAFenceUFOAddress)
+                       {
+                               pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs;
+                       }
+                       if (ui32ClientTAFenceCount == 0)
+                       {
+                               bTAFenceOnSyncCheckpointsOnly = IMG_TRUE;
+                       }
+                       ui32ClientTAFenceCount += ui32BufferFenceSyncCheckpointCount;
+               }
+               else
+#endif /* !defined(SUPPORT_STRIP_RENDERING) */
+               /* Append buffer sync fences to 3D fences */
+               if (ui32BufferFenceSyncCheckpointCount > 0)
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR,
+                                          "%s:   Append %d buffer sync checkpoints to 3D Fence "
+                                          "(&psRenderContext->sSyncAddrList3DFence=<%p>, "
+                                          "pauiClient3DFenceUFOAddress=<%p>)...",
+                                          __func__,
+                                          ui32BufferFenceSyncCheckpointCount,
+                                          (void*)&psRenderContext->sSyncAddrList3DFence,
+                                          (void*)pauiClient3DFenceUFOAddress));
+                       SyncAddrListAppendAndDeRefCheckpoints(&psRenderContext->sSyncAddrList3DFence,
+                                       ui32BufferFenceSyncCheckpointCount,
+                                       apsBufferFenceSyncCheckpoints);
+                       if (!pauiClient3DFenceUFOAddress)
+                       {
+                               pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs;
+                       }
+                       if (ui32Client3DFenceCount == 0)
+                       {
+                               b3DFenceOnSyncCheckpointsOnly = IMG_TRUE;
+                       }
+                       ui32Client3DFenceCount += ui32BufferFenceSyncCheckpointCount;
+               }
+
+               if (psBufferUpdateSyncCheckpoint)
+               {
+                       /* If we have a 3D kick append update to the 3D updates else append to the PR update */
+                       if (bKick3D)
+                       {
+                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                  "%s:   Append 1 buffer sync checkpoint<%p> to 3D Update"
+                                                  " (&psRenderContext->sSyncAddrList3DUpdate=<%p>,"
+                                                  " pauiClient3DUpdateUFOAddress=<%p>)...",
+                                                  __func__,
+                                                  (void*)psBufferUpdateSyncCheckpoint,
+                                                  (void*)&psRenderContext->sSyncAddrList3DUpdate,
+                                                  (void*)pauiClient3DUpdateUFOAddress));
+                               /* Append buffer sync update to 3D updates */
+                               SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate,
+                                                                                         1,
+                                                                                         &psBufferUpdateSyncCheckpoint);
+                               if (!pauiClient3DUpdateUFOAddress)
+                               {
+                                       pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+                               }
+                               ui32Client3DUpdateCount++;
+                       }
+                       else
+                       {
+                               CHKPT_DBG((PVR_DBG_ERROR,
+                                          "%s:   Append 1 buffer sync checkpoint<%p> to PR Update"
+                                                  " (&psRenderContext->sSyncAddrList3DUpdate=<%p>,"
+                                                  " pauiClientPRUpdateUFOAddress=<%p>)...",
+                                          __func__,
+                                                  (void*)psBufferUpdateSyncCheckpoint,
+                                                  (void*)&psRenderContext->sSyncAddrList3DUpdate,
+                                                  (void*)pauiClientPRUpdateUFOAddress));
+                               /* Attach update to the 3D (used for PR) Updates */
+                               SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate,
+                                                                                         1,
+                                                                                         &psBufferUpdateSyncCheckpoint);
+                               if (!pauiClientPRUpdateUFOAddress)
+                               {
+                                       pauiClientPRUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+                               }
+                               ui32ClientPRUpdateCount++;
+                       }
+               }
+               CHKPT_DBG((PVR_DBG_ERROR,
+                                  "%s:   (after buffer_sync) ui32ClientTAFenceCount=%d, "
+                                  "ui32ClientTAUpdateCount=%d, ui32Client3DFenceCount=%d, "
+                                  "ui32Client3DUpdateCount=%d, ui32ClientPRUpdateCount=%d",
+                                  __func__, ui32ClientTAFenceCount, ui32ClientTAUpdateCount,
+                                  ui32Client3DFenceCount, ui32Client3DUpdateCount,
+                                  ui32ClientPRUpdateCount));
+
+#else /* defined(SUPPORT_BUFFER_SYNC) */
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Buffer sync not supported but got %u buffers",
+                                __func__, ui32SyncPMRCount));
+
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto err_no_buffer_sync_invalid_params;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+       }
+
+       /*
+        * The hardware requires a PR to be submitted if there is a TA (otherwise
+        * it can wedge if we run out of PB space with no PR to run)
+        *
+        * If we only have a TA, attach native checks to the TA and updates to the PR
+        * If we have a TA and 3D, attach checks to TA, updates to 3D
+        * If we only have a 3D, attach checks and updates to the 3D
+        *
+        * Note that 'updates' includes the cleanup syncs for 'check' fence FDs, in
+        * addition to the update fence FD (if supplied)
+        *
+        * Currently, the client driver never kicks only the 3D, so we don't
+        * support that for the time being.
+        */
+       if (iCheckTAFence >= 0 || iUpdateTATimeline >= 0 ||
+           iCheck3DFence >= 0 || iUpdate3DTimeline >= 0)
+       {
+               PRGXFWIF_UFO_ADDR       *pauiClientTAIntUpdateUFOAddress = NULL;
+               PRGXFWIF_UFO_ADDR       *pauiClient3DIntUpdateUFOAddress = NULL;
+
+               CHKPT_DBG((PVR_DBG_ERROR,
+                                  "%s: [TA] iCheckFence = %d, iUpdateTimeline = %d",
+                                  __func__, iCheckTAFence, iUpdateTATimeline));
+               CHKPT_DBG((PVR_DBG_ERROR,
+                                  "%s: [3D] iCheckFence = %d, iUpdateTimeline = %d",
+                                  __func__, iCheck3DFence, iUpdate3DTimeline));
+
+               {
+                       /* Create the output fence for TA (if required) */
+                       if (iUpdateTATimeline != PVRSRV_NO_TIMELINE)
+                       {
+                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                  "%s: calling SyncCheckpointCreateFence[TA] "
+                                                  "(iUpdateFence=%d, iUpdateTimeline=%d, "
+                                                  "psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>)",
+                                                  __func__, iUpdateTAFence, iUpdateTATimeline,
+                                                  (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext));
+                               eError = SyncCheckpointCreateFence(psRenderContext->psDeviceNode,
+                                               szFenceNameTA,
+                                               iUpdateTATimeline,
+                                               psRenderContext->psDeviceNode->hSyncCheckpointContext,
+                                               &iUpdateTAFence,
+                                               &uiUpdateTAFenceUID,
+                                               &pvTAUpdateFenceFinaliseData,
+                                               &psUpdateTASyncCheckpoint,
+                                               (void*)&psTAFenceTimelineUpdateSync,
+                                               &ui32TAFenceTimelineUpdateValue,
+                                               ui32PDumpFlags);
+                               if (unlikely(eError != PVRSRV_OK))
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s:   SyncCheckpointCreateFence[TA] failed (%d)", __func__, eError));
+                                       goto fail_create_ta_fence;
+                               }
+
+                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                  "%s: returned from SyncCheckpointCreateFence[TA] "
+                                                  "(iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, "
+                                                  "ui32FenceTimelineUpdateValue=0x%x)",
+                                                  __func__, iUpdateTAFence,
+                                                  (void*)psTAFenceTimelineUpdateSync,
+                                                  ui32TAFenceTimelineUpdateValue));
+
+                               /* Store the FW address of the update sync checkpoint in pauiClientTAIntUpdateUFOAddress */
+                               pauiClientTAIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdateTASyncCheckpoint);
+                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                  "%s: pauiClientIntUpdateUFOAddress[TA]->ui32Addr=0x%x",
+                                                  __func__, pauiClientTAIntUpdateUFOAddress->ui32Addr));
+                       }
+
+                       /* Append the sync prim update for the TA timeline (if required) */
+                       if (psTAFenceTimelineUpdateSync)
+                       {
+                               sTASyncData.ui32ClientUpdateCount        = ui32ClientTAUpdateCount;
+                               sTASyncData.ui32ClientUpdateValueCount   = ui32ClientTAUpdateValueCount;
+                               sTASyncData.ui32ClientPRUpdateValueCount = (bKick3D) ? 0 : ui32ClientPRUpdateValueCount;
+                               sTASyncData.paui32ClientUpdateValue      = paui32ClientTAUpdateValue;
+
+                               eError = RGXSyncAppendTimelineUpdate(ui32TAFenceTimelineUpdateValue,
+                                                                                       &psRenderContext->sSyncAddrListTAUpdate,
+                                                                                       (bKick3D) ? NULL : &psRenderContext->sSyncAddrList3DUpdate,
+                                                                                       psTAFenceTimelineUpdateSync,
+                                                                                       &sTASyncData,
+                                                                                       bKick3D);
+                               if (unlikely(eError != PVRSRV_OK))
+                               {
+                                       goto fail_alloc_update_values_mem_TA;
+                               }
+
+                               paui32ClientTAUpdateValue = sTASyncData.paui32ClientUpdateValue;
+                               ui32ClientTAUpdateValueCount = sTASyncData.ui32ClientUpdateValueCount;
+                               pauiClientTAUpdateUFOAddress = sTASyncData.pauiClientUpdateUFOAddress;
+                               ui32ClientTAUpdateCount = sTASyncData.ui32ClientUpdateCount;
+                       }
+
+                       /* Create the output fence for 3D (if required) */
+                       if (iUpdate3DTimeline != PVRSRV_NO_TIMELINE)
+                       {
+                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                  "%s: calling SyncCheckpointCreateFence[3D] "
+                                                  "(iUpdateFence=%d, iUpdateTimeline=%d, "
+                                                  "psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>)",
+                                                  __func__, iUpdate3DFence, iUpdate3DTimeline,
+                                                  (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext));
+                               eError = SyncCheckpointCreateFence(psRenderContext->psDeviceNode,
+                                               szFenceName3D,
+                                               iUpdate3DTimeline,
+                                               psRenderContext->psDeviceNode->hSyncCheckpointContext,
+                                               &iUpdate3DFence,
+                                               &uiUpdate3DFenceUID,
+                                               &pv3DUpdateFenceFinaliseData,
+                                               &psUpdate3DSyncCheckpoint,
+                                               (void*)&ps3DFenceTimelineUpdateSync,
+                                               &ui323DFenceTimelineUpdateValue,
+                                               ui32PDumpFlags);
+                               if (unlikely(eError != PVRSRV_OK))
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s:   SyncCheckpointCreateFence[3D] failed (%d)", __func__, eError));
+                                       goto fail_create_3d_fence;
+                               }
+
+                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                  "%s: returned from SyncCheckpointCreateFence[3D] "
+                                                  "(iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, "
+                                                  "ui32FenceTimelineUpdateValue=0x%x)",
+                                                  __func__, iUpdate3DFence,
+                                                  (void*)ps3DFenceTimelineUpdateSync,
+                                                  ui323DFenceTimelineUpdateValue));
+
+                               /* Store the FW address of the update sync checkpoint in pauiClient3DIntUpdateUFOAddress */
+                               pauiClient3DIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdate3DSyncCheckpoint);
+                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                  "%s: pauiClientIntUpdateUFOAddress[3D]->ui32Addr=0x%x",
+                                                  __func__, pauiClient3DIntUpdateUFOAddress->ui32Addr));
+                       }
+
+                       /* Append the sync prim update for the 3D timeline (if required) */
+                       if (ps3DFenceTimelineUpdateSync)
+                       {
+                               s3DSyncData.ui32ClientUpdateCount = ui32Client3DUpdateCount;
+                               s3DSyncData.ui32ClientUpdateValueCount = ui32Client3DUpdateValueCount;
+                               s3DSyncData.ui32ClientPRUpdateValueCount = ui32ClientPRUpdateValueCount;
+                               s3DSyncData.paui32ClientUpdateValue = paui32Client3DUpdateValue;
+
+                               eError = RGXSyncAppendTimelineUpdate(ui323DFenceTimelineUpdateValue,
+                                                                                       &psRenderContext->sSyncAddrList3DUpdate,
+                                                                                       &psRenderContext->sSyncAddrList3DUpdate,        /*!< PR update: is this required? */
+                                                                                       ps3DFenceTimelineUpdateSync,
+                                                                                       &s3DSyncData,
+                                                                                       bKick3D);
+                               if (unlikely(eError != PVRSRV_OK))
+                               {
+                                       goto fail_alloc_update_values_mem_3D;
+                               }
+
+                               paui32Client3DUpdateValue = s3DSyncData.paui32ClientUpdateValue;
+                               ui32Client3DUpdateValueCount = s3DSyncData.ui32ClientUpdateValueCount;
+                               pauiClient3DUpdateUFOAddress = s3DSyncData.pauiClientUpdateUFOAddress;
+                               ui32Client3DUpdateCount = s3DSyncData.ui32ClientUpdateCount;
+
+                               if (!bKick3D)
+                               {
+                                       paui32ClientPRUpdateValue = s3DSyncData.paui32ClientPRUpdateValue;
+                                       ui32ClientPRUpdateValueCount = s3DSyncData.ui32ClientPRUpdateValueCount;
+                                       pauiClientPRUpdateUFOAddress = s3DSyncData.pauiClientPRUpdateUFOAddress;
+                                       ui32ClientPRUpdateCount = s3DSyncData.ui32ClientPRUpdateCount;
+                               }
+                       }
+
+                       /*
+                        * The hardware requires a PR to be submitted if there is a TA OOM.
+                        * If we only have a TA, attach native checks and updates to the TA
+                        * and 3D updates to the PR.
+                        * If we have a TA and 3D, attach the native TA checks and updates
+                        * to the TA and similarly for the 3D.
+                        * Note that 'updates' includes the cleanup syncs for 'check' fence
+                        * FDs, in addition to the update fence FD (if supplied).
+                        * Currently, the client driver never kicks only the 3D, so we don't
+                        * support that for the time being.
+                        */
+
+                       {
+                               if (bKickTA)
+                               {
+                                       /* Attach checks and updates to TA */
+
+                                       /* Checks (from input fence) */
+                                       if (ui32FenceTASyncCheckpointCount > 0)
+                                       {
+                                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                                  "%s:   Append %d sync checkpoints to TA Fence (apsFenceSyncCheckpoints=<%p>)...",
+                                                                  __func__,
+                                                                  ui32FenceTASyncCheckpointCount,
+                                                                  (void*)apsFenceTASyncCheckpoints));
+                                               SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAFence,
+                                                                                                         ui32FenceTASyncCheckpointCount,
+                                                                                                         apsFenceTASyncCheckpoints);
+                                               if (!pauiClientTAFenceUFOAddress)
+                                               {
+                                                       pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs;
+                                               }
+                                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                                  "%s:   {ui32ClientTAFenceCount was %d, now %d}",
+                                                                  __func__, ui32ClientTAFenceCount,
+                                                                  ui32ClientTAFenceCount + ui32FenceTASyncCheckpointCount));
+                                               if (ui32ClientTAFenceCount == 0)
+                                               {
+                                                       bTAFenceOnSyncCheckpointsOnly = IMG_TRUE;
+                                               }
+                                               ui32ClientTAFenceCount += ui32FenceTASyncCheckpointCount;
+                                       }
+                                       CHKPT_DBG((PVR_DBG_ERROR,
+                                                          "%s:   {ui32ClientTAFenceCount now %d}",
+                                                          __func__, ui32ClientTAFenceCount));
+
+                                       if (psUpdateTASyncCheckpoint)
+                                       {
+                                               /* Update (from output fence) */
+                                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                                  "%s:   Append 1 sync checkpoint<%p> (ID=%d) to TA Update...",
+                                                                  __func__, (void*)psUpdateTASyncCheckpoint,
+                                                                  SyncCheckpointGetId(psUpdateTASyncCheckpoint)));
+                                               SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAUpdate,
+                                                                                                         1,
+                                                                                                         &psUpdateTASyncCheckpoint);
+                                               if (!pauiClientTAUpdateUFOAddress)
+                                               {
+                                                       pauiClientTAUpdateUFOAddress = psRenderContext->sSyncAddrListTAUpdate.pasFWAddrs;
+                                               }
+                                               ui32ClientTAUpdateCount++;
+                                       }
+
+                                       if (!bKick3D && psUpdate3DSyncCheckpoint)
+                                       {
+                                               /* Attach update to the 3D (used for PR) Updates */
+                                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                                  "%s:   Append 1 sync checkpoint<%p> (ID=%d) to 3D(PR) Update...",
+                                                                  __func__, (void*)psUpdate3DSyncCheckpoint,
+                                                                  SyncCheckpointGetId(psUpdate3DSyncCheckpoint)));
+                                               SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate,
+                                                                                                         1,
+                                                                                                         &psUpdate3DSyncCheckpoint);
+                                               if (!pauiClientPRUpdateUFOAddress)
+                                               {
+                                                       pauiClientPRUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+                                               }
+                                               ui32ClientPRUpdateCount++;
+                                       }
+                               }
+
+                               if (bKick3D)
+                               {
+                                       /* Attach checks and updates to the 3D */
+
+                                       /* Checks (from input fence) */
+                                       if (ui32Fence3DSyncCheckpointCount > 0)
+                                       {
+                                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                                  "%s:   Append %d sync checkpoints to 3D Fence...",
+                                                                  __func__, ui32Fence3DSyncCheckpointCount));
+                                               SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DFence,
+                                                                                                         ui32Fence3DSyncCheckpointCount,
+                                                                                                         apsFence3DSyncCheckpoints);
+                                               if (!pauiClient3DFenceUFOAddress)
+                                               {
+                                                       pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs;
+                                               }
+                                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                                  "%s:   {ui32Client3DFenceCount was %d, now %d}",
+                                                                  __func__, ui32Client3DFenceCount,
+                                                                  ui32Client3DFenceCount + ui32Fence3DSyncCheckpointCount));
+                                               if (ui32Client3DFenceCount == 0)
+                                               {
+                                                       b3DFenceOnSyncCheckpointsOnly = IMG_TRUE;
+                                               }
+                                               ui32Client3DFenceCount += ui32Fence3DSyncCheckpointCount;
+                                       }
+                                       CHKPT_DBG((PVR_DBG_ERROR,
+                                                          "%s:   {ui32Client3DFenceCount now %d}",
+                                                          __func__, ui32Client3DFenceCount));
+
+                                       if (psUpdate3DSyncCheckpoint)
+                                       {
+                                               /* Update (from output fence) */
+                                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                                  "%s:   Append 1 sync checkpoint<%p> (ID=%d) to 3D Update...",
+                                                                  __func__, (void*)psUpdate3DSyncCheckpoint,
+                                                                  SyncCheckpointGetId(psUpdate3DSyncCheckpoint)));
+                                               SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate,
+                                                                                                         1,
+                                                                                                         &psUpdate3DSyncCheckpoint);
+                                               if (!pauiClient3DUpdateUFOAddress)
+                                               {
+                                                       pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+                                               }
+                                               ui32Client3DUpdateCount++;
+                                       }
+                               }
+
+                               /*
+                                * Relocate sync check points from the 3D fence that are
+                                * external to the current process, to the TA fence.
+                                * This avoids a sync lockup when dependent renders are
+                                * submitted out-of-order and a PR must be scheduled.
+                                */
+                               if (bKickTA)
+                               {
+                                       /* Search for external timeline dependencies */
+                                       CHKPT_DBG((PVR_DBG_ERROR,
+                                                          "%s: Checking 3D fence for external sync points (%d)...",
+                                                          __func__, ui32Fence3DSyncCheckpointCount));
+
+                                       for (i=0; i<ui32Fence3DSyncCheckpointCount; i++)
+                                       {
+                                               /* Check to see if the checkpoint is on a TL owned by
+                                                * another process.
+                                                */
+                                               if (SyncCheckpointGetCreator(apsFence3DSyncCheckpoints[i]) != uiCurrentProcess)
+                                               {
+                                                       /* 3D Sync point represents cross process
+                                                        * dependency, copy sync point to TA command fence. */
+                                                       CHKPT_DBG((PVR_DBG_ERROR,
+                                                                          "%s:   Append 1 sync checkpoint<%p> (ID=%d) to TA Fence...",
+                                                                          __func__, (void*)apsFence3DSyncCheckpoints[i],
+                                                                          SyncCheckpointGetId(apsFence3DSyncCheckpoints[i])));
+
+                                                       SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAFence,
+                                                                                                                 1,
+                                                                                                                 &apsFence3DSyncCheckpoints[i]);
+
+                                                       if (!pauiClientTAFenceUFOAddress)
+                                                       {
+                                                               pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs;
+                                                       }
+
+                                                       CHKPT_DBG((PVR_DBG_ERROR,
+                                                                          "%s:   {ui32ClientTAFenceCount was %d, now %d}",
+                                                                          __func__,
+                                                                          ui32ClientTAFenceCount,
+                                                                          ui32ClientTAFenceCount + 1));
+
+                                                       if (ui32ClientTAFenceCount == 0)
+                                                       {
+                                                               bTAFenceOnSyncCheckpointsOnly = IMG_TRUE;
+                                                       }
+
+                                                       ui32ClientTAFenceCount++;
+                                               }
+                                       }
+                               }
+
+                               CHKPT_DBG((PVR_DBG_ERROR,
+                                                  "%s:   (after pvr_sync) ui32ClientTAFenceCount=%d, "
+                                                  "ui32ClientTAUpdateCount=%d, ui32Client3DFenceCount=%d, "
+                                                  "ui32Client3DUpdateCount=%d, ui32ClientPRUpdateCount=%d",
+                                                  __func__,
+                                                  ui32ClientTAFenceCount, ui32ClientTAUpdateCount,
+                                                  ui32Client3DFenceCount, ui32Client3DUpdateCount,
+                                                  ui32ClientPRUpdateCount));
+                       }
+               }
+
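+               /* Sanity-check that every non-zero count has a matching UFO address
+                * array, and a value array where one is expected.
+                */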
+               if (ui32ClientTAFenceCount)
+               {
+                       PVR_ASSERT(pauiClientTAFenceUFOAddress);
+                       if (!bTAFenceOnSyncCheckpointsOnly)
+                       {
+                               PVR_ASSERT(paui32ClientTAFenceValue);
+                       }
+               }
+               if (ui32ClientTAUpdateCount)
+               {
+                       PVR_ASSERT(pauiClientTAUpdateUFOAddress);
+                       if (ui32ClientTAUpdateValueCount>0)
+                       {
+                               PVR_ASSERT(paui32ClientTAUpdateValue);
+                       }
+               }
+               if (ui32Client3DFenceCount)
+               {
+                       PVR_ASSERT(pauiClient3DFenceUFOAddress);
+                       PVR_ASSERT(b3DFenceOnSyncCheckpointsOnly);
+               }
+               if (ui32Client3DUpdateCount)
+               {
+                       PVR_ASSERT(pauiClient3DUpdateUFOAddress);
+                       if (ui32Client3DUpdateValueCount>0)
+                       {
+                               PVR_ASSERT(paui32Client3DUpdateValue);
+                       }
+               }
+               if (ui32ClientPRUpdateCount)
+               {
+                       PVR_ASSERT(pauiClientPRUpdateUFOAddress);
+                       if (ui32ClientPRUpdateValueCount>0)
+                       {
+                               PVR_ASSERT(paui32ClientPRUpdateValue);
+                       }
+               }
+
+       }
+
+       CHKPT_DBG((PVR_DBG_ERROR,
+                          "%s: ui32ClientTAFenceCount=%d, pauiClientTAFenceUFOAddress=<%p>",
+                          __func__,
+                          ui32ClientTAFenceCount,
+                          (void*)pauiClientTAFenceUFOAddress));
+       CHKPT_DBG((PVR_DBG_ERROR,
+                          "%s: ui32ClientTAUpdateCount=%d, pauiClientTAUpdateUFOAddress=<%p>",
+                          __func__,
+                          ui32ClientTAUpdateCount,
+                          (void*)pauiClientTAUpdateUFOAddress));
+#if (ENABLE_TA3D_UFO_DUMP == 1)
+       DumpUfoList(ui32ClientTAFenceCount, ui32ClientTAUpdateCount,
+                   ui32Client3DFenceCount + (bTestSLRAdd3DCheck ? 1 : 0),
+                   ui32Client3DUpdateCount,
+                   pauiClientTAFenceUFOAddress, paui32ClientTAFenceValue,
+                   pauiClientTAUpdateUFOAddress, paui32ClientTAUpdateValue,
+                   pauiClient3DFenceUFOAddress, NULL,
+                   pauiClient3DUpdateUFOAddress, paui32Client3DUpdateValue);
+#endif /* (ENABLE_TA3D_UFO_DUMP == 1) */
+
+       /* Command size check: the counts pre-calculated for the command-size
+        * phase should match the final client counts; a mismatch means the space
+        * reserved in the client CCB may not match what is actually written.
+        */
+       if (ui32TAFenceCount != ui32ClientTAFenceCount)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "TA pre-calculated number of fences"
+                       " is different than the actual number (%u != %u)",
+                       ui32TAFenceCount, ui32ClientTAFenceCount));
+       }
+       if (ui32TAUpdateCount != ui32ClientTAUpdateCount)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "TA pre-calculated number of updates"
+                       " is different than the actual number (%u != %u)",
+                       ui32TAUpdateCount, ui32ClientTAUpdateCount));
+       }
+       if (!bTestSLRAdd3DCheck && (ui323DFenceCount != ui32Client3DFenceCount))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "3D pre-calculated number of fences"
+                       " is different than the actual number (%u != %u)",
+                       ui323DFenceCount, ui32Client3DFenceCount));
+       }
+       if (ui323DUpdateCount != ui32Client3DUpdateCount)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "3D pre-calculated number of updates"
+                       " is different than the actual number (%u != %u)",
+                       ui323DUpdateCount, ui32Client3DUpdateCount));
+       }
+       if (ui32PRUpdateCount != ui32ClientPRUpdateCount)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "PR pre-calculated number of updates"
+                       " is different than the actual number (%u != %u)",
+                       ui32PRUpdateCount, ui32ClientPRUpdateCount));
+       }
+
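+       /* Gather the render workload characteristics used for workload
+        * estimation (when enabled).
+        */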
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       if (bKickTA || bKick3D || bAbort)
+       {
+               sWorkloadCharacteristics.sTA3D.ui32RenderTargetSize  = ui32RenderTargetSize;
+               sWorkloadCharacteristics.sTA3D.ui32NumberOfDrawCalls = ui32NumberOfDrawCalls;
+               sWorkloadCharacteristics.sTA3D.ui32NumberOfIndices   = ui32NumberOfIndices;
+               sWorkloadCharacteristics.sTA3D.ui32NumberOfMRTs      = ui32NumberOfMRTs;
+       }
+#endif
+
+       /* Init and acquire the TA command if required */
+       if (bKickTA)
+       {
+               RGX_SERVER_RC_TA_DATA *psTAData = &psRenderContext->sTAData;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+               /* Prepare workload estimation */
+               WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice,
+                               &psRenderContext->sWorkEstData,
+                               &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sDataTA,
+                               RGXFWIF_CCB_CMD_TYPE_GEOM,
+                               &sWorkloadCharacteristics,
+                               ui64DeadlineInus,
+                               &sWorkloadKickDataTA);
+#endif
+
+               /* Init the TA command helper */
+               CHKPT_DBG((PVR_DBG_ERROR,
+                                  "%s:   calling RGXCmdHelperInitCmdCCB(), ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d",
+                                  __func__, ui32ClientTAFenceCount, ui32ClientTAUpdateCount));
+               RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(psTAData->psServerCommonContext),
+                                                ui32ClientTAFenceCount,
+                                                pauiClientTAFenceUFOAddress,
+                                                paui32ClientTAFenceValue,
+                                                ui32ClientTAUpdateCount,
+                                                pauiClientTAUpdateUFOAddress,
+                                                paui32ClientTAUpdateValue,
+                                                ui32TACmdSize,
+                                                pui8TADMCmd,
+                                                &pPreAddr,
+                                                (bKick3D ? NULL : &pPostAddr),
+                                                (bKick3D ? NULL : &pRMWUFOAddr),
+                                                RGXFWIF_CCB_CMD_TYPE_GEOM,
+                                                ui32ExtJobRef,
+                                                ui32IntJobRef,
+                                                ui32PDumpFlags,
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+                                                &sWorkloadKickDataTA,
+#else
+                                                NULL,
+#endif
+                                                "TA",
+                                                bCCBStateOpen,
+                                                pasTACmdHelperData);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+               /* The following is used to determine the offset of the command header containing
+                  the workload estimation data so that it can be accessed when the KCCB is read */
+               ui32TACmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(pasTACmdHelperData);
+#endif
+
+               eError = RGXCmdHelperAcquireCmdCCB(CCB_CMD_HELPER_NUM_TA_COMMANDS, pasTACmdHelperData);
+               if (unlikely(eError != PVRSRV_OK))
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d",
+                                          __func__, eError));
+                       goto fail_taacquirecmd;
+               }
+               else
+               {
+                       ui32TACmdCount++;
+               }
+       }
+
+       /* Prepare the PR fence (and 3D-PR) commands if required */
+       if (bKickPR)
+       {
+               RGX_SERVER_RC_3D_DATA *ps3DData = &psRenderContext->s3DData;
+
+               /*
+                       The command helper doesn't know about the PR fence, so create
+                       the command with all the fences against it and later create
+                       the PR command itself, which _must_ come after the PR fence.
+               */
+               sPRUFO.puiAddrUFO = uiPRFenceUFOAddress;
+               sPRUFO.ui32Value = ui32PRFenceValue;
+
+               /* Init the PR fence command helper */
+               CHKPT_DBG((PVR_DBG_ERROR,
+                                  "%s:   calling RGXCmdHelperInitCmdCCB(), ui32Client3DFenceCount=%d",
+                                  __func__, ui32Client3DFenceCount));
+               RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext),
+                                                ui32Client3DFenceCount + (bTestSLRAdd3DCheck ? 1 : 0),
+                                                pauiClient3DFenceUFOAddress,
+                                                NULL,
+                                                0,
+                                                NULL,
+                                                NULL,
+                                                sizeof(sPRUFO),
+                                                (IMG_UINT8*) &sPRUFO,
+                                                NULL,
+                                                NULL,
+                                                NULL,
+                                                RGXFWIF_CCB_CMD_TYPE_FENCE_PR,
+                                                ui32ExtJobRef,
+                                                ui32IntJobRef,
+                                                ui32PDumpFlags,
+                                                NULL,
+                                                "3D-PR-Fence",
+                                                bCCBStateOpen,
+                                                &pas3DCmdHelperData[ui323DCmdCount++]);
+
+               /* Init the 3D PR command helper */
+               /*
+                       Updates for Android (fence sync and Timeline sync prim) are provided in the PR-update
+                       if no 3D is present. This is so the timeline update cannot happen out of order with any
+                       other 3D already in flight for the same timeline (PR-updates are done in the 3D cCCB).
+                       This out of order timeline sync prim update could happen if we attach it to the TA update.
+               */
+               if (ui32ClientPRUpdateCount)
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR,
+                                          "%s: Line %d, ui32ClientPRUpdateCount=%d, "
+                                          "pauiClientPRUpdateUFOAddress=0x%x, "
+                                          "ui32ClientPRUpdateValueCount=%d, "
+                                          "paui32ClientPRUpdateValue=0x%x",
+                                          __func__, __LINE__, ui32ClientPRUpdateCount,
+                                          pauiClientPRUpdateUFOAddress->ui32Addr,
+                                          ui32ClientPRUpdateValueCount,
+                                          (ui32ClientPRUpdateValueCount == 0) ? PVRSRV_SYNC_CHECKPOINT_SIGNALLED : *paui32ClientPRUpdateValue));
+               }
+               if (!bUseCombined3DAnd3DPR)
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR,
+                                          "%s:   calling RGXCmdHelperInitCmdCCB(), ui32ClientPRUpdateCount=%d",
+                                          __func__, ui32ClientPRUpdateCount));
+                       RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext),
+                                                        0,
+                                                        NULL,
+                                                        NULL,
+                                                        ui32ClientPRUpdateCount,
+                                                        pauiClientPRUpdateUFOAddress,
+                                                        paui32ClientPRUpdateValue,
+                                                        pui83DPRDMCmd ? ui323DPRCmdSize : ui323DCmdSize, /* if the client has not provided a 3DPR command, the regular 3D command should be used instead */
+                                                        pui83DPRDMCmd ? pui83DPRDMCmd : pui83DDMCmd,
+                                                        NULL,
+                                                        NULL,
+                                                        NULL,
+                                                        RGXFWIF_CCB_CMD_TYPE_3D_PR,
+                                                        ui32ExtJobRef,
+                                                        ui32IntJobRef,
+                                                        ui32PDumpFlags,
+                                                        NULL,
+                                                        "3D-PR",
+                                                        bCCBStateOpen,
+                                                        &pas3DCmdHelperData[ui323DCmdCount++]);
+               }
+       }
+
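+       /* Init the main 3D command, or the abort command when bAbort is set. */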
+       if (bKick3D || bAbort)
+       {
+               RGX_SERVER_RC_3D_DATA *ps3DData = &psRenderContext->s3DData;
+               const RGXFWIF_CCB_CMD_TYPE e3DCmdType = bAbort ? RGXFWIF_CCB_CMD_TYPE_ABORT : RGXFWIF_CCB_CMD_TYPE_3D;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+               /* Prepare workload estimation */
+               WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice,
+                               &psRenderContext->sWorkEstData,
+                               &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sData3D,
+                               e3DCmdType,
+                               &sWorkloadCharacteristics,
+                               ui64DeadlineInus,
+                               &sWorkloadKickData3D);
+#endif
+
+               /* Init the 3D command helper */
+               RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext),
+                                                bKickTA ? 0 : ui32Client3DFenceCount,  /* For a kick with a TA, the 3D fences are added before the PR command instead */
+                                                bKickTA ? NULL : pauiClient3DFenceUFOAddress,
+                                                NULL,
+                                                ui32Client3DUpdateCount,
+                                                pauiClient3DUpdateUFOAddress,
+                                                paui32Client3DUpdateValue,
+                                                ui323DCmdSize,
+                                                pui83DDMCmd,
+                                                (bKickTA ? NULL : &pPreAddr),
+                                                &pPostAddr,
+                                                &pRMWUFOAddr,
+                                                e3DCmdType,
+                                                ui32ExtJobRef,
+                                                ui32IntJobRef,
+                                                ui32PDumpFlags,
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+                                                &sWorkloadKickData3D,
+#else
+                                                NULL,
+#endif
+                                                "3D",
+                                                bCCBStateOpen,
+                                                &pas3DCmdHelperData[ui323DCmdCount++]);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+               /* The following are used to determine the offset of the command header containing the workload estimation
+                  data so that it can be accessed when the KCCB is read */
+               ui323DCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(&pas3DCmdHelperData[ui323DCmdCount - 1]);
+               ui323DFullRenderCommandOffset = RGXCmdHelperGetCommandOffset(pas3DCmdHelperData, ui323DCmdCount - 1);
+#endif
+       }
+
+       /* Protect against array overflow in RGXCmdHelperAcquireCmdCCB() */
+       if (unlikely(ui323DCmdCount > CCB_CMD_HELPER_NUM_3D_COMMANDS))
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", __func__, eError));
+               goto fail_3dcmdinit;
+       }
+
+       if (ui323DCmdCount)
+       {
+               PVR_ASSERT(bKickPR || bKick3D);
+
+               /* Acquire space for all the 3D command(s) */
+               eError = RGXCmdHelperAcquireCmdCCB(ui323DCmdCount, pas3DCmdHelperData);
+               if (unlikely(eError != PVRSRV_OK))
+               {
+                       /* If RGXCmdHelperAcquireCmdCCB fails we skip scheduling
+                        * a new TA command with the same write offset in the kernel CCB.
+                        */
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", __func__, eError));
+                       goto fail_3dacquirecmd;
+               }
+       }
+
+       /*
+               We should acquire the space in the kernel CCB here, because after this
+               point we release the commands, which performs operations on server syncs
+               that cannot be undone.
+       */
+
+       /*
+               Everything is ready to go now, release the commands
+       */
+       if (ui32TACmdCount)
+       {
+               ui32TACmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext));
+               RGXCmdHelperReleaseCmdCCB(ui32TACmdCount,
+                                                                 pasTACmdHelperData,
+                                                                 "TA",
+                                                                 FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+               ui32TACmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext));
+
+               /* This checks whether the command wrapped around at the end of the CCB and
+                  therefore starts at an offset of 0 rather than at the current command offset */
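+               /* For example, a pre-release write offset of 0xF80 and a post-release
+                * offset of 0x040 means the released command wrapped, so its start is
+                * taken as offset 0 when computing the workload estimation header offset. */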
+               if (ui32TACmdOffset < ui32TACmdOffsetWrapCheck)
+               {
+                       ui32TACommandOffset = ui32TACmdOffset;
+               }
+               else
+               {
+                       ui32TACommandOffset = 0;
+               }
+#endif
+       }
+
+       if (ui323DCmdCount)
+       {
+               ui323DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext));
+               RGXCmdHelperReleaseCmdCCB(ui323DCmdCount,
+                                                                 pas3DCmdHelperData,
+                                                                 "3D",
+                                                                 FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext).ui32Addr);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+               ui323DCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext));
+
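+               /* As for the TA command above, detect whether the released 3D command
+                * wrapped to the start of the client CCB. */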
+               if (ui323DCmdOffset < ui323DCmdOffsetWrapCheck)
+               {
+                       ui323DCommandOffset = ui323DCmdOffset;
+               }
+               else
+               {
+                       ui323DCommandOffset = 0;
+               }
+#endif
+       }
+
+       if (ui32TACmdCount)
+       {
+               IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr;
+               RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext);
+               CMDTA3D_SHARED *psGeomCmdShared = IMG_OFFSET_ADDR(pui8TADMCmd, 0);
+
+               sTACmdKickData.psContext = FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext);
+               sTACmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB);
+               sTACmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+               /* Add the Workload data into the KCCB kick */
+               sTACmdKickData.ui32WorkEstCmdHeaderOffset = ui32TACommandOffset + ui32TACmdHeaderOffset;
+#else
+               sTACmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+#endif
+
+               eError = AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &sTACmdKickData.apsCleanupCtl,
+                               &sTACmdKickData.ui32NumCleanupCtl,
+                               RGXFWIF_DM_GEOM,
+                               bKickTA,
+                               psKMHWRTDataSet,
+                               psZSBuffer,
+                               psMSAAScratchBuffer);
+               if (unlikely(eError != PVRSRV_OK))
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line",
+                                          __func__, eError));
+                       goto fail_taattachcleanupctls;
+               }
+
+               if (psGeomCmdShared)
+               {
+                       HTBLOGK(HTB_SF_MAIN_KICK_TA,
+                                       sTACmdKickData.psContext,
+                                       ui32TACmdOffset,
+                                       psGeomCmdShared->sCmn.ui32FrameNum,
+                                       ui32ExtJobRef,
+                                       ui32IntJobRef);
+               }
+
+               RGXSRV_HWPERF_ENQ(psRenderContext,
+                                 OSGetCurrentClientProcessIDKM(),
+                                 ui32FWCtx,
+                                 ui32ExtJobRef,
+                                 ui32IntJobRef,
+                                 RGX_HWPERF_KICK_TYPE_TA,
+                                 iCheckTAFence,
+                                 iUpdateTAFence,
+                                 iUpdateTATimeline,
+                                 uiCheckTAFenceUID,
+                                 uiUpdateTAFenceUID,
+                                 ui64DeadlineInus,
+                                 WORKEST_CYCLES_PREDICTION_GET(sWorkloadKickDataTA));
+
+               if (!bUseSingleFWCommand)
+               {
+                       /* Construct the kernel TA CCB command. */
+                       RGXFWIF_KCCB_CMD sTAKCCBCmd;
+                       sTAKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+                       sTAKCCBCmd.uCmdData.sCmdKickData = sTACmdKickData;
+
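+                       /* Retry while RGXScheduleCommand returns PVRSRV_ERROR_RETRY
+                        * (typically because there is no space in the kernel CCB yet),
+                        * bounded by the MAX_HW_TIME_US timeout. */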
+                       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+                       {
+                               eError2 = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice,
+                                               RGXFWIF_DM_GEOM,
+                                               &sTAKCCBCmd,
+                                               ui32PDumpFlags);
+                               if (eError2 != PVRSRV_ERROR_RETRY)
+                               {
+                                       break;
+                               }
+                               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+                       } END_LOOP_UNTIL_TIMEOUT();
+               }
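+               /* When bUseSingleFWCommand is set, the TA kick is not scheduled here;
+                * it is combined with the 3D kick into a single KCCB command below. */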
+
+               if (eError2 != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXKickTA3DKM failed to schedule kernel CCB command. (0x%x)", eError2));
+                       if (eError == PVRSRV_OK)
+                       {
+                               eError = eError2;
+                       }
+                       goto fail_taacquirecmd;
+               }
+
+               PVRGpuTraceEnqueueEvent(psRenderContext->psDeviceNode->pvDevice,
+                                       ui32FWCtx, ui32ExtJobRef, ui32IntJobRef,
+                                       RGX_HWPERF_KICK_TYPE_TA);
+       }
+
+       if (ui323DCmdCount)
+       {
+               RGXFWIF_KCCB_CMD s3DKCCBCmd = { 0 };
+               IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext).ui32Addr;
+               RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext);
+               CMDTA3D_SHARED *ps3DCmdShared = IMG_OFFSET_ADDR(pui83DDMCmd, 0);
+
+               s3DCmdKickData.psContext = FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext);
+               s3DCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB);
+               s3DCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB);
+
+               /* Add the Workload data into the KCCB kick */
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+               /* Store the offset to the client CCB command header so that it can be referenced when the KCCB command reaches the FW */
+               s3DCmdKickData.ui32WorkEstCmdHeaderOffset = ui323DCommandOffset + ui323DCmdHeaderOffset + ui323DFullRenderCommandOffset;
+#else
+               s3DCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+#endif
+
+               eError = AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &s3DCmdKickData.apsCleanupCtl,
+                               &s3DCmdKickData.ui32NumCleanupCtl,
+                               RGXFWIF_DM_3D,
+                               bKick3D,
+                               psKMHWRTDataSet,
+                               psZSBuffer,
+                               psMSAAScratchBuffer);
+               if (unlikely(eError != PVRSRV_OK))
+               {
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line",
+                                          __func__, eError));
+                       goto fail_3dattachcleanupctls;
+               }
+
+               if (ps3DCmdShared)
+               {
+                       HTBLOGK(HTB_SF_MAIN_KICK_3D,
+                                       s3DCmdKickData.psContext,
+                                       ui323DCmdOffset,
+                                       ps3DCmdShared->sCmn.ui32FrameNum,
+                                       ui32ExtJobRef,
+                                       ui32IntJobRef);
+               }
+
+               RGXSRV_HWPERF_ENQ(psRenderContext,
+                                 OSGetCurrentClientProcessIDKM(),
+                                 ui32FWCtx,
+                                 ui32ExtJobRef,
+                                 ui32IntJobRef,
+                                 RGX_HWPERF_KICK_TYPE_3D,
+                                 iCheck3DFence,
+                                 iUpdate3DFence,
+                                 iUpdate3DTimeline,
+                                 uiCheck3DFenceUID,
+                                 uiUpdate3DFenceUID,
+                                 ui64DeadlineInus,
+                                 WORKEST_CYCLES_PREDICTION_GET(sWorkloadKickData3D));
+
+               if (bUseSingleFWCommand)
+               {
+                       /* Construct the kernel TA/3D CCB command. */
+                       s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK;
+                       s3DKCCBCmd.uCmdData.sCombinedTA3DCmdKickData.sTACmdKickData = sTACmdKickData;
+                       s3DKCCBCmd.uCmdData.sCombinedTA3DCmdKickData.s3DCmdKickData = s3DCmdKickData;
+               }
+               else
+               {
+                       /* Construct the kernel 3D CCB command. */
+                       s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+                       s3DKCCBCmd.uCmdData.sCmdKickData = s3DCmdKickData;
+               }
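+               /* In the combined case this single KCCB command carries both the TA and
+                * 3D kick data and is submitted to the firmware via the 3D DM. */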
+
+               LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+               {
+                       eError2 = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice,
+                                                                                RGXFWIF_DM_3D,
+                                                                                &s3DKCCBCmd,
+                                                                                ui32PDumpFlags);
+                       if (eError2 != PVRSRV_ERROR_RETRY)
+                       {
+                               break;
+                       }
+                       OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+               } END_LOOP_UNTIL_TIMEOUT();
+
+               if (eError2 != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXKickTA3DKM failed to schedule kernel CCB command. (0x%x)", eError2));
+                       if (eError == PVRSRV_OK)
+                       {
+                               eError = eError2;
+                       }
+                       goto fail_3dacquirecmd;
+               }
+
+               PVRGpuTraceEnqueueEvent(psRenderContext->psDeviceNode->pvDevice,
+                                       ui32FWCtx, ui32ExtJobRef, ui32IntJobRef,
+                                       RGX_HWPERF_KICK_TYPE_3D);
+       }
+
+       /*
+        * Now check eError (which may hold an error returned by our earlier calls
+        * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command
+        * first, so we check it now...
+        */
+       if (unlikely(eError != PVRSRV_OK))
+       {
+               CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line",
+                                  __func__, eError));
+               goto fail_3dacquirecmd;
+       }
+
+#if defined(NO_HARDWARE)
+       /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+       if (psUpdateTASyncCheckpoint)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR,
+                                  "%s:   Signalling NOHW sync checkpoint [TA] <%p>, ID:%d, FwAddr=0x%x",
+                                  __func__, (void*)psUpdateTASyncCheckpoint,
+                                  SyncCheckpointGetId(psUpdateTASyncCheckpoint),
+                                  SyncCheckpointGetFirmwareAddr(psUpdateTASyncCheckpoint)));
+               SyncCheckpointSignalNoHW(psUpdateTASyncCheckpoint);
+       }
+       if (psTAFenceTimelineUpdateSync)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR,
+                                  "%s:   Updating NOHW sync prim [TA] <%p> to %d",
+                                  __func__, (void*)psTAFenceTimelineUpdateSync,
+                                  ui32TAFenceTimelineUpdateValue));
+               SyncPrimNoHwUpdate(psTAFenceTimelineUpdateSync, ui32TAFenceTimelineUpdateValue);
+       }
+
+       if (psUpdate3DSyncCheckpoint)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR,
+                                  "%s:   Signalling NOHW sync checkpoint [3D] <%p>, ID:%d, FwAddr=0x%x",
+                                  __func__, (void*)psUpdate3DSyncCheckpoint,
+                                  SyncCheckpointGetId(psUpdate3DSyncCheckpoint),
+                                  SyncCheckpointGetFirmwareAddr(psUpdate3DSyncCheckpoint)));
+               SyncCheckpointSignalNoHW(psUpdate3DSyncCheckpoint);
+       }
+       if (ps3DFenceTimelineUpdateSync)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR,
+                                  "%s:   Updating NOHW sync prim [3D] <%p> to %d",
+                                  __func__, (void*)ps3DFenceTimelineUpdateSync,
+                                  ui323DFenceTimelineUpdateValue));
+               SyncPrimNoHwUpdate(ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue);
+       }
+       SyncCheckpointNoHWUpdateTimelines(NULL);
+
+#endif /* defined(NO_HARDWARE) */
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       if (psBufferSyncData)
+       {
+               CHKPT_DBG((PVR_DBG_ERROR,
+                                  "%s:   calling pvr_buffer_sync_kick_succeeded(psBufferSyncData=<%p>)...",
+                                  __func__, (void*)psBufferSyncData));
+               pvr_buffer_sync_kick_succeeded(psBufferSyncData);
+       }
+       if (apsBufferFenceSyncCheckpoints)
+       {
+               kfree(apsBufferFenceSyncCheckpoints);
+       }
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+       if (piUpdateTAFence)
+       {
+               *piUpdateTAFence = iUpdateTAFence;
+       }
+       if (piUpdate3DFence)
+       {
+               *piUpdate3DFence = iUpdate3DFence;
+       }
+
+       /* Drop the references taken on the sync checkpoints in the
+        * resolved input fence.
+        * NOTE: 3D fence is always submitted, either via 3D or TA(PR).
+        */
+       if (bKickTA)
+       {
+               SyncAddrListDeRefCheckpoints(ui32FenceTASyncCheckpointCount, apsFenceTASyncCheckpoints);
+       }
+       SyncAddrListDeRefCheckpoints(ui32Fence3DSyncCheckpointCount, apsFence3DSyncCheckpoints);
+
+       if (pvTAUpdateFenceFinaliseData && (iUpdateTAFence != PVRSRV_NO_FENCE))
+       {
+               SyncCheckpointFinaliseFence(psRenderContext->psDeviceNode, iUpdateTAFence,
+                                                                       pvTAUpdateFenceFinaliseData,
+                                                                       psUpdateTASyncCheckpoint, szFenceNameTA);
+       }
+       if (pv3DUpdateFenceFinaliseData && (iUpdate3DFence != PVRSRV_NO_FENCE))
+       {
+               SyncCheckpointFinaliseFence(psRenderContext->psDeviceNode, iUpdate3DFence,
+                                                                       pv3DUpdateFenceFinaliseData,
+                                                                       psUpdate3DSyncCheckpoint, szFenceName3D);
+       }
+
+       /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+       if (apsFenceTASyncCheckpoints)
+       {
+               SyncCheckpointFreeCheckpointListMem(apsFenceTASyncCheckpoints);
+       }
+       if (apsFence3DSyncCheckpoints)
+       {
+               SyncCheckpointFreeCheckpointListMem(apsFence3DSyncCheckpoints);
+       }
+
+       if (sTASyncData.paui32ClientUpdateValue)
+       {
+               OSFreeMem(sTASyncData.paui32ClientUpdateValue);
+       }
+       if (s3DSyncData.paui32ClientUpdateValue)
+       {
+               OSFreeMem(s3DSyncData.paui32ClientUpdateValue);
+       }
+
+#if defined(SUPPORT_VALIDATION)
+       if (bTestSLRAdd3DCheck)
+       {
+               SyncCheckpointFree(psDummySyncCheckpoint);
+       }
+#endif
+       OSLockRelease(psRenderContext->hLock);
+
+       return PVRSRV_OK;
+
+fail_3dattachcleanupctls:
+fail_taattachcleanupctls:
+fail_3dacquirecmd:
+fail_3dcmdinit:
+fail_taacquirecmd:
+       SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrListTAFence);
+       SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrListTAUpdate);
+       SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrList3DFence);
+       SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrList3DUpdate);
+       /* Where a TA-only kick (i.e. no 3D) is submitted, the PR update will make use of the unused 3DUpdate list.
+        * If this has happened, performing a rollback on pauiClientPRUpdateUFOAddress will simply repeat what
+        * has already been done for the sSyncAddrList3DUpdate above and result in a double decrement of the
+        * sync checkpoint's hEnqueuedCCBCount, so we need to check before rolling back the PRUpdate.
+        */
+       if (pauiClientPRUpdateUFOAddress && (pauiClientPRUpdateUFOAddress != psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs))
+       {
+               SyncCheckpointRollbackFromUFO(psRenderContext->psDeviceNode, pauiClientPRUpdateUFOAddress->ui32Addr);
+       }
+
+fail_alloc_update_values_mem_3D:
+       if (iUpdate3DFence != PVRSRV_NO_FENCE)
+       {
+               SyncCheckpointRollbackFenceData(iUpdate3DFence, pv3DUpdateFenceFinaliseData);
+       }
+fail_create_3d_fence:
+fail_alloc_update_values_mem_TA:
+       if (iUpdateTAFence != PVRSRV_NO_FENCE)
+       {
+               SyncCheckpointRollbackFenceData(iUpdateTAFence, pvTAUpdateFenceFinaliseData);
+       }
+fail_create_ta_fence:
+#if !defined(SUPPORT_BUFFER_SYNC)
+err_no_buffer_sync_invalid_params:
+#endif /* !defined(SUPPORT_BUFFER_SYNC) */
+err_pr_fence_address:
+err_populate_sync_addr_list_3d_update:
+err_populate_sync_addr_list_3d_fence:
+err_populate_sync_addr_list_ta_update:
+err_populate_sync_addr_list_ta_fence:
+err_not_enough_space:
+fail_tacmdinvalfbsc:
+       /* Drop the references taken on the sync checkpoints in the
+        * resolved input fence.
+        * NOTE: 3D fence is always submitted, either via 3D or TA(PR).
+        */
+#if defined(SUPPORT_BUFFER_SYNC)
+       SyncAddrListDeRefCheckpoints(ui32BufferFenceSyncCheckpointCount,
+                                    apsBufferFenceSyncCheckpoints);
+#endif
+       SyncAddrListDeRefCheckpoints(ui32Fence3DSyncCheckpointCount, apsFence3DSyncCheckpoints);
+fail_resolve_input_3d_fence:
+       if (bKickTA)
+       {
+               SyncAddrListDeRefCheckpoints(ui32FenceTASyncCheckpointCount, apsFenceTASyncCheckpoints);
+       }
+fail_resolve_input_ta_fence:
+       /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+       if (apsFenceTASyncCheckpoints)
+       {
+               SyncCheckpointFreeCheckpointListMem(apsFenceTASyncCheckpoints);
+       }
+       if (apsFence3DSyncCheckpoints)
+       {
+               SyncCheckpointFreeCheckpointListMem(apsFence3DSyncCheckpoints);
+       }
+       if (sTASyncData.paui32ClientUpdateValue)
+       {
+               OSFreeMem(sTASyncData.paui32ClientUpdateValue);
+       }
+       if (s3DSyncData.paui32ClientUpdateValue)
+       {
+               OSFreeMem(s3DSyncData.paui32ClientUpdateValue);
+       }
+#if defined(SUPPORT_VALIDATION)
+       if (bTestSLRAdd3DCheck)
+       {
+               SyncCheckpointFree(psDummySyncCheckpoint);
+       }
+#endif
+#if defined(SUPPORT_BUFFER_SYNC)
+       if (psBufferSyncData)
+       {
+               pvr_buffer_sync_kick_failed(psBufferSyncData);
+       }
+       if (apsBufferFenceSyncCheckpoints)
+       {
+               kfree(apsBufferFenceSyncCheckpoints);
+       }
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+       PVR_ASSERT(eError != PVRSRV_OK);
+       OSLockRelease(psRenderContext->hLock);
+       return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection,
+                                                 PVRSRV_DEVICE_NODE * psDeviceNode,
+                                                 RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+                                                 IMG_UINT32 ui32Priority)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+       OSLockAcquire(psRenderContext->hLock);
+
+       if (psRenderContext->sTAData.ui32Priority != ui32Priority)
+       {
+               eError = ContextSetPriority(psRenderContext->sTAData.psServerCommonContext,
+                                                                       psConnection,
+                                                                       psRenderContext->psDeviceNode->pvDevice,
+                                                                       ui32Priority,
+                                                                       RGXFWIF_DM_GEOM);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Failed to set the priority of the TA part of the rendercontext (%s)",
+                                        __func__, PVRSRVGetErrorString(eError)));
+                       goto fail_tacontext;
+               }
+               psRenderContext->sTAData.ui32Priority = ui32Priority;
+       }
+
+       if (psRenderContext->s3DData.ui32Priority != ui32Priority)
+       {
+               eError = ContextSetPriority(psRenderContext->s3DData.psServerCommonContext,
+                                                                       psConnection,
+                                                                       psRenderContext->psDeviceNode->pvDevice,
+                                                                       ui32Priority,
+                                                                       RGXFWIF_DM_3D);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Failed to set the priority of the 3D part of the rendercontext (%s)",
+                                        __func__, PVRSRVGetErrorString(eError)));
+                       goto fail_3dcontext;
+               }
+               psRenderContext->s3DData.ui32Priority = ui32Priority;
+       }
+
+       OSLockRelease(psRenderContext->hLock);
+       return PVRSRV_OK;
+
+fail_3dcontext:
+fail_tacontext:
+       OSLockRelease(psRenderContext->hLock);
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+
+PVRSRV_ERROR PVRSRVRGXSetRenderContextPropertyKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+                                                 RGX_CONTEXT_PROPERTY eContextProperty,
+                                                 IMG_UINT64 ui64Input,
+                                                 IMG_UINT64 *pui64Output)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       switch (eContextProperty)
+       {
+               case RGX_CONTEXT_PROPERTY_FLAGS:
+               {
+                       IMG_UINT32 ui32ContextFlags = (IMG_UINT32)ui64Input;
+
+                       OSLockAcquire(psRenderContext->hLock);
+                       eError = FWCommonContextSetFlags(psRenderContext->sTAData.psServerCommonContext,
+                                                        ui32ContextFlags);
+                       if (eError == PVRSRV_OK)
+                       {
+                               eError = FWCommonContextSetFlags(psRenderContext->s3DData.psServerCommonContext,
+                                                            ui32ContextFlags);
+                       }
+                       OSLockRelease(psRenderContext->hLock);
+                       break;
+               }
+
+               default:
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty));
+                       eError = PVRSRV_ERROR_NOT_SUPPORTED;
+               }
+       }
+
+       return eError;
+}
+
+void DumpRenderCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                         DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                         void *pvDumpDebugFile,
+                         IMG_UINT32 ui32VerbLevel)
+{
+       DLLIST_NODE *psNode, *psNext;
+       OSWRLockAcquireRead(psDevInfo->hRenderCtxListLock);
+       dllist_foreach_node(&psDevInfo->sRenderCtxtListHead, psNode, psNext)
+       {
+               RGX_SERVER_RENDER_CONTEXT *psCurrentServerRenderCtx =
+                       IMG_CONTAINER_OF(psNode, RGX_SERVER_RENDER_CONTEXT, sListNode);
+
+               DumpFWCommonContextInfo(psCurrentServerRenderCtx->sTAData.psServerCommonContext,
+                                       pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+               DumpFWCommonContextInfo(psCurrentServerRenderCtx->s3DData.psServerCommonContext,
+                                       pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+       }
+       OSWRLockReleaseRead(psDevInfo->hRenderCtxListLock);
+}
+
+IMG_UINT32 CheckForStalledClientRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       DLLIST_NODE *psNode, *psNext;
+       IMG_UINT32 ui32ContextBitMask = 0;
+
+       OSWRLockAcquireRead(psDevInfo->hRenderCtxListLock);
+
+       dllist_foreach_node(&psDevInfo->sRenderCtxtListHead, psNode, psNext)
+       {
+               RGX_SERVER_RENDER_CONTEXT *psCurrentServerRenderCtx =
+                       IMG_CONTAINER_OF(psNode, RGX_SERVER_RENDER_CONTEXT, sListNode);
+               if (NULL != psCurrentServerRenderCtx->sTAData.psServerCommonContext)
+               {
+                       if (CheckStalledClientCommonContext(psCurrentServerRenderCtx->sTAData.psServerCommonContext, RGX_KICK_TYPE_DM_TA) == PVRSRV_ERROR_CCCB_STALLED)
+                       {
+                               ui32ContextBitMask |= RGX_KICK_TYPE_DM_TA;
+                       }
+               }
+
+               if (NULL != psCurrentServerRenderCtx->s3DData.psServerCommonContext)
+               {
+                       if (CheckStalledClientCommonContext(psCurrentServerRenderCtx->s3DData.psServerCommonContext, RGX_KICK_TYPE_DM_3D) == PVRSRV_ERROR_CCCB_STALLED)
+                       {
+                               ui32ContextBitMask |= RGX_KICK_TYPE_DM_3D;
+                       }
+               }
+       }
+
+       OSWRLockReleaseRead(psDevInfo->hRenderCtxListLock);
+       return ui32ContextBitMask;
+}
+
+/*
+ * RGXRenderContextStalledKM
+ */
+PVRSRV_ERROR RGXRenderContextStalledKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext)
+{
+       RGXCheckForStalledClientContexts((PVRSRV_RGXDEV_INFO *) psRenderContext->psDeviceNode->pvDevice, IMG_TRUE);
+       return PVRSRV_OK;
+}
+
+/******************************************************************************
+ End of file (rgxta3d.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxta3d.h b/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxta3d.h
new file mode 100644 (file)
index 0000000..3e523c4
--- /dev/null
@@ -0,0 +1,503 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX TA and 3D Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX TA and 3D Functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXTA3D_H
+#define RGXTA3D_H
+
+#include "devicemem.h"
+#include "devicemem_server.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgx_fwif_shared.h"
+#include "rgx_fwif_resetframework.h"
+#include "sync_server.h"
+#include "connection_server.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+#include "ri_server.h"
+
+typedef struct _RGX_SERVER_RENDER_CONTEXT_ RGX_SERVER_RENDER_CONTEXT;
+typedef struct _RGX_FREELIST_ RGX_FREELIST;
+typedef struct _RGX_PMR_NODE_ RGX_PMR_NODE;
+
+/*****************************************************************************
+ * The Design of Data Storage System for Render Targets                      *
+ * ====================================================                      *
+ *   Relevant for                                                            *
+ *     understanding RGXCreateHWRTDataSet & RGXDestroyHWRTDataSet            *
+ *                                                                           *
+ *                                                                           *
+ *        +=========================================+                        *
+ *        |           RenderTargetDataSet           |                        *
+ *        +---------------|---------|---------------+                        *
+ *                        |         |                                        *
+ *                        V         V                                        *
+ *  +- - - - - - - - - - - - +   +- - - - - - - - - - - - +                  *
+ *  | KM_HW_RT_DATA_HANDLE_0 |   | KM_HW_RT_DATA_HANDLE_1 |                  *
+ *  +- - -|- - - - - - - - - +   +- - - - - - - - - | - - +                  *
+ *        |                                         |                        *
+ *        |                                         |           [UM]Client   *
+ *  ------|-----------------------------------------|----------------------- *
+ *        |                                         |               Bridge   *
+ *  ------|-----------------------------------------|----------------------- *
+ *        |                                         |           [KM]Server   *
+ *        |                                         |                        *
+ *        | KM-ptr                                  | KM-ptr                 *
+ *        V                                         V                        *
+ *  +====================+           +====================+                  *
+ *  |  KM_HW_RT_DATA_0   |           |   KM_HW_RT_DATA_1  |                  *
+ *  +-----|------------|-+           +-|------------|-----+                  *
+ *        |            |               |            |                        *
+ *        |            |               |            |                        *
+ *        |            |               |            |                        *
+ *        |            |               |            |                        *
+ *        |            | KM-ptr        | KM-ptr     |                        *
+ *        |            V               V            |                        *
+ *        |      +==========================+       |                        *
+ *        |      | HW_RT_DATA_COMMON_COOKIE |       |                        *
+ *        |      +--------------------------+       |                        *
+ *        |                   |                     |                        *
+ *        |                   |                     |                        *
+ *  ------|-------------------|---------------------|----------------------- *
+ *        |                   |                     |         [FW]Firmware   *
+ *        |                   |                     |                        *
+ *        | FW-addr           |                     | FW-addr                *
+ *        V                   |                     V                        *
+ *  +===============+         |           +===============+                  *
+ *  | HW_RT_DATA_0  |         |           | HW_RT_DATA_1  |                  *
+ *  +------------|--+         |           +--|------------+                  *
+ *               |            |              |                               *
+ *               | FW-addr    | FW-addr      | FW-addr                       *
+ *               V            V              V                               *
+ *        +=========================================+                        *
+ *        |           HW_RT_DATA_COMMON             |                        *
+ *        +-----------------------------------------+                        *
+ *                                                                           *
+ *****************************************************************************/
+
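+/* A single HW_RT_DATA_COMMON_COOKIE is shared by all the KM_HW_RT_DATA objects
+ * of a render target data set (see the diagram above); ui32RefCount records how
+ * many of them still reference the shared HWRTDataCommon firmware allocation.
+ */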
+typedef struct _RGX_HWRTDATA_COMMON_COOKIE_
+{
+       DEVMEM_MEMDESC                  *psHWRTDataCommonFwMemDesc;
+       RGXFWIF_DEV_VIRTADDR    sHWRTDataCommonFwAddr;
+       IMG_UINT32                              ui32RefCount;
+
+} RGX_HWRTDATA_COMMON_COOKIE;
+
+typedef struct _RGX_KM_HW_RT_DATASET_
+{
+       RGX_HWRTDATA_COMMON_COOKIE *psHWRTDataCommonCookie;
+
+       /* RGX_RTDATA_CLEANUP_DATA */
+       /* RGXMKIF_NUM_RTDATAS */
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+       RGXFWIF_DEV_VIRTADDR sHWRTDataFwAddr;
+
+       DEVMEM_MEMDESC *psHWRTDataFwMemDesc;
+       DEVMEM_MEMDESC *psRTArrayFwMemDesc;
+       DEVMEM_MEMDESC *psRendersAccArrayFwMemDesc;
+
+       RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS];
+#if !defined(SUPPORT_SHADOW_FREELISTS)
+       DLLIST_NODE sNodeHWRTData;
+#endif
+
+} RGX_KM_HW_RT_DATASET;
+
+struct _RGX_FREELIST_ {
+       PVRSRV_RGXDEV_INFO              *psDevInfo;
+       CONNECTION_DATA                 *psConnection;
+
+       /* Free list PMR */
+       PMR                                             *psFreeListPMR;
+       IMG_DEVMEM_OFFSET_T             uiFreeListPMROffset;
+
+       /* Free list PM state PMR */
+       PMR                                             *psFreeListStatePMR;
+       IMG_DEVMEM_OFFSET_T             uiFreeListStatePMROffset;
+
+       /* Freelist config */
+       IMG_UINT32                              ui32MaxFLPages;
+       IMG_UINT32                              ui32InitFLPages;
+       IMG_UINT32                              ui32CurrentFLPages;
+       IMG_UINT32                              ui32GrowFLPages;
+       IMG_UINT32                              ui32ReadyFLPages;
+       IMG_UINT32                              ui32GrowThreshold;              /* Percentage of FL memory used that should trigger a new grow request */
+       IMG_UINT32                              ui32FreelistID;
+       IMG_UINT32                              ui32FreelistGlobalID;   /* related global freelist for this freelist */
+       IMG_UINT64                              ui64FreelistChecksum;   /* checksum over freelist content */
+       IMG_BOOL                                bCheckFreelist;                 /* freelist check enabled */
+       IMG_UINT32                              ui32RefCount;                   /* freelist reference counting */
+
+       IMG_UINT32                              ui32NumGrowReqByApp;    /* Total number of grow requests by Application */
+       IMG_UINT32                              ui32NumGrowReqByFW;             /* Total Number of grow requests by Firmware */
+       IMG_UINT32                              ui32NumHighPages;               /* High Mark of pages in the freelist */
+
+       IMG_PID                                 ownerPid;                               /* Pid of the owner of the list */
+
+       /* Memory Blocks */
+       DLLIST_NODE                             sMemoryBlockHead;
+       DLLIST_NODE                             sMemoryBlockInitHead;
+       DLLIST_NODE                             sNode;
+#if !defined(SUPPORT_SHADOW_FREELISTS)
+       /* HWRTData nodes linked to local freelist */
+       DLLIST_NODE                             sNodeHWRTDataHead;
+#endif
+
+       /* FW data structures */
+       DEVMEM_MEMDESC                  *psFWFreelistMemDesc;
+       RGXFWIF_DEV_VIRTADDR    sFreeListFWDevVAddr;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       HASH_TABLE*                             psWorkloadHashTable;
+#endif
+};
+
+struct _RGX_PMR_NODE_ {
+       RGX_FREELIST                    *psFreeList;
+       PMR                                             *psPMR;
+       PMR_PAGELIST                    *psPageList;
+       DLLIST_NODE                             sMemoryBlock;
+       IMG_UINT32                              ui32NumPages;
+       IMG_BOOL                                bFirstPageMissing;
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+       RI_HANDLE                               hRIHandle;
+#endif
+};
+
+typedef struct {
+       PVRSRV_RGXDEV_INFO              *psDevInfo;
+       DEVMEM_MEMDESC                  *psFWZSBufferMemDesc;
+       RGXFWIF_DEV_VIRTADDR    sZSBufferFWDevVAddr;
+
+       DEVMEMINT_RESERVATION   *psReservation;
+       PMR                                             *psPMR;
+       DEVMEMINT_MAPPING               *psMapping;
+       PVRSRV_MEMALLOCFLAGS_T  uiMapFlags;
+       IMG_UINT32                              ui32ZSBufferID;
+       IMG_UINT32                              ui32RefCount;
+       IMG_BOOL                                bOnDemand;
+
+       IMG_BOOL                                ui32NumReqByApp;                /* Number of Backing Requests from Application */
+       IMG_BOOL                                ui32NumReqByFW;                 /* Number of Backing Requests from Firmware */
+
+       IMG_PID                                 owner;
+
+       DLLIST_NODE     sNode;
+} RGX_ZSBUFFER_DATA;
+
+typedef struct {
+       RGX_ZSBUFFER_DATA               *psZSBuffer;
+} RGX_POPULATION;
+
+/* Dump the physical pages of a freelist */
+IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList);
+
+
+
+/* Create HWRTDataSet */
+PVRSRV_ERROR RGXCreateHWRTDataSet(CONNECTION_DATA          *psConnection,
+                                  PVRSRV_DEVICE_NODE       *psDeviceNode,
+                                                                 IMG_DEV_VIRTADDR          psVHeapTableDevVAddr,
+                                                                 IMG_DEV_VIRTADDR          sPMDataDevVAddr[RGXMKIF_NUM_RTDATAS],
+                                                                 IMG_DEV_VIRTADDR          sPMSecureDataDevVAddr[RGXMKIF_NUM_RTDATAS],
+                                                             RGX_FREELIST                 *apsFreeLists[RGXMKIF_NUM_RTDATA_FREELISTS],
+                                                             IMG_UINT32                ui32ScreenPixelMax,
+                                                             IMG_UINT64                ui64PPPMultiSampleCtl,
+                                                             IMG_UINT32                ui32TEStride,
+                                                             IMG_DEV_VIRTADDR          asTailPtrsDevVAddr[RGXMKIF_NUM_GEOMDATAS],
+                                                             IMG_UINT32                ui32TPCSize,
+                                                             IMG_UINT32                ui32TEScreen,
+                                                             IMG_UINT32                ui32TEAA,
+                                                             IMG_UINT32                ui32TEMTILE1,
+                                                             IMG_UINT32                ui32TEMTILE2,
+                                                             IMG_UINT32                ui32RgnStride,
+                                                             IMG_UINT32                ui32ISPMergeLowerX,
+                                                             IMG_UINT32                ui32ISPMergeLowerY,
+                                                             IMG_UINT32                ui32ISPMergeUpperX,
+                                                             IMG_UINT32                ui32ISPMergeUpperY,
+                                                             IMG_UINT32                ui32ISPMergeScaleX,
+                                                             IMG_UINT32                ui32ISPMergeScaleY,
+                                                             IMG_UINT16                ui16MaxRTs,
+                                                             RGX_KM_HW_RT_DATASET     *pasKMHWRTDataSet[RGXMKIF_NUM_RTDATAS]);
+
+/* Destroy HWRTDataSet */
+PVRSRV_ERROR RGXDestroyHWRTDataSet(RGX_KM_HW_RT_DATASET *psKmHwRTDataSet);
+
+/*
+       RGXCreateZSBuffer
+*/
+PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection,
+                                 PVRSRV_DEVICE_NODE    * psDeviceNode,
+                                 DEVMEMINT_RESERVATION *psReservation,
+                                 PMR                                   *psPMR,
+                                 PVRSRV_MEMALLOCFLAGS_T                uiMapFlags,
+                                 RGX_ZSBUFFER_DATA                     **ppsZSBuffer);
+
+/*
+       RGXDestroyZSBufferKM
+*/
+PVRSRV_ERROR RGXDestroyZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer);
+
+
+/*
+ * RGXBackingZSBuffer()
+ *
+ * Backs ZS-Buffer with physical pages
+ */
+PVRSRV_ERROR
+RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer);
+
+/*
+ * RGXPopulateZSBufferKM()
+ *
+ * Backs ZS-Buffer with physical pages (called by Bridge calls)
+ */
+PVRSRV_ERROR RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer,
+                                                                  RGX_POPULATION **ppsPopulation);
+
+/*
+ * RGXUnbackingZSBuffer()
+ *
+ * Frees ZS-Buffer's physical pages
+ */
+PVRSRV_ERROR RGXUnbackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer);
+
+/*
+ * RGXUnpopulateZSBufferKM()
+ *
+ * Frees ZS-Buffer's physical pages (called by Bridge calls)
+ */
+PVRSRV_ERROR RGXUnpopulateZSBufferKM(RGX_POPULATION *psPopulation);
+
+/*
+       RGXProcessRequestZSBufferBacking
+*/
+void RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                         IMG_UINT32 ui32ZSBufferID);
+
+/*
+       RGXProcessRequestZSBufferUnbacking
+*/
+void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                               IMG_UINT32 ui32ZSBufferID);
+
+/*
+       RGXGrowFreeList
+*/
+PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList,
+                             IMG_UINT32 ui32NumPages,
+                             PDLLIST_NODE pListHeader);
+
+/* Create free list */
+PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA      *psConnection,
+                               PVRSRV_DEVICE_NODE      *psDeviceNode,
+                                                          IMG_HANDLE                   hMemCtxPrivData,
+                                                          IMG_UINT32                   ui32MaxFLPages,
+                                                          IMG_UINT32                   ui32InitFLPages,
+                                                          IMG_UINT32                   ui32GrowFLPages,
+                               IMG_UINT32           ui32GrowParamThreshold,
+                                                          RGX_FREELIST                 *psGlobalFreeList,
+                                                          IMG_BOOL                             bCheckFreelist,
+                                                          IMG_DEV_VIRTADDR             sFreeListBaseDevVAddr,
+                                                          IMG_DEV_VIRTADDR             sFreeListStateDevVAddr,
+                                                          PMR                                  *psFreeListPMR,
+                                                          IMG_DEVMEM_OFFSET_T  uiFreeListPMROffset,
+                                                          PMR                                  *psFreeListStatePMR,
+                                                          IMG_DEVMEM_OFFSET_T  uiFreeListStatePMROffset,
+                                                          RGX_FREELIST                 **ppsFreeList);
+
+/* Destroy free list */
+PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList);
+
+/*
+       RGXProcessRequestGrow
+*/
+void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                  IMG_UINT32 ui32FreelistID);
+
+
+/* Reconstruct free list after Hardware Recovery */
+void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo,
+                                                                                         IMG_UINT32 ui32FreelistsCount,
+                                                                                         const IMG_UINT32 *paui32Freelists);
+
+/*!
+*******************************************************************************
+
+ @Function     PVRSRVRGXCreateRenderContextKM
+
+ @Description
+       Server-side implementation of RGXCreateRenderContext
+
+ @Input psDeviceNode - device node
+ @Input ui32Priority - context priority
+ @Input hMemCtxPrivData - memory context private data
+ @Input ui32PackedCCBSizeU8888 - packed log2 CCB sizes:
+               ui8TACCBAllocSizeLog2 - log2 of the TA CCB size
+               ui8TACCBMaxAllocSizeLog2 - log2 of the maximum size to which the TA CCB can grow
+               ui83DCCBAllocSizeLog2 - log2 of the 3D CCB size
+               ui83DCCBMaxAllocSizeLog2 - log2 of the maximum size to which the 3D CCB can grow
+ @Input ui32ContextFlags - flags which specify properties of the context
+ @Output ppsRenderContext - created render context
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA                            *psConnection,
+                                                                                       PVRSRV_DEVICE_NODE                      *psDeviceNode,
+                                                                                       IMG_UINT32                                      ui32Priority,
+                                                                                       IMG_UINT32                                      ui32FrameworkCommandSize,
+                                                                                       IMG_PBYTE                                       pabyFrameworkCommand,
+                                                                                       IMG_HANDLE                                      hMemCtxPrivData,
+                                                                                       IMG_UINT32                                      ui32StaticRenderContextStateSize,
+                                                                                       IMG_PBYTE                                       pStaticRenderContextState,
+                                                                                       IMG_UINT32                                      ui32PackedCCBSizeU8888,
+                                                                                       IMG_UINT32                                      ui32ContextFlags,
+                                                                                       IMG_UINT64                                      ui64RobustnessAddress,
+                                                                                       IMG_UINT32                                      ui32MaxTADeadlineMS,
+                                                                                       IMG_UINT32                                      ui32Max3DDeadlineMS,
+                                                                                       RGX_SERVER_RENDER_CONTEXT       **ppsRenderContext);
+
+
+/*!
+*******************************************************************************
+
+ @Function     PVRSRVRGXDestroyRenderContextKM
+
+ @Description
+       Server-side implementation of RGXDestroyRenderContext
+
+ @Input psRenderContext - render context to destroy
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext);
+
+
+/*!
+*******************************************************************************
+
+ @Function     PVRSRVRGXKickTA3DKM
+
+ @Description
+       Server-side implementation of RGXKickTA3D
+
+ @Input psKMHWRTDataSet - HWRTDataSet associated with the kick (or NULL)
+ @Input psZSBuffer - ZS buffer associated with the kick (or NULL)
+ @Input psMSAAScratchBuffer - MSAA scratch buffer associated with the kick (or NULL)
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT     *psRenderContext,
+                                                                IMG_UINT32                                     ui32ClientTAFenceCount,
+                                                                SYNC_PRIMITIVE_BLOCK           **apsClientTAFenceSyncPrimBlock,
+                                                                IMG_UINT32                                     *paui32ClientTAFenceSyncOffset,
+                                                                IMG_UINT32                                     *paui32ClientTAFenceValue,
+                                                                IMG_UINT32                                     ui32ClientTAUpdateCount,
+                                                                SYNC_PRIMITIVE_BLOCK           **apsClientUpdateSyncPrimBlock,
+                                                                IMG_UINT32                                     *paui32ClientUpdateSyncOffset,
+                                                                IMG_UINT32                                     *paui32ClientTAUpdateValue,
+                                                                IMG_UINT32                                     ui32Client3DUpdateCount,
+                                                                SYNC_PRIMITIVE_BLOCK           **apsClient3DUpdateSyncPrimBlock,
+                                                                IMG_UINT32                                     *paui32Client3DUpdateSyncOffset,
+                                                                IMG_UINT32                                     *paui32Client3DUpdateValue,
+                                                                SYNC_PRIMITIVE_BLOCK           *psPRSyncPrimBlock,
+                                                                IMG_UINT32                                     ui32PRSyncOffset,
+                                                                IMG_UINT32                                     ui32PRFenceValue,
+                                                                PVRSRV_FENCE                           iCheckFence,
+                                                                PVRSRV_TIMELINE                        iUpdateTimeline,
+                                                                PVRSRV_FENCE                           *piUpdateFence,
+                                                                IMG_CHAR                                       szFenceName[PVRSRV_SYNC_NAME_LENGTH],
+                                                                PVRSRV_FENCE                           iCheckFence3D,
+                                                                PVRSRV_TIMELINE                        iUpdateTimeline3D,
+                                                                PVRSRV_FENCE                           *piUpdateFence3D,
+                                                                IMG_CHAR                                       szFenceName3D[PVRSRV_SYNC_NAME_LENGTH],
+                                                                IMG_UINT32                                     ui32TACmdSize,
+                                                                IMG_PBYTE                                      pui8TADMCmd,
+                                                                IMG_UINT32                                     ui323DPRCmdSize,
+                                                                IMG_PBYTE                                      pui83DPRDMCmd,
+                                                                IMG_UINT32                                     ui323DCmdSize,
+                                                                IMG_PBYTE                                      pui83DDMCmd,
+                                                                IMG_UINT32                                     ui32ExtJobRef,
+                                                                IMG_BOOL                                       bKickTA,
+                                                                IMG_BOOL                                       bKickPR,
+                                                                IMG_BOOL                                       bKick3D,
+                                                                IMG_BOOL                                       bAbort,
+                                                                IMG_UINT32                                     ui32PDumpFlags,
+                                                                RGX_KM_HW_RT_DATASET           *psKMHWRTDataSet,
+                                                                RGX_ZSBUFFER_DATA                      *psZSBuffer,
+                                                                RGX_ZSBUFFER_DATA                      *psMSAAScratchBuffer,
+                                                                IMG_UINT32                                     ui32SyncPMRCount,
+                                                                IMG_UINT32                                     *paui32SyncPMRFlags,
+                                                                PMR                                            **ppsSyncPMRs,
+                                                                IMG_UINT32                                     ui32RenderTargetSize,
+                                                                IMG_UINT32                                     ui32NumberOfDrawCalls,
+                                                                IMG_UINT32                                     ui32NumberOfIndices,
+                                                                IMG_UINT32                                     ui32NumberOfMRTs,
+                                                                IMG_UINT64                                     ui64DeadlineInus);
+
+
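+/* Set the scheduling priority of the given render context */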
+PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection,
+                                                 PVRSRV_DEVICE_NODE * psDevNode,
+                                                 RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+                                                 IMG_UINT32 ui32Priority);
+
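+/* Query or update a property of the given render context */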
+PVRSRV_ERROR PVRSRVRGXSetRenderContextPropertyKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+                                                                                                RGX_CONTEXT_PROPERTY eContextProperty,
+                                                                                                IMG_UINT64 ui64Input,
+                                                                                                IMG_UINT64 *pui64Output);
+
+/* Debug - Dump debug info of render contexts on this device */
+void DumpRenderCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                         DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                         void *pvDumpDebugFile,
+                         IMG_UINT32 ui32VerbLevel);
+
+/* Debug/Watchdog - check if client contexts are stalled */
+IMG_UINT32 CheckForStalledClientRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+PVRSRV_ERROR RGXRenderContextStalledKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext);
+
+#endif /* RGXTA3D_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxtdmtransfer.c b/drivers/gpu/drm/img/img-rogue/services/server/devices/volcanic/rgxtdmtransfer.c
new file mode 100644 (file)
index 0000000..3454e7f
--- /dev/null
@@ -0,0 +1,1334 @@
+/*************************************************************************/ /*!
+@File           rgxtdmtransfer.c
+@Title          Device specific TDM transfer queue routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pdump_km.h"
+#include "rgxdevice.h"
+#include "rgxccb.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxtdmtransfer.h"
+#include "rgx_tq_shared.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "rgx_memallocflags.h"
+#include "rgxhwperf.h"
+#include "ospvr_gputrace.h"
+#include "htbuffer.h"
+#include "rgxshader.h"
+
+#include "pdump_km.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+
+#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER)
+#include "validation_soc.h"
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "rgxworkest.h"
+#endif
+
+#include "rgxtimerquery.h"
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_TDM_UFO_DUMP    0
+
+//#define TDM_CHECKPOINT_DEBUG 1
+
+#if defined(TDM_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+#else
+#define CHKPT_DBG(X)
+#endif
+
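+/* Per-context TDM data: the FW common context, its priority and, when
+ * buffer sync is enabled, the buffer sync context.
+ */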
+typedef struct {
+       RGX_SERVER_COMMON_CONTEXT * psServerCommonContext;
+       IMG_UINT32                  ui32Priority;
+#if defined(SUPPORT_BUFFER_SYNC)
+       struct pvr_buffer_sync_context *psBufferSyncContext;
+#endif
+} RGX_SERVER_TQ_TDM_DATA;
+
+
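+/* Server-side TDM transfer context. Owns the FW transfer context and
+ * framework memory, the TDM common context data, the fence/update sync
+ * address lists and the lock that serialises kicks on this context.
+ */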
+struct _RGX_SERVER_TQ_TDM_CONTEXT_ {
+       PVRSRV_DEVICE_NODE      *psDeviceNode;
+       DEVMEM_MEMDESC          *psFWTransferContextMemDesc;
+       DEVMEM_MEMDESC          *psFWFrameworkMemDesc;
+       IMG_UINT32              ui32Flags;
+       RGX_SERVER_TQ_TDM_DATA  sTDMData;
+       DLLIST_NODE             sListNode;
+       SYNC_ADDR_LIST          sSyncAddrListFence;
+       SYNC_ADDR_LIST          sSyncAddrListUpdate;
+       POS_LOCK                hLock;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       WORKEST_HOST_DATA       sWorkEstData;
+#endif
+};
+
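+/* Create the buffer sync context (when enabled) and allocate the FW
+ * common context for the TDM data master.
+ */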
+static PVRSRV_ERROR _CreateTDMTransferContext(
+       CONNECTION_DATA         * psConnection,
+       PVRSRV_DEVICE_NODE      * psDeviceNode,
+       DEVMEM_MEMDESC          * psAllocatedMemDesc,
+       IMG_UINT32                ui32AllocatedOffset,
+       SERVER_MMU_CONTEXT      * psServerMMUContext,
+       DEVMEM_MEMDESC          * psFWMemContextMemDesc,
+       IMG_UINT32                ui32Priority,
+       RGX_COMMON_CONTEXT_INFO * psInfo,
+       RGX_SERVER_TQ_TDM_DATA  * psTDMData,
+       IMG_UINT32                ui32CCBAllocSizeLog2,
+       IMG_UINT32                ui32CCBMaxAllocSizeLog2,
+       IMG_UINT32                ui32ContextFlags,
+       IMG_UINT64                ui64RobustnessAddress)
+{
+       PVRSRV_ERROR eError;
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       psTDMData->psBufferSyncContext =
+               pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice,
+                                                                          "rogue-tdm");
+       if (IS_ERR(psTDMData->psBufferSyncContext))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: failed to create buffer_sync context (err=%ld)",
+                                __func__, PTR_ERR(psTDMData->psBufferSyncContext)));
+
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto fail_buffer_sync_context_create;
+       }
+#endif
+
+       eError = FWCommonContextAllocate(
+                       psConnection,
+                       psDeviceNode,
+                       REQ_TYPE_TQ_TDM,
+                       RGXFWIF_DM_TDM,
+                       psServerMMUContext,
+                       psAllocatedMemDesc,
+                       ui32AllocatedOffset,
+                       psFWMemContextMemDesc,
+                       NULL,
+                       ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TDM_CCB_SIZE_LOG2,
+                       ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TDM_CCB_MAX_SIZE_LOG2,
+                       ui32ContextFlags,
+                       ui32Priority,
+                       UINT_MAX, /* max deadline MS */
+                       ui64RobustnessAddress,
+                       psInfo,
+                       &psTDMData->psServerCommonContext);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_contextalloc;
+       }
+
+       psTDMData->ui32Priority = ui32Priority;
+       return PVRSRV_OK;
+
+fail_contextalloc:
+#if defined(SUPPORT_BUFFER_SYNC)
+       pvr_buffer_sync_context_destroy(psTDMData->psBufferSyncContext);
+       psTDMData->psBufferSyncContext = NULL;
+fail_buffer_sync_context_create:
+#endif
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+
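+/* Request FW cleanup of the common context. Returns PVRSRV_ERROR_RETRY
+ * while the FW is still using it; otherwise frees the common context
+ * and the buffer sync context.
+ */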
+static PVRSRV_ERROR _DestroyTDMTransferContext(
+       RGX_SERVER_TQ_TDM_DATA  * psTDMData,
+       PVRSRV_DEVICE_NODE      * psDeviceNode)
+{
+       PVRSRV_ERROR eError;
+
+       /* Check if the FW has finished with this resource ... */
+       eError = RGXFWRequestCommonContextCleanUp(
+               psDeviceNode,
+               psTDMData->psServerCommonContext,
+               RGXFWIF_DM_TDM,
+               PDUMP_FLAGS_CONTINUOUS);
+       if (eError == PVRSRV_ERROR_RETRY)
+       {
+               return eError;
+       }
+       else if (eError != PVRSRV_OK)
+       {
+               PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+                                __func__,
+                                PVRSRVGetErrorString(eError)));
+               return eError;
+       }
+
+	/* ... it has, so we can free its resources */
+       FWCommonContextFree(psTDMData->psServerCommonContext);
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       pvr_buffer_sync_context_destroy(psTDMData->psBufferSyncContext);
+       psTDMData->psBufferSyncContext = NULL;
+#endif
+
+       return PVRSRV_OK;
+}
+
+/*
+ * PVRSRVRGXTDMCreateTransferContextKM
+ */
+PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM(
+       CONNECTION_DATA            * psConnection,
+       PVRSRV_DEVICE_NODE         * psDeviceNode,
+       IMG_UINT32                   ui32Priority,
+       IMG_UINT32                   ui32FrameworkCommandSize,
+       IMG_PBYTE                    pabyFrameworkCommand,
+       IMG_HANDLE                   hMemCtxPrivData,
+       IMG_UINT32                   ui32PackedCCBSizeU88,
+       IMG_UINT32                   ui32ContextFlags,
+       IMG_UINT64                   ui64RobustnessAddress,
+       RGX_SERVER_TQ_TDM_CONTEXT ** ppsTransferContext)
+{
+       RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext;
+
+       DEVMEM_MEMDESC          * psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+       PVRSRV_RGXDEV_INFO      * psDevInfo = psDeviceNode->pvDevice;
+       RGX_COMMON_CONTEXT_INFO   sInfo = {NULL};
+       PVRSRV_ERROR              eError = PVRSRV_OK;
+
+       /* Allocate the server side structure */
+       *ppsTransferContext = NULL;
+       psTransferContext = OSAllocZMem(sizeof(*psTransferContext));
+       if (psTransferContext == NULL)
+       {
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       /*
+		Create the FW transfer context; it has the TDM common
+		context embedded within it.
+        */
+       eError = DevmemFwAllocate(psDevInfo,
+                       sizeof(RGXFWIF_FWTDMCONTEXT),
+                       RGX_FWCOMCTX_ALLOCFLAGS,
+                       "FwTransferContext",
+                       &psTransferContext->psFWTransferContextMemDesc);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_fwtransfercontext;
+       }
+
+       eError = OSLockCreate(&psTransferContext->hLock);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)",
+                                                                       __func__,
+                                               PVRSRVGetErrorString(eError)));
+               goto fail_lockcreate;
+       }
+
+       psTransferContext->psDeviceNode = psDeviceNode;
+
+       if (ui32FrameworkCommandSize)
+       {
+               eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+                               &psTransferContext->psFWFrameworkMemDesc,
+                               ui32FrameworkCommandSize);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Failed to allocate firmware GPU framework state (%s)",
+                                       __func__,
+                                       PVRSRVGetErrorString(eError)));
+                       goto fail_frameworkcreate;
+               }
+
+               /* Copy the Framework client data into the framework buffer */
+               eError = PVRSRVRGXFrameworkCopyCommand(psDeviceNode,
+                               psTransferContext->psFWFrameworkMemDesc,
+                               pabyFrameworkCommand,
+                               ui32FrameworkCommandSize);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Failed to populate the framework buffer (%s)",
+                                       __func__,
+                                       PVRSRVGetErrorString(eError)));
+                       goto fail_frameworkcopy;
+               }
+               sInfo.psFWFrameworkMemDesc = psTransferContext->psFWFrameworkMemDesc;
+       }
+
+       eError = _CreateTDMTransferContext(psConnection,
+                                          psDeviceNode,
+                                          psTransferContext->psFWTransferContextMemDesc,
+                                          offsetof(RGXFWIF_FWTDMCONTEXT, sTDMContext),
+                                          hMemCtxPrivData,
+                                          psFWMemContextMemDesc,
+                                          ui32Priority,
+                                          &sInfo,
+                                          &psTransferContext->sTDMData,
+                                                                          U32toU8_Unpack1(ui32PackedCCBSizeU88),
+                                                                          U32toU8_Unpack2(ui32PackedCCBSizeU88),
+                                          ui32ContextFlags,
+                                          ui64RobustnessAddress);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_tdmtransfercontext;
+       }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       WorkEstInitTDM(psDevInfo, &psTransferContext->sWorkEstData);
+#endif
+
+       SyncAddrListInit(&psTransferContext->sSyncAddrListFence);
+       SyncAddrListInit(&psTransferContext->sSyncAddrListUpdate);
+
+       OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock);
+       dllist_add_to_tail(&(psDevInfo->sTDMCtxtListHead), &(psTransferContext->sListNode));
+       OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock);
+       *ppsTransferContext = psTransferContext;
+
+       return PVRSRV_OK;
+
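+/* Error unwind: release resources in the reverse order of their creation */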
+fail_tdmtransfercontext:
+fail_frameworkcopy:
+       if (psTransferContext->psFWFrameworkMemDesc != NULL)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc);
+       }
+fail_frameworkcreate:
+       OSLockDestroy(psTransferContext->hLock);
+fail_lockcreate:
+       DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWTransferContextMemDesc);
+fail_fwtransfercontext:
+       OSFreeMem(psTransferContext);
+       PVR_ASSERT(eError != PVRSRV_OK);
+       *ppsTransferContext = NULL;
+       return eError;
+}
+
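+/* Return the PMRs backing the shared TQ shader memory (CLI and USC) */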
+PVRSRV_ERROR PVRSRVRGXTDMGetSharedMemoryKM(
+       CONNECTION_DATA           * psConnection,
+       PVRSRV_DEVICE_NODE        * psDeviceNode,
+       PMR                      ** ppsCLIPMRMem,
+       PMR                      ** ppsUSCPMRMem)
+{
+       PVRSRVTQAcquireShaders(psDeviceNode, ppsCLIPMRMem, ppsUSCPMRMem);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVRGXTDMReleaseSharedMemoryKM(PMR * psPMRMem)
+{
+       PVR_UNREFERENCED_PARAMETER(psPMRMem);
+
+       return PVRSRV_OK;
+}
+
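+/* Tear down a TDM transfer context. May return PVRSRV_ERROR_RETRY if the
+ * FW still references it or workload estimation commands are outstanding.
+ */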
+PVRSRV_ERROR PVRSRVRGXTDMDestroyTransferContextKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psTransferContext->psDeviceNode->pvDevice;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       RGXFWIF_FWTDMCONTEXT    *psFWTransferContext;
+       IMG_UINT32 ui32WorkEstCCBSubmitted;
+
+       eError = DevmemAcquireCpuVirtAddr(psTransferContext->psFWTransferContextMemDesc,
+                       (void **)&psFWTransferContext);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to map firmware transfer context (%s)",
+                               __func__,
+                               PVRSRVGetErrorString(eError)));
+               return eError;
+       }
+
+       ui32WorkEstCCBSubmitted = psFWTransferContext->ui32WorkEstCCBSubmitted;
+
+       DevmemReleaseCpuVirtAddr(psTransferContext->psFWTransferContextMemDesc);
+
+       /* Check if all of the workload estimation CCB commands for this workload are read */
+       if (ui32WorkEstCCBSubmitted != psTransferContext->sWorkEstData.ui32WorkEstCCBReceived)
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                               "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch",
+                               __func__, ui32WorkEstCCBSubmitted,
+                               psTransferContext->sWorkEstData.ui32WorkEstCCBReceived));
+
+               return PVRSRV_ERROR_RETRY;
+       }
+#endif
+
+
+	/* Remove the node from the list before calling destroy: a successful
+	 * destroy invalidates the node, so the node must only be re-added if
+	 * destroy fails.
+	 */
+       OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock);
+       dllist_remove_node(&(psTransferContext->sListNode));
+       OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock);
+
+
+       eError = _DestroyTDMTransferContext(&psTransferContext->sTDMData,
+                                           psTransferContext->psDeviceNode);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_destroyTDM;
+       }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       WorkEstDeInitTDM(psDevInfo, &psTransferContext->sWorkEstData);
+#endif
+
+       SyncAddrListDeinit(&psTransferContext->sSyncAddrListFence);
+       SyncAddrListDeinit(&psTransferContext->sSyncAddrListUpdate);
+
+       if (psTransferContext->psFWFrameworkMemDesc != NULL)
+       {
+               DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc);
+       }
+       DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWTransferContextMemDesc);
+
+       OSLockDestroy(psTransferContext->hLock);
+
+       OSFreeMem(psTransferContext);
+
+       return PVRSRV_OK;
+
+fail_destroyTDM:
+
+       OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock);
+       dllist_add_to_tail(&(psDevInfo->sTDMCtxtListHead), &(psTransferContext->sListNode));
+       OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock);
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+
+/*
+ * PVRSRVRGXTDMSubmitTransferKM
+ */
+PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM(
+       RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext,
+       IMG_UINT32                  ui32PDumpFlags,
+       IMG_UINT32                  ui32ClientUpdateCount,
+       SYNC_PRIMITIVE_BLOCK     ** pauiClientUpdateUFODevVarBlock,
+       IMG_UINT32                * paui32ClientUpdateSyncOffset,
+       IMG_UINT32                * paui32ClientUpdateValue,
+       PVRSRV_FENCE                iCheckFence,
+       PVRSRV_TIMELINE             iUpdateTimeline,
+       PVRSRV_FENCE              * piUpdateFence,
+       IMG_CHAR                    szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],
+       IMG_UINT32                  ui32FWCommandSize,
+       IMG_UINT8                 * pui8FWCommand,
+       IMG_UINT32                  ui32ExtJobRef,
+       IMG_UINT32                  ui32SyncPMRCount,
+       IMG_UINT32                * paui32SyncPMRFlags,
+       PMR                      ** ppsSyncPMRs,
+       IMG_UINT32                  ui32TDMCharacteristic1,
+       IMG_UINT32                  ui32TDMCharacteristic2,
+       IMG_UINT64                  ui64DeadlineInus)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode;
+       RGX_CCB_CMD_HELPER_DATA *psCmdHelper;
+       PRGXFWIF_UFO_ADDR * pauiIntFenceUFOAddress   = NULL;
+       PRGXFWIF_UFO_ADDR * pauiIntUpdateUFOAddress  = NULL;
+       IMG_UINT32          ui32IntClientFenceCount  = 0;
+       IMG_UINT32        * paui32IntUpdateValue     = paui32ClientUpdateValue;
+       IMG_UINT32          ui32IntClientUpdateCount = ui32ClientUpdateCount;
+       PVRSRV_ERROR eError;
+       PVRSRV_ERROR eError2;
+       PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE;
+       PVRSRV_RGXDEV_INFO  *psDevInfo = FWCommonContextGetRGXDevInfo(psTransferContext->sTDMData.psServerCommonContext);
+       RGX_CLIENT_CCB      *psClientCCB = FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext);
+       IMG_UINT32          ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal);
+
+       IMG_UINT64          ui64FBSCEntryMask;
+
+       IMG_UINT32 ui32CmdOffset = 0;
+       IMG_BOOL bCCBStateOpen;
+
+       PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+       PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+       PRGXFWIF_UFO_ADDR       pRMWUFOAddr;
+
+       IMG_UINT64               uiCheckFenceUID = 0;
+       IMG_UINT64               uiUpdateFenceUID = 0;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataTransfer = {0};
+       IMG_UINT32 ui32TDMWorkloadDataRO = 0;
+       IMG_UINT32 ui32TDMCmdHeaderOffset = 0;
+       IMG_UINT32 ui32TDMCmdOffsetWrapCheck = 0;
+       RGX_WORKLOAD sWorkloadCharacteristics = {0};
+#endif
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       struct pvr_buffer_sync_append_data *psBufferSyncData = NULL;
+       PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL;
+       IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0;
+       PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL;
+#endif
+
+       PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL;
+       PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+       IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+       IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+       PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL;
+       IMG_UINT32 ui32FenceTimelineUpdateValue = 0;
+       void *pvUpdateFenceFinaliseData = NULL;
+
+       if (iUpdateTimeline >= 0 && !piUpdateFence)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+#if !defined(SUPPORT_WORKLOAD_ESTIMATION)
+       PVR_UNREFERENCED_PARAMETER(ui32TDMCharacteristic1);
+       PVR_UNREFERENCED_PARAMETER(ui32TDMCharacteristic2);
+       PVR_UNREFERENCED_PARAMETER(ui64DeadlineInus);
+#endif
+
+	/* Ensure we have not been given a NULL pointer to the update values
+	 * when we have been told there are updates.
+	 */
+       if (ui32ClientUpdateCount > 0)
+       {
+               PVR_LOG_RETURN_IF_FALSE(paui32ClientUpdateValue != NULL,
+                                       "paui32ClientUpdateValue NULL but "
+                                       "ui32ClientUpdateCount > 0",
+                                       PVRSRV_ERROR_INVALID_PARAMS);
+       }
+
+       /* Ensure the string is null-terminated (Required for safety) */
+	szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH - 1] = '\0';
+
+       if (ui32SyncPMRCount != 0)
+       {
+               if (!ppsSyncPMRs)
+               {
+                       return PVRSRV_ERROR_INVALID_PARAMS;
+               }
+       }
+
+       OSLockAcquire(psTransferContext->hLock);
+
+       /* We can't allocate the required amount of stack space on all consumer architectures */
+       psCmdHelper = OSAllocMem(sizeof(RGX_CCB_CMD_HELPER_DATA));
+       if (psCmdHelper == NULL)
+       {
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto fail_allochelper;
+       }
+
+
+       /*
+               Init the command helper commands for all the prepares
+       */
+       {
+               IMG_CHAR *pszCommandName;
+               RGXFWIF_CCB_CMD_TYPE eType;
+#if defined(SUPPORT_BUFFER_SYNC)
+               struct pvr_buffer_sync_context *psBufferSyncContext;
+#endif
+
+               pszCommandName = "TQ-TDM";
+
+               if (ui32FWCommandSize == 0)
+               {
+			/* A NULL command for TDM is used to append updates to an
+			 * unfinished FW command. bCCBStateOpen is set so that, if a
+			 * capture range is entered on this command, the CCB is not
+			 * drained up to the Roff of this command but only up to the
+			 * previously finished command.
+			 */
+                       bCCBStateOpen = IMG_TRUE;
+                       eType = RGXFWIF_CCB_CMD_TYPE_NULL;
+               }
+               else
+               {
+                       bCCBStateOpen = IMG_FALSE;
+                       eType = RGXFWIF_CCB_CMD_TYPE_TQ_TDM;
+               }
+
+#if defined(SUPPORT_BUFFER_SYNC)
+               psBufferSyncContext = psTransferContext->sTDMData.psBufferSyncContext;
+#endif
+
+               eError = SyncAddrListPopulate(&psTransferContext->sSyncAddrListFence,
+                                             0,
+                                             NULL,
+                                             NULL);
+               if (eError != PVRSRV_OK)
+               {
+                       goto fail_populate_sync_addr_list;
+               }
+
+               eError = SyncAddrListPopulate(&psTransferContext->sSyncAddrListUpdate,
+                                                                               ui32ClientUpdateCount,
+                                                                               pauiClientUpdateUFODevVarBlock,
+                                                                               paui32ClientUpdateSyncOffset);
+               if (eError != PVRSRV_OK)
+               {
+                       goto fail_populate_sync_addr_list;
+               }
+               paui32IntUpdateValue    = paui32ClientUpdateValue;
+               pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs;
+
+
+               if (ui32SyncPMRCount)
+               {
+#if defined(SUPPORT_BUFFER_SYNC)
+                       int err;
+
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s:   Calling pvr_buffer_sync_resolve_and_create_fences", __func__));
+                       err = pvr_buffer_sync_resolve_and_create_fences(psBufferSyncContext,
+                                                                       psTransferContext->psDeviceNode->hSyncCheckpointContext,
+                                                                       ui32SyncPMRCount,
+                                                                       ppsSyncPMRs,
+                                                                       paui32SyncPMRFlags,
+                                                                       &ui32BufferFenceSyncCheckpointCount,
+                                                                       &apsBufferFenceSyncCheckpoints,
+                                                                       &psBufferUpdateSyncCheckpoint,
+                                                                       &psBufferSyncData);
+                       if (err)
+                       {
+                               switch (err)
+                               {
+                                       case -EINTR:
+                                               eError = PVRSRV_ERROR_RETRY;
+                                               break;
+                                       case -ENOMEM:
+                                               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                                               break;
+                                       default:
+                                               eError = PVRSRV_ERROR_INVALID_PARAMS;
+                                               break;
+                               }
+
+                               if (eError != PVRSRV_ERROR_RETRY)
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s:   pvr_buffer_sync_resolve_and_create_fences failed (%s)", __func__, PVRSRVGetErrorString(eError)));
+                               }
+                               goto fail_resolve_input_fence;
+                       }
+
+                       /* Append buffer sync fences */
+                       if (ui32BufferFenceSyncCheckpointCount > 0)
+                       {
+                               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d buffer sync checkpoints to TQ Fence (&psTransferContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>)...", __func__, ui32BufferFenceSyncCheckpointCount, (void*)&psTransferContext->sSyncAddrListFence , (void*)pauiIntFenceUFOAddress));
+                               SyncAddrListAppendAndDeRefCheckpoints(&psTransferContext->sSyncAddrListFence,
+                                                                                                         ui32BufferFenceSyncCheckpointCount,
+                                                                                                         apsBufferFenceSyncCheckpoints);
+                               if (!pauiIntFenceUFOAddress)
+                               {
+                                       pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs;
+                               }
+                               ui32IntClientFenceCount += ui32BufferFenceSyncCheckpointCount;
+                       }
+
+                       if (psBufferUpdateSyncCheckpoint)
+                       {
+                               /* Append the update (from output fence) */
+                               SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListUpdate,
+                                                                                         1,
+                                                                                         &psBufferUpdateSyncCheckpoint);
+                               if (!pauiIntUpdateUFOAddress)
+                               {
+                                       pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs;
+                               }
+                               ui32IntClientUpdateCount++;
+                       }
+#else /* defined(SUPPORT_BUFFER_SYNC) */
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", __func__, ui32SyncPMRCount));
+                       eError = PVRSRV_ERROR_INVALID_PARAMS;
+                       goto fail_populate_sync_addr_list;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+               }
+
+               /* Resolve the sync checkpoints that make up the input fence */
+               eError = SyncCheckpointResolveFence(psTransferContext->psDeviceNode->hSyncCheckpointContext,
+                                                   iCheckFence,
+                                                   &ui32FenceSyncCheckpointCount,
+                                                   &apsFenceSyncCheckpoints,
+                                                   &uiCheckFenceUID,
+                                                   ui32PDumpFlags);
+               if (eError != PVRSRV_OK)
+               {
+                       goto fail_resolve_input_fence;
+               }
+#if defined(TDM_CHECKPOINT_DEBUG)
+               {
+                       IMG_UINT32 ii;
+				for (ii=0; ii<ui32FenceSyncCheckpointCount; ii++)
+                       {
+                               PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints +  ii);
+                               CHKPT_DBG((PVR_DBG_ERROR, "%s:    apsFenceSyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint)); //psFenceSyncCheckpoints[ii]));
+                       }
+               }
+#endif
+               /* Create the output fence (if required) */
+               if (iUpdateTimeline != PVRSRV_NO_TIMELINE)
+               {
+                       eError = SyncCheckpointCreateFence(psTransferContext->psDeviceNode,
+                                                                                       szUpdateFenceName,
+                                                                                          iUpdateTimeline,
+                                                                                          psTransferContext->psDeviceNode->hSyncCheckpointContext,
+                                                                                          &iUpdateFence,
+                                                                                          &uiUpdateFenceUID,
+                                                                                          &pvUpdateFenceFinaliseData,
+                                                                                          &psUpdateSyncCheckpoint,
+                                                                                          (void*)&psFenceTimelineUpdateSync,
+                                                                                          &ui32FenceTimelineUpdateValue,
+                                                                                          ui32PDumpFlags);
+                       if (eError != PVRSRV_OK)
+                       {
+                               goto fail_create_output_fence;
+                       }
+
+                       /* Append the sync prim update for the timeline (if required) */
+                       if (psFenceTimelineUpdateSync)
+                       {
+                               IMG_UINT32 *pui32TimelineUpdateWp = NULL;
+
+                               /* Allocate memory to hold the list of update values (including our timeline update) */
+                               pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+                               if (!pui32IntAllocatedUpdateValues)
+                               {
+                                       /* Failed to allocate memory */
+                                       eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                                       goto fail_alloc_update_values_mem;
+                               }
+                               OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+                               /* Copy the update values into the new memory, then append our timeline update value */
+                               if (paui32IntUpdateValue)
+                               {
+                                       OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount);
+                               }
+                               /* Now set the additional update value */
+                               pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount;
+                               *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
+                               ui32IntClientUpdateCount++;
+#if defined(TDM_CHECKPOINT_DEBUG)
+                               {
+                                       IMG_UINT32 iii;
+                                       IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+                                       for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+                                       {
+                                               CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                                               pui32Tmp++;
+                                       }
+                               }
+#endif
+                               /* Now append the timeline sync prim addr to the transfer context update list */
+                               SyncAddrListAppendSyncPrim(&psTransferContext->sSyncAddrListUpdate,
+                                                          psFenceTimelineUpdateSync);
+#if defined(TDM_CHECKPOINT_DEBUG)
+                               {
+                                       IMG_UINT32 iii;
+                                       IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+                                       for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+                                       {
+                                               CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                                               pui32Tmp++;
+                                       }
+                               }
+#endif
+                               /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */
+                               paui32IntUpdateValue = pui32IntAllocatedUpdateValues;
+                       }
+               }
+
+               if (ui32FenceSyncCheckpointCount)
+               {
+                       /* Append the checks (from input fence) */
+                       if (ui32FenceSyncCheckpointCount > 0)
+                       {
+                               CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append %d sync checkpoints to TQ Fence (&psTransferContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psTransferContext->sSyncAddrListFence));
+#if defined(TDM_CHECKPOINT_DEBUG)
+                               {
+                                       IMG_UINT32 iii;
+                                       IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress;
+
+                                       for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+                                       {
+                                               CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                                               pui32Tmp++;
+                                       }
+                               }
+#endif
+                               SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListFence,
+                                                             ui32FenceSyncCheckpointCount,
+                                                             apsFenceSyncCheckpoints);
+                               if (!pauiIntFenceUFOAddress)
+                               {
+                                       pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs;
+                               }
+                               ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
+                       }
+#if defined(TDM_CHECKPOINT_DEBUG)
+                       {
+                               IMG_UINT32 iii;
+                               IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+                               for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+                               {
+                                       CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                                       pui32Tmp++;
+                               }
+                       }
+#endif
+               }
+               if (psUpdateSyncCheckpoint)
+               {
+                       /* Append the update (from output fence) */
+                       CHKPT_DBG((PVR_DBG_ERROR, "%s:   Append 1 sync checkpoint to TQ Update (&psTransferContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)&psTransferContext->sSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress));
+                       SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListUpdate,
+                                                     1,
+                                                     &psUpdateSyncCheckpoint);
+                       if (!pauiIntUpdateUFOAddress)
+                       {
+                               pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs;
+                       }
+                       ui32IntClientUpdateCount++;
+#if defined(TDM_CHECKPOINT_DEBUG)
+                       {
+                               IMG_UINT32 iii;
+                               IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+                               for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+                               {
+                                       CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+                                       pui32Tmp++;
+                               }
+                       }
+#endif
+               }
+
+#if (ENABLE_TDM_UFO_DUMP == 1)
+               PVR_DPF((PVR_DBG_ERROR, "%s: dumping TDM fence/updates syncs...", __func__));
+               {
+                       IMG_UINT32 ii;
+                       PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress;
+                       PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress;
+                       IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue;
+
+                       /* Dump Fence syncs and Update syncs */
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TDM fence syncs (&psTransferContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psTransferContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress));
+                       for (ii=0; ii<ui32IntClientFenceCount; ii++)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
+                               psTmpIntFenceUFOAddress++;
+                       }
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TDM update syncs (&psTransferContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psTransferContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
+                       for (ii=0; ii<ui32IntClientUpdateCount; ii++)
+                       {
+                               if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
+                               }
+                               else
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s:   %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue));
+                                       pui32TmpIntUpdateValue++;
+                               }
+                               psTmpIntUpdateUFOAddress++;
+                       }
+               }
+#endif
+
+               RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psTransferContext->psDeviceNode->pvDevice,
+                                         &pPreAddr,
+                                         &pPostAddr,
+                                         &pRMWUFOAddr);
+               /*
+                * Extract the FBSC entries from MMU Context for the deferred FBSC invalidate command,
+                * in other words, take the value and set it to zero afterwards.
+                * FBSC Entry Mask must be extracted from MMU ctx and updated just before the kick starts
+                * as it must be ready at the time of context activation.
+                */
+               {
+                       eError = RGXExtractFBSCEntryMaskFromMMUContext(psTransferContext->psDeviceNode,
+                                                                                                                  FWCommonContextGetServerMMUCtx(psTransferContext->sTDMData.psServerCommonContext),
+                                                                                                                  &ui64FBSCEntryMask);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "Failed to extract FBSC Entry Mask (%d)", eError));
+                               goto fail_invalfbsc;
+                       }
+               }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+               sWorkloadCharacteristics.sTransfer.ui32Characteristic1 = ui32TDMCharacteristic1;
+               sWorkloadCharacteristics.sTransfer.ui32Characteristic2 = ui32TDMCharacteristic2;
+
+               /* Prepare workload estimation */
+               WorkEstPrepare(psDeviceNode->pvDevice,
+                               &psTransferContext->sWorkEstData,
+                               &psTransferContext->sWorkEstData.uWorkloadMatchingData.sTransfer.sDataTDM,
+                               eType,
+                               &sWorkloadCharacteristics,
+                               ui64DeadlineInus,
+                               &sWorkloadKickDataTransfer);
+#endif
+
+               /*
+                       Create the command helper data for this command
+               */
+               RGXCmdHelperInitCmdCCB(psDevInfo,
+                                      psClientCCB,
+                                      ui64FBSCEntryMask,
+                                      ui32IntClientFenceCount,
+                                      pauiIntFenceUFOAddress,
+                                      NULL,
+                                      ui32IntClientUpdateCount,
+                                      pauiIntUpdateUFOAddress,
+                                      paui32IntUpdateValue,
+                                      ui32FWCommandSize,
+                                      pui8FWCommand,
+                                      &pPreAddr,
+                                      &pPostAddr,
+                                      &pRMWUFOAddr,
+                                      eType,
+                                      ui32ExtJobRef,
+                                      ui32IntJobRef,
+                                      ui32PDumpFlags,
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+                                      &sWorkloadKickDataTransfer,
+#else /* SUPPORT_WORKLOAD_ESTIMATION */
+                                      NULL,
+#endif /* SUPPORT_WORKLOAD_ESTIMATION */
+                                      pszCommandName,
+                                      bCCBStateOpen,
+                                      psCmdHelper);
+       }
+
+       /*
+               Acquire space for all the commands in one go
+       */
+
+       eError = RGXCmdHelperAcquireCmdCCB(1, psCmdHelper);
+       if (eError != PVRSRV_OK)
+       {
+               goto fail_3dcmdacquire;
+       }
+
+
+       /*
+               We should acquire the kernel CCB(s) space here as the schedule could fail
+               and we would have to roll back all the syncs
+       */
+
+       /*
+		Only do the command helper release (which takes the server sync
+		operations) if the acquire succeeded
+       */
+       ui32CmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext));
+       RGXCmdHelperReleaseCmdCCB(1,
+                                 psCmdHelper,
+                                 "TQ_TDM",
+                                 FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext).ui32Addr);
+
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+       /* The following is used to determine the offset of the command header containing
+	   the workload estimation data so that it can be accessed when the KCCB is read */
+       ui32TDMCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(psCmdHelper);
+
+       ui32TDMCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext));
+
+       /* This checks if the command would wrap around at the end of the CCB and
+        * therefore would start at an offset of 0 rather than the current command
+        * offset */
+       if (ui32CmdOffset < ui32TDMCmdOffsetWrapCheck)
+       {
+               ui32TDMWorkloadDataRO = ui32CmdOffset;
+       }
+       else
+       {
+               ui32TDMWorkloadDataRO = 0;
+       }
+#endif
+
+       /*
+               Even if we failed to acquire the client CCB space we might still need
+               to kick the HW to process a padding packet to release space for us next
+               time round
+       */
+       {
+               RGXFWIF_KCCB_CMD sTDMKCCBCmd;
+               IMG_UINT32 ui32FWAddr = FWCommonContextGetFWAddress(
+                       psTransferContext->sTDMData.psServerCommonContext).ui32Addr;
+
+               /* Construct the kernel 3D CCB command. */
+               sTDMKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+               sTDMKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext);
+               sTDMKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB);
+               sTDMKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB);
+               sTDMKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+               /* Add the Workload data into the KCCB kick */
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+               /* Store the offset to the CCCB command header so that it can be referenced
+                * when the KCCB command reaches the FW */
+               sTDMKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui32TDMWorkloadDataRO + ui32TDMCmdHeaderOffset;
+#else
+               sTDMKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+#endif
+
+               /* HTBLOGK(HTB_SF_MAIN_KICK_TDM, */
+               /*              s3DKCCBCmd.uCmdData.sCmdKickData.psContext, */
+               /*              ui323DCmdOffset); */
+               RGXSRV_HWPERF_ENQ(psTransferContext,
+                                 OSGetCurrentClientProcessIDKM(),
+                                 FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext).ui32Addr,
+                                 ui32ExtJobRef,
+                                 ui32IntJobRef,
+                                 RGX_HWPERF_KICK_TYPE_TQTDM,
+                                 iCheckFence,
+                                 iUpdateFence,
+                                 iUpdateTimeline,
+                                 uiCheckFenceUID,
+                                 uiUpdateFenceUID,
+                                 NO_DEADLINE,
+                                 NO_CYCEST);
+
+               LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+               {
+                       eError2 = RGXScheduleCommand(psDeviceNode->pvDevice,
+                                                                                RGXFWIF_DM_TDM,
+                                                                                & sTDMKCCBCmd,
+                                                                                ui32PDumpFlags);
+                       if (eError2 != PVRSRV_ERROR_RETRY)
+                       {
+                               break;
+                       }
+                       OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+               } END_LOOP_UNTIL_TIMEOUT();
+
+               if (eError2 != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXTDMSubmitTransferKM failed to schedule kernel CCB command. (0x%x)", eError2));
+                       if (eError == PVRSRV_OK)
+                       {
+                               eError = eError2;
+                       }
+                       goto fail_2dcmdacquire;
+               }
+
+               PVRGpuTraceEnqueueEvent(psDeviceNode->pvDevice, ui32FWAddr, ui32ExtJobRef,
+                                       ui32IntJobRef, RGX_HWPERF_KICK_TYPE_TQTDM);
+       }
+
+       /*
+        * Now check eError (which may have returned an error from our earlier calls
+        * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first
+        * so we check it now...
+        */
+       if (eError != PVRSRV_OK )
+       {
+               goto fail_2dcmdacquire;
+       }
+
+#if defined(NO_HARDWARE)
+       /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+       if (psUpdateSyncCheckpoint)
+       {
+               SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
+       }
+       if (psFenceTimelineUpdateSync)
+       {
+               SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
+       }
+       SyncCheckpointNoHWUpdateTimelines(NULL);
+#endif /* defined(NO_HARDWARE) */
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       if (psBufferSyncData)
+       {
+               pvr_buffer_sync_kick_succeeded(psBufferSyncData);
+       }
+       if (apsBufferFenceSyncCheckpoints)
+       {
+               kfree(apsBufferFenceSyncCheckpoints);
+       }
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+	*piUpdateFence = iUpdateFence;
+       if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE))
+       {
+               SyncCheckpointFinaliseFence(psDeviceNode, iUpdateFence, pvUpdateFenceFinaliseData,
+                                           psUpdateSyncCheckpoint, szUpdateFenceName);
+       }
+
+       OSFreeMem(psCmdHelper);
+
+       /* Drop the references taken on the sync checkpoints in the
+        * resolved input fence */
+       SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+                                    apsFenceSyncCheckpoints);
+       /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+       if (apsFenceSyncCheckpoints)
+       {
+               SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+       }
+       /* Free memory allocated to hold the internal list of update values */
+       if (pui32IntAllocatedUpdateValues)
+       {
+               OSFreeMem(pui32IntAllocatedUpdateValues);
+               pui32IntAllocatedUpdateValues = NULL;
+       }
+
+       OSLockRelease(psTransferContext->hLock);
+       return PVRSRV_OK;
+
+/*
+       No resources are created in this function, so there is nothing to free
+       unless we had to merge syncs.
+       If we fail after the client CCB acquire there is still nothing to do,
+       as only the client CCB release will modify the client CCB.
+*/
+fail_2dcmdacquire:
+fail_3dcmdacquire:
+
+fail_invalfbsc:
+       SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, &psTransferContext->sSyncAddrListFence);
+       SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, &psTransferContext->sSyncAddrListUpdate);
+fail_alloc_update_values_mem:
+
+/* fail_pdumpcheck: */
+/* fail_cmdtype: */
+
+       if (iUpdateFence != PVRSRV_NO_FENCE)
+       {
+               SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
+       }
+fail_create_output_fence:
+       /* Drop the references taken on the sync checkpoints in the
+        * resolved input fence */
+       SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+                                    apsFenceSyncCheckpoints);
+
+fail_resolve_input_fence:
+
+#if defined(SUPPORT_BUFFER_SYNC)
+       if (psBufferSyncData)
+       {
+               pvr_buffer_sync_kick_failed(psBufferSyncData);
+       }
+       if (apsBufferFenceSyncCheckpoints)
+       {
+               kfree(apsBufferFenceSyncCheckpoints);
+       }
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+fail_populate_sync_addr_list:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       OSFreeMem(psCmdHelper);
+fail_allochelper:
+
+       if (apsFenceSyncCheckpoints)
+       {
+               SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+       }
+       OSLockRelease(psTransferContext->hLock);
+       return eError;
+}
+
+
+PVRSRV_ERROR PVRSRVRGXTDMNotifyWriteOffsetUpdateKM(
+       RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+       IMG_UINT32                 ui32PDumpFlags)
+{
+       RGXFWIF_KCCB_CMD  sKCCBCmd;
+       PVRSRV_ERROR      eError;
+
+       OSLockAcquire(psTransferContext->hLock);
+
+       /* Schedule the firmware command */
+       sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE;
+       sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext);
+
+       LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+       {
+               eError = RGXScheduleCommand(psTransferContext->psDeviceNode->pvDevice,
+                                           RGXFWIF_DM_TDM,
+                                           &sKCCBCmd,
+                                           ui32PDumpFlags);
+               if (eError != PVRSRV_ERROR_RETRY)
+               {
+                       break;
+               }
+               OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+       } END_LOOP_UNTIL_TIMEOUT();
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to schedule the FW command %d (%s)",
+                               __func__, eError, PVRSRVGETERRORSTRING(eError)));
+       }
+
+       OSLockRelease(psTransferContext->hLock);
+       return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
+                                                      PVRSRV_DEVICE_NODE *psDevNode,
+                                                      RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+                                                      IMG_UINT32 ui32Priority)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+       OSLockAcquire(psTransferContext->hLock);
+
+       if (psTransferContext->sTDMData.ui32Priority != ui32Priority)
+       {
+               eError = ContextSetPriority(psTransferContext->sTDMData.psServerCommonContext,
+                                           psConnection,
+                                           psTransferContext->psDeviceNode->pvDevice,
+                                           ui32Priority,
+                                           RGXFWIF_DM_TDM);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority (%s)", __func__, PVRSRVGetErrorString(eError)));
+
+                       OSLockRelease(psTransferContext->hLock);
+                       return eError;
+               }
+       }
+
+       OSLockRelease(psTransferContext->hLock);
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPropertyKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+                                                                                                         RGX_CONTEXT_PROPERTY eContextProperty,
+                                                                                                         IMG_UINT64 ui64Input,
+                                                                                                         IMG_UINT64 *pui64Output)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       switch (eContextProperty)
+       {
+               case RGX_CONTEXT_PROPERTY_FLAGS:
+               {
+                       IMG_UINT32 ui32ContextFlags = (IMG_UINT32)ui64Input;
+
+                       OSLockAcquire(psTransferContext->hLock);
+                       eError = FWCommonContextSetFlags(psTransferContext->sTDMData.psServerCommonContext,
+                                                        ui32ContextFlags);
+                       OSLockRelease(psTransferContext->hLock);
+                       break;
+               }
+
+               default:
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty));
+                       eError = PVRSRV_ERROR_NOT_SUPPORTED;
+               }
+       }
+
+       return eError;
+}
+
+void DumpTDMTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                              DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                              void *pvDumpDebugFile,
+                              IMG_UINT32 ui32VerbLevel)
+{
+       DLLIST_NODE *psNode, *psNext;
+
+       OSWRLockAcquireRead(psDevInfo->hTDMCtxListLock);
+
+       dllist_foreach_node(&psDevInfo->sTDMCtxtListHead, psNode, psNext)
+       {
+               RGX_SERVER_TQ_TDM_CONTEXT *psCurrentServerTransferCtx =
+                       IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_TDM_CONTEXT, sListNode);
+
+               DumpFWCommonContextInfo(psCurrentServerTransferCtx->sTDMData.psServerCommonContext,
+                                       pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+       }
+
+       OSWRLockReleaseRead(psDevInfo->hTDMCtxListLock);
+}
+
+
+IMG_UINT32 CheckForStalledClientTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+       DLLIST_NODE *psNode, *psNext;
+       IMG_UINT32 ui32ContextBitMask = 0;
+
+       OSWRLockAcquireRead(psDevInfo->hTDMCtxListLock);
+
+       dllist_foreach_node(&psDevInfo->sTDMCtxtListHead, psNode, psNext)
+       {
+               RGX_SERVER_TQ_TDM_CONTEXT *psCurrentServerTransferCtx =
+                       IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_TDM_CONTEXT, sListNode);
+
+               if (CheckStalledClientCommonContext(
+                                    psCurrentServerTransferCtx->sTDMData.psServerCommonContext,
+                                    RGX_KICK_TYPE_DM_TDM_2D) == PVRSRV_ERROR_CCCB_STALLED)
+               {
+                       ui32ContextBitMask = RGX_KICK_TYPE_DM_TDM_2D;
+               }
+       }
+
+       OSWRLockReleaseRead(psDevInfo->hTDMCtxListLock);
+       return ui32ContextBitMask;
+}
+
+/**************************************************************************//**
+ End of file (rgxtdmtransfer.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/Kbuild.mk b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/Kbuild.mk
new file mode 100644 (file)
index 0000000..4ba7716
--- /dev/null
@@ -0,0 +1,624 @@
+########################################################################### ###
+#@File
+#@Copyright     Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+#@License       Dual MIT/GPLv2
+#
+# The contents of this file are subject to the MIT license as set out below.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# Alternatively, the contents of this file may be used under the terms of
+# the GNU General Public License Version 2 ("GPL") in which case the provisions
+# of GPL are applicable instead of those above.
+#
+# If you wish to allow use of your version of this file only under the terms of
+# GPL, and not to allow others to use your version of this file under the terms
+# of the MIT license, indicate your decision by deleting the provisions above
+# and replace them with the notice and other provisions required by GPL as set
+# out in the file called "GPL-COPYING" included in this distribution. If you do
+# not delete the provisions above, a recipient may use your version of this file
+# under the terms of either the MIT license or GPL.
+#
+# This License is also included in this distribution in the file called
+# "MIT-COPYING".
+#
+# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+### ###########################################################################
+
+# Window system
+ccflags-y += -DWINDOW_SYSTEM=\"$(WINDOW_SYSTEM)\"
+
+# Linux kernel headers
+ccflags-y += \
+ -Iinclude \
+ -Iinclude/drm
+
+# Compatibility BVNC
+ccflags-y += -I$(TOP)/services/shared/devices/$(PVR_ARCH_DEFS)
+
+# Errata files
+ccflags-y += -I$(HWDEFS_DIR) -I$(HWDEFS_DIR)/$(RGX_BNC)
+
+# Linux-specific headers
+ccflags-y += \
+ -I$(TOP)/include/drm \
+ -I$(TOP)/services/include/env/linux \
+ -I$(TOP)/services/server/env/linux/$(PVR_ARCH) -I$(TOP)/services/server/env/linux \
+ -I$(TOP)/kernel/drivers/staging/imgtec
+
+# System directory: prefer the arch-specific system layer if it provides a
+# Kbuild.mk, otherwise fall back to the generic system directory.
+ifneq ($(wildcard $(TOP)/services/system/$(PVR_ARCH)/$(PVR_SYSTEM)/Kbuild.mk),)
+SYSTEM_DIR := $(TOP)/services/system/$(PVR_ARCH)/$(PVR_SYSTEM)
+else
+SYSTEM_DIR := $(TOP)/services/system/$(PVR_SYSTEM)
+endif
+
+$(PVRSRV_MODNAME)-y += \
+ services/server/env/linux/pvr_drm.o \
+ services/server/env/linux/event.o \
+ services/server/env/linux/fwload.o \
+ services/server/env/linux/km_apphint.o \
+ services/server/env/linux/module_common.o \
+ services/server/env/linux/osmmap_stub.o \
+ services/server/env/linux/osfunc.o \
+ services/server/env/linux/allocmem.o \
+ services/server/env/linux/osconnection_server.o \
+ services/server/env/linux/physmem_osmem_linux.o \
+ services/server/env/linux/pmr_os.o \
+ services/server/env/linux/pvr_bridge_k.o \
+ services/server/env/linux/pvr_debug.o \
+ services/server/env/linux/physmem_dmabuf.o \
+ services/server/common/devicemem_heapcfg.o \
+ services/shared/common/devicemem.o \
+ services/shared/common/devicemem_utils.o \
+ services/shared/common/hash.o \
+ services/shared/common/ra.o \
+ services/shared/common/sync.o \
+ services/shared/common/mem_utils.o \
+ services/server/common/devicemem_server.o \
+ services/server/common/handle.o \
+ services/server/common/lists.o \
+ services/server/common/mmu_common.o \
+ services/server/common/connection_server.o \
+ services/server/common/physheap.o \
+ services/server/common/physmem.o \
+ services/server/common/physmem_lma.o \
+ services/server/common/physmem_hostmem.o \
+ services/server/common/pmr.o \
+ services/server/common/power.o \
+ services/server/common/process_stats.o \
+ services/server/common/pvr_notifier.o \
+ services/server/common/pvrsrv.o \
+ services/server/common/srvcore.o \
+ services/server/common/sync_checkpoint.o \
+ services/server/common/sync_server.o \
+ services/shared/common/htbuffer.o \
+ services/server/common/htbserver.o \
+ services/server/common/htb_debug.o \
+ services/server/common/tlintern.o \
+ services/shared/common/tlclient.o \
+ services/server/common/tlserver.o \
+ services/server/common/tlstream.o \
+ services/server/common/cache_km.o \
+ services/shared/common/uniq_key_splay_tree.o \
+ services/server/common/pvrsrv_pool.o \
+ services/server/common/pvrsrv_bridge_init.o \
+ services/server/common/info_page_km.o \
+ services/shared/common/pvrsrv_error.o \
+ services/server/common/debug_common.o \
+ services/server/common/di_server.o
+
+ifeq ($(SUPPORT_DMA_TRANSFER),1)
+ $(PVRSRV_MODNAME)-y += \
+ services/server/common/dma_km.o
+endif
+
+# Wrap ExtMem support
+ifeq ($(SUPPORT_WRAP_EXTMEM),1)
+ $(PVRSRV_MODNAME)-y += \
+ services/server/env/linux/physmem_extmem_linux.o \
+ services/server/common/physmem_extmem.o
+endif
+
+ifeq ($(SUPPORT_LINUX_WRAP_EXTMEM_PAGE_TABLE_WALK),1)
+ $(PVRSRV_MODNAME)-y += \
+ services/server/env/linux/pg_walk_through.o
+endif
+
+ifeq ($(SUPPORT_PHYSMEM_TEST),1)
+ $(PVRSRV_MODNAME)-y += \
+ services/server/env/linux/physmem_test.o
+endif
+
+ifneq ($(PVR_LOADER),)
+ ifeq ($(KERNEL_DRIVER_DIR),)
+  $(PVRSRV_MODNAME)-y += services/server/env/linux/$(PVR_LOADER).o
+ else
+  ifneq ($(wildcard $(KERNELDIR)/$(KERNEL_DRIVER_DIR)/$(PVR_SYSTEM)/$(PVR_LOADER).c),)
+    $(PVRSRV_MODNAME)-y += external/$(KERNEL_DRIVER_DIR)/$(PVR_SYSTEM)/$(PVR_LOADER).o
+  else
+   ifneq ($(wildcard $(KERNELDIR)/$(KERNEL_DRIVER_DIR)/$(PVR_LOADER).c),)
+     $(PVRSRV_MODNAME)-y += external/$(KERNEL_DRIVER_DIR)/$(PVR_LOADER).o
+   else
+     $(PVRSRV_MODNAME)-y += services/server/env/linux/$(PVR_LOADER).o
+   endif
+  endif
+ endif
+else
+ $(PVRSRV_MODNAME)-y += services/server/env/linux/pvr_platform_drv.o
+endif
+
+ifeq ($(SUPPORT_RGX),1)
+$(PVRSRV_MODNAME)-y += \
+ services/server/devices/rgx_bridge_init.o \
+ services/server/env/linux/pvr_gputrace.o \
+ services/server/devices/rgxfwdbg.o \
+ services/server/devices/rgxtimerquery.o \
+ services/server/devices/rgxccb.o \
+ services/server/devices/$(PVR_ARCH_DEFS)/rgxdebug.o \
+ services/server/devices/rgxfwtrace_strings.o \
+ services/server/devices/$(PVR_ARCH)/rgxfwutils.o \
+ services/server/devices/$(PVR_ARCH)/rgxinit.o \
+ services/server/devices/rgxbvnc.o \
+ services/server/devices/rgxkicksync.o \
+ services/server/devices/$(PVR_ARCH)/rgxlayer_impl.o \
+ services/server/devices/rgxmem.o \
+ services/server/devices/$(PVR_ARCH)/rgxmmuinit.o \
+ services/server/devices/rgxregconfig.o \
+ services/server/devices/$(PVR_ARCH)/rgxta3d.o \
+ services/server/devices/rgxsyncutils.o \
+ services/server/devices/$(PVR_ARCH)/rgxtdmtransfer.o \
+ services/server/devices/rgxutils.o \
+ services/server/devices/rgxhwperf_common.o \
+ services/server/devices/$(PVR_ARCH)/rgxhwperf.o \
+ services/server/devices/$(PVR_ARCH)/rgxpower.o \
+ services/server/devices/$(PVR_ARCH)/rgxstartstop.o \
+ services/server/devices/rgxtimecorr.o \
+ services/server/devices/$(PVR_ARCH)/rgxcompute.o \
+ services/server/devices/$(PVR_ARCH)/rgxmulticore.o \
+ services/server/devices/rgxshader.o
+
+ifeq ($(SUPPORT_USC_BREAKPOINT),1)
+$(PVRSRV_MODNAME)-y += \
+ services/server/devices/rgxbreakpoint.o
+endif
+
+ifeq ($(PVR_ARCH),volcanic)
+$(PVRSRV_MODNAME)-y += \
+ services/server/devices/$(PVR_ARCH)/rgxray.o
+endif
+
+
+ifeq ($(PVR_ARCH),rogue)
+ $(PVRSRV_MODNAME)-y += \
+  services/server/devices/$(PVR_ARCH)/rgxtransfer.o \
+  services/server/devices/$(PVR_ARCH)/rgxmipsmmuinit.o
+endif
+
+ifeq ($(SUPPORT_PDVFS),1)
+ $(PVRSRV_MODNAME)-y += \
+ services/server/devices/rgxpdvfs.o
+endif
+
+ifeq ($(SUPPORT_WORKLOAD_ESTIMATION),1)
+ $(PVRSRV_MODNAME)-y += \
+ services/server/devices/rgxworkest.o
+endif
+
+ifeq ($(SUPPORT_VALIDATION),1)
+ifeq ($(PVR_TESTING_UTILS),1)
+ $(PVRSRV_MODNAME)-y += \
+ services/server/devices/rgxgpumap.o
+endif
+endif
+
+ifeq ($(SUPPORT_VALIDATION),1)
+ $(PVRSRV_MODNAME)-y += \
+ services/server/devices/rgxsoctimer.o
+endif
+endif
+
+ifeq ($(SUPPORT_DISPLAY_CLASS),1)
+$(PVRSRV_MODNAME)-y += \
+ services/server/common/dc_server.o \
+ services/server/common/scp.o
+endif
+
+ifeq ($(SUPPORT_SECURE_EXPORT),1)
+$(PVRSRV_MODNAME)-y += services/server/env/linux/ossecure_export.o
+endif
+
+ifeq ($(PDUMP),1)
+$(PVRSRV_MODNAME)-y += \
+ services/server/common/pdump_server.o \
+ services/server/common/pdump_mmu.o \
+ services/server/common/pdump_physmem.o \
+ services/shared/common/devicemem_pdump.o \
+ services/shared/common/devicememx_pdump.o
+
+ifeq ($(SUPPORT_RGX),1)
+$(PVRSRV_MODNAME)-y += \
+ services/server/devices/$(PVR_ARCH)/rgxpdump.o
+endif
+
+endif
+
+
+
+ifeq ($(PVRSRV_ENABLE_GPU_MEMORY_INFO),1)
+$(PVRSRV_MODNAME)-y += services/server/common/ri_server.o
+endif
+
+ifeq ($(PVR_TESTING_UTILS),1)
+$(PVRSRV_MODNAME)-y += services/server/common/tutils.o
+endif
+
+$(PVRSRV_MODNAME)-y += services/server/common/devicemem_history_server.o
+
+ifeq ($(PVR_HANDLE_BACKEND),generic)
+$(PVRSRV_MODNAME)-y += services/server/common/handle_generic.o
+else
+ifeq ($(PVR_HANDLE_BACKEND),idr)
+$(PVRSRV_MODNAME)-y += services/server/env/linux/handle_idr.o
+endif
+endif
+
+ifeq ($(PVRSRV_ENABLE_LINUX_MMAP_STATS),1)
+$(PVRSRV_MODNAME)-y += services/server/env/linux/mmap_stats.o
+endif
+
+ifeq ($(SUPPORT_BUFFER_SYNC),1)
+$(PVRSRV_MODNAME)-y += \
+ services/server/env/linux/pvr_buffer_sync.o \
+ services/server/env/linux/pvr_fence.o
+endif
+
+ifeq ($(SUPPORT_NATIVE_FENCE_SYNC),1)
+$(PVRSRV_MODNAME)-y += services/server/env/linux/pvr_sync_ioctl_common.o
+ifeq ($(USE_PVRSYNC_DEVNODE),1)
+$(PVRSRV_MODNAME)-y += services/server/env/linux/pvr_sync_ioctl_dev.o
+else
+$(PVRSRV_MODNAME)-y += services/server/env/linux/pvr_sync_ioctl_drm.o
+endif
+ifeq ($(SUPPORT_DMA_FENCE),1)
+$(PVRSRV_MODNAME)-y += \
+ services/server/env/linux/pvr_sync_file.o \
+ services/server/env/linux/pvr_counting_timeline.o \
+ services/server/env/linux/pvr_sw_fence.o \
+ services/server/env/linux/pvr_fence.o
+else
+$(PVRSRV_MODNAME)-y += services/server/env/linux/pvr_sync2.o
+endif
+else
+ifeq ($(SUPPORT_FALLBACK_FENCE_SYNC),1)
+$(PVRSRV_MODNAME)-y += \
+ services/server/common/sync_fallback_server.o \
+ services/server/env/linux/ossecure_export.o
+endif
+endif
+
+ifeq ($(SUPPORT_LINUX_DVFS),1)
+$(PVRSRV_MODNAME)-y += \
+ services/server/env/linux/pvr_dvfs_device.o
+endif
+
+ifeq ($(PVRSRV_ENABLE_PVR_ION_STATS),1)
+$(PVRSRV_MODNAME)-y += \
+ services/server/env/linux/pvr_ion_stats.o
+endif
+
+$(PVRSRV_MODNAME)-$(CONFIG_X86) += services/server/env/linux/osfunc_x86.o
+$(PVRSRV_MODNAME)-$(CONFIG_ARM) += services/server/env/linux/osfunc_arm.o
+$(PVRSRV_MODNAME)-$(CONFIG_ARM64) += services/server/env/linux/osfunc_arm64.o
+$(PVRSRV_MODNAME)-$(CONFIG_METAG) += services/server/env/linux/osfunc_metag.o
+$(PVRSRV_MODNAME)-$(CONFIG_MIPS) += services/server/env/linux/osfunc_mips.o
+$(PVRSRV_MODNAME)-$(CONFIG_RISCV) += services/server/env/linux/osfunc_riscv.o
+
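+# Debug interface backend selection: Android builds prefer procfs when
+# available and fall back to debugfs; all other builds prefer debugfs and
+# fall back to procfs.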
+ifeq ($(SUPPORT_ANDROID_PLATFORM),1)
+ ifeq ($(CONFIG_PROC_FS),y)
+ $(PVRSRV_MODNAME)-$(CONFIG_PROC_FS) += services/server/env/linux/pvr_procfs.o
+ else ifeq ($(CONFIG_DEBUG_FS),y)
+ $(PVRSRV_MODNAME)-$(CONFIG_DEBUG_FS) += services/server/env/linux/pvr_debugfs.o
+ endif
+else
+ ifeq ($(CONFIG_DEBUG_FS),y)
+ $(PVRSRV_MODNAME)-$(CONFIG_DEBUG_FS) += services/server/env/linux/pvr_debugfs.o
+ else ifeq ($(CONFIG_PROC_FS),y)
+ $(PVRSRV_MODNAME)-$(CONFIG_PROC_FS) += services/server/env/linux/pvr_procfs.o
+ endif
+endif
+
+ifeq ($(SUPPORT_DI_BRG_IMPL),1)
+$(PVRSRV_MODNAME)-y += services/server/common/di_impl_brg.o
+endif
+$(PVRSRV_MODNAME)-$(CONFIG_EVENT_TRACING) += services/server/env/linux/trace_events.o
+
+ccflags-y += -I$(OUT)/target_neutral/intermediates/firmware
+
+ifeq ($(SUPPORT_RGX),1)
+# Srvinit headers and source files
+
+$(PVRSRV_MODNAME)-y += \
+ services/server/devices/$(PVR_ARCH)/rgxsrvinit.o \
+ services/server/devices/rgxfwimageutils.o
+ifeq ($(PVR_ARCH),rogue)
+$(PVRSRV_MODNAME)-y += \
+ services/shared/devices/$(PVR_ARCH_DEFS)/rgx_hwperf_table.o
+endif
+endif
+
+$(PVRSRV_MODNAME)-y += \
+ services/system/$(PVR_ARCH)/common/env/linux/dma_support.o \
+ services/system/common/env/linux/interrupt_support.o
+
+$(PVRSRV_MODNAME)-$(CONFIG_PCI) += \
+ services/system/common/env/linux/pci_support.o
+
+ccflags-y += \
+ -I$(HWDEFS_DIR)/km
+ifeq ($(PVR_ARCH),rogue)
+ccflags-y += \
+ -I$(TOP)/include/$(PVR_ARCH_DEFS)
+endif
+ccflags-y += \
+ -I$(TOP)/include/$(PVR_ARCH) -I$(TOP)/include \
+ -I$(TOP)/include/$(PVR_ARCH)/public -I$(TOP)/include/public \
+ -I$(TOP)/services/include/$(PVR_ARCH) -I$(TOP)/services/include \
+ -I$(TOP)/services/shared/include \
+ -I$(TOP)/services/server/devices/$(PVR_ARCH) -I$(TOP)/services/server/devices \
+ -I$(TOP)/services/server/include/$(PVR_ARCH) -I$(TOP)/services/server/include \
+ -I$(TOP)/services/shared/common \
+ -I$(TOP)/services/shared/devices \
+ -I$(TOP)/services/system/include \
+ -I$(TOP)/services/system/$(PVR_ARCH)/include \
+ -I$(TOP)/services/server/common/$(PVR_ARCH) -I$(TOP)/services/server/common
+
+ifeq ($(KERNEL_DRIVER_DIR),)
+ ccflags-y += -I$(SYSTEM_DIR)
+endif
+
+# Bridge headers and source files
+
+# Keep in sync with:
+# build/linux/common/bridges.mk AND
+# services/bridge/Linux.mk
+
+ccflags-y += \
+ -I$(bridge_base)/mm_bridge \
+ -I$(bridge_base)/cmm_bridge \
+ -I$(bridge_base)/srvcore_bridge \
+ -I$(bridge_base)/sync_bridge \
+ -I$(bridge_base)/synctracking_bridge \
+ -I$(bridge_base)/htbuffer_bridge \
+ -I$(bridge_base)/pvrtl_bridge \
+ -I$(bridge_base)/cache_bridge \
+ -I$(bridge_base)/dmabuf_bridge
+
+ifeq ($(SUPPORT_DMA_TRANSFER),1)
+ccflags-y += \
+ -I$(bridge_base)/dma_bridge
+endif
+
+ifeq ($(SUPPORT_RGX),1)
+ccflags-y += \
+ -I$(bridge_base)/rgxta3d_bridge \
+ -I$(bridge_base)/rgxhwperf_bridge \
+ -I$(bridge_base)/rgxkicksync_bridge \
+ -I$(bridge_base)/rgxcmp_bridge \
+ -I$(bridge_base)/rgxregconfig_bridge \
+ -I$(bridge_base)/rgxtimerquery_bridge \
+ -I$(bridge_base)/rgxfwdbg_bridge
+ifeq ($(PVR_ARCH),volcanic)
+ccflags-y += \
+ -I$(bridge_base)/rgxray_bridge
+endif
+ifeq ($(PVR_ARCH),rogue)
+ccflags-y += \
+ -I$(bridge_base)/rgxtq_bridge
+endif
+# Oceanic does not support TDM
+ifneq ($(PVR_ARCH_DEFS),oceanic)
+ccflags-y += \
+ -I$(bridge_base)/rgxtq2_bridge
+endif
+ifeq ($(SUPPORT_USC_BREAKPOINT),1)
+ccflags-y += \
+ -I$(bridge_base)/rgxbreakpoint_bridge
+endif
+endif
+
+$(PVRSRV_MODNAME)-y += \
+ generated/$(PVR_ARCH)/mm_bridge/server_mm_bridge.o \
+ generated/$(PVR_ARCH)/cmm_bridge/server_cmm_bridge.o \
+ generated/$(PVR_ARCH)/srvcore_bridge/server_srvcore_bridge.o \
+ generated/$(PVR_ARCH)/sync_bridge/server_sync_bridge.o \
+ generated/$(PVR_ARCH)/htbuffer_bridge/server_htbuffer_bridge.o \
+ generated/$(PVR_ARCH)/pvrtl_bridge/server_pvrtl_bridge.o \
+ generated/$(PVR_ARCH)/cache_bridge/server_cache_bridge.o \
+ generated/$(PVR_ARCH)/dmabuf_bridge/server_dmabuf_bridge.o
+
+ifeq ($(SUPPORT_DMA_TRANSFER),1)
+$(PVRSRV_MODNAME)-y += \
+ generated/$(PVR_ARCH)/dma_bridge/server_dma_bridge.o
+endif
+
+ifeq ($(SUPPORT_RGX),1)
+$(PVRSRV_MODNAME)-y += \
+ generated/$(PVR_ARCH)/rgxta3d_bridge/server_rgxta3d_bridge.o \
+ generated/$(PVR_ARCH)/rgxhwperf_bridge/server_rgxhwperf_bridge.o \
+ generated/$(PVR_ARCH)/rgxkicksync_bridge/server_rgxkicksync_bridge.o \
+ generated/$(PVR_ARCH)/rgxcmp_bridge/server_rgxcmp_bridge.o \
+ generated/$(PVR_ARCH)/rgxregconfig_bridge/server_rgxregconfig_bridge.o \
+ generated/$(PVR_ARCH)/rgxtimerquery_bridge/server_rgxtimerquery_bridge.o \
+ generated/$(PVR_ARCH)/rgxfwdbg_bridge/server_rgxfwdbg_bridge.o
+ifeq ($(PVR_ARCH),volcanic)
+$(PVRSRV_MODNAME)-y += \
+ generated/$(PVR_ARCH)/rgxray_bridge/server_rgxray_bridge.o
+endif
+ifeq ($(PVR_ARCH),rogue)
+$(PVRSRV_MODNAME)-y += \
+ generated/$(PVR_ARCH)/rgxtq_bridge/server_rgxtq_bridge.o
+endif
+# Oceanic does not support TDM
+ifneq ($(PVR_ARCH_DEFS),oceanic)
+$(PVRSRV_MODNAME)-y += \
+ generated/$(PVR_ARCH)/rgxtq2_bridge/server_rgxtq2_bridge.o
+endif
+ifeq ($(SUPPORT_USC_BREAKPOINT),1)
+$(PVRSRV_MODNAME)-y += \
+ generated/$(PVR_ARCH)/rgxbreakpoint_bridge/server_rgxbreakpoint_bridge.o
+endif
+endif
+
+ifeq ($(SUPPORT_WRAP_EXTMEM),1)
+ccflags-y += -I$(bridge_base)/mmextmem_bridge
+$(PVRSRV_MODNAME)-y += generated/$(PVR_ARCH)/mmextmem_bridge/server_mmextmem_bridge.o
+endif
+
+ifeq ($(SUPPORT_DISPLAY_CLASS),1)
+ccflags-y += -I$(bridge_base)/dc_bridge
+$(PVRSRV_MODNAME)-y += generated/$(PVR_ARCH)/dc_bridge/server_dc_bridge.o
+endif
+
+ifeq ($(SUPPORT_SECURE_EXPORT),1)
+ccflags-y += -I$(bridge_base)/smm_bridge
+$(PVRSRV_MODNAME)-y += generated/$(PVR_ARCH)/smm_bridge/server_smm_bridge.o
+endif
+
+ifeq ($(PDUMP),1)
+ccflags-y += \
+ -I$(bridge_base)/pdump_bridge \
+ -I$(bridge_base)/pdumpctrl_bridge \
+ -I$(bridge_base)/pdumpmm_bridge
+
+ifeq ($(SUPPORT_RGX),1)
+ccflags-y += \
+ -I$(bridge_base)/rgxpdump_bridge
+
+$(PVRSRV_MODNAME)-y += \
+ generated/$(PVR_ARCH)/rgxpdump_bridge/server_rgxpdump_bridge.o
+endif
+
+$(PVRSRV_MODNAME)-y += \
+ generated/$(PVR_ARCH)/pdump_bridge/server_pdump_bridge.o \
+ generated/$(PVR_ARCH)/pdumpctrl_bridge/server_pdumpctrl_bridge.o \
+ generated/$(PVR_ARCH)/pdumpmm_bridge/server_pdumpmm_bridge.o
+endif
+
+ifeq ($(PVRSRV_ENABLE_GPU_MEMORY_INFO),1)
+ccflags-y += -I$(bridge_base)/ri_bridge
+$(PVRSRV_MODNAME)-y += generated/$(PVR_ARCH)/ri_bridge/server_ri_bridge.o
+endif
+
+ifeq ($(SUPPORT_VALIDATION),1)
+ccflags-y += -I$(bridge_base)/validation_bridge
+$(PVRSRV_MODNAME)-y += generated/$(PVR_ARCH)/validation_bridge/server_validation_bridge.o
+$(PVRSRV_MODNAME)-y += services/server/common/validation.o
+ifeq ($(PVR_ARCH),volcanic)
+$(PVRSRV_MODNAME)-y += services/server/common/validation_soc.o
+endif
+endif
+
+ifeq ($(PVR_TESTING_UTILS),1)
+ccflags-y += -I$(bridge_base)/tutils_bridge
+$(PVRSRV_MODNAME)-y += generated/$(PVR_ARCH)/tutils_bridge/server_tutils_bridge.o
+endif
+
+ccflags-y += -I$(bridge_base)/devicememhistory_bridge
+$(PVRSRV_MODNAME)-y += \
+ generated/$(PVR_ARCH)/devicememhistory_bridge/server_devicememhistory_bridge.o
+
+ccflags-y += -I$(bridge_base)/synctracking_bridge
+$(PVRSRV_MODNAME)-y += \
+ generated/$(PVR_ARCH)/synctracking_bridge/server_synctracking_bridge.o
+
+ifeq ($(SUPPORT_FALLBACK_FENCE_SYNC),1)
+ccflags-y += \
+ -I$(bridge_base)/syncfallback_bridge
+$(PVRSRV_MODNAME)-y += generated/$(PVR_ARCH)/syncfallback_bridge/server_syncfallback_bridge.o
+endif
+
+ifeq ($(SUPPORT_DI_BRG_IMPL),1)
+ccflags-y += -I$(bridge_base)/di_bridge
+$(PVRSRV_MODNAME)-y += generated/$(PVR_ARCH)/di_bridge/server_di_bridge.o
+endif
+
+
+# Direct bridges
+
+$(PVRSRV_MODNAME)-y += \
+ generated/$(PVR_ARCH)/mm_bridge/client_mm_direct_bridge.o \
+ generated/$(PVR_ARCH)/sync_bridge/client_sync_direct_bridge.o \
+ generated/$(PVR_ARCH)/htbuffer_bridge/client_htbuffer_direct_bridge.o \
+ generated/$(PVR_ARCH)/cache_bridge/client_cache_direct_bridge.o \
+ generated/$(PVR_ARCH)/pvrtl_bridge/client_pvrtl_direct_bridge.o
+
+ifeq ($(PDUMP),1)
+$(PVRSRV_MODNAME)-y += generated/$(PVR_ARCH)/pdumpmm_bridge/client_pdumpmm_direct_bridge.o
+endif
+
+ifeq ($(PVRSRV_ENABLE_GPU_MEMORY_INFO),1)
+$(PVRSRV_MODNAME)-y += generated/$(PVR_ARCH)/ri_bridge/client_ri_direct_bridge.o
+endif
+
+ifeq ($(PDUMP),1)
+ $(PVRSRV_MODNAME)-y += \
+  generated/$(PVR_ARCH)/pdump_bridge/client_pdump_direct_bridge.o \
+  generated/$(PVR_ARCH)/pdumpctrl_bridge/client_pdumpctrl_direct_bridge.o
+
+ifeq ($(SUPPORT_RGX),1)
+ $(PVRSRV_MODNAME)-y += \
+  generated/$(PVR_ARCH)/rgxpdump_bridge/client_rgxpdump_direct_bridge.o
+endif
+
+endif
+
+# Enable -Werror for all built object files
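+# (The foreach below appends -Werror to the per-object CFLAGS_<object>
+# variable of every object in the module; build with W=1 to skip this.)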
+ifneq ($(W),1)
+$(foreach _o,$(addprefix CFLAGS_,$($(PVRSRV_MODNAME)-y)),$(eval $(_o) += -Werror))
+endif
+
+$(PVRSRV_MODNAME)-y += \
+ generated/$(PVR_ARCH)/devicememhistory_bridge/client_devicememhistory_direct_bridge.o
+
+$(PVRSRV_MODNAME)-y += \
+ generated/$(PVR_ARCH)/synctracking_bridge/client_synctracking_direct_bridge.o
+
+# Ignore address-of-packed-member warning for all bridge files
+$(foreach _o,$(addprefix CFLAGS_,$(filter generated/%.o,$($(PVRSRV_MODNAME)-y))),$(eval $(_o) += -Wno-address-of-packed-member))
+
+# With certain build configurations (e.g. ARM with -Werror) the ftrace Linux
+# kernel header fails to build, so disable the missing-prototypes check for
+# the file that includes it.
+CFLAGS_services/server/env/linux/trace_events.o := -Wno-missing-prototypes
+
+# Make sure mem_utils is built in freestanding mode, so the compiler is not
+# tempted to replace memory operations with calls to C library functions.
+ifeq ($(CC),clang)
+ ifneq ($(SUPPORT_ANDROID_PLATFORM),1)
+  CFLAGS_services/shared/common/mem_utils.o := -ffreestanding -fforce-enable-int128
+ else
+  CFLAGS_services/shared/common/mem_utils.o := -ffreestanding
+ endif
+endif
+
+# The Chrome OS kernel enables extra warnings that flag ignored qualifiers,
+# so suppress them here.
+ccflags-y += -Wno-ignored-qualifiers
+
+# Keep #warning directives as warnings rather than promoting them to errors
+ccflags-y += -Wno-error=cpp
+
+include $(SYSTEM_DIR)/Kbuild.mk
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/Linux.mk b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/Linux.mk
new file mode 100644 (file)
index 0000000..f18461d
--- /dev/null
@@ -0,0 +1,50 @@
+########################################################################### ###
+#@File
+#@Copyright     Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+#@License       Dual MIT/GPLv2
+#
+# The contents of this file are subject to the MIT license as set out below.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# Alternatively, the contents of this file may be used under the terms of
+# the GNU General Public License Version 2 ("GPL") in which case the provisions
+# of GPL are applicable instead of those above.
+#
+# If you wish to allow use of your version of this file only under the terms of
+# GPL, and not to allow others to use your version of this file under the terms
+# of the MIT license, indicate your decision by deleting the provisions above
+# and replace them with the notice and other provisions required by GPL as set
+# out in the file called "GPL-COPYING" included in this distribution. If you do
+# not delete the provisions above, a recipient may use your version of this file
+# under the terms of either the MIT license or GPL.
+#
+# This License is also included in this distribution in the file called
+# "MIT-COPYING".
+#
+# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+### ###########################################################################
+
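+# Describe the services kernel module to the build system: srvkm is built as
+# $(PVRSRV_MODNAME).ko from the Kbuild.mk alongside this file, and is skipped
+# when SERVICES_SC=1.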
+ifneq ($(SERVICES_SC),1)
+
+modules := srvkm
+
+srvkm_type := kernel_module
+srvkm_target := $(PVRSRV_MODNAME).ko
+srvkm_makefile := $(THIS_DIR)/Kbuild.mk
+
+endif # SERVICES_SC
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/allocmem.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/allocmem.c
new file mode 100644 (file)
index 0000000..5d7c85d
--- /dev/null
@@ -0,0 +1,422 @@
+/*************************************************************************/ /*!
+@File
+@Title          Host memory management implementation for Linux
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include "img_defs.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "process_stats.h"
+#if defined(DEBUG) && defined(SUPPORT_VALIDATION)
+#include "pvrsrv.h"
+#endif
+#include "osfunc.h"
+
+
+/*
+ * When memory statistics are disabled, memory records are used instead.
+ * In order for these to work, the PID of the process that requested the
+ * allocation needs to be stored at the end of the kmalloc'd memory, making
+ * sure 4 extra bytes are allocated to fit the PID.
+ *
+ * There is no need for this extra allocation when memory statistics are
+ * enabled, since all allocations are tracked in DebugFS mem_area files.
+ */
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS)
+#define ALLOCMEM_MEMSTATS_PADDING sizeof(IMG_UINT32)
+#else
+#define ALLOCMEM_MEMSTATS_PADDING 0UL
+#endif
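+
+/* Illustrative layout of a kmalloc'd block when the padding is in use:
+ *   [ caller's data ... | PID of the allocating process (IMG_UINT32) ]
+ * The PID occupies the last ALLOCMEM_MEMSTATS_PADDING bytes of ksize(pvAddr),
+ * as written by _pvr_alloc_stats_add() and read by _pvr_alloc_stats_remove().
+ */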
+
+/* How many times kmalloc can fail before the allocation threshold is reduced */
+static const IMG_UINT32 g_ui32kmallocFailLimit = 10;
+/* How many kmalloc failures happened since the last allocation threshold change */
+static IMG_UINT32 g_ui32kmallocFailCount = 0;
+/* Current kmalloc threshold value in bytes */
+static IMG_UINT32 g_ui32kmallocThreshold = PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD;
+/* Spinlock serialising updates to the global variables above */
+static DEFINE_SPINLOCK(kmalloc_lock);
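+
+/* Allocation policy implemented below: requests at or below
+ * g_ui32kmallocThreshold are attempted with kmalloc first; after
+ * g_ui32kmallocFailLimit consecutive kmalloc failures the threshold is
+ * halved (but never drops below PAGE_SIZE), and any successful kmalloc
+ * resets the failure count. Requests above the threshold, or failed
+ * kmalloc attempts, fall back to vmalloc.
+ */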
+
+#if defined(DEBUG) && defined(SUPPORT_VALIDATION)
+static DEFINE_SPINLOCK(kmalloc_leak_lock);
+static IMG_UINT32 g_ui32kmallocLeakCounter = 0;
+#endif
+
+static inline void OSTryDecreaseKmallocThreshold(void)
+{
+       unsigned long flags;
+       spin_lock_irqsave(&kmalloc_lock, flags);
+
+       g_ui32kmallocFailCount++;
+
+       if (g_ui32kmallocFailCount >= g_ui32kmallocFailLimit)
+       {
+               g_ui32kmallocFailCount = 0;
+               if (g_ui32kmallocThreshold > PAGE_SIZE)
+               {
+                       g_ui32kmallocThreshold >>= 1;
+                       printk(KERN_INFO "kmalloc threshold is now set to %u\n", g_ui32kmallocThreshold);
+               }
+       }
+
+       spin_unlock_irqrestore(&kmalloc_lock, flags);
+}
+
+static inline void OSResetKmallocFailCount(void)
+{
+       unsigned long flags;
+       spin_lock_irqsave(&kmalloc_lock, flags);
+
+       g_ui32kmallocFailCount = 0;
+
+       spin_unlock_irqrestore(&kmalloc_lock, flags);
+}
+
+static inline void _pvr_vfree(const void* pvAddr)
+{
+#if defined(DEBUG)
+       /* The exact allocation size is not readily available for vmalloc.
+        * Since vmalloc allocates a whole number of pages, poison at least
+        * one page, the minimum size known to have been allocated.
+        */
+       OSCachedMemSet((void*)pvAddr, PVRSRV_POISON_ON_ALLOC_VALUE,
+                      PAGE_SIZE);
+#endif
+       vfree(pvAddr);
+}
+
+static inline void _pvr_kfree(const void* pvAddr)
+{
+#if defined(DEBUG)
+       /* Poison whole memory block */
+       OSCachedMemSet((void*)pvAddr, PVRSRV_POISON_ON_ALLOC_VALUE,
+                      ksize(pvAddr));
+#endif
+       kfree(pvAddr);
+}
+
+static inline void _pvr_alloc_stats_add(void *pvAddr, IMG_UINT32 ui32Size DEBUG_MEMSTATS_PARAMS)
+{
+#if !defined(PVRSRV_ENABLE_PROCESS_STATS)
+       PVR_UNREFERENCED_PARAMETER(pvAddr);
+#else
+       if (!is_vmalloc_addr(pvAddr))
+       {
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+               IMG_CPU_PHYADDR sCpuPAddr;
+               sCpuPAddr.uiAddr = 0;
+
+               PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+                                                                         pvAddr,
+                                                                         sCpuPAddr,
+                                                                         ksize(pvAddr),
+                                                                         NULL,
+                                                                         OSGetCurrentClientProcessIDKM()
+                                                                         DEBUG_MEMSTATS_ARGS);
+#else
+               {
+                       /* Store the PID in the final additional 4 bytes allocated */
+                       IMG_UINT32 *puiTemp = IMG_OFFSET_ADDR(pvAddr, ksize(pvAddr) - ALLOCMEM_MEMSTATS_PADDING);
+                       *puiTemp = OSGetCurrentClientProcessIDKM();
+               }
+               PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, ksize(pvAddr), OSGetCurrentClientProcessIDKM());
+#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */
+       }
+       else
+       {
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+               IMG_CPU_PHYADDR sCpuPAddr;
+               sCpuPAddr.uiAddr = 0;
+
+               PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+                                                                         pvAddr,
+                                                                         sCpuPAddr,
+                                                                         ((ui32Size + PAGE_SIZE-1) & ~(PAGE_SIZE-1)),
+                                                                         NULL,
+                                                                         OSGetCurrentClientProcessIDKM()
+                                                                         DEBUG_MEMSTATS_ARGS);
+#else
+               PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+                                                   ((ui32Size + PAGE_SIZE-1) & ~(PAGE_SIZE-1)),
+                                                   (IMG_UINT64)(uintptr_t) pvAddr,
+                                                   OSGetCurrentClientProcessIDKM());
+#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */
+       }
+#endif /* !defined(PVRSRV_ENABLE_PROCESS_STATS) */
+}
+
+static inline void _pvr_alloc_stats_remove(void *pvAddr)
+{
+#if !defined(PVRSRV_ENABLE_PROCESS_STATS)
+       PVR_UNREFERENCED_PARAMETER(pvAddr);
+#else
+       if (!is_vmalloc_addr(pvAddr))
+       {
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+               {
+                       IMG_UINT32 *puiTemp = IMG_OFFSET_ADDR(pvAddr, ksize(pvAddr) - ALLOCMEM_MEMSTATS_PADDING);
+                       PVRSRVStatsDecrMemKAllocStat(ksize(pvAddr), *puiTemp);
+               }
+#else
+               PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+                                               (IMG_UINT64)(uintptr_t) pvAddr,
+                                               OSGetCurrentClientProcessIDKM());
+#endif
+       }
+       else
+       {
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+               PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+                                                     (IMG_UINT64)(uintptr_t) pvAddr);
+#else
+               PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+                                               (IMG_UINT64)(uintptr_t) pvAddr,
+                                               OSGetCurrentClientProcessIDKM());
+#endif
+       }
+#endif /* !defined(PVRSRV_ENABLE_PROCESS_STATS) */
+}
+
+void *(OSAllocMem)(IMG_UINT32 ui32Size DEBUG_MEMSTATS_PARAMS)
+{
+       void *pvRet = NULL;
+
+       if ((ui32Size + ALLOCMEM_MEMSTATS_PADDING) <= g_ui32kmallocThreshold)
+       {
+               pvRet = kmalloc(ui32Size + ALLOCMEM_MEMSTATS_PADDING, GFP_KERNEL);
+               if (pvRet == NULL)
+               {
+                       OSTryDecreaseKmallocThreshold();
+               }
+               else
+               {
+                       OSResetKmallocFailCount();
+               }
+       }
+
+       if (pvRet == NULL)
+       {
+               pvRet = vmalloc(ui32Size);
+       }
+
+       if (pvRet != NULL)
+       {
+               _pvr_alloc_stats_add(pvRet, ui32Size DEBUG_MEMSTATS_ARGS);
+       }
+
+       return pvRet;
+}
+
+void *(OSAllocZMem)(IMG_UINT32 ui32Size DEBUG_MEMSTATS_PARAMS)
+{
+       void *pvRet = NULL;
+
+       if ((ui32Size + ALLOCMEM_MEMSTATS_PADDING) <= g_ui32kmallocThreshold)
+       {
+               pvRet = kzalloc(ui32Size + ALLOCMEM_MEMSTATS_PADDING, GFP_KERNEL);
+               if (pvRet == NULL)
+               {
+                       OSTryDecreaseKmallocThreshold();
+               }
+               else
+               {
+                       OSResetKmallocFailCount();
+               }
+       }
+
+       if (pvRet == NULL)
+       {
+               pvRet = vzalloc(ui32Size);
+       }
+
+       if (pvRet != NULL)
+       {
+               _pvr_alloc_stats_add(pvRet, ui32Size DEBUG_MEMSTATS_ARGS);
+       }
+
+       return pvRet;
+}
+
+/*
+ * The parentheses around OSFreeMem prevent the macro in allocmem.h from
+ * applying, as it would break the function's definition.
+ */
+void (OSFreeMem)(void *pvMem)
+{
+#if defined(DEBUG) && defined(SUPPORT_VALIDATION)
+       unsigned long flags;
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+       if (psPVRSRVData)
+       {
+               IMG_UINT32 ui32kmallocLeakMax = psPVRSRVData->sMemLeakIntervals.ui32OSAlloc;
+
+               spin_lock_irqsave(&kmalloc_leak_lock, flags);
+
+               g_ui32kmallocLeakCounter++;
+               if (ui32kmallocLeakMax && (g_ui32kmallocLeakCounter >= ui32kmallocLeakMax))
+               {
+                       g_ui32kmallocLeakCounter = 0;
+                       spin_unlock_irqrestore(&kmalloc_leak_lock, flags);
+
+                       PVR_DPF((PVR_DBG_WARNING,
+                                "%s: Skipped freeing of pointer 0x%p to trigger memory leak.",
+                                __func__,
+                                pvMem));
+                       return;
+               }
+
+               spin_unlock_irqrestore(&kmalloc_leak_lock, flags);
+       }
+#endif
+       if (pvMem != NULL)
+       {
+               _pvr_alloc_stats_remove(pvMem);
+
+               if (!is_vmalloc_addr(pvMem))
+               {
+                       _pvr_kfree(pvMem);
+               }
+               else
+               {
+                       _pvr_vfree(pvMem);
+               }
+       }
+}
+
+void *OSAllocMemNoStats(IMG_UINT32 ui32Size)
+{
+       void *pvRet = NULL;
+
+       if (ui32Size <= g_ui32kmallocThreshold)
+       {
+               pvRet = kmalloc(ui32Size, GFP_KERNEL);
+               if (pvRet == NULL)
+               {
+                       OSTryDecreaseKmallocThreshold();
+               }
+               else
+               {
+                       OSResetKmallocFailCount();
+               }
+       }
+
+       if (pvRet == NULL)
+       {
+               pvRet = vmalloc(ui32Size);
+       }
+
+       return pvRet;
+}
+
+void *OSAllocZMemNoStats(IMG_UINT32 ui32Size)
+{
+       void *pvRet = NULL;
+
+       if (ui32Size <= g_ui32kmallocThreshold)
+       {
+               pvRet = kzalloc(ui32Size, GFP_KERNEL);
+               if (pvRet == NULL)
+               {
+                       OSTryDecreaseKmallocThreshold();
+               }
+               else
+               {
+                       OSResetKmallocFailCount();
+               }
+       }
+
+       if (pvRet == NULL)
+       {
+               pvRet = vzalloc(ui32Size);
+       }
+
+       return pvRet;
+}
+
+/*
+ * The parentheses around OSFreeMemNoStats prevent the macro in allocmem.h from
+ * applying, as it would break the function's definition.
+ */
+void (OSFreeMemNoStats)(void *pvMem)
+{
+#if defined(DEBUG) && defined(SUPPORT_VALIDATION)
+       unsigned long flags;
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+       if (psPVRSRVData)
+       {
+               IMG_UINT32 ui32kmallocLeakMax = psPVRSRVData->sMemLeakIntervals.ui32OSAlloc;
+
+               spin_lock_irqsave(&kmalloc_leak_lock, flags);
+
+               g_ui32kmallocLeakCounter++;
+               if (ui32kmallocLeakMax && (g_ui32kmallocLeakCounter >= ui32kmallocLeakMax))
+               {
+                       g_ui32kmallocLeakCounter = 0;
+                       spin_unlock_irqrestore(&kmalloc_leak_lock, flags);
+
+                       PVR_DPF((PVR_DBG_WARNING,
+                                "%s: Skipped freeing of pointer 0x%p to trigger memory leak.",
+                                __func__,
+                                pvMem));
+                       return;
+               }
+
+               spin_unlock_irqrestore(&kmalloc_leak_lock, flags);
+       }
+#endif
+       if (pvMem != NULL)
+       {
+               if (!is_vmalloc_addr(pvMem))
+               {
+                       _pvr_kfree(pvMem);
+               }
+               else
+               {
+                       _pvr_vfree(pvMem);
+               }
+       }
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/env_connection.h b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/env_connection.h
new file mode 100644 (file)
index 0000000..2a6c7d0
--- /dev/null
@@ -0,0 +1,92 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server side connection management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Linux specific server side connection management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(ENV_CONNECTION_H)
+#define ENV_CONNECTION_H
+
+#include <linux/version.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+#include "handle.h"
+#include "pvr_debug.h"
+#include "device.h"
+
+#if defined(SUPPORT_ION)
+#include PVR_ANDROID_ION_HEADER
+#include "ion_sys.h"
+#include "allocmem.h"
+#endif
+
+typedef struct _ENV_CONNECTION_PRIVATE_DATA_
+{
+       PVRSRV_DEVICE_NODE *psDevNode;
+} ENV_CONNECTION_PRIVATE_DATA;
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+#define ION_CLIENT_NAME_SIZE   50
+
+typedef struct _ENV_ION_CONNECTION_DATA_
+{
+       IMG_CHAR azIonClientName[ION_CLIENT_NAME_SIZE];
+       struct ion_device *psIonDev;
+       struct ion_client *psIonClient;
+} ENV_ION_CONNECTION_DATA;
+#endif
+
+typedef struct _ENV_CONNECTION_DATA_
+{
+       pid_t owner;
+
+       PVRSRV_DEVICE_NODE *psDevNode;
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+       void *pvPvrSyncPrivateData;
+#endif
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+       ENV_ION_CONNECTION_DATA *psIonData;
+#endif
+} ENV_CONNECTION_DATA;
+
+#endif /* !defined(ENV_CONNECTION_H) */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/event.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/event.c
new file mode 100644 (file)
index 0000000..aec0fc8
--- /dev/null
@@ -0,0 +1,514 @@
+/*************************************************************************/ /*!
+@File
+@Title          Event Object
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <asm/page.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)
+#include <linux/sched/signal.h>
+#endif
+#include <linux/interrupt.h>
+#include <asm/hardirq.h>
+#include <linux/timer.h>
+#include <linux/capability.h>
+#include <linux/freezer.h>
+#include <linux/uaccess.h>
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "allocmem.h"
+#include "event.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "pvr_bridge_k.h"
+
+#include "osfunc.h"
+
+/* Uncomment to enable event object stats that are useful for debugging.
+ * The stats can be retrieved at any time during the lifetime of the event
+ * object using the OSEventObjectDumpdebugInfo API. */
+// #define LINUX_EVENT_OBJECT_STATS
+
+
+typedef struct PVRSRV_LINUX_EVENT_OBJECT_LIST_TAG
+{
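+# Fence sync implementation selection: with native fence sync enabled, the
+# dma-fence based pvr_sync_file path is built when SUPPORT_DMA_FENCE=1 and the
+# legacy pvr_sync2 path otherwise; without native fence sync, the fallback
+# fence implementation can be built instead.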
+       rwlock_t sLock;
+       /* Counts how many times event object was signalled i.e. how many times
+        * LinuxEventObjectSignal() was called on a given event object.
+        * Used for detecting pending signals.
+        * Note that this is in no way related to OS signals. */
+       atomic_t sEventSignalCount;
+       struct list_head sList;
+} PVRSRV_LINUX_EVENT_OBJECT_LIST;
+
+
+typedef struct PVRSRV_LINUX_EVENT_OBJECT_TAG
+{
+       IMG_UINT32 ui32EventSignalCountPrevious;
+#if defined(DEBUG)
+       IMG_UINT ui32Stats;
+#endif
+
+#ifdef LINUX_EVENT_OBJECT_STATS
+       POS_LOCK hLock;
+       IMG_UINT32 ui32ScheduleAvoided;
+       IMG_UINT32 ui32ScheduleCalled;
+       IMG_UINT32 ui32ScheduleSleptFully;
+       IMG_UINT32 ui32ScheduleSleptPartially;
+       IMG_UINT32 ui32ScheduleReturnedImmediately;
+#endif
+       wait_queue_head_t sWait;
+       struct list_head sList;
+       PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList;
+} PVRSRV_LINUX_EVENT_OBJECT;
+
+/*!
+******************************************************************************
+
+ @Function     LinuxEventObjectListCreate
+
+ @Description
+
+ Linux wait object list creation
+
+ @Output    phEventObjectList : Pointer to the event object list handle
+
+ @Return   PVRSRV_ERROR  :  Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList)
+{
+       PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList;
+
+       psEvenObjectList = OSAllocMem(sizeof(*psEvenObjectList));
+       if (psEvenObjectList == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectCreate: failed to allocate memory for event list"));
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       INIT_LIST_HEAD(&psEvenObjectList->sList);
+
+       rwlock_init(&psEvenObjectList->sLock);
+       atomic_set(&psEvenObjectList->sEventSignalCount, 0);
+
+       *phEventObjectList = (IMG_HANDLE) psEvenObjectList;
+
+       return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function     LinuxEventObjectListDestroy
+
+ @Description
+
+ Linux wait object list destruction
+
+ @Input    hEventObjectList : Event object list handle
+
+ @Return   PVRSRV_ERROR  :  Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList)
+{
+       PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST *) hEventObjectList;
+
+       if (psEvenObjectList)
+       {
+               if (!list_empty(&psEvenObjectList->sList))
+               {
+                        PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectListDestroy: Event List is not empty"));
+                        return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT;
+               }
+               OSFreeMem(psEvenObjectList);
+               /* Not NULLing the pointer as it is a copy on the stack. */
+       }
+       return PVRSRV_OK;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function     LinuxEventObjectDelete
+
+ @Description
+
+ Linux wait object removal
+
+ @Input    hOSEventObject : Event object handle
+
+ @Return   PVRSRV_ERROR  :  Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObject)
+{
+       if (hOSEventObject)
+       {
+               PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)hOSEventObject;
+               PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = psLinuxEventObject->psLinuxEventObjectList;
+
+               write_lock_bh(&psLinuxEventObjectList->sLock);
+               list_del(&psLinuxEventObject->sList);
+               write_unlock_bh(&psLinuxEventObjectList->sLock);
+
+#ifdef LINUX_EVENT_OBJECT_STATS
+               OSLockDestroy(psLinuxEventObject->hLock);
+#endif
+
+#if defined(DEBUG)
+//             PVR_DPF((PVR_DBG_MESSAGE, "LinuxEventObjectDelete: Event object waits: %u", psLinuxEventObject->ui32Stats));
+#endif
+
+               OSFreeMem(psLinuxEventObject);
+               /* Not NULLing the pointer as it is a copy on the stack. */
+
+               return PVRSRV_OK;
+       }
+       return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT;
+}
+
+/*!
+******************************************************************************
+
+ @Function     LinuxEventObjectAdd
+
+ @Description
+
+ Linux wait object addition
+
+ @Input    hOSEventObjectList : Event object list handle
+ @Output   phOSEventObject : Pointer to the event object handle
+
+ @Return   PVRSRV_ERROR  :  Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject)
+ {
+       PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
+       PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList;
+
+       /* allocate completion variable */
+       psLinuxEventObject = OSAllocMem(sizeof(*psLinuxEventObject));
+       if (psLinuxEventObject == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed to allocate memory"));
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       INIT_LIST_HEAD(&psLinuxEventObject->sList);
+
+       /* Record the list's signal count at the time the event object is added */
+       psLinuxEventObject->ui32EventSignalCountPrevious = atomic_read(&psLinuxEventObjectList->sEventSignalCount);
+
+#ifdef LINUX_EVENT_OBJECT_STATS
+       PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&psLinuxEventObject->hLock), "OSLockCreate");
+       psLinuxEventObject->ui32ScheduleAvoided = 0;
+       psLinuxEventObject->ui32ScheduleCalled = 0;
+       psLinuxEventObject->ui32ScheduleSleptFully = 0;
+       psLinuxEventObject->ui32ScheduleSleptPartially = 0;
+       psLinuxEventObject->ui32ScheduleReturnedImmediately = 0;
+#endif
+
+#if defined(DEBUG)
+       psLinuxEventObject->ui32Stats = 0;
+#endif
+       init_waitqueue_head(&psLinuxEventObject->sWait);
+
+       psLinuxEventObject->psLinuxEventObjectList = psLinuxEventObjectList;
+
+       write_lock_bh(&psLinuxEventObjectList->sLock);
+       list_add(&psLinuxEventObject->sList, &psLinuxEventObjectList->sList);
+       write_unlock_bh(&psLinuxEventObjectList->sLock);
+
+       *phOSEventObject = psLinuxEventObject;
+
+       return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function     LinuxEventObjectSignal
+
+ @Description
+
+ Linux wait object signaling function
+
+ @Input    hOSEventObjectList : Event object list handle
+
+ @Return   PVRSRV_ERROR  :  Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList)
+{
+       PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
+       PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList;
+       struct list_head *psListEntry, *psListEntryTemp, *psList;
+       psList = &psLinuxEventObjectList->sList;
+
+       /* Increment the signal count for this call up front, so that a potential
+        * "Wait" on any of the event objects does not have to wait for the wake-up
+        * to occur before returning. Incrementing the count early reduces the
+        * window in which a concurrent "Wait" call might block while this Signal
+        * call is still being processed. */
+       atomic_inc(&psLinuxEventObjectList->sEventSignalCount);
+
+       read_lock_bh(&psLinuxEventObjectList->sLock);
+       list_for_each_safe(psListEntry, psListEntryTemp, psList)
+       {
+               psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)list_entry(psListEntry, PVRSRV_LINUX_EVENT_OBJECT, sList);
+               wake_up_interruptible(&psLinuxEventObject->sWait);
+       }
+       read_unlock_bh(&psLinuxEventObjectList->sLock);
+
+       return PVRSRV_OK;
+}
+
+static void _TryToFreeze(void)
+{
+       /* If the count reaches zero it means that all of the threads have
+        * called try_to_freeze(). */
+       LinuxBridgeNumActiveKernelThreadsDecrement();
+
+       /* try_to_freeze() returns true if the thread was frozen. Should we do
+        * anything with this information? What do we return? Which one is the
+        * error case? */
+       try_to_freeze();
+
+       LinuxBridgeNumActiveKernelThreadsIncrement();
+}
+
+void LinuxEventObjectDumpDebugInfo(IMG_HANDLE hOSEventObject)
+{
+#ifdef LINUX_EVENT_OBJECT_STATS
+       PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)hOSEventObject;
+
+       OSLockAcquire(psLinuxEventObject->hLock);
+       PVR_LOG(("%s: EvObj(%p) schedule: Avoided(%u) Called(%u) ReturnedImmediately(%u) SleptFully(%u) SleptPartially(%u)",
+                __func__, psLinuxEventObject, psLinuxEventObject->ui32ScheduleAvoided,
+                        psLinuxEventObject->ui32ScheduleCalled, psLinuxEventObject->ui32ScheduleReturnedImmediately,
+                        psLinuxEventObject->ui32ScheduleSleptFully, psLinuxEventObject->ui32ScheduleSleptPartially));
+       OSLockRelease(psLinuxEventObject->hLock);
+#else
+       PVR_LOG(("%s: LINUX_EVENT_OBJECT_STATS disabled!", __func__));
+#endif
+}
+
+/*!
+******************************************************************************
+
+ @Function     LinuxEventObjectWait
+
+ @Description
+
+ Linux wait object routine
+
+ @Input    hOSEventObject : Event object handle
+
+ @Input   ui64Timeoutus : Timeout value in microseconds
+
+ @Input   bFreezable : Whether the waiting thread may enter the freezer
+                       (e.g. during system suspend)
+
+ @Return   PVRSRV_ERROR  :  Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject,
+                                  IMG_UINT64 ui64Timeoutus,
+                                  IMG_BOOL bFreezable)
+{
+       IMG_UINT32 ui32EventSignalCount;
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+       IMG_UINT32 ui32Remainder;
+       long timeOutJiffies;
+#ifdef LINUX_EVENT_OBJECT_STATS
+       long totalTimeoutJiffies;
+       IMG_BOOL bScheduleCalled = IMG_FALSE;
+#endif
+
+       DEFINE_WAIT(sWait);
+
+       PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject;
+       PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = psLinuxEventObject->psLinuxEventObjectList;
+
+       /* Check if the driver is in good shape */
+       if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+       {
+               return PVRSRV_ERROR_TIMEOUT;
+       }
+
+       /* usecs_to_jiffies() only takes an unsigned int, so if the timeout does
+        * not fit in one, use the msec variant instead. With such a long timeout
+        * the high resolution of usecs is not needed anyway. */
+       if (ui64Timeoutus > 0xffffffffULL)
+               timeOutJiffies = msecs_to_jiffies(OSDivide64(ui64Timeoutus, 1000, &ui32Remainder));
+       else
+               timeOutJiffies = usecs_to_jiffies(ui64Timeoutus);
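+
+       /* For reference: 0xffffffff microseconds is roughly 4295 seconds
+        * (about 71.6 minutes), so only very long timeouts take the
+        * millisecond path above. */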
+
+#ifdef LINUX_EVENT_OBJECT_STATS
+       totalTimeoutJiffies = timeOutJiffies;
+#endif
+
+       do
+       {
+               prepare_to_wait(&psLinuxEventObject->sWait, &sWait, TASK_INTERRUPTIBLE);
+               ui32EventSignalCount = (IMG_UINT32) atomic_read(&psLinuxEventObjectList->sEventSignalCount);
+
+               if (psLinuxEventObject->ui32EventSignalCountPrevious != ui32EventSignalCount)
+               {
+                       /* There is a pending event signal i.e. LinuxEventObjectSignal()
+                        * was called on the event object since the last time we checked.
+                        * Return without waiting. */
+                       break;
+               }
+
+               if (signal_pending(current))
+               {
+                       /* There is an OS signal pending so return.
+                        * This allows user space processes that are waiting on
+                        * this event object to be killed or interrupted. */
+                       break;
+               }
+
+#ifdef LINUX_EVENT_OBJECT_STATS
+               bScheduleCalled = IMG_TRUE;
+#endif
+               timeOutJiffies = schedule_timeout(timeOutJiffies);
+
+               if (bFreezable)
+               {
+                       _TryToFreeze();
+               }
+
+#if defined(DEBUG)
+               psLinuxEventObject->ui32Stats++;
+#endif
+
+
+       } while (timeOutJiffies);
+
+       finish_wait(&psLinuxEventObject->sWait, &sWait);
+
+       psLinuxEventObject->ui32EventSignalCountPrevious = ui32EventSignalCount;
+
+#ifdef LINUX_EVENT_OBJECT_STATS
+       OSLockAcquire(psLinuxEventObject->hLock);
+       if (bScheduleCalled)
+       {
+               psLinuxEventObject->ui32ScheduleCalled++;
+               if (totalTimeoutJiffies == timeOutJiffies)
+               {
+                       psLinuxEventObject->ui32ScheduleReturnedImmediately++;
+               }
+               else if (timeOutJiffies == 0)
+               {
+                       psLinuxEventObject->ui32ScheduleSleptFully++;
+               }
+               else
+               {
+                       psLinuxEventObject->ui32ScheduleSleptPartially++;
+               }
+       }
+       else
+       {
+               psLinuxEventObject->ui32ScheduleAvoided++;
+       }
+       OSLockRelease(psLinuxEventObject->hLock);
+#endif
+
+       if (signal_pending(current))
+       {
+               return PVRSRV_ERROR_INTERRUPTED;
+       }
+       else
+       {
+               return timeOutJiffies ? PVRSRV_OK : PVRSRV_ERROR_TIMEOUT;
+       }
+}
+
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+
+PVRSRV_ERROR LinuxEventObjectWaitUntilSignalled(IMG_HANDLE hOSEventObject)
+{
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+       DEFINE_WAIT(sWait);
+
+       PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject =
+                       (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject;
+       PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList =
+                       psLinuxEventObject->psLinuxEventObjectList;
+
+       /* Check if the driver is in good shape */
+       if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+       {
+               return PVRSRV_ERROR_TIMEOUT;
+       }
+
+       prepare_to_wait(&psLinuxEventObject->sWait, &sWait, TASK_INTERRUPTIBLE);
+
+       if (psLinuxEventObject->ui32EventSignalCountPrevious !=
+           (IMG_UINT32) atomic_read(&psLinuxEventObjectList->sEventSignalCount))
+       {
+               /* There is a pending signal, so return without waiting */
+               goto finish;
+       }
+
+       schedule();
+
+       _TryToFreeze();
+
+finish:
+       finish_wait(&psLinuxEventObject->sWait, &sWait);
+
+       psLinuxEventObject->ui32EventSignalCountPrevious =
+                       (IMG_UINT32) atomic_read(&psLinuxEventObjectList->sEventSignalCount);
+
+       return PVRSRV_OK;
+}
+
+#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/event.h b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/event.h
new file mode 100644 (file)
index 0000000..bb378cb
--- /dev/null
@@ -0,0 +1,54 @@
+/*************************************************************************/ /*!
+@File
+@Title          Event Object
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList);
+PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList);
+PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject);
+PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObject);
+PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList);
+PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject,
+                                  IMG_UINT64 ui64Timeoutus,
+                                  IMG_BOOL bFreezable);
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+PVRSRV_ERROR LinuxEventObjectWaitUntilSignalled(IMG_HANDLE hOSEventObject);
+#endif
+void LinuxEventObjectDumpDebugInfo(IMG_HANDLE hOSEventObject);
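+
+/* Illustrative call sequence (hypothetical caller, error handling omitted):
+ *
+ *   IMG_HANDLE hEventList, hEventObj;
+ *
+ *   LinuxEventObjectListCreate(&hEventList);
+ *   LinuxEventObjectAdd(hEventList, &hEventObj);   // one per waiting thread
+ *
+ *   // waiter: block until signalled, interrupted or timed out
+ *   LinuxEventObjectWait(hEventObj, ui64TimeoutUs, IMG_FALSE);
+ *
+ *   // producer: wake all waiters registered on the list
+ *   LinuxEventObjectSignal(hEventList);
+ *
+ *   LinuxEventObjectDelete(hEventObj);
+ *   LinuxEventObjectListDestroy(hEventList);
+ */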
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/fwload.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/fwload.c
new file mode 100644 (file)
index 0000000..35e52af
--- /dev/null
@@ -0,0 +1,255 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services firmware load and access routines for Linux
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/firmware.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+#include "device.h"
+#include "module_common.h"
+#include "fwload.h"
+#include "pvr_debug.h"
+#include "srvkm.h"
+
+#if defined(RGX_FW_SIGNED)
+
+#include <linux/verification.h>
+#include <linux/module.h>
+#include <crypto/public_key.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
+#include <linux/module_signature.h>
+#else
+#define PKEY_ID_PKCS7 2
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0) */
+
+#include "signfw.h"
+#endif /* RGX_FW_SIGNED */
+
+struct OS_FW_IMAGE_t
+{
+       const struct firmware *psFW;
+       size_t                 uSignatureSize;
+};
+
+#if defined(RGX_FW_SIGNED)
+
+static int OSCheckSignature(const struct FirmwareSignatureHeader *psHeader, size_t uSize)
+{
+       if (be32_to_cpu(psHeader->ui32SignatureLen) >= uSize - sizeof(*psHeader))
+       {
+               return -EBADMSG;
+       }
+
+       if (psHeader->ui8IDType != PKEY_ID_PKCS7)
+       {
+               return -ENOPKG;
+       }
+
+       if (psHeader->ui8Algo != 0 || psHeader->ui8HashAlgo != 0 ||
+           psHeader->ui8SignerLen != 0 || psHeader->ui8KeyIDLen != 0 ||
+           psHeader->__ui8Padding[0] != 0 || psHeader->__ui8Padding[1] != 0 ||
+           psHeader->__ui8Padding[2] != 0)
+       {
+               return -EBADMSG;
+       }
+
+       return 0;
+}
+
+bool OSVerifyFirmware(OS_FW_IMAGE *psFWImage)
+{
+       const struct firmware *psFW        = psFWImage->psFW;
+       const u8              *pui8FWData  = psFW->data;
+       size_t                uFWSize      = psFW->size;
+       uint32_t              ui32MagicLen = sizeof(MODULE_SIG_STRING) - 1;
+       struct FirmwareSignatureHeader sHeader;
+       int                            err;
+
+       if (uFWSize <= ui32MagicLen)
+       {
+               return false;
+       }
+
+       /*
+        * The Linux kernel's sign-file utility is primarily intended for signing
+        * modules, and so it appends the MODULE_SIG_STRING magic at the end of
+        * the signature. Only proceed with verification if this magic is found.
+        */
+       if (memcmp(pui8FWData + uFWSize - ui32MagicLen, MODULE_SIG_STRING, ui32MagicLen) != 0)
+       {
+               return false;
+       }
+
+       uFWSize -= ui32MagicLen;
+       if (uFWSize <= sizeof(sHeader))
+       {
+               return false;
+       }
+
+       /*
+        * Immediately before the magic, a header is placed which describes the
+        * digest / crypto algorithm etc. Copy that header and ensure that it has
+        * valid contents (only RSA crypto, SHA hashes, X.509 certificates and
+        * PKCS#7 signatures are supported).
+        */
+       memcpy(&sHeader, pui8FWData + (uFWSize - sizeof(sHeader)), sizeof(sHeader));
+       if (OSCheckSignature(&sHeader, uFWSize) != 0)
+       {
+               return false;
+       }
+
+       /*
+        * All the required information has now been extracted, so ask the
+        * PKCS#7 module to verify the signature.
+        */
+       uFWSize -= be32_to_cpu(sHeader.ui32SignatureLen) + sizeof(sHeader);
+       err = verify_pkcs7_signature(pui8FWData, uFWSize, pui8FWData + uFWSize,
+                                    be32_to_cpu(sHeader.ui32SignatureLen), NULL,
+                                    VERIFYING_UNSPECIFIED_SIGNATURE, NULL, NULL);
+       if (err == 0)
+       {
+               psFWImage->uSignatureSize = psFW->size - uFWSize;
+               PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware Successfully Verified",
+                                               __func__));
+               return true;
+       }
+
+       PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Verification Failed (%d)",
+                                       __func__, err));
+       return false;
+}
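+
+/* Layout of a signed firmware image as parsed above (illustrative sketch):
+ *
+ *   [ firmware payload ][ PKCS#7 signature ][ FirmwareSignatureHeader ][ MODULE_SIG_STRING ]
+ *
+ * verify_pkcs7_signature() is run over the payload only; uSignatureSize
+ * records the combined size of the trailing signature, header and magic so
+ * that OSFirmwareSize() can report just the payload size.
+ */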
+
+#else /* defined(RGX_FW_SIGNED) */
+
+inline bool OSVerifyFirmware(OS_FW_IMAGE *psFWImage)
+{
+       return true;
+}
+
+#endif /* defined(RGX_FW_SIGNED) */
+
+PVRSRV_ERROR
+OSLoadFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, const IMG_CHAR *pszBVNCString,
+               bool (*pfnVerifyFirmware)(OS_FW_IMAGE*), OS_FW_IMAGE **ppsFWImage)
+{
+       const struct firmware *psFW = NULL;
+       OS_FW_IMAGE *psFWImage;
+       IMG_INT32    res;
+       PVRSRV_ERROR eError;
+
+       res = request_firmware(&psFW, pszBVNCString, psDeviceNode->psDevConfig->pvOSDevice);
+       if (res != 0)
+       {
+               release_firmware(psFW);
+               if (res == -ENOENT)
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "%s: request_firmware('%s') not found (%d)",
+                                                       __func__, pszBVNCString, res));
+                       eError = PVRSRV_ERROR_NOT_FOUND;
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_WARNING, "%s: request_firmware('%s') not ready (%d)",
+                                                       __func__, pszBVNCString, res));
+                       eError = PVRSRV_ERROR_NOT_READY;
+               }
+               goto err_exit;
+       }
+
+       psFWImage = OSAllocZMem(sizeof(*psFWImage));
+       if (psFWImage == NULL)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "%s: OSAllocZMem('%s') failed.",
+                                               __func__, pszBVNCString));
+
+               release_firmware(psFW);
+               eError =  PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto err_exit;
+       }
+
+       psFWImage->psFW = psFW;
+       if (pfnVerifyFirmware != NULL && !pfnVerifyFirmware(psFWImage))
+       {
+               release_firmware(psFW);
+               OSFreeMem(psFWImage);
+               eError = PVRSRV_ERROR_NOT_AUTHENTICATED;
+               goto err_exit;
+       }
+
+       *ppsFWImage = psFWImage;
+       return PVRSRV_OK;
+
+err_exit:
+       *ppsFWImage = NULL;
+       return eError;
+}
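+
+/* Illustrative use of the loader (hypothetical caller; the firmware name is
+ * made up here and would normally be a BVNC-derived string):
+ *
+ *   OS_FW_IMAGE *psFWImage;
+ *
+ *   if (OSLoadFirmware(psDeviceNode, "rgx.fw", OSVerifyFirmware,
+ *                      &psFWImage) == PVRSRV_OK)
+ *   {
+ *       const void *pvFW  = OSFirmwareData(psFWImage);
+ *       size_t      uSize = OSFirmwareSize(psFWImage);
+ *       ...
+ *       OSUnloadFirmware(psFWImage);
+ *   }
+ */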
+
+void
+OSUnloadFirmware(OS_FW_IMAGE *psFWImage)
+{
+       const struct firmware *psFW = psFWImage->psFW;
+
+       release_firmware(psFW);
+       OSFreeMem(psFWImage);
+}
+
+size_t
+OSFirmwareSize(OS_FW_IMAGE *psFWImage)
+{
+       const struct firmware *psFW = psFWImage->psFW;
+       return psFW->size - psFWImage->uSignatureSize;
+}
+
+const void *
+OSFirmwareData(OS_FW_IMAGE *psFWImage)
+{
+       const struct firmware *psFW = psFWImage->psFW;
+
+       return psFW->data;
+}
+
+/******************************************************************************
+ End of file (fwload.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/handle_idr.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/handle_idr.c
new file mode 100644 (file)
index 0000000..c40e096
--- /dev/null
@@ -0,0 +1,440 @@
+/*************************************************************************/ /*!
+@File
+@Title          Resource Handle Manager - IDR Back-end
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provide IDR based resource handle management back-end
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/idr.h>
+
+#include "handle_impl.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+
+#define ID_VALUE_MIN   1
+#define ID_VALUE_MAX   INT_MAX
+
+#define        ID_TO_HANDLE(i) ((IMG_HANDLE)(uintptr_t)(i))
+#define        HANDLE_TO_ID(h) ((IMG_INT)(uintptr_t)(h))
+
+struct _HANDLE_IMPL_BASE_
+{
+       struct idr sIdr;
+
+       IMG_UINT32 ui32MaxHandleValue;
+
+       IMG_UINT32 ui32TotalHandCount;
+};
+
+typedef struct _HANDLE_ITER_DATA_WRAPPER_
+{
+       PFN_HANDLE_ITER pfnHandleIter;
+       void *pvHandleIterData;
+} HANDLE_ITER_DATA_WRAPPER;
+
+
+static int HandleIterFuncWrapper(int id, void *data, void *iter_data)
+{
+       HANDLE_ITER_DATA_WRAPPER *psIterData = (HANDLE_ITER_DATA_WRAPPER *)iter_data;
+
+       PVR_UNREFERENCED_PARAMETER(data);
+
+       return (int)psIterData->pfnHandleIter(ID_TO_HANDLE(id), psIterData->pvHandleIterData);
+}
+
+/*!
+******************************************************************************
+
+ @Function     AcquireHandle
+
+ @Description  Acquire a new handle
+
+ @Input                psBase - Pointer to handle base structure
+               phHandle - Points to a handle pointer
+               pvData - Pointer to resource to be associated with the handle
+
+ @Output       phHandle - Points to a handle pointer
+
+ @Return       Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR AcquireHandle(HANDLE_IMPL_BASE *psBase,
+                                 IMG_HANDLE *phHandle,
+                                 void *pvData)
+{
+       int id;
+       int result;
+
+       PVR_ASSERT(psBase != NULL);
+       PVR_ASSERT(phHandle != NULL);
+       PVR_ASSERT(pvData != NULL);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+       idr_preload(GFP_KERNEL);
+       id = idr_alloc(&psBase->sIdr, pvData, ID_VALUE_MIN, psBase->ui32MaxHandleValue + 1, 0);
+       idr_preload_end();
+
+       result = id;
+#else
+       do
+       {
+               if (idr_pre_get(&psBase->sIdr, GFP_KERNEL) == 0)
+               {
+                       return PVRSRV_ERROR_OUT_OF_MEMORY;
+               }
+
+               result = idr_get_new_above(&psBase->sIdr, pvData, ID_VALUE_MIN, &id);
+       } while (result == -EAGAIN);
+
+       if ((IMG_UINT32)id > psBase->ui32MaxHandleValue)
+       {
+               idr_remove(&psBase->sIdr, id);
+               result = -ENOSPC;
+       }
+#endif
+
+       if (result < 0)
+       {
+               if (result == -ENOSPC)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Limit of %u handles reached",
+                                __func__, psBase->ui32MaxHandleValue));
+
+                       return PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE;
+               }
+
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       psBase->ui32TotalHandCount++;
+
+       *phHandle = ID_TO_HANDLE(id);
+
+       return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function     ReleaseHandle
+
+ @Description  Release a handle that is no longer needed.
+
+ @Input                psBase - Pointer to handle base structure
+               hHandle - Handle to release
+               ppvData - Points to a void data pointer
+
+ @Output       ppvData - Points to a void data pointer
+
+ @Return       PVRSRV_OK or PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR ReleaseHandle(HANDLE_IMPL_BASE *psBase,
+                                 IMG_HANDLE hHandle,
+                                 void **ppvData)
+{
+       int id = HANDLE_TO_ID(hHandle);
+       void *pvData;
+
+       PVR_ASSERT(psBase);
+
+       /* Get the data associated with the handle. If we get back NULL then
+        * it's an invalid handle. */
+
+       pvData = idr_find(&psBase->sIdr, id);
+       if (likely(pvData))
+       {
+               idr_remove(&psBase->sIdr, id);
+               psBase->ui32TotalHandCount--;
+       }
+
+       if (unlikely(pvData == NULL))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Handle out of range (%u > %u)",
+                        __func__, id, psBase->ui32TotalHandCount));
+               return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+       }
+
+       if (ppvData)
+       {
+               *ppvData = pvData;
+       }
+
+       return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function     GetHandleData
+
+ @Description  Get the data associated with the given handle
+
+ @Input                psBase - Pointer to handle base structure
+               hHandle - Handle from which data should be retrieved
+               ppvData - Points to a void data pointer
+
+ @Output       ppvData - Points to a void data pointer
+
+ @Return       Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR GetHandleData(HANDLE_IMPL_BASE *psBase,
+                                 IMG_HANDLE hHandle,
+                                 void **ppvData)
+{
+       int id = HANDLE_TO_ID(hHandle);
+       void *pvData;
+
+       PVR_ASSERT(psBase);
+       PVR_ASSERT(ppvData);
+
+       pvData = idr_find(&psBase->sIdr, id);
+       if (likely(pvData))
+       {
+               *ppvData = pvData;
+
+               return PVRSRV_OK;
+       }
+       else
+       {
+               return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+       }
+}
+
+/*!
+******************************************************************************
+
+ @Function     SetHandleData
+
+ @Description  Set the data associated with the given handle
+
+ @Input                psBase - Pointer to handle base structure
+               hHandle - Handle for which data should be changed
+               pvData - Pointer to new data to be associated with the handle
+
+ @Return       Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR SetHandleData(HANDLE_IMPL_BASE *psBase,
+                                 IMG_HANDLE hHandle,
+                                 void *pvData)
+{
+       int id = HANDLE_TO_ID(hHandle);
+       void *pvOldData;
+
+       PVR_ASSERT(psBase);
+
+       pvOldData = idr_replace(&psBase->sIdr, pvData, id);
+       if (IS_ERR(pvOldData))
+       {
+               if (PTR_ERR(pvOldData) == -ENOENT)
+               {
+                       return PVRSRV_ERROR_HANDLE_NOT_ALLOCATED;
+               }
+               else
+               {
+                       return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+               }
+       }
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR IterateOverHandles(HANDLE_IMPL_BASE *psBase, PFN_HANDLE_ITER pfnHandleIter, void *pvHandleIterData)
+{
+       HANDLE_ITER_DATA_WRAPPER sIterData;
+
+       PVR_ASSERT(psBase);
+       PVR_ASSERT(pfnHandleIter);
+
+       sIterData.pfnHandleIter = pfnHandleIter;
+       sIterData.pvHandleIterData = pvHandleIterData;
+
+       return (PVRSRV_ERROR)idr_for_each(&psBase->sIdr, HandleIterFuncWrapper, &sIterData);
+}
+
+/*!
+******************************************************************************
+
+ @Function     EnableHandlePurging
+
+ @Description  Enable purging for a given handle base
+
+ @Input                psBase - pointer to handle base structure
+
+ @Return       Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR EnableHandlePurging(HANDLE_IMPL_BASE *psBase)
+{
+       PVR_UNREFERENCED_PARAMETER(psBase);
+       PVR_ASSERT(psBase);
+
+       return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function     PurgeHandles
+
+ @Description  Purge handles for a given handle base
+
+ @Input                psBase - Pointer to handle base structure
+
+ @Return       Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR PurgeHandles(HANDLE_IMPL_BASE *psBase)
+{
+       PVR_UNREFERENCED_PARAMETER(psBase);
+       PVR_ASSERT(psBase);
+
+       return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function     CreateHandleBase
+
+ @Description  Create a handle base structure
+
+ @Input                ppsBase - pointer to handle base structure pointer
+
+ @Output       ppsBase - points to handle base structure pointer
+
+ @Return       Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR CreateHandleBase(HANDLE_IMPL_BASE **ppsBase)
+{
+       HANDLE_IMPL_BASE *psBase;
+
+       PVR_ASSERT(ppsBase);
+
+       psBase = OSAllocZMem(sizeof(*psBase));
+       if (psBase == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Couldn't allocate generic handle base",
+                                __func__));
+
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       idr_init(&psBase->sIdr);
+
+       psBase->ui32MaxHandleValue = ID_VALUE_MAX;
+       psBase->ui32TotalHandCount = 0;
+
+       *ppsBase = psBase;
+
+       return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function     DestroyHandleBase
+
+ @Description  Destroy a handle base structure
+
+ @Input                psBase - pointer to handle base structure
+
+ @Return       Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR DestroyHandleBase(HANDLE_IMPL_BASE *psBase)
+{
+       PVR_ASSERT(psBase);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0))
+       idr_remove_all(&psBase->sIdr);
+#endif
+
+       /* Finally destroy the idr */
+       idr_destroy(&psBase->sIdr);
+
+       OSFreeMem(psBase);
+
+       return PVRSRV_OK;
+}
+
+
+static const HANDLE_IMPL_FUNCTAB g_sHandleFuncTab =
+{
+       .pfnAcquireHandle = AcquireHandle,
+       .pfnReleaseHandle = ReleaseHandle,
+       .pfnGetHandleData = GetHandleData,
+       .pfnSetHandleData = SetHandleData,
+       .pfnIterateOverHandles = IterateOverHandles,
+       .pfnEnableHandlePurging = EnableHandlePurging,
+       .pfnPurgeHandles = PurgeHandles,
+       .pfnCreateHandleBase = CreateHandleBase,
+       .pfnDestroyHandleBase = DestroyHandleBase
+};
+
+PVRSRV_ERROR PVRSRVHandleGetFuncTable(HANDLE_IMPL_FUNCTAB const **ppsFuncs)
+{
+       static IMG_BOOL bAcquired = IMG_FALSE;
+
+       if (bAcquired)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Function table already acquired",
+                        __func__));
+               return PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+       }
+
+       if (ppsFuncs == NULL)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       *ppsFuncs = &g_sHandleFuncTab;
+
+       bAcquired = IMG_TRUE;
+
+       return PVRSRV_OK;
+}
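+
+/* Illustrative use of the function table (hypothetical caller, error handling
+ * omitted):
+ *
+ *   const HANDLE_IMPL_FUNCTAB *psFuncs;
+ *   HANDLE_IMPL_BASE *psBase;
+ *   IMG_HANDLE hHandle;
+ *
+ *   PVRSRVHandleGetFuncTable(&psFuncs);
+ *   psFuncs->pfnCreateHandleBase(&psBase);
+ *   psFuncs->pfnAcquireHandle(psBase, &hHandle, pvResource);
+ *   ...
+ *   psFuncs->pfnReleaseHandle(psBase, hHandle, NULL);
+ *   psFuncs->pfnDestroyHandleBase(psBase);
+ */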
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/km_apphint.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/km_apphint.c
new file mode 100644 (file)
index 0000000..8486edf
--- /dev/null
@@ -0,0 +1,1748 @@
+/*************************************************************************/ /*!
+@File           km_apphint.c
+@Title          Apphint routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "di_server.h"
+#include "pvr_uaccess.h"
+#include <linux/moduleparam.h>
+#include <linux/workqueue.h>
+#include <linux/string.h>
+#include <img_types.h>
+
+/* Common and SO layer */
+#include "img_defs.h"
+#include "sofunc_pvr.h"
+
+/* for action device access */
+#include "pvrsrv.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgxfwutils.h"
+#include "rgxhwperf.h"
+#include "htbserver.h"
+#include "rgxutils.h"
+#include "rgxapi_km.h"
+
+
+/* defines for default values */
+#include "rgx_fwif_km.h"
+#include "htbuffer_types.h"
+
+#include "pvr_notifier.h"
+
+#include "km_apphint_defs.h"
+#include "km_apphint.h"
+
+#if defined(PDUMP)
+#include <stdarg.h>
+#include "pdump_km.h"
+#endif
+
+/* Size of the temporary buffers used to read and write AppHint data.
+ * Must be large enough to contain any strings read or written, but no larger
+ * than 4096 (the buffer size for the kernel_param_ops .get function) and
+ * smaller than 1024 to keep the stack frame size within bounds.
+ */
+#define APPHINT_BUFFER_SIZE 512
+
+#define APPHINT_DEVICES_MAX 16
+
+/* Apphint Debug output level */
+#define APPHINT_DPF_LEVEL PVR_DBG_VERBOSE
+
+/*
+*******************************************************************************
+ * AppHint mnemonic data type helper tables
+******************************************************************************/
+struct apphint_lookup {
+       const char *name;
+       int value;
+};
+
+static const struct apphint_lookup fwt_logtype_tbl[] = {
+       { "trace", 0},
+       { "none", 0}
+#if defined(SUPPORT_TBI_INTERFACE)
+       , { "tbi", 1}
+#endif
+};
+
+static const struct apphint_lookup fwt_loggroup_tbl[] = {
+       RGXFWIF_LOG_GROUP_NAME_VALUE_MAP
+};
+
+static const struct apphint_lookup htb_loggroup_tbl[] = {
+#define X(a, b) { #b, HTB_LOG_GROUP_FLAG(a) },
+       HTB_LOG_SFGROUPLIST
+#undef X
+};
+
+static const struct apphint_lookup htb_opmode_tbl[] = {
+       { "droplatest", HTB_OPMODE_DROPLATEST},
+       { "dropoldest", HTB_OPMODE_DROPOLDEST},
+       { "block", HTB_OPMODE_BLOCK}
+};
+
+__maybe_unused
+static const struct apphint_lookup htb_logmode_tbl[] = {
+       { "all", HTB_LOGMODE_ALLPID},
+       { "restricted", HTB_LOGMODE_RESTRICTEDPID}
+};
+
+__maybe_unused
+static const struct apphint_lookup timecorr_clk_tbl[] = {
+       { "mono", 0 },
+       { "mono_raw", 1 },
+       { "sched", 2 }
+};
+
+/*
+*******************************************************************************
+ Data types
+******************************************************************************/
+union apphint_value {
+       IMG_UINT64 UINT64;
+       IMG_UINT32 UINT32;
+       IMG_BOOL BOOL;
+       IMG_CHAR *STRING;
+};
+
+union apphint_query_action {
+       PVRSRV_ERROR (*UINT64)(const PVRSRV_DEVICE_NODE *device,
+                              const void *private_data, IMG_UINT64 *value);
+       PVRSRV_ERROR (*UINT32)(const PVRSRV_DEVICE_NODE *device,
+                              const void *private_data, IMG_UINT32 *value);
+       PVRSRV_ERROR (*BOOL)(const PVRSRV_DEVICE_NODE *device,
+                            const void *private_data, IMG_BOOL *value);
+       PVRSRV_ERROR (*STRING)(const PVRSRV_DEVICE_NODE *device,
+                              const void *private_data, IMG_CHAR **value);
+};
+
+union apphint_set_action {
+       PVRSRV_ERROR (*UINT64)(const PVRSRV_DEVICE_NODE *device,
+                              const void *private_data, IMG_UINT64 value);
+       PVRSRV_ERROR (*UINT32)(const PVRSRV_DEVICE_NODE *device,
+                              const void *private_data, IMG_UINT32 value);
+       PVRSRV_ERROR (*BOOL)(const PVRSRV_DEVICE_NODE *device,
+                            const void *private_data, IMG_BOOL value);
+       PVRSRV_ERROR (*STRING)(const PVRSRV_DEVICE_NODE *device,
+                              const void *private_data, IMG_CHAR *value);
+};
+
+struct apphint_action {
+       union apphint_query_action query; /*!< Query callbacks. */
+       union apphint_set_action set;     /*!< Set callbacks. */
+       const PVRSRV_DEVICE_NODE *device; /*!< Pointer to the device node.*/
+       const void *private_data;         /*!< Opaque data passed to `query` and
+                                              `set` callbacks. */
+       union apphint_value stored;       /*!< Value of the AppHint. */
+       bool free;                        /*!< Flag indicating that memory has been
+                                              allocated for this AppHint and it
+                                              needs to be freed on deinit. */
+       bool initialised;                 /*!< Flag indicating if the AppHint has
+                                              been already initialised. */
+};
+
+struct apphint_param {
+       IMG_UINT32 id;
+       APPHINT_DATA_TYPE data_type;
+       const void *data_type_helper;
+       IMG_UINT32 helper_size;
+};
+
+struct apphint_init_data {
+       IMG_UINT32 id;                  /* index into AppHint Table */
+       APPHINT_CLASS class;
+       const IMG_CHAR *name;
+       union apphint_value default_value;
+};
+
+struct apphint_init_data_mapping {
+       IMG_UINT32 device_apphint_id;
+       IMG_UINT32 modparam_apphint_id;
+};
+
+struct apphint_class_state {
+       APPHINT_CLASS class;
+       IMG_BOOL enabled;
+};
+
+struct apphint_work {
+       struct work_struct work;
+       union apphint_value new_value;
+       struct apphint_action *action;
+};
+
+/*
+*******************************************************************************
+ Initialization / configuration table data
+******************************************************************************/
+#define UINT32Bitfield UINT32
+#define UINT32List UINT32
+
+static const struct apphint_init_data init_data_buildvar[] = {
+#define X(a, b, c, d, e) \
+       {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} },
+       APPHINT_LIST_BUILDVAR_COMMON
+       APPHINT_LIST_BUILDVAR
+#undef X
+};
+
+static const struct apphint_init_data init_data_modparam[] = {
+#define X(a, b, c, d, e) \
+       {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} },
+       APPHINT_LIST_MODPARAM_COMMON
+       APPHINT_LIST_MODPARAM
+#undef X
+};
+
+static const struct apphint_init_data init_data_debuginfo[] = {
+#define X(a, b, c, d, e) \
+       {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} },
+       APPHINT_LIST_DEBUGINFO_COMMON
+       APPHINT_LIST_DEBUGINFO
+#undef X
+};
+
+static const struct apphint_init_data init_data_debuginfo_device[] = {
+#define X(a, b, c, d, e) \
+       {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} },
+       APPHINT_LIST_DEBUGINFO_DEVICE_COMMON
+       APPHINT_LIST_DEBUGINFO_DEVICE
+#undef X
+};
+
+static const struct apphint_init_data_mapping init_data_debuginfo_device_to_modparams[] = {
+#define X(a, b) \
+       {APPHINT_ID_ ## a, APPHINT_ID_ ## b},
+       APPHINT_LIST_DEBUIGINFO_DEVICE_X_MODPARAM_INIT_COMMON
+       APPHINT_LIST_DEBUIGINFO_DEVICE_X_MODPARAM_INIT
+#undef X
+};
+
+#undef UINT32Bitfield
+#undef UINT32List
+
+__maybe_unused static const char NO_PARAM_TABLE[] = {};
+
+static const struct apphint_param param_lookup[] = {
+#define X(a, b, c, d, e) \
+       {APPHINT_ID_ ## a, APPHINT_DATA_TYPE_ ## b, e, ARRAY_SIZE(e) },
+       APPHINT_LIST_ALL
+#undef X
+};
+
+static const struct apphint_class_state class_state[] = {
+#define X(a) {APPHINT_CLASS_ ## a, APPHINT_ENABLED_CLASS_ ## a},
+       APPHINT_CLASS_LIST
+#undef X
+};
+
+/*
+*******************************************************************************
+ Global state
+******************************************************************************/
+/* If the union apphint_value becomes such that it is not possible to read
+ * and write atomically, a mutex may be desirable to prevent a read returning
+ * a partially written state.
+ * This would require a statically initialized mutex outside of the
+ * struct apphint_state to prevent use of an uninitialized mutex when
+ * module_params are provided on the command line.
+ *     static DEFINE_MUTEX(apphint_mutex);
+ */
+static struct apphint_state
+{
+       struct workqueue_struct *workqueue;
+       DI_GROUP *debuginfo_device_rootdir[APPHINT_DEVICES_MAX];
+       DI_ENTRY *debuginfo_device_entry[APPHINT_DEVICES_MAX][APPHINT_DEBUGINFO_DEVICE_ID_MAX];
+       DI_GROUP *debuginfo_rootdir;
+       DI_ENTRY *debuginfo_entry[APPHINT_DEBUGINFO_ID_MAX];
+       DI_GROUP *buildvar_rootdir;
+       DI_ENTRY *buildvar_entry[APPHINT_BUILDVAR_ID_MAX];
+
+       unsigned int num_devices;
+       PVRSRV_DEVICE_NODE *devices[APPHINT_DEVICES_MAX];
+       unsigned int initialized;
+
+       /* The array provides value space for one copy of every AppHint defined
+        * (used by the first device) plus (APPHINT_DEVICES_MAX - 1) further
+        * copies of the device-specific AppHint values for multi-device
+        * platforms.
+        */
+       struct apphint_action val[APPHINT_ID_MAX + ((APPHINT_DEVICES_MAX-1)*APPHINT_DEBUGINFO_DEVICE_ID_MAX)];
+
+} apphint = {
+/* statically initialise default values to ensure that any module_params
+ * provided on the command line are not overwritten by defaults.
+ */
+       .val = {
+#define UINT32Bitfield UINT32
+#define UINT32List UINT32
+#define X(a, b, c, d, e) \
+       { {NULL}, {NULL}, NULL, NULL, {.b=d}, false },
+       APPHINT_LIST_ALL
+#undef X
+#undef UINT32Bitfield
+#undef UINT32List
+       },
+       .initialized = 0,
+       .num_devices = 0
+};
+
+#define APPHINT_DEBUGINFO_DEVICE_ID_OFFSET (APPHINT_ID_MAX-APPHINT_DEBUGINFO_DEVICE_ID_MAX)
+
+static inline void
+get_apphint_id_from_action_addr(const struct apphint_action * const addr,
+                                APPHINT_ID * const id)
+{
+       *id = (APPHINT_ID)(addr - apphint.val);
+       if (*id >= APPHINT_ID_MAX) {
+               *id -= APPHINT_DEBUGINFO_DEVICE_ID_OFFSET;
+               *id %= APPHINT_DEBUGINFO_DEVICE_ID_MAX;
+               *id += APPHINT_DEBUGINFO_DEVICE_ID_OFFSET;
+       }
+}
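+
+/* Worked example of the index folding above, using made-up sizes: with
+ * APPHINT_ID_MAX = 100 and APPHINT_DEBUGINFO_DEVICE_ID_MAX = 10 (so the
+ * device-specific IDs are 90..99), the copy of device-specific AppHint 95
+ * belonging to the second device is stored at apphint.val[105];
+ * 105 - 90 = 15, 15 % 10 = 5, 5 + 90 = 95 recovers the original ID. */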
+
+static inline void
+get_value_offset_from_device(const PVRSRV_DEVICE_NODE * const device,
+                             int * const offset,
+                             APPHINT_ID id)
+{
+       int i;
+       IMG_BOOL bFound = IMG_FALSE;
+
+       /* No device offset if not a device specific apphint */
+       if (APPHINT_OF_DRIVER_NO_DEVICE == device) {
+               *offset = 0;
+               return;
+       }
+
+       /* Check that the specified ID is a device-specific one. If not we
+        * set the offset to 0 for the global MODPARAM / BUILDVAR etc. AppHint
+        */
+       for (i = 0; i < ARRAY_SIZE(init_data_debuginfo_device); i++)
+       {
+               const struct apphint_init_data *device_init = &init_data_debuginfo_device[i];
+
+               if ((IMG_UINT32)id == device_init->id) {
+                       bFound = IMG_TRUE;
+                       break;
+               }
+       }
+
+       if (!bFound) {
+               *offset = 0;
+               return;
+       }
+
+       for (i = 0; device && i < APPHINT_DEVICES_MAX; i++) {
+               if (apphint.devices[i] == device)
+                       break;
+       }
+       if (APPHINT_DEVICES_MAX == i) {
+               PVR_DPF((PVR_DBG_WARNING, "%s: Unregistered device", __func__));
+               i = 0;
+       }
+       *offset = i * APPHINT_DEBUGINFO_DEVICE_ID_MAX;
+}
+
+/**
+ * apphint_action_worker - perform an action after an AppHint update has been
+ *                    requested by a UM process, and update the record of the
+ *                    currently active value
+ */
+static void apphint_action_worker(struct work_struct *work)
+{
+       struct apphint_work *work_pkt = container_of(work,
+                                                    struct apphint_work,
+                                                    work);
+       struct apphint_action *a = work_pkt->action;
+       union apphint_value value = work_pkt->new_value;
+       APPHINT_ID id;
+       PVRSRV_ERROR result = PVRSRV_OK;
+
+       get_apphint_id_from_action_addr(a, &id);
+
+       if (a->set.UINT64) {
+               switch (param_lookup[id].data_type) {
+               case APPHINT_DATA_TYPE_UINT64:
+                       result = a->set.UINT64(a->device,
+                                              a->private_data,
+                                              value.UINT64);
+                       break;
+
+               case APPHINT_DATA_TYPE_UINT32:
+               case APPHINT_DATA_TYPE_UINT32Bitfield:
+               case APPHINT_DATA_TYPE_UINT32List:
+                       result = a->set.UINT32(a->device,
+                                              a->private_data,
+                                              value.UINT32);
+                       break;
+
+               case APPHINT_DATA_TYPE_BOOL:
+                       result = a->set.BOOL(a->device,
+                                            a->private_data,
+                                            value.BOOL);
+                       break;
+
+               case APPHINT_DATA_TYPE_STRING:
+                       result = a->set.STRING(a->device,
+                                              a->private_data,
+                                              value.STRING);
+                       kfree(value.STRING);
+                       break;
+
+               default:
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: unrecognised data type (%d), index (%d)",
+                                __func__, param_lookup[id].data_type, id));
+               }
+
+               /* Do not log errors if running in GUEST mode */
+               if ((PVRSRV_OK != result) && !PVRSRV_VZ_MODE_IS(GUEST)) {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: failed (%s)",
+                                __func__, PVRSRVGetErrorString(result)));
+               }
+       } else {
+               if (a->free) {
+                       kfree(a->stored.STRING);
+               }
+               a->stored = value;
+               if (param_lookup[id].data_type == APPHINT_DATA_TYPE_STRING) {
+                       a->free = true;
+               }
+               PVR_DPF((PVR_DBG_MESSAGE,
+                        "%s: AppHint value updated before handler is registered, ID(%d)",
+                        __func__, id));
+       }
+       kfree((void *)work_pkt);
+}
+
+static void apphint_action(union apphint_value new_value,
+                           struct apphint_action *action)
+{
+       struct apphint_work *work_pkt = kmalloc(sizeof(*work_pkt), GFP_KERNEL);
+
+       /* queue apphint update on a serialized workqueue to avoid races */
+       if (work_pkt) {
+               work_pkt->new_value = new_value;
+               work_pkt->action = action;
+               INIT_WORK(&work_pkt->work, apphint_action_worker);
+               if (0 == queue_work(apphint.workqueue, &work_pkt->work)) {
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s: failed to queue apphint change request",
+                               __func__));
+                       goto err_exit;
+               }
+       } else {
+               PVR_DPF((PVR_DBG_ERROR,
+                       "%s: failed to alloc memory for apphint change request",
+                       __func__));
+               goto err_exit;
+       }
+       return;
+err_exit:
+       kfree(new_value.STRING);
+}
+
+/**
+ * apphint_read - parse the input buffer into a value of the AppHint's data type
+ * Returns -errno or the buffer size (count)
+ */
+static int apphint_read(char *buffer, size_t count, APPHINT_ID ue,
+                        union apphint_value *value)
+{
+       APPHINT_DATA_TYPE data_type = param_lookup[ue].data_type;
+       int result = 0;
+
+       switch (data_type) {
+       case APPHINT_DATA_TYPE_UINT64:
+               if (kstrtou64(buffer, 0, &value->UINT64) < 0) {
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Invalid UINT64 input data for id %d: %s",
+                               __func__, ue, buffer));
+                       result = -EINVAL;
+                       goto err_exit;
+               }
+               break;
+       case APPHINT_DATA_TYPE_UINT32:
+               if (kstrtou32(buffer, 0, &value->UINT32) < 0) {
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Invalid UINT32 input data for id %d: %s",
+                               __func__, ue, buffer));
+                       result = -EINVAL;
+                       goto err_exit;
+               }
+               break;
+       case APPHINT_DATA_TYPE_BOOL:
+               switch (buffer[0]) {
+               case '0':
+               case 'n':
+               case 'N':
+               case 'f':
+               case 'F':
+                       value->BOOL = IMG_FALSE;
+                       break;
+               case '1':
+               case 'y':
+               case 'Y':
+               case 't':
+               case 'T':
+                       value->BOOL = IMG_TRUE;
+                       break;
+               default:
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Invalid BOOL input data for id %d: %s",
+                               __func__, ue, buffer));
+                       result = -EINVAL;
+                       goto err_exit;
+               }
+               break;
+       case APPHINT_DATA_TYPE_UINT32List:
+       {
+               int i;
+               struct apphint_lookup *lookup =
+                       (struct apphint_lookup *)
+                       param_lookup[ue].data_type_helper;
+               int size = param_lookup[ue].helper_size;
+               /* buffer may include '\n', remove it */
+               char *arg = strsep(&buffer, "\n");
+
+               if (lookup == (struct apphint_lookup *)NO_PARAM_TABLE) {
+                       result = -EINVAL;
+                       goto err_exit;
+               }
+
+               for (i = 0; i < size; i++) {
+                       if (strcasecmp(lookup[i].name, arg) == 0) {
+                               value->UINT32 = lookup[i].value;
+                               break;
+                       }
+               }
+               if (i == size) {
+                       if (OSStringLength(arg) == 0) {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: No value set for AppHint",
+                                       __func__));
+                       } else {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Unrecognised AppHint value (%s)",
+                                       __func__, arg));
+                       }
+                       result = -EINVAL;
+               }
+               break;
+       }
+       case APPHINT_DATA_TYPE_UINT32Bitfield:
+       {
+               int i;
+               struct apphint_lookup *lookup =
+                       (struct apphint_lookup *)
+                       param_lookup[ue].data_type_helper;
+               int size = param_lookup[ue].helper_size;
+               /* buffer may include '\n', remove it */
+               char *string = strsep(&buffer, "\n");
+               char *token = strsep(&string, ",");
+
+               if (lookup == (struct apphint_lookup *)NO_PARAM_TABLE) {
+                       result = -EINVAL;
+                       goto err_exit;
+               }
+
+               value->UINT32 = 0;
+               /* empty string is valid to clear the bitfield */
+               while (token && *token) {
+                       for (i = 0; i < size; i++) {
+                               if (strcasecmp(lookup[i].name, token) == 0) {
+                                       value->UINT32 |= lookup[i].value;
+                                       break;
+                               }
+                       }
+                       if (i == size) {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Unrecognised AppHint value (%s)",
+                                       __func__, token));
+                               result = -EINVAL;
+                               goto err_exit;
+                       }
+                       token = strsep(&string, ",");
+               }
+               break;
+       }
+       case APPHINT_DATA_TYPE_STRING:
+       {
+               /* buffer may include '\n', remove it */
+               char *string = strsep(&buffer, "\n");
+               size_t len = OSStringLength(string);
+
+               if (!len) {
+                       result = -EINVAL;
+                       goto err_exit;
+               }
+
+               ++len;
+
+               value->STRING = kmalloc(len, GFP_KERNEL);
+               if (!value->STRING) {
+                       result = -ENOMEM;
+                       goto err_exit;
+               }
+
+               OSStringLCopy(value->STRING, string, len);
+               break;
+       }
+       default:
+               result = -EINVAL;
+               goto err_exit;
+       }
+
+err_exit:
+       return (result < 0) ? result : count;
+}
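+
+/* As a rough guide, apphint_read() accepts the following input syntax for
+ * each data type (the lookup names used below are hypothetical examples):
+ *   UINT64/UINT32   : decimal or 0x-prefixed hex, e.g. "1024" or "0x400"
+ *   BOOL            : '1'/'y'/'Y'/'t'/'T' for IMG_TRUE,
+ *                     '0'/'n'/'N'/'f'/'F' for IMG_FALSE
+ *   UINT32List      : a single name from the helper lookup table, e.g. "low"
+ *   UINT32Bitfield  : comma-separated lookup names, e.g. "flagA,flagB";
+ *                     an empty string clears the bitfield
+ *   STRING          : any non-empty string, copied into a kmalloc'd buffer
+ */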
+
+static PVRSRV_ERROR get_apphint_value_from_action(const struct apphint_action * const action,
+                                                  union apphint_value * const value,
+                                                  const PVRSRV_DEVICE_NODE * const psDevNode)
+{
+       APPHINT_ID id;
+       APPHINT_DATA_TYPE data_type;
+       PVRSRV_ERROR result = PVRSRV_OK;
+       const PVRSRV_DEVICE_NODE *psDevice;
+
+       get_apphint_id_from_action_addr(action, &id);
+       data_type = param_lookup[id].data_type;
+
+       /* If we've got an entry that is APPHINT_OF_DRIVER_NO_DEVICE we should use
+        * the higher-level psDevNode value instead. This is the device-node that is
+        * associated with the original debug_dump request.
+        * Note: if we're called with psDevNode == APPHINT_OF_DRIVER_NO_DEVICE
+        * we attempt to use the first registered device, apphint.devices[0] (if
+        * any devices have been registered). If no devices are hooked into the
+        * AppHint mechanism we return PVRSRV_ERROR_RETRY.
+        */
+       if (psDevNode == APPHINT_OF_DRIVER_NO_DEVICE) {
+               if (action->device == APPHINT_OF_DRIVER_NO_DEVICE) {
+                       if (apphint.num_devices > 0) {
+                               psDevice = apphint.devices[0];
+                       } else {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                       "Uninitialised AppHint device for AppHint index (%d)",
+                                       id));
+                               return PVRSRV_ERROR_RETRY;
+                       }
+               } else {
+                       psDevice = action->device;
+               }
+       } else {
+               if (action->device == APPHINT_OF_DRIVER_NO_DEVICE) {
+                       psDevice = psDevNode;
+               } else {
+                       psDevice = action->device;
+               }
+       }
+
+       if (action->query.UINT64) {
+               switch (data_type) {
+               case APPHINT_DATA_TYPE_UINT64:
+                       result = action->query.UINT64(psDevice,
+                                                     action->private_data,
+                                                     &value->UINT64);
+                       break;
+
+               case APPHINT_DATA_TYPE_UINT32:
+               case APPHINT_DATA_TYPE_UINT32Bitfield:
+               case APPHINT_DATA_TYPE_UINT32List:
+                       result = action->query.UINT32(psDevice,
+                                                     action->private_data,
+                                                     &value->UINT32);
+                       break;
+
+               case APPHINT_DATA_TYPE_BOOL:
+                       result = action->query.BOOL(psDevice,
+                                                   action->private_data,
+                                                   &value->BOOL);
+                       break;
+
+               case APPHINT_DATA_TYPE_STRING:
+                       result = action->query.STRING(psDevice,
+                                                     action->private_data,
+                                                     &value->STRING);
+                       break;
+               default:
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: unrecognised data type (%d), index (%d)",
+                                __func__, data_type, id));
+               }
+       } else {
+               *value = action->stored;
+       }
+
+       if (PVRSRV_OK != result) {
+               PVR_DPF((PVR_DBG_ERROR, "%s: failed (%d), index (%d)", __func__, result, id));
+       }
+
+       return result;
+}
+
+/**
+ * apphint_write - write the current AppHint data to a buffer
+ *
+ * Returns length written or -errno
+ */
+static int apphint_write(char *buffer, const size_t size,
+                         const struct apphint_action *a)
+{
+       const struct apphint_param *hint;
+       int result = 0;
+       APPHINT_ID id;
+       union apphint_value value;
+
+       get_apphint_id_from_action_addr(a, &id);
+       hint = &param_lookup[id];
+
+       result = get_apphint_value_from_action(a, &value, a->device);
+
+       switch (hint->data_type) {
+       case APPHINT_DATA_TYPE_UINT64:
+               result += snprintf(buffer + result, size - result,
+                               "0x%016llx",
+                               value.UINT64);
+               break;
+       case APPHINT_DATA_TYPE_UINT32:
+               result += snprintf(buffer + result, size - result,
+                               "0x%08x",
+                               value.UINT32);
+               break;
+       case APPHINT_DATA_TYPE_BOOL:
+               result += snprintf(buffer + result, size - result,
+                       "%s",
+                       value.BOOL ? "Y" : "N");
+               break;
+       case APPHINT_DATA_TYPE_STRING:
+               if (value.STRING) {
+                       result += snprintf(buffer + result, size - result,
+                               "%s",
+                               *value.STRING ? value.STRING : "(none)");
+               } else {
+                       result += snprintf(buffer + result, size - result,
+                       "(none)");
+               }
+               break;
+       case APPHINT_DATA_TYPE_UINT32List:
+       {
+               struct apphint_lookup *lookup =
+                       (struct apphint_lookup *) hint->data_type_helper;
+               IMG_UINT32 i;
+
+               if (lookup == (struct apphint_lookup *)NO_PARAM_TABLE) {
+                       result = -EINVAL;
+                       goto err_exit;
+               }
+
+               for (i = 0; i < hint->helper_size; i++) {
+                       if (lookup[i].value == value.UINT32) {
+                               result += snprintf(buffer + result,
+                                               size - result,
+                                               "%s",
+                                               lookup[i].name);
+                               break;
+                       }
+               }
+               break;
+       }
+       case APPHINT_DATA_TYPE_UINT32Bitfield:
+       {
+               struct apphint_lookup *lookup =
+                       (struct apphint_lookup *) hint->data_type_helper;
+               IMG_UINT32 i;
+
+               if (lookup == (struct apphint_lookup *)NO_PARAM_TABLE) {
+                       result = -EINVAL;
+                       goto err_exit;
+               }
+
+               for (i = 0; i < hint->helper_size; i++) {
+                       if (lookup[i].value & value.UINT32) {
+                               result += snprintf(buffer + result,
+                                               size - result,
+                                               "%s,",
+                                               lookup[i].name);
+                       }
+               }
+               if (result) {
+                       /* remove any trailing ',' */
+                       --result;
+                       *(buffer + result) = '\0';
+               } else {
+                       result += snprintf(buffer + result,
+                                       size - result, "none");
+               }
+               break;
+       }
+       default:
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: unrecognised data type (%d), index (%d)",
+                        __func__, hint->data_type, id));
+               result = -EINVAL;
+       }
+
+err_exit:
+       return result;
+}
+
+/*
+*******************************************************************************
+ Module parameters initialization - different from debuginfo
+******************************************************************************/
+/**
+ * apphint_kparam_set - Handle an update of a module parameter
+ *
+ * Returns 0, or -errno.  arg is in kp->arg.
+ */
+static int apphint_kparam_set(const char *val, const struct kernel_param *kp)
+{
+       char val_copy[APPHINT_BUFFER_SIZE];
+       APPHINT_ID id;
+       union apphint_value value;
+       int result;
+
+       /* copy to a writable (non-const) buffer as apphint_read() modifies its input */
+       result = strlcpy(val_copy, val, APPHINT_BUFFER_SIZE);
+
+       get_apphint_id_from_action_addr(kp->arg, &id);
+       if (result < APPHINT_BUFFER_SIZE) {
+               result = apphint_read(val_copy, result, id, &value);
+               if (result >= 0) {
+                       ((struct apphint_action *)kp->arg)->stored = value;
+                       ((struct apphint_action *)kp->arg)->initialised = true;
+                       if (param_lookup[id].data_type == APPHINT_DATA_TYPE_STRING) {
+                               ((struct apphint_action *)kp->arg)->free = true;
+                       }
+               }
+       } else {
+               PVR_DPF((PVR_DBG_ERROR, "%s: String too long", __func__));
+       }
+       return (result > 0) ? 0 : result;
+}
+
+/**
+ * apphint_kparam_get - handle a read of a module parameter
+ *
+ * Returns length written or -errno.  Buffer is 4k (i.e. be short!)
+ */
+static int apphint_kparam_get(char *buffer, const struct kernel_param *kp)
+{
+       return apphint_write(buffer, PAGE_SIZE, kp->arg);
+}
+
+__maybe_unused
+static const struct kernel_param_ops apphint_kparam_fops = {
+       .set = apphint_kparam_set,
+       .get = apphint_kparam_get,
+};
+
+/*
+ * Call module_param_cb() for all AppHints listed in
+ * APPHINT_LIST_MODPARAM_COMMON and APPHINT_LIST_MODPARAM.
+ * apphint_modparam_class_##<class> resolves to apphint_modparam_enable()
+ * except for AppHint classes that have been disabled.
+ */
+
+#define apphint_modparam_enable(name, number, perm) \
+       module_param_cb(name, &apphint_kparam_fops, &apphint.val[number], perm);
+
+#define X(a, b, c, d, e) \
+       apphint_modparam_class_ ##c(a, APPHINT_ID_ ## a, 0444)
+       APPHINT_LIST_MODPARAM_COMMON
+       APPHINT_LIST_MODPARAM
+#undef X
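+
+/*
+ * For illustration: for a hypothetical list entry
+ *   X(EnableFoo, UINT32, ALWAYS, <default>, <helper>)
+ * whose class is enabled, the expansion above is equivalent to
+ *   module_param_cb(EnableFoo, &apphint_kparam_fops,
+ *                   &apphint.val[APPHINT_ID_EnableFoo], 0444);
+ * i.e. each enabled AppHint becomes a module parameter (settable at module
+ * load time, read-only through sysfs) backed by its apphint.val[] slot.
+ */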
+
+/*
+*******************************************************************************
+ Debug Info get (seq file) operations - supporting functions
+******************************************************************************/
+static void *apphint_di_start(OSDI_IMPL_ENTRY *s, IMG_UINT64 *pos)
+{
+       if (*pos == 0) {
+               /* We want only one entry in the sequence, one call to show() */
+               return (void *) 1;
+       }
+
+       PVR_UNREFERENCED_PARAMETER(s);
+
+       return NULL;
+}
+
+static void apphint_di_stop(OSDI_IMPL_ENTRY *s, void *v)
+{
+       PVR_UNREFERENCED_PARAMETER(s);
+       PVR_UNREFERENCED_PARAMETER(v);
+}
+
+static void *apphint_di_next(OSDI_IMPL_ENTRY *s, void *v, IMG_UINT64 *pos)
+{
+       PVR_UNREFERENCED_PARAMETER(s);
+       PVR_UNREFERENCED_PARAMETER(v);
+       PVR_UNREFERENCED_PARAMETER(pos);
+       return NULL;
+}
+
+static int apphint_di_show(OSDI_IMPL_ENTRY *s, void *v)
+{
+       IMG_CHAR km_buffer[APPHINT_BUFFER_SIZE];
+       int result;
+       void *private = DIGetPrivData(s);
+
+       PVR_UNREFERENCED_PARAMETER(v);
+
+       result = apphint_write(km_buffer, APPHINT_BUFFER_SIZE, private);
+       if (result < 0) {
+               PVR_DPF((PVR_DBG_ERROR, "%s: failure", __func__));
+       } else {
+               /* debuginfo requires a trailing \n, module_params don't */
+               result += snprintf(km_buffer + result,
+                               APPHINT_BUFFER_SIZE - result,
+                               "\n");
+               DIPuts(s, km_buffer);
+       }
+
+       /* have to return 0 to see output */
+       return (result < 0) ? result : 0;
+}
+
+/*
+*******************************************************************************
+ Debug Info supporting functions
+******************************************************************************/
+
+/**
+ * apphint_set - Handle a DI value update
+ */
+static IMG_INT64 apphint_set(const IMG_CHAR *buffer, IMG_UINT64 count,
+                             IMG_UINT64 *ppos, void *data)
+{
+       APPHINT_ID id;
+       union apphint_value value;
+       struct apphint_action *action = data;
+       char km_buffer[APPHINT_BUFFER_SIZE];
+       int result = 0;
+
+       if (ppos == NULL)
+               return -EIO;
+
+       if (count >= APPHINT_BUFFER_SIZE) {
+               PVR_DPF((PVR_DBG_ERROR, "%s: String too long (%" IMG_INT64_FMTSPECd ")",
+                       __func__, count));
+               result = -EINVAL;
+               goto err_exit;
+       }
+
+       /* apphint_read() modifies the buffer so we need to copy it */
+       memcpy(km_buffer, buffer, count);
+       /* count includes the extra '\0' character that the DI framework
+        * appends at the end, so drop it and terminate the copy ourselves */
+       count -= 1;
+       km_buffer[count] = '\0';
+
+       get_apphint_id_from_action_addr(action, &id);
+       result = apphint_read(km_buffer, count, id, &value);
+       if (result >= 0)
+               apphint_action(value, action);
+
+       *ppos += count;
+err_exit:
+       return result;
+}
+
+/**
+ * apphint_debuginfo_init - Create the specified debuginfo entries
+ */
+static int apphint_debuginfo_init(const char *sub_dir,
+               unsigned int device_num,
+               unsigned int init_data_size,
+               const struct apphint_init_data *init_data,
+               DI_GROUP *parentdir,
+               DI_GROUP **rootdir,
+               DI_ENTRY *entry[])
+{
+       PVRSRV_ERROR result;
+       unsigned int i;
+       unsigned int device_value_offset = device_num * APPHINT_DEBUGINFO_DEVICE_ID_MAX;
+       const DI_ITERATOR_CB iterator = {
+               .pfnStart = apphint_di_start, .pfnStop = apphint_di_stop,
+               .pfnNext  = apphint_di_next,  .pfnShow = apphint_di_show,
+               .pfnWrite = apphint_set,      .ui32WriteLenMax = APPHINT_BUFFER_SIZE
+       };
+
+       if (*rootdir) {
+               PVR_DPF((PVR_DBG_WARNING,
+                       "AppHint DebugFS already created, skipping"));
+               result = -EEXIST;
+               goto err_exit;
+       }
+
+       result = DICreateGroup(sub_dir, parentdir, rootdir);
+       if (result != PVRSRV_OK) {
+               PVR_DPF((PVR_DBG_WARNING,
+                       "Failed to create \"%s\" DebugFS directory.", sub_dir));
+               goto err_exit;
+       }
+
+       for (i = 0; i < init_data_size; i++) {
+               if (!class_state[init_data[i].class].enabled)
+                       continue;
+
+               result = DICreateEntry(init_data[i].name,
+                               *rootdir,
+                               &iterator,
+                               (void *) &apphint.val[init_data[i].id + device_value_offset],
+                               DI_ENTRY_TYPE_GENERIC,
+                               &entry[i]);
+               if (result != PVRSRV_OK) {
+                       PVR_DPF((PVR_DBG_WARNING,
+                               "Failed to create \"%s/%s\" DebugFS entry.",
+                               sub_dir, init_data[i].name));
+               }
+       }
+
+       return 0;
+
+err_exit:
+       return result;
+}
+
+/**
+ * apphint_debuginfo_deinit - destroy the debuginfo entries
+ */
+static void apphint_debuginfo_deinit(unsigned int num_entries,
+               DI_GROUP **rootdir,
+               DI_ENTRY *entry[])
+{
+       unsigned int i;
+
+       for (i = 0; i < num_entries; i++) {
+               if (entry[i]) {
+                       DIDestroyEntry(entry[i]);
+               }
+       }
+
+       if (*rootdir) {
+               DIDestroyGroup(*rootdir);
+               *rootdir = NULL;
+       }
+}
+
+/*
+*******************************************************************************
+ AppHint status dump implementation
+******************************************************************************/
+#if defined(PDUMP)
+static void apphint_pdump_values(void *pvDeviceNode,
+                                 const IMG_CHAR *format, ...)
+{
+       char km_buffer[APPHINT_BUFFER_SIZE];
+       IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+       va_list ap;
+
+       va_start(ap, format);
+       (void)vsnprintf(km_buffer, APPHINT_BUFFER_SIZE, format, ap);
+       va_end(ap);
+
+       /* ui32CommentSize set to 0 here as function does not make use of the value. */
+       PDumpCommentKM(NULL, (PVRSRV_DEVICE_NODE*)pvDeviceNode, 0, km_buffer, ui32Flags);
+}
+#endif
+
+static IMG_BOOL is_apphint_value_equal(const APPHINT_DATA_TYPE data_type,
+                                       const union apphint_value * const left,
+                                       const union apphint_value * const right)
+{
+       switch (data_type) {
+       case APPHINT_DATA_TYPE_UINT64:
+               return left->UINT64 == right->UINT64;
+       case APPHINT_DATA_TYPE_UINT32:
+       case APPHINT_DATA_TYPE_UINT32List:
+       case APPHINT_DATA_TYPE_UINT32Bitfield:
+               return left->UINT32 == right->UINT32;
+       case APPHINT_DATA_TYPE_BOOL:
+               return left->BOOL == right->BOOL;
+       case APPHINT_DATA_TYPE_STRING:
+               return (OSStringNCompare(left->STRING, right->STRING,
+                               OSStringLength(right->STRING) + 1) == 0 ?
+                               IMG_TRUE : IMG_FALSE);
+       default:
+               PVR_DPF((PVR_DBG_WARNING, "%s: unhandled data type (%d)", __func__, data_type));
+               return IMG_FALSE;
+       }
+}
+
+static void apphint_dump_values(const char *group_name,
+                       int device_num,
+                       const struct apphint_init_data *group_data,
+                       int group_size,
+                       DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                       void *pvDumpDebugFile,
+                       bool list_all,
+                       PVRSRV_DEVICE_NODE *psDevNode)
+{
+       int i, result;
+       int device_value_offset = device_num * APPHINT_DEBUGINFO_DEVICE_ID_MAX;
+       char km_buffer[APPHINT_BUFFER_SIZE];
+       char count = 0;
+
+       PVR_DUMPDEBUG_LOG("  %s", group_name);
+       for (i = 0; i < group_size; i++)
+       {
+               IMG_UINT32 id = group_data[i].id;
+               APPHINT_DATA_TYPE data_type = param_lookup[id].data_type;
+               const struct apphint_action *action = &apphint.val[id + device_value_offset];
+               union apphint_value value;
+
+               result = get_apphint_value_from_action(action, &value, psDevNode);
+
+               if (PVRSRV_OK != result) {
+                       continue;
+               }
+
+               /* List only apphints with non-default values */
+               if (!list_all &&
+                       is_apphint_value_equal(data_type, &value, &group_data[i].default_value)) {
+                       continue;
+               }
+
+               result = apphint_write(km_buffer, APPHINT_BUFFER_SIZE, action);
+               count++;
+
+               if (result <= 0) {
+                       PVR_DUMPDEBUG_LOG("    %s: <Error>",
+                               group_data[i].name);
+               } else {
+                       PVR_DUMPDEBUG_LOG("    %s: %s",
+                               group_data[i].name, km_buffer);
+               }
+       }
+
+       if (count == 0) {
+               PVR_DUMPDEBUG_LOG("    none");
+       }
+}
+
+/**
+ * Callback for debug dump
+ */
+static void apphint_dump_state(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+                       IMG_UINT32 ui32VerbLevel,
+                       DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                       void *pvDumpDebugFile)
+{
+       int i, result;
+       char km_buffer[APPHINT_BUFFER_SIZE];
+       PVRSRV_DEVICE_NODE *device = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle;
+
+       if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) {
+               PVR_DUMPDEBUG_LOG("------[ AppHint Settings ]------");
+
+               apphint_dump_values("Build Vars", 0,
+                       init_data_buildvar, ARRAY_SIZE(init_data_buildvar),
+                       pfnDumpDebugPrintf, pvDumpDebugFile, true, device);
+
+               apphint_dump_values("Module Params", 0,
+                       init_data_modparam, ARRAY_SIZE(init_data_modparam),
+                       pfnDumpDebugPrintf, pvDumpDebugFile, false, device);
+
+               apphint_dump_values("Debug Info Params", 0,
+                       init_data_debuginfo, ARRAY_SIZE(init_data_debuginfo),
+                       pfnDumpDebugPrintf, pvDumpDebugFile, false, device);
+
+               for (i = 0; i < APPHINT_DEVICES_MAX; i++) {
+                       if (!apphint.devices[i]
+                           || (device && device != apphint.devices[i]))
+                               continue;
+
+                       result = snprintf(km_buffer,
+                                         APPHINT_BUFFER_SIZE,
+                                         "Debug Info Params Device ID: %d",
+                                         i);
+                       if (0 > result)
+                               continue;
+
+                       apphint_dump_values(km_buffer, i,
+                                           init_data_debuginfo_device,
+                                           ARRAY_SIZE(init_data_debuginfo_device),
+                                           pfnDumpDebugPrintf,
+                                           pvDumpDebugFile,
+                                           false, device);
+               }
+       }
+}
+
+/*
+*******************************************************************************
+ Public interface
+******************************************************************************/
+int pvr_apphint_init(void)
+{
+       int result, i;
+
+       if (apphint.initialized) {
+               result = -EEXIST;
+               goto err_out;
+       }
+
+       for (i = 0; i < APPHINT_DEVICES_MAX; i++)
+               apphint.devices[i] = NULL;
+
+       /* create workqueue with strict execution ordering to ensure no
+        * race conditions when setting/updating apphints from different
+        * contexts
+        */
+       apphint.workqueue = alloc_workqueue("apphint_workqueue",
+                                           WQ_UNBOUND | WQ_FREEZABLE, 1);
+       if (!apphint.workqueue) {
+               result = -ENOMEM;
+               goto err_out;
+       }
+
+       result = apphint_debuginfo_init("apphint", 0,
+               ARRAY_SIZE(init_data_debuginfo), init_data_debuginfo,
+               NULL,
+               &apphint.debuginfo_rootdir, apphint.debuginfo_entry);
+       if (0 != result)
+               goto err_out;
+
+       result = apphint_debuginfo_init("buildvar", 0,
+               ARRAY_SIZE(init_data_buildvar), init_data_buildvar,
+               NULL,
+               &apphint.buildvar_rootdir, apphint.buildvar_entry);
+
+       apphint.initialized = 1;
+
+err_out:
+       return result;
+}
+
+int pvr_apphint_device_register(PVRSRV_DEVICE_NODE *device)
+{
+       int result, i;
+       char device_num[APPHINT_BUFFER_SIZE];
+       unsigned int device_value_offset;
+
+       if (!apphint.initialized) {
+               result = -EAGAIN;
+               goto err_out;
+       }
+
+       if (apphint.num_devices+1 > APPHINT_DEVICES_MAX) {
+               result = -EMFILE;
+               goto err_out;
+       }
+
+       result = snprintf(device_num, APPHINT_BUFFER_SIZE, "%u", apphint.num_devices);
+       if (result < 0) {
+               PVR_DPF((PVR_DBG_WARNING,
+                       "snprintf failed (%d)", result));
+               result = -EINVAL;
+               goto err_out;
+       }
+
+       /* Set the default values for the new device */
+       device_value_offset = apphint.num_devices * APPHINT_DEBUGINFO_DEVICE_ID_MAX;
+       for (i = 0; i < APPHINT_DEBUGINFO_DEVICE_ID_MAX; i++) {
+               apphint.val[init_data_debuginfo_device[i].id + device_value_offset].stored
+                       = init_data_debuginfo_device[i].default_value;
+       }
+
+       /* Set the value of an AppHint if a mapping to a module parameter
+        * exists for it and that module parameter has been initialised */
+       for (i = 0; i < ARRAY_SIZE(init_data_debuginfo_device_to_modparams); i++) {
+               const struct apphint_init_data_mapping *mapping =
+                       &init_data_debuginfo_device_to_modparams[i];
+               const struct apphint_action *modparam_action =
+                       &apphint.val[mapping->modparam_apphint_id];
+               struct apphint_action *device_action =
+                       &apphint.val[mapping->device_apphint_id + device_value_offset];
+
+               /* Set only if the module parameter was explicitly set during the module
+                * load. */
+               if (modparam_action->initialised) {
+                       device_action->stored = modparam_action->stored;
+               }
+       }
+
+       result = apphint_debuginfo_init(device_num, apphint.num_devices,
+                                     ARRAY_SIZE(init_data_debuginfo_device),
+                                     init_data_debuginfo_device,
+                                     apphint.debuginfo_rootdir,
+                                     &apphint.debuginfo_device_rootdir[apphint.num_devices],
+                                     apphint.debuginfo_device_entry[apphint.num_devices]);
+       if (0 != result)
+               goto err_out;
+
+       apphint.devices[apphint.num_devices] = device;
+       apphint.num_devices++;
+
+       (void)SOPvrDbgRequestNotifyRegister(
+                       &device->hAppHintDbgReqNotify,
+                       device,
+                       apphint_dump_state,
+                       DEBUG_REQUEST_APPHINT,
+                       device);
+
+err_out:
+       return result;
+}
+
+void pvr_apphint_device_unregister(PVRSRV_DEVICE_NODE *device)
+{
+       int i;
+
+       if (!apphint.initialized)
+               return;
+
+       /* find the device */
+       for (i = 0; i < APPHINT_DEVICES_MAX; i++) {
+               if (apphint.devices[i] == device)
+                       break;
+       }
+
+       if (APPHINT_DEVICES_MAX == i)
+               return;
+
+       if (device->hAppHintDbgReqNotify) {
+               (void)SOPvrDbgRequestNotifyUnregister(
+                       device->hAppHintDbgReqNotify);
+               device->hAppHintDbgReqNotify = NULL;
+       }
+
+       apphint_debuginfo_deinit(APPHINT_DEBUGINFO_DEVICE_ID_MAX,
+                              &apphint.debuginfo_device_rootdir[i],
+                              apphint.debuginfo_device_entry[i]);
+
+       apphint.devices[i] = NULL;
+
+       WARN_ON(apphint.num_devices == 0);
+       apphint.num_devices--;
+}
+
+void pvr_apphint_deinit(void)
+{
+       int i;
+
+       if (!apphint.initialized)
+               return;
+
+       /* remove any remaining device data */
+       for (i = 0; apphint.num_devices && i < APPHINT_DEVICES_MAX; i++) {
+               if (apphint.devices[i])
+                       pvr_apphint_device_unregister(apphint.devices[i]);
+       }
+
+       /* free all alloc'd string apphints and set to NULL */
+       for (i = 0; i < ARRAY_SIZE(apphint.val); i++) {
+               if (apphint.val[i].free && apphint.val[i].stored.STRING) {
+                       kfree(apphint.val[i].stored.STRING);
+                       apphint.val[i].stored.STRING = NULL;
+                       apphint.val[i].free = false;
+               }
+       }
+
+       apphint_debuginfo_deinit(APPHINT_DEBUGINFO_ID_MAX,
+                       &apphint.debuginfo_rootdir, apphint.debuginfo_entry);
+       apphint_debuginfo_deinit(APPHINT_BUILDVAR_ID_MAX,
+                       &apphint.buildvar_rootdir, apphint.buildvar_entry);
+
+       destroy_workqueue(apphint.workqueue);
+
+       apphint.initialized = 0;
+}
+
+void pvr_apphint_dump_state(PVRSRV_DEVICE_NODE *device)
+{
+#if defined(PDUMP)
+       /* NB. apphint_pdump_values() is the pfnDumpDebugPrintf
+        * function used when PDUMP is defined.
+        * apphint_pdump_values() calls PDumpCommentKM(), which
+        * requires the device, but as it is only called as a
+        * DUMPDEBUG_PRINTF_FUNC it is only passed pvDumpDebugFile
+        * (which happens to be the 4th parameter in the call to
+        * apphint_dump_state() below).
+        * Hence, we also need to pass device as the 4th parameter.
+        */
+       apphint_dump_state(device, DEBUG_REQUEST_VERBOSITY_HIGH,
+                          apphint_pdump_values, device);
+#endif
+       apphint_dump_state(device, DEBUG_REQUEST_VERBOSITY_HIGH,
+                          NULL, NULL);
+}
+
+
+int pvr_apphint_get_uint64(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_UINT64 *pVal)
+{
+       int error = -ERANGE;
+       int device_offset = (device != NULL) ? device->sDevId.ui32InternalID * APPHINT_DEBUGINFO_DEVICE_ID_MAX : 0;
+
+       if (ue < APPHINT_ID_MAX) {
+               if ((int)ue > APPHINT_DEBUGINFO_DEVICE_ID_OFFSET) // From this point, we're in the device apphints
+               {
+                       *pVal = apphint.val[ue + device_offset].stored.UINT64;
+                       error = 0;
+               }
+               else
+               {
+                       *pVal = apphint.val[ue].stored.UINT64;
+                       error = 0;
+               }
+       }
+       return error;
+}
+
+int pvr_apphint_get_uint32(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_UINT32 *pVal)
+{
+       int error = -ERANGE;
+       int device_offset = (device != NULL) ? device->sDevId.ui32InternalID * APPHINT_DEBUGINFO_DEVICE_ID_MAX : 0;
+
+       if (ue < APPHINT_ID_MAX) {
+               if ((int)ue > APPHINT_DEBUGINFO_DEVICE_ID_OFFSET) // From this point, we're in the device apphints
+               {
+                       *pVal = apphint.val[ue + device_offset].stored.UINT32;
+                       error = 0;
+               }
+               else
+               {
+                       *pVal = apphint.val[ue].stored.UINT32;
+                       error = 0;
+               }
+       }
+       return error;
+}
+
+int pvr_apphint_get_bool(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_BOOL *pVal)
+{
+       int error = -ERANGE;
+       int device_offset = (device != NULL) ? device->sDevId.ui32InternalID * APPHINT_DEBUGINFO_DEVICE_ID_MAX : 0;
+
+       if (ue < APPHINT_ID_MAX) {
+               if ((int)ue > APPHINT_DEBUGINFO_DEVICE_ID_OFFSET) // From this point, we're in the device apphints
+               {
+                       *pVal = apphint.val[ue + device_offset].stored.BOOL;
+                       error = 0;
+               }
+               else
+               {
+                       *pVal = apphint.val[ue].stored.BOOL;
+                       error = 0;
+               }
+       }
+       return error;
+}
+
+int pvr_apphint_get_string(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size)
+{
+       int error = -ERANGE;
+       int device_offset = (device != NULL) ? device->sDevId.ui32InternalID * APPHINT_DEBUGINFO_DEVICE_ID_MAX : 0;
+
+       if (ue < APPHINT_ID_MAX && apphint.val[ue].stored.STRING) {
+               if ((int)ue > APPHINT_DEBUGINFO_DEVICE_ID_OFFSET) // From this point, we're in the device apphints
+               {
+                       if (OSStringLCopy(pBuffer, apphint.val[ue + device_offset].stored.STRING, size) < size) {
+                               error = 0;
+                       }
+               }
+               else
+               {
+                       if (OSStringLCopy(pBuffer, apphint.val[ue].stored.STRING, size) < size) {
+                               error = 0;
+                       }
+               }
+       }
+       return error;
+}
+
+int pvr_apphint_set_uint64(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_UINT64 Val)
+{
+       int error = -ERANGE;
+       int device_offset = (device != NULL) ? device->sDevId.ui32InternalID * APPHINT_DEBUGINFO_DEVICE_ID_MAX : 0;
+
+       if ((ue < APPHINT_ID_MAX) &&
+               (param_lookup[ue].data_type == APPHINT_DATA_TYPE_UINT64)) {
+
+               if (apphint.val[ue + device_offset].set.UINT64) {
+                       apphint.val[ue + device_offset].set.UINT64(apphint.val[ue + device_offset].device,
+                                                                                                        apphint.val[ue + device_offset].private_data,
+                                                                                                        Val);
+               } else {
+                       apphint.val[ue + device_offset].stored.UINT64 = Val;
+               }
+               apphint.val[ue].device = device;
+               error = 0;
+       }
+
+       return error;
+}
+
+int pvr_apphint_set_uint32(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_UINT32 Val)
+{
+       int error = -ERANGE;
+       int device_offset = (device != NULL) ? device->sDevId.ui32InternalID * APPHINT_DEBUGINFO_DEVICE_ID_MAX : 0;
+
+       if ((ue < APPHINT_ID_MAX) &&
+               (param_lookup[ue].data_type == APPHINT_DATA_TYPE_UINT32)) {
+
+               if (apphint.val[ue + device_offset].set.UINT32) {
+                       apphint.val[ue + device_offset].set.UINT32(apphint.val[ue + device_offset].device,
+                                                                                                        apphint.val[ue + device_offset].private_data,
+                                                                                                        Val);
+               } else {
+                       apphint.val[ue + device_offset].stored.UINT32 = Val;
+               }
+               apphint.val[ue].device = device;
+               error = 0;
+       }
+
+       return error;
+}
+
+int pvr_apphint_set_bool(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_BOOL Val)
+{
+       int error = -ERANGE;
+       int device_offset = (device != NULL) ? device->sDevId.ui32InternalID * APPHINT_DEBUGINFO_DEVICE_ID_MAX : 0;
+
+       if ((ue < APPHINT_ID_MAX) &&
+               (param_lookup[ue].data_type == APPHINT_DATA_TYPE_BOOL)) {
+
+               error = 0;
+               if (apphint.val[ue + device_offset].set.BOOL) {
+                       apphint.val[ue + device_offset].set.BOOL(apphint.val[ue + device_offset].device,
+                                                                                                apphint.val[ue + device_offset].private_data,
+                                                                                                Val);
+               } else {
+                       apphint.val[ue + device_offset].stored.BOOL = Val;
+               }
+               apphint.val[ue].device = device;
+       }
+
+       return error;
+}
+
+int pvr_apphint_set_string(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size)
+{
+       int error = -ERANGE;
+       int device_offset = (device != NULL) ? device->sDevId.ui32InternalID * APPHINT_DEBUGINFO_DEVICE_ID_MAX : 0;
+
+       if ((ue < APPHINT_ID_MAX) &&
+               ((param_lookup[ue].data_type == APPHINT_DATA_TYPE_STRING) &&
+               apphint.val[ue + device_offset].stored.STRING)) {
+
+               if (apphint.val[ue + device_offset].set.STRING) {
+                       error = apphint.val[ue + device_offset].set.STRING(apphint.val[ue + device_offset].device,
+                                                                                                                        apphint.val[ue + device_offset].private_data,
+                                                                                                                        pBuffer);
+               } else {
+                       if (strlcpy(apphint.val[ue + device_offset].stored.STRING, pBuffer, size) < size) {
+                               error = 0;
+                       }
+               }
+               apphint.val[ue].device = device;
+       }
+
+       return error;
+}
+
+void pvr_apphint_register_handlers_uint64(APPHINT_ID id,
+       PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 *value),
+       PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 value),
+       const PVRSRV_DEVICE_NODE *device,
+       const void *private_data)
+{
+       int device_value_offset;
+
+       PVR_DPF((APPHINT_DPF_LEVEL, "%s(%d, %p, %p, %p, %p)",
+                __func__, id, query, set, device, private_data));
+
+       if (id >= APPHINT_ID_MAX) {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: AppHint ID (%d) is out of range, max (%d)",
+                        __func__, id, APPHINT_ID_MAX-1));
+               return;
+       }
+
+       get_value_offset_from_device(device, &device_value_offset, id);
+
+       switch (param_lookup[id].data_type) {
+       case APPHINT_DATA_TYPE_UINT64:
+               break;
+       default:
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Does not match AppHint data type for ID (%d)",
+                        __func__, id));
+               return;
+       }
+
+       apphint.val[id + device_value_offset] = (struct apphint_action){
+               .query.UINT64 = query,
+               .set.UINT64 = set,
+               .device = device,
+               .private_data = private_data,
+               .stored = apphint.val[id + device_value_offset].stored
+       };
+}
+
+void pvr_apphint_register_handlers_uint32(APPHINT_ID id,
+       PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 *value),
+       PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 value),
+       const PVRSRV_DEVICE_NODE *device,
+       const void *private_data)
+{
+       int device_value_offset;
+
+       PVR_DPF((APPHINT_DPF_LEVEL, "%s(%d, %p, %p, %p, %p)",
+                __func__, id, query, set, device, private_data));
+
+       if (id >= APPHINT_ID_MAX) {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: AppHint ID (%d) is out of range, max (%d)",
+                        __func__, id, APPHINT_ID_MAX-1));
+               return;
+       }
+
+       get_value_offset_from_device(device, &device_value_offset, id);
+
+       switch (param_lookup[id].data_type) {
+       case APPHINT_DATA_TYPE_UINT32:
+       case APPHINT_DATA_TYPE_UINT32Bitfield:
+       case APPHINT_DATA_TYPE_UINT32List:
+               break;
+
+       default:
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Does not match AppHint data type for ID (%d)",
+                        __func__, id));
+               return;
+       }
+
+       apphint.val[id + device_value_offset] = (struct apphint_action){
+               .query.UINT32 = query,
+               .set.UINT32 = set,
+               .device = device,
+               .private_data = private_data,
+               .stored = apphint.val[id + device_value_offset].stored
+       };
+}
+
+void pvr_apphint_register_handlers_bool(APPHINT_ID id,
+       PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL *value),
+       PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL value),
+       const PVRSRV_DEVICE_NODE *device,
+       const void *private_data)
+{
+       int device_value_offset;
+
+       PVR_DPF((APPHINT_DPF_LEVEL, "%s(%d, %p, %p, %p, %p)",
+                __func__, id, query, set, device, private_data));
+
+       if (id >= APPHINT_ID_MAX) {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: AppHint ID (%d) is out of range, max (%d)",
+                        __func__, id, APPHINT_ID_MAX-1));
+               return;
+       }
+
+       get_value_offset_from_device(device, &device_value_offset, id);
+
+       switch (param_lookup[id].data_type) {
+       case APPHINT_DATA_TYPE_BOOL:
+               break;
+
+       default:
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Does not match AppHint data type for ID (%d)",
+                        __func__, id));
+               return;
+       }
+
+       apphint.val[id + device_value_offset] = (struct apphint_action){
+               .query.BOOL = query,
+               .set.BOOL = set,
+               .device = device,
+               .private_data = private_data,
+               .stored = apphint.val[id + device_value_offset].stored
+       };
+}
+
+void pvr_apphint_register_handlers_string(APPHINT_ID id,
+       PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR **value),
+       PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR *value),
+       const PVRSRV_DEVICE_NODE *device,
+       const void *private_data)
+{
+       int device_value_offset;
+
+       PVR_DPF((APPHINT_DPF_LEVEL, "%s(%d, %p, %p, %p, %p)",
+                __func__, id, query, set, device, private_data));
+
+       if (id >= APPHINT_ID_MAX) {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: AppHint ID (%d) is out of range, max (%d)",
+                        __func__, id, APPHINT_ID_MAX-1));
+               return;
+       }
+
+       get_value_offset_from_device(device, &device_value_offset, id);
+
+       switch (param_lookup[id].data_type) {
+       case APPHINT_DATA_TYPE_STRING:
+               break;
+
+       default:
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Does not match AppHint data type for ID (%d)",
+                        __func__, id));
+               return;
+       }
+
+       apphint.val[id + device_value_offset] = (struct apphint_action){
+               .query.STRING = query,
+               .set.STRING = set,
+               .device = device,
+               .private_data = private_data,
+               .stored = apphint.val[id + device_value_offset].stored
+       };
+}
+
+/* EOF */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/km_apphint.h b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/km_apphint.h
new file mode 100644 (file)
index 0000000..71e2ce9
--- /dev/null
@@ -0,0 +1,99 @@
+/*************************************************************************/ /*!
+@File           km_apphint.h
+@Title          Apphint internal header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Linux kernel AppHint control
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef KM_APPHINT_H
+#define KM_APPHINT_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include "pvrsrv_apphint.h"
+#include "km_apphint_defs.h"
+#include "device.h"
+
+int pvr_apphint_init(void);
+void pvr_apphint_deinit(void);
+int pvr_apphint_device_register(PVRSRV_DEVICE_NODE *device);
+void pvr_apphint_device_unregister(PVRSRV_DEVICE_NODE *device);
+void pvr_apphint_dump_state(PVRSRV_DEVICE_NODE *device);
+
+int pvr_apphint_get_uint64(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_UINT64 *pVal);
+int pvr_apphint_get_uint32(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_UINT32 *pVal);
+int pvr_apphint_get_bool(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_BOOL *pVal);
+int pvr_apphint_get_string(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size);
+
+int pvr_apphint_set_uint64(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_UINT64 Val);
+int pvr_apphint_set_uint32(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_UINT32 Val);
+int pvr_apphint_set_bool(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_BOOL Val);
+int pvr_apphint_set_string(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size);
+
+void pvr_apphint_register_handlers_uint64(APPHINT_ID id,
+       PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 *value),
+       PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 value),
+       const PVRSRV_DEVICE_NODE *device,
+       const void * private_data);
+void pvr_apphint_register_handlers_uint32(APPHINT_ID id,
+       PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 *value),
+       PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 value),
+       const PVRSRV_DEVICE_NODE *device,
+       const void *private_data);
+void pvr_apphint_register_handlers_bool(APPHINT_ID id,
+       PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL *value),
+       PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL value),
+       const PVRSRV_DEVICE_NODE *device,
+       const void *private_data);
+void pvr_apphint_register_handlers_string(APPHINT_ID id,
+       PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR **value),
+       PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR *value),
+       const PVRSRV_DEVICE_NODE *device,
+       const void *private_data);
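+
+/* A minimal usage sketch (the callback names, AppHint ID and private data
+ * below are hypothetical, shown for illustration only): a device-specific
+ * component may register its own accessors for a 32-bit AppHint, after which
+ * reads and writes of that AppHint (e.g. via its DebugFS entry) are serviced
+ * by these callbacks rather than the stored value:
+ *
+ *   static PVRSRV_ERROR FooQuery(const PVRSRV_DEVICE_NODE *psDevNode,
+ *                                const void *pvPrivate, IMG_UINT32 *pui32Val);
+ *   static PVRSRV_ERROR FooSet(const PVRSRV_DEVICE_NODE *psDevNode,
+ *                              const void *pvPrivate, IMG_UINT32 ui32Val);
+ *
+ *   pvr_apphint_register_handlers_uint32(APPHINT_ID_Foo, FooQuery, FooSet,
+ *                                        psDeviceNode, pvFooPrivate);
+ */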
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* KM_APPHINT_H */
+
+/******************************************************************************
+ End of file (km_apphint.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/linkage.h b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/linkage.h
new file mode 100644 (file)
index 0000000..3f24dc6
--- /dev/null
@@ -0,0 +1,52 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linux specific Services code internal interfaces
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Interfaces between various parts of the Linux specific
+                Services code, that don't have any other obvious
+                header file to go into.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(LINKAGE_H)
+#define LINKAGE_H
+
+PVRSRV_ERROR PVROSFuncInit(void);
+void PVROSFuncDeInit(void);
+
+#endif /* !defined(LINKAGE_H) */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/module_common.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/module_common.c
new file mode 100644 (file)
index 0000000..1f7b406
--- /dev/null
@@ -0,0 +1,742 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common Linux module setup
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/module.h>
+
+#if defined(CONFIG_DEBUG_FS)
+#include "pvr_debugfs.h"
+#endif /* defined(CONFIG_DEBUG_FS) */
+#if defined(CONFIG_PROC_FS)
+#include "pvr_procfs.h"
+#endif /* defined(CONFIG_PROC_FS) */
+#include "di_server.h"
+#include "private_data.h"
+#include "linkage.h"
+#include "power.h"
+#include "env_connection.h"
+#include "process_stats.h"
+#include "module_common.h"
+#include "pvrsrv.h"
+#include "srvcore.h"
+#if defined(SUPPORT_RGX)
+#include "rgxdevice.h"
+#endif
+#include "pvrsrv_error.h"
+#include "pvr_drv.h"
+#include "pvr_bridge_k.h"
+
+#include "pvr_fence.h"
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+#include "pvr_sync.h"
+#if !defined(USE_PVRSYNC_DEVNODE)
+#include "pvr_sync_ioctl_drm.h"
+#endif
+#endif
+
+#include "ospvr_gputrace.h"
+
+#include "km_apphint.h"
+#include "srvinit.h"
+
+#include "pvr_ion_stats.h"
+#include "sysconfig.h"
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+/* Display class interface */
+#include "kerneldisplay.h"
+EXPORT_SYMBOL(DCRegisterDevice);
+EXPORT_SYMBOL(DCUnregisterDevice);
+EXPORT_SYMBOL(DCDisplayConfigurationRetired);
+EXPORT_SYMBOL(DCDisplayHasPendingCommand);
+EXPORT_SYMBOL(DCImportBufferAcquire);
+EXPORT_SYMBOL(DCImportBufferRelease);
+
+/* Physmem interface (required by LMA DC drivers) */
+#include "physheap.h"
+EXPORT_SYMBOL(PhysHeapAcquireByUsage);
+EXPORT_SYMBOL(PhysHeapRelease);
+EXPORT_SYMBOL(PhysHeapGetType);
+EXPORT_SYMBOL(PhysHeapGetCpuPAddr);
+EXPORT_SYMBOL(PhysHeapGetSize);
+EXPORT_SYMBOL(PhysHeapCpuPAddrToDevPAddr);
+
+EXPORT_SYMBOL(PVRSRVGetDriverStatus);
+EXPORT_SYMBOL(PVRSRVSystemInstallDeviceLISR);
+EXPORT_SYMBOL(PVRSRVSystemUninstallDeviceLISR);
+
+#include "pvr_notifier.h"
+EXPORT_SYMBOL(PVRSRVCheckStatus);
+
+#include "pvr_debug.h"
+EXPORT_SYMBOL(PVRSRVGetErrorString);
+EXPORT_SYMBOL(PVRSRVGetDeviceInstance);
+#endif /* defined(SUPPORT_DISPLAY_CLASS) */
+
+#if defined(SUPPORT_RGX)
+#include "rgxapi_km.h"
+#if defined(SUPPORT_SHARED_SLC)
+EXPORT_SYMBOL(RGXInitSLC);
+#endif
+EXPORT_SYMBOL(RGXHWPerfConnect);
+EXPORT_SYMBOL(RGXHWPerfDisconnect);
+EXPORT_SYMBOL(RGXHWPerfControl);
+#if defined(RGX_FEATURE_HWPERF_VOLCANIC)
+EXPORT_SYMBOL(RGXHWPerfConfigureCounters);
+#else
+EXPORT_SYMBOL(RGXHWPerfConfigMuxCounters);
+EXPORT_SYMBOL(RGXHWPerfConfigureAndEnableCustomCounters);
+#endif
+EXPORT_SYMBOL(RGXHWPerfDisableCounters);
+EXPORT_SYMBOL(RGXHWPerfAcquireEvents);
+EXPORT_SYMBOL(RGXHWPerfReleaseEvents);
+EXPORT_SYMBOL(RGXHWPerfConvertCRTimeStamp);
+#if defined(SUPPORT_KERNEL_HWPERF_TEST)
+EXPORT_SYMBOL(OSAddTimer);
+EXPORT_SYMBOL(OSEnableTimer);
+EXPORT_SYMBOL(OSDisableTimer);
+EXPORT_SYMBOL(OSRemoveTimer);
+#endif
+#endif
+
+static int PVRSRVDeviceSyncOpen(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+                                struct drm_file *psDRMFile);
+
+CONNECTION_DATA *LinuxServicesConnectionFromFile(struct file *pFile)
+{
+       if (pFile)
+       {
+               struct drm_file *psDRMFile = pFile->private_data;
+               PVRSRV_CONNECTION_PRIV *psConnectionPriv = (PVRSRV_CONNECTION_PRIV*)psDRMFile->driver_priv;
+
+               return (CONNECTION_DATA*)psConnectionPriv->pvConnectionData;
+       }
+
+       return NULL;
+}
+
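+/* Returns the connection data to use for Sync operations on this file. When
+ * PVRSRV_DEVICE_INIT_MODE is PVRSRV_LINUX_DEV_INIT_ON_CONNECT only one
+ * connection exists per DRM file, so the common connection data is returned;
+ * otherwise the dedicated Sync connection data is returned.
+ */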
+CONNECTION_DATA *LinuxSyncConnectionFromFile(struct file *pFile)
+{
+       if (pFile)
+       {
+               struct drm_file *psDRMFile = pFile->private_data;
+               PVRSRV_CONNECTION_PRIV *psConnectionPriv = (PVRSRV_CONNECTION_PRIV*)psDRMFile->driver_priv;
+
+#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_CONNECT)
+               return (CONNECTION_DATA*)psConnectionPriv->pvConnectionData;
+#else
+               return (CONNECTION_DATA*)psConnectionPriv->pvSyncConnectionData;
+#endif
+       }
+
+       return NULL;
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVDriverInit
+@Description  Common one time driver initialisation
+@Return       int           0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVDriverInit(void)
+{
+       PVRSRV_ERROR error;
+       int os_err;
+
+       error = PVROSFuncInit();
+       if (error != PVRSRV_OK)
+       {
+               return -ENOMEM;
+       }
+
+       error = PVRSRVCommonDriverInit();
+       if (error != PVRSRV_OK)
+       {
+               return -ENODEV;
+       }
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+       error = pvr_sync_register_functions();
+       if (error != PVRSRV_OK)
+       {
+               return -EPERM;
+       }
+
+       os_err = pvr_sync_init();
+       if (os_err != 0)
+       {
+               return os_err;
+       }
+#endif
+
+       os_err = pvr_apphint_init();
+       if (os_err != 0)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "%s: failed AppHint setup(%d)", __func__,
+                            os_err));
+       }
+
+#if defined(SUPPORT_RGX)
+       error = PVRGpuTraceSupportInit();
+       if (error != PVRSRV_OK)
+       {
+               return -ENOMEM;
+       }
+#endif
+
+#if defined(ANDROID)
+#if defined(CONFIG_PROC_FS)
+       error = PVRProcFsRegister();
+       if (error != PVRSRV_OK)
+       {
+               return -ENOMEM;
+       }
+#elif defined(CONFIG_DEBUG_FS)
+       error = PVRDebugFsRegister();
+       if (error != PVRSRV_OK)
+       {
+               return -ENOMEM;
+       }
+#endif /* defined(CONFIG_PROC_FS) || defined(CONFIG_DEBUG_FS) */
+#else
+#if defined(CONFIG_DEBUG_FS)
+       error = PVRDebugFsRegister();
+       if (error != PVRSRV_OK)
+       {
+               return -ENOMEM;
+       }
+#elif defined(CONFIG_PROC_FS)
+       error = PVRProcFsRegister();
+       if (error != PVRSRV_OK)
+       {
+               return -ENOMEM;
+       }
+#endif /* defined(CONFIG_DEBUG_FS) || defined(CONFIG_PROC_FS) */
+#endif /* defined(ANDROID) */
+
+       error = PVRSRVIonStatsInitialise();
+       if (error != PVRSRV_OK)
+       {
+               return -ENODEV;
+       }
+
+#if defined(SUPPORT_RGX)
+       /* Called here because we need to handle input from the file even
+        * before the devices are initialised.
+        * Note: we're not passing a device node because the apphint callbacks
+        * don't need one. */
+       PVRGpuTraceInitAppHintCallbacks(NULL);
+#endif
+       return 0;
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVDriverDeinit
+@Description  Common one time driver de-initialisation
+@Return       void
+*/ /***************************************************************************/
+void PVRSRVDriverDeinit(void)
+{
+       pvr_apphint_deinit();
+
+       PVRSRVIonStatsDestroy();
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+       pvr_sync_deinit();
+#endif
+
+       PVRSRVCommonDriverDeInit();
+
+#if defined(SUPPORT_RGX)
+       PVRGpuTraceSupportDeInit();
+#endif
+
+       PVROSFuncDeInit();
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVDeviceInit
+@Description  Common device related initialisation.
+@Input        psDeviceNode  The device node for which initialisation should be
+                            performed
+@Return       int           0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVDeviceInit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+       {
+               PVRSRV_ERROR eError = pvr_sync_device_init(psDeviceNode->psDevConfig->pvOSDevice);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: unable to create sync (%d)",
+                                        __func__, eError));
+                       return -EBUSY;
+               }
+       }
+#endif
+
+#if defined(SUPPORT_RGX)
+       {
+               int error = PVRGpuTraceInitDevice(psDeviceNode);
+               if (error != 0)
+               {
+                       PVR_DPF((PVR_DBG_WARNING,
+                                "%s: failed to initialise PVR GPU Tracing on device %d (%d)",
+                                __func__, psDeviceNode->sDevId.i32OsDeviceID, error));
+               }
+       }
+#endif
+
+       return 0;
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVDeviceDeinit
+@Description  Common device related de-initialisation.
+@Input        psDeviceNode  The device node for which de-initialisation should
+                            be performed
+@Return       void
+*/ /***************************************************************************/
+void PVRSRVDeviceDeinit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+#if defined(SUPPORT_RGX)
+       PVRGpuTraceDeInitDevice(psDeviceNode);
+#endif
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+       pvr_sync_device_deinit(psDeviceNode->psDevConfig->pvOSDevice);
+#endif
+
+#if defined(SUPPORT_DMA_TRANSFER)
+       PVRSRVDeInitialiseDMA(psDeviceNode);
+#endif
+
+       pvr_fence_cleanup();
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVDeviceShutdown
+@Description  Common device shutdown.
+@Input        psDeviceNode  The device node representing the device that should
+                            be shutdown
+@Return       void
+*/ /***************************************************************************/
+
+void PVRSRVDeviceShutdown(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR eError;
+
+       /*
+        * Disable the bridge to stop processes trying to use the driver
+        * after it has been shut down.
+        */
+       eError = LinuxBridgeBlockClientsAccess(IMG_TRUE);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                       "%s: Failed to suspend driver (%d)",
+                       __func__, eError));
+               return;
+       }
+
+       (void) PVRSRVSetDeviceSystemPowerState(psDeviceNode,
+                                              PVRSRV_SYS_POWER_STATE_OFF,
+                                              PVRSRV_POWER_FLAGS_NONE);
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVDeviceSuspend
+@Description  Common device suspend.
+@Input        psDeviceNode  The device node representing the device that should
+                            be suspended
+@Return       int           0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVDeviceSuspend(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       /*
+        * LinuxBridgeBlockClientsAccess prevents processes from using the driver
+        * while it's suspended (this is needed for Android). Acquire the bridge
+        * lock first to ensure the driver isn't currently in use.
+        */
+       LinuxBridgeBlockClientsAccess(IMG_FALSE);
+
+#if defined(SUPPORT_AUTOVZ)
+       /* To allow the driver to power down the GPU under AutoVz, the firmware
+        * must be declared offline, otherwise all power requests will be ignored. */
+       psDeviceNode->bAutoVzFwIsUp = IMG_FALSE;
+#endif
+
+       if (PVRSRVSetDeviceSystemPowerState(psDeviceNode,
+                                                                               PVRSRV_SYS_POWER_STATE_OFF,
+                                                                               PVRSRV_POWER_FLAGS_SUSPEND) != PVRSRV_OK)
+       {
+               LinuxBridgeUnblockClientsAccess();
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVDeviceResume
+@Description  Common device resume.
+@Input        psDeviceNode  The device node representing the device that should
+                            be resumed
+@Return       int           0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVDeviceResume(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       if (PVRSRVSetDeviceSystemPowerState(psDeviceNode,
+                                                                               PVRSRV_SYS_POWER_STATE_ON,
+                                                                               PVRSRV_POWER_FLAGS_SUSPEND) != PVRSRV_OK)
+       {
+               return -EINVAL;
+       }
+
+       LinuxBridgeUnblockClientsAccess();
+
+       /*
+        * Reprocess the device queues in case commands were blocked during
+        * suspend.
+        */
+       if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_ACTIVE)
+       {
+               PVRSRVCheckStatus(NULL);
+       }
+
+       return 0;
+}
+
+int sPVRSRVDeviceSuspend(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       struct sf7110_cfg *sft = sys_get_privdata();
+
+       if (sft->runtime_suspend != NULL)
+               sft->runtime_suspend(NULL);
+
+       return 0;
+}
+
+int sPVRSRVDeviceResume(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       struct sf7110_cfg *sft = sys_get_privdata();
+
+       if (sft->runtime_resume != NULL)
+               sft->runtime_resume(NULL);
+
+       return 0;
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVDeviceServicesOpen
+@Description  Services device open.
+@Input        psDeviceNode  The device node representing the device being
+                            opened by a user mode process
+@Input        psDRMFile     The DRM file data that backs the file handle
+                            returned to the user mode process
+@Return       int           0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVDeviceServicesOpen(PVRSRV_DEVICE_NODE *psDeviceNode,
+                             struct drm_file *psDRMFile)
+{
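+       /* Serialise first-open device initialisation so that only one caller
+        * runs PVRSRVCommonDeviceInitialise() for a given device. */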
+       static DEFINE_MUTEX(sDeviceInitMutex);
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+       ENV_CONNECTION_PRIVATE_DATA sPrivData;
+       PVRSRV_CONNECTION_PRIV *psConnectionPriv;
+       PVRSRV_ERROR eError;
+       int iErr = 0;
+
+       if (!psPVRSRVData)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: No device data", __func__));
+               iErr = -ENODEV;
+               goto out;
+       }
+
+       mutex_lock(&sDeviceInitMutex);
+       /*
+        * If a previous attempt already set the state to bad,
+        * there is no point in trying again, so bail out.
+        */
+       if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_BAD)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Driver already in bad state. Device open failed.",
+                                __func__));
+               iErr = -ENODEV;
+               mutex_unlock(&sDeviceInitMutex);
+               goto out;
+       }
+
+       if (psDRMFile->driver_priv == NULL)
+       {
+               /* Allocate psConnectionPriv (stores private data and release pfn under driver_priv) */
+               psConnectionPriv = kzalloc(sizeof(*psConnectionPriv), GFP_KERNEL);
+               if (!psConnectionPriv)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: No memory to allocate driver_priv data", __func__));
+                       iErr = -ENOMEM;
+                       mutex_unlock(&sDeviceInitMutex);
+                       goto fail_alloc_connection_priv;
+               }
+       }
+       else
+       {
+               psConnectionPriv = (PVRSRV_CONNECTION_PRIV*)psDRMFile->driver_priv;
+       }
+
+       if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_INIT)
+       {
+               eError = PVRSRVCommonDeviceInitialise(psDeviceNode);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Failed to initialise device (%s)",
+                                        __func__, PVRSRVGetErrorString(eError)));
+                       iErr = -ENODEV;
+                       mutex_unlock(&sDeviceInitMutex);
+                       goto fail_device_init;
+               }
+
+#if defined(SUPPORT_RGX)
+               PVRGpuTraceInitIfEnabled(psDeviceNode);
+#endif
+       }
+       mutex_unlock(&sDeviceInitMutex);
+
+       sPrivData.psDevNode = psDeviceNode;
+
+       /*
+        * Here we pass the file pointer, which will be passed through to our
+        * OSConnectionPrivateDataInit function, where we can save it so that
+        * we can back-reference the file structure from its connection.
+        */
+       eError = PVRSRVCommonConnectionConnect(&psConnectionPriv->pvConnectionData,
+                                              (void *)&sPrivData);
+       if (eError != PVRSRV_OK)
+       {
+               iErr = -ENOMEM;
+               goto fail_connect;
+       }
+
+#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_CONNECT)
+       psConnectionPriv->pfDeviceRelease = PVRSRVCommonConnectionDisconnect;
+#endif
+       psDRMFile->driver_priv = (void*)psConnectionPriv;
+       goto out;
+
+fail_connect:
+fail_device_init:
+       kfree(psConnectionPriv);
+fail_alloc_connection_priv:
+out:
+       return iErr;
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVDeviceSyncOpen
+@Description  Sync device open.
+@Input        psDeviceNode  The device node representing the device being
+                            opened by a user mode process
+@Input        psDRMFile     The DRM file data that backs the file handle
+                            returned to the user mode process
+@Return       int           0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+static int PVRSRVDeviceSyncOpen(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                struct drm_file *psDRMFile)
+{
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+       CONNECTION_DATA *psConnection = NULL;
+       ENV_CONNECTION_PRIVATE_DATA sPrivData;
+       PVRSRV_CONNECTION_PRIV *psConnectionPriv;
+       PVRSRV_ERROR eError;
+       int iErr = 0;
+
+       if (!psPVRSRVData)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: No device data", __func__));
+               iErr = -ENODEV;
+               goto out;
+       }
+
+       if (psDRMFile->driver_priv == NULL)
+       {
+               /* Allocate psConnectionPriv (stores private data and release pfn under driver_priv) */
+               psConnectionPriv = kzalloc(sizeof(*psConnectionPriv), GFP_KERNEL);
+               if (!psConnectionPriv)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: No memory to allocate driver_priv data", __func__));
+                       iErr = -ENOMEM;
+                       goto out;
+               }
+       }
+       else
+       {
+               psConnectionPriv = (PVRSRV_CONNECTION_PRIV*)psDRMFile->driver_priv;
+       }
+
+       /* Allocate connection data area, no stats since process not registered yet */
+       psConnection = kzalloc(sizeof(*psConnection), GFP_KERNEL);
+       if (!psConnection)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: No memory to allocate connection data", __func__));
+               iErr = -ENOMEM;
+               goto fail_alloc_connection;
+       }
+#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_CONNECT)
+       psConnectionPriv->pvConnectionData = (void*)psConnection;
+#else
+       psConnectionPriv->pvSyncConnectionData = (void*)psConnection;
+#endif
+
+       sPrivData.psDevNode = psDeviceNode;
+
+       /* Call environment specific connection data init function */
+       eError = OSConnectionPrivateDataInit(&psConnection->hOsPrivateData, &sPrivData);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: OSConnectionPrivateDataInit() failed (%s)",
+                       __func__, PVRSRVGetErrorString(eError)));
+               goto fail_private_data_init;
+       }
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(USE_PVRSYNC_DEVNODE)
+#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_CONNECT)
+       iErr = pvr_sync_open(psConnectionPriv->pvConnectionData, psDRMFile);
+#else
+       iErr = pvr_sync_open(psConnectionPriv->pvSyncConnectionData, psDRMFile);
+#endif
+       if (iErr)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: pvr_sync_open() failed(%d)",
+                               __func__, iErr));
+               goto fail_pvr_sync_open;
+       }
+#endif
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(USE_PVRSYNC_DEVNODE)
+#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_CONNECT)
+       psConnectionPriv->pfDeviceRelease = pvr_sync_close;
+#endif
+#endif
+       psDRMFile->driver_priv = psConnectionPriv;
+       goto out;
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(USE_PVRSYNC_DEVNODE)
+fail_pvr_sync_open:
+       OSConnectionPrivateDataDeInit(psConnection->hOsPrivateData);
+#endif
+fail_private_data_init:
+       kfree(psConnection);
+fail_alloc_connection:
+       kfree(psConnectionPriv);
+out:
+       return iErr;
+}
+
+/**************************************************************************/ /*!
+@Function     PVRSRVDeviceRelease
+@Description  Common device release.
+@Input        psDeviceNode  The device node for the device that the given file
+                            represents
+@Input        psDRMFile     The DRM file data that's being released
+@Return       void
+*/ /***************************************************************************/
+void PVRSRVDeviceRelease(PVRSRV_DEVICE_NODE *psDeviceNode,
+                         struct drm_file *psDRMFile)
+{
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+       if (psDRMFile->driver_priv)
+       {
+               PVRSRV_CONNECTION_PRIV *psConnectionPriv = (PVRSRV_CONNECTION_PRIV*)psDRMFile->driver_priv;
+
+               if (psConnectionPriv->pvConnectionData)
+               {
+#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_CONNECT)
+                       if (psConnectionPriv->pfDeviceRelease)
+                       {
+                               psConnectionPriv->pfDeviceRelease(psConnectionPriv->pvConnectionData);
+                       }
+#else
+                       if (psConnectionPriv->pvConnectionData)
+                               PVRSRVCommonConnectionDisconnect(psConnectionPriv->pvConnectionData);
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(USE_PVRSYNC_DEVNODE)
+                       if (psConnectionPriv->pvSyncConnectionData)
+                               pvr_sync_close(psConnectionPriv->pvSyncConnectionData);
+#endif
+#endif
+               }
+
+               kfree(psDRMFile->driver_priv);
+               psDRMFile->driver_priv = NULL;
+       }
+}
+
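+/* Handler for the srvkm init request: dispatches on data->init_module to open
+ * either a Sync connection (PVR_SRVKM_SYNC_INIT) or a Services connection
+ * (PVR_SRVKM_SERVICES_INIT) for the calling DRM file.
+ */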
+int
+drm_pvr_srvkm_init(struct drm_device *dev, void *arg, struct drm_file *psDRMFile)
+{
+       struct drm_pvr_srvkm_init_data *data = arg;
+       struct pvr_drm_private *priv = dev->dev_private;
+       int iErr = 0;
+
+       switch (data->init_module)
+       {
+               case PVR_SRVKM_SYNC_INIT:
+               {
+                       iErr = PVRSRVDeviceSyncOpen(priv->dev_node, psDRMFile);
+                       break;
+               }
+               case PVR_SRVKM_SERVICES_INIT:
+               {
+                       iErr = PVRSRVDeviceServicesOpen(priv->dev_node, psDRMFile);
+                       break;
+               }
+               default:
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: invalid init_module (%d)",
+                               __func__, data->init_module));
+                       iErr = -EINVAL;
+               }
+       }
+
+       return iErr;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/module_common.h b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/module_common.h
new file mode 100644 (file)
index 0000000..c7d1ebd
--- /dev/null
@@ -0,0 +1,104 @@
+/*************************************************************************/ /*!
+@File           module_common.h
+@Title          Common Linux module setup header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef MODULE_COMMON_H
+#define MODULE_COMMON_H
+
+#include "pvr_drm.h"
+
+/* DRVNAME is the name we use to register our driver. */
+#define DRVNAME PVR_LDM_DRIVER_REGISTRATION_NAME
+
+struct _PVRSRV_DEVICE_NODE_;
+struct drm_file;
+struct drm_device;
+
+/* psDRMFile->driver_priv will point to a PVRSRV_CONNECTION_PRIV
+ * struct, which will contain a pointer to the CONNECTION_DATA and
+ * a pointer to the release function (which will differ depending
+ * on whether the connection is to Sync or Services).
+ */
+typedef void (*PFN_PVRSRV_DEV_RELEASE)(void *pvData);
+typedef struct
+{
+       /* pvConnectionData is used to hold Services connection data
+        * for all PVRSRV_DEVICE_INIT_MODE options.
+        */
+       void *pvConnectionData;
+
+       /* pfDeviceRelease is used to indicate the release function
+        * to be called when PVRSRV_DEVICE_INIT_MODE is PVRSRV_LINUX_DEV_INIT_ON_CONNECT,
+        * as we can then have only one connection made (either for Services or Sync) per
+        * psDRMFile, and need to know which type of connection is being released
+        * (as the ioctl release call is common for both).
+        */
+       PFN_PVRSRV_DEV_RELEASE pfDeviceRelease;
+
+       /* pvSyncConnectionData is used to hold Sync connection data
+        * when PVRSRV_DEVICE_INIT_MODE is not PVRSRV_LINUX_DEV_INIT_ON_CONNECT,
+        * as we can then have two connections made (for Services and Sync) to
+        * the same psDRMFile.
+        */
+       void *pvSyncConnectionData;
+} PVRSRV_CONNECTION_PRIV;
+
+int PVRSRVDriverInit(void);
+void PVRSRVDriverDeinit(void);
+
+int PVRSRVDeviceInit(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+void PVRSRVDeviceDeinit(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+
+void PVRSRVDeviceShutdown(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+int PVRSRVDeviceSuspend(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+int PVRSRVDeviceResume(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+
+int PVRSRVDeviceServicesOpen(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+                             struct drm_file *psDRMFile);
+void PVRSRVDeviceRelease(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+                         struct drm_file *psDRMFile);
+int drm_pvr_srvkm_init(struct drm_device *dev,
+                       void *arg, struct drm_file *psDRMFile);
+
+int sPVRSRVDeviceSuspend(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+
+int sPVRSRVDeviceResume(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+#endif /* MODULE_COMMON_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/osconnection_server.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/osconnection_server.c
new file mode 100644 (file)
index 0000000..0c3bc2d
--- /dev/null
@@ -0,0 +1,157 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linux-specific per-process data functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+
+#include "connection_server.h"
+#include "osconnection_server.h"
+
+#include "env_connection.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+
+#include <linux/sched.h>
+
+#if defined(SUPPORT_ION)
+#include <linux/err.h>
+#include PVR_ANDROID_ION_HEADER
+
+/*
+       The ion device (the base object for all requests)
+       is created by the system and we acquire it via
+       Linux-specific functions provided by the system layer.
+*/
+#include "ion_sys.h"
+#endif
+
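+/* Allocates the per-connection environment data (ENV_CONNECTION_DATA),
+ * recording the owning thread group ID and the device node. On pre-4.12
+ * kernels with SUPPORT_ION an ion client is also created for the connection.
+ */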
+PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOSData)
+{
+       ENV_CONNECTION_PRIVATE_DATA *psPrivData = pvOSData;
+       ENV_CONNECTION_DATA *psEnvConnection;
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+       ENV_ION_CONNECTION_DATA *psIonConnection;
+#endif
+
+       *phOsPrivateData = OSAllocZMem(sizeof(ENV_CONNECTION_DATA));
+
+       if (*phOsPrivateData == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed", __func__));
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       psEnvConnection = (ENV_CONNECTION_DATA *)*phOsPrivateData;
+
+       psEnvConnection->owner = current->tgid;
+
+       psEnvConnection->psDevNode = psPrivData->psDevNode;
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+       psEnvConnection->pvPvrSyncPrivateData = NULL;
+#endif
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+       psIonConnection = (ENV_ION_CONNECTION_DATA *)OSAllocZMem(sizeof(ENV_ION_CONNECTION_DATA));
+       if (psIonConnection == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed", __func__));
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       psEnvConnection->psIonData = psIonConnection;
+       /*
+               We can have more than one connection per process, so we need
+               more than the PID to have a unique name.
+       */
+       psEnvConnection->psIonData->psIonDev = IonDevAcquire();
+       OSSNPrintf(psEnvConnection->psIonData->azIonClientName, ION_CLIENT_NAME_SIZE, "pvr_ion_client-%p-%d", *phOsPrivateData, OSGetCurrentClientProcessIDKM());
+       psEnvConnection->psIonData->psIonClient =
+               ion_client_create(psEnvConnection->psIonData->psIonDev,
+                                                 psEnvConnection->psIonData->azIonClientName);
+
+       if (IS_ERR_OR_NULL(psEnvConnection->psIonData->psIonClient))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "OSConnectionPrivateDataInit: Couldn't create "
+                                                               "ion client for per connection data"));
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+#endif /* SUPPORT_ION && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData)
+{
+       ENV_CONNECTION_DATA *psEnvConnection;
+
+       if (hOsPrivateData == NULL)
+       {
+               return PVRSRV_OK;
+       }
+
+       psEnvConnection = hOsPrivateData;
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+       PVR_ASSERT(psEnvConnection->psIonData != NULL);
+
+       PVR_ASSERT(psEnvConnection->psIonData->psIonClient != NULL);
+       ion_client_destroy(psEnvConnection->psIonData->psIonClient);
+
+       IonDevRelease(psEnvConnection->psIonData->psIonDev);
+       OSFreeMem(psEnvConnection->psIonData);
+#endif
+
+       OSFreeMem(hOsPrivateData);
+       /* Not NULLing the handle; it is a copy on the caller's stack. */
+
+       return PVRSRV_OK;
+}
+
+
+PVRSRV_DEVICE_NODE *OSGetDevNode(CONNECTION_DATA *psConnection)
+{
+       ENV_CONNECTION_DATA *psEnvConnection;
+
+       psEnvConnection = PVRSRVConnectionPrivateData(psConnection);
+       PVR_ASSERT(psEnvConnection);
+
+       return psEnvConnection->psDevNode;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/osfunc.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/osfunc.c
new file mode 100644 (file)
index 0000000..4534643
--- /dev/null
@@ -0,0 +1,2600 @@
+/*************************************************************************/ /*!
+@File
+@Title          Environment related functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+#include <asm/page.h>
+#include <asm/div64.h>
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/hugetlb.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/genalloc.h>
+#include <linux/string.h>
+#include <linux/freezer.h>
+#include <asm/hardirq.h>
+#include <asm/tlbflush.h>
+#include <linux/timer.h>
+#include <linux/capability.h>
+#include <linux/uaccess.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/dmaengine.h>
+#include <linux/kthread.h>
+#include <linux/utsname.h>
+#include <linux/scatterlist.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+#include <linux/pfn_t.h>
+#include <linux/pfn.h>
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+#include <linux/sched/clock.h>
+#include <linux/sched/signal.h>
+#else
+#include <linux/sched.h>
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) */
+
+#include "log2.h"
+#include "osfunc.h"
+#include "cache_km.h"
+#include "img_defs.h"
+#include "img_types.h"
+#include "allocmem.h"
+#include "devicemem_server_utils.h"
+#include "event.h"
+#include "linkage.h"
+#include "pvr_uaccess.h"
+#include "pvr_debug.h"
+#include "pvr_bridge_k.h"
+#include "pvrsrv_memallocflags.h"
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+#include "physmem_osmem_linux.h"
+#include "dma_support.h"
+#include "kernel_compatibility.h"
+
+#include "pvrsrv_sync_server.h"
+
+
+#if defined(VIRTUAL_PLATFORM)
+#define EVENT_OBJECT_TIMEOUT_US                (120000000ULL)
+#else
+#if defined(EMULATOR) || defined(TC_APOLLO_TCF5)
+#define EVENT_OBJECT_TIMEOUT_US                (2000000ULL)
+#else
+#define EVENT_OBJECT_TIMEOUT_US                (100000ULL)
+#endif /* EMULATOR */
+#endif
+
+
+typedef struct {
+       struct task_struct *kthread;
+       PFN_THREAD pfnThread;
+       void *hData;
+       IMG_CHAR *pszThreadName;
+       IMG_BOOL   bIsThreadRunning;
+       IMG_BOOL   bIsSupportingThread;
+       PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB;
+       DLLIST_NODE sNode;
+} OSThreadData;
+
+void OSSuspendTaskInterruptible(void)
+{
+       set_current_state(TASK_INTERRUPTIBLE);
+       schedule();
+}
+
+static DLLIST_NODE gsThreadListHead;
+
+static void _ThreadListAddEntry(OSThreadData *psThreadListNode)
+{
+       dllist_add_to_tail(&gsThreadListHead, &(psThreadListNode->sNode));
+}
+
+static void _ThreadListRemoveEntry(OSThreadData *psThreadListNode)
+{
+       dllist_remove_node(&(psThreadListNode->sNode));
+}
+
+static void _ThreadSetStopped(OSThreadData *psOSThreadData)
+{
+       psOSThreadData->bIsThreadRunning = IMG_FALSE;
+}
+
+static void _OSInitThreadList(void)
+{
+       dllist_init(&gsThreadListHead);
+}
+
+void OSThreadDumpInfo(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf,
+                      void *pvDumpDebugFile)
+{
+       PDLLIST_NODE psNodeCurr, psNodeNext;
+
+       dllist_foreach_node(&gsThreadListHead, psNodeCurr, psNodeNext)
+       {
+               OSThreadData *psThreadListNode;
+               psThreadListNode = IMG_CONTAINER_OF(psNodeCurr, OSThreadData, sNode);
+
+               PVR_DUMPDEBUG_LOG("  %s : %s",
+                                 psThreadListNode->pszThreadName,
+                                 (psThreadListNode->bIsThreadRunning) ? "Running" : "Stopped");
+
+               if (psThreadListNode->pfnDebugDumpCB)
+               {
+                       psThreadListNode->pfnDebugDumpCB(pfnDumpDebugPrintf, pvDumpDebugFile);
+               }
+       }
+}
+
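+/* Allocates physically contiguous pages: uiSize is rounded up to whole pages,
+ * 2^order pages are requested from alloc_pages(), and the GFP flags are
+ * restricted to the DMA32/DMA zones when the device's DMA mask requires it.
+ * The single base physical address is returned via psDevPAddr.
+ */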
+PVRSRV_ERROR OSPhyContigPagesAlloc(PHYS_HEAP *psPhysHeap, size_t uiSize,
+                                                       PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr,
+                                                       IMG_PID uiPid)
+{
+       PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPhysHeap);
+       struct device *psDev = psDevNode->psDevConfig->pvOSDevice;
+       IMG_CPU_PHYADDR sCpuPAddr;
+       struct page *psPage;
+       IMG_UINT32      ui32Order=0;
+       gfp_t gfp_flags;
+
+       PVR_ASSERT(uiSize != 0);
+       /* Align the size to the page granularity. */
+       uiSize = PAGE_ALIGN(uiSize);
+
+       /* Get the order to be used with the allocation. */
+       ui32Order = get_order(uiSize);
+
+       gfp_flags = GFP_KERNEL;
+
+#if !defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY)
+       if (psDev)
+       {
+               if (*psDev->dma_mask == DMA_BIT_MASK(32))
+               {
+                       /* Limit to 32 bit.
+                        * Achieved by setting __GFP_DMA32 for 64 bit systems */
+                       gfp_flags |= __GFP_DMA32;
+               }
+               else if (*psDev->dma_mask < DMA_BIT_MASK(32))
+               {
+                       /* Limit to whatever the size of DMA zone is. */
+                       gfp_flags |= __GFP_DMA;
+               }
+       }
+#else
+       PVR_UNREFERENCED_PARAMETER(psDev);
+#endif
+
+       /* Allocate the pages. */
+       psPage = alloc_pages(gfp_flags, ui32Order);
+       if (psPage == NULL)
+       {
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+       uiSize = (1 << ui32Order) * PAGE_SIZE;
+
+       psMemHandle->u.pvHandle = psPage;
+       psMemHandle->uiOrder = ui32Order;
+       sCpuPAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(page_to_phys(psPage));
+
+       /*
+        * Even when more than one page is allocated for the base MMU object, we still
+        * need only a single physical address because the pages are physically contiguous.
+        */
+       PhysHeapCpuPAddrToDevPAddr(psPhysHeap, 1, psDevPAddr, &sCpuPAddr);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+       PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA,
+                                           uiSize,
+                                           (IMG_UINT64)(uintptr_t) psPage,
+                                           uiPid);
+#else
+       PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA,
+                                    psPage,
+                                    sCpuPAddr,
+                                    uiSize,
+                                    NULL,
+                                    uiPid
+                                    DEBUG_MEMSTATS_VALUES);
+#endif
+#else
+       PVR_UNREFERENCED_PARAMETER(uiPid);
+#endif
+
+       return PVRSRV_OK;
+}
+
+void OSPhyContigPagesFree(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle)
+{
+       struct page *psPage = (struct page*) psMemHandle->u.pvHandle;
+       IMG_UINT32      uiSize, uiPageCount=0, ui32Order;
+
+       PVR_UNREFERENCED_PARAMETER(psPhysHeap);
+
+       ui32Order = psMemHandle->uiOrder;
+       uiPageCount = (1 << ui32Order);
+       uiSize = (uiPageCount * PAGE_SIZE);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+       PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA,
+                                             (IMG_UINT64)(uintptr_t) psPage);
+#else
+       PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA,
+                                       (IMG_UINT64)(uintptr_t) psPage,
+                                       OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+
+       __free_pages(psPage, ui32Order);
+       psMemHandle->uiOrder = 0;
+}
+
+PVRSRV_ERROR OSPhyContigPagesMap(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle,
+                                               size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+                                               void **pvPtr)
+{
+       size_t actualSize = 1 << (PAGE_SHIFT + psMemHandle->uiOrder);
+       *pvPtr = kmap((struct page*)psMemHandle->u.pvHandle);
+
+       PVR_UNREFERENCED_PARAMETER(psDevPAddr);
+
+       PVR_UNREFERENCED_PARAMETER(actualSize); /* If we don't take an #ifdef path */
+       PVR_UNREFERENCED_PARAMETER(uiSize);
+       PVR_UNREFERENCED_PARAMETER(psPhysHeap);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+       PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, actualSize, OSGetCurrentClientProcessIDKM());
+#else
+       {
+               IMG_CPU_PHYADDR sCpuPAddr;
+               sCpuPAddr.uiAddr = 0;
+
+               PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA,
+                                                                        *pvPtr,
+                                                                        sCpuPAddr,
+                                                                        actualSize,
+                                                                        NULL,
+                                                                        OSGetCurrentClientProcessIDKM()
+                                                                        DEBUG_MEMSTATS_VALUES);
+       }
+#endif
+#endif
+
+       return PVRSRV_OK;
+}
+
+void OSPhyContigPagesUnmap(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle, void *pvPtr)
+{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+       /* Mapping is done a page at a time */
+       PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA,
+                                   (1 << (PAGE_SHIFT + psMemHandle->uiOrder)),
+                                   OSGetCurrentClientProcessIDKM());
+#else
+       PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA,
+                                       (IMG_UINT64)(uintptr_t)pvPtr,
+                                       OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+
+       PVR_UNREFERENCED_PARAMETER(psPhysHeap);
+       PVR_UNREFERENCED_PARAMETER(pvPtr);
+
+       kunmap((struct page*) psMemHandle->u.pvHandle);
+}
+
+PVRSRV_ERROR OSPhyContigPagesClean(PHYS_HEAP *psPhysHeap,
+                                   PG_HANDLE *psMemHandle,
+                                   IMG_UINT32 uiOffset,
+                                   IMG_UINT32 uiLength)
+{
+       PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPhysHeap);
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       struct page* psPage = (struct page*) psMemHandle->u.pvHandle;
+
+       void* pvVirtAddrStart = kmap(psPage) + uiOffset;
+       IMG_CPU_PHYADDR sPhysStart, sPhysEnd;
+
+       IMG_UINT32 ui32Order;
+
+       if (uiLength == 0)
+       {
+               goto e0;
+       }
+
+       ui32Order = psMemHandle->uiOrder;
+       if ((uiOffset + uiLength) > ((1 << ui32Order) * PAGE_SIZE))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Invalid size params, uiOffset %u, uiLength %u",
+                               __func__,
+                               uiOffset,
+                               uiLength));
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto e0;
+       }
+
+       sPhysStart.uiAddr = page_to_phys(psPage) + uiOffset;
+       sPhysEnd.uiAddr = sPhysStart.uiAddr + uiLength;
+
+       CacheOpExec(psDevNode,
+                               pvVirtAddrStart,
+                               pvVirtAddrStart + uiLength,
+                               sPhysStart,
+                               sPhysEnd,
+                               PVRSRV_CACHE_OP_CLEAN);
+
+e0:
+       kunmap(psPage);
+
+       return eError;
+}
+
+#if defined(__GNUC__)
+#define PVRSRV_MEM_ALIGN __attribute__ ((aligned (0x8)))
+#define PVRSRV_MEM_ALIGN_MASK (0x7)
+#else
+#error "PVRSRV Alignment macros need to be defined for this compiler"
+#endif
+
+IMG_UINT32 OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE eCacheAttribute)
+{
+       IMG_UINT32 uiSize = 0;
+
+       switch (eCacheAttribute)
+       {
+               case OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE:
+                       uiSize = cache_line_size();
+                       break;
+
+               default:
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache attribute type %d",
+                                       __func__, (IMG_UINT32)eCacheAttribute));
+                       PVR_ASSERT(0);
+                       break;
+       }
+
+       return uiSize;
+}
+
+IMG_UINT32 OSVSScanf(const IMG_CHAR *pStr, const IMG_CHAR *pszFormat, ...)
+{
+       va_list argList;
+       IMG_INT32 iCount = 0;
+
+       va_start(argList, pszFormat);
+       iCount = vsscanf(pStr, pszFormat, argList);
+       va_end(argList);
+
+       return iCount;
+}
+
+IMG_INT OSMemCmp(void *pvBufA, void *pvBufB, size_t uiLen)
+{
+       return (IMG_INT)memcmp(pvBufA, pvBufB, uiLen);
+}
+
+size_t OSStringLCat(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDstSize)
+{
+       /*
+        * Let strlcat handle any truncation cases correctly.
+        * We will definitely get a NUL-terminated string set in pszDest
+        */
+       size_t uSrcSize = strlcat(pszDest, pszSrc, uDstSize);
+
+#if defined(PVR_DEBUG_STRLCPY) && defined(DEBUG)
+       /* Handle truncation by dumping calling stack if debug allows */
+       if (uSrcSize >= uDstSize)
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                       "%s: String truncated Src = '<%s>' %ld bytes, Dest = '%s'",
+                       __func__, pszSrc, (long)uDstSize, pszDest));
+               OSDumpStack();
+       }
+#endif /* defined(PVR_DEBUG_STRLCPY) && defined(DEBUG) */
+
+       return uSrcSize;
+}
+
+IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR *pszFormat, ...)
+{
+       va_list argList;
+       IMG_INT32 iCount;
+
+       va_start(argList, pszFormat);
+       iCount = vsnprintf(pStr, (size_t)ui32Size, pszFormat, argList);
+       va_end(argList);
+
+       return iCount;
+}
+
+IMG_INT32 OSVSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR* pszFormat, va_list vaArgs)
+{
+       return vsnprintf(pStr, ui32Size, pszFormat, vaArgs);
+}
+
+size_t OSStringLength(const IMG_CHAR *pStr)
+{
+       return strlen(pStr);
+}
+
+size_t OSStringNLength(const IMG_CHAR *pStr, size_t uiCount)
+{
+       return strnlen(pStr, uiCount);
+}
+
+IMG_INT32 OSStringNCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2,
+                          size_t uiSize)
+{
+#if defined(DEBUG)
+       /* Double-check that we are not passing NULL parameters in. If we are, we
+        * return -1 (for arg1 == NULL, arg2 != NULL),
+        * 0 (for arg1 == NULL, arg2 == NULL),
+        * +1 (for arg1 != NULL, arg2 == NULL),
+        * or strncmp(arg1, arg2, size) otherwise.
+        */
+       if (pStr1 == NULL)
+       {
+               if (pStr2 == NULL)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s(%p, %p, %d): Both args NULL",
+                                __func__, pStr1, pStr2, (int)uiSize));
+                       OSDumpStack();
+                       return 0;       /* Both NULL */
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s(%p, %p, %d): arg1 NULL",
+                                __func__, pStr1, pStr2, (int)uiSize));
+                       OSDumpStack();
+                       return -1;      /* NULL < non-NULL */
+               }
+       }
+       else
+       {
+               if (pStr2 == NULL)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s(%p, %p, %d): arg2 NULL",
+                                __func__, pStr1, pStr2, (int)uiSize));
+                       OSDumpStack();
+                       return +1;      /* non-NULL > NULL */
+               }
+               else
+               {
+                       return strncmp(pStr1, pStr2, uiSize);
+               }
+       }
+#else
+       return strncmp(pStr1, pStr2, uiSize);
+#endif
+}
+
+PVRSRV_ERROR OSStringToUINT32(const IMG_CHAR *pStr, IMG_UINT32 ui32Base,
+                              IMG_UINT32 *ui32Result)
+{
+       if (kstrtou32(pStr, ui32Base, ui32Result) != 0)
+               return PVRSRV_ERROR_CONVERSION_FAILED;
+
+       return PVRSRV_OK;
+}
+
+IMG_UINT32 OSStringUINT32ToStr(IMG_CHAR *pszBuf, size_t uSize,
+                                                       IMG_UINT32 ui32Num)
+{
+       IMG_UINT32 ui32i, ui32Len = 0, ui32NumCopy = ui32Num;
+
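+       /* Worked example: ui32Num = 409 needs uSize >= 4 (three digits plus the
+        * NUL terminator); "409" is written to pszBuf and 3 is returned. */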
+       /* calculate string length required to hold the number string */
+       do
+       {
+               ui32Len++;
+               ui32NumCopy /= 10;
+       } while (ui32NumCopy != 0);
+
+       if (unlikely(ui32Len >= uSize))
+       {
+               /* insufficient buffer */
+               return 0;
+       }
+
+       for (ui32i = 0; ui32i < ui32Len; ui32i++)
+       {
+               pszBuf[ui32Len - (ui32i + 1)] = '0' + ui32Num % 10;
+               ui32Num = ui32Num / 10;
+       }
+
+       pszBuf[ui32Len] = '\0';
+       return ui32Len;
+}
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_BUFFER_SYNC)
+static struct workqueue_struct *gpFenceStatusWq;
+
+static PVRSRV_ERROR _NativeSyncInit(void)
+{
+       gpFenceStatusWq = create_freezable_workqueue("pvr_fence_status");
+       if (!gpFenceStatusWq)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create foreign fence status workqueue",
+                                __func__));
+               return PVRSRV_ERROR_INIT_FAILURE;
+       }
+
+       return PVRSRV_OK;
+}
+
+static void _NativeSyncDeinit(void)
+{
+       destroy_workqueue(gpFenceStatusWq);
+}
+
+struct workqueue_struct *NativeSyncGetFenceStatusWq(void)
+{
+       if (!gpFenceStatusWq)
+       {
+#if defined(DEBUG)
+               PVR_ASSERT(gpFenceStatusWq);
+#endif
+               return NULL;
+       }
+
+       return gpFenceStatusWq;
+}
+#endif
+
+PVRSRV_ERROR OSInitEnvData(void)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       LinuxInitPhysmem();
+
+       _OSInitThreadList();
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_BUFFER_SYNC)
+       eError = _NativeSyncInit();
+#endif
+
+       return eError;
+}
+
+void OSDeInitEnvData(void)
+{
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_BUFFER_SYNC)
+       _NativeSyncDeinit();
+#endif
+
+       LinuxDeinitPhysmem();
+}
+
+void OSReleaseThreadQuanta(void)
+{
+       schedule();
+}
+
+void OSMemoryBarrier(volatile void *hReadback)
+{
+       mb();
+
+       if (hReadback)
+       {
+               /* Force a read-back to memory to avoid posted writes on certain buses
+                * e.g. PCI(E)
+                */
+               (void) OSReadDeviceMem32(hReadback);
+       }
+}
+
+void OSWriteMemoryBarrier(volatile void *hReadback)
+{
+       wmb();
+
+       if (hReadback)
+       {
+               /* Force a read-back to memory to avoid posted writes on certain buses
+                * e.g. PCI(E)
+                */
+               (void) OSReadDeviceMem32(hReadback);
+       }
+}
+
+/* Not matching/aligning this API to the Clockus() API above, to avoid
+ * unnecessary multiplication/division operations in calling code.
+ */
+static inline IMG_UINT64 Clockns64(void)
+{
+       IMG_UINT64 timenow;
+
+       /* Kernel thread preempt protection. Some architecture implementations
+        * of sched_clock() (e.g. ARM) are not preempt-safe when the kernel is
+        * built with preemption enabled (CONFIG_PREEMPT and friends).
+        */
+       preempt_disable();
+
+       /* Use sched_clock() instead of ktime_get() since we need a timestamp
+        * that correlates with the one shown in kernel logs and trace data,
+        * not one that lags slightly behind. */
+       timenow = sched_clock();
+
+       preempt_enable();
+
+       return timenow;
+}
+
+IMG_UINT64 OSClockns64(void)
+{
+       return Clockns64();
+}
+
+IMG_UINT64 OSClockus64(void)
+{
+       IMG_UINT64 timenow = Clockns64();
+       IMG_UINT32 remainder;
+
+       return OSDivide64r64(timenow, 1000, &remainder);
+}
+
+IMG_UINT32 OSClockus(void)
+{
+       return (IMG_UINT32) OSClockus64();
+}
+
+IMG_UINT32 OSClockms(void)
+{
+       IMG_UINT64 timenow = Clockns64();
+       IMG_UINT32 remainder;
+
+       return OSDivide64(timenow, 1000000, &remainder);
+}
+
+static inline IMG_UINT64 KClockns64(void)
+{
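+       /* ktime_t became a plain 64-bit scalar in kernel 4.10; older kernels
+        * expose the value through the .tv64 union member, hence the #if below. */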
+       ktime_t sTime = ktime_get();
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+       return sTime;
+#else
+       return sTime.tv64;
+#endif
+}
+
+PVRSRV_ERROR OSClockMonotonicns64(IMG_UINT64 *pui64Time)
+{
+       *pui64Time = KClockns64();
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSClockMonotonicus64(IMG_UINT64 *pui64Time)
+{
+       IMG_UINT64 timenow = KClockns64();
+       IMG_UINT32 remainder;
+
+       *pui64Time = OSDivide64r64(timenow, 1000, &remainder);
+       return PVRSRV_OK;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0))
+IMG_UINT64 OSClockMonotonicRawns64(void)
+{
+       struct timespec64 ts;
+
+       ktime_get_raw_ts64(&ts);
+       return (IMG_UINT64)ts.tv_sec * 1000000000 + ts.tv_nsec;
+}
+#else
+IMG_UINT64 OSClockMonotonicRawns64(void)
+{
+       struct timespec ts;
+
+       getrawmonotonic(&ts);
+       return (IMG_UINT64) ts.tv_sec * 1000000000 + ts.tv_nsec;
+}
+#endif
+
+IMG_UINT64 OSClockMonotonicRawus64(void)
+{
+       IMG_UINT32 rem;
+       return OSDivide64r64(OSClockMonotonicRawns64(), 1000, &rem);
+}
+
+/*
+       OSWaitus
+*/
+void OSWaitus(IMG_UINT32 ui32Timeus)
+{
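+       /* udelay() busy-waits, so this is only appropriate for short delays;
+        * longer waits should use OSSleepms() below, which sleeps via msleep(). */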
+       udelay(ui32Timeus);
+}
+
+
+/*
+       OSSleepms
+*/
+void OSSleepms(IMG_UINT32 ui32Timems)
+{
+       msleep(ui32Timems);
+}
+
+
+INLINE IMG_UINT64 OSGetCurrentProcessVASpaceSize(void)
+{
+       return (IMG_UINT64)TASK_SIZE;
+}
+
+INLINE IMG_PID OSGetCurrentProcessID(void)
+{
+       if (in_interrupt())
+       {
+               return KERNEL_ID;
+       }
+
+       return (IMG_PID)task_tgid_nr(current);
+}
+
+INLINE IMG_PID OSGetCurrentVirtualProcessID(void)
+{
+       if (in_interrupt())
+       {
+               return KERNEL_ID;
+       }
+
+       return (IMG_PID)task_tgid_vnr(current);
+}
+
+INLINE IMG_CHAR *OSGetCurrentProcessName(void)
+{
+       return current->comm;
+}
+
+INLINE uintptr_t OSGetCurrentThreadID(void)
+{
+       if (in_interrupt())
+       {
+               return KERNEL_ID;
+       }
+
+       return current->pid;
+}
+
+IMG_PID OSGetCurrentClientProcessIDKM(void)
+{
+       return OSGetCurrentProcessID();
+}
+
+IMG_CHAR *OSGetCurrentClientProcessNameKM(void)
+{
+       return OSGetCurrentProcessName();
+}
+
+uintptr_t OSGetCurrentClientThreadIDKM(void)
+{
+       return OSGetCurrentThreadID();
+}
+
+size_t OSGetPageSize(void)
+{
+       return PAGE_SIZE;
+}
+
+size_t OSGetPageShift(void)
+{
+       return PAGE_SHIFT;
+}
+
+size_t OSGetPageMask(void)
+{
+       return (OSGetPageSize()-1);
+}
+
+size_t OSGetOrder(size_t uSize)
+{
+       return get_order(PAGE_ALIGN(uSize));
+}
+
+IMG_UINT64 OSGetRAMSize(void)
+{
+       struct sysinfo SI;
+       si_meminfo(&SI);
+
+       return (PAGE_SIZE * SI.totalram);
+}
+
+typedef struct
+{
+       int os_error;
+       PVRSRV_ERROR pvr_error;
+} error_map_t;
+
+/* return -ve versions of POSIX errors as they are used in this form */
+static const error_map_t asErrorMap[] =
+{
+       {-EFAULT, PVRSRV_ERROR_BRIDGE_EFAULT},
+       {-EINVAL, PVRSRV_ERROR_BRIDGE_EINVAL},
+       {-ENOMEM, PVRSRV_ERROR_BRIDGE_ENOMEM},
+       {-ERANGE, PVRSRV_ERROR_BRIDGE_ERANGE},
+       {-EPERM,  PVRSRV_ERROR_BRIDGE_EPERM},
+       {-ENOTTY, PVRSRV_ERROR_BRIDGE_ENOTTY},
+       {-ENOTTY, PVRSRV_ERROR_BRIDGE_CALL_FAILED},
+       {-ERANGE, PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL},
+       {-ENOMEM, PVRSRV_ERROR_OUT_OF_MEMORY},
+       {-EINVAL, PVRSRV_ERROR_INVALID_PARAMS},
+
+       {0,       PVRSRV_OK}
+};
+
+int PVRSRVToNativeError(PVRSRV_ERROR e)
+{
+       int os_error = -EFAULT;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(asErrorMap); i++)
+       {
+               if (e == asErrorMap[i].pvr_error)
+               {
+                       os_error = asErrorMap[i].os_error;
+                       break;
+               }
+       }
+       return os_error;
+}
+
+typedef struct  _MISR_DATA_ {
+       struct workqueue_struct *psWorkQueue;
+       struct work_struct sMISRWork;
+       const IMG_CHAR* pszName;
+       PFN_MISR pfnMISR;
+       void *hData;
+} MISR_DATA;
+
+/*
+       MISRWrapper
+*/
+static void MISRWrapper(struct work_struct *data)
+{
+       MISR_DATA *psMISRData = container_of(data, MISR_DATA, sMISRWork);
+
+       PVR_DPF((PVR_DBG_MESSAGE, "Waking up '%s' MISR %p", psMISRData->pszName, psMISRData));
+
+       psMISRData->pfnMISR(psMISRData->hData);
+}
+
+/*
+       OSInstallMISR
+*/
+PVRSRV_ERROR OSInstallMISR(IMG_HANDLE *hMISRData, PFN_MISR pfnMISR,
+                                                       void *hData, const IMG_CHAR *pszMisrName)
+{
+       MISR_DATA *psMISRData;
+
+       psMISRData = OSAllocMem(sizeof(*psMISRData));
+       PVR_LOG_RETURN_IF_NOMEM(psMISRData, "psMISRData");
+
+       psMISRData->hData = hData;
+       psMISRData->pfnMISR = pfnMISR;
+       psMISRData->pszName = pszMisrName;
+
+       PVR_DPF((PVR_DBG_MESSAGE, "Installing MISR with cookie %p", psMISRData));
+
+       psMISRData->psWorkQueue = create_singlethread_workqueue("pvr_misr");
+
+       if (psMISRData->psWorkQueue == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: create_singlethread_workqueue failed"));
+               OSFreeMem(psMISRData);
+               return PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD;
+       }
+
+       INIT_WORK(&psMISRData->sMISRWork, MISRWrapper);
+
+       *hMISRData = (IMG_HANDLE) psMISRData;
+
+       return PVRSRV_OK;
+}
+
+/*
+       OSUninstallMISR
+*/
+PVRSRV_ERROR OSUninstallMISR(IMG_HANDLE hMISRData)
+{
+       MISR_DATA *psMISRData = (MISR_DATA *) hMISRData;
+
+       PVR_DPF((PVR_DBG_MESSAGE, "Uninstalling MISR with cookie %p", psMISRData));
+
+       destroy_workqueue(psMISRData->psWorkQueue);
+       OSFreeMem(psMISRData);
+
+       return PVRSRV_OK;
+}
+
+/*
+       OSScheduleMISR
+*/
+PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData)
+{
+       MISR_DATA *psMISRData = (MISR_DATA *) hMISRData;
+
+       /*
+               Note:
+
+               In the case of NO_HARDWARE we want the driver to be synchronous so
+               that we don't have to worry about waiting for previous operations
+               to complete
+       */
+#if defined(NO_HARDWARE)
+       psMISRData->pfnMISR(psMISRData->hData);
+       return PVRSRV_OK;
+#else
+       {
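+               /* queue_work() returns false if the work item was already queued,
+                * which is reported here as PVRSRV_ERROR_ALREADY_EXISTS. */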
+               bool rc = queue_work(psMISRData->psWorkQueue, &psMISRData->sMISRWork);
+               return rc ? PVRSRV_OK : PVRSRV_ERROR_ALREADY_EXISTS;
+       }
+#endif
+}
+
+/* OS specific values for thread priority */
+static const IMG_INT32 ai32OSPriorityValues[OS_THREAD_LAST_PRIORITY] =
+{
+         0, /* OS_THREAD_NOSET_PRIORITY */
+       -20, /* OS_THREAD_HIGHEST_PRIORITY */
+       -10, /* OS_THREAD_HIGH_PRIORITY */
+         0, /* OS_THREAD_NORMAL_PRIORITY */
+         9, /* OS_THREAD_LOW_PRIORITY */
+        19, /* OS_THREAD_LOWEST_PRIORITY */
+};
+
+static int OSThreadRun(void *data)
+{
+       OSThreadData *psOSThreadData = data;
+
+       /* count freezable threads */
+       LinuxBridgeNumActiveKernelThreadsIncrement();
+
+       /* Mark this kthread as freezable so that it is frozen and thawed with
+        * the rest of the system during suspend/resume. */
+       set_freezable();
+
+       PVR_DPF((PVR_DBG_MESSAGE, "Starting Thread '%s'...", psOSThreadData->pszThreadName));
+
+       /* Call the client's kernel thread with the client's data pointer */
+       psOSThreadData->pfnThread(psOSThreadData->hData);
+
+       if (psOSThreadData->bIsSupportingThread)
+       {
+               _ThreadSetStopped(psOSThreadData);
+       }
+
+       /* Wait for OSThreadDestroy() to call kthread_stop() */
+       while (!kthread_freezable_should_stop(NULL))
+       {
+               schedule();
+       }
+
+       LinuxBridgeNumActiveKernelThreadsDecrement();
+
+       return 0;
+}
+
+PVRSRV_ERROR OSThreadCreate(IMG_HANDLE *phThread,
+                            IMG_CHAR *pszThreadName,
+                            PFN_THREAD pfnThread,
+                            PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB,
+                            IMG_BOOL bIsSupportingThread,
+                            void *hData)
+{
+       return OSThreadCreatePriority(phThread, pszThreadName, pfnThread,
+                                     pfnDebugDumpCB, bIsSupportingThread, hData,
+                                     OS_THREAD_NOSET_PRIORITY);
+}
+
+PVRSRV_ERROR OSThreadCreatePriority(IMG_HANDLE *phThread,
+                                    IMG_CHAR *pszThreadName,
+                                    PFN_THREAD pfnThread,
+                                    PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB,
+                                    IMG_BOOL bIsSupportingThread,
+                                    void *hData,
+                                    OS_THREAD_LEVEL eThreadPriority)
+{
+       OSThreadData *psOSThreadData;
+       PVRSRV_ERROR eError;
+
+       psOSThreadData = OSAllocZMem(sizeof(*psOSThreadData));
+       PVR_LOG_GOTO_IF_NOMEM(psOSThreadData, eError, fail_alloc);
+
+       psOSThreadData->pfnThread = pfnThread;
+       psOSThreadData->hData = hData;
+       psOSThreadData->kthread = kthread_run(OSThreadRun, psOSThreadData, "%s", pszThreadName);
+
+       if (IS_ERR(psOSThreadData->kthread))
+       {
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto fail_kthread;
+       }
+
+       if (bIsSupportingThread)
+       {
+               psOSThreadData->pszThreadName = pszThreadName;
+               psOSThreadData->pfnDebugDumpCB = pfnDebugDumpCB;
+               psOSThreadData->bIsThreadRunning = IMG_TRUE;
+               psOSThreadData->bIsSupportingThread = IMG_TRUE;
+
+               _ThreadListAddEntry(psOSThreadData);
+       }
+
+       if (eThreadPriority != OS_THREAD_NOSET_PRIORITY &&
+           eThreadPriority < OS_THREAD_LAST_PRIORITY)
+       {
+               set_user_nice(psOSThreadData->kthread,
+                             ai32OSPriorityValues[eThreadPriority]);
+       }
+
+       *phThread = psOSThreadData;
+
+       return PVRSRV_OK;
+
+fail_kthread:
+       OSFreeMem(psOSThreadData);
+fail_alloc:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+PVRSRV_ERROR OSThreadDestroy(IMG_HANDLE hThread)
+{
+       OSThreadData *psOSThreadData = hThread;
+       int ret;
+
+       /* Let the thread know we are ready for it to end and wait for it. */
+       ret = kthread_stop(psOSThreadData->kthread);
+       if (0 != ret)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "kthread_stop failed(%d)", ret));
+               return PVRSRV_ERROR_RETRY;
+       }
+
+       if (psOSThreadData->bIsSupportingThread)
+       {
+               _ThreadListRemoveEntry(psOSThreadData);
+       }
+
+       OSFreeMem(psOSThreadData);
+
+       return PVRSRV_OK;
+}
+
+void OSPanic(void)
+{
+       BUG();
+
+#if defined(__KLOCWORK__)
+       /* Klocwork does not understand that BUG is terminal... */
+       abort();
+#endif
+}
+
+void *
+OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr,
+                          size_t ui32Bytes,
+                          PVRSRV_MEMALLOCFLAGS_T uiMappingFlags)
+{
+       void __iomem *pvLinAddr;
+
+       if (uiMappingFlags & ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK))
+       {
+               PVR_ASSERT(!"Found non-cpu cache mode flag when mapping to the cpu");
+               return NULL;
+       }
+
+       if (! PVRSRV_VZ_MODE_IS(NATIVE))
+       {
+               /*
+                 This is required to support DMA physheaps for GPU virtualization.
+                 Unfortunately, if a region of kernel managed memory is turned into
+                 a DMA buffer, conflicting mappings can come about easily on Linux
+                 as the original memory is mapped by the kernel as normal cached
+                 memory whilst DMA buffers are mapped mostly as uncached device or
+                 cache-coherent device memory. In both cases the system will have
+                 two conflicting mappings for the same memory region and will have
+                 "undefined behaviour" for most processors notably ARMv6 onwards
+                 and some x86 micro-architectures. As a result, perform ioremapping
+                 manually for DMA physheap allocations by translating from CPU/VA
+                 to BUS/PA thereby preventing the creation of conflicting mappings.
+               */
+               pvLinAddr = (void __iomem *) SysDmaDevPAddrToCpuVAddr(BasePAddr.uiAddr, ui32Bytes);
+               if (pvLinAddr != NULL)
+               {
+                       return (void __force *) pvLinAddr;
+               }
+       }
+
+       switch (uiMappingFlags)
+       {
+               case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+                       pvLinAddr = (void __iomem *)ioremap(BasePAddr.uiAddr, ui32Bytes);
+                       break;
+               case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC:
+#if defined(CONFIG_X86) || defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+                       pvLinAddr = (void __iomem *)ioremap_wc(BasePAddr.uiAddr, ui32Bytes);
+#else
+                       pvLinAddr = (void __iomem *)ioremap(BasePAddr.uiAddr, ui32Bytes);
+#endif
+                       break;
+               case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
+#if defined(CONFIG_X86) || defined(CONFIG_ARM)
+                       pvLinAddr = (void __iomem *)ioremap_cache(BasePAddr.uiAddr, ui32Bytes);
+#else
+                       pvLinAddr = (void __iomem *)ioremap(BasePAddr.uiAddr, ui32Bytes);
+#endif
+                       break;
+               case PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT:
+               case PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT:
+                       PVR_ASSERT(!"Unexpected cpu cache mode");
+                       pvLinAddr = NULL;
+                       break;
+               default:
+                       PVR_ASSERT(!"Unsupported cpu cache mode");
+                       pvLinAddr = NULL;
+                       break;
+       }
+
+       return (void __force *) pvLinAddr;
+}
+
+
+IMG_BOOL
+OSUnMapPhysToLin(void *pvLinAddr, size_t ui32Bytes)
+{
+       PVR_UNREFERENCED_PARAMETER(ui32Bytes);
+
+       if (!PVRSRV_VZ_MODE_IS(NATIVE))
+       {
+               if (SysDmaCpuVAddrToDevPAddr(pvLinAddr))
+               {
+                       return IMG_TRUE;
+               }
+       }
+
+       iounmap((void __iomem *) pvLinAddr);
+
+       return IMG_TRUE;
+}
+
+#define OS_MAX_TIMERS  8
+
+/* Timer callback structure used by OSAddTimer */
+typedef struct TIMER_CALLBACK_DATA_TAG
+{
+       IMG_BOOL                        bInUse;
+       PFN_TIMER_FUNC          pfnTimerFunc;
+       void                            *pvData;
+       struct timer_list       sTimer;
+       IMG_UINT32                      ui32Delay;
+       IMG_BOOL                        bActive;
+       struct work_struct      sWork;
+} TIMER_CALLBACK_DATA;
+
+static struct workqueue_struct *psTimerWorkQueue;
+
+static TIMER_CALLBACK_DATA sTimers[OS_MAX_TIMERS];
+
+static DEFINE_MUTEX(sTimerStructLock);
+
+static void OSTimerCallbackBody(TIMER_CALLBACK_DATA *psTimerCBData)
+{
+       if (!psTimerCBData->bActive)
+               return;
+
+       /* call timer callback */
+       psTimerCBData->pfnTimerFunc(psTimerCBData->pvData);
+
+       /* reset timer */
+       mod_timer(&psTimerCBData->sTimer, psTimerCBData->sTimer.expires + psTimerCBData->ui32Delay);
+}
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0))
+/*************************************************************************/ /*!
+@Function       OSTimerCallbackWrapper
+@Description    OS specific timer callback wrapper function
+@Input          psTimer    Timer list structure
+*/ /**************************************************************************/
+static void OSTimerCallbackWrapper(struct timer_list *psTimer)
+{
+       TIMER_CALLBACK_DATA *psTimerCBData = from_timer(psTimerCBData, psTimer, sTimer);
+#else
+/*************************************************************************/ /*!
+@Function       OSTimerCallbackWrapper
+@Description    OS specific timer callback wrapper function
+@Input          uData    Timer callback data
+*/ /**************************************************************************/
+static void OSTimerCallbackWrapper(uintptr_t uData)
+{
+       TIMER_CALLBACK_DATA *psTimerCBData = (TIMER_CALLBACK_DATA*)uData;
+#endif
+       int res;
+
+       res = queue_work(psTimerWorkQueue, &psTimerCBData->sWork);
+       if (res == 0)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "OSTimerCallbackWrapper: work already queued"));
+       }
+}
+
+
+static void OSTimerWorkQueueCallBack(struct work_struct *psWork)
+{
+       TIMER_CALLBACK_DATA *psTimerCBData = container_of(psWork, TIMER_CALLBACK_DATA, sWork);
+
+       OSTimerCallbackBody(psTimerCBData);
+}
+
+IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, void *pvData, IMG_UINT32 ui32MsTimeout)
+{
+       TIMER_CALLBACK_DATA *psTimerCBData;
+       IMG_UINT32              ui32i;
+
+       /* check callback */
+       if (!pfnTimerFunc)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: passed invalid callback"));
+               return NULL;
+       }
+
+       /* Allocate timer callback data structure */
+       mutex_lock(&sTimerStructLock);
+       for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++)
+       {
+               psTimerCBData = &sTimers[ui32i];
+               if (!psTimerCBData->bInUse)
+               {
+                       psTimerCBData->bInUse = IMG_TRUE;
+                       break;
+               }
+       }
+       mutex_unlock(&sTimerStructLock);
+       if (ui32i >= OS_MAX_TIMERS)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: all timers are in use"));
+               return NULL;
+       }
+
+       psTimerCBData->pfnTimerFunc = pfnTimerFunc;
+       psTimerCBData->pvData = pvData;
+       psTimerCBData->bActive = IMG_FALSE;
+
+       /*
+               HZ = ticks per second
+               ui32MsTimeout = required ms delay
+               ticks = (Hz * ui32MsTimeout) / 1000
+       */
+       psTimerCBData->ui32Delay = ((HZ * ui32MsTimeout) < 1000)
+                                                               ?       1
+                                                               :       ((HZ * ui32MsTimeout) / 1000);
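+       /* e.g. HZ = 250 and ui32MsTimeout = 20 gives (250 * 20) / 1000 = 5 ticks */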
+
+       /* initialise object */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0))
+       timer_setup(&psTimerCBData->sTimer, OSTimerCallbackWrapper, 0);
+#else
+       init_timer(&psTimerCBData->sTimer);
+
+       /* setup timer object */
+       psTimerCBData->sTimer.function = (void *)OSTimerCallbackWrapper;
+       psTimerCBData->sTimer.data = (uintptr_t)psTimerCBData;
+#endif
+
+       return (IMG_HANDLE)(uintptr_t)(ui32i + 1);
+}
+
+
+static inline TIMER_CALLBACK_DATA *GetTimerStructure(IMG_HANDLE hTimer)
+{
+       IMG_UINT32 ui32i = (IMG_UINT32)((uintptr_t)hTimer) - 1;
+
+       PVR_ASSERT(ui32i < OS_MAX_TIMERS);
+
+       return &sTimers[ui32i];
+}
+
+PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer)
+{
+       TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
+
+       PVR_ASSERT(psTimerCBData->bInUse);
+       PVR_ASSERT(!psTimerCBData->bActive);
+
+       /* free timer callback data struct */
+       psTimerCBData->bInUse = IMG_FALSE;
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSEnableTimer (IMG_HANDLE hTimer)
+{
+       TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
+
+       PVR_ASSERT(psTimerCBData->bInUse);
+       PVR_ASSERT(!psTimerCBData->bActive);
+
+       /* Start timer arming */
+       psTimerCBData->bActive = IMG_TRUE;
+
+       /* set the expire time */
+       psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies;
+
+       /* Add the timer to the list */
+       add_timer(&psTimerCBData->sTimer);
+
+       return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR OSDisableTimer (IMG_HANDLE hTimer)
+{
+       TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
+
+       PVR_ASSERT(psTimerCBData->bInUse);
+       PVR_ASSERT(psTimerCBData->bActive);
+
+       /* Stop timer from arming */
+       psTimerCBData->bActive = IMG_FALSE;
+       smp_mb();
+
+       flush_workqueue(psTimerWorkQueue);
+
+       /* remove timer */
+       del_timer_sync(&psTimerCBData->sTimer);
+
+       /*
+        * This second flush is to catch the case where the timer ran
+        * before we managed to delete it, in which case, it will have
+        * queued more work for the workqueue. Since the bActive flag
+        * has been cleared, this second flush won't result in the
+        * timer being rearmed.
+        */
+       flush_workqueue(psTimerWorkQueue);
+
+       return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName, IMG_HANDLE *hEventObject)
+{
+       PVR_UNREFERENCED_PARAMETER(pszName);
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(hEventObject, "hEventObject");
+
+       return LinuxEventObjectListCreate(hEventObject);
+}
+
+
+PVRSRV_ERROR OSEventObjectDestroy(IMG_HANDLE hEventObject)
+{
+       PVR_LOG_RETURN_IF_INVALID_PARAM(hEventObject, "hEventObject");
+
+       return LinuxEventObjectListDestroy(hEventObject);
+}
+
+#define _FREEZABLE IMG_TRUE
+#define _NON_FREEZABLE IMG_FALSE
+
+/*
+ * EventObjectWaitTimeout()
+ */
+static PVRSRV_ERROR EventObjectWaitTimeout(IMG_HANDLE hOSEventKM,
+                                                                                  IMG_UINT64 uiTimeoutus)
+{
+       PVRSRV_ERROR eError;
+
+       if (hOSEventKM && uiTimeoutus > 0)
+       {
+               eError = LinuxEventObjectWait(hOSEventKM, uiTimeoutus, _NON_FREEZABLE);
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_ERROR, "OSEventObjectWait: invalid arguments %p, %llu", hOSEventKM, uiTimeoutus));
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       return eError;
+}
+
+PVRSRV_ERROR OSEventObjectWaitTimeout(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus)
+{
+       return EventObjectWaitTimeout(hOSEventKM, uiTimeoutus);
+}
+
+PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM)
+{
+       return OSEventObjectWaitTimeout(hOSEventKM, EVENT_OBJECT_TIMEOUT_US);
+}
+
+PVRSRV_ERROR OSEventObjectWaitKernel(IMG_HANDLE hOSEventKM,
+                                     IMG_UINT64 uiTimeoutus)
+{
+       PVRSRV_ERROR eError;
+
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+       if (hOSEventKM)
+       {
+               if (uiTimeoutus > 0)
+                       eError = LinuxEventObjectWait(hOSEventKM, uiTimeoutus,
+                                                     _FREEZABLE);
+               else
+                       eError = LinuxEventObjectWaitUntilSignalled(hOSEventKM);
+       }
+#else /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */
+       if (hOSEventKM && uiTimeoutus > 0)
+       {
+               eError = LinuxEventObjectWait(hOSEventKM, uiTimeoutus,
+                                             _FREEZABLE);
+       }
+#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */
+       else
+       {
+               PVR_DPF((PVR_DBG_ERROR, "OSEventObjectWaitKernel: invalid arguments %p",
+                       hOSEventKM));
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       return eError;
+}
+
+void OSEventObjectDumpDebugInfo(IMG_HANDLE hOSEventKM)
+{
+       LinuxEventObjectDumpDebugInfo(hOSEventKM);
+}
+
+PVRSRV_ERROR OSEventObjectOpen(IMG_HANDLE hEventObject, IMG_HANDLE *phOSEvent)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(phOSEvent, "phOSEvent");
+       PVR_LOG_GOTO_IF_INVALID_PARAM(hEventObject, eError, error);
+
+       eError = LinuxEventObjectAdd(hEventObject, phOSEvent);
+       PVR_LOG_GOTO_IF_ERROR(eError, "LinuxEventObjectAdd", error);
+
+       return PVRSRV_OK;
+
+error:
+       *phOSEvent = NULL;
+       return eError;
+}
+
+PVRSRV_ERROR OSEventObjectClose(IMG_HANDLE hOSEventKM)
+{
+       PVR_LOG_RETURN_IF_INVALID_PARAM(hOSEventKM, "hOSEventKM");
+
+       return LinuxEventObjectDelete(hOSEventKM);
+}
+
+PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hEventObject)
+{
+       PVR_LOG_RETURN_IF_INVALID_PARAM(hEventObject, "hEventObject");
+
+       return LinuxEventObjectSignal(hEventObject);
+}
+
+PVRSRV_ERROR OSCopyToUser(void *pvProcess,
+                                                 void __user *pvDest,
+                                                 const void *pvSrc,
+                                                 size_t ui32Bytes)
+{
+       PVR_UNREFERENCED_PARAMETER(pvProcess);
+
+       if (pvr_copy_to_user(pvDest, pvSrc, ui32Bytes)==0)
+               return PVRSRV_OK;
+       else
+               return PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY;
+}
+
+PVRSRV_ERROR OSCopyFromUser(void *pvProcess,
+                                                       void *pvDest,
+                                                       const void __user *pvSrc,
+                                                       size_t ui32Bytes)
+{
+       PVR_UNREFERENCED_PARAMETER(pvProcess);
+
+       if (likely(pvr_copy_from_user(pvDest, pvSrc, ui32Bytes)==0))
+               return PVRSRV_OK;
+       else
+               return PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY;
+}
+
+IMG_UINT64 OSDivide64r64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder)
+{
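+       /* do_div() divides the 64-bit dividend in place and evaluates to the
+        * 32-bit remainder, so ui64Divident holds the quotient afterwards. */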
+       *pui32Remainder = do_div(ui64Divident, ui32Divisor);
+
+       return ui64Divident;
+}
+
+IMG_UINT32 OSDivide64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder)
+{
+       *pui32Remainder = do_div(ui64Divident, ui32Divisor);
+
+       return (IMG_UINT32) ui64Divident;
+}
+
+/* One time osfunc initialisation */
+PVRSRV_ERROR PVROSFuncInit(void)
+{
+       {
+               PVR_ASSERT(!psTimerWorkQueue);
+
+               psTimerWorkQueue = create_freezable_workqueue("pvr_timer");
+               if (psTimerWorkQueue == NULL)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: couldn't create timer workqueue",
+                                        __func__));
+                       return PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD;
+               }
+       }
+
+       {
+               IMG_UINT32 ui32i;
+
+               for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++)
+               {
+                       TIMER_CALLBACK_DATA *psTimerCBData = &sTimers[ui32i];
+
+                       INIT_WORK(&psTimerCBData->sWork, OSTimerWorkQueueCallBack);
+               }
+       }
+       return PVRSRV_OK;
+}
+
+/*
+ * Osfunc deinitialisation.
+ * Note that PVROSFuncInit may not have been called
+ */
+void PVROSFuncDeInit(void)
+{
+       if (psTimerWorkQueue != NULL)
+       {
+               destroy_workqueue(psTimerWorkQueue);
+               psTimerWorkQueue = NULL;
+       }
+}
+
+void OSDumpStack(void)
+{
+       dump_stack();
+}
+
+PVRSRV_ERROR OSChangeSparseMemCPUAddrMap(void **psPageArray,
+                                         IMG_UINT64 sCpuVAddrBase,
+                                         IMG_CPU_PHYADDR sCpuPAHeapBase,
+                                         IMG_UINT32 ui32AllocPageCount,
+                                         IMG_UINT32 *pai32AllocIndices,
+                                         IMG_UINT32 ui32FreePageCount,
+                                         IMG_UINT32 *pai32FreeIndices,
+                                         IMG_BOOL bIsLMA)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+       pfn_t sPFN;
+#else
+       IMG_UINT64 uiPFN;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+
+       PVRSRV_ERROR eError;
+
+       struct mm_struct *psMM = current->mm;
+       struct vm_area_struct *psVMA = NULL;
+       struct address_space *psMapping = NULL;
+       struct page *psPage = NULL;
+
+       IMG_UINT64 uiCPUVirtAddr = 0;
+       IMG_UINT32 ui32Loop = 0;
+       IMG_UINT32 ui32PageSize = OSGetPageSize();
+       IMG_BOOL bMixedMap = IMG_FALSE;
+
+       /*
+        * Acquire the lock before manipulating the VMA.
+        * Here the mmap write lock (mmap_lock, formerly mmap_sem) suffices, as
+        * the pages associated with this VMA are never meant to be swapped out.
+        *
+        * If in future the pages are marked as swappable, page_table_lock must
+        * be acquired in conjunction with it to disable page swapping.
+        */
+
+       /* Find the Virtual Memory Area associated with the user base address */
+       psVMA = find_vma(psMM, (uintptr_t)sCpuVAddrBase);
+       if (NULL == psVMA)
+       {
+               eError = PVRSRV_ERROR_PMR_NO_CPU_MAP_FOUND;
+               return eError;
+       }
+
+       /* Take the mmap write lock */
+       mmap_write_lock(psMM);
+
+       psMapping = psVMA->vm_file->f_mapping;
+
+       /* Set the page offset to the correct value as this is disturbed in MMAP_PMR func */
+       psVMA->vm_pgoff = (psVMA->vm_start >>  PAGE_SHIFT);
+
+       /* Delete the entries for the pages that got freed */
+       if (ui32FreePageCount && (pai32FreeIndices != NULL))
+       {
+               for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++)
+               {
+                       uiCPUVirtAddr = (uintptr_t)(sCpuVAddrBase + (pai32FreeIndices[ui32Loop] * ui32PageSize));
+
+                       unmap_mapping_range(psMapping, uiCPUVirtAddr, ui32PageSize, 1);
+
+#ifndef PVRSRV_UNMAP_ON_SPARSE_CHANGE
+                       /*
+                        * Still need to map pages in case remap flag is set.
+                        * That is not done until the remap case succeeds
+                        */
+#endif
+               }
+               eError = PVRSRV_OK;
+       }
+
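+       /* LMA heaps, and any page without a valid refcounted struct page, must
+        * be inserted as raw PFNs. That requires the VMA to be marked
+        * VM_MIXEDMAP; otherwise vm_insert_page() can be used below. */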
+       if ((psVMA->vm_flags & VM_MIXEDMAP) || bIsLMA)
+       {
+               psVMA->vm_flags |= VM_MIXEDMAP;
+               bMixedMap = IMG_TRUE;
+       }
+       else
+       {
+               if (ui32AllocPageCount && (NULL != pai32AllocIndices))
+               {
+                       for (ui32Loop = 0; ui32Loop < ui32AllocPageCount; ui32Loop++)
+                       {
+
+                               psPage = (struct page *)psPageArray[pai32AllocIndices[ui32Loop]];
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+                               sPFN = page_to_pfn_t(psPage);
+
+                               if (!pfn_t_valid(sPFN) || page_count(pfn_t_to_page(sPFN)) == 0)
+#else
+                               uiPFN = page_to_pfn(psPage);
+
+                               if (!pfn_valid(uiPFN) || (page_count(pfn_to_page(uiPFN)) == 0))
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+                               {
+                                       bMixedMap = IMG_TRUE;
+                                       psVMA->vm_flags |= VM_MIXEDMAP;
+                                       break;
+                               }
+                       }
+               }
+       }
+
+       /* Map the pages that got allocated */
+       if (ui32AllocPageCount && (NULL != pai32AllocIndices))
+       {
+               for (ui32Loop = 0; ui32Loop < ui32AllocPageCount; ui32Loop++)
+               {
+                       int err;
+
+                       uiCPUVirtAddr = (uintptr_t)(sCpuVAddrBase + (pai32AllocIndices[ui32Loop] * ui32PageSize));
+                       unmap_mapping_range(psMapping, uiCPUVirtAddr, ui32PageSize, 1);
+
+                       if (bIsLMA)
+                       {
+                               phys_addr_t uiAddr = sCpuPAHeapBase.uiAddr +
+                                                    ((IMG_DEV_PHYADDR *)psPageArray)[pai32AllocIndices[ui32Loop]].uiAddr;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+                               sPFN = phys_to_pfn_t(uiAddr, 0);
+                               psPage = pfn_t_to_page(sPFN);
+#else
+                               uiPFN = uiAddr >> PAGE_SHIFT;
+                               psPage = pfn_to_page(uiPFN);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+                       }
+                       else
+                       {
+                               psPage = (struct page *)psPageArray[pai32AllocIndices[ui32Loop]];
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+                               sPFN = page_to_pfn_t(psPage);
+#else
+                               uiPFN = page_to_pfn(psPage);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+                       }
+
+                       if (bMixedMap)
+                       {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0))
+                               vm_fault_t vmf;
+
+                               vmf = vmf_insert_mixed(psVMA, uiCPUVirtAddr, sPFN);
+                               if (vmf & VM_FAULT_ERROR)
+                               {
+                                       err = vm_fault_to_errno(vmf, 0);
+                               }
+                               else
+                               {
+                                       err = 0;
+                               }
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+                               err = vm_insert_mixed(psVMA, uiCPUVirtAddr, sPFN);
+#else
+                               err = vm_insert_mixed(psVMA, uiCPUVirtAddr, uiPFN);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) */
+                       }
+                       else
+                       {
+                               err = vm_insert_page(psVMA, uiCPUVirtAddr, psPage);
+                       }
+
+                       if (err)
+                       {
+                               PVR_DPF((PVR_DBG_MESSAGE, "Remap failure error code: %d", err));
+                               eError = PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED;
+                               goto eFailed;
+                       }
+               }
+       }
+
+       eError = PVRSRV_OK;
+eFailed:
+       mmap_write_unlock(psMM);
+
+       return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       OSDebugSignalPID
+@Description    Sends a SIGTRAP signal to a specific PID in user mode for
+                debugging purposes. The user mode process can register a handler
+                against this signal.
+                This is necessary to support the Rogue debugger. If the Rogue
+                debugger is not used then this function may be implemented as
+                a stub.
+@Input          ui32PID    The PID for the signal.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSDebugSignalPID(IMG_UINT32 ui32PID)
+{
+       int err;
+       struct pid *psPID;
+
+       psPID = find_vpid(ui32PID);
+       if (psPID == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get PID struct.", __func__));
+               return PVRSRV_ERROR_NOT_FOUND;
+       }
+
+       err = kill_pid(psPID, SIGTRAP, 0);
+       if (err != 0)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Signal Failure %d", __func__, err));
+               return PVRSRV_ERROR_SIGNAL_FAILED;
+       }
+
+       return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSIsKernelThread
+@Description    This API determines if the current running thread is a kernel
+                thread (i.e. one not associated with any userland process,
+                typically an MISR handler.)
+@Return         IMG_TRUE if it is a kernel thread, otherwise IMG_FALSE.
+*/ /**************************************************************************/
+IMG_BOOL OSIsKernelThread(void)
+{
+       /*
+        * Kernel threads have a NULL memory descriptor.
+        *
+        * See https://www.kernel.org/doc/Documentation/vm/active_mm.txt
+        */
+       return current->mm == NULL;
+}
+
+void OSDumpVersionInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                          void *pvDumpDebugFile)
+{
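+       /* Round up to whole pages before asking for the allocation order,
+        * e.g. with 4 KiB pages a 5000-byte request aligns to 8192 bytes and
+        * returns order 1 (two pages). */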
+       PVR_DUMPDEBUG_LOG("OS kernel info: %s %s %s %s",
+                                       utsname()->sysname,
+                                       utsname()->release,
+                                       utsname()->version,
+                                       utsname()->machine);
+}
+#if defined(SUPPORT_DMA_TRANSFER)
+
+typedef struct _OS_CLEANUP_DATA_
+{
+       IMG_BOOL bSucceed;
+       IMG_BOOL bAdvanceTimeline;
+       IMG_UINT uiRefCount;
+       IMG_UINT uiNumDMA;
+       IMG_UINT uiCount;
+
+       struct dma_async_tx_descriptor** ppsDescriptors;
+
+
+       PVRSRV_DEVICE_NODE *psDevNode;
+       PFN_SERVER_CLEANUP pfnServerCleanup;
+       void* pvServerCleanupData;
+
+       enum dma_transfer_direction eDirection;
+       struct sg_table **ppsSg;
+       struct page ***pages;
+       IMG_UINT32* puiNumPages;
+       spinlock_t spinlock;
+
+       struct completion start_cleanup;
+       struct completion *sync_completion;
+
+       /* Sparse PMR transfer information */
+       IMG_BOOL *pbIsSparse;
+       IMG_UINT *uiNumValidPages;
+       struct sg_table ***ppsSgSparse;
+       struct dma_async_tx_descriptor*** ppsDescriptorsSparse;
+
+} OS_CLEANUP_DATA;
+
+static int cleanup_thread(void *pvData)
+{
+       IMG_UINT32 i, j;
+       struct completion *sync_completion = NULL;
+       OS_CLEANUP_DATA *psOSCleanup = (OS_CLEANUP_DATA*)pvData;
+       IMG_BOOL bSucceed = psOSCleanup->bSucceed;
+
+       sync_completion = psOSCleanup->sync_completion;
+
+#if defined(DMA_VERBOSE)
+       PVR_DPF((PVR_DBG_ERROR, "Cleanup thread waiting (%p) on completion", pvData));
+#endif
+
+       wait_for_completion(&psOSCleanup->start_cleanup);
+
+#if defined(DMA_VERBOSE)
+       PVR_DPF((PVR_DBG_ERROR, "Cleanup thread notified (%p)", pvData));
+#endif
+       /* Free resources */
+       for (i=0; i<psOSCleanup->uiCount; i++)
+       {
+               if (!psOSCleanup->pbIsSparse[i])
+               {
+                       dma_sync_sg_for_cpu(psOSCleanup->psDevNode->psDevConfig->pvOSDevice,
+                                                               psOSCleanup->ppsSg[i]->sgl,
+                                                               psOSCleanup->ppsSg[i]->nents,
+                                                               psOSCleanup->eDirection);
+
+                       dma_unmap_sg(psOSCleanup->psDevNode->psDevConfig->pvOSDevice,
+                                                psOSCleanup->ppsSg[i]->sgl,
+                                                psOSCleanup->ppsSg[i]->nents,
+                                                psOSCleanup->eDirection);
+
+                       sg_free_table(psOSCleanup->ppsSg[i]);
+
+                       OSFreeMem(psOSCleanup->ppsSg[i]);
+
+                       /* Unpin pages */
+                       for (j=0; j<psOSCleanup->puiNumPages[i]; j++)
+                       {
+                               if (psOSCleanup->eDirection == DMA_DEV_TO_MEM)
+                               {
+                                       set_page_dirty_lock(psOSCleanup->pages[i][j]);
+                               }
+                               put_page(psOSCleanup->pages[i][j]);
+                       }
+               }
+               else
+               {
+                       for (j = 0; j < psOSCleanup->puiNumPages[i]; j++)
+                       {
+                               if (psOSCleanup->ppsSgSparse[i][j])
+                               {
+                                       dma_sync_sg_for_cpu(psOSCleanup->psDevNode->psDevConfig->pvOSDevice,
+                                                                               psOSCleanup->ppsSgSparse[i][j]->sgl,
+                                                                               psOSCleanup->ppsSgSparse[i][j]->nents,
+                                                                               psOSCleanup->eDirection);
+
+
+                                       dma_unmap_sg(psOSCleanup->psDevNode->psDevConfig->pvOSDevice,
+                                                               psOSCleanup->ppsSgSparse[i][j]->sgl,
+                                                               psOSCleanup->ppsSgSparse[i][j]->nents,
+                                                               psOSCleanup->eDirection);
+
+                                       sg_free_table(psOSCleanup->ppsSgSparse[i][j]);
+
+                                       OSFreeMem(psOSCleanup->ppsSgSparse[i][j]);
+
+                               }
+                       }
+
+                       OSFreeMem(psOSCleanup->ppsSgSparse[i]);
+                       OSFreeMem(psOSCleanup->ppsDescriptorsSparse[i]);
+
+                       /* Unpin pages */
+                       for (j=0; j<psOSCleanup->puiNumPages[i]*2; j++)
+                       {
+                               /*
+                                * Some pages may have been pinned twice;
+                                * others may not have been pinned at all.
+                                */
+                               if (psOSCleanup->pages[i][j])
+                               {
+                                       if (psOSCleanup->eDirection == DMA_DEV_TO_MEM)
+                                       {
+                                               set_page_dirty_lock(psOSCleanup->pages[i][j]);
+                                       }
+                                       put_page(psOSCleanup->pages[i][j]);
+                               }
+                       }
+               }
+
+               OSFreeMem(psOSCleanup->pages[i]);
+       }
+
+       psOSCleanup->pfnServerCleanup(psOSCleanup->pvServerCleanupData,
+                                                                 psOSCleanup->bAdvanceTimeline);
+
+       OSFreeMem(psOSCleanup->ppsSg);
+       OSFreeMem(psOSCleanup->pages);
+       OSFreeMem(psOSCleanup->puiNumPages);
+       OSFreeMem(psOSCleanup->ppsSgSparse);
+       OSFreeMem(psOSCleanup->ppsDescriptorsSparse);
+       OSFreeMem(psOSCleanup->ppsDescriptors);
+       OSFreeMem(psOSCleanup->pbIsSparse);
+       OSFreeMem(psOSCleanup->uiNumValidPages);
+       OSFreeMem(psOSCleanup);
+
+       if (sync_completion && bSucceed)
+       {
+               complete(sync_completion);
+       }
+
+       /* Returning from the thread function is sufficient to terminate the
+        * kthread; an explicit do_exit() is not needed here. */
+       return 0;
+}
+
+static void dma_callback(void *pvOSCleanup)
+{
+       OS_CLEANUP_DATA *psOSCleanup = (OS_CLEANUP_DATA*)pvOSCleanup;
+       unsigned long flags;
+
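+       /* Each scheduled DMA operation holds one reference; when the final
+        * completion callback drops the count to zero, the cleanup thread
+        * blocked on start_cleanup is released. */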
+#if defined(DMA_VERBOSE)
+       PVR_DPF((PVR_DBG_ERROR, "dma_callback (%p) refcount decreased to %d", psOSCleanup, psOSCleanup->uiRefCount - 1));
+#endif
+       spin_lock_irqsave(&psOSCleanup->spinlock, flags);
+
+       psOSCleanup->uiRefCount--;
+
+       if (psOSCleanup->uiRefCount==0)
+       {
+               /* Notify the cleanup thread */
+               spin_unlock_irqrestore(&psOSCleanup->spinlock, flags);
+               complete(&psOSCleanup->start_cleanup);
+               return;
+       }
+
+       spin_unlock_irqrestore(&psOSCleanup->spinlock, flags);
+}
+
+#if defined(SUPPORT_VALIDATION) && defined(PVRSRV_DEBUG_DMA)
+static void
+DMADumpPhysicalAddresses(struct page **ppsHostMemPages,
+                                                IMG_UINT32 uiNumPages,
+                                                IMG_DMA_ADDR *sDmaAddr,
+                                                IMG_UINT64 ui64Offset)
+{
+       IMG_CPU_PHYADDR sPagePhysAddr;
+       IMG_UINT32 uiIdx;
+
+       PVR_DPF((PVR_DBG_MESSAGE, "DMA Transfer Address Dump"));
+       PVR_DPF((PVR_DBG_MESSAGE, "Hostmem phys addresses:"));
+
+       for (uiIdx = 0; uiIdx < uiNumPages; uiIdx++)
+       {
+               sPagePhysAddr.uiAddr = page_to_phys(ppsHostMemPages[uiIdx]);
+               if (uiIdx == 0)
+               {
+                       sPagePhysAddr.uiAddr += ui64Offset;
+                       PVR_DPF((PVR_DBG_MESSAGE, "\tHost mem start at 0x%llX", sPagePhysAddr.uiAddr));
+               }
+               else
+               {
+                       PVR_DPF((PVR_DBG_MESSAGE, "\tHost Mem Page %d at 0x%llX", uiIdx,
+                                        sPagePhysAddr.uiAddr));
+               }
+       }
+       PVR_DPF((PVR_DBG_MESSAGE, "Devmem CPU phys address: 0x%llX",
+                        sDmaAddr->uiAddr));
+}
+#endif
+
+PVRSRV_ERROR OSDmaSubmitTransfer(PVRSRV_DEVICE_NODE *psDevNode, void *pvOSData,
+                                   void *pvChan, IMG_BOOL bSynchronous)
+{
+       OS_CLEANUP_DATA *psOSCleanup = (OS_CLEANUP_DATA*)pvOSData;
+       struct completion* sync_completion = NULL;
+
+       psOSCleanup->bSucceed = IMG_TRUE;
+       psOSCleanup->bAdvanceTimeline = IMG_TRUE;
+
+       if (bSynchronous)
+       {
+               sync_completion = OSAllocZMem(sizeof(struct completion));
+               PVR_LOG_RETURN_IF_NOMEM(sync_completion, "sync_completion");
+               init_completion(sync_completion);
+       }
+
+       PVR_UNREFERENCED_PARAMETER(psDevNode);
+       /* Wait only on the number of ops actually scheduled; this can differ
+        * from uiNumDMA under certain error conditions. */
+       psOSCleanup->uiRefCount = psOSCleanup->uiCount;
+       psOSCleanup->sync_completion = sync_completion;
+
+       {
+               IMG_UINT32 i,j;
+               for (i=0; i<psOSCleanup->uiCount; i++)
+               {
+                       if (psOSCleanup->pbIsSparse[i])
+                       {
+                               for (j=0; j<psOSCleanup->puiNumPages[i]; j++)
+                               {
+                                       if (psOSCleanup->ppsDescriptorsSparse[i][j])
+                                               dmaengine_submit(psOSCleanup->ppsDescriptorsSparse[i][j]);
+                               }
+                       }
+                       else
+                       {
+                               dmaengine_submit(psOSCleanup->ppsDescriptors[i]);
+                       }
+               }
+       }
+
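+       /* dmaengine_submit() only places descriptors on the channel's pending
+        * queue; dma_async_issue_pending() is what actually starts the transfers. */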
+       dma_async_issue_pending(pvChan);
+
+       if (bSynchronous)
+       {
+               wait_for_completion(sync_completion);
+               OSFreeMem(sync_completion);
+       }
+
+       return PVRSRV_OK;
+}
+
+void OSDmaForceCleanup(PVRSRV_DEVICE_NODE *psDevNode, void *pvChan,
+                                          void *pvOSData, void *pvServerCleanupParam,
+                                          PFN_SERVER_CLEANUP pfnServerCleanup)
+{
+       OS_CLEANUP_DATA *psOSCleanup = (OS_CLEANUP_DATA *)pvOSData;
+       IMG_UINT ui32Retries;
+
+       PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+       psOSCleanup->bSucceed = IMG_FALSE;
+       psOSCleanup->bAdvanceTimeline = IMG_TRUE;
+
+       /* Need to wait for outstanding DMA Engine ops before advancing the
+          user-supplied timeline in case of error. dmaengine_terminate_sync
+          cannot be called from within atomic context, so cannot invoke it
+          from inside the cleanup kernel thread. */
+       for (ui32Retries = 0; ui32Retries < DMA_ERROR_SYNC_RETRIES; ui32Retries++)
+       {
+               if (dmaengine_terminate_sync(pvChan) == 0)
+               {
+                       break;
+               }
+       }
+       if (ui32Retries == DMA_ERROR_SYNC_RETRIES)
+       {
+               /* We cannot guarantee all outstanding DMAs were terminated
+                * so we let the UM fence time out as a fallback mechanism */
+               psOSCleanup->bAdvanceTimeline = IMG_FALSE;
+       }
+
+       if (psOSCleanup->uiCount > 0)
+       {
+               complete(&psOSCleanup->start_cleanup);
+       }
+       else
+       {
+               /* Cleanup kthread never run, need to manually wind down */
+               pfnServerCleanup(pvServerCleanupParam, psOSCleanup->bAdvanceTimeline);
+
+               OSFreeMem(psOSCleanup->ppsSg);
+               OSFreeMem(psOSCleanup->pages);
+               OSFreeMem(psOSCleanup->puiNumPages);
+               OSFreeMem(psOSCleanup->ppsSgSparse);
+               OSFreeMem(psOSCleanup->pbIsSparse);
+               OSFreeMem(psOSCleanup->uiNumValidPages);
+               OSFreeMem(psOSCleanup->ppsDescriptors);
+               OSFreeMem(psOSCleanup->ppsDescriptorsSparse);
+
+               OSFreeMem(psOSCleanup);
+       }
+}
+
+PVRSRV_ERROR OSDmaAllocData(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 uiNumDMA, void **pvOutData)
+{
+       PVRSRV_ERROR eError;
+       OS_CLEANUP_DATA *psOSCleanup = OSAllocZMem(sizeof(OS_CLEANUP_DATA));
+       PVR_LOG_GOTO_IF_NOMEM(psOSCleanup, eError, e0);
+
+       psOSCleanup->uiNumDMA = uiNumDMA;
+       psOSCleanup->psDevNode = psDevNode;
+
+       spin_lock_init(&psOSCleanup->spinlock);
+
+       init_completion(&psOSCleanup->start_cleanup);
+
+       psOSCleanup->ppsDescriptors = OSAllocZMem(uiNumDMA * sizeof(struct dma_async_tx_descriptor*));
+       PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->ppsDescriptors, eError, e0);
+
+       psOSCleanup->ppsDescriptorsSparse = OSAllocZMem(uiNumDMA * sizeof(struct dma_async_tx_descriptor*));
+       PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->ppsDescriptorsSparse, eError, e11);
+
+       psOSCleanup->ppsSg = OSAllocZMem(uiNumDMA * sizeof(struct sg_table*));
+       PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->ppsSg, eError, e1);
+
+       psOSCleanup->ppsSgSparse = OSAllocZMem(uiNumDMA * sizeof(struct sg_table*));
+       PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->ppsSgSparse, eError, e12);
+
+       psOSCleanup->pbIsSparse = OSAllocZMem(uiNumDMA * sizeof(IMG_BOOL));
+       PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->pbIsSparse, eError, e13);
+
+       psOSCleanup->uiNumValidPages = OSAllocZMem(uiNumDMA * sizeof(IMG_UINT));
+       PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->uiNumValidPages, eError, e14);
+
+       psOSCleanup->pages = OSAllocZMem(uiNumDMA * sizeof(struct page **));
+       PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->pages, eError, e2);
+
+       psOSCleanup->puiNumPages = OSAllocZMem(uiNumDMA * sizeof(IMG_UINT32));
+       PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->puiNumPages, eError, e3);
+
+       *pvOutData = psOSCleanup;
+
+       return PVRSRV_OK;
+
+e3:
+       OSFreeMem(psOSCleanup->pages);
+e2:
+       OSFreeMem(psOSCleanup->uiNumValidPages);
+e14:
+       OSFreeMem(psOSCleanup->pbIsSparse);
+e13:
+       OSFreeMem(psOSCleanup->ppsSgSparse);
+e12:
+       OSFreeMem(psOSCleanup->ppsSg);
+e1:
+       OSFreeMem(psOSCleanup->ppsDescriptorsSparse);
+e11:
+       OSFreeMem(psOSCleanup->ppsDescriptors);
+e0:
+       OSFreeMem(psOSCleanup);
+       return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       OSDmaTransfer
+@Description    This API is used to ask OS to perform a DMA transfer operation
+@Return
+*/ /**************************************************************************/
+PVRSRV_ERROR OSDmaPrepareTransfer(PVRSRV_DEVICE_NODE *psDevNode,
+                                                          void* pvChan,
+                                                          IMG_DMA_ADDR* psDmaAddr, IMG_UINT64* puiAddress,
+                                                          IMG_UINT64 uiSize, IMG_BOOL bMemToDev,
+                                                          void* pvOSData,
+                                                          void* pvServerCleanupParam, PFN_SERVER_CLEANUP pfnServerCleanup, IMG_BOOL bFirst)
+{
+
+       IMG_INT iRet;
+       PVRSRV_ERROR eError;
+       PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig;
+       OS_CLEANUP_DATA* psOSCleanupData = pvOSData;
+
+       struct dma_slave_config sConfig = {0};
+       struct dma_async_tx_descriptor *psDesc;
+
+       unsigned long offset = (unsigned long)puiAddress & ((1 << PAGE_SHIFT) - 1);
+       unsigned int num_pages = (uiSize + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       int num_pinned_pages = 0;
+       unsigned int gup_flags = 0;
+
+       struct sg_table *psSg = OSAllocZMem(sizeof(struct sg_table));
+       PVR_LOG_GOTO_IF_NOMEM(psSg, eError, e0);
+
+       psOSCleanupData->pages[psOSCleanupData->uiCount] = OSAllocZMem(num_pages * sizeof(struct page *));
+       PVR_LOG_GOTO_IF_NOMEM(psOSCleanupData->pages[psOSCleanupData->uiCount], eError, e1);
+
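+       /* Device-to-memory transfers write into the pinned user pages, so the
+        * pages must be pinned writable (FOLL_WRITE); memory-to-device
+        * transfers only read from them. */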
+       gup_flags |= bMemToDev ? 0 : FOLL_WRITE;
+
+       num_pinned_pages = get_user_pages_fast(
+                       (unsigned long)puiAddress,
+                       (int)num_pages,
+                       gup_flags,
+                       psOSCleanupData->pages[psOSCleanupData->uiCount]);
+       if (num_pinned_pages != num_pages)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "get_user_pages_fast failed: (%d - %u)", num_pinned_pages, num_pages));
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto e2;
+       }
+
+#if defined(SUPPORT_VALIDATION) && defined(PVRSRV_DEBUG_DMA)
+       DMADumpPhysicalAddresses(psOSCleanupData->pages[psOSCleanupData->uiCount],
+                                                        num_pages, psDmaAddr, offset);
+#endif
+
+       psOSCleanupData->puiNumPages[psOSCleanupData->uiCount] = num_pinned_pages;
+
+       if (sg_alloc_table_from_pages(psSg, psOSCleanupData->pages[psOSCleanupData->uiCount], num_pages, offset, uiSize, GFP_KERNEL) != 0)
+       {
+               eError = PVRSRV_ERROR_BAD_MAPPING;
+               PVR_DPF((PVR_DBG_ERROR, "sg_alloc_table_from_pages failed"));
+               goto e3;
+       }
+
+       if (bMemToDev)
+       {
+               sConfig.direction = DMA_MEM_TO_DEV;
+               sConfig.src_addr = 0;
+               sConfig.dst_addr = psDmaAddr->uiAddr;
+       }
+       else
+       {
+               sConfig.direction = DMA_DEV_TO_MEM;
+               sConfig.src_addr = psDmaAddr->uiAddr;
+               sConfig.dst_addr = 0;
+       }
+       dmaengine_slave_config(pvChan, &sConfig);
+
+       iRet = dma_map_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, sConfig.direction);
+       if (!iRet)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Error mapping SG list", __func__));
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto e4;
+       }
+
+       dma_sync_sg_for_device(psDevConfig->pvOSDevice, psSg->sgl,(unsigned int)iRet, sConfig.direction);
+
+       psDesc = dmaengine_prep_slave_sg(pvChan, psSg->sgl, (unsigned int)iRet, sConfig.direction, 0);
+       if (!psDesc)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: dmaengine_prep_slave_sg failed", __func__));
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto e5;
+       }
+
+       psOSCleanupData->eDirection = sConfig.direction;
+       psOSCleanupData->ppsSg[psOSCleanupData->uiCount] = psSg;
+       psOSCleanupData->pfnServerCleanup = pfnServerCleanup;
+       psOSCleanupData->pvServerCleanupData = pvServerCleanupParam;
+
+       psDesc->callback_param = psOSCleanupData;
+       psDesc->callback = dma_callback;
+
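+       /* Spawn the DMA cleanup kthread for the first transfer of the batch;
+        * it is expected to tear down the mappings and unpin the pages once
+        * dma_callback() signals completion. The returned task_struct is not
+        * checked or retained here. */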
+       if (bFirst)
+       {
+               struct task_struct* t1;
+               t1 = kthread_run(cleanup_thread, psOSCleanupData, "dma-cleanup-thread");
+       }
+       psOSCleanupData->ppsDescriptors[psOSCleanupData->uiCount] = psDesc;
+
+       psOSCleanupData->uiCount++;
+
+       return PVRSRV_OK;
+
+e5:
+       dma_unmap_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, sConfig.direction);
+e4:
+       sg_free_table(psSg);
+e3:
+       {
+               IMG_UINT32 i;
+               /* Unpin pages */
+               for (i=0; i<psOSCleanupData->puiNumPages[psOSCleanupData->uiCount]; i++)
+               {
+                       put_page(psOSCleanupData->pages[psOSCleanupData->uiCount][i]);
+               }
+       }
+e2:
+       OSFreeMem(psOSCleanupData->pages[psOSCleanupData->uiCount]);
+e1:
+       OSFreeMem(psSg);
+e0:
+       return eError;
+}
+
+static IMG_UINT32
+CalculateValidPages(IMG_BOOL *pbValid,
+                                       IMG_UINT32 ui32SizeInPages)
+{
+       IMG_UINT32 ui32nValid;
+       IMG_UINT32 ui32Idx;
+
+       for (ui32Idx = 0, ui32nValid = 0; ui32Idx < ui32SizeInPages; ui32Idx++)
+       {
+               ui32nValid += pbValid[ui32Idx] ? 1 : 0;
+       }
+
+       return ui32nValid;
+}
+
+PVRSRV_ERROR OSDmaPrepareTransferSparse(PVRSRV_DEVICE_NODE *psDevNode,
+                                                                               void* pvChan,
+                                                                               IMG_DMA_ADDR* psDmaAddr,
+                                                                               IMG_BOOL *pbValid,
+                                                                               IMG_UINT64* puiAddress,
+                                                                               IMG_UINT64 uiSize,
+                                                                               IMG_UINT32 uiOffsetInFirstPMRPage,
+                                                                               IMG_UINT32 ui32SizeInPages,
+                                                                               IMG_BOOL bMemToDev,
+                                                                               void* pvOSData,
+                                                                               void* pvServerCleanupParam,
+                                                                               PFN_SERVER_CLEANUP pfnServerCleanup,
+                                                                               IMG_BOOL bFirst)
+{
+
+       IMG_INT iRet;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig;
+       OS_CLEANUP_DATA* psOSCleanupData = pvOSData;
+       IMG_UINT32 ui32PageSize = OSGetPageSize();
+       void *pvNextAddress = puiAddress;
+       IMG_UINT32 ui32Idx;
+       IMG_INT32 i32Rwd;
+
+       struct dma_slave_config sConfig = {0};
+       struct dma_async_tx_descriptor *psDesc;
+
+       unsigned long offset = (unsigned long)puiAddress & ((1 << PAGE_SHIFT) - 1);
+       unsigned int num_pages = (uiSize + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       unsigned int num_valid_pages = CalculateValidPages(pbValid, ui32SizeInPages);
+       unsigned int num_pinned_pages = 0;
+       unsigned int gup_flags = 0;
+       unsigned int valid_idx;
+       size_t transfer_size;
+       struct page ** next_pages;
+       struct sg_table *psSg;
+
+       psOSCleanupData->uiNumValidPages[psOSCleanupData->uiCount] = num_valid_pages;
+       psOSCleanupData->pbIsSparse[psOSCleanupData->uiCount] = IMG_TRUE;
+
+       /*
+        * If an SG transfer from virtual memory to card memory goes over a page boundary in
+        * main memory, it'll span two different pages - therefore, total number of pages to
+        * keep track of should be twice as many as for a simple transfer. This twice-as-big
+        * allocation is also necessary because the same virtual memory page might be present
+        * in more than one SG DMA transfer, because of differences in first-page offset between
+        * the sparse device PMR and the virtual memory buffer.
+        */
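+       /*
+        * Illustrative example (hypothetical numbers): with 4KB pages, a user
+        * buffer starting at offset 0x100 within its page and an
+        * uiOffsetInFirstPMRPage of 0x800 gives a first chunk of 0x800 bytes
+        * ending at user offset 0x900; the next 0x1000-byte chunk then spans
+        * two user pages, one of which was already pinned for the first chunk.
+        */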
+       psOSCleanupData->pages[psOSCleanupData->uiCount] = OSAllocZMem(2*num_valid_pages * sizeof(struct page *));
+       PVR_LOG_GOTO_IF_NOMEM(psOSCleanupData->pages[psOSCleanupData->uiCount], eError, e0);
+
+       psOSCleanupData->ppsSgSparse[psOSCleanupData->uiCount] = OSAllocZMem(num_valid_pages * sizeof(struct sg_table *));
+       PVR_LOG_GOTO_IF_NOMEM(psOSCleanupData->ppsSgSparse[psOSCleanupData->uiCount], eError, e1);
+
+       psOSCleanupData->ppsDescriptorsSparse[psOSCleanupData->uiCount] = OSAllocZMem(num_valid_pages * sizeof(struct dma_async_tx_descriptor *));
+       PVR_LOG_GOTO_IF_NOMEM(psOSCleanupData->ppsDescriptorsSparse[psOSCleanupData->uiCount], eError, e11);
+
+       gup_flags |= bMemToDev ? 0 : FOLL_WRITE;
+
+       for (ui32Idx = 0, valid_idx = 0; ui32Idx < ui32SizeInPages; ui32Idx++)
+       {
+               if (valid_idx == num_valid_pages)
+               {
+                       break;
+               }
+               if (!pbValid[ui32Idx])
+               {
+                       pvNextAddress += (ui32Idx == 0) ? ui32PageSize - uiOffsetInFirstPMRPage : ui32PageSize;
+                       continue;
+               }
+
+               /* Pick transfer size */
+               if (ui32Idx == 0)
+               {
+                       if (uiOffsetInFirstPMRPage + uiSize <= ui32PageSize)
+                       {
+                               PVR_ASSERT(num_valid_pages == 1);
+                               transfer_size = uiSize;
+                       }
+                       else
+                       {
+                               transfer_size = ui32PageSize - uiOffsetInFirstPMRPage;
+                       }
+               }
+               else
+               {
+                       /* Last valid LMA page */
+                       if (valid_idx == num_valid_pages - 1)
+                       {
+                               transfer_size = ((uiOffsetInFirstPMRPage + uiSize - 1) % ui32PageSize) + 1;
+                       }
+                       else
+                       {
+                               transfer_size = ui32PageSize;
+                       }
+               }
+
+               if (((unsigned long long)pvNextAddress & (ui32PageSize - 1)) + transfer_size > ui32PageSize)
+               {
+                       num_pages = 2;
+               }
+               else
+               {
+                       num_pages = 1;
+               }
+
+               next_pages = psOSCleanupData->pages[psOSCleanupData->uiCount] + (valid_idx * 2);
+
+               num_pinned_pages = get_user_pages_fast(
+                       (unsigned long)pvNextAddress,
+                       (int)num_pages,
+                       gup_flags,
+                       next_pages);
+               if (num_pinned_pages != num_pages)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "get_user_pages_fast for sparse failed: (%d - %u)", num_pinned_pages, num_pages));
+                       eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                       goto e2;
+               }
+
+#if defined(SUPPORT_VALIDATION) && defined(PVRSRV_DEBUG_DMA)
+               DMADumpPhysicalAddresses(next_pages, num_pages,
+                                                                &psDmaAddr[ui32Idx],
+                                                                (unsigned long)pvNextAddress & (ui32PageSize - 1));
+#endif
+
+               psSg = OSAllocZMem(sizeof(struct sg_table));
+               PVR_LOG_GOTO_IF_NOMEM(psSg, eError, e3);
+
+               if (sg_alloc_table_from_pages(psSg, next_pages, num_pages,
+                                                                         (unsigned long)pvNextAddress & (ui32PageSize - 1),
+                                                                         transfer_size,
+                                                                         GFP_KERNEL) != 0)
+               {
+                       eError = PVRSRV_ERROR_BAD_MAPPING;
+                       PVR_DPF((PVR_DBG_ERROR, "sg_alloc_table_from_pages failed"));
+                       goto e4;
+               }
+
+               pvNextAddress += transfer_size;
+
+               if (bMemToDev)
+               {
+                       sConfig.direction = DMA_MEM_TO_DEV;
+                       sConfig.src_addr = 0;
+                       sConfig.dst_addr = psDmaAddr[ui32Idx].uiAddr;
+               }
+               else
+               {
+                       sConfig.direction = DMA_DEV_TO_MEM;
+                       sConfig.src_addr = psDmaAddr[ui32Idx].uiAddr;
+                       sConfig.dst_addr = 0;
+               }
+               dmaengine_slave_config(pvChan, &sConfig);
+
+               iRet = dma_map_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, sConfig.direction);
+               if (!iRet)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Error mapping SG list", __func__));
+                       eError = PVRSRV_ERROR_INVALID_PARAMS;
+                       goto e5;
+               }
+               dma_sync_sg_for_device(psDevConfig->pvOSDevice, psSg->sgl,(unsigned int)iRet, sConfig.direction);
+
+               psDesc = dmaengine_prep_slave_sg(pvChan, psSg->sgl, (unsigned int)iRet, sConfig.direction, 0);
+               if (!psDesc)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: dmaengine_prep_slave_sg failed", __func__));
+                       eError = PVRSRV_ERROR_INVALID_PARAMS;
+                       goto e6;
+               }
+
+               psOSCleanupData->ppsSgSparse[psOSCleanupData->uiCount][valid_idx] = psSg;
+               psOSCleanupData->ppsDescriptorsSparse[psOSCleanupData->uiCount][valid_idx] = psDesc;
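+               /* For sparse transfers puiNumPages counts the chunks (SG
+                * descriptors) built so far rather than pinned pages; each
+                * chunk pins at most two user pages. */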
+               psOSCleanupData->puiNumPages[psOSCleanupData->uiCount] = ++valid_idx;
+
+               if (valid_idx == num_valid_pages)
+               {
+                       psDesc->callback_param = psOSCleanupData;
+                       psDesc->callback = dma_callback;
+
+                       if (bFirst)
+                       {
+                               struct task_struct* t1;
+
+                               psOSCleanupData->eDirection = sConfig.direction;
+                               psOSCleanupData->pfnServerCleanup = pfnServerCleanup;
+                               psOSCleanupData->pvServerCleanupData = pvServerCleanupParam;
+
+                               t1 = kthread_run(cleanup_thread, psOSCleanupData, "dma-cleanup-thread");
+                       }
+
+                       psOSCleanupData->uiCount++;
+               }
+
+       }
+
+       return PVRSRV_OK;
+
+e6:
+       dma_unmap_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, sConfig.direction);
+e5:
+       sg_free_table(psSg);
+e4:
+       OSFreeMem(psSg);
+e3:
+       /* Unpin the pages pinned for the failed chunk; they were stored at
+        * index valid_idx*2 (and valid_idx*2+1 when the chunk spans two pages). */
+       put_page(psOSCleanupData->pages[psOSCleanupData->uiCount][valid_idx*2]);
+       if (psOSCleanupData->pages[psOSCleanupData->uiCount][valid_idx*2+1])
+       {
+               put_page(psOSCleanupData->pages[psOSCleanupData->uiCount][valid_idx*2+1]);
+       }
+e2:
+       /* Rewind: unmap and free the SG tables built so far, then unpin every
+        * page pinned for this transfer exactly once. */
+       for (i32Rwd=valid_idx-1; i32Rwd >= 0; i32Rwd--)
+       {
+               psSg = psOSCleanupData->ppsSgSparse[psOSCleanupData->uiCount][i32Rwd];
+               dma_unmap_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, sConfig.direction);
+               sg_free_table(psSg);
+               OSFreeMem(psSg);
+       }
+       {
+               IMG_UINT32 i;
+
+               /* Unpin pages */
+               for (i=0; i < psOSCleanupData->puiNumPages[psOSCleanupData->uiCount]*2; i++)
+               {
+                       if (psOSCleanupData->pages[psOSCleanupData->uiCount][i])
+                       {
+                               put_page(psOSCleanupData->pages[psOSCleanupData->uiCount][i]);
+                       }
+               }
+       }
+       OSFreeMem(psOSCleanupData->ppsDescriptorsSparse[psOSCleanupData->uiCount]);
+e11:
+       OSFreeMem(psOSCleanupData->ppsSgSparse[psOSCleanupData->uiCount]);
+e1:
+       OSFreeMem(psOSCleanupData->pages[psOSCleanupData->uiCount]);
+e0:
+       return eError;
+}
+
+#endif /* SUPPORT_DMA_TRANSFER */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/osfunc_arm.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/osfunc_arm.c
new file mode 100644 (file)
index 0000000..7d52c1e
--- /dev/null
@@ -0,0 +1,151 @@
+/*************************************************************************/ /*!
+@File
+@Title          arm specific OS functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Processor specific OS functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/version.h>
+#include <linux/dma-mapping.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0))
+ #include <asm/system.h>
+#endif
+#include <asm/cacheflush.h>
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+
+
+static inline size_t pvr_dmac_range_len(const void *pvStart, const void *pvEnd)
+{
+       return (size_t)((char *)pvEnd - (char *)pvStart);
+}
+
+void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                            void *pvVirtStart,
+                            void *pvVirtEnd,
+                            IMG_CPU_PHYADDR sCPUPhysStart,
+                            IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+       PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+       arm_dma_ops.sync_single_for_device(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE);
+       arm_dma_ops.sync_single_for_cpu(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE);
+#else  /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+       /* Inner cache */
+       dmac_flush_range(pvVirtStart, pvVirtEnd);
+
+       /* Outer cache */
+       outer_flush_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+}
+
+void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                            void *pvVirtStart,
+                            void *pvVirtEnd,
+                            IMG_CPU_PHYADDR sCPUPhysStart,
+                            IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+       PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+       arm_dma_ops.sync_single_for_device(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE);
+#else  /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+       /* Inner cache */
+       dmac_map_area(pvVirtStart, pvr_dmac_range_len(pvVirtStart, pvVirtEnd), DMA_TO_DEVICE);
+
+       /* Outer cache */
+       outer_clean_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+}
+
+void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                                 void *pvVirtStart,
+                                 void *pvVirtEnd,
+                                 IMG_CPU_PHYADDR sCPUPhysStart,
+                                 IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+       PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+       arm_dma_ops.sync_single_for_cpu(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE);
+#else  /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+       /* Inner cache */
+       dmac_map_area(pvVirtStart, pvr_dmac_range_len(pvVirtStart, pvVirtEnd), DMA_FROM_DEVICE);
+
+       /* Outer cache */
+       outer_inv_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+}
+
+OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+       return OS_CACHE_OP_ADDR_TYPE_PHYSICAL;
+#else
+       return OS_CACHE_OP_ADDR_TYPE_BOTH;
+#endif
+}
+
+/* User Enable Register */
+#define PMUSERENR_EN      0x00000001 /* enable user access to the counters */
+
+static void per_cpu_perf_counter_user_access_en(void *data)
+{
+       PVR_UNREFERENCED_PARAMETER(data);
+       /* Enable user-mode access to counters. */
+       asm volatile("mcr p15, 0, %0, c9, c14, 0" :: "r"(PMUSERENR_EN));
+}
+
+void OSUserModeAccessToPerfCountersEn(void)
+{
+       on_each_cpu(per_cpu_perf_counter_user_access_en, NULL, 1);
+}
+
+IMG_BOOL OSIsWriteCombineUnalignedSafe(void)
+{
+       /*
+        * The kernel looks to have always used normal memory under ARM32.
+        * See osfunc_arm64.c implementation for more details.
+        */
+       return IMG_TRUE;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/osfunc_arm64.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/osfunc_arm64.c
new file mode 100644 (file)
index 0000000..68d1285
--- /dev/null
@@ -0,0 +1,290 @@
+/*************************************************************************/ /*!
+@File
+@Title          arm64 specific OS functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Processor specific OS functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/version.h>
+#include <linux/cpumask.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+#include <linux/uaccess.h>
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+
+#include "kernel_compatibility.h"
+
+#if defined(CONFIG_OUTER_CACHE)
+  /* If you encounter a 64-bit ARM system with an outer cache, you'll need
+   * to add the necessary code to manage that cache. See osfunc_arm.c
+   * for an example of how to do so.
+   */
+       #error "CONFIG_OUTER_CACHE not supported on arm64."
+#endif
+
+static inline void begin_user_mode_access(void)
+{
+#if defined(CONFIG_ARM64) && defined(CONFIG_ARM64_SW_TTBR0_PAN)
+       uaccess_enable_privileged();
+#endif
+}
+
+static inline void end_user_mode_access(void)
+{
+#if defined(CONFIG_ARM64) && defined(CONFIG_ARM64_SW_TTBR0_PAN)
+       uaccess_disable_privileged();
+#endif
+}
+
+static inline void FlushRange(void *pvRangeAddrStart,
+                                                         void *pvRangeAddrEnd,
+                                                         PVRSRV_CACHE_OP eCacheOp)
+{
+       IMG_UINT32 ui32CacheLineSize = OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE);
+       IMG_BYTE *pbStart = pvRangeAddrStart;
+       IMG_BYTE *pbEnd = pvRangeAddrEnd;
+       IMG_BYTE *pbBase;
+
+       /*
+         On arm64, the TRM states in D5.8.1 (data and unified caches) that if cache
+         maintenance is performed on a memory location using a VA, the effect of
+         that cache maintenance is visible to all VA aliases of the physical memory
+         location. So here it's quicker to issue the machine cache maintenance
+         instruction directly without going via the Linux kernel DMA framework as
+         this is sufficient to maintain the CPU d-caches on arm64.
+        */
+
+       begin_user_mode_access();
+
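+       /* Only the end address needs rounding up, so the final partial cache
+        * line is covered; each DC operation acts on the whole cache line
+        * containing the supplied address, so the start needs no alignment. */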
+       pbEnd = (IMG_BYTE *) PVR_ALIGN((uintptr_t)pbEnd, (uintptr_t)ui32CacheLineSize);
+       for (pbBase = pbStart; pbBase < pbEnd; pbBase += ui32CacheLineSize)
+       {
+               switch (eCacheOp)
+               {
+                       case PVRSRV_CACHE_OP_CLEAN:
+                               asm volatile ("dc cvac, %0" :: "r" (pbBase));
+                               break;
+
+                       case PVRSRV_CACHE_OP_INVALIDATE:
+                               asm volatile ("dc ivac, %0" :: "r" (pbBase));
+                               break;
+
+                       case PVRSRV_CACHE_OP_FLUSH:
+                               asm volatile ("dc civac, %0" :: "r" (pbBase));
+                               break;
+
+                       default:
+                               PVR_DPF((PVR_DBG_ERROR,
+                                               "%s: Cache maintenance operation type %d is invalid",
+                                               __func__, eCacheOp));
+                               break;
+               }
+       }
+
+       end_user_mode_access();
+}
+
+void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                                                       void *pvVirtStart,
+                                                       void *pvVirtEnd,
+                                                       IMG_CPU_PHYADDR sCPUPhysStart,
+                                                       IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+       struct device *dev;
+
+       if (pvVirtStart)
+       {
+               FlushRange(pvVirtStart, pvVirtEnd, PVRSRV_CACHE_OP_FLUSH);
+               return;
+       }
+
+       dev = psDevNode->psDevConfig->pvOSDevice;
+
+       if (dev)
+       {
+               dma_sync_single_for_device(dev, sCPUPhysStart.uiAddr,
+                                                                  sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr,
+                                                                  DMA_TO_DEVICE);
+               dma_sync_single_for_cpu(dev, sCPUPhysStart.uiAddr,
+                                                               sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr,
+                                                               DMA_FROM_DEVICE);
+       }
+       else
+       {
+               /*
+                * Allocations made before the device pointer is available may
+                * result in cache operations being scheduled with no device.
+                *
+                * Ignore operations with a NULL device pointer. This prevents
+                * crashes on newer kernels, which no longer return dummy ops
+                * when a NULL pointer is passed to get_dma_ops().
+                */
+
+               /* Don't spam on nohw */
+#if !defined(NO_HARDWARE)
+               PVR_DPF((PVR_DBG_WARNING, "Cache operation cannot be completed!"));
+#endif
+       }
+
+}
+
+void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                                                       void *pvVirtStart,
+                                                       void *pvVirtEnd,
+                                                       IMG_CPU_PHYADDR sCPUPhysStart,
+                                                       IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+       struct device *dev;
+
+       if (pvVirtStart)
+       {
+               FlushRange(pvVirtStart, pvVirtEnd, PVRSRV_CACHE_OP_CLEAN);
+               return;
+       }
+
+       dev = psDevNode->psDevConfig->pvOSDevice;
+
+       if (dev)
+       {
+               dma_sync_single_for_device(dev, sCPUPhysStart.uiAddr,
+                                                                  sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr,
+                                                                  DMA_TO_DEVICE);
+       }
+       else
+       {
+               /*
+                * Allocations made before the device pointer is available may
+                * result in cache operations being scheduled with no device.
+                *
+                * Ignore operations with a NULL device pointer. This prevents
+                * crashes on newer kernels, which no longer return dummy ops
+                * when a NULL pointer is passed to get_dma_ops().
+                */
+
+               /* Don't spam on nohw */
+#if !defined(NO_HARDWARE)
+               PVR_DPF((PVR_DBG_WARNING, "Cache operation cannot be completed!"));
+#endif
+       }
+
+}
+
+void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                                                                void *pvVirtStart,
+                                                                void *pvVirtEnd,
+                                                                IMG_CPU_PHYADDR sCPUPhysStart,
+                                                                IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+       struct device *dev;
+
+       if (pvVirtStart)
+       {
+               FlushRange(pvVirtStart, pvVirtEnd, PVRSRV_CACHE_OP_INVALIDATE);
+               return;
+       }
+
+       dev = psDevNode->psDevConfig->pvOSDevice;
+
+       if (dev)
+       {
+               dma_sync_single_for_cpu(dev, sCPUPhysStart.uiAddr,
+                                                               sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr,
+                                                               DMA_FROM_DEVICE);
+       }
+       else
+       {
+               /*
+                * Allocations made before the device pointer is available may
+                * result in cache operations being scheduled with no device.
+                *
+                * Ignore operations with a NULL device pointer. This prevents
+                * crashes on newer kernels, which no longer return dummy ops
+                * when a NULL pointer is passed to get_dma_ops().
+                */
+
+               /* Don't spam on nohw */
+#if !defined(NO_HARDWARE)
+               PVR_DPF((PVR_DBG_WARNING, "Cache operation cannot be completed!"));
+#endif
+       }
+}
+
+
+OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void)
+{
+       return OS_CACHE_OP_ADDR_TYPE_PHYSICAL;
+}
+
+void OSUserModeAccessToPerfCountersEn(void)
+{
+}
+
+IMG_BOOL OSIsWriteCombineUnalignedSafe(void)
+{
+       /*
+        * Under ARM64 there is the concept of 'device' [0] and 'normal' [1] memory.
+        * Unaligned access on device memory is explicitly disallowed [2]:
+        *
+        * 'Further, unaligned accesses are only allowed to regions marked as Normal
+        *  memory type.
+        *  ...
+        *  Attempts to perform unaligned accesses when not allowed will cause an
+        *  alignment fault (data abort).'
+        *
+        * Write-combine on ARM64 can be implemented as either normal non-cached
+        * memory (NORMAL_NC) or as device memory with gathering enabled
+        * (DEVICE_GRE). Kernel 3.13 changed this from the latter to the former.
+        *
+        * [0]:http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.den0024a/CHDBDIDF.html
+        * [1]:http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.den0024a/ch13s01s01.html
+        * [2]:http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html
+        */
+
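+       /* Write-combining is reported as safe only when the kernel maps it
+        * with the Normal non-cacheable attribute (MT_NORMAL_NC) rather than
+        * as Device memory, since only Normal memory allows unaligned access. */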
+       pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+       return (pgprot_val(pgprot) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_NC);
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/osfunc_riscv.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/osfunc_riscv.c
new file mode 100644 (file)
index 0000000..0de3bf4
--- /dev/null
@@ -0,0 +1,210 @@
+/*************************************************************************/ /*!
+@File
+@Title          RISC-V specific OS functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Processor specific OS functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+#include "cache_ops.h"
+
+
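+/* Cache-maintenance fallback resolved from the system layer (assumed to be
+ * provided by the platform port, e.g. the StarFive JH7110 system code); it is
+ * used below whenever no pfnHostCacheMaintenance callback is registered. */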
+extern void SysDevHost_Cache_Maintenance(IMG_HANDLE hSysData,
+                                                                       PVRSRV_CACHE_OP eRequestType,
+                                                                       void *pvVirtStart,
+                                                                       void *pvVirtEnd,
+                                                                       IMG_CPU_PHYADDR sCPUPhysStart,
+                                                                       IMG_CPU_PHYADDR sCPUPhysEnd);
+
+void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                                                       void *pvVirtStart,
+                                                       void *pvVirtEnd,
+                                                       IMG_CPU_PHYADDR sCPUPhysStart,
+                                                       IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+       /*
+        * RISC-V cache maintenance mechanism is not part of the core spec.
+        * This leaves the actual mechanism of action to an implementer.
+        * Here we let the system layer decide how maintenance is done.
+        */
+       if (psDevNode->psDevConfig->pfnHostCacheMaintenance)
+       {
+               psDevNode->psDevConfig->pfnHostCacheMaintenance(
+                               psDevNode->psDevConfig->hSysData,
+                               PVRSRV_CACHE_OP_FLUSH,
+                               pvVirtStart,
+                               pvVirtEnd,
+                               sCPUPhysStart,
+                               sCPUPhysEnd);
+
+       }
+#if !defined(NO_HARDWARE)
+       else
+       {
+               //PVR_DPF((PVR_DBG_ERROR,
+               //         "%s: System doesn't register cache maintenance flush. Skipping!",
+               //         __func__));
+               SysDevHost_Cache_Maintenance(
+                               psDevNode->psDevConfig->hSysData,
+                               PVRSRV_CACHE_OP_FLUSH,
+                               pvVirtStart,
+                               pvVirtEnd,
+                               sCPUPhysStart,
+                               sCPUPhysEnd);           
+
+       }
+#endif
+}
+
+void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                                                       void *pvVirtStart,
+                                                       void *pvVirtEnd,
+                                                       IMG_CPU_PHYADDR sCPUPhysStart,
+                                                       IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+       /*
+        * RISC-V cache maintenance mechanism is not part of the core spec.
+        * This leaves the actual mechanism of action to an implementer.
+        * Here we let the system layer decide how maintenance is done.
+        */
+       if (psDevNode->psDevConfig->pfnHostCacheMaintenance)
+       {
+               psDevNode->psDevConfig->pfnHostCacheMaintenance(
+                               psDevNode->psDevConfig->hSysData,
+                               PVRSRV_CACHE_OP_CLEAN,
+                               pvVirtStart,
+                               pvVirtEnd,
+                               sCPUPhysStart,
+                               sCPUPhysEnd);
+
+       }
+#if !defined(NO_HARDWARE)
+       else
+       {
+               //PVR_DPF((PVR_DBG_ERROR,
+               //         "%s: System doesn't register cache maintenance clean. Skipping!",
+               //         __func__));
+               SysDevHost_Cache_Maintenance(
+                               psDevNode->psDevConfig->hSysData,
+                               PVRSRV_CACHE_OP_CLEAN,
+                               pvVirtStart,
+                               pvVirtEnd,
+                               sCPUPhysStart,
+                               sCPUPhysEnd);           
+
+       }
+#endif
+}
+
+void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                                                                void *pvVirtStart,
+                                                                void *pvVirtEnd,
+                                                                IMG_CPU_PHYADDR sCPUPhysStart,
+                                                                IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+       /*
+        * RISC-V cache maintenance mechanism is not part of the core spec.
+        * This leaves the actual mechanism of action to an implementer.
+        * Here we let the system layer decide how maintenance is done.
+        */
+       if (psDevNode->psDevConfig->pfnHostCacheMaintenance)
+       {
+               psDevNode->psDevConfig->pfnHostCacheMaintenance(
+                               psDevNode->psDevConfig->hSysData,
+                               PVRSRV_CACHE_OP_INVALIDATE,
+                               pvVirtStart,
+                               pvVirtEnd,
+                               sCPUPhysStart,
+                               sCPUPhysEnd);
+
+       }
+#if !defined(NO_HARDWARE)
+       else
+       {
+               //PVR_DPF((PVR_DBG_ERROR,
+               //         "%s: System doesn't register cache maintenance invalid. Skipping!",
+               //         __func__));
+               SysDevHost_Cache_Maintenance(
+                               psDevNode->psDevConfig->hSysData,
+                               PVRSRV_CACHE_OP_INVALIDATE,
+                               pvVirtStart,
+                               pvVirtEnd,
+                               sCPUPhysStart,
+                               sCPUPhysEnd);           
+       }
+#endif
+}
+
+OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void)
+{
+       /*
+        * Need to obtain psDevNode here and do the following:
+        *
+        * OS_CACHE_OP_ADDR_TYPE eOpAddrType =
+        *      psDevNode->psDevConfig->bHasPhysicalCacheMaintenance ?
+        *              OS_CACHE_OP_ADDR_TYPE_PHYSICAL : OS_CACHE_OP_ADDR_TYPE_VIRTUAL;
+        *
+        * For now, report physical addressing unconditionally.
+        *
+        */
+       return OS_CACHE_OP_ADDR_TYPE_PHYSICAL;
+}
+
+void OSUserModeAccessToPerfCountersEn(void)
+{
+#if 0 /* !defined(NO_HARDWARE) */
+       PVR_DPF((PVR_DBG_WARNING, "%s: Not implemented!", __func__));
+       PVR_ASSERT(0);
+#endif
+}
+
+IMG_BOOL OSIsWriteCombineUnalignedSafe(void)
+{
+#if 0 /* !defined(NO_HARDWARE) */
+       PVR_DPF((PVR_DBG_WARNING,
+                "%s: Not implemented (assuming false)!",
+                __func__));
+       PVR_ASSERT(0);
+       return IMG_FALSE;
+#else
+       return IMG_TRUE;
+#endif
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/osfunc_x86.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/osfunc_x86.c
new file mode 100644 (file)
index 0000000..2c271d8
--- /dev/null
@@ -0,0 +1,134 @@
+/*************************************************************************/ /*!
+@File
+@Title          x86 specific OS functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Processor specific OS functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/smp.h>
+#include <linux/uaccess.h>
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+
+static void x86_flush_cache_range(const void *pvStart, const void *pvEnd)
+{
+       IMG_BYTE *pbStart = (IMG_BYTE *)pvStart;
+       IMG_BYTE *pbEnd = (IMG_BYTE *)pvEnd;
+       IMG_BYTE *pbBase;
+
+       pbEnd = (IMG_BYTE *)PVR_ALIGN((uintptr_t)pbEnd,
+                                     (uintptr_t)boot_cpu_data.x86_clflush_size);
+
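+       /* mb() (MFENCE on x86-64) orders the CLFLUSH loop against surrounding
+        * memory accesses, as CLFLUSH is not a serialising instruction. */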
+       mb();
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,168))
+       __uaccess_begin();
+#endif
+
+       for (pbBase = pbStart; pbBase < pbEnd; pbBase += boot_cpu_data.x86_clflush_size)
+       {
+               clflush(pbBase);
+       }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,168))
+       __uaccess_end();
+#endif
+
+       mb();
+}
+
+void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                            void *pvVirtStart,
+                            void *pvVirtEnd,
+                            IMG_CPU_PHYADDR sCPUPhysStart,
+                            IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+       PVR_UNREFERENCED_PARAMETER(psDevNode);
+       PVR_UNREFERENCED_PARAMETER(sCPUPhysStart);
+       PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd);
+
+       x86_flush_cache_range(pvVirtStart, pvVirtEnd);
+}
+
+void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                            void *pvVirtStart,
+                            void *pvVirtEnd,
+                            IMG_CPU_PHYADDR sCPUPhysStart,
+                            IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+       PVR_UNREFERENCED_PARAMETER(psDevNode);
+       PVR_UNREFERENCED_PARAMETER(sCPUPhysStart);
+       PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd);
+
+       /* No clean feature on x86 */
+       x86_flush_cache_range(pvVirtStart, pvVirtEnd);
+}
+
+void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                                 void *pvVirtStart,
+                                 void *pvVirtEnd,
+                                 IMG_CPU_PHYADDR sCPUPhysStart,
+                                 IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+       PVR_UNREFERENCED_PARAMETER(psDevNode);
+       PVR_UNREFERENCED_PARAMETER(sCPUPhysStart);
+       PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd);
+
+       /* No invalidate-only support */
+       x86_flush_cache_range(pvVirtStart, pvVirtEnd);
+}
+
+OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void)
+{
+       return OS_CACHE_OP_ADDR_TYPE_VIRTUAL;
+}
+
+void OSUserModeAccessToPerfCountersEn(void)
+{
+       /* Not applicable to x86 architecture. */
+}
+
+IMG_BOOL OSIsWriteCombineUnalignedSafe(void)
+{
+       return IMG_TRUE;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/osmmap_stub.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/osmmap_stub.c
new file mode 100644 (file)
index 0000000..74bad70
--- /dev/null
@@ -0,0 +1,146 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS abstraction for the mmap2 interface for mapping PMRs into
+                User Mode memory
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* our exported API */
+#include "osmmap.h"
+
+/* include/ */
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+/* services/include/ */
+
+/* services/include/srvhelper/ */
+#include "ra.h"
+
+#include "pmr.h"
+
+PVRSRV_ERROR
+OSMMapPMR(IMG_HANDLE hBridge,
+          IMG_HANDLE hPMR,
+          IMG_DEVMEM_SIZE_T uiPMRSize,
+          PVRSRV_MEMALLOCFLAGS_T uiFlags,
+          IMG_HANDLE *phOSMMapPrivDataOut,
+          void **ppvMappingAddressOut,
+          size_t *puiMappingLengthOut)
+{
+    PVRSRV_ERROR eError;
+    PMR *psPMR;
+    void *pvKernelAddress;
+    size_t uiLength;
+    IMG_HANDLE hPriv;
+
+    PVR_UNREFERENCED_PARAMETER(hBridge);
+    PVR_UNREFERENCED_PARAMETER(uiFlags);
+
+    /*
+      Normally this function would mmap a PMR into the memory space of a
+      user process, but in this case we take a PMR and map it into kernel
+      virtual space. We keep the same function name for symmetry, as this
+      allows the higher layers of the software stack not to care whether
+      they run in user mode or kernel mode.
+    */
+
+    psPMR = hPMR;
+
+    if (PMR_IsSparse(psPMR))
+    {
+        eError = PMRAcquireSparseKernelMappingData(psPMR,
+                                            0,
+                                            0,
+                                            &pvKernelAddress,
+                                            &uiLength,
+                                            &hPriv);
+    }
+    else
+    {
+        eError = PMRAcquireKernelMappingData(psPMR,
+                                            0,
+                                            0,
+                                            &pvKernelAddress,
+                                            &uiLength,
+                                            &hPriv);
+    }
+    if (eError != PVRSRV_OK)
+    {
+        goto e0;
+    }
+
+    *phOSMMapPrivDataOut = hPriv;
+    *ppvMappingAddressOut = pvKernelAddress;
+    *puiMappingLengthOut = uiLength;
+
+    /* MappingLength might be rounded up to page size */
+    PVR_ASSERT(*puiMappingLengthOut >= uiPMRSize);
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+
+e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+void
+OSMUnmapPMR(IMG_HANDLE hBridge,
+            IMG_HANDLE hPMR,
+            IMG_HANDLE hOSMMapPrivData,
+            void *pvMappingAddress,
+            size_t uiMappingLength)
+{
+    PMR *psPMR;
+
+    PVR_UNREFERENCED_PARAMETER(hBridge);
+    PVR_UNREFERENCED_PARAMETER(pvMappingAddress);
+    PVR_UNREFERENCED_PARAMETER(uiMappingLength);
+
+    psPMR = hPMR;
+    PMRReleaseKernelMappingData(psPMR,
+                                hOSMMapPrivData);
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/ossecure_export.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/ossecure_export.c
new file mode 100644 (file)
index 0000000..1070067
--- /dev/null
@@ -0,0 +1,155 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/anon_inodes.h>
+#include <linux/dcache.h>
+#include <linux/mount.h>
+#include <linux/sched.h>
+#include <linux/cred.h>
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "ossecure_export.h"
+#include "private_data.h"
+#include "pvr_debug.h"
+
+#include "kernel_compatibility.h"
+
+typedef struct
+{
+       PVRSRV_ERROR (*pfnReleaseFunc)(void *);
+       void *pvData;
+} OSSecureFileData;
+
+static IMG_INT _OSSecureFileReleaseFunc(struct inode *psInode,
+                                        struct file *psFile)
+{
+       OSSecureFileData *psSecureFileData = (OSSecureFileData *)psFile->private_data;
+       psSecureFileData->pfnReleaseFunc(psSecureFileData->pvData);
+
+       OSFreeMem(psSecureFileData);
+       PVR_UNREFERENCED_PARAMETER(psInode);
+
+       return 0;
+}
+
+static struct file_operations secure_file_fops = {
+       .release        = _OSSecureFileReleaseFunc,
+};
+
+PVRSRV_ERROR OSSecureExport(const IMG_CHAR *pszName,
+                            PVRSRV_ERROR (*pfnReleaseFunc)(void *),
+                            void *pvData,
+                            IMG_SECURE_TYPE *phSecure)
+{
+       struct file *secure_file;
+       int secure_fd;
+       PVRSRV_ERROR eError;
+       OSSecureFileData *psSecureFileData;
+
+       PVR_ASSERT(pfnReleaseFunc != NULL || pvData != NULL);
+
+       psSecureFileData = OSAllocMem(sizeof(*psSecureFileData));
+       if (psSecureFileData == NULL)
+       {
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto e0;
+       }
+
+       psSecureFileData->pvData = pvData;
+       psSecureFileData->pfnReleaseFunc = pfnReleaseFunc;
+
+       /* Allocate a fd number */
+       secure_fd = get_unused_fd();
+       if (secure_fd < 0)
+       {
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto e0;
+       }
+
+       /* Create a file with provided name, fops and flags,
+        * also store the private data in the file */
+       secure_file = anon_inode_getfile(pszName, &secure_file_fops, psSecureFileData, 0);
+       if (IS_ERR(secure_file))
+       {
+               put_unused_fd(secure_fd);
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto e0;
+       }
+
+       /* Bind our struct file with its fd number */
+       fd_install(secure_fd, secure_file);
+
+       *phSecure = secure_fd;
+       return PVRSRV_OK;
+
+e0:
+       OSFreeMem(psSecureFileData);
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+PVRSRV_ERROR OSSecureImport(IMG_SECURE_TYPE hSecure, void **ppvData)
+{
+       struct file *secure_file;
+       PVRSRV_ERROR eError;
+       OSSecureFileData *psSecureFileData;
+
+       secure_file = fget(hSecure);
+       if (!secure_file)
+       {
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto err_out;
+       }
+
+       psSecureFileData = (OSSecureFileData *)secure_file->private_data;
+       *ppvData = psSecureFileData->pvData;
+
+       fput(secure_file);
+       return PVRSRV_OK;
+
+err_out:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
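+
+/*
+ * Usage sketch (illustrative only): a server-side caller is expected to pair
+ * OSSecureExport() with OSSecureImport() roughly as below. ReleaseMyObject and
+ * psMyObject are hypothetical placeholders for a real release callback and its
+ * private data.
+ *
+ *     IMG_SECURE_TYPE hSecure;
+ *     void *pvImported;
+ *
+ *     if (OSSecureExport("my-export", ReleaseMyObject, psMyObject, &hSecure) == PVRSRV_OK)
+ *     {
+ *         // hSecure is a file descriptor that can be passed to another process.
+ *         // Importing it simply returns the private data stored at export time.
+ *         if (OSSecureImport(hSecure, &pvImported) == PVRSRV_OK)
+ *         {
+ *             PVR_ASSERT(pvImported == psMyObject);
+ *         }
+ *     }
+ */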
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/physmem_dmabuf.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/physmem_dmabuf.c
new file mode 100644 (file)
index 0000000..e13adaa
--- /dev/null
@@ -0,0 +1,1282 @@
+/*************************************************************************/ /*!
+@File           physmem_dmabuf.c
+@Title          dmabuf memory allocator
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management. This module is responsible for
+                implementing the function callbacks for dmabuf memory.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+
+#include "physmem_dmabuf.h"
+#include "pvrsrv.h"
+#include "pmr.h"
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)) || defined(SUPPORT_ION) || defined(KERNEL_HAS_DMABUF_VMAP_MMAP)
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/dma-buf.h>
+#include <linux/scatterlist.h>
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pmr_impl.h"
+#include "hash.h"
+#include "private_data.h"
+#include "module_common.h"
+#include "pvr_ion_stats.h"
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+#include "ri_server.h"
+#endif
+
+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS)
+#include "mmap_stats.h"
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#include "kernel_compatibility.h"
+
+/*
+ * dma_buf_ops
+ *
+ * These all return errors if used.
+ * The point is to prevent anyone outside of our driver from importing
+ * and using our dmabuf.
+ */
+
+static int PVRDmaBufOpsAttach(struct dma_buf *psDmaBuf,
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) && \
+       !((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) && (defined(CHROMIUMOS_KERNEL))))
+                                                         struct device *psDev,
+#endif
+                                                         struct dma_buf_attachment *psAttachment)
+{
+       return -ENOSYS;
+}
+
+static struct sg_table *PVRDmaBufOpsMap(struct dma_buf_attachment *psAttachment,
+                                        enum dma_data_direction eDirection)
+{
+       /* Attach hasn't been called yet */
+       return ERR_PTR(-EINVAL);
+}
+
+static void PVRDmaBufOpsUnmap(struct dma_buf_attachment *psAttachment,
+                              struct sg_table *psTable,
+                              enum dma_data_direction eDirection)
+{
+}
+
+static void PVRDmaBufOpsRelease(struct dma_buf *psDmaBuf)
+{
+       PMR *psPMR = (PMR *) psDmaBuf->priv;
+
+       PMRUnrefPMR(psPMR);
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0))
+static void *PVRDmaBufOpsKMap(struct dma_buf *psDmaBuf, unsigned long uiPageNum)
+{
+       return ERR_PTR(-ENOSYS);
+}
+#endif
+
+static int PVRDmaBufOpsMMap(struct dma_buf *psDmaBuf, struct vm_area_struct *psVMA)
+{
+       return -ENOSYS;
+}
+
+static const struct dma_buf_ops sPVRDmaBufOps =
+{
+       .attach        = PVRDmaBufOpsAttach,
+       .map_dma_buf   = PVRDmaBufOpsMap,
+       .unmap_dma_buf = PVRDmaBufOpsUnmap,
+       .release       = PVRDmaBufOpsRelease,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) && \
+       !((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) && (defined(CHROMIUMOS_KERNEL))))
+       .map_atomic    = PVRDmaBufOpsKMap,
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0))
+       .map           = PVRDmaBufOpsKMap,
+#endif
+#else
+       .kmap_atomic   = PVRDmaBufOpsKMap,
+       .kmap          = PVRDmaBufOpsKMap,
+#endif
+       .mmap          = PVRDmaBufOpsMMap,
+};
+
+/* end of dma_buf_ops */
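+
+/*
+ * Usage sketch (illustrative only): because every callback in sPVRDmaBufOps
+ * above simply fails, a third-party kernel driver attempting to import a
+ * buffer exported with these ops would see something like the following.
+ * psForeignDev is a hypothetical placeholder for that driver's struct device.
+ *
+ *     struct dma_buf_attachment *psAtt;
+ *
+ *     psAtt = dma_buf_attach(psDmaBuf, psForeignDev);
+ *     // dma_buf_attach() invokes PVRDmaBufOpsAttach(), which returns -ENOSYS,
+ *     // so psAtt is ERR_PTR(-ENOSYS) and the import is rejected.
+ */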
+
+typedef struct _PMR_DMA_BUF_DATA_
+{
+       /* Filled in at PMR create time */
+       PHYS_HEAP *psPhysHeap;
+       struct dma_buf_attachment *psAttachment;
+       PFN_DESTROY_DMABUF_PMR pfnDestroy;
+       IMG_BOOL bPoisonOnFree;
+
+       /* Mapping information. */
+       struct iosys_map sMap;
+
+       /* Modified by PMR lock/unlock */
+       struct sg_table *psSgTable;
+       IMG_DEV_PHYADDR *pasDevPhysAddr;
+       IMG_UINT32 ui32PhysPageCount;
+       IMG_UINT32 ui32VirtPageCount;
+} PMR_DMA_BUF_DATA;
+
+/* Start size of the g_psDmaBufHash hash table */
+#define DMA_BUF_HASH_SIZE 20
+
+static DEFINE_MUTEX(g_HashLock);
+
+static HASH_TABLE *g_psDmaBufHash;
+static IMG_UINT32 g_ui32HashRefCount;
+
+#if defined(PVR_ANDROID_ION_USE_SG_LENGTH)
+#define pvr_sg_length(sg) ((sg)->length)
+#else
+#define pvr_sg_length(sg) sg_dma_len(sg)
+#endif
+
+static int
+DmaBufSetValue(struct dma_buf *psDmaBuf, int iValue, const char *szFunc)
+{
+       struct iosys_map sMap;
+       int err, err_end_access;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0))
+       int i;
+#endif
+
+       err = dma_buf_begin_cpu_access(psDmaBuf, DMA_FROM_DEVICE);
+       if (err)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to begin cpu access (err=%d)",
+                                       szFunc, err));
+               goto err_out;
+       }
+
+       err = dma_buf_vmap(psDmaBuf, &sMap);
+       if (err)
+       {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0))
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to map page (err=%d)",
+                                       szFunc, err));
+               goto exit_end_access;
+#else
+               for (i = 0; i < psDmaBuf->size / PAGE_SIZE; i++)
+               {
+                       void *pvKernAddr;
+
+                       pvKernAddr = dma_buf_kmap(psDmaBuf, i);
+                       if (IS_ERR_OR_NULL(pvKernAddr))
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to map page (err=%ld)",
+                                                       szFunc,
+                                                       pvKernAddr ? PTR_ERR(pvKernAddr) : -ENOMEM));
+                               err = !pvKernAddr ? -ENOMEM : -EINVAL;
+
+                               goto exit_end_access;
+                       }
+
+                       memset(pvKernAddr, iValue, PAGE_SIZE);
+
+                       dma_buf_kunmap(psDmaBuf, i, pvKernAddr);
+               }
+#endif
+       }
+       else
+       {
+               memset(sMap.vaddr, iValue, psDmaBuf->size);
+
+               dma_buf_vunmap(psDmaBuf, &sMap);
+       }
+
+       err = 0;
+
+exit_end_access:
+       do {
+               err_end_access = dma_buf_end_cpu_access(psDmaBuf, DMA_TO_DEVICE);
+       } while (err_end_access == -EAGAIN || err_end_access == -EINTR);
+
+       if (err_end_access)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to end cpu access (err=%d)",
+                                       szFunc, err_end_access));
+               if (!err)
+               {
+                       err = err_end_access;
+               }
+       }
+
+err_out:
+       return err;
+}
+
+/*****************************************************************************
+ *                          PMR callback functions                           *
+ *****************************************************************************/
+
+static PVRSRV_ERROR PMRFinalizeDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
+{
+       PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+       struct dma_buf_attachment *psAttachment = psPrivData->psAttachment;
+       struct dma_buf *psDmaBuf = psAttachment->dmabuf;
+       struct sg_table *psSgTable = psPrivData->psSgTable;
+       PMR *psPMR;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       if (psDmaBuf->ops != &sPVRDmaBufOps)
+       {
+               if (g_psDmaBufHash)
+               {
+                       /* We have a hash table so check if we've seen this dmabuf before */
+                       psPMR = (PMR *) HASH_Retrieve(g_psDmaBufHash, (uintptr_t) psDmaBuf);
+
+                       if (psPMR)
+                       {
+                               if (!PMRIsPMRLive(psPMR))
+                               {
+                                       HASH_Remove(g_psDmaBufHash, (uintptr_t) psDmaBuf);
+                                       g_ui32HashRefCount--;
+
+                                       if (g_ui32HashRefCount == 0)
+                                       {
+                                               HASH_Delete(g_psDmaBufHash);
+                                               g_psDmaBufHash = NULL;
+                                       }
+                               }
+                               else
+                               {
+                                       eError = PVRSRV_ERROR_PMR_STILL_REFERENCED;
+                               }
+                       }
+                       PVRSRVIonRemoveMemAllocRecord(psDmaBuf);
+               }
+       }
+       else
+       {
+               psPMR = (PMR *) psDmaBuf->priv;
+               if (PMRIsPMRLive(psPMR))
+               {
+                       eError = PVRSRV_ERROR_PMR_STILL_REFERENCED;
+               }
+
+       }
+
+       if (PVRSRV_OK != eError)
+       {
+               return eError;
+       }
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+       PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT,
+                                   psPrivData->ui32PhysPageCount << PAGE_SHIFT,
+                                   OSGetCurrentClientProcessIDKM());
+#endif
+
+       psPrivData->ui32PhysPageCount = 0;
+
+       dma_buf_unmap_attachment(psAttachment, psSgTable, DMA_BIDIRECTIONAL);
+
+
+       if (psPrivData->bPoisonOnFree)
+       {
+               int err;
+
+               err = DmaBufSetValue(psDmaBuf, PVRSRV_POISON_ON_FREE_VALUE, __func__);
+               if (err)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Failed to poison allocation before "
+                                               "free", __func__));
+                       PVR_ASSERT(IMG_FALSE);
+               }
+       }
+
+       if (psPrivData->pfnDestroy)
+       {
+               eError = psPrivData->pfnDestroy(psPrivData->psPhysHeap, psPrivData->psAttachment);
+               if (eError != PVRSRV_OK)
+               {
+                       return eError;
+               }
+       }
+
+       OSFreeMem(psPrivData->pasDevPhysAddr);
+       OSFreeMem(psPrivData);
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR PMRLockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
+{
+       PVR_UNREFERENCED_PARAMETER(pvPriv);
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR PMRUnlockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
+{
+       PVR_UNREFERENCED_PARAMETER(pvPriv);
+       return PVRSRV_OK;
+}
+
+static void PMRGetFactoryLock(void)
+{
+       mutex_lock(&g_HashLock);
+}
+
+static void PMRReleaseFactoryLock(void)
+{
+       mutex_unlock(&g_HashLock);
+}
+
+static PVRSRV_ERROR PMRDevPhysAddrDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
+                                        IMG_UINT32 ui32Log2PageSize,
+                                        IMG_UINT32 ui32NumOfPages,
+                                        IMG_DEVMEM_OFFSET_T *puiOffset,
+                                        IMG_BOOL *pbValid,
+                                        IMG_DEV_PHYADDR *psDevPAddr)
+{
+       PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+       IMG_UINT32 ui32PageIndex;
+       IMG_UINT32 idx;
+
+       if (ui32Log2PageSize != PAGE_SHIFT)
+       {
+               return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+       }
+
+       for (idx=0; idx < ui32NumOfPages; idx++)
+       {
+               if (pbValid[idx])
+               {
+                       IMG_UINT32 ui32InPageOffset;
+
+                       ui32PageIndex = puiOffset[idx] >> PAGE_SHIFT;
+                       ui32InPageOffset = puiOffset[idx] - ((IMG_DEVMEM_OFFSET_T)ui32PageIndex << PAGE_SHIFT);
+
+                       PVR_LOG_RETURN_IF_FALSE(ui32PageIndex < psPrivData->ui32VirtPageCount,
+                                               "puiOffset out of range", PVRSRV_ERROR_OUT_OF_RANGE);
+
+                       PVR_ASSERT(ui32InPageOffset < PAGE_SIZE);
+                       psDevPAddr[idx].uiAddr = psPrivData->pasDevPhysAddr[ui32PageIndex].uiAddr + ui32InPageOffset;
+               }
+       }
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+PMRAcquireKernelMappingDataDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
+                                 size_t uiOffset,
+                                 size_t uiSize,
+                                 void **ppvKernelAddressOut,
+                                 IMG_HANDLE *phHandleOut,
+                                 PMR_FLAGS_T ulFlags)
+{
+       PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+       struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf;
+       PVRSRV_ERROR eError;
+       int err;
+
+       if (psPrivData->ui32PhysPageCount != psPrivData->ui32VirtPageCount)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Kernel mappings for sparse DMABufs "
+                               "are not allowed!", __func__));
+               eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+               goto fail;
+       }
+
+       err = dma_buf_begin_cpu_access(psDmaBuf, DMA_BIDIRECTIONAL);
+       if (err)
+       {
+               eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+               goto fail;
+       }
+
+       err = dma_buf_vmap(psDmaBuf, &psPrivData->sMap);
+       if (err != 0 || psPrivData->sMap.vaddr == NULL)
+       {
+               eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+               goto fail_kmap;
+       }
+
+       *ppvKernelAddressOut = psPrivData->sMap.vaddr + uiOffset;
+       *phHandleOut = psPrivData->sMap.vaddr;
+
+       return PVRSRV_OK;
+
+fail_kmap:
+       do {
+               err = dma_buf_end_cpu_access(psDmaBuf, DMA_BIDIRECTIONAL);
+       } while (err == -EAGAIN || err == -EINTR);
+
+fail:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+static void PMRReleaseKernelMappingDataDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
+                                             IMG_HANDLE hHandle)
+{
+       PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+       struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf;
+       int err;
+
+       dma_buf_vunmap(psDmaBuf, &psPrivData->sMap);
+
+       do {
+               err = dma_buf_end_cpu_access(psDmaBuf, DMA_BIDIRECTIONAL);
+       } while (err == -EAGAIN || err == -EINTR);
+}
+
+static PVRSRV_ERROR PMRMMapDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
+                                  PMR *psPMR,
+                                  PMR_MMAP_DATA pOSMMapData)
+{
+       PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+       struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf;
+       struct vm_area_struct *psVma = pOSMMapData;
+       int err;
+
+       if (psPrivData->ui32PhysPageCount != psPrivData->ui32VirtPageCount)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Not possible to MMAP sparse DMABufs",
+                               __func__));
+               return PVRSRV_ERROR_NOT_IMPLEMENTED;
+       }
+
+       err = dma_buf_mmap(psDmaBuf, psVma, 0);
+       if (err)
+       {
+               return (err == -EINVAL) ? PVRSRV_ERROR_NOT_SUPPORTED : PVRSRV_ERROR_BAD_MAPPING;
+       }
+
+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS)
+       MMapStatsAddOrUpdatePMR(psPMR, psVma->vm_end - psVma->vm_start);
+#endif
+
+       return PVRSRV_OK;
+}
+
+static PMR_IMPL_FUNCTAB _sPMRDmaBufFuncTab =
+{
+       .pfnLockPhysAddresses           = PMRLockPhysAddressesDmaBuf,
+       .pfnUnlockPhysAddresses         = PMRUnlockPhysAddressesDmaBuf,
+       .pfnDevPhysAddr                 = PMRDevPhysAddrDmaBuf,
+       .pfnAcquireKernelMappingData    = PMRAcquireKernelMappingDataDmaBuf,
+       .pfnReleaseKernelMappingData    = PMRReleaseKernelMappingDataDmaBuf,
+       .pfnMMap                        = PMRMMapDmaBuf,
+       .pfnFinalize                    = PMRFinalizeDmaBuf,
+       .pfnGetPMRFactoryLock = PMRGetFactoryLock,
+       .pfnReleasePMRFactoryLock = PMRReleaseFactoryLock,
+};
+
+/*****************************************************************************
+ *                          Public facing interface                          *
+ *****************************************************************************/
+
+PVRSRV_ERROR
+PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap,
+                                struct dma_buf_attachment *psAttachment,
+                                PFN_DESTROY_DMABUF_PMR pfnDestroy,
+                                PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                IMG_DEVMEM_SIZE_T uiChunkSize,
+                                IMG_UINT32 ui32NumPhysChunks,
+                                IMG_UINT32 ui32NumVirtChunks,
+                                IMG_UINT32 *pui32MappingTable,
+                                IMG_UINT32 ui32NameSize,
+                                const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+                                PMR **ppsPMRPtr)
+{
+       struct dma_buf *psDmaBuf = psAttachment->dmabuf;
+       PMR_DMA_BUF_DATA *psPrivData;
+       PMR_FLAGS_T uiPMRFlags;
+       IMG_BOOL bZeroOnAlloc;
+       IMG_BOOL bPoisonOnAlloc;
+       IMG_BOOL bPoisonOnFree;
+       PVRSRV_ERROR eError;
+       IMG_UINT32 i, j;
+       IMG_UINT32 uiPagesPerChunk = uiChunkSize >> PAGE_SHIFT;
+       IMG_UINT32 ui32PageCount = 0;
+       struct scatterlist *sg;
+       struct sg_table *table;
+       IMG_UINT32 uiSglOffset;
+       IMG_CHAR pszAnnotation[DEVMEM_ANNOTATION_MAX_LEN];
+
+       bZeroOnAlloc = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags);
+       bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags);
+#if defined(DEBUG)
+       bPoisonOnFree = PVRSRV_CHECK_POISON_ON_FREE(uiFlags);
+#else
+       bPoisonOnFree = IMG_FALSE;
+#endif
+       if (bZeroOnAlloc && bPoisonOnAlloc)
+       {
+               /* Zero on Alloc and Poison on Alloc are mutually exclusive */
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto errReturn;
+       }
+
+       psPrivData = OSAllocZMem(sizeof(*psPrivData));
+       if (psPrivData == NULL)
+       {
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto errReturn;
+       }
+
+       psPrivData->psPhysHeap = psHeap;
+       psPrivData->psAttachment = psAttachment;
+       psPrivData->pfnDestroy = pfnDestroy;
+       psPrivData->bPoisonOnFree = bPoisonOnFree;
+       psPrivData->ui32VirtPageCount =
+                       (ui32NumVirtChunks * uiChunkSize) >> PAGE_SHIFT;
+
+       psPrivData->pasDevPhysAddr =
+                       OSAllocZMem(sizeof(*(psPrivData->pasDevPhysAddr)) *
+                                   psPrivData->ui32VirtPageCount);
+       if (!psPrivData->pasDevPhysAddr)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to allocate buffer for physical addresses (oom)",
+                                __func__));
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto errFreePrivData;
+       }
+
+       if (bZeroOnAlloc || bPoisonOnAlloc)
+       {
+               int iValue = bZeroOnAlloc ? 0 : PVRSRV_POISON_ON_ALLOC_VALUE;
+               int err;
+
+               err = DmaBufSetValue(psDmaBuf, iValue, __func__);
+               if (err)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Failed to map buffer for %s",
+                                               __func__,
+                                               bZeroOnAlloc ? "zeroing" : "poisoning"));
+
+                       eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+                       goto errFreePhysAddr;
+               }
+       }
+
+       table = dma_buf_map_attachment(psAttachment, DMA_BIDIRECTIONAL);
+       if (IS_ERR_OR_NULL(table))
+       {
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto errFreePhysAddr;
+       }
+
+       /*
+        * We do a two-pass process: first work out how many pages there
+        * are, and second fill in the data.
+        */
+       for_each_sg(table->sgl, sg, table->nents, i)
+       {
+               ui32PageCount += PAGE_ALIGN(pvr_sg_length(sg)) / PAGE_SIZE;
+       }
+
+       if (WARN_ON(!ui32PageCount))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Number of phys. pages must not be zero",
+                                __func__));
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto errUnmap;
+       }
+
+       if (WARN_ON(ui32PageCount != ui32NumPhysChunks * uiPagesPerChunk))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Requested physical chunks and actual "
+                               "number of physical dma buf pages don't match",
+                                __func__));
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto errUnmap;
+       }
+
+       psPrivData->ui32PhysPageCount = ui32PageCount;
+       psPrivData->psSgTable = table;
+       ui32PageCount = 0;
+       sg = table->sgl;
+       uiSglOffset = 0;
+
+
+       /* Fill physical address array */
+       for (i = 0; i < ui32NumPhysChunks; i++)
+       {
+               for (j = 0; j < uiPagesPerChunk; j++)
+               {
+                       IMG_UINT32 uiIdx = pui32MappingTable[i] * uiPagesPerChunk + j;
+
+                       psPrivData->pasDevPhysAddr[uiIdx].uiAddr =
+                                       sg_dma_address(sg) + uiSglOffset;
+
+                       /* Get the next offset for the current sgl or the next sgl */
+                       uiSglOffset += PAGE_SIZE;
+                       if (uiSglOffset >= pvr_sg_length(sg))
+                       {
+                               sg = sg_next(sg);
+                               uiSglOffset = 0;
+
+                               /* Check that we haven't looped */
+                               if (WARN_ON(sg == table->sgl))
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s: Failed to fill phys. address "
+                                                       "array",
+                                                        __func__));
+                                       eError = PVRSRV_ERROR_INVALID_PARAMS;
+                                       goto errUnmap;
+                               }
+                       }
+               }
+       }
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+       PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT,
+                                   psPrivData->ui32PhysPageCount << PAGE_SHIFT,
+                                   OSGetCurrentClientProcessIDKM());
+#endif
+
+       uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+
+       /*
+        * Check no significant bits were lost in cast due to different
+        * bit widths for flags
+        */
+       PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+
+       if (OSSNPrintf((IMG_CHAR *)pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN, "ImpDmaBuf:%s", (IMG_CHAR *)pszName) < 0)
+       {
+               pszAnnotation[0] = '\0';
+       }
+       else
+       {
+               pszAnnotation[DEVMEM_ANNOTATION_MAX_LEN-1] = '\0';
+       }
+
+       eError = PMRCreatePMR(psHeap,
+                             ui32NumVirtChunks * uiChunkSize,
+                             uiChunkSize,
+                             ui32NumPhysChunks,
+                             ui32NumVirtChunks,
+                             pui32MappingTable,
+                             PAGE_SHIFT,
+                             uiPMRFlags,
+                             pszAnnotation,
+                             &_sPMRDmaBufFuncTab,
+                             psPrivData,
+                             PMR_TYPE_DMABUF,
+                             ppsPMRPtr,
+                             PDUMP_NONE);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create PMR (%s)",
+                                __func__, PVRSRVGetErrorString(eError)));
+               goto errFreePhysAddr;
+       }
+
+       return PVRSRV_OK;
+
+errUnmap:
+       dma_buf_unmap_attachment(psAttachment, table, DMA_BIDIRECTIONAL);
+errFreePhysAddr:
+       OSFreeMem(psPrivData->pasDevPhysAddr);
+errFreePrivData:
+       OSFreeMem(psPrivData);
+errReturn:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+static PVRSRV_ERROR PhysmemDestroyDmaBuf(PHYS_HEAP *psHeap,
+                                        struct dma_buf_attachment *psAttachment)
+{
+       struct dma_buf *psDmaBuf = psAttachment->dmabuf;
+
+       PVR_UNREFERENCED_PARAMETER(psHeap);
+
+       dma_buf_detach(psDmaBuf, psAttachment);
+       dma_buf_put(psDmaBuf);
+
+       return PVRSRV_OK;
+}
+
+struct dma_buf *
+PhysmemGetDmaBuf(PMR *psPMR)
+{
+       PMR_DMA_BUF_DATA *psPrivData;
+
+       psPrivData = PMRGetPrivateData(psPMR, &_sPMRDmaBufFuncTab);
+       if (psPrivData)
+       {
+               return psPrivData->psAttachment->dmabuf;
+       }
+
+       return NULL;
+}
+
+PVRSRV_ERROR
+PhysmemExportDmaBuf(CONNECTION_DATA *psConnection,
+                    PVRSRV_DEVICE_NODE *psDevNode,
+                    PMR *psPMR,
+                    IMG_INT *piFd)
+{
+       struct dma_buf *psDmaBuf;
+       IMG_DEVMEM_SIZE_T uiPMRSize;
+       PVRSRV_ERROR eError;
+       IMG_INT iFd;
+
+       mutex_lock(&g_HashLock);
+
+       PMRRefPMR(psPMR);
+
+       PMR_LogicalSize(psPMR, &uiPMRSize);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+       {
+               DEFINE_DMA_BUF_EXPORT_INFO(sDmaBufExportInfo);
+
+               sDmaBufExportInfo.priv  = psPMR;
+               sDmaBufExportInfo.ops   = &sPVRDmaBufOps;
+               sDmaBufExportInfo.size  = uiPMRSize;
+               sDmaBufExportInfo.flags = O_RDWR;
+
+               psDmaBuf = dma_buf_export(&sDmaBufExportInfo);
+       }
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0))
+       psDmaBuf = dma_buf_export(psPMR, &sPVRDmaBufOps,
+                                 uiPMRSize, O_RDWR, NULL);
+#else
+       psDmaBuf = dma_buf_export(psPMR, &sPVRDmaBufOps,
+                                 uiPMRSize, O_RDWR);
+#endif
+
+       if (IS_ERR_OR_NULL(psDmaBuf))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to export buffer (err=%ld)",
+                        __func__, psDmaBuf ? PTR_ERR(psDmaBuf) : -ENOMEM));
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto fail_pmr_ref;
+       }
+
+       iFd = dma_buf_fd(psDmaBuf, O_RDWR);
+       if (iFd < 0)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf fd (err=%d)",
+                        __func__, iFd));
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto fail_dma_buf;
+       }
+
+       mutex_unlock(&g_HashLock);
+       *piFd = iFd;
+
+       /* A PMR's memory layout can't change once it has been exported.
+        * This makes sure the exporting and importing parties see the
+        * same layout of the memory. */
+       PMR_SetLayoutFixed(psPMR, IMG_TRUE);
+
+       return PVRSRV_OK;
+
+fail_dma_buf:
+       dma_buf_put(psDmaBuf);
+
+fail_pmr_ref:
+       mutex_unlock(&g_HashLock);
+       PMRUnrefPMR(psPMR);
+
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+PVRSRV_ERROR
+PhysmemImportDmaBuf(CONNECTION_DATA *psConnection,
+                    PVRSRV_DEVICE_NODE *psDevNode,
+                    IMG_INT fd,
+                    PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                    IMG_UINT32 ui32NameSize,
+                    const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+                    PMR **ppsPMRPtr,
+                    IMG_DEVMEM_SIZE_T *puiSize,
+                    IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+       IMG_DEVMEM_SIZE_T uiSize;
+       IMG_UINT32 ui32MappingTable = 0;
+       struct dma_buf *psDmaBuf;
+       PVRSRV_ERROR eError;
+
+       /* Get the buffer handle */
+       psDmaBuf = dma_buf_get(fd);
+       if (IS_ERR_OR_NULL(psDmaBuf))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf from fd (err=%ld)",
+                                __func__, psDmaBuf ? PTR_ERR(psDmaBuf) : -ENOMEM));
+               return PVRSRV_ERROR_BAD_MAPPING;
+
+       }
+
+       uiSize = psDmaBuf->size;
+
+       eError = PhysmemImportSparseDmaBuf(psConnection,
+                                        psDevNode,
+                                        fd,
+                                        uiFlags,
+                                        uiSize,
+                                        1,
+                                        1,
+                                        &ui32MappingTable,
+                                        ui32NameSize,
+                                        pszName,
+                                        ppsPMRPtr,
+                                        puiSize,
+                                        puiAlign);
+
+       dma_buf_put(psDmaBuf);
+
+       return eError;
+}
+
+PVRSRV_ERROR
+PhysmemImportDmaBufLocked(CONNECTION_DATA *psConnection,
+                          PVRSRV_DEVICE_NODE *psDevNode,
+                          IMG_INT fd,
+                          PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                          IMG_UINT32 ui32NameSize,
+                          const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+                          PMR **ppsPMRPtr,
+                          IMG_DEVMEM_SIZE_T *puiSize,
+                          IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+       PMR *psPMRPtr;
+       PVRSRV_ERROR eError;
+
+       eError = PhysmemImportDmaBuf(psConnection,
+                                    psDevNode,
+                                    fd,
+                                    uiFlags,
+                                    ui32NameSize,
+                                    pszName,
+                                    &psPMRPtr,
+                                    puiSize,
+                                    puiAlign);
+
+       if (eError == PVRSRV_OK)
+       {
+               eError = PMRLockSysPhysAddresses(psPMRPtr);
+               if (eError == PVRSRV_OK)
+               {
+                       *ppsPMRPtr = psPMRPtr;
+               }
+               else
+               {
+                       PMRUnrefPMR(psPMRPtr);
+               }
+       }
+
+       return eError;
+}
+
+PVRSRV_ERROR
+PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection,
+                          PVRSRV_DEVICE_NODE *psDevNode,
+                          IMG_INT fd,
+                          PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                          IMG_DEVMEM_SIZE_T uiChunkSize,
+                          IMG_UINT32 ui32NumPhysChunks,
+                          IMG_UINT32 ui32NumVirtChunks,
+                          IMG_UINT32 *pui32MappingTable,
+                          IMG_UINT32 ui32NameSize,
+                          const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+                          PMR **ppsPMRPtr,
+                          IMG_DEVMEM_SIZE_T *puiSize,
+                          IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+       PMR *psPMR = NULL;
+       struct dma_buf_attachment *psAttachment;
+       struct dma_buf *psDmaBuf;
+       PVRSRV_ERROR eError;
+       IMG_BOOL bHashTableCreated = IMG_FALSE;
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       if (!psDevNode)
+       {
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto errReturn;
+       }
+
+       /* Terminate string from bridge to prevent corrupt annotations in RI */
+       if (pszName != NULL)
+       {
+               IMG_CHAR* pszName0 = (IMG_CHAR*) pszName;
+               pszName0[ui32NameSize-1] = '\0';
+       }
+
+       mutex_lock(&g_HashLock);
+
+       /* Get the buffer handle */
+       psDmaBuf = dma_buf_get(fd);
+       if (IS_ERR_OR_NULL(psDmaBuf))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf from fd (err=%ld)",
+                                __func__, psDmaBuf ? PTR_ERR(psDmaBuf) : -ENOMEM));
+               eError = PVRSRV_ERROR_BAD_MAPPING;
+               goto errUnlockReturn;
+       }
+
+       if (psDmaBuf->ops == &sPVRDmaBufOps)
+       {
+               PVRSRV_DEVICE_NODE *psPMRDevNode;
+
+               /* We exported this dma_buf, so we can just get its PMR */
+               psPMR = (PMR *) psDmaBuf->priv;
+
+               /* However, we can't import it if it belongs to a different device */
+               psPMRDevNode = PMR_DeviceNode(psPMR);
+               if (psPMRDevNode != psDevNode)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: PMR invalid for this device",
+                                        __func__));
+                       eError = PVRSRV_ERROR_PMR_NOT_PERMITTED;
+                       goto err;
+               }
+       }
+       else
+       {
+               if (g_psDmaBufHash)
+               {
+                       /* We have a hash table so check if we've seen this dmabuf before */
+                       psPMR = (PMR *) HASH_Retrieve(g_psDmaBufHash, (uintptr_t) psDmaBuf);
+               }
+               else
+               {
+                       /*
+                        * As different processes may import the same dmabuf we need to
+                        * create a hash table so we don't generate a duplicate PMR but
+                        * rather just take a reference on an existing one.
+                        */
+                       g_psDmaBufHash = HASH_Create(DMA_BUF_HASH_SIZE);
+                       if (!g_psDmaBufHash)
+                       {
+                               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto err;
+                       }
+                       bHashTableCreated = IMG_TRUE;
+               }
+       }
+
+       if (psPMR)
+       {
+               /* Reuse the PMR we already created */
+               PMRRefPMR(psPMR);
+
+               *ppsPMRPtr = psPMR;
+               PMR_LogicalSize(psPMR, puiSize);
+               *puiAlign = PAGE_SIZE;
+       }
+       /* No errors so far */
+       eError = PVRSRV_OK;
+
+err:
+       if (psPMR || (PVRSRV_OK != eError))
+       {
+               mutex_unlock(&g_HashLock);
+               dma_buf_put(psDmaBuf);
+
+               if (PVRSRV_OK == eError)
+               {
+                       /*
+                        * We expect the PMR layout to be immutable at this point,
+                        * but it is explicitly set here to cover the corner case
+                        * where a PMR created through a non-DMA interface is
+                        * imported back again through the DMA interface.
+                        */
+                       PMR_SetLayoutFixed(psPMR, IMG_TRUE);
+               }
+               return eError;
+       }
+
+       /* Do we want this to be a sparse PMR? */
+       if (ui32NumVirtChunks > 1)
+       {
+               IMG_UINT32 i;
+
+               /* Parameter validation */
+               if (psDmaBuf->size != (uiChunkSize * ui32NumPhysChunks) ||
+                   uiChunkSize != PAGE_SIZE ||
+                   ui32NumPhysChunks > ui32NumVirtChunks)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Requesting sparse buffer: "
+                                       "uiChunkSize ("IMG_DEVMEM_SIZE_FMTSPEC") must be equal to "
+                                       "OS page size (%lu). uiChunkSize * ui32NumPhysChunks "
+                                       "("IMG_DEVMEM_SIZE_FMTSPEC") must"
+                                       " be equal to the buffer size ("IMG_SIZE_FMTSPEC"). "
+                                       "ui32NumPhysChunks (%u) must be less than or equal to "
+                                       "ui32NumVirtChunks (%u)",
+                                        __func__,
+                                       uiChunkSize,
+                                       PAGE_SIZE,
+                                       uiChunkSize * ui32NumPhysChunks,
+                                       psDmaBuf->size,
+                                       ui32NumPhysChunks,
+                                       ui32NumVirtChunks));
+                       eError = PVRSRV_ERROR_INVALID_PARAMS;
+                       goto errUnlockAndDMAPut;
+               }
+
+               /* Parameter validation - mapping table entries */
+               for (i = 0; i < ui32NumPhysChunks; i++)
+               {
+                       if (pui32MappingTable[i] > ui32NumVirtChunks)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                               "%s: Requesting sparse buffer: "
+                                               "Entry in mapping table (%u) is out of allocation "
+                                               "bounds (%u)",
+                                                __func__,
+                                                (IMG_UINT32) pui32MappingTable[i],
+                                                (IMG_UINT32) ui32NumVirtChunks));
+                               eError = PVRSRV_ERROR_INVALID_PARAMS;
+                               goto errUnlockAndDMAPut;
+                       }
+               }
+       }
+       else
+       {
+               /* If ui32NumPhysChunks == 0, pui32MappingTable is NULL. Because
+                * ui32NumPhysChunks is set to 1 below, a NULL array is not allowed. */
+               if (pui32MappingTable == NULL)
+               {
+                       eError = PVRSRV_ERROR_INVALID_PARAMS;
+                       goto errUnlockAndDMAPut;
+               }
+
+               /* Make sure parameters are valid for non-sparse allocations as well */
+               uiChunkSize = psDmaBuf->size;
+               ui32NumPhysChunks = 1;
+               ui32NumVirtChunks = 1;
+       }
+
+
+       psAttachment = dma_buf_attach(psDmaBuf, psDevNode->psDevConfig->pvOSDevice);
+       if (IS_ERR_OR_NULL(psAttachment))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to attach to dma-buf (err=%ld)",
+                                __func__, psAttachment? PTR_ERR(psAttachment) : -ENOMEM));
+               eError = PVRSRV_ERROR_BAD_MAPPING;
+               goto errUnlockAndDMAPut;
+       }
+
+       /*
+        * Note:
+        * While we have no way to determine the type of the buffer we just
+        * assume that all dmabufs are from the same physical heap.
+        */
+       eError = PhysmemCreateNewDmaBufBackedPMR(psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_EXTERNAL],
+                                                psAttachment,
+                                                PhysmemDestroyDmaBuf,
+                                                uiFlags,
+                                                uiChunkSize,
+                                                ui32NumPhysChunks,
+                                                ui32NumVirtChunks,
+                                                pui32MappingTable,
+                                                ui32NameSize,
+                                                pszName,
+                                                &psPMR);
+       if (eError != PVRSRV_OK)
+       {
+               goto errDMADetach;
+       }
+
+       /* First time we've seen this dmabuf so store it in the hash table */
+       HASH_Insert(g_psDmaBufHash, (uintptr_t) psDmaBuf, (uintptr_t) psPMR);
+       g_ui32HashRefCount++;
+
+       mutex_unlock(&g_HashLock);
+
+       PVRSRVIonAddMemAllocRecord(psDmaBuf);
+
+       *ppsPMRPtr = psPMR;
+       *puiSize = ui32NumVirtChunks * uiChunkSize;
+       *puiAlign = PAGE_SIZE;
+
+       /* The memory that's just imported is owned by some other entity.
+        * Hence the memory layout cannot be changed through our API */
+       PMR_SetLayoutFixed(psPMR, IMG_TRUE);
+
+       return PVRSRV_OK;
+
+errDMADetach:
+       dma_buf_detach(psDmaBuf, psAttachment);
+
+errUnlockAndDMAPut:
+       if (IMG_TRUE == bHashTableCreated)
+       {
+               HASH_Delete(g_psDmaBufHash);
+               g_psDmaBufHash = NULL;
+       }
+       dma_buf_put(psDmaBuf);
+
+errUnlockReturn:
+       mutex_unlock(&g_HashLock);
+
+errReturn:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
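+
+/*
+ * Sketch of the import-deduplication pattern used above (illustrative only):
+ * the same dma-buf imported by several processes should resolve to a single
+ * PMR, so the importer first consults g_psDmaBufHash and only attaches and
+ * creates a new PMR on a miss.
+ *
+ *     psPMR = (PMR *) HASH_Retrieve(g_psDmaBufHash, (uintptr_t) psDmaBuf);
+ *     if (psPMR)
+ *     {
+ *         PMRRefPMR(psPMR);        // reuse: just take another reference
+ *     }
+ *     else
+ *     {
+ *         // miss: attach, create a new PMR and publish it for later importers
+ *         HASH_Insert(g_psDmaBufHash, (uintptr_t) psDmaBuf, (uintptr_t) psPMR);
+ *     }
+ */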
+
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) || defined(SUPPORT_ION) */
+
+PVRSRV_ERROR
+PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap,
+                                struct dma_buf_attachment *psAttachment,
+                                PFN_DESTROY_DMABUF_PMR pfnDestroy,
+                                PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                IMG_DEVMEM_SIZE_T uiChunkSize,
+                                IMG_UINT32 ui32NumPhysChunks,
+                                IMG_UINT32 ui32NumVirtChunks,
+                                IMG_UINT32 *pui32MappingTable,
+                                IMG_UINT32 ui32NameSize,
+                                const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+                                PMR **ppsPMRPtr)
+{
+       PVR_UNREFERENCED_PARAMETER(psHeap);
+       PVR_UNREFERENCED_PARAMETER(psAttachment);
+       PVR_UNREFERENCED_PARAMETER(pfnDestroy);
+       PVR_UNREFERENCED_PARAMETER(uiFlags);
+       PVR_UNREFERENCED_PARAMETER(uiChunkSize);
+       PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks);
+       PVR_UNREFERENCED_PARAMETER(ui32NumVirtChunks);
+       PVR_UNREFERENCED_PARAMETER(pui32MappingTable);
+       PVR_UNREFERENCED_PARAMETER(ui32NameSize);
+       PVR_UNREFERENCED_PARAMETER(pszName);
+       PVR_UNREFERENCED_PARAMETER(ppsPMRPtr);
+
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+struct dma_buf *
+PhysmemGetDmaBuf(PMR *psPMR)
+{
+       PVR_UNREFERENCED_PARAMETER(psPMR);
+
+       return NULL;
+}
+
+PVRSRV_ERROR
+PhysmemExportDmaBuf(CONNECTION_DATA *psConnection,
+                    PVRSRV_DEVICE_NODE *psDevNode,
+                    PMR *psPMR,
+                    IMG_INT *piFd)
+{
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVR_UNREFERENCED_PARAMETER(psDevNode);
+       PVR_UNREFERENCED_PARAMETER(psPMR);
+       PVR_UNREFERENCED_PARAMETER(piFd);
+
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+PVRSRV_ERROR
+PhysmemImportDmaBuf(CONNECTION_DATA *psConnection,
+                    PVRSRV_DEVICE_NODE *psDevNode,
+                    IMG_INT fd,
+                    PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                    IMG_UINT32 ui32NameSize,
+                    const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+                    PMR **ppsPMRPtr,
+                    IMG_DEVMEM_SIZE_T *puiSize,
+                    IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVR_UNREFERENCED_PARAMETER(psDevNode);
+       PVR_UNREFERENCED_PARAMETER(fd);
+       PVR_UNREFERENCED_PARAMETER(uiFlags);
+       PVR_UNREFERENCED_PARAMETER(ui32NameSize);
+       PVR_UNREFERENCED_PARAMETER(pszName);
+       PVR_UNREFERENCED_PARAMETER(ppsPMRPtr);
+       PVR_UNREFERENCED_PARAMETER(puiSize);
+       PVR_UNREFERENCED_PARAMETER(puiAlign);
+
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+PVRSRV_ERROR
+PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection,
+                          PVRSRV_DEVICE_NODE *psDevNode,
+                          IMG_INT fd,
+                          PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                          IMG_DEVMEM_SIZE_T uiChunkSize,
+                          IMG_UINT32 ui32NumPhysChunks,
+                          IMG_UINT32 ui32NumVirtChunks,
+                          IMG_UINT32 *pui32MappingTable,
+                          IMG_UINT32 ui32NameSize,
+                          const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+                          PMR **ppsPMRPtr,
+                          IMG_DEVMEM_SIZE_T *puiSize,
+                          IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVR_UNREFERENCED_PARAMETER(psDevNode);
+       PVR_UNREFERENCED_PARAMETER(fd);
+       PVR_UNREFERENCED_PARAMETER(uiFlags);
+       PVR_UNREFERENCED_PARAMETER(ppsPMRPtr);
+       PVR_UNREFERENCED_PARAMETER(puiSize);
+       PVR_UNREFERENCED_PARAMETER(puiAlign);
+       PVR_UNREFERENCED_PARAMETER(uiChunkSize);
+       PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks);
+       PVR_UNREFERENCED_PARAMETER(ui32NumVirtChunks);
+       PVR_UNREFERENCED_PARAMETER(pui32MappingTable);
+       PVR_UNREFERENCED_PARAMETER(ui32NameSize);
+       PVR_UNREFERENCED_PARAMETER(pszName);
+
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) || defined(SUPPORT_ION) || defined(KERNEL_HAS_DMABUF_VMAP_MMAP) */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/physmem_osmem_linux.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/physmem_osmem_linux.c
new file mode 100644 (file)
index 0000000..3f3baa4
--- /dev/null
@@ -0,0 +1,3948 @@
+/*************************************************************************/ /*!
+@File
+@Title          Implementation of PMR functions for OS managed memory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management.  This module is responsible for
+                implementing the function callbacks for physical memory borrowed
+                from that normally managed by the operating system.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/version.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/vmalloc.h>
+#include <linux/gfp.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+
+#if defined(CONFIG_X86)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))
+#include <asm/set_memory.h>
+#else
+#include <asm/cacheflush.h>
+#endif
+#endif
+
+/* include/ */
+#include "rgx_heaps.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "rgx_pdump_panics.h"
+/* services/server/include/ */
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pdump_km.h"
+#include "pmr.h"
+#include "pmr_impl.h"
+#include "cache_km.h"
+#include "devicemem_server_utils.h"
+#include "pvr_vmap.h"
+#include "physheap.h"
+
+/* ourselves */
+#include "physmem_osmem.h"
+#include "physmem_osmem_linux.h"
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+#include "hash.h"
+#endif
+#endif
+
+#include "kernel_compatibility.h"
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+static IMG_UINT32 g_uiMaxOrder = PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM;
+#else
+/* split_page not available on older kernels */
+#undef PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM
+#define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 0
+static IMG_UINT32 g_uiMaxOrder;
+#endif
+
+/*
+       These correspond to the MMU min/max page sizes and associated PTE
+       alignment that can be used on the device for an allocation. They are
+       4KB (min) and 2MB (max) respectively.
+*/
+#define PVR_MIN_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ  RGX_HEAP_4KB_PAGE_SHIFT
+#define PVR_MAX_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ  RGX_HEAP_2MB_PAGE_SHIFT
+
+/* Defines how many pages should be mapped at once to the kernel */
+#define PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES 1024 /* 4 MB */
+
+/*
+       These are used to get/set/mask lower-order bits in a dma_addr_t
+       to provide side-band information associated with that address.
+       This includes whether the address was obtained via alloc_page
+       or dma_alloc, and whether the address was allocated pre-aligned
+       or was adjusted manually to align it.
+*/
+#define DMA_SET_ADJUSTED_ADDR(x)               ((x) | ((dma_addr_t)0x02))
+#define DMA_IS_ADDR_ADJUSTED(x)                        ((x) & ((dma_addr_t)0x02))
+#define DMA_SET_ALLOCPG_ADDR(x)                        ((x) | ((dma_addr_t)0x01))
+#define DMA_IS_ALLOCPG_ADDR(x)                 ((x) & ((dma_addr_t)0x01))
+#define DMA_GET_ALIGN_ADJUSTMENT(x)            ((x>>2) & ((dma_addr_t)0x3ff))
+#define DMA_SET_ALIGN_ADJUSTMENT(x,y)  ((x) | (((dma_addr_t)y)<<0x02))
+#define DMA_GET_ADDR(x)                                        (((dma_addr_t)x) & ((dma_addr_t)~0xfff))
+#define DMA_VADDR_NOT_IN_USE                   0xCAFEF00DDEADBEEFULL
+
+#define PVRSRV_ZERO_VALUE 0
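+
+/*
+ * Sketch of how the side-band macros above compose (illustrative only): an
+ * address obtained for a page allocated via alloc_pages() is tagged in its
+ * low-order bits, and DMA_GET_ADDR() recovers the real page-aligned address.
+ * psPage is a hypothetical struct page pointer.
+ *
+ *     dma_addr_t sTagged = DMA_SET_ALLOCPG_ADDR(page_to_phys(psPage));
+ *
+ *     if (DMA_IS_ALLOCPG_ADDR(sTagged))
+ *     {
+ *         dma_addr_t sReal = DMA_GET_ADDR(sTagged);   // low 12 bits cleared
+ *     }
+ */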
+
+typedef struct _PMR_OSPAGEARRAY_DATA_ {
+       /* Device for which this allocation has been made */
+       PVRSRV_DEVICE_NODE *psDevNode;
+       /* The pid that made this allocation */
+       IMG_PID uiPid;
+
+       /*
+        * iNumOSPagesAllocated:
+        * Number of pages allocated in this PMR so far.
+        * This allows for up to (2^31 - 1) pages. With 4KB pages, that's 8TB of memory for each PMR.
+        */
+       IMG_INT32 iNumOSPagesAllocated;
+
+       /*
+        * uiTotalNumOSPages:
+        * Total number of pages supported by this PMR (currently fixed, due to
+        * the fixed page table array size), expressed as a number of "pages"
+        * (a.k.a. macro pages, compound pages, higher-order pages, etc.).
+        */
+       IMG_UINT32 uiTotalNumOSPages;
+
+       /*
+         uiLog2AllocPageSize;
+
+         log2 of the size of each "page" -- this would normally be the same as
+         PAGE_SHIFT, but we support the idea that we may allocate pages
+         in larger chunks for better contiguity, using order>0 in the
+         call to alloc_pages()
+       */
+       IMG_UINT32 uiLog2AllocPageSize;
+
+       /*
+         ui64DmaMask;
+       */
+       IMG_UINT64 ui64DmaMask;
+
+       /*
+         For non DMA/CMA allocation, pagearray references the pages
+         thus allocated; one entry per compound page when compound
+         pages are used. In addition, for DMA/CMA allocations, we
+         track the returned cpu virtual and device bus address.
+       */
+       struct page **pagearray;
+       dma_addr_t *dmaphysarray;
+       void **dmavirtarray;
+
+
+#define FLAG_ZERO              (0U)
+#define FLAG_POISON_ON_FREE    (1U)
+#define FLAG_POISON_ON_ALLOC   (2U)
+#define FLAG_ONDEMAND          (3U)
+#define FLAG_UNPINNED          (4U)
+#define FLAG_IS_CMA            (5U)
+#define FLAG_UNSET_MEMORY_TYPE (6U)
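+
+/* The FLAG_* values above are bit indices into ui32AllocFlags (see the
+ * BIT_ISSET usage further below), not bit masks. */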
+
+       /*
+        * Allocation flags related to the pages:
+        * Zero              - Zero the memory on alloc.
+        * Poison on free    - Poison the memory on free.
+        * Poison on alloc   - Poison the memory on alloc.
+        * On demand         - The allocation is on demand, i.e. allocation is deferred to time of use.
+        * Unpinned          - The allocation is unpinned and must be protected by the page pool lock.
+        * CMA               - The memory is CMA memory allocated via the DMA framework.
+        * Unset Memory Type - On free, revert the cache type before returning the pages to the OS.
+        */
+       IMG_UINT32 ui32AllocFlags;
+
+       /*
+         The cache mode of the PMR. Additionally carrying the CPU-Cache-Clean
+         flag, advising us to do cache maintenance on behalf of the caller.
+         Boolean used to track if we need to revert the cache attributes
+         of the pages used in this allocation. Depends on OS/architecture.
+       */
+       IMG_UINT32 ui32CPUCacheFlags;
+       /*
+        * In the CMA allocation path, the allocator can allocate double the
+        * requested size to satisfy the alignment. In this case the additional
+        * pages are tracked through this variable and are accounted for in the
+        * memory statistics.
+        */
+       IMG_UINT32 ui32CMAAdjustedPageCount;
+} PMR_OSPAGEARRAY_DATA;
+
+/***********************************
+ * Page pooling for uncached pages *
+ ***********************************/
+
+static INLINE void
+_FreeOSPage_CMA(struct device *dev,
+                               size_t alloc_size,
+                               IMG_UINT32 uiOrder,
+                               void *virt_addr,
+                               dma_addr_t dev_addr,
+                               struct page *psPage);
+
+static void
+_FreeOSPage(IMG_UINT32 uiOrder,
+                       IMG_BOOL bUnsetMemoryType,
+                       struct page *psPage);
+
+static PVRSRV_ERROR
+_FreeOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+                       IMG_UINT32 *pai32FreeIndices,
+                       IMG_UINT32 ui32FreePageCount);
+
+static PVRSRV_ERROR
+_FreePagesFromPoolUnlocked(IMG_UINT32 uiMaxPagesToFree,
+                                                  IMG_UINT32 *puiPagesFreed);
+
+/* A struct for our page pool holding an array of zeroed (!) pages.
+ * We always add whole page arrays to the pool but are
+ * able to take individual pages out of them. */
+typedef        struct
+{
+       /* Linkage for page pool LRU list */
+       struct list_head sPagePoolItem;
+
+       /* How many items are still in the page array */
+       IMG_UINT32 uiItemsRemaining;
+       /* Array of the actual pages */
+       struct page **ppsPageArray;
+
+} LinuxPagePoolEntry;
+
+/* CleanupThread structure to put allocation in page pool */
+typedef struct
+{
+       PVRSRV_CLEANUP_THREAD_WORK sCleanupWork;
+       IMG_UINT32 ui32CPUCacheMode;
+       LinuxPagePoolEntry *psPoolEntry;
+} LinuxCleanupData;
+
+/* A struct for the unpinned items */
+typedef struct
+{
+       struct list_head sUnpinPoolItem;
+       PMR_OSPAGEARRAY_DATA *psPageArrayDataPtr;
+} LinuxUnpinEntry;
+
+
+/* Caches to hold page pool and page array structures */
+static struct kmem_cache *g_psLinuxPagePoolCache;
+static struct kmem_cache *g_psLinuxPageArray;
+
+/* Track what is live, all protected by pool lock.
+ * x86 needs two page pools because we have to change the memory attributes
+ * of the pages which is expensive due to an implicit flush.
+ * See set_pages_array_uc/wc/wb. */
+static IMG_UINT32 g_ui32UnpinPageCount;
+static IMG_UINT32 g_ui32PagePoolUCCount;
+#if defined(CONFIG_X86)
+static IMG_UINT32 g_ui32PagePoolWCCount;
+#endif
+/* Tracks asynchronous tasks currently accessing the page pool.
+ * The counter is incremented when a deferred free task is created and
+ * decremented when the task has finished its work.
+ * The atomic prevents deferred work from piling up in case the cleanup
+ * thread cannot keep up with the application. */
+static ATOMIC_T g_iPoolCleanTasks;
+/* We don't want too many asynchronous threads trying to access the page pool
+ * at the same time */
+#define PVR_LINUX_PHYSMEM_MAX_ASYNC_CLEAN_TASKS 128
+
+/* Defines how many pages the page pool should hold. */
+#if defined(PVR_LINUX_PHYSMEM_MAX_POOL_PAGES)
+static const IMG_UINT32 g_ui32PagePoolMaxEntries = PVR_LINUX_PHYSMEM_MAX_POOL_PAGES;
+#else
+static const IMG_UINT32 g_ui32PagePoolMaxEntries;
+#endif
+
+/*     We double check if we would exceed this limit if we are below MAX_POOL_PAGES
+       and want to add an allocation to the pool.
+       This prevents big allocations being given back to the OS just because they
+       exceed the MAX_POOL_PAGES limit even though the pool is currently empty. */
+#if defined(PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES)
+static const IMG_UINT32 g_ui32PagePoolMaxExcessEntries = PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES;
+#else
+static const IMG_UINT32 g_ui32PagePoolMaxExcessEntries;
+#endif
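+
+/* Illustration (with hypothetical limits): if g_ui32PagePoolMaxEntries were
+ * 10240 and g_ui32PagePoolMaxExcessEntries were 20480, an empty pool could
+ * accept a single 25000-page array (0 + 25000 < 10240 + 20480), whereas a
+ * pool already holding 10240 or more pages would accept nothing further. */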
+
+#if defined(CONFIG_X86)
+#define PHYSMEM_OSMEM_NUM_OF_POOLS 2
+static const IMG_UINT32 g_aui32CPUCacheFlags[PHYSMEM_OSMEM_NUM_OF_POOLS] = {
+       PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
+       PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC
+};
+#else
+#define PHYSMEM_OSMEM_NUM_OF_POOLS 1
+static const IMG_UINT32 g_aui32CPUCacheFlags[PHYSMEM_OSMEM_NUM_OF_POOLS] = {
+       PVRSRV_MEMALLOCFLAG_CPU_UNCACHED
+};
+#endif
+
+/* Global structures we use to manage the page pool */
+static DEFINE_MUTEX(g_sPagePoolMutex);
+
+/* List holding the page array pointers: */
+static LIST_HEAD(g_sPagePoolList_WC);
+static LIST_HEAD(g_sPagePoolList_UC);
+static LIST_HEAD(g_sUnpinList);
+
+#if defined(DEBUG) && defined(SUPPORT_VALIDATION)
+/* Global structure to manage GPU memory leak */
+static DEFINE_MUTEX(g_sUMALeakMutex);
+static IMG_UINT32 g_ui32UMALeakCounter = 0;
+#endif
+
+static inline IMG_UINT32
+_PagesInPoolUnlocked(void)
+{
+       IMG_UINT32 uiCnt = g_ui32PagePoolUCCount;
+#if defined(CONFIG_X86)
+       uiCnt += g_ui32PagePoolWCCount;
+#endif
+       return uiCnt;
+}
+
+static inline void
+_PagePoolLock(void)
+{
+       mutex_lock(&g_sPagePoolMutex);
+}
+
+static inline int
+_PagePoolTrylock(void)
+{
+       return mutex_trylock(&g_sPagePoolMutex);
+}
+
+static inline void
+_PagePoolUnlock(void)
+{
+       mutex_unlock(&g_sPagePoolMutex);
+}
+
+static PVRSRV_ERROR
+_AddUnpinListEntryUnlocked(PMR_OSPAGEARRAY_DATA *psOSPageArrayData)
+{
+       LinuxUnpinEntry *psUnpinEntry;
+
+       psUnpinEntry = OSAllocMem(sizeof(*psUnpinEntry));
+       if (!psUnpinEntry)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: OSAllocMem failed. Cannot add entry to unpin list.",
+                               __func__));
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       psUnpinEntry->psPageArrayDataPtr = psOSPageArrayData;
+
+       /* Add to the unpin list so that the shrinker can access it easily */
+       list_add_tail(&psUnpinEntry->sUnpinPoolItem, &g_sUnpinList);
+
+       g_ui32UnpinPageCount += psOSPageArrayData->iNumOSPagesAllocated;
+
+       return PVRSRV_OK;
+}
+
+static void
+_RemoveUnpinListEntryUnlocked(PMR_OSPAGEARRAY_DATA *psOSPageArrayData)
+{
+       LinuxUnpinEntry *psUnpinEntry, *psTempUnpinEntry;
+
+       /* Remove from pool */
+       list_for_each_entry_safe(psUnpinEntry,
+                                psTempUnpinEntry,
+                                &g_sUnpinList,
+                                sUnpinPoolItem)
+       {
+               if (psUnpinEntry->psPageArrayDataPtr == psOSPageArrayData)
+               {
+                       list_del(&psUnpinEntry->sUnpinPoolItem);
+                       break;
+               }
+       }
+
+       OSFreeMem(psUnpinEntry);
+
+       g_ui32UnpinPageCount -= psOSPageArrayData->iNumOSPagesAllocated;
+}
+
+static inline IMG_BOOL
+_GetPoolListHead(IMG_UINT32 ui32CPUCacheFlags,
+                                struct list_head **ppsPoolHead,
+                                IMG_UINT32 **ppuiCounter)
+{
+       switch (PVRSRV_CPU_CACHE_MODE(ui32CPUCacheFlags))
+       {
+               case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC:
+#if defined(CONFIG_X86)
+               /*
+                       For x86 we need to keep different lists for uncached
+                       and write-combined as we must always honour the PAT
+                       setting which cares about this difference.
+               */
+
+                       *ppsPoolHead = &g_sPagePoolList_WC;
+                       *ppuiCounter = &g_ui32PagePoolWCCount;
+                       break;
+#endif
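+               /* On non-x86 builds there is no WC pool, so the WC case above
+                * deliberately falls through to the UC pool below. */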
+
+               case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+                       *ppsPoolHead = &g_sPagePoolList_UC;
+                       *ppuiCounter = &g_ui32PagePoolUCCount;
+                       break;
+
+               default:
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Unknown CPU caching mode. "
+                                        "Using default UC pool.",
+                                        __func__));
+                       *ppsPoolHead = &g_sPagePoolList_UC;
+                       *ppuiCounter = &g_ui32PagePoolUCCount;
+                       PVR_ASSERT(0);
+                       return IMG_FALSE;
+       }
+       return IMG_TRUE;
+}
+
+static struct shrinker g_sShrinker;
+
+/* Returning the number of pages that still reside in the page pool. */
+static unsigned long
+_GetNumberOfPagesInPoolUnlocked(void)
+{
+       return _PagesInPoolUnlocked() + g_ui32UnpinPageCount;
+}
+
+/* Linux shrinker function that informs the OS about how many pages we are
+ * caching and are able to reclaim. */
+static unsigned long
+_CountObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl)
+{
+       int remain;
+
+       PVR_ASSERT(psShrinker == &g_sShrinker);
+       (void)psShrinker;
+       (void)psShrinkControl;
+
+       /* In order to avoid possible deadlock use mutex_trylock in place of mutex_lock */
+       if (_PagePoolTrylock() == 0)
+               return 0;
+       remain = _GetNumberOfPagesInPoolUnlocked();
+       _PagePoolUnlock();
+
+       return remain;
+}
+
+/* Linux shrinker function to reclaim the pages from our page pool */
+static unsigned long
+_ScanObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl)
+{
+       unsigned long uNumToScan = psShrinkControl->nr_to_scan;
+       unsigned long uSurplus = 0;
+       LinuxUnpinEntry *psUnpinEntry, *psTempUnpinEntry;
+       IMG_UINT32 uiPagesFreed;
+
+       PVR_ASSERT(psShrinker == &g_sShrinker);
+       (void)psShrinker;
+
+       /* In order to avoid possible deadlock use mutex_trylock in place of mutex_lock */
+       if (_PagePoolTrylock() == 0)
+               return SHRINK_STOP;
+
+       _FreePagesFromPoolUnlocked(uNumToScan,
+                                                          &uiPagesFreed);
+       uNumToScan -= uiPagesFreed;
+
+       if (uNumToScan == 0)
+       {
+               goto e_exit;
+       }
+
+       /* Free unpinned memory, starting with LRU entries */
+       list_for_each_entry_safe(psUnpinEntry,
+                                                        psTempUnpinEntry,
+                                                        &g_sUnpinList,
+                                                        sUnpinPoolItem)
+       {
+               PMR_OSPAGEARRAY_DATA *psPageArrayDataPtr = psUnpinEntry->psPageArrayDataPtr;
+               IMG_UINT32 uiNumPages = (psPageArrayDataPtr->uiTotalNumOSPages > psPageArrayDataPtr->iNumOSPagesAllocated)?
+                                                               psPageArrayDataPtr->iNumOSPagesAllocated:psPageArrayDataPtr->uiTotalNumOSPages;
+               PVRSRV_ERROR eError;
+
+               /* Free associated pages */
+               eError = _FreeOSPages(psPageArrayDataPtr,
+                                                         NULL,
+                                                         0);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Shrinker is unable to free unpinned pages. Error: %s (%d)",
+                                        __func__,
+                                        PVRSRVGetErrorString(eError),
+                                        eError));
+                       goto e_exit;
+               }
+
+               /* Remove item from pool */
+               list_del(&psUnpinEntry->sUnpinPoolItem);
+
+               g_ui32UnpinPageCount -= uiNumPages;
+
+               /* Check if there is more to free or if we already surpassed the limit */
+               if (uiNumPages < uNumToScan)
+               {
+                       uNumToScan -= uiNumPages;
+
+               }
+               else if (uiNumPages > uNumToScan)
+               {
+                       uSurplus += uiNumPages - uNumToScan;
+                       uNumToScan = 0;
+                       goto e_exit;
+               }
+               else
+               {
+                       uNumToScan -= uiNumPages;
+                       goto e_exit;
+               }
+       }
+
+e_exit:
+       if (list_empty(&g_sUnpinList))
+       {
+               PVR_ASSERT(g_ui32UnpinPageCount == 0);
+       }
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0))
+       {
+               int remain;
+               remain = _GetNumberOfPagesInPoolUnlocked();
+               _PagePoolUnlock();
+               return remain;
+       }
+#else
+       /* Returning the number of pages freed during the scan */
+       _PagePoolUnlock();
+       return psShrinkControl->nr_to_scan - uNumToScan + uSurplus;
+#endif
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0))
+static int
+_ShrinkPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl)
+{
+       if (psShrinkControl->nr_to_scan != 0)
+       {
+               return _ScanObjectsInPagePool(psShrinker, psShrinkControl);
+       }
+       else
+       {
+               /* No pages are being reclaimed so just return the page count */
+               return _CountObjectsInPagePool(psShrinker, psShrinkControl);
+       }
+}
+
+static struct shrinker g_sShrinker =
+{
+       .shrink = _ShrinkPagePool,
+       .seeks = DEFAULT_SEEKS
+};
+#else
+static struct shrinker g_sShrinker =
+{
+       .count_objects = _CountObjectsInPagePool,
+       .scan_objects = _ScanObjectsInPagePool,
+       .seeks = DEFAULT_SEEKS
+};
+#endif
+
+/* Create the slab caches and register the shrinker so Linux can reclaim cached pages */
+void LinuxInitPhysmem(void)
+{
+       g_psLinuxPageArray = kmem_cache_create("pvr-pa", sizeof(PMR_OSPAGEARRAY_DATA), 0, 0, NULL);
+
+       g_psLinuxPagePoolCache = kmem_cache_create("pvr-pp", sizeof(LinuxPagePoolEntry), 0, 0, NULL);
+       if (g_psLinuxPagePoolCache)
+       {
+               /* Only create the shrinker if we created the cache OK */
+               register_shrinker(&g_sShrinker, NULL);
+       }
+
+       OSAtomicWrite(&g_iPoolCleanTasks, 0);
+}
+
+/* Unregister the shrinker and remove all pages that are still left in the pool */
+void LinuxDeinitPhysmem(void)
+{
+       IMG_UINT32 uiPagesFreed;
+
+       if (OSAtomicRead(&g_iPoolCleanTasks) > 0)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "Still deferred cleanup tasks running "
+                               "while deinitialising memory subsystem."));
+       }
+
+       _PagePoolLock();
+       if (_FreePagesFromPoolUnlocked(IMG_UINT32_MAX, &uiPagesFreed) != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Unable to free all pages from page pool when "
+                               "deinitialising memory subsystem."));
+               PVR_ASSERT(0);
+       }
+
+       PVR_ASSERT(_PagesInPoolUnlocked() == 0);
+
+       /* Free the page cache */
+       kmem_cache_destroy(g_psLinuxPagePoolCache);
+
+       unregister_shrinker(&g_sShrinker);
+       _PagePoolUnlock();
+
+       kmem_cache_destroy(g_psLinuxPageArray);
+}
+
+static void EnableOOMKiller(void)
+{
+       current->flags &= ~PF_DUMPCORE;
+}
+
+static void DisableOOMKiller(void)
+{
+       /* PF_DUMPCORE is treated by the VM as if the OOM killer was disabled.
+        *
+        * As oom_killer_disable() is an inline, non-exported function, we
+        * can't use it from a modular driver. Furthermore, the OOM killer
+        * API doesn't look thread safe, unlike 'current', which is.
+        */
+       WARN_ON(current->flags & PF_DUMPCORE);
+       current->flags |= PF_DUMPCORE;
+}
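+
+/* DisableOOMKiller()/EnableOOMKiller() are used in pairs around the page
+ * allocation calls below (see _AllocOSPage_CMA), so that a failed allocation
+ * returns NULL to the driver instead of triggering the OOM killer. */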
+
+/* Prints out the addresses in a page array for debugging purposes.
+ * Define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY locally to activate: */
+/* #define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY 1 */
+static inline void
+_DumpPageArray(struct page **pagearray, IMG_UINT32 uiPagesToPrint)
+{
+#if defined(PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY)
+       IMG_UINT32 i;
+       if (pagearray)
+       {
+               printk("Array %p:\n", pagearray);
+               for (i = 0; i < uiPagesToPrint; i++)
+               {
+                       printk("%p | ", (pagearray)[i]);
+               }
+               printk("\n");
+       }
+       else
+       {
+               printk("Array is NULL:\n");
+       }
+#else
+       PVR_UNREFERENCED_PARAMETER(pagearray);
+       PVR_UNREFERENCED_PARAMETER(uiPagesToPrint);
+#endif
+}
+
+/* Debugging function that dumps out the number of pages for every
+ * page array that is currently in the page pool.
+ * Not defined by default. Define locally to activate feature: */
+/* #define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_POOL 1 */
+static void
+_DumpPoolStructure(void)
+{
+#if defined(PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_POOL)
+       LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;
+       struct list_head *psPoolHead = NULL;
+       IMG_UINT32 j;
+       IMG_UINT32 *puiCounter;
+
+       printk("\n");
+       /* Empty all pools */
+       for (j = 0; j < PHYSMEM_OSMEM_NUM_OF_POOLS; j++)
+       {
+
+               printk("pool = %u\n", j);
+
+               /* Get the correct list for this caching mode */
+               if (!_GetPoolListHead(g_aui32CPUCacheFlags[j], &psPoolHead, &puiCounter))
+               {
+                       break;
+               }
+
+               list_for_each_entry_safe(psPagePoolEntry,
+                                                                psTempPoolEntry,
+                                                                psPoolHead,
+                                                                sPagePoolItem)
+               {
+                       printk("%u | ", psPagePoolEntry->uiItemsRemaining);
+               }
+               printk("\n");
+       }
+#endif
+}
+
+/* Free a certain number of pages from the page pool.
+ * Mainly used in error paths or at deinitialisation to
+ * empty the whole pool. */
+static PVRSRV_ERROR
+_FreePagesFromPoolUnlocked(IMG_UINT32 uiMaxPagesToFree,
+                                                  IMG_UINT32 *puiPagesFreed)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;
+       struct list_head *psPoolHead = NULL;
+       IMG_UINT32 i, j;
+       IMG_UINT32 *puiCounter;
+
+       *puiPagesFreed = uiMaxPagesToFree;
+
+       /* Empty all pools */
+       for (j = 0; j < PHYSMEM_OSMEM_NUM_OF_POOLS; j++)
+       {
+
+               /* Get the correct list for this caching mode */
+               if (!_GetPoolListHead(g_aui32CPUCacheFlags[j], &psPoolHead, &puiCounter))
+               {
+                       break;
+               }
+
+               /* Free the pages and remove page arrays from the pool if they are exhausted */
+               list_for_each_entry_safe(psPagePoolEntry,
+                                                                psTempPoolEntry,
+                                                                psPoolHead,
+                                                                sPagePoolItem)
+               {
+                       IMG_UINT32 uiItemsToFree;
+                       struct page **ppsPageArray;
+
+                       /* Check if we are going to free the whole page array or just parts */
+                       if (psPagePoolEntry->uiItemsRemaining <= uiMaxPagesToFree)
+                       {
+                               uiItemsToFree = psPagePoolEntry->uiItemsRemaining;
+                               ppsPageArray = psPagePoolEntry->ppsPageArray;
+                       }
+                       else
+                       {
+                               uiItemsToFree = uiMaxPagesToFree;
+                               ppsPageArray = &(psPagePoolEntry->ppsPageArray[psPagePoolEntry->uiItemsRemaining - uiItemsToFree]);
+                       }
+
+#if defined(CONFIG_X86)
+                       /* Set the correct page caching attributes on x86 */
+                       if (!PVRSRV_CHECK_CPU_CACHED(g_aui32CPUCacheFlags[j]))
+                       {
+                               int ret;
+                               ret = set_pages_array_wb(ppsPageArray, uiItemsToFree);
+                               if (ret)
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR,
+                                                        "%s: Failed to reset page attributes",
+                                                        __func__));
+                                       eError = PVRSRV_ERROR_FAILED_TO_FREE_PAGES;
+                                       goto e_exit;
+                               }
+                       }
+#endif
+
+                       /* Free the actual pages */
+                       for (i = 0; i < uiItemsToFree; i++)
+                       {
+                               __free_pages(ppsPageArray[i], 0);
+                               ppsPageArray[i] = NULL;
+                       }
+
+                       /* Reduce counters */
+                       uiMaxPagesToFree -= uiItemsToFree;
+                       *puiCounter -= uiItemsToFree;
+                       psPagePoolEntry->uiItemsRemaining -= uiItemsToFree;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+                       /*
+                        * MemStats usually relies on having the bridge lock held, however
+                        * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
+                        * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
+                        * the page pool lock is used to ensure these calls are mutually
+                        * exclusive
+                        */
+                       PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * uiItemsToFree);
+#endif
+
+                       /* If this pool entry is exhausted, delete it */
+                       if (psPagePoolEntry->uiItemsRemaining == 0)
+                       {
+                               OSFreeMemNoStats(psPagePoolEntry->ppsPageArray);
+                               list_del(&psPagePoolEntry->sPagePoolItem);
+                               kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry);
+                       }
+
+                       /* Return if we have all our pages */
+                       if (uiMaxPagesToFree == 0)
+                       {
+                               goto e_exit;
+                       }
+               }
+       }
+
+e_exit:
+       *puiPagesFreed -= uiMaxPagesToFree;
+       _DumpPoolStructure();
+       return eError;
+}
+
+/* Get a certain number of pages from the page pool and
+ * copy them directly into a given page array. */
+static void
+_GetPagesFromPoolUnlocked(IMG_UINT32 ui32CPUCacheFlags,
+                                                 IMG_UINT32 uiMaxNumPages,
+                                                 struct page **ppsPageArray,
+                                                 IMG_UINT32 *puiNumReceivedPages)
+{
+       LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;
+       struct list_head *psPoolHead = NULL;
+       IMG_UINT32 i;
+       IMG_UINT32 *puiCounter;
+
+       *puiNumReceivedPages = 0;
+
+       /* Get the correct list for this caching mode */
+       if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead, &puiCounter))
+       {
+               return;
+       }
+
+       /* Check if there are actually items in the list */
+       if (list_empty(psPoolHead))
+       {
+               return;
+       }
+
+       PVR_ASSERT(*puiCounter > 0);
+
+       /* Receive pages from the pool */
+       list_for_each_entry_safe(psPagePoolEntry,
+                                                        psTempPoolEntry,
+                                                        psPoolHead,
+                                                        sPagePoolItem)
+       {
+               /* Get the pages from this pool entry */
+               for (i = psPagePoolEntry->uiItemsRemaining; i != 0 && *puiNumReceivedPages < uiMaxNumPages; i--)
+               {
+                       ppsPageArray[*puiNumReceivedPages] = psPagePoolEntry->ppsPageArray[i-1];
+                       (*puiNumReceivedPages)++;
+                       psPagePoolEntry->uiItemsRemaining--;
+               }
+
+               /* If this pool entry is exhausted, delete it */
+               if (psPagePoolEntry->uiItemsRemaining == 0)
+               {
+                       OSFreeMemNoStats(psPagePoolEntry->ppsPageArray);
+                       list_del(&psPagePoolEntry->sPagePoolItem);
+                       kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry);
+               }
+
+               /* Return if we have all our pages */
+               if (*puiNumReceivedPages == uiMaxNumPages)
+               {
+                       goto exit_ok;
+               }
+       }
+
+exit_ok:
+
+       /* Update counters */
+       *puiCounter -= *puiNumReceivedPages;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+       /* MemStats usually relies on having the bridge lock held, however
+        * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
+        * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
+        * the page pool lock is used to ensure these calls are mutually
+        * exclusive
+        */
+       PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * (*puiNumReceivedPages));
+#endif
+
+       _DumpPoolStructure();
+       return;
+}
+
+/* Same as _GetPagesFromPoolUnlocked but handles locking and
+ * checks first whether pages from the pool are a valid option. */
+static inline void
+_GetPagesFromPoolLocked(PVRSRV_DEVICE_NODE *psDevNode,
+                                               IMG_UINT32 ui32CPUCacheFlags,
+                                               IMG_UINT32 uiPagesToAlloc,
+                                               IMG_UINT32 uiOrder,
+                                               IMG_BOOL bZero,
+                                               struct page **ppsPageArray,
+                                               IMG_UINT32 *puiPagesFromPool)
+{
+#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES)
+       PVR_UNREFERENCED_PARAMETER(bZero);
+#else
+       /* Don't get pages from pool if it doesn't provide zeroed pages */
+       if (bZero)
+       {
+               return;
+       }
+#endif
+
+       /* The page pool stores only order 0 pages. If we need zeroed memory we
+        * directly allocate from the OS because it is faster than
+        * doing it within the driver. */
+       if (uiOrder == 0 &&
+           !PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags))
+       {
+
+               _PagePoolLock();
+               _GetPagesFromPoolUnlocked(ui32CPUCacheFlags,
+                                                                 uiPagesToAlloc,
+                                                                 ppsPageArray,
+                                                                 puiPagesFromPool);
+               _PagePoolUnlock();
+       }
+
+       return;
+}
+
+/* Takes a page array and maps it into the kernel to write the given pattern (zero or poison) */
+static PVRSRV_ERROR
+_MemsetPageArray(IMG_UINT32 uiNumToClean,
+                 struct page **ppsCleanArray,
+                 pgprot_t pgprot,
+                 IMG_UINT8 ui8Pattern)
+{
+       IMG_CPU_VIRTADDR pvAddr;
+       IMG_UINT32 uiMaxPagesToMap = MIN(PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES,
+                                        uiNumToClean);
+
+       /* Map and fill the pages with the given pattern.
+        * For large page arrays do it PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES
+        * pages at a time. */
+       while (uiNumToClean != 0)
+       {
+               IMG_UINT32 uiToClean = MIN(uiNumToClean, uiMaxPagesToMap);
+
+               pvAddr = pvr_vmap(ppsCleanArray, uiToClean, VM_WRITE, pgprot);
+               if (!pvAddr)
+               {
+                       if (uiMaxPagesToMap <= 1)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Out of vmalloc memory, unable to map pages for %s.",
+                                       __func__,
+                                       ui8Pattern == PVRSRV_ZERO_VALUE ? "zeroing" : "poisoning"));
+                               return PVRSRV_ERROR_OUT_OF_MEMORY;
+                       }
+                       else
+                       {
+                               /* Halve the pages to map at once and try again. */
+                               uiMaxPagesToMap = uiMaxPagesToMap >> 1;
+                               continue;
+                       }
+               }
+
+               if (pgprot_val(pgprot) == pgprot_val(pgprot_noncached(PAGE_KERNEL)))
+               {
+                       /* This is most likely unnecessary as all pages are 8-byte
+                        * aligned, so unaligned access is impossible */
+                       OSDeviceMemSet(pvAddr, ui8Pattern, PAGE_SIZE * uiToClean);
+               }
+               else if (pgprot_val(pgprot) == pgprot_val(pgprot_writecombine(PAGE_KERNEL)))
+               {
+                       OSCachedMemSetWMB(pvAddr, ui8Pattern, PAGE_SIZE * uiToClean);
+               }
+               else
+               {
+                       OSCachedMemSet(pvAddr, ui8Pattern, PAGE_SIZE * uiToClean);
+               }
+               pvr_vunmap(pvAddr, uiToClean, pgprot);
+               ppsCleanArray = &(ppsCleanArray[uiToClean]);
+               uiNumToClean -= uiToClean;
+       }
+
+       return PVRSRV_OK;
+}
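+
+/* For illustration (assuming 4 KB OS pages): _MemsetPageArray on a 4096-page
+ * (16 MB) array maps at most PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES (1024 pages,
+ * 4 MB) at a time, i.e. four vmap/memset/vunmap passes, and the mapping
+ * window is halved whenever pvr_vmap() fails, down to a single page. */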
+
+static PVRSRV_ERROR
+_CleanupThread_CleanPages(void *pvData)
+{
+       LinuxCleanupData *psCleanupData = (LinuxCleanupData*) pvData;
+       LinuxPagePoolEntry *psPagePoolEntry = psCleanupData->psPoolEntry;
+       struct list_head *psPoolHead = NULL;
+       IMG_UINT32 *puiCounter = NULL;
+#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES)
+       PVRSRV_ERROR eError;
+       pgprot_t pgprot;
+       IMG_UINT32 i;
+#endif /* defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) */
+
+       /* Get the correct pool for this caching mode. */
+       _GetPoolListHead(psCleanupData->ui32CPUCacheMode , &psPoolHead, &puiCounter);
+
+#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES)
+       switch (PVRSRV_CPU_CACHE_MODE(psCleanupData->ui32CPUCacheMode))
+       {
+               case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+#if defined(CONFIG_X86)
+                       /* For x86 we can only map with the same attributes
+                        * as in the PAT settings */
+                       pgprot = pgprot_noncached(PAGE_KERNEL);
+                       break;
+#endif
+
+               case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC:
+                       pgprot = pgprot_writecombine(PAGE_KERNEL);
+                       break;
+
+               default:
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Unknown caching mode to set page protection flags.",
+                                       __func__));
+                       eError = PVRSRV_ERROR_INVALID_PARAMS;
+                       goto eExit;
+       }
+
+       /* Map and fill the pages with zeros.
+        * For large page arrays do it PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES
+        * pages at a time. */
+       eError = _MemsetPageArray(psPagePoolEntry->uiItemsRemaining,
+                                 psPagePoolEntry->ppsPageArray,
+                                 pgprot, PVRSRV_ZERO_VALUE);
+       if (eError != PVRSRV_OK)
+       {
+               goto eExit;
+       }
+#endif /* defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) */
+
+       /* Lock down pool and add item */
+       _PagePoolLock();
+
+       /* Pool counters were already updated so don't do it here again */
+
+       /* The pages are all zeroed so return them to the pool. */
+       list_add_tail(&psPagePoolEntry->sPagePoolItem, psPoolHead);
+
+       _DumpPoolStructure();
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+       /* Calling PVRSRVStatsIncrMemAllocPoolStat and PVRSRVStatsDecrMemAllocPoolStat
+        * inside page pool lock ensures that the stat reflects the state of the pool. */
+       PVRSRVStatsIncrMemAllocPoolStat(PAGE_SIZE * psPagePoolEntry->uiItemsRemaining);
+#endif
+
+       _PagePoolUnlock();
+
+       OSFreeMem(pvData);
+       OSAtomicDecrement(&g_iPoolCleanTasks);
+
+       return PVRSRV_OK;
+
+#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES)
+eExit:
+       /* We failed to zero the pages, so return the error to allow a
+        * retry on the next pass */
+       if ((psCleanupData->sCleanupWork.ui32RetryCount - 1) > 0)
+       {
+               return eError;
+       }
+
+       /* this was the last retry, give up and free pages to OS */
+       PVR_DPF((PVR_DBG_ERROR,
+                       "%s: Deferred task error, freeing pages to OS.",
+                       __func__));
+       _PagePoolLock();
+
+       *puiCounter -= psPagePoolEntry->uiItemsRemaining;
+
+       _PagePoolUnlock();
+
+       for (i = 0; i < psCleanupData->psPoolEntry->uiItemsRemaining; i++)
+       {
+               _FreeOSPage(0, IMG_TRUE, psPagePoolEntry->ppsPageArray[i]);
+       }
+       OSFreeMemNoStats(psPagePoolEntry->ppsPageArray);
+       kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry);
+       OSFreeMem(psCleanupData);
+
+       OSAtomicDecrement(&g_iPoolCleanTasks);
+
+       return PVRSRV_OK;
+#endif /* defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) */
+}
+
+
+/* Put page array to the page pool.
+ * Handles locking and checks whether the pages are
+ * suitable to be stored in the pool. */
+static inline IMG_BOOL
+_PutPagesToPoolLocked(IMG_UINT32 ui32CPUCacheFlags,
+                                         struct page **ppsPageArray,
+                                         IMG_BOOL bUnpinned,
+                                         IMG_UINT32 uiOrder,
+                                         IMG_UINT32 uiNumPages)
+{
+       LinuxCleanupData *psCleanupData;
+       PVRSRV_CLEANUP_THREAD_WORK *psCleanupThreadFn;
+#if defined(SUPPORT_PHYSMEM_TEST)
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+#endif
+
+       if (uiOrder == 0 &&
+               !bUnpinned &&
+               !PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags))
+       {
+               IMG_UINT32 uiEntries;
+               IMG_UINT32 *puiCounter;
+               struct list_head *psPoolHead;
+
+
+               _PagePoolLock();
+
+               uiEntries = _PagesInPoolUnlocked();
+
+               /* Check for number of current page pool entries and whether
+                * we have other asynchronous tasks in-flight */
+               if ( (uiEntries < g_ui32PagePoolMaxEntries) &&
+                    ((uiEntries + uiNumPages) <
+                     (g_ui32PagePoolMaxEntries + g_ui32PagePoolMaxExcessEntries) ))
+               {
+                       if (OSAtomicIncrement(&g_iPoolCleanTasks) <=
+                                       PVR_LINUX_PHYSMEM_MAX_ASYNC_CLEAN_TASKS)
+                       {
+#if defined(SUPPORT_PHYSMEM_TEST)
+                               if (!psPVRSRVData->hCleanupThread)
+                               {
+                                       goto eDecrement;
+                               }
+#endif
+
+                               psCleanupData = OSAllocMem(sizeof(*psCleanupData));
+
+                               if (!psCleanupData)
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR,
+                                                        "%s: Failed to get memory for deferred page pool cleanup. "
+                                                        "Trying to free pages immediately",
+                                                        __func__));
+                                       goto eDecrement;
+                               }
+
+                               psCleanupThreadFn = &psCleanupData->sCleanupWork;
+                               psCleanupData->ui32CPUCacheMode = ui32CPUCacheFlags;
+                               psCleanupData->psPoolEntry = kmem_cache_alloc(g_psLinuxPagePoolCache, GFP_KERNEL);
+
+                               if (!psCleanupData->psPoolEntry)
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR,
+                                                        "%s: Failed to get memory for deferred page pool cleanup. "
+                                                        "Trying to free pages immediately",
+                                                        __func__));
+                                       goto eFreeCleanupData;
+                               }
+
+                               if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead, &puiCounter))
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR,
+                                                        "%s: Failed to get correct page pool",
+                                                        __func__));
+                                       goto eFreePoolEntry;
+                               }
+
+                               /* Increase counter here to avoid deferred cleanup tasks piling up */
+                               *puiCounter = *puiCounter + uiNumPages;
+
+                               psCleanupData->psPoolEntry->ppsPageArray = ppsPageArray;
+                               psCleanupData->psPoolEntry->uiItemsRemaining = uiNumPages;
+
+                               psCleanupThreadFn->pfnFree = _CleanupThread_CleanPages;
+                               psCleanupThreadFn->pvData = psCleanupData;
+                               psCleanupThreadFn->bDependsOnHW = IMG_FALSE;
+                               CLEANUP_THREAD_SET_RETRY_COUNT(psCleanupThreadFn,
+                                                              CLEANUP_THREAD_RETRY_COUNT_DEFAULT);
+
+                               /* We must not hold the pool lock when calling AddWork because it might call us back to
+                                * free pooled pages directly when unloading the driver  */
+                               _PagePoolUnlock();
+
+                               PVRSRVCleanupThreadAddWork(psCleanupThreadFn);
+
+
+                       }
+                       else
+                       {
+                               goto eDecrement;
+                       }
+
+               }
+               else
+               {
+                       goto eUnlock;
+               }
+       }
+       else
+       {
+               goto eExitFalse;
+       }
+
+       return IMG_TRUE;
+
+eFreePoolEntry:
+       OSFreeMem(psCleanupData->psPoolEntry);
+eFreeCleanupData:
+       OSFreeMem(psCleanupData);
+eDecrement:
+       OSAtomicDecrement(&g_iPoolCleanTasks);
+eUnlock:
+       _PagePoolUnlock();
+eExitFalse:
+       return IMG_FALSE;
+}
+
+/* Get the GFP flags that we pass to the page allocator */
+static inline gfp_t
+_GetGFPFlags(IMG_BOOL bZero,
+             PVRSRV_DEVICE_NODE *psDevNode)
+{
+       struct device *psDev = psDevNode->psDevConfig->pvOSDevice;
+       gfp_t gfp_flags = GFP_USER | __GFP_NOWARN | __GFP_NOMEMALLOC;
+
+#if defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY)
+       /* Force use of HIGHMEM */
+       gfp_flags |= __GFP_HIGHMEM;
+
+       PVR_UNREFERENCED_PARAMETER(psDev);
+#else
+       if (psDev)
+       {
+#if defined(CONFIG_64BIT) || defined(CONFIG_ARM_LPAE) || defined(CONFIG_X86_PAE)
+               if (*psDev->dma_mask > DMA_BIT_MASK(32))
+               {
+                       /* If our system is able to handle large addresses use highmem */
+                       gfp_flags |= __GFP_HIGHMEM;
+               }
+               else if (*psDev->dma_mask == DMA_BIT_MASK(32))
+               {
+                       /* Limit to 32 bit.
+                        * Achieved by setting __GFP_DMA32 for 64 bit systems */
+                       gfp_flags |= __GFP_DMA32;
+               }
+               else
+               {
+                       /* Limit to size of DMA zone. */
+                       gfp_flags |= __GFP_DMA;
+               }
+#else
+               if (*psDev->dma_mask < DMA_BIT_MASK(32))
+               {
+                       gfp_flags |= __GFP_DMA;
+               }
+               else
+               {
+                       gfp_flags |= __GFP_HIGHMEM;
+               }
+#endif /* if defined(CONFIG_64BIT) || defined(CONFIG_ARM_LPAE) || defined(CONFIG_X86_PAE) */
+       }
+
+#endif /* if defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY) */
+
+       if (bZero)
+       {
+               gfp_flags |= __GFP_ZERO;
+       }
+
+       return gfp_flags;
+}
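+
+/* For example, on a 64-bit build without PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY,
+ * _GetGFPFlags() gives a device with a dma_mask wider than 32 bits
+ * GFP_USER | __GFP_NOWARN | __GFP_NOMEMALLOC | __GFP_HIGHMEM, a device with a
+ * 32-bit dma_mask gets __GFP_DMA32 instead of __GFP_HIGHMEM, and __GFP_ZERO
+ * is OR'ed in when bZero is set. */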
+
+/*
+ * @Function _PoisonDevicePage
+ *
+ * @Description  Poisons a device page. In the normal case the device page has
+ *               the same size as the OS page, so ui32DevPageOrder will be
+ *               equal to 0 and the page argument will point to a single OS
+ *               page structure. In the case of Non4K pages the order will be
+ *               greater than 0 and the page argument will point to an array
+ *               of OS-allocated pages.
+ *
+ * @Input psDevNode          pointer to the device object
+ * @Input page               array of the pages allocated from the OS
+ * @Input ui32DevPageOrder   order of the page (same as the one used to allocate
+ *                           the page array by alloc_pages())
+ * @Input ui32CPUCacheFlags  CPU cache flags applied to the page
+ * @Input ui8PoisonValue     value used to poison the page
+ */
+static void
+_PoisonDevicePage(PVRSRV_DEVICE_NODE *psDevNode,
+                  struct page *page,
+                  IMG_UINT32 ui32DevPageOrder,
+                  IMG_UINT32 ui32CPUCacheFlags,
+                  IMG_BYTE ui8PoisonValue)
+{
+       IMG_UINT32 ui32OsPageIdx;
+
+       for (ui32OsPageIdx = 0;
+            ui32OsPageIdx < (1U << ui32DevPageOrder);
+            ui32OsPageIdx++)
+       {
+               struct page *current_page = page + ui32OsPageIdx;
+               IMG_CPU_PHYADDR sCPUPhysAddrStart = {page_to_phys(current_page)};
+               IMG_CPU_PHYADDR sCPUPhysAddrEnd = {sCPUPhysAddrStart.uiAddr + PAGE_SIZE};
+
+               void *kvaddr = kmap_atomic(current_page);
+
+               /* kmap_atomic maps pages as cached so it's safe to use OSCachedMemSet
+                * here (also pages are always 8 bytes aligned anyway) */
+               OSCachedMemSet(kvaddr, ui8PoisonValue, PAGE_SIZE);
+
+               OSCPUCacheFlushRangeKM(psDevNode, kvaddr, kvaddr + PAGE_SIZE,
+                                      sCPUPhysAddrStart, sCPUPhysAddrEnd);
+
+               kunmap_atomic(kvaddr);
+       }
+}
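+
+/* For example, with 4 KB OS pages _PoisonDevicePage handles a standard device
+ * page (ui32DevPageOrder of 0) in a single pass, while a 2 MB Non4K device
+ * page (order 9) poisons 512 OS pages, one kmap_atomic mapping at a time. */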
+
+/* Allocate and initialise the structure to hold the metadata of the allocation */
+static PVRSRV_ERROR
+_AllocOSPageArray(PVRSRV_DEVICE_NODE *psDevNode,
+                                 PMR_SIZE_T uiChunkSize,
+                                 IMG_UINT32 ui32NumPhysChunks,
+                                 IMG_UINT32 ui32NumVirtChunks,
+                                 IMG_UINT32 uiLog2AllocPageSize,
+                                 IMG_UINT32 ui32AllocFlags,
+                                 IMG_UINT32 ui32CPUCacheFlags,
+                                 IMG_PID uiPid,
+                                 PMR_OSPAGEARRAY_DATA **ppsPageArrayDataPtr)
+{
+       PVRSRV_ERROR eError;
+       PMR_SIZE_T uiSize = uiChunkSize * ui32NumVirtChunks;
+       IMG_UINT32 uiNumOSPageSizeVirtPages;
+       IMG_UINT32 uiNumDevPageSizeVirtPages;
+       PMR_OSPAGEARRAY_DATA *psPageArrayData;
+       IMG_UINT64 ui64DmaMask = 0;
+       PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks);
+
+       /* Use of cast below is justified by the assertion that follows to
+        * prove that no significant bits have been truncated */
+       uiNumOSPageSizeVirtPages = (IMG_UINT32) (((uiSize - 1) >> PAGE_SHIFT) + 1);
+       PVR_ASSERT(((PMR_SIZE_T) uiNumOSPageSizeVirtPages << PAGE_SHIFT) == uiSize);
+
+       uiNumDevPageSizeVirtPages = uiNumOSPageSizeVirtPages >> (uiLog2AllocPageSize - PAGE_SHIFT);
+
+       /* Allocate the struct to hold the metadata */
+       psPageArrayData = kmem_cache_alloc(g_psLinuxPageArray, GFP_KERNEL);
+       if (psPageArrayData == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: OS refused the memory allocation for the private data.",
+                                __func__));
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto e_freed_none;
+       }
+
+       /*
+        * Allocate the page array
+        *
+        * We avoid tracking this memory because this structure might go into the page pool.
+        * The OS can drain the pool asynchronously and when doing that we have to avoid
+        * any potential deadlocks.
+        *
+        * In one scenario the process stats vmalloc hash table lock is held while
+        * the oom-killer softirq tries to call _ScanObjectsInPagePool(); the latter
+        * must not try to acquire the vmalloc hash table lock again.
+        */
+       psPageArrayData->pagearray = OSAllocZMemNoStats(sizeof(struct page *) * uiNumDevPageSizeVirtPages);
+       if (psPageArrayData->pagearray == NULL)
+       {
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto e_free_kmem_cache;
+       }
+       else
+       {
+               if (BIT_ISSET(ui32AllocFlags, FLAG_IS_CMA))
+               {
+                       /* Allocate additional DMA/CMA cpu kernel virtual address & device bus address array state */
+                       psPageArrayData->dmavirtarray = OSAllocZMemNoStats(sizeof(void*) * uiNumDevPageSizeVirtPages);
+                       if (psPageArrayData->dmavirtarray == NULL)
+                       {
+                               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto e_free_pagearray;
+                       }
+
+                       psPageArrayData->dmaphysarray = OSAllocZMemNoStats(sizeof(dma_addr_t) * uiNumDevPageSizeVirtPages);
+                       if (psPageArrayData->dmaphysarray == NULL)
+                       {
+                               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto e_free_cpuvirtaddrarray;
+                       }
+               }
+       }
+
+       if (psDevNode->psDevConfig && psDevNode->psDevConfig->pvOSDevice)
+       {
+               struct device *psDev = psDevNode->psDevConfig->pvOSDevice;
+               ui64DmaMask = *psDev->dma_mask;
+       }
+
+       /* Init metadata */
+       psPageArrayData->psDevNode = psDevNode;
+       psPageArrayData->uiPid = uiPid;
+       psPageArrayData->iNumOSPagesAllocated = 0;
+       psPageArrayData->uiTotalNumOSPages = uiNumOSPageSizeVirtPages;
+       psPageArrayData->uiLog2AllocPageSize = uiLog2AllocPageSize;
+       psPageArrayData->ui64DmaMask = ui64DmaMask;
+       psPageArrayData->ui32AllocFlags = ui32AllocFlags;
+       psPageArrayData->ui32CPUCacheFlags = ui32CPUCacheFlags;
+       psPageArrayData->ui32CMAAdjustedPageCount = 0;
+
+       *ppsPageArrayDataPtr = psPageArrayData;
+       return PVRSRV_OK;
+
+/* Error path */
+e_free_cpuvirtaddrarray:
+       OSFreeMemNoStats(psPageArrayData->dmavirtarray);
+
+e_free_pagearray:
+       OSFreeMemNoStats(psPageArrayData->pagearray);
+
+e_free_kmem_cache:
+       kmem_cache_free(g_psLinuxPageArray, psPageArrayData);
+       PVR_DPF((PVR_DBG_ERROR,
+                        "%s: OS refused the memory allocation for the page pointer table. "
+                        "Did you ask for too much?",
+                        __func__));
+
+e_freed_none:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+static inline void
+_ApplyCacheMaintenance(PVRSRV_DEVICE_NODE *psDevNode,
+                                          struct page **ppsPage,
+                                          IMG_UINT32 uiNumPages)
+{
+       void * pvAddr;
+
+       if (OSCPUCacheOpAddressType() == OS_CACHE_OP_ADDR_TYPE_VIRTUAL)
+       {
+               pgprot_t pgprot = PAGE_KERNEL;
+
+               IMG_UINT32 uiNumToClean = uiNumPages;
+               struct page **ppsCleanArray = ppsPage;
+
+               /* Map and flush the pages.
+                * For large page arrays do it PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES
+                * pages at a time. */
+               while (uiNumToClean != 0)
+               {
+                       IMG_UINT32 uiToClean = MIN(PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES,
+                                                  uiNumToClean);
+                       IMG_CPU_PHYADDR sUnused =
+                               { IMG_CAST_TO_CPUPHYADDR_UINT(0xCAFEF00DDEADBEEFULL) };
+
+                       pvAddr = pvr_vmap(ppsCleanArray, uiToClean, -1, pgprot);
+                       if (!pvAddr)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                               "Unable to flush page cache for new allocation, skipping flush."));
+                               return;
+                       }
+
+                       CacheOpExec(psDevNode,
+                                               pvAddr,
+                                               pvAddr + PAGE_SIZE,
+                                               sUnused,
+                                               sUnused,
+                                               PVRSRV_CACHE_OP_FLUSH);
+
+                       pvr_vunmap(pvAddr, uiToClean, pgprot);
+                       ppsCleanArray = &(ppsCleanArray[uiToClean]);
+                       uiNumToClean -= uiToClean;
+               }
+       }
+       else
+       {
+               IMG_UINT32 ui32Idx;
+
+               for (ui32Idx = 0; ui32Idx < uiNumPages; ++ui32Idx)
+               {
+                       IMG_CPU_PHYADDR sCPUPhysAddrStart, sCPUPhysAddrEnd;
+
+                       pvAddr = kmap(ppsPage[ui32Idx]);
+                       sCPUPhysAddrStart.uiAddr = page_to_phys(ppsPage[ui32Idx]);
+                       sCPUPhysAddrEnd.uiAddr = sCPUPhysAddrStart.uiAddr + PAGE_SIZE;
+
+                       /* If we're zeroing, we need to make sure the cleared memory is pushed out
+                        * of the cache before the cache lines are invalidated */
+                       CacheOpExec(psDevNode,
+                                               pvAddr,
+                                               pvAddr + PAGE_SIZE,
+                                               sCPUPhysAddrStart,
+                                               sCPUPhysAddrEnd,
+                                               PVRSRV_CACHE_OP_FLUSH);
+
+                       kunmap(ppsPage[ui32Idx]);
+               }
+       }
+}
+
+/* Changes the caching attributes of pages on x86 systems and takes care of
+ * cache maintenance. This function is supposed to be called once for pages that
+ * came from alloc_pages(). It expects an array of OS page sized pages!
+ *
+ * Flush/Invalidate pages in case the allocation is not cached. Necessary to
+ * remove pages from the cache that might be flushed later and corrupt memory. */
+static inline PVRSRV_ERROR
+_ApplyOSPagesAttribute(PVRSRV_DEVICE_NODE *psDevNode,
+                                          struct page **ppsPage,
+                                          IMG_UINT32 uiNumPages,
+                                          IMG_BOOL bFlush,
+                                          IMG_UINT32 ui32CPUCacheFlags)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       IMG_BOOL bCPUCached = PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags);
+       IMG_BOOL bCPUUncached = PVRSRV_CHECK_CPU_UNCACHED(ui32CPUCacheFlags);
+       IMG_BOOL bCPUWriteCombine = PVRSRV_CHECK_CPU_WRITE_COMBINE(ui32CPUCacheFlags);
+
+       if (ppsPage != NULL && uiNumPages != 0)
+       {
+#if defined(CONFIG_X86)
+               /* On x86 we have to set page cache attributes for non-cached pages.
+                * The call is implicitly taking care of all flushing/invalidating
+                * and therefore we can skip the usual cache maintenance after this. */
+               if (bCPUUncached || bCPUWriteCombine)
+               {
+                       /* On x86, if we already have a mapping (e.g. low memory) we need to
+                        * change the mode of the current mapping before we map it ourselves */
+                       int ret = IMG_FALSE;
+
+                       switch (PVRSRV_CPU_CACHE_MODE(ui32CPUCacheFlags))
+                       {
+                               case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+                                       ret = set_pages_array_uc(ppsPage, uiNumPages);
+                                       if (ret)
+                                       {
+                                               eError = PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE;
+                                               PVR_DPF((PVR_DBG_ERROR, "Setting Linux page caching mode to UC failed, returned %d", ret));
+                                       }
+                                       break;
+
+                               case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC:
+                                       ret = set_pages_array_wc(ppsPage, uiNumPages);
+                                       if (ret)
+                                       {
+                                               eError = PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE;
+                                               PVR_DPF((PVR_DBG_ERROR, "Setting Linux page caching mode to WC failed, returned %d", ret));
+                                       }
+                                       break;
+
+                               case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
+                                       break;
+
+                               default:
+                                       break;
+                       }
+               }
+               else
+#endif
+               {
+                       if ( bFlush ||
+                                bCPUUncached || bCPUWriteCombine ||
+                                (bCPUCached && PVRSRV_CHECK_CPU_CACHE_CLEAN(ui32CPUCacheFlags)) )
+                       {
+                               /*  We can be given pages which still remain in the cache.
+                                       In order to make sure that the data we write through our mappings
+                                       doesn't get overwritten by later cache evictions we invalidate the
+                                       pages that are given to us.
+
+                                       Note:
+                                       This still seems to be true if we request cold pages, it's just less
+                                       likely to be in the cache. */
+                               _ApplyCacheMaintenance(psDevNode,
+                                                                          ppsPage,
+                                                                          uiNumPages);
+                       }
+               }
+       }
+
+       return eError;
+}
+
+/* Same as _AllocOSPage except it uses the DMA framework to perform the allocation.
+ * uiPageIndex is expected to be the pagearray index at which to store the higher-order page. */
+static PVRSRV_ERROR
+_AllocOSPage_CMA(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+                               gfp_t gfp_flags,
+                               IMG_UINT32 ui32AllocOrder,
+                               IMG_UINT32 ui32MinOrder,
+                               IMG_UINT32 uiPageIndex)
+{
+       void *virt_addr;
+       struct page *page;
+       dma_addr_t bus_addr;
+       IMG_UINT32 uiAllocIsMisaligned;
+       size_t alloc_size = PAGE_SIZE << ui32AllocOrder;
+       struct device *dev = psPageArrayData->psDevNode->psDevConfig->pvOSDevice;
+       PVR_ASSERT(ui32AllocOrder == ui32MinOrder);
+
+       do
+       {
+               DisableOOMKiller();
+#if defined(PVR_LINUX_PHYSMEM_SUPPRESS_DMA_AC)
+               virt_addr = NULL;
+#else
+               virt_addr = dma_alloc_coherent(dev, alloc_size, &bus_addr, gfp_flags);
+#endif
+               if (virt_addr == NULL)
+               {
+                       /* The idea here is primarily to support some older kernels with
+                          broken or non-functioning DMA/CMA implementations (< Linux 3.4)
+                          and also to handle DMA/CMA allocation failures by attempting a
+                          normal page allocation. We expect dma_alloc_coherent() already
+                          attempts this internally before failing, but it does no harm
+                          to retry the allocation ourselves */
+                       page = alloc_pages(gfp_flags, ui32AllocOrder);
+                       if (page)
+                       {
+                               /* Taint bus_addr as an alloc_pages allocation, needed when
+                                  freeing; also record only the physical page address, which
+                                  prevents mapping possible high memory pages into the
+                                  kernel virtual address space and thereby exhausting
+                                  the VMALLOC address space */
+                               bus_addr = DMA_SET_ALLOCPG_ADDR(page_to_phys(page));
+                               virt_addr = (void*)(uintptr_t) DMA_VADDR_NOT_IN_USE;
+                       }
+                       else
+                       {
+                               EnableOOMKiller();
+                               return PVRSRV_ERROR_OUT_OF_MEMORY;
+                       }
+               }
+               else
+               {
+#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
+                       page = pfn_to_page(bus_addr >> PAGE_SHIFT);
+#else
+                       /* Assumes bus address space is identical to physical address space */
+                       page = phys_to_page(bus_addr);
+#endif
+               }
+               EnableOOMKiller();
+
+               /* Physical allocation alignment is handled here transparently: if the
+                  allocated buffer address does not meet its alignment requirement, we
+                  over-allocate using the next power-of-two order and report the
+                  alignment-adjusted values back to meet the requested constraint.
+                  This wastes memory, so we only do it when the allocation does not
+                  already meet the alignment constraint. */
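+               /* Worked example (hypothetical addresses): with ui32MinOrder = 2 the
+                  required alignment is 16KB. If the 16KB allocation comes back at bus
+                  address 0x10002000 (only 8KB aligned) it is freed and retried at
+                  order 3 (32KB). If that block also starts at 0x10002000, the
+                  adjustment is 0x10004000 - 0x10002000 = 0x2000, i.e. 2 pages; page,
+                  bus_addr and virt_addr are advanced by 0x2000 so the reported 16KB
+                  region is aligned, and the 2-page adjustment is encoded into bus_addr
+                  so that _FreeOSPage_CMA can recover the original block. */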
+               uiAllocIsMisaligned = DMA_GET_ADDR(bus_addr) & ((PAGE_SIZE<<ui32MinOrder)-1);
+               if (uiAllocIsMisaligned || ui32AllocOrder > ui32MinOrder)
+               {
+                       IMG_BOOL bUsedAllocPages = DMA_IS_ALLOCPG_ADDR(bus_addr);
+                       if (ui32AllocOrder == ui32MinOrder)
+                       {
+                               if (bUsedAllocPages)
+                               {
+                                       __free_pages(page, ui32AllocOrder);
+                               }
+                               else
+                               {
+                                       dma_free_coherent(dev, alloc_size, virt_addr, bus_addr);
+                               }
+
+                               ui32AllocOrder = ui32AllocOrder + 1;
+                               alloc_size = PAGE_SIZE << ui32AllocOrder;
+
+                               PVR_ASSERT(uiAllocIsMisaligned != 0);
+                       }
+                       else
+                       {
+                               size_t align_adjust = PAGE_SIZE << ui32MinOrder;
+
+                               /* Adjust virtual/bus addresses to meet alignment */
+                               bus_addr = bUsedAllocPages ? page_to_phys(page) : bus_addr;
+                               align_adjust = PVR_ALIGN((size_t)bus_addr, align_adjust);
+                               align_adjust -= (size_t)bus_addr;
+
+                               if (align_adjust)
+                               {
+                                       if (bUsedAllocPages)
+                                       {
+                                               page += align_adjust >> PAGE_SHIFT;
+                                               bus_addr = DMA_SET_ALLOCPG_ADDR(page_to_phys(page));
+                                               virt_addr = (void*)(uintptr_t) DMA_VADDR_NOT_IN_USE;
+                                       }
+                                       else
+                                       {
+                                               bus_addr += align_adjust;
+                                               virt_addr += align_adjust;
+#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
+                                               page = pfn_to_page(bus_addr >> PAGE_SHIFT);
+#else
+                                               /* Assumes bus address space is identical to physical address space */
+                                               page = phys_to_page(bus_addr);
+#endif
+                                       }
+
+                                       /* Store adjustments in PAGE_SIZE counts */
+                                       align_adjust = align_adjust >> PAGE_SHIFT;
+                                       bus_addr = DMA_SET_ALIGN_ADJUSTMENT(bus_addr, align_adjust);
+                               }
+
+                               /* Taint bus_addr due to over-allocation, allows us to free
+                                * memory correctly */
+                               bus_addr = DMA_SET_ADJUSTED_ADDR(bus_addr);
+                               uiAllocIsMisaligned = 0;
+                       }
+               }
+       } while (uiAllocIsMisaligned);
+
+       /* Account for any extra memory consumed by over-allocating for alignment */
+       psPageArrayData->ui32CMAAdjustedPageCount += (alloc_size - (PAGE_SIZE << ui32AllocOrder ));
+
+       psPageArrayData->dmavirtarray[uiPageIndex] = virt_addr;
+       psPageArrayData->dmaphysarray[uiPageIndex] = bus_addr;
+       psPageArrayData->pagearray[uiPageIndex] = page;
+
+       return PVRSRV_OK;
+}
+
+/* Allocates a page of order uiAllocOrder and stores it in the page array of
+ * psPageArrayData at position uiPageIndex.
+ *
+ * If the order is higher than 0, the page is split into order-0 pages which are
+ * stored at positions uiPageIndex to uiPageIndex + (1 << uiAllocOrder) - 1.
+ *
+ * This function is supposed to be used for uiMinOrder == 0 only! */
+static PVRSRV_ERROR
+_AllocOSPage(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+                       gfp_t gfp_flags,
+                       IMG_UINT32 uiAllocOrder,
+                       IMG_UINT32 uiMinOrder,
+                       IMG_UINT32 uiPageIndex)
+{
+       struct page *psPage;
+       IMG_UINT32 ui32Count;
+
+       /* Parameter check. If it fails we write into the wrong places in the array. */
+       PVR_ASSERT(uiMinOrder == 0);
+
+       /* Allocate the page */
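+       /* Attempt the allocation with the OOM killer disabled so that a failure is
+          reported back to the caller instead of killing other processes. */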
+       DisableOOMKiller();
+       psPage = alloc_pages(gfp_flags, uiAllocOrder);
+       EnableOOMKiller();
+
+       if (psPage == NULL)
+       {
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+       /* Split the higher-order page if necessary; this path should only be
+          used when the minimum (device page) order is 0, as higher-order
+          device pages should use DMA/CMA */
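+       /* split_page() gives each order-0 constituent of the higher-order allocation
+          its own reference count so that the pages can later be freed individually. */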
+       if (uiAllocOrder != 0)
+       {
+               split_page(psPage, uiAllocOrder);
+       }
+#endif
+
+       /* Store the page (or multiple split pages) in the page array */
+       for (ui32Count = 0; ui32Count < (1 << uiAllocOrder); ui32Count++)
+       {
+               psPageArrayData->pagearray[uiPageIndex + ui32Count] = &(psPage[ui32Count]);
+       }
+
+       return PVRSRV_OK;
+}
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+
+static inline void _AddMemAllocRecord_UmaPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+                                               struct page *psPage)
+{
+       IMG_CPU_PHYADDR sCPUPhysAddr = { page_to_phys(psPage) };
+       PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,
+                                    NULL, sCPUPhysAddr,
+                                    1 << psPageArrayData->uiLog2AllocPageSize,
+                                    NULL, psPageArrayData->uiPid
+                                    DEBUG_MEMSTATS_VALUES);
+}
+
+static inline void _RemoveMemAllocRecord_UmaPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+                                                  struct page *psPage)
+{
+       PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,
+                                       (IMG_UINT64) page_to_phys(psPage),
+                                       psPageArrayData->uiPid);
+}
+
+#else /* defined(PVRSRV_ENABLE_MEMORY_STATS) */
+
+static inline void _IncrMemAllocStat_UmaPages(size_t uiSize, IMG_PID uiPid)
+{
+       PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,
+                                   uiSize, uiPid);
+}
+
+static inline void _DecrMemAllocStat_UmaPages(size_t uiSize, IMG_PID uiPid)
+{
+       PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,
+                                   uiSize, uiPid);
+}
+
+#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */
+#endif /* defined(PVRSRV_ENABLE_PROCESS_STATS) */
+
+/* Allocation of OS pages: We may allocate 2^N order pages at a time for two reasons.
+ *
+ * Firstly, to support device pages which are larger than OS pages. By asking the OS
+ * for a 2^N order block of OS pages at a time we guarantee the device page is
+ * physically contiguous.
+ *
+ * Secondly, for performance: asking for 2^N order pages reduces the number of calls
+ * to alloc_pages, and thus the time taken for huge allocations.
+ *
+ * Regardless of the page order requested, we need to break the blocks down so that
+ * individual OS pages can be tracked. The maximum order requested is increased if
+ * all max-order allocations were successful; if any request fails we reduce the max order.
+ */
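+/* For example (illustrative figures): with 4KB OS pages and a 16KB device page
+ * size, uiLog2AllocPageSize - PAGE_SHIFT gives a minimum order of 2, i.e. each
+ * device page is backed by a physically contiguous block of 4 OS pages. */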
+static PVRSRV_ERROR
+_AllocOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData)
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 uiArrayIndex = 0;
+       IMG_UINT32 ui32Order;
+       IMG_UINT32 ui32MinOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT;
+       IMG_BOOL bIncreaseMaxOrder = IMG_TRUE;
+
+       IMG_UINT32 ui32NumPageReq;
+       IMG_UINT32 uiOSPagesToAlloc;
+       IMG_UINT32 uiDevPagesFromPool = 0;
+
+       gfp_t gfp_flags = _GetGFPFlags(ui32MinOrder ? BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_ZERO) : IMG_FALSE, /* Zero all pages later as batch */
+                                             psPageArrayData->psDevNode);
+       gfp_t ui32GfpFlags;
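+       /* Opportunistic high-order attempts must not enter reclaim or retry loops:
+          dropping __GFP_RECLAIM and adding __GFP_NORETRY makes them fail fast so
+          that the code below can fall back to smaller orders instead. */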
+       gfp_t ui32HighOrderGfpFlags = ((gfp_flags & ~__GFP_RECLAIM) | __GFP_NORETRY);
+
+       struct page **ppsPageArray = psPageArrayData->pagearray;
+       struct page **ppsPageAttributeArray = NULL;
+
+       uiOSPagesToAlloc = psPageArrayData->uiTotalNumOSPages;
+
+       /* Try to get pages from the pool since it is faster;
+          the page pool currently only supports zero-order pages
+          and thus excludes all DMA/CMA allocated memory */
+       _GetPagesFromPoolLocked(psPageArrayData->psDevNode,
+                                                       psPageArrayData->ui32CPUCacheFlags,
+                                                       uiOSPagesToAlloc,
+                                                       ui32MinOrder,
+                                                       BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_ZERO),
+                                                       ppsPageArray,
+                                                       &uiDevPagesFromPool);
+
+       uiArrayIndex = uiDevPagesFromPool;
+
+       if ((uiOSPagesToAlloc - uiDevPagesFromPool) < PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD)
+       {       /* Small allocations: ask for one device page at a time */
+               ui32Order = ui32MinOrder;
+               bIncreaseMaxOrder = IMG_FALSE;
+       }
+       else
+       {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+               /* Large zero-order or non-zero-order allocations: ask for
+                  MAX(max-order, min-order) order pages at a time; alloc
+                  failures throttle this down to zero-order allocations */
+               ui32Order = MAX(g_uiMaxOrder, ui32MinOrder);
+#else
+               /* Because split_page() is not available on older kernels
+                  we cannot mix-and-match any-order pages in the PMR;
+                  only same-order pages must be present in page array.
+                  So we unconditionally force it to use ui32MinOrder on
+                  these older kernels */
+               ui32Order = ui32MinOrder;
+#if defined(DEBUG)
+               if (! BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA))
+               {
+                       /* Check that this is zero */
+                       PVR_ASSERT(! ui32Order);
+               }
+#endif
+#endif
+       }
+
+       /* Only allow the allocation to fail if we are asking for more contiguity than we actually need */
+       ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags;
+       ui32NumPageReq = (1 << ui32Order);
+
+       while (uiArrayIndex < uiOSPagesToAlloc)
+       {
+               IMG_UINT32 ui32PageRemain = uiOSPagesToAlloc - uiArrayIndex;
+
+               while (ui32NumPageReq > ui32PageRemain)
+               {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+                       /* The number of pages to request exceeds the number remaining,
+                          so ask for fewer to avoid over-allocating */
+                       ui32Order = MAX(ui32Order >> 1, ui32MinOrder);
+#else
+                       /* The number of pages to request exceeds the number remaining,
+                          but do nothing and over-allocate, as we do not support mixing
+                          pages of different orders in the PMR page array on older
+                          kernels (this simplifies the page free logic) */
+                       PVR_ASSERT(ui32Order == ui32MinOrder);
+#endif
+                       ui32NumPageReq = (1 << ui32Order);
+                       ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags;
+               }
+
+               if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA))
+               {
+                       /* As the DMA/CMA framework rounds the request up to the next
+                          power-of-two, we request multiple uiMinOrder pages to satisfy
+                          the allocation and minimise wasted memory */
+                       eError = _AllocOSPage_CMA(psPageArrayData,
+                                                                         ui32GfpFlags,
+                                                                         ui32Order,
+                                                                         ui32MinOrder,
+                                                                         uiArrayIndex >> ui32MinOrder);
+               }
+               else
+               {
+                       /* Allocate a block of order ui32Order at uiArrayIndex */
+                       eError = _AllocOSPage(psPageArrayData,
+                                                                 ui32GfpFlags,
+                                                                 ui32Order,
+                                                                 ui32MinOrder,
+                                                                 uiArrayIndex);
+               }
+
+               if (eError == PVRSRV_OK)
+               {
+                       /* Successful request. Move onto next. */
+                       uiArrayIndex += ui32NumPageReq;
+               }
+               else
+               {
+                       if (ui32Order > ui32MinOrder)
+                       {
+                               /* Last request failed. Let's ask for less next time */
+                               ui32Order = MAX(ui32Order >> 1, ui32MinOrder);
+                               bIncreaseMaxOrder = IMG_FALSE;
+                               ui32NumPageReq = (1 << ui32Order);
+                               ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags;
+                               g_uiMaxOrder = ui32Order;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0))
+                               /* We should not trigger this code path in older kernels,
+                                  this is enforced by ensuring ui32Order == ui32MinOrder */
+                               PVR_ASSERT(ui32Order == ui32MinOrder);
+#endif
+                       }
+                       else
+                       {
+                               /* Failed to alloc pages at the required contiguity; fail the allocation */
+                               PVR_DPF((PVR_DBG_ERROR, "%s: %s failed to honour request at %u of %u, flags = %x, order = %u (%s)",
+                                                               __func__,
+                                                               BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA) ? "dma_alloc_coherent" : "alloc_pages",
+                                                               uiArrayIndex,
+                                                               uiOSPagesToAlloc,
+                                                               ui32GfpFlags,
+                                                               ui32Order,
+                                                               PVRSRVGetErrorString(eError)));
+                               eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+                               goto e_free_pages;
+                       }
+               }
+       }
+
+       if (bIncreaseMaxOrder && (g_uiMaxOrder < PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM))
+       {       /* All successful allocations on max order. Let's ask for more next time */
+               g_uiMaxOrder++;
+       }
+
+       /* Construct table of page pointers to apply attributes */
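+       /* For CMA allocations the page array holds one entry per (higher-order)
+          device page, so it is expanded below into one pointer per OS page because
+          the attribute and cache maintenance routines operate on order-0 pages. */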
+       ppsPageAttributeArray = &ppsPageArray[uiDevPagesFromPool];
+       if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA))
+       {
+               IMG_UINT32 uiIdx, uiIdy, uiIdz;
+
+               ppsPageAttributeArray = OSAllocMem(sizeof(struct page *) * uiOSPagesToAlloc);
+               PVR_LOG_GOTO_IF_NOMEM(ppsPageAttributeArray, eError, e_free_pages);
+
+               for (uiIdx = 0; uiIdx < uiOSPagesToAlloc; uiIdx += ui32NumPageReq)
+               {
+                       uiIdy = uiIdx >> ui32Order;
+                       for (uiIdz = 0; uiIdz < ui32NumPageReq; uiIdz++)
+                       {
+                               ppsPageAttributeArray[uiIdx+uiIdz] = ppsPageArray[uiIdy];
+                               ppsPageAttributeArray[uiIdx+uiIdz] += uiIdz;
+                       }
+               }
+       }
+
+       if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_ZERO) && ui32MinOrder == 0)
+       {
+               eError = _MemsetPageArray(uiOSPagesToAlloc - uiDevPagesFromPool,
+                                         ppsPageAttributeArray, PAGE_KERNEL,
+                                         PVRSRV_ZERO_VALUE);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "Failed to zero pages (fast)"));
+                       goto e_free_pages;
+               }
+       }
+       else if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_POISON_ON_ALLOC))
+       {
+               /* need to call twice because ppsPageArray and ppsPageAttributeArray
+                * can point to different allocations: first for pages obtained from
+                * the pool and then the remaining pages */
+               eError = _MemsetPageArray(uiDevPagesFromPool, ppsPageArray, PAGE_KERNEL,
+                                         PVRSRV_POISON_ON_ALLOC_VALUE);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "Failed to poison pages (fast)"));
+               }
+               eError = _MemsetPageArray(uiOSPagesToAlloc - uiDevPagesFromPool,
+                                         ppsPageAttributeArray, PAGE_KERNEL,
+                                         PVRSRV_POISON_ON_ALLOC_VALUE);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "Failed to poison pages (fast)"));
+               }
+
+               /* for poisoning need to also flush the pool pages as the 0s have
+                * been overwritten */
+               _ApplyCacheMaintenance(psPageArrayData->psDevNode, ppsPageArray,
+                                      uiDevPagesFromPool);
+       }
+
+       /* Do the cache management as required */
+       eError = _ApplyOSPagesAttribute(psPageArrayData->psDevNode,
+                                                                       ppsPageAttributeArray,
+                                                                       uiOSPagesToAlloc - uiDevPagesFromPool,
+                                                                       BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_ZERO) ||
+                                                                       BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_POISON_ON_ALLOC),
+                                                                       psPageArrayData->ui32CPUCacheFlags);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Failed to set page attributes"));
+               goto e_free_pages;
+       }
+       else
+       {
+               if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA))
+               {
+                       OSFreeMem(ppsPageAttributeArray);
+               }
+       }
+
+       /* Update metadata */
+       psPageArrayData->iNumOSPagesAllocated = psPageArrayData->uiTotalNumOSPages;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+       {
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+               IMG_UINT32 ui32NumPages =
+                       psPageArrayData->iNumOSPagesAllocated >> ui32MinOrder;
+               IMG_UINT32 i;
+
+               for (i = 0; i < ui32NumPages; i++)
+               {
+                       if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA))
+                       {
+                               _AddMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i]);
+                       }
+                       else
+                       {
+                               _AddMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i << ui32MinOrder]);
+                       }
+               }
+#else /* defined(PVRSRV_ENABLE_MEMORY_STATS) */
+               _IncrMemAllocStat_UmaPages(((uiOSPagesToAlloc * PAGE_SIZE)+(psPageArrayData->ui32CMAAdjustedPageCount)),
+                                          psPageArrayData->uiPid);
+#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */
+       }
+#endif /* defined(PVRSRV_ENABLE_PROCESS_STATS) */
+
+       return PVRSRV_OK;
+
+/* Error path */
+e_free_pages:
+       {
+               IMG_UINT32 ui32PageToFree;
+
+               if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA))
+               {
+                       IMG_UINT32 uiDevArrayIndex = uiArrayIndex >> ui32Order;
+                       IMG_UINT32 uiDevPageSize = PAGE_SIZE << ui32Order;
+                       PVR_ASSERT(ui32Order == ui32MinOrder);
+
+                       if (ppsPageAttributeArray)
+                       {
+                               OSFreeMem(ppsPageAttributeArray);
+                       }
+
+                       for (ui32PageToFree = 0; ui32PageToFree < uiDevArrayIndex; ui32PageToFree++)
+                       {
+                               _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
+                                                               uiDevPageSize,
+                                                               ui32MinOrder,
+                                                               psPageArrayData->dmavirtarray[ui32PageToFree],
+                                                               psPageArrayData->dmaphysarray[ui32PageToFree],
+                                                               ppsPageArray[ui32PageToFree]);
+                               psPageArrayData->dmaphysarray[ui32PageToFree] = (dma_addr_t)0;
+                               psPageArrayData->dmavirtarray[ui32PageToFree] = NULL;
+                               ppsPageArray[ui32PageToFree] = NULL;
+                       }
+               }
+               else
+               {
+                       /* Free the pages we got from the pool */
+                       for (ui32PageToFree = 0; ui32PageToFree < uiDevPagesFromPool; ui32PageToFree++)
+                       {
+                               _FreeOSPage(ui32MinOrder,
+                                                       BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_UNSET_MEMORY_TYPE),
+                                                       ppsPageArray[ui32PageToFree]);
+                               ppsPageArray[ui32PageToFree] = NULL;
+                       }
+
+                       for (ui32PageToFree = uiDevPagesFromPool; ui32PageToFree < uiArrayIndex; ui32PageToFree++)
+                       {
+                               _FreeOSPage(ui32MinOrder, IMG_FALSE, ppsPageArray[ui32PageToFree]);
+                               ppsPageArray[ui32PageToFree] = NULL;
+                       }
+               }
+
+               return eError;
+       }
+}
+
+static INLINE PVRSRV_ERROR
+_CheckIfIndexInRange(IMG_UINT32 ui32Index, IMG_UINT32 *pui32Indices, IMG_UINT32 ui32Limit)
+{
+       if (pui32Indices[ui32Index] >= ui32Limit)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Given alloc index %u at %u is larger than page array %u.",
+                       __func__, pui32Indices[ui32Index], ui32Index, ui32Limit));
+               return PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+       }
+
+       return PVRSRV_OK;
+}
+
+static INLINE PVRSRV_ERROR
+_CheckIfPageNotAllocated(IMG_UINT32 ui32Index, IMG_UINT32 *pui32Indices, struct page **ppsPageArray)
+{
+       if (ppsPageArray[pui32Indices[ui32Index]] != NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Mapping number %u at page array index %u already exists. "
+                       "Page struct %p", __func__, pui32Indices[ui32Index], ui32Index,
+                       ppsPageArray[pui32Indices[ui32Index]]));
+               return PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS;
+       }
+
+       return PVRSRV_OK;
+}
+
+/* Allocation of OS pages: This function is used for sparse allocations.
+ *
+ * Sparse allocations provide physical backing for only a portion of the total
+ * virtual range. */
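+/* For example (hypothetical figures): a PMR covering 8 device pages with physical
+ * backing requested only at device-page indices 1, 4 and 5 would be called with
+ * puiAllocIndices = {1, 4, 5} and uiDevPagesToAlloc = 3; only those slots of the
+ * page array are populated. */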
+static PVRSRV_ERROR
+_AllocOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+                                        IMG_UINT32 *puiAllocIndices,
+                                        IMG_UINT32 uiDevPagesToAlloc)
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 i;
+       struct page **ppsPageArray = psPageArrayData->pagearray;
+       IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT;
+       IMG_UINT32 uiDevPagesFromPool = 0;
+       IMG_UINT32 uiOSPagesToAlloc = uiDevPagesToAlloc * (1 << uiOrder);
+       IMG_UINT32 uiDevPagesAllocated = psPageArrayData->uiTotalNumOSPages >> uiOrder;
+       const IMG_UINT32 ui32AllocFlags = psPageArrayData->ui32AllocFlags;
+       gfp_t ui32GfpFlags = _GetGFPFlags(uiOrder ? BIT_ISSET(ui32AllocFlags, FLAG_ZERO):
+                                                                         IMG_FALSE, /* Zero pages later as batch */
+                                                                         psPageArrayData->psDevNode);
+
+       /* We use this page array to receive pages from the pool and then reuse it afterwards to
+        * store pages that need their cache attribute changed on x86 */
+       struct page **ppsTempPageArray;
+       IMG_UINT32 uiTempPageArrayIndex = 0;
+
+       /* Allocate the temporary page array that we need here to receive pages
+        * from the pool and to store pages that need their caching attributes changed.
+        * It is sized in OS pages so that the attribute function can be used on it later. */
+       ppsTempPageArray = OSAllocMem(sizeof(struct page*) * uiOSPagesToAlloc);
+       PVR_LOG_GOTO_IF_NOMEM(ppsTempPageArray, eError, e_exit);
+
+       /* Check whether the requested number of pages fits in the page array */
+       if (uiDevPagesAllocated <
+               ((psPageArrayData->iNumOSPagesAllocated >> uiOrder) + uiDevPagesToAlloc))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Trying to allocate more pages (Order %u) than this buffer can handle, "
+                                "Request + Allocated must not exceed Max! Request %u, Allocated %u, Max %u.",
+                                __func__,
+                                uiOrder,
+                                uiDevPagesToAlloc,
+                                psPageArrayData->iNumOSPagesAllocated >> uiOrder,
+                                uiDevPagesAllocated));
+               eError = PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE;
+               goto e_free_temp_array;
+       }
+
+       /* Try to get pages from the pool since it is faster. The pages from pool are going to be
+        * allocated only if:
+        * - PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES == 1 && uiOrder == 0
+        * - PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES == 0 && uiOrder == 0 &&
+        *   !BIT_ISSET(ui32AllocFlags, FLAG_ZERO) */
+       _GetPagesFromPoolLocked(psPageArrayData->psDevNode,
+                                                       psPageArrayData->ui32CPUCacheFlags,
+                                                       uiDevPagesToAlloc,
+                                                       uiOrder,
+                                                       BIT_ISSET(ui32AllocFlags, FLAG_ZERO),
+                                                       ppsTempPageArray,
+                                                       &uiDevPagesFromPool);
+
+       /* In general device pages can have an order higher than 0, but the page pool
+        * only ever provides order-0 pages, so pool pages map directly onto OS page
+        * entries (in other words, if we're allocating non-4k pages uiDevPagesFromPool
+        * will always be 0) */
+       uiTempPageArrayIndex = uiDevPagesFromPool;
+
+       /* Move pages we got from the pool to the array. */
+       for (i = 0; i < uiDevPagesFromPool; i++)
+       {
+               eError = _CheckIfIndexInRange(i, puiAllocIndices, uiDevPagesAllocated);
+               PVR_GOTO_IF_ERROR(eError, e_free_pool_pages);
+               eError = _CheckIfPageNotAllocated(i, puiAllocIndices, ppsPageArray);
+               PVR_GOTO_IF_ERROR(eError, e_free_pool_pages);
+
+               ppsPageArray[puiAllocIndices[i]] = ppsTempPageArray[i];
+       }
+
+       /* Allocate pages from the OS */
+       for (i = uiDevPagesFromPool; i < uiDevPagesToAlloc; i++)
+       {
+               eError = _CheckIfIndexInRange(i, puiAllocIndices, uiDevPagesAllocated);
+               PVR_GOTO_IF_ERROR(eError, e_free_pages);
+               eError = _CheckIfPageNotAllocated(i, puiAllocIndices, ppsPageArray);
+               PVR_GOTO_IF_ERROR(eError, e_free_pages);
+
+               /* Allocate pages and assign them to the array. */
+               if (BIT_ISSET(ui32AllocFlags, FLAG_IS_CMA))
+               {
+                       /* As the DMA/CMA framework rounds the request up to the next
+                          power-of-two, we request multiple uiMinOrder pages to satisfy
+                          the allocation and minimise wasted memory */
+                       eError = _AllocOSPage_CMA(psPageArrayData,
+                                                                         ui32GfpFlags,
+                                                                         uiOrder,
+                                                                         uiOrder,
+                                                                         puiAllocIndices[i]);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "Failed to alloc CMA pages"));
+                               goto e_free_pages;
+                       }
+               }
+               else
+               {
+                       DisableOOMKiller();
+                       ppsPageArray[puiAllocIndices[i]] = alloc_pages(ui32GfpFlags, uiOrder);
+                       EnableOOMKiller();
+               }
+
+               if (ppsPageArray[puiAllocIndices[i]] != NULL)
+               {
+                       /* Append pages to the temporary array so it's easier to process
+                        * them later on. */
+
+                       if (BIT_ISSET(ui32AllocFlags, FLAG_IS_CMA))
+                       {
+                               IMG_UINT32 idx;
+                               struct page *psPageAddr;
+
+                               psPageAddr = ppsPageArray[puiAllocIndices[i]];
+
+                               /* "divide" CMA pages into OS pages if they have higher order */
+                               for (idx = 0; idx < (1 << uiOrder); idx++)
+                               {
+                                       ppsTempPageArray[uiTempPageArrayIndex + idx] = psPageAddr;
+                                       psPageAddr++;
+                               }
+                               uiTempPageArrayIndex += (1 << uiOrder);
+                       }
+                       else
+                       {
+                               ppsTempPageArray[uiTempPageArrayIndex] = ppsPageArray[puiAllocIndices[i]];
+                               uiTempPageArrayIndex++;
+                       }
+               }
+               else
+               {
+                       /* Failed to alloc pages at the required contiguity; fail the allocation */
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s: alloc_pages failed to honour request at %u of %u, flags = %x, order = %u",
+                               __func__, i, uiDevPagesToAlloc, ui32GfpFlags, uiOrder));
+                       eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+                       goto e_free_pages;
+               }
+       }
+
+       if (BIT_ISSET(ui32AllocFlags, FLAG_ZERO) && uiOrder == 0)
+       {
+               /* At this point this array contains pages allocated from the page pool at its start
+                * and pages allocated from the OS after that.
+                * If there are pages from the pool here they must be zeroed already hence we don't have
+                * to do it again. This is because if PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES is enabled pool pages
+                * are zeroed in the cleanup thread. If it's disabled they aren't, and in that case we never
+                * allocate pages with FLAG_ZERO from the pool. This is why those pages need to be zeroed
+                * here.
+                * All of the above is true for the 0 order pages. For higher order we never allocated from
+                * the pool and those pages are allocated already zeroed from the OS.
+                * Long story short we can always skip pages allocated from the pool because they are either
+                * zeroed or we didn't allocate any of them. */
+               eError = _MemsetPageArray(uiTempPageArrayIndex - uiDevPagesFromPool,
+                                         &ppsTempPageArray[uiDevPagesFromPool],
+                                         PAGE_KERNEL, PVRSRV_ZERO_VALUE);
+               PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "failed to zero pages (sparse)", e_free_pages);
+       }
+       else if (BIT_ISSET(ui32AllocFlags, FLAG_POISON_ON_ALLOC))
+       {
+               /* Here we need to poison all of the pages regardless if they were
+                * allocated from the pool or from the system. */
+               eError = _MemsetPageArray(uiTempPageArrayIndex, ppsTempPageArray,
+                                         PAGE_KERNEL, PVRSRV_POISON_ON_ALLOC_VALUE);
+               PVR_LOG_IF_FALSE(eError == PVRSRV_OK, "failed to poison pages (sparse)");
+
+               /* We need to flush the cache for the poisoned pool pages here. The flush for the pages
+                * allocated from the system is done below because we also need to add appropriate cache
+                * attributes to them. Pages allocated from the pool already come with correct caching
+                * mode. */
+               _ApplyCacheMaintenance(psPageArrayData->psDevNode, ppsTempPageArray, uiDevPagesFromPool);
+       }
+
+       /* Do the cache management as required */
+       eError = _ApplyOSPagesAttribute(psPageArrayData->psDevNode,
+                                       &ppsTempPageArray[uiDevPagesFromPool],
+                                       uiTempPageArrayIndex - uiDevPagesFromPool,
+                                       BIT_ISSET(ui32AllocFlags, FLAG_ZERO) ||
+                                       BIT_ISSET(ui32AllocFlags, FLAG_POISON_ON_ALLOC),
+                                       psPageArrayData->ui32CPUCacheFlags);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Failed to set page attributes"));
+               goto e_free_pages;
+       }
+
+       /* Update metadata */
+       psPageArrayData->iNumOSPagesAllocated += uiOSPagesToAlloc;
+
+       /* Free temporary page array */
+       OSFreeMem(ppsTempPageArray);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+       for (i = 0; i < uiDevPagesToAlloc; i++)
+       {
+               _AddMemAllocRecord_UmaPages(psPageArrayData,
+                                           ppsPageArray[puiAllocIndices[i]]);
+       }
+#else
+       _IncrMemAllocStat_UmaPages(((uiOSPagesToAlloc * PAGE_SIZE)+(psPageArrayData->ui32CMAAdjustedPageCount)),
+                                  psPageArrayData->uiPid);
+#endif
+#endif
+
+       return PVRSRV_OK;
+
+e_free_pages:
+       if (BIT_ISSET(ui32AllocFlags, FLAG_IS_CMA))
+       {
+               IMG_UINT32 uiDevPageSize = PAGE_SIZE << uiOrder;
+
+               /* Free the pages we just allocated from the CMA */
+               for (; i > uiDevPagesFromPool; i--)
+               {
+                       _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
+                                       uiDevPageSize,
+                                       uiOrder,
+                                       psPageArrayData->dmavirtarray[puiAllocIndices[i-1]],
+                                       psPageArrayData->dmaphysarray[puiAllocIndices[i-1]],
+                                       ppsPageArray[puiAllocIndices[i-1]]);
+                       psPageArrayData->dmaphysarray[puiAllocIndices[i-1]] = (dma_addr_t) 0;
+                       psPageArrayData->dmavirtarray[puiAllocIndices[i-1]] = NULL;
+                       ppsPageArray[puiAllocIndices[i-1]] = NULL;
+               }
+       }
+       else
+       {
+               /* Free the pages we just allocated from the OS */
+               for (; i > uiDevPagesFromPool; i--)
+               {
+                       _FreeOSPage(0, IMG_FALSE, ppsPageArray[puiAllocIndices[i-1]]);
+                       ppsPageArray[puiAllocIndices[i-1]] = NULL;
+               }
+       }
+
+e_free_pool_pages:
+       /* And now free all of the pages we allocated from the pool. */
+       for (i = 0; i < uiDevPagesFromPool; i++)
+       {
+               _FreeOSPage(0, BIT_ISSET(ui32AllocFlags, FLAG_UNSET_MEMORY_TYPE),
+                           ppsTempPageArray[i]);
+
+               /* not using _CheckIfIndexInRange() to avoid printing an error message */
+               if (puiAllocIndices[i] < uiDevPagesAllocated)
+               {
+                       ppsPageArray[puiAllocIndices[i]] = NULL;
+               }
+       }
+
+e_free_temp_array:
+       OSFreeMem(ppsTempPageArray);
+
+e_exit:
+       return eError;
+}
+
+/* Allocate pages for a given page array.
+ *
+ * The allocation path taken depends on whether an array of allocation
+ * indices has been passed or not */
+static PVRSRV_ERROR
+_AllocOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+                         IMG_UINT32 *puiAllocIndices,
+                         IMG_UINT32 uiPagesToAlloc)
+{
+       PVRSRV_ERROR eError;
+       struct page **ppsPageArray;
+
+       /* Parameter checks */
+       PVR_ASSERT(NULL != psPageArrayData);
+       if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA))
+       {
+               PVR_ASSERT(psPageArrayData->dmaphysarray != NULL);
+               PVR_ASSERT(psPageArrayData->dmavirtarray != NULL);
+       }
+       PVR_ASSERT(psPageArrayData->pagearray != NULL);
+       PVR_ASSERT(0 <= psPageArrayData->iNumOSPagesAllocated);
+
+       ppsPageArray = psPageArrayData->pagearray;
+
+       /* Go the sparse alloc path if we have an array with alloc indices.*/
+       if (puiAllocIndices != NULL)
+       {
+               eError = _AllocOSPages_Sparse(psPageArrayData,
+                                                                         puiAllocIndices,
+                                                                         uiPagesToAlloc);
+       }
+       else
+       {
+               eError = _AllocOSPages_Fast(psPageArrayData);
+       }
+
+       if (eError != PVRSRV_OK)
+       {
+               goto e_exit;
+       }
+
+       _DumpPageArray(ppsPageArray,
+                      psPageArrayData->uiTotalNumOSPages >>
+                      (psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT) );
+
+       PVR_DPF((PVR_DBG_MESSAGE, "physmem_osmem_linux.c: allocated OS memory for PMR @0x%p", psPageArrayData));
+       return PVRSRV_OK;
+
+e_exit:
+       return eError;
+}
+
+/* Same as _FreeOSPage except the memory is freed using the DMA framework */
+static INLINE void
+_FreeOSPage_CMA(struct device *dev,
+                               size_t alloc_size,
+                               IMG_UINT32 uiOrder,
+                               void *virt_addr,
+                               dma_addr_t dev_addr,
+                               struct page *psPage)
+{
+       if (DMA_IS_ALLOCPG_ADDR(dev_addr))
+       {
+#if defined(CONFIG_X86)
+               void *pvPageVAddr = page_address(psPage);
+               if (pvPageVAddr)
+               {
+                       int ret = set_memory_wb((unsigned long)pvPageVAddr, 1);
+                       if (ret)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                               "%s: Failed to reset page attribute",
+                                               __func__));
+                       }
+               }
+#endif
+
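+               /* If the allocation was bumped up one order for alignment (see
+                  _AllocOSPage_CMA), step back by the recorded page adjustment and
+                  free the original, larger block. */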
+               if (DMA_IS_ADDR_ADJUSTED(dev_addr))
+               {
+                       psPage -= DMA_GET_ALIGN_ADJUSTMENT(dev_addr);
+                       uiOrder += 1;
+               }
+
+               __free_pages(psPage, uiOrder);
+       }
+       else
+       {
+               if (DMA_IS_ADDR_ADJUSTED(dev_addr))
+               {
+                       size_t align_adjust;
+
+                       align_adjust = DMA_GET_ALIGN_ADJUSTMENT(dev_addr);
+                       alloc_size = alloc_size << 1;
+
+                       dev_addr = DMA_GET_ADDR(dev_addr);
+                       dev_addr -= align_adjust << PAGE_SHIFT;
+                       virt_addr -= align_adjust << PAGE_SHIFT;
+               }
+
+               dma_free_coherent(dev, alloc_size, virt_addr, DMA_GET_ADDR(dev_addr));
+       }
+}
+
+/* Free a single page back to the OS.
+ * Make sure the cache type is set back to the default value.
+ *
+ * Note:
+ * We must _only_ check bUnsetMemoryType in the case where we need to free
+ * the page back to the OS since we may have to revert the cache properties
+ * of the page to the default as given by the OS when it was allocated. */
+static void
+_FreeOSPage(IMG_UINT32 uiOrder,
+                       IMG_BOOL bUnsetMemoryType,
+                       struct page *psPage)
+{
+
+#if defined(CONFIG_X86)
+       void *pvPageVAddr;
+       pvPageVAddr = page_address(psPage);
+
+       if (pvPageVAddr && bUnsetMemoryType)
+       {
+               int ret;
+
+               ret = set_memory_wb((unsigned long)pvPageVAddr, 1);
+               if (ret)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attribute",
+                                        __func__));
+               }
+       }
+#else
+       PVR_UNREFERENCED_PARAMETER(bUnsetMemoryType);
+#endif
+       __free_pages(psPage, uiOrder);
+}
+
+/* Free the struct holding the metadata */
+static PVRSRV_ERROR
+_FreeOSPagesArray(PMR_OSPAGEARRAY_DATA *psPageArrayData)
+{
+       PVR_DPF((PVR_DBG_MESSAGE, "physmem_osmem_linux.c: freed OS memory for PMR @0x%p", psPageArrayData));
+
+       /* Check if the page array actually still exists.
+        * It might be the case that it has been moved to the page pool */
+       if (psPageArrayData->pagearray != NULL)
+       {
+               OSFreeMemNoStats(psPageArrayData->pagearray);
+       }
+
+       kmem_cache_free(g_psLinuxPageArray, psPageArrayData);
+
+       return PVRSRV_OK;
+}
+
+/* Free all or some pages from a sparse page array */
+static PVRSRV_ERROR
+_FreeOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+                                       IMG_UINT32 *pai32FreeIndices,
+                                       IMG_UINT32 ui32FreePageCount)
+{
+       IMG_BOOL bSuccess;
+       IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT;
+       IMG_UINT32 uiPageIndex, i, j, uiTempIdx = 0;
+       struct page **ppsPageArray = psPageArrayData->pagearray;
+       IMG_UINT32 uiNumPages;
+
+       struct page **ppsTempPageArray;
+       IMG_UINT32 uiTempArraySize;
+
+       /* We really should have something to free before we call this */
+       PVR_ASSERT(psPageArrayData->iNumOSPagesAllocated != 0);
+
+       if (pai32FreeIndices == NULL)
+       {
+               uiNumPages = psPageArrayData->uiTotalNumOSPages >> uiOrder;
+               uiTempArraySize = psPageArrayData->iNumOSPagesAllocated;
+       }
+       else
+       {
+               uiNumPages = ui32FreePageCount;
+               uiTempArraySize = ui32FreePageCount << uiOrder;
+       }
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS)
+       for (i = 0; i < uiNumPages; i++)
+       {
+               IMG_UINT32 idx = pai32FreeIndices ? pai32FreeIndices[i] : i;
+
+               if (NULL != ppsPageArray[idx])
+               {
+                       _RemoveMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[idx]);
+               }
+       }
+#endif
+
+       if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_POISON_ON_FREE))
+       {
+               for (i = 0; i < uiNumPages; i++)
+               {
+                       IMG_UINT32 idx = pai32FreeIndices ? pai32FreeIndices[i] : i;
+
+                       if (NULL != ppsPageArray[idx])
+                       {
+                               _PoisonDevicePage(psPageArrayData->psDevNode,
+                                                 ppsPageArray[idx],
+                                                 uiOrder,
+                                                 psPageArrayData->ui32CPUCacheFlags,
+                                                 PVRSRV_POISON_ON_FREE_VALUE);
+                       }
+               }
+       }
+
+       if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA))
+       {
+               IMG_UINT32 uiDevNumPages = uiNumPages;
+               IMG_UINT32 uiDevPageSize = 1<<psPageArrayData->uiLog2AllocPageSize;
+
+               for (i = 0; i < uiDevNumPages; i++)
+               {
+                       IMG_UINT32 idx = pai32FreeIndices ? pai32FreeIndices[i] : i;
+                       if (NULL != ppsPageArray[idx])
+                       {
+                               _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
+                                                               uiDevPageSize,
+                                                               uiOrder,
+                                                               psPageArrayData->dmavirtarray[idx],
+                                                               psPageArrayData->dmaphysarray[idx],
+                                                               ppsPageArray[idx]);
+                               psPageArrayData->dmaphysarray[idx] = (dma_addr_t)0;
+                               psPageArrayData->dmavirtarray[idx] = NULL;
+                               ppsPageArray[idx] = NULL;
+                               uiTempIdx++;
+                       }
+               }
+               uiTempIdx <<= uiOrder;
+       }
+       else
+       {
+
+               /* OSAllocMemNoStats required because this code may be run without the bridge lock held */
+               ppsTempPageArray = OSAllocMemNoStats(sizeof(struct page*) * uiTempArraySize);
+               if (ppsTempPageArray == NULL)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Failed free_pages metadata allocation", __func__));
+                       return PVRSRV_ERROR_OUT_OF_MEMORY;
+               }
+
+               /* Put pages in a contiguous array so further processing is easier */
+               for (i = 0; i < uiNumPages; i++)
+               {
+                       uiPageIndex = pai32FreeIndices ? pai32FreeIndices[i] : i;
+                       if (NULL != ppsPageArray[uiPageIndex])
+                       {
+                               struct page *psPage = ppsPageArray[uiPageIndex];
+
+                               for (j = 0; j < (1<<uiOrder); j++)
+                               {
+                                       ppsTempPageArray[uiTempIdx] = psPage;
+                                       uiTempIdx++;
+                                       psPage++;
+                               }
+
+                               ppsPageArray[uiPageIndex] = NULL;
+                       }
+               }
+
+               /* Try to move the temp page array to the pool */
+               bSuccess = _PutPagesToPoolLocked(psPageArrayData->ui32CPUCacheFlags,
+                                                                                ppsTempPageArray,
+                                                                                BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_UNPINNED),
+                                                                                0,
+                                                                                uiTempIdx);
+               if (bSuccess)
+               {
+                       goto exit_ok;
+               }
+
+               /* Free pages and reset page caching attributes on x86 */
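+               /* Pages whose kernel-mapping attributes were switched to UC/WC at
+                  allocation time (tracked by FLAG_UNSET_MEMORY_TYPE) must be set
+                  back to write-back before being handed back to the OS. */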
+#if defined(CONFIG_X86)
+               if (uiTempIdx != 0 && BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_UNSET_MEMORY_TYPE))
+               {
+                       int iError;
+                       iError = set_pages_array_wb(ppsTempPageArray, uiTempIdx);
+
+                       if (iError)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", __func__));
+                       }
+               }
+#endif
+
+               /* Free the pages */
+               for (i = 0; i < uiTempIdx; i++)
+               {
+                       __free_pages(ppsTempPageArray[i], 0);
+               }
+
+               /* Free the temp page array here if it did not move to the pool */
+               OSFreeMemNoStats(ppsTempPageArray);
+       }
+
+exit_ok:
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS)
+       _DecrMemAllocStat_UmaPages(((uiTempIdx * PAGE_SIZE)-(psPageArrayData->ui32CMAAdjustedPageCount)),
+                                  psPageArrayData->uiPid);
+#endif
+
+       if (pai32FreeIndices && ((uiTempIdx >> uiOrder) != ui32FreePageCount))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Probable sparse duplicate indices: ReqFreeCount: %d "
+                               "ActualFreedCount: %d", __func__, ui32FreePageCount, (uiTempIdx >> uiOrder)));
+       }
+       /* Update metadata */
+       psPageArrayData->iNumOSPagesAllocated -= uiTempIdx;
+       PVR_ASSERT(0 <= psPageArrayData->iNumOSPagesAllocated);
+       return PVRSRV_OK;
+}
+
+/* Free all the pages in a page array */
+static PVRSRV_ERROR
+_FreeOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData)
+{
+       IMG_BOOL bSuccess;
+       IMG_UINT32 i;
+       IMG_UINT32 uiNumPages = psPageArrayData->uiTotalNumOSPages;
+       IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT;
+       IMG_UINT32 uiDevNumPages = uiNumPages >> uiOrder;
+       IMG_UINT32 uiDevPageSize = PAGE_SIZE << uiOrder;
+       struct page **ppsPageArray = psPageArrayData->pagearray;
+
+       /* We really should have something to free before we call this */
+       PVR_ASSERT(psPageArrayData->iNumOSPagesAllocated != 0);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+       for (i = 0; i < uiDevNumPages; i++)
+       {
+               if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA))
+               {
+                       _RemoveMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i]);
+               }
+               else
+               {
+                       _RemoveMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i << uiOrder]);
+               }
+       }
+#else
+       _DecrMemAllocStat_UmaPages(((uiNumPages * PAGE_SIZE)-(psPageArrayData->ui32CMAAdjustedPageCount)),
+                                  psPageArrayData->uiPid);
+#endif
+#endif
+
+       if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_POISON_ON_FREE))
+       {
+               for (i = 0; i < uiDevNumPages; i++)
+               {
+                       _PoisonDevicePage(psPageArrayData->psDevNode,
+                                         ppsPageArray[i],
+                                         uiOrder,
+                                         psPageArrayData->ui32CPUCacheFlags,
+                                         PVRSRV_POISON_ON_FREE_VALUE);
+               }
+       }
+
+       /* Try to move the page array to the pool */
+       bSuccess = _PutPagesToPoolLocked(psPageArrayData->ui32CPUCacheFlags,
+                                                                        ppsPageArray,
+                                                                        BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_UNPINNED),
+                                                                        uiOrder,
+                                                                        uiNumPages);
+       if (bSuccess)
+       {
+               psPageArrayData->pagearray = NULL;
+               goto exit_ok;
+       }
+
+       if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA))
+       {
+               for (i = 0; i < uiDevNumPages; i++)
+               {
+                       _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
+                                                       uiDevPageSize,
+                                                       uiOrder,
+                                                       psPageArrayData->dmavirtarray[i],
+                                                       psPageArrayData->dmaphysarray[i],
+                                                       ppsPageArray[i]);
+                       psPageArrayData->dmaphysarray[i] = (dma_addr_t)0;
+                       psPageArrayData->dmavirtarray[i] = NULL;
+                       ppsPageArray[i] = NULL;
+               }
+       }
+       else
+       {
+#if defined(CONFIG_X86)
+               if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_UNSET_MEMORY_TYPE))
+               {
+                       int ret;
+
+                       ret = set_pages_array_wb(ppsPageArray, uiNumPages);
+                       if (ret)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes",
+                                                __func__));
+                       }
+               }
+#endif
+
+               for (i = 0; i < uiNumPages; i++)
+               {
+                       _FreeOSPage(uiOrder, IMG_FALSE, ppsPageArray[i]);
+                       ppsPageArray[i] = NULL;
+               }
+       }
+
+exit_ok:
+       /* Update metadata */
+       psPageArrayData->iNumOSPagesAllocated = 0;
+       return PVRSRV_OK;
+}
+
+/* Free pages from a page array.
+ * Takes care of mem stats and chooses correct free path depending on parameters. */
+static PVRSRV_ERROR
+_FreeOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+                        IMG_UINT32 *pai32FreeIndices,
+                        IMG_UINT32 ui32FreePageCount)
+{
+       PVRSRV_ERROR eError;
+
+       /* Go the sparse or non-sparse path */
+       if (psPageArrayData->iNumOSPagesAllocated != psPageArrayData->uiTotalNumOSPages
+               || pai32FreeIndices != NULL)
+       {
+               eError = _FreeOSPages_Sparse(psPageArrayData,
+                                                                        pai32FreeIndices,
+                                                                        ui32FreePageCount);
+       }
+       else
+       {
+               eError = _FreeOSPages_Fast(psPageArrayData);
+       }
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "_FreeOSPages_FreePages failed"));
+       }
+
+       _DumpPageArray(psPageArrayData->pagearray,
+                      psPageArrayData->uiTotalNumOSPages >>
+                     (psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT) );
+
+       return eError;
+}
+
+/*
+ *
+ * Implementation of callback functions
+ *
+ */
+
+/* The destruction function is called after the last reference disappears,
+ * but before the PMR itself is freed.
+ */
+static PVRSRV_ERROR
+PMRFinalizeOSMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+       PVRSRV_ERROR eError;
+       PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
+
+       /* We can't free pages until now. */
+       if (psOSPageArrayData->iNumOSPagesAllocated != 0)
+       {
+#if defined(DEBUG) && defined(SUPPORT_VALIDATION)
+               PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+               IMG_UINT32 ui32UMALeakMax = psPVRSRVData->sMemLeakIntervals.ui32GPU;
+
+               mutex_lock(&g_sUMALeakMutex);
+
+               g_ui32UMALeakCounter++;
+               if (ui32UMALeakMax && g_ui32UMALeakCounter >= ui32UMALeakMax)
+               {
+                       g_ui32UMALeakCounter = 0;
+                       mutex_unlock(&g_sUMALeakMutex);
+
+                       PVR_DPF((PVR_DBG_WARNING, "%s: Skipped freeing of PMR 0x%p to trigger memory leak.", __func__, pvPriv));
+                       return PVRSRV_OK;
+               }
+
+               mutex_unlock(&g_sUMALeakMutex);
+#endif
+               _PagePoolLock();
+               if (BIT_ISSET(psOSPageArrayData->ui32AllocFlags, FLAG_UNPINNED))
+               {
+                       _RemoveUnpinListEntryUnlocked(psOSPageArrayData);
+               }
+               _PagePoolUnlock();
+
+               eError = _FreeOSPages(psOSPageArrayData,
+                                                         NULL,
+                                                         0);
+               PVR_ASSERT(eError == PVRSRV_OK); /* can we do better? */
+       }
+
+       eError = _FreeOSPagesArray(psOSPageArrayData);
+       PVR_ASSERT(eError == PVRSRV_OK); /* can we do better? */
+       return PVRSRV_OK;
+}
+
+/* Callback function for locking the system physical page addresses.
+ * This function must be called before the lookup address function. */
+static PVRSRV_ERROR
+PMRLockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+       PVRSRV_ERROR eError;
+       PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
+
+       if (BIT_ISSET(psOSPageArrayData->ui32AllocFlags, FLAG_ONDEMAND))
+       {
+               /* Allocate Memory for deferred allocation */
+               eError = _AllocOSPages(psOSPageArrayData, NULL, psOSPageArrayData->uiTotalNumOSPages);
+               if (eError != PVRSRV_OK)
+               {
+                       return eError;
+               }
+       }
+
+       eError = PVRSRV_OK;
+       return eError;
+}
+
+static PVRSRV_ERROR
+PMRUnlockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+       /* Just drops the refcount. */
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
+
+       if (BIT_ISSET(psOSPageArrayData->ui32AllocFlags, FLAG_ONDEMAND))
+       {
+               /* Free Memory for deferred allocation */
+               eError = _FreeOSPages(psOSPageArrayData,
+                                                         NULL,
+                                                         0);
+               if (eError != PVRSRV_OK)
+               {
+                       return eError;
+               }
+       }
+
+       PVR_ASSERT(eError == PVRSRV_OK);
+       return eError;
+}
+
+static INLINE IMG_BOOL IsOffsetValid(const PMR_OSPAGEARRAY_DATA *psOSPageArrayData,
+                                     IMG_UINT32 ui32Offset)
+{
+       return (ui32Offset >> psOSPageArrayData->uiLog2AllocPageSize) <
+           psOSPageArrayData->uiTotalNumOSPages;
+}
+
+/* Determine PA for specified offset into page array. */
+static IMG_DEV_PHYADDR GetOffsetPA(const PMR_OSPAGEARRAY_DATA *psOSPageArrayData,
+                                   IMG_UINT32 ui32Offset)
+{
+       IMG_UINT32 ui32Log2AllocPageSize = psOSPageArrayData->uiLog2AllocPageSize;
+       IMG_UINT32 ui32PageIndex = ui32Offset >> ui32Log2AllocPageSize;
+       IMG_UINT32 ui32InPageOffset = ui32Offset - (ui32PageIndex << ui32Log2AllocPageSize);
+       IMG_DEV_PHYADDR sPA;
+
+       PVR_ASSERT(ui32InPageOffset < (1U << ui32Log2AllocPageSize));
+
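+       /* Illustrative example (assuming 4 KB device pages, ui32Log2AllocPageSize = 12):
+        * ui32Offset = 0x5123 gives ui32PageIndex = 5 and ui32InPageOffset = 0x123,
+        * so the returned address is page_to_phys(pagearray[5]) + 0x123. */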
+       sPA.uiAddr = page_to_phys(psOSPageArrayData->pagearray[ui32PageIndex]);
+       sPA.uiAddr += ui32InPageOffset;
+
+       return sPA;
+}
+
+/* N.B. It is assumed that PMRLockSysPhysAddressesOSMem() is called _before_ this function! */
+static PVRSRV_ERROR
+PMRSysPhysAddrOSMem(PMR_IMPL_PRIVDATA pvPriv,
+                                       IMG_UINT32 ui32Log2PageSize,
+                                       IMG_UINT32 ui32NumOfPages,
+                                       IMG_DEVMEM_OFFSET_T *puiOffset,
+                                       IMG_BOOL *pbValid,
+                                       IMG_DEV_PHYADDR *psDevPAddr)
+{
+       const PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
+       IMG_UINT32 uiIdx;
+
+       if (psOSPageArrayData->uiLog2AllocPageSize < ui32Log2PageSize)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Requested physical addresses from PMR "
+                        "for incompatible contiguity %u!",
+                        __func__,
+                        ui32Log2PageSize));
+               return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+       }
+
+       for (uiIdx=0; uiIdx < ui32NumOfPages; uiIdx++)
+       {
+               if (pbValid[uiIdx])
+               {
+                       PVR_LOG_RETURN_IF_FALSE(IsOffsetValid(psOSPageArrayData, puiOffset[uiIdx]),
+                                               "puiOffset out of range", PVRSRV_ERROR_OUT_OF_RANGE);
+
+                       psDevPAddr[uiIdx] = GetOffsetPA(psOSPageArrayData, puiOffset[uiIdx]);
+
+#if !defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY)
+                       /* This is just a precaution; normally this should always
+                        * be available. */
+                       if (psOSPageArrayData->ui64DmaMask)
+                       {
+                               if (psDevPAddr[uiIdx].uiAddr > psOSPageArrayData->ui64DmaMask)
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s: physical address"
+                                                       " (%" IMG_UINT64_FMTSPECX ") out of allowable range"
+                                                       " [0; %" IMG_UINT64_FMTSPECX "]", __func__,
+                                                       psDevPAddr[uiIdx].uiAddr,
+                                                       psOSPageArrayData->ui64DmaMask));
+                                       BUG();
+                               }
+                       }
+#endif
+               }
+       }
+
+       return PVRSRV_OK;
+}
+
+typedef struct _PMR_OSPAGEARRAY_KERNMAP_DATA_ {
+       void *pvBase;
+       IMG_UINT32 ui32PageCount;
+       pgprot_t PageProps;
+} PMR_OSPAGEARRAY_KERNMAP_DATA;
+
+static PVRSRV_ERROR
+PMRAcquireKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv,
+                                                                size_t uiOffset,
+                                                                size_t uiSize,
+                                                                void **ppvKernelAddressOut,
+                                                                IMG_HANDLE *phHandleOut,
+                                                                PMR_FLAGS_T ulFlags)
+{
+       PVRSRV_ERROR eError;
+       PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
+       void *pvAddress;
+       pgprot_t prot = PAGE_KERNEL;
+       IMG_UINT32 ui32PageOffset=0;
+       size_t uiMapOffset=0;
+       IMG_UINT32 ui32PageCount = 0;
+       IMG_UINT32 uiLog2AllocPageSize = psOSPageArrayData->uiLog2AllocPageSize;
+       IMG_UINT32 uiOSPageShift = OSGetPageShift();
+       IMG_UINT32 uiPageSizeDiff = 0;
+       struct page **pagearray;
+       PMR_OSPAGEARRAY_KERNMAP_DATA *psData;
+
+       /* For cases where the device page size is greater than the OS page size,
+        * multiple physically contiguous OS pages constitute one device page.
+        * However, only the address of the first OS page of such an ensemble is
+        * stored in the driver's mapping table. Hence, when mapping the PMR in
+        * part or in full, all OS pages that constitute a device page must also
+        * be mapped into the kernel.
+        *
+        * The case where the device page size is less than the OS page size is
+        * treated the same way as when the page sizes are equal. */
+       if (uiLog2AllocPageSize > uiOSPageShift)
+       {
+               uiPageSizeDiff = uiLog2AllocPageSize - uiOSPageShift;
+       }
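+       /* Illustrative example: with 16 KB device pages (uiLog2AllocPageSize = 14)
+        * and 4 KB OS pages (uiOSPageShift = 12), uiPageSizeDiff = 2, i.e. each
+        * device page is made up of 1 << 2 = 4 contiguous OS pages. */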
+
+       /*
+               A zero offset and size have a special meaning: map the whole of the
+               PMR. This is because the callers of this callback might not be able
+               to determine the physical size.
+       */
+       if ((uiOffset == 0) && (uiSize == 0))
+       {
+               ui32PageOffset = 0;
+               uiMapOffset = 0;
+               /* Page count = amount of OS pages */
+               ui32PageCount = psOSPageArrayData->iNumOSPagesAllocated;
+       }
+       else
+       {
+               size_t uiEndoffset;
+
+               ui32PageOffset = uiOffset >> uiLog2AllocPageSize;
+               uiMapOffset = uiOffset - (ui32PageOffset << uiLog2AllocPageSize);
+               uiEndoffset = uiOffset + uiSize - 1;
+               /* Add one as we want the count, not the offset */
+               /* Page count = amount of device pages (note uiLog2AllocPageSize being used) */
+               ui32PageCount = (uiEndoffset >> uiLog2AllocPageSize) + 1;
+               ui32PageCount -= ui32PageOffset;
+
+               /* The OS page count to be mapped might be different if the
+                * OS page size is less than the device page size */
+               ui32PageCount <<= uiPageSizeDiff;
+       }
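+       /* Illustrative example (4 KB device pages, uiLog2AllocPageSize = 12,
+        * uiPageSizeDiff = 0): uiOffset = 0x1800 and uiSize = 0x2000 give
+        * ui32PageOffset = 1, uiMapOffset = 0x800, uiEndoffset = 0x37ff and
+        * ui32PageCount = (3 + 1) - 1 = 3, i.e. device pages 1 to 3 are mapped. */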
+
+       switch (PVRSRV_CPU_CACHE_MODE(psOSPageArrayData->ui32CPUCacheFlags))
+       {
+               case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+                               prot = pgprot_noncached(prot);
+                               break;
+
+               case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC:
+                               prot = pgprot_writecombine(prot);
+                               break;
+
+               case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
+                               break;
+
+               default:
+                               eError = PVRSRV_ERROR_INVALID_PARAMS;
+                               goto e0;
+       }
+
+       if (uiPageSizeDiff)
+       {
+               /* Each device page can be broken down into ui32SubPageCount OS pages */
+               IMG_UINT32 ui32SubPageCount = 1 << uiPageSizeDiff;
+               IMG_UINT32 i;
+               struct page **psPage = &psOSPageArrayData->pagearray[ui32PageOffset];
+
+               /* Allocate enough memory for the OS page pointers for this mapping */
+               pagearray = OSAllocMem(ui32PageCount * sizeof(pagearray[0]));
+
+               if (pagearray == NULL)
+               {
+                       eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                       goto e0;
+               }
+
+               /* construct array that holds the page pointers that constitute the requested
+                * mapping */
+               for (i = 0; i < ui32PageCount; i++)
+               {
+                       IMG_UINT32 ui32OSPageArrayIndex  = i / ui32SubPageCount;
+                       IMG_UINT32 ui32OSPageArrayOffset = i % ui32SubPageCount;
+
+                       /*
+                        * The driver only stores OS page pointers for the first OS page
+                        * within each device page (psPage[ui32OSPageArrayIndex]).
+                        * Get the next OS page structure at device page granularity,
+                        * then calculate OS page pointers for all the other pages.
+                        */
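+                       /* Illustrative example: with ui32SubPageCount = 4, i = 5 gives
+                        * ui32OSPageArrayIndex = 1 and ui32OSPageArrayOffset = 1, so
+                        * pagearray[5] = psPage[1] + 1, the second OS page of the
+                        * second device page in the requested mapping. */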
+                       pagearray[i] = psPage[ui32OSPageArrayIndex] + ui32OSPageArrayOffset;
+               }
+       }
+       else
+       {
+               pagearray = &psOSPageArrayData->pagearray[ui32PageOffset];
+       }
+
+       psData = OSAllocMem(sizeof(*psData));
+       if (psData == NULL)
+       {
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto e1;
+       }
+
+       pvAddress = pvr_vmap(pagearray, ui32PageCount, VM_READ | VM_WRITE, prot);
+       if (pvAddress == NULL)
+       {
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto e2;
+       }
+
+       *ppvKernelAddressOut = pvAddress + uiMapOffset;
+       psData->pvBase = pvAddress;
+       psData->ui32PageCount = ui32PageCount;
+       psData->PageProps = prot;
+       *phHandleOut = psData;
+
+       if (uiPageSizeDiff)
+       {
+               OSFreeMem(pagearray);
+       }
+
+       return PVRSRV_OK;
+
+       /*
+         error exit paths follow
+       */
+e2:
+       OSFreeMem(psData);
+e1:
+       if (uiPageSizeDiff)
+       {
+               OSFreeMem(pagearray);
+       }
+e0:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+static void PMRReleaseKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv,
+                                                                                        IMG_HANDLE hHandle)
+{
+       PMR_OSPAGEARRAY_KERNMAP_DATA *psData = hHandle;
+       PVR_UNREFERENCED_PARAMETER(pvPriv);
+
+       pvr_vunmap(psData->pvBase, psData->ui32PageCount, psData->PageProps);
+       OSFreeMem(psData);
+}
+
+static
+PVRSRV_ERROR PMRUnpinOSMem(PMR_IMPL_PRIVDATA pPriv)
+{
+       PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pPriv;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       /* Lock down the pool and add the array to the unpin list */
+       _PagePoolLock();
+
+       /* Check current state */
+       PVR_ASSERT(BIT_ISSET(psOSPageArrayData->ui32AllocFlags, FLAG_UNPINNED) == IMG_FALSE);
+       PVR_ASSERT(BIT_ISSET(psOSPageArrayData->ui32AllocFlags, FLAG_ONDEMAND) == IMG_FALSE);
+
+       eError = _AddUnpinListEntryUnlocked(psOSPageArrayData);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Unable to add allocation to unpinned list (%d).",
+                        __func__,
+                        eError));
+
+               goto e_exit;
+       }
+
+       /* Set the Unpinned bit */
+       BIT_SET(psOSPageArrayData->ui32AllocFlags, FLAG_UNPINNED);
+
+e_exit:
+       _PagePoolUnlock();
+       return eError;
+}
+
+static
+PVRSRV_ERROR PMRPinOSMem(PMR_IMPL_PRIVDATA pPriv,
+                                               PMR_MAPPING_TABLE *psMappingTable)
+{
+       PVRSRV_ERROR eError;
+       PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pPriv;
+       IMG_UINT32 *pui32MapTable = NULL;
+       IMG_UINT32 i, j = 0, ui32Temp = 0;
+
+       _PagePoolLock();
+
+       /* Check current state */
+       PVR_ASSERT(BIT_ISSET(psOSPageArrayData->ui32AllocFlags, FLAG_UNPINNED));
+
+       /* Clear unpinned bit */
+       BIT_UNSET(psOSPageArrayData->ui32AllocFlags, FLAG_UNPINNED);
+
+       /* If there are still pages in the array, remove the entry from the unpin list */
+       if (psOSPageArrayData->iNumOSPagesAllocated != 0)
+       {
+               _RemoveUnpinListEntryUnlocked(psOSPageArrayData);
+               _PagePoolUnlock();
+
+               eError = PVRSRV_OK;
+               goto e_exit_mapalloc_failure;
+       }
+       _PagePoolUnlock();
+
+       /* If pages were reclaimed we allocate new ones and
+        * return PVRSRV_ERROR_PMR_NEW_MEMORY */
+       if (psMappingTable->ui32NumVirtChunks == 1)
+       {
+               eError = _AllocOSPages(psOSPageArrayData, NULL, psOSPageArrayData->uiTotalNumOSPages);
+       }
+       else
+       {
+               pui32MapTable = (IMG_UINT32 *)OSAllocMem(sizeof(*pui32MapTable) * psMappingTable->ui32NumPhysChunks);
+               if (NULL == pui32MapTable)
+               {
+                       eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+                       PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Unable to Alloc Map Table.",
+                                        __func__));
+                       goto e_exit_mapalloc_failure;
+               }
+
+               for (i = 0, j = 0; i < psMappingTable->ui32NumVirtChunks; i++)
+               {
+                       ui32Temp = psMappingTable->aui32Translation[i];
+                       if (TRANSLATION_INVALID != ui32Temp)
+                       {
+                               pui32MapTable[j++] = ui32Temp;
+                       }
+               }
+               eError = _AllocOSPages(psOSPageArrayData, pui32MapTable, psMappingTable->ui32NumPhysChunks);
+       }
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Unable to get new pages for unpinned allocation.",
+                                __func__));
+
+               eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+               goto e_exit;
+       }
+
+       PVR_DPF((PVR_DBG_MESSAGE,
+                        "%s: Allocating new pages for unpinned allocation. "
+                        "Old content is lost!",
+                        __func__));
+
+       eError = PVRSRV_ERROR_PMR_NEW_MEMORY;
+
+e_exit:
+       OSFreeMem(pui32MapTable);
+e_exit_mapalloc_failure:
+       return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       PMRChangeSparseMemOSMem
+@Description    This function changes the sparse mapping by allocating and
+                freeing pages. It changes the GPU and CPU maps accordingly.
+@Return         PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+PMRChangeSparseMemOSMem(PMR_IMPL_PRIVDATA pPriv,
+                                               const PMR *psPMR,
+                                               IMG_UINT32 ui32AllocPageCount,
+                                               IMG_UINT32 *pai32AllocIndices,
+                                               IMG_UINT32 ui32FreePageCount,
+                                               IMG_UINT32 *pai32FreeIndices,
+                                               IMG_UINT32 uiFlags)
+{
+       PVRSRV_ERROR eError;
+
+       PMR_MAPPING_TABLE *psPMRMapTable = PMR_GetMappingTable(psPMR);
+       PMR_OSPAGEARRAY_DATA *psPMRPageArrayData = (PMR_OSPAGEARRAY_DATA *)pPriv;
+       struct page **psPageArray = psPMRPageArrayData->pagearray;
+       void **psDMAVirtArray = psPMRPageArrayData->dmavirtarray;
+       dma_addr_t *psDMAPhysArray = psPMRPageArrayData->dmaphysarray;
+
+       struct page *psPage;
+       dma_addr_t psDMAPAddr;
+       void *pvDMAVAddr;
+
+       IMG_UINT32 ui32AdtnlAllocPages = 0; /*<! Number of pages to alloc from the OS */
+       IMG_UINT32 ui32AdtnlFreePages = 0; /*<! Number of pages to free back to the OS */
+       IMG_UINT32 ui32CommonRequestCount = 0; /*<! Number of pages to move position in the page array */
+       IMG_UINT32 ui32Loop = 0;
+       IMG_UINT32 ui32Index = 0;
+       IMG_UINT32 uiAllocpgidx;
+       IMG_UINT32 uiFreepgidx;
+       IMG_UINT32 uiOrder = psPMRPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT;
+       IMG_BOOL bCMA = BIT_ISSET(psPMRPageArrayData->ui32AllocFlags, FLAG_IS_CMA);
+
+
+       /* Check SPARSE flags and calculate pages to allocate and free */
+       if (SPARSE_RESIZE_BOTH == (uiFlags & SPARSE_RESIZE_BOTH))
+       {
+               ui32CommonRequestCount = (ui32AllocPageCount > ui32FreePageCount) ?
+                               ui32FreePageCount : ui32AllocPageCount;
+
+               PDUMP_PANIC(PMR_DeviceNode(psPMR), SPARSEMEM_SWAP, "Request to swap alloc & free pages not supported");
+       }
+
+       if (SPARSE_RESIZE_ALLOC == (uiFlags & SPARSE_RESIZE_ALLOC))
+       {
+               ui32AdtnlAllocPages = ui32AllocPageCount - ui32CommonRequestCount;
+       }
+       else
+       {
+               ui32AllocPageCount = 0;
+       }
+
+       if (SPARSE_RESIZE_FREE == (uiFlags & SPARSE_RESIZE_FREE))
+       {
+               ui32AdtnlFreePages = ui32FreePageCount - ui32CommonRequestCount;
+       }
+       else
+       {
+               ui32FreePageCount = 0;
+       }
+
+       if (0 == (ui32CommonRequestCount || ui32AdtnlAllocPages || ui32AdtnlFreePages))
+       {
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Missing parameters for number of pages to alloc/free",
+                        __func__));
+               return eError;
+       }
+
+       /* The incoming request is classified into two operations independent of
+        * each other: alloc & free pages.
+        * These operations can be combined with two mapping operations as well
+        * which are GPU & CPU space mappings.
+        *
+        * From the alloc and free page requests, the net amount of pages to be
+        * allocated or freed is computed. Pages that were requested to be freed
+        * will be reused to fulfil alloc requests.
+        *
+        * The order of operations is:
+        * 1. Allocate new pages from the OS
+        * 2. Move the free pages from free request to alloc positions.
+        * 3. Free the rest of the pages not used for alloc
+        *
+        * Alloc parameters are validated at the time of allocation
+        * and any error will be handled then. */
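+       /* Illustrative example: ui32AllocPageCount = 5, ui32FreePageCount = 3 and
+        * both resize flags set give ui32CommonRequestCount = 3,
+        * ui32AdtnlAllocPages = 2 and ui32AdtnlFreePages = 0: two pages are newly
+        * allocated and the three pages named in the free request are moved to
+        * the remaining alloc indices. */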
+
+       /* Validate the free indices */
+       if (ui32FreePageCount)
+       {
+               if (NULL != pai32FreeIndices)
+               {
+
+                       for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++)
+                       {
+                               uiFreepgidx = pai32FreeIndices[ui32Loop];
+
+                               if (uiFreepgidx > (psPMRPageArrayData->uiTotalNumOSPages >> uiOrder))
+                               {
+                                       eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+                                       goto e0;
+                               }
+
+                               if (NULL == psPageArray[uiFreepgidx])
+                               {
+                                       eError = PVRSRV_ERROR_INVALID_PARAMS;
+                                       PVR_DPF((PVR_DBG_ERROR,
+                                                "%s: Trying to free non-allocated page",
+                                                __func__));
+                                       goto e0;
+                               }
+                       }
+               }
+               else
+               {
+                       eError = PVRSRV_ERROR_INVALID_PARAMS;
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: Given non-zero free count but missing indices array",
+                                __func__));
+                       return eError;
+               }
+       }
+
+       /* Validate the alloc indices */
+       for (ui32Loop = ui32AdtnlAllocPages; ui32Loop < ui32AllocPageCount; ui32Loop++)
+       {
+               uiAllocpgidx = pai32AllocIndices[ui32Loop];
+
+               if (uiAllocpgidx > (psPMRPageArrayData->uiTotalNumOSPages >> uiOrder))
+               {
+                       eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+                       goto e0;
+               }
+
+               if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
+               {
+                       if ((NULL != psPageArray[uiAllocpgidx]) ||
+                           (TRANSLATION_INVALID != psPMRMapTable->aui32Translation[uiAllocpgidx]))
+                       {
+                               eError = PVRSRV_ERROR_INVALID_PARAMS;
+                               PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Trying to allocate already allocated page again",
+                                        __func__));
+                               goto e0;
+                       }
+               }
+               else
+               {
+                       if ((NULL == psPageArray[uiAllocpgidx]) ||
+                           (TRANSLATION_INVALID == psPMRMapTable->aui32Translation[uiAllocpgidx]) )
+                       {
+                               eError = PVRSRV_ERROR_INVALID_PARAMS;
+                               PVR_DPF((PVR_DBG_ERROR,
+                                        "%s: Unable to remap memory due to missing page",
+                                        __func__));
+                               goto e0;
+                       }
+               }
+       }
+
+       ui32Loop = 0;
+
+       /* Allocate new pages from the OS */
+       if (0 != ui32AdtnlAllocPages)
+       {
+                       eError = _AllocOSPages(psPMRPageArrayData, pai32AllocIndices, ui32AdtnlAllocPages);
+                       if (PVRSRV_OK != eError)
+                       {
+                               PVR_DPF((PVR_DBG_MESSAGE,
+                                        "%s: New Addtl Allocation of pages failed",
+                                        __func__));
+                               goto e0;
+                       }
+
+                       psPMRMapTable->ui32NumPhysChunks += ui32AdtnlAllocPages;
+                       /* Mark the corresponding pages of the translation table as valid */
+                       for (ui32Loop = 0; ui32Loop < ui32AdtnlAllocPages; ui32Loop++)
+                       {
+                               psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = pai32AllocIndices[ui32Loop];
+                       }
+       }
+
+
+       ui32Index = ui32Loop;
+
+       /* Move the corresponding free pages to alloc request */
+       for (ui32Loop = 0; ui32Loop < ui32CommonRequestCount; ui32Loop++, ui32Index++)
+       {
+               uiAllocpgidx = pai32AllocIndices[ui32Index];
+               uiFreepgidx  = pai32FreeIndices[ui32Loop];
+
+               psPage = psPageArray[uiAllocpgidx];
+               psPageArray[uiAllocpgidx] = psPageArray[uiFreepgidx];
+
+               if (bCMA)
+               {
+                       pvDMAVAddr = psDMAVirtArray[uiAllocpgidx];
+                       psDMAPAddr = psDMAPhysArray[uiAllocpgidx];
+                       psDMAVirtArray[uiAllocpgidx] = psDMAVirtArray[uiFreepgidx];
+                       psDMAPhysArray[uiAllocpgidx] = psDMAPhysArray[uiFreepgidx];
+               }
+
+               /* Is remap mem used in a real-world scenario? Should it be turned into
+                * a debug feature? The condition check should be moved out of the loop;
+                * this will be done at a later point after some analysis. */
+               if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
+               {
+                       psPMRMapTable->aui32Translation[uiFreepgidx] = TRANSLATION_INVALID;
+                       psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
+                       psPageArray[uiFreepgidx] = NULL;
+                       if (bCMA)
+                       {
+                               psDMAVirtArray[uiFreepgidx] = NULL;
+                               psDMAPhysArray[uiFreepgidx] = (dma_addr_t)0;
+                       }
+               }
+               else
+               {
+                       psPMRMapTable->aui32Translation[uiFreepgidx] = uiFreepgidx;
+                       psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
+                       psPageArray[uiFreepgidx] = psPage;
+                       if (bCMA)
+                       {
+                               psDMAVirtArray[uiFreepgidx] = pvDMAVAddr;
+                               psDMAPhysArray[uiFreepgidx] = psDMAPAddr;
+                       }
+               }
+       }
+
+       /* Free the additional free pages */
+       if (0 != ui32AdtnlFreePages)
+       {
+               eError = _FreeOSPages(psPMRPageArrayData,
+                                     &pai32FreeIndices[ui32Loop],
+                                     ui32AdtnlFreePages);
+               if (eError != PVRSRV_OK)
+               {
+                       goto e0;
+               }
+               psPMRMapTable->ui32NumPhysChunks -= ui32AdtnlFreePages;
+               while (ui32Loop < ui32FreePageCount)
+               {
+                       psPMRMapTable->aui32Translation[pai32FreeIndices[ui32Loop]] = TRANSLATION_INVALID;
+                       ui32Loop++;
+               }
+       }
+
+       eError = PVRSRV_OK;
+
+e0:
+       return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       PMRChangeSparseMemCPUMapOSMem
+@Description    This function changes the CPU mappings accordingly
+@Return         PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+static
+PVRSRV_ERROR PMRChangeSparseMemCPUMapOSMem(PMR_IMPL_PRIVDATA pPriv,
+                                           const PMR *psPMR,
+                                           IMG_UINT64 sCpuVAddrBase,
+                                           IMG_UINT32 ui32AllocPageCount,
+                                           IMG_UINT32 *pai32AllocIndices,
+                                           IMG_UINT32 ui32FreePageCount,
+                                           IMG_UINT32 *pai32FreeIndices)
+{
+       struct page **psPageArray;
+       PMR_OSPAGEARRAY_DATA *psPMRPageArrayData = (PMR_OSPAGEARRAY_DATA *)pPriv;
+       IMG_CPU_PHYADDR sCPUPAddr;
+
+       sCPUPAddr.uiAddr = 0;
+       psPageArray = psPMRPageArrayData->pagearray;
+
+       return OSChangeSparseMemCPUAddrMap((void **)psPageArray,
+                                          sCpuVAddrBase,
+                                          sCPUPAddr,
+                                          ui32AllocPageCount,
+                                          pai32AllocIndices,
+                                          ui32FreePageCount,
+                                          pai32FreeIndices,
+                                          IMG_FALSE);
+}
+
+static PMR_IMPL_FUNCTAB _sPMROSPFuncTab = {
+       .pfnLockPhysAddresses = &PMRLockSysPhysAddressesOSMem,
+       .pfnUnlockPhysAddresses = &PMRUnlockSysPhysAddressesOSMem,
+       .pfnDevPhysAddr = &PMRSysPhysAddrOSMem,
+       .pfnAcquireKernelMappingData = &PMRAcquireKernelMappingDataOSMem,
+       .pfnReleaseKernelMappingData = &PMRReleaseKernelMappingDataOSMem,
+       .pfnReadBytes = NULL,
+       .pfnWriteBytes = NULL,
+       .pfnUnpinMem = &PMRUnpinOSMem,
+       .pfnPinMem = &PMRPinOSMem,
+       .pfnChangeSparseMem = &PMRChangeSparseMemOSMem,
+       .pfnChangeSparseMemCPUMap = &PMRChangeSparseMemCPUMapOSMem,
+       .pfnFinalize = &PMRFinalizeOSMem,
+};
+
+/* Wrapper around OS page allocation. */
+static PVRSRV_ERROR
+DoPageAlloc(PMR_OSPAGEARRAY_DATA *psPrivData,
+            IMG_UINT32 *puiAllocIndices,
+            IMG_UINT32 ui32NumPhysChunks,
+            IMG_UINT32 ui32NumVirtChunks,
+            IMG_DEVMEM_SIZE_T uiChunkSize,
+            IMG_UINT32 ui32Log2AllocPageSize)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       /* Do we fill the whole page array or just parts (sparse)? */
+       if (ui32NumPhysChunks == ui32NumVirtChunks)
+       {
+               /* Allocate the physical pages */
+               eError = _AllocOSPages(psPrivData,
+                                      NULL,
+                                      psPrivData->uiTotalNumOSPages >>
+                                      (ui32Log2AllocPageSize - PAGE_SHIFT));
+       }
+       else if (ui32NumPhysChunks != 0)
+       {
+               /* Calculate the number of pages we want to allocate */
+               IMG_UINT32 ui32PagesToAlloc =
+                       (IMG_UINT32)((((ui32NumPhysChunks * uiChunkSize) - 1) >> ui32Log2AllocPageSize) + 1);
+
+               /* Make sure calculation is correct */
+               PVR_ASSERT(((PMR_SIZE_T) ui32PagesToAlloc << ui32Log2AllocPageSize) ==
+                          (ui32NumPhysChunks * uiChunkSize));
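+               /* Illustrative example: ui32NumPhysChunks = 3, uiChunkSize = 16 KB and
+                * ui32Log2AllocPageSize = 12 (4 KB pages) give
+                * ui32PagesToAlloc = ((48 KB - 1) >> 12) + 1 = 12, and
+                * 12 << 12 == 48 KB, so the assertion above holds. */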
+
+               /* Allocate the physical pages */
+               eError = _AllocOSPages(psPrivData, puiAllocIndices,
+                                      ui32PagesToAlloc);
+       }
+
+       return eError;
+}
+
+static void _EncodeAllocationFlags(IMG_UINT32 uiLog2AllocPageSize,
+                                      PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                      IMG_UINT32* ui32AllocFlags)
+{
+
+       /*
+        * Use the CMA framework if the allocation page size is greater than the
+        * OS page size; note that OSMMapPMRGeneric() has the same expectation.
+        */
+       /* IsCMA? */
+       if (uiLog2AllocPageSize > PAGE_SHIFT)
+       {
+               BIT_SET(*ui32AllocFlags, FLAG_IS_CMA);
+       }
+
+       /* OnDemand? */
+       if (PVRSRV_CHECK_ON_DEMAND(uiFlags))
+       {
+               BIT_SET(*ui32AllocFlags, FLAG_ONDEMAND);
+       }
+
+       /* Zero? */
+       if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags))
+       {
+               BIT_SET(*ui32AllocFlags, FLAG_ZERO);
+       }
+
+       /* Poison on alloc? */
+       if (PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags))
+       {
+               BIT_SET(*ui32AllocFlags, FLAG_POISON_ON_ALLOC);
+       }
+
+#if defined(DEBUG)
+       /* Poison on free? */
+       if (PVRSRV_CHECK_POISON_ON_FREE(uiFlags))
+       {
+               BIT_SET(*ui32AllocFlags, FLAG_POISON_ON_FREE);
+       }
+#endif
+
+       /* Indicate whether this is an allocation with the default caching attribute (i.e. cached) or not */
+       if (PVRSRV_CHECK_CPU_UNCACHED(uiFlags) ||
+               PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags))
+       {
+               BIT_SET(*ui32AllocFlags, FLAG_UNSET_MEMORY_TYPE);
+       }
+
+}
+
+void PhysmemGetOSRamMemStats(PHEAP_IMPL_DATA pvImplData,
+                                          IMG_UINT64 *pui64TotalSize,
+                                          IMG_UINT64 *pui64FreeSize)
+{
+       struct sysinfo sMeminfo;
+       si_meminfo(&sMeminfo);
+
+       PVR_UNREFERENCED_PARAMETER(pvImplData);
+
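+       /* si_meminfo() reports totalram/freeram in units of mem_unit, so multiply
+        * by mem_unit to obtain sizes in bytes. */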
+       *pui64TotalSize = sMeminfo.totalram * sMeminfo.mem_unit;
+       *pui64FreeSize = sMeminfo.freeram * sMeminfo.mem_unit;
+
+}
+
+PVRSRV_ERROR
+PhysmemNewOSRamBackedPMR(PHYS_HEAP *psPhysHeap,
+                                                CONNECTION_DATA *psConnection,
+                                                IMG_DEVMEM_SIZE_T uiSize,
+                                                IMG_DEVMEM_SIZE_T uiChunkSize,
+                                                IMG_UINT32 ui32NumPhysChunks,
+                                                IMG_UINT32 ui32NumVirtChunks,
+                                                IMG_UINT32 *puiAllocIndices,
+                                                IMG_UINT32 uiLog2AllocPageSize,
+                                                PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                                const IMG_CHAR *pszAnnotation,
+                                                IMG_PID uiPid,
+                                                PMR **ppsPMRPtr,
+                                                IMG_UINT32 ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_ERROR eError2;
+       PMR *psPMR;
+       struct _PMR_OSPAGEARRAY_DATA_ *psPrivData;
+       PMR_FLAGS_T uiPMRFlags;
+       IMG_UINT32 ui32CPUCacheFlags;
+       IMG_UINT32 ui32AllocFlags = 0;
+       PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPhysHeap);
+
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       /*
+        * The host driver (but not guest) can still use this factory for firmware
+        * allocations
+        */
+       if (PVRSRV_VZ_MODE_IS(GUEST) && PVRSRV_CHECK_FW_MAIN(uiFlags))
+       {
+               PVR_ASSERT(0);
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto errorOnParam;
+       }
+
+       /* Select correct caching mode */
+       eError = DevmemCPUCacheMode(psDevNode, uiFlags, &ui32CPUCacheFlags);
+       if (eError != PVRSRV_OK)
+       {
+               goto errorOnParam;
+       }
+
+       if (PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags))
+       {
+               ui32CPUCacheFlags |= PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN;
+       }
+
+       _EncodeAllocationFlags(uiLog2AllocPageSize, uiFlags, &ui32AllocFlags);
+
+
+#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES)
+       /* Overwrite flags and always zero pages that could go back to UM */
+       BIT_SET(ui32AllocFlags, FLAG_ZERO);
+       BIT_UNSET(ui32AllocFlags, FLAG_POISON_ON_ALLOC);
+#endif
+
+       /* Physical allocation alignment is generally not supported except under
+          very restrictive conditions; there is also a maximum alignment value,
+          which must not exceed the largest device page size. If these conditions
+          are not met, fail the alignment-requested allocation. */
+       if (BIT_ISSET(ui32AllocFlags, FLAG_IS_CMA))
+       {
+               IMG_UINT32 uiAlign = 1 << uiLog2AllocPageSize;
+               if (uiAlign > uiSize || uiAlign > (1 << PVR_MAX_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ))
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Invalid PA alignment: size 0x%llx, align 0x%x",
+                                       __func__, uiSize, uiAlign));
+                       eError = PVRSRV_ERROR_INVALID_ALIGNMENT;
+                       goto errorOnParam;
+               }
+               PVR_ASSERT(uiLog2AllocPageSize > PVR_MIN_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ);
+       }
+
+       /* Create Array structure that hold the physical pages */
+       eError = _AllocOSPageArray(psDevNode,
+                                                          uiChunkSize,
+                                                          ui32NumPhysChunks,
+                                                          ui32NumVirtChunks,
+                                                          uiLog2AllocPageSize,
+                                                          ui32AllocFlags,
+                                                          ui32CPUCacheFlags,
+                                                          uiPid,
+                                                          &psPrivData);
+       if (eError != PVRSRV_OK)
+       {
+               goto errorOnAllocPageArray;
+       }
+
+       if (!BIT_ISSET(ui32AllocFlags, FLAG_ONDEMAND))
+       {
+               eError = DoPageAlloc(psPrivData, puiAllocIndices, ui32NumPhysChunks,
+                                    ui32NumVirtChunks, uiChunkSize, uiLog2AllocPageSize);
+               if (eError != PVRSRV_OK)
+               {
+                       goto errorOnAllocPages;
+               }
+       }
+
+       /*
+        * In this instance, we simply pass flags straight through.
+        *
+        * Generically, uiFlags can include things that control the PMR factory, but
+        * we don't need any such thing (at the time of writing!), and our caller
+        * specifies all PMR flags so we don't need to meddle with what was given to
+        * us.
+        */
+       uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+
+       /*
+        * Check no significant bits were lost in cast due to different bit widths
+        * for flags
+        */
+       PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+
+       if (BIT_ISSET(ui32AllocFlags, FLAG_ONDEMAND))
+       {
+               PDUMPCOMMENT(PhysHeapDeviceNode(psPhysHeap), "Deferred Allocation PMR (UMA)");
+       }
+
+       eError = PMRCreatePMR(psPhysHeap,
+                                                 uiSize,
+                                                 uiChunkSize,
+                                                 ui32NumPhysChunks,
+                                                 ui32NumVirtChunks,
+                                                 puiAllocIndices,
+                                                 uiLog2AllocPageSize,
+                                                 uiPMRFlags,
+                                                 pszAnnotation,
+                                                 &_sPMROSPFuncTab,
+                                                 psPrivData,
+                                                 PMR_TYPE_OSMEM,
+                                                 &psPMR,
+                                                 ui32PDumpFlags);
+       if (eError != PVRSRV_OK)
+       {
+               goto errorOnCreate;
+       }
+
+       *ppsPMRPtr = psPMR;
+
+       return PVRSRV_OK;
+
+errorOnCreate:
+       if (!BIT_ISSET(ui32AllocFlags, FLAG_ONDEMAND))
+       {
+               eError2 = _FreeOSPages(psPrivData, NULL, 0);
+               PVR_ASSERT(eError2 == PVRSRV_OK);
+       }
+
+errorOnAllocPages:
+       eError2 = _FreeOSPagesArray(psPrivData);
+       PVR_ASSERT(eError2 == PVRSRV_OK);
+
+errorOnAllocPageArray:
+errorOnParam:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/physmem_osmem_linux.h b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/physmem_osmem_linux.h
new file mode 100644 (file)
index 0000000..89706ff
--- /dev/null
@@ -0,0 +1,49 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linux OS physmem implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PHYSMEM_OSMEM_LINUX_H
+#define PHYSMEM_OSMEM_LINUX_H
+
+void LinuxInitPhysmem(void);
+void LinuxDeinitPhysmem(void);
+
+#endif /* PHYSMEM_OSMEM_LINUX_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/physmem_test.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/physmem_test.c
new file mode 100644 (file)
index 0000000..3874594
--- /dev/null
@@ -0,0 +1,710 @@
+/*************************************************************************/ /*!
+@Title          Physmem_test
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Single entry point for testing of page factories
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "physmem_test.h"
+#include "device.h"
+#include "syscommon.h"
+#include "pmr.h"
+#include "osfunc.h"
+#include "physmem.h"
+#include "physmem_osmem.h"
+#include "physmem_lma.h"
+#include "pvrsrv.h"
+
+#define PHYSMEM_TEST_PAGES        2     /* Mem test pages */
+#define PHYSMEM_TEST_PASSES_MAX   1000  /* Limit number of passes to some reasonable value */
+
+
+/* Test patterns for mem test */
+
+static const IMG_UINT64 gui64Patterns[] = {
+       0,
+       0xffffffffffffffffULL,
+       0x5555555555555555ULL,
+       0xaaaaaaaaaaaaaaaaULL,
+       0x1111111111111111ULL,
+       0x2222222222222222ULL,
+       0x4444444444444444ULL,
+       0x8888888888888888ULL,
+       0x3333333333333333ULL,
+       0x6666666666666666ULL,
+       0x9999999999999999ULL,
+       0xccccccccccccccccULL,
+       0x7777777777777777ULL,
+       0xbbbbbbbbbbbbbbbbULL,
+       0xddddddddddddddddULL,
+       0xeeeeeeeeeeeeeeeeULL,
+       0x7a6c7258554e494cULL,
+};
+
+static const IMG_UINT32 gui32Patterns[] = {
+       0,
+       0xffffffffU,
+       0x55555555U,
+       0xaaaaaaaaU,
+       0x11111111U,
+       0x22222222U,
+       0x44444444U,
+       0x88888888U,
+       0x33333333U,
+       0x66666666U,
+       0x99999999U,
+       0xccccccccU,
+       0x77777777U,
+       0xbbbbbbbbU,
+       0xddddddddU,
+       0xeeeeeeeeU,
+       0x7a6c725cU,
+};
+
+static const IMG_UINT16 gui16Patterns[] = {
+       0,
+       0xffffU,
+       0x5555U,
+       0xaaaaU,
+       0x1111U,
+       0x2222U,
+       0x4444U,
+       0x8888U,
+       0x3333U,
+       0x6666U,
+       0x9999U,
+       0xccccU,
+       0x7777U,
+       0xbbbbU,
+       0xddddU,
+       0xeeeeU,
+       0x7a6cU,
+};
+
+static const IMG_UINT8 gui8Patterns[] = {
+       0,
+       0xffU,
+       0x55U,
+       0xaaU,
+       0x11U,
+       0x22U,
+       0x44U,
+       0x88U,
+       0x33U,
+       0x66U,
+       0x99U,
+       0xccU,
+       0x77U,
+       0xbbU,
+       0xddU,
+       0xeeU,
+       0x6cU,
+};
+
+
+/* The following function does the minimal initialisation required for the mem test, using a dummy device node */
+static PVRSRV_ERROR
+PhysMemTestInit(PVRSRV_DEVICE_NODE **ppsDeviceNode, PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+       PVRSRV_ERROR eError;
+
+       /* Dummy device node */
+       psDeviceNode = OSAllocZMem(sizeof(*psDeviceNode));
+       PVR_LOG_RETURN_IF_NOMEM(psDeviceNode, "OSAllocZMem");
+
+       psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_INIT;
+       psDeviceNode->psDevConfig = psDevConfig;
+       psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_ON;
+
+       /* Initialise Phys mem heaps */
+       eError = PVRSRVPhysMemHeapsInit(psDeviceNode, psDevConfig);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVPhysMemHeapsInit", ErrorSysDevDeInit);
+
+       *ppsDeviceNode = psDeviceNode;
+
+       return PVRSRV_OK;
+
+ErrorSysDevDeInit:
+       psDevConfig->psDevNode = NULL;
+       OSFreeMem(psDeviceNode);
+       return eError;
+}
+
+/* Undo initialisation done for mem test */
+static void
+PhysMemTestDeInit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       /* Deinitialise Phys mem heaps */
+       PVRSRVPhysMemHeapsDeinit(psDeviceNode);
+
+       OSFreeMem(psDeviceNode);
+}
+
+/* Test for PMR factory validation */
+static PVRSRV_ERROR
+PMRValidationTest(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFlags)
+{
+       PVRSRV_ERROR eError, eError1;
+       IMG_UINT32 i = 0, j = 0, ui32Index = 0;
+       IMG_UINT32 *pui32MappingTable = NULL;
+       PMR *psPMR = NULL;
+       IMG_BOOL *pbValid;
+       IMG_DEV_PHYADDR *apsDevPAddr;
+       IMG_UINT32 ui32NumOfPages = 10, ui32NumOfPhysPages = 5;
+       size_t uiMappedSize, uiPageSize;
+       IMG_UINT8 *pcWriteBuffer, *pcReadBuffer;
+       IMG_HANDLE hPrivData = NULL;
+       void *pvKernAddr = NULL;
+
+       uiPageSize = OSGetPageSize();
+
+       /* Allocate OS memory for PMR page list */
+       apsDevPAddr = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEV_PHYADDR));
+       PVR_LOG_RETURN_IF_NOMEM(apsDevPAddr, "OSAllocMem");
+
+       /* Allocate OS memory for PMR page state */
+       pbValid = OSAllocMem(ui32NumOfPages * sizeof(IMG_BOOL));
+       PVR_LOG_GOTO_IF_NOMEM(pbValid, eError, ErrorFreePMRPageListMem);
+       OSCachedMemSet(pbValid, 0, ui32NumOfPages * sizeof(IMG_BOOL));
+
+       /* Allocate OS memory for write buffer */
+       pcWriteBuffer = OSAllocMem(uiPageSize);
+       PVR_LOG_GOTO_IF_NOMEM(pcWriteBuffer, eError, ErrorFreePMRPageStateMem);
+       OSCachedMemSet(pcWriteBuffer, 0xF, uiPageSize);
+
+       /* Allocate OS memory for read buffer */
+       pcReadBuffer = OSAllocMem(uiPageSize);
+       PVR_LOG_GOTO_IF_NOMEM(pcReadBuffer, eError, ErrorFreeWriteBuffer);
+
+       /* Allocate OS memory for mapping table */
+       pui32MappingTable = (IMG_UINT32 *)OSAllocMem(ui32NumOfPhysPages * sizeof(*pui32MappingTable));
+       PVR_LOG_GOTO_IF_NOMEM(pui32MappingTable, eError, ErrorFreeReadBuffer);
+
+       /* Pages with an even index will have physical backing in the PMR */
+       for (ui32Index=0; ui32Index < ui32NumOfPages; ui32Index+=2)
+       {
+               pui32MappingTable[i++] = ui32Index;
+       }
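+       /* With ui32NumOfPages = 10 and ui32NumOfPhysPages = 5 this produces the
+        * mapping table {0, 2, 4, 6, 8}, i.e. only the even virtual chunks are
+        * physically backed. */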
+
+       /* Allocate Sparse PMR with SPARSE | READ | WRITE | UNCACHED_WC attributes */
+       uiFlags |= PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING | \
+                               PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+                               PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+                               PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC;
+
+       /* Allocate a sparse PMR from given physical heap - CPU/GPU/FW */
+       eError = PhysmemNewRamBackedPMR(NULL,
+                                                                       psDeviceNode,
+                                                                       ui32NumOfPages * uiPageSize,
+                                                                       uiPageSize,
+                                                                       ui32NumOfPhysPages,
+                                                                       ui32NumOfPages,
+                                                                       pui32MappingTable,
+                                                                       OSGetPageShift(),
+                                                                       uiFlags,
+                                                                       sizeof("PMR ValidationTest"),
+                                                                       "PMR ValidationTest",
+                                                                       OSGetCurrentClientProcessIDKM(),
+                                                                       &psPMR,
+                                                                       PDUMP_NONE,
+                                                                       NULL);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Failed to allocate a PMR"));
+               goto ErrorFreeMappingTable;
+       }
+
+       /* Check whether the allocated PMR can be locked and obtain the physical
+        * addresses of the underlying memory pages.
+        */
+       eError = PMRLockSysPhysAddresses(psPMR);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Failed to lock PMR"));
+               goto ErrorUnrefPMR;
+       }
+
+       /* Get the Device physical addresses of the pages */
+       eError = PMR_DevPhysAddr(psPMR, OSGetPageShift(), ui32NumOfPages, 0, apsDevPAddr, pbValid);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Failed to map PMR pages into device physical addresses"));
+               goto ErrorUnlockPhysAddresses;
+       }
+
+       /* Check whether device address of each physical page is OS PAGE_SIZE aligned */
+       for (i = 0; i < ui32NumOfPages; i++)
+       {
+               if (pbValid[i])
+               {
+                       if ((apsDevPAddr[i].uiAddr & OSGetPageMask()) != 0)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "Physical memory of PMR is not page aligned"));
+                               eError = PVRSRV_ERROR_MEMORY_TEST_FAILED;
+                               goto ErrorUnlockPhysAddresses;
+                       }
+               }
+       }
+
+       /* Acquire kernel virtual address of each physical page and write to it
+        * and then release it.
+        */
+       for (i = 0; i < ui32NumOfPages; i++)
+       {
+               if (pbValid[i])
+               {
+                       eError = PMRAcquireSparseKernelMappingData(psPMR, (i * uiPageSize), uiPageSize, &pvKernAddr, &uiMappedSize, &hPrivData);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "Failed to Acquire Kernel Mapping of PMR"));
+                               goto ErrorUnlockPhysAddresses;
+                       }
+                       OSCachedMemCopyWMB(pvKernAddr, pcWriteBuffer, OSGetPageSize());
+
+                       eError = PMRReleaseKernelMappingData(psPMR, hPrivData);
+                       PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData");
+               }
+       }
+
+       /* Acquire kernel virtual address of each physical page, read back
+        * from it and check whether the contents are intact.
+        */
+       for (i = 0; i < ui32NumOfPages; i++)
+       {
+               if (pbValid[i])
+               {
+                       eError = PMRAcquireSparseKernelMappingData(psPMR, (i * uiPageSize), uiPageSize, &pvKernAddr, &uiMappedSize, &hPrivData);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "Failed to Acquire Kernel Mapping of PMR"));
+                               goto ErrorUnlockPhysAddresses;
+                       }
+                       OSCachedMemSet(pcReadBuffer, 0x0, uiPageSize);
+                       OSCachedMemCopy(pcReadBuffer, pvKernAddr, uiMappedSize);
+
+                       eError = PMRReleaseKernelMappingData(psPMR, hPrivData);
+                       PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData");
+
+                       for (j = 0; j < uiPageSize; j++)
+                       {
+                               if (pcReadBuffer[j] != pcWriteBuffer[j])
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR,
+                                                "%s: Test failed. Got (0x%hhx), expected (0x%hhx)!",
+                                                __func__, pcReadBuffer[j], pcWriteBuffer[j]));
+                                       eError = PVRSRV_ERROR_MEMORY_TEST_FAILED;
+                                       goto ErrorUnlockPhysAddresses;
+                               }
+                       }
+               }
+       }
+
+ErrorUnlockPhysAddresses:
+       /* Unlock and Unref the PMR to destroy it */
+       eError1 = PMRUnlockSysPhysAddresses(psPMR);
+       if (eError1 != PVRSRV_OK)
+       {
+               eError = (eError == PVRSRV_OK)? eError1 : eError;
+               PVR_DPF((PVR_DBG_ERROR, "Failed to unlock PMR"));
+       }
+
+ErrorUnrefPMR:
+       eError1 = PMRUnrefPMR(psPMR);
+       if (eError1 != PVRSRV_OK)
+       {
+               eError = (eError == PVRSRV_OK)? eError1 : eError;
+               PVR_DPF((PVR_DBG_ERROR, "Failed to free PMR"));
+       }
+ErrorFreeMappingTable:
+       OSFreeMem(pui32MappingTable);
+ErrorFreeReadBuffer:
+       OSFreeMem(pcReadBuffer);
+ErrorFreeWriteBuffer:
+       OSFreeMem(pcWriteBuffer);
+ErrorFreePMRPageStateMem:
+       OSFreeMem(pbValid);
+ErrorFreePMRPageListMem:
+       OSFreeMem(apsDevPAddr);
+
+       return eError;
+}
+
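+/* Helper for the TestPatternUxx functions below: for each entry in Patterns,
+ * fill [StartAddr, EndAddr) with that pattern, then read it back and set
+ * Error to PVRSRV_ERROR_MEMORY_TEST_FAILED on the first mismatch. On failure,
+ * ptr and i are left pointing at the failing location and pattern so the
+ * caller can log them.
+ */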
+#define DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, Patterns, NumOfPatterns, Error, ptr, i) \
+       for (i = 0; i < NumOfPatterns; i++) \
+       { \
+               /* Write pattern */ \
+               for (ptr = StartAddr; ptr < EndAddr; ptr++) \
+               { \
+                       *ptr = Patterns[i]; \
+               } \
+               \
+               /* Read back and validate pattern */ \
+               for (ptr = StartAddr; ptr < EndAddr ; ptr++) \
+               { \
+                       if (*ptr != Patterns[i]) \
+                       { \
+                               Error = PVRSRV_ERROR_MEMORY_TEST_FAILED; \
+                               break; \
+                       } \
+               } \
+               \
+               if (Error != PVRSRV_OK) \
+               { \
+                       break; \
+               } \
+       }
+
+static PVRSRV_ERROR
+TestPatternU8(void *pvKernAddr, size_t uiMappedSize)
+{
+       IMG_UINT8 *StartAddr = (IMG_UINT8 *) pvKernAddr;
+       IMG_UINT8 *EndAddr = ((IMG_UINT8 *) pvKernAddr) + (uiMappedSize / sizeof(IMG_UINT8));
+       IMG_UINT8 *p;
+       IMG_UINT32 i;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PVR_ASSERT((uiMappedSize % sizeof(IMG_UINT8)) == 0);
+
+       DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, gui8Patterns, sizeof(gui8Patterns)/sizeof(IMG_UINT8), eError, p, i);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Test failed. Got (0x%hhx), expected (0x%hhx)!",
+                        __func__, *p, gui8Patterns[i]));
+       }
+
+       return eError;
+}
+
+
+static PVRSRV_ERROR
+TestPatternU16(void *pvKernAddr, size_t uiMappedSize)
+{
+       IMG_UINT16 *StartAddr = (IMG_UINT16 *) pvKernAddr;
+       IMG_UINT16 *EndAddr = ((IMG_UINT16 *) pvKernAddr) + (uiMappedSize / sizeof(IMG_UINT16));
+       IMG_UINT16 *p;
+       IMG_UINT32 i;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PVR_ASSERT((uiMappedSize % sizeof(IMG_UINT16)) == 0);
+
+       DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, gui16Patterns, sizeof(gui16Patterns)/sizeof(IMG_UINT16), eError, p, i);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Test failed. Got (0x%hx), expected (0x%hx)!",
+                        __func__, *p, gui16Patterns[i]));
+       }
+
+       return eError;
+}
+
+static PVRSRV_ERROR
+TestPatternU32(void *pvKernAddr, size_t uiMappedSize)
+{
+       IMG_UINT32 *StartAddr = (IMG_UINT32 *) pvKernAddr;
+       IMG_UINT32 *EndAddr = ((IMG_UINT32 *) pvKernAddr) + (uiMappedSize / sizeof(IMG_UINT32));
+       IMG_UINT32 *p;
+       IMG_UINT32 i;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PVR_ASSERT((uiMappedSize % sizeof(IMG_UINT32)) == 0);
+
+       DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, gui32Patterns, sizeof(gui32Patterns)/sizeof(IMG_UINT32), eError, p, i);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Test failed. Got (0x%x), expected (0x%x)!",
+                        __func__, *p, gui32Patterns[i]));
+       }
+
+       return eError;
+}
+
+static PVRSRV_ERROR
+TestPatternU64(void *pvKernAddr, size_t uiMappedSize)
+{
+       IMG_UINT64 *StartAddr = (IMG_UINT64 *) pvKernAddr;
+       IMG_UINT64 *EndAddr = ((IMG_UINT64 *) pvKernAddr) + (uiMappedSize / sizeof(IMG_UINT64));
+       IMG_UINT64 *p;
+       IMG_UINT32 i;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PVR_ASSERT((uiMappedSize % sizeof(IMG_UINT64)) == 0);
+
+       DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, gui64Patterns, sizeof(gui64Patterns)/sizeof(IMG_UINT64), eError, p, i);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Test failed. Got (0x%llx), expected (0x%llx)!",
+                        __func__, *p, gui64Patterns[i]));
+       }
+
+       return eError;
+}
+
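+/* Copy a block whose size is deliberately not a multiple of the CPU cache
+ * line size (2/3 of a line) through the mapping and read it back, so that
+ * the accesses straddle cache-line boundaries.
+ */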
+static PVRSRV_ERROR
+TestSplitCacheline(void *pvKernAddr, size_t uiMappedSize)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       size_t uiCacheLineSize;
+       size_t uiBlockSize;
+       size_t j;
+       IMG_UINT8 *pcWriteBuffer, *pcReadBuffer;
+       IMG_UINT8 *StartAddr = (IMG_UINT8 *) pvKernAddr;
+       IMG_UINT8 *EndAddr, *p;
+
+       uiCacheLineSize = OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE);
+
+       if (uiCacheLineSize > 0)
+       {
+               uiBlockSize = (uiCacheLineSize * 2)/3; /* split cacheline */
+
+               pcWriteBuffer = OSAllocMem(uiBlockSize);
+               PVR_LOG_RETURN_IF_NOMEM(pcWriteBuffer, "OSAllocMem");
+
+               /* Fill the write buffer with test data, 0xAB */
+               OSCachedMemSet(pcWriteBuffer, 0xAB, uiBlockSize);
+
+               pcReadBuffer = OSAllocMem(uiBlockSize);
+               PVR_LOG_GOTO_IF_NOMEM(pcReadBuffer, eError, ErrorFreeWriteBuffer);
+
+               /* Fit only complete blocks in uiMappedSize, ignore leftover bytes */
+               EndAddr = StartAddr + (uiBlockSize * (uiMappedSize / uiBlockSize));
+
+               /* Write blocks into the memory */
+               for (p = StartAddr; p < EndAddr; p += uiBlockSize)
+               {
+                       OSCachedMemCopy(p, pcWriteBuffer, uiBlockSize);
+               }
+
+               /* Read back blocks and check */
+               for (p = StartAddr; p < EndAddr; p += uiBlockSize)
+               {
+                       OSCachedMemCopy(pcReadBuffer, p, uiBlockSize);
+
+                       for (j = 0; j < uiBlockSize; j++)
+                       {
+                               if (pcReadBuffer[j] != pcWriteBuffer[j])
+                               {
+                                       PVR_DPF((PVR_DBG_ERROR, "%s: Test failed. Got (0x%hhx), expected (0x%hhx)!", __func__, pcReadBuffer[j], pcWriteBuffer[j]));
+                                       eError = PVRSRV_ERROR_MEMORY_TEST_FAILED;
+                                       goto ErrorMemTestFailed;
+                               }
+                       }
+               }
+
+ErrorMemTestFailed:
+               OSFreeMem(pcReadBuffer);
+ErrorFreeWriteBuffer:
+               OSFreeMem(pcWriteBuffer);
+       }
+
+       return eError;
+}
+
+/* Memory test - writes different patterns to memory, reads them back and validates the contents */
+static PVRSRV_ERROR
+MemTestPatterns(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFlags)
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32MappingTable = 0;
+       PMR *psPMR = NULL;
+       size_t uiMappedSize, uiPageSize;
+       IMG_HANDLE hPrivData = NULL;
+       void *pvKernAddr = NULL;
+
+       uiPageSize = OSGetPageSize();
+
+       /* Allocate PMR with READ | WRITE | WRITE_COMBINE attributes */
+       uiFlags |= PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+                          PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+                          PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC;
+
+       /* Allocate a PMR from the given physical heap */
+       eError = PhysmemNewRamBackedPMR(NULL,
+                                                                       psDeviceNode,
+                                                                       uiPageSize * PHYSMEM_TEST_PAGES,
+                                                                       uiPageSize * PHYSMEM_TEST_PAGES,
+                                                                       1,
+                                                                       1,
+                                                                       &ui32MappingTable,
+                                                                       OSGetPageShift(),
+                                                                       uiFlags,
+                                                                       sizeof("PMR PhysMemTest"),
+                                                                       "PMR PhysMemTest",
+                                                                       OSGetCurrentClientProcessIDKM(),
+                                                                       &psPMR,
+                                                                       PDUMP_NONE,
+                                                                       NULL);
+       PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemNewRamBackedPMR");
+
+       /* Check whether allocated PMR can be locked and obtain physical
+        * addresses of underlying memory pages.
+        */
+       eError = PMRLockSysPhysAddresses(psPMR);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PMRLockSysPhysAddresses", ErrorUnrefPMR);
+
+       /* Map the physical page(s) into kernel space, acquire kernel mapping
+        * for PMR.
+        */
+       eError = PMRAcquireKernelMappingData(psPMR, 0, uiPageSize * PHYSMEM_TEST_PAGES, &pvKernAddr, &uiMappedSize, &hPrivData);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireKernelMappingData", ErrorUnlockPhysAddresses);
+
+       PVR_ASSERT((uiPageSize * PHYSMEM_TEST_PAGES) == uiMappedSize);
+
+       /* Test various patterns */
+       eError = TestPatternU64(pvKernAddr, uiMappedSize);
+       if (eError != PVRSRV_OK)
+       {
+               goto ErrorReleaseKernelMappingData;
+       }
+
+       eError = TestPatternU32(pvKernAddr, uiMappedSize);
+       if (eError != PVRSRV_OK)
+       {
+               goto ErrorReleaseKernelMappingData;
+       }
+
+       eError = TestPatternU16(pvKernAddr, uiMappedSize);
+       if (eError != PVRSRV_OK)
+       {
+               goto ErrorReleaseKernelMappingData;
+       }
+
+       eError = TestPatternU8(pvKernAddr, uiMappedSize);
+       if (eError != PVRSRV_OK)
+       {
+               goto ErrorReleaseKernelMappingData;
+       }
+
+       /* Test split cachelines */
+       eError = TestSplitCacheline(pvKernAddr, uiMappedSize);
+
+ErrorReleaseKernelMappingData:
+       (void) PMRReleaseKernelMappingData(psPMR, hPrivData);
+
+ErrorUnlockPhysAddresses:
+       /* Unlock and Unref the PMR to destroy it, ignore returned value */
+       (void) PMRUnlockSysPhysAddresses(psPMR);
+ErrorUnrefPMR:
+       (void) PMRUnrefPMR(psPMR);
+
+       return eError;
+}
+
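+/* Run the PMR validation test once, then repeat the pattern memory test for
+ * the requested number of passes on the physical heap selected by uiFlags.
+ */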
+static PVRSRV_ERROR
+PhysMemTestRun(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFlags, IMG_UINT32 ui32Passes)
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT32 i;
+
+       /* PMR validation test */
+       eError = PMRValidationTest(psDeviceNode, uiFlags);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: PMR validation test failed!",
+                        __func__));
+               return eError;
+       }
+
+       for (i = 0; i < ui32Passes; i++)
+       {
+               /* Mem test */
+               eError = MemTestPatterns(psDeviceNode, uiFlags);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                "%s: [Pass#%u] MemTestPatterns failed!",
+                                __func__, i));
+                       break;
+               }
+       }
+
+       return eError;
+}
+
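+/* Entry point for the physical memory self-test: clamp the requested number
+ * of passes, perform minimal device initialisation and exercise both the GPU
+ * local and CPU local physical heaps.
+ */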
+PVRSRV_ERROR
+PhysMemTest(void *pvDevConfig, IMG_UINT32 ui32MemTestPasses)
+{
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+       PVRSRV_DEVICE_CONFIG *psDevConfig = pvDevConfig;
+       PVRSRV_ERROR eError;
+
+       /* Clamp the requested number of memtest passes to the supported maximum */
+       ui32MemTestPasses = (ui32MemTestPasses > PHYSMEM_TEST_PASSES_MAX)? PHYSMEM_TEST_PASSES_MAX : ui32MemTestPasses;
+
+       /* Do minimal initialisation before test */
+       eError = PhysMemTestInit(&psDeviceNode, psDevConfig);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Test failed to initialize", __func__));
+               return eError;
+       }
+
+       /* GPU local mem */
+       eError = PhysMemTestRun(psDeviceNode, 0, ui32MemTestPasses);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "GPU local memory test failed!"));
+               goto ErrorPhysMemTestDeinit;
+       }
+
+       /* CPU local mem */
+       eError = PhysMemTestRun(psDeviceNode, PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(CPU_LOCAL), ui32MemTestPasses);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "CPU local memory test failed!"));
+               goto ErrorPhysMemTestDeinit;
+       }
+
+       PVR_LOG(("PhysMemTest: Passed."));
+       goto PhysMemTestPassed;
+
+ErrorPhysMemTestDeinit:
+       PVR_DPF((PVR_DBG_ERROR, "PhysMemTest: Failed."));
+PhysMemTestPassed:
+       PhysMemTestDeInit(psDeviceNode);
+
+       return eError;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/physmem_test.h b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/physmem_test.h
new file mode 100644 (file)
index 0000000..684c729
--- /dev/null
@@ -0,0 +1,51 @@
+/*************************************************************************/ /*!
+@Title          Physmem test header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for single entry point for testing of page factories
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef SRVSRV_PHYSMEM_TEST_H
+#define SRVSRV_PHYSMEM_TEST_H
+/*
+ * PhysMemTest
+ */
+PVRSRV_ERROR
+PhysMemTest(void *pvDevConfig, IMG_UINT32 ui32MemTestPasses);
+
+#endif /* SRVSRV_PHYSMEM_TEST_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pmr_os.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pmr_os.c
new file mode 100644 (file)
index 0000000..20de047
--- /dev/null
@@ -0,0 +1,596 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linux OS PMR functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <asm/page.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+#include <linux/pfn_t.h>
+#include <linux/pfn.h>
+#endif
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "allocmem.h"
+#include "devicemem_server_utils.h"
+#include "pmr.h"
+#include "pmr_os.h"
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS)
+#include "mmap_stats.h"
+#endif
+
+#include "kernel_compatibility.h"
+
+/*
+ * x86_32:
+ * Use vm_insert_page because remap_pfn_range has issues when mapping HIGHMEM
+ * pages with default memory attributes; these HIGHMEM pages are skipped in
+ * set_pages_array_[uc,wc] during allocation; see reserve_pfn_range().
+ * Also vm_insert_page is faster.
+ *
+ * x86_64:
+ * Use vm_insert_page because it is faster.
+ *
+ * Other platforms:
+ * Use remap_pfn_range by default because it does not issue a cache flush.
+ * It is known that ARM32 benefits from this. When other platforms become
+ * available, it should be investigated whether this assumption holds for them
+ * as well.
+ *
+ * Since vm_insert_page does more precise memory accounting we have the build
+ * flag PVR_MMAP_USE_VM_INSERT that forces its use. This is useful as a debug
+ * feature.
+ *
+ */
+#if defined(CONFIG_X86) || defined(PVR_MMAP_USE_VM_INSERT)
+#define PMR_OS_USE_VM_INSERT_PAGE 1
+#endif
+
+static void MMapPMROpen(struct vm_area_struct *ps_vma)
+{
+       PMR *psPMR = ps_vma->vm_private_data;
+
+       /* Our VM flags should ensure this function never gets called */
+       PVR_DPF((PVR_DBG_WARNING,
+                        "%s: Unexpected mmap open call, this is probably an application bug.",
+                        __func__));
+       PVR_DPF((PVR_DBG_WARNING,
+                        "%s: vma struct: 0x%p, vAddr: %#lX, length: %#lX, PMR pointer: 0x%p",
+                        __func__,
+                        ps_vma,
+                        ps_vma->vm_start,
+                        ps_vma->vm_end - ps_vma->vm_start,
+                        psPMR));
+
+       /* In case we get called anyway let's do things right by increasing the refcount and
+        * locking down the physical addresses. */
+       PMRRefPMR(psPMR);
+
+       if (PMRLockSysPhysAddresses(psPMR) != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Could not lock down physical addresses, aborting.", __func__));
+               PMRUnrefPMR(psPMR);
+       }
+}
+
+static void MMapPMRClose(struct vm_area_struct *ps_vma)
+{
+       PMR *psPMR = ps_vma->vm_private_data;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+       {
+               uintptr_t vAddr = ps_vma->vm_start;
+
+               while (vAddr < ps_vma->vm_end)
+               {
+                       /* USER MAPPING */
+                       PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES,
+                                                       (IMG_UINT64)vAddr,
+                                                       OSGetCurrentClientProcessIDKM());
+                       vAddr += PAGE_SIZE;
+               }
+       }
+#else
+       PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES,
+                                   ps_vma->vm_end - ps_vma->vm_start,
+                                   OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+
+       PMRUnlockSysPhysAddresses(psPMR);
+       PMRUnrefPMR(psPMR);
+}
+
+/*
+ * This vma operation is used to read data from mmap regions. It is called
+ * by access_process_vm, which is called to handle PTRACE_PEEKDATA ptrace
+ * requests and reads from /proc/<pid>/mem.
+ */
+static int MMapVAccess(struct vm_area_struct *ps_vma, unsigned long addr,
+                      void *buf, int len, int write)
+{
+       PMR *psPMR = ps_vma->vm_private_data;
+       unsigned long ulOffset = addr - ps_vma->vm_start;
+       size_t uiBytesCopied;
+       PVRSRV_ERROR eError;
+       int iRetVal = -EINVAL;
+
+       if (write)
+       {
+               eError = PMR_WriteBytes(psPMR,
+                                       (IMG_DEVMEM_OFFSET_T) ulOffset,
+                                       buf,
+                                       len,
+                                       &uiBytesCopied);
+       }
+       else
+       {
+               eError = PMR_ReadBytes(psPMR,
+                                      (IMG_DEVMEM_OFFSET_T) ulOffset,
+                                      buf,
+                                      len,
+                                      &uiBytesCopied);
+       }
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Error from %s (%d)",
+                        __func__,
+                        write ? "PMR_WriteBytes" : "PMR_ReadBytes",
+                        eError));
+       }
+       else
+       {
+               iRetVal = uiBytesCopied;
+       }
+
+       return iRetVal;
+}
+
+static const struct vm_operations_struct gsMMapOps =
+{
+       .open = &MMapPMROpen,
+       .close = &MMapPMRClose,
+       .access = MMapVAccess,
+};
+
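+/* Map one PMR-page-sized, physically contiguous chunk at the given offset of
+ * the user VMA, using vm_insert_page()/vmf_insert_mixed() or remap_pfn_range()
+ * depending on the flags selected by the caller.
+ */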
+static INLINE int _OSMMapPMR(PVRSRV_DEVICE_NODE *psDevNode,
+                                                       struct vm_area_struct *ps_vma,
+                                                       IMG_DEVMEM_OFFSET_T uiOffset,
+                                                       IMG_CPU_PHYADDR *psCpuPAddr,
+                                                       IMG_UINT32 uiLog2PageSize,
+                                                       IMG_BOOL bUseVMInsertPage,
+                                                       IMG_BOOL bUseMixedMap)
+{
+       IMG_INT32 iStatus;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+       pfn_t sPFN;
+#else
+       unsigned long uiPFN;
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+       sPFN = phys_to_pfn_t(psCpuPAddr->uiAddr + SYSPORT_MEM_OFFSET, 0);
+#else
+       uiPFN = psCpuPAddr->uiAddr >> PAGE_SHIFT;
+       PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr->uiAddr);
+#endif
+
+       /*
+        * vm_insert_page() allows insertion of individual pages into user
+        * VMA space _only_ if page is a order-zero allocated page
+        */
+       if (bUseVMInsertPage)
+       {
+               if (bUseMixedMap)
+               {
+                       /*
+                        * This path is just for debugging. It should be
+                        * equivalent to the remap_pfn_range() path.
+                        */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0))
+                       vm_fault_t vmf;
+
+                       vmf = vmf_insert_mixed(ps_vma,
+                                                                       ps_vma->vm_start + uiOffset,
+                                                                       sPFN);
+                       if (vmf & VM_FAULT_ERROR)
+                       {
+                               iStatus = vm_fault_to_errno(vmf, 0);
+                       }
+                       else
+                       {
+                               iStatus = 0;
+                       }
+#else
+                       iStatus = vm_insert_mixed(ps_vma,
+                                                                         ps_vma->vm_start + uiOffset,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+                                                                         sPFN);
+#else
+                                                                         uiPFN);
+#endif
+#endif
+               }
+               else
+               {
+                       /* Since kernel 3.7 this sets VM_MIXEDMAP internally */
+                       iStatus = vm_insert_page(ps_vma,
+                                                                        ps_vma->vm_start + uiOffset,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+                                                                        pfn_t_to_page(sPFN));
+#else
+                                                                        pfn_to_page(uiPFN));
+#endif
+               }
+       }
+       else
+       {
+               /*
+                  NOTE: Regarding absence of dma_mmap_coherent() in _OSMMapPMR()
+
+                  The current services mmap model maps a PMR's full length into the
+                  user VMA and applies any user-specified offset to the zero-offset
+                  based VA returned by the kernel in the services client; this
+                  essentially means the services server ignores ps_vma->vm_pgoff
+                  (which houses hPMR) during an mmap call.
+
+                  Furthermore, during a DMA/CMA memory allocation, multiple order-n
+                  pages are used to satisfy an allocation request due to the DMA/CMA
+                  framework rounding up the allocation size to the next power of two,
+                  which can lead to wasted memory (so we don't allocate using a single call).
+
+                  The combination of the above two issues mean that we cannot use the
+                  dma_mmap_coherent() for a number of reasons outlined below:
+
+                    - Services mmap semantics do not fit with dma_mmap_coherent(),
+                      which requires a proper ps_vma->vm_pgoff; since this houses an
+                      hPMR handle value, calls into dma_mmap_coherent() fail. This
+                      could be avoided by forcing ps_vma->vm_pgoff to zero, but
+                      ps_vma->vm_pgoff is applied to the DMA bus address PFN and not
+                      the user VMA, which is always mapped at ps_vma->vm_start.
+
+                    - As multiple order-n pages are used for DMA/CMA allocations, a
+                      single dma_mmap_coherent() call with vma->vm_pgoff set to
+                      zero cannot safely be used because there is no guarantee that
+                      all of the order-n pages in the PMR are physically contiguous
+                      from the first entry to the last. Whilst this is highly likely
+                      to be the case, there is no guarantee that it will be, so we
+                      cannot depend on it.
+
+                  The solution is to manually mmap DMA/CMA pages into user VMA
+                  using remap_pfn_range() directly. Furthermore, accounting is
+                  always compromised for DMA/CMA allocations.
+               */
+               size_t uiNumContiguousBytes = 1ULL << uiLog2PageSize;
+
+               iStatus = remap_pfn_range(ps_vma,
+                                                                 ps_vma->vm_start + uiOffset,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+                                                                 pfn_t_to_pfn(sPFN),
+#else
+                                                                 uiPFN,
+#endif
+                                                                 uiNumContiguousBytes,
+                                                                 ps_vma->vm_page_prot);
+       }
+
+       return iStatus;
+}
+
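+/* Generic mmap handler for a PMR: validate the request, derive the page
+ * protection from the PMR's CPU cache mode, look up the CPU physical address
+ * of every page in the range, map the valid pages into the user VMA and take
+ * a reference on the PMR for the lifetime of the mapping.
+ */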
+PVRSRV_ERROR
+OSMMapPMRGeneric(PMR *psPMR, PMR_MMAP_DATA pOSMMapData)
+{
+       struct vm_area_struct *ps_vma = pOSMMapData;
+       PVRSRV_DEVICE_NODE *psDevNode = PMR_DeviceNode(psPMR);
+       PVRSRV_ERROR eError;
+       size_t uiLength;
+       IMG_INT32 iStatus;
+       IMG_DEVMEM_OFFSET_T uiOffset;
+       IMG_UINT32 ui32CPUCacheFlags;
+       pgprot_t sPageProt;
+       IMG_CPU_PHYADDR asCpuPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+       IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+       IMG_UINT32 uiOffsetIdx;
+       IMG_UINT32 uiNumOfPFNs;
+       IMG_UINT32 uiLog2PageSize;
+       IMG_CPU_PHYADDR *psCpuPAddr;
+       IMG_BOOL *pbValid;
+       IMG_BOOL bUseMixedMap = IMG_FALSE;
+       IMG_BOOL bUseVMInsertPage = IMG_FALSE;
+
+       eError = PMRLockSysPhysAddresses(psPMR);
+       if (eError != PVRSRV_OK)
+       {
+               goto e0;
+       }
+
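+       /* Reject writeable mappings that are not shared; private (copy-on-write)
+        * writeable mappings of a PMR are not supported.
+        */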
+       if (((ps_vma->vm_flags & VM_WRITE) != 0) &&
+               ((ps_vma->vm_flags & VM_SHARED) == 0))
+       {
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto e1;
+       }
+
+       sPageProt = vm_get_page_prot(ps_vma->vm_flags);
+
+       eError = DevmemCPUCacheMode(psDevNode,
+                                   PMR_Flags(psPMR),
+                                   &ui32CPUCacheFlags);
+       if (eError != PVRSRV_OK)
+       {
+               goto e0;
+       }
+
+       switch (ui32CPUCacheFlags)
+       {
+               case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+                               sPageProt = pgprot_noncached(sPageProt);
+                               break;
+
+               case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC:
+                               sPageProt = pgprot_writecombine(sPageProt);
+                               break;
+
+               case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
+               {
+/* Do not set to write-combine for plato */
+#if !defined(PLATO_MEMORY_CONFIG)
+                               PHYS_HEAP *psPhysHeap = PMR_PhysHeap(psPMR);
+
+                               if (PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_LMA)
+                                       sPageProt = pgprot_writecombine(sPageProt);
+#endif
+                               break;
+               }
+
+               default:
+                               eError = PVRSRV_ERROR_INVALID_PARAMS;
+                               goto e1;
+       }
+       ps_vma->vm_page_prot = sPageProt;
+
+       ps_vma->vm_flags |= VM_IO;
+
+       /* Don't include the mapping in core dumps */
+       ps_vma->vm_flags |= VM_DONTDUMP;
+
+       /*
+        * Disable mremap because our nopage handler assumes all
+        * page requests have already been validated.
+        */
+       ps_vma->vm_flags |= VM_DONTEXPAND;
+
+       /* Don't allow mapping to be inherited across a process fork */
+       ps_vma->vm_flags |= VM_DONTCOPY;
+
+       uiLength = ps_vma->vm_end - ps_vma->vm_start;
+
+       /* Is this mmap targeting non-order-zero pages or does it use PFN mappings?
+        * If so, don't use vm_insert_page. */
+       uiLog2PageSize = PMR_GetLog2Contiguity(psPMR);
+
+#if defined(PMR_OS_USE_VM_INSERT_PAGE)
+       bUseVMInsertPage = (uiLog2PageSize == PAGE_SHIFT) && (PMR_GetType(psPMR) != PMR_TYPE_EXTMEM);
+#endif
+
+       /* Can we use stack allocations */
+       uiNumOfPFNs = uiLength >> uiLog2PageSize;
+       if (uiNumOfPFNs > PMR_MAX_TRANSLATION_STACK_ALLOC)
+       {
+               psCpuPAddr = OSAllocMem(uiNumOfPFNs * sizeof(*psCpuPAddr));
+               if (psCpuPAddr == NULL)
+               {
+                       eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                       goto e1;
+               }
+
+               /* Should allocation fail, clean-up here before exiting */
+               pbValid = OSAllocMem(uiNumOfPFNs * sizeof(*pbValid));
+               if (pbValid == NULL)
+               {
+                       eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+                       OSFreeMem(psCpuPAddr);
+                       goto e2;
+               }
+       }
+       else
+       {
+               psCpuPAddr = asCpuPAddr;
+               pbValid = abValid;
+       }
+
+       /* Obtain map range pfns */
+       eError = PMR_CpuPhysAddr(psPMR,
+                                uiLog2PageSize,
+                                uiNumOfPFNs,
+                                0,
+                                psCpuPAddr,
+                                pbValid);
+       if (eError != PVRSRV_OK)
+       {
+               goto e3;
+       }
+
+       /*
+        * Scan the map range for pfns without struct page* handling. If
+        * we find one, this is a mixed map, and we can't use vm_insert_page()
+        * NOTE: vm_insert_page() allows insertion of individual pages into user
+        * VMA space _only_ if said page is an order-zero allocated page.
+        */
+       if (bUseVMInsertPage)
+       {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+               pfn_t sPFN;
+#else
+               unsigned long uiPFN;
+#endif
+
+               for (uiOffsetIdx = 0; uiOffsetIdx < uiNumOfPFNs; ++uiOffsetIdx)
+               {
+                       if (pbValid[uiOffsetIdx])
+                       {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+                               sPFN = phys_to_pfn_t(psCpuPAddr[uiOffsetIdx].uiAddr, 0);
+
+                               if (!pfn_t_valid(sPFN) || page_count(pfn_t_to_page(sPFN)) == 0)
+#else
+                               uiPFN = psCpuPAddr[uiOffsetIdx].uiAddr >> PAGE_SHIFT;
+                               PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr[uiOffsetIdx].uiAddr);
+
+                               if (!pfn_valid(uiPFN) || page_count(pfn_to_page(uiPFN)) == 0)
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+                               {
+                                       bUseMixedMap = IMG_TRUE;
+                                       break;
+                               }
+                       }
+               }
+
+               if (bUseMixedMap)
+               {
+                       ps_vma->vm_flags |= VM_MIXEDMAP;
+               }
+       }
+       else
+       {
+               ps_vma->vm_flags |= VM_PFNMAP;
+       }
+
+       /* For each PMR page-size contiguous bytes, map page(s) into user VMA */
+       for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL<<uiLog2PageSize)
+       {
+               uiOffsetIdx = uiOffset >> uiLog2PageSize;
+               /*
+                * Only map in pages that are valid, any that aren't will be
+                * picked up by the nopage handler which will return a zeroed
+                * page for us.
+                */
+               if (pbValid[uiOffsetIdx])
+               {
+                       iStatus = _OSMMapPMR(psDevNode,
+                                                                ps_vma,
+                                                                uiOffset,
+                                                                &psCpuPAddr[uiOffsetIdx],
+                                                                uiLog2PageSize,
+                                                                bUseVMInsertPage,
+                                                                bUseMixedMap);
+                       if (iStatus)
+                       {
+                               /* The OS error code isn't propagated; report a generic mapping failure */
+                               eError = PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED;
+                               PVR_ASSERT(0);
+                               goto e3;
+                       }
+               }
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS)
+#define PMR_OS_BAD_CPUADDR 0x0BAD0BAD
+               {
+                       IMG_CPU_PHYADDR sPAddr;
+                       sPAddr.uiAddr = pbValid[uiOffsetIdx] ?
+                                       psCpuPAddr[uiOffsetIdx].uiAddr :
+                                       IMG_CAST_TO_CPUPHYADDR_UINT(PMR_OS_BAD_CPUADDR);
+
+                       PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES,
+                                                                               (void*)(uintptr_t)(ps_vma->vm_start + uiOffset),
+                                                                               sPAddr,
+                                                                               1<<uiLog2PageSize,
+                                                                               NULL,
+                                                                               OSGetCurrentClientProcessIDKM()
+                                                                               DEBUG_MEMSTATS_VALUES);
+               }
+#undef PMR_OS_BAD_CPUADDR
+#endif
+       }
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS)
+       PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, uiNumOfPFNs * PAGE_SIZE, OSGetCurrentClientProcessIDKM());
+#endif
+
+       if (psCpuPAddr != asCpuPAddr)
+       {
+               OSFreeMem(psCpuPAddr);
+               OSFreeMem(pbValid);
+       }
+
+       /* Remember the PMR in the VMA private data so we can unlock and unref it on close */
+       ps_vma->vm_private_data = psPMR;
+
+       /* Install open and close handlers for ref-counting */
+       ps_vma->vm_ops = &gsMMapOps;
+
+       /*
+        * Take a reference on the PMR so that it can't be freed while mapped
+        * into the user process.
+        */
+       PMRRefPMR(psPMR);
+
+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS)
+       /* record the stats */
+       MMapStatsAddOrUpdatePMR(psPMR, uiLength);
+#endif
+
+       return PVRSRV_OK;
+
+       /* Error exit paths follow */
+e3:
+       if (pbValid != abValid)
+       {
+               OSFreeMem(pbValid);
+       }
+e2:
+       if (psCpuPAddr != asCpuPAddr)
+       {
+               OSFreeMem(psCpuPAddr);
+       }
+e1:
+       PMRUnlockSysPhysAddresses(psPMR);
+e0:
+       return eError;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/private_data.h b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/private_data.h
new file mode 100644 (file)
index 0000000..60a1fac
--- /dev/null
@@ -0,0 +1,59 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linux private data structure
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(INCLUDED_PRIVATE_DATA_H)
+#define INCLUDED_PRIVATE_DATA_H
+
+#include <linux/fs.h>
+
+#include "connection_server.h"
+#include "pvr_drm.h"
+
+#define PVR_SRVKM_PRIV_DATA_IDX 0
+#define PVR_SYNC_PRIV_DATA_IDX  1
+
+#define PVR_NUM_PRIV_DATA_IDXS  2
+
+CONNECTION_DATA *LinuxServicesConnectionFromFile(struct file *pFile);
+CONNECTION_DATA *LinuxSyncConnectionFromFile(struct file *pFile);
+
+#endif /* !defined(INCLUDED_PRIVATE_DATA_H) */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_bridge_k.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_bridge_k.c
new file mode 100644 (file)
index 0000000..7211ef0
--- /dev/null
@@ -0,0 +1,582 @@
+/*************************************************************************/ /*!
+@File
+@Title          PVR Bridge Module (kernel side)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Receives calls from the user portion of services and
+                despatches them to functions in the kernel portion.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+
+#include <linux/mm_types.h>
+
+#include "img_defs.h"
+#include "pvr_bridge.h"
+#include "pvr_bridge_k.h"
+#include "connection_server.h"
+#include "syscommon.h"
+#include "pvr_debug.h"
+#include "di_server.h"
+#include "private_data.h"
+#include "linkage.h"
+#include "pmr.h"
+#include "rgx_bvnc_defs_km.h"
+#include "pvrsrv_bridge_init.h"
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0))
+#include <drm/drm_print.h>
+#else
+#include <drm/drmP.h>
+#endif
+
+#include "pvr_drm.h"
+#include "pvr_drv.h"
+
+#include "env_connection.h"
+#include <linux/sched.h>
+#include <linux/freezer.h>
+
+/* RGX: */
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+
+#include "srvcore.h"
+#include "common_srvcore_bridge.h"
+
+PVRSRV_ERROR InitDMABUFBridge(void);
+void DeinitDMABUFBridge(void);
+
+#if defined(MODULE_TEST)
+/************************************************************************/
+// additional includes for services testing
+/************************************************************************/
+#include "pvr_test_bridge.h"
+#include "kern_test.h"
+/************************************************************************/
+// end of additional includes
+/************************************************************************/
+#endif
+
+/* The mmap code has its own mutex, to prevent possible re-entrant issues
+ * when the same PMR is mapped from two different connections/processes.
+ */
+static DEFINE_MUTEX(g_sMMapMutex);
+
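+/* Driver suspend state, counts of the user and kernel threads currently
+ * executing inside the server, and an event object used to wait for them to
+ * drain while blocking client access.
+ */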
+#define _DRIVER_SUSPENDED 1
+#define _DRIVER_NOT_SUSPENDED 0
+static ATOMIC_T g_iDriverSuspended;
+static ATOMIC_T g_iNumActiveDriverThreads;
+static ATOMIC_T g_iNumActiveKernelThreads;
+static IMG_HANDLE g_hDriverThreadEventObject;
+
+#if defined(DEBUG_BRIDGE_KM)
+static DI_ENTRY *gpsDIBridgeStatsEntry;
+
+static void *BridgeStatsDIStart(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos)
+{
+       PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psDispatchTable = DIGetPrivData(psEntry);
+
+       if (psDispatchTable == NULL || *pui64Pos > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
+       {
+               return NULL;
+       }
+
+       if (*pui64Pos == 0)
+       {
+               return DI_START_TOKEN;
+       }
+
+       return &(psDispatchTable[*pui64Pos - 1]);
+}
+
+static void BridgeStatsDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData)
+{
+       PVR_UNREFERENCED_PARAMETER(psEntry);
+       PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *BridgeStatsDINext(OSDI_IMPL_ENTRY *psEntry, void *pvData,
+                               IMG_UINT64 *pui64Pos)
+{
+       PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psDispatchTable = DIGetPrivData(psEntry);
+       IMG_UINT64 uiItemAskedFor = *pui64Pos; /* pui64Pos on entry is the index to return */
+
+       PVR_UNREFERENCED_PARAMETER(pvData);
+
+       /* Is the item asked for (starts at 0) a valid table index? */
+       if (uiItemAskedFor < BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
+       {
+               (*pui64Pos)++; /* on exit it is the next DI index to ask for */
+               return &(psDispatchTable[uiItemAskedFor]);
+       }
+
+       /* We are now past the end of the table; return NULL to indicate stop */
+       return NULL;
+}
+
+static int BridgeStatsDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData)
+{
+       if (pvData == DI_START_TOKEN)
+       {
+               DIPrintf(psEntry,
+                        "Total ioctl call count = %u\n"
+                        "Total number of bytes copied via copy_from_user = %u\n"
+                        "Total number of bytes copied via copy_to_user = %u\n"
+                        "Total number of bytes copied via copy_*_user = %u\n\n"
+                        "%3s: %-60s | %-48s | %10s | %20s | %20s | %20s | %20s\n",
+                        g_BridgeGlobalStats.ui32IOCTLCount,
+                        g_BridgeGlobalStats.ui32TotalCopyFromUserBytes,
+                        g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
+                        g_BridgeGlobalStats.ui32TotalCopyFromUserBytes +
+                            g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
+                        "#",
+                        "Bridge Name",
+                        "Wrapper Function",
+                        "Call Count",
+                        "copy_from_user (B)",
+                        "copy_to_user (B)",
+                        "Total Time (us)",
+                        "Max Time (us)");
+       }
+       else if (pvData != NULL)
+       {
+               PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psTableEntry = pvData;
+               IMG_UINT32 ui32Remainder;
+
+               DIPrintf(psEntry,
+                        "%3d: %-60s   %-48s   %-10u   %-20u   %-20u   %-20" IMG_UINT64_FMTSPEC "   %-20" IMG_UINT64_FMTSPEC "\n",
+                        (IMG_UINT32)(((size_t)psTableEntry-(size_t)g_BridgeDispatchTable)/sizeof(*g_BridgeDispatchTable)),
+                        psTableEntry->pszIOCName,
+                        (psTableEntry->pfFunction != NULL) ? psTableEntry->pszFunctionName : "(null)",
+                        psTableEntry->ui32CallCount,
+                        psTableEntry->ui32CopyFromUserTotalBytes,
+                        psTableEntry->ui32CopyToUserTotalBytes,
+                        OSDivide64r64(psTableEntry->ui64TotalTimeNS, 1000, &ui32Remainder),
+                        OSDivide64r64(psTableEntry->ui64MaxTimeNS, 1000, &ui32Remainder));
+       }
+
+       return 0;
+}
+
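+/* Write handler for the bridge_stats DI entry: accepts the single character
+ * '0' and resets all bridge call statistics.
+ */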
+static IMG_INT64 BridgeStatsWrite(const IMG_CHAR *pcBuffer,
+                                  IMG_UINT64 ui64Count, IMG_UINT64 *pui64Pos,
+                                  void *pvData)
+{
+       IMG_UINT32 i;
+
+       PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO);
+       PVR_RETURN_IF_FALSE(pui64Pos != NULL && *pui64Pos == 0, -EIO);
+       PVR_RETURN_IF_FALSE(ui64Count >= 1, -EINVAL);
+       PVR_RETURN_IF_FALSE(pcBuffer[0] == '0', -EINVAL);
+       PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL);
+
+       /* Reset stats. */
+
+       BridgeGlobalStatsLock();
+
+       g_BridgeGlobalStats.ui32IOCTLCount = 0;
+       g_BridgeGlobalStats.ui32TotalCopyFromUserBytes = 0;
+       g_BridgeGlobalStats.ui32TotalCopyToUserBytes = 0;
+
+       for (i = 0; i < ARRAY_SIZE(g_BridgeDispatchTable); i++)
+       {
+               g_BridgeDispatchTable[i].ui32CallCount = 0;
+               g_BridgeDispatchTable[i].ui32CopyFromUserTotalBytes = 0;
+               g_BridgeDispatchTable[i].ui32CopyToUserTotalBytes = 0;
+               g_BridgeDispatchTable[i].ui64TotalTimeNS = 0;
+               g_BridgeDispatchTable[i].ui64MaxTimeNS = 0;
+       }
+
+       BridgeGlobalStatsUnlock();
+
+       return ui64Count;
+}
+
+#endif /* defined(DEBUG_BRIDGE_KM) */
+
+PVRSRV_ERROR OSPlatformBridgeInit(void)
+{
+       PVRSRV_ERROR eError;
+
+       eError = InitDMABUFBridge();
+       PVR_LOG_IF_ERROR(eError, "InitDMABUFBridge");
+
+       OSAtomicWrite(&g_iDriverSuspended, _DRIVER_NOT_SUSPENDED);
+       OSAtomicWrite(&g_iNumActiveDriverThreads, 0);
+       OSAtomicWrite(&g_iNumActiveKernelThreads, 0);
+
+       eError = OSEventObjectCreate("Global driver thread event object",
+                                    &g_hDriverThreadEventObject);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", error_);
+
+#if defined(DEBUG_BRIDGE_KM)
+       {
+               DI_ITERATOR_CB sIter = {
+                       .pfnStart = BridgeStatsDIStart,
+                       .pfnStop = BridgeStatsDIStop,
+                       .pfnNext = BridgeStatsDINext,
+                       .pfnShow = BridgeStatsDIShow,
+                       .pfnWrite = BridgeStatsWrite,
+
+                       // Expects '0' plus a NUL terminator
+                       .ui32WriteLenMax = ((1U)+1U)
+               };
+
+               eError = DICreateEntry("bridge_stats", NULL, &sIter,
+                                      &g_BridgeDispatchTable[0],
+                                      DI_ENTRY_TYPE_GENERIC,
+                                      &gpsDIBridgeStatsEntry);
+               PVR_LOG_GOTO_IF_ERROR(eError, "DICreateEntry", error_);
+       }
+#endif
+
+       return PVRSRV_OK;
+
+error_:
+       if (g_hDriverThreadEventObject) {
+               OSEventObjectDestroy(g_hDriverThreadEventObject);
+               g_hDriverThreadEventObject = NULL;
+       }
+
+       return eError;
+}
+
+void OSPlatformBridgeDeInit(void)
+{
+#if defined(DEBUG_BRIDGE_KM)
+       if (gpsDIBridgeStatsEntry != NULL)
+       {
+               DIDestroyEntry(gpsDIBridgeStatsEntry);
+       }
+#endif
+
+       DeinitDMABUFBridge();
+
+       if (g_hDriverThreadEventObject != NULL) {
+               OSEventObjectDestroy(g_hDriverThreadEventObject);
+               g_hDriverThreadEventObject = NULL;
+       }
+}
+
+PVRSRV_ERROR LinuxBridgeBlockClientsAccess(IMG_BOOL bShutdown)
+{
+       PVRSRV_ERROR eError;
+       IMG_HANDLE hEvent;
+
+       eError = OSEventObjectOpen(g_hDriverThreadEventObject, &hEvent);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to open event object", __func__));
+               return eError;
+       }
+
+       if (OSAtomicCompareExchange(&g_iDriverSuspended, _DRIVER_NOT_SUSPENDED,
+                                   _DRIVER_SUSPENDED) == _DRIVER_SUSPENDED)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Driver is already suspended", __func__));
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto out_put;
+       }
+
+       /* now wait for any threads currently in the server to exit */
+       while (OSAtomicRead(&g_iNumActiveDriverThreads) != 0 ||
+                  (OSAtomicRead(&g_iNumActiveKernelThreads) != 0 && !bShutdown))
+       {
+               if (OSAtomicRead(&g_iNumActiveDriverThreads) != 0)
+               {
+                       PVR_LOG(("%s: waiting for user threads (%d)", __func__,
+                               OSAtomicRead(&g_iNumActiveDriverThreads)));
+               }
+               if (OSAtomicRead(&g_iNumActiveKernelThreads) != 0)
+               {
+                       PVR_LOG(("%s: waiting for kernel threads (%d)", __func__,
+                               OSAtomicRead(&g_iNumActiveKernelThreads)));
+               }
+               /* A regular wait is used here (rather than OSEventObjectWaitKernel)
+                * because this code is executed by the caller of the .suspend/.shutdown
+                * callbacks, which is most likely PM (or another actor responsible for
+                * the suspend process). Because of that this thread shouldn't, and most
+                * likely even cannot, be frozen. */
+               OSEventObjectWait(hEvent);
+       }
+
+out_put:
+       OSEventObjectClose(hEvent);
+
+       return eError;
+}
+
+PVRSRV_ERROR LinuxBridgeUnblockClientsAccess(void)
+{
+       PVRSRV_ERROR eError;
+
+       /* resume the driver and then signal so any waiting threads wake up */
+       if (OSAtomicCompareExchange(&g_iDriverSuspended, _DRIVER_SUSPENDED,
+                                   _DRIVER_NOT_SUSPENDED) == _DRIVER_NOT_SUSPENDED)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Driver is not suspended", __func__));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       eError = OSEventObjectSignal(g_hDriverThreadEventObject);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: OSEventObjectSignal failed: %s",
+                       __func__, PVRSRVGetErrorString(eError)));
+       }
+
+       return eError;
+}
+
+static PVRSRV_ERROR LinuxBridgeSignalIfSuspended(void)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       if (OSAtomicRead(&g_iDriverSuspended) == _DRIVER_SUSPENDED)
+       {
+               eError = OSEventObjectSignal(g_hDriverThreadEventObject);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Failed to signal driver thread event"
+                               " object: %s", __func__, PVRSRVGetErrorString(eError)));
+               }
+       }
+
+       return eError;
+}
+
+void LinuxBridgeNumActiveKernelThreadsIncrement(void)
+{
+       OSAtomicIncrement(&g_iNumActiveKernelThreads);
+}
+
+void LinuxBridgeNumActiveKernelThreadsDecrement(void)
+{
+       OSAtomicDecrement(&g_iNumActiveKernelThreads);
+       PVR_ASSERT(OSAtomicRead(&g_iNumActiveKernelThreads) >= 0);
+
+       /* Signal on every decrement in case LinuxBridgeBlockClientsAccess() is
+        * waiting for the threads to freeze. (The error is logged in the called
+        * function, so ignore it here; we can't do much with it anyway.) */
+       (void) LinuxBridgeSignalIfSuspended();
+}
+
+static PVRSRV_ERROR _WaitForDriverUnsuspend(void)
+{
+       PVRSRV_ERROR eError;
+       IMG_HANDLE hEvent;
+
+       eError = OSEventObjectOpen(g_hDriverThreadEventObject, &hEvent);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to open event object", __func__));
+               return eError;
+       }
+
+       while (OSAtomicRead(&g_iDriverSuspended) == _DRIVER_SUSPENDED)
+       {
+               /* A normal (non-kernel) wait should be safe here since this
+                * thread was just unfrozen and is unlikely to be frozen
+                * again. */
+               OSEventObjectWait(hEvent);
+       }
+
+       OSEventObjectClose(hEvent);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVDriverThreadEnter(void)
+{
+       PVRSRV_ERROR eError;
+
+       /* increment first so there is no race between this value and
+        * g_iDriverSuspended in LinuxBridgeBlockClientsAccess() */
+       OSAtomicIncrement(&g_iNumActiveDriverThreads);
+
+       if (OSAtomicRead(&g_iDriverSuspended) == _DRIVER_SUSPENDED)
+       {
+               /* decrement here because the driver is going to be suspended and
+                * this thread is going to be frozen so we don't want to wait for
+                * it in LinuxBridgeBlockClientsAccess() */
+               OSAtomicDecrement(&g_iNumActiveDriverThreads);
+
+               /* during suspend procedure this will put the current thread to
+                * the freezer but during shutdown this will just return */
+               try_to_freeze();
+
+               /* If the thread was unfrozen but the flag has not yet been set
+                * back to _DRIVER_NOT_SUSPENDED, wait for it. In the shutdown
+                * case the thread was never frozen, so we will wait here
+                * indefinitely; this is fine (and in fact what we want) because
+                * no thread should be entering the driver at that point. */
+               eError = _WaitForDriverUnsuspend();
+
+               /* increment again now that the thread has entered the driver */
+               OSAtomicIncrement(&g_iNumActiveDriverThreads);
+
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Failed to wait for driver"
+                               " unsuspend: %s", __func__,
+                               PVRSRVGetErrorString(eError)));
+                       return eError;
+               }
+       }
+
+       return PVRSRV_OK;
+}
+
+void PVRSRVDriverThreadExit(void)
+{
+       OSAtomicDecrement(&g_iNumActiveDriverThreads);
+       /* if the driver is being suspended then we need to signal the
+        * event object as the thread suspending the driver is waiting
+        * for active threads to exit
+        * error is logged in called function so ignore returned error
+        */
+       (void) LinuxBridgeSignalIfSuspended();
+}
+
+int
+PVRSRV_BridgeDispatchKM(struct drm_device __maybe_unused *dev, void *arg, struct drm_file *pDRMFile)
+{
+       struct drm_pvr_srvkm_cmd *psSrvkmCmd = (struct drm_pvr_srvkm_cmd *) arg;
+       PVRSRV_BRIDGE_PACKAGE sBridgePackageKM = { 0 };
+       CONNECTION_DATA *psConnection = LinuxServicesConnectionFromFile(pDRMFile->filp);
+       PVRSRV_ERROR error;
+
+       if (psConnection == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Invalid connection data"));
+               return -EFAULT;
+       }
+
+       PVR_ASSERT(psSrvkmCmd != NULL);
+
+       DRM_DEBUG("tgid=%d, tgid_connection=%d, bridge_id=%d, func_id=%d",
+                         task_tgid_nr(current),
+                         ((ENV_CONNECTION_DATA *)PVRSRVConnectionPrivateData(psConnection))->owner,
+                         psSrvkmCmd->bridge_id,
+                         psSrvkmCmd->bridge_func_id);
+
+       error = PVRSRVDriverThreadEnter();
+       PVR_LOG_GOTO_IF_ERROR(error, "PVRSRVDriverThreadEnter", e0);
+
+       sBridgePackageKM.ui32BridgeID = psSrvkmCmd->bridge_id;
+       sBridgePackageKM.ui32FunctionID = psSrvkmCmd->bridge_func_id;
+       sBridgePackageKM.ui32Size = sizeof(sBridgePackageKM);
+       sBridgePackageKM.pvParamIn = (void __user *)(uintptr_t)psSrvkmCmd->in_data_ptr;
+       sBridgePackageKM.ui32InBufferSize = psSrvkmCmd->in_data_size;
+       sBridgePackageKM.pvParamOut = (void __user *)(uintptr_t)psSrvkmCmd->out_data_ptr;
+       sBridgePackageKM.ui32OutBufferSize = psSrvkmCmd->out_data_size;
+
+       error = BridgedDispatchKM(psConnection, &sBridgePackageKM);
+
+       PVRSRVDriverThreadExit();
+
+e0:
+       return OSPVRSRVToNativeError(error);
+}
+
+int
+PVRSRV_MMap(struct file *pFile, struct vm_area_struct *ps_vma)
+{
+       CONNECTION_DATA *psConnection = LinuxServicesConnectionFromFile(pFile);
+       IMG_HANDLE hSecurePMRHandle = (IMG_HANDLE)((uintptr_t)ps_vma->vm_pgoff);
+       PMR *psPMR;
+       PVRSRV_ERROR eError;
+
+       if (psConnection == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Invalid connection data"));
+               return -ENOENT;
+       }
+
+       eError = PVRSRVDriverThreadEnter();
+       PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVDriverThreadEnter", e0);
+
+       /*
+        * The bridge lock previously used here to protect PVRSRVLookupHandle
+        * has been replaced by a specific lock, since the handle functions now
+        * have their own lock. This change was necessary to solve the lockdep
+        * issues related to PVRSRV_MMap.
+        */
+
+       eError = PVRSRVLookupHandle(psConnection->psHandleBase,
+                                                               (void **)&psPMR,
+                                                               hSecurePMRHandle,
+                                                               PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+                                                               IMG_TRUE);
+       if (eError != PVRSRV_OK)
+       {
+               goto e0;
+       }
+
+       mutex_lock(&g_sMMapMutex);
+       /* Note: PMRMMapPMR will take a reference on the PMR.
+        * Unref the handle immediately, because we have now done
+        * the required operation on the PMR (whether it succeeded or not)
+        */
+       eError = PMRMMapPMR(psPMR, ps_vma);
+       mutex_unlock(&g_sMMapMutex);
+       PVRSRVReleaseHandle(psConnection->psHandleBase, hSecurePMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: PMRMMapPMR failed (%s)",
+                               __func__, PVRSRVGetErrorString(eError)));
+               goto e0;
+       }
+
+       PVRSRVDriverThreadExit();
+
+       return 0;
+
+e0:
+       PVRSRVDriverThreadExit();
+
+       PVR_DPF((PVR_DBG_ERROR, "Unable to translate error %d", eError));
+       PVR_ASSERT(eError != PVRSRV_OK);
+
+       /* Best-effort mapping; it is unclear whether -EAGAIN would be more
+        * appropriate here. */
+       return -ENOENT;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_bridge_k.h b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_bridge_k.h
new file mode 100644 (file)
index 0000000..859ec64
--- /dev/null
@@ -0,0 +1,103 @@
+/*************************************************************************/ /*!
+@File
+@Title          PVR Bridge Module (kernel side)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Receives calls from the user portion of services and
+                despatches them to functions in the kernel portion.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVR_BRIDGE_K_H
+#define PVR_BRIDGE_K_H
+
+#include "pvrsrv_error.h"
+
+/*!
+******************************************************************************
+ @Function      LinuxBridgeBlockClientsAccess
+ @Description   This function will wait for any existing threads in the Server
+                to exit and then disable access to the driver. New threads will
+                not be allowed to enter the Server until the driver is
+                unsuspended (see LinuxBridgeUnblockClientsAccess).
+ @Input         bShutdown indicates that the function was called from a
+                          shutdown callback and therefore it will not wait
+                          for the kernel threads to get frozen (kernel
+                          threads are not frozen during the shutdown
+                          procedure)
+ @Return        PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR LinuxBridgeBlockClientsAccess(IMG_BOOL bShutdown);
+
+/*!
+******************************************************************************
+ @Function      LinuxBridgeUnblockClientsAccess
+ @Description   This function will re-enable the bridge and allow any threads
+                waiting to enter the Server to continue.
+ @Return        PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR LinuxBridgeUnblockClientsAccess(void);
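+
+/* Usage sketch (a hypothetical pairing; LinuxBridgeBlockClientsAccess() is
+ * intended to be called from the driver's suspend/shutdown paths and
+ * LinuxBridgeUnblockClientsAccess() from the matching resume path):
+ *
+ *     suspend:  LinuxBridgeBlockClientsAccess(IMG_FALSE);
+ *     shutdown: LinuxBridgeBlockClientsAccess(IMG_TRUE);
+ *     resume:   LinuxBridgeUnblockClientsAccess();
+ */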
+
+void LinuxBridgeNumActiveKernelThreadsIncrement(void);
+void LinuxBridgeNumActiveKernelThreadsDecrement(void);
+
+/*!
+******************************************************************************
+ @Function      PVRSRVDriverThreadEnter
+ @Description   Increments number of client threads currently operating
+                in the driver's context.
+                If the driver is currently being suspended this function
+                will call try_to_freeze() on behalf of the client thread.
+                When the driver is resumed the function will exit and allow
+                the thread into the driver.
+ @Return        PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVDriverThreadEnter(void);
+
+/*!
+******************************************************************************
+ @Function      PVRSRVDriverThreadExit
+ @Description   Decrements the number of client threads currently operating
+                in the driver's context to match the call to
+                PVRSRVDriverThreadEnter().
+                The function also signals the driver thread event object so
+                that, if a suspend is in progress, the suspending thread
+                knows that the number of active threads has decreased.
+******************************************************************************/
+void PVRSRVDriverThreadExit(void);
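+
+/* Usage sketch (hypothetical handler; DoBridgeWork() stands in for the actual
+ * bridge dispatch and error handling is trimmed):
+ *
+ *     eError = PVRSRVDriverThreadEnter();
+ *     if (eError != PVRSRV_OK)
+ *             return OSPVRSRVToNativeError(eError);
+ *
+ *     eError = DoBridgeWork();
+ *
+ *     PVRSRVDriverThreadExit();
+ *     return OSPVRSRVToNativeError(eError);
+ */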
+
+#endif /* PVR_BRIDGE_K_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_buffer_sync.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_buffer_sync.c
new file mode 100644 (file)
index 0000000..b5426d4
--- /dev/null
@@ -0,0 +1,583 @@
+/*
+ * @File
+ * @Title       Linux buffer sync interface
+ * @Codingstyle LinuxKernel
+ * @Copyright   Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+ * @License     Dual MIT/GPLv2
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License Version 2 ("GPL") in which case the provisions
+ * of GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms of
+ * GPL, and not to allow others to use your version of this file under the terms
+ * of the MIT license, indicate your decision by deleting the provisions above
+ * and replace them with the notice and other provisions required by GPL as set
+ * out in the file called "GPL-COPYING" included in this distribution. If you do
+ * not delete the provisions above, a recipient may use your version of this file
+ * under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT-COPYING".
+ *
+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/dma-buf.h>
+
+#include "services_kernel_client.h"
+#include "pvr_dma_resv.h"
+#include "pvr_buffer_sync.h"
+#include "pvr_buffer_sync_shared.h"
+#include "pvr_drv.h"
+#include "pvr_fence.h"
+
+struct pvr_buffer_sync_context {
+       struct mutex ctx_lock;
+       struct pvr_fence_context *fence_ctx;
+       struct ww_acquire_ctx acquire_ctx;
+};
+
+struct pvr_buffer_sync_check_data {
+       struct dma_fence_cb base;
+
+       u32 nr_fences;
+       struct pvr_fence **fences;
+};
+
+struct pvr_buffer_sync_append_data {
+       struct pvr_buffer_sync_context *ctx;
+
+       u32 nr_pmrs;
+       struct _PMR_ **pmrs;
+       u32 *pmr_flags;
+
+       struct pvr_fence *update_fence;
+       struct pvr_buffer_sync_check_data *check_data;
+};
+
+static struct dma_resv *
+pmr_reservation_object_get(struct _PMR_ *pmr)
+{
+       struct dma_buf *dmabuf;
+
+       dmabuf = PhysmemGetDmaBuf(pmr);
+       if (dmabuf)
+               return dmabuf->resv;
+
+       return NULL;
+}
+
+static int
+pvr_buffer_sync_pmrs_lock(struct pvr_buffer_sync_context *ctx,
+                         u32 nr_pmrs,
+                         struct _PMR_ **pmrs)
+{
+       struct dma_resv *resv, *cresv = NULL, *lresv = NULL;
+       int i, err;
+       struct ww_acquire_ctx *acquire_ctx = &ctx->acquire_ctx;
+
+       mutex_lock(&ctx->ctx_lock);
+
+       ww_acquire_init(acquire_ctx, &reservation_ww_class);
+retry:
+       for (i = 0; i < nr_pmrs; i++) {
+               resv = pmr_reservation_object_get(pmrs[i]);
+               if (!resv) {
+                       pr_err("%s: Failed to get reservation object from pmr %p\n",
+                              __func__, pmrs[i]);
+                       err = -EINVAL;
+                       goto fail;
+               }
+
+               if (resv != lresv) {
+                       err = ww_mutex_lock_interruptible(&resv->lock,
+                                                         acquire_ctx);
+                       if (err) {
+                               cresv = (err == -EDEADLK) ? resv : NULL;
+                               goto fail;
+                       }
+               } else {
+                       lresv = NULL;
+               }
+       }
+
+       ww_acquire_done(acquire_ctx);
+
+       return 0;
+
+fail:
+       while (i--) {
+               resv = pmr_reservation_object_get(pmrs[i]);
+               if (WARN_ON_ONCE(!resv))
+                       continue;
+               ww_mutex_unlock(&resv->lock);
+       }
+
+       if (lresv)
+               ww_mutex_unlock(&lresv->lock);
+
+       if (cresv) {
+               err = ww_mutex_lock_slow_interruptible(&cresv->lock,
+                                                      acquire_ctx);
+               if (!err) {
+                       lresv = cresv;
+                       cresv = NULL;
+                       goto retry;
+               }
+       }
+
+       ww_acquire_fini(acquire_ctx);
+
+       mutex_unlock(&ctx->ctx_lock);
+       return err;
+}
+
+static void
+pvr_buffer_sync_pmrs_unlock(struct pvr_buffer_sync_context *ctx,
+                           u32 nr_pmrs,
+                           struct _PMR_ **pmrs)
+{
+       struct dma_resv *resv;
+       int i;
+       struct ww_acquire_ctx *acquire_ctx = &ctx->acquire_ctx;
+
+       for (i = 0; i < nr_pmrs; i++) {
+               resv = pmr_reservation_object_get(pmrs[i]);
+               if (WARN_ON_ONCE(!resv))
+                       continue;
+               ww_mutex_unlock(&resv->lock);
+       }
+
+       ww_acquire_fini(acquire_ctx);
+
+       mutex_unlock(&ctx->ctx_lock);
+}
+
+static u32
+pvr_buffer_sync_pmrs_fence_count(u32 nr_pmrs, struct _PMR_ **pmrs,
+                                u32 *pmr_flags)
+{
+       struct dma_resv *resv;
+       struct dma_resv_list *resv_list;
+       struct dma_fence *fence;
+       u32 fence_count = 0;
+       bool exclusive;
+       int i;
+
+       for (i = 0; i < nr_pmrs; i++) {
+               exclusive = !!(pmr_flags[i] & PVR_BUFFER_FLAG_WRITE);
+
+               resv = pmr_reservation_object_get(pmrs[i]);
+               if (WARN_ON_ONCE(!resv))
+                       continue;
+
+               resv_list = dma_resv_shared_list(resv);
+               fence = dma_resv_excl_fence(resv);
+
+               if (fence &&
+                   (!exclusive || !resv_list || !resv_list->shared_count))
+                       fence_count++;
+
+               if (exclusive && resv_list)
+                       fence_count += resv_list->shared_count;
+       }
+
+       return fence_count;
+}
+
+static struct pvr_buffer_sync_check_data *
+pvr_buffer_sync_check_fences_create(struct pvr_fence_context *fence_ctx,
+                                   PSYNC_CHECKPOINT_CONTEXT sync_checkpoint_ctx,
+                                   u32 nr_pmrs,
+                                   struct _PMR_ **pmrs,
+                                   u32 *pmr_flags)
+{
+       struct pvr_buffer_sync_check_data *data;
+       struct dma_resv *resv;
+       struct dma_resv_list *resv_list;
+       struct dma_fence *fence;
+       u32 fence_count;
+       bool exclusive;
+       int i, j;
+       int err;
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return NULL;
+
+       fence_count = pvr_buffer_sync_pmrs_fence_count(nr_pmrs, pmrs,
+                                                      pmr_flags);
+       if (fence_count) {
+               data->fences = kcalloc(fence_count, sizeof(*data->fences),
+                                      GFP_KERNEL);
+               if (!data->fences)
+                       goto err_check_data_free;
+       }
+
+       for (i = 0; i < nr_pmrs; i++) {
+               resv = pmr_reservation_object_get(pmrs[i]);
+               if (WARN_ON_ONCE(!resv))
+                       continue;
+
+               exclusive = !!(pmr_flags[i] & PVR_BUFFER_FLAG_WRITE);
+               if (!exclusive) {
+                       err = dma_resv_reserve_shared(resv
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
+                                                     , 1
+#endif
+                               );
+                       if (err)
+                               goto err_destroy_fences;
+               }
+
+               resv_list = dma_resv_shared_list(resv);
+               fence = dma_resv_excl_fence(resv);
+
+               if (fence &&
+                   (!exclusive || !resv_list || !resv_list->shared_count)) {
+                       data->fences[data->nr_fences++] =
+                               pvr_fence_create_from_fence(fence_ctx,
+                                                           sync_checkpoint_ctx,
+                                                           fence,
+                                                           PVRSRV_NO_FENCE,
+                                                           "exclusive check fence");
+                       if (!data->fences[data->nr_fences - 1]) {
+                               data->nr_fences--;
+                               PVR_FENCE_TRACE(fence,
+                                               "waiting on exclusive fence\n");
+                               WARN_ON(dma_fence_wait(fence, true) <= 0);
+                       }
+               }
+
+               if (exclusive && resv_list) {
+                       for (j = 0; j < resv_list->shared_count; j++) {
+                               fence = rcu_dereference_protected(resv_list->shared[j],
+                                                                 dma_resv_held(resv));
+                               data->fences[data->nr_fences++] =
+                                       pvr_fence_create_from_fence(fence_ctx,
+                                                                   sync_checkpoint_ctx,
+                                                                   fence,
+                                                                   PVRSRV_NO_FENCE,
+                                                                   "check fence");
+                               if (!data->fences[data->nr_fences - 1]) {
+                                       data->nr_fences--;
+                                       PVR_FENCE_TRACE(fence,
+                                                       "waiting on non-exclusive fence\n");
+                                       WARN_ON(dma_fence_wait(fence, true) <= 0);
+                               }
+                       }
+               }
+       }
+
+       WARN_ON((i != nr_pmrs) || (data->nr_fences != fence_count));
+
+       return data;
+
+err_destroy_fences:
+       for (i = 0; i < data->nr_fences; i++)
+               pvr_fence_destroy(data->fences[i]);
+       kfree(data->fences);
+err_check_data_free:
+       kfree(data);
+       return NULL;
+}
+
+static void
+pvr_buffer_sync_check_fences_destroy(struct pvr_buffer_sync_check_data *data)
+{
+       int i;
+
+       for (i = 0; i < data->nr_fences; i++)
+               pvr_fence_destroy(data->fences[i]);
+
+       kfree(data->fences);
+       kfree(data);
+}
+
+struct pvr_buffer_sync_context *
+pvr_buffer_sync_context_create(struct device *dev, const char *name)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct pvr_drm_private *priv = ddev->dev_private;
+       struct pvr_buffer_sync_context *ctx;
+       int err;
+
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (!ctx) {
+               err = -ENOMEM;
+               goto err_exit;
+       }
+
+       ctx->fence_ctx = pvr_fence_context_create(priv->dev_node,
+                                                 NativeSyncGetFenceStatusWq(),
+                                                 name);
+       if (!ctx->fence_ctx) {
+               err = -ENOMEM;
+               goto err_free_ctx;
+       }
+
+       mutex_init(&ctx->ctx_lock);
+
+       return ctx;
+
+err_free_ctx:
+       kfree(ctx);
+err_exit:
+       return ERR_PTR(err);
+}
+
+void
+pvr_buffer_sync_context_destroy(struct pvr_buffer_sync_context *ctx)
+{
+       pvr_fence_context_destroy(ctx->fence_ctx);
+       kfree(ctx);
+}
+
+int
+pvr_buffer_sync_resolve_and_create_fences(struct pvr_buffer_sync_context *ctx,
+                                         PSYNC_CHECKPOINT_CONTEXT sync_checkpoint_ctx,
+                                         u32 nr_pmrs,
+                                         struct _PMR_ **pmrs,
+                                         u32 *pmr_flags,
+                                         u32 *nr_fence_checkpoints_out,
+                                         PSYNC_CHECKPOINT **fence_checkpoints_out,
+                                         PSYNC_CHECKPOINT *update_checkpoints_out,
+                                         struct pvr_buffer_sync_append_data **data_out)
+{
+       struct pvr_buffer_sync_append_data *data;
+       PSYNC_CHECKPOINT *fence_checkpoints;
+       const size_t data_size = sizeof(*data);
+       const size_t pmrs_size = sizeof(*pmrs) * nr_pmrs;
+       const size_t pmr_flags_size = sizeof(*pmr_flags) * nr_pmrs;
+       int i;
+       int j;
+       int err;
+
+       if (unlikely((nr_pmrs && !(pmrs && pmr_flags)) ||
+           !nr_fence_checkpoints_out || !fence_checkpoints_out ||
+           !update_checkpoints_out))
+               return -EINVAL;
+
+       for (i = 0; i < nr_pmrs; i++) {
+               if (unlikely(!(pmr_flags[i] & PVR_BUFFER_FLAG_MASK))) {
+                       pr_err("%s: Invalid flags %#08x for pmr %p\n",
+                              __func__, pmr_flags[i], pmrs[i]);
+                       return -EINVAL;
+               }
+       }
+
+#if defined(NO_HARDWARE)
+       /*
+        * For NO_HARDWARE there's no checking or updating of sync checkpoints
+        * which means SW waits on our fences will cause a deadlock (since they
+        * will never be signalled). Avoid this by not creating any fences.
+        */
+       nr_pmrs = 0;
+#endif
+
+       if (!nr_pmrs) {
+               *nr_fence_checkpoints_out = 0;
+               *fence_checkpoints_out = NULL;
+               *update_checkpoints_out = NULL;
+               *data_out = NULL;
+
+               return 0;
+       }
+
+       data = kzalloc(data_size + pmrs_size + pmr_flags_size, GFP_KERNEL);
+       if (unlikely(!data))
+               return -ENOMEM;
+
+       data->ctx = ctx;
+       data->pmrs = (struct _PMR_ **)(void *)(data + 1);
+       data->pmr_flags = (u32 *)(void *)(data->pmrs + nr_pmrs);
+
+       /*
+        * It's expected that user space will provide a set of unique PMRs
+        * but, as a PMR can have multiple handles, it's still possible to
+        * end up here with duplicates. Take this opportunity to filter out
+        * any remaining duplicates (updating flags when necessary) before
+        * trying to process them further.
+        */
+       for (i = 0; i < nr_pmrs; i++) {
+               for (j = 0; j < data->nr_pmrs; j++) {
+                       if (data->pmrs[j] == pmrs[i]) {
+                               data->pmr_flags[j] |= pmr_flags[i];
+                               break;
+                       }
+               }
+
+               if (j == data->nr_pmrs) {
+                       data->pmrs[j] = pmrs[i];
+                       data->pmr_flags[j] = pmr_flags[i];
+                       data->nr_pmrs++;
+               }
+       }
+
+       err = pvr_buffer_sync_pmrs_lock(ctx, data->nr_pmrs, data->pmrs);
+       if (unlikely(err)) {
+               /*
+                * -EINTR is returned if a signal arrives while trying to acquire a PMR
+                * lock. In this case the operation should be retried after the signal
+                * has been serviced. As this is expected behaviour, don't print an
+                * error in this case.
+                */
+               if (err != -EINTR) {
+                       pr_err("%s: failed to lock pmrs (errno=%d)\n",
+                              __func__, err);
+               }
+               goto err_free_data;
+       }
+
+       /* create the check data */
+       data->check_data = pvr_buffer_sync_check_fences_create(ctx->fence_ctx,
+                                                        sync_checkpoint_ctx,
+                                                        data->nr_pmrs,
+                                                        data->pmrs,
+                                                        data->pmr_flags);
+       if (unlikely(!data->check_data)) {
+               err = -ENOMEM;
+               goto err_pmrs_unlock;
+       }
+
+       fence_checkpoints = kcalloc(data->check_data->nr_fences,
+                                   sizeof(*fence_checkpoints),
+                                   GFP_KERNEL);
+       if (fence_checkpoints) {
+               pvr_fence_get_checkpoints(data->check_data->fences,
+                                         data->check_data->nr_fences,
+                                         fence_checkpoints);
+       } else {
+               if (unlikely(data->check_data->nr_fences)) {
+                       err = -ENOMEM;
+                       goto err_free_check_data;
+               }
+       }
+
+       /* create the update fence */
+       data->update_fence = pvr_fence_create(ctx->fence_ctx,
+                       sync_checkpoint_ctx,
+                       SYNC_CHECKPOINT_FOREIGN_CHECKPOINT, "update fence");
+       if (unlikely(!data->update_fence)) {
+               err = -ENOMEM;
+               goto err_free_fence_checkpoints;
+       }
+
+       /*
+        * We need to clean up the fences once the HW has finished with them.
+        * We can do this using fence callbacks. However, instead of adding a
+        * callback to every fence, which would result in more work, we can
+        * simply add one to the update fence since this will be the last fence
+        * to be signalled. This callback can do all the necessary clean up.
+        *
+        * Note: we take an additional reference on the update fence in case
+        * it signals before we can add it to a reservation object.
+        */
+       PVR_FENCE_TRACE(&data->update_fence->base,
+                       "create fence calling dma_fence_get\n");
+       dma_fence_get(&data->update_fence->base);
+
+       *nr_fence_checkpoints_out = data->check_data->nr_fences;
+       *fence_checkpoints_out = fence_checkpoints;
+       *update_checkpoints_out = pvr_fence_get_checkpoint(data->update_fence);
+       *data_out = data;
+
+       return 0;
+
+err_free_fence_checkpoints:
+       kfree(fence_checkpoints);
+err_free_check_data:
+       pvr_buffer_sync_check_fences_destroy(data->check_data);
+err_pmrs_unlock:
+       pvr_buffer_sync_pmrs_unlock(ctx, data->nr_pmrs, data->pmrs);
+err_free_data:
+       kfree(data);
+       return err;
+}
+
+void
+pvr_buffer_sync_kick_succeeded(struct pvr_buffer_sync_append_data *data)
+{
+       struct dma_resv *resv;
+       int i;
+
+       dma_fence_enable_sw_signaling(&data->update_fence->base);
+
+       for (i = 0; i < data->nr_pmrs; i++) {
+               resv = pmr_reservation_object_get(data->pmrs[i]);
+               if (WARN_ON_ONCE(!resv))
+                       continue;
+
+               if (data->pmr_flags[i] & PVR_BUFFER_FLAG_WRITE) {
+                       PVR_FENCE_TRACE(&data->update_fence->base,
+                                       "added exclusive fence (%s) to resv %p\n",
+                                       data->update_fence->name, resv);
+                       dma_resv_add_excl_fence(resv,
+                                               &data->update_fence->base);
+               } else if (data->pmr_flags[i] & PVR_BUFFER_FLAG_READ) {
+                       PVR_FENCE_TRACE(&data->update_fence->base,
+                                       "added non-exclusive fence (%s) to resv %p\n",
+                                       data->update_fence->name, resv);
+                       dma_resv_add_shared_fence(resv,
+                                                 &data->update_fence->base);
+               }
+       }
+
+       /*
+        * Now that the fence has been added to the necessary
+        * reservation objects we can safely drop the extra reference
+        * we took in pvr_buffer_sync_resolve_and_create_fences().
+        */
+       dma_fence_put(&data->update_fence->base);
+       pvr_buffer_sync_pmrs_unlock(data->ctx, data->nr_pmrs,
+                                       data->pmrs);
+
+       /* destroy the check fences */
+       pvr_buffer_sync_check_fences_destroy(data->check_data);
+       /* destroy the update fence */
+       pvr_fence_destroy(data->update_fence);
+
+       /* free the append data */
+       kfree(data);
+}
+
+void
+pvr_buffer_sync_kick_failed(struct pvr_buffer_sync_append_data *data)
+{
+       /* Drop the extra reference we took on the update fence in
+        * pvr_buffer_sync_resolve_and_create_fences().
+        */
+       dma_fence_put(&data->update_fence->base);
+
+       if (data->nr_pmrs > 0)
+               pvr_buffer_sync_pmrs_unlock(data->ctx, data->nr_pmrs,
+                                           data->pmrs);
+
+       /* destroy the check fences */
+       pvr_buffer_sync_check_fences_destroy(data->check_data);
+       /* destroy the update fence */
+       pvr_fence_destroy(data->update_fence);
+
+       /* free the append data */
+       kfree(data);
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_buffer_sync.h b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_buffer_sync.h
new file mode 100644 (file)
index 0000000..b6aadf9
--- /dev/null
@@ -0,0 +1,125 @@
+/*
+ * @File        pvr_buffer_sync.h
+ * @Title       PowerVR Linux buffer sync interface
+ * @Codingstyle LinuxKernel
+ * @Copyright   Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+ * @License     Dual MIT/GPLv2
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License Version 2 ("GPL") in which case the provisions
+ * of GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms of
+ * GPL, and not to allow others to use your version of this file under the terms
+ * of the MIT license, indicate your decision by deleting the provisions above
+ * and replace them with the notice and other provisions required by GPL as set
+ * out in the file called "GPL-COPYING" included in this distribution. If you do
+ * not delete the provisions above, a recipient may use your version of this file
+ * under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT-COPYING".
+ *
+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef PVR_BUFFER_SYNC_H
+#define PVR_BUFFER_SYNC_H
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/types.h>
+
+struct _PMR_;
+struct pvr_buffer_sync_context;
+struct pvr_buffer_sync_append_data;
+
+/**
+ * pvr_buffer_sync_context_create - creates a buffer sync context
+ * @dev: Linux device
+ * @name: context name (used for debugging)
+ *
+ * pvr_buffer_sync_context_destroy() should be used to clean up the buffer
+ * sync context.
+ *
+ * Return: A buffer sync context on success, or an ERR_PTR() encoded error
+ * on failure.
+ */
+struct pvr_buffer_sync_context *
+pvr_buffer_sync_context_create(struct device *dev, const char *name);
+
+/**
+ * pvr_buffer_sync_context_destroy() - frees a buffer sync context
+ * @ctx: buffer sync context
+ */
+void
+pvr_buffer_sync_context_destroy(struct pvr_buffer_sync_context *ctx);
+
+/**
+ * pvr_buffer_sync_resolve_and_create_fences() - create checkpoints from
+ *                                               buffers
+ * @ctx: buffer sync context
+ * @sync_checkpoint_ctx: context in which to create sync checkpoints
+ * @nr_pmrs: number of buffer objects (PMRs)
+ * @pmrs: buffer array
+ * @pmr_flags: internal flags
+ * @nr_fence_checkpoints_out: returned number of fence sync checkpoints
+ * @fence_checkpoints_out: returned array of fence sync checkpoints
+ * @update_checkpoint_out: returned update sync checkpoint
+ * @data_out: returned buffer sync data
+ *
+ * After this call, either pvr_buffer_sync_kick_succeeded() or
+ * pvr_buffer_sync_kick_failed() must be called.
+ *
+ * Return: 0 on success or an error code otherwise.
+ */
+int
+pvr_buffer_sync_resolve_and_create_fences(struct pvr_buffer_sync_context *ctx,
+                                         PSYNC_CHECKPOINT_CONTEXT sync_checkpoint_ctx,
+                                         u32 nr_pmrs,
+                                         struct _PMR_ **pmrs,
+                                         u32 *pmr_flags,
+                                         u32 *nr_fence_checkpoints_out,
+                                         PSYNC_CHECKPOINT **fence_checkpoints_out,
+                                         PSYNC_CHECKPOINT *update_checkpoint_out,
+                                         struct pvr_buffer_sync_append_data **data_out);
+
+/**
+ * pvr_buffer_sync_kick_succeeded() - cleans up after a successful kick
+ *                                    operation
+ * @data: buffer sync data returned by
+ *        pvr_buffer_sync_resolve_and_create_fences()
+ *
+ * Should only be called following pvr_buffer_sync_resolve_and_create_fences().
+ */
+void
+pvr_buffer_sync_kick_succeeded(struct pvr_buffer_sync_append_data *data);
+
+/**
+ * pvr_buffer_sync_kick_failed() - cleans up after a failed kick operation
+ * @data: buffer sync data returned by
+ *        pvr_buffer_sync_resolve_and_create_fences()
+ *
+ * Should only be called following pvr_buffer_sync_resolve_and_create_fences().
+ */
+void
+pvr_buffer_sync_kick_failed(struct pvr_buffer_sync_append_data *data);
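+
+/* Usage sketch (error handling trimmed; submit_job() is a placeholder for the
+ * caller's actual kick path). When no PMRs are passed, *data_out is NULL and
+ * no cleanup call is needed:
+ *
+ *     err = pvr_buffer_sync_resolve_and_create_fences(ctx, checkpoint_ctx,
+ *                                                     nr_pmrs, pmrs, flags,
+ *                                                     &nr_checkpoints,
+ *                                                     &checkpoints,
+ *                                                     &update_checkpoint,
+ *                                                     &data);
+ *     if (!err && data) {
+ *             if (submit_job(checkpoints, nr_checkpoints, update_checkpoint))
+ *                     pvr_buffer_sync_kick_failed(data);
+ *             else
+ *                     pvr_buffer_sync_kick_succeeded(data);
+ *     }
+ */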
+
+#endif /* PVR_BUFFER_SYNC_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_counting_timeline.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_counting_timeline.c
new file mode 100644 (file)
index 0000000..3fa8903
--- /dev/null
@@ -0,0 +1,308 @@
+/*
+ * @File
+ * @Title       PowerVR Linux software "counting" timeline fence implementation
+ * @Codingstyle LinuxKernel
+ * @Copyright   Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+ * @License     Dual MIT/GPLv2
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License Version 2 ("GPL") in which case the provisions
+ * of GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms of
+ * GPL, and not to allow others to use your version of this file under the terms
+ * of the MIT license, indicate your decision by deleting the provisions above
+ * and replace them with the notice and other provisions required by GPL as set
+ * out in the file called "GPL-COPYING" included in this distribution. If you do
+ * not delete the provisions above, a recipient may use your version of this file
+ * under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT-COPYING".
+ *
+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/kref.h>
+
+#include "services_kernel_client.h"
+#include "pvr_counting_timeline.h"
+#include "pvr_sw_fence.h"
+
+#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, fmt, ...) \
+       do {                                                             \
+               if (pfnDumpDebugPrintf)                                  \
+                       pfnDumpDebugPrintf(pvDumpDebugFile, fmt,         \
+                                          ## __VA_ARGS__);              \
+               else                                                     \
+                       pr_err(fmt "\n", ## __VA_ARGS__);                \
+       } while (0)
+
+struct pvr_counting_fence_timeline {
+       struct pvr_sw_fence_context *context;
+
+       void *dbg_request_handle;
+
+       spinlock_t active_fences_lock;
+       u64 current_value; /* guarded by active_fences_lock */
+       u64 next_value; /* guarded by active_fences_lock */
+       struct list_head active_fences;
+
+       struct kref kref;
+};
+
+struct pvr_counting_fence {
+       u64 value;
+       struct dma_fence *fence;
+       struct list_head active_list_entry;
+};
+
+void pvr_counting_fence_timeline_dump_timeline(
+       void *data,
+       DUMPDEBUG_PRINTF_FUNC *dump_debug_printf,
+       void *dump_debug_file)
+{
+       struct pvr_counting_fence_timeline *timeline =
+               (struct pvr_counting_fence_timeline *) data;
+       unsigned long flags;
+
+       spin_lock_irqsave(&timeline->active_fences_lock, flags);
+
+       PVR_DUMPDEBUG_LOG(dump_debug_printf,
+                                         dump_debug_file,
+                                         "TL:%s SeqNum: %llu/%llu",
+                                         pvr_sw_fence_context_name(
+                                                         timeline->context),
+                                         timeline->current_value,
+                                         timeline->next_value);
+
+       spin_unlock_irqrestore(&timeline->active_fences_lock, flags);
+}
+
+static void
+pvr_counting_fence_timeline_debug_request(void *data, u32 verbosity,
+                       DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                       void *pvDumpDebugFile)
+{
+       struct pvr_counting_fence_timeline *timeline =
+               (struct pvr_counting_fence_timeline *)data;
+       struct pvr_counting_fence *obj;
+       unsigned long flags;
+       char value[128];
+
+       if (DD_VERB_LVL_ENABLED(verbosity, DEBUG_REQUEST_VERBOSITY_MEDIUM)) {
+               spin_lock_irqsave(&timeline->active_fences_lock, flags);
+               pvr_sw_fence_context_value_str(timeline->context, value,
+                                              sizeof(value));
+               PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+                                 "sw: %s @%s cur=%llu",
+                                 pvr_sw_fence_context_name(timeline->context),
+                                 value, timeline->current_value);
+               list_for_each_entry(obj, &timeline->active_fences,
+                                   active_list_entry) {
+                       obj->fence->ops->fence_value_str(obj->fence,
+                                                        value, sizeof(value));
+                       PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+                                         " @%s: val=%llu", value, obj->value);
+               }
+               spin_unlock_irqrestore(&timeline->active_fences_lock, flags);
+       }
+}
+
+struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_create(
+       const char *name)
+{
+       PVRSRV_ERROR srv_err;
+       struct pvr_counting_fence_timeline *timeline =
+               kzalloc(sizeof(*timeline), GFP_KERNEL);
+
+       if (!timeline)
+               goto err_out;
+
+       timeline->context = pvr_sw_fence_context_create(name,
+                                                       "pvr_sw_sync");
+       if (!timeline->context)
+               goto err_free_timeline;
+
+       srv_err = PVRSRVRegisterDriverDbgRequestNotify(
+                               &timeline->dbg_request_handle,
+                               pvr_counting_fence_timeline_debug_request,
+                               DEBUG_REQUEST_LINUXFENCE,
+                               timeline);
+       if (srv_err != PVRSRV_OK) {
+               pr_err("%s: failed to register debug request callback (%s)\n",
+                          __func__, PVRSRVGetErrorString(srv_err));
+               goto err_free_timeline_ctx;
+       }
+
+       timeline->current_value = 0;
+       timeline->next_value = 1;
+       kref_init(&timeline->kref);
+       spin_lock_init(&timeline->active_fences_lock);
+       INIT_LIST_HEAD(&timeline->active_fences);
+
+err_out:
+       return timeline;
+
+err_free_timeline_ctx:
+       pvr_sw_fence_context_destroy(timeline->context);
+
+err_free_timeline:
+       kfree(timeline);
+       timeline = NULL;
+       goto err_out;
+}
+
+void pvr_counting_fence_timeline_force_complete(
+       struct pvr_counting_fence_timeline *timeline)
+{
+       struct list_head *entry, *tmp;
+       unsigned long flags;
+
+       spin_lock_irqsave(&timeline->active_fences_lock, flags);
+
+#if defined(DEBUG) && !defined(SUPPORT_AUTOVZ)
+       /* This is just a safety measure. Normally we should never see any
+        * unsignaled sw fences when we come here. Warn if we still do!
+        */
+       WARN_ON(!list_empty(&timeline->active_fences));
+#endif
+
+       list_for_each_safe(entry, tmp, &timeline->active_fences) {
+               struct pvr_counting_fence *fence =
+                       list_entry(entry, struct pvr_counting_fence,
+                       active_list_entry);
+               dma_fence_signal(fence->fence);
+               dma_fence_put(fence->fence);
+               fence->fence = NULL;
+               list_del(&fence->active_list_entry);
+               kfree(fence);
+       }
+       spin_unlock_irqrestore(&timeline->active_fences_lock, flags);
+}
+
+static void pvr_counting_fence_timeline_destroy(
+       struct kref *kref)
+{
+       struct pvr_counting_fence_timeline *timeline =
+               container_of(kref, struct pvr_counting_fence_timeline, kref);
+
+       WARN_ON(!list_empty(&timeline->active_fences));
+
+       PVRSRVUnregisterDriverDbgRequestNotify(timeline->dbg_request_handle);
+
+       pvr_sw_fence_context_destroy(timeline->context);
+       kfree(timeline);
+}
+
+void pvr_counting_fence_timeline_put(
+       struct pvr_counting_fence_timeline *timeline)
+{
+       kref_put(&timeline->kref, pvr_counting_fence_timeline_destroy);
+}
+
+struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_get(
+       struct pvr_counting_fence_timeline *timeline)
+{
+       if (!timeline)
+               return NULL;
+       kref_get(&timeline->kref);
+       return timeline;
+}
+
+struct dma_fence *pvr_counting_fence_create(
+       struct pvr_counting_fence_timeline *timeline, u64 *sync_pt_idx)
+{
+       unsigned long flags;
+       struct dma_fence *sw_fence;
+       struct pvr_counting_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL);
+
+       if (!fence)
+               return NULL;
+
+       sw_fence = pvr_sw_fence_create(timeline->context);
+       if (!sw_fence)
+               goto err_free_fence;
+
+       fence->fence = dma_fence_get(sw_fence);
+
+       spin_lock_irqsave(&timeline->active_fences_lock, flags);
+
+       fence->value = timeline->next_value++;
+       if (sync_pt_idx)
+               *sync_pt_idx = fence->value;
+
+       list_add_tail(&fence->active_list_entry, &timeline->active_fences);
+
+       spin_unlock_irqrestore(&timeline->active_fences_lock, flags);
+
+       /* Counting fences can be signalled any time after creation */
+       dma_fence_enable_sw_signaling(sw_fence);
+
+       return sw_fence;
+
+err_free_fence:
+       kfree(fence);
+       return NULL;
+}
+
+bool pvr_counting_fence_timeline_inc(
+       struct pvr_counting_fence_timeline *timeline, u64 *sync_pt_idx)
+{
+       struct list_head *entry, *tmp;
+       unsigned long flags;
+       bool res;
+
+       spin_lock_irqsave(&timeline->active_fences_lock, flags);
+
+       if (timeline->current_value == timeline->next_value-1) {
+               res = false;
+               goto exit_unlock;
+       }
+
+       timeline->current_value++;
+
+       if (sync_pt_idx)
+               *sync_pt_idx = timeline->current_value;
+
+       list_for_each_safe(entry, tmp, &timeline->active_fences) {
+               struct pvr_counting_fence *fence =
+                       list_entry(entry, struct pvr_counting_fence,
+                       active_list_entry);
+               if (fence->value <= timeline->current_value) {
+                       dma_fence_signal(fence->fence);
+                       dma_fence_put(fence->fence);
+                       fence->fence = NULL;
+                       list_del(&fence->active_list_entry);
+                       kfree(fence);
+               }
+       }
+
+       res = true;
+
+exit_unlock:
+       spin_unlock_irqrestore(&timeline->active_fences_lock, flags);
+
+       return res;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_counting_timeline.h b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_counting_timeline.h
new file mode 100644 (file)
index 0000000..2cb8db1
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * @File
+ * @Codingstyle LinuxKernel
+ * @Copyright   Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+ * @License     Dual MIT/GPLv2
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License Version 2 ("GPL") in which case the provisions
+ * of GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms of
+ * GPL, and not to allow others to use your version of this file under the terms
+ * of the MIT license, indicate your decision by deleting the provisions above
+ * and replace them with the notice and other provisions required by GPL as set
+ * out in the file called "GPL-COPYING" included in this distribution. If you do
+ * not delete the provisions above, a recipient may use your version of this file
+ * under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT-COPYING".
+ *
+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#if !defined(__PVR_COUNTING_TIMELINE_H__)
+#define __PVR_COUNTING_TIMELINE_H__
+
+#include "pvr_linux_fence.h"
+
+struct pvr_counting_fence_timeline;
+
+void pvr_counting_fence_timeline_dump_timeline(
+       void *data,
+       DUMPDEBUG_PRINTF_FUNC *dump_debug_printf,
+       void *dump_debug_file);
+
+struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_create(
+       const char *name);
+void pvr_counting_fence_timeline_put(
+       struct pvr_counting_fence_timeline *fence_timeline);
+struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_get(
+       struct pvr_counting_fence_timeline *fence_timeline);
+struct dma_fence *pvr_counting_fence_create(
+       struct pvr_counting_fence_timeline *fence_timeline, u64 *sync_pt_idx);
+bool pvr_counting_fence_timeline_inc(
+       struct pvr_counting_fence_timeline *fence_timeline, u64 *sync_pt_idx);
+void pvr_counting_fence_timeline_force_complete(
+       struct pvr_counting_fence_timeline *fence_timeline);
+
+#endif /* !defined(__PVR_COUNTING_TIMELINE_H__) */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_debug.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_debug.c
new file mode 100644 (file)
index 0000000..8cd34dc
--- /dev/null
@@ -0,0 +1,481 @@
+/*************************************************************************/ /*!
+@File
+@Title          Debug Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provides kernel side Debug Functionality.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/sched.h>
+#include <linux/moduleparam.h>
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "linkage.h"
+#include "pvrsrv.h"
+#include "osfunc.h"
+#include "di_server.h"
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+
+/******** BUFFERED LOG MESSAGES ********/
+
+/* Because we don't want to have to handle CCB wrapping, each buffered
+ * message is rounded up to PVRSRV_DEBUG_CCB_MESG_MAX bytes. This means a
+ * fixed number of messages can be stored, regardless of individual message
+ * length.
+ */
+
+#if defined(PVRSRV_DEBUG_CCB_MAX)
+
+#define PVRSRV_DEBUG_CCB_MESG_MAX      PVR_MAX_DEBUG_MESSAGE_LEN
+
+typedef struct
+{
+       const IMG_CHAR *pszFile;
+       IMG_INT iLine;
+       IMG_UINT32 ui32TID;
+       IMG_UINT32 ui32PID;
+       IMG_CHAR pcMesg[PVRSRV_DEBUG_CCB_MESG_MAX];
+       struct timeval sTimeVal;
+}
+PVRSRV_DEBUG_CCB;
+
+static PVRSRV_DEBUG_CCB gsDebugCCB[PVRSRV_DEBUG_CCB_MAX];
+
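+/* Index of the next CCB slot to write; wraps modulo PVRSRV_DEBUG_CCB_MAX */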
+static IMG_UINT giOffset;
+
+/* protects access to gsDebugCCB */
+static DEFINE_SPINLOCK(gsDebugCCBLock);
+
+static void
+AddToBufferCCB(const IMG_CHAR *pszFileName, IMG_UINT32 ui32Line,
+                          const IMG_CHAR *szBuffer)
+{
+       unsigned long uiFlags;
+
+       spin_lock_irqsave(&gsDebugCCBLock, uiFlags);
+
+       gsDebugCCB[giOffset].pszFile = pszFileName;
+       gsDebugCCB[giOffset].iLine   = ui32Line;
+       gsDebugCCB[giOffset].ui32TID = current->pid;
+       gsDebugCCB[giOffset].ui32PID = current->tgid;
+
+       do_gettimeofday(&gsDebugCCB[giOffset].sTimeVal);
+
+       OSStringLCopy(gsDebugCCB[giOffset].pcMesg, szBuffer,
+                     PVRSRV_DEBUG_CCB_MESG_MAX);
+
+       giOffset = (giOffset + 1) % PVRSRV_DEBUG_CCB_MAX;
+
+       spin_unlock_irqrestore(&gsDebugCCBLock, uiFlags);
+}
+
+void PVRSRVDebugPrintfDumpCCB(void)
+{
+       int i;
+       unsigned long uiFlags;
+
+       spin_lock_irqsave(&gsDebugCCBLock, uiFlags);
+
+       for (i = 0; i < PVRSRV_DEBUG_CCB_MAX; i++)
+       {
+               PVRSRV_DEBUG_CCB *psDebugCCBEntry =
+                       &gsDebugCCB[(giOffset + i) % PVRSRV_DEBUG_CCB_MAX];
+
+               /* Early on, we won't have PVRSRV_DEBUG_CCB_MAX messages */
+               if (!psDebugCCBEntry->pszFile)
+               {
+                       continue;
+               }
+
+               printk(KERN_ERR "%s:%d: (%ld.%ld, tid=%u, pid=%u) %s\n",
+                          psDebugCCBEntry->pszFile,
+                          psDebugCCBEntry->iLine,
+                          (long)psDebugCCBEntry->sTimeVal.tv_sec,
+                          (long)psDebugCCBEntry->sTimeVal.tv_usec,
+                          psDebugCCBEntry->ui32TID,
+                          psDebugCCBEntry->ui32PID,
+                          psDebugCCBEntry->pcMesg);
+
+               /* Clear this entry so it is not printed again next time. */
+               psDebugCCBEntry->pszFile = NULL;
+       }
+
+       spin_unlock_irqrestore(&gsDebugCCBLock, uiFlags);
+}
+
+#else /* defined(PVRSRV_DEBUG_CCB_MAX) */
+
+static INLINE void
+AddToBufferCCB(const IMG_CHAR *pszFileName, IMG_UINT32 ui32Line,
+                          const IMG_CHAR *szBuffer)
+{
+       (void)pszFileName;
+       (void)szBuffer;
+       (void)ui32Line;
+}
+
+void PVRSRVDebugPrintfDumpCCB(void)
+{
+       /* Not available */
+}
+
+#endif /* defined(PVRSRV_DEBUG_CCB_MAX) */
+
+static IMG_UINT32 gPVRDebugLevel =
+       (
+        DBGPRIV_FATAL | DBGPRIV_ERROR | DBGPRIV_WARNING
+#if defined(PVRSRV_DEBUG_CCB_MAX)
+        | DBGPRIV_BUFFERED
+#endif /* defined(PVRSRV_DEBUG_CCB_MAX) */
+#if defined(PVR_DPF_ADHOC_DEBUG_ON)
+        | DBGPRIV_DEBUG
+#endif /* defined(PVR_DPF_ADHOC_DEBUG_ON) */
+       );
+
+module_param(gPVRDebugLevel, uint, 0644);
+MODULE_PARM_DESC(gPVRDebugLevel,
+                 "Sets the level of debug output (default 0x7)");
+
+IMG_UINT32 OSDebugLevel(void)
+{
+       return gPVRDebugLevel;
+}
+
+void OSSetDebugLevel(IMG_UINT32 ui32DebugLevel)
+{
+       gPVRDebugLevel = ui32DebugLevel;
+}
+
+IMG_BOOL OSIsDebugLevel(IMG_UINT32 ui32DebugLevel)
+{
+       return (gPVRDebugLevel & ui32DebugLevel) != 0;
+}
+
+#else /* defined(PVRSRV_NEED_PVR_DPF) */
+
+IMG_UINT32 OSDebugLevel(void)
+{
+       return 0;
+}
+
+void OSSetDebugLevel(IMG_UINT32 ui32DebugLevel)
+{
+       PVR_UNREFERENCED_PARAMETER(ui32DebugLevel);
+}
+
+IMG_BOOL OSIsDebugLevel(IMG_UINT32 ui32DebugLevel)
+{
+       PVR_UNREFERENCED_PARAMETER(ui32DebugLevel);
+       return IMG_FALSE;
+}
+
+#endif /* defined(PVRSRV_NEED_PVR_DPF) */
+
+#define        PVR_MAX_MSG_LEN PVR_MAX_DEBUG_MESSAGE_LEN
+
+/* Buffer used to format debug messages */
+static IMG_CHAR gszBuffer[PVR_MAX_MSG_LEN + 1];
+
+/* The lock is used to control access to gszBuffer */
+static DEFINE_SPINLOCK(gsDebugLock);
+
+/*
+ * Append a string to a buffer using formatted conversion.
+ * The function takes a variable number of arguments, pointed
+ * to by the var args list.
+ */
+__printf(3, 0)
+static IMG_BOOL VBAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR *pszFormat, va_list VArgs)
+{
+       IMG_UINT32 ui32Used;
+       IMG_UINT32 ui32Space;
+       IMG_INT32 i32Len;
+
+       ui32Used = OSStringLength(pszBuf);
+       BUG_ON(ui32Used >= ui32BufSiz);
+       ui32Space = ui32BufSiz - ui32Used;
+
+       i32Len = vsnprintf(&pszBuf[ui32Used], ui32Space, pszFormat, VArgs);
+       pszBuf[ui32BufSiz - 1] = 0;
+
+       /* Return true if string was truncated */
+       return i32Len < 0 || i32Len >= (IMG_INT32)ui32Space;
+}
+
+/*************************************************************************/ /*!
+@Function       PVRSRVReleasePrintf
+@Description    To output an important message to the user in release builds
+@Input          pszFormat   The message format string
+@Input          ...         Zero or more arguments for use by the format string
+*/ /**************************************************************************/
+void PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...)
+{
+       va_list vaArgs;
+       unsigned long ulLockFlags = 0;
+       IMG_CHAR *pszBuf = gszBuffer;
+       IMG_UINT32 ui32BufSiz = sizeof(gszBuffer);
+       IMG_INT32  result;
+
+       va_start(vaArgs, pszFormat);
+
+       spin_lock_irqsave(&gsDebugLock, ulLockFlags);
+
+       result = snprintf(pszBuf, (ui32BufSiz - 2), "PVR_K:  %u: ", current->pid);
+       PVR_ASSERT(result > 0);
+       ui32BufSiz -= result;
+
+       if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs))
+       {
+               printk(KERN_INFO "%s (truncated)\n", pszBuf);
+       }
+       else
+       {
+               printk(KERN_INFO "%s\n", pszBuf);
+       }
+
+       spin_unlock_irqrestore(&gsDebugLock, ulLockFlags);
+       va_end(vaArgs);
+}
+
+#if defined(PVRSRV_NEED_PVR_TRACE)
+
+/*************************************************************************/ /*!
+@Function       PVRTrace
+@Description    To output a debug message to the user
+@Input          pszFormat   The message format string
+@Input          ...         Zero or more arguments for use by the format string
+*/ /**************************************************************************/
+void PVRSRVTrace(const IMG_CHAR *pszFormat, ...)
+{
+       va_list VArgs;
+       unsigned long ulLockFlags = 0;
+       IMG_CHAR *pszBuf = gszBuffer;
+       IMG_UINT32 ui32BufSiz = sizeof(gszBuffer);
+       IMG_INT32  result;
+
+       va_start(VArgs, pszFormat);
+
+       spin_lock_irqsave(&gsDebugLock, ulLockFlags);
+
+       result = snprintf(pszBuf, (ui32BufSiz - 2), "PVR: %u: ", current->pid);
+       PVR_ASSERT(result > 0);
+       ui32BufSiz -= result;
+
+       if (VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs))
+       {
+               printk(KERN_ERR "PVR_K:(Message Truncated): %s\n", pszBuf);
+       }
+       else
+       {
+               printk(KERN_ERR "%s\n", pszBuf);
+       }
+
+       spin_unlock_irqrestore(&gsDebugLock, ulLockFlags);
+
+       va_end(VArgs);
+}
+
+#endif /* defined(PVRSRV_NEED_PVR_TRACE) */
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+
+/*
+ * Append a string to a buffer using formatted conversion.
+ * The function takes a variable number of arguments, calling
+ * VBAppend to do the actual work.
+ */
+__printf(3, 4)
+static IMG_BOOL BAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR *pszFormat, ...)
+{
+       va_list VArgs;
+       IMG_BOOL bTrunc;
+
+       va_start (VArgs, pszFormat);
+
+       bTrunc = VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs);
+
+       va_end (VArgs);
+
+       return bTrunc;
+}
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDebugPrintf
+@Description    To output a debug message to the user
+@Input          uDebugLevel The current debug level
+@Input          pszFile     The source file generating the message
+@Input          uLine       The line of the source file
+@Input          pszFormat   The message format string
+@Input          ...         Zero or more arguments for use by the format string
+*/ /**************************************************************************/
+void PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel,
+                          const IMG_CHAR *pszFullFileName,
+                          IMG_UINT32 ui32Line,
+                          const IMG_CHAR *pszFormat,
+                          ...)
+{
+       const IMG_CHAR *pszFileName = pszFullFileName;
+       IMG_CHAR *pszLeafName;
+       va_list vaArgs;
+       unsigned long ulLockFlags = 0;
+       IMG_CHAR *pszBuf = gszBuffer;
+       IMG_UINT32 ui32BufSiz = sizeof(gszBuffer);
+
+       if (!(gPVRDebugLevel & ui32DebugLevel))
+       {
+               return;
+       }
+
+       va_start(vaArgs, pszFormat);
+
+       spin_lock_irqsave(&gsDebugLock, ulLockFlags);
+
+       switch (ui32DebugLevel)
+       {
+               case DBGPRIV_FATAL:
+               {
+                       OSStringLCopy(pszBuf, "PVR_K:(Fatal): ", ui32BufSiz);
+                       PVRSRV_REPORT_ERROR();
+                       break;
+               }
+               case DBGPRIV_ERROR:
+               {
+                       OSStringLCopy(pszBuf, "PVR_K:(Error): ", ui32BufSiz);
+                       PVRSRV_REPORT_ERROR();
+                       break;
+               }
+               case DBGPRIV_WARNING:
+               {
+                       OSStringLCopy(pszBuf, "PVR_K:(Warn):  ", ui32BufSiz);
+                       break;
+               }
+               case DBGPRIV_MESSAGE:
+               {
+                       OSStringLCopy(pszBuf, "PVR_K:(Mesg):  ", ui32BufSiz);
+                       break;
+               }
+               case DBGPRIV_VERBOSE:
+               {
+                       OSStringLCopy(pszBuf, "PVR_K:(Verb):  ", ui32BufSiz);
+                       break;
+               }
+               case DBGPRIV_DEBUG:
+               {
+                       OSStringLCopy(pszBuf, "PVR_K:(Debug): ", ui32BufSiz);
+                       break;
+               }
+               case DBGPRIV_CALLTRACE:
+               case DBGPRIV_ALLOC:
+               case DBGPRIV_BUFFERED:
+               default:
+               {
+                       OSStringLCopy(pszBuf, "PVR_K: ", ui32BufSiz);
+                       break;
+               }
+       }
+
+       if (current->pid == task_tgid_nr(current))
+       {
+               (void) BAppend(pszBuf, ui32BufSiz, "%5u: ", current->pid);
+       }
+       else
+       {
+               (void) BAppend(pszBuf, ui32BufSiz, "%5u-%5u: ", task_tgid_nr(current) /* process id */, current->pid /* thread id */);
+       }
+
+       if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs))
+       {
+               printk(KERN_ERR "%s (truncated)\n", pszBuf);
+       }
+       else
+       {
+               IMG_BOOL bTruncated = IMG_FALSE;
+
+#if !defined(__sh__)
+               pszLeafName = (IMG_CHAR *)strrchr (pszFileName, '/');
+
+               if (pszLeafName)
+               {
+                       pszFileName = pszLeafName+1;
+               }
+#endif /* __sh__ */
+
+#if defined(DEBUG)
+               {
+                       static const IMG_CHAR *lastFile;
+
+                       if (lastFile == pszFileName)
+                       {
+                               bTruncated = BAppend(pszBuf, ui32BufSiz, " [%u]", ui32Line);
+                       }
+                       else
+                       {
+                               bTruncated = BAppend(pszBuf, ui32BufSiz, " [%s:%u]", pszFileName, ui32Line);
+                               lastFile = pszFileName;
+                       }
+               }
+#else
+               bTruncated = BAppend(pszBuf, ui32BufSiz, " [%u]", ui32Line);
+#endif
+
+               if (bTruncated)
+               {
+                       printk(KERN_ERR "%s (truncated)\n", pszBuf);
+               }
+               else
+               {
+                       if (ui32DebugLevel & DBGPRIV_BUFFERED)
+                       {
+                               AddToBufferCCB(pszFileName, ui32Line, pszBuf);
+                       }
+                       else
+                       {
+                               printk(KERN_ERR "%s\n", pszBuf);
+                       }
+               }
+       }
+
+       spin_unlock_irqrestore(&gsDebugLock, ulLockFlags);
+
+       va_end (vaArgs);
+}
+
+#endif /* PVRSRV_NEED_PVR_DPF */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_debugfs.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_debugfs.c
new file mode 100644 (file)
index 0000000..fa6a94c
--- /dev/null
@@ -0,0 +1,623 @@
+/*************************************************************************/ /*!
+@File
+@Title          DebugFS implementation of Debug Info interface.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements osdi_impl.h API to provide access to driver's
+                debug data via DebugFS.
+
+                Note about locking in DebugFS module.
+
+                Access to DebugFS is protected against the race where any
+                file could be removed while being accessed or accessed while
+                being removed. Any calls to debugfs_remove() will block
+                until all operations are finished.
+
+                See implementation of proxy file operations (FULL_PROXY_FUNC)
+                and implementation of debugfs_file_[get|put]() in
+                fs/debugfs/file.c in Linux kernel sources for more details.
+
+                Note about locking for sequential files.
+
+                The seq_file objects have a mutex that protects access
+                to all of the file operations, hence all of the sequential
+                *read* operations are protected.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvr_debugfs.h"
+#include "osfunc.h"
+#include "allocmem.h"
+#include "pvr_bridge_k.h"
+#include "pvr_uaccess.h"
+#include "osdi_impl.h"
+
+#define _DRIVER_THREAD_ENTER() \
+       do { \
+               PVRSRV_ERROR eLocalError = PVRSRVDriverThreadEnter(); \
+               if (eLocalError != PVRSRV_OK) \
+               { \
+                       PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVDriverThreadEnter failed: %s", \
+                               __func__, PVRSRVGetErrorString(eLocalError))); \
+                       return OSPVRSRVToNativeError(eLocalError); \
+               } \
+       } while (0)
+
+#define _DRIVER_THREAD_EXIT() \
+       PVRSRVDriverThreadExit()
+
+#define PVR_DEBUGFS_PVR_DPF_LEVEL PVR_DBG_ERROR
+
+typedef struct DFS_DIR
+{
+       struct dentry *psDirEntry;
+       struct DFS_DIR *psParentDir;
+} DFS_DIR;
+
+typedef struct DFS_ENTRY
+{
+       OSDI_IMPL_ENTRY sImplEntry;
+       DI_ITERATOR_CB sIterCb;
+} DFS_ENTRY;
+
+typedef struct DFS_FILE
+{
+       struct dentry *psFileEntry;
+       struct DFS_DIR *psParentDir;
+       const struct seq_operations *psSeqOps;
+       struct DFS_ENTRY sEntry;
+       DI_ENTRY_TYPE eType;
+} DFS_FILE;
+
+/* ----- native callbacks interface ----------------------------------------- */
+
+static void _WriteData(void *pvNativeHandle, const void *pvData,
+                       IMG_UINT32 uiSize)
+{
+       seq_write(pvNativeHandle, pvData, uiSize);
+}
+
+static void _VPrintf(void *pvNativeHandle, const IMG_CHAR *pszFmt,
+                     va_list pArgs)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
+       seq_vprintf(pvNativeHandle, pszFmt, pArgs);
+#else
+       IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+
+       vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFmt, pArgs);
+       seq_printf(pvNativeHandle, "%s", szBuffer);
+#endif
+}
+
+static void _Puts(void *pvNativeHandle, const IMG_CHAR *pszStr)
+{
+       seq_puts(pvNativeHandle, pszStr);
+}
+
+static IMG_BOOL _HasOverflowed(void *pvNativeHandle)
+{
+       struct seq_file *psSeqFile = pvNativeHandle;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
+       return seq_has_overflowed(psSeqFile);
+#else
+       return psSeqFile->count == psSeqFile->size;
+#endif
+}
+
+static OSDI_IMPL_ENTRY_CB _g_sEntryCallbacks = {
+       .pfnWrite = _WriteData,
+       .pfnVPrintf = _VPrintf,
+       .pfnPuts = _Puts,
+       .pfnHasOverflowed = _HasOverflowed,
+};
+
+/* ----- sequential file operations ----------------------------------------- */
+
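+/* These callbacks adapt the Linux seq_file iterator interface to the DI
+ * iterator callbacks stored in the DFS_ENTRY attached to each open file.
+ */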
+static void *_Start(struct seq_file *psSeqFile, loff_t *puiPos)
+{
+       DFS_ENTRY *psEntry = psSeqFile->private;
+
+       void *pvRet = psEntry->sIterCb.pfnStart(&psEntry->sImplEntry, puiPos);
+
+       if (pvRet == DI_START_TOKEN)
+       {
+               return SEQ_START_TOKEN;
+       }
+
+       return pvRet;
+}
+
+static void _Stop(struct seq_file *psSeqFile, void *pvPriv)
+{
+       DFS_ENTRY *psEntry = psSeqFile->private;
+
+       psEntry->sIterCb.pfnStop(&psEntry->sImplEntry, pvPriv);
+}
+
+static void *_Next(struct seq_file *psSeqFile, void *pvPriv, loff_t *puiPos)
+{
+       DFS_ENTRY *psEntry = psSeqFile->private;
+
+       return psEntry->sIterCb.pfnNext(&psEntry->sImplEntry, pvPriv, puiPos);
+}
+
+static int _Show(struct seq_file *psSeqFile, void *pvPriv)
+{
+       DFS_ENTRY *psEntry = psSeqFile->private;
+
+       if (pvPriv == SEQ_START_TOKEN)
+       {
+               pvPriv = DI_START_TOKEN;
+       }
+
+       return psEntry->sIterCb.pfnShow(&psEntry->sImplEntry, pvPriv);
+}
+
+static struct seq_operations _g_sSeqOps = {
+       .start = _Start,
+       .stop = _Stop,
+       .next = _Next,
+       .show = _Show
+};
+
+/* ----- file operations ---------------------------------------------------- */
+
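+/* Generic entries (with a pfnStart iterator) are opened with seq_open();
+ * entries providing only pfnShow use single_open(). A per-open copy of the
+ * DFS_ENTRY is attached to the seq_file so that callbacks operate on their
+ * own OSDI_IMPL_ENTRY.
+ */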
+static int _Open(struct inode *psINode, struct file *psFile)
+{
+       DFS_FILE *psDFSFile;
+       int iRes;
+
+       PVR_LOG_RETURN_IF_FALSE(psINode != NULL && psINode->i_private != NULL,
+                               "psDFSFile is NULL", -EIO);
+
+       _DRIVER_THREAD_ENTER();
+
+       psDFSFile = psINode->i_private;
+
+       if (psDFSFile->sEntry.sIterCb.pfnStart != NULL)
+       {
+               iRes = seq_open(psFile, psDFSFile->psSeqOps);
+       }
+       else
+       {
+               /* private data is NULL as it's going to be set below */
+               iRes = single_open(psFile, _Show, NULL);
+       }
+
+       if (iRes == 0)
+       {
+               struct seq_file *psSeqFile = psFile->private_data;
+
+               DFS_ENTRY *psEntry = OSAllocMem(sizeof(*psEntry));
+               if (psEntry == NULL)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem() failed", __func__));
+                       iRes = -ENOMEM;
+                       goto return_;
+               }
+
+               *psEntry = psDFSFile->sEntry;
+               psSeqFile->private = psEntry;
+               psEntry->sImplEntry.pvNative = psSeqFile;
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to seq_open psFile, returning %d",
+                       __func__, iRes));
+       }
+
+return_:
+       _DRIVER_THREAD_EXIT();
+
+       return iRes;
+}
+
+static int _Close(struct inode *psINode, struct file *psFile)
+{
+       DFS_FILE *psDFSFile = psINode->i_private;
+       DFS_ENTRY *psEntry;
+       int iRes;
+
+       PVR_LOG_RETURN_IF_FALSE(psDFSFile != NULL, "psDFSFile is NULL",
+                               -EIO);
+
+       _DRIVER_THREAD_ENTER();
+
+       /* save pointer to DFS_ENTRY */
+       psEntry = ((struct seq_file *) psFile->private_data)->private;
+
+       if (psDFSFile->sEntry.sIterCb.pfnStart != NULL)
+       {
+               iRes = seq_release(psINode, psFile);
+       }
+       else
+       {
+               iRes = single_release(psINode, psFile);
+       }
+
+       /* free DFS_ENTRY allocated in _Open */
+       OSFreeMem(psEntry);
+
+       /* Sanity check: seq_release() (and single_release(), which calls it)
+        * never fails. */
+       if (iRes != 0)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to release psFile, returning %d",
+                       __func__, iRes));
+       }
+
+       _DRIVER_THREAD_EXIT();
+
+       return iRes;
+}
+
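+/* Generic entries are read through seq_read(); random access entries call
+ * the entry's pfnRead callback into a temporary kernel buffer which is then
+ * copied out to user space.
+ */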
+static ssize_t _Read(struct file *psFile, char __user *pcBuffer,
+                     size_t uiCount, loff_t *puiPos)
+{
+       DFS_FILE *psDFSFile = psFile->f_path.dentry->d_inode->i_private;
+       ssize_t iRes = -1;
+
+       _DRIVER_THREAD_ENTER();
+
+       if (psDFSFile->eType == DI_ENTRY_TYPE_GENERIC)
+       {
+               iRes = seq_read(psFile, pcBuffer, uiCount, puiPos);
+               if (iRes < 0)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: failed to read from file, seq_read() "
+                               "returned %zd", __func__, iRes));
+                       goto return_;
+               }
+       }
+       else if (psDFSFile->eType == DI_ENTRY_TYPE_RANDOM_ACCESS)
+       {
+               DFS_ENTRY *psEntry = &psDFSFile->sEntry;
+               IMG_UINT64 ui64Count = uiCount;
+
+               IMG_CHAR *pcLocalBuffer = OSAllocMem(uiCount);
+               PVR_GOTO_IF_FALSE(pcLocalBuffer != NULL, return_);
+
+               iRes = psEntry->sIterCb.pfnRead(pcLocalBuffer, ui64Count, puiPos,
+                                               psEntry->sImplEntry.pvPrivData);
+               if (iRes < 0)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: failed to read from file, pfnRead() "
+                               "returned %zd", __func__, iRes));
+                       OSFreeMem(pcLocalBuffer);
+                       goto return_;
+               }
+
+               if (pvr_copy_to_user(pcBuffer, pcLocalBuffer, iRes) != 0)
+               {
+                       iRes = -1;
+               }
+
+               OSFreeMem(pcLocalBuffer);
+       }
+
+return_:
+       _DRIVER_THREAD_EXIT();
+
+       return iRes;
+}
+
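+/* Generic entries delegate seeking to seq_lseek(); random access entries
+ * pass the absolute target position to the entry's pfnSeek callback.
+ */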
+static loff_t _LSeek(struct file *psFile, loff_t iOffset, int iOrigin)
+{
+       DFS_FILE *psDFSFile = psFile->f_path.dentry->d_inode->i_private;
+       loff_t iRes = -1;
+
+       _DRIVER_THREAD_ENTER();
+
+       if (psDFSFile->eType == DI_ENTRY_TYPE_GENERIC)
+       {
+               iRes = seq_lseek(psFile, iOffset, iOrigin);
+               if (iRes < 0)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: failed to set file position in psFile<%p> to offset "
+                               "%lld, iOrigin %d, seq_lseek() returned %lld (dentry='%s')", __func__,
+                               psFile, iOffset, iOrigin, iRes, psFile->f_path.dentry->d_name.name));
+                       goto return_;
+               }
+       }
+       else if (psDFSFile->eType == DI_ENTRY_TYPE_RANDOM_ACCESS)
+       {
+               DFS_ENTRY *psEntry = &psDFSFile->sEntry;
+               IMG_UINT64 ui64Pos;
+
+               switch (iOrigin)
+               {
+                       case SEEK_SET:
+                               /* absolute position */
+                               ui64Pos = iOffset;
+                               break;
+                       case SEEK_CUR:
+                               /* relative to the current position */
+                               ui64Pos = psFile->f_pos + iOffset;
+                               break;
+                       case SEEK_END:
+                               /* not supported as we don't know the file size here */
+                               /* fall through */
+                       default:
+                               return -1;
+               }
+
+               /* only pass the absolute position to the callback, it's up to the
+                * implementer to determine if the position is valid */
+
+               iRes = psEntry->sIterCb.pfnSeek(ui64Pos,
+                                               psEntry->sImplEntry.pvPrivData);
+               if (iRes < 0)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: failed to set file position to offset "
+                               "%lld, pfnSeek() returned %lld", __func__,
+                               iOffset, iRes));
+                       goto return_;
+               }
+
+               psFile->f_pos = ui64Pos;
+       }
+
+return_:
+       _DRIVER_THREAD_EXIT();
+
+       return iRes;
+}
+
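+/* Writes are copied into a NUL terminated kernel buffer, capped at the
+ * entry's ui32WriteLenMax, and handed to the entry's pfnWrite callback.
+ */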
+static ssize_t _Write(struct file *psFile, const char __user *pszBuffer,
+                      size_t uiCount, loff_t *puiPos)
+{
+       struct inode *psINode = psFile->f_path.dentry->d_inode;
+       DFS_FILE *psDFSFile = psINode->i_private;
+       DI_ITERATOR_CB *psIter = &psDFSFile->sEntry.sIterCb;
+       IMG_CHAR *pcLocalBuffer;
+       IMG_UINT64 ui64Count;
+       IMG_INT64 i64Res = -EIO;
+       IMG_UINT64 ui64Pos = *puiPos;
+
+       PVR_LOG_RETURN_IF_FALSE(psDFSFile != NULL, "psDFSFile is NULL",
+                               -EIO);
+       PVR_LOG_RETURN_IF_FALSE(psIter->pfnWrite != NULL, "pfnWrite is NULL",
+                               -EIO);
+
+
+       /* Make sure we only allocate as much memory as is actually needed. */
+       ui64Count = psIter->ui32WriteLenMax;
+       /* Reject oversized writes before _DRIVER_THREAD_ENTER() so that the
+        * enter/exit calls stay balanced. */
+       PVR_LOG_RETURN_IF_FALSE(uiCount <= ui64Count, "uiCount too long", -EIO);
+       ui64Count = MIN(uiCount + 1, ui64Count);
+
+       _DRIVER_THREAD_ENTER();
+
+       /* allocate buffer with one additional byte for NUL character */
+       pcLocalBuffer = OSAllocMem(ui64Count);
+       PVR_LOG_GOTO_IF_FALSE(pcLocalBuffer != NULL, "OSAllocMem() failed",
+                             return_);
+
+       i64Res = pvr_copy_from_user(pcLocalBuffer, pszBuffer, ui64Count);
+       PVR_LOG_GOTO_IF_FALSE(i64Res == 0, "pvr_copy_from_user() failed",
+                             free_local_buffer_);
+
+       /* ensure that the framework user gets a NUL terminated buffer */
+       pcLocalBuffer[ui64Count - 1] = '\0';
+
+       i64Res = psIter->pfnWrite(pcLocalBuffer, ui64Count, &ui64Pos,
+                                 psDFSFile->sEntry.sImplEntry.pvPrivData);
+       PVR_LOG_GOTO_IF_FALSE(i64Res >= 0, "pfnWrite failed", free_local_buffer_);
+
+       *puiPos = ui64Pos;
+
+free_local_buffer_:
+       OSFreeMem(pcLocalBuffer);
+
+return_:
+       _DRIVER_THREAD_EXIT();
+
+       return i64Res;
+}
+
+static const struct file_operations _g_psFileOpsGen = {
+       .owner = THIS_MODULE,
+       .open = _Open,
+       .release = _Close,
+       .read = _Read,
+       .llseek = _LSeek,
+       .write = _Write,
+};
+
+static const struct file_operations _g_psFileOpsRndAcc = {
+       .owner = THIS_MODULE,
+       .read = _Read,
+       .llseek = _LSeek,
+       .write = _Write,
+};
+
+/* ----- DI implementation interface ---------------------------------------- */
+
+static PVRSRV_ERROR _Init(void)
+{
+       return PVRSRV_OK;
+}
+
+static void _DeInit(void)
+{
+}
+
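+/* Create a DebugFS file for a DI entry. The file mode is derived from the
+ * iterator callbacks provided: pfnShow/pfnRead make it readable,
+ * pfnWrite makes it writable by the owner.
+ */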
+static PVRSRV_ERROR _CreateFile(const IMG_CHAR *pszName,
+                                DI_ENTRY_TYPE eType,
+                                const DI_ITERATOR_CB *psIterCb,
+                                void *pvPrivData,
+                                void *pvParentDir,
+                                void **pvFile)
+{
+       DFS_DIR *psParentDir = pvParentDir;
+       DFS_FILE *psFile;
+       umode_t uiMode = S_IFREG;
+       struct dentry *psEntry;
+       const struct file_operations *psFileOps = NULL;
+       PVRSRV_ERROR eError;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(pvFile != NULL, "pvFile");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(pvParentDir != NULL, "pvParentDir");
+
+       switch (eType)
+       {
+               case DI_ENTRY_TYPE_GENERIC:
+                       psFileOps = &_g_psFileOpsGen;
+                       break;
+               case DI_ENTRY_TYPE_RANDOM_ACCESS:
+                       psFileOps = &_g_psFileOpsRndAcc;
+                       break;
+               default:
+                       PVR_DPF((PVR_DBG_ERROR, "eType invalid in %s()", __func__));
+                       eError = PVRSRV_ERROR_INVALID_PARAMS;
+                       goto return_;
+       }
+
+       psFile = OSAllocMem(sizeof(*psFile));
+       PVR_LOG_GOTO_IF_NOMEM(psFile, eError, return_);
+
+       uiMode |= psIterCb->pfnShow != NULL || psIterCb->pfnRead != NULL ?
+               S_IRUGO : 0;
+       uiMode |= psIterCb->pfnWrite != NULL ? S_IWUSR : 0;
+
+       psEntry = debugfs_create_file(pszName, uiMode, psParentDir->psDirEntry,
+                                     psFile, psFileOps);
+       if (IS_ERR_OR_NULL(psEntry))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Cannot create debugfs '%s' file",
+                       __func__, pszName));
+
+               eError = psEntry == NULL ?
+                       PVRSRV_ERROR_OUT_OF_MEMORY : PVRSRV_ERROR_INVALID_DEVICE;
+               goto free_file_;
+       }
+
+       psFile->eType = eType;
+       psFile->psSeqOps = &_g_sSeqOps;
+       psFile->sEntry.sIterCb = *psIterCb;
+       psFile->sEntry.sImplEntry.pvPrivData = pvPrivData;
+       psFile->sEntry.sImplEntry.pvNative = NULL;
+       psFile->sEntry.sImplEntry.psCb = &_g_sEntryCallbacks;
+       psFile->psParentDir = psParentDir;
+       psFile->psFileEntry = psEntry;
+
+       *pvFile = psFile;
+
+       return PVRSRV_OK;
+
+free_file_:
+       OSFreeMem(psFile);
+
+return_:
+       return eError;
+}
+
+static void _DestroyFile(void *pvFile)
+{
+       DFS_FILE *psFile = pvFile;
+
+       PVR_ASSERT(psFile != NULL);
+
+       psFile->psFileEntry->d_inode->i_private = NULL;
+
+       debugfs_remove(psFile->psFileEntry);
+       OSFreeMem(psFile);
+}
+
+static PVRSRV_ERROR _CreateDir(const IMG_CHAR *pszName,
+                               void *pvParentDir,
+                               void **ppvDir)
+{
+       DFS_DIR *psNewDir;
+       struct dentry *psDirEntry, *psParentDir = NULL;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(pszName != NULL, "pszName");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(ppvDir != NULL, "ppvDir");
+
+       psNewDir = OSAllocMem(sizeof(*psNewDir));
+       PVR_LOG_RETURN_IF_NOMEM(psNewDir, "OSAllocMem");
+
+       psNewDir->psParentDir = pvParentDir;
+
+       if (pvParentDir != NULL)
+       {
+               psParentDir = psNewDir->psParentDir->psDirEntry;
+       }
+
+       psDirEntry = debugfs_create_dir(pszName, psParentDir);
+       if (IS_ERR_OR_NULL(psDirEntry))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Cannot create '%s' debugfs directory",
+                       __func__, pszName));
+               OSFreeMem(psNewDir);
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       psNewDir->psDirEntry = psDirEntry;
+       *ppvDir = psNewDir;
+
+       return PVRSRV_OK;
+}
+
+static void _DestroyDir(void *pvDir)
+{
+       DFS_DIR *psDir = pvDir;
+
+       PVR_ASSERT(psDir != NULL);
+
+       debugfs_remove(psDir->psDirEntry);
+       OSFreeMem(psDir);
+}
+
+PVRSRV_ERROR PVRDebugFsRegister(void)
+{
+       OSDI_IMPL_CB sImplCb = {
+               .pfnInit = _Init,
+               .pfnDeInit = _DeInit,
+               .pfnCreateEntry = _CreateFile,
+               .pfnDestroyEntry = _DestroyFile,
+               .pfnCreateGroup = _CreateDir,
+               .pfnDestroyGroup = _DestroyDir
+       };
+
+       return DIRegisterImplementation("debugfs", &sImplCb);
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_debugfs.h b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_debugfs.h
new file mode 100644 (file)
index 0000000..23ae55b
--- /dev/null
@@ -0,0 +1,50 @@
+/*************************************************************************/ /*!
+@File
+@Title          DebugFS implementation of Debug Info interface.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVR_DEBUGFS_H
+#define PVR_DEBUGFS_H
+
+#include "pvrsrv_error.h"
+
+PVRSRV_ERROR PVRDebugFsRegister(void);
+
+#endif /* PVR_DEBUGFS_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_drm.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_drm.c
new file mode 100644 (file)
index 0000000..2b3cd5c
--- /dev/null
@@ -0,0 +1,358 @@
+/*
+ * @File
+ * @Title       PowerVR DRM driver
+ * @Codingstyle LinuxKernel
+ * @Copyright   Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+ * @License     Dual MIT/GPLv2
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License Version 2 ("GPL") in which case the provisions
+ * of GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms of
+ * GPL, and not to allow others to use your version of this file under the terms
+ * of the MIT license, indicate your decision by deleting the provisions above
+ * and replace them with the notice and other provisions required by GPL as set
+ * out in the file called "GPL-COPYING" included in this distribution. If you do
+ * not delete the provisions above, a recipient may use your version of this file
+ * under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT-COPYING".
+ *
+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/version.h>
+
+#include <drm/drm.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0))
+#include <drm/drm_drv.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_print.h>
+#include <linux/dma-mapping.h>
+#else
+#include <drm/drmP.h> /* include before drm_crtc.h for kernels older than 3.9 */
+#endif
+
+#include <drm/drm_crtc.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+
+#include "module_common.h"
+#include "pvr_drm.h"
+#include "pvr_drv.h"
+#include "pvrversion.h"
+#include "services_kernel_client.h"
+#include "pvr_sync_ioctl_drm.h"
+
+#include "kernel_compatibility.h"
+
+#define PVR_DRM_DRIVER_NAME PVR_DRM_NAME
+#define PVR_DRM_DRIVER_DESC "Imagination Technologies PVR DRM"
+#define        PVR_DRM_DRIVER_DATE "20170530"
+
+/*
+ * Protects global PVRSRV_DATA on a multi device system. i.e. this is used to
+ * protect the PVRSRVCommonDeviceXXXX() APIs in the Server common layer which
+ * are not re-entrant for device creation and initialisation.
+ */
+static DEFINE_MUTEX(g_device_mutex);
+
+static int pvr_pm_suspend(struct device *dev)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct pvr_drm_private *priv = ddev->dev_private;
+
+       DRM_DEBUG_DRIVER("device %p\n", dev);
+
+       return PVRSRVDeviceSuspend(priv->dev_node);
+}
+
+static int pvr_pm_resume(struct device *dev)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct pvr_drm_private *priv = ddev->dev_private;
+
+       DRM_DEBUG_DRIVER("device %p\n", dev);
+
+       return PVRSRVDeviceResume(priv->dev_node);
+}
+
+static int pvr_pm_runtime_suspend(struct device *dev)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct pvr_drm_private *priv = ddev->dev_private;
+
+       sPVRSRVDeviceSuspend(priv->dev_node);
+       return 0;
+}
+
+static int pvr_pm_runtime_resume(struct device *dev)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct pvr_drm_private *priv = ddev->dev_private;
+
+       sPVRSRVDeviceResume(priv->dev_node);
+       return 0;
+}
+
+const struct dev_pm_ops pvr_pm_ops = {
+       .suspend = pvr_pm_suspend,
+       .resume = pvr_pm_resume,
+       .runtime_suspend = pvr_pm_runtime_suspend,
+       .runtime_resume = pvr_pm_runtime_resume,
+};
+
+
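+/* Common load path: allocates the per-device private data, sets up DMA
+ * parameters and creates (and, depending on PVRSRV_DEVICE_INIT_MODE,
+ * initialises) the Services device node under g_device_mutex.
+ */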
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))
+static
+#endif
+int pvr_drm_load(struct drm_device *ddev, unsigned long flags)
+{
+       struct pvr_drm_private *priv;
+       enum PVRSRV_ERROR_TAG srv_err;
+       int err, deviceId;
+
+       DRM_DEBUG_DRIVER("device %p\n", ddev->dev);
+
+       dev_set_drvdata(ddev->dev, ddev);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0))
+       /*
+        * Older kernels do not have the render drm_minor member in drm_device,
+        * so we fall back to the primary node for device identification.
+        */
+       deviceId = ddev->primary->index;
+#else
+       if (ddev->render)
+               deviceId = ddev->render->index;
+       else /* when the render node is NULL, fall back to the primary node */
+               deviceId = ddev->primary->index;
+#endif
+
+       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+       if (!priv) {
+               err = -ENOMEM;
+               goto err_exit;
+       }
+       ddev->dev_private = priv;
+
+       if (!ddev->dev->dma_parms)
+               ddev->dev->dma_parms = &priv->dma_parms;
+       dma_set_max_seg_size(ddev->dev, DMA_BIT_MASK(32));
+
+       mutex_lock(&g_device_mutex);
+
+       srv_err = PVRSRVCommonDeviceCreate(ddev->dev, deviceId, &priv->dev_node);
+       if (srv_err != PVRSRV_OK) {
+               DRM_ERROR("failed to create device node for device %p (%s)\n",
+                         ddev->dev, PVRSRVGetErrorString(srv_err));
+               if (srv_err == PVRSRV_ERROR_PROBE_DEFER)
+                       err = -EPROBE_DEFER;
+               else
+                       err = -ENODEV;
+               goto err_unset_dma_parms;
+       }
+
+       err = PVRSRVDeviceInit(priv->dev_node);
+       if (err) {
+               DRM_ERROR("device %p initialisation failed (err=%d)\n",
+                         ddev->dev, err);
+               goto err_device_destroy;
+       }
+
+       drm_mode_config_init(ddev);
+
+#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_PROBE)
+       srv_err = PVRSRVCommonDeviceInitialise(priv->dev_node);
+       if (srv_err != PVRSRV_OK) {
+               err = -ENODEV;
+               DRM_ERROR("device %p initialisation failed (err=%d)\n",
+                         ddev->dev, err);
+               goto err_device_deinit;
+       }
+#endif
+
+       mutex_unlock(&g_device_mutex);
+
+       return 0;
+
+#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_PROBE)
+err_device_deinit:
+       drm_mode_config_cleanup(ddev);
+       PVRSRVDeviceDeinit(priv->dev_node);
+#endif
+err_device_destroy:
+       PVRSRVCommonDeviceDestroy(priv->dev_node);
+err_unset_dma_parms:
+       mutex_unlock(&g_device_mutex);
+       if (ddev->dev->dma_parms == &priv->dma_parms)
+               ddev->dev->dma_parms = NULL;
+       kfree(priv);
+err_exit:
+       return err;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))
+static
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+int pvr_drm_unload(struct drm_device *ddev)
+#else
+void pvr_drm_unload(struct drm_device *ddev)
+#endif
+{
+       struct pvr_drm_private *priv = ddev->dev_private;
+
+       DRM_DEBUG_DRIVER("device %p\n", ddev->dev);
+
+       drm_mode_config_cleanup(ddev);
+
+       PVRSRVDeviceDeinit(priv->dev_node);
+
+       pm_runtime_disable(ddev->dev);
+       mutex_lock(&g_device_mutex);
+       PVRSRVCommonDeviceDestroy(priv->dev_node);
+       mutex_unlock(&g_device_mutex);
+
+       if (ddev->dev->dma_parms == &priv->dma_parms)
+               ddev->dev->dma_parms = NULL;
+
+       kfree(priv);
+       ddev->dev_private = NULL;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+       return 0;
+#endif
+}
+
+static int pvr_drm_open(struct drm_device *ddev, struct drm_file *dfile)
+{
+#if (PVRSRV_DEVICE_INIT_MODE != PVRSRV_LINUX_DEV_INIT_ON_CONNECT)
+       struct pvr_drm_private *priv = ddev->dev_private;
+       int err;
+#endif
+
+       if (!try_module_get(THIS_MODULE)) {
+               DRM_ERROR("failed to get module reference\n");
+               return -ENOENT;
+       }
+
+#if (PVRSRV_DEVICE_INIT_MODE != PVRSRV_LINUX_DEV_INIT_ON_CONNECT)
+       err = PVRSRVDeviceServicesOpen(priv->dev_node, dfile);
+       if (err)
+               module_put(THIS_MODULE);
+
+       return err;
+#else
+       return 0;
+#endif
+}
+
+static void pvr_drm_release(struct drm_device *ddev, struct drm_file *dfile)
+{
+       struct pvr_drm_private *priv = ddev->dev_private;
+
+       PVRSRVDeviceRelease(priv->dev_node, dfile);
+
+       module_put(THIS_MODULE);
+}
+
+/*
+ * The DRM global lock is taken for ioctls unless the DRM_UNLOCKED flag is set.
+ */
+static struct drm_ioctl_desc pvr_drm_ioctls[] = {
+       DRM_IOCTL_DEF_DRV(PVR_SRVKM_CMD, PVRSRV_BridgeDispatchKM,
+                         DRM_RENDER_ALLOW | DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(PVR_SRVKM_INIT, drm_pvr_srvkm_init,
+                         DRM_RENDER_ALLOW | DRM_UNLOCKED),
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(USE_PVRSYNC_DEVNODE)
+       DRM_IOCTL_DEF_DRV(PVR_SYNC_RENAME_CMD, pvr_sync_rename_ioctl,
+                         DRM_RENDER_ALLOW | DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(PVR_SYNC_FORCE_SW_ONLY_CMD, pvr_sync_force_sw_only_ioctl,
+                         DRM_RENDER_ALLOW | DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(PVR_SW_SYNC_CREATE_FENCE_CMD, pvr_sw_sync_create_fence_ioctl,
+                         DRM_RENDER_ALLOW | DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(PVR_SW_SYNC_INC_CMD, pvr_sw_sync_inc_ioctl,
+                         DRM_RENDER_ALLOW | DRM_UNLOCKED),
+#endif
+};
+
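+/* Driver-specific ioctls (nr >= DRM_COMMAND_BASE) are dispatched directly
+ * via drm_ioctl(); core DRM ioctls go through the compat translation in
+ * drm_compat_ioctl().
+ */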
+#if defined(CONFIG_COMPAT)
+static long pvr_compat_ioctl(struct file *file, unsigned int cmd,
+                            unsigned long arg)
+{
+       unsigned int nr = DRM_IOCTL_NR(cmd);
+
+       if (nr < DRM_COMMAND_BASE)
+               return drm_compat_ioctl(file, cmd, arg);
+
+       return drm_ioctl(file, cmd, arg);
+}
+#endif /* defined(CONFIG_COMPAT) */
+
+const struct file_operations pvr_drm_fops = {
+       .owner                  = THIS_MODULE,
+       .open                   = drm_open,
+       .release                = drm_release,
+       .unlocked_ioctl         = drm_ioctl,
+#if defined(CONFIG_COMPAT)
+       .compat_ioctl           = pvr_compat_ioctl,
+#endif
+       .mmap                   = PVRSRV_MMap,
+       .poll                   = drm_poll,
+       .read                   = drm_read,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0))
+       .fasync                 = drm_fasync,
+#endif
+};
+
+const struct drm_driver pvr_drm_generic_driver = {
+       .driver_features        = DRIVER_MODESET | DRIVER_RENDER,
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+       .load                   = NULL,
+       .unload                 = NULL,
+#else
+       .load                   = pvr_drm_load,
+       .unload                 = pvr_drm_unload,
+#endif
+       .open                   = pvr_drm_open,
+       .postclose              = pvr_drm_release,
+
+       .ioctls                 = pvr_drm_ioctls,
+       .num_ioctls             = ARRAY_SIZE(pvr_drm_ioctls),
+       .fops                   = &pvr_drm_fops,
+
+       .name                   = PVR_DRM_DRIVER_NAME,
+       .desc                   = PVR_DRM_DRIVER_DESC,
+       .date                   = PVR_DRM_DRIVER_DATE,
+       .major                  = PVRVERSION_MAJ,
+       .minor                  = PVRVERSION_MIN,
+       .patchlevel             = PVRVERSION_BUILD,
+};
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_drv.h b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_drv.h
new file mode 100644 (file)
index 0000000..15887da
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+ * @File
+ * @Title       PowerVR DRM driver
+ * @Codingstyle LinuxKernel
+ * @Copyright   Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+ * @License     Dual MIT/GPLv2
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License Version 2 ("GPL") in which case the provisions
+ * of GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms of
+ * GPL, and not to allow others to use your version of this file under the terms
+ * of the MIT license, indicate your decision by deleting the provisions above
+ * and replace them with the notice and other provisions required by GPL as set
+ * out in the file called "GPL-COPYING" included in this distribution. If you do
+ * not delete the provisions above, a recipient may use your version of this file
+ * under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT-COPYING".
+ *
+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#if !defined(__PVR_DRV_H__)
+#define __PVR_DRV_H__
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0))
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <linux/device.h>
+#else
+#include <drm/drmP.h>
+#endif
+
+#include <linux/pm.h>
+
+struct file;
+struct _PVRSRV_DEVICE_NODE_;
+struct workqueue_struct;
+struct vm_area_struct;
+
+/* This structure is used to store Linux specific per-device information. */
+struct pvr_drm_private {
+       struct _PVRSRV_DEVICE_NODE_ *dev_node;
+
+       /*
+        * This is needed for devices that don't already have their own dma
+        * parameters structure, e.g. platform devices, and, if necessary, will
+        * be assigned to the 'struct device' during device initialisation. It
+        * should therefore never be accessed directly via this structure as
+        * this may not be the version of dma parameters in use.
+        */
+       struct device_dma_parameters dma_parms;
+
+       /* PVR Sync debug notify handle */
+       void *sync_debug_notify_handle;
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+       /* Only used by native fence sync; sync_debug_notify_handle is used
+        * to print a header only, while the content is registered separately.
+        * Used to print foreign sync debug information.
+        */
+       void *sync_foreign_debug_notify_handle;
+#endif
+};
+
+extern const struct dev_pm_ops pvr_pm_ops;
+extern const struct drm_driver pvr_drm_generic_driver;
+extern const struct file_operations pvr_drm_fops;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+int pvr_drm_load(struct drm_device *ddev, unsigned long flags);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+int pvr_drm_unload(struct drm_device *ddev);
+#else
+void pvr_drm_unload(struct drm_device *ddev);
+#endif
+#endif
+
+int PVRSRV_BridgeDispatchKM(struct drm_device *dev, void *arg,
+                           struct drm_file *file);
+int PVRSRV_MMap(struct file *file, struct vm_area_struct *ps_vma);
+
+#endif /* !defined(__PVR_DRV_H__) */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_dvfs_device.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_dvfs_device.c
new file mode 100644 (file)
index 0000000..e65a4ab
--- /dev/null
@@ -0,0 +1,810 @@
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR devfreq device implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Linux devfreq-based DVFS device implementation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(NO_HARDWARE)
+
+#include <linux/devfreq.h>
+#if defined(CONFIG_DEVFREQ_THERMAL)
+#include <linux/devfreq_cooling.h>
+#endif
+#include <linux/version.h>
+#include <linux/device.h>
+#include <drm/drm.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0))
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#else
+#include <drm/drmP.h>
+#endif
+
+#include "power.h"
+#include "pvrsrv.h"
+#include "pvrsrv_device.h"
+
+#include "rgxdevice.h"
+#include "rgxinit.h"
+#include "sofunc_rgx.h"
+
+#include "syscommon.h"
+
+#include "pvr_dvfs_device.h"
+
+#include "kernel_compatibility.h"
+
+static int _device_get_devid(struct device *dev)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       int deviceId;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0))
+       /*
+        * Older kernels do not have the render drm_minor member in
+        * drm_device, so fall back to the primary node for device
+        * identification.
+        */
+       deviceId = ddev->primary->index;
+#else
+       if (ddev->render)
+               deviceId = ddev->render->index;
+       else /* when the render node is NULL, fall back to the primary node */
+               deviceId = ddev->primary->index;
+#endif
+
+       return deviceId;
+}
+
+static IMG_INT32 devfreq_target(struct device *dev, unsigned long *requested_freq, IMG_UINT32 flags)
+{
+       int deviceId = _device_get_devid(dev);
+       PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetDeviceInstanceByOSId(deviceId);
+       RGX_DATA                *psRGXData = NULL;
+       IMG_DVFS_DEVICE         *psDVFSDevice = NULL;
+       IMG_DVFS_DEVICE_CFG     *psDVFSDeviceCfg = NULL;
+       RGX_TIMING_INFORMATION  *psRGXTimingInfo = NULL;
+       IMG_UINT32              ui32Freq, ui32CurFreq, ui32Volt;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+       struct opp *opp;
+#else
+       struct dev_pm_opp *opp;
+#endif
+
+       /* Check the device is registered */
+       if (!psDeviceNode)
+       {
+               return -ENODEV;
+       }
+
+       psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+       psDVFSDevice = &psDeviceNode->psDevConfig->sDVFS.sDVFSDevice;
+       psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg;
+
+       /* Check the RGX device is initialised */
+       if (!psRGXData)
+       {
+               return -ENODATA;
+       }
+
+       psRGXTimingInfo = psRGXData->psRGXTimingInfo;
+       if (!psDVFSDevice->bEnabled)
+       {
+               *requested_freq = psRGXTimingInfo->ui32CoreClockSpeed;
+               return 0;
+       }
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+       rcu_read_lock();
+#endif
+
+       opp = devfreq_recommended_opp(dev, requested_freq, flags);
+       if (IS_ERR(opp)) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+               rcu_read_unlock();
+#endif
+               PVR_DPF((PVR_DBG_ERROR, "Invalid OPP"));
+               return PTR_ERR(opp);
+       }
+
+       ui32Freq = dev_pm_opp_get_freq(opp);
+       ui32Volt = dev_pm_opp_get_voltage(opp);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+       rcu_read_unlock();
+#else
+       dev_pm_opp_put(opp);
+#endif
+
+       ui32CurFreq = psRGXTimingInfo->ui32CoreClockSpeed;
+
+       if (ui32CurFreq == ui32Freq)
+       {
+               return 0;
+       }
+
+       if (PVRSRV_OK != PVRSRVDevicePreClockSpeedChange(psDeviceNode,
+                                                                                                        psDVFSDeviceCfg->bIdleReq,
+                                                                                                        NULL))
+       {
+               dev_err(dev, "PVRSRVDevicePreClockSpeedChange failed\n");
+               return -EPERM;
+       }
+
+       /* Increasing frequency, change voltage first */
+       if (ui32Freq > ui32CurFreq)
+       {
+               psDVFSDeviceCfg->pfnSetVoltage(ui32Volt);
+       }
+
+       psDVFSDeviceCfg->pfnSetFrequency(ui32Freq);
+
+       /* Decreasing frequency, change frequency first */
+       if (ui32Freq < ui32CurFreq)
+       {
+               psDVFSDeviceCfg->pfnSetVoltage(ui32Volt);
+       }
+
+       psRGXTimingInfo->ui32CoreClockSpeed = ui32Freq;
+
+       PVRSRVDevicePostClockSpeedChange(psDeviceNode, psDVFSDeviceCfg->bIdleReq,
+                                                                        NULL);
+
+       return 0;
+}
+
+static int devfreq_get_dev_status(struct device *dev, struct devfreq_dev_status *stat)
+{
+       int                      deviceId = _device_get_devid(dev);
+       PVRSRV_DEVICE_NODE      *psDeviceNode = PVRSRVGetDeviceInstanceByOSId(deviceId);
+       PVRSRV_RGXDEV_INFO      *psDevInfo = NULL;
+       IMG_DVFS_DEVICE         *psDVFSDevice = NULL;
+       RGX_DATA                *psRGXData = NULL;
+       RGX_TIMING_INFORMATION  *psRGXTimingInfo = NULL;
+       RGXFWIF_GPU_UTIL_STATS   sGpuUtilStats;
+       PVRSRV_ERROR             eError;
+
+       /* Check the device is registered */
+       if (!psDeviceNode)
+       {
+               return -ENODEV;
+       }
+
+       psDevInfo = psDeviceNode->pvDevice;
+       psDVFSDevice = &psDeviceNode->psDevConfig->sDVFS.sDVFSDevice;
+       psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+
+       /* Check the RGX device is initialised */
+       if (!psDevInfo || !psRGXData)
+       {
+               return -ENODATA;
+       }
+
+       psRGXTimingInfo = psRGXData->psRGXTimingInfo;
+       stat->current_frequency = psRGXTimingInfo->ui32CoreClockSpeed;
+
+       if (psDevInfo->pfnGetGpuUtilStats == NULL)
+       {
+               /* GPU utilisation stats callback not yet ready, so report zero times. */
+               stat->busy_time = 0;
+               stat->total_time = 0;
+               return 0;
+       }
+
+       eError = psDevInfo->pfnGetGpuUtilStats(psDeviceNode,
+                                               psDVFSDevice->hGpuUtilUserDVFS,
+                                               &sGpuUtilStats);
+
+       if (eError != PVRSRV_OK)
+       {
+               return -EAGAIN;
+       }
+
+       stat->busy_time = sGpuUtilStats.ui64GpuStatActive;
+       stat->total_time = sGpuUtilStats.ui64GpuStatCumulative;
+
+       return 0;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+static IMG_INT32 devfreq_cur_freq(struct device *dev, unsigned long *freq)
+{
+       int deviceId = _device_get_devid(dev);
+       PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetDeviceInstanceByOSId(deviceId);
+       RGX_DATA *psRGXData = NULL;
+
+       /* Check the device is registered */
+       if (!psDeviceNode)
+       {
+               return -ENODEV;
+       }
+
+       psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+
+       /* Check the RGX device is initialised */
+       if (!psRGXData)
+       {
+               return -ENODATA;
+       }
+
+       *freq = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+
+       return 0;
+}
+#endif
+
+static struct devfreq_dev_profile img_devfreq_dev_profile =
+{
+       .target             = devfreq_target,
+       .get_dev_status     = devfreq_get_dev_status,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+       .get_cur_freq       = devfreq_cur_freq,
+#endif
+};
+
+static int FillOPPTable(struct device *dev, PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       const IMG_OPP *iopp;
+       int i, err = 0;
+       IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg = NULL;
+
+       /* Check the device exists */
+       if (!dev || !psDeviceNode)
+       {
+               return -ENODEV;
+       }
+
+       psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg;
+
+       for (i = 0, iopp = psDVFSDeviceCfg->pasOPPTable;
+            i < psDVFSDeviceCfg->ui32OPPTableSize;
+            i++, iopp++)
+       {
+               err = dev_pm_opp_add(dev, iopp->ui32Freq, iopp->ui32Volt);
+               if (err) {
+                       dev_err(dev, "Could not add OPP entry, %d\n", err);
+                       return err;
+               }
+       }
+
+       return 0;
+}
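+/*
+ * A minimal sketch, assuming hypothetical values, of the static table a
+ * system layer could supply via psDVFSDeviceCfg->pasOPPTable and
+ * ui32OPPTableSize for FillOPPTable() to register; frequencies are in Hz
+ * and voltages in microvolts, matching dev_pm_opp_add():
+ *
+ *     static const IMG_OPP asOPPTable[] = {
+ *             { .ui32Volt = 800000, .ui32Freq = 400000000 },
+ *             { .ui32Volt = 900000, .ui32Freq = 600000000 },
+ *     };
+ *
+ *     sDVFSDeviceCfg.pasOPPTable      = asOPPTable;
+ *     sDVFSDeviceCfg.ui32OPPTableSize = ARRAY_SIZE(asOPPTable);
+ */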
+
+static void ClearOPPTable(struct device *dev, PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+#if (defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))) || \
+       (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+       const IMG_OPP *iopp;
+       int i;
+       IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg = NULL;
+
+       /* Check the device exists */
+       if (!dev || !psDeviceNode)
+       {
+               return;
+       }
+
+       psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg;
+
+       for (i = 0, iopp = psDVFSDeviceCfg->pasOPPTable;
+            i < psDVFSDeviceCfg->ui32OPPTableSize;
+            i++, iopp++)
+       {
+               dev_pm_opp_remove(dev, iopp->ui32Freq);
+       }
+#endif
+}
+
+static int GetOPPValues(struct device *dev,
+                        unsigned long *min_freq,
+                        unsigned long *min_volt,
+                        unsigned long *max_freq)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+       struct opp *opp;
+#else
+       struct dev_pm_opp *opp;
+#endif
+       int count, i, err = 0;
+       unsigned long freq;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) && \
+       (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)))
+       unsigned int *freq_table;
+#else
+       unsigned long *freq_table;
+#endif
+
+       count = dev_pm_opp_get_opp_count(dev);
+       if (count < 0)
+       {
+               dev_err(dev, "Could not fetch OPP count, %d\n", count);
+               return count;
+       }
+
+       dev_info(dev, "Found %d OPP points.\n", count);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
+       freq_table = devm_kcalloc(dev, count, sizeof(*freq_table), GFP_ATOMIC);
+#else
+       freq_table = kcalloc(count, sizeof(*freq_table), GFP_ATOMIC);
+#endif
+       if (! freq_table)
+       {
+               return -ENOMEM;
+       }
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+       /* Start RCU read-side critical section to map frequency to OPP */
+       rcu_read_lock();
+#endif
+
+       /* Iterate over the OPP table; iteration 0 finds the OPP with freq >= 0 Hz. */
+       freq = 0;
+       opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+       if (IS_ERR(opp))
+       {
+               err = PTR_ERR(opp);
+               dev_err(dev, "Couldn't find lowest frequency, %d\n", err);
+               goto exit;
+       }
+
+       *min_volt = dev_pm_opp_get_voltage(opp);
+       *max_freq = *min_freq = freq_table[0] = freq;
+       dev_info(dev, "opp[%d/%d]: (%lu Hz, %lu uV)\n", 1, count, freq, *min_volt);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+       dev_pm_opp_put(opp);
+#endif
+
+       /* Iteration i > 0 finds the OPP with freq >= (opp[i-1].freq + 1). */
+       for (i = 1; i < count; i++)
+       {
+               freq++;
+               opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+               if (IS_ERR(opp))
+               {
+                       err = PTR_ERR(opp);
+                       dev_err(dev, "Couldn't find %dth frequency, %d\n", i, err);
+                       goto exit;
+               }
+
+               freq_table[i] = freq;
+               *max_freq = freq;
+               dev_info(dev,
+                                "opp[%d/%d]: (%lu Hz, %lu uV)\n",
+                                 i + 1,
+                                 count,
+                                 freq,
+                                 dev_pm_opp_get_voltage(opp));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+               dev_pm_opp_put(opp);
+#endif
+       }
+
+exit:
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+       rcu_read_unlock();
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+       if (!err)
+       {
+               img_devfreq_dev_profile.freq_table = freq_table;
+               img_devfreq_dev_profile.max_state = count;
+       }
+       else
+#endif
+       {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
+               devm_kfree(dev, freq_table);
+#else
+               kfree(freq_table);
+#endif
+       }
+
+       return err;
+}
+
+#if defined(CONFIG_DEVFREQ_THERMAL)
+static int RegisterCoolingDevice(struct device *dev,
+                                                                IMG_DVFS_DEVICE *psDVFSDevice,
+                                                                struct devfreq_cooling_power *powerOps)
+{
+       struct device_node *of_node;
+       int err = 0;
+       PVRSRV_VZ_RET_IF_MODE(GUEST, err);
+
+       if (!psDVFSDevice)
+       {
+               return -EINVAL;
+       }
+
+       if (!powerOps)
+       {
+               dev_info(dev, "Cooling: power ops not registered, not enabling cooling\n");
+               return 0;
+       }
+
+       of_node = of_node_get(dev->of_node);
+
+       psDVFSDevice->psDevfreqCoolingDevice = of_devfreq_cooling_register_power(
+               of_node, psDVFSDevice->psDevFreq, powerOps);
+
+       if (IS_ERR(psDVFSDevice->psDevfreqCoolingDevice))
+       {
+               err = PTR_ERR(psDVFSDevice->psDevfreqCoolingDevice);
+               dev_err(dev, "Failed to register as devfreq cooling device, %d\n", err);
+       }
+
+       of_node_put(of_node);
+
+       return err;
+}
+#endif
+
+#define TO_IMG_ERR(err) ((err == -EPROBE_DEFER) ? PVRSRV_ERROR_PROBE_DEFER : PVRSRV_ERROR_INIT_FAILURE)
+
+PVRSRV_ERROR InitDVFS(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+       IMG_DVFS_DEVICE        *psDVFSDevice = NULL;
+       IMG_DVFS_DEVICE_CFG    *psDVFSDeviceCfg = NULL;
+       struct device          *psDev;
+       PVRSRV_ERROR            eError;
+       int                     err;
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+#if !defined(CONFIG_PM_OPP)
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+#endif
+
+       if (!psDeviceNode)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       if (psDeviceNode->psDevConfig->sDVFS.sDVFSDevice.bInitPending)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "DVFS initialisation already pending for device node %p",
+                                psDeviceNode));
+               return PVRSRV_ERROR_INIT_FAILURE;
+       }
+
+       psDev = psDeviceNode->psDevConfig->pvOSDevice;
+       psDVFSDevice = &psDeviceNode->psDevConfig->sDVFS.sDVFSDevice;
+       psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg;
+       psDeviceNode->psDevConfig->sDVFS.sDVFSDevice.bInitPending = IMG_TRUE;
+
+#if defined(SUPPORT_SOC_TIMER)
+       if (! psDeviceNode->psDevConfig->pfnSoCTimerRead)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "System layer SoC timer callback not implemented"));
+               return PVRSRV_ERROR_NOT_IMPLEMENTED;
+       }
+#endif
+
+       eError = SORgxGpuUtilStatsRegister(&psDVFSDevice->hGpuUtilUserDVFS);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Failed to register to the GPU utilisation stats, %d", eError));
+               return eError;
+       }
+
+#if defined(CONFIG_OF)
+       err = dev_pm_opp_of_add_table(psDev);
+       if (err)
+       {
+               /*
+                * If there are no device-tree or system-layer-provided
+                * operating points, return an error.
+                */
+               if (err != -ENODEV || !psDVFSDeviceCfg->pasOPPTable)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "Failed to init opp table from devicetree, %d", err));
+                       eError = TO_IMG_ERR(err);
+                       goto err_exit;
+               }
+       }
+#endif
+
+       if (psDVFSDeviceCfg->pasOPPTable)
+       {
+               err = FillOPPTable(psDev, psDeviceNode);
+               if (err)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "Failed to fill OPP table with data, %d", err));
+                       eError = TO_IMG_ERR(err);
+                       goto err_exit;
+               }
+       }
+
+       PVR_TRACE(("PVR DVFS init pending: dev = %p, PVR device = %p",
+                          psDev, psDeviceNode));
+
+       return PVRSRV_OK;
+
+err_exit:
+       DeinitDVFS(psDeviceNode);
+       return eError;
+}
+
+PVRSRV_ERROR RegisterDVFSDevice(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+       IMG_DVFS_DEVICE        *psDVFSDevice = NULL;
+       IMG_DVFS_DEVICE_CFG    *psDVFSDeviceCfg = NULL;
+       IMG_DVFS_GOVERNOR_CFG  *psDVFSGovernorCfg = NULL;
+       RGX_TIMING_INFORMATION *psRGXTimingInfo = NULL;
+       struct device          *psDev;
+       unsigned long           min_freq = 0, max_freq = 0, min_volt = 0;
+       PVRSRV_ERROR            eError;
+       int                     err;
+
+       if (!psDeviceNode)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       if (!psDeviceNode->psDevConfig->sDVFS.sDVFSDevice.bInitPending)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "DVFS initialisation not yet pending for device node %p",
+                                psDeviceNode));
+               return PVRSRV_ERROR_INIT_FAILURE;
+       }
+
+       psDev = psDeviceNode->psDevConfig->pvOSDevice;
+       psDVFSDevice = &psDeviceNode->psDevConfig->sDVFS.sDVFSDevice;
+       psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg;
+       psDVFSGovernorCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSGovernorCfg;
+       psRGXTimingInfo = ((RGX_DATA *)psDeviceNode->psDevConfig->hDevData)->psRGXTimingInfo;
+       psDeviceNode->psDevConfig->sDVFS.sDVFSDevice.bInitPending = IMG_FALSE;
+       psDeviceNode->psDevConfig->sDVFS.sDVFSDevice.bReady = IMG_TRUE;
+
+       err = GetOPPValues(psDev, &min_freq, &min_volt, &max_freq);
+       if (err)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Failed to read OPP points, %d", err));
+               eError = TO_IMG_ERR(err);
+               goto err_exit;
+       }
+
+       img_devfreq_dev_profile.initial_freq = min_freq;
+       img_devfreq_dev_profile.polling_ms = psDVFSDeviceCfg->ui32PollMs;
+
+       psRGXTimingInfo->ui32CoreClockSpeed = min_freq;
+
+       psDVFSDeviceCfg->pfnSetFrequency(min_freq);
+       psDVFSDeviceCfg->pfnSetVoltage(min_volt);
+
+       psDVFSDevice->data.upthreshold = psDVFSGovernorCfg->ui32UpThreshold;
+       psDVFSDevice->data.downdifferential = psDVFSGovernorCfg->ui32DownDifferential;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+       psDVFSDevice->psDevFreq = devm_devfreq_add_device(psDev,
+                                                                                                         &img_devfreq_dev_profile,
+                                                                                                         "simple_ondemand",
+                                                                                                         &psDVFSDevice->data);
+#else
+       psDVFSDevice->psDevFreq = devfreq_add_device(psDev,
+                                                                                                &img_devfreq_dev_profile,
+                                                                                                "simple_ondemand",
+                                                                                                &psDVFSDevice->data);
+#endif
+
+       if (IS_ERR(psDVFSDevice->psDevFreq))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                                "Failed to add as devfreq device %p, %ld",
+                                psDVFSDevice->psDevFreq,
+                                PTR_ERR(psDVFSDevice->psDevFreq)));
+               eError = TO_IMG_ERR(PTR_ERR(psDVFSDevice->psDevFreq));
+               goto err_exit;
+       }
+
+       eError = SuspendDVFS(psDeviceNode);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "PVRSRVInit: Failed to suspend DVFS"));
+               goto err_exit;
+       }
+
+#if defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
+       psDVFSDevice->psDevFreq->policy.user.min_freq = min_freq;
+       psDVFSDevice->psDevFreq->policy.user.max_freq = max_freq;
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0))
+       psDVFSDevice->psDevFreq->scaling_min_freq = min_freq;
+       psDVFSDevice->psDevFreq->scaling_max_freq = max_freq;
+#else
+       psDVFSDevice->psDevFreq->min_freq = min_freq;
+       psDVFSDevice->psDevFreq->max_freq = max_freq;
+#endif
+
+       err = devfreq_register_opp_notifier(psDev, psDVFSDevice->psDevFreq);
+       if (err)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Failed to register opp notifier, %d", err));
+               eError = TO_IMG_ERR(err);
+               goto err_exit;
+       }
+
+#if defined(CONFIG_DEVFREQ_THERMAL)
+       err = RegisterCoolingDevice(psDev, psDVFSDevice, psDVFSDeviceCfg->psPowerOps);
+       if (err)
+       {
+               eError = TO_IMG_ERR(err);
+               goto err_exit;
+       }
+#endif
+
+       PVR_TRACE(("PVR DVFS activated: %lu-%lu Hz, Period: %ums",
+                          min_freq,
+                          max_freq,
+                          psDVFSDeviceCfg->ui32PollMs));
+
+       return PVRSRV_OK;
+
+err_exit:
+       UnregisterDVFSDevice(psDeviceNode);
+       return eError;
+}
+
+void UnregisterDVFSDevice(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+       IMG_DVFS_DEVICE *psDVFSDevice = NULL;
+       struct device *psDev = NULL;
+       IMG_INT32 i32Error;
+
+       /* Check the device exists */
+       if (!psDeviceNode)
+       {
+               return;
+       }
+
+       PVRSRV_VZ_RETN_IF_MODE(GUEST);
+
+       psDVFSDevice = &psDeviceNode->psDevConfig->sDVFS.sDVFSDevice;
+       psDev = psDeviceNode->psDevConfig->pvOSDevice;
+
+       if (! psDVFSDevice)
+       {
+               return;
+       }
+
+#if defined(CONFIG_DEVFREQ_THERMAL)
+       if (!IS_ERR_OR_NULL(psDVFSDevice->psDevfreqCoolingDevice))
+       {
+               devfreq_cooling_unregister(psDVFSDevice->psDevfreqCoolingDevice);
+               psDVFSDevice->psDevfreqCoolingDevice = NULL;
+       }
+#endif
+
+       if (psDVFSDevice->psDevFreq)
+       {
+               i32Error = devfreq_unregister_opp_notifier(psDev, psDVFSDevice->psDevFreq);
+               if (i32Error < 0)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "Failed to unregister OPP notifier"));
+               }
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0))
+               devfreq_remove_device(psDVFSDevice->psDevFreq);
+#else
+               devm_devfreq_remove_device(psDev, psDVFSDevice->psDevFreq);
+#endif
+
+               psDVFSDevice->psDevFreq = NULL;
+       }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) && \
+     LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+       kfree(img_devfreq_dev_profile.freq_table);
+#endif
+
+       psDVFSDevice->bInitPending = IMG_FALSE;
+       psDVFSDevice->bReady = IMG_FALSE;
+}
+
+void DeinitDVFS(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+       IMG_DVFS_DEVICE *psDVFSDevice = NULL;
+       struct device *psDev = NULL;
+
+       /* Check the device exists */
+       if (!psDeviceNode)
+       {
+               return;
+       }
+
+       PVRSRV_VZ_RETN_IF_MODE(GUEST);
+
+       psDVFSDevice = &psDeviceNode->psDevConfig->sDVFS.sDVFSDevice;
+       psDev = psDeviceNode->psDevConfig->pvOSDevice;
+
+       /* Remove OPP entries for this device */
+       ClearOPPTable(psDev, psDeviceNode);
+
+#if defined(CONFIG_OF)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) || \
+       (defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)))
+       dev_pm_opp_of_remove_table(psDev);
+#endif
+#endif
+
+       SORgxGpuUtilStatsUnregister(psDVFSDevice->hGpuUtilUserDVFS);
+       psDVFSDevice->hGpuUtilUserDVFS = NULL;
+       psDVFSDevice->bInitPending = IMG_FALSE;
+       psDVFSDevice->bReady = IMG_FALSE;
+}
+
+PVRSRV_ERROR SuspendDVFS(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+       IMG_DVFS_DEVICE *psDVFSDevice = NULL;
+
+       /* Check the device is registered */
+       if (!psDeviceNode)
+       {
+               return PVRSRV_ERROR_INVALID_DEVICE;
+       }
+
+       psDVFSDevice = &psDeviceNode->psDevConfig->sDVFS.sDVFSDevice;
+       psDVFSDevice->bEnabled = IMG_FALSE;
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR ResumeDVFS(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+       IMG_DVFS_DEVICE *psDVFSDevice = NULL;
+
+       /* Check the device is registered */
+       if (!psDeviceNode)
+       {
+               return PVRSRV_ERROR_INVALID_DEVICE;
+       }
+
+       psDVFSDevice = &psDeviceNode->psDevConfig->sDVFS.sDVFSDevice;
+
+       /* Not supported in GuestOS drivers */
+       psDVFSDevice->bEnabled = !PVRSRV_VZ_MODE_IS(GUEST);
+
+       return PVRSRV_OK;
+}
+
+#endif /* !NO_HARDWARE */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_dvfs_device.h b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_dvfs_device.h
new file mode 100644 (file)
index 0000000..a246b24
--- /dev/null
@@ -0,0 +1,62 @@
+/*************************************************************************/ /*!
+@File           pvr_dvfs_device.h
+@Title          System level interface for DVFS
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVR_DVFS_DEVICE_H
+#define PVR_DVFS_DEVICE_H
+
+#include "opaque_types.h"
+#include "pvrsrv_error.h"
+
+
+PVRSRV_ERROR InitDVFS(PPVRSRV_DEVICE_NODE psDeviceNode);
+
+void DeinitDVFS(PPVRSRV_DEVICE_NODE psDeviceNode);
+
+PVRSRV_ERROR RegisterDVFSDevice(PPVRSRV_DEVICE_NODE psDeviceNode);
+
+void UnregisterDVFSDevice(PPVRSRV_DEVICE_NODE psDeviceNode);
+
+PVRSRV_ERROR SuspendDVFS(PPVRSRV_DEVICE_NODE psDeviceNode);
+
+PVRSRV_ERROR ResumeDVFS(PPVRSRV_DEVICE_NODE psDeviceNode);
+
+#endif /* PVR_DVFS_DEVICE_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_fence.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_fence.c
new file mode 100644 (file)
index 0000000..e94522a
--- /dev/null
@@ -0,0 +1,1135 @@
+/*
+ * @File
+ * @Title       PowerVR Linux fence interface
+ * @Codingstyle LinuxKernel
+ * @Copyright   Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+ * @License     Dual MIT/GPLv2
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License Version 2 ("GPL") in which case the provisions
+ * of GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms of
+ * GPL, and not to allow others to use your version of this file under the terms
+ * of the MIT license, indicate your decision by deleting the provisions above
+ * and replace them with the notice and other provisions required by GPL as set
+ * out in the file called "GPL-COPYING" included in this distribution. If you do
+ * not delete the provisions above, a recipient may use your version of this file
+ * under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT-COPYING".
+ *
+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "pvr_fence.h"
+#include "services_kernel_client.h"
+#include "sync_checkpoint_external.h"
+
+#define CREATE_TRACE_POINTS
+#include "pvr_fence_trace.h"
+
+/* This header must always be included last */
+#include "kernel_compatibility.h"
+
+/* Global kmem_cache for pvr_fence object allocations */
+static struct kmem_cache *pvr_fence_cache;
+static DEFINE_MUTEX(pvr_fence_cache_mutex);
+static u32 pvr_fence_cache_refcount;
+
+#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, fmt, ...) \
+       do {                                                             \
+               if (pfnDumpDebugPrintf)                                  \
+                       pfnDumpDebugPrintf(pvDumpDebugFile, fmt,         \
+                                          ## __VA_ARGS__);              \
+               else                                                     \
+                       pr_err(fmt "\n", ## __VA_ARGS__);                \
+       } while (0)
+
+static inline void
+pvr_fence_sync_signal(struct pvr_fence *pvr_fence, u32 fence_sync_flags)
+{
+       SyncCheckpointSignal(pvr_fence->sync_checkpoint, fence_sync_flags);
+}
+
+static inline bool
+pvr_fence_sync_is_signaled(struct pvr_fence *pvr_fence, u32 fence_sync_flags)
+{
+       return SyncCheckpointIsSignalled(pvr_fence->sync_checkpoint,
+                                        fence_sync_flags);
+}
+
+static inline u32
+pvr_fence_sync_value(struct pvr_fence *pvr_fence)
+{
+       if (SyncCheckpointIsErrored(pvr_fence->sync_checkpoint,
+                                   PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+               return PVRSRV_SYNC_CHECKPOINT_ERRORED;
+       else if (SyncCheckpointIsSignalled(pvr_fence->sync_checkpoint,
+                                          PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+               return PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+       else
+               return PVRSRV_SYNC_CHECKPOINT_ACTIVE;
+}
+
+static void
+pvr_fence_context_check_status(struct work_struct *data)
+{
+       PVRSRVCheckStatus(NULL);
+}
+
+void
+pvr_context_value_str(struct pvr_fence_context *fctx, char *str, int size)
+{
+       snprintf(str, size,
+                "%u ctx=%llu refs=%u",
+                atomic_read(&fctx->fence_seqno),
+                fctx->fence_context,
+                refcount_read(&fctx->kref.refcount));
+}
+
+static void
+pvr_fence_context_fences_dump(struct pvr_fence_context *fctx,
+                             DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                             void *pvDumpDebugFile)
+{
+       struct pvr_fence *pvr_fence;
+       unsigned long flags;
+       char value[128];
+
+       spin_lock_irqsave(&fctx->list_lock, flags);
+       pvr_context_value_str(fctx, value, sizeof(value));
+       PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+                        "%s: @%s", fctx->name, value);
+       list_for_each_entry(pvr_fence, &fctx->fence_list, fence_head) {
+               struct dma_fence *fence = pvr_fence->fence;
+               const char *timeline_value_str = "unknown timeline value";
+               const char *fence_value_str = "unknown fence value";
+
+               pvr_fence->base.ops->fence_value_str(&pvr_fence->base, value,
+                                                    sizeof(value));
+               PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+                                 " @%s", value);
+
+               if (is_pvr_fence(fence))
+                       continue;
+
+               if (fence->ops->timeline_value_str) {
+                       fence->ops->timeline_value_str(fence, value,
+                                                      sizeof(value));
+                       timeline_value_str = value;
+               }
+
+               PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+                                 " | %s: %s (driver: %s)",
+                                 fence->ops->get_timeline_name(fence),
+                                 timeline_value_str,
+                                 fence->ops->get_driver_name(fence));
+
+               if (fence->ops->fence_value_str) {
+                       fence->ops->fence_value_str(fence, value,
+                                                   sizeof(value));
+                       fence_value_str = value;
+               }
+
+               PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+                                 " |  @%s (foreign)", fence_value_str);
+       }
+       spin_unlock_irqrestore(&fctx->list_lock, flags);
+}
+
+static inline unsigned int
+pvr_fence_context_seqno_next(struct pvr_fence_context *fctx)
+{
+       return atomic_inc_return(&fctx->fence_seqno) - 1;
+}
+
+/* Prepend the seqno to the fence name for easier debugging,
+ * e.g. name "update" with seqno 12 becomes "12-update".
+ */
+static inline void
+pvr_fence_prepare_name(char *fence_name, size_t fence_name_size,
+               const char *name, unsigned int seqno)
+{
+       unsigned int len;
+
+       len = OSStringUINT32ToStr(fence_name, fence_name_size, seqno);
+       if (likely((len > 0) && (fence_name_size >= (len + 1)))) {
+               fence_name[len] = '-';
+               fence_name[len + 1] = '\0';
+       }
+       strlcat(fence_name, name, fence_name_size);
+}
+
+static void
+pvr_fence_sched_free(struct rcu_head *rcu)
+{
+       struct pvr_fence *pvr_fence = container_of(rcu, struct pvr_fence, rcu);
+
+       kmem_cache_free(pvr_fence_cache, pvr_fence);
+}
+
+static inline void
+pvr_fence_context_free_deferred(struct pvr_fence_context *fctx)
+{
+       struct pvr_fence *pvr_fence, *tmp;
+       LIST_HEAD(deferred_free_list);
+       unsigned long flags;
+
+       spin_lock_irqsave(&fctx->list_lock, flags);
+       list_for_each_entry_safe(pvr_fence, tmp,
+                                &fctx->deferred_free_list,
+                                fence_head)
+               list_move(&pvr_fence->fence_head, &deferred_free_list);
+       spin_unlock_irqrestore(&fctx->list_lock, flags);
+
+       list_for_each_entry_safe(pvr_fence, tmp,
+                                &deferred_free_list,
+                                fence_head) {
+               list_del(&pvr_fence->fence_head);
+               SyncCheckpointFree(pvr_fence->sync_checkpoint);
+               call_rcu(&pvr_fence->rcu, pvr_fence_sched_free);
+               module_put(THIS_MODULE);
+       }
+}
+
+void
+pvr_fence_context_free_deferred_callback(void *data)
+{
+       struct pvr_fence_context *fctx = (struct pvr_fence_context *)data;
+
+       /*
+        * Free up any fence objects we have deferred freeing.
+        */
+       pvr_fence_context_free_deferred(fctx);
+}
+
+static void
+pvr_fence_context_signal_fences(void *data)
+{
+       struct pvr_fence_context *fctx = (struct pvr_fence_context *)data;
+       struct pvr_fence *pvr_fence, *tmp;
+       unsigned long flags1;
+
+       LIST_HEAD(signal_list);
+
+       /*
+        * We can't call fence_signal while holding the lock as we can end up
+        * in a situation whereby pvr_fence_foreign_signal_sync, which also
+        * takes the list lock, ends up being called as a result of the
+        * fence_signal below, i.e. fence_signal(fence) -> fence->callback()
+        *  -> fence_signal(foreign_fence) -> foreign_fence->callback() where
+        * the foreign_fence callback is pvr_fence_foreign_signal_sync.
+        *
+        * So extract the items we intend to signal and add them to their own
+        * queue.
+        */
+       spin_lock_irqsave(&fctx->list_lock, flags1);
+       list_for_each_entry_safe(pvr_fence, tmp, &fctx->signal_list, signal_head) {
+               if (pvr_fence_sync_is_signaled(pvr_fence, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+                       list_move_tail(&pvr_fence->signal_head, &signal_list);
+       }
+       spin_unlock_irqrestore(&fctx->list_lock, flags1);
+
+       list_for_each_entry_safe(pvr_fence, tmp, &signal_list, signal_head) {
+
+               PVR_FENCE_TRACE(&pvr_fence->base, "signalled fence (%s)\n",
+                               pvr_fence->name);
+               trace_pvr_fence_signal_fence(pvr_fence);
+               spin_lock_irqsave(&pvr_fence->fctx->list_lock, flags1);
+               list_del(&pvr_fence->signal_head);
+               spin_unlock_irqrestore(&pvr_fence->fctx->list_lock, flags1);
+               dma_fence_signal(pvr_fence->fence);
+               dma_fence_put(pvr_fence->fence);
+       }
+
+       /*
+        * Take this opportunity to free up any fence objects we
+        * have deferred freeing.
+        */
+       pvr_fence_context_free_deferred(fctx);
+}
+
+void
+pvr_fence_context_signal_fences_nohw(void *data)
+{
+       pvr_fence_context_signal_fences(data);
+}
+
+static void
+pvr_fence_context_destroy_internal(struct pvr_fence_context *fctx)
+{
+       pvr_fence_context_free_deferred(fctx);
+
+       if (WARN_ON(!list_empty_careful(&fctx->fence_list)))
+               pvr_fence_context_fences_dump(fctx, NULL, NULL);
+
+       PVRSRVUnregisterCmdCompleteNotify(fctx->cmd_complete_handle);
+
+       /* Wait for all fences to be freed before kmem_cache_destroy() is called */
+       rcu_barrier();
+
+       /* Destroy pvr_fence object cache, if no one is using it */
+       WARN_ON(pvr_fence_cache == NULL);
+       mutex_lock(&pvr_fence_cache_mutex);
+       if (--pvr_fence_cache_refcount == 0)
+               kmem_cache_destroy(pvr_fence_cache);
+       mutex_unlock(&pvr_fence_cache_mutex);
+
+       kfree(fctx);
+}
+
+static void
+pvr_fence_context_unregister_dbg(void *dbg_request_handle)
+{
+       PVRSRVUnregisterDeviceDbgRequestNotify(dbg_request_handle);
+}
+
+static void
+pvr_fence_foreign_context_destroy_work(struct work_struct *data)
+{
+       struct pvr_fence_context *fctx =
+               container_of(data, struct pvr_fence_context, destroy_work);
+
+       pvr_fence_context_destroy_internal(fctx);
+}
+
+static void
+pvr_fence_context_destroy_work(struct work_struct *data)
+{
+       struct pvr_fence_context *fctx =
+               container_of(data, struct pvr_fence_context, destroy_work);
+
+       pvr_fence_context_unregister_dbg(fctx->dbg_request_handle);
+       pvr_fence_context_destroy_internal(fctx);
+}
+
+static void
+pvr_fence_context_debug_request(void *data, u32 verbosity,
+                               DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                               void *pvDumpDebugFile)
+{
+       struct pvr_fence_context *fctx = (struct pvr_fence_context *)data;
+
+       if (DD_VERB_LVL_ENABLED(verbosity, DEBUG_REQUEST_VERBOSITY_MEDIUM))
+               pvr_fence_context_fences_dump(fctx, pfnDumpDebugPrintf,
+                                             pvDumpDebugFile);
+}
+
+static struct pvr_fence_context *
+pvr_fence_context_create_internal(struct workqueue_struct *fence_status_wq,
+                       const char *name,
+                       work_func_t destroy_callback)
+{
+       struct pvr_fence_context *fctx;
+       PVRSRV_ERROR srv_err;
+
+       fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
+       if (!fctx)
+               return NULL;
+
+       spin_lock_init(&fctx->lock);
+       atomic_set(&fctx->fence_seqno, 0);
+       INIT_WORK(&fctx->check_status_work, pvr_fence_context_check_status);
+       INIT_WORK(&fctx->destroy_work, destroy_callback);
+       spin_lock_init(&fctx->list_lock);
+       INIT_LIST_HEAD(&fctx->signal_list);
+       INIT_LIST_HEAD(&fctx->fence_list);
+       INIT_LIST_HEAD(&fctx->deferred_free_list);
+
+       fctx->fence_wq = fence_status_wq;
+
+       fctx->fence_context = dma_fence_context_alloc(1);
+       strlcpy(fctx->name, name, sizeof(fctx->name));
+
+       srv_err = PVRSRVRegisterCmdCompleteNotify(&fctx->cmd_complete_handle,
+                               pvr_fence_context_signal_fences,
+                               fctx);
+       if (srv_err != PVRSRV_OK) {
+               pr_err("%s: failed to register command complete callback (%s)\n",
+                      __func__, PVRSRVGetErrorString(srv_err));
+               goto err_free_fctx;
+       }
+
+       /* Create pvr_fence object cache, if not already created */
+       mutex_lock(&pvr_fence_cache_mutex);
+       if (pvr_fence_cache_refcount == 0) {
+               pvr_fence_cache = KMEM_CACHE(pvr_fence, 0);
+               if (!pvr_fence_cache) {
+                       pr_err("%s: failed to allocate pvr_fence cache\n",
+                                       __func__);
+                       mutex_unlock(&pvr_fence_cache_mutex);
+                       goto err_unregister_cmd_complete_notify;
+               }
+       }
+       pvr_fence_cache_refcount++;
+       mutex_unlock(&pvr_fence_cache_mutex);
+
+       kref_init(&fctx->kref);
+
+       PVR_FENCE_CTX_TRACE(fctx, "created fence context (%s)\n", name);
+       trace_pvr_fence_context_create(fctx);
+
+       return fctx;
+
+err_unregister_cmd_complete_notify:
+       PVRSRVUnregisterCmdCompleteNotify(fctx->cmd_complete_handle);
+err_free_fctx:
+       kfree(fctx);
+       return NULL;
+}
+
+/**
+ * pvr_fence_context_register_dbg - registers the debug handler for a
+ * fence context
+ *
+ * @dbg_request_handle: handle used to keep a reference for deregistration
+ * @dev: device to attach the debug notifier to
+ * @fctx: fence context passed as data to the debug callback
+ *
+ * Registers a debug notifier for the given fence context on the given device.
+ *
+ * Returns PVRSRV_OK if successful.
+ */
+PVRSRV_ERROR pvr_fence_context_register_dbg(void *dbg_request_handle,
+                               void *dev,
+                               struct pvr_fence_context *fctx)
+{
+       PVRSRV_ERROR srv_err;
+
+       srv_err = PVRSRVRegisterDeviceDbgRequestNotify(dbg_request_handle,
+                               dev,
+                               pvr_fence_context_debug_request,
+                               DEBUG_REQUEST_LINUXFENCE,
+                               fctx);
+       if (srv_err != PVRSRV_OK) {
+               pr_err("%s: failed to register debug request callback (%s)\n",
+                      __func__, PVRSRVGetErrorString(srv_err));
+       }
+
+       return srv_err;
+}
+
+/**
+ * pvr_fence_foreign_context_create - creates a PVR foreign fence context
+ * @fence_status_wq: linux workqueue used to signal foreign fences
+ * @name: context name (used for debugging)
+ *
+ * Creates a PVR foreign fence context that can be used to create PVR fences
+ * or to create PVR fences from an existing fence.
+ *
+ * pvr_fence_context_destroy should be called to clean up the fence context.
+ *
+ * Returns NULL if a context cannot be created.
+ */
+struct pvr_fence_context *
+pvr_fence_foreign_context_create(struct workqueue_struct *fence_status_wq,
+               const char *name)
+{
+       return pvr_fence_context_create_internal(fence_status_wq, name,
+                                                       pvr_fence_foreign_context_destroy_work);
+}
+
+/**
+ * pvr_fence_context_create - creates a PVR fence context
+ * @dev_cookie: services device cookie
+ * @fence_status_wq: Status workqueue to queue fence update CBs.
+ * @name: context name (used for debugging)
+ *
+ * Creates a PVR fence context that can be used to create PVR fences or to
+ * create PVR fences from an existing fence.
+ *
+ * pvr_fence_context_destroy should be called to clean up the fence context.
+ *
+ * Returns NULL if a context cannot be created.
+ */
+struct pvr_fence_context *
+pvr_fence_context_create(void *dev_cookie,
+                        struct workqueue_struct *fence_status_wq,
+                        const char *name)
+{
+       struct pvr_fence_context *fctx;
+       PVRSRV_ERROR eError;
+
+       fctx = pvr_fence_context_create_internal(fence_status_wq, name,
+                                       pvr_fence_context_destroy_work);
+       if (fctx == NULL) {
+               pr_err("%s: failed to create fence context", __func__);
+               goto err_out;
+       }
+
+       eError = pvr_fence_context_register_dbg(&fctx->dbg_request_handle,
+                                       dev_cookie,
+                                       fctx);
+       if (eError != PVRSRV_OK) {
+               pr_err("%s: failed to register fence context debug (%s)\n",
+                      __func__, PVRSRVGetErrorString(eError));
+               goto err_destroy_ctx;
+       }
+
+       return fctx;
+
+err_destroy_ctx:
+       pvr_fence_context_destroy(fctx);
+err_out:
+       return NULL;
+}
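+/*
+ * Usage sketch (assumed caller, not code from this patch): a consumer with
+ * a services device cookie and a fence status workqueue pairs the create
+ * and destroy calls like so:
+ *
+ *     struct pvr_fence_context *fctx;
+ *
+ *     fctx = pvr_fence_context_create(dev_cookie, fence_status_wq,
+ *                                     "pvr-timeline");
+ *     if (!fctx)
+ *             return -ENOMEM;
+ *
+ *     ... create and use PVR fences on fctx ...
+ *
+ *     pvr_fence_context_destroy(fctx);
+ */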
+
+static void pvr_fence_context_destroy_kref(struct kref *kref)
+{
+       struct pvr_fence_context *fctx =
+               container_of(kref, struct pvr_fence_context, kref);
+
+       PVR_FENCE_CTX_TRACE(fctx, "destroyed fence context (%s)\n", fctx->name);
+
+       trace_pvr_fence_context_destroy_kref(fctx);
+
+       schedule_work(&fctx->destroy_work);
+}
+
+/**
+ * pvr_fence_context_destroy - destroys a context
+ * @fctx: PVR fence context to destroy
+ *
+ * Destroys a PVR fence context with the expectation that all fences have been
+ * destroyed.
+ */
+void
+pvr_fence_context_destroy(struct pvr_fence_context *fctx)
+{
+       trace_pvr_fence_context_destroy(fctx);
+
+       kref_put(&fctx->kref, pvr_fence_context_destroy_kref);
+}
+
+static const char *
+pvr_fence_get_driver_name(struct dma_fence *fence)
+{
+       return PVR_LDM_DRIVER_REGISTRATION_NAME;
+}
+
+static const char *
+pvr_fence_get_timeline_name(struct dma_fence *fence)
+{
+       struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+
+       if (pvr_fence)
+               return pvr_fence->fctx->name;
+       return NULL;
+}
+
+static
+void pvr_fence_fence_value_str(struct dma_fence *fence, char *str, int size)
+{
+       struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+
+       if (!pvr_fence)
+               return;
+
+       snprintf(str, size,
+                "%llu: (%s%s) refs=%u fwaddr=%#08x enqueue=%u status=%-9s %s%s",
+                (u64) pvr_fence->fence->seqno,
+                test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+                         &pvr_fence->fence->flags) ? "+" : "-",
+                test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+                         &pvr_fence->fence->flags) ? "+" : "-",
+                refcount_read(&pvr_fence->fence->refcount.refcount),
+                SyncCheckpointGetFirmwareAddr(
+                        pvr_fence->sync_checkpoint),
+                SyncCheckpointGetEnqueuedCount(pvr_fence->sync_checkpoint),
+                SyncCheckpointGetStateString(pvr_fence->sync_checkpoint),
+                pvr_fence->name,
+                (&pvr_fence->base != pvr_fence->fence) ?
+                "(foreign)" : "");
+}
+
+static
+void pvr_fence_timeline_value_str(struct dma_fence *fence, char *str, int size)
+{
+       struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+
+       if (pvr_fence)
+               pvr_context_value_str(pvr_fence->fctx, str, size);
+}
+
+static bool
+pvr_fence_enable_signaling(struct dma_fence *fence)
+{
+       struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+       unsigned long flags;
+
+       if (!pvr_fence)
+               return false;
+
+       WARN_ON_SMP(!spin_is_locked(&pvr_fence->fctx->lock));
+
+       if (pvr_fence_sync_is_signaled(pvr_fence, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+               return false;
+
+       dma_fence_get(&pvr_fence->base);
+
+       spin_lock_irqsave(&pvr_fence->fctx->list_lock, flags);
+       list_add_tail(&pvr_fence->signal_head, &pvr_fence->fctx->signal_list);
+       spin_unlock_irqrestore(&pvr_fence->fctx->list_lock, flags);
+
+       PVR_FENCE_TRACE(&pvr_fence->base, "signalling enabled (%s)\n",
+                       pvr_fence->name);
+       trace_pvr_fence_enable_signaling(pvr_fence);
+
+       return true;
+}
+
+static bool
+pvr_fence_is_signaled(struct dma_fence *fence)
+{
+       struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+
+       if (pvr_fence)
+               return pvr_fence_sync_is_signaled(pvr_fence,
+                                                 PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT);
+       return false;
+}
+
+static void
+pvr_fence_release(struct dma_fence *fence)
+{
+       struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+       unsigned long flags;
+
+       if (pvr_fence) {
+               struct pvr_fence_context *fctx = pvr_fence->fctx;
+
+               PVR_FENCE_TRACE(&pvr_fence->base, "released fence (%s)\n",
+                               pvr_fence->name);
+               trace_pvr_fence_release(pvr_fence);
+
+               spin_lock_irqsave(&fctx->list_lock, flags);
+               list_move(&pvr_fence->fence_head,
+                         &fctx->deferred_free_list);
+               spin_unlock_irqrestore(&fctx->list_lock, flags);
+
+               kref_put(&fctx->kref, pvr_fence_context_destroy_kref);
+       }
+}
+
+const struct dma_fence_ops pvr_fence_ops = {
+       .get_driver_name = pvr_fence_get_driver_name,
+       .get_timeline_name = pvr_fence_get_timeline_name,
+       .fence_value_str = pvr_fence_fence_value_str,
+       .timeline_value_str = pvr_fence_timeline_value_str,
+       .enable_signaling = pvr_fence_enable_signaling,
+       .signaled = pvr_fence_is_signaled,
+       .wait = dma_fence_default_wait,
+       .release = pvr_fence_release,
+};
+
+/**
+ * pvr_fence_create - creates a PVR fence
+ * @fctx: PVR fence context on which the PVR fence should be created
+ * @sync_checkpoint_ctx: context in which to create sync checkpoints
+ * @timeline_fd: timeline on which the PVR fence should be created
+ * @name: PVR fence name (used for debugging)
+ *
+ * Creates a PVR fence.
+ *
+ * Once the fence is finished with, pvr_fence_destroy should be called.
+ *
+ * Returns NULL if a PVR fence cannot be created.
+ */
+struct pvr_fence *
+pvr_fence_create(struct pvr_fence_context *fctx,
+               struct SYNC_CHECKPOINT_CONTEXT_TAG *sync_checkpoint_ctx,
+               int timeline_fd, const char *name)
+{
+       struct pvr_fence *pvr_fence;
+       unsigned int seqno;
+       unsigned long flags;
+       PVRSRV_ERROR srv_err;
+
+       if (!try_module_get(THIS_MODULE))
+               goto err_exit;
+
+       /* Note: As a kmem_cache is used to allocate pvr_fence objects, make
+        * sure that all members of the pvr_fence struct are initialised here.
+        */
+       pvr_fence = kmem_cache_alloc(pvr_fence_cache, GFP_KERNEL);
+       if (unlikely(!pvr_fence))
+               goto err_module_put;
+
+       srv_err = SyncCheckpointAlloc(sync_checkpoint_ctx,
+                                     (PVRSRV_TIMELINE) timeline_fd, PVRSRV_NO_FENCE,
+                                     name, &pvr_fence->sync_checkpoint);
+       if (unlikely(srv_err != PVRSRV_OK))
+               goto err_free_fence;
+
+       INIT_LIST_HEAD(&pvr_fence->fence_head);
+       INIT_LIST_HEAD(&pvr_fence->signal_head);
+       pvr_fence->fctx = fctx;
+       seqno = pvr_fence_context_seqno_next(fctx);
+       /* Add the seqno to the fence name for easier debugging */
+       pvr_fence_prepare_name(pvr_fence->name, sizeof(pvr_fence->name),
+                       name, seqno);
+
+       /* Reset cb to zero */
+       memset(&pvr_fence->cb, 0, sizeof(pvr_fence->cb));
+       pvr_fence->fence = &pvr_fence->base;
+
+       dma_fence_init(&pvr_fence->base, &pvr_fence_ops, &fctx->lock,
+                      fctx->fence_context, seqno);
+
+       spin_lock_irqsave(&fctx->list_lock, flags);
+       list_add_tail(&pvr_fence->fence_head, &fctx->fence_list);
+       spin_unlock_irqrestore(&fctx->list_lock, flags);
+
+       kref_get(&fctx->kref);
+
+       PVR_FENCE_TRACE(&pvr_fence->base, "created fence (%s)\n", name);
+       trace_pvr_fence_create(pvr_fence);
+
+       return pvr_fence;
+
+err_free_fence:
+       kmem_cache_free(pvr_fence_cache, pvr_fence);
+err_module_put:
+       module_put(THIS_MODULE);
+err_exit:
+       return NULL;
+}
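+
+/*
+ * Usage sketch: assuming a fence context "fctx" and a sync checkpoint
+ * context "sync_ctx" created elsewhere, a native update fence might be
+ * obtained and later released roughly as follows (fctx, sync_ctx and
+ * timeline_fd are placeholder names):
+ *
+ *     struct pvr_fence *update_fence;
+ *
+ *     update_fence = pvr_fence_create(fctx, sync_ctx, timeline_fd, "update");
+ *     if (!update_fence)
+ *             return -ENOMEM;
+ *
+ *     ... attach the fence's sync checkpoint to a kick ...
+ *
+ *     pvr_fence_destroy(update_fence);
+ */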
+
+static const char *
+pvr_fence_foreign_get_driver_name(struct dma_fence *fence)
+{
+       return PVR_LDM_DRIVER_REGISTRATION_NAME;
+}
+
+static const char *
+pvr_fence_foreign_get_timeline_name(struct dma_fence *fence)
+{
+       return "foreign";
+}
+
+static
+void pvr_fence_foreign_fence_value_str(struct dma_fence *fence, char *str,
+                                      int size)
+{
+       struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+       u32 sync_addr = 0;
+       u32 sync_value_next;
+
+       if (WARN_ON(!pvr_fence))
+               return;
+
+       sync_addr = SyncCheckpointGetFirmwareAddr(pvr_fence->sync_checkpoint);
+       sync_value_next = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+
+       /*
+        * Include the fence flag bits from the foreign fence instead of our
+        * shadow copy. This is done as the shadow fence flag bits aren't used.
+        */
+       snprintf(str, size,
+                "%llu: (%s%s) refs=%u fwaddr=%#08x cur=%#08x nxt=%#08x %s",
+                (u64) fence->seqno,
+                test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+                         &pvr_fence->fence->flags) ? "+" : "-",
+                test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+                         &pvr_fence->fence->flags) ? "+" : "-",
+                refcount_read(&fence->refcount.refcount),
+                sync_addr,
+                pvr_fence_sync_value(pvr_fence),
+                sync_value_next,
+                pvr_fence->name);
+}
+
+static
+void pvr_fence_foreign_timeline_value_str(struct dma_fence *fence, char *str,
+                                         int size)
+{
+       struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+
+       if (pvr_fence)
+               pvr_context_value_str(pvr_fence->fctx, str, size);
+}
+
+static bool
+pvr_fence_foreign_enable_signaling(struct dma_fence *fence)
+{
+       WARN_ON("cannot enable signalling on foreign fence");
+       return false;
+}
+
+static signed long
+pvr_fence_foreign_wait(struct dma_fence *fence, bool intr, signed long timeout)
+{
+       WARN_ON("cannot wait on foreign fence");
+       return 0;
+}
+
+static void
+pvr_fence_foreign_release(struct dma_fence *fence)
+{
+       struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+       unsigned long flags;
+
+       if (pvr_fence) {
+               struct pvr_fence_context *fctx = pvr_fence->fctx;
+               struct dma_fence *foreign_fence = pvr_fence->fence;
+
+               PVR_FENCE_TRACE(&pvr_fence->base,
+                               "released fence for foreign fence %llu#%d (%s)\n",
+                               (u64) pvr_fence->fence->context,
+                               pvr_fence->fence->seqno, pvr_fence->name);
+               trace_pvr_fence_foreign_release(pvr_fence);
+
+               spin_lock_irqsave(&fctx->list_lock, flags);
+               list_move(&pvr_fence->fence_head,
+                         &fctx->deferred_free_list);
+               spin_unlock_irqrestore(&fctx->list_lock, flags);
+
+               dma_fence_put(foreign_fence);
+
+               kref_put(&fctx->kref,
+                        pvr_fence_context_destroy_kref);
+       }
+}
+
+const struct dma_fence_ops pvr_fence_foreign_ops = {
+       .get_driver_name = pvr_fence_foreign_get_driver_name,
+       .get_timeline_name = pvr_fence_foreign_get_timeline_name,
+       .fence_value_str = pvr_fence_foreign_fence_value_str,
+       .timeline_value_str = pvr_fence_foreign_timeline_value_str,
+       .enable_signaling = pvr_fence_foreign_enable_signaling,
+       .wait = pvr_fence_foreign_wait,
+       .release = pvr_fence_foreign_release,
+};
+
+static void
+pvr_fence_foreign_signal_sync(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+       struct pvr_fence *pvr_fence = container_of(cb, struct pvr_fence, cb);
+       struct pvr_fence_context *fctx = pvr_fence->fctx;
+
+       WARN_ON_ONCE(is_pvr_fence(fence));
+
+       /* Callback registered by dma_fence_add_callback can be called from an atomic ctx */
+       pvr_fence_sync_signal(pvr_fence, PVRSRV_FENCE_FLAG_CTX_ATOMIC);
+
+       trace_pvr_fence_foreign_signal(pvr_fence);
+
+       queue_work(fctx->fence_wq, &fctx->check_status_work);
+
+       PVR_FENCE_TRACE(&pvr_fence->base,
+                       "foreign fence %llu#%d signalled (%s)\n",
+                       (u64) pvr_fence->fence->context,
+                       pvr_fence->fence->seqno, pvr_fence->name);
+
+       /* Drop the reference on the base fence */
+       dma_fence_put(&pvr_fence->base);
+}
+
+/**
+ * pvr_fence_create_from_fence - creates a PVR fence from a fence
+ * @fctx: PVR fence context on which the PVR fence should be created
+ * @sync_checkpoint_ctx: context in which to create sync checkpoints
+ * @fence: fence from which the PVR fence should be created
+ * @fence_fd: fd for the sync file to which the fence belongs. If it doesn't
+ *            belong to a sync file then PVRSRV_NO_FENCE should be given
+ *            instead.
+ * @name: PVR fence name (used for debugging)
+ *
+ * Creates a PVR fence from an existing fence. If the fence is a foreign fence,
+ * i.e. one that doesn't originate from a PVR fence context, then a new PVR
+ * fence will be created using the specified sync_checkpoint_ctx. Otherwise,
+ * a reference will be taken on the underlying fence and the existing PVR
+ * fence will be returned.
+ *
+ * Once the fence is finished with, pvr_fence_destroy should be called.
+ *
+ * Returns NULL if a PVR fence cannot be created.
+ */
+struct pvr_fence *
+pvr_fence_create_from_fence(struct pvr_fence_context *fctx,
+                           struct SYNC_CHECKPOINT_CONTEXT_TAG *sync_checkpoint_ctx,
+                           struct dma_fence *fence,
+                           PVRSRV_FENCE fence_fd,
+                           const char *name)
+{
+       struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+       unsigned int seqno;
+       unsigned long flags;
+       PVRSRV_ERROR srv_err;
+       int err;
+
+       if (pvr_fence) {
+               if (WARN_ON(fence->ops == &pvr_fence_foreign_ops))
+                       return NULL;
+               dma_fence_get(fence);
+
+               PVR_FENCE_TRACE(fence, "created fence from PVR fence (%s)\n",
+                               name);
+               return pvr_fence;
+       }
+
+       if (!try_module_get(THIS_MODULE))
+               goto err_exit;
+
+       /* Note: As a kmem_cache is used to allocate pvr_fence objects, make
+        * sure that all members of the pvr_fence struct are initialised here.
+        */
+       pvr_fence = kmem_cache_alloc(pvr_fence_cache, GFP_KERNEL);
+       if (!pvr_fence)
+               goto err_module_put;
+
+       srv_err = SyncCheckpointAlloc(sync_checkpoint_ctx,
+                                         SYNC_CHECKPOINT_FOREIGN_CHECKPOINT,
+                                         fence_fd,
+                                         name, &pvr_fence->sync_checkpoint);
+       if (srv_err != PVRSRV_OK)
+               goto err_free_pvr_fence;
+
+       INIT_LIST_HEAD(&pvr_fence->fence_head);
+       INIT_LIST_HEAD(&pvr_fence->signal_head);
+       pvr_fence->fctx = fctx;
+       pvr_fence->fence = dma_fence_get(fence);
+       seqno = pvr_fence_context_seqno_next(fctx);
+       /* Add the seqno to the fence name for easier debugging */
+       pvr_fence_prepare_name(pvr_fence->name, sizeof(pvr_fence->name),
+                       name, seqno);
+
+       /*
+        * We use the base fence to refcount the PVR fence and to do the
+        * necessary clean up once the refcount drops to 0.
+        */
+       dma_fence_init(&pvr_fence->base, &pvr_fence_foreign_ops, &fctx->lock,
+                      fctx->fence_context, seqno);
+
+       /*
+        * Take an extra reference on the base fence that gets dropped when the
+        * foreign fence is signalled.
+        */
+       dma_fence_get(&pvr_fence->base);
+
+       spin_lock_irqsave(&fctx->list_lock, flags);
+       list_add_tail(&pvr_fence->fence_head, &fctx->fence_list);
+       spin_unlock_irqrestore(&fctx->list_lock, flags);
+       kref_get(&fctx->kref);
+
+       PVR_FENCE_TRACE(&pvr_fence->base,
+                       "created fence from foreign fence %llu#%d (%s)\n",
+                       (u64) pvr_fence->fence->context,
+                       pvr_fence->fence->seqno, name);
+
+       err = dma_fence_add_callback(fence, &pvr_fence->cb,
+                                    pvr_fence_foreign_signal_sync);
+       if (err) {
+               if (err != -ENOENT) {
+                       pr_err("%s: failed to add fence callback (err=%d)",
+                              __func__, err);
+                       goto err_put_ref;
+               }
+
+               /*
+                * The fence has already signalled, so mark the sync as
+                * signalled. The "signalled" hwperf packet should still be
+                * emitted because the callback won't be invoked for an
+                * already signalled fence; hence the PVRSRV_FENCE_FLAG_NONE
+                * flag.
+                */
+               pvr_fence_sync_signal(pvr_fence, PVRSRV_FENCE_FLAG_NONE);
+               PVR_FENCE_TRACE(&pvr_fence->base,
+                               "foreign fence %llu#%d already signaled (%s)\n",
+                               (u64) pvr_fence->fence->context,
+                               pvr_fence->fence->seqno,
+                               name);
+               dma_fence_put(&pvr_fence->base);
+       }
+
+       trace_pvr_fence_foreign_create(pvr_fence);
+
+       return pvr_fence;
+
+err_put_ref:
+       kref_put(&fctx->kref, pvr_fence_context_destroy_kref);
+       spin_lock_irqsave(&fctx->list_lock, flags);
+       list_del(&pvr_fence->fence_head);
+       spin_unlock_irqrestore(&fctx->list_lock, flags);
+       SyncCheckpointFree(pvr_fence->sync_checkpoint);
+err_free_pvr_fence:
+       kmem_cache_free(pvr_fence_cache, pvr_fence);
+err_module_put:
+       module_put(THIS_MODULE);
+err_exit:
+       return NULL;
+}
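+
+/*
+ * Usage sketch: importing a fence obtained from a sync file into the PVR
+ * domain might look roughly as follows (fctx, sync_ctx and fence_fd are
+ * placeholder names):
+ *
+ *     struct dma_fence *fence = sync_file_get_fence(fence_fd);
+ *     struct pvr_fence *check_fence;
+ *
+ *     if (!fence)
+ *             return -EINVAL;
+ *
+ *     check_fence = pvr_fence_create_from_fence(fctx, sync_ctx, fence,
+ *                                               fence_fd, "check");
+ *     dma_fence_put(fence);
+ *     if (!check_fence)
+ *             return -ENOMEM;
+ */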
+
+/**
+ * pvr_fence_destroy - destroys a PVR fence
+ * @pvr_fence: PVR fence to destroy
+ *
+ * Destroys a PVR fence. Upon return, the PVR fence may still exist if something
+ * else still references the underlying fence, e.g. a reservation object, or if
+ * software signalling has been enabled and the fence hasn't yet been signalled.
+ */
+void
+pvr_fence_destroy(struct pvr_fence *pvr_fence)
+{
+       PVR_FENCE_TRACE(&pvr_fence->base, "destroyed fence (%s)\n",
+                       pvr_fence->name);
+
+       dma_fence_put(&pvr_fence->base);
+}
+
+/**
+ * pvr_fence_sw_signal - signals a PVR fence sync
+ * @pvr_fence: PVR fence to signal
+ *
+ * Sets the PVR fence sync value to signalled.
+ *
+ * Returns -EINVAL if the PVR fence represents a foreign fence.
+ */
+int
+pvr_fence_sw_signal(struct pvr_fence *pvr_fence)
+{
+       if (!is_our_fence(pvr_fence->fctx, &pvr_fence->base))
+               return -EINVAL;
+
+       pvr_fence_sync_signal(pvr_fence, PVRSRV_FENCE_FLAG_NONE);
+
+       queue_work(pvr_fence->fctx->fence_wq,
+                  &pvr_fence->fctx->check_status_work);
+
+       PVR_FENCE_TRACE(&pvr_fence->base, "sw set fence sync signalled (%s)\n",
+                       pvr_fence->name);
+
+       return 0;
+}
+
+/**
+ * pvr_fence_sw_error - errors the sync checkpoint backing a PVR fence
+ * @pvr_fence: PVR fence to error
+ *
+ * Sets the PVR fence sync checkpoint value to errored.
+ *
+ * Returns -EINVAL if the PVR fence represents a foreign fence.
+ */
+int
+pvr_fence_sw_error(struct pvr_fence *pvr_fence)
+{
+       if (!is_our_fence(pvr_fence->fctx, &pvr_fence->base))
+               return -EINVAL;
+
+       SyncCheckpointError(pvr_fence->sync_checkpoint, PVRSRV_FENCE_FLAG_NONE);
+       PVR_FENCE_TRACE(&pvr_fence->base, "sw set fence sync errored (%s)\n",
+                       pvr_fence->name);
+
+       return 0;
+}
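+
+/*
+ * Usage sketch: a software timeline built on a PVR fence context might
+ * either signal or error its fences from the CPU, e.g. on a timeline
+ * increment or on teardown (sw_fence is a placeholder name):
+ *
+ *     if (pvr_fence_sw_signal(sw_fence))
+ *             pr_warn("fence is foreign and cannot be signalled in sw\n");
+ */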
+
+int
+pvr_fence_get_checkpoints(struct pvr_fence **pvr_fences, u32 nr_fences,
+                         struct SYNC_CHECKPOINT_TAG **fence_checkpoints)
+{
+       struct SYNC_CHECKPOINT_TAG **next_fence_checkpoint = fence_checkpoints;
+       struct pvr_fence **next_pvr_fence = pvr_fences;
+       int fence_checkpoint_idx;
+
+       if (nr_fences > 0) {
+               for (fence_checkpoint_idx = 0; fence_checkpoint_idx < nr_fences;
+                    fence_checkpoint_idx++) {
+                       struct pvr_fence *next_fence = *next_pvr_fence++;
+                       *next_fence_checkpoint++ = next_fence->sync_checkpoint;
+                       /* Take reference on sync checkpoint (will be dropped
+                        * later by kick code)
+                        */
+                       SyncCheckpointTakeRef(next_fence->sync_checkpoint);
+               }
+       }
+
+       return 0;
+}
+
+struct SYNC_CHECKPOINT_TAG *
+pvr_fence_get_checkpoint(struct pvr_fence *update_fence)
+{
+       return update_fence->sync_checkpoint;
+}
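+
+/*
+ * Usage sketch: a kick path that needs the underlying services sync
+ * checkpoints might gather them roughly as follows; the references taken by
+ * pvr_fence_get_checkpoints() are expected to be dropped later by the kick
+ * code, as noted above (check_fences and nr_checks are placeholder names):
+ *
+ *     struct SYNC_CHECKPOINT_TAG *checkpoints[8];
+ *
+ *     if (nr_checks > ARRAY_SIZE(checkpoints))
+ *             return -EINVAL;
+ *     pvr_fence_get_checkpoints(check_fences, nr_checks, checkpoints);
+ */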
+
+/**
+ * pvr_fence_dump_info_on_stalled_ufos - dump debug information on native
+ * fences associated with any of the UFOs provided
+ * @fctx:    PVR fence context whose active fence list should be searched
+ * @nr_ufos: number of UFOs in vaddrs
+ * @vaddrs:  array of FW addresses of UFOs which the driver is waiting on
+ *
+ * Called from pvr_sync_file.c if the driver determines any GPU work is stuck
+ * waiting for a sync checkpoint representing a foreign sync to be signalled.
+ *
+ * Outputs debug information to the kernel log on the Linux fences which would
+ * be responsible for signalling the sync checkpoints indicated by the UFO
+ * virtual addresses.
+ *
+ * Returns the number of UFOs in the array which were found to be associated
+ * with foreign syncs.
+ */
+u32 pvr_fence_dump_info_on_stalled_ufos(struct pvr_fence_context *fctx,
+                                       u32 nr_ufos, u32 *vaddrs)
+{
+       int our_ufo_ct = 0;
+       struct pvr_fence *pvr_fence;
+       unsigned long flags;
+
+       spin_lock_irqsave(&fctx->list_lock, flags);
+       /* dump info on any ufos in our active list */
+       list_for_each_entry(pvr_fence, &fctx->fence_list, fence_head) {
+               u32 *this_ufo_vaddr = vaddrs;
+               int ufo_num;
+               DUMPDEBUG_PRINTF_FUNC *pfnDummy = NULL;
+
+               for (ufo_num = 0; ufo_num < nr_ufos; ufo_num++) {
+                       struct SYNC_CHECKPOINT_TAG *checkpoint =
+                               pvr_fence->sync_checkpoint;
+                       const u32 fence_ufo_addr =
+                               SyncCheckpointGetFirmwareAddr(checkpoint);
+
+                       if (fence_ufo_addr != this_ufo_vaddr[ufo_num])
+                               continue;
+
+                       /* Dump sync info */
+                       PVR_DUMPDEBUG_LOG(pfnDummy, NULL,
+                                         "\tSyncID = %d, FWAddr = 0x%08x: TLID = %d (Foreign Fence - [%p] %s)",
+                                         SyncCheckpointGetId(checkpoint),
+                                         fence_ufo_addr,
+                                         SyncCheckpointGetTimeline(checkpoint),
+                                         pvr_fence->fence,
+                                         pvr_fence->name);
+                       our_ufo_ct++;
+               }
+       }
+       spin_unlock_irqrestore(&fctx->list_lock, flags);
+       return our_ufo_ct;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_fence.h b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_fence.h
new file mode 100644 (file)
index 0000000..21870ba
--- /dev/null
@@ -0,0 +1,239 @@
+/*
+ * @File
+ * @Title       PowerVR Linux fence interface
+ * @Codingstyle LinuxKernel
+ * @Copyright   Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+ * @License     Dual MIT/GPLv2
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License Version 2 ("GPL") in which case the provisions
+ * of GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms of
+ * GPL, and not to allow others to use your version of this file under the terms
+ * of the MIT license, indicate your decision by deleting the provisions above
+ * and replace them with the notice and other provisions required by GPL as set
+ * out in the file called "GPL-COPYING" included in this distribution. If you do
+ * not delete the provisions above, a recipient may use your version of this file
+ * under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT-COPYING".
+ *
+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#if !defined(__PVR_FENCE_H__)
+#define __PVR_FENCE_H__
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
+static inline void pvr_fence_cleanup(void)
+{
+}
+#else
+#include "services_kernel_client.h"
+#include "pvr_linux_fence.h"
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+struct SYNC_CHECKPOINT_CONTEXT_TAG;
+struct SYNC_CHECKPOINT_TAG;
+
+/**
+ * struct pvr_fence_context - PVR fence context used to create and manage PVR fences
+ * @lock: protects the context and fences created on the context
+ * @name: fence context name (used for debugging)
+ * @dbg_request_handle: handle for callback used to dump debug data
+ * @fence_context: fence context with which to associate fences
+ * @fence_seqno: sequence number to use for the next fence
+ * @fence_wq: work queue for signalled fence work
+ * @check_status_work: work item used to inform services when a foreign fence
+ * has signalled
+ * @cmd_complete_handle: handle for callback used to signal fences when fence
+ * syncs are met
+ * @list_lock: protects the signal, fence and deferred free lists
+ * @signal_list: list of fences waiting to be signalled
+ * @fence_list: list of fences (used for debugging)
+ * @deferred_free_list: list of fences that we will free when we are no longer
+ * holding spinlocks. The frees are performed when an update fence is
+ * signalled or the context is freed.
+ * @kref: reference count on the fence context
+ * @destroy_work: work item used to defer destruction of the fence context
+ */
+struct pvr_fence_context {
+       spinlock_t lock;
+       char name[32];
+       void *dbg_request_handle;
+       u64 fence_context;
+       atomic_t fence_seqno;
+
+       struct workqueue_struct *fence_wq;
+       struct work_struct check_status_work;
+
+       void *cmd_complete_handle;
+
+       spinlock_t list_lock;
+       struct list_head signal_list;
+       struct list_head fence_list;
+       struct list_head deferred_free_list;
+
+       struct kref kref;
+       struct work_struct destroy_work;
+};
+
+/**
+ * struct pvr_fence - PVR fence that represents both native and foreign fences
+ * @base: fence structure
+ * @fctx: fence context on which this fence was created
+ * @name: fence name (used for debugging)
+ * @fence: pointer to base fence structure or foreign fence
+ * @sync_checkpoint: services sync checkpoint used by hardware
+ * @fence_head: entry on the context fence and deferred free list
+ * @signal_head: entry on the context signal list
+ * @cb: foreign fence callback to set the sync to signalled
+ * @rcu: RCU head used when deferring the free of the fence
+ */
+struct pvr_fence {
+       struct dma_fence base;
+       struct pvr_fence_context *fctx;
+       char name[32];
+
+       struct dma_fence *fence;
+       struct SYNC_CHECKPOINT_TAG *sync_checkpoint;
+
+       struct list_head fence_head;
+       struct list_head signal_head;
+       struct dma_fence_cb cb;
+       struct rcu_head rcu;
+};
+
+extern const struct dma_fence_ops pvr_fence_ops;
+extern const struct dma_fence_ops pvr_fence_foreign_ops;
+
+static inline bool is_our_fence(struct pvr_fence_context *fctx,
+                               struct dma_fence *fence)
+{
+       return (fence->context == fctx->fence_context);
+}
+
+static inline bool is_pvr_fence(struct dma_fence *fence)
+{
+       return ((fence->ops == &pvr_fence_ops) ||
+               (fence->ops == &pvr_fence_foreign_ops));
+}
+
+static inline struct pvr_fence *to_pvr_fence(struct dma_fence *fence)
+{
+       if (is_pvr_fence(fence))
+               return container_of(fence, struct pvr_fence, base);
+
+       return NULL;
+}
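+
+/*
+ * Usage sketch: to_pvr_fence() doubles as a classification helper, returning
+ * NULL for fences that were not created by this driver. A caller that needs
+ * to distinguish native from foreign fences might therefore do something
+ * like the following (fence and fctx are placeholder names):
+ *
+ *     struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+ *
+ *     if (!pvr_fence) {
+ *             ... foreign fence: wrap it with pvr_fence_create_from_fence() ...
+ *     } else if (is_our_fence(fctx, fence)) {
+ *             ... native fence created on this context ...
+ *     }
+ */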
+
+PVRSRV_ERROR pvr_fence_context_register_dbg(void *dbg_request_handle,
+                               void *dev,
+                               struct pvr_fence_context *fctx);
+struct pvr_fence_context *
+pvr_fence_foreign_context_create(struct workqueue_struct *fence_status_wq,
+               const char *name);
+struct pvr_fence_context *
+pvr_fence_context_create(void *dev_cookie,
+                        struct workqueue_struct *fence_status_wq,
+                        const char *name);
+void pvr_fence_context_destroy(struct pvr_fence_context *fctx);
+void pvr_context_value_str(struct pvr_fence_context *fctx, char *str, int size);
+
+struct pvr_fence *
+pvr_fence_create(struct pvr_fence_context *fctx,
+                struct SYNC_CHECKPOINT_CONTEXT_TAG *sync_checkpoint_ctx,
+                int timeline_fd, const char *name);
+struct pvr_fence *
+pvr_fence_create_from_fence(struct pvr_fence_context *fctx,
+                           struct SYNC_CHECKPOINT_CONTEXT_TAG *sync_checkpoint_ctx,
+                           struct dma_fence *fence,
+                           PVRSRV_FENCE fence_fd,
+                           const char *name);
+void pvr_fence_destroy(struct pvr_fence *pvr_fence);
+int pvr_fence_sw_signal(struct pvr_fence *pvr_fence);
+int pvr_fence_sw_error(struct pvr_fence *pvr_fence);
+
+int pvr_fence_get_checkpoints(struct pvr_fence **pvr_fences, u32 nr_fences,
+                             struct SYNC_CHECKPOINT_TAG **fence_checkpoints);
+struct SYNC_CHECKPOINT_TAG *
+pvr_fence_get_checkpoint(struct pvr_fence *update_fence);
+
+void pvr_fence_context_signal_fences_nohw(void *data);
+
+void pvr_fence_context_free_deferred_callback(void *data);
+
+u32 pvr_fence_dump_info_on_stalled_ufos(struct pvr_fence_context *fctx,
+                                       u32 nr_ufos,
+                                       u32 *vaddrs);
+
+static inline void pvr_fence_cleanup(void)
+{
+       /*
+        * Ensure all PVR fence contexts have been destroyed by flushing the
+        * global workqueue.
+        */
+       flush_scheduled_work();
+}
+
+#if defined(PVR_FENCE_DEBUG)
+#define PVR_FENCE_CTX_TRACE(c, fmt, ...)                                   \
+       do {                                                               \
+               struct pvr_fence_context *__fctx = (c);                    \
+               pr_err("c %llu: (PVR) " fmt, (u64) __fctx->fence_context,  \
+                      ## __VA_ARGS__);                                    \
+       } while (0)
+#else
+#define PVR_FENCE_CTX_TRACE(c, fmt, ...)
+#endif
+
+#define PVR_FENCE_CTX_WARN(c, fmt, ...)                                    \
+       do {                                                               \
+               struct pvr_fence_context *__fctx = (c);                    \
+               pr_warn("c %llu: (PVR) " fmt, (u64) __fctx->fence_context, \
+                       ## __VA_ARGS__);                                   \
+       } while (0)
+
+#define PVR_FENCE_CTX_ERR(c, fmt, ...)                                     \
+       do {                                                               \
+               struct pvr_fence_context *__fctx = (c);                    \
+               pr_err("c %llu: (PVR) " fmt, (u64) __fctx->fence_context,  \
+                      ## __VA_ARGS__);                                    \
+       } while (0)
+
+#if defined(PVR_FENCE_DEBUG)
+#define PVR_FENCE_TRACE(f, fmt, ...)                                       \
+       DMA_FENCE_ERR(f, "(PVR) " fmt, ## __VA_ARGS__)
+#else
+#define PVR_FENCE_TRACE(f, fmt, ...)
+#endif
+
+#define PVR_FENCE_WARN(f, fmt, ...)                                        \
+       DMA_FENCE_WARN(f, "(PVR) " fmt, ## __VA_ARGS__)
+
+#define PVR_FENCE_ERR(f, fmt, ...)                                         \
+       DMA_FENCE_ERR(f, "(PVR) " fmt, ## __VA_ARGS__)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) */
+#endif /* !defined(__PVR_FENCE_H__) */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_fence_trace.h b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_fence_trace.h
new file mode 100644 (file)
index 0000000..e2f044c
--- /dev/null
@@ -0,0 +1,225 @@
+/*
+ * @Codingstyle LinuxKernel
+ * @Copyright   Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+ * @License     Dual MIT/GPLv2
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License Version 2 ("GPL") in which case the provisions
+ * of GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms of
+ * GPL, and not to allow others to use your version of this file under the terms
+ * of the MIT license, indicate your decision by deleting the provisions above
+ * and replace them with the notice and other provisions required by GPL as set
+ * out in the file called "GPL-COPYING" included in this distribution. If you do
+ * not delete the provisions above, a recipient may use your version of this file
+ * under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT-COPYING".
+ *
+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM pvr_fence
+
+#if !defined(_TRACE_PVR_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PVR_FENCE_H
+
+#include <linux/tracepoint.h>
+
+struct pvr_fence;
+struct pvr_fence_context;
+
+DECLARE_EVENT_CLASS(pvr_fence_context,
+
+       TP_PROTO(struct pvr_fence_context *fctx),
+       TP_ARGS(fctx),
+
+       TP_STRUCT__entry(
+               __string(name, fctx->name)
+               __array(char, val, 128)
+       ),
+
+       TP_fast_assign(
+               __assign_str(name, fctx->name)
+               pvr_context_value_str(fctx, __entry->val,
+                       sizeof(__entry->val));
+       ),
+
+       TP_printk("name=%s val=%s",
+                 __get_str(name),
+                 __entry->val
+       )
+);
+
+DEFINE_EVENT(pvr_fence_context, pvr_fence_context_create,
+       TP_PROTO(struct pvr_fence_context *fctx),
+       TP_ARGS(fctx)
+);
+
+DEFINE_EVENT(pvr_fence_context, pvr_fence_context_destroy,
+       TP_PROTO(struct pvr_fence_context *fctx),
+       TP_ARGS(fctx)
+);
+
+DEFINE_EVENT(pvr_fence_context, pvr_fence_context_destroy_kref,
+       TP_PROTO(struct pvr_fence_context *fctx),
+       TP_ARGS(fctx)
+);
+
+DEFINE_EVENT(pvr_fence_context, pvr_fence_context_signal_fences,
+       TP_PROTO(struct pvr_fence_context *fctx),
+       TP_ARGS(fctx)
+);
+
+DECLARE_EVENT_CLASS(pvr_fence,
+       TP_PROTO(struct pvr_fence *fence),
+       TP_ARGS(fence),
+
+       TP_STRUCT__entry(
+               __string(driver,
+                       fence->base.ops->get_driver_name(&fence->base))
+               __string(timeline,
+                       fence->base.ops->get_timeline_name(&fence->base))
+               __array(char, val, 128)
+               __field(u64, context)
+       ),
+
+       TP_fast_assign(
+               __assign_str(driver,
+                       fence->base.ops->get_driver_name(&fence->base))
+               __assign_str(timeline,
+                       fence->base.ops->get_timeline_name(&fence->base))
+               fence->base.ops->fence_value_str(&fence->base,
+                       __entry->val, sizeof(__entry->val));
+               __entry->context = fence->base.context;
+       ),
+
+       TP_printk("driver=%s timeline=%s ctx=%llu val=%s",
+                 __get_str(driver), __get_str(timeline),
+                 __entry->context, __entry->val
+       )
+);
+
+DEFINE_EVENT(pvr_fence, pvr_fence_create,
+       TP_PROTO(struct pvr_fence *fence),
+       TP_ARGS(fence)
+);
+
+DEFINE_EVENT(pvr_fence, pvr_fence_release,
+       TP_PROTO(struct pvr_fence *fence),
+       TP_ARGS(fence)
+);
+
+DEFINE_EVENT(pvr_fence, pvr_fence_enable_signaling,
+       TP_PROTO(struct pvr_fence *fence),
+       TP_ARGS(fence)
+);
+
+DEFINE_EVENT(pvr_fence, pvr_fence_signal_fence,
+       TP_PROTO(struct pvr_fence *fence),
+       TP_ARGS(fence)
+);
+
+DECLARE_EVENT_CLASS(pvr_fence_foreign,
+       TP_PROTO(struct pvr_fence *fence),
+       TP_ARGS(fence),
+
+       TP_STRUCT__entry(
+               __string(driver,
+                       fence->base.ops->get_driver_name(&fence->base))
+               __string(timeline,
+                       fence->base.ops->get_timeline_name(&fence->base))
+               __array(char, val, 128)
+               __field(u64, context)
+               __string(foreign_driver,
+                       fence->fence->ops->get_driver_name ?
+                       fence->fence->ops->get_driver_name(fence->fence) :
+                       "unknown")
+               __string(foreign_timeline,
+                       fence->fence->ops->get_timeline_name ?
+                       fence->fence->ops->get_timeline_name(fence->fence) :
+                       "unknown")
+               __array(char, foreign_val, 128)
+               __field(u64, foreign_context)
+       ),
+
+       TP_fast_assign(
+               __assign_str(driver,
+                       fence->base.ops->get_driver_name(&fence->base))
+               __assign_str(timeline,
+                       fence->base.ops->get_timeline_name(&fence->base))
+               fence->base.ops->fence_value_str(&fence->base, __entry->val,
+                       sizeof(__entry->val));
+               __entry->context = fence->base.context;
+               __assign_str(foreign_driver,
+                       fence->fence->ops->get_driver_name ?
+                       fence->fence->ops->get_driver_name(fence->fence) :
+                       "unknown")
+               __assign_str(foreign_timeline,
+                       fence->fence->ops->get_timeline_name ?
+                       fence->fence->ops->get_timeline_name(fence->fence) :
+                       "unknown")
+               fence->fence->ops->fence_value_str ?
+                       fence->fence->ops->fence_value_str(
+                               fence->fence, __entry->foreign_val,
+                               sizeof(__entry->foreign_val)) :
+                       (void) strlcpy(__entry->foreign_val,
+                               "unknown", sizeof(__entry->foreign_val));
+               __entry->foreign_context = fence->fence->context;
+       ),
+
+       TP_printk("driver=%s timeline=%s ctx=%llu val=%s foreign: driver=%s timeline=%s ctx=%llu val=%s",
+                 __get_str(driver), __get_str(timeline), __entry->context,
+                 __entry->val, __get_str(foreign_driver),
+                 __get_str(foreign_timeline), __entry->foreign_context,
+                 __entry->foreign_val
+       )
+);
+
+DEFINE_EVENT(pvr_fence_foreign, pvr_fence_foreign_create,
+       TP_PROTO(struct pvr_fence *fence),
+       TP_ARGS(fence)
+);
+
+DEFINE_EVENT(pvr_fence_foreign, pvr_fence_foreign_release,
+       TP_PROTO(struct pvr_fence *fence),
+       TP_ARGS(fence)
+);
+
+DEFINE_EVENT(pvr_fence_foreign, pvr_fence_foreign_signal,
+       TP_PROTO(struct pvr_fence *fence),
+       TP_ARGS(fence)
+);
+
+#endif /* _TRACE_PVR_FENCE_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+
+/* This is needed because the name of this file doesn't match TRACE_SYSTEM. */
+#define TRACE_INCLUDE_FILE pvr_fence_trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_gputrace.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_gputrace.c
new file mode 100644 (file)
index 0000000..3e65aa3
--- /dev/null
@@ -0,0 +1,1281 @@
+/*************************************************************************/ /*!
+@File           pvr_gputrace.c
+@Title          PVR GPU Trace module Linux implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0))
+#include <linux/trace_events.h>
+#else
+#include <linux/ftrace_event.h>
+#endif
+
+#include "pvrsrv_error.h"
+#include "pvrsrv_apphint.h"
+#include "pvr_debug.h"
+#include "ospvr_gputrace.h"
+#include "rgxhwperf.h"
+#include "rgxtimecorr.h"
+#include "device.h"
+#include "trace_events.h"
+#include "pvrsrv.h"
+#include "pvrsrv_tlstreams.h"
+#include "tlclient.h"
+#include "pvr_debug.h"
+#define CREATE_TRACE_POINTS
+#include "rogue_trace_events.h"
+
+/******************************************************************************
+ Module internal implementation
+******************************************************************************/
+
+typedef enum {
+       PVR_GPUTRACE_SWITCH_TYPE_UNDEF = 0,
+
+       PVR_GPUTRACE_SWITCH_TYPE_BEGIN = 1,
+       PVR_GPUTRACE_SWITCH_TYPE_END = 2,
+       PVR_GPUTRACE_SWITCH_TYPE_SINGLE = 3
+} PVR_GPUTRACE_SWITCH_TYPE;
+
+typedef struct RGX_HWPERF_FTRACE_DATA {
+       /* This lock ensures the HWPerf TL stream reading resources are not destroyed
+        * by one thread disabling tracing while another is reading from the stream.
+        * It keeps the state and the resource create/destroy operations atomic and
+        * consistent. */
+       POS_LOCK    hFTraceResourceLock;
+
+       IMG_HANDLE  hGPUTraceCmdCompleteHandle;
+       IMG_HANDLE  hGPUTraceTLStream;
+       IMG_UINT64  ui64LastSampledTimeCorrOSTimeStamp;
+       IMG_UINT32  ui32FTraceLastOrdinal;
+} RGX_HWPERF_FTRACE_DATA;
+
+/* This lock ensures that the GPU tracing on/off state change is done atomically */
+static POS_LOCK ghGPUTraceStateLock;
+static IMG_BOOL gbFTraceGPUEventsEnabled = PVRSRV_APPHINT_ENABLEFTRACEGPU;
+
+/* This lock ensures that the reference counting operation on the FTrace UFO
+ * events and enable/disable operation on firmware event are performed as
+ * one atomic operation. This should ensure that there are no race conditions
+ * between reference counting and firmware event state change.
+ * See below comment for guiUfoEventRef.
+ */
+static POS_LOCK ghLockFTraceEventLock;
+
+/* Multiple FTrace UFO events are reflected in the firmware as a single event.
+ * When an FTrace UFO event is enabled we also want to enable the corresponding
+ * firmware event at the same time. Since there is a many-to-one relation
+ * between these events, we count how many FTrace UFO events are enabled. If at
+ * least one event is enabled we enable the firmware event; when all FTrace UFO
+ * events are disabled we disable the firmware event. */
+static IMG_UINT guiUfoEventRef;
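+
+/*
+ * Sketch of the expected enable path for an FTrace UFO event, holding
+ * ghLockFTraceEventLock so that the reference count and the firmware event
+ * state change stay atomic (the firmware-facing call is only a placeholder;
+ * the real handlers are defined elsewhere in the driver):
+ *
+ *     OSLockAcquire(ghLockFTraceEventLock);
+ *     if (guiUfoEventRef++ == 0) {
+ *             ... enable the UFO event in the firmware ...
+ *     }
+ *     OSLockRelease(ghLockFTraceEventLock);
+ */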
+
+/******************************************************************************
+ Module In-bound API
+******************************************************************************/
+
+static PVRSRV_ERROR _GpuTraceDisable(
+       PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+       IMG_BOOL bDeInit);
+
+static void _GpuTraceCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE);
+
+PVRSRV_ERROR PVRGpuTraceSupportInit(void)
+{
+       PVRSRV_ERROR eError;
+
+       if (ghLockFTraceEventLock != NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "FTrace Support is already initialized"));
+               return PVRSRV_OK;
+       }
+
+       /* common module params initialization */
+       eError = OSLockCreate(&ghLockFTraceEventLock);
+       PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate");
+
+       eError = OSLockCreate(&ghGPUTraceStateLock);
+       PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate");
+
+       return PVRSRV_OK;
+}
+
+void PVRGpuTraceSupportDeInit(void)
+{
+       if (ghGPUTraceStateLock)
+       {
+               OSLockDestroy(ghGPUTraceStateLock);
+       }
+
+       if (ghLockFTraceEventLock)
+       {
+               OSLockDestroy(ghLockFTraceEventLock);
+               ghLockFTraceEventLock = NULL;
+       }
+}
+
+PVRSRV_ERROR PVRGpuTraceInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_ERROR eError;
+       RGX_HWPERF_FTRACE_DATA *psData;
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+
+       psData = OSAllocZMem(sizeof(RGX_HWPERF_FTRACE_DATA));
+       psDevInfo->pvGpuFtraceData = psData;
+       PVR_LOG_GOTO_IF_NOMEM(psData, eError, e0);
+
+       /* We initialise it only once because we want to track if any
+        * packets were dropped. */
+       psData->ui32FTraceLastOrdinal = IMG_UINT32_MAX - 1;
+
+       eError = OSLockCreate(&psData->hFTraceResourceLock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0);
+
+       return PVRSRV_OK;
+
+e0:
+       PVRGpuTraceDeInitDevice(psDeviceNode);
+       return eError;
+}
+
+void PVRGpuTraceDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+       RGX_HWPERF_FTRACE_DATA *psData = psDevInfo->pvGpuFtraceData;
+
+       PVRSRV_VZ_RETN_IF_MODE(GUEST);
+       if (psData)
+       {
+               /* first disable the tracing, to free up TL resources */
+               if (psData->hFTraceResourceLock)
+               {
+                       OSLockAcquire(psData->hFTraceResourceLock);
+                       _GpuTraceDisable(psDeviceNode->pvDevice, IMG_TRUE);
+                       OSLockRelease(psData->hFTraceResourceLock);
+
+                       /* now free all the FTrace resources */
+                       OSLockDestroy(psData->hFTraceResourceLock);
+               }
+               OSFreeMem(psData);
+               psDevInfo->pvGpuFtraceData = NULL;
+       }
+}
+
+IMG_BOOL PVRGpuTraceIsEnabled(void)
+{
+       return gbFTraceGPUEventsEnabled;
+}
+
+void PVRGpuTraceInitIfEnabled(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       if (PVRGpuTraceIsEnabled())
+       {
+               PVRSRV_ERROR eError = PVRGpuTraceSetEnabled(psDeviceNode, IMG_TRUE);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "Failed to initialise GPU event tracing"
+                                       " (%s)", PVRSRVGetErrorString(eError)));
+               }
+
+               /* The functions below will enable FTrace events, which in
+                * turn will execute HWPerf callbacks that set the appropriate
+                * filter values.
+                * note: unfortunately the functions don't allow private data
+                *       to be passed, so they enable events for all of the
+                *       devices at once, which means that this can happen
+                *       more than once if there is more than one device */
+
+               /* single events can be enabled by calling trace_set_clr_event()
+                * with the event name, e.g.:
+                * trace_set_clr_event("rogue", "rogue_ufo_update", 1) */
+#if defined(CONFIG_EVENT_TRACING) /* this is a kernel config option */
+#if defined(ANDROID) || defined(CHROMIUMOS_KERNEL)
+               if (trace_set_clr_event("gpu", NULL, 1))
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "Failed to enable \"gpu\" event"
+                                       " group"));
+               }
+               else
+               {
+                       PVR_LOG(("FTrace events from \"gpu\" group enabled"));
+               }
+#endif /* defined(ANDROID) || defined(CHROMIUMOS_KERNEL) */
+               if (trace_set_clr_event("rogue", NULL, 1))
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "Failed to enable \"rogue\" event"
+                                       " group"));
+               }
+               else
+               {
+                       PVR_LOG(("FTrace events from \"rogue\" group enabled"));
+               }
+#endif /* defined(CONFIG_EVENT_TRACING) */
+       }
+}
+
+/* The caller must hold hFTraceResourceLock before calling this function.
+ */
+static PVRSRV_ERROR _GpuTraceEnable(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       RGX_HWPERF_FTRACE_DATA *psFtraceData;
+       PVRSRV_DEVICE_NODE *psRgxDevNode = psRgxDevInfo->psDeviceNode;
+       IMG_CHAR pszHWPerfStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5];
+
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(psRgxDevInfo);
+
+       psFtraceData = psRgxDevInfo->pvGpuFtraceData;
+
+       PVR_ASSERT(OSLockIsLocked(psFtraceData->hFTraceResourceLock));
+
+       /* return if already enabled */
+       if (psFtraceData->hGPUTraceTLStream)
+       {
+               return PVRSRV_OK;
+       }
+
+#if defined(SUPPORT_RGX)
+       /* Signal FW to enable event generation */
+       if (psRgxDevInfo->bFirmwareInitialised)
+       {
+               IMG_UINT64 ui64UFOFilter = psRgxDevInfo->ui64HWPerfFilter &
+                       (RGX_HWPERF_EVENT_MASK_FW_SED | RGX_HWPERF_EVENT_MASK_FW_UFO);
+
+               /* Do not call into PVRSRVRGXCtrlHWPerfKM if we're in GUEST mode. */
+               if (PVRSRV_VZ_MODE_IS(GUEST))
+               {
+                       eError = PVRSRV_OK;
+               }
+               else
+               {
+                       eError = PVRSRVRGXCtrlHWPerfKM(NULL, psRgxDevNode,
+                                                      RGX_HWPERF_STREAM_ID0_FW, IMG_FALSE,
+                                                      RGX_HWPERF_EVENT_MASK_HW_KICKFINISH |
+                                                      ui64UFOFilter);
+               }
+               PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM", err_out);
+       }
+       else
+#endif
+       {
+               /* only set filter and exit */
+               psRgxDevInfo->ui64HWPerfFilter = RGX_HWPERF_EVENT_MASK_HW_KICKFINISH |
+                       ((RGX_HWPERF_EVENT_MASK_FW_SED | RGX_HWPERF_EVENT_MASK_FW_UFO) &
+                       psRgxDevInfo->ui64HWPerfFilter);
+
+               PVR_DPF((PVR_DBG_WARNING,
+                                "HWPerfFW mask has been SET to (%" IMG_UINT64_FMTSPECx ")",
+                                psRgxDevInfo->ui64HWPerfFilter));
+
+               return PVRSRV_OK;
+       }
+
+       /* Form the HWPerf stream name corresponding to this DevNode, so that it is meaningful in the UM */
+       if (OSSNPrintf(pszHWPerfStreamName, sizeof(pszHWPerfStreamName), "%s%d",
+                                       PVRSRV_TL_HWPERF_RGX_FW_STREAM, psRgxDevNode->sDevId.i32OsDeviceID) < 0)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                        "%s: Failed to form HWPerf stream name for device %d",
+                        __func__,
+                        psRgxDevNode->sDevId.i32OsDeviceID));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       /* Open the TL Stream for HWPerf data consumption */
+       eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE,
+                                                               pszHWPerfStreamName,
+                                                               PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING,
+                                                               &psFtraceData->hGPUTraceTLStream);
+       PVR_LOG_GOTO_IF_ERROR(eError, "TLClientOpenStream", err_out);
+
+#if defined(SUPPORT_RGX)
+       if (RGXTimeCorrGetClockSource(psRgxDevNode) != RGXTIMECORR_CLOCK_SCHED)
+       {
+               /* Set clock source for timer correlation data to sched_clock */
+               psRgxDevInfo->ui32LastClockSource = RGXTimeCorrGetClockSource(psRgxDevNode);
+               RGXTimeCorrSetClockSource(psRgxDevNode, RGXTIMECORR_CLOCK_SCHED);
+       }
+#endif
+
+       /* Reset the OS timestamp coming from the timer correlation data
+        * associated with the latest HWPerf event we processed.
+        */
+       psFtraceData->ui64LastSampledTimeCorrOSTimeStamp = 0;
+
+       /* Register a notifier to collect HWPerf data whenever the HW completes
+        * an operation.
+        */
+       eError = PVRSRVRegisterCmdCompleteNotify(
+               &psFtraceData->hGPUTraceCmdCompleteHandle,
+               &_GpuTraceCmdCompleteNotify,
+               psRgxDevInfo);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRegisterCmdCompleteNotify", err_close_stream);
+
+err_out:
+       PVR_DPF_RETURN_RC(eError);
+
+err_close_stream:
+       TLClientCloseStream(DIRECT_BRIDGE_HANDLE,
+                                               psFtraceData->hGPUTraceTLStream);
+       psFtraceData->hGPUTraceTLStream = NULL;
+       goto err_out;
+}
+
+/* The caller must hold hFTraceResourceLock before calling this function.
+ */
+static PVRSRV_ERROR _GpuTraceDisable(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_BOOL bDeInit)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       RGX_HWPERF_FTRACE_DATA *psFtraceData;
+#if defined(SUPPORT_RGX)
+       PVRSRV_DEVICE_NODE *psRgxDevNode = psRgxDevInfo->psDeviceNode;
+#endif
+
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(psRgxDevInfo);
+
+       psFtraceData = psRgxDevInfo->pvGpuFtraceData;
+
+       PVR_ASSERT(OSLockIsLocked(psFtraceData->hFTraceResourceLock));
+
+       /* if FW is not yet initialised, just set filter and exit */
+       if (!psRgxDevInfo->bFirmwareInitialised)
+       {
+               psRgxDevInfo->ui64HWPerfFilter = RGX_HWPERF_EVENT_MASK_NONE;
+#if !defined(NO_HARDWARE)
+               PVR_DPF((PVR_DBG_WARNING,
+                        "HWPerfFW mask has been SET to (%" IMG_UINT64_FMTSPECx ")",
+                        psRgxDevInfo->ui64HWPerfFilter));
+#endif
+               return PVRSRV_OK;
+       }
+
+       if (NULL == psFtraceData->hGPUTraceTLStream)
+       {
+               /* Tracing already disabled, just return */
+               return PVRSRV_OK;
+       }
+
+#if defined(SUPPORT_RGX)
+       if (!bDeInit)
+       {
+               /* Do not call into PVRSRVRGXCtrlHWPerfKM if we are in GUEST mode. */
+               if (PVRSRV_VZ_MODE_IS(GUEST))
+               {
+                       eError = PVRSRV_OK;
+               }
+               else
+               {
+                       eError = PVRSRVRGXCtrlHWPerfKM(NULL, psRgxDevNode,
+                                                      RGX_HWPERF_STREAM_ID0_FW, IMG_FALSE,
+                                                      (RGX_HWPERF_EVENT_MASK_NONE));
+               }
+               PVR_LOG_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM");
+       }
+#endif
+
+       if (psFtraceData->hGPUTraceCmdCompleteHandle)
+       {
+               /* Tracing is being turned off. Unregister the notifier. */
+               eError = PVRSRVUnregisterCmdCompleteNotify(
+                               psFtraceData->hGPUTraceCmdCompleteHandle);
+               PVR_LOG_IF_ERROR(eError, "PVRSRVUnregisterCmdCompleteNotify");
+               psFtraceData->hGPUTraceCmdCompleteHandle = NULL;
+       }
+
+       if (psFtraceData->hGPUTraceTLStream)
+       {
+               IMG_PBYTE pbTmp = NULL;
+               IMG_UINT32 ui32Tmp = 0;
+
+               /* We have to flush both the L1 (FW) and L2 (Host) buffers in case there
+                * are some events left unprocessed in this FTrace/systrace "session"
+                * (note that even if we have just disabled HWPerf on the FW some packets
+                * could have been generated and already copied to L2 by the MISR handler).
+                *
+                * With the following calls we will both copy new data to the Host buffer
+                * (done by the producer callback in TLClientAcquireData) and advance
+                * the read offset in the buffer to catch up with the latest events.
+                */
+               eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
+                                            psFtraceData->hGPUTraceTLStream,
+                                            &pbTmp, &ui32Tmp);
+               PVR_LOG_IF_ERROR(eError, "TLClientAcquireData");
+
+               /* Let close stream perform the release data on the outstanding acquired data */
+               eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE,
+                                            psFtraceData->hGPUTraceTLStream);
+               PVR_LOG_IF_ERROR(eError, "TLClientCloseStream");
+
+               psFtraceData->hGPUTraceTLStream = NULL;
+       }
+
+#if defined(SUPPORT_RGX)
+       if (psRgxDevInfo->ui32LastClockSource != RGXTIMECORR_CLOCK_SCHED)
+       {
+               RGXTimeCorrSetClockSource(psRgxDevNode, psRgxDevInfo->ui32LastClockSource);
+       }
+#endif
+
+       PVR_DPF_RETURN_RC(eError);
+}
+
+static PVRSRV_ERROR _GpuTraceSetEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+                                        IMG_BOOL bNewValue)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       RGX_HWPERF_FTRACE_DATA *psFtraceData;
+
+       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(psRgxDevInfo);
+       psFtraceData = psRgxDevInfo->pvGpuFtraceData;
+
+       /* About to create/destroy FTrace resources, lock critical section
+        * to avoid HWPerf MISR thread contention.
+        */
+       OSLockAcquire(psFtraceData->hFTraceResourceLock);
+
+       eError = (bNewValue ? _GpuTraceEnable(psRgxDevInfo)
+                                          : _GpuTraceDisable(psRgxDevInfo, IMG_FALSE));
+
+       OSLockRelease(psFtraceData->hFTraceResourceLock);
+
+       PVR_DPF_RETURN_RC(eError);
+}
+
+static PVRSRV_ERROR _GpuTraceSetEnabledForAllDevices(IMG_BOOL bNewValue)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+
+       OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock);
+       psDeviceNode = psPVRSRVData->psDeviceNodeList;
+
+       /* enable/disable GPU trace on all devices */
+       while (psDeviceNode)
+       {
+               eError = _GpuTraceSetEnabled(psDeviceNode->pvDevice, bNewValue);
+               if (eError != PVRSRV_OK)
+               {
+                       break;
+               }
+               psDeviceNode = psDeviceNode->psNext;
+       }
+
+       OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock);
+
+       PVR_DPF_RETURN_RC(eError);
+}
+
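+/* Enable or disable GPU FTrace support for a single device node. */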
+PVRSRV_ERROR PVRGpuTraceSetEnabled(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                   IMG_BOOL bNewValue)
+{
+       return _GpuTraceSetEnabled(psDeviceNode->pvDevice, bNewValue);
+}
+
+/* ----- HWPerf to FTrace packet processing and events injection ------------ */
+
+static const IMG_CHAR *_HWPerfKickTypeToStr(RGX_HWPERF_KICK_TYPE eKickType)
+{
+       static const IMG_CHAR *aszKickType[RGX_HWPERF_KICK_TYPE_LAST+1] = {
+#if defined(RGX_FEATURE_HWPERF_VOLCANIC)
+               "TA3D", "CDM", "RS", "SHG", "TQTDM", "SYNC", "TA", "3D", "LAST"
+#else
+               "TA3D", "TQ2D", "TQ3D", "CDM", "RS", "VRDM", "TQTDM", "SYNC", "TA", "3D", "LAST"
+#endif
+       };
+
+       /* cast in case of negative value */
+       if (((IMG_UINT32) eKickType) >= RGX_HWPERF_KICK_TYPE_LAST)
+       {
+               return "<UNKNOWN>";
+       }
+
+       return aszKickType[eKickType];
+}
+
+void PVRGpuTraceEnqueueEvent(
+               PVRSRV_DEVICE_NODE *psDevNode,
+               IMG_UINT32 ui32FirmwareCtx,
+               IMG_UINT32 ui32ExtJobRef,
+               IMG_UINT32 ui32IntJobRef,
+               RGX_HWPERF_KICK_TYPE eKickType)
+{
+       const IMG_CHAR *pszKickType = _HWPerfKickTypeToStr(eKickType);
+
+       PVR_DPF((PVR_DBG_MESSAGE, "PVRGpuTraceEnqueueEvent(%s): contextId %u, "
+               "jobId %u", pszKickType, ui32FirmwareCtx, ui32IntJobRef));
+
+       if (PVRGpuTraceIsEnabled())
+       {
+               trace_rogue_job_enqueue(ui32FirmwareCtx, ui32IntJobRef, ui32ExtJobRef,
+                                       pszKickType);
+       }
+}
+
+static void _GpuTraceWorkSwitch(
+               IMG_UINT64 ui64HWTimestampInOSTime,
+               IMG_UINT32 ui32CtxId,
+               IMG_UINT32 ui32CtxPriority,
+               IMG_UINT32 ui32ExtJobRef,
+               IMG_UINT32 ui32IntJobRef,
+               const IMG_CHAR* pszWorkType,
+               PVR_GPUTRACE_SWITCH_TYPE eSwType)
+{
+       PVR_ASSERT(pszWorkType);
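+       /* The context priority is reported inverted (2 - priority) to the
+        * trace point below. */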
+       trace_rogue_sched_switch(pszWorkType, eSwType, ui64HWTimestampInOSTime,
+                       ui32CtxId, 2-ui32CtxPriority, ui32IntJobRef, ui32ExtJobRef);
+}
+
+static void _GpuTraceUfo(
+               IMG_UINT64 ui64OSTimestamp,
+               const RGX_HWPERF_UFO_EV eEvType,
+               const IMG_UINT32 ui32CtxId,
+               const IMG_UINT32 ui32ExtJobRef,
+               const IMG_UINT32 ui32IntJobRef,
+               const IMG_UINT32 ui32UFOCount,
+               const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+       switch (eEvType) {
+               case RGX_HWPERF_UFO_EV_UPDATE:
+                       trace_rogue_ufo_updates(ui64OSTimestamp, ui32CtxId,
+                                       ui32ExtJobRef, ui32IntJobRef, ui32UFOCount, puData);
+                       break;
+               case RGX_HWPERF_UFO_EV_CHECK_SUCCESS:
+                       trace_rogue_ufo_checks_success(ui64OSTimestamp, ui32CtxId,
+                                       ui32ExtJobRef, ui32IntJobRef, IMG_FALSE, ui32UFOCount,
+                                       puData);
+                       break;
+               case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS:
+                       trace_rogue_ufo_checks_success(ui64OSTimestamp, ui32CtxId,
+                                       ui32ExtJobRef, ui32IntJobRef, IMG_TRUE, ui32UFOCount,
+                                       puData);
+                       break;
+               case RGX_HWPERF_UFO_EV_CHECK_FAIL:
+                       trace_rogue_ufo_checks_fail(ui64OSTimestamp, ui32CtxId,
+                                       ui32ExtJobRef, ui32IntJobRef, IMG_FALSE, ui32UFOCount,
+                                       puData);
+                       break;
+               case RGX_HWPERF_UFO_EV_PRCHECK_FAIL:
+                       trace_rogue_ufo_checks_fail(ui64OSTimestamp, ui32CtxId,
+                                       ui32ExtJobRef, ui32IntJobRef, IMG_TRUE, ui32UFOCount,
+                                       puData);
+                       break;
+               default:
+                       break;
+       }
+}
+
+static void _GpuTraceFirmware(
+               IMG_UINT64 ui64HWTimestampInOSTime,
+               const IMG_CHAR* pszWorkType,
+               PVR_GPUTRACE_SWITCH_TYPE eSwType)
+{
+       trace_rogue_firmware_activity(ui64HWTimestampInOSTime, pszWorkType, eSwType);
+}
+
+static void _GpuTraceEventsLost(
+               const RGX_HWPERF_STREAM_ID eStreamId,
+               const IMG_UINT32 ui32LastOrdinal,
+               const IMG_UINT32 ui32CurrOrdinal)
+{
+       trace_rogue_events_lost(eStreamId, ui32LastOrdinal, ui32CurrOrdinal);
+}
+
+/* Calculate the OS timestamp given an RGX timestamp in the HWPerf event. */
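+/* OS time = time-correlation OS timestamp + (event CR timestamp - time-correlation
+ * CR timestamp) converted to nanoseconds using the CR-to-OS delta scale. */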
+static uint64_t CalculateEventTimestamp(
+       PVRSRV_RGXDEV_INFO *psDevInfo,
+       uint32_t ui32TimeCorrIndex,
+       uint64_t ui64EventTimestamp)
+{
+       RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb;
+       RGX_HWPERF_FTRACE_DATA *psFtraceData = psDevInfo->pvGpuFtraceData;
+       RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32TimeCorrIndex];
+       uint64_t ui64CRTimeStamp = psTimeCorr->ui64CRTimeStamp;
+       uint64_t ui64OSTimeStamp = psTimeCorr->ui64OSTimeStamp;
+       uint64_t ui64CRDeltaToOSDeltaKNs = psTimeCorr->ui64CRDeltaToOSDeltaKNs;
+       uint64_t ui64EventOSTimestamp, deltaRgxTimer, delta_ns;
+
+       if (psFtraceData->ui64LastSampledTimeCorrOSTimeStamp > ui64OSTimeStamp)
+       {
+               /* The previous packet had a time reference (time correlation data) more
+                * recent than the one in the current packet. This means the time
+                * correlation array wrapped too quickly (buffer too small) and in a
+                * previous call to this function one of the newest timer correlations
+                * was read rather than one of the oldest ones.
+                */
+               PVR_DPF((PVR_DBG_ERROR, "%s: The timestamps computed so far could be "
+                                "wrong! The time correlation array size should be increased "
+                                "to avoid this.", __func__));
+       }
+
+       psFtraceData->ui64LastSampledTimeCorrOSTimeStamp = ui64OSTimeStamp;
+
+       /* RGX CR timer ticks delta */
+       deltaRgxTimer = ui64EventTimestamp - ui64CRTimeStamp;
+       /* RGX time delta in nanoseconds */
+       delta_ns = RGXFWIF_GET_DELTA_OSTIME_NS(deltaRgxTimer, ui64CRDeltaToOSDeltaKNs);
+       /* Calculate OS time of HWPerf event */
+       ui64EventOSTimestamp = ui64OSTimeStamp + delta_ns;
+
+       PVR_DPF((PVR_DBG_VERBOSE, "%s: psCurrentDvfs RGX %llu, OS %llu, DVFSCLK %u",
+                        __func__, ui64CRTimeStamp, ui64OSTimeStamp,
+                        psTimeCorr->ui32CoreClockSpeed));
+
+       return ui64EventOSTimestamp;
+}
+
+static void _GpuTraceSwitchEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+               RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt, const IMG_CHAR* pszWorkName,
+               PVR_GPUTRACE_SWITCH_TYPE eSwType)
+{
+       IMG_UINT64 ui64Timestamp;
+       RGX_HWPERF_HW_DATA* psHWPerfPktData;
+
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(psHWPerfPkt);
+       PVR_ASSERT(pszWorkName);
+
+       psHWPerfPktData = RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt);
+
+       ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex,
+                                                                                       psHWPerfPkt->ui64Timestamp);
+
+       PVR_DPF((PVR_DBG_VERBOSE, "_GpuTraceSwitchEvent: %s ui32DMContext=%u, ui32IntJobRef=%u, eSwType=%d",
+                       pszWorkName, psHWPerfPktData->ui32DMContext, psHWPerfPktData->ui32IntJobRef, eSwType));
+
+       _GpuTraceWorkSwitch(ui64Timestamp,
+                           psHWPerfPktData->ui32DMContext,
+                           psHWPerfPktData->ui32CtxPriority,
+                           psHWPerfPktData->ui32ExtJobRef,
+                           psHWPerfPktData->ui32IntJobRef,
+                           pszWorkName,
+                           eSwType);
+
+       PVR_DPF_RETURN;
+}
+
+static void _GpuTraceUfoEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+                              RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt)
+{
+       IMG_UINT64 ui64Timestamp;
+       RGX_HWPERF_UFO_DATA *psHWPerfPktData;
+       IMG_UINT32 ui32UFOCount;
+       RGX_HWPERF_UFO_DATA_ELEMENT *puData;
+
+       psHWPerfPktData = RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt);
+
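+       /* Decode the UFO count and the offset of the UFO data packed into
+        * ui32StreamInfo. */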
+       ui32UFOCount = RGX_HWPERF_GET_UFO_STREAMSIZE(psHWPerfPktData->ui32StreamInfo);
+       puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) IMG_OFFSET_ADDR(psHWPerfPktData, RGX_HWPERF_GET_UFO_STREAMOFFSET(psHWPerfPktData->ui32StreamInfo));
+
+       ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex,
+                                                                                       psHWPerfPkt->ui64Timestamp);
+
+       PVR_DPF((PVR_DBG_VERBOSE, "_GpuTraceUfoEvent: ui32ExtJobRef=%d, "
+               "ui32IntJobRef=%d", psHWPerfPktData->ui32ExtJobRef,
+               psHWPerfPktData->ui32IntJobRef));
+
+       _GpuTraceUfo(ui64Timestamp, psHWPerfPktData->eEvType,
+                    psHWPerfPktData->ui32DMContext, psHWPerfPktData->ui32ExtJobRef,
+                    psHWPerfPktData->ui32IntJobRef, ui32UFOCount, puData);
+}
+
+static void _GpuTraceFirmwareEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+               RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt, const IMG_CHAR* pszWorkName,
+               PVR_GPUTRACE_SWITCH_TYPE eSwType)
+
+{
+       uint64_t ui64Timestamp;
+       RGX_HWPERF_FW_DATA *psHWPerfPktData = RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt);
+
+       ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex,
+                                                                                       psHWPerfPkt->ui64Timestamp);
+
+       _GpuTraceFirmware(ui64Timestamp, pszWorkName, eSwType);
+}
+
+static IMG_BOOL ValidAndEmitFTraceEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+               RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt)
+{
+       RGX_HWPERF_EVENT_TYPE eType;
+       RGX_HWPERF_FTRACE_DATA *psFtraceData = psDevInfo->pvGpuFtraceData;
+       IMG_UINT32 ui32HwEventTypeIndex;
+       static const struct {
+               IMG_CHAR* pszName;
+               PVR_GPUTRACE_SWITCH_TYPE eSwType;
+       } aszHwEventTypeMap[] = {
+#define _T(T) PVR_GPUTRACE_SWITCH_TYPE_##T
+               { "BG",             _T(BEGIN)  }, /* RGX_HWPERF_FW_BGSTART */
+               { "BG",             _T(END)    }, /* RGX_HWPERF_FW_BGEND */
+               { "IRQ",            _T(BEGIN)  }, /* RGX_HWPERF_FW_IRQSTART */
+               { "IRQ",            _T(END)    }, /* RGX_HWPERF_FW_IRQEND */
+               { "DBG",            _T(BEGIN)  }, /* RGX_HWPERF_FW_DBGSTART */
+               { "DBG",            _T(END)    }, /* RGX_HWPERF_FW_DBGEND */
+               { "PMOOM_TAPAUSE",  _T(END)    }, /* RGX_HWPERF_HW_PMOOM_TAPAUSE */
+               { "TA",             _T(BEGIN)  }, /* RGX_HWPERF_HW_TAKICK */
+               { "TA",             _T(END)    }, /* RGX_HWPERF_HW_TAFINISHED */
+               { "TQ3D",           _T(BEGIN)  }, /* RGX_HWPERF_HW_3DTQKICK */
+               { "3D",             _T(BEGIN)  }, /* RGX_HWPERF_HW_3DKICK */
+               { "3D",             _T(END)    }, /* RGX_HWPERF_HW_3DFINISHED */
+               { "CDM",            _T(BEGIN)  }, /* RGX_HWPERF_HW_CDMKICK */
+               { "CDM",            _T(END)    }, /* RGX_HWPERF_HW_CDMFINISHED */
+               { "TQ2D",           _T(BEGIN)  }, /* RGX_HWPERF_HW_TLAKICK */
+               { "TQ2D",           _T(END)    }, /* RGX_HWPERF_HW_TLAFINISHED */
+               { "3DSPM",          _T(BEGIN)  }, /* RGX_HWPERF_HW_3DSPMKICK */
+               { NULL,             0          }, /* RGX_HWPERF_HW_PERIODIC (unsupported) */
+               { "RTU",            _T(BEGIN)  }, /* RGX_HWPERF_HW_RTUKICK */
+               { "RTU",            _T(END)    }, /* RGX_HWPERF_HW_RTUFINISHED */
+               { "SHG",            _T(BEGIN)  }, /* RGX_HWPERF_HW_SHGKICK */
+               { "SHG",            _T(END)    }, /* RGX_HWPERF_HW_SHGFINISHED */
+               { "TQ3D",           _T(END)    }, /* RGX_HWPERF_HW_3DTQFINISHED */
+               { "3DSPM",          _T(END)    }, /* RGX_HWPERF_HW_3DSPMFINISHED */
+               { "PMOOM_TARESUME", _T(BEGIN)  }, /* RGX_HWPERF_HW_PMOOM_TARESUME */
+               { "TDM",            _T(BEGIN)  }, /* RGX_HWPERF_HW_TDMKICK */
+               { "TDM",            _T(END)    }, /* RGX_HWPERF_HW_TDMFINISHED */
+               { "NULL",           _T(SINGLE) }, /* RGX_HWPERF_HW_NULLKICK */
+#undef _T
+       };
+       static_assert(RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE == RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE + 1,
+                                 "FW and HW events are not contiguous in RGX_HWPERF_EVENT_TYPE");
+
+       PVR_ASSERT(psHWPerfPkt);
+       eType = RGX_HWPERF_GET_TYPE(psHWPerfPkt);
+
+       if (psFtraceData->ui32FTraceLastOrdinal != psHWPerfPkt->ui32Ordinal - 1)
+       {
+               RGX_HWPERF_STREAM_ID eStreamId = RGX_HWPERF_GET_STREAM_ID(psHWPerfPkt);
+               _GpuTraceEventsLost(eStreamId,
+                                   psFtraceData->ui32FTraceLastOrdinal,
+                                   psHWPerfPkt->ui32Ordinal);
+               PVR_DPF((PVR_DBG_ERROR, "FTrace events lost (stream_id = %u, ordinal: last = %u, current = %u)",
+                        eStreamId, psFtraceData->ui32FTraceLastOrdinal, psHWPerfPkt->ui32Ordinal));
+       }
+
+       psFtraceData->ui32FTraceLastOrdinal = psHWPerfPkt->ui32Ordinal;
+
+       /* Process UFO packets */
+       if (eType == RGX_HWPERF_UFO)
+       {
+               _GpuTraceUfoEvent(psDevInfo, psHWPerfPkt);
+               return IMG_TRUE;
+       }
+
+       if (eType <= RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE)
+       {
+               /* this ID belongs to range 0, so index directly in range 0 */
+               ui32HwEventTypeIndex = eType - RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE;
+       }
+       else
+       {
+               /* this ID belongs to range 1, so first index in range 1 and skip number of slots used up for range 0 */
+               ui32HwEventTypeIndex = (eType - RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE) +
+                                      (RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE - RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE + 1);
+       }
+
+       if (ui32HwEventTypeIndex >= ARRAY_SIZE(aszHwEventTypeMap))
+               goto err_unsupported;
+
+       if (aszHwEventTypeMap[ui32HwEventTypeIndex].pszName == NULL)
+       {
+               /* Not supported map entry, ignore event */
+               goto err_unsupported;
+       }
+
+       if (HWPERF_PACKET_IS_HW_TYPE(eType))
+       {
+               if (aszHwEventTypeMap[ui32HwEventTypeIndex].eSwType == PVR_GPUTRACE_SWITCH_TYPE_SINGLE)
+               {
+                       _GpuTraceSwitchEvent(psDevInfo, psHWPerfPkt,
+                                            aszHwEventTypeMap[ui32HwEventTypeIndex].pszName,
+                                            PVR_GPUTRACE_SWITCH_TYPE_BEGIN);
+                       _GpuTraceSwitchEvent(psDevInfo, psHWPerfPkt,
+                                            aszHwEventTypeMap[ui32HwEventTypeIndex].pszName,
+                                            PVR_GPUTRACE_SWITCH_TYPE_END);
+               }
+               else
+               {
+                       _GpuTraceSwitchEvent(psDevInfo, psHWPerfPkt,
+                                            aszHwEventTypeMap[ui32HwEventTypeIndex].pszName,
+                                            aszHwEventTypeMap[ui32HwEventTypeIndex].eSwType);
+               }
+       }
+       else if (HWPERF_PACKET_IS_FW_TYPE(eType))
+       {
+               _GpuTraceFirmwareEvent(psDevInfo, psHWPerfPkt,
+                                                                               aszHwEventTypeMap[ui32HwEventTypeIndex].pszName,
+                                                                               aszHwEventTypeMap[ui32HwEventTypeIndex].eSwType);
+       }
+       else
+       {
+               goto err_unsupported;
+       }
+
+       return IMG_TRUE;
+
+err_unsupported:
+       PVR_DPF((PVR_DBG_VERBOSE, "%s: Unsupported event type %d", __func__, eType));
+       return IMG_FALSE;
+}
+
+
+static void _GpuTraceProcessPackets(PVRSRV_RGXDEV_INFO *psDevInfo,
+               void *pBuffer, IMG_UINT32 ui32ReadLen)
+{
+       IMG_UINT32                      ui32TlPackets = 0;
+       IMG_UINT32                      ui32HWPerfPackets = 0;
+       IMG_UINT32                      ui32HWPerfPacketsSent = 0;
+       void                            *pBufferEnd;
+       PVRSRVTL_PPACKETHDR psHDRptr;
+       PVRSRVTL_PACKETTYPE ui16TlType;
+
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(psDevInfo);
+       PVR_ASSERT(pBuffer);
+       PVR_ASSERT(ui32ReadLen);
+
+       /* Process the TL Packets
+        */
+       pBufferEnd = IMG_OFFSET_ADDR(pBuffer, ui32ReadLen);
+       psHDRptr = GET_PACKET_HDR(pBuffer);
+       while ( psHDRptr < (PVRSRVTL_PPACKETHDR)pBufferEnd )
+       {
+               ui16TlType = GET_PACKET_TYPE(psHDRptr);
+               if (ui16TlType == PVRSRVTL_PACKETTYPE_DATA)
+               {
+                       IMG_UINT16 ui16DataLen = GET_PACKET_DATA_LEN(psHDRptr);
+                       if (0 == ui16DataLen)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "_GpuTraceProcessPackets: ZERO Data in TL data packet: %p", psHDRptr));
+                       }
+                       else
+                       {
+                               RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt;
+                               RGX_HWPERF_V2_PACKET_HDR* psHWPerfEnd;
+
+                               /* Check for lost hwperf data packets */
+                               psHWPerfEnd = RGX_HWPERF_GET_PACKET(GET_PACKET_DATA_PTR(psHDRptr)+ui16DataLen);
+                               psHWPerfPkt = RGX_HWPERF_GET_PACKET(GET_PACKET_DATA_PTR(psHDRptr));
+                               do
+                               {
+                                       if (ValidAndEmitFTraceEvent(psDevInfo, psHWPerfPkt))
+                                       {
+                                               ui32HWPerfPacketsSent++;
+                                       }
+                                       ui32HWPerfPackets++;
+                                       psHWPerfPkt = RGX_HWPERF_GET_NEXT_PACKET(psHWPerfPkt);
+                               }
+                               while (psHWPerfPkt < psHWPerfEnd);
+                       }
+               }
+               else if (ui16TlType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED)
+               {
+                       PVR_DPF((PVR_DBG_MESSAGE, "_GpuTraceProcessPackets: Indication that the transport buffer was full"));
+               }
+               else
+               {
+                       /* Ignore padding packets and any other packet types */
+                       PVR_DPF((PVR_DBG_MESSAGE, "_GpuTraceProcessPackets: Ignoring TL packet, type %d", ui16TlType));
+               }
+
+               psHDRptr = GET_NEXT_PACKET_ADDR(psHDRptr);
+               ui32TlPackets++;
+       }
+
+       PVR_DPF((PVR_DBG_VERBOSE, "_GpuTraceProcessPackets: TL "
+                       "Packets processed %03d, HWPerf packets %03d, sent %03d",
+                       ui32TlPackets, ui32HWPerfPackets, ui32HWPerfPacketsSent));
+
+       PVR_DPF_RETURN;
+}
+
+
+static void _GpuTraceCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle)
+{
+       PVRSRV_RGXDEV_INFO* psDeviceInfo = hCmdCompHandle;
+       RGX_HWPERF_FTRACE_DATA* psFtraceData;
+       PVRSRV_ERROR            eError;
+       IMG_PBYTE                       pBuffer;
+       IMG_UINT32                      ui32ReadLen;
+       IMG_BOOL                        bFTraceLockAcquired = IMG_FALSE;
+
+       PVR_DPF_ENTERED;
+
+       PVR_ASSERT(psDeviceInfo != NULL);
+
+       psFtraceData = psDeviceInfo->pvGpuFtraceData;
+
+       /* Command-complete notifiers can run concurrently. If this is
+        * happening, just bail out and let the previous call finish.
+        * This is ok because we can process the queued packets on the next call.
+        */
+       bFTraceLockAcquired = OSTryLockAcquire(psFtraceData->hFTraceResourceLock);
+       if (IMG_FALSE == bFTraceLockAcquired)
+       {
+               PVR_DPF_RETURN;
+       }
+
+       /* If this notifier is called, the TL resources will remain valid at least
+        * until the end of this call, since the DeInit function waits on the
+        * hFTraceResourceLock to clean up the TL resources and unregister the
+        * notifier, so just assert here.
+        */
+       PVR_ASSERT(psFtraceData->hGPUTraceTLStream);
+
+       /* If we have a valid stream attempt to acquire some data */
+       eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, psFtraceData->hGPUTraceTLStream, &pBuffer, &ui32ReadLen);
+       if (eError == PVRSRV_OK)
+       {
+               /* Process the HWPerf packets and release the data */
+               if (ui32ReadLen > 0)
+               {
+                       PVR_DPF((PVR_DBG_VERBOSE, "_GpuTraceCmdCompleteNotify: DATA AVAILABLE offset=%p, length=%d", pBuffer, ui32ReadLen));
+
+                       /* Process the transport layer data for HWPerf packets... */
+                       _GpuTraceProcessPackets(psDeviceInfo, pBuffer, ui32ReadLen);
+
+                       eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psFtraceData->hGPUTraceTLStream);
+                       if (eError != PVRSRV_OK)
+                       {
+                               PVR_LOG_ERROR(eError, "TLClientReleaseData");
+
+                               /* Serious error, disable FTrace GPU events */
+
+                               /* Release TraceLock so we always have the locking
+                                * order BridgeLock->TraceLock to prevent AB-BA deadlocks */
+                               OSLockRelease(psFtraceData->hFTraceResourceLock);
+                               OSLockAcquire(psFtraceData->hFTraceResourceLock);
+                               _GpuTraceDisable(psDeviceInfo, IMG_FALSE);
+                               OSLockRelease(psFtraceData->hFTraceResourceLock);
+                               goto out;
+
+                       }
+               } /* else no data, ignore */
+       }
+       else if (eError != PVRSRV_ERROR_TIMEOUT)
+       {
+               PVR_LOG_ERROR(eError, "TLClientAcquireData");
+       }
+       if (bFTraceLockAcquired)
+       {
+               OSLockRelease(psFtraceData->hFTraceResourceLock);
+       }
+out:
+       PVR_DPF_RETURN;
+}
+
+/* ----- AppHint interface -------------------------------------------------- */
+
+static PVRSRV_ERROR _GpuTraceIsEnabledCallback(
+       const PVRSRV_DEVICE_NODE *device,
+       const void *private_data,
+       IMG_BOOL *value)
+{
+       PVR_UNREFERENCED_PARAMETER(device);
+       PVR_UNREFERENCED_PARAMETER(private_data);
+
+       *value = gbFTraceGPUEventsEnabled;
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _GpuTraceSetEnabledCallback(
+       const PVRSRV_DEVICE_NODE *device,
+       const void *private_data,
+       IMG_BOOL value)
+{
+       PVR_UNREFERENCED_PARAMETER(device);
+
+       /* Lock down the state to avoid concurrent writes */
+       OSLockAcquire(ghGPUTraceStateLock);
+
+       if (value != gbFTraceGPUEventsEnabled)
+       {
+               PVRSRV_ERROR eError;
+               if ((eError = _GpuTraceSetEnabledForAllDevices(value)) == PVRSRV_OK)
+               {
+                       PVR_TRACE(("%s GPU FTrace", value ? "ENABLED" : "DISABLED"));
+                       gbFTraceGPUEventsEnabled = value;
+               }
+               else
+               {
+                       PVR_TRACE(("FAILED to %s GPU FTrace", value ? "enable" : "disable"));
+                       /* On failure, a partial enable/disable may have occurred.
+                        * Try to restore the previous state; ignore any error. */
+                       _GpuTraceSetEnabledForAllDevices(gbFTraceGPUEventsEnabled);
+
+                       OSLockRelease(ghGPUTraceStateLock);
+                       return eError;
+               }
+       }
+       else
+       {
+               PVR_TRACE(("GPU FTrace already %s!", value ? "enabled" : "disabled"));
+       }
+
+       OSLockRelease(ghGPUTraceStateLock);
+
+       return PVRSRV_OK;
+}
+
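+/* Register the query/set handlers for the EnableFTraceGPU AppHint. */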
+void PVRGpuTraceInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_EnableFTraceGPU,
+                                         _GpuTraceIsEnabledCallback,
+                                         _GpuTraceSetEnabledCallback,
+                                         psDeviceNode, NULL);
+}
+
+/* ----- FTrace event callbacks -------------------------------------------- */
+
+void PVRGpuTraceEnableUfoCallback(void)
+{
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+#if defined(SUPPORT_RGX)
+       PVRSRV_RGXDEV_INFO *psRgxDevInfo;
+       PVRSRV_ERROR eError;
+#endif
+
+       /* Lock down events state, for consistent value of guiUfoEventRef */
+       OSLockAcquire(ghLockFTraceEventLock);
+       if (guiUfoEventRef++ == 0)
+       {
+               OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock);
+               psDeviceNode = psPVRSRVData->psDeviceNodeList;
+
+               /* make sure UFO events are enabled on all rogue devices */
+               while (psDeviceNode)
+               {
+#if defined(SUPPORT_RGX)
+                       IMG_UINT64 ui64Filter;
+
+                       psRgxDevInfo = psDeviceNode->pvDevice;
+                       ui64Filter = RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO) |
+                                                       psRgxDevInfo->ui64HWPerfFilter;
+                       /* There is a small chance that ui64HWPerfFilter is changed concurrently
+                        * here, in which case the newer filter value is overwritten with the
+                        * old value plus the UFO event. This is not a critical problem. */
+                       eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW,
+                                                                                       IMG_FALSE, ui64Filter);
+                       if (eError == PVRSRV_ERROR_NOT_INITIALISED)
+                       {
+                               /* If we land here that means that the FW is not initialised yet.
+                                * We stored the filter and it will be passed to the firmware
+                                * during its initialisation phase. So ignore. */
+                       }
+                       else if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "Could not enable UFO HWPerf events on device %d", psDeviceNode->sDevId.i32OsDeviceID));
+                       }
+#endif
+                       psDeviceNode = psDeviceNode->psNext;
+               }
+
+               OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock);
+       }
+       OSLockRelease(ghLockFTraceEventLock);
+}
+
+void PVRGpuTraceDisableUfoCallback(void)
+{
+#if defined(SUPPORT_RGX)
+       PVRSRV_ERROR eError;
+#endif
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+
+       /* We have to check that the lock is valid because, on driver unload,
+        * PVRGpuTraceSupportDeInit is called before the kernel disables the ftrace
+        * events. This means the lock will be destroyed before this callback is
+        * called. We can safely return in that situation because the driver is
+        * being unloaded, so the HWPerf state no longer matters. */
+       if (ghLockFTraceEventLock == NULL)
+               return;
+
+       OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock);
+       psDeviceNode = psPVRSRVData->psDeviceNodeList;
+
+       /* Lock down events state, for consistent value of guiUfoEventRef */
+       OSLockAcquire(ghLockFTraceEventLock);
+       if (--guiUfoEventRef == 0)
+       {
+               /* make sure UFO events are disabled on all rogue devices */
+               while (psDeviceNode)
+               {
+#if defined(SUPPORT_RGX)
+                       IMG_UINT64 ui64Filter;
+                       PVRSRV_RGXDEV_INFO *psRgxDevInfo = psDeviceNode->pvDevice;
+
+                       ui64Filter = ~(RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO)) &
+                                       psRgxDevInfo->ui64HWPerfFilter;
+                       /* There is a small chance that ui64HWPerfFilter is changed concurrently
+                        * here, in which case the newer filter value is overwritten with the
+                        * old value minus the UFO event. This is not a critical problem. */
+                       eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW,
+                                                      IMG_FALSE, ui64Filter);
+                       if (eError == PVRSRV_ERROR_NOT_INITIALISED)
+                       {
+                               /* If we land here that means that the FW is not initialised yet.
+                                * We stored the filter and it will be passed to the firmware
+                                * during its initialisation phase. So ignore. */
+                       }
+                       else if (eError != PVRSRV_OK)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "Could not disable UFO HWPerf events on device %d",
+                                       psDeviceNode->sDevId.i32OsDeviceID));
+                       }
+#endif
+
+                       psDeviceNode = psDeviceNode->psNext;
+               }
+       }
+       OSLockRelease(ghLockFTraceEventLock);
+
+       OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock);
+}
+
+void PVRGpuTraceEnableFirmwareActivityCallback(void)
+{
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+#if defined(SUPPORT_RGX)
+       PVRSRV_RGXDEV_INFO *psRgxDevInfo;
+       uint64_t ui64Filter, ui64FWEventsFilter = 0;
+       int i;
+
+       for (i = RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE;
+                i <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE; i++)
+       {
+               ui64FWEventsFilter |= RGX_HWPERF_EVENT_MASK_VALUE(i);
+       }
+#endif
+
+       OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock);
+       psDeviceNode = psPVRSRVData->psDeviceNodeList;
+
+       OSLockAcquire(ghLockFTraceEventLock);
+       /* Enable all FW events on all the devices */
+       while (psDeviceNode)
+       {
+#if defined(SUPPORT_RGX)
+               PVRSRV_ERROR eError;
+               psRgxDevInfo = psDeviceNode->pvDevice;
+               ui64Filter = psRgxDevInfo->ui64HWPerfFilter | ui64FWEventsFilter;
+
+               eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW,
+                                              IMG_FALSE, ui64Filter);
+               if ((eError != PVRSRV_OK) && !PVRSRV_VZ_MODE_IS(GUEST))
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "Could not enable HWPerf event for firmware"
+                               " task timings (%s).", PVRSRVGetErrorString(eError)));
+               }
+#endif
+               psDeviceNode = psDeviceNode->psNext;
+       }
+       OSLockRelease(ghLockFTraceEventLock);
+
+       OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock);
+}
+
+void PVRGpuTraceDisableFirmwareActivityCallback(void)
+{
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+       PVRSRV_DEVICE_NODE *psDeviceNode;
+#if defined(SUPPORT_RGX)
+       IMG_UINT64 ui64FWEventsFilter = ~0;
+       int i;
+#endif
+
+       /* We have to check that the lock is valid because, on driver unload,
+        * PVRGpuTraceSupportDeInit is called before the kernel disables the ftrace
+        * events. This means the lock will be destroyed before this callback is
+        * called. We can safely return in that situation because the driver is
+        * being unloaded, so the HWPerf state no longer matters. */
+       if (ghLockFTraceEventLock == NULL)
+               return;
+
+       OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock);
+       psDeviceNode = psPVRSRVData->psDeviceNodeList;
+
+#if defined(SUPPORT_RGX)
+       for (i = RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE;
+                i <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE; i++)
+       {
+               ui64FWEventsFilter &= ~RGX_HWPERF_EVENT_MASK_VALUE(i);
+       }
+#endif
+
+       OSLockAcquire(ghLockFTraceEventLock);
+
+       /* Disable all FW events on all the devices */
+       while (psDeviceNode)
+       {
+#if defined(SUPPORT_RGX)
+               PVRSRV_RGXDEV_INFO *psRgxDevInfo = psDeviceNode->pvDevice;
+               IMG_UINT64 ui64Filter = psRgxDevInfo->ui64HWPerfFilter & ui64FWEventsFilter;
+
+               if ((PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW,
+                                         IMG_FALSE, ui64Filter) != PVRSRV_OK) &&
+                   !PVRSRV_VZ_MODE_IS(GUEST))
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "Could not disable HWPerf event for firmware task timings."));
+               }
+#endif
+               psDeviceNode = psDeviceNode->psNext;
+       }
+
+       OSLockRelease(ghLockFTraceEventLock);
+
+       OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock);
+}
+
+/******************************************************************************
+ End of file (pvr_gputrace.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_ion_stats.h b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_ion_stats.h
new file mode 100644 (file)
index 0000000..c341807
--- /dev/null
@@ -0,0 +1,80 @@
+/*************************************************************************/ /*!
+@File
+@Title          Functions for recording ION memory stats.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVR_ION_STATS_H
+#define PVR_ION_STATS_H
+
+#include "pvrsrv_error.h"
+#include "img_defs.h"
+
+struct dma_buf;
+
+#if defined(PVRSRV_ENABLE_PVR_ION_STATS)
+PVRSRV_ERROR PVRSRVIonStatsInitialise(void);
+
+void PVRSRVIonStatsDestroy(void);
+
+void PVRSRVIonAddMemAllocRecord(struct dma_buf *psDmaBuf);
+
+void PVRSRVIonRemoveMemAllocRecord(struct dma_buf *psDmaBuf);
+#else
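+/* Stub implementations used when PVRSRV_ENABLE_PVR_ION_STATS is not defined. */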
+static INLINE PVRSRV_ERROR PVRSRVIonStatsInitialise(void)
+{
+       return PVRSRV_OK;
+}
+
+static INLINE void PVRSRVIonStatsDestroy(void)
+{
+}
+
+static INLINE void PVRSRVIonAddMemAllocRecord(struct dma_buf *psDmaBuf)
+{
+       PVR_UNREFERENCED_PARAMETER(psDmaBuf);
+}
+
+static INLINE void PVRSRVIonRemoveMemAllocRecord(struct dma_buf *psDmaBuf)
+{
+       PVR_UNREFERENCED_PARAMETER(psDmaBuf);
+}
+#endif /* defined(PVRSRV_ENABLE_PVR_ION_STATS) */
+
+#endif /* PVR_ION_STATS_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_platform_drv.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_platform_drv.c
new file mode 100644 (file)
index 0000000..58f7c0a
--- /dev/null
@@ -0,0 +1,330 @@
+/*
+ * @File
+ * @Title       PowerVR DRM platform driver
+ * @Codingstyle LinuxKernel
+ * @Copyright   Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+ * @License     Dual MIT/GPLv2
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License Version 2 ("GPL") in which case the provisions
+ * of GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms of
+ * GPL, and not to allow others to use your version of this file under the terms
+ * of the MIT license, indicate your decision by deleting the provisions above
+ * and replace them with the notice and other provisions required by GPL as set
+ * out in the file called "GPL-COPYING" included in this distribution. If you do
+ * not delete the provisions above, a recipient may use your version of this file
+ * under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT-COPYING".
+ *
+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0))
+#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
+#include <linux/mod_devicetable.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#else
+#include <drm/drmP.h>
+#endif
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "module_common.h"
+#include "pvr_drv.h"
+#include "pvrmodule.h"
+#include "sysinfo.h"
+
+
+/* This header must always be included last */
+#include "kernel_compatibility.h"
+
+static struct drm_driver pvr_drm_platform_driver;
+
+#if defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED)
+/*
+ * This is an arbitrary value. If it's changed then the 'num_devices' module
+ * parameter description should also be updated to match.
+ */
+#define MAX_DEVICES 16
+
+static unsigned int pvr_num_devices = 1;
+static struct platform_device **pvr_devices;
+
+#if defined(NO_HARDWARE)
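+/* Module parameter setter: reject values outside the range 1..MAX_DEVICES. */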
+static int pvr_num_devices_set(const char *val,
+                              const struct kernel_param *param)
+{
+       int err;
+
+       err = param_set_uint(val, param);
+       if (err)
+               return err;
+
+       if (pvr_num_devices == 0 || pvr_num_devices > MAX_DEVICES)
+               return -EINVAL;
+
+       return 0;
+}
+
+static const struct kernel_param_ops pvr_num_devices_ops = {
+       .set = pvr_num_devices_set,
+       .get = param_get_uint,
+};
+
+module_param_cb(num_devices, &pvr_num_devices_ops, &pvr_num_devices, 0444);
+MODULE_PARM_DESC(num_devices,
+                "Number of platform devices to register (default: 1 - max: 16)");
+#endif /* defined(NO_HARDWARE) */
+#endif /* defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) */
+
+#if 0
+static int pvr_devices_register(void)
+{
+#if defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED)
+       struct platform_device_info pvr_dev_info = {
+               .name = SYS_RGX_DEV_NAME,
+               .id = -2,
+#if defined(NO_HARDWARE)
+               /* Not all cores support 40-bit physical addresses, but this works
+                * unless an address above 32 bits is returned on those cores.
+                * This will be handled properly in the future.
+                */
+               .dma_mask = DMA_BIT_MASK(40),
+#else
+               .dma_mask = DMA_BIT_MASK(32),
+#endif
+       };
+       unsigned int i;
+
+       BUG_ON(pvr_num_devices == 0 || pvr_num_devices > MAX_DEVICES);
+
+       pvr_devices = kmalloc_array(pvr_num_devices, sizeof(*pvr_devices),
+                                   GFP_KERNEL);
+       if (!pvr_devices)
+               return -ENOMEM;
+
+       for (i = 0; i < pvr_num_devices; i++) {
+               pvr_devices[i] = platform_device_register_full(&pvr_dev_info);
+               if (IS_ERR(pvr_devices[i])) {
+                       DRM_ERROR("unable to register device %u (err=%ld)\n",
+                                 i, PTR_ERR(pvr_devices[i]));
+                       pvr_devices[i] = NULL;
+                       return -ENODEV;
+               }
+       }
+#endif /* defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) */
+
+       return 0;
+}
+#endif
+
+static void pvr_devices_unregister(void)
+{
+#if defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED)
+       unsigned int i;
+
+       BUG_ON(!pvr_devices);
+
+       for (i = 0; i < pvr_num_devices && pvr_devices[i]; i++)
+               platform_device_unregister(pvr_devices[i]);
+
+       kfree(pvr_devices);
+       pvr_devices = NULL;
+#endif /* defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) */
+}
+
+static int pvr_probe(struct platform_device *pdev)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+       struct drm_device *ddev;
+       int ret;
+
+       DRM_DEBUG_DRIVER("device %p\n", &pdev->dev);
+
+       ddev = drm_dev_alloc(&pvr_drm_platform_driver, &pdev->dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
+       if (IS_ERR(ddev))
+               return PTR_ERR(ddev);
+#else
+       if (!ddev)
+               return -ENOMEM;
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
+       /* Needed by drm_platform_set_busid */
+       ddev->platformdev = pdev;
+#endif
+
+       /*
+        * The load callback, called from drm_dev_register, is deprecated,
+        * because of potential race conditions. Calling the function here,
+        * before calling drm_dev_register, avoids those potential races.
+        */
+       BUG_ON(pvr_drm_platform_driver.load != NULL);
+       ret = pvr_drm_load(ddev, 0);
+       if (ret)
+               goto err_drm_dev_put;
+
+       ret = drm_dev_register(ddev, 0);
+       if (ret)
+               goto err_drm_dev_unload;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+       DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+               pvr_drm_platform_driver.name,
+               pvr_drm_platform_driver.major,
+               pvr_drm_platform_driver.minor,
+               pvr_drm_platform_driver.patchlevel,
+               pvr_drm_platform_driver.date,
+               ddev->primary->index);
+#endif
+       return 0;
+
+err_drm_dev_unload:
+       pvr_drm_unload(ddev);
+err_drm_dev_put:
+       drm_dev_put(ddev);
+       return ret;
+#else
+       DRM_DEBUG_DRIVER("device %p\n", &pdev->dev);
+
+       return drm_platform_init(&pvr_drm_platform_driver, pdev);
+#endif
+}
+
+static int pvr_remove(struct platform_device *pdev)
+{
+       struct drm_device *ddev = platform_get_drvdata(pdev);
+
+       DRM_DEBUG_DRIVER("device %p\n", &pdev->dev);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+       drm_dev_unregister(ddev);
+
+       /* The unload callback, called from drm_dev_unregister, is
+        * deprecated. Call the unload function directly.
+        */
+       BUG_ON(pvr_drm_platform_driver.unload != NULL);
+       pvr_drm_unload(ddev);
+
+       drm_dev_put(ddev);
+#else
+       drm_put_dev(ddev);
+#endif
+       return 0;
+}
+
+static void pvr_shutdown(struct platform_device *pdev)
+{
+       struct drm_device *ddev = platform_get_drvdata(pdev);
+       struct pvr_drm_private *priv = ddev->dev_private;
+
+       DRM_DEBUG_DRIVER("device %p\n", &pdev->dev);
+
+       PVRSRVDeviceShutdown(priv->dev_node);
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+static const struct of_device_id pvr_of_ids[] = {
+#if defined(SYS_RGX_OF_COMPATIBLE)
+       { .compatible = SYS_RGX_OF_COMPATIBLE, },
+#endif
+       {},
+};
+
+#if !defined(CHROMIUMOS_KERNEL) || !defined(MODULE)
+MODULE_DEVICE_TABLE(of, pvr_of_ids);
+#endif
+#endif
+
+static struct platform_device_id pvr_platform_ids[] = {
+#if defined(SYS_RGX_DEV_NAME)
+       { SYS_RGX_DEV_NAME, 0 },
+#endif
+       { }
+};
+
+#if !defined(CHROMIUMOS_KERNEL) || !defined(MODULE)
+MODULE_DEVICE_TABLE(platform, pvr_platform_ids);
+#endif
+
+static struct platform_driver pvr_platform_driver = {
+       .driver = {
+               .name           = DRVNAME,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+               .of_match_table = of_match_ptr(pvr_of_ids),
+#endif
+               .pm             = &pvr_pm_ops,
+       },
+       .id_table               = pvr_platform_ids,
+       .probe                  = pvr_probe,
+       .remove                 = pvr_remove,
+       .shutdown               = pvr_shutdown,
+};
+
+static int __init pvr_init(void)
+{
+       int err;
+
+       DRM_DEBUG_DRIVER("\n");
+
+       pvr_drm_platform_driver = pvr_drm_generic_driver;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) && \
+       (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
+       pvr_drm_platform_driver.set_busid = drm_platform_set_busid;
+#endif
+
+       err = PVRSRVDriverInit();
+       if (err)
+               return err;
+
+       err = platform_driver_register(&pvr_platform_driver);
+       if (err)
+               return err;
+
+       return 0; /* pvr_devices_register() is not called; see the #if 0 block above */
+}
+
+static void __exit pvr_exit(void)
+{
+       DRM_DEBUG_DRIVER("\n");
+
+       pvr_devices_unregister();
+       platform_driver_unregister(&pvr_platform_driver);
+       PVRSRVDriverDeinit();
+
+       DRM_DEBUG_DRIVER("done\n");
+}
+
+module_init(pvr_init);
+module_exit(pvr_exit);
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_procfs.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_procfs.c
new file mode 100644 (file)
index 0000000..8ca3886
--- /dev/null
@@ -0,0 +1,643 @@
+/*************************************************************************/ /*!
+@File
+@Title          ProcFS implementation of Debug Info interface.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements osdi_impl.h API to provide access to driver's
+                debug data via ProcFS.
+
+                Note about locking in ProcFS module.
+
+                Access to ProcFS is protected against the race where any
+                file could be removed while being accessed or accessed while
+                being removed. Any calls to proc_remove() will block until all
+                operations are finished.
+
+                See implementation of file operations (proc_reg_*()) and
+                implementation of (un)use_pde() and proc_entry_rundown() in
+                source/fs/proc/inode.c in Linux kernel sources for more details.
+
+                Note about locking for sequential files.
+
+                The seq_file objects have a mutex that protects access
+                to all of the file operations hence all of the sequential
+                *read* operations are protected.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvr_procfs.h"
+#include "osfunc.h"
+#include "allocmem.h"
+#include "pvr_bridge_k.h"
+#include "pvr_uaccess.h"
+#include "osdi_impl.h"
+
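+/* Bracket ProcFS file operations with driver thread enter/exit; on failure the
+ * PVRSRV error is converted to a native error code and returned. */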
+#define _DRIVER_THREAD_ENTER() \
+       do { \
+               PVRSRV_ERROR eLocalError = PVRSRVDriverThreadEnter(); \
+               if (eLocalError != PVRSRV_OK) \
+               { \
+                       PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVDriverThreadEnter failed: %s", \
+                               __func__, PVRSRVGetErrorString(eLocalError))); \
+                       return OSPVRSRVToNativeError(eLocalError); \
+               } \
+       } while (0)
+
+#define _DRIVER_THREAD_EXIT() \
+       PVRSRVDriverThreadExit()
+
+#define PVR_DEBUGFS_PVR_DPF_LEVEL PVR_DBG_ERROR
+
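+/* Wrappers binding Debug Info (DI) entries to their ProcFS directory and file
+ * entries. */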
+typedef struct DFS_DIR
+{
+       struct proc_dir_entry *psDirEntry;
+       struct DFS_DIR *psParentDir;
+} DFS_DIR;
+
+typedef struct DFS_ENTRY
+{
+       OSDI_IMPL_ENTRY sImplEntry;
+       DI_ITERATOR_CB sIterCb;
+} DFS_ENTRY;
+
+typedef struct DFS_FILE
+{
+       struct proc_dir_entry *psFileEntry;
+       struct DFS_DIR *psParentDir;
+       const struct seq_operations *psSeqOps;
+       struct DFS_ENTRY sEntry;
+       DI_ENTRY_TYPE eType;
+} DFS_FILE;
+
+/* ----- native callbacks interface ----------------------------------------- */
+
+static void _WriteData(void *pvNativeHandle, const void *pvData,
+                       IMG_UINT32 uiSize)
+{
+       seq_write(pvNativeHandle, pvData, uiSize);
+}
+
+static void _VPrintf(void *pvNativeHandle, const IMG_CHAR *pszFmt,
+                     va_list pArgs)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
+       seq_vprintf(pvNativeHandle, pszFmt, pArgs);
+#else
+       IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+
+       vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFmt, pArgs);
+       seq_printf(pvNativeHandle, "%s", szBuffer);
+#endif
+}
+
+static void _Puts(void *pvNativeHandle, const IMG_CHAR *pszStr)
+{
+       seq_puts(pvNativeHandle, pszStr);
+}
+
+static IMG_BOOL _HasOverflowed(void *pvNativeHandle)
+{
+       struct seq_file *psSeqFile = pvNativeHandle;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
+       return seq_has_overflowed(psSeqFile);
+#else
+       return psSeqFile->count == psSeqFile->size;
+#endif
+}
+
+static OSDI_IMPL_ENTRY_CB _g_sEntryCallbacks = {
+       .pfnWrite = _WriteData,
+       .pfnVPrintf = _VPrintf,
+       .pfnPuts = _Puts,
+       .pfnHasOverflowed = _HasOverflowed,
+};
+
+/* ----- sequential file operations ----------------------------------------- */
+
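+/* These callbacks adapt the kernel's seq_file iterator interface to the DI
+ * iterator callbacks, translating between SEQ_START_TOKEN and DI_START_TOKEN. */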
+static void *_Start(struct seq_file *psSeqFile, loff_t *puiPos)
+{
+       DFS_ENTRY *psEntry = psSeqFile->private;
+
+       void *pvRet = psEntry->sIterCb.pfnStart(&psEntry->sImplEntry, puiPos);
+
+       if (pvRet == DI_START_TOKEN)
+       {
+               return SEQ_START_TOKEN;
+       }
+
+       return pvRet;
+}
+
+static void _Stop(struct seq_file *psSeqFile, void *pvPriv)
+{
+       DFS_ENTRY *psEntry = psSeqFile->private;
+
+       psEntry->sIterCb.pfnStop(&psEntry->sImplEntry, pvPriv);
+}
+
+static void *_Next(struct seq_file *psSeqFile, void *pvPriv, loff_t *puiPos)
+{
+       DFS_ENTRY *psEntry = psSeqFile->private;
+
+       return psEntry->sIterCb.pfnNext(&psEntry->sImplEntry, pvPriv, puiPos);
+}
+
+static int _Show(struct seq_file *psSeqFile, void *pvPriv)
+{
+       DFS_ENTRY *psEntry = psSeqFile->private;
+
+       if (pvPriv == SEQ_START_TOKEN)
+       {
+               pvPriv = DI_START_TOKEN;
+       }
+
+       return psEntry->sIterCb.pfnShow(&psEntry->sImplEntry, pvPriv);
+}
+
+static struct seq_operations _g_sSeqOps = {
+       .start = _Start,
+       .stop = _Stop,
+       .next = _Next,
+       .show = _Show
+};
+
+/* ----- file operations ---------------------------------------------------- */
+
+static int _Open(struct inode *psINode, struct file *psFile)
+{
+       DFS_FILE *psDFSFile = PDE_DATA(psINode);
+       int iRes;
+
+       PVR_LOG_RETURN_IF_FALSE(psDFSFile != NULL, "psDFSFile is NULL", -EIO);
+
+       _DRIVER_THREAD_ENTER();
+
+       if (psDFSFile->sEntry.sIterCb.pfnStart != NULL)
+       {
+               iRes = seq_open(psFile, psDFSFile->psSeqOps);
+       }
+       else
+       {
+               /* private data is NULL as it's going to be set below */
+               iRes = single_open(psFile, _Show, NULL);
+       }
+
+       if (iRes == 0)
+       {
+               struct seq_file *psSeqFile = psFile->private_data;
+
+               DFS_ENTRY *psEntry = OSAllocMem(sizeof(*psEntry));
+               if (psEntry == NULL)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem() failed", __func__));
+                       iRes = -ENOMEM;
+                       goto return_;
+               }
+
+               *psEntry = psDFSFile->sEntry;
+               psSeqFile->private = psEntry;
+               psEntry->sImplEntry.pvNative = psSeqFile;
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to seq_open psFile, returning %d",
+                       __func__, iRes));
+       }
+
+return_:
+       _DRIVER_THREAD_EXIT();
+
+       return iRes;
+}
+
+static int _Close(struct inode *psINode, struct file *psFile)
+{
+       DFS_FILE *psDFSFile = PDE_DATA(psINode);
+       DFS_ENTRY *psEntry;
+       int iRes;
+
+       PVR_LOG_RETURN_IF_FALSE(psDFSFile != NULL, "psDFSFile is NULL",
+                               -EIO);
+
+       _DRIVER_THREAD_ENTER();
+
+       /* save pointer to DFS_ENTRY */
+       psEntry = ((struct seq_file *) psFile->private_data)->private;
+
+       if (psDFSFile->sEntry.sIterCb.pfnStart != NULL)
+       {
+               iRes = seq_release(psINode, psFile);
+       }
+       else
+       {
+               iRes = single_release(psINode, psFile);
+       }
+
+       /* free DFS_ENTRY allocated in _Open */
+       OSFreeMem(psEntry);
+
+       /* Sanity check: seq_release() (and single_release(), which calls it)
+        * should never fail */
+       if (iRes != 0)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Failed to release psFile, returning %d",
+                       __func__, iRes));
+       }
+
+       _DRIVER_THREAD_EXIT();
+
+       return iRes;
+}
+
+static ssize_t _Read(struct file *psFile, char __user *pcBuffer,
+                     size_t uiCount, loff_t *puiPos)
+{
+       DFS_FILE *psDFSFile = PDE_DATA(psFile->f_path.dentry->d_inode);
+       ssize_t iRes = -1;
+
+       _DRIVER_THREAD_ENTER();
+
+       if (psDFSFile->eType == DI_ENTRY_TYPE_GENERIC)
+       {
+               iRes = seq_read(psFile, pcBuffer, uiCount, puiPos);
+               if (iRes < 0)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: failed to read from file, seq_read() "
+                               "returned %zd", __func__, iRes));
+                       goto return_;
+               }
+       }
+       else if (psDFSFile->eType == DI_ENTRY_TYPE_RANDOM_ACCESS)
+       {
+               DFS_ENTRY *psEntry = &psDFSFile->sEntry;
+               IMG_UINT64 ui64Count = uiCount, ui64Pos = *puiPos;
+
+               IMG_CHAR *pcLocalBuffer = OSAllocMem(uiCount);
+               PVR_GOTO_IF_FALSE(pcLocalBuffer != NULL, return_);
+
+               iRes = psEntry->sIterCb.pfnRead(pcLocalBuffer, ui64Count, &ui64Pos,
+                                               psEntry->sImplEntry.pvPrivData);
+               if (iRes < 0)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: failed to read from file, pfnRead() "
+                               "returned %zd", __func__, iRes));
+                       OSFreeMem(pcLocalBuffer);
+                       goto return_;
+               }
+
+               if (pvr_copy_to_user(pcBuffer, pcLocalBuffer, iRes) != 0)
+               {
+                       iRes = -1;
+               }
+
+               OSFreeMem(pcLocalBuffer);
+
+               *puiPos = ui64Pos;
+       }
+
+return_:
+       _DRIVER_THREAD_EXIT();
+
+       return iRes;
+}
+
+static loff_t _LSeek(struct file *psFile, loff_t iOffset, int iOrigin)
+{
+       DFS_FILE *psDFSFile = PDE_DATA(psFile->f_path.dentry->d_inode);
+       loff_t iRes = -1;
+
+       _DRIVER_THREAD_ENTER();
+
+       if (psDFSFile->eType == DI_ENTRY_TYPE_GENERIC)
+       {
+               iRes = seq_lseek(psFile, iOffset, iOrigin);
+               if (iRes < 0)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: failed to set file position to "
+                               "offset %lld, seq_lseek() returned %lld", __func__,
+                               iOffset, iRes));
+                       goto return_;
+               }
+       }
+       else if (psDFSFile->eType == DI_ENTRY_TYPE_RANDOM_ACCESS)
+       {
+               DFS_ENTRY *psEntry = &psDFSFile->sEntry;
+               IMG_UINT64 ui64Pos;
+
+               switch (iOrigin)
+               {
+                       case SEEK_SET:
+                               /* absolute offset from the start of the file */
+                               ui64Pos = iOffset;
+                               break;
+                       case SEEK_CUR:
+                               /* offset relative to the current position */
+                               ui64Pos = psFile->f_pos + iOffset;
+                               break;
+                       case SEEK_END:
+                               /* not supported as we don't know the file size here */
+                               /* fall through */
+                       default:
+                               /* take the common exit path so the driver thread
+                                * reference taken above is dropped */
+                               iRes = -1;
+                               goto return_;
+               }
+
+               /* only pass the absolute position to the callback, it's up to the
+                * implementer to determine if the position is valid */
+
+               iRes = psEntry->sIterCb.pfnSeek(ui64Pos,
+                                               psEntry->sImplEntry.pvPrivData);
+               if (iRes < 0)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: failed to set file position to "
+                               "offset %lld, pfnSeek() returned %lld", __func__,
+                               iOffset, iRes));
+                       goto return_;
+               }
+
+               psFile->f_pos = ui64Pos;
+       }
+
+return_:
+       _DRIVER_THREAD_EXIT();
+
+       return iRes;
+}
+
+static ssize_t _Write(struct file *psFile, const char __user *pszBuffer,
+                      size_t uiCount, loff_t *puiPos)
+{
+       struct inode *psINode = psFile->f_path.dentry->d_inode;
+       DFS_FILE *psDFSFile = PDE_DATA(psINode);
+       DI_ITERATOR_CB *psIter = &psDFSFile->sEntry.sIterCb;
+       IMG_CHAR *pcLocalBuffer;
+       IMG_UINT64 ui64Count;
+       IMG_INT64 i64Res = -EIO;
+       IMG_UINT64 ui64Pos = *puiPos;
+
+       PVR_LOG_RETURN_IF_FALSE(psDFSFile != NULL, "psDFSFile is NULL",
+                               -EIO);
+       PVR_LOG_RETURN_IF_FALSE(psIter->pfnWrite != NULL, "pfnWrite is NULL",
+                               -EIO);
+
+       /* Make sure we allocate only as much memory as is needed */
+       ui64Count = psIter->ui32WriteLenMax;
+       PVR_LOG_GOTO_IF_FALSE(uiCount <= ui64Count, "uiCount too long", return_);
+       ui64Count = MIN(uiCount+1, ui64Count);
+
+       _DRIVER_THREAD_ENTER();
+
+       /* allocate buffer with one additional byte for NUL character */
+       pcLocalBuffer = OSAllocMem(ui64Count);
+       PVR_LOG_GOTO_IF_FALSE(pcLocalBuffer != NULL, "OSAllocMem() failed",
+                             return_);
+
+       i64Res = pvr_copy_from_user(pcLocalBuffer, pszBuffer, ui64Count);
+       PVR_LOG_GOTO_IF_FALSE(i64Res == 0, "pvr_copy_from_user() failed",
+                             free_local_buffer_);
+
+       /* ensure that the framework user gets a NUL terminated buffer */
+       pcLocalBuffer[ui64Count - 1] = '\0';
+
+       i64Res = psIter->pfnWrite(pcLocalBuffer, ui64Count, &ui64Pos,
+                                 psDFSFile->sEntry.sImplEntry.pvPrivData);
+       PVR_LOG_GOTO_IF_FALSE(i64Res >= 0, "pfnWrite failed", free_local_buffer_);
+
+       *puiPos = ui64Pos;
+
+free_local_buffer_:
+       OSFreeMem(pcLocalBuffer);
+
+return_:
+       _DRIVER_THREAD_EXIT();
+
+       return i64Res;
+}
+
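+/* struct proc_ops replaced struct file_operations for procfs entries in
+ * kernel 5.6, so the same callbacks are exposed through whichever structure
+ * the running kernel expects.
+ */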
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0))
+
+static const struct file_operations _g_psFileOpsGen = {
+       .owner = THIS_MODULE,
+       .open = _Open,
+       .release = _Close,
+       .read = _Read,
+       .llseek = _LSeek,
+       .write = _Write,
+};
+
+static const struct file_operations _g_psFileOpsRndAcc = {
+       .owner = THIS_MODULE,
+       .read = _Read,
+       .llseek = _LSeek,
+       .write = _Write,
+};
+
+#else
+
+static const struct proc_ops _g_psFileOpsGen = {
+       .proc_open    = _Open,
+       .proc_read    = _Read,
+       .proc_write   = _Write,
+       .proc_lseek   = _LSeek,
+       .proc_release = _Close,
+};
+
+static const struct proc_ops _g_psFileOpsRndAcc = {
+       .proc_read  = _Read,
+       .proc_write = _Write,
+       .proc_lseek = _LSeek,
+};
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) */
+
+/* ----- DI implementation interface ---------------------------------------- */
+
+static PVRSRV_ERROR _Init(void)
+{
+       return PVRSRV_OK;
+}
+
+static void _DeInit(void)
+{
+}
+
+static PVRSRV_ERROR _CreateFile(const IMG_CHAR *pszName,
+                                DI_ENTRY_TYPE eType,
+                                const DI_ITERATOR_CB *psIterCb,
+                                void *pvPrivData,
+                                void *pvParentDir,
+                                void **pvFile)
+{
+       DFS_DIR *psParentDir = pvParentDir;
+       DFS_FILE *psFile;
+       umode_t uiMode = S_IFREG;
+       struct proc_dir_entry *psEntry;
+       PVRSRV_ERROR eError;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0))
+       const struct file_operations *psProcOps = NULL;
+#else
+       const struct proc_ops *psProcOps = NULL;
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) */
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(pvFile != NULL, "pvFile");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(pvParentDir != NULL, "pvParentDir");
+
+       switch (eType)
+       {
+               case DI_ENTRY_TYPE_GENERIC:
+                       psProcOps = &_g_psFileOpsGen;
+                       break;
+               case DI_ENTRY_TYPE_RANDOM_ACCESS:
+                       psProcOps = &_g_psFileOpsRndAcc;
+                       break;
+               default:
+                       PVR_DPF((PVR_DBG_ERROR, "eType invalid in %s()", __func__));
+                       eError = PVRSRV_ERROR_INVALID_PARAMS;
+                       goto return_;
+       }
+
+       psFile = OSAllocMem(sizeof(*psFile));
+       PVR_LOG_GOTO_IF_NOMEM(psFile, eError, return_);
+
+       uiMode |= psIterCb->pfnShow != NULL || psIterCb->pfnRead != NULL ?
+               S_IRUGO : 0;
+       uiMode |= psIterCb->pfnWrite != NULL ? S_IWUSR : 0;
+
+       psEntry = proc_create_data(pszName, uiMode, psParentDir->psDirEntry,
+                                  psProcOps, psFile);
+       if (IS_ERR_OR_NULL(psEntry))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Cannot create procfs '%s' file",
+                       __func__, pszName));
+
+               eError = psEntry == NULL ?
+                       PVRSRV_ERROR_OUT_OF_MEMORY : PVRSRV_ERROR_INVALID_DEVICE;
+               goto free_file_;
+       }
+
+       psFile->eType = eType;
+       psFile->psSeqOps = &_g_sSeqOps;
+       psFile->sEntry.sIterCb = *psIterCb;
+       psFile->sEntry.sImplEntry.pvPrivData = pvPrivData;
+       psFile->sEntry.sImplEntry.pvNative = NULL;
+       psFile->sEntry.sImplEntry.psCb = &_g_sEntryCallbacks;
+       psFile->psParentDir = psParentDir;
+       psFile->psFileEntry = psEntry;
+
+       *pvFile = psFile;
+
+       return PVRSRV_OK;
+
+free_file_:
+       OSFreeMem(psFile);
+
+return_:
+       return eError;
+}
+
+static void _DestroyFile(void *pvFile)
+{
+       DFS_FILE *psFile = pvFile;
+
+       PVR_ASSERT(psFile != NULL);
+
+       proc_remove(psFile->psFileEntry);
+       OSFreeMem(psFile);
+}
+
+static PVRSRV_ERROR _CreateDir(const IMG_CHAR *pszName,
+                               void *pvParentDir,
+                               void **ppvDir)
+{
+       DFS_DIR *psNewDir;
+       struct proc_dir_entry *psDirEntry, *psParentDir = NULL;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(pszName != NULL, "pszName");
+       PVR_LOG_RETURN_IF_INVALID_PARAM(ppvDir != NULL, "ppvDir");
+
+       psNewDir = OSAllocMem(sizeof(*psNewDir));
+       PVR_LOG_RETURN_IF_NOMEM(psNewDir, "OSAllocMem");
+
+       psNewDir->psParentDir = pvParentDir;
+
+       if (pvParentDir != NULL)
+       {
+               psParentDir = psNewDir->psParentDir->psDirEntry;
+       }
+
+       psDirEntry = proc_mkdir(pszName, psParentDir);
+       if (IS_ERR_OR_NULL(psDirEntry))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Cannot create '%s' procfs directory",
+                       __func__, pszName));
+               OSFreeMem(psNewDir);
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       psNewDir->psDirEntry = psDirEntry;
+       *ppvDir = psNewDir;
+
+       return PVRSRV_OK;
+}
+
+static void _DestroyDir(void *pvDir)
+{
+       DFS_DIR *psDir = pvDir;
+
+       PVR_ASSERT(psDir != NULL);
+
+       proc_remove(psDir->psDirEntry);
+       OSFreeMem(psDir);
+}
+
+PVRSRV_ERROR PVRProcFsRegister(void)
+{
+       OSDI_IMPL_CB sImplCb = {
+               .pfnInit = _Init,
+               .pfnDeInit = _DeInit,
+               .pfnCreateEntry = _CreateFile,
+               .pfnDestroyEntry = _DestroyFile,
+               .pfnCreateGroup = _CreateDir,
+               .pfnDestroyGroup = _DestroyDir
+       };
+
+       return DIRegisterImplementation("procfs", &sImplCb);
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_procfs.h b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_procfs.h
new file mode 100644 (file)
index 0000000..61a1f0e
--- /dev/null
@@ -0,0 +1,50 @@
+/*************************************************************************/ /*!
+@File
+@Title          ProcFS implementation of Debug Info interface.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVR_PROCFS_H
+#define PVR_PROCFS_H
+
+#include "pvrsrv_error.h"
+
+PVRSRV_ERROR PVRProcFsRegister(void);
+
+#endif /* PVR_PROCFS_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sw_fence.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sw_fence.c
new file mode 100644 (file)
index 0000000..4f2404a
--- /dev/null
@@ -0,0 +1,199 @@
+/*
+ * @File
+ * @Codingstyle LinuxKernel
+ * @Copyright   Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+ * @License     Dual MIT/GPLv2
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License Version 2 ("GPL") in which case the provisions
+ * of GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms of
+ * GPL, and not to allow others to use your version of this file under the terms
+ * of the MIT license, indicate your decision by deleting the provisions above
+ * and replace them with the notice and other provisions required by GPL as set
+ * out in the file called "GPL-COPYING" included in this distribution. If you do
+ * not delete the provisions above, a recipient may use your version of this file
+ * under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT-COPYING".
+ *
+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/spinlock_types.h>
+#include <linux/atomic.h>
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/bug.h>
+
+#include "pvr_sw_fence.h"
+
+struct pvr_sw_fence_context {
+       struct kref kref;
+       unsigned int context;
+       char context_name[32];
+       char driver_name[32];
+       atomic_t seqno;
+       atomic_t fence_count;
+};
+
+struct pvr_sw_fence {
+       struct dma_fence base;
+       struct pvr_sw_fence_context *fence_context;
+       spinlock_t lock;
+};
+
+#define to_pvr_sw_fence(fence) container_of(fence, struct pvr_sw_fence, base)
+
+const char *pvr_sw_fence_context_name(struct pvr_sw_fence_context *fctx)
+{
+       return fctx->context_name;
+}
+
+void pvr_sw_fence_context_value_str(struct pvr_sw_fence_context *fctx,
+                                   char *str, int size)
+{
+       snprintf(str, size, "%d", atomic_read(&fctx->seqno));
+}
+
+static inline unsigned
+pvr_sw_fence_context_seqno_next(struct pvr_sw_fence_context *fence_context)
+{
+       return atomic_inc_return(&fence_context->seqno) - 1;
+}
+
+static const char *pvr_sw_fence_get_driver_name(struct dma_fence *fence)
+{
+       struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence);
+
+       return pvr_sw_fence->fence_context->driver_name;
+}
+
+static const char *pvr_sw_fence_get_timeline_name(struct dma_fence *fence)
+{
+       struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence);
+
+       return pvr_sw_fence_context_name(pvr_sw_fence->fence_context);
+}
+
+static void pvr_sw_fence_value_str(struct dma_fence *fence, char *str, int size)
+{
+       snprintf(str, size, "%llu", (u64) fence->seqno);
+}
+
+static void pvr_sw_fence_timeline_value_str(struct dma_fence *fence,
+                                           char *str, int size)
+{
+       struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence);
+
+       pvr_sw_fence_context_value_str(pvr_sw_fence->fence_context, str, size);
+}
+
+static bool pvr_sw_fence_enable_signaling(struct dma_fence *fence)
+{
+       return true;
+}
+
+static void pvr_sw_fence_context_destroy_kref(struct kref *kref)
+{
+       struct pvr_sw_fence_context *fence_context =
+               container_of(kref, struct pvr_sw_fence_context, kref);
+       unsigned int fence_count;
+
+       fence_count = atomic_read(&fence_context->fence_count);
+       if (WARN_ON(fence_count))
+               pr_debug("%s context has %u fence(s) remaining\n",
+                        fence_context->context_name, fence_count);
+
+       kfree(fence_context);
+}
+
+static void pvr_sw_fence_release(struct dma_fence *fence)
+{
+       struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence);
+
+       atomic_dec(&pvr_sw_fence->fence_context->fence_count);
+       kref_put(&pvr_sw_fence->fence_context->kref,
+               pvr_sw_fence_context_destroy_kref);
+       kfree(pvr_sw_fence);
+}
+
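+/* These are software fences: there is no hardware signalling side, so
+ * enable_signaling trivially succeeds and waiters rely on
+ * dma_fence_default_wait(); release drops the context reference taken in
+ * pvr_sw_fence_create().
+ */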
+static const struct dma_fence_ops pvr_sw_fence_ops = {
+       .get_driver_name = pvr_sw_fence_get_driver_name,
+       .get_timeline_name = pvr_sw_fence_get_timeline_name,
+       .fence_value_str = pvr_sw_fence_value_str,
+       .timeline_value_str = pvr_sw_fence_timeline_value_str,
+       .enable_signaling = pvr_sw_fence_enable_signaling,
+       .wait = dma_fence_default_wait,
+       .release = pvr_sw_fence_release,
+};
+
+struct pvr_sw_fence_context *
+pvr_sw_fence_context_create(const char *context_name, const char *driver_name)
+{
+       struct pvr_sw_fence_context *fence_context;
+
+       fence_context = kmalloc(sizeof(*fence_context), GFP_KERNEL);
+       if (!fence_context)
+               return NULL;
+
+       fence_context->context = dma_fence_context_alloc(1);
+       strlcpy(fence_context->context_name, context_name,
+               sizeof(fence_context->context_name));
+       strlcpy(fence_context->driver_name, driver_name,
+               sizeof(fence_context->driver_name));
+       atomic_set(&fence_context->seqno, 0);
+       atomic_set(&fence_context->fence_count, 0);
+       kref_init(&fence_context->kref);
+
+       return fence_context;
+}
+
+void pvr_sw_fence_context_destroy(struct pvr_sw_fence_context *fence_context)
+{
+       kref_put(&fence_context->kref, pvr_sw_fence_context_destroy_kref);
+}
+
+struct dma_fence *
+pvr_sw_fence_create(struct pvr_sw_fence_context *fence_context)
+{
+       struct pvr_sw_fence *pvr_sw_fence;
+       unsigned int seqno;
+
+       pvr_sw_fence = kmalloc(sizeof(*pvr_sw_fence), GFP_KERNEL);
+       if (!pvr_sw_fence)
+               return NULL;
+
+       spin_lock_init(&pvr_sw_fence->lock);
+       pvr_sw_fence->fence_context = fence_context;
+
+       seqno = pvr_sw_fence_context_seqno_next(fence_context);
+       dma_fence_init(&pvr_sw_fence->base, &pvr_sw_fence_ops,
+                      &pvr_sw_fence->lock, fence_context->context, seqno);
+
+       atomic_inc(&fence_context->fence_count);
+       kref_get(&fence_context->kref);
+
+       return &pvr_sw_fence->base;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sw_fence.h b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sw_fence.h
new file mode 100644 (file)
index 0000000..bebbcb7
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ * @File
+ * @Codingstyle LinuxKernel
+ * @Copyright   Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+ * @License     Dual MIT/GPLv2
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License Version 2 ("GPL") in which case the provisions
+ * of GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms of
+ * GPL, and not to allow others to use your version of this file under the terms
+ * of the MIT license, indicate your decision by deleting the provisions above
+ * and replace them with the notice and other provisions required by GPL as set
+ * out in the file called "GPL-COPYING" included in this distribution. If you do
+ * not delete the provisions above, a recipient may use your version of this file
+ * under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT-COPYING".
+ *
+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#if !defined(__PVR_SW_FENCES_H__)
+#define __PVR_SW_FENCES_H__
+
+#include "pvr_linux_fence.h"
+
+struct pvr_sw_fence_context;
+
+struct pvr_sw_fence_context *pvr_sw_fence_context_create(const char *name,
+                               const char *driver_name);
+void pvr_sw_fence_context_destroy(struct pvr_sw_fence_context *fence_context);
+struct dma_fence *pvr_sw_fence_create(struct pvr_sw_fence_context *
+                                     fence_context);
+
+const char *pvr_sw_fence_context_name(struct pvr_sw_fence_context *fctx);
+void pvr_sw_fence_context_value_str(struct pvr_sw_fence_context *fctx,
+                                   char *str, int size);
+
+#endif /* !defined(__PVR_SW_FENCES_H__) */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sync.h b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sync.h
new file mode 100644 (file)
index 0000000..a8ecd8b
--- /dev/null
@@ -0,0 +1,120 @@
+/*
+ * @File        pvr_sync.h
+ * @Title       Kernel driver for Android's sync mechanism
+ * @Codingstyle LinuxKernel
+ * @Copyright   Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+ * @License     Dual MIT/GPLv2
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License Version 2 ("GPL") in which case the provisions
+ * of GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms of
+ * GPL, and not to allow others to use your version of this file under the terms
+ * of the MIT license, indicate your decision by deleting the provisions above
+ * and replace them with the notice and other provisions required by GPL as set
+ * out in the file called "GPL-COPYING" included in this distribution. If you do
+ * not delete the provisions above, a recipient may use your version of this file
+ * under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT-COPYING".
+ *
+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _PVR_SYNC_H
+#define _PVR_SYNC_H
+
+#include <linux/device.h>
+
+#include "pvr_fd_sync_kernel.h"
+#include "services_kernel_client.h"
+
+
+/* Services internal interface */
+
+/**
+ * pvr_sync_register_functions()
+ *
+ * Return: PVRSRV_OK on success.
+ */
+enum PVRSRV_ERROR_TAG pvr_sync_register_functions(void);
+
+/**
+ *  pvr_sync_init - register the pvr_sync misc device
+ *
+ *  Return: error code, 0 on success.
+ */
+int pvr_sync_init(void);
+
+/**
+ * pvr_sync_deinit - unregister the pvr_sync misc device
+ */
+void pvr_sync_deinit(void);
+
+/**
+ * pvr_sync_device_init() - create an internal sync context
+ * @dev: Linux device
+ *
+ * Return: PVRSRV_OK on success.
+ */
+enum PVRSRV_ERROR_TAG pvr_sync_device_init(struct device *dev);
+
+/**
+ * pvr_sync_device_deinit() - destroy an internal sync context
+ *
+ * Drains any work items with outstanding sync fence updates/dependencies.
+ */
+void pvr_sync_device_deinit(struct device *dev);
+
+enum PVRSRV_ERROR_TAG pvr_sync_fence_wait(void *fence, u32 timeout_in_ms);
+
+enum PVRSRV_ERROR_TAG pvr_sync_fence_release(void *fence);
+
+enum PVRSRV_ERROR_TAG pvr_sync_fence_get(int fence_fd, void **fence_out);
+
+enum PVRSRV_ERROR_TAG
+pvr_sync_sw_timeline_fence_create(struct _PVRSRV_DEVICE_NODE_ *pvrsrv_dev_node,
+                                 int timeline_fd,
+                                 const char *fence_name,
+                                 int *fence_fd_out,
+                                 u64 *sync_pt_idx);
+
+enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_advance(void *timeline,
+                                              u64 *sync_pt_idx);
+
+enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_release(void *timeline);
+
+enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_get(int timeline_fd,
+                                          void **timeline_out);
+
+enum PVRSRV_ERROR_TAG
+sync_dump_fence(void *sw_fence_obj,
+               DUMPDEBUG_PRINTF_FUNC *dump_debug_printf,
+               void *dump_debug_file);
+
+enum PVRSRV_ERROR_TAG
+sync_sw_dump_timeline(void *sw_timeline_obj,
+                     DUMPDEBUG_PRINTF_FUNC *dump_debug_printf,
+                     void *dump_debug_file);
+
+#endif /* _PVR_SYNC_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sync2.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sync2.c
new file mode 100644 (file)
index 0000000..d454de8
--- /dev/null
@@ -0,0 +1,2759 @@
+/*
+ * @File        pvr_sync.c
+ * @Title       Kernel driver for Android's sync mechanism
+ * @Codingstyle LinuxKernel
+ * @Copyright   Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+ * @License     Dual MIT/GPLv2
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License Version 2 ("GPL") in which case the provisions
+ * of GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms of
+ * GPL, and not to allow others to use your version of this file under the terms
+ * of the MIT license, indicate your decision by deleting the provisions above
+ * and replace them with the notice and other provisions required by GPL as set
+ * out in the file called "GPL-COPYING" included in this distribution. If you do
+ * not delete the provisions above, a recipient may use your version of this file
+ * under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT-COPYING".
+ *
+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "pvr_drv.h"
+#include "pvr_fd_sync_kernel.h"
+#include "services_kernel_client.h"
+#include "pvr_sync.h"
+#include "pvrsrv_sync_km.h"
+#include "sync_checkpoint_external.h"
+
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/kref.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+#include <linux/syscalls.h>
+#include <linux/miscdevice.h>
+#include <linux/anon_inodes.h>
+
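+/* The Android sync framework headers are picked up from the system include
+ * path on kernels older than 3.10 and directly from the staging tree on
+ * newer ones.
+ */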
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+#include <linux/sync.h>
+#include <linux/sw_sync.h>
+#else
+#include <../drivers/staging/android/sync.h>
+#include <../drivers/staging/android/sw_sync.h>
+#endif
+
+#include "linux_sw_sync.h"
+
+#include "pvr_sync_api.h"
+
+#include "kernel_compatibility.h"
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
+
+static inline int sync_fence_get_status(struct sync_fence *psFence)
+{
+       return psFence->status;
+}
+
+static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt)
+{
+       return pt->parent;
+}
+
+static inline int sync_pt_get_status(struct sync_pt *pt)
+{
+       return pt->status;
+}
+
+static inline ktime_t sync_pt_get_timestamp(struct sync_pt *pt)
+{
+       return pt->timestamp;
+}
+
+#define for_each_sync_pt(s, f, c) \
+       list_for_each_entry((s), &(f)->pt_list_head, pt_list)
+
+#else /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) */
+
+static inline int sync_fence_get_status(struct sync_fence *psFence)
+{
+       int iStatus = atomic_read(&psFence->status);
+
+       /*
+        * When Android sync was rebased on top of fences the sync_fence status
+        * values changed from 0 meaning 'active' to 'signalled' and, likewise,
+        * values greater than 0 went from meaning 'signalled' to 'active'
+        * (where the value corresponds to the number of active sync points).
+        *
+        * Convert to the old style status values.
+        */
+       return iStatus > 0 ? 0 : iStatus ? iStatus : 1;
+}
+
+static inline int sync_pt_get_status(struct sync_pt *pt)
+{
+       /* No error state for raw dma-buf fences */
+       return fence_is_signaled(&pt->base) ? 1 : 0;
+}
+
+static inline ktime_t sync_pt_get_timestamp(struct sync_pt *pt)
+{
+       return pt->base.timestamp;
+}
+
+#define for_each_sync_pt(s, f, c) \
+       for ((c) = 0, (s) = (f)->num_fences == 0 ? \
+               NULL : (struct sync_pt *)(f)->cbs[0].sync_pt; \
+            (c) < (f)->num_fences; \
+            (c)++,   (s) = (c) < (f)->num_fences ? \
+               (struct sync_pt *)(f)->cbs[c].sync_pt : NULL)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) */
+
+/* #define DEBUG_OUTPUT 1 */
+
+#ifdef DEBUG_OUTPUT
+#define DPF(fmt, ...) pr_err("pvr_sync2: " fmt "\n", __VA_ARGS__)
+#else
+#define DPF(fmt, ...) do {} while (0)
+#endif
+
+#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, ...) \
+       do { \
+               if (pfnDumpDebugPrintf) { \
+                       pfnDumpDebugPrintf(pvDumpDebugFile, __VA_ARGS__); \
+               } else { \
+                       pr_info("pvr_sync2: " __VA_ARGS__); \
+               } \
+       } while (0)
+
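+/* Maximum number of freed sync prims kept on sync_pool_free_list for reuse
+ * by sync_pool_get(); PDUMP builds set this to 0, which disables the cache.
+ */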
+#if defined(PDUMP)
+#define SYNC_MAX_POOL_SIZE 0
+#else
+#define SYNC_MAX_POOL_SIZE 10
+#endif
+
+enum {
+       SYNC_TL_TYPE = 0,
+       SYNC_PT_FENCE_TYPE = 1,
+       SYNC_PT_CLEANUP_TYPE = 2,
+       SYNC_PT_FOREIGN_FENCE_TYPE = 3,
+       SYNC_PT_FOREIGN_CLEANUP_TYPE = 4,
+};
+
+/* Services client sync prim wrapper. This is used to hold debug information
+ * and make it possible to cache unused syncs.
+ */
+struct pvr_sync_native_sync_prim {
+       /* List for the sync pool support. */
+       struct list_head list;
+
+       /* Base services sync prim structure */
+       struct PVRSRV_CLIENT_SYNC_PRIM_TAG *client_sync;
+
+       /* The next queued value which should be used */
+       u32 next_value;
+
+       /* Every sync data will get some unique id */
+       u32 id;
+
+       /* FWAddr used by the client sync */
+       u32 vaddr;
+
+       /* The type this sync is used for in our driver. Used in
+        * pvr_sync_debug_request().
+        */
+       u8 type;
+
+       /* A debug class name also printed in pvr_sync_debug_request(). */
+       char class[32];
+};
+
+struct pvr_sync_native_sync_checkpoint {
+       /* List for the sync pool support. */
+       struct list_head list;
+
+       /* Base services sync checkpoint */
+       PSYNC_CHECKPOINT client_sync_checkpoint;
+
+       /* Every sync data will get some unique id */
+       u32 id;
+
+       /* FWAddr used by the client sync */
+       u32 vaddr;
+
+       /* The type this sync is used for in our driver. Used in
+        * pvr_sync_debug_request().
+        */
+       u8 type;
+
+       /* A debug class name also printed in pvr_sync_debug_request(). */
+       char class[32];
+
+       /* We store the foreign sync fence (if applicable), for debug purposes. */
+       struct sync_fence *foreign_sync_fence;
+       char foreign_sync_fence_name[32];
+};
+
+struct pvr_sw_sync_timeline {
+       /* sw_sync_timeline must come first to allow a pointer to the wrapping
+        * struct to be cast to a pointer to the sw_sync_timeline.
+        */
+       struct sw_sync_timeline *sw_sync_timeline;
+       u64 current_value;
+       u64 next_value;
+       /* Reference count for this object */
+       struct kref kref;
+};
+
+/* This is the actual timeline metadata. We might keep this around after the
+ * base sync driver has destroyed the pvr_sync_timeline_wrapper object.
+ */
+struct pvr_sync_timeline {
+       /* Back reference to the sync_timeline. Not always valid */
+       struct sync_timeline *obj;
+
+       /* Global timeline list support */
+       struct list_head list;
+
+       /* List of sync points alive on this timeline. */
+       struct list_head sync_list;
+
+       /* Timeline sync */
+       struct pvr_sync_timeline_kernel_pair *kernel;
+
+       /* Reference count for this object */
+       struct kref kref;
+
+       /* Used only by pvr_sync_update_all_timelines(). False if the timeline
+        * has been detected as racing with pvr_sync_destroy_timeline().
+        */
+       bool valid;
+};
+
+/* This is the IMG extension of a sync_timeline */
+struct pvr_sync_timeline_wrapper {
+       /* Original timeline struct. Needs to come first. */
+       struct sync_timeline obj;
+
+       /* Pointer to extra timeline data. Separated life-cycle. */
+       struct pvr_sync_timeline *timeline;
+};
+
+struct pvr_sync_timeline_kernel_pair {
+       /* Binary sync point representing the android native sync in hw. */
+       struct pvr_sync_native_sync_prim *fence_sync;
+
+       /* Sync points can go away when there are deferred hardware operations
+        * still outstanding. We must not free the SERVER_SYNC_PRIMITIVE until
+        * the hardware is finished, so we add it to a defer list which is
+        * processed periodically ("defer-free").
+        *
+        * Note that the defer-free list is global, not per-timeline.
+        */
+       struct list_head list;
+};
+
+struct pvr_sync_kernel_pair {
+       /* Binary sync point representing the android native sync in hw. */
+       struct pvr_sync_native_sync_checkpoint *fence_sync;
+
+       /* Sync points can go away when there are deferred hardware operations
+        * still outstanding. We must not free the SERVER_SYNC_PRIMITIVE until
+        * the hardware is finished, so we add it to a defer list which is
+        * processed periodically ("defer-free").
+        *
+        * Note that the defer-free list is global, not per-timeline.
+        */
+       struct list_head list;
+};
+
+struct pvr_sync_data {
+       /* Every sync point has a services sync object. This object is used
+        * by the hardware to enforce ordering -- it is attached as a source
+        * dependency to various commands.
+        */
+       struct pvr_sync_kernel_pair *kernel;
+
+       /* The timeline update value for this sync point. */
+       u32 timeline_update_value;
+
+       /* This refcount is incremented at create and dup time, and decremented
+        * at free time. It ensures the object doesn't start the defer-free
+        * process until it is no longer referenced.
+        */
+       struct kref kref;
+};
+
+/* This is the IMG extension of a sync_pt */
+struct pvr_sync_pt {
+       /* Original sync_pt structure. Needs to come first. */
+       struct sync_pt pt;
+
+       /* Private shared data */
+       struct pvr_sync_data *sync_data;
+
+       /* The timeline on which this pvr_sync_pt was created */
+       struct pvr_sync_timeline *timeline;
+};
+
+/* This is the IMG extension of a sync_fence */
+struct pvr_sync_fence {
+       /* Original sync_fence structure. Needs to come first. */
+       struct sync_fence *fence;
+
+       /* To ensure callbacks are always received for fences / sync_pts, even
+        * after the fence has been 'put' (freed), we must take a reference to
+        * the fence. We still need to 'put' the fence ourselves, but this might
+        * happen in irq context, where fput() is not allowed (in kernels <3.6).
+        * We must add the fence to a list which is processed in WQ context.
+        */
+       struct list_head list;
+};
+
+/* Any sync point from a foreign (non-PVR) timeline needs to have a "shadow"
+ * sync prim. This is modelled as a software operation. The foreign driver
+ * completes the operation by calling a callback we registered with it.
+ */
+struct pvr_sync_fence_waiter {
+       /* Base sync driver waiter structure */
+       struct sync_fence_waiter waiter;
+
+       /* "Shadow" sync prim backing the foreign driver's sync_pt */
+       struct pvr_sync_kernel_pair *kernel;
+
+       /* Optimizes lookup of fence for defer-put operation */
+       struct pvr_sync_fence *sync_fence;
+};
+
+/* Global data for the sync driver */
+static struct {
+       /* Complete notify handle */
+       void *command_complete_handle;
+
+       /* Defer-free workqueue. Syncs may still be in use by the HW when freed,
+        * so we have to keep them around until the HW is done with them at
+        * some later time. This workqueue iterates over the list of freed
+        * syncs, checks whether they are still in use, and frees the sync
+        * device memory once they are not.
+        */
+       struct workqueue_struct *defer_free_wq;
+       struct work_struct defer_free_work;
+
+       struct work_struct check_status_work;
+
+       /* Context used to create client sync prims. */
+       struct SYNC_PRIM_CONTEXT_TAG *sync_prim_context;
+
+       /* Unique id counter for the sync prims */
+       atomic_t sync_id;
+
+       /* The global event object (used to wait between checks for
+        * deferred-free sync status).
+        */
+       void *event_object_handle;
+
+       /* struct used to register with sync_checkpoint.c */
+       PFN_SYNC_CHECKPOINT_STRUCT sync_checkpoint_ops;
+} pvr_sync_data;
+
+/* List of timelines created by this driver */
+static LIST_HEAD(timeline_list);
+static DEFINE_SPINLOCK(timeline_list_lock);
+
+/* Sync pool support */
+static LIST_HEAD(sync_pool_free_list);
+static LIST_HEAD(sync_pool_active_list);
+static DEFINE_MUTEX(sync_pool_mutex);
+static s32 sync_pool_size;// = 0;
+static u32 sync_pool_created;// = 0;
+static u32 sync_pool_reused;// = 0;
+
+/* pvr_sync_pt_active_list is used for debug - when a
+ * pvr sync_native_sync_checkpoint is created it is added
+ * to this list (which contains all existing points for
+ * all pvr timelines).
+ */
+static LIST_HEAD(pvr_sync_pt_active_list);
+static DEFINE_SPINLOCK(pvr_sync_pt_active_list_spinlock);
+/* pvr_sw_sync_pt_active_list is used for debug - when a
+ * pvr sw_sync_native_sync_checkpoint is created it is added
+ * to this list (which contains all existing points for
+ * all pvr sw timelines).
+ */
+static LIST_HEAD(pvr_sw_sync_pt_active_list);
+static DEFINE_MUTEX(pvr_sw_sync_pt_active_list_mutex);
+
+/* The "defer-free" sync_checkpoint list. Driver global. */
+static LIST_HEAD(sync_checkpoint_free_list);
+static DEFINE_SPINLOCK(sync_checkpoint_free_list_spinlock);
+
+/* The "defer-free-timeline" object list. Driver global. */
+static LIST_HEAD(timeline_free_list);
+static DEFINE_SPINLOCK(timeline_free_list_spinlock);
+
+/* The "defer-put" object list. Driver global. */
+static LIST_HEAD(sync_fence_put_list);
+static DEFINE_SPINLOCK(sync_fence_put_list_spinlock);
+
+static void pvr_sync_update_all_timelines(void *command_complete_handle);
+static void pvr_sync_free_checkpoint_list_mem(void *mem_ptr);
+
+static void _dump_fence(struct sync_fence *fence,
+                                               DUMPDEBUG_PRINTF_FUNC *dump_debug_printf,
+                                               void *dump_debug_file)
+{
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 10, 0))
+       struct sync_pt *sync_point;
+       char time_str[16]  = { '\0' };
+       char pt_value_str[64]  = { '\0' };
+       char timeline_value_str[64]  = { '\0' };
+       char value_str[132] = { '\0' };
+       int status = sync_fence_get_status(fence);
+       int i;
+
+       PVR_DUMPDEBUG_LOG(dump_debug_printf,
+                                         dump_debug_file,
+                                         "[%p] %s: %s ref=%u Sync Points:\n",
+                                         fence,
+                                         fence->name,
+                                         (status > 0 ?
+                                                         "Signalled" : status ?
+                                                                         "Error" : "Active"),
+                                         atomic_read(&fence->kref.refcount));
+
+       for_each_sync_pt(sync_point, fence, i) {
+
+               struct sync_timeline *timeline = sync_pt_parent(sync_point);
+               ktime_t timestamp = sync_pt_get_timestamp(sync_point);
+               struct timeval tv = ktime_to_timeval(timestamp);
+               int i_pt_status = sync_pt_get_status(sync_point);
+
+               char time_pt[16] = { '\0' };
+               const struct fence_ops *fence_ops = sync_point->base.ops;
+
+               snprintf(time_str,
+                                sizeof(time_str),
+                                "@%ld.%06ld",
+                                tv.tv_sec,
+                                tv.tv_usec);
+
+               if (timeline->ops->pt_value_str &&
+                       timeline->ops->timeline_value_str) {
+                       timeline->ops->pt_value_str(sync_point, pt_value_str, sizeof(pt_value_str));
+                       timeline->ops->timeline_value_str(timeline,
+                                                         timeline_value_str,
+                                                         sizeof(timeline_value_str));
+                       snprintf(value_str, sizeof(value_str), "%s / %s",
+                                timeline_value_str, pt_value_str);
+               }
+               fence_ops->timeline_value_str(&sync_point->base, time_pt, sizeof(time_pt));
+
+               PVR_DUMPDEBUG_LOG(dump_debug_printf,
+                                 dump_debug_file,
+                                 "\t@%u Ref=%u TS=%s State=%s %s TLN=%s\n",
+                                 sync_point->base.seqno,
+                                 atomic_read(&sync_point->base.refcount.refcount),
+                                 time_pt,
+                                 (i_pt_status > 0 ? "signalled" : i_pt_status ?
+                                  "error" : "active"),
+                                 value_str,
+                                 fence_ops->get_timeline_name(&sync_point->base));
+       }
+#else
+       PVR_DUMPDEBUG_LOG(dump_debug_printf,
+                         dump_debug_file,
+                         "Fence stats not available on this platform!");
+#endif
+}
+
+/* Sync prim helpers */
+static inline void set_sync_prim_value(struct pvr_sync_native_sync_prim *sync,
+                                 u32 value)
+{
+       *(sync->client_sync->pui32LinAddr) = value;
+}
+
+static inline u32 get_sync_prim_value(struct pvr_sync_native_sync_prim *sync)
+{
+       return *(sync->client_sync->pui32LinAddr);
+}
+
+static inline void complete_sync_prim(struct pvr_sync_native_sync_prim *sync)
+{
+       *(sync->client_sync->pui32LinAddr) = sync->next_value;
+}
+
+static inline int is_sync_prim_met(struct pvr_sync_native_sync_prim *sync)
+{
+       return *(sync->client_sync->pui32LinAddr) == sync->next_value;
+}
+
+/* Checkpoint helpers */
+static inline u32 get_sync_checkpoint_value(struct pvr_sync_native_sync_checkpoint *sync)
+{
+       PVRSRV_SYNC_CHECKPOINT_STATE checkpoint_state = PVRSRV_SYNC_CHECKPOINT_ACTIVE;
+
+       if (SyncCheckpointIsSignalled(sync->client_sync_checkpoint,
+                                     PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+               checkpoint_state = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+       else if (SyncCheckpointIsErrored(sync->client_sync_checkpoint,
+                                        PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+               checkpoint_state = PVRSRV_SYNC_CHECKPOINT_ERRORED;
+
+       return (u32)checkpoint_state;
+}
+
+static inline char get_sync_checkpoint_char(struct pvr_sync_native_sync_checkpoint *sync)
+{
+       char cState = 'A';
+
+       if (SyncCheckpointIsSignalled(sync->client_sync_checkpoint,
+                                     PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+               cState = 'S';
+       else if (SyncCheckpointIsErrored(sync->client_sync_checkpoint,
+                                        PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+               cState = 'E';
+
+       return cState;
+}
+
+static inline void error_sync_checkpoint(struct pvr_sync_native_sync_checkpoint *sync,
+                                        u32 fence_sync_flags)
+{
+       SyncCheckpointError(sync->client_sync_checkpoint, fence_sync_flags);
+}
+
+static inline void complete_sync_checkpoint(struct pvr_sync_native_sync_checkpoint *sync,
+                                           u32 fence_sync_flags)
+{
+       SyncCheckpointSignal(sync->client_sync_checkpoint, fence_sync_flags);
+}
+
+static inline int is_sync_checkpoint_met(struct pvr_sync_native_sync_checkpoint *sync,
+                                        u32 fence_sync_flags)
+{
+       return (int)SyncCheckpointIsSignalled(sync->client_sync_checkpoint, fence_sync_flags);
+}
+
+static inline int is_sync_checkpoint_errored(struct pvr_sync_native_sync_checkpoint *sync,
+                                            u32 fence_sync_flags)
+{
+       return (int)SyncCheckpointIsErrored(sync->client_sync_checkpoint, fence_sync_flags);
+}
+
+/* Timeline helpers */
+static inline struct pvr_sync_timeline *get_timeline(struct sync_timeline *obj)
+{
+       return ((struct pvr_sync_timeline_wrapper *)obj)->timeline;
+}
+
+static inline struct pvr_sync_timeline *get_timeline_pt(struct sync_pt *pt)
+{
+       return get_timeline(sync_pt_parent(pt));
+}
+
+static inline int
+pvr_sync_has_kernel_signaled(struct pvr_sync_kernel_pair *kernel, u32 fence_sync_flags)
+{
+       /* Idle syncs are always signaled */
+       if (!kernel)
+               return 1;
+
+       return is_sync_checkpoint_met(kernel->fence_sync, fence_sync_flags);
+}
+
+#ifdef DEBUG_OUTPUT
+
+static char *debug_info_timeline(struct pvr_sync_timeline *timeline)
+{
+       static char info[256];
+
+       if (timeline->kernel->fence_sync) {
+               snprintf(info, sizeof(info),
+                        "n='%s' id=%u fw=0x%x tl_curr=%u tl_next=%u",
+                        timeline->obj ? timeline->obj->name : "?",
+                        timeline->kernel->fence_sync->id,
+                        timeline->kernel->fence_sync->vaddr,
+                        get_sync_prim_value(timeline->kernel->fence_sync),
+                        timeline->kernel->fence_sync->next_value);
+       } else {
+               snprintf(info, sizeof(info),
+                        "n='%s' id=n/a fw=n/a tl_curr=n/a tl_next=n/a",
+                        timeline->obj ? timeline->obj->name : "?");
+       }
+
+       return info;
+}
+
+static char *debug_info_sync_pt(struct sync_pt *pt)
+{
+       //struct pvr_sync_timeline *timeline = get_timeline_pt(pt);
+       //struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)pt;
+       //struct pvr_sync_kernel_pair *kernel = pvr_pt->sync_data->kernel;
+       static char info[256], info1[256];
+
+#if 1
+       info[0] = '\0';
+       info1[0] = '\0';
+#else
+       if (kernel) {
+               if (timeline->kernel->fence_sync) {
+                       snprintf(info, sizeof(info),
+                                "status=%d tl_taken=%u ref=%d # sync: id=%u fw=0x%x curr=%u next=%u%s # tl: %s",
+                                pvr_sync_has_kernel_signaled(kernel,
+                                                             PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT),
+                                pvr_pt->sync_data->timeline_update_value,
+                                atomic_read(&pvr_pt->sync_data->kref.refcount),
+                                kernel->fence_sync->id,
+                                kernel->fence_sync->vaddr,
+                                get_sync_prim_value(timeline->kernel->fence_sync),
+                                kernel->fence_sync->next_value,
+                                info1, debug_info_timeline(timeline));
+               }
+       } else {
+               snprintf(info, sizeof(info),
+                        "status=%d tl_taken=%u ref=%d # sync: idle # tl: %s",
+                        pvr_sync_has_kernel_signaled(kernel, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT),
+                        pvr_pt->sync_data->timeline_update_value,
+                        atomic_read(&pvr_pt->sync_data->kref.refcount),
+                        debug_info_timeline(timeline));
+       }
+#endif
+       return info;
+}
+
+#endif /* DEBUG_OUTPUT */
+
+static u32 sync_pool_get_callers;
+static enum PVRSRV_ERROR_TAG
+sync_pool_get(struct pvr_sync_native_sync_prim **_sync,
+             const char *class_name, u8 type)
+{
+       struct pvr_sync_native_sync_prim *sync;
+       enum PVRSRV_ERROR_TAG error = PVRSRV_OK;
+       u32 sync_addr;
+
+       mutex_lock(&sync_pool_mutex);
+       sync_pool_get_callers++;
+
+       if (list_empty(&sync_pool_free_list)) {
+               /* If there is nothing in the pool, create a new sync prim. */
+               sync = kmalloc(sizeof(*sync), GFP_KERNEL);
+               if (!sync) {
+                       pr_err("pvr_sync2: %s: Failed to allocate sync data\n",
+                              __func__);
+                       error = PVRSRV_ERROR_OUT_OF_MEMORY;
+                       goto err_unlock;
+               }
+
+               error = SyncPrimAlloc(pvr_sync_data.sync_prim_context,
+                                     &sync->client_sync, class_name);
+               if (error != PVRSRV_OK) {
+                       pr_err("pvr_sync2: %s: Failed to allocate sync prim (%s)\n",
+                              __func__, PVRSRVGetErrorString(error));
+                       goto err_free;
+               }
+
+               error = SyncPrimGetFirmwareAddr(sync->client_sync, &sync_addr);
+               if (error != PVRSRV_OK) {
+                       pr_err("pvr_sync2: %s: Failed to get FW address (%s)\n",
+                              __func__, PVRSRVGetErrorString(error));
+                       goto err_sync_prim_free;
+               }
+               sync->vaddr = sync_addr;
+
+               list_add_tail(&sync->list, &sync_pool_active_list);
+               ++sync_pool_created;
+       } else {
+               sync = list_first_entry(&sync_pool_free_list,
+                                       struct pvr_sync_native_sync_prim, list);
+               list_move_tail(&sync->list, &sync_pool_active_list);
+               --sync_pool_size;
+               ++sync_pool_reused;
+       }
+
+       sync->id = atomic_inc_return(&pvr_sync_data.sync_id);
+       sync->type = type;
+
+       strlcpy(sync->class, class_name, sizeof(sync->class));
+       /* It's crucial to reset the sync to zero */
+       set_sync_prim_value(sync, 0);
+       sync->next_value = 0;
+
+       *_sync = sync;
+
+err_unlock:
+       sync_pool_get_callers--;
+       mutex_unlock(&sync_pool_mutex);
+       return error;
+
+err_sync_prim_free:
+       SyncPrimFree(sync->client_sync);
+
+err_free:
+       kfree(sync);
+       goto err_unlock;
+}
+
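+/* Return a sync prim to the free pool, or free it outright once the pool
+ * already holds SYNC_MAX_POOL_SIZE entries. The prim's value is set to a
+ * marker (0xffffffff for pooled, 0xdeadbeef for freed) to flag stale use.
+ */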
+static u32 sync_pool_put_callers;
+
+static void sync_pool_put(struct pvr_sync_native_sync_prim *sync)
+{
+       mutex_lock(&sync_pool_mutex);
+       sync_pool_put_callers++;
+
+       if (sync_pool_size < SYNC_MAX_POOL_SIZE) {
+               /* Mark it as unused */
+               set_sync_prim_value(sync, 0xffffffff);
+
+               list_move(&sync->list, &sync_pool_free_list);
+               ++sync_pool_size;
+       } else {
+               /* Mark it as invalid */
+               set_sync_prim_value(sync, 0xdeadbeef);
+
+               list_del(&sync->list);
+               SyncPrimFree(sync->client_sync);
+               kfree(sync);
+       }
+
+       sync_pool_put_callers--;
+       mutex_unlock(&sync_pool_mutex);
+}
+
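+/* Free every sync prim currently sitting on the free pool. */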
+static void sync_pool_clear(void)
+{
+       struct pvr_sync_native_sync_prim *sync, *n;
+
+       mutex_lock(&sync_pool_mutex);
+
+       list_for_each_entry_safe(sync, n, &sync_pool_free_list, list) {
+               /* Mark it as invalid */
+               set_sync_prim_value(sync, 0xdeadbeef);
+
+               list_del(&sync->list);
+               SyncPrimFree(sync->client_sync);
+               kfree(sync);
+               --sync_pool_size;
+       }
+
+       mutex_unlock(&sync_pool_mutex);
+}
+
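+/* Debug-request callback: at medium verbosity, dump every checkpoint on the
+ * active list and then every timeline (with its per-timeline checkpoints)
+ * via the supplied debug printf function.
+ */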
+static void pvr_sync_debug_request(void *hDebugRequestHandle,
+                                  u32 ui32VerbLevel,
+                                  DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                  void *pvDumpDebugFile)
+{
+       struct pvr_sync_timeline *tl;
+       struct pvr_sync_native_sync_checkpoint *sync;
+       unsigned long flags;
+
+       static const char *const type_names[] = {
+               "Timeline", "Fence", "Cleanup",
+               "Foreign Fence", "Foreign Cleanup"
+       };
+
+       if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM)) {
+               /* If timeline_list_lock and pvr_sync_pt_active_list_spinlock
+                * are acquired together, timeline_list_lock must always be
+                * acquired first.
+                */
+               spin_lock_irqsave(&timeline_list_lock, flags);
+               spin_lock(&pvr_sync_pt_active_list_spinlock);
+
+               PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+                                 "------[ Native Fence Sync: timelines ]------");
+               PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+                                 "foreign timeline:\n");
+
+               list_for_each_entry(sync, &pvr_sync_pt_active_list, list) {
+                       BUG_ON(sync->type >= ARRAY_SIZE(type_names));
+
+                       PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+                                         "  @%u: fwaddr=%#08x enqu=%u ref=%u state=%s %s (%s)\n",
+                                         sync->id,
+                                         sync->vaddr,
+                                         SyncCheckpointGetEnqueuedCount(sync->client_sync_checkpoint),
+                                         SyncCheckpointGetReferenceCount(sync->client_sync_checkpoint),
+                                         SyncCheckpointGetStateString(sync->client_sync_checkpoint),
+                                         sync->class,
+                                         type_names[sync->type]);
+               }
+
+               list_for_each_entry(tl, &timeline_list, list) {
+                       if (tl->kernel->fence_sync) {
+                               PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+                                                 "%s: @%u/%u refs=%u fwaddr=%#08x\n",
+                                                 tl->obj->name,
+                                                 get_sync_prim_value(tl->kernel->fence_sync),
+                                                 tl->kernel->fence_sync->next_value,
+                                                 refcount_read(&tl->kref.refcount),
+                                                 tl->kernel->fence_sync->vaddr);
+                       } else {
+                               PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+                                                 "%s: refs=%u\n",
+                                                 tl->obj->name,
+                                                 refcount_read(&tl->kref.refcount));
+                       }
+
+                       list_for_each_entry(sync, &tl->sync_list, list) {
+                               BUG_ON(sync->type >= ARRAY_SIZE(type_names));
+
+                               PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+                                                 "  @%u: fwaddr=%#08x enq=%u ref=%u state=%s %s (%s)\n",
+                                                 sync->id,
+                                                 sync->vaddr,
+                                                 SyncCheckpointGetEnqueuedCount(sync->client_sync_checkpoint),
+                                                 SyncCheckpointGetReferenceCount(sync->client_sync_checkpoint),
+                                                 SyncCheckpointGetStateString(sync->client_sync_checkpoint),
+                                                 sync->class,
+                                                 type_names[sync->type]);
+                       }
+               }
+
+               spin_unlock(&pvr_sync_pt_active_list_spinlock);
+               spin_unlock_irqrestore(&timeline_list_lock, flags);
+       }
+}
+
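+/* sync_timeline_ops.dup: create a second sync_pt that shares (and takes a
+ * reference on) the original point's sync_data.
+ */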
+static struct sync_pt *pvr_sync_dup(struct sync_pt *sync_pt)
+{
+       struct pvr_sync_pt *pvr_pt_a = (struct pvr_sync_pt *)sync_pt;
+       struct pvr_sync_pt *pvr_pt_b = NULL;
+
+       DPF("%s: # %s", __func__, debug_info_sync_pt(sync_pt));
+
+       pvr_pt_b = (struct pvr_sync_pt *)
+               sync_pt_create(sync_pt_parent(sync_pt),
+                              sizeof(*pvr_pt_b));
+       if (!pvr_pt_b) {
+               pr_err("pvr_sync2: %s: Failed to dup sync pt\n", __func__);
+               goto err_out;
+       }
+
+       kref_get(&pvr_pt_a->sync_data->kref);
+
+       pvr_pt_b->sync_data = pvr_pt_a->sync_data;
+
+err_out:
+       return (struct sync_pt *)pvr_pt_b;
+}
+
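+/* sync_timeline_ops.has_signaled: a point counts as signalled once its
+ * sync checkpoint is met (points with no kernel data are idle and always
+ * signalled).
+ */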
+static int pvr_sync_has_signaled(struct sync_pt *sync_pt)
+{
+       struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt;
+
+       DPF("%s: # %s", __func__, debug_info_sync_pt(sync_pt));
+
+       return pvr_sync_has_kernel_signaled(pvr_pt->sync_data->kernel,
+                                           PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT);
+}
+
+static int pvr_sync_compare(struct sync_pt *a, struct sync_pt *b)
+{
+       u32 a1 = ((struct pvr_sync_pt *)a)->sync_data->timeline_update_value;
+       u32 b1 = ((struct pvr_sync_pt *)b)->sync_data->timeline_update_value;
+
+       DPF("%s: a # %s", __func__, debug_info_sync_pt(a));
+       DPF("%s: b # %s", __func__, debug_info_sync_pt(b));
+
+       if (a1 == b1)
+               return 0;
+
+       /* Take integer wrapping into account */
+       return ((s32)a1 - (s32)b1) < 0 ? -1 : 1;
+}
+
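+/* Log an error if the given sync prim has not reached its next value, to
+ * make it obvious when pvr_sync is stuck waiting on it (no-op on
+ * NO_HARDWARE builds).
+ */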
+static void check_for_sync_prim(struct pvr_sync_native_sync_prim *sync)
+{
+#ifndef NO_HARDWARE
+       void *event_object;
+       enum PVRSRV_ERROR_TAG error = PVRSRV_OK;
+
+       if (!sync || is_sync_prim_met(sync))
+               return;
+
+       error = OSEventObjectOpen(
+                       pvr_sync_data.event_object_handle,
+                       &event_object);
+       if (error != PVRSRV_OK) {
+               pr_err("pvr_sync2: %s: Error opening event object (%s)\n",
+                      __func__, PVRSRVGetErrorString(error));
+               return;
+       }
+
+       if (!is_sync_prim_met(sync)) {
+               /* This debug will indicate if pvr_sync is stuck waiting for a sync prim */
+               pr_err("pvr_sync2: %s: sync prim<%p> %s (%u != %u)\n",
+                      __func__, sync->client_sync, sync->class,
+                      *(sync->client_sync->pui32LinAddr), sync->next_value);
+       }
+
+       OSEventObjectClose(event_object);
+#endif /* NO_HARDWARE */
+}
+
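+/* Put a kernel pair on the checkpoint free list and kick the deferred-free
+ * worker to dispose of it later.
+ */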
+static void pvr_sync_defer_free_checkpoints(struct pvr_sync_kernel_pair *kernel)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&sync_checkpoint_free_list_spinlock, flags);
+       list_add_tail(&kernel->list, &sync_checkpoint_free_list);
+       spin_unlock_irqrestore(&sync_checkpoint_free_list_spinlock, flags);
+
+       queue_work(pvr_sync_data.defer_free_wq, &pvr_sync_data.defer_free_work);
+}
+
+static void pvr_sync_timeline_defer_free(struct pvr_sync_timeline_kernel_pair *kernel)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&timeline_free_list_spinlock, flags);
+       list_add_tail(&kernel->list, &timeline_free_list);
+       spin_unlock_irqrestore(&timeline_free_list_spinlock, flags);
+
+       queue_work(pvr_sync_data.defer_free_wq, &pvr_sync_data.defer_free_work);
+}
+
+/* This function assumes the timeline_list_lock is held while it runs */
+
+static void pvr_sync_destroy_timeline_locked(struct kref *kref)
+{
+       unsigned long flags;
+       struct pvr_sync_timeline *timeline = (struct pvr_sync_timeline *)
+               container_of(kref, struct pvr_sync_timeline, kref);
+
+       pvr_sync_timeline_defer_free(timeline->kernel);
+       /* timeline_list_lock is already locked so it's safe to acquire this here */
+       spin_lock_irqsave(&pvr_sync_pt_active_list_spinlock, flags);
+       list_del(&timeline->sync_list);
+       spin_unlock_irqrestore(&pvr_sync_pt_active_list_spinlock, flags);
+       list_del(&timeline->list);
+       kfree(timeline);
+}
+
+static void pvr_sw_sync_destroy_timeline(struct kref *kref)
+{
+       struct pvr_sw_sync_timeline *pvr_sw_timeline = (struct pvr_sw_sync_timeline *)
+               container_of(kref, struct pvr_sw_sync_timeline, kref);
+       struct sync_timeline *obj = (void *)pvr_sw_timeline->sw_sync_timeline;
+       u32 unsignalled_points = 0;
+
+       /* signal any unsignalled points on the sw timeline */
+       while (pvr_sw_timeline->current_value < pvr_sw_timeline->next_value - 1) {
+               pvr_sync_sw_timeline_advance(pvr_sw_timeline, NULL);
+               unsignalled_points++;
+       }
+
+       if (unsignalled_points > 0) {
+               pr_err("pvr_sync2: %s: signalled %d sw sync pts for timeline <%p> %s\n",
+                      __func__, unsignalled_points, pvr_sw_timeline, obj->name);
+       }
+
+       sync_timeline_destroy(obj);
+       kfree(pvr_sw_timeline);
+}
+
+static void pvr_sync_release_timeline(struct sync_timeline *obj)
+{
+       struct pvr_sync_timeline *timeline = get_timeline(obj);
+       unsigned long flags;
+
+       /* If pvr_sync_open failed after calling sync_timeline_create, this
+        * can be called with a timeline that has no timeline sync and has
+        * not been added to our timeline list. Use a NULL timeline to
+        * detect and handle this condition.
+        */
+       if (!timeline)
+               return;
+
+       DPF("%s: # %s", __func__, debug_info_timeline(timeline));
+
+       if (timeline->kernel->fence_sync)
+               check_for_sync_prim(timeline->kernel->fence_sync);
+
+       /* Take timeline_list_lock before clearing timeline->obj, to
+        * avoid the chance of doing so while the list is being iterated
+        * by pvr_sync_update_all_timelines().
+        */
+       spin_lock_irqsave(&timeline_list_lock, flags);
+
+       /* Whether or not we're the last reference, obj is going away
+        * after this function returns, so remove our back reference
+        * to it.
+        */
+       timeline->obj = NULL;
+
+       /* This might be the last reference to the timeline object.
+        * If so, we'll go ahead and delete it now.
+        */
+       kref_put(&timeline->kref, pvr_sync_destroy_timeline_locked);
+
+       spin_unlock_irqrestore(&timeline_list_lock, flags);
+}
+
+/* The print_obj() and print_pt() functions have been removed, so we're forced
+ * to use the timeline_value_str() and pt_value_str() functions. These are
+ * worse because we're limited to 64 characters, and the strings for sync
+ * pts have to be formatted like:
+ *
+ *   pt active: pt_info / tl_info
+ *
+ * For us, the tl_info is complicated and doesn't need to be repeated over
+ * and over. So try to detect the way sync_print_pt() calls the two value_str
+ * functions and change what pvr_sync_timeline_value_str() returns dynamically.
+ */
+static struct sync_timeline *last_pt_timeline;
+
+static void pvr_sync_timeline_value_str(struct sync_timeline *sync_timeline,
+                                       char *str, int size)
+{
+       struct pvr_sync_timeline *timeline = get_timeline(sync_timeline);
+
+       if (timeline->kernel->fence_sync) {
+               if (sync_timeline != last_pt_timeline) {
+                       snprintf(str, size, "%u 0x%x %u/%u",
+                                timeline->kernel->fence_sync->id,
+                                timeline->kernel->fence_sync->vaddr,
+                                get_sync_prim_value(timeline->kernel->fence_sync),
+                                timeline->kernel->fence_sync->next_value);
+               } else {
+                       snprintf(str, size, "%u",
+                                get_sync_prim_value(timeline->kernel->fence_sync));
+               }
+       } else {
+               snprintf(str, size, "n/a");
+       }
+}
+
+static void pvr_sync_pt_value_str(struct sync_pt *sync_pt, char *str, int size)
+{
+       struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt;
+       struct pvr_sync_kernel_pair *kernel;
+
+       if (!pvr_pt->sync_data)
+               return;
+
+       kernel = pvr_pt->sync_data->kernel;
+
+       /* Messages must be at most 64 bytes (including the null terminator):
+        *
+        * 123456789012345678901234567890123456789012345678901234567890123
+        *
+        * ID     FW ADDR    C/N # REF TAKEN
+        * 123456 0xdeadbeef 0/1 # r=2 123456
+        *
+        * ID     FW ADDR    C/N # ID     FW ADDR    C/N # REF TAKEN
+        * 123456 0xdeadbeef 0/1 # 123456 0xdeadbeef 0/1 # r=2 123456
+        */
+       if (kernel && kernel->fence_sync) {
+               snprintf(str, size,
+                        "%u 0x%x %c e=%d r=%d %u",
+                        kernel->fence_sync->id,
+                        kernel->fence_sync->vaddr,
+                        get_sync_checkpoint_char(kernel->fence_sync),
+                        SyncCheckpointGetEnqueuedCount(kernel->fence_sync->client_sync_checkpoint),
+                        refcount_read(&pvr_pt->sync_data->kref.refcount),
+                        pvr_pt->sync_data->timeline_update_value);
+       } else {
+               snprintf(str, size, "idle # r=%d %u",
+                        refcount_read(&pvr_pt->sync_data->kref.refcount),
+                        pvr_pt->sync_data->timeline_update_value);
+       }
+
+       last_pt_timeline = sync_pt_parent(sync_pt);
+}
+
+/* pvr_sync_create_sync_data() should be called with the bridge lock held */
+static struct pvr_sync_data *
+pvr_sync_create_sync_data(struct pvr_sync_timeline *timeline,
+               const s32 timeline_fd,
+               PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+               const char *pt_name)
+{
+       struct pvr_sync_data *sync_data = NULL;
+       enum PVRSRV_ERROR_TAG error;
+       unsigned long flags;
+
+       sync_data = kzalloc(sizeof(*sync_data), GFP_KERNEL);
+       if (!sync_data)
+               goto err_out;
+
+       kref_init(&sync_data->kref);
+
+       sync_data->kernel = kzalloc(sizeof(*sync_data->kernel), GFP_KERNEL);
+
+       if (!sync_data->kernel)
+               goto err_free_data;
+
+       INIT_LIST_HEAD(&sync_data->kernel->list);
+
+       sync_data->kernel->fence_sync =
+               kzalloc(sizeof(struct pvr_sync_native_sync_checkpoint), GFP_KERNEL);
+       if (!sync_data->kernel->fence_sync)
+               goto err_free_kernel;
+       INIT_LIST_HEAD(&sync_data->kernel->fence_sync->list);
+
+       error = SyncCheckpointAlloc(psSyncCheckpointContext,
+                                   (PVRSRV_TIMELINE)timeline_fd,
+                                   PVRSRV_NO_FENCE,
+                                   pt_name,
+                                   &sync_data->kernel->fence_sync->client_sync_checkpoint);
+       if (error != PVRSRV_OK) {
+               pr_err("pvr_sync2: %s: Failed to allocate sync checkpoint (%s)\n",
+                      __func__, PVRSRVGetErrorString(error));
+               goto err_free_fence;
+       }
+
+       sync_data->kernel->fence_sync->foreign_sync_fence = NULL;
+       sync_data->kernel->fence_sync->foreign_sync_fence_name[0] = '\0';
+
+       sync_data->kernel->fence_sync->vaddr =
+               SyncCheckpointGetFirmwareAddr(sync_data->kernel->fence_sync->client_sync_checkpoint);
+       sync_data->kernel->fence_sync->id =
+               SyncCheckpointGetId(sync_data->kernel->fence_sync->client_sync_checkpoint);
+       sync_data->kernel->fence_sync->type = SYNC_PT_FENCE_TYPE;
+       strlcpy(sync_data->kernel->fence_sync->class, pt_name,
+                       sizeof(sync_data->kernel->fence_sync->class));
+
+       /* Update list (for debug) */
+       spin_lock_irqsave(&pvr_sync_pt_active_list_spinlock, flags);
+       list_add_tail(&sync_data->kernel->fence_sync->list, &timeline->sync_list);
+       spin_unlock_irqrestore(&pvr_sync_pt_active_list_spinlock, flags);
+
+err_out:
+       return sync_data;
+
+err_free_fence:
+       kfree(sync_data->kernel->fence_sync);
+err_free_kernel:
+       kfree(sync_data->kernel);
+err_free_data:
+       kfree(sync_data);
+       sync_data = NULL;
+       goto err_out;
+}
+
+static void pvr_sync_free_sync_data(struct kref *kref)
+{
+       struct pvr_sync_data *sync_data = (struct pvr_sync_data *)
+               container_of(kref, struct pvr_sync_data, kref);
+
+       if (sync_data->kernel)
+               pvr_sync_defer_free_checkpoints(sync_data->kernel);
+
+       kfree(sync_data);
+}
+
+static void pvr_sync_free_sync(struct sync_pt *sync_pt)
+{
+       struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt;
+
+       DPF("%s: # %s", __func__, debug_info_sync_pt(sync_pt));
+
+       kref_put(&pvr_pt->sync_data->kref, pvr_sync_free_sync_data);
+}
+
+/* Forward declaration: referenced by pvr_sync_timeline_ops below */
+static int pvr_sync_fill_driver_data(struct sync_pt *, void *, int);
+
+static struct sync_timeline_ops pvr_sync_timeline_ops = {
+       .driver_name        = PVRSYNC_MODNAME,
+       .dup                = pvr_sync_dup,
+       .has_signaled       = pvr_sync_has_signaled,
+       .compare            = pvr_sync_compare,
+       .free_pt            = pvr_sync_free_sync,
+       .release_obj        = pvr_sync_release_timeline,
+       .timeline_value_str = pvr_sync_timeline_value_str,
+       .pt_value_str       = pvr_sync_pt_value_str,
+       .fill_driver_data   = pvr_sync_fill_driver_data,
+};
+
+static inline bool is_pvr_timeline(struct sync_timeline *obj)
+{
+       return obj->ops == &pvr_sync_timeline_ops;
+}
+
+static inline bool is_pvr_timeline_pt(struct sync_pt *pt)
+{
+       return is_pvr_timeline(sync_pt_parent(pt));
+}
+
+static int
+pvr_sync_fill_driver_data(struct sync_pt *sync_pt, void *data, int size)
+{
+       struct pvr_sync_pt_info *info = (struct pvr_sync_pt_info *)data;
+       struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt;
+       struct pvr_sync_data *sync_data = pvr_pt->sync_data;
+       struct pvr_sync_kernel_pair *kernel = sync_data->kernel;
+
+       if (size < sizeof(*info))
+               return -ENOMEM;
+
+       info->ui32TlTaken = sync_data->timeline_update_value;
+
+       if (kernel && kernel->fence_sync) {
+               info->id         = kernel->fence_sync->id;
+               info->ui32FWAddr = kernel->fence_sync->vaddr;
+               info->ui32CurrOp = get_sync_checkpoint_value(kernel->fence_sync);
+               info->ui32NextOp = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+       } else {
+               info->id         = 0;
+               info->ui32FWAddr = 0;
+               info->ui32CurrOp = 0;
+               info->ui32NextOp = 0;
+       }
+
+       return sizeof(*info);
+}
+
+/* foreign sync handling */
+
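+/* Waiter callback invoked when a foreign fence signals: complete our mirror
+ * checkpoint if needed, defer the fence put and checkpoint free to the
+ * workqueues, then kick the status-check work so dependent tasks can make
+ * progress.
+ */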
+static void pvr_sync_foreign_sync_pt_signaled(struct sync_fence *fence,
+                                             struct sync_fence_waiter *_waiter)
+{
+       struct pvr_sync_fence_waiter *waiter =
+               (struct pvr_sync_fence_waiter *)_waiter;
+       unsigned long flags;
+
+       /* Complete the SW operation and free the sync if we can. If we can't,
+        * it will be checked by a later workqueue kick.
+        */
+       if (is_sync_checkpoint_errored(waiter->kernel->fence_sync,
+                                      PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT) ||
+           !is_sync_checkpoint_met(waiter->kernel->fence_sync,
+                                   PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) {
+
+               if (!is_sync_checkpoint_met(waiter->kernel->fence_sync,
+                                           PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+                       complete_sync_checkpoint(waiter->kernel->fence_sync,
+                                                PVRSRV_FENCE_FLAG_CTX_ATOMIC);
+
+               /* We can 'put' the fence now, but this function might be called in
+                * irq context so we must defer to WQ.
+                * This WQ is triggered in pvr_sync_defer_free, so adding it to the
+                * put list before that should guarantee it's cleaned up on the next
+                * wq run.
+                */
+               spin_lock_irqsave(&sync_fence_put_list_spinlock, flags);
+               list_add_tail(&waiter->sync_fence->list, &sync_fence_put_list);
+               spin_unlock_irqrestore(&sync_fence_put_list_spinlock, flags);
+
+               pvr_sync_defer_free_checkpoints(waiter->kernel);
+
+               /* The completed sw-sync may allow other tasks to complete,
+                * so we need to allow them to progress.
+                */
+               queue_work(NativeSyncGetFenceStatusWq(),
+                       &pvr_sync_data.check_status_work);
+
+               kfree(waiter);
+       } else {
+               pr_err("pvr_sync2: %s:   this sync checkpoint has already been signalled - "
+                      "why are we asked to do this more than once?!\n", __func__);
+       }
+}
+
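+/* Mirror a foreign (non-pvr) fence with a local sync checkpoint: allocate a
+ * foreign-type checkpoint, take an extra reference for the firmware, and
+ * register an async waiter that completes the checkpoint when the foreign
+ * fence signals.
+ */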
+static PSYNC_CHECKPOINT
+pvr_sync_create_waiter_for_foreign_sync(int fd, PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext)
+{
+       struct pvr_sync_kernel_pair *kernel = NULL;
+       struct pvr_sync_fence_waiter *waiter;
+       struct pvr_sync_fence *sync_fence;
+       PSYNC_CHECKPOINT checkpoint = NULL;
+       struct sync_fence *fence;
+       enum PVRSRV_ERROR_TAG error;
+       int err;
+       unsigned long flags;
+
+       fence = sync_fence_fdget(fd);
+       if (!fence) {
+               pr_err("pvr_sync2: %s: Failed to take reference on fence\n",
+                      __func__);
+               goto err_out;
+       }
+
+       kernel = kmalloc(sizeof(*kernel), GFP_KERNEL);
+       if (!kernel) {
+               pr_err("pvr_sync2: %s: Failed to allocate sync kernel\n",
+                      __func__);
+               goto err_put_fence;
+       }
+
+       sync_fence = kmalloc(sizeof(*sync_fence), GFP_KERNEL);
+       if (!sync_fence) {
+               pr_err("pvr_sync2: %s: Failed to allocate pvr sync fence\n",
+                      __func__);
+               goto err_free_kernel;
+       }
+
+       sync_fence->fence = fence;
+
+       kernel->fence_sync = kzalloc(sizeof(struct pvr_sync_native_sync_checkpoint), GFP_KERNEL);
+       if (!kernel->fence_sync)
+               goto err_free_fence;
+
+       INIT_LIST_HEAD(&kernel->fence_sync->list);
+
+       /* Create sync checkpoint for the foreign sync, with an invalid
+        * timeline (as we do not know it)
+        */
+       error = SyncCheckpointAlloc(psSyncCheckpointContext,
+                                   SYNC_CHECKPOINT_FOREIGN_CHECKPOINT,
+                                   fd, /* fence_to_resolve */
+                                   fence->name,
+                                   &checkpoint);
+       if (error != PVRSRV_OK) {
+               pr_err("pvr_sync2: %s: Failed to allocate sync checkpoint (%s)\n",
+                      __func__, PVRSRVGetErrorString(error));
+               goto err_free_fence_sync;
+       }
+       kernel->fence_sync->client_sync_checkpoint = checkpoint;
+
+       kernel->fence_sync->foreign_sync_fence = fence;
+       strlcpy(kernel->fence_sync->foreign_sync_fence_name,
+               fence->name,
+               sizeof(kernel->fence_sync->foreign_sync_fence_name));
+
+       kernel->fence_sync->vaddr =
+               SyncCheckpointGetFirmwareAddr(kernel->fence_sync->client_sync_checkpoint);
+       kernel->fence_sync->id =
+               SyncCheckpointGetId(kernel->fence_sync->client_sync_checkpoint);
+       kernel->fence_sync->type = SYNC_PT_FOREIGN_FENCE_TYPE;
+       strlcpy(kernel->fence_sync->class, fence->name, sizeof(kernel->fence_sync->class));
+
+       /* The custom waiter structure is freed in the waiter callback */
+       waiter = kmalloc(sizeof(*waiter), GFP_KERNEL);
+       if (!waiter) {
+               pr_err("pvr_sync2: %s: Failed to allocate waiter\n", __func__);
+               goto err_free_cleanup_sync;
+       }
+
+       waiter->kernel = kernel;
+       waiter->sync_fence = sync_fence;
+
+       /* Take an extra ref on the checkpoint for the reference handed over to
+        * the firmware.
+        * This must be done before the waiter_init, as the waiter can be called
+        * and its reference dropped at _any time_.
+        */
+       SyncCheckpointTakeRef(checkpoint);
+
+       sync_fence_waiter_init(&waiter->waiter,
+                              pvr_sync_foreign_sync_pt_signaled);
+
+       spin_lock_irqsave(&pvr_sync_pt_active_list_spinlock, flags);
+       err = sync_fence_wait_async(fence, &waiter->waiter);
+       if (err) {
+               spin_unlock_irqrestore(&pvr_sync_pt_active_list_spinlock, flags);
+               /* -1 means the fence was broken, 1 means the fence already
+                * signalled. In either case, roll back what we've done and
+                * skip using this sync_pt for synchronisation.
+                */
+               goto err_put_checkpoint_ref;
+       }
+
+       /* Update list (for debug) */
+       list_add_tail(&kernel->fence_sync->list, &pvr_sync_pt_active_list);
+       spin_unlock_irqrestore(&pvr_sync_pt_active_list_spinlock, flags);
+
+err_out:
+       return checkpoint;
+err_put_checkpoint_ref:
+       SyncCheckpointDropRef(checkpoint);
+       kfree(waiter);
+err_free_cleanup_sync:
+       SyncCheckpointFree(checkpoint);
+       checkpoint = NULL;
+err_free_fence_sync:
+       kfree(kernel->fence_sync);
+       kernel->fence_sync = NULL;
+err_free_fence:
+       kfree(sync_fence);
+       sync_fence = NULL;
+err_free_kernel:
+       kfree(kernel);
+       kernel = NULL;
+err_put_fence:
+       sync_fence_put(fence);
+       goto err_out;
+}
+
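+/* Create a new sync point on the given timeline: allocate its sync data and
+ * checkpoint, wrap it in a sync_pt and take the timeline's next update
+ * value for it.
+ */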
+static
+struct pvr_sync_pt *pvr_sync_create_pt(struct pvr_sync_timeline *timeline,
+               const s32 timeline_fd,
+               PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+               const char *pt_name)
+{
+       struct pvr_sync_data *sync_data;
+       struct pvr_sync_pt *pvr_pt = NULL;
+
+       sync_data = pvr_sync_create_sync_data(timeline, timeline_fd,
+                       psSyncCheckpointContext, pt_name);
+       if (!sync_data) {
+               pr_err("pvr_sync2: %s: Failed to create sync data\n", __func__);
+               goto err_out;
+       }
+
+       pvr_pt = (struct pvr_sync_pt *)
+               sync_pt_create(timeline->obj, sizeof(struct pvr_sync_pt));
+       if (!pvr_pt) {
+               pr_err("pvr_sync2: %s: Failed to create sync pt\n", __func__);
+               goto err_rollback_fence;
+       }
+
+       pvr_pt->sync_data = sync_data;
+
+       pvr_pt->timeline = timeline;
+
+       /* Increment the timeline next value */
+       pvr_pt->sync_data->timeline_update_value =
+               timeline->kernel->fence_sync->next_value++;
+
+       return pvr_pt;
+
+err_rollback_fence:
+       /* Error the sync checkpoint (so the deferred free considers it 'met') */
+       error_sync_checkpoint(sync_data->kernel->fence_sync, PVRSRV_FENCE_FLAG_NONE);
+       kref_put(&sync_data->kref, pvr_sync_free_sync_data);
+err_out:
+       return NULL;
+}
+
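+/* Per-handle init: create a sync_timeline named after the calling task,
+ * wrap it in a pvr_sync_timeline and add it to the global timeline list.
+ */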
+int pvr_sync_api_init(void *file_handle, void **api_priv)
+{
+       struct pvr_sync_timeline_wrapper *timeline_wrapper;
+       struct pvr_sync_timeline *timeline;
+       char task_comm[TASK_COMM_LEN];
+       unsigned long flags;
+
+       get_task_comm(task_comm, current);
+
+       timeline_wrapper = (struct pvr_sync_timeline_wrapper *)
+               sync_timeline_create(&pvr_sync_timeline_ops,
+                       sizeof(*timeline_wrapper), task_comm);
+       if (!timeline_wrapper) {
+               pr_err("pvr_sync2: %s: sync_timeline_create failed\n", __func__);
+               goto err_out;
+       }
+
+       timeline = kmalloc(sizeof(*timeline), GFP_KERNEL);
+       if (!timeline) {
+               pr_err("pvr_sync2: %s: Out of memory\n", __func__);
+               goto err_free_timeline_wrapper;
+       }
+
+       timeline->kernel = kzalloc(sizeof(*timeline->kernel),
+                                  GFP_KERNEL);
+       if (!timeline->kernel) {
+               pr_err("pvr_sync2: %s: Out of memory\n", __func__);
+               goto err_free_timeline;
+       }
+
+       timeline_wrapper->timeline = timeline;
+
+       timeline->obj = &timeline_wrapper->obj;
+       kref_init(&timeline->kref);
+       INIT_LIST_HEAD(&timeline->sync_list);
+
+       spin_lock_irqsave(&timeline_list_lock, flags);
+       list_add_tail(&timeline->list, &timeline_list);
+       spin_unlock_irqrestore(&timeline_list_lock, flags);
+
+       DPF("%s: # %s", __func__, debug_info_timeline(timeline));
+
+       *api_priv = (void *)timeline_wrapper;
+
+       return 0;
+
+err_free_timeline:
+       kfree(timeline);
+
+       /* Use a NULL timeline to detect this partially-setup timeline in the
+        * timeline release function (called by sync_timeline_destroy) and
+        * handle it appropriately.
+        */
+       timeline_wrapper->timeline = NULL;
+err_free_timeline_wrapper:
+       sync_timeline_destroy(&timeline_wrapper->obj);
+err_out:
+       return -ENOMEM;
+}
+
+int pvr_sync_api_deinit(void *api_priv, bool is_sw)
+{
+       if (!api_priv)
+               return 0;
+
+       if (!is_sw) {
+               struct sync_timeline *obj = api_priv;
+
+               DPF("%s: # %s", __func__,
+                   debug_info_timeline(get_timeline(obj)));
+
+               sync_timeline_destroy(obj);
+       } else {
+               struct pvr_sw_sync_timeline *pvr_sw_sync_timeline = api_priv;
+
+               /* SW timeline */
+               kref_put(&pvr_sw_sync_timeline->kref, pvr_sw_sync_destroy_timeline);
+       }
+       return 0;
+}
+
+/*
+ * This is the function that kick code will call in order to 'finalise' a
+ * created output fence just prior to returning from the kick function.
+ * The OS native sync code needs to implement a function meeting this
+ * specification - the implementation may be a nop if the OS does not need
+ * to perform any actions at this point.
+ *
+ * Input: fence_fd            The PVRSRV_FENCE to be 'finalised'. This value
+ *                            will have been returned by an earlier call to
+ *                            pvr_sync_create_fence().
+ * Input: finalise_data       The finalise data returned by an earlier call
+ *                            to pvr_sync_create_fence().
+ */
+static enum PVRSRV_ERROR_TAG
+pvr_sync_finalise_fence(PVRSRV_FENCE fence_fd, void *finalise_data)
+{
+       struct sync_fence *native_fence = (struct sync_fence *)finalise_data;
+
+       if (!native_fence || (fence_fd < 0))
+               return PVRSRV_ERROR_INVALID_PARAMS;
+
+       sync_fence_install(native_fence, fence_fd);
+       return PVRSRV_OK;
+}
+
+/*
+ * This is the function that kick code will call in order to obtain a new
+ * PVRSRV_FENCE from the OS native sync code and the PSYNC_CHECKPOINT used
+ * in that fence. The OS native sync code needs to implement a function
+ * meeting this specification.
+ *
+ * Input: device                   Not currently used.
+ * Input: fence_name               A string to annotate the fence with (for
+ *                                 debug).
+ * Input: timeline                 The timeline on which the new fence is to be
+ *                                 created.
+ * Output: new_fence               The new PVRSRV_FENCE to be returned by the
+ *                                 kick call.
+ * Output: fence_uid               Unique ID of the update fence.
+ * Output: fence_finalise_data     Pointer to data needed to finalise the fence.
+ * Output: new_checkpoint_handle   The PSYNC_CHECKPOINT used by the new fence.
+ */
+static enum PVRSRV_ERROR_TAG
+pvr_sync_create_fence(struct _PVRSRV_DEVICE_NODE_ *device,
+                     const char *fence_name,
+                     PVRSRV_TIMELINE new_fence_timeline,
+                     PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+                     PVRSRV_FENCE *new_fence,
+                     u64 *fence_uid,
+                     void **fence_finalise_data,
+                     PSYNC_CHECKPOINT *new_checkpoint_handle,
+                     void **timeline_update_sync,
+                     __u32 *timeline_update_value)
+{
+       PVRSRV_ERROR err;
+       PVRSRV_FENCE new_fence_fd = -1;
+       struct file *timeline_file;
+       struct sync_timeline *obj;
+       struct pvr_sync_timeline *timeline;
+       struct pvr_sync_pt *native_sync_point = NULL;
+       struct sync_fence *native_fence = NULL;
+       struct pvr_sync_kernel_pair *sync_kernel;
+
+       if (new_fence_timeline < 0 || !new_fence ||
+           !new_checkpoint_handle || !fence_finalise_data) {
+               err = PVRSRV_ERROR_INVALID_PARAMS;
+               goto err_out;
+       }
+
+       /* Reserve the new fence FD before doing anything else, so we do
+        * not fail part-way through the kick (e.g. by running out of FDs).
+        */
+       new_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+       if (new_fence_fd < 0) {
+               err = PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE;
+               goto err_out;
+       }
+
+       timeline_file = fget(new_fence_timeline);
+       if (!timeline_file) {
+               pr_err("pvr_sync2: %s: Failed to open supplied timeline fd (%d)\n",
+                       __func__, new_fence_timeline);
+               err = PVRSRV_ERROR_INVALID_PARAMS;
+               goto err_put_fd;
+       }
+
+       obj = pvr_sync_get_api_priv(timeline_file);
+       if (!obj) {
+               pr_err("pvr_sync2: %s: Supplied timeline not pvr_sync timeline\n",
+                       __func__);
+               err = PVRSRV_ERROR_INVALID_PARAMS;
+               goto err_put_timeline;
+       }
+
+       timeline = get_timeline(obj);
+       if (!timeline) {
+               pr_err("pvr_sync2: %s: Supplied timeline has no private data\n",
+                       __func__);
+               err = PVRSRV_ERROR_INVALID_PARAMS;
+               goto err_put_timeline;
+       }
+
+       /* If this timeline does not already have a sync prim, create one now */
+       if (!timeline->kernel->fence_sync) {
+               err = sync_pool_get(&timeline->kernel->fence_sync,
+                                   timeline->obj->name,
+                                   SYNC_TL_TYPE);
+
+               if (err != PVRSRV_OK) {
+                       pr_err("pvr_sync2: %s: Failed to allocate timeline sync prim (%s)\n",
+                              __func__, PVRSRVGetErrorString(err));
+                       err = PVRSRV_ERROR_OUT_OF_MEMORY;
+                       goto err_put_timeline;
+               }
+       }
+
+       native_sync_point = pvr_sync_create_pt(timeline, new_fence_timeline,
+                       psSyncCheckpointContext, fence_name);
+       if (!native_sync_point) {
+               pr_err("pvr_sync2: %s: Failed to create sync point\n",
+                       __func__);
+               err = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto err_put_timeline;
+       }
+
+       native_fence = sync_fence_create(fence_name, &native_sync_point->pt);
+       if (!native_fence) {
+               struct pvr_sync_native_sync_prim *timeline_prim =
+                       timeline->kernel->fence_sync;
+
+               pr_err("pvr_sync2: %s: Failed to create sync fence\n",
+                       __func__);
+               err = PVRSRV_ERROR_OUT_OF_MEMORY;
+
+               /* If the point was created but the fence failed to be
+                * created, the point must be manually freed as a
+                * fence has not yet taken ownership.
+                */
+               timeline_prim->next_value--;
+               /* Error the new fence's sync checkpoint
+                * (so the deferred free considers it 'met')
+                */
+               error_sync_checkpoint(native_sync_point->sync_data->kernel->fence_sync,
+                                     PVRSRV_FENCE_FLAG_NONE);
+               pvr_sync_free_sync(&native_sync_point->pt);
+               goto err_put_timeline;
+       }
+
+       sync_kernel = native_sync_point->sync_data->kernel;
+
+       /* For Linux, we do not return the fence fd here, but via
+        * pvr_sync_finalise_fence() - this is because once we
+        * associate the fd with the fence, it can only be closed
+        * from client code so it should only be done once we
+        * know we will definitely require it.
+        */
+       *new_fence = new_fence_fd;
+       *fence_finalise_data = (void *)native_fence;
+       *new_checkpoint_handle = sync_kernel->fence_sync->client_sync_checkpoint;
+
+       if (timeline_update_sync && timeline_update_value) {
+               *timeline_update_sync = (void *)timeline->kernel->fence_sync->client_sync;
+               *timeline_update_value = timeline->kernel->fence_sync->next_value;
+       }
+
+       *fence_uid = OSGetCurrentClientProcessIDKM();
+       *fence_uid = (*fence_uid << 32) | (new_fence_fd & U32_MAX);
+
+       fput(timeline_file);
+
+       return PVRSRV_OK;
+
+err_put_timeline:
+       fput(timeline_file);
+err_put_fd:
+       pr_err("pvr_sync2: %s: putting fd %d back to unused\n", __func__, new_fence_fd);
+       put_unused_fd(new_fence_fd);
+       *fence_uid = PVRSRV_NO_FENCE;
+err_out:
+       return err;
+}
+
+/*
+ * This is the function that kick code will call in order to 'rollback' a
+ * created output fence should an error occur when submitting the kick.
+ * The OS native sync code needs to implement a function meeting this
+ * specification.
+ *
+ * Input: fence_to_rollback The PVRSRV_FENCE to be 'rolled back'. The fence
+ *                          should be destroyed and any actions taken due to
+ *                          its creation that need to be undone should be
+ *                          reverted.
+ * Input: finalise_data     The finalise data for the fence to be 'rolled back'.
+ */
+static enum PVRSRV_ERROR_TAG
+pvr_sync_rollback_fence_data(PVRSRV_FENCE fence_to_rollback,
+                            void *fence_data_to_rollback)
+{
+       PVRSRV_ERROR err = PVRSRV_OK;
+       struct sync_fence *sync_fence = (struct sync_fence *)fence_data_to_rollback;
+       struct sync_pt *sync_pt;
+       struct pvr_sync_pt *pvr_pt = NULL;
+       int j = 0;
+
+       if (!sync_fence) {
+               pr_err("pvr_sync2: %s: Failed to recognise fence_to_rollback(%d)\n",
+                      __func__, fence_to_rollback);
+               err = PVRSRV_ERROR_INVALID_PARAMS;
+               goto err_out;
+       }
+
+       (void)j;
+       for_each_sync_pt(sync_pt, sync_fence, j) {
+               if (!is_pvr_timeline_pt(sync_pt)) {
+                       pr_err("pvr_sync2: %s: Fence(%d) contains non-pvr timeline sync_pt\n",
+                              __func__, fence_to_rollback);
+                       err = PVRSRV_ERROR_INVALID_PARAMS;
+                       goto err_out2;
+               }
+
+               pvr_pt = (struct pvr_sync_pt *)sync_pt;
+
+               SyncCheckpointError(pvr_pt->sync_data->kernel->fence_sync->client_sync_checkpoint,
+                                   PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT);
+
+               /* rollback timeline next_value */
+               pvr_pt->timeline->kernel->fence_sync->next_value--;
+       }
+
+       /* close the fence */
+       sync_fence_put(sync_fence);
+
+err_out2:
+       put_unused_fd(fence_to_rollback);
+
+err_out:
+       return err;
+}
+
+/*
+ * This is the function that kick code will call in order to obtain a list of
+ * the PSYNC_CHECKPOINTs for a given PVRSRV_FENCE passed to a kick function.
+ * The OS native sync code will allocate the memory to hold the returned list
+ * of PSYNC_CHECKPOINT ptrs. The caller will free this memory once it has
+ * finished referencing it.
+ *
+ * Input: fence                     The input (check) fence
+ * Output: nr_checkpoints           The number of PVRSRV_SYNC_CHECKPOINT ptrs
+ *                                  returned in the checkpoint_handles
+ *                                  parameter.
+ * Output: fence_uid                Unique ID of the check fence
+ * Input/Output: checkpoint_handles The returned list of PVRSRV_SYNC_CHECKPOINTs.
+ */
+static enum PVRSRV_ERROR_TAG
+pvr_sync_resolve_fence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+                      PVRSRV_FENCE fence_to_resolve, u32 *nr_checkpoints,
+                      PSYNC_CHECKPOINT **checkpoint_handles, u64 *fence_uid)
+{
+       PVRSRV_ERROR err = PVRSRV_OK;
+
+       if (!nr_checkpoints || !checkpoint_handles) {
+               err = PVRSRV_ERROR_INVALID_PARAMS;
+               goto err_out;
+       }
+
+       if (fence_to_resolve < 0) {
+               /* Null fence passed, so return 0 checkpoints */
+               *nr_checkpoints = 0;
+               *checkpoint_handles = NULL;
+               *fence_uid = 0;
+       } else {
+               struct sync_fence *sync_fence = sync_fence_fdget(fence_to_resolve);
+               struct sync_pt *sync_pt;
+               struct pvr_sync_kernel_pair *sync_kernel;
+               u32 points_on_fence = 0;
+               PSYNC_CHECKPOINT foreign_checkpoint = NULL;
+               PSYNC_CHECKPOINT *next_checkpoint;
+               bool add_foreign_sync = true;
+               int j = 0;
+
+               if (!sync_fence) {
+                       pr_err("pvr_sync2: %s: Failed to read sync private data for fd %d\n",
+                               __func__, fence_to_resolve);
+                       err = PVRSRV_ERROR_HANDLE_NOT_FOUND;
+                       goto err_out;
+               }
+
+               /* Alloc memory to hold list of PSYNC_CHECKPOINTs */
+               /* (Alloc memory for MAX_SYNC_CHECKPOINTS_PER_FENCE sync checkpoint handles) */
+               *checkpoint_handles =
+                       kmalloc_array(MAX_SYNC_CHECKPOINTS_PER_FENCE,
+                                     sizeof(PSYNC_CHECKPOINT), GFP_KERNEL);
+               if (!(*checkpoint_handles)) {
+                       pr_err("pvr_sync2: %s: Failed to alloc memory for returned list of sync checkpoints\n",
+                               __func__);
+                       err = PVRSRV_ERROR_OUT_OF_MEMORY;
+                       goto err_out2;
+               }
+
+               next_checkpoint = *checkpoint_handles;
+
+               (void)j;
+               for_each_sync_pt(sync_pt, sync_fence, j) {
+                       struct pvr_sync_pt *pvr_pt = NULL;
+
+                       /* Make sure that we do not overrun the memory we allocated */
+                       if (points_on_fence >= MAX_SYNC_CHECKPOINTS_PER_FENCE) {
+                               pr_err("pvr_sync2: Maximum number of sync checkpoints in a fence exceeded (greater than %d)",
+                                      MAX_SYNC_CHECKPOINTS_PER_FENCE);
+                               err = PVRSRV_ERROR_INVALID_PARAMS;
+
+                               for (j = 0; j < points_on_fence; j++)
+                                       SyncCheckpointDropRef((*checkpoint_handles)[j]);
+
+                               kfree(*checkpoint_handles);
+                               goto err_out2;
+                       }
+
+                       if (is_pvr_timeline_pt(sync_pt)) {
+                               pvr_pt = (struct pvr_sync_pt *)sync_pt;
+                               sync_kernel = pvr_pt->sync_data->kernel;
+
+                               if (!sync_kernel ||
+                                   is_sync_checkpoint_met(sync_kernel->fence_sync,
+                                                          PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) {
+                                       continue;
+                               }
+
+                               /* Take ref on sync_checkpoint - this will be dropped by the
+                                * caller (Kick code) once it has incremented the checkpoint's
+                                * CCB enqueued count. We only really need to do this for
+                                * foreign sync checkpoints, to prevent the sync_checkpoint
+                                * from being destroyed if it gets signalled while being processed
+                                * by the Kick code, but the Kick code has no knowledge of whether a
+                                * sync_checkpoint is foreign, so we take a ref on all checkpoints.
+                                */
+                               SyncCheckpointTakeRef(sync_kernel->fence_sync->client_sync_checkpoint);
+
+                               *next_checkpoint = sync_kernel->fence_sync->client_sync_checkpoint;
+                               next_checkpoint++;
+                               points_on_fence++;
+                       } else if (add_foreign_sync) {
+                               foreign_checkpoint = pvr_sync_create_waiter_for_foreign_sync(fence_to_resolve, psSyncCheckpointContext);
+
+                               if (foreign_checkpoint) {
+                                       /* Take ref on sync_checkpoint - this will be dropped
+                                        * by the caller (see comment for the other call to
+                                        * SyncCheckpointTakeRef, above).
+                                        */
+                                       /* For foreign points, an extra
+                                        * checkpoint reference was taken at
+                                        * creation time to ensure it wasn't
+                                        * completed and free'd before we got
+                                        * here, so ownership of that reference
+                                        * is effectively passed to the firmware
+                                        */
+                                       *next_checkpoint = foreign_checkpoint;
+                                       next_checkpoint++;
+                                       points_on_fence++;
+                                       add_foreign_sync = false;
+                               }
+                       }
+               }
+
+               if (0) {
+                       int ii;
+
+                       pr_err("pvr_sync2: %s: returning nr_checkpoints=%d\n",
+                              __func__, points_on_fence);
+                       for (ii = 0; ii < points_on_fence; ii++) {
+                               PSYNC_CHECKPOINT *psTmp = &(*checkpoint_handles)[ii];
+
+                               pr_err("pvr_sync2: %s:   pt %d: sync checkpoint <%p>,\n",
+                                      __func__, ii, psTmp);
+                               pr_err("pvr_sync2: %s:          ID=%d\n",
+                                      __func__, SyncCheckpointGetId(*psTmp));
+                       }
+               }
+               *nr_checkpoints = points_on_fence;
+               *fence_uid = OSGetCurrentClientProcessIDKM();
+               *fence_uid = (*fence_uid << 32) | (fence_to_resolve & U32_MAX);
+
+err_out2:
+               sync_fence_put(sync_fence);
+       }
+
+err_out:
+       return err;
+}
+
+#if defined(PDUMP)
+static enum PVRSRV_ERROR_TAG
+pvr_sync_fence_get_checkpoints(PVRSRV_FENCE fence_to_pdump, u32 *nr_checkpoints,
+                               struct SYNC_CHECKPOINT_TAG ***checkpoint_handles)
+{
+       enum PVRSRV_ERROR_TAG err;
+       struct sync_fence *sync_fence;
+       struct sync_pt *sync_pt;
+       struct pvr_sync_kernel_pair *sync_kernel;
+       u32 points_on_fence = 0;
+       struct SYNC_CHECKPOINT_TAG **next_checkpoint;
+       struct SYNC_CHECKPOINT_TAG **checkpoints = NULL;
+       int j = 0;
+
+       if (!nr_checkpoints || !checkpoint_handles) {
+               err = PVRSRV_ERROR_INVALID_PARAMS;
+               goto err_out;
+       }
+
+       if (fence_to_pdump < 0) {
+               /* Null fence passed, so return 0 checkpoints */
+               err = PVRSRV_ERROR_INVALID_PARAMS;
+               goto err_out;
+       }
+
+       sync_fence = sync_fence_fdget(fence_to_pdump);
+       if (!sync_fence) {
+               pr_err("pvr_sync2: %s: Failed to read sync private data for fd %d\n",
+                       __func__, fence_to_pdump);
+               err = PVRSRV_ERROR_HANDLE_NOT_FOUND;
+               goto err_out;
+       }
+
+       /* Alloc memory to hold list of PSYNC_CHECKPOINTs */
+       checkpoints = kmalloc_array(MAX_SYNC_CHECKPOINTS_PER_FENCE,
+                                     sizeof(*checkpoints), GFP_KERNEL);
+       if (!checkpoints) {
+               pr_err("pvr_sync2: %s: Failed to alloc memory for returned list of sync checkpoints\n",
+                       __func__);
+               err = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto err_put_fence;
+       }
+
+       next_checkpoint = checkpoints;
+
+       (void)j;
+       for_each_sync_pt(sync_pt, sync_fence, j) {
+               struct pvr_sync_pt *pvr_pt = NULL;
+
+               /* Make sure that we do not overrun the memory we allocated */
+               if (points_on_fence >= MAX_SYNC_CHECKPOINTS_PER_FENCE) {
+                       pr_err("pvr_sync2: Maximum number of sync checkpoints in a fence exceeded (greater than %d)",
+                              MAX_SYNC_CHECKPOINTS_PER_FENCE);
+                       err = PVRSRV_ERROR_INVALID_PARAMS;
+                       kfree(checkpoints);
+                       goto err_put_fence;
+               }
+
+               if (is_pvr_timeline_pt(sync_pt)) {
+                       pvr_pt = (struct pvr_sync_pt *)sync_pt;
+                       sync_kernel = pvr_pt->sync_data->kernel;
+                       if (!sync_kernel)
+                               continue;
+                       *next_checkpoint = sync_kernel->fence_sync->client_sync_checkpoint;
+                       next_checkpoint++;
+                       points_on_fence++;
+               }
+       }
+
+       *checkpoint_handles = checkpoints;
+       *nr_checkpoints = points_on_fence;
+       err = PVRSRV_OK;
+err_put_fence:
+       sync_fence_put(sync_fence);
+err_out:
+       return err;
+}
+#endif
+
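+/* For each stalled UFO firmware address supplied, dump details of any
+ * matching checkpoint on our active list; returns how many of the UFOs
+ * belong to pvr_sync.
+ */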
+static u32
+pvr_sync_dump_info_on_stalled_ufos(u32 nr_ufos, u32 *vaddrs)
+{
+       u32 our_ufo_ct = 0;
+       struct pvr_sync_native_sync_checkpoint *sync;
+       unsigned long flags;
+
+       spin_lock_irqsave(&pvr_sync_pt_active_list_spinlock, flags);
+       /* dump info on any ufos in our active list */
+       list_for_each_entry(sync, &pvr_sync_pt_active_list, list) {
+               u32 *this_ufo_vaddr = vaddrs;
+               u32 ufo_num;
+               DUMPDEBUG_PRINTF_FUNC *pfnDummy = NULL;
+
+               for (ufo_num = 0; ufo_num < nr_ufos; ufo_num++, this_ufo_vaddr++) {
+                       if (sync->vaddr == *this_ufo_vaddr) {
+                               static const char *const type_names[] = {
+                                       "Timeline", "Fence", "Cleanup",
+                                       "Foreign Fence", "Foreign Cleanup"
+                               };
+
+                               /* Dump sync info */
+                               PVR_DUMPDEBUG_LOG(pfnDummy, NULL,
+                                                 "\tSyncID = %d, FWAddr = 0x%08x: %s (%s - [%p] %s)",
+                                                 sync->id, sync->vaddr,
+                                                 sync->class,
+                                                 type_names[sync->type],
+                                                 sync->foreign_sync_fence,
+                                                 sync->foreign_sync_fence_name);
+                               our_ufo_ct++;
+                       }
+               }
+       }
+       spin_unlock_irqrestore(&pvr_sync_pt_active_list_spinlock, flags);
+       return our_ufo_ct;
+}
+
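+/* Rename a timeline using the name supplied by user space. */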
+int pvr_sync_api_rename(void *api_priv, void *user_data)
+{
+       struct sync_timeline *obj = api_priv;
+       struct pvr_sync_timeline *timeline = get_timeline(obj);
+       struct pvr_sync_rename_ioctl_data *data = user_data;
+
+       data->szName[sizeof(data->szName) - 1] = '\0';
+       strlcpy(timeline->obj->name, data->szName, sizeof(timeline->obj->name));
+
+       return 0;
+}
+
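+/* Convert an empty GPU timeline into a software-only sw_sync timeline with
+ * the same name; refused if a fence has already been taken on the GPU
+ * timeline.
+ */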
+int pvr_sync_api_force_sw_only(void *api_priv, void **api_priv_new)
+{
+       struct sync_timeline *obj = api_priv;
+       struct pvr_sync_timeline *timeline = get_timeline(obj);
+       struct pvr_sw_sync_timeline *pvr_sw_sync_timeline;
+
+       /* We can only convert an empty GPU timeline */
+       if (timeline->kernel->fence_sync &&
+           timeline->kernel->fence_sync->next_value) {
+               pr_err("pvr_sync2: %s ERROR! timeline->kernel->fence_sync=<%p>, timeline->kernel->fence_sync->next_value=%d\n",
+                      __func__, timeline->kernel->fence_sync,
+                      timeline->kernel->fence_sync->next_value);
+               return -EFAULT;
+       }
+
+       /* Create a pvr_sw_sync timeline */
+       pvr_sw_sync_timeline = kmalloc(sizeof(*pvr_sw_sync_timeline), GFP_KERNEL);
+       if (!pvr_sw_sync_timeline) {
+               pr_err("pvr_sync2: %s ERROR! no memory to allocate pvr_sw_sync_timeline struct\n",
+                      __func__);
+               return -ENOMEM;
+       }
+
+       pvr_sw_sync_timeline->current_value = 0;
+       pvr_sw_sync_timeline->next_value = 1;
+       kref_init(&pvr_sw_sync_timeline->kref);
+
+       /* Create a sw_sync timeline with the old GPU timeline's name */
+       pvr_sw_sync_timeline->sw_sync_timeline = sw_sync_timeline_create(timeline->obj->name);
+       if (!pvr_sw_sync_timeline->sw_sync_timeline) {
+               pr_err("pvr_sync2: %s ERROR! error returned from sw_sync_timeline_create() for timeline->obj->name '%s'\n",
+                      __func__, timeline->obj->name);
+               kfree(pvr_sw_sync_timeline);
+               return -ENOMEM;
+       }
+
+       /* Destroy the old GPU timeline and update the struct file */
+       DPF("%s: # %s", __func__, debug_info_timeline(timeline));
+
+       sync_timeline_destroy(timeline->obj);
+       DPF("%s pvr_sw_sync_timeline<%p>, sw_sync_timeline<%p> curr=%llu,next=%llu",
+           __func__, pvr_sw_sync_timeline,
+           pvr_sw_sync_timeline->sw_sync_timeline,
+           pvr_sw_sync_timeline->current_value,
+           pvr_sw_sync_timeline->next_value);
+
+       *api_priv_new = (void *)pvr_sw_sync_timeline;
+
+       return 0;
+}
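+
+/*
+ * Note: once pvr_sync_api_force_sw_only() succeeds, the caller's private
+ * data must be replaced with *api_priv_new. The SW handlers below
+ * (pvr_sync_api_sw_create_fence() and pvr_sync_api_sw_inc()) expect a
+ * struct pvr_sw_sync_timeline, not the original GPU timeline.
+ */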
+
+int pvr_sync_api_sw_create_fence(void *api_priv, void *user_data)
+{
+       struct pvr_sw_sync_timeline *pvr_sw_timeline = api_priv;
+       struct pvr_sw_sync_create_fence_data *data = user_data;
+       struct sync_fence *fence;
+       int fd = get_unused_fd_flags(O_CLOEXEC);
+       struct sync_pt *sync_pt;
+       struct sw_sync_timeline *timeline;
+       int err;
+
+       if (fd < 0) {
+               pr_err("pvr_sync2: %s: Failed to find unused fd (%d)\n",
+                      __func__, fd);
+               err = -EMFILE;
+               goto err_out;
+       }
+
+       timeline = pvr_sw_timeline->sw_sync_timeline;
+
+       sync_pt = sw_sync_pt_create(timeline, pvr_sw_timeline->next_value);
+       if (!sync_pt) {
+               pr_err("pvr_sync2: %s: Failed to create a sync point (%d)\n",
+                      __func__, fd);
+               err = -ENOMEM;
+               goto err_put_fd;
+       }
+
+       data->name[sizeof(data->name) - 1] = '\0';
+       fence = sync_fence_create(data->name, sync_pt);
+       if (!fence) {
+               pr_err("pvr_sync2: %s: Failed to create a fence (%d)\n",
+                      __func__, fd);
+               sync_pt_free(sync_pt);
+               err = -ENOMEM;
+               goto err_put_fd;
+       }
+
+       data->fence = fd;
+       data->sync_pt_idx = pvr_sw_timeline->next_value;
+
+       sync_fence_install(fence, fd);
+       pvr_sw_timeline->next_value++;
+
+       return 0;
+
+err_put_fd:
+       pr_err("pvr_sync2: %s: putting fd %d back to unused\n", __func__, fd);
+       put_unused_fd(fd);
+err_out:
+       return err;
+}
+
+int pvr_sync_api_sw_inc(void *api_priv, void *user_data)
+{
+       struct pvr_sw_sync_timeline *pvr_timeline = api_priv;
+       struct sw_sync_timeline *timeline;
+       struct pvr_sw_timeline_advance_data *data = user_data;
+
+       timeline = pvr_timeline->sw_sync_timeline;
+
+       /* Don't allow sw timeline to be advanced beyond the last defined point */
+       if (pvr_timeline->current_value == (pvr_timeline->next_value - 1)) {
+               pr_err("pvr_sync2: attempt to advance SW timeline beyond last defined point\n");
+               return -EPERM;
+       }
+
+       sw_sync_timeline_inc(timeline, 1);
+       pvr_timeline->current_value++;
+       data->sync_pt_idx = pvr_timeline->current_value;
+
+       return 0;
+}
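+
+/*
+ * Illustrative flow for the SW-timeline handlers above (a sketch only; the
+ * real callers live in the pvr_sync ioctl layer, which is not shown here):
+ *
+ *   pvr_sync_api_sw_create_fence(sw_tl, &create_data); // fd for next_value, then next_value++
+ *   pvr_sync_api_sw_inc(sw_tl, &advance_data);         // current_value++, signals oldest point
+ *
+ * A fence created at point N signals once current_value reaches N, and
+ * pvr_sync_api_sw_inc() refuses to advance past the last defined point.
+ */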
+
+static void
+pvr_sync_check_status_work_queue_function(struct work_struct *data)
+{
+       /* A completed SW operation may un-block the GPU */
+       PVRSRVCheckStatus(NULL);
+}
+
+/* Returns true if the freelist still has entries, else false if empty */
+static bool
+pvr_sync_clean_freelist(void)
+{
+       struct pvr_sync_kernel_pair *kernel, *k;
+       struct pvr_sync_timeline_kernel_pair *tl_kernel, *tl_k;
+       struct pvr_sync_fence *sync_fence, *f;
+       LIST_HEAD(unlocked_free_checkpoint_list);
+       LIST_HEAD(unlocked_free_timeline_list);
+       LIST_HEAD(unlocked_free_list);
+       unsigned long flags;
+       bool freelist_empty;
+
+       /* We can't free the sync directly in this loop because
+        * that will take the mmap mutex. We can't take mutexes while we have
+        * this list locked with a spinlock. So move all the items we want to
+        * free to another, local list (no locking required) and process it
+        * in a second loop.
+        */
+
+       spin_lock_irqsave(&sync_checkpoint_free_list_spinlock, flags);
+       list_for_each_entry_safe(kernel, k, &sync_checkpoint_free_list, list) {
+               /* Check if this sync is not used anymore. */
+               if ((kernel->fence_sync) &&
+                   !is_sync_checkpoint_met(kernel->fence_sync,
+                                           PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) {
+                       continue;
+               }
+
+               /* Remove the entry from the free list. */
+               list_move_tail(&kernel->list, &unlocked_free_checkpoint_list);
+       }
+
+       /* Wait and loop if there are still syncs on the free list (i.e.
+        * they are still in use by the HW).
+        */
+       freelist_empty = list_empty(&sync_checkpoint_free_list);
+
+       spin_unlock_irqrestore(&sync_checkpoint_free_list_spinlock, flags);
+
+       spin_lock_irqsave(&timeline_free_list_spinlock, flags);
+       list_for_each_entry_safe(tl_kernel, tl_k, &timeline_free_list, list) {
+               /* Check if this sync is not used anymore. */
+               if (tl_kernel->fence_sync && !is_sync_prim_met(tl_kernel->fence_sync))
+                       continue;
+
+               /* Remove the entry from the free list. */
+               list_move_tail(&tl_kernel->list, &unlocked_free_timeline_list);
+       }
+
+       /* Wait and loop if there are still syncs on the free list (i.e.
+        * they are still in use by the HW).
+        */
+       freelist_empty &= list_empty(&timeline_free_list);
+
+       spin_unlock_irqrestore(&timeline_free_list_spinlock, flags);
+
+       list_for_each_entry_safe(kernel, k, &unlocked_free_checkpoint_list, list) {
+               list_del(&kernel->list);
+
+               if (kernel->fence_sync && kernel->fence_sync->client_sync_checkpoint) {
+                       spin_lock_irqsave(&pvr_sync_pt_active_list_spinlock, flags);
+                       if (!list_empty(&kernel->fence_sync->list))
+                               list_del_init(&kernel->fence_sync->list);
+
+                       spin_unlock_irqrestore(&pvr_sync_pt_active_list_spinlock, flags);
+                       SyncCheckpointFree(kernel->fence_sync->client_sync_checkpoint);
+                       kernel->fence_sync->client_sync_checkpoint = NULL;
+               }
+               kfree(kernel->fence_sync);
+               kfree(kernel);
+       }
+
+       list_for_each_entry_safe(tl_kernel, tl_k, &unlocked_free_timeline_list, list) {
+               list_del(&tl_kernel->list);
+
+               if (tl_kernel->fence_sync)
+                       sync_pool_put(tl_kernel->fence_sync);
+               kfree(tl_kernel);
+       }
+
+       /* sync_fence_put() must be called from process/WQ context
+        * because it uses fput(), which is not allowed to be called
+        * from interrupt context in kernels <3.6.
+        */
+       INIT_LIST_HEAD(&unlocked_free_list);
+
+       spin_lock_irqsave(&sync_fence_put_list_spinlock, flags);
+       list_for_each_entry_safe(sync_fence, f, &sync_fence_put_list, list)
+               list_move_tail(&sync_fence->list, &unlocked_free_list);
+
+       spin_unlock_irqrestore(&sync_fence_put_list_spinlock, flags);
+
+       list_for_each_entry_safe(sync_fence, f, &unlocked_free_list, list) {
+               list_del(&sync_fence->list);
+               sync_fence_put(sync_fence->fence);
+               kfree(sync_fence);
+       }
+
+       return !freelist_empty;
+}
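+
+/*
+ * pvr_sync_clean_freelist() implements the two-phase free described above:
+ * entries are first moved off the spinlock-protected free lists onto local
+ * lists and only then released (which may take mutexes). It is driven by
+ * pvr_sync_defer_free_work_queue_function() below; a producer that has moved
+ * a sync onto one of the free lists is expected to kick that worker, e.g.
+ * (illustrative only):
+ *
+ *   queue_work(pvr_sync_data.defer_free_wq, &pvr_sync_data.defer_free_work);
+ */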
+
+static void
+pvr_sync_defer_free_work_queue_function(struct work_struct *data)
+{
+       enum PVRSRV_ERROR_TAG error = PVRSRV_OK;
+       void *event_object;
+
+       error = OSEventObjectOpen(pvr_sync_data.event_object_handle,
+               &event_object);
+       if (error != PVRSRV_OK) {
+               pr_err("pvr_sync2: %s: Error opening event object (%s)\n",
+                       __func__, PVRSRVGetErrorString(error));
+               return;
+       }
+
+       while (pvr_sync_clean_freelist()) {
+               error = OSEventObjectWait(event_object);
+
+               switch (error) {
+               case PVRSRV_OK:
+               case PVRSRV_ERROR_TIMEOUT:
+                       /* Timeout is normal behaviour */
+                       continue;
+               default:
+                       pr_err("pvr_sync2: %s: Error waiting for event object (%s)\n",
+                               __func__, PVRSRVGetErrorString(error));
+                       break;
+               }
+       }
+       error = OSEventObjectClose(event_object);
+       if (error != PVRSRV_OK) {
+               pr_err("pvr_sync2: %s: Error closing event object (%s)\n",
+                       __func__, PVRSRVGetErrorString(error));
+       }
+}
+
+static
+void pvr_sync_free_checkpoint_list_mem(void *mem_ptr)
+{
+       kfree(mem_ptr);
+}
+
+static
+void pvr_sync_update_all_timelines(void *command_complete_handle)
+{
+       struct pvr_sync_timeline *timeline, *n;
+       u32 num_signalled = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&timeline_list_lock, flags);
+
+       list_for_each_entry(timeline, &timeline_list, list) {
+               /* If a timeline is destroyed via pvr_sync_release_timeline()
+                * in parallel with a call to pvr_sync_update_all_timelines(),
+                * the timeline_list_lock will block destruction of the
+                * 'timeline' pointer. Use kref_get_unless_zero() to detect
+                * and handle this race. Skip the timeline if it's being
+                * destroyed, blocked only on the timeline_list_lock.
+                */
+               timeline->valid = kref_get_unless_zero(&timeline->kref);
+       }
+
+       list_for_each_entry_safe(timeline, n, &timeline_list, list) {
+               /* We know timeline is valid at this point because we're
+                * holding the list lock (so pvr_sync_destroy_timeline() has
+                * to wait).
+                */
+               void *obj = timeline->obj;
+
+               /* If we're racing with pvr_sync_release_timeline(), ignore */
+               if (!timeline->valid)
+                       continue;
+
+               /* If syncs have signaled on the GPU, echo this in pvr_sync.
+                *
+                * At this point we know the timeline is valid, but obj might
+                * have raced and been set to NULL. It's only important that
+                * we use NULL / non-NULL consistently with the if() and call
+                * to sync_timeline_signal() -- the timeline->obj can't be
+                * freed (pvr_sync_release_timeline() will be stuck waiting
+                * for the timeline_list_lock) but it might have been made
+                * invalid by the base sync driver, in which case this call
+                * will bounce harmlessly.
+                */
+               if (obj) {
+                       sync_timeline_signal(obj);
+                       num_signalled++;
+               }
+
+               /* We're already holding the timeline_list_lock */
+               kref_put(&timeline->kref, pvr_sync_destroy_timeline_locked);
+       }
+
+       spin_unlock_irqrestore(&timeline_list_lock, flags);
+}
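+
+/*
+ * Note on the two-pass scheme above: the first pass only marks each timeline
+ * valid/invalid via kref_get_unless_zero(), so a timeline part-way through
+ * pvr_sync_release_timeline() is simply skipped. The second pass signals the
+ * valid timelines and drops the reference with
+ * pvr_sync_destroy_timeline_locked(), which must be the _locked variant
+ * because timeline_list_lock is still held at that point.
+ */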
+
+enum PVRSRV_ERROR_TAG pvr_sync_register_functions(void)
+{
+       /* Initialise struct and register with sync_checkpoint.c */
+       pvr_sync_data.sync_checkpoint_ops.pfnFenceResolve = pvr_sync_resolve_fence;
+       pvr_sync_data.sync_checkpoint_ops.pfnFenceCreate = pvr_sync_create_fence;
+       pvr_sync_data.sync_checkpoint_ops.pfnFenceDataRollback = pvr_sync_rollback_fence_data;
+       pvr_sync_data.sync_checkpoint_ops.pfnFenceFinalise = pvr_sync_finalise_fence;
+       pvr_sync_data.sync_checkpoint_ops.pfnNoHWUpdateTimelines = pvr_sync_update_all_timelines;
+       pvr_sync_data.sync_checkpoint_ops.pfnFreeCheckpointListMem =
+               pvr_sync_free_checkpoint_list_mem;
+       pvr_sync_data.sync_checkpoint_ops.pfnDumpInfoOnStalledUFOs =
+               pvr_sync_dump_info_on_stalled_ufos;
+       strlcpy(pvr_sync_data.sync_checkpoint_ops.pszImplName,
+               "pvr_sync2", SYNC_CHECKPOINT_IMPL_MAX_STRLEN);
+#if defined(PDUMP)
+       pvr_sync_data.sync_checkpoint_ops.pfnSyncFenceGetCheckpoints =
+               pvr_sync_fence_get_checkpoints;
+#endif
+
+       return SyncCheckpointRegisterFunctions(&pvr_sync_data.sync_checkpoint_ops);
+}
+
+int pvr_sync_init(void)
+{
+       return pvr_sync_ioctl_init();
+}
+
+void pvr_sync_deinit(void)
+{
+       pvr_sync_ioctl_deinit();
+}
+
+enum PVRSRV_ERROR_TAG pvr_sync_device_init(struct device *dev)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct pvr_drm_private *priv = ddev->dev_private;
+       enum PVRSRV_ERROR_TAG error;
+
+       /* Multi-device is not supported for sync2; if we attempt to init
+        * another device, print a big warning to the kernel log.
+        */
+       if (WARN_ON(pvr_sync_data.defer_free_wq)) {
+               pr_err("pvr_sync2: Multi-Device not supported\n");
+               return PVRSRV_ERROR_ALREADY_EXISTS;
+       }
+
+       DPF("%s", __func__);
+
+       atomic_set(&pvr_sync_data.sync_id, 0);
+
+       error = PVRSRVAcquireGlobalEventObjectKM(
+               &pvr_sync_data.event_object_handle);
+       if (error != PVRSRV_OK) {
+               pr_err("pvr_sync2: %s: Failed to acquire global event object (%s)\n",
+                       __func__, PVRSRVGetErrorString(error));
+               goto err_out;
+       }
+
+       error = SyncPrimContextCreate(priv->dev_node,
+                                     &pvr_sync_data.sync_prim_context);
+       if (error != PVRSRV_OK) {
+               pr_err("pvr_sync2: %s: Failed to create sync prim context (%s)\n",
+                      __func__, PVRSRVGetErrorString(error));
+               goto err_release_event_object;
+       }
+
+       pvr_sync_data.defer_free_wq =
+               create_freezable_workqueue("pvr_sync_defer_free_workqueue");
+       if (!pvr_sync_data.defer_free_wq) {
+               pr_err("pvr_sync2: %s: Failed to create pvr_sync defer_free workqueue\n",
+                      __func__);
+               error = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto err_free_sync_context;
+       }
+
+       INIT_WORK(&pvr_sync_data.defer_free_work,
+               pvr_sync_defer_free_work_queue_function);
+
+       INIT_WORK(&pvr_sync_data.check_status_work,
+               pvr_sync_check_status_work_queue_function);
+       error = PVRSRVRegisterCmdCompleteNotify(
+                       &pvr_sync_data.command_complete_handle,
+                       &pvr_sync_update_all_timelines,
+                       &priv->dev_node);
+       if (error != PVRSRV_OK) {
+               pr_err("pvr_sync2: %s: Failed to register MISR notification (%s)\n",
+                      __func__, PVRSRVGetErrorString(error));
+               goto err_destroy_defer_free_wq;
+       }
+
+       error = PVRSRVRegisterDeviceDbgRequestNotify(
+                       &priv->sync_debug_notify_handle,
+                       priv->dev_node,
+                       pvr_sync_debug_request,
+                       DEBUG_REQUEST_ANDROIDSYNC,
+                       NULL);
+       if (error != PVRSRV_OK) {
+               pr_err("pvr_sync2: %s: Failed to register debug notifier (%s)\n",
+                       __func__, PVRSRVGetErrorString(error));
+               goto err_unregister_cmd_complete;
+       }
+
+       error = PVRSRV_OK;
+       return error;
+
+err_unregister_cmd_complete:
+       PVRSRVUnregisterCmdCompleteNotify(
+               pvr_sync_data.command_complete_handle);
+err_destroy_defer_free_wq:
+       destroy_workqueue(pvr_sync_data.defer_free_wq);
+err_free_sync_context:
+       SyncPrimContextDestroy(pvr_sync_data.sync_prim_context);
+err_release_event_object:
+       PVRSRVReleaseGlobalEventObjectKM(pvr_sync_data.event_object_handle);
+err_out:
+       return error;
+}
+
+void pvr_sync_device_deinit(struct device *dev)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct pvr_drm_private *priv = ddev->dev_private;
+
+       DPF("%s", __func__);
+
+       PVRSRVUnregisterDeviceDbgRequestNotify(priv->sync_debug_notify_handle);
+
+       PVRSRVUnregisterCmdCompleteNotify(
+               pvr_sync_data.command_complete_handle);
+
+       /* This will drain the workqueue, so we guarantee that all deferred
+        * syncs are free'd before returning.
+        */
+       destroy_workqueue(pvr_sync_data.defer_free_wq);
+
+       sync_pool_clear();
+
+       SyncPrimContextDestroy(pvr_sync_data.sync_prim_context);
+
+       PVRSRVReleaseGlobalEventObjectKM(pvr_sync_data.event_object_handle);
+}
+
+enum PVRSRV_ERROR_TAG pvr_sync_fence_wait(void *fence, u32 timeout_in_ms)
+{
+       int err;
+
+       DPF("fence<%p>, to=%d", fence, timeout_in_ms);
+
+       err = sync_fence_wait(fence, timeout_in_ms);
+       /* -ETIME means active. In this case we will retry later again. If the
+        * return value is an error or zero we will close this fence and
+        * proceed. This makes sure that we are not getting stuck here when a
+        * fence changes into an error state for whatever reason.
+        */
+       if (err == -ETIME) {
+               DPF("%s: timeout", __func__);
+#ifdef DEBUG_OUTPUT
+               _dump_fence(fence, NULL, NULL);
+#endif
+               return PVRSRV_ERROR_TIMEOUT;
+       } else if (err != 0) {
+               pr_err("%s: failed dependencies\n", __func__);
+               return PVRSRV_ERROR_FAILED_DEPENDENCIES;
+       }
+
+       return PVRSRV_OK;
+}
+
+enum PVRSRV_ERROR_TAG pvr_sync_fence_release(void *fence)
+{
+       sync_fence_put(fence);
+
+       return PVRSRV_OK;
+}
+
+enum PVRSRV_ERROR_TAG pvr_sync_fence_get(int fence_fd, void **pfence)
+{
+       struct file *file;
+
+       file = fget(fence_fd);
+       if (file == NULL || file->private_data == NULL)
+               return PVRSRV_ERROR_INVALID_PARAMS;
+
+       *pfence = file->private_data;
+
+       return PVRSRV_OK;
+}
+
+enum PVRSRV_ERROR_TAG
+pvr_sync_sw_timeline_fence_create(struct _PVRSRV_DEVICE_NODE_ *pvrsrv_dev_node,
+                                 int timeline_fd,
+                                 const char *fence_name,
+                                 int *fence_fd_out,
+                                 u64 *sync_pt_idx)
+{
+       enum PVRSRV_ERROR_TAG srv_err;
+       struct file *file;
+       struct pvr_sw_sync_timeline *pvr_sw_timeline;
+       struct sync_fence *fence = NULL;
+       struct sync_pt *sync_point;
+       int fd;
+
+       (void)(pvrsrv_dev_node);
+
+       fd = get_unused_fd_flags(O_CLOEXEC);
+       if (fd < 0) {
+               pr_err("%s: invalid fd\n", __func__);
+
+               return PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE;
+       }
+
+       file = fget(timeline_fd);
+       pvr_sw_timeline = pvr_sync_get_api_priv(file);
+       if (!pvr_sw_timeline) {
+               /* unrecognised timeline */
+               pr_err("%s: unrecognised timeline\n", __func__);
+
+               srv_err = PVRSRV_ERROR_INVALID_PARAMS;
+               if (file)
+                       goto err_put_file;
+               else
+                       goto err_put_fd;
+       }
+
+       DPF("pvr_sw_timeline<%p>", pvr_sw_timeline);
+       DPF("psSWTimeline<%p>", pvr_sw_timeline->sw_sync_timeline);
+
+       sync_point = sw_sync_pt_create(pvr_sw_timeline->sw_sync_timeline,
+                                      pvr_sw_timeline->next_value);
+       if (!sync_point) {
+               srv_err = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto err_put_file;
+       }
+
+       fence = sync_fence_create(fence_name, sync_point);
+       if (!fence) {
+               srv_err = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto err_free_pt;
+       }
+
+       sync_fence_install(fence, fd);
+
+       if (sync_pt_idx)
+               *sync_pt_idx = pvr_sw_timeline->next_value;
+       pvr_sw_timeline->next_value++;
+
+       fput(file);
+
+       *fence_fd_out = fd;
+
+       DPF("returned fence fd %d <%p> '%s'", *fence_fd_out, fence, fence_name);
+
+       return PVRSRV_OK;
+
+err_free_pt:
+       sync_pt_free(sync_point);
+err_put_file:
+       fput(file);
+err_put_fd:
+       put_unused_fd(fd);
+       return srv_err;
+}
+
+enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_advance(void *timeline, u64 *sync_pt_idx)
+{
+       struct sw_sync_timeline *sw_timeline;
+       struct pvr_sw_sync_timeline *pvr_sw_timeline;
+
+       if (timeline == NULL)
+               return PVRSRV_ERROR_INVALID_PARAMS;
+
+       pvr_sw_timeline = (struct pvr_sw_sync_timeline *)timeline;
+       sw_timeline = (struct sw_sync_timeline *)pvr_sw_timeline->sw_sync_timeline;
+
+       if (pvr_sw_timeline->current_value == (pvr_sw_timeline->next_value - 1)) {
+               pr_err("%s: attempt to advance SW timeline beyond last defined point\n",
+                      __func__);
+               return PVRSRV_ERROR_SW_TIMELINE_AT_LATEST_POINT;
+       }
+
+       sw_sync_timeline_inc(sw_timeline, 1);
+       pvr_sw_timeline->current_value++;
+
+       if (sync_pt_idx)
+               *sync_pt_idx = pvr_sw_timeline->current_value;
+
+       return PVRSRV_OK;
+}
+
+enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_release(void *timeline)
+{
+       struct pvr_sw_sync_timeline *pvr_sw_timeline;
+
+       if (timeline == NULL)
+               return PVRSRV_ERROR_INVALID_PARAMS;
+
+       pvr_sw_timeline = (struct pvr_sw_sync_timeline *)timeline;
+       kref_put(&pvr_sw_timeline->kref, pvr_sw_sync_destroy_timeline);
+
+       return PVRSRV_OK;
+}
+
+enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_get(int timeline_fd,
+                                          void **timeline_out)
+{
+       enum PVRSRV_ERROR_TAG srv_err;
+       struct file *file;
+       struct pvr_sw_sync_timeline *pvr_sw_timeline;
+       int ret;
+
+       file = fget(timeline_fd);
+       pvr_sw_timeline = pvr_sync_get_api_priv(file);
+       if (!pvr_sw_timeline) {
+               pr_err("%s: invalid params\n", __func__);
+               srv_err = PVRSRV_ERROR_INVALID_PARAMS;
+               if (file)
+                       goto err_put_file;
+               else
+                       goto err_out;
+       }
+
+       *timeline_out = (void *)pvr_sw_timeline;
+
+       /* Take ref on pvr_sw_timeline */
+       ret = kref_get_unless_zero(&pvr_sw_timeline->kref);
+       if (ret)
+               srv_err = PVRSRV_OK;
+       else
+               srv_err = PVRSRV_ERROR_INVALID_PARAMS;
+
+       DPF("pvr_sw_timeline=<%p>, pvr_sw_timeline->c=%llu, n=%llu",
+           pvr_sw_timeline->sw_sync_timeline, pvr_sw_timeline->current_value,
+           pvr_sw_timeline->next_value);
+       DPF("&pvr_sw_timeline->current_value=<%p>",
+           &pvr_sw_timeline->current_value);
+       DPF("returned, *timeline_out=<%p>", *timeline_out);
+
+err_put_file:
+       fput(file);
+err_out:
+       return srv_err;
+}
+
+enum PVRSRV_ERROR_TAG sync_dump_fence(void *sw_fence_obj,
+                                 DUMPDEBUG_PRINTF_FUNC *dump_debug_printf,
+                                 void *dump_debug_file)
+{
+       struct sync_fence *fence = (struct sync_fence *) sw_fence_obj;
+
+       _dump_fence(fence, dump_debug_printf, dump_debug_file);
+
+       return PVRSRV_OK;
+}
+
+enum PVRSRV_ERROR_TAG sync_sw_dump_timeline(void *sw_timeline_obj,
+                                       DUMPDEBUG_PRINTF_FUNC *dump_debug_printf,
+                                       void *dump_debug_file)
+{
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 10, 0))
+       struct pvr_sw_sync_timeline *timeline =
+                       (struct pvr_sw_sync_timeline *) sw_timeline_obj;
+
+       PVR_DUMPDEBUG_LOG(dump_debug_printf,
+                         dump_debug_file,
+                         "TL:%s SeqNum: %llu/%llu",
+                         timeline->sw_sync_timeline->obj.name,
+                         timeline->current_value,
+                         timeline->next_value);
+#else
+       PVR_DUMPDEBUG_LOG(dump_debug_printf,
+                         dump_debug_file,
+                         "Timeline Stats not available on this kernel!");
+#endif
+       return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sync_api.h b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sync_api.h
new file mode 100644 (file)
index 0000000..ce91df1
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ * @File        pvr_sync_api.h
+ * @Title       Kernel driver for Android's sync mechanism
+ * @Codingstyle LinuxKernel
+ * @Copyright   Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+ * @License     Dual MIT/GPLv2
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License Version 2 ("GPL") in which case the provisions
+ * of GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms of
+ * GPL, and not to allow others to use your version of this file under the terms
+ * of the MIT license, indicate your decision by deleting the provisions above
+ * and replace them with the notice and other provisions required by GPL as set
+ * out in the file called "GPL-COPYING" included in this distribution. If you do
+ * not delete the provisions above, a recipient may use your version of this file
+ * under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT-COPYING".
+ *
+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _PVR_SYNC_API_H
+#define _PVR_SYNC_API_H
+
+#include <img_types.h>
+
+int pvr_sync_api_init(void *file_handle, void **api_priv);
+int pvr_sync_api_deinit(void *api_priv, bool is_sw);
+int pvr_sync_api_rename(void *api_priv, void *user_data);
+int pvr_sync_api_force_sw_only(void *api_priv, void **api_priv_new);
+int pvr_sync_api_sw_create_fence(void *api_priv, void *user_data);
+int pvr_sync_api_sw_inc(void *api_priv, void *user_data);
+
+struct file;
+
+int pvr_sync_ioctl_init(void);
+void pvr_sync_ioctl_deinit(void);
+void *pvr_sync_get_api_priv(struct file *file);
+struct file *pvr_sync_get_file_struct(void *file_handle);
+
+#endif /* _PVR_SYNC_API_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sync_file.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sync_file.c
new file mode 100644 (file)
index 0000000..e365610
--- /dev/null
@@ -0,0 +1,1094 @@
+/*
+ * @File        pvr_sync_file.c
+ * @Title       Kernel driver for Android's sync mechanism
+ * @Codingstyle LinuxKernel
+ * @Copyright   Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+ * @License     Dual MIT/GPLv2
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License Version 2 ("GPL") in which case the provisions
+ * of GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms of
+ * GPL, and not to allow others to use your version of this file under the terms
+ * of the MIT license, indicate your decision by deleting the provisions above
+ * and replace them with the notice and other provisions required by GPL as set
+ * out in the file called "GPL-COPYING" included in this distribution. If you do
+ * not delete the provisions above, a recipient may use your version of this file
+ * under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT-COPYING".
+ *
+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "services_kernel_client.h"
+#include "pvr_drv.h"
+#include "pvr_sync.h"
+#include "pvr_fence.h"
+#include "pvr_counting_timeline.h"
+
+#include "linux_sw_sync.h"
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/sync_file.h>
+#include <linux/file.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+
+#include "pvr_sync_api.h"
+
+/* This header must always be included last */
+#include "kernel_compatibility.h"
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)) && !defined(CHROMIUMOS_KERNEL)
+#define sync_file_user_name(s) ((s)->name)
+#else
+#define sync_file_user_name(s) ((s)->user_name)
+#endif
+
+#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, fmt, ...) \
+       do {                                                             \
+               if (pfnDumpDebugPrintf)                                  \
+                       pfnDumpDebugPrintf(pvDumpDebugFile, fmt,         \
+                                          ## __VA_ARGS__);              \
+               else                                                     \
+                       pr_err(fmt "\n", ## __VA_ARGS__);                \
+       } while (0)
+
+#define FILE_NAME "pvr_sync_file"
+
+struct sw_sync_create_fence_data {
+       __u32 value;
+       char name[32];
+       __s32 fence;
+};
+#define SW_SYNC_IOC_MAGIC 'W'
+#define SW_SYNC_IOC_CREATE_FENCE \
+       (_IOWR(SW_SYNC_IOC_MAGIC, 0, struct sw_sync_create_fence_data))
+#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
+
+/* Global data for the sync driver */
+static struct {
+       struct pvr_fence_context *foreign_fence_context;
+       PFN_SYNC_CHECKPOINT_STRUCT sync_checkpoint_ops;
+} pvr_sync_data;
+
+#if defined(NO_HARDWARE)
+static DEFINE_MUTEX(pvr_timeline_active_list_lock);
+static struct list_head pvr_timeline_active_list;
+#endif
+
+/* This is the actual timeline metadata. We might keep this around after the
+ * base sync driver has destroyed the pvr_sync_timeline_wrapper object.
+ */
+struct pvr_sync_timeline {
+       char name[32];
+       void *file_handle;
+       bool is_sw;
+       /* Fence context used for hw fences */
+       struct pvr_fence_context *hw_fence_context;
+       /* Timeline and context for sw fences */
+       struct pvr_counting_fence_timeline *sw_fence_timeline;
+#if defined(NO_HARDWARE)
+       /* List of all timelines (used to advance all timelines in nohw builds) */
+       struct list_head list;
+#endif
+};
+
+static
+void pvr_sync_free_checkpoint_list_mem(void *mem_ptr)
+{
+       kfree(mem_ptr);
+}
+
+#if defined(NO_HARDWARE)
+/* function used to signal pvr fence in nohw builds */
+static
+void pvr_sync_nohw_signal_fence(void *fence_data_to_signal)
+{
+       struct pvr_sync_timeline *this_timeline;
+
+       mutex_lock(&pvr_timeline_active_list_lock);
+       list_for_each_entry(this_timeline, &pvr_timeline_active_list, list) {
+               pvr_fence_context_signal_fences_nohw(this_timeline->hw_fence_context);
+       }
+       mutex_unlock(&pvr_timeline_active_list_lock);
+}
+#endif
+
+static struct pvr_sync_timeline *pvr_sync_timeline_fget(int fd)
+{
+       struct file *file = fget(fd);
+       struct pvr_sync_timeline *timeline;
+
+       if (!file)
+               return NULL;
+
+       timeline = pvr_sync_get_api_priv(file);
+       if (!timeline)
+               fput(file);
+
+       return timeline;
+}
+
+static void pvr_sync_timeline_fput(struct pvr_sync_timeline *timeline)
+{
+       struct file *file = pvr_sync_get_file_struct(timeline->file_handle);
+
+       if (file)
+               fput(file);
+       else
+               pr_err(FILE_NAME ": %s: Timeline incomplete\n", __func__);
+}
+
+/* ioctl and fops handling */
+
+int pvr_sync_api_init(void *file_handle, void **api_priv)
+{
+       struct pvr_sync_timeline *timeline;
+       char task_comm[TASK_COMM_LEN];
+
+       get_task_comm(task_comm, current);
+
+       timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
+       if (!timeline)
+               return -ENOMEM;
+
+       strlcpy(timeline->name, task_comm, sizeof(timeline->name));
+       timeline->file_handle = file_handle;
+       timeline->is_sw = false;
+
+       *api_priv = (void *)timeline;
+
+       return 0;
+}
+
+int pvr_sync_api_deinit(void *api_priv, bool is_sw)
+{
+       struct pvr_sync_timeline *timeline = api_priv;
+
+       if (!timeline)
+               return 0;
+
+       if (timeline->sw_fence_timeline) {
+               /* This makes sure any outstanding SW syncs are marked as
+                * complete at timeline close time. Otherwise it'll leak the
+                * timeline (as outstanding fences hold a ref) and possibly
+                * wedge the system if something is waiting on one of those
+                * fences
+                */
+               pvr_counting_fence_timeline_force_complete(
+                       timeline->sw_fence_timeline);
+               pvr_counting_fence_timeline_put(timeline->sw_fence_timeline);
+       }
+
+       if (timeline->hw_fence_context) {
+#if defined(NO_HARDWARE)
+               mutex_lock(&pvr_timeline_active_list_lock);
+               list_del(&timeline->list);
+               mutex_unlock(&pvr_timeline_active_list_lock);
+#endif
+               pvr_fence_context_destroy(timeline->hw_fence_context);
+       }
+
+       kfree(timeline);
+
+       return 0;
+}
+
+/*
+ * This is the function that kick code will call in order to 'finalise' a
+ * created output fence just prior to returning from the kick function.
+ * The OS native sync code needs to implement a function meeting this
+ * specification - the implementation may be a nop if the OS does not need
+ * to perform any actions at this point.
+ *
+ * Input: fence_fd            The PVRSRV_FENCE to be 'finalised'. This value
+ *                            will have been returned by an earlier call to
+ *                            pvr_sync_create_fence().
+ * Input: finalise_data       The finalise data returned by an earlier call
+ *                            to pvr_sync_create_fence().
+ */
+static enum PVRSRV_ERROR_TAG
+pvr_sync_finalise_fence(PVRSRV_FENCE fence_fd, void *finalise_data)
+{
+       struct sync_file *sync_file = finalise_data;
+       struct pvr_fence *pvr_fence;
+
+       if (!sync_file || (fence_fd < 0)) {
+               pr_err(FILE_NAME ": %s: Invalid input fence\n", __func__);
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       pvr_fence = to_pvr_fence(sync_file->fence);
+
+       if (!pvr_fence) {
+               pr_err(FILE_NAME ": %s: Fence not a pvr fence\n", __func__);
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       /* pvr fences can be signalled any time after creation */
+       dma_fence_enable_sw_signaling(&pvr_fence->base);
+
+       fd_install(fence_fd, sync_file->file);
+
+       return PVRSRV_OK;
+}
+
+/*
+ * This is the function that kick code will call in order to obtain a new
+ * PVRSRV_FENCE from the OS native sync code and the PSYNC_CHECKPOINT used
+ * in that fence. The OS native sync code needs to implement a function
+ * meeting this specification.
+ *
+ * Input: device                   Device node to use in creating a hw_fence_ctx
+ * Input: fence_name               A string to annotate the fence with (for
+ *                                 debug).
+ * Input: timeline                 The timeline on which the new fence is to be
+ *                                 created.
+ * Output: new_fence               The new PVRSRV_FENCE to be returned by the
+ *                                 kick call.
+ * Output: fence_uid               Unique ID of the update fence.
+ * Output: fence_finalise_data     Pointer to data needed to finalise the fence.
+ * Output: new_checkpoint_handle   The PSYNC_CHECKPOINT used by the new fence.
+ */
+static enum PVRSRV_ERROR_TAG
+pvr_sync_create_fence(
+                     struct _PVRSRV_DEVICE_NODE_ *device,
+                     const char *fence_name,
+                     PVRSRV_TIMELINE new_fence_timeline,
+                     PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+                     PVRSRV_FENCE *new_fence, u64 *fence_uid,
+                     void **fence_finalise_data,
+                     PSYNC_CHECKPOINT *new_checkpoint_handle,
+                     void **timeline_update_sync,
+                     __u32 *timeline_update_value)
+{
+       PVRSRV_ERROR err = PVRSRV_OK;
+       PVRSRV_FENCE new_fence_fd = -1;
+       struct pvr_sync_timeline *timeline;
+       struct pvr_fence *pvr_fence;
+       PSYNC_CHECKPOINT checkpoint;
+       struct sync_file *sync_file;
+
+       if (new_fence_timeline < 0 || !new_fence || !new_checkpoint_handle
+               || !fence_finalise_data) {
+               pr_err(FILE_NAME ": %s: Invalid input params\n", __func__);
+               err = PVRSRV_ERROR_INVALID_PARAMS;
+               goto err_out;
+       }
+
+       /* We reserve the new fence FD before taking any operations
+        * as we do not want to fail (e.g. run out of FDs)
+        */
+       new_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+       if (new_fence_fd < 0) {
+               pr_err(FILE_NAME ": %s: Failed to get fd\n", __func__);
+               err = PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE;
+               goto err_out;
+       }
+
+       timeline = pvr_sync_timeline_fget(new_fence_timeline);
+       if (!timeline) {
+               pr_err(FILE_NAME ": %s: Failed to open supplied timeline fd (%d)\n",
+                       __func__, new_fence_timeline);
+               err = PVRSRV_ERROR_INVALID_PARAMS;
+               goto err_put_fd;
+       }
+
+       if (timeline->is_sw) {
+               /* This should never happen! */
+               pr_err(FILE_NAME ": %s: Request to create a pvr fence on sw timeline (%d)\n",
+                       __func__, new_fence_timeline);
+               err = PVRSRV_ERROR_INVALID_PARAMS;
+               goto err_put_timeline;
+       }
+
+       if (!timeline->hw_fence_context) {
+               /* First time we use this timeline, so create a context. */
+               timeline->hw_fence_context =
+                       pvr_fence_context_create(
+                               device,
+                               NativeSyncGetFenceStatusWq(),
+                               timeline->name);
+               if (!timeline->hw_fence_context) {
+                       pr_err(FILE_NAME ": %s: Failed to create fence context (%d)\n",
+                              __func__, new_fence_timeline);
+                       err = PVRSRV_ERROR_OUT_OF_MEMORY;
+                       goto err_put_timeline;
+               }
+#if defined(NO_HARDWARE)
+               /* Add timeline to active list */
+               INIT_LIST_HEAD(&timeline->list);
+               mutex_lock(&pvr_timeline_active_list_lock);
+               list_add_tail(&timeline->list, &pvr_timeline_active_list);
+               mutex_unlock(&pvr_timeline_active_list_lock);
+#endif
+       }
+
+       pvr_fence = pvr_fence_create(timeline->hw_fence_context,
+                                    psSyncCheckpointContext,
+                                    new_fence_timeline,
+                                    fence_name);
+       if (!pvr_fence) {
+               pr_err(FILE_NAME ": %s: Failed to create new pvr_fence\n",
+                       __func__);
+               err = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto err_put_timeline;
+       }
+
+       checkpoint = pvr_fence_get_checkpoint(pvr_fence);
+       if (!checkpoint) {
+               pr_err(FILE_NAME ": %s: Failed to get fence checkpoint\n",
+                       __func__);
+               err = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto err_destroy_fence;
+       }
+
+       sync_file = sync_file_create(&pvr_fence->base);
+       if (!sync_file) {
+               pr_err(FILE_NAME ": %s: Failed to create sync_file\n",
+                       __func__);
+               err = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto err_destroy_fence;
+       }
+       strlcpy(sync_file_user_name(sync_file),
+               pvr_fence->name,
+               sizeof(sync_file_user_name(sync_file)));
+       dma_fence_put(&pvr_fence->base);
+
+       *new_fence = new_fence_fd;
+       *fence_finalise_data = sync_file;
+       *new_checkpoint_handle = checkpoint;
+       *fence_uid = OSGetCurrentClientProcessIDKM();
+       *fence_uid = (*fence_uid << 32) | (new_fence_fd & U32_MAX);
+       /* not used but don't want to return dangling pointers */
+       *timeline_update_sync = NULL;
+       *timeline_update_value = 0;
+
+       pvr_sync_timeline_fput(timeline);
+err_out:
+       return err;
+
+err_destroy_fence:
+       pvr_fence_destroy(pvr_fence);
+err_put_timeline:
+       pvr_sync_timeline_fput(timeline);
+err_put_fd:
+       put_unused_fd(new_fence_fd);
+       *fence_uid = PVRSRV_NO_FENCE;
+       goto err_out;
+}
+
+/*
+ * This is the function that kick code will call in order to 'rollback' a
+ * created output fence should an error occur when submitting the kick.
+ * The OS native sync code needs to implement a function meeting this
+ * specification.
+ *
+ * Input: fence_to_rollback The PVRSRV_FENCE to be 'rolled back'. The fence
+ *                          should be destroyed and any actions taken due to
+ *                          its creation that need to be undone should be
+ *                          reverted.
+ * Input: finalise_data     The finalise data for the fence to be 'rolled back'.
+ */
+static enum PVRSRV_ERROR_TAG
+pvr_sync_rollback_fence_data(PVRSRV_FENCE fence_to_rollback,
+                            void *fence_data_to_rollback)
+{
+       struct sync_file *sync_file = fence_data_to_rollback;
+       struct pvr_fence *pvr_fence;
+
+       if (!sync_file || fence_to_rollback < 0) {
+               pr_err(FILE_NAME ": %s: Invalid fence (%d)\n", __func__,
+                       fence_to_rollback);
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       pvr_fence = to_pvr_fence(sync_file->fence);
+       if (!pvr_fence) {
+               pr_err(FILE_NAME
+                       ": %s: Non-PVR fence (%p)\n",
+                       __func__, sync_file->fence);
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       fput(sync_file->file);
+
+       put_unused_fd(fence_to_rollback);
+
+       return PVRSRV_OK;
+}
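+
+/*
+ * Sketch of how the three callbacks above fit together on the kick path
+ * (the calling code is assumed here; only the callback contract is taken
+ * from the comments above):
+ *
+ *   err = ops->pfnFenceCreate(dev, "kick", tl, ctx, &fd, &uid,
+ *                             &finalise_data, &checkpoint,
+ *                             &upd_sync, &upd_value);
+ *   ...submit work that references 'checkpoint'...
+ *   if (kick succeeded)
+ *           ops->pfnFenceFinalise(fd, finalise_data);     // fd_install()s the sync_file
+ *   else
+ *           ops->pfnFenceDataRollback(fd, finalise_data); // fput() + put_unused_fd()
+ */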
+
+/*
+ * This is the function that kick code will call in order to obtain a list of
+ * the PSYNC_CHECKPOINTs for a given PVRSRV_FENCE passed to a kick function.
+ * The OS native sync code will allocate the memory to hold the returned list
+ * of PSYNC_CHECKPOINT ptrs. The caller will free this memory once it has
+ * finished referencing it.
+ *
+ * Input: fence                     The input (check) fence
+ * Output: nr_checkpoints           The number of PVRSRV_SYNC_CHECKPOINT ptrs
+ *                                  returned in the checkpoint_handles
+ *                                  parameter.
+ * Output: fence_uid                Unique ID of the check fence
+ * Input/Output: checkpoint_handles The returned list of PVRSRV_SYNC_CHECKPOINTs.
+ */
+static enum PVRSRV_ERROR_TAG
+pvr_sync_resolve_fence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+                      PVRSRV_FENCE fence_to_resolve, u32 *nr_checkpoints,
+                      PSYNC_CHECKPOINT **checkpoint_handles, u64 *fence_uid)
+{
+       PSYNC_CHECKPOINT *checkpoints = NULL;
+       unsigned int i, num_fences = 0, num_used_fences = 0;
+       struct dma_fence **fences = NULL;
+       struct dma_fence *fence;
+       PVRSRV_ERROR err = PVRSRV_OK;
+
+       if (!nr_checkpoints || !checkpoint_handles || !fence_uid) {
+               pr_err(FILE_NAME ": %s: Invalid input checkpoint pointer\n",
+                       __func__);
+               err = PVRSRV_ERROR_INVALID_PARAMS;
+               goto err_out;
+       }
+
+       *nr_checkpoints = 0;
+       *checkpoint_handles = NULL;
+       *fence_uid = 0;
+
+       if (fence_to_resolve < 0)
+               goto err_out;
+
+       fence = sync_file_get_fence(fence_to_resolve);
+       if (!fence) {
+               pr_err(FILE_NAME ": %s: Failed to read sync private data for fd %d\n",
+                       __func__, fence_to_resolve);
+               err = PVRSRV_ERROR_HANDLE_NOT_FOUND;
+               goto err_out;
+       }
+
+       if (dma_fence_is_array(fence)) {
+               struct dma_fence_array *array = to_dma_fence_array(fence);
+
+               if (array) {
+                       fences = array->fences;
+                       num_fences = array->num_fences;
+               }
+       } else {
+               fences = &fence;
+               num_fences = 1;
+       }
+
+       checkpoints = kmalloc_array(num_fences, sizeof(PSYNC_CHECKPOINT),
+                             GFP_KERNEL);
+       if (!checkpoints) {
+               err = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto err_put_fence;
+       }
+       for (i = 0; i < num_fences; i++) {
+               /*
+                * Only return the checkpoint if the fence is still active.
+                * Don't check for signalled on PDUMP drivers as we need
+                * to make sure that all fences make it to the pdump.
+                */
+#if !defined(PDUMP)
+               if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+                             &fences[i]->flags))
+#endif
+               {
+                       struct pvr_fence *pvr_fence =
+                               pvr_fence_create_from_fence(
+                                       pvr_sync_data.foreign_fence_context,
+                                       psSyncCheckpointContext,
+                                       fences[i],
+                                       fence_to_resolve,
+                                       "foreign");
+                       if (!pvr_fence) {
+                               pr_err(FILE_NAME ": %s: Failed to create fence\n",
+                                      __func__);
+                               err = PVRSRV_ERROR_OUT_OF_MEMORY;
+                               goto err_free_checkpoints;
+                       }
+                       checkpoints[num_used_fences] =
+                               pvr_fence_get_checkpoint(pvr_fence);
+                       SyncCheckpointTakeRef(checkpoints[num_used_fences]);
+                       ++num_used_fences;
+                       dma_fence_put(&pvr_fence->base);
+               }
+       }
+       /* If we don't return any checkpoints, delete the array because
+        * the caller will not.
+        */
+       if (num_used_fences == 0) {
+               kfree(checkpoints);
+               checkpoints = NULL;
+       }
+
+       *checkpoint_handles = checkpoints;
+       *nr_checkpoints = num_used_fences;
+       *fence_uid = OSGetCurrentClientProcessIDKM();
+       *fence_uid = (*fence_uid << 32) | (fence_to_resolve & U32_MAX);
+
+err_put_fence:
+       dma_fence_put(fence);
+err_out:
+       return err;
+
+err_free_checkpoints:
+       for (i = 0; i < num_used_fences; i++) {
+               if (checkpoints[i])
+                       SyncCheckpointDropRef(checkpoints[i]);
+       }
+       kfree(checkpoints);
+       goto err_put_fence;
+}
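+
+/*
+ * Ownership note for pvr_sync_resolve_fence(): each returned checkpoint has
+ * had a reference taken via SyncCheckpointTakeRef() and the array itself is
+ * allocated here, so the caller is expected to drop those references when it
+ * has finished with them and to release the array through the registered
+ * pfnFreeCheckpointListMem callback (pvr_sync_free_checkpoint_list_mem
+ * above), e.g. (illustrative only):
+ *
+ *   ops->pfnFreeCheckpointListMem(checkpoint_handles);
+ */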
+
+/*
+ * This is the function that driver code will call in order to request the
+ * sync implementation to output debug information relating to any sync
+ * checkpoints it may have created which appear in the provided array of
+ * FW addresses of Unified Fence Objects (UFOs).
+ *
+ * Input: nr_ufos             The number of FW addresses provided in the
+ *                            vaddrs parameter.
+ * Input: vaddrs              The array of FW addresses of UFOs. The sync
+ *                            implementation should check each of these to
+ *                            see if any relate to sync checkpoints it has
+ *                            created and where they do output debug information
+ *                            pertaining to the native/fallback sync with
+ *                            which it is associated.
+ */
+static u32
+pvr_sync_dump_info_on_stalled_ufos(u32 nr_ufos, u32 *vaddrs)
+{
+       return pvr_fence_dump_info_on_stalled_ufos(pvr_sync_data.foreign_fence_context,
+                                                  nr_ufos,
+                                                  vaddrs);
+}
+
+#if defined(PDUMP)
+static enum PVRSRV_ERROR_TAG
+pvr_sync_fence_get_checkpoints(PVRSRV_FENCE fence_to_pdump, u32 *nr_checkpoints,
+                               struct SYNC_CHECKPOINT_TAG ***checkpoint_handles)
+{
+       struct dma_fence **fences = NULL;
+       struct dma_fence *fence;
+       struct pvr_fence *pvr_fence;
+       struct SYNC_CHECKPOINT_TAG **checkpoints = NULL;
+       unsigned int i, num_fences, num_used_fences = 0;
+       enum PVRSRV_ERROR_TAG err;
+
+       if (fence_to_pdump < 0) {
+               err = PVRSRV_ERROR_INVALID_PARAMS;
+               goto err_out;
+       }
+
+       if (!nr_checkpoints || !checkpoint_handles) {
+               pr_err(FILE_NAME ": %s: Invalid input checkpoint pointer\n",
+                       __func__);
+               err = PVRSRV_ERROR_INVALID_PARAMS;
+               goto err_out;
+       }
+
+       fence = sync_file_get_fence(fence_to_pdump);
+       if (!fence) {
+               pr_err(FILE_NAME ": %s: Failed to read sync private data for fd %d\n",
+                       __func__, fence_to_pdump);
+               err = PVRSRV_ERROR_HANDLE_NOT_FOUND;
+               goto err_out;
+       }
+
+       if (dma_fence_is_array(fence)) {
+               struct dma_fence_array *array = to_dma_fence_array(fence);
+
+               fences = array->fences;
+               num_fences = array->num_fences;
+       } else {
+               fences = &fence;
+               num_fences = 1;
+       }
+
+       checkpoints = kmalloc_array(num_fences, sizeof(*checkpoints),
+                             GFP_KERNEL);
+       if (!checkpoints) {
+               pr_err("pvr_sync_file: %s: Failed to alloc memory for returned list of sync checkpoints\n",
+                       __func__);
+               err = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto err_put_fence;
+       }
+
+       for (i = 0; i < num_fences; i++) {
+               pvr_fence = to_pvr_fence(fences[i]);
+               if (!pvr_fence)
+                       continue;
+               checkpoints[num_used_fences] = pvr_fence_get_checkpoint(pvr_fence);
+               ++num_used_fences;
+       }
+
+       *checkpoint_handles = checkpoints;
+       *nr_checkpoints = num_used_fences;
+       err = PVRSRV_OK;
+
+err_put_fence:
+       dma_fence_put(fence);
+err_out:
+       return err;
+}
+#endif
+
+int pvr_sync_api_rename(void *api_priv, void *user_data)
+{
+       struct pvr_sync_timeline *timeline = api_priv;
+       struct pvr_sync_rename_ioctl_data *data = user_data;
+
+       data->szName[sizeof(data->szName) - 1] = '\0';
+       strlcpy(timeline->name, data->szName, sizeof(timeline->name));
+       if (timeline->hw_fence_context)
+               strlcpy(timeline->hw_fence_context->name, data->szName,
+                       sizeof(timeline->hw_fence_context->name));
+
+       return 0;
+}
+
+int pvr_sync_api_force_sw_only(void *api_priv, void **api_priv_new)
+{
+       struct pvr_sync_timeline *timeline = api_priv;
+
+       /* Already in SW mode? */
+       if (timeline->sw_fence_timeline)
+               return 0;
+
+       /* Create a sw_sync timeline with the old GPU timeline's name */
+       timeline->sw_fence_timeline = pvr_counting_fence_timeline_create(
+               timeline->name);
+       if (!timeline->sw_fence_timeline)
+               return -ENOMEM;
+
+       timeline->is_sw = true;
+
+       return 0;
+}
+
+int pvr_sync_api_sw_create_fence(void *api_priv, void *user_data)
+{
+       struct pvr_sync_timeline *timeline = api_priv;
+       struct pvr_sw_sync_create_fence_data *data = user_data;
+       struct sync_file *sync_file;
+       int fd = get_unused_fd_flags(O_CLOEXEC);
+       struct dma_fence *fence;
+       int err;
+
+       if (fd < 0) {
+               pr_err(FILE_NAME ": %s: Failed to find unused fd (%d)\n",
+                      __func__, fd);
+               err = -EMFILE;
+               goto err_out;
+       }
+
+       fence = pvr_counting_fence_create(timeline->sw_fence_timeline, &data->sync_pt_idx);
+       if (!fence) {
+               pr_err(FILE_NAME ": %s: Failed to create a sync point (%d)\n",
+                      __func__, fd);
+               err = -ENOMEM;
+               goto err_put_fd;
+       }
+
+       sync_file = sync_file_create(fence);
+       dma_fence_put(fence);
+       if (!sync_file) {
+               pr_err(FILE_NAME ": %s: Failed to create a sync point (%d)\n",
+                       __func__, fd);
+               err = -ENOMEM;
+               goto err_put_fd;
+       }
+
+       data->fence = fd;
+
+       fd_install(fd, sync_file->file);
+
+       return 0;
+
+err_put_fd:
+       put_unused_fd(fd);
+err_out:
+       return err;
+}
+
+int pvr_sync_api_sw_inc(void *api_priv, void *user_data)
+{
+       struct pvr_sync_timeline *timeline = api_priv;
+       struct pvr_sw_timeline_advance_data *data = user_data;
+       bool res;
+
+       res = pvr_counting_fence_timeline_inc(timeline->sw_fence_timeline, &data->sync_pt_idx);
+
+       /* pvr_counting_fence_timeline_inc won't allow sw timeline to be
+        * advanced beyond the last defined point
+        */
+       if (!res) {
+               pr_err("pvr_sync_file: attempt to advance SW timeline beyond last defined point\n");
+               return -EPERM;
+       }
+
+       return 0;
+}
+
+static void
+pvr_sync_debug_request_heading(void *data, u32 verbosity,
+                               DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                               void *pvDumpDebugFile)
+{
+       if (DD_VERB_LVL_ENABLED(verbosity, DEBUG_REQUEST_VERBOSITY_MEDIUM))
+               PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+                                 "------[ Native Fence Sync: timelines ]------");
+}
+
+enum PVRSRV_ERROR_TAG pvr_sync_register_functions(void)
+{
+       /* Initialise the sync_checkpoint_ops struct and register the fence
+        * resolve and create functions with sync_checkpoint.c. The pvr_fence
+        * context registers its own EventObject callback to update sync
+        * status.
+        */
+       pvr_sync_data.sync_checkpoint_ops.pfnFenceResolve = pvr_sync_resolve_fence;
+       pvr_sync_data.sync_checkpoint_ops.pfnFenceCreate = pvr_sync_create_fence;
+       pvr_sync_data.sync_checkpoint_ops.pfnFenceDataRollback = pvr_sync_rollback_fence_data;
+       pvr_sync_data.sync_checkpoint_ops.pfnFenceFinalise = pvr_sync_finalise_fence;
+#if defined(NO_HARDWARE)
+       pvr_sync_data.sync_checkpoint_ops.pfnNoHWUpdateTimelines = pvr_sync_nohw_signal_fence;
+#else
+       pvr_sync_data.sync_checkpoint_ops.pfnNoHWUpdateTimelines = NULL;
+#endif
+       pvr_sync_data.sync_checkpoint_ops.pfnFreeCheckpointListMem =
+               pvr_sync_free_checkpoint_list_mem;
+       pvr_sync_data.sync_checkpoint_ops.pfnDumpInfoOnStalledUFOs =
+               pvr_sync_dump_info_on_stalled_ufos;
+       strlcpy(pvr_sync_data.sync_checkpoint_ops.pszImplName, "pvr_sync_file",
+               SYNC_CHECKPOINT_IMPL_MAX_STRLEN);
+#if defined(PDUMP)
+       pvr_sync_data.sync_checkpoint_ops.pfnSyncFenceGetCheckpoints =
+               pvr_sync_fence_get_checkpoints;
+#endif
+
+       return SyncCheckpointRegisterFunctions(&pvr_sync_data.sync_checkpoint_ops);
+}
+
+int pvr_sync_init(void)
+{
+       int err;
+
+       pvr_sync_data.foreign_fence_context =
+                       pvr_fence_foreign_context_create(
+                                       NativeSyncGetFenceStatusWq(),
+                                       "foreign_sync");
+       if (!pvr_sync_data.foreign_fence_context) {
+               pr_err(FILE_NAME ": %s: Failed to create foreign sync context\n",
+                       __func__);
+               err = -ENOMEM;
+               goto err_out;
+       }
+
+#if defined(NO_HARDWARE)
+       INIT_LIST_HEAD(&pvr_timeline_active_list);
+#endif
+
+       err = pvr_sync_ioctl_init();
+       if (err) {
+               pr_err(FILE_NAME ": %s: Failed to register pvr_sync device (%d)\n",
+                      __func__, err);
+               goto err_ioctl_init;
+       }
+
+       return 0;
+
+err_ioctl_init:
+       pvr_fence_context_destroy(pvr_sync_data.foreign_fence_context);
+       pvr_fence_cleanup();
+err_out:
+       return err;
+}
+
+void pvr_sync_deinit(void)
+{
+       pvr_sync_ioctl_deinit();
+       pvr_fence_context_destroy(pvr_sync_data.foreign_fence_context);
+       pvr_fence_cleanup();
+}
+
+enum PVRSRV_ERROR_TAG pvr_sync_device_init(struct device *dev)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct pvr_drm_private *priv = ddev->dev_private;
+       enum PVRSRV_ERROR_TAG error;
+
+       error = PVRSRVRegisterDeviceDbgRequestNotify(
+                               &priv->sync_debug_notify_handle,
+                               priv->dev_node,
+                               pvr_sync_debug_request_heading,
+                               DEBUG_REQUEST_LINUXFENCE,
+                               NULL);
+       if (error != PVRSRV_OK) {
+               pr_err("%s: failed to register debug request callback (%s)\n",
+                      __func__, PVRSRVGetErrorString(error));
+               goto err_out;
+       }
+
+       /* Register the foreign sync context debug notifier on each device */
+       error = pvr_fence_context_register_dbg(
+                               &priv->sync_foreign_debug_notify_handle,
+                               priv->dev_node,
+                               pvr_sync_data.foreign_fence_context);
+       if (error != PVRSRV_OK) {
+               pr_err("%s: failed to register fence debug request callback (%s)\n",
+                      __func__, PVRSRVGetErrorString(error));
+               goto err_context_regdbg;
+       }
+
+#if defined(NO_HARDWARE)
+       INIT_LIST_HEAD(&pvr_timeline_active_list);
+#endif
+
+       return PVRSRV_OK;
+
+err_context_regdbg:
+       PVRSRVUnregisterDeviceDbgRequestNotify(priv->sync_debug_notify_handle);
+err_out:
+       return error;
+}
+
+void pvr_sync_device_deinit(struct device *dev)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct pvr_drm_private *priv = ddev->dev_private;
+
+       PVRSRVUnregisterDeviceDbgRequestNotify(priv->sync_foreign_debug_notify_handle);
+       PVRSRVUnregisterDeviceDbgRequestNotify(priv->sync_debug_notify_handle);
+}
+
+enum PVRSRV_ERROR_TAG pvr_sync_fence_wait(void *fence, u32 timeout_in_ms)
+{
+       long timeout = msecs_to_jiffies(timeout_in_ms);
+       int err;
+
+       err = dma_fence_wait_timeout(fence, true, timeout);
+       /*
+        * dma_fence_wait_timeout returns:
+        * - the remaining timeout on success
+        * - 0 on timeout
+        * - -ERESTARTSYS if interrupted
+        */
+       if (err > 0)
+               return PVRSRV_OK;
+       else if (err == 0)
+               return PVRSRV_ERROR_TIMEOUT;
+
+       return PVRSRV_ERROR_FAILED_DEPENDENCIES;
+}
+
+enum PVRSRV_ERROR_TAG pvr_sync_fence_release(void *fence)
+{
+       dma_fence_put(fence);
+
+       return PVRSRV_OK;
+}
+
+enum PVRSRV_ERROR_TAG pvr_sync_fence_get(int fence_fd, void **fence_out)
+{
+       struct dma_fence *fence;
+
+       fence = sync_file_get_fence(fence_fd);
+       if (fence == NULL)
+               return PVRSRV_ERROR_INVALID_PARAMS;
+
+       *fence_out = fence;
+
+       return PVRSRV_OK;
+}
+
+enum PVRSRV_ERROR_TAG
+pvr_sync_sw_timeline_fence_create(struct _PVRSRV_DEVICE_NODE_ *pvrsrv_dev_node,
+                                 int timeline_fd,
+                                 const char *fence_name,
+                                 int *fence_fd_out,
+                                 u64 *sync_pt_idx)
+{
+       enum PVRSRV_ERROR_TAG srv_err;
+       struct pvr_sync_timeline *timeline;
+       struct dma_fence *fence = NULL;
+       struct sync_file *sync_file = NULL;
+       int fd;
+
+       (void)(pvrsrv_dev_node);
+
+       fd = get_unused_fd_flags(O_CLOEXEC);
+       if (fd < 0)
+               return PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE;
+
+       timeline = pvr_sync_timeline_fget(timeline_fd);
+       if (!timeline) {
+               /* unrecognised timeline */
+               srv_err = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+               goto err_put_fd;
+       }
+       if (!timeline->is_sw) {
+               pvr_sync_timeline_fput(timeline);
+               srv_err = PVRSRV_ERROR_INVALID_PARAMS;
+               goto err_put_fd;
+       }
+
+       fence = pvr_counting_fence_create(timeline->sw_fence_timeline, sync_pt_idx);
+       pvr_sync_timeline_fput(timeline);
+       if (!fence) {
+               srv_err = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto err_put_fd;
+       }
+
+       sync_file = sync_file_create(fence);
+       dma_fence_put(fence);
+       if (!sync_file) {
+               srv_err = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto err_put_fd;
+       }
+
+       fd_install(fd, sync_file->file);
+
+       *fence_fd_out = fd;
+
+       return PVRSRV_OK;
+
+err_put_fd:
+       put_unused_fd(fd);
+       return srv_err;
+}
+
+enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_advance(void *timeline, u64 *sync_pt_idx)
+{
+       if (timeline == NULL)
+               return PVRSRV_ERROR_INVALID_PARAMS;
+
+       pvr_counting_fence_timeline_inc(timeline, sync_pt_idx);
+
+       return PVRSRV_OK;
+}
+
+enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_release(void *timeline)
+{
+       if (timeline == NULL)
+               return PVRSRV_ERROR_INVALID_PARAMS;
+
+       pvr_counting_fence_timeline_put(timeline);
+
+       return PVRSRV_OK;
+}
+
+enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_get(int timeline_fd,
+                                          void **timeline_out)
+{
+       struct pvr_counting_fence_timeline *sw_timeline;
+       struct pvr_sync_timeline *timeline;
+
+       timeline = pvr_sync_timeline_fget(timeline_fd);
+       if (!timeline)
+               return PVRSRV_ERROR_INVALID_PARAMS;
+
+       sw_timeline =
+               pvr_counting_fence_timeline_get(timeline->sw_fence_timeline);
+       pvr_sync_timeline_fput(timeline);
+       if (!sw_timeline)
+               return PVRSRV_ERROR_INVALID_PARAMS;
+
+       *timeline_out = sw_timeline;
+
+       return PVRSRV_OK;
+}
+
+static void _dump_sync_point(struct dma_fence *fence,
+                            DUMPDEBUG_PRINTF_FUNC *dump_debug_printf,
+                            void *dump_debug_file)
+{
+       const struct dma_fence_ops *fence_ops = fence->ops;
+       bool signaled = dma_fence_is_signaled(fence);
+       char time[16] = { '\0' };
+
+       /* timeline_value_str is optional in dma_fence_ops */
+       if (fence_ops->timeline_value_str)
+               fence_ops->timeline_value_str(fence, time, sizeof(time));
+
+       PVR_DUMPDEBUG_LOG(dump_debug_printf,
+                         dump_debug_file,
+                         "<%p> Seq#=%llu TS=%s State=%s TLN=%s",
+                         fence,
+                         (u64) fence->seqno,
+                         time,
+                         (signaled) ? "Signalled" : "Active",
+                         fence_ops->get_timeline_name(fence));
+}
+
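+/*
+ * Dump a fence for debug purposes: a dma_fence_array is expanded and each
+ * of its component fences is dumped, otherwise the single fence is dumped.
+ */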
+static void _dump_fence(struct dma_fence *fence,
+                       DUMPDEBUG_PRINTF_FUNC *dump_debug_printf,
+                       void *dump_debug_file)
+{
+       if (dma_fence_is_array(fence)) {
+               struct dma_fence_array *fence_array = to_dma_fence_array(fence);
+               int i;
+
+               if (fence_array) {
+                       PVR_DUMPDEBUG_LOG(dump_debug_printf,
+                                         dump_debug_file,
+                                         "Fence: [%p] Sync Points:\n",
+                                         fence_array);
+
+                       for (i = 0; i < fence_array->num_fences; i++)
+                               _dump_sync_point(fence_array->fences[i],
+                                                dump_debug_printf,
+                                                dump_debug_file);
+               }
+
+       } else {
+               _dump_sync_point(fence, dump_debug_printf, dump_debug_file);
+       }
+}
+
+enum PVRSRV_ERROR_TAG
+sync_dump_fence(void *sw_fence_obj,
+               DUMPDEBUG_PRINTF_FUNC *dump_debug_printf,
+               void *dump_debug_file)
+{
+       struct dma_fence *fence = (struct dma_fence *) sw_fence_obj;
+
+       _dump_fence(fence, dump_debug_printf, dump_debug_file);
+
+       return PVRSRV_OK;
+}
+
+enum PVRSRV_ERROR_TAG
+sync_sw_dump_timeline(void *sw_timeline_obj,
+                     DUMPDEBUG_PRINTF_FUNC *dump_debug_printf,
+                     void *dump_debug_file)
+{
+       pvr_counting_fence_timeline_dump_timeline(sw_timeline_obj,
+                                                 dump_debug_printf,
+                                                 dump_debug_file);
+
+       return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sync_ioctl_common.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sync_ioctl_common.c
new file mode 100644 (file)
index 0000000..60ba355
--- /dev/null
@@ -0,0 +1,277 @@
+/*
+ * @File        pvr_sync_ioctl_common.c
+ * @Title       Kernel driver for Android's sync mechanism
+ * @Codingstyle LinuxKernel
+ * @Copyright   Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+ * @License     Dual MIT/GPLv2
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License Version 2 ("GPL") in which case the provisions
+ * of GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms of
+ * GPL, and not to allow others to use your version of this file under the terms
+ * of the MIT license, indicate your decision by deleting the provisions above
+ * and replace them with the notice and other provisions required by GPL as set
+ * out in the file called "GPL-COPYING" included in this distribution. If you do
+ * not delete the provisions above, a recipient may use your version of this file
+ * under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT-COPYING".
+ *
+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/slab.h>
+
+#include "pvr_drm.h"
+#include "pvr_sync_api.h"
+#include "pvr_sync_ioctl_common.h"
+
+/*
+ * The PVR Sync API is unusual in that some operations configure the
+ * timeline for use, and are no longer allowed once the timeline is
+ * in use. A locking mechanism, such as a read/write semaphore, would be
+ * one way of enforcing the API rules, but it would add unnecessary
+ * overhead once the timeline has been configured, as read locks would
+ * still have to be taken after the timeline is in use. To avoid locks,
+ * two atomic variables are used,
+ * together with memory barriers. The in_setup variable indicates a "rename"
+ * or "force software only" ioctl is in progress. At most one of these two
+ * configuration ioctls can be in progress at any one time, and they can't
+ * overlap with any other Sync ioctl. The in_use variable indicates one
+ * of the other Sync ioctls has started. Once set, in_use stays set, and
+ * prevents any further configuration ioctls. Non-configuration ioctls
+ * are allowed to overlap.
+ * It is possible for a configuration and non-configuration ioctl to race,
+ * but at most one will be allowed to proceed, and perhaps neither.
+ * Given the intended usage of the API in user space, where the timeline
+ * is fully configured before being used, the race behaviour won't be
+ * an issue.
+ */
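+
+/*
+ * Illustrative ordering only (not part of the driver): user space is
+ * expected to finish configuring a timeline before using it, e.g.
+ *
+ *   rename / force-SW-only ioctls   - setup, allowed while unused
+ *   SW create-fence / SW inc ioctls - mark the timeline as in use
+ *   any further rename attempt      - rejected with -EBUSY
+ *
+ * pvr_sync_ioctl_common() below enforces exactly this split between the
+ * configuration and non-configuration commands.
+ */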
+
+struct pvr_sync_file_data {
+       atomic_t in_setup;
+       atomic_t in_use;
+       void *api_private;
+       bool is_sw;
+};
+
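+/*
+ * in_use acts as a small state machine: 0 = the timeline has not been used
+ * yet, 1 = a non-configuration ioctl is in the process of claiming it,
+ * 2 = the timeline is committed as "in use" and configuration ioctls are
+ * refused from then on. Returns false if a configuration ioctl is observed
+ * to be in progress while claiming.
+ */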
+static bool pvr_sync_set_in_use(struct pvr_sync_file_data *fdata)
+{
+       if (atomic_read(&fdata->in_use) < 2) {
+               atomic_set(&fdata->in_use, 1);
+               /* Ensure in_use change is visible before in_setup is read */
+               smp_mb();
+               if (atomic_read(&fdata->in_setup) != 0)
+                       return false;
+
+               atomic_set(&fdata->in_use, 2);
+       } else {
+               /* Ensure stale private data isn't read */
+               smp_rmb();
+       }
+
+       return true;
+}
+
+static bool pvr_sync_set_in_setup(struct pvr_sync_file_data *fdata)
+{
+       int in_setup;
+
+       in_setup = atomic_inc_return(&fdata->in_setup);
+       if (in_setup > 1 || atomic_read(&fdata->in_use) != 0) {
+               atomic_dec(&fdata->in_setup);
+               return false;
+       }
+
+       return true;
+}
+
+static inline void pvr_sync_reset_in_setup(struct pvr_sync_file_data *fdata)
+{
+       /*
+        * Ensure setup changes are visible before allowing other
+        * operations to proceed.
+        */
+       smp_mb__before_atomic();
+       atomic_dec(&fdata->in_setup);
+}
+
+void *pvr_sync_get_api_priv_common(struct file *file)
+{
+       if (file != NULL && pvr_sync_is_timeline(file)) {
+               struct pvr_sync_file_data *fdata = pvr_sync_get_private_data(file);
+
+               if (fdata != NULL && pvr_sync_set_in_use(fdata))
+                       return fdata->api_private;
+       }
+
+       return NULL;
+}
+
+int pvr_sync_open_common(void *connection_data, void *file_handle)
+{
+       void *data = NULL;
+       struct pvr_sync_file_data *fdata;
+       int err;
+
+       fdata = kzalloc(sizeof(*fdata), GFP_KERNEL);
+       if (!fdata)
+               return -ENOMEM;
+
+       atomic_set(&fdata->in_setup, 0);
+       atomic_set(&fdata->in_use, 0);
+
+       if (!pvr_sync_set_private_data(connection_data, fdata)) {
+               kfree(fdata);
+               return -EINVAL;
+       }
+
+       err = pvr_sync_api_init(file_handle, &data);
+       if (err)
+               kfree(fdata);
+       else
+               fdata->api_private = data;
+
+       return err;
+}
+
+int pvr_sync_close_common(void *connection_data)
+{
+       struct pvr_sync_file_data *fdata;
+
+       fdata = pvr_sync_connection_private_data(connection_data);
+       if (fdata) {
+               int err;
+
+               err = pvr_sync_api_deinit(fdata->api_private, fdata->is_sw);
+
+               kfree(fdata);
+
+               return err;
+       }
+
+       return 0;
+}
+
+static inline int pvr_sync_ioctl_rename(void *api_priv, void *arg)
+{
+       struct pvr_sync_rename_ioctl_data *data = arg;
+
+       return pvr_sync_api_rename(api_priv, data);
+}
+
+static inline int pvr_sync_ioctl_force_sw_only(struct pvr_sync_file_data *fdata)
+{
+       void *data = fdata->api_private;
+       int err;
+
+       err = pvr_sync_api_force_sw_only(fdata->api_private, &data);
+       if (!err) {
+               if (data != fdata->api_private)
+                       fdata->api_private = data;
+
+               fdata->is_sw = true;
+       }
+
+       return err;
+}
+
+static inline int pvr_sync_ioctl_sw_create_fence(void *api_priv, void *arg)
+{
+       struct pvr_sw_sync_create_fence_data *data = arg;
+
+       return pvr_sync_api_sw_create_fence(api_priv, data);
+}
+
+static inline int pvr_sync_ioctl_sw_inc(void *api_priv, void *arg)
+{
+       struct pvr_sw_timeline_advance_data *data = arg;
+
+       return pvr_sync_api_sw_inc(api_priv, data);
+}
+
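+/*
+ * Common ioctl dispatcher. Configuration commands (rename, force SW only)
+ * are gated by in_setup and rejected with -EBUSY once the timeline is in
+ * use; all other commands mark the timeline as in use and are only valid
+ * on timelines that have been forced to software-only mode.
+ */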
+int pvr_sync_ioctl_common(struct file *file, unsigned int cmd, void *arg)
+{
+       int err = -ENOTTY;
+       struct pvr_sync_file_data *fdata;
+       bool in_setup;
+
+       fdata = pvr_sync_get_private_data(file);
+       if (!fdata)
+               return -EINVAL;
+
+       switch (cmd) {
+       case DRM_PVR_SYNC_RENAME_CMD:
+       case DRM_PVR_SYNC_FORCE_SW_ONLY_CMD:
+               if (!pvr_sync_set_in_setup(fdata))
+                       return -EBUSY;
+
+               in_setup = true;
+               break;
+       default:
+               if (!pvr_sync_set_in_use(fdata))
+                       return -EBUSY;
+
+               in_setup = false;
+               break;
+       }
+
+       if (in_setup) {
+               if (fdata->is_sw)
+                       err = -ENOTTY;
+               else
+                       switch (cmd) {
+                       case DRM_PVR_SYNC_RENAME_CMD:
+                               err = pvr_sync_ioctl_rename(fdata->api_private,
+                                                           arg);
+                               break;
+                       case DRM_PVR_SYNC_FORCE_SW_ONLY_CMD:
+                               err = pvr_sync_ioctl_force_sw_only(fdata);
+                               break;
+                       default:
+                               break;
+                       }
+       } else {
+               if (!fdata->is_sw)
+                       err = -ENOTTY;
+               else
+                       switch (cmd) {
+                       case DRM_PVR_SW_SYNC_CREATE_FENCE_CMD:
+                               err = pvr_sync_ioctl_sw_create_fence(fdata->api_private,
+                                                                    arg);
+                               break;
+                       case DRM_PVR_SW_SYNC_INC_CMD:
+                               err = pvr_sync_ioctl_sw_inc(fdata->api_private,
+                                                           arg);
+                               break;
+                       default:
+                               break;
+                       }
+       }
+
+       if (in_setup)
+               pvr_sync_reset_in_setup(fdata);
+
+       return err;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sync_ioctl_common.h b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sync_ioctl_common.h
new file mode 100644 (file)
index 0000000..ef12dc2
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * @File        pvr_sync_ioctl_common.h
+ * @Title       Kernel driver for Android's sync mechanism
+ * @Codingstyle LinuxKernel
+ * @Copyright   Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+ * @License     Dual MIT/GPLv2
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License Version 2 ("GPL") in which case the provisions
+ * of GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms of
+ * GPL, and not to allow others to use your version of this file under the terms
+ * of the MIT license, indicate your decision by deleting the provisions above
+ * and replace them with the notice and other provisions required by GPL as set
+ * out in the file called "GPL-COPYING" included in this distribution. If you do
+ * not delete the provisions above, a recipient may use your version of this file
+ * under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT-COPYING".
+ *
+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _PVR_SYNC_IOCTL_COMMON_H
+#define _PVR_SYNC_IOCTL_COMMON_H
+
+struct file;
+
+/* Functions provided by pvr_sync_ioctl_common */
+
+int pvr_sync_open_common(void *connection_data, void *file_handle);
+int pvr_sync_close_common(void *connection_data);
+int pvr_sync_ioctl_common(struct file *file, unsigned int cmd, void *arg);
+void *pvr_sync_get_api_priv_common(struct file *file);
+
+struct pvr_sync_file_data;
+
+/* Functions required by pvr_sync_ioctl_common */
+
+bool pvr_sync_set_private_data(void *connection_data,
+                              struct pvr_sync_file_data *fdata);
+
+struct pvr_sync_file_data *
+pvr_sync_connection_private_data(void *connection_data);
+
+struct pvr_sync_file_data *
+pvr_sync_get_private_data(struct file *file);
+
+bool pvr_sync_is_timeline(struct file *file);
+
+#endif /* _PVR_SYNC_IOCTL_COMMON_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sync_ioctl_drm.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sync_ioctl_drm.c
new file mode 100644 (file)
index 0000000..423c8d3
--- /dev/null
@@ -0,0 +1,168 @@
+/*
+ * @File        pvr_sync_ioctl_drm.c
+ * @Title       Kernel driver for Android's sync mechanism
+ * @Codingstyle LinuxKernel
+ * @Copyright   Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+ * @License     Dual MIT/GPLv2
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License Version 2 ("GPL") in which case the provisions
+ * of GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms of
+ * GPL, and not to allow others to use your version of this file under the terms
+ * of the MIT license, indicate your decision by deleting the provisions above
+ * and replace them with the notice and other provisions required by GPL as set
+ * out in the file called "GPL-COPYING" included in this distribution. If you do
+ * not delete the provisions above, a recipient may use your version of this file
+ * under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT-COPYING".
+ *
+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "pvr_drv.h"
+#include "pvr_drm.h"
+#include "private_data.h"
+#include "env_connection.h"
+#include "pvr_sync_api.h"
+#include "pvr_sync_ioctl_common.h"
+#include "pvr_sync_ioctl_drm.h"
+
+bool pvr_sync_set_private_data(void *connection_data,
+                              struct pvr_sync_file_data *fdata)
+{
+       if (connection_data) {
+               ENV_CONNECTION_DATA *env_data;
+
+               env_data = PVRSRVConnectionPrivateData(connection_data);
+               if (env_data) {
+                       env_data->pvPvrSyncPrivateData = fdata;
+
+                       return true;
+               }
+       }
+
+       return false;
+}
+
+struct pvr_sync_file_data *
+pvr_sync_connection_private_data(void *connection_data)
+{
+       if (connection_data) {
+               ENV_CONNECTION_DATA *env_data;
+
+               env_data = PVRSRVConnectionPrivateData(connection_data);
+
+               if (env_data)
+                       return env_data->pvPvrSyncPrivateData;
+       }
+
+       return NULL;
+}
+
+struct pvr_sync_file_data *
+pvr_sync_get_private_data(struct file *file)
+{
+       CONNECTION_DATA *connection_data = LinuxSyncConnectionFromFile(file);
+
+       return pvr_sync_connection_private_data(connection_data);
+}
+
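+/*
+ * In the DRM build a "timeline" is simply a file opened through the PVR DRM
+ * fops; pvr_sync_ioctl_init() registers no separate timeline device node.
+ */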
+bool pvr_sync_is_timeline(struct file *file)
+{
+       return file->f_op == &pvr_drm_fops;
+}
+
+void *pvr_sync_get_api_priv(struct file *file)
+{
+       return pvr_sync_get_api_priv_common(file);
+}
+
+struct file *pvr_sync_get_file_struct(void *file_handle)
+{
+       if (file_handle) {
+               struct drm_file *file = file_handle;
+
+               return file->filp;
+       }
+
+       return NULL;
+}
+
+int pvr_sync_open(void *connection_data, struct drm_file *file)
+{
+       /*
+        * The file structure pointer (file->filp) may not have been
+        * initialised at this point, so pass down a pointer to the
+        * drm_file structure instead.
+        */
+       return pvr_sync_open_common(connection_data, file);
+}
+
+void pvr_sync_close(void *connection_data)
+{
+       int iErr = pvr_sync_close_common(connection_data);
+
+       if (iErr < 0)
+               pr_err("%s: ERROR (%d) returned by pvr_sync_close_common()\n",
+                      __func__, iErr);
+}
+
+int pvr_sync_rename_ioctl(struct drm_device __maybe_unused *dev,
+                         void *arg, struct drm_file *file)
+{
+       return pvr_sync_ioctl_common(file->filp,
+                                    DRM_PVR_SYNC_RENAME_CMD, arg);
+}
+
+int pvr_sync_force_sw_only_ioctl(struct drm_device __maybe_unused *dev,
+                                void *arg, struct drm_file *file)
+{
+       return pvr_sync_ioctl_common(file->filp,
+                                    DRM_PVR_SYNC_FORCE_SW_ONLY_CMD, arg);
+}
+
+int pvr_sw_sync_create_fence_ioctl(struct drm_device __maybe_unused *dev,
+                                  void *arg, struct drm_file *file)
+{
+       return pvr_sync_ioctl_common(file->filp,
+                                    DRM_PVR_SW_SYNC_CREATE_FENCE_CMD, arg);
+}
+
+int pvr_sw_sync_inc_ioctl(struct drm_device __maybe_unused *dev,
+                         void *arg, struct drm_file *file)
+{
+       return pvr_sync_ioctl_common(file->filp,
+                                    DRM_PVR_SW_SYNC_INC_CMD, arg);
+}
+
+int pvr_sync_ioctl_init(void)
+{
+       return 0;
+}
+
+void pvr_sync_ioctl_deinit(void)
+{
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sync_ioctl_drm.h b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_sync_ioctl_drm.h
new file mode 100644 (file)
index 0000000..756ce4b
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * @File        pvr_sync_ioctl_drm.h
+ * @Title       Kernel driver for Android's sync mechanism
+ * @Codingstyle LinuxKernel
+ * @Copyright   Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+ * @License     Dual MIT/GPLv2
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License Version 2 ("GPL") in which case the provisions
+ * of GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms of
+ * GPL, and not to allow others to use your version of this file under the terms
+ * of the MIT license, indicate your decision by deleting the provisions above
+ * and replace them with the notice and other provisions required by GPL as set
+ * out in the file called "GPL-COPYING" included in this distribution. If you do
+ * not delete the provisions above, a recipient may use your version of this file
+ * under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT-COPYING".
+ *
+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _PVR_SYNC_IOCTL_DRM_H
+#define _PVR_SYNC_IOCTL_DRM_H
+
+struct drm_device;
+struct drm_file;
+
+int pvr_sync_open(void *connection_data, struct drm_file *file);
+void pvr_sync_close(void *connection_data);
+
+int pvr_sync_rename_ioctl(struct drm_device *dev, void *arg,
+                         struct drm_file *file);
+int pvr_sync_force_sw_only_ioctl(struct drm_device *dev, void *arg,
+                                struct drm_file *file);
+int pvr_sw_sync_create_fence_ioctl(struct drm_device *dev, void *arg,
+                                  struct drm_file *file);
+int pvr_sw_sync_inc_ioctl(struct drm_device *dev, void *arg,
+                         struct drm_file *file);
+
+#endif /* _PVR_SYNC_IOCTL_DRM_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_uaccess.h b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/pvr_uaccess.h
new file mode 100644 (file)
index 0000000..13864ea
--- /dev/null
@@ -0,0 +1,99 @@
+/*************************************************************************/ /*!
+@File
+@Title          Utility functions for user space access
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef PVR_UACCESS_H
+#define PVR_UACCESS_H
+
+#include <linux/uaccess.h>
+#include <linux/version.h>
+
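+/*
+ * Like copy_to_user()/copy_from_user(), these helpers return the number of
+ * bytes that could NOT be copied (0 on success). If access_ok() rejects the
+ * user pointer, the full ulBytes is returned.
+ */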
+static inline unsigned long pvr_copy_to_user(void __user *pvTo, const void *pvFrom, unsigned long ulBytes)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0))
+       if (access_ok(VERIFY_WRITE, pvTo, ulBytes))
+#else
+       if (access_ok(pvTo, ulBytes))
+#endif
+       {
+               return __copy_to_user(pvTo, pvFrom, ulBytes);
+       }
+
+       return ulBytes;
+}
+
+
+#if defined(__KLOCWORK__)
+       /* This stub exists only to stop Klocwork reporting a false positive:
+        * the tool does not understand that pvr_copy_from_user() initialises
+        * the memory pointed to by pvTo.
+        */
+#include <linux/string.h> /* get the memset prototype */
+static inline unsigned long pvr_copy_from_user(void *pvTo, const void __user *pvFrom, unsigned long ulBytes)
+{
+       if (pvTo != NULL)
+       {
+               memset(pvTo, 0xAA, ulBytes);
+               return 0;
+       }
+       return 1;
+}
+
+#else /* real implementation */
+
+static inline unsigned long pvr_copy_from_user(void *pvTo, const void __user *pvFrom, unsigned long ulBytes)
+{
+       /*
+        * The compile time correctness checking introduced for copy_from_user in
+        * Linux 2.6.33 isn't fully compatible with our usage of the function.
+        */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0))
+       if (access_ok(VERIFY_READ, pvFrom, ulBytes))
+#else
+       if (access_ok(pvFrom, ulBytes))
+#endif
+       {
+               return __copy_from_user(pvTo, pvFrom, ulBytes);
+       }
+
+       return ulBytes;
+}
+#endif /* klocworks */
+
+#endif /* PVR_UACCESS_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/rogue_trace_events.h b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/rogue_trace_events.h
new file mode 100644 (file)
index 0000000..e592307
--- /dev/null
@@ -0,0 +1,543 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rogue
+
+#if !defined(ROGUE_TRACE_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define ROGUE_TRACE_EVENTS_H
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/time.h>
+
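+/*
+ * TP_printk() helpers: round a nanosecond timestamp to the nearest
+ * microsecond and split it into whole seconds (show_secs_from_ns) and the
+ * microsecond remainder within that second (show_usecs_from_ns), so events
+ * print as "ts=<secs>.<usecs>".
+ */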
+#define show_secs_from_ns(ns) \
+       ({ \
+               u64 t = (ns) + (NSEC_PER_USEC / 2); \
+               do_div(t, NSEC_PER_SEC); \
+               t; \
+       })
+
+#define show_usecs_from_ns(ns) \
+       ({ \
+               u64 t = (ns) + (NSEC_PER_USEC / 2); \
+               u32 rem; \
+               do_div(t, NSEC_PER_USEC); \
+               rem = do_div(t, USEC_PER_SEC); \
+               rem; \
+       })
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+int trace_fence_update_enabled_callback(void);
+#else
+void trace_fence_update_enabled_callback(void);
+#endif
+void trace_fence_update_disabled_callback(void);
+
+TRACE_EVENT_FN(rogue_fence_update,
+
+       TP_PROTO(const char *comm, const char *cmd, const char *dm, u32 ctx_id, u32 offset,
+               u32 sync_fwaddr, u32 sync_value),
+
+       TP_ARGS(comm, cmd, dm, ctx_id, offset, sync_fwaddr, sync_value),
+
+       TP_STRUCT__entry(
+               __string(       comm,           comm            )
+               __string(       cmd,            cmd             )
+               __string(       dm,             dm              )
+               __field(        u32,            ctx_id          )
+               __field(        u32,            offset          )
+               __field(        u32,            sync_fwaddr     )
+               __field(        u32,            sync_value      )
+       ),
+
+       TP_fast_assign(
+               __assign_str(comm, comm);
+               __assign_str(cmd, cmd);
+               __assign_str(dm, dm);
+               __entry->ctx_id = ctx_id;
+               __entry->offset = offset;
+               __entry->sync_fwaddr = sync_fwaddr;
+               __entry->sync_value = sync_value;
+       ),
+
+       TP_printk("comm=%s cmd=%s dm=%s ctx_id=%lu offset=%lu sync_fwaddr=%#lx sync_value=%#lx",
+               __get_str(comm),
+               __get_str(cmd),
+               __get_str(dm),
+               (unsigned long)__entry->ctx_id,
+               (unsigned long)__entry->offset,
+               (unsigned long)__entry->sync_fwaddr,
+               (unsigned long)__entry->sync_value),
+
+       trace_fence_update_enabled_callback,
+       trace_fence_update_disabled_callback
+);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+int trace_fence_check_enabled_callback(void);
+#else
+void trace_fence_check_enabled_callback(void);
+#endif
+void trace_fence_check_disabled_callback(void);
+
+TRACE_EVENT_FN(rogue_fence_check,
+
+       TP_PROTO(const char *comm, const char *cmd, const char *dm, u32 ctx_id, u32 offset,
+               u32 sync_fwaddr, u32 sync_value),
+
+       TP_ARGS(comm, cmd, dm, ctx_id, offset, sync_fwaddr, sync_value),
+
+       TP_STRUCT__entry(
+               __string(       comm,           comm            )
+               __string(       cmd,            cmd             )
+               __string(       dm,             dm              )
+               __field(        u32,            ctx_id          )
+               __field(        u32,            offset          )
+               __field(        u32,            sync_fwaddr     )
+               __field(        u32,            sync_value      )
+       ),
+
+       TP_fast_assign(
+               __assign_str(comm, comm);
+               __assign_str(cmd, cmd);
+               __assign_str(dm, dm);
+               __entry->ctx_id = ctx_id;
+               __entry->offset = offset;
+               __entry->sync_fwaddr = sync_fwaddr;
+               __entry->sync_value = sync_value;
+       ),
+
+       TP_printk("comm=%s cmd=%s dm=%s ctx_id=%lu offset=%lu sync_fwaddr=%#lx sync_value=%#lx",
+               __get_str(comm),
+               __get_str(cmd),
+               __get_str(dm),
+               (unsigned long)__entry->ctx_id,
+               (unsigned long)__entry->offset,
+               (unsigned long)__entry->sync_fwaddr,
+               (unsigned long)__entry->sync_value),
+
+       trace_fence_check_enabled_callback,
+       trace_fence_check_disabled_callback
+);
+
+TRACE_EVENT(rogue_job_enqueue,
+
+       TP_PROTO(u32 ctx_id, u32 int_id, u32 ext_id,
+                const char *kick_type),
+
+       TP_ARGS(ctx_id, int_id, ext_id, kick_type),
+
+       TP_STRUCT__entry(
+               __field(u32, ctx_id)
+               __field(u32, int_id)
+               __field(u32, ext_id)
+               __string(kick_type, kick_type)
+       ),
+
+       TP_fast_assign(
+               __entry->ctx_id = ctx_id;
+               __entry->int_id = int_id;
+               __entry->ext_id = ext_id;
+               __assign_str(kick_type, kick_type);
+       ),
+
+       TP_printk("ctx_id=%lu int_id=%lu ext_id=%lu kick_type=%s",
+               (unsigned long) __entry->ctx_id,
+               (unsigned long) __entry->int_id,
+               (unsigned long) __entry->ext_id,
+               __get_str(kick_type)
+       )
+);
+
+TRACE_EVENT(rogue_sched_switch,
+
+       TP_PROTO(const char *work_type, u32 switch_type, u64 timestamp, u32 next_ctx_id,
+                u32 next_prio, u32 next_int_id, u32 next_ext_id),
+
+       TP_ARGS(work_type, switch_type, timestamp, next_ctx_id, next_prio, next_int_id, next_ext_id),
+
+       TP_STRUCT__entry(
+               __string(work_type, work_type)
+               __field(u32, switch_type)
+               __field(u64, timestamp)
+               __field(u32, next_ctx_id)
+               __field(u32, next_prio)
+               __field(u32, next_int_id)
+               __field(u32, next_ext_id)
+       ),
+
+       TP_fast_assign(
+               __assign_str(work_type, work_type);
+               __entry->switch_type = switch_type;
+               __entry->timestamp = timestamp;
+               __entry->next_ctx_id = next_ctx_id;
+               __entry->next_prio = next_prio;
+               __entry->next_int_id = next_int_id;
+               __entry->next_ext_id = next_ext_id;
+       ),
+
+       TP_printk("ts=%llu.%06lu next_ctx_id=%lu next_int_id=%lu next_ext_id=%lu"
+               " next_prio=%lu work_type=%s switch_type=%s",
+               (unsigned long long) show_secs_from_ns(__entry->timestamp),
+               (unsigned long) show_usecs_from_ns(__entry->timestamp),
+               (unsigned long) __entry->next_ctx_id,
+               (unsigned long) __entry->next_int_id,
+               (unsigned long) __entry->next_ext_id,
+               (unsigned long) __entry->next_prio,
+               __get_str(work_type),
+               __print_symbolic(__entry->switch_type,
+                       /* These values are from ospvr_gputrace.h. */
+                       { 1, "begin" },
+                       { 2, "end" })
+       )
+);
+
+TRACE_EVENT(rogue_create_fw_context,
+
+       TP_PROTO(const char *comm, const char *dm, u32 ctx_id),
+
+       TP_ARGS(comm, dm, ctx_id),
+
+       TP_STRUCT__entry(
+               __string(       comm,           comm            )
+               __string(       dm,             dm              )
+               __field(        u32,            ctx_id          )
+       ),
+
+       TP_fast_assign(
+               __assign_str(comm, comm);
+               __assign_str(dm, dm);
+               __entry->ctx_id = ctx_id;
+       ),
+
+       TP_printk("comm=%s dm=%s ctx_id=%lu",
+               __get_str(comm),
+               __get_str(dm),
+               (unsigned long)__entry->ctx_id)
+);
+
+void PVRGpuTraceEnableUfoCallback(void);
+void PVRGpuTraceDisableUfoCallback(void);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+int PVRGpuTraceEnableUfoCallbackWrapper(void);
+#else
+#define PVRGpuTraceEnableUfoCallbackWrapper \
+               PVRGpuTraceEnableUfoCallback
+#endif
+
+TRACE_EVENT_FN(rogue_ufo_update,
+
+       TP_PROTO(u64 timestamp, u32 ctx_id, u32 int_id, u32 ext_id,
+                u32 fwaddr, u32 old_value, u32 new_value),
+
+       TP_ARGS(timestamp, ctx_id, int_id, ext_id, fwaddr, old_value,
+               new_value),
+
+       TP_STRUCT__entry(
+               __field(        u64,            timestamp   )
+               __field(        u32,            ctx_id      )
+               __field(        u32,            int_id      )
+               __field(        u32,            ext_id      )
+               __field(        u32,            fwaddr      )
+               __field(        u32,            old_value   )
+               __field(        u32,            new_value   )
+       ),
+
+       TP_fast_assign(
+               __entry->timestamp = timestamp;
+               __entry->ctx_id = ctx_id;
+               __entry->int_id = int_id;
+               __entry->ext_id = ext_id;
+               __entry->fwaddr = fwaddr;
+               __entry->old_value = old_value;
+               __entry->new_value = new_value;
+       ),
+
+       TP_printk("ts=%llu.%06lu ctx_id=%lu int_id=%lu ext_id=%lu"
+               " fwaddr=%#lx old_value=%#lx new_value=%#lx",
+               (unsigned long long)show_secs_from_ns(__entry->timestamp),
+               (unsigned long)show_usecs_from_ns(__entry->timestamp),
+               (unsigned long)__entry->ctx_id,
+               (unsigned long)__entry->int_id,
+               (unsigned long)__entry->ext_id,
+               (unsigned long)__entry->fwaddr,
+               (unsigned long)__entry->old_value,
+               (unsigned long)__entry->new_value),
+       PVRGpuTraceEnableUfoCallbackWrapper,
+       PVRGpuTraceDisableUfoCallback
+);
+
+TRACE_EVENT_FN(rogue_ufo_check_fail,
+
+       TP_PROTO(u64 timestamp, u32 ctx_id, u32 int_id, u32 ext_id,
+                u32 fwaddr, u32 value, u32 required),
+
+       TP_ARGS(timestamp, ctx_id, int_id, ext_id, fwaddr, value, required),
+
+       TP_STRUCT__entry(
+               __field(        u64,            timestamp   )
+               __field(        u32,            ctx_id      )
+               __field(        u32,            int_id      )
+               __field(        u32,            ext_id      )
+               __field(        u32,            fwaddr      )
+               __field(        u32,            value       )
+               __field(        u32,            required    )
+       ),
+
+       TP_fast_assign(
+               __entry->timestamp = timestamp;
+               __entry->ctx_id = ctx_id;
+               __entry->int_id = int_id;
+               __entry->ext_id = ext_id;
+               __entry->fwaddr = fwaddr;
+               __entry->value = value;
+               __entry->required = required;
+       ),
+
+       TP_printk("ts=%llu.%06lu ctx_id=%lu int_id=%lu ext_id=%lu"
+               " fwaddr=%#lx value=%#lx required=%#lx",
+               (unsigned long long)show_secs_from_ns(__entry->timestamp),
+               (unsigned long)show_usecs_from_ns(__entry->timestamp),
+               (unsigned long)__entry->ctx_id,
+               (unsigned long)__entry->int_id,
+               (unsigned long)__entry->ext_id,
+               (unsigned long)__entry->fwaddr,
+               (unsigned long)__entry->value,
+               (unsigned long)__entry->required),
+       PVRGpuTraceEnableUfoCallbackWrapper,
+       PVRGpuTraceDisableUfoCallback
+);
+
+TRACE_EVENT_FN(rogue_ufo_pr_check_fail,
+
+       TP_PROTO(u64 timestamp, u32 ctx_id, u32 int_id, u32 ext_id,
+                u32 fwaddr, u32 value, u32 required),
+
+       TP_ARGS(timestamp, ctx_id, int_id, ext_id, fwaddr, value, required),
+
+       TP_STRUCT__entry(
+               __field(        u64,            timestamp   )
+               __field(        u32,            ctx_id      )
+               __field(        u32,            int_id      )
+               __field(        u32,            ext_id      )
+               __field(        u32,            fwaddr      )
+               __field(        u32,            value       )
+               __field(        u32,            required    )
+       ),
+
+       TP_fast_assign(
+               __entry->timestamp = timestamp;
+               __entry->ctx_id = ctx_id;
+               __entry->int_id = int_id;
+               __entry->ext_id = ext_id;
+               __entry->fwaddr = fwaddr;
+               __entry->value = value;
+               __entry->required = required;
+       ),
+
+       TP_printk("ts=%llu.%06lu ctx_id=%lu int_id=%lu ext_id=%lu"
+               " fwaddr=%#lx value=%#lx required=%#lx",
+               (unsigned long long)show_secs_from_ns(__entry->timestamp),
+               (unsigned long)show_usecs_from_ns(__entry->timestamp),
+               (unsigned long)__entry->ctx_id,
+               (unsigned long)__entry->int_id,
+               (unsigned long)__entry->ext_id,
+               (unsigned long)__entry->fwaddr,
+               (unsigned long)__entry->value,
+               (unsigned long)__entry->required),
+       PVRGpuTraceEnableUfoCallbackWrapper,
+       PVRGpuTraceDisableUfoCallback
+);
+
+TRACE_EVENT_FN(rogue_ufo_check_success,
+
+       TP_PROTO(u64 timestamp, u32 ctx_id, u32 int_id, u32 ext_id,
+                u32 fwaddr, u32 value),
+
+       TP_ARGS(timestamp, ctx_id, int_id, ext_id, fwaddr, value),
+
+       TP_STRUCT__entry(
+               __field(        u64,            timestamp   )
+               __field(        u32,            ctx_id      )
+               __field(        u32,            int_id      )
+               __field(        u32,            ext_id      )
+               __field(        u32,            fwaddr      )
+               __field(        u32,            value       )
+       ),
+
+       TP_fast_assign(
+               __entry->timestamp = timestamp;
+               __entry->ctx_id = ctx_id;
+               __entry->int_id = int_id;
+               __entry->ext_id = ext_id;
+               __entry->fwaddr = fwaddr;
+               __entry->value = value;
+       ),
+
+       TP_printk("ts=%llu.%06lu ctx_id=%lu int_id=%lu ext_id=%lu"
+               " fwaddr=%#lx value=%#lx",
+               (unsigned long long)show_secs_from_ns(__entry->timestamp),
+               (unsigned long)show_usecs_from_ns(__entry->timestamp),
+               (unsigned long)__entry->ctx_id,
+               (unsigned long)__entry->int_id,
+               (unsigned long)__entry->ext_id,
+               (unsigned long)__entry->fwaddr,
+               (unsigned long)__entry->value),
+       PVRGpuTraceEnableUfoCallbackWrapper,
+       PVRGpuTraceDisableUfoCallback
+);
+
+TRACE_EVENT_FN(rogue_ufo_pr_check_success,
+
+       TP_PROTO(u64 timestamp, u32 ctx_id, u32 int_id, u32 ext_id,
+                u32 fwaddr, u32 value),
+
+       TP_ARGS(timestamp, ctx_id, int_id, ext_id, fwaddr, value),
+
+       TP_STRUCT__entry(
+               __field(        u64,            timestamp   )
+               __field(        u32,            ctx_id      )
+               __field(        u32,            int_id      )
+               __field(        u32,            ext_id      )
+               __field(        u32,            fwaddr      )
+               __field(        u32,            value       )
+       ),
+
+       TP_fast_assign(
+               __entry->timestamp = timestamp;
+               __entry->ctx_id = ctx_id;
+               __entry->int_id = int_id;
+               __entry->ext_id = ext_id;
+               __entry->fwaddr = fwaddr;
+               __entry->value = value;
+       ),
+
+       TP_printk("ts=%llu.%06lu ctx_id=%lu int_id=%lu ext_id=%lu"
+               " fwaddr=%#lx value=%#lx",
+               (unsigned long long)show_secs_from_ns(__entry->timestamp),
+               (unsigned long)show_usecs_from_ns(__entry->timestamp),
+               (unsigned long)__entry->ctx_id,
+               (unsigned long)__entry->int_id,
+               (unsigned long)__entry->ext_id,
+               (unsigned long)__entry->fwaddr,
+               (unsigned long)__entry->value),
+       PVRGpuTraceEnableUfoCallbackWrapper,
+       PVRGpuTraceDisableUfoCallback
+);
+
+TRACE_EVENT(rogue_events_lost,
+
+       TP_PROTO(u32 event_source, u32 last_ordinal, u32 curr_ordinal),
+
+       TP_ARGS(event_source, last_ordinal, curr_ordinal),
+
+       TP_STRUCT__entry(
+               __field(        u32,            event_source     )
+               __field(        u32,            last_ordinal     )
+               __field(        u32,            curr_ordinal     )
+       ),
+
+       TP_fast_assign(
+               __entry->event_source = event_source;
+               __entry->last_ordinal = last_ordinal;
+               __entry->curr_ordinal = curr_ordinal;
+       ),
+
+       TP_printk("event_source=%s last_ordinal=%u curr_ordinal=%u",
+               __print_symbolic(__entry->event_source, {0, "GPU"}, {1, "Host"}),
+               __entry->last_ordinal,
+               __entry->curr_ordinal)
+);
+
+void PVRGpuTraceEnableFirmwareActivityCallback(void);
+void PVRGpuTraceDisableFirmwareActivityCallback(void);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+int PVRGpuTraceEnableFirmwareActivityCallbackWrapper(void);
+#else
+#define PVRGpuTraceEnableFirmwareActivityCallbackWrapper \
+               PVRGpuTraceEnableFirmwareActivityCallback
+#endif
+
+TRACE_EVENT_FN(rogue_firmware_activity,
+
+       TP_PROTO(u64 timestamp, const char *task, u32 fw_event),
+
+       TP_ARGS(timestamp, task, fw_event),
+
+       TP_STRUCT__entry(
+               __field(        u64,            timestamp       )
+               __string(       task,           task            )
+               __field(        u32,            fw_event        )
+       ),
+
+       TP_fast_assign(
+               __entry->timestamp = timestamp;
+               __assign_str(task, task);
+               __entry->fw_event = fw_event;
+       ),
+
+       TP_printk("ts=%llu.%06lu task=%s event=%s",
+               (unsigned long long)show_secs_from_ns(__entry->timestamp),
+               (unsigned long)show_usecs_from_ns(__entry->timestamp),
+               __get_str(task),
+               __print_symbolic(__entry->fw_event,
+                       /* These values are from ospvr_gputrace.h. */
+                       { 1, "begin" },
+                       { 2, "end" })),
+
+       PVRGpuTraceEnableFirmwareActivityCallbackWrapper,
+       PVRGpuTraceDisableFirmwareActivityCallback
+);
+
+#undef show_secs_from_ns
+#undef show_usecs_from_ns
+
+#endif /* ROGUE_TRACE_EVENTS_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+
+/* This is needed because the name of this file doesn't match TRACE_SYSTEM. */
+#define TRACE_INCLUDE_FILE rogue_trace_events
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
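For reference, each TRACE_EVENT()/TRACE_EVENT_FN() definition above makes the kernel tracing infrastructure generate a trace_<name>() emit helper and a trace_<name>_enabled() predicate. A minimal sketch of emitting the rogue_events_lost event from driver code (the caller name below is hypothetical, not part of this patch):

/* Sketch only, not part of the imported driver: emitting the
 * rogue_events_lost tracepoint defined in rogue_trace_events.h.
 * TRACE_EVENT(rogue_events_lost, ...) generates both
 * trace_rogue_events_lost() and trace_rogue_events_lost_enabled().
 */
#include "rogue_trace_events.h"

static void example_report_lost_events(u32 last_ordinal, u32 curr_ordinal)
{
	if (trace_rogue_events_lost_enabled())
		/* event_source 0 is printed as "GPU" by __print_symbolic() */
		trace_rogue_events_lost(0, last_ordinal, curr_ordinal);
}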
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/services_kernel_client.h b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/services_kernel_client.h
new file mode 100644 (file)
index 0000000..aaca47f
--- /dev/null
@@ -0,0 +1,291 @@
+/*************************************************************************/ /*!
+@File           services_kernel_client.h
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* This file contains a partial redefinition of the PowerVR Services 5
+ * interface for use by components which are checkpatch clean. This
+ * header is included by the unrefined, non-checkpatch clean headers
+ * to ensure that prototype/typedef/macro changes break the build.
+ */
+
+#ifndef __SERVICES_KERNEL_CLIENT__
+#define __SERVICES_KERNEL_CLIENT__
+
+#include "pvrsrv_error.h"
+
+#include <linux/types.h>
+
+#include "pvrsrv_sync_km.h"
+#include "sync_checkpoint_external.h"
+
+/* included for the define PVRSRV_LINUX_DEV_INIT_ON_PROBE */
+#include "pvr_drm.h"
+
+#ifndef __pvrsrv_defined_struct_enum__
+
+/* sync_external.h */
+
+struct PVRSRV_CLIENT_SYNC_PRIM_TAG {
+       volatile __u32 *pui32LinAddr;
+};
+
+struct PVRSRV_CLIENT_SYNC_PRIM_OP {
+       __u32 ui32Flags;
+       struct pvrsrv_sync_prim *psSync;
+       __u32 ui32FenceValue;
+       __u32 ui32UpdateValue;
+};
+
+#else /* __pvrsrv_defined_struct_enum__ */
+
+struct PVRSRV_CLIENT_SYNC_PRIM_TAG;
+struct PVRSRV_CLIENT_SYNC_PRIM_OP;
+
+enum tag_img_bool;
+
+#endif /* __pvrsrv_defined_struct_enum__ */
+
+struct _PMR_;
+struct _PVRSRV_DEVICE_NODE_;
+struct dma_buf;
+struct SYNC_PRIM_CONTEXT_TAG;
+
+/* pvr_notifier.h */
+
+#ifndef CMDCOMPNOTIFY_PFN
+typedef void (*PFN_CMDCOMP_NOTIFY)(void *hCmdCompHandle);
+#define CMDCOMPNOTIFY_PFN
+#endif
+enum PVRSRV_ERROR_TAG PVRSRVRegisterCmdCompleteNotify(void **phNotify,
+       PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify, void *hPrivData);
+enum PVRSRV_ERROR_TAG PVRSRVUnregisterCmdCompleteNotify(void *hNotify);
+void PVRSRVCheckStatus(void *hCmdCompCallerHandle);
+
+#define DEBUG_REQUEST_DC               0
+#define DEBUG_REQUEST_SYNCTRACKING     1
+#define DEBUG_REQUEST_SRV              2
+#define DEBUG_REQUEST_SYS              3
+#define DEBUG_REQUEST_RGX              4
+#define DEBUG_REQUEST_ANDROIDSYNC      5
+#define DEBUG_REQUEST_LINUXFENCE       6
+#define DEBUG_REQUEST_SYNCCHECKPOINT   7
+#define DEBUG_REQUEST_HTB              8
+#define DEBUG_REQUEST_APPHINT          9
+#define DEBUG_REQUEST_FALLBACKSYNC     10
+
+#define DEBUG_REQUEST_VERBOSITY_LOW    0
+#define DEBUG_REQUEST_VERBOSITY_MEDIUM 1
+#define DEBUG_REQUEST_VERBOSITY_HIGH   2
+#define DEBUG_REQUEST_VERBOSITY_MAX    DEBUG_REQUEST_VERBOSITY_HIGH
+
+#define DD_VERB_LVL_ENABLED(_verbLvl, _verbLvlChk) ((_verbLvl) >= (_verbLvlChk))
+
+#ifndef DBGNOTIFY_PFNS
+typedef void (DUMPDEBUG_PRINTF_FUNC)(void *pvDumpDebugFile,
+       const char *fmt, ...) __printf(2, 3);
+typedef void (*PFN_DBGREQ_NOTIFY) (void *hDebugRequestHandle,
+       __u32 ui32VerbLevel,
+       DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+       void *pvDumpDebugFile);
+#define DBGNOTIFY_PFNS
+#endif
+enum PVRSRV_ERROR_TAG PVRSRVRegisterDeviceDbgRequestNotify(void **phNotify,
+       struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+       PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
+       __u32 ui32RequesterID,
+       void *hDbgRequestHandle);
+enum PVRSRV_ERROR_TAG PVRSRVUnregisterDeviceDbgRequestNotify(void *hNotify);
+enum PVRSRV_ERROR_TAG PVRSRVRegisterDriverDbgRequestNotify(void **phNotify,
+       PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
+       __u32 ui32RequesterID,
+       void *hDbgRequestHandle);
+enum PVRSRV_ERROR_TAG PVRSRVUnregisterDriverDbgRequestNotify(void *hNotify);
+
+/* physmem_dmabuf.h */
+
+struct dma_buf *PhysmemGetDmaBuf(struct _PMR_ *psPMR);
+
+/* pvrsrv.h */
+
+enum PVRSRV_ERROR_TAG PVRSRVAcquireGlobalEventObjectKM(void **phGlobalEventObject);
+enum PVRSRV_ERROR_TAG PVRSRVReleaseGlobalEventObjectKM(void *hGlobalEventObject);
+
+/* sync.h */
+
+enum PVRSRV_ERROR_TAG SyncPrimContextCreate(
+       struct _PVRSRV_DEVICE_NODE_ *psDevConnection,
+       struct SYNC_PRIM_CONTEXT_TAG **phSyncPrimContext);
+void SyncPrimContextDestroy(struct SYNC_PRIM_CONTEXT_TAG *hSyncPrimContext);
+
+enum PVRSRV_ERROR_TAG SyncPrimAlloc(struct SYNC_PRIM_CONTEXT_TAG *hSyncPrimContext,
+       struct PVRSRV_CLIENT_SYNC_PRIM_TAG **ppsSync, const char *pszClassName);
+enum PVRSRV_ERROR_TAG SyncPrimFree(struct PVRSRV_CLIENT_SYNC_PRIM_TAG *psSync);
+enum PVRSRV_ERROR_TAG SyncPrimGetFirmwareAddr(
+       struct PVRSRV_CLIENT_SYNC_PRIM_TAG *psSync,
+       __u32 *sync_addr);
+
+/* osfunc.h */
+enum PVRSRV_ERROR_TAG OSEventObjectWait(void *hOSEventKM);
+enum PVRSRV_ERROR_TAG OSEventObjectOpen(void *hEventObject, void **phOSEventKM);
+enum PVRSRV_ERROR_TAG OSEventObjectClose(void *hOSEventKM);
+__u32 OSGetCurrentClientProcessIDKM(void);
+__u32 OSStringUINT32ToStr(char *pszBuf, size_t uSize, __u32 ui32Num);
+
+/* srvkm.h */
+
+enum PVRSRV_ERROR_TAG PVRSRVCommonDeviceCreate(void *pvOSDevice,
+       int i32OsDeviceID,
+       struct _PVRSRV_DEVICE_NODE_ **ppsDeviceNode);
+enum PVRSRV_ERROR_TAG PVRSRVCommonDeviceDestroy(
+       struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+const char *PVRSRVGetErrorString(enum PVRSRV_ERROR_TAG eError);
+#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_PROBE)
+enum PVRSRV_ERROR_TAG PVRSRVCommonDeviceInitialise(
+       struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+#endif
+
+#ifndef CHECKPOINT_PFNS
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN)(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, PVRSRV_FENCE fence, u32 *nr_checkpoints, PSYNC_CHECKPOINT **checkpoint_handles, u64 *fence_uid);
+
+#ifndef CHECKPOINT_PFNS
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN)(
+               struct _PVRSRV_DEVICE_NODE_ *device,
+               const char *fence_name,
+               PVRSRV_TIMELINE timeline,
+               PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+               PVRSRV_FENCE *new_fence,
+               u64 *fence_uid,
+               void **fence_finalise_data,
+               PSYNC_CHECKPOINT *new_checkpoint_handle,
+               void **timeline_update_sync,
+               __u32 *timeline_update_value);
+#endif
+
+#ifndef CHECKPOINT_PFNS
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN)(PVRSRV_FENCE fence_to_rollback, void *finalise_data);
+#endif
+
+#ifndef CHECKPOINT_PFNS
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN)(PVRSRV_FENCE fence_to_finalise, void *finalise_data);
+#endif
+
+#ifndef CHECKPOINT_PFNS
+typedef __u32 (*PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN)(__u32 num_ufos, __u32 *vaddrs);
+#endif
+
+#ifndef CHECKPOINT_PFNS
+typedef enum tag_img_bool (*PFN_SYNC_CHECKPOINT_UFO_HAS_SIGNALLED_FN)(
+       __u32 ui32FwAddr, __u32 ui32Value);
+typedef enum PVRSRV_ERROR_TAG (*PFN_SYNC_CHECKPOINT_SIGNAL_WAITERS_FN)(void);
+typedef void(*PFN_SYNC_CHECKPOINT_CHECK_STATE_FN)(void);
+#if defined(PDUMP)
+typedef PVRSRV_ERROR(*PFN_SYNC_CHECKPOINT_FENCE_GETCHECKPOINTS_FN)(PVRSRV_FENCE iFence,
+                                               IMG_UINT32 *puiNumCheckpoints,
+                                               PSYNC_CHECKPOINT **papsCheckpoints);
+#endif
+#endif
+
+/* This is the function that kick code will call in a NO_HARDWARE build only after
+ * sync checkpoints have been manually signalled, to allow the OS native sync
+ * implementation to update its timelines (as the usual callback notification
+ * of signalled checkpoints is not supported for NO_HARDWARE).
+ */
+#ifndef CHECKPOINT_PFNS
+typedef void (*PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN)(void *private_data);
+typedef void (*PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN)(void *mem_ptr);
+
+#define SYNC_CHECKPOINT_IMPL_MAX_STRLEN 20
+
+typedef struct {
+       PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN pfnFenceResolve;
+       PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN pfnFenceCreate;
+       PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN pfnFenceDataRollback;
+       PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN pfnFenceFinalise;
+       PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN pfnNoHWUpdateTimelines;
+       PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN pfnFreeCheckpointListMem;
+       PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN pfnDumpInfoOnStalledUFOs;
+       char pszImplName[SYNC_CHECKPOINT_IMPL_MAX_STRLEN];
+#if defined(PDUMP)
+       PFN_SYNC_CHECKPOINT_FENCE_GETCHECKPOINTS_FN pfnSyncFenceGetCheckpoints;
+#endif
+} PFN_SYNC_CHECKPOINT_STRUCT;
+
+enum PVRSRV_ERROR_TAG SyncCheckpointRegisterFunctions(PFN_SYNC_CHECKPOINT_STRUCT *psSyncCheckpointPfns);
+
+#define CHECKPOINT_PFNS
+#endif
+
+/* sync_checkpoint.h */
+enum PVRSRV_ERROR_TAG SyncCheckpointContextCreate(struct _PVRSRV_DEVICE_NODE_ *psDevConnection, PSYNC_CHECKPOINT_CONTEXT *phSyncCheckpointContext);
+enum PVRSRV_ERROR_TAG SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT hSyncCheckpointContext);
+void SyncCheckpointContextRef(PSYNC_CHECKPOINT_CONTEXT psContext);
+void SyncCheckpointContextUnref(PSYNC_CHECKPOINT_CONTEXT psContext);
+enum PVRSRV_ERROR_TAG SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext, PVRSRV_TIMELINE timeline, PVRSRV_FENCE fence, const char *pszCheckpointName, PSYNC_CHECKPOINT *ppsSyncCheckpoint);
+void SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags);
+void SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags);
+enum tag_img_bool SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags);
+enum tag_img_bool SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags);
+enum PVRSRV_ERROR_TAG SyncCheckpointTakeRef(PSYNC_CHECKPOINT psSyncCheckpoint);
+enum PVRSRV_ERROR_TAG SyncCheckpointDropRef(PSYNC_CHECKPOINT psSyncCheckpoint);
+void SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint);
+__u32 SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint);
+void SyncCheckpointCCBEnqueued(PSYNC_CHECKPOINT psSyncCheckpoint);
+__u32 SyncCheckpointGetId(PSYNC_CHECKPOINT psSyncCheckpoint);
+__u32 SyncCheckpointGetEnqueuedCount(PSYNC_CHECKPOINT psSyncCheckpoint);
+__u32 SyncCheckpointGetReferenceCount(PSYNC_CHECKPOINT psSyncCheckpoint);
+PVRSRV_TIMELINE SyncCheckpointGetTimeline(PSYNC_CHECKPOINT psSyncCheckpoint);
+const char *SyncCheckpointGetStateString(PSYNC_CHECKPOINT psSyncCheckpoint);
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+struct _PVRSRV_DEVICE_NODE_ *SyncCheckpointGetAssociatedDevice(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext);
+#endif
+
+#endif
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_BUFFER_SYNC)
+/*************************************************************************/ /*!
+@Function       NativeSyncGetFenceStatusWq
+@Description    Called to get the Foreign Fence status workqueue used in
+                Fence sync and Buffer sync.
+@Return         struct workqueue_struct ptr on success, NULL otherwise.
+*/ /**************************************************************************/
+struct workqueue_struct *NativeSyncGetFenceStatusWq(void);
+#endif
+
+#endif /* __SERVICES_KERNEL_CLIENT__ */
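services_kernel_client.h is the interface that external kernel components are expected to program against. As a hedged illustration (the function and handle names below are hypothetical, not part of this patch), registering a command-complete notifier looks roughly like this:

/* Hypothetical consumer of the command-complete notifier interface
 * declared in services_kernel_client.h; names here are illustrative.
 */
#include <linux/errno.h>
#include "services_kernel_client.h"

static void *ghCmdCompNotify;

static void ExampleCmdCompleteNotify(void *hCmdCompHandle)
{
	/* hCmdCompHandle is the hPrivData supplied at registration time. */
}

static int example_register_notifier(void)
{
	enum PVRSRV_ERROR_TAG eError;

	eError = PVRSRVRegisterCmdCompleteNotify(&ghCmdCompNotify,
						 ExampleCmdCompleteNotify,
						 NULL /* hPrivData */);
	/* PVRSRV_OK is assumed to be the success value from pvrsrv_error.h */
	return (eError == PVRSRV_OK) ? 0 : -ENODEV;
}

static void example_unregister_notifier(void)
{
	PVRSRVUnregisterCmdCompleteNotify(ghCmdCompNotify);
}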
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/trace_events.c b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/trace_events.c
new file mode 100644 (file)
index 0000000..39242ed
--- /dev/null
@@ -0,0 +1,265 @@
+/*************************************************************************/ /*!
+@Title          Linux trace event helper functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+#include <linux/sched.h>
+
+#if defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT)
+#if !defined(CONFIG_TRACE_GPU_MEM)
+#define CREATE_TRACE_POINTS
+#include <trace/events/gpu_mem.h>
+#undef CREATE_TRACE_POINTS
+#else /* !defined(CONFIG_TRACE_GPU_MEM) */
+#include <trace/events/gpu_mem.h>
+#endif /* !defined(CONFIG_TRACE_GPU_MEM) */
+#endif /* defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) */
+
+#include "img_types.h"
+#include "trace_events.h"
+#include "rogue_trace_events.h"
+#include "sync_checkpoint_external.h"
+
+static bool fence_update_event_enabled, fence_check_event_enabled;
+
+bool trace_rogue_are_fence_updates_traced(void)
+{
+       return fence_update_event_enabled;
+}
+
+bool trace_rogue_are_fence_checks_traced(void)
+{
+       return fence_check_event_enabled;
+}
+
+/*
+ * Callbacks referenced from rogue_trace_events.h. Note that these are not
+ * thread-safe; however, since running trace code when tracing is not enabled
+ * is simply a no-op, there is no harm in it.
+ */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+int trace_fence_update_enabled_callback(void)
+#else
+void trace_fence_update_enabled_callback(void)
+#endif
+{
+       fence_update_event_enabled = true;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+       return 0;
+#endif
+}
+
+void trace_fence_update_disabled_callback(void)
+{
+       fence_update_event_enabled = false;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+int trace_fence_check_enabled_callback(void)
+#else
+void trace_fence_check_enabled_callback(void)
+#endif
+{
+       fence_check_event_enabled = true;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+       return 0;
+#endif
+}
+
+void trace_fence_check_disabled_callback(void)
+{
+       fence_check_event_enabled = false;
+}
+
+#if defined(SUPPORT_RGX)
+/* This is a helper that calls trace_rogue_fence_update for each fence in an
+ * array.
+ */
+void trace_rogue_fence_updates(const char *cmd, const char *dm, IMG_UINT32 ui32FWContext,
+                                                          IMG_UINT32 ui32Offset,
+                                                          IMG_UINT uCount,
+                                                          PRGXFWIF_UFO_ADDR *pauiAddresses,
+                                                          IMG_UINT32 *paui32Values)
+{
+       IMG_UINT i;
+       for (i = 0; i < uCount; i++)
+       {
+               trace_rogue_fence_update(current->comm, cmd, dm, ui32FWContext, ui32Offset,
+                                                                pauiAddresses[i].ui32Addr, PVRSRV_SYNC_CHECKPOINT_SIGNALLED);
+       }
+}
+
+void trace_rogue_fence_checks(const char *cmd, const char *dm, IMG_UINT32 ui32FWContext,
+                                                         IMG_UINT32 ui32Offset,
+                                                         IMG_UINT uCount,
+                                                         PRGXFWIF_UFO_ADDR *pauiAddresses,
+                                                         IMG_UINT32 *paui32Values)
+{
+       IMG_UINT i;
+       for (i = 0; i < uCount; i++)
+       {
+               trace_rogue_fence_check(current->comm, cmd, dm, ui32FWContext, ui32Offset,
+                                                         pauiAddresses[i].ui32Addr, PVRSRV_SYNC_CHECKPOINT_SIGNALLED);
+       }
+}
+
+void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp,
+                                                        IMG_UINT32 ui32FWCtx,
+                                                        IMG_UINT32 ui32ExtJobRef,
+                                                        IMG_UINT32 ui32IntJobRef,
+                                                        IMG_UINT32 ui32UFOCount,
+                                                        const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+       IMG_UINT i;
+       for (i = 0; i < ui32UFOCount; i++)
+       {
+               trace_rogue_ufo_update(ui64OSTimestamp, ui32FWCtx,
+                               ui32IntJobRef,
+                               ui32ExtJobRef,
+                               puData->sUpdate.ui32FWAddr,
+                               puData->sUpdate.ui32OldValue,
+                               puData->sUpdate.ui32NewValue);
+               puData = IMG_OFFSET_ADDR(puData, sizeof(puData->sUpdate));
+       }
+}
+
+void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp,
+                                                                       IMG_UINT32 ui32FWCtx,
+                                                                       IMG_UINT32 ui32ExtJobRef,
+                                                                       IMG_UINT32 ui32IntJobRef,
+                                                                       IMG_BOOL bPrEvent,
+                                                                       IMG_UINT32 ui32UFOCount,
+                                                                       const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+       IMG_UINT i;
+       for (i = 0; i < ui32UFOCount; i++)
+       {
+               if (bPrEvent)
+               {
+                       trace_rogue_ufo_pr_check_success(ui64OSTimestamp, ui32FWCtx,
+                                       ui32IntJobRef, ui32ExtJobRef,
+                                       puData->sCheckSuccess.ui32FWAddr,
+                                       puData->sCheckSuccess.ui32Value);
+               }
+               else
+               {
+                       trace_rogue_ufo_check_success(ui64OSTimestamp, ui32FWCtx,
+                                       ui32IntJobRef, ui32ExtJobRef,
+                                       puData->sCheckSuccess.ui32FWAddr,
+                                       puData->sCheckSuccess.ui32Value);
+               }
+               puData = IMG_OFFSET_ADDR(puData, sizeof(puData->sCheckSuccess));
+       }
+}
+
+void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp,
+                                                                IMG_UINT32 ui32FWCtx,
+                                                                IMG_UINT32 ui32ExtJobRef,
+                                                                IMG_UINT32 ui32IntJobRef,
+                                                                IMG_BOOL bPrEvent,
+                                                                IMG_UINT32 ui32UFOCount,
+                                                                const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+       IMG_UINT i;
+       for (i = 0; i < ui32UFOCount; i++)
+       {
+               if (bPrEvent)
+               {
+                       trace_rogue_ufo_pr_check_fail(ui64OSTimestamp, ui32FWCtx,
+                                       ui32IntJobRef, ui32ExtJobRef,
+                                       puData->sCheckFail.ui32FWAddr,
+                                       puData->sCheckFail.ui32Value,
+                                       puData->sCheckFail.ui32Required);
+               }
+               else
+               {
+                       trace_rogue_ufo_check_fail(ui64OSTimestamp, ui32FWCtx,
+                                       ui32IntJobRef, ui32ExtJobRef,
+                                       puData->sCheckFail.ui32FWAddr,
+                                       puData->sCheckFail.ui32Value,
+                                       puData->sCheckFail.ui32Required);
+               }
+               puData = IMG_OFFSET_ADDR(puData, sizeof(puData->sCheckFail));
+       }
+}
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+
+int PVRGpuTraceEnableUfoCallbackWrapper(void)
+{
+
+#if defined(SUPPORT_RGX)
+       PVRGpuTraceEnableUfoCallback();
+#endif
+
+       return 0;
+}
+
+int PVRGpuTraceEnableFirmwareActivityCallbackWrapper(void)
+{
+
+#if defined(SUPPORT_RGX)
+       PVRGpuTraceEnableFirmwareActivityCallback();
+#endif
+
+       return 0;
+}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) */
+
+void TracepointUpdateGPUMemGlobal(IMG_UINT8 ui8GPUId,
+                                                                 IMG_UINT64 ui64Size)
+{
+#if defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT)
+       trace_gpu_mem_total(ui8GPUId, 0, ui64Size);
+#endif /* defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) */
+}
+
+void TracepointUpdateGPUMemPerProcess(IMG_UINT8 ui8GPUId,
+                                                                         IMG_UINT32 ui32Pid,
+                                                                         IMG_UINT64 ui64Size)
+{
+#if defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT)
+       trace_gpu_mem_total(ui8GPUId, ui32Pid, ui64Size);
+#endif /* defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) */
+}
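The helpers above are only useful when the corresponding tracepoints are enabled, which is why trace_rogue_are_fence_checks_traced() and trace_rogue_are_fence_updates_traced() are exported. A short sketch of how kick code might guard a call (the labels and array names are illustrative, not taken from the driver):

/* Sketch only: guarding the fence-check helper with the enable flag
 * maintained by trace_events.c.
 */
#if defined(SUPPORT_RGX)
static void example_trace_fence_checks(IMG_UINT32 ui32FWCtx,
				       IMG_UINT32 ui32Offset,
				       IMG_UINT uCount,
				       PRGXFWIF_UFO_ADDR *psAddrs,
				       IMG_UINT32 *pui32Vals)
{
	/* Avoid walking the UFO array when nobody is listening */
	if (trace_rogue_are_fence_checks_traced())
		trace_rogue_fence_checks("CHK", "TA", ui32FWCtx, ui32Offset,
					 uCount, psAddrs, pui32Vals);
}
#endif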
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/env/linux/trace_events.h b/drivers/gpu/drm/img/img-rogue/services/server/env/linux/trace_events.h
new file mode 100644 (file)
index 0000000..0a8fffd
--- /dev/null
@@ -0,0 +1,198 @@
+/*************************************************************************/ /*!
+@Title          Linux trace events and event helper functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(TRACE_EVENTS_H)
+#define TRACE_EVENTS_H
+
+#include "rgx_fwif_km.h"
+#include "rgx_hwperf.h"
+
+/* We need to make these functions do nothing if CONFIG_EVENT_TRACING isn't
+ * enabled, just like the actual trace event functions that the kernel
+ * defines for us.
+ */
+#ifdef CONFIG_EVENT_TRACING
+bool trace_rogue_are_fence_checks_traced(void);
+
+bool trace_rogue_are_fence_updates_traced(void);
+
+void trace_job_enqueue(IMG_UINT32 ui32FWContext,
+                       IMG_UINT32 ui32ExtJobRef,
+                       IMG_UINT32 ui32IntJobRef,
+                       const char *pszKickType);
+
+#if defined(SUPPORT_RGX)
+void trace_rogue_fence_updates(const char *cmd, const char *dm,
+                                                          IMG_UINT32 ui32FWContext,
+                                                          IMG_UINT32 ui32Offset,
+                                                          IMG_UINT uCount,
+                                                          PRGXFWIF_UFO_ADDR *pauiAddresses,
+                                                          IMG_UINT32 *paui32Values);
+
+void trace_rogue_fence_checks(const char *cmd, const char *dm,
+                                                         IMG_UINT32 ui32FWContext,
+                                                         IMG_UINT32 ui32Offset,
+                                                         IMG_UINT uCount,
+                                                         PRGXFWIF_UFO_ADDR *pauiAddresses,
+                                                         IMG_UINT32 *paui32Values);
+
+void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp,
+                                                        IMG_UINT32 ui32FWCtx,
+                                                        IMG_UINT32 ui32ExtJobRef,
+                                                        IMG_UINT32 ui32IntJobRef,
+                                                        IMG_UINT32 ui32UFOCount,
+                                                        const RGX_HWPERF_UFO_DATA_ELEMENT *puData);
+
+void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp,
+                                                                       IMG_UINT32 ui32FWCtx,
+                                                                       IMG_UINT32 ui32ExtJobRef,
+                                                                       IMG_UINT32 ui32IntJobRef,
+                                                                       IMG_BOOL bPrEvent,
+                                                                       IMG_UINT32 ui32UFOCount,
+                                                                       const RGX_HWPERF_UFO_DATA_ELEMENT *puData);
+
+void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp,
+                                                                IMG_UINT32 ui32FWCtx,
+                                                                IMG_UINT32 ui32ExtJobRef,
+                                                                IMG_UINT32 ui32IntJobRef,
+                                                                IMG_BOOL bPrEvent,
+                                                                IMG_UINT32 ui32UFOCount,
+                                                                const RGX_HWPERF_UFO_DATA_ELEMENT *puData);
+#endif /* if defined(SUPPORT_RGX) */
+
+void TracepointUpdateGPUMemGlobal(IMG_UINT8 ui8GPUId,
+                                                                 IMG_UINT64 ui64Size);
+
+void TracepointUpdateGPUMemPerProcess(IMG_UINT8 ui8GPUId,
+                                                                         IMG_UINT32 ui32Pid,
+                                                                         IMG_UINT64 ui64Size);
+
+#else /* CONFIG_EVENT_TRACING */
+static inline
+bool trace_rogue_are_fence_checks_traced(void)
+{
+       return false;
+}
+
+static inline
+bool trace_rogue_are_fence_updates_traced(void)
+{
+       return false;
+}
+
+static inline
+void trace_job_enqueue(IMG_UINT32 ui32FWContext,
+                       IMG_UINT32 ui32ExtJobRef,
+                       IMG_UINT32 ui32IntJobRef,
+                       const char *pszKickType)
+{
+}
+
+#if defined(SUPPORT_RGX)
+static inline
+void trace_rogue_fence_updates(const char *cmd, const char *dm,
+                                                          IMG_UINT32 ui32FWContext,
+                                                          IMG_UINT32 ui32Offset,
+                                                          IMG_UINT uCount,
+                                                          PRGXFWIF_UFO_ADDR *pauiAddresses,
+                                                          IMG_UINT32 *paui32Values)
+{
+}
+
+static inline
+void trace_rogue_fence_checks(const char *cmd, const char *dm,
+                                                         IMG_UINT32 ui32FWContext,
+                                                         IMG_UINT32 ui32Offset,
+                                                         IMG_UINT uCount,
+                                                         PRGXFWIF_UFO_ADDR *pauiAddresses,
+                                                         IMG_UINT32 *paui32Values)
+{
+}
+
+static inline
+void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp,
+                                                        IMG_UINT32 ui32FWCtx,
+                                                        IMG_UINT32 ui32ExtJobRef,
+                                                        IMG_UINT32 ui32IntJobRef,
+                                                        IMG_UINT32 ui32UFOCount,
+                                                        const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+}
+
+static inline
+void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp,
+                                                                       IMG_UINT32 ui32FWCtx,
+                                                                       IMG_UINT32 ui32ExtJobRef,
+                                                                       IMG_UINT32 ui32IntJobRef,
+                                                                       IMG_BOOL bPrEvent,
+                                                                       IMG_UINT32 ui32UFOCount,
+                                                                       const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+}
+
+static inline
+void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp,
+                                                                IMG_UINT32 ui32FWCtx,
+                                                                IMG_UINT32 ui32ExtJobRef,
+                                                                IMG_UINT32 ui32IntJobRef,
+                                                                IMG_BOOL bPrEvent,
+                                                                IMG_UINT32 ui32UFOCount,
+                                                                const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+}
+#endif /* if defined(SUPPORT_RGX)*/
+
+static inline
+void TracepointUpdateGPUMemGlobal(IMG_UINT8 ui8GPUId,
+                                                                 IMG_UINT64 ui64Size)
+{
+}
+
+static inline
+void TracepointUpdateGPUMemPerProcess(IMG_UINT8 ui8GPUId,
+                                                                         IMG_UINT32 ui32Pid,
+                                                                         IMG_UINT64 ui64Size)
+{
+}
+
+#endif /* CONFIG_EVENT_TRACING */
+
+#endif /* TRACE_EVENTS_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/cache_km.h b/drivers/gpu/drm/img/img-rogue/services/server/include/cache_km.h
new file mode 100644 (file)
index 0000000..282ff5b
--- /dev/null
@@ -0,0 +1,151 @@
+/*************************************************************************/ /*!
+@File           cache_km.h
+@Title          CPU cache management header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CACHE_KM_H
+#define CACHE_KM_H
+
+#if defined(__linux__)
+#include <linux/version.h>
+#else
+#define KERNEL_VERSION
+#endif
+
+#include "pvrsrv_error.h"
+#include "os_cpu_cache.h"
+#include "img_types.h"
+#include "cache_ops.h"
+#include "device.h"
+#include "pmr.h"
+
+typedef IMG_UINT32 PVRSRV_CACHE_OP_ADDR_TYPE;  /*!< Represents CPU address type required for CPU d-cache maintenance */
+#define PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL      0x1     /*!< Operation requires CPU virtual address only */
+#define PVRSRV_CACHE_OP_ADDR_TYPE_PHYSICAL     0x2     /*!< Operation requires CPU physical address only */
+#define PVRSRV_CACHE_OP_ADDR_TYPE_BOTH         0x3     /*!< Operation requires both CPU virtual & physical addresses */
+
+#include "connection_server.h"
+
+/*
+ * CacheOpInit() & CacheOpDeInit()
+ *
+ * These must be called to initialise and de-initialise the KM cache
+ * maintenance framework. They are called early during the driver/module
+ * (un)loading phase.
+ */
+PVRSRV_ERROR CacheOpInit(void);
+void CacheOpDeInit(void);
+
+/*
+ * CacheOpInit2() & CacheOpDeInit2()
+ *
+ * These must be called to initialise and de-initialise the UM cache
+ * maintenance framework. They are called when the driver is loaded into or
+ * unloaded from the kernel.
+ */
+PVRSRV_ERROR CacheOpInit2(void);
+void CacheOpDeInit2(void);
+
+/*
+ * CacheOpExec()
+ *
+ * This is the primary CPU data-cache maintenance interface and it is
+ * always guaranteed to be synchronous; for performance reasons the
+ * arguments supplied must be pre-validated, otherwise the d-cache
+ * maintenance operation might cause the underlying OS kernel to fault.
+ */
+PVRSRV_ERROR CacheOpExec(PPVRSRV_DEVICE_NODE psDevNode,
+                                               void *pvVirtStart,
+                                               void *pvVirtEnd,
+                                               IMG_CPU_PHYADDR sCPUPhysStart,
+                                               IMG_CPU_PHYADDR sCPUPhysEnd,
+                                               PVRSRV_CACHE_OP uiCacheOp);
+
+/*
+ * CacheOpValExec()
+ *
+ * Same as CacheOpExec(), except arguments are _Validated_ before being
+ * presented to the underlying OS kernel for CPU data-cache maintenance.
+ * The uiAddress is the start CPU virtual address of the PMR to be d-cache
+ * maintained; it can be NULL, in which case a remap will be performed
+ * internally if required for cache maintenance. This is primarily used
+ * as the services client bridge call handler for synchronous user-mode
+ * cache maintenance requests.
+ */
+PVRSRV_ERROR CacheOpValExec(PMR *psPMR,
+                                                       IMG_UINT64 uiAddress,
+                                                       IMG_DEVMEM_OFFSET_T uiOffset,
+                                                       IMG_DEVMEM_SIZE_T uiSize,
+                                                       PVRSRV_CACHE_OP uiCacheOp);
+
+/*
+ * CacheOpQueue()
+ *
+ * This is the secondary cache maintenance interface and it is not
+ * guaranteed to be synchronous in that requests could be deferred
+ * and executed asynchronously. This interface is primarily meant
+ * as a services client bridge call handler. Both uiInfoPgGFSeqNum
+ * and ui32[Current,Next]FenceSeqNum implement an internal client/server
+ * queueing protocol, so using this interface outside of the services
+ * client is not recommended.
+ */
+PVRSRV_ERROR CacheOpQueue(CONNECTION_DATA *psConnection,
+                                               PPVRSRV_DEVICE_NODE psDevNode,
+                                               IMG_UINT32 ui32OpCount,
+                                               PMR **ppsPMR,
+                                               IMG_UINT64 *puiAddress,
+                                               IMG_DEVMEM_OFFSET_T *puiOffset,
+                                               IMG_DEVMEM_SIZE_T *puiSize,
+                                               PVRSRV_CACHE_OP *puiCacheOp,
+                                               IMG_UINT32 ui32OpTimeline);
+
+/*
+ * CacheOpLog()
+ *
+ * This is used for logging client cache maintenance operations that
+ * was executed in user-space.
+ */
+PVRSRV_ERROR CacheOpLog(PMR *psPMR,
+                                               IMG_UINT64 uiAddress,
+                                               IMG_DEVMEM_OFFSET_T uiOffset,
+                                               IMG_DEVMEM_SIZE_T uiSize,
+                                               IMG_UINT64 ui64StartTime,
+                                               IMG_UINT64 ui64EndTime,
+                                               PVRSRV_CACHE_OP uiCacheOp);
+
+#endif /* CACHE_KM_H */
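A minimal usage sketch for the validated path, assuming the PMR, offset and size were obtained from an earlier lookup; the cache-op value is assumed to come from cache_ops.h and the wrapper name is hypothetical:

/* Hypothetical caller of CacheOpValExec(). Passing uiAddress == 0 lets
 * the implementation remap the PMR internally if a CPU virtual address
 * is required for the maintenance operation.
 */
static PVRSRV_ERROR example_flush_pmr_range(PMR *psPMR,
					    IMG_DEVMEM_OFFSET_T uiOffset,
					    IMG_DEVMEM_SIZE_T uiSize)
{
	return CacheOpValExec(psPMR,
			      0,                      /* no CPU mapping supplied */
			      uiOffset,
			      uiSize,
			      PVRSRV_CACHE_OP_FLUSH); /* assumed from cache_ops.h */
}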
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/connection_server.h b/drivers/gpu/drm/img/img-rogue/services/server/include/connection_server.h
new file mode 100644 (file)
index 0000000..d11a6ea
--- /dev/null
@@ -0,0 +1,144 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server side connection management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    API for server side connection management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(CONNECTION_SERVER_H)
+#define CONNECTION_SERVER_H
+
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "handle.h"
+#include "pvrsrv_cleanup.h"
+
+/* Variable holding the timeout for the current time slice */
+extern IMG_UINT64 gui64TimesliceLimit;
+/* Counter of handle data entries freed during the current time slice */
+extern IMG_UINT32 gui32HandleDataFreeCounter;
+/* Maximum time that freeing of resources may hold the lock */
+#define CONNECTION_DEFERRED_CLEANUP_TIMESLICE_NS (3000 * 1000) /* 3ms */
+
+typedef struct _CONNECTION_DATA_
+{
+       PVRSRV_HANDLE_BASE              *psHandleBase;
+       PROCESS_HANDLE_BASE             *psProcessHandleBase;
+       struct _SYNC_CONNECTION_DATA_   *psSyncConnectionData;
+       struct _PDUMP_CONNECTION_DATA_  *psPDumpConnectionData;
+
+       /* Holds the client flags supplied at connection time */
+       IMG_UINT32          ui32ClientFlags;
+
+       /*
+        * OS specific data can be stored via this handle.
+        * See osconnection_server.h for a generic mechanism
+        * for initialising this field.
+        */
+       IMG_HANDLE          hOsPrivateData;
+
+#define PVRSRV_CONNECTION_PROCESS_NAME_LEN (16)
+       IMG_PID             pid;
+       IMG_PID                         vpid;
+       IMG_UINT32                      tid;
+       IMG_CHAR            pszProcName[PVRSRV_CONNECTION_PROCESS_NAME_LEN];
+
+       IMG_HANDLE          hProcessStats;
+
+       IMG_HANDLE          hClientTLStream;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+       /*
+        * Per-application, connection-based values which can be modified by the
+        * AppHint settings 'OSid, OSidReg, bOSidAxiProtReg'.
+        * These control where the connection's memory allocation is sourced from.
+        * ui32OSid, ui32OSidReg range from 0..(GPUVIRT_VALIDATION_NUM_OS - 1).
+        */
+       IMG_UINT32          ui32OSid;
+       IMG_UINT32          ui32OSidReg;
+       IMG_BOOL            bOSidAxiProtReg;
+#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */
+
+#if defined(SUPPORT_DMA_TRANSFER)
+       IMG_BOOL            bAcceptDmaRequests;
+       ATOMIC_T            ui32NumDmaTransfersInFlight;
+       POS_LOCK            hDmaReqLock;
+       IMG_HANDLE          hDmaEventObject;
+#endif
+       /* Structure which is hooked into the cleanup thread work list */
+       PVRSRV_CLEANUP_THREAD_WORK sCleanupThreadFn;
+
+       DLLIST_NODE         sConnectionListNode;
+
+       /* List navigation for deferred freeing of connection data */
+       struct _CONNECTION_DATA_        **ppsThis;
+       struct _CONNECTION_DATA_        *psNext;
+} CONNECTION_DATA;
+
+#include "osconnection_server.h"
+
+PVRSRV_ERROR PVRSRVCommonConnectionConnect(void **ppvPrivData, void *pvOSData);
+void PVRSRVCommonConnectionDisconnect(void *pvPrivData);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVGetPurgeConnectionPid
+
+@Description    Returns the PID associated with the connection currently being
+                purged by the Cleanup Thread. If no connection is being purged,
+                0 is returned.
+
+@Return         PID of the connection currently being purged, or 0 if no
+                connection is being purged
+*/ /***************************************************************************/
+IMG_PID PVRSRVGetPurgeConnectionPid(void);
+
+void PVRSRVConnectionDebugNotify(PVRSRV_DEVICE_NODE *psDevNode,
+                                 DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                 void *pvDumpDebugFile);
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVConnectionPrivateData)
+#endif
+static INLINE
+IMG_HANDLE PVRSRVConnectionPrivateData(CONNECTION_DATA *psConnection)
+{
+       return (psConnection != NULL) ? psConnection->hOsPrivateData : NULL;
+}
+
+#endif /* !defined(CONNECTION_SERVER_H) */
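The inline accessor at the end of the header is the intended way for OS-layer code to reach its per-connection private data. A trivial, hypothetical example (the ExampleOsData type is not part of the driver):

/* Hypothetical: retrieving OS-specific per-connection data through
 * PVRSRVConnectionPrivateData(); ExampleOsData is illustrative only.
 */
struct ExampleOsData {
	int iDummy;
};

static struct ExampleOsData *ExampleGetOsData(CONNECTION_DATA *psConnection)
{
	IMG_HANDLE hOsData = PVRSRVConnectionPrivateData(psConnection);

	return (struct ExampleOsData *)hOsData;
}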
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/debug_common.h b/drivers/gpu/drm/img/img-rogue/services/server/include/debug_common.h
new file mode 100644 (file)
index 0000000..e8b902f
--- /dev/null
@@ -0,0 +1,55 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common debug definitions and functions.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef DEBUG_COMMON_H
+#define DEBUG_COMMON_H
+
+#include "pvrsrv_error.h"
+#include "device.h"
+
+PVRSRV_ERROR DebugCommonInitDriver(void);
+void DebugCommonDeInitDriver(void);
+
+PVRSRV_ERROR DebugCommonInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+void DebugCommonDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+#endif /* DEBUG_COMMON_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/device.h b/drivers/gpu/drm/img/img-rogue/services/server/include/device.h
new file mode 100644 (file)
index 0000000..f5948d7
--- /dev/null
@@ -0,0 +1,540 @@
+/**************************************************************************/ /*!
+@File
+@Title          Common Device header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device related function templates and defines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef DEVICE_H
+#define DEVICE_H
+
+#include "devicemem_heapcfg.h"
+#include "mmu_common.h"
+#include "ra.h"                        /* RA_ARENA */
+#include "pvrsrv_device.h"
+#include "sync_checkpoint.h"
+#include "srvkm.h"
+#include "physheap.h"
+#include "sync_internal.h"
+#include "sysinfo.h"
+#include "dllist.h"
+
+#include "rgx_bvnc_defs_km.h"
+
+#include "lock.h"
+
+#include "power.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "virt_validation_defs.h"
+#endif
+
+typedef struct _PVRSRV_POWER_DEV_TAG_ *PPVRSRV_POWER_DEV;
+
+struct SYNC_RECORD;
+
+struct _CONNECTION_DATA_;
+
+/*************************************************************************/ /*!
+ @Function      AllocUFOBlockCallback
+ @Description   Device specific callback for allocation of a UFO block
+
+ @Input         psDeviceNode          Pointer to device node to allocate
+                                      the UFO for.
+ @Output        ppsMemDesc            Pointer to pointer for the memdesc of
+                                      the allocation
+ @Output        pui32SyncAddr         FW Base address of the UFO block
+ @Output        puiSyncPrimBlockSize  Size of the UFO block
+
+ @Return        PVRSRV_OK if allocation was successful
+*/ /**************************************************************************/
+typedef PVRSRV_ERROR (*AllocUFOBlockCallback)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+                                                                                                               DEVMEM_MEMDESC **ppsMemDesc,
+                                                                                                               IMG_UINT32 *pui32SyncAddr,
+                                                                                                               IMG_UINT32 *puiSyncPrimBlockSize);
+
+/*************************************************************************/ /*!
+ @Function      FreeUFOBlockCallback
+ @Description   Device specific callback for freeing of a UFO block
+
+ @Input         psDeviceNode    Pointer to device node that the UFO block was
+                                allocated from.
+ @Input         psMemDesc       Pointer to the memdesc of the UFO block
+                                to free.
+*/ /**************************************************************************/
+typedef void (*FreeUFOBlockCallback)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+                                                                        DEVMEM_MEMDESC *psMemDesc);
+
+typedef struct _PVRSRV_DEVICE_IDENTIFIER_
+{
+       /* Pdump memory and register bank names */
+       IMG_CHAR                                *pszPDumpDevName;
+       IMG_CHAR                                *pszPDumpRegName;
+
+       /* Under Linux, this is the minor number of the RenderNode corresponding to this Device */
+       IMG_INT32                               i32OsDeviceID;
+       /* Services layer enumeration of the device used in pvrdebug */
+       IMG_UINT32                              ui32InternalID;
+} PVRSRV_DEVICE_IDENTIFIER;
+
+typedef struct _DEVICE_MEMORY_INFO_
+{
+       /* Heap count. Doesn't include additional heaps from PVRSRVCreateDeviceMemHeap */
+       IMG_UINT32                              ui32HeapCount;
+
+       /* Blueprints for creating new device memory contexts */
+       IMG_UINT32              uiNumHeapConfigs;
+       DEVMEM_HEAP_CONFIG      *psDeviceMemoryHeapConfigArray;
+       DEVMEM_HEAP_BLUEPRINT   *psDeviceMemoryHeap;
+} DEVICE_MEMORY_INFO;
+
+#define MMU_BAD_PHYS_ADDR (0xbadbad00badULL)
+#define DUMMY_PAGE     ("DUMMY_PAGE")
+#define DEV_ZERO_PAGE  ("DEV_ZERO_PAGE")
+#define PVR_DUMMY_PAGE_INIT_VALUE      (0x0)
+#define PVR_ZERO_PAGE_INIT_VALUE       (0x0)
+
+typedef struct __DEFAULT_PAGE__
+{
+       /*Page handle for the page allocated (UMA/LMA)*/
+       PG_HANDLE       sPageHandle;
+       POS_LOCK        psPgLock;
+       ATOMIC_T        atRefCounter;
+       /*Default page size in terms of log2 */
+       IMG_UINT32      ui32Log2PgSize;
+       IMG_UINT64      ui64PgPhysAddr;
+#if defined(PDUMP)
+       IMG_HANDLE hPdumpPg;
+#endif
+} PVRSRV_DEF_PAGE;
+
+typedef enum _PVRSRV_DEVICE_STATE_
+{
+       PVRSRV_DEVICE_STATE_UNDEFINED = 0,
+       PVRSRV_DEVICE_STATE_INIT,
+       PVRSRV_DEVICE_STATE_ACTIVE,
+       PVRSRV_DEVICE_STATE_DEINIT,
+       PVRSRV_DEVICE_STATE_BAD,
+} PVRSRV_DEVICE_STATE;
+
+typedef enum _PVRSRV_DEVICE_HEALTH_STATUS_
+{
+       PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED = 0,
+       PVRSRV_DEVICE_HEALTH_STATUS_OK,
+       PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING,
+       PVRSRV_DEVICE_HEALTH_STATUS_DEAD,
+       PVRSRV_DEVICE_HEALTH_STATUS_FAULT
+} PVRSRV_DEVICE_HEALTH_STATUS;
+
+typedef enum _PVRSRV_DEVICE_HEALTH_REASON_
+{
+       PVRSRV_DEVICE_HEALTH_REASON_NONE = 0,
+       PVRSRV_DEVICE_HEALTH_REASON_ASSERTED,
+       PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING,
+       PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS,
+       PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT,
+       PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED,
+       PVRSRV_DEVICE_HEALTH_REASON_IDLING,
+       PVRSRV_DEVICE_HEALTH_REASON_RESTARTING,
+       PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS
+} PVRSRV_DEVICE_HEALTH_REASON;
+
+typedef enum _PVRSRV_DEVICE_DEBUG_DUMP_STATUS_
+{
+       PVRSRV_DEVICE_DEBUG_DUMP_NONE = 0,
+       PVRSRV_DEVICE_DEBUG_DUMP_CAPTURE
+} PVRSRV_DEVICE_DEBUG_DUMP_STATUS;
+
+#ifndef DI_GROUP_DEFINED
+#define DI_GROUP_DEFINED
+typedef struct DI_GROUP DI_GROUP;
+#endif
+#ifndef DI_ENTRY_DEFINED
+#define DI_ENTRY_DEFINED
+typedef struct DI_ENTRY DI_ENTRY;
+#endif
+
+typedef struct _PVRSRV_DEVICE_DEBUG_INFO_
+{
+       DI_GROUP *psGroup;
+       DI_ENTRY *psDumpDebugEntry;
+#ifdef SUPPORT_RGX
+       DI_ENTRY *psFWTraceEntry;
+#ifdef SUPPORT_FIRMWARE_GCOV
+       DI_ENTRY *psFWGCOVEntry;
+#endif
+       DI_ENTRY *psFWMappingsEntry;
+#if defined(SUPPORT_VALIDATION) || defined(SUPPORT_RISCV_GDB)
+       DI_ENTRY *psRiscvDmiDIEntry;
+       IMG_UINT64 ui64RiscvDmi;
+#endif
+#endif /* SUPPORT_RGX */
+#ifdef SUPPORT_VALIDATION
+       DI_ENTRY *psRGXRegsEntry;
+#endif /* SUPPORT_VALIDATION */
+#ifdef SUPPORT_POWER_VALIDATION_VIA_DEBUGFS
+       DI_ENTRY *psPowMonEntry;
+#endif
+#ifdef SUPPORT_POWER_SAMPLING_VIA_DEBUGFS
+       DI_ENTRY *psPowerDataEntry;
+#endif
+} PVRSRV_DEVICE_DEBUG_INFO;
+
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+#define RGX_LISR_INIT                                                  (0U)
+#define RGX_LISR_DEVICE_NOT_POWERED                            (1U)
+#define RGX_LISR_NOT_TRIGGERED_BY_HW                   (2U)
+#define RGX_LISR_FW_IRQ_COUNTER_NOT_UPDATED            (3U)
+#define RGX_LISR_PROCESSED                                             (4U)
+
+typedef IMG_UINT32 LISR_STATUS;
+
+typedef struct _LISR_EXECUTION_INFO_
+{
+       /* status of last LISR invocation */
+       LISR_STATUS ui32Status;
+
+       /* snapshot from the last LISR invocation */
+#if defined(RGX_FW_IRQ_OS_COUNTERS)
+       IMG_UINT32 aui32InterruptCountSnapshot[RGX_NUM_OS_SUPPORTED];
+#else
+       IMG_UINT32 aui32InterruptCountSnapshot[RGXFW_THREAD_NUM];
+#endif
+
+       /* time of the last LISR invocation */
+       IMG_UINT64 ui64Clockns;
+} LISR_EXECUTION_INFO;
+
+#define UPDATE_LISR_DBG_STATUS(status)         psDeviceNode->sLISRExecutionInfo.ui32Status = (status)
+#define UPDATE_LISR_DBG_SNAPSHOT(idx, val)     psDeviceNode->sLISRExecutionInfo.aui32InterruptCountSnapshot[idx] = (val)
+#define UPDATE_LISR_DBG_TIMESTAMP()                    psDeviceNode->sLISRExecutionInfo.ui64Clockns = OSClockns64()
+#define UPDATE_LISR_DBG_COUNTER()                      psDeviceNode->ui64nLISR++
+#define UPDATE_MISR_DBG_COUNTER()                      psDeviceNode->ui64nMISR++
+#else
+#define UPDATE_LISR_DBG_STATUS(status)
+#define UPDATE_LISR_DBG_SNAPSHOT(idx, val)
+#define UPDATE_LISR_DBG_TIMESTAMP()
+#define UPDATE_LISR_DBG_COUNTER()
+#define UPDATE_MISR_DBG_COUNTER()
+#endif /* defined(PVRSRV_DEBUG_LISR_EXECUTION) */
+
+typedef struct _PVRSRV_DEVICE_NODE_
+{
+       PVRSRV_DEVICE_IDENTIFIER        sDevId;
+
+       PVRSRV_DEVICE_STATE                     eDevState;
+       PVRSRV_DEVICE_FABRIC_TYPE       eDevFabricType;
+
+       ATOMIC_T                                        eHealthStatus; /* Holds values from PVRSRV_DEVICE_HEALTH_STATUS */
+       ATOMIC_T                                        eHealthReason; /* Holds values from PVRSRV_DEVICE_HEALTH_REASON */
+       ATOMIC_T                                        eDebugDumpRequested; /* Holds values from PVRSRV_DEVICE_DEBUG_DUMP_STATUS */
+
+       IMG_HANDLE                                      *hDebugTable;
+
+       /* device specific MMU attributes */
+       MMU_DEVICEATTRIBS      *psMMUDevAttrs;
+       /* Device specific MMU firmware attributes, used only in some devices */
+       MMU_DEVICEATTRIBS      *psFirmwareMMUDevAttrs;
+
+       PHYS_HEAP              *psMMUPhysHeap;
+
+       /* lock for power state transitions */
+       POS_LOCK                                hPowerLock;
+       IMG_PID                 uiPwrLockOwnerPID; /* Only valid between lock and corresponding unlock
+                                                     operations of hPowerLock */
+
+       /* current system device power state */
+       PVRSRV_SYS_POWER_STATE  eCurrentSysPowerState;
+       PPVRSRV_POWER_DEV       psPowerDev;
+
+    /* multicore configuration information */
+    IMG_UINT32              ui32MultiCoreNumCores;      /* total cores primary + secondaries. 0 for non-multi core */
+    IMG_UINT32              ui32MultiCorePrimaryId;     /* primary core id for this device */
+    IMG_UINT64             *pui64MultiCoreCapabilities; /* capabilities for each core */
+
+       /*
+               callbacks the device must support:
+       */
+
+       PVRSRV_ERROR (*pfnDevSLCFlushRange)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+                                                                               MMU_CONTEXT *psMMUContext,
+                                                                               IMG_DEV_VIRTADDR sDevVAddr,
+                                                                               IMG_DEVMEM_SIZE_T uiSize,
+                                                                               IMG_BOOL bInvalidate);
+
+       PVRSRV_ERROR (*pfnInvalFBSCTable)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+                                                                         MMU_CONTEXT *psMMUContext,
+                                                                         IMG_UINT64 ui64FBSCEntries);
+
+       PVRSRV_ERROR (*pfnValidateOrTweakPhysAddrs)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+                                                                                               MMU_DEVICEATTRIBS *psDevAttrs,
+                                                                                               IMG_UINT64 *pui64Addr);
+
+       void (*pfnMMUCacheInvalidate)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+                                                                 MMU_CONTEXT *psMMUContext,
+                                                                 MMU_LEVEL eLevel,
+                                                                 IMG_BOOL bUnmap);
+
+       PVRSRV_ERROR (*pfnMMUCacheInvalidateKick)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+                                                 IMG_UINT32 *pui32NextMMUInvalidateUpdate);
+
+       IMG_UINT32 (*pfnMMUCacheGetInvalidateCounter)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+
+       void (*pfnDumpDebugInfo)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+       PVRSRV_ERROR (*pfnUpdateHealthStatus)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+                                             IMG_BOOL bIsTimerPoll);
+
+#if defined(SUPPORT_AUTOVZ)
+       void (*pfnUpdateAutoVzWatchdog)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+#endif
+
+       PVRSRV_ERROR (*pfnValidationGPUUnitsPowerChange)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT32 ui32NewState);
+
+       PVRSRV_ERROR (*pfnResetHWRLogs)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+       PVRSRV_ERROR (*pfnVerifyBVNC)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT64 ui64GivenBVNC, IMG_UINT64 ui64CoreIdMask);
+
+       /* Method to drain device HWPerf packets from firmware buffer to host buffer */
+       PVRSRV_ERROR (*pfnServiceHWPerf)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+       PVRSRV_ERROR (*pfnDeviceVersionString)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_CHAR **ppszVersionString);
+
+       PVRSRV_ERROR (*pfnDeviceClockSpeed)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_PUINT32 pui32RGXClockSpeed);
+
+       PVRSRV_ERROR (*pfnSoftReset)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT64 ui64ResetValue1, IMG_UINT64 ui64ResetValue2);
+
+       PVRSRV_ERROR (*pfnAlignmentCheck)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT32 ui32FWAlignChecksSize, IMG_UINT32 aui32FWAlignChecks[]);
+       IMG_BOOL        (*pfnCheckDeviceFeature)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT64 ui64FeatureMask);
+
+       IMG_INT32       (*pfnGetDeviceFeatureValue)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, enum _RGX_FEATURE_WITH_VALUE_INDEX_ eFeatureIndex);
+
+    PVRSRV_ERROR (*pfnGetMultiCoreInfo)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT32 ui32CapsSize,
+                                        IMG_UINT32 *pui32NumCores, IMG_UINT64 *pui64Caps);
+
+       IMG_BOOL (*pfnHasFBCDCVersion31)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+       MMU_DEVICEATTRIBS* (*pfnGetMMUDeviceAttributes)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_BOOL bKernelMemoryCtx);
+
+       PVRSRV_DEVICE_CONFIG    *psDevConfig;
+
+       /* device post-finalise compatibility check */
+       PVRSRV_ERROR                    (*pfnInitDeviceCompatCheck) (struct _PVRSRV_DEVICE_NODE_*);
+
+       /* initialise device-specific physheaps */
+       PVRSRV_ERROR                    (*pfnPhysMemDeviceHeapsInit) (struct _PVRSRV_DEVICE_NODE_ *);
+
+       /* initialise the FW MMU if the FW is not using the GPU MMU; NULL otherwise. */
+       PVRSRV_ERROR                    (*pfnFwMMUInit) (struct _PVRSRV_DEVICE_NODE_ *);
+
+       /* information about the device's address space and heaps */
+       DEVICE_MEMORY_INFO              sDevMemoryInfo;
+
+       /* device's shared-virtual-memory heap max virtual address */
+       IMG_UINT64                              ui64GeneralSVMHeapTopVA;
+
+       ATOMIC_T                                iNumClockSpeedChanges;
+
+       /* private device information */
+       void                                    *pvDevice;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+       RA_ARENA                *psOSSharedArena;
+       RA_ARENA                                *psOSidSubArena[GPUVIRT_VALIDATION_NUM_OS];
+#endif
+
+       /* FW_MAIN, FW_CONFIG and FW_GUEST heaps. Should be part of registered heaps? */
+       PHYS_HEAP               *psFWMainPhysHeap;
+       PHYS_HEAP               *psFWCfgPhysHeap;
+       PHYS_HEAP               *apsFWPremapPhysHeap[RGX_NUM_OS_SUPPORTED];
+
+       IMG_UINT32                              ui32RegisteredPhysHeaps;
+       PHYS_HEAP                               **papsRegisteredPhysHeaps;
+
+       /* PHYS_HEAP Mapping table to the platform's physical memory heap(s)
+        * used by this device. The physical heaps are created based on
+        * the PHYS_HEAP_CONFIG data from the platform's system layer at device
+        * creation time.
+        *
+        * Contains PVRSRV_PHYS_HEAP_LAST entries for all the possible physical heaps allowed in the design.
+        * It allows the system layer PhysHeaps for the device to be identified for use in creating new PMRs.
+        * See PhysHeapCreatePMR()
+        */
+       PHYS_HEAP                               *apsPhysHeap[PVRSRV_PHYS_HEAP_LAST];
+       IMG_UINT32                              ui32UserAllocHeapCount;
+
+#if defined(SUPPORT_AUTOVZ)
+       /* Phys Heap reserved for storing the MMU mappings of firmware.
+        * The memory backing this Phys Heap must persist across driver or OS reboots */
+       PHYS_HEAP               *psFwMMUReservedPhysHeap;
+#endif
+
+       /* Flag indicating if the firmware has been initialised during the
+        * 1st boot of the Host driver according to the AutoVz life-cycle. */
+       IMG_BOOL                                bAutoVzFwIsUp;
+
+       struct _PVRSRV_DEVICE_NODE_     *psNext;
+       struct _PVRSRV_DEVICE_NODE_     **ppsThis;
+
+       /* Functions for notification about memory contexts */
+       PVRSRV_ERROR                    (*pfnRegisterMemoryContext)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+                                                                                                               MMU_CONTEXT                                     *psMMUContext,
+                                                                                                               IMG_HANDLE                                      *hPrivData);
+       void                                    (*pfnUnregisterMemoryContext)(IMG_HANDLE hPrivData);
+
+       /* Functions for allocation/freeing of UFOs */
+       AllocUFOBlockCallback   pfnAllocUFOBlock;       /*!< Callback for allocation of a block of UFO memory */
+       FreeUFOBlockCallback    pfnFreeUFOBlock;        /*!< Callback for freeing of a block of UFO memory */
+
+       IMG_HANDLE                              hSyncServerRecordNotify;
+       POS_LOCK                                hSyncServerRecordLock;
+       IMG_UINT32                              ui32SyncServerRecordCount;
+       IMG_UINT32                              ui32SyncServerRecordCountHighWatermark;
+       DLLIST_NODE                             sSyncServerRecordList;
+       struct SYNC_RECORD              *apsSyncServerRecordsFreed[PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN];
+       IMG_UINT32                              uiSyncServerRecordFreeIdx;
+
+       IMG_HANDLE                              hSyncCheckpointRecordNotify;
+       POS_LOCK                                hSyncCheckpointRecordLock;
+       IMG_UINT32                              ui32SyncCheckpointRecordCount;
+       IMG_UINT32                              ui32SyncCheckpointRecordCountHighWatermark;
+       DLLIST_NODE                             sSyncCheckpointRecordList;
+       struct SYNC_CHECKPOINT_RECORD   *apsSyncCheckpointRecordsFreed[PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN];
+       IMG_UINT32                              uiSyncCheckpointRecordFreeIdx;
+
+       IMG_HANDLE                              hSyncCheckpointNotify;
+       POS_SPINLOCK                    hSyncCheckpointListLock; /*!< Protects sSyncCheckpointSyncsList */
+       DLLIST_NODE                             sSyncCheckpointSyncsList;
+
+       PSYNC_CHECKPOINT_CONTEXT hSyncCheckpointContext;
+       PSYNC_PRIM_CONTEXT              hSyncPrimContext;
+
+       /* With this sync-prim we make sure the MMU cache is flushed
+        * before we free the page table memory */
+       PVRSRV_CLIENT_SYNC_PRIM *psMMUCacheSyncPrim;
+       IMG_UINT32                              ui32NextMMUInvalidateUpdate;
+
+       IMG_HANDLE                              hCmdCompNotify;
+       IMG_HANDLE                              hDbgReqNotify;
+       IMG_HANDLE                              hAppHintDbgReqNotify;
+       IMG_HANDLE                              hPhysHeapDbgReqNotify;
+
+       PVRSRV_DEF_PAGE                 sDummyPage;
+       PVRSRV_DEF_PAGE                 sDevZeroPage;
+
+       POSWR_LOCK                              hMemoryContextPageFaultNotifyListLock;
+       DLLIST_NODE                             sMemoryContextPageFaultNotifyListHead;
+
+       /* System DMA capability */
+       IMG_BOOL                                bHasSystemDMA;
+       IMG_HANDLE                              hDmaTxChan;
+       IMG_HANDLE                              hDmaRxChan;
+
+#if defined(PDUMP)
+       /*
+        * FBC clear color register default value to use.
+        */
+       IMG_UINT64                              ui64FBCClearColour;
+
+       /* Device-level callback which is called when pdump.exe starts.
+        * Should be implemented in device-specific init code, e.g. rgxinit.c
+        */
+       PVRSRV_ERROR                    (*pfnPDumpInitDevice)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+       /* device-level callback to return pdump ID associated to a memory context */
+       IMG_UINT32                              (*pfnMMUGetContextID)(IMG_HANDLE hDevMemContext);
+
+       IMG_UINT8                       *pui8DeferredSyncCPSignal;      /*! Deferred fence events buffer */
+
+       IMG_UINT16                      ui16SyncCPReadIdx;              /*! Read index in the above deferred fence events buffer */
+
+       IMG_UINT16                      ui16SyncCPWriteIdx;             /*! Write index in the above deferred fence events buffer */
+
+       POS_LOCK                        hSyncCheckpointSignalLock;      /*! Guards data shared between sleepable and non-sleepable contexts */
+
+       void                            *pvSyncCPMISR;                  /*! MISR to emit pending/deferred fence signals */
+
+       void                            *hTransition;                   /*!< SyncCheckpoint PdumpTransition Cookie */
+
+       DLLIST_NODE                     sSyncCheckpointContextListHead; /*!< List head for the sync chkpt contexts */
+
+       POS_LOCK                        hSyncCheckpointContextListLock; /*! lock for accessing sync chkpt contexts list */
+
+#endif
+
+#if defined(SUPPORT_VALIDATION)
+       POS_LOCK                        hValidationLock;
+#endif
+
+       /* Members for linking which connections are open on this device */
+       POS_LOCK                hConnectionsLock;    /*!< Lock protecting sConnections */
+       DLLIST_NODE             sConnections;        /*!< The list of currently active connection objects for this device node */
+
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+       LISR_EXECUTION_INFO     sLISRExecutionInfo;  /*!< Information about the last execution of the LISR */
+       IMG_UINT64              ui64nLISR;           /*!< Number of LISR calls seen */
+       IMG_UINT64              ui64nMISR;           /*!< Number of MISR calls made */
+#endif
+
+       PVRSRV_DEVICE_DEBUG_INFO sDebugInfo;
+} PVRSRV_DEVICE_NODE;
+
+/*
+ * Macros to be used instead of calling the pfns directly, since these macros
+ * expand the feature passed as an argument into the bitmask/index expected by
+ * the macros defined in rgx_bvnc_defs_km.h
+ */
+#define PVRSRV_IS_FEATURE_SUPPORTED(psDevNode, Feature) \
+               psDevNode->pfnCheckDeviceFeature(psDevNode, RGX_FEATURE_##Feature##_BIT_MASK)
+#define PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, Feature) \
+               psDevNode->pfnGetDeviceFeatureValue(psDevNode, RGX_FEATURE_##Feature##_IDX)
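
As an informal illustration of the intended use of these wrappers (not part of the patch; COMPUTE and NUM_CLUSTERS are assumed example feature names of the kind listed in rgx_bvnc_defs_km.h and may not match every core):

/* Illustrative sketch only: query a boolean feature and a value-type feature
 * through the device-node callbacks. Feature names are assumed examples. */
static IMG_INT32 ExampleFeatureQuery(PVRSRV_DEVICE_NODE *psDevNode)
{
    IMG_INT32 i32NumClusters = 0;

    if (PVRSRV_IS_FEATURE_SUPPORTED(psDevNode, COMPUTE))
    {
        /* Value-type features are fetched by index rather than bit mask. */
        i32NumClusters = PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, NUM_CLUSTERS);
    }

    return i32NumClusters;
}
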
+
+PVRSRV_ERROR PVRSRVDeviceFinalise(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                                          IMG_BOOL bInitSuccessful);
+
+PVRSRV_ERROR PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+PVRSRV_ERROR RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE * psDeviceNode, IMG_UINT32 ui32ClientBuildOptions);
+
+
+#endif /* DEVICE_H */
+
+/******************************************************************************
+ End of file (device.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/devicemem_heapcfg.h b/drivers/gpu/drm/img/img-rogue/services/server/include/devicemem_heapcfg.h
new file mode 100644 (file)
index 0000000..3b032ae
--- /dev/null
@@ -0,0 +1,184 @@
+/**************************************************************************/ /*!
+@File
+@Title          Device Heap Configuration Helper Functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device memory management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef DEVICEMEMHEAPCFG_H
+#define DEVICEMEMHEAPCFG_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+/*
+ *  Supported log2 page size values for RGX_GENERAL_NON_4K_HEAP_ID
+ */
+#define RGX_HEAP_4KB_PAGE_SHIFT                                        (12U)
+#define RGX_HEAP_16KB_PAGE_SHIFT                               (14U)
+#define RGX_HEAP_64KB_PAGE_SHIFT                               (16U)
+#define RGX_HEAP_256KB_PAGE_SHIFT                              (18U)
+#define RGX_HEAP_1MB_PAGE_SHIFT                                        (20U)
+#define RGX_HEAP_2MB_PAGE_SHIFT                                        (21U)
+
+struct _PVRSRV_DEVICE_NODE_;
+struct _CONNECTION_DATA_;
+
+
+/*
+  A "heap config" is a blueprint to be used for initial setting up of heaps
+  when a device memory context is created.
+
+  We define a data structure to define this, but it's really down to the
+  caller to populate it. This is all expected to be in-kernel. We provide an
+  API that client code can use to enquire about the blueprint, such that it may
+  do the heap set-up during the context creation call on behalf of the user.
+*/
+
+/* Blueprint for a single heap */
+typedef struct _DEVMEM_HEAP_BLUEPRINT_
+{
+       /* Name of this heap - for debug purposes, and perhaps for lookup
+       by name */
+       const IMG_CHAR *pszName;
+
+       /* Virtual address of the beginning of the heap.  This _must_ be a
+       multiple of the data page size for the heap.  It is
+       _recommended_ that it be coarser than that - especially, it
+       should begin on a boundary appropriate to the MMU for the
+       device.  For Rogue, this is a Page Directory boundary, or 1GB
+       (virtual address a multiple of 0x0040000000). */
+       IMG_DEV_VIRTADDR sHeapBaseAddr;
+
+       /* Length of the heap.  The END address of the heap has a similar
+       restriction to that of the _beginning_ of the heap; that is, the
+       heap length _must_ be a whole number of data pages.
+       Again, the recommendation is that it ends on a 1GB boundary.
+       Again, this is not essential, but we do know that (at the time
+       of writing) the current implementation of mmu_common.c is such
+       that no two heaps may share a page directory, thus the
+       remaining virtual space would be wasted if the length were not
+       a multiple of 1GB */
+       IMG_DEVMEM_SIZE_T uiHeapLength;
+
+       /* The VA range from sHeapBaseAddr to sHeapBaseAddr + uiReservedRegionLength - 1
+       is reserved for statically defined addresses (shared/known between clients
+       and FW). Services never maps allocations into this reserved address space
+       _unless_ explicitly requested via PVRSRVMapToDeviceAddress by passing an
+       sDevVirtAddr which falls within this reserved range. Since this range is
+       completely for clients to manage (where allocations are page granular),
+       it _must_ again be a whole number of data pages. Additionally, a further
+       constraint requires it to be a multiple of DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY
+       (which evaluates to the maximum supported page size) to support varied
+       page sizes */
+       IMG_DEVMEM_SIZE_T uiReservedRegionLength;
+
+       /* Data page size.  This is the page size that is going to get
+       programmed into the MMU, so it needs to be a valid one for the
+       device.  Importantly, the start address and length _must_ be
+       multiples of this page size.  Note that the page size is
+       specified as the log 2 relative to 1 byte (e.g. 12 indicates
+       4kB) */
+       IMG_UINT32 uiLog2DataPageSize;
+
+       /* Import alignment.  Force imports to this heap to be
+       aligned to at least this value */
+       IMG_UINT32 uiLog2ImportAlignment;
+
+} DEVMEM_HEAP_BLUEPRINT;
+
+void HeapCfgBlueprintInit(const IMG_CHAR        *pszName,
+                             IMG_UINT64             ui64HeapBaseAddr,
+                             IMG_DEVMEM_SIZE_T      uiHeapLength,
+                             IMG_DEVMEM_SIZE_T      uiReservedRegionLength,
+                             IMG_UINT32             ui32Log2DataPageSize,
+                             IMG_UINT32             uiLog2ImportAlignment,
+                             DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint);
+
+/* Entire named heap config */
+typedef struct _DEVMEM_HEAP_CONFIG_
+{
+    /* Name of this heap config - for debug and maybe lookup */
+    const IMG_CHAR *pszName;
+
+    /* Number of heaps in this config */
+    IMG_UINT32 uiNumHeaps;
+
+    /* Array of individual heap blueprints as defined above */
+    DEVMEM_HEAP_BLUEPRINT *psHeapBlueprintArray;
+} DEVMEM_HEAP_CONFIG;
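
A minimal sketch, for illustration only and not part of the patch, of how a system layer might populate one blueprint with HeapCfgBlueprintInit() and wrap it in a single-heap config; the heap name, base address and sizes are invented placeholders, not values used by this driver:

/* Illustrative sketch only: all names and numeric values are placeholders. */
static void ExampleBuildHeapConfig(DEVMEM_HEAP_BLUEPRINT *psBlueprint,
                                   DEVMEM_HEAP_CONFIG *psConfig)
{
    HeapCfgBlueprintInit("EXAMPLE_GENERAL",       /* pszName (placeholder) */
                         0x0040000000ULL,         /* 1GB-aligned base address */
                         0x0040000000ULL,         /* heap length: 1GB */
                         0,                       /* no reserved region */
                         RGX_HEAP_4KB_PAGE_SHIFT, /* 4kB data pages */
                         0,                       /* no extra import alignment */
                         psBlueprint);

    psConfig->pszName = "EXAMPLE_CONFIG";
    psConfig->uiNumHeaps = 1;
    psConfig->psHeapBlueprintArray = psBlueprint;
}
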
+
+
+PVRSRV_ERROR
+HeapCfgHeapConfigCount(struct _CONNECTION_DATA_ *psConnection,
+    const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+    IMG_UINT32 *puiNumHeapConfigsOut
+);
+
+PVRSRV_ERROR
+HeapCfgHeapCount(struct _CONNECTION_DATA_ *psConnection,
+    const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+    IMG_UINT32 uiHeapConfigIndex,
+    IMG_UINT32 *puiNumHeapsOut
+);
+
+PVRSRV_ERROR
+HeapCfgHeapConfigName(struct _CONNECTION_DATA_ *psConnection,
+    const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+    IMG_UINT32 uiHeapConfigIndex,
+    IMG_UINT32 uiHeapConfigNameBufSz,
+    IMG_CHAR *pszHeapConfigNameOut
+);
+
+PVRSRV_ERROR
+HeapCfgHeapDetails(struct _CONNECTION_DATA_ *psConnection,
+    const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+    IMG_UINT32 uiHeapConfigIndex,
+    IMG_UINT32 uiHeapIndex,
+    IMG_UINT32 uiHeapNameBufSz,
+    IMG_CHAR *pszHeapNameOut,
+    IMG_DEV_VIRTADDR *psDevVAddrBaseOut,
+    IMG_DEVMEM_SIZE_T *puiHeapLengthOut,
+    IMG_DEVMEM_SIZE_T *puiReservedRegionLengthOut,
+    IMG_UINT32 *puiLog2DataPageSizeOut,
+    IMG_UINT32 *puiLog2ImportAlignmentOut
+);
+
+#endif
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/devicemem_history_server.h b/drivers/gpu/drm/img/img-rogue/services/server/include/devicemem_history_server.h
new file mode 100644 (file)
index 0000000..8e7ca59
--- /dev/null
@@ -0,0 +1,157 @@
+/*************************************************************************/ /*!
+@File           devicemem_history_server.h
+@Title          Resource Information abstraction
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Devicemem History functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef DEVICEMEM_HISTORY_SERVER_H
+#define DEVICEMEM_HISTORY_SERVER_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "rgxmem.h"
+#include "devicemem_utils.h"
+#include "connection_server.h"
+
+PVRSRV_ERROR DevicememHistoryInitKM(void);
+
+void DevicememHistoryDeInitKM(void);
+
+PVRSRV_ERROR DevicememHistoryMapKM(PMR *psPMR,
+                                                       IMG_UINT32 ui32Offset,
+                                                       IMG_DEV_VIRTADDR sDevVAddr,
+                                                       IMG_DEVMEM_SIZE_T uiSize,
+                                                       const char szName[DEVMEM_ANNOTATION_MAX_LEN],
+                                                       IMG_UINT32 ui32PageSize,
+                                                       IMG_UINT32 ui32AllocationIndex,
+                                                       IMG_UINT32 *pui32AllocationIndexOut);
+
+PVRSRV_ERROR DevicememHistoryUnmapKM(PMR *psPMR,
+                                                       IMG_UINT32 ui32Offset,
+                                                       IMG_DEV_VIRTADDR sDevVAddr,
+                                                       IMG_DEVMEM_SIZE_T uiSize,
+                                                       const char szName[DEVMEM_ANNOTATION_MAX_LEN],
+                                                       IMG_UINT32 ui32PageSize,
+                                                       IMG_UINT32 ui32AllocationIndex,
+                                                       IMG_UINT32 *pui32AllocationIndexOut);
+
+PVRSRV_ERROR DevicememHistoryMapVRangeKM(CONNECTION_DATA *psConnection,
+                                                       PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                       IMG_DEV_VIRTADDR sBaseDevVAddr,
+                                                       IMG_UINT32 ui32StartPage,
+                                                       IMG_UINT32 ui32NumPages,
+                                                       IMG_DEVMEM_SIZE_T uiAllocSize,
+                                                       const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN],
+                                                       IMG_UINT32 ui32Log2PageSize,
+                                                       IMG_UINT32 ui32AllocationIndex,
+                                                       IMG_UINT32 *ui32AllocationIndexOut);
+
+PVRSRV_ERROR DevicememHistoryUnmapVRangeKM(CONNECTION_DATA *psConnection,
+                                                       PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                       IMG_DEV_VIRTADDR sBaseDevVAddr,
+                                                       IMG_UINT32 ui32StartPage,
+                                                       IMG_UINT32 ui32NumPages,
+                                                       IMG_DEVMEM_SIZE_T uiAllocSize,
+                                                       const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN],
+                                                       IMG_UINT32 ui32Log2PageSize,
+                                                       IMG_UINT32 ui32AllocationIndex,
+                                                       IMG_UINT32 *ui32AllocationIndexOut);
+
+PVRSRV_ERROR DevicememHistorySparseChangeKM(PMR *psPMR,
+                                                       IMG_UINT32 ui32Offset,
+                                                       IMG_DEV_VIRTADDR sDevVAddr,
+                                                       IMG_DEVMEM_SIZE_T uiSize,
+                                                       const char szName[DEVMEM_ANNOTATION_MAX_LEN],
+                                                       IMG_UINT32 ui32PageSize,
+                                                       IMG_UINT32 ui32AllocPageCount,
+                                                       IMG_UINT32 *paui32AllocPageIndices,
+                                                       IMG_UINT32 ui32FreePageCount,
+                                                       IMG_UINT32 *pauiFreePageIndices,
+                                                       IMG_UINT32 AllocationIndex,
+                                                       IMG_UINT32 *pui32AllocationIndexOut);
+
+/* used when the PID does not matter */
+#define DEVICEMEM_HISTORY_PID_ANY 0xFFFFFFFE
+
+typedef struct _DEVICEMEM_HISTORY_QUERY_IN_
+{
+       IMG_PID uiPID;
+       IMG_DEV_VIRTADDR sDevVAddr;
+} DEVICEMEM_HISTORY_QUERY_IN;
+
+/* Store up to 4 results for a lookup. In the case of the faulting page being
+ * re-mapped between the page fault occurring on HW and the page fault analysis
+ * being done, the second result entry will show the allocation being unmapped.
+ * A further 2 entries are added to cater for multiple buffers in the same page.
+ */
+#define DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS 4
+
+typedef struct _DEVICEMEM_HISTORY_QUERY_OUT_RESULT_
+{
+       IMG_CHAR szString[DEVMEM_ANNOTATION_MAX_LEN];
+       IMG_DEV_VIRTADDR sBaseDevVAddr;
+       IMG_DEVMEM_SIZE_T uiSize;
+       IMG_BOOL bMap;
+       IMG_BOOL bRange;
+       IMG_BOOL bAll;
+       IMG_UINT64 ui64When;
+       IMG_UINT64 ui64Age;
+       /* info for sparse map/unmap operations (i.e. bRange=IMG_TRUE) */
+       IMG_UINT32 ui32StartPage;
+       IMG_UINT32 ui32PageCount;
+       IMG_DEV_VIRTADDR sMapStartAddr;
+       IMG_DEV_VIRTADDR sMapEndAddr;
+       RGXMEM_PROCESS_INFO sProcessInfo;
+} DEVICEMEM_HISTORY_QUERY_OUT_RESULT;
+
+typedef struct _DEVICEMEM_HISTORY_QUERY_OUT_
+{
+       IMG_UINT32 ui32NumResults;
+       /* result 0 is the newest */
+       DEVICEMEM_HISTORY_QUERY_OUT_RESULT sResults[DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS];
+} DEVICEMEM_HISTORY_QUERY_OUT;
+
+IMG_BOOL
+DevicememHistoryQuery(DEVICEMEM_HISTORY_QUERY_IN *psQueryIn,
+                      DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut,
+                      IMG_UINT32 ui32PageSizeBytes,
+                      IMG_BOOL bMatchAnyAllocInPage);
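
A hedged sketch, not part of the patch, of how a fault-analysis path might drive this query; the 4kB page size is an assumption and the faulting address is supplied by the caller:

/* Illustrative sketch only: find the newest allocation record covering a
 * faulting device-virtual address, matching any PID. */
static IMG_BOOL ExampleHistoryLookup(IMG_DEV_VIRTADDR sFaultAddr)
{
    DEVICEMEM_HISTORY_QUERY_IN  sQueryIn;
    DEVICEMEM_HISTORY_QUERY_OUT sQueryOut;

    sQueryIn.uiPID = DEVICEMEM_HISTORY_PID_ANY;  /* do not filter by PID */
    sQueryIn.sDevVAddr = sFaultAddr;

    /* On success, sQueryOut.sResults[0] is the newest matching record. */
    return DevicememHistoryQuery(&sQueryIn, &sQueryOut,
                                 4096,      /* assumed 4kB page size */
                                 IMG_TRUE); /* match any alloc in the page */
}
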
+
+#endif
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/devicemem_server.h b/drivers/gpu/drm/img/img-rogue/services/server/include/devicemem_server.h
new file mode 100644 (file)
index 0000000..30a2b2e
--- /dev/null
@@ -0,0 +1,633 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Server side component for device memory management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef DEVICEMEM_SERVER_H
+#define DEVICEMEM_SERVER_H
+
+#include "device.h" /* For device node */
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+#include "connection_server.h"
+#include "pmr.h"
+
+typedef struct _DEVMEMINT_CTX_ DEVMEMINT_CTX;
+typedef struct _DEVMEMINT_CTX_EXPORT_ DEVMEMINT_CTX_EXPORT;
+typedef struct _DEVMEMINT_HEAP_ DEVMEMINT_HEAP;
+
+typedef struct _DEVMEMINT_RESERVATION_ DEVMEMINT_RESERVATION;
+typedef struct _DEVMEMINT_MAPPING_ DEVMEMINT_MAPPING;
+typedef struct _DEVMEMINT_PF_NOTIFY_ DEVMEMINT_PF_NOTIFY;
+
+
+/*************************************************************************/ /*!
+@Function       DevmemIntUnpin
+@Description    This is the counterpart to DevmemPin(). It is meant to be
+                called when the allocation is NOT mapped in the device virtual
+                space.
+
+@Input          psPMR           The physical memory to unpin.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success and the memory is
+                                registered to be reclaimed. Error otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR DevmemIntUnpin(PMR *psPMR);
+
+/*************************************************************************/ /*!
+@Function       DevmemIntUnpinInvalidate
+@Description    This is the counterpart to DevmemIntPinValidate(). It is meant
+                to be called for allocations that ARE mapped in the device
+                virtual space and whose mapping has to be invalidated.
+
+@Input          psPMR           The physical memory to unpin.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success and the memory is
+                                registered to be reclaimed. Error otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemIntUnpinInvalidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR);
+
+/*************************************************************************/ /*!
+@Function       DevmemIntPin
+@Description    This is the counterpart to DevmemIntUnpin().
+                It is meant to be called if there is NO device mapping present.
+
+@Input          psPMR           The physical memory to pin.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success and the allocation content
+                                was successfully restored.
+
+                                PVRSRV_ERROR_PMR_NEW_MEMORY when the content
+                                could not be restored and new physical memory
+                                was allocated.
+
+                                A different error otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR DevmemIntPin(PMR *psPMR);
+
+/*************************************************************************/ /*!
+@Function       DevmemIntPinValidate
+@Description    This is the counterpart to DevmemIntUnpinInvalidate().
+                It is meant to be called if there IS a device mapping present
+                that needs to be taken care of.
+
+@Input          psDevmemMapping The mapping structure used for the passed PMR.
+
+@Input          psPMR           The physical memory to pin.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success and the allocation content
+                                was successfully restored.
+
+                                PVRSRV_ERROR_PMR_NEW_MEMORY when the content
+                                could not be restored and new physical memory
+                                was allocated.
+
+                                A different error otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemIntPinValidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR);
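
A brief illustrative sketch, not part of the patch, of the unpin/pin contract documented above for a PMR with no device mapping; psPMR is assumed to be a valid PMR handle owned by the caller:

/* Illustrative sketch only: the unpin/pin pairing, including the "new
 * memory" case described in the comments above. */
static PVRSRV_ERROR ExampleUnpinThenPin(PMR *psPMR)
{
    PVRSRV_ERROR eError;

    /* Mark the backing memory as reclaimable while it is not needed. */
    eError = DevmemIntUnpin(psPMR);
    if (eError != PVRSRV_OK)
    {
        return eError;
    }

    /* Later: pin it again before the allocation is reused. */
    eError = DevmemIntPin(psPMR);
    if (eError == PVRSRV_ERROR_PMR_NEW_MEMORY)
    {
        /* The old pages were reclaimed; the caller must reinitialise the
         * contents before using the allocation. */
        eError = PVRSRV_OK;
    }

    return eError;
}
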
+/*
+ * DevmemServerGetImportHandle()
+ *
+ * For given exportable memory descriptor returns PMR handle
+ *
+ */
+PVRSRV_ERROR
+DevmemServerGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
+                            IMG_HANDLE *phImport);
+
+/*
+ * DevmemServerGetHeapHandle()
+ *
+ * For given reservation returns the Heap handle
+ *
+ */
+PVRSRV_ERROR
+DevmemServerGetHeapHandle(DEVMEMINT_RESERVATION *psReservation,
+                          IMG_HANDLE *phHeap);
+
+/*
+ * DevmemServerGetContext()
+ *
+ * For given heap returns the context.
+ *
+ */
+PVRSRV_ERROR
+DevmemServerGetContext(DEVMEMINT_HEAP *psDevmemHeap,
+                       DEVMEMINT_CTX **ppsDevmemCtxPtr);
+
+/*
+ * DevmemServerGetPrivData()
+ *
+ * For given context returns the private data handle.
+ *
+ */
+PVRSRV_ERROR
+DevmemServerGetPrivData(DEVMEMINT_CTX *psDevmemCtx,
+                        IMG_HANDLE *phPrivData);
+
+/*
+ * DevmemIntAllocDefBackingPage
+ *
+ * This function allocates a default backing page and initializes it
+ * with a given default value
+ *
+ */
+PVRSRV_ERROR DevmemIntAllocDefBackingPage(PVRSRV_DEVICE_NODE *psDevNode,
+                                            PVRSRV_DEF_PAGE *psDefPage,
+                                            IMG_INT    uiInitValue,
+                                            IMG_CHAR *pcDefPageName,
+                                            IMG_BOOL bInitPage);
+/*
+ * DevmemIntFreeDefBackingPage
+ *
+ * Frees a given page
+ */
+void DevmemIntFreeDefBackingPage(PVRSRV_DEVICE_NODE *psDevNode,
+                                   PVRSRV_DEF_PAGE *psDefPage,
+                                   IMG_CHAR *pcDefPageName);
+
+
+/*
+ * DevmemIntCtxCreate()
+ *
+ * Create a Server-side Device Memory Context. This is usually the counterpart
+ * of the client side memory context, and indeed is usually created at the
+ * same time.
+ *
+ * You must have one of these before creating any heaps.
+ *
+ * All heaps must have been destroyed before calling
+ * DevmemIntCtxDestroy()
+ *
+ * If you call DevmemIntCtxCreate() (and it succeeds) you are promising to
+ * later call DevmemIntCtxDestroy()
+ *
+ * Note that this call will cause the device MMU code to do some work for
+ * creating the device memory context, but it does not guarantee that a page
+ * catalogue will have been created, as this may be deferred until the first
+ * allocation.
+ *
+ * Caller to provide storage for a pointer to the DEVMEM_CTX object that will
+ * be created by this call.
+ */
+PVRSRV_ERROR
+DevmemIntCtxCreate(CONNECTION_DATA *psConnection,
+                   PVRSRV_DEVICE_NODE *psDeviceNode,
+                   /* devnode / perproc etc */
+                   IMG_BOOL bKernelMemoryCtx,
+                   DEVMEMINT_CTX **ppsDevmemCtxPtr,
+                   IMG_HANDLE *hPrivData,
+                   IMG_UINT32 *pui32CPUCacheLineSize);
+/*
+ * DevmemIntCtxDestroy()
+ *
+ * Undoes a prior DevmemIntCtxCreate or DevmemIntCtxImport.
+ */
+PVRSRV_ERROR
+DevmemIntCtxDestroy(DEVMEMINT_CTX *psDevmemCtx);
+
+/*
+ * DevmemIntHeapCreate()
+ *
+ * Creates a new heap in this device memory context.  This will cause a call
+ * into the MMU code to allocate various data structures for managing this
+ * heap. It will not necessarily cause any page tables to be set up, as this
+ * can be deferred until first allocation. (i.e. we shouldn't care - it's up
+ * to the MMU code)
+ *
+ * Note that the data page size must be specified (as log 2). The data page
+ * size as specified here will be communicated to the mmu module, and thus may
+ * determine the page size configured in page directory entries for subsequent
+ * allocations from this heap. It is essential that the page size here is less
+ * than or equal to the "minimum contiguity guarantee" of any PMR that you
+ * subsequently attempt to map to this heap.
+ *
+ * If you call DevmemIntHeapCreate() (and the call succeeds) you are promising
+ * that you shall subsequently call DevmemIntHeapDestroy()
+ *
+ * Caller to provide storage for a pointer to the DEVMEM_HEAP object that will
+ * be created by this call.
+ */
+PVRSRV_ERROR
+DevmemIntHeapCreate(DEVMEMINT_CTX *psDevmemCtx,
+                    IMG_DEV_VIRTADDR sHeapBaseAddr,
+                    IMG_DEVMEM_SIZE_T uiHeapLength,
+                    IMG_UINT32 uiLog2DataPageSize,
+                    DEVMEMINT_HEAP **ppsDevmemHeapPtr);
+/*
+ * DevmemIntHeapDestroy()
+ *
+ * Destroys a heap previously created with DevmemIntHeapCreate()
+ *
+ * All allocations from this heap must have been freed before this
+ * call.
+ */
+PVRSRV_ERROR
+DevmemIntHeapDestroy(DEVMEMINT_HEAP *psDevmemHeap);
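
A hedged sketch, not part of the patch, of the create/destroy pairing these comments promise; the heap base and length below are invented placeholders rather than a real heap layout:

/* Illustrative sketch only: create a server memory context, create one heap
 * in it, then tear both down in reverse order. */
static PVRSRV_ERROR ExampleCtxAndHeapLifetime(CONNECTION_DATA *psConnection,
                                              PVRSRV_DEVICE_NODE *psDeviceNode)
{
    DEVMEMINT_CTX *psCtx = NULL;
    DEVMEMINT_HEAP *psHeap = NULL;
    IMG_HANDLE hPrivData = NULL;
    IMG_UINT32 ui32CPUCacheLineSize = 0;
    IMG_DEV_VIRTADDR sHeapBase;
    PVRSRV_ERROR eError;

    eError = DevmemIntCtxCreate(psConnection, psDeviceNode,
                                IMG_FALSE, /* not the kernel memory context */
                                &psCtx, &hPrivData, &ui32CPUCacheLineSize);
    if (eError != PVRSRV_OK)
    {
        return eError;
    }

    sHeapBase.uiAddr = 0x0040000000ULL;      /* placeholder 1GB-aligned base */
    eError = DevmemIntHeapCreate(psCtx, sHeapBase,
                                 0x0040000000ULL, /* placeholder 1GB length */
                                 RGX_HEAP_4KB_PAGE_SHIFT,
                                 &psHeap);
    if (eError != PVRSRV_OK)
    {
        (void) DevmemIntCtxDestroy(psCtx);
        return eError;
    }

    /* ... use the heap; all allocations must be freed before destroy ... */

    (void) DevmemIntHeapDestroy(psHeap);
    return DevmemIntCtxDestroy(psCtx);
}
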
+
+/*
+ * DevmemIntMapPMR()
+ *
+ * Maps the given PMR to the virtual range previously allocated with
+ * DevmemIntReserveRange()
+ *
+ * If appropriate, the PMR must have had its physical backing committed, as
+ * this call will call into the MMU code to set up the page tables for this
+ * allocation, which shall in turn request the physical addresses from the
+ * PMR. Alternatively, the PMR implementation can choose to do so off the
+ * back of the "lock" callback, which it will receive as a result
+ * (indirectly) of this call.
+ *
+ * This function makes no promise w.r.t. the circumstances that it can be
+ * called, and these would be "inherited" from the implementation of the PMR.
+ * For example if the PMR "lock" callback causes pages to be pinned at that
+ * time (which may cause scheduling or disk I/O etc.) then it would not be
+ * legal to "Map" the PMR in a context where scheduling events are disallowed.
+ *
+ * If you call DevmemIntMapPMR() (and the call succeeds) then you are promising
+ * that you shall later call DevmemIntUnmapPMR()
+ */
+PVRSRV_ERROR
+DevmemIntMapPMR(DEVMEMINT_HEAP *psDevmemHeap,
+                DEVMEMINT_RESERVATION *psReservation,
+                PMR *psPMR,
+                PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+                DEVMEMINT_MAPPING **ppsMappingPtr);
+/*
+ * DevmemIntUnmapPMR()
+ *
+ * Reverses the mapping caused by DevmemIntMapPMR()
+ */
+PVRSRV_ERROR
+DevmemIntUnmapPMR(DEVMEMINT_MAPPING *psMapping);
+
+/* DevmemIntMapPages()
+ *
+ * Maps an arbitrary amount of pages from a PMR to a reserved range
+ *
+ * @input       psReservation      Reservation handle for the range
+ * @input       psPMR              PMR that is mapped
+ * @input       ui32PageCount      Number of consecutive pages that are
+ *                                mapped
+ * @input       ui32PhysicalPgOffset Logical offset in the PMR
+ * @input       uiFlags            Mapping flags
+ * @input       sDevVAddrBase      Virtual address base to start the
+ *                                 mapping from
+ */
+PVRSRV_ERROR
+DevmemIntMapPages(DEVMEMINT_RESERVATION *psReservation,
+                  PMR *psPMR,
+                  IMG_UINT32 ui32PageCount,
+                  IMG_UINT32 ui32PhysicalPgOffset,
+                  PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                  IMG_DEV_VIRTADDR sDevVAddrBase);
+
+/* DevmemIntUnmapPages()
+ *
+ * Unmaps an arbitrary amount of pages from a reserved range
+ *
+ * @input       psReservation      Reservation handle for the range
+ * @input       sDevVAddrBase      Virtual address base to start from
+ * @input       ui32PageCount      Number of consecutive pages that are
+ *                                 unmapped
+ */
+PVRSRV_ERROR
+DevmemIntUnmapPages(DEVMEMINT_RESERVATION *psReservation,
+                    IMG_DEV_VIRTADDR sDevVAddrBase,
+                    IMG_UINT32 ui32PageCount);
+
+/*
+ * DevmemIntReserveRange()
+ *
+ * Indicates that the specified range should be reserved from the given heap.
+ *
+ * In turn causes the page tables to be allocated to cover the specified range.
+ *
+ * If you call DevmemIntReserveRange() (and the call succeeds) then you are
+ * promising that you shall later call DevmemIntUnreserveRange()
+ */
+PVRSRV_ERROR
+DevmemIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap,
+                      IMG_DEV_VIRTADDR sAllocationDevVAddr,
+                      IMG_DEVMEM_SIZE_T uiAllocationSize,
+                      DEVMEMINT_RESERVATION **ppsReservationPtr);
+/*
+ * DevmemIntUnreserveRange()
+ *
+ * Undoes the state change caused by DevmemIntReserveRange()
+ */
+PVRSRV_ERROR
+DevmemIntUnreserveRange(DEVMEMINT_RESERVATION *psDevmemReservation);
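
A short illustrative sketch, not part of the patch, of the reserve, map, unmap, unreserve pairing documented above; the address, size and mapping flags are assumed to come from the caller:

/* Illustrative sketch only: reserve a virtual range, map a PMR into it,
 * then tear the mapping and reservation down in reverse order. */
static PVRSRV_ERROR ExampleMapPMRIntoHeap(DEVMEMINT_HEAP *psHeap,
                                          PMR *psPMR,
                                          IMG_DEV_VIRTADDR sDevVAddr,
                                          IMG_DEVMEM_SIZE_T uiSize,
                                          PVRSRV_MEMALLOCFLAGS_T uiMapFlags)
{
    DEVMEMINT_RESERVATION *psReservation = NULL;
    DEVMEMINT_MAPPING *psMapping = NULL;
    PVRSRV_ERROR eError;

    eError = DevmemIntReserveRange(psHeap, sDevVAddr, uiSize, &psReservation);
    if (eError != PVRSRV_OK)
    {
        return eError;
    }

    eError = DevmemIntMapPMR(psHeap, psReservation, psPMR,
                             uiMapFlags, &psMapping);
    if (eError != PVRSRV_OK)
    {
        (void) DevmemIntUnreserveRange(psReservation);
        return eError;
    }

    /* ... the range is now live; tear it down in reverse order ... */

    (void) DevmemIntUnmapPMR(psMapping);
    return DevmemIntUnreserveRange(psReservation);
}
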
+
+/*************************************************************************/ /*!
+@Function       DevmemIntChangeSparse
+@Description    Changes the sparse allocations of a PMR by allocating and freeing
+                pages and changing their corresponding CPU and GPU mappings.
+
+@input          psDevmemHeap          Pointer to the heap we map on
+@input          psPMR                 The PMR we want to map
+@input          ui32AllocPageCount    Number of pages to allocate
+@input          pai32AllocIndices     The logical PMR indices where pages will
+                                      be allocated. May be NULL.
+@input          ui32FreePageCount     Number of pages to free
+@input          pai32FreeIndices      The logical PMR indices where pages will
+                                      be freed. May be NULL.
+@input          uiSparseFlags         Flags passed in to determine which kind
+                                      of sparse change the user wanted.
+                                      See devicemem_typedefs.h for details.
+@input          uiFlags               Memalloc flags for this virtual range.
+@input          sDevVAddrBase         The base address of the virtual range of
+                                      this sparse allocation.
+@input          sCpuVAddrBase         The CPU base address of this allocation.
+                                      May be 0 if no CPU mapping exists.
+@Return         PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemIntChangeSparse(DEVMEMINT_HEAP *psDevmemHeap,
+                      PMR *psPMR,
+                      IMG_UINT32 ui32AllocPageCount,
+                      IMG_UINT32 *pai32AllocIndices,
+                      IMG_UINT32 ui32FreePageCount,
+                      IMG_UINT32 *pai32FreeIndices,
+                      SPARSE_MEM_RESIZE_FLAGS uiSparseFlags,
+                      PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                      IMG_DEV_VIRTADDR sDevVAddrBase,
+                      IMG_UINT64 sCpuVAddrBase);
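+
+/*
+ * Illustrative call (a sketch only; the flags, addresses and page indices are
+ * assumed to come from the caller and error handling is omitted). Backing is
+ * allocated for logical PMR pages 0 and 3 and freed for page 7 in a single
+ * operation:
+ *
+ *   IMG_UINT32 aui32Alloc[] = { 0, 3 };
+ *   IMG_UINT32 aui32Free[]  = { 7 };
+ *
+ *   eError = DevmemIntChangeSparse(psDevmemHeap, psPMR,
+ *                                  2, aui32Alloc,
+ *                                  1, aui32Free,
+ *                                  uiSparseFlags, uiFlags,
+ *                                  sDevVAddrBase, sCpuVAddrBase);
+ */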
+
+/*
+ * DevmemIntFlushDevSLCRange()
+ *
+ * Flushes the specified device context's virtual address range from the SLC.
+ */
+PVRSRV_ERROR
+DevmemIntFlushDevSLCRange(DEVMEMINT_CTX *psDevmemCtx,
+                          IMG_DEV_VIRTADDR sDevVAddr,
+                          IMG_DEVMEM_SIZE_T uiSize,
+                          IMG_BOOL bInvalidate);
+
+/*
+ * DevmemIntInvalidateFBSCTable()
+ *
+ * Invalidates the selected FBSC table indices.
+ */
+PVRSRV_ERROR
+DevmemIntInvalidateFBSCTable(DEVMEMINT_CTX *psDevmemCtx,
+                             IMG_UINT64 ui64FBSCEntryMask);
+
+PVRSRV_ERROR
+DevmemIntIsVDevAddrValid(CONNECTION_DATA * psConnection,
+                         PVRSRV_DEVICE_NODE *psDevNode,
+                         DEVMEMINT_CTX *psDevMemContext,
+                         IMG_DEV_VIRTADDR sDevAddr);
+
+PVRSRV_ERROR
+DevmemIntGetFaultAddress(CONNECTION_DATA * psConnection,
+                         PVRSRV_DEVICE_NODE *psDevNode,
+                         DEVMEMINT_CTX *psDevMemContext,
+                         IMG_DEV_VIRTADDR *psFaultAddress);
+
+/*************************************************************************/ /*!
+@Function       DevmemIntRegisterPFNotifyKM
+@Description    Registers a PID to be notified when a page fault occurs on a
+                specific device memory context.
+@Input          psDevmemCtx    The device memory context for which page-fault
+                               notifications are requested.
+@Input          ui32PID        The PID of the process that would like to be
+                               notified.
+@Input          bRegister      If true, register. If false, de-register.
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemIntRegisterPFNotifyKM(DEVMEMINT_CTX *psDevmemCtx,
+                            IMG_INT32     ui32PID,
+                            IMG_BOOL      bRegister);
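+
+/*
+ * Illustrative registration and later de-registration for the same PID (a
+ * sketch only; the context and PID are assumed to come from the caller and
+ * error handling is omitted):
+ *
+ *   eError = DevmemIntRegisterPFNotifyKM(psDevmemCtx, ui32PID, IMG_TRUE);
+ *   ...
+ *   eError = DevmemIntRegisterPFNotifyKM(psDevmemCtx, ui32PID, IMG_FALSE);
+ */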
+
+/*************************************************************************/ /*!
+@Function       DevmemIntPFNotify
+@Description    Notifies any processes that have registered themselves to be
+                notified when a page fault happens on a specific device memory
+                context.
+@Input          *psDevNode           The device node.
+@Input          ui64FaultedPCAddress The page catalogue address that faulted.
+@Input          sFaultAddress        The address that triggered the fault.
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR DevmemIntPFNotify(PVRSRV_DEVICE_NODE *psDevNode,
+                               IMG_UINT64         ui64FaultedPCAddress,
+                               IMG_DEV_VIRTADDR   sFaultAddress);
+
+#if defined(PDUMP)
+/*
+ * DevmemIntPDumpSaveToFileVirtual()
+ *
+ * Writes out PDump "SAB" commands with the data found in memory at
+ * the given virtual address.
+ */
+PVRSRV_ERROR
+DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx,
+                                IMG_DEV_VIRTADDR sDevAddrStart,
+                                IMG_DEVMEM_SIZE_T uiSize,
+                                IMG_UINT32 uiArraySize,
+                                const IMG_CHAR *pszFilename,
+                                IMG_UINT32 ui32FileOffset,
+                                IMG_UINT32 ui32PDumpFlags);
+
+IMG_UINT32
+DevmemIntMMUContextID(DEVMEMINT_CTX *psDevMemContext);
+
+PVRSRV_ERROR
+DevmemIntPDumpImageDescriptor(CONNECTION_DATA * psConnection,
+                              PVRSRV_DEVICE_NODE *psDeviceNode,
+                              DEVMEMINT_CTX *psDevMemContext,
+                              IMG_UINT32 ui32Size,
+                              const IMG_CHAR *pszFileName,
+                              IMG_DEV_VIRTADDR sData,
+                              IMG_UINT32 ui32DataSize,
+                              IMG_UINT32 ui32LogicalWidth,
+                              IMG_UINT32 ui32LogicalHeight,
+                              IMG_UINT32 ui32PhysicalWidth,
+                              IMG_UINT32 ui32PhysicalHeight,
+                              PDUMP_PIXEL_FORMAT ePixFmt,
+                              IMG_MEMLAYOUT eMemLayout,
+                              IMG_FB_COMPRESSION eFBCompression,
+                              const IMG_UINT32 *paui32FBCClearColour,
+                              PDUMP_FBC_SWIZZLE eFBCSwizzle,
+                              IMG_DEV_VIRTADDR sHeader,
+                              IMG_UINT32 ui32HeaderSize,
+                              IMG_UINT32 ui32PDumpFlags);
+
+PVRSRV_ERROR
+DevmemIntPDumpDataDescriptor(CONNECTION_DATA * psConnection,
+                             PVRSRV_DEVICE_NODE *psDeviceNode,
+                             DEVMEMINT_CTX *psDevMemContext,
+                             IMG_UINT32 ui32Size,
+                             const IMG_CHAR *pszFileName,
+                             IMG_DEV_VIRTADDR sData,
+                             IMG_UINT32 ui32DataSize,
+                             IMG_UINT32 ui32HeaderType,
+                             IMG_UINT32 ui32ElementType,
+                             IMG_UINT32 ui32ElementCount,
+                             IMG_UINT32 ui32PDumpFlags);
+#else /* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemIntPDumpSaveToFileVirtual)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx,
+                                IMG_DEV_VIRTADDR sDevAddrStart,
+                                IMG_DEVMEM_SIZE_T uiSize,
+                                IMG_UINT32 uiArraySize,
+                                const IMG_CHAR *pszFilename,
+                                IMG_UINT32 ui32FileOffset,
+                                IMG_UINT32 ui32PDumpFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(psDevmemCtx);
+       PVR_UNREFERENCED_PARAMETER(sDevAddrStart);
+       PVR_UNREFERENCED_PARAMETER(uiSize);
+       PVR_UNREFERENCED_PARAMETER(uiArraySize);
+       PVR_UNREFERENCED_PARAMETER(pszFilename);
+       PVR_UNREFERENCED_PARAMETER(ui32FileOffset);
+       PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+       return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemIntPDumpImageDescriptor)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemIntPDumpImageDescriptor(CONNECTION_DATA * psConnection,
+                              PVRSRV_DEVICE_NODE *psDeviceNode,
+                              DEVMEMINT_CTX *psDevMemContext,
+                              IMG_UINT32 ui32Size,
+                              const IMG_CHAR *pszFileName,
+                              IMG_DEV_VIRTADDR sData,
+                              IMG_UINT32 ui32DataSize,
+                              IMG_UINT32 ui32LogicalWidth,
+                              IMG_UINT32 ui32LogicalHeight,
+                              IMG_UINT32 ui32PhysicalWidth,
+                              IMG_UINT32 ui32PhysicalHeight,
+                              PDUMP_PIXEL_FORMAT ePixFmt,
+                              IMG_MEMLAYOUT eMemLayout,
+                              IMG_FB_COMPRESSION eFBCompression,
+                              const IMG_UINT32 *paui32FBCClearColour,
+                              PDUMP_FBC_SWIZZLE eFBCSwizzle,
+                              IMG_DEV_VIRTADDR sHeader,
+                              IMG_UINT32 ui32HeaderSize,
+                              IMG_UINT32 ui32PDumpFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(psDevMemContext);
+       PVR_UNREFERENCED_PARAMETER(ui32Size);
+       PVR_UNREFERENCED_PARAMETER(pszFileName);
+       PVR_UNREFERENCED_PARAMETER(sData);
+       PVR_UNREFERENCED_PARAMETER(ui32DataSize);
+       PVR_UNREFERENCED_PARAMETER(ui32LogicalWidth);
+       PVR_UNREFERENCED_PARAMETER(ui32LogicalHeight);
+       PVR_UNREFERENCED_PARAMETER(ui32PhysicalWidth);
+       PVR_UNREFERENCED_PARAMETER(ui32PhysicalHeight);
+       PVR_UNREFERENCED_PARAMETER(ePixFmt);
+       PVR_UNREFERENCED_PARAMETER(eMemLayout);
+       PVR_UNREFERENCED_PARAMETER(eFBCompression);
+       PVR_UNREFERENCED_PARAMETER(paui32FBCClearColour);
+       PVR_UNREFERENCED_PARAMETER(eFBCSwizzle);
+       PVR_UNREFERENCED_PARAMETER(sHeader);
+       PVR_UNREFERENCED_PARAMETER(ui32HeaderSize);
+       PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+       return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemIntPDumpDataDescriptor)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemIntPDumpDataDescriptor(CONNECTION_DATA * psConnection,
+                              PVRSRV_DEVICE_NODE *psDeviceNode,
+                              DEVMEMINT_CTX *psDevMemContext,
+                              IMG_UINT32 ui32Size,
+                              const IMG_CHAR *pszFileName,
+                              IMG_DEV_VIRTADDR sData,
+                              IMG_UINT32 ui32DataSize,
+                              IMG_UINT32 ui32ElementType,
+                              IMG_UINT32 ui32ElementCount,
+                              IMG_UINT32 ui32PDumpFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(psDevMemContext);
+       PVR_UNREFERENCED_PARAMETER(ui32Size);
+       PVR_UNREFERENCED_PARAMETER(pszFileName);
+       PVR_UNREFERENCED_PARAMETER(sData);
+       PVR_UNREFERENCED_PARAMETER(ui32DataSize);
+       PVR_UNREFERENCED_PARAMETER(ui32ElementType);
+       PVR_UNREFERENCED_PARAMETER(ui32ElementCount);
+       PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+       return PVRSRV_OK;
+}
+
+#endif /* PDUMP */
+
+PVRSRV_ERROR
+DevmemIntInit(void);
+
+PVRSRV_ERROR
+DevmemIntDeInit(void);
+
+PVRSRV_ERROR
+DevmemIntExportCtx(DEVMEMINT_CTX *psContext,
+                   PMR *psPMR,
+                   DEVMEMINT_CTX_EXPORT **ppsContextExport);
+
+PVRSRV_ERROR
+DevmemIntUnexportCtx(DEVMEMINT_CTX_EXPORT *psContextExport);
+
+PVRSRV_ERROR
+DevmemIntAcquireRemoteCtx(PMR *psPMR,
+                          DEVMEMINT_CTX **ppsContext,
+                          IMG_HANDLE *phPrivData);
+
+#endif /* DEVICEMEM_SERVER_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/devicemem_server_utils.h b/drivers/gpu/drm/img/img-rogue/services/server/include/devicemem_server_utils.h
new file mode 100644 (file)
index 0000000..ad85c07
--- /dev/null
@@ -0,0 +1,198 @@
+/**************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header file for utilities specific to device memory management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "device.h"
+#include "pvrsrv_memallocflags.h"
+#include "pvrsrv.h"
+
+static INLINE PVRSRV_ERROR DevmemCPUCacheMode(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                                         PVRSRV_MEMALLOCFLAGS_T ulFlags,
+                                                                                         IMG_UINT32 *pui32Ret)
+{
+       IMG_UINT32 ui32CPUCacheMode = PVRSRV_CPU_CACHE_MODE(ulFlags);
+       IMG_UINT32 ui32Ret;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PVR_ASSERT(ui32CPUCacheMode == PVRSRV_CPU_CACHE_MODE(ulFlags));
+
+       switch (ui32CPUCacheMode)
+       {
+               case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+                       ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED;
+                       break;
+
+               case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC:
+                       ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC;
+                       break;
+
+               case PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT:
+                       ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_CACHED;
+                       break;
+
+               case PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT:
+
+                       /*
+                        * If the system has no coherency but coherency has been requested for both
+                        * CPU and GPU, we currently fall back to write-combine. This avoids errors
+                        * on arm64, where uncached memory is turned into ordered device memory and
+                        * suffers from problems with unaligned access.
+                        */
+                       if ( (PVRSRV_GPU_CACHE_MODE(ulFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT) &&
+                               !(PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) && PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) )
+                       {
+                               ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC;
+                       }
+                       else
+                       {
+                               ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_CACHED;
+                       }
+                       break;
+
+               default:
+                       PVR_LOG(("DevmemCPUCacheMode: Unknown CPU cache mode 0x%08x", ui32CPUCacheMode));
+                       PVR_ASSERT(0);
+                       /*
+                               We should never get here, but if we do then setting the mode
+                               to uncached is the safest thing to do.
+                       */
+                       ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED;
+                       eError = PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE;
+                       break;
+       }
+
+       *pui32Ret = ui32Ret;
+
+       return eError;
+}
+
+static INLINE PVRSRV_ERROR DevmemDeviceCacheMode(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                                                PVRSRV_MEMALLOCFLAGS_T ulFlags,
+                                                                                                IMG_UINT32 *pui32Ret)
+{
+       IMG_UINT32 ui32DeviceCacheMode = PVRSRV_GPU_CACHE_MODE(ulFlags);
+       IMG_UINT32 ui32Ret;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       PVR_ASSERT(ui32DeviceCacheMode == PVRSRV_GPU_CACHE_MODE(ulFlags));
+
+       switch (ui32DeviceCacheMode)
+       {
+               case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED:
+                       ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED;
+                       break;
+
+               case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC:
+                       ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC;
+                       break;
+
+               case PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT:
+                       ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_CACHED;
+                       break;
+
+               case PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT:
+
+                       /*
+                        * If the system has no coherency but coherency has been requested for both
+                        * CPU and GPU, we currently fall back to write-combine. This avoids errors
+                        * on arm64, where uncached memory is turned into ordered device memory and
+                        * suffers from problems with unaligned access.
+                        */
+                       if ( (PVRSRV_CPU_CACHE_MODE(ulFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) &&
+                               !(PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) && PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) )
+                       {
+                               ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC;
+                       }
+                       else
+                       {
+                               ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_CACHED;
+                       }
+                       break;
+
+               default:
+                       PVR_LOG(("DevmemDeviceCacheMode: Unknown device cache mode 0x%08x", ui32DeviceCacheMode));
+                       PVR_ASSERT(0);
+                       /*
+                               We should never get here, but if we do then setting the mode
+                               to uncached is the safest thing to do.
+                       */
+                       ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED;
+                       eError = PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE;
+                       break;
+       }
+
+       *pui32Ret = ui32Ret;
+
+       return eError;
+}
+
+static INLINE IMG_BOOL DevmemCPUCacheCoherency(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                                          PVRSRV_MEMALLOCFLAGS_T ulFlags)
+{
+       IMG_UINT32 ui32CPUCacheMode = PVRSRV_CPU_CACHE_MODE(ulFlags);
+       IMG_BOOL bRet = IMG_FALSE;
+
+       PVR_ASSERT(ui32CPUCacheMode == PVRSRV_CPU_CACHE_MODE(ulFlags));
+
+       if (ui32CPUCacheMode == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT)
+       {
+               bRet = PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig);
+       }
+       return bRet;
+}
+
+static INLINE IMG_BOOL DevmemDeviceCacheCoherency(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                                                                                 PVRSRV_MEMALLOCFLAGS_T ulFlags)
+{
+       IMG_UINT32 ui32DeviceCacheMode = PVRSRV_GPU_CACHE_MODE(ulFlags);
+       IMG_BOOL bRet = IMG_FALSE;
+
+       PVR_ASSERT(ui32DeviceCacheMode == PVRSRV_GPU_CACHE_MODE(ulFlags));
+
+       if (ui32DeviceCacheMode == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT)
+       {
+               bRet = PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig);
+       }
+       return bRet;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/di_common.h b/drivers/gpu/drm/img/img-rogue/services/server/include/di_common.h
new file mode 100644 (file)
index 0000000..a101787
--- /dev/null
@@ -0,0 +1,236 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common types for Debug Info framework.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef DI_COMMON_H
+#define DI_COMMON_H
+
+#include "img_types.h"
+
+/* Token that signals that a header should be printed. */
+#define DI_START_TOKEN ((void *) 1)
+
+/* This is a public handle to an entry. */
+#ifndef DI_GROUP_DEFINED
+#define DI_GROUP_DEFINED
+typedef struct DI_GROUP DI_GROUP;
+#endif
+#ifndef DI_ENTRY_DEFINED
+#define DI_ENTRY_DEFINED
+typedef struct DI_ENTRY DI_ENTRY;
+#endif
+typedef struct OSDI_IMPL_ENTRY OSDI_IMPL_ENTRY;
+
+/*! Debug Info entries types. */
+typedef enum DI_ENTRY_TYPE
+{
+    DI_ENTRY_TYPE_GENERIC,          /*!< generic entry type, implements
+                                         start/stop/next/show iterator
+                                         interface */
+    DI_ENTRY_TYPE_RANDOM_ACCESS,    /*!< random access entry, implements
+                                         seek/read iterator interface */
+} DI_ENTRY_TYPE;
+
+/*! @Function DI_PFN_START
+ *
+ * @Description
+ * The Start operation returns the first element of the entry's data and passes
+ * it to the Show operation.
+ *
+ * @Input psEntry pointer to the implementation entry
+ * @InOut pui64Pos current data position in the entry
+ *
+ * @Return pointer to data that will be passed to the other iterator
+ *         functions in pvData argument
+ */
+typedef void *(*DI_PFN_START)(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos);
+
+/*! @Function DI_PFN_STOP
+ *
+ * @Description
+ * The Stop operation is called after the iterator reaches the end of the data.
+ *
+ * If pvData was allocated in pfnStart it should be freed here.
+ *
+ * @Input psEntry pointer to the implementation entry
+ * @Input pvData pointer to data returned from pfnStart/pfnNext
+ */
+typedef void (*DI_PFN_STOP)(OSDI_IMPL_ENTRY *psEntry, void *pvData);
+
+/*! @Function DI_PFN_NEXT
+ *
+ * @Description
+ * The Next operation returns the next element of the entry's data and passes it
+ * to the Show operation.
+ *
+ * @Input psEntry pointer to the implementation entry
+ * @Input pvData pointer to data returned from pfnStart/pfnNext
+ * @InOut pui64Pos current data position in the entry
+ */
+typedef void *(*DI_PFN_NEXT)(OSDI_IMPL_ENTRY *psEntry, void *pvData,
+                             IMG_UINT64 *pui64Pos);
+
+/*! @Function DI_PFN_SHOW
+ *
+ * @Description
+ * Outputs the data element.
+ *
+ * @Input psEntry pointer to the implementation entry
+ * @Input pvData pointer to data returned from pfnStart/pfnNext
+ */
+typedef int (*DI_PFN_SHOW)(OSDI_IMPL_ENTRY *psEntry, void *pvData);
+
+/*! @Function DI_PFN_SEEK
+ *
+ * @Description
+ * Changes the position of the entry's data pointer.
+ *
+ * @Input ui64Offset new entry offset (absolute)
+ * @Input pvData private data provided during entry creation
+ */
+typedef IMG_INT64 (*DI_PFN_SEEK)(IMG_UINT64 ui64Offset, void *pvData);
+
+/*! @Function DI_PFN_READ
+ *
+ * @Description
+ * Retrieves data from the entry at the position previously set by Seek.
+ *
+ * @Input pszBuffer output buffer
+ * @Input ui64Count length of the output buffer
+ * @InOut pui64Pos pointer to the current position in the entry
+ * @Input pvData private data provided during entry creation
+ */
+typedef IMG_INT64 (*DI_PFN_READ)(IMG_CHAR *pszBuffer, IMG_UINT64 ui64Count,
+                                 IMG_UINT64 *pui64Pos, void *pvData);
+
+/*! @Function DI_PFN_WRITE
+ *
+ * @Description
+ * Handles write operations to the entry.
+ *
+ * @Input pszBuffer NUL-terminated buffer containing written data
+ * @Input ui64Count length of the data in pszBuffer (length of the buffer)
+ * @InOut pui64Pos pointer to the current position in the entry
+ * @Input pvData private data provided during entry creation
+ */
+typedef IMG_INT64 (*DI_PFN_WRITE)(const IMG_CHAR *pszBuffer,
+                                  IMG_UINT64 ui64Count, IMG_UINT64 *pui64Pos,
+                                  void *pvData);
+
+/*! Debug info entry iterator.
+ *
+ * This covers all entry types: GENERIC and RANDOM_ACCESS.
+ *
+ * The GENERIC entry type
+ *
+ * The GENERIC type should implement either a full set of following callbacks:
+ * pfnStart, pfnStop, pfnNext and pfnShow, or pfnShow only. If only pfnShow
+ * callback is given the framework will use default handlers in place of the
+ * other ones.
+ *
+ * e.g. for generic entry:
+ *
+ *   struct sIter = {
+ *     .pfnStart = StartCb, .pfnStop = StopCb, pfnNext = NextCb,
+ *     .pfnShow = ShowCb
+ *   };
+ *
+ * The use case for implementing pfnShow only is if the data for the given
+ * entry is short and can be printed in one go because the pfnShow callback
+ * will be called only once.
+ *
+ * e.g. for one-shot print generic entry:
+ *
+ *   struct sIter = {
+ *     .pfnShow = SingleShowCb
+ *   };
+ *
+ * The DICreateEntry() function will return error if DI_ENTRY_TYPE_GENERIC
+ * type is used and invalid combination of callbacks is given.
+ *
+ * The RANDOM_ACCESS entry
+ *
+ * The RANDOM_ACCESS type should implement either both pfnSeek and pfnRead
+ * or pfnRead only callbacks.
+ *
+ * e.g. of seekable and readable random access entry:
+ *
+ *   struct sIter = {
+ *     .pfnSeek = SeekCb, .pfnRead = ReadCb
+ *   };
+ *
+ * The DICreateEntry() function will return error if DI_ENTRY_TYPE_RANDOM_ACCESS
+ * type is used and invalid combination of callbacks is given.
+ *
+ * Writing to file (optional)
+ *
+ * The iterator also allows a pfnWrite callback to be passed so that a write
+ * operation can be implemented for the entry. The write operation is entry type
+ * agnostic,
+ * which means that it can be defined for both GENERIC and RANDOM_ACCESS
+ * entries.
+ *
+ * e.g. for writable one-shot print generic entry
+ *
+ *   struct sIter = {
+ *     .pfnShow = SingleShowCb, .pfnWrite = WriteCb
+ *   };
+ */
+typedef struct DI_ITERATOR_CB
+{
+    /* Generic entry interface. */
+
+    DI_PFN_START pfnStart; /*!< Starts iteration and returns first element
+                                of entry's data. */
+    DI_PFN_STOP pfnStop;   /*!< Stops iteration. */
+    DI_PFN_NEXT pfnNext;   /*!< Returns next element of entry's data. */
+    DI_PFN_SHOW pfnShow;   /*!< Shows current data element of an entry. */
+
+    /* Optional random access entry interface. */
+
+    DI_PFN_SEEK pfnSeek;   /*!< Sets data pointer in an entry. */
+    DI_PFN_READ pfnRead;   /*!< Reads data from an entry. */
+
+    /* Optional interface for writing to the entry (NUL-terminated buffer). */
+
+    DI_PFN_WRITE pfnWrite; /*!< Performs write operation on an entry. */
+    IMG_UINT32   ui32WriteLenMax; /*!< Maximum char length of entry
+                                       accepted for write. Includes \0 */
+} DI_ITERATOR_CB;
+
+#endif /* DI_COMMON_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/di_server.h b/drivers/gpu/drm/img/img-rogue/services/server/include/di_server.h
new file mode 100644 (file)
index 0000000..a68894b
--- /dev/null
@@ -0,0 +1,219 @@
+/*************************************************************************/ /*!
+@File
+@Title          Functions for creating Debug Info groups and entries.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef DI_SERVER_H
+#define DI_SERVER_H
+
+#if defined(__linux__)
+ #include <linux/version.h>
+
+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+  #include <linux/stdarg.h>
+ #else
+  #include <stdarg.h>
+ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
+#else
+ #include <stdarg.h>
+#endif /* __linux__ */
+
+#include "di_common.h"
+#include "pvrsrv_error.h"
+#include "img_defs.h"
+
+/*! @Function DIInit
+ *
+ * @Description
+ * Initialises Debug Info framework. This function will create common resources
+ * for the framework.
+ *
+ * Note: This function must be called before the first call to
+ *       DIRegisterImplementation() for any of the implementations.
+ */
+PVRSRV_ERROR DIInit(void);
+
+/*! @Function DIDeInit
+ *
+ * @Description
+ * De-initialises Debug Info framework. This function will call pfnDeInit()
+ * on each implementation and clean up common resources.
+ *
+ * If some of the entries and groups have not been cleaned up, this function
+ * will also perform a recursive sweep and remove all entries and groups for
+ * all implementations.
+ */
+void DIDeInit(void);
+
+/*! @Function DICreateEntry
+ *
+ * @Description
+ * Creates debug info entry. Depending on different implementations the entry
+ * might be for example a DebugFS file or something totally different.
+ *
+ * The entry will belong to a parent group if provided or to the root group
+ * if not.
+ *
+ * @Input pszName: name of the new entry
+ * @Input psGroup: parent group; if NULL the entry will belong to the root group
+ * @Input psIterCb: implementation of the iterator for the entry
+ * @Input psPriv: private data that will be passed to the iterator operations
+ * @Input eType: type of the entry
+ *
+ * @Output ppsEntry: handle to the newly created entry
+ *
+ * @Return   PVRSRV_ERROR error code
+ */
+PVRSRV_ERROR DICreateEntry(const IMG_CHAR *pszName,
+                           DI_GROUP *psGroup,
+                           const DI_ITERATOR_CB *psIterCb,
+                           void *psPriv,
+                           DI_ENTRY_TYPE eType,
+                           DI_ENTRY **ppsEntry);
+
+/*! @Function DIDestroyEntry
+ *
+ * @Description
+ * Destroys debug info entry.
+ *
+ * @Input psEntry: handle to the entry
+ */
+void DIDestroyEntry(DI_ENTRY *psEntry);
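+
+/*
+ * Illustrative single-show entry built on the iterator conventions described
+ * in di_common.h (a sketch only; the entry name "status", the callback and
+ * its output are hypothetical, and error handling is omitted):
+ *
+ *   static int SingleShowCb(OSDI_IMPL_ENTRY *psEntry, void *pvData)
+ *   {
+ *       PVR_UNREFERENCED_PARAMETER(pvData);
+ *       DIPuts(psEntry, "ok\n");
+ *       return 0;
+ *   }
+ *
+ *   static const DI_ITERATOR_CB sIterCb = { .pfnShow = SingleShowCb };
+ *   DI_ENTRY *psDiEntry;
+ *
+ *   eError = DICreateEntry("status", NULL, &sIterCb, NULL,
+ *                          DI_ENTRY_TYPE_GENERIC, &psDiEntry);
+ *   ...
+ *   DIDestroyEntry(psDiEntry);
+ */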
+
+/*! @Function DICreateGroup
+ *
+ * @Description
+ * Creates debug info group. Depending on different implementations the group
+ * might be for example a DebugFS directory or something totally different.
+ *
+ * The group will belong to a parent group if provided or to the root group
+ * if not.
+ *
+ * @Input pszName: name of the new group
+ * @Input psParent: parent group; if NULL the group will belong to the root group
+ *
+ * @Output ppsGroup: handle to the newly created group
+ *
+ * @Return   PVRSRV_ERROR error code
+ */
+PVRSRV_ERROR DICreateGroup(const IMG_CHAR *pszName,
+                           DI_GROUP *psParent,
+                           DI_GROUP **ppsGroup);
+
+/*! @Function DIDestroyGroup
+ *
+ * @Description
+ * Destroys debug info group.
+ *
+ * @Input psGroup: handle to the group
+ */
+void DIDestroyGroup(DI_GROUP *psGroup);
+
+/*! @Function DIGetPrivData
+ *
+ * @Description
+ * Retrieves private data from psEntry. The data is the pointer passed during
+ * entry creation via the psPriv parameter of DICreateEntry().
+ *
+ * @Input psEntry pointer to OSDI_IMPL_ENTRY object
+ *
+ * @Returns pointer to the private data (can be NULL if private data
+ *          has not been specified)
+ */
+void *DIGetPrivData(const OSDI_IMPL_ENTRY *psEntry);
+
+/*! @Function DIWrite
+ *
+ * @Description
+ * Writes the binary data of the DI entry to the output sink, whatever that may
+ * be for the DI implementation.
+ *
+ * @Input psEntry pointer to OSDI_IMPL_ENTRY object
+ * @Input pvData data
+ * @Input uiSize pvData length
+ */
+void DIWrite(const OSDI_IMPL_ENTRY *psEntry, const void *pvData,
+             IMG_UINT32 uiSize);
+
+/*! @Function DIPrintf
+ *
+ * @Description
+ * Prints a formatted string to the DI entry.
+ *
+ * @Input psEntry pointer to OSDI_IMPL_ENTRY object
+ * @Input pszFmt NUL-terminated format string
+ */
+void DIPrintf(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszFmt, ...)
+       __printf(2, 3);
+
+/*! @Function DIVPrintf
+ *
+ * @Description
+ * Prints formatted string to the DI entry. Equivalent to DIPrintf but takes
+ * va_list instead of a variable number of arguments.
+ *
+ * @Input psEntry pointer to OSDI_IMPL_ENTRY object
+ * @Input pszFmt NUL-terminated format string
+ * @Input pArgs va_list object
+ */
+void DIVPrintf(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszFmt,
+               va_list pArgs);
+
+/*! @Function DIPuts
+ *
+ * @Description
+ * Prints a string to the DI entry.
+ *
+ * @Input psEntry pointer to OSDI_IMPL_ENTRY object
+ * @Input pszStr NUL-terminated string
+ */
+void DIPuts(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszStr);
+
+/*! @Function DIHasOverflowed
+ *
+ * @Description
+ * Checks if the DI buffer has overflowed.
+ *
+ * @Return IMG_TRUE if buffer overflowed
+ */
+IMG_BOOL DIHasOverflowed(const OSDI_IMPL_ENTRY *psEntry);
+
+#endif /* DI_SERVER_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/dma_km.h b/drivers/gpu/drm/img/img-rogue/services/server/include/dma_km.h
new file mode 100644 (file)
index 0000000..185d4ff
--- /dev/null
@@ -0,0 +1,83 @@
+/*************************************************************************/ /*!
+@File           dma_km.h
+@Title          DMA transfer module header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef DMA_KM_H
+#define DMA_KM_H
+
+#if defined(__linux__)
+#include <linux/version.h>
+#else
+#define KERNEL_VERSION
+#endif
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "cache_ops.h"
+#include "device.h"
+#include "pmr.h"
+#include "pvrsrv_sync_km.h"
+#include "connection_server.h"
+
+PVRSRV_ERROR DmaDeviceParams(CONNECTION_DATA *psConnection,
+                                                        PVRSRV_DEVICE_NODE *psDevNode,
+                                                        IMG_UINT32 *ui32DmaBuffAlign,
+                                                        IMG_UINT32 *ui32DmaTransferMult);
+
+PVRSRV_ERROR DmaSparseMappingTable(PMR *psPMR,
+                                                                  IMG_DEVMEM_OFFSET_T uiOffset,
+                                                                  IMG_UINT32 ui32SizeInPages,
+                                                                  IMG_BOOL *pbTable);
+
+PVRSRV_ERROR DmaTransfer(CONNECTION_DATA *psConnection,
+                   PVRSRV_DEVICE_NODE *psDevNode,
+                       IMG_UINT32 uiNumDMAs,
+                       PMR** ppsPMR,
+                       IMG_UINT64 *puiAddress,
+                       IMG_DEVMEM_OFFSET_T *puiOffset,
+                       IMG_DEVMEM_SIZE_T *puiSize,
+                       IMG_BOOL bMemToDev,
+                       PVRSRV_TIMELINE iUpdateTimeline);
+
+PVRSRV_ERROR PVRSRVInitialiseDMA(PVRSRV_DEVICE_NODE *psDeviceNode);
+void PVRSRVDeInitialiseDMA(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+#endif /* DMA_KM_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/fwload.h b/drivers/gpu/drm/img/img-rogue/services/server/include/fwload.h
new file mode 100644 (file)
index 0000000..08e7f53
--- /dev/null
@@ -0,0 +1,158 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services RGX OS Interface for loading the firmware
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This file defines the OS interface through which the RGX
+                device initialisation code in the kernel/server will obtain
+                the RGX firmware binary image. The API is used during the
+                initialisation of an RGX device via the
+                PVRSRVCommonDeviceInitialise()
+                call sequence.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef FWLOAD_H
+#define FWLOAD_H
+
+#include "img_defs.h"
+#include "device_connection.h"
+#include "device.h"
+
+/*! Opaque type handle defined and known to the OS layer implementation of this
+ * fwload.h OS API. This private data is allocated in the implementation of
+ * OSLoadFirmware() and contains whatever data and information needed to be
+ * able to acquire and return the firmware binary image to the Services
+ * kernel/server during initialisation.
+ * It is no longer required and may be freed when OSUnloadFirmware() is called.
+ */
+typedef struct OS_FW_IMAGE_t OS_FW_IMAGE;
+
+#if defined(__linux__)
+
+bool OSVerifyFirmware(OS_FW_IMAGE* psFWImage);
+
+#endif
+
+/*************************************************************************/ /*!
+@Function     OSLoadFirmware
+@Description  The OS implementation must load or acquire the firmware (FW)
+              image binary needed by the driver stack.
+              A handle to the common layer device node is given to identify
+              which device instance in the system is being initialised. The
+              BVNC string is also supplied so that the implementation knows
+              which FW image to retrieve since each FW image only supports one
+              GPU type/revision.
+              The calling server code supports multiple GPU types and revisions
+              and will detect the specific GPU type and revision before calling
+              this API. It will also have runtime configuration of the VZ mode,
+              hence this API must be able to retrieve different FW binary
+              images based on the pszBVNCString given. The purpose of the end
+              platform/system is key to understanding which FW images must be
+              available to the kernel server.
+              On exit the implementation must return a pointer to some private
+              data it uses to hold the FW image information and data. It will
+              be passed onto later API calls by the kernel server code.
+              NULL should be returned if the FW image could not be retrieved.
+              The format of the BVNC string is as follows ([x] denotes
+              optional field):
+                "rgx.fw[.signed].B.V[p].N.C[.vz]"
+              The implementation must first try to load the FW image that
+              includes the provisional [p] field (the BVpNC form of the
+              string). If this is not available then it should drop back to
+              retrieving the FW image identified by the pszBVNCString
+              parameter without the [p] field. The fields in the string are:
+                B, V, N, C are all unsigned integer identifying type/revision.
+                [.signed] is present when RGX_FW_SIGNED=1 is defined in the
+                  server build.
+                [p] denotes a provisional (pre-silicon) GPU configuration.
+                [.vz] is present when the kernel server is loaded on the HOST
+                  of a virtualised platform. See the DriverMode server
+                  AppHint for details.
+
+@Input        psDeviceNode       Device instance identifier.
+@Input        pszBVNCString      Identifier string of the FW image to
+                                 be loaded/acquired in production driver.
+@Input        pfnVerifyFirmware  Callback which checks validity of FW image.
+@Output       ppsFWImage         Ptr to private data on success,
+                                 NULL otherwise.
+@Return       PVRSRV_ERROR       PVRSRV_OK on success,
+                                 PVRSRV_ERROR_NOT_READY if filesystem is not
+                                                        ready/initialised,
+                                 PVRSRV_ERROR_NOT_FOUND if no suitable FW
+                                                        image could be found
+                                 PVRSRV_ERROR_OUT_OF_MEMORY if unable to alloc
+                                                        memory for FW image
+                                 PVRSRV_ERROR_NOT_AUTHENTICATED if FW image
+                                                        cannot be verified.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSLoadFirmware(PVRSRV_DEVICE_NODE *psDeviceNode,
+                            const IMG_CHAR *pszBVNCString,
+                            bool (*pfnVerifyFirmware)(OS_FW_IMAGE*),
+                            OS_FW_IMAGE **ppsFWImage);
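+
+/*
+ * For illustration only (a hypothetical GPU with B=1, V=2, N=3, C=4): the
+ * plain image name would be "rgx.fw.1.2.3.4", while a signed build on a
+ * provisional configuration running as a virtualised host would use
+ * "rgx.fw.signed.1.2p.3.4.vz". A sketch of a call, with error handling
+ * omitted and using the Linux OSVerifyFirmware() helper declared above:
+ *
+ *   OS_FW_IMAGE *psFWImage;
+ *
+ *   eError = OSLoadFirmware(psDeviceNode, "rgx.fw.1.2.3.4",
+ *                           OSVerifyFirmware, &psFWImage);
+ */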
+
+/*************************************************************************/ /*!
+@Function     OSFirmwareData
+@Description  This function returns a pointer to the start of the FW image
+              binary data held in memory. It must remain valid until
+              OSUnloadFirmware() is called.
+@Input        psFWImage  Private data opaque handle
+@Return       const void* Ptr to the FW binary image to be started on the GPU.
+*/ /**************************************************************************/
+const void* OSFirmwareData(OS_FW_IMAGE *psFWImage);
+
+/*************************************************************************/ /*!
+@Function     OSFirmwareSize
+@Description  This function returns the size of the FW image binary data.
+@Input        psFWImage  Private data opaque handle
+@Return       size_t     Size in bytes of the firmware binary image
+*/ /**************************************************************************/
+size_t OSFirmwareSize(OS_FW_IMAGE *psFWImage);
+
+/*************************************************************************/ /*!
+@Function     OSUnloadFirmware
+@Description  This is called when the server has completed firmware
+              initialisation and no longer needs the private data, possibly
+              allocated by OSLoadFirmware().
+@Input        psFWImage  Private data opaque handle
+*/ /**************************************************************************/
+void OSUnloadFirmware(OS_FW_IMAGE *psFWImage);
+
+#endif /* FWLOAD_H */
+
+/******************************************************************************
+ End of file (fwload.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/fwtrace_string.h b/drivers/gpu/drm/img/img-rogue/services/server/include/fwtrace_string.h
new file mode 100644 (file)
index 0000000..a2ab95c
--- /dev/null
@@ -0,0 +1,52 @@
+/*************************************************************************/ /*!
+@File           fwtrace_string.h
+@Title          RGX Firmware trace strings for KM
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Platform       Generic
+@Description    This file defines SFs tuple.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef KM_TRACE_STRING_H
+#define KM_TRACE_STRING_H
+
+#include "rgx_fwif_sf.h"
+
+extern const RGXKM_STID_FMT SFs[];
+extern const IMG_UINT32 g_ui32SFsCount;
+
+#endif /* KM_TRACE_STRING_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/handle.h b/drivers/gpu/drm/img/img-rogue/services/server/include/handle.h
new file mode 100644 (file)
index 0000000..92946b6
--- /dev/null
@@ -0,0 +1,206 @@
+/**************************************************************************/ /*!
+@File
+@Title          Handle Manager API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provide handle management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(HANDLE_API_H)
+#define HANDLE_API_H
+
+#include "lock_types.h"
+
+/*
+ * Handle API
+ * ----------
+ * The handle API is intended to provide handles for kernel resources, which
+ * can then be passed back to user space processes.
+ *
+ * The following functions comprise the API. Each function takes a pointer to
+ * a PVRSRV_HANDLE_BASE structure, one of which is allocated for each process,
+ * and stored in the per-process data area. Use KERNEL_HANDLE_BASE for handles
+ * not allocated for a particular process, or for handles that need to be
+ * allocated before the PVRSRV_HANDLE_BASE structure for the process is
+ * available.
+ *
+ * PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase,
+ *      IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType,
+ *      PVRSRV_HANDLE_ALLOC_FLAG eFlag, PFN_HANDLE_RELEASE pfnReleaseData);
+ *
+ * Allocate a handle phHandle, for the resource of type eType pointed to by
+ * pvData.
+ *
+ * For handles that have a definite lifetime, where the corresponding resource
+ * is explicitly created and destroyed, eFlag should be zero.
+ *
+ * If a particular resource may be referenced multiple times by a given
+ * process, setting eFlag to PVRSRV_HANDLE_ALLOC_FLAG_MULTI will allow multiple
+ * handles to be allocated for the resource. Such handles cannot be found with
+ * PVRSRVFindHandle.
+ *
+ * PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase,
+ *      IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType,
+ *      PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent);
+ *
+ * This function is similar to PVRSRVAllocHandle, except that the allocated
+ * handles are associated with a parent handle, hParent, that has been
+ * allocated previously. Subhandles are automatically deallocated when their
+ * parent handle is deallocated.
+ * Subhandles can be treated as ordinary handles. For example, they may have
+ * subhandles of their own, and may be explicitly deallocated using
+ * PVRSRVReleaseHandle (see below).
+ *
+ * PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase,
+ *      IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType);
+ *
+ * Find the handle previously allocated for the resource pointed to by pvData,
+ * of type eType. Handles allocated with the flag
+ * PVRSRV_HANDLE_ALLOC_FLAG_MULTI cannot be found using this function.
+ *
+ * PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase,
+ *      void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType,
+ *      IMG_BOOL bRef);
+ *
+ * Given a handle for a resource of type eType, return the pointer to the
+ * resource.
+ *
+ * PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase,
+ *      void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType,
+ *      IMG_HANDLE hAncestor);
+ *
+ * Similar to PVRSRVLookupHandle, but checks the handle is a descendant
+ * of hAncestor.
+ *
+ * void PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase,
+ *      IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+ *
+ * Deallocate a handle of given type.
+ *
+ * Return the parent of a handle in *phParent, or NULL if the handle has
+ * no parent.
+ */
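+
+/*
+ * Illustrative sketch (editor's addition, not part of the driver): a round
+ * trip through the API declared below, using a hypothetical resource pointer.
+ * Kept under '#if 0' so it has no effect on the build.
+ */
+#if 0
+static PVRSRV_ERROR ExampleHandleRoundTrip(PVRSRV_HANDLE_BASE *psBase,
+                                           void *pvResource)
+{
+	IMG_HANDLE hHandle;
+	void *pvLookedUp;
+	PVRSRV_ERROR eError;
+
+	/* Allocate a handle for the resource; no release callback in this sketch */
+	eError = PVRSRVAllocHandle(psBase, &hHandle, pvResource,
+	                           PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+	                           PVRSRV_HANDLE_ALLOC_FLAG_NONE, NULL);
+	if (eError != PVRSRV_OK)
+		return eError;
+
+	/* Resolve the handle back to the data pointer */
+	eError = PVRSRVLookupHandle(psBase, &pvLookedUp, hHandle,
+	                            PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+	if (eError != PVRSRV_OK)
+		return eError;
+
+	/* Drop the handle again; PVRSRVReleaseHandle returns void */
+	PVRSRVReleaseHandle(psBase, hHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	return PVRSRV_OK;
+}
+#endif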
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "hash.h"
+
+typedef enum
+{
+       #define HANDLETYPE(x) PVRSRV_HANDLE_TYPE_##x,
+       #include "handle_types.h"
+       #undef HANDLETYPE
+} PVRSRV_HANDLE_TYPE;
+
+static_assert(PVRSRV_HANDLE_TYPE_NONE == 0, "PVRSRV_HANDLE_TYPE_NONE must be zero");
+
+typedef enum
+{
+       PVRSRV_HANDLE_BASE_TYPE_CONNECTION,
+       PVRSRV_HANDLE_BASE_TYPE_PROCESS,
+       PVRSRV_HANDLE_BASE_TYPE_GLOBAL
+} PVRSRV_HANDLE_BASE_TYPE;
+
+
+typedef enum
+{
+       /* No flags */
+       PVRSRV_HANDLE_ALLOC_FLAG_NONE =    0,
+       /* Multiple handles can point at the given data pointer */
+       PVRSRV_HANDLE_ALLOC_FLAG_MULTI =   0x01,
+       /* Subhandles are allocated in a private handle space */
+       PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE = 0x02
+} PVRSRV_HANDLE_ALLOC_FLAG;
+
+typedef struct _HANDLE_BASE_ PVRSRV_HANDLE_BASE;
+
+typedef struct _PROCESS_HANDLE_BASE_
+{
+       PVRSRV_HANDLE_BASE *psHandleBase;
+       ATOMIC_T iRefCount;
+} PROCESS_HANDLE_BASE;
+
+extern PVRSRV_HANDLE_BASE *gpsKernelHandleBase;
+#define        KERNEL_HANDLE_BASE (gpsKernelHandleBase)
+
+#define HANDLE_DEBUG_LISTING_MAX_NUM 20
+
+typedef PVRSRV_ERROR (*PFN_HANDLE_RELEASE)(void *pvData);
+
+PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, PFN_HANDLE_RELEASE pfnReleaseData);
+PVRSRV_ERROR PVRSRVAllocHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, PFN_HANDLE_RELEASE pfnReleaseData);
+
+PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent);
+PVRSRV_ERROR PVRSRVAllocSubHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent);
+
+PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType);
+PVRSRV_ERROR PVRSRVFindHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_BOOL bRef);
+PVRSRV_ERROR PVRSRVLookupHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_BOOL bRef);
+
+PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor);
+
+void PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+void PVRSRVReleaseHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVDestroyHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+PVRSRV_ERROR PVRSRVDestroyHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+PVRSRV_ERROR PVRSRVDestroyHandleStagedUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase);
+
+PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase,
+                                   PVRSRV_HANDLE_BASE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase, IMG_UINT64 ui64MaxBridgeTime);
+
+PVRSRV_ERROR PVRSRVFreeKernelHandles(PVRSRV_HANDLE_BASE *psBase);
+
+PVRSRV_ERROR PVRSRVHandleInit(void);
+
+PVRSRV_ERROR PVRSRVHandleDeInit(void);
+
+PVRSRV_HANDLE_BASE *PVRSRVRetrieveProcessHandleBase(void);
+
+PVRSRV_ERROR PVRSRVAcquireProcessHandleBase(IMG_PID uiPid, PROCESS_HANDLE_BASE **ppsBase);
+PVRSRV_ERROR PVRSRVReleaseProcessHandleBase(PROCESS_HANDLE_BASE *psBase, IMG_PID uiPid, IMG_UINT64 ui64MaxBridgeTime);
+
+void LockHandle(PVRSRV_HANDLE_BASE *psBase);
+void UnlockHandle(PVRSRV_HANDLE_BASE *psBase);
+
+#endif /* !defined(HANDLE_API_H) */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/handle_impl.h b/drivers/gpu/drm/img/img-rogue/services/server/include/handle_impl.h
new file mode 100644 (file)
index 0000000..9430597
--- /dev/null
@@ -0,0 +1,89 @@
+/**************************************************************************/ /*!
+@File
+@Title          Implementation Callbacks for Handle Manager API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the handle manager API. This file is for declarations
+                and definitions that are private/internal to the handle manager
+                API but need to be shared between the generic handle manager
+                code and the various handle manager backends, i.e. the code that
+                implements the various callbacks.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(HANDLE_IMPL_H)
+#define HANDLE_IMPL_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+typedef struct _HANDLE_IMPL_BASE_ HANDLE_IMPL_BASE;
+
+typedef PVRSRV_ERROR (*PFN_HANDLE_ITER)(IMG_HANDLE hHandle, void *pvData);
+
+typedef struct _HANDLE_IMPL_FUNCTAB_
+{
+       /* Acquire a new handle which is associated with the given data */
+       PVRSRV_ERROR (*pfnAcquireHandle)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE *phHandle, void *pvData);
+
+       /* Release the given handle (optionally returning the data associated with it) */
+       PVRSRV_ERROR (*pfnReleaseHandle)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void **ppvData);
+
+       /* Get the data associated with the given handle */
+       PVRSRV_ERROR (*pfnGetHandleData)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void **ppvData);
+
+       /* Set the data associated with the given handle */
+       PVRSRV_ERROR (*pfnSetHandleData)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void *pvData);
+
+       PVRSRV_ERROR (*pfnIterateOverHandles)(HANDLE_IMPL_BASE *psHandleBase, PFN_HANDLE_ITER pfnHandleIter, void *pvHandleIterData);
+
+       /* Enable handle purging on the given handle base */
+       PVRSRV_ERROR (*pfnEnableHandlePurging)(HANDLE_IMPL_BASE *psHandleBase);
+
+       /* Purge handles on the given handle base */
+       PVRSRV_ERROR (*pfnPurgeHandles)(HANDLE_IMPL_BASE *psHandleBase);
+
+       /* Create handle base */
+       PVRSRV_ERROR (*pfnCreateHandleBase)(HANDLE_IMPL_BASE **psHandleBase);
+
+       /* Destroy handle base */
+       PVRSRV_ERROR (*pfnDestroyHandleBase)(HANDLE_IMPL_BASE *psHandleBase);
+} HANDLE_IMPL_FUNCTAB;
+
+PVRSRV_ERROR PVRSRVHandleGetFuncTable(HANDLE_IMPL_FUNCTAB const **ppsFuncs);
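+
+/*
+ * Illustrative sketch (editor's addition, not part of the driver): how the
+ * generic handle manager might fetch the backend function table and create an
+ * implementation handle base through it. Kept under '#if 0'.
+ */
+#if 0
+static PVRSRV_ERROR ExampleCreateImplBase(HANDLE_IMPL_BASE **ppsImplBase)
+{
+	HANDLE_IMPL_FUNCTAB const *psFuncs;
+	PVRSRV_ERROR eError;
+
+	eError = PVRSRVHandleGetFuncTable(&psFuncs);
+	if (eError != PVRSRV_OK)
+		return eError;
+
+	/* All further operations go through the backend callbacks */
+	return psFuncs->pfnCreateHandleBase(ppsImplBase);
+}
+#endif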
+
+#endif /* !defined(HANDLE_IMPL_H) */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/handle_types.h b/drivers/gpu/drm/img/img-rogue/services/server/include/handle_types.h
new file mode 100644 (file)
index 0000000..795e206
--- /dev/null
@@ -0,0 +1,88 @@
+/**************************************************************************/ /*!
+@File
+@Title          Handle Manager handle types
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provide handle management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+/* NOTE: Do not add include guards to this file */
+
+HANDLETYPE(NONE)
+HANDLETYPE(SHARED_EVENT_OBJECT)
+HANDLETYPE(EVENT_OBJECT_CONNECT)
+HANDLETYPE(PMR_LOCAL_EXPORT_HANDLE)
+HANDLETYPE(PHYSMEM_PMR)
+HANDLETYPE(PHYSMEM_PMR_EXPORT)
+HANDLETYPE(PHYSMEM_PMR_SECURE_EXPORT)
+HANDLETYPE(DEVMEMINT_CTX)
+HANDLETYPE(DEVMEMINT_CTX_EXPORT)
+HANDLETYPE(DEVMEMINT_HEAP)
+HANDLETYPE(DEVMEMINT_RESERVATION)
+HANDLETYPE(DEVMEMINT_MAPPING)
+HANDLETYPE(RGX_FW_MEMDESC)
+HANDLETYPE(RGX_FREELIST)
+HANDLETYPE(RGX_MEMORY_BLOCK)
+HANDLETYPE(RGX_SERVER_RENDER_CONTEXT)
+HANDLETYPE(RGX_SERVER_TQ_CONTEXT)
+HANDLETYPE(RGX_SERVER_TQ_TDM_CONTEXT)
+HANDLETYPE(RGX_SERVER_COMPUTE_CONTEXT)
+HANDLETYPE(RGX_SERVER_RAY_CONTEXT)
+HANDLETYPE(RGX_SERVER_KICKSYNC_CONTEXT)
+#if defined(PVR_TESTING_UTILS) && defined(SUPPORT_VALIDATION)
+HANDLETYPE(RGX_SERVER_GPUMAP_CONTEXT)
+#endif
+HANDLETYPE(SYNC_PRIMITIVE_BLOCK)
+HANDLETYPE(SYNC_RECORD_HANDLE)
+HANDLETYPE(PVRSRV_TIMELINE_SERVER)
+HANDLETYPE(PVRSRV_FENCE_SERVER)
+HANDLETYPE(PVRSRV_FENCE_EXPORT)
+HANDLETYPE(RGX_KM_HW_RT_DATASET)
+HANDLETYPE(RGX_FWIF_ZSBUFFER)
+HANDLETYPE(RGX_POPULATION)
+HANDLETYPE(DC_DEVICE)
+HANDLETYPE(DC_DISPLAY_CONTEXT)
+HANDLETYPE(DC_BUFFER)
+HANDLETYPE(DC_PIN_HANDLE)
+HANDLETYPE(DEVMEM_MEM_IMPORT)
+HANDLETYPE(PHYSMEM_PMR_PAGELIST)
+HANDLETYPE(PVR_TL_SD)
+HANDLETYPE(RI_HANDLE)
+HANDLETYPE(DEV_PRIV_DATA)
+HANDLETYPE(MM_PLAT_CLEANUP)
+HANDLETYPE(WORKEST_RETURN_DATA)
+HANDLETYPE(DI_CONTEXT)
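+
+/*
+ * Illustrative note (editor's addition): this list is an X-macro and may be
+ * expanded more than once. handle.h expands it into the PVRSRV_HANDLE_TYPE
+ * enum; a hypothetical debug table of type names could be generated the same
+ * way:
+ *
+ *   #define HANDLETYPE(x) #x,
+ *   static const IMG_CHAR *const apszHandleTypeNames[] = {
+ *   #include "handle_types.h"
+ *   };
+ *   #undef HANDLETYPE
+ */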
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/htbserver.h b/drivers/gpu/drm/img/img-rogue/services/server/include/htbserver.h
new file mode 100644 (file)
index 0000000..c30556c
--- /dev/null
@@ -0,0 +1,228 @@
+/*************************************************************************/ /*!
+@File           htbserver.h
+@Title          Host Trace Buffer server implementation.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+
+@Description    Host Trace Buffer provides a mechanism to log Host events to a
+                buffer in a similar way to the Firmware Trace mechanism.
+                Host Trace Buffer logs data using a Transport Layer buffer.
+                The Transport Layer and the pvrtld tool provide the mechanism
+                to retrieve the trace data.
+
+                A Host Trace can be merged with a corresponding Firmware Trace.
+                This is achieved by inserting synchronisation data into both
+                traces and post processing to merge them.
+
+                The FW Trace will contain a "Sync Partition Marker". This is
+                updated every time the RGX is brought out of reset (RGX clock
+                timestamps reset at this point) and is repeated when the FW
+                Trace buffer wraps to ensure there is always at least 1
+                partition marker in the Firmware Trace buffer whenever it is
+                read.
+
+                The Host Trace will contain corresponding "Sync Partition
+                Markers" - #HTBSyncPartitionMarker(). Each partition is then
+                subdivided into "Sync Scale" sections - #HTBSyncScale(). The
+                "Sync Scale" data allows the timestamps from the two traces to
+                be correlated. The "Sync Scale" data is updated as part of the
+                standard RGX time correlation code (rgxtimecorr.c) and is
+                updated periodically including on power and clock changes.
+
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef HTBSERVER_H
+#define HTBSERVER_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv.h"
+#include "htbuffer.h"
+
+/************************************************************************/ /*!
+ @Function      HTBInit
+ @Description   Initialise the Host Trace Buffer and allocate all resources
+
+ @Return        eError          Internal services call returned eError error
+                                number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBInit(void);
+
+/************************************************************************/ /*!
+ @Function      HTBDeInit
+ @Description   Close the Host Trace Buffer and free all resources
+
+ @Return        eError          Internal services call returned eError error
+                                number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBDeInit(void);
+
+/*************************************************************************/ /*!
+ @Function      HTBConfigureKM
+ @Description   Configure or update the configuration of the Host Trace Buffer
+
+ @Input         ui32NameSize    Size of the pszName string
+
+ @Input         pszName         Name to use for the underlying data buffer
+
+ @Input         ui32BufferSize  Size of the underlying data buffer
+
+ @Return        eError          Internal services call returned eError error
+                                number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBConfigureKM(IMG_UINT32 ui32NameSize, const IMG_CHAR * pszName,
+                          const IMG_UINT32 ui32BufferSize);
+
+
+/*************************************************************************/ /*!
+ @Function      HTBControlKM
+ @Description   Update the configuration of the Host Trace Buffer
+
+ @Input         ui32NumFlagGroups Number of group enable flags words
+
+ @Input         aui32GroupEnable  Flags words controlling groups to be logged
+
+ @Input         ui32LogLevel    Log level to record
+
+ @Input         ui32EnablePID   PID to enable logging for a specific process
+
+ @Input         eLogMode        Enable logging for all or specific processes
+
+ @Input         eOpMode         Control the behaviour of the data buffer
+
+ @Return        eError          Internal services call returned eError error
+                                number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBControlKM(const IMG_UINT32 ui32NumFlagGroups,
+                        const IMG_UINT32 *aui32GroupEnable,
+                        const IMG_UINT32 ui32LogLevel,
+                        const IMG_UINT32 ui32EnablePID,
+                        const HTB_LOGMODE_CTRL eLogMode,
+                        const HTB_OPMODE_CTRL eOpMode);
+
+
+/*************************************************************************/ /*!
+ @Function      HTBSyncPartitionMarker
+ @Description   Write an HTB sync partition marker to the HTB log
+
+ @Input         ui32Marker      Marker value
+
+*/ /**************************************************************************/
+void
+HTBSyncPartitionMarker(const IMG_UINT32 ui32Marker);
+
+/*************************************************************************/ /*!
+ @Function      HTBSyncPartitionMarkerRepeat
+ @Description   Write an HTB sync partition marker to the HTB log, repeating
+                the previously recorded values.
+
+ @Input         ui32Marker      Marker value
+ @Input         ui64SyncOSTS    previous OSTS
+ @Input         ui64SyncCRTS    previous CRTS
+ @Input         ui32ClkSpeed    previous Clockspeed
+
+*/ /**************************************************************************/
+void
+HTBSyncPartitionMarkerRepeat(const IMG_UINT32 ui32Marker,
+                                                        const IMG_UINT64 ui64SyncOSTS,
+                                                        const IMG_UINT64 ui64SyncCRTS,
+                                                        const IMG_UINT32 ui32ClkSpeed);
+
+/*************************************************************************/ /*!
+ @Function      HTBSyncScale
+ @Description   Write FW-Host synchronisation data to the HTB log when clocks
+                change or are re-calibrated
+
+ @Input         bLogValues      IMG_TRUE if value should be immediately written
+                                out to the log
+
+ @Input         ui64OSTS        OS Timestamp
+
+ @Input         ui64CRTS        Rogue timestamp
+
+ @Input         ui32CalcClkSpd  Calculated clock speed
+
+*/ /**************************************************************************/
+void
+HTBSyncScale(const IMG_BOOL bLogValues, const IMG_UINT64 ui64OSTS,
+                        const IMG_UINT64 ui64CRTS, const IMG_UINT32 ui32CalcClkSpd);
+
+/*************************************************************************/ /*!
+ @Function      HTBLogKM
+ @Description   Record a Host Trace Buffer log event
+
+ @Input         PID             The PID of the process the event is associated
+                                with. This is provided as an argument rather
+                                than querying internally so that events associated
+                                with a particular process, but performed by
+                                another can be logged correctly.
+
+ @Input         TID             The TID of the process the event is associated with.
+
+ @Input         ui64TimeStamp   The timestamp to be associated with this log event
+
+ @Input         SF              The log event ID
+
+ @Input         ui32NumArgs     Number of entries in aui32Args
+
+ @Input         aui32Args       Array of log event parameters
+
+ @Return        PVRSRV_OK       Success.
+
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBLogKM(IMG_UINT32 PID, IMG_UINT32 TID, IMG_UINT64 ui64TimeStamp, HTB_LOG_SFids SF,
+                IMG_UINT32 ui32NumArgs, IMG_UINT32 *aui32Args);
+
+/*************************************************************************/ /*!
+ @Function      HTBIsConfigured
+ @Description   Determine if HTB stream has been configured
+
+ @Input         none
+
+ @Return        IMG_FALSE       Stream has not been configured
+                IMG_TRUE        Stream has been configured
+
+*/ /**************************************************************************/
+IMG_BOOL
+HTBIsConfigured(void);
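+
+/*
+ * Illustrative sketch (editor's addition, not part of the driver): the
+ * expected bring-up sequence using only the interfaces declared above. The
+ * stream name and buffer size are arbitrary. Kept under '#if 0'.
+ */
+#if 0
+static PVRSRV_ERROR ExampleHTBBringUp(void)
+{
+	static const IMG_CHAR acName[] = "ExampleHTBStream";
+	PVRSRV_ERROR eError;
+
+	eError = HTBInit();
+	if (eError != PVRSRV_OK)
+		return eError;
+
+	/* Create the underlying Transport Layer buffer (64KB here) */
+	eError = HTBConfigureKM(sizeof(acName), acName, 64 * 1024);
+	if (eError != PVRSRV_OK)
+	{
+		(void) HTBDeInit();
+		return eError;
+	}
+
+	/* HTBIsConfigured() should now report IMG_TRUE */
+	(void) HTBIsConfigured();
+
+	return PVRSRV_OK;
+}
+#endif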
+#endif /* HTBSERVER_H */
+
+/* EOF */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/info_page.h b/drivers/gpu/drm/img/img-rogue/services/server/include/info_page.h
new file mode 100644 (file)
index 0000000..5816125
--- /dev/null
@@ -0,0 +1,99 @@
+/*************************************************************************/ /*!
+@File
+@Title          Kernel/User mode general purpose shared memory.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    General purpose memory shared between kernel driver and user
+                mode.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef INFO_PAGE_KM_H
+#define INFO_PAGE_KM_H
+
+#include "pvrsrv_error.h"
+
+#include "pmr.h"
+#include "pvrsrv.h"
+#include "info_page_defs.h"
+
+/**
+ * @Function InfoPageCreate
+ * @Description Allocates resources for global information page.
+ * @Input psData pointer to PVRSRV data
+ * @Return PVRSRV_OK on success and other PVRSRV_ERROR code on error.
+ */
+PVRSRV_ERROR InfoPageCreate(PVRSRV_DATA *psData);
+
+/**
+ * @Function InfoPageDestroy
+ * @Description Frees all resources of the global information page.
+ * @Input psData pointer to PVRSRV data
+ * @Return None
+ */
+void InfoPageDestroy(PVRSRV_DATA *psData);
+
+/**
+ * @Function PVRSRVAcquireInfoPageKM()
+ * @Description This interface is used for obtaining the global information page
+ *              which acts as a general purpose shared memory between KM and UM.
+ *              The use of this information page outside of services is _not_
+ *              recommended.
+ * @Output ppsPMR handle to exported PMR
+ * @Return
+ */
+PVRSRV_ERROR PVRSRVAcquireInfoPageKM(PMR **ppsPMR);
+
+/**
+ * @Function PVRSRVReleaseInfoPageKM()
+ * @Description This function matches PVRSRVAcquireInfoPageKM().
+ * @Input psPMR handle to exported PMR
+ * @Return PVRSRV_OK on success and other PVRSRV_ERROR code on error.
+ */
+PVRSRV_ERROR PVRSRVReleaseInfoPageKM(PMR *psPMR);
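+
+/*
+ * Illustrative sketch (editor's addition, not part of the driver): acquire
+ * and release must be paired; the PMR is only valid in between. Kept under
+ * '#if 0'.
+ */
+#if 0
+static PVRSRV_ERROR ExampleTouchInfoPage(void)
+{
+	PMR *psPMR;
+	PVRSRV_ERROR eError = PVRSRVAcquireInfoPageKM(&psPMR);
+
+	if (eError != PVRSRV_OK)
+		return eError;
+
+	/* ... map or inspect the info page through the PMR here ... */
+
+	return PVRSRVReleaseInfoPageKM(psPMR);
+}
+#endif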
+
+/**
+ * @Function GetInfoPageDebugFlagsKM()
+ * @Description Return info page debug flags
+ * @Return info page debug flags
+ */
+static INLINE IMG_UINT32 GetInfoPageDebugFlagsKM(void)
+{
+       return (PVRSRVGetPVRSRVData())->pui32InfoPage[DEBUG_FEATURE_FLAGS];
+}
+
+#endif /* INFO_PAGE_KM_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/lists.h b/drivers/gpu/drm/img/img-rogue/services/server/include/lists.h
new file mode 100644 (file)
index 0000000..2e2c29a
--- /dev/null
@@ -0,0 +1,367 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linked list shared functions templates.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Definition of the linked list function templates.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef LISTS_UTILS_H
+#define LISTS_UTILS_H
+
+/* instruct QAC to ignore warnings about the following custom formatted macros */
+/* PRQA S 0881,3410 ++ */
+
+#if defined(__linux__)
+ #include <linux/version.h>
+
+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+  #include <linux/stdarg.h>
+ #else
+  #include <stdarg.h>
+ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
+#else
+ #include <stdarg.h>
+#endif /* __linux__ */
+
+#include "img_types.h"
+#include "device.h"
+#include "power.h"
+
+/*
+ - USAGE -
+
+ The list functions work with any structure that provides the fields psNext and
+ ppsThis. In order to make a function available for a given type, it is required
+ to use the function template macro that creates the actual code.
+
+ There are 5 main types of functions:
+ - INSERT      : given a pointer to the head pointer of the list and a pointer
+                 to the node, inserts it as the new head.
+ - INSERT TAIL : given a pointer to the head pointer of the list and a pointer
+                 to the node, inserts the node at the tail of the list.
+ - REMOVE      : given a pointer to a node, removes it from its list.
+ - FOR EACH    : apply a function over all the elements of a list.
+ - ANY         : apply a function over the elements of a list until one of them
+                 returns a non-null value, then return that value.
+
+ The last two functions also have a variable-argument form, which allows
+ additional parameters to be passed to the callback function. To use it, the
+ callback function must take two arguments: the first is the current node and
+ the second is a va_list of the additional arguments.
+
+ The ANY functions also have another form which specifies the return type of
+ the callback function and the default ("continue") value returned by it.
+
+*/
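+
+/*
+ * Illustrative sketch (editor's addition, not part of the driver): how the
+ * templates below could be instantiated for a hypothetical node type. The
+ * type only needs psNext and ppsThis fields. Kept under '#if 0'.
+ */
+#if 0
+typedef struct _EXAMPLE_NODE_
+{
+	struct _EXAMPLE_NODE_ *psNext;
+	struct _EXAMPLE_NODE_ **ppsThis;
+	IMG_UINT32 ui32Value;
+} EXAMPLE_NODE;
+
+static void ExampleVisitNode(EXAMPLE_NODE *psNode)
+{
+	(void) psNode; /* e.g. log psNode->ui32Value */
+}
+
+/* Generates List_EXAMPLE_NODE_Insert() and List_EXAMPLE_NODE_ForEach() */
+IMPLEMENT_LIST_INSERT(EXAMPLE_NODE)
+IMPLEMENT_LIST_FOR_EACH(EXAMPLE_NODE)
+
+static void ExampleBuildAndWalk(void)
+{
+	EXAMPLE_NODE *psHead = NULL;
+	EXAMPLE_NODE sNode = { NULL, NULL, 42 };
+
+	List_EXAMPLE_NODE_Insert(&psHead, &sNode);
+	List_EXAMPLE_NODE_ForEach(psHead, ExampleVisitNode);
+}
+#endif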
+
+/*************************************************************************/ /*!
+@Function       List_##TYPE##_ForEach
+@Description    Apply a callback function to all the elements of a list.
+@Input          psHead        The head of the list to be processed.
+@Input          pfnCallBack   The function to be applied to each element of the list.
+*/ /**************************************************************************/
+#define DECLARE_LIST_FOR_EACH(TYPE) \
+void List_##TYPE##_ForEach(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))
+
+#define IMPLEMENT_LIST_FOR_EACH(TYPE) \
+void List_##TYPE##_ForEach(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))\
+{\
+       while (psHead)\
+       {\
+               pfnCallBack(psHead);\
+               psHead = psHead->psNext;\
+       }\
+}
+
+/*************************************************************************/ /*!
+@Function       List_##TYPE##_ForEachSafe
+@Description    Apply a callback function to all the elements of a list. Do it
+                in a safe way that handles the fact that a node might remove
+                itself from the list during the iteration.
+@Input          psHead        The head of the list to be processed.
+@Input          pfnCallBack   The function to be applied to each element of the list.
+*/ /**************************************************************************/
+#define DECLARE_LIST_FOR_EACH_SAFE(TYPE) \
+void List_##TYPE##_ForEachSafe(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))
+
+#define IMPLEMENT_LIST_FOR_EACH_SAFE(TYPE) \
+void List_##TYPE##_ForEachSafe(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))\
+{\
+       TYPE *psNext;\
+\
+       while (psHead)\
+       {\
+               psNext = psHead->psNext; \
+               pfnCallBack(psHead);\
+               psHead = psNext;\
+       }\
+}
+
+
+#define DECLARE_LIST_FOR_EACH_VA(TYPE) \
+void List_##TYPE##_ForEach_va(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode, va_list va), ...)
+
+#define IMPLEMENT_LIST_FOR_EACH_VA(TYPE) \
+void List_##TYPE##_ForEach_va(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode, va_list va), ...) \
+{\
+       va_list ap;\
+       while (psHead)\
+       {\
+               va_start(ap, pfnCallBack);\
+               pfnCallBack(psHead, ap);\
+               psHead = psHead->psNext;\
+               va_end(ap);\
+       }\
+}
+
+
+/*************************************************************************/ /*!
+@Function       List_##TYPE##_Any
+@Description    Applies a callback function to the elements of a list until
+                the function returns a non-null value, then returns that value.
+@Input          psHead        The head of the list to be processed.
+@Input          pfnCallBack   The function to be applied to each element of the list.
+@Return         The first non null value returned by the callback function.
+*/ /**************************************************************************/
+#define DECLARE_LIST_ANY(TYPE) \
+void* List_##TYPE##_Any(TYPE *psHead, void* (*pfnCallBack)(TYPE* psNode))
+
+#define IMPLEMENT_LIST_ANY(TYPE) \
+void* List_##TYPE##_Any(TYPE *psHead, void* (*pfnCallBack)(TYPE* psNode))\
+{ \
+       void *pResult;\
+       TYPE *psNextNode;\
+       pResult = NULL;\
+       psNextNode = psHead;\
+       while (psHead && !pResult)\
+       {\
+               psNextNode = psNextNode->psNext;\
+               pResult = pfnCallBack(psHead);\
+               psHead = psNextNode;\
+       }\
+       return pResult;\
+}
+
+
+/* Variable-argument form: the extra arguments are passed to the callback as a va_list. */
+
+#define DECLARE_LIST_ANY_VA(TYPE) \
+void* List_##TYPE##_Any_va(TYPE *psHead, void*(*pfnCallBack)(TYPE* psNode, va_list va), ...)
+
+#define IMPLEMENT_LIST_ANY_VA(TYPE) \
+void* List_##TYPE##_Any_va(TYPE *psHead, void*(*pfnCallBack)(TYPE* psNode, va_list va), ...)\
+{\
+       va_list ap;\
+       TYPE *psNextNode;\
+       void* pResult = NULL;\
+       while (psHead && !pResult)\
+       {\
+               psNextNode = psHead->psNext;\
+               va_start(ap, pfnCallBack);\
+               pResult = pfnCallBack(psHead, ap);\
+               va_end(ap);\
+               psHead = psNextNode;\
+       }\
+       return pResult;\
+}
+
+/* These forms add extra type safety, so no casts are needed on the results. */
+
+#define DECLARE_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))
+
+#define IMPLEMENT_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))\
+{ \
+       RTYPE result;\
+       TYPE *psNextNode;\
+       result = CONTINUE;\
+       psNextNode = psHead;\
+       while (psHead && result == CONTINUE)\
+       {\
+               psNextNode = psNextNode->psNext;\
+               result = pfnCallBack(psHead);\
+               psHead = psNextNode;\
+       }\
+       return result;\
+}
+
+
+#define DECLARE_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)
+
+#define IMPLEMENT_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)\
+{\
+       va_list ap;\
+       TYPE *psNextNode;\
+       RTYPE result = CONTINUE;\
+       while (psHead && result == CONTINUE)\
+       {\
+               psNextNode = psHead->psNext;\
+               va_start(ap, pfnCallBack);\
+               result = pfnCallBack(psHead, ap);\
+               va_end(ap);\
+               psHead = psNextNode;\
+       }\
+       return result;\
+}
+
+
+/*************************************************************************/ /*!
+@Function       List_##TYPE##_Remove
+@Description    Removes a given node from the list.
+@Input          psNode      The pointer to the node to be removed.
+*/ /**************************************************************************/
+#define DECLARE_LIST_REMOVE(TYPE) \
+void List_##TYPE##_Remove(TYPE *psNode)
+
+#define IMPLEMENT_LIST_REMOVE(TYPE) \
+void List_##TYPE##_Remove(TYPE *psNode)\
+{\
+       (*psNode->ppsThis)=psNode->psNext;\
+       if (psNode->psNext)\
+       {\
+               psNode->psNext->ppsThis = psNode->ppsThis;\
+       }\
+}
+
+/*************************************************************************/ /*!
+@Function       List_##TYPE##_Insert
+@Description    Inserts a given node at the beginning of the list.
+@Input          psHead   The pointer to the pointer to the head node.
+@Input          psNode   The pointer to the node to be inserted.
+*/ /**************************************************************************/
+#define DECLARE_LIST_INSERT(TYPE) \
+void List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)
+
+#define IMPLEMENT_LIST_INSERT(TYPE) \
+void List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)\
+{\
+       psNewNode->ppsThis = ppsHead;\
+       psNewNode->psNext = *ppsHead;\
+       *ppsHead = psNewNode;\
+       if (psNewNode->psNext)\
+       {\
+               psNewNode->psNext->ppsThis = &(psNewNode->psNext);\
+       }\
+}
+
+/*************************************************************************/ /*!
+@Function       List_##TYPE##_InsertTail
+@Description    Inserts a given node at the end of the list.
+@Input          psHead   The pointer to the pointer to the head node.
+@Input          psNode   The pointer to the node to be inserted.
+*/ /**************************************************************************/
+#define DECLARE_LIST_INSERT_TAIL(TYPE) \
+void List_##TYPE##_InsertTail(TYPE **ppsHead, TYPE *psNewNode)
+
+#define IMPLEMENT_LIST_INSERT_TAIL(TYPE) \
+void List_##TYPE##_InsertTail(TYPE **ppsHead, TYPE *psNewNode)\
+{\
+       TYPE *psTempNode = *ppsHead;\
+       if (psTempNode != NULL)\
+       {\
+               while (psTempNode->psNext)\
+                       psTempNode = psTempNode->psNext;\
+               ppsHead = &psTempNode->psNext;\
+       }\
+       psNewNode->ppsThis = ppsHead;\
+       psNewNode->psNext = NULL;\
+       *ppsHead = psNewNode;\
+}
+
+/*************************************************************************/ /*!
+@Function       List_##TYPE##_Reverse
+@Description    Reverse a list in place
+@Input          ppsHead    The pointer to the pointer to the head node.
+*/ /**************************************************************************/
+#define DECLARE_LIST_REVERSE(TYPE) \
+void List_##TYPE##_Reverse(TYPE **ppsHead)
+
+#define IMPLEMENT_LIST_REVERSE(TYPE) \
+void List_##TYPE##_Reverse(TYPE **ppsHead)\
+{\
+       TYPE *psTmpNode1; \
+       TYPE *psTmpNode2; \
+       TYPE *psCurNode; \
+       psTmpNode1 = NULL; \
+       psCurNode = *ppsHead; \
+       while (psCurNode) { \
+               psTmpNode2 = psCurNode->psNext; \
+               psCurNode->psNext = psTmpNode1; \
+               psTmpNode1 = psCurNode; \
+               psCurNode = psTmpNode2; \
+               if (psCurNode) \
+               { \
+                       psTmpNode1->ppsThis = &(psCurNode->psNext); \
+               } \
+               else \
+               { \
+                       psTmpNode1->ppsThis = ppsHead; \
+               } \
+       } \
+       *ppsHead = psTmpNode1; \
+}
+
+#define IS_LAST_ELEMENT(x) ((x)->psNext == NULL)
+
+
+DECLARE_LIST_ANY(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_ANY_2(PVRSRV_DEVICE_NODE, IMG_BOOL, IMG_FALSE);
+DECLARE_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK);
+DECLARE_LIST_ANY_VA(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK);
+DECLARE_LIST_FOR_EACH(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_INSERT_TAIL(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_REMOVE(PVRSRV_DEVICE_NODE);
+
+#undef DECLARE_LIST_ANY_2
+#undef DECLARE_LIST_ANY_VA
+#undef DECLARE_LIST_ANY_VA_2
+#undef DECLARE_LIST_FOR_EACH
+#undef DECLARE_LIST_FOR_EACH_VA
+#undef DECLARE_LIST_INSERT
+#undef DECLARE_LIST_REMOVE
+
+#endif
+
+/* re-enable warnings */
+/* PRQA S 0881,3410 -- */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/mmu_common.h b/drivers/gpu/drm/img/img-rogue/services/server/include/mmu_common.h
new file mode 100644 (file)
index 0000000..a84fa69
--- /dev/null
@@ -0,0 +1,792 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common MMU Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements basic low level control of MMU.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef MMU_COMMON_H
+#define MMU_COMMON_H
+
+/*
+       The Memory Management Unit (MMU) performs device virtual to physical
+       translation.
+
+       Terminology:
+        - page catalogue, PC   (optional, 3 tier MMU)
+        - page directory, PD
+        - page table, PT (can be variable sized)
+        - data page, DP (can be variable sized)
+       Note: PD and PC are fixed size and can't be larger than the native
+             physical (CPU) page size
+       Shifts and AlignShift variables:
+        - 'xxxShift' represents the number of bits a bitfield is shifted left from bit0
+        - 'xxxAlignShift' is used to convert a bitfield (based at bit0) into byte units
+          by applying a bit shift left by 'xxxAlignShift' bits
+*/
+
+/*
+       Device Virtual Address Config:
+
+       Incoming Device Virtual Address is deconstructed into up to 4
+       fields, where the virtual address is up to 64bits:
+       MSB-----------------------------------------------LSB
+       | PC Index:   | PD Index:  | PT Index: | DP offset: |
+       | d bits      | c bits     | b-v bits  |  a+v bits  |
+       -----------------------------------------------------
+       where v is the variable page table modifier, e.g.
+                       v == 0 -> 4KB DP
+                       v == 2 -> 16KB DP
+                       v == 4 -> 64KB DP
+                       v == 6 -> 256KB DP
+                       v == 8 -> 1MB DP
+                       v == 10 -> 4MB DP
+*/
+
+/* services/server/include/ */
+#include "pmr.h"
+
+/* include/ */
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_notifier.h"
+#include "pvrsrv_error.h"
+#include "servicesext.h"
+
+
+/*!
+       The level of the MMU
+*/
+typedef enum
+{
+       MMU_LEVEL_0 = 0,        /* Level 0 = Page */
+
+       MMU_LEVEL_1,
+       MMU_LEVEL_2,
+       MMU_LEVEL_3,
+       MMU_LEVEL_LAST
+} MMU_LEVEL;
+
+/* moved after declaration of MMU_LEVEL, as pdump_mmu.h references it */
+#include "pdump_mmu.h"
+
+#define MMU_MAX_LEVEL 3
+
+typedef struct _MMU_LEVEL_DATA_
+{
+       IMG_UINT32      ui32Index;
+       IMG_UINT32      ui32NumOfEntries;
+       IMG_CHAR const  *psDebugStr;
+       IMG_UINT8       uiBytesPerEntry;
+       IMG_UINT64      ui64Address;
+} MMU_LEVEL_DATA;
+
+typedef enum _MMU_FAULT_TYPE_
+{
+       MMU_FAULT_TYPE_UNKNOWN = 0, /* If fault is not analysed by Host */
+       MMU_FAULT_TYPE_PM,
+       MMU_FAULT_TYPE_NON_PM,
+} MMU_FAULT_TYPE;
+
+typedef struct _MMU_FAULT_DATA_
+{
+       MMU_LEVEL       eTopLevel;
+       MMU_FAULT_TYPE  eType;
+       MMU_LEVEL_DATA  sLevelData[MMU_LEVEL_LAST];
+} MMU_FAULT_DATA;
+
+struct _MMU_DEVVADDR_CONFIG_;
+
+/*!
+       MMU device attributes. This structure is the interface between the generic
+       MMU code and the device specific MMU code.
+*/
+typedef struct _MMU_DEVICEATTRIBS_
+{
+       PDUMP_MMU_TYPE eMMUType;
+
+       IMG_CHAR *pszMMUPxPDumpMemSpaceName;
+
+       /*! The type of the top level object */
+       MMU_LEVEL eTopLevel;
+
+       /*! Alignment requirement of the base object */
+       IMG_UINT32 ui32BaseAlign;
+
+       /*! HW config of the base object */
+       struct _MMU_PxE_CONFIG_ *psBaseConfig;
+
+       /*! Address split for the base object */
+       const struct _MMU_DEVVADDR_CONFIG_ *psTopLevelDevVAddrConfig;
+
+       /*! Callback for creating protection bits for the page catalogue entry with 8 byte entry */
+       IMG_UINT64 (*pfnDerivePCEProt8)(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+       /*! Callback for creating protection bits for the page catalogue entry with 4 byte entry */
+       IMG_UINT32 (*pfnDerivePCEProt4)(IMG_UINT32 uiProtFlags);
+       /*! Callback for creating protection bits for the page directory entry with 8 byte entry */
+       IMG_UINT64 (*pfnDerivePDEProt8)(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+       /*! Callback for creating protection bits for the page directory entry with 4 byte entry */
+       IMG_UINT32 (*pfnDerivePDEProt4)(IMG_UINT32 uiProtFlags);
+       /*! Callback for creating protection bits for the page table entry with 8 byte entry */
+       IMG_UINT64 (*pfnDerivePTEProt8)(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+       /*! Callback for creating protection bits for the page table entry with 4 byte entry */
+       IMG_UINT32 (*pfnDerivePTEProt4)(IMG_UINT32 uiProtFlags);
+
+       /*! Callback for getting the MMU configuration based on the specified page size */
+       PVRSRV_ERROR (*pfnGetPageSizeConfiguration)(IMG_UINT32 ui32DataPageSize,
+                                                                                               const struct _MMU_PxE_CONFIG_ **ppsMMUPDEConfig,
+                                                                                               const struct _MMU_PxE_CONFIG_ **ppsMMUPTEConfig,
+                                                                                               const struct _MMU_DEVVADDR_CONFIG_ **ppsMMUDevVAddrConfig,
+                                                                                               IMG_HANDLE *phPriv2);
+       /*! Callback for putting the MMU configuration obtained from pfnGetPageSizeConfiguration */
+       PVRSRV_ERROR (*pfnPutPageSizeConfiguration)(IMG_HANDLE hPriv);
+
+       /*! Callback for getting the page size from the PDE for the page table entry with 4 byte entry */
+       PVRSRV_ERROR (*pfnGetPageSizeFromPDE4)(IMG_UINT32, IMG_UINT32 *);
+       /*! Callback for getting the page size from the PDE for the page table entry with 8 byte entry */
+       PVRSRV_ERROR (*pfnGetPageSizeFromPDE8)(IMG_UINT64, IMG_UINT32 *);
+       /*! Callback for getting the page size directly from the address. Supported on MMU4 */
+       PVRSRV_ERROR (*pfnGetPageSizeFromVirtAddr)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_DEV_VIRTADDR, IMG_UINT32 *);
+
+       /*! Private data handle */
+       IMG_HANDLE hGetPageSizeFnPriv;
+} MMU_DEVICEATTRIBS;
+
+/*!
+       MMU virtual address split
+*/
+typedef struct _MMU_DEVVADDR_CONFIG_
+{
+       /*! Page catalogue index mask */
+       IMG_UINT64      uiPCIndexMask;
+       /*! Page catalogue index shift */
+       IMG_UINT8       uiPCIndexShift;
+       /*! Total number of PC entries */
+       IMG_UINT32      uiNumEntriesPC;
+       /*! Page directory mask */
+       IMG_UINT64      uiPDIndexMask;
+       /*! Page directory shift */
+       IMG_UINT8       uiPDIndexShift;
+       /*! Total number of PD entries */
+       IMG_UINT32      uiNumEntriesPD;
+       /*! Page table mask */
+       IMG_UINT64      uiPTIndexMask;
+       /*! Page index shift */
+       IMG_UINT8       uiPTIndexShift;
+       /*! Total number of PT entries */
+       IMG_UINT32      uiNumEntriesPT;
+       /*! Page offset mask */
+       IMG_UINT64      uiPageOffsetMask;
+       /*! Page offset shift */
+       IMG_UINT8       uiPageOffsetShift;
+       /*! First virtual address mappable for this config */
+       IMG_UINT64      uiOffsetInBytes;
+
+} MMU_DEVVADDR_CONFIG;
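+
+/*
+ * Illustrative sketch (editor's addition, not part of the driver): how the
+ * masks and shifts above decompose a device virtual address into PC/PD/PT
+ * indices and a page offset. The actual values are device specific. Kept
+ * under '#if 0'.
+ */
+#if 0
+static void ExampleDecomposeDevVAddr(const MMU_DEVVADDR_CONFIG *psCfg,
+                                     IMG_UINT64 ui64DevVAddr)
+{
+	IMG_UINT64 uiPCIdx  = (ui64DevVAddr & psCfg->uiPCIndexMask)    >> psCfg->uiPCIndexShift;
+	IMG_UINT64 uiPDIdx  = (ui64DevVAddr & psCfg->uiPDIndexMask)    >> psCfg->uiPDIndexShift;
+	IMG_UINT64 uiPTIdx  = (ui64DevVAddr & psCfg->uiPTIndexMask)    >> psCfg->uiPTIndexShift;
+	IMG_UINT64 uiOffset = (ui64DevVAddr & psCfg->uiPageOffsetMask) >> psCfg->uiPageOffsetShift;
+
+	(void) uiPCIdx; (void) uiPDIdx; (void) uiPTIdx; (void) uiOffset;
+}
+#endif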
+
+/*
+       P(C/D/T) Entry Config:
+
+       MSB-----------------------------------------------LSB
+       | PT Addr:   | variable PT ctrl | protection flags: |
+       | bits c+v   | b bits           | a bits            |
+       -----------------------------------------------------
+       where v is the variable page table modifier and is optional
+*/
+/*!
+       Generic MMU entry description. This is used to describe PC, PD and PT entries.
+*/
+typedef struct _MMU_PxE_CONFIG_
+{
+       IMG_UINT8       uiBytesPerEntry; /*! Size of an entry in bytes */
+
+       IMG_UINT64      uiAddrMask;      /*! Physical address mask */
+       IMG_UINT8       uiAddrShift;     /*! Physical address shift */
+       IMG_UINT8       uiAddrLog2Align; /*! Physical address Log 2 alignment */
+
+       IMG_UINT64      uiVarCtrlMask;   /*! Variable control mask */
+       IMG_UINT8       uiVarCtrlShift;  /*! Variable control shift */
+
+       IMG_UINT64      uiProtMask;      /*! Protection flags mask */
+       IMG_UINT8       uiProtShift;     /*! Protection flags shift */
+
+       IMG_UINT64      uiValidEnMask;   /*! Entry valid bit mask */
+       IMG_UINT8       uiValidEnShift;  /*! Entry valid bit shift */
+} MMU_PxE_CONFIG;
+
+/* MMU Protection flags */
+
+
+/* These are specified generically and in a h/w independent way, and
+   are interpreted at each level (PC/PD/PT) separately. */
+
+/* The following flags are for internal use only, and should not
+   traverse the API */
+#define MMU_PROTFLAGS_INVALID 0x80000000U
+
+typedef IMG_UINT32 MMU_PROTFLAGS_T;
+
+/* The following flags should be supplied by the caller: */
+#define MMU_PROTFLAGS_READABLE                                 (1U<<0)
+#define MMU_PROTFLAGS_WRITEABLE                                        (1U<<1)
+#define MMU_PROTFLAGS_CACHE_COHERENT                   (1U<<2)
+#define MMU_PROTFLAGS_CACHED                                   (1U<<3)
+
+/* Device specific flags*/
+#define MMU_PROTFLAGS_DEVICE_OFFSET            16
+#define MMU_PROTFLAGS_DEVICE_MASK              0x000f0000UL
+#define MMU_PROTFLAGS_DEVICE(n)        \
+                       (((n) << MMU_PROTFLAGS_DEVICE_OFFSET) & \
+                       MMU_PROTFLAGS_DEVICE_MASK)
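A short sketch of composing these generic protection flags together with a device-specific value; the value 0x3 used for the device field is made up for illustration:

/* Readable, writeable, cached mapping plus a hypothetical device value. */
MMU_PROTFLAGS_T uiProtFlags = MMU_PROTFLAGS_READABLE |
                              MMU_PROTFLAGS_WRITEABLE |
                              MMU_PROTFLAGS_CACHED |
                              MMU_PROTFLAGS_DEVICE(0x3);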
+
+
+typedef struct _MMU_CONTEXT_ MMU_CONTEXT;
+
+struct _PVRSRV_DEVICE_NODE_;
+
+struct _CONNECTION_DATA_;
+
+typedef struct _MMU_PAGESIZECONFIG_
+{
+       const MMU_PxE_CONFIG *psPDEConfig;
+       const MMU_PxE_CONFIG *psPTEConfig;
+       const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+       IMG_UINT32 uiRefCount;
+       IMG_UINT32 uiMaxRefCount;
+} MMU_PAGESIZECONFIG;
+
+/*************************************************************************/ /*!
+@Function       MMU_ContextCreate
+
+@Description    Create a new MMU context
+
+@Input          psConnection            Connection requesting the MMU context
+                                        creation. Can be NULL for kernel/FW
+                                        memory context.
+@Input          psDevNode               Device node of the device to create the
+                                        MMU context for
+@Output         ppsMMUContext           The created MMU context
+
+@Return         PVRSRV_OK if the MMU context was successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+MMU_ContextCreate(struct _CONNECTION_DATA_ *psConnection,
+                  struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+                  MMU_CONTEXT **ppsMMUContext,
+                  MMU_DEVICEATTRIBS *psDevAttrs);
+
+
+/*************************************************************************/ /*!
+@Function       MMU_ContextDestroy
+
+@Description    Destroy an MMU context
+
+@Input          psMMUContext            MMU context to destroy
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+MMU_ContextDestroy(MMU_CONTEXT *psMMUContext);
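A minimal sketch pairing the create/destroy calls above. Error handling is trimmed, the function name is hypothetical, and psDevNode/psDevAttrs are assumed to come from device initialisation:

static PVRSRV_ERROR ExampleCreateAndDestroyFWContext(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
                                                     MMU_DEVICEATTRIBS *psDevAttrs)
{
	MMU_CONTEXT *psMMUContext = NULL;
	PVRSRV_ERROR eError;

	/* NULL connection: this is a kernel/firmware memory context. */
	eError = MMU_ContextCreate(NULL, psDevNode, &psMMUContext, psDevAttrs);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/* ... use the context (MMU_Alloc, MMU_MapPages, ...) ... */

	MMU_ContextDestroy(psMMUContext);
	return PVRSRV_OK;
}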
+
+/*************************************************************************/ /*!
+@Function       MMU_Alloc
+
+@Description    Allocate the page tables required for the specified virtual range
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          uSize                   The size of the allocation
+
+@Output         puActualSize            Actual size of allocation
+
+@Input          uiProtFlags             Generic MMU protection flags
+
+@Input          uDevVAddrAlignment      Alignment requirement of the virtual
+                                        allocation
+
+@Input          psDevVAddr              Virtual address to start the allocation
+                                        from
+
+@Return         PVRSRV_OK if the allocation of the page tables was successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+MMU_Alloc(MMU_CONTEXT *psMMUContext,
+          IMG_DEVMEM_SIZE_T uSize,
+          IMG_DEVMEM_SIZE_T *puActualSize,
+          IMG_UINT32 uiProtFlags,
+          IMG_DEVMEM_SIZE_T uDevVAddrAlignment,
+          IMG_DEV_VIRTADDR *psDevVAddr,
+          IMG_UINT32 uiLog2PageSize);
+
+
+/*************************************************************************/ /*!
+@Function       MMU_Free
+
+@Description    Free the page tables of the specified virtual range
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          sDevVAddr               Virtual address to start the free
+                                        from
+
+@Input          uiSize                  The size of the allocation
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+MMU_Free(MMU_CONTEXT *psMMUContext,
+         IMG_DEV_VIRTADDR sDevVAddr,
+         IMG_DEVMEM_SIZE_T uiSize,
+         IMG_UINT32 uiLog2DataPageSize);
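A sketch of reserving and then releasing the page tables for a 1 MiB range of 4 KiB pages. The sizes, alignment and base address are illustrative, and the uiAddr member of IMG_DEV_VIRTADDR is assumed:

static PVRSRV_ERROR ExampleAllocAndFreePageTables(MMU_CONTEXT *psMMUContext)
{
	IMG_DEV_VIRTADDR sDevVAddr;
	IMG_DEVMEM_SIZE_T uiActualSize;
	PVRSRV_ERROR eError;

	sDevVAddr.uiAddr = 0x00100000ULL;	/* example base address */

	eError = MMU_Alloc(psMMUContext,
	                   0x100000,		/* 1 MiB range */
	                   &uiActualSize,
	                   MMU_PROTFLAGS_READABLE | MMU_PROTFLAGS_WRITEABLE,
	                   0x1000,		/* virtual alignment */
	                   &sDevVAddr,
	                   12);		/* log2(4 KiB) */
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	MMU_Free(psMMUContext, sDevVAddr, 0x100000, 12);
	return PVRSRV_OK;
}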
+
+
+/*************************************************************************/ /*!
+@Function       MMU_MapPages
+
+@Description    Map pages to the MMU.
+                Two modes of operation: one takes an explicit list of the
+                physical page indices to map; the other takes just the PMR
+                and an optional physical offset to map part of it.
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          uiMappingFlags          Memalloc flags for the mapping
+
+@Input          sDevVAddrBase           Device virtual address of the 1st page
+
+@Input          psPMR                   PMR to map
+
+@Input          ui32PhysPgOffset        Physical offset into the PMR
+
+@Input          ui32MapPageCount        Number of pages to map
+
+@Input          paui32MapIndices        List of page indices to map,
+                                         can be NULL
+
+@Input          uiLog2PageSize          Log2 page size of the pages to map
+
+@Return         PVRSRV_OK if the mapping was successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+MMU_MapPages(MMU_CONTEXT *psMMUContext,
+             PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+             IMG_DEV_VIRTADDR sDevVAddrBase,
+             PMR *psPMR,
+             IMG_UINT32 ui32PhysPgOffset,
+             IMG_UINT32 ui32MapPageCount,
+             IMG_UINT32 *paui32MapIndices,
+             IMG_UINT32 uiLog2PageSize);
+
+/*************************************************************************/ /*!
+@Function       MMU_UnmapPages
+
+@Description    Unmap pages from the MMU.
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          uiMappingFlags          Memalloc flags for the mapping
+
+@Input          sDevVAddr               Device virtual address of the 1st page
+
+@Input          ui32PageCount           Number of pages to unmap
+
+@Input          pai32UnmapIndicies      Array of page indices to be unmapped
+
+@Input          uiLog2PageSize          log2 size of the page
+
+
+@Input          uiMemAllocFlags         Indicates whether the unmapped regions
+                                        need to be backed by a dummy or zero page
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+MMU_UnmapPages(MMU_CONTEXT *psMMUContext,
+               PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+               IMG_DEV_VIRTADDR sDevVAddr,
+               IMG_UINT32 ui32PageCount,
+               IMG_UINT32 *pai32UnmapIndicies,
+               IMG_UINT32 uiLog2PageSize,
+               PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags);
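A sketch of the PMR-offset mode of MMU_MapPages (NULL index list) paired with the matching unmap. The page count and flag usage are illustrative, not a definitive calling convention:

static PVRSRV_ERROR ExampleMapAndUnmap16Pages(MMU_CONTEXT *psMMUContext,
                                              PMR *psPMR,
                                              PVRSRV_MEMALLOCFLAGS_T uiFlags,
                                              IMG_DEV_VIRTADDR sDevVAddrBase)
{
	PVRSRV_ERROR eError;

	eError = MMU_MapPages(psMMUContext,
	                      uiFlags,
	                      sDevVAddrBase,
	                      psPMR,
	                      0,	/* start at the beginning of the PMR */
	                      16,	/* number of pages */
	                      NULL,	/* no index list: map contiguously */
	                      12);	/* 4 KiB pages */
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/* Passing the same flags lets the unmap decide on dummy/zero-page backing. */
	MMU_UnmapPages(psMMUContext, uiFlags, sDevVAddrBase,
	               16, NULL, 12, uiFlags);
	return PVRSRV_OK;
}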
+
+/*************************************************************************/ /*!
+@Function       MMU_MapPMRFast
+
+@Description    Map a PMR into the MMU. The PMR must not be sparse.
+                This is supposed to cover most mappings and, as the name suggests,
+                should be as fast as possible.
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          sDevVAddr               Device virtual address to map the PMR
+                                        into
+
+@Input          psPMR                   PMR to map
+
+@Input          uiSizeBytes             Size in bytes to map
+
+@Input          uiMappingFlags          Memalloc flags for the mapping
+
+@Return         PVRSRV_OK if the PMR was successfully mapped
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+MMU_MapPMRFast(MMU_CONTEXT *psMMUContext,
+               IMG_DEV_VIRTADDR sDevVAddr,
+               const PMR *psPMR,
+               IMG_DEVMEM_SIZE_T uiSizeBytes,
+               PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+               IMG_UINT32 uiLog2PageSize);
+
+/*************************************************************************/ /*!
+@Function       MMU_UnmapPMRFast
+
+@Description    Unmap pages from the MMU as fast as possible.
+                PMR must be non-sparse!
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          sDevVAddrBase           Device virtual address of the 1st page
+
+@Input          ui32PageCount           Number of pages to unmap
+
+@Input          uiLog2PageSize          log2 size of the page
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+MMU_UnmapPMRFast(MMU_CONTEXT *psMMUContext,
+                 IMG_DEV_VIRTADDR sDevVAddrBase,
+                 IMG_UINT32 ui32PageCount,
+                 IMG_UINT32 uiLog2PageSize);
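For the common non-sparse case the fast-path pair above avoids per-page index handling. A brief sketch, assuming a size that is a whole number of 4 KiB pages:

static PVRSRV_ERROR ExampleMapAndUnmapPMRFast(MMU_CONTEXT *psMMUContext,
                                              const PMR *psPMR,
                                              IMG_DEV_VIRTADDR sDevVAddr,
                                              IMG_DEVMEM_SIZE_T uiSizeBytes,
                                              PVRSRV_MEMALLOCFLAGS_T uiFlags)
{
	PVRSRV_ERROR eError;

	eError = MMU_MapPMRFast(psMMUContext, sDevVAddr, psPMR,
	                        uiSizeBytes, uiFlags, 12 /* 4 KiB pages */);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/* The page count mirrors the mapped size at the same page size. */
	MMU_UnmapPMRFast(psMMUContext, sDevVAddr,
	                 (IMG_UINT32)(uiSizeBytes >> 12), 12);
	return PVRSRV_OK;
}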
+
+/*************************************************************************/ /*!
+@Function       MMU_ChangeValidity
+
+@Description    Sets or unsets the valid bit of page table entries for a given
+                address range.
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          sDevVAddr               The device virtual base address of
+                                        the range we want to modify
+
+@Input          uiSizeBytes             The size of the range in bytes
+
+@Input          uiLog2PageSize          Log2 of the used page size
+
+@Input          bMakeValid              Choose to set or unset the valid bit.
+                                        (bMakeValid == IMG_TRUE ) -> SET
+                                        (bMakeValid == IMG_FALSE) -> UNSET
+
+@Input          psPMR                   The PMR backing the allocation.
+                                        Needed for sparse memory, where we must
+                                        check whether a physical address actually
+                                        backs the virtual one.
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+MMU_ChangeValidity(MMU_CONTEXT *psMMUContext,
+                   IMG_DEV_VIRTADDR sDevVAddr,
+                   IMG_DEVMEM_SIZE_T uiSizeBytes,
+                   IMG_UINT32 uiLog2PageSize,
+                   IMG_BOOL bMakeValid,
+                   PMR *psPMR);
+
+/*************************************************************************/ /*!
+@Function       MMU_AcquireBaseAddr
+
+@Description    Acquire the device physical address of the base level MMU object
+
+@Input          psMMUContext            MMU context to operate on
+
+@Output         psPhysAddr              Device physical address of the base level
+                                        MMU object
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+MMU_AcquireBaseAddr(MMU_CONTEXT *psMMUContext, IMG_DEV_PHYADDR *psPhysAddr);
+
+/*************************************************************************/ /*!
+@Function       MMU_AcquireCPUBaseAddr
+
+@Description    Acquire the CPU Virtual Address of the base level MMU object
+
+@Input          psMMUContext            MMU context to operate on
+
+@Output         ppvCPUVAddr             CPU Virtual Address of the base level
+                                        MMU object
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+MMU_AcquireCPUBaseAddr(MMU_CONTEXT *psMMUContext, void **ppvCPUVAddr);
+
+/*************************************************************************/ /*!
+@Function       MMU_ReleaseBaseAddr
+
+@Description    Release the device physical address of the base level MMU object
+
+@Input          psMMUContext            MMU context to operate on
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+MMU_ReleaseBaseAddr(MMU_CONTEXT *psMMUContext);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+/***********************************************************************************/ /*!
+@Function       MMU_SetOSids
+
+@Description    Set the OSid associated with the application (and the MMU Context)
+
+@Input          psMMUContext            MMU context to store the OSid on
+
+@Input          ui32OSid                the OSid in question
+
+@Input          ui32OSidReg             The value that the firmware will assign to the
+                                        registers.
+
+@Input          bOSidAxiProt            Toggles whether the AXI prot bit will be set or
+                                        not.
+@Return         None
+*/
+/***********************************************************************************/
+
+void MMU_SetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32OSid,
+                  IMG_UINT32 ui32OSidReg, IMG_BOOL bOSidAxiProt);
+
+/***********************************************************************************/ /*!
+@Function       MMU_GetOSids
+
+@Description    Retrieve the OSid associated with the MMU context.
+
+@Input          psMMUContext            MMU context in which the OSid is stored
+
+@Output         pui32OSid               The OSid in question
+
+@Output         pui32OSidReg            The OSid that the firmware will assign to the
+                                        registers.
+
+@Output         pbOSidAxiProt           Whether the AXI prot bit will be set or
+                                        not.
+@Return         None
+*/
+/***********************************************************************************/
+
+void MMU_GetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 * pui32OSid,
+                  IMG_UINT32 * pui32OSidReg, IMG_BOOL *pbOSidAxiProt);
+#endif
+
+/*************************************************************************/ /*!
+@Function       MMU_AppendCacheFlags
+
+@Description    Set the cache flags to the bitwise or of themselves and the
+                specified input flags, i.e. ui32CacheFlags |= ui32NewCacheFlags,
+                atomically.
+
+@Input          psMMUContext            MMU context
+
+@Input          ui32NewCacheFlags       Cache flags to append.
+
+@Return         None
+*/
+/*****************************************************************************/
+void MMU_AppendCacheFlags(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32NewCacheFlags);
+
+/*************************************************************************/ /*!
+@Function       MMU_ExchangeCacheFlags
+
+@Description    Exchange MMU context flags with specified value, atomically.
+
+@Input          psMMUContext            MMU context
+
+@Input          ui32NewCacheFlags       Cache flags to set.
+
+@Return         Previous MMU context cache flags.
+*/
+/*****************************************************************************/
+IMG_UINT32 MMU_ExchangeCacheFlags(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32NewCacheFlags);
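A sketch of the typical pattern: producers OR in flags as page tables change, and the flush path atomically swaps them with zero to learn what needs flushing. The flag bit below is a placeholder, not a real driver define:

static void ExampleFlushPendingMMUCacheOps(MMU_CONTEXT *psMMUContext)
{
	IMG_UINT32 ui32Pending;

	/* Producer side (e.g. after editing a page table): record work to do.
	 * 0x1U is a placeholder flag bit for illustration only. */
	MMU_AppendCacheFlags(psMMUContext, 0x1U);

	/* Consumer side: take and clear the accumulated flags atomically. */
	ui32Pending = MMU_ExchangeCacheFlags(psMMUContext, 0);
	if (ui32Pending != 0)
	{
		/* ... issue the matching MMU cache invalidation ... */
	}
}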
+
+/*************************************************************************/ /*!
+@Function       MMU_CheckFaultAddress
+
+@Description    Check the specified MMU context to see if the provided address
+                should be valid
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          psDevVAddr              Address to check
+
+@Output         psOutFaultData          Filled with the fault details found by the check
+
+@Return         None
+*/
+/*****************************************************************************/
+void MMU_CheckFaultAddress(MMU_CONTEXT *psMMUContext,
+                           IMG_DEV_VIRTADDR *psDevVAddr,
+                           MMU_FAULT_DATA *psOutFaultData);
+
+/*************************************************************************/ /*!
+@Function       MMU_IsVDevAddrValid
+@Description    Checks whether the given address is valid.
+@Input          psMMUContext MMU context to operate on
+@Input          uiLog2PageSize Log2 page size
+@Input          sDevVAddr Address to check
+@Return         IMG_TRUE if the address is valid
+*/ /**************************************************************************/
+IMG_BOOL MMU_IsVDevAddrValid(MMU_CONTEXT *psMMUContext,
+                             IMG_UINT32 uiLog2PageSize,
+                             IMG_DEV_VIRTADDR sDevVAddr);
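A sketch combining the two checks above, as a fault handler might do when deciding whether a reported address should have been mapped; the function name and page size are illustrative:

static void ExampleInspectFault(MMU_CONTEXT *psMMUContext,
                                IMG_DEV_VIRTADDR sFaultVAddr)
{
	MMU_FAULT_DATA sFaultData;

	if (!MMU_IsVDevAddrValid(psMMUContext, 12 /* 4 KiB pages */, sFaultVAddr))
	{
		/* Walk the page tables and record where the translation broke down. */
		MMU_CheckFaultAddress(psMMUContext, &sFaultVAddr, &sFaultData);
	}
}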
+
+#if defined(PDUMP)
+
+/*************************************************************************/ /*!
+@Function       MMU_ContextDerivePCPDumpSymAddr
+
+@Description    Derives a PDump Symbolic address for the top level MMU object
+
+@Input          psMMUContext                    MMU context to operate on
+
+@Input          pszPDumpSymbolicNameBuffer      Buffer to write the PDump symbolic
+                                                address to
+
+@Input          uiPDumpSymbolicNameBufferSize   Size of the buffer
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+MMU_ContextDerivePCPDumpSymAddr(MMU_CONTEXT *psMMUContext,
+                                IMG_CHAR *pszPDumpSymbolicNameBuffer,
+                                size_t uiPDumpSymbolicNameBufferSize);
+
+/*************************************************************************/ /*!
+@Function       MMU_PDumpWritePageCatBase
+
+@Description    PDump write of the top level MMU object to a device register
+
+@Input          psMMUContext        MMU context to operate on
+
+@Input          pszSpaceName        PDump name of the mem/reg space
+
+@Input          uiOffset            Offset to write the address to
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext,
+                                       const IMG_CHAR *pszSpaceName,
+                                       IMG_DEVMEM_OFFSET_T uiOffset,
+                                       IMG_UINT32 ui32WordSize,
+                                       IMG_UINT32 ui32AlignShift,
+                                       IMG_UINT32 ui32Shift,
+                                       PDUMP_FLAGS_T uiPdumpFlags);
+
+/*************************************************************************/ /*!
+@Function       MMU_AcquirePDumpMMUContext
+
+@Description    Acquire a reference to the PDump MMU context for this MMU
+                context
+
+@Input          psMMUContext            MMU context to operate on
+
+@Output         pui32PDumpMMUContextID  PDump MMU context ID
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+MMU_AcquirePDumpMMUContext(MMU_CONTEXT *psMMUContext,
+                           IMG_UINT32 *pui32PDumpMMUContextID,
+                           IMG_UINT32 ui32PDumpFlags);
+
+/*************************************************************************/ /*!
+@Function       MMU_ReleasePDumpMMUContext
+
+@Description    Release a reference to the PDump MMU context for this MMU context
+
+@Input          psMMUContext            MMU context to operate on
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+MMU_ReleasePDumpMMUContext(MMU_CONTEXT *psMMUContext,
+                           IMG_UINT32 ui32PDumpFlags);
+#else /* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(MMU_PDumpWritePageCatBase)
+#endif
+static INLINE void
+MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext,
+                          const IMG_CHAR *pszSpaceName,
+                          IMG_DEVMEM_OFFSET_T uiOffset,
+                          IMG_UINT32 ui32WordSize,
+                          IMG_UINT32 ui32AlignShift,
+                          IMG_UINT32 ui32Shift,
+                          PDUMP_FLAGS_T uiPdumpFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(psMMUContext);
+       PVR_UNREFERENCED_PARAMETER(pszSpaceName);
+       PVR_UNREFERENCED_PARAMETER(uiOffset);
+       PVR_UNREFERENCED_PARAMETER(ui32WordSize);
+       PVR_UNREFERENCED_PARAMETER(ui32AlignShift);
+       PVR_UNREFERENCED_PARAMETER(ui32Shift);
+       PVR_UNREFERENCED_PARAMETER(uiPdumpFlags);
+}
+#endif /* PDUMP */
+
+void RGXMapBRN71422TargetPhysicalAddress(MMU_CONTEXT *psMMUContext);
+
+#endif /* #ifdef MMU_COMMON_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/opaque_types.h b/drivers/gpu/drm/img/img-rogue/services/server/include/opaque_types.h
new file mode 100644
index 0000000..766bc22
--- /dev/null
@@ -0,0 +1,56 @@
+/*************************************************************************/ /*!
+@File
+@Title          Opaque Types
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines opaque types for various services types
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef SERVICES_OPAQUE_TYPES_H
+#define SERVICES_OPAQUE_TYPES_H
+
+#include "img_defs.h"
+#include "img_types.h"
+
+typedef struct _PVRSRV_DEVICE_NODE_ *PPVRSRV_DEVICE_NODE;
+typedef const struct _PVRSRV_DEVICE_NODE_ *PCPVRSRV_DEVICE_NODE;
+
+#endif /* SERVICES_OPAQUE_TYPES_H */
+
+/******************************************************************************
+ End of file (opaque_types.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/os_srvinit_param.h b/drivers/gpu/drm/img/img-rogue/services/server/include/os_srvinit_param.h
new file mode 100644
index 0000000..a4d77e3
--- /dev/null
@@ -0,0 +1,328 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services initialisation parameters header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Services initialisation parameter support for the Linux kernel.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef OS_SRVINIT_PARAM_H
+#define OS_SRVINIT_PARAM_H
+
+#if defined(__linux__) && defined(__KERNEL__)
+#include "km_apphint.h"
+#include "km_apphint_defs.h"
+
+/* Supplied to SrvInitParamGetXXX() functions when the param/AppHint is
+ * applicable to all devices and not a specific device. Typically used
+ * for server-wide build and module AppHints.
+ */
+#define INITPARAM_NO_DEVICE (NULL)
+
+#define SrvInitParamOpen() NULL
+#define SrvInitParamClose(pvState) ((void)(pvState))
+
+#define SrvInitParamGetBOOL(device, state, name, value) \
+       ((void) pvr_apphint_get_bool(device, APPHINT_ID_ ## name, &value))
+
+#define SrvInitParamGetUINT32(device, state, name, value) \
+       ((void) pvr_apphint_get_uint32(device, APPHINT_ID_ ## name, &value))
+
+#define SrvInitParamGetUINT64(device, state, name, value) \
+       ((void) pvr_apphint_get_uint64(device, APPHINT_ID_ ## name, &value))
+
+#define SrvInitParamGetSTRING(device, state, name, buffer, size) \
+       ((void) pvr_apphint_get_string(device, APPHINT_ID_ ## name, buffer, size))
+
+#define SrvInitParamGetUINT32BitField(device, state, name, value) \
+       ((void) pvr_apphint_get_uint32(device, APPHINT_ID_ ## name, &value))
+
+#define SrvInitParamGetUINT32List(device, state, name, value) \
+       ((void) pvr_apphint_get_uint32(device, APPHINT_ID_ ## name, &value))
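On Linux these macros simply forward to the AppHint layer. A usage sketch, assuming a hypothetical AppHint named "MyHint" that would have to exist in km_apphint_defs.h for real code; the state handle is unused on this path but kept for portability:

static IMG_UINT32 ExampleReadMyHint(void)
{
	IMG_UINT32 ui32Value = 0;
	void *pvState = SrvInitParamOpen();	/* NULL on Linux */

	/* Expands to pvr_apphint_get_uint32(INITPARAM_NO_DEVICE,
	 * APPHINT_ID_MyHint, &ui32Value); "MyHint" is hypothetical. */
	SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvState, MyHint, ui32Value);

	SrvInitParamClose(pvState);
	return ui32Value;
}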
+
+#else  /* defined(__linux__) && defined(__KERNEL__) */
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+#include "img_types.h"
+
+/*! Lookup item. */
+typedef struct
+{
+       const IMG_CHAR *pszValue;       /*!< looked up name */
+       IMG_UINT32 ui32Value;           /*!< looked up value */
+} SRV_INIT_PARAM_UINT32_LOOKUP;
+
+/*************************************************************************/ /*!
+@Brief          SrvInitParamOpen
+
+@Description    Establish a connection to the Parameter resource store which is
+                used to hold configuration information associated with the
+                server instance.
+
+@Return         (void *) Handle to Parameter resource store to be used for
+                subsequent parameter value queries
+
+*/ /**************************************************************************/
+void *SrvInitParamOpen(void);
+
+/*************************************************************************/ /*!
+@Brief          SrvInitParamClose
+
+@Description    Remove a pre-existing connection to the Parameter resource store
+                given by 'pvState' and release any temporary storage associated
+                with the 'pvState' mapping handle
+
+@Input          pvState             Handle to Parameter resource store
+
+*/ /**************************************************************************/
+void SrvInitParamClose(void *pvState);
+
+/*************************************************************************/ /*!
+@Brief          _SrvInitParamGetBOOL
+
+@Description    Get the current BOOL value for parameter 'pszName' from the
+                Parameter resource store attached to 'pvState'
+
+@Input          pvState             Handle to Parameter resource store
+
+@Input          pszName             Name of parameter to look-up
+
+@Input          pbDefault           Value to return if parameter not found
+
+@Output         pbValue             Value of parameter 'pszName' or 'pbDefault'
+                                    if not found
+
+*/ /**************************************************************************/
+void _SrvInitParamGetBOOL(
+       void *pvState,
+       const IMG_CHAR *pszName,
+       const IMG_BOOL *pbDefault,
+       IMG_BOOL *pbValue
+);
+
+/*! Get the BOOL value for parameter 'name' from the parameter resource store
+ *  attached to 'state'. */
+#define SrvInitParamGetBOOL(device, state, name, value) \
+               _SrvInitParamGetBOOL(state, # name, & __SrvInitParam_ ## name, &(value))
+
+/*! Initialise FLAG type parameter identified by 'name'. */
+#define SrvInitParamInitFLAG(name, defval, unused) \
+       static const IMG_BOOL __SrvInitParam_ ## name = defval;
+
+/*! Initialise BOOL type parameter identified by 'name'. */
+#define SrvInitParamInitBOOL(name, defval, unused) \
+       static const IMG_BOOL __SrvInitParam_ ## name = defval;
+
+/*************************************************************************/ /*!
+@Brief          _SrvInitParamGetUINT32
+
+@Description    Get the current IMG_UINT32 value for parameter 'pszName'
+                from the Parameter resource store attached to 'pvState'
+
+@Input          pvState             Handle to Parameter resource store
+
+@Input          pszName             Name of parameter to look-up
+
+@Input          pui32Default        Value to return if parameter not found
+
+@Output         pui32Value          Value of parameter 'pszName' or
+                                    'pui32Default' if not found
+
+*/ /**************************************************************************/
+void _SrvInitParamGetUINT32(
+       void *pvState,
+       const IMG_CHAR *pszName,
+       const IMG_UINT32 *pui32Default,
+       IMG_UINT32 *pui32Value
+);
+
+/*! Get the UINT32 value for parameter 'name' from the parameter resource store
+ *  attached to 'state'. */
+#define SrvInitParamGetUINT32(device, state, name, value) \
+               _SrvInitParamGetUINT32(state, # name, & __SrvInitParam_ ## name, &(value))
+
+/*! Initialise UINT32 type parameter identified by 'name'. */
+#define SrvInitParamInitUINT32(name, defval, unused) \
+       static const IMG_UINT32 __SrvInitParam_ ## name = defval;
+
+/*! Initialise UINT64 type parameter identified by 'name'. */
+#define SrvInitParamInitUINT64(name, defval, unused) \
+       static const IMG_UINT64 __SrvInitParam_ ## name = defval;
+
+/*! @cond Doxygen_Suppress */
+#define SrvInitParamUnreferenced(name) \
+               PVR_UNREFERENCED_PARAMETER( __SrvInitParam_ ## name )
+/*! @endcond */
+
+/*************************************************************************/ /*!
+@Brief          _SrvInitParamGetUINT32BitField
+
+@Description    Get the current IMG_UINT32 bitfield value for parameter
+                'pszBaseName' from the Parameter resource store
+                attached to 'pvState'
+
+@Input          pvState             Handle to Parameter resource store
+
+@Input          pszBaseName         Bitfield parameter name to search for
+
+@Input          uiDefault           Default return value if parameter not found
+
+@Input          psLookup            Bitfield array to traverse
+
+@Input          uiSize              number of elements in 'psLookup'
+
+@Output         puiValue            Value of bitfield or 'uiDefault' if
+                                    parameter not found
+*/ /**************************************************************************/
+void _SrvInitParamGetUINT32BitField(
+       void *pvState,
+       const IMG_CHAR *pszBaseName,
+       IMG_UINT32 uiDefault,
+       const SRV_INIT_PARAM_UINT32_LOOKUP *psLookup,
+       IMG_UINT32 uiSize,
+       IMG_UINT32 *puiValue
+);
+
+/*! Initialise UINT32 bitfield type parameter identified by 'name' with
+ *  'inival' value and 'lookup' look up array. */
+#define SrvInitParamInitUINT32Bitfield(name, inival, lookup) \
+       static IMG_UINT32 __SrvInitParam_ ## name = inival; \
+       static SRV_INIT_PARAM_UINT32_LOOKUP * \
+               __SrvInitParamLookup_ ## name = &lookup[0]; \
+       static const IMG_UINT32 __SrvInitParamSize_ ## name = \
+                                       ARRAY_SIZE(lookup);
+
+/*! Get the UINT32 bitfield value for parameter 'name' from the parameter
+ *  resource store attached to 'state'. */
+#define SrvInitParamGetUINT32BitField(device, state, name, value) \
+               _SrvInitParamGetUINT32BitField(state, # name, __SrvInitParam_ ## name, __SrvInitParamLookup_ ## name, __SrvInitParamSize_ ## name, &(value))
+
+/*************************************************************************/ /*!
+@Brief          _SrvInitParamGetUINT32List
+
+@Description    Get the current IMG_UINT32 list value for the specified
+                parameter 'pszName' from the Parameter resource store
+                attached to 'pvState'
+
+@Input          pvState             Handle to Parameter resource store
+
+@Input          pszName             Parameter list name to search for
+
+@Input          uiDefault           Default value to return if 'pszName' is
+                                    not set within 'pvState'
+
+@Input          psLookup            parameter list to traverse
+
+@Input          uiSize              number of elements in 'psLookup' list
+
+@Output         puiValue            value of located list element or
+                                    'uiDefault' if parameter not found
+
+*/ /**************************************************************************/
+void _SrvInitParamGetUINT32List(
+       void *pvState,
+       const IMG_CHAR *pszName,
+       IMG_UINT32 uiDefault,
+       const SRV_INIT_PARAM_UINT32_LOOKUP *psLookup,
+       IMG_UINT32 uiSize,
+       IMG_UINT32 *puiValue
+);
+
+/*! Get the UINT32 list value for parameter 'name' from the parameter
+ *  resource store attached to 'state'. */
+#define SrvInitParamGetUINT32List(device, state, name, value) \
+               _SrvInitParamGetUINT32List(state, # name, __SrvInitParam_ ## name, __SrvInitParamLookup_ ## name, __SrvInitParamSize_ ## name, &(value))
+
+/*! Initialise UINT32 list type parameter identified by 'name' with
+ *  'defval' default value and 'lookup' look up list. */
+#define SrvInitParamInitUINT32List(name, defval, lookup) \
+       static IMG_UINT32 __SrvInitParam_ ## name = defval; \
+       static SRV_INIT_PARAM_UINT32_LOOKUP * \
+               __SrvInitParamLookup_ ## name = &lookup[0]; \
+       static const IMG_UINT32 __SrvInitParamSize_ ## name = \
+                                       ARRAY_SIZE(lookup);
+
+/*************************************************************************/ /*!
+@Brief          _SrvInitParamGetSTRING
+
+@Description    Get the contents of the specified parameter string 'pszName'
+                from the Parameter resource store attached to 'pvState'
+
+@Input          pvState             Handle to Parameter resource store
+
+@Input          pszName             Parameter string name to search for
+
+@Input          psDefault           Default string to return if 'pszName' is
+                                    not set within 'pvState'
+
+@Input          size                Size of output 'pBuffer'
+
+@Output         pBuffer             Output copy of 'pszName' contents or
+                                    copy of 'psDefault' if 'pszName' is not
+                                    set within 'pvState'
+
+*/ /**************************************************************************/
+void _SrvInitParamGetSTRING(
+       void *pvState,
+       const IMG_CHAR *pszName,
+       const IMG_CHAR *psDefault,
+       IMG_CHAR *pBuffer,
+       size_t size
+);
+
+/*! Initialise STRING type parameter identified by 'name' with 'defval' default
+ *  value. */
+#define SrvInitParamInitSTRING(name, defval, unused) \
+       static const IMG_CHAR *__SrvInitParam_ ## name = defval;
+
+/*! Get the STRING value for parameter 'name' from the parameter resource store
+ *  attached to 'state'. */
+#define SrvInitParamGetSTRING(device, state, name, buffer, size) \
+               _SrvInitParamGetSTRING(state, # name,  __SrvInitParam_ ## name, buffer, size)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* defined(__linux__) && defined(__KERNEL__) */
+
+#endif /* OS_SRVINIT_PARAM_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/osconnection_server.h b/drivers/gpu/drm/img/img-rogue/services/server/include/osconnection_server.h
new file mode 100644
index 0000000..28a6dd3
--- /dev/null
@@ -0,0 +1,133 @@
+/**************************************************************************/ /*!
+@File
+@Title          Server side connection management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    API for OS specific callbacks from server side connection
+                management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+#ifndef OSCONNECTION_SERVER_H
+#define OSCONNECTION_SERVER_H
+
+#include "handle.h"
+#include "osfunc.h"
+
+/*! Function not implemented definition. */
+#define OSCONNECTION_SERVER_NOT_IMPLEMENTED 0
+/*! Assert used for OSCONNECTION_SERVER_NOT_IMPLEMENTED. */
+#define OSCONNECTION_SERVER_NOT_IMPLEMENTED_ASSERT() PVR_ASSERT(OSCONNECTION_SERVER_NOT_IMPLEMENTED)
+
+#if defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS)
+PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOSData);
+PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData);
+
+PVRSRV_ERROR OSConnectionSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase);
+
+PVRSRV_DEVICE_NODE* OSGetDevNode(CONNECTION_DATA *psConnection);
+
+#else  /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSConnectionPrivateDataInit)
+#endif
+/*************************************************************************/ /*!
+@Function       OSConnectionPrivateDataInit
+@Description    Allocates and initialises any OS-specific private data
+                relating to a connection.
+                Called from PVRSRVCommonConnectionConnect().
+@Input          pvOSData            pointer to any OS private data
+@Output         phOsPrivateData     handle to the created connection
+                                    private data
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+static INLINE PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOSData)
+{
+       PVR_UNREFERENCED_PARAMETER(phOsPrivateData);
+       PVR_UNREFERENCED_PARAMETER(pvOSData);
+
+       OSCONNECTION_SERVER_NOT_IMPLEMENTED_ASSERT();
+
+       return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSConnectionPrivateDataDeInit)
+#endif
+/*************************************************************************/ /*!
+@Function       OSConnectionPrivateDataDeInit
+@Description    Frees previously allocated OS-specific private data
+                relating to a connection.
+@Input          hOsPrivateData      handle to the connection private data
+                                    to be freed
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+static INLINE PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData)
+{
+       PVR_UNREFERENCED_PARAMETER(hOsPrivateData);
+
+       OSCONNECTION_SERVER_NOT_IMPLEMENTED_ASSERT();
+
+       return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSConnectionSetHandleOptions)
+#endif
+static INLINE PVRSRV_ERROR OSConnectionSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase)
+{
+       PVR_UNREFERENCED_PARAMETER(psHandleBase);
+
+       OSCONNECTION_SERVER_NOT_IMPLEMENTED_ASSERT();
+
+       return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSGetDevNode)
+#endif
+static INLINE PVRSRV_DEVICE_NODE* OSGetDevNode(CONNECTION_DATA *psConnection)
+{
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+
+       OSCONNECTION_SERVER_NOT_IMPLEMENTED_ASSERT();
+
+       return NULL;
+}
+#endif /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */
+
+
+#endif /* OSCONNECTION_SERVER_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/osdi_impl.h b/drivers/gpu/drm/img/img-rogue/services/server/include/osdi_impl.h
new file mode 100644 (file)
index 0000000..55d9470
--- /dev/null
@@ -0,0 +1,201 @@
+/*************************************************************************/ /*!
+@File
+@Title          Functions and types for creating Debug Info implementations.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef OSDI_IMPL_H
+#define OSDI_IMPL_H
+
+#include <linux/stdarg.h>
+
+#include "di_common.h"
+#include "pvrsrv_error.h"
+
+/*! Implementation callbacks. Those operations are performed on native
+ * implementation handles. */
+typedef struct OSDI_IMPL_ENTRY_CB
+{
+    /*! @Function pfnWrite
+     *
+     * @Description
+     * Writes the binary data of the DI entry to the output sink, whatever that
+     * may be for the DI implementation.
+     *
+     * @Input pvNativeHandle native implementation handle
+     * @Input pvData data
+     * @Input uiSize pvData length
+     */
+    void (*pfnWrite)(void  *pvNativeHandle, const void *pvData,
+                     IMG_UINT32 uiSize);
+
+    /*! @Function pfnVPrintf
+     *
+     * @Description
+     * Implementation of the 'vprintf' operation.
+     *
+     * @Input pvNativeHandle native implementation handle
+     * @Input pszFmt NUL-terminated format string
+     * @Input pArgs variable length argument list
+     */
+    void (*pfnVPrintf)(void *pvNativeHandle, const IMG_CHAR *pszFmt, va_list pArgs);
+
+    /*! @Function pfnPuts
+     *
+     * @Description
+     * Implementation of the 'puts' operation.
+     *
+     * @Input pvNativeHandle native implementation handle
+     * @Input pszStr NUL-terminated string
+     */
+    void (*pfnPuts)(void *pvNativeHandle, const IMG_CHAR *pszStr);
+
+    /*! @Function pfnHasOverflowed
+     *
+     * @Description
+     * Checks if the native implementation's buffer has overflowed.
+     *
+     * @Input pvNativeHandle native implementation handle
+     */
+    IMG_BOOL (*pfnHasOverflowed)(void *pvNativeHandle);
+} OSDI_IMPL_ENTRY_CB;
+
+/*! Debug Info entry specialisation. */
+struct OSDI_IMPL_ENTRY
+{
+    /*! Pointer to the private data. The data originates from DICreateEntry()
+     *  function. */
+    void *pvPrivData;
+    /*! Pointer to the implementation native handle. */
+    void *pvNative;
+    /*! Implementation entry callbacks. */
+    OSDI_IMPL_ENTRY_CB *psCb;
+}; /* OSDI_IMPL_ENTRY is already typedef-ed in di_common.h */
+
+/*! Debug Info implementation callbacks. */
+typedef struct OSDI_IMPL_CB
+{
+    /*! Initialise implementation callback.
+     */
+    PVRSRV_ERROR (*pfnInit)(void);
+
+    /*! De-initialise implementation callback.
+     */
+    void (*pfnDeInit)(void);
+
+    /*! @Function pfnCreateEntry
+     *
+     * @Description
+     * Creates an entry of type eType named pszName in the pvNativeGroup
+     * parent group. What an entry is depends on the implementation,
+     * e.g. a file in DebugFS.
+     *
+     * @Input pszName: name of the entry
+     * @Input eType: type of the entry
+     * @Input psIterCb: iterator implementation for the entry
+     * @Input pvPrivData: data that will be passed to the iterator callbacks
+     *                    in OSDI_IMPL_ENTRY - it can be retrieved by calling
+     *                    DIGetPrivData() function
+     * @Input pvNativeGroup: implementation specific handle to the parent group
+     *
+     * @Output pvNativeEntry: implementation specific handle to the entry
+     *
+     * return PVRSRV_ERROR error code
+     */
+    PVRSRV_ERROR (*pfnCreateEntry)(const IMG_CHAR *pszName,
+                                   DI_ENTRY_TYPE eType,
+                                   const DI_ITERATOR_CB *psIterCb,
+                                   void *pvPrivData,
+                                   void *pvNativeGroup,
+                                   void **pvNativeEntry);
+
+    /*! @Function pfnDestroyEntry
+     *
+     * @Description
+     * Destroys native entry.
+     *
+     * @Input psNativeEntry: handle to the entry
+     */
+    void (*pfnDestroyEntry)(void *psNativeEntry);
+
+    /*! @Function pfnCreateGroup
+     *
+     * @Description
+     * Creates a group named pszName in the psNativeParentGroup parent group.
+     * What a group is depends on the implementation,
+     * e.g. a directory in DebugFS.
+     *
+     * @Input pszName: name of the entry
+     * @Input psNativeParentGroup: implementation specific handle to the parent
+     *                             group
+     *
+     * @Output psNativeGroup: implementation specific handle to the group
+     *
+     * return PVRSRV_ERROR error code
+     */
+    PVRSRV_ERROR (*pfnCreateGroup)(const IMG_CHAR *pszName,
+                                   void *psNativeParentGroup,
+                                   void **psNativeGroup);
+
+    /*! @Function pfnDestroyGroup
+     *
+     * @Description
+     * Destroys native group.
+     *
+     * @Input psNativeGroup: handle to the group
+     */
+    void (*pfnDestroyGroup)(void *psNativeGroup);
+} OSDI_IMPL_CB;
+
+/*! @Function DIRegisterImplementation
+ *
+ * @Description
+ * Registers a Debug Info implementation with the framework. The framework
+ * takes ownership of the implementation and will clean up its resources when
+ * it is de-initialised.
+ *
+ * @Input pszName: name of the implementation
+ * @Input psImplCb: implementation callbacks
+ *
+ * @Return PVRSRV_ERROR error code
+ */
+PVRSRV_ERROR DIRegisterImplementation(const IMG_CHAR *pszName,
+                                      const OSDI_IMPL_CB *psImplCb);
+
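A skeletal registration sketch wiring no-op callbacks into the structure above. The callback bodies and the "example" name are placeholders; a real implementation must also provide the create/destroy entry and group callbacks, which are omitted here only to keep the sketch short:

static PVRSRV_ERROR ExampleInit(void) { return PVRSRV_OK; }
static void ExampleDeInit(void) { }

static const OSDI_IMPL_CB gsExampleImplCb = {
	.pfnInit   = ExampleInit,
	.pfnDeInit = ExampleDeInit,
	/* .pfnCreateEntry / .pfnDestroyEntry / .pfnCreateGroup / .pfnDestroyGroup
	 * would create and tear down the native objects (e.g. DebugFS files). */
};

static PVRSRV_ERROR ExampleDIImplRegister(void)
{
	return DIRegisterImplementation("example", &gsExampleImplCb);
}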
+#endif /* OSDI_IMPL_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/osfunc.h b/drivers/gpu/drm/img/img-rogue/services/server/include/osfunc.h
new file mode 100644 (file)
index 0000000..4226f77
--- /dev/null
@@ -0,0 +1,1696 @@
+/*************************************************************************/ /*!
+@File
+@Title          OS functions header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS specific API definitions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifdef DEBUG_RELEASE_BUILD
+#pragma optimize( "", off )
+#define DEBUG          1
+#endif
+
+#ifndef OSFUNC_H
+/*! @cond Doxygen_Suppress */
+#define OSFUNC_H
+/*! @endcond */
+
+#if defined(__linux__) && defined(__KERNEL__)
+#include "kernel_nospec.h"
+#if !defined(NO_HARDWARE)
+#include <linux/io.h>
+
+#endif
+#endif
+
+#include <linux/stdarg.h>
+
+#if defined(__QNXNTO__)
+#include <stdio.h>
+#include <string.h>
+#endif
+
+#if defined(INTEGRITY_OS)
+#include <stdio.h>
+#include <string.h>
+#endif
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "device.h"
+#include "pvrsrv_device.h"
+#include "cache_ops.h"
+#include "osfunc_common.h"
+#if defined(SUPPORT_DMA_TRANSFER)
+#include "dma_km.h"
+#include "pmr.h"
+#endif
+
+/******************************************************************************
+ * Static defines
+ *****************************************************************************/
+/*!
+ * Returned by OSGetCurrentProcessID() and OSGetCurrentThreadID() if the OS
+ * is currently operating in the interrupt context.
+ */
+#define KERNEL_ID                      0xffffffffL
+
+#if defined(__linux__) && defined(__KERNEL__)
+#define OSConfineArrayIndexNoSpeculation(index, size) array_index_nospec((index), (size))
+#elif defined(__QNXNTO__)
+#define OSConfineArrayIndexNoSpeculation(index, size) (index)
+#define PVRSRV_MISSING_NO_SPEC_IMPL
+#elif defined(INTEGRITY_OS)
+#define OSConfineArrayIndexNoSpeculation(index, size) (index)
+#define PVRSRV_MISSING_NO_SPEC_IMPL
+#else
+/*************************************************************************/ /*!
+@Function       OSConfineArrayIndexNoSpeculation
+@Description    This macro aims to avoid code exposure to Cache Timing
+                Side-Channel Mechanisms which rely on speculative code
+                execution (Variant 1). It does so by ensuring a value to be
+                used as an array index will be set to zero if outside of the
+                bounds of the array, meaning any speculative execution of code
+                which uses this suitably adjusted index value will not then
+                attempt to load data from memory outside of the array bounds.
+                Code calling this macro must still first verify that the
+                original unmodified index value is within the bounds of the
+                array, and should then only use the modified value returned
+                by this function when accessing the array itself.
+                NB. If no OS-specific implementation of this macro is
+                defined, the original index is returned unmodified and no
+                protection against the potential exploit is provided.
+@Input          index    The original array index value that would be used to
+                         access the array.
+@Input          size     The number of elements in the array being accessed.
+@Return         The value to use for the array index, modified so that it
+                remains within array bounds.
+*/ /**************************************************************************/
+#define OSConfineArrayIndexNoSpeculation(index, size) (index)
+#if !defined(DOXYGEN)
+#define PVRSRV_MISSING_NO_SPEC_IMPL
+#endif
+#endif
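+
+/* Illustrative usage sketch: the caller must still bounds-check the untrusted
+ * index first and only use the value returned by the macro for the actual
+ * array access. 'aui32Table', 'TABLE_SIZE' and 'ui32UserIndex' are
+ * placeholder names.
+ *
+ *     if (ui32UserIndex < TABLE_SIZE)
+ *     {
+ *         ui32UserIndex = OSConfineArrayIndexNoSpeculation(ui32UserIndex,
+ *                                                          TABLE_SIZE);
+ *         ui32Value = aui32Table[ui32UserIndex];
+ *     }
+ */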
+
+/*************************************************************************/ /*!
+@Function       OSClockns64
+@Description    This function returns the number of ticks since system boot
+                expressed in nanoseconds. Unlike OSClockns, OSClockns64 has
+                a near 64-bit range.
+@Return         The 64-bit clock value, in nanoseconds.
+*/ /**************************************************************************/
+IMG_UINT64 OSClockns64(void);
+
+/*************************************************************************/ /*!
+@Function       OSClockus64
+@Description    This function returns the number of ticks since system boot
+                expressed in microseconds. Unlike OSClockus, OSClockus64 has
+                a near 64-bit range.
+@Return         The 64-bit clock value, in microseconds.
+*/ /**************************************************************************/
+IMG_UINT64 OSClockus64(void);
+
+/*************************************************************************/ /*!
+@Function       OSClockus
+@Description    This function returns the number of ticks since system boot
+                in microseconds.
+@Return         The 32-bit clock value, in microseconds.
+*/ /**************************************************************************/
+IMG_UINT32 OSClockus(void);
+
+/*************************************************************************/ /*!
+@Function       OSClockms
+@Description    This function returns the number of ticks since system boot
+                in milliseconds.
+@Return         The 32-bit clock value, in milliseconds.
+*/ /**************************************************************************/
+IMG_UINT32 OSClockms(void);
+
+/*************************************************************************/ /*!
+@Function       OSClockMonotonicns64
+@Description    This function returns a clock value based on the system
+                monotonic clock.
+@Output         pui64Time     The 64-bit clock value, in nanoseconds.
+@Return         Error Code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSClockMonotonicns64(IMG_UINT64 *pui64Time);
+
+/*************************************************************************/ /*!
+@Function       OSClockMonotonicus64
+@Description    This function returns a clock value based on the system
+                monotonic clock.
+@Output         pui64Time     The 64-bit clock value, in microseconds.
+@Return         Error Code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSClockMonotonicus64(IMG_UINT64 *pui64Time);
+
+/*************************************************************************/ /*!
+@Function       OSClockMonotonicRawns64
+@Description    This function returns a clock value based on the system
+                monotonic raw clock.
+@Return         64bit ns timestamp
+*/ /**************************************************************************/
+IMG_UINT64 OSClockMonotonicRawns64(void);
+
+/*************************************************************************/ /*!
+@Function       OSClockMonotonicRawus64
+@Description    This function returns a clock value based on the system
+                monotonic raw clock.
+@Return         64bit us timestamp
+*/ /**************************************************************************/
+IMG_UINT64 OSClockMonotonicRawus64(void);
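+
+/* Illustrative usage sketch: timing an interval with the 64-bit boot-time
+ * clock; 'DoWork' is a placeholder.
+ *
+ *     IMG_UINT64 ui64Start = OSClockns64();
+ *     DoWork();
+ *     IMG_UINT64 ui64ElapsedNs = OSClockns64() - ui64Start;
+ */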
+
+/*************************************************************************/ /*!
+@Function       OSGetPageSize
+@Description    This function returns the page size.
+                If the OS is not using memory mappings it should return a
+                default value of 4096.
+@Return         The size of a page, in bytes.
+*/ /**************************************************************************/
+size_t OSGetPageSize(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetPageShift
+@Description    This function returns the page size expressed as a power of
+                two. A number of pages, left-shifted by this value, gives the
+                equivalent size in bytes.
+                If the OS is not using memory mappings it should return a
+                default value of 12.
+@Return         The page size expressed as a power of two.
+*/ /**************************************************************************/
+size_t OSGetPageShift(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetPageMask
+@Description    This function returns a bitmask that may be applied to an
+                address to mask off the least-significant bits so as to
+                leave the start address of the page containing that address.
+@Return         The page mask.
+*/ /**************************************************************************/
+size_t OSGetPageMask(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetOrder
+@Description    This function returns the order of power of two for a given
+                size. E.g. for a uSize of 4096 bytes the function would
+                return 12 (4096 = 2^12).
+@Input          uSize     The size in bytes.
+@Return         The order of power of two.
+*/ /**************************************************************************/
+size_t OSGetOrder(size_t uSize);
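+
+/* Illustrative usage sketch: the page helpers are expected to be mutually
+ * consistent, e.g. on a system with 4096-byte pages; 'uiAddr' is a
+ * placeholder.
+ *
+ *     size_t uiSize  = OSGetPageSize();      (4096)
+ *     size_t uiShift = OSGetPageShift();     (12)
+ *     size_t uiOrder = OSGetOrder(uiSize);   (12)
+ *     size_t uiPageStart = uiAddr & OSGetPageMask();
+ */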
+
+/*************************************************************************/ /*!
+@Function       OSGetRAMSize
+@Description    This function returns the total amount of GPU-addressable
+                memory provided by the system. In other words, after loading
+                the driver this would be the largest allocation an
+                application would reasonably expect to be able to make.
+                Note that this function is not expected to return the
+                currently available memory but the amount which would be
+                available on startup.
+@Return         Total GPU-addressable memory size, in bytes.
+*/ /**************************************************************************/
+IMG_UINT64 OSGetRAMSize(void);
+
+/*************************************************************************/ /*!
+@Description    Pointer to a Mid-level Interrupt Service Routine (MISR).
+@Input  pvData  Pointer to MISR specific data.
+*/ /**************************************************************************/
+typedef void (*PFN_MISR)(void *pvData);
+
+/*************************************************************************/ /*!
+@Description    Pointer to a thread entry point function.
+@Input  pvData  Pointer to thread specific data.
+*/ /**************************************************************************/
+typedef void (*PFN_THREAD)(void *pvData);
+
+/*************************************************************************/ /*!
+@Function       OSChangeSparseMemCPUAddrMap
+@Description    This function changes the CPU mapping of the underlying
+                sparse allocation. It is used by a PMR 'factory'
+                implementation if that factory supports sparse
+                allocations.
+@Input          psPageArray        array representing the pages in the
+                                   sparse allocation
+@Input          sCpuVAddrBase      the virtual base address of the sparse
+                                   allocation ('first' page)
+@Input          sCpuPAHeapBase     the physical address of the virtual
+                                   base address 'sCpuVAddrBase'
+@Input          ui32AllocPageCount the number of pages referenced in
+                                   'pai32AllocIndices'
+@Input          pai32AllocIndices  list of indices of pages within
+                                   'psPageArray' that we now want to
+                                   allocate and map
+@Input          ui32FreePageCount  the number of pages referenced in
+                                   'pai32FreeIndices'
+@Input          pai32FreeIndices   list of indices of pages within
+                                   'psPageArray' we now want to
+                                   unmap and free
+@Input          bIsLMA             flag indicating if the sparse allocation
+                                   is from LMA or UMA memory
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSChangeSparseMemCPUAddrMap(void **psPageArray,
+                                         IMG_UINT64 sCpuVAddrBase,
+                                         IMG_CPU_PHYADDR sCpuPAHeapBase,
+                                         IMG_UINT32 ui32AllocPageCount,
+                                         IMG_UINT32 *pai32AllocIndices,
+                                         IMG_UINT32 ui32FreePageCount,
+                                         IMG_UINT32 *pai32FreeIndices,
+                                         IMG_BOOL bIsLMA);
+
+/*************************************************************************/ /*!
+@Function       OSInstallMISR
+@Description    Installs a Mid-level Interrupt Service Routine (MISR)
+                which handles higher-level processing of interrupts from
+                the device (GPU).
+                An MISR runs outside of interrupt context, and so may be
+                descheduled. This means it can contain code that would
+                not be permitted in the LISR.
+                An MISR is invoked when OSScheduleMISR() is called. This
+                call should be made by the installed LISR once it has completed
+                its interrupt processing.
+                Multiple MISRs may be installed by the driver to handle
+                different causes of interrupt.
+@Input          pfnMISR       pointer to the function to be installed
+                              as the MISR
+@Input          hData         private data provided to the MISR
+@Input          pszMisrName   Name describing purpose of MISR worker thread
+                              (Must be a string literal).
+@Output         hMISRData     handle to the installed MISR (to be used
+                              for a subsequent uninstall)
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSInstallMISR(IMG_HANDLE *hMISRData,
+                           PFN_MISR pfnMISR,
+                           void *hData,
+                           const IMG_CHAR *pszMisrName);
+
+/*************************************************************************/ /*!
+@Function       OSUninstallMISR
+@Description    Uninstalls a Mid-level Interrupt Service Routine (MISR).
+@Input          hMISRData     handle to the installed MISR
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSUninstallMISR(IMG_HANDLE hMISRData);
+
+/*************************************************************************/ /*!
+@Function       OSScheduleMISR
+@Description    Schedules a Mid-level Interrupt Service Routine (MISR) to be
+                executed. An MISR should be executed outside of interrupt
+                context, for example in a work queue.
+@Input          hMISRData     handle to the installed MISR
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData);
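+
+/* Illustrative usage sketch: the LISR does the minimum work in interrupt
+ * context and defers the rest to the MISR; 'MyMISR', 'ghMISR' and 'psDevice'
+ * are placeholders and error handling is omitted.
+ *
+ *     static void MyMISR(void *pvData) { ... deferred processing ... }
+ *
+ *     Driver initialisation:
+ *         OSInstallMISR(&ghMISR, MyMISR, psDevice, "my_misr");
+ *     In the LISR, after acknowledging the interrupt:
+ *         OSScheduleMISR(ghMISR);
+ *     Driver teardown:
+ *         OSUninstallMISR(ghMISR);
+ */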
+
+/*************************************************************************/ /*!
+@Description    Pointer to a function implementing debug dump of thread-specific
+                data.
+@Input          pfnDumpDebugPrintf      Used to specify the print function used
+                                        to dump any debug information. If this
+                                        argument is NULL then a default print
+                                        function will be used.
+@Input          pvDumpDebugFile         File identifier to be passed to the
+                                        print function if specified.
+*/ /**************************************************************************/
+
+typedef void (*PFN_THREAD_DEBUG_DUMP)(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf,
+                                      void *pvDumpDebugFile);
+
+/*************************************************************************/ /*!
+@Function       OSThreadCreate
+@Description    Creates a kernel thread and starts it running. The caller
+                is responsible for informing the thread that it must finish
+                and return from the pfnThread function. It is not possible
+                to kill or terminate it. The new thread runs with the default
+                priority provided by the Operating System.
+                Note: Kernel threads are freezable, which means they can be
+                frozen by the kernel, for example on driver suspend. Because
+                of that, only the OSEventObjectWaitKernel() function should be
+                used to put kernel threads into a waiting state.
+@Output         phThread            Returned handle to the thread.
+@Input          pszThreadName       Name to assign to the thread.
+@Input          pfnThread           Thread entry point function.
+@Input          pfnDebugDumpCB      Used to dump info of the created thread
+@Input          bIsSupportingThread Set if a summary of this thread needs to
+                                    be dumped in debug_dump
+@Input          hData               Thread specific data pointer for pfnThread().
+@Return         Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+
+PVRSRV_ERROR OSThreadCreate(IMG_HANDLE *phThread,
+                            IMG_CHAR *pszThreadName,
+                            PFN_THREAD pfnThread,
+                            PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB,
+                            IMG_BOOL bIsSupportingThread,
+                            void *hData);
+
+/*! Available priority levels for the creation of a new Kernel Thread. */
+typedef enum priority_levels
+{
+       OS_THREAD_NOSET_PRIORITY = 0,   /* With this option the priority level is the default for the given OS */
+       OS_THREAD_HIGHEST_PRIORITY,
+       OS_THREAD_HIGH_PRIORITY,
+       OS_THREAD_NORMAL_PRIORITY,
+       OS_THREAD_LOW_PRIORITY,
+       OS_THREAD_LOWEST_PRIORITY,
+       OS_THREAD_LAST_PRIORITY     /* This must always be the last entry */
+} OS_THREAD_LEVEL;
+
+/*************************************************************************/ /*!
+@Function       OSThreadCreatePriority
+@Description    As OSThreadCreate(), this function creates a kernel thread
+                and starts it running. The difference is that this function
+                makes it possible to specify the priority used to schedule
+                the new thread.
+
+@Output         phThread            Returned handle to the thread.
+@Input          pszThreadName       Name to assign to the thread.
+@Input          pfnThread           Thread entry point function.
+@Input          pfnDebugDumpCB      Used to dump info of the created thread
+@Input          bIsSupportingThread Set if a summary of this thread needs to
+                                    be dumped in debug_dump
+@Input          hData               Thread specific data pointer for pfnThread().
+@Input          eThreadPriority     Priority level to assign to the new thread.
+@Return         Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSThreadCreatePriority(IMG_HANDLE *phThread,
+                                    IMG_CHAR *pszThreadName,
+                                    PFN_THREAD pfnThread,
+                                    PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB,
+                                    IMG_BOOL bIsSupportingThread,
+                                    void *hData,
+                                    OS_THREAD_LEVEL eThreadPriority);
+
+/*************************************************************************/ /*!
+@Function       OSThreadDestroy
+@Description    Waits for the thread to end and then destroys the thread
+                handle memory. This function will block and wait for the
+                thread to finish successfully, thereby providing a sync point
+                for the thread completing its work. No attempt is made to kill
+                or otherwise terminate the thread.
+@Input          hThread   The thread handle returned by OSThreadCreate().
+@Return         Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSThreadDestroy(IMG_HANDLE hThread);
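+
+/* Illustrative lifecycle sketch: the thread body polls a caller-owned flag
+ * because OSThreadDestroy() only waits for the thread, it never kills it;
+ * 'MyThread' and 'gbStop' are placeholders and error handling is omitted.
+ *
+ *     static volatile IMG_BOOL gbStop = IMG_FALSE;
+ *
+ *     static void MyThread(void *pvData)
+ *     {
+ *         while (!gbStop)
+ *         {
+ *             ... do work or wait on an event object ...
+ *         }
+ *     }
+ *
+ *     IMG_HANDLE hThread;
+ *     OSThreadCreate(&hThread, "my_thread", MyThread, NULL, IMG_FALSE, NULL);
+ *     ...
+ *     gbStop = IMG_TRUE;
+ *     OSThreadDestroy(hThread);
+ */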
+
+/*************************************************************************/ /*!
+@Function       OSMapPhysToLin
+@Description    Maps physical memory into a linear address range.
+@Input          BasePAddr    physical CPU address
+@Input          ui32Bytes    number of bytes to be mapped
+@Input          uiFlags      flags denoting the caching mode to be employed
+                             for the mapping (uncached/write-combined,
+                             cached coherent or cached incoherent).
+                             See pvrsrv_memallocflags.h for full flag bit
+                             definitions.
+@Return         Pointer to the new mapping if successful, NULL otherwise.
+*/ /**************************************************************************/
+void *OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, size_t ui32Bytes, PVRSRV_MEMALLOCFLAGS_T uiFlags);
+
+/*************************************************************************/ /*!
+@Function       OSUnMapPhysToLin
+@Description    Unmaps physical memory previously mapped by OSMapPhysToLin().
+@Input          pvLinAddr    the linear mapping to be unmapped
+@Input          ui32Bytes    number of bytes to be unmapped
+@Return         IMG_TRUE if unmapping was successful, IMG_FALSE otherwise.
+*/ /**************************************************************************/
+IMG_BOOL OSUnMapPhysToLin(void *pvLinAddr, size_t ui32Bytes);
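+
+/* Illustrative usage sketch: mapping a register bank and releasing the
+ * mapping again; 'sRegsPAddr', 'uiRegsSize' and the caching-mode flag are
+ * placeholders (see pvrsrv_memallocflags.h for the real flag definitions).
+ *
+ *     void *pvRegs = OSMapPhysToLin(sRegsPAddr, uiRegsSize, uiUncachedFlags);
+ *     if (pvRegs != NULL)
+ *     {
+ *         ... access the registers, e.g. via OSReadHWReg32()/OSWriteHWReg32() ...
+ *         OSUnMapPhysToLin(pvRegs, uiRegsSize);
+ *     }
+ */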
+
+/*************************************************************************/ /*!
+@Function       OSCPUCacheFlushRangeKM
+@Description    Clean and invalidate the CPU cache for the specified
+                address range.
+@Input          psDevNode     device on which the allocation was made
+@Input          pvVirtStart   virtual start address of the range to be
+                              flushed
+@Input          pvVirtEnd     virtual end address of the range to be
+                              flushed
+@Input          sCPUPhysStart physical start address of the range to be
+                              flushed
+@Input          sCPUPhysEnd   physical end address of the range to be
+                              flushed
+@Return         None
+*/ /**************************************************************************/
+void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                            void *pvVirtStart,
+                            void *pvVirtEnd,
+                            IMG_CPU_PHYADDR sCPUPhysStart,
+                            IMG_CPU_PHYADDR sCPUPhysEnd);
+
+/*************************************************************************/ /*!
+@Function       OSCPUCacheCleanRangeKM
+@Description    Clean the CPU cache for the specified address range.
+                This writes out the contents of the cache and clears the
+                'dirty' bit (which indicates the physical memory is
+                consistent with the cache contents).
+@Input          psDevNode     device on which the allocation was made
+@Input          pvVirtStart   virtual start address of the range to be
+                              cleaned
+@Input          pvVirtEnd     virtual end address of the range to be
+                              cleaned
+@Input          sCPUPhysStart physical start address of the range to be
+                              cleaned
+@Input          sCPUPhysEnd   physical end address of the range to be
+                              cleaned
+@Return         None
+*/ /**************************************************************************/
+void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                            void *pvVirtStart,
+                            void *pvVirtEnd,
+                            IMG_CPU_PHYADDR sCPUPhysStart,
+                            IMG_CPU_PHYADDR sCPUPhysEnd);
+
+/*************************************************************************/ /*!
+@Function       OSCPUCacheInvalidateRangeKM
+@Description    Invalidate the CPU cache for the specified address range.
+                The cache must reload data from those addresses if they
+                are accessed.
+@Input          psDevNode     device on which the allocation was made
+@Input          pvVirtStart   virtual start address of the range to be
+                              invalidated
+@Input          pvVirtEnd     virtual end address of the range to be
+                              invalidated
+@Input          sCPUPhysStart physical start address of the range to be
+                              invalidated
+@Input          sCPUPhysEnd   physical end address of the range to be
+                              invalidated
+@Return         None
+*/ /**************************************************************************/
+void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+                                 void *pvVirtStart,
+                                 void *pvVirtEnd,
+                                 IMG_CPU_PHYADDR sCPUPhysStart,
+                                 IMG_CPU_PHYADDR sCPUPhysEnd);
+
+/*! CPU Cache operations address domain type */
+typedef enum
+{
+       OS_CACHE_OP_ADDR_TYPE_VIRTUAL,    /*!< Operation requires CPU virtual address only */
+       OS_CACHE_OP_ADDR_TYPE_PHYSICAL,   /*!< Operation requires CPU physical address only */
+       OS_CACHE_OP_ADDR_TYPE_BOTH        /*!< Operation requires both CPU virtual & physical addresses */
+} OS_CACHE_OP_ADDR_TYPE;
+
+/*************************************************************************/ /*!
+@Function       OSCPUCacheOpAddressType
+@Description    Returns the address type (i.e. virtual/physical/both) the CPU
+                architecture performs cache maintenance operations under.
+                This is used to infer whether the virtual or physical address
+                supplied to the OSCPUCacheXXXRangeKM functions can be omitted
+                when called.
+@Return         OS_CACHE_OP_ADDR_TYPE
+*/ /**************************************************************************/
+OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void);
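+
+/* Illustrative usage sketch: callers may use the reported address type to
+ * avoid looking up an address the maintenance functions do not need; names
+ * are placeholders.
+ *
+ *     if (OSCPUCacheOpAddressType() == OS_CACHE_OP_ADDR_TYPE_VIRTUAL)
+ *     {
+ *         ... the physical start/end arguments may be passed as zero ...
+ *     }
+ *     OSCPUCacheFlushRangeKM(psDevNode, pvStart, pvEnd, sPhysStart, sPhysEnd);
+ */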
+
+/*! CPU Cache attributes available for retrieval, DCache unless specified */
+typedef enum _OS_CPU_CACHE_ATTRIBUTE_
+{
+       OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE, /*!< The cache line size */
+       OS_CPU_CACHE_ATTRIBUTE_COUNT      /*!< The number of attributes (must be last) */
+} OS_CPU_CACHE_ATTRIBUTE;
+
+/*************************************************************************/ /*!
+@Function       OSCPUCacheAttributeSize
+@Description    Returns the size of a given cache attribute.
+                Typically this function is used to return the cache line
+                size, but may be extended to return the size of other
+                cache attributes.
+@Input          eCacheAttribute   the cache attribute whose size should
+                                  be returned.
+@Return         The size of the specified cache attribute, in bytes.
+*/ /**************************************************************************/
+IMG_UINT32 OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE eCacheAttribute);
+
+/*************************************************************************/ /*!
+@Function       OSGetCurrentProcessID
+@Description    Returns ID of current process (thread group)
+@Return         ID of current process
+*****************************************************************************/
+IMG_PID OSGetCurrentProcessID(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetCurrentVirtualProcessID
+@Description    Returns ID of current process (thread group of current
+                PID namespace)
+@Return         ID of current process in PID namespace
+*****************************************************************************/
+IMG_PID OSGetCurrentVirtualProcessID(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetCurrentProcessName
+@Description    Gets the name of current process
+@Return         Process name
+*****************************************************************************/
+IMG_CHAR *OSGetCurrentProcessName(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetCurrentProcessVASpaceSize
+@Description    Returns the CPU virtual address space size of current process
+@Return         Process VA space size
+*/ /**************************************************************************/
+IMG_UINT64 OSGetCurrentProcessVASpaceSize(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetCurrentThreadID
+@Description    Returns ID for current thread
+@Return         ID of current thread
+*****************************************************************************/
+uintptr_t OSGetCurrentThreadID(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetCurrentClientProcessIDKM
+@Description    Returns ID of current client process (thread group) which
+                has made a bridge call into the server.
+                For some operating systems, this may simply be the current
+                process id. For others, it may be that a dedicated thread
+                is used to handle the processing of bridge calls and that
+                some additional processing is required to obtain the ID of
+                the client process making the bridge call.
+@Return         ID of current client process
+*****************************************************************************/
+IMG_PID OSGetCurrentClientProcessIDKM(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetCurrentClientProcessNameKM
+@Description    Gets the name of current client process
+@Return         Client process name
+*****************************************************************************/
+IMG_CHAR *OSGetCurrentClientProcessNameKM(void);
+
+/*************************************************************************/ /*!
+@Function       OSGetCurrentClientThreadIDKM
+@Description    Returns ID for current client thread
+                For some operating systems, this may simply be the current
+                thread id. For others, it may be that a dedicated thread
+                is used to handle the processing of bridge calls and that
+                some additional processing is required to obtain the ID of
+                the client thread making the bridge call.
+@Return         ID of current client thread
+*****************************************************************************/
+uintptr_t OSGetCurrentClientThreadIDKM(void);
+
+/*************************************************************************/ /*!
+@Function       OSMemCmp
+@Description    Compares two blocks of memory for equality.
+@Input          pvBufA      Pointer to the first block of memory
+@Input          pvBufB      Pointer to the second block of memory
+@Input          uiLen       The number of bytes to be compared
+@Return         Value < 0 if pvBufA is less than pvBufB.
+                Value > 0 if pvBufB is less than pvBufA.
+                Value = 0 if pvBufA is equal to pvBufB.
+*****************************************************************************/
+IMG_INT OSMemCmp(void *pvBufA, void *pvBufB, size_t uiLen);
+
+/*************************************************************************/ /*!
+@Function       OSPhyContigPagesAlloc
+@Description    Allocates a number of contiguous physical pages.
+                If allocations made by this function are CPU cached then
+                OSPhyContigPagesClean has to be implemented to write the
+                cached data to memory.
+@Input          psPhysHeap    the heap from which to allocate
+@Input          uiSize        the size of the required allocation (in bytes)
+@Output         psMemHandle   a returned handle to be used to refer to this
+                              allocation
+@Output         psDevPAddr    the physical address of the allocation
+@Input          uiPid         the process ID that this allocation should
+                              be associated with
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*****************************************************************************/
+PVRSRV_ERROR OSPhyContigPagesAlloc(PHYS_HEAP *psPhysHeap, size_t uiSize,
+                                   PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr,
+                                   IMG_PID uiPid);
+
+/*************************************************************************/ /*!
+@Function       OSPhyContigPagesFree
+@Description    Frees a previous allocation of contiguous physical pages
+@Input          psPhysHeap    the heap from which the pages were allocated
+@Input          psMemHandle   the handle of the allocation to be freed
+@Return         None.
+*****************************************************************************/
+void OSPhyContigPagesFree(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle);
+
+/*************************************************************************/ /*!
+@Function       OSPhyContigPagesMap
+@Description    Maps the specified allocation of contiguous physical pages
+                to a kernel virtual address
+@Input          psPhysHeap    the heap from which the pages were allocated
+@Input          psMemHandle   the handle of the allocation to be mapped
+@Input          uiSize        the size of the allocation (in bytes)
+@Input          psDevPAddr    the physical address of the allocation
+@Output         pvPtr         the virtual kernel address to which the
+                              allocation is now mapped
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*****************************************************************************/
+PVRSRV_ERROR OSPhyContigPagesMap(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle,
+                                 size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+                                 void **pvPtr);
+
+/*************************************************************************/ /*!
+@Function       OSPhyContigPagesUnmap
+@Description    Unmaps the kernel mapping for the specified allocation of
+                contiguous physical pages
+@Input          psPhysHeap    the heap from which the pages were allocated
+@Input          psMemHandle   the handle of the allocation to be unmapped
+@Input          pvPtr         the virtual kernel address to which the
+                              allocation is currently mapped
+@Return         None.
+*****************************************************************************/
+void OSPhyContigPagesUnmap(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle, void *pvPtr);
+
+/*************************************************************************/ /*!
+@Function       OSPhyContigPagesClean
+@Description    Writes the content of the specified allocation from the CPU
+                cache to memory, from (start + uiOffset) to
+                (start + uiOffset + uiLength).
+                This is expected to be implemented as a cache clean operation
+                but it is allowed to fall back to a cache clean + invalidate
+                (i.e. flush).
+                If allocations returned by OSPhyContigPagesAlloc are always
+                uncached this can be implemented as a no-op.
+@Input          psPhysHeap    the heap from which the pages were allocated
+@Input          psMemHandle   the handle of the allocation to be flushed
+@Input          uiOffset      the offset in bytes from the start of the
+                              allocation from where to start flushing
+@Input          uiLength      the amount to flush from the offset in bytes
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*****************************************************************************/
+PVRSRV_ERROR OSPhyContigPagesClean(PHYS_HEAP *psPhysHeap,
+                                   PG_HANDLE *psMemHandle,
+                                   IMG_UINT32 uiOffset,
+                                   IMG_UINT32 uiLength);
+
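+/* Illustrative lifecycle sketch for a contiguous page allocation; the
+ * variable names are placeholders and error handling is omitted.
+ *
+ *     PG_HANDLE sMemHandle;
+ *     IMG_DEV_PHYADDR sDevPAddr;
+ *     void *pvCpuVAddr;
+ *
+ *     OSPhyContigPagesAlloc(psPhysHeap, OSGetPageSize(), &sMemHandle,
+ *                           &sDevPAddr, OSGetCurrentProcessID());
+ *     OSPhyContigPagesMap(psPhysHeap, &sMemHandle, OSGetPageSize(),
+ *                         &sDevPAddr, &pvCpuVAddr);
+ *     ... fill the memory through pvCpuVAddr ...
+ *     OSPhyContigPagesClean(psPhysHeap, &sMemHandle, 0, OSGetPageSize());
+ *     OSPhyContigPagesUnmap(psPhysHeap, &sMemHandle, pvCpuVAddr);
+ *     OSPhyContigPagesFree(psPhysHeap, &sMemHandle);
+ */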
+
+/*************************************************************************/ /*!
+@Function       OSInitEnvData
+@Description    Called to initialise any environment-specific data. This
+                could include initialising the bridge calling infrastructure
+                or device memory management infrastructure.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSInitEnvData(void);
+
+/*************************************************************************/ /*!
+@Function       OSDeInitEnvData
+@Description    The counterpart to OSInitEnvData(). Called to free any
+                resources which may have been allocated by OSInitEnvData().
+@Return         None.
+*/ /**************************************************************************/
+void OSDeInitEnvData(void);
+
+/*************************************************************************/ /*!
+@Function       OSVSScanf
+@Description    OS function to support the standard C vsscanf() function.
+*/ /**************************************************************************/
+IMG_UINT32 OSVSScanf(const IMG_CHAR *pStr, const IMG_CHAR *pszFormat, ...);
+
+/*************************************************************************/ /*!
+@Function       OSStringLCat
+@Description    OS function to support the BSD C strlcat() function.
+*/ /**************************************************************************/
+size_t OSStringLCat(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDstSize);
+
+/*************************************************************************/ /*!
+@Function       OSSNPrintf
+@Description    OS function to support the standard C snprintf() function.
+@Output         pStr        char array to print into
+@Input          ui32Size    maximum size of data to write (chars)
+@Input          pszFormat   format string
+*/ /**************************************************************************/
+IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR *pszFormat, ...) __printf(3, 4);
+
+/*************************************************************************/ /*!
+@Function       OSVSNPrintf
+@Description    Printf to IMG string using variable args (see stdarg.h).
+                This is necessary because the '...' notation does not
+                support nested function calls.
+@Input          ui32Size           maximum size of data to write (chars)
+@Input          pszFormat          format string
+@Input          vaArgs             variable args structure (from stdarg.h)
+@Output         pStr               char array to print into
+@Return         Number of characters written to the buffer if successful, otherwise -1 on error.
+*/ /**************************************************************************/
+IMG_INT32 OSVSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR* pszFormat, va_list vaArgs) __printf(3, 0);
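+
+/* Illustrative usage sketch: OSVSNPrintf is what a printf-style wrapper
+ * forwards its va_list to; 'MyLogf' is a placeholder.
+ *
+ *     static void MyLogf(const IMG_CHAR *pszFormat, ...)
+ *     {
+ *         IMG_CHAR acBuf[128];
+ *         va_list vaArgs;
+ *
+ *         va_start(vaArgs, pszFormat);
+ *         (void) OSVSNPrintf(acBuf, sizeof(acBuf), pszFormat, vaArgs);
+ *         va_end(vaArgs);
+ *         ... emit acBuf ...
+ *     }
+ */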
+
+/*************************************************************************/ /*!
+@Function       OSStringLength
+@Description    OS function to support the standard C strlen() function.
+*/ /**************************************************************************/
+size_t OSStringLength(const IMG_CHAR *pStr);
+
+/*************************************************************************/ /*!
+@Function       OSStringNLength
+@Description    Return the length of a string, excluding the terminating null
+                byte ('\0'), but return at most 'uiCount' bytes. Only the first
+                'uiCount' bytes of 'pStr' are interrogated.
+@Input          pStr     pointer to the string
+@Input          uiCount  the maximum length to return
+@Return         Length of the string if less than 'uiCount' bytes, otherwise
+                'uiCount'.
+*/ /**************************************************************************/
+size_t OSStringNLength(const IMG_CHAR *pStr, size_t uiCount);
+
+/*************************************************************************/ /*!
+@Function       OSStringNCompare
+@Description    OS function to support the standard C strncmp() function.
+*/ /**************************************************************************/
+IMG_INT32 OSStringNCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2,
+                           size_t uiSize);
+
+/*************************************************************************/ /*!
+@Function       OSStringToUINT32
+@Description    Converts a string to an IMG_UINT32.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSStringToUINT32(const IMG_CHAR *pStr, IMG_UINT32 ui32Base,
+                              IMG_UINT32 *ui32Result);
+
+/*************************************************************************/ /*!
+@Function       OSStringUINT32ToStr
+@Description    Converts an IMG_UINT32 to a string
+@Input          pszBuf         Buffer to write the output number string into
+@Input          uSize          Size of the buffer provided, i.e. size of pszBuf
+@Input          ui32Num        Number to convert to a string
+@Return         Returns 0 if the buffer is not large enough to hold the number
+                string, otherwise returns the length of the number string
+*/ /**************************************************************************/
+IMG_UINT32 OSStringUINT32ToStr(IMG_CHAR *pszBuf, size_t uSize, IMG_UINT32 ui32Num);
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectCreate
+@Description    Create an event object.
+@Input          pszName         name to assign to the new event object.
+@Output         EventObject     the created event object.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName,
+                                 IMG_HANDLE *EventObject);
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectDestroy
+@Description    Destroy an event object.
+@Input          hEventObject    the event object to destroy.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectDestroy(IMG_HANDLE hEventObject);
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectSignal
+@Description    Signal an event object. Any thread waiting on that event
+                object will be woken.
+@Input          hEventObject    the event object to signal.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hEventObject);
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectWait
+@Description    Wait for an event object to signal. The function is passed
+                an OS event object handle (which allows the OS to have the
+                calling thread wait on the associated event object).
+                The calling thread will be rescheduled when the associated
+                event object signals.
+                If the event object has not signalled after a default timeout
+                period (defined in EVENT_OBJECT_TIMEOUT_MS), the function
+                will return with the result code PVRSRV_ERROR_TIMEOUT.
+
+@Input          hOSEventKM    the OS event object handle associated with
+                              the event object.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM);
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectWaitKernel
+@Description    Wait for an event object to signal. The function is passed
+                an OS event object handle (which allows the OS to have the
+                calling thread wait on the associated event object).
+                The calling thread will be rescheduled when the associated
+                event object signals.
+                If the event object has not signalled after a default timeout
+                period (defined in EVENT_OBJECT_TIMEOUT_MS), the function
+                will return with the result code PVRSRV_ERROR_TIMEOUT.
+
+                Note: This function should be used only by kernel threads.
+                This is because all kernel threads are freezable and
+                this function allows the kernel to freeze the threads
+                when waiting.
+
+                See OSEventObjectWait() for more details.
+
+@Input          hOSEventKM    the OS event object handle associated with
+                              the event object.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+#if defined(__linux__) && defined(__KERNEL__)
+PVRSRV_ERROR OSEventObjectWaitKernel(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus);
+#else
+#define OSEventObjectWaitKernel OSEventObjectWaitTimeout
+#endif
+
+/*************************************************************************/ /*!
+@Function       OSSuspendTaskInterruptible
+@Description    Suspends the current task into an interruptible state.
+@Return         none.
+*/ /**************************************************************************/
+#if defined(__linux__) && defined(__KERNEL__)
+void OSSuspendTaskInterruptible(void);
+#endif
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectWaitTimeout
+@Description    Wait for an event object to signal or timeout. The function
+                is passed an OS event object handle (which allows the OS to
+                have the calling thread wait on the associated event object).
+                The calling thread will be rescheduled when the associated
+                event object signals.
+                If the event object has not signalled after the specified
+                timeout period (passed in 'uiTimeoutus'), the function
+                will return with the result code PVRSRV_ERROR_TIMEOUT.
+@Input          hOSEventKM    the OS event object handle associated with
+                              the event object.
+@Input          uiTimeoutus   the timeout period (in usecs)
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectWaitTimeout(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus);
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectDumpDebugInfo
+@Description    Emits debug counters/stats related to the event object passed
+@Input          hOSEventKM    the OS event object handle associated with
+                              the event object.
+@Return         None.
+*/ /**************************************************************************/
+void OSEventObjectDumpDebugInfo(IMG_HANDLE hOSEventKM);
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectOpen
+@Description    Open an OS handle on the specified event object.
+                This OS handle may then be used to make a thread wait for
+                that event object to signal.
+@Input          hEventObject    Event object handle.
+@Output         phOSEvent       OS handle to the returned event object.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectOpen(IMG_HANDLE hEventObject,
+                               IMG_HANDLE *phOSEvent);
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectClose
+@Description    Close an OS handle previously opened for an event object.
+@Input          hOSEventKM      OS event object handle to close.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectClose(IMG_HANDLE hOSEventKM);
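+
+/* Illustrative usage sketch: one thread opens an OS handle on a shared event
+ * object and waits on it while another thread signals it; names are
+ * placeholders and error handling is omitted.
+ *
+ *     OSEventObjectCreate("my_event", &hEvent);
+ *     OSEventObjectOpen(hEvent, &hOSEvent);
+ *
+ *     Waiting thread:
+ *         OSEventObjectWaitTimeout(hOSEvent, 100000);    (100 ms, in us)
+ *     Signalling thread:
+ *         OSEventObjectSignal(hEvent);
+ *
+ *     Teardown:
+ *         OSEventObjectClose(hOSEvent);
+ *         OSEventObjectDestroy(hEvent);
+ */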
+
+/*************************************************************************/ /*!
+@Function      OSWaitus
+@Description   Implements a busy wait of the specified number of microseconds.
+               This function does NOT release thread quanta.
+@Input         ui32Timeus     The duration of the wait period (in us)
+@Return        None.
+*/ /**************************************************************************/
+void OSWaitus(IMG_UINT32 ui32Timeus);
+
+/*************************************************************************/ /*!
+@Function       OSSleepms
+@Description    Implements a sleep of the specified number of milliseconds.
+                This function may allow pre-emption, meaning the thread
+                may potentially not be rescheduled for a longer period.
+@Input          ui32Timems    The duration of the sleep (in ms)
+@Return         None.
+*/ /**************************************************************************/
+void OSSleepms(IMG_UINT32 ui32Timems);
+
+/*************************************************************************/ /*!
+@Function       OSReleaseThreadQuanta
+@Description    Relinquishes the current thread's execution time-slice,
+                permitting the OS scheduler to schedule another thread.
+@Return         None.
+*/ /**************************************************************************/
+void OSReleaseThreadQuanta(void);
+
+#if defined(__linux__) && defined(__KERNEL__)
+#define OSReadMemoryBarrier() rmb()
+#else
+/*************************************************************************/ /*!
+@Function       OSReadMemoryBarrier
+@Description    Insert a read memory barrier.
+                The read memory barrier guarantees that all load (read)
+                operations specified before the barrier will appear to happen
+                before all of the load operations specified after the barrier.
+*/ /**************************************************************************/
+void OSReadMemoryBarrier(void);
+#endif
+/*************************************************************************/ /*!
+@Function       OSMemoryBarrier
+@Description    Insert a read/write memory barrier.
+                The read and write memory barrier guarantees that all load
+                (read) and all store (write) operations specified before the
+                barrier will appear to happen before all of the load/store
+                operations specified after the barrier.
+@Input          hReadback     Optional pointer to memory to read back; this
+                can be useful for flushing queues in bus interconnects to RAM
+                before the device (GPU) accesses the shared memory.
+@Return         None.
+*/ /**************************************************************************/
+void OSMemoryBarrier(volatile void *hReadback);
+/*************************************************************************/ /*!
+@Function       OSWriteMemoryBarrier
+@Description    Insert a write memory barrier.
+                The write memory barrier guarantees that all store operations
+                (writes) specified before the barrier will appear to happen
+                before all of the store operations specified after the barrier.
+@Input          hReadback     Optional pointer to memory to read back; this
+                can be useful for flushing queues in bus interconnects to RAM
+                before the device (GPU) accesses the shared memory.
+@Return         None.
+*/ /**************************************************************************/
+void OSWriteMemoryBarrier(volatile void *hReadback);
+
+
+/* The access method is dependent on the location of the physical memory that
+ * makes up the PhyHeaps defined for the system and the CPU architecture. These
+ * macros may change in future to accommodate different access requirements.
+ */
+/*! Performs a 32 bit word read from the device memory. */
+#define OSReadDeviceMem32(addr)        (*((volatile IMG_UINT32 __force *)((void*)addr)))
+/*! Performs a 32 bit word write to the device memory. */
+#define OSWriteDeviceMem32(addr, val)  (*((volatile IMG_UINT32 __force *)((void*)addr)) = (IMG_UINT32)(val))
+/*! Performs a 32 bit word write to the device memory and issues a write memory barrier */
+#define OSWriteDeviceMem32WithWMB(addr, val) \
+       do { \
+               *((volatile IMG_UINT32 __force *)((void*)addr)) = (IMG_UINT32)(val); \
+               OSWriteMemoryBarrier(addr); \
+       } while (0)
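+
+/* Illustrative usage sketch: publishing a value the device polls on, with a
+ * write barrier so the store is visible before the device is kicked;
+ * 'pui32KickFlag' is a placeholder.
+ *
+ *     OSWriteDeviceMem32WithWMB(pui32KickFlag, 1);
+ */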
+
+#if defined(__linux__) && defined(__KERNEL__) && !defined(NO_HARDWARE)
+       #define OSReadHWReg8(addr, off)  ((IMG_UINT8)readb((IMG_BYTE __iomem *)(addr) + (off)))
+       #define OSReadHWReg16(addr, off) ((IMG_UINT16)readw((IMG_BYTE __iomem *)(addr) + (off)))
+       #define OSReadHWReg32(addr, off) ((IMG_UINT32)readl((IMG_BYTE __iomem *)(addr) + (off)))
+
+       /* Little endian support only */
+       #define OSReadHWReg64(addr, off) \
+                       ({ \
+                               __typeof__(addr) _addr = addr; \
+                               __typeof__(off) _off = off; \
+                               (IMG_UINT64) \
+                               ( \
+                                       ( (IMG_UINT64)(readl((IMG_BYTE __iomem *)(_addr) + (_off) + 4)) << 32) \
+                                       | readl((IMG_BYTE __iomem *)(_addr) + (_off)) \
+                               ); \
+                       })
+
+       #define OSWriteHWReg8(addr, off, val)  writeb((IMG_UINT8)(val), (IMG_BYTE __iomem *)(addr) + (off))
+       #define OSWriteHWReg16(addr, off, val) writew((IMG_UINT16)(val), (IMG_BYTE __iomem *)(addr) + (off))
+       #define OSWriteHWReg32(addr, off, val) writel((IMG_UINT32)(val), (IMG_BYTE __iomem *)(addr) + (off))
+       /* Little endian support only */
+       #define OSWriteHWReg64(addr, off, val) do \
+                       { \
+                               __typeof__(addr) _addr = addr; \
+                               __typeof__(off) _off = off; \
+                               __typeof__(val) _val = val; \
+                               writel((IMG_UINT32)((_val) & 0xffffffff), (IMG_BYTE __iomem *)(_addr) + (_off)); \
+                               writel((IMG_UINT32)(((IMG_UINT64)(_val) >> 32) & 0xffffffff), (IMG_BYTE __iomem *)(_addr) + (_off) + 4); \
+                       } while (0)
+
+
+#elif defined(NO_HARDWARE)
+       /* OSReadHWReg operations skipped in no hardware builds */
+       #define OSReadHWReg8(addr, off)  ((void)(addr), 0x4eU)
+       #define OSReadHWReg16(addr, off) ((void)(addr), 0x3a4eU)
+       #define OSReadHWReg32(addr, off) ((void)(addr), 0x30f73a4eU)
+#if defined(__QNXNTO__) && __SIZEOF_LONG__ == 8
+       /* This is needed for 64-bit QNX builds where the size of a long is 64 bits */
+       #define OSReadHWReg64(addr, off) ((void)(addr), 0x5b376c9d30f73a4eUL)
+#else
+       #define OSReadHWReg64(addr, off) ((void)(addr), 0x5b376c9d30f73a4eULL)
+#endif
+
+       #define OSWriteHWReg8(addr, off, val)
+       #define OSWriteHWReg16(addr, off, val)
+       #define OSWriteHWReg32(addr, off, val)
+       #define OSWriteHWReg64(addr, off, val)
+
+#else
+/*************************************************************************/ /*!
+@Function       OSReadHWReg8
+@Description    Read from an 8-bit memory-mapped device register.
+                The implementation should not permit the compiler to
+                reorder the I/O sequence.
+                The implementation should ensure that for a NO_HARDWARE
+                build the code does not attempt to read from a location
+                but instead returns a constant value.
+@Input          pvLinRegBaseAddr   The virtual base address of the register
+                                   block.
+@Input          ui32Offset         The byte offset from the base address of
+                                   the register to be read.
+@Return         The byte read.
+*/ /**************************************************************************/
+       IMG_UINT8 OSReadHWReg8(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+
+/*************************************************************************/ /*!
+@Function       OSReadHWReg16
+@Description    Read from a 16-bit memory-mapped device register.
+                The implementation should not permit the compiler to
+                reorder the I/O sequence.
+                The implementation should ensure that for a NO_HARDWARE
+                build the code does not attempt to read from a location
+                but instead returns a constant value.
+@Input          pvLinRegBaseAddr   The virtual base address of the register
+                                   block.
+@Input          ui32Offset         The byte offset from the base address of
+                                   the register to be read.
+@Return         The word read.
+*/ /**************************************************************************/
+       IMG_UINT16 OSReadHWReg16(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+
+/*************************************************************************/ /*!
+@Function       OSReadHWReg32
+@Description    Read from a 32-bit memory-mapped device register.
+                The implementation should not permit the compiler to
+                reorder the I/O sequence.
+                The implementation should ensure that for a NO_HARDWARE
+                build the code does not attempt to read from a location
+                but instead returns a constant value.
+@Input          pvLinRegBaseAddr   The virtual base address of the register
+                                   block.
+@Input          ui32Offset         The byte offset from the base address of
+                                   the register to be read.
+@Return         The long word read.
+*/ /**************************************************************************/
+       IMG_UINT32 OSReadHWReg32(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+
+/*************************************************************************/ /*!
+@Function       OSReadHWReg64
+@Description    Read from a 64-bit memory-mapped device register.
+                The implementation should not permit the compiler to
+                reorder the I/O sequence.
+                The implementation should ensure that for a NO_HARDWARE
+                build the code does not attempt to read from a location
+                but instead returns a constant value.
+@Input          pvLinRegBaseAddr   The virtual base address of the register
+                                   block.
+@Input          ui32Offset         The byte offset from the base address of
+                                   the register to be read.
+@Return         The long long word read.
+*/ /**************************************************************************/
+       IMG_UINT64 OSReadHWReg64(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+
+/*************************************************************************/ /*!
+@Function       OSWriteHWReg8
+@Description    Write to an 8-bit memory-mapped device register.
+                The implementation should not permit the compiler to
+                reorder the I/O sequence.
+                The implementation should ensure that for a NO_HARDWARE
+                build the code does not attempt to write to a location.
+@Input          pvLinRegBaseAddr   The virtual base address of the register
+                                   block.
+@Input          ui32Offset         The byte offset from the base address of
+                                   the register to be written to.
+@Input          ui8Value           The byte to be written to the register.
+@Return         None.
+*/ /**************************************************************************/
+       void OSWriteHWReg8(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT8 ui8Value);
+
+/*************************************************************************/ /*!
+@Function       OSWriteHWReg16
+@Description    Write to a 16-bit memory-mapped device register.
+                The implementation should not permit the compiler to
+                reorder the I/O sequence.
+                The implementation should ensure that for a NO_HARDWARE
+                build the code does not attempt to write to a location.
+@Input          pvLinRegBaseAddr   The virtual base address of the register
+                                   block.
+@Input          ui32Offset         The byte offset from the base address of
+                                   the register to be written to.
+@Input          ui16Value          The word to be written to the register.
+@Return         None.
+*/ /**************************************************************************/
+       void OSWriteHWReg16(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT16 ui16Value);
+
+/*************************************************************************/ /*!
+@Function       OSWriteHWReg32
+@Description    Write to a 32-bit memory-mapped device register.
+                The implementation should not permit the compiler to
+                reorder the I/O sequence.
+                The implementation should ensure that for a NO_HARDWARE
+                build the code does not attempt to write to a location.
+@Input          pvLinRegBaseAddr   The virtual base address of the register
+                                   block.
+@Input          ui32Offset         The byte offset from the base address of
+                                   the register to be written to.
+@Input          ui32Value          The long word to be written to the register.
+@Return         None.
+*/ /**************************************************************************/
+       void OSWriteHWReg32(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
+
+/*************************************************************************/ /*!
+@Function       OSWriteHWReg64
+@Description    Write to a 64-bit memory-mapped device register.
+                The implementation should not permit the compiler to
+                reorder the I/O sequence.
+                The implementation should ensure that for a NO_HARDWARE
+                build the code does not attempt to write to a location.
+@Input          pvLinRegBaseAddr   The virtual base address of the register
+                                   block.
+@Input          ui32Offset         The byte offset from the base address of
+                                   the register to be written to.
+@Input          ui64Value          The long long word to be written to the
+                                   register.
+@Return         None.
+*/ /**************************************************************************/
+       void OSWriteHWReg64(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT64 ui64Value);
+#endif
+
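+/* Usage sketch: polling a 32-bit status register and writing it back with a
+ * bit set. pvRegBase is assumed to be the caller's mapped register block and
+ * REG_STATUS_OFFSET a hypothetical byte offset; neither is defined here.
+ *
+ *     IMG_UINT32 ui32Status = OSReadHWReg32(pvRegBase, REG_STATUS_OFFSET);
+ *     OSWriteHWReg32(pvRegBase, REG_STATUS_OFFSET, ui32Status | 0x1U);
+ */
+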
+/*************************************************************************/ /*!
+@Description    Pointer to a timer callback function.
+@Input          pvData  Pointer to timer specific data.
+*/ /**************************************************************************/
+typedef void (*PFN_TIMER_FUNC)(void* pvData);
+
+/*************************************************************************/ /*!
+@Function       OSAddTimer
+@Description    OS specific function to install a timer callback. The
+                timer will then need to be enabled, as it is disabled by
+                default.
+                When enabled, the callback will be invoked once the specified
+                timeout has elapsed.
+@Input          pfnTimerFunc    Timer callback
+@Input          *pvData         Callback data
+@Input          ui32MsTimeout   Callback period
+@Return         Valid handle on success, NULL if a failure
+*/ /**************************************************************************/
+IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, void *pvData, IMG_UINT32 ui32MsTimeout);
+
+/*************************************************************************/ /*!
+@Function       OSRemoveTimer
+@Description    Removes the specified timer. The handle becomes invalid and
+                should no longer be used.
+@Input          hTimer          handle of the timer to be removed
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSRemoveTimer(IMG_HANDLE hTimer);
+
+/*************************************************************************/ /*!
+@Function       OSEnableTimer
+@Description    Enable the specified timer. After enabling, the timer will
+                invoke the associated callback at an interval determined by
+                the configured timeout period until it is disabled.
+@Input          hTimer          handle of the timer to be enabled
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEnableTimer(IMG_HANDLE hTimer);
+
+/*************************************************************************/ /*!
+@Function       OSDisableTimer
+@Description    Disable the specified timer
+@Input          hTimer          handle of the timer to be disabled
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSDisableTimer(IMG_HANDLE hTimer);
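+
+/* Usage sketch: a periodic watchdog callback fired every 500 ms. The callback
+ * name and its private data are illustrative and error handling is omitted.
+ *
+ *     static void WatchdogCb(void *pvData) { ... }
+ *
+ *     IMG_HANDLE hTimer = OSAddTimer(WatchdogCb, pvPrivate, 500);
+ *     if (hTimer != NULL)
+ *     {
+ *             OSEnableTimer(hTimer);
+ *             ...
+ *             OSDisableTimer(hTimer);
+ *             OSRemoveTimer(hTimer);
+ *     }
+ */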
+
+
+/*************************************************************************/ /*!
+ @Function      OSPanic
+ @Description   Take action in response to an unrecoverable driver error
+ @Return        None
+*/ /**************************************************************************/
+void OSPanic(void);
+
+/*************************************************************************/ /*!
+@Function       OSCopyToUser
+@Description    Copy data to user-addressable memory from kernel-addressable
+                memory.
+                Note that pvDest may be an invalid address or NULL and the
+                function should return an error in this case.
+                For operating systems that do not have a user/kernel space
+                distinction, this function should be implemented as a stub
+                which simply returns PVRSRV_ERROR_NOT_SUPPORTED.
+@Input          pvProcess        handle of the connection
+@Input          pvDest           pointer to the destination User memory
+@Input          pvSrc            pointer to the source Kernel memory
+@Input          ui32Bytes        size of the data to be copied
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSCopyToUser(void *pvProcess, void __user *pvDest, const void *pvSrc, size_t ui32Bytes);
+
+/*************************************************************************/ /*!
+@Function       OSCopyFromUser
+@Description    Copy data from user-addressable memory to kernel-addressable
+                memory.
+                Note that pvSrc may be an invalid address or NULL and the
+                function should return an error in this case.
+                For operating systems that do not have a user/kernel space
+                distinction, this function should be implemented as a stub
+                which simply returns PVRSRV_ERROR_NOT_SUPPORTED.
+@Input          pvProcess        handle of the connection
+@Input          pvDest           pointer to the destination Kernel memory
+@Input          pvSrc            pointer to the source User memory
+@Input          ui32Bytes        size of the data to be copied
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSCopyFromUser(void *pvProcess, void *pvDest, const void __user *pvSrc, size_t ui32Bytes);
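+
+/* Usage sketch: copying a small control block from user space. psConnection,
+ * pvUserBuf and the CONTROL_BLOCK type are assumed to come from the caller
+ * and are not defined by this header.
+ *
+ *     CONTROL_BLOCK sCtl;
+ *     PVRSRV_ERROR eError = OSCopyFromUser(psConnection, &sCtl, pvUserBuf, sizeof(sCtl));
+ *     if (eError != PVRSRV_OK)
+ *     {
+ *             return eError;
+ *     }
+ */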
+
+#if defined(__linux__) || defined(INTEGRITY_OS)
+#define OSBridgeCopyFromUser OSCopyFromUser
+#define OSBridgeCopyToUser OSCopyToUser
+#else
+/*************************************************************************/ /*!
+@Function       OSBridgeCopyFromUser
+@Description    Copy data from user-addressable memory into kernel-addressable
+                memory as part of a bridge call operation.
+                For operating systems that do not have a user/kernel space
+                distinction, this function will require whatever implementation
+                is needed to pass data for making the bridge function call.
+                For operating systems which do have a user/kernel space
+                distinction (such as Linux) this function may be defined so
+                as to equate to a call to OSCopyFromUser().
+@Input          pvProcess        handle of the connection
+@Input          pvDest           pointer to the destination Kernel memory
+@Input          pvSrc            pointer to the source User memory
+@Input          ui32Bytes        size of the data to be copied
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSBridgeCopyFromUser (void *pvProcess,
+                                               void *pvDest,
+                                               const void *pvSrc,
+                                               size_t ui32Bytes);
+
+/*************************************************************************/ /*!
+@Function       OSBridgeCopyToUser
+@Description    Copy data to user-addressable memory from kernel-addressable
+                memory as part of a bridge call operation.
+                For operating systems that do not have a user/kernel space
+                distinction, this function will require whatever implementation
+                is needed to pass data for making the bridge function call.
+                For operating systems which do have a user/kernel space
+                distinction (such as Linux) this function may be defined so
+                as to equate to a call to OSCopyToUser().
+@Input          pvProcess        handle of the connection
+@Input          pvDest           pointer to the destination User memory
+@Input          pvSrc            pointer to the source Kernel memory
+@Input          ui32Bytes        size of the data to be copied
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSBridgeCopyToUser (void *pvProcess,
+                                               void *pvDest,
+                                               const void *pvSrc,
+                                               size_t ui32Bytes);
+#endif
+
+/* To be increased if required in future */
+#define PVRSRV_MAX_BRIDGE_IN_SIZE      0x2000    /*!< Size of the memory block used to hold data passed in to a bridge call */
+#define PVRSRV_MAX_BRIDGE_OUT_SIZE     0x1000    /*!< Size of the memory block used to hold data returned from a bridge call */
+
+/*************************************************************************/ /*!
+@Function       OSPlatformBridgeInit
+@Description    Called during device creation to allow the OS port to register
+                other bridge modules and related resources that it requires.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPlatformBridgeInit(void);
+
+/*************************************************************************/ /*!
+@Function       OSPlatformBridgeDeInit
+@Description    Called during device destruction to allow the OS port to
+                deregister its OS specific bridges and clean up other
+                related resources.
+*/ /**************************************************************************/
+void OSPlatformBridgeDeInit(void);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVToNativeError
+@Description    Returns the OS-specific equivalent error number/code for
+                the specified PVRSRV_ERROR value.
+                If there is no equivalent, or the PVRSRV_ERROR value is
+                PVRSRV_OK (no error), 0 is returned.
+@Return         The OS equivalent error code.
+*/ /**************************************************************************/
+int PVRSRVToNativeError(PVRSRV_ERROR e);
+/** See PVRSRVToNativeError(). */
+#define OSPVRSRVToNativeError(e) ( (PVRSRV_OK == e)? 0: PVRSRVToNativeError(e) )
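+
+/* Usage sketch: translating a services error into the native error expected
+ * by an OS-facing entry point; eError is assumed to hold the PVRSRV_ERROR
+ * result of the preceding call.
+ *
+ *     return OSPVRSRVToNativeError(eError);
+ */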
+
+
+#if defined(__linux__) && defined(__KERNEL__)
+
+/* Provide LockDep friendly definitions for Services RW locks */
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include "allocmem.h"
+
+#define OSWRLockCreate(ppsLock) ({ \
+       PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \
+       *(ppsLock) = OSAllocMem(sizeof(struct rw_semaphore)); \
+       if (*(ppsLock)) { init_rwsem(*(ppsLock)); e = PVRSRV_OK; }; \
+       e;})
+#define OSWRLockDestroy(psLock) ({OSFreeMem(psLock); PVRSRV_OK;})
+
+#define OSWRLockAcquireRead(psLock) ({down_read(psLock); PVRSRV_OK;})
+#define OSWRLockAcquireReadNested(psLock, subclass) ({down_read_nested((psLock), (subclass)); PVRSRV_OK;})
+#define OSWRLockReleaseRead(psLock) ({up_read(psLock); PVRSRV_OK;})
+#define OSWRLockAcquireWrite(psLock) ({down_write(psLock); PVRSRV_OK;})
+#define OSWRLockReleaseWrite(psLock) ({up_write(psLock); PVRSRV_OK;})
+
+#elif defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS)
+/* User-mode unit tests use these definitions on Linux */
+
+PVRSRV_ERROR OSWRLockCreate(POSWR_LOCK *ppsLock);
+void OSWRLockDestroy(POSWR_LOCK psLock);
+void OSWRLockAcquireRead(POSWR_LOCK psLock);
+#define OSWRLockAcquireReadNested(psLock, subclass) OSWRLockAcquireRead((psLock))
+void OSWRLockReleaseRead(POSWR_LOCK psLock);
+void OSWRLockAcquireWrite(POSWR_LOCK psLock);
+void OSWRLockReleaseWrite(POSWR_LOCK psLock);
+
+#else
+
+/*! Function not implemented definition. */
+#define OSFUNC_NOT_IMPLEMENTED 0
+/*! Assert used for OSFUNC_NOT_IMPLEMENTED. */
+#define OSFUNC_NOT_IMPLEMENTED_ASSERT() PVR_ASSERT(OSFUNC_NOT_IMPLEMENTED)
+
+/*************************************************************************/ /*!
+@Function       OSWRLockCreate
+@Description    Create a writer/reader lock.
+                This type of lock allows multiple concurrent readers but
+                only a single writer, allowing for optimized performance.
+@Output         ppsLock     A handle to the created WR lock.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+static INLINE PVRSRV_ERROR OSWRLockCreate(POSWR_LOCK *ppsLock)
+{
+       PVR_UNREFERENCED_PARAMETER(ppsLock);
+
+       OSFUNC_NOT_IMPLEMENTED_ASSERT();
+
+       return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+/*************************************************************************/ /*!
+@Function       OSWRLockDestroy
+@Description    Destroys a writer/reader lock.
+@Input          psLock     The handle of the WR lock to be destroyed.
+@Return         None.
+*/ /**************************************************************************/
+static INLINE void OSWRLockDestroy(POSWR_LOCK psLock)
+{
+       PVR_UNREFERENCED_PARAMETER(psLock);
+       OSFUNC_NOT_IMPLEMENTED_ASSERT();
+}
+
+/*************************************************************************/ /*!
+@Function       OSWRLockAcquireRead
+@Description    Acquire a writer/reader read lock.
+                If the write lock is already acquired, the caller will
+                block until it is released.
+@Input          psLock     The handle of the WR lock to be acquired for
+                           reading.
+@Return         None.
+*/ /**************************************************************************/
+static INLINE void OSWRLockAcquireRead(POSWR_LOCK psLock)
+{
+       PVR_UNREFERENCED_PARAMETER(psLock);
+       OSFUNC_NOT_IMPLEMENTED_ASSERT();
+}
+
+/*************************************************************************/ /*!
+@Function       OSWRLockAcquireReadNested
+@Description    Acquire a nested writer/reader read lock.
+                If the write lock is already acquired, the caller will
+                block until it is released.
+                For operating systems other than Linux, this equates to an
+                OSWRLockAcquireRead() call. On Linux, this function wraps a call
+                to down_read_nested(). This recognises the scenario where
+                there may be multiple subclasses within a particular class
+                of lock. In such cases, the order in which the locks belonging
+                to these various subclasses are acquired is important and must be
+                validated.
+@Input          psLock     The handle of the WR lock to be acquired for
+                           reading.
+@Input          iSubclass  The subclass of the lock.
+@Return         None.
+*/ /**************************************************************************/
+static INLINE void OSWRLockAcquireReadNested(POSWR_LOCK psLock, IMG_INT iSubclass)
+{
+       PVR_UNREFERENCED_PARAMETER(psLock);
+       PVR_UNREFERENCED_PARAMETER(iSubclass);
+       OSFUNC_NOT_IMPLEMENTED_ASSERT();
+}
+
+/*************************************************************************/ /*!
+@Function       OSWRLockReleaseRead
+@Description    Release a writer/reader read lock.
+@Input          psLock     The handle of the WR lock whose read lock is to
+                           be released.
+@Return         None.
+*/ /**************************************************************************/
+static INLINE void OSWRLockReleaseRead(POSWR_LOCK psLock)
+{
+       PVR_UNREFERENCED_PARAMETER(psLock);
+       OSFUNC_NOT_IMPLEMENTED_ASSERT();
+}
+
+/*************************************************************************/ /*!
+@Function       OSWRLockAcquireWrite
+@Description    Acquire a writer/reader write lock.
+                If the write lock or any read lock are already acquired,
+                the caller will block until all are released.
+@Input          psLock     The handle of the WR lock to be acquired for
+                           writing.
+@Return         None.
+*/ /**************************************************************************/
+static INLINE void OSWRLockAcquireWrite(POSWR_LOCK psLock)
+{
+       PVR_UNREFERENCED_PARAMETER(psLock);
+       OSFUNC_NOT_IMPLEMENTED_ASSERT();
+}
+
+/*************************************************************************/ /*!
+@Function       OSWRLockReleaseWrite
+@Description    Release a writer/reader write lock.
+@Input          psLock     The handle of the WR lock whose write lock is to
+                           be released.
+@Return         None
+*/ /**************************************************************************/
+static INLINE void OSWRLockReleaseWrite(POSWR_LOCK psLock)
+{
+       PVR_UNREFERENCED_PARAMETER(psLock);
+       OSFUNC_NOT_IMPLEMENTED_ASSERT();
+}
+#endif
+
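+/* Usage sketch: protecting a list that is read far more often than it is
+ * modified. psLock is assumed to have been created during initialisation
+ * with OSWRLockCreate() and destroyed with OSWRLockDestroy() on shutdown.
+ *
+ *     OSWRLockAcquireRead(psLock);
+ *     ... walk the list ...
+ *     OSWRLockReleaseRead(psLock);
+ *
+ *     OSWRLockAcquireWrite(psLock);
+ *     ... insert or remove an element ...
+ *     OSWRLockReleaseWrite(psLock);
+ */
+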
+/*************************************************************************/ /*!
+@Function       OSDivide64r64
+@Description    Divide a 64-bit value by a 32-bit value. Return the 64-bit
+                quotient.
+                The remainder is also returned in 'pui32Remainder'.
+@Input          ui64Divident        The number to be divided.
+@Input          ui32Divisor         The 32-bit value 'ui64Divident' is to
+                                    be divided by.
+@Output         pui32Remainder      The remainder of the division.
+@Return         The 64-bit quotient (result of the division).
+*/ /**************************************************************************/
+IMG_UINT64 OSDivide64r64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder);
+
+/*************************************************************************/ /*!
+@Function       OSDivide64
+@Description    Divide a 64-bit value by a 32-bit value. Return a 32-bit
+                quotient.
+                The remainder is also returned in 'pui32Remainder'.
+                This function allows for a more optimal implementation
+                of a 64-bit division when the result is known to be
+                representable in 32 bits.
+@Input          ui64Divident        The number to be divided.
+@Input          ui32Divisor         The 32-bit value 'ui64Divident' is to
+                                    be divided by.
+@Output         pui32Remainder      The remainder of the division.
+@Return         The 32-bit quotient (result of the division).
+*/ /**************************************************************************/
+IMG_UINT32 OSDivide64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder);
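+
+/* Usage sketch: converting a byte count into whole kilobytes plus a
+ * remainder, where the quotient is known to fit in 32 bits. ui64Bytes is
+ * assumed to be supplied by the caller.
+ *
+ *     IMG_UINT32 ui32Rem;
+ *     IMG_UINT32 ui32KB = OSDivide64(ui64Bytes, 1024, &ui32Rem);
+ */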
+
+/*************************************************************************/ /*!
+@Function       OSDumpStack
+@Description    Dump the current task information and its stack trace.
+@Return         None
+*/ /**************************************************************************/
+void OSDumpStack(void);
+
+/*************************************************************************/ /*!
+@Function       OSUserModeAccessToPerfCountersEn
+@Description    Permit User-mode access to CPU performance counter
+                registers.
+                This function is called during device initialisation.
+                Certain CPU architectures may need to explicitly permit
+                User mode access to performance counters - if this is
+                required, the necessary code should be implemented inside
+                this function.
+@Return         None.
+*/ /**************************************************************************/
+void OSUserModeAccessToPerfCountersEn(void);
+
+/*************************************************************************/ /*!
+@Function       OSDebugSignalPID
+@Description    Sends a SIGTRAP signal to a specific PID in user mode for
+                debugging purposes. The user mode process can register a handler
+                against this signal.
+                This is necessary to support the Rogue debugger. If the Rogue
+                debugger is not used then this function may be implemented as
+                a stub.
+@Input          ui32PID    The PID for the signal.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSDebugSignalPID(IMG_UINT32 ui32PID);
+
+#if defined(__linux__) && defined(__KERNEL__) && !defined(DOXYGEN)
+#define OSWarnOn(a) WARN_ON(a)
+#else
+/*************************************************************************/ /*!
+@Function       OSWarnOn
+@Description    This API allows the driver to emit a special token and stack
+                dump to the server log when an issue is detected that needs the
+                OS to be notified. The token or call may be used to trigger
+                log collection by the OS environment.
+                PVR_DPF log messages will have been emitted prior to this call.
+@Input          a    Expression to evaluate, if true trigger Warn signal
+@Return         None
+*/ /**************************************************************************/
+#define OSWarnOn(a) do { if ((a)) { OSDumpStack(); } } while (0)
+#endif
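+
+/* Usage sketch: flag an unexpected state without aborting; the condition is
+ * illustrative only.
+ *
+ *     OSWarnOn(psDeviceNode == NULL);
+ */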
+
+/*************************************************************************/ /*!
+@Function       OSIsKernelThread
+@Description    This API determines if the current running thread is a kernel
+                thread (i.e. one not associated with any userland process,
+                typically an MISR handler.)
+@Return         IMG_TRUE if it is a kernel thread, otherwise IMG_FALSE.
+*/ /**************************************************************************/
+IMG_BOOL OSIsKernelThread(void);
+
+/*************************************************************************/ /*!
+@Function       OSThreadDumpInfo
+@Description    Traverse the thread list and call each of the stored
+                callbacks to dump the info in debug_dump.
+@Input          pfnDumpDebugPrintf  The 'printf' function to be called to
+                                    display the debug info
+@Input          pvDumpDebugFile     Optional file identifier to be passed to
+                                    the 'printf' function if required
+*/ /**************************************************************************/
+void OSThreadDumpInfo(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf,
+                      void *pvDumpDebugFile);
+
+/*************************************************************************/ /*!
+@Function       OSDumpVersionInfo
+@Description    Store OS version information in debug dump.
+@Input          pfnDumpDebugPrintf  The 'printf' function to be called to
+                                    display the debug info
+@Input          pvDumpDebugFile     Optional file identifier to be passed to
+                                    the 'printf' function if required
+*/ /**************************************************************************/
+void OSDumpVersionInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                       void *pvDumpDebugFile);
+
+/*************************************************************************/ /*!
+@Function       OSIsWriteCombineUnalignedSafe
+@Description    Determine if unaligned accesses to write-combine memory are
+                safe to perform, i.e. whether we are safe from a CPU fault
+                occurring. This test is specifically aimed at ARM64 platforms
+                which cannot provide this guarantee if the memory is 'device'
+                memory rather than 'normal' under the ARM memory architecture.
+@Return         IMG_TRUE if safe, IMG_FALSE otherwise.
+*/ /**************************************************************************/
+IMG_BOOL OSIsWriteCombineUnalignedSafe(void);
+
+/*************************************************************************/ /*!
+@Function       OSDebugLevel
+@Description    Returns current value of the debug level.
+@Return         Debug level.
+*/ /**************************************************************************/
+IMG_UINT32 OSDebugLevel(void);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVSetDebugLevel
+@Description    Sets the current value of the debug level to ui32DebugLevel.
+@Input          ui32DebugLevel New debug level value.
+*/ /**************************************************************************/
+void OSSetDebugLevel(IMG_UINT32 ui32DebugLevel);
+
+/*************************************************************************/ /*!
+@Function       OSIsDebugLevel
+@Description    Tests if a given debug level is enabled.
+@Input          ui32DebugLevel The debug level to test.
+@Return         IMG_TRUE if the debug level is enabled, IMG_FALSE otherwise.
+*/ /**************************************************************************/
+IMG_BOOL OSIsDebugLevel(IMG_UINT32 ui32DebugLevel);
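+
+/* Usage sketch: only emit a verbose dump when the corresponding debug level
+ * is enabled; DEBUG_LEVEL_VERBOSE is a hypothetical level value, not one
+ * defined by this header.
+ *
+ *     if (OSIsDebugLevel(DEBUG_LEVEL_VERBOSE))
+ *     {
+ *             OSDumpStack();
+ *     }
+ */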
+
+#if defined(SUPPORT_DMA_TRANSFER)
+
+typedef void (*PFN_SERVER_CLEANUP)(void *pvData, IMG_BOOL bAdvanceTimeline);
+
+#define DMA_COMPLETION_TIMEOUT_MS 60000
+#define DMA_ERROR_SYNC_RETRIES 100
+
+PVRSRV_ERROR OSDmaPrepareTransfer(PVRSRV_DEVICE_NODE *psDevNode, void *psChan,
+                                                          IMG_DMA_ADDR* psDmaAddr, IMG_UINT64* puiAddress,
+                                                          IMG_UINT64 uiSize, IMG_BOOL bMemToDev,
+                                                          IMG_HANDLE pvOSData,
+                                                          IMG_HANDLE pvServerCleanupParam,PFN_SERVER_CLEANUP pfnServerCleanup, IMG_BOOL bFirst);
+
+PVRSRV_ERROR OSDmaPrepareTransferSparse(PVRSRV_DEVICE_NODE *psDevNode, IMG_HANDLE pvChan,
+                                                                               IMG_DMA_ADDR* psDmaAddr, IMG_BOOL *pbValid,
+                                                                               IMG_UINT64* puiAddress, IMG_UINT64 uiSize,
+                                                                               IMG_UINT32 uiOffsetInPage,
+                                                                               IMG_UINT32 ui32SizeInPages,
+                                                                               IMG_BOOL bMemToDev,
+                                                                               IMG_HANDLE pvOSData,
+                                                                               IMG_HANDLE pvServerCleanupParam, PFN_SERVER_CLEANUP pfnServerCleanup,
+                                                                               IMG_BOOL bFirst);
+
+PVRSRV_ERROR OSDmaAllocData(PVRSRV_DEVICE_NODE *psDevNode,IMG_UINT32 uiNumDMA, void **pvAllocedData);
+PVRSRV_ERROR OSDmaSubmitTransfer(PVRSRV_DEVICE_NODE *psDevNode, void *pvOSData, void *psChan, IMG_BOOL bSynchronous);
+void OSDmaForceCleanup(PVRSRV_DEVICE_NODE *psDevNode, void *pvChan,
+                                          void *pvOSData, IMG_HANDLE pvServerCleanupParam,
+                                          PFN_SERVER_CLEANUP pfnServerCleanup);
+#endif
+#endif /* OSFUNC_H */
+
+/******************************************************************************
+ End of file (osfunc.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/oskm_apphint.h b/drivers/gpu/drm/img/img-rogue/services/server/include/oskm_apphint.h
new file mode 100644 (file)
index 0000000..78d4040
--- /dev/null
@@ -0,0 +1,186 @@
+/*************************************************************************/ /*!
+@File           oskm_apphint.h
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS-independent interface for retrieving KM apphints
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "img_defs.h"
+#if defined(__linux__)
+#include "km_apphint.h"
+#include "device.h"
+#else
+#include "services_client_porting.h"
+#endif
+#if !defined(OSKM_APPHINT_H)
+#define OSKM_APPHINT_H
+
+/*! Supplied to os_get_km_apphint_XXX() functions when the param/AppHint is
+ * applicable to all devices and not a specific device. Typically used
+ * for server-wide build and module AppHints.
+ */
+#define APPHINT_NO_DEVICE (NULL)
+
+#if defined(__linux__) && !defined(DOXYGEN)
+static INLINE IMG_UINT os_get_km_apphint_UINT32(PVRSRV_DEVICE_NODE *device, void *state, APPHINT_ID id, const IMG_UINT32 *pAppHintDefault, IMG_UINT32 *pVal) {
+       return !pvr_apphint_get_uint32(device, id, pVal);
+}
+static INLINE IMG_UINT os_get_km_apphint_UINT64(PVRSRV_DEVICE_NODE *device, void *state, APPHINT_ID id, const IMG_UINT64 *pAppHintDefault, IMG_UINT64 *pVal) {
+       return !pvr_apphint_get_uint64(device, id, pVal);
+}
+static INLINE IMG_UINT os_get_km_apphint_BOOL(PVRSRV_DEVICE_NODE *device, void *state, APPHINT_ID id, const IMG_BOOL *pAppHintDefault, IMG_BOOL *pVal) {
+       return !pvr_apphint_get_bool(device, id, pVal);
+}
+static INLINE IMG_UINT os_get_km_apphint_STRING(PVRSRV_DEVICE_NODE *device, void *state, APPHINT_ID id, const IMG_CHAR *pAppHintDefault, IMG_CHAR *buffer, size_t size) {
+       return !pvr_apphint_get_string(device, id, buffer, size);
+}
+
+#define OSGetKMAppHintUINT32(device, state, name, appHintDefault, value) \
+       os_get_km_apphint_UINT32(device, state, APPHINT_ID_ ## name, appHintDefault, value)
+
+#define OSGetKMAppHintUINT64(device, state, name, appHintDefault, value) \
+       os_get_km_apphint_UINT64(device, state, APPHINT_ID_ ## name, appHintDefault, value)
+
+#define OSGetKMAppHintBOOL(device, state, name, appHintDefault, value) \
+       os_get_km_apphint_BOOL(device, state, APPHINT_ID_ ## name, appHintDefault, value)
+
+#define OSGetKMAppHintSTRING(device, state, name, appHintDefault, buffer, size) \
+       os_get_km_apphint_STRING(device, state, APPHINT_ID_ ## name, appHintDefault, buffer, size)
+
+
+#define OSCreateKMAppHintState(state) \
+       PVR_UNREFERENCED_PARAMETER(state)
+
+#define OSFreeKMAppHintState(state) \
+       PVR_UNREFERENCED_PARAMETER(state)
+
+#else /* defined(__linux__) && !defined(DOXYGEN) */
+
+/**************************************************************************/ /*!
+@def OSGetKMAppHintUINT32(device, state, name, appHintDefault, value)
+@Description    Interface for retrieval of uint32 km app hint.
+                               For non-linux operating systems, this macro implements a call
+                               from server code to PVRSRVGetAppHint() declared in
+                               services_client_porting.h, effectively making it 'shared' code.
+@Input          device            Device node
+@Input          state             App hint state
+@Input          name              Name used to identify app hint
+@Input          appHintDefault    Default value to be returned if no
+                                                                 app hint is found.
+@Output         value             Pointer to returned app hint value.
+ */ /**************************************************************************/
+#define OSGetKMAppHintUINT32(device, state, name, appHintDefault, value) \
+       PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value)
+
+/**************************************************************************/ /*!
+@def OSGetKMAppHintUINT64(device, state, name, appHintDefault, value)
+@Description    Interface for retrieval of uint64 km app hint.
+                               For non-linux operating systems, this macro implements a call
+                               from server code to PVRSRVGetAppHint() declared in
+                               services_client_porting.h, effectively making it 'shared' code.
+@Input          device            Device node
+@Input          state             App hint state
+@Input          name              Name used to identify app hint
+@Input          appHintDefault    Default value to be returned if no
+                                                                 app hint is found.
+@Output         value             Pointer to returned app hint value.
+ */ /**************************************************************************/
+#define OSGetKMAppHintUINT64(device, state, name, appHintDefault, value) \
+       PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value)
+
+/**************************************************************************/ /*!
+@def OSGetKMAppHintBOOL(device, state, name, appHintDefault, value)
+@Description    Interface for retrieval of IMG_BOOL km app hint.
+                               For non-linux operating systems, this macro implements a call
+                               from server code to PVRSRVGetAppHint() declared in
+                               services_client_porting.h, effectively making it 'shared' code.
+@Input          device            Device node
+@Input          state             App hint state
+@Input          name              Name used to identify app hint
+@Input          appHintDefault    Default value to be returned if no
+                                                                 app hint is found.
+@Output         value             Pointer to returned app hint value.
+ */ /**************************************************************************/
+#define OSGetKMAppHintBOOL(device, state, name, appHintDefault, value) \
+       PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value)
+
+/**************************************************************************/ /*!
+@def OSGetKMAppHintSTRING(device, state, name, appHintDefault, buffer, size)
+@Description    Interface for retrieval of string km app hint.
+                               For non-linux operating systems, this macro implements a call
+                               from server code to PVRSRVGetAppHint() declared in
+                               services_client_porting.h, effectively making it 'shared' code.
+@Input          device            Device node
+@Input          state             App hint state
+@Input          name              Name used to identify app hint
+@Input          appHintDefault    Default value to be returned if no
+                                                                 app hint is found.
+@Output         buffer            Buffer used to return app hint string.
+@Input                 size                      Size of the buffer.
+ */ /**************************************************************************/
+#define OSGetKMAppHintSTRING(device, state, name, appHintDefault, buffer, size) \
+       (PVR_UNREFERENCED_PARAMETER(size), PVRSRVGetAppHint(state, # name, IMG_STRING_TYPE, appHintDefault, buffer))
+
+/**************************************************************************/ /*!
+@def OSCreateKMAppHintState(state)
+@Description    Creates the app hint state.
+                               For non-linux operating systems, this macro implements a call
+                               from server code to PVRSRVCreateAppHintState() declared in
+                               services_client_porting.h, effectively making it 'shared' code.
+@Output          state             App hint state
+ */ /**************************************************************************/
+#define OSCreateKMAppHintState(state) \
+       PVRSRVCreateAppHintState(IMG_SRV_UM, 0, state)
+
+/**************************************************************************/ /*!
+@def OSFreeKMAppHintState(state)
+@Description    Frees the app hint state.
+                               For non-linux operating systems, this macro implements a call
+                               from server code to PVRSRVFreeAppHintState() declared in
+                               services_client_porting.h, effectively making it 'shared' code.
+@Input           state             App hint state to be freed
+ */ /**************************************************************************/
+#define OSFreeKMAppHintState(state) \
+       PVRSRVFreeAppHintState(IMG_SRV_UM, state)
+
+#endif /* defined(__linux__) */
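+
+/* Usage sketch: reading a 32-bit AppHint with a fall-back default. The
+ * AppHint name EnableFeatureX is hypothetical; psDeviceNode is assumed to
+ * come from the caller and error handling is omitted.
+ *
+ *     void *pvState;
+ *     IMG_UINT32 ui32Default = 0, ui32Value = 0;
+ *     OSCreateKMAppHintState(&pvState);
+ *     OSGetKMAppHintUINT32(psDeviceNode, pvState, EnableFeatureX, &ui32Default, &ui32Value);
+ *     OSFreeKMAppHintState(pvState);
+ */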
+
+#endif /* OSKM_APPHINT_H */
+
+/******************************************************************************
+ End of file (oskm_apphint.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/ospvr_gputrace.h b/drivers/gpu/drm/img/img-rogue/services/server/include/ospvr_gputrace.h
new file mode 100644 (file)
index 0000000..0d6b89f
--- /dev/null
@@ -0,0 +1,167 @@
+/*************************************************************************/ /*!
+@File           ospvr_gputrace.h
+@Title          PVR GPU Trace module common environment interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVR_GPUTRACE_H_
+#define PVR_GPUTRACE_H_
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "rgx_hwperf.h"
+#include "device.h"
+
+#if defined(__linux__)
+
+void PVRGpuTraceEnqueueEvent(
+               PVRSRV_DEVICE_NODE *psDevNode,
+               IMG_UINT32 ui32FirmwareCtx,
+               IMG_UINT32 ui32ExternalJobRef,
+               IMG_UINT32 ui32InternalJobRef,
+               RGX_HWPERF_KICK_TYPE eKickType);
+
+/* Early initialisation of GPU Trace events logic.
+ * This function is called on *driver* initialisation. */
+PVRSRV_ERROR PVRGpuTraceSupportInit(void);
+
+/* GPU Trace resources final cleanup.
+ * This function is called on driver de-initialisation. */
+void PVRGpuTraceSupportDeInit(void);
+
+/* Initialisation for AppHint callbacks.
+ * This function is called during the late stage of driver initialisation,
+ * after the debugfs sub-system has been initialised but before device
+ * initialisation. */
+void PVRGpuTraceInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/* Per-device initialisation of the GPU Trace resources */
+PVRSRV_ERROR PVRGpuTraceInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/* Per-device cleanup for the GPU Trace resources. */
+void PVRGpuTraceDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/* Enables the gpu trace sub-system for a given device. */
+PVRSRV_ERROR PVRGpuTraceSetEnabled(
+               PVRSRV_DEVICE_NODE *psDeviceNode,
+               IMG_BOOL bNewValue);
+
+/* Returns IMG_TRUE if the gpu trace sub-system has been enabled (but not
+ * necessarily initialised). */
+IMG_BOOL PVRGpuTraceIsEnabled(void);
+
+/* Performs some initialisation steps if the feature was enabled
+ * on driver startup. */
+void PVRGpuTraceInitIfEnabled(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/* FTrace events callbacks interface */
+
+void PVRGpuTraceEnableUfoCallback(void);
+void PVRGpuTraceDisableUfoCallback(void);
+
+void PVRGpuTraceEnableFirmwareActivityCallback(void);
+void PVRGpuTraceDisableFirmwareActivityCallback(void);
+
+#else /* defined(__linux__) */
+
+static inline void PVRGpuTraceEnqueueEvent(
+               PVRSRV_DEVICE_NODE *psDevNode,
+               IMG_UINT32 ui32FirmwareCtx,
+               IMG_UINT32 ui32ExternalJobRef,
+               IMG_UINT32 ui32InternalJobRef,
+               RGX_HWPERF_KICK_TYPE eKickType)
+{
+       PVR_UNREFERENCED_PARAMETER(psDevNode);
+       PVR_UNREFERENCED_PARAMETER(ui32FirmwareCtx);
+       PVR_UNREFERENCED_PARAMETER(ui32ExternalJobRef);
+       PVR_UNREFERENCED_PARAMETER(ui32InternalJobRef);
+       PVR_UNREFERENCED_PARAMETER(eKickType);
+}
+
+static inline PVRSRV_ERROR PVRGpuTraceSupportInit(void) {
+       return PVRSRV_OK;
+}
+
+static inline void PVRGpuTraceSupportDeInit(void) {}
+
+static inline void PVRGpuTraceInitAppHintCallbacks(
+               const PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+}
+
+static inline PVRSRV_ERROR PVRGpuTraceInitDevice(
+               PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       return PVRSRV_OK;
+}
+
+static inline void PVRGpuTraceDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+}
+
+static inline PVRSRV_ERROR PVRGpuTraceSetEnabled(
+               PVRSRV_DEVICE_NODE *psDeviceNode,
+               IMG_BOOL bNewValue)
+{
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(bNewValue);
+       return PVRSRV_OK;
+}
+
+static inline IMG_BOOL PVRGpuTraceIsEnabled(void)
+{
+       return IMG_FALSE;
+}
+
+static inline void PVRGpuTraceInitIfEnabled(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+}
+
+static inline void PVRGpuTraceEnableUfoCallback(void) {}
+static inline void PVRGpuTraceDisableUfoCallback(void) {}
+
+static inline void PVRGpuTraceEnableFirmwareActivityCallback(void) {}
+static inline void PVRGpuTraceDisableFirmwareActivityCallback(void) {}
+
+#endif /* defined(__linux__) */
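+
+/* Usage sketch: the bring-up order implied by the comments above; error
+ * handling is omitted and psDeviceNode is assumed to come from the
+ * device-creation path.
+ *
+ *     PVRGpuTraceSupportInit();                       on driver initialisation
+ *     PVRGpuTraceInitAppHintCallbacks(psDeviceNode);  after debugfs init
+ *     PVRGpuTraceInitDevice(psDeviceNode);            per-device init
+ *     PVRGpuTraceInitIfEnabled(psDeviceNode);
+ */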
+
+#endif /* PVR_GPUTRACE_H_ */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/ossecure_export.h b/drivers/gpu/drm/img/img-rogue/services/server/include/ossecure_export.h
new file mode 100644 (file)
index 0000000..020f5f5
--- /dev/null
@@ -0,0 +1,81 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "connection_server.h"
+
+/*************************************************************************/ /*!
+@Function       OSSecureExport
+@Description    Assigns an OS-specific 'token' to allow a resource
+                to be securely referenced by another process.
+                A process wishing to reference the exported resource
+                should call OSSecureImport(), passing its OS-specific
+                reference to the same resource.
+                For the export/import to be deemed 'secure', the
+                implementation should ensure that the OS-specific
+                reference can only be meaningfully used by a process
+                which is permitted to do so.
+@Input          pszName        name of the "class" of new secure file
+@Input          pfnReleaseFunc  pointer to the function to be called
+                               when the secure file is closed
+@Input          pvData         pointer to the actual resource that
+                               is being exported
+@Output         phSecure       the returned secure token
+@Return         PVRSRV_OK      on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSSecureExport(const IMG_CHAR *pszName,
+                            PVRSRV_ERROR (*pfnReleaseFunc)(void *),
+                            void *pvData,
+                            IMG_SECURE_TYPE *phSecure);
+
+/*************************************************************************/ /*!
+@Function       OSSecureImport
+@Description    Imports an OS-specific 'token' that allows a resource
+                allocated by another process to be securely referenced by
+                the current process.
+@Input          hSecure             the secure token for the resource to
+                                    be imported
+@Output         ppvData             pointer to the actual resource that
+                                    is being referenced
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSSecureImport(IMG_SECURE_TYPE hSecure, void **ppvData);
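+
+/* Usage sketch: exporting a resource from one process and importing it in
+ * another. ReleaseResource and pvResource are placeholders for the caller's
+ * own cleanup function and data; error handling is omitted.
+ *
+ *     IMG_SECURE_TYPE hSecure;
+ *     OSSecureExport("resource-class", ReleaseResource, pvResource, &hSecure);
+ *
+ *     void *pvImported;
+ *     OSSecureImport(hSecure, &pvImported);
+ */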
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/pdump_km.h b/drivers/gpu/drm/img/img-rogue/services/server/include/pdump_km.h
new file mode 100644 (file)
index 0000000..b9cb5f6
--- /dev/null
@@ -0,0 +1,1136 @@
+/*************************************************************************/ /*!
+@File
+@Title          pdump functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Main APIs for pdump functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PDUMP_KM_H
+#define PDUMP_KM_H
+
+#if defined(PDUMP)
+ #if defined(__linux__)
+  #include <linux/version.h>
+  #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+   #include <linux/stdarg.h>
+  #else
+   #include <stdarg.h>
+  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
+ #else
+  #include <stdarg.h>
+ #endif /* __linux__ */
+#endif /* PDUMP */
+
+/* services/srvkm/include/ */
+#include "device.h"
+
+/* include/ */
+#include "pvrsrv_error.h"
+
+
+#if defined(__KERNEL__) && defined(__linux__) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#include "connection_server.h"
+/* Pull in pdump flags from services include */
+#include "pdump.h"
+#include "pdumpdefs.h"
+
+/* Define this to enable the PDUMP_HERE trace in the server */
+#undef PDUMP_TRACE
+
+#if defined(PDUMP_TRACE)
+#define PDUMP_HERE_VAR  IMG_UINT32 here = 0;
+#define PDUMP_HERE(a)  { here = (a); if (ui32Flags & PDUMP_FLAGS_DEBUG) PVR_DPF((PVR_DBG_WARNING, "HERE %d", (a))); }
+#define PDUMP_HEREA(a) { here = (a); PVR_DPF((PVR_DBG_WARNING, "HERE ALWAYS %d", (a))); }
+#else
+#define PDUMP_HERE_VAR  IMG_UINT32 here = 0;
+#define PDUMP_HERE(a)  here = (a);
+#define PDUMP_HEREA(a) here = (a);
+#endif
+
+#define PDUMP_PD_UNIQUETAG     (IMG_HANDLE)0
+#define PDUMP_PT_UNIQUETAG     (IMG_HANDLE)0
+
+/* Invalid value for PDump block number */
+#define PDUMP_BLOCKNUM_INVALID      IMG_UINT32_MAX
+
+typedef struct _PDUMP_CONNECTION_DATA_ PDUMP_CONNECTION_DATA;
+
+/* PDump transition events */
+typedef enum _PDUMP_TRANSITION_EVENT_
+{
+       PDUMP_TRANSITION_EVENT_NONE,              /* No event */
+       PDUMP_TRANSITION_EVENT_BLOCK_FINISHED,    /* Block mode event, current PDump-block has finished */
+       PDUMP_TRANSITION_EVENT_BLOCK_STARTED,     /* Block mode event, new PDump-block has started */
+       PDUMP_TRANSITION_EVENT_RANGE_ENTERED,     /* Transition into capture range */
+       PDUMP_TRANSITION_EVENT_RANGE_EXITED,      /* Transition out of capture range */
+} PDUMP_TRANSITION_EVENT;
+
+typedef PVRSRV_ERROR (*PFN_PDUMP_TRANSITION)(void *pvData, void *pvDevice, PDUMP_TRANSITION_EVENT eEvent, IMG_UINT32 ui32PDumpFlags);
+typedef void (*PFN_PDUMP_SYNCBLOCKS)(PVRSRV_DEVICE_NODE *psDevNode, void *pvData, PDUMP_TRANSITION_EVENT eEvent);
+
+typedef PVRSRV_ERROR (*PFN_PDUMP_TRANSITION_FENCE_SYNC)(void *pvData, PDUMP_TRANSITION_EVENT eEvent);
+
+#ifdef PDUMP
+
+/*! Macro used to record a panic in the PDump script stream */
+#define PDUMP_PANIC(_dev, _id, _msg) do \
+               { PVRSRV_ERROR _eE;\
+                       _eE = PDumpPanic((_dev), ((RGX_PDUMP_PANIC_ ## _id) & 0xFFFF), _msg, __func__, __LINE__);       \
+                       PVR_LOG_IF_ERROR(_eE, "PDumpPanic");\
+               MSC_SUPPRESS_4127\
+               } while (0)
+
+/*! Macro used to record a driver error in the PDump script stream to invalidate the capture */
+#define PDUMP_ERROR(_dev, _err, _msg) \
+       (void)PDumpCaptureError((_dev), _err, _msg, __func__, __LINE__)
+
+#define SZ_MSG_SIZE_MAX                        PVRSRV_PDUMP_MAX_COMMENT_SIZE
+#define SZ_SCRIPT_SIZE_MAX             PVRSRV_PDUMP_MAX_COMMENT_SIZE
+#define SZ_FILENAME_SIZE_MAX   (PVRSRV_PDUMP_MAX_FILENAME_SIZE+sizeof(PDUMP_PARAM_N_FILE_NAME))
+
+#define PDUMP_GET_SCRIPT_STRING()                                                                                                                                                      \
+       IMG_HANDLE hScript;                                                                                                                                                                             \
+       void *pvScriptAlloc;                                                                                                                                                                    \
+       IMG_UINT32 ui32MaxLen = SZ_SCRIPT_SIZE_MAX-1;                                                                                                                   \
+       pvScriptAlloc = OSAllocMem( SZ_SCRIPT_SIZE_MAX );                                                                                                               \
+       if (!pvScriptAlloc)                                                                                                                                                                             \
+       {                                                                                                                                                                                                               \
+               PVR_DPF((PVR_DBG_ERROR, "PDUMP_GET_SCRIPT_STRING() failed to allocate memory for script buffer"));      \
+               return PVRSRV_ERROR_OUT_OF_MEMORY;                                                                                                                                      \
+       }                                                                                                                                                                                                               \
+                                                                                                                                                                                                                       \
+       hScript = (IMG_HANDLE) pvScriptAlloc;
+
+#define PDUMP_GET_MSG_STRING()                                                                                                                                                         \
+       IMG_CHAR *pszMsg;                                                                                                                                                                               \
+       void *pvMsgAlloc;                                                                                                                                                                               \
+       IMG_UINT32 ui32MaxLen = SZ_MSG_SIZE_MAX-1;                                                                                                                              \
+       pvMsgAlloc = OSAllocMem( SZ_MSG_SIZE_MAX );                                                                                                                             \
+       if (!pvMsgAlloc)                                                                                                                                                                                \
+       {                                                                                                                                                                                                               \
+               PVR_DPF((PVR_DBG_ERROR, "PDUMP_GET_MSG_STRING() failed to allocate memory for message buffer"));        \
+               return PVRSRV_ERROR_OUT_OF_MEMORY;                                                                                                                                      \
+       }                                                                                                                                                                                                               \
+       pszMsg = (IMG_CHAR *)pvMsgAlloc;
+
+#define PDUMP_GET_SCRIPT_AND_FILE_STRING()                                                                                                                                     \
+       IMG_HANDLE hScript;                                                                                                                                                                             \
+       IMG_CHAR *pszFileName;                                                                                                                                                                  \
+       IMG_UINT32 ui32MaxLenScript = SZ_SCRIPT_SIZE_MAX-1;                                                                                                             \
+       void *pvScriptAlloc;                                                                                                                                                                    \
+       void *pvFileAlloc;                                                                                                                                                                              \
+       pvScriptAlloc = OSAllocMem( SZ_SCRIPT_SIZE_MAX );                                                                                                               \
+       if (!pvScriptAlloc)                                                                                                                                                                             \
+       {                                                                                                                                                                                                               \
+               PVR_DPF((PVR_DBG_ERROR, "PDUMP_GET_SCRIPT_AND_FILE_STRING() failed to allocate memory for script buffer"));             \
+               return PVRSRV_ERROR_OUT_OF_MEMORY;                                                                                                                                      \
+       }                                                                                                                                                                                                               \
+                                                                                                                                                                                                                       \
+       hScript = (IMG_HANDLE) pvScriptAlloc;                                                                                                                                   \
+       pvFileAlloc = OSAllocMem( SZ_FILENAME_SIZE_MAX );                                                                                                               \
+       if (!pvFileAlloc)                                                                                                                                                                               \
+       {                                                                                                                                                                                                               \
+               PVR_DPF((PVR_DBG_ERROR, "PDUMP_GET_SCRIPT_AND_FILE_STRING() failed to allocate memory for filename buffer"));   \
+               OSFreeMem(pvScriptAlloc);                                                                                                                                                       \
+               return PVRSRV_ERROR_OUT_OF_MEMORY;                                                                                                                                      \
+       }                                                                                                                                                                                                               \
+       pszFileName = (IMG_CHAR *)pvFileAlloc;
+
+#define PDUMP_RELEASE_SCRIPT_STRING()                                                                                                                                          \
+       if (pvScriptAlloc)                                                                                                                                                                              \
+       {                                                                                                                                                                                                               \
+               OSFreeMem(pvScriptAlloc);                                                                                                                                                       \
+               pvScriptAlloc = NULL;                                                                                                                                                           \
+       }
+
+#define PDUMP_RELEASE_MSG_STRING()                                                                                                                                                     \
+       if (pvMsgAlloc)                                                                                                                                                                                 \
+       {                                                                                                                                                                                                               \
+               OSFreeMem(pvMsgAlloc);                                                                                                                                                          \
+               pvMsgAlloc = NULL;                                                                                                                                                                      \
+       }
+
+#define PDUMP_RELEASE_FILE_STRING()                                                                                                                                                    \
+       if (pvFileAlloc)                                                                                                                                                                                \
+       {                                                                                                                                                                                                               \
+               OSFreeMem(pvFileAlloc);                                                                                                                                                         \
+               pvFileAlloc = NULL;                                                                                                                                                                     \
+       }
+
+#define PDUMP_RELEASE_SCRIPT_AND_FILE_STRING()                                                                                                                         \
+       if (pvScriptAlloc)                                                                                                                                                                              \
+       {                                                                                                                                                                                                               \
+               OSFreeMem(pvScriptAlloc);                                                                                                                                                       \
+               pvScriptAlloc = NULL;                                                                                                                                                           \
+       }                                                                                                                                                                                                               \
+       if (pvFileAlloc)                                                                                                                                                                                \
+       {                                                                                                                                                                                                               \
+               OSFreeMem(pvFileAlloc);                                                                                                                                                         \
+               pvFileAlloc = NULL;                                                                                                                                                                     \
+       }
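+
+/* Typical usage sketch for the GET/RELEASE string macros above, shown in a
+ * hypothetical ExampleEmitScriptLine() helper. The enclosing function must
+ * return PVRSRV_ERROR, since PDUMP_GET_SCRIPT_STRING() returns on allocation
+ * failure. PDumpSNPrintf() and PDumpWriteScript() are declared further below.
+ *
+ *   PVRSRV_ERROR ExampleEmitScriptLine(PVRSRV_DEVICE_NODE *psDeviceNode,
+ *                                      IMG_UINT32 ui32Flags)
+ *   {
+ *       PVRSRV_ERROR eError;
+ *       PDUMP_GET_SCRIPT_STRING();   // declares hScript and ui32MaxLen
+ *       eError = PDumpSNPrintf(hScript, ui32MaxLen, "-- example script line");
+ *       if (eError == PVRSRV_OK)
+ *       {
+ *           (void) PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
+ *       }
+ *       PDUMP_RELEASE_SCRIPT_STRING();
+ *       return eError;
+ *   }
+ */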
+
+
+/* Shared across pdump_x files */
+PVRSRV_ERROR PDumpInitCommon(void);
+void PDumpDeInitCommon(void);
+PVRSRV_ERROR PDumpReady(void);
+void PDumpGetParameterZeroPageInfo(PDUMP_FILEOFFSET_T *puiZeroPageOffset,
+                                   size_t *puiZeroPageSize,
+                                   const IMG_CHAR **ppszZeroPageFilename);
+
+void PDumpConnectionNotify(PVRSRV_DEVICE_NODE *psDeviceNode);
+void PDumpDisconnectionNotify(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+void PDumpStopInitPhase(PVRSRV_DEVICE_NODE *psDeviceNode);
+PVRSRV_ERROR PDumpSetFrameKM(CONNECTION_DATA *psConnection,
+                             PVRSRV_DEVICE_NODE *psDeviceNode,
+                             IMG_UINT32 ui32Frame);
+PVRSRV_ERROR PDumpGetFrameKM(CONNECTION_DATA *psConnection,
+                             PVRSRV_DEVICE_NODE * psDeviceNode,
+                             IMG_UINT32* pui32Frame);
+PVRSRV_ERROR PDumpCommentKM(CONNECTION_DATA *psConnection,
+                            PVRSRV_DEVICE_NODE *psDeviceNode,
+                            IMG_UINT32 ui32CommentSize,
+                            IMG_CHAR *pszComment,
+                            IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR PDumpSetDefaultCaptureParamsKM(CONNECTION_DATA *psConnection,
+                                            PVRSRV_DEVICE_NODE *psDeviceNode,
+                                            IMG_UINT32 ui32Mode,
+                                            IMG_UINT32 ui32Start,
+                                            IMG_UINT32 ui32End,
+                                            IMG_UINT32 ui32Interval,
+                                            IMG_UINT32 ui32MaxParamFileSize);
+
+
+PVRSRV_ERROR PDumpReg32(PVRSRV_DEVICE_NODE *psDeviceNode,
+                        IMG_CHAR *pszPDumpRegName,
+                        IMG_UINT32 ui32RegAddr,
+                        IMG_UINT32 ui32RegValue,
+                        IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR PDumpReg64(PVRSRV_DEVICE_NODE *psDeviceNode,
+                        IMG_CHAR *pszPDumpRegName,
+                        IMG_UINT32 ui32RegAddr,
+                        IMG_UINT64 ui64RegValue,
+                        IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR PDumpRegLabelToReg64(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                  IMG_CHAR *pszPDumpRegName,
+                                  IMG_UINT32 ui32RegDst,
+                                  IMG_UINT32 ui32RegSrc,
+                                  IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR PDumpPhysHandleToInternalVar64(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                            IMG_CHAR *pszInternalVar,
+                                            IMG_HANDLE hPdumpPages,
+                                            IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR PDumpMemLabelToInternalVar64(IMG_CHAR *pszInternalVar,
+                                          PMR *psPMR,
+                                          IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                                          IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR PDumpInternalVarToMemLabel(PMR *psPMR,
+                                        IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                                        IMG_CHAR *pszInternalVar,
+                                        IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR PDumpWriteVarORValueOp(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                    const IMG_CHAR *pszInternalVariable,
+                                    const IMG_UINT64 ui64Value,
+                                    const IMG_UINT32 ui32PDumpFlags);
+
+PVRSRV_ERROR PDumpWriteVarANDValueOp(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     const IMG_CHAR *pszInternalVariable,
+                                     const IMG_UINT64 ui64Value,
+                                     const IMG_UINT32 ui32PDumpFlags);
+
+PVRSRV_ERROR PDumpWriteVarSHRValueOp(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     const IMG_CHAR *pszInternalVariable,
+                                     const IMG_UINT64 ui64Value,
+                                     const IMG_UINT32 ui32PDumpFlags);
+
+PVRSRV_ERROR PDumpWriteVarORVarOp(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                  const IMG_CHAR *pszInternalVar,
+                                  const IMG_CHAR *pszInternalVar2,
+                                  const IMG_UINT32 ui32PDumpFlags);
+
+PVRSRV_ERROR PDumpWriteVarANDVarOp(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                   const IMG_CHAR *pszInternalVar,
+                                   const IMG_CHAR *pszInternalVar2,
+                                   const IMG_UINT32 ui32PDumpFlags);
+
+PVRSRV_ERROR PDumpInternalVarToReg32(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     IMG_CHAR *pszPDumpRegName,
+                                     IMG_UINT32 ui32Reg,
+                                     IMG_CHAR *pszInternalVar,
+                                     IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR PDumpInternalVarToReg64(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     IMG_CHAR *pszPDumpRegName,
+                                     IMG_UINT32 ui32Reg,
+                                     IMG_CHAR *pszInternalVar,
+                                     IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR PDumpMemLabelToMem32(PMR *psPMRSource,
+                                  PMR *psPMRDest,
+                                  IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+                                  IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+                                  IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR PDumpMemLabelToMem64(PMR *psPMRSource,
+                                  PMR *psPMRDest,
+                                  IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+                                  IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+                                  IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR PDumpRegLabelToMem32(IMG_CHAR *pszPDumpRegName,
+                                  IMG_UINT32 ui32Reg,
+                                  PMR *psPMR,
+                                  IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                                  IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR PDumpRegLabelToMem64(IMG_CHAR *pszPDumpRegName,
+                                  IMG_UINT32 ui32Reg,
+                                  PMR *psPMR,
+                                  IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                                  IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR PDumpRegLabelToInternalVar(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                        IMG_CHAR *pszPDumpRegName,
+                                        IMG_UINT32 ui32Reg,
+                                        IMG_CHAR *pszInternalVar,
+                                        IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR PDumpSAW(PVRSRV_DEVICE_NODE *psDeviceNode,
+                      IMG_CHAR      *pszDevSpaceName,
+                      IMG_UINT32    ui32HPOffsetBytes,
+                      IMG_UINT32    ui32NumSaveBytes,
+                      IMG_CHAR      *pszOutfileName,
+                      IMG_UINT32    ui32OutfileOffsetByte,
+                      PDUMP_FLAGS_T uiPDumpFlags);
+
+PVRSRV_ERROR PDumpRegPolKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+                           IMG_CHAR            *pszPDumpRegName,
+                           IMG_UINT32          ui32RegAddr,
+                           IMG_UINT32          ui32RegValue,
+                           IMG_UINT32          ui32Mask,
+                           IMG_UINT32          ui32Flags,
+                           PDUMP_POLL_OPERATOR eOperator);
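+
+/* Illustrative sketch: emit a script poll that waits for bit 0 of a register
+ * to read back as 1. The register-space name and offset are placeholders, and
+ * an equality operator (e.g. PDUMP_POLL_OPERATOR_EQUAL from pdumpdefs.h) is
+ * assumed to be available.
+ *
+ *   eError = PDumpRegPolKM(psDeviceNode, "SYSREG", 0x0000, 0x1, 0x1,
+ *                          PDUMP_FLAGS_CONTINUOUS,
+ *                          PDUMP_POLL_OPERATOR_EQUAL);
+ */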
+
+
+/**************************************************************************/ /*!
+@Function       PDumpImageDescriptor
+@Description    PDumps image data out as an IMGBv2 data section
+@Input          psDeviceNode         Pointer to device node.
+@Input          ui32MMUContextID     PDUMP MMU context ID.
+@Input          pszSABFileName       Pointer to string containing file name of
+                                     Image being SABed
+@Input          sData                GPU virtual address of this surface.
+@Input          ui32DataSize         Image data size
+@Input          ui32LogicalWidth     Image logical width
+@Input          ui32LogicalHeight    Image logical height
+@Input          ui32PhysicalWidth    Image physical width
+@Input          ui32PhysicalHeight   Image physical height
+@Input          ePixFmt              Image pixel format
+@Input          eMemLayout           Image memory layout
+@Input          eFBCompression       FB compression mode
+@Input          paui32FBCClearColour FB clear colour (Only applicable to FBC surfaces)
+@Input          eFBCSwizzle          FBC channel swizzle (Only applicable to FBC surfaces)
+@Input          sHeader              GPU virtual address of the headers of this
+                                     surface (Only applicable to FBC surfaces)
+@Input          ui32HeaderSize       Header size (Only applicable to FBC surfaces)
+@Input          ui32PDumpFlags       PDUMP flags
+@Return         PVRSRV_ERROR:        PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                     error code
+*/ /***************************************************************************/
+PVRSRV_ERROR PDumpImageDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                  IMG_UINT32 ui32MMUContextID,
+                                  IMG_CHAR *pszSABFileName,
+                                  IMG_DEV_VIRTADDR sData,
+                                  IMG_UINT32 ui32DataSize,
+                                  IMG_UINT32 ui32LogicalWidth,
+                                  IMG_UINT32 ui32LogicalHeight,
+                                  IMG_UINT32 ui32PhysicalWidth,
+                                  IMG_UINT32 ui32PhysicalHeight,
+                                  PDUMP_PIXEL_FORMAT ePixFmt,
+                                  IMG_MEMLAYOUT eMemLayout,
+                                  IMG_FB_COMPRESSION eFBCompression,
+                                  const IMG_UINT32 *paui32FBCClearColour,
+                                  PDUMP_FBC_SWIZZLE eFBCSwizzle,
+                                  IMG_DEV_VIRTADDR sHeader,
+                                  IMG_UINT32 ui32HeaderSize,
+                                  IMG_UINT32 ui32PDumpFlags);
+
+/**************************************************************************/ /*!
+@Function       PDumpDataDescriptor
+@Description    PDumps non-image data out as an IMGCv1 data section
+@Input          psDeviceNode         Pointer to device node.
+@Input          ui32MMUContextID     PDUMP MMU context ID.
+@Input          pszSABFileName       Pointer to string containing file name of
+                                     Data being SABed
+@Input          sData                GPU virtual address of this data.
+@Input          ui32DataSize         Data size
+@Input          ui32HeaderType       Header type
+@Input          ui32ElementType      Data element type
+@Input          ui32ElementCount     Number of data elements
+@Input          ui32PDumpFlags       PDUMP flags
+@Return         PVRSRV_ERROR:        PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                     error code
+*/ /***************************************************************************/
+PVRSRV_ERROR PDumpDataDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                 IMG_UINT32 ui32MMUContextID,
+                                 IMG_CHAR *pszSABFileName,
+                                 IMG_DEV_VIRTADDR sData,
+                                 IMG_UINT32 ui32DataSize,
+                                 IMG_UINT32 ui32HeaderType,
+                                 IMG_UINT32 ui32ElementType,
+                                 IMG_UINT32 ui32ElementCount,
+                                 IMG_UINT32 ui32PDumpFlags);
+
+
+PVRSRV_ERROR PDumpReadRegKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+                            IMG_CHAR *pszPDumpRegName,
+                            IMG_CHAR *pszFileName,
+                            IMG_UINT32 ui32FileOffset,
+                            IMG_UINT32 ui32Address,
+                            IMG_UINT32 ui32Size,
+                            IMG_UINT32 ui32PDumpFlags);
+
+__printf(3, 4)
+PVRSRV_ERROR PDumpCommentWithFlags(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                   IMG_UINT32 ui32Flags,
+                                   IMG_CHAR* pszFormat,
+                                   ...);
+
+PVRSRV_ERROR PDumpCommentWithFlagsVA(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     IMG_UINT32 ui32Flags,
+                                     const IMG_CHAR * pszFormat,
+                                     va_list args);
+
+PVRSRV_ERROR PDumpPanic(PVRSRV_DEVICE_NODE *psDeviceNode,
+                        IMG_UINT32      ui32PanicNo,
+                        IMG_CHAR*       pszPanicMsg,
+                        const IMG_CHAR* pszPPFunc,
+                        IMG_UINT32      ui32PPline);
+
+PVRSRV_ERROR PDumpCaptureError(PVRSRV_DEVICE_NODE *psDeviceNode,
+                               PVRSRV_ERROR    ui32ErrorNo,
+                               IMG_CHAR*       pszErrorMsg,
+                               const IMG_CHAR* pszPPFunc,
+                               IMG_UINT32      ui32PPline);
+
+PVRSRV_ERROR PDumpIsLastCaptureFrameKM(IMG_BOOL *pbIsLastCaptureFrame);
+
+PVRSRV_ERROR PDumpGetStateKM(IMG_UINT64 *ui64State);
+
+PVRSRV_ERROR PDumpForceCaptureStopKM(CONNECTION_DATA *psConnection,
+                                     PVRSRV_DEVICE_NODE *psDeviceNode);
+
+PVRSRV_ERROR PDumpRegRead32ToInternalVar(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                         IMG_CHAR *pszPDumpRegName,
+                                         IMG_UINT32 ui32RegOffset,
+                                         IMG_CHAR *pszInternalVar,
+                                         IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR PDumpRegRead32(PVRSRV_DEVICE_NODE *psDeviceNode,
+                            IMG_CHAR *pszPDumpRegName,
+                            const IMG_UINT32 dwRegOffset,
+                            IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR PDumpRegRead64(PVRSRV_DEVICE_NODE *psDeviceNode,
+                            IMG_CHAR *pszPDumpRegName,
+                            const IMG_UINT32 dwRegOffset,
+                            IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR PDumpRegRead64ToInternalVar(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                         IMG_CHAR *pszPDumpRegName,
+                                         IMG_CHAR *pszInternalVar,
+                                         const IMG_UINT32 dwRegOffset,
+                                         IMG_UINT32    ui32Flags);
+
+PVRSRV_ERROR PDumpIDLWithFlags(PVRSRV_DEVICE_NODE *psDeviceNode,
+                               IMG_UINT32 ui32Clocks,
+                               IMG_UINT32 ui32Flags);
+PVRSRV_ERROR PDumpIDL(PVRSRV_DEVICE_NODE *psDeviceNode,
+                      IMG_UINT32 ui32Clocks);
+
+PVRSRV_ERROR PDumpRegBasedCBP(PVRSRV_DEVICE_NODE *psDeviceNode,
+                              IMG_CHAR   *pszPDumpRegName,
+                              IMG_UINT32 ui32RegOffset,
+                              IMG_UINT32 ui32WPosVal,
+                              IMG_UINT32 ui32PacketSize,
+                              IMG_UINT32 ui32BufferSize,
+                              IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR PDumpTRG(PVRSRV_DEVICE_NODE *psDeviceNode,
+                      IMG_CHAR *pszMemSpace,
+                      IMG_UINT32 ui32MMUCtxID,
+                      IMG_UINT32 ui32RegionID,
+                      IMG_BOOL bEnable,
+                      IMG_UINT64 ui64VAddr,
+                      IMG_UINT64 ui64LenBytes,
+                      IMG_UINT32 ui32XStride,
+                      IMG_UINT32 ui32Flags);
+
+void PDumpLock(void);
+void PDumpUnlock(void);
+
+PVRSRV_ERROR PDumpRegCondStr(IMG_CHAR            **ppszPDumpCond,
+                             IMG_CHAR            *pszPDumpRegName,
+                             IMG_UINT32          ui32RegAddr,
+                             IMG_UINT32          ui32RegValue,
+                             IMG_UINT32          ui32Mask,
+                             IMG_UINT32          ui32Flags,
+                             PDUMP_POLL_OPERATOR eOperator);
+
+PVRSRV_ERROR PDumpInternalValCondStr(IMG_CHAR            **ppszPDumpCond,
+                                     IMG_CHAR            *pszInternalVar,
+                                     IMG_UINT32          ui32RegValue,
+                                     IMG_UINT32          ui32Mask,
+                                     IMG_UINT32          ui32Flags,
+                                     PDUMP_POLL_OPERATOR eOperator);
+
+PVRSRV_ERROR PDumpIfKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+                       IMG_CHAR *pszPDumpCond, IMG_UINT32 ui32PDumpFlags);
+PVRSRV_ERROR PDumpElseKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+                         IMG_CHAR *pszPDumpCond, IMG_UINT32 ui32PDumpFlags);
+PVRSRV_ERROR PDumpFiKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+                       IMG_CHAR *pszPDumpCond, IMG_UINT32 ui32PDumpFlags);
+PVRSRV_ERROR PDumpStartDoLoopKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                IMG_UINT32 ui32PDumpFlags);
+PVRSRV_ERROR PDumpEndDoWhileLoopKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                   IMG_CHAR *pszPDumpWhileCond,
+                                   IMG_UINT32 ui32PDumpFlags);
+PVRSRV_ERROR PDumpCOMCommand(PVRSRV_DEVICE_NODE *psDeviceNode,
+                             IMG_UINT32 ui32PDumpFlags,
+                             const IMG_CHAR *pszPDump);
+
+void PDumpPowerTransitionStart(PVRSRV_DEVICE_NODE *psDeviceNode);
+void PDumpPowerTransitionEnd(PVRSRV_DEVICE_NODE *psDeviceNode);
+IMG_BOOL PDumpCheckFlagsWrite(PVRSRV_DEVICE_NODE *psDeviceNode,
+                              IMG_UINT32 ui32Flags);
+
+/*!
+ * @name       PDumpWriteParameter
+ * @brief      General function for writing to the PDump stream. Used
+ *             mainly for memory dumps to the parameter stream; it is
+ *             usually more convenient to use PDumpWriteScript below
+ *             for the script stream.
+ * @param      psDeviceNode - device PDump pertains to
+ * @param      psui8Data - data to write
+ * @param      ui32Size - size of write
+ * @param      ui32Flags - PDump flags
+ * @param      pui32FileOffset - on return contains the file offset to
+ *                               the start of the parameter data
+ * @param      aszFilenameStr - pointer to at least a 20 char buffer to
+ *                              return the parameter filename
+ * @return     PVRSRV_OK on success, a failure code otherwise
+ */
+PVRSRV_ERROR PDumpWriteParameter(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                 IMG_UINT8 *psui8Data, IMG_UINT32 ui32Size,
+                                 IMG_UINT32 ui32Flags, IMG_UINT32* pui32FileOffset,
+                                 IMG_CHAR* aszFilenameStr);
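+
+/* Illustrative sketch: dump a buffer to the parameter stream and record where
+ * it landed, so a subsequent script line can reference it. The filename buffer
+ * size below is an assumption based on the "at least 20 char" note above.
+ *
+ *   IMG_CHAR   aszParamFile[PVRSRV_PDUMP_MAX_FILENAME_SIZE];
+ *   IMG_UINT32 ui32FileOffset = 0;
+ *   eError = PDumpWriteParameter(psDeviceNode, pui8Data, ui32Size, ui32Flags,
+ *                                &ui32FileOffset, aszParamFile);
+ *   // on success, emit a script line (e.g. an LDB) that names aszParamFile
+ *   // at ui32FileOffset
+ */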
+
+/*!
+ * @name       PDumpWriteScript
+ * @brief      Write a PDumpOS-created string to the "script" output stream
+ * @param      psDeviceNode - device PDump pertains to
+ * @param      hString - PDump OS layer handle of string buffer to write
+ * @param      ui32Flags - PDump flags
+ * @return     IMG_TRUE on success.
+ */
+IMG_BOOL PDumpWriteScript(PVRSRV_DEVICE_NODE *psDeviceNode,
+                          IMG_HANDLE hString, IMG_UINT32 ui32Flags);
+
+/**************************************************************************/ /*!
+@Function       PDumpSNPrintf
+@Description    Printf to OS-specific PDump state buffer. This function is
+                only called if PDUMP is defined.
+@Input          hBuf               handle of buffer to write into
+@Input          ui32ScriptSizeMax  maximum size of data to write (chars)
+@Input          pszFormat          format string
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+__printf(3, 4)
+PVRSRV_ERROR PDumpSNPrintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...);
+
+
+/*
+       PDumpWriteShiftedMaskedValue():
+
+       loads the "reference" address into an internal PDump register,
+       optionally shifts it right,
+       optionally shifts it left,
+       optionally masks it
+       then finally writes the computed value to the given destination address
+
+       i.e. it emits pdump language equivalent to this expression:
+
+        dest = ((&ref) >> SHRamount << SHLamount) & MASK
+*/
+PVRSRV_ERROR
+PDumpWriteShiftedMaskedValue(PVRSRV_DEVICE_NODE *psDeviceNode,
+                             const IMG_CHAR *pszDestRegspaceName,
+                             const IMG_CHAR *pszDestSymbolicName,
+                             IMG_DEVMEM_OFFSET_T uiDestOffset,
+                             const IMG_CHAR *pszRefRegspaceName,
+                             const IMG_CHAR *pszRefSymbolicName,
+                             IMG_DEVMEM_OFFSET_T uiRefOffset,
+                             IMG_UINT32 uiSHRAmount,
+                             IMG_UINT32 uiSHLAmount,
+                             IMG_UINT32 uiMask,
+                             IMG_DEVMEM_SIZE_T uiWordSize,
+                             IMG_UINT32 uiPDumpFlags);
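+
+/* Worked instance of the expression above: with uiSHRAmount = 12,
+ * uiSHLAmount = 0 and uiMask = 0xFFFFFFFF the emitted script computes
+ *
+ *   dest = ((&ref) >> 12 << 0) & 0xFFFFFFFF
+ *
+ * i.e. the reference address expressed as a 4K page frame number (lower
+ * 32 bits), with the word size and flags passed through unchanged.
+ */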
+
+/*
+       PDumpWriteSymbAddress():
+       writes the address of the "reference" to the offset given
+*/
+PVRSRV_ERROR
+PDumpWriteSymbAddress(PVRSRV_DEVICE_NODE *psDeviceNode,
+                      const IMG_CHAR *pszDestSpaceName,
+                      IMG_DEVMEM_OFFSET_T uiDestOffset,
+                      const IMG_CHAR *pszRefSymbolicName,
+                      IMG_DEVMEM_OFFSET_T uiRefOffset,
+                      const IMG_CHAR *pszPDumpDevName,
+                      IMG_UINT32 ui32WordSize,
+                      IMG_UINT32 ui32AlignShift,
+                      IMG_UINT32 ui32Shift,
+                      IMG_UINT32 uiPDumpFlags);
+
+/* Register the connection with the PDump subsystem */
+PVRSRV_ERROR
+PDumpRegisterConnection(void *hSyncPrivData,
+                        PFN_PDUMP_SYNCBLOCKS pfnPDumpSyncBlocks,
+                        PDUMP_CONNECTION_DATA **ppsPDumpConnectionData);
+
+/* Unregister the connection with the PDump subsystem */
+void
+PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData);
+
+/* Register for notification of PDump Transition into/out of capture range */
+PVRSRV_ERROR
+PDumpRegisterTransitionCallback(PDUMP_CONNECTION_DATA *psPDumpConnectionData,
+                                PFN_PDUMP_TRANSITION pfnCallback,
+                                void *hPrivData,
+                                void *pvDevice,
+                                void **ppvHandle);
+
+/* Unregister notification of PDump Transition */
+void
+PDumpUnregisterTransitionCallback(void *pvHandle);
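+
+/* Illustrative sketch: a transition callback matching PFN_PDUMP_TRANSITION and
+ * its registration. The callback name and private-data variables are
+ * hypothetical.
+ *
+ *   static PVRSRV_ERROR ExampleTransitionCB(void *pvData, void *pvDevice,
+ *                                           PDUMP_TRANSITION_EVENT eEvent,
+ *                                           IMG_UINT32 ui32PDumpFlags)
+ *   {
+ *       if (eEvent == PDUMP_TRANSITION_EVENT_RANGE_ENTERED)
+ *       {
+ *           // re-emit any state the capture range needs in order to replay
+ *       }
+ *       return PVRSRV_OK;
+ *   }
+ *
+ *   void *pvHandle;
+ *   eError = PDumpRegisterTransitionCallback(psPDumpConnectionData,
+ *                                            ExampleTransitionCB,
+ *                                            pvPrivData, pvDevice, &pvHandle);
+ */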
+
+PVRSRV_ERROR
+PDumpRegisterTransitionCallbackFenceSync(void *hPrivData,
+                                         PFN_PDUMP_TRANSITION_FENCE_SYNC pfnCallback,
+                                         void **ppvHandle);
+
+void
+PDumpUnregisterTransitionCallbackFenceSync(void *pvHandle);
+
+/* Notify PDump of a Transition into/out of capture range */
+PVRSRV_ERROR
+PDumpTransition(PVRSRV_DEVICE_NODE *psDeviceNode,
+                PDUMP_CONNECTION_DATA *psPDumpConnectionData,
+                PDUMP_TRANSITION_EVENT eEvent,
+                IMG_UINT32 ui32PDumpFlags);
+
+/* Check if writing to a PDump file is permitted for the given device */
+IMG_BOOL PDumpIsDevicePermitted(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/* _ui32PDumpFlags must be a variable in the local scope */
+#define PDUMP_LOCK(_ui32PDumpFlags) do \
+       { if ((_ui32PDumpFlags & PDUMP_FLAGS_PDUMP_LOCK_HELD) == 0)\
+               {\
+                       PDumpLock();\
+               }\
+       MSC_SUPPRESS_4127\
+       } while (0)
+
+/* _ui32PDumpFlags must be a variable in the local scope */
+#define PDUMP_UNLOCK(_ui32PDumpFlags) do \
+       { if ((_ui32PDumpFlags & PDUMP_FLAGS_PDUMP_LOCK_HELD) == 0)\
+               {\
+                       PDumpUnlock();\
+               }\
+       MSC_SUPPRESS_4127\
+       } while (0)
+
+#define PDUMPINIT                              PDumpInitCommon
+#define PDUMPDEINIT                            PDumpDeInitCommon
+#define PDUMPREG32                             PDumpReg32
+#define PDUMPREG64                             PDumpReg64
+#define PDUMPREGREAD32                 PDumpRegRead32
+#define PDUMPREGREAD64                 PDumpRegRead64
+#define PDUMPCOMMENT(d, ...)   PDumpCommentWithFlags(d, PDUMP_FLAGS_CONTINUOUS, __VA_ARGS__)
+#define PDUMPCOMMENTWITHFLAGS  PDumpCommentWithFlags
+#define PDUMPREGPOL                            PDumpRegPolKM
+#define PDUMPREGBASEDCBP               PDumpRegBasedCBP
+#define PDUMPENDINITPHASE              PDumpStopInitPhase
+#define PDUMPIDLWITHFLAGS              PDumpIDLWithFlags
+#define PDUMPIDL                               PDumpIDL
+#define PDUMPPOWCMDSTART               PDumpPowerTransitionStart
+#define PDUMPPOWCMDEND                 PDumpPowerTransitionEnd
+#define PDUMPCOM                               PDumpCOMCommand
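+
+/* Illustrative sketch: the aliases above take the same arguments as the
+ * functions they expand to, with PDUMPCOMMENT fixing the flags to
+ * PDUMP_FLAGS_CONTINUOUS:
+ *
+ *   PDUMPCOMMENT(psDeviceNode, "Frame %u start", ui32Frame);
+ *   PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "Init done");
+ */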
+
+/* _ui32PDumpFlags must be a variable in the local scope */
+#define PDUMP_BLKSTART(_ui32PDumpFlags) do \
+       { PDUMP_LOCK(_ui32PDumpFlags);\
+       _ui32PDumpFlags |= PDUMP_FLAGS_PDUMP_LOCK_HELD;\
+       MSC_SUPPRESS_4127\
+       } while (0)
+
+/* _ui32PDumpFlags must be a variable in the local scope */
+#define PDUMP_BLKEND(_ui32PDumpFlags) do \
+       { _ui32PDumpFlags &= ~PDUMP_FLAGS_PDUMP_LOCK_HELD;\
+       PDUMP_UNLOCK(_ui32PDumpFlags);\
+       MSC_SUPPRESS_4127\
+       } while (0)
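+
+/* Illustrative sketch: grouping several PDump writes into one atomic block.
+ * ui32PDumpFlags must be a local variable because PDUMP_BLKSTART/PDUMP_BLKEND
+ * modify it to record that the PDump lock is already held.
+ *
+ *   IMG_UINT32 ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+ *   PDUMP_BLKSTART(ui32PDumpFlags);
+ *   // ... several script/parameter writes issued with ui32PDumpFlags,
+ *   //     all emitted under a single PDump lock ...
+ *   PDUMP_BLKEND(ui32PDumpFlags);
+ */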
+
+/* _ui32PDumpFlags must be a variable in the local scope */
+#define PDUMPIF(_dev,_msg,_ui32PDumpFlags) do \
+       {PDUMP_BLKSTART(_ui32PDumpFlags);\
+       PDumpIfKM(_dev,_msg,_ui32PDumpFlags);\
+       MSC_SUPPRESS_4127\
+       } while (0)
+
+#define PDUMPELSE                              PDumpElseKM
+
+/* _ui32PDumpFlags must be a variable in the local scope */
+#define PDUMPFI(_dev,_msg,_ui32PDumpFlags) do \
+       { PDumpFiKM(_dev,_msg,_ui32PDumpFlags);\
+       PDUMP_BLKEND(_ui32PDumpFlags);\
+       MSC_SUPPRESS_4127\
+       } while (0)
+
+#else
+/*
+       We should be clearer about which functions can be called
+       across the bridge as this looks rather unbalanced
+*/
+
+/*! Stub: recording a panic in the PDump script stream is a no-op when PDUMP is not defined */
+#define PDUMP_PANIC(_dev, _id, _msg)  ((void)0)
+
+/*! Stub: recording a driver error in the PDump script stream is a no-op when PDUMP is not defined */
+#define PDUMP_ERROR(_dev, _err, _msg) ((void)0)
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpInitCommon)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpInitCommon(void)
+{
+       return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpConnectionNotify)
+#endif
+static INLINE void
+PDumpConnectionNotify(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpDisconnectionNotify)
+#endif
+static INLINE void
+PDumpDisconnectionNotify(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpLock)
+#endif
+static INLINE void
+PDumpLock(void)
+{
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpUnlock)
+#endif
+static INLINE void
+PDumpUnlock(void)
+{
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpStopInitPhase)
+#endif
+static INLINE void
+PDumpStopInitPhase(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpSetFrameKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpSetFrameKM(CONNECTION_DATA *psConnection,
+                PVRSRV_DEVICE_NODE *psDevNode,
+                IMG_UINT32 ui32Frame)
+{
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVR_UNREFERENCED_PARAMETER(psDevNode);
+       PVR_UNREFERENCED_PARAMETER(ui32Frame);
+       return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpGetFrameKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpGetFrameKM(CONNECTION_DATA *psConnection,
+                PVRSRV_DEVICE_NODE *psDeviceNode,
+                IMG_UINT32* pui32Frame)
+{
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(pui32Frame);
+       return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpCommentKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpCommentKM(CONNECTION_DATA *psConnection,
+               PVRSRV_DEVICE_NODE *psDeviceNode,
+               IMG_UINT32 ui32CommentSize,
+               IMG_CHAR *pszComment,
+               IMG_UINT32 ui32Flags)
+{
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(ui32CommentSize);
+       PVR_UNREFERENCED_PARAMETER(pszComment);
+       PVR_UNREFERENCED_PARAMETER(ui32Flags);
+       return PVRSRV_OK;
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpSetDefaultCaptureParamsKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpSetDefaultCaptureParamsKM(CONNECTION_DATA *psConnection,
+                               PVRSRV_DEVICE_NODE *psDeviceNode,
+                               IMG_UINT32 ui32Mode,
+                               IMG_UINT32 ui32Start,
+                               IMG_UINT32 ui32End,
+                               IMG_UINT32 ui32Interval,
+                               IMG_UINT32 ui32MaxParamFileSize)
+{
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(ui32Mode);
+       PVR_UNREFERENCED_PARAMETER(ui32Start);
+       PVR_UNREFERENCED_PARAMETER(ui32End);
+       PVR_UNREFERENCED_PARAMETER(ui32Interval);
+       PVR_UNREFERENCED_PARAMETER(ui32MaxParamFileSize);
+
+       return PVRSRV_OK;
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpPanic)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpPanic(IMG_UINT32      ui32PanicNo,
+           IMG_CHAR*       pszPanicMsg,
+           const IMG_CHAR* pszPPFunc,
+           IMG_UINT32      ui32PPline)
+{
+       PVR_UNREFERENCED_PARAMETER(ui32PanicNo);
+       PVR_UNREFERENCED_PARAMETER(pszPanicMsg);
+       PVR_UNREFERENCED_PARAMETER(pszPPFunc);
+       PVR_UNREFERENCED_PARAMETER(ui32PPline);
+       return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpCaptureError)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpCaptureError(PVRSRV_ERROR    ui32ErrorNo,
+                  IMG_CHAR*       pszErrorMsg,
+                  const IMG_CHAR* pszPPFunc,
+                  IMG_UINT32      ui32PPline)
+{
+       PVR_UNREFERENCED_PARAMETER(ui32ErrorNo);
+       PVR_UNREFERENCED_PARAMETER(pszErrorMsg);
+       PVR_UNREFERENCED_PARAMETER(pszPPFunc);
+       PVR_UNREFERENCED_PARAMETER(ui32PPline);
+       return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpIsLastCaptureFrameKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpIsLastCaptureFrameKM(IMG_BOOL *pbIsLastCaptureFrame)
+{
+       *pbIsLastCaptureFrame = IMG_FALSE;
+       return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpGetStateKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpGetStateKM(IMG_UINT64 *ui64State)
+{
+       *ui64State = 0;
+       return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpForceCaptureStopKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpForceCaptureStopKM(CONNECTION_DATA *psConnection,
+                        PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+       PVR_UNREFERENCED_PARAMETER(psConnection);
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpImageDescriptor)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpImageDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode,
+                     IMG_UINT32 ui32MMUContextID,
+                     IMG_CHAR *pszSABFileName,
+                     IMG_DEV_VIRTADDR sData,
+                     IMG_UINT32 ui32DataSize,
+                     IMG_UINT32 ui32LogicalWidth,
+                     IMG_UINT32 ui32LogicalHeight,
+                     IMG_UINT32 ui32PhysicalWidth,
+                     IMG_UINT32 ui32PhysicalHeight,
+                     PDUMP_PIXEL_FORMAT ePixFmt,
+                     IMG_MEMLAYOUT eMemLayout,
+                     IMG_FB_COMPRESSION eFBCompression,
+                     const IMG_UINT32 *paui32FBCClearColour,
+                     PDUMP_FBC_SWIZZLE eFBCSwizzle,
+                     IMG_DEV_VIRTADDR sHeader,
+                     IMG_UINT32 ui32HeaderSize,
+                     IMG_UINT32 ui32PDumpFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(ui32MMUContextID);
+       PVR_UNREFERENCED_PARAMETER(pszSABFileName);
+       PVR_UNREFERENCED_PARAMETER(sData);
+       PVR_UNREFERENCED_PARAMETER(ui32DataSize);
+       PVR_UNREFERENCED_PARAMETER(ui32LogicalWidth);
+       PVR_UNREFERENCED_PARAMETER(ui32LogicalHeight);
+       PVR_UNREFERENCED_PARAMETER(ui32PhysicalWidth);
+       PVR_UNREFERENCED_PARAMETER(ui32PhysicalHeight);
+       PVR_UNREFERENCED_PARAMETER(ePixFmt);
+       PVR_UNREFERENCED_PARAMETER(eMemLayout);
+       PVR_UNREFERENCED_PARAMETER(eFBCompression);
+       PVR_UNREFERENCED_PARAMETER(paui32FBCClearColour);
+       PVR_UNREFERENCED_PARAMETER(eFBCSwizzle);
+       PVR_UNREFERENCED_PARAMETER(sHeader);
+       PVR_UNREFERENCED_PARAMETER(ui32HeaderSize);
+       PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+       return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpDataDescriptor)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpDataDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode,
+                    IMG_UINT32 ui32MMUContextID,
+                    IMG_CHAR *pszSABFileName,
+                    IMG_DEV_VIRTADDR sData,
+                    IMG_UINT32 ui32DataSize,
+                    IMG_UINT32 ui32ElementType,
+                    IMG_UINT32 ui32ElementCount,
+                    IMG_UINT32 ui32PDumpFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(ui32MMUContextID);
+       PVR_UNREFERENCED_PARAMETER(pszSABFileName);
+       PVR_UNREFERENCED_PARAMETER(sData);
+       PVR_UNREFERENCED_PARAMETER(ui32DataSize);
+       PVR_UNREFERENCED_PARAMETER(ui32ElementType);
+       PVR_UNREFERENCED_PARAMETER(ui32ElementCount);
+       PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+       return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpRegisterConnection)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpRegisterConnection(void *hSyncPrivData,
+                        PFN_PDUMP_SYNCBLOCKS pfnPDumpSyncBlocks,
+                        PDUMP_CONNECTION_DATA **ppsPDumpConnectionData)
+{
+       PVR_UNREFERENCED_PARAMETER(hSyncPrivData);
+       PVR_UNREFERENCED_PARAMETER(pfnPDumpSyncBlocks);
+       PVR_UNREFERENCED_PARAMETER(ppsPDumpConnectionData);
+
+       return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpUnregisterConnection)
+#endif
+static INLINE void
+PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
+{
+       PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpRegisterTransitionCallback)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpRegisterTransitionCallback(PDUMP_CONNECTION_DATA *psPDumpConnectionData,
+                                PFN_PDUMP_TRANSITION pfnCallback,
+                                void *hPrivData,
+                                void *pvDevice,
+                                void **ppvHandle)
+{
+       PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData);
+       PVR_UNREFERENCED_PARAMETER(pfnCallback);
+       PVR_UNREFERENCED_PARAMETER(hPrivData);
+       PVR_UNREFERENCED_PARAMETER(pvDevice);
+       PVR_UNREFERENCED_PARAMETER(ppvHandle);
+
+       return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpUnregisterTransitionCallback)
+#endif
+static INLINE void
+PDumpUnregisterTransitionCallback(void *pvHandle)
+{
+       PVR_UNREFERENCED_PARAMETER(pvHandle);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpRegisterTransitionCallbackFenceSync)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpRegisterTransitionCallbackFenceSync(void *hPrivData,
+                                         PFN_PDUMP_TRANSITION_FENCE_SYNC pfnCallback,
+                                         void **ppvHandle)
+{
+       PVR_UNREFERENCED_PARAMETER(pfnCallback);
+       PVR_UNREFERENCED_PARAMETER(hPrivData);
+       PVR_UNREFERENCED_PARAMETER(ppvHandle);
+
+       return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpUnregisterTransitionCallbackFenceSync)
+#endif
+static INLINE void
+PDumpUnregisterTransitionCallbackFenceSync(void *pvHandle)
+{
+       PVR_UNREFERENCED_PARAMETER(pvHandle);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpTransition)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpTransition(PVRSRV_DEVICE_NODE *psDeviceNode,
+                PDUMP_CONNECTION_DATA *psPDumpConnectionData,
+                PDUMP_TRANSITION_EVENT eEvent,
+                IMG_UINT32 ui32PDumpFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData);
+       PVR_UNREFERENCED_PARAMETER(eEvent);
+       PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+       return PVRSRV_OK;
+}
+
+#if defined(__linux__) || defined(GCC_IA32) || defined(GCC_ARM) || defined(__QNXNTO__) || defined(INTEGRITY_OS)
+       #define PDUMPINIT       PDumpInitCommon
+       #define PDUMPDEINIT(args...)
+       #define PDUMPREG32(args...)
+       #define PDUMPREG64(args...)
+       #define PDUMPREGREAD32(args...)
+       #define PDUMPREGREAD64(args...)
+       #define PDUMPCOMMENT(args...)
+       #define PDUMPREGPOL(args...)
+       #define PDUMPSYNC(args...)
+       #define PDUMPCOPYTOMEM(args...)
+       #define PDUMPWRITE(args...)
+       #define PDUMPREGBASEDCBP(args...)
+       #define PDUMPCOMMENTWITHFLAGS(args...)
+       #define PDUMPENDINITPHASE(args...)
+       #define PDUMPIDLWITHFLAGS(args...)
+       #define PDUMPIDL(args...)
+       #define PDUMPPOWCMDSTART(args...)
+       #define PDUMPPOWCMDEND(args...)
+       #define PDUMP_LOCK(args...)
+       #define PDUMP_UNLOCK(args...)
+       #define PDUMPIF(args...)
+       #define PDUMPFI(args...)
+       #define PDUMPCOM(args...)
+#else
+       #error Compiler not specified
+#endif
+
+#endif /* PDUMP */
+
+#endif /* PDUMP_KM_H */
+
+/******************************************************************************
+ End of file (pdump_km.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/pdump_mmu.h b/drivers/gpu/drm/img/img-rogue/services/server/include/pdump_mmu.h
new file mode 100644 (file)
index 0000000..b67a402
--- /dev/null
@@ -0,0 +1,180 @@
+/**************************************************************************/ /*!
+@File
+@Title          Common MMU Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements basic low level control of MMU.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVKM_PDUMP_MMU_H
+#define SRVKM_PDUMP_MMU_H
+
+/* services/server/include/ */
+#include "pdump_symbolicaddr.h"
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "mmu_common.h"
+
+#include "opaque_types.h"
+
+/*
+ * PDUMP MMU attributes
+ */
+typedef struct _PDUMP_MMU_ATTRIB_DEVICE_
+{
+       /* Per-Device Pdump attribs */
+
+       /*! Pdump memory bank name */
+       IMG_CHAR *pszPDumpMemDevName;
+
+       /*! Pdump register bank name */
+       IMG_CHAR *pszPDumpRegDevName;
+
+} PDUMP_MMU_ATTRIB_DEVICE;
+
+typedef struct _PDUMP_MMU_ATTRIB_CONTEXT_
+{
+       IMG_UINT32 ui32Dummy;
+} PDUMP_MMU_ATTRIB_CONTEXT;
+
+typedef struct _PDUMP_MMU_ATTRIB_HEAP_
+{
+       /* data page info */
+       IMG_UINT32 ui32DataPageMask;
+} PDUMP_MMU_ATTRIB_HEAP;
+
+typedef struct _PDUMP_MMU_ATTRIB_
+{
+       struct _PDUMP_MMU_ATTRIB_DEVICE_ sDevice;
+       struct _PDUMP_MMU_ATTRIB_CONTEXT_ sContext;
+       struct _PDUMP_MMU_ATTRIB_HEAP_ sHeap;
+} PDUMP_MMU_ATTRIB;
+
+#if defined(PDUMP)
+PVRSRV_ERROR
+PDumpMMUMalloc(PPVRSRV_DEVICE_NODE psDeviceNode,
+               const IMG_CHAR *pszPDumpDevName,
+               MMU_LEVEL eMMULevel,
+               IMG_DEV_PHYADDR *psDevPAddr,
+               IMG_UINT32 ui32Size,
+               IMG_UINT32 ui32Align,
+               PDUMP_MMU_TYPE eMMUType);
+
+PVRSRV_ERROR
+PDumpMMUFree(PPVRSRV_DEVICE_NODE psDeviceNode,
+             const IMG_CHAR *pszPDumpDevName,
+             MMU_LEVEL eMMULevel,
+             IMG_DEV_PHYADDR *psDevPAddr,
+             PDUMP_MMU_TYPE eMMUType);
+
+PVRSRV_ERROR
+PDumpPTBaseObjectToMem64(const IMG_CHAR *pszPDumpDevName,
+                         PMR *psPMRDest,
+                         IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+                         IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+                         IMG_UINT32 ui32Flags,
+                         MMU_LEVEL eMMULevel,
+                         IMG_UINT64 ui64PxSymAddr,
+                         IMG_UINT64 ui64PxOffset);
+
+PVRSRV_ERROR
+PDumpMMUDumpPxEntries(PPVRSRV_DEVICE_NODE psDeviceNode,
+                      MMU_LEVEL eMMULevel,
+                      const IMG_CHAR *pszPDumpDevName,
+                      void *pvPxMem,
+                      IMG_DEV_PHYADDR sPxDevPAddr,
+                      IMG_UINT32 uiFirstEntry,
+                      IMG_UINT32 uiNumEntries,
+                      const IMG_CHAR *pszMemspaceName,
+                      const IMG_CHAR *pszSymbolicAddr,
+                      IMG_UINT64 uiSymbolicAddrOffset,
+                      IMG_UINT32 uiBytesPerEntry,
+                      IMG_UINT32 uiLog2Align,
+                      IMG_UINT32 uiAddrShift,
+                      IMG_UINT64 uiAddrMask,
+                      IMG_UINT64 uiPxEProtMask,
+                      IMG_UINT64 uiDataValidEnable,
+                      IMG_UINT32 ui32Flags,
+                      PDUMP_MMU_TYPE eMMUType);
+
+PVRSRV_ERROR
+PDumpMMUAllocMMUContext(PPVRSRV_DEVICE_NODE psDeviceNode,
+                        const IMG_CHAR *pszPDumpMemSpaceName,
+                        IMG_DEV_PHYADDR sPCDevPAddr,
+                        PDUMP_MMU_TYPE eMMUType,
+                        IMG_UINT32 *pui32MMUContextID,
+                        IMG_UINT32 ui32PDumpFlags);
+
+PVRSRV_ERROR
+PDumpMMUFreeMMUContext(PPVRSRV_DEVICE_NODE psDeviceNode,
+                       const IMG_CHAR *pszPDumpMemSpaceName,
+                       IMG_UINT32 ui32MMUContextID,
+                       IMG_UINT32 ui32PDumpFlags);
+
+PVRSRV_ERROR
+PDumpMMUSAB(PPVRSRV_DEVICE_NODE psDeviceNode,
+            const IMG_CHAR *pszPDumpMemNamespace,
+            IMG_UINT32 uiPDumpMMUCtx,
+            IMG_DEV_VIRTADDR sDevAddrStart,
+            IMG_DEVMEM_SIZE_T uiSize,
+            const IMG_CHAR *pszFilename,
+            IMG_UINT32 uiFileOffset,
+            IMG_UINT32 ui32PDumpFlags);
+
+#define PDUMP_MMU_ALLOC_MMUCONTEXT(psDevNode, pszPDumpMemDevName, sPCDevPAddr, eMMUType, puiPDumpCtxID, ui32PDumpFlags) \
+        PDumpMMUAllocMMUContext(psDevNode,                              \
+                                pszPDumpMemDevName,                     \
+                                sPCDevPAddr,                            \
+                                eMMUType,                               \
+                                puiPDumpCtxID,                          \
+                                ui32PDumpFlags)
+
+#define PDUMP_MMU_FREE_MMUCONTEXT(psDevNode, pszPDumpMemDevName, uiPDumpCtxID, ui32PDumpFlags) \
+        PDumpMMUFreeMMUContext(psDevNode, pszPDumpMemDevName, uiPDumpCtxID, ui32PDumpFlags)
+#else /* PDUMP */
+
+#define PDUMP_MMU_ALLOC_MMUCONTEXT(psDevNode, pszPDumpMemDevName, sPCDevPAddr, eMMUType, puiPDumpCtxID, ui32PDumpFlags) \
+        ((void)0)
+#define PDUMP_MMU_FREE_MMUCONTEXT(psDevNode, pszPDumpMemDevName, uiPDumpCtxID, ui32PDumpFlags) \
+        ((void)0)
+
+#endif /* PDUMP */
+
+#endif
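
For reference, a minimal sketch of how the context alloc/free macro pair above might be used by server code (not taken from this patch; the "SYSMEM" memspace name, the zero PDump flags and the local names are illustrative). With PDUMP defined the macros expand to PDumpMMUAllocMMUContext()/PDumpMMUFreeMMUContext(); without it they compile away to ((void)0).

#include "pdump_mmu.h"

static PVRSRV_ERROR ExamplePDumpMMUContext(PPVRSRV_DEVICE_NODE psDevNode,
                                           IMG_DEV_PHYADDR sPCDevPAddr,
                                           PDUMP_MMU_TYPE eMMUType)
{
        IMG_UINT32 uiPDumpCtxID = 0;

        /* Allocate a PDump MMU context for the page catalogue at sPCDevPAddr. */
        PDUMP_MMU_ALLOC_MMUCONTEXT(psDevNode, "SYSMEM", sPCDevPAddr,
                                   eMMUType, &uiPDumpCtxID, 0 /* PDump flags */);

        /* ... emit PDump MMU/SAB commands against uiPDumpCtxID here ... */

        PDUMP_MMU_FREE_MMUCONTEXT(psDevNode, "SYSMEM", uiPDumpCtxID,
                                  0 /* PDump flags */);
        return PVRSRV_OK;
}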
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/pdump_physmem.h b/drivers/gpu/drm/img/img-rogue/services/server/include/pdump_physmem.h
new file mode 100644 (file)
index 0000000..a5a6f37
--- /dev/null
@@ -0,0 +1,257 @@
+/**************************************************************************/ /*!
+@File
+@Title          pdump functions to assist with physmem allocations
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements basic low level control of MMU.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVSRV_PDUMP_PHYSMEM_H
+#define SRVSRV_PDUMP_PHYSMEM_H
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "pmr.h"
+#include "device.h" /* For device node */
+
+#define PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH 40
+#define PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH 60
+#define PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH (PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH + PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH)
+
+typedef struct _PDUMP_PHYSMEM_INFO_T_ PDUMP_PHYSMEM_INFO_T;
+
+#if defined(PDUMP)
+PVRSRV_ERROR
+PDumpGetSymbolicAddr(const IMG_HANDLE hPhysmemPDumpHandle,
+                     IMG_CHAR **ppszSymbolicAddress);
+
+PVRSRV_ERROR
+PDumpMalloc(PVRSRV_DEVICE_NODE *psDeviceNode,
+            const IMG_CHAR *pszDevSpace,
+            const IMG_CHAR *pszSymbolicAddress,
+            IMG_UINT64 ui64Size,
+            /* alignment is alignment of start of buffer _and_
+               minimum contiguity - i.e. smallest allowable
+               page-size. */
+            IMG_DEVMEM_ALIGN_T uiAlign,
+            IMG_BOOL bInitialise,
+            IMG_UINT32 ui32InitValue,
+            IMG_HANDLE *phHandlePtr,
+            IMG_UINT32 ui32PDumpFlags);
+
+PVRSRV_ERROR
+PDumpFree(PVRSRV_DEVICE_NODE *psDeviceNode,
+          IMG_HANDLE hPDumpAllocationInfoHandle);
+
+void
+PDumpMakeStringValid(IMG_CHAR *pszString,
+                     IMG_UINT32 ui32StrLen);
+#else /* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpGetSymbolicAddr)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpGetSymbolicAddr(const IMG_HANDLE hPhysmemPDumpHandle,
+                     IMG_CHAR **ppszSymbolicAddress)
+{
+       PVR_UNREFERENCED_PARAMETER(hPhysmemPDumpHandle);
+       PVR_UNREFERENCED_PARAMETER(ppszSymbolicAddress);
+       return PVRSRV_OK;
+}
+
+static INLINE PVRSRV_ERROR
+PDumpMalloc(PVRSRV_DEVICE_NODE *psDeviceNode,
+            const IMG_CHAR *pszDevSpace,
+            const IMG_CHAR *pszSymbolicAddress,
+            IMG_UINT64 ui64Size,
+            IMG_DEVMEM_ALIGN_T uiAlign,
+            IMG_BOOL bInitialise,
+            IMG_UINT32 ui32InitValue,
+            IMG_HANDLE *phHandlePtr,
+            IMG_UINT32 ui32PDumpFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(pszDevSpace);
+       PVR_UNREFERENCED_PARAMETER(pszSymbolicAddress);
+       PVR_UNREFERENCED_PARAMETER(ui64Size);
+       PVR_UNREFERENCED_PARAMETER(uiAlign);
+       PVR_UNREFERENCED_PARAMETER(bInitialise);
+       PVR_UNREFERENCED_PARAMETER(ui32InitValue);
+       PVR_UNREFERENCED_PARAMETER(phHandlePtr);
+       PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+       return PVRSRV_OK;
+}
+
+static INLINE PVRSRV_ERROR
+PDumpFree(PVRSRV_DEVICE_NODE *psDeviceNode,
+          IMG_HANDLE hPDumpAllocationInfoHandle)
+{
+       PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+       PVR_UNREFERENCED_PARAMETER(hPDumpAllocationInfoHandle);
+       return PVRSRV_OK;
+}
+#endif /* PDUMP */
+
+#define PMR_DEFAULT_PREFIX "PMR"
+#define PMR_SYMBOLICADDR_FMTSPEC "%s%"IMG_UINT64_FMTSPEC"_%"IMG_UINT64_FMTSPEC"_%s"
+#define PMR_MEMSPACE_FMTSPEC "%s"
+#define PMR_MEMSPACE_CACHE_COHERENT_FMTSPEC "CC_%s"
+
+#if defined(PDUMP)
+#define PDUMP_PHYSMEM_MALLOC_OSPAGES(pszPDumpMemDevName, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui32InitValue, phHandlePtr) \
+    PDumpMalloc(pszPDumpMemDevName, PMR_OSALLOCPAGES_PREFIX, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui32InitValue, phHandlePtr, PDUMP_NONE)
+#define PDUMP_PHYSMEM_FREE_OSPAGES(hHandle) \
+    PDumpFree(hHandle)
+#else
+#define PDUMP_PHYSMEM_MALLOC_OSPAGES(pszPDumpMemDevName, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui32InitValue, phHandlePtr) \
+    ((void)(*phHandlePtr=NULL))
+#define PDUMP_PHYSMEM_FREE_OSPAGES(hHandle) \
+    ((void)(0))
+#endif /* PDUMP */
+
+PVRSRV_ERROR
+PDumpPMRWRW32(PVRSRV_DEVICE_NODE *psDeviceNode,
+              const IMG_CHAR *pszDevSpace,
+              const IMG_CHAR *pszSymbolicName,
+              IMG_DEVMEM_OFFSET_T uiOffset,
+              IMG_UINT32 ui32Value,
+              PDUMP_FLAGS_T uiPDumpFlags);
+
+PVRSRV_ERROR
+PDumpPMRWRW32InternalVarToMem(PVRSRV_DEVICE_NODE *psDeviceNode,
+                              const IMG_CHAR *pszDevSpace,
+                              const IMG_CHAR *pszSymbolicName,
+                              IMG_DEVMEM_OFFSET_T uiOffset,
+                              const IMG_CHAR *pszInternalVar,
+                              PDUMP_FLAGS_T uiPDumpFlags);
+
+PVRSRV_ERROR
+PDumpPMRRDW32MemToInternalVar(PVRSRV_DEVICE_NODE *psDeviceNode,
+                              const IMG_CHAR *pszInternalVar,
+                              const IMG_CHAR *pszDevSpace,
+                              const IMG_CHAR *pszSymbolicName,
+                              IMG_DEVMEM_OFFSET_T uiOffset,
+                              PDUMP_FLAGS_T uiPDumpFlags);
+
+PVRSRV_ERROR
+PDumpPMRWRW64(PVRSRV_DEVICE_NODE *psDeviceNode,
+              const IMG_CHAR *pszDevSpace,
+              const IMG_CHAR *pszSymbolicName,
+              IMG_DEVMEM_OFFSET_T uiOffset,
+              IMG_UINT64 ui64Value,
+              PDUMP_FLAGS_T uiPDumpFlags);
+
+PVRSRV_ERROR
+PDumpPMRWRW64InternalVarToMem(PVRSRV_DEVICE_NODE *psDeviceNode,
+                              const IMG_CHAR *pszDevSpace,
+                              const IMG_CHAR *pszSymbolicName,
+                              IMG_DEVMEM_OFFSET_T uiOffset,
+                              const IMG_CHAR *pszInternalVar,
+                              PDUMP_FLAGS_T uiPDumpFlags);
+
+PVRSRV_ERROR
+PDumpPMRRDW64MemToInternalVar(PVRSRV_DEVICE_NODE *psDeviceNode,
+                              const IMG_CHAR *pszInternalVar,
+                              const IMG_CHAR *pszDevSpace,
+                              const IMG_CHAR *pszSymbolicName,
+                              IMG_DEVMEM_OFFSET_T uiOffset,
+                              PDUMP_FLAGS_T uiPDumpFlags);
+
+PVRSRV_ERROR
+PDumpPMRLDB(PVRSRV_DEVICE_NODE *psDeviceNode,
+            const IMG_CHAR *pszDevSpace,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiOffset,
+            IMG_DEVMEM_SIZE_T uiSize,
+            const IMG_CHAR *pszFilename,
+            IMG_UINT32 uiFileOffset,
+            PDUMP_FLAGS_T uiPDumpFlags);
+
+PVRSRV_ERROR
+PDumpPMRSAB(PVRSRV_DEVICE_NODE *psDeviceNode,
+            const IMG_CHAR *pszDevSpace,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiOffset,
+            IMG_DEVMEM_SIZE_T uiSize,
+            const IMG_CHAR *pszFileName,
+            IMG_UINT32 uiFileOffset);
+
+/*
+  PDumpPMRPOL()
+
+  Emits a POL to the PDUMP.
+*/
+PVRSRV_ERROR
+PDumpPMRPOL(PVRSRV_DEVICE_NODE *psDeviceNode,
+            const IMG_CHAR *pszMemspaceName,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiOffset,
+            IMG_UINT32 ui32Value,
+            IMG_UINT32 ui32Mask,
+            PDUMP_POLL_OPERATOR eOperator,
+            IMG_UINT32 uiCount,
+            IMG_UINT32 uiDelay,
+            PDUMP_FLAGS_T uiPDumpFlags);
+
+PVRSRV_ERROR
+PDumpPMRCBP(PVRSRV_DEVICE_NODE *psDeviceNode,
+            const IMG_CHAR *pszMemspaceName,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiReadOffset,
+            IMG_DEVMEM_OFFSET_T uiWriteOffset,
+            IMG_DEVMEM_SIZE_T uiPacketSize,
+            IMG_DEVMEM_SIZE_T uiBufferSize);
+
+/*
+ * PDumpWriteParameterBlob()
+ *
+ * Writes a binary blob to the pdump param stream containing the current
+ * contents of the memory, and returns the filename and offset of where
+ * that blob is located (for use in a subsequent LDB, for example).
+ *
+ * Caller to provide buffer to receive filename, and declare the size of
+ * that buffer.
+ */
+PVRSRV_ERROR
+PDumpWriteParameterBlob(PVRSRV_DEVICE_NODE *psDeviceNode,
+                        IMG_UINT8 *pcBuffer,
+                        size_t uiNumBytes,
+                        PDUMP_FLAGS_T uiPDumpFlags,
+                        IMG_CHAR *pszFilenameOut,
+                        size_t uiFilenameBufSz,
+                        PDUMP_FILEOFFSET_T *puiOffsetOut);
+
+#endif /* #ifndef SRVSRV_PDUMP_PHYSMEM_H */
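
For reference, a minimal sketch of pairing PDumpMalloc() and PDumpFree() from the header above (not part of the patch; the device-space name, symbolic name, sizes and zero flags are illustrative). The alignment argument doubles as the minimum contiguity, per the comment in the prototype.

#include "pdump_physmem.h"

static PVRSRV_ERROR ExamplePDumpAlloc(PVRSRV_DEVICE_NODE *psDeviceNode)
{
        IMG_HANDLE hPDumpAlloc = NULL;
        PVRSRV_ERROR eError;

        eError = PDumpMalloc(psDeviceNode,
                             "SYSMEM",        /* PDump device space (illustrative) */
                             "ExampleBuffer", /* symbolic address (illustrative) */
                             4096,            /* size in bytes */
                             4096,            /* alignment / minimum contiguity */
                             IMG_TRUE,        /* initialise the contents */
                             0,               /* initialisation value */
                             &hPDumpAlloc,
                             0 /* PDump flags */);
        if (eError != PVRSRV_OK)
        {
                return eError;
        }

        /* ... record writes against the symbolic allocation here ... */

        return PDumpFree(psDeviceNode, hPDumpAlloc);
}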
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/pdump_symbolicaddr.h b/drivers/gpu/drm/img/img-rogue/services/server/include/pdump_symbolicaddr.h
new file mode 100644 (file)
index 0000000..ed912a5
--- /dev/null
@@ -0,0 +1,55 @@
+/**************************************************************************/ /*!
+@File
+@Title          Abstraction of PDUMP symbolic address derivation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Allows pdump functions to derive symbolic addresses on-the-fly
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVKM_PDUMP_SYMBOLICADDR_H
+#define SRVKM_PDUMP_SYMBOLICADDR_H
+
+#include "img_types.h"
+
+#include "pvrsrv_error.h"
+
+/* pdump symbolic addresses are generated on-the-fly with a callback */
+
+typedef PVRSRV_ERROR (*PVRSRV_SYMADDRFUNCPTR)(IMG_HANDLE hPriv, IMG_UINT32 uiOffset, IMG_CHAR *pszSymbolicAddr, IMG_UINT32 ui32SymbolicAddrLen, IMG_UINT32 *pui32NewOffset);
+
+#endif /* #ifndef SRVKM_PDUMP_SYMBOLICADDR_H */
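
For reference, a sketch of a callback with the PVRSRV_SYMADDRFUNCPTR shape declared above (not part of the patch). A real callback would derive the symbolic name and offset from the allocation behind hPriv; the fixed name and pass-through offset here are placeholders, and snprintf() is assumed to be available from the kernel headers.

#include <linux/kernel.h>          /* snprintf */
#include "pdump_symbolicaddr.h"

static PVRSRV_ERROR ExampleDeriveSymbolicAddr(IMG_HANDLE hPriv,
                                              IMG_UINT32 uiOffset,
                                              IMG_CHAR *pszSymbolicAddr,
                                              IMG_UINT32 ui32SymbolicAddrLen,
                                              IMG_UINT32 *pui32NewOffset)
{
        (void)hPriv; /* a real implementation would look up the allocation here */

        /* Name the whole allocation and leave the offset unchanged. */
        snprintf(pszSymbolicAddr, ui32SymbolicAddrLen, "ExampleAlloc_0");
        *pui32NewOffset = uiOffset;

        return PVRSRV_OK;
}

A pointer to such a function can then be handed to the pdump code wherever a PVRSRV_SYMADDRFUNCPTR is expected.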
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/physmem.h b/drivers/gpu/drm/img/img-rogue/services/server/include/physmem.h
new file mode 100644 (file)
index 0000000..ca293e9
--- /dev/null
@@ -0,0 +1,321 @@
+/*************************************************************************/ /*!
+@File
+@Title          Physmem header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for common entry point for creation of RAM backed PMR's
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef SRVSRV_PHYSMEM_H
+#define SRVSRV_PHYSMEM_H
+
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "connection_server.h"
+
+/* services/server/include/ */
+#include "pmr.h"
+#include "pmr_impl.h"
+
+/* Valid values for TC_MEMORY_CONFIG configuration option */
+#define TC_MEMORY_LOCAL                        (1)
+#define TC_MEMORY_HOST                 (2)
+#define TC_MEMORY_HYBRID               (3)
+
+/* Valid values for the PLATO_MEMORY_CONFIG configuration option */
+#define PLATO_MEMORY_LOCAL             (1)
+#define PLATO_MEMORY_HOST              (2)
+#define PLATO_MEMORY_HYBRID            (3)
+
+/*************************************************************************/ /*!
+@Function       DevPhysMemAlloc
+@Description    Allocate memory from device specific heaps directly.
+@Input          psDevNode             device node to operate on
+@Input          ui32MemSize           Size of the memory to be allocated
+@Input          u8Value               Value to be initialised to.
+@Input          bInitPage             Flag to control initialisation
+@Input          pszDevSpace           PDUMP memory space in which the
+                                        allocation is to be done
+@Input          pszSymbolicAddress    Symbolic name of the allocation
+@Input          phHandlePtr           PDUMP handle to the allocation
+@Output         hMemHandle            Handle to the allocated memory
+@Output         psDevPhysAddr         Device Physical address of allocated
+                                        page
+@Return         PVRSRV_OK if the allocation is successful
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevPhysMemAlloc(PVRSRV_DEVICE_NODE *psDevNode,
+                IMG_UINT32 ui32MemSize,
+                IMG_UINT32 ui32Log2Align,
+                const IMG_UINT8 u8Value,
+                IMG_BOOL bInitPage,
+#if defined(PDUMP)
+                const IMG_CHAR *pszDevSpace,
+                const IMG_CHAR *pszSymbolicAddress,
+                IMG_HANDLE *phHandlePtr,
+#endif
+                IMG_HANDLE hMemHandle,
+                IMG_DEV_PHYADDR *psDevPhysAddr);
+
+/*************************************************************************/ /*!
+@Function       DevPhysMemFree
+@Description    Free memory to device specific heaps directly.
+@Input          psDevNode             device node to operate on
+@Input          hPDUMPMemHandle       Pdump handle to allocated memory
+@Input          hMemHandle            Devmem handle to allocated memory
+@Return         None
+*/ /**************************************************************************/
+void
+DevPhysMemFree(PVRSRV_DEVICE_NODE *psDevNode,
+#if defined(PDUMP)
+               IMG_HANDLE hPDUMPMemHandle,
+#endif
+               IMG_HANDLE hMemHandle);
+
+/*
+ * PhysmemNewRamBackedPMR
+ *
+ * This function will create a RAM backed PMR using the device specific
+ * callback, this allows control at a per-devicenode level to select the
+ * memory source thus supporting mixed UMA/LMA systems.
+ *
+ * The size must be a multiple of page size. The page size is specified in
+ * log2. It should be regarded as a minimum contiguity of which the
+ * resulting memory must be a multiple. It may be that this should be a fixed
+ * number. It may be that the allocation size needs to be a multiple of some
+ * coarser "page size" than that specified in the page size argument.
+ * For example, take an OS whose page granularity is a fixed 16kB, but the
+ * caller requests memory in page sizes of 4kB. The request can be satisfied
+ * if and only if the SIZE requested is a multiple of 16kB. If the arguments
+ * supplied are such that this OS cannot grant the request,
+ * PVRSRV_ERROR_INVALID_PARAMS will be returned.
+ *
+ * The caller should supply storage of a pointer. Upon successful return a
+ * PMR object will have been created and a pointer to it returned in the
+ * PMROut argument.
+ *
+ * A PMR successfully created should be destroyed with PhysmemUnrefPMR.
+ *
+ * Note that this function may cause memory allocations and on some operating
+ * systems this may cause scheduling events, so it is important that this
+ * function be called with interrupts enabled and in a context where
+ * scheduling events and memory allocations are permitted.
+ *
+ * The flags may be used by the implementation to change its behaviour if
+ * required. The flags will also be stored in the PMR as immutable metadata
+ * and returned to mmu_common when it asks for it.
+ *
+ * The PID specified is used to tie this allocation to the process context
+ * that the allocation is made on behalf of.
+ */
+PVRSRV_ERROR
+PhysmemNewRamBackedPMR(CONNECTION_DATA * psConnection,
+                       PVRSRV_DEVICE_NODE *psDevNode,
+                       IMG_DEVMEM_SIZE_T uiSize,
+                       IMG_DEVMEM_SIZE_T uiChunkSize,
+                       IMG_UINT32 ui32NumPhysChunks,
+                       IMG_UINT32 ui32NumVirtChunks,
+                       IMG_UINT32 *pui32MappingTable,
+                       IMG_UINT32 uiLog2PageSize,
+                       PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                       IMG_UINT32 uiAnnotationLength,
+                       const IMG_CHAR *pszAnnotation,
+                       IMG_PID uiPid,
+                       PMR **ppsPMROut,
+                       IMG_UINT32 ui32PDumpFlags,
+                       PVRSRV_MEMALLOCFLAGS_T *puiPMRFlags);
+
+PVRSRV_ERROR
+PhysmemNewRamBackedPMR_direct(CONNECTION_DATA * psConnection,
+                              PVRSRV_DEVICE_NODE *psDevNode,
+                              IMG_DEVMEM_SIZE_T uiSize,
+                              IMG_DEVMEM_SIZE_T uiChunkSize,
+                              IMG_UINT32 ui32NumPhysChunks,
+                              IMG_UINT32 ui32NumVirtChunks,
+                              IMG_UINT32 *pui32MappingTable,
+                              IMG_UINT32 uiLog2PageSize,
+                              PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                              IMG_UINT32 uiAnnotationLength,
+                              const IMG_CHAR *pszAnnotation,
+                              IMG_PID uiPid,
+                              PMR **ppsPMROut,
+                              IMG_UINT32 ui32PDumpFlags,
+                              PVRSRV_MEMALLOCFLAGS_T *puiPMRFlags);
+
+/*
+ * PhysmemNewRamBackedLockedPMR
+ *
+ * Same as the function above, but additionally locks down the PMR.
+ *
+ * Get the physical memory and lock down the PMR directly; we do not want to
+ * defer the actual allocation to mapping time.
+ *
+ * In general the concept of on-demand allocations is not useful for
+ * allocations where we give the users the freedom to map and unmap memory at
+ * will. The user is not expecting their memory contents to suddenly vanish
+ * just because they unmapped the buffer.
+ * Even if they knew and were OK with it, we do not want to check, for every
+ * page we unmap, whether we have to unlock the underlying PMR.
+*/
+PVRSRV_ERROR
+PhysmemNewRamBackedLockedPMR(CONNECTION_DATA * psConnection,
+                             PVRSRV_DEVICE_NODE *psDevNode,
+                             IMG_DEVMEM_SIZE_T uiSize,
+                             PMR_SIZE_T uiChunkSize,
+                             IMG_UINT32 ui32NumPhysChunks,
+                             IMG_UINT32 ui32NumVirtChunks,
+                             IMG_UINT32 *pui32MappingTable,
+                             IMG_UINT32 uiLog2PageSize,
+                             PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                             IMG_UINT32 uiAnnotationLength,
+                             const IMG_CHAR *pszAnnotation,
+                             IMG_PID uiPid,
+                             PMR **ppsPMRPtr,
+                             IMG_UINT32 ui32PDumpFlags,
+                             PVRSRV_MEMALLOCFLAGS_T *puiPMRFlags);
+
+/*************************************************************************/ /*!
+@Function       PhysmemImportPMR
+@Description    Import a previously exported PMR
+@Input          psPMRExport           The exported PMR token
+@Input          uiPassword            Authorisation password
+                                      for the PMR being imported
+@Input          uiSize                Size of the PMR being imported
+                                      (for verification)
+@Input          uiLog2Contig          Log2 continuity of the PMR being
+                                      imported (for verification)
+@Output         ppsPMR                The imported PMR
+@Return         PVRSRV_ERROR_PMR_NOT_PERMITTED if not for the same device
+                PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR if password incorrect
+                PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES if size or contiguity incorrect
+                PVRSRV_OK if successful
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PhysmemImportPMR(CONNECTION_DATA *psConnection,
+                 PVRSRV_DEVICE_NODE *psDevNode,
+                 PMR_EXPORT *psPMRExport,
+                 PMR_PASSWORD_T uiPassword,
+                 PMR_SIZE_T uiSize,
+                 PMR_LOG2ALIGN_T uiLog2Contig,
+                 PMR **ppsPMR);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVGetMaxPhysHeapCountKM
+@Description    Get the user accessible physical heap count
+@Output         puiPhysHeapCount   user accessible physical heap count
+@Return         PVRSRV_OK if successful
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVGetMaxPhysHeapCountKM(CONNECTION_DATA *psConnection,
+                           PVRSRV_DEVICE_NODE *psDevNode,
+                           IMG_UINT32 *puiPhysHeapCount);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVGetDefaultPhysicalHeapKM
+@Description    For the specified device, get the physical heap used for
+                allocations when the PVRSRV_PHYS_HEAP_DEFAULT
+                physical heap hint is set in memalloc flags.
+@Output         peHeap                 Default Heap return value
+@Return         PVRSRV_OK if successful
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVGetDefaultPhysicalHeapKM(CONNECTION_DATA *psConnection,
+                         PVRSRV_DEVICE_NODE *psDevNode,
+                         PVRSRV_PHYS_HEAP *peHeap);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVGetHeapPhysMemUsageKM
+@Description    Get the memory usage statistics for all user accessible
+                physical heaps
+@Input          ui32PhysHeapCount      Total user accessible physical heaps
+@Output         apPhysHeapMemStats     Buffer to hold the memory statistics
+@Return         PVRSRV_OK if successful
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVGetHeapPhysMemUsageKM(CONNECTION_DATA *psConnection,
+                           PVRSRV_DEVICE_NODE *psDevNode,
+                           IMG_UINT32 ui32PhysHeapCount,
+                           PHYS_HEAP_MEM_STATS *apPhysHeapMemStats);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVGetHeapPhysMemUsagePkdKM
+@Description    Get the memory usage statistics for all user accessible
+                physical heaps
+@Input          ui32PhysHeapCount      Total user accessible physical heaps
+@Output         apPhysHeapMemStats     Buffer to hold the memory statistics
+@Return         PVRSRV_OK if successful
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVGetHeapPhysMemUsagePkdKM(CONNECTION_DATA *psConnection,
+                           PVRSRV_DEVICE_NODE *psDevNode,
+                           IMG_UINT32 ui32PhysHeapCount,
+                           PHYS_HEAP_MEM_STATS_PKD *apPhysHeapMemStats);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVPhysHeapGetMemInfoKM
+@Description    Get the memory usage statistics for a given physical heap ID
+@Input          ui32PhysHeapCount      Physical Heap count
+@Input          paePhysHeapID          Array of Physical Heap ID's
+@Output         paPhysHeapMemStats     Buffer to hold the memory statistics
+@Return         PVRSRV_OK if successful
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVPhysHeapGetMemInfoKM(CONNECTION_DATA *psConnection,
+                          PVRSRV_DEVICE_NODE *psDevNode,
+                          IMG_UINT32 ui32PhysHeapCount,
+                          PVRSRV_PHYS_HEAP *paePhysHeapID,
+                          PHYS_HEAP_MEM_STATS *paPhysHeapMemStats);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVPhysHeapGetMemInfoPkdKM
+@Description    Get the memory usage statistics for a given physical heap ID
+@Input          ui32PhysHeapCount      Physical Heap count
+@Input          paePhysHeapID          Array of Physical Heap ID's
+@Output         paPhysHeapMemStats     Buffer to hold the memory statistics
+@Return         PVRSRV_OK if successful
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVPhysHeapGetMemInfoPkdKM(CONNECTION_DATA *psConnection,
+                          PVRSRV_DEVICE_NODE *psDevNode,
+                          IMG_UINT32 ui32PhysHeapCount,
+                          PVRSRV_PHYS_HEAP *paePhysHeapID,
+                          PHYS_HEAP_MEM_STATS_PKD *paPhysHeapMemStats);
+
+#endif /* SRVSRV_PHYSMEM_H */
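
For reference, a minimal sketch of a regular (non-sparse) PhysmemNewRamBackedPMR() call following the constraints in the block comment above: one physical and one virtual chunk, a chunk size equal to the total size, and a size that is a multiple of the page size (not part of the patch; the 64 KiB size, one-entry mapping table, annotation and zero PDump flags are illustrative). The resulting PMR would later be released with PhysmemUnrefPMR(), as the comment notes.

#include "physmem.h"

static PVRSRV_ERROR ExampleAllocRamBackedPMR(CONNECTION_DATA *psConnection,
                                             PVRSRV_DEVICE_NODE *psDevNode,
                                             PVRSRV_MEMALLOCFLAGS_T uiFlags,
                                             IMG_PID uiPid,
                                             PMR **ppsPMR)
{
        IMG_UINT32 ui32MappingTable = 0;   /* single chunk mapped at index 0 */
        PVRSRV_MEMALLOCFLAGS_T uiPMRFlags;
        const IMG_CHAR szAnnotation[] = "ExamplePMR";

        return PhysmemNewRamBackedPMR(psConnection, psDevNode,
                                      64 * 1024,          /* uiSize */
                                      64 * 1024,          /* uiChunkSize == uiSize */
                                      1, 1,               /* phys / virt chunks */
                                      &ui32MappingTable,
                                      12,                 /* log2(4 KiB) page size */
                                      uiFlags,
                                      sizeof(szAnnotation),
                                      szAnnotation,
                                      uiPid,
                                      ppsPMR,
                                      0,                  /* PDump flags */
                                      &uiPMRFlags);
}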
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/physmem_dmabuf.h b/drivers/gpu/drm/img/img-rogue/services/server/include/physmem_dmabuf.h
new file mode 100644 (file)
index 0000000..99b5c33
--- /dev/null
@@ -0,0 +1,124 @@
+/**************************************************************************/ /*!
+@File           physmem_dmabuf.h
+@Title          Header for dmabuf PMR factory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management. This module is responsible for
+                implementing the function callbacks for importing dma-buf allocations
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(PHYSMEM_DMABUF_H)
+#define PHYSMEM_DMABUF_H
+
+#include <linux/dma-buf.h>
+
+#if defined(__KERNEL__) && defined(__linux__) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "connection_server.h"
+
+#include "pmr.h"
+
+typedef PVRSRV_ERROR (*PFN_DESTROY_DMABUF_PMR)(PHYS_HEAP *psHeap,
+                                               struct dma_buf_attachment *psAttachment);
+
+PVRSRV_ERROR
+PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap,
+                                struct dma_buf_attachment *psAttachment,
+                                PFN_DESTROY_DMABUF_PMR pfnDestroy,
+                                PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                IMG_DEVMEM_SIZE_T uiChunkSize,
+                                IMG_UINT32 ui32NumPhysChunks,
+                                IMG_UINT32 ui32NumVirtChunks,
+                                IMG_UINT32 *pui32MappingTable,
+                                IMG_UINT32 ui32NameSize,
+                                const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+                                PMR **ppsPMRPtr);
+
+struct dma_buf *
+PhysmemGetDmaBuf(PMR *psPMR);
+
+PVRSRV_ERROR
+PhysmemExportDmaBuf(CONNECTION_DATA *psConnection,
+                    PVRSRV_DEVICE_NODE *psDevNode,
+                    PMR *psPMR,
+                    IMG_INT *piFd);
+
+PVRSRV_ERROR
+PhysmemImportDmaBuf(CONNECTION_DATA *psConnection,
+                    PVRSRV_DEVICE_NODE *psDevNode,
+                    IMG_INT fd,
+                    PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                    IMG_UINT32 ui32NameSize,
+                    const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+                    PMR **ppsPMRPtr,
+                    IMG_DEVMEM_SIZE_T *puiSize,
+                    IMG_DEVMEM_ALIGN_T *puiAlign);
+
+PVRSRV_ERROR
+PhysmemImportDmaBufLocked(CONNECTION_DATA *psConnection,
+                          PVRSRV_DEVICE_NODE *psDevNode,
+                          IMG_INT fd,
+                          PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                          IMG_UINT32 ui32NameSize,
+                          const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+                          PMR **ppsPMRPtr,
+                          IMG_DEVMEM_SIZE_T *puiSize,
+                          IMG_DEVMEM_ALIGN_T *puiAlign);
+
+PVRSRV_ERROR
+PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection,
+                          PVRSRV_DEVICE_NODE *psDevNode,
+                          IMG_INT fd,
+                          PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                          IMG_DEVMEM_SIZE_T uiChunkSize,
+                          IMG_UINT32 ui32NumPhysChunks,
+                          IMG_UINT32 ui32NumVirtChunks,
+                          IMG_UINT32 *pui32MappingTable,
+                          IMG_UINT32 ui32NameSize,
+                          const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+                          PMR **ppsPMRPtr,
+                          IMG_DEVMEM_SIZE_T *puiSize,
+                          IMG_DEVMEM_ALIGN_T *puiAlign);
+
+#endif /* !defined(PHYSMEM_DMABUF_H) */
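
For reference, a minimal sketch of importing an existing dma-buf file descriptor as a PMR via PhysmemImportDmaBuf() above (not part of the patch; the annotation string is illustrative, and ui32NameSize is assumed here to be the length of the name including its terminator).

#include "physmem_dmabuf.h"

static PVRSRV_ERROR ExampleImportDmaBuf(CONNECTION_DATA *psConnection,
                                        PVRSRV_DEVICE_NODE *psDevNode,
                                        IMG_INT iFd,
                                        PVRSRV_MEMALLOCFLAGS_T uiFlags,
                                        PMR **ppsPMR)
{
        IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN] = "ExampleDmaBufImport";
        IMG_DEVMEM_SIZE_T uiSize;
        IMG_DEVMEM_ALIGN_T uiAlign;

        return PhysmemImportDmaBuf(psConnection, psDevNode, iFd, uiFlags,
                                   sizeof("ExampleDmaBufImport"), /* assumed: name length incl. NUL */
                                   szName,
                                   ppsPMR, &uiSize, &uiAlign);
}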
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/physmem_hostmem.h b/drivers/gpu/drm/img/img-rogue/services/server/include/physmem_hostmem.h
new file mode 100644 (file)
index 0000000..cfa453d
--- /dev/null
@@ -0,0 +1,65 @@
+/*************************************************************************/ /*!
+@File           physmem_hostmem.h
+@Title          Host memory device node header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the host memory device node.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(PHYSMEM_HOSTMEM_H)
+#define PHYSMEM_HOSTMEM_H
+
+#include "pvrsrv_device.h"
+#include "device.h"
+
+/*************************************************************************/ /*!
+@Function       HostMemDeviceCreate
+@Description    Allocate memory for and create host memory device node.
+@Output         ppsDeviceNode Pointer to device node pointer.
+@Return         PVRSRV_ERROR  PVRSRV_OK or error code
+*/ /**************************************************************************/
+PVRSRV_ERROR HostMemDeviceCreate(PVRSRV_DEVICE_NODE **ppsDeviceNode);
+
+/*************************************************************************/ /*!
+@Function       HostMemDeviceDestroy
+@Description    Destroy host memory device node.
+@Input          psDeviceNode  Pointer to device node.
+*/ /**************************************************************************/
+void HostMemDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+#endif /* !defined(PHYSMEM_HOSTMEM_H) */
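
For reference, the create/destroy pairing of the two entry points above, as a minimal sketch (not part of the patch).

#include "physmem_hostmem.h"

static PVRSRV_ERROR ExampleHostMemNode(void)
{
        PVRSRV_DEVICE_NODE *psHostMemNode = NULL;
        PVRSRV_ERROR eError;

        eError = HostMemDeviceCreate(&psHostMemNode);
        if (eError != PVRSRV_OK)
        {
                return eError;
        }

        /* ... host memory heaps are now available through psHostMemNode ... */

        HostMemDeviceDestroy(psHostMemNode);
        return PVRSRV_OK;
}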
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/physmem_lma.h b/drivers/gpu/drm/img/img-rogue/services/server/include/physmem_lma.h
new file mode 100644 (file)
index 0000000..51f4257
--- /dev/null
@@ -0,0 +1,93 @@
+/**************************************************************************/ /*!
+@File
+@Title          Header for local card memory allocator
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management. This module is responsible for
+                implementing the function callbacks for local card memory.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVSRV_PHYSMEM_LMA_H
+#define SRVSRV_PHYSMEM_LMA_H
+
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+/* services/server/include/ */
+#include "pmr.h"
+#include "pmr_impl.h"
+
+/*************************************************************************/ /*!
+@Function       PhysmemCreateHeapLMA
+@Description    Create and register new LMA heap with LMA specific details.
+@Input          psDevNode    Pointer to device node struct.
+@Input          psConfig     Heap configuration.
+@Input          pszLabel     Debug identifier label
+@Output         ppsPhysHeap  Pointer to the created heap.
+@Return         PVRSRV_ERROR PVRSRV_OK or error code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PhysmemCreateHeapLMA(PVRSRV_DEVICE_NODE *psDevNode,
+                     PHYS_HEAP_CONFIG *psConfig,
+                     IMG_CHAR *pszLabel,
+                     PHYS_HEAP **ppsPhysHeap);
+
+/*
+ * PhysmemNewLocalRamBackedPMR
+ *
+ * This function will create a PMR using the local card memory and is OS
+ * agnostic.
+ */
+PVRSRV_ERROR
+PhysmemNewLocalRamBackedPMR(PHYS_HEAP *psPhysHeap,
+                            CONNECTION_DATA *psConnection,
+                            IMG_DEVMEM_SIZE_T uiSize,
+                            IMG_DEVMEM_SIZE_T uiChunkSize,
+                            IMG_UINT32 ui32NumPhysChunks,
+                            IMG_UINT32 ui32NumVirtChunks,
+                            IMG_UINT32 *pui32MappingTable,
+                            IMG_UINT32 uiLog2PageSize,
+                            PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                            const IMG_CHAR *pszAnnotation,
+                            IMG_PID uiPid,
+                            PMR **ppsPMRPtr,
+                            IMG_UINT32 ui32PDumpFlags);
+
+#endif /* #ifndef SRVSRV_PHYSMEM_LMA_H */
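
For reference, a minimal sketch of creating an LMA heap with PhysmemCreateHeapLMA() during device setup (not part of the patch; the label string is a placeholder and the heap config is assumed to come from the system layer).

#include "physheap.h"      /* PHYS_HEAP, PHYS_HEAP_CONFIG */
#include "physmem_lma.h"

static PVRSRV_ERROR ExampleCreateLMAHeap(PVRSRV_DEVICE_NODE *psDevNode,
                                         PHYS_HEAP_CONFIG *psConfig,
                                         PHYS_HEAP **ppsPhysHeap)
{
        IMG_CHAR acLabel[] = "GPU LMA (example)"; /* debug identifier only */

        return PhysmemCreateHeapLMA(psDevNode, psConfig, acLabel, ppsPhysHeap);
}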
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/physmem_osmem.h b/drivers/gpu/drm/img/img-rogue/services/server/include/physmem_osmem.h
new file mode 100644 (file)
index 0000000..1eb7565
--- /dev/null
@@ -0,0 +1,142 @@
+/*************************************************************************/ /*!
+@File           physmem_osmem.h
+@Title          OS memory PMR factory API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of Services memory management.  This file defines the
+                OS memory PMR factory API that must be defined so that the
+                common & device layer code in the Services Server can allocate
+                new PMRs backed with pages from the OS page allocator. Applicable
+                for UMA based platforms, such platforms must implement this API
+                in the OS Porting layer, in the "env" directory for that
+                system.
+
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PHYSMEM_OSMEM_H
+#define PHYSMEM_OSMEM_H
+
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+/* services/server/include/ */
+#include "pmr.h"
+#include "pmr_impl.h"
+#include "connection_server.h"
+#include "physheap.h"
+
+/*************************************************************************/ /*!
+@Function       PhysmemNewOSRamBackedPMR
+@Description    Rogue Services will call this function to allocate GPU device
+                memory from the PMR factory supported by the OS DDK port. This
+                factory typically obtains physical memory from the kernel/OS
+                API that allocates memory from the default heap of shared
+                system memory available on the platform. The allocated memory
+                must be page-aligned and be a whole number of pages.
+                After allocating the required memory, the implementation must
+                then call PMRCreatePMR() to obtain the PMR structure that
+                describes this allocation to the upper layers of the Services
+                memory management sub-system.
+                NB. Implementation of this function is mandatory. If shared
+                system memory is not to be used in the OS port then the
+                implementation must return PVRSRV_ERROR_NOT_SUPPORTED.
+
+@Input          psPhysHeap       the phys heap
+@Input          psConnection     the connection to the originator process
+@Input          uiSize           the size of the allocation
+                                 (must be a multiple of page size)
+@Input          uiChunkSize      when sparse allocations are requested,
+                                 this is the allocated chunk size.
+                                 For regular allocations, this will be
+                                 the same as uiSize.
+                                 (must be a multiple of page size)
+@Input          ui32NumPhysChunks  when sparse allocations are requested,
+                                   this is the number of physical chunks
+                                   to be allocated.
+                                   For regular allocations, this will be 1.
+@Input          ui32NumVirtChunks  when sparse allocations are requested,
+                                   this is the number of virtual chunks
+                                   covering the sparse allocation.
+                                   For regular allocations, this will be 1.
+@Input          pui32MappingTable  when sparse allocations are requested,
+                                   this is the list of the indices of
+                                   each physically-backed virtual chunk
+                                   For regular allocations, this will
+                                   be NULL.
+@Input          uiLog2PageSize   the physical pagesize in log2(bytes).
+@Input          uiFlags          the allocation flags.
+@Input          pszAnnotation    string describing the PMR (for debug).
+                                 This should be passed into the function
+                                 PMRCreatePMR().
+@Input          uiPid            The process ID that this allocation should
+                                 be associated with.
+@Output         ppsPMROut        pointer to the PMR created for the
+                                 new allocation
+@Input          ui32PDumpFlags   the pdump flags.
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PhysmemNewOSRamBackedPMR(PHYS_HEAP *psPhysHeap,
+                         CONNECTION_DATA *psConnection,
+                         IMG_DEVMEM_SIZE_T uiSize,
+                         IMG_DEVMEM_SIZE_T uiChunkSize,
+                         IMG_UINT32 ui32NumPhysChunks,
+                         IMG_UINT32 ui32NumVirtChunks,
+                         IMG_UINT32 *pui32MappingTable,
+                         IMG_UINT32 uiLog2PageSize,
+                         PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                         const IMG_CHAR *pszAnnotation,
+                         IMG_PID uiPid,
+                         PMR **ppsPMROut,
+                         IMG_UINT32 ui32PDumpFlags);
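+
+/* Illustrative sketch: how a caller might request a regular (non-sparse)
+ * allocation through this factory entry point. Per the description above, a
+ * regular allocation passes uiChunkSize == uiSize, a single physical and
+ * virtual chunk and a NULL mapping table. All local names are hypothetical
+ * and error handling is omitted.
+ *
+ *     PMR *psPMR = NULL;
+ *     PVRSRV_ERROR eError;
+ *
+ *     eError = PhysmemNewOSRamBackedPMR(psPhysHeap,
+ *                                       psConnection,
+ *                                       uiSize,          // whole pages
+ *                                       uiSize,          // chunk size == size
+ *                                       1,               // one physical chunk
+ *                                       1,               // one virtual chunk
+ *                                       NULL,            // no mapping table
+ *                                       uiLog2PageSize,
+ *                                       uiFlags,
+ *                                       "ExampleAlloc",
+ *                                       uiPid,
+ *                                       &psPMR,
+ *                                       ui32PDumpFlags);
+ */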
+
+/*************************************************************************/ /*!
+@Function       PhysmemGetOSRamMemStats
+@Description    Function that gets the OS memory usage statistics
+@Input          pvImplData     Physical heap private data.
+@Output         pui64TotalSize Buffer that holds the total OS memory size
+@Output         pui64FreeSize  Buffer that holds the free OS memory size
+@Return         None.
+*/ /**************************************************************************/
+void PhysmemGetOSRamMemStats(PHEAP_IMPL_DATA pvImplData,
+                            IMG_UINT64 *pui64TotalSize,
+                            IMG_UINT64 *pui64FreeSize);
+
+#endif /* PHYSMEM_OSMEM_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/pmr.h b/drivers/gpu/drm/img/img-rogue/services/server/include/pmr.h
new file mode 100644 (file)
index 0000000..6a1f0e8
--- /dev/null
@@ -0,0 +1,1023 @@
+/*************************************************************************/ /*!
+@File
+@Title          Physmem (PMR) abstraction
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management.  This module is responsible for
+                the "PMR" abstraction.  A PMR (Physical Memory Resource)
+                represents some unit of physical memory which is
+                allocated/freed/mapped/unmapped as an indivisible unit
+                (higher software levels provide an abstraction above that
+                to deal with dividing this down into smaller manageable units).
+                Importantly, this module knows nothing of virtual memory, or
+                of MMUs etc., with one excusable exception.  We have the
+                concept of a "page size", which really means nothing in
+                physical memory, but represents a "contiguity quantum" such
+                that the higher level modules which map this memory are able
+                to verify that it matches the needs of the page size for the
+                virtual realm into which it is being mapped.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef SRVSRV_PMR_H
+#define SRVSRV_PMR_H
+
+/* include/ */
+#include "img_types.h"
+#include "img_defs.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h"                        /* Required for export DEVMEM_EXPORTCOOKIE */
+
+/* services/include */
+#include "pdump.h"
+#include "physheap.h"
+
+/* services/server/include/ */
+#include "pmr_impl.h"
+#include "opaque_types.h"
+
+#define PMR_MAX_TRANSLATION_STACK_ALLOC                                (32)
+
+/* Maximum number of pages a PMR can have equates to 1GB of memory (with 4KB pages) */
+#define PMR_MAX_SUPPORTED_PAGE_COUNT                           (262144)
+
+typedef IMG_UINT64 PMR_BASE_T;
+typedef IMG_UINT64 PMR_SIZE_T;
+#define PMR_SIZE_FMTSPEC "0x%010"IMG_UINT64_FMTSPECX
+#define PMR_VALUE32_FMTSPEC "0x%08X"
+#define PMR_VALUE64_FMTSPEC "0x%016"IMG_UINT64_FMTSPECX
+typedef IMG_UINT32 PMR_LOG2ALIGN_T;
+typedef IMG_UINT64 PMR_PASSWORD_T;
+
+struct _PMR_MAPPING_TABLE_
+{
+       PMR_SIZE_T      uiChunkSize;                    /*!< Size of a "chunk" */
+       IMG_UINT32      ui32NumPhysChunks;              /*!< Number of physical chunks that are valid */
+       IMG_UINT32      ui32NumVirtChunks;              /*!< Number of virtual chunks in the mapping */
+       /* Must be last */
+       IMG_UINT32      aui32Translation[1];    /*!< Translation mapping for "logical" to physical */
+};
+
+#define TRANSLATION_INVALID 0xFFFFFFFFUL
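+
+/* Illustrative sketch: walking the mapping table of a sparse PMR. The
+ * aui32Translation[] array is indexed by virtual chunk (ui32NumVirtChunks
+ * entries); entries equal to TRANSLATION_INVALID denote virtual chunks with
+ * no physical backing. psTable is a hypothetical PMR_MAPPING_TABLE pointer,
+ * e.g. obtained via PMR_GetMappingTable() (declared later in this header).
+ *
+ *     IMG_UINT32 i;
+ *     for (i = 0; i < psTable->ui32NumVirtChunks; i++)
+ *     {
+ *         if (psTable->aui32Translation[i] == TRANSLATION_INVALID)
+ *         {
+ *             continue; // virtual chunk i has no physical backing
+ *         }
+ *         // physical offset of virtual chunk i:
+ *         //   psTable->aui32Translation[i] * psTable->uiChunkSize
+ *     }
+ */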
+
+typedef struct _PMR_EXPORT_ PMR_EXPORT;
+
+typedef struct _PMR_PAGELIST_ PMR_PAGELIST;
+
+/*
+ * PMRCreatePMR
+ *
+ * Not to be called directly, only via implementations of PMR
+ * factories, e.g. in physmem_osmem.c, deviceclass.c, etc.
+ *
+ * Creates a PMR object, with callbacks and private data as per the
+ * FuncTab/PrivData args.
+ *
+ * Note that at creation time the PMR must set in stone the "logical
+ * size" and the "contiguity guarantee"
+ *
+ * Flags are also set at this time.  (T.B.D.  flags also immutable for
+ * the life of the PMR?)
+ *
+ * Logical size is the amount of Virtual space this allocation would
+ * take up when mapped.  Note that this does not have to be the same
+ * as the actual physical size of the memory.  For example, consider
+ * the sparsely allocated non-power-of-2 texture case.  In this
+ * instance, the "logical size" would be the virtual size of the
+ * rounded-up power-of-2 texture.  That some pages of physical memory
+ * may not exist does not affect the logical size calculation.
+ *
+ * The PMR must also supply the "contiguity guarantee" which is the
+ * finest granularity of alignment and size of physical pages that the
+ * PMR will provide after LockSysPhysAddresses is called.  Note that
+ * the calling code may choose to call PMRSysPhysAddr with a finer
+ * granularity than this, for example if it were to map into a device
+ * MMU with a smaller page size, and it's also OK for the PMR to
+ * supply physical memory in larger chunks than this.  But
+ * importantly, never the other way around.
+ *
+ * More precisely, the following inequality must be maintained
+ * whenever mappings and/or physical addresses exist:
+ *
+ *       (device MMU page size) <= 2**(uiLog2ContiguityGuarantee) <= (actual contiguity of physical memory)
+ *
+ * The function table will contain the following callbacks which may
+ * be overridden by the PMR implementation:
+ *
+ * pfnLockPhysAddresses
+ *
+ *      Called when someone requests that physical pages be locked
+ *      down via the PMRLockSysPhysAddresses() API.  Note
+ *      that if physical pages are prefaulted at PMR creation time and
+ *      therefore static, it would not be necessary to override this
+ *      function, in which case NULL may be supplied.
+ *
+ * pfnUnlockPhysAddresses
+ *
+ *      The reverse of pfnLockPhysAddresses.  Note that this should be
+ *      NULL if and only if pfnLockPhysAddresses is NULL
+ *
+ * pfnSysPhysAddr
+ *
+ *      This function is mandatory.  This is the one which returns the
+ *      system physical address for a given offset into this PMR.  The
+ *      "lock" function will have been called, if overridden, before
+ *      this function, thus the implementation should not increase any
+ *      refcount when answering this call.  Refcounting, if necessary,
+ *      should be done in the lock/unlock calls.  Refcounting would
+ *      not be necessary in the prefaulted/static scenario, as the
+ *      pmr.c abstraction will handle the refcounting for the whole
+ *      PMR.
+ *
+ * pfnFinalize
+ *
+ *      Called when the PMR's refcount reaches zero and it gets
+ *      destroyed.  This allows the implementation to free up any
+ *      resource acquired during creation time.
+ *
+ */
+PVRSRV_ERROR
+PMRCreatePMR(PHYS_HEAP *psPhysHeap,
+             PMR_SIZE_T uiLogicalSize,
+             PMR_SIZE_T uiChunkSize,
+             IMG_UINT32 ui32NumPhysChunks,
+             IMG_UINT32 ui32NumVirtChunks,
+             IMG_UINT32 *pui32MappingTable,
+             PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee,
+             PMR_FLAGS_T uiFlags,
+             const IMG_CHAR *pszAnnotation,
+             const PMR_IMPL_FUNCTAB *psFuncTab,
+             PMR_IMPL_PRIVDATA pvPrivData,
+             PMR_IMPL_TYPE eType,
+             PMR **ppsPMRPtr,
+             IMG_UINT32 ui32PDumpFlags);
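+
+/* Illustrative sketch: the tail end of a hypothetical PMR factory. Once the
+ * factory has obtained its physical pages, built its function table
+ * (gsExampleFuncTab) and private data (psExamplePriv), it describes the
+ * allocation to pmr.c. The contiguity guarantee must satisfy the inequality
+ * above, so uiLog2PageSize here is at least the device MMU page size.
+ *
+ *     PMR *psPMR = NULL;
+ *     PVRSRV_ERROR eError;
+ *
+ *     eError = PMRCreatePMR(psPhysHeap,
+ *                           uiLogicalSize,
+ *                           uiChunkSize,
+ *                           ui32NumPhysChunks,
+ *                           ui32NumVirtChunks,
+ *                           pui32MappingTable,
+ *                           uiLog2PageSize,    // contiguity guarantee
+ *                           uiFlags,
+ *                           pszAnnotation,
+ *                           &gsExampleFuncTab,
+ *                           psExamplePriv,
+ *                           PMR_TYPE_OSMEM,
+ *                           &psPMR,
+ *                           ui32PDumpFlags);
+ */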
+
+/*
+ * PMRLockSysPhysAddresses()
+ *
+ * Calls the relevant callback to lock down the system physical addresses of
+ * the memory that makes up the whole PMR.
+ *
+ * Before this call, it is not valid to use any of the information-getting
+ * APIs: PMR_Flags(), PMR_SysPhysAddr(), etc.
+ * [ see note below about lock/unlock semantics ]
+ *
+ * The caller of this function does not have to care about how the PMR
+ * is implemented.  The caller only needs to know that access to the
+ * physical addresses is permitted _after_ calling this function and
+ * _until_ calling PMRUnlockSysPhysAddresses().
+ *
+ *
+ * Notes to callback implementers (authors of PMR Factories):
+ *
+ * Some PMR implementations will be such that the physical memory exists for
+ * the lifetime of the PMR, with a static address (and normally flags and
+ * symbolic address are static too), and so it is legal for a PMR
+ * implementation to not provide an implementation for the lock callback.
+ *
+ * Some PMR implementation may wish to page memory in from secondary storage
+ * on demand. The lock/unlock callbacks _may_ be the place to do this.
+ * (More likely, there would be a separate API for doing this, but this API
+ * provides a useful place to assert that it has been done)
+ */
+
+PVRSRV_ERROR
+PMRLockSysPhysAddresses(PMR *psPMR);
+
+PVRSRV_ERROR
+PMRLockSysPhysAddressesNested(PMR *psPMR,
+                        IMG_UINT32 ui32NestingLevel);
+
+/*
+ * PMRUnlockSysPhysAddresses()
+ *
+ * the reverse of PMRLockSysPhysAddresses()
+ */
+PVRSRV_ERROR
+PMRUnlockSysPhysAddresses(PMR *psPMR);
+
+PVRSRV_ERROR
+PMRUnlockSysPhysAddressesNested(PMR *psPMR, IMG_UINT32 ui32NestingLevel);
+
+
+/*************************************************************************/ /*!
+@Function       PMRUnpinPMR
+@Description    This is the counterpart to PMRPinPMR(). It is meant to be
+                called before repinning an allocation.
+
+                For a detailed description see client API documentation.
+
+@Input          psPMR           The physical memory to unpin.
+
+@Input          bDevMapped      A flag that indicates if this PMR has been
+                                mapped to device virtual space.
+                                Needed to check if this PMR is allowed to be
+                                unpinned or not.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success and the memory is
+                                registered to be reclaimed. Error otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR PMRUnpinPMR(PMR *psPMR, IMG_BOOL bDevMapped);
+
+/*************************************************************************/ /*!
+@Function       PMRPinPMR
+@Description    This is the counterpart to PMRUnpinPMR(). It is meant to be
+                called after unpinning an allocation.
+
+                For a detailed description see client API documentation.
+
+@Input          psPMR           The physical memory to pin.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success and the allocation content
+                                was successfully restored.
+
+                                PVRSRV_ERROR_PMR_NEW_MEMORY when the content
+                                could not be restored and new physical memory
+                                was allocated.
+
+                                A different error otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR PMRPinPMR(PMR *psPMR);
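+
+/* Illustrative sketch: the unpin/repin protocol described above. bDevMapped
+ * is IMG_FALSE here because the hypothetical PMR has no device mapping.
+ *
+ *     eError = PMRUnpinPMR(psPMR, IMG_FALSE);
+ *     // ... the OS may now reclaim the backing pages ...
+ *     eError = PMRPinPMR(psPMR);
+ *     if (eError == PVRSRV_ERROR_PMR_NEW_MEMORY)
+ *     {
+ *         // content was lost; new pages were allocated and must be
+ *         // re-initialised by the caller
+ *     }
+ */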
+
+/*
+ * PhysmemPMRExport()
+ *
+ * Given a PMR, creates a PMR "Export", which is a handle that
+ * provides sufficient data to be able to "import" this PMR elsewhere.
+ * The PMR Export is an object in its own right, whose existence
+ * implies a reference on the PMR, thus the PMR cannot be destroyed
+ * while the PMR Export exists.  The intention is that the PMR Export
+ * will be wrapped in the devicemem layer by a cross process handle,
+ * and some IPC by which to communicate the handle value and password
+ * to other processes.  The receiving process is able to unwrap this
+ * to gain access to the same PMR Export in this layer, and, via
+ * PhysmemPMRImport(), obtain a reference to the original PMR.
+ *
+ * The caller receives, along with the PMR Export object, information
+ * about the size and contiguity guarantee for the PMR, and also the
+ * PMR's secret password, in order to authenticate the subsequent
+ * import.
+ *
+ * N.B.  If you call PMRExportPMR() (and it succeeds), you are
+ * promising to later call PMRUnexportPMR()
+ */
+PVRSRV_ERROR
+PMRExportPMR(PMR *psPMR,
+             PMR_EXPORT **ppsPMRExport,
+             PMR_SIZE_T *puiSize,
+             PMR_LOG2ALIGN_T *puiLog2Contig,
+             PMR_PASSWORD_T *puiPassword);
+
+/*!
+*******************************************************************************
+
+ @Function      PMRMakeLocalImportHandle
+
+ @Description
+
+ Transform a general handle type into one that we are able to import.
+ Takes a PMR reference.
+
+ @Input   psPMR     The input PMR.
+ @Output  ppsPMR    The output PMR that is going to be transformed to the
+                    correct handle type.
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR
+PMRMakeLocalImportHandle(PMR *psPMR,
+                         PMR **ppsPMR);
+
+/*!
+*******************************************************************************
+
+ @Function      PMRUnmakeLocalImportHandle
+
+ @Description
+
+ Take a PMR, destroy the handle and release a reference.
+ Counterpart to PMRMakeLocalImportHandle().
+
+ @Input   psPMR       PMR to destroy.
+                      Created by PMRMakeLocalImportHandle().
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR
+PMRUnmakeLocalImportHandle(PMR *psPMR);
+
+/*
+ * PMRUnexportPMR()
+ *
+ * The reverse of PMRExportPMR().  This causes the PMR to no longer be
+ * exported.  If the PMR has already been imported, the imported PMR
+ * reference will still be valid, but no further imports will be possible.
+ */
+PVRSRV_ERROR
+PMRUnexportPMR(PMR_EXPORT *psPMRExport);
+
+/*
+ * PMRImportPMR()
+ *
+ * Takes a PMR Export object, as obtained by PMRExportPMR(), and
+ * obtains a reference to the original PMR.
+ *
+ * The password must match, and is assumed to have been (by whatever
+ * means, IPC etc.) preserved intact from the former call to
+ * PMRExportPMR()
+ *
+ * The size and contiguity arguments are entirely irrelevant for the
+ * import, however they are verified in order to trap bugs.
+ *
+ * N.B.  If you call PhysmemPMRImport() (and it succeeds), you are
+ * promising to later call PhysmemPMRUnimport()
+ */
+PVRSRV_ERROR
+PMRImportPMR(PMR_EXPORT *psPMRExport,
+             PMR_PASSWORD_T uiPassword,
+             PMR_SIZE_T uiSize,
+             PMR_LOG2ALIGN_T uiLog2Contig,
+             PMR **ppsPMR);
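+
+/* Illustrative sketch: an export/import round trip. The exporting side
+ * obtains the size, contiguity and password; the importing side (typically
+ * another process, after IPC not shown here) presents them back to
+ * authenticate and verify the import. Error handling is omitted.
+ *
+ *     PMR_EXPORT *psExport;
+ *     PMR_SIZE_T uiSize;
+ *     PMR_LOG2ALIGN_T uiLog2Contig;
+ *     PMR_PASSWORD_T uiPassword;
+ *     PMR *psImportedPMR;
+ *
+ *     eError = PMRExportPMR(psPMR, &psExport, &uiSize,
+ *                           &uiLog2Contig, &uiPassword);
+ *     // ... communicate the export handle, size, contiguity and password ...
+ *     eError = PMRImportPMR(psExport, uiPassword, uiSize,
+ *                           uiLog2Contig, &psImportedPMR);
+ *     // ... use psImportedPMR, then PMRUnimportPMR(psImportedPMR) and
+ *     //     PMRUnexportPMR(psExport) when done
+ */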
+
+/* Function that alters the mutability property
+ * of the PMR.
+ * Setting the flag to TRUE ensures that the PMR memory layout
+ * can't be changed through future calls. */
+void
+PMR_SetLayoutFixed(PMR *psPMR, IMG_BOOL bFlag);
+
+IMG_BOOL PMR_IsMemLayoutFixed(PMR *psPMR);
+
+/*
+ * PMRUnimportPMR()
+ *
+ * releases the reference on the PMR as obtained by PMRImportPMR()
+ */
+PVRSRV_ERROR
+PMRUnimportPMR(PMR *psPMR);
+
+PVRSRV_ERROR
+PMRLocalImportPMR(PMR *psPMR,
+                  PMR **ppsPMR,
+                  IMG_DEVMEM_SIZE_T *puiSize,
+                  IMG_DEVMEM_ALIGN_T *puiAlign);
+
+/*
+ * Equivalent mapping functions when in kernel mode.
+ */
+PVRSRV_ERROR
+PMRAcquireKernelMappingData(PMR *psPMR,
+                            size_t uiLogicalOffset,
+                            size_t uiSize,
+                            void **ppvKernelAddressOut,
+                            size_t *puiLengthOut,
+                            IMG_HANDLE *phPrivOut);
+
+PVRSRV_ERROR
+PMRAcquireSparseKernelMappingData(PMR *psPMR,
+                                  size_t uiLogicalOffset,
+                                  size_t uiSize,
+                                  void **ppvKernelAddressOut,
+                                  size_t *puiLengthOut,
+                                  IMG_HANDLE *phPrivOut);
+
+PVRSRV_ERROR
+PMRReleaseKernelMappingData(PMR *psPMR,
+                            IMG_HANDLE hPriv);
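+
+/* Illustrative sketch: acquiring a temporary kernel mapping of the first
+ * page of a PMR, using it, then releasing it. uiPageSize is a hypothetical
+ * caller-side value.
+ *
+ *     void *pvKernAddr;
+ *     size_t uiMappedLen;
+ *     IMG_HANDLE hPriv;
+ *
+ *     eError = PMRAcquireKernelMappingData(psPMR, 0, uiPageSize,
+ *                                          &pvKernAddr, &uiMappedLen,
+ *                                          &hPriv);
+ *     if (eError == PVRSRV_OK)
+ *     {
+ *         // read/write up to uiMappedLen bytes at pvKernAddr
+ *         PMRReleaseKernelMappingData(psPMR, hPriv);
+ *     }
+ */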
+
+/*
+ * PMR_ReadBytes()
+ *
+ * calls into the PMR implementation to read up to uiBufSz bytes,
+ * returning the actual number read in *puiNumBytes
+ *
+ * this will read up to the end of the PMR, or the next symbolic name
+ * boundary, or until the requested number of bytes is read, whichever
+ * comes first
+ *
+ * In the case of sparse PMRs the caller doesn't know which offsets are
+ * valid and which ones aren't, so we will just return 0 for invalid offsets
+ */
+PVRSRV_ERROR
+PMR_ReadBytes(PMR *psPMR,
+              IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+              IMG_UINT8 *pcBuffer,
+              size_t uiBufSz,
+              size_t *puiNumBytes);
+
+/*
+ * PMR_WriteBytes()
+ *
+ * calls into the PMR implementation to write up to uiBufSz bytes,
+ * returning the actual number written in *puiNumBytes
+ *
+ * this will write up to the end of the PMR, or the next symbolic name
+ * boundary, or until the requested number of bytes is written, whichever
+ * comes first
+ *
+ * In the case of sparse PMRs the caller doesn't know which offsets are
+ * valid and which ones aren't, so we will just ignore data at invalid offsets
+ */
+PVRSRV_ERROR
+PMR_WriteBytes(PMR *psPMR,
+               IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+               IMG_UINT8 *pcBuffer,
+               size_t uiBufSz,
+               size_t *puiNumBytes);
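+
+/* Illustrative sketch: reading the first 16 bytes of a PMR without mapping
+ * it, as described above. For a sparse PMR, bytes at unbacked offsets come
+ * back as zero.
+ *
+ *     IMG_UINT8 aui8Buf[16];
+ *     size_t uiRead = 0;
+ *
+ *     eError = PMR_ReadBytes(psPMR, 0, aui8Buf, sizeof(aui8Buf), &uiRead);
+ *     // uiRead may be less than sizeof(aui8Buf) near the end of the PMR
+ */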
+
+/*************************************************************************/ /*!
+@Function       PMRMMapPMR
+@Description    Performs the necessary steps to map the PMR into a user process
+                address space. The caller does not need to call
+                PMRLockSysPhysAddresses before calling this function.
+
+@Input          psPMR           PMR to map.
+
+@Input          pOSMMapData     OS specific data needed to create a mapping.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success or an error otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PMRMMapPMR(PMR *psPMR, PMR_MMAP_DATA pOSMMapData);
+
+/*
+ * PMRRefPMR()
+ *
+ * Take a reference on the passed in PMR
+ */
+void
+PMRRefPMR(PMR *psPMR);
+
+/*
+ * PMRUnrefPMR()
+ *
+ * This undoes a call to any of the PhysmemNew* family of APIs
+ * (i.e. any PMR factory "constructor")
+ *
+ * This relinquishes a reference to the PMR, and, where the refcount
+ * reaches 0, causes the PMR to be destroyed (calling the finalizer
+ * callback on the PMR, if there is one)
+ */
+PVRSRV_ERROR
+PMRUnrefPMR(PMR *psPMR);
+
+/*
+ * PMRUnrefUnlockPMR()
+ *
+ * Same as above but also unlocks the PMR.
+ */
+PVRSRV_ERROR
+PMRUnrefUnlockPMR(PMR *psPMR);
+
+PPVRSRV_DEVICE_NODE
+PMR_DeviceNode(const PMR *psPMR);
+
+/*
+ * PMRIsPMRLive()
+ *
+ * This function returns true if the PMR is in use and false otherwise.
+ * This function is not thread safe and hence the caller needs to ensure
+ * thread safety by explicitly taking a reference on the PMR or through
+ * other means.
+ */
+IMG_BOOL PMRIsPMRLive(PMR *psPMR);
+
+/*
+ * PMR_Flags()
+ *
+ * Flags are static and guaranteed for the life of the PMR.  Thus this
+ * function is idempotent and acquire/release semantics is not required.
+ *
+ * Returns the flags as specified on the PMR.  The flags are to be
+ * interpreted as mapping permissions
+ */
+PMR_FLAGS_T
+PMR_Flags(const PMR *psPMR);
+
+IMG_BOOL
+PMR_IsSparse(const PMR *psPMR);
+
+IMG_BOOL
+PMR_IsUnpinned(const PMR *psPMR);
+
+void
+PMR_LogicalSize(const PMR *psPMR,
+                               IMG_DEVMEM_SIZE_T *puiLogicalSize);
+
+PVRSRV_ERROR
+PMR_PhysicalSize(const PMR *psPMR,
+                                IMG_DEVMEM_SIZE_T *puiPhysicalSize);
+
+PHYS_HEAP *
+PMR_PhysHeap(const PMR *psPMR);
+
+PMR_MAPPING_TABLE *
+PMR_GetMappingTable(const PMR *psPMR);
+
+IMG_UINT32
+PMR_GetLog2Contiguity(const PMR *psPMR);
+
+const IMG_CHAR *
+PMR_GetAnnotation(const PMR *psPMR);
+
+/*
+ * PMR_IsOffsetValid()
+ *
+ * Returns whether an address offset inside a PMR has a valid
+ * physical backing.
+ */
+PVRSRV_ERROR
+PMR_IsOffsetValid(const PMR *psPMR,
+                               IMG_UINT32 ui32Log2PageSize,
+                               IMG_UINT32 ui32NumOfPages,
+                               IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                               IMG_BOOL *pbValid);
+
+PMR_IMPL_TYPE
+PMR_GetType(const PMR *psPMR);
+
+IMG_INT32
+PMR_GetRefCount(const PMR *psPMR);
+
+/*
+ * PMR_DevPhysAddr()
+ *
+ * A note regarding Lock/Unlock semantics
+ * ======================================
+ *
+ * PMR_DevPhysAddr may only be called after PMRLockSysPhysAddresses()
+ * has been called.  The data returned may be used only until
+ * PMRUnlockSysPhysAddresses() is called after which time the licence
+ * to use the data is revoked and the information may be invalid.
+ *
+ * Given an offset, this function returns the device physical address of the
+ * corresponding page in the PMR.  It may be called multiple times
+ * until the address of all relevant pages has been determined.
+ *
+ * If the caller only wants one physical address, it is sufficient to pass:
+ * ui32Log2PageSize==0 and ui32NumOfPages==1
+ */
+PVRSRV_ERROR
+PMR_DevPhysAddr(const PMR *psPMR,
+                IMG_UINT32 ui32Log2PageSize,
+                IMG_UINT32 ui32NumOfPages,
+                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                IMG_DEV_PHYADDR *psDevAddr,
+                IMG_BOOL *pbValid);
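+
+/* Illustrative sketch: the lock/query/unlock bracket described above, used
+ * here to fetch the device physical address of the first page. For sparse
+ * PMRs bValid must be checked before using the returned address.
+ *
+ *     IMG_DEV_PHYADDR sDevAddr;
+ *     IMG_BOOL bValid = IMG_FALSE;
+ *
+ *     eError = PMRLockSysPhysAddresses(psPMR);
+ *     if (eError == PVRSRV_OK)
+ *     {
+ *         eError = PMR_DevPhysAddr(psPMR, PMR_GetLog2Contiguity(psPMR),
+ *                                  1, 0, &sDevAddr, &bValid);
+ *         // ... use sDevAddr while the lock is held, if bValid ...
+ *         PMRUnlockSysPhysAddresses(psPMR);
+ *     }
+ */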
+
+/*
+ * PMR_CpuPhysAddr()
+ *
+ * See note above about Lock/Unlock semantics.
+ *
+ * Given an offset, this function returns the CPU physical address of the
+ * corresponding page in the PMR.  It may be called multiple times
+ * until the address of all relevant pages has been determined.
+ *
+ */
+PVRSRV_ERROR
+PMR_CpuPhysAddr(const PMR *psPMR,
+                IMG_UINT32 ui32Log2PageSize,
+                IMG_UINT32 ui32NumOfPages,
+                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                IMG_CPU_PHYADDR *psCpuAddrPtr,
+                IMG_BOOL *pbValid);
+
+PVRSRV_ERROR
+PMRGetUID(PMR *psPMR,
+          IMG_UINT64 *pui64UID);
+/*
+ * PMR_ChangeSparseMem()
+ *
+ * See note above about Lock/Unlock semantics.
+ *
+ * This function alters the memory map of the given PMR in device space by
+ *  adding/deleting the pages as requested.
+ *
+ */
+PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR,
+                                 IMG_UINT32 ui32AllocPageCount,
+                                 IMG_UINT32 *pai32AllocIndices,
+                                 IMG_UINT32 ui32FreePageCount,
+                                 IMG_UINT32 *pai32FreeIndices,
+                                 IMG_UINT32    uiSparseFlags);
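+
+/* Illustrative sketch: backing pages 0 and 2 of a sparse PMR and releasing
+ * the backing of page 5. uiSparseFlags is whatever resize flags the caller
+ * already holds (the flag values are defined elsewhere in the DDK).
+ *
+ *     IMG_UINT32 aui32Alloc[] = { 0, 2 };
+ *     IMG_UINT32 aui32Free[]  = { 5 };
+ *
+ *     eError = PMR_ChangeSparseMem(psPMR,
+ *                                  2, aui32Alloc,
+ *                                  1, aui32Free,
+ *                                  uiSparseFlags);
+ */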
+
+/*
+ * PMR_ChangeSparseMemCPUMap()
+ *
+ * See note above about Lock/Unlock semantics.
+ *
+ * This function alters the memory map of the given PMR in CPU space by
+ * adding/deleting the pages as requested.
+ */
+PVRSRV_ERROR PMR_ChangeSparseMemCPUMap(PMR *psPMR,
+                                       IMG_UINT64 sCpuVAddrBase,
+                                       IMG_UINT32 ui32AllocPageCount,
+                                       IMG_UINT32 *pai32AllocIndices,
+                                       IMG_UINT32 ui32FreePageCount,
+                                       IMG_UINT32 *pai32FreeIndices);
+
+#if defined(PDUMP)
+
+/*
+ * PMR_PDumpSymbolicAddr()
+ *
+ * Given an offset, returns the pdump memspace name and symbolic
+ * address of the corresponding page in the PMR.
+ *
+ * Note that PDump memspace names and symbolic addresses are static
+ * and valid for the lifetime of the PMR, therefore we don't require
+ * acquire/release semantics here.
+ *
+ * Note that it is expected that the pdump "mapping" code will call
+ * this function multiple times as each page is mapped in turn
+ *
+ * Note that NextSymName is the offset from the base of the PMR to the
+ * next pdump symbolic address (or the end of the PMR if the PMR only
+ * had one PDUMPMALLOC).
+ */
+PVRSRV_ERROR
+PMR_PDumpSymbolicAddr(const PMR *psPMR,
+                      IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                      IMG_UINT32 ui32NamespaceNameLen,
+                      IMG_CHAR *pszNamespaceName,
+                      IMG_UINT32 ui32SymbolicAddrLen,
+                      IMG_CHAR *pszSymbolicAddr,
+                      IMG_DEVMEM_OFFSET_T *puiNewOffset,
+                     IMG_DEVMEM_OFFSET_T *puiNextSymName
+                      );
+
+/*
+ * PMRPDumpLoadMemValue32()
+ *
+ * writes the current contents of a dword in PMR memory to the pdump
+ * script stream. Useful for patching a buffer by simply editing the
+ * script output file in ASCII plain text.
+ *
+ */
+PVRSRV_ERROR
+PMRPDumpLoadMemValue32(PMR *psPMR,
+                       IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                       IMG_UINT32 ui32Value,
+                       PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * PMRPDumpCopyMem32
+ *
+ * Adds to the pdump script stream a copy of a dword from one PMR memory
+ * location to another PMR memory location.
+ *
+ */
+PVRSRV_ERROR
+PMRPDumpCopyMem32(PMR *psDstPMR,
+                  IMG_DEVMEM_OFFSET_T uiDstLogicalOffset,
+                  PMR *psSrcPMR,
+                  IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset,
+                  const IMG_CHAR *pszTmpVar,
+                  PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * PMRPDumpLoadMemValue64()
+ *
+ * writes the current contents of a quadword in PMR memory to the pdump
+ * script stream. Useful for patching a buffer by simply editing the
+ * script output file in ASCII plain text.
+ *
+ */
+PVRSRV_ERROR
+PMRPDumpLoadMemValue64(PMR *psPMR,
+                       IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                       IMG_UINT64 ui64Value,
+                       PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * PMRPDumpCopyMem64
+ *
+ * Adds to the pdump script stream a copy of a quadword from one PMR memory
+ * location to another PMR memory location.
+ */
+PVRSRV_ERROR
+PMRPDumpCopyMem64(PMR *psDstPMR,
+                  IMG_DEVMEM_OFFSET_T uiDstLogicalOffset,
+                  PMR *psSrcPMR,
+                  IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset,
+                  const IMG_CHAR *pszTmpVar,
+                  PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * PMRPDumpLoadMem()
+ *
+ * Writes the current contents of the PMR memory to the pdump PRM stream,
+ * and emits some PDump code to the script stream to LDB said bytes from
+ * said file. If bZero is IMG_TRUE then the PDump zero page is used as the
+ * source for the LDB.
+ */
+PVRSRV_ERROR
+PMRPDumpLoadMem(PMR *psPMR,
+                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                IMG_DEVMEM_SIZE_T uiSize,
+                PDUMP_FLAGS_T uiPDumpFlags,
+                IMG_BOOL bZero);
+
+/*
+ * PMRPDumpSaveToFile()
+ *
+ * Emits some PDump that does an SAB (save bytes) using the PDump symbolic
+ * address of the PMR. Note that this is generally not the preferred way to
+ * dump the buffer contents. There is an equivalent function in
+ * devicemem_server.h which also emits SAB but using the virtual address,
+ * which is the "right" way to dump the buffer contents to a file.
+ * This function exists just to aid testing by providing a means to dump
+ * the PMR directly by symbolic address also.
+ */
+PVRSRV_ERROR
+PMRPDumpSaveToFile(const PMR *psPMR,
+                   IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   IMG_UINT32 uiArraySize,
+                   const IMG_CHAR *pszFilename,
+                   IMG_UINT32 uiFileOffset);
+#else /* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMR_PDumpSymbolicAddr)
+#endif
+static INLINE PVRSRV_ERROR
+PMR_PDumpSymbolicAddr(const PMR *psPMR,
+                      IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                      IMG_UINT32 ui32NamespaceNameLen,
+                      IMG_CHAR *pszNamespaceName,
+                      IMG_UINT32 ui32SymbolicAddrLen,
+                      IMG_CHAR *pszSymbolicAddr,
+                      IMG_DEVMEM_OFFSET_T *puiNewOffset,
+                      IMG_DEVMEM_OFFSET_T *puiNextSymName)
+{
+       PVR_UNREFERENCED_PARAMETER(psPMR);
+       PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+       PVR_UNREFERENCED_PARAMETER(ui32NamespaceNameLen);
+       PVR_UNREFERENCED_PARAMETER(pszNamespaceName);
+       PVR_UNREFERENCED_PARAMETER(ui32SymbolicAddrLen);
+       PVR_UNREFERENCED_PARAMETER(pszSymbolicAddr);
+       PVR_UNREFERENCED_PARAMETER(puiNewOffset);
+       PVR_UNREFERENCED_PARAMETER(puiNextSymName);
+       return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpLoadMemValue32)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpLoadMemValue32(PMR *psPMR,
+                       IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                       IMG_UINT32 ui32Value,
+                       PDUMP_FLAGS_T uiPDumpFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(psPMR);
+       PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+       PVR_UNREFERENCED_PARAMETER(ui32Value);
+       PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+       return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpLoadMemValue64)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpLoadMemValue64(PMR *psPMR,
+                       IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                       IMG_UINT64 ui64Value,
+                       PDUMP_FLAGS_T uiPDumpFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(psPMR);
+       PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+       PVR_UNREFERENCED_PARAMETER(ui64Value);
+       PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+       return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpLoadMem)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpLoadMem(PMR *psPMR,
+                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                IMG_DEVMEM_SIZE_T uiSize,
+                PDUMP_FLAGS_T uiPDumpFlags,
+                IMG_BOOL bZero)
+{
+       PVR_UNREFERENCED_PARAMETER(psPMR);
+       PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+       PVR_UNREFERENCED_PARAMETER(uiSize);
+       PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+       PVR_UNREFERENCED_PARAMETER(bZero);
+       return PVRSRV_OK;
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpSaveToFile)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpSaveToFile(const PMR *psPMR,
+                   IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   IMG_UINT32 uiArraySize,
+                   const IMG_CHAR *pszFilename,
+                   IMG_UINT32 uiFileOffset)
+{
+       PVR_UNREFERENCED_PARAMETER(psPMR);
+       PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+       PVR_UNREFERENCED_PARAMETER(uiSize);
+       PVR_UNREFERENCED_PARAMETER(uiArraySize);
+       PVR_UNREFERENCED_PARAMETER(pszFilename);
+       PVR_UNREFERENCED_PARAMETER(uiFileOffset);
+       return PVRSRV_OK;
+}
+
+#endif /* PDUMP */
+
+/* This function returns the private data that a PMR subtype has embedded
+ * in it. We use the function table pointer as "authorisation" that this
+ * function is being called by the pmr subtype implementation. We can
+ * assume (assert) that. It would be a bug in the implementation of the
+ * pmr subtype if this assertion ever fails.
+ */
+void *
+PMRGetPrivateData(const PMR *psPMR,
+                  const PMR_IMPL_FUNCTAB *psFuncTab);
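+
+/* Illustrative sketch: a hypothetical PMR factory recovering its own private
+ * data, passing its function table pointer as the authorisation token
+ * described above. EXAMPLE_PRIV and gsExampleFuncTab are hypothetical names.
+ *
+ *     EXAMPLE_PRIV *psPriv =
+ *         (EXAMPLE_PRIV *) PMRGetPrivateData(psPMR, &gsExampleFuncTab);
+ *     // the function table must be the one this factory registered
+ *     // with PMRCreatePMR()
+ */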
+
+PVRSRV_ERROR
+PMRZeroingPMR(PMR *psPMR,
+                               IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize);
+
+PVRSRV_ERROR
+PMRDumpPageList(PMR *psReferencePMR,
+                                       IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize);
+
+PVRSRV_ERROR
+PMRWritePMPageList(/* Target PMR, offset, and length */
+                   PMR *psPageListPMR,
+                   IMG_DEVMEM_OFFSET_T uiTableOffset,
+                   IMG_DEVMEM_SIZE_T  uiTableLength,
+                   /* Referenced PMR, and "page" granularity */
+                   PMR *psReferencePMR,
+                   IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize,
+                   PMR_PAGELIST **ppsPageList);
+
+/* Doesn't actually erase the page list - just releases
+ * the appropriate refcounts
+ */
+PVRSRV_ERROR // should be void, surely
+PMRUnwritePMPageList(PMR_PAGELIST *psPageList);
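+
+/* Illustrative sketch: writing the page addresses of psReferencePMR into a
+ * page-list PMR and later releasing the references again.
+ *
+ *     PMR_PAGELIST *psPageList;
+ *
+ *     eError = PMRWritePMPageList(psPageListPMR, uiTableOffset, uiTableLength,
+ *                                 psReferencePMR, uiLog2PageSize,
+ *                                 &psPageList);
+ *     // ... firmware/hardware consumes the page list ...
+ *     PMRUnwritePMPageList(psPageList);
+ */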
+
+#if defined(PDUMP)
+PVRSRV_ERROR
+PMRPDumpPol32(const PMR *psPMR,
+              IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+              IMG_UINT32 ui32Value,
+              IMG_UINT32 ui32Mask,
+              PDUMP_POLL_OPERATOR eOperator,
+              PDUMP_FLAGS_T uiFlags);
+
+PVRSRV_ERROR
+PMRPDumpCheck32(const PMR *psPMR,
+                               IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                               IMG_UINT32 ui32Value,
+                               IMG_UINT32 ui32Mask,
+                               PDUMP_POLL_OPERATOR eOperator,
+                               PDUMP_FLAGS_T uiPDumpFlags);
+
+PVRSRV_ERROR
+PMRPDumpCBP(const PMR *psPMR,
+            IMG_DEVMEM_OFFSET_T uiReadOffset,
+            IMG_DEVMEM_OFFSET_T uiWriteOffset,
+            IMG_DEVMEM_SIZE_T uiPacketSize,
+            IMG_DEVMEM_SIZE_T uiBufferSize);
+#else
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpPol32)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpPol32(const PMR *psPMR,
+              IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+              IMG_UINT32 ui32Value,
+              IMG_UINT32 ui32Mask,
+              PDUMP_POLL_OPERATOR eOperator,
+              PDUMP_FLAGS_T uiFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(psPMR);
+       PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+       PVR_UNREFERENCED_PARAMETER(ui32Value);
+       PVR_UNREFERENCED_PARAMETER(ui32Mask);
+       PVR_UNREFERENCED_PARAMETER(eOperator);
+       PVR_UNREFERENCED_PARAMETER(uiFlags);
+       return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpCheck32)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpCheck32(const PMR *psPMR,
+                               IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                               IMG_UINT32 ui32Value,
+                               IMG_UINT32 ui32Mask,
+                               PDUMP_POLL_OPERATOR eOperator,
+                               PDUMP_FLAGS_T uiFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(psPMR);
+       PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+       PVR_UNREFERENCED_PARAMETER(ui32Value);
+       PVR_UNREFERENCED_PARAMETER(ui32Mask);
+       PVR_UNREFERENCED_PARAMETER(eOperator);
+       PVR_UNREFERENCED_PARAMETER(uiFlags);
+       return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpCBP)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpCBP(const PMR *psPMR,
+            IMG_DEVMEM_OFFSET_T uiReadOffset,
+            IMG_DEVMEM_OFFSET_T uiWriteOffset,
+            IMG_DEVMEM_SIZE_T uiPacketSize,
+            IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+       PVR_UNREFERENCED_PARAMETER(psPMR);
+       PVR_UNREFERENCED_PARAMETER(uiReadOffset);
+       PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+       PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+       PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+       return PVRSRV_OK;
+}
+#endif
+
+PPVRSRV_DEVICE_NODE PMRGetExportDeviceNode(PMR_EXPORT *psExportPMR);
+
+/*
+ * PMRInit()
+ *
+ * To be called once and only once to initialise the internal data in
+ * the PMR module (mutexes and such)
+ *
+ * Not for general use.  Only PVRSRVInit(); should be calling this.
+ */
+PVRSRV_ERROR
+PMRInit(void);
+
+/*
+ * PMRDeInit()
+ *
+ * To be called once and only once to deinitialise the internal data in
+ * the PMR module (mutexes and such) and for debug checks
+ *
+ * Not for general use.  Only PVRSRVDeInit(); should be calling this.
+ */
+PVRSRV_ERROR
+PMRDeInit(void);
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+PVRSRV_ERROR
+PMRStoreRIHandle(PMR *psPMR, void *hRIHandle);
+#endif
+
+#endif /* #ifdef SRVSRV_PMR_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/pmr_impl.h b/drivers/gpu/drm/img/img-rogue/services/server/include/pmr_impl.h
new file mode 100644 (file)
index 0000000..cae0b7e
--- /dev/null
@@ -0,0 +1,539 @@
+/**************************************************************************/ /*!
+@File
+@Title          Implementation Callbacks for Physmem (PMR) abstraction
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management. This file is for definitions
+                that are private to the world of PMRs, but that need to be
+                shared between pmr.c itself and the modules that implement the
+                callbacks for the PMR.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVSRV_PMR_IMPL_H
+#define SRVSRV_PMR_IMPL_H
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+/*! Physical Memory Resource type.
+ */
+typedef struct _PMR_ PMR;
+
+/*! Per-flavour callbacks need to be shared with generic implementation
+ * (pmr.c).
+ */
+typedef void *PMR_IMPL_PRIVDATA;
+
+/*! Type for holding flags passed to the PMR factory.
+ */
+typedef PVRSRV_MEMALLOCFLAGS_T PMR_FLAGS_T;
+
+/*! Mapping table for the allocation.
+ *
+ * PMRs can be sparse, in which case not all the logical addresses in them are
+ * valid. The mapping table translates logical offsets into physical offsets.
+ *
+ * This table is always passed to the PMR factory regardless of whether the
+ * memory is sparse or not. In the case of non-sparse memory all virtual
+ * offsets are mapped to physical offsets.
+ */
+typedef struct _PMR_MAPPING_TABLE_ PMR_MAPPING_TABLE;
+
+/*! Private data passed to the ::PFN_MMAP_FN function.
+ */
+typedef void *PMR_MMAP_DATA;
+
+/*! PMR factory type.
+ */
+typedef enum _PMR_IMPL_TYPE_
+{
+       PMR_TYPE_NONE = 0,
+       PMR_TYPE_OSMEM,
+       PMR_TYPE_LMA,
+       PMR_TYPE_DMABUF,
+       PMR_TYPE_EXTMEM,
+       PMR_TYPE_DC,
+       PMR_TYPE_TDFWMEM,
+       PMR_TYPE_TDSECBUF
+} PMR_IMPL_TYPE;
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_LOCK_PHYS_ADDRESSES_FN
+
+@Description    Called to lock down the physical addresses for all pages
+                allocated for a PMR.
+                The default implementation is to simply increment a
+                lock-count for debugging purposes.
+                If overridden, the PFN_LOCK_PHYS_ADDRESSES_FN function will
+                be called when someone first requires a physical address,
+                and the PFN_UNLOCK_PHYS_ADDRESSES_FN counterpart will be
+                called when the last such reference is released.
+                The PMR implementation may assume that physical addresses
+                will have been "locked" in this manner before any call is
+                made to the pfnDevPhysAddr() callback
+
+@Input          pvPriv                Private data (which was generated by the
+                                      PMR factory when PMR was created)
+
+@Return         PVRSRV_OK if the operation was successful, an error code
+                otherwise.
+*/ /**************************************************************************/
+typedef PVRSRV_ERROR (*PFN_LOCK_PHYS_ADDRESSES_FN)(PMR_IMPL_PRIVDATA pvPriv);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_UNLOCK_PHYS_ADDRESSES_FN
+
+@Description    Called to release the lock taken on the physical addresses
+                for all pages allocated for a PMR.
+                The default implementation is to simply decrement a
+                lock-count for debugging purposes.
+                If overridden, the PFN_UNLOCK_PHYS_ADDRESSES_FN will be
+                called when the last reference taken on the PMR is
+                released.
+
+@Input          pvPriv                Private data (which was generated by the
+                                      PMR factory when PMR was created)
+
+@Return         PVRSRV_OK if the operation was successful, an error code
+                otherwise.
+*/ /**************************************************************************/
+typedef PVRSRV_ERROR (*PFN_UNLOCK_PHYS_ADDRESSES_FN)(PMR_IMPL_PRIVDATA pvPriv);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_DEV_PHYS_ADDR_FN
+
+@Description    Called to obtain one or more physical addresses for given
+                offsets within a PMR.
+
+                The PFN_LOCK_PHYS_ADDRESSES_FN callback (if overridden) is
+                guaranteed to have been called prior to calling the
+                PFN_DEV_PHYS_ADDR_FN callback and the caller promises not to
+                rely on the physical address thus obtained after the
+                PFN_UNLOCK_PHYS_ADDRESSES_FN callback is called.
+
+   Implementation of this callback is mandatory.
+
+@Input          pvPriv                Private data (which was generated by the
+                                      PMR factory when PMR was created)
+@Input          ui32Log2PageSize      The log2 page size.
+@Input          ui32NumOfAddr         The number of addresses to be returned
+@Input          puiOffset             The offset from the start of the PMR
+                                      (in bytes) for which the physical
+                                      address is required. Where multiple
+                                      addresses are requested, this will
+                                      contain a list of offsets.
+@Output         pbValid               List of boolean flags indicating which
+                                      addresses in the returned list
+                                      (psDevAddrPtr) are valid (for sparse
+                                      allocations, not all pages may have a
+                                      physical backing)
+@Output         psDevAddrPtr          Returned list of physical addresses
+
+@Return         PVRSRV_OK if the operation was successful, an error code
+                otherwise.
+*/ /**************************************************************************/
+typedef PVRSRV_ERROR (*PFN_DEV_PHYS_ADDR_FN)(PMR_IMPL_PRIVDATA pvPriv,
+                      IMG_UINT32 ui32Log2PageSize,
+                      IMG_UINT32 ui32NumOfAddr,
+                      IMG_DEVMEM_OFFSET_T *puiOffset,
+                      IMG_BOOL *pbValid,
+                      IMG_DEV_PHYADDR *psDevAddrPtr);
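+
+/* Illustrative sketch: a minimal PFN_DEV_PHYS_ADDR_FN implementation for a
+ * hypothetical factory whose private data (EXAMPLE_PRIV) records a single
+ * physically contiguous region starting at sBase. Every requested offset is
+ * therefore valid and is simply added to the base address.
+ *
+ *     static PVRSRV_ERROR ExampleDevPhysAddr(PMR_IMPL_PRIVDATA pvPriv,
+ *                                            IMG_UINT32 ui32Log2PageSize,
+ *                                            IMG_UINT32 ui32NumOfAddr,
+ *                                            IMG_DEVMEM_OFFSET_T *puiOffset,
+ *                                            IMG_BOOL *pbValid,
+ *                                            IMG_DEV_PHYADDR *psDevAddrPtr)
+ *     {
+ *         EXAMPLE_PRIV *psPriv = pvPriv;
+ *         IMG_UINT32 i;
+ *
+ *         PVR_UNREFERENCED_PARAMETER(ui32Log2PageSize);
+ *
+ *         for (i = 0; i < ui32NumOfAddr; i++)
+ *         {
+ *             psDevAddrPtr[i].uiAddr = psPriv->sBase.uiAddr + puiOffset[i];
+ *             pbValid[i] = IMG_TRUE;
+ *         }
+ *         return PVRSRV_OK;
+ *     }
+ */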
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN
+
+@Description    Called to obtain a kernel-accessible address (mapped to a
+                virtual address if required) for the PMR for use internally
+                in Services.
+
+    Implementation of this function for the (default) PMR factory providing
+    OS-allocations is mandatory (the driver will expect to be able to call
+    this function for OS-provided allocations).
+    For other PMR factories, implementation of this function is only necessary
+    where an MMU mapping is required for the Kernel to be able to access the
+    allocated memory.
+    If no mapping is needed, this function can remain unimplemented and the
+    pfn may be set to NULL.
+@Input          pvPriv                Private data (which was generated by
+                                      the PMR factory when PMR was created)
+@Input          uiOffset              Offset from the beginning of the PMR
+                                      at which mapping is to start
+@Input          uiSize                Size of mapping (in bytes)
+@Output         ppvKernelAddressOut   Mapped kernel address
+@Output         phHandleOut           Returned handle of the new mapping
+@Input          ulFlags               Mapping flags
+
+@Return         PVRSRV_OK if the mapping was successful, an error code
+                otherwise.
+*/ /**************************************************************************/
+typedef PVRSRV_ERROR (*PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN)(PMR_IMPL_PRIVDATA pvPriv,
+                      size_t uiOffset,
+                      size_t uiSize,
+                      void **ppvKernelAddressOut,
+                      IMG_HANDLE *phHandleOut,
+                      PMR_FLAGS_T ulFlags);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_RELEASE_KERNEL_MAPPING_DATA_FN
+
+@Description    Called to release a mapped kernel virtual address
+
+   Implementation of this callback is mandatory if
+   PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN is provided for the PMR factory,
+   otherwise this function can remain unimplemented and the pfn may be set
+   to NULL.
+
+@Input          pvPriv                Private data (which was generated by the
+                                      PMR factory when PMR was created)
+@Input          hHandle               Handle of the mapping to be released
+
+@Return         None
+*/ /**************************************************************************/
+typedef void (*PFN_RELEASE_KERNEL_MAPPING_DATA_FN)(PMR_IMPL_PRIVDATA pvPriv,
+              IMG_HANDLE hHandle);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_READ_BYTES_FN
+
+@Description    Called to read bytes from an unmapped allocation
+
+   Implementation of this callback is optional - where it is not provided,
+   the driver will use PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN to map the entire
+   PMR (if an MMU mapping is required for the Kernel to be able to access the
+   allocated memory).
+
+@Input          pvPriv                Private data (which was generated by the
+                                      PMR factory when PMR was created)
+@Input          uiOffset              Offset from the beginning of the PMR at
+                                      which to begin reading
+@Output         pcBuffer              Buffer in which to return the read data
+@Input          uiBufSz               Number of bytes to be read
+@Output         puiNumBytes           Number of bytes actually read (may be
+                                      less than uiBufSz)
+
+@Return         PVRSRV_OK if the read was successful, an error code
+                otherwise.
+*/ /**************************************************************************/
+typedef PVRSRV_ERROR (*PFN_READ_BYTES_FN)(PMR_IMPL_PRIVDATA pvPriv,
+                      IMG_DEVMEM_OFFSET_T uiOffset,
+                      IMG_UINT8 *pcBuffer,
+                      size_t uiBufSz,
+                      size_t *puiNumBytes);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_WRITE_BYTES_FN
+
+@Description    Called to write bytes into an unmapped allocation
+
+   Implementation of this callback is optional - where it is not provided,
+   the driver will use PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN to map the entire
+   PMR (if an MMU mapping is required for the Kernel to be able to access the
+   allocated memory).
+
+@Input          pvPriv                Private data (which was generated by the
+                                      PMR factory when PMR was created)
+@Input          uiOffset              Offset from the beginning of the PMR at
+                                      which to begin writing
+@Input          pcBuffer              Buffer containing the data to be written
+@Input          uiBufSz               Number of bytes to be written
+@Output         puiNumBytes           Number of bytes actually written (may be
+                                      less than uiBufSz)
+
+@Return         PVRSRV_OK if the write was successful, an error code
+                otherwise.
+*/ /**************************************************************************/
+typedef PVRSRV_ERROR (*PFN_WRITE_BYTES_FN)(PMR_IMPL_PRIVDATA pvPriv,
+                      IMG_DEVMEM_OFFSET_T uiOffset,
+                      IMG_UINT8 *pcBuffer,
+                      size_t uiBufSz,
+                      size_t *puiNumBytes);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_UNPIN_MEM_FN
+
+@Description    Called to unpin an allocation.
+                Once unpinned, the pages backing the allocation may be
+                re-used by the Operating System for another purpose.
+                When the pages are required again, they may be re-pinned
+                (by calling PFN_PIN_MEM_FN). The driver will try to return
+                the same pages as before. The caller will be told if the
+                content of these returned pages has been modified or if
+                the pages returned are not the original pages.
+
+   Implementation of this callback is optional.
+
+@Input          pvPriv                Private data (which was generated by the
+                                      PMR factory when PMR was created)
+
+@Return         PVRSRV_OK if the unpin was successful, an error code
+                otherwise.
+*/ /**************************************************************************/
+typedef PVRSRV_ERROR (*PFN_UNPIN_MEM_FN)(PMR_IMPL_PRIVDATA pPriv);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_PIN_MEM_FN
+
+@Description    Called to pin a previously unpinned allocation.
+                The driver will try to return the same pages as were previously
+                assigned to the allocation. The caller will be told if the
+                content of these returned pages has been modified or if
+                the pages returned are not the original pages.
+
+   Implementation of this callback is optional.
+
+@Input          pvPriv                Private data (which was generated by the
+                                      PMR factory when PMR was created)
+
+@Input          psMappingTable        Mapping table, which describes how
+                                      virtual 'chunks' are to be mapped to
+                                      physical 'chunks' for the allocation.
+
+@Return         PVRSRV_OK if the original pages were returned unmodified.
+                PVRSRV_ERROR_PMR_NEW_MEMORY if the memory returned was modified
+                or different pages were returned.
+                Another PVRSRV_ERROR code on failure.
+*/ /**************************************************************************/
+typedef PVRSRV_ERROR (*PFN_PIN_MEM_FN)(PMR_IMPL_PRIVDATA pPriv,
+                      PMR_MAPPING_TABLE *psMappingTable);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_CHANGE_SPARSE_MEM_FN
+
+@Description    Called to modify the physical backing for a given sparse
+                allocation.
+                The caller provides a list of the pages within the sparse
+                allocation which should be backed with a physical allocation
+                and a list of the pages which do not require backing.
+
+   Implementation of this callback is mandatory.
+
+@Input          pvPriv                Private data (which was generated by the
+                                      PMR factory when PMR was created)
+@Input          psPMR                 The PMR of the sparse allocation to be
+                                      modified
+@Input          ui32AllocPageCount    The number of pages specified in
+                                      pai32AllocIndices
+@Input          pai32AllocIndices     The list of pages in the sparse
+                                      allocation that should be backed with a
+                                      physical allocation. Pages are
+                                      referenced by their index within the
+                                      sparse allocation (e.g. in a 10 page
+                                      allocation, pages are denoted by
+                                      indices 0 to 9)
+@Input          ui32FreePageCount     The number of pages specified in
+                                      pai32FreeIndices
+@Input          pai32FreeIndices      The list of pages in the sparse
+                                      allocation that do not require
+                                      a physical allocation.
+@Input          ui32Flags             Allocation flags
+
+@Return         PVRSRV_OK if the sparse allocation physical backing was updated
+                successfully, an error code otherwise.
+*/ /**************************************************************************/
+typedef PVRSRV_ERROR (*PFN_CHANGE_SPARSE_MEM_FN)(PMR_IMPL_PRIVDATA pPriv,
+                      const PMR *psPMR,
+                      IMG_UINT32 ui32AllocPageCount,
+                      IMG_UINT32 *pai32AllocIndices,
+                      IMG_UINT32 ui32FreePageCount,
+                      IMG_UINT32 *pai32FreeIndices,
+                      IMG_UINT32 uiFlags);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN
+
+@Description    Called to modify which pages are mapped for a given sparse
+                allocation.
+                The caller provides a list of the pages within the sparse
+                allocation which should be given a CPU mapping and a list
+                of the pages which do not require a CPU mapping.
+
+   Implementation of this callback is mandatory.
+
+@Input          pvPriv                Private data (which was generated by the
+                                      PMR factory when PMR was created)
+@Input          psPMR                 The PMR of the sparse allocation to be
+                                      modified
+@Input          sCpuVAddrBase         The virtual base address of the sparse
+                                      allocation
+@Input          ui32AllocPageCount    The number of pages specified in
+                                      pai32AllocIndices
+@Input          pai32AllocIndices     The list of pages in the sparse
+                                      allocation that should be given a CPU
+                                      mapping. Pages are referenced by their
+                                      index within the sparse allocation (e.g.
+                                      in a 10 page allocation, pages are
+                                      denoted by indices 0 to 9)
+@Input          ui32FreePageCount     The number of pages specified in
+                                      pai32FreeIndices
+@Input          pai32FreeIndices      The list of pages in the sparse
+                                      allocation that do not require a CPU
+                                      mapping.
+
+@Return         PVRSRV_OK if the page mappings were updated successfully, an
+                error code otherwise.
+*/ /**************************************************************************/
+typedef PVRSRV_ERROR (*PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN)(PMR_IMPL_PRIVDATA pPriv,
+                      const PMR *psPMR,
+                      IMG_UINT64 sCpuVAddrBase,
+                      IMG_UINT32 ui32AllocPageCount,
+                      IMG_UINT32 *pai32AllocIndices,
+                      IMG_UINT32 ui32FreePageCount,
+                      IMG_UINT32 *pai32FreeIndices);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_MMAP_FN
+
+@Description    Called to map pages in the specified PMR.
+
+   Implementation of this callback is optional.
+   Where it is provided, it will be used in place of OSMMapPMRGeneric().
+
+@Input          pvPriv                Private data (which was generated by the
+                                      PMR factory when PMR was created)
+@Input          psPMR                 The PMR of the allocation to be mapped
+@Input          pMMapData             OS-specific data to describe how mapping
+                                      should be performed
+
+@Return         PVRSRV_OK if the mapping was successful, an error code
+                otherwise.
+*/ /**************************************************************************/
+typedef PVRSRV_ERROR (*PFN_MMAP_FN)(PMR_IMPL_PRIVDATA pPriv,
+                                    PMR *psPMR,
+                                    PMR_MMAP_DATA pMMapData);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_FINALIZE_FN
+
+@Description    Called to destroy the PMR.
+                This callback will be called only when all references to
+                the PMR have been dropped.
+                The PMR was created via a call to PhysmemNewRamBackedPMR()
+                and is destroyed via this callback.
+
+   Implementation of this callback is mandatory.
+
+@Input          pvPriv                Private data (which was generated by the
+                                      PMR factory when PMR was created)
+
+@Return         PVRSRV_OK if the PMR destruction was successful, an error
+                code otherwise.
+                Currently PVRSRV_ERROR_PMR_STILL_REFERENCED is the only
+                error returned from the physmem_dmabuf.c layer; on this
+                error, destruction of the PMR is aborted without disturbing
+                the PMR state.
+*/ /**************************************************************************/
+typedef PVRSRV_ERROR (*PFN_FINALIZE_FN)(PMR_IMPL_PRIVDATA pvPriv);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_ACQUIRE_PMR_FACTORY_LOCK_FN
+
+@Description    Called to acquire the PMR factory's global lock, if it has
+                one (hence this callback is optional). Factories that expose
+                entry points in addition to the normal bridge calls (for
+                example, calls from the native OS that manipulate the PMR
+                reference count) should create a factory lock and provide
+                implementations for these callbacks.
+
+   Implementation of this callback is optional.
+
+@Return         None
+*/ /**************************************************************************/
+typedef void (*PFN_ACQUIRE_PMR_FACTORY_LOCK_FN)(void);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_RELEASE_PMR_FACTORY_LOCK_FN
+
+@Description    Called to release the PMR factory's global lock acquired by
+                the PFN_ACQUIRE_PMR_FACTORY_LOCK_FN callback.
+
+   Implementation of this callback is optional.
+
+@Return         None
+*/ /**************************************************************************/
+typedef void (*PFN_RELEASE_PMR_FACTORY_LOCK_FN)(void);
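+/* Illustrative sketch (not part of the imported DDK sources): a factory that
+ * needs a global lock could back the two lock callbacks above with an
+ * ordinary Linux mutex. The mutex and function names are hypothetical.
+ */
+#if 0
+#include <linux/mutex.h>
+
+static DEFINE_MUTEX(g_sExampleFactoryLock);
+
+static void _ExampleGetPMRFactoryLock(void)
+{
+	mutex_lock(&g_sExampleFactoryLock);
+}
+
+static void _ExampleReleasePMRFactoryLock(void)
+{
+	mutex_unlock(&g_sExampleFactoryLock);
+}
+#endif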
+
+/*! PMR factory callback table.
+ */
+struct _PMR_IMPL_FUNCTAB_ {
+    /*! Callback function pointer, see ::PFN_LOCK_PHYS_ADDRESSES_FN */
+    PFN_LOCK_PHYS_ADDRESSES_FN pfnLockPhysAddresses;
+    /*! Callback function pointer, see ::PFN_UNLOCK_PHYS_ADDRESSES_FN */
+    PFN_UNLOCK_PHYS_ADDRESSES_FN pfnUnlockPhysAddresses;
+
+    /*! Callback function pointer, see ::PFN_DEV_PHYS_ADDR_FN */
+    PFN_DEV_PHYS_ADDR_FN pfnDevPhysAddr;
+
+    /*! Callback function pointer, see ::PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN */
+    PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN pfnAcquireKernelMappingData;
+    /*! Callback function pointer, see ::PFN_RELEASE_KERNEL_MAPPING_DATA_FN */
+    PFN_RELEASE_KERNEL_MAPPING_DATA_FN pfnReleaseKernelMappingData;
+
+    /*! Callback function pointer, see ::PFN_READ_BYTES_FN */
+    PFN_READ_BYTES_FN pfnReadBytes;
+    /*! Callback function pointer, see ::PFN_WRITE_BYTES_FN */
+    PFN_WRITE_BYTES_FN pfnWriteBytes;
+
+    /*! Callback function pointer, see ::PFN_UNPIN_MEM_FN */
+    PFN_UNPIN_MEM_FN pfnUnpinMem;
+    /*! Callback function pointer, see ::PFN_PIN_MEM_FN */
+    PFN_PIN_MEM_FN pfnPinMem;
+
+    /*! Callback function pointer, see ::PFN_CHANGE_SPARSE_MEM_FN */
+    PFN_CHANGE_SPARSE_MEM_FN pfnChangeSparseMem;
+    /*! Callback function pointer, see ::PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN */
+    PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN pfnChangeSparseMemCPUMap;
+
+    /*! Callback function pointer, see ::PFN_MMAP_FN */
+    PFN_MMAP_FN pfnMMap;
+
+    /*! Callback function pointer, see ::PFN_FINALIZE_FN */
+    PFN_FINALIZE_FN pfnFinalize;
+
+    /*! Callback function pointer, see ::PFN_ACQUIRE_PMR_FACTORY_LOCK_FN */
+    PFN_ACQUIRE_PMR_FACTORY_LOCK_FN pfnGetPMRFactoryLock;
+
+    /*! Callback function pointer, see ::PFN_RELEASE_PMR_FACTORY_LOCK_FN */
+    PFN_RELEASE_PMR_FACTORY_LOCK_FN pfnReleasePMRFactoryLock;
+};
+
+/*! PMR factory callback table.
+ */
+typedef struct _PMR_IMPL_FUNCTAB_ PMR_IMPL_FUNCTAB;
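+/* Illustrative sketch (not part of the imported DDK sources): a PMR factory
+ * fills in the callbacks it implements and leaves optional entries NULL.
+ * The _Example* handlers named below are hypothetical; only the entries the
+ * descriptions above mark as mandatory are shown populated.
+ */
+#if 0
+static const PMR_IMPL_FUNCTAB _sExamplePMRFuncTab =
+{
+	/* Mandatory per the descriptions above */
+	.pfnChangeSparseMem       = _ExampleChangeSparseMem,
+	.pfnChangeSparseMemCPUMap = _ExampleChangeSparseMemCPUMap,
+	.pfnFinalize              = _ExampleFinalize,
+
+	/* Optional: leaving pfnMMap NULL selects the OSMMapPMRGeneric()
+	 * fallback, per the PFN_MMAP_FN description. */
+	.pfnMMap                  = NULL,
+
+	/* Optional: only needed if the factory has a global lock. */
+	.pfnGetPMRFactoryLock     = NULL,
+	.pfnReleasePMRFactoryLock = NULL,
+};
+#endif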
+
+#endif /* SRVSRV_PMR_IMPL_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/pmr_os.h b/drivers/gpu/drm/img/img-rogue/services/server/include/pmr_os.h
new file mode 100644 (file)
index 0000000..c1e8c4e
--- /dev/null
@@ -0,0 +1,64 @@
+/*************************************************************************/ /*!
+@File
+@Title          OS PMR functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS specific PMR functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(PMR_OS_H)
+#define PMR_OS_H
+
+#include "pmr_impl.h"
+
+#define SYSPORT_MEM_OFFSET   0x400000000    /* this may be used in other places */
+
+/*************************************************************************/ /*!
+@Function       OSMMapPMRGeneric
+@Description    Implements a generic PMR mapping function, which is used
+                to CPU map a PMR where the PMR does not have a mapping
+                function defined by the creating PMR factory.
+@Input          psPMR               the PMR to be mapped
+@Output         pOSMMapData         pointer to any private data
+                                    needed by the generic mapping function
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+OSMMapPMRGeneric(PMR *psPMR, PMR_MMAP_DATA pOSMMapData);
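+/* Illustrative sketch (not part of the imported DDK sources): a PMR factory
+ * whose mappings need no special handling can implement PFN_MMAP_FN as a
+ * thin wrapper around the generic helper declared above (equivalently, it
+ * can leave pfnMMap NULL). The function name is hypothetical.
+ */
+#if 0
+static PVRSRV_ERROR _ExampleMMap(PMR_IMPL_PRIVDATA pvPriv,
+                                 PMR *psPMR,
+                                 PMR_MMAP_DATA pMMapData)
+{
+	(void) pvPriv; /* no factory-specific state needed */
+
+	return OSMMapPMRGeneric(psPMR, pMMapData);
+}
+#endif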
+
+#endif /* !defined(PMR_OS_H) */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/power.h b/drivers/gpu/drm/img/img-rogue/services/server/include/power.h
new file mode 100644 (file)
index 0000000..333e799
--- /dev/null
@@ -0,0 +1,430 @@
+/*************************************************************************/ /*!
+@File
+@Title          Power Management Functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Main APIs for power management functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef POWER_H
+#define POWER_H
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_device.h"
+#include "pvrsrv_error.h"
+#include "servicesext.h"
+#include "opaque_types.h"
+
+/*!
+ *****************************************************************************
+ *     Power management
+ *****************************************************************************/
+
+typedef struct _PVRSRV_POWER_DEV_TAG_ PVRSRV_POWER_DEV;
+
+typedef IMG_BOOL (*PFN_SYS_DEV_IS_DEFAULT_STATE_OFF)(PVRSRV_POWER_DEV *psPowerDevice);
+
+/* Power transition handler prototypes */
+
+/*!
+  Typedef for a pointer to a Function that will be called before a transition
+  from one power state to another. See also PFN_POST_POWER.
+ */
+typedef PVRSRV_ERROR (*PFN_PRE_POWER) (IMG_HANDLE                              hDevHandle,
+                                                                          PVRSRV_DEV_POWER_STATE       eNewPowerState,
+                                                                          PVRSRV_DEV_POWER_STATE       eCurrentPowerState,
+                                                                          PVRSRV_POWER_FLAGS           ePwrFlags);
+/*!
+  Typedef for a pointer to a Function that will be called after a transition
+  from one power state to another. See also PFN_PRE_POWER.
+ */
+typedef PVRSRV_ERROR (*PFN_POST_POWER) (IMG_HANDLE                             hDevHandle,
+                                                                               PVRSRV_DEV_POWER_STATE  eNewPowerState,
+                                                                               PVRSRV_DEV_POWER_STATE  eCurrentPowerState,
+                                                                               PVRSRV_POWER_FLAGS              ePwrFlags);
+
+PVRSRV_ERROR PVRSRVPowerLockInit(PPVRSRV_DEVICE_NODE psDeviceNode);
+void PVRSRVPowerLockDeInit(PPVRSRV_DEVICE_NODE psDeviceNode);
+
+/*!
+******************************************************************************
+
+ @Function     PVRSRVPowerLock
+
+ @Description  Obtain the mutex for power transitions. Only allowed when
+                system power is on.
+
+ @Return       PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVPowerLock(PPVRSRV_DEVICE_NODE psDeviceNode);
+
+/*!
+******************************************************************************
+
+ @Function     PVRSRVPowerUnlock
+
+ @Description  Release the mutex for power transitions
+
+ @Return       None
+
+******************************************************************************/
+void PVRSRVPowerUnlock(PPVRSRV_DEVICE_NODE psDeviceNode);
+
+/*!
+******************************************************************************
+
+ @Function     PVRSRVPowerTryLock
+
+ @Description  Try to obtain the mutex for power transitions. Only allowed when
+               system power is on.
+
+ @Return       PVRSRV_ERROR_RETRY or PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF or
+               PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVPowerTryLock(PPVRSRV_DEVICE_NODE psDeviceNode);
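+/* Illustrative sketch (not part of the imported DDK sources): the usual
+ * pattern for a caller that must not block on the power lock, based on the
+ * return codes documented above. The function name is hypothetical.
+ */
+#if 0
+static PVRSRV_ERROR _ExampleWithPowerLock(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+	PVRSRV_ERROR eError = PVRSRVPowerTryLock(psDeviceNode);
+
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		/* Lock contended: back off and try again later. */
+		return eError;
+	}
+	if (eError != PVRSRV_OK)
+	{
+		/* e.g. PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF */
+		return eError;
+	}
+
+	/* ... power-state-sensitive work goes here ... */
+
+	PVRSRVPowerUnlock(psDeviceNode);
+	return PVRSRV_OK;
+}
+#endif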
+
+/*!
+******************************************************************************
+
+ @Function     PVRSRVPwrLockIsLockedByMe
+
+ @Description  Determine if the calling context is holding the device power-lock
+
+ @Return       IMG_BOOL
+
+******************************************************************************/
+IMG_BOOL PVRSRVPwrLockIsLockedByMe(PCPVRSRV_DEVICE_NODE psDeviceNode);
+IMG_BOOL PVRSRVDeviceIsDefaultStateOFF(PVRSRV_POWER_DEV *psPowerDevice);
+
+/*!
+******************************************************************************
+
+ @Function     PVRSRVSetDevicePowerStateKM
+
+ @Description  Set the Device into a new state
+
+ @Input                psDeviceNode : Device node
+ @Input                eNewPowerState : New power state
+ @Input                ePwrFlags : Power state change flags
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(PPVRSRV_DEVICE_NODE   psDeviceNode,
+                                                                                PVRSRV_DEV_POWER_STATE eNewPowerState,
+                                                                                PVRSRV_POWER_FLAGS             ePwrFlags);
+
+/*************************************************************************/ /*!
+@Function     PVRSRVSetDeviceSystemPowerState
+@Description  Set the device into a new power state based on the system's
+              power state
+@Input        psDeviceNode       Device node
+@Input        eNewSysPowerState  New system power state
+@Input        ePwrFlags          Power state change flags
+@Return       PVRSRV_ERROR       PVRSRV_OK on success or an error otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVSetDeviceSystemPowerState(PPVRSRV_DEVICE_NODE psDeviceNode,
+                                                                                        PVRSRV_SYS_POWER_STATE eNewSysPowerState,
+                                                                                        PVRSRV_POWER_FLAGS ePwrFlags);
+
+/*!
+******************************************************************************
+
+ @Function      PVRSRVSetDeviceDefaultPowerState
+
+ @Description   Set the default device power state to eNewPowerState
+
+ @Input         psDeviceNode : Device node
+ @Input         eNewPowerState : New power state
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVSetDeviceDefaultPowerState(PCPVRSRV_DEVICE_NODE psDeviceNode,
+                                       PVRSRV_DEV_POWER_STATE eNewPowerState);
+
+/*!
+******************************************************************************
+
+ @Function      PVRSRVSetSystemPowerState
+
+ @Description   Set the system power state to eNewPowerState
+
+ @Input         psDeviceConfig : Device config
+ @Input         eNewPowerState : New power state
+
+ @Return        PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVSetSystemPowerState(PVRSRV_DEVICE_CONFIG * psDeviceConfig,
+                                                                                        PVRSRV_SYS_POWER_STATE eNewSysPowerState);
+
+/*!
+******************************************************************************
+
+ @Function      PVRSRVSetPowerCallbacks
+
+ @Description   Initialise the Power Device's function pointers
+                to the appropriate callbacks depending on driver mode and
+                system setup.
+
+ @Input         psDeviceNode : Device node
+ @Input         psPowerDevice : Power device
+ @Input         pfnDevicePrePower : regular device pre power callback
+ @Input         pfnDevicePostPower : regular device post power callback
+ @Input         pfnSystemPrePower : regular system pre power callback
+ @Input         pfnSystemPostPower : regular system post power callback
+ @Input         pfnForcedIdleRequest : forced idle request callback
+ @Input         pfnForcedIdleCancelRequest : forced idle request cancel callback
+
+******************************************************************************/
+void PVRSRVSetPowerCallbacks(PPVRSRV_DEVICE_NODE                               psDeviceNode,
+                                                        PVRSRV_POWER_DEV                                       *psPowerDevice,
+                                                        PFN_PRE_POWER                                          pfnDevicePrePower,
+                                                        PFN_POST_POWER                                     pfnDevicePostPower,
+                                                        PFN_SYS_PRE_POWER                                  pfnSystemPrePower,
+                                                        PFN_SYS_POST_POWER                             pfnSystemPostPower,
+                                                        PFN_FORCED_IDLE_REQUEST                        pfnForcedIdleRequest,
+                                                        PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest);
+
+/* Type PFN_DC_REGISTER_POWER */
+PVRSRV_ERROR PVRSRVRegisterPowerDevice(PPVRSRV_DEVICE_NODE                             psDeviceNode,
+                                                                          PFN_PRE_POWER                                        pfnDevicePrePower,
+                                                                          PFN_POST_POWER                                       pfnDevicePostPower,
+                                                                          PFN_SYS_PRE_POWER                        pfnSystemPrePower,
+                                                                          PFN_SYS_POST_POWER                       pfnSystemPostPower,
+                                                                          PFN_PRE_CLOCKSPEED_CHANGE            pfnPreClockSpeedChange,
+                                                                          PFN_POST_CLOCKSPEED_CHANGE           pfnPostClockSpeedChange,
+                                                                          PFN_FORCED_IDLE_REQUEST                      pfnForcedIdleRequest,
+                                                                          PFN_FORCED_IDLE_CANCEL_REQUEST       pfnForcedIdleCancelRequest,
+                                                                          PFN_GPU_UNITS_POWER_CHANGE           pfnGPUUnitsPowerChange,
+                                                                          IMG_HANDLE                                           hDevCookie,
+                                                                          PVRSRV_DEV_POWER_STATE                       eCurrentPowerState,
+                                                                          PVRSRV_DEV_POWER_STATE                       eDefaultPowerState);
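+/* Illustrative sketch (not part of the imported DDK sources): registering a
+ * device with the power manager using device-level callbacks only. The
+ * _Example* callbacks and the chosen power states are hypothetical.
+ */
+#if 0
+static PVRSRV_ERROR _ExampleRegisterGpuPower(PPVRSRV_DEVICE_NODE psDeviceNode,
+                                             IMG_HANDLE hDevCookie)
+{
+	return PVRSRVRegisterPowerDevice(psDeviceNode,
+	                                 _ExampleDevicePrePower,       /* device pre-power */
+	                                 _ExampleDevicePostPower,      /* device post-power */
+	                                 NULL, NULL,                   /* no system pre/post-power */
+	                                 _ExamplePreClockSpeedChange,  /* pre clock-speed change */
+	                                 _ExamplePostClockSpeedChange, /* post clock-speed change */
+	                                 NULL, NULL,                   /* no forced-idle callbacks */
+	                                 NULL,                         /* no GPU-units power change */
+	                                 hDevCookie,
+	                                 PVRSRV_DEV_POWER_STATE_OFF,   /* current power state */
+	                                 PVRSRV_DEV_POWER_STATE_OFF);  /* default power state */
+}
+#endif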
+
+/*!
+******************************************************************************
+
+ @Function     PVRSRVRemovePowerDevice
+
+ @Description
+
+ Removes the device from the power management register.
+
+ @Input                psDeviceNode : Device node
+
+******************************************************************************/
+void PVRSRVRemovePowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode);
+
+/*!
+******************************************************************************
+
+ @Function     PVRSRVGetDevicePowerState
+
+ @Description
+
+       Return the device power state
+
+ @Input                psDeviceNode : Device node
+ @Output       pePowerState : Current power state
+
+ @Return       PVRSRV_ERROR_UNKNOWN_POWER_STATE if device could not be found.
+            PVRSRV_OK otherwise.
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVGetDevicePowerState(PCPVRSRV_DEVICE_NODE psDeviceNode,
+                                                                          PPVRSRV_DEV_POWER_STATE pePowerState);
+
+/*!
+******************************************************************************
+
+ @Function     PVRSRVIsDevicePowered
+
+ @Description
+
+       Whether the device is powered, for the purposes of lockup detection.
+
+ @Input                psDeviceNode : Device node
+
+ @Return       IMG_BOOL
+
+******************************************************************************/
+IMG_BOOL PVRSRVIsDevicePowered(PPVRSRV_DEVICE_NODE psDeviceNode);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVDevicePreClockSpeedChange
+
+@Description    This function is called before a voltage/frequency change is
+                made to the GPU HW. It informs the host driver of the intention
+                to make a DVFS change. It allows the host driver to idle
+                the GPU and begin a hold-off period that prevents new work
+                from starting on the GPU.
+                When this call succeeds the caller *must* call
+                PVRSRVDevicePostClockSpeedChange() to end the hold off period
+                to allow new work to be submitted to the GPU.
+
+                Called from system layer or OS layer implementation that
+                is responsible for triggering a GPU DVFS transition.
+
+@Input          psDeviceNode pointer to the device affected by DVFS transition.
+@Input          bIdleDevice  when True, the driver will wait for the GPU to
+                             reach an idle state before the call returns.
+@Input          pvInfo       unused
+
+@Return         PVRSRV_OK    on success, power lock acquired and held on exit,
+                             GPU idle.
+                PVRSRV_ERROR on failure, power lock not held on exit, do not
+                             call PVRSRVDevicePostClockSpeedChange().
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode,
+                                                                                        IMG_BOOL       bIdleDevice,
+                                                                                        void   *pvInfo);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVDevicePostClockSpeedChange
+
+@Description    This function is called after a voltage/frequency change has
+                been made to the GPU HW following a call to
+                PVRSRVDevicePreClockSpeedChange().
+                Before calling this function the caller must ensure the system
+                data RGX_DATA->RGX_TIMING_INFORMATION->ui32CoreClockSpeed has
+                been updated with the new frequency set, measured in Hz.
+                The function informs the host driver that the DVFS change has
+                completed. The driver will end the work hold off period, cancel
+                the device idle period and update its time data records.
+                When this call returns work submissions are unblocked and
+                are submitted to the GPU as normal.
+                This function *must* not be called if the preceding call to
+                PVRSRVDevicePreClockSpeedChange() failed.
+
+                Called from system layer or OS layer implementation that
+                is responsible for triggering a GPU DVFS transition.
+
+@Input          psDeviceNode pointer to the device affected by DVFS transition.
+@Input          bIdleDevice  when True, the driver will cancel the GPU
+                             device idle state before the call returns. Value
+                             given must match that used in the call to
+                             PVRSRVDevicePreClockSpeedChange() otherwise
+                             undefined behaviour will result.
+@Input          pvInfo       unused
+
+@Return         void         power lock released, no longer held on exit.
+*/ /**************************************************************************/
+void PVRSRVDevicePostClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode,
+                                                                         IMG_BOOL              bIdleDevice,
+                                                                         void          *pvInfo);
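+/* Illustrative sketch (not part of the imported DDK sources): the call
+ * ordering a DVFS transition is expected to follow, per the two descriptions
+ * above. _ExampleSetGpuClock() is a hypothetical platform hook.
+ */
+#if 0
+static PVRSRV_ERROR _ExampleDvfsTransition(PPVRSRV_DEVICE_NODE psDeviceNode,
+                                           IMG_UINT32 ui32NewClockHz)
+{
+	PVRSRV_ERROR eError;
+
+	eError = PVRSRVDevicePreClockSpeedChange(psDeviceNode, IMG_TRUE, NULL);
+	if (eError != PVRSRV_OK)
+	{
+		/* Power lock not held on failure: do NOT call the post hook. */
+		return eError;
+	}
+
+	/* Program the new frequency and update
+	 * RGX_DATA->RGX_TIMING_INFORMATION->ui32CoreClockSpeed (in Hz)
+	 * before ending the hold-off period. */
+	_ExampleSetGpuClock(ui32NewClockHz);
+
+	PVRSRVDevicePostClockSpeedChange(psDeviceNode, IMG_TRUE, NULL);
+	return PVRSRV_OK;
+}
+#endif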
+
+/*!
+******************************************************************************
+
+ @Function    PVRSRVDeviceIdleRequestKM
+
+ @Description Perform device-specific processing required to force the device
+              idle. The device power-lock might be temporarily released (and
+              re-acquired) during the course of this call; to maintain lock
+              ordering, the power-lock should therefore be the last lock
+              acquired before calling this function.
+
+ @Input       psDeviceNode         : Device node
+
+ @Input       pfnIsDefaultStateOff : When specified, the idle request is only
+                                     processed if this function passes.
+
+ @Input       bDeviceOffPermitted  : IMG_TRUE if the transition should not fail
+                                       if device off
+                                     IMG_FALSE if the transition should fail if
+                                       device off
+
+ @Return      PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED
+                                     When re-acquisition of power-lock failed.
+                                     This error NEEDS EXPLICIT HANDLING at call
+                                     site as it signifies the caller needs to
+                                     AVOID calling PVRSRVPowerUnlock, since
+                                     power-lock is no longer "possessed" by
+                                     this context.
+
+              PVRSRV_OK              When idle request succeeded.
+              PVRSRV_ERROR           Other system errors.
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVDeviceIdleRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode,
+                                       PFN_SYS_DEV_IS_DEFAULT_STATE_OFF        pfnIsDefaultStateOff,
+                                       IMG_BOOL                                bDeviceOffPermitted);
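+/* Illustrative sketch (not part of the imported DDK sources): the explicit
+ * handling required by the return documentation above, i.e. never calling
+ * PVRSRVPowerUnlock() once PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED has
+ * been returned. The function name is hypothetical.
+ */
+#if 0
+static PVRSRV_ERROR _ExampleForceIdle(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+	PVRSRV_ERROR eError = PVRSRVPowerLock(psDeviceNode);
+
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_FALSE);
+	if (eError == PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED)
+	{
+		/* The power lock is no longer held by this context: return
+		 * without calling PVRSRVPowerUnlock(). */
+		return eError;
+	}
+
+	PVRSRVPowerUnlock(psDeviceNode);
+	return eError;
+}
+#endif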
+
+/*!
+******************************************************************************
+
+ @Function     PVRSRVDeviceIdleCancelRequestKM
+
+ @Description Perform device-specific processing required to cancel the forced idle state
+              on the device, returning to normal operation.
+
+ @Input                psDeviceNode : Device node
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVDeviceIdleCancelRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode);
+
+/*!
+******************************************************************************
+
+@Function       PVRSRVDeviceGPUUnitsPowerChange
+@Description    Request from the system layer to change the power state of
+                GPU units
+@Input          psDeviceNode            RGX Device Node.
+@Input          ui32NewValue            Value indicating the new power state
+                                        of GPU units. How this is interpreted
+                                        depends upon the device-specific
+                                        function subsequently called by the
+                                        server via a pfn.
+@Return         PVRSRV_ERROR.
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVDeviceGPUUnitsPowerChange(PPVRSRV_DEVICE_NODE psDeviceNode,
+                                       IMG_UINT32      ui32NewValue);
+
+
+#endif /* POWER_H */
+
+/******************************************************************************
+ End of file (power.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/process_stats.h b/drivers/gpu/drm/img/img-rogue/services/server/include/process_stats.h
new file mode 100644 (file)
index 0000000..4003997
--- /dev/null
@@ -0,0 +1,223 @@
+/*************************************************************************/ /*!
+@File
+@Title          Functions for creating and reading proc filesystem entries.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PROCESS_STATS_H
+#define PROCESS_STATS_H
+
+#include <powervr/mem_types.h>
+
+#include "pvrsrv_error.h"
+#include "allocmem.h"
+#include "cache_ops.h"
+
+/*
+ * The publishing of Process Stats is controlled by the
+ * PVRSRV_ENABLE_PROCESS_STATS build option. The recording of all Memory
+ * allocations is controlled by the PVRSRV_ENABLE_MEMORY_STATS build option.
+ *
+ * Note: There will be a performance degradation with memory allocation
+ *       recording enabled!
+ */
+
+
+/*
+ * Memory types which can be tracked...
+ */
+typedef enum {
+       PVRSRV_MEM_ALLOC_TYPE_KMALLOC,                          /* memory allocated by kmalloc() */
+       PVRSRV_MEM_ALLOC_TYPE_VMALLOC,                          /* memory allocated by vmalloc() */
+       PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA,       /* pages allocated from UMA to hold page table information */
+       PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA,                      /* ALLOC_PAGES_PT_UMA mapped to kernel address space */
+       PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA,       /* pages allocated from LMA to hold page table information */
+       PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA,           /* ALLOC_PAGES_PT_LMA mapped to kernel address space */
+       PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,          /* pages allocated from LMA */
+       PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,          /* pages allocated from UMA */
+       PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES,        /* mapped UMA/LMA pages */
+       PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES,           /* pages in the page pool */
+       PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT,       /* dma-buf imports */
+
+       /* Must be the last enum...*/
+       PVRSRV_MEM_ALLOC_TYPE_COUNT
+} PVRSRV_MEM_ALLOC_TYPE;
+
+
+/*
+ * Functions for managing the processes recorded...
+ */
+PVRSRV_ERROR PVRSRVStatsInitialise(void);
+void PVRSRVStatsDestroy(void);
+
+PVRSRV_ERROR PVRSRVStatsRegisterProcess(IMG_HANDLE* phProcessStats);
+
+void PVRSRVStatsDeregisterProcess(IMG_HANDLE hProcessStats);
+
+#define MAX_POWER_STAT_ENTRIES         51
+
+/*
+ * Functions for recording the statistics...
+ */
+
+void PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+                                                                 void *pvCpuVAddr,
+                                                                 IMG_CPU_PHYADDR sCpuPAddr,
+                                                                 size_t uiBytes,
+                                                                 void *pvPrivateData,
+                                                                 IMG_PID uiPid
+                                                                 DEBUG_MEMSTATS_PARAMS);
+
+void PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+                                                                        IMG_UINT64 ui64Key,
+                                                                        IMG_PID uiPid);
+
+void PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+                                                                size_t uiBytes,
+                                                                IMG_PID uiPid);
+
+/*
+ * Increases the memory stat for eAllocType. Tracks the allocation size value
+ * by inserting a value into a hash table with uiCpuVAddr as key.
+ * Pair with PVRSRVStatsDecrMemAllocStatAndUntrack().
+ */
+void PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+                                                                                size_t uiBytes,
+                                                                                IMG_UINT64 uiCpuVAddr,
+                                                                                IMG_PID uiPid);
+
+void PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+                                                                size_t uiBytes,
+                                                                IMG_PID uiPid);
+
+void PVRSRVStatsDecrMemKAllocStat(size_t uiBytes,
+                                                                 IMG_PID decrPID);
+
+/*
+ * Decrease the memory stat for eAllocType. Takes the allocation size value
+ * from the hash table with uiCpuVAddr as key.
+ * Pair with PVRSRVStatsIncrMemAllocStatAndTrack().
+ */
+void PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+                                                                       IMG_UINT64 uiCpuVAddr);
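+/* Illustrative sketch (not part of the imported DDK sources): pairing the
+ * "AndTrack"/"AndUntrack" variants around a vmalloc-backed allocation so the
+ * stat can later be decremented using only the CPU virtual address as key.
+ * The wrapper names are hypothetical.
+ */
+#if 0
+#include <linux/vmalloc.h>
+
+static void *_ExampleTrackedVMalloc(size_t uiBytes, IMG_PID uiPid)
+{
+	void *pvAlloc = vmalloc(uiBytes);
+
+	if (pvAlloc != NULL)
+	{
+		PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+		                                    uiBytes,
+		                                    (IMG_UINT64)(uintptr_t)pvAlloc,
+		                                    uiPid);
+	}
+	return pvAlloc;
+}
+
+static void _ExampleTrackedVFree(void *pvAlloc)
+{
+	PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+	                                      (IMG_UINT64)(uintptr_t)pvAlloc);
+	vfree(pvAlloc);
+}
+#endif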
+
+void
+PVRSRVStatsIncrMemAllocPoolStat(size_t uiBytes);
+
+void
+PVRSRVStatsDecrMemAllocPoolStat(size_t uiBytes);
+
+void
+PVRSRVStatsUpdateOOMStats(IMG_UINT32 ui32OOMStatType,
+                         IMG_PID pidOwner);
+
+PVRSRV_ERROR
+PVRSRVServerUpdateOOMStats(IMG_UINT32 ui32OOMStatType,
+                          IMG_PID pidOwner);
+
+void PVRSRVStatsUpdateRenderContextStats(IMG_UINT32 ui32TotalNumPartialRenders,
+                                                                                IMG_UINT32 ui32TotalNumOutOfMemory,
+                                                                                IMG_UINT32 ui32TotalTAStores,
+                                                                                IMG_UINT32 ui32Total3DStores,
+                                                                                IMG_UINT32 ui32TotalCDMStores,
+                                                                                IMG_UINT32 ui32TotalTDMStores,
+                                                                                IMG_PID owner);
+
+void PVRSRVStatsUpdateZSBufferStats(IMG_UINT32 ui32NumReqByApp,
+                                                                       IMG_UINT32 ui32NumReqByFW,
+                                                                       IMG_PID owner);
+
+void PVRSRVStatsUpdateFreelistStats(IMG_UINT32 ui32NumGrowReqByApp,
+                                                                       IMG_UINT32 ui32NumGrowReqByFW,
+                                                                       IMG_UINT32 ui32InitFLPages,
+                                                                       IMG_UINT32 ui32NumHighPages,
+                                                                       IMG_PID    ownerPid);
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+void PVRSRVStatsUpdateCacheOpStats(PVRSRV_CACHE_OP uiCacheOp,
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
+                                                                  IMG_DEV_VIRTADDR sDevVAddr,
+                                                                  IMG_DEV_PHYADDR sDevPAddr,
+#endif
+                                                                  IMG_DEVMEM_SIZE_T uiOffset,
+                                                                  IMG_DEVMEM_SIZE_T uiSize,
+                                                                  IMG_UINT64 ui64ExecuteTimeMs,
+                                                                  IMG_BOOL bUserModeFlush,
+                                                                  IMG_PID ownerPid);
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+/* Update pre/post power transition timing statistics */
+void InsertPowerTimeStatistic(IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime,
+                              IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime,
+                              IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower);
+
+void InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer);
+void InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer);
+#else
+/* Update pre/post power transition timing statistics */
+static inline
+void InsertPowerTimeStatistic(IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime,
+                              IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime,
+                              IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower) {}
+static inline
+void InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer) {}
+
+static inline
+void InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer) {}
+#endif
+
+void SetFirmwareStartTime(IMG_UINT32 ui32TimeStamp);
+
+void SetFirmwareHandshakeIdleTime(IMG_UINT64 ui64Duration);
+
+/* Functions used for calculating the memory usage statistics of a process */
+PVRSRV_ERROR PVRSRVFindProcessMemStats(IMG_PID pid, IMG_UINT32 ui32ArrSize,
+                                       IMG_BOOL bAllProcessStats, IMG_UINT32 *pui32MemoryStats);
+
+typedef struct {
+       IMG_UINT32 ui32Pid;
+       IMG_UINT32 ui32KernelMemUsage;
+       IMG_UINT32 ui32GraphicsMemUsage;
+} PVRSRV_PER_PROCESS_MEM_USAGE;
+
+PVRSRV_ERROR PVRSRVGetProcessMemUsage(IMG_UINT32 *pui32TotalMem,
+                                                                         IMG_UINT32 *pui32NumberOfLivePids,
+                                                                         PVRSRV_PER_PROCESS_MEM_USAGE **ppsPerProcessMemUsageData);
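+/* Illustrative sketch (not part of the imported DDK sources): walking the
+ * per-process usage array returned by PVRSRVGetProcessMemUsage(). How the
+ * returned array must be freed is not shown here; the function name is
+ * hypothetical.
+ */
+#if 0
+static void _ExampleDumpPerProcessUsage(void)
+{
+	IMG_UINT32 ui32TotalMem = 0;
+	IMG_UINT32 ui32NumLivePids = 0;
+	PVRSRV_PER_PROCESS_MEM_USAGE *psUsage = NULL;
+	IMG_UINT32 i;
+
+	if (PVRSRVGetProcessMemUsage(&ui32TotalMem, &ui32NumLivePids,
+	                             &psUsage) != PVRSRV_OK)
+	{
+		return;
+	}
+
+	for (i = 0; i < ui32NumLivePids; i++)
+	{
+		/* psUsage[i].ui32Pid, psUsage[i].ui32KernelMemUsage and
+		 * psUsage[i].ui32GraphicsMemUsage describe one live process. */
+	}
+}
+#endif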
+
+#endif /* PROCESS_STATS_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/pvr_dvfs.h b/drivers/gpu/drm/img/img-rogue/services/server/include/pvr_dvfs.h
new file mode 100644 (file)
index 0000000..b99b8f5
--- /dev/null
@@ -0,0 +1,138 @@
+/*************************************************************************/ /*!
+@File           pvr_dvfs.h
+@Title          System level interface for DVFS
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVR_DVFS_H
+#define PVR_DVFS_H
+
+#include <linux/version.h>
+
+#if defined(SUPPORT_LINUX_DVFS)
+ #include <linux/devfreq.h>
+ #include <linux/thermal.h>
+
+ #if defined(CONFIG_DEVFREQ_THERMAL)
+  #include <linux/devfreq_cooling.h>
+ #endif
+
+ #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+  #include <linux/opp.h>
+ #else
+  #include <linux/pm_opp.h>
+ #endif
+#endif
+
+#include "img_types.h"
+
+typedef void (*PFN_SYS_DEV_DVFS_SET_FREQUENCY)(IMG_UINT32 ui32Freq);
+typedef void (*PFN_SYS_DEV_DVFS_SET_VOLTAGE)(IMG_UINT32 ui32Volt);
+
+typedef struct _IMG_OPP_
+{
+       IMG_UINT32                      ui32Volt;
+       /*
+        * Unit of frequency in Hz.
+        */
+       IMG_UINT32                      ui32Freq;
+} IMG_OPP;
+
+typedef struct _IMG_DVFS_DEVICE_CFG_
+{
+       const IMG_OPP  *pasOPPTable;
+       IMG_UINT32      ui32OPPTableSize;
+#if defined(SUPPORT_LINUX_DVFS)
+       IMG_UINT32      ui32PollMs;
+#endif
+       IMG_BOOL        bIdleReq;
+       PFN_SYS_DEV_DVFS_SET_FREQUENCY  pfnSetFrequency;
+       PFN_SYS_DEV_DVFS_SET_VOLTAGE    pfnSetVoltage;
+
+#if defined(CONFIG_DEVFREQ_THERMAL) && defined(SUPPORT_LINUX_DVFS)
+       struct devfreq_cooling_power *psPowerOps;
+#endif
+} IMG_DVFS_DEVICE_CFG;
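+/* Illustrative sketch (not part of the imported DDK sources): a minimal OPP
+ * table and device configuration as a system layer might provide them. The
+ * frequencies match those mentioned in the patch history (409.6 MHz and
+ * 594 MHz); the voltages and callback names are hypothetical placeholders.
+ */
+#if 0
+static const IMG_OPP gasExampleOPPTable[] =
+{
+	{ .ui32Volt =  900000, .ui32Freq = 409600000 }, /* 409.6 MHz */
+	{ .ui32Volt = 1000000, .ui32Freq = 594000000 }, /* 594.0 MHz */
+};
+
+static const IMG_DVFS_DEVICE_CFG gsExampleDVFSDeviceCfg =
+{
+	.pasOPPTable      = gasExampleOPPTable,
+	.ui32OPPTableSize = ARRAY_SIZE(gasExampleOPPTable),
+	.bIdleReq         = IMG_TRUE,
+	.pfnSetFrequency  = _ExampleSetFrequency,
+	.pfnSetVoltage    = _ExampleSetVoltage,
+};
+#endif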
+
+#if defined(SUPPORT_LINUX_DVFS)
+typedef struct _IMG_DVFS_GOVERNOR_
+{
+       IMG_BOOL                        bEnabled;
+} IMG_DVFS_GOVERNOR;
+
+typedef struct _IMG_DVFS_GOVERNOR_CFG_
+{
+       IMG_UINT32                      ui32UpThreshold;
+       IMG_UINT32                      ui32DownDifferential;
+} IMG_DVFS_GOVERNOR_CFG;
+#endif
+
+#if defined(__linux__)
+#if defined(SUPPORT_LINUX_DVFS)
+typedef struct _IMG_DVFS_DEVICE_
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+       struct opp                              *psOPP;
+#else
+       struct dev_pm_opp               *psOPP;
+#endif
+       struct devfreq                  *psDevFreq;
+       IMG_BOOL                        bInitPending;
+       IMG_BOOL                        bReady;
+       IMG_BOOL                        bEnabled;
+       IMG_HANDLE                      hGpuUtilUserDVFS;
+       struct devfreq_simple_ondemand_data data;
+#if defined(CONFIG_DEVFREQ_THERMAL)
+       struct thermal_cooling_device   *psDevfreqCoolingDevice;
+#endif
+} IMG_DVFS_DEVICE;
+#endif
+
+typedef struct _IMG_DVFS_
+{
+#if defined(SUPPORT_LINUX_DVFS)
+       IMG_DVFS_DEVICE                 sDVFSDevice;
+       IMG_DVFS_GOVERNOR               sDVFSGovernor;
+       IMG_DVFS_GOVERNOR_CFG   sDVFSGovernorCfg;
+#endif
+       IMG_DVFS_DEVICE_CFG             sDVFSDeviceCfg;
+} PVRSRV_DVFS;
+#endif/* (__linux__) */
+
+#endif /* PVR_DVFS_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/pvr_notifier.h b/drivers/gpu/drm/img/img-rogue/services/server/include/pvr_notifier.h
new file mode 100644 (file)
index 0000000..5717236
--- /dev/null
@@ -0,0 +1,326 @@
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR notifier interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(PVR_NOTIFIER_H)
+#define PVR_NOTIFIER_H
+
+#include "img_types.h"
+#include "pvr_debug.h"
+
+
+/*************************************************************************/ /*!
+Command Complete Notifier Interface
+*/ /**************************************************************************/
+
+typedef IMG_HANDLE PVRSRV_CMDCOMP_HANDLE;
+#ifndef CMDCOMPNOTIFY_PFN
+typedef void (*PFN_CMDCOMP_NOTIFY)(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle);
+#define CMDCOMPNOTIFY_PFN
+#endif
+
+/*************************************************************************/ /*!
+@Function       PVRSRVCmdCompleteInit
+@Description    Performs initialisation of the command complete notifier
+                interface.
+@Return         PVRSRV_ERROR         PVRSRV_OK on success otherwise an error
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVCmdCompleteInit(void);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVCmdCompleteDeinit
+@Description    Performs cleanup for the command complete notifier interface.
+@Return         None
+*/ /**************************************************************************/
+void
+PVRSRVCmdCompleteDeinit(void);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVRegisterCmdCompleteNotify
+@Description    Register a callback function that is called when some device
+                finishes some work, which is signalled via a call to
+                PVRSRVCheckStatus.
+@Output         phNotify             On success, points to command complete
+                                     notifier handle
+@Input          pfnCmdCompleteNotify Function callback
+@Input          hPrivData            Data to be passed back to the caller via
+                                     the callback function
+@Return         PVRSRV_ERROR         PVRSRV_OK on success otherwise an error
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVRegisterCmdCompleteNotify(IMG_HANDLE *phNotify,
+                                PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify,
+                                PVRSRV_CMDCOMP_HANDLE hPrivData);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVUnregisterCmdCompleteNotify
+@Description    Unregister a previously registered callback function.
+@Input          hNotify              Command complete notifier handle
+@Return         PVRSRV_ERROR         PVRSRV_OK on success otherwise an error
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVUnregisterCmdCompleteNotify(IMG_HANDLE hNotify);
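+
+/* A minimal usage sketch for the command complete notifier interface. The
+ * callback matches PFN_CMDCOMP_NOTIFY; EXAMPLE_CONTEXT, psCtx and the
+ * Example* names are placeholders and not part of this interface.
+ *
+ *   static void ExampleCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle)
+ *   {
+ *       EXAMPLE_CONTEXT *psCtx = (EXAMPLE_CONTEXT *)hCmdCompHandle;
+ *       // Inspect psCtx for work that has now completed.
+ *   }
+ *
+ *   IMG_HANDLE hNotify;
+ *   PVRSRV_ERROR eError;
+ *
+ *   eError = PVRSRVRegisterCmdCompleteNotify(&hNotify,
+ *                                            ExampleCmdCompleteNotify,
+ *                                            (PVRSRV_CMDCOMP_HANDLE)psCtx);
+ *   // The callback fires whenever PVRSRVCheckStatus() /
+ *   // PVRSRVNotifyCommandCompletion() runs for another caller.
+ *   PVRSRVUnregisterCmdCompleteNotify(hNotify);
+ */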
+
+/*************************************************************************/ /*!
+@Function       PVRSRVCheckStatus
+@Description    Calls PVRSRVNotifyCommandCompletion() to notify registered
+                command complete handlers of work completion and then calls
+                PVRSRVSignalGlobalEO() to signal the global event object.
+@Input          hCmdCompCallerHandle Used to prevent a handler from being
+                                     notified. A NULL value results in all
+                                     handlers being notified.
+*/ /**************************************************************************/
+void
+PVRSRVCheckStatus(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVNotifyCommandCompletion
+@Description    Notify any registered command complete handlers that some work
+                has been finished (unless hCmdCompCallerHandle matches a
+                handler's hPrivData).
+@Input          hCmdCompCallerHandle Used to prevent a handler from being
+                                     notified. A NULL value results in all
+                                     handlers being notified.
+*/ /**************************************************************************/
+void
+PVRSRVNotifyCommandCompletion(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVSignalGlobalEO
+@Description    Signals the global event object.
+*/ /**************************************************************************/
+void
+PVRSRVSignalGlobalEO(void);
+
+
+/*************************************************************************/ /*!
+Debug Notifier Interface
+*/ /**************************************************************************/
+
+#define DEBUG_REQUEST_DC                0
+#define DEBUG_REQUEST_SYNCTRACKING      1
+#define DEBUG_REQUEST_SRV               2
+#define DEBUG_REQUEST_SYS               3
+#define DEBUG_REQUEST_RGX               4
+#define DEBUG_REQUEST_ANDROIDSYNC       5
+#define DEBUG_REQUEST_LINUXFENCE        6
+#define DEBUG_REQUEST_SYNCCHECKPOINT    7
+#define DEBUG_REQUEST_HTB               8
+#define DEBUG_REQUEST_APPHINT           9
+#define DEBUG_REQUEST_FALLBACKSYNC      10
+
+#define DEBUG_REQUEST_VERBOSITY_LOW     0
+#define DEBUG_REQUEST_VERBOSITY_MEDIUM  1
+#define DEBUG_REQUEST_VERBOSITY_HIGH    2
+#define DEBUG_REQUEST_VERBOSITY_MAX     DEBUG_REQUEST_VERBOSITY_HIGH
+
+#define DD_VERB_LVL_ENABLED(_verbLvl, _verbLvlChk) ((_verbLvl) >= (_verbLvlChk))
+
+/*
+ * Macro used within debug dump functions to send output either to PVR_LOG or
+ * a custom function. The custom function should be stored as a function
+ * pointer in a local variable called 'pfnDumpDebugPrintf'. 'pvDumpDebugFile'
+ * is also required as a local variable to serve as a file identifier for the
+ * printf function, if one is needed.
+ */
+#define PVR_DUMPDEBUG_LOG(...)                                \
+       do                                                        \
+       {                                                         \
+               if (pfnDumpDebugPrintf)                               \
+                       pfnDumpDebugPrintf(pvDumpDebugFile, __VA_ARGS__); \
+               else                                                  \
+                       PVR_LOG((__VA_ARGS__));                           \
+       } while (0)
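+
+/* A minimal sketch of a debug dump helper using PVR_DUMPDEBUG_LOG. The macro
+ * expects locals (here, parameters) named exactly 'pfnDumpDebugPrintf' and
+ * 'pvDumpDebugFile'; the function name is a placeholder.
+ *
+ *   static void ExampleDumpState(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ *                                void *pvDumpDebugFile)
+ *   {
+ *       // Goes to pfnDumpDebugPrintf if one was supplied, else to PVR_LOG.
+ *       PVR_DUMPDEBUG_LOG("Example state: outstanding jobs=%u", 0);
+ *   }
+ */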
+
+struct _PVRSRV_DEVICE_NODE_;
+
+typedef IMG_HANDLE PVRSRV_DBGREQ_HANDLE;
+#ifndef DBGNOTIFY_PFNS
+typedef void (DUMPDEBUG_PRINTF_FUNC)(void *pvDumpDebugFile,
+                                       const IMG_CHAR *pszFormat, ...);
+typedef void (*PFN_DBGREQ_NOTIFY)(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+                                       IMG_UINT32 ui32VerbLevel,
+                                       DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                       void *pvDumpDebugFile);
+#define DBGNOTIFY_PFNS
+#endif
+
+/*************************************************************************/ /*!
+@Function       PVRSRVRegisterDeviceDbgTable
+@Description    Registers a debug requester table for the given device. The
+                order in which the debug requester IDs appear in the
+                table determines the order in which a set of notifier callbacks
+                will be called. In other words, the requester ID that appears
+                first will have all of its associated debug notifier callbacks
+                called first. This will then be followed by all the callbacks
+                associated with the next requester ID in the table and so on.
+                The order table is handled internally.
+@Input          psDevNode     Device node to register requester table with
+@Return         PVRSRV_ERROR  PVRSRV_OK on success otherwise an error
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVRegisterDeviceDbgTable(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVRegisterDriverDbgTable
+@Description    Registers a debug requester table for the driver. The
+                order in which the debug requester IDs appear in the
+                table determines the order in which a set of notifier callbacks
+                will be called. In other words, the requester ID that appears
+                first will have all of its associated debug notifier callbacks
+                called first. This will then be followed by all the callbacks
+                associated with the next requester ID in the table and so on.
+                The order table is handled internally.
+@Return         PVRSRV_ERROR  PVRSRV_OK on success otherwise an error
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVRegisterDriverDbgTable(void);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVUnregisterDeviceDbgTable
+@Description    Unregisters a debug requester table.
+@Input          psDevNode     Device node for which the requester table should
+                              be unregistered
+@Return         void
+*/ /**************************************************************************/
+void
+PVRSRVUnregisterDeviceDbgTable(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVUnregisterDriverDbgTable
+@Description    Unregisters the driver debug requester table.
+@Return         void
+*/ /**************************************************************************/
+void
+PVRSRVUnregisterDriverDbgTable(void);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVRegisterDeviceDbgRequestNotify
+@Description    Register a callback function on a given device that is called
+                when a debug request is made via a call to PVRSRVDebugRequest.
+                There are a number of verbosity levels ranging from
+                DEBUG_REQUEST_VERBOSITY_LOW up to
+                DEBUG_REQUEST_VERBOSITY_MAX. The callback will be called once
+                for each level up to the highest level specified to
+                PVRSRVDebugRequest.
+@Output         phNotify             Points to debug notifier handle on success
+@Input          psDevNode            Device node for which the debug callback
+                                     should be registered
+@Input          pfnDbgRequestNotify  Function callback
+@Input          ui32RequesterID      Requester ID. This is used to determine
+                                     the order in which callbacks are called
+@Input          hDbgRequestHandle    Data to be passed back to the caller via
+                                     the callback function
+@Return         PVRSRV_ERROR         PVRSRV_OK on success otherwise an error
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVRegisterDeviceDbgRequestNotify(IMG_HANDLE *phNotify,
+                                     struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+                                     PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
+                                     IMG_UINT32 ui32RequesterID,
+                                     PVRSRV_DBGREQ_HANDLE hDbgRequestHandle);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVRegisterDriverDbgRequestNotify
+@Description    Register a callback function that is called when a debug request
+                is made via a call to PVRSRVDebugRequest. There are a number of
+                verbosity levels ranging from DEBUG_REQUEST_VERBOSITY_LOW up to
+                DEBUG_REQUEST_VERBOSITY_MAX. The callback will be called once
+                for each level up to the highest level specified to
+                PVRSRVDebugRequest.
+@Output         phNotify             Points to debug notifier handle on success
+@Input          pfnDbgRequestNotify  Function callback
+@Input          ui32RequesterID      Requester ID. This is used to determine
+                                     the order in which callbacks are called
+@Input          hDbgRequestHandle    Data to be passed back to the caller via
+                                     the callback function
+@Return         PVRSRV_ERROR         PVRSRV_OK on success otherwise an error
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVRegisterDriverDbgRequestNotify(IMG_HANDLE *phNotify,
+                                                                        PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
+                                                                        IMG_UINT32 ui32RequesterID,
+                                                                        PVRSRV_DBGREQ_HANDLE hDbgRequestHandle);
+
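+/* A minimal sketch of a debug request notifier. The callback matches
+ * PFN_DBGREQ_NOTIFY and is registered against a device; the Example* names,
+ * eError, psDevNode and the NULL private data are placeholders assumed to be
+ * in scope, not part of this interface.
+ *
+ *   static void ExampleDbgRequestNotify(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+ *                                       IMG_UINT32 ui32VerbLevel,
+ *                                       DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ *                                       void *pvDumpDebugFile)
+ *   {
+ *       PVR_UNREFERENCED_PARAMETER(hDebugRequestHandle);
+ *       if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM))
+ *       {
+ *           PVR_DUMPDEBUG_LOG("Example module: nothing outstanding");
+ *       }
+ *   }
+ *
+ *   IMG_HANDLE hDbgNotify;
+ *   eError = PVRSRVRegisterDeviceDbgRequestNotify(&hDbgNotify, psDevNode,
+ *                                                 ExampleDbgRequestNotify,
+ *                                                 DEBUG_REQUEST_SYS, NULL);
+ */
+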
+/*************************************************************************/ /*!
+@Function       PVRSRVUnregisterDeviceDbgRequestNotify
+@Description    Unregister a previously registered (device context) callback
+                function.
+@Input          hNotify              Debug notifier handle.
+@Return         PVRSRV_ERROR         PVRSRV_OK on success otherwise an error
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVUnregisterDeviceDbgRequestNotify(IMG_HANDLE hNotify);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVUnregisterDriverDbgRequestNotify
+@Description    Unregister a previously registered (driver context) callback
+                function.
+@Input          hNotify              Debug notifier handle.
+@Return         PVRSRV_ERROR         PVRSRV_OK on success otherwise an error
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVUnregisterDriverDbgRequestNotify(IMG_HANDLE hNotify);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDebugRequest
+@Description    Notify any registered debug request handlers that a debug
+                request has been made and at what level.
+@Input          psDevNode           Device node for which the debug request
+                                    has been made
+@Input          ui32VerbLevel       The maximum verbosity level to dump
+@Input          pfnDumpDebugPrintf  Used to specify the print function that
+                                    should be used to dump any debug
+                                    information. If this argument is NULL then
+                                    PVR_LOG() will be used as the default
+                                    print function.
+@Input          pvDumpDebugFile     Optional file identifier to be passed to
+                                    the print function if required.
+@Return         void
+*/ /**************************************************************************/
+void
+PVRSRVDebugRequest(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+                   IMG_UINT32 ui32VerbLevel,
+                   DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                   void *pvDumpDebugFile);
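+
+/* A minimal sketch of requesting a full debug dump through the default
+ * PVR_LOG() path (NULL print function and file identifier); psDevNode is
+ * assumed to be a valid device node.
+ *
+ *   PVRSRVDebugRequest(psDevNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+ */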
+
+#endif /* !defined(PVR_NOTIFIER_H) */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/pvrsrv.h b/drivers/gpu/drm/img/img-rogue/services/server/include/pvrsrv.h
new file mode 100644 (file)
index 0000000..5292bcf
--- /dev/null
@@ -0,0 +1,542 @@
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR services server header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVRSRV_H
+#define PVRSRV_H
+
+#include "connection_server.h"
+#include "pvrsrv_pool.h"
+#include "device.h"
+#include "power.h"
+#include "syscommon.h"
+#include "sysinfo.h"
+#include "physheap.h"
+#include "cache_ops.h"
+#include "pvr_notifier.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#if defined(__KERNEL__) && defined(__linux__) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "virt_validation_defs.h"
+#endif
+
+#include "dma_support.h"
+#include "vz_vmm_pvz.h"
+
+/*!
+ * For OSThreadDestroy(), which may require a retry
+ * Try for 100 ms to destroy an OS thread before failing
+ */
+#define OS_THREAD_DESTROY_TIMEOUT_US 100000ULL
+#define OS_THREAD_DESTROY_RETRY_COUNT 10
+
+typedef enum _POLL_FLAGS_
+{
+       POLL_FLAG_NONE = 0, /* No message or dump is printed on poll timeout */
+       POLL_FLAG_LOG_ERROR = 1, /* Log error on poll timeout */
+       POLL_FLAG_DEBUG_DUMP = 2 /* Print debug dump on poll timeout */
+} POLL_FLAGS;
+
+typedef struct _BUILD_INFO_
+{
+       IMG_UINT32      ui32BuildOptions;
+       IMG_UINT32      ui32BuildVersion;
+       IMG_UINT32      ui32BuildRevision;
+       IMG_UINT32      ui32BuildType;
+#define BUILD_TYPE_DEBUG       0
+#define BUILD_TYPE_RELEASE     1
+       /* The above fields are self-explanatory */
+       /* B.V.N.C can be added later if required */
+} BUILD_INFO;
+
+typedef struct _DRIVER_INFO_
+{
+       BUILD_INFO      sUMBuildInfo;
+       BUILD_INFO      sKMBuildInfo;
+       IMG_UINT8       ui8UMSupportedArch;
+       IMG_UINT8       ui8KMBitArch;
+
+#define        BUILD_ARCH_64BIT                        (1 << 0)
+#define        BUILD_ARCH_32BIT                        (1 << 1)
+#define        BUILD_ARCH_BOTH         (BUILD_ARCH_32BIT | BUILD_ARCH_64BIT)
+       IMG_BOOL        bIsNoMatch;
+} DRIVER_INFO;
+
+#if defined(SUPPORT_VALIDATION) && defined(__linux__)
+typedef struct MEM_LEAK_INTERVALS_TAG
+{
+       IMG_UINT32 ui32OSAlloc;
+       IMG_UINT32 ui32GPU;
+       IMG_UINT32 ui32MMU;
+} MEM_LEAK_INTERVALS;
+#endif
+
+typedef struct PVRSRV_DATA_TAG
+{
+       PVRSRV_DRIVER_MODE    eDriverMode;                    /*!< Driver mode (i.e. native, host or guest) */
+       IMG_BOOL              bForceApphintDriverMode;        /*!< Indicate if driver mode is forced via apphint */
+       DRIVER_INFO           sDriverInfo;
+       IMG_UINT32            ui32DPFErrorCount;                 /*!< Number of Fatal/Error DPFs */
+
+       POSWR_LOCK            hDeviceNodeListLock;            /*!< Read-Write lock to protect the list of devices */
+       PVRSRV_DEVICE_NODE    *psDeviceNodeList;              /*!< List head of device nodes */
+       IMG_UINT32            ui32RegisteredDevices;
+       PVRSRV_DEVICE_NODE    *psHostMemDeviceNode;           /*!< DeviceNode to be used for device independent
+                                                               host based memory allocations where the DevMem
+                                                               framework is to be used e.g. TL */
+       PVRSRV_SERVICES_STATE eServicesState;                 /*!< global driver state */
+
+       IMG_HANDLE            hGlobalEventObject;             /*!< OS Global Event Object */
+       IMG_UINT32            ui32GEOConsecutiveTimeouts;     /*!< OS Global Event Object Timeouts */
+
+       IMG_HANDLE            hCleanupThread;                 /*!< Cleanup thread */
+       IMG_HANDLE            hCleanupEventObject;            /*!< Event object to drive cleanup thread */
+       POS_SPINLOCK          hCleanupThreadWorkListLock;     /*!< Lock protecting the cleanup thread work list */
+       DLLIST_NODE           sCleanupThreadWorkList;         /*!< List of work for the cleanup thread */
+       IMG_PID               cleanupThreadPid;               /*!< Cleanup thread process id */
+       uintptr_t             cleanupThreadTid;               /*!< Cleanup thread id */
+       ATOMIC_T              i32NumCleanupItemsQueued;       /*!< Number of items in cleanup thread work list */
+       ATOMIC_T              i32NumCleanupItemsNotCompleted; /*!< Number of items dropped from cleanup thread work list
+                                                                  after retry limit reached */
+
+       IMG_HANDLE            hDevicesWatchdogThread;         /*!< Devices watchdog thread */
+       IMG_HANDLE            hDevicesWatchdogEvObj;          /*! Event object to drive devices watchdog thread */
+       volatile IMG_UINT32   ui32DevicesWatchdogPwrTrans;    /*! Number of off -> on power state transitions */
+#if !defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+       volatile IMG_UINT32   ui32DevicesWatchdogTimeout;     /*! Timeout for the Devices watchdog Thread */
+#endif
+#ifdef PVR_TESTING_UTILS
+       volatile IMG_UINT32   ui32DevicesWdWakeupCounter;     /* Need this for the unit tests. */
+#endif
+
+#if defined(SUPPORT_AUTOVZ)
+       IMG_HANDLE            hAutoVzWatchdogThread;          /*!< Devices watchdog thread */
+       IMG_HANDLE            hAutoVzWatchdogEvObj;           /*! Event object to drive devices watchdog thread */
+#endif
+
+       POS_LOCK              hHWPerfHostPeriodicThread_Lock; /*!< Lock for the HWPerf Host periodic thread */
+       IMG_HANDLE            hHWPerfHostPeriodicThread;      /*!< HWPerf Host periodic thread */
+       IMG_HANDLE            hHWPerfHostPeriodicEvObj;       /*! Event object to drive HWPerf thread */
+       volatile IMG_BOOL     bHWPerfHostThreadStop;
+       IMG_UINT32            ui32HWPerfHostThreadTimeout;
+
+       IMG_HANDLE            hPvzConnection;                 /*!< PVZ connection used for cross-VM hyper-calls */
+       POS_LOCK              hPvzConnectionLock;             /*!< Lock protecting PVZ connection */
+       IMG_BOOL              abVmOnline[RGX_NUM_OS_SUPPORTED];
+
+       IMG_BOOL              bUnload;                        /*!< Driver unload is in progress */
+
+       IMG_HANDLE            hTLCtrlStream;                  /*! Control plane for TL streams */
+
+       IMG_HANDLE            hDriverThreadEventObject;       /*! Event object relating to multi-threading in the Server */
+       IMG_BOOL              bDriverSuspended;               /*! if TRUE, the driver is suspended and new threads should not enter */
+       ATOMIC_T              iNumActiveDriverThreads;        /*! Number of threads active in the Server */
+
+       PMR                   *psInfoPagePMR;                 /*! Handle to exportable PMR of the information page. */
+       IMG_UINT32            *pui32InfoPage;                 /*! CPU memory mapping for information page. */
+       DEVMEM_MEMDESC        *psInfoPageMemDesc;             /*! Memory descriptor of the information page. */
+       POS_LOCK              hInfoPageLock;                  /*! Lock guarding access to information page. */
+
+#if defined(SUPPORT_VALIDATION) && defined(__linux__)
+       MEM_LEAK_INTERVALS    sMemLeakIntervals;              /*!< How often certain memory leak types will trigger */
+#endif
+       IMG_HANDLE            hThreadsDbgReqNotify;
+
+       IMG_UINT32            ui32PDumpBoundDevice;           /*!< PDump is bound to the device first connected to */
+} PVRSRV_DATA;
+
+
+/*!
+******************************************************************************
+ @Function     PVRSRVGetPVRSRVData
+
+ @Description  Get a pointer to the global data
+
+ @Return   PVRSRV_DATA *
+******************************************************************************/
+PVRSRV_DATA *PVRSRVGetPVRSRVData(void);
+
+#define PVRSRV_KM_ERRORS                     (PVRSRVGetPVRSRVData()->ui32DPFErrorCount)
+#define PVRSRV_ERROR_LIMIT_REACHED                (PVRSRV_KM_ERRORS == IMG_UINT32_MAX)
+#define PVRSRV_REPORT_ERROR()                do { if (!(PVRSRV_ERROR_LIMIT_REACHED)) { PVRSRVGetPVRSRVData()->ui32DPFErrorCount++; } } while (0)
+
+#define PVRSRV_VZ_MODE_IS(_expr)              (DRIVER_MODE_##_expr == PVRSRVGetPVRSRVData()->eDriverMode)
+#define PVRSRV_VZ_RETN_IF_MODE(_expr)         do { if (  PVRSRV_VZ_MODE_IS(_expr)) { return; } } while (0)
+#define PVRSRV_VZ_RETN_IF_NOT_MODE(_expr)     do { if (! PVRSRV_VZ_MODE_IS(_expr)) { return; } } while (0)
+#define PVRSRV_VZ_RET_IF_MODE(_expr, _rc)     do { if (  PVRSRV_VZ_MODE_IS(_expr)) { return (_rc); } } while (0)
+#define PVRSRV_VZ_RET_IF_NOT_MODE(_expr, _rc) do { if (! PVRSRV_VZ_MODE_IS(_expr)) { return (_rc); } } while (0)
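+
+/* A minimal sketch of guarding a host/native-only operation with the macros
+ * above. PVRSRV_ERROR_NOT_SUPPORTED is assumed to be an available error code
+ * from pvrsrv_error.h; the function name is a placeholder.
+ *
+ *   PVRSRV_ERROR ExampleHostOnlyOperation(void)
+ *   {
+ *       PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+ *       // Reached only when the driver is not running in guest mode.
+ *       return PVRSRV_OK;
+ *   }
+ */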
+
+/*!
+******************************************************************************
+@Note  The driver execution mode AppHint (i.e. PVRSRV_APPHINT_DRIVERMODE)
+               can be an override or non-override 32-bit value. An override value
+               has the MSB bit set & a non-override value has this MSB bit cleared.
+               Excluding this MSB bit & interpreting the remaining 31-bit as a
+               signed 31-bit integer, the mode values are:
+                 [-1 native <default>: 0 host : +1 guest ].
+******************************************************************************/
+#define PVRSRV_VZ_APPHINT_MODE_IS_OVERRIDE(_expr)   ((IMG_UINT32)(_expr)&(IMG_UINT32)(1U<<31))
+#define PVRSRV_VZ_APPHINT_MODE(_expr)                          \
+       ((((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF) == (IMG_UINT32)0x7FFFFFFF) ? DRIVER_MODE_NATIVE : \
+               !((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF) ? DRIVER_MODE_HOST : \
+                       ((IMG_UINT32)((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF)==(IMG_UINT32)0x1) ? DRIVER_MODE_GUEST : \
+                               ((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF))
+
+typedef struct _PHYS_HEAP_ITERATOR_ PHYS_HEAP_ITERATOR;
+
+/*!
+******************************************************************************
+ @Function LMA_HeapIteratorCreate
+
+ @Description
+ Creates an iterator for traversing the physical heap requested by ui32Flags.
+ The iterator will go through all of the segments of the physical heap (a
+ segment is physically contiguous) and return their device physical address
+ and size.
+
+ @Input psDevNode: Pointer to device node struct.
+ @Input ui32Flags: Find heap that matches flags.
+ @Output ppsIter: Pointer to the iterator object.
+
+ @Return PVRSRV_OK upon success and PVRSRV_ERROR otherwise.
+******************************************************************************/
+PVRSRV_ERROR LMA_HeapIteratorCreate(PVRSRV_DEVICE_NODE *psDevNode,
+                                    PHYS_HEAP_USAGE_FLAGS ui32Flags,
+                                    PHYS_HEAP_ITERATOR **ppsIter);
+
+/*!
+******************************************************************************
+ @Function LMA_HeapIteratorDestroy
+
+ @Description
+ Frees the iterator object created with LMA_HeapIteratorCreate.
+
+ @Input psIter: Pointer to the iterator object.
+******************************************************************************/
+void LMA_HeapIteratorDestroy(PHYS_HEAP_ITERATOR *psIter);
+
+/*!
+******************************************************************************
+ @Function LMA_HeapIteratorReset
+
+ @Description
+ Resets the iterator to the first segment of the physical heap.
+
+ @Input psIter: Pointer to the iterator object.
+******************************************************************************/
+PVRSRV_ERROR LMA_HeapIteratorReset(PHYS_HEAP_ITERATOR *psIter);
+
+/*!
+******************************************************************************
+ @Function LMA_HeapIteratorNext
+
+ @Description
+ Retrieves current segment's physical device address and size and moves the
+ iterator to the next element (if one exists). If the iterator has reached the
+ end of the heap and no segment was retrieved, this function returns IMG_FALSE.
+
+ @Input psIter: Pointer to the iterator object.
+ @Output psDevPAddr: Device physical address of the current segment.
+ @Output puiSize: Size of the current segment.
+
+ @Return IMG_TRUE if a segment was found and retrieved, IMG_FALSE otherwise.
+******************************************************************************/
+IMG_BOOL LMA_HeapIteratorNext(PHYS_HEAP_ITERATOR *psIter,
+                              IMG_DEV_PHYADDR *psDevPAddr,
+                              IMG_UINT64 *puiSize);
+
+/*!
+******************************************************************************
+ @Function LMA_HeapIteratorGetHeapStats
+
+ @Description
+ Retrieves the physical heap's usage statistics.
+
+ @Input psIter: Pointer to the iterator object.
+ @Output puiTotalSize: Total size of the physical heap.
+ @Output puiInUseSize: Used space in the physical heap.
+
+ @Return PVRSRV_OK upon success and a PVRSRV_ERROR otherwise.
+******************************************************************************/
+PVRSRV_ERROR LMA_HeapIteratorGetHeapStats(PHYS_HEAP_ITERATOR *psIter,
+                                          IMG_UINT64 *puiTotalSize,
+                                          IMG_UINT64 *puiInUseSize);
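+
+/* A minimal sketch of walking the segments of a physical heap with the
+ * iterator interface above. psDevNode is assumed to be a valid device node and
+ * PHYS_HEAP_USAGE_GPU_LOCAL is assumed to be an available PHYS_HEAP_USAGE_FLAGS
+ * value; error handling is reduced to a simple bail-out.
+ *
+ *   PHYS_HEAP_ITERATOR *psIter = NULL;
+ *   IMG_DEV_PHYADDR sDevPAddr;
+ *   IMG_UINT64 uiSize, uiTotal, uiInUse;
+ *
+ *   if (LMA_HeapIteratorCreate(psDevNode, PHYS_HEAP_USAGE_GPU_LOCAL,
+ *                              &psIter) == PVRSRV_OK)
+ *   {
+ *       while (LMA_HeapIteratorNext(psIter, &sDevPAddr, &uiSize))
+ *       {
+ *           // Use sDevPAddr / uiSize for the current contiguous segment.
+ *       }
+ *       (void) LMA_HeapIteratorGetHeapStats(psIter, &uiTotal, &uiInUse);
+ *       LMA_HeapIteratorDestroy(psIter);
+ *   }
+ */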
+
+/*!
+******************************************************************************
+ @Function     PVRSRVPollForValueKM
+
+ @Description
+ Polls for a value to match a masked read
+
+ @Input psDevNode : Pointer to device node struct
+ @Input pui32LinMemAddr : CPU linear address to poll
+ @Input ui32Value : required value
+ @Input ui32Mask : Mask
+ @Input ePollFlags : POLL_FLAGS controlling behaviour on a poll timeout (log an
+        error and/or produce a debug dump). CAUTION: When calling this function
+        from code paths which are also used by debug-dumping code, do NOT pass
+        POLL_FLAG_DEBUG_DUMP, otherwise we might end up requesting a debug dump
+        in recursion and eventually blow up the call stack.
+
+ @Return   PVRSRV_ERROR :
+******************************************************************************/
+PVRSRV_ERROR PVRSRVPollForValueKM(PVRSRV_DEVICE_NODE *psDevNode,
+               volatile IMG_UINT32 __iomem *pui32LinMemAddr,
+               IMG_UINT32                   ui32Value,
+               IMG_UINT32                   ui32Mask,
+               POLL_FLAGS                   ePollFlags);
+
+/*!
+******************************************************************************
+ @Function     PVRSRVWaitForValueKM
+
+ @Description
+ Waits (using EventObjects) for a value to match a masked read
+
+ @Input  pui32LinMemAddr       : CPU linear address to poll
+ @Input  ui32Value             : Required value
+ @Input  ui32Mask              : Mask to be applied before checking against
+                                 ui32Value
+ @Return PVRSRV_ERROR          :
+******************************************************************************/
+PVRSRV_ERROR
+PVRSRVWaitForValueKM(volatile IMG_UINT32 __iomem *pui32LinMemAddr,
+                     IMG_UINT32                  ui32Value,
+                     IMG_UINT32                  ui32Mask);
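+
+/* A minimal sketch of the two wait strategies above for a status word with
+ * bit 0 indicating completion. eError, psDevNode and pui32Status (a valid
+ * __iomem mapping) are assumed to be in scope.
+ *
+ *   // Busy-poll with a bounded timeout, logging an error on failure:
+ *   eError = PVRSRVPollForValueKM(psDevNode, pui32Status,
+ *                                 0x1U, 0x1U, POLL_FLAG_LOG_ERROR);
+ *
+ *   // Or sleep on the global event object until the masked value matches:
+ *   eError = PVRSRVWaitForValueKM(pui32Status, 0x1U, 0x1U);
+ */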
+
+/*!
+******************************************************************************
+ @Function     : PVRSRVSystemHasCacheSnooping
+
+ @Description  : Returns whether the system has cache snooping
+
+ @Return : IMG_TRUE if the system has cache snooping
+******************************************************************************/
+IMG_BOOL PVRSRVSystemHasCacheSnooping(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function     : PVRSRVSystemSnoopingIsEmulated
+
+ @Description : Returns whether system cache snooping support is emulated
+
+ @Return : IMG_TRUE if the system cache snooping is emulated in software
+******************************************************************************/
+IMG_BOOL PVRSRVSystemSnoopingIsEmulated(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function     : PVRSRVSystemSnoopingOfCPUCache
+
+ @Description  : Returns whether the system supports snooping of the CPU cache
+
+ @Return : IMG_TRUE if the system has CPU cache snooping
+******************************************************************************/
+IMG_BOOL PVRSRVSystemSnoopingOfCPUCache(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function     : PVRSRVSystemSnoopingOfDeviceCache
+
+ @Description  : Returns whether the system supports snooping of the device cache
+
+ @Return : IMG_TRUE if the system has device cache snooping
+******************************************************************************/
+IMG_BOOL PVRSRVSystemSnoopingOfDeviceCache(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function     : PVRSRVSystemHasNonMappableLocalMemory
+
+ @Description  : Returns whether the device has non-mappable part of local memory
+
+ @Return : IMG_TRUE if the device has non-mappable part of local memory
+******************************************************************************/
+IMG_BOOL PVRSRVSystemHasNonMappableLocalMemory(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function     : PVRSRVSystemWaitCycles
+
+ @Description  : Waits for at least ui32Cycles of the Device clk.
+******************************************************************************/
+void PVRSRVSystemWaitCycles(PVRSRV_DEVICE_CONFIG *psDevConfig, IMG_UINT32 ui32Cycles);
+
+PVRSRV_ERROR PVRSRVSystemInstallDeviceLISR(void *pvOSDevice,
+                                                                                  IMG_UINT32 ui32IRQ,
+                                                                                  const IMG_CHAR *pszName,
+                                                                                  PFN_LISR pfnLISR,
+                                                                                  void *pvData,
+                                                                                  IMG_HANDLE *phLISRData);
+
+PVRSRV_ERROR PVRSRVSystemUninstallDeviceLISR(IMG_HANDLE hLISRData);
+
+int PVRSRVGetDriverStatus(void);
+
+/*!
+******************************************************************************
+ @Function     : PVRSRVIsBridgeEnabled
+
+ @Description  : Returns whether the given bridge group is enabled
+
+ @Return : IMG_TRUE if the given bridge group is enabled
+******************************************************************************/
+static inline IMG_BOOL PVRSRVIsBridgeEnabled(IMG_HANDLE hServices, IMG_UINT32 ui32BridgeGroup)
+{
+       IMG_UINT32 ui32Bridges;
+       IMG_UINT32 ui32Offset;
+
+       PVR_UNREFERENCED_PARAMETER(hServices);
+
+#if defined(SUPPORT_RGX)
+       if (ui32BridgeGroup >= PVRSRV_BRIDGE_RGX_FIRST)
+       {
+               ui32Bridges = gui32RGXBridges;
+               ui32Offset = PVRSRV_BRIDGE_RGX_FIRST;
+       }
+       else
+#endif /* SUPPORT_RGX */
+       {
+               ui32Bridges = gui32PVRBridges;
+               ui32Offset = PVRSRV_BRIDGE_FIRST;
+       }
+
+       return (IMG_BOOL)(((1U << (ui32BridgeGroup - ui32Offset)) & ui32Bridges) != 0);
+}
+
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#if defined(EMULATOR)
+       void SetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState);
+       void SetTrustedDeviceAceEnabled(void);
+#endif
+#endif
+
+/*!
+******************************************************************************
+ @Function                     : PVRSRVCreateHWPerfHostThread
+
+ @Description          : Creates HWPerf event object and thread unless already created
+
+ @Input ui32Timeout    : Initial timeout (ms) between updates on the HWPerf thread
+
+ @Return                       : PVRSRV_ERROR  PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                                                       error code
+******************************************************************************/
+PVRSRV_ERROR PVRSRVCreateHWPerfHostThread(IMG_UINT32 ui32Timeout);
+
+/*!
+******************************************************************************
+ @Function                     : PVRSRVDestroyHWPerfHostThread
+
+ @Description          : Destroys HWPerf event object and thread if created
+
+ @Return                       : PVRSRV_ERROR  PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                                                       error code
+******************************************************************************/
+PVRSRV_ERROR PVRSRVDestroyHWPerfHostThread(void);
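+
+/* A minimal sketch of bringing the HWPerf host thread up and down. The 100 ms
+ * initial update period is an arbitrary illustrative value; eError is assumed
+ * to be in scope.
+ *
+ *   eError = PVRSRVCreateHWPerfHostThread(100);
+ *   // ... HWPerf host events are now flushed periodically ...
+ *   (void) PVRSRVDestroyHWPerfHostThread();
+ */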
+
+/*!
+******************************************************************************
+ @Function                     : PVRSRVPhysMemHeapsInit
+
+ @Description          : Registers and acquires physical memory heaps
+
+ @Return                       : PVRSRV_ERROR  PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                                                       error code
+******************************************************************************/
+PVRSRV_ERROR PVRSRVPhysMemHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function                     : PVRSRVPhysMemHeapsDeinit
+
+ @Description          : Releases and unregisters physical memory heaps
+
+ @Return                       : void
+******************************************************************************/
+void PVRSRVPhysMemHeapsDeinit(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*************************************************************************/ /*!
+@Function       FindPhysHeapConfig
+@Description    Find Phys Heap Config from Device Config.
+@Input          psDevConfig  Pointer to device config.
+@Input          ui32Flags    Find heap that matches flags.
+@Return         PHYS_HEAP_CONFIG*  Return a config, or NULL if not found.
+*/ /**************************************************************************/
+PHYS_HEAP_CONFIG* FindPhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
+                                                                        PHYS_HEAP_USAGE_FLAGS ui32Flags);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVGetDeviceInstance
+@Description    Return the specified device instance from Device node list.
+@Input          ui32Instance         Device instance to find
+@Return         PVRSRV_DEVICE_NODE*  Return a device node, or NULL if not found.
+*/ /**************************************************************************/
+PVRSRV_DEVICE_NODE* PVRSRVGetDeviceInstance(IMG_UINT32 ui32Instance);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVGetDeviceInstanceByOSId
+@Description    Return the specified device instance by OS Id.
+@Input          i32OSInstance        OS device Id to find
+@Return         PVRSRV_DEVICE_NODE*  Return a device node, or NULL if not found.
+*/ /**************************************************************************/
+PVRSRV_DEVICE_NODE *PVRSRVGetDeviceInstanceByOSId(IMG_INT32 i32OSInstance);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDefaultDomainPower
+@Description    Returns psDevNode->eCurrentSysPowerState
+@Input          PVRSRV_DEVICE_NODE*     Device node
+@Return         PVRSRV_SYS_POWER_STATE  System power state tracked internally
+*/ /**************************************************************************/
+PVRSRV_SYS_POWER_STATE PVRSRVDefaultDomainPower(PVRSRV_DEVICE_NODE *psDevNode);
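+
+/* A minimal sketch of looking up a device by instance number and checking its
+ * internally tracked power state. PVRSRV_SYS_POWER_STATE_ON is assumed to be
+ * an available PVRSRV_SYS_POWER_STATE enumerator.
+ *
+ *   PVRSRV_DEVICE_NODE *psDevNode = PVRSRVGetDeviceInstance(0);
+ *
+ *   if (psDevNode != NULL &&
+ *       PVRSRVDefaultDomainPower(psDevNode) == PVRSRV_SYS_POWER_STATE_ON)
+ *   {
+ *       // Device instance 0 exists and its power domain is tracked as on.
+ *   }
+ */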
+
+#endif /* PVRSRV_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/pvrsrv_apphint.h b/drivers/gpu/drm/img/img-rogue/services/server/include/pvrsrv_apphint.h
new file mode 100644 (file)
index 0000000..e354266
--- /dev/null
@@ -0,0 +1,71 @@
+/**************************************************************************/ /*!
+@File
+@Title          PowerVR AppHint generic interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(PVRSRV_APPHINT_H)
+#define PVRSRV_APPHINT_H
+
+/* Supplied to PVRSRVAppHintRegisterHandlers*() functions when the apphint
+ * is a global driver apphint, i.e. one that is not present in
+ * APPHINT_DEBUGFS_DEVICE_ID and is therefore not per-device.
+ */
+#define APPHINT_OF_DRIVER_NO_DEVICE ((void*)-1U)
+
+#if defined(__linux__)
+
+#include "km_apphint.h"
+#define PVRSRVAppHintDumpState(d) pvr_apphint_dump_state(d)
+#define PVRSRVAppHintRegisterHandlersUINT64(i,q,s,d,p) pvr_apphint_register_handlers_uint64(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersUINT32(i,q,s,d,p) pvr_apphint_register_handlers_uint32(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersBOOL(i,q,s,d,p) pvr_apphint_register_handlers_bool(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersSTRING(i,q,s,d,p) pvr_apphint_register_handlers_string(i,q,s,d,p)
+
+#else
+
+#define PVRSRVAppHintDumpState(d)
+#define PVRSRVAppHintRegisterHandlersUINT64(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersUINT32(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersBOOL(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersSTRING(i,q,s,d,p)
+
+#endif
+
+#endif /* PVRSRV_APPHINT_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/pvrsrv_bridge_init.h b/drivers/gpu/drm/img/img-rogue/services/server/include/pvrsrv_bridge_init.h
new file mode 100644 (file)
index 0000000..750c981
--- /dev/null
@@ -0,0 +1,53 @@
+/**************************************************************************/ /*!
+@File
+@Title          PVR Common Bridge Init/Deinit Module (kernel side)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the common PVR Bridge init/deinit code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef PVRSRV_BRIDGE_INIT_H
+#define PVRSRV_BRIDGE_INIT_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+PVRSRV_ERROR ServerBridgeInit(void);
+void ServerBridgeDeInit(void);
+
+#endif  /* PVRSRV_BRIDGE_INIT_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/pvrsrv_cleanup.h b/drivers/gpu/drm/img/img-rogue/services/server/include/pvrsrv_cleanup.h
new file mode 100644 (file)
index 0000000..9eb454f
--- /dev/null
@@ -0,0 +1,177 @@
+/**************************************************************************/ /*!
+@File
+@Title          PowerVR SrvKM cleanup thread deferred work interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef PVRSRV_CLEANUP_H
+#define PVRSRV_CLEANUP_H
+
+#include "dllist.h"
+
+/**************************************************************************/ /*!
+@Brief          CLEANUP_THREAD_FN
+
+@Description    This is the function prototype for the pfnFree member found in
+                the structure PVRSRV_CLEANUP_THREAD_WORK. The function is
+                responsible for carrying out the clean-up work and, if
+                successful, freeing the memory originally supplied in the call
+                to PVRSRVCleanupThreadAddWork().
+
+@Input          pvParam  This is private data originally supplied by the caller
+                         to PVRSRVCleanupThreadAddWork() when registering the
+                         clean-up work item (psData->pvData). It can be cast
+                         to a relevant type within the using module.
+
+@Return         PVRSRV_OK if the cleanup operation was successful and the
+                callback has freed the PVRSRV_CLEANUP_THREAD_WORK* work item
+                memory originally supplied to PVRSRVCleanupThreadAddWork().
+                Any other error code will lead to the work item
+                being re-queued and hence the original
+                PVRSRV_CLEANUP_THREAD_WORK* must not be freed.
+*/ /***************************************************************************/
+
+typedef PVRSRV_ERROR (*CLEANUP_THREAD_FN)(void *pvParam);
+
+
+/* Typical number of times a caller should want the work to be retried in case
+ * of the callback function (pfnFree) returning an error.
+ * Callers to PVRSRVCleanupThreadAddWork should provide this value as the retry
+ * count (ui32RetryCount) unless there are special requirements.
+ * A value of 200 corresponds to around 20s (200 * 100ms). If it is not
+ * successful by then give up as an unrecoverable problem has occurred.
+ */
+#define CLEANUP_THREAD_RETRY_COUNT_DEFAULT 200u
+/* As for CLEANUP_THREAD_RETRY_COUNT_DEFAULT, but used when the caller wants
+ * the work to be retried for a specified amount of time rather than a number
+ * of retries.
+ */
+#define CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT 20000u /* 20s */
+
+/* Use to set retry count on a cleanup item.
+ * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK
+ * _count - retry count
+ */
+#define CLEANUP_THREAD_SET_RETRY_COUNT(_item,_count) \
+       do { \
+               (_item)->ui32RetryCount = (_count); \
+               (_item)->ui32TimeStart = 0; \
+               (_item)->ui32TimeEnd = 0; \
+       } while (0)
+
+/* Use to set timeout deadline on a cleanup item.
+ * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK
+ * _timeout - timeout in milliseconds, if 0
+ *            CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT is used
+ */
+#define CLEANUP_THREAD_SET_RETRY_TIMEOUT(_item,_timeout) \
+       do { \
+               (_item)->ui32RetryCount = 0; \
+               (_item)->ui32TimeStart = OSClockms(); \
+               (_item)->ui32TimeEnd = (_item)->ui32TimeStart + ((_timeout) > 0 ? \
+                               (_timeout) : CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT); \
+       } while (0)
+
+/* Indicates if the timeout on a given item has been reached.
+ * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK
+ */
+#define CLEANUP_THREAD_RETRY_TIMEOUT_REACHED(_item) \
+       ((_item)->ui32TimeEnd - (_item)->ui32TimeStart >= \
+                       OSClockms() - (_item)->ui32TimeStart)
+
+/* Indicates if the current item is waiting on timeout or retry count.
+ * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK
+ * */
+#define CLEANUP_THREAD_IS_RETRY_TIMEOUT(_item) \
+       ((_item)->ui32TimeStart != (_item)->ui32TimeEnd)
+
+/* Clean up work item specifics so that the task can be managed by the
+ * pvr_defer_free cleanup thread in the Server.
+ */
+typedef struct _PVRSRV_CLEANUP_THREAD_WORK_
+{
+       DLLIST_NODE sNode;             /*!< List node used internally by the cleanup
+                                           thread */
+       CLEANUP_THREAD_FN pfnFree;     /*!< Pointer to the function to be called to
+                                           carry out the deferred cleanup */
+       void *pvData;                  /*!< private data for pfnFree, usually a way back
+                                           to the original PVRSRV_CLEANUP_THREAD_WORK*
+                                           pointer supplied in the call to
+                                           PVRSRVCleanupThreadAddWork(). */
+       IMG_UINT32 ui32TimeStart;      /*!< Timestamp in ms of the moment when
+                                           cleanup item has been created. */
+       IMG_UINT32 ui32TimeEnd;        /*!< Time in ms after which no further retry
+                                           attempts will be made, item discard and
+                                           error logged when this is reached. */
+       IMG_UINT32 ui32RetryCount;     /*!< Number of times the callback should be
+                                           re-tried when it returns error. */
+       IMG_BOOL bDependsOnHW;         /*!< Retry again after the RGX interrupt signals
+                                           the global event object */
+} PVRSRV_CLEANUP_THREAD_WORK;
+
+
+/**************************************************************************/ /*!
+@Function       PVRSRVCleanupThreadAddWork
+
+@Description    Add a work item to be called from the cleanup thread
+
+@Input          psData : The function pointer and private data for the callback
+
+@Return         None
+*/ /***************************************************************************/
+void PVRSRVCleanupThreadAddWork(PVRSRV_CLEANUP_THREAD_WORK *psData);
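+
+/* A minimal sketch of deferring a free to the cleanup thread. The
+ * EXAMPLE_DEFERRED_FREE type, ExampleCleanupFree() and ExampleReleaseResource()
+ * are placeholders; OSFreeMem() is assumed to be available from osfunc.h.
+ *
+ *   typedef struct _EXAMPLE_DEFERRED_FREE_
+ *   {
+ *       PVRSRV_CLEANUP_THREAD_WORK sCleanupWork;
+ *       void *pvResource;
+ *   } EXAMPLE_DEFERRED_FREE;
+ *
+ *   static PVRSRV_ERROR ExampleCleanupFree(void *pvParam)
+ *   {
+ *       EXAMPLE_DEFERRED_FREE *psFree = (EXAMPLE_DEFERRED_FREE *)pvParam;
+ *       PVRSRV_ERROR eError = ExampleReleaseResource(psFree->pvResource);
+ *       if (eError != PVRSRV_OK)
+ *       {
+ *           return eError;   // item is re-queued, do not free psFree yet
+ *       }
+ *       OSFreeMem(psFree);   // success: free the work item memory
+ *       return PVRSRV_OK;
+ *   }
+ *
+ *   psFree->sCleanupWork.pfnFree = ExampleCleanupFree;
+ *   psFree->sCleanupWork.pvData = psFree;
+ *   psFree->sCleanupWork.bDependsOnHW = IMG_FALSE;
+ *   CLEANUP_THREAD_SET_RETRY_COUNT(&psFree->sCleanupWork,
+ *                                  CLEANUP_THREAD_RETRY_COUNT_DEFAULT);
+ *   PVRSRVCleanupThreadAddWork(&psFree->sCleanupWork);
+ */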
+
+/**************************************************************************/ /*!
+@Function       PVRSRVCleanupThreadGetPid
+
+@Description    Returns Cleanup Thread's PID.
+
+@Return         PID of the Cleanup Thread
+*/ /***************************************************************************/
+IMG_PID PVRSRVCleanupThreadGetPid(void);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVCleanupThreadGetTid
+
+@Description    Returns Cleanup Thread's TID.
+
+@Return         TID of the Cleanup Thread
+*/ /***************************************************************************/
+uintptr_t PVRSRVCleanupThreadGetTid(void);
+
+#endif /* PVRSRV_CLEANUP_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/pvrsrv_device.h b/drivers/gpu/drm/img/img-rogue/services/server/include/pvrsrv_device.h
new file mode 100644 (file)
index 0000000..b97e015
--- /dev/null
@@ -0,0 +1,401 @@
+/**************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef PVRSRV_DEVICE_H
+#define PVRSRV_DEVICE_H
+
+#include "img_types.h"
+#include "physheap.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memalloc_physheap.h"
+#include "pvrsrv_firmware_boot.h"
+#include "rgx_fwif_km.h"
+#include "servicesext.h"
+#include "cache_ops.h"
+
+#if defined(SUPPORT_LINUX_DVFS) || defined(SUPPORT_PDVFS)
+#include "pvr_dvfs.h"
+#endif
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "virt_validation_defs.h"
+#endif
+
+typedef struct _PVRSRV_DEVICE_CONFIG_ PVRSRV_DEVICE_CONFIG;
+typedef enum _DRIVER_MODE_
+{
+/* Do not use these enumerations directly, to query the
+   current driver mode, use the PVRSRV_VZ_MODE_IS()
+   macro */
+       DRIVER_MODE_NATIVE      = -1,
+       DRIVER_MODE_HOST        =  0,
+       DRIVER_MODE_GUEST
+} PVRSRV_DRIVER_MODE;
+
+typedef enum
+{
+       PVRSRV_DEVICE_LOCAL_MEMORY_ARENA_MAPPABLE = 0,
+       PVRSRV_DEVICE_LOCAL_MEMORY_ARENA_NON_MAPPABLE = 1,
+       PVRSRV_DEVICE_LOCAL_MEMORY_ARENA_LAST
+} PVRSRV_DEVICE_LOCAL_MEMORY_ARENA;
+
+typedef enum _PVRSRV_DEVICE_SNOOP_MODE_
+{
+       PVRSRV_DEVICE_SNOOP_NONE = 0,
+       PVRSRV_DEVICE_SNOOP_CPU_ONLY,
+       PVRSRV_DEVICE_SNOOP_DEVICE_ONLY,
+       PVRSRV_DEVICE_SNOOP_CROSS,
+       PVRSRV_DEVICE_SNOOP_EMULATED,
+} PVRSRV_DEVICE_SNOOP_MODE;
+
+#if defined(SUPPORT_SOC_TIMER)
+typedef IMG_UINT64
+(*PFN_SYS_DEV_SOC_TIMER_READ)(IMG_HANDLE hSysData);
+#endif
+
+typedef enum _PVRSRV_DEVICE_FABRIC_TYPE_
+{
+       PVRSRV_DEVICE_FABRIC_NONE = 0,
+       PVRSRV_DEVICE_FABRIC_ACELITE,
+       PVRSRV_DEVICE_FABRIC_FULLACE,
+} PVRSRV_DEVICE_FABRIC_TYPE;
+
+typedef IMG_UINT32
+(*PFN_SYS_DEV_CLK_FREQ_GET)(IMG_HANDLE hSysData);
+
+typedef PVRSRV_ERROR
+(*PFN_SYS_PRE_POWER)(IMG_HANDLE hSysData,
+                                                PVRSRV_SYS_POWER_STATE eNewPowerState,
+                                                PVRSRV_SYS_POWER_STATE eCurrentPowerState,
+                                                PVRSRV_POWER_FLAGS ePwrFlags);
+
+typedef PVRSRV_ERROR
+(*PFN_SYS_POST_POWER)(IMG_HANDLE hSysData,
+                                                 PVRSRV_SYS_POWER_STATE eNewPowerState,
+                                                 PVRSRV_SYS_POWER_STATE eCurrentPowerState,
+                                                 PVRSRV_POWER_FLAGS ePwrFlags);
+
+/*************************************************************************/ /*!
+@Brief          Callback function type PFN_SYS_GET_POWER
+
+@Description    This function queries the SoC power registers to determine
+                if the power domain on which the GPU resides is powered on.
+
+   Implementation of this callback is optional - where it is not provided,
+   the driver will assume the domain power state depending on driver type:
+   regular drivers assume it is unpowered at startup, while drivers with
+   AutoVz support expect the GPU domain to be powered on initially. The power
+   state will then be tracked internally according to the pfnPrePowerState
+   and pfnPostPowerState calls using a fallback function.
+
+@Input          psDevNode                  Pointer to node struct of the
+                                           device being initialised
+
+@Return         PVRSRV_SYS_POWER_STATE_ON  if the respective device's hardware
+                                           domain is powered on
+                PVRSRV_SYS_POWER_STATE_OFF if the domain is powered off
+*/ /**************************************************************************/
+typedef PVRSRV_SYS_POWER_STATE
+(*PFN_SYS_GET_POWER)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+typedef void
+(*PFN_SYS_DEV_INTERRUPT_HANDLED)(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+typedef PVRSRV_ERROR
+(*PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE)(IMG_HANDLE hSysData,
+                                                                       IMG_UINT64 ui64MemSize);
+
+typedef void (*PFN_SYS_DEV_FEAT_DEP_INIT)(PVRSRV_DEVICE_CONFIG *, IMG_UINT64);
+
+typedef void
+(*PFN_SYS_DEV_HOST_CACHE_MAINTENANCE)(IMG_HANDLE hSysData,
+                                                                       PVRSRV_CACHE_OP eRequestType,
+                                                                       void *pvVirtStart,
+                                                                       void *pvVirtEnd,
+                                                                       IMG_CPU_PHYADDR sCPUPhysStart,
+                                                                       IMG_CPU_PHYADDR sCPUPhysEnd);
+
+typedef void*
+(*PFN_SLAVE_DMA_CHAN)(PVRSRV_DEVICE_CONFIG*, char*);
+
+typedef void
+(*PFN_SLAVE_DMA_FREE)(PVRSRV_DEVICE_CONFIG*,
+                                         void*);
+
+typedef void
+(*PFN_DEV_PHY_ADDR_2_DMA_ADDR)(PVRSRV_DEVICE_CONFIG *,
+                                                          IMG_DMA_ADDR *,
+                                                          IMG_DEV_PHYADDR *,
+                                                          IMG_BOOL *,
+                                                          IMG_UINT32,
+                                                          IMG_BOOL);
+
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+
+typedef struct _PVRSRV_TD_FW_PARAMS_
+{
+       const void *pvFirmware;
+       IMG_UINT32 ui32FirmwareSize;
+       PVRSRV_FW_BOOT_PARAMS uFWP;
+} PVRSRV_TD_FW_PARAMS;
+
+typedef PVRSRV_ERROR
+(*PFN_TD_SEND_FW_IMAGE)(IMG_HANDLE hSysData,
+                                               PVRSRV_TD_FW_PARAMS *psTDFWParams);
+
+typedef struct _PVRSRV_TD_POWER_PARAMS_
+{
+       IMG_DEV_PHYADDR sPCAddr;
+
+       /* MIPS-only fields */
+       IMG_DEV_PHYADDR sGPURegAddr;
+       IMG_DEV_PHYADDR sBootRemapAddr;
+       IMG_DEV_PHYADDR sCodeRemapAddr;
+       IMG_DEV_PHYADDR sDataRemapAddr;
+} PVRSRV_TD_POWER_PARAMS;
+
+typedef PVRSRV_ERROR
+(*PFN_TD_SET_POWER_PARAMS)(IMG_HANDLE hSysData,
+                                                  PVRSRV_TD_POWER_PARAMS *psTDPowerParams);
+
+typedef PVRSRV_ERROR
+(*PFN_TD_RGXSTART)(IMG_HANDLE hSysData);
+
+typedef PVRSRV_ERROR
+(*PFN_TD_RGXSTOP)(IMG_HANDLE hSysData);
+
+#endif /* defined(SUPPORT_TRUSTED_DEVICE) */
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+typedef void (*PFN_SYS_DEV_VIRT_INIT)(IMG_UINT64[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS], IMG_UINT64[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]);
+#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */
+
+typedef struct _PVRSRV_ROBUSTNESS_ERR_DATA_HOST_WDG_
+{
+       IMG_UINT32 ui32Status;     /*!< FW status */
+       IMG_UINT32 ui32Reason;     /*!< Reason for FW status */
+} PVRSRV_ROBUSTNESS_ERR_DATA_HOST_WDG;
+
+typedef struct _PVRSRV_ROBUSTNESS_ERR_DATA_FW_PF_
+{
+       IMG_DEV_VIRTADDR sFWFaultAddr;     /*!< FW page fault address */
+} PVRSRV_ROBUSTNESS_ERR_DATA_FW_PF;
+
+typedef struct _PVRSRV_ROBUSTNESS_ERR_DATA_CHECKSUM_
+{
+       IMG_UINT32 ui32ExtJobRef;     /*!< External Job Reference of any affected GPU work */
+       RGXFWIF_DM eDM;               /*!< Data Master which was running any affected GPU work */
+} PVRSRV_ROBUSTNESS_ERR_DATA_CHECKSUM;
+
+typedef struct _PVRSRV_ROBUSTNESS_NOTIFY_DATA_
+{
+       RGX_CONTEXT_RESET_REASON eResetReason; /*!< Reason for error/reset */
+       IMG_PID                  pid;          /*!< Pid of process which created the errored context */
+       union
+       {
+               PVRSRV_ROBUSTNESS_ERR_DATA_CHECKSUM sChecksumErrData; /*!< Data returned for checksum errors */
+               PVRSRV_ROBUSTNESS_ERR_DATA_FW_PF    sFwPFErrData;     /*!< Data returned for FW page faults */
+               PVRSRV_ROBUSTNESS_ERR_DATA_HOST_WDG sHostWdgData;     /*!< Data returned for Host Wdg FW faults */
+       } uErrData;
+} PVRSRV_ROBUSTNESS_NOTIFY_DATA;
+
+typedef void
+(*PFN_SYS_DEV_ERROR_NOTIFY)(IMG_HANDLE hSysData,
+                                                   PVRSRV_ROBUSTNESS_NOTIFY_DATA *psRobustnessErrorData);
+
+struct _PVRSRV_DEVICE_CONFIG_
+{
+       /*! OS device passed to SysDevInit (linux: 'struct device') */
+       void *pvOSDevice;
+
+       /*!
+        *! Service representation of pvOSDevice. Should be set to NULL when the
+        *! config is created in SysDevInit. Set by Services once a device node has
+        *! been created for this config and unset before SysDevDeInit is called.
+        */
+       struct _PVRSRV_DEVICE_NODE_ *psDevNode;
+
+       /*! Name of the device */
+       IMG_CHAR *pszName;
+
+       /*! Version of the device (optional) */
+       IMG_CHAR *pszVersion;
+
+       /*! Register bank address */
+       IMG_CPU_PHYADDR sRegsCpuPBase;
+       /*! Register bank size */
+       IMG_UINT32 ui32RegsSize;
+       /*! Device interrupt number */
+       IMG_UINT32 ui32IRQ;
+
+       PVRSRV_DEVICE_SNOOP_MODE eCacheSnoopingMode;
+
+       /*! Device specific data handle */
+       IMG_HANDLE hDevData;
+
+       /*! System specific data that gets passed into system callback functions. */
+       IMG_HANDLE hSysData;
+
+       IMG_BOOL bHasNonMappableLocalMemory;
+
+       /*! Indicates if system supports FBCDC v3.1 */
+       IMG_BOOL bHasFBCDCVersion31;
+
+       /*! Physical Heap definitions for this device.
+        * eDefaultHeap must be set to GPU_LOCAL or CPU_LOCAL. Specifying any other value
+        *    (e.g. DEFAULT) will lead to an error at device discovery.
+        * The pasPhysHeaps array must contain at least one PhysHeap, the declared default heap.
+        */
+       PVRSRV_PHYS_HEAP  eDefaultHeap;
+       PHYS_HEAP_CONFIG *pasPhysHeaps;
+       IMG_UINT32 ui32PhysHeapCount;
+
+       /*!
+        *! Callbacks to change system device power state at the beginning and end
+        *! of a power state change (optional).
+        */
+       PFN_SYS_PRE_POWER pfnPrePowerState;
+       PFN_SYS_POST_POWER pfnPostPowerState;
+       PFN_SYS_GET_POWER  pfnGpuDomainPower;
+
+       /*! Callback to obtain the clock frequency from the device (optional). */
+       PFN_SYS_DEV_CLK_FREQ_GET pfnClockFreqGet;
+
+#if defined(SUPPORT_SOC_TIMER)
+       /*! Callback to read SoC timer register value (mandatory). */
+       PFN_SYS_DEV_SOC_TIMER_READ      pfnSoCTimerRead;
+#endif
+
+       /*!
+        *! Callback to handle memory budgeting. Can be used to reject allocations
+        *! over a certain size (optional).
+        */
+       PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE pfnCheckMemAllocSize;
+
+       /*!
+        *! Callback to perform host CPU cache maintenance. Might be needed for
+        *! extensible architectures such as RISC-V (optional).
+        */
+       PFN_SYS_DEV_HOST_CACHE_MAINTENANCE pfnHostCacheMaintenance;
+       IMG_BOOL bHasPhysicalCacheMaintenance;
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+       /*!
+        *! Callback to send FW image and FW boot time parameters to the trusted
+        *! device.
+        */
+       PFN_TD_SEND_FW_IMAGE pfnTDSendFWImage;
+
+       /*!
+        *! Callback to send parameters needed in a power transition to the trusted
+        *! device.
+        */
+       PFN_TD_SET_POWER_PARAMS pfnTDSetPowerParams;
+
+       /*! Callbacks to ping the trusted device to securely run RGXStart/Stop() */
+       PFN_TD_RGXSTART pfnTDRGXStart;
+       PFN_TD_RGXSTOP pfnTDRGXStop;
+#endif /* defined(SUPPORT_TRUSTED_DEVICE) */
+
+       /*! Function that does device feature specific system layer initialisation */
+       PFN_SYS_DEV_FEAT_DEP_INIT       pfnSysDevFeatureDepInit;
+
+#if defined(SUPPORT_LINUX_DVFS) || defined(SUPPORT_PDVFS)
+       PVRSRV_DVFS sDVFS;
+#endif
+
+#if defined(SUPPORT_ALT_REGBASE)
+       IMG_DEV_PHYADDR sAltRegsGpuPBase;
+#endif
+
+       /*!
+        *! Indicates if device physical address 0x0 might be used as GPU memory
+        *! (e.g. LMA system or UMA system with CPU PA 0x0 reserved by the OS,
+        *!  but CPU PA != device PA and device PA 0x0 available for the GPU)
+        */
+       IMG_BOOL bDevicePA0IsValid;
+
+       /*!
+        *! Function to initialize System-specific virtualization. If not supported
+        *! this should be a NULL reference. Only present if
+        *! SUPPORT_GPUVIRT_VALIDATION is defined.
+        */
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+       PFN_SYS_DEV_VIRT_INIT           pfnSysDevVirtInit;
+#endif
+
+       /*!
+        *! Callback to notify system layer of device errors.
+        *! NB. implementers should ensure that the minimal amount of work is
+        *! done in the callback function, as it will be executed in the main
+        *! RGX MISR. (e.g. any blocking or lengthy work should be performed by
+        *! a worker queue/thread instead.)
+        */
+       PFN_SYS_DEV_ERROR_NOTIFY        pfnSysDevErrorNotify;
+
+       /*!
+        *!  Slave DMA channel request callbacks
+        */
+       PFN_SLAVE_DMA_CHAN pfnSlaveDMAGetChan;
+       PFN_SLAVE_DMA_FREE pfnSlaveDMAFreeChan;
+       /*!
+        *!  Conversion of device memory to DMA addresses
+        */
+       PFN_DEV_PHY_ADDR_2_DMA_ADDR pfnDevPhysAddr2DmaAddr;
+       /*!
+        *!  DMA channel names
+        */
+       IMG_CHAR *pszDmaTxChanName;
+       IMG_CHAR *pszDmaRxChanName;
+       /*!
+        *!  DMA device transfer restrictions
+        */
+       IMG_UINT32 ui32DmaAlignment;
+       IMG_UINT32 ui32DmaTransferUnit;
+       /*!
+        *!  System-wide presence of DMA capabilities
+        */
+       IMG_BOOL bHasDma;
+
+};
+
+#endif /* PVRSRV_DEVICE_H*/
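
To show how the structure above is meant to be consumed, here is a rough sketch
of a system layer filling in the core fields at SysDevInit time. The uiAddr
member of IMG_CPU_PHYADDR, the PVRSRV_PHYS_HEAP_GPU_LOCAL constant and all
register/IRQ values are assumptions for the example, not values from this patch.

    static PHYS_HEAP_CONFIG     gsPhysHeapConfig;   /* populated elsewhere */
    static PVRSRV_DEVICE_CONFIG gsDevConfig;

    static PVRSRV_ERROR _ExampleSysDevInit(void *pvOSDevice)
    {
        gsDevConfig.pvOSDevice        = pvOSDevice;  /* 'struct device *' on Linux */
        gsDevConfig.psDevNode         = NULL;        /* set later by Services */
        gsDevConfig.pszName           = "example-gpu";
        gsDevConfig.pszVersion        = NULL;        /* optional */

        gsDevConfig.sRegsCpuPBase.uiAddr = 0x18000000; /* member name and value assumed */
        gsDevConfig.ui32RegsSize      = 0x100000;      /* placeholder */
        gsDevConfig.ui32IRQ           = 82;            /* placeholder */

        gsDevConfig.eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE;

        gsDevConfig.eDefaultHeap      = PVRSRV_PHYS_HEAP_GPU_LOCAL; /* constant assumed */
        gsDevConfig.pasPhysHeaps      = &gsPhysHeapConfig;
        gsDevConfig.ui32PhysHeapCount = 1;

        /* Optional power callbacks (see the comments above). When
         * pfnGpuDomainPower is not provided, Services tracks the domain
         * state internally via the pre/post power calls. */
        gsDevConfig.pfnPrePowerState  = NULL;
        gsDevConfig.pfnPostPowerState = NULL;
        gsDevConfig.pfnGpuDomainPower = NULL;
        gsDevConfig.pfnClockFreqGet   = NULL;

        return PVRSRV_OK;
    }
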
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/pvrsrv_firmware_boot.h b/drivers/gpu/drm/img/img-rogue/services/server/include/pvrsrv_firmware_boot.h
new file mode 100644 (file)
index 0000000..14a196d
--- /dev/null
@@ -0,0 +1,87 @@
+/**************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef PVRSRV_FIRMWARE_BOOT_H
+#define PVRSRV_FIRMWARE_BOOT_H
+
+#include "img_types.h"
+#include "rgx_fwif_shared.h"
+
+#define TD_MAX_NUM_MIPS_PAGETABLE_PAGES (4U)
+
+typedef union _PVRSRV_FW_BOOT_PARAMS_
+{
+       struct
+       {
+               IMG_DEV_VIRTADDR sFWCodeDevVAddr;
+               IMG_DEV_VIRTADDR sFWDataDevVAddr;
+               IMG_DEV_VIRTADDR sFWCorememCodeDevVAddr;
+               RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr;
+               IMG_DEVMEM_SIZE_T uiFWCorememCodeSize;
+               IMG_DEV_VIRTADDR sFWCorememDataDevVAddr;
+               RGXFWIF_DEV_VIRTADDR sFWCorememDataFWAddr;
+               IMG_UINT32 ui32NumThreads;
+       } sMeta;
+
+       struct
+       {
+               IMG_DEV_PHYADDR sGPURegAddr;
+               IMG_DEV_PHYADDR asFWPageTableAddr[TD_MAX_NUM_MIPS_PAGETABLE_PAGES];
+               IMG_DEV_PHYADDR sFWStackAddr;
+               IMG_UINT32 ui32FWPageTableLog2PageSize;
+               IMG_UINT32 ui32FWPageTableNumPages;
+       } sMips;
+
+       struct
+       {
+               IMG_DEV_VIRTADDR sFWCorememCodeDevVAddr;
+               RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr;
+               IMG_DEVMEM_SIZE_T uiFWCorememCodeSize;
+
+               IMG_DEV_VIRTADDR sFWCorememDataDevVAddr;
+               RGXFWIF_DEV_VIRTADDR sFWCorememDataFWAddr;
+               IMG_DEVMEM_SIZE_T uiFWCorememDataSize;
+       } sRISCV;
+
+} PVRSRV_FW_BOOT_PARAMS;
+
+
+#endif /* PVRSRV_FIRMWARE_BOOT_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/pvrsrv_pool.h b/drivers/gpu/drm/img/img-rogue/services/server/include/pvrsrv_pool.h
new file mode 100644 (file)
index 0000000..2272fc5
--- /dev/null
@@ -0,0 +1,135 @@
+/**************************************************************************/ /*!
+@File
+@Title          Services pool implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provides a generic pool implementation.
+                Entries are retrieved from and returned to the pool dynamically
+                using the PVRSRVPoolGet/PVRSRVPoolPut function pair. Entries
+                are created lazily, i.e. not until they are first used.
+                The pool API accepts an allocation/free function pair that
+                allocates an entry's private data and returns it to the caller
+                on every entry 'Get'.
+                The pool keeps up to ui32MaxEntries entries allocated. Any
+                entry exceeding this number that is 'Put' back to the pool is
+                freed on the spot instead of being returned to the pool.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(PVRSRVPOOL_H)
+#define PVRSRVPOOL_H
+
+/**************************************************************************/ /*!
+ @Description  Callback function called during creation of the new element. This
+               function allocates an object that will be stored in the pool.
+               The object can be retrieved from the pool by calling
+               PVRSRVPoolGet.
+ @Input        pvPrivData      Private data passed to the alloc function.
+ @Output       pvOut           Allocated object.
+ @Return       PVRSRV_ERROR    PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+typedef PVRSRV_ERROR (PVRSRV_POOL_ALLOC_FUNC)(void *pvPrivData, void **pvOut);
+
+/**************************************************************************/ /*!
+ @Description  Callback function called to free the object allocated by
+               the counterpart alloc function.
+ @Input        pvPrivData      Private data passed to the free function.
+ @Input        pvFreeData      Object allocated by PVRSRV_POOL_ALLOC_FUNC.
+*/ /***************************************************************************/
+typedef void (PVRSRV_POOL_FREE_FUNC)(void *pvPrivData, void *pvFreeData);
+
+typedef IMG_HANDLE PVRSRV_POOL_TOKEN;
+
+typedef struct _PVRSRV_POOL_ PVRSRV_POOL;
+
+/**************************************************************************/ /*!
+ @Function     PVRSRVPoolCreate
+ @Description  Creates new buffer pool.
+ @Input        pfnAlloc        Allocation function pointer. Function is used
+                               to allocate new pool entries' data.
+ @Input        pfnFree         Free function pointer. Function is used to
+                               free memory allocated by pfnAlloc function.
+ @Input        ui32MaxEntries  Total maximum number of entries in the pool.
+ @Input        pszName         Name of the pool. String has to be NULL
+                               terminated.
+ @Input        pvPrivData      Private data that will be passed to pfnAlloc and
+                               pfnFree functions.
+ @Output       ppsPool         New buffer pool object.
+ @Return       PVRSRV_ERROR    PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR PVRSRVPoolCreate(PVRSRV_POOL_ALLOC_FUNC *pfnAlloc,
+                                       PVRSRV_POOL_FREE_FUNC *pfnFree,
+                                       IMG_UINT32 ui32MaxEntries,
+                                       const IMG_CHAR *pszName,
+                                       void *pvPrivData,
+                                       PVRSRV_POOL **ppsPool);
+
+/**************************************************************************/ /*!
+ @Function     PVRSRVPoolDestroy
+ @Description  Destroys pool created by PVRSRVPoolCreate.
+ @Input        psPool          Buffer pool object meant to be destroyed.
+*/ /***************************************************************************/
+void PVRSRVPoolDestroy(PVRSRV_POOL *psPool);
+
+/**************************************************************************/ /*!
+ @Function     PVRSRVPoolGet
+ @Description  Retrieves an entry from a pool. If no free elements are
+               available, a new entry will be allocated.
+ @Input        psPool          Pointer to the pool.
+ @Output       hToken          Pointer to the entry handle.
+ @Output       ppvDataOut      Pointer to data stored in the entry (the data
+                               allocated by the pfnAlloc function).
+ @Return       PVRSRV_ERROR    PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR PVRSRVPoolGet(PVRSRV_POOL *psPool,
+                                               PVRSRV_POOL_TOKEN *hToken,
+                                               void **ppvDataOut);
+
+/**************************************************************************/ /*!
+ @Function     PVRSRVPoolPut
+ @Description  Returns an entry to the pool. If the number of entries would
+               exceed the ui32MaxEntries value set during pool creation, the
+               entry is freed instead.
+ @Input        psPool          Pointer to the pool.
+ @Input        hToken          Entry handle.
+ @Return       PVRSRV_ERROR    PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR PVRSRVPoolPut(PVRSRV_POOL *psPool,
+                                               PVRSRV_POOL_TOKEN hToken);
+
+#endif /* PVRSRVPOOL_H */
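
For context, a minimal usage sketch of this pool API. OSAllocMem/OSFreeMem and
PVRSRV_ERROR_OUT_OF_MEMORY are assumptions standing in for whatever allocator
and error code a real caller would use; everything else comes from the
declarations above.

    static PVRSRV_ERROR _ExamplePoolAlloc(void *pvPrivData, void **ppvOut)
    {
        IMG_UINT32 *pui32Size = pvPrivData;     /* private data from PVRSRVPoolCreate */
        void *pvBuf = OSAllocMem(*pui32Size);   /* allocator assumed */

        if (pvBuf == NULL)
        {
            return PVRSRV_ERROR_OUT_OF_MEMORY;  /* error code assumed */
        }

        *ppvOut = pvBuf;
        return PVRSRV_OK;
    }

    static void _ExamplePoolFree(void *pvPrivData, void *pvFreeData)
    {
        (void) pvPrivData;
        OSFreeMem(pvFreeData);                  /* allocator assumed */
    }

    static PVRSRV_ERROR _ExamplePoolUse(void)
    {
        static IMG_UINT32 ui32BufSize = 4096;
        PVRSRV_POOL *psPool;
        PVRSRV_POOL_TOKEN hToken;
        void *pvBuf;
        PVRSRV_ERROR eError;

        eError = PVRSRVPoolCreate(_ExamplePoolAlloc, _ExamplePoolFree,
                                  8,               /* cache at most 8 entries */
                                  "ExamplePool",
                                  &ui32BufSize,
                                  &psPool);
        if (eError != PVRSRV_OK)
        {
            return eError;
        }

        eError = PVRSRVPoolGet(psPool, &hToken, &pvBuf);
        if (eError == PVRSRV_OK)
        {
            /* ... use pvBuf ... */
            (void) PVRSRVPoolPut(psPool, hToken);
        }

        PVRSRVPoolDestroy(psPool);
        return eError;
    }
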
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/pvrsrv_sync_server.h b/drivers/gpu/drm/img/img-rogue/services/server/include/pvrsrv_sync_server.h
new file mode 100644 (file)
index 0000000..5d1a10c
--- /dev/null
@@ -0,0 +1,278 @@
+/**************************************************************************/ /*!
+@File
+@Title          Fence sync server interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef PVRSRV_SYNC_SERVER_H
+#define PVRSRV_SYNC_SERVER_H
+
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+#include "sync_fallback_server.h"
+#include "pvr_notifier.h"
+#include "img_types.h"
+#include "pvrsrv_sync_km.h"
+#elif defined(SUPPORT_NATIVE_FENCE_SYNC)
+#include "pvr_sync.h"
+#endif
+
+#include "rgxhwperf.h"
+
+#define SYNC_SW_TIMELINE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH
+#define SYNC_SW_FENCE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH
+
+typedef struct _SYNC_TIMELINE_OBJ_
+{
+       void *pvTlObj; /* Implementation specific timeline object */
+
+       PVRSRV_TIMELINE hTimeline; /* Reference to implementation-independent timeline object */
+} SYNC_TIMELINE_OBJ;
+
+typedef struct _SYNC_FENCE_OBJ_
+{
+       void *pvFenceObj; /* Implementation specific fence object */
+
+       PVRSRV_FENCE hFence; /* Reference to implementation-independent fence object */
+} SYNC_FENCE_OBJ;
+
+static inline void SyncClearTimelineObj(SYNC_TIMELINE_OBJ *psSTO)
+{
+       psSTO->pvTlObj = NULL;
+       psSTO->hTimeline = PVRSRV_NO_TIMELINE;
+}
+
+static inline IMG_BOOL SyncIsTimelineObjValid(const SYNC_TIMELINE_OBJ *psSTO)
+{
+       return (IMG_BOOL)(psSTO->pvTlObj != NULL);
+}
+
+static inline void SyncClearFenceObj(SYNC_FENCE_OBJ *psSFO)
+{
+       psSFO->pvFenceObj = NULL;
+       psSFO->hFence = PVRSRV_NO_FENCE;
+}
+
+static inline IMG_BOOL SyncIsFenceObjValid(const SYNC_FENCE_OBJ *psSFO)
+{
+       return (IMG_BOOL)(psSFO->pvFenceObj != NULL);
+}
+
+
+/* Mapping of each required function to its appropriate sync-implementation function */
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+       #define SyncFenceWaitKM_                SyncFbFenceWait
+       #define SyncGetFenceObj_                SyncFbGetFenceObj
+       #define SyncFenceReleaseKM_             SyncFbFenceReleaseKM
+       #define SyncSWTimelineFenceCreateKM_    SyncFbSWTimelineFenceCreateKM
+       #define SyncSWTimelineAdvanceKM_        SyncFbSWTimelineAdvanceKM
+       #define SyncSWGetTimelineObj_           SyncFbSWGetTimelineObj
+       #define SyncSWTimelineReleaseKM_        SyncFbTimelineRelease
+       #define SyncDumpFence_                  SyncFbDumpFenceKM
+       #define SyncSWDumpTimeline_             SyncFbSWDumpTimelineKM
+#elif defined(SUPPORT_NATIVE_FENCE_SYNC)
+       #define SyncFenceWaitKM_                pvr_sync_fence_wait
+       #define SyncGetFenceObj_                pvr_sync_fence_get
+       #define SyncFenceReleaseKM_             pvr_sync_fence_release
+       #define SyncSWTimelineFenceCreateKM_    pvr_sync_sw_timeline_fence_create
+       #define SyncSWTimelineAdvanceKM_        pvr_sync_sw_timeline_advance
+       #define SyncSWGetTimelineObj_           pvr_sync_sw_timeline_get
+       #define SyncSWTimelineReleaseKM_        pvr_sync_sw_timeline_release
+       #define SyncDumpFence_                  sync_dump_fence
+       #define SyncSWDumpTimeline_             sync_sw_dump_timeline
+#endif
+
+/*************************************************************************/ /*!
+@Function       SyncFenceWaitKM
+
+@Description    Wait for all the sync points in the fence to be signalled.
+
+@Input          psFenceObj          Fence to wait on
+
+@Input          ui32TimeoutInMs     Maximum time to wait (in milliseconds)
+
+@Return         PVRSRV_OK               once the fence has been passed (all
+                                        checkpoints it contains have either
+                                        signalled or errored)
+                PVRSRV_ERROR_TIMEOUT    if the poll has exceeded the timeout
+                PVRSRV_ERROR_FAILED_DEPENDENCIES Other sync-impl specific error
+*/ /**************************************************************************/
+static inline PVRSRV_ERROR
+SyncFenceWaitKM(PVRSRV_DEVICE_NODE *psDevNode,
+                const SYNC_FENCE_OBJ *psFenceObj,
+                IMG_UINT32 ui32TimeoutInMs)
+{
+       PVRSRV_ERROR eError;
+
+       RGXSRV_HWPERF_SYNC_FENCE_WAIT(psDevNode->pvDevice,
+                                                                 BEGIN,
+                                                                 OSGetCurrentProcessID(),
+                                                                 psFenceObj->hFence,
+                                                                 ui32TimeoutInMs);
+
+       eError = SyncFenceWaitKM_(psFenceObj->pvFenceObj, ui32TimeoutInMs);
+
+       RGXSRV_HWPERF_SYNC_FENCE_WAIT(psDevNode->pvDevice,
+                                                                 END,
+                                                                 OSGetCurrentProcessID(),
+                                                                 psFenceObj->hFence,
+                                                                 ((eError == PVRSRV_OK) ?
+                                                                         RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_PASSED :
+                                                                         ((eError == PVRSRV_ERROR_TIMEOUT) ?
+                                                                                 RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_TIMEOUT :
+                                                                                 RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_ERROR)));
+       return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       SyncGetFenceObj
+
+@Description    Get the implementation-specific server fence object from the
+                opaque, implementation-independent PVRSRV_FENCE type.
+                On success this function takes a reference on the base fence,
+                which must be dropped using SyncFenceReleaseKM once the fence
+                object is no longer in use.
+
+@Input          iFence        Input opaque fence object
+
+@Output         psFenceObj    Pointer to implementation specific fence object
+
+@Return         PVRSRV_ERROR  PVRSRV_OK, on success
+*/ /**************************************************************************/
+static inline PVRSRV_ERROR
+SyncGetFenceObj(PVRSRV_FENCE iFence,
+                SYNC_FENCE_OBJ *psFenceObj)
+{
+       psFenceObj->hFence = iFence;
+       return SyncGetFenceObj_(iFence, &psFenceObj->pvFenceObj);
+}
+
+/*************************************************************************/ /*!
+@Function       SyncFenceReleaseKM
+
+@Description    Release reference on this fence.
+
+@Input          psFenceObj     Fence to be released
+
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+static inline
+PVRSRV_ERROR SyncFenceReleaseKM(const SYNC_FENCE_OBJ *psFenceObj)
+{
+       return SyncFenceReleaseKM_(psFenceObj->pvFenceObj);
+}
+
+/*****************************************************************************/
+/*                                                                           */
+/*                      SW TIMELINE SPECIFIC FUNCTIONS                       */
+/*                                                                           */
+/*****************************************************************************/
+
+static inline PVRSRV_ERROR
+SyncSWTimelineFenceCreateKM(PVRSRV_DEVICE_NODE *psDevNode,
+                            PVRSRV_TIMELINE hSWTimeline,
+                            const IMG_CHAR *pszFenceName,
+                            PVRSRV_FENCE *phOutFence)
+{
+       IMG_UINT64 ui64SyncPtIdx;
+       PVRSRV_ERROR eError;
+       eError = SyncSWTimelineFenceCreateKM_(psDevNode,
+                                             hSWTimeline,
+                                             pszFenceName,
+                                             phOutFence,
+                                             &ui64SyncPtIdx);
+       if (eError == PVRSRV_OK)
+       {
+               RGXSRV_HWPERF_ALLOC_SW_FENCE(psDevNode, OSGetCurrentProcessID(),
+                                            *phOutFence, hSWTimeline, ui64SyncPtIdx,
+                                            pszFenceName, OSStringLength(pszFenceName));
+       }
+       return eError;
+}
+
+static inline PVRSRV_ERROR
+SyncSWTimelineAdvanceKM(PVRSRV_DEVICE_NODE *psDevNode,
+                        const SYNC_TIMELINE_OBJ *psSWTimelineObj)
+{
+       IMG_UINT64 ui64SyncPtIdx;
+       PVRSRV_ERROR eError;
+       eError = SyncSWTimelineAdvanceKM_(psSWTimelineObj->pvTlObj,
+                                         &ui64SyncPtIdx);
+
+       if (eError == PVRSRV_OK)
+       {
+               RGXSRV_HWPERF_SYNC_SW_TL_ADV(psDevNode->pvDevice,
+                                            OSGetCurrentProcessID(),
+                                            psSWTimelineObj->hTimeline,
+                                            ui64SyncPtIdx);
+       }
+       return eError;
+}
+
+static inline PVRSRV_ERROR
+SyncSWGetTimelineObj(PVRSRV_TIMELINE hSWTimeline,
+                     SYNC_TIMELINE_OBJ *psSWTimelineObj)
+{
+       psSWTimelineObj->hTimeline = hSWTimeline;
+       return SyncSWGetTimelineObj_(hSWTimeline, &psSWTimelineObj->pvTlObj);
+}
+
+static inline PVRSRV_ERROR
+SyncSWTimelineReleaseKM(const SYNC_TIMELINE_OBJ *psSWTimelineObj)
+{
+       return SyncSWTimelineReleaseKM_(psSWTimelineObj->pvTlObj);
+}
+
+static inline PVRSRV_ERROR
+SyncDumpFence(const SYNC_FENCE_OBJ *psFenceObj,
+              DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+              void *pvDumpDebugFile)
+{
+       return SyncDumpFence_(psFenceObj->pvFenceObj, pfnDumpDebugPrintf, pvDumpDebugFile);
+}
+
+static inline PVRSRV_ERROR
+SyncSWDumpTimeline(const SYNC_TIMELINE_OBJ *psSWTimelineObj,
+                   DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                   void *pvDumpDebugFile)
+{
+       return SyncSWDumpTimeline_(psSWTimelineObj->pvTlObj, pfnDumpDebugPrintf, pvDumpDebugFile);
+}
+
+
+#endif /* PVRSRV_SYNC_SERVER_H */
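
To illustrate how the wrappers above combine, a sketch of a caller resolving a
PVRSRV_FENCE handle, waiting on it and dropping the reference. The 1000 ms
timeout and the surrounding function are placeholders; all calls are the ones
declared or defined in this header.

    static PVRSRV_ERROR _ExampleWaitOnFence(PVRSRV_DEVICE_NODE *psDevNode,
                                            PVRSRV_FENCE iFence)
    {
        SYNC_FENCE_OBJ sFenceObj;
        PVRSRV_ERROR eError;

        SyncClearFenceObj(&sFenceObj);

        eError = SyncGetFenceObj(iFence, &sFenceObj);
        if (eError != PVRSRV_OK)
        {
            return eError;
        }

        /* Block for up to one second for every checkpoint in the fence. */
        eError = SyncFenceWaitKM(psDevNode, &sFenceObj, 1000);

        /* Drop the reference taken by SyncGetFenceObj regardless of result. */
        (void) SyncFenceReleaseKM(&sFenceObj);

        return eError;
    }
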
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/ri_server.h b/drivers/gpu/drm/img/img-rogue/services/server/include/ri_server.h
new file mode 100644 (file)
index 0000000..f7467f8
--- /dev/null
@@ -0,0 +1,106 @@
+/*************************************************************************/ /*!
+@File           ri_server.h
+@Title          Resource Information abstraction
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Resource Information (RI) functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RI_SERVER_H
+#define RI_SERVER_H
+
+#include "img_defs.h"
+#include "ri_typedefs.h"
+#include "pmr.h"
+#include "pvrsrv_error.h"
+#include "physheap.h"
+
+PVRSRV_ERROR RIInitKM(void);
+void RIDeInitKM(void);
+
+void RILockAcquireKM(void);
+void RILockReleaseKM(void);
+
+PVRSRV_ERROR RIWritePMREntryKM(PMR *psPMR);
+
+PVRSRV_ERROR RIWritePMREntryWithOwnerKM(PMR *psPMR,
+                                        IMG_PID ui32Owner);
+
+PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *psPMR,
+                                   IMG_UINT32 ui32TextBSize,
+                                   const IMG_CHAR ai8TextB[DEVMEM_ANNOTATION_MAX_LEN],
+                                   IMG_UINT64 uiOffset,
+                                   IMG_UINT64 uiSize,
+                                   IMG_BOOL bIsImport,
+                                   IMG_BOOL bIsSuballoc,
+                                   RI_HANDLE *phRIHandle);
+
+PVRSRV_ERROR RIWriteProcListEntryKM(IMG_UINT32 ui32TextBSize,
+                                    const IMG_CHAR *psz8TextB,
+                                    IMG_UINT64 ui64Size,
+                                    IMG_UINT64 ui64DevVAddr,
+                                    RI_HANDLE *phRIHandle);
+
+PVRSRV_ERROR RIUpdateMEMDESCAddrKM(RI_HANDLE hRIHandle,
+                                   IMG_DEV_VIRTADDR sVAddr);
+
+PVRSRV_ERROR RIDeletePMREntryKM(RI_HANDLE hRIHandle);
+PVRSRV_ERROR RIDeleteMEMDESCEntryKM(RI_HANDLE hRIHandle);
+
+PVRSRV_ERROR RIDeleteListKM(void);
+
+PVRSRV_ERROR RIDumpListKM(PMR *psPMR);
+
+PVRSRV_ERROR RIDumpAllKM(void);
+
+PVRSRV_ERROR RIDumpProcessKM(IMG_PID pid);
+
+#if defined(DEBUG)
+PVRSRV_ERROR RIDumpProcessListKM(PMR *psPMR,
+                                 IMG_PID pid,
+                                 IMG_UINT64 ui64Offset,
+                                 IMG_DEV_VIRTADDR *psDevVAddr);
+#endif
+
+IMG_BOOL RIGetListEntryKM(IMG_PID pid,
+                          IMG_HANDLE **ppHandle,
+                          IMG_CHAR **ppszEntryString);
+
+IMG_INT32 RITotalAllocProcessKM(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapType);
+
+#endif /* RI_SERVER_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/sofunc_pvr.h b/drivers/gpu/drm/img/img-rogue/services/server/include/sofunc_pvr.h
new file mode 100644 (file)
index 0000000..48bc94d
--- /dev/null
@@ -0,0 +1,94 @@
+/*************************************************************************/ /*!
+@File
+@Title          SO Interface header file for common PVR functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Contains SO interface functions. These functions are defined in
+                the common layer and are called from the env layer OS specific
+                implementation.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(SOFUNC_PVR_H_)
+#define SOFUNC_PVR_H_
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "device.h"
+#include "pvr_notifier.h"
+
+
+/**************************************************************************/ /*!
+ @Function     SOPvrDbgRequestNotifyRegister
+ @Description  SO Interface function called from the OS layer implementation.
+               Register a callback function that is called when a debug request
+               is made via a call to PVRSRVDebugRequest. There are a number of
+               verbosity levels ranging from DEBUG_REQUEST_VERBOSITY_LOW up to
+               DEBUG_REQUEST_VERBOSITY_MAX. The callback will be called once
+               for each level up to the highest level specified to
+               PVRSRVDebugRequest.
+@Output        phNotify             On success, points to debug notifier handle
+@Input         psDevNode            Device node for which the debug callback
+                                    should be registered
+@Input         pfnDbgRequestNotify  Function callback
+@Input         ui32RequesterID      Requester ID. This is used to determine
+                                    the order in which callbacks are called,
+                                    see DEBUG_REQUEST_*
+@Input         hDbgRequestHandle    Data to be passed back to the caller via
+                                    the callback function
+@Return        PVRSRV_ERROR         PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR SOPvrDbgRequestNotifyRegister(IMG_HANDLE *phNotify,
+                                                         PVRSRV_DEVICE_NODE *psDevNode,
+                                                         PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
+                                                         IMG_UINT32 ui32RequesterID,
+                                                         PVRSRV_DBGREQ_HANDLE hDbgRequestHandle);
+
+/**************************************************************************/ /*!
+ @Function     SOPvrDbgRequestNotifyUnregister
+ @Description  SO Interface function called from the OS layer implementation.
+               Remove and clean up the specified notifier registration so that
+               it does not receive any further callbacks.
+ @Input        hNotify     Handle returned to caller from
+                           SOPvrDbgRequestNotifyRegister().
+ @Return       PVRSRV_ERROR
+*/ /***************************************************************************/
+PVRSRV_ERROR SOPvrDbgRequestNotifyUnregister(IMG_HANDLE hNotify);
+
+
+#endif /* SOFUNC_PVR_H_ */
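
As an illustration of the registration flow described above, a sketch of an
OS-layer module registering and later unregistering a notifier. The
PFN_DBGREQ_NOTIFY prototype and the DEBUG_REQUEST_SYS requester ID are assumed
to come from pvr_notifier.h and are not declared in this header.

    /* Assumed notifier prototype. */
    static void _ExampleDbgNotify(PVRSRV_DBGREQ_HANDLE hDbgRequestHandle,
                                  IMG_UINT32 ui32VerbLevel,
                                  DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
                                  void *pvDumpDebugFile)
    {
        /* Called once per verbosity level up to the level requested by
         * PVRSRVDebugRequest; dump module state here, typically through
         * pfnDumpDebugPrintf(pvDumpDebugFile, ...). */
    }

    static IMG_HANDLE ghDbgNotify;

    static PVRSRV_ERROR _ExampleRegisterDbgNotify(PVRSRV_DEVICE_NODE *psDevNode)
    {
        return SOPvrDbgRequestNotifyRegister(&ghDbgNotify,
                                             psDevNode,
                                             _ExampleDbgNotify,
                                             DEBUG_REQUEST_SYS, /* requester ID assumed */
                                             NULL);
    }

    static void _ExampleUnregisterDbgNotify(void)
    {
        (void) SOPvrDbgRequestNotifyUnregister(ghDbgNotify);
    }
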
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/sofunc_rgx.h b/drivers/gpu/drm/img/img-rogue/services/server/include/sofunc_rgx.h
new file mode 100644 (file)
index 0000000..be9594d
--- /dev/null
@@ -0,0 +1,95 @@
+/*************************************************************************/ /*!
+@File
+@Title          SO Interface header file for devices/RGX functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Contains SO interface functions. These functions are defined in
+                the common devices layer and are called from the env layer OS
+                specific implementation.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(SOFUNC_RGX_H_)
+#define SOFUNC_RGX_H_
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+
+#if !defined(NO_HARDWARE)
+/*!
+*******************************************************************************
+
+ @Function     SORgxGpuUtilStatsRegister
+
+ @Description  SO Interface function called from the OS layer implementation.
+               Initialise data used to compute GPU utilisation statistics
+               for a particular user (identified by the handle passed as
+               argument). This function must be called only once for each
+               different user/handle.
+
+ @Input        phGpuUtilUser - Pointer to handle used to identify a user of
+                               RGXGetGpuUtilStats
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR SORgxGpuUtilStatsRegister(IMG_HANDLE *phGpuUtilUser);
+
+
+/*!
+*******************************************************************************
+
+ @Function     SORgxGpuUtilStatsUnregister
+
+ @Description  SO Interface function called from the OS layer implementation.
+               Free data previously used to compute GPU utilisation statistics
+               for a particular user (identified by the handle passed as
+               argument).
+
+ @Input        hGpuUtilUser - Handle used to identify a user of
+                              RGXGetGpuUtilStats
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR SORgxGpuUtilStatsUnregister(IMG_HANDLE hGpuUtilUser);
+#endif /* !defined(NO_HARDWARE) */
+
+
+
+#endif /* SOFUNC_RGX_H_ */
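
The register/unregister pairing implied by the two prototypes above, sketched
for an OS-layer caller; the file-scope handle and wrapper functions are
illustrative only.

    static IMG_HANDLE ghGpuUtilUser;

    static PVRSRV_ERROR _ExampleStatsInit(void)
    {
        /* One-time registration per user of RGXGetGpuUtilStats. */
        return SORgxGpuUtilStatsRegister(&ghGpuUtilUser);
    }

    static void _ExampleStatsDeInit(void)
    {
        (void) SORgxGpuUtilStatsUnregister(ghGpuUtilUser);
        ghGpuUtilUser = NULL;
    }
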
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/srvcore.h b/drivers/gpu/drm/img/img-rogue/services/server/include/srvcore.h
new file mode 100644 (file)
index 0000000..0483b0a
--- /dev/null
@@ -0,0 +1,229 @@
+/**************************************************************************/ /*!
+@File
+@Title          PVR Bridge Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the PVR Bridge code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVCORE_H
+#define SRVCORE_H
+
+#include "lock_types.h"
+#include "connection_server.h"
+#include "pvr_debug.h"
+
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+
+PVRSRV_ERROR
+CopyFromUserWrapper(CONNECTION_DATA *psConnection,
+                    IMG_UINT32 ui32DispatchTableEntry,
+                    void *pvDest,
+                    void __user *pvSrc,
+                    IMG_UINT32 ui32Size);
+PVRSRV_ERROR
+CopyToUserWrapper(CONNECTION_DATA *psConnection,
+                  IMG_UINT32 ui32DispatchTableEntry,
+                  void __user *pvDest,
+                  void *pvSrc,
+                  IMG_UINT32 ui32Size);
+
+IMG_INT
+DummyBW(IMG_UINT32 ui32DispatchTableEntry,
+        IMG_UINT8 *psBridgeIn,
+        IMG_UINT8 *psBridgeOut,
+        CONNECTION_DATA *psConnection);
+
+typedef PVRSRV_ERROR (*ServerResourceDestroyFunction)(IMG_HANDLE, IMG_HANDLE);
+
+typedef IMG_INT (*BridgeWrapperFunction)(IMG_UINT32 ui32DispatchTableEntry,
+                                                                        IMG_UINT8 *psBridgeIn,
+                                                                        IMG_UINT8 *psBridgeOut,
+                                                                        CONNECTION_DATA *psConnection);
+
+typedef struct _PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY
+{
+       BridgeWrapperFunction pfFunction; /*!< The wrapper function that validates the ioctl
+                                           arguments before calling into srvkm proper */
+       POS_LOCK hBridgeLock; /*!< The bridge lock which needs to be acquired
+                                   before calling the above wrapper */
+#if defined(DEBUG_BRIDGE_KM)
+       const IMG_CHAR *pszIOCName; /*!< Name of the ioctl: e.g. "PVRSRV_BRIDGE_CONNECT_SERVICES" */
+       const IMG_CHAR *pszFunctionName; /*!< Name of the wrapper function: e.g. "PVRSRVConnectBW" */
+       const IMG_CHAR *pszBridgeLockName; /*!< Name of bridge lock which will be acquired */
+       IMG_UINT32 ui32CallCount; /*!< The total number of times the ioctl has been called */
+       IMG_UINT32 ui32CopyFromUserTotalBytes; /*!< The total number of bytes copied from
+                                                userspace within this ioctl */
+       IMG_UINT32 ui32CopyToUserTotalBytes; /*!< The total number of bytes copied to
+                                              userspace within this ioctl */
+       IMG_UINT64 ui64TotalTimeNS; /*!< The total amount of time spent in this bridge function */
+       IMG_UINT64 ui64MaxTimeNS; /*!< The maximum amount of time for a single call to this bridge function */
+#endif
+} PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY;
+
+#if defined(SUPPORT_RGX)
+       #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT  (PVRSRV_BRIDGE_RGX_DISPATCH_LAST+1)
+       #define BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT  (PVRSRV_BRIDGE_RGX_LAST+1)
+#else
+       #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT  (PVRSRV_BRIDGE_DISPATCH_LAST+1)
+       #define BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT  (PVRSRV_BRIDGE_LAST+1)
+#endif
+
+extern PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT];
+
+void BridgeDispatchTableStartOffsetsInit(void);
+
+void
+_SetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup,
+                       IMG_UINT32 ui32Index,
+                       const IMG_CHAR *pszIOCName,
+                       BridgeWrapperFunction pfFunction,
+                       const IMG_CHAR *pszFunctionName,
+                       POS_LOCK hBridgeLock,
+                       const IMG_CHAR* pszBridgeLockName);
+void
+UnsetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup,
+                        IMG_UINT32 ui32Index);
+
+
+/* PRQA S 0884,3410 2*/ /* macro relies on the lack of brackets */
+#define SetDispatchTableEntry(ui32BridgeGroup, ui32Index, pfFunction,\
+                                       hBridgeLock) \
+       _SetDispatchTableEntry(ui32BridgeGroup, ui32Index, #ui32Index, (BridgeWrapperFunction)pfFunction, #pfFunction,\
+                                                       (POS_LOCK)hBridgeLock, #hBridgeLock)
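For illustration, a bridge module might register such a wrapper as follows (PVRSRV_BRIDGE_EXAMPLE, PVRSRV_BRIDGE_EXAMPLE_DOSOMETHING and InitExampleBridge are invented names); the macro's argument stringification is what fills in the DEBUG_BRIDGE_KM name fields:

PVRSRV_ERROR InitExampleBridge(void)
{
	SetDispatchTableEntry(PVRSRV_BRIDGE_EXAMPLE,
	                      PVRSRV_BRIDGE_EXAMPLE_DOSOMETHING,
	                      PVRSRVBridgeExample,
	                      NULL /* no per-entry bridge lock */);
	return PVRSRV_OK;
}

/* The macro expands, roughly, to:
 *   _SetDispatchTableEntry(PVRSRV_BRIDGE_EXAMPLE,
 *                          PVRSRV_BRIDGE_EXAMPLE_DOSOMETHING,
 *                          "PVRSRV_BRIDGE_EXAMPLE_DOSOMETHING",
 *                          (BridgeWrapperFunction)PVRSRVBridgeExample,
 *                          "PVRSRVBridgeExample",
 *                          (POS_LOCK)NULL, "NULL");
 * so the ioctl, function and lock names recorded for bridge statistics
 * come straight from the preprocessor. */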
+
+#define DISPATCH_TABLE_GAP_THRESHOLD 5
+
+
+#if defined(DEBUG_BRIDGE_KM)
+typedef struct _PVRSRV_BRIDGE_GLOBAL_STATS
+{
+       IMG_UINT32 ui32IOCTLCount;
+       IMG_UINT32 ui32TotalCopyFromUserBytes;
+       IMG_UINT32 ui32TotalCopyToUserBytes;
+} PVRSRV_BRIDGE_GLOBAL_STATS;
+
+void BridgeGlobalStatsLock(void);
+void BridgeGlobalStatsUnlock(void);
+
+/* OS specific code may want to report the stats held here and within the
+ * BRIDGE_DISPATCH_TABLE_ENTRYs (E.g. on Linux we report these via a
+ * debugfs entry /(sys/kernel/debug|proc)/pvr/bridge_stats) */
+extern PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
+#endif
+
+PVRSRV_ERROR BridgeDispatcherInit(void);
+void BridgeDispatcherDeinit(void);
+
+PVRSRV_ERROR
+BridgedDispatchKM(CONNECTION_DATA * psConnection,
+                  PVRSRV_BRIDGE_PACKAGE   * psBridgePackageKM);
+
+PVRSRV_ERROR
+PVRSRVConnectKM(CONNECTION_DATA *psConnection,
+                PVRSRV_DEVICE_NODE * psDeviceNode,
+                IMG_UINT32 ui32Flags,
+                IMG_UINT32 ui32ClientBuildOptions,
+                IMG_UINT32 ui32ClientDDKVersion,
+                IMG_UINT32 ui32ClientDDKBuild,
+                IMG_UINT8  *pui8KernelArch,
+                IMG_UINT32 *ui32CapabilityFlags,
+                IMG_UINT64 *ui64PackedBvnc);
+
+PVRSRV_ERROR
+PVRSRVDisconnectKM(void);
+
+PVRSRV_ERROR
+PVRSRVAcquireGlobalEventObjectKM(IMG_HANDLE *phGlobalEventObject);
+
+PVRSRV_ERROR
+PVRSRVReleaseGlobalEventObjectKM(IMG_HANDLE hGlobalEventObject);
+
+PVRSRV_ERROR
+PVRSRVDumpDebugInfoKM(CONNECTION_DATA *psConnection,
+                      PVRSRV_DEVICE_NODE *psDeviceNode,
+                      IMG_UINT32 ui32VerbLevel);
+
+PVRSRV_ERROR
+PVRSRVGetDevClockSpeedKM(CONNECTION_DATA * psConnection,
+                         PVRSRV_DEVICE_NODE *psDeviceNode,
+                         IMG_PUINT32  pui32RGXClockSpeed);
+
+PVRSRV_ERROR
+PVRSRVHWOpTimeoutKM(CONNECTION_DATA *psConnection,
+                    PVRSRV_DEVICE_NODE *psDeviceNode);
+
+PVRSRV_ERROR PVRSRVAlignmentCheckKM(CONNECTION_DATA *psConnection,
+                                    PVRSRV_DEVICE_NODE * psDeviceNode,
+                                    IMG_UINT32 ui32FWAlignChecksSize,
+                                    IMG_UINT32 aui32FWAlignChecks[]);
+
+PVRSRV_ERROR PVRSRVGetDeviceStatusKM(CONNECTION_DATA *psConnection,
+                                     PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     IMG_UINT32 *pui32DeviceStatus);
+
+PVRSRV_ERROR PVRSRVGetMultiCoreInfoKM(CONNECTION_DATA *psConnection,
+                                     PVRSRV_DEVICE_NODE *psDeviceNode,
+                                     IMG_UINT32 ui32CapsSize,
+                                     IMG_UINT32 *pui32NumCores,
+                                     IMG_UINT64 *pui64Caps);
+
+PVRSRV_ERROR PVRSRVFindProcessMemStatsKM(IMG_PID pid,
+                                         IMG_UINT32 ui32ArrSize,
+                                         IMG_BOOL bAllProcessStats,
+                                         IMG_UINT32 *ui32MemoryStats);
+
+static INLINE
+PVRSRV_ERROR DestroyServerResource(const SHARED_DEV_CONNECTION hConnection,
+                                   IMG_HANDLE hEvent,
+                                   ServerResourceDestroyFunction pfnDestroyCall,
+                                   IMG_HANDLE hResource)
+{
+    PVR_UNREFERENCED_PARAMETER(hEvent);
+
+    return pfnDestroyCall(GetBridgeHandle(hConnection), hResource);
+}
+
+#endif /* SRVCORE_H */
+
+/******************************************************************************
+ End of file (srvcore.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/srvinit.h b/drivers/gpu/drm/img/img-rogue/services/server/include/srvinit.h
new file mode 100644 (file)
index 0000000..48e6863
--- /dev/null
@@ -0,0 +1,68 @@
+/*************************************************************************/ /*!
+@File
+@Title          Initialisation server internal header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines the connections between the various parts of the
+                initialisation server.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef SRVINIT_H
+#define SRVINIT_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "device_connection.h"
+#include "device.h"
+
+#if defined(SUPPORT_RGX)
+PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode);
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* SRVINIT_H */
+
+/******************************************************************************
+ End of file (srvinit.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/srvkm.h b/drivers/gpu/drm/img/img-rogue/services/server/include/srvkm.h
new file mode 100644 (file)
index 0000000..1ca4ee8
--- /dev/null
@@ -0,0 +1,145 @@
+/**************************************************************************/ /*!
+@File
+@Title          Services kernel module internal header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVKM_H
+#define SRVKM_H
+
+#include "servicesext.h"
+
+#if defined(__KERNEL__) && defined(__linux__) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+struct _PVRSRV_DEVICE_NODE_;
+
+/*************************************************************************/ /*!
+@Function     PVRSRVCommonDriverInit
+@Description  Performs one time driver initialisation of Services Common and
+              Device layers.
+@Return       PVRSRV_ERROR   PVRSRV_OK on success and an error otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVCommonDriverInit(void);
+
+/*************************************************************************/ /*!
+@Function     PVRSRVCommonDriverDeInit
+@Description  Performs one time driver de-initialisation of Services.
+@Return       void
+*/ /**************************************************************************/
+void PVRSRVCommonDriverDeInit(void);
+
+/*************************************************************************/ /*!
+@Function     PVRSRVCommonDeviceCreate
+@Description  Creates and initialises a common layer Services device node
+              for an OS native device. First stage device discovery.
+@Input        pvOSDevice      OS native device
+@Input        i32OsDeviceID   A unique identifier which helps recognise this
+                              Device in the UM space provided by the OS.
+@Output       ppsDeviceNode   Points to the new device node on success
+@Return       PVRSRV_ERROR    PVRSRV_OK on success and an error otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVCommonDeviceCreate(void *pvOSDevice, IMG_INT32 i32OsDeviceID,
+                                  struct _PVRSRV_DEVICE_NODE_ **ppsDeviceNode);
+
+/*************************************************************************/ /*!
+@Function     PVRSRVCommonDeviceInitialise
+@Description  Initialises the device layer specifics (e.g. boot FW etc)
+              for the supplied device node, created previously by
+              PVRSRVCommonDeviceCreate. The device is ready for use when this
+              second stage device initialisation returns successfully.
+@Input        psDeviceNode  Device node of the device to be initialised
+@Return       PVRSRV_ERROR  PVRSRV_OK on success and an error otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVCommonDeviceInitialise(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+
+/*************************************************************************/ /*!
+@Function     PVRSRVCommonDeviceDestroy
+@Description  Destroys a PVR Services device node.
+@Input        psDeviceNode  Device node to destroy
+@Return       PVRSRV_ERROR  PVRSRV_OK on success and an error otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVCommonDeviceDestroy(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
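The declarations above imply the following bring-up order. This is only a sketch under the assumption that the OS glue supplies the native device handle; ExampleProbe and the zero OS device ID are invented for illustration, and error handling is abbreviated.

static PVRSRV_ERROR ExampleProbe(void *pvOSDevice)
{
	struct _PVRSRV_DEVICE_NODE_ *psDeviceNode;
	PVRSRV_ERROR eError;

	/* One-time driver initialisation of the common and device layers. */
	eError = PVRSRVCommonDriverInit();
	if (eError != PVRSRV_OK)
		return eError;

	/* First stage: create the device node for the OS native device. */
	eError = PVRSRVCommonDeviceCreate(pvOSDevice, 0, &psDeviceNode);
	if (eError != PVRSRV_OK)
		goto err_driver;

	/* Second stage: boot the firmware; the device is usable afterwards. */
	eError = PVRSRVCommonDeviceInitialise(psDeviceNode);
	if (eError != PVRSRV_OK)
		goto err_device;

	return PVRSRV_OK;

err_device:
	PVRSRVCommonDeviceDestroy(psDeviceNode);
err_driver:
	PVRSRVCommonDriverDeInit();
	return eError;
}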
+
+/******************
+HIGHER LEVEL MACROS
+*******************/
+
+/*----------------------------------------------------------------------------
+Repeats the body of the loop for a certain minimum time, or until the body
+exits by its own means (break, return, goto, etc.)
+
+Example of usage:
+
+LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+{
+       if (psQueueInfo->ui32ReadOffset == psQueueInfo->ui32WriteOffset)
+       {
+               bTimeout = IMG_FALSE;
+               break;
+       }
+
+       OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+} END_LOOP_UNTIL_TIMEOUT();
+
+-----------------------------------------------------------------------------*/
+
+/* iNotLastLoop will remain at 1 until the timeout has expired, at which time
+ * it will be decremented and the loop executed one final time. This is
+ * necessary when preemption is enabled.
+ */
+/* PRQA S 3411,3431 12 */ /* critical format, leave alone */
+#define LOOP_UNTIL_TIMEOUT(TIMEOUT) \
+{\
+       IMG_UINT32 uiOffset, uiStart, uiCurrent; \
+       IMG_INT32 iNotLastLoop;                                  \
+       for (uiOffset = 0, uiStart = OSClockus(), uiCurrent = uiStart + 1, iNotLastLoop = 1;\
+               ((uiCurrent - uiStart + uiOffset) < (TIMEOUT)) || iNotLastLoop--;                               \
+               uiCurrent = OSClockus(),                                                                                                        \
+               uiOffset = uiCurrent < uiStart ? IMG_UINT32_MAX - uiStart : uiOffset,           \
+               uiStart = uiCurrent < uiStart ? 0 : uiStart)
+
+#define END_LOOP_UNTIL_TIMEOUT() \
+}
+
+#endif /* SRVKM_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/sync_checkpoint.h b/drivers/gpu/drm/img/img-rogue/services/server/include/sync_checkpoint.h
new file mode 100644 (file)
index 0000000..33c26f4
--- /dev/null
@@ -0,0 +1,666 @@
+/*************************************************************************/ /*!
+@File
+@Title          Synchronisation checkpoint interface header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines the client side interface for synchronisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef SYNC_CHECKPOINT_H
+#define SYNC_CHECKPOINT_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_sync_km.h"
+#include "pdumpdefs.h"
+#include "pdump.h"
+#include "dllist.h"
+#include "pvr_debug.h"
+#include "device_connection.h"
+#include "opaque_types.h"
+
+#ifndef CHECKPOINT_TYPES
+#define CHECKPOINT_TYPES
+typedef struct SYNC_CHECKPOINT_CONTEXT_TAG *PSYNC_CHECKPOINT_CONTEXT;
+
+typedef struct SYNC_CHECKPOINT_TAG *PSYNC_CHECKPOINT;
+#endif
+
+/* definitions for functions to be implemented by OS-specific sync - the OS-specific sync code
+   will call SyncCheckpointRegisterFunctions() when initialised, in order to register functions
+   we can then call */
+#ifndef CHECKPOINT_PFNS
+#define CHECKPOINT_PFNS
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN)(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+                                                             PVRSRV_FENCE fence,
+                                                             IMG_UINT32 *nr_checkpoints,
+                                                             PSYNC_CHECKPOINT **checkpoint_handles,
+                                                             IMG_UINT64 *pui64FenceUID);
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN)(PPVRSRV_DEVICE_NODE device,
+                                                            const IMG_CHAR *fence_name,
+                                                            PVRSRV_TIMELINE timeline,
+                                                            PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+                                                            PVRSRV_FENCE *new_fence,
+                                                            IMG_UINT64 *pui64FenceUID,
+                                                            void **ppvFenceFinaliseData,
+                                                            PSYNC_CHECKPOINT *new_checkpoint_handle,
+                                                            IMG_HANDLE *timeline_update_sync,
+                                                            IMG_UINT32 *timeline_update_value);
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN)(PVRSRV_FENCE fence_to_rollback, void *finalise_data);
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN)(PVRSRV_FENCE fence_to_finalise, void *finalise_data);
+typedef void (*PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN)(void *private_data);
+typedef void (*PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN)(void *mem_ptr);
+typedef IMG_UINT32 (*PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN)(IMG_UINT32 num_ufos, IMG_UINT32 *vaddrs);
+#if defined(PDUMP)
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_GETCHECKPOINTS_FN)(PVRSRV_FENCE iFence,
+                                                                       IMG_UINT32 *puiNumCheckpoints,
+                                                                       PSYNC_CHECKPOINT **papsCheckpoints);
+#endif
+
+#define SYNC_CHECKPOINT_IMPL_MAX_STRLEN 20
+
+typedef struct
+{
+       PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN pfnFenceResolve;
+       PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN pfnFenceCreate;
+       PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN pfnFenceDataRollback;
+       PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN pfnFenceFinalise;
+       PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN pfnNoHWUpdateTimelines;
+       PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN pfnFreeCheckpointListMem;
+       PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN pfnDumpInfoOnStalledUFOs;
+       IMG_CHAR pszImplName[SYNC_CHECKPOINT_IMPL_MAX_STRLEN];
+#if defined(PDUMP)
+       PFN_SYNC_CHECKPOINT_FENCE_GETCHECKPOINTS_FN pfnSyncFenceGetCheckpoints;
+#endif
+} PFN_SYNC_CHECKPOINT_STRUCT;
+
+PVRSRV_ERROR SyncCheckpointRegisterFunctions(PFN_SYNC_CHECKPOINT_STRUCT *psSyncCheckpointPfns);
+
+#endif /* ifndef CHECKPOINT_PFNS */
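As a hedged sketch of how an OS-native sync backend would use this registration hook, the pvr_example_* callbacks below stand in for the backend's own implementations of the function types above:

/* pvr_example_* are placeholder callbacks implementing the typedefs above. */
static PFN_SYNC_CHECKPOINT_STRUCT gsExampleSyncFns = {
	.pfnFenceResolve          = pvr_example_fence_resolve,
	.pfnFenceCreate           = pvr_example_fence_create,
	.pfnFenceDataRollback     = pvr_example_fence_rollback,
	.pfnFenceFinalise         = pvr_example_fence_finalise,
	.pfnNoHWUpdateTimelines   = pvr_example_nohw_update_timelines,
	.pfnFreeCheckpointListMem = pvr_example_free_checkpoint_list,
	.pfnDumpInfoOnStalledUFOs = pvr_example_dump_stalled_ufos,
	.pszImplName              = "example",
};

static PVRSRV_ERROR ExampleSyncBackendInit(void)
{
	/* Once registered, SyncCheckpointResolveFence(), SyncCheckpointCreateFence()
	 * and the other wrappers below route through these callbacks. */
	return SyncCheckpointRegisterFunctions(&gsExampleSyncFns);
}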
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointContextCreate
+
+@Description    Create a new synchronisation checkpoint context
+
+@Input          psDevNode                 Device node
+
+@Output         ppsSyncCheckpointContext  Handle to the created synchronisation
+                                          checkpoint context
+
+@Return         PVRSRV_OK if the synchronisation checkpoint context was
+                successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointContextCreate(PPVRSRV_DEVICE_NODE psDevNode,
+                            PSYNC_CHECKPOINT_CONTEXT *ppsSyncCheckpointContext);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointContextDestroy
+
+@Description    Destroy a synchronisation checkpoint context
+
+@Input          psSyncCheckpointContext  Handle to the synchronisation
+                                         checkpoint context to destroy
+
+@Return         PVRSRV_OK if the synchronisation checkpoint context was
+                successfully destroyed.
+                PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT if the context still
+                has sync checkpoints defined
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointContextRef
+
+@Description    Takes a reference on a synchronisation checkpoint context
+
+@Input          psContext  Handle to the synchronisation checkpoint context
+                           on which a ref is to be taken
+
+@Return         None
+*/
+/*****************************************************************************/
+void SyncCheckpointContextRef(PSYNC_CHECKPOINT_CONTEXT psContext);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointContextUnref
+
+@Description    Drops a reference taken on a synchronisation checkpoint
+                context
+
+@Input          psContext  Handle to the synchronisation checkpoint context
+                           on which the ref is to be dropped
+
+@Return         None
+*/
+/*****************************************************************************/
+void SyncCheckpointContextUnref(PSYNC_CHECKPOINT_CONTEXT psContext);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointAlloc
+
+@Description    Allocate a new synchronisation checkpoint on the specified
+                synchronisation checkpoint context
+
+@Input          psSyncContext           Handle to the synchronisation
+                                        checkpoint context
+
+@Input          hTimeline               Timeline on which this sync
+                                        checkpoint is being created
+
+@Input          hFence                  Fence as passed into pfnFenceResolve
+                                        API, when the API encounters a non-PVR
+                                        fence as part of its input fence. From
+                                        all other places this argument must be
+                                        PVRSRV_NO_FENCE.
+
+@Input          pszCheckpointName       Sync checkpoint source annotation
+                                        (will be truncated to at most
+                                         PVRSRV_SYNC_NAME_LENGTH chars)
+
+@Output         ppsSyncCheckpoint       Created synchronisation checkpoint
+
+@Return         PVRSRV_OK if the synchronisation checkpoint was
+                successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext,
+                    PVRSRV_TIMELINE hTimeline,
+                    PVRSRV_FENCE hFence,
+                    const IMG_CHAR *pszCheckpointName,
+                    PSYNC_CHECKPOINT *ppsSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointFree
+
+@Description    Free a synchronisation checkpoint
+                The reference count held for the synchronisation checkpoint
+                is decremented; if it becomes zero, it is also freed.
+
+@Input          psSyncCheckpoint        The synchronisation checkpoint to free
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointSignal
+
+@Description    Signal the synchronisation checkpoint
+
+@Input          psSyncCheckpoint        The synchronisation checkpoint to signal
+
+@Input          ui32FenceSyncFlags      Flags used for controlling HWPerf behavior
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags);
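A minimal lifecycle sketch using the context and checkpoint functions above; PVRSRV_NO_TIMELINE and PVRSRV_FENCE_FLAG_NONE are assumed to be the usual "none" values from the sync headers, and error handling is trimmed:

static PVRSRV_ERROR ExampleCheckpointLifecycle(PPVRSRV_DEVICE_NODE psDevNode)
{
	PSYNC_CHECKPOINT_CONTEXT psContext;
	PSYNC_CHECKPOINT psCheckpoint;
	PVRSRV_ERROR eError;

	eError = SyncCheckpointContextCreate(psDevNode, &psContext);
	if (eError != PVRSRV_OK)
		return eError;

	/* No foreign fence is involved here, so PVRSRV_NO_FENCE is passed. */
	eError = SyncCheckpointAlloc(psContext, PVRSRV_NO_TIMELINE, PVRSRV_NO_FENCE,
	                             "example-checkpoint", &psCheckpoint);
	if (eError == PVRSRV_OK)
	{
		SyncCheckpointSignal(psCheckpoint, PVRSRV_FENCE_FLAG_NONE);
		SyncCheckpointFree(psCheckpoint);
	}

	return SyncCheckpointContextDestroy(psContext);
}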
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointSignalNoHW
+
+@Description    Signal the synchronisation checkpoint in a NO_HARDWARE build
+
+@Input          psSyncCheckpoint        The synchronisation checkpoint to signal
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointSignalNoHW(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointError
+
+@Description    Error the synchronisation checkpoint
+
+@Input          psSyncCheckpoint        The synchronisation checkpoint to error
+
+@Input          ui32FenceSyncFlags      Flags used for controlling HWPerf behavior
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointStateFromUFO
+
+@Description    Returns the current state of the synchronisation checkpoint
+                which has the given UFO firmware address
+
+@Input          psDevNode               The device owning the sync
+                                        checkpoint
+
+@Input          ui32FwAddr              The firmware address of the sync
+                                        checkpoint
+
+@Return         The current state (32-bit value) of the sync checkpoint
+*/
+/*****************************************************************************/
+IMG_UINT32 SyncCheckpointStateFromUFO(PPVRSRV_DEVICE_NODE psDevNode,
+                                IMG_UINT32 ui32FwAddr);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointErrorFromUFO
+
+@Description    Error the synchronisation checkpoint which has the
+                given UFO firmware address
+
+@Input          psDevNode               The device owning the sync
+                                        checkpoint to be errored
+
+@Input          ui32FwAddr              The firmware address of the sync
+                                        checkpoint to be errored
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointErrorFromUFO(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointRollbackFromUFO
+
+@Description    Drop the enqueued count reference taken on the synchronisation
+                checkpoint on behalf of the firmware.
+                Called in the event of a DM Kick failing.
+
+@Input          psDevNode               The device owning the sync
+                                        checkpoint to be rolled back
+
+@Input          ui32FwAddr              The firmware address of the sync
+                                        checkpoint to be rolled back
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointRollbackFromUFO(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointIsSignalled
+
+@Description    Returns IMG_TRUE if the synchronisation checkpoint is
+                signalled or errored
+
+@Input          psSyncCheckpoint        The synchronisation checkpoint to test
+
+@Input          ui32FenceSyncFlags      Flags used for controlling HWPerf behavior
+
+@Return         IMG_TRUE if the synchronisation checkpoint is signalled or
+                errored, IMG_FALSE otherwise
+*/
+/*****************************************************************************/
+IMG_BOOL
+SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint,
+                          IMG_UINT32 ui32FenceSyncFlags);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointIsErrored
+
+@Description    Returns IMG_TRUE if the synchronisation checkpoint is
+                errored
+
+@Input          psSyncCheckpoint        The synchronisation checkpoint to test
+
+@Input          ui32FenceSyncFlags      Flags used for controlling HWPerf behavior
+
+@Return         IMG_TRUE if the synchronisation checkpoint is errored,
+                IMG_FALSE otherwise
+*/
+/*****************************************************************************/
+IMG_BOOL
+SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint,
+                        IMG_UINT32 ui32FenceSyncFlags);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointTakeRef
+
+@Description    Take a reference on a synchronisation checkpoint
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint to take a
+                                        reference on
+
+@Return         PVRSRV_OK if a reference was taken on the synchronisation
+                primitive
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointTakeRef(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointDropRef
+
+@Description    Drop a reference on a synchronisation checkpoint
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint to drop a
+                                        reference on
+
+@Return         PVRSRV_OK if a reference was dropped on the synchronisation
+                primitive
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointDropRef(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointResolveFence
+
+@Description    Resolve a fence, returning a list of the sync checkpoints
+                that fence contains.
+                This function in turn calls a function provided by the
+                OS native sync implementation.
+
+@Input          psSyncCheckpointContext The sync checkpoint context
+                                        on which checkpoints should be
+                                        created (in the event of the fence
+                                        having a native sync pt with no
+                                        associated sync checkpoint)
+
+@Input          hFence                  The fence to be resolved
+
+@Output         pui32NumSyncCheckpoints The number of sync checkpoints the
+                                        fence contains. Can return 0 if
+                                        passed a null (-1) fence.
+
+@Output         papsSyncCheckpoints     List of sync checkpoints the fence
+                                        contains
+
+@Output         puiFenceUID             Unique ID of the resolved fence
+
+@Return         PVRSRV_OK if a valid fence was provided.
+                PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+                sync has not registered a callback function.
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointResolveFence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+                           PVRSRV_FENCE hFence,
+                           IMG_UINT32 *pui32NumSyncCheckpoints,
+                           PSYNC_CHECKPOINT **papsSyncCheckpoints,
+                           IMG_UINT64 *puiFenceUID,
+                           PDUMP_FLAGS_T ui32PDumpFlags);
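A sketch of the resolve-and-release pattern described above; a PDump flags value of 0 and a fence-sync flags value of 0 are assumed to mean "no special behaviour", and error handling is trimmed:

static PVRSRV_ERROR ExampleResolveFence(PSYNC_CHECKPOINT_CONTEXT psContext,
                                        PVRSRV_FENCE hInFence)
{
	IMG_UINT32 ui32NumCheckpoints = 0;
	PSYNC_CHECKPOINT *papsCheckpoints = NULL;
	IMG_UINT64 ui64FenceUID = 0;
	IMG_UINT32 i;
	PVRSRV_ERROR eError;

	eError = SyncCheckpointResolveFence(psContext, hInFence,
	                                    &ui32NumCheckpoints, &papsCheckpoints,
	                                    &ui64FenceUID, 0);
	if (eError != PVRSRV_OK)
		return eError;

	for (i = 0; i < ui32NumCheckpoints; i++)
	{
		/* Typically each checkpoint would be attached to a kick as a
		 * wait dependency; here we only peek at its state. */
		(void)SyncCheckpointIsSignalled(papsCheckpoints[i], 0);
	}

	/* The list memory belongs to the sync implementation; hand it back. */
	SyncCheckpointFreeCheckpointListMem(papsCheckpoints);
	return PVRSRV_OK;
}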
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointCreateFence
+
+@Description    Create a fence containing a single sync checkpoint.
+                Return the fence and a ptr to sync checkpoint it contains.
+                This function in turn calls a function provided by the
+                OS native sync implementation.
+
+@Input          pszFenceName            String to assign to the new fence
+                                        (for debugging purposes)
+
+@Input          hTimeline               Timeline on which the new fence is
+                                        to be created
+
+@Input          psSyncCheckpointContext Sync checkpoint context to be used
+                                        when creating the new fence
+
+@Output         phNewFence              The newly created fence
+
+@Output         pui64FenceUID           Unique ID of the created fence
+
+@Output         ppvFenceFinaliseData    Any data needed to finalise the fence
+                                        in a later call to the function
+                                        SyncCheckpointFinaliseFence()
+
+@Output         psNewSyncCheckpoint     The sync checkpoint contained in
+                                        the new fence
+
+@Return         PVRSRV_OK if a valid fence was provided.
+                PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+                sync has not registered a callback function.
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointCreateFence(PPVRSRV_DEVICE_NODE psDeviceNode,
+                          const IMG_CHAR *pszFenceName,
+                          PVRSRV_TIMELINE hTimeline,
+                          PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+                          PVRSRV_FENCE *phNewFence,
+                          IMG_UINT64 *pui64FenceUID,
+                          void **ppvFenceFinaliseData,
+                          PSYNC_CHECKPOINT *psNewSyncCheckpoint,
+                          void **ppvTimelineUpdateSyncPrim,
+                          IMG_UINT32 *pui32TimelineUpdateValue,
+                          PDUMP_FLAGS_T ui32PDumpFlags);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointRollbackFenceData
+
+@Description    'Rolls back' the fence specified (destroys the fence and
+                takes any other required actions to undo the fence
+                creation, e.g. if the implementation wishes to revert the
+                incrementing of the fence's timeline).
+                This function in turn calls a function provided by the
+                OS native sync implementation.
+
+@Input          hFence                  Fence to be 'rolled back'
+
+@Input          pvFinaliseData          Data needed to finalise the
+                                        fence
+
+@Return         PVRSRV_OK if a valid fence was provided.
+                PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+                sync has not registered a callback function.
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointRollbackFenceData(PVRSRV_FENCE hFence, void *pvFinaliseData);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointFinaliseFence
+
+@Description    'Finalise' the fence specified (performs any actions the
+                underlying implementation may need to perform just prior
+                to the fence being returned to the client).
+                This function in turn calls a function provided by the
+                OS native sync implementation - if the native sync
+                implementation does not need to perform any actions at
+                this time, this function does not need to be registered.
+
+@Input          psDevNode               Device node
+
+@Input          hFence                  Fence to be 'finalised'
+
+@Input          pvFinaliseData          Data needed to finalise the fence
+
+@Input          psSyncCheckpoint        Base sync checkpoint that this fence
+                                        is formed of
+
+@Input          pszName                 Fence annotation
+
+@Return         PVRSRV_OK if a valid fence and finalise data were provided.
+                PVRSRV_ERROR_INVALID_PARAMS if an invalid fence or finalise
+                data were provided.
+                PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+                sync has not registered a callback function (permitted).
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointFinaliseFence(PPVRSRV_DEVICE_NODE psDevNode,
+                            PVRSRV_FENCE hFence,
+                            void *pvFinaliseData,
+                            PSYNC_CHECKPOINT psSyncCheckpoint,
+                            const IMG_CHAR *pszName);
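The create/finalise/rollback trio is normally used around a DM kick; the sketch below shows one plausible shape of that flow (ExampleCreateOutputFence and the bKickSucceeded flag are invented, and PDump flags are simplified to 0):

static PVRSRV_ERROR ExampleCreateOutputFence(PPVRSRV_DEVICE_NODE psDevNode,
                                             PSYNC_CHECKPOINT_CONTEXT psContext,
                                             PVRSRV_TIMELINE hTimeline,
                                             IMG_BOOL bKickSucceeded,
                                             PVRSRV_FENCE *phOutFence)
{
	PSYNC_CHECKPOINT psCheckpoint = NULL;
	void *pvFinaliseData = NULL;
	void *pvTimelineUpdateSync = NULL;
	IMG_UINT32 ui32TimelineUpdateValue = 0;
	IMG_UINT64 ui64FenceUID = 0;
	PVRSRV_ERROR eError;

	eError = SyncCheckpointCreateFence(psDevNode, "example-fence", hTimeline,
	                                   psContext, phOutFence, &ui64FenceUID,
	                                   &pvFinaliseData, &psCheckpoint,
	                                   &pvTimelineUpdateSync,
	                                   &ui32TimelineUpdateValue, 0);
	if (eError != PVRSRV_OK)
		return eError;

	if (!bKickSucceeded)
	{
		/* Undo the fence creation if the work it guards was never queued. */
		return SyncCheckpointRollbackFenceData(*phOutFence, pvFinaliseData);
	}

	/* Hand the fence back to the client once the kick has been accepted. */
	return SyncCheckpointFinaliseFence(psDevNode, *phOutFence, pvFinaliseData,
	                                   psCheckpoint, "example-fence");
}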
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointFreeCheckpointListMem
+
+@Description    Free the memory which was allocated by the sync
+                implementation and used to return the list of sync
+                checkpoints when resolving a fence.
+                This function in turn calls a free function registered by
+                the sync implementation (if a function has been registered).
+
+@Input          pvCheckpointListMem     Pointer to the memory to be freed
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointFreeCheckpointListMem(void *pvCheckpointListMem);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointNoHWUpdateTimelines
+
+@Description    Called by the DDK in a NO_HARDWARE build only.
+                After syncs have been manually signalled by the DDK, this
+                function is called to allow the OS native sync implementation
+                to update its timelines (as the usual callback notification
+                of signalled checkpoints is not supported for NO_HARDWARE).
+                This function in turn calls a function provided by the
+                OS native sync implementation.
+
+@Input          pvPrivateData            Any data the OS native sync
+                                         implementation might require.
+
+@Return         PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+                sync has not registered a callback function, otherwise
+                PVRSRV_OK.
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointNoHWUpdateTimelines(void *pvPrivateData);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointDumpInfoOnStalledUFOs
+
+@Description    Called by the DDK in the event of the health check watchdog
+                examining the CCBs and determining that one has failed to
+                progress after 10 seconds when the GPU is idle due to waiting
+                on one or more UFO fences.
+                The DDK will pass a list of UFOs on which the CCB is waiting
+                and the sync implementation will check them to see if any
+                relate to sync points it has created. If so, the
+                implementation should dump debug information on those sync
+                points to the kernel log or other suitable output (which will
+                allow the unsignalled syncs to be identified).
+                The function shall return the number of syncs in the provided
+                array that were syncs which it had created.
+
+@Input          ui32NumUFOs           The number of UFOs in the array passed
+                                      in the pui32Vaddrs parameter.
+@Input          pui32Vaddrs           The array of UFOs the CCB is waiting on.
+
+@Output         pui32NumSyncOwnedUFOs The number of UFOs in pui32Vaddrs which
+                                      relate to syncs created by the sync
+                                      implementation.
+
+@Return         PVRSRV_OK if a valid pointer is provided in pui32NumSyncOwnedUFOs.
+                PVRSRV_ERROR_INVALID_PARAMS if a NULL value is provided in
+                pui32NumSyncOwnedUFOs.
+                PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+                sync has not registered a callback function.
+
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointDumpInfoOnStalledUFOs(IMG_UINT32 ui32NumUFOs,
+                                    IMG_UINT32 *pui32Vaddrs,
+                                    IMG_UINT32 *pui32NumSyncOwnedUFOs);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointGetStateString
+
+@Description    Called to get a string representing the current state of a
+                sync checkpoint.
+
+@Input          psSyncCheckpoint        Synchronisation checkpoint to get the
+                                        state for.
+
+@Return         The string representing the current state of this checkpoint
+*/
+/*****************************************************************************/
+const IMG_CHAR *
+SyncCheckpointGetStateString(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointRecordLookup
+
+@Description    Returns a debug string with information about the
+                sync checkpoint.
+
+@Input          psDevNode               The device owning the sync
+                                        checkpoint to lookup
+
+@Input          ui32FwAddr              The firmware address of the sync
+                                        checkpoint to lookup
+
+@Input          pszSyncInfo             Character array to write to
+
+@Input          len                     Length of the character array
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointRecordLookup(PPVRSRV_DEVICE_NODE psDevNode,
+                           IMG_UINT32 ui32FwAddr,
+                           IMG_CHAR * pszSyncInfo, size_t len);
+
+#if defined(PDUMP)
+/*************************************************************************/ /*!
+@Function       PVRSRVSyncCheckpointSignalledPDumpPolKM
+
+@Description    Called to insert a poll into the PDump script on a given
+                Fence being signalled or errored.
+
+@Input          hFence        Fence for PDump to poll on
+
+@Return         PVRSRV_OK if a valid fence was provided.
+*/
+/*****************************************************************************/
+
+PVRSRV_ERROR PVRSRVSyncCheckpointSignalledPDumpPolKM(PVRSRV_FENCE hFence);
+
+#endif
+
+#endif /* SYNC_CHECKPOINT_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/sync_checkpoint_init.h b/drivers/gpu/drm/img/img-rogue/services/server/include/sync_checkpoint_init.h
new file mode 100644 (file)
index 0000000..94f2e00
--- /dev/null
@@ -0,0 +1,82 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services synchronisation checkpoint initialisation interface
+                header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines synchronisation checkpoint structures that are visible
+                internally and externally
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef SYNC_CHECKPOINT_INIT_H
+#define SYNC_CHECKPOINT_INIT_H
+
+#include "device.h"
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointInit
+
+@Description    Initialise the sync checkpoint driver by giving it the
+                device node (needed to determine the pfnUFOAlloc function
+                to call in order to allocate sync block memory).
+
+@Input          psDevNode               Device for which sync checkpoints
+                                        are being initialised
+
+@Return         PVRSRV_OK               initialised successfully,
+                PVRSRV_ERROR_<error>    otherwise
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointInit(PVRSRV_DEVICE_NODE *psDevNode);
+
+/*************************************************************************/ /*!
+@Function       SyncCheckpointDeinit
+
+@Description    Deinitialise the sync checkpoint driver.
+                Frees resources allocated during initialisation.
+
+@Input          psDevNode               Device for which sync checkpoints
+                                        are being de-initialised
+
+@Return         None
+*/
+/*****************************************************************************/
+void SyncCheckpointDeinit(PVRSRV_DEVICE_NODE *psDevNode);
+
+#endif /* SYNC_CHECKPOINT_INIT_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/sync_fallback_server.h b/drivers/gpu/drm/img/img-rogue/services/server/include/sync_fallback_server.h
new file mode 100644 (file)
index 0000000..ac6bd47
--- /dev/null
@@ -0,0 +1,204 @@
+/**************************************************************************/ /*!
+@File
+@Title          Fallback sync interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SYNC_FALLBACK_SERVER_H
+#define SYNC_FALLBACK_SERVER_H
+
+#include "img_types.h"
+#include "sync_checkpoint.h"
+#include "device.h"
+#include "connection_server.h"
+
+
+typedef struct _PVRSRV_TIMELINE_SERVER_ PVRSRV_TIMELINE_SERVER;
+typedef struct _PVRSRV_FENCE_SERVER_ PVRSRV_FENCE_SERVER;
+typedef struct _PVRSRV_FENCE_EXPORT_ PVRSRV_FENCE_EXPORT;
+
+typedef struct _PVRSRV_SYNC_PT_ PVRSRV_SYNC_PT;
+
+#define SYNC_FB_TIMELINE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH
+#define SYNC_FB_FENCE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH
+
+/*****************************************************************************/
+/*                                                                           */
+/*                         SW SPECIFIC FUNCTIONS                             */
+/*                                                                           */
+/*****************************************************************************/
+
+PVRSRV_ERROR SyncFbTimelineCreateSW(IMG_UINT32 uiTimelineNameSize,
+                                    const IMG_CHAR *pszTimelineName,
+                                    PVRSRV_TIMELINE_SERVER **ppsTimeline);
+
+PVRSRV_ERROR SyncFbFenceCreateSW(CONNECTION_DATA *psConnection,
+                                 PVRSRV_DEVICE_NODE *psDeviceNode,
+                                 PVRSRV_TIMELINE_SERVER *psTimeline,
+                                 IMG_UINT32 uiFenceNameSize,
+                                 const IMG_CHAR *pszFenceName,
+                                 PVRSRV_FENCE_SERVER **ppsOutputFence,
+                                 IMG_UINT64 *pui64SyncPtIdx);
+PVRSRV_ERROR SyncFbSWTimelineFenceCreateKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                           PVRSRV_TIMELINE iSWTimeline,
+                                           const IMG_CHAR *pszFenceName,
+                                           PVRSRV_FENCE *piOutputFence,
+                                           IMG_UINT64* pui64SyncPtIdx);
+
+PVRSRV_ERROR SyncFbTimelineAdvanceSW(PVRSRV_TIMELINE_SERVER *psTimeline,
+                                     IMG_UINT64 *pui64SyncPtIdx);
+PVRSRV_ERROR SyncFbSWTimelineAdvanceKM(void *pvSWTimelineObj,
+                                       IMG_UINT64* pui64SyncPtIdx);
+
+/*****************************************************************************/
+/*                                                                           */
+/*                         PVR SPECIFIC FUNCTIONS                            */
+/*                                                                           */
+/*****************************************************************************/
+
+PVRSRV_ERROR SyncFbTimelineCreatePVR(IMG_UINT32 uiTimelineNameSize,
+                                     const IMG_CHAR *pszTimelineName,
+                                     PVRSRV_TIMELINE_SERVER **ppsTimeline);
+
+PVRSRV_ERROR SyncFbFenceCreatePVR(PPVRSRV_DEVICE_NODE psDeviceNode,
+                                  const IMG_CHAR *pszName,
+                                  PVRSRV_TIMELINE iTl,
+                                  PSYNC_CHECKPOINT_CONTEXT hSyncCheckpointContext,
+                                  PVRSRV_FENCE *piOutFence,
+                                  IMG_UINT64 *puiFenceUID,
+                                  void **ppvFenceFinaliseData,
+                                  PSYNC_CHECKPOINT *ppsOutCheckpoint,
+                                  void **ppvTimelineUpdateSync,
+                                  IMG_UINT32 *puiTimelineUpdateValue);
+
+PVRSRV_ERROR SyncFbFenceResolvePVR(PSYNC_CHECKPOINT_CONTEXT psContext,
+                                   PVRSRV_FENCE iFence,
+                                   IMG_UINT32 *puiNumCheckpoints,
+                                   PSYNC_CHECKPOINT **papsCheckpoints,
+                                   IMG_UINT64 *puiFenceUID);
+
+/*****************************************************************************/
+/*                                                                           */
+/*                         GENERIC FUNCTIONS                                 */
+/*                                                                           */
+/*****************************************************************************/
+
+PVRSRV_ERROR SyncFbGetFenceObj(PVRSRV_FENCE iFence,
+                               void **ppvFenceObj);
+
+PVRSRV_ERROR SyncFbSWGetTimelineObj(PVRSRV_TIMELINE iSWTimeline,
+                                    void **ppvSWTimelineObj);
+
+PVRSRV_ERROR SyncFbTimelineRelease(PVRSRV_TIMELINE_SERVER *psTl);
+
+PVRSRV_ERROR SyncFbFenceRelease(PVRSRV_FENCE_SERVER *psFence);
+PVRSRV_ERROR SyncFbFenceReleaseKM(void *pvFenceObj);
+
+PVRSRV_ERROR SyncFbFenceDup(PVRSRV_FENCE_SERVER *psInFence,
+                            PVRSRV_FENCE_SERVER **ppsOutFence);
+
+PVRSRV_ERROR SyncFbFenceMerge(PVRSRV_FENCE_SERVER *psInFence1,
+                              PVRSRV_FENCE_SERVER *psInFence2,
+                              IMG_UINT32 uiFenceNameSize,
+                              const IMG_CHAR *pszFenceName,
+                              PVRSRV_FENCE_SERVER **ppsOutFence);
+
+PVRSRV_ERROR SyncFbFenceWait(PVRSRV_FENCE_SERVER *psFence,
+                             IMG_UINT32 uiTimeout);
+
+PVRSRV_ERROR SyncFbFenceDump(PVRSRV_FENCE_SERVER *psFence,
+                             IMG_UINT32 uiLine,
+                             IMG_UINT32 uiFileNameLength,
+                             const IMG_CHAR *pszFile,
+                             IMG_UINT32 uiModuleLength,
+                             const IMG_CHAR *pszModule,
+                             IMG_UINT32 uiDescLength,
+                             const IMG_CHAR *pszDesc);
+
+PVRSRV_ERROR SyncFbDumpFenceKM(void *pvSWFenceObj,
+                                  DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                  void *pvDumpDebugFile);
+
+PVRSRV_ERROR SyncFbSWDumpTimelineKM(void *pvSWTimelineObj,
+                                    DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                    void *pvDumpDebugFile);
+
+PVRSRV_ERROR SyncFbRegisterSyncFunctions(void);
+
+PVRSRV_ERROR SyncFbRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+PVRSRV_ERROR SyncFbDeregisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+IMG_UINT32 SyncFbDumpInfoOnStalledUFOs(IMG_UINT32 nr_ufos, IMG_UINT32 *vaddrs);
+
+IMG_BOOL SyncFbCheckpointHasSignalled(IMG_UINT32 ui32FwAddr, IMG_UINT32 ui32Value);
+
+/*****************************************************************************/
+/*                                                                           */
+/*                       IMPORT/EXPORT FUNCTIONS                             */
+/*                                                                           */
+/*****************************************************************************/
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+PVRSRV_ERROR SyncFbFenceExportInsecure(PVRSRV_FENCE_SERVER *psFence,
+                                       PVRSRV_FENCE_EXPORT **ppExport);
+
+PVRSRV_ERROR SyncFbFenceExportDestroyInsecure(PVRSRV_FENCE_EXPORT *psExport);
+
+PVRSRV_ERROR SyncFbFenceImportInsecure(CONNECTION_DATA *psConnection,
+                                       PVRSRV_DEVICE_NODE *psDevice,
+                                       PVRSRV_FENCE_EXPORT *psImport,
+                                       PVRSRV_FENCE_SERVER **psFence);
+#endif /* defined(SUPPORT_INSECURE_EXPORT) */
+
+PVRSRV_ERROR SyncFbFenceExportSecure(CONNECTION_DATA *psConnection,
+                                     PVRSRV_DEVICE_NODE * psDevNode,
+                                     PVRSRV_FENCE_SERVER *psFence,
+                                     IMG_SECURE_TYPE *phSecure,
+                                     PVRSRV_FENCE_EXPORT **ppsExport,
+                                     CONNECTION_DATA **ppsSecureConnection);
+
+PVRSRV_ERROR SyncFbFenceExportDestroySecure(PVRSRV_FENCE_EXPORT *psExport);
+
+PVRSRV_ERROR SyncFbFenceImportSecure(CONNECTION_DATA *psConnection,
+                                     PVRSRV_DEVICE_NODE *psDevice,
+                                     IMG_SECURE_TYPE hSecure,
+                                     PVRSRV_FENCE_SERVER **psFence);
+
+#endif /* SYNC_FALLBACK_SERVER_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/sync_server.h b/drivers/gpu/drm/img/img-rogue/services/server/include/sync_server.h
new file mode 100644 (file)
index 0000000..e356829
--- /dev/null
@@ -0,0 +1,249 @@
+/**************************************************************************/ /*!
+@File
+@Title          Server side synchronisation interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Describes the server side synchronisation functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv.h"
+#include "device.h"
+#include "devicemem.h"
+#include "pdump.h"
+#include "pvrsrv_error.h"
+#include "connection_server.h"
+#include "pdump_km.h"
+
+#ifndef SYNC_SERVER_H
+#define SYNC_SERVER_H
+
+typedef struct _SYNC_PRIMITIVE_BLOCK_ SYNC_PRIMITIVE_BLOCK;
+typedef struct _SYNC_CONNECTION_DATA_ SYNC_CONNECTION_DATA;
+typedef struct SYNC_RECORD* SYNC_RECORD_HANDLE;
+
+typedef struct _SYNC_ADDR_LIST_
+{
+       IMG_UINT32 ui32NumSyncs;
+       PRGXFWIF_UFO_ADDR *pasFWAddrs;
+} SYNC_ADDR_LIST;
+
+PVRSRV_ERROR
+SyncPrimitiveBlockToFWAddr(SYNC_PRIMITIVE_BLOCK *psSyncPrimBlock,
+                                               IMG_UINT32 ui32Offset,
+                                               PRGXFWIF_UFO_ADDR *psAddrOut);
+
+void
+SyncAddrListInit(SYNC_ADDR_LIST *psList);
+
+void
+SyncAddrListDeinit(SYNC_ADDR_LIST *psList);
+
+PVRSRV_ERROR
+SyncAddrListPopulate(SYNC_ADDR_LIST *psList,
+                                               IMG_UINT32 ui32NumSyncs,
+                                               SYNC_PRIMITIVE_BLOCK **apsSyncPrimBlock,
+                                               IMG_UINT32 *paui32SyncOffset);
+
+PVRSRV_ERROR
+SyncAddrListAppendSyncPrim(SYNC_ADDR_LIST          *psList,
+                                                  PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim);
+PVRSRV_ERROR
+SyncAddrListAppendCheckpoints(SYNC_ADDR_LIST *psList,
+                                                               IMG_UINT32 ui32NumCheckpoints,
+                                                               PSYNC_CHECKPOINT *apsSyncCheckpoint);
+
+PVRSRV_ERROR
+SyncAddrListAppendAndDeRefCheckpoints(SYNC_ADDR_LIST *psList,
+                                                                         IMG_UINT32 ui32NumCheckpoints,
+                                                                         PSYNC_CHECKPOINT *apsSyncCheckpoint);
+
+void
+SyncAddrListDeRefCheckpoints(IMG_UINT32 ui32NumCheckpoints,
+                                                        PSYNC_CHECKPOINT *apsSyncCheckpoint);
+
+PVRSRV_ERROR
+SyncAddrListRollbackCheckpoints(PVRSRV_DEVICE_NODE *psDevNode, SYNC_ADDR_LIST *psList);
+
+PVRSRV_ERROR
+PVRSRVAllocSyncPrimitiveBlockKM(CONNECTION_DATA *psConnection,
+                                PVRSRV_DEVICE_NODE * psDevNode,
+                                                               SYNC_PRIMITIVE_BLOCK **ppsSyncBlk,
+                                                               IMG_UINT32 *puiSyncPrimVAddr,
+                                                               IMG_UINT32 *puiSyncPrimBlockSize,
+                                                               PMR        **ppsSyncPMR);
+
+PVRSRV_ERROR
+PVRSRVExportSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk,
+                                                                DEVMEM_EXPORTCOOKIE **psExportCookie);
+
+PVRSRV_ERROR
+PVRSRVUnexportSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk);
+
+PVRSRV_ERROR
+PVRSRVFreeSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *ppsSyncBlk);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimSetKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Index,
+                                       IMG_UINT32 ui32Value);
+
+PVRSRV_ERROR
+PVRSRVSyncAllocEventKM(CONNECTION_DATA *psConnection,
+                                          PVRSRV_DEVICE_NODE *psDevNode,
+                                          IMG_BOOL bServerSync,
+                       IMG_UINT32 ui32FWAddr,
+                       IMG_UINT32 ui32ClassNameSize,
+                       const IMG_CHAR *pszClassName);
+
+PVRSRV_ERROR
+PVRSRVSyncFreeEventKM(CONNECTION_DATA *psConnection,
+                                          PVRSRV_DEVICE_NODE *psDevNode,
+                                          IMG_UINT32 ui32FWAddr);
+
+PVRSRV_ERROR
+PVRSRVSyncRecordAddKM(CONNECTION_DATA *psConnection,
+                                         PVRSRV_DEVICE_NODE *psDevNode,
+                                         SYNC_RECORD_HANDLE *phRecord,
+                                         SYNC_PRIMITIVE_BLOCK *hServerSyncPrimBlock,
+                                         IMG_UINT32 ui32FwBlockAddr,
+                                         IMG_UINT32 ui32SyncOffset,
+                                         IMG_BOOL bServerSync,
+                                         IMG_UINT32 ui32ClassNameSize,
+                                         const IMG_CHAR *pszClassName);
+
+PVRSRV_ERROR
+PVRSRVSyncRecordRemoveByHandleKM(
+                       SYNC_RECORD_HANDLE hRecord);
+void SyncRecordLookup(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 ui32FwAddr,
+                                         IMG_CHAR * pszSyncInfo, size_t len);
+
+PVRSRV_ERROR SyncRegisterConnection(SYNC_CONNECTION_DATA **ppsSyncConnectionData);
+void SyncUnregisterConnection(SYNC_CONNECTION_DATA *ppsSyncConnectionData);
+void SyncConnectionPDumpSyncBlocks(PVRSRV_DEVICE_NODE *psDevNode, void *hSyncPrivData, PDUMP_TRANSITION_EVENT eEvent);
+
+/*!
+******************************************************************************
+@Function      SyncServerInit
+
+@Description   Per-device initialisation for the ServerSync module
+******************************************************************************/
+PVRSRV_ERROR SyncServerInit(PVRSRV_DEVICE_NODE *psDevNode);
+void SyncServerDeinit(PVRSRV_DEVICE_NODE *psDevNode);
+
+
+#if defined(PDUMP)
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset,
+                                                       IMG_UINT32 ui32Value);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset,
+                                                IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask,
+                                                PDUMP_POLL_OPERATOR eOperator,
+                                                PDUMP_FLAGS_T uiDumpFlags);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset,
+                                                IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize,
+                                                IMG_UINT64 uiBufferSize);
+
+#else  /* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset)
+{
+       PVR_UNREFERENCED_PARAMETER(psSyncBlk);
+       PVR_UNREFERENCED_PARAMETER(ui32Offset);
+       return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpValueKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset,
+                                                       IMG_UINT32 ui32Value)
+{
+       PVR_UNREFERENCED_PARAMETER(psSyncBlk);
+       PVR_UNREFERENCED_PARAMETER(ui32Offset);
+       PVR_UNREFERENCED_PARAMETER(ui32Value);
+       return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpPolKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset,
+                                                IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask,
+                                                PDUMP_POLL_OPERATOR eOperator,
+                                                PDUMP_FLAGS_T uiDumpFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(psSyncBlk);
+       PVR_UNREFERENCED_PARAMETER(ui32Offset);
+       PVR_UNREFERENCED_PARAMETER(ui32Value);
+       PVR_UNREFERENCED_PARAMETER(ui32Mask);
+       PVR_UNREFERENCED_PARAMETER(eOperator);
+       PVR_UNREFERENCED_PARAMETER(uiDumpFlags);
+       return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpCBPKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset,
+                                                IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize,
+                                                IMG_UINT64 uiBufferSize)
+{
+       PVR_UNREFERENCED_PARAMETER(psSyncBlk);
+       PVR_UNREFERENCED_PARAMETER(ui32Offset);
+       PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+       PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+       PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+       return PVRSRV_OK;
+}
+#endif /* PDUMP */
+#endif /* SYNC_SERVER_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/tlintern.h b/drivers/gpu/drm/img/img-rogue/services/server/include/tlintern.h
new file mode 100644 (file)
index 0000000..c3edce6
--- /dev/null
@@ -0,0 +1,345 @@
+/*************************************************************************/ /*!
+@File
+@Title          Transport Layer internals
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Transport Layer header used by TL internally
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef TLINTERN_H
+#define TLINTERN_H
+
+
+#include "devicemem_typedefs.h"
+#include "pvrsrv_tlcommon.h"
+#include "lock.h"
+#include "tlstream.h"
+
+/* Forward declarations */
+typedef struct _TL_SNODE_* PTL_SNODE;
+
+/* To debug buffer utilisation enable this macro here and define
+ * PVRSRV_NEED_PVR_TRACE in the server pvr_debug.c and in tutils.c
+ * before the inclusion of pvr_debug.h.
+ * Issue pvrtutils 6 on target to see stream buffer utilisation. */
+//#define TL_BUFFER_STATS 1
+
+/*! TL stream structure container.
+ *    pbyBuffer   holds the circular buffer.
+ *    ui32Read    points to the beginning of the available data, i.e. to where
+ *                  the next read will start.
+ *    ui32Write   points to the end of the data that have been committed, i.e.
+ *                  this is where new data will be written.
+ *    ui32Pending is the number of bytes reserved by the last reserve call
+ *                  which have not yet been submitted and are therefore not
+ *                  ready to be transported.
+ *    hStreamWLock - provides atomic protection for the ui32Pending & ui32Write
+ *                   members of the structure for when they are checked and/or
+ *                   updated in the context of a stream writer (producer)
+ *                   calling DoTLStreamReserve() & TLStreamCommit().
+ *                 - Reader context is not multi-threaded, only one client per
+ *                   stream is allowed. Also note the read context may be in an
+ *                   ISR which prevents a design where locks can be held in the
+ *                   AcquireData/ReleaseData() calls. Thus this lock only
+ *                   protects the stream members from simultaneous writers.
+ *
+ *      ui32Read < ui32Write <= ui32Pending
+ *        where < and <= operators are overloaded to make sense in a circular way.
+ */
+typedef struct _TL_STREAM_
+{
+       IMG_CHAR                szName[PRVSRVTL_MAX_STREAM_NAME_SIZE];  /*!< String name identifier */
+       TL_OPMODE               eOpMode;                                /*!< Mode of Operation of TL Buffer */
+
+       IMG_BOOL                bWaitForEmptyOnDestroy;                 /*!< Flag: On destroying a non-empty stream block until
+                                                                         *         stream is drained. */
+       IMG_BOOL                bNoSignalOnCommit;                      /*!< Flag: Used to avoid the TL signalling waiting consumers
+                                                                         *         that new data is available on every commit. Producers
+                                                                         *         using this flag will need to manually signal when
+                                                                         *         appropriate using the TLStreamSync() API */
+
+       void                    (*pfOnReaderOpenCallback)(void *pvArg); /*!< Optional on reader connect callback */
+       void                    *pvOnReaderOpenUserData;                /*!< On reader connect user data */
+       void                    (*pfProducerCallback)(void);            /*!< Optional producer callback of type TL_STREAM_SOURCECB */
+       void                    *pvProducerUserData;                    /*!< Producer callback user data */
+
+       struct _TL_STREAM_      *psNotifStream;                         /*!< Pointer to the stream to which notification will be sent */
+
+       volatile IMG_UINT32     ui32Read;                               /*!< Pointer to the beginning of available data */
+       volatile IMG_UINT32     ui32Write;                              /*!< Pointer to already committed data which are ready to be
+                                                                         *   copied to user space */
+       IMG_UINT32              ui32Pending;                            /*!< Count pending bytes reserved in buffer */
+       IMG_UINT32              ui32Size;                               /*!< Buffer size */
+       IMG_UINT32              ui32ThresholdUsageForSignal;            /*!< Buffer usage threshold at which a TL writer signals a blocked/
+                                                                            *    waiting reader when transitioning from empty->non-empty */
+       IMG_UINT32              ui32MaxPacketSize;                      /*!< Max TL packet size */
+       IMG_BYTE                *pbyBuffer;                             /*!< Actual data buffer */
+
+       PTL_SNODE               psNode;                                 /*!< Ptr to parent stream node */
+       DEVMEM_MEMDESC          *psStreamMemDesc;                       /*!< MemDescriptor used to allocate buffer space through PMR */
+
+       IMG_HANDLE              hProducerEvent;                         /*!< Handle to wait on if there is not enough space */
+       IMG_HANDLE              hProducerEventObj;                      /*!< Handle to signal blocked reserve calls */
+       IMG_BOOL                bSignalPending;                         /*!< Tracks if a "signal" is pending to be sent to a blocked/
+                                                                            *    waiting reader */
+
+       POS_LOCK                hStreamWLock;                           /*!< Writers Lock for ui32Pending & ui32Write*/
+       POS_LOCK                hReadLock;                              /*!< Readers Lock for bReadPending & ui32Read*/
+       IMG_BOOL                bReadPending;                           /*!< Tracks if a read operation is pending or not*/
+       IMG_BOOL                bNoWrapPermanent;                       /*!< Flag: Prevents buffer wrap and subsequent data loss
+                                                                            *    as well as resetting the read position on close. */
+
+#if defined(TL_BUFFER_STATS)
+       IMG_UINT32              ui32CntReadFails;                       /*!< Tracks how many times reader failed to acquire read lock */
+       IMG_UINT32              ui32CntReadSuccesses;                   /*!< Tracks how many times reader acquires read lock successfully */
+       IMG_UINT32              ui32CntWriteSuccesses;                  /*!< Tracks how many times writer acquires read lock successfully */
+       IMG_UINT32              ui32CntWriteWaits;                      /*!< Tracks how many times writer had to wait to acquire read lock */
+       IMG_UINT32              ui32CntNumWriteSuccess;                 /*!< Tracks how many write operations were successful*/
+       IMG_UINT32              ui32BufferUt;                           /*!< Buffer utilisation high watermark, see TL_BUFFER_STATS above */
+       IMG_UINT32              ui32MaxReserveWatermark;                /*!< Max stream reserve size that was ever requested by a writer */
+       IMG_UINT32              ui32SignalsSent;                        /*!< Number of signals that were actually sent by the write API */
+       ATOMIC_T                bNoReaderSinceFirstReserve;             /*!< Tracks if a read has been done since the buffer was last found empty */
+       IMG_UINT32              ui32TimeStart;                          /*!< Time at which a write (Reserve call) was done into an empty buffer.
+                                                                            *    Guarded by hStreamWLock. */
+       IMG_UINT32              ui32MinTimeToFullInUs;                  /*!< Minimum time taken to (nearly) fully fill an empty buffer. Guarded
+                                                                            *    by hStreamWLock. */
+       /* Behaviour counters, protected by hStreamLock in case of
+        * multi-threaded access */
+       IMG_UINT32              ui32NumCommits;     /*!< Counters used for analysing stream performance, see ++ loc */
+       IMG_UINT32              ui32SignalNotSent;  /*!< Counters used for analysing stream performance, see ++ loc */
+       IMG_UINT32              ui32ManSyncs;       /*!< Counters used for analysing stream performance, see ++ loc */
+       IMG_UINT32              ui32ProducerByteCount; /*!< Counters used for analysing stream performance, see ++ loc */
+
+       /* Not protected by the lock, inc in the reader thread which is currently singular */
+       IMG_UINT32              ui32AcquireRead1;   /*!< Counters used for analysing stream performance, see ++ loc */
+       IMG_UINT32              ui32AcquireRead2;   /*!< Counters used for analysing stream performance, see ++ loc */
+#endif
+
+} TL_STREAM, *PTL_STREAM;
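+
+/* A minimal illustrative sketch of how the ui32Read/ui32Write indices above
+ * relate to buffer occupancy. The helper name is hypothetical and the
+ * pending-reserve bookkeeping is deliberately ignored; this is only a reading
+ * aid for the invariant documented above, not driver logic.
+ *
+ *   static IMG_UINT32 ExampleTLBytesCommitted(const TL_STREAM *psStream)
+ *   {
+ *       IMG_UINT32 ui32Read  = psStream->ui32Read;
+ *       IMG_UINT32 ui32Write = psStream->ui32Write;
+ *
+ *       if (ui32Write >= ui32Read)
+ *           return ui32Write - ui32Read;                     // no wrap
+ *       return psStream->ui32Size - (ui32Read - ui32Write);  // wrapped
+ *   }
+ */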
+
+/* There needs to be enough space reserved in the buffer for 2 minimal packets,
+ * and it needs to be aligned the same way the buffer is, or there will be a
+ * compile error. */
+#define BUFFER_RESERVED_SPACE (2 * PVRSRVTL_PACKET_ALIGNMENT)
+
+/* ensure the space reserved follows the buffer's alignment */
+static_assert(!(BUFFER_RESERVED_SPACE&(PVRSRVTL_PACKET_ALIGNMENT-1)),
+                         "BUFFER_RESERVED_SPACE must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+/* Define the largest value that an unsigned integer whose size matches
+ * PVRSRVTL_PACKET_ALIGNMENT can hold */
+#define MAX_UINT 0xffffFFFF
+
+/*! Defines the value used for TL_STREAM.ui32Pending when no reserve is
+ * outstanding on the stream. */
+#define NOTHING_PENDING IMG_UINT32_MAX
+
+
+/*
+ * Transport Layer Stream Descriptor types/defs
+ */
+typedef struct _TL_STREAM_DESC_
+{
+       PTL_SNODE   psNode;         /*!< Ptr to parent stream node */
+       IMG_UINT32  ui32Flags;      /*!< Flags supplied by client on stream open */
+       IMG_HANDLE  hReadEvent;     /*!< For wait call (only used/set in reader descriptors) */
+       IMG_INT     uiRefCount;     /*!< Reference count to the SD */
+
+#if defined(TL_BUFFER_STATS)
+       /* Behaviour counters, no multi-threading protection needed as they are
+        * incremented in a single thread because only one reader is supported
+        * at present */
+       IMG_UINT32  ui32AcquireCount;  /*!< Counters used for analysing stream performance, see ++ loc */
+       IMG_UINT32  ui32NoData;        /*!< Counters used for analysing stream performance, see ++ loc */
+       IMG_UINT32  ui32NoDataSleep;   /*!< Counters used for analysing stream performance, see ++ loc */
+       IMG_UINT32  ui32Signalled;     /*!< Counters used for analysing stream performance, see ++ loc */
+       IMG_UINT32  ui32TimeoutEmpty;  /*!< Counters used for analysing stream performance, see ++ loc */
+       IMG_UINT32  ui32TimeoutData;   /*!< Counters used for analysing stream performance, see ++ loc */
+#endif
+       IMG_UINT32  ui32ReadLimit;     /*!< Limit buffer reads to data present in the
+                                        buffer at the time of stream open. */
+       IMG_UINT32  ui32ReadLen;       /*!< Size of data returned by initial Acquire */
+} TL_STREAM_DESC, *PTL_STREAM_DESC;
+
+PTL_STREAM_DESC TLMakeStreamDesc(PTL_SNODE f1, IMG_UINT32 f2, IMG_HANDLE f3);
+
+#define TL_STREAM_KM_FLAG_MASK 0xFFFF0000
+#define TL_STREAM_FLAG_TEST            0x10000000
+#define TL_STREAM_FLAG_WRAPREAD        0x00010000
+
+#define TL_STREAM_UM_FLAG_MASK 0x0000FFFF
+
+#if defined(TL_BUFFER_STATS)
+#      define TL_COUNTER_INC(a)    ((a)++)
+#      define TL_COUNTER_ADD(a,b)  ((a) += (b))
+#else
+#      define TL_COUNTER_INC(a)    (void)(0)
+#      define TL_COUNTER_ADD(a,b)  (void)(0)
+#endif
+/*
+ * Transport Layer stream list node
+ */
+typedef struct _TL_SNODE_
+{
+       struct _TL_SNODE_*      psNext;                         /*!< Linked list next element */
+       IMG_HANDLE                      hReadEventObj;          /*!< Readers 'wait for data' event */
+       PTL_STREAM                      psStream;                       /*!< TL Stream object */
+       IMG_INT                         uiWRefCount;            /*!< Stream writer reference count */
+       PTL_STREAM_DESC         psRDesc;                        /*!< Stream reader 0 or ptr only */
+       PTL_STREAM_DESC         psWDesc;                        /*!< Stream writer 0 or ptr only */
+} TL_SNODE;
+
+PTL_SNODE TLMakeSNode(IMG_HANDLE f2, TL_STREAM *f3, TL_STREAM_DESC *f4);
+
+/*
+ * Transport Layer global top types and variables
+ * Use access function to obtain pointer.
+ *
+ * hTLGDLock - provides atomicity over read/check/write operations and
+ *             sequence of operations on uiClientCnt, psHead list of SNODEs and
+ *             the immediate members in a list element SNODE structure.
+ *           - This larger scope of responsibility for this lock helps avoid
+ *             the need for a lock in the SNODE structure.
+ *           - Lock held in the client (reader) context when streams are
+ *             opened/closed and in the server (writer) context when streams
+ *             are created/open/closed.
+ */
+typedef struct _TL_GDATA_
+{
+       IMG_HANDLE hTLEventObj;        /* Global TL signal object, new streams, etc */
+
+       IMG_UINT   uiClientCnt;        /* Counter to track the number of client stream connections. */
+       PTL_SNODE  psHead;             /* List of TL streams and associated client handle */
+
+       POS_LOCK   hTLGDLock;          /* Lock for structure AND psHead SNODE list */
+} TL_GLOBAL_DATA, *PTL_GLOBAL_DATA;
+
+/*
+ * Transport Layer Internal Kernel-Mode Server API
+ */
+TL_GLOBAL_DATA* TLGGD(void);           /* TLGetGlobalData() */
+
+PVRSRV_ERROR TLInit(void);
+void TLDeInit(void);
+
+void TLAddStreamNode(PTL_SNODE psAdd);
+PTL_SNODE TLFindStreamNodeByName(const IMG_CHAR *pszName);
+PTL_SNODE TLFindStreamNodeByDesc(PTL_STREAM_DESC psDesc);
+IMG_UINT32 TLDiscoverStreamNodes(const IMG_CHAR *pszNamePattern,
+                          IMG_CHAR aaszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE],
+                          IMG_UINT32 ui32Max);
+PTL_SNODE TLFindAndGetStreamNodeByDesc(PTL_STREAM_DESC psDesc);
+void TLReturnStreamNode(PTL_SNODE psNode);
+
+/******************************************************************************
+ Function Name : TLTryRemoveStreamAndFreeStreamNode
+
+ Inputs                : PTL_SNODE     Pointer to the TL_SNODE whose stream is requested
+                       to be removed from TL_GLOBAL_DATA's list
+
+ Return Value  : IMG_TRUE      -       If the stream was made NULL and this
+                                       TL_SNODE was removed from the
+                                       TL_GLOBAL_DATA's list
+
+                 IMG_FALSE     -       If the stream wasn't made NULL as there
+                                       is a client connected to this stream
+
+ Description   : If there is no client currently connected to this stream,
+                       this function removes this TL_SNODE from the
+                       TL_GLOBAL_DATA's list. The caller is responsible for the
+                       cleanup of the TL_STREAM whose TL_SNODE may be removed.
+
+                 Otherwise, this function does nothing.
+******************************************************************************/
+IMG_BOOL  TLTryRemoveStreamAndFreeStreamNode(PTL_SNODE psRemove);
+
+/******************************************************************************
+ Function Name : TLUnrefDescAndTryFreeStreamNode
+
+ Inputs                : PTL_SNODE     Pointer to the TL_SNODE whose descriptor is
+                       requested to be removed
+                       : PTL_STREAM_DESC       Pointer to the STREAM_DESC
+
+ Return Value  : IMG_TRUE      -       If this TL_SNODE was removed from the
+                                       TL_GLOBAL_DATA's list
+
+                 IMG_FALSE     -       Otherwise
+
+ Description   : This function removes the stream descriptor from this TL_SNODE
+                       and, if there is no writer (producer context) currently bound to this
+                       stream, this function removes this TL_SNODE from the TL_GLOBAL_DATA's
+                       list. The caller is responsible for the cleanup of the TL_STREAM
+                       whose TL_SNODE may be removed
+******************************************************************************/
+IMG_BOOL TLUnrefDescAndTryFreeStreamNode(PTL_SNODE psRemove, PTL_STREAM_DESC psSD);
+
+/*
+ * Transport Layer stream interface to server part declared here to avoid
+ * circular dependency.
+ */
+IMG_UINT32 TLStreamAcquireReadPos(PTL_STREAM psStream,
+                                  IMG_BOOL bDisableCallback,
+                                  IMG_UINT32* puiReadOffset);
+PVRSRV_ERROR TLStreamAdvanceReadPos(PTL_STREAM psStream,
+                                    IMG_UINT32 uiReadLen,
+                                    IMG_UINT32 uiOrigReadLen);
+void TLStreamResetReadPos(PTL_STREAM psStream);
+
+DEVMEM_MEMDESC* TLStreamGetBufferPointer(PTL_STREAM psStream);
+IMG_BOOL TLStreamOutOfData(IMG_HANDLE psStream);
+
+/******************************************************************************
+ Function Name : TLStreamDestroy
+
+ Inputs                : PTL_STREAM    Pointer to the TL_STREAM to be destroyed
+
+ Description   : This function performs all the clean-up operations required for
+                       destruction of this stream
+******************************************************************************/
+void TLStreamDestroy(PTL_STREAM psStream);
+
+/*
+ * Test related functions
+ */
+PVRSRV_ERROR TUtilsInit(PVRSRV_DEVICE_NODE *psDeviceNode);
+PVRSRV_ERROR TUtilsDeinit(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+
+#endif /* TLINTERN_H */
+/******************************************************************************
+ End of file (tlintern.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/tlserver.h b/drivers/gpu/drm/img/img-rogue/services/server/include/tlserver.h
new file mode 100644 (file)
index 0000000..7ac2958
--- /dev/null
@@ -0,0 +1,97 @@
+/*************************************************************************/ /*!
+@File
+@Title          KM server Transport Layer implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Main bridge APIs for Transport Layer client functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef TLSERVER_H
+#define TLSERVER_H
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+
+#include "tlintern.h"
+
+/*
+ * Transport Layer Client API Kernel-Mode bridge implementation
+ */
+
+PVRSRV_ERROR TLServerConnectKM(CONNECTION_DATA *psConnection);
+PVRSRV_ERROR TLServerDisconnectKM(CONNECTION_DATA *psConnection);
+
+PVRSRV_ERROR TLServerOpenStreamKM(const IMG_CHAR* pszName,
+                          IMG_UINT32 ui32Mode,
+                          PTL_STREAM_DESC* ppsSD,
+                          PMR** ppsTLPMR);
+
+PVRSRV_ERROR TLServerCloseStreamKM(PTL_STREAM_DESC psSD);
+
+PVRSRV_ERROR TLServerDiscoverStreamsKM(const IMG_CHAR *pszNamePattern,
+                          IMG_UINT32 ui32Max,
+                          IMG_CHAR *pszStreams,
+                          IMG_UINT32 *pui32NumFound);
+
+PVRSRV_ERROR TLServerReserveStreamKM(PTL_STREAM_DESC psSD,
+                                     IMG_UINT32* ui32BufferOffset,
+                                     IMG_UINT32 ui32Size,
+                                     IMG_UINT32 ui32SizeMin,
+                                     IMG_UINT32* pui32Available);
+
+PVRSRV_ERROR TLServerCommitStreamKM(PTL_STREAM_DESC psSD,
+                                    IMG_UINT32 ui32Size);
+
+PVRSRV_ERROR TLServerAcquireDataKM(PTL_STREAM_DESC psSD,
+                          IMG_UINT32* puiReadOffset,
+                          IMG_UINT32* puiReadLen);
+
+PVRSRV_ERROR TLServerReleaseDataKM(PTL_STREAM_DESC psSD,
+                                IMG_UINT32 uiReadOffset,
+                                IMG_UINT32 uiReadLen);
+
+PVRSRV_ERROR TLServerWriteDataKM(PTL_STREAM_DESC psSD,
+                                 IMG_UINT32 ui32Size,
+                                 IMG_BYTE *pui8Data);
+
+#endif /* TLSERVER_H */
+
+/******************************************************************************
+ End of file (tlserver.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/tlstream.h b/drivers/gpu/drm/img/img-rogue/services/server/include/tlstream.h
new file mode 100644 (file)
index 0000000..911e720
--- /dev/null
@@ -0,0 +1,600 @@
+/*************************************************************************/ /*!
+@File
+@Title          Transport Layer kernel side API.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    TL provides driver components with a way to copy data from kernel
+                space to user space (e.g. screen/file).
+
+                Data can be passed to the Transport Layer through the
+                TL Stream (kernel space) API interface.
+
+                The buffer provided to every stream is a modified version of a
+                circular buffer. Which CB version is created is specified by
+                relevant flags when creating a stream. Currently three types
+                of buffer are available:
+                - TL_OPMODE_DROP_NEWER:
+                  When the buffer is full, incoming data are dropped
+                  (instead of overwriting older data) and a marker is set
+                  to let the user know that data have been lost.
+                - TL_OPMODE_BLOCK:
+                  When the circular buffer is full, reserve/write calls block
+                  until enough space is freed.
+                - TL_OPMODE_DROP_OLDEST:
+                  When the circular buffer is full, the oldest packets in the
+                  buffer are dropped and a flag is set in header of next packet
+                  to let the user know that data have been lost.
+
+                All size/space requests are in bytes. However, the actual
+                implementation uses native word sizes (i.e. 4 byte aligned).
+
+                The user does not need to provide space for the stream buffer
+                as the TL handles memory allocations and usage.
+
+                Inserting data to a stream's buffer can be done either:
+                - by using TLReserve/TLCommit: User is provided with a buffer
+                                                 to write data to.
+                - or by using TLWrite:         User provides a buffer with
+                                                 data to be committed. The TL
+                                                 copies the data from the
+                                                 buffer into the stream buffer
+                                                 and returns.
+                Users should be aware that there are implementation overheads
+                associated with every stream buffer. If you find that less
+                data are captured than expected then try increasing the
+                stream buffer size or use TLInfo to obtain buffer parameters
+                and calculate optimum required values at run time.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef TLSTREAM_H
+#define TLSTREAM_H
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_tlcommon.h"
+#include "device.h"
+
+/*! Extract TL stream opmode from the given stream create flags.
+ * The last 3 bits of the stream flags are used for storing the opmode, hence
+ * the opmode mask is set as follows. */
+#define TL_OPMODE_MASK 0x7
+
+/*
+ * NOTE: This enum is used to directly access the HTB_OPMODE_xxx values
+ * within htbserver.c.
+ * As such we *MUST* keep the values matching in order of declaration.
+ */
+/*! Opmode specifying circular buffer behaviour */
+typedef enum
+{
+       /*! Undefined operation mode */
+       TL_OPMODE_UNDEF = 0,
+
+       /*! Reject new data if the buffer is full, producer may then decide to
+        *    drop the data or retry after some time. */
+       TL_OPMODE_DROP_NEWER,
+
+       /*! When buffer is full, advance the tail/read position to accept the new
+        * reserve call (size permitting), effectively overwriting the oldest
+        * data in the circular buffer. Not supported yet. */
+       TL_OPMODE_DROP_OLDEST,
+
+       /*! Block Reserve (subsequently Write) calls if there is not enough space
+        *    until some space is freed via a client read operation. */
+       TL_OPMODE_BLOCK,
+
+       /*! For error checking */
+       TL_OPMODE_LAST
+
+} TL_OPMODE;
+
+typedef enum {
+       /* Enum to be used in conjunction with new Flags feature */
+
+       /* Flag set when Drop Oldest is set and packets have been dropped */
+       TL_FLAG_OVERWRITE_DETECTED = (1 << 0),
+       /* Prevents DoTLStreamReserve() from injecting
+        * PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED */
+       TL_FLAG_NO_WRITE_FAILED = (1 << 1),
+} TL_Flags;
+
+static_assert(TL_OPMODE_LAST <= TL_OPMODE_MASK,
+             "TL_OPMODE_LAST must not exceed TL_OPMODE_MASK");
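+
+/* A minimal illustrative sketch of extracting the opmode from the stream
+ * create flags via TL_OPMODE_MASK, as described above. The helper name is
+ * hypothetical; this is only a reading aid, not driver logic.
+ *
+ *   static TL_OPMODE ExampleTLGetOpMode(IMG_UINT32 ui32StreamFlags)
+ *   {
+ *       TL_OPMODE eMode = (TL_OPMODE)(ui32StreamFlags & TL_OPMODE_MASK);
+ *       return (eMode < TL_OPMODE_LAST) ? eMode : TL_OPMODE_UNDEF;
+ *   }
+ */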
+
+/*! Flags specifying stream behaviour */
+/*! Do not destroy the stream if there are still data that have not been
+ *     copied to user space. Block until the stream is emptied. */
+#define TL_FLAG_FORCE_FLUSH            (1U<<8)
+/*! Do not signal consumers on commit automatically when the stream buffer
+ * transitions from empty to non-empty. Producer responsible for signal when
+ * it chooses. */
+#define TL_FLAG_NO_SIGNAL_ON_COMMIT    (1U<<9)
+
+/*! When a stream has this property it never wraps around and
+ * overwrites existing data, hence it is a fixed size persistent
+ * buffer, data written is permanent. Producers need to ensure
+ * the buffer is big enough for their needs.
+ * When a stream is opened for reading the client will always
+ * find the read position at the start of the buffer/data. */
+#define TL_FLAG_PERMANENT_NO_WRAP      (1U<<10)
+
+/*! Defer allocation of stream's shared memory until first open. */
+#define TL_FLAG_ALLOCATE_ON_FIRST_OPEN (1U<<11)
+
+/*! Structure used to pass internal TL stream sizes information to users.*/
+typedef struct _TL_STREAM_INFO_
+{
+    IMG_UINT32 headerSize;          /*!< Packet header size in bytes */
+    IMG_UINT32 minReservationSize;  /*!< Minimum data size reserved in bytes */
+    IMG_UINT32 pageSize;            /*!< Page size in bytes */
+    IMG_UINT32 pageAlign;           /*!< Page alignment in bytes */
+    IMG_UINT32 maxTLpacketSize;     /*!< Max allowed TL packet size */
+} TL_STREAM_INFO, *PTL_STREAM_INFO;
+
+/*! Callback operations or notifications that a stream producer may handle
+ * when requested by the Transport Layer.
+ */
+#define TL_SOURCECB_OP_CLIENT_EOS 0x01  /*!< Client has reached end of stream,
+                                         * can any more data be supplied?
+                                         * ui32Resp ignored in this operation */
+
+/*! Function pointer type for the callback handler into the "producer" code
+ * that writes data to the TL stream.  Producer should handle the notification
+ * or operation supplied in ui32ReqOp on stream hStream. The
+ * Operations and notifications are defined above in TL_SOURCECB_OP */
+typedef PVRSRV_ERROR (*TL_STREAM_SOURCECB)(IMG_HANDLE hStream,
+               IMG_UINT32 ui32ReqOp, IMG_UINT32* ui32Resp, void* pvUser);
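+
+/* A minimal illustrative sketch of a producer callback matching
+ * TL_STREAM_SOURCECB. The callback name and its behaviour are assumptions
+ * made purely for illustration; they are not taken from the driver sources.
+ *
+ *   static PVRSRV_ERROR ExampleSourceCB(IMG_HANDLE hStream,
+ *                                       IMG_UINT32 ui32ReqOp,
+ *                                       IMG_UINT32 *pui32Resp,
+ *                                       void *pvUser)
+ *   {
+ *       PVR_UNREFERENCED_PARAMETER(hStream);
+ *       PVR_UNREFERENCED_PARAMETER(pvUser);
+ *       PVR_UNREFERENCED_PARAMETER(pui32Resp);
+ *
+ *       if (ui32ReqOp == TL_SOURCECB_OP_CLIENT_EOS)
+ *       {
+ *           // nothing more to supply; ui32Resp is ignored for this op
+ *       }
+ *       return PVRSRV_OK;
+ *   }
+ */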
+
+typedef void (*TL_STREAM_ONREADEROPENCB)(void *pvArg);
+
+/*************************************************************************/ /*!
+ @Function      TLAllocSharedMemIfNull
+ @Description   Allocates shared memory for the stream.
+ @Input         hStream     Stream handle.
+ @Return        eError      Internal services call returned eError error
+                            number.
+ @Return        PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLAllocSharedMemIfNull(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function      TLFreeSharedMem
+ @Description   Frees stream's shared memory.
+ @Input         hStream     Stream handle.
+*/ /**************************************************************************/
+void
+TLFreeSharedMem(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamCreate
+ @Description   Request the creation of a new stream and open a handle.
+                               If creating a stream which should continue to exist after the
+                               current context is finished, then TLStreamCreate must be
+                               followed by a TLStreamOpen call. In any case, the number of
+                               create/open calls must balance with the number of close calls
+                               used. This ensures the resources of a stream are released when
+                               it is no longer required.
+ @Output        phStream        Pointer to handle to store the new stream.
+ @Input         szStreamName    Name of stream, maximum length:
+                                PRVSRVTL_MAX_STREAM_NAME_SIZE.
+                                If a longer string is provided, creation fails.
+ @Input         ui32Size        Desired buffer size in bytes.
+ @Input         ui32StreamFlags Used to configure buffer behaviour. See above.
+ @Input         pfOnReaderOpenCB    Optional callback called when a client
+                                    opens this stream, may be null.
+ @Input         pvOnReaderOpenUD    Optional user data for pfOnReaderOpenCB,
+                                    may be null.
+ @Input         pfProducerCB    Optional callback, may be null.
+ @Input         pvProducerUD    Optional user data for callback, may be null.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  NULL stream handle or string name
+                                             exceeded MAX_STREAM_NAME_SIZE
+ @Return        PVRSRV_ERROR_OUT_OF_MEMORY   Failed to allocate space for
+                                             stream handle.
+ @Return        PVRSRV_ERROR_DUPLICATE_VALUE There already exists a stream with
+                                             the same stream name string.
+ @Return        eError                       Internal services call returned
+                                             eError error number.
+ @Return        PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamCreate(IMG_HANDLE *phStream,
+               const IMG_CHAR *szStreamName,
+               IMG_UINT32 ui32Size,
+               IMG_UINT32 ui32StreamFlags,
+               TL_STREAM_ONREADEROPENCB pfOnReaderOpenCB,
+               void *pvOnReaderOpenUD,
+               TL_STREAM_SOURCECB pfProducerCB,
+               void *pvProducerUD);
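+
+/* A minimal illustrative sketch of the create/close balancing described
+ * above. The stream name, buffer size, flags and the absence of callbacks
+ * are assumptions made purely for illustration, not driver code.
+ *
+ *   IMG_HANDLE   hStream;
+ *   PVRSRV_ERROR eError;
+ *
+ *   eError = TLStreamCreate(&hStream, "example_stream", 4096,
+ *                           TL_OPMODE_DROP_NEWER,
+ *                           NULL, NULL,   // no on-reader-open callback
+ *                           NULL, NULL);  // no producer callback
+ *   if (eError == PVRSRV_OK)
+ *   {
+ *       // ... reserve/commit or write data here ...
+ *       TLStreamClose(hStream);  // balances the create call
+ *   }
+ */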
+
+/*************************************************************************/ /*!
+ @Function      TLStreamOpen
+ @Description   Attach to existing stream that has already been created by a
+                  TLStreamCreate call. A handle is returned to the stream.
+ @Output        phStream        Pointer to handle to store the stream.
+ @Input         szStreamName    Name of stream, should match an already
+                                  existing stream name
+ @Return        PVRSRV_ERROR_NOT_FOUND       None of the streams matched the
+                                             requested stream name.
+                               PVRSRV_ERROR_INVALID_PARAMS  Non-NULL pointer to stream
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  Non-NULL pointer to stream
+                                             handle is required.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamOpen(IMG_HANDLE     *phStream,
+             const IMG_CHAR *szStreamName);
+
+
+/*************************************************************************/ /*!
+ @Function      TLStreamReset
+ @Description   Resets read and write pointers and pending flag.
+ @Input         hStream  Handle of the stream to reset.
+*/ /**************************************************************************/
+void TLStreamReset(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamSetNotifStream
+ @Description   Registers a "notification stream" which will be used to
+                publish information about state change of the "hStream"
+                stream. Notification can inform about events such as stream
+                open/close, etc.
+ @Input         hStream         Handle to stream to update.
+ @Input         hNotifStream    Handle to the stream which will be used for
+                                publishing notifications.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  If either of the parameters is
+                                             NULL
+ @Return        PVRSRV_OK                    Success.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamSetNotifStream(IMG_HANDLE hStream, IMG_HANDLE hNotifStream);
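
A hedged sketch of how a notification stream might be wired up: both streams are assumed to exist already, and TLStreamMarkStreamOpen (declared further down in this header) is used to publish the first event once the notification stream is registered.

#include "tlstream.h"

static PVRSRV_ERROR ExampleAttachNotifStream(IMG_HANDLE hStream,
                                             IMG_HANDLE hNotifStream)
{
	PVRSRV_ERROR eError;

	eError = TLStreamSetNotifStream(hStream, hNotifStream);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/* From here on, open/close packets for hStream are posted into
	 * hNotifStream; publish the initial "open" event. */
	return TLStreamMarkStreamOpen(hStream);
}
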
+
+/*************************************************************************/ /*!
+ @Function      TLStreamReconfigure
+ @Description   Request the stream flags controlling buffer behaviour to
+                be updated.
+                In the case where TL_OPMODE_BLOCK is to be used,
+                TLStreamCreate should be called without that flag and this
+                function used to change the stream mode once a consumer process
+                has been started. This avoids a deadlock scenario where the
+                TLStreamWrite/TLStreamReserve call will hold the Bridge Lock
+                while blocking if the TL buffer is full.
+                A stream in TL_OPMODE_BLOCK mode must also never drop the
+                Bridge Lock, as this leads to another deadlock scenario where
+                the caller of TLStreamWrite/TLStreamReserve has already
+                acquired another lock (e.g. gHandleLock) which is not dropped.
+                That thread would then be acquiring locks out of order.
+ @Input         hStream         Handle to stream to update.
+ @Input         ui32StreamFlags Flags that configure buffer behaviour. See above.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  NULL stream handle or inconsistent
+                                             stream flags.
+ @Return        PVRSRV_ERROR_NOT_READY       Stream is currently being written
+                                             to; try again later.
+ @Return        eError                       Internal services call returned
+                                             eError error number.
+ @Return        PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamReconfigure(IMG_HANDLE hStream,
+                    IMG_UINT32 ui32StreamFlags);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamClose
+ @Description   Detach from the stream associated with the given handle. If
+                  the current handle is the last one accessing the stream
+                  (i.e. the number of TLStreamCreate+TLStreamOpen calls matches
+                  the number of TLStreamClose calls) then the stream is also
+                  deleted.
+                  On return the handle is no longer valid.
+ @Input         hStream     Handle to stream that will be closed.
+ @Return        None.
+*/ /**************************************************************************/
+void
+TLStreamClose(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamReserve
+ @Description   Reserve space in stream buffer. When successful every
+                  TLStreamReserve call must be followed by a matching
+                  TLStreamCommit call. While a TLStreamCommit call is pending
+                  for a stream, subsequent TLStreamReserve calls for this
+                  stream will fail.
+ @Input         hStream         Stream handle.
+ @Output        ppui8Data       Pointer to a pointer to a location in the
+                                  buffer. The caller can then use this address
+                                  in writing data into the stream.
+ @Input         ui32Size        Number of bytes to reserve in buffer.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS NULL stream handle.
+ @Return        PVRSRV_ERROR_NOT_READY      Data previously reserved is still
+                                              pending commit.
+ @Return        PVRSRV_ERROR_STREAM_MISUSE  Misusing the stream by trying to
+                                              reserve more space than the
+                                              buffer size.
+ @Return        PVRSRV_ERROR_STREAM_FULL    The reserve size requested
+                                            is larger than the free
+                                            space.
+ @Return         PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED  The reserve size
+                                                            requested is larger
+                                                            than max TL packet size
+ @Return        PVRSRV_ERROR_STREAM_NOT_ENOUGH_SPACE Permanent stream buffer
+                                                     does not have enough space
+                                                     for the reserve.
+ @Return        PVRSRV_OK                   Success, output arguments valid.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamReserve(IMG_HANDLE hStream,
+                IMG_UINT8  **ppui8Data,
+                IMG_UINT32 ui32Size);
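
A sketch of the reserve-then-commit pattern described above: every successful TLStreamReserve must be paired with a TLStreamCommit of the same size. The payload and its size are illustrative, and memcpy stands in for whichever copy helper the driver actually uses in kernel builds.

#include <string.h>
#include "tlstream.h"

static PVRSRV_ERROR ExampleReserveCommit(IMG_HANDLE hStream,
                                         const IMG_UINT8 *pui8Payload,
                                         IMG_UINT32 ui32Size)
{
	IMG_UINT8 *pui8Dest = NULL;
	PVRSRV_ERROR eError;

	eError = TLStreamReserve(hStream, &pui8Dest, ui32Size);
	if (eError != PVRSRV_OK)
	{
		/* e.g. PVRSRV_ERROR_STREAM_FULL: retry later or drop the packet. */
		return eError;
	}

	/* Write directly into the stream buffer... */
	memcpy(pui8Dest, pui8Payload, ui32Size);

	/* ...then commit exactly what was reserved. */
	return TLStreamCommit(hStream, ui32Size);
}
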
+
+/*************************************************************************/ /*!
+ @Function      TLStreamReserve2
+ @Description   Reserve space in stream buffer. When successful every
+                  TLStreamReserve call must be followed by a matching
+                  TLStreamCommit call. While a TLStreamCommit call is pending
+                  for a stream, subsequent TLStreamReserve calls for this
+                  stream will fail.
+ @Input         hStream         Stream handle.
+ @Output        ppui8Data       Pointer to a pointer to a location in the
+                                  buffer. The caller can then use this address
+                                  in writing data into the stream.
+ @Input         ui32Size        Ideal number of bytes to reserve in buffer.
+ @Input         ui32SizeMin     Minimum number of bytes to reserve in buffer.
+ @Input         pui32Available  Optional. When supplied and the RESERVE_TOO_BIG
+                                  error is returned, this argument holds a
+                                  suggested size that the caller can retry the
+                                  reserve with for a successful allocation.
+ @Output        pbIsReaderConnected Lets writing clients know whether a reader
+                                    is connected, in case of error.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  NULL stream handle.
+ @Return        PVRSRV_ERROR_NOT_READY       Data previously reserved is still
+                                             pending commit.
+ @Return        PVRSRV_ERROR_STREAM_MISUSE   Misusing the stream by trying to
+                                             reserve more space than the
+                                             buffer size.
+ @Return        PVRSRV_ERROR_STREAM_FULL     The reserve size requested
+                                             is larger than the free
+                                             space.
+                                             Check the pui32Available
+                                             value for the correct
+                                             reserve size to use.
+ @Return         PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED   The reserve size
+                                                             requested is larger
+                                                             than max TL packet size
+ @Return        PVRSRV_ERROR_STREAM_NOT_ENOUGH_SPACE Permanent stream buffer
+                                                     does not have enough space
+                                                     for the reserve.
+ @Return        PVRSRV_OK                   Success, output arguments valid.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamReserve2(IMG_HANDLE hStream,
+                IMG_UINT8  **ppui8Data,
+                IMG_UINT32 ui32Size,
+                IMG_UINT32 ui32SizeMin,
+                IMG_UINT32* pui32Available,
+                IMG_BOOL* pbIsReaderConnected);
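
A hedged sketch of how pui32Available might be used: if the ideal size cannot be reserved, retry once with the size the stream suggests. The retry logic is illustrative only; a real producer would decide whether a smaller reserve is acceptable for its packet format.

#include "tlstream.h"

static PVRSRV_ERROR ExampleReserveAtLeast(IMG_HANDLE hStream,
                                          IMG_UINT8 **ppui8Dest,
                                          IMG_UINT32 ui32Ideal,
                                          IMG_UINT32 ui32Min)
{
	IMG_UINT32 ui32Available = 0;
	IMG_BOOL bReaderConnected = IMG_FALSE;
	PVRSRV_ERROR eError;

	eError = TLStreamReserve2(hStream, ppui8Dest, ui32Ideal, ui32Min,
	                          &ui32Available, &bReaderConnected);
	if (eError == PVRSRV_ERROR_STREAM_FULL && ui32Available >= ui32Min)
	{
		/* Retry once with the size the stream reports it can satisfy. */
		eError = TLStreamReserve2(hStream, ppui8Dest, ui32Available, ui32Min,
		                          &ui32Available, &bReaderConnected);
	}
	return eError;
}
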
+
+/*************************************************************************/ /*!
+ @Function      TLStreamReserveReturnFlags
+ @Description   Reserve space in stream buffer. When successful every
+                  TLStreamReserve call must be followed by a matching
+                  TLStreamCommit call. While a TLStreamCommit call is pending
+                  for a stream, subsequent TLStreamReserve calls for this
+                  stream will fail.
+ @Input         hStream         Stream handle.
+ @Output        ppui8Data       Pointer to a pointer to a location in the
+                                  buffer. The caller can then use this address
+                                  in writing data into the stream.
+ @Input         ui32Size        Ideal number of bytes to reserve in buffer.
+ @Output        pui32Flags      Output parameter to return flags generated within
+                                the reserve function.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamReserveReturnFlags(IMG_HANDLE hStream,
+                           IMG_UINT8  **ppui8Data,
+                           IMG_UINT32 ui32Size,
+                           IMG_UINT32 *pui32Flags);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamGetUT
+ @Description   Returns the current stream utilisation in bytes
+ @Input         hStream     Stream handle.
+ @Return        IMG_UINT32  Stream utilisation
+*/ /**************************************************************************/
+IMG_UINT32 TLStreamGetUT(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamCommit
+ @Description   Notify TL that data have been written in the stream buffer.
+                  Should always follow and match TLStreamReserve call.
+ @Input         hStream         Stream handle.
+ @Input         ui32Size        Number of bytes that have been added to the
+                                  stream.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  NULL stream handle.
+ @Return        PVRSRV_ERROR_STREAM_MISUSE   Commit results in more data
+                                             committed than the buffer size,
+                                             the stream is misused.
+ @Return        eError                       Commit was successful but
+                                             internal services call returned
+                                             eError error number.
+ @Return        PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamCommit(IMG_HANDLE hStream,
+               IMG_UINT32 ui32Size);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamWrite
+ @Description   Combined Reserve/Commit call. This function Reserves space in
+                  the specified stream buffer, copies ui32Size bytes of data
+                  from the array pui8Src points to and Commits in an "atomic"
+                  style operation.
+ @Input         hStream         Stream handle.
+ @Input         pui8Src         Source to read data from.
+ @Input         ui32Size        Number of bytes to copy and commit.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  NULL stream handle.
+ @Return        eError                       Error codes returned by either
+                                               Reserve or Commit.
+ @Return        PVRSRV_OK
+ */ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamWrite(IMG_HANDLE hStream,
+              IMG_UINT8  *pui8Src,
+              IMG_UINT32 ui32Size);
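
Where a record can be copied in one go, the combined call keeps the producer code short. A minimal sketch with an invented payload:

#include "tlstream.h"

static PVRSRV_ERROR ExampleWriteRecord(IMG_HANDLE hStream)
{
	IMG_UINT8 aui8Record[16] = { 0 };   /* illustrative payload */

	/* Reserve, copy and commit in a single "atomic"-style operation. */
	return TLStreamWrite(hStream, aui8Record, (IMG_UINT32)sizeof(aui8Record));
}
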
+
+/*************************************************************************/ /*!
+ @Function      TLStreamWriteRetFlags
+ @Description   Combined Reserve/Commit call. This function Reserves space in
+                  the specified stream buffer, copies ui32Size bytes of data
+                  from the array pui8Src points to and Commits in an "atomic"
+                  style operation. Also accepts a pointer to a bit flag value
+                  for returning write status flags.
+ @Input         hStream         Stream handle.
+ @Input         pui8Src         Source to read data from.
+ @Input         ui32Size        Number of bytes to copy and commit.
+ @Output        pui32Flags      Output parameter for write status info
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  NULL stream handle.
+ @Return        eError                       Error codes returned by either
+                                               Reserve or Commit.
+ @Return        PVRSRV_OK
+ */ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamWriteRetFlags(IMG_HANDLE hStream,
+                      IMG_UINT8 *pui8Src,
+                      IMG_UINT32 ui32Size,
+                      IMG_UINT32 *pui32Flags);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamSync
+ @Description   Signal the consumer to start acquiring data from the stream
+                buffer. Called by producers that use the flag
+                TL_FLAG_NO_SIGNAL_ON_COMMIT to manually control when
+                consumers start reading the stream.
+                Used when multiple small writes need to be batched.
+ @Input         hStream         Stream handle.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  NULL stream handle.
+ @Return        eError                       Error codes returned by either
+                                             Reserve or Commit.
+ @Return        PVRSRV_OK
+ */ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamSync(IMG_HANDLE hStream);
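
A hedged sketch of the batching pattern described above: several small writes followed by a single TLStreamSync to wake the consumer. It assumes the stream was created with the no-signal-on-commit behaviour referred to in the comment above; the record layout is invented, and error handling is elided for brevity.

#include "tlstream.h"

static void ExampleBatchedWrites(IMG_HANDLE hStream,
                                 IMG_UINT8  *pui8Records,
                                 IMG_UINT32 ui32RecordSize,
                                 IMG_UINT32 ui32NumRecords)
{
	IMG_UINT32 i;

	for (i = 0; i < ui32NumRecords; i++)
	{
		/* A real producer would check and handle each return code. */
		(void) TLStreamWrite(hStream,
		                     pui8Records + (i * ui32RecordSize),
		                     ui32RecordSize);
	}

	/* Signal the consumer once, after the whole batch. */
	(void) TLStreamSync(hStream);
}
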
+
+
+/*************************************************************************/ /*!
+ @Function      TLStreamMarkEOS
+ @Description   Insert an EOS marker packet in the given stream.
+ @Input         hStream         Stream handle.
+ @Input         bRemoveOld      If TRUE, remove the old stream record file
+                                before splitting to a new file.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS NULL stream handle.
+ @Return        eError                       Error codes returned by either
+                                             Reserve or Commit.
+ @Return        PVRSRV_OK                    Success.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamMarkEOS(IMG_HANDLE hStream, IMG_BOOL bRemoveOld);
+
+/*************************************************************************/ /*!
+@Function       TLStreamMarkStreamOpen
+@Description    Puts an *open* stream packet into hStream's notification
+                stream, if one is set; returns an error otherwise.
+@Input          hStream Stream handle.
+@Return         PVRSRV_OK on success and error code on failure
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamMarkStreamOpen(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+@Function       TLStreamMarkStreamClose
+@Description    Puts a *close* stream packet into hStream's notification
+                stream, if one is set; returns an error otherwise.
+@Input          hStream Stream handle.
+@Return         PVRSRV_OK on success and error code on failure
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamMarkStreamClose(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamInfo
+ @Description   Returns run-time information about buffer elemental sizes.
+                It sets the psInfo members accordingly; callers can use those
+                values to calculate the parameters they pass to TLStreamCreate
+                and TLStreamReserve.
+ @Input         hStream         Stream handle.
+ @Output        psInfo          Pointer to the stream info structure to fill.
+ @Return        None.
+*/ /**************************************************************************/
+void
+TLStreamInfo(IMG_HANDLE hStream, PTL_STREAM_INFO psInfo);
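
A minimal sketch of the intended use. TL_STREAM_INFO is assumed to be the structure that PTL_STREAM_INFO points to; its members are not shown in this header, so only the call itself is concrete here.

#include "tlstream.h"

static void ExampleQueryStreamInfo(IMG_HANDLE hStream)
{
	TL_STREAM_INFO sInfo;   /* assumed type behind PTL_STREAM_INFO */

	TLStreamInfo(hStream, &sInfo);

	/* The element/header sizes reported in sInfo would feed into the sizes
	 * passed to TLStreamCreate and TLStreamReserve. */
}
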
+
+/*************************************************************************/ /*!
+ @Function      TLStreamIsOpenForReading
+ @Description   Query if a stream has any readers connected.
+ @Input         hStream         Stream handle.
+ @Return        IMG_BOOL        True if at least one reader is connected,
+                                false otherwise
+*/ /**************************************************************************/
+IMG_BOOL
+TLStreamIsOpenForReading(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamOutOfData
+ @Description   Query if the stream is empty (no data waiting to be read).
+ @Input         hStream         Stream handle.
+ @Return        IMG_BOOL        True if read==write, no data waiting,
+                                false otherwise
+*/ /**************************************************************************/
+IMG_BOOL TLStreamOutOfData(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamResetProducerByteCount
+ @Description   Reset the producer byte counter on the specified stream.
+ @Input         hStream         Stream handle.
+ @Input         ui32Value       Value to reset the counter to, typically 0.
+ @Return        PVRSRV_OK                   Success.
+ @Return        PVRSRV_ERROR_STREAM_MISUSE  The counter was reset, but the
+                                            read and write positions did not
+                                            match; the stream was not empty.
+*/ /**************************************************************************/
+
+PVRSRV_ERROR
+TLStreamResetProducerByteCount(IMG_HANDLE hStream, IMG_UINT32 ui32Value);
+
+#endif /* TLSTREAM_H */
+/*****************************************************************************
+ End of file (tlstream.h)
+*****************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/tutils_km.h b/drivers/gpu/drm/img/img-rogue/services/server/include/tutils_km.h
new file mode 100644 (file)
index 0000000..d39c070
--- /dev/null
@@ -0,0 +1,172 @@
+/*************************************************************************/ /*!
+@File           tutils_km.h
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Kernel services functions for calls to tutils (testing utils)
+                layer in the server
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef TUTILS_KM_H
+#define TUTILS_KM_H
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "connection_server.h"
+#include "device.h"
+#include "pvrsrv_sync_km.h"
+
+
+PVRSRV_ERROR ServerTestIoctlKM(CONNECTION_DATA *psConnection,
+                               PVRSRV_DEVICE_NODE *psDeviceNode,
+                               IMG_UINT32  uiCmd,
+                               IMG_PBYTE   uiIn1,
+                               IMG_UINT32  uiIn2,
+                               IMG_UINT32*     puiOut1,
+                               IMG_UINT32* puiOut2);
+
+PVRSRV_ERROR PowMonTestIoctlKM(IMG_UINT32  uiCmd,
+                                 IMG_UINT32  uiIn1,
+                                 IMG_UINT32  uiIn2,
+                                 IMG_UINT32  *puiOut1,
+                                 IMG_UINT32  *puiOut2);
+
+PVRSRV_ERROR SyncCheckpointTestIoctlKM(CONNECTION_DATA *psConnection,
+                                 PVRSRV_DEVICE_NODE *psDeviceNode,
+                                 IMG_UINT32  uiCmd,
+                                 IMG_UINT32  uiIn1,
+                                 IMG_UINT32  uiIn2,
+                                 const IMG_CHAR *pszInName,
+                                 IMG_UINT32  *puiOut1,
+                                 IMG_UINT32  *puiOut2,
+                                 IMG_UINT8   *puiOut3);
+
+IMG_EXPORT
+PVRSRV_ERROR DevmemIntAllocHostMemKM(IMG_DEVMEM_SIZE_T ui32Size,
+                                     PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                     IMG_UINT32 ui32LableLength,
+                                     const IMG_CHAR *pszAllocLabel,
+                                     PMR **ppsPMR);
+
+PVRSRV_ERROR DevmemIntFreeHostMemKM(PMR *psPMR);
+
+IMG_EXPORT
+PVRSRV_ERROR PowerTestIoctlKM(IMG_UINT32  uiCmd,
+                                 IMG_UINT32  uiIn1,
+                                 IMG_UINT32  uiIn2,
+                                 IMG_UINT32  *puiOut1,
+                                 IMG_UINT32  *puiOut2);
+
+PVRSRV_ERROR TestIOCTLSyncFbFenceSignalPVR(CONNECTION_DATA * psConnection,
+                                           PVRSRV_DEVICE_NODE *psDevNode,
+                                           void *psFence);
+
+PVRSRV_ERROR TestIOCTLSyncFbFenceCreatePVR(CONNECTION_DATA * psConnection,
+                                            PVRSRV_DEVICE_NODE *psDevNode,
+                                            IMG_UINT32 uiNameLength,
+                                            const IMG_CHAR *pszName,
+                                            PVRSRV_TIMELINE iTL,
+                                            PVRSRV_FENCE *piOutFence);
+
+PVRSRV_ERROR TestIOCTLSyncFbFenceResolvePVR(CONNECTION_DATA * psConnection,
+                                            PVRSRV_DEVICE_NODE *psDevNode,
+                                            PVRSRV_FENCE iFence);
+PVRSRV_ERROR TestIOCTLSyncFbSWTimelineAdvance(CONNECTION_DATA * psConnection,
+                                              PVRSRV_DEVICE_NODE *psDevNode,
+                                              PVRSRV_TIMELINE iSWTl);
+
+PVRSRV_ERROR TestIOCTLSyncFbSWFenceCreate(CONNECTION_DATA * psConnection,
+                                          PVRSRV_DEVICE_NODE *psDevNode,
+                                          PVRSRV_TIMELINE iTl,
+                                          IMG_UINT32 uiFenceNameLength,
+                                          const IMG_CHAR *pszFenceName,
+                                          PVRSRV_FENCE *piFence);
+
+
+
+PVRSRV_ERROR TestIOCTLSyncSWTimelineFenceCreateKM(CONNECTION_DATA * psConnection,
+                                                  PVRSRV_DEVICE_NODE *psDevNode,
+                                                  PVRSRV_TIMELINE sTimeline,
+                                                  IMG_UINT32 uiNameLength,
+                                                  const IMG_CHAR *pszFenceName,
+                                                  PVRSRV_FENCE *psOutFence);
+
+PVRSRV_ERROR TestIOCTLSyncSWTimelineAdvanceKM(CONNECTION_DATA * psConnection,
+                                              PVRSRV_DEVICE_NODE *psDevNode,
+                                              PVRSRV_TIMELINE sTimeline);
+
+PVRSRV_ERROR TestIOCTLIsTimelineValidKM(PVRSRV_TIMELINE sTimeline,
+                                        IMG_BOOL *bResult);
+
+PVRSRV_ERROR TestIOCTLIsFenceValidKM(PVRSRV_FENCE sFence,
+                                     IMG_BOOL *bResult);
+
+PVRSRV_ERROR TestIOCTLSyncCheckpointResolveFenceKM(CONNECTION_DATA * psConnection,
+                                                   PVRSRV_DEVICE_NODE *psDevNode,
+                                                   PVRSRV_FENCE hFence,
+                                                   IMG_UINT32 *pui32NumSyncCheckpoints);
+
+PVRSRV_ERROR TestIOCTLSyncCheckpointCreateFenceKM(CONNECTION_DATA *psConnection,
+                                                  PVRSRV_DEVICE_NODE *psDevNode,
+                                                  IMG_CHAR *pszFenceName,
+                                                  PVRSRV_TIMELINE hTimeline,
+                                                  PVRSRV_FENCE *phOutFence,
+                                                  IMG_UINT64 *puiUpdateFenceUID);
+
+PVRSRV_ERROR TestIOCTLWriteByteKM(IMG_BYTE ui8WriteData);
+
+PVRSRV_ERROR TestIOCTLReadByteKM(IMG_BYTE *pui8ReadData);
+
+typedef IMG_UINT32 DI_CONTEXT;
+PVRSRV_ERROR TestIOCTLHandleArray2CreateKM(DI_CONTEXT **ppsTestResources);
+PVRSRV_ERROR TestIOCTLHandleArray10CreateKM(DI_CONTEXT **ppsTestResources);
+PVRSRV_ERROR TestIOCTLHandleCleanupDestroy(DI_CONTEXT *psTestResource);
+PVRSRV_ERROR TestIOCTLHandleArray2CreateCPKM(DI_CONTEXT **ppsTestResources);
+PVRSRV_ERROR TestIOCTLHandleCleanupDestroyCP(DI_CONTEXT *psTestResource);
+PVRSRV_ERROR TestIOCTLHandleArray2CreatePPKM(CONNECTION_DATA    *psConnection,
+                                             PVRSRV_DEVICE_NODE *psDeviceNode,
+                                             DI_CONTEXT **ppsTestResources);
+PVRSRV_ERROR TestIOCTLHandleArray2CreateLUKM(DI_CONTEXT *psLookedup,
+                                             DI_CONTEXT **ppsTestResources);
+PVRSRV_ERROR TestIOCTLHandleArrayNCreate(IMG_UINT32 ui32NumResourcesRequested,
+                                         IMG_UINT32 *pui32NumResourcesCreated,
+                                         DI_CONTEXT **ppsTestResources);
+PVRSRV_ERROR TestIOCTLHandleArrayNCreateCP(IMG_UINT32 ui32NumResourcesRequested,
+                                           IMG_UINT32 *pui32NumResourcesCreated,
+                                           DI_CONTEXT **ppsTestResources);
+
+#endif /* TUTILS_KM_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/vmm_impl.h b/drivers/gpu/drm/img/img-rogue/services/server/include/vmm_impl.h
new file mode 100644 (file)
index 0000000..9ad5ade
--- /dev/null
@@ -0,0 +1,186 @@
+/*************************************************************************/ /*!
+@File           vmm_impl.h
+@Title          Common VM manager API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides common VM manager definitions that need to
+                be shared by system virtualization layer itself and modules that
+                implement the actual VM manager types.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef VMM_IMPL_H
+#define VMM_IMPL_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+typedef enum _VMM_CONF_PARAM_
+{
+       VMM_CONF_PRIO_OSID0 = 0,
+       VMM_CONF_PRIO_OSID1 = 1,
+       VMM_CONF_PRIO_OSID2 = 2,
+       VMM_CONF_PRIO_OSID3 = 3,
+       VMM_CONF_PRIO_OSID4 = 4,
+       VMM_CONF_PRIO_OSID5 = 5,
+       VMM_CONF_PRIO_OSID6 = 6,
+       VMM_CONF_PRIO_OSID7 = 7,
+       VMM_CONF_HCS_DEADLINE = 8
+} VMM_CONF_PARAM;
+
+/*
+       Virtual machine manager (hypervisor) para-virtualization (PVZ) connection:
+               - Type is implemented by host and guest drivers
+                       - Assumes synchronous function call semantics
+                       - Unidirectional semantics
+                               - For Host  (vmm -> host)
+                               - For Guest (guest -> vmm)
+                       - Parameters can be IN/OUT/INOUT
+
+               - Host pvz entries are pre-implemented by IMG
+                       - For host implementation, see vmm_pvz_server.c
+                       - Called by host side hypercall handler or VMM
+
+               - Guest pvz entries are supplied by 3rd-party
+                       - These are specific to hypervisor (VMM) type
+                       - These implement the actual hypercalls mechanism
+
+       Para-virtualization (PVZ) call runtime sequence:
+               1 - Guest driver in guest VM calls PVZ function
+               1.1 - Guest PVZ connection calls the guest VM manager type
+               1.2 - Guest VM manager type:
+               1.2.1 - Performs any pre-processing like parameter packing, etc.
+               1.2.2 - Issues hypercall (blocking synchronous call)
+
+               2 - VM Manager (hypervisor) receives hypercall
+               2.1 - Hypercall handler:
+               2.1.1 - Performs any pre-processing
+               2.1.2 - If call terminates in VM Manager: perform action and return from hypercall
+               2.1.3 - Otherwise forward to host driver (implementation specific call)
+
+               3 - Host driver receives call from VM Manager
+               3.1 - Host VM manager type:
+               3.1.1 - Performs any pre-processing like parameter unpacking, etc.
+               3.1.2 - Acquires host driver PVZ handler and calls the appropriate entry
+               3.2 - Host PVZ connection calls corresponding host system virtualisation layer
+               3.3 - Host driver system virtualisation layer:
+               3.3.1 - Perform action requested by guest driver
+               3.3.2 - Return to host VM Manager type
+               3.4 - Host VM Manager type:
+               3.4.1 - Prepare to return from hypercall
+               3.4.2 - Perform any post-processing like result packing, etc.
+               3.4.3 - Issue return from hypercall
+
+               4 - VM Manager (hypervisor)
+               4.1 - Perform any post-processing
+               4.2 - Return control to guest driver
+
+               5 - Guest driver in guest VM
+               5.1 - Perform any post-processing like parameter unpacking, etc.
+               5.2 - Continue execution in guest VM
+ */
+typedef struct _VMM_PVZ_CONNECTION_
+{
+       struct {
+               /*
+                  This pair must be implemented if the guest is responsible
+                  for allocating the physical heap that backs its firmware
+                  allocations; this is the default configuration. The physical
+                  heap is allocated within the guest VM IPA space, and this
+                  IPA address/size must be translated into the host's IPA space
+                  by the VM manager before forwarding the request to the host.
+                  If not implemented, return PVRSRV_ERROR_NOT_IMPLEMENTED.
+                */
+               PVRSRV_ERROR (*pfnMapDevPhysHeap)(IMG_UINT32 ui32FuncID,
+                                                                                 IMG_UINT32 ui32DevID,
+                                                                                 IMG_UINT64 ui64Size,
+                                                                                 IMG_UINT64 ui64PAddr);
+
+               PVRSRV_ERROR (*pfnUnmapDevPhysHeap)(IMG_UINT32 ui32FuncID,
+                                                                                       IMG_UINT32 ui32DevID);
+       } sClientFuncTab;
+
+       struct {
+               /*
+                       Corresponding server side entries to handle guest PVZ calls
+                       NOTE:
+                                - These take an additional ui32OSID parameter
+                                - OSID determination is the responsibility of the VM manager
+                                - The actual OSID value must be supplied by the VM manager
+                                       - This can be done on the client, VMM or host side
+                                - It must be supplied before the host pvz function(s) are called
+                                - The host pvz functions validate incoming OSID values
+                */
+               PVRSRV_ERROR (*pfnMapDevPhysHeap)(IMG_UINT32 ui32OSID,
+                                                                                 IMG_UINT32 ui32FuncID,
+                                                                                 IMG_UINT32 ui32DevID,
+                                                                                 IMG_UINT64 ui64Size,
+                                                                                 IMG_UINT64 ui64PAddr);
+
+               PVRSRV_ERROR (*pfnUnmapDevPhysHeap)(IMG_UINT32 ui32OSID,
+                                                                                       IMG_UINT32 ui32FuncID,
+                                                                                       IMG_UINT32 ui32DevID);
+       } sServerFuncTab;
+
+       struct {
+               /*
+                  This is used by the VM manager to report pertinent runtime guest VM
+                  information to the host; these events may in turn be forwarded to
+                  the firmware
+                */
+               PVRSRV_ERROR (*pfnOnVmOnline)(IMG_UINT32 ui32OSID);
+
+               PVRSRV_ERROR (*pfnOnVmOffline)(IMG_UINT32 ui32OSID);
+
+               PVRSRV_ERROR (*pfnVMMConfigure)(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue);
+
+       } sVmmFuncTab;
+} VMM_PVZ_CONNECTION;
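
To make the table layout concrete, here is a hedged sketch of how a hypothetical third-party VMM integration might populate the guest-side client function table. MyHypervisorHypercall and gsMyPvzConnection are invented names; only the wiring into VMM_PVZ_CONNECTION reflects this header.

#include "vmm_impl.h"

/* Invented stand-in for the 3rd-party hypercall mechanism; a real VMM module
 * would issue the blocking hypercall to the hypervisor here. */
static PVRSRV_ERROR MyHypervisorHypercall(IMG_UINT32 ui32FuncID,
                                          IMG_UINT32 ui32DevID,
                                          IMG_UINT64 ui64Size,
                                          IMG_UINT64 ui64PAddr)
{
	(void) ui32FuncID; (void) ui32DevID; (void) ui64Size; (void) ui64PAddr;
	return PVRSRV_ERROR_NOT_IMPLEMENTED;
}

static PVRSRV_ERROR MyGuestMapDevPhysHeap(IMG_UINT32 ui32FuncID,
                                          IMG_UINT32 ui32DevID,
                                          IMG_UINT64 ui64Size,
                                          IMG_UINT64 ui64PAddr)
{
	/* Any parameter packing would happen before the hypercall is issued. */
	return MyHypervisorHypercall(ui32FuncID, ui32DevID, ui64Size, ui64PAddr);
}

static PVRSRV_ERROR MyGuestUnmapDevPhysHeap(IMG_UINT32 ui32FuncID,
                                            IMG_UINT32 ui32DevID)
{
	return MyHypervisorHypercall(ui32FuncID, ui32DevID, 0, 0);
}

static VMM_PVZ_CONNECTION gsMyPvzConnection = {
	.sClientFuncTab = {
		.pfnMapDevPhysHeap   = MyGuestMapDevPhysHeap,
		.pfnUnmapDevPhysHeap = MyGuestUnmapDevPhysHeap,
	},
	/* sServerFuncTab and sVmmFuncTab are populated on the host side. */
};
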
+
+/*!
+*******************************************************************************
+ @Function      VMMCreatePvzConnection() and VMMDestroyPvzConnection()
+ @Description   Both the guest and the VM manager call this to obtain a PVZ
+                connection: the guest calls it to obtain a connection to the
+                VM manager, and the VM manager calls it to obtain a connection
+                to the host.
+ @Return        PVRSRV_OK on success. Otherwise, a PVRSRV error code
+******************************************************************************/
+PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection);
+void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection);
+
+#endif /* VMM_IMPL_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/vmm_pvz_client.h b/drivers/gpu/drm/img/img-rogue/services/server/include/vmm_pvz_client.h
new file mode 100644 (file)
index 0000000..688e9f3
--- /dev/null
@@ -0,0 +1,77 @@
+/*************************************************************************/ /*!
+@File           vmm_pvz_client.h
+@Title          Guest VM manager client para-virtualization routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header provides guest VMM client para-virtualization APIs
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef VMM_PVZ_CLIENT_H
+#define VMM_PVZ_CLIENT_H
+
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "vmm_pvz_common.h"
+#include "vmm_impl.h"
+
+/*!
+*******************************************************************************
+ @Function      PvzClientMapDevPhysHeap
+ @Description   The guest front-end to initiate a pfnMapDevPhysHeap PVZ call
+                to the host.
+ @Return        PVRSRV_OK on success. Otherwise, a PVRSRV error code
+******************************************************************************/
+PVRSRV_ERROR
+PvzClientMapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+*******************************************************************************
+ @Function      PvzClientUnmapDevPhysHeap
+ @Description   The guest front-end to initiate a pfnUnmapDevPhysHeap PVZ call
+                to the host.
+ @Return        PVRSRV_OK on success. Otherwise, a PVRSRV error code
+******************************************************************************/
+PVRSRV_ERROR
+PvzClientUnmapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig);
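
A hedged sketch of the guest-side calling pattern: map the firmware physical heap while bringing the device up, and unmap it again on teardown. The surrounding setup/teardown steps are placeholders, not part of this header.

#include "vmm_pvz_client.h"

static PVRSRV_ERROR ExampleGuestMapFwHeap(PVRSRV_DEVICE_CONFIG *psDevConfig)
{
	PVRSRV_ERROR eError;

	eError = PvzClientMapDevPhysHeap(psDevConfig);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/* ... guest firmware setup would continue here ... */

	/* Tear down again when the device is destroyed. */
	return PvzClientUnmapDevPhysHeap(psDevConfig);
}
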
+
+#endif /* VMM_PVZ_CLIENT_H */
+
+/******************************************************************************
+ End of file (vmm_pvz_client.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/vmm_pvz_common.h b/drivers/gpu/drm/img/img-rogue/services/server/include/vmm_pvz_common.h
new file mode 100644 (file)
index 0000000..82ab50d
--- /dev/null
@@ -0,0 +1,65 @@
+/*************************************************************************/ /*!
+@File           vmm_pvz_common.h
+@Title          Common VM manager function IDs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header provides VM manager para-virtualization function IDs and
+                definitions of their payload structures, if appropriate.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef VMM_PVZ_COMMON_H
+#define VMM_PVZ_COMMON_H
+
+#include "img_types.h"
+
+#define PVZ_BRIDGE_DEFAULT                                     0UL
+#define PVZ_BRIDGE_MAPDEVICEPHYSHEAP           (PVZ_BRIDGE_DEFAULT + 1)
+#define PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP         (PVZ_BRIDGE_MAPDEVICEPHYSHEAP   + 1)
+#define PVZ_BRIDGE_LAST                                                (PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP + 1)
+
+typedef struct _PVZ_BRIDGEPARA_MAPDEVICEPHYSHEAP
+{
+       IMG_UINT64      ui64MemBase;
+       IMG_UINT32      ui32OSID;
+} PVZ_BRIDGEPARA_MAPDEVICEPHYSHEAP;
+
+#endif /* VMM_PVZ_COMMON_H */
+
+/*****************************************************************************
+ End of file (vmm_pvz_common.h)
+*****************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/vmm_pvz_server.h b/drivers/gpu/drm/img/img-rogue/services/server/include/vmm_pvz_server.h
new file mode 100644 (file)
index 0000000..58223a0
--- /dev/null
@@ -0,0 +1,121 @@
+/*************************************************************************/ /*!
+@File           vmm_pvz_server.h
+@Title          VM manager para-virtualization interface helper routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header provides API(s) available to the VM manager; these must
+                be called to close the loop during guest para-virtualization
+                calls.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef VMM_PVZ_SERVER_H
+#define VMM_PVZ_SERVER_H
+
+#include "vmm_impl.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "vmm_pvz_common.h"
+
+/*!
+*******************************************************************************
+ @Function      PvzServerMapDevPhysHeap
+ @Description   The VM manager calls this in response to guest PVZ interface
+                call pfnMapDevPhysHeap.
+ @Return        PVRSRV_OK on success. Otherwise, a PVRSRV error code
+******************************************************************************/
+PVRSRV_ERROR
+PvzServerMapDevPhysHeap(IMG_UINT32 ui32OSID,
+                                               IMG_UINT32 ui32FuncID,
+                                               IMG_UINT32 ui32DevID,
+                                               IMG_UINT64 ui64Size,
+                                               IMG_UINT64 ui64PAddr);
+
+/*!
+*******************************************************************************
+ @Function      PvzServerUnmapDevPhysHeap
+ @Description   The VM manager calls this in response to guest PVZ interface
+                call pfnUnmapDevPhysHeap.
+ @Return        PVRSRV_OK on success. Otherwise, a PVRSRV error code
+******************************************************************************/
+PVRSRV_ERROR
+PvzServerUnmapDevPhysHeap(IMG_UINT32 ui32OSID,
+                                                 IMG_UINT32 ui32FuncID,
+                                                 IMG_UINT32 ui32DevID);
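
A hedged sketch of step 3 of the call sequence documented in vmm_impl.h: a host-side VMM integration receives a forwarded guest hypercall and completes it through these server entry points. The dispatcher name is invented; the function IDs come from vmm_pvz_common.h, which this header includes.

#include "vmm_pvz_server.h"

static PVRSRV_ERROR MyHostHandleGuestPvzCall(IMG_UINT32 ui32OSID,
                                             IMG_UINT32 ui32FuncID,
                                             IMG_UINT32 ui32DevID,
                                             IMG_UINT64 ui64Size,
                                             IMG_UINT64 ui64PAddr)
{
	/* ui32OSID is assumed to have been determined by the VM manager before
	 * this point, as required by the sServerFuncTab notes in vmm_impl.h. */
	switch (ui32FuncID)
	{
		case PVZ_BRIDGE_MAPDEVICEPHYSHEAP:
			return PvzServerMapDevPhysHeap(ui32OSID, ui32FuncID, ui32DevID,
			                               ui64Size, ui64PAddr);
		case PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP:
			return PvzServerUnmapDevPhysHeap(ui32OSID, ui32FuncID, ui32DevID);
		default:
			return PVRSRV_ERROR_INVALID_PARAMS;
	}
}
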
+
+/*!
+*******************************************************************************
+ @Function      PvzServerOnVmOnline
+ @Description   The VM manager calls this when a guest VM comes online.
+                The host driver will initialize the FW if it has not done so
+                already.
+ @Return        PVRSRV_OK on success. Otherwise, a PVRSRV error code
+******************************************************************************/
+PVRSRV_ERROR
+PvzServerOnVmOnline(IMG_UINT32 ui32OSID);
+
+/*!
+*******************************************************************************
+ @Function      PvzServerOnVmOffline
+ @Description   The VM manager calls this when a guest VM is about to go
+                offline. The VM manager may have unmapped the GPU kick
+                register for that VM, but must keep its GPU memory mapped
+                until the call returns. Once the function returns, the FW
+                holds no references for that VM, none of its workloads are
+                running on the GPU, and it is safe to remove that VM's memory.
+ @Return        PVRSRV_OK on success. PVRSRV_ERROR_TIMEOUT if for some reason
+                the FW is taking too long to clean up the resources of the
+                OSID. Otherwise, a PVRSRV_ERROR code.
+******************************************************************************/
+PVRSRV_ERROR
+PvzServerOnVmOffline(IMG_UINT32 ui32OSID);
+
+/*!
+*******************************************************************************
+ @Function      PvzServerVMMConfigure
+ @Description   The VM manager calls this to configure several parameters like
+                HCS or isolation.
+ @Return        PVRSRV_OK on success. Otherwise, a PVRSRV error code
+******************************************************************************/
+PVRSRV_ERROR
+PvzServerVMMConfigure(VMM_CONF_PARAM eVMMParamType,
+                      IMG_UINT32 ui32ParamValue);
+
+#endif /* VMM_PVZ_SERVER_H */
+
+/******************************************************************************
+ End of file (vmm_pvz_server.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/vz_vm.h b/drivers/gpu/drm/img/img-rogue/services/server/include/vz_vm.h
new file mode 100644 (file)
index 0000000..3a8042c
--- /dev/null
@@ -0,0 +1,61 @@
+/*************************************************************************/ /*!
+@File           vz_vm.h
+@Title          System virtualization VM support APIs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides VM management support APIs
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef VZ_VM_H
+#define VZ_VM_H
+
+#include "vmm_impl.h"
+
+bool IsVmOnline(IMG_UINT32 ui32OSID);
+
+PVRSRV_ERROR PvzOnVmOnline(IMG_UINT32 ui32OSid);
+
+PVRSRV_ERROR PvzOnVmOffline(IMG_UINT32 ui32OSid);
+
+PVRSRV_ERROR PvzVMMConfigure(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue);
+
+#endif /* VZ_VM_H */
+
+/*****************************************************************************
+ End of file (vz_vm.h)
+*****************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/server/include/vz_vmm_pvz.h b/drivers/gpu/drm/img/img-rogue/services/server/include/vz_vmm_pvz.h
new file mode 100644 (file)
index 0000000..abc6470
--- /dev/null
@@ -0,0 +1,79 @@
+/*************************************************************************/ /*!
+@File           vz_vmm_pvz.h
+@Title          System virtualization VM manager management APIs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides VM manager para-virtualization management APIs
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef VZ_VMM_PVZ_H
+#define VZ_VMM_PVZ_H
+
+#include "img_types.h"
+#include "vmm_impl.h"
+
+/*!
+*******************************************************************************
+ @Function      PvzConnectionInit() and PvzConnectionDeInit()
+ @Description   PvzConnectionInit initializes the VM manager para-virtualization
+                connection, which is used subsequently for communication between
+                guest and host; depending on the underlying VM setup, this could
+                be either a hyper-call or a cross-VM call
+ @Return        PVRSRV_OK on success. Otherwise, a PVRSRV error code
+******************************************************************************/
+PVRSRV_ERROR PvzConnectionInit(PVRSRV_DEVICE_CONFIG *psDevConfig);
+void PvzConnectionDeInit(void);
+
+/*!
+*******************************************************************************
+ @Function      PvzConnectionAcquire() and PvzConnectionRelease()
+ @Description   These functions acquire/release a handle to the VM manager
+                para-virtualization connection used to make a pvz call; on the
+                client, use it to make the actual pvz call, and on the server
+                handler / VM manager, use it to complete the processing for the
+                pvz call or to make a VM manager to host pvz bridge call
+ @Return        VMM_PVZ_CONNECTION* on success. Otherwise NULL
+******************************************************************************/
+VMM_PVZ_CONNECTION* PvzConnectionAcquire(void);
+void PvzConnectionRelease(VMM_PVZ_CONNECTION *psPvzConnection);
+
+#endif /* VZ_VMM_PVZ_H */
+
+/******************************************************************************
+ End of file (vz_vmm_pvz.h)
+******************************************************************************/
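A minimal usage sketch of the acquire/release pairing described above, assuming only the declarations in vz_vmm_pvz.h; the ExamplePvzCall wrapper below is hypothetical and the actual pvz call through the connection is left as a placeholder.

/* Hypothetical caller, shown only to illustrate the acquire/release pairing
 * documented for PvzConnectionAcquire()/PvzConnectionRelease(). */
static PVRSRV_ERROR ExamplePvzCall(void)
{
        VMM_PVZ_CONNECTION *psPvzConnection = PvzConnectionAcquire();

        if (psPvzConnection == NULL)
        {
                return PVRSRV_ERROR_INVALID_PARAMS;
        }

        /* ... issue the pvz call through the acquired connection here ... */

        PvzConnectionRelease(psPvzConnection);
        return PVRSRV_OK;
}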
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/common/devicemem.c b/drivers/gpu/drm/img/img-rogue/services/shared/common/devicemem.c
new file mode 100644 (file)
index 0000000..1516b94
--- /dev/null
@@ -0,0 +1,2962 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Front End (nominally Client side part, but now invokable
+                from server too) of device memory management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#include "devicemem.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "allocmem.h"
+#include "ra.h"
+#include "osfunc.h"
+#include "osmmap.h"
+#include "devicemem_utils.h"
+#include "client_mm_bridge.h"
+#include "client_cache_bridge.h"
+#include "services_km.h"
+#include "pvrsrv_memallocflags_internal.h"
+
+#if defined(PDUMP)
+#if defined(__KERNEL__)
+#include "pdump_km.h"
+#else
+#include "pdump_um.h"
+#endif
+#include "devicemem_pdump.h"
+#endif
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+#include "client_ri_bridge.h"
+#endif
+#include "client_devicememhistory_bridge.h"
+#include "info_page_client.h"
+
+#include "rgx_heaps.h"
+#if defined(__KERNEL__)
+#include "pvrsrv.h"
+#include "rgxdefs_km.h"
+#include "rgx_bvnc_defs_km.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "pvr_ricommon.h"
+#include "pvrsrv_apphint.h"
+#include "oskm_apphint.h"
+#include "srvcore.h"
+#if defined(__linux__)
+#include "linux/kernel.h"
+#endif
+#else
+#include "srvcore_intern.h"
+#include "rgxdefs.h"
+#endif
+
+#if defined(__KERNEL__) && defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+extern PVRSRV_ERROR RIDumpAllKM(void);
+#endif
+
+#if defined(__KERNEL__)
+#define GET_ERROR_STRING(eError) PVRSRVGetErrorString(eError)
+#else
+#define GET_ERROR_STRING(eError) PVRSRVGetErrorString(eError)
+#endif
+
+#if defined(__KERNEL__)
+/* Derive the virtual from the hPMR */
+static
+IMG_UINT64 _GetPremappedVA(PMR *psPMR, PVRSRV_DEVICE_NODE *psDevNode)
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT64 ui64OptionalMapAddress = DEVICEMEM_UTILS_NO_ADDRESS;
+
+       IMG_DEV_PHYADDR sDevAddr;
+       IMG_BOOL bValid;
+       PHYS_HEAP *psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN];
+       IMG_DEV_PHYADDR sHeapAddr;
+
+       eError = PhysHeapGetDevPAddr(psPhysHeap, &sHeapAddr);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapGetDevPAddr", fail);
+
+#if defined(PVR_PMR_TRANSLATE_UMA_ADDRESSES)
+{
+       if (PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_UMA ||
+                       PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_DMA)
+       {
+               IMG_DEV_PHYADDR sDevPAddrCorrected;
+
+               PhysHeapCpuPAddrToDevPAddr(psPhysHeap, 1, &sDevPAddrCorrected, (IMG_CPU_PHYADDR *)&sHeapAddr);
+               sHeapAddr.uiAddr = sDevPAddrCorrected.uiAddr;
+       }
+}
+#endif
+
+       eError = PMRLockSysPhysAddresses(psPMR);
+       PVR_LOG_GOTO_IF_ERROR(eError, "PMRLockSysPhysAddr", fail);
+
+       eError = PMR_DevPhysAddr(psPMR, OSGetPageShift(), 1, 0, &sDevAddr, &bValid);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_LOG_IF_ERROR(eError, "PMR_DevPhysAddr");
+               eError = PMRUnlockSysPhysAddresses(psPMR);
+               PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddr");
+               goto fail;
+       }
+
+       eError = PMRUnlockSysPhysAddresses(psPMR);
+       PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddr");
+
+       ui64OptionalMapAddress = RGX_FIRMWARE_RAW_HEAP_BASE | (sDevAddr.uiAddr - sHeapAddr.uiAddr);
+
+       PVR_DPF((PVR_DBG_ALLOC, "%s: sDevAddr.uiAddr = 0x%"IMG_UINT64_FMTSPECx" sHeapAddr.uiAddr = 0x%"IMG_UINT64_FMTSPECx" => ui64OptionalMapAddress = 0x%"IMG_UINT64_FMTSPECx,
+                __func__, sDevAddr.uiAddr, sHeapAddr.uiAddr, ui64OptionalMapAddress));
+fail:
+       return ui64OptionalMapAddress;
+}
+#endif
+
+/*****************************************************************************
+ *                    Sub allocation internals                               *
+ *****************************************************************************/
+static INLINE PVRSRV_MEMALLOCFLAGS_T
+DevmemOverrideFlagsOrPassThrough(SHARED_DEV_CONNECTION hDevConnection, PVRSRV_MEMALLOCFLAGS_T uiFlags)
+{
+#if defined(__KERNEL__) && defined(RGX_FEATURE_GPU_CPU_COHERENCY)
+       /*
+        *  Override the requested memory flags of FW allocations only;
+        *  non-FW allocations pass through unmodified.
+        *
+        *  On fully coherent platforms:
+        *    - We upgrade uncached, CPU-only cached or GPU-only cached to
+        *      full coherency. This gives caching improvements for free.
+        *
+        *  On ace-lite platforms:
+        *    - If the allocation is not CPU cached, then there is nothing
+        *      for the GPU to snoop regardless of the GPU cache setting.
+        *    - If the allocation is not GPU cached, then the SLC will not
+        *      be used and will not snoop the CPU even if it is CPU cached.
+        *    - Therefore only the GPU setting can be upgraded to coherent
+        *      if it is already GPU cached incoherent and the CPU is cached.
+        *
+        *  All other platforms:
+        *    - Do not modify the allocation flags.
+        */
+       if (PVRSRV_CHECK_FW_MAIN(uiFlags))
+       {
+               PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDevConnection;
+
+               if (PVRSRVSystemSnoopingOfDeviceCache(psDevNode->psDevConfig) &&
+                   PVRSRVSystemSnoopingOfCPUCache(psDevNode->psDevConfig))
+               {
+                       /* Clear existing flags, mark the allocation as fully coherent. */
+                       uiFlags &= ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK | PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK);
+                       uiFlags |= PVRSRV_MEMALLOCFLAG_CACHE_COHERENT;
+               }
+               else if ((PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) || PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags)) &&
+                        (PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags)) &&
+                        PVRSRVSystemSnoopingOfCPUCache(psDevNode->psDevConfig) &&
+                        psDevNode->eDevFabricType == PVRSRV_DEVICE_FABRIC_ACELITE)
+               {
+                       /* Upgrade the allocation from GPU cached incoherent to GPU cached coherent. */
+                       uiFlags &= ~PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK;
+                       uiFlags |= PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT;
+               }
+       }
+#else
+       PVR_UNREFERENCED_PARAMETER(hDevConnection);
+#endif
+
+       return uiFlags;
+}
+
+static INLINE void
+CheckAnnotationLength(const IMG_CHAR *pszAnnotation)
+{
+       IMG_UINT32 length = OSStringLength(pszAnnotation);
+
+       if (length >= DEVMEM_ANNOTATION_MAX_LEN)
+       {
+               PVR_DPF((PVR_DBG_WARNING, "%s: Annotation \"%s\" has been truncated to %d characters from %d characters",
+                               __func__, pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN - 1, length));
+       }
+}
+
+static PVRSRV_ERROR
+AllocateDeviceMemory(SHARED_DEV_CONNECTION hDevConnection,
+               IMG_UINT32 uiLog2Quantum,
+               IMG_DEVMEM_SIZE_T uiSize,
+               IMG_DEVMEM_SIZE_T uiChunkSize,
+               IMG_UINT32 ui32NumPhysChunks,
+               IMG_UINT32 ui32NumVirtChunks,
+               IMG_UINT32 *pui32MappingTable,
+               IMG_DEVMEM_ALIGN_T uiAlign,
+               PVRSRV_MEMALLOCFLAGS_T uiFlags,
+               IMG_BOOL bExportable,
+               const IMG_CHAR *pszAnnotation,
+               DEVMEM_IMPORT **ppsImport)
+{
+       DEVMEM_IMPORT *psImport;
+       PVRSRV_MEMALLOCFLAGS_T uiOutFlags;
+       IMG_HANDLE hPMR;
+       PVRSRV_ERROR eError;
+
+       eError = DevmemImportStructAlloc(hDevConnection,
+                       &psImport);
+       PVR_GOTO_IF_ERROR(eError, failAlloc);
+
+       /* Check the shift value is not too big (it must be less than the bit width of unsigned long long) */
+       PVR_ASSERT(uiLog2Quantum < sizeof(unsigned long long) * 8);
+       /* Check the size is a multiple of the quantum */
+       PVR_ASSERT((uiSize & ((1ULL<<uiLog2Quantum)-1)) == 0);
+
+       CheckAnnotationLength(pszAnnotation);
+
+       /* Pass only the PMR flags down */
+       uiOutFlags = uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK;
+       eError = BridgePhysmemNewRamBackedPMR(GetBridgeHandle(hDevConnection),
+                       uiSize,
+                       uiChunkSize,
+                       ui32NumPhysChunks,
+                       ui32NumVirtChunks,
+                       pui32MappingTable,
+                       uiLog2Quantum,
+                       uiOutFlags,
+                       OSStringNLength(pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN - 1) + 1,
+                       pszAnnotation,
+                       OSGetCurrentProcessID(),
+                       &hPMR,
+                       PDUMP_NONE,
+                       &uiOutFlags);
+
+       if (eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Failed to allocate memory for %s (%s)",
+                               __func__,
+                               pszAnnotation,
+                               PVRSRVGETERRORSTRING(eError)));
+               goto failPMR;
+       }
+
+       uiFlags &= ~PVRSRV_PHYS_HEAP_HINT_MASK;
+       uiFlags |= (uiOutFlags & PVRSRV_PHYS_HEAP_HINT_MASK);
+
+       DevmemImportStructInit(psImport,
+                       uiSize,
+                       uiAlign,
+                       uiFlags,
+                       hPMR,
+                       bExportable ? DEVMEM_PROPERTIES_EXPORTABLE : 0);
+
+       *ppsImport = psImport;
+       return PVRSRV_OK;
+
+failPMR:
+       DevmemImportDiscard(psImport);
+failAlloc:
+       PVR_ASSERT(eError != PVRSRV_OK);
+
+       return eError;
+}
+
+
+/*****************************************************************************
+ *                    Sub allocation internals                               *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DeviceMemChangeSparse(DEVMEM_MEMDESC *psMemDesc,
+               IMG_UINT32 ui32AllocPageCount,
+               IMG_UINT32 *paui32AllocPageIndices,
+               IMG_UINT32 ui32FreePageCount,
+               IMG_UINT32 *pauiFreePageIndices,
+               SPARSE_MEM_RESIZE_FLAGS uiSparseFlags)
+{
+       PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
+       DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+       SHARED_DEV_CONNECTION hDevConnection;
+       IMG_HANDLE hPMR;
+       IMG_HANDLE hSrvDevMemHeap;
+       POS_LOCK hLock;
+       IMG_DEV_VIRTADDR sDevVAddr;
+       IMG_CPU_VIRTADDR pvCpuVAddr;
+       DEVMEM_PROPERTIES_T uiProperties;
+
+       if (NULL == psImport)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Sparse memory import", __func__));
+               goto e0;
+       }
+
+       hDevConnection = psImport->hDevConnection;
+       hPMR = psImport->hPMR;
+       hLock = psImport->hLock;
+       sDevVAddr = psImport->sDeviceImport.sDevVAddr;
+       pvCpuVAddr = psImport->sCPUImport.pvCPUVAddr;
+
+       if (NULL == hDevConnection)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Bridge handle", __func__));
+               goto e0;
+       }
+
+       if (NULL == hPMR)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Invalid PMR handle", __func__));
+               goto e0;
+       }
+
+       if ((uiSparseFlags & SPARSE_RESIZE_BOTH) && (0 == sDevVAddr.uiAddr))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Device Virtual Map", __func__));
+               goto e0;
+       }
+
+       if ((uiSparseFlags & SPARSE_MAP_CPU_ADDR) && (NULL == pvCpuVAddr))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Invalid CPU Virtual Map", __func__));
+               goto e0;
+       }
+
+       uiProperties = GetImportProperties(psMemDesc->psImport);
+
+       if (uiProperties & DEVMEM_PROPERTIES_SECURE)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Secure buffers currently do not support sparse changes",
+                               __func__));
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               goto e0;
+       }
+
+       if (uiProperties & DEVMEM_PROPERTIES_NO_LAYOUT_CHANGE)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: This memory descriptor doesn't support sparse changes",
+                               __func__));
+               eError = PVRSRV_ERROR_INVALID_REQUEST;
+               goto e0;
+       }
+
+#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE
+       if (psMemDesc->sCPUMemDesc.ui32RefCount > 0)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: This memory descriptor is mapped more than once (refcnt: %u) into "
+                               "CPU Address space.\nRelease all CPU maps of this object and retry...",
+                               __func__, psMemDesc->sCPUMemDesc.ui32RefCount));
+               eError = PVRSRV_ERROR_OBJECT_STILL_REFERENCED;
+               goto e0;
+       }
+#endif
+
+       hSrvDevMemHeap = psImport->sDeviceImport.psHeap->hDevMemServerHeap;
+
+       OSLockAcquire(hLock);
+
+       eError = BridgeChangeSparseMem(GetBridgeHandle(hDevConnection),
+                       hSrvDevMemHeap,
+                       hPMR,
+                       ui32AllocPageCount,
+                       paui32AllocPageIndices,
+                       ui32FreePageCount,
+                       pauiFreePageIndices,
+                       uiSparseFlags,
+                       psImport->uiFlags,
+                       sDevVAddr,
+                       (IMG_UINT64)((uintptr_t)pvCpuVAddr));
+
+       OSLockRelease(hLock);
+
+       if (eError != PVRSRV_OK)
+       {
+               goto e0;
+       }
+
+       if (GetInfoPageDebugFlags(psMemDesc->psImport->hDevConnection) & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED)
+       {
+               BridgeDevicememHistorySparseChange(GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+                               psMemDesc->psImport->hPMR,
+                               psMemDesc->uiOffset,
+                               psMemDesc->sDeviceMemDesc.sDevVAddr,
+                               psMemDesc->uiAllocSize,
+                               psMemDesc->szText,
+                               DevmemGetHeapLog2PageSize(psImport->sDeviceImport.psHeap),
+                               ui32AllocPageCount,
+                               paui32AllocPageIndices,
+                               ui32FreePageCount,
+                               pauiFreePageIndices,
+                               psMemDesc->ui32AllocationIndex,
+                               &psMemDesc->ui32AllocationIndex);
+       }
+
+e0:
+       return eError;
+}
+
+static void
+FreeDeviceMemory(DEVMEM_IMPORT *psImport)
+{
+       DevmemImportStructRelease(psImport);
+}
+
+static PVRSRV_ERROR
+SubAllocImportAlloc(RA_PERARENA_HANDLE hArena,
+               RA_LENGTH_T uiSize,
+               RA_FLAGS_T _flags,
+               const IMG_CHAR *pszAnnotation,
+               /* returned data */
+               RA_BASE_T *puiBase,
+               RA_LENGTH_T *puiActualSize,
+               RA_PERISPAN_HANDLE *phImport)
+{
+       /* When suballocations need a new lump of memory, the RA calls
+          back here.  Later, in the kernel, we must construct a new PMR
+          and a pairing between the new lump of virtual memory and the
+          PMR (whether or not such PMR is backed by physical memory) */
+       DEVMEM_HEAP *psHeap;
+       DEVMEM_IMPORT *psImport;
+       IMG_DEVMEM_ALIGN_T uiAlign;
+       PVRSRV_ERROR eError;
+       IMG_UINT32 ui32MappingTable = 0;
+       PVRSRV_MEMALLOCFLAGS_T uiFlags = (PVRSRV_MEMALLOCFLAGS_T) _flags;
+       IMG_UINT64 ui64OptionalMapAddress = DEVICEMEM_UTILS_NO_ADDRESS;
+
+       /* Per-arena private handle is, for us, the heap */
+       psHeap = hArena;
+
+       /* align to the l.s.b. of the size...  e.g. 96kiB aligned to
+          32kiB. NB: There is an argument to say that the RA should never
+          ask us for Non-power-of-2 size anyway, but I don't want to make
+          that restriction arbitrarily now */
+       uiAlign = uiSize & ~(uiSize-1);
+
+       /* Technically this is only required for guest drivers due to
+          fw heaps being pre-allocated and pre-mapped resulting in
+          a 1:1 (i.e. virtual : physical) offset correlation but we
+          force this behaviour for all drivers to maintain consistency
+          (i.e. heap->VA uiAlign <= heap->PA uiLog2Quantum) */
+       if (uiAlign > (IMG_DEVMEM_ALIGN_T)(1ULL << psHeap->uiLog2Quantum))
+       {
+               uiAlign = (IMG_DEVMEM_ALIGN_T)(1ULL << psHeap->uiLog2Quantum);
+       }
+
+       /* The RA should not have invoked us with a size that is not a
+          multiple of the quantum anyway */
+       PVR_ASSERT((uiSize & ((1ULL<<psHeap->uiLog2Quantum)-1)) == 0);
+
+       eError = AllocateDeviceMemory(psHeap->psCtx->hDevConnection,
+                       psHeap->uiLog2Quantum,
+                       uiSize,
+                       uiSize,
+                       1,
+                       1,
+                       &ui32MappingTable,
+                       uiAlign,
+                       uiFlags,
+                       IMG_FALSE,
+                       "PMR sub-allocated",
+                       &psImport);
+       PVR_GOTO_IF_ERROR(eError, failAlloc);
+
+#if defined(PDUMP) && defined(DEBUG)
+#if defined(__KERNEL__)
+       PDUMPCOMMENTWITHFLAGS(PMR_DeviceNode((PMR*)psImport->hPMR), PDUMP_CONT,
+                       "Created PMR for sub-allocations with handle ID: 0x%p Annotation: \"%s\" (PID %u)",
+                       psImport->hPMR, pszAnnotation, OSGetCurrentProcessID());
+#else
+       PDUMPCOMMENTF(psHeap->psCtx->hDevConnection, PDUMP_FLAGS_CONTINUOUS,
+                       "Created PMR for sub-allocations with handle ID: %p Annotation: \"%s\" (PID %u)",
+                       psImport->hPMR, pszAnnotation, OSGetCurrentProcessID());
+#endif
+#else
+       PVR_UNREFERENCED_PARAMETER(pszAnnotation);
+#endif
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+       if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI))
+       {
+#if defined(__KERNEL__)
+               PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psHeap->psCtx->hDevConnection;
+               PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDevNode->pvDevice;
+
+               PVR_ASSERT(PVRSRV_CHECK_FW_MAIN(uiFlags));
+
+               /* If allocation is made by the Kernel from the firmware heap, account for it
+                * under the PVR_SYS_ALLOC_PID.
+                */
+               if ((psHeap == psDevInfo->psFirmwareMainHeap) || (psHeap == psDevInfo->psFirmwareConfigHeap))
+               {
+                       eError = BridgeRIWritePMREntryWithOwner (GetBridgeHandle(psImport->hDevConnection),
+                                       psImport->hPMR,
+                                       PVR_SYS_ALLOC_PID);
+                       PVR_LOG_IF_ERROR(eError, "BridgeRIWritePMREntryWithOwner");
+               }
+               else
+#endif
+               {
+                       eError = BridgeRIWritePMREntry (GetBridgeHandle(psImport->hDevConnection),
+                                       psImport->hPMR);
+                       PVR_LOG_IF_ERROR(eError, "BridgeRIWritePMREntry");
+               }
+       }
+#endif
+
+#if defined(__KERNEL__)
+       if (psHeap->bPremapped)
+       {
+               ui64OptionalMapAddress = _GetPremappedVA(psImport->hPMR, psHeap->psCtx->hDevConnection);
+       }
+#endif
+
+       /*
+               Suballocations always get mapped into the device as we need to
+               key the RA off something, and as we can't export suballocations
+               there is no valid reason to request an allocation and not map it
+        */
+       eError = DevmemImportStructDevMap(psHeap,
+                       IMG_TRUE,
+                       psImport,
+                       ui64OptionalMapAddress);
+       PVR_GOTO_IF_ERROR(eError, failMap);
+
+       OSLockAcquire(psImport->hLock);
+       /* Mark this import struct as zeroed/poisoned so we can save some PDump
+        * LDBs and do not have to CPU map + memset() */
+       if (uiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC)
+       {
+               psImport->uiProperties |= DEVMEM_PROPERTIES_IMPORT_IS_ZEROED;
+       }
+       else if (uiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC)
+       {
+               psImport->uiProperties |= DEVMEM_PROPERTIES_IMPORT_IS_POISONED;
+       }
+       psImport->uiProperties |= DEVMEM_PROPERTIES_IMPORT_IS_CLEAN;
+       OSLockRelease(psImport->hLock);
+
+       *puiBase = psImport->sDeviceImport.sDevVAddr.uiAddr;
+       *puiActualSize = uiSize;
+       *phImport = psImport;
+
+       return PVRSRV_OK;
+
+       /* error exit paths follow */
+
+failMap:
+       FreeDeviceMemory(psImport);
+failAlloc:
+
+       return eError;
+}
+
+static void
+SubAllocImportFree(RA_PERARENA_HANDLE hArena,
+               RA_BASE_T uiBase,
+               RA_PERISPAN_HANDLE hImport)
+{
+       DEVMEM_IMPORT *psImport = hImport;
+#if !defined(PVRSRV_NEED_PVR_ASSERT)
+       PVR_UNREFERENCED_PARAMETER(hArena);
+       PVR_UNREFERENCED_PARAMETER(uiBase);
+#endif
+
+       PVR_ASSERT(psImport != NULL);
+       PVR_ASSERT(hArena == psImport->sDeviceImport.psHeap);
+       PVR_ASSERT(uiBase == psImport->sDeviceImport.sDevVAddr.uiAddr);
+
+       (void) DevmemImportStructDevUnmap(psImport);
+       (void) DevmemImportStructRelease(psImport);
+}
+
+/*****************************************************************************
+ *                    Devmem context internals                               *
+ *****************************************************************************/
+
+static PVRSRV_ERROR
+PopulateContextFromBlueprint(struct DEVMEM_CONTEXT_TAG *psCtx,
+               DEVMEM_HEAPCFGID uiHeapBlueprintID)
+{
+       PVRSRV_ERROR eError;
+       PVRSRV_ERROR eError2;
+       struct DEVMEM_HEAP_TAG **ppsHeapArray;
+       IMG_UINT32 uiNumHeaps;
+       IMG_UINT32 uiHeapsToUnwindOnError;
+       IMG_UINT32 uiHeapIndex;
+       IMG_DEV_VIRTADDR sDevVAddrBase;
+       IMG_CHAR aszHeapName[DEVMEM_HEAPNAME_MAXLENGTH];
+       IMG_DEVMEM_SIZE_T uiHeapLength;
+       IMG_DEVMEM_SIZE_T uiReservedRegionLength;
+       IMG_DEVMEM_LOG2ALIGN_T uiLog2DataPageSize;
+       IMG_DEVMEM_LOG2ALIGN_T uiLog2ImportAlignment;
+
+       eError = DevmemHeapCount(psCtx->hDevConnection,
+                       uiHeapBlueprintID,
+                       &uiNumHeaps);
+       PVR_GOTO_IF_ERROR(eError, e0);
+
+       if (uiNumHeaps == 0)
+       {
+               ppsHeapArray = NULL;
+       }
+       else
+       {
+               ppsHeapArray = OSAllocMem(sizeof(*ppsHeapArray) * uiNumHeaps);
+               PVR_GOTO_IF_NOMEM(ppsHeapArray, eError, e0);
+       }
+
+       uiHeapsToUnwindOnError = 0;
+
+       for (uiHeapIndex = 0; uiHeapIndex < uiNumHeaps; uiHeapIndex++)
+       {
+               eError = DevmemHeapDetails(psCtx->hDevConnection,
+                               uiHeapBlueprintID,
+                               uiHeapIndex,
+                               &aszHeapName[0],
+                               sizeof(aszHeapName),
+                               &sDevVAddrBase,
+                               &uiHeapLength,
+                               &uiReservedRegionLength,
+                               &uiLog2DataPageSize,
+                               &uiLog2ImportAlignment);
+               PVR_GOTO_IF_ERROR(eError, e1);
+
+               eError = DevmemCreateHeap(psCtx,
+                               sDevVAddrBase,
+                               uiHeapLength,
+                               uiReservedRegionLength,
+                               uiLog2DataPageSize,
+                               uiLog2ImportAlignment,
+                               aszHeapName,
+                               uiHeapBlueprintID,
+                               &ppsHeapArray[uiHeapIndex]);
+               PVR_GOTO_IF_ERROR(eError, e1);
+
+               uiHeapsToUnwindOnError = uiHeapIndex + 1;
+       }
+
+       psCtx->uiAutoHeapCount = uiNumHeaps;
+       psCtx->ppsAutoHeapArray = ppsHeapArray;
+
+       PVR_ASSERT(psCtx->uiNumHeaps >= psCtx->uiAutoHeapCount);
+       PVR_ASSERT(psCtx->uiAutoHeapCount == uiNumHeaps);
+
+       return PVRSRV_OK;
+
+       /* error exit paths */
+e1:
+       for (uiHeapIndex = 0; uiHeapIndex < uiHeapsToUnwindOnError; uiHeapIndex++)
+       {
+               eError2 = DevmemDestroyHeap(ppsHeapArray[uiHeapIndex]);
+               PVR_ASSERT(eError2 == PVRSRV_OK);
+       }
+
+       if (uiNumHeaps != 0)
+       {
+               OSFreeMem(ppsHeapArray);
+       }
+
+e0:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+static PVRSRV_ERROR
+UnpopulateContextFromBlueprint(struct DEVMEM_CONTEXT_TAG *psCtx)
+{
+       PVRSRV_ERROR eReturn = PVRSRV_OK;
+       PVRSRV_ERROR eError2;
+       IMG_UINT32 uiHeapIndex;
+       IMG_BOOL bDoCheck = IMG_TRUE;
+#if defined(__KERNEL__)
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+       if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+       {
+               bDoCheck = IMG_FALSE;
+       }
+#endif
+
+       for (uiHeapIndex = 0; uiHeapIndex < psCtx->uiAutoHeapCount; uiHeapIndex++)
+       {
+               if (!psCtx->ppsAutoHeapArray[uiHeapIndex])
+               {
+                       continue;
+               }
+
+               eError2 = DevmemDestroyHeap(psCtx->ppsAutoHeapArray[uiHeapIndex]);
+               if (eError2 != PVRSRV_OK)
+               {
+                       eReturn = eError2;
+               }
+               else
+               {
+                       psCtx->ppsAutoHeapArray[uiHeapIndex] = NULL;
+               }
+       }
+
+       if ((!bDoCheck || (eReturn == PVRSRV_OK)) && psCtx->ppsAutoHeapArray)
+       {
+               OSFreeMem(psCtx->ppsAutoHeapArray);
+               psCtx->ppsAutoHeapArray = NULL;
+               psCtx->uiAutoHeapCount = 0;
+       }
+
+       return eReturn;
+}
+
+/*****************************************************************************
+ *                    Devmem context functions                               *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemCreateContext(SHARED_DEV_CONNECTION hDevConnection,
+               DEVMEM_HEAPCFGID uiHeapBlueprintID,
+               DEVMEM_CONTEXT **ppsCtxPtr)
+{
+       PVRSRV_ERROR            eError;
+       DEVMEM_CONTEXT          *psCtx;
+       /* handle to the server-side counterpart of the device memory
+          context (specifically, for handling mapping to device MMU) */
+       IMG_HANDLE                      hDevMemServerContext;
+       IMG_HANDLE                      hPrivData;
+       IMG_BOOL                        bHeapCfgMetaId = (uiHeapBlueprintID == DEVMEM_HEAPCFG_META);
+
+       PVR_GOTO_IF_NOMEM(ppsCtxPtr, eError, e0);
+
+       psCtx = OSAllocMem(sizeof(*psCtx));
+       PVR_GOTO_IF_NOMEM(psCtx, eError, e0);
+
+       psCtx->uiNumHeaps = 0;
+
+       psCtx->hDevConnection = hDevConnection;
+
+       /* Create (server-side) Device Memory context */
+       eError = BridgeDevmemIntCtxCreate(GetBridgeHandle(psCtx->hDevConnection),
+                       bHeapCfgMetaId,
+                       &hDevMemServerContext,
+                       &hPrivData,
+                       &psCtx->ui32CPUCacheLineSize);
+       PVR_GOTO_IF_ERROR(eError, e1);
+
+       psCtx->hDevMemServerContext = hDevMemServerContext;
+       psCtx->hPrivData = hPrivData;
+
+       /* automagic heap creation */
+       psCtx->uiAutoHeapCount = 0;
+
+       eError = PopulateContextFromBlueprint(psCtx, uiHeapBlueprintID);
+       PVR_GOTO_IF_ERROR(eError, e2);
+
+       *ppsCtxPtr = psCtx;
+
+       PVR_ASSERT(psCtx->uiNumHeaps == psCtx->uiAutoHeapCount);
+       return PVRSRV_OK;
+
+       /* error exit paths follow */
+
+e2:
+       PVR_ASSERT(psCtx->uiAutoHeapCount == 0);
+       PVR_ASSERT(psCtx->uiNumHeaps == 0);
+       BridgeDevmemIntCtxDestroy(GetBridgeHandle(psCtx->hDevConnection), hDevMemServerContext);
+
+e1:
+       OSFreeMem(psCtx);
+
+e0:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAcquireDevPrivData(DEVMEM_CONTEXT *psCtx,
+               IMG_HANDLE *hPrivData)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_GOTO_IF_INVALID_PARAM(psCtx, eError, e0);
+       PVR_GOTO_IF_INVALID_PARAM(hPrivData, eError, e0);
+
+       *hPrivData = psCtx->hPrivData;
+       return PVRSRV_OK;
+
+e0:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemReleaseDevPrivData(DEVMEM_CONTEXT *psCtx)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_GOTO_IF_INVALID_PARAM(psCtx, eError, e0);
+       return PVRSRV_OK;
+
+e0:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemFindHeapByName(const struct DEVMEM_CONTEXT_TAG *psCtx,
+               const IMG_CHAR *pszHeapName,
+               struct DEVMEM_HEAP_TAG **ppsHeapRet)
+{
+       IMG_UINT32 uiHeapIndex;
+
+       /* N.B.  This func is only useful for finding "automagic" heaps by name */
+       for (uiHeapIndex = 0;
+                       uiHeapIndex < psCtx->uiAutoHeapCount;
+                       uiHeapIndex++)
+       {
+               if (!OSStringNCompare(psCtx->ppsAutoHeapArray[uiHeapIndex]->pszName, pszHeapName, OSStringLength(psCtx->ppsAutoHeapArray[uiHeapIndex]->pszName) + 1))
+               {
+                       *ppsHeapRet = psCtx->ppsAutoHeapArray[uiHeapIndex];
+                       return PVRSRV_OK;
+               }
+       }
+
+       return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemDestroyContext(DEVMEM_CONTEXT *psCtx)
+{
+       PVRSRV_ERROR eError;
+       IMG_BOOL bDoCheck = IMG_TRUE;
+
+#if defined(__KERNEL__)
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+       if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+       {
+               bDoCheck = IMG_FALSE;
+       }
+#endif
+
+       PVR_RETURN_IF_INVALID_PARAM(psCtx);
+
+       eError = UnpopulateContextFromBlueprint(psCtx);
+       if (bDoCheck && eError != PVRSRV_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: UnpopulateContextFromBlueprint failed (%d) leaving %d heaps",
+                               __func__, eError, psCtx->uiNumHeaps));
+               goto e1;
+       }
+
+       eError = DestroyServerResource(psCtx->hDevConnection,
+                                      NULL,
+                                      BridgeDevmemIntCtxDestroy,
+                                      psCtx->hDevMemServerContext);
+       if (bDoCheck)
+       {
+               PVR_LOG_GOTO_IF_ERROR(eError, "BridgeDevMemIntCtxDestroy", e1);
+
+               /* should be no more heaps left */
+               if (psCtx->uiNumHeaps)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Additional heaps remain in DEVMEM_CONTEXT",
+                                       __func__));
+                       eError = PVRSRV_ERROR_DEVICEMEM_ADDITIONAL_HEAPS_IN_CONTEXT;
+                       goto e1;
+               }
+       }
+
+       OSCachedMemSet(psCtx, 0, sizeof(*psCtx));
+       OSFreeMem(psCtx);
+
+e1:
+       return eError;
+}
+
+/*****************************************************************************
+ *                 Devmem heap query functions                               *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemHeapConfigCount(SHARED_DEV_CONNECTION hDevConnection,
+               IMG_UINT32 *puiNumHeapConfigsOut)
+{
+       PVRSRV_ERROR eError;
+       eError = BridgeHeapCfgHeapConfigCount(GetBridgeHandle(hDevConnection),
+                       puiNumHeapConfigsOut);
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemHeapCount(SHARED_DEV_CONNECTION hDevConnection,
+               IMG_UINT32 uiHeapConfigIndex,
+               IMG_UINT32 *puiNumHeapsOut)
+{
+       PVRSRV_ERROR eError;
+       eError = BridgeHeapCfgHeapCount(GetBridgeHandle(hDevConnection),
+                       uiHeapConfigIndex,
+                       puiNumHeapsOut);
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemHeapConfigName(SHARED_DEV_CONNECTION hDevConnection,
+               IMG_UINT32 uiHeapConfigIndex,
+               IMG_CHAR *pszConfigNameOut,
+               IMG_UINT32 uiConfigNameBufSz)
+{
+       PVRSRV_ERROR eError;
+       eError = BridgeHeapCfgHeapConfigName(GetBridgeHandle(hDevConnection),
+                       uiHeapConfigIndex,
+                       uiConfigNameBufSz,
+                       pszConfigNameOut);
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemHeapDetails(SHARED_DEV_CONNECTION hDevConnection,
+               IMG_UINT32 uiHeapConfigIndex,
+               IMG_UINT32 uiHeapIndex,
+               IMG_CHAR *pszHeapNameOut,
+               IMG_UINT32 uiHeapNameBufSz,
+               IMG_DEV_VIRTADDR *psDevVAddrBaseOut,
+               IMG_DEVMEM_SIZE_T *puiHeapLengthOut,
+               IMG_DEVMEM_SIZE_T *puiReservedRegionLengthOut,
+               IMG_UINT32 *puiLog2DataPageSizeOut,
+               IMG_UINT32 *puiLog2ImportAlignmentOut)
+{
+       PVRSRV_ERROR eError;
+
+       eError = BridgeHeapCfgHeapDetails(GetBridgeHandle(hDevConnection),
+                       uiHeapConfigIndex,
+                       uiHeapIndex,
+                       uiHeapNameBufSz,
+                       pszHeapNameOut,
+                       psDevVAddrBaseOut,
+                       puiHeapLengthOut,
+                       puiReservedRegionLengthOut,
+                       puiLog2DataPageSizeOut,
+                       puiLog2ImportAlignmentOut);
+
+       VG_MARK_INITIALIZED(pszHeapNameOut, uiHeapNameBufSz);
+
+       return eError;
+}
+
+/*****************************************************************************
+ *                    Devmem heap functions                                  *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetHeapInt(DEVMEM_HEAP *psHeap,
+               IMG_HANDLE *phDevmemHeap)
+{
+       PVR_RETURN_IF_INVALID_PARAM(psHeap);
+       *phDevmemHeap = psHeap->hDevMemServerHeap;
+       return PVRSRV_OK;
+}
+
+/* See devicemem.h for important notes regarding the arguments
+   to this function */
+IMG_INTERNAL PVRSRV_ERROR
+DevmemCreateHeap(DEVMEM_CONTEXT *psCtx,
+               IMG_DEV_VIRTADDR sBaseAddress,
+               IMG_DEVMEM_SIZE_T uiLength,
+               IMG_DEVMEM_SIZE_T uiReservedRegionLength,
+               IMG_UINT32 ui32Log2Quantum,
+               IMG_UINT32 ui32Log2ImportAlignment,
+               const IMG_CHAR *pszName,
+               DEVMEM_HEAPCFGID uiHeapBlueprintID,
+               DEVMEM_HEAP **ppsHeapPtr)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       PVRSRV_ERROR eError2;
+       DEVMEM_HEAP *psHeap;
+       /* handle to the server-side counterpart of the device memory heap
+          (specifically, for handling mapping to device MMU) */
+       IMG_HANDLE hDevMemServerHeap;
+       IMG_UINT32 ui32Policy = RA_POLICY_DEFAULT, ui32PolicyVMRA;
+
+       IMG_CHAR aszBuf[100];
+       IMG_CHAR *pszStr;
+       IMG_UINT32 ui32pszStrSize;
+
+       if (ppsHeapPtr == NULL ||
+           uiReservedRegionLength % DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY)
+       {
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, e0);
+       }
+
+       ui32PolicyVMRA = RA_POLICY_DEFAULT;
+
+       PVR_ASSERT(uiReservedRegionLength + DEVMEM_HEAP_MINIMUM_SIZE <= uiLength);
+
+       psHeap = OSAllocMem(sizeof(*psHeap));
+       PVR_GOTO_IF_NOMEM(psHeap, eError, e0);
+
+       /* Need to keep local copy of heap name, so caller may free theirs */
+       ui32pszStrSize = OSStringLength(pszName) + 1;
+       pszStr = OSAllocMem(ui32pszStrSize);
+       PVR_GOTO_IF_NOMEM(pszStr, eError, e1);
+       OSStringLCopy(pszStr, pszName, ui32pszStrSize);
+       psHeap->pszName = pszStr;
+
+       psHeap->uiSize = uiLength;
+       psHeap->uiReservedRegionSize = uiReservedRegionLength;
+       psHeap->sBaseAddress = sBaseAddress;
+       psHeap->bPremapped = IMG_FALSE;
+       OSAtomicWrite(&psHeap->hImportCount, 0);
+
+       OSSNPrintf(aszBuf, sizeof(aszBuf),
+                       "NDM heap '%s' (suballocs) ctx:%p",
+                       pszName, psCtx);
+       ui32pszStrSize = OSStringLength(aszBuf) + 1;
+       pszStr = OSAllocMem(ui32pszStrSize);
+       PVR_GOTO_IF_NOMEM(pszStr, eError, e2);
+       OSStringLCopy(pszStr, aszBuf, ui32pszStrSize);
+       psHeap->pszSubAllocRAName = pszStr;
+
+#if defined(__KERNEL__)
+       if (uiHeapBlueprintID == DEVMEM_HEAPCFG_META)
+       {
+               void *pvAppHintState = NULL;
+               IMG_UINT32 ui32FirmwarePolicydefault = 0, ui32FirmwarePolicy=0;
+               OSCreateKMAppHintState(&pvAppHintState);
+               OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, DevMemFWHeapPolicy,
+                                               &ui32FirmwarePolicydefault, &ui32FirmwarePolicy);
+               ui32PolicyVMRA = ui32Policy = ui32FirmwarePolicy;
+               OSFreeKMAppHintState(pvAppHintState);
+       }
+#endif
+
+#if defined(PDUMP)
+       /* The META heap is shared globally so a single physical memory import
+        * may be used to satisfy allocations of different processes.
+        * This is problematic when PDumping because the physical memory
+        * import used to satisfy a new allocation may actually have been
+        * imported (and thus the PDump MALLOC generated) before the PDump
+        * client was started, leading to the MALLOC being missing.
+        *
+        * This is solved by disabling splitting of imports for the META physmem
+        * RA, meaning that every firmware allocation gets its own import, thus
+        * ensuring the MALLOC is present for every allocation made within the
+        * pdump capture range
+        */
+       if (uiHeapBlueprintID == DEVMEM_HEAPCFG_META)
+       {
+               ui32Policy |= RA_POLICY_NO_SPLIT;
+       }
+#else
+       PVR_UNREFERENCED_PARAMETER(uiHeapBlueprintID);
+#endif
+
+       psHeap->psSubAllocRA = RA_Create(psHeap->pszSubAllocRAName,
+                       /* Subsequent imports: */
+                       ui32Log2Quantum,
+                       RA_LOCKCLASS_2,
+                       SubAllocImportAlloc,
+                       SubAllocImportFree,
+                       (RA_PERARENA_HANDLE) psHeap,
+                       ui32Policy);
+       if (psHeap->psSubAllocRA == NULL)
+       {
+               eError = PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA;
+               goto e3;
+       }
+
+       psHeap->uiLog2ImportAlignment = ui32Log2ImportAlignment;
+       psHeap->uiLog2Quantum = ui32Log2Quantum;
+
+       if (!OSStringNCompare(pszName, RGX_GENERAL_SVM_HEAP_IDENT, sizeof(RGX_GENERAL_SVM_HEAP_IDENT)))
+       {
+               /* The SVM heap normally starts out as this type though
+                  it may transition to DEVMEM_HEAP_MANAGER_USER
+                  on platforms with more processor virtual address
+                  bits than device virtual address bits */
+               psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_KERNEL;
+       }
+       else if (uiReservedRegionLength != 0)
+       {
+               /* Heaps which specify reserved VA space range are dual managed:
+                * - sBaseAddress to (sBaseAddress+uiReservedRegionLength-1): User managed
+                * - (sBaseAddress+uiReservedRegionLength) to uiLength: RA managed
+                */
+               psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_DUAL_USER_RA;
+       }
+       else
+       {
+               /* Otherwise, heap manager is decided (USER or RA) at first map */
+               psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_UNKNOWN;
+       }
+
+       /* Mark the heap to be managed by RA */
+       if (!OSStringNCompare(pszName, RGX_VK_CAPT_REPLAY_HEAP_IDENT,
+                             sizeof(RGX_VK_CAPT_REPLAY_HEAP_IDENT)))
+       {
+               psHeap->ui32HeapManagerFlags |= DEVMEM_HEAP_MANAGER_RA;
+       }
+
+       OSSNPrintf(aszBuf, sizeof(aszBuf),
+                       "NDM heap '%s' (QVM) ctx:%p",
+                       pszName, psCtx);
+       ui32pszStrSize = OSStringLength(aszBuf) + 1;
+       pszStr = OSAllocMem(ui32pszStrSize);
+       if (pszStr == NULL)
+       {
+               eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+               goto e4;
+       }
+       OSStringLCopy(pszStr, aszBuf, ui32pszStrSize);
+       psHeap->pszQuantizedVMRAName = pszStr;
+
+       psHeap->psQuantizedVMRA = RA_Create(psHeap->pszQuantizedVMRAName,
+                       /* Subsequent import: */
+                       0, RA_LOCKCLASS_1, NULL, NULL,
+                       (RA_PERARENA_HANDLE) psHeap,
+                       ui32PolicyVMRA);
+       if (psHeap->psQuantizedVMRA == NULL)
+       {
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA, e5);
+       }
+
+       if (!RA_Add(psHeap->psQuantizedVMRA,
+                       /* Make sure the VMRA doesn't allocate from reserved VAs */
+                       (RA_BASE_T)sBaseAddress.uiAddr + uiReservedRegionLength,
+                       (RA_LENGTH_T)uiLength,
+                       (RA_FLAGS_T)0, /* This RA doesn't use or need flags */
+                       NULL /* per ispan handle */))
+       {
+               RA_Delete(psHeap->psQuantizedVMRA);
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA, e5);
+       }
+
+       psHeap->psCtx = psCtx;
+
+
+       /* Create server-side counterpart of Device Memory heap */
+       eError = BridgeDevmemIntHeapCreate(GetBridgeHandle(psCtx->hDevConnection),
+                       psCtx->hDevMemServerContext,
+                       sBaseAddress,
+                       uiLength,
+                       ui32Log2Quantum,
+                       &hDevMemServerHeap);
+       PVR_GOTO_IF_ERROR(eError, e6);
+
+       psHeap->hDevMemServerHeap = hDevMemServerHeap;
+
+       eError = OSLockCreate(&psHeap->hLock);
+       PVR_GOTO_IF_ERROR(eError, e7);
+
+       psHeap->psCtx->uiNumHeaps++;
+       *ppsHeapPtr = psHeap;
+
+#if defined(PVRSRV_NEWDEVMEM_SUPPORT_MEM_TRACKING)
+       psHeap->psMemDescList = NULL;
+#endif /* PVRSRV_NEWDEVMEM_SUPPORT_MEM_TRACKING */
+
+       return PVRSRV_OK;
+
+       /* error exit paths */
+e7:
+       eError2 = BridgeDevmemIntHeapDestroy(GetBridgeHandle(psCtx->hDevConnection),
+                       psHeap->hDevMemServerHeap);
+       PVR_ASSERT (eError2 == PVRSRV_OK);
+e6:
+       if (psHeap->psQuantizedVMRA)
+               RA_Delete(psHeap->psQuantizedVMRA);
+e5:
+       if (psHeap->pszQuantizedVMRAName)
+               OSFreeMem(psHeap->pszQuantizedVMRAName);
+e4:
+       RA_Delete(psHeap->psSubAllocRA);
+e3:
+       OSFreeMem(psHeap->pszSubAllocRAName);
+e2:
+       OSFreeMem(psHeap->pszName);
+e1:
+       OSFreeMem(psHeap);
+e0:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetHeapBaseDevVAddr(struct DEVMEM_HEAP_TAG *psHeap,
+               IMG_DEV_VIRTADDR *pDevVAddr)
+{
+       PVR_RETURN_IF_INVALID_PARAM(psHeap);
+
+       *pDevVAddr = psHeap->sBaseAddress;
+
+       return PVRSRV_OK;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemExportalignAdjustSizeAndAlign(IMG_UINT32 uiLog2Quantum,
+               IMG_DEVMEM_SIZE_T *puiSize,
+               IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+       IMG_DEVMEM_SIZE_T uiSize = *puiSize;
+       IMG_DEVMEM_ALIGN_T uiAlign = *puiAlign;
+
+       /* Just in case someone changes the definition of IMG_DEVMEM_ALIGN_T. */
+       static_assert(sizeof(unsigned long long) == sizeof(uiAlign),
+                     "invalid uiAlign size");
+       /* This value is used for shifting so it must be less than the number
+        * of bits in unsigned long long. Using a greater or equal value is
+        * undefined behaviour. */
+       if (uiLog2Quantum >= sizeof(unsigned long long) * 8)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       if ((1ULL << uiLog2Quantum) > uiAlign)
+       {
+               uiAlign = 1ULL << uiLog2Quantum;
+       }
+       uiSize = (uiSize + uiAlign - 1) & ~(uiAlign - 1);
+
+       *puiSize = uiSize;
+       *puiAlign = uiAlign;
+
+       return PVRSRV_OK;
+}
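A minimal standalone sketch of the round-up arithmetic used above, assuming a power-of-two uiAlign; the RoundUpToAlign helper and the concrete numbers are illustrative only.

#include <assert.h>
#include <stdint.h>

/* Round uiSize up to the next multiple of a power-of-two uiAlign, mirroring
 * uiSize = (uiSize + uiAlign - 1) & ~(uiAlign - 1) above. */
static uint64_t RoundUpToAlign(uint64_t uiSize, uint64_t uiAlign)
{
        assert(uiAlign != 0 && (uiAlign & (uiAlign - 1)) == 0); /* power of two */
        return (uiSize + uiAlign - 1) & ~(uiAlign - 1);
}

int main(void)
{
        /* With a 4 KiB quantum (uiLog2Quantum == 12), a 10000-byte request is
         * rounded up to 12288 bytes, i.e. three 4 KiB pages. */
        assert(RoundUpToAlign(10000, 1ULL << 12) == 12288);
        return 0;
}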
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemDestroyHeap(DEVMEM_HEAP *psHeap)
+{
+       PVRSRV_ERROR eError;
+       IMG_INT uiImportCount;
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+       IMG_BOOL bDoCheck = IMG_TRUE;
+#if defined(__KERNEL__)
+       if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+       {
+               bDoCheck = IMG_FALSE;
+       }
+#endif
+#endif
+
+       PVR_RETURN_IF_INVALID_PARAM(psHeap);
+
+       uiImportCount = OSAtomicRead(&psHeap->hImportCount);
+       if (uiImportCount > 0)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%d(%s) leaks remain", uiImportCount, psHeap->pszName));
+#if defined(__KERNEL__)
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+               PVR_DPF((PVR_DBG_ERROR, "Details of remaining allocated device memory (for all processes):"));
+               RIDumpAllKM();
+#else
+               PVR_DPF((PVR_DBG_ERROR, "Compile with PVRSRV_ENABLE_GPU_MEMORY_INFO=1 to get a full "
+                               "list of all driver allocations."));
+#endif
+#endif
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+               if (bDoCheck)
+#endif
+               {
+                       return PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP;
+               }
+       }
+
+       eError = DestroyServerResource(psHeap->psCtx->hDevConnection,
+                                      NULL,
+                                      BridgeDevmemIntHeapDestroy,
+                                      psHeap->hDevMemServerHeap);
+
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+       if (bDoCheck)
+#endif
+       {
+               PVR_LOG_RETURN_IF_ERROR(eError, "BridgeDevmemIntHeapDestroy");
+       }
+
+       PVR_ASSERT(psHeap->psCtx->uiNumHeaps > 0);
+       psHeap->psCtx->uiNumHeaps--;
+
+       OSLockDestroy(psHeap->hLock);
+
+       if (psHeap->psQuantizedVMRA)
+       {
+               RA_Delete(psHeap->psQuantizedVMRA);
+       }
+       if (psHeap->pszQuantizedVMRAName)
+       {
+               OSFreeMem(psHeap->pszQuantizedVMRAName);
+       }
+
+       RA_Delete(psHeap->psSubAllocRA);
+       OSFreeMem(psHeap->pszSubAllocRAName);
+
+       OSFreeMem(psHeap->pszName);
+
+       OSCachedMemSet(psHeap, 0, sizeof(*psHeap));
+       OSFreeMem(psHeap);
+
+       return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ *                Devmem allocation/free functions                           *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemSubAllocateAndMap(IMG_UINT8 uiPreAllocMultiplier,
+                       DEVMEM_HEAP *psHeap,
+                       IMG_DEVMEM_SIZE_T uiSize,
+                       IMG_DEVMEM_ALIGN_T uiAlign,
+                       PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                       const IMG_CHAR *pszText,
+                       DEVMEM_MEMDESC **ppsMemDescPtr,
+                       IMG_DEV_VIRTADDR *psDevVirtAddr)
+{
+       PVRSRV_ERROR eError;
+       eError = DevmemSubAllocate(uiPreAllocMultiplier,
+                               psHeap,
+                               uiSize,
+                               uiAlign,
+                               uiFlags,
+                               pszText,
+                               ppsMemDescPtr);
+       PVR_GOTO_IF_ERROR(eError, fail_alloc);
+
+       eError = DevmemMapToDevice(*ppsMemDescPtr,
+                               psHeap,
+                               psDevVirtAddr);
+       PVR_GOTO_IF_ERROR(eError, fail_map);
+
+       return PVRSRV_OK;
+
+fail_map:
+       DevmemFree(*ppsMemDescPtr);
+fail_alloc:
+       *ppsMemDescPtr = NULL;
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemSubAllocate(IMG_UINT8 uiPreAllocMultiplier,
+               DEVMEM_HEAP *psHeap,
+               IMG_DEVMEM_SIZE_T uiSize,
+               IMG_DEVMEM_ALIGN_T uiAlign,
+               PVRSRV_MEMALLOCFLAGS_T uiFlags,
+               const IMG_CHAR *pszText,
+               DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+       RA_BASE_T uiAllocatedAddr = 0;
+       RA_LENGTH_T uiAllocatedSize;
+       RA_PERISPAN_HANDLE hImport; /* the "import" from which this sub-allocation came */
+       PVRSRV_ERROR eError;
+       DEVMEM_MEMDESC *psMemDesc = NULL;
+       IMG_DEVMEM_OFFSET_T uiOffset = 0;
+       DEVMEM_IMPORT *psImport;
+       IMG_UINT32 ui32CPUCacheLineSize;
+       void *pvAddr = NULL;
+
+       IMG_BOOL bImportClean;
+       IMG_BOOL bCPUCleanFlag = PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags);
+       IMG_BOOL bZero = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags);
+       IMG_BOOL bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags);
+       IMG_BOOL bCPUCached = (PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) ||
+                       PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags));
+       IMG_BOOL bGPUCached = (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) ||
+                       PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags));
+       IMG_BOOL bAlign = ! (PVRSRV_CHECK_NO_CACHE_LINE_ALIGN(uiFlags));
+       PVRSRV_CACHE_OP eOp = PVRSRV_CACHE_OP_INVALIDATE;
+       IMG_UINT32      ui32CacheLineSize = 0;
+       DEVMEM_PROPERTIES_T uiProperties;
+
+       if (uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC)
+       {
+               /* Deferred Allocation not supported on SubAllocs*/
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, failParams);
+       }
+
+       PVR_GOTO_IF_INVALID_PARAM(psHeap, eError, failParams);
+       PVR_GOTO_IF_INVALID_PARAM(psHeap->psCtx, eError, failParams);
+       PVR_GOTO_IF_INVALID_PARAM(ppsMemDescPtr, eError, failParams);
+
+       uiFlags = DevmemOverrideFlagsOrPassThrough(psHeap->psCtx->hDevConnection, uiFlags);
+
+#if defined(__KERNEL__)
+       {
+               /* The hDevConnection holds two different types of pointers depending on the
+                * address space in which it is used.
+                * In this instance the variable points to the device node in server */
+               PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psHeap->psCtx->hDevConnection;
+               ui32CacheLineSize = GET_ROGUE_CACHE_LINE_SIZE(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, SLC_CACHE_LINE_SIZE_BITS));
+       }
+#else
+       ui32CacheLineSize = ROGUE_CACHE_LINE_SIZE;
+#endif
+
+       /* The following logic makes sure that any cached memory is aligned to both the CPU and GPU.
+        * To be aligned on both you have to take the Lowest Common Multiple (LCM) of the cache line sizes of each.
+        * As the possibilities are all powers of 2, the largest number can simply be picked as the LCM.
+        * Therefore this algorithm just picks the highest from the CPU, GPU and given alignments.
+        */
+       ui32CPUCacheLineSize = psHeap->psCtx->ui32CPUCacheLineSize;
+       /* If the CPU cache line size is larger than the alignment given then it is the lowest common multiple.
+        * We also check whether the allocation is going to be cached on the CPU.
+        * Currently there is no check for the validity of the cache coherent option;
+        * in that case the alignment could be applied but the mode could still fall back to uncached.
+        */
+       if (bAlign && ui32CPUCacheLineSize > uiAlign && bCPUCached)
+       {
+               uiAlign = ui32CPUCacheLineSize;
+       }
+
+       /* Likewise, if the GPU cache line size is larger than the given alignment then it is the lowest common multiple,
+        * provided the allocation is going to be cached on the GPU (any of the GPU cached options).
+        * Currently there is no check for the validity of the cache coherent option;
+        * the alignment could be applied but the mode could still fall back to uncached.
+        */
+       if (bAlign && ui32CacheLineSize > uiAlign && bGPUCached)
+       {
+               uiAlign = ui32CacheLineSize;
+       }
+
+       eError = DevmemValidateParams(uiSize,
+                       uiAlign,
+                       &uiFlags);
+       PVR_GOTO_IF_ERROR(eError, failParams);
+
+       eError = DevmemMemDescAlloc(&psMemDesc);
+       PVR_GOTO_IF_ERROR(eError, failMemDescAlloc);
+
+       /* No request for exportable memory so use the RA */
+       eError = RA_Alloc(psHeap->psSubAllocRA,
+                       uiSize,
+                       uiPreAllocMultiplier,
+                       uiFlags,
+                       uiAlign,
+                       pszText,
+                       &uiAllocatedAddr,
+                       &uiAllocatedSize,
+                       &hImport);
+       PVR_GOTO_IF_ERROR(eError, failDeviceMemAlloc);
+
+       psImport = hImport;
+
+       /* This assignment assumes the RA returns an hImport that suballocations
+        * can be made from if uiSize is NOT a page multiple of the passed heap.
+        *
+        * So we check whether uiSize is a page multiple and only mark the import
+        * as exportable if it is (and no pre-allocation multiplier was requested).
+        */
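+       /* e.g. with a 4 KiB heap quantum (uiLog2Quantum == 12) and no import
+        * multiplier, an 8 KiB request is marked exportable while a 6 KiB
+        * request is not. */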
+       OSLockAcquire(psImport->hLock);
+       if (!(uiSize & ((1ULL << psHeap->uiLog2Quantum) - 1)) &&
+           (uiPreAllocMultiplier == RA_NO_IMPORT_MULTIPLIER))
+       {
+               psImport->uiProperties |= DEVMEM_PROPERTIES_EXPORTABLE;
+       }
+       psImport->uiProperties |= DEVMEM_PROPERTIES_SUBALLOCATABLE;
+       uiProperties = psImport->uiProperties;
+       OSLockRelease(psImport->hLock);
+
+       uiOffset = uiAllocatedAddr - psImport->sDeviceImport.sDevVAddr.uiAddr;
+
+#if defined(PDUMP) && defined(DEBUG)
+#if defined(__KERNEL__)
+       PDUMPCOMMENTWITHFLAGS(PMR_DeviceNode((PMR*)psImport->hPMR), PDUMP_CONT,
+                       "Suballocated %u Bytes for \"%s\" from PMR with handle ID: 0x%p (PID %u)",
+                       (IMG_UINT32) uiSize, pszText, psImport->hPMR, OSGetCurrentProcessID());
+#else
+       PDUMPCOMMENTF(psHeap->psCtx->hDevConnection, PDUMP_FLAGS_CONTINUOUS,
+                       "Suballocated %u Bytes for \"%s\" from PMR with handle ID: %p (PID %u)",
+                       (IMG_UINT32) uiSize,
+                       pszText,
+                       psImport->hPMR,
+                       OSGetCurrentProcessID());
+#endif
+#endif
+
+       DevmemMemDescInit(psMemDesc,
+                       uiOffset,
+                       psImport,
+                       uiSize);
+
+#if defined(DEBUG)
+       DevmemMemDescSetPoF(psMemDesc, uiFlags);
+#endif
+
+       bImportClean = ((uiProperties & DEVMEM_PROPERTIES_IMPORT_IS_CLEAN) != 0);
+
+       /* Zero the memory */
+       if (bZero)
+       {
+               /* Has the import been zeroed on allocation and were no suballocations returned to it so far? */
+               bImportClean = bImportClean && ((uiProperties & DEVMEM_PROPERTIES_IMPORT_IS_ZEROED) != 0);
+
+               if (!bImportClean)
+               {
+                       eOp = PVRSRV_CACHE_OP_FLUSH;
+
+                       eError = DevmemAcquireCpuVirtAddr(psMemDesc, &pvAddr);
+                       PVR_GOTO_IF_ERROR(eError, failMaintenance);
+
+                       /* uiSize is a 64-bit quantity whereas the 3rd argument
+                        * to OSDeviceMemSet is a 32-bit quantity on 32-bit systems
+                        * hence a compiler warning of implicit cast and loss of data.
+                        * Added explicit cast and assert to remove warning.
+                        */
+                       PVR_ASSERT(uiSize < IMG_UINT32_MAX);
+
+                       DevmemCPUMemSet(pvAddr, 0, uiSize, uiFlags);
+
+#if defined(PDUMP)
+                       DevmemPDumpLoadZeroMem(psMemDesc, 0, uiSize, PDUMP_FLAGS_CONTINUOUS);
+#endif
+               }
+       }
+       else if (bPoisonOnAlloc)
+       {
+               /* Has the import been poisoned on allocation and were no suballocations returned to it so far? */
+               bPoisonOnAlloc = (uiProperties & DEVMEM_PROPERTIES_IMPORT_IS_POISONED) != 0;
+
+               if (!bPoisonOnAlloc)
+               {
+                       eOp = PVRSRV_CACHE_OP_FLUSH;
+
+                       eError = DevmemAcquireCpuVirtAddr(psMemDesc, &pvAddr);
+                       PVR_GOTO_IF_ERROR(eError, failMaintenance);
+
+                       DevmemCPUMemSet(pvAddr, PVRSRV_POISON_ON_ALLOC_VALUE, uiSize, uiFlags);
+
+                       bPoisonOnAlloc = IMG_TRUE;
+               }
+       }
+
+       /* Flush the CPU cache if the buffer was written above (zeroed/poisoned),
+        * otherwise invalidate; only needed for CPU-cached imports that are not
+        * already clean */
+       if (bCPUCached && !bImportClean && (bZero || bCPUCleanFlag || bPoisonOnAlloc))
+       {
+               eError = BridgeCacheOpExec (GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+                               psMemDesc->psImport->hPMR,
+                               (IMG_UINT64)(uintptr_t)pvAddr - psMemDesc->uiOffset,
+                               psMemDesc->uiOffset,
+                               psMemDesc->uiAllocSize,
+                               eOp);
+               PVR_GOTO_IF_ERROR(eError, failMaintenance);
+       }
+
+       if (pvAddr)
+       {
+               DevmemReleaseCpuVirtAddr(psMemDesc);
+               pvAddr = NULL;
+       }
+
+       /* copy the allocation descriptive name and size so it can be passed to DevicememHistory when
+        * the allocation gets mapped/unmapped
+        */
+       CheckAnnotationLength(pszText);
+       OSStringLCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN);
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+       if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psMemDesc->psImport->hDevConnection), PVRSRV_BRIDGE_RI))
+       {
+               /* Attach RI information */
+               eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+                               psMemDesc->psImport->hPMR,
+                               OSStringNLength(psMemDesc->szText, DEVMEM_ANNOTATION_MAX_LEN),
+                               psMemDesc->szText,
+                               psMemDesc->uiOffset,
+                               uiAllocatedSize,
+                               IMG_FALSE,
+                               IMG_TRUE,
+                               &(psMemDesc->hRIHandle));
+               PVR_LOG_IF_ERROR(eError, "BridgeRIWriteMEMDESCEntry");
+       }
+#else /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
+       PVR_UNREFERENCED_PARAMETER (pszText);
+#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
+
+       *ppsMemDescPtr = psMemDesc;
+
+       return PVRSRV_OK;
+
+       /* error exit paths follow */
+
+failMaintenance:
+       if (pvAddr)
+       {
+               DevmemReleaseCpuVirtAddr(psMemDesc);
+               pvAddr = NULL;
+       }
+       DevmemMemDescRelease(psMemDesc);
+       psMemDesc = NULL;       /* Make sure we don't do a discard after the release */
+failDeviceMemAlloc:
+       if (psMemDesc)
+       {
+               DevmemMemDescDiscard(psMemDesc);
+       }
+failMemDescAlloc:
+failParams:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       PVR_DPF((PVR_DBG_ERROR,
+                       "%s: Failed! Error is %s. Allocation size: " IMG_DEVMEM_SIZE_FMTSPEC,
+                       __func__,
+                       PVRSRVGETERRORSTRING(eError),
+                       uiSize));
+       return eError;
+}
+
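+/* Allocate exportable device memory backed by its own import (PMR). The size
+ * and alignment are first adjusted to the heap page size
+ * (DevmemExportalignAdjustSizeAndAlign) so the whole allocation can be
+ * exported. */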
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAllocateExportable(SHARED_DEV_CONNECTION hDevConnection,
+               IMG_DEVMEM_SIZE_T uiSize,
+               IMG_DEVMEM_ALIGN_T uiAlign,
+               IMG_UINT32 uiLog2HeapPageSize,
+               PVRSRV_MEMALLOCFLAGS_T uiFlags,
+               const IMG_CHAR *pszText,
+               DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+       PVRSRV_ERROR eError;
+       DEVMEM_MEMDESC *psMemDesc = NULL;
+       DEVMEM_IMPORT *psImport;
+       IMG_UINT32 ui32MappingTable = 0;
+
+       eError = DevmemExportalignAdjustSizeAndAlign(uiLog2HeapPageSize,
+                       &uiSize,
+                       &uiAlign);
+       PVR_GOTO_IF_ERROR(eError, failParams);
+
+       uiFlags = DevmemOverrideFlagsOrPassThrough(hDevConnection, uiFlags);
+
+       eError = DevmemValidateParams(uiSize,
+                       uiAlign,
+                       &uiFlags);
+       PVR_GOTO_IF_ERROR(eError, failParams);
+
+       eError = DevmemMemDescAlloc(&psMemDesc);
+       PVR_GOTO_IF_ERROR(eError, failMemDescAlloc);
+
+       eError = AllocateDeviceMemory(hDevConnection,
+                       uiLog2HeapPageSize,
+                       uiSize,
+                       uiSize,
+                       1,
+                       1,
+                       &ui32MappingTable,
+                       uiAlign,
+                       uiFlags,
+                       IMG_TRUE,
+                       pszText,
+                       &psImport);
+       PVR_GOTO_IF_ERROR(eError, failDeviceMemAlloc);
+
+       DevmemMemDescInit(psMemDesc,
+                       0,
+                       psImport,
+                       uiSize);
+
+#if defined(DEBUG)
+       DevmemMemDescSetPoF(psMemDesc, uiFlags);
+#endif
+
+       *ppsMemDescPtr = psMemDesc;
+
+       /* copy the allocation descriptive name and size so it can be passed to DevicememHistory when
+        * the allocation gets mapped/unmapped
+        */
+       CheckAnnotationLength(pszText);
+       OSStringLCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN);
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+       if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI))
+       {
+               eError = BridgeRIWritePMREntry (GetBridgeHandle(psImport->hDevConnection),
+                               psImport->hPMR);
+               PVR_LOG_IF_ERROR(eError, "BridgeRIWritePMREntry");
+
+               /* Attach RI information */
+               eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psImport->hDevConnection),
+                               psImport->hPMR,
+                               sizeof("^"),
+                               "^",
+                               psMemDesc->uiOffset,
+                               uiSize,
+                               IMG_FALSE,
+                               IMG_FALSE,
+                               &psMemDesc->hRIHandle);
+               PVR_LOG_IF_ERROR(eError, "BridgeRIWriteMEMDESCEntry");
+       }
+#else  /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
+       PVR_UNREFERENCED_PARAMETER (pszText);
+#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
+
+       return PVRSRV_OK;
+
+       /* error exit paths follow */
+
+failDeviceMemAlloc:
+       DevmemMemDescDiscard(psMemDesc);
+
+failMemDescAlloc:
+failParams:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       PVR_DPF((PVR_DBG_ERROR,
+                       "%s: Failed! Error is %s. Allocation size: " IMG_DEVMEM_SIZE_FMTSPEC,
+                       __func__,
+                       PVRSRVGETERRORSTRING(eError),
+                       uiSize));
+       return eError;
+}
+
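+/* Allocate sparse device memory: only ui32NumPhysChunks of the
+ * ui32NumVirtChunks virtual chunks are physically backed, as described by
+ * pui32MappingTable. */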
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAllocateSparse(SHARED_DEV_CONNECTION hDevConnection,
+               IMG_DEVMEM_SIZE_T uiSize,
+               IMG_DEVMEM_SIZE_T uiChunkSize,
+               IMG_UINT32 ui32NumPhysChunks,
+               IMG_UINT32 ui32NumVirtChunks,
+               IMG_UINT32 *pui32MappingTable,
+               IMG_DEVMEM_ALIGN_T uiAlign,
+               IMG_UINT32 uiLog2HeapPageSize,
+               PVRSRV_MEMALLOCFLAGS_T uiFlags,
+               const IMG_CHAR *pszText,
+               DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+       PVRSRV_ERROR eError;
+       DEVMEM_MEMDESC *psMemDesc = NULL;
+       DEVMEM_IMPORT *psImport;
+
+       eError = DevmemExportalignAdjustSizeAndAlign(uiLog2HeapPageSize,
+                       &uiSize,
+                       &uiAlign);
+       PVR_GOTO_IF_ERROR(eError, failParams);
+
+       uiFlags = DevmemOverrideFlagsOrPassThrough(hDevConnection, uiFlags);
+
+       eError = DevmemValidateParams(uiSize,
+                       uiAlign,
+                       &uiFlags);
+       PVR_GOTO_IF_ERROR(eError, failParams);
+
+       eError = DevmemMemDescAlloc(&psMemDesc);
+       PVR_GOTO_IF_ERROR(eError, failMemDescAlloc);
+
+       eError = AllocateDeviceMemory(hDevConnection,
+                       uiLog2HeapPageSize,
+                       uiSize,
+                       uiChunkSize,
+                       ui32NumPhysChunks,
+                       ui32NumVirtChunks,
+                       pui32MappingTable,
+                       uiAlign,
+                       uiFlags,
+                       IMG_TRUE,
+                       pszText,
+                       &psImport);
+       PVR_GOTO_IF_ERROR(eError, failDeviceMemAlloc);
+
+       DevmemMemDescInit(psMemDesc,
+                       0,
+                       psImport,
+                       uiSize);
+
+#if defined(DEBUG)
+       DevmemMemDescSetPoF(psMemDesc, uiFlags);
+#endif
+
+       /* copy the allocation descriptive name and size so it can be passed to DevicememHistory when
+        * the allocation gets mapped/unmapped
+        */
+       CheckAnnotationLength(pszText);
+       OSStringLCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN);
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+       if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI))
+       {
+               eError = BridgeRIWritePMREntry (GetBridgeHandle(psImport->hDevConnection),
+                               psImport->hPMR);
+               PVR_LOG_IF_ERROR(eError, "BridgeRIWritePMREntry");
+
+               /* Attach RI information */
+               eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+                               psMemDesc->psImport->hPMR,
+                               sizeof("^"),
+                               "^",
+                               psMemDesc->uiOffset,
+                               uiSize,
+                               IMG_FALSE,
+                               IMG_FALSE,
+                               &psMemDesc->hRIHandle);
+               PVR_LOG_IF_ERROR(eError, "BridgeRIWriteMEMDESCEntry");
+       }
+#else  /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
+       PVR_UNREFERENCED_PARAMETER (pszText);
+#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
+
+       *ppsMemDescPtr = psMemDesc;
+
+       return PVRSRV_OK;
+
+       /* error exit paths follow */
+
+failDeviceMemAlloc:
+       DevmemMemDescDiscard(psMemDesc);
+
+failMemDescAlloc:
+failParams:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       PVR_DPF((PVR_DBG_ERROR,
+                       "%s: Failed! Error is %s. Allocation size: " IMG_DEVMEM_SIZE_FMTSPEC,
+                       __func__,
+                       PVRSRVGETERRORSTRING(eError),
+                       uiSize));
+       return eError;
+}
+
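+/* Thin wrappers over the PMR bridge that create and destroy a local import
+ * handle for a server-side resource. */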
+IMG_INTERNAL PVRSRV_ERROR
+DevmemMakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection,
+               IMG_HANDLE hServerHandle,
+               IMG_HANDLE *hLocalImportHandle)
+{
+       return BridgePMRMakeLocalImportHandle(GetBridgeHandle(hDevConnection),
+                       hServerHandle,
+                       hLocalImportHandle);
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemUnmakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection,
+               IMG_HANDLE hLocalImportHandle)
+{
+       return DestroyServerResource(hDevConnection,
+                                    NULL,
+                                    BridgePMRUnmakeLocalImportHandle,
+                                    hLocalImportHandle);
+}
+
+/*****************************************************************************
+ *                Devmem insecure export functions                           *
+ *****************************************************************************/
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+
+static PVRSRV_ERROR
+_Mapping_Export(DEVMEM_IMPORT *psImport,
+               DEVMEM_EXPORTHANDLE *phPMRExportHandlePtr,
+               DEVMEM_EXPORTKEY *puiExportKeyPtr,
+               DEVMEM_SIZE_T *puiSize,
+               DEVMEM_LOG2ALIGN_T *puiLog2Contig)
+{
+       /* Gets an export handle and key for the PMR used for this mapping */
+       /* Can only be done if there are no suballocations for this mapping */
+
+       PVRSRV_ERROR eError;
+       DEVMEM_EXPORTHANDLE hPMRExportHandle;
+       DEVMEM_EXPORTKEY uiExportKey;
+       IMG_DEVMEM_SIZE_T uiSize;
+       IMG_DEVMEM_LOG2ALIGN_T uiLog2Contig;
+
+       PVR_GOTO_IF_INVALID_PARAM(psImport, eError, failParams);
+
+       if ((GetImportProperties(psImport) & DEVMEM_PROPERTIES_EXPORTABLE) == 0)
+       {
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION, failParams);
+       }
+
+       eError = BridgePMRExportPMR(GetBridgeHandle(psImport->hDevConnection),
+                       psImport->hPMR,
+                       &hPMRExportHandle,
+                       &uiSize,
+                       &uiLog2Contig,
+                       &uiExportKey);
+       PVR_GOTO_IF_ERROR(eError, failExport);
+
+       PVR_ASSERT(uiSize == psImport->uiSize);
+
+       *phPMRExportHandlePtr = hPMRExportHandle;
+       *puiExportKeyPtr = uiExportKey;
+       *puiSize = uiSize;
+       *puiLog2Contig = uiLog2Contig;
+
+       return PVRSRV_OK;
+
+       /* error exit paths follow */
+
+failExport:
+failParams:
+
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+static void
+_Mapping_Unexport(DEVMEM_IMPORT *psImport,
+               DEVMEM_EXPORTHANDLE hPMRExportHandle)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT (psImport != NULL);
+
+       eError = DestroyServerResource(psImport->hDevConnection,
+                                      NULL,
+                                      BridgePMRUnexportPMR,
+                                      hPMRExportHandle);
+       PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemExport(DEVMEM_MEMDESC *psMemDesc,
+               DEVMEM_EXPORTCOOKIE *psExportCookie)
+{
+       /* Caller to provide storage for export cookie struct */
+       PVRSRV_ERROR eError;
+       IMG_HANDLE hPMRExportHandle = 0;
+       IMG_UINT64 uiPMRExportPassword = 0;
+       IMG_DEVMEM_SIZE_T uiSize = 0;
+       IMG_DEVMEM_LOG2ALIGN_T uiLog2Contig = 0;
+
+       PVR_GOTO_IF_INVALID_PARAM(psMemDesc, eError, e0);
+       PVR_GOTO_IF_INVALID_PARAM(psExportCookie, eError, e0);
+
+       if (DEVMEM_PROPERTIES_EXPORTABLE !=
+                       (GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_EXPORTABLE))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: This Memory (0x%p) cannot be exported!...",
+                               __func__, psMemDesc));
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_REQUEST, e0);
+       }
+
+       eError = _Mapping_Export(psMemDesc->psImport,
+                       &hPMRExportHandle,
+                       &uiPMRExportPassword,
+                       &uiSize,
+                       &uiLog2Contig);
+       if (eError != PVRSRV_OK)
+       {
+               psExportCookie->uiSize = 0;
+               goto e0;
+       }
+
+       psExportCookie->hPMRExportHandle = hPMRExportHandle;
+       psExportCookie->uiPMRExportPassword = uiPMRExportPassword;
+       psExportCookie->uiSize = uiSize;
+       psExportCookie->uiLog2ContiguityGuarantee = uiLog2Contig;
+
+       return PVRSRV_OK;
+
+       /* error exit paths follow */
+
+e0:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+IMG_INTERNAL void
+DevmemUnexport(DEVMEM_MEMDESC *psMemDesc,
+               DEVMEM_EXPORTCOOKIE *psExportCookie)
+{
+       _Mapping_Unexport(psMemDesc->psImport,
+                       psExportCookie->hPMRExportHandle);
+
+       psExportCookie->uiSize = 0;
+}
+
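+/* Import memory previously exported with DevmemExport(), using the export
+ * handle and password carried in the export cookie. */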
+IMG_INTERNAL PVRSRV_ERROR
+DevmemImport(SHARED_DEV_CONNECTION hDevConnection,
+               DEVMEM_EXPORTCOOKIE *psCookie,
+               PVRSRV_MEMALLOCFLAGS_T uiFlags,
+               DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+       DEVMEM_MEMDESC *psMemDesc = NULL;
+       DEVMEM_IMPORT *psImport;
+       IMG_HANDLE hPMR;
+       PVRSRV_ERROR eError;
+
+       PVR_GOTO_IF_INVALID_PARAM(ppsMemDescPtr, eError, failParams);
+
+       eError = DevmemMemDescAlloc(&psMemDesc);
+       PVR_GOTO_IF_ERROR(eError, failMemDescAlloc);
+
+       eError = DevmemImportStructAlloc(hDevConnection,
+                       &psImport);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, failImportAlloc);
+       }
+
+       /* Get a handle to the PMR (inc refcount) */
+       eError = BridgePMRImportPMR(GetBridgeHandle(hDevConnection),
+                       psCookie->hPMRExportHandle,
+                       psCookie->uiPMRExportPassword,
+                       psCookie->uiSize, /* not trusted - just for validation */
+                       psCookie->uiLog2ContiguityGuarantee, /* not trusted - just for validation */
+                       &hPMR);
+       PVR_GOTO_IF_ERROR(eError, failImport);
+
+       DevmemImportStructInit(psImport,
+                       psCookie->uiSize,
+                       1ULL << psCookie->uiLog2ContiguityGuarantee,
+                       uiFlags,
+                       hPMR,
+                       DEVMEM_PROPERTIES_IMPORTED |
+                       DEVMEM_PROPERTIES_EXPORTABLE);
+
+       DevmemMemDescInit(psMemDesc,
+                       0,
+                       psImport,
+                       psImport->uiSize);
+
+       *ppsMemDescPtr = psMemDesc;
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+       if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psMemDesc->psImport->hDevConnection), PVRSRV_BRIDGE_RI))
+       {
+               /* Attach RI information */
+               eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+                               psMemDesc->psImport->hPMR,
+                               sizeof("^"),
+                               "^",
+                               psMemDesc->uiOffset,
+                               psMemDesc->psImport->uiSize,
+                               IMG_TRUE,
+                               IMG_TRUE,
+                               &psMemDesc->hRIHandle);
+               PVR_LOG_IF_ERROR(eError, "BridgeRIWriteMEMDESCEntry");
+       }
+#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
+
+       return PVRSRV_OK;
+
+       /* error exit paths follow */
+
+failImport:
+       DevmemImportDiscard(psImport);
+failImportAlloc:
+       DevmemMemDescDiscard(psMemDesc);
+failMemDescAlloc:
+failParams:
+       PVR_ASSERT(eError != PVRSRV_OK);
+
+       return eError;
+}
+
+#endif /* SUPPORT_INSECURE_EXPORT */
+
+/*****************************************************************************
+ *                   Common MemDesc functions                                *
+ *****************************************************************************/
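+
+/* Unpin an allocation's physical backing. Refused for allocations that may
+ * contain suballocations (i.e. not exportable) or that are still CPU-mapped;
+ * a no-op if the allocation is already unpinned. */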
+IMG_INTERNAL PVRSRV_ERROR
+DevmemUnpin(DEVMEM_MEMDESC *psMemDesc)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+       DEVMEM_PROPERTIES_T uiProperties = GetImportProperties(psImport);
+
+       if (uiProperties & DEVMEM_PROPERTIES_NO_LAYOUT_CHANGE)
+       {
+               eError = PVRSRV_ERROR_INVALID_REQUEST;
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: The passed allocation is not valid to unpin",
+                               __func__));
+
+               goto e_exit;
+       }
+
+       /* Stop if the allocation might have suballocations. */
+       if (!(uiProperties & DEVMEM_PROPERTIES_EXPORTABLE))
+       {
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: The passed allocation is not valid to unpin because "
+                               "there might be suballocations on it. Make sure you allocate a page multiple "
+                               "of the heap when using PVRSRVAllocDeviceMem()",
+                               __func__));
+
+               goto e_exit;
+       }
+
+       /* Stop if the Import is still mapped to CPU */
+       if (psImport->sCPUImport.ui32RefCount)
+       {
+               eError = PVRSRV_ERROR_STILL_MAPPED;
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: There are still %u references on the CPU mapping. "
+                               "Please remove all CPU mappings before unpinning.",
+                               __func__,
+                               psImport->sCPUImport.ui32RefCount));
+
+               goto e_exit;
+       }
+
+       /* Only unpin if it is not already unpinned;
+        * otherwise just return PVRSRV_OK */
+       if (uiProperties & DEVMEM_PROPERTIES_UNPINNED)
+       {
+               goto e_exit;
+       }
+
+       /* Unpin it and invalidate mapping */
+       if (psImport->sDeviceImport.bMapped)
+       {
+               eError = BridgeDevmemIntUnpinInvalidate(GetBridgeHandle(psImport->hDevConnection),
+                               psImport->sDeviceImport.hMapping,
+                               psImport->hPMR);
+       }
+       else
+       {
+               /* Or just unpin it */
+               eError = BridgeDevmemIntUnpin(GetBridgeHandle(psImport->hDevConnection),
+                               psImport->hPMR);
+       }
+
+       /* Update flags and RI when call was successful */
+       if (eError == PVRSRV_OK)
+       {
+               OSLockAcquire(psImport->hLock);
+               psImport->uiProperties |= DEVMEM_PROPERTIES_UNPINNED;
+               OSLockRelease(psImport->hLock);
+       }
+       else
+       {
+               /* Or just show what went wrong */
+               PVR_DPF((PVR_DBG_ERROR, "%s: Unpin aborted because of error %d",
+                               __func__,
+                               eError));
+       }
+
+e_exit:
+       return eError;
+}
+
+
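+/* Re-pin a previously unpinned allocation and, if it is device-mapped,
+ * revalidate the mapping. PVRSRV_ERROR_PMR_NEW_MEMORY is also treated as
+ * success for clearing the unpinned flag and is returned to the caller. */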
+IMG_INTERNAL PVRSRV_ERROR
+DevmemPin(DEVMEM_MEMDESC *psMemDesc)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+       DEVMEM_PROPERTIES_T uiProperties = GetImportProperties(psImport);
+
+       if (uiProperties & DEVMEM_PROPERTIES_NO_LAYOUT_CHANGE)
+       {
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_REQUEST, e_exit);
+       }
+
+       /* Only pin if it is unpinned */
+       if ((uiProperties & DEVMEM_PROPERTIES_UNPINNED) == 0)
+       {
+               goto e_exit;
+       }
+
+       /* Pin it and make mapping valid */
+       if (psImport->sDeviceImport.bMapped)
+       {
+               eError = BridgeDevmemIntPinValidate(GetBridgeHandle(psImport->hDevConnection),
+                               psImport->sDeviceImport.hMapping,
+                               psImport->hPMR);
+       }
+       else
+       {
+               /* Or just pin it */
+               eError = BridgeDevmemIntPin(GetBridgeHandle(psImport->hDevConnection),
+                               psImport->hPMR);
+       }
+
+       if ((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_PMR_NEW_MEMORY))
+       {
+               OSLockAcquire(psImport->hLock);
+               psImport->uiProperties &= ~DEVMEM_PROPERTIES_UNPINNED;
+               OSLockRelease(psImport->hLock);
+       }
+       else
+       {
+               /* Or just show what went wrong */
+               PVR_DPF((PVR_DBG_ERROR, "%s: Pin aborted because of error %d",
+                               __func__,
+                               eError));
+       }
+
+e_exit:
+       return eError;
+}
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetSize(DEVMEM_MEMDESC *psMemDesc, IMG_DEVMEM_SIZE_T* puiSize)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       *puiSize = psMemDesc->uiAllocSize;
+
+       return eError;
+}
+
+IMG_INTERNAL void
+DevmemGetAnnotation(DEVMEM_MEMDESC *psMemDesc, IMG_CHAR **pszAnnotation)
+{
+       /*
+        * It is expected that psMemDesc->szText is a valid NUL-terminated string,
+        * since DevmemMemDescAlloc uses OSAllocZMem to create the memdesc.
+        */
+       *pszAnnotation = psMemDesc->szText;
+}
+
+/*
+       This function is called for freeing any class of memory
+ */
+IMG_INTERNAL IMG_BOOL
+DevmemFree(DEVMEM_MEMDESC *psMemDesc)
+{
+       if (GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_SECURE)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Please use methods dedicated to secure buffers.",
+                               __func__));
+               return IMG_FALSE;
+       }
+
+       return DevmemMemDescRelease(psMemDesc);
+}
+
+IMG_INTERNAL IMG_BOOL
+DevmemReleaseDevAddrAndFree(DEVMEM_MEMDESC *psMemDesc)
+{
+       DevmemReleaseDevVirtAddr(psMemDesc);
+       return DevmemFree(psMemDesc);
+}
+
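+/* Map a MemDesc into the device virtual address space of the given heap and
+ * return the device virtual address. The mapping is reference counted and the
+ * call fails if the MemDesc is already mapped. */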
+IMG_INTERNAL PVRSRV_ERROR
+DevmemMapToDevice(DEVMEM_MEMDESC *psMemDesc,
+               DEVMEM_HEAP *psHeap,
+               IMG_DEV_VIRTADDR *psDevVirtAddr)
+{
+       DEVMEM_IMPORT *psImport;
+       IMG_DEV_VIRTADDR sDevVAddr;
+       PVRSRV_ERROR eError;
+       IMG_BOOL bMap = IMG_TRUE;
+       IMG_BOOL bDestroyed = IMG_FALSE;
+       IMG_UINT64 ui64OptionalMapAddress = DEVICEMEM_UTILS_NO_ADDRESS;
+       DEVMEM_PROPERTIES_T uiProperties = GetImportProperties(psMemDesc->psImport);
+
+       /* Do not try to map unpinned memory */
+       if (uiProperties & DEVMEM_PROPERTIES_UNPINNED)
+       {
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_MAP_REQUEST, failFlags);
+       }
+
+       OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock);
+       PVR_GOTO_IF_INVALID_PARAM(psHeap, eError, failParams);
+
+       if (psMemDesc->sDeviceMemDesc.ui32RefCount != 0)
+       {
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED, failCheck);
+       }
+
+       /* Don't map memory for deferred allocations */
+       if (psMemDesc->psImport->uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC)
+       {
+               PVR_ASSERT(uiProperties & DEVMEM_PROPERTIES_EXPORTABLE);
+               bMap = IMG_FALSE;
+       }
+
+       DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+                       __func__,
+                       psMemDesc,
+                       psMemDesc->sDeviceMemDesc.ui32RefCount,
+                       psMemDesc->sDeviceMemDesc.ui32RefCount+1);
+
+       psImport = psMemDesc->psImport;
+       DevmemMemDescAcquire(psMemDesc);
+
+#if defined(__KERNEL__)
+       if (psHeap->bPremapped)
+       {
+               ui64OptionalMapAddress = _GetPremappedVA(psImport->hPMR, psHeap->psCtx->hDevConnection);
+       }
+#endif
+
+       eError = DevmemImportStructDevMap(psHeap,
+                       bMap,
+                       psImport,
+                       ui64OptionalMapAddress);
+       PVR_GOTO_IF_ERROR(eError, failMap);
+
+       sDevVAddr.uiAddr = psImport->sDeviceImport.sDevVAddr.uiAddr;
+       sDevVAddr.uiAddr += psMemDesc->uiOffset;
+       psMemDesc->sDeviceMemDesc.sDevVAddr = sDevVAddr;
+       psMemDesc->sDeviceMemDesc.ui32RefCount++;
+
+       *psDevVirtAddr = psMemDesc->sDeviceMemDesc.sDevVAddr;
+
+       OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+
+       if (GetInfoPageDebugFlags(psMemDesc->psImport->hDevConnection) & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED)
+       {
+               BridgeDevicememHistoryMap(GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+                               psMemDesc->psImport->hPMR,
+                               psMemDesc->uiOffset,
+                               psMemDesc->sDeviceMemDesc.sDevVAddr,
+                               psMemDesc->uiAllocSize,
+                               psMemDesc->szText,
+                               DevmemGetHeapLog2PageSize(psHeap),
+                               psMemDesc->ui32AllocationIndex,
+                               &psMemDesc->ui32AllocationIndex);
+       }
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+       if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI))
+       {
+               if (psMemDesc->hRIHandle)
+               {
+                       eError = BridgeRIUpdateMEMDESCAddr(GetBridgeHandle(psImport->hDevConnection),
+                                       psMemDesc->hRIHandle,
+                                       psImport->sDeviceImport.sDevVAddr);
+                       PVR_LOG_IF_ERROR(eError, "BridgeRIUpdateMEMDESCAddr");
+               }
+       }
+#endif
+
+       return PVRSRV_OK;
+
+failMap:
+       bDestroyed = DevmemMemDescRelease(psMemDesc);
+failCheck:
+failParams:
+       if (!bDestroyed)
+       {
+               OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+       }
+       PVR_ASSERT(eError != PVRSRV_OK);
+failFlags:
+       return eError;
+}
+
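+/* As DevmemMapToDevice(), but map the MemDesc at a caller-supplied device
+ * virtual address rather than at one chosen by the heap. */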
+IMG_INTERNAL PVRSRV_ERROR
+DevmemMapToDeviceAddress(DEVMEM_MEMDESC *psMemDesc,
+               DEVMEM_HEAP *psHeap,
+               IMG_DEV_VIRTADDR sDevVirtAddr)
+{
+       DEVMEM_IMPORT *psImport;
+       IMG_DEV_VIRTADDR sDevVAddr;
+       PVRSRV_ERROR eError;
+       IMG_BOOL bMap = IMG_TRUE;
+       IMG_BOOL bDestroyed = IMG_FALSE;
+       DEVMEM_PROPERTIES_T uiProperties = GetImportProperties(psMemDesc->psImport);
+
+       /* Do not try to map unpinned memory */
+       if (uiProperties & DEVMEM_PROPERTIES_UNPINNED)
+       {
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_MAP_REQUEST, failFlags);
+       }
+
+       OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock);
+       PVR_GOTO_IF_INVALID_PARAM(psHeap, eError, failParams);
+
+       if (psMemDesc->sDeviceMemDesc.ui32RefCount != 0)
+       {
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED, failCheck);
+       }
+
+       /* Don't map memory for deferred allocations */
+       if (psMemDesc->psImport->uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC)
+       {
+               PVR_ASSERT(uiProperties & DEVMEM_PROPERTIES_EXPORTABLE);
+               bMap = IMG_FALSE;
+       }
+
+       DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+                       __func__,
+                       psMemDesc,
+                       psMemDesc->sDeviceMemDesc.ui32RefCount,
+                       psMemDesc->sDeviceMemDesc.ui32RefCount+1);
+
+       psImport = psMemDesc->psImport;
+       DevmemMemDescAcquire(psMemDesc);
+
+       eError = DevmemImportStructDevMap(psHeap,
+                       bMap,
+                       psImport,
+                       sDevVirtAddr.uiAddr);
+       PVR_GOTO_IF_ERROR(eError, failMap);
+
+       sDevVAddr.uiAddr = psImport->sDeviceImport.sDevVAddr.uiAddr;
+       sDevVAddr.uiAddr += psMemDesc->uiOffset;
+       psMemDesc->sDeviceMemDesc.sDevVAddr = sDevVAddr;
+       psMemDesc->sDeviceMemDesc.ui32RefCount++;
+
+       OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+
+       if (GetInfoPageDebugFlags(psMemDesc->psImport->hDevConnection) & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED)
+       {
+               BridgeDevicememHistoryMap(GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+                               psMemDesc->psImport->hPMR,
+                               psMemDesc->uiOffset,
+                               psMemDesc->sDeviceMemDesc.sDevVAddr,
+                               psMemDesc->uiAllocSize,
+                               psMemDesc->szText,
+                               DevmemGetHeapLog2PageSize(psHeap),
+                               psMemDesc->ui32AllocationIndex,
+                               &psMemDesc->ui32AllocationIndex);
+       }
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+       if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI))
+       {
+               if (psMemDesc->hRIHandle)
+               {
+                       eError = BridgeRIUpdateMEMDESCAddr(GetBridgeHandle(psImport->hDevConnection),
+                                       psMemDesc->hRIHandle,
+                                       psImport->sDeviceImport.sDevVAddr);
+                       PVR_LOG_IF_ERROR(eError, "BridgeRIUpdateMEMDESCAddr");
+               }
+       }
+#endif
+
+       return PVRSRV_OK;
+
+failMap:
+       bDestroyed = DevmemMemDescRelease(psMemDesc);
+failCheck:
+failParams:
+       if (!bDestroyed)
+       {
+               OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+       }
+       PVR_ASSERT(eError != PVRSRV_OK);
+failFlags:
+       return eError;
+}
+
+IMG_INTERNAL IMG_DEV_VIRTADDR
+DevmemGetDevVirtAddr(DEVMEM_MEMDESC *psMemDesc)
+{
+       if (psMemDesc->sDeviceMemDesc.ui32RefCount == 0)
+       {
+               PVR_LOG_ERROR(PVRSRV_ERROR_DEVICEMEM_NO_MAPPING, "DevmemGetDevVirtAddr");
+       }
+
+       PVR_ASSERT(psMemDesc->sDeviceMemDesc.sDevVAddr.uiAddr != 0);
+
+       return psMemDesc->sDeviceMemDesc.sDevVAddr;
+}
+
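+/* Take an additional reference on an existing device mapping and return its
+ * device virtual address; fails if the MemDesc is not currently mapped. */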
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAcquireDevVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+               IMG_DEV_VIRTADDR *psDevVirtAddr)
+{
+       PVRSRV_ERROR eError;
+
+       /* Do not try to map unpinned memory */
+       if (GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_UNPINNED)
+       {
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_MAP_REQUEST, failCheck);
+       }
+
+       OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock);
+       DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+                       __func__,
+                       psMemDesc,
+                       psMemDesc->sDeviceMemDesc.ui32RefCount,
+                       psMemDesc->sDeviceMemDesc.ui32RefCount+1);
+
+       if (psMemDesc->sDeviceMemDesc.ui32RefCount == 0)
+       {
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_NO_MAPPING, failRelease);
+       }
+       psMemDesc->sDeviceMemDesc.ui32RefCount++;
+
+       *psDevVirtAddr = psMemDesc->sDeviceMemDesc.sDevVAddr;
+       OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+
+       return PVRSRV_OK;
+
+failRelease:
+       OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+       PVR_ASSERT(eError != PVRSRV_OK);
+failCheck:
+       return eError;
+}
+
+IMG_INTERNAL void
+DevmemReleaseDevVirtAddr(DEVMEM_MEMDESC *psMemDesc)
+{
+       PVR_ASSERT(psMemDesc != NULL);
+
+       OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock);
+       DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+                       __func__,
+                       psMemDesc,
+                       psMemDesc->sDeviceMemDesc.ui32RefCount,
+                       psMemDesc->sDeviceMemDesc.ui32RefCount-1);
+
+       PVR_ASSERT(psMemDesc->sDeviceMemDesc.ui32RefCount != 0);
+
+       if (--psMemDesc->sDeviceMemDesc.ui32RefCount == 0)
+       {
+               if (GetInfoPageDebugFlags(psMemDesc->psImport->hDevConnection) & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED)
+               {
+                       BridgeDevicememHistoryUnmap(GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+                                       psMemDesc->psImport->hPMR,
+                                       psMemDesc->uiOffset,
+                                       psMemDesc->sDeviceMemDesc.sDevVAddr,
+                                       psMemDesc->uiAllocSize,
+                                       psMemDesc->szText,
+                                       DevmemGetHeapLog2PageSize(psMemDesc->psImport->sDeviceImport.psHeap),
+                                       psMemDesc->ui32AllocationIndex,
+                                       &psMemDesc->ui32AllocationIndex);
+               }
+
+               /* When the device mapping is destroyed, zero the Dev VA so that
+                * DevmemGetDevVirtAddr() returns 0 */
+               if (DevmemImportStructDevUnmap(psMemDesc->psImport) == IMG_TRUE)
+               {
+                       psMemDesc->sDeviceMemDesc.sDevVAddr.uiAddr = 0;
+               }
+               OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+
+               DevmemMemDescRelease(psMemDesc);
+       }
+       else
+       {
+               OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+       }
+}
+
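+/* Map the MemDesc into the CPU address space on first acquire and return the
+ * CPU virtual address; subsequent acquires only increment the refcount. */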
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAcquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+               void **ppvCpuVirtAddr)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(psMemDesc != NULL);
+       PVR_ASSERT(ppvCpuVirtAddr != NULL);
+
+       eError = DevmemCPUMapCheckImportProperties(psMemDesc);
+       PVR_LOG_RETURN_IF_ERROR(eError, "DevmemCPUMapCheckImportProperties");
+
+       OSLockAcquire(psMemDesc->sCPUMemDesc.hLock);
+       DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+                       __func__,
+                       psMemDesc,
+                       psMemDesc->sCPUMemDesc.ui32RefCount,
+                       psMemDesc->sCPUMemDesc.ui32RefCount+1);
+
+       if (psMemDesc->sCPUMemDesc.ui32RefCount++ == 0)
+       {
+               DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+               IMG_UINT8 *pui8CPUVAddr;
+
+               DevmemMemDescAcquire(psMemDesc);
+               eError = DevmemImportStructCPUMap(psImport);
+               PVR_GOTO_IF_ERROR(eError, failMap);
+
+               pui8CPUVAddr = psImport->sCPUImport.pvCPUVAddr;
+               pui8CPUVAddr += psMemDesc->uiOffset;
+               psMemDesc->sCPUMemDesc.pvCPUVAddr = pui8CPUVAddr;
+       }
+       *ppvCpuVirtAddr = psMemDesc->sCPUMemDesc.pvCPUVAddr;
+
+       VG_MARK_INITIALIZED(*ppvCpuVirtAddr, psMemDesc->psImport->uiSize);
+
+       OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+
+       return PVRSRV_OK;
+
+failMap:
+       PVR_ASSERT(eError != PVRSRV_OK);
+       psMemDesc->sCPUMemDesc.ui32RefCount--;
+
+       if (!DevmemMemDescRelease(psMemDesc))
+       {
+               OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+       }
+       return eError;
+}
+
+IMG_INTERNAL void
+DevmemReacquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+               void **ppvCpuVirtAddr)
+{
+       PVR_ASSERT(psMemDesc != NULL);
+       PVR_ASSERT(ppvCpuVirtAddr != NULL);
+
+       if (GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_NO_CPU_MAPPING)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: CPU Mapping is not possible on this allocation!",
+                               __func__));
+               return;
+       }
+
+       OSLockAcquire(psMemDesc->sCPUMemDesc.hLock);
+       DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+                       __func__,
+                       psMemDesc,
+                       psMemDesc->sCPUMemDesc.ui32RefCount,
+                       psMemDesc->sCPUMemDesc.ui32RefCount+1);
+
+       *ppvCpuVirtAddr = NULL;
+       if (psMemDesc->sCPUMemDesc.ui32RefCount)
+       {
+               *ppvCpuVirtAddr = psMemDesc->sCPUMemDesc.pvCPUVAddr;
+               psMemDesc->sCPUMemDesc.ui32RefCount += 1;
+       }
+
+       VG_MARK_INITIALIZED(*ppvCpuVirtAddr, psMemDesc->psImport->uiSize);
+       OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+}
+
+IMG_INTERNAL void
+DevmemReleaseCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc)
+{
+       PVR_ASSERT(psMemDesc != NULL);
+
+       if (GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_NO_CPU_MAPPING)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: CPU UnMapping is not possible on this allocation!",
+                               __func__));
+               return;
+       }
+
+       OSLockAcquire(psMemDesc->sCPUMemDesc.hLock);
+       DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+                       __func__,
+                       psMemDesc,
+                       psMemDesc->sCPUMemDesc.ui32RefCount,
+                       psMemDesc->sCPUMemDesc.ui32RefCount-1);
+
+       PVR_ASSERT(psMemDesc->sCPUMemDesc.ui32RefCount != 0);
+
+       if (--psMemDesc->sCPUMemDesc.ui32RefCount == 0)
+       {
+               OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+               DevmemImportStructCPUUnmap(psMemDesc->psImport);
+               DevmemMemDescRelease(psMemDesc);
+       }
+       else
+       {
+               OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+       }
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemLocalGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
+               IMG_HANDLE *phImport)
+{
+       if ((GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_EXPORTABLE) == 0)
+       {
+               return PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION;
+       }
+
+       *phImport = psMemDesc->psImport->hPMR;
+
+       return PVRSRV_OK;
+}
+
+#if !defined(__KERNEL__)
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetImportUID(DEVMEM_MEMDESC *psMemDesc,
+               IMG_UINT64 *pui64UID)
+{
+       DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+       PVRSRV_ERROR eError;
+
+       if (!(GetImportProperties(psImport) & (DEVMEM_PROPERTIES_IMPORTED |
+                                       DEVMEM_PROPERTIES_EXPORTABLE)))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: This Memory (0x%p) doesn't support the functionality requested...",
+                               __func__, psMemDesc));
+               return PVRSRV_ERROR_INVALID_REQUEST;
+       }
+
+       eError = BridgePMRGetUID(GetBridgeHandle(psImport->hDevConnection),
+                       psImport->hPMR,
+                       pui64UID);
+
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetReservation(DEVMEM_MEMDESC *psMemDesc,
+               IMG_HANDLE *hReservation)
+{
+       DEVMEM_IMPORT *psImport;
+
+       PVR_ASSERT(psMemDesc);
+       psImport = psMemDesc->psImport;
+
+       PVR_ASSERT(psImport);
+       *hReservation = psImport->sDeviceImport.hReservation;
+
+       return PVRSRV_OK;
+}
+
+#endif /* !__KERNEL__ */
+
+/* Kernel usage of this function will only work with
+ * memdescs of buffers allocated in the FW memory context
+ * that is created in the Server
+ */
+void
+DevmemGetPMRData(DEVMEM_MEMDESC *psMemDesc,
+               IMG_HANDLE *phPMR,
+               IMG_DEVMEM_OFFSET_T *puiPMROffset)
+{
+       DEVMEM_IMPORT *psImport;
+
+       PVR_ASSERT(psMemDesc);
+       *puiPMROffset = psMemDesc->uiOffset;
+       psImport = psMemDesc->psImport;
+
+       PVR_ASSERT(psImport);
+       *phPMR = psImport->hPMR;
+}
+
+#if defined(__KERNEL__)
+IMG_INTERNAL void
+DevmemGetFlags(DEVMEM_MEMDESC *psMemDesc,
+               PVRSRV_MEMALLOCFLAGS_T *puiFlags)
+{
+       DEVMEM_IMPORT *psImport;
+
+       PVR_ASSERT(psMemDesc);
+       psImport = psMemDesc->psImport;
+
+       PVR_ASSERT(psImport);
+       *puiFlags = psImport->uiFlags;
+}
+
+IMG_INTERNAL SHARED_DEV_CONNECTION
+DevmemGetConnection(DEVMEM_MEMDESC *psMemDesc)
+{
+       return psMemDesc->psImport->hDevConnection;
+}
+#endif /* __KERNEL__ */
+
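+/* Import a PMR identified by a local (same-connection) handle and wrap it in
+ * a new MemDesc; the PMR's size is returned to the caller when requested. */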
+IMG_INTERNAL PVRSRV_ERROR
+DevmemLocalImport(SHARED_DEV_CONNECTION hDevConnection,
+               IMG_HANDLE hExtHandle,
+               PVRSRV_MEMALLOCFLAGS_T uiFlags,
+               DEVMEM_MEMDESC **ppsMemDescPtr,
+               IMG_DEVMEM_SIZE_T *puiSizePtr,
+               const IMG_CHAR *pszAnnotation)
+{
+       DEVMEM_MEMDESC *psMemDesc = NULL;
+       DEVMEM_IMPORT *psImport;
+       IMG_DEVMEM_SIZE_T uiSize;
+       IMG_DEVMEM_ALIGN_T uiAlign;
+       IMG_HANDLE hPMR;
+       PVRSRV_ERROR eError;
+
+       PVR_GOTO_IF_INVALID_PARAM(ppsMemDescPtr, eError, failParams);
+
+       eError = DevmemMemDescAlloc(&psMemDesc);
+       PVR_GOTO_IF_ERROR(eError, failMemDescAlloc);
+
+       eError = DevmemImportStructAlloc(hDevConnection,
+                       &psImport);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, failImportAlloc);
+       }
+
+       /* Get the PMR handle and its size from the server */
+       eError = BridgePMRLocalImportPMR(GetBridgeHandle(hDevConnection),
+                       hExtHandle,
+                       &hPMR,
+                       &uiSize,
+                       &uiAlign);
+       PVR_GOTO_IF_ERROR(eError, failImport);
+
+       DevmemImportStructInit(psImport,
+                       uiSize,
+                       uiAlign,
+                       uiFlags,
+                       hPMR,
+                       DEVMEM_PROPERTIES_IMPORTED |
+                       DEVMEM_PROPERTIES_EXPORTABLE);
+
+       DevmemMemDescInit(psMemDesc,
+                       0,
+                       psImport,
+                       uiSize);
+
+       *ppsMemDescPtr = psMemDesc;
+       if (puiSizePtr)
+               *puiSizePtr = uiSize;
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+       if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psMemDesc->psImport->hDevConnection), PVRSRV_BRIDGE_RI))
+       {
+               /* Attach RI information.
+                * Set backed size to 0 since this allocation has been allocated
+                * by the same process and has been accounted for. */
+               eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+                               psMemDesc->psImport->hPMR,
+                               sizeof("^"),
+                               "^",
+                               psMemDesc->uiOffset,
+                               psMemDesc->psImport->uiSize,
+                               IMG_TRUE,
+                               IMG_FALSE,
+                               &(psMemDesc->hRIHandle));
+               PVR_LOG_IF_ERROR(eError, "BridgeRIWriteMEMDESCEntry");
+       }
+#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
+
+
+       /* Copy the allocation descriptive name and size so it can be passed
+        * to DevicememHistory when the allocation gets mapped/unmapped
+        */
+       CheckAnnotationLength(pszAnnotation);
+       OSStringLCopy(psMemDesc->szText, pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN);
+
+       return PVRSRV_OK;
+
+failImport:
+       DevmemImportDiscard(psImport);
+failImportAlloc:
+       DevmemMemDescDiscard(psMemDesc);
+failMemDescAlloc:
+failParams:
+       PVR_ASSERT(eError != PVRSRV_OK);
+
+       return eError;
+}
+
+#if !defined(__KERNEL__)
+IMG_INTERNAL PVRSRV_ERROR
+DevmemIsDevVirtAddrValid(DEVMEM_CONTEXT *psContext,
+               IMG_DEV_VIRTADDR sDevVAddr)
+{
+       return BridgeDevmemIsVDevAddrValid(GetBridgeHandle(psContext->hDevConnection),
+                                          psContext->hDevMemServerContext,
+                                          sDevVAddr);
+}
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetFaultAddress(DEVMEM_CONTEXT *psContext,
+               IMG_DEV_VIRTADDR *psFaultAddress)
+{
+       return BridgeDevmemGetFaultAddress(GetBridgeHandle(psContext->hDevConnection),
+                                          psContext->hDevMemServerContext,
+                                          psFaultAddress);
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemFlushDeviceSLCRange(DEVMEM_MEMDESC *psMemDesc,
+                          IMG_DEV_VIRTADDR sDevVAddr,
+                          IMG_DEVMEM_SIZE_T uiSize,
+                          IMG_BOOL bInvalidate)
+{
+       DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+       return BridgeDevmemFlushDevSLCRange(GetBridgeHandle(psImport->hDevConnection),
+                                           psImport->sDeviceImport.psHeap->psCtx->hDevMemServerContext,
+                                           sDevVAddr,
+                                           uiSize,
+                                           bInvalidate);
+}
+
+#if defined(RGX_FEATURE_FBCDC)
+IMG_INTERNAL PVRSRV_ERROR
+DevmemInvalidateFBSCTable(DEVMEM_CONTEXT *psContext,
+                          IMG_UINT64 ui64FBSCEntries)
+{
+       return BridgeDevmemInvalidateFBSCTable(GetBridgeHandle(psContext->hDevConnection),
+                                              psContext->hDevMemServerContext,
+                                              ui64FBSCEntries);
+}
+#endif
+
+#endif /* !__KERNEL__ */
+
+IMG_INTERNAL IMG_UINT32
+DevmemGetHeapLog2PageSize(DEVMEM_HEAP *psHeap)
+{
+       return psHeap->uiLog2Quantum;
+}
+
+IMG_INTERNAL PVRSRV_MEMALLOCFLAGS_T
+DevmemGetMemAllocFlags(DEVMEM_MEMDESC *psMemDesc)
+{
+       return psMemDesc->psImport->uiFlags;
+}
+
+IMG_INTERNAL IMG_DEVMEM_SIZE_T
+DevmemGetHeapReservedSize(DEVMEM_HEAP *psHeap)
+{
+       return psHeap->uiReservedRegionSize;
+}
+
+#if !defined(__KERNEL__)
+/**************************************************************************/ /*!
+@Function       RegisterDevmemPFNotify
+@Description    Registers that the application wants to be signaled when a page
+                fault occurs.
+
+@Input          psContext      Memory context of the process that would like
+                               to be notified.
+@Input          ui32PID        The PID of the calling process.
+@Input          bRegister      If true, register. If false, de-register.
+@Return         PVRSRV_ERROR:  PVRSRV_OK on success. Otherwise, a PVRSRV_
+                               error code
+ */ /***************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+RegisterDevmemPFNotify(DEVMEM_CONTEXT *psContext,
+               IMG_UINT32     ui32PID,
+               IMG_BOOL       bRegister)
+{
+       PVRSRV_ERROR eError;
+
+       eError = BridgeDevmemIntRegisterPFNotifyKM(GetBridgeHandle(psContext->hDevConnection),
+                       psContext->hDevMemServerContext,
+                       ui32PID,
+                       bRegister);
+       if (eError == PVRSRV_ERROR_BRIDGE_CALL_FAILED)
+       {
+               PVR_LOG_ERROR(eError, "BridgeDevmemIntRegisterPFNotifyKM");
+       }
+
+       return eError;
+}
+#endif /* !__KERNEL__ */
+
+IMG_INTERNAL void
+DevmemHeapSetPremapStatus(DEVMEM_HEAP *psHeap, IMG_BOOL IsPremapped)
+{
+       psHeap->bPremapped = IsPremapped;
+}
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/common/devicemem_pdump.c b/drivers/gpu/drm/img/img-rogue/services/shared/common/devicemem_pdump.c
new file mode 100644 (file)
index 0000000..639f93a
--- /dev/null
@@ -0,0 +1,404 @@
+/*************************************************************************/ /*!
+@File
+@Title          Shared device memory management PDump functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements common (client & server) PDump functions for the
+                memory management code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#if defined(PDUMP)
+
+#include "allocmem.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "pdump.h"
+#include "devicemem.h"
+#include "devicemem_utils.h"
+#include "devicemem_pdump.h"
+#include "client_pdump_bridge.h"
+#include "client_pdumpmm_bridge.h"
+#if defined(__linux__) && !defined(__KERNEL__)
+#include <stdio.h>
+#endif
+
+IMG_INTERNAL void
+DevmemPDumpLoadMem(DEVMEM_MEMDESC *psMemDesc,
+                   IMG_DEVMEM_OFFSET_T uiOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   PDUMP_FLAGS_T uiPDumpFlags)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(uiOffset + uiSize <= psMemDesc->psImport->uiSize);
+
+       eError = BridgePMRPDumpLoadMem(GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+                                      psMemDesc->psImport->hPMR,
+                                      psMemDesc->uiOffset + uiOffset,
+                                      uiSize,
+                                      uiPDumpFlags,
+                                      IMG_FALSE);
+
+       PVR_LOG_IF_ERROR(eError, "BridgePMRPDumpLoadMem");
+       /* If PDump was rejected for this device, suppress silently */
+       if (eError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE)
+       {
+               PVR_ASSERT(eError == PVRSRV_OK);
+       }
+}
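+
+/* Illustrative sketch: capturing the current contents of an allocation into
+ * the PDump stream with the function above. uiPDumpFlags stands for whatever
+ * capture-flags value the caller already holds; it is assumed here for
+ * illustration only.
+ *
+ *     DevmemPDumpLoadMem(psMemDesc, 0, psMemDesc->uiAllocSize, uiPDumpFlags);
+ *
+ * Out-of-range requests trip the PVR_ASSERT above, and a capture bound to
+ * another device is silently ignored, mirroring the body of the function.
+ */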
+
+IMG_INTERNAL void
+DevmemPDumpLoadZeroMem(DEVMEM_MEMDESC *psMemDesc,
+                       IMG_DEVMEM_OFFSET_T uiOffset,
+                       IMG_DEVMEM_SIZE_T uiSize,
+                       PDUMP_FLAGS_T uiPDumpFlags)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(uiOffset + uiSize <= psMemDesc->psImport->uiSize);
+
+       eError = BridgePMRPDumpLoadMem(GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+                                      psMemDesc->psImport->hPMR,
+                                      psMemDesc->uiOffset + uiOffset,
+                                      uiSize,
+                                      uiPDumpFlags,
+                                      IMG_TRUE);
+
+       PVR_LOG_IF_ERROR(eError, "BridgePMRPDumpLoadMem");
+       /* If PDump was rejected for this device, suppress silently */
+       if (eError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE)
+       {
+               PVR_ASSERT(eError == PVRSRV_OK);
+       }
+}
+
+IMG_INTERNAL void
+DevmemPDumpLoadMemValue32(DEVMEM_MEMDESC *psMemDesc,
+                          IMG_DEVMEM_OFFSET_T uiOffset,
+                          IMG_UINT32 ui32Value,
+                          PDUMP_FLAGS_T uiPDumpFlags)
+{
+       PVRSRV_ERROR eError;
+
+       eError = BridgePMRPDumpLoadMemValue32(GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+                                        psMemDesc->psImport->hPMR,
+                                        psMemDesc->uiOffset + uiOffset,
+                                        ui32Value,
+                                        uiPDumpFlags);
+
+       PVR_LOG_IF_ERROR(eError, "BridgePMRPDumpLoadMemValue32");
+       /* If PDump was rejected for this device, suppress silently */
+       if (eError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE)
+       {
+               PVR_ASSERT(eError == PVRSRV_OK);
+       }
+}
+
+IMG_INTERNAL void
+DevmemPDumpLoadMemValue64(DEVMEM_MEMDESC *psMemDesc,
+                          IMG_DEVMEM_OFFSET_T uiOffset,
+                          IMG_UINT64 ui64Value,
+                          PDUMP_FLAGS_T uiPDumpFlags)
+{
+       PVRSRV_ERROR eError;
+
+       eError = BridgePMRPDumpLoadMemValue64(GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+                                             psMemDesc->psImport->hPMR,
+                                             psMemDesc->uiOffset + uiOffset,
+                                             ui64Value,
+                                             uiPDumpFlags);
+       PVR_LOG_IF_ERROR(eError, "BridgePMRPDumpLoadMemValue64");
+       /* If PDump was rejected for this device, suppress silently */
+       if (eError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE)
+       {
+               PVR_ASSERT(eError == PVRSRV_OK);
+       }
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemPDumpPageCatBaseToSAddr(DEVMEM_MEMDESC           *psMemDesc,
+                              IMG_DEVMEM_OFFSET_T      *puiMemOffset,
+                              IMG_CHAR                         *pszName,
+                              IMG_UINT32                       ui32Size)
+{
+       PVRSRV_ERROR            eError;
+       IMG_CHAR                        aszMemspaceName[100];
+       IMG_CHAR                        aszSymbolicName[100];
+       IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+       *puiMemOffset += psMemDesc->uiOffset;
+
+       eError = BridgePMRPDumpSymbolicAddr(GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+                                           psMemDesc->psImport->hPMR,
+                                           *puiMemOffset,
+                                           sizeof(aszMemspaceName),
+                                           &aszMemspaceName[0],
+                                           sizeof(aszSymbolicName),
+                                           &aszSymbolicName[0],
+                                           puiMemOffset,
+                                           &uiNextSymName);
+
+       PVR_LOG_IF_ERROR(eError, "BridgePMRPDumpSymbolicAddr");
+       /* If PDump was rejected for this device, suppress silently */
+       if (eError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE)
+       {
+               PVR_ASSERT(eError == PVRSRV_OK);
+       }
+
+       OSSNPrintf(pszName, ui32Size, "%s:%s", &aszMemspaceName[0], &aszSymbolicName[0]);
+       return eError;
+}
+
+IMG_INTERNAL void
+DevmemPDumpSaveToFile(DEVMEM_MEMDESC *psMemDesc,
+                      IMG_DEVMEM_OFFSET_T uiOffset,
+                      IMG_DEVMEM_SIZE_T uiSize,
+                      const IMG_CHAR *pszFilename,
+                      IMG_UINT32 uiFileOffset)
+{
+       PVRSRV_ERROR eError;
+
+       eError = BridgePMRPDumpSaveToFile(GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+                                         psMemDesc->psImport->hPMR,
+                                         psMemDesc->uiOffset + uiOffset,
+                                         uiSize,
+                                         OSStringLength(pszFilename) + 1,
+                                         pszFilename,
+                                         uiFileOffset);
+
+       PVR_LOG_IF_ERROR(eError, "BridgePMRPDumpSaveToFile");
+       /* If PDump was rejected for this device, suppress silently */
+       if (eError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE)
+       {
+               PVR_ASSERT(eError == PVRSRV_OK);
+       }
+}
+
+IMG_INTERNAL void
+DevmemPDumpSaveToFileVirtual(DEVMEM_MEMDESC *psMemDesc,
+                             IMG_DEVMEM_OFFSET_T uiOffset,
+                             IMG_DEVMEM_SIZE_T uiSize,
+                             const IMG_CHAR *pszFilename,
+                             IMG_UINT32 ui32FileOffset,
+                             IMG_UINT32 ui32PdumpFlags)
+{
+       PVRSRV_ERROR eError;
+       IMG_DEV_VIRTADDR sDevAddrStart;
+
+       sDevAddrStart = psMemDesc->psImport->sDeviceImport.sDevVAddr;
+       sDevAddrStart.uiAddr += psMemDesc->uiOffset;
+       sDevAddrStart.uiAddr += uiOffset;
+
+       eError = BridgeDevmemIntPDumpSaveToFileVirtual(GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+                                                      psMemDesc->psImport->sDeviceImport.psHeap->psCtx->hDevMemServerContext,
+                                                      sDevAddrStart,
+                                                      uiSize,
+                                                      OSStringLength(pszFilename) + 1,
+                                                      pszFilename,
+                                                      ui32FileOffset,
+                                                      ui32PdumpFlags);
+
+       PVR_LOG_IF_ERROR(eError, "BridgeDevmemIntPDumpSaveToFileVirtual");
+       /* If PDump was rejected for this device, suppress silently */
+       if (eError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE)
+       {
+               PVR_ASSERT(eError == PVRSRV_OK);
+       }
+}
+
+IMG_INTERNAL void
+DevmemPDumpDataDescriptor(DEVMEM_MEMDESC *psMemDesc,
+                          IMG_DEVMEM_OFFSET_T uiOffset,
+                          IMG_DEVMEM_SIZE_T uiSize,
+                          const IMG_CHAR *pszFilename,
+                          IMG_UINT32 ui32HeaderType,
+                          IMG_UINT32 ui32ElementType,
+                          IMG_UINT32 ui32ElementCount,
+                          IMG_UINT32 ui32PdumpFlags)
+{
+       PVRSRV_ERROR eError;
+       IMG_DEV_VIRTADDR sDevAddrStart;
+
+       sDevAddrStart = psMemDesc->psImport->sDeviceImport.sDevVAddr;
+       sDevAddrStart.uiAddr += psMemDesc->uiOffset;
+       sDevAddrStart.uiAddr += uiOffset;
+
+       eError = BridgePDumpDataDescriptor(GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+                                          psMemDesc->psImport->sDeviceImport.psHeap->psCtx->hDevMemServerContext,
+                                       OSStringLength(pszFilename) + 1,
+                                          pszFilename,
+                                          sDevAddrStart,
+                                          uiSize,
+                                          ui32HeaderType,
+                                          ui32ElementType,
+                                          ui32ElementCount,
+                                          ui32PdumpFlags);
+
+       PVR_LOG_IF_ERROR(eError, "BridgePDumpDataDescriptor");
+       /* If PDump was rejected for this device, suppress silently */
+       if (eError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE)
+       {
+               PVR_ASSERT(eError == PVRSRV_OK);
+       }
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemPDumpDevmemPol32(const DEVMEM_MEMDESC *psMemDesc,
+                       IMG_DEVMEM_OFFSET_T uiOffset,
+                       IMG_UINT32 ui32Value,
+                       IMG_UINT32 ui32Mask,
+                       PDUMP_POLL_OPERATOR eOperator,
+                       PDUMP_FLAGS_T ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError;
+       IMG_DEVMEM_SIZE_T uiNumBytes;
+
+       uiNumBytes = 4;
+
+       if (psMemDesc->uiOffset + uiOffset + uiNumBytes > psMemDesc->psImport->uiSize)
+       {
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE, e0);
+       }
+
+       eError = BridgePMRPDumpPol32(GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+                                    psMemDesc->psImport->hPMR,
+                                    psMemDesc->uiOffset + uiOffset,
+                                    ui32Value,
+                                    ui32Mask,
+                                    eOperator,
+                                    ui32PDumpFlags);
+       PVR_GOTO_IF_ERROR(eError, e0);
+
+       return PVRSRV_OK;
+
+       /*
+         error exit paths follow
+        */
+
+e0:
+       /* If PDump was rejected for this device, suppress silently */
+       if (eError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE)
+       {
+               PVR_ASSERT(eError != PVRSRV_OK);
+       }
+       return eError;
+}
+
+#if defined(__KERNEL__)
+IMG_INTERNAL PVRSRV_ERROR
+DevmemPDumpDevmemCheck32(const DEVMEM_MEMDESC *psMemDesc,
+                         IMG_DEVMEM_OFFSET_T uiOffset,
+                         IMG_UINT32 ui32Value,
+                         IMG_UINT32 ui32Mask,
+                         PDUMP_POLL_OPERATOR eOperator,
+                         PDUMP_FLAGS_T ui32PDumpFlags)
+{
+       PVRSRV_ERROR eError;
+       IMG_DEVMEM_SIZE_T uiNumBytes;
+
+       uiNumBytes = 4;
+
+       if (psMemDesc->uiOffset + uiOffset + uiNumBytes >= psMemDesc->psImport->uiSize)
+       {
+               eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+               goto e0;
+       }
+
+       eError = BridgePMRPDumpCheck32(GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+                                    psMemDesc->psImport->hPMR,
+                                    psMemDesc->uiOffset + uiOffset,
+                                    ui32Value,
+                                    ui32Mask,
+                                    eOperator,
+                                    ui32PDumpFlags);
+       if (eError != PVRSRV_OK)
+       {
+               goto e0;
+       }
+
+       return PVRSRV_OK;
+
+       /*
+         error exit paths follow
+        */
+
+e0:
+       /* If PDump was rejected for this device, suppress silently */
+       if (eError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE)
+       {
+               PVR_ASSERT(eError != PVRSRV_OK);
+       }
+       return eError;
+}
+#endif /* defined(__KERNEL__) */
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemPDumpCBP(const DEVMEM_MEMDESC *psMemDesc,
+               IMG_DEVMEM_OFFSET_T uiReadOffset,
+               IMG_DEVMEM_OFFSET_T uiWriteOffset,
+               IMG_DEVMEM_SIZE_T uiPacketSize,
+               IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+       PVRSRV_ERROR eError;
+
+       if ((psMemDesc->uiOffset + uiReadOffset) > psMemDesc->psImport->uiSize)
+       {
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE, e0);
+       }
+
+       eError = BridgePMRPDumpCBP(GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+                                  psMemDesc->psImport->hPMR,
+                                  psMemDesc->uiOffset + uiReadOffset,
+                                  uiWriteOffset,
+                                  uiPacketSize,
+                                  uiBufferSize);
+       PVR_GOTO_IF_ERROR(eError, e0);
+
+       return PVRSRV_OK;
+
+e0:
+       /* If PDump was rejected for this device, suppress silently */
+       if (eError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE)
+       {
+               PVR_ASSERT(eError != PVRSRV_OK);
+       }
+       return eError;
+}
+
+#endif /* PDUMP */
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/common/devicemem_utils.c b/drivers/gpu/drm/img/img-rogue/services/shared/common/devicemem_utils.c
new file mode 100644 (file)
index 0000000..d4416ae
--- /dev/null
@@ -0,0 +1,1259 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management internal utility functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Utility functions used internally by device memory management
+                code.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#include "allocmem.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "ra.h"
+#include "devicemem_utils.h"
+#include "client_mm_bridge.h"
+#include "client_cache_bridge.h"
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+#include "client_ri_bridge.h"
+#if defined(__KERNEL__)
+#include "pvrsrv.h"
+#else
+#include "pvr_bridge_client.h"
+#endif
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "proc_stats.h"
+#endif
+
+#if defined(__KERNEL__)
+#include "srvcore.h"
+#else
+#include "srvcore_intern.h"
+#endif
+
+/*
+       SVM heap management support functions for CPU (un)mapping
+ */
+#define DEVMEM_MAP_SVM_USER_MANAGED_RETRY                              2
+
+static inline PVRSRV_ERROR
+DevmemCPUMapSVMKernelManaged(DEVMEM_HEAP *psHeap,
+               DEVMEM_IMPORT *psImport,
+               IMG_UINT64 *ui64MapAddress)
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT64 ui64SvmMapAddr;
+       IMG_UINT64 ui64SvmMapAddrEnd;
+       IMG_UINT64 ui64SvmHeapAddrEnd;
+
+       /* SVM heap management always has XXX_MANAGER_KERNEL unless we
+          have triggered the fall-back code-path, in which case we
+          should not be calling into this code-path */
+
+       /* Acquiring the CPU virtual address here essentially locks down
+          the virtual address for the duration of the life-cycle of the
+          allocation, until a de-allocation request comes in. Thus the
+          allocation is guaranteed not to change its virtual address on
+          the CPU during its life-time.
+          NOTE: the import might already have been CPU mapped before now;
+          normally this is not a problem, see fall back */
+       eError = DevmemImportStructCPUMap(psImport);
+       if (eError != PVRSRV_OK)
+       {
+               PVR_LOG_ERROR(eError, "DevmemImportStructCPUMap");
+               eError = PVRSRV_ERROR_DEVICEMEM_MAP_FAILED;
+               goto failSVM;
+       }
+
+       /* Supplied kernel mmap virtual address is also device virtual address;
+          calculate the heap & kernel supplied mmap virtual address limits */
+       ui64SvmMapAddr = (IMG_UINT64)(uintptr_t)psImport->sCPUImport.pvCPUVAddr;
+       ui64SvmHeapAddrEnd = psHeap->sBaseAddress.uiAddr + psHeap->uiSize;
+       ui64SvmMapAddrEnd = ui64SvmMapAddr + psImport->uiSize;
+       PVR_ASSERT(ui64SvmMapAddr != (IMG_UINT64)0);
+
+       /* SVM limit test may fail if processor has more virtual address bits than device */
+       if ((ui64SvmMapAddr >= ui64SvmHeapAddrEnd || ui64SvmMapAddrEnd > ui64SvmHeapAddrEnd) ||
+               (ui64SvmMapAddr & ~(ui64SvmHeapAddrEnd - 1)))
+       {
+               /* Unmap the incompatible SVM virtual address; this may not
+                  release the address if it was CPU mapped elsewhere before
+                  the call into this function */
+               DevmemImportStructCPUUnmap(psImport);
+
+               /* Flag incompatible SVM mapping */
+               eError = PVRSRV_ERROR_BAD_MAPPING;
+               goto failSVM;
+       }
+
+       *ui64MapAddress = ui64SvmMapAddr;
+failSVM:
+       /* either OK, MAP_FAILED or BAD_MAPPING */
+       return eError;
+}
+
+static inline void
+DevmemCPUUnmapSVMKernelManaged(DEVMEM_HEAP *psHeap, DEVMEM_IMPORT *psImport)
+{
+       PVR_UNREFERENCED_PARAMETER(psHeap);
+       DevmemImportStructCPUUnmap(psImport);
+}
+
+static inline PVRSRV_ERROR
+DevmemCPUMapSVMUserManaged(DEVMEM_HEAP *psHeap,
+               DEVMEM_IMPORT *psImport,
+               IMG_UINT uiAlign,
+               IMG_UINT64 *ui64MapAddress)
+{
+       RA_LENGTH_T uiAllocatedSize;
+       RA_BASE_T uiAllocatedAddr;
+       IMG_UINT64 ui64SvmMapAddr;
+       IMG_UINT uiRetry = 0;
+       PVRSRV_ERROR eError;
+
+       /* If SVM heap management has transitioned to XXX_MANAGER_USER,
+          this is essentially a fall back approach that ensures we
+          continue to satisfy SVM alloc. This approach is not without
+          hazards in that we may specify a virtual address that is
+          already in use by the user process */
+       PVR_ASSERT(psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_USER);
+
+       /* Normally, for SVM heap allocations, CPUMap _must_ be done
+          before DevMap; ideally the initial CPUMap should be done by
+          SVM functions, though this is not a hard requirement as long
+          as the CPUMap virtual address obtained earlier elsewhere meets
+          the SVM address requirements. This is a fall-back code-path,
+          so we have to test that this assumption holds before we
+          progress any further */
+       OSLockAcquire(psImport->sCPUImport.hLock);
+
+       if (psImport->sCPUImport.ui32RefCount)
+       {
+               /* This SVM heap allocation is already CPU mapped; that earlier,
+                  externally obtained virtual address is responsible for the
+                  XXX_MANAGER_KERNEL failure above. As we are not responsible
+                  for it, we cannot progress any further and so must fail */
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Previously obtained CPU map address not SVM compatible"
+                               , __func__));
+
+               /* Revert SVM heap to DEVMEM_HEAP_MANAGER_KERNEL */
+               psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_KERNEL;
+               PVR_DPF((PVR_DBG_MESSAGE,
+                               "%s: Reverting SVM heap back to kernel managed",
+                               __func__));
+
+               OSLockRelease(psImport->sCPUImport.hLock);
+
+               /* Do we need a more specific error code here? */
+               eError = PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED;
+               goto failSVM;
+       }
+
+       OSLockRelease(psImport->sCPUImport.hLock);
+
+       do
+       {
+               /* Next we proceed to instruct the kernel to use the RA_Alloc supplied
+                  virtual address to map-in this SVM import suballocation; there is no
+                  guarantee that this RA_Alloc virtual address will not collide with an
+                  already in-use VMA range in the process */
+               eError = RA_Alloc(psHeap->psQuantizedVMRA,
+                               psImport->uiSize,
+                               RA_NO_IMPORT_MULTIPLIER,
+                               0, /* flags: this RA doesn't use flags*/
+                               uiAlign,
+                               "SVM_Virtual_Alloc",
+                               &uiAllocatedAddr,
+                               &uiAllocatedSize,
+                               NULL /* don't care about per-import priv data */);
+               if (eError != PVRSRV_OK)
+               {
+                       PVR_LOG_ERROR(eError, "RA_Alloc");
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+                       if (eError == PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL)
+                       {
+                               PVRSRV_ERROR eErr;
+                               eErr = BridgePVRSRVUpdateOOMStats(GetBridgeHandle(psHeap->psCtx->hDevConnection),
+                                                                 PVRSRV_PROCESS_STAT_TYPE_OOM_VIRTMEM_COUNT,
+                                                                 OSGetCurrentProcessID());
+                               PVR_LOG_IF_ERROR(eErr, "BridgePVRSRVUpdateOOMStats");
+                       }
+#endif
+                       goto failSVM;
+               }
+
+               /* No reason for allocated virtual size to be different from
+                  the PMR's size */
+               psImport->sCPUImport.pvCPUVAddr = (void*)(uintptr_t)uiAllocatedAddr;
+               PVR_ASSERT(uiAllocatedSize == psImport->uiSize);
+
+               /* Map the import or allocation using the RA_Alloc virtual address;
+                  the kernel may fail the request if the supplied virtual address
+                  is already in-use in which case we re-try using another virtual
+                  address obtained from the RA_Alloc */
+               eError = DevmemImportStructCPUMap(psImport);
+               if (eError != PVRSRV_OK)
+               {
+                       /* For now we simply discard a failed RA_Alloc() virtual
+                          address (i.e. there is plenty of virtual space); this prevents
+                          us from re-using it and essentially blacklists the address
+                          from future SVM consideration. We exit the fall-back
+                          attempt if the retry count exceeds the fall-back retry limit */
+                       if (uiRetry++ > DEVMEM_MAP_SVM_USER_MANAGED_RETRY)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                               "%s: Cannot find SVM compatible address, bad mapping",
+                                               __func__));
+                               eError = PVRSRV_ERROR_BAD_MAPPING;
+                               goto failSVM;
+                       }
+               }
+               else
+               {
+                       /* Found compatible SVM virtual address, set as device virtual address */
+                       ui64SvmMapAddr = (IMG_UINT64)(uintptr_t)psImport->sCPUImport.pvCPUVAddr;
+               }
+       } while (eError != PVRSRV_OK);
+
+       *ui64MapAddress = ui64SvmMapAddr;
+failSVM:
+       return eError;
+}
+
+static inline void
+DevmemCPUUnmapSVMUserManaged(DEVMEM_HEAP *psHeap, DEVMEM_IMPORT *psImport)
+{
+       RA_BASE_T uiAllocatedAddr;
+
+       /* We only free SVM compatible addresses; all addresses on
+          the blacklist are essentially excluded from future RA_Alloc */
+       uiAllocatedAddr = psImport->sDeviceImport.sDevVAddr.uiAddr;
+       RA_Free(psHeap->psQuantizedVMRA, uiAllocatedAddr);
+
+       DevmemImportStructCPUUnmap(psImport);
+}
+
+static inline PVRSRV_ERROR
+DevmemImportStructDevMapSVM(DEVMEM_HEAP *psHeap,
+               DEVMEM_IMPORT *psImport,
+               IMG_UINT uiAlign,
+               IMG_UINT64 *ui64MapAddress)
+{
+       PVRSRV_ERROR eError;
+
+       switch (psHeap->ui32HeapManagerFlags)
+       {
+       case DEVMEM_HEAP_MANAGER_KERNEL:
+               eError = DevmemCPUMapSVMKernelManaged(psHeap,
+                               psImport,
+                               ui64MapAddress);
+               if (eError == PVRSRV_ERROR_BAD_MAPPING)
+               {
+                       /* If the SVM map address is outside of SVM heap limits,
+                          change heap type to DEVMEM_HEAP_MANAGER_USER */
+                       psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_USER;
+
+                       PVR_DPF((PVR_DBG_WARNING,
+                                       "%s: Kernel managed SVM heap is now user managed",
+                                       __func__));
+
+                       /* Retry using user managed fall-back approach */
+                       eError = DevmemCPUMapSVMUserManaged(psHeap,
+                                       psImport,
+                                       uiAlign,
+                                       ui64MapAddress);
+               }
+               break;
+
+       case DEVMEM_HEAP_MANAGER_USER:
+               eError = DevmemCPUMapSVMUserManaged(psHeap,
+                               psImport,
+                               uiAlign,
+                               ui64MapAddress);
+               break;
+
+       default:
+               eError = PVRSRV_ERROR_INVALID_PARAMS;
+               break;
+       }
+
+       return eError;
+}
+
+static inline void
+DevmemImportStructDevUnmapSVM(DEVMEM_HEAP *psHeap, DEVMEM_IMPORT *psImport)
+{
+       switch (psHeap->ui32HeapManagerFlags)
+       {
+       case DEVMEM_HEAP_MANAGER_KERNEL:
+               DevmemCPUUnmapSVMKernelManaged(psHeap, psImport);
+               break;
+
+       case DEVMEM_HEAP_MANAGER_USER:
+               DevmemCPUUnmapSVMUserManaged(psHeap, psImport);
+               break;
+
+       default:
+               break;
+       }
+}
+
+/*
+       The Devmem import structure is the structure we use
+       to manage memory that is "imported" (which is page
+       granular) from the server into our process; this
+       includes allocations.
+
+       This allows memory to be imported without requiring
+       any CPU or device mapping. Memory can then be mapped
+       into the device or CPU on demand, but neither is
+       required.
+ */
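+
+/* Illustrative sketch of how the import described above is reference counted
+ * by the helpers that follow: a caller that keeps hold of an already-live
+ * import takes an extra reference and drops it when done; the final release
+ * frees the structure (see DevmemImportStructRelease below).
+ *
+ *     DevmemImportStructAcquire(psImport);
+ *     (... keep and use psImport ...)
+ *     if (DevmemImportStructRelease(psImport))
+ *     {
+ *             psImport = NULL;        (last reference: psImport has been freed)
+ *     }
+ */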
+
+IMG_INTERNAL
+void DevmemImportStructAcquire(DEVMEM_IMPORT *psImport)
+{
+       IMG_INT iRefCount = OSAtomicIncrement(&psImport->hRefCount);
+       PVR_UNREFERENCED_PARAMETER(iRefCount);
+       PVR_ASSERT(iRefCount != 1);
+
+       DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+                       __func__,
+                       psImport,
+                       iRefCount-1,
+                       iRefCount);
+}
+
+IMG_INTERNAL
+IMG_BOOL DevmemImportStructRelease(DEVMEM_IMPORT *psImport)
+{
+       IMG_INT iRefCount = OSAtomicDecrement(&psImport->hRefCount);
+       PVR_ASSERT(iRefCount >= 0);
+
+       DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+                       __func__,
+                       psImport,
+                       iRefCount+1,
+                       iRefCount);
+
+       if (iRefCount == 0)
+       {
+               PVRSRV_ERROR eError = DestroyServerResource(psImport->hDevConnection,
+                                                           NULL,
+                                                           BridgePMRUnrefPMR,
+                                                           psImport->hPMR);
+               PVR_ASSERT(eError == PVRSRV_OK);
+
+               OSLockDestroy(psImport->sCPUImport.hLock);
+               OSLockDestroy(psImport->sDeviceImport.hLock);
+               OSLockDestroy(psImport->hLock);
+               OSFreeMem(psImport);
+
+               return IMG_TRUE;
+       }
+
+       return IMG_FALSE;
+}
+
+IMG_INTERNAL
+void DevmemImportDiscard(DEVMEM_IMPORT *psImport)
+{
+       PVR_ASSERT(OSAtomicRead(&psImport->hRefCount) == 0);
+       OSLockDestroy(psImport->sCPUImport.hLock);
+       OSLockDestroy(psImport->sDeviceImport.hLock);
+       OSLockDestroy(psImport->hLock);
+       OSFreeMem(psImport);
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR DevmemMemDescAlloc(DEVMEM_MEMDESC **ppsMemDesc)
+{
+       DEVMEM_MEMDESC *psMemDesc;
+       PVRSRV_ERROR eError;
+
+       /* Must be zeroed in case it needs to be freed before it is initialised */
+       psMemDesc = OSAllocZMem(sizeof(DEVMEM_MEMDESC));
+       PVR_GOTO_IF_NOMEM(psMemDesc, eError, failAlloc);
+
+       eError = OSLockCreate(&psMemDesc->hLock);
+       PVR_GOTO_IF_ERROR(eError, failMDLock);
+
+       eError = OSLockCreate(&psMemDesc->sDeviceMemDesc.hLock);
+       PVR_GOTO_IF_ERROR(eError, failDMDLock);
+
+       eError = OSLockCreate(&psMemDesc->sCPUMemDesc.hLock);
+       PVR_GOTO_IF_ERROR(eError, failCMDLock);
+
+       OSAtomicWrite(&psMemDesc->hRefCount, 0);
+
+       *ppsMemDesc = psMemDesc;
+
+       return PVRSRV_OK;
+
+failCMDLock:
+       OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock);
+failDMDLock:
+       OSLockDestroy(psMemDesc->hLock);
+failMDLock:
+       OSFreeMem(psMemDesc);
+failAlloc:
+       PVR_ASSERT(eError != PVRSRV_OK);
+
+       return eError;
+}
+
+/*
+       Init the MemDesc structure
+ */
+IMG_INTERNAL
+void DevmemMemDescInit(DEVMEM_MEMDESC *psMemDesc,
+               IMG_DEVMEM_OFFSET_T uiOffset,
+               DEVMEM_IMPORT *psImport,
+               IMG_DEVMEM_SIZE_T uiSize)
+{
+       DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+                       __func__,
+                       psMemDesc,
+                       0,
+                       1);
+
+       psMemDesc->psImport = psImport;
+       psMemDesc->uiOffset = uiOffset;
+
+       psMemDesc->sDeviceMemDesc.ui32RefCount = 0;
+       psMemDesc->sCPUMemDesc.ui32RefCount = 0;
+       psMemDesc->uiAllocSize = uiSize;
+       psMemDesc->hPrivData = NULL;
+       psMemDesc->ui32AllocationIndex = DEVICEMEM_HISTORY_ALLOC_INDEX_NONE;
+
+#if defined(DEBUG)
+       psMemDesc->bPoisonOnFree = IMG_FALSE;
+#endif
+
+       OSAtomicWrite(&psMemDesc->hRefCount, 1);
+}
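+
+/* Illustrative sketch of how the two helpers above are typically paired. A
+ * descriptor is allocated first; if anything fails before it can be
+ * initialised against an import, DevmemMemDescDiscard() (below) is the
+ * matching cleanup while the refcount is still zero. The import-acquisition
+ * step is only a placeholder here.
+ *
+ *     eError = DevmemMemDescAlloc(&psMemDesc);
+ *     PVR_GOTO_IF_ERROR(eError, failAlloc);
+ *
+ *     eError = (obtain or sub-allocate a DEVMEM_IMPORT);
+ *     if (eError != PVRSRV_OK)
+ *     {
+ *             DevmemMemDescDiscard(psMemDesc);        (never initialised)
+ *             goto failAlloc;
+ *     }
+ *
+ *     DevmemMemDescInit(psMemDesc, uiOffset, psImport, uiSize);
+ */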
+
+#if defined(DEBUG)
+IMG_INTERNAL
+void DevmemMemDescSetPoF(DEVMEM_MEMDESC *psMemDesc, PVRSRV_MEMALLOCFLAGS_T uiFlags)
+{
+       if (PVRSRV_CHECK_POISON_ON_FREE(uiFlags))
+       {
+               psMemDesc->bPoisonOnFree = IMG_TRUE;
+       }
+}
+#endif
+
+IMG_INTERNAL
+void DevmemMemDescAcquire(DEVMEM_MEMDESC *psMemDesc)
+{
+       IMG_INT iRefCount = 0;
+
+       iRefCount = OSAtomicIncrement(&psMemDesc->hRefCount);
+       DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+                       __func__,
+                       psMemDesc,
+                       iRefCount-1,
+                       iRefCount);
+
+       PVR_UNREFERENCED_PARAMETER(iRefCount);
+}
+
+#if defined(DEBUG)
+static void _DevmemPoisonOnFree(DEVMEM_MEMDESC *psMemDesc)
+{
+       void *pvAddr = NULL;
+       IMG_UINT8 *pui8CPUVAddr;
+       PVRSRV_ERROR eError;
+
+       eError = DevmemCPUMapCheckImportProperties(psMemDesc);
+       PVR_LOG_RETURN_VOID_IF_ERROR(eError, "DevmemCPUMapCheckImportProperties");
+
+       OSLockAcquire(psMemDesc->sCPUMemDesc.hLock);
+       eError = DevmemImportStructCPUMap(psMemDesc->psImport);
+       OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+       PVR_LOG_RETURN_VOID_IF_ERROR(eError, "DevmemImportStructCPUMap");
+
+       pui8CPUVAddr = psMemDesc->psImport->sCPUImport.pvCPUVAddr;
+       pui8CPUVAddr += psMemDesc->uiOffset;
+       pvAddr = pui8CPUVAddr;
+
+       DevmemCPUMemSet(pvAddr,
+                       PVRSRV_POISON_ON_FREE_VALUE,
+                       psMemDesc->uiAllocSize,
+                       psMemDesc->psImport->uiFlags);
+
+       if (PVRSRV_CHECK_CPU_CACHE_COHERENT(psMemDesc->psImport->uiFlags) ||
+           PVRSRV_CHECK_CPU_CACHE_INCOHERENT(psMemDesc->psImport->uiFlags))
+       {
+               eError = BridgeCacheOpExec(GetBridgeHandle(psMemDesc->psImport->hDevConnection),
+                                                                  psMemDesc->psImport->hPMR,
+                                                                  (IMG_UINT64) (uintptr_t)
+                                                                  pvAddr - psMemDesc->uiOffset,
+                                                                  psMemDesc->uiOffset,
+                                                                  psMemDesc->uiAllocSize,
+                                                                  PVRSRV_CACHE_OP_FLUSH);
+               PVR_LOG_IF_ERROR(eError, "BridgeCacheOpExec");
+       }
+
+       DevmemImportStructCPUUnmap(psMemDesc->psImport);
+       pvAddr = NULL;
+}
+#endif
+
+IMG_INTERNAL
+IMG_BOOL DevmemMemDescRelease(DEVMEM_MEMDESC *psMemDesc)
+{
+       IMG_INT iRefCount;
+       PVR_ASSERT(psMemDesc != NULL);
+
+       iRefCount = OSAtomicDecrement(&psMemDesc->hRefCount);
+       PVR_ASSERT(iRefCount >= 0);
+
+       DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+                       __func__,
+                       psMemDesc,
+                       iRefCount+1,
+                       iRefCount);
+
+       if (iRefCount == 0)
+       {
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+               if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psMemDesc->psImport->hDevConnection), PVRSRV_BRIDGE_RI) &&
+                   (psMemDesc->hRIHandle))
+               {
+                       PVRSRV_ERROR eError;
+
+                       eError = DestroyServerResource(psMemDesc->psImport->hDevConnection,
+                                                      NULL,
+                                                      BridgeRIDeleteMEMDESCEntry,
+                                                      psMemDesc->hRIHandle);
+                       PVR_LOG_IF_ERROR(eError, "BridgeRIDeleteMEMDESCEntry");
+               }
+#endif
+
+               OSLockAcquire(psMemDesc->psImport->hLock);
+               if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_SUBALLOCATABLE)
+               {
+                       /* As soon as the first sub-allocation on the psImport is freed
+                        * we might get dirty memory when reusing it.
+                        * We have to clear the ZEROED, CLEAN & POISONED flags */
+                       psMemDesc->psImport->uiProperties &=
+                                       ~(DEVMEM_PROPERTIES_IMPORT_IS_ZEROED |
+                                                       DEVMEM_PROPERTIES_IMPORT_IS_CLEAN |
+                                                       DEVMEM_PROPERTIES_IMPORT_IS_POISONED);
+
+                       OSLockRelease(psMemDesc->psImport->hLock);
+
+#if defined(DEBUG)
+                       if (psMemDesc->bPoisonOnFree)
+                       {
+                               _DevmemPoisonOnFree(psMemDesc);
+                       }
+#endif
+
+                       RA_Free(psMemDesc->psImport->sDeviceImport.psHeap->psSubAllocRA,
+                                       psMemDesc->psImport->sDeviceImport.sDevVAddr.uiAddr +
+                                       psMemDesc->uiOffset);
+               }
+               else
+               {
+                       OSLockRelease(psMemDesc->psImport->hLock);
+                       DevmemImportStructRelease(psMemDesc->psImport);
+               }
+
+               OSLockDestroy(psMemDesc->sCPUMemDesc.hLock);
+               OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock);
+               OSLockDestroy(psMemDesc->hLock);
+               OSFreeMem(psMemDesc);
+
+               return IMG_TRUE;
+       }
+
+       return IMG_FALSE;
+}
+
+IMG_INTERNAL
+void DevmemMemDescDiscard(DEVMEM_MEMDESC *psMemDesc)
+{
+       PVR_ASSERT(OSAtomicRead(&psMemDesc->hRefCount) == 0);
+
+       OSLockDestroy(psMemDesc->sCPUMemDesc.hLock);
+       OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock);
+       OSLockDestroy(psMemDesc->hLock);
+       OSFreeMem(psMemDesc);
+}
+
+
+IMG_INTERNAL
+PVRSRV_ERROR DevmemValidateParams(IMG_DEVMEM_SIZE_T uiSize,
+               IMG_DEVMEM_ALIGN_T uiAlign,
+               PVRSRV_MEMALLOCFLAGS_T *puiFlags)
+{
+       if ((*puiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) &&
+                       (*puiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Zero on Alloc and Poison on Alloc are mutually exclusive.",
+                               __func__));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       if (uiAlign & (uiAlign-1))
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: The requested alignment is not a power of two.",
+                               __func__));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       if (uiSize == 0)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: Please request a non-zero size value.",
+                               __func__));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       /* If zero or poison flags are set we have to have write access to the page. */
+       if (PVRSRV_CHECK_ZERO_ON_ALLOC(*puiFlags) ||
+           PVRSRV_CHECK_POISON_ON_ALLOC(*puiFlags) ||
+#if defined(DEBUG)
+           PVRSRV_CHECK_POISON_ON_FREE(*puiFlags) ||
+#endif
+           PVRSRV_CHECK_CPU_WRITEABLE(*puiFlags))
+       {
+               (*puiFlags) |= PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                               PVRSRV_MEMALLOCFLAG_CPU_READABLE;
+       }
+
+       return PVRSRV_OK;
+}
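+
+/* Illustrative sketch of the behaviour of the validation above for a
+ * caller-supplied flags word (flag values taken from the checks in the
+ * function; the size and alignment arguments are assumed valid here).
+ *
+ *     PVRSRV_MEMALLOCFLAGS_T uiFlags = PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+ *
+ *     eError = DevmemValidateParams(uiSize, uiAlign, &uiFlags);
+ *     (PVRSRV_OK: CPU_READABLE and CPU_WRITEABLE have been added to uiFlags)
+ *
+ *     uiFlags |= PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC;
+ *     eError = DevmemValidateParams(uiSize, uiAlign, &uiFlags);
+ *     (PVRSRV_ERROR_INVALID_PARAMS: zero-on-alloc and poison-on-alloc
+ *      are mutually exclusive)
+ */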
+
+/*
+       Allocate and init an import structure
+ */
+IMG_INTERNAL
+PVRSRV_ERROR DevmemImportStructAlloc(SHARED_DEV_CONNECTION hDevConnection,
+               DEVMEM_IMPORT **ppsImport)
+{
+       DEVMEM_IMPORT *psImport;
+       PVRSRV_ERROR eError;
+
+       psImport = OSAllocMem(sizeof(*psImport));
+       PVR_RETURN_IF_FALSE(psImport != NULL, PVRSRV_ERROR_OUT_OF_MEMORY);
+
+       /* Setup some known bad values for things we don't have yet */
+       psImport->sDeviceImport.hReservation = LACK_OF_RESERVATION_POISON;
+       psImport->sDeviceImport.hMapping = LACK_OF_MAPPING_POISON;
+       psImport->sDeviceImport.psHeap = NULL;
+       psImport->sDeviceImport.bMapped = IMG_FALSE;
+
+       eError = OSLockCreate(&psImport->sDeviceImport.hLock);
+       PVR_GOTO_IF_ERROR(eError, failDIOSLockCreate);
+
+       psImport->sCPUImport.hOSMMapData = NULL;
+       psImport->sCPUImport.pvCPUVAddr = NULL;
+
+       eError = OSLockCreate(&psImport->sCPUImport.hLock);
+       PVR_GOTO_IF_ERROR(eError, failCIOSLockCreate);
+
+       /* Set up common elements */
+       psImport->hDevConnection = hDevConnection;
+
+       /* Setup properties */
+       psImport->uiProperties = 0;
+
+       /* Setup refcounts */
+       psImport->sDeviceImport.ui32RefCount = 0;
+       psImport->sCPUImport.ui32RefCount = 0;
+       OSAtomicWrite(&psImport->hRefCount, 0);
+
+       /* Create the lock */
+       eError = OSLockCreate(&psImport->hLock);
+       PVR_GOTO_IF_ERROR(eError, failILockAlloc);
+
+       *ppsImport = psImport;
+
+       return PVRSRV_OK;
+
+failILockAlloc:
+       OSLockDestroy(psImport->sCPUImport.hLock);
+failCIOSLockCreate:
+       OSLockDestroy(psImport->sDeviceImport.hLock);
+failDIOSLockCreate:
+       OSFreeMem(psImport);
+       PVR_ASSERT(eError != PVRSRV_OK);
+
+       return eError;
+}
+
+/*
+       Initialise the import structure
+ */
+IMG_INTERNAL
+void DevmemImportStructInit(DEVMEM_IMPORT *psImport,
+               IMG_DEVMEM_SIZE_T uiSize,
+               IMG_DEVMEM_ALIGN_T uiAlign,
+               PVRSRV_MEMALLOCFLAGS_T uiFlags,
+               IMG_HANDLE hPMR,
+               DEVMEM_PROPERTIES_T uiProperties)
+{
+       DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+                       __func__,
+                       psImport,
+                       0,
+                       1);
+
+       psImport->uiSize = uiSize;
+       psImport->uiAlign = uiAlign;
+       psImport->uiFlags = uiFlags;
+       psImport->hPMR = hPMR;
+       psImport->uiProperties = uiProperties;
+       OSAtomicWrite(&psImport->hRefCount, 1);
+}
+
+/* Allocate the requested device virtual address region
+ * from the heap */
+static PVRSRV_ERROR DevmemReserveVARange(DEVMEM_HEAP *psHeap,
+                                         DEVMEM_SIZE_T uiSize,
+                                         IMG_UINT uiAlign,
+                                         RA_LENGTH_T *puiAllocatedSize,
+                                         IMG_UINT64 ui64OptionalMapAddress)
+{
+       PVRSRV_ERROR eError;
+
+       /* Allocate space in the VM */
+       eError = RA_Alloc_Range(psHeap->psQuantizedVMRA,
+                                                       uiSize,
+                                                       0,
+                                                       uiAlign,
+                                                       ui64OptionalMapAddress,
+                                                       puiAllocatedSize);
+
+       if (PVRSRV_OK != eError)
+       {
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+               if ((eError == PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL) ||
+                               (eError == PVRSRV_ERROR_RA_REQUEST_VIRT_ADDR_FAIL))
+               {
+                       PVRSRV_ERROR eErr;
+                       eErr = BridgePVRSRVUpdateOOMStats(GetBridgeHandle(psHeap->psCtx->hDevConnection),
+                                                                                       PVRSRV_PROCESS_STAT_TYPE_INVALID_VIRTMEM,
+                                                                                       OSGetCurrentProcessID());
+                       PVR_LOG_IF_ERROR(eErr, "BridgePVRSRVUpdateOOMStats");
+               }
+#endif
+               return eError;
+       }
+
+       /* No reason for the allocated virtual size to be different from
+          the PMR's size */
+       PVR_ASSERT(*puiAllocatedSize == uiSize);
+
+       return PVRSRV_OK;
+}
+
+/*
+       Map an import to the device
+ */
+IMG_INTERNAL
+PVRSRV_ERROR DevmemImportStructDevMap(DEVMEM_HEAP *psHeap,
+               IMG_BOOL bMap,
+               DEVMEM_IMPORT *psImport,
+               IMG_UINT64 ui64OptionalMapAddress)
+{
+       DEVMEM_DEVICE_IMPORT *psDeviceImport;
+       RA_BASE_T uiAllocatedAddr;
+       RA_LENGTH_T uiAllocatedSize;
+       IMG_DEV_VIRTADDR sBase;
+       IMG_HANDLE hReservation;
+       PVRSRV_ERROR eError;
+       IMG_UINT uiAlign;
+       IMG_BOOL bDestroyed = IMG_FALSE;
+
+       /* Round the provided import alignment to the configured heap alignment */
+       uiAlign = 1ULL << psHeap->uiLog2ImportAlignment;
+       uiAlign = (psImport->uiAlign + uiAlign - 1) & ~(uiAlign-1);
+
+       psDeviceImport = &psImport->sDeviceImport;
+
+       OSLockAcquire(psDeviceImport->hLock);
+       DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+                       __func__,
+                       psImport,
+                       psDeviceImport->ui32RefCount,
+                       psDeviceImport->ui32RefCount+1);
+
+       if (psDeviceImport->ui32RefCount++ == 0)
+       {
+               DevmemImportStructAcquire(psImport);
+
+               OSAtomicIncrement(&psHeap->hImportCount);
+
+               if (PVRSRV_CHECK_SVM_ALLOC(psImport->uiFlags))
+               {
+                       /*  SVM (shared virtual memory) imports or allocations always
+                               need to acquire the CPU virtual address first, as that
+                               address is used to map the allocation into the device
+                               virtual address space; i.e. the virtual address of the
+                               allocation must be identical for both the CPU and the GPU. */
+                       eError = DevmemImportStructDevMapSVM(psHeap,
+                                       psImport,
+                                       uiAlign,
+                                       &ui64OptionalMapAddress);
+                       PVR_GOTO_IF_ERROR(eError, failVMRAAlloc);
+               }
+
+               if (ui64OptionalMapAddress == 0)
+               {
+                       /* If the heap is _completely_ managed by USER or KERNEL, we shouldn't
+                        * be here, as this is the RA manager code-path */
+                       if (psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_USER ||
+                               psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_KERNEL)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                               psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_USER ?
+                                               "%s: Heap is user managed, please use PVRSRVMapToDeviceAddress().":
+                                               "%s: Heap is kernel managed, use right allocation flags (e.g. SVM).",
+                                               __func__));
+                               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, failVMRAAlloc);
+                       }
+
+                       if (psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_UNKNOWN)
+                       {
+                               /* Only set the heap manager (to RA) at first map when the heap
+                                * manager is unknown. It might be a dual heap (both user and RA
+                                * managed), in which case the heap manager is set at creation time */
+                               psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_RA;
+                       }
+
+                       /* Allocate space in the VM */
+                       eError = RA_Alloc(psHeap->psQuantizedVMRA,
+                                       psImport->uiSize,
+                                       RA_NO_IMPORT_MULTIPLIER,
+                                       0, /* flags: this RA doesn't use flags*/
+                                       uiAlign,
+                                       "Virtual_Alloc",
+                                       &uiAllocatedAddr,
+                                       &uiAllocatedSize,
+                                       NULL /* don't care about per-import priv data */
+                       );
+                       if (PVRSRV_OK != eError)
+                       {
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+                               if (eError == PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL)
+                               {
+                                       PVRSRV_ERROR eErr;
+                                       eErr = BridgePVRSRVUpdateOOMStats(GetBridgeHandle(psHeap->psCtx->hDevConnection),
+                                                                         PVRSRV_PROCESS_STAT_TYPE_OOM_VIRTMEM_COUNT,
+                                                                         OSGetCurrentProcessID());
+                                       PVR_LOG_IF_ERROR(eErr, "BridgePVRSRVUpdateOOMStats");
+                               }
+#endif
+                               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_DEVICE_VM, failVMRAAlloc);
+                       }
+
+                       /* No reason for the allocated virtual size to be different from
+                          the PMR's size */
+                       PVR_ASSERT(uiAllocatedSize == psImport->uiSize);
+
+                       sBase.uiAddr = uiAllocatedAddr;
+
+               }
+               else
+               {
+                       IMG_UINT64 ui64ValidEndAddr;
+
+                       /* Ensure supplied ui64OptionalMapAddress is within heap range */
+                       ui64ValidEndAddr = psHeap->sBaseAddress.uiAddr + psHeap->uiSize;
+                       if ((ui64OptionalMapAddress + psImport->uiSize > ui64ValidEndAddr) ||
+                                       (ui64OptionalMapAddress < psHeap->sBaseAddress.uiAddr))
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                               "%s: ui64OptionalMapAddress %p is outside of heap limits <%p:%p>."
+                                               , __func__
+                                               , (void*)(uintptr_t)ui64OptionalMapAddress
+                                               , (void*)(uintptr_t)psHeap->sBaseAddress.uiAddr
+                                               , (void*)(uintptr_t)ui64ValidEndAddr));
+                               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, failVMRAAlloc);
+                       }
+
+                       switch (psHeap->ui32HeapManagerFlags)
+                       {
+                               case DEVMEM_HEAP_MANAGER_UNKNOWN:
+                                       /* DEVMEM_HEAP_MANAGER_USER can apply to _any_ heap and can only
+                                        * be determined here. This heap type transitions from
+                                        * DEVMEM_HEAP_MANAGER_UNKNOWN to DEVMEM_HEAP_MANAGER_USER on
+                                        * 1st alloc. */
+                                       psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_USER;
+                                       break;
+
+                               case DEVMEM_HEAP_MANAGER_USER:
+                               case DEVMEM_HEAP_MANAGER_KERNEL:
+                                       if (! psHeap->uiSize)
+                                       {
+                                               PVR_DPF((PVR_DBG_ERROR,
+                                                               psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_USER ?
+                                                                               "%s: Heap DEVMEM_HEAP_MANAGER_USER is disabled.":
+                                                                               "%s: Heap DEVMEM_HEAP_MANAGER_KERNEL is disabled."
+                                                                               , __func__));
+                                               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_HEAP, failVMRAAlloc);
+                                       }
+                                       break;
+
+                               case DEVMEM_HEAP_MANAGER_DUAL_USER_RA:
+                                       /* When the heap is dual managed, ensure the supplied
+                                        * ui64OptionalMapAddress and import size are within the
+                                        * heap's address space range */
+                                       if (ui64OptionalMapAddress + psImport->uiSize <=
+                                                       psHeap->sBaseAddress.uiAddr + psHeap->uiReservedRegionSize)
+                                       {
+                                               break;
+                                       }
+                                       else
+                                       {
+                                               /* Allocate requested VM range */
+                                               eError = DevmemReserveVARange(psHeap,
+                                                                                                       psImport->uiSize,
+                                                                                                       uiAlign,
+                                                                                                       &uiAllocatedSize,
+                                                                                                       ui64OptionalMapAddress);
+                                               if (eError != PVRSRV_OK)
+                                               {
+                                                       PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_VA_ALLOC_FAILED, failVMRAAlloc);
+                                               }
+
+                                       }
+                                       break;
+                               case DEVMEM_HEAP_MANAGER_RA:
+                                       /* Allocate requested VM range */
+                                       eError = DevmemReserveVARange(psHeap,
+                                                                                               psImport->uiSize,
+                                                                                               uiAlign,
+                                                                                               &uiAllocatedSize,
+                                                                                               ui64OptionalMapAddress);
+                                       if (eError != PVRSRV_OK)
+                                       {
+                                               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_VA_ALLOC_FAILED, failVMRAAlloc);
+                                       }
+                                       break;
+
+                               default:
+                                       break;
+                       }
+
+                       if (ui64OptionalMapAddress & ((1 << psHeap->uiLog2Quantum) - 1))
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                               "%s: Invalid address to map to. Please provide an "
+                                               "address aligned to a page multiple of the heap."
+                                               , __func__));
+                               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, failVMRAAlloc);
+                       }
+
+                       if (psImport->uiSize & ((1 << psHeap->uiLog2Quantum) - 1))
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                               "%s: Invalid heap to map to. "
+                                               "Please choose a heap that can handle smaller page sizes."
+                                               , __func__));
+                               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, failVMRAAlloc);
+                       }
+
+                       uiAllocatedAddr = ui64OptionalMapAddress;
+                       uiAllocatedSize = psImport->uiSize;
+                       sBase.uiAddr = uiAllocatedAddr;
+               }
+
+               if (psHeap->bPremapped)
+               {
+                       /* no virtual address reservation and mapping are required for memory that's already mapped */
+                       psDeviceImport->hReservation = LACK_OF_RESERVATION_POISON;
+                       psDeviceImport->hMapping = LACK_OF_MAPPING_POISON;
+               }
+               else
+               {
+                       /* Setup page tables for the allocated VM space */
+                       eError = BridgeDevmemIntReserveRange(GetBridgeHandle(psHeap->psCtx->hDevConnection),
+                                       psHeap->hDevMemServerHeap,
+                                       sBase,
+                                       uiAllocatedSize,
+                                       &hReservation);
+                       PVR_GOTO_IF_ERROR(eError, failReserve);
+
+                       if (bMap)
+                       {
+                               PVRSRV_MEMALLOCFLAGS_T uiMapFlags;
+
+                               uiMapFlags = psImport->uiFlags & PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK;
+
+                               /* Actually map the PMR to allocated VM space */
+                               eError = BridgeDevmemIntMapPMR(GetBridgeHandle(psHeap->psCtx->hDevConnection),
+                                               psHeap->hDevMemServerHeap,
+                                               hReservation,
+                                               psImport->hPMR,
+                                               uiMapFlags,
+                                               &psDeviceImport->hMapping);
+                               PVR_GOTO_IF_ERROR(eError, failMap);
+
+                               psDeviceImport->bMapped = IMG_TRUE;
+                       }
+
+                       psDeviceImport->hReservation = hReservation;
+               }
+
+               /* Setup device mapping specific parts of the mapping info */
+               psDeviceImport->sDevVAddr.uiAddr = uiAllocatedAddr;
+               psDeviceImport->psHeap = psHeap;
+       }
+       else
+       {
+               /*
+                       Check that we've been asked to map it into the
+                       same heap 2nd time around
+                */
+               if (psHeap != psDeviceImport->psHeap)
+               {
+                       PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_HEAP, failParams);
+               }
+       }
+       OSLockRelease(psDeviceImport->hLock);
+
+       return PVRSRV_OK;
+
+failMap:
+       if (!psHeap->bPremapped)
+       {
+               BridgeDevmemIntUnreserveRange(GetBridgeHandle(psHeap->psCtx->hDevConnection),
+                               hReservation);
+       }
+failReserve:
+       if (ui64OptionalMapAddress == 0)
+       {
+               RA_Free(psHeap->psQuantizedVMRA,
+                               uiAllocatedAddr);
+       }
+failVMRAAlloc:
+       if ((ui64OptionalMapAddress) && PVRSRV_CHECK_SVM_ALLOC(psImport->uiFlags))
+       {
+               DevmemImportStructDevUnmapSVM(psHeap, psImport);
+       }
+       bDestroyed = DevmemImportStructRelease(psImport);
+       OSAtomicDecrement(&psHeap->hImportCount);
+failParams:
+       if (!bDestroyed)
+       {
+               psDeviceImport->ui32RefCount--;
+               OSLockRelease(psDeviceImport->hLock);
+       }
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
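
The alignment checks in the device-map path above reject any explicitly requested address or import size that is not a multiple of the heap quantum. As a concrete, standalone illustration (not driver code; the 4 KiB quantum is an assumption for the example):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t uiLog2Quantum = 12;                    /* assume a 4 KiB heap quantum */
	uint64_t uiMask = (1ull << uiLog2Quantum) - 1;  /* 0xFFF */

	/* Same test as the device-map path above: any low bits set means the
	 * address (or size) is not quantum-aligned and the mapping is refused. */
	printf("0x10002000 aligned: %d\n", (0x10002000ull & uiMask) == 0);  /* 1 */
	printf("0x10002100 aligned: %d\n", (0x10002100ull & uiMask) == 0);  /* 0 */
	return 0;
}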
+
+/*
+       Unmap an import from the Device
+ */
+IMG_INTERNAL
+IMG_BOOL DevmemImportStructDevUnmap(DEVMEM_IMPORT *psImport)
+{
+       PVRSRV_ERROR eError;
+       DEVMEM_DEVICE_IMPORT *psDeviceImport;
+
+       psDeviceImport = &psImport->sDeviceImport;
+
+       OSLockAcquire(psDeviceImport->hLock);
+       DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+                       __func__,
+                       psImport,
+                       psDeviceImport->ui32RefCount,
+                       psDeviceImport->ui32RefCount-1);
+
+       if (--psDeviceImport->ui32RefCount == 0)
+       {
+               DEVMEM_HEAP *psHeap = psDeviceImport->psHeap;
+
+               if (!psHeap->bPremapped)
+               {
+                       if (psDeviceImport->bMapped)
+                       {
+                               eError = DestroyServerResource(psImport->hDevConnection,
+                                                              NULL,
+                                                              BridgeDevmemIntUnmapPMR,
+                                                              psDeviceImport->hMapping);
+                               PVR_ASSERT(eError == PVRSRV_OK);
+                       }
+
+                       eError = DestroyServerResource(psImport->hDevConnection,
+                                                      NULL,
+                                                      BridgeDevmemIntUnreserveRange,
+                                                      psDeviceImport->hReservation);
+                       PVR_ASSERT(eError == PVRSRV_OK);
+               }
+
+               psDeviceImport->bMapped = IMG_FALSE;
+               psDeviceImport->hMapping = LACK_OF_MAPPING_POISON;
+               psDeviceImport->hReservation = LACK_OF_RESERVATION_POISON;
+
+               /* DEVMEM_HEAP_MANAGER_RA can also come from a dual-managed heap, in which
+                  case we need to check whether the allocated VA falls within the RA-managed range */
+               if ((psHeap->ui32HeapManagerFlags & DEVMEM_HEAP_MANAGER_RA) &&
+                   psDeviceImport->sDevVAddr.uiAddr >= (psHeap->sBaseAddress.uiAddr + psHeap->uiReservedRegionSize) &&
+                   psDeviceImport->sDevVAddr.uiAddr < (psHeap->sBaseAddress.uiAddr + psHeap->uiSize))
+               {
+                       RA_Free(psHeap->psQuantizedVMRA, psDeviceImport->sDevVAddr.uiAddr);
+               }
+
+               if (PVRSRV_CHECK_SVM_ALLOC(psImport->uiFlags))
+               {
+                       DevmemImportStructDevUnmapSVM(psHeap, psImport);
+               }
+
+               OSLockRelease(psDeviceImport->hLock);
+
+               DevmemImportStructRelease(psImport);
+
+               OSAtomicDecrement(&psHeap->hImportCount);
+
+               return IMG_TRUE;
+       }
+       else
+       {
+               OSLockRelease(psDeviceImport->hLock);
+               return IMG_FALSE;
+       }
+}
+
+/*
+       Map an import into the CPU
+ */
+IMG_INTERNAL
+PVRSRV_ERROR DevmemImportStructCPUMap(DEVMEM_IMPORT *psImport)
+{
+       PVRSRV_ERROR eError;
+       DEVMEM_CPU_IMPORT *psCPUImport;
+       size_t uiMappingLength;
+
+       psCPUImport = &psImport->sCPUImport;
+
+       OSLockAcquire(psCPUImport->hLock);
+       DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+                       __func__,
+                       psImport,
+                       psCPUImport->ui32RefCount,
+                       psCPUImport->ui32RefCount+1);
+
+       if (psCPUImport->ui32RefCount++ == 0)
+       {
+               DevmemImportStructAcquire(psImport);
+
+               eError = OSMMapPMR(GetBridgeHandle(psImport->hDevConnection),
+                               psImport->hPMR,
+                               psImport->uiSize,
+                               psImport->uiFlags,
+                               &psCPUImport->hOSMMapData,
+                               &psCPUImport->pvCPUVAddr,
+                               &uiMappingLength);
+               PVR_GOTO_IF_ERROR(eError, failMap);
+
+               /* MappingLength might be rounded up to page size */
+               PVR_ASSERT(uiMappingLength >= psImport->uiSize);
+       }
+       OSLockRelease(psCPUImport->hLock);
+
+       return PVRSRV_OK;
+
+failMap:
+       psCPUImport->ui32RefCount--;
+       if (!DevmemImportStructRelease(psImport))
+       {
+               OSLockRelease(psCPUImport->hLock);
+       }
+       PVR_ASSERT(eError != PVRSRV_OK);
+       return eError;
+}
+
+/*
+       Unmap an import from the CPU
+ */
+IMG_INTERNAL
+void DevmemImportStructCPUUnmap(DEVMEM_IMPORT *psImport)
+{
+       DEVMEM_CPU_IMPORT *psCPUImport;
+
+       psCPUImport = &psImport->sCPUImport;
+
+       OSLockAcquire(psCPUImport->hLock);
+       DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+                       __func__,
+                       psImport,
+                       psCPUImport->ui32RefCount,
+                       psCPUImport->ui32RefCount-1);
+
+       if (--psCPUImport->ui32RefCount == 0)
+       {
+               /* psImport->uiSize is a 64-bit quantity whereas the 5th
+                * argument to OSMUnmapPMR is a 32-bit quantity on 32-bit systems
+                * hence a compiler warning of implicit cast and loss of data.
+                * Added explicit cast and assert to remove warning.
+                */
+#if defined(__linux__) && defined(__i386__)
+               PVR_ASSERT(psImport->uiSize<IMG_UINT32_MAX);
+#endif
+               OSMUnmapPMR(GetBridgeHandle(psImport->hDevConnection),
+                               psImport->hPMR,
+                               psCPUImport->hOSMMapData,
+                               psCPUImport->pvCPUVAddr,
+                               (size_t)psImport->uiSize);
+
+               psCPUImport->hOSMMapData = NULL;
+               psCPUImport->pvCPUVAddr = NULL;
+
+               OSLockRelease(psCPUImport->hLock);
+
+               DevmemImportStructRelease(psImport);
+       }
+       else
+       {
+               OSLockRelease(psCPUImport->hLock);
+       }
+}
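
A minimal sketch of how the CPU map/unmap helpers above pair up (error handling trimmed; assumes the devicemem_utils.h declarations are in scope). The first map performs the real OSMMapPMR(), nested calls only bump the reference count, and the last unmap tears the mapping down:

static void _CPUMapPairingSketch(DEVMEM_IMPORT *psImport)
{
	void *pvCpuVAddr;

	if (DevmemImportStructCPUMap(psImport) != PVRSRV_OK)    /* refcount 0 -> 1: OSMMapPMR() */
	{
		return;
	}

	pvCpuVAddr = psImport->sCPUImport.pvCPUVAddr;           /* valid until the last unmap */
	(void)pvCpuVAddr;

	(void)DevmemImportStructCPUMap(psImport);               /* 1 -> 2: no new mapping made */

	DevmemImportStructCPUUnmap(psImport);                   /* 2 -> 1: mapping kept alive */
	DevmemImportStructCPUUnmap(psImport);                   /* 1 -> 0: OSMUnmapPMR() */
}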
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/common/devicememx_pdump.c b/drivers/gpu/drm/img/img-rogue/services/shared/common/devicememx_pdump.c
new file mode 100644 (file)
index 0000000..9950d4d
--- /dev/null
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@File
+@Title          Shared X device memory management PDump functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements common (client & server) PDump functions for the
+                memory management code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+
+#if defined(PDUMP)
+
+#include "devicememx_pdump.h"
+#include "pdump.h"
+#include "client_pdumpmm_bridge.h"
+#include "devicemem_utils.h"
+
+IMG_INTERNAL void
+DevmemXPDumpLoadMem(DEVMEMX_PHYSDESC *psMemDescPhys,
+                    IMG_DEVMEM_OFFSET_T uiOffset,
+                    IMG_DEVMEM_SIZE_T uiSize,
+                    PDUMP_FLAGS_T uiPDumpFlags)
+{
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(uiSize != 0);
+       PVR_ASSERT(uiOffset + uiSize <= (psMemDescPhys->uiNumPages <<
+                       psMemDescPhys->uiLog2PageSize));
+
+       eError = BridgePMRPDumpLoadMem(psMemDescPhys->hBridge,
+                                      psMemDescPhys->hPMR,
+                                      uiOffset,
+                                      uiSize,
+                                      uiPDumpFlags,
+                                      IMG_FALSE);
+       PVR_LOG_IF_ERROR(eError, "BridgePMRPDumpLoadMem");
+}
+
+#endif
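
To make the bounds assertion above concrete (illustrative numbers only): for a physical descriptor with uiNumPages = 4 and uiLog2PageSize = 12 (4 KiB pages), uiNumPages << uiLog2PageSize is 0x4000, so any uiOffset/uiSize pair whose sum exceeds 16 KiB trips the PVR_ASSERT before the bridge call is made.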
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/common/hash.c b/drivers/gpu/drm/img/img-rogue/services/shared/common/hash.c
new file mode 100644 (file)
index 0000000..994ae58
--- /dev/null
@@ -0,0 +1,734 @@
+/*************************************************************************/ /*!
+@File
+@Title          Self scaling hash tables.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description
+   Implements simple self scaling hash tables. Hash collisions are handled by
+   chaining entries together. Hash tables are increased in size when they
+   become more than 75% full and decreased in size when less than 25%
+   full. Hash tables are never decreased below their initial size.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* include/ */
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+/* services/shared/include/ */
+#include "hash.h"
+
+/* services/client/include/ or services/server/include/ */
+#include "osfunc_common.h"
+#include "allocmem.h"
+
+//#define PERF_DBG_RESIZE
+#if !defined(__KERNEL__) && defined(PERF_DBG_RESIZE)
+#include <sys/time.h>
+#endif
+
+#if defined(__KERNEL__)
+#include "pvrsrv.h"
+#endif
+
+#define        KEY_TO_INDEX(pHash, key, uSize) \
+       ((pHash)->pfnHashFunc((pHash)->uKeySize, (key), (uSize)) % (uSize))
+
+#define        KEY_COMPARE(pHash, pKey1, pKey2) \
+       ((pHash)->pfnKeyComp((pHash)->uKeySize, (pKey1), (pKey2)))
+
+#if defined(__linux__) && defined(__KERNEL__)
+#define _AllocMem OSAllocMemNoStats
+#define _AllocZMem OSAllocZMemNoStats
+#define _FreeMem OSFreeMemNoStats
+#else
+#define _AllocMem OSAllocMem
+#define _AllocZMem OSAllocZMem
+#define _FreeMem OSFreeMem
+#endif
+
+#define NO_SHRINK 0
+
+/* Each entry in a hash table is placed into a bucket */
+typedef struct _BUCKET_
+{
+       struct _BUCKET_ *pNext; /*!< the next bucket on the same chain */
+       uintptr_t v;            /*!< entry value */
+       uintptr_t k[];          /* PRQA S 0642 */
+                               /* override dynamic array declaration warning */
+} BUCKET;
+
+struct _HASH_TABLE_
+{
+       IMG_UINT32 uSize;            /*!< current size of the hash table */
+       IMG_UINT32 uCount;           /*!< number of entries currently in the hash table */
+       IMG_UINT32 uMinimumSize;     /*!< the minimum size that the hash table should be re-sized to */
+       IMG_UINT32 uKeySize;         /*!< size of key in bytes */
+       IMG_UINT32 uShrinkThreshold; /*!< The threshold at which to trigger a shrink */
+       IMG_UINT32 uGrowThreshold;   /*!< The threshold at which to trigger a grow */
+       HASH_FUNC*     pfnHashFunc;  /*!< hash function */
+       HASH_KEY_COMP* pfnKeyComp;   /*!< key comparison function */
+       BUCKET**   ppBucketTable;    /*!< the hash table array */
+#if defined(DEBUG)
+       const char*      pszFile;
+       unsigned int     ui32LineNum;
+#endif
+};
+
+/*************************************************************************/ /*!
+@Function       HASH_Func_Default
+@Description    Hash function intended for hashing keys composed of uintptr_t
+                arrays.
+@Input          uKeySize     The size of the hash key, in bytes.
+@Input          pKey         A pointer to the key to hash.
+@Input          uHashTabLen  The length of the hash table.
+@Return         The hash value.
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_UINT32
+HASH_Func_Default(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen)
+{
+       uintptr_t *p = (uintptr_t *)pKey;
+       IMG_UINT32 uKeyLen = uKeySize / sizeof(uintptr_t);
+       IMG_UINT32 ui;
+       IMG_UINT32 uHashKey = 0;
+
+       PVR_UNREFERENCED_PARAMETER(uHashTabLen);
+
+       PVR_ASSERT((uKeySize % sizeof(uintptr_t)) == 0);
+
+       for (ui = 0; ui < uKeyLen; ui++)
+       {
+               IMG_UINT32 uHashPart = (IMG_UINT32)*p++;
+
+               uHashPart += (uHashPart << 12);
+               uHashPart ^= (uHashPart >> 22);
+               uHashPart += (uHashPart << 4);
+               uHashPart ^= (uHashPart >> 9);
+               uHashPart += (uHashPart << 10);
+               uHashPart ^= (uHashPart >> 2);
+               uHashPart += (uHashPart << 7);
+               uHashPart ^= (uHashPart >> 12);
+
+               uHashKey += uHashPart;
+       }
+
+       return uHashKey;
+}
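
As a standalone illustration of the avalanche behaviour above (not driver code; the 16-bucket table size is an arbitrary example), the same shift/add/xor sequence applied to two adjacent key words spreads them across the table:

#include <stdio.h>
#include <stdint.h>

/* Same mixing sequence as the default hash function, applied to one word. */
static uint32_t MixWord(uint32_t h)
{
	h += (h << 12); h ^= (h >> 22);
	h += (h << 4);  h ^= (h >> 9);
	h += (h << 10); h ^= (h >> 2);
	h += (h << 7);  h ^= (h >> 12);
	return h;
}

int main(void)
{
	/* Adjacent keys land in unrelated buckets of a 16-entry table. */
	printf("%u %u\n",
	       (unsigned)(MixWord(0x1000) % 16u),
	       (unsigned)(MixWord(0x1001) % 16u));
	return 0;
}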
+
+/*************************************************************************/ /*!
+@Function       HASH_Key_Comp_Default
+@Description    Compares keys composed of uintptr_t arrays.
+@Input          uKeySize     The size of the hash key, in bytes.
+@Input          pKey1        Pointer to first hash key to compare.
+@Input          pKey2        Pointer to second hash key to compare.
+@Return         IMG_TRUE  - The keys match.
+                IMG_FALSE - The keys don't match.
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_BOOL
+HASH_Key_Comp_Default(size_t uKeySize, void *pKey1, void *pKey2)
+{
+       uintptr_t *p1 = (uintptr_t *)pKey1;
+       uintptr_t *p2 = (uintptr_t *)pKey2;
+       IMG_UINT32 uKeyLen = uKeySize / sizeof(uintptr_t);
+       IMG_UINT32 ui;
+
+       PVR_ASSERT((uKeySize % sizeof(uintptr_t)) == 0);
+
+       for (ui = 0; ui < uKeyLen; ui++)
+       {
+               if (*p1++ != *p2++)
+                       return IMG_FALSE;
+       }
+
+       return IMG_TRUE;
+}
+
+/*************************************************************************/ /*!
+@Function       _ChainInsert
+@Description    Insert a bucket into the appropriate hash table chain.
+@Input          pBucket       The bucket
+@Input          ppBucketTable The hash table
+@Input          uSize         The size of the hash table
+@Return         None
+*/ /**************************************************************************/
+static void
+_ChainInsert(HASH_TABLE *pHash, BUCKET *pBucket, BUCKET **ppBucketTable, IMG_UINT32 uSize)
+{
+       IMG_UINT32 uIndex;
+
+       /* We assume that all parameters passed by the caller are valid. */
+       PVR_ASSERT(pBucket != NULL);
+       PVR_ASSERT(ppBucketTable != NULL);
+       PVR_ASSERT(uSize != 0);
+
+       uIndex = KEY_TO_INDEX(pHash, pBucket->k, uSize);        /* PRQA S 0432,0541 */ /* ignore dynamic array warning */
+       pBucket->pNext = ppBucketTable[uIndex];
+       ppBucketTable[uIndex] = pBucket;
+}
+
+/*************************************************************************/ /*!
+@Function       _Rehash
+@Description    Iterate over every entry in an old hash table and rehash into
+                the new table.
+@Input          ppOldTable   The old hash table
+@Input          uOldSize     The size of the old hash table
+@Input          ppNewTable   The new hash table
+@Input          uNewSize     The size of the new hash table
+@Return         None
+*/ /**************************************************************************/
+static void
+_Rehash(HASH_TABLE *pHash,
+                BUCKET **ppOldTable, IMG_UINT32 uOldSize,
+                BUCKET **ppNewTable, IMG_UINT32 uNewSize)
+{
+       IMG_UINT32 uIndex;
+       for (uIndex=0; uIndex< uOldSize; uIndex++)
+       {
+               BUCKET *pBucket;
+               pBucket = ppOldTable[uIndex];
+               while (pBucket != NULL)
+               {
+                       BUCKET *pNextBucket = pBucket->pNext;
+                       _ChainInsert(pHash, pBucket, ppNewTable, uNewSize);
+                       pBucket = pNextBucket;
+               }
+       }
+}
+
+/*************************************************************************/ /*!
+@Function       _Resize
+@Description    Attempt to resize a hash table. Failure to allocate a new,
+                larger hash table is not considered a hard failure; we simply
+                continue and allow the table to fill up, the effect being that
+                hash chains become longer.
+@Input          pHash        Hash table to resize.
+@Input          uNewSize     Required table size.
+@Return         IMG_TRUE Success
+                IMG_FALSE Failed
+*/ /**************************************************************************/
+static IMG_BOOL
+_Resize(HASH_TABLE *pHash, IMG_UINT32 uNewSize)
+{
+       BUCKET **ppNewTable;
+       IMG_UINT32 uiThreshold = uNewSize >> 2;
+#if !defined(__KERNEL__) && defined(PERF_DBG_RESIZE)
+       struct timeval start, end;
+#endif
+
+       if (uNewSize == pHash->uSize)
+       {
+               return IMG_TRUE;
+       }
+
+#if !defined(__KERNEL__) && defined(PERF_DBG_RESIZE)
+       gettimeofday(&start, NULL);
+#endif
+
+       ppNewTable = _AllocZMem(sizeof(BUCKET *) * uNewSize);
+       if (ppNewTable == NULL)
+       {
+               return IMG_FALSE;
+       }
+
+       _Rehash(pHash, pHash->ppBucketTable, pHash->uSize, ppNewTable, uNewSize);
+
+       _FreeMem(pHash->ppBucketTable);
+
+#if !defined(__KERNEL__) && defined(PERF_DBG_RESIZE)
+       gettimeofday(&end, NULL);
+       if (start.tv_usec > end.tv_usec)
+       {
+               end.tv_usec = 1000000 - start.tv_usec + end.tv_usec;
+       }
+       else
+       {
+               end.tv_usec -= start.tv_usec;
+       }
+
+       PVR_DPF((PVR_DBG_ERROR, "%s: H:%p O:%d N:%d C:%d G:%d S:%d T:%06luus", __func__, pHash, pHash->uSize, uNewSize, pHash->uCount, pHash->uGrowThreshold, pHash->uShrinkThreshold, end.tv_usec));
+#endif
+
+       /*not nulling pointer, being reassigned just below*/
+       pHash->ppBucketTable = ppNewTable;
+       pHash->uSize = uNewSize;
+
+       pHash->uGrowThreshold = uiThreshold * 3;
+       pHash->uShrinkThreshold = (uNewSize <= pHash->uMinimumSize) ? NO_SHRINK : uiThreshold;
+
+       return IMG_TRUE;
+}
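
As a worked example of the threshold arithmetic above (illustrative sizes only): resizing to uNewSize = 16 gives uiThreshold = 4, so uGrowThreshold becomes 12 (the table grows once it holds more than 12 entries, i.e. above 75% load) and uShrinkThreshold becomes 4 (shrink below 25% load), unless 16 is already the minimum size, in which case shrinking is disabled via NO_SHRINK.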
+
+
+/*************************************************************************/ /*!
+@Function       HASH_Create_Extended
+@Description    Create a self scaling hash table, using the supplied key size,
+                and the supplied hash and key comparison functions.
+@Input          uInitialLen  Initial and minimum length of the hash table,
+                             where the length refers to the number of entries
+                             in the hash table, not its size in bytes.
+@Input          uKeySize     The size of the key, in bytes.
+@Input          pfnHashFunc  Pointer to hash function.
+@Input          pfnKeyComp   Pointer to key comparison function.
+@Return         NULL or hash table handle.
+*/ /**************************************************************************/
+IMG_INTERNAL
+HASH_TABLE * HASH_Create_Extended_Int (IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp)
+{
+       HASH_TABLE *pHash;
+
+       if (uInitialLen == 0 || uKeySize == 0 || pfnHashFunc == NULL || pfnKeyComp == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: invalid input parameters", __func__));
+               return NULL;
+       }
+
+       PVR_DPF((PVR_DBG_MESSAGE, "%s: InitialSize=0x%x", __func__, uInitialLen));
+
+       pHash = _AllocMem(sizeof(HASH_TABLE));
+       if (pHash == NULL)
+       {
+               return NULL;
+       }
+
+       pHash->uCount = 0;
+       pHash->uSize = uInitialLen;
+       pHash->uMinimumSize = uInitialLen;
+       pHash->uKeySize = uKeySize;
+       pHash->uGrowThreshold = (uInitialLen >> 2) * 3;
+       pHash->uShrinkThreshold = NO_SHRINK;
+       pHash->pfnHashFunc = pfnHashFunc;
+       pHash->pfnKeyComp = pfnKeyComp;
+
+       pHash->ppBucketTable = _AllocZMem(sizeof(BUCKET *) * pHash->uSize);
+       if (pHash->ppBucketTable == NULL)
+       {
+               _FreeMem(pHash);
+               /*not nulling pointer, out of scope*/
+               return NULL;
+       }
+
+       return pHash;
+}
+
+#if defined(DEBUG)
+IMG_INTERNAL
+HASH_TABLE * HASH_Create_Extended_Debug (IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp,
+                                                                                const char *file, const unsigned int line)
+{
+       HASH_TABLE *hash;
+       hash = HASH_Create_Extended_Int(uInitialLen, uKeySize,
+                                                                       pfnHashFunc, pfnKeyComp);
+       if (hash)
+       {
+               hash->pszFile = file;
+               hash->ui32LineNum = line;
+       }
+       return hash;
+}
+#endif
+
+/*************************************************************************/ /*!
+@Function       HASH_Create
+@Description    Create a self scaling hash table with a key consisting of a
+                single uintptr_t, and using the default hash and key
+                comparison functions.
+@Input          uInitialLen  Initial and minimum length of the hash table,
+                             where the length refers to the number of entries
+                             in the hash table, not its size in bytes.
+@Return         NULL or hash table handle.
+*/ /**************************************************************************/
+IMG_INTERNAL
+HASH_TABLE * HASH_Create_Int (IMG_UINT32 uInitialLen)
+{
+       return HASH_Create_Extended_Int(uInitialLen, sizeof(uintptr_t),
+                                                                       &HASH_Func_Default, &HASH_Key_Comp_Default);
+}
+
+#if defined(DEBUG)
+IMG_INTERNAL
+HASH_TABLE * HASH_Create_Debug(IMG_UINT32 uInitialLen, const char *file, const unsigned int line)
+{
+       HASH_TABLE *hash;
+       hash = HASH_Create_Extended_Int(uInitialLen, sizeof(uintptr_t),
+                                                                       &HASH_Func_Default, &HASH_Key_Comp_Default);
+       if (hash)
+       {
+               hash->pszFile = file;
+               hash->ui32LineNum = line;
+       }
+       return hash;
+}
+#endif
+
+/*************************************************************************/ /*!
+@Function       HASH_Delete_Extended
+@Description    Delete a hash table created by HASH_Create_Extended or
+                HASH_Create. All entries in the table should have been removed
+                before calling this function.
+@Input          pHash        Hash table
+@Input          bWarn        Set false to suppress warnings in the case of
+                             deletion with active entries.
+*/ /**************************************************************************/
+IMG_INTERNAL void
+HASH_Delete_Extended(HASH_TABLE *pHash, IMG_BOOL bWarn)
+{
+       IMG_BOOL bDoCheck = IMG_TRUE;
+#if defined(__KERNEL__) && !defined(__QNXNTO__)
+       PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+       if (psPVRSRVData != NULL)
+       {
+               if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+               {
+                       bDoCheck = IMG_FALSE;
+               }
+       }
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+       else
+       {
+               bDoCheck = IMG_FALSE;
+       }
+#endif
+#endif
+       if (pHash != NULL)
+       {
+               PVR_DPF((PVR_DBG_MESSAGE, "HASH_Delete"));
+
+               if (bDoCheck)
+               {
+                       PVR_ASSERT(pHash->uCount==0);
+               }
+               if (pHash->uCount != 0)
+               {
+                       IMG_UINT32 i;
+                       if (bWarn)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "%s: Leak detected in hash table!", __func__));
+                               PVR_DPF((PVR_DBG_ERROR, "%s: Likely Cause: client drivers not freeing allocations before destroying devmem context", __func__));
+                               PVR_DPF((PVR_DBG_ERROR, "%s: Removing remaining %u hash entries.", __func__, pHash->uCount));
+#if defined(DEBUG)
+                               PVR_DPF ((PVR_DBG_ERROR, "%s: Hash %p created at %s:%u.", __func__, (uintptr_t*)pHash, pHash->pszFile, pHash->ui32LineNum));
+#endif
+                       }
+
+                       for (i = 0; i < pHash->uSize; i++)
+                       {
+                               BUCKET *pBucket = pHash->ppBucketTable[i];
+                               while (pBucket != NULL)
+                               {
+                                       BUCKET *pNextBucket = pBucket->pNext;
+                                       _FreeMem(pBucket);
+                                       pBucket = pNextBucket;
+                               }
+                       }
+
+               }
+               _FreeMem(pHash->ppBucketTable);
+               pHash->ppBucketTable = NULL;
+               _FreeMem(pHash);
+               /*not nulling pointer, copy on stack*/
+       }
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Delete
+@Description    Delete a hash table created by HASH_Create_Extended or
+                HASH_Create. All entries in the table must have been removed
+                before calling this function.
+@Input          pHash        Hash table
+*/ /**************************************************************************/
+IMG_INTERNAL void
+HASH_Delete(HASH_TABLE *pHash)
+{
+       HASH_Delete_Extended(pHash, IMG_TRUE);
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Insert_Extended
+@Description    Insert a key value pair into a hash table created with
+                HASH_Create_Extended.
+@Input          pHash        The hash table.
+@Input          pKey         Pointer to the key.
+@Input          v            The value associated with the key.
+@Return         IMG_TRUE - success.
+                IMG_FALSE - failure.
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_BOOL
+HASH_Insert_Extended(HASH_TABLE *pHash, void *pKey, uintptr_t v)
+{
+       BUCKET *pBucket;
+
+       PVR_ASSERT(pHash != NULL);
+
+       if (pHash == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: invalid parameter", __func__));
+               return IMG_FALSE;
+       }
+
+       pBucket = _AllocMem(sizeof(BUCKET) + pHash->uKeySize);
+       if (pBucket == NULL)
+       {
+               return IMG_FALSE;
+       }
+
+       pBucket->v = v;
+       /* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k (linux)*/
+       OSCachedMemCopy(pBucket->k, pKey, pHash->uKeySize);
+
+       _ChainInsert(pHash, pBucket, pHash->ppBucketTable, pHash->uSize);
+
+       pHash->uCount++;
+
+       /* check if we need to think about re-balancing */
+       if (pHash->uCount > pHash->uGrowThreshold)
+       {
+               /* Ignore the return code from _Resize because the hash table is
+                  still in a valid state and although not ideally sized, it is still
+                  functional */
+               _Resize(pHash, pHash->uSize << 1);
+       }
+
+       return IMG_TRUE;
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Insert
+@Description    Insert a key value pair into a hash table created with
+                HASH_Create.
+@Input          pHash        The hash table.
+@Input          k            The key value.
+@Input          v            The value associated with the key.
+@Return         IMG_TRUE - success.
+                IMG_FALSE - failure.
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_BOOL
+HASH_Insert(HASH_TABLE *pHash, uintptr_t k, uintptr_t v)
+{
+       return HASH_Insert_Extended(pHash, &k, v);
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Remove_Extended
+@Description    Remove a key from a hash table created with
+                HASH_Create_Extended.
+@Input          pHash        The hash table.
+@Input          pKey         Pointer to key.
+@Return         0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+IMG_INTERNAL uintptr_t
+HASH_Remove_Extended(HASH_TABLE *pHash, void *pKey)
+{
+       BUCKET **ppBucket;
+       IMG_UINT32 uIndex;
+
+       PVR_ASSERT(pHash != NULL);
+
+       if (pHash == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Null hash table", __func__));
+               return 0;
+       }
+
+       uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
+
+       for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != NULL; ppBucket = &((*ppBucket)->pNext))
+       {
+               /* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k */
+               if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey))
+               {
+                       BUCKET *pBucket = *ppBucket;
+                       uintptr_t v = pBucket->v;
+                       (*ppBucket) = pBucket->pNext;
+
+                       _FreeMem(pBucket);
+                       /*not nulling original pointer, already overwritten*/
+
+                       pHash->uCount--;
+
+                       /* check if we need to think about re-balancing, when the shrink
+                        * threshold is 0 we are at the minimum size, no further shrink */
+                       if (pHash->uCount < pHash->uShrinkThreshold)
+                       {
+                               /* Ignore the return code from _Resize because the
+                                  hash table is still in a valid state and although
+                                  not ideally sized, it is still functional */
+                               _Resize(pHash, MAX(pHash->uSize >> 1, pHash->uMinimumSize));
+                       }
+
+                       return v;
+               }
+       }
+       return 0;
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Remove
+@Description    Remove a key value pair from a hash table created with
+                HASH_Create.
+@Input          pHash        The hash table.
+@Input          k            The key value.
+@Return         0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+IMG_INTERNAL uintptr_t
+HASH_Remove(HASH_TABLE *pHash, uintptr_t k)
+{
+       return HASH_Remove_Extended(pHash, &k);
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Retrieve_Extended
+@Description    Retrieve a value from a hash table created with
+                HASH_Create_Extended.
+@Input          pHash        The hash table.
+@Input          pKey         Pointer to key.
+@Return         0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+IMG_INTERNAL uintptr_t
+HASH_Retrieve_Extended(HASH_TABLE *pHash, void *pKey)
+{
+       BUCKET **ppBucket;
+       IMG_UINT32 uIndex;
+
+       PVR_ASSERT(pHash != NULL);
+
+       if (pHash == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Null hash table", __func__));
+               return 0;
+       }
+
+       uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
+
+       for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != NULL; ppBucket = &((*ppBucket)->pNext))
+       {
+               /* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k */
+               if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey))
+               {
+                       BUCKET *pBucket = *ppBucket;
+                       uintptr_t v = pBucket->v;
+
+                       return v;
+               }
+       }
+       return 0;
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Retrieve
+@Description    Retrieve a value from a hash table created with HASH_Create.
+@Input          pHash        The hash table.
+@Input          k            The key value.
+@Return         0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+IMG_INTERNAL uintptr_t
+HASH_Retrieve(HASH_TABLE *pHash, uintptr_t k)
+{
+       return HASH_Retrieve_Extended(pHash, &k);
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Iterate
+@Description    Iterate over every entry in the hash table.
+@Input          pHash        Hash table to iterate.
+@Input          pfnCallback  Callback to call with the key and value of each
+                             entry in the hash table.
+@Input          args         Opaque pointer passed through to the callback.
+@Return         Callback error if any, otherwise PVRSRV_OK
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback, void* args)
+{
+       IMG_UINT32 uIndex;
+       for (uIndex=0; uIndex < pHash->uSize; uIndex++)
+       {
+               BUCKET *pBucket;
+               pBucket = pHash->ppBucketTable[uIndex];
+               while (pBucket != NULL)
+               {
+                       PVRSRV_ERROR eError;
+                       BUCKET *pNextBucket = pBucket->pNext;
+
+                       eError = pfnCallback((uintptr_t) ((void *) *(pBucket->k)), pBucket->v, args);
+
+                       /* The callback might want us to break out early */
+                       if (eError != PVRSRV_OK)
+                               return eError;
+
+                       pBucket = pNextBucket;
+               }
+       }
+       return PVRSRV_OK;
+}
+
+#ifdef HASH_TRACE
+/*************************************************************************/ /*!
+@Function       HASH_Dump
+@Description    Dump out some information about a hash table.
+@Input          pHash         The hash table.
+*/ /**************************************************************************/
+void
+HASH_Dump(HASH_TABLE *pHash)
+{
+       IMG_UINT32 uIndex;
+       IMG_UINT32 uMaxLength=0;
+       IMG_UINT32 uEmptyCount=0;
+
+       PVR_ASSERT(pHash != NULL);
+       for (uIndex=0; uIndex<pHash->uSize; uIndex++)
+       {
+               BUCKET *pBucket;
+               IMG_UINT32 uLength = 0;
+               if (pHash->ppBucketTable[uIndex] == NULL)
+               {
+                       uEmptyCount++;
+               }
+               for (pBucket=pHash->ppBucketTable[uIndex];
+                               pBucket != NULL;
+                               pBucket = pBucket->pNext)
+               {
+                       uLength++;
+               }
+               uMaxLength = MAX(uMaxLength, uLength);
+       }
+
+       PVR_TRACE(("hash table: uMinimumSize=%d  size=%d  count=%d",
+                          pHash->uMinimumSize, pHash->uSize, pHash->uCount));
+       PVR_TRACE(("  empty=%d  max=%d", uEmptyCount, uMaxLength));
+}
+#endif
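
For orientation, a minimal usage sketch of the hash API implemented above (assumes hash.h and the services type/error headers are in scope; keys and values are plain uintptr_t, matching the default hash and compare functions; error handling trimmed):

/* Callback for HASH_Iterate: counts entries; returning an error stops early. */
static PVRSRV_ERROR _CountEntryCB(uintptr_t k, uintptr_t v, void *pvArg)
{
	IMG_UINT32 *puiCount = pvArg;
	PVR_UNREFERENCED_PARAMETER(k);
	PVR_UNREFERENCED_PARAMETER(v);
	(*puiCount)++;
	return PVRSRV_OK;
}

static void _HashUsageSketch(void)
{
	HASH_TABLE *psTable;
	IMG_UINT32 uiCount = 0;

	psTable = HASH_Create_Int(16);          /* 16-bucket initial/minimum size */
	if (psTable == NULL)
	{
		return;
	}

	HASH_Insert(psTable, 0x1000, 0xAAAA);   /* key -> value */
	HASH_Insert(psTable, 0x2000, 0xBBBB);

	(void)HASH_Retrieve(psTable, 0x1000);   /* 0xAAAA, or 0 if the key is absent */
	(void)HASH_Iterate(psTable, _CountEntryCB, &uiCount);

	(void)HASH_Remove(psTable, 0x1000);     /* returns the removed value */
	(void)HASH_Remove(psTable, 0x2000);

	HASH_Delete(psTable);                   /* table must be empty by this point */
}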
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/common/htbuffer.c b/drivers/gpu/drm/img/img-rogue/services/shared/common/htbuffer.c
new file mode 100644 (file)
index 0000000..c326ae2
--- /dev/null
@@ -0,0 +1,197 @@
+/*************************************************************************/ /*!
+@File           htbuffer.c
+@Title          Host Trace Buffer shared API.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Host Trace Buffer provides a mechanism to log Host events to a
+                buffer in a similar way to the Firmware Trace mechanism.
+                Host Trace Buffer logs data using a Transport Layer buffer.
+                The Transport Layer and the pvrtld tool provide the mechanism to
+                retrieve the trace data.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(__linux__)
+ #include <linux/version.h>
+
+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+  #include <linux/stdarg.h>
+ #else
+  #include <stdarg.h>
+ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
+#else
+ #include <stdarg.h>
+#endif /* __linux__ */
+
+#include "htbuffer.h"
+#include "osfunc.h"
+#include "client_htbuffer_bridge.h"
+
+/* An array of flag words large enough to store the enable flags for all
+ * log groups.
+ * NB: This will only work while all logging is in the kernel
+ */
+IMG_INTERNAL HTB_FLAG_EL_T g_auiHTBGroupEnable[HTB_FLAG_NUM_EL] = {0};
+
+
+/*************************************************************************/ /*!
+ @Function      HTBControl
+ @Description   Update the configuration of the Host Trace Buffer
+ @Input         hSrvHandle      Server Handle
+ @Input         ui32NumFlagGroups Number of group enable flags words
+ @Input         aui32GroupEnable  Flags words controlling groups to be logged
+ @Input         ui32LogLevel    Log level to record
+ @Input         ui32EnablePID   PID to enable logging for a specific process
+ @Input         eLogPidMode     Enable logging for all processes or only for specific ones
+ @Input         eOpMode         Control what trace data is dropped if the TL
+                                buffer is full
+ @Return        eError          Internal services call returned eError error
+                                number
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBControl(
+       IMG_HANDLE hSrvHandle,
+       IMG_UINT32 ui32NumFlagGroups,
+       IMG_UINT32 * aui32GroupEnable,
+       IMG_UINT32 ui32LogLevel,
+       IMG_UINT32 ui32EnablePID,
+       HTB_LOGMODE_CTRL eLogPidMode,
+       HTB_OPMODE_CTRL eOpMode
+)
+{
+       return BridgeHTBControl(
+                       hSrvHandle,
+                       ui32NumFlagGroups,
+                       aui32GroupEnable,
+                       ui32LogLevel,
+                       ui32EnablePID,
+                       eLogPidMode,
+                       eOpMode
+                       );
+}
+
+
+/*************************************************************************/ /*!
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+_HTBLog(IMG_HANDLE hSrvHandle, IMG_UINT32 PID, IMG_UINT32 TID, IMG_UINT64 ui64TimeStampus,
+               HTB_LOG_SFids SF, va_list args)
+{
+#if defined(__KERNEL__)
+       IMG_UINT32 i;
+       IMG_UINT32 ui32NumArgs = HTB_SF_PARAMNUM(SF);
+#if defined(__KLOCWORK__)
+       IMG_UINT32 aui32Args[HTB_LOG_MAX_PARAMS + 1];   // Prevent KW False-positive
+#else
+       IMG_UINT32 aui32Args[HTB_LOG_MAX_PARAMS];
+#endif
+
+       PVR_ASSERT(ui32NumArgs <= HTB_LOG_MAX_PARAMS);
+       ui32NumArgs = (ui32NumArgs>HTB_LOG_MAX_PARAMS) ?
+                       HTB_LOG_MAX_PARAMS : ui32NumArgs;
+
+       /* unpack var args before sending over bridge */
+       for (i=0; i<ui32NumArgs; i++)
+       {
+               aui32Args[i] = va_arg(args, IMG_UINT32);
+       }
+
+       return BridgeHTBLog(hSrvHandle, PID, TID, ui64TimeStampus, SF,
+                       ui32NumArgs, aui32Args);
+#else
+       PVR_UNREFERENCED_PARAMETER(hSrvHandle);
+       PVR_UNREFERENCED_PARAMETER(PID);
+       PVR_UNREFERENCED_PARAMETER(TID);
+       PVR_UNREFERENCED_PARAMETER(ui64TimeStampus);
+       PVR_UNREFERENCED_PARAMETER(SF);
+       PVR_UNREFERENCED_PARAMETER(args);
+
+       PVR_ASSERT(0=="HTB Logging in UM is not yet supported");
+       return PVRSRV_ERROR_NOT_SUPPORTED;
+#endif
+}
+
+
+/*************************************************************************/ /*!
+ @Function      HTBLog
+ @Description   Record a Host Trace Buffer log event
+ @Input         PID     The PID of the process the event is associated
+                        with. This is provided as an argument rather
+                        than querying internally so that events
+                        associated with a particular process, but
+                        performed by another can be logged correctly.
+ @Input         ui64TimeStampns The timestamp to be associated with this
+                                log event
+ @Input         SF              The log event ID
+ @Input         ...             Log parameters
+ @Return        PVRSRV_OK       Success.
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBLog(IMG_HANDLE hSrvHandle, IMG_UINT32 PID, IMG_UINT32 TID, IMG_UINT64 ui64TimeStampns,
+               IMG_UINT32 SF, ...)
+{
+       PVRSRV_ERROR eError;
+       va_list args;
+       va_start(args, SF);
+       eError =_HTBLog(hSrvHandle, PID, TID, ui64TimeStampns, SF, args);
+       va_end(args);
+       return eError;
+}
+
+
+/*************************************************************************/ /*!
+ @Function      HTBLogSimple
+ @Description   Record a Host Trace Buffer log event with implicit PID and
+                Timestamp
+ @Input         SF              The log event ID
+ @Input         ...             Log parameters
+ @Return        PVRSRV_OK       Success.
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBLogSimple(IMG_HANDLE hSrvHandle, IMG_UINT32 SF, ...)
+{
+       PVRSRV_ERROR eError;
+       IMG_UINT64 ui64Timestamp;
+       va_list args;
+       va_start(args, SF);
+       OSClockMonotonicns64(&ui64Timestamp);
+       eError = _HTBLog(hSrvHandle, OSGetCurrentProcessID(), OSGetCurrentThreadID(), ui64Timestamp,
+                       SF, args);
+       va_end(args);
+       return eError;
+}
+
+/* EOF */
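
A minimal caller sketch for the logging entry points above (illustrative only: the event identifier below is a placeholder for one of the HTB_SF_* IDs defined elsewhere in the driver, and the extra arguments must be IMG_UINT32 values, as unpacked by _HTBLog):

#define SKETCH_SF_EVENT 0x1U   /* hypothetical two-parameter event ID */

static void _HTBLogSketch(IMG_HANDLE hSrvHandle)
{
	IMG_UINT64 ui64Now;

	/* Implicit PID/TID/timestamp variant. */
	(void)HTBLogSimple(hSrvHandle, SKETCH_SF_EVENT, 0xDEADBEEFU, 42U);

	/* Explicit variant, e.g. when logging on behalf of another process. */
	OSClockMonotonicns64(&ui64Now);
	(void)HTBLog(hSrvHandle, OSGetCurrentProcessID(), OSGetCurrentThreadID(),
	             ui64Now, SKETCH_SF_EVENT, 0xDEADBEEFU, 42U);
}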
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/common/mem_utils.c b/drivers/gpu/drm/img/img-rogue/services/shared/common/mem_utils.c
new file mode 100644 (file)
index 0000000..1244e24
--- /dev/null
@@ -0,0 +1,449 @@
+/*************************************************************************/ /*!
+@File
+@Title          Memory manipulation functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Memory related functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "osfunc_common.h"
+#include "img_defs.h"
+
+/* This workaround is only *required* on ARM64. Avoid building or including
+ * it by default on other architectures, unless the 'safe memcpy' test flag
+ * is enabled. (The code should work on other architectures.)
+ */
+
+
+
+/* NOTE: This C file is compiled with -ffreestanding to avoid pattern matching
+ *       by the compiler to stdlib functions, and it must only use the below
+ *       headers. Do not include any IMG or services headers in this file.
+ */
+#if defined(__KERNEL__) && defined(__linux__)
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+/* The attribute "vector_size" will generate floating point instructions
+ * and use FPU registers. In kernel code the FPU registers may be corrupted
+ * during a context switch, because FPU state is not expected to be saved and
+ * restored there.
+ * GCC enables the compiler option -mgeneral-regs-only by default. This
+ * option restricts the generated code to general-purpose registers only, so
+ * the problem does not arise with GCC.
+ */
+#if defined(__KERNEL__) && defined(__clang__)
+
+#define DEVICE_MEMSETCPY_NON_VECTOR_KM
+#if !defined(BITS_PER_BYTE)
+#define BITS_PER_BYTE (8)
+#endif /* BITS_PER_BYTE */
+
+/* Loading or storing 16 or 32 bytes is only supported on 64-bit machines. */
+#if DEVICE_MEMSETCPY_ALIGN_IN_BYTES > 8
+typedef __uint128_t uint128_t;
+
+typedef struct
+{
+       uint128_t ui128DataFields[2];
+}
+uint256_t;
+#endif
+
+#endif
+
+/* This file is only intended to be used on platforms which use GCC or Clang,
+ * due to its requirement on __attribute__((vector_size(n))), typeof() and
+ * __SIZEOF__ macros.
+ */
+
+#if defined(__GNUC__)
+
+#ifndef MIN
+#define MIN(a, b) \
+ ({__typeof(a) _a = (a); __typeof(b) _b = (b); _a > _b ? _b : _a;})
+#endif
+
+#if !defined(DEVICE_MEMSETCPY_ALIGN_IN_BYTES)
+#define DEVICE_MEMSETCPY_ALIGN_IN_BYTES __SIZEOF_LONG__
+#endif
+#if (DEVICE_MEMSETCPY_ALIGN_IN_BYTES & (DEVICE_MEMSETCPY_ALIGN_IN_BYTES - 1)) != 0
+#error "DEVICE_MEMSETCPY_ALIGN_IN_BYTES must be a power of 2"
+#endif
+#if DEVICE_MEMSETCPY_ALIGN_IN_BYTES < 4
+#error "DEVICE_MEMSETCPY_ALIGN_IN_BYTES must be equal or greater than 4"
+#endif
+
+#if __SIZEOF_POINTER__ != __SIZEOF_LONG__
+#error No support for architectures where void* and long are sized differently
+#endif
+
+#if   __SIZEOF_LONG__ > DEVICE_MEMSETCPY_ALIGN_IN_BYTES
+/* Meaningless, and harder to do correctly */
+# error Cannot handle DEVICE_MEMSETCPY_ALIGN_IN_BYTES < sizeof(long)
+typedef unsigned long block_t;
+#elif __SIZEOF_LONG__ <= DEVICE_MEMSETCPY_ALIGN_IN_BYTES
+# if defined(DEVICE_MEMSETCPY_NON_VECTOR_KM)
+#  if DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 8
+    typedef uint64_t block_t;
+#  elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 16
+    typedef uint128_t block_t;
+#  elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 32
+    typedef uint256_t block_t;
+#  endif
+# else
+typedef unsigned int block_t
+       __attribute__((vector_size(DEVICE_MEMSETCPY_ALIGN_IN_BYTES)));
+# endif
+# if defined(__arm64__) || defined(__aarch64__)
+#  if   DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 8
+#   define DEVICE_MEMSETCPY_ARM64
+#   define REGSZ "w"
+#   define REGCL "w"
+#   define BVCLB "r"
+#  elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 16
+#   define DEVICE_MEMSETCPY_ARM64
+#   define REGSZ "x"
+#   define REGCL "x"
+#   define BVCLB "r"
+#  elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 32
+#   if defined(__ARM_NEON_FP)
+#    define DEVICE_MEMSETCPY_ARM64
+#    define REGSZ "q"
+#    define REGCL "v"
+#    define BVCLB "w"
+#   endif
+#  endif
+#  if defined(DEVICE_MEMSETCPY_ARM64)
+#   if defined(DEVICE_MEMSETCPY_ARM64_NON_TEMPORAL)
+#    define NSHLD() __asm__ ("dmb nshld")
+#    define NSHST() __asm__ ("dmb nshst")
+#    define LDP "ldnp"
+#    define STP "stnp"
+#   else
+#    define NSHLD()
+#    define NSHST()
+#    define LDP "ldp"
+#    define STP "stp"
+#   endif
+#   if defined(DEVICE_MEMSETCPY_NON_VECTOR_KM)
+#    if DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 8
+typedef uint32_t block_half_t;
+#    elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 16
+typedef uint64_t block_half_t;
+#    elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 32
+typedef uint128_t block_half_t;
+#    endif
+#   else
+ typedef unsigned int block_half_t
+       __attribute__((vector_size(DEVICE_MEMSETCPY_ALIGN_IN_BYTES / 2)));
+#   endif
+#  endif
+# endif
+#endif
+
+__attribute__((visibility("hidden")))
+void DeviceMemCopy(void *pvDst, const void *pvSrc, size_t uSize)
+{
+       volatile const char *pcSrc = pvSrc;
+       volatile char *pcDst = pvDst;
+       size_t uPreambleBytes;
+       int bBlockCopy = 0;
+
+       size_t uSrcUnaligned = (size_t)pcSrc % sizeof(block_t);
+       size_t uDstUnaligned = (size_t)pcDst % sizeof(block_t);
+
+       if (!uSrcUnaligned && !uDstUnaligned)
+       {
+               /* Neither pointer is unaligned. Optimal case. */
+               bBlockCopy = 1;
+       }
+       else
+       {
+               if (uSrcUnaligned == uDstUnaligned)
+               {
+                       /* Neither pointer is usefully aligned, but they are misaligned in
+                        * the same way, so we can copy a preamble in a slow way, then
+                        * optimize the rest.
+                        */
+                       uPreambleBytes = MIN(sizeof(block_t) - uDstUnaligned, uSize);
+                       uSize -= uPreambleBytes;
+                       while (uPreambleBytes)
+                       {
+                               *pcDst++ = *pcSrc++;
+                               uPreambleBytes--;
+                       }
+
+                       bBlockCopy = 1;
+               }
+               else if ((uSrcUnaligned | uDstUnaligned) % sizeof(int) == 0)
+               {
+                       /* Both pointers are at least 32-bit aligned, and we assume the
+                        * processor can handle arbitrary 32-bit loads and stores.
+                        * NOTE: Could this be optimised with a non-temporal version?
+                        */
+                       if (uSize >= sizeof(int))
+                       {
+                               volatile int *piSrc = (int *)((void *)pcSrc);
+                               volatile int *piDst = (int *)((void *)pcDst);
+
+                               while (uSize >= sizeof(int))
+                               {
+                                       *piDst++ = *piSrc++;
+                                       uSize -= sizeof(int);
+                               }
+
+                               pcSrc = (char *)((void *)piSrc);
+                               pcDst = (char *)((void *)piDst);
+                       }
+               }
+       }
+
+       if (bBlockCopy && uSize >= sizeof(block_t))
+       {
+               volatile block_t *pSrc = (block_t *)((void *)pcSrc);
+               volatile block_t *pDst = (block_t *)((void *)pcDst);
+
+#if defined(DEVICE_MEMSETCPY_ARM64)
+               NSHLD();
+#endif
+
+               while (uSize >= sizeof(block_t))
+               {
+#if defined(DEVICE_MEMSETCPY_ARM64)
+                       __asm__ (LDP " " REGSZ "0, " REGSZ "1, [%[pSrc]]\n\t"
+                                STP " " REGSZ "0, " REGSZ "1, [%[pDst]]"
+                                               :
+                                               : [pSrc] "r" (pSrc), [pDst] "r" (pDst)
+                                               : "memory", REGCL "0", REGCL "1");
+#else
+                       *pDst = *pSrc;
+#endif
+                       pDst++;
+                       pSrc++;
+                       uSize -= sizeof(block_t);
+               }
+
+#if defined(DEVICE_MEMSETCPY_ARM64)
+               NSHST();
+#endif
+
+               pcSrc = (char *)((void *)pSrc);
+               pcDst = (char *)((void *)pDst);
+       }
+
+       while (uSize)
+       {
+               *pcDst++ = *pcSrc++;
+               uSize--;
+       }
+}
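
The shared-misalignment path above copies a short byte-wise preamble until both pointers reach a block_t boundary, then switches to block copies, and finishes with a byte-wise tail. A minimal stand-alone sketch of that split, assuming a hypothetical 16-byte block_t (i.e. DEVICE_MEMSETCPY_ALIGN_IN_BYTES of 16) and illustrative sizes only:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    /* Hypothetical values: 16-byte block_t, both pointers 3 bytes past a
     * 16-byte boundary, 100 bytes to copy. */
    size_t block     = 16;
    size_t unaligned = 3;                       /* same for src and dst  */
    size_t size      = 100;

    /* Matches uPreambleBytes = MIN(sizeof(block_t) - uDstUnaligned, uSize). */
    size_t preamble = block - unaligned;        /* 13 byte-wise copies   */
    size_t blocks   = (size - preamble) / block;/* 5 full 16-byte blocks */
    size_t tail     = (size - preamble) % block;/* 7 trailing bytes      */

    printf("preamble=%zu blocks=%zu tail=%zu\n", preamble, blocks, tail);
    return 0;
}
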
+
+__attribute__((visibility("hidden")))
+void DeviceMemSet(void *pvDst, unsigned char ui8Value, size_t uSize)
+{
+       volatile char *pcDst = pvDst;
+       size_t uPreambleBytes;
+
+       size_t uDstUnaligned = (size_t)pcDst % sizeof(block_t);
+
+       if (uDstUnaligned)
+       {
+               uPreambleBytes = MIN(sizeof(block_t) - uDstUnaligned, uSize);
+               uSize -= uPreambleBytes;
+               while (uPreambleBytes)
+               {
+                       *pcDst++ = ui8Value;
+                       uPreambleBytes--;
+               }
+       }
+
+       if (uSize >= sizeof(block_t))
+       {
+               volatile block_t *pDst = (block_t *)((void *)pcDst);
+               size_t i, uBlockSize;
+#if defined(DEVICE_MEMSETCPY_ARM64)
+               typedef block_half_t BLK_t;
+#else
+               typedef block_t BLK_t;
+#endif /* defined(DEVICE_MEMSETCPY_ARM64) */
+
+#if defined(DEVICE_MEMSETCPY_NON_VECTOR_KM)
+               BLK_t bValue = 0;
+
+               uBlockSize = sizeof(BLK_t) / sizeof(ui8Value);
+
+               for (i = 0; i < uBlockSize; i++)
+               {
+                       bValue |= (BLK_t)ui8Value << ((uBlockSize - i - 1) * BITS_PER_BYTE);
+               }
+#else
+               BLK_t bValue = {0};
+
+               uBlockSize = sizeof(bValue) / sizeof(unsigned int);
+               for (i = 0; i < uBlockSize; i++)
+                       bValue[i] = ui8Value << 24U |
+                                   ui8Value << 16U |
+                                   ui8Value <<  8U |
+                                   ui8Value;
+#endif /* defined(DEVICE_MEMSETCPY_NON_VECTOR_KM) */
+
+#if defined(DEVICE_MEMSETCPY_ARM64)
+               NSHLD();
+#endif
+
+               while (uSize >= sizeof(block_t))
+               {
+#if defined(DEVICE_MEMSETCPY_ARM64)
+                       __asm__ (STP " %" REGSZ "[bValue], %" REGSZ "[bValue], [%[pDst]]"
+                                               :
+                                               : [bValue] BVCLB (bValue), [pDst] "r" (pDst)
+                                               : "memory");
+#else
+                       *pDst = bValue;
+#endif
+                       pDst++;
+                       uSize -= sizeof(block_t);
+               }
+
+#if defined(DEVICE_MEMSETCPY_ARM64)
+               NSHST();
+#endif
+
+               pcDst = (char *)((void *)pDst);
+       }
+
+       while (uSize)
+       {
+               *pcDst++ = ui8Value;
+               uSize--;
+       }
+}
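
Before the main loop, DeviceMemSet widens the 8-bit fill value to a whole block so that each store fills sizeof(block_t) bytes. A stand-alone sketch of that widening for the scalar (DEVICE_MEMSETCPY_NON_VECTOR_KM) path, using a plain 64-bit value purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define BITS_PER_BYTE 8

int main(void)
{
    uint8_t  ui8Value = 0xAB;
    uint64_t bValue   = 0;
    size_t   i, uBlockSize = sizeof(bValue);    /* 8 byte lanes to fill */

    /* Same shift-and-OR widening as the scalar path of DeviceMemSet. */
    for (i = 0; i < uBlockSize; i++)
    {
        bValue |= (uint64_t)ui8Value << ((uBlockSize - i - 1) * BITS_PER_BYTE);
    }

    /* Prints abababababababab: one store now fills 8 bytes at once. */
    printf("%016llx\n", (unsigned long long)bValue);
    return 0;
}
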
+
+#endif /* defined(__GNUC__) */
+
+/* Potentially very slow (but safe) fallbacks for non-GNU C compilers */
+IMG_INTERNAL
+void DeviceMemCopyBytes(void *pvDst, const void *pvSrc, size_t uSize)
+{
+       volatile const char *pcSrc = pvSrc;
+       volatile char *pcDst = pvDst;
+
+       while (uSize)
+       {
+               *pcDst++ = *pcSrc++;
+               uSize--;
+       }
+}
+
+IMG_INTERNAL
+void DeviceMemSetBytes(void *pvDst, unsigned char ui8Value, size_t uSize)
+{
+       volatile char *pcDst = pvDst;
+
+       while (uSize)
+       {
+               *pcDst++ = ui8Value;
+               uSize--;
+       }
+}
+
+#if !defined(__QNXNTO__) /* Ignore Neutrino as it uses strlcpy */
+
+#if defined(__KERNEL__) && defined(__linux__)
+/*
+ * For Linux kernel-mode debug builds, use the variant of StringLCopy
+ * that wraps strlcpy and reports truncation via a stack dump.
+ * For Linux kernel-mode release builds, strlcpy alone is used.
+ */
+#if defined(DEBUG)
+IMG_INTERNAL
+size_t StringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize)
+{
+       /*
+        * Let strlcpy handle any truncation cases correctly.
+        * We will definitely get a NUL-terminated string set in pszDest
+        */
+       size_t  uSrcSize = strlcpy(pszDest, pszSrc, uDataSize);
+
+#if defined(PVR_DEBUG_STRLCPY)
+       /* Handle truncation by dumping calling stack if debug allows */
+       if (uSrcSize >= uDataSize)
+       {
+               PVR_DPF((PVR_DBG_WARNING,
+                       "%s: String truncated Src = '<%s>' %ld bytes, Dest = '%s'",
+                       __func__, pszSrc, (long)uDataSize, pszDest));
+               OSDumpStack();
+       }
+#endif /* defined(PVR_DEBUG_STRLCPY) */
+
+       return uSrcSize;
+}
+#endif /* defined(DEBUG) */
+
+#else /* defined(__KERNEL__) && defined(__linux__) */
+/*
+ * For every other platform, use the strnlen/strncpy implementation of
+ * StringLCopy.
+ * NOTE: It is crucial to avoid memcpy here, as it has the hidden side-effect
+ * of dragging in whichever flavour of GLIBC the build environment provides,
+ * which can cause unexpected failures for host-side command execution.
+ */
+IMG_INTERNAL
+size_t StringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize)
+{
+       size_t uSrcSize = strnlen(pszSrc, uDataSize);
+
+       (void)strncpy(pszDest, pszSrc, uSrcSize);
+       if (uSrcSize == uDataSize)
+       {
+               pszDest[uSrcSize-1] = '\0';
+       }
+       else
+       {
+               pszDest[uSrcSize] = '\0';
+       }
+
+       return uSrcSize;
+}
+
+#endif /* defined(__KERNEL__) && defined(__linux__) */
+
+#endif /* !defined(__QNXNTO__) */
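
The strnlen/strncpy variant of StringLCopy above always NUL-terminates the destination and returns the clamped source length, so truncation can be detected by comparing the return value against the buffer size. A self-contained sketch of that behaviour (plain char and a tiny test buffer stand in for IMG_CHAR; the helper name is made up for illustration):

#include <stdio.h>
#include <string.h>

/* Stand-alone illustration of the strnlen/strncpy StringLCopy variant above. */
static size_t StringLCopyExample(char *pszDest, const char *pszSrc, size_t uDataSize)
{
    size_t uSrcSize = strnlen(pszSrc, uDataSize);

    (void)strncpy(pszDest, pszSrc, uSrcSize);
    if (uSrcSize == uDataSize)
    {
        /* Source filled (or exceeded) the buffer: truncate and terminate. */
        pszDest[uSrcSize - 1] = '\0';
    }
    else
    {
        pszDest[uSrcSize] = '\0';
    }

    return uSrcSize;
}

int main(void)
{
    char acBuf[8];

    /* Fits: copies "GPU" and returns 3. */
    printf("%zu '%s'\n", StringLCopyExample(acBuf, "GPU", sizeof(acBuf)), acBuf);

    /* Too long: returns 8 and acBuf holds the truncated "PowerVR". */
    printf("%zu '%s'\n", StringLCopyExample(acBuf, "PowerVR GPU", sizeof(acBuf)), acBuf);
    return 0;
}
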
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/common/pvrsrv_error.c b/drivers/gpu/drm/img/img-rogue/services/shared/common/pvrsrv_error.c
new file mode 100644 (file)
index 0000000..5cd02a2
--- /dev/null
@@ -0,0 +1,61 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services error support
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+IMG_EXPORT
+const IMG_CHAR *PVRSRVGetErrorString(PVRSRV_ERROR eError)
+{
+       switch (eError)
+       {
+               case PVRSRV_OK:
+                       return "PVRSRV_OK";
+#define PVRE(x) \
+               case x: \
+                       return #x;
+#include "pvrsrv_errors.h"
+#undef PVRE
+               default:
+                       return "Unknown PVRSRV error number";
+       }
+}
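
PVRSRVGetErrorString relies on an X-macro: pvrsrv_errors.h is expected to contain one PVRE(...) entry per error code, and the temporary PVRE definition expands each entry into a case label that returns the stringified name. The following self-contained sketch shows the same expansion with a made-up error list standing in for the real pvrsrv_errors.h:

#include <stdio.h>

/* Hypothetical stand-in for pvrsrv_errors.h: one PVRE() entry per code. */
#define EXAMPLE_ERRORS \
    PVRE(EX_ERROR_OUT_OF_MEMORY) \
    PVRE(EX_ERROR_INVALID_PARAMS)

/* Build the enum from the same list. */
typedef enum
{
    EX_OK = 0,
#define PVRE(x) x,
    EXAMPLE_ERRORS
#undef PVRE
} EXAMPLE_ERROR;

static const char *ExampleGetErrorString(EXAMPLE_ERROR eError)
{
    switch (eError)
    {
        case EX_OK:
            return "EX_OK";
/* Expand each list entry into "case x: return #x;" exactly as the driver does. */
#define PVRE(x) \
        case x: \
            return #x;
        EXAMPLE_ERRORS
#undef PVRE
        default:
            return "Unknown error number";
    }
}

int main(void)
{
    printf("%s\n", ExampleGetErrorString(EX_ERROR_OUT_OF_MEMORY));
    return 0;
}
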
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/common/ra.c b/drivers/gpu/drm/img/img-rogue/services/shared/common/ra.c
new file mode 100644 (file)
index 0000000..4c2981e
--- /dev/null
@@ -0,0 +1,2166 @@
+/*************************************************************************/ /*!
+@File
+@Title          Resource Allocator
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+@Description
+ Implements generic resource allocation. The resource allocator was originally
+ intended to manage address spaces. In practice the resource allocator is
+ generic and can manage arbitrary sets of integers.
+
+ Resources are allocated from arenas. Arenas can be created with an initial
+ span of resources. Further resources spans can be added to arenas. A
+ callback mechanism allows an arena to request further resource spans on
+ demand.
+
+ Each arena maintains an ordered list of resource segments each described by a
+ boundary tag. Each boundary tag describes a segment of resources which are
+ either 'free', available for allocation, or 'busy' currently allocated.
+ Adjacent 'free' segments are always coalesced to avoid fragmentation.
+
+ For allocation, all 'free' segments are kept on free lists held in a table
+ indexed by pvr_log2(segment size), i.e. each table index n holds 'free'
+ segments in the size range 2^n -> 2^(n+1) - 1.
+
+ Allocation policy is based on an *almost* good fit strategy.
+
+ Allocated segments are inserted into a self-scaling hash table which maps
+ the base resource of the span to the relevant boundary tag. This allows the
+ code to get back to the boundary tag without exporting explicit boundary tag
+ references through the API.
+
+ Each arena has an associated quantum size, all allocations from the arena are
+ made in multiples of the basic quantum.
+
+ On resource exhaustion in an arena, a callback, if provided, will be used to
+ request further resources. Resource spans allocated by the callback mechanism
+ will be returned when freed (through one of the two callbacks).
+*/ /**************************************************************************/
+
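
As described above, free segments are bucketed by floor(log2(size)), so table index n holds free segments whose sizes lie in the range 2^n to 2^(n+1) - 1. A small stand-alone sketch of that mapping, using __builtin_clzll directly in place of the driver's PVR_CLZLL wrapper and example sizes only:

#include <stdio.h>
#include <stdint.h>

/* Bucket index = floor(log2(size)); mirrors pvr_log2() defined later in this file. */
static unsigned int bucket_index(uint64_t size)
{
    /* log2 is undefined for 0; the driver asserts on this instead. */
    return 63u - (unsigned int)__builtin_clzll(size);
}

int main(void)
{
    /* 4096..8191 byte segments all land in bucket 12. */
    printf("%u %u %u\n",
           bucket_index(4096),   /* 12 */
           bucket_index(8191),   /* 12 */
           bucket_index(8192));  /* 13 */
    return 0;
}
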
+/* Issues:
+ * - flags: flags are passed into the resource allocator but are not currently used.
+ * - determination of import size is currently braindead.
+ * - debug code should be moved out to its own module and #ifdef'd
+ */
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "uniq_key_splay_tree.h"
+
+#include "hash.h"
+#include "ra.h"
+#include "pvrsrv_memallocflags.h"
+
+#include "osfunc.h"
+#include "allocmem.h"
+#include "lock.h"
+#include "pvr_intrinsics.h"
+
+/* The initial, and minimum size of the live address -> boundary tag structure
+ * hash table. The value 64 is a fairly arbitrary choice. The hash table
+ * resizes on demand so the value chosen is not critical.
+ */
+#define MINIMUM_HASH_SIZE (64)
+
+
+/* #define RA_VALIDATE */
+
+#if defined(__KLOCWORK__)
+       /* Make sure Klocwork analyses all the code (including the debug paths) */
+       #if !defined(RA_VALIDATE)
+               #define RA_VALIDATE
+       #endif
+#endif
+
+#if !defined(PVRSRV_NEED_PVR_ASSERT) || !defined(RA_VALIDATE)
+/* Disable the asserts unless explicitly told otherwise.
+ * They slow the driver down too much for general use.
+ */
+
+#undef PVR_ASSERT
+/* Use a macro that really does not do anything when compiling in release
+ * mode!
+ */
+#define PVR_ASSERT(x)
+#endif
+
+/* boundary tags, used to describe a resource segment */
+struct _BT_
+{
+       enum bt_type
+       {
+               btt_free,               /* free resource segment */
+               btt_live                /* allocated resource segment */
+       } type;
+
+       unsigned int is_leftmost;
+       unsigned int is_rightmost;
+       unsigned int free_import;
+
+       /* The base resource and extent of this segment */
+       RA_BASE_T base;
+       RA_LENGTH_T uSize;
+
+       /* doubly linked ordered list of all segments within the arena */
+       struct _BT_ *pNextSegment;
+       struct _BT_ *pPrevSegment;
+
+       /* doubly linked un-ordered list of free segments with the same flags. */
+       struct _BT_ *next_free;
+       struct _BT_ *prev_free;
+
+       /* A user reference associated with this span; user references are
+        * currently only provided via the callback mechanism.
+        */
+       IMG_HANDLE hPriv;
+
+       /* Flags to match on this span */
+       RA_FLAGS_T uFlags;
+
+};
+typedef struct _BT_ BT;
+
+
+/* resource allocation arena */
+struct _RA_ARENA_
+{
+       /* arena name for diagnostics output */
+       IMG_CHAR name[RA_MAX_NAME_LENGTH];
+
+       /* allocations within this arena are quantum sized */
+       RA_LENGTH_T uQuantum;
+
+       /* import interface, if provided */
+       PFN_RA_ALLOC pImportAlloc;
+
+       PFN_RA_FREE pImportFree;
+
+       /* Arbitrary handle provided by arena owner to be passed into the
+        * import alloc and free hooks
+        */
+       void *pImportHandle;
+
+       IMG_PSPLAY_TREE per_flags_buckets;
+
+       /* resource segment list */
+       BT *pHeadSegment;
+
+       /* segment address to boundary tag hash table */
+       HASH_TABLE *pSegmentHash;
+
+       /* Lock for this arena */
+       POS_LOCK hLock;
+
+       /* Policies that govern the resource area */
+       IMG_UINT32 ui32PolicyFlags;
+
+       /* LockClass of this arena. This is used within lockdep to decide if a
+        * recursive call sequence with the same lock class is allowed or not.
+        */
+       IMG_UINT32 ui32LockClass;
+
+       /* Total Size of the Arena */
+       IMG_UINT64      ui64TotalArenaSize;
+
+       /* Size available for allocation in the arena */
+       IMG_UINT64      ui64FreeArenaSize;
+
+};
+
+struct _RA_ARENA_ITERATOR_
+{
+       RA_ARENA *pArena;
+       BT *pCurrent;
+       IMG_BOOL bIncludeFreeSegments;
+};
+
+/*************************************************************************/ /*!
+@Function       _RequestAllocFail
+@Description    Default callback allocator used if no callback is specified,
+                always fails to allocate further resources to the arena.
+@Input          _h - callback handle
+@Input          _uSize - requested allocation size
+@Input          _uflags - allocation flags
+@Input          _pBase - receives allocated base
+@Output         _pActualSize - actual allocation size
+@Input          _pRef - user reference
+@Return         PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL, this function always fails
+                to allocate.
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+_RequestAllocFail(RA_PERARENA_HANDLE _h,
+                  RA_LENGTH_T _uSize,
+                  RA_FLAGS_T _uFlags,
+                  const IMG_CHAR *_pszAnnotation,
+                  RA_BASE_T *_pBase,
+                  RA_LENGTH_T *_pActualSize,
+                  RA_PERISPAN_HANDLE *_phPriv)
+{
+       PVR_UNREFERENCED_PARAMETER(_h);
+       PVR_UNREFERENCED_PARAMETER(_uSize);
+       PVR_UNREFERENCED_PARAMETER(_pActualSize);
+       PVR_UNREFERENCED_PARAMETER(_phPriv);
+       PVR_UNREFERENCED_PARAMETER(_uFlags);
+       PVR_UNREFERENCED_PARAMETER(_pBase);
+       PVR_UNREFERENCED_PARAMETER(_pszAnnotation);
+
+       return PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL;
+}
+
+
+#if defined(PVR_CTZLL)
+       /* Make sure to trigger an error if someone changes the size of the buckets array or of
+          bHasEltsMapping. bHasEltsMapping is used to quickly determine the smallest bucket
+          containing elements, therefore it must have at least as many bits as the buckets
+          array has buckets. The RA implementation actually uses one more bit. */
+       static_assert(ARRAY_SIZE(((IMG_PSPLAY_TREE)0)->buckets)
+                                 < 8 * sizeof(((IMG_PSPLAY_TREE) 0)->bHasEltsMapping),
+                                 "Too many buckets for bHasEltsMapping bitmap");
+#endif
+
+
+/*************************************************************************/ /*!
+@Function       pvr_log2
+@Description    Computes the floor of the log base 2 of an unsigned integer
+@Input          n       Unsigned integer
+@Return         Floor(Log2(n))
+*/ /**************************************************************************/
+#if defined(PVR_CLZLL)
+/* Make sure to trigger an error if someone changes the RA_LENGTH_T type,
+   since __builtin_clzll is only defined for unsigned long long variables.
+
+   If RA_LENGTH_T changes to unsigned long, use __builtin_clzl instead;
+   if it changes to unsigned int, use __builtin_clz.
+
+   If it changes to something bigger than unsigned long long,
+   then revert pvr_log2 to the classic implementation. */
+static_assert(sizeof(RA_LENGTH_T) == sizeof(unsigned long long),
+                         "RA log routines not tuned for sizeof(RA_LENGTH_T)");
+
+static inline IMG_UINT32 pvr_log2(RA_LENGTH_T n)
+{
+       PVR_ASSERT(n != 0); /* Log2 is not defined on 0 */
+
+       return (8 * sizeof(RA_LENGTH_T)) - 1 - PVR_CLZLL(n);
+}
+#else
+static IMG_UINT32
+pvr_log2(RA_LENGTH_T n)
+{
+       IMG_UINT32 l = 0;
+
+       PVR_ASSERT(n != 0); /* Log2 is not defined on 0 */
+
+       n >>= 1;
+       while (n > 0)
+       {
+               n >>= 1;
+               l++;
+       }
+       return l;
+}
+#endif
+
+
+#if defined(RA_VALIDATE)
+/*************************************************************************/ /*!
+@Function       _IsInSegmentList
+@Description    Tests if a BT is in the segment list.
+@Input          pArena           The arena.
+@Input          pBT              The boundary tag to look for.
+@Return         IMG_FALSE  BT was not in the arena's segment list.
+                IMG_TRUE   BT was in the arena's segment list.
+*/ /**************************************************************************/
+static IMG_BOOL
+_IsInSegmentList(RA_ARENA *pArena, BT *pBT)
+{
+       BT* pBTScan;
+
+       PVR_ASSERT(pArena != NULL);
+       PVR_ASSERT(pBT != NULL);
+
+       /* Walk the segment list until we see the BT pointer... */
+       pBTScan = pArena->pHeadSegment;
+       while (pBTScan != NULL  &&  pBTScan != pBT)
+       {
+               pBTScan = pBTScan->pNextSegment;
+       }
+
+       /* Test if we found it and then return */
+       return (pBTScan == pBT);
+}
+
+/*************************************************************************/ /*!
+@Function       _IsInFreeList
+@Description    Tests if a BT is in the free list.
+@Input          pArena           The arena.
+@Input          pBT              The boundary tag to look for.
+@Return         IMG_FALSE  BT was not in the arena's free list.
+                IMG_TRUE   BT was in the arena's free list.
+*/ /**************************************************************************/
+static IMG_BOOL
+_IsInFreeList(RA_ARENA *pArena, BT *pBT)
+{
+       BT* pBTScan;
+       IMG_UINT32 uIndex;
+
+       PVR_ASSERT(pArena != NULL);
+       PVR_ASSERT(pBT != NULL);
+
+       /* Look for the free list that holds BTs of this size... */
+       uIndex = pvr_log2(pBT->uSize);
+       PVR_ASSERT(uIndex < FREE_TABLE_LIMIT);
+
+       pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets);
+       if ((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->flags != pBT->uFlags))
+       {
+               return 0;
+       }
+       else
+       {
+               pBTScan = pArena->per_flags_buckets->buckets[uIndex];
+               while (pBTScan != NULL  &&  pBTScan != pBT)
+               {
+                       pBTScan = pBTScan->next_free;
+               }
+
+               /* Test if we found it and then return */
+               return (pBTScan == pBT);
+       }
+}
+
+/* is_arena_valid should only be used in debug mode.
+ * It checks a set of invariants that every arena must satisfy.
+ */
+static int is_arena_valid(struct _RA_ARENA_ *arena)
+{
+       struct _BT_ *chunk;
+#if defined(PVR_CTZLL)
+       unsigned int i;
+#endif
+
+       for (chunk = arena->pHeadSegment; chunk != NULL; chunk = chunk->pNextSegment)
+       {
+               /* if next segment is NULL, then it must be a rightmost */
+               PVR_ASSERT((chunk->pNextSegment != NULL) || (chunk->is_rightmost));
+               /* if prev segment is NULL, then it must be a leftmost */
+               PVR_ASSERT((chunk->pPrevSegment != NULL) || (chunk->is_leftmost));
+
+               if (chunk->type == btt_free)
+               {
+                       /* checks the correctness of the type field */
+                       PVR_ASSERT(_IsInFreeList(arena, chunk));
+
+                       /* Check that there are no two consecutive free chunks:
+                          instead of two consecutive free chunks there should be
+                          only one that spans the size of the two. */
+                       PVR_ASSERT((chunk->is_leftmost) || (chunk->pPrevSegment->type != btt_free));
+                       PVR_ASSERT((chunk->is_rightmost) || (chunk->pNextSegment->type != btt_free));
+               }
+               else
+               {
+                       /* checks the correctness of the type field */
+                       PVR_ASSERT(!_IsInFreeList(arena, chunk));
+               }
+
+               PVR_ASSERT((chunk->is_leftmost) || (chunk->pPrevSegment->base + chunk->pPrevSegment->uSize == chunk->base));
+               PVR_ASSERT((chunk->is_rightmost) || (chunk->base + chunk->uSize == chunk->pNextSegment->base));
+
+               /* all segments of the same import must have the same flags ... */
+               PVR_ASSERT((chunk->is_rightmost) || (chunk->uFlags == chunk->pNextSegment->uFlags));
+               /* ... and the same import handle */
+               PVR_ASSERT((chunk->is_rightmost) || (chunk->hPriv == chunk->pNextSegment->hPriv));
+
+
+               /* If a free chunk spans a whole import, then it must be a 'not to free' import;
+                  otherwise it would have been freed back already. */
+               PVR_ASSERT((!chunk->is_leftmost) || (!chunk->is_rightmost) || (chunk->type == btt_live) || (!chunk->free_import));
+       }
+
+#if defined(PVR_CTZLL)
+       if (arena->per_flags_buckets != NULL)
+       {
+               for (i = 0; i < FREE_TABLE_LIMIT; ++i)
+               {
+                       /* verify that the bHasEltsMapping is correct for this flags bucket */
+                       PVR_ASSERT(
+                               ((arena->per_flags_buckets->buckets[i] == NULL) &&
+                                (((arena->per_flags_buckets->bHasEltsMapping & ((IMG_ELTS_MAPPINGS) 1 << i)) == 0)))
+                               ||
+                               ((arena->per_flags_buckets->buckets[i] != NULL) &&
+                                (((arena->per_flags_buckets->bHasEltsMapping & ((IMG_ELTS_MAPPINGS) 1 << i)) != 0)))
+                               );
+               }
+       }
+#endif
+
+       /* if arena was not valid, an earlier assert should have triggered */
+       return 1;
+}
+#endif
+
+/*************************************************************************/ /*!
+@Function       _SegmentListInsertAfter
+@Description    Insert a boundary tag into an arena segment list after a
+                specified boundary tag.
+@Input          pInsertionPoint  The insertion point.
+@Input          pBT              The boundary tag to insert.
+*/ /**************************************************************************/
+static INLINE void
+_SegmentListInsertAfter(BT *pInsertionPoint,
+                        BT *pBT)
+{
+       PVR_ASSERT(pBT != NULL);
+       PVR_ASSERT(pInsertionPoint != NULL);
+
+       pBT->pNextSegment = pInsertionPoint->pNextSegment;
+       pBT->pPrevSegment = pInsertionPoint;
+       if (pInsertionPoint->pNextSegment != NULL)
+       {
+               pInsertionPoint->pNextSegment->pPrevSegment = pBT;
+       }
+       pInsertionPoint->pNextSegment = pBT;
+}
+
+/*************************************************************************/ /*!
+@Function       _SegmentListInsert
+@Description    Insert a boundary tag into an arena segment list
+@Input          pArena    The arena.
+@Input          pBT       The boundary tag to insert.
+*/ /**************************************************************************/
+static INLINE void
+_SegmentListInsert(RA_ARENA *pArena, BT *pBT)
+{
+       PVR_ASSERT(!_IsInSegmentList(pArena, pBT));
+
+       /* insert into the segment chain */
+       pBT->pNextSegment = pArena->pHeadSegment;
+       pArena->pHeadSegment = pBT;
+       if (pBT->pNextSegment != NULL)
+       {
+               pBT->pNextSegment->pPrevSegment = pBT;
+       }
+
+       pBT->pPrevSegment = NULL;
+}
+
+/*************************************************************************/ /*!
+@Function       _SegmentListRemove
+@Description    Remove a boundary tag from an arena segment list.
+@Input          pArena    The arena.
+@Input          pBT       The boundary tag to remove.
+*/ /**************************************************************************/
+static void
+_SegmentListRemove(RA_ARENA *pArena, BT *pBT)
+{
+       PVR_ASSERT(_IsInSegmentList(pArena, pBT));
+
+       if (pBT->pPrevSegment == NULL)
+               pArena->pHeadSegment = pBT->pNextSegment;
+       else
+               pBT->pPrevSegment->pNextSegment = pBT->pNextSegment;
+
+       if (pBT->pNextSegment != NULL)
+               pBT->pNextSegment->pPrevSegment = pBT->pPrevSegment;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _BuildBT
+@Description    Construct a boundary tag for a free segment.
+@Input          base     The base of the resource segment.
+@Input          uSize    The extent of the resource segment.
+@Input          uFlags   The flags to give to the boundary tag
+@Return         Boundary tag or NULL
+*/ /**************************************************************************/
+static BT *
+_BuildBT(RA_BASE_T base, RA_LENGTH_T uSize, RA_FLAGS_T uFlags)
+{
+       BT *pBT;
+
+       pBT = OSAllocZMem(sizeof(BT));
+       if (pBT == NULL)
+       {
+               return NULL;
+       }
+
+       pBT->is_leftmost = 1;
+       pBT->is_rightmost = 1;
+       /* pBT->free_import = 0; */
+       pBT->type = btt_live;
+       pBT->base = base;
+       pBT->uSize = uSize;
+       pBT->uFlags = uFlags;
+
+       return pBT;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _SegmentSplit
+@Description    Split a segment into two, maintain the arena segment list. The
+                boundary tag should not be in the free table. Neither the
+                original or the new neighbour boundary tag will be in the free
+                table.
+@Input          pBT       The boundary tag to split.
+@Input          uSize     The required segment size of boundary tag after
+                          splitting.
+@Return         New neighbour boundary tag or NULL.
+*/ /**************************************************************************/
+static BT *
+_SegmentSplit(BT *pBT, RA_LENGTH_T uSize)
+{
+       BT *pNeighbour;
+
+       pNeighbour = _BuildBT(pBT->base + uSize, pBT->uSize - uSize, pBT->uFlags);
+       if (pNeighbour == NULL)
+       {
+               return NULL;
+       }
+
+       _SegmentListInsertAfter(pBT, pNeighbour);
+
+       pNeighbour->is_leftmost = 0;
+       pNeighbour->is_rightmost = pBT->is_rightmost;
+       pNeighbour->free_import = pBT->free_import;
+       pBT->is_rightmost = 0;
+       pNeighbour->hPriv = pBT->hPriv;
+       pBT->uSize = uSize;
+       pNeighbour->uFlags = pBT->uFlags;
+
+       return pNeighbour;
+}
+
+/*************************************************************************/ /*!
+@Function       _FreeListInsert
+@Description    Insert a boundary tag into an arena free table.
+@Input          pArena    The arena.
+@Input          pBT       The boundary tag.
+*/ /**************************************************************************/
+static void
+_FreeListInsert(RA_ARENA *pArena, BT *pBT)
+{
+       IMG_UINT32 uIndex;
+       BT *pBTTemp = NULL;
+       uIndex = pvr_log2(pBT->uSize);
+
+       PVR_ASSERT(uIndex < FREE_TABLE_LIMIT);
+       PVR_ASSERT(!_IsInFreeList(pArena, pBT));
+
+       pBT->type = btt_free;
+
+       pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets);
+       /* the flags item in the splay tree must have been created beforehand by
+          _InsertResource */
+       PVR_ASSERT(pArena->per_flags_buckets != NULL);
+       PVR_ASSERT(pArena->per_flags_buckets->buckets != NULL);
+
+       /* Handle NULL values for RELEASE builds and/or disabled ASSERT DEBUG builds */
+       if (unlikely((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->buckets == NULL)))
+       {
+               return;
+       }
+
+       /* Get the first node in the bucket */
+       pBTTemp = pArena->per_flags_buckets->buckets[uIndex];
+
+       if (unlikely((pArena->ui32PolicyFlags & RA_POLICY_ALLOC_NODE_SELECT_MASK) == RA_POLICY_ALLOC_OPTIMAL))
+       {
+               /* Add the node to the start if the bucket is empty */
+               if (NULL == pBTTemp)
+               {
+                       pArena->per_flags_buckets->buckets[uIndex] = pBT;
+                       pBT->next_free = NULL;
+                       pBT->prev_free = NULL;
+
+               }
+               else
+               {
+                       BT *pBTPrev = NULL;
+                       /* Traverse the list and identify the appropriate
+                        * place based on the size of the Boundary being inserted */
+                       while (pBTTemp && (pBTTemp->uSize < pBT->uSize))
+                       {
+                               pBTPrev = pBTTemp;
+                               pBTTemp = pBTTemp->next_free;
+                       }
+                       /* point the new node to the first higher size element */
+                       pBT->next_free = pBTTemp;
+                       pBT->prev_free = pBTPrev;
+
+                       if (pBTPrev)
+                       {
+                               /* Set the lower size element in the
+                                * chain to point new node */
+                               pBTPrev->next_free = pBT;
+                       }
+                       else
+                       {
+                               /* No smaller element was found, so the new node
+                                * becomes the head of the bucket */
+                               pArena->per_flags_buckets->buckets[uIndex] = pBT;
+                       }
+                       /* Make sure the higher size element in the chain points back
+                        * to the new node to be introduced */
+                       if (pBTTemp)
+                       {
+                               pBTTemp->prev_free = pBT;
+                       }
+               }
+       }
+       else
+       {
+               pBT->next_free =  pBTTemp;
+               if (pBT->next_free != NULL)
+               {
+                       pBT->next_free->prev_free = pBT;
+               }
+               pBT->prev_free = NULL;
+               pArena->per_flags_buckets->buckets[uIndex] = pBT;
+       }
+
+#if defined(PVR_CTZLL)
+       /* tells that bucket[index] now contains elements */
+       pArena->per_flags_buckets->bHasEltsMapping |= ((IMG_ELTS_MAPPINGS) 1 << uIndex);
+#endif
+
+}
+
+/*************************************************************************/ /*!
+@Function       _FreeListRemove
+@Description    Remove a boundary tag from an arena free table.
+@Input          pArena    The arena.
+@Input          pBT       The boundary tag.
+*/ /**************************************************************************/
+static void
+_FreeListRemove(RA_ARENA *pArena, BT *pBT)
+{
+       IMG_UINT32 uIndex;
+       uIndex = pvr_log2(pBT->uSize);
+
+       PVR_ASSERT(uIndex < FREE_TABLE_LIMIT);
+       PVR_ASSERT(_IsInFreeList(pArena, pBT));
+
+       if (pBT->next_free != NULL)
+       {
+               pBT->next_free->prev_free = pBT->prev_free;
+       }
+
+       if (pBT->prev_free != NULL)
+       {
+               pBT->prev_free->next_free = pBT->next_free;
+       }
+       else
+       {
+               pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets);
+               /* the flags item in the splay tree must have already been created
+                  (otherwise how could there be a segment with these flags?) */
+               PVR_ASSERT(pArena->per_flags_buckets != NULL);
+               PVR_ASSERT(pArena->per_flags_buckets->buckets != NULL);
+
+               /* Handle unlikely NULL values for RELEASE or ASSERT-disabled builds */
+               if (unlikely((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->buckets == NULL)))
+               {
+                       pBT->type = btt_live;
+                       return;
+               }
+
+               pArena->per_flags_buckets->buckets[uIndex] = pBT->next_free;
+#if defined(PVR_CTZLL)
+               if (pArena->per_flags_buckets->buckets[uIndex] == NULL)
+               {
+                       /* there are no more elements in this bucket. Update the mapping. */
+                       pArena->per_flags_buckets->bHasEltsMapping &= ~((IMG_ELTS_MAPPINGS) 1 << uIndex);
+               }
+#endif
+       }
+
+       PVR_ASSERT(!_IsInFreeList(pArena, pBT));
+       pBT->type = btt_live;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _InsertResource
+@Description    Add a free resource segment to an arena.
+@Input          pArena    The arena.
+@Input          base      The base of the resource segment.
+@Input          uSize     The extent of the resource segment.
+@Input          uFlags    The flags of the new resources.
+@Return         New bucket pointer
+                NULL on failure
+*/ /**************************************************************************/
+static BT *
+_InsertResource(RA_ARENA *pArena, RA_BASE_T base, RA_LENGTH_T uSize,
+                RA_FLAGS_T uFlags)
+{
+       BT *pBT;
+       PVR_ASSERT(pArena!=NULL);
+
+       pBT = _BuildBT(base, uSize, uFlags);
+
+       if (pBT != NULL)
+       {
+               IMG_PSPLAY_TREE tmp = PVRSRVInsert(pBT->uFlags, pArena->per_flags_buckets);
+               if (tmp == NULL)
+               {
+                       OSFreeMem(pBT);
+                       return NULL;
+               }
+
+               pArena->per_flags_buckets = tmp;
+               _SegmentListInsert(pArena, pBT);
+               _FreeListInsert(pArena, pBT);
+       }
+       return pBT;
+}
+
+/*************************************************************************/ /*!
+@Function       _InsertResourceSpan
+@Description    Add a free resource span to an arena, marked for free_import.
+@Input          pArena    The arena.
+@Input          base      The base of the resource segment.
+@Input          uSize     The extent of the resource segment.
+@Return         The boundary tag representing the free resource segment,
+                or NULL on failure.
+*/ /**************************************************************************/
+static INLINE BT *
+_InsertResourceSpan(RA_ARENA *pArena,
+                    RA_BASE_T base,
+                    RA_LENGTH_T uSize,
+                    RA_FLAGS_T uFlags)
+{
+       BT *pBT = _InsertResource(pArena, base, uSize, uFlags);
+       if (pBT != NULL)
+       {
+               pBT->free_import = 1;
+       }
+       return pBT;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _RemoveResourceSpan
+@Description    Frees a resource span from an arena, returning the imported
+                span via the callback.
+@Input          pArena     The arena.
+@Input          pBT        The boundary tag to free.
+@Return         IMG_FALSE failure - span was still in use
+                IMG_TRUE  success - span was removed and returned
+*/ /**************************************************************************/
+static INLINE IMG_BOOL
+_RemoveResourceSpan(RA_ARENA *pArena, BT *pBT)
+{
+       PVR_ASSERT(pArena!=NULL);
+       PVR_ASSERT(pBT!=NULL);
+
+       if (pBT->free_import &&
+               pBT->is_leftmost &&
+               pBT->is_rightmost)
+       {
+               _SegmentListRemove(pArena, pBT);
+               pArena->pImportFree(pArena->pImportHandle, pBT->base, pBT->hPriv);
+               OSFreeMem(pBT);
+
+               return IMG_TRUE;
+       }
+
+       return IMG_FALSE;
+}
+
+/*************************************************************************/ /*!
+@Function       _FreeBT
+@Description    Free a boundary tag taking care of the segment list and the
+                boundary tag free table.
+@Input          pArena     The arena.
+@Input          pBT        The boundary tag to free.
+*/ /**************************************************************************/
+static void
+_FreeBT(RA_ARENA *pArena, BT *pBT)
+{
+       BT *pNeighbour;
+
+       PVR_ASSERT(pArena!=NULL);
+       PVR_ASSERT(pBT!=NULL);
+       PVR_ASSERT(!_IsInFreeList(pArena, pBT));
+
+       /* try and coalesce with left neighbour */
+       pNeighbour = pBT->pPrevSegment;
+       if ((!pBT->is_leftmost) && (pNeighbour->type == btt_free))
+       {
+               /* Verify list correctness */
+               PVR_ASSERT(pNeighbour->base + pNeighbour->uSize == pBT->base);
+
+               _FreeListRemove(pArena, pNeighbour);
+               _SegmentListRemove(pArena, pNeighbour);
+               pBT->base = pNeighbour->base;
+
+               pBT->uSize += pNeighbour->uSize;
+               pBT->is_leftmost = pNeighbour->is_leftmost;
+               OSFreeMem(pNeighbour);
+       }
+
+       /* try to coalesce with right neighbour */
+       pNeighbour = pBT->pNextSegment;
+       if ((!pBT->is_rightmost) && (pNeighbour->type == btt_free))
+       {
+               /* Verify list correctness */
+               PVR_ASSERT(pBT->base + pBT->uSize == pNeighbour->base);
+
+               _FreeListRemove(pArena, pNeighbour);
+               _SegmentListRemove(pArena, pNeighbour);
+               pBT->uSize += pNeighbour->uSize;
+               pBT->is_rightmost = pNeighbour->is_rightmost;
+               OSFreeMem(pNeighbour);
+       }
+
+       if (_RemoveResourceSpan(pArena, pBT) == IMG_FALSE)
+       {
+               _FreeListInsert(pArena, pBT);
+               PVR_ASSERT((!pBT->is_rightmost) || (!pBT->is_leftmost) || (!pBT->free_import));
+       }
+
+       PVR_ASSERT(is_arena_valid(pArena));
+}
+
+
+/*
+  This function returns the first element in a bucket that can be split
+  such that one of the resulting sub-segments meets the size and alignment
+  criteria.
+
+  first_elt is the bucket to look into. Remember that a bucket is
+  implemented as a pointer to the first element of the linked list.
+
+  nb_max_try limits the number of elements considered, i.e. only the first
+  nb_max_try elements in the free list are examined. The special value ~0
+  means unlimited, i.e. consider all elements in the free list.
+ */
+static INLINE
+struct _BT_ *find_chunk_in_bucket(struct _BT_ * first_elt,
+                                  RA_LENGTH_T uSize,
+                                  RA_LENGTH_T uAlignment,
+                                  unsigned int nb_max_try)
+{
+       struct _BT_ *walker;
+
+       for (walker = first_elt; (walker != NULL) && (nb_max_try != 0); walker = walker->next_free)
+       {
+               const RA_BASE_T aligned_base = (uAlignment > 1) ?
+                       (walker->base + uAlignment - 1) & ~(uAlignment - 1)
+                       : walker->base;
+
+               if (walker->base + walker->uSize >= aligned_base + uSize)
+               {
+                       return walker;
+               }
+
+               /* 0xFFFF...FFFF is used as nb_max_try = infinity. */
+               if (nb_max_try != (unsigned int) ~0)
+               {
+                       nb_max_try--;
+               }
+       }
+
+       return NULL;
+}
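
Both find_chunk_in_bucket above and _AllocAlignSplit below round a segment base up to the requested power-of-two alignment with (base + uAlignment - 1) & ~(uAlignment - 1). A quick stand-alone check of that expression with illustrative values:

#include <stdio.h>
#include <stdint.h>

/* Round 'base' up to the next multiple of 'align' (align must be a power of 2). */
static uint64_t align_up(uint64_t base, uint64_t align)
{
    return (base + align - 1) & ~(align - 1);
}

int main(void)
{
    /* 0x1003 rounds up to 0x1010 for 16-byte alignment; aligned bases are unchanged. */
    printf("0x%llx 0x%llx\n",
           (unsigned long long)align_up(0x1003, 16),   /* 0x1010 */
           (unsigned long long)align_up(0x2000, 16));  /* 0x2000 */
    return 0;
}
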
+
+/*************************************************************************/ /*!
+@Function       _AllocAlignSplit
+@Description    Given a valid BT, trim the start and end of the BT according
+                to alignment and size requirements. Also add the resulting
+                BT to the live hash table.
+@Input          pArena       The arena.
+@Input          pBT          The BT to trim and add to live hash table
+@Input          uSize        The requested allocation size.
+@Input          uAlignment   The alignment requirements of the allocation
+                             Required uAlignment, or 0.
+                             Must be a power of 2 if not 0
+@Output         pBase        Allocated, corrected, resource base
+                             (non-optional, must not be NULL)
+@Output         phPriv       The user references associated with
+                             the imported segment. (optional)
+@Return         IMG_FALSE failure
+                IMG_TRUE success
+*/ /**************************************************************************/
+static IMG_BOOL
+_AllocAlignSplit(RA_ARENA *pArena,
+                 BT *pBT,
+                 RA_LENGTH_T uSize,
+                 RA_LENGTH_T uAlignment,
+                 RA_BASE_T *pBase,
+                 RA_PERISPAN_HANDLE *phPriv)
+{
+       RA_BASE_T aligned_base;
+
+       aligned_base = (uAlignment > 1) ? (pBT->base + uAlignment - 1) & ~(uAlignment - 1) : pBT->base;
+
+       _FreeListRemove(pArena, pBT);
+
+       if ((pArena->ui32PolicyFlags & RA_POLICY_NO_SPLIT_MASK) == RA_POLICY_NO_SPLIT)
+       {
+               goto nosplit;
+       }
+
+       /* with uAlignment we might need to discard the front of this segment */
+       if (aligned_base > pBT->base)
+       {
+               BT *pNeighbour;
+               pNeighbour = _SegmentSplit(pBT, (RA_LENGTH_T)(aligned_base - pBT->base));
+               /* partition the buffer, create a new boundary tag */
+               if (pNeighbour == NULL)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Front split failed", __func__));
+                       /* Put pBT back in the list */
+                       _FreeListInsert(pArena, pBT);
+                       return IMG_FALSE;
+               }
+
+               _FreeListInsert(pArena, pBT);
+               pBT = pNeighbour;
+       }
+
+       /* the segment might be too big, if so, discard the back of the segment */
+       if (pBT->uSize > uSize)
+       {
+               BT *pNeighbour;
+               pNeighbour = _SegmentSplit(pBT, uSize);
+               /* partition the buffer, create a new boundary tag */
+               if (pNeighbour == NULL)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Back split failed", __func__));
+                       /* Put pBT back in the list */
+                       _FreeListInsert(pArena, pBT);
+                       return IMG_FALSE;
+               }
+
+               _FreeListInsert(pArena, pNeighbour);
+       }
+nosplit:
+       pBT->type = btt_live;
+
+       if (!HASH_Insert_Extended(pArena->pSegmentHash, &aligned_base, (uintptr_t)pBT))
+       {
+               _FreeBT(pArena, pBT);
+               return IMG_FALSE;
+       }
+
+       if (phPriv != NULL)
+               *phPriv = pBT->hPriv;
+
+       *pBase = aligned_base;
+
+       return IMG_TRUE;
+}
+
+/*************************************************************************/ /*!
+@Function       _AttemptAllocAligned
+@Description    Attempt an allocation from an arena.
+@Input          pArena       The arena.
+@Input          uSize        The requested allocation size.
+@Input          uFlags       Allocation flags
+@Output         phPriv       The user references associated with
+                             the imported segment. (optional)
+@Input          uAlignment   Required uAlignment, or 0.
+                             Must be a power of 2 if not 0
+@Output         base         Allocated resource base (non-optional, must not
+                             be NULL)
+@Return         IMG_FALSE failure
+                IMG_TRUE success
+*/ /**************************************************************************/
+static IMG_BOOL
+_AttemptAllocAligned(RA_ARENA *pArena,
+                     RA_LENGTH_T uSize,
+                     RA_FLAGS_T uFlags,
+                     RA_LENGTH_T uAlignment,
+                     RA_BASE_T *base,
+                     RA_PERISPAN_HANDLE *phPriv) /* this is the "per-import" private data */
+{
+
+       IMG_UINT32 index_low;
+       IMG_UINT32 index_high;
+       IMG_UINT32 i;
+       struct _BT_ *pBT = NULL;
+
+       PVR_ASSERT(pArena!=NULL);
+       PVR_ASSERT(base != NULL);
+
+       pArena->per_flags_buckets = PVRSRVSplay(uFlags, pArena->per_flags_buckets);
+       if ((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->uiFlags != uFlags))
+       {
+               /* no chunks with these flags. */
+               return IMG_FALSE;
+       }
+
+       index_low = pvr_log2(uSize);
+       if (uAlignment)
+       {
+               index_high = pvr_log2(uSize + uAlignment - 1);
+       }
+       else
+       {
+               index_high = index_low;
+       }
+
+       PVR_ASSERT(index_low < FREE_TABLE_LIMIT);
+       PVR_ASSERT(index_high < FREE_TABLE_LIMIT);
+       PVR_ASSERT(index_low <= index_high);
+
+       if (unlikely((pArena->ui32PolicyFlags & RA_POLICY_BUCKET_MASK) == RA_POLICY_BUCKET_BEST_FIT))
+       {
+               /* This policy ensures that the first (lowest-sized) bucket that can
+                * satisfy the requested size is selected */
+#if defined(PVR_CTZLL)
+               i = PVR_CTZLL((~(((IMG_ELTS_MAPPINGS)1 << (index_low )) - 1)) & pArena->per_flags_buckets->bHasEltsMapping);
+#else
+               i = index_low;
+#endif
+               for ( ; (i < FREE_TABLE_LIMIT) && (pBT == NULL); ++i)
+               {
+                       if (pArena->per_flags_buckets->buckets[i])
+                       {
+                               pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], uSize, uAlignment, (unsigned int) ~0);
+                       }
+               }
+       }
+       else
+       {
+#if defined(PVR_CTZLL)
+               i = PVR_CTZLL((~(((IMG_ELTS_MAPPINGS)1 << (index_high + 1)) - 1)) & pArena->per_flags_buckets->bHasEltsMapping);
+#else
+               for (i = index_high + 1; (i < FREE_TABLE_LIMIT) && (pArena->per_flags_buckets->buckets[i] == NULL); ++i)
+               {
+               }
+#endif
+               PVR_ASSERT(i <= FREE_TABLE_LIMIT);
+
+               if (i != FREE_TABLE_LIMIT)
+               {
+                       /* since we start at index_high + 1, we are guaranteed to exit */
+                       pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], uSize, uAlignment, 1);
+               }
+               else
+               {
+                       for (i = index_high; (i != index_low - 1) && (pBT == NULL); --i)
+                       {
+                               pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], uSize, uAlignment, (unsigned int) ~0);
+                       }
+               }
+       }
+
+       if (pBT == NULL)
+       {
+               return IMG_FALSE;
+       }
+
+       return _AllocAlignSplit(pArena, pBT, uSize, uAlignment, base, phPriv);
+}
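
With PVR_CTZLL available, the bucket scans above find the first non-empty bucket at or above a given index by masking off the low bits of bHasEltsMapping and taking count-trailing-zeros. A self-contained sketch of that trick, using __builtin_ctzll directly and a made-up bitmap; unlike the driver, the all-zero mask case is handled explicitly here:

#include <stdio.h>
#include <stdint.h>

/* Return the index of the first non-empty bucket >= min_index, or 64 if none.
 * 'has_elts' has bit i set when bucket i is non-empty, like bHasEltsMapping. */
static unsigned int first_bucket_from(uint64_t has_elts, unsigned int min_index)
{
    uint64_t masked = has_elts & ~((1ULL << min_index) - 1);

    return masked ? (unsigned int)__builtin_ctzll(masked) : 64u;
}

int main(void)
{
    /* Buckets 3, 7 and 12 are non-empty in this made-up arena. */
    uint64_t has_elts = (1ULL << 3) | (1ULL << 7) | (1ULL << 12);

    printf("%u %u %u\n",
           first_bucket_from(has_elts, 0),   /* 3  */
           first_bucket_from(has_elts, 4),   /* 7  */
           first_bucket_from(has_elts, 13)); /* 64 (none) */
    return 0;
}
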
+
+/*************************************************************************/ /*!
+@Function       _AttemptImportSpanAlloc
+@Description    Attempt to Import more memory and create a new span.
+                Function attempts to import more memory from the callback
+                provided at RA creation time, if successful the memory
+                will form a new span in the RA.
+@Input          pArena            The arena.
+@Input          uRequestSize      The requested allocation size.
+@Input          uImportMultiplier Import x-times more for future requests if
+                                  we have to import new memory.
+@Input          uImportFlags      Flags influencing allocation policy.
+@Input          uAlignment        The alignment requirements of the allocation
+                                  Required uAlignment, or 0.
+                                  Must be a power of 2 if not 0
+@Input          pszAnnotation     String to describe the allocation
+@Output         pImportBase       Allocated import base
+                                  (non-optional, must not be NULL)
+@Output         pImportSize       Allocated import size
+@Output         pImportBT         Allocated import BT
+@Return         PVRSRV_OK - success
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+_AttemptImportSpanAlloc(RA_ARENA *pArena,
+                        RA_LENGTH_T uRequestSize,
+                        IMG_UINT8 uImportMultiplier,
+                        RA_FLAGS_T uImportFlags,
+                        RA_LENGTH_T uAlignment,
+                        const IMG_CHAR *pszAnnotation,
+                        RA_BASE_T *pImportBase,
+                        RA_LENGTH_T *pImportSize,
+                        BT **pImportBT)
+{
+       IMG_HANDLE hPriv;
+       RA_FLAGS_T uFlags = (uImportFlags & PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK);
+       BT *pBT;
+       PVRSRV_ERROR eError;
+
+       *pImportSize = uRequestSize;
+       /*
+               Ensure that we allocate sufficient space to meet the uAlignment
+               constraint
+        */
+       if (uAlignment > pArena->uQuantum)
+       {
+               *pImportSize += (uAlignment - pArena->uQuantum);
+       }
+
+       /* apply over-allocation multiplier after all alignment adjustments */
+       *pImportSize *= uImportMultiplier;
+
+       /* ensure that we import according to the quanta of this arena */
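+       /* Illustrative note (hypothetical values): with uQuantum = 0x1000 a
+        * *pImportSize of 0x1801 rounds up to 0x2000, since
+        * (0x1801 + 0xFFF) & ~0xFFF == 0x2000. uQuantum is 1 << uLog2Quantum
+        * (see RA_Create), so it is always a power of two and the mask-based
+        * round-up below is exact. */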
+       *pImportSize = (*pImportSize + pArena->uQuantum - 1) & ~(pArena->uQuantum - 1);
+
+       eError = pArena->pImportAlloc(pArena->pImportHandle,
+                                                                 *pImportSize, uImportFlags,
+                                                                 pszAnnotation,
+                                                                 pImportBase, pImportSize,
+                                                                 &hPriv);
+       if (PVRSRV_OK != eError)
+       {
+               return eError;
+       }
+
+       /* If we successfully imported more resource, create a span to
+        * represent it; otherwise free the resource we imported.
+        */
+       pBT = _InsertResourceSpan(pArena, *pImportBase, *pImportSize, uFlags);
+       if (pBT == NULL)
+       {
+               /* insufficient resources to insert the newly acquired span,
+                  so free it back again */
+               pArena->pImportFree(pArena->pImportHandle, *pImportBase, hPriv);
+
+               PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s', "
+                       "size=0x%llx failed!", __func__, pArena->name,
+                       (unsigned long long)uRequestSize));
+               /* RA_Dump (arena); */
+
+               return PVRSRV_ERROR_RA_INSERT_RESOURCE_SPAN_FAILED;
+       }
+
+       pBT->hPriv = hPriv;
+       *pImportBT = pBT;
+
+       return eError;
+}
+
+IMG_INTERNAL RA_ARENA *
+RA_Create(IMG_CHAR *name,
+          RA_LOG2QUANTUM_T uLog2Quantum,
+          IMG_UINT32 ui32LockClass,
+          PFN_RA_ALLOC imp_alloc,
+          PFN_RA_FREE imp_free,
+          RA_PERARENA_HANDLE arena_handle,
+          IMG_UINT32 ui32PolicyFlags)
+{
+       RA_ARENA *pArena;
+       PVRSRV_ERROR eError;
+
+       if (name == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: invalid parameter 'name' (NULL not accepted)", __func__));
+               return NULL;
+       }
+
+       PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s'", __func__, name));
+
+       pArena = OSAllocMem(sizeof(*pArena));
+       if (pArena == NULL)
+       {
+               goto arena_fail;
+       }
+
+       eError = OSLockCreate(&pArena->hLock);
+       if (eError != PVRSRV_OK)
+       {
+               goto lock_fail;
+       }
+
+       pArena->pSegmentHash = HASH_Create_Extended(MINIMUM_HASH_SIZE, sizeof(RA_BASE_T), HASH_Func_Default, HASH_Key_Comp_Default);
+
+       if (pArena->pSegmentHash==NULL)
+       {
+               goto hash_fail;
+       }
+
+       OSStringLCopy(pArena->name, name, RA_MAX_NAME_LENGTH);
+       pArena->pImportAlloc = (imp_alloc!=NULL) ? imp_alloc : &_RequestAllocFail;
+       pArena->pImportFree = imp_free;
+       pArena->pImportHandle = arena_handle;
+       pArena->pHeadSegment = NULL;
+       pArena->uQuantum = 1ULL << uLog2Quantum;
+       pArena->per_flags_buckets = NULL;
+       pArena->ui32LockClass = ui32LockClass;
+       pArena->ui32PolicyFlags = ui32PolicyFlags;
+       pArena->ui64TotalArenaSize = 0;
+       pArena->ui64FreeArenaSize = 0;
+
+       PVR_ASSERT(is_arena_valid(pArena));
+       return pArena;
+
+hash_fail:
+       OSLockDestroy(pArena->hLock);
+lock_fail:
+       OSFreeMem(pArena);
+       /* not nulling pointer, out of scope */
+arena_fail:
+       return NULL;
+}
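+
+/* Illustrative sketch (not part of this patch): a minimal caller of the RA
+ * API above. The function name, arena name and base/size values are
+ * hypothetical; no import callbacks are registered, so the arena only
+ * manages the single span added with RA_Add(). Kept under #if 0 so it is
+ * never built.
+ */
+#if 0
+static void ExampleArenaUsage(void)
+{
+       IMG_CHAR acName[] = "example-arena";
+       RA_ARENA *psArena;
+       RA_BASE_T uiBase;
+       PVRSRV_ERROR eError;
+
+       /* 4KB quantum (uLog2Quantum = 12), no import callbacks. */
+       psArena = RA_Create(acName, 12, RA_LOCKCLASS_0,
+                           NULL, NULL, NULL, RA_POLICY_DEFAULT);
+       if (psArena == NULL)
+       {
+               return;
+       }
+
+       /* Hand a 1MB span starting at 0x100000 to the arena. */
+       if (RA_Add(psArena, 0x100000, 0x100000, 0, NULL))
+       {
+               /* Carve out a 64KB allocation aligned to 64KB. */
+               eError = RA_Alloc(psArena, 0x10000, RA_NO_IMPORT_MULTIPLIER, 0,
+                                 0x10000, "example-alloc", &uiBase, NULL, NULL);
+               if (eError == PVRSRV_OK)
+               {
+                       RA_Free(psArena, uiBase);
+               }
+       }
+
+       RA_Delete(psArena);
+}
+#endif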
+
+static void _LogRegionCreation(const char *pszMemType,
+                               IMG_UINT64 ui64CpuPA,
+                               IMG_UINT64 ui64DevPA,
+                               IMG_UINT64 ui64Size)
+{
+#if !defined(DEBUG)
+       PVR_UNREFERENCED_PARAMETER(pszMemType);
+       PVR_UNREFERENCED_PARAMETER(ui64CpuPA);
+       PVR_UNREFERENCED_PARAMETER(ui64DevPA);
+       PVR_UNREFERENCED_PARAMETER(ui64Size);
+#else
+       if ((ui64CpuPA != 0) && (ui64DevPA != 0) && (ui64CpuPA != ui64DevPA))
+       {
+               PVR_DPF((PVR_DBG_MESSAGE,
+                       "Creating RA for \"%s\" memory"
+                       " - Cpu PA 0x%016" IMG_UINT64_FMTSPECx "-0x%016" IMG_UINT64_FMTSPECx
+                       " - Dev PA 0x%016" IMG_UINT64_FMTSPECx "-0x%016" IMG_UINT64_FMTSPECx,
+                       pszMemType,
+                       ui64CpuPA, ui64CpuPA + ui64Size,
+                       ui64DevPA, ui64DevPA + ui64Size));
+       }
+       else
+       {
+               __maybe_unused IMG_UINT64 ui64PA =
+                       ui64CpuPA != 0 ? ui64CpuPA : ui64DevPA;
+               __maybe_unused const IMG_CHAR *pszAddrType =
+                       ui64CpuPA == ui64DevPA ? "Cpu/Dev" : (ui64CpuPA != 0 ? "Cpu" : "Dev");
+
+               PVR_DPF((PVR_DBG_MESSAGE,
+                       "Creating RA for \"%s\" memory - %s PA 0x%016"
+                       IMG_UINT64_FMTSPECx "-0x%016" IMG_UINT64_FMTSPECx,
+                       pszMemType, pszAddrType,
+                       ui64PA, ui64PA + ui64Size));
+       }
+#endif
+}
+
+IMG_INTERNAL RA_ARENA *
+RA_Create_With_Span(IMG_CHAR *name,
+                    RA_LOG2QUANTUM_T uLog2Quantum,
+                    IMG_UINT64 ui64CpuBase,
+                    IMG_UINT64 ui64SpanDevBase,
+                    IMG_UINT64 ui64SpanSize)
+{
+       RA_ARENA *psRA;
+       IMG_BOOL bSuccess;
+
+       psRA = RA_Create(name,
+                        uLog2Quantum,       /* Use OS page size, keeps things simple */
+                        RA_LOCKCLASS_0,     /* This arena doesn't use any other arenas. */
+                        NULL,               /* No Import */
+                        NULL,               /* No free import */
+                        NULL,               /* No import handle */
+                        RA_POLICY_DEFAULT); /* No restriction on import splitting */
+       PVR_LOG_GOTO_IF_FALSE(psRA != NULL, "RA_Create() failed", return_);
+
+       bSuccess = RA_Add(psRA, (RA_BASE_T) ui64SpanDevBase, (RA_LENGTH_T) ui64SpanSize, 0, NULL);
+       PVR_LOG_GOTO_IF_FALSE(bSuccess, "RA_Add() failed", cleanup_);
+
+       _LogRegionCreation(name, ui64CpuBase, ui64SpanDevBase, ui64SpanSize);
+
+       return psRA;
+
+cleanup_:
+       RA_Delete(psRA);
+return_:
+       return NULL;
+}
+
+IMG_INTERNAL void
+RA_Delete(RA_ARENA *pArena)
+{
+       IMG_UINT32 uIndex;
+       IMG_BOOL bWarn = IMG_TRUE;
+
+       PVR_ASSERT(pArena != NULL);
+
+       if (pArena == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: invalid parameter - pArena", __func__));
+               return;
+       }
+
+       PVR_ASSERT(is_arena_valid(pArena));
+
+       PVR_DPF((PVR_DBG_MESSAGE,
+                         "%s: name='%s'", __func__, pArena->name));
+
+       while (pArena->pHeadSegment != NULL)
+       {
+               BT *pBT = pArena->pHeadSegment;
+
+               if (pBT->type != btt_free)
+               {
+                       if (bWarn)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "%s: Allocations still exist in the arena that is being destroyed", __func__));
+                               PVR_DPF((PVR_DBG_ERROR, "%s: Likely Cause: client drivers not freeing allocations before destroying devmem context", __func__));
+                               PVR_DPF((PVR_DBG_ERROR, "%s: base = 0x%llx size=0x%llx", __func__,
+                                         (unsigned long long)pBT->base, (unsigned long long)pBT->uSize));
+                               PVR_DPF((PVR_DBG_ERROR, "%s: This warning will be issued only once for the first allocation found!", __func__));
+                               bWarn = IMG_FALSE;
+                       }
+               }
+               else
+               {
+                       _FreeListRemove(pArena, pBT);
+               }
+
+               _SegmentListRemove(pArena, pBT);
+               OSFreeMem(pBT);
+               /* not nulling original pointer, it has changed */
+       }
+
+       while (pArena->per_flags_buckets != NULL)
+       {
+               for (uIndex=0; uIndex<FREE_TABLE_LIMIT; uIndex++)
+               {
+                       PVR_ASSERT(pArena->per_flags_buckets->buckets[uIndex] == NULL);
+               }
+
+               pArena->per_flags_buckets = PVRSRVDelete(pArena->per_flags_buckets->uiFlags, pArena->per_flags_buckets);
+       }
+
+       HASH_Delete(pArena->pSegmentHash);
+       OSLockDestroy(pArena->hLock);
+       OSFreeMem(pArena);
+       /* not nulling pointer, copy on stack */
+}
+
+IMG_INTERNAL IMG_BOOL
+RA_Add(RA_ARENA *pArena,
+       RA_BASE_T base,
+       RA_LENGTH_T uSize,
+       RA_FLAGS_T uFlags,
+       RA_PERISPAN_HANDLE hPriv)
+{
+       struct _BT_* bt;
+       PVR_ASSERT(pArena != NULL);
+       PVR_ASSERT(uSize != 0);
+
+       if (pArena == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: invalid parameter - pArena", __func__));
+               return IMG_FALSE;
+       }
+
+       if (uSize == 0)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: invalid size 0 added to arena %s", __func__, pArena->name));
+               return IMG_FALSE;
+       }
+
+       OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
+       PVR_ASSERT(is_arena_valid(pArena));
+       PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s', "
+                        "base=0x%llx, size=0x%llx", __func__, pArena->name,
+                        (unsigned long long)base, (unsigned long long)uSize));
+
+       uSize = (uSize + pArena->uQuantum - 1) & ~(pArena->uQuantum - 1);
+       bt = _InsertResource(pArena, base, uSize, uFlags);
+       if (bt != NULL)
+       {
+               bt->hPriv = hPriv;
+       }
+
+       PVR_ASSERT(is_arena_valid(pArena));
+
+       pArena->ui64TotalArenaSize += uSize;
+       pArena->ui64FreeArenaSize += uSize;
+       OSLockRelease(pArena->hLock);
+
+       return bt != NULL;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+RA_Alloc(RA_ARENA *pArena,
+         RA_LENGTH_T uRequestSize,
+         IMG_UINT8 uImportMultiplier,
+         RA_FLAGS_T uImportFlags,
+         RA_LENGTH_T uAlignment,
+         const IMG_CHAR *pszAnnotation,
+         RA_BASE_T *base,
+         RA_LENGTH_T *pActualSize,
+         RA_PERISPAN_HANDLE *phPriv)
+{
+       PVRSRV_ERROR eError;
+       IMG_BOOL bResult;
+       RA_LENGTH_T uSize = uRequestSize;
+       RA_FLAGS_T uFlags = (uImportFlags & PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK);
+
+       if (pArena == NULL || uImportMultiplier == 0 || uSize == 0)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                       "%s: One of the necessary parameters is 0", __func__));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
+       PVR_ASSERT(is_arena_valid(pArena));
+
+       if (pActualSize != NULL)
+       {
+               *pActualSize = uSize;
+       }
+
+       /* Must be a power of 2 or 0 */
+       PVR_ASSERT((uAlignment == 0) || (uAlignment & (uAlignment - 1)) == 0);
+
+       PVR_DPF((PVR_DBG_MESSAGE,
+               "%s: arena='%s', size=0x%llx(0x%llx), "
+               "alignment=0x%llx", __func__, pArena->name,
+               (unsigned long long)uSize, (unsigned long long)uRequestSize,
+               (unsigned long long)uAlignment));
+
+       /* If the allocation fails we might have an import source which
+          can provide more resource; otherwise we will have to fail the
+          allocation to the caller. */
+       bResult = _AttemptAllocAligned(pArena, uSize, uFlags, uAlignment, base, phPriv);
+       if (!bResult)
+       {
+               RA_BASE_T uImportBase;
+               RA_LENGTH_T uImportSize;
+               BT *pBT = NULL;
+
+               eError = _AttemptImportSpanAlloc(pArena,
+                                                uSize,
+                                                uImportMultiplier,
+                                                uFlags,
+                                                uAlignment,
+                                                pszAnnotation,
+                                                &uImportBase,
+                                                &uImportSize,
+                                                &pBT);
+               if (eError != PVRSRV_OK)
+               {
+                       OSLockRelease(pArena->hLock);
+                       return eError;
+               }
+
+               bResult = _AttemptAllocAligned(pArena, uSize, uFlags, uAlignment, base, phPriv);
+               if (!bResult)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                               "%s: name='%s' second alloc failed!",
+                               __func__, pArena->name));
+
+                       /*
+                         On failure of _AttemptAllocAligned(), depending on the exact point
+                         of failure, the imported segment may have been used and freed, or
+                         left untouched. If the latter, we need to return it.
+                       */
+                       _FreeBT(pArena, pBT);
+
+                       OSLockRelease(pArena->hLock);
+                       return PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED;
+               }
+               else
+               {
+                       /* Check if the new allocation was in the span we just added... */
+                       if (*base < uImportBase  ||  *base > (uImportBase + uImportSize))
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: name='%s' alloc did not occur in the imported span!",
+                                       __func__, pArena->name));
+
+                               /*
+                                 Remove the imported span, which should not be in use (if it is, that
+                                 is okay, but essentially no span should exist that is not used).
+                               */
+                               _FreeBT(pArena, pBT);
+                       }
+                       else
+                       {
+                               pArena->ui64FreeArenaSize += uImportSize;
+                               pArena->ui64TotalArenaSize += uImportSize;
+                       }
+               }
+       }
+
+       PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s', size=0x%llx, "
+               "*base=0x%llx = %d", __func__, pArena->name, (unsigned long long)uSize,
+               (unsigned long long)*base, bResult));
+
+       PVR_ASSERT(is_arena_valid(pArena));
+
+       pArena->ui64FreeArenaSize -= uSize;
+
+       OSLockRelease(pArena->hLock);
+       return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       RA_Find_BT_VARange
+@Description    Find the boundary tag associated with the given device
+                virtual address.
+@Input          pArena            The arena
+@Input          base              Allocated base resource
+@Input          uRequestSize      The size of the resource segment requested.
+@Input          uImportFlags      Flags influencing allocation policy.
+@Return         Boundary Tag - success, NULL on failure
+*/ /**************************************************************************/
+static BT *RA_Find_BT_VARange(RA_ARENA *pArena,
+                              RA_BASE_T base,
+                              RA_LENGTH_T uRequestSize,
+                              RA_FLAGS_T uImportFlags)
+{
+       IMG_PSPLAY_TREE psSplaynode;
+       BT *pBT = pArena->pHeadSegment;
+       IMG_UINT32 uIndex;
+
+       uIndex = pvr_log2 (uRequestSize);
+
+       /* Find the splay node associated with these import flags */
+       psSplaynode = PVRSRVFindNode(uImportFlags, pArena->per_flags_buckets);
+
+       if (psSplaynode == NULL)
+       {
+               return NULL;
+       }
+
+       /* Find the free Boundary Tag from the bucket that holds the requested range */
+       while (uIndex < FREE_TABLE_LIMIT)
+       {
+               pBT = psSplaynode->buckets[uIndex];
+
+               while (pBT)
+               {
+                       if ((pBT->base <= base) && ((pBT->base + pBT->uSize) >= (base + uRequestSize)))
+                       {
+                               if (pBT->type == btt_free)
+                               {
+                                       return pBT;
+                               }
+                               else
+                               {
+                                       PVR_ASSERT(pBT->type == btt_free);
+                               }
+                       }
+                       else
+                       {
+                               pBT = pBT->next_free;
+                       }
+               }
+
+#if defined(PVR_CTZLL)
+               /* This could further be optimised to get the next valid bucket */
+               while (!(psSplaynode->bHasEltsMapping & (1ULL << ++uIndex)));
+#else
+               uIndex++;
+#endif
+       }
+
+       return NULL;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+RA_Alloc_Range(RA_ARENA *pArena,
+               RA_LENGTH_T uRequestSize,
+               RA_FLAGS_T uImportFlags,
+               RA_LENGTH_T uAlignment,
+               RA_BASE_T base,
+               RA_LENGTH_T *pActualSize)
+{
+       RA_LENGTH_T uSize = uRequestSize;
+       BT *pBT = NULL;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       if (pArena == NULL || uSize == 0)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                       "%s: One of the necessary parameters is 0", __func__));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
+       PVR_ASSERT(is_arena_valid(pArena));
+
+       /* Align the requested size to the Arena Quantum */
+       uSize = ((uSize + pArena->uQuantum - 1) & ~(pArena->uQuantum - 1));
+
+       /* Must be a power of 2 or 0 */
+       PVR_ASSERT((uAlignment == 0) || (uAlignment & (uAlignment - 1)) == 0);
+
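+       /* Illustrative note (hypothetical values): the requested base must
+        * itself satisfy uAlignment, e.g. base 0x3000 passes the check below
+        * for uAlignment 0x1000 ((0x3000 + 0xFFF) & ~0xFFF == 0x3000) but
+        * fails for uAlignment 0x2000, where the round-up yields 0x4000. */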
+       if (uAlignment > 1)
+       {
+               if (base != ((base + uAlignment - 1) & ~(uAlignment - 1)))
+               {
+                       PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, unlock_);
+               }
+       }
+
+       /* Find whether a segment covering the range exists and is free
+        * Check if the segment can be split
+        * Find the bucket that points to this segment
+        * Find the free segment in the free list
+        * Remove the free segment
+        * Split the segment into three segments: one prior free, the alloc
+        *     range, and a free segment after the range
+        * Remove the allocated range segment from the free list
+        * Hook the prior and after segments back up to the free list
+        * For each free segment, find the bucket it should go to
+        */
+
+       pBT = RA_Find_BT_VARange(pArena, base, uSize, uImportFlags);
+
+       if (pBT == NULL)
+       {
+               PVR_GOTO_WITH_ERROR(eError,
+                                   PVRSRV_ERROR_RA_REQUEST_VIRT_ADDR_FAIL,
+                                   unlock_);
+       }
+
+       /* Remove the boundary tag from the free list */
+       _FreeListRemove (pArena, pBT);
+
+       /* if the requested VA starts in the middle of the BT, split the BT accordingly */
+       if (base > pBT->base)
+       {
+               BT *pNeighbour;
+               pNeighbour = _SegmentSplit (pBT, (RA_LENGTH_T)(base - pBT->base));
+               /* partition the buffer, create a new boundary tag */
+               if (pNeighbour == NULL)
+               {
+                       /* Put pBT back in the list */
+                       _FreeListInsert (pArena, pBT);
+                       PVR_LOG_GOTO_WITH_ERROR("_SegmentSplit (1)", eError,
+                                               PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL,
+                                               unlock_);
+               }
+
+               /* Insert back the free BT to the free list */
+               _FreeListInsert(pArena, pBT);
+               pBT = pNeighbour;
+       }
+
+       /* the segment might be too big; if so, discard the back of the segment */
+       if (pBT->uSize > uSize)
+       {
+               BT *pNeighbour;
+               pNeighbour = _SegmentSplit(pBT, uSize);
+               /* partition the buffer, create a new boundary tag */
+               if (pNeighbour == NULL)
+               {
+                       /* Put pBT back in the list */
+                       _FreeListInsert (pArena, pBT);
+                       PVR_LOG_GOTO_WITH_ERROR("_SegmentSplit (2)", eError,
+                                               PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL,
+                                               unlock_);
+               }
+
+               /* Insert back the free BT to the free list */
+               _FreeListInsert (pArena, pNeighbour);
+       }
+
+       pBT->type = btt_live;
+
+       if (!HASH_Insert_Extended (pArena->pSegmentHash, &base, (uintptr_t)pBT))
+       {
+               _FreeBT (pArena, pBT);
+               PVR_GOTO_WITH_ERROR(eError,
+                                   PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED,
+                                   unlock_);
+       }
+
+       if (pActualSize != NULL)
+       {
+               *pActualSize = uSize;
+       }
+
+       pArena->ui64FreeArenaSize -= uSize;
+
+unlock_:
+       OSLockRelease(pArena->hLock);
+
+       return eError;
+}
+
+IMG_INTERNAL void
+RA_Free(RA_ARENA *pArena, RA_BASE_T base)
+{
+       BT *pBT;
+
+       PVR_ASSERT(pArena != NULL);
+
+       if (pArena == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: invalid parameter - pArena", __func__));
+               return;
+       }
+
+       OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
+       PVR_ASSERT(is_arena_valid(pArena));
+
+       PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s', base=0x%llx", __func__, pArena->name,
+               (unsigned long long)base));
+
+       pBT = (BT *) HASH_Remove_Extended(pArena->pSegmentHash, &base);
+       PVR_ASSERT(pBT != NULL);
+
+       if (pBT)
+       {
+               pArena->ui64FreeArenaSize += pBT->uSize;
+
+               PVR_ASSERT(pBT->base == base);
+               _FreeBT(pArena, pBT);
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                       "%s: no resource span found for given base (0x%llX) in arena %s",
+                        __func__, (unsigned long long) base, pArena->name));
+       }
+
+       PVR_ASSERT(is_arena_valid(pArena));
+       OSLockRelease(pArena->hLock);
+}
+
+IMG_INTERNAL void
+RA_Get_Usage_Stats(RA_ARENA *pArena, PRA_USAGE_STATS psRAStats)
+{
+       psRAStats->ui64TotalArenaSize = pArena->ui64TotalArenaSize;
+       psRAStats->ui64FreeArenaSize = pArena->ui64FreeArenaSize;
+}
+
+/* #define _DBG(...) PVR_LOG((__VA_ARGS__)) */
+#define _DBG(...)
+
+IMG_INTERNAL RA_ARENA_ITERATOR *
+RA_IteratorAcquire(RA_ARENA *pArena, IMG_BOOL bIncludeFreeSegments)
+{
+       RA_ARENA_ITERATOR *pIter = OSAllocMem(sizeof(*pIter));
+       PVR_LOG_RETURN_IF_FALSE(pIter != NULL, "OSAllocMem", NULL);
+
+       OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
+
+       pIter->pArena = pArena;
+       pIter->bIncludeFreeSegments = bIncludeFreeSegments;
+
+       RA_IteratorReset(pIter);
+
+       return pIter;
+}
+
+IMG_INTERNAL void
+RA_IteratorRelease(RA_ARENA_ITERATOR *pIter)
+{
+       PVR_ASSERT(pIter != NULL);
+
+       if (pIter == NULL)
+       {
+               return;
+       }
+
+       OSLockRelease(pIter->pArena->hLock);
+
+       OSFreeMem(pIter);
+}
+
+IMG_INTERNAL void
+RA_IteratorReset(RA_ARENA_ITERATOR *pIter)
+{
+       BT *pNext;
+
+       PVR_ASSERT(pIter != NULL);
+
+       pNext = pIter->pArena->pHeadSegment;
+
+       /* find next element if we're not including the free ones */
+       if (!pIter->bIncludeFreeSegments)
+       {
+               while (pNext != NULL && pNext->type != btt_live)
+               {
+                       _DBG("(%s()) skipping segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", "
+                            "type=%u", __func__, (void *) pNext->base, pNext->uSize,
+                            pNext->type);
+                       pNext = pNext->pNextSegment;
+               }
+       }
+
+       _DBG("(%s()) current segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", "
+            "type=%u", __func__,
+            pNext != NULL ? (void *) pNext->base : NULL,
+            pNext != NULL ? pNext->uSize : 0,
+            pNext != NULL ? pNext->type : 0);
+
+       /* if !bIncludeFreeSegments then pNext here is either a valid pointer
+        * to a "live" segment or NULL, and if bIncludeFreeSegments then it's
+        * either a valid pointer to any next segment or NULL */
+       pIter->pCurrent = pNext;
+}
+
+IMG_INTERNAL IMG_BOOL
+RA_IteratorNext(RA_ARENA_ITERATOR *pIter, RA_ITERATOR_DATA *pData)
+{
+       BT *pNext;
+
+       PVR_ASSERT(pIter != NULL);
+
+       if (pIter == NULL)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "pIter in %s() is NULL", __func__));
+               return IMG_FALSE;
+       }
+
+       if (pIter->pCurrent == NULL)
+       {
+               return IMG_FALSE;
+       }
+
+       pNext = pIter->pCurrent;
+
+       _DBG("(%s()) current segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", "
+            "type=%u", __func__, (void *) pNext->base, pNext->uSize,
+            pNext->type);
+
+       pData->uiAddr = pIter->pCurrent->base;
+       pData->uiSize = pIter->pCurrent->uSize;
+       pData->bFree = pIter->pCurrent->type == btt_free;
+
+       /* combine contiguous segments */
+       while ((pNext = pNext->pNextSegment) != NULL &&
+              pNext->type == btt_live &&
+              pNext->base == pData->uiAddr + pData->uiSize)
+       {
+               _DBG("(%s()) combining segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", "
+                    "type=%u", __func__, (void *) pNext->base, pNext->uSize,
+                    pNext->type);
+               pData->uiSize += pNext->uSize;
+       }
+
+       /* advance to next */
+       if (!pIter->bIncludeFreeSegments)
+       {
+               while (pNext != NULL && pNext->type != btt_live)
+               {
+                       _DBG("(%s()) skipping segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", "
+                            "type=%u", __func__, (void *) pNext->base, pNext->uSize,
+                            pNext->type);
+                       pNext = pNext->pNextSegment;
+               }
+       }
+
+       _DBG("(%s()) next segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", "
+            "type=%u", __func__,
+            pNext != NULL ? (void *) pNext->base : NULL,
+            pNext != NULL ? pNext->uSize : 0,
+            pNext != NULL ? pNext->type : 0);
+
+       /* if !bIncludeFreeSegments then pNext here is either a valid pointer
+        * to a "live" segment or NULL, and if bIncludeFreeSegments then it's
+        * either a valid pointer to any next segment or NULL */
+       pIter->pCurrent = pNext;
+
+       return IMG_TRUE;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+RA_BlockDump(RA_ARENA *pArena, void (*pfnLogDump)(void*, IMG_CHAR*, ...), void *pPrivData)
+{
+       RA_ARENA_ITERATOR *pIter = NULL;
+       RA_ITERATOR_DATA sIterData;
+       const IMG_UINT32 uiLineWidth = 64;
+
+       IMG_UINT32 **papRegionArray = NULL;
+       IMG_UINT32 uiRegionCount = 0;
+
+       const IMG_UINT32 uiChunkSize = 32; /* 32-bit chunks */
+       const IMG_UINT32 uiChunkCount = (uiLineWidth / uiChunkSize) * 2; /* This should equal 2 or a multiple of 2 */
+       const IMG_UINT32 uiRegionSize = uiChunkSize * uiChunkCount;
+
+       IMG_UINT32 uiRecognisedQuantum = 0;
+
+       IMG_UINT32 uiLastBase = 0;
+       IMG_UINT32 uiLastSize = 0;
+
+       IMG_UINT32 i;
+       PVRSRV_ERROR eError = PVRSRV_OK;
+
+       /* -- papRegionArray Structure --
+        *  papRegionArray Indexes
+        *  |         Chunk 0      Chunk 1      Chunk 2      Chunk 3
+        *  v     |------------|------------|------------|------------|
+        * [0] -> | 0000000000 | 0000000000 | 0000000000 | 0000000000 | -- |
+        * [1] -> | 0000000000 | 0000000000 | 0000000000 | 0000000000 |    |
+        * [2] -> | 0000000000 | 0000000000 | 0000000000 | 0000000000 |    |
+        * [3] -> | 0000000000 | 0000000000 | 0000000000 | 0000000000 |    | Regions
+        * [4] -> | 0000000000 | 0000000000 | 0000000000 | 0000000000 |    |
+        * [5] -> | 0000000000 | 0000000000 | 0000000000 | 0000000000 |    |
+        * [6] -> | 0000000000 | 0000000000 | 0000000000 | 0000000000 | -- |
+        * ...
+        */
+
+       if (pArena == NULL || pfnLogDump == NULL)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       pIter = RA_IteratorAcquire(pArena, IMG_FALSE);
+       PVR_LOG_RETURN_IF_NOMEM(pIter, "RA_IteratorAcquire");
+
+       uiRecognisedQuantum = pArena->uQuantum > 0 ? pArena->uQuantum : 4096;
+
+       while (RA_IteratorNext(pIter, &sIterData))
+       {
+               if (sIterData.uiAddr >= uiLastBase)
+               {
+                       uiLastBase = sIterData.uiAddr;
+                       uiLastSize = sIterData.uiSize;
+               }
+       }
+
+       uiRegionCount = ((uiLastBase + uiLastSize) / uiRecognisedQuantum) / uiRegionSize;
+       if (((uiLastBase + uiLastSize) / uiRecognisedQuantum) % uiRegionSize != 0
+          || uiRegionCount == 0)
+       {
+               uiRegionCount += 1;
+       }
+
+       papRegionArray = OSAllocZMem(sizeof(IMG_UINT32*) * uiRegionCount);
+       PVR_LOG_GOTO_IF_NOMEM(papRegionArray, eError, cleanup_array);
+
+       RA_IteratorReset(pIter);
+
+       while (RA_IteratorNext(pIter, &sIterData))
+       {
+               IMG_UINT32 uiAddrRegionIdx = 0;
+               IMG_UINT32 uiAddrRegionOffset = 0;
+               IMG_UINT32 uiAddrChunkIdx = 0;
+               IMG_UINT32 uiAddrChunkOffset = 0;
+               IMG_UINT32 uiAddrChunkShift; /* The bit-shift needed to fill the chunk */
+
+               IMG_UINT32 uiQuantisedSize;
+               IMG_UINT32 uiQuantisedSizeMod;
+               IMG_UINT32 uiAllocLastRegionIdx = 0; /* The last region that this alloc appears in */
+               IMG_UINT32 uiAllocChunkSize = 0; /* The number of chunks this alloc spans */
+
+               IMG_INT32 iBitSetCount = 0;
+               IMG_INT32 iOverflowCheck = 0;
+               IMG_INT32 iOverflow = 0;
+               IMG_UINT32 uiRegionIdx = 0;
+               IMG_UINT32 uiChunkIdx = 0;
+
+#if defined(__KERNEL__) && defined(__linux__)
+               IMG_UINT64 uiDataDivRecQuant = sIterData.uiSize;
+               uiQuantisedSizeMod = do_div(uiDataDivRecQuant, uiRecognisedQuantum);
+               uiQuantisedSize = (IMG_UINT32)uiDataDivRecQuant;
+
+               uiDataDivRecQuant = sIterData.uiAddr;
+               do_div(uiDataDivRecQuant, uiRecognisedQuantum);
+               uiAddrRegionOffset = do_div(uiDataDivRecQuant, uiRegionSize);
+               uiAddrRegionIdx = (IMG_UINT32)uiDataDivRecQuant;
+
+               uiDataDivRecQuant = sIterData.uiAddr;
+               do_div(uiDataDivRecQuant, uiRecognisedQuantum);
+#else
+               IMG_UINT64 uiDataDivRecQuant = sIterData.uiAddr / uiRecognisedQuantum;
+               uiAddrRegionIdx = uiDataDivRecQuant / uiRegionSize;
+               uiAddrRegionOffset = uiDataDivRecQuant % uiRegionSize;
+
+               uiQuantisedSize = sIterData.uiSize / uiRecognisedQuantum;
+               uiQuantisedSizeMod = sIterData.uiSize % uiRecognisedQuantum;
+#endif
+               uiAddrChunkIdx = uiAddrRegionOffset / uiChunkSize;
+               uiAddrChunkOffset = uiAddrRegionOffset % uiChunkSize;
+               uiAddrChunkShift = uiChunkSize - uiAddrChunkOffset;
+               uiRegionIdx = uiAddrRegionIdx;
+               uiChunkIdx = uiAddrChunkIdx;
+
+               if ((uiQuantisedSize == 0) || (uiQuantisedSizeMod != 0))
+               {
+                       uiQuantisedSize += 1;
+               }
+
+#if defined(__KERNEL__) && defined(__linux__)
+               uiDataDivRecQuant += uiQuantisedSize - 1;
+               do_div(uiDataDivRecQuant, uiRegionSize);
+               uiAllocLastRegionIdx = (IMG_UINT32)uiDataDivRecQuant;
+#else
+               uiAllocLastRegionIdx =
+                   (uiDataDivRecQuant + uiQuantisedSize - 1) / uiRegionSize;
+#endif
+               uiAllocChunkSize = (uiAddrChunkOffset + uiQuantisedSize) / uiChunkSize;
+
+               if ((uiAddrChunkOffset + uiQuantisedSize) % uiChunkSize > 0)
+               {
+                       uiAllocChunkSize += 1;
+               }
+
+               iBitSetCount = uiQuantisedSize;
+               iOverflowCheck = uiQuantisedSize - uiAddrChunkShift;
+
+               if (iOverflowCheck > 0)
+               {
+                       iOverflow = iOverflowCheck;
+                       iBitSetCount = uiQuantisedSize - iOverflow;
+               }
+
+               /**
+                * Allocate memory to represent the chunks for each region the allocation
+                * spans. If one was already allocated before don't do it again.
+                */
+               for (i = 0; uiAddrRegionIdx + i <= uiAllocLastRegionIdx; i++)
+               {
+                       if (papRegionArray[uiAddrRegionIdx + i] == NULL)
+                       {
+                               papRegionArray[uiAddrRegionIdx + i] = OSAllocZMem(sizeof(IMG_UINT32) * uiChunkCount);
+                               PVR_LOG_GOTO_IF_NOMEM(papRegionArray[uiAddrRegionIdx + i], eError, cleanup_regions);
+                       }
+               }
+
+               for (i = 0; i < uiAllocChunkSize; i++)
+               {
+                       if (uiChunkIdx >= uiChunkCount)
+                       {
+                               uiRegionIdx++;
+                               uiChunkIdx = 0;
+                       }
+
+                       if ((IMG_UINT32)iBitSetCount != uiChunkSize)
+                       {
+                               IMG_UINT32 uiBitMask = 0;
+
+                               uiBitMask = (1U << iBitSetCount) - 1;
+                               uiBitMask <<= (uiAddrChunkShift - iBitSetCount);
+
+                               papRegionArray[uiRegionIdx][uiChunkIdx] |= uiBitMask;
+                       }
+                       else
+                       {
+                               papRegionArray[uiRegionIdx][uiChunkIdx] |= 0xFFFFFFFF;
+                       }
+
+                       uiChunkIdx++;
+                       iOverflow -= uiChunkSize;
+                       iBitSetCount = iOverflow >= 0 ? uiChunkSize : uiChunkSize + iOverflow;
+                       if (iOverflow < 0)
+                       {
+                               uiAddrChunkShift = 32;
+                       }
+               }
+       }
+
+       RA_IteratorRelease(pIter);
+
+       pfnLogDump(pPrivData, "~~~ '%s' Resource Arena Block Dump", pArena->name);
+       pfnLogDump(pPrivData, "    Block Size: %uB", uiRecognisedQuantum);
+       pfnLogDump(pPrivData,
+                  "    Span Memory Usage: %"IMG_UINT64_FMTSPEC"B"
+                  "    Free Span Memory: %"IMG_UINT64_FMTSPEC"B",
+                  pArena->ui64TotalArenaSize,
+                  pArena->ui64FreeArenaSize);
+       pfnLogDump(pPrivData,
+                  "===============================================================================");
+
+       for (i = 0; i < uiRegionCount; i++)
+       {
+               static IMG_BOOL bEmptyRegion = IMG_FALSE;
+               if (papRegionArray[i] != NULL)
+               {
+                       IMG_CHAR pszLine[65];
+                       IMG_UINT32 j;
+
+                       bEmptyRegion = IMG_FALSE;
+                       pszLine[64] = '\0';
+
+                       for (j = 0; j < uiChunkCount; j+=2)
+                       {
+                               IMG_UINT8 uiBit = 0;
+                               IMG_UINT32 k;
+                               IMG_UINT64 uiLineAddress =
+                                   (i * uiRegionSize + (j >> 1) * uiLineWidth) * uiRecognisedQuantum;
+
+                               /**
+                                * Move through each of the 32 bits in the chunk and check their
+                                * value. If it is 1 we set the corresponding character to '#',
+                                * otherwise it is set to '.' representing empty space
+                                */
+                               for (k = 1 << 31; k != 0; k >>= 1)
+                               {
+                                       pszLine[uiBit] = papRegionArray[i][j] & k ? '#' : '.';
+                                       pszLine[32 + uiBit] = papRegionArray[i][j+1] & k ? '#' : '.';
+                                       uiBit++;
+                               }
+
+                               pfnLogDump(pPrivData,
+                                          "| 0x%08"IMG_UINT64_FMTSPECx" | %s",
+                                          uiLineAddress,
+                                          pszLine);
+                       }
+                       OSFreeMem(papRegionArray[i]);
+               }
+               else
+               {
+                       /* We only print this once per gap of n regions */
+                       if (!bEmptyRegion)
+                       {
+                               pfnLogDump(pPrivData, "     ....");
+                               bEmptyRegion = IMG_TRUE;
+                       }
+               }
+       }
+       OSFreeMem(papRegionArray);
+       return eError;
+
+cleanup_regions:
+       for (i = 0; i < uiRegionCount; i++)
+       {
+               if (papRegionArray[i] != NULL)
+               {
+                       OSFreeMem(papRegionArray[i]);
+               }
+       }
+
+cleanup_array:
+       OSFreeMem(papRegionArray);
+       RA_IteratorRelease(pIter);
+
+       return eError;
+}
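+
+/* Illustrative sketch (not part of this patch): a minimal pfnLogDump
+ * callback that could be passed to RA_BlockDump() above in a Linux kernel
+ * build. The function name is hypothetical, pPrivData is unused here, and
+ * vprintk() is assumed to be available through the kernel headers. Kept
+ * under #if 0 so it is never built.
+ */
+#if 0
+static void ExampleRABlockDumpLogger(void *pPrivData, IMG_CHAR *pszFormat, ...)
+{
+       va_list args;
+
+       PVR_UNREFERENCED_PARAMETER(pPrivData);
+
+       /* Each invocation emits one line of the block dump. */
+       va_start(args, pszFormat);
+       vprintk(pszFormat, args);
+       va_end(args);
+}
+
+/* Usage: RA_BlockDump(pArena, ExampleRABlockDumpLogger, NULL); */
+#endif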
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/common/sync.c b/drivers/gpu/drm/img/img-rogue/services/shared/common/sync.c
new file mode 100644 (file)
index 0000000..36234ae
--- /dev/null
@@ -0,0 +1,907 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services synchronisation interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements client side code for services synchronisation
+                interface
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "client_sync_bridge.h"
+#include "client_synctracking_bridge.h"
+#include "info_page_client.h"
+#include "pvr_bridge.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "pvr_debug.h"
+#include "dllist.h"
+#include "sync.h"
+#include "sync_internal.h"
+#include "lock.h"
+#include "log2.h"
+#if defined(__KERNEL__)
+#include "pvrsrv.h"
+#include "srvcore.h"
+#else
+#include "srvcore_intern.h"
+#endif
+
+
+#define SYNC_BLOCK_LIST_CHUNCK_SIZE    10
+
+/*
+       This defines the maximum amount of synchronisation memory
+       that can be allocated per SyncPrim context.
+       In reality this number is meaningless as we would run out
+       of synchronisation memory before we reach this limit, but
+       we need to provide a size to the span RA.
+ */
+#define MAX_SYNC_MEM                           (4 * 1024 * 1024)
+
+/* forward declaration */
+static PVRSRV_ERROR
+_SyncPrimSetValue(SYNC_PRIM *psSyncInt, IMG_UINT32 ui32Value);
+
+/*
+       Internal interfaces for management of SYNC_PRIM_CONTEXT
+ */
+static void
+_SyncPrimContextUnref(SYNC_PRIM_CONTEXT *psContext)
+{
+       if (!OSAtomicRead(&psContext->hRefCount))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: context already freed", __func__));
+       }
+       else if (0 == OSAtomicDecrement(&psContext->hRefCount))
+       {
+               /* SyncPrimContextDestroy only when no longer referenced */
+               RA_Delete(psContext->psSpanRA);
+               RA_Delete(psContext->psSubAllocRA);
+               OSFreeMem(psContext);
+       }
+}
+
+static void
+_SyncPrimContextRef(SYNC_PRIM_CONTEXT *psContext)
+{
+       if (!OSAtomicRead(&psContext->hRefCount))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: context use after free", __func__));
+       }
+       else
+       {
+               OSAtomicIncrement(&psContext->hRefCount);
+       }
+}
+
+/*
+       Internal interfaces for management of synchronisation block memory
+ */
+static PVRSRV_ERROR
+AllocSyncPrimitiveBlock(SYNC_PRIM_CONTEXT *psContext,
+                        SYNC_PRIM_BLOCK **ppsSyncBlock)
+{
+       SYNC_PRIM_BLOCK *psSyncBlk;
+       IMG_HANDLE hSyncPMR;
+       IMG_HANDLE hSyncImportHandle;
+       IMG_DEVMEM_SIZE_T uiImportSize;
+       PVRSRV_ERROR eError;
+
+       psSyncBlk = OSAllocMem(sizeof(SYNC_PRIM_BLOCK));
+       PVR_GOTO_IF_NOMEM(psSyncBlk, eError, fail_alloc);
+
+       psSyncBlk->psContext = psContext;
+
+       /* Allocate sync prim block */
+       eError = BridgeAllocSyncPrimitiveBlock(GetBridgeHandle(psContext->hDevConnection),
+                                              &psSyncBlk->hServerSyncPrimBlock,
+                                              &psSyncBlk->ui32FirmwareAddr,
+                                              &psSyncBlk->ui32SyncBlockSize,
+                                              &hSyncPMR);
+       PVR_GOTO_IF_ERROR(eError, fail_blockalloc);
+
+       /* Make it mappable by the client */
+       eError = DevmemMakeLocalImportHandle(psContext->hDevConnection,
+                                            hSyncPMR,
+                                            &hSyncImportHandle);
+       PVR_GOTO_IF_ERROR(eError, fail_export);
+
+       /* Get CPU mapping of the memory block */
+       eError = DevmemLocalImport(psContext->hDevConnection,
+                                  hSyncImportHandle,
+                                  PVRSRV_MEMALLOCFLAG_CPU_READABLE,
+                                  &psSyncBlk->hMemDesc,
+                                  &uiImportSize,
+                                  "SyncPrimitiveBlock");
+
+       /*
+               Regardless of success or failure we "undo" the export
+        */
+       DevmemUnmakeLocalImportHandle(psContext->hDevConnection,
+                                     hSyncImportHandle);
+
+       PVR_GOTO_IF_ERROR(eError, fail_import);
+
+       eError = DevmemAcquireCpuVirtAddr(psSyncBlk->hMemDesc,
+                                         (void **) &psSyncBlk->pui32LinAddr);
+       PVR_GOTO_IF_ERROR(eError, fail_cpuvaddr);
+
+       *ppsSyncBlock = psSyncBlk;
+       return PVRSRV_OK;
+
+fail_cpuvaddr:
+       DevmemFree(psSyncBlk->hMemDesc);
+fail_import:
+fail_export:
+       BridgeFreeSyncPrimitiveBlock(GetBridgeHandle(psContext->hDevConnection),
+                                    psSyncBlk->hServerSyncPrimBlock);
+fail_blockalloc:
+       OSFreeMem(psSyncBlk);
+fail_alloc:
+       return eError;
+}
+
+static void
+FreeSyncPrimitiveBlock(SYNC_PRIM_BLOCK *psSyncBlk)
+{
+       SYNC_PRIM_CONTEXT *psContext = psSyncBlk->psContext;
+
+       DevmemReleaseCpuVirtAddr(psSyncBlk->hMemDesc);
+       DevmemFree(psSyncBlk->hMemDesc);
+       (void) DestroyServerResource(psContext->hDevConnection,
+                                    NULL,
+                                    BridgeFreeSyncPrimitiveBlock,
+                                    psSyncBlk->hServerSyncPrimBlock);
+       OSFreeMem(psSyncBlk);
+}
+
+static PVRSRV_ERROR
+SyncPrimBlockImport(RA_PERARENA_HANDLE hArena,
+                    RA_LENGTH_T uSize,
+                    RA_FLAGS_T uFlags,
+                    const IMG_CHAR *pszAnnotation,
+                    RA_BASE_T *puiBase,
+                    RA_LENGTH_T *puiActualSize,
+                    RA_PERISPAN_HANDLE *phImport)
+{
+       SYNC_PRIM_CONTEXT *psContext = hArena;
+       SYNC_PRIM_BLOCK *psSyncBlock = NULL;
+       RA_LENGTH_T uiSpanSize;
+       PVRSRV_ERROR eError;
+       PVR_UNREFERENCED_PARAMETER(uFlags);
+
+       /* Check we've not been called with an unexpected size */
+       PVR_LOG_GOTO_IF_INVALID_PARAM(hArena, eError, e0);
+       PVR_LOG_GOTO_IF_INVALID_PARAM(uSize == sizeof(IMG_UINT32), eError, e0);
+
+       /*
+               Ensure the syncprim context doesn't go away while we have sync blocks
+               attached to it
+        */
+       _SyncPrimContextRef(psContext);
+
+       /* Allocate the block of memory */
+       eError = AllocSyncPrimitiveBlock(psContext, &psSyncBlock);
+       PVR_LOG_GOTO_IF_ERROR(eError, "AllocSyncPrimitiveBlock", fail_syncblockalloc);
+
+       /* Allocate a span for it */
+       eError = RA_Alloc(psContext->psSpanRA,
+                         psSyncBlock->ui32SyncBlockSize,
+                         RA_NO_IMPORT_MULTIPLIER,
+                         0,
+                         psSyncBlock->ui32SyncBlockSize,
+                         pszAnnotation,
+                         &psSyncBlock->uiSpanBase,
+                         &uiSpanSize,
+                         NULL);
+       PVR_GOTO_IF_ERROR(eError, fail_spanalloc);
+
+       /*
+               There is no reason the span RA should return an allocation larger
+               than we request
+        */
+       PVR_ASSERT(uiSpanSize == psSyncBlock->ui32SyncBlockSize);
+
+       *puiBase = psSyncBlock->uiSpanBase;
+       *puiActualSize = psSyncBlock->ui32SyncBlockSize;
+       *phImport = psSyncBlock;
+       return PVRSRV_OK;
+
+fail_spanalloc:
+       FreeSyncPrimitiveBlock(psSyncBlock);
+fail_syncblockalloc:
+       _SyncPrimContextUnref(psContext);
+e0:
+       return eError;
+}
+
+static void
+SyncPrimBlockUnimport(RA_PERARENA_HANDLE hArena,
+                      RA_BASE_T uiBase,
+                      RA_PERISPAN_HANDLE hImport)
+{
+       SYNC_PRIM_CONTEXT *psContext = hArena;
+       SYNC_PRIM_BLOCK *psSyncBlock = hImport;
+
+       if (!psContext || !psSyncBlock || uiBase != psSyncBlock->uiSpanBase)
+       {
+               /* Invalid input params */
+               return;
+       }
+
+       /* Free the span this import is using */
+       RA_Free(psContext->psSpanRA, uiBase);
+
+       /* Free the syncprim block */
+       FreeSyncPrimitiveBlock(psSyncBlock);
+
+       /* Drop our reference to the syncprim context */
+       _SyncPrimContextUnref(psContext);
+}
+
+static INLINE IMG_UINT32 SyncPrimGetOffset(SYNC_PRIM *psSyncInt)
+{
+       IMG_UINT64 ui64Temp;
+
+       PVR_ASSERT(psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL);
+
+       ui64Temp = psSyncInt->u.sLocal.uiSpanAddr - psSyncInt->u.sLocal.psSyncBlock->uiSpanBase;
+       PVR_ASSERT(ui64Temp<IMG_UINT32_MAX);
+       return TRUNCATE_64BITS_TO_32BITS(ui64Temp);
+}
+
+static void SyncPrimGetCPULinAddr(SYNC_PRIM *psSyncInt)
+{
+       SYNC_PRIM_BLOCK *psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+
+       psSyncInt->sCommon.pui32LinAddr = psSyncBlock->pui32LinAddr +
+                       (SyncPrimGetOffset(psSyncInt)/sizeof(IMG_UINT32));
+}
+
+static void SyncPrimLocalFree(SYNC_PRIM *psSyncInt, IMG_BOOL bFreeFirstSyncPrim)
+{
+       SYNC_PRIM_BLOCK *psSyncBlock;
+       SYNC_PRIM_CONTEXT *psContext;
+
+       psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+       psContext = psSyncBlock->psContext;
+
+#if !defined(LOCAL_SYNC_BLOCK_RETAIN_FIRST)
+       PVR_UNREFERENCED_PARAMETER(bFreeFirstSyncPrim);
+#else
+       /* Defer freeing the first allocated sync prim in the sync context */
+       if (psSyncInt != psContext->hFirstSyncPrim || (psSyncInt == psContext->hFirstSyncPrim && bFreeFirstSyncPrim))
+#endif
+       {
+               PVRSRV_ERROR eError;
+               SHARED_DEV_CONNECTION hDevConnection =
+                       psSyncInt->u.sLocal.psSyncBlock->psContext->hDevConnection;
+
+               if (GetInfoPageDebugFlags(hDevConnection) & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED)
+               {
+                       if (psSyncInt->u.sLocal.hRecord)
+                       {
+                               /* remove this sync record */
+                               eError = DestroyServerResource(hDevConnection,
+                                                              NULL,
+                                                              BridgeSyncRecordRemoveByHandle,
+                                                              psSyncInt->u.sLocal.hRecord);
+                               PVR_LOG_IF_ERROR(eError, "BridgeSyncRecordRemoveByHandle");
+                       }
+               }
+               else
+               {
+                       IMG_UINT32 ui32FWAddr = psSyncBlock->ui32FirmwareAddr +
+                                       SyncPrimGetOffset(psSyncInt);
+
+                       eError = BridgeSyncFreeEvent(GetBridgeHandle(hDevConnection), ui32FWAddr);
+                       PVR_LOG_IF_ERROR(eError, "BridgeSyncFreeEvent");
+               }
+#if defined(PVRSRV_ENABLE_SYNC_POISONING)
+               (void) _SyncPrimSetValue(psSyncInt, LOCAL_SYNC_PRIM_POISON_VALUE);
+#else
+               /* Reset the sync prim value as it is freed.
+                * This guarantees the next sync allocated to the client will
+                * have a value of zero, so the client does not need to
+                * explicitly initialise the sync value to zero.
+                * The allocation of the backing memory for the sync prim block
+                * is done with ZERO_ON_ALLOC so the memory is initially all zero.
+                */
+               (void) _SyncPrimSetValue(psSyncInt, LOCAL_SYNC_PRIM_RESET_VALUE);
+#endif
+
+               RA_Free(psContext->psSubAllocRA, psSyncInt->u.sLocal.uiSpanAddr);
+               OSFreeMem(psSyncInt);
+               _SyncPrimContextUnref(psContext);
+       }
+}
+
+static void SyncPrimLocalUnref(SYNC_PRIM *psSyncInt)
+{
+       if (!OSAtomicRead(&psSyncInt->u.sLocal.hRefCount))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "SyncPrimLocalUnref sync already freed"));
+       }
+       else if (0 == OSAtomicDecrement(&psSyncInt->u.sLocal.hRefCount))
+       {
+               SyncPrimLocalFree(psSyncInt, IMG_FALSE);
+       }
+}
+
+static IMG_UINT32 SyncPrimGetFirmwareAddrLocal(SYNC_PRIM *psSyncInt)
+{
+       SYNC_PRIM_BLOCK *psSyncBlock;
+
+       psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+       return psSyncBlock->ui32FirmwareAddr + SyncPrimGetOffset(psSyncInt);
+}
+
+static INLINE IMG_UINT32 _Log2(IMG_UINT32 ui32Align)
+{
+       PVR_ASSERT(IsPower2(ui32Align));
+       return ExactLog2(ui32Align);
+}
+
+/*
+       External interfaces
+ */
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimContextCreate(SHARED_DEV_CONNECTION hDevConnection,
+                      PSYNC_PRIM_CONTEXT *phSyncPrimContext)
+{
+       SYNC_PRIM_CONTEXT *psContext;
+       PVRSRV_ERROR eError;
+
+       psContext = OSAllocMem(sizeof(SYNC_PRIM_CONTEXT));
+       PVR_GOTO_IF_NOMEM(psContext, eError, fail_alloc);
+
+       psContext->hDevConnection = hDevConnection;
+
+       OSSNPrintf(psContext->azName, SYNC_PRIM_NAME_SIZE, "Sync Prim RA-%p", psContext);
+       OSSNPrintf(psContext->azSpanName, SYNC_PRIM_NAME_SIZE, "Sync Prim span RA-%p", psContext);
+
+       /*
+               Create the RA for sub-allocations of the SyncPrims
+
+               Note:
+               The import size doesn't matter here as the server will pass
+               back the block size when it does the import, which overrides
+               what we specify here.
+        */
+
+       psContext->psSubAllocRA = RA_Create(psContext->azName,
+                                           /* Params for imports */
+                                           _Log2(sizeof(IMG_UINT32)),
+                                           RA_LOCKCLASS_2,
+                                           SyncPrimBlockImport,
+                                           SyncPrimBlockUnimport,
+                                           psContext,
+                                           RA_POLICY_DEFAULT);
+       PVR_GOTO_IF_NOMEM(psContext->psSubAllocRA, eError, fail_suballoc);
+
+       /*
+               Create the span-management RA
+
+               The RA requires that we work with linear spans. For our use
+               here we don't require this behaviour as we're always working
+               within offsets of blocks (imports). However, we need to keep
+               the RA happy, so we create the "span" management RA, which
+               ensures that all our imports are added to the RA in a linear
+               fashion.
+        */
+       psContext->psSpanRA = RA_Create(psContext->azSpanName,
+                                       /* Params for imports */
+                                       0,
+                                       RA_LOCKCLASS_1,
+                                       NULL,
+                                       NULL,
+                                       NULL,
+                                       RA_POLICY_DEFAULT);
+       PVR_GOTO_IF_NOMEM(psContext->psSpanRA, eError, fail_span);
+
+       if (!RA_Add(psContext->psSpanRA, 0, MAX_SYNC_MEM, 0, NULL))
+       {
+               RA_Delete(psContext->psSpanRA);
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, fail_span);
+       }
+
+#if defined(LOCAL_SYNC_BLOCK_RETAIN_FIRST)
+       psContext->hFirstSyncPrim = NULL;
+#endif
+
+       OSAtomicWrite(&psContext->hRefCount, 1);
+
+       *phSyncPrimContext = psContext;
+       return PVRSRV_OK;
+fail_span:
+       RA_Delete(psContext->psSubAllocRA);
+fail_suballoc:
+       OSFreeMem(psContext);
+fail_alloc:
+       return eError;
+}
+
+IMG_INTERNAL void SyncPrimContextDestroy(PSYNC_PRIM_CONTEXT hSyncPrimContext)
+{
+       SYNC_PRIM_CONTEXT *psContext = hSyncPrimContext;
+
+#if defined(LOCAL_SYNC_BLOCK_RETAIN_FIRST)
+       /* Free the first sync prim that was allocated as part of this context */
+       if (psContext->hFirstSyncPrim)
+       {
+               SyncPrimLocalFree((SYNC_PRIM *)psContext->hFirstSyncPrim, IMG_TRUE);
+               psContext->hFirstSyncPrim = NULL;
+       }
+#endif
+
+       if (1 != OSAtomicRead(&psContext->hRefCount))
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s attempted with active references; this may be the result of a race", __func__));
+       }
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+#if defined(__KERNEL__)
+       if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Forcing context destruction due to bad driver state", __func__));
+               OSAtomicWrite(&psContext->hRefCount, 1);
+       }
+#endif
+#endif
+       _SyncPrimContextUnref(psContext);
+}
+
+static PVRSRV_ERROR _SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext,
+                                   PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+                                   const IMG_CHAR *pszClassName,
+                                   IMG_BOOL bServerSync)
+{
+       SYNC_PRIM_CONTEXT *psContext = hSyncPrimContext;
+       SYNC_PRIM_BLOCK *psSyncBlock;
+       SYNC_PRIM *psNewSync;
+       PVRSRV_ERROR eError;
+       RA_BASE_T uiSpanAddr;
+
+       PVR_LOG_RETURN_IF_INVALID_PARAM(hSyncPrimContext, "hSyncPrimContext");
+
+       psNewSync = OSAllocMem(sizeof(SYNC_PRIM));
+       PVR_GOTO_IF_NOMEM(psNewSync, eError, fail_alloc);
+
+       eError = RA_Alloc(psContext->psSubAllocRA,
+                         sizeof(IMG_UINT32),
+                         RA_NO_IMPORT_MULTIPLIER,
+                         0,
+                         sizeof(IMG_UINT32),
+                         "Sync_Prim",
+                         &uiSpanAddr,
+                         NULL,
+                         (RA_PERISPAN_HANDLE *) &psSyncBlock);
+       PVR_GOTO_IF_ERROR(eError, fail_raalloc);
+
+       psNewSync->eType = SYNC_PRIM_TYPE_LOCAL;
+       OSAtomicWrite(&psNewSync->u.sLocal.hRefCount, 1);
+       psNewSync->u.sLocal.uiSpanAddr = uiSpanAddr;
+       psNewSync->u.sLocal.psSyncBlock = psSyncBlock;
+       SyncPrimGetCPULinAddr(psNewSync);
+       *ppsSync = &psNewSync->sCommon;
+       _SyncPrimContextRef(psContext);
+#if defined(PVRSRV_ENABLE_SYNC_POISONING)
+       (void) _SyncPrimSetValue(psNewSync, LOCAL_SYNC_PRIM_RESET_VALUE);
+#endif
+
+#if defined(LOCAL_SYNC_BLOCK_RETAIN_FIRST)
+       /* If this is the first sync prim allocated in the context, keep a handle to it */
+       if (psSyncBlock->uiSpanBase == 0 && psNewSync->u.sLocal.uiSpanAddr == 0)
+       {
+               psContext->hFirstSyncPrim = psNewSync;
+       }
+#endif
+
+       if (GetInfoPageDebugFlags(psSyncBlock->psContext->hDevConnection) & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED)
+       {
+               IMG_CHAR szClassName[PVRSRV_SYNC_NAME_LENGTH];
+               size_t uiSize;
+
+               if (pszClassName)
+               {
+                       uiSize = OSStringNLength(pszClassName, PVRSRV_SYNC_NAME_LENGTH);
+                       /* Copy the class name annotation into a fixed-size array */
+                       OSCachedMemCopy(szClassName, pszClassName, uiSize);
+                       if (uiSize == PVRSRV_SYNC_NAME_LENGTH)
+                               szClassName[PVRSRV_SYNC_NAME_LENGTH-1] = '\0';
+                       else
+                               szClassName[uiSize++] = '\0';
+               }
+               else
+               {
+                       /* No class name annotation */
+                       uiSize = 0;
+                       szClassName[0] = '\0';
+               }
+
+               /* record this sync */
+               eError = BridgeSyncRecordAdd(
+                               GetBridgeHandle(psSyncBlock->psContext->hDevConnection),
+                               &psNewSync->u.sLocal.hRecord,
+                               psSyncBlock->hServerSyncPrimBlock,
+                               psSyncBlock->ui32FirmwareAddr,
+                               SyncPrimGetOffset(psNewSync),
+                               bServerSync,
+                               uiSize,
+                               szClassName);
+               if (PVRSRV_OK != eError)
+               {
+                       PVR_DPF((PVR_DBG_ERROR, "%s: failed to add SyncRecord \"%s\" (%s)",
+                                       __func__,
+                                       szClassName,
+                                       PVRSRVGETERRORSTRING(eError)));
+                       psNewSync->u.sLocal.hRecord = NULL;
+               }
+       }
+       else
+       {
+               size_t  uiSize;
+
+               uiSize = OSStringNLength(pszClassName, PVRSRV_SYNC_NAME_LENGTH);
+
+               if (uiSize < PVRSRV_SYNC_NAME_LENGTH)
+                       uiSize++;
+               /* uiSize now reflects size used for pszClassName + NUL byte */
+
+               eError = BridgeSyncAllocEvent(GetBridgeHandle(hSyncPrimContext->hDevConnection),
+                                             bServerSync,
+                                             psSyncBlock->ui32FirmwareAddr + SyncPrimGetOffset(psNewSync),
+                                             uiSize,
+                                             pszClassName);
+               PVR_LOG_IF_ERROR(eError, "BridgeSyncAllocEvent");
+       }
+
+       return PVRSRV_OK;
+
+fail_raalloc:
+       OSFreeMem(psNewSync);
+fail_alloc:
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext,
+                                        PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+                                        const IMG_CHAR *pszClassName)
+{
+       return _SyncPrimAlloc(hSyncPrimContext,
+                             ppsSync,
+                             pszClassName,
+                             IMG_FALSE);
+}
+
+static PVRSRV_ERROR
+_SyncPrimSetValue(SYNC_PRIM *psSyncInt, IMG_UINT32 ui32Value)
+{
+       PVRSRV_ERROR eError;
+
+       if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+       {
+               SYNC_PRIM_BLOCK *psSyncBlock;
+               SYNC_PRIM_CONTEXT *psContext;
+
+               psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+               psContext = psSyncBlock->psContext;
+
+               eError = BridgeSyncPrimSet(GetBridgeHandle(psContext->hDevConnection),
+                                          psSyncBlock->hServerSyncPrimBlock,
+                                          SyncPrimGetOffset(psSyncInt)/sizeof(IMG_UINT32),
+                                          ui32Value);
+       }
+       else
+       {
+               /* Server sync not supported, attempted use of server sync */
+               return PVRSRV_ERROR_NOT_SUPPORTED;
+       }
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR SyncPrimFree(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       SYNC_PRIM *psSyncInt;
+
+       PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out);
+
+       psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+       if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+       {
+               SyncPrimLocalUnref(psSyncInt);
+       }
+       else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+       {
+               /* Server sync not supported, attempted use of server sync */
+               return PVRSRV_ERROR_NOT_SUPPORTED;
+       }
+       else
+       {
+               /*
+                       Either the client has given us a bad pointer or there is an
+                       error in this module
+                */
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_SYNC_PRIM, err_out);
+       }
+
+err_out:
+       return eError;
+}
+
+#if defined(NO_HARDWARE)
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimNoHwUpdate(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       SYNC_PRIM *psSyncInt;
+
+       PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out);
+
+       psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+       /* There is no check that psSyncInt is LOCAL because this call
+          substitutes for the Firmware updating a sync, and that sync could
+          be a server one */
+
+       eError = _SyncPrimSetValue(psSyncInt, ui32Value);
+
+err_out:
+       return eError;
+}
+#endif
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimSet(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       SYNC_PRIM *psSyncInt;
+
+       PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out);
+
+       psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+       if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+       {
+               /* Invalid sync type */
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_SYNC_PRIM, err_out);
+       }
+
+       eError = _SyncPrimSetValue(psSyncInt, ui32Value);
+
+#if defined(PDUMP)
+       SyncPrimPDump(psSync);
+#endif
+err_out:
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR SyncPrimLocalGetHandleAndOffset(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+                                                          IMG_HANDLE *phBlock,
+                                                          IMG_UINT32 *pui32Offset)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       SYNC_PRIM *psSyncInt;
+
+       PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out);
+       PVR_LOG_GOTO_IF_INVALID_PARAM(phBlock, eError, err_out);
+       PVR_LOG_GOTO_IF_INVALID_PARAM(pui32Offset, eError, err_out);
+
+       psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+       if (likely(psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL))
+       {
+               *phBlock = psSyncInt->u.sLocal.psSyncBlock->hServerSyncPrimBlock;
+               *pui32Offset = psSyncInt->u.sLocal.uiSpanAddr - psSyncInt->u.sLocal.psSyncBlock->uiSpanBase;
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: psSync not a Local sync prim (%d)",
+                               __func__, psSyncInt->eType));
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, err_out);
+       }
+
+err_out:
+       return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimGetFirmwareAddr(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 *pui32FwAddr)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       SYNC_PRIM *psSyncInt;
+
+       *pui32FwAddr = 0;
+       PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out);
+
+       psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+       if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+       {
+               *pui32FwAddr = SyncPrimGetFirmwareAddrLocal(psSyncInt);
+       }
+       else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+       {
+               /* Server sync not supported, attempted use of server sync */
+               return PVRSRV_ERROR_NOT_SUPPORTED;
+       }
+       else
+       {
+               /* Either the client has given us a bad pointer or there is an
+                * error in this module
+                */
+               PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_SYNC_PRIM, err_out);
+       }
+
+err_out:
+       return eError;
+}
+
+#if defined(PDUMP)
+IMG_INTERNAL void SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+       SYNC_PRIM *psSyncInt;
+       SYNC_PRIM_BLOCK *psSyncBlock;
+       SYNC_PRIM_CONTEXT *psContext;
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(psSync != NULL);
+       psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+       if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+       {
+               /* Invalid sync type */
+               PVR_ASSERT(IMG_FALSE);
+               return;
+       }
+
+       psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+       psContext = psSyncBlock->psContext;
+
+       eError = BridgeSyncPrimPDump(GetBridgeHandle(psContext->hDevConnection),
+                                    psSyncBlock->hServerSyncPrimBlock,
+                                    SyncPrimGetOffset(psSyncInt));
+       PVR_LOG_IF_ERROR(eError, "BridgeSyncPrimPDump");
+       PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL void SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value)
+{
+       SYNC_PRIM *psSyncInt;
+       SYNC_PRIM_BLOCK *psSyncBlock;
+       SYNC_PRIM_CONTEXT *psContext;
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(psSync != NULL);
+       psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+       if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+       {
+               /* Invalid sync type */
+               PVR_ASSERT(IMG_FALSE);
+               return;
+       }
+
+       psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+       psContext = psSyncBlock->psContext;
+
+       eError = BridgeSyncPrimPDumpValue(GetBridgeHandle(psContext->hDevConnection),
+                                         psSyncBlock->hServerSyncPrimBlock,
+                                         SyncPrimGetOffset(psSyncInt),
+                                         ui32Value);
+       PVR_LOG_IF_ERROR(eError, "BridgeSyncPrimPDumpValue");
+       PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL void SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+                                   IMG_UINT32 ui32Value,
+                                   IMG_UINT32 ui32Mask,
+                                   PDUMP_POLL_OPERATOR eOperator,
+                                   IMG_UINT32 ui32PDumpFlags)
+{
+       SYNC_PRIM *psSyncInt;
+       SYNC_PRIM_BLOCK *psSyncBlock;
+       SYNC_PRIM_CONTEXT *psContext;
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(psSync != NULL);
+       psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+       if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+       {
+               /* Invalid sync type */
+               PVR_ASSERT(IMG_FALSE);
+               return;
+       }
+
+       psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+       psContext = psSyncBlock->psContext;
+
+       eError = BridgeSyncPrimPDumpPol(GetBridgeHandle(psContext->hDevConnection),
+                                       psSyncBlock->hServerSyncPrimBlock,
+                                       SyncPrimGetOffset(psSyncInt),
+                                       ui32Value,
+                                       ui32Mask,
+                                       eOperator,
+                                       ui32PDumpFlags);
+       PVR_LOG_IF_ERROR(eError, "BridgeSyncPrimPDumpPol");
+       PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL void SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+                                   IMG_UINT64 uiWriteOffset,
+                                   IMG_UINT64 uiPacketSize,
+                                   IMG_UINT64 uiBufferSize)
+{
+       SYNC_PRIM *psSyncInt;
+       SYNC_PRIM_BLOCK *psSyncBlock;
+       SYNC_PRIM_CONTEXT *psContext;
+       PVRSRV_ERROR eError;
+
+       PVR_ASSERT(psSync != NULL);
+       psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+       if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+       {
+               /* Invalid sync type */
+               PVR_ASSERT(IMG_FALSE);
+               return;
+       }
+
+       psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+       psContext = psSyncBlock->psContext;
+
+#if defined(__linux__) && defined(__i386__)
+       PVR_ASSERT(uiWriteOffset<IMG_UINT32_MAX);
+       PVR_ASSERT(uiPacketSize<IMG_UINT32_MAX);
+       PVR_ASSERT(uiBufferSize<IMG_UINT32_MAX);
+#endif
+       eError = BridgeSyncPrimPDumpCBP(GetBridgeHandle(psContext->hDevConnection),
+                                       psSyncBlock->hServerSyncPrimBlock,
+                                       SyncPrimGetOffset(psSyncInt),
+                                       TRUNCATE_64BITS_TO_32BITS(uiWriteOffset),
+                                       TRUNCATE_64BITS_TO_32BITS(uiPacketSize),
+                                       TRUNCATE_64BITS_TO_32BITS(uiBufferSize));
+       PVR_LOG_IF_ERROR(eError, "BridgeSyncPrimPDumpCBP");
+       PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+#endif
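+
+/*
+	Illustrative only: a minimal, compiled-out sketch of the expected client
+	call sequence for the external interfaces above (context create, alloc,
+	set, free, destroy). The guard macro and the class name string are
+	hypothetical and are not used elsewhere in the driver.
+ */
+#if defined(SYNC_PRIM_USAGE_EXAMPLE)
+static PVRSRV_ERROR SyncPrimUsageExample(SHARED_DEV_CONNECTION hDevConnection)
+{
+       PSYNC_PRIM_CONTEXT hSyncPrimContext;
+       PVRSRV_CLIENT_SYNC_PRIM *psSync;
+       PVRSRV_ERROR eError;
+
+       /* Create a context; it owns the sub-allocation and span RAs */
+       eError = SyncPrimContextCreate(hDevConnection, &hSyncPrimContext);
+       if (eError != PVRSRV_OK)
+       {
+               return eError;
+       }
+
+       /* Allocate a local sync prim and give it an initial value */
+       eError = SyncPrimAlloc(hSyncPrimContext, &psSync, "ExampleSync");
+       if (eError == PVRSRV_OK)
+       {
+               eError = SyncPrimSet(psSync, 0);
+               (void) SyncPrimFree(psSync);
+       }
+
+       /* Destroying the context drops the reference taken at creation */
+       SyncPrimContextDestroy(hSyncPrimContext);
+       return eError;
+}
+#endif /* SYNC_PRIM_USAGE_EXAMPLE */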
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/common/tlclient.c b/drivers/gpu/drm/img/img-rogue/services/shared/common/tlclient.c
new file mode 100644 (file)
index 0000000..dc3f17a
--- /dev/null
@@ -0,0 +1,500 @@
+/*************************************************************************/ /*!
+@File           tlclient.c
+@Title          Services Transport Layer shared API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Transport layer common API used in both clients and server
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* DESIGN NOTE
+ * This transport layer consumer-role API was created as a shared API when a
+ * client wanted to read the data of a TL stream from within the KM server
+ * driver. This was in addition to the existing clients supported externally
+ * by the UM client library component via PVR API layer.
+ * This shared API is thus used by the PVR TL API in the client library and
+ * by clients internal to the server driver module. It depends on
+ * client entry points of the TL and DEVMEM bridge modules. These entry points
+ * encapsulate from the TL shared API whether a direct bridge or an indirect
+ * (ioctl) bridge is used.
+ * One reason for needing this layer centres on the fact that some of the
+ * API functions make multiple bridge calls, and the logic that glues these
+ * together is common regardless of client location. Further, this layer has
+ * allowed the defensive coding that checks parameters to move into the PVR
+ * API layer, where untrusted clients enter, giving a more efficient KM code
+ * path. An illustrative, compiled-out usage sketch of the consumer-side call
+ * sequence appears near the end of this file.
+ */
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+
+#include "allocmem.h"
+#include "devicemem.h"
+
+#include "tlclient.h"
+#include "pvrsrv_tlcommon.h"
+#include "client_pvrtl_bridge.h"
+
+#if defined(__KERNEL__)
+#include "srvcore.h"
+#else
+#include "srvcore_intern.h"
+#endif
+
+/* Defines/Constants
+ */
+
+#define NO_ACQUIRE             0xffffffffU
+
+/* User-side stream descriptor structure.
+ */
+typedef struct _TL_STREAM_DESC_
+{
+       /* Handle on kernel-side stream descriptor */
+       IMG_HANDLE              hServerSD;
+
+       /* Stream data buffer variables */
+       DEVMEM_MEMDESC*                 psUMmemDesc;
+       IMG_PBYTE                               pBaseAddr;
+
+       /* Offset in bytes into the circular buffer; valid only after
+        * an Acquire call and undefined after a Release. */
+       IMG_UINT32      uiReadOffset;
+
+       /* Always a positive integer when the Acquire call returns and a release
+        * is outstanding. Undefined at all other times. */
+       IMG_UINT32      uiReadLen;
+
+       /* Counter indicating how many writes to the stream have failed.
+        * Used to reduce the number of errors in the output log. */
+       IMG_UINT32 ui32WritesFailed;
+
+       /* Name of the stream. */
+       IMG_CHAR szName[PRVSRVTL_MAX_STREAM_NAME_SIZE];
+} TL_STREAM_DESC, *PTL_STREAM_DESC;
+
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientOpenStream(SHARED_DEV_CONNECTION hDevConnection,
+               const IMG_CHAR* pszName,
+               IMG_UINT32   ui32Mode,
+               IMG_HANDLE*  phSD)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       TL_STREAM_DESC *psSD = NULL;
+       IMG_HANDLE hTLPMR;
+       IMG_HANDLE hTLImportHandle;
+       IMG_DEVMEM_SIZE_T uiImportSize;
+       PVRSRV_MEMALLOCFLAGS_T uiMemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE;
+
+       PVR_ASSERT(hDevConnection);
+       PVR_ASSERT(pszName);
+       PVR_ASSERT(phSD);
+       *phSD = NULL;
+
+       /* Allocate memory for the stream descriptor object, initialise with
+        * "no data read" yet. */
+       psSD = OSAllocZMem(sizeof(TL_STREAM_DESC));
+       PVR_LOG_GOTO_IF_NOMEM(psSD, eError, e0);
+       psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE;
+
+       /* Send open stream request to kernel server to get stream handle and
+        * buffer cookie so we can get access to the buffer in this process. */
+       eError = BridgeTLOpenStream(GetBridgeHandle(hDevConnection), pszName,
+                       ui32Mode, &psSD->hServerSD, &hTLPMR);
+       if (eError != PVRSRV_OK)
+       {
+               if ((ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WAIT) &&
+                       (eError == PVRSRV_ERROR_TIMEOUT))
+               {
+                       goto e1;
+               }
+               PVR_LOG_GOTO_IF_ERROR(eError, "BridgeTLOpenStream", e1);
+       }
+
+       /* Convert server export cookie into a cookie for use by this client */
+       eError = DevmemMakeLocalImportHandle(hDevConnection,
+                       hTLPMR, &hTLImportHandle);
+       PVR_LOG_GOTO_IF_ERROR(eError, "DevmemMakeLocalImportHandle", e2);
+
+       uiMemFlags |= ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WO ?
+               PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE : 0ULL;
+       /* Now convert client cookie into a client handle on the buffer's
+        * physical memory region */
+       eError = DevmemLocalImport(hDevConnection,
+                                  hTLImportHandle,
+                                  uiMemFlags,
+                                  &psSD->psUMmemDesc,
+                                  &uiImportSize,
+                                  "TLBuffer");
+       PVR_LOG_GOTO_IF_ERROR(eError, "DevmemLocalImport", e3);
+
+       /* Now map the memory into the virtual address space of this process. */
+       eError = DevmemAcquireCpuVirtAddr(psSD->psUMmemDesc,
+                                         (void **) &psSD->pBaseAddr);
+       PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", e4);
+
+       /* Ignore error, not much that can be done */
+       (void) DevmemUnmakeLocalImportHandle(hDevConnection,
+                       hTLImportHandle);
+
+       /* Copy stream name */
+       OSStringLCopy(psSD->szName, pszName, PRVSRVTL_MAX_STREAM_NAME_SIZE);
+
+       /* Return client descriptor handle to caller */
+       *phSD = psSD;
+       return PVRSRV_OK;
+
+/* Clean up post buffer setup */
+e4:
+       DevmemFree(psSD->psUMmemDesc);
+e3:
+       (void) DevmemUnmakeLocalImportHandle(hDevConnection,
+                       hTLImportHandle);
+/* Clean up post stream open */
+e2:
+       BridgeTLCloseStream(GetBridgeHandle(hDevConnection), psSD->hServerSD);
+
+/* Clean up post allocation of the descriptor object */
+e1:
+       OSFreeMem(psSD);
+
+e0:
+       return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientCloseStream(SHARED_DEV_CONNECTION hDevConnection,
+               IMG_HANDLE hSD)
+{
+       PVRSRV_ERROR eError;
+       TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+       PVR_ASSERT(hDevConnection);
+       PVR_ASSERT(hSD);
+
+       /* Check the caller-provided stream descriptor is valid and still open */
+       if (!psSD->hServerSD)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: descriptor already "
+                               "closed/not open", __func__));
+               return PVRSRV_ERROR_HANDLE_NOT_FOUND;
+       }
+
+       /* Check if acquire is outstanding, perform release if it is, ignore result
+        * as there is not much we can do if it is an error other than close */
+       if (psSD->uiReadLen != NO_ACQUIRE)
+       {
+               (void) BridgeTLReleaseData(GetBridgeHandle(hDevConnection),
+                               psSD->hServerSD, psSD->uiReadOffset, psSD->uiReadLen);
+               psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE;
+       }
+
+       /* Clean up DevMem resources used for this stream in this client */
+       DevmemReleaseCpuVirtAddr(psSD->psUMmemDesc);
+
+       DevmemFree(psSD->psUMmemDesc);
+
+       /* Send close to server to clean up kernel mode resources for this
+        * handle and release the memory. */
+       eError = DestroyServerResource(hDevConnection,
+                                      NULL,
+                                      BridgeTLCloseStream,
+                                      psSD->hServerSD);
+       PVR_LOG_IF_ERROR(eError, "BridgeTLCloseStream");
+
+       if (psSD->ui32WritesFailed != 0)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s() %u writes failed to stream %s (%c)",
+                       __func__, psSD->ui32WritesFailed, psSD->szName,
+                       psSD->ui32WritesFailed == IMG_UINT32_MAX ? 'T' : 'F'));
+       }
+
+       OSCachedMemSet(psSD, 0x00, sizeof(TL_STREAM_DESC));
+       OSFreeMem(psSD);
+
+       return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientDiscoverStreams(SHARED_DEV_CONNECTION hDevConnection,
+               const IMG_CHAR *pszNamePattern,
+               IMG_CHAR aszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE],
+               IMG_UINT32 *pui32NumFound)
+{
+       PVR_ASSERT(hDevConnection);
+       PVR_ASSERT(pszNamePattern);
+       PVR_ASSERT(pui32NumFound);
+
+       return BridgeTLDiscoverStreams(GetBridgeHandle(hDevConnection),
+                       pszNamePattern,
+                       /* we need to treat this as a one-dimensional array */
+                       *pui32NumFound * PRVSRVTL_MAX_STREAM_NAME_SIZE,
+                       (IMG_CHAR *) aszStreams,
+                       pui32NumFound);
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReserveStream(SHARED_DEV_CONNECTION hDevConnection,
+               IMG_HANDLE hSD,
+               IMG_UINT8 **ppui8Data,
+               IMG_UINT32 ui32Size)
+{
+       PVRSRV_ERROR eError;
+       TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+       IMG_UINT32 ui32BufferOffset, ui32Unused;
+
+       PVR_ASSERT(hDevConnection);
+       PVR_ASSERT(hSD);
+       PVR_ASSERT(ppui8Data);
+       PVR_ASSERT(ui32Size);
+
+       eError = BridgeTLReserveStream(GetBridgeHandle(hDevConnection),
+                       psSD->hServerSD, &ui32BufferOffset, ui32Size, ui32Size, &ui32Unused);
+       PVR_RETURN_IF_ERROR(eError);
+
+       *ppui8Data = psSD->pBaseAddr + ui32BufferOffset;
+
+       return PVRSRV_OK;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReserveStream2(SHARED_DEV_CONNECTION hDevConnection,
+               IMG_HANDLE hSD,
+               IMG_UINT8 **ppui8Data,
+               IMG_UINT32 ui32Size,
+               IMG_UINT32 ui32SizeMin,
+               IMG_UINT32 *pui32Available)
+{
+       PVRSRV_ERROR eError;
+       TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+       IMG_UINT32 ui32BufferOffset;
+
+       PVR_ASSERT(hDevConnection);
+       PVR_ASSERT(hSD);
+       PVR_ASSERT(ppui8Data);
+       PVR_ASSERT(ui32Size);
+
+       eError = BridgeTLReserveStream(GetBridgeHandle(hDevConnection),
+                       psSD->hServerSD, &ui32BufferOffset, ui32Size, ui32SizeMin,
+                       pui32Available);
+       PVR_RETURN_IF_ERROR(eError);
+
+       *ppui8Data = psSD->pBaseAddr + ui32BufferOffset;
+
+       return PVRSRV_OK;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientCommitStream(SHARED_DEV_CONNECTION hDevConnection,
+               IMG_HANDLE hSD,
+               IMG_UINT32 ui32Size)
+{
+       PVRSRV_ERROR eError;
+       TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+       PVR_ASSERT(hDevConnection);
+       PVR_ASSERT(hSD);
+       PVR_ASSERT(ui32Size);
+
+       eError = BridgeTLCommitStream(GetBridgeHandle(hDevConnection),
+                       psSD->hServerSD, ui32Size);
+       PVR_RETURN_IF_ERROR(eError);
+
+       return PVRSRV_OK;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientAcquireData(SHARED_DEV_CONNECTION hDevConnection,
+               IMG_HANDLE  hSD,
+               IMG_PBYTE*  ppPacketBuf,
+               IMG_UINT32* pui32BufLen)
+{
+       PVRSRV_ERROR eError;
+       TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+       PVR_ASSERT(hDevConnection);
+       PVR_ASSERT(hSD);
+       PVR_ASSERT(ppPacketBuf);
+       PVR_ASSERT(pui32BufLen);
+
+       /* Non-blocking acquires can return no data, so for those and for
+        * error paths, ensure we clear the output parameters first. */
+       *ppPacketBuf = NULL;
+       *pui32BufLen = 0;
+
+       /* Check Acquire has not been called twice in a row without a release */
+       if (psSD->uiReadOffset != NO_ACQUIRE)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: acquire already "
+                               "outstanding, ReadOffset(%d), ReadLength(%d)",
+                               __func__, psSD->uiReadOffset, psSD->uiReadLen));
+               return PVRSRV_ERROR_RETRY;
+       }
+
+       /* Ask the kernel server for the next chunk of data to read */
+       eError = BridgeTLAcquireData(GetBridgeHandle(hDevConnection),
+                       psSD->hServerSD, &psSD->uiReadOffset, &psSD->uiReadLen);
+       if (eError != PVRSRV_OK)
+       {
+               /* Mask reporting of the errors seen under normal operation */
+               if ((eError != PVRSRV_ERROR_RESOURCE_UNAVAILABLE) &&
+                       (eError != PVRSRV_ERROR_TIMEOUT) &&
+                       (eError != PVRSRV_ERROR_STREAM_READLIMIT_REACHED))
+               {
+                       PVR_LOG_ERROR(eError, "BridgeTLAcquireData");
+               }
+               psSD->uiReadOffset = psSD->uiReadLen = NO_ACQUIRE;
+               return eError;
+       }
+       /* else PVRSRV_OK */
+
+       /* Return the data offset and length to the caller if bytes are available
+        * to be read. Could be zero for non-blocking mode so pass back cleared
+        * values above */
+       if (psSD->uiReadLen)
+       {
+               *ppPacketBuf = psSD->pBaseAddr + psSD->uiReadOffset;
+               *pui32BufLen = psSD->uiReadLen;
+       }
+
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _TLClientReleaseDataLen(
+               SHARED_DEV_CONNECTION hDevConnection,
+               TL_STREAM_DESC* psSD,
+               IMG_UINT32 uiReadLen)
+{
+       PVRSRV_ERROR eError;
+
+       /* If the previous acquire did not return any data, this is a no-operation */
+       if (psSD->uiReadLen == 0)
+       {
+               return PVRSRV_OK;
+       }
+
+       /* Check release has not been called twice in a row without an acquire */
+       if (psSD->uiReadOffset == NO_ACQUIRE)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: no acquire to release", __func__));
+               return PVRSRV_ERROR_RETRY;
+       }
+
+       /* Inform the kernel to release the data from the buffer */
+       eError = BridgeTLReleaseData(GetBridgeHandle(hDevConnection),
+                       psSD->hServerSD,
+                       psSD->uiReadOffset, uiReadLen);
+       PVR_LOG_IF_ERROR(eError, "BridgeTLReleaseData");
+
+       /* Reset state to indicate no outstanding acquire */
+       psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE;
+
+       return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReleaseData(SHARED_DEV_CONNECTION hDevConnection,
+               IMG_HANDLE hSD)
+{
+       TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+       PVR_ASSERT(hDevConnection);
+       PVR_ASSERT(hSD);
+
+       return _TLClientReleaseDataLen(hDevConnection, psSD, psSD->uiReadLen);
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReleaseDataLess(SHARED_DEV_CONNECTION hDevConnection,
+               IMG_HANDLE hSD, IMG_UINT32 uiActualReadLen)
+{
+       TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+       PVR_ASSERT(hDevConnection);
+       PVR_ASSERT(hSD);
+
+       /* Check the specified size is within the size returned by Acquire */
+       if (uiActualReadLen > psSD->uiReadLen)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: size to release exceeds the size returned by the last acquire", __func__));
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       return _TLClientReleaseDataLen(hDevConnection, psSD, uiActualReadLen);
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientWriteData(SHARED_DEV_CONNECTION hDevConnection,
+               IMG_HANDLE hSD,
+               IMG_UINT32 ui32Size,
+               IMG_BYTE *pui8Data)
+{
+       PVRSRV_ERROR eError;
+       TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+       PVR_ASSERT(hDevConnection);
+       PVR_ASSERT(hSD);
+       PVR_ASSERT(ui32Size);
+       PVR_ASSERT(pui8Data);
+
+       eError = BridgeTLWriteData(GetBridgeHandle(hDevConnection),
+                       psSD->hServerSD, ui32Size, pui8Data);
+
+       if (eError == PVRSRV_ERROR_STREAM_FULL)
+       {
+               if (psSD->ui32WritesFailed == 0)
+               {
+                       PVR_LOG_ERROR(eError, "BridgeTLWriteData");
+               }
+               if (psSD->ui32WritesFailed != IMG_UINT32_MAX)
+               {
+                       psSD->ui32WritesFailed++;
+               }
+       }
+       else if (eError != PVRSRV_OK)
+       {
+               PVR_LOG_ERROR(eError, "BridgeTLWriteData");
+       }
+
+       return eError;
+}
+
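+/*
+ * Illustrative only: a minimal, compiled-out sketch of the consumer-side call
+ * sequence referred to in the design note at the top of this file (open, then
+ * acquire/release, then close). The guard macro and the stream name "hwperf"
+ * are hypothetical.
+ */
+#if defined(TLCLIENT_USAGE_EXAMPLE)
+static PVRSRV_ERROR TLClientUsageExample(SHARED_DEV_CONNECTION hDevConnection)
+{
+       IMG_HANDLE hSD;
+       IMG_PBYTE pbData;
+       IMG_UINT32 ui32Len;
+       PVRSRV_ERROR eError;
+
+       /* Open the stream, waiting for it to be created if necessary */
+       eError = TLClientOpenStream(hDevConnection, "hwperf",
+                       PVRSRV_STREAM_FLAG_OPEN_WAIT, &hSD);
+       PVR_RETURN_IF_ERROR(eError);
+
+       /* Acquire the next chunk of data; ui32Len may be zero if no data
+        * is available */
+       eError = TLClientAcquireData(hDevConnection, hSD, &pbData, &ui32Len);
+       if (eError == PVRSRV_OK && ui32Len != 0)
+       {
+               /* ... consume ui32Len bytes starting at pbData ... */
+
+               /* Release so the server can reuse that part of the buffer */
+               eError = TLClientReleaseData(hDevConnection, hSD);
+       }
+
+       /* Close releases any outstanding acquire and frees the mapping */
+       (void) TLClientCloseStream(hDevConnection, hSD);
+       return eError;
+}
+#endif /* TLCLIENT_USAGE_EXAMPLE */
+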
+/******************************************************************************
+ End of file (tlclient.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/common/uniq_key_splay_tree.c b/drivers/gpu/drm/img/img-rogue/services/shared/common/uniq_key_splay_tree.c
new file mode 100644 (file)
index 0000000..8adf200
--- /dev/null
@@ -0,0 +1,280 @@
+/*************************************************************************/ /*!
+@File
+@Title          Provides splay-trees.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implementation of splay-trees.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#include "allocmem.h" /* for OSAllocMem / OSFreeMem */
+#include "osfunc.h" /* for OSCachedMemSet */
+#include "pvr_debug.h"
+#include "uniq_key_splay_tree.h"
+
+/**
+ * This function performs a simple top-down splay.
+ *
+ * @param uiFlags the flags that must be splayed to the root (if possible).
+ * @param psTree the tree to splay.
+ * @return the resulting tree after the splay operation.
+ */
+IMG_INTERNAL
+IMG_PSPLAY_TREE PVRSRVSplay (IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree)
+{
+       IMG_SPLAY_TREE sTmp1;
+       IMG_PSPLAY_TREE psLeft;
+       IMG_PSPLAY_TREE psRight;
+       IMG_PSPLAY_TREE psTmp2;
+
+       if (psTree == NULL)
+       {
+               return NULL;
+       }
+
+       sTmp1.psLeft = NULL;
+       sTmp1.psRight = NULL;
+
+       psLeft = &sTmp1;
+       psRight = &sTmp1;
+
+       for (;;)
+       {
+               if (uiFlags < psTree->uiFlags)
+               {
+                       if (psTree->psLeft == NULL)
+                       {
+                               break;
+                       }
+
+                       if (uiFlags < psTree->psLeft->uiFlags)
+                       {
+                               /* if we get to this point, we need to rotate right the tree */
+                               psTmp2 = psTree->psLeft;
+                               psTree->psLeft = psTmp2->psRight;
+                               psTmp2->psRight = psTree;
+                               psTree = psTmp2;
+                               if (psTree->psLeft == NULL)
+                               {
+                                       break;
+                               }
+                       }
+
+                       /* if we get to this point, we need to link right */
+                       psRight->psLeft = psTree;
+                       psRight = psTree;
+                       psTree = psTree->psLeft;
+               }
+               else
+               {
+                       if (uiFlags > psTree->uiFlags)
+                       {
+                               if (psTree->psRight == NULL)
+                               {
+                                       break;
+                               }
+
+                               if (uiFlags > psTree->psRight->uiFlags)
+                               {
+                                       /* if we get to this point, we need to rotate left the tree */
+                                       psTmp2 = psTree->psRight;
+                                       psTree->psRight = psTmp2->psLeft;
+                                       psTmp2->psLeft = psTree;
+                                       psTree = psTmp2;
+                                       if (psTree->psRight == NULL)
+                                       {
+                                               break;
+                                       }
+                               }
+
+                               /* if we get to this point, we need to link left */
+                               psLeft->psRight = psTree;
+                               psLeft = psTree;
+                               psTree = psTree->psRight;
+                       }
+                       else
+                       {
+                               break;
+                       }
+               }
+       }
+
+       /* at this point re-assemble the tree */
+       psLeft->psRight = psTree->psLeft;
+       psRight->psLeft = psTree->psRight;
+       psTree->psLeft = sTmp1.psRight;
+       psTree->psRight = sTmp1.psLeft;
+       return psTree;
+}
+
+
+/**
+ * This function inserts a node into the tree (unless it is already present,
+ * in which case it is equivalent to performing only a splay operation).
+ *
+ * @param uiFlags the key of the new node
+ * @param psTree the tree into which the new node is to be added
+ * @return the resulting tree with the new node in it
+ */
+IMG_INTERNAL
+IMG_PSPLAY_TREE PVRSRVInsert(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree)
+{
+       IMG_PSPLAY_TREE psNew;
+
+       if (psTree != NULL)
+       {
+               psTree = PVRSRVSplay(uiFlags, psTree);
+               if (psTree->uiFlags == uiFlags)
+               {
+                       return psTree;
+               }
+       }
+
+       psNew = (IMG_PSPLAY_TREE) OSAllocMem(sizeof(IMG_SPLAY_TREE));
+       if (psNew == NULL)
+       {
+               PVR_DPF ((PVR_DBG_ERROR, "Error: failed to allocate memory to add a node to the splay tree."));
+               return NULL;
+       }
+
+       psNew->uiFlags = uiFlags;
+       OSCachedMemSet(&(psNew->buckets[0]), 0, sizeof(psNew->buckets));
+
+#if defined(PVR_CTZLL)
+       psNew->bHasEltsMapping = ~(((IMG_ELTS_MAPPINGS) 1 << (sizeof(psNew->buckets) / (sizeof(psNew->buckets[0])))) - 1);
+#endif
+
+       if (psTree == NULL)
+       {
+               psNew->psLeft  = NULL;
+               psNew->psRight = NULL;
+               return psNew;
+       }
+
+       if (uiFlags < psTree->uiFlags)
+       {
+               psNew->psLeft  = psTree->psLeft;
+               psNew->psRight = psTree;
+               psTree->psLeft = NULL;
+       }
+       else
+       {
+               psNew->psRight  = psTree->psRight;
+               psNew->psLeft   = psTree;
+               psTree->psRight = NULL;
+       }
+
+       return psNew;
+}
+
+
+/**
+ * Deletes a node from the tree (unless it is not there, in which case it is
+ * equivalent to a splay operation)
+ *
+ * @param uiFlags the value of the node to remove
+ * @param psTree the tree from which the node must be removed
+ * @return the resulting tree
+ */
+IMG_INTERNAL
+IMG_PSPLAY_TREE PVRSRVDelete(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree)
+{
+       IMG_PSPLAY_TREE psTmp;
+       if (psTree == NULL)
+       {
+               return NULL;
+       }
+
+       psTree = PVRSRVSplay(uiFlags, psTree);
+       if (uiFlags == psTree->uiFlags)
+       {
+               /* The value was present in the tree */
+               if (psTree->psLeft == NULL)
+               {
+                       psTmp = psTree->psRight;
+               }
+               else
+               {
+                       psTmp = PVRSRVSplay(uiFlags, psTree->psLeft);
+                       psTmp->psRight = psTree->psRight;
+               }
+               OSFreeMem(psTree);
+               return psTmp;
+       }
+
+       /* The value was not present in the tree, so just return it as is
+        * (after the splay) */
+       return psTree;
+}
+
+/**
+ * This function looks up the node for the given flags
+ *
+ * @param uiFlags the flags that must be associated with the node.
+ * @param psTree current splay tree node.
+ * @return the resulting tree node after the search operation.
+ */
+IMG_INTERNAL
+IMG_PSPLAY_TREE PVRSRVFindNode(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree)
+{
+       if (psTree == NULL)
+       {
+               return NULL;
+       }
+
+       while (psTree)
+       {
+               if (uiFlags == psTree->uiFlags)
+               {
+                       return psTree;
+               }
+
+               if (uiFlags < psTree->uiFlags)
+               {
+                       psTree = psTree->psLeft;
+                       continue;
+               }
+
+               if (uiFlags > psTree->uiFlags)
+               {
+                       psTree = psTree->psRight;
+                       continue;
+               }
+       }
+
+       return NULL;
+}
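+
+/*
+ * Illustrative only: a minimal, compiled-out sketch of how a caller is
+ * expected to use this API. Each call returns the new tree root, which the
+ * caller must keep. The guard macro and the example key values are
+ * hypothetical.
+ */
+#if defined(SPLAY_TREE_USAGE_EXAMPLE)
+static void SplayTreeUsageExample(void)
+{
+       IMG_PSPLAY_TREE psTree = NULL;
+
+       /* Insert returns the new root, with the new node placed at the top */
+       psTree = PVRSRVInsert(4, psTree);
+       psTree = PVRSRVInsert(8, psTree);
+
+       /* FindNode performs a plain search and does not re-balance the tree */
+       if (PVRSRVFindNode(8, psTree) != NULL)
+       {
+               /* key 8 is present; its buckets[] may now be used */
+       }
+
+       /* Delete splays the key to the root, then unlinks and frees that node */
+       psTree = PVRSRVDelete(4, psTree);
+       psTree = PVRSRVDelete(8, psTree);
+}
+#endif /* SPLAY_TREE_USAGE_EXAMPLE */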
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/common/uniq_key_splay_tree.h b/drivers/gpu/drm/img/img-rogue/services/shared/common/uniq_key_splay_tree.h
new file mode 100644 (file)
index 0000000..75ec929
--- /dev/null
@@ -0,0 +1,90 @@
+/*************************************************************************/ /*!
+@File
+@Title          Splay trees interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provides debug functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef UNIQ_KEY_SPLAY_TREE_H_
+#define UNIQ_KEY_SPLAY_TREE_H_
+
+#include "img_types.h"
+#include "pvr_intrinsics.h"
+
+#if defined(PVR_CTZLL)
+  /* Map the per-bucket "is free" booleans onto an integer.
+   * This way the driver can find the first non-empty bucket without a loop.
+   */
+  typedef IMG_UINT64 IMG_ELTS_MAPPINGS;
+#endif
+
+typedef IMG_UINT64 IMG_PSPLAY_FLAGS_T;
+
+/* Head of the list of free boundary tags, indexed by pvr_log2 of the
+   boundary tag size */
+
+#define FREE_TABLE_LIMIT 40
+
+struct _BT_;
+
+typedef struct img_splay_tree
+{
+       /* left child/subtree */
+       struct img_splay_tree *psLeft;
+
+       /* right child/subtree */
+       struct img_splay_tree *psRight;
+
+       /* Flags to match on this span, used as the key. */
+       IMG_PSPLAY_FLAGS_T uiFlags;
+#if defined(PVR_CTZLL)
+       /* each bit of this int is a boolean telling if the corresponding
+          bucket is empty or not */
+       IMG_ELTS_MAPPINGS bHasEltsMapping;
+#endif
+       struct _BT_ *buckets[FREE_TABLE_LIMIT];
+} IMG_SPLAY_TREE, *IMG_PSPLAY_TREE;
+
+IMG_PSPLAY_TREE PVRSRVSplay (IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree);
+IMG_PSPLAY_TREE PVRSRVInsert(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree);
+IMG_PSPLAY_TREE PVRSRVDelete(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree);
+IMG_PSPLAY_TREE PVRSRVFindNode(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree);
+
+
+#endif /* !UNIQ_KEY_SPLAY_TREE_H_ */
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/devices/rogue/rgx_hwperf_table.c b/drivers/gpu/drm/img/img-rogue/services/shared/devices/rogue/rgx_hwperf_table.c
new file mode 100644 (file)
index 0000000..268ba65
--- /dev/null
@@ -0,0 +1,635 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX HW Performance counter table
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX HW Performance counters table
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#include "img_defs.h"
+#include "rgx_fwif_hwperf.h"
+#if defined(__KERNEL__)
+#include "rgxdefs_km.h"
+#else
+#include "rgxdefs.h"
+#endif
+#include "rgx_hwperf_table.h"
+
+/* Includes needed for PVRSRVKM (Server) context */
+#      include "rgx_bvnc_defs_km.h"
+#      if defined(__KERNEL__)
+#              include "rgxdevice.h"
+#      endif
+
+/* Shared compile-time context ASSERT macro */
+#if defined(RGX_FIRMWARE)
+#      include "rgxfw_utils.h"
+/* firmware context */
+#      define DBG_ASSERT(_c) RGXFW_ASSERT((_c))
+#else
+#      include "pvr_debug.h"
+/* host client/server context */
+#      define DBG_ASSERT(_c) PVR_ASSERT((_c))
+#endif
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPowered()
+
+ Referenced in gasCntBlkTypeModel[] table below and only called from
+ RGX_FIRMWARE run-time context. Therefore compile-time configuration is used.
+ *****************************************************************************/
+
+#if defined(RGX_FIRMWARE) && defined(RGX_FEATURE_PERFBUS)
+#      include "rgxfw_pow.h"
+#      include "rgxfw_utils.h"
+
+static bool rgxfw_hwperf_pow_st_direct(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId)
+{
+       PVR_UNREFERENCED_PARAMETER(eBlkType);
+       PVR_UNREFERENCED_PARAMETER(ui8UnitId);
+
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+       /* S7XT: JONES */
+       return (eBlkType == RGX_CNTBLK_ID_JONES);
+#elif defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE)
+       /* S6XT: TA, TORNADO */
+       return true;
+#else
+       /* S6  : TA, HUB, RASTER (RASCAL) */
+       return (gsPowCtl.eUnitsPowState & RGXFW_POW_ST_RD_ON) != 0U;
+#endif
+}
+
+/* Only use conditional compilation when counter blocks appear in different
+ * islands for different Rogue families.
+ */
+static bool rgxfw_hwperf_pow_st_indirect(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId)
+{
+       IMG_UINT32 ui32NumDustsEnabled = rgxfw_pow_get_enabled_units();
+
+       if (((gsPowCtl.eUnitsPowState & RGXFW_POW_ST_RD_ON) != 0U) &&
+                       (ui32NumDustsEnabled > 0U))
+       {
+#if defined(RGX_FEATURE_DYNAMIC_DUST_POWER)
+               IMG_UINT32 ui32NumUscEnabled = ui32NumDustsEnabled*2U;
+
+               switch (eBlkType)
+               {
+               case RGX_CNTBLK_ID_TPU_MCU0:                   /* S6 and S6XT */
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+               case RGX_CNTBLK_ID_TEXAS0:                     /* S7 */
+#endif
+                       if (ui8UnitId >= ui32NumDustsEnabled)
+                       {
+                               return false;
+                       }
+                       break;
+               case RGX_CNTBLK_ID_USC0:                       /* S6, S6XT, S7 */
+               case RGX_CNTBLK_ID_PBE0:                       /* S7, PBE2_IN_XE */
+                       /* Handle single cluster cores */
+                       if (ui8UnitId >= ((ui32NumUscEnabled > RGX_FEATURE_NUM_CLUSTERS) ? RGX_FEATURE_NUM_CLUSTERS : ui32NumUscEnabled))
+                       {
+                               return false;
+                       }
+                       break;
+               case RGX_CNTBLK_ID_BLACKPEARL0:                /* S7 */
+               case RGX_CNTBLK_ID_RASTER0:                    /* S6XT */
+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE)
+               case RGX_CNTBLK_ID_TEXAS0:                     /* S6XT */
+#endif
+                       if (ui8UnitId >= (RGX_REQ_NUM_PHANTOMS(ui32NumUscEnabled)))
+                       {
+                               return false;
+                       }
+                       break;
+               default:
+                       RGXFW_ASSERT(false); /* should never get here, table error */
+                       break;
+               }
+#else
+               /* Always true, no fused DUSTs, all powered so do not check unit */
+               PVR_UNREFERENCED_PARAMETER(eBlkType);
+               PVR_UNREFERENCED_PARAMETER(ui8UnitId);
+#endif
+       }
+       else
+       {
+               return false;
+       }
+       return true;
+}
+
+#else /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */
+
+# define rgxfw_hwperf_pow_st_direct   ((void*)NULL)
+# define rgxfw_hwperf_pow_st_indirect ((void*)NULL)
+
+#endif /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPowered() end
+ *****************************************************************************/
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPresent() start
+
+ Referenced in gasCntBlkTypeModel[] table below and called from all build
+ contexts:
+ RGX_FIRMWARE, PVRSRVCTL (UM) and PVRSRVKM (Server).
+
+ Therefore each function has two implementations, one for compile-time and one
+ for run-time configuration, depending on the context. The functions will inform the
+ caller whether this block is valid for this particular RGX device. Other
+ run-time dependent data is returned in psRtInfo for the caller to use.
+ *****************************************************************************/
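+
+/* Illustrative sketch (hypothetical local names, not a quote of real call
+ * sites): on the server side, a caller holding a block type descriptor and a
+ * device info pointer might query presence and run-time data like this:
+ *
+ *   RGX_HWPERF_CNTBLK_RT_INFO sRtInfo;
+ *   if (psBlkTypeDesc->pfnIsBlkPresent(psBlkTypeDesc, psDevInfo, &sRtInfo))
+ *   {
+ *       // sRtInfo.ui32NumUnits and sRtInfo.ui32IndirectReg are now valid
+ *   }
+ */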
+
+/* Used for block types: USC */
+static IMG_BOOL rgx_hwperf_blk_present_perfbus(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo)
+{
+       DBG_ASSERT(psBlkTypeDesc != NULL);
+       DBG_ASSERT(psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_USC0);
+
+#if defined(__KERNEL__) /* Server context */
+       PVR_ASSERT(pvDev_km != NULL);
+       PVR_ASSERT(pvRtInfo != NULL);
+       {
+               RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo = (RGX_HWPERF_CNTBLK_RT_INFO *) pvRtInfo;
+               const PVRSRV_RGXDEV_INFO *psDevInfo = (const PVRSRV_RGXDEV_INFO *)pvDev_km;
+
+               if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS))
+               {
+                       psRtInfo->ui32NumUnits = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) ? RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS) : 0;
+                       psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg;
+                       return IMG_TRUE;
+               }
+       }
+#else /* FW context */
+       PVR_UNREFERENCED_PARAMETER(pvDev_km);
+       PVR_UNREFERENCED_PARAMETER(pvRtInfo);
+       PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+# if defined(RGX_FEATURE_PERFBUS)
+       return IMG_TRUE;
+# endif
+#endif
+       return IMG_FALSE;
+}
+
+/* Used for block types: Direct RASTERISATION, HUB */
+static IMG_BOOL rgx_hwperf_blk_present_not_clustergrouping(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo)
+{
+       DBG_ASSERT(psBlkTypeDesc != NULL);
+       DBG_ASSERT((psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_RASTER) ||
+                       (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_HUB));
+
+#if defined(__KERNEL__) /* Server context */
+       PVR_ASSERT(pvDev_km != NULL);
+       PVR_ASSERT(pvRtInfo != NULL);
+       {
+               RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo = (RGX_HWPERF_CNTBLK_RT_INFO *) pvRtInfo;
+               const PVRSRV_RGXDEV_INFO *psDevInfo = (const PVRSRV_RGXDEV_INFO *)pvDev_km;
+               if ((!RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING)) &&
+                               (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS)))
+               {
+                       psRtInfo->ui32NumUnits = 1;
+                       psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg;
+                       return IMG_TRUE;
+               }
+       }
+#else /* FW context */
+       PVR_UNREFERENCED_PARAMETER(pvDev_km);
+       PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+       PVR_UNREFERENCED_PARAMETER(pvRtInfo);
+# if !defined(RGX_FEATURE_CLUSTER_GROUPING) && defined(RGX_FEATURE_PERFBUS)
+       return IMG_TRUE;
+# endif
+#endif
+       return IMG_FALSE;
+}
+
+#if defined(__KERNEL__) /* Server context */
+static IMG_UINT32 rgx_units_indirect_by_phantom(const PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg)
+{
+       /* Run-time math for RGX_HWPERF_INDIRECT_BY_PHANTOM */
+       return ((psFeatCfg->ui64Features & RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK) == 0) ? 1
+                       : (psFeatCfg->ui32FeaturesValues[RGX_FEATURE_NUM_CLUSTERS_IDX]+3)/4;
+}
+
+static IMG_UINT32 rgx_units_phantom_indirect_by_dust(const PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg)
+{
+       /* Run-time math for RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST */
+       return MAX((psFeatCfg->ui32FeaturesValues[RGX_FEATURE_NUM_CLUSTERS_IDX]>>1),1);
+}
+
+static IMG_UINT32 rgx_units_phantom_indirect_by_cluster(const PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg)
+{
+       /* Run-time math for RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER */
+       return psFeatCfg->ui32FeaturesValues[RGX_FEATURE_NUM_CLUSTERS_IDX];
+}
+#endif /* defined(__KERNEL__) */
+
+/* Used for block types: TORNADO, TEXAS, Indirect RASTERISATION */
+static IMG_BOOL rgx_hwperf_blk_present_xttop(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo)
+{
+       DBG_ASSERT(psBlkTypeDesc != NULL);
+       DBG_ASSERT((psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TORNADO) ||
+                       (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TEXAS0) ||
+                       (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_RASTER0));
+
+#if defined(__KERNEL__) /* Server context */
+       PVR_ASSERT(pvDev_km != NULL);
+       PVR_ASSERT(pvRtInfo != NULL);
+       {
+               RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo = (RGX_HWPERF_CNTBLK_RT_INFO *) pvRtInfo;
+               const PVRSRV_RGXDEV_INFO *psDevInfo = (const PVRSRV_RGXDEV_INFO *)pvDev_km;
+               if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE))
+               {
+                       if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TORNADO)
+                       {
+                               psRtInfo->ui32NumUnits = 1;
+                               psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg;
+                               return IMG_TRUE;
+                       }
+                       else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TEXAS0)
+                       {
+                               psRtInfo->ui32NumUnits = rgx_units_indirect_by_phantom(&psDevInfo->sDevFeatureCfg);
+                               psRtInfo->ui32IndirectReg = RGX_CR_TEXAS_PERF_INDIRECT;
+                               return IMG_TRUE;
+                       }
+                       else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_RASTER0)
+                       {
+                               psRtInfo->ui32NumUnits = rgx_units_indirect_by_phantom(&psDevInfo->sDevFeatureCfg);
+                               psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg;
+                               return IMG_TRUE;
+                       }
+               }
+       }
+#else /* FW context */
+       PVR_UNREFERENCED_PARAMETER(pvDev_km);
+       PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+       PVR_UNREFERENCED_PARAMETER(pvRtInfo);
+# if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS)
+       return IMG_TRUE;
+# endif
+#endif
+       return IMG_FALSE;
+}
+
+/* Used for block types: JONES, TPU_MCU, TEXAS, BLACKPEARL, PBE */
+static IMG_BOOL rgx_hwperf_blk_present_s7top(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo)
+{
+       DBG_ASSERT(psBlkTypeDesc != NULL);
+       DBG_ASSERT((psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_JONES) ||
+                       (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0) ||
+                       (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TEXAS0) ||
+                       (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_BLACKPEARL0) ||
+                       (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_PBE0));
+
+#if defined(__KERNEL__) /* Server context */
+       PVR_ASSERT(pvDev_km != NULL);
+       PVR_ASSERT(pvRtInfo != NULL);
+       {
+               RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo = (RGX_HWPERF_CNTBLK_RT_INFO *) pvRtInfo;
+               const PVRSRV_RGXDEV_INFO *psDevInfo = (const PVRSRV_RGXDEV_INFO *)pvDev_km;
+               if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+               {
+                       if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0)
+                       {
+                               psRtInfo->ui32NumUnits = rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg);
+                               psRtInfo->ui32IndirectReg = RGX_CR_TPU_PERF_INDIRECT;
+                               return IMG_TRUE;
+                       }
+                       else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TEXAS0)
+                       {
+                               psRtInfo->ui32NumUnits = rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg);
+                               psRtInfo->ui32IndirectReg = RGX_CR_TEXAS3_PERF_INDIRECT;
+                               return IMG_TRUE;
+                       }
+                       else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_BLACKPEARL0)
+                       {
+                               psRtInfo->ui32NumUnits = rgx_units_indirect_by_phantom(&psDevInfo->sDevFeatureCfg);
+                               psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg;
+                               return IMG_TRUE;
+                       }
+                       else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_PBE0)
+                       {
+                               psRtInfo->ui32NumUnits = rgx_units_phantom_indirect_by_cluster(&psDevInfo->sDevFeatureCfg);
+                               psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg;
+                               return IMG_TRUE;
+                       }
+                       else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_JONES)
+                       {
+                               psRtInfo->ui32NumUnits = 1;
+                               psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg;
+                               return IMG_TRUE;
+                       }
+               }
+       }
+#else /* FW context */
+       PVR_UNREFERENCED_PARAMETER(pvDev_km);
+       PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+       PVR_UNREFERENCED_PARAMETER(pvRtInfo);
+# if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+	return IMG_TRUE;
+# endif
+#endif
+       return IMG_FALSE;
+}
+
+/* Used for block types: TA, TPU_MCU. Also PBE when PBE2_IN_XE is present */
+static IMG_BOOL rgx_hwperf_blk_present_not_s7top(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo)
+{
+       DBG_ASSERT(psBlkTypeDesc != NULL);
+       DBG_ASSERT((psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TA) ||
+                       (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0) ||
+                       (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_PBE0));
+
+#if defined(__KERNEL__) /* Server context */
+       PVR_ASSERT(pvDev_km != NULL);
+       PVR_ASSERT(pvRtInfo != NULL);
+       {
+               RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo = (RGX_HWPERF_CNTBLK_RT_INFO *) pvRtInfo;
+               const PVRSRV_RGXDEV_INFO *psDevInfo = (const PVRSRV_RGXDEV_INFO *)pvDev_km;
+               if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE) &&
+                               RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS))
+               {
+                       if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TA)
+                       {
+                               psRtInfo->ui32NumUnits = 1;
+                               psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg;
+                               return IMG_TRUE;
+                       }
+                       else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_PBE0)
+                       {
+                               if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBE2_IN_XE))
+                               {
+                                       /* PBE counters are not present on this config */
+                                       return IMG_FALSE;
+                               }
+                               psRtInfo->ui32NumUnits = rgx_units_phantom_indirect_by_cluster(&psDevInfo->sDevFeatureCfg);
+                               psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg;
+                               return IMG_TRUE;
+                       }
+                       else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0)
+                       {
+                               psRtInfo->ui32NumUnits = rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg);
+                               psRtInfo->ui32IndirectReg = RGX_CR_TPU_MCU_L0_PERF_INDIRECT;
+                               return IMG_TRUE;
+                       }
+               }
+       }
+#else /* FW context */
+       PVR_UNREFERENCED_PARAMETER(pvDev_km);
+       PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+       PVR_UNREFERENCED_PARAMETER(pvRtInfo);
+# if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS)
+#  if !defined(RGX_FEATURE_PBE2_IN_XE)
+       if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_PBE0)
+       {
+               /* No support for PBE counters without PBE2_IN_XE */
+               return IMG_FALSE;
+       }
+#  endif
+       return IMG_TRUE;
+# endif
+#endif
+       return IMG_FALSE;
+}
+
+static IMG_BOOL rgx_hwperf_blk_present_check_s7top_or_not(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo)
+{
+#if defined(__KERNEL__)
+       return (rgx_hwperf_blk_present_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo)
+            || rgx_hwperf_blk_present_not_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo));
+
+#elif defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+       return rgx_hwperf_blk_present_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo);
+
+#elif defined(RGX_FEATURE_PBE2_IN_XE) || defined(RGX_FEATURE_PERFBUS)
+       return rgx_hwperf_blk_present_not_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo);
+#else
+       PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+       PVR_UNREFERENCED_PARAMETER(pvDev_km);
+       PVR_UNREFERENCED_PARAMETER(pvRtInfo);
+       return IMG_FALSE;
+#endif
+}
+
+static IMG_BOOL rgx_hwperf_blk_present_check_s7top_or_xttop(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo)
+{
+#if defined(__KERNEL__)
+       return (rgx_hwperf_blk_present_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo)
+            || rgx_hwperf_blk_present_xttop(psBlkTypeDesc, pvDev_km, pvRtInfo));
+
+#elif defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+       return rgx_hwperf_blk_present_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo);
+
+#elif defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE)
+       return rgx_hwperf_blk_present_xttop(psBlkTypeDesc, pvDev_km, pvRtInfo);
+#else
+       PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+       PVR_UNREFERENCED_PARAMETER(pvDev_km);
+       PVR_UNREFERENCED_PARAMETER(pvRtInfo);
+       return IMG_FALSE;
+#endif
+}
+
+#if !defined(__KERNEL__) /* Firmware or User-mode context */
+static IMG_BOOL rgx_hwperf_blk_present_false(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo)
+{
+       PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+       PVR_UNREFERENCED_PARAMETER(pvDev_km);
+       PVR_UNREFERENCED_PARAMETER(pvRtInfo);
+
+       /* Some functions not used on some BVNCs, silence compiler warnings */
+       PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_perfbus);
+       PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_not_clustergrouping);
+       PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_xttop);
+       PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_s7top);
+       PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_not_s7top);
+       PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_check_s7top_or_not);
+       PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_check_s7top_or_xttop);
+
+       return IMG_FALSE;
+}
+
+/* Used to instantiate a null row in the block type model table below where the
+ * block is not supported for a given build BVNC in firmware/user mode context.
+ * This is needed because the block-ID-to-block-type lookup also uses the table,
+ * and clients may try to access blocks that are not present in the hardware. */
+#define RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(_blkid) {_blkid, 0, 0, 0, 0, 0, 0, 0, 0, #_blkid, NULL, rgx_hwperf_blk_present_false}
+
+#endif
+
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPresent() end
+ *****************************************************************************/
+
+#if defined(__KERNEL__) /* Values will be calculated at run-time */
+#define RGX_HWPERF_NUM_BLOCK_UNITS RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC
+#define RGX_INDIRECT_REG_TEXAS 0xFFFFFFFF
+#define RGX_INDIRECT_REG_TPU 0xFFFFFFFF
+
+#elif defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+#define RGX_HWPERF_NUM_BLOCK_UNITS RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST
+#define RGX_INDIRECT_REG_TEXAS RGX_CR_TEXAS3_PERF_INDIRECT
+#define RGX_INDIRECT_REG_TPU RGX_CR_TPU_PERF_INDIRECT
+
+#else
+
+#if defined(RGX_FEATURE_PERFBUS)
+#define RGX_INDIRECT_REG_TPU RGX_CR_TPU_MCU_L0_PERF_INDIRECT
+#endif
+
+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE)
+#define RGX_HWPERF_NUM_BLOCK_UNITS RGX_HWPERF_INDIRECT_BY_PHANTOM
+#define RGX_INDIRECT_REG_TEXAS RGX_CR_TEXAS_PERF_INDIRECT
+#endif
+
+#endif
+
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] table
+
+ This table holds the entries for the performance counter block type model.
+ Where a block is not present on the RGX device in question, pfnIsBlkPresent()
+ returns false; if the block is valid and present, it returns true.
+ Columns in the table marked with ** hold default values: the value returned
+ in RGX_HWPERF_CNTBLK_RT_INFO when calling pfnIsBlkPresent() should be used
+ at run-time by the caller instead. These columns are only valid for
+ compile-time BVNC-configured contexts.
+
+ Order of table rows must match order of counter block IDs in the enumeration
+ RGX_HWPERF_CNTBLK_ID.
+ *****************************************************************************/
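+
+/* Illustrative sketch (loop body is only an example): the table below is
+ * normally consumed through RGXGetHWPerfBlockConfig(), declared in
+ * rgx_hwperf_table.h and defined at the end of this file. A caller might
+ * enumerate the model like this:
+ *
+ *   const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *pasModel;
+ *   IMG_UINT32 ui32Count = RGXGetHWPerfBlockConfig(&pasModel);
+ *   IMG_UINT32 i;
+ *   for (i = 0; i < ui32Count; i++)
+ *   {
+ *       // e.g. inspect pasModel[i].ui32CntBlkIdBase or pszBlockNameComment
+ *   }
+ */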
+
+static const RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] =
+{
+		/* Columns, in struct field order:
+		 *   ui32CntBlkIdBase, ui32IndirectReg, ui32PerfReg, ui32Select0BaseReg,
+		 *   ui32Counter0BaseReg, ui8NumCounters, ui8NumUnits**, ui8SelectRegModeShift,
+		 *   ui8SelectRegOffsetShift, pszBlockNameComment, pfnIsBlkPowered, pfnIsBlkPresent */
+               /*RGX_CNTBLK_ID_TA*/
+#if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__)
+               {RGX_CNTBLK_ID_TA,       0, /* direct */                RGX_CR_TA_PERF,             RGX_CR_TA_PERF_SELECT0,              RGX_CR_TA_PERF_COUNTER_0,             4,              1,                              21,                  3,  "RGX_CR_TA_PERF",              rgxfw_hwperf_pow_st_direct,   rgx_hwperf_blk_present_not_s7top },
+#else
+               RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TA),
+#endif
+
+               /*RGX_CNTBLK_ID_RASTER*/
+#if !defined(RGX_FEATURE_CLUSTER_GROUPING) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__)
+               {RGX_CNTBLK_ID_RASTER,   0, /* direct */                RGX_CR_RASTERISATION_PERF,  RGX_CR_RASTERISATION_PERF_SELECT0,   RGX_CR_RASTERISATION_PERF_COUNTER_0,  4,              1,                              21,                  3,  "RGX_CR_RASTERISATION_PERF",   rgxfw_hwperf_pow_st_direct,   rgx_hwperf_blk_present_not_clustergrouping },
+#else
+               RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_RASTER),
+#endif
+
+               /*RGX_CNTBLK_ID_HUB*/
+#if !defined(RGX_FEATURE_CLUSTER_GROUPING) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__)
+               {RGX_CNTBLK_ID_HUB,      0, /* direct */                RGX_CR_HUB_BIFPMCACHE_PERF, RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0,  RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0, 4,              1,                              21,                  3,  "RGX_CR_HUB_BIFPMCACHE_PERF",  rgxfw_hwperf_pow_st_direct,   rgx_hwperf_blk_present_not_clustergrouping },
+#else
+               RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_HUB),
+#endif
+
+               /*RGX_CNTBLK_ID_TORNADO*/
+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+               {RGX_CNTBLK_ID_TORNADO,  0, /* direct */                RGX_CR_TORNADO_PERF,        RGX_CR_TORNADO_PERF_SELECT0,         RGX_CR_TORNADO_PERF_COUNTER_0,        4,              1,                              21,                  4,  "RGX_CR_TORNADO_PERF",         rgxfw_hwperf_pow_st_direct,   rgx_hwperf_blk_present_xttop },
+#else
+               RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TORNADO),
+#endif
+
+               /*RGX_CNTBLK_ID_JONES*/
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+               {RGX_CNTBLK_ID_JONES,   0, /* direct */                 RGX_CR_JONES_PERF,          RGX_CR_JONES_PERF_SELECT0,           RGX_CR_JONES_PERF_COUNTER_0,          4,              1,                              21,                  3,  "RGX_CR_JONES_PERF",           rgxfw_hwperf_pow_st_direct,    rgx_hwperf_blk_present_s7top },
+#else
+               RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_JONES),
+#endif
+
+               /*RGX_CNTBLK_ID_TPU_MCU0*/
+#if defined(__KERNEL__) || (defined(RGX_FEATURE_PERFBUS) && !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)) || defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+               {RGX_CNTBLK_ID_TPU_MCU0, RGX_INDIRECT_REG_TPU,      RGX_CR_TPU_MCU_L0_PERF,   RGX_CR_TPU_MCU_L0_PERF_SELECT0,     RGX_CR_TPU_MCU_L0_PERF_COUNTER_0,     4,              RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST,    21,          3,  "RGX_CR_TPU_MCU_L0_PERF",      rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_check_s7top_or_not },
+#else
+               RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TPU_MCU0),
+#endif
+
+               /*RGX_CNTBLK_ID_USC0*/
+#if defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__)
+               {RGX_CNTBLK_ID_USC0,    RGX_CR_USC_PERF_INDIRECT,       RGX_CR_USC_PERF,            RGX_CR_USC_PERF_SELECT0,            RGX_CR_USC_PERF_COUNTER_0,            4,              RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER, 21,          3,  "RGX_CR_USC_PERF",             rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_perfbus },
+#else
+               RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_USC0),
+#endif
+
+               /*RGX_CNTBLK_ID_TEXAS0*/
+#if defined(__KERNEL__) || defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE)
+               {RGX_CNTBLK_ID_TEXAS0,  RGX_INDIRECT_REG_TEXAS,      RGX_CR_TEXAS_PERF,          RGX_CR_TEXAS_PERF_SELECT0,          RGX_CR_TEXAS_PERF_COUNTER_0,          6,              RGX_HWPERF_NUM_BLOCK_UNITS,             31,          3,  "RGX_CR_TEXAS_PERF",           rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_check_s7top_or_xttop },
+#else
+               RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TEXAS0),
+#endif
+
+               /*RGX_CNTBLK_ID_RASTER0*/
+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+               {RGX_CNTBLK_ID_RASTER0, RGX_CR_RASTERISATION_PERF_INDIRECT, RGX_CR_RASTERISATION_PERF, RGX_CR_RASTERISATION_PERF_SELECT0, RGX_CR_RASTERISATION_PERF_COUNTER_0,  4,            RGX_HWPERF_INDIRECT_BY_PHANTOM,         21,          3,  "RGX_CR_RASTERISATION_PERF",   rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_xttop },
+#else
+               RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_RASTER0),
+#endif
+
+               /*RGX_CNTBLK_ID_BLACKPEARL0*/
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+               {RGX_CNTBLK_ID_BLACKPEARL0, RGX_CR_BLACKPEARL_PERF_INDIRECT, RGX_CR_BLACKPEARL_PERF, RGX_CR_BLACKPEARL_PERF_SELECT0,    RGX_CR_BLACKPEARL_PERF_COUNTER_0,     6,              RGX_HWPERF_INDIRECT_BY_PHANTOM,         21,          3,  "RGX_CR_BLACKPEARL_PERF",      rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_s7top },
+#else
+               RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_BLACKPEARL0),
+#endif
+
+               /*RGX_CNTBLK_ID_PBE0*/
+#if defined(__KERNEL__) || defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(RGX_FEATURE_PBE2_IN_XE)
+               {RGX_CNTBLK_ID_PBE0,    RGX_CR_PBE_PERF_INDIRECT,        RGX_CR_PBE_PERF,            RGX_CR_PBE_PERF_SELECT0,            RGX_CR_PBE_PERF_COUNTER_0,            4,              RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER, 21,          3,  "RGX_CR_PBE_PERF",             rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_check_s7top_or_not },
+#else
+               RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_PBE0),
+#endif
+};
+
+
+IMG_INTERNAL IMG_UINT32
+RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **ppsModel)
+{
+       *ppsModel = gasCntBlkTypeModel;
+       return ARRAY_SIZE(gasCntBlkTypeModel);
+}
+
+/******************************************************************************
+ End of file (rgx_hwperf_table.c)
+ ******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/devices/rogue/rgx_hwperf_table.h b/drivers/gpu/drm/img/img-rogue/services/shared/devices/rogue/rgx_hwperf_table.h
new file mode 100644 (file)
index 0000000..449885c
--- /dev/null
@@ -0,0 +1,116 @@
+/*************************************************************************/ /*!
+@File
+@Title          HWPerf counter table header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Utility functions used internally for HWPerf data retrieval
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGX_HWPERF_TABLE_H
+#define RGX_HWPERF_TABLE_H
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "rgx_fwif_hwperf.h"
+#if defined(__KERNEL__)
+#include "rgxdevice.h"
+#endif
+/*****************************************************************************/
+
+/* Forward declaration */
+typedef struct RGXFW_HWPERF_CNTBLK_TYPE_MODEL_ RGXFW_HWPERF_CNTBLK_TYPE_MODEL;
+
+/* Function pointer type for functions that check the dynamic power state of
+ * a counter block instance. Used only in firmware. */
+typedef bool (*PFN_RGXFW_HWPERF_CNTBLK_POWERED)(
+               RGX_HWPERF_CNTBLK_ID eBlkType,
+               IMG_UINT8 ui8UnitId);
+
+#if defined(__KERNEL__)
+/* Counter block run-time info */
+typedef struct
+{
+       IMG_UINT32 ui32IndirectReg;          /* 0 if direct type otherwise the indirect control register to select indirect unit */
+       IMG_UINT32 ui32NumUnits;             /* Number of instances of this block type in the core */
+} RGX_HWPERF_CNTBLK_RT_INFO;
+#endif
+
+/* Function pointer type for functions that check whether a block is valid and
+ * present on the RGX device at run-time. The check may use compile-time or
+ * run-time logic depending on where the code executes: server, srvinit or
+ * firmware. Values in the psRtInfo output parameter are only valid if true is
+ * returned.
+ */
+typedef IMG_BOOL (*PFN_RGXFW_HWPERF_CNTBLK_PRESENT)(
+               const struct RGXFW_HWPERF_CNTBLK_TYPE_MODEL_* psBlkTypeDesc,
+               const void *pvDev_km,
+               void *pvRtInfo);
+
+/* This structure encodes properties of a type of performance counter block.
+ * The structure is sometimes referred to as a block type descriptor. These
+ * properties contained in this structure represent the columns in the block
+ * type model table variable below. These values vary depending on the build
+ * BVNC and core type.
+ * Each direct block has a unique type descriptor and each indirect group has
+ * a type descriptor.
+ */
+struct RGXFW_HWPERF_CNTBLK_TYPE_MODEL_
+{
+	/* Could use RGXFW_ALIGN_DCACHEL here, but that would waste roughly 40% of the cache line. */
+       IMG_UINT32 ui32CntBlkIdBase;         /* The starting block id for this block type */
+       IMG_UINT32 ui32IndirectReg;          /* 0 if direct type otherwise the indirect control register to select indirect unit */
+       IMG_UINT32 ui32PerfReg;              /* RGX_CR_*_PERF register for this block type */
+       IMG_UINT32 ui32Select0BaseReg;       /* RGX_CR_*_PERF_SELECT0 register for this block type */
+       IMG_UINT32 ui32Counter0BaseReg;      /* RGX_CR_*_PERF_COUNTER_0 register for this block type */
+       IMG_UINT8  ui8NumCounters;          /* Number of counters in this block type */
+       IMG_UINT8  ui8NumUnits;             /* Number of instances of this block type in the core */
+       IMG_UINT8  ui8SelectRegModeShift;   /* Mode field shift value of select registers */
+       IMG_UINT8  ui8SelectRegOffsetShift; /* Interval between select registers, either 8 bytes or 16, hence << 3 or << 4 */
+       const IMG_CHAR *pszBlockNameComment;             /* Name of the PERF register. Used while dumping the perf counters to pdumps */
+       PFN_RGXFW_HWPERF_CNTBLK_POWERED pfnIsBlkPowered; /* A function to determine dynamic power state for the block type */
+       PFN_RGXFW_HWPERF_CNTBLK_PRESENT pfnIsBlkPresent; /* A function to determine presence on RGX Device at run-time */
+};
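+
+/* Illustrative sketch (an inference from the field comments above, not taken
+ * from real call sites): one plausible reading of ui8SelectRegOffsetShift is
+ * as the stride between successive SELECT registers of a block, so the
+ * address of SELECTn could be computed as below; uiCounter is a hypothetical
+ * counter index.
+ *
+ *   IMG_UINT32 ui32SelectNReg =
+ *       psBlkTypeDesc->ui32Select0BaseReg +
+ *       (uiCounter << psBlkTypeDesc->ui8SelectRegOffsetShift);
+ */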
+
+/*****************************************************************************/
+
+IMG_INTERNAL IMG_UINT32 RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **ppsModel);
+
+#endif /* RGX_HWPERF_TABLE_H */
+
+/******************************************************************************
+ End of file (rgx_hwperf_table.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/include/allocmem.h b/drivers/gpu/drm/img/img-rogue/services/shared/include/allocmem.h
new file mode 100644 (file)
index 0000000..3de9e67
--- /dev/null
@@ -0,0 +1,224 @@
+/*************************************************************************/ /*!
+@File           allocmem.h
+@Title          memory allocation header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Memory-Allocation API definitions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef ALLOCMEM_H
+#define ALLOCMEM_H
+
+#include "img_types.h"
+#include "pvr_debug.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * PVRSRV_ENABLE_PROCESS_STATS enables process statistics regarding events,
+ *     resources and memory across all processes
+ * PVRSRV_ENABLE_MEMORY_STATS enables recording of Linux kernel memory
+ *     allocations, provided that PVRSRV_ENABLE_PROCESS_STATS is enabled
+ *   - Output can be found in:
+ *     /(sys/kernel/debug|proc)/pvr/proc_stats/[live|retired]_pids_stats/mem_area
+ * PVRSRV_DEBUG_LINUX_MEMORY_STATS provides more details about memory
+ *     statistics in conjunction with PVRSRV_ENABLE_MEMORY_STATS
+ * PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON is defined to encompass both memory
+ *     allocation statistics functionalities described above in a single macro
+ */
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS) && defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG)
+#define PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON
+#endif
+
+/*
+ * When using detailed memory allocation statistics, the line number and
+ * file name where the allocation happened are also provided.
+ * When this feature is not used, these parameters are not needed.
+ */
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON)
+#define DEBUG_MEMSTATS_PARAMS ,void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine
+#define DEBUG_MEMSTATS_ARGS   ,pvAllocFromFile, ui32AllocFromLine
+#define DEBUG_MEMSTATS_UNREF  (void)pvAllocFromFile; (void)ui32AllocFromLine;
+#define DEBUG_MEMSTATS_VALUES ,__FILE__, __LINE__
+#else
+#define DEBUG_MEMSTATS_PARAMS /*!<
+                                 * Used for PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON
+                                 * build option. */
+#define DEBUG_MEMSTATS_ARGS   /*!<
+                                 * Used for PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON
+                                 * build option. */
+#define DEBUG_MEMSTATS_UNREF  /*!<
+                                 * Used for PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON
+                                 * build option. */
+#define DEBUG_MEMSTATS_VALUES  /*!<
+                                 * Used for PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON
+                                 * build option. */
+#endif
+
+
+/**************************************************************************/ /*!
+@Function       OSAllocMem
+@Description    Allocates CPU memory. Contents are uninitialized.
+                If passed a size of zero, function should not assert,
+                but just return a NULL pointer.
+@Input          ui32Size        Size of required allocation (in bytes)
+@Return         Pointer to allocated memory on success.
+                Otherwise NULL.
+ */ /**************************************************************************/
+#if defined(DOXYGEN)
+void *OSAllocMem(IMG_UINT32 ui32Size);
+#else
+void *OSAllocMem(IMG_UINT32 ui32Size DEBUG_MEMSTATS_PARAMS);
+#define OSAllocMem(_size)      (OSAllocMem)((_size) DEBUG_MEMSTATS_VALUES)
+#endif
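+
+/* Illustrative note on the wrapping macro above: with
+ * PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON defined, a call such as
+ *
+ *   pvBuf = OSAllocMem(1024);
+ *
+ * expands to
+ *
+ *   pvBuf = (OSAllocMem)((1024) ,__FILE__, __LINE__);
+ *
+ * so the call site's file and line are recorded; without that option the
+ * extra arguments vanish and only the size is passed.
+ */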
+
+/**************************************************************************/ /*!
+@Function       OSAllocZMem
+@Description    Allocates CPU memory and initializes the contents to zero.
+                If passed a size of zero, function should not assert,
+                but just return a NULL pointer.
+@Input          ui32Size        Size of required allocation (in bytes)
+@Return         Pointer to allocated memory on success.
+                Otherwise NULL.
+ */ /**************************************************************************/
+#if defined(DOXYGEN)
+void *OSAllocZMem(IMG_UINT32 ui32Size);
+#else
+void *OSAllocZMem(IMG_UINT32 ui32Size DEBUG_MEMSTATS_PARAMS);
+#define OSAllocZMem(_size)     (OSAllocZMem)((_size) DEBUG_MEMSTATS_VALUES)
+#endif
+
+
+/**************************************************************************/ /*!
+@Function       OSAllocMemNoStats
+@Description    Allocates CPU memory. Contents are uninitialized.
+                 If passed a size of zero, function should not assert,
+                 but just return a NULL pointer.
+                 The allocated memory is not accounted for by process stats.
+                 Process stats are an optional feature (enabled only when
+                 PVRSRV_ENABLE_PROCESS_STATS is defined) which track the amount
+                 of memory allocated to help in debugging. Where this is not
+                 required, OSAllocMem() and OSAllocMemNoStats() equate to
+                 the same operation.
+@Input          ui32Size        Size of required allocation (in bytes)
+@Return         Pointer to allocated memory on success.
+                Otherwise NULL.
+ */ /**************************************************************************/
+void *OSAllocMemNoStats(IMG_UINT32 ui32Size);
+
+/**************************************************************************/ /*!
+@Function       OSAllocZMemNoStats
+@Description    Allocates CPU memory and initializes the contents to zero.
+                 If passed a size of zero, function should not assert,
+                 but just return a NULL pointer.
+                 The allocated memory is not accounted for by process stats.
+                 Process stats are an optional feature (enabled only when
+                 PVRSRV_ENABLE_PROCESS_STATS is defined) which track the amount
+                 of memory allocated to help in debugging. Where this is not
+                 required, OSAllocZMem() and OSAllocZMemNoStats() equate to
+                 the same operation.
+@Input          ui32Size        Size of required allocation (in bytes)
+@Return         Pointer to allocated memory on success.
+                Otherwise NULL.
+ */ /**************************************************************************/
+void *OSAllocZMemNoStats(IMG_UINT32 ui32Size);
+
+/**************************************************************************/ /*!
+@Function       OSFreeMem
+@Description    Frees previously allocated CPU memory.
+@Input          pvCpuVAddr       Pointer to the memory to be freed.
+@Return         None.
+ */ /**************************************************************************/
+void OSFreeMem(void *pvCpuVAddr);
+
+/**************************************************************************/ /*!
+@Function       OSFreeMemNoStats
+@Description    Frees previously allocated CPU memory.
+                 The freed memory does not update the figures in process stats.
+                 Process stats are an optional feature (enabled only when
+                 PVRSRV_ENABLE_PROCESS_STATS is defined) which track the amount
+                 of memory allocated to help in debugging. Where this is not
+                 required, OSFreeMem() and OSFreeMemNoStats() equate to the
+                 same operation.
+@Input          pvCpuVAddr       Pointer to the memory to be freed.
+@Return         None.
+ */ /**************************************************************************/
+void OSFreeMemNoStats(void *pvCpuVAddr);
+
+/*
+ * These macros allow us to catch double-free bugs on DEBUG builds and
+ * prevent crashes on RELEASE builds.
+ */
+
+/*! @cond Doxygen_Suppress */
+#if defined(DEBUG)
+#define double_free_sentinel ((void *)&OSFreeMem)
+#define ALLOCMEM_ASSERT(exp) PVR_ASSERT(exp)
+#else
+#define double_free_sentinel NULL
+#define ALLOCMEM_ASSERT(exp) do {} while (0)
+#endif
+/*! @endcond */
+
+/*! Frees memory allocated by OSAllocMem(). */
+#define OSFreeMem(_ptr) do { \
+               ALLOCMEM_ASSERT((_ptr) != double_free_sentinel); \
+               (OSFreeMem)(_ptr); \
+               (_ptr) = double_free_sentinel; \
+               MSC_SUPPRESS_4127 \
+       } while (0)
+
+/*! Frees memory allocated by OSAllocMemNoStats(). */
+#define OSFreeMemNoStats(_ptr) do { \
+               ALLOCMEM_ASSERT((_ptr) != double_free_sentinel); \
+               (OSFreeMemNoStats)(_ptr); \
+               (_ptr) = double_free_sentinel; \
+               MSC_SUPPRESS_4127 \
+       } while (0)
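+
+/* Illustrative sketch (pvBuf is a hypothetical pointer): because the
+ * OSFreeMem() macro overwrites the freed pointer with double_free_sentinel,
+ * an accidental second free is caught by ALLOCMEM_ASSERT() on DEBUG builds.
+ *
+ *   void *pvBuf = OSAllocMem(64);
+ *   if (pvBuf != NULL)
+ *   {
+ *       OSFreeMem(pvBuf);   // pvBuf now holds the sentinel value
+ *       OSFreeMem(pvBuf);   // asserts on DEBUG builds
+ *   }
+ */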
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* ALLOCMEM_H */
+
+/******************************************************************************
+ End of file (allocmem.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/include/device_connection.h b/drivers/gpu/drm/img/img-rogue/services/shared/include/device_connection.h
new file mode 100644 (file)
index 0000000..2491774
--- /dev/null
@@ -0,0 +1,123 @@
+/*************************************************************************/ /*!
+@File           device_connection.h
+@Title
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(DEVICE_CONNECTION_H)
+#define DEVICE_CONNECTION_H
+
+#include "img_types.h"
+#include "img_defs.h"
+
+#if defined(__KERNEL__)
+typedef struct _PVRSRV_DEVICE_NODE_ *SHARED_DEV_CONNECTION;
+#else
+#include "connection.h"
+typedef const struct PVRSRV_DEV_CONNECTION_TAG *SHARED_DEV_CONNECTION;
+#endif
+
+/******************************************************************************
+ * Device capability flags and masks
+ *
+ * Following bitmask shows allocated ranges and values for our device
+ * capability settings:
+ *
+ * 31 27  23  19  15  11   7   3  0
+ * |...|...|...|...|...|...|...|...
+ *                               ** CACHE_COHERENT                   [0x1..0x2]
+ *                                x  PVRSRV_CACHE_COHERENT_DEVICE_FLAG
+ *                               x.  PVRSRV_CACHE_COHERENT_CPU_FLAG
+ *                             *... NONMAPPABLE_MEMORY                    [0x8]
+ *                             x...  PVRSRV_NONMAPPABLE_MEMORY_PRESENT_FLAG
+ *                            *.... PDUMP_IS_RECORDING                   [0x10]
+ *                            x....  PVRSRV_PDUMP_IS_RECORDING
+ *                      ***........ DEVMEM_SVM_ALLOC             [0x100..0x400]
+ *                        x........  PVRSRV_DEVMEM_SVM_ALLOC_UNSUPPORTED
+ *                       x.........  PVRSRV_DEVMEM_SVM_ALLOC_SUPPORTED
+ *                      x..........  PVRSRV_DEVMEM_SVM_ALLOC_CANFAIL
+ *                     *........... FBCDC_V3_1             [0x800]
+ *                     x...........  FBCDC_V3_1_USED
+ *                    *............ PVRSRV_SYSTEM_DMA
+ *                    x............  PVRSRV_SYSTEM_DMA_USED
+ * |...|...|...|...|...|...|...|...
+ *****************************************************************************/
+
+/* Flag to be passed over the bridge during connection stating whether CPU cache coherency is available */
+#define PVRSRV_CACHE_COHERENT_SHIFT (0)
+#define        PVRSRV_CACHE_COHERENT_DEVICE_FLAG (1U << PVRSRV_CACHE_COHERENT_SHIFT)
+#define        PVRSRV_CACHE_COHERENT_CPU_FLAG (2U << PVRSRV_CACHE_COHERENT_SHIFT)
+#define        PVRSRV_CACHE_COHERENT_EMULATE_FLAG (4U << PVRSRV_CACHE_COHERENT_SHIFT)
+#define PVRSRV_CACHE_COHERENT_MASK (7U << PVRSRV_CACHE_COHERENT_SHIFT)
+
+/* Flag to be passed over the bridge during connection stating whether CPU non-mappable memory is present */
+#define PVRSRV_NONMAPPABLE_MEMORY_PRESENT_SHIFT (7)
+#define PVRSRV_NONMAPPABLE_MEMORY_PRESENT_FLAG (1U << PVRSRV_NONMAPPABLE_MEMORY_PRESENT_SHIFT)
+
+/* Flag to be passed over the bridge to indicate PDump activity */
+#define PVRSRV_PDUMP_IS_RECORDING_SHIFT (4)
+#define PVRSRV_PDUMP_IS_RECORDING (1U << PVRSRV_PDUMP_IS_RECORDING_SHIFT)
+
+/* Flag to be passed over the bridge during connection stating SVM allocation availability */
+#define PVRSRV_DEVMEM_SVM_ALLOC_SHIFT (8)
+#define PVRSRV_DEVMEM_SVM_ALLOC_UNSUPPORTED (1U << PVRSRV_DEVMEM_SVM_ALLOC_SHIFT)
+#define PVRSRV_DEVMEM_SVM_ALLOC_SUPPORTED (2U << PVRSRV_DEVMEM_SVM_ALLOC_SHIFT)
+#define PVRSRV_DEVMEM_SVM_ALLOC_CANFAIL (4U << PVRSRV_DEVMEM_SVM_ALLOC_SHIFT)
+
+/* Flag to be passed over the bridge during connection stating whether GPU uses FBCDC v3.1 */
+#define PVRSRV_FBCDC_V3_1_USED_SHIFT (11)
+#define PVRSRV_FBCDC_V3_1_USED (1U << PVRSRV_FBCDC_V3_1_USED_SHIFT)
+
+/* Flag to be passed over the bridge during connection stating whether System has
+   DMA transfer capability to and from device memory */
+#define PVRSRV_SYSTEM_DMA_SHIFT (12)
+#define PVRSRV_SYSTEM_DMA_USED (1U << PVRSRV_SYSTEM_DMA_SHIFT)
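+
+/* Illustrative sketch (ui32Capabilities is a hypothetical variable holding
+ * the capability word passed over the bridge at connection time): individual
+ * capabilities can be tested with the flags defined above, e.g.
+ *
+ *   if (ui32Capabilities & PVRSRV_CACHE_COHERENT_CPU_FLAG)
+ *   {
+ *       // the CPU side of the connection reports cache coherency
+ *   }
+ */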
+
+static INLINE IMG_HANDLE GetBridgeHandle(SHARED_DEV_CONNECTION hDevConnection)
+{
+#if defined(__KERNEL__)
+    return hDevConnection;
+#else
+    return hDevConnection->hServices;
+#endif
+}
+
+
+#endif /* !defined(DEVICE_CONNECTION_H) */
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/include/devicemem.h b/drivers/gpu/drm/img/img-rogue/services/shared/include/devicemem.h
new file mode 100644 (file)
index 0000000..1466eb3
--- /dev/null
@@ -0,0 +1,730 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management core internal
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Services internal interface to core device memory management
+                functions that are shared between client and server code.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef SRVCLIENT_DEVICEMEM_H
+#define SRVCLIENT_DEVICEMEM_H
+
+/******************************************************************************
+ *                                                                            *
+ *  +------------+   +------------+      +--------------+   +--------------+  *
+ *  | a   sub-   |   | a   sub-   |      |  an          |   | allocation   |  *
+ *  | allocation |   | allocation |      |  allocation  |   | also mapped  |  *
+ *  |            |   |            |      |  in proc 1   |   | into proc 2  |  *
+ *  +------------+   +------------+      +--------------+   +--------------+  *
+ *            |         |                       |                  |          *
+ *         +--------------+              +--------------+   +--------------+  *
+ *         | page   gran- |              | page   gran- |   | page   gran- |  *
+ *         | ular mapping |              | ular mapping |   | ular mapping |  *
+ *         +--------------+              +--------------+   +--------------+  *
+ *                |                                   |       |               *
+ *                |                                   |       |               *
+ *                |                                   |       |               *
+ *         +--------------+                       +--------------+            *
+ *         |              |                       |              |            *
+ *         | A  "P.M.R."  |                       | A  "P.M.R."  |            *
+ *         |              |                       |              |            *
+ *         +--------------+                       +--------------+            *
+ *                                                                            *
+ ******************************************************************************/
+
+/*
+    All device memory allocations are ultimately a view upon (not
+    necessarily the whole of) a "PMR".
+
+    A PMR is a "Physical Memory Resource", which may be a
+    "pre-faulted" lump of physical memory, or it may be a
+    representation of some physical memory that will be instantiated
+    at some future time.
+
+    PMRs always represent a multiple of some power-of-2 "contiguity"
+    promised by the PMR, which will allow them to be mapped in whole
+    pages into the device MMU.  As memory allocations may be smaller
+    than a page, these mappings may be suballocated and thus shared
+    between multiple allocations in one process.  A PMR may also be
+    mapped simultaneously into multiple device memory contexts
+    (cross-process scenario), however, for security reasons, it is not
+    legal to share a PMR "both ways" at once, that is, mapped into
+    multiple processes and divided up amongst several suballocations.
+
+    This PMR terminology is introduced here for background
+    information, but is generally of little concern to the caller of
+    this API.  This API handles suballocations and mappings, and the
+    caller thus deals primarily with MEMORY DESCRIPTORS representing
+    an allocation or suballocation, and with HEAPS representing ranges of
+    virtual addresses in a CONTEXT.
+*/
+
+/*
+   |<---------------------------context------------------------------>|
+   |<-------heap------->|   |<-------heap------->|<-------heap------->|
+   |<-alloc->|          |   |<-alloc->|<-alloc->||   |<-alloc->|      |
+*/
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "devicemem_typedefs.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+#include "pdump.h"
+
+#include "device_connection.h"
+
+
+typedef IMG_UINT32 DEVMEM_HEAPCFGID;
+#define DEVMEM_HEAPCFG_FORCLIENTS 0
+#define DEVMEM_HEAPCFG_META 1
+
+
+/*
+  In order to call the server side functions, we need a bridge handle.
+  We abstract that here, as we may wish to change its form.
+ */
+
+typedef IMG_HANDLE DEVMEM_BRIDGE_HANDLE;
+
+/*************************************************************************/ /*!
+@Function       DevmemUnpin
+@Description    This is the counterpart to DevmemPin(). It is meant to be
+                called before repinning an allocation.
+
+                For a detailed description see client API documentation.
+
+@Input          phMemDesc       The MemDesc that is going to be unpinned.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success and the memory is
+                                registered to be reclaimed. Error otherwise.
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+DevmemUnpin(DEVMEM_MEMDESC *psMemDesc);
+
+/*************************************************************************/ /*!
+@Function       DevmemPin
+@Description    This is the counterpart to DevmemUnpin(). It is meant to be
+                called after unpinning an allocation.
+
+                For a detailed description see client API documentation.
+
+@Input          phMemDesc       The MemDesc that is going to be pinned.
+
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success and the allocation content
+                                was successfully restored.
+
+                                PVRSRV_ERROR_PMR_NEW_MEMORY when the content
+                                could not be restored and new physical memory
+                                was allocated.
+
+                                A different error otherwise.
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+DevmemPin(DEVMEM_MEMDESC *psMemDesc);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetHeapInt(DEVMEM_HEAP *psHeap,
+                                IMG_HANDLE *phDevmemHeap);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetSize(DEVMEM_MEMDESC *psMemDesc,
+                         IMG_DEVMEM_SIZE_T* puiSize);
+
+IMG_INTERNAL void
+DevmemGetAnnotation(DEVMEM_MEMDESC *psMemDesc,
+                               IMG_CHAR **pszAnnotation);
+
+/*
+ * DevmemCreateContext()
+ *
+ * Create a device memory context
+ *
+ * This must be called before any heap is created in this context
+ *
+ * Caller to provide bridge handle which will be recorded internally and used
+ * for all future operations on items from this memory context.  Caller also
+ * to provide devicenode handle, as this is used for MMU configuration and
+ * also to determine the heap configuration for the auto-instantiated heaps.
+ *
+ * Note that when compiled in services/server, the hBridge is not used and
+ * is thrown away by the "fake" direct bridge.  (This may change. It is
+ * recommended that NULL be passed for the handle for now.)
+ *
+ * hDeviceNode and uiHeapBlueprintID shall together dictate which heap-config
+ * to use.
+ *
+ * This will cause the server side counterpart to be created also.
+ *
+ * If you call DevmemCreateContext() (and the call succeeds) you are promising
+ * that you will later call DevmemDestroyContext(), except for abnormal
+ * process termination in which case it is expected it will be destroyed as
+ * part of handle clean up.
+ *
+ * Caller to provide storage for the pointer to the newly created
+ * DEVMEM_CONTEXT object.
+ */
+PVRSRV_ERROR
+DevmemCreateContext(SHARED_DEV_CONNECTION hDevConnection,
+                    DEVMEM_HEAPCFGID uiHeapBlueprintID,
+                    DEVMEM_CONTEXT **ppsCtxPtr);
+
+/*
+ * DevmemAcquireDevPrivData()
+ *
+ * Acquire the device private data for this memory context
+ */
+PVRSRV_ERROR
+DevmemAcquireDevPrivData(DEVMEM_CONTEXT *psCtx,
+                         IMG_HANDLE *hPrivData);
+
+/*
+ * DevmemReleaseDevPrivData()
+ *
+ * Release the device private data for this memory context
+ */
+PVRSRV_ERROR
+DevmemReleaseDevPrivData(DEVMEM_CONTEXT *psCtx);
+
+/*
+ * DevmemDestroyContext()
+ *
+ * Undoes that done by DevmemCreateContext()
+ */
+PVRSRV_ERROR
+DevmemDestroyContext(DEVMEM_CONTEXT *psCtx);
+
+/*
+ * DevmemCreateHeap()
+ *
+ * Create a heap in the given context.
+ *
+ * N.B.  Not intended to be called directly, though it can be.
+ * Normally, heaps are instantiated at context creation time according
+ * to the specified blueprint.  See DevmemCreateContext() for details.
+ *
+ * This will cause MMU code to set up data structures for the heap,
+ * but may not cause page tables to be modified until allocations are
+ * made from the heap.
+ *
+ * uiReservedRegionLength Reserved address space for static VAs shared
+ * between clients and firmware
+ *
+ * The "Quantum" is both the device MMU page size to be configured for
+ * this heap, and the unit multiples of which "quantized" allocations
+ * are made (allocations smaller than this, known as "suballocations",
+ * will be made from a "sub alloc RA" and will "import" chunks
+ * according to this quantum).
+ *
+ * Where imported PMRs (or, for example, PMRs created by device class
+ * buffers) are mapped into this heap, it is important that the
+ * physical contiguity guarantee offered by the PMR is greater than or
+ * equal to the quantum size specified here, otherwise the attempt to
+ * map it will fail.  "Normal" allocations via DevmemAllocate()
+ * shall automatically meet this requirement, as each "import" will
+ * trigger the creation of a PMR with the desired contiguity.  The
+ * supported quantum sizes in that case shall be dictated by the
+ * OS-specific implementation of PhysmemNewOSRamBackedPMR().
+ */
+PVRSRV_ERROR
+DevmemCreateHeap(DEVMEM_CONTEXT *psCtxPtr,
+                 /* base and length of heap */
+                 IMG_DEV_VIRTADDR sBaseAddress,
+                 IMG_DEVMEM_SIZE_T uiLength,
+                 IMG_DEVMEM_SIZE_T uiReservedRegionLength,
+                 /* log2 of allocation quantum, i.e. "page" size.
+                    All allocations (that go to server side) are
+                    multiples of this.  We use a client-side RA to
+                    make sub-allocations from this */
+                 IMG_UINT32 ui32Log2Quantum,
+                 /* The minimum import alignment for this heap */
+                 IMG_UINT32 ui32Log2ImportAlignment,
+                 /* Name of heap for debug */
+                 /* N.B.  Okay to exist on caller's stack - this
+                    func takes a copy if it needs it. */
+                 const IMG_CHAR *pszName,
+                 DEVMEM_HEAPCFGID uiHeapBlueprintID,
+                 DEVMEM_HEAP **ppsHeapPtr);
+/*
+ * DevmemDestroyHeap()
+ *
+ * Reverses DevmemCreateHeap()
+ *
+ * N.B. All allocations must have been freed and all mappings must
+ * have been unmapped before invoking this call
+ */
+PVRSRV_ERROR
+DevmemDestroyHeap(DEVMEM_HEAP *psHeap);
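+
+/*
+   Illustrative sketch of explicit heap creation and destruction
+   (normally heaps are instantiated from the blueprint at context
+   creation, see DevmemCreateContext()).  The base address, length and
+   4KB quantum are hypothetical values for the example only, and
+   IMG_DEV_VIRTADDR is assumed to wrap a 64-bit address; error handling
+   is elided.
+
+       DEVMEM_HEAP *psHeap;
+       IMG_DEV_VIRTADDR sBase = { 0x100000000ULL };
+
+       DevmemCreateHeap(psCtx,
+                        sBase,                    // heap base device VA
+                        0x40000000,               // heap length: 1GB
+                        0,                        // no reserved region
+                        12,                       // log2 quantum: 4KB
+                        0,                        // no extra import alignment
+                        "ExampleHeap",
+                        DEVMEM_HEAPCFG_FORCLIENTS,
+                        &psHeap);
+       ...
+       DevmemDestroyHeap(psHeap);
+*/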
+
+/*
+ * DevmemExportalignAdjustSizeAndAlign()
+ * Computes the Size and Align to pass in order to avoid suballocations
+ * (used when allocating with PVRSRV_MEMALLOCFLAG_EXPORTALIGN).
+ *
+ * Returns PVRSRV_ERROR_INVALID_PARAMS if uiLog2Quantum has an invalid value.
+ */
+IMG_INTERNAL PVRSRV_ERROR
+DevmemExportalignAdjustSizeAndAlign(IMG_UINT32 uiLog2Quantum,
+                                    IMG_DEVMEM_SIZE_T *puiSize,
+                                    IMG_DEVMEM_ALIGN_T *puiAlign);
+
+/*
+ * DevmemSubAllocate()
+ *
+ * Makes an allocation (possibly a "suballocation", as described
+ * below) of device virtual memory from this heap.
+ *
+ * The size and alignment of the allocation will be honoured by the RA
+ * that allocates the "suballocation".  The resulting allocation will
+ * be mapped into GPU virtual memory and the physical memory to back
+ * it will exist, by the time this call successfully completes.
+ *
+ * The size must be a positive integer multiple of the alignment.
+ * (i.e. the alignment specifies the alignment of both the start and
+ * the end of the resulting allocation.)
+ *
+ * Allocations made via this API are routed through a "suballocation
+ * RA" which is responsible for ensuring that small allocations can be
+ * made without wasting physical memory in the server.  Furthermore,
+ * such suballocations can be made entirely client side without
+ * needing to go to the server unless the allocation spills into a new
+ * page.
+ *
+ * Such suballocations cause many allocations to share the same "PMR".
+ * This happens only when the flags match exactly.
+ *
+ */
+
+PVRSRV_ERROR
+DevmemSubAllocate(IMG_UINT8 uiPreAllocMultiplier,
+                  DEVMEM_HEAP *psHeap,
+                  IMG_DEVMEM_SIZE_T uiSize,
+                  IMG_DEVMEM_ALIGN_T uiAlign,
+                  PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                  const IMG_CHAR *pszText,
+                  DEVMEM_MEMDESC **ppsMemDescPtr);
+
+#define DevmemAllocate(...) \
+    DevmemSubAllocate(DEVMEM_NO_PRE_ALLOCATE_MULTIPLIER, __VA_ARGS__)
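+
+/*
+   Illustrative sketch: a small suballocation made through the
+   DevmemAllocate() convenience macro.  The flag names are assumed to
+   come from pvrsrv_memallocflags.h and are for illustration only;
+   error handling is elided.
+
+       DEVMEM_MEMDESC *psMemDesc;
+       PVRSRV_MEMALLOCFLAGS_T uiFlags = PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                                        PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE;
+
+       // 256 bytes at 16-byte alignment: small enough to be suballocated
+       // from a shared PMR rather than importing a whole new page.
+       DevmemAllocate(psHeap, 256, 16, uiFlags, "example suballoc", &psMemDesc);
+*/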
+
+PVRSRV_ERROR
+DevmemAllocateExportable(SHARED_DEV_CONNECTION hDevConnection,
+                         IMG_DEVMEM_SIZE_T uiSize,
+                         IMG_DEVMEM_ALIGN_T uiAlign,
+                         IMG_UINT32 uiLog2HeapPageSize,
+                         PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                         const IMG_CHAR *pszText,
+                         DEVMEM_MEMDESC **ppsMemDescPtr);
+
+PVRSRV_ERROR
+DeviceMemChangeSparse(DEVMEM_MEMDESC *psMemDesc,
+                      IMG_UINT32 ui32AllocPageCount,
+                      IMG_UINT32 *paui32AllocPageIndices,
+                      IMG_UINT32 ui32FreePageCount,
+                      IMG_UINT32 *pauiFreePageIndices,
+                      SPARSE_MEM_RESIZE_FLAGS uiFlags);
+
+PVRSRV_ERROR
+DevmemAllocateSparse(SHARED_DEV_CONNECTION hDevConnection,
+                     IMG_DEVMEM_SIZE_T uiSize,
+                     IMG_DEVMEM_SIZE_T uiChunkSize,
+                     IMG_UINT32 ui32NumPhysChunks,
+                     IMG_UINT32 ui32NumVirtChunks,
+                     IMG_UINT32 *pui32MappingTable,
+                     IMG_DEVMEM_ALIGN_T uiAlign,
+                     IMG_UINT32 uiLog2HeapPageSize,
+                     PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                     const IMG_CHAR *pszText,
+                     DEVMEM_MEMDESC **ppsMemDescPtr);
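+
+/*
+   Illustrative sketch of a sparse allocation in which only 2 of 4
+   virtual chunks receive physical backing; the mapping table is
+   assumed to list the indices of the backed virtual chunks.  Values
+   are hypothetical and error handling is elided.
+
+       IMG_UINT32 aui32Mapping[] = { 0, 2 };   // back chunks 0 and 2 only
+       DEVMEM_MEMDESC *psMemDesc;
+
+       DevmemAllocateSparse(hDevConnection,
+                            4 * 4096,           // total virtual size
+                            4096,               // chunk size
+                            2,                  // physically backed chunks
+                            4,                  // virtual chunks
+                            aui32Mapping,
+                            4096,               // alignment
+                            12,                 // log2 heap page size
+                            uiFlags,
+                            "sparse example",
+                            &psMemDesc);
+*/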
+
+PVRSRV_ERROR
+DevmemSubAllocateAndMap(IMG_UINT8 uiPreAllocMultiplier,
+                       DEVMEM_HEAP *psHeap,
+                       IMG_DEVMEM_SIZE_T uiSize,
+                       IMG_DEVMEM_ALIGN_T uiAlign,
+                       PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                       const IMG_CHAR *pszText,
+                       DEVMEM_MEMDESC **ppsMemDescPtr,
+                       IMG_DEV_VIRTADDR *psDevVirtAddr);
+
+#define DevmemAllocateAndMap(...) \
+       DevmemSubAllocateAndMap(DEVMEM_NO_PRE_ALLOCATE_MULTIPLIER, __VA_ARGS__)
+
+/*
+ * DevmemFree()
+ *
+ * Reverses that done by DevmemSubAllocate().  N.B.  The underlying
+ * mapping and server-side allocation _may_ not be torn down, for
+ * example, if the allocation has been exported, or if multiple
+ * allocations were suballocated from the same mapping, but this is
+ * properly refcounted, so the caller does not have to care.
+ */
+
+IMG_BOOL
+DevmemFree(DEVMEM_MEMDESC *psMemDesc);
+
+IMG_BOOL
+DevmemReleaseDevAddrAndFree(DEVMEM_MEMDESC *psMemDesc);
+
+/*
+       DevmemMapToDevice:
+
+       Map an allocation to the device it was allocated from.
+       This function _must_ be called before any call to
+       DevmemAcquireDevVirtAddr is made as it binds the allocation
+       to the heap.
+       DevmemReleaseDevVirtAddr is used to release the reference
+       to the device mapping this function created, but it doesn't
+       mean that the memory will actually be unmapped from the
+       device as other references to the mapping obtained via
+       DevmemAcquireDevVirtAddr could still be active.
+*/
+PVRSRV_ERROR DevmemMapToDevice(DEVMEM_MEMDESC *psMemDesc,
+                                                          DEVMEM_HEAP *psHeap,
+                                                          IMG_DEV_VIRTADDR *psDevVirtAddr);
+
+/*
+       DevmemMapToDeviceAddress:
+
+       Same as DevmemMapToDevice but the caller chooses the address
+       to map to.
+*/
+IMG_INTERNAL PVRSRV_ERROR
+DevmemMapToDeviceAddress(DEVMEM_MEMDESC *psMemDesc,
+                         DEVMEM_HEAP *psHeap,
+                         IMG_DEV_VIRTADDR sDevVirtAddr);
+
+/*
+       DevmemGetDevVirtAddr
+
+       Obtain the MemDesc's device virtual address.
+       This function _must_ be called after DevmemMapToDevice(Address)
+	and is expected to be used by functions which didn't allocate
+	the MemDesc but need to know its address.
+	It will PVR_ASSERT if no device mapping exists, and 0 is returned.
+ */
+IMG_DEV_VIRTADDR
+DevmemGetDevVirtAddr(DEVMEM_MEMDESC *psMemDesc);
+
+/*
+       DevmemAcquireDevVirtAddr
+
+       Acquire the MemDesc's device virtual address.
+       This function _must_ be called after DevmemMapToDevice
+	and is expected to be used by functions which didn't allocate
+	the MemDesc but need to know its address.
+ */
+PVRSRV_ERROR DevmemAcquireDevVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+                                      IMG_DEV_VIRTADDR *psDevVirtAddrRet);
+
+/*
+ * DevmemReleaseDevVirtAddr()
+ *
+ * give up the licence to use the device virtual address that was
+ * acquired by "Acquire" or "MapToDevice"
+ */
+void
+DevmemReleaseDevVirtAddr(DEVMEM_MEMDESC *psMemDesc);
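+
+/*
+   Illustrative sketch of the device-VA lifecycle described above:
+   DevmemMapToDevice() binds the allocation to a heap and takes the
+   first reference, DevmemAcquireDevVirtAddr() takes further
+   references, and each reference is dropped with
+   DevmemReleaseDevVirtAddr().  Error handling is elided.
+
+       IMG_DEV_VIRTADDR sDevVAddr;
+
+       DevmemMapToDevice(psMemDesc, psHeap, &sDevVAddr);   // reference 1
+       DevmemAcquireDevVirtAddr(psMemDesc, &sDevVAddr);    // reference 2
+       ...
+       DevmemReleaseDevVirtAddr(psMemDesc);                // drop reference 2
+       DevmemReleaseDevVirtAddr(psMemDesc);                // drop reference 1
+*/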
+
+/*
+ * DevmemAcquireCpuVirtAddr()
+ *
+ * Acquires a licence to use the CPU virtual address of this mapping.
+ * Note that the memory may not have been mapped into CPU virtual
+ * memory prior to this call.  On first "acquire" the memory will be
+ * mapped in (if it wasn't statically mapped in) and on last release it
+ * _may_ become unmapped.  Calling "Acquire" again later _may_ cause
+ * the memory to be mapped at a different address.
+ */
+PVRSRV_ERROR DevmemAcquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+                                      void **ppvCpuVirtAddr);
+
+/*
+ * DevmemReacquireCpuVirtAddr()
+ *
+ * (Re)acquires the licence to use the CPU virtual address of this mapping
+ * if (and only if) there is already a pre-existing licence to use the
+ * CPU virtual address for the mapping; otherwise NULL is returned in
+ * *ppvCpuVirtAddr.
+ */
+void DevmemReacquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+                                void **ppvCpuVirtAddr);
+
+/*
+ * DevmemReleaseCpuVirtAddr()
+ *
+ * give up the licence to use the CPU virtual address that was granted
+ * by the "Acquire" call.
+ */
+void
+DevmemReleaseCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc);
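+
+/*
+   Illustrative sketch of the CPU mapping lifecycle.  The first acquire
+   may create the CPU mapping and the last release may tear it down, so
+   the returned pointer must not be cached across acquire/release
+   pairs.  Error handling is elided and uiSize is a placeholder.
+
+       void *pvCpuVAddr;
+
+       DevmemAcquireCpuVirtAddr(psMemDesc, &pvCpuVAddr);
+       memset(pvCpuVAddr, 0, uiSize);         // touch the memory via the CPU
+       DevmemReleaseCpuVirtAddr(psMemDesc);   // pvCpuVAddr no longer valid here
+*/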
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+/*
+ * DevmemExport()
+ *
+ * Given a memory allocation allocated with DevmemAllocateExportable()
+ * create a "cookie" that can be passed intact by the caller's own choice
+ * of secure IPC to another process and used as the argument to "map"
+ * to map this memory into a heap in the target process.  N.B.  This can
+ * also be used to map into multiple heaps in one process, though that's not
+ * the intention.
+ *
+ * Note, the caller must later call Unexport before freeing the
+ * memory.
+ */
+PVRSRV_ERROR DevmemExport(DEVMEM_MEMDESC *psMemDesc,
+                          DEVMEM_EXPORTCOOKIE *psExportCookie);
+
+
+void DevmemUnexport(DEVMEM_MEMDESC *psMemDesc,
+                                       DEVMEM_EXPORTCOOKIE *psExportCookie);
+
+PVRSRV_ERROR
+DevmemImport(SHARED_DEV_CONNECTION hDevConnection,
+                        DEVMEM_EXPORTCOOKIE *psCookie,
+                        PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                        DEVMEM_MEMDESC **ppsMemDescPtr);
+#endif /* SUPPORT_INSECURE_EXPORT */
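+
+/*
+   Illustrative sketch (SUPPORT_INSECURE_EXPORT builds only): the
+   exporting process creates a cookie, carries it over its own choice
+   of IPC, and the importing process turns it back into a MemDesc.
+   The IPC transport and error handling are elided.
+
+       Exporter:
+           DEVMEM_EXPORTCOOKIE sCookie;
+           DevmemExport(psMemDesc, &sCookie);
+           // ... pass sCookie to the other process ...
+           DevmemUnexport(psMemDesc, &sCookie);
+
+       Importer:
+           DEVMEM_MEMDESC *psImportedMemDesc;
+           DevmemImport(hDevConnection, &sCookie, uiFlags, &psImportedMemDesc);
+*/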
+
+/*
+ * DevmemMakeLocalImportHandle()
+ *
+ * This is a "special case" function for turning a server export cookie,
+ * which went through the direct bridge, into an export cookie that can
+ * be passed through the client bridge.
+ */
+PVRSRV_ERROR
+DevmemMakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection,
+                            IMG_HANDLE hServerExport,
+                            IMG_HANDLE *hClientExport);
+
+/*
+ * DevmemUnmakeLocalImportHandle()
+ *
+ * Free any resource associated with the Make operation
+ */
+PVRSRV_ERROR
+DevmemUnmakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection,
+                              IMG_HANDLE hClientExport);
+
+/*
+ *
+ * The following set of functions is specific to the heap "blueprint"
+ * stuff, for automatic creation of heaps when a context is created
+ *
+ */
+
+
+/* Devmem_HeapConfigCount: returns the number of heap configs that
+   this device has.  Note that there is no acquire/release semantics
+   required, as this data is guaranteed to be constant for the
+   lifetime of the device node */
+PVRSRV_ERROR
+DevmemHeapConfigCount(SHARED_DEV_CONNECTION hDevConnection,
+                      IMG_UINT32 *puiNumHeapConfigsOut);
+
+/* Devmem_HeapCount: returns the number of heaps that a given heap
+   config on this device has.  Note that there is no acquire/release
+   semantics required, as this data is guaranteed to be constant for
+   the lifetime of the device node */
+PVRSRV_ERROR
+DevmemHeapCount(SHARED_DEV_CONNECTION hDevConnection,
+                IMG_UINT32 uiHeapConfigIndex,
+                IMG_UINT32 *puiNumHeapsOut);
+/* Devmem_HeapConfigName: return the name of the given heap config.
+   The caller is to provide the storage for the returned string and
+   indicate the number of bytes (including null terminator) for such
+   string in the BufSz arg.  Note that there is no acquire/release
+   semantics required, as this data is guaranteed to be constant for
+   the lifetime of the device node.
+ */
+PVRSRV_ERROR
+DevmemHeapConfigName(SHARED_DEV_CONNECTION hsDevConnection,
+                     IMG_UINT32 uiHeapConfigIndex,
+                     IMG_CHAR *pszConfigNameOut,
+                     IMG_UINT32 uiConfigNameBufSz);
+
+/* Devmem_HeapDetails: fetches all the metadata that is recorded in
+   this heap "blueprint".  Namely: heap name (caller to provide
+   storage, and indicate buffer size (including null terminator) in
+   BufSz arg), device virtual address and length, log2 of data page
+   size (will be one of 12, 14, 16, 18, 20, 21, at time of writing).
+   Note that there is no acquire/release semantics required, as this
+   data is guaranteed to be constant for the lifetime of the device
+   node. */
+PVRSRV_ERROR
+DevmemHeapDetails(SHARED_DEV_CONNECTION hDevConnection,
+                  IMG_UINT32 uiHeapConfigIndex,
+                  IMG_UINT32 uiHeapIndex,
+                  IMG_CHAR *pszHeapNameOut,
+                  IMG_UINT32 uiHeapNameBufSz,
+                  IMG_DEV_VIRTADDR *psDevVAddrBaseOut,
+                  IMG_DEVMEM_SIZE_T *puiHeapLengthOut,
+                  IMG_DEVMEM_SIZE_T *puiReservedRegionLengthOut,
+                  IMG_UINT32 *puiLog2DataPageSize,
+                  IMG_UINT32 *puiLog2ImportAlignmentOut);
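+
+/*
+   Illustrative sketch: enumerating the heap configs and heaps exposed
+   by a device using the calls above.  The name buffer size is
+   hypothetical; error handling is elided.
+
+       IMG_UINT32 uiNumConfigs, uiNumHeaps, i, j;
+       IMG_CHAR aszName[64];
+
+       DevmemHeapConfigCount(hDevConnection, &uiNumConfigs);
+       for (i = 0; i < uiNumConfigs; i++)
+       {
+           DevmemHeapConfigName(hDevConnection, i, aszName, sizeof(aszName));
+           DevmemHeapCount(hDevConnection, i, &uiNumHeaps);
+           for (j = 0; j < uiNumHeaps; j++)
+           {
+               // DevmemHeapDetails(hDevConnection, i, j, ...) fetches the
+               // blueprint data for heap j of config i.
+           }
+       }
+*/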
+
+/*
+ * Devmem_FindHeapByName()
+ *
+ * returns the heap handle for the named _automagic_ heap in this
+ * context.  "automagic" heaps are those that are born with the
+ * context from a blueprint
+ */
+PVRSRV_ERROR
+DevmemFindHeapByName(const DEVMEM_CONTEXT *psCtx,
+                     const IMG_CHAR *pszHeapName,
+                     DEVMEM_HEAP **ppsHeapRet);
+
+/*
+ * DevmemGetHeapBaseDevVAddr()
+ *
+ * returns the device virtual address of the base of the heap.
+ */
+
+PVRSRV_ERROR
+DevmemGetHeapBaseDevVAddr(DEVMEM_HEAP *psHeap,
+                         IMG_DEV_VIRTADDR *pDevVAddr);
+
+PVRSRV_ERROR
+DevmemLocalGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
+                          IMG_HANDLE *phImport);
+
+PVRSRV_ERROR
+DevmemGetImportUID(DEVMEM_MEMDESC *psMemDesc,
+                  IMG_UINT64 *pui64UID);
+
+PVRSRV_ERROR
+DevmemGetReservation(DEVMEM_MEMDESC *psMemDesc,
+                    IMG_HANDLE *hReservation);
+
+IMG_INTERNAL void
+DevmemGetPMRData(DEVMEM_MEMDESC *psMemDesc,
+               IMG_HANDLE *hPMR,
+               IMG_DEVMEM_OFFSET_T *puiPMROffset);
+
+IMG_INTERNAL void
+DevmemGetFlags(DEVMEM_MEMDESC *psMemDesc,
+                               PVRSRV_MEMALLOCFLAGS_T *puiFlags);
+
+IMG_INTERNAL SHARED_DEV_CONNECTION
+DevmemGetConnection(DEVMEM_MEMDESC *psMemDesc);
+
+PVRSRV_ERROR
+DevmemLocalImport(SHARED_DEV_CONNECTION hDevConnection,
+                                 IMG_HANDLE hExtHandle,
+                                 PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                                 DEVMEM_MEMDESC **ppsMemDescPtr,
+                                 IMG_DEVMEM_SIZE_T *puiSizePtr,
+                                 const IMG_CHAR *pszAnnotation);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemIsDevVirtAddrValid(DEVMEM_CONTEXT *psContext,
+                         IMG_DEV_VIRTADDR sDevVAddr);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetFaultAddress(DEVMEM_CONTEXT *psContext,
+                      IMG_DEV_VIRTADDR *psFaultAddress);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemFlushDeviceSLCRange(DEVMEM_MEMDESC *psMemDesc,
+                          IMG_DEV_VIRTADDR sDevVAddr,
+                          IMG_DEVMEM_SIZE_T uiSize,
+                          IMG_BOOL bInvalidate);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemInvalidateFBSCTable(DEVMEM_CONTEXT *psContext,
+                          IMG_UINT64 ui64FBSCEntries);
+
+/* DevmemGetHeapLog2PageSize()
+ *
+ * Get the page size used for a certain heap.
+ */
+IMG_UINT32
+DevmemGetHeapLog2PageSize(DEVMEM_HEAP *psHeap);
+
+/* DevmemGetMemFlags()
+ *
+ * Get the memalloc flags for a certain memdesc.
+ */
+PVRSRV_MEMALLOCFLAGS_T
+DevmemGetMemAllocFlags(DEVMEM_MEMDESC *psMemDesc);
+
+/* DevmemGetHeapReservedSize()
+ *
+ * Get the reserved size used for a certain heap.
+ */
+IMG_DEVMEM_SIZE_T
+DevmemGetHeapReservedSize(DEVMEM_HEAP *psHeap);
+
+/*************************************************************************/ /*!
+@Function       RegisterDevMemPFNotify
+@Description    Registers that the application wants to be signaled when a page
+                fault occurs.
+
+@Input          psContext      Memory context of the process that would like
+                               to be notified.
+@Input          ui32PID        The PID of the calling process.
+@Input          bRegister      If true, register. If false, de-register.
+@Return         PVRSRV_ERROR:  PVRSRV_OK on success. Otherwise, a PVRSRV_
+                               error code
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+RegisterDevmemPFNotify(DEVMEM_CONTEXT *psContext,
+                       IMG_UINT32     ui32PID,
+                       IMG_BOOL       bRegister);
+
+/*************************************************************************/ /*!
+@Function       DevmemHeapSetPremapStatus
+@Description    In some special cases, like virtualisation, a device memory
+                heap must be entirely backed by physical memory and mapped
+                into the device's virtual address space. This is done at
+                context creation. When objects are allocated from such a
+                heap, the mapping part must be skipped. The 'bPremapped'
+                flag dictates whether allocations are to be mapped or not.
+
+@Input          psHeap            Device memory heap to be updated
+@Input          IsPremapped       The premapping status to be set
+*/ /**************************************************************************/
+IMG_INTERNAL void
+DevmemHeapSetPremapStatus(DEVMEM_HEAP *psHeap, IMG_BOOL IsPremapped);
+
+#endif /* #ifndef SRVCLIENT_DEVICEMEM_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/include/devicemem_pdump.h b/drivers/gpu/drm/img/img-rogue/services/shared/include/devicemem_pdump.h
new file mode 100644 (file)
index 0000000..09b28af
--- /dev/null
@@ -0,0 +1,363 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management PDump internal
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Services internal interface to PDump device memory management
+                functions that are shared between client and server code.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef DEVICEMEM_PDUMP_H
+#define DEVICEMEM_PDUMP_H
+
+#include "devicemem.h"
+#include "pdumpdefs.h"
+#include "pdump.h"
+
+#if defined(PDUMP)
+/*
+ * DevmemPDumpLoadMem()
+ *
+ * takes a memory descriptor, offset, and size, and takes the current contents
+ * of the memory at that location and writes it to the prm pdump file, and
+ * emits a pdump LDB to load the data from that file.  The intention here is
+ * that the contents of the simulated buffer upon pdump playback will be made
+ * to be the same as they are when this command is run, enabling pdump of
+ * cases where the memory has been modified externally, i.e. by the host cpu
+ * or by a third party.
+ */
+void
+DevmemPDumpLoadMem(DEVMEM_MEMDESC *psMemDesc,
+                   IMG_DEVMEM_OFFSET_T uiOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * DevmemPDumpLoadZeroMem()
+ *
+ * As DevmemPDumpLoadMem() but the PDump allocation will be populated with
+ * zeros from the zero page in the parameter stream
+ */
+void
+DevmemPDumpLoadZeroMem(DEVMEM_MEMDESC *psMemDesc,
+                       IMG_DEVMEM_OFFSET_T uiOffset,
+                       IMG_DEVMEM_SIZE_T uiSize,
+                       PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * DevmemPDumpLoadMemValue32()
+ *
+ * As above but dumps the value at a dword-aligned address in plain text to
+ * the pdump script2 file. Useful for patching a buffer at pdump playback by
+ * simply editing the script output file.
+ *
+ * (The same functionality can be achieved by the above function but the
+ *  binary PARAM file must be patched in that case.)
+ */
+IMG_INTERNAL void
+DevmemPDumpLoadMemValue32(DEVMEM_MEMDESC *psMemDesc,
+                          IMG_DEVMEM_OFFSET_T uiOffset,
+                          IMG_UINT32 ui32Value,
+                          PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * DevmemPDumpLoadMemValue64()
+ *
+ * As above but dumps the 64bit-value at a dword-aligned address in plain text
+ * to the pdump script2 file. Useful for patching a buffer at pdump playback by
+ * simply editing the script output file.
+ *
+ * (The same functionality can be achieved by the above function but the
+ *  binary PARAM file must be patched in that case.)
+ */
+IMG_INTERNAL void
+DevmemPDumpLoadMemValue64(DEVMEM_MEMDESC *psMemDesc,
+                          IMG_DEVMEM_OFFSET_T uiOffset,
+                          IMG_UINT64 ui64Value,
+                          PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * DevmemPDumpPageCatBaseToSAddr()
+ *
+ * Returns the symbolic address of a piece of memory represented by an offset
+ * into the mem descriptor.
+ */
+PVRSRV_ERROR
+DevmemPDumpPageCatBaseToSAddr(DEVMEM_MEMDESC *psMemDesc,
+                              IMG_DEVMEM_OFFSET_T *puiMemOffset,
+                              IMG_CHAR *pszName,
+                              IMG_UINT32 ui32Size);
+
+/*
+ * DevmemPDumpSaveToFile()
+ *
+ * Emits a pdump SAB to cause the current contents of the memory to be written
+ * to the given file during playback
+ */
+void
+DevmemPDumpSaveToFile(DEVMEM_MEMDESC *psMemDesc,
+                      IMG_DEVMEM_OFFSET_T uiOffset,
+                      IMG_DEVMEM_SIZE_T uiSize,
+                      const IMG_CHAR *pszFilename,
+                      IMG_UINT32 uiFileOffset);
+
+/*
+ * DevmemPDumpSaveToFileVirtual()
+ *
+ * Emits a pdump SAB, just like DevmemPDumpSaveToFile(), but uses the virtual
+ * address and device MMU context to cause the pdump player to traverse the
+ * MMU page tables itself.
+ */
+void
+DevmemPDumpSaveToFileVirtual(DEVMEM_MEMDESC *psMemDesc,
+                             IMG_DEVMEM_OFFSET_T uiOffset,
+                             IMG_DEVMEM_SIZE_T uiSize,
+                             const IMG_CHAR *pszFilename,
+                             IMG_UINT32 ui32FileOffset,
+                             IMG_UINT32 ui32PdumpFlags);
+
+/*
+ * DevmemPDumpDataDescriptor()
+ *
+ * Emits a pdump CMD:OutputData, using the virtual address and device MMU
+ * context. Provides more flexibility than a pdump SAB because metadata can
+ * be passed to an external pdump player library via the command header.
+ */
+void
+DevmemPDumpDataDescriptor(DEVMEM_MEMDESC *psMemDesc,
+                          IMG_DEVMEM_OFFSET_T uiOffset,
+                          IMG_DEVMEM_SIZE_T uiSize,
+                          const IMG_CHAR *pszFilename,
+                          IMG_UINT32 ui32HeaderType,
+                          IMG_UINT32 ui32ElementType,
+                          IMG_UINT32 ui32ElementCount,
+                          IMG_UINT32 ui32PdumpFlags);
+
+
+/*
+ *
+ * DevmemPDumpDevmemPol32()
+ *
+ * Writes a PDump 'POL' command to wait for a masked 32-bit memory location to
+ * become the specified value.
+ */
+PVRSRV_ERROR
+DevmemPDumpDevmemPol32(const DEVMEM_MEMDESC *psMemDesc,
+                       IMG_DEVMEM_OFFSET_T uiOffset,
+                       IMG_UINT32 ui32Value,
+                       IMG_UINT32 ui32Mask,
+                       PDUMP_POLL_OPERATOR eOperator,
+                       PDUMP_FLAGS_T ui32PDumpFlags);
+
+#if defined(__KERNEL__)
+/*
+ *
+ * DevmemPDumpDevmemCheck32()
+ *
+ * Writes a PDump 'POL' command to run a single-shot check for a masked
+ * 32-bit memory location to match the specified value.
+ */
+PVRSRV_ERROR
+DevmemPDumpDevmemCheck32(const DEVMEM_MEMDESC *psMemDesc,
+                         IMG_DEVMEM_OFFSET_T uiOffset,
+                         IMG_UINT32 ui32Value,
+                         IMG_UINT32 ui32Mask,
+                         PDUMP_POLL_OPERATOR eOperator,
+                         PDUMP_FLAGS_T ui32PDumpFlags);
+#endif
+
+/*
+ * DevmemPDumpCBP()
+ *
+ * Polls for space in circular buffer. Reads the read offset from memory and
+ * waits until there is enough space to write the packet.
+ *
+ * psMemDesc     - MemDesc which contains the read offset
+ * uiReadOffset  - Offset into MemDesc to the read offset
+ * uiWriteOffset - Current write offset
+ * uiPacketSize  - Size of packet to write
+ * uiBufferSize  - Size of circular buffer
+ */
+PVRSRV_ERROR
+DevmemPDumpCBP(const DEVMEM_MEMDESC *psMemDesc,
+               IMG_DEVMEM_OFFSET_T uiReadOffset,
+               IMG_DEVMEM_OFFSET_T uiWriteOffset,
+               IMG_DEVMEM_SIZE_T uiPacketSize,
+               IMG_DEVMEM_SIZE_T uiBufferSize);
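+
+/*
+   Illustrative sketch (PDUMP builds only): capture the current buffer
+   contents and emit a poll so that playback waits for the GPU to write
+   an expected value.  The PDUMP_FLAGS_CONTINUOUS flag and
+   PDUMP_POLL_OPERATOR_EQUAL operator names are assumed to come from
+   pdump.h/pdumpdefs.h; offsets and values are hypothetical.
+
+       DevmemPDumpLoadMem(psMemDesc, 0, uiSize, PDUMP_FLAGS_CONTINUOUS);
+       DevmemPDumpDevmemPol32(psMemDesc,
+                              0x0,              // offset of the 32-bit word
+                              0x1,              // value to wait for
+                              0xFFFFFFFF,       // mask
+                              PDUMP_POLL_OPERATOR_EQUAL,
+                              PDUMP_FLAGS_CONTINUOUS);
+*/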
+
+#else /* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpLoadMem)
+#endif
+static INLINE void
+DevmemPDumpLoadMem(DEVMEM_MEMDESC *psMemDesc,
+                   IMG_DEVMEM_OFFSET_T uiOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   PDUMP_FLAGS_T uiPDumpFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(psMemDesc);
+       PVR_UNREFERENCED_PARAMETER(uiOffset);
+       PVR_UNREFERENCED_PARAMETER(uiSize);
+       PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpLoadMemValue32)
+#endif
+static INLINE void
+DevmemPDumpLoadMemValue32(DEVMEM_MEMDESC *psMemDesc,
+                          IMG_DEVMEM_OFFSET_T uiOffset,
+                          IMG_UINT32 ui32Value,
+                          PDUMP_FLAGS_T uiPDumpFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(psMemDesc);
+       PVR_UNREFERENCED_PARAMETER(uiOffset);
+       PVR_UNREFERENCED_PARAMETER(ui32Value);
+       PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpLoadMemValue64)
+#endif
+static INLINE void
+DevmemPDumpLoadMemValue64(DEVMEM_MEMDESC *psMemDesc,
+                          IMG_DEVMEM_OFFSET_T uiOffset,
+                          IMG_UINT64 ui64Value,
+                          PDUMP_FLAGS_T uiPDumpFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(psMemDesc);
+       PVR_UNREFERENCED_PARAMETER(uiOffset);
+       PVR_UNREFERENCED_PARAMETER(ui64Value);
+       PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpPageCatBaseToSAddr)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemPDumpPageCatBaseToSAddr(DEVMEM_MEMDESC *psMemDesc,
+                              IMG_DEVMEM_OFFSET_T *puiMemOffset,
+                              IMG_CHAR *pszName,
+                              IMG_UINT32 ui32Size)
+{
+       PVR_UNREFERENCED_PARAMETER(psMemDesc);
+       PVR_UNREFERENCED_PARAMETER(puiMemOffset);
+       PVR_UNREFERENCED_PARAMETER(pszName);
+       PVR_UNREFERENCED_PARAMETER(ui32Size);
+
+       return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpSaveToFile)
+#endif
+static INLINE void
+DevmemPDumpSaveToFile(DEVMEM_MEMDESC *psMemDesc,
+                      IMG_DEVMEM_OFFSET_T uiOffset,
+                      IMG_DEVMEM_SIZE_T uiSize,
+                      const IMG_CHAR *pszFilename,
+                      IMG_UINT32 uiFileOffset)
+{
+       PVR_UNREFERENCED_PARAMETER(psMemDesc);
+       PVR_UNREFERENCED_PARAMETER(uiOffset);
+       PVR_UNREFERENCED_PARAMETER(uiSize);
+       PVR_UNREFERENCED_PARAMETER(pszFilename);
+       PVR_UNREFERENCED_PARAMETER(uiFileOffset);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpSaveToFileVirtual)
+#endif
+static INLINE void
+DevmemPDumpSaveToFileVirtual(DEVMEM_MEMDESC *psMemDesc,
+                             IMG_DEVMEM_OFFSET_T uiOffset,
+                             IMG_DEVMEM_SIZE_T uiSize,
+                             const IMG_CHAR *pszFilename,
+                             IMG_UINT32 ui32FileOffset,
+                             IMG_UINT32 ui32PdumpFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(psMemDesc);
+       PVR_UNREFERENCED_PARAMETER(uiOffset);
+       PVR_UNREFERENCED_PARAMETER(uiSize);
+       PVR_UNREFERENCED_PARAMETER(pszFilename);
+       PVR_UNREFERENCED_PARAMETER(ui32FileOffset);
+       PVR_UNREFERENCED_PARAMETER(ui32PdumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpDevmemPol32)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemPDumpDevmemPol32(const DEVMEM_MEMDESC *psMemDesc,
+                       IMG_DEVMEM_OFFSET_T uiOffset,
+                       IMG_UINT32 ui32Value,
+                       IMG_UINT32 ui32Mask,
+                       PDUMP_POLL_OPERATOR eOperator,
+                       PDUMP_FLAGS_T ui32PDumpFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(psMemDesc);
+       PVR_UNREFERENCED_PARAMETER(uiOffset);
+       PVR_UNREFERENCED_PARAMETER(ui32Value);
+       PVR_UNREFERENCED_PARAMETER(ui32Mask);
+       PVR_UNREFERENCED_PARAMETER(eOperator);
+       PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+
+       return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpCBP)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemPDumpCBP(const DEVMEM_MEMDESC *psMemDesc,
+               IMG_DEVMEM_OFFSET_T uiReadOffset,
+               IMG_DEVMEM_OFFSET_T uiWriteOffset,
+               IMG_DEVMEM_SIZE_T uiPacketSize,
+               IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+       PVR_UNREFERENCED_PARAMETER(psMemDesc);
+       PVR_UNREFERENCED_PARAMETER(uiReadOffset);
+       PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+       PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+       PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+
+       return PVRSRV_OK;
+}
+#endif /* PDUMP */
+#endif /* DEVICEMEM_PDUMP_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/include/devicemem_utils.h b/drivers/gpu/drm/img/img-rogue/services/shared/include/devicemem_utils.h
new file mode 100644 (file)
index 0000000..3dcef24
--- /dev/null
@@ -0,0 +1,605 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management internal utility functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Utility functions used internally by device memory management
+                code.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef DEVICEMEM_UTILS_H
+#define DEVICEMEM_UTILS_H
+
+#include "devicemem.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvr_debug.h"
+#include "allocmem.h"
+#include "ra.h"
+#include "osfunc.h"
+#include "lock.h"
+#include "osmmap.h"
+#include "pvrsrv_memallocflags_internal.h"
+
+#define DEVMEM_HEAPNAME_MAXLENGTH 160
+
+/*
+ * Reserved VA space of a heap must always be a multiple of DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY,
+ * this check is validated in the DDK. Note this is only reserving "Virtual Address" space and
+ * physical allocations (and mappings thereon) should only be done as much as required (to avoid
+ * wastage).
+ * Granularity has been chosen to support the max possible practically used OS page size.
+ */
+#define DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY        0x10000 /* 64KB is MAX anticipated OS page size */
+
+/*
+ * VA heap size should be at least OS page size. This check is validated in the DDK.
+ */
+#define DEVMEM_HEAP_MINIMUM_SIZE                     0x10000 /* 64KB is MAX anticipated OS page size */
+
+#if defined(DEVMEM_DEBUG) && defined(REFCOUNT_DEBUG)
+#define DEVMEM_REFCOUNT_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_ERROR, __FILE__, __LINE__, fmt, __VA_ARGS__)
+#else
+#define DEVMEM_REFCOUNT_PRINT(fmt, ...)
+#endif
+
+/* If we need a "hMapping" but we don't have a server-side mapping, we poison
+ * the entry with this value so that it's easily recognised in the debugger.
+ * Note that this is potentially a valid handle, but then so is NULL, which is
+ * no better, indeed worse, as it's not obvious in the debugger. The value
+ * doesn't matter. We _never_ use it (and because it's valid, we never assert
+ * it isn't this) but it's nice to have a value in the source code that we can
+ * grep for if things go wrong.
+ */
+#define LACK_OF_MAPPING_POISON ((IMG_HANDLE)0x6116dead)
+#define LACK_OF_RESERVATION_POISON ((IMG_HANDLE)0x7117dead)
+
+#define DEVICEMEM_HISTORY_ALLOC_INDEX_NONE 0xFFFFFFFF
+
+struct DEVMEM_CONTEXT_TAG
+{
+
+       SHARED_DEV_CONNECTION hDevConnection;
+
+       /* Number of heaps that have been created in this context
+        * (regardless of whether they have allocations)
+        */
+       IMG_UINT32 uiNumHeaps;
+
+       /* Each "DEVMEM_CONTEXT" has a counterpart in the server, which
+        * is responsible for handling the mapping into device MMU.
+        * We have a handle to that here.
+        */
+       IMG_HANDLE hDevMemServerContext;
+
+       /* Number of automagically created heaps in this context,
+        *  i.e. those that are born at context creation time from the
+        * chosen "heap config" or "blueprint"
+        */
+       IMG_UINT32 uiAutoHeapCount;
+
+       /* Pointer to array of such heaps */
+       struct DEVMEM_HEAP_TAG **ppsAutoHeapArray;
+
+       /* The cache line size for use when allocating memory,
+        * as it is not queryable on the client side
+        */
+       IMG_UINT32 ui32CPUCacheLineSize;
+
+       /* Private data handle for device specific data */
+       IMG_HANDLE hPrivData;
+};
+
+/* Flags that record how a heap's virtual address space is managed. */
+#define DEVMEM_HEAP_MANAGER_UNKNOWN      0
+/* Heap VAs assigned by the client of Services APIs, heap's RA not used at all. */
+#define DEVMEM_HEAP_MANAGER_USER         (1U << 0)
+/* Heap VAs managed by the OS kernel, VA from CPU mapping call used */
+#define DEVMEM_HEAP_MANAGER_KERNEL       (1U << 1)
+/* Heap VAs managed by the heap's own RA  */
+#define DEVMEM_HEAP_MANAGER_RA           (1U << 2)
+/* Heap VAs managed jointly by Services and the client of Services.
+ * The reserved region of the heap is managed explicitly by the client of Services
+ * The non-reserved region of the heap is managed by the heap's own RA */
+#define DEVMEM_HEAP_MANAGER_DUAL_USER_RA (DEVMEM_HEAP_MANAGER_USER | DEVMEM_HEAP_MANAGER_RA)
+
+struct DEVMEM_HEAP_TAG
+{
+       /* Name of heap - for debug and lookup purposes. */
+       IMG_CHAR *pszName;
+
+       /* Number of live imports in the heap */
+       ATOMIC_T hImportCount;
+
+       /* Base address and size of heap, required by clients due to some
+        * requesters not being full range
+        */
+       IMG_DEV_VIRTADDR sBaseAddress;
+       DEVMEM_SIZE_T uiSize;
+
+       DEVMEM_SIZE_T uiReservedRegionSize; /* uiReservedRegionLength in DEVMEM_HEAP_BLUEPRINT */
+
+       /* The heap manager, describing if the space is managed by the user, an RA,
+        * kernel or combination */
+       IMG_UINT32 ui32HeapManagerFlags;
+
+       /* This RA is for managing sub-allocations within the imports (PMRs)
+        * within the heap's virtual space. RA only used in DevmemSubAllocate()
+        * to track sub-allocated buffers.
+        *
+        * Resource Span - a PMR import added when the RA calls the
+        *                 imp_alloc CB (SubAllocImportAlloc) which returns the
+        *                 PMR import and size (span length).
+        * Resource - an allocation/buffer i.e. a MemDesc. Resource size represents
+        *            the size of the sub-allocation.
+        */
+       RA_ARENA *psSubAllocRA;
+       IMG_CHAR *pszSubAllocRAName;
+
+       /* The psQuantizedVMRA is for the coarse allocation (PMRs) of virtual
+        * space from the heap.
+        *
+        * Resource Span - the heap's VM space from base to base+length,
+        *                 only one is added at heap creation.
+        * Resource - a PMR import associated with the heap. Dynamic number
+        *            as memory is allocated/freed from or mapped/unmapped to
+        *            the heap. Resource size follows PMR logical size.
+        */
+       RA_ARENA *psQuantizedVMRA;
+       IMG_CHAR *pszQuantizedVMRAName;
+
+       /* We also need to store a copy of the quantum size in order to feed
+        * this down to the server.
+        */
+       IMG_UINT32 uiLog2Quantum;
+
+       /* Store a copy of the minimum import alignment */
+       IMG_UINT32 uiLog2ImportAlignment;
+
+       /* The parent memory context for this heap */
+       struct DEVMEM_CONTEXT_TAG *psCtx;
+
+       /* Lock to protect this structure */
+       POS_LOCK hLock;
+
+       /* Each "DEVMEM_HEAP" has a counterpart in the server, which is
+        * responsible for handling the mapping into device MMU.
+        * We have a handle to that here.
+        */
+       IMG_HANDLE hDevMemServerHeap;
+
+       /* This heap is fully allocated and premapped into the device address space.
+        * Used in virtualisation for firmware heaps of Guest and optionally Host drivers. */
+       IMG_BOOL bPremapped;
+};
+
+typedef IMG_UINT32 DEVMEM_PROPERTIES_T;                  /*!< Typedef for Devicemem properties */
+#define DEVMEM_PROPERTIES_EXPORTABLE         (1UL<<0)    /*!< Is it exportable? */
+#define DEVMEM_PROPERTIES_IMPORTED           (1UL<<1)    /*!< Is it imported from another process? */
+#define DEVMEM_PROPERTIES_SUBALLOCATABLE     (1UL<<2)    /*!< Is it suballocatable? */
+#define DEVMEM_PROPERTIES_UNPINNED           (1UL<<3)    /*!< Is it currently unpinned? */
+#define DEVMEM_PROPERTIES_IMPORT_IS_ZEROED   (1UL<<4)    /*!< Is the memory fully zeroed? */
+#define DEVMEM_PROPERTIES_IMPORT_IS_CLEAN    (1UL<<5)    /*!< Is the memory clean, i.e. not been used before? */
+#define DEVMEM_PROPERTIES_SECURE             (1UL<<6)    /*!< Is it a special secure buffer? No CPU maps allowed! */
+#define DEVMEM_PROPERTIES_IMPORT_IS_POISONED (1UL<<7)    /*!< Is the memory fully poisoned? */
+#define DEVMEM_PROPERTIES_NO_CPU_MAPPING     (1UL<<8)    /* No CPU Mapping is allowed, RW attributes
+                                                            are further derived from allocation memory flags */
+#define DEVMEM_PROPERTIES_NO_LAYOUT_CHANGE      (1UL<<9)    /* No sparse resizing allowed, once a memory
+                                                            layout is chosen, no change allowed later,
+                                                            This includes pinning and unpinning */
+
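+/*
+   Illustrative sketch: the properties are a plain bitmask, so internal
+   code can combine and test them with the usual flag idiom, e.g.
+
+       if ((uiProperties & DEVMEM_PROPERTIES_EXPORTABLE) != 0 &&
+           (uiProperties & DEVMEM_PROPERTIES_SECURE) == 0)
+       {
+           // the import may be exported; it is not a secure
+           // (no-CPU-mapping) buffer.
+       }
+*/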
+
+typedef struct DEVMEM_DEVICE_IMPORT_TAG
+{
+       DEVMEM_HEAP *psHeap;            /*!< Heap this import is bound to */
+       IMG_DEV_VIRTADDR sDevVAddr;     /*!< Device virtual address of the import */
+       IMG_UINT32 ui32RefCount;        /*!< Refcount of the device virtual address */
+       IMG_HANDLE hReservation;        /*!< Device memory reservation handle */
+       IMG_HANDLE hMapping;            /*!< Device mapping handle */
+	IMG_BOOL bMapped;		/*!< Is this import mapped? */
+       POS_LOCK hLock;                 /*!< Lock to protect the device import */
+} DEVMEM_DEVICE_IMPORT;
+
+typedef struct DEVMEM_CPU_IMPORT_TAG
+{
+       void *pvCPUVAddr;               /*!< CPU virtual address of the import */
+       IMG_UINT32 ui32RefCount;        /*!< Refcount of the CPU virtual address */
+       IMG_HANDLE hOSMMapData;         /*!< CPU mapping handle */
+       POS_LOCK hLock;                 /*!< Lock to protect the CPU import */
+} DEVMEM_CPU_IMPORT;
+
+typedef struct DEVMEM_IMPORT_TAG
+{
+       SHARED_DEV_CONNECTION hDevConnection;
+       IMG_DEVMEM_ALIGN_T uiAlign;         /*!< Alignment of the PMR */
+       DEVMEM_SIZE_T uiSize;               /*!< Size of import */
+       ATOMIC_T hRefCount;                 /*!< Refcount for this import */
+       DEVMEM_PROPERTIES_T uiProperties;   /*!< Stores properties of an import like if
+                                                it is exportable, pinned or suballocatable */
+       IMG_HANDLE hPMR;                    /*!< Handle to the PMR */
+       PVRSRV_MEMALLOCFLAGS_T uiFlags;     /*!< Flags for this import */
+       POS_LOCK hLock;                     /*!< Lock to protect the import */
+
+       DEVMEM_DEVICE_IMPORT sDeviceImport; /*!< Device specifics of the import */
+       DEVMEM_CPU_IMPORT sCPUImport;       /*!< CPU specifics of the import */
+} DEVMEM_IMPORT;
+
+typedef struct DEVMEM_DEVICE_MEMDESC_TAG
+{
+       IMG_DEV_VIRTADDR sDevVAddr;     /*!< Device virtual address of the allocation */
+       IMG_UINT32 ui32RefCount;        /*!< Refcount of the device virtual address */
+       POS_LOCK hLock;                 /*!< Lock to protect device memdesc */
+} DEVMEM_DEVICE_MEMDESC;
+
+typedef struct DEVMEM_CPU_MEMDESC_TAG
+{
+       void *pvCPUVAddr;           /*!< CPU virtual address of the import */
+       IMG_UINT32 ui32RefCount;    /*!< Refcount of the device CPU address */
+       POS_LOCK hLock;             /*!< Lock to protect CPU memdesc */
+} DEVMEM_CPU_MEMDESC;
+
+struct DEVMEM_MEMDESC_TAG
+{
+       DEVMEM_IMPORT *psImport;                /*!< Import this memdesc is on */
+       IMG_DEVMEM_OFFSET_T uiOffset;           /*!< Offset into import where our allocation starts */
+       IMG_DEVMEM_SIZE_T uiAllocSize;          /*!< Size of the allocation */
+       ATOMIC_T hRefCount;                     /*!< Refcount of the memdesc */
+       POS_LOCK hLock;                         /*!< Lock to protect memdesc */
+       IMG_HANDLE hPrivData;
+
+       DEVMEM_DEVICE_MEMDESC sDeviceMemDesc;   /*!< Device specifics of the memdesc */
+       DEVMEM_CPU_MEMDESC sCPUMemDesc;         /*!< CPU specifics of the memdesc */
+
+       IMG_CHAR szText[DEVMEM_ANNOTATION_MAX_LEN]; /*!< Annotation for this memdesc */
+
+       IMG_UINT32 ui32AllocationIndex;
+
+#if defined(DEBUG)
+       IMG_BOOL bPoisonOnFree;
+#endif
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+       IMG_HANDLE hRIHandle;                   /*!< Handle to RI information */
+#endif
+};
+
+/* The physical descriptor used to store handles and information of device
+ * physical allocations.
+ */
+struct DEVMEMX_PHYS_MEMDESC_TAG
+{
+       IMG_UINT32 uiNumPages;                  /*!< Number of pages that the import has*/
+       IMG_UINT32 uiLog2PageSize;              /*!< Page size */
+       ATOMIC_T hRefCount;                     /*!< Refcount of the memdesc */
+       PVRSRV_MEMALLOCFLAGS_T uiFlags;         /*!< Flags for this import */
+       IMG_HANDLE hPMR;                        /*!< Handle to the PMR */
+       DEVMEM_CPU_IMPORT sCPUImport;           /*!< CPU specifics of the memdesc */
+       DEVMEM_BRIDGE_HANDLE hBridge;           /*!< Bridge connection for the server */
+       void *pvUserData;                                               /*!< User data */
+};
+
+/* The virtual descriptor used to store handles and information of a device
+ * virtual range and the mappings to it.
+ */
+struct DEVMEMX_VIRT_MEMDESC_TAG
+{
+       IMG_UINT32 uiNumPages;                  /*!< Number of pages that the import has*/
+       PVRSRV_MEMALLOCFLAGS_T uiFlags;         /*!< Flags for this import */
+       DEVMEMX_PHYSDESC **apsPhysDescTable;    /*!< Table to store links to physical descs */
+       DEVMEM_DEVICE_IMPORT sDeviceImport;     /*!< Device specifics of the memdesc */
+
+       IMG_CHAR szText[DEVMEM_ANNOTATION_MAX_LEN]; /*!< Annotation for this virt memdesc */
+       IMG_UINT32 ui32AllocationIndex;         /*!< To track mappings in this range */
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+       IMG_HANDLE hRIHandle;                   /*!< Handle to RI information */
+#endif
+};
+
+#define DEVICEMEM_UTILS_NO_ADDRESS 0
+
+/******************************************************************************
+@Function       DevmemValidateParams
+@Description    Check if flags are conflicting and if align is a size multiple.
+
+@Input          uiSize      Size of the import.
+@Input          uiAlign     Alignment of the import.
+@Input          puiFlags    Pointer to the flags for the import.
+@return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR DevmemValidateParams(IMG_DEVMEM_SIZE_T uiSize,
+                                   IMG_DEVMEM_ALIGN_T uiAlign,
+                                   PVRSRV_MEMALLOCFLAGS_T *puiFlags);
+
+/******************************************************************************
+@Function       DevmemImportStructAlloc
+@Description    Allocates memory for an import struct. Does not allocate a PMR!
+                Creates locks for CPU and Devmem mappings.
+
+@Input          hDevConnection  Connection to use for calls from the import.
+@Input          ppsImport       The import to allocate.
+@return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR DevmemImportStructAlloc(SHARED_DEV_CONNECTION hDevConnection,
+                                      DEVMEM_IMPORT **ppsImport);
+
+/******************************************************************************
+@Function       DevmemImportStructInit
+@Description    Initialises the import struct with the given parameters.
+                Sets its refcount to 1.
+
+@Input          psImport     The import to initialise.
+@Input          uiSize       Size of the import.
+@Input          uiAlign      Alignment of allocations in the import.
+@Input          uiMapFlags
+@Input          hPMR         Reference to the PMR of this import struct.
+@Input          uiProperties Properties of the import. Is it exportable,
+                              imported, suballocatable, unpinned?
+******************************************************************************/
+void DevmemImportStructInit(DEVMEM_IMPORT *psImport,
+                             IMG_DEVMEM_SIZE_T uiSize,
+                             IMG_DEVMEM_ALIGN_T uiAlign,
+                             PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+                             IMG_HANDLE hPMR,
+                             DEVMEM_PROPERTIES_T uiProperties);
+
+/******************************************************************************
+@Function       DevmemImportStructDevMap
+@Description    NEVER call after the last DevmemMemDescRelease()
+                Maps the PMR referenced by the import struct to the device's
+                virtual address space.
+                Does nothing but increase the device mapping refcount if
+                the import struct was already mapped.
+
+@Input          psHeap    The heap to map to.
+@Input          bMap      Caller can choose if the import should be really
+                          mapped in the page tables or if just a virtual range
+                          should be reserved and the refcounts increased.
+@Input          psImport  The import we want to map.
+@Input          uiOptionalMapAddress  An optional address to map to.
+                                      Pass DEVICEMEM_UTILS_NO_ADDRESS if not used.
+@return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR DevmemImportStructDevMap(DEVMEM_HEAP *psHeap,
+                                       IMG_BOOL bMap,
+                                       DEVMEM_IMPORT *psImport,
+                                       IMG_UINT64 uiOptionalMapAddress);
+
+/******************************************************************************
+@Function       DevmemImportStructDevUnmap
+@Description    Unmaps the PMR referenced by the import struct from the
+                device's virtual address space.
+                If this was not the last remaining device mapping on the
+                import struct, only the device mapping refcount is decreased.
+@return         A boolean to signify if the import was unmapped.
+******************************************************************************/
+IMG_BOOL DevmemImportStructDevUnmap(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function       DevmemImportStructCPUMap
+@Description    NEVER call after the last DevmemMemDescRelease()
+                Maps the PMR referenced by the import struct to the CPU's
+                virtual address space.
+                Does nothing but increase the cpu mapping refcount if the
+                import struct was already mapped.
+@return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR DevmemImportStructCPUMap(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function       DevmemImportStructCPUUnmap
+@Description    Unmaps the PMR referenced by the import struct from the CPU's
+                virtual address space.
+                If this was not the last remaining CPU mapping on the import
+                struct, only the CPU mapping refcount is decreased.
+******************************************************************************/
+void DevmemImportStructCPUUnmap(DEVMEM_IMPORT *psImport);
+
+
+/******************************************************************************
+@Function       DevmemImportStructAcquire
+@Description    Acquires an import struct by increasing its refcount.
+******************************************************************************/
+void DevmemImportStructAcquire(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function       DevmemImportStructRelease
+@Description    Reduces the refcount of the import struct.
+                Destroys the import if this was the last reference.
+                Destroys the underlying PMR if this import was the last
+                reference to it.
+@return         A boolean signalling whether the import was destroyed. True = yes.
+******************************************************************************/
+IMG_BOOL DevmemImportStructRelease(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function       DevmemImportDiscard
+@Description    Discards a created but uninitialised import structure.
+                This must only be called before DevmemImportStructInit,
+                after which DevmemImportStructRelease must be used to
+                "free" the import structure.
+******************************************************************************/
+void DevmemImportDiscard(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function       DevmemMemDescAlloc
+@Description    Allocates a MemDesc and creates its various locks.
+                Zeroes the allocated memory.
+@return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR DevmemMemDescAlloc(DEVMEM_MEMDESC **ppsMemDesc);
+
+#if defined(DEBUG)
+/******************************************************************************
+@Function       DevmemMemDescSetPoF
+@Description    Sets the Poison on Free flag to true for this MemDesc if the
+                given MemAllocFlags have the Poison on Free bit set.
+                Poison on Free is a debug only feature.
+******************************************************************************/
+void DevmemMemDescSetPoF(DEVMEM_MEMDESC *psMemDesc, PVRSRV_MEMALLOCFLAGS_T uiFlags);
+#endif
+
+/******************************************************************************
+@Function       DevmemMemDescInit
+@Description    Sets the given offset and import struct fields in the MemDesc.
+                Initialises refcount to 1 and other values to 0.
+
+@Input          psMemDesc    MemDesc to initialise.
+@Input          uiOffset     Offset in the import structure.
+@Input          psImport     Import the MemDesc is on.
+@Input          uiAllocSize  Size of the allocation
+******************************************************************************/
+void DevmemMemDescInit(DEVMEM_MEMDESC *psMemDesc,
+                       IMG_DEVMEM_OFFSET_T uiOffset,
+                       DEVMEM_IMPORT *psImport,
+                       IMG_DEVMEM_SIZE_T uiAllocSize);
+
+/******************************************************************************
+@Function       DevmemMemDescAcquire
+@Description    Acquires the MemDesc by increasing its refcount.
+******************************************************************************/
+void DevmemMemDescAcquire(DEVMEM_MEMDESC *psMemDesc);
+
+/******************************************************************************
+@Function       DevmemMemDescRelease
+@Description    Releases the MemDesc by reducing its refcount.
+                Destroys the MemDesc if its refcount reaches 0.
+                Destroys the import struct the MemDesc is on if this was the
+                last MemDesc on the import, which may in turn destroy the
+                underlying PMR.
+@return         A boolean signalling whether the MemDesc was destroyed. True = yes.
+******************************************************************************/
+IMG_BOOL DevmemMemDescRelease(DEVMEM_MEMDESC *psMemDesc);
+
+/******************************************************************************
+@Function       DevmemMemDescDiscard
+@Description    Discards a created but uninitialised MemDesc structure.
+                This must only be called before DevmemMemDescInit, after
+                which DevmemMemDescRelease must be used to "free" the
+                MemDesc structure.
+******************************************************************************/
+void DevmemMemDescDiscard(DEVMEM_MEMDESC *psMemDesc);
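The Alloc/Init/Discard/Release contract described above amounts to the following sketch (illustrative only, not code from the patch; psImport, uiOffset and uiAllocSize are assumed to come from the caller, and SomeHypotheticalSetupStep() is a placeholder for any step that can fail before Init):

    DEVMEM_MEMDESC *psMemDesc = NULL;
    PVRSRV_ERROR eError;

    eError = DevmemMemDescAlloc(&psMemDesc);
    if (eError != PVRSRV_OK)
    {
        return eError;
    }

    eError = SomeHypotheticalSetupStep();    /* hypothetical failure point */
    if (eError != PVRSRV_OK)
    {
        /* Created but never initialised: Discard, never Release */
        DevmemMemDescDiscard(psMemDesc);
        return eError;
    }

    /* Refcount starts at 1; pair this with DevmemMemDescRelease() */
    DevmemMemDescInit(psMemDesc, uiOffset, psImport, uiAllocSize);

    /* ... use the MemDesc ... */

    (void) DevmemMemDescRelease(psMemDesc);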
+
+
+/******************************************************************************
+@Function       GetImportProperties
+@Description    Atomically reads psImport->uiProperties.
+                Another thread may modify uiProperties immediately after this
+                function returns, making the result stale, so it is
+                recommended to use this function only to check whether
+                certain non-volatile flags were set.
+******************************************************************************/
+static INLINE DEVMEM_PROPERTIES_T GetImportProperties(DEVMEM_IMPORT *psImport)
+{
+       DEVMEM_PROPERTIES_T uiProperties;
+
+       OSLockAcquire(psImport->hLock);
+       uiProperties = psImport->uiProperties;
+       OSLockRelease(psImport->hLock);
+       return uiProperties;
+}
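For example (an illustrative sketch; psImport is assumed to be a valid DEVMEM_IMPORT pointer), a caller would snapshot the properties once and then test a stable flag such as DEVMEM_PROPERTIES_SECURE:

    /* Snapshot once; the value may go stale, so only test stable flags */
    DEVMEM_PROPERTIES_T uiProps = GetImportProperties(psImport);

    if (uiProps & DEVMEM_PROPERTIES_SECURE)
    {
        /* e.g. refuse an operation that is not allowed on secure buffers */
    }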
+
+/******************************************************************************
+@Function       DevmemCPUMemSet
+@Description    Given a CPU-mapped Devmem address, sets the memory in the
+                range (address, address + size) to the uiPattern provided.
+                The flags determine which OS-abstracted MemSet method to use.
+******************************************************************************/
+static INLINE void DevmemCPUMemSet(void *pvMem,
+                                   IMG_UINT8 uiPattern,
+                                   IMG_DEVMEM_SIZE_T uiSize,
+                                   PVRSRV_MEMALLOCFLAGS_T uiFlags)
+{
+       if (PVRSRV_CHECK_CPU_UNCACHED(uiFlags))
+       {
+               OSDeviceMemSet(pvMem, uiPattern, uiSize);
+       }
+       else
+       {
+               /* it's safe to use OSCachedMemSet() for cached and wc memory */
+               OSCachedMemSet(pvMem, uiPattern, uiSize);
+       }
+}
+
+/******************************************************************************
+@Function       DevmemCPUMapCheckImportProperties
+@Description    Given a MemDesc, checks that the import properties allow
+                mapping the MemDesc to the CPU.
+                Returns PVRSRV_OK on success.
+******************************************************************************/
+static INLINE PVRSRV_ERROR DevmemCPUMapCheckImportProperties(DEVMEM_MEMDESC *psMemDesc)
+{
+       DEVMEM_PROPERTIES_T uiProperties = GetImportProperties(psMemDesc->psImport);
+
+       if (uiProperties &
+                       (DEVMEM_PROPERTIES_UNPINNED | DEVMEM_PROPERTIES_SECURE))
+       {
+#if defined(SUPPORT_SECURITY_VALIDATION)
+               if (uiProperties & DEVMEM_PROPERTIES_SECURE)
+               {
+                       PVR_DPF((PVR_DBG_WARNING,
+                                       "%s: Allocation is a secure buffer. "
+                                       "It should not be possible to map to CPU, but for security "
+                                       "validation this will be allowed for testing purposes, "
+                                       "as long as the buffer is pinned.",
+                                       __func__));
+               }
+
+               if (uiProperties & DEVMEM_PROPERTIES_UNPINNED)
+#endif
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "%s: Allocation is currently unpinned or a secure buffer. "
+                                       "Not possible to map to CPU!",
+                                       __func__));
+                       return PVRSRV_ERROR_INVALID_MAP_REQUEST;
+               }
+       }
+
+       if (uiProperties & DEVMEM_PROPERTIES_NO_CPU_MAPPING)
+       {
+               PVR_DPF((PVR_DBG_ERROR,
+                               "%s: CPU Mapping is not possible on this allocation!",
+                               __func__));
+               return PVRSRV_ERROR_INVALID_MAP_REQUEST;
+       }
+
+       return PVRSRV_OK;
+}
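A plausible CPU-mapping path (sketch only; psMemDesc is assumed to be an initialised MemDesc) combines this check with DevmemImportStructCPUMap():

    PVRSRV_ERROR eError;

    eError = DevmemCPUMapCheckImportProperties(psMemDesc);
    if (eError != PVRSRV_OK)
    {
        return eError;
    }

    /* Properties allow it: create (or refcount) the CPU mapping */
    eError = DevmemImportStructCPUMap(psMemDesc->psImport);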
+
+#endif /* DEVICEMEM_UTILS_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/include/devicememx.h b/drivers/gpu/drm/img/img-rogue/services/shared/include/devicememx.h
new file mode 100644 (file)
index 0000000..769d2dc
--- /dev/null
@@ -0,0 +1,223 @@
+/*************************************************************************/ /*!
+@File
+@Title          X Device Memory Management core internal
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Internal interface for extended device memory management.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef DEVICEMEMX_H
+#define DEVICEMEMX_H
+
+#include "img_types.h"
+#include "devicemem_typedefs.h"
+#include "devicemem_utils.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "osfunc.h"
+
+/* DevmemXAllocPhysical()
+ *
+ * Allocate physical device memory and return a physical
+ * descriptor for it.
+ */
+PVRSRV_ERROR
+DevmemXAllocPhysical(DEVMEM_CONTEXT *psCtx,
+                    IMG_UINT32 uiNumPages,
+                    IMG_UINT32 uiLog2PageSize,
+                    PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                    const IMG_CHAR *pszText,
+                    DEVMEMX_PHYSDESC **ppsPhysDesc);
+
+/* DevmemXReleasePhysical()
+ *
+ * Removes a physical device allocation if all references
+ * to it are dropped, otherwise just decreases the refcount.
+ */
+void
+DevmemXReleasePhysical(DEVMEMX_PHYSDESC *psPhysDesc);
+
+/* DevmemXAllocVirtualAddr()
+ *
+ * Reserve a requested device virtual range and return
+ * a virtual descriptor for it.
+ */
+IMG_INTERNAL PVRSRV_ERROR
+DevmemXAllocVirtualAddr(DEVMEM_HEAP* hHeap,
+                   IMG_UINT32 uiNumPages,
+                   PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                   const IMG_CHAR *pszText,
+                   IMG_DEV_VIRTADDR sVirtAddr,
+                   DEVMEMX_VIRTDESC **ppsVirtDesc);
+
+/* DevmemXAllocVirtual()
+ *
+ * Allocate and reserve a device virtual range and return
+ * a virtual descriptor for it.
+ */
+PVRSRV_ERROR
+DevmemXAllocVirtual(DEVMEM_HEAP* hHeap,
+                   IMG_UINT32 uiNumPages,
+                   PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                   const IMG_CHAR *pszText,
+                   DEVMEMX_VIRTDESC **ppsVirtDesc,
+                   IMG_DEV_VIRTADDR *psVirtAddr);
+
+/* DevmemXFreeVirtual()
+ *
+ * Removes a device virtual range if all mappings on it
+ * have been removed.
+ */
+PVRSRV_ERROR
+DevmemXFreeVirtual(DEVMEMX_VIRTDESC *psVirtDesc);
+
+/* DevmemXMapVirtualRange()
+ *
+ * Map memory from a physical descriptor into a virtual range.
+ */
+PVRSRV_ERROR
+DevmemXMapVirtualRange(IMG_UINT32 ui32PageCount,
+                      DEVMEMX_PHYSDESC *psPhysDesc,
+                      IMG_UINT32 ui32PhysOffset,
+                      DEVMEMX_VIRTDESC *psVirtDesc,
+                      IMG_UINT32 ui32VirtOffset);
+
+/* DevmemXUnmapVirtualRange()
+ *
+ * Unmap pages from a device virtual range.
+ */
+PVRSRV_ERROR
+DevmemXUnmapVirtualRange(IMG_UINT32 ui32PageCount,
+                        DEVMEMX_VIRTDESC *psVirtDesc,
+                        IMG_UINT32 ui32VirtPgOffset);
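Taken together, the allocation, mapping and teardown entry points above would typically be used along these lines (a sketch under assumed values: psCtx and hHeap come from the caller, uiFlags stands for suitable PVRSRV_MEMALLOCFLAGS_T values, and 4 pages of 2^12 bytes are arbitrary):

    DEVMEMX_PHYSDESC *psPhysDesc = NULL;
    DEVMEMX_VIRTDESC *psVirtDesc = NULL;
    IMG_DEV_VIRTADDR sDevVAddr;
    PVRSRV_ERROR eError;

    eError = DevmemXAllocPhysical(psCtx, 4, 12, uiFlags,
                                  "example-phys", &psPhysDesc);
    /* ... check eError ... */

    eError = DevmemXAllocVirtual(hHeap, 4, uiFlags,
                                 "example-virt", &psVirtDesc, &sDevVAddr);
    /* ... check eError ... */

    /* Map all 4 pages of the physical descriptor at the start of the range */
    eError = DevmemXMapVirtualRange(4, psPhysDesc, 0, psVirtDesc, 0);

    /* ... use the memory at sDevVAddr ... */

    (void) DevmemXUnmapVirtualRange(4, psVirtDesc, 0);
    (void) DevmemXFreeVirtual(psVirtDesc);
    DevmemXReleasePhysical(psPhysDesc);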
+
+/* DevmemXMapPhysicalToCPU()
+ *
+ * Map a full physical descriptor to CPU space.
+ */
+PVRSRV_ERROR
+DevmemXMapPhysicalToCPU(DEVMEMX_PHYSDESC *psMemAllocPhys,
+                       IMG_CPU_VIRTADDR *psVirtAddr);
+
+/* DevmemXUnmapPhysicalToCPU()
+ *
+ * Remove the CPU mapping from the descriptor.
+ */
+PVRSRV_ERROR
+DevmemXUnmapPhysicalToCPU(DEVMEMX_PHYSDESC *psMemAllocPhys);
+
+/* DevmemXReacquireCpuVirtAddr()
+ *
+ * Reacquire the CPU mapping by incrementing the refcount.
+ */
+void
+DevmemXReacquireCpuVirtAddr(DEVMEMX_PHYSDESC *psPhysDesc,
+                            void **ppvCpuVirtAddr);
+
+/* DevmemXReleaseCpuVirtAddr()
+ *
+ * Release CPU mapping by decrementing the refcount.
+ */
+void
+DevmemXReleaseCpuVirtAddr(DEVMEMX_PHYSDESC *psPhysDesc);
+
+/* DevmemXCreateDevmemMemDescVA()
+ *
+ * (Deprecated)
+ *
+ * Create a devmem memdesc from a virtual address.
+ * Always destroy with DevmemXFreeDevmemMemDesc().
+ */
+
+PVRSRV_ERROR
+DevmemXCreateDevmemMemDescVA(const IMG_DEV_VIRTADDR sVirtualAddress,
+                             DEVMEM_MEMDESC **ppsMemDesc);
+
+/* DevmemXCreateDevmemMemDesc()
+ *
+ * Create a devmem memdesc from a physical and
+ * virtual descriptor.
+ * Always destroy with DevmemXFreeDevmemMemDesc().
+ */
+
+PVRSRV_ERROR
+DevmemXCreateDevmemMemDesc(DEVMEMX_PHYSDESC *psPhysDesc,
+                           DEVMEMX_VIRTDESC *psVirtDesc,
+                           DEVMEM_MEMDESC **ppsMemDesc);
+
+/* DevmemXFreeDevmemMemDesc()
+ *
+ * Free the memdesc. This has no impact on the underlying
+ * physical and virtual descriptors.
+ */
+PVRSRV_ERROR
+DevmemXFreeDevmemMemDesc(DEVMEM_MEMDESC *psMemDesc);
+
+PVRSRV_ERROR
+DevmemXFlagCompatibilityCheck(PVRSRV_MEMALLOCFLAGS_T uiPhysFlags,
+                              PVRSRV_MEMALLOCFLAGS_T uiVirtFlags);
+
+PVRSRV_ERROR
+DevmemXPhysDescAlloc(DEVMEMX_PHYSDESC **ppsPhysDesc);
+
+void
+DevmemXPhysDescInit(DEVMEMX_PHYSDESC *psPhysDesc,
+                    IMG_HANDLE hPMR,
+                    IMG_UINT32 uiNumPages,
+                    IMG_UINT32 uiLog2PageSize,
+                    PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                    IMG_HANDLE hBridge);
+
+void
+DevmemXPhysDescFree(DEVMEMX_PHYSDESC *psPhysDesc);
+
+void
+DevmemXPhysDescAcquire(DEVMEMX_PHYSDESC *psPhysDesc,
+                       IMG_UINT32 uiAcquireCount);
+void
+DevmemXPhysDescRelease(DEVMEMX_PHYSDESC *psPhysDesc,
+                       IMG_UINT32 uiReleaseCount);
+
+#if !defined(__KERNEL__)
+IMG_INTERNAL PVRSRV_ERROR
+DevmemXGetImportUID(DEVMEMX_PHYSDESC *psMemDescPhys,
+                    IMG_UINT64       *pui64UID);
+#endif
+
+#endif /* DEVICEMEMX_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/include/devicememx_pdump.h b/drivers/gpu/drm/img/img-rogue/services/shared/include/devicememx_pdump.h
new file mode 100644 (file)
index 0000000..b6e99f7
--- /dev/null
@@ -0,0 +1,81 @@
+/*************************************************************************/ /*!
+@File
+@Title          X Device Memory Management PDump internal
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Services internal interface to PDump device memory management
+                functions that are shared between client and server code.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef DEVICEMEMX_PDUMP_H
+#define DEVICEMEMX_PDUMP_H
+
+#include "devicememx.h"
+#include "pdumpdefs.h"
+#include "pdump.h"
+
+#if defined(PDUMP)
+/*
+ * DevmemXPDumpLoadMem()
+ *
+ * Same as DevmemPDumpLoadMem().
+ */
+IMG_INTERNAL void
+DevmemXPDumpLoadMem(DEVMEMX_PHYSDESC *psMemDescPhys,
+                    IMG_DEVMEM_OFFSET_T uiOffset,
+                    IMG_DEVMEM_SIZE_T uiSize,
+                    PDUMP_FLAGS_T uiPDumpFlags);
+#else
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemXPDumpLoadMem)
+#endif
+
+static INLINE void
+DevmemXPDumpLoadMem(DEVMEMX_PHYSDESC *psMemDescPhys,
+                    IMG_DEVMEM_OFFSET_T uiOffset,
+                    IMG_DEVMEM_SIZE_T uiSize,
+                    PDUMP_FLAGS_T uiPDumpFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(psMemDescPhys);
+       PVR_UNREFERENCED_PARAMETER(uiOffset);
+       PVR_UNREFERENCED_PARAMETER(uiSize);
+       PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+}
+#endif /* PDUMP */
+#endif /* DEVICEMEMX_PDUMP_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/include/hash.h b/drivers/gpu/drm/img/img-rogue/services/shared/include/hash.h
new file mode 100644 (file)
index 0000000..92d4899
--- /dev/null
@@ -0,0 +1,247 @@
+/*************************************************************************/ /*!
+@File
+@Title          Self scaling hash tables
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements simple self scaling hash tables.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef HASH_H
+#define HASH_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * Keys passed to the comparison function are only guaranteed to be aligned on
+ * an uintptr_t boundary.
+ */
+typedef IMG_UINT32 HASH_FUNC(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen);
+typedef IMG_BOOL HASH_KEY_COMP(size_t uKeySize, void *pKey1, void *pKey2);
+
+typedef struct _HASH_TABLE_ HASH_TABLE;
+
+typedef PVRSRV_ERROR (*HASH_pfnCallback) (
+       uintptr_t k,
+       uintptr_t v,
+       void* pvPriv
+);
+
+#if defined(DEBUG)
+#else
+#define HASH_CREATE(LEN)               HASH_Create(LEN)
+#endif
+
+/*************************************************************************/ /*!
+@Function       HASH_Func_Default
+@Description    Hash function intended for hashing keys composed of uintptr_t
+                arrays.
+@Input          uKeySize     The size of the hash key, in bytes.
+@Input          pKey         A pointer to the key to hash.
+@Input          uHashTabLen  The length of the hash table.
+@Return         The hash value.
+*/ /**************************************************************************/
+IMG_UINT32 HASH_Func_Default(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen);
+
+/*************************************************************************/ /*!
+@Function       HASH_Key_Comp_Default
+@Description    Compares keys composed of uintptr_t arrays.
+@Input          uKeySize     The size of the hash key, in bytes.
+@Input          pKey1        Pointer to first hash key to compare.
+@Input          pKey2        Pointer to second hash key to compare.
+@Return         IMG_TRUE  - The keys match.
+                IMG_FALSE - The keys don't match.
+*/ /**************************************************************************/
+IMG_BOOL HASH_Key_Comp_Default(size_t uKeySize, void *pKey1, void *pKey2);
+
+/*************************************************************************/ /*!
+@Function       HASH_Create_Extended
+@Description    Create a self scaling hash table, using the supplied key size,
+                and the supplied hash and key comparison functions.
+@Input          uInitialLen  Initial and minimum length of the hash table,
+                             where the length refers to the number of entries
+                             in the hash table, not its size in bytes.
+@Input          uKeySize     The size of the key, in bytes.
+@Input          pfnHashFunc  Pointer to hash function.
+@Input          pfnKeyComp   Pointer to key comparison function.
+@Return         NULL or hash table handle.
+*/ /**************************************************************************/
+HASH_TABLE * HASH_Create_Extended_Int(IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp);
+#if defined(DEBUG)
+#define HASH_Create_Extended(LEN, KS, FUN, CMP)                HASH_Create_Extended_Debug(LEN, KS, FUN, CMP, __FILE__, __LINE__)
+HASH_TABLE * HASH_Create_Extended_Debug (IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp,
+                                                                                const char *file, const unsigned int line);
+#else
+#define HASH_Create_Extended   HASH_Create_Extended_Int
+#endif
+
+/*************************************************************************/ /*!
+@Function       HASH_Create
+@Description    Create a self scaling hash table with a key consisting of a
+                single uintptr_t, and using the default hash and key
+                comparison functions.
+@Input          uInitialLen  Initial and minimum length of the hash table,
+                             where the length refers to the number of entries
+                             in the hash table, not its size in bytes.
+@Return         NULL or hash table handle.
+*/ /**************************************************************************/
+HASH_TABLE * HASH_Create_Int(IMG_UINT32 uInitialLen);
+#if defined(DEBUG)
+#define HASH_Create(LEN)               HASH_Create_Debug(LEN, __FILE__, __LINE__)
+HASH_TABLE * HASH_Create_Debug (IMG_UINT32 uInitialLen, const char *file, const unsigned int line);
+#else
+#define HASH_Create                            HASH_Create_Int
+#endif
+
+/*************************************************************************/ /*!
+@Function       HASH_Delete_Extended
+@Description    Delete a hash table created by HASH_Create_Extended or
+                HASH_Create. All entries in the table should have been removed
+                before calling this function.
+@Input          pHash        Hash table
+@Input          bWarn        Set false to suppress warnings in the case of
+                             deletion with active entries.
+@Return         None
+*/ /**************************************************************************/
+void HASH_Delete_Extended(HASH_TABLE *pHash, IMG_BOOL bWarn);
+
+/*************************************************************************/ /*!
+@Function       HASH_Delete
+@Description    Delete a hash table created by HASH_Create_Extended or
+                HASH_Create. All entries in the table must have been removed
+                before calling this function.
+@Input          pHash        Hash table
+@Return         None
+*/ /**************************************************************************/
+void HASH_Delete(HASH_TABLE *pHash);
+
+/*************************************************************************/ /*!
+@Function       HASH_Insert_Extended
+@Description    Insert a key value pair into a hash table created with
+                HASH_Create_Extended.
+@Input          pHash        The hash table.
+@Input          pKey         Pointer to the key.
+@Input          v            The value associated with the key.
+@Return         IMG_TRUE - success.
+                IMG_FALSE - failure.
+*/ /**************************************************************************/
+IMG_BOOL HASH_Insert_Extended(HASH_TABLE *pHash, void *pKey, uintptr_t v);
+
+/*************************************************************************/ /*!
+@Function       HASH_Insert
+@Description    Insert a key value pair into a hash table created with
+                HASH_Create.
+@Input          pHash        The hash table.
+@Input          k            The key value.
+@Input          v            The value associated with the key.
+@Return         IMG_TRUE - success.
+                IMG_FALSE - failure.
+*/ /**************************************************************************/
+IMG_BOOL HASH_Insert(HASH_TABLE *pHash, uintptr_t k, uintptr_t v);
+
+/*************************************************************************/ /*!
+@Function       HASH_Remove_Extended
+@Description    Remove a key from a hash table created with
+                HASH_Create_Extended.
+@Input          pHash        The hash table.
+@Input          pKey         Pointer to key.
+@Return         0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+uintptr_t HASH_Remove_Extended(HASH_TABLE *pHash, void *pKey);
+
+/*************************************************************************/ /*!
+@Function       HASH_Remove
+@Description    Remove a key value pair from a hash table created with
+                HASH_Create.
+@Input          pHash        The hash table.
+@Input          k            The key value.
+@Return         0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+uintptr_t HASH_Remove(HASH_TABLE *pHash, uintptr_t k);
+
+/*************************************************************************/ /*!
+@Function       HASH_Retrieve_Extended
+@Description    Retrieve a value from a hash table created with
+                HASH_Create_Extended.
+@Input          pHash        The hash table.
+@Input          pKey         Pointer to key.
+@Return         0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+uintptr_t HASH_Retrieve_Extended(HASH_TABLE *pHash, void *pKey);
+
+/*************************************************************************/ /*!
+@Function       HASH_Retrieve
+@Description    Retrieve a value from a hash table created with HASH_Create.
+@Input          pHash        The hash table.
+@Input          k            The key value.
+@Return         0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+uintptr_t HASH_Retrieve(HASH_TABLE *pHash, uintptr_t k);
+
+/*************************************************************************/ /*!
+@Function       HASH_Iterate
+@Description    Iterate over every entry in the hash table.
+@Input          pHash        Hash table to iterate.
+@Input          pfnCallback  Callback to call with the key and data for each
+                             entry in the hash table.
+@Input          args         Private data passed through to the callback.
+@Return         Callback error if any, otherwise PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback, void* args);
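A short illustrative sketch (not part of the patch) of the default-key API, including an iterate callback; _CountEntry and _HashExample are hypothetical names:

    static PVRSRV_ERROR _CountEntry(uintptr_t k, uintptr_t v, void *pvPriv)
    {
        IMG_UINT32 *puiCount = pvPriv;
        PVR_UNREFERENCED_PARAMETER(k);
        PVR_UNREFERENCED_PARAMETER(v);
        (*puiCount)++;
        return PVRSRV_OK;
    }

    static void _HashExample(void)
    {
        HASH_TABLE *psTable = HASH_Create(16);   /* initial length: 16 entries */
        IMG_UINT32 uiCount = 0;

        if (psTable == NULL)
        {
            return;
        }

        if (HASH_Insert(psTable, (uintptr_t) 0x1000, (uintptr_t) 42))
        {
            uintptr_t uiVal = HASH_Retrieve(psTable, (uintptr_t) 0x1000); /* 42 */
            (void) uiVal;
            (void) HASH_Iterate(psTable, _CountEntry, &uiCount);   /* visits 1 entry */
            (void) HASH_Remove(psTable, (uintptr_t) 0x1000);
        }

        /* All entries must have been removed before deletion */
        HASH_Delete(psTable);
    }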
+
+#ifdef HASH_TRACE
+/*************************************************************************/ /*!
+@Function       HASH_Dump
+@Description    Dump out some information about a hash table.
+@Input          pHash         The hash table.
+*/ /**************************************************************************/
+void HASH_Dump(HASH_TABLE *pHash);
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* HASH_H */
+
+/******************************************************************************
+ End of file (hash.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/include/htbuffer.h b/drivers/gpu/drm/img/img-rogue/services/shared/include/htbuffer.h
new file mode 100644 (file)
index 0000000..4e9c65c
--- /dev/null
@@ -0,0 +1,135 @@
+/*************************************************************************/ /*!
+@File           htbuffer.h
+@Title          Host Trace Buffer shared API.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Host Trace Buffer provides a mechanism to log Host events to a
+                buffer in a similar way to the Firmware Trace mechanism.
+                Host Trace Buffer logs data using a Transport Layer buffer.
+                The Transport Layer and pvrtld tool provides the mechanism to
+                retrieve the trace data.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef HTBUFFER_H
+#define HTBUFFER_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "htbuffer_sf.h"
+#include "htbuffer_types.h"
+#include "htbuffer_init.h"
+
+#if defined(__KERNEL__)
+#define HTBLOGK(SF, args...) do { if (HTB_GROUP_ENABLED(SF)) HTBLogSimple((IMG_HANDLE) NULL, SF, ## args); } while (0)
+
+/* Host Trace Buffer name */
+#define HTB_STREAM_NAME        "PVRHTBuffer"
+
+#else
+#define HTBLOG(handle, SF, args...) do { if (HTB_GROUP_ENABLED(SF)) HTBLogSimple(handle, SF, ## args); } while (0)
+#endif
+
+/* macros to cast 64 or 32-bit pointers into 32-bit integer components for Host Trace */
+#define HTBLOG_PTR_BITS_HIGH(p) ((IMG_UINT32)((((IMG_UINT64)((uintptr_t)p))>>32)&0xffffffff))
+#define HTBLOG_PTR_BITS_LOW(p)  ((IMG_UINT32)(((IMG_UINT64)((uintptr_t)p))&0xffffffff))
+
+/* macros to cast 64-bit integers into 32-bit integer components for Host Trace */
+#define HTBLOG_U64_BITS_HIGH(u) ((IMG_UINT32)((u>>32)&0xffffffff))
+#define HTBLOG_U64_BITS_LOW(u)  ((IMG_UINT32)(u&0xffffffff))
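For instance (illustrative only), a 64-bit value or pointer is split into two 32-bit trace words like this:

    IMG_UINT64 ui64Value = 0x0123456789ABCDEFULL;

    IMG_UINT32 ui32Hi = HTBLOG_U64_BITS_HIGH(ui64Value);   /* 0x01234567 */
    IMG_UINT32 ui32Lo = HTBLOG_U64_BITS_LOW(ui64Value);    /* 0x89ABCDEF */

    void *pvPtr = &ui64Value;
    IMG_UINT32 ui32PtrHi = HTBLOG_PTR_BITS_HIGH(pvPtr);    /* upper 32 bits of the address */
    IMG_UINT32 ui32PtrLo = HTBLOG_PTR_BITS_LOW(pvPtr);     /* lower 32 bits of the address */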
+
+/*************************************************************************/ /*!
+ @Function      HTBLog
+ @Description   Record a Host Trace Buffer log event
+
+ @Input         PID             The PID of the process the event is associated
+                                with. This is provided as an argument rather
+                                than being queried internally, so that events
+                                associated with one process but performed by
+                                another can be logged correctly.
+
+ @Input         TID             The TID (Thread ID) of the thread the event is
+                                associated with.
+
+ @Input         TimeStampns     The timestamp in ns for this event
+
+ @Input         SF              The log event ID
+
+ @Input         ...             Log parameters
+
+ @Return        PVRSRV_OK       Success.
+
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBLog(IMG_HANDLE hSrvHandle, IMG_UINT32 PID, IMG_UINT32 TID, IMG_UINT64 ui64TimeStampns, IMG_UINT32 SF, ...);
+
+
+/*************************************************************************/ /*!
+ @Function      HTBLogSimple
+ @Description   Record a Host Trace Buffer log event with implicit PID and Timestamp
+
+ @Input         SF              The log event ID
+
+ @Input         ...             Log parameters
+
+ @Return        PVRSRV_OK       Success.
+
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBLogSimple(IMG_HANDLE hSrvHandle, IMG_UINT32 SF, ...);
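In kernel code the HTBLOGK() wrapper above is the usual entry point; a hedged sketch follows (HTB_SF_EXAMPLE is a hypothetical event ID, real IDs live in htbuffer_sf.h):

    IMG_UINT64 ui64DevVAddr = 0xDEADBEEF00ULL;   /* example payload */

    /* HTB_SF_EXAMPLE is hypothetical. The macro expands to: if the group
     * for the event is enabled, call HTBLogSimple(NULL, SF, hi, lo). */
    HTBLOGK(HTB_SF_EXAMPLE,
            HTBLOG_U64_BITS_HIGH(ui64DevVAddr),
            HTBLOG_U64_BITS_LOW(ui64DevVAddr));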
+
+
+
+/*  DEBUG log group enable */
+#if !defined(HTB_DEBUG_LOG_GROUP)
+#undef HTB_LOG_TYPE_DBG    /* No trace statements in this log group should be checked in */
+#define HTB_LOG_TYPE_DBG    __BUILDERROR__
+#endif
+
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* HTBUFFER_H */
+/*****************************************************************************
+ End of file (htbuffer.h)
+*****************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/include/htbuffer_init.h b/drivers/gpu/drm/img/img-rogue/services/shared/include/htbuffer_init.h
new file mode 100644 (file)
index 0000000..d114579
--- /dev/null
@@ -0,0 +1,114 @@
+/*************************************************************************/ /*!
+@File           htbuffer_init.h
+@Title          Host Trace Buffer functions needed for Services initialisation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef HTBUFFER_INIT_H
+#define HTBUFFER_INIT_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include "img_types.h"
+#include "img_defs.h"
+
+/*************************************************************************/ /*!
+ @Function      HTBConfigure
+ @Description   Configure the Host Trace Buffer.
+                Once these parameters are set they may not be changed.
+
+ @Input         hSrvHandle      Server Handle
+
+ @Input         pszBufferName   Name to use for the TL buffer; this name is
+                                required when requesting trace data from the TL
+
+ @Input         ui32BufferSize  Requested TL buffer size in bytes
+
+ @Return        eError          Error code returned by the internal Services
+                                call
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBConfigure(
+       IMG_HANDLE hSrvHandle,
+       IMG_CHAR * pszBufferName,
+       IMG_UINT32 ui32BufferSize
+);
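A usage sketch (illustrative; hSrvHandle is assumed to be a valid Services connection, and the buffer name and 64 KiB size are arbitrary example values):

    IMG_CHAR acName[] = "PVRHTBExample";   /* example buffer name */
    PVRSRV_ERROR eError;

    eError = HTBConfigure(hSrvHandle, acName, 64 * 1024);
    if (eError != PVRSRV_OK)
    {
        /* the TL buffer could not be created with this name/size */
    }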
+
+/*************************************************************************/ /*!
+ @Function      HTBControl
+ @Description   Update the configuration of the Host Trace Buffer
+
+ @Input         hSrvHandle      Server Handle
+
+ @Input         ui32NumFlagGroups Number of group-enable flag words
+
+ @Input         aui32GroupEnable  Flag words controlling which groups are logged
+
+ @Input         ui32LogLevel    Log level to record
+
+ @Input         ui32EnablePID   PID to enable logging for a specific process
+
+ @Input         eLogMode        Enable logging for all processes or for
+                                specific processes only
+
+ @Input         eOpMode         Control what trace data is dropped if the TL
+                                buffer is full
+
+ @Return        eError          Error code returned by the internal Services
+                                call
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBControl(
+       IMG_HANDLE hSrvHandle,
+       IMG_UINT32 ui32NumFlagGroups,
+       IMG_UINT32 * aui32GroupEnable,
+       IMG_UINT32 ui32LogLevel,
+       IMG_UINT32 ui32EnablePID,
+       HTB_LOGMODE_CTRL eLogMode,
+       HTB_OPMODE_CTRL eOpMode
+);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* HTBUFFER_INIT_H */
+/*****************************************************************************
+ End of file (htbuffer_init.h)
+*****************************************************************************/
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/include/lock.h b/drivers/gpu/drm/img/img-rogue/services/shared/include/lock.h
new file mode 100644 (file)
index 0000000..3ef7821
--- /dev/null
@@ -0,0 +1,431 @@
+/*************************************************************************/ /*!
+@File           lock.h
+@Title          Locking interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Services internal locking interface
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef LOCK_H
+#define LOCK_H
+
+/* In Linux kernel mode we are using the kernel mutex implementation directly
+ * with macros. This allows us to use the kernel lockdep feature for lock
+ * debugging. */
+#include "lock_types.h"
+
+#if defined(__linux__) && defined(__KERNEL__)
+
+#include "allocmem.h"
+#include <linux/atomic.h>
+
+#define OSLockCreateNoStats(phLock) ({ \
+       PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \
+       *(phLock) = OSAllocMemNoStats(sizeof(struct mutex)); \
+       if (*(phLock)) { mutex_init(*(phLock)); e = PVRSRV_OK; }; \
+       e;})
+#define OSLockCreate(phLock) ({ \
+       PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \
+       *(phLock) = OSAllocMem(sizeof(struct mutex)); \
+       if (*(phLock)) { mutex_init(*(phLock)); e = PVRSRV_OK; }; \
+       e;})
+#define OSLockDestroy(hLock) ({mutex_destroy((hLock)); OSFreeMem((hLock));})
+#define OSLockDestroyNoStats(hLock) ({mutex_destroy((hLock)); OSFreeMemNoStats((hLock));})
+
+#define OSLockAcquire(hLock) ({mutex_lock((hLock));})
+#define OSLockAcquireNested(hLock, subclass) ({mutex_lock_nested((hLock), (subclass));})
+#define OSLockRelease(hLock) ({mutex_unlock((hLock));})
+
+#define OSLockIsLocked(hLock) ((mutex_is_locked((hLock)) == 1) ? IMG_TRUE : IMG_FALSE)
+#define OSTryLockAcquire(hLock) ((mutex_trylock(hLock) == 1) ? IMG_TRUE : IMG_FALSE)
+
+#define OSSpinLockCreate(_ppsLock) ({ \
+       PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \
+       *(_ppsLock) = OSAllocMem(sizeof(spinlock_t)); \
+       if (*(_ppsLock)) {spin_lock_init(*(_ppsLock)); e = PVRSRV_OK;} \
+       e;})
+#define OSSpinLockDestroy(_psLock) ({OSFreeMem(_psLock);})
+
+typedef unsigned long OS_SPINLOCK_FLAGS;
+#define OSSpinLockAcquire(_pLock, _flags) spin_lock_irqsave(_pLock, _flags)
+#define OSSpinLockRelease(_pLock, _flags) spin_unlock_irqrestore(_pLock, _flags)
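On the Linux kernel side these macros reduce to mutex calls; a typical mutex lifecycle (sketch only, using the POS_LOCK handle type declared in lock_types.h) looks like:

    POS_LOCK hLock;

    if (OSLockCreate(&hLock) == PVRSRV_OK)
    {
        OSLockAcquire(hLock);
        /* ... critical section protected by the mutex ... */
        OSLockRelease(hLock);

        OSLockDestroy(hLock);
    }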
+
+/* These _may_ be reordered or optimized away entirely by the compiler/hw */
+#define OSAtomicRead(pCounter) atomic_read(pCounter)
+#define OSAtomicWrite(pCounter, i)     atomic_set(pCounter, i)
+
+/* The following atomic operations, in addition to being SMP-safe, also
+   imply a memory barrier around the operation  */
+#define OSAtomicIncrement(pCounter) atomic_inc_return(pCounter)
+#define OSAtomicDecrement(pCounter) atomic_dec_return(pCounter)
+#define OSAtomicCompareExchange(pCounter, oldv, newv) atomic_cmpxchg(pCounter,oldv,newv)
+#define OSAtomicExchange(pCounter, iNewVal) atomic_xchg(pCounter, iNewVal)
+
+static inline IMG_INT OSAtomicOr(ATOMIC_T *pCounter, IMG_INT iVal)
+{
+       IMG_INT iOldVal, iLastVal, iNewVal;
+
+       iLastVal = OSAtomicRead(pCounter);
+       do
+       {
+               iOldVal = iLastVal;
+               iNewVal = iOldVal | iVal;
+
+               iLastVal = OSAtomicCompareExchange(pCounter, iOldVal, iNewVal);
+       }
+       while (iOldVal != iLastVal);
+
+       return iNewVal;
+}
+
+#define OSAtomicAdd(pCounter, incr) atomic_add_return(incr,pCounter)
+#define OSAtomicAddUnless(pCounter, incr, test) atomic_add_unless(pCounter, (incr), (test))
+
+#define OSAtomicSubtract(pCounter, incr) atomic_add_return(-(incr),pCounter)
+#define OSAtomicSubtractUnless(pCounter, incr, test) OSAtomicAddUnless(pCounter, -(incr), (test))
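The conditional add/subtract pair lends itself to refcounting; a sketch (sRefCount is an ATOMIC_T assumed to have been initialised to 1 with OSAtomicWrite()):

    /* Take a reference only while the object is still alive (count != 0) */
    if (OSAtomicAddUnless(&sRefCount, 1, 0) != 0)
    {
        /* ... use the object ... */

        if (OSAtomicSubtract(&sRefCount, 1) == 0)
        {
            /* last reference just dropped: tear the object down */
        }
    }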
+
+#else /* defined(__linux__) && defined(__KERNEL__) */
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+/**************************************************************************/ /*!
+@Function       OSLockCreate
+@Description    Creates an operating system lock object.
+@Output         phLock           The created lock.
+@Return         PVRSRV_OK on success. PVRSRV_ERROR_OUT_OF_MEMORY if the driver
+                cannot allocate CPU memory needed for the lock.
+                PVRSRV_ERROR_INIT_FAILURE if the Operating System fails to
+                allocate the lock.
+ */ /**************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR OSLockCreate(POS_LOCK *phLock);
+#if defined(INTEGRITY_OS)
+#define OSLockCreateNoStats OSLockCreate
+#endif
+
+/**************************************************************************/ /*!
+@Function       OSLockDestroy
+@Description    Destroys an operating system lock object.
+@Input          hLock            The lock to be destroyed.
+@Return         None.
+ */ /**************************************************************************/
+IMG_INTERNAL
+void OSLockDestroy(POS_LOCK hLock);
+
+#if defined(INTEGRITY_OS)
+#define OSLockDestroyNoStats OSLockDestroy
+#endif
+/**************************************************************************/ /*!
+@Function       OSLockAcquire
+@Description    Acquires an operating system lock.
+                NB. This function must not return until the lock is acquired
+                (meaning the implementation should not time out or return with
+                an error, as the caller will assume they have the lock).
+@Input          hLock            The lock to be acquired.
+@Return         None.
+ */ /**************************************************************************/
+IMG_INTERNAL
+void OSLockAcquire(POS_LOCK hLock);
+
+/**************************************************************************/ /*!
+@Function       OSTryLockAcquire
+@Description    Try to acquire an operating system lock.
+                NB. If the lock is acquired successfully on the first attempt,
+                the function returns IMG_TRUE; otherwise it returns IMG_FALSE.
+@Input          hLock            The lock to be acquired.
+@Return         IMG_TRUE if lock acquired successfully,
+                IMG_FALSE otherwise.
+ */ /**************************************************************************/
+IMG_INTERNAL
+IMG_BOOL OSTryLockAcquire(POS_LOCK hLock);
+
+/* Nested notation isn't used in UM or on other OSes */
+/**************************************************************************/ /*!
+@Function       OSLockAcquireNested
+@Description    For operating systems other than Linux, this equates to an
+                OSLockAcquire() call. On Linux, this function wraps a call
+                to mutex_lock_nested(). This recognises the scenario where
+                there may be multiple subclasses within a particular class
+                of lock. In such cases, the order in which the locks belonging
+                to these various subclasses are acquired is important and must
+                be validated.
+@Input          hLock            The lock to be acquired.
+@Input          subclass         The subclass of the lock.
+@Return         None.
+ */ /**************************************************************************/
+#define OSLockAcquireNested(hLock, subclass) OSLockAcquire((hLock))
+
+/**************************************************************************/ /*!
+@Function       OSLockRelease
+@Description    Releases an operating system lock.
+@Input          hLock            The lock to be released.
+@Return         None.
+ */ /**************************************************************************/
+IMG_INTERNAL
+void OSLockRelease(POS_LOCK hLock);
+
+/**************************************************************************/ /*!
+@Function       OSLockIsLocked
+@Description    Tests whether or not an operating system lock is currently
+                locked.
+@Input          hLock            The lock to be tested.
+@Return         IMG_TRUE if locked, IMG_FALSE if not locked.
+ */ /**************************************************************************/
+IMG_INTERNAL
+IMG_BOOL OSLockIsLocked(POS_LOCK hLock);
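OSTryLockAcquire() supports an opportunistic pattern such as the following sketch (hLock is assumed to have been created with OSLockCreate()):

    if (OSTryLockAcquire(hLock))
    {
        /* fast path: got the lock on the first attempt */
        /* ... critical section ... */
        OSLockRelease(hLock);
    }
    else
    {
        /* slow path: block until the lock becomes available */
        OSLockAcquire(hLock);
        /* ... critical section ... */
        OSLockRelease(hLock);
    }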
+
+#if defined(__linux__)
+
+/* Use GCC intrinsics (read/write semantics consistent with kernel-side implementation) */
+#define OSAtomicRead(pCounter) (*(volatile IMG_INT32 *)&(pCounter)->counter)
+#define OSAtomicWrite(pCounter, i) ((pCounter)->counter = (IMG_INT32) i)
+#define OSAtomicIncrement(pCounter) __sync_add_and_fetch((&(pCounter)->counter), 1)
+#define OSAtomicDecrement(pCounter) __sync_sub_and_fetch((&(pCounter)->counter), 1)
+#define OSAtomicCompareExchange(pCounter, oldv, newv) \
+       __sync_val_compare_and_swap((&(pCounter)->counter), oldv, newv)
+#define OSAtomicOr(pCounter, iVal) __sync_or_and_fetch((&(pCounter)->counter), iVal)
+
+static inline IMG_UINT32 OSAtomicExchange(ATOMIC_T *pCounter, IMG_UINT32 iNewVal)
+{
+       IMG_UINT32 iOldVal;
+       IMG_UINT32 iLastVal;
+
+       iLastVal = OSAtomicRead(pCounter);
+       do
+       {
+               iOldVal = iLastVal;
+               iLastVal = OSAtomicCompareExchange(pCounter, iOldVal, iNewVal);
+       }
+       while (iOldVal != iLastVal);
+
+       return iOldVal;
+}
+
+#define OSAtomicAdd(pCounter, incr) __sync_add_and_fetch((&(pCounter)->counter), incr)
+#define OSAtomicAddUnless(pCounter, incr, test) ({ \
+       IMG_INT32 c; IMG_INT32 old; \
+       c = OSAtomicRead(pCounter); \
+       while (1) { \
+               if (c == (test)) break; \
+               old = OSAtomicCompareExchange(pCounter, c, c+(incr)); \
+               if (old == c) break; \
+               c = old; \
+       } c; })
+
+#define OSAtomicSubtract(pCounter, incr) OSAtomicAdd(pCounter, -(incr))
+#define OSAtomicSubtractUnless(pCounter, incr, test) OSAtomicAddUnless(pCounter, -(incr), test)
+
+#else
+
+/*************************************************************************/ /*!
+@Function       OSAtomicRead
+@Description    Read the value of a variable atomically.
+                Atomic functions must be implemented in a manner that is
+                symmetric multiprocessor (SMP) safe and places a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to read
+@Return         The value of the atomic variable
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT32 OSAtomicRead(const ATOMIC_T *pCounter);
+
+/*************************************************************************/ /*!
+@Function       OSAtomicWrite
+@Description    Write the value of a variable atomically.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to be written to
+@Input          v               The value to write
+@Return         None
+*/ /**************************************************************************/
+IMG_INTERNAL
+void OSAtomicWrite(ATOMIC_T *pCounter, IMG_INT32 v);
+
+/* In addition to being SMP-safe, the following atomic operations must also
+   have a memory barrier around each operation. */
+/*************************************************************************/ /*!
+@Function       OSAtomicIncrement
+@Description    Increment the value of a variable atomically.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to be incremented
+@Return         The new value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT32 OSAtomicIncrement(ATOMIC_T *pCounter);
+
+/*************************************************************************/ /*!
+@Function       OSAtomicDecrement
+@Description    Decrement the value of a variable atomically.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to be decremented
+@Return         The new value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT32 OSAtomicDecrement(ATOMIC_T *pCounter);
+
+/*************************************************************************/ /*!
+@Function       OSAtomicAdd
+@Description    Add a specified value to a variable atomically.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to add the value to
+@Input          v               The value to be added
+@Return         The new value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT32 OSAtomicAdd(ATOMIC_T *pCounter, IMG_INT32 v);
+
+/*************************************************************************/ /*!
+@Function       OSAtomicAddUnless
+@Description    Add a specified value to a variable atomically unless it
+                already equals a particular value.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to add the value to
+@Input          v               The value to be added to 'pCounter'
+@Input          t               The test value. If 'pCounter' equals this,
+                                its value will not be adjusted
+@Return         The old value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT32 OSAtomicAddUnless(ATOMIC_T *pCounter, IMG_INT32 v, IMG_INT32 t);
+
+/*************************************************************************/ /*!
+@Function       OSAtomicSubtract
+@Description    Subtract a specified value from a variable atomically.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to subtract the value from
+@Input          v               The value to be subtracted
+@Return         The new value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT32 OSAtomicSubtract(ATOMIC_T *pCounter, IMG_INT32 v);
+
+/*************************************************************************/ /*!
+@Function       OSAtomicSubtractUnless
+@Description    Subtract a specified value from a variable atomically unless
+                it already equals a particular value.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to subtract the value from
+@Input          v               The value to be subtracted from 'pCounter'
+@Input          t               The test value. If 'pCounter' equals this,
+                                its value will not be adjusted
+@Return         The old value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT32 OSAtomicSubtractUnless(ATOMIC_T *pCounter, IMG_INT32 v, IMG_INT32 t);
+
+/*************************************************************************/ /*!
+@Function       OSAtomicCompareExchange
+@Description    Set a variable to a given value only if it is currently
+                equal to a specified value. The whole operation must be atomic.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to be checked and
+                                possibly updated
+@Input          oldv            The value the atomic variable must have in
+                                order to be modified
+@Input          newv            The value to write to the atomic variable if
+                                it equals 'oldv'
+@Return         The old value of *pCounter
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT32 OSAtomicCompareExchange(ATOMIC_T *pCounter, IMG_INT32 oldv, IMG_INT32 newv);
+
+/*************************************************************************/ /*!
+@Function       OSAtomicExchange
+@Description    Set a variable to a given value and retrieve previous value.
+                The whole operation must be atomic.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to be updated
+@Input          iNewVal         The value to write to the atomic variable
+@Return         The previous value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT32 OSAtomicExchange(ATOMIC_T *pCounter, IMG_INT32 iNewVal);
+
+/*************************************************************************/ /*!
+@Function       OSAtomicOr
+@Description    Set a variable to the bitwise or of its current value and the
+                specified value. Equivalent to *pCounter |= iVal.
+                The whole operation must be atomic.
+                Atomic functions must be implemented in a manner that
+                is both symmetric multiprocessor (SMP) safe and has a memory
+                barrier around each operation.
+@Input          pCounter        The atomic variable to be updated
+@Input          iVal            The value to bitwise or against
+@Return         The new value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT32 OSAtomicOr(ATOMIC_T *pCounter, IMG_INT32 iVal);
+
+/* For now, spin-locks are required on Linux only, so other platforms fake
+ * spinlocks with normal mutex locks */
+/*! Type definitions for OS_SPINLOCK accessor and creation / deletion */
+typedef unsigned long OS_SPINLOCK_FLAGS;
+/*! Pointer to an OS Spinlock */
+#define POS_SPINLOCK POS_LOCK
+/*! Wrapper for OSLockCreate() */
+#define OSSpinLockCreate(ppLock) OSLockCreate(ppLock)
+/*! Wrapper for OSLockDestroy() */
+#define OSSpinLockDestroy(pLock) OSLockDestroy(pLock)
+/*! Wrapper for OSLockAcquire() */
+#define OSSpinLockAcquire(pLock, flags) {flags = 0; OSLockAcquire(pLock);}
+/*! Wrapper for OSLockRelease() */
+#define OSSpinLockRelease(pLock, flags) {flags = 0; OSLockRelease(pLock);}
+
+#endif /* defined(__linux__) */
+#endif /* defined(__linux__) && defined(__KERNEL__) */
+
+#endif /* LOCK_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/include/osmmap.h b/drivers/gpu/drm/img/img-rogue/services/shared/include/osmmap.h
new file mode 100644 (file)
index 0000000..40a509d
--- /dev/null
@@ -0,0 +1,115 @@
+/*************************************************************************/ /*!
+@File
+@Title          OS Interface for mapping PMRs into CPU space.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS abstraction for the mmap2 interface for mapping PMRs into
+                User Mode memory
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef OSMMAP_H
+#define OSMMAP_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+/*************************************************************************/ /*!
+@Function       OSMMapPMR
+@Description    Maps the specified PMR into CPU memory so that it may be
+                accessed by the user process.
+                Whether the memory is mapped read only, read/write, or not at
+                all, is dependent on the PMR itself.
+                The PMR handle is opaque to the user, and lower levels of this
+                stack ensure that the handle is private to this process, such
+                that this API cannot be abused to gain access to other
+                processes' PMRs. The OS implementation of this function should
+                return the virtual address and length for the User to use. The
+                "PrivData" is to be stored opaquely by the caller (who should
+                make no assumptions about it; in particular, NULL is a valid
+                handle) and given back to the call to OSMUnmapPMR.
+                The OS implementation is free to use the PrivData handle for
+                any purpose it sees fit.
+@Input          hBridge              The bridge handle.
+@Input          hPMR                 The handle of the PMR to be mapped.
+@Input          uiPMRLength          The size of the PMR.
+@Input          uiFlags              Flags indicating how the mapping should
+                                     be done (read-only, etc). These may not
+                                     be honoured if the PMR does not permit
+                                     them.
+@Output         phOSMMapPrivDataOut  Returned private data.
+@Output         ppvMappingAddressOut The returned mapping.
+@Output         puiMappingLengthOut  The size of the returned mapping.
+@Return         PVRSRV_OK on success, failure code otherwise.
+ */ /*************************************************************************/
+PVRSRV_ERROR
+OSMMapPMR(IMG_HANDLE hBridge,
+          IMG_HANDLE hPMR,
+          IMG_DEVMEM_SIZE_T uiPMRLength,
+          PVRSRV_MEMALLOCFLAGS_T uiFlags,
+          IMG_HANDLE *phOSMMapPrivDataOut,
+          void **ppvMappingAddressOut,
+          size_t *puiMappingLengthOut);
+
+/*************************************************************************/ /*!
+@Function       OSMUnmapPMR
+@Description    Unmaps the specified PMR from CPU memory.
+                This function is the counterpart to OSMMapPMR.
+                The caller is required to pass the PMR handle back in along
+                with the same 3-tuple of information (private data, mapping
+                address and mapping length) that was returned by the call to
+                OSMMapPMR.
+                It is possible to unmap only part of the original mapping
+                with this call, by specifying only the address range to be
+                unmapped in pvMappingAddress and uiMappingLength.
+@Input          hBridge              The bridge handle.
+@Input          hPMR                 The handle of the PMR to be unmapped.
+@Input          hOSMMapPrivData      The OS private data of the mapping.
+@Input          pvMappingAddress     The address to be unmapped.
+@Input          uiMappingLength      The size to be unmapped.
+@Return         None.
+ */ /*************************************************************************/
+void
+OSMUnmapPMR(IMG_HANDLE hBridge,
+            IMG_HANDLE hPMR,
+            IMG_HANDLE hOSMMapPrivData,
+            void *pvMappingAddress,
+            size_t uiMappingLength);
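+
+/* Illustrative usage sketch (not part of the original header; the handle and
+ * flag variables are assumptions): map a PMR, access it through the returned
+ * CPU pointer, then unmap it with the same 3-tuple that OSMMapPMR returned.
+ *
+ *   IMG_HANDLE hPrivData;
+ *   void *pvCpuVirtAddr;
+ *   size_t uiLength;
+ *
+ *   if (OSMMapPMR(hBridge, hPMR, uiPMRSize, uiMemAllocFlags,
+ *                 &hPrivData, &pvCpuVirtAddr, &uiLength) == PVRSRV_OK)
+ *   {
+ *       // ... read/write through pvCpuVirtAddr, up to uiLength bytes ...
+ *       OSMUnmapPMR(hBridge, hPMR, hPrivData, pvCpuVirtAddr, uiLength);
+ *   }
+ */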
+
+#endif /* OSMMAP_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/include/proc_stats.h b/drivers/gpu/drm/img/img-rogue/services/shared/include/proc_stats.h
new file mode 100644 (file)
index 0000000..a4e9c78
--- /dev/null
@@ -0,0 +1,135 @@
+/*************************************************************************/ /*!
+@File
+@Title          Process and driver statistic definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PROC_STATS_H
+#define PROC_STATS_H
+
+/* X-Macro for Process stat keys */
+#define PVRSRV_PROCESS_STAT_KEY \
+       X(PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS, "Connections") \
+       X(PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS, "ConnectionsMax") \
+       X(PVRSRV_PROCESS_STAT_TYPE_RC_OOMS, "RenderContextOutOfMemoryEvents") \
+       X(PVRSRV_PROCESS_STAT_TYPE_RC_PRS, "RenderContextPartialRenders") \
+       X(PVRSRV_PROCESS_STAT_TYPE_RC_GROWS, "RenderContextGrows") \
+       X(PVRSRV_PROCESS_STAT_TYPE_RC_PUSH_GROWS, "RenderContextPushGrows") \
+       X(PVRSRV_PROCESS_STAT_TYPE_RC_TA_STORES, "RenderContextTAStores") \
+       X(PVRSRV_PROCESS_STAT_TYPE_RC_3D_STORES, "RenderContext3DStores") \
+       X(PVRSRV_PROCESS_STAT_TYPE_RC_CDM_STORES, "RenderContextCDMStores") \
+       X(PVRSRV_PROCESS_STAT_TYPE_RC_TDM_STORES, "RenderContextTDMStores") \
+       X(PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_APP, "ZSBufferRequestsByApp") \
+       X(PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_FW, "ZSBufferRequestsByFirmware") \
+       X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_APP, "FreeListGrowRequestsByApp") \
+       X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_FW, "FreeListGrowRequestsByFirmware") \
+       X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT, "FreeListInitialPages") \
+       X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_MAX_PAGES, "FreeListMaxPages") \
+       X(PVRSRV_PROCESS_STAT_TYPE_KMALLOC, "MemoryUsageKMalloc") \
+       X(PVRSRV_PROCESS_STAT_TYPE_KMALLOC_MAX, "MemoryUsageKMallocMax") \
+       X(PVRSRV_PROCESS_STAT_TYPE_VMALLOC, "MemoryUsageVMalloc") \
+       X(PVRSRV_PROCESS_STAT_TYPE_VMALLOC_MAX, "MemoryUsageVMallocMax") \
+       X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, "MemoryUsageAllocPTMemoryUMA") \
+       X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA_MAX, "MemoryUsageAllocPTMemoryUMAMax") \
+       X(PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, "MemoryUsageVMapPTUMA") \
+       X(PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA_MAX, "MemoryUsageVMapPTUMAMax") \
+       X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, "MemoryUsageAllocPTMemoryLMA") \
+       X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA_MAX, "MemoryUsageAllocPTMemoryLMAMax") \
+       X(PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, "MemoryUsageIORemapPTLMA") \
+       X(PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA_MAX, "MemoryUsageIORemapPTLMAMax") \
+       X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, "MemoryUsageAllocGPUMemLMA") \
+       X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES_MAX, "MemoryUsageAllocGPUMemLMAMax") \
+       X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, "MemoryUsageAllocGPUMemUMA") \
+       X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES_MAX, "MemoryUsageAllocGPUMemUMAMax") \
+       X(PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, "MemoryUsageMappedGPUMemUMA/LMA") \
+       X(PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES_MAX, "MemoryUsageMappedGPUMemUMA/LMAMax") \
+       X(PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT, "MemoryUsageDmaBufImport") \
+       X(PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT_MAX, "MemoryUsageDmaBufImportMax") \
+       X(PVRSRV_PROCESS_STAT_TYPE_TOTAL, "MemoryUsageTotal") \
+       X(PVRSRV_PROCESS_STAT_TYPE_TOTAL_MAX, "MemoryUsageTotalMax") \
+       X(PVRSRV_PROCESS_STAT_TYPE_OOM_VIRTMEM_COUNT, "MemoryOOMCountDeviceVirtual") \
+       X(PVRSRV_PROCESS_STAT_TYPE_OOM_PHYSMEM_COUNT, "MemoryOOMCountPhysicalHeap") \
+       X(PVRSRV_PROCESS_STAT_TYPE_INVALID_VIRTMEM, "MemoryOOMCountDeviceVirtualAtAddress")
+
+
+/* X-Macro for Driver stat keys */
+#define PVRSRV_DRIVER_STAT_KEY \
+       X(PVRSRV_DRIVER_STAT_TYPE_KMALLOC, "MemoryUsageKMalloc") \
+       X(PVRSRV_DRIVER_STAT_TYPE_KMALLOC_MAX, "MemoryUsageKMallocMax") \
+       X(PVRSRV_DRIVER_STAT_TYPE_VMALLOC, "MemoryUsageVMalloc") \
+       X(PVRSRV_DRIVER_STAT_TYPE_VMALLOC_MAX, "MemoryUsageVMallocMax") \
+       X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA, "MemoryUsageAllocPTMemoryUMA") \
+       X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA_MAX, "MemoryUsageAllocPTMemoryUMAMax") \
+       X(PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA, "MemoryUsageVMapPTUMA") \
+       X(PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA_MAX, "MemoryUsageVMapPTUMAMax") \
+       X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA, "MemoryUsageAllocPTMemoryLMA") \
+       X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA_MAX, "MemoryUsageAllocPTMemoryLMAMax") \
+       X(PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA, "MemoryUsageIORemapPTLMA") \
+       X(PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA_MAX, "MemoryUsageIORemapPTLMAMax") \
+       X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA, "MemoryUsageAllocGPUMemLMA") \
+       X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA_MAX, "MemoryUsageAllocGPUMemLMAMax") \
+       X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA, "MemoryUsageAllocGPUMemUMA") \
+       X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_MAX, "MemoryUsageAllocGPUMemUMAMax") \
+       X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL, "MemoryUsageAllocGPUMemUMAPool") \
+       X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL_MAX, "MemoryUsageAllocGPUMemUMAPoolMax") \
+       X(PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA, "MemoryUsageMappedGPUMemUMA/LMA") \
+       X(PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA_MAX, "MemoryUsageMappedGPUMemUMA/LMAMax") \
+       X(PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT, "MemoryUsageDmaBufImport") \
+       X(PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT_MAX, "MemoryUsageDmaBufImportMax")
+
+
+typedef enum {
+#define X(stat_type, stat_str) stat_type,
+       PVRSRV_PROCESS_STAT_KEY
+#undef X
+       PVRSRV_PROCESS_STAT_TYPE_COUNT
+}PVRSRV_PROCESS_STAT_TYPE;
+
+typedef enum {
+#define X(stat_type, stat_str) stat_type,
+       PVRSRV_DRIVER_STAT_KEY
+#undef X
+       PVRSRV_DRIVER_STAT_TYPE_COUNT
+}PVRSRV_DRIVER_STAT_TYPE;
+
+extern const IMG_CHAR *const pszProcessStatType[];
+
+extern const IMG_CHAR *const pszDriverStatType[];
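+
+/* Illustrative note (not part of the original header, names assumed): the
+ * X-macro key lists above generate both the enum values and the matching
+ * string tables from a single definition. A source file would typically
+ * instantiate the string table like this:
+ *
+ *   const IMG_CHAR *const pszProcessStatType[] = {
+ *   #define X(stat_type, stat_str) stat_str,
+ *       PVRSRV_PROCESS_STAT_KEY
+ *   #undef X
+ *   };
+ */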
+
+#endif // PROC_STATS_H
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/include/ra.h b/drivers/gpu/drm/img/img-rogue/services/shared/include/ra.h
new file mode 100644 (file)
index 0000000..d306af7
--- /dev/null
@@ -0,0 +1,386 @@
+/*************************************************************************/ /*!
+@File
+@Title          Resource Allocator API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RA_H
+#define RA_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#define RA_MAX_NAME_LENGTH 20
+
+/** Resource arena.
+ *  struct _RA_ARENA_ deliberately opaque
+ */
+typedef struct _RA_ARENA_ RA_ARENA;                    //PRQA S 3313
+
+/** Resource arena's iterator.
+ *  struct _RA_ARENA_ITERATOR_ deliberately opaque
+ */
+typedef struct _RA_ARENA_ITERATOR_ RA_ARENA_ITERATOR;
+
+typedef struct _RA_ITERATOR_DATA_ {
+       IMG_UINT64 uiAddr;
+       IMG_UINT64 uiSize;
+       IMG_BOOL bFree;
+} RA_ITERATOR_DATA;
+
+/** Resource arena usage statistics.
+ *  struct _RA_USAGE_STATS
+ */
+typedef struct _RA_USAGE_STATS {
+       IMG_UINT64      ui64TotalArenaSize;
+       IMG_UINT64      ui64FreeArenaSize;
+}RA_USAGE_STATS, *PRA_USAGE_STATS;
+
+/*
+ * Per-Arena handle - this is private data for the caller of the RA.
+ * The RA knows nothing about this data. It is supplied in RA_Create, and the
+ * RA promises to pass it to calls to the ImportAlloc and ImportFree callbacks.
+ */
+typedef IMG_HANDLE RA_PERARENA_HANDLE;
+/*
+ * Per-Import handle - this is private data for the caller of the RA.
+ * The RA knows nothing about this data. It is supplied on a per-import basis,
+ * either for the "initial" import at RA_Create time or for further imports
+ * via the ImportAlloc callback. The RA sends it back via the ImportFree callback,
+ * and also provides it in answer to any RA_Alloc request to signify from
+ * which "import" the allocation came.
+ */
+typedef IMG_HANDLE RA_PERISPAN_HANDLE;
+
+typedef IMG_UINT64 RA_BASE_T;
+typedef IMG_UINT32 RA_LOG2QUANTUM_T;
+typedef IMG_UINT64 RA_LENGTH_T;
+
+/* Lock classes: describes the level of nesting between different arenas. */
+#define RA_LOCKCLASS_0 0
+#define RA_LOCKCLASS_1 1
+#define RA_LOCKCLASS_2 2
+
+#define RA_NO_IMPORT_MULTIPLIER 1
+
+/*
+ * Allocation policies that govern the resource arenas.
+ */
+
+/* --- Resource allocation policy definitions ---
+* | 31.........4|......3....|........2.............|1...................0|
+* | Reserved    | No split  | Area bucket selection| Alloc node selection|
+*/
+
+/*
+ * The fast allocation policy picks the first node that satisfies the request.
+ * It is the default policy for all arenas.
+ */
+#define RA_POLICY_ALLOC_FAST                   (0U)
+/*
+ * The optimal allocation policy picks the smallest node that satisfies the
+ * request. This reduces fragmentation, since choosing a close fit minimises
+ * the need to split nodes, so future larger allocation requests are more
+ * likely to succeed.
+ */
+#define RA_POLICY_ALLOC_OPTIMAL                (1U)
+#define RA_POLICY_ALLOC_NODE_SELECT_MASK                       (3U)
+
+/*
+ * Bucket selection policies
+ */
+/* The assured-fit policy selects a bucket that is guaranteed to satisfy the
+ * given request. Nodes picked from such a bucket generally need to be split
+ * further, but picking a node from it is likely to succeed and thus promises
+ * better response times. */
+#define RA_POLICY_BUCKET_ASSURED_FIT           (0U)
+/*
+ * The best-fit policy selects a bucket whose free nodes are close to the
+ * requested size and therefore likely, but not guaranteed, to satisfy the
+ * request. If the request cannot be satisfied from this bucket, progressively
+ * larger buckets are selected in later iterations until it is.
+ *
+ * Response times may therefore vary depending on the availability of free
+ * nodes that satisfy the request.
+ */
+#define RA_POLICY_BUCKET_BEST_FIT              (4U)
+#define RA_POLICY_BUCKET_MASK                  (4U)
+
+/* This flag ensures that imports will not be split up and allocations will
+ * always get their own import.
+ */
+#define RA_POLICY_NO_SPLIT                     (8U)
+#define RA_POLICY_NO_SPLIT_MASK                (8U)
+
+/*
+ * Default Arena Policy
+ */
+#define RA_POLICY_DEFAULT                      (RA_POLICY_ALLOC_FAST | RA_POLICY_BUCKET_ASSURED_FIT)
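+
+/* Illustrative note (not part of the original header): a policy value is a
+ * bitwise OR of one node-selection policy, one bucket-selection policy and,
+ * optionally, the no-split flag. For example, a hypothetical arena tuned to
+ * minimise fragmentation could be created with:
+ *
+ *   RA_POLICY_ALLOC_OPTIMAL | RA_POLICY_BUCKET_BEST_FIT | RA_POLICY_NO_SPLIT
+ */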
+
+/*
+ * Flags in an "import" must match the flags for an allocation
+ */
+typedef IMG_UINT64 RA_FLAGS_T;
+
+/*************************************************************************/ /*!
+@Function       Callback function PFN_RA_ALLOC
+@Description    RA import allocate function
+@Input          RA_PERARENA_HANDLE RA handle
+@Input          RA_LENGTH_T        Request size
+@Input          RA_FLAGS_T         RA flags
+@Input          IMG_CHAR           Annotation
+@Input          RA_BASE_T          Allocation base
+@Input          RA_LENGTH_T        Actual size
+@Input          RA_PERISPAN_HANDLE Per import private data
+@Return         PVRSRV_ERROR       PVRSRV_OK or error code
+*/ /**************************************************************************/
+typedef PVRSRV_ERROR (*PFN_RA_ALLOC)(RA_PERARENA_HANDLE,
+                                                                        RA_LENGTH_T,
+                                                                        RA_FLAGS_T,
+                                                                        const IMG_CHAR*,
+                                                                        RA_BASE_T*,
+                                                                        RA_LENGTH_T*,
+                                                                        RA_PERISPAN_HANDLE*);
+
+/*************************************************************************/ /*!
+@Function       Callback function PFN_RA_FREE
+@Description    RA free imported allocation
+@Input          RA_PERARENA_HANDLE   RA handle
+@Input          RA_BASE_T            Allocation base
+@Output         RA_PERISPAN_HANDLE   Per import private data
+*/ /**************************************************************************/
+typedef void (*PFN_RA_FREE)(RA_PERARENA_HANDLE,
+                                                       RA_BASE_T,
+                                                       RA_PERISPAN_HANDLE);
+
+/**
+ *  @Function   RA_Create
+ *
+ *  @Description    To create a resource arena.
+ *
+ *  @Input name - the name of the arena for diagnostic purposes.
+ *  @Input uLog2Quantum - the arena allocation quantum.
+ *  @Input ui32LockClass - the lock class level this arena uses.
+ *  @Input imp_alloc - a resource allocation callback or 0.
+ *  @Input imp_free - a resource de-allocation callback or 0.
+ *  @Input per_arena_handle - private handle passed to alloc and free or 0.
+ *  @Input ui32PolicyFlags - policies that govern the arena.
+ *  @Return pointer to arena, or NULL.
+ */
+RA_ARENA *
+RA_Create(IMG_CHAR *name,
+          /* subsequent imports: */
+          RA_LOG2QUANTUM_T uLog2Quantum,
+          IMG_UINT32 ui32LockClass,
+          PFN_RA_ALLOC imp_alloc,
+          PFN_RA_FREE imp_free,
+          RA_PERARENA_HANDLE per_arena_handle,
+          IMG_UINT32 ui32PolicyFlags);
+
+/**
+ *  @Function   RA_Create_With_Span
+ *
+ *  @Description
+ *
+ *  Creates a resource arena and initialises it with a given resource span.
+ *
+ *  @Input name - String briefly describing the RA's purpose.
+ *  @Input uLog2Quantum - the arena allocation quantum.
+ *  @Input ui64CpuBase - CPU Physical Base Address of the RA.
+ *  @Input ui64SpanDevBase - Device Physical Base Address of the RA.
+ *  @Input ui64SpanSize - Size of the span to add to the created RA.
+ *  @Return pointer to arena, or NULL.
+*/
+RA_ARENA *
+RA_Create_With_Span(IMG_CHAR *name,
+                    RA_LOG2QUANTUM_T uLog2Quantum,
+                    IMG_UINT64 ui64CpuBase,
+                    IMG_UINT64 ui64SpanDevBase,
+                    IMG_UINT64 ui64SpanSize);
+
+/**
+ *  @Function   RA_Delete
+ *
+ *  @Description
+ *
+ *  To delete a resource arena. All resources allocated from the arena
+ *  must be freed before deleting the arena.
+ *
+ *  @Input  pArena - the arena to delete.
+ *  @Return None
+ */
+void
+RA_Delete(RA_ARENA *pArena);
+
+/**
+ *  @Function   RA_Add
+ *
+ *  @Description
+ *
+ *  To add a resource span to an arena. The span must not overlap with
+ *  any span previously added to the arena.
+ *
+ *  @Input pArena - the arena to add a span into.
+ *  @Input base - the base of the span.
+ *  @Input uSize - the extent of the span.
+ *  @Input uFlags - flags associated with the span.
+ *  @Input hPriv - handle associated with the span (reserved for user use)
+ *  @Return IMG_TRUE - success, IMG_FALSE - failure
+ */
+IMG_BOOL
+RA_Add(RA_ARENA *pArena,
+       RA_BASE_T base,
+       RA_LENGTH_T uSize,
+       RA_FLAGS_T uFlags,
+       RA_PERISPAN_HANDLE hPriv);
+
+/**
+ *  @Function   RA_Alloc
+ *
+ *  @Description    To allocate resource from an arena.
+ *
+ *  @Input  pArena - the arena
+ *  @Input  uRequestSize - the size of resource segment requested.
+ *  @Input  uImportMultiplier - import this multiple of uRequestSize so that
+ *          future RA_Alloc calls can be satisfied from the same import.
+ *          Use RA_NO_IMPORT_MULTIPLIER to import the exact size.
+ *  @Input  uImportFlags - flags influencing allocation policy.
+ *  @Input  uAlignment - the alignment constraint required for the
+ *          allocated segment, use 0 if alignment not required.
+ *  @Input  pszAnnotation - a string to describe the allocation
+ *  @Output base - allocated base resource
+ *  @Output pActualSize - the actual_size of resource segment allocated,
+ *          typically rounded up by quantum.
+ *  @Output phPriv - the user reference associated with allocated
+ *          resource span.
+ *  @Return PVRSRV_OK - success
+ */
+PVRSRV_ERROR
+RA_Alloc(RA_ARENA *pArena,
+         RA_LENGTH_T uRequestSize,
+         IMG_UINT8 uImportMultiplier,
+         RA_FLAGS_T uImportFlags,
+         RA_LENGTH_T uAlignment,
+         const IMG_CHAR *pszAnnotation,
+         RA_BASE_T *base,
+         RA_LENGTH_T *pActualSize,
+         RA_PERISPAN_HANDLE *phPriv);
+
+/**
+ *  @Function   RA_Alloc_Range
+ *
+ *  @Description
+ *
+ *  To allocate a resource at a specified base from an arena.
+ *
+ *  @Input  pArena - the arena
+ *  @Input  uRequestSize - the size of resource segment requested.
+ *  @Input  uImportFlags - flags influencing allocation policy.
+ *  @Input  uAlignment - the alignment constraint required for the
+ *          allocated segment, use 0 if alignment not required.
+ *  @Input  base - the requested base of the resource segment.
+ *  @Output pActualSize - the actual_size of resource segment allocated,
+ *          typically rounded up by quantum.
+ *  @Return PVRSRV_OK - success
+ */
+PVRSRV_ERROR
+RA_Alloc_Range(RA_ARENA *pArena,
+                 RA_LENGTH_T uRequestSize,
+                 RA_FLAGS_T uImportFlags,
+                 RA_LENGTH_T uAlignment,
+                 RA_BASE_T base,
+                 RA_LENGTH_T *pActualSize);
+
+/**
+ *  @Function   RA_Free
+ *
+ *  @Description    To free a resource segment.
+ *
+ *  @Input  pArena - the arena the segment was originally allocated from.
+ *  @Input  base - the base of the resource span to free.
+ *
+ *  @Return None
+ */
+void
+RA_Free(RA_ARENA *pArena, RA_BASE_T base);
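+
+/* Illustrative usage sketch (not part of the original header; names and sizes
+ * are assumptions): a typical caller creates an arena over a span it owns,
+ * sub-allocates from it, frees the allocations and finally deletes the arena.
+ *
+ *   RA_ARENA *psArena = RA_Create_With_Span("example", 12,   // 4KB quantum
+ *                                           ui64CpuBase, ui64DevBase, ui64Size);
+ *   RA_BASE_T uiBase;
+ *   RA_LENGTH_T uiActualSize;
+ *   RA_PERISPAN_HANDLE hPriv;
+ *
+ *   if (psArena != NULL)
+ *   {
+ *       if (RA_Alloc(psArena, 0x10000, RA_NO_IMPORT_MULTIPLIER, 0, 0,
+ *                    "example-alloc", &uiBase, &uiActualSize, &hPriv) == PVRSRV_OK)
+ *       {
+ *           // ... use [uiBase, uiBase + uiActualSize) ...
+ *           RA_Free(psArena, uiBase);
+ *       }
+ *       RA_Delete(psArena);
+ *   }
+ */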
+
+/**
+ *  @Function   RA_Get_Usage_Stats
+ *
+ *  @Description    To collect the arena usage statistics.
+ *
+ *  @Input  pArena - the arena to acquire usage statistics from.
+ *  @Input  psRAStats - the buffer to hold the usage statistics of the arena.
+ *
+ *  @Return None
+ */
+IMG_INTERNAL void
+RA_Get_Usage_Stats(RA_ARENA *pArena, PRA_USAGE_STATS psRAStats);
+
+IMG_INTERNAL RA_ARENA_ITERATOR *
+RA_IteratorAcquire(RA_ARENA *pArena, IMG_BOOL bIncludeFreeSegments);
+
+IMG_INTERNAL void
+RA_IteratorReset(RA_ARENA_ITERATOR *pIter);
+
+IMG_INTERNAL void
+RA_IteratorRelease(RA_ARENA_ITERATOR *pIter);
+
+IMG_INTERNAL IMG_BOOL
+RA_IteratorNext(RA_ARENA_ITERATOR *pIter, RA_ITERATOR_DATA *pData);
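+
+/* Illustrative usage sketch (not part of the original header): the iterator
+ * walks the arena's segments one at a time, e.g.
+ *
+ *   RA_ARENA_ITERATOR *psIter = RA_IteratorAcquire(psArena, IMG_TRUE);
+ *   if (psIter != NULL)
+ *   {
+ *       RA_ITERATOR_DATA sData;
+ *       while (RA_IteratorNext(psIter, &sData))
+ *       {
+ *           // sData.uiAddr / sData.uiSize / sData.bFree describe one segment
+ *       }
+ *       RA_IteratorRelease(psIter);
+ *   }
+ */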
+
+/*************************************************************************/ /*!
+@Function       RA_BlockDump
+@Description    Debug dump of all memory allocations within the RA and the space
+                between. A '#' represents a block of memory (the arena's quantum
+                in size) that has been allocated whereas a '.' represents a free
+                block.
+@Input          pArena        The arena to dump.
+@Input          pfnLogDump    The dumping method.
+@Input          pPrivData     Data to be passed into the pfnLogDump method.
+@Return         PVRSRV_OK on success, failure code otherwise.
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+RA_BlockDump(RA_ARENA *pArena,
+             __printf(2, 3) void (*pfnLogDump)(void*, IMG_CHAR*, ...),
+             void *pPrivData);
+
+#endif
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/include/sync.h b/drivers/gpu/drm/img/img-rogue/services/shared/include/sync.h
new file mode 100644 (file)
index 0000000..f126915
--- /dev/null
@@ -0,0 +1,292 @@
+/*************************************************************************/ /*!
+@File
+@Title          Synchronisation interface header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines the client side interface for synchronisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef SYNC_H
+#define SYNC_H
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "sync_prim_internal.h"
+#include "pdumpdefs.h"
+#include "dllist.h"
+#include "pvr_debug.h"
+
+#include "device_connection.h"
+
+#if defined(__KERNEL__) && defined(__linux__) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+/*************************************************************************/ /*!
+@Function       SyncPrimContextCreate
+
+@Description    Create a new synchronisation context
+
+@Input          hDevConnection          Device connection handle
+
+@Output         hSyncPrimContext        Handle to the created synchronisation
+                                        primitive context
+
+@Return         PVRSRV_OK if the synchronisation primitive context was
+                successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimContextCreate(SHARED_DEV_CONNECTION hDevConnection,
+                      PSYNC_PRIM_CONTEXT    *hSyncPrimContext);
+
+/*************************************************************************/ /*!
+@Function       SyncPrimContextDestroy
+
+@Description    Destroy a synchronisation context
+
+@Input          hSyncPrimContext        Handle to the synchronisation
+                                        primitive context to destroy
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncPrimContextDestroy(PSYNC_PRIM_CONTEXT hSyncPrimContext);
+
+/*************************************************************************/ /*!
+@Function       SyncPrimAlloc
+
+@Description    Allocate a new synchronisation primitive on the specified
+                synchronisation context
+
+@Input          hSyncPrimContext        Handle to the synchronisation
+                                        primitive context
+
+@Output         ppsSync                 Created synchronisation primitive
+
+@Input          pszClassName            Sync source annotation
+
+@Return         PVRSRV_OK if the synchronisation primitive was
+                successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimAlloc(PSYNC_PRIM_CONTEXT      hSyncPrimContext,
+              PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+              const IMG_CHAR          *pszClassName);
+
+/*************************************************************************/ /*!
+@Function       SyncPrimFree
+
+@Description    Free a synchronisation primitive
+
+@Input          psSync                  The synchronisation primitive to free
+
+@Return         PVRSRV_OK if the synchronisation primitive was
+                successfully freed
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimFree(PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+/*************************************************************************/ /*!
+@Function       SyncPrimSet
+
+@Description    Set the synchronisation primitive to a value
+
+@Input          psSync                  The synchronisation primitive to set
+
+@Input          ui32Value               Value to set it to
+
+@Return         PVRSRV_OK on success
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimSet(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value);
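+
+/* Illustrative usage sketch (not part of the original header; names are
+ * assumptions): a client typically creates one sync context per device
+ * connection, allocates primitives from it and frees them before destroying
+ * the context.
+ *
+ *   PSYNC_PRIM_CONTEXT hSyncCtx;
+ *   PVRSRV_CLIENT_SYNC_PRIM *psSync;
+ *
+ *   if (SyncPrimContextCreate(hDevConnection, &hSyncCtx) == PVRSRV_OK)
+ *   {
+ *       if (SyncPrimAlloc(hSyncCtx, &psSync, "example-sync") == PVRSRV_OK)
+ *       {
+ *           SyncPrimSet(psSync, 0);
+ *           // ... submit work that signals or polls psSync ...
+ *           SyncPrimFree(psSync);
+ *       }
+ *       SyncPrimContextDestroy(hSyncCtx);
+ *   }
+ */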
+
+#if defined(NO_HARDWARE)
+
+/*************************************************************************/ /*!
+@Function       SyncPrimNoHwUpdate
+
+@Description    Updates the synchronisation primitive value (in NoHardware drivers)
+
+@Input          psSync                  The synchronisation primitive to update
+
+@Input          ui32Value               Value to update it to
+
+@Return         PVRSRV_OK on success
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimNoHwUpdate(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value);
+#endif
+
+#if defined(PDUMP)
+/*************************************************************************/ /*!
+@Function       SyncPrimPDump
+
+@Description    PDump the current value of the synchronisation primitive
+
+@Input          psSync                  The synchronisation primitive to PDump
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+/*************************************************************************/ /*!
+@Function       SyncPrimPDumpValue
+
+@Description    PDump the ui32Value as the value of the synchronisation
+                primitive (regardless of the current value).
+
+@Input          psSync          The synchronisation primitive to PDump
+@Input          ui32Value       Value to give to the sync prim on the pdump
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value);
+
+/*************************************************************************/ /*!
+@Function       SyncPrimPDumpPol
+
+@Description    Do a PDump poll of the synchronisation primitive
+
+@Input          psSync                  The synchronisation primitive to PDump
+
+@Input          ui32Value               Value to poll for
+
+@Input          ui32Mask                PDump mask operator
+
+@Input          ui32PDumpFlags          PDump flags
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+                                IMG_UINT32 ui32Value,
+                                IMG_UINT32 ui32Mask,
+                                PDUMP_POLL_OPERATOR eOperator,
+                                IMG_UINT32 ui32PDumpFlags);
+
+/*************************************************************************/ /*!
+@Function       SyncPrimPDumpCBP
+
+@Description    Do a PDump CB poll using the synchronisation primitive
+
+@Input          psSync                  The synchronisation primitive to PDump
+
+@Input          uiWriteOffset           Current write offset of buffer
+
+@Input          uiPacketSize            Size of the packet to write into CB
+
+@Input          uiBufferSize            Size of the CB
+
+@Return         None
+*/
+/*****************************************************************************/
+void
+SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+                                IMG_UINT64 uiWriteOffset,
+                                IMG_UINT64 uiPacketSize,
+                                IMG_UINT64 uiBufferSize);
+
+#else
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimPDumpValue)
+#endif
+static INLINE void
+SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value)
+{
+       PVR_UNREFERENCED_PARAMETER(psSync);
+       PVR_UNREFERENCED_PARAMETER(ui32Value);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimPDump)
+#endif
+static INLINE void
+SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+       PVR_UNREFERENCED_PARAMETER(psSync);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimPDumpPol)
+#endif
+static INLINE void
+SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+                                IMG_UINT32 ui32Value,
+                                IMG_UINT32 ui32Mask,
+                                PDUMP_POLL_OPERATOR eOperator,
+                                IMG_UINT32 ui32PDumpFlags)
+{
+       PVR_UNREFERENCED_PARAMETER(psSync);
+       PVR_UNREFERENCED_PARAMETER(ui32Value);
+       PVR_UNREFERENCED_PARAMETER(ui32Mask);
+       PVR_UNREFERENCED_PARAMETER(eOperator);
+       PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimPDumpCBP)
+#endif
+static INLINE void
+SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+                                IMG_UINT64 uiWriteOffset,
+                                IMG_UINT64 uiPacketSize,
+                                IMG_UINT64 uiBufferSize)
+{
+       PVR_UNREFERENCED_PARAMETER(psSync);
+       PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+       PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+       PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+}
+#endif /* PDUMP */
+#endif /* SYNC_H */
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/include/sync_internal.h b/drivers/gpu/drm/img/img-rogue/services/shared/include/sync_internal.h
new file mode 100644 (file)
index 0000000..29c8360
--- /dev/null
@@ -0,0 +1,127 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services internal synchronisation interface header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines the internal client side interface for services
+                synchronisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef SYNC_INTERNAL
+#define SYNC_INTERNAL
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "ra.h"
+#include "dllist.h"
+#include "lock.h"
+#include "devicemem.h"
+#include "sync_prim_internal.h"
+
+#define LOCAL_SYNC_PRIM_RESET_VALUE 0
+#define LOCAL_SYNC_PRIM_POISON_VALUE 0xa5a5a5a5u
+
+/*
+       Debug feature to protect against GP DM page faults when
+       sync prims are freed by client before work is completed.
+*/
+#define LOCAL_SYNC_BLOCK_RETAIN_FIRST
+
+/*
+       Private structures
+*/
+#define SYNC_PRIM_NAME_SIZE            50
+typedef struct SYNC_PRIM_CONTEXT_TAG
+{
+       SHARED_DEV_CONNECTION       hDevConnection;
+       IMG_CHAR                                        azName[SYNC_PRIM_NAME_SIZE];    /*!< Name of the RA */
+       RA_ARENA                                        *psSubAllocRA;                                  /*!< RA context */
+       IMG_CHAR                                        azSpanName[SYNC_PRIM_NAME_SIZE];/*!< Name of the span RA */
+       RA_ARENA                                        *psSpanRA;                                              /*!< RA used for span management of SubAllocRA */
+       ATOMIC_T                                hRefCount;      /*!< Ref count for this context */
+#if defined(LOCAL_SYNC_BLOCK_RETAIN_FIRST)
+       IMG_HANDLE                                      hFirstSyncPrim; /*!< Handle to the first allocated sync prim */
+#endif
+} SYNC_PRIM_CONTEXT;
+
+typedef struct SYNC_PRIM_BLOCK_TAG
+{
+       SYNC_PRIM_CONTEXT       *psContext;                             /*!< Our copy of the services connection */
+       IMG_HANDLE                      hServerSyncPrimBlock;   /*!< Server handle for this block */
+       IMG_UINT32                      ui32SyncBlockSize;              /*!< Size of the sync prim block */
+       IMG_UINT32                      ui32FirmwareAddr;               /*!< Firmware address */
+       DEVMEM_MEMDESC          *hMemDesc;                              /*!< Host mapping handle */
+       IMG_UINT32 __iomem      *pui32LinAddr;                  /*!< User CPU mapping */
+       IMG_UINT64                      uiSpanBase;                             /*!< Base of this import in the span RA */
+       DLLIST_NODE                     sListNode;                              /*!< List node for the sync block list */
+} SYNC_PRIM_BLOCK;
+
+typedef enum SYNC_PRIM_TYPE_TAG
+{
+       SYNC_PRIM_TYPE_UNKNOWN = 0,
+       SYNC_PRIM_TYPE_LOCAL,
+       SYNC_PRIM_TYPE_SERVER,
+} SYNC_PRIM_TYPE;
+
+typedef struct SYNC_PRIM_LOCAL_TAG
+{
+       ATOMIC_T                                hRefCount;      /*!< Ref count for this sync */
+       SYNC_PRIM_BLOCK                 *psSyncBlock;   /*!< Synchronisation block this primitive is allocated on */
+       IMG_UINT64                              uiSpanAddr;             /*!< Span address of the sync */
+       IMG_HANDLE                              hRecord;                /*!< Sync record handle */
+} SYNC_PRIM_LOCAL;
+
+typedef struct SYNC_PRIM_TAG
+{
+       PVRSRV_CLIENT_SYNC_PRIM sCommon;                /*!< Client visible part of the sync prim */
+       SYNC_PRIM_TYPE                  eType;                  /*!< Sync primitive type */
+       union {
+               SYNC_PRIM_LOCAL         sLocal;                 /*!< Local sync primitive data */
+       } u;
+} SYNC_PRIM;
+
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimGetFirmwareAddr(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 *pui32FwAddr);
+
+IMG_INTERNAL PVRSRV_ERROR SyncPrimLocalGetHandleAndOffset(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+                                                       IMG_HANDLE *phBlock,
+                                                       IMG_UINT32 *pui32Offset);
+
+
+#endif /* SYNC_INTERNAL */
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/include/tlclient.h b/drivers/gpu/drm/img/img-rogue/services/shared/include/tlclient.h
new file mode 100644 (file)
index 0000000..00f7aa8
--- /dev/null
@@ -0,0 +1,257 @@
+/*************************************************************************/ /*!
+@File           tlclient.h
+@Title          Services Transport Layer shared API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Transport layer common API used in both clients and server
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef TLCLIENT_H
+#define TLCLIENT_H
+
+
+#include "img_defs.h"
+#include "pvrsrv_tlcommon.h"
+#include "pvrsrv_error.h"
+
+
+/* This value is used for the hSrvHandle argument in the client API when
+ * called directly from the kernel which will lead to a direct bridge access.
+ */
+#define DIRECT_BRIDGE_HANDLE   ((IMG_HANDLE)0xDEADBEEFU)
+
+
+/*************************************************************************/ /*!
+ @Function      TLClientOpenStream
+ @Description   Open a descriptor onto an existing kernel transport stream.
+ @Input         hDevConnection  Address of a pointer to a connection object
+ @Input         pszName         Address of the stream name string, no longer
+                                than PRVSRVTL_MAX_STREAM_NAME_SIZE.
+ @Input         ui32Mode        Unused
+ @Output        phSD            Address of a pointer to a stream object
+ @Return        PVRSRV_ERROR_NOT_FOUND          when named stream not found
+ @Return        PVRSRV_ERROR_ALREADY_OPEN       stream already open by another client
+ @Return        PVRSRV_ERROR_STREAM_ERROR       internal driver state error
+ @Return        PVRSRV_ERROR_TIMEOUT            timed out, stream not found
+ @Return        PVRSRV_ERROR                    for other system codes
+*/ /**************************************************************************/
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientOpenStream(SHARED_DEV_CONNECTION hDevConnection,
+               const IMG_CHAR* pszName,
+               IMG_UINT32   ui32Mode,
+               IMG_HANDLE*  phSD);
+
+
+/*************************************************************************/ /*!
+ @Function      TLClientCloseStream
+ @Description   Close and release the stream connection to Services kernel
+                server transport layer. Any outstanding Acquire will be
+                released.
+ @Input         hDevConnection  Address of a pointer to a connection object
+ @Input         hSD             Handle of the stream object to close
+ @Return        PVRSRV_ERROR_HANDLE_NOT_FOUND   when SD handle is not known
+ @Return        PVRSRV_ERROR_STREAM_ERROR       internal driver state error
+ @Return        PVRSRV_ERROR                    for system codes
+*/ /**************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientCloseStream(SHARED_DEV_CONNECTION hDevConnection,
+               IMG_HANDLE hSD);
+
+/*************************************************************************/ /*!
+ @Function      TLClientDiscoverStreams
+ @Description   Finds all streams whose names start with pszNamePattern and
+                end with a number.
+ @Input         hDevConnection  Address of a pointer to a connection object
+ @Input         pszNamePattern  Name pattern. Must be the beginning of the
+                                stream name.
+ @Output        aszStreams      Array receiving the numbers from the end of
+                                the discovered names.
+ @InOut         pui32NumFound   On input, the maximum number of entries that
+                                fit into aszStreams. On output, the number of
+                                discovered streams.
+ @Return        PVRSRV_ERROR    for system codes
+*/ /**************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientDiscoverStreams(SHARED_DEV_CONNECTION hDevConnection,
+               const IMG_CHAR *pszNamePattern,
+               IMG_CHAR aszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE],
+               IMG_UINT32 *pui32NumFound);
+
+/*************************************************************************/ /*!
+ @Function      TLClientReserveStream
+ @Description   Reserves a region with given size in the stream. If the stream
+                is already reserved the function will return an error.
+ @Input         hDevConnection  Address of a pointer to a connection object
+ @Input         hSD             Handle of the stream object to reserve space in
+ @Output        ppui8Data       Pointer to the reserved buffer
+ @Input         ui32Size        Size of the data
+ @Return        PVRSRV_ERROR    for system codes
+*/ /**************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReserveStream(SHARED_DEV_CONNECTION hDevConnection,
+               IMG_HANDLE hSD,
+               IMG_UINT8 **ppui8Data,
+               IMG_UINT32 ui32Size);
+
+/*************************************************************************/ /*!
+ @Function      TLClientReserveStream2
+ @Description   Reserves a region with given size in the stream. If the stream
+                is already reserved the function will return an error.
+ @Input         hDevConnection  Address of a pointer to a connection object
+ @Input         hSD             Handle of the stream object to reserve space in
+ @Output        ppui8Data       Pointer to the reserved buffer
+ @Input         ui32Size        Size of the data
+ @Input         ui32SizeMin     Minimum acceptable size of the data
+ @Output        pui32Available  Available space in the buffer
+ @Return        PVRSRV_ERROR    for system codes
+*/ /**************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReserveStream2(SHARED_DEV_CONNECTION hDevConnection,
+               IMG_HANDLE hSD,
+               IMG_UINT8 **ppui8Data,
+               IMG_UINT32 ui32Size,
+               IMG_UINT32 ui32SizeMin,
+               IMG_UINT32 *pui32Available);
+
+/*************************************************************************/ /*!
+ @Function      TLClientCommitStream
+ @Description   Commits a previously reserved region in the stream and
+                thereby allows subsequent reservations.
+                This call must be preceded by a call to
+                TLClientReserveStream or TLClientReserveStream2.
+ @Input         hDevConnection  Address of a pointer to a connection object
+ @Input         hSD             Handle of the stream object to commit to
+ @Input         ui32Size        Size of the data
+ @Return        PVRSRV_ERROR    for system codes
+*/ /**************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientCommitStream(SHARED_DEV_CONNECTION hDevConnection,
+               IMG_HANDLE hSD,
+               IMG_UINT32 ui32Size);
+
+/*************************************************************************/ /*!
+ @Function      TLClientAcquireData
+ @Description   When there is data available in the stream buffer this call
+                returns with the address and length of the data buffer the
+                client can safely read. This buffer may contain one or more
+                packets of data.
+                If no data is available then this call blocks until it becomes
+                available. However if the stream has been destroyed while
+                waiting then a resource unavailable error will be returned to
+                the caller. Clients must pair this call with a ReleaseData
+                call.
+ @Input         hDevConnection  Address of a pointer to a connection object
+ @Input         hSD             Handle of the stream object to read
+ @Output        ppPacketBuf     Address of a pointer to a byte buffer. On exit
+                                pointer contains address of buffer to read from
+ @Output        puiBufLen       Pointer to an integer. On exit it is the size
+                                of the data to read from the packet buffer
+ @Return        PVRSRV_ERROR_RESOURCE_UNAVAILABLE  when stream no longer exists
+ @Return        PVRSRV_ERROR_HANDLE_NOT_FOUND   when SD handle not known
+ @Return        PVRSRV_ERROR_STREAM_ERROR       internal driver state error
+ @Return        PVRSRV_ERROR_RETRY              release not called beforehand
+ @Return        PVRSRV_ERROR_TIMEOUT            block timed out, no data
+ @Return        PVRSRV_ERROR                    for other system codes
+*/ /**************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientAcquireData(SHARED_DEV_CONNECTION hDevConnection,
+               IMG_HANDLE  hSD,
+               IMG_PBYTE*  ppPacketBuf,
+               IMG_UINT32* puiBufLen);
+
+
+/*************************************************************************/ /*!
+ @Function      TLClientReleaseData
+ @Description   Called after the client has read the stream data out of the buffer.
+                The data is subsequently flushed from the stream buffer to make
+                room for more data packets from the stream source.
+ @Input         hDevConnection  Address of a pointer to a connection object
+ @Input         hSD             Handle of the stream object to read
+ @Return        PVRSRV_ERROR_RESOURCE_UNAVAILABLE  when stream no longer exists
+ @Return        PVRSRV_ERROR_HANDLE_NOT_FOUND   when SD handle not known to TL
+ @Return        PVRSRV_ERROR_STREAM_ERROR       internal driver state error
+ @Return        PVRSRV_ERROR_RETRY              acquire not called beforehand
+ @Return        PVRSRV_ERROR                    for system codes
+*/ /**************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReleaseData(SHARED_DEV_CONNECTION hDevConnection,
+               IMG_HANDLE hSD);
+
+/*************************************************************************/ /*!
+ @Function      TLClientReleaseDataLess
+ @Description   Called after the client has read only some of the data out of
+                the buffer and wishes to complete the read early, i.e. it does
+                not want to read all of the data that the acquire call
+                returned (e.g. it reads just one packet from the stream).
+                The data is subsequently flushed from the stream buffer to make
+                room for more data packets from the stream source.
+ @Input         hDevConnection  Address of a pointer to a connection object
+ @Input         hSD             Handle of the stream object to read
+ @Input         uiActualReadLen Size of data read, in bytes. Must be on a TL
+                                packet boundary.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS     when read length too big
+ @Return        PVRSRV_ERROR_RESOURCE_UNAVAILABLE  when stream no longer exists
+ @Return        PVRSRV_ERROR_HANDLE_NOT_FOUND   when SD handle not known to TL
+ @Return        PVRSRV_ERROR_STREAM_ERROR       internal driver state error
+ @Return        PVRSRV_ERROR_RETRY              acquire not called beforehand
+ @Return        PVRSRV_ERROR                    for system codes
+*/ /**************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReleaseDataLess(SHARED_DEV_CONNECTION hDevConnection,
+               IMG_HANDLE hSD, IMG_UINT32 uiActualReadLen);
+
+/*************************************************************************/ /*!
+ @Function      TLClientWriteData
+ @Description   Writes data to the stream.
+ @Input         hDevConnection  Address of a pointer to a connection object
+ @Input         hSD             Handle of the stream object to write to
+ @Input         ui32Size        Size of the data
+ @Input         pui8Data        Pointer to the data
+ @Return        PVRSRV_ERROR    for system codes
+*/ /**************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientWriteData(SHARED_DEV_CONNECTION hDevConnection,
+               IMG_HANDLE hSD,
+               IMG_UINT32 ui32Size,
+               IMG_BYTE *pui8Data);
+
+
+#endif /* TLCLIENT_H */
+
+/******************************************************************************
+ End of file (tlclient.h)
+******************************************************************************/
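The declarations above describe an open/acquire/release/close protocol, but the header never shows it end to end. The following is a minimal, hypothetical reader sketch built only from the prototypes in tlclient.h; the stream name "EXAMPLE_STREAM", the single-shot read, and the assumption that hDevConnection is a valid connection are illustrative, not part of the driver.

/* Hypothetical sketch: open a TL stream, read one batch of packets, close.
 * Assumes hDevConnection is a valid SHARED_DEV_CONNECTION obtained elsewhere.
 */
static PVRSRV_ERROR ExampleReadStream(SHARED_DEV_CONNECTION hDevConnection)
{
	IMG_HANDLE   hSD;
	IMG_PBYTE    pbData;
	IMG_UINT32   uiLen;
	PVRSRV_ERROR eError;

	/* ui32Mode is documented as unused, so 0 is passed here. */
	eError = TLClientOpenStream(hDevConnection, "EXAMPLE_STREAM", 0, &hSD);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/* Blocks until data is available; must be paired with a release. */
	eError = TLClientAcquireData(hDevConnection, hSD, &pbData, &uiLen);
	if (eError == PVRSRV_OK)
	{
		/* ... consume uiLen bytes of packet data at pbData ... */
		eError = TLClientReleaseData(hDevConnection, hSD);
	}

	(void) TLClientCloseStream(hDevConnection, hSD);
	return eError;
}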
diff --git a/drivers/gpu/drm/img/img-rogue/services/shared/include/tutilsdefs.h b/drivers/gpu/drm/img/img-rogue/services/shared/include/tutilsdefs.h
new file mode 100644 (file)
index 0000000..b89e4a4
--- /dev/null
@@ -0,0 +1,230 @@
+/*************************************************************************/ /*!
+@File           tutilsdefs.h
+@Title          Testing utils bridge defines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Shared structures and constants between client and server sides
+                of tutils bridge
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef TUTILSDEFS_H
+#define TUTILSDEFS_H
+
+
+#include "pvrsrv_tlcommon.h"
+#include "pvrsrv_sync_km.h"
+
+/******************************************************************************
+ *
+ * TEST Related definitions and constants
+ */
+#define PVR_TL_TEST_STREAM_BRIDGE_NAME "TLBRIDGE_TEST"
+#define PVR_TL_TEST_UMBASE             0x00202000
+#define PVR_TL_TEST_OFFSET             0x0008
+#define PVR_TL_TEST_LEN                0x0010
+
+#define PVR_TL_TEST_STREAM2_NAME       "TLSTREAM2_TEST"
+#define PVR_TL_TEST_STREAM2_SIZE       2
+
+#define PVR_TL_TEST_STREAM3_NAME       "TLSTREAM3_TEST"
+#define PVR_TL_TEST_STREAM3_SIZE       256
+
+/* This constant, when used as a parameter in StreamCreate, lessens the size of
+ * the buffer that is created for a stream, to avoid going over a page boundary. */
+#define PVR_TL_TEST_STREAM_BUFFER_REDUCTION 32
+
+#define PVR_TL_TEST_CMD_SOURCE_START                   10
+typedef struct _PVR_TL_TEST_CMD_SOURCE_START_IN_
+{
+       /* Stream name must always be first in struct */
+       IMG_CHAR    pszStreamName[PRVSRVTL_MAX_STREAM_NAME_SIZE];
+       IMG_UINT16  uiStreamSizeInPages;  /* # of 4Kb pages */
+       IMG_UINT16  uiInterval;           /* in milliseconds */
+       IMG_UINT16  uiCallbackKicks;      /* 0 for no limit on timer callbacks */
+       IMG_UINT16  uiEOSMarkerKicks;     /* Insert EOS Marker every N Kicks, 0 for none */
+       IMG_UINT16  uiPacketSizeInBytes;  /* 0 for a random size between 1..255 bytes */
+       IMG_UINT32  uiStreamCreateFlags;  /* See TLStreamCreate() */
+       IMG_UINT16  uiStartDelay;         /* 0 for normal uiInterval delay, one off delay in ms */
+       IMG_BOOL    bDoNotDeleteStream;   /* When true the stream is not deleted on source self
+                                          * cleanup; only the timers and other resources are */
+       IMG_BOOL    bDelayStreamCreate;   /* When true the stream used in the source is created
+                                          * in the first kick. False for normal behaviour where
+                                          * the stream is created in the bridge source start context */
+} PVR_TL_TEST_CMD_SOURCE_START_IN;
+
+
+#define PVR_TL_TEST_CMD_SOURCE_STOP            11
+typedef struct _PVR_TL_TEST_CMD_SOURCE_STOP_IN_
+{
+       /* Stream name must always be first in struct */
+       IMG_CHAR  pszStreamName[PRVSRVTL_MAX_STREAM_NAME_SIZE];
+       IMG_BOOL  bDoNotDeleteStream;
+} PVR_TL_TEST_CMD_SOURCE_STOP_IN;
+
+#define PVR_TL_TEST_CMD_SOURCE_START2  12      /* Uses two stage data submit */
+typedef PVR_TL_TEST_CMD_SOURCE_START_IN PVR_TL_TEST_CMD_SOURCE_START2_IN;
+
+#define PVR_TL_TEST_CMD_DEBUG_LEVEL    13
+/* No typedef, uses integer uiIn1 in union */
+
+#define PVR_TL_TEST_CMD_DUMP_TL_STATE  14
+/* No typedef, uses integer uiIn1 in union */
+
+#define PVR_TL_TEST_CMD_STREAM_CREATE  15
+typedef struct _PVR_TL_TEST_CMD_STREAM_CREATE_IN_
+{
+       /* Stream name must always be first in struct */
+       IMG_CHAR    pszStreamName[PRVSRVTL_MAX_STREAM_NAME_SIZE];
+       IMG_UINT16  uiStreamSizeInPages;
+       IMG_UINT32  uiStreamCreateFlags;
+       IMG_BOOL    bWithOpenCallback;
+} PVR_TL_TEST_CMD_STREAM_CREATE_IN;
+
+#define PVR_TL_TEST_CMD_STREAM_CLOSE   16
+typedef struct _PVR_TL_TEST_CMD_STREAM_NAME_IN_
+{
+       IMG_CHAR  pszStreamName[PRVSRVTL_MAX_STREAM_NAME_SIZE];
+} PVR_TL_TEST_CMD_STREAM_NAME_IN;
+
+#define PVR_TL_TEST_CMD_STREAM_OPEN 17
+
+#define PVR_TL_TEST_CMD_DUMP_HWPERF_STATE 18
+
+#define PVR_TL_TEST_CMD_FLUSH_HWPERF_FWBUF 19
+
+#define PVR_TL_TEST_CMD_DUMP_PDUMP_STATE 21
+
+typedef union _PVR_TL_TEST_CMD_IN_
+{
+       PVR_TL_TEST_CMD_SOURCE_START_IN sStart;
+       PVR_TL_TEST_CMD_SOURCE_STOP_IN  sStop;
+/*     PVR_TL_TEST_CMD_SOURCE_START_IN sStart2;  Used by #12, use sStart instead */
+       IMG_UINT32      uiIn1;                                           /* Used by #13, #14 */
+       PVR_TL_TEST_CMD_STREAM_CREATE_IN  sCreate;
+       PVR_TL_TEST_CMD_STREAM_NAME_IN sName;
+       IMG_UINT32 uiParams[6];
+} PVR_TL_TEST_CMD_IN;
+
+/* Has to be the largest test IN structure */
+#define PVR_TL_TEST_PARAM_MAX_SIZE  (sizeof(PVR_TL_TEST_CMD_IN)+4)
+
+#define PVR_TL_TEST_CMD_SET_PWR_STATE              22
+#define PVR_TL_TEST_CMD_GET_PWR_STATE              23
+#define PVR_TL_TEST_CMD_SET_DWT_PWR_CHANGE_COUNTER 24
+#define PVR_TL_TEST_CMD_GET_DWT_PWR_CHANGE_COUNTER 25
+
+#define PVR_TL_TEST_PWR_STATE_ON  1
+#define PVR_TL_TEST_PWR_STATE_OFF 0
+
+/****************************************************************************
+ * PowMonTestThread IOCTL calls and constants
+ */
+
+#define PVR_POWMON_CMD_GET_ESTIMATES   1
+#define PVR_POWMON_CMD_SET_THREAD_LATENCY      2
+#define PVR_POWMON_CMD_TEST_THREAD_UPDATE_STATE        3
+
+#define PVR_POWMON_TEST_THREAD_RESUME  1
+#define PVR_POWMON_TEST_THREAD_PAUSE   0
+
+/****************************************************************************
+ * PowerTestThread IOCTL calls and constants
+ */
+
+#define PVR_POWER_TEST_CMD_DVFS                 1
+#define PVR_POWER_TEST_CMD_FORCED_IDLE          2
+#define PVR_POWER_TEST_CMD_CANCEL_FORCED_IDLE   3
+#define PVR_POWER_TEST_CMD_POWER_ON             4
+#define PVR_POWER_TEST_CMD_POWER_OFF            5
+#define PVR_POWER_TEST_CMD_APM_LATENCY          6
+#define PVR_POWER_TEST_CMD_INVALID              7
+
+#define PVR_POWER_TEST_NON_FORCED 0
+#define PVR_POWER_TEST_FORCED     1
+
+/****************************************************************************
+ * SyncCheckpointTest IOCTL types
+ */
+
+#define PVR_TL_TEST_CMD_SYNC_CHECKPOINT_CONTEXT_CREATE         26
+#define PVR_TL_TEST_CMD_SYNC_CHECKPOINT_CONTEXT_DESTROY                27
+#define PVR_TL_TEST_CMD_SYNC_CHECKPOINT_REGISTER_FUNCS         28
+#define PVR_TL_TEST_CMD_SYNC_CHECKPOINT_CREATE                         29
+#define PVR_TL_TEST_CMD_SYNC_CHECKPOINT_CREATE_NULL_CTXT       30
+#define PVR_TL_TEST_CMD_SYNC_CHECKPOINT_CREATE_NULL_RTRN       31
+#define PVR_TL_TEST_CMD_SYNC_CHECKPOINT_DESTROY                                32
+#define PVR_TL_TEST_CMD_SYNC_CHECKPOINT_SIGNAL                         33
+#define PVR_TL_TEST_CMD_SYNC_CHECKPOINT_ERROR                          34
+#define PVR_TL_TEST_CMD_SYNC_CHECKPOINT_IS_SIGNALLED           35
+#define PVR_TL_TEST_CMD_SYNC_CHECKPOINT_IS_ERRORED                     36
+
+typedef struct _PVR_TL_TEST_CMD_CHECKPOINT_CREATE_IN_
+{
+       /* Checkpoint name must always be first in struct */
+       IMG_CHAR    pszCheckpointName[PVRSRV_SYNC_NAME_LENGTH];
+       IMG_UINT16  uiStreamSizeInPages;
+       IMG_UINT32  uiStreamCreateFlags;
+} PVR_TL_TEST_CMD_CHECKPOINT_CREATE_IN;
+
+#define PVR_TL_TEST_CMD_SET_STREAM_OPEN_COUNTER 37
+#define PVR_TL_TEST_CMD_GET_STREAM_OPEN_COUNTER 38
+
+typedef struct _PVR_TL_TEST_CMD_STREAM_OPEN_COUNTER_IN_
+{
+       IMG_CHAR    pszStreamName[PRVSRVTL_MAX_STREAM_NAME_SIZE];
+       IMG_UINT32  ui32Counter;
+} PVR_TL_TEST_CMD_STREAM_OPEN_COUNTER_IN;
+
+/****************************************************************************
+ * KmallocThreshold IOCTL types
+ */
+
+#define PVR_TL_TEST_CMD_KMALLOC 39
+
+typedef struct _PVR_TL_TEST_CMD_KMALLOC_IN_
+{
+       IMG_UINT32 uiAllocCount;
+       IMG_UINT32 uiAllocSize;
+       IMG_UINT32 uiFailedAllocThreshold;
+       IMG_UINT32 uiFailedAllocFrequency;
+} PVR_TL_TEST_CMD_KMALLOC_IN;
+
+#endif /* TUTILSDEFS_H */
+
+/******************************************************************************
+ End of file (tutilsdefs.h)
+******************************************************************************/
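Because the structures above are shared verbatim between the test client and the server side of the tutils bridge, it may help to see one being populated. Below is a hypothetical sketch that fills the command union for a SOURCE_START test; every field value is an illustrative assumption, and the bridge call that actually submits the structure is outside the scope of this header, so it is not shown.

/* Hypothetical sketch: build the shared input union for a
 * PVR_TL_TEST_CMD_SOURCE_START command. All values are illustrative.
 */
static PVR_TL_TEST_CMD_IN ExampleSourceStartInput(void)
{
	PVR_TL_TEST_CMD_IN sIn = {
		.sStart = {
			.pszStreamName       = PVR_TL_TEST_STREAM2_NAME,
			.uiStreamSizeInPages = PVR_TL_TEST_STREAM2_SIZE, /* # of 4Kb pages */
			.uiInterval          = 100,       /* kick every 100 ms */
			.uiCallbackKicks     = 10,        /* stop after 10 kicks */
			.uiEOSMarkerKicks    = 0,         /* no EOS markers */
			.uiPacketSizeInBytes = 0,         /* random packet sizes */
			.uiStreamCreateFlags = 0,
			.uiStartDelay        = 0,
			.bDoNotDeleteStream  = IMG_FALSE,
			.bDelayStreamCreate  = IMG_FALSE,
		}
	};

	return sIn;
}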
diff --git a/drivers/gpu/drm/img/img-rogue/services/system/common/env/linux/interrupt_support.c b/drivers/gpu/drm/img/img-rogue/services/system/common/env/linux/interrupt_support.c
new file mode 100644 (file)
index 0000000..c67d453
--- /dev/null
@@ -0,0 +1,151 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/interrupt.h>
+
+#include "pvr_debug.h"
+#include "allocmem.h"
+#include "interrupt_support.h"
+
+typedef struct LISR_DATA_TAG
+{
+       IMG_UINT32      ui32IRQ;
+       PFN_SYS_LISR    pfnLISR;
+       void            *pvData;
+} LISR_DATA;
+
+static irqreturn_t SystemISRWrapper(int irq, void *dev_id)
+{
+       LISR_DATA *psLISRData = (LISR_DATA *)dev_id;
+
+       PVR_UNREFERENCED_PARAMETER(irq);
+
+       if (psLISRData)
+       {
+               if (psLISRData->pfnLISR(psLISRData->pvData))
+               {
+                       return IRQ_HANDLED;
+               }
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Missing interrupt data", __func__));
+       }
+
+       return IRQ_NONE;
+}
+
+PVRSRV_ERROR OSInstallSystemLISR(IMG_HANDLE *phLISR,
+                                IMG_UINT32 ui32IRQ,
+                                const IMG_CHAR *pszDevName,
+                                PFN_SYS_LISR pfnLISR,
+                                void *pvData,
+                                IMG_UINT32 ui32Flags)
+{
+       LISR_DATA *psLISRData;
+       unsigned long ulIRQFlags = 0;
+
+       if (pfnLISR == NULL || pvData == NULL)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       if (ui32Flags & ~SYS_IRQ_FLAG_MASK)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       switch (ui32Flags & SYS_IRQ_FLAG_TRIGGER_MASK)
+       {
+               case SYS_IRQ_FLAG_TRIGGER_DEFAULT:
+                       break;
+               case SYS_IRQ_FLAG_TRIGGER_LOW:
+                       ulIRQFlags |= IRQF_TRIGGER_LOW;
+                       break;
+               case SYS_IRQ_FLAG_TRIGGER_HIGH:
+                       ulIRQFlags |= IRQF_TRIGGER_HIGH;
+                       break;
+               default:
+                       return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       if (ui32Flags & SYS_IRQ_FLAG_SHARED)
+       {
+               ulIRQFlags |= IRQF_SHARED;
+       }
+
+       psLISRData = OSAllocMem(sizeof(*psLISRData));
+       if (psLISRData == NULL)
+       {
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+
+       psLISRData->ui32IRQ = ui32IRQ;
+       psLISRData->pfnLISR = pfnLISR;
+       psLISRData->pvData = pvData;
+
+       if (request_irq(ui32IRQ, SystemISRWrapper, ulIRQFlags, pszDevName, psLISRData))
+       {
+               OSFreeMem(psLISRData);
+
+               return PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER;
+       }
+
+       *phLISR = (IMG_HANDLE)psLISRData;
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSUninstallSystemLISR(IMG_HANDLE hLISR)
+{
+       LISR_DATA *psLISRData = (LISR_DATA *)hLISR;
+
+       if (psLISRData == NULL)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       free_irq(psLISRData->ui32IRQ, psLISRData);
+
+       OSFreeMem(psLISRData);
+
+       return PVRSRV_OK;
+}
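OSInstallSystemLISR wires SystemISRWrapper into request_irq and forwards each interrupt to the supplied PFN_SYS_LISR. The sketch below is hypothetical: the IRQ number, device name and handler are placeholders, and PFN_SYS_LISR is assumed here to have the shape IMG_BOOL (*)(void *), returning IMG_TRUE when the interrupt belonged to the device, which matches how SystemISRWrapper interprets the return value.

/* Hypothetical sketch: install a system LISR for IRQ 42 and remove it later.
 * IRQ number, device name and handler are illustrative assumptions.
 */
static IMG_BOOL ExampleLISR(void *pvData)
{
	/* Inspect the device state pointed to by pvData; return IMG_TRUE only
	 * if this device raised the interrupt (required on shared IRQ lines). */
	(void) pvData;
	return IMG_TRUE;
}

static PVRSRV_ERROR ExampleInstall(void *pvDeviceData, IMG_HANDLE *phLISR)
{
	/* pvDeviceData must be non-NULL; OSInstallSystemLISR rejects NULL data. */
	return OSInstallSystemLISR(phLISR,
	                           42,              /* placeholder IRQ number */
	                           "example-dev",   /* placeholder device name */
	                           ExampleLISR,
	                           pvDeviceData,
	                           SYS_IRQ_FLAG_TRIGGER_DEFAULT | SYS_IRQ_FLAG_SHARED);
}

/* ... on teardown: OSUninstallSystemLISR(hLISR); */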
diff --git a/drivers/gpu/drm/img/img-rogue/services/system/common/env/linux/pci_support.c b/drivers/gpu/drm/img/img-rogue/services/system/common/env/linux/pci_support.c
new file mode 100644 (file)
index 0000000..c3bbcc4
--- /dev/null
@@ -0,0 +1,726 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+#include <linux/pci.h>
+
+#if defined(CONFIG_MTRR)
+#include <asm/mtrr.h>
+#endif
+
+#include "pci_support.h"
+#include "allocmem.h"
+
+typedef        struct _PVR_PCI_DEV_TAG
+{
+       struct pci_dev          *psPCIDev;
+       HOST_PCI_INIT_FLAGS     ePCIFlags;
+       IMG_BOOL                abPCIResourceInUse[DEVICE_COUNT_RESOURCE];
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+       int                     iMTRR[DEVICE_COUNT_RESOURCE];
+#endif
+} PVR_PCI_DEV;
+
+/*************************************************************************/ /*!
+@Function       OSPCISetDev
+@Description    Set a PCI device for subsequent use.
+@Input          pvPCICookie             Pointer to OS specific PCI structure
+@Input          eFlags                  Flags
+@Return                PVRSRV_PCI_DEV_HANDLE   Pointer to PCI device handle
+*/ /**************************************************************************/
+PVRSRV_PCI_DEV_HANDLE OSPCISetDev(void *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags)
+{
+       int err;
+       IMG_UINT32 i;
+       PVR_PCI_DEV *psPVRPCI;
+
+       psPVRPCI = OSAllocMem(sizeof(*psPVRPCI));
+       if (psPVRPCI == NULL)
+       {
+               printk(KERN_ERR "OSPCISetDev: Couldn't allocate PVR PCI structure\n");
+               return NULL;
+       }
+
+       psPVRPCI->psPCIDev = (struct pci_dev *)pvPCICookie;
+       psPVRPCI->ePCIFlags = eFlags;
+
+       err = pci_enable_device(psPVRPCI->psPCIDev);
+       if (err != 0)
+       {
+               printk(KERN_ERR "OSPCISetDev: Couldn't enable device (%d)\n", err);
+               OSFreeMem(psPVRPCI);
+               return NULL;
+       }
+
+       if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)        /* PRQA S 3358 */ /* misuse of enums */
+       {
+               pci_set_master(psPVRPCI->psPCIDev);
+       }
+
+       if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI)               /* PRQA S 3358 */ /* misuse of enums */
+       {
+#if defined(CONFIG_PCI_MSI)
+               err = pci_enable_msi(psPVRPCI->psPCIDev);
+               if (err != 0)
+               {
+                       printk(KERN_ERR "OSPCISetDev: Couldn't enable MSI (%d)", err);
+                       psPVRPCI->ePCIFlags &= ~HOST_PCI_INIT_FLAG_MSI; /* PRQA S 1474,3358,4130 */ /* misuse of enums */
+               }
+#else
+               printk(KERN_ERR "OSPCISetDev: MSI support not enabled in the kernel");
+#endif
+       }
+
+       /* Initialise the PCI resource and MTRR tracking array */
+       for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+       {
+               psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+               psPVRPCI->iMTRR[i] = -1;
+#endif
+       }
+
+       return (PVRSRV_PCI_DEV_HANDLE)psPVRPCI;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIAcquireDev
+@Description    Acquire a PCI device for subsequent use.
+@Input          ui16VendorID            Vendor PCI ID
+@Input          ui16DeviceID            Device PCI ID
+@Input          eFlags                  Flags
+@Return                PVRSRV_PCI_DEV_HANDLE   Pointer to PCI device handle
+*/ /**************************************************************************/
+PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID,
+                                     IMG_UINT16 ui16DeviceID,
+                                     HOST_PCI_INIT_FLAGS eFlags)
+{
+       struct pci_dev *psPCIDev;
+
+       psPCIDev = pci_get_device(ui16VendorID, ui16DeviceID, NULL);
+       if (psPCIDev == NULL)
+       {
+               return NULL;
+       }
+
+       return OSPCISetDev((void *)psPCIDev, eFlags);
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIIRQ
+@Description    Get the interrupt number for the device.
+@Input          hPVRPCI                 PCI device handle
+@Output         pui32IRQ                Pointer to where the interrupt number
+                                        should be returned
+@Return                PVRSRV_ERROR            Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ)
+{
+       PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+
+       if (pui32IRQ == NULL)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       *pui32IRQ = psPVRPCI->psPCIDev->irq;
+
+       return PVRSRV_OK;
+}
+
+/* Functions supported by OSPCIAddrRangeFunc */
+enum HOST_PCI_ADDR_RANGE_FUNC
+{
+       HOST_PCI_ADDR_RANGE_FUNC_LEN,
+       HOST_PCI_ADDR_RANGE_FUNC_START,
+       HOST_PCI_ADDR_RANGE_FUNC_END,
+       HOST_PCI_ADDR_RANGE_FUNC_REQUEST,
+       HOST_PCI_ADDR_RANGE_FUNC_RELEASE
+};
+
+/*************************************************************************/ /*!
+@Function       OSPCIAddrRangeFunc
+@Description    Internal support function for various address range related
+                functions
+@Input          eFunc                   Function to perform
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return                IMG_UINT64              Function dependent value
+*/ /**************************************************************************/
+static IMG_UINT64 OSPCIAddrRangeFunc(enum HOST_PCI_ADDR_RANGE_FUNC eFunc,
+                                                                                PVRSRV_PCI_DEV_HANDLE hPVRPCI,
+                                                                                IMG_UINT32 ui32Index)
+{
+       PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+
+       if (ui32Index >= DEVICE_COUNT_RESOURCE)
+       {
+               printk(KERN_ERR "OSPCIAddrRangeFunc: Index out of range");
+               return 0;
+       }
+
+       switch (eFunc)
+       {
+               case HOST_PCI_ADDR_RANGE_FUNC_LEN:
+               {
+                       return pci_resource_len(psPVRPCI->psPCIDev, ui32Index);
+               }
+               case HOST_PCI_ADDR_RANGE_FUNC_START:
+               {
+                       return pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
+               }
+               case HOST_PCI_ADDR_RANGE_FUNC_END:
+               {
+                       return pci_resource_end(psPVRPCI->psPCIDev, ui32Index);
+               }
+               case HOST_PCI_ADDR_RANGE_FUNC_REQUEST:
+               {
+                       int err = pci_request_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index, PVRSRV_MODNAME);
+                       if (err != 0)
+                       {
+                               printk(KERN_ERR "OSPCIAddrRangeFunc: pci_request_region_failed (%d)", err);
+                               return 0;
+                       }
+                       psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_TRUE;
+                       return 1;
+               }
+               case HOST_PCI_ADDR_RANGE_FUNC_RELEASE:
+               {
+                       if (psPVRPCI->abPCIResourceInUse[ui32Index])
+                       {
+                               pci_release_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index);
+                               psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_FALSE;
+                       }
+                       return 1;
+               }
+               default:
+               {
+                       printk(KERN_ERR "OSPCIAddrRangeFunc: Unknown function");
+                       break;
+               }
+       }
+
+       return 0;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIAddrRangeLen
+@Description    Returns length of a given address range
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return                IMG_UINT64              Length of address range or 0 if no
+                                        such range
+*/ /**************************************************************************/
+IMG_UINT64 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+       return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_LEN, hPVRPCI, ui32Index);
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIAddrRangeStart
+@Description    Returns the start of a given address range
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return                IMG_UINT64              Start of address range or 0 if no
+                                        such range
+*/ /**************************************************************************/
+IMG_UINT64 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+       return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_START, hPVRPCI, ui32Index);
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIAddrRangeEnd
+@Description    Returns the end of a given address range
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return                IMG_UINT64              End of address range or 0 if no such
+                                        range
+*/ /**************************************************************************/
+IMG_UINT64 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+       return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_END, hPVRPCI, ui32Index);
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIRequestAddrRange
+@Description    Request a given address range index for subsequent use
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return                PVRSRV_ERROR            Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
+                                                                  IMG_UINT32 ui32Index)
+{
+       if (OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_REQUEST, hPVRPCI, ui32Index) == 0)
+       {
+               return PVRSRV_ERROR_PCI_CALL_FAILED;
+       }
+       else
+       {
+               return PVRSRV_OK;
+       }
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIReleaseAddrRange
+@Description    Release a given address range that is no longer being used
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return                PVRSRV_ERROR            Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+       if (OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_RELEASE, hPVRPCI, ui32Index) == 0)
+       {
+               return PVRSRV_ERROR_PCI_CALL_FAILED;
+       }
+       else
+       {
+               return PVRSRV_OK;
+       }
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIRequestAddrRegion
+@Description    Request a given region from an address range for subsequent use
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Input          uiOffset              Offset into the address range that forms
+                                        the start of the region
+@Input          uiLength              Length of the region
+@Return                PVRSRV_ERROR            Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIRequestAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
+                                                                       IMG_UINT32 ui32Index,
+                                                                       IMG_UINT64 uiOffset,
+                                                                       IMG_UINT64 uiLength)
+{
+       PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+       resource_size_t start;
+       resource_size_t end;
+
+       start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
+       end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index);
+
+       /* Check that the requested region is valid */
+       if ((start + uiOffset + uiLength - 1) > end)
+       {
+               return PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH;
+       }
+
+       if (pci_resource_flags(psPVRPCI->psPCIDev, ui32Index) & IORESOURCE_IO)
+       {
+               if (request_region(start + uiOffset, uiLength, PVRSRV_MODNAME) == NULL)
+               {
+                       return PVRSRV_ERROR_PCI_REGION_UNAVAILABLE;
+               }
+       }
+       else
+       {
+               if (request_mem_region(start + uiOffset, uiLength, PVRSRV_MODNAME) == NULL)
+               {
+                       return PVRSRV_ERROR_PCI_REGION_UNAVAILABLE;
+               }
+       }
+
+       return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIReleaseAddrRegion
+@Description    Release a given region, from an address range, that is no
+                longer in use
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Input          uiOffset                Offset into the address range that forms
+                                        the start of the region
+@Input          uiLength                Length of the region
+@Return                PVRSRV_ERROR            Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIReleaseAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
+                                                                       IMG_UINT32 ui32Index,
+                                                                       IMG_UINT64 uiOffset,
+                                                                       IMG_UINT64 uiLength)
+{
+       PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+       resource_size_t start;
+       resource_size_t end;
+
+       start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
+       end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index);
+
+       /* Check that the region is valid */
+       if ((start + uiOffset + uiLength - 1) > end)
+       {
+               return PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH;
+       }
+
+       if (pci_resource_flags(psPVRPCI->psPCIDev, ui32Index) & IORESOURCE_IO)
+       {
+               release_region(start + uiOffset, uiLength);
+       }
+       else
+       {
+               release_mem_region(start + uiOffset, uiLength);
+       }
+
+       return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIReleaseDev
+@Description    Release a PCI device that is no longer being used
+@Input          hPVRPCI                 PCI device handle
+@Return                PVRSRV_ERROR            Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
+{
+       PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+       int i;
+
+       /* Release all PCI regions that are currently in use */
+       for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+       {
+               if (psPVRPCI->abPCIResourceInUse[i])
+               {
+                       pci_release_region(psPVRPCI->psPCIDev, i);
+                       psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE;
+               }
+       }
+
+#if defined(CONFIG_PCI_MSI)
+       if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI)               /* PRQA S 3358 */ /* misuse of enums */
+       {
+               pci_disable_msi(psPVRPCI->psPCIDev);
+       }
+#endif
+
+       if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)        /* PRQA S 3358 */ /* misuse of enums */
+       {
+               pci_clear_master(psPVRPCI->psPCIDev);
+       }
+
+       pci_disable_device(psPVRPCI->psPCIDev);
+
+       OSFreeMem(psPVRPCI);
+       /*not nulling pointer, copy on stack*/
+
+       return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCISuspendDev
+@Description    Prepare PCI device to be turned off by power management
+@Input          hPVRPCI                 PCI device handle
+@Return                PVRSRV_ERROR            Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
+{
+       PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+       int i;
+       int err;
+
+       /* Release all PCI regions that are currently in use */
+       for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+       {
+               if (psPVRPCI->abPCIResourceInUse[i])
+               {
+                       pci_release_region(psPVRPCI->psPCIDev, i);
+               }
+       }
+
+       err = pci_save_state(psPVRPCI->psPCIDev);
+       if (err != 0)
+       {
+               printk(KERN_ERR "OSPCISuspendDev: pci_save_state_failed (%d)", err);
+               return PVRSRV_ERROR_PCI_CALL_FAILED;
+       }
+
+       pci_disable_device(psPVRPCI->psPCIDev);
+
+       err = pci_set_power_state(psPVRPCI->psPCIDev, pci_choose_state(psPVRPCI->psPCIDev, PMSG_SUSPEND));
+       switch (err)
+       {
+               case 0:
+                       break;
+               case -EIO:
+                       printk(KERN_ERR "OSPCISuspendDev: device doesn't support PCI PM");
+                       break;
+               case -EINVAL:
+                       printk(KERN_ERR "OSPCISuspendDev: can't enter requested power state");
+                       break;
+               default:
+                       printk(KERN_ERR "OSPCISuspendDev: pci_set_power_state failed (%d)", err);
+                       break;
+       }
+
+       return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIResumeDev
+@Description    Prepare a PCI device to be resumed by power management
+@Input          hPVRPCI                 PCI device handle
+@Return                PVRSRV_ERROR            Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
+{
+       PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+       int err;
+       int i;
+
+       err = pci_set_power_state(psPVRPCI->psPCIDev, pci_choose_state(psPVRPCI->psPCIDev, PMSG_ON));
+       switch (err)
+       {
+               case 0:
+                       break;
+               case -EIO:
+                       printk(KERN_ERR "OSPCIResumeDev: device doesn't support PCI PM");
+                       break;
+               case -EINVAL:
+                       printk(KERN_ERR "OSPCIResumeDev: can't enter requested power state");
+                       return PVRSRV_ERROR_UNKNOWN_POWER_STATE;
+               default:
+                       printk(KERN_ERR "OSPCIResumeDev: pci_set_power_state failed (%d)", err);
+                       return PVRSRV_ERROR_UNKNOWN_POWER_STATE;
+       }
+
+       pci_restore_state(psPVRPCI->psPCIDev);
+
+       err = pci_enable_device(psPVRPCI->psPCIDev);
+       if (err != 0)
+       {
+               printk(KERN_ERR "OSPCIResumeDev: Couldn't enable device (%d)", err);
+               return PVRSRV_ERROR_PCI_CALL_FAILED;
+       }
+
+       if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)        /* PRQA S 3358 */ /* misuse of enums */
+               pci_set_master(psPVRPCI->psPCIDev);
+
+       /* Restore the PCI resource tracking array */
+       for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+       {
+               if (psPVRPCI->abPCIResourceInUse[i])
+               {
+                       err = pci_request_region(psPVRPCI->psPCIDev, i, PVRSRV_MODNAME);
+                       if (err != 0)
+                       {
+                               printk(KERN_ERR "OSPCIResumeDev: pci_request_region_failed (region %d, error %d)", i, err);
+                       }
+               }
+       }
+
+       return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIGetVendorDeviceIDs
+@Description    Retrieve PCI vendor ID and device ID.
+@Input          hPVRPCI                 PCI device handle
+@Output         pui16VendorID           Vendor ID
+@Output         pui16DeviceID           Device ID
+@Return         PVRSRV_ERROR            Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIGetVendorDeviceIDs(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
+                                     IMG_UINT16 *pui16VendorID,
+                                     IMG_UINT16 *pui16DeviceID)
+{
+       PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+       struct pci_dev *psPCIDev;
+
+       if (psPVRPCI == NULL)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       psPCIDev = psPVRPCI->psPCIDev;
+       if (psPCIDev == NULL)
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       *pui16VendorID = psPCIDev->vendor;
+       *pui16DeviceID = psPCIDev->device;
+
+       return PVRSRV_OK;
+}
+
+#if defined(CONFIG_MTRR)
+
+/*************************************************************************/ /*!
+@Function       OSPCIClearResourceMTRRs
+@Description    Clear any BIOS-configured MTRRs for a PCI memory region
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return                PVRSRV_ERROR            Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+       PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+       resource_size_t start, end;
+       int res;
+
+       start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
+       end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index) + 1;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
+       res = arch_io_reserve_memtype_wc(start, end - start);
+       if (res)
+       {
+               return PVRSRV_ERROR_PCI_CALL_FAILED;
+       }
+#endif
+       res = arch_phys_wc_add(start, end - start);
+       if (res < 0)
+       {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
+               arch_io_free_memtype_wc(start, end - start);
+#endif
+
+               return PVRSRV_ERROR_PCI_CALL_FAILED;
+       }
+       psPVRPCI->iMTRR[ui32Index] = res;
+#else
+
+       res = mtrr_add(start, end - start, MTRR_TYPE_UNCACHABLE, 0);
+       if (res < 0)
+       {
+               printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", res);
+               return PVRSRV_ERROR_PCI_CALL_FAILED;
+       }
+
+       res = mtrr_del(res, start, end - start);
+       if (res < 0)
+       {
+               printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_del failed (%d)", res);
+               return PVRSRV_ERROR_PCI_CALL_FAILED;
+       }
+
+       /* Workaround for overlapping MTRRs. */
+       {
+               IMG_BOOL bGotMTRR0 = IMG_FALSE;
+
+               /* Current mobo BIOSes will normally set up a WRBACK MTRR spanning
+                * 0->4GB, and then another 4GB->6GB. If the PCI card's automatic &
+                * overlapping UNCACHABLE MTRR is deleted, we see WRBACK behaviour.
+                *
+                * WRBACK is incompatible with some PCI devices, so try to split
+                * the UNCACHABLE regions up and insert a WRCOMB region instead.
+                */
+               res = mtrr_add(start, end - start, MTRR_TYPE_WRBACK, 0);
+               if (res < 0)
+               {
+                       /* If this fails, services has probably run before and created
+                        * a write-combined MTRR for the test chip. Assume it has, and
+                        * don't return an error here.
+                        */
+                       return PVRSRV_OK;
+               }
+
+               if (res == 0)
+                       bGotMTRR0 = IMG_TRUE;
+
+               res = mtrr_del(res, start, end - start);
+               if (res < 0)
+               {
+                       printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_del failed (%d)", res);
+                       return PVRSRV_ERROR_PCI_CALL_FAILED;
+               }
+
+               if (bGotMTRR0)
+               {
+                       /* Replace 0 with a non-overlapping WRBACK MTRR */
+                       res = mtrr_add(0, start, MTRR_TYPE_WRBACK, 0);
+                       if (res < 0)
+                       {
+                               printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", res);
+                               return PVRSRV_ERROR_PCI_CALL_FAILED;
+                       }
+
+                       /* Add a WRCOMB MTRR for the PCI device memory bar */
+                       res = mtrr_add(start, end - start, MTRR_TYPE_WRCOMB, 0);
+                       if (res < 0)
+                       {
+                               printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", res);
+                               return PVRSRV_ERROR_PCI_CALL_FAILED;
+                       }
+               }
+       }
+#endif
+
+       return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIReleaseResourceMTRRs
+@Description    Release resources allocated by OSPCIClearResourceMTRRs
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+*/ /**************************************************************************/
+void OSPCIReleaseResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+       PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+
+       if (psPVRPCI->iMTRR[ui32Index] >= 0)
+       {
+               arch_phys_wc_del(psPVRPCI->iMTRR[ui32Index]);
+               psPVRPCI->iMTRR[ui32Index] = -1;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
+               {
+                       resource_size_t start, end;
+
+                       start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
+                       end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index) + 1;
+
+                       arch_io_free_memtype_wc(start, end - start);
+               }
+#endif
+       }
+#else
+       PVR_UNREFERENCED_PARAMETER(hPVRPCI);
+       PVR_UNREFERENCED_PARAMETER(ui32Index);
+#endif
+}
+#endif /* defined(CONFIG_MTRR) */
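
For reference, the two MTRR helpers above are meant to be used as a clear-before-map, release-on-teardown pair. The sketch below is illustrative only and not part of the patch; it assumes a PCI handle and BAR index obtained through the pci_support.h API added later in this series, and it builds against either the CONFIG_MTRR implementations above or the inline no-op fallbacks in that header.

/* Minimal sketch: clear BIOS-configured MTRRs before mapping a BAR
 * write-combined, and undo the change when the mapping is torn down.
 * hPVRPCI and ui32BarIndex are assumed to come from the caller. */
static PVRSRV_ERROR ExampleMapBarWriteCombined(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
                                               IMG_UINT32 ui32BarIndex)
{
        PVRSRV_ERROR eError;

        eError = OSPCIClearResourceMTRRs(hPVRPCI, ui32BarIndex);
        if (eError != PVRSRV_OK)
        {
                return eError;
        }

        /* ... ioremap and use the BAR here ... */

        OSPCIReleaseResourceMTRRs(hPVRPCI, ui32BarIndex);

        return PVRSRV_OK;
}
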
diff --git a/drivers/gpu/drm/img/img-rogue/services/system/common/sysconfig_cmn.c b/drivers/gpu/drm/img/img-rogue/services/system/common/sysconfig_cmn.c
new file mode 100644 (file)
index 0000000..ac878dd
--- /dev/null
@@ -0,0 +1,132 @@
+/*************************************************************************/ /*!
+@File
+@Title          Sysconfig layer common to all platforms
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements system layer functions common to all platforms
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv.h"
+#include "pvrsrv_device.h"
+#include "syscommon.h"
+#include "pvr_debug.h"
+
+void SysRGXErrorNotify(IMG_HANDLE hSysData,
+                       PVRSRV_ROBUSTNESS_NOTIFY_DATA *psErrorData)
+{
+       PVR_UNREFERENCED_PARAMETER(hSysData);
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+       {
+               IMG_UINT32 ui32DgbLvl;
+
+               switch (psErrorData->eResetReason)
+               {
+                       case RGX_CONTEXT_RESET_REASON_NONE:
+                       case RGX_CONTEXT_RESET_REASON_GUILTY_LOCKUP:
+                       case RGX_CONTEXT_RESET_REASON_INNOCENT_LOCKUP:
+                       case RGX_CONTEXT_RESET_REASON_GUILTY_OVERRUNING:
+                       case RGX_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING:
+                       case RGX_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH:
+                       case RGX_CONTEXT_RESET_REASON_GPU_ECC_OK:
+                       case RGX_CONTEXT_RESET_REASON_FW_ECC_OK:
+                       {
+                               ui32DgbLvl = PVR_DBG_MESSAGE;
+                               break;
+                       }
+                       case RGX_CONTEXT_RESET_REASON_GPU_ECC_HWR:
+                       case RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR:
+                       {
+                               ui32DgbLvl = PVR_DBG_WARNING;
+                               break;
+                       }
+                       case RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM:
+                       case RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM:
+                       case RGX_CONTEXT_RESET_REASON_FW_ECC_ERR:
+                       case RGX_CONTEXT_RESET_REASON_FW_WATCHDOG:
+                       case RGX_CONTEXT_RESET_REASON_FW_PAGEFAULT:
+                       case RGX_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR:
+                       {
+                               ui32DgbLvl = PVR_DBG_ERROR;
+                               break;
+                       }
+                       default:
+                       {
+                               PVR_ASSERT(false && "Unhandled reset reason");
+                               ui32DgbLvl = PVR_DBG_ERROR;
+                               break;
+                       }
+               }
+
+               if (psErrorData->pid > 0)
+               {
+                       PVRSRVDebugPrintf(ui32DgbLvl, __FILE__, __LINE__, " PID %d experienced error %d",
+                                        psErrorData->pid, psErrorData->eResetReason);
+               }
+               else
+               {
+                       PVRSRVDebugPrintf(ui32DgbLvl, __FILE__, __LINE__, " Device experienced error %d",
+                                        psErrorData->eResetReason);
+               }
+
+               switch (psErrorData->eResetReason)
+               {
+                       case RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM:
+                       case RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM:
+                       {
+                               PVRSRVDebugPrintf(ui32DgbLvl, __FILE__, __LINE__, "   ExtJobRef 0x%x, DM %d",
+                                                psErrorData->uErrData.sChecksumErrData.ui32ExtJobRef,
+                                                psErrorData->uErrData.sChecksumErrData.eDM);
+                               break;
+                       }
+                       default:
+                       {
+                               break;
+                       }
+               }
+       }
+#else
+       PVR_UNREFERENCED_PARAMETER(psErrorData);
+#endif /* PVRSRV_NEED_PVR_DPF */
+}
+
+/******************************************************************************
+ End of file (sysconfig_cmn.c)
+******************************************************************************/
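
SysRGXErrorNotify() above only classifies and prints the reported reset reason; it takes effect once the system layer registers it as the pfnSysDevErrorNotify callback described in syscommon.h further down in this patch. A minimal, hypothetical hook-up is shown below; the rest of the device-config setup is platform specific and not shown.

/* Hypothetical fragment of a platform SysDevInit(): route GPU/firmware
 * error reports into the common handler above. The field name comes from
 * the syscommon.h comment; everything else here is illustrative. */
static void ExampleWireErrorNotify(PVRSRV_DEVICE_CONFIG *psDevConfig)
{
        psDevConfig->pfnSysDevErrorNotify = SysRGXErrorNotify;
}
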
diff --git a/drivers/gpu/drm/img/img-rogue/services/system/include/interrupt_support.h b/drivers/gpu/drm/img/img-rogue/services/system/include/interrupt_support.h
new file mode 100644 (file)
index 0000000..b87772d
--- /dev/null
@@ -0,0 +1,103 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(INTERRUPT_SUPPORT_H)
+#define INTERRUPT_SUPPORT_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_device.h"
+
+/*! Default trigger type for the interrupt line. */
+#define SYS_IRQ_FLAG_TRIGGER_DEFAULT (0x0 << 0)
+/*! Interrupt triggered when interrupt line is low. */
+#define SYS_IRQ_FLAG_TRIGGER_LOW     (0x1 << 0)
+/*! Interrupt triggered when interrupt line is high. */
+#define SYS_IRQ_FLAG_TRIGGER_HIGH    (0x2 << 0)
+/*! Interrupt trigger mask. */
+#define SYS_IRQ_FLAG_TRIGGER_MASK    (SYS_IRQ_FLAG_TRIGGER_DEFAULT | \
+                                      SYS_IRQ_FLAG_TRIGGER_LOW | \
+                                      SYS_IRQ_FLAG_TRIGGER_HIGH)
+/*! The irq is allowed to be shared among several devices. */
+#define SYS_IRQ_FLAG_SHARED          (0x1 << 8)
+
+/*! Interrupt flags mask. */
+#define SYS_IRQ_FLAG_MASK            (SYS_IRQ_FLAG_TRIGGER_MASK | \
+                                      SYS_IRQ_FLAG_SHARED)
+
+/*************************************************************************/ /*!
+@Description    Pointer to a system Low-level Interrupt Service Routine (LISR).
+@Input  pvData  Private data provided to the LISR.
+@Return         IMG_TRUE if interrupt handled, IMG_FALSE otherwise.
+*/ /**************************************************************************/
+typedef IMG_BOOL (*PFN_SYS_LISR)(void *pvData);
+
+/*************************************************************************/ /*!
+@Function       OSInstallSystemLISR
+@Description    Installs a system low-level interrupt handler
+@Output         phLISR                  On return, contains a handle to the
+                                        installed LISR
+@Input          ui32IRQ                 The IRQ number for which the
+                                        interrupt handler should be installed
+@Input          pszDevName              Name of the device for which the handler
+                                        is being installed
+@Input          pfnLISR                 A pointer to an interrupt handler
+                                        function
+@Input          pvData                  A pointer to data that should be passed
+                                        to pfnLISR when it is called
+@Input          ui32Flags               Interrupt flags
+@Return         PVRSRV_OK on success, a failure code otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR OSInstallSystemLISR(IMG_HANDLE *phLISR,
+                                IMG_UINT32 ui32IRQ,
+                                const IMG_CHAR *pszDevName,
+                                PFN_SYS_LISR pfnLISR,
+                                void *pvData,
+                                IMG_UINT32 ui32Flags);
+
+/*************************************************************************/ /*!
+@Function       OSUninstallSystemLISR
+@Description    Uninstalls a system low-level interrupt handler
+@Input          hLISRData              The handle to the LISR to uninstall
+@Return         PVRSRV_OK on success, a failure code otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR OSUninstallSystemLISR(IMG_HANDLE hLISRData);
+#endif /* !defined(INTERRUPT_SUPPORT_H) */
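
A short usage sketch for the API above; the IRQ number, device name and private data are placeholders, error handling is reduced to the essentials, and the code is not part of the patch.

/* Illustrative LISR plus install/uninstall sequence. */
static IMG_BOOL ExampleLISR(void *pvData)
{
        /* Check and acknowledge the device interrupt here; pvData is the
         * pointer passed to OSInstallSystemLISR() below. */
        return IMG_TRUE; /* IMG_TRUE: the interrupt was ours and was handled */
}

static PVRSRV_ERROR ExampleInstallIrq(IMG_UINT32 ui32IRQ, void *pvDevice,
                                      IMG_HANDLE *phLISR)
{
        return OSInstallSystemLISR(phLISR, ui32IRQ, "example-gpu", ExampleLISR,
                                   pvDevice,
                                   SYS_IRQ_FLAG_TRIGGER_DEFAULT |
                                   SYS_IRQ_FLAG_SHARED);
}

/* Tear-down is simply: OSUninstallSystemLISR(hLISR); */
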
diff --git a/drivers/gpu/drm/img/img-rogue/services/system/include/pci_support.h b/drivers/gpu/drm/img/img-rogue/services/system/include/pci_support.h
new file mode 100644 (file)
index 0000000..29746c6
--- /dev/null
@@ -0,0 +1,99 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PCI_SUPPORT_H
+#define PCI_SUPPORT_H
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(__linux__)
+#include <linux/pci.h>
+#define TO_PCI_COOKIE(dev) to_pci_dev((struct device *)(dev))
+#else
+#define TO_PCI_COOKIE(dev) (dev)
+#endif
+
+typedef enum _HOST_PCI_INIT_FLAGS_
+{
+       HOST_PCI_INIT_FLAG_BUS_MASTER   = 0x00000001,
+       HOST_PCI_INIT_FLAG_MSI          = 0x00000002,
+       HOST_PCI_INIT_FLAG_FORCE_I32    = 0x7fffffff
+} HOST_PCI_INIT_FLAGS;
+
+struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_;
+typedef struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_ *PVRSRV_PCI_DEV_HANDLE;
+
+PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags);
+PVRSRV_PCI_DEV_HANDLE OSPCISetDev(void *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags);
+PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
+PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ);
+IMG_UINT64 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+IMG_UINT64 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+IMG_UINT64 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+PVRSRV_ERROR OSPCIRequestAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index, IMG_UINT64 uiOffset, IMG_UINT64 uiLength);
+PVRSRV_ERROR OSPCIReleaseAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index, IMG_UINT64 uiOffset, IMG_UINT64 uiLength);
+PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
+PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
+PVRSRV_ERROR OSPCIGetVendorDeviceIDs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT16 *pui16VendorID, IMG_UINT16 *pui16DeviceID);
+
+#if defined(CONFIG_MTRR)
+PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+void OSPCIReleaseResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+#else
+static inline PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+       PVR_UNREFERENCED_PARAMETER(hPVRPCI);
+       PVR_UNREFERENCED_PARAMETER(ui32Index);
+       return PVRSRV_OK;
+}
+
+static inline void OSPCIReleaseResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+       PVR_UNREFERENCED_PARAMETER(hPVRPCI);
+       PVR_UNREFERENCED_PARAMETER(ui32Index);
+}
+#endif
+
+#endif /* PCI_SUPPORT_H */
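
The declarations above imply an acquire / query / request / release lifecycle. The sketch below strings those calls together; the vendor and device IDs are placeholders, BAR 0 is an arbitrary choice, and a NULL return from OSPCIAcquireDev() is assumed to indicate failure. It is illustrative only, not code from this patch.

/* Illustrative PCI bring-up using the API above. */
static PVRSRV_ERROR ExamplePCIBringUp(void)
{
        PVRSRV_PCI_DEV_HANDLE hPVRPCI;
        IMG_UINT32 ui32IRQ;
        IMG_UINT64 ui64RegBase, ui64RegLen;
        PVRSRV_ERROR eError;

        /* 0x1010/0x0001 are placeholder IDs, not values used by this patch */
        hPVRPCI = OSPCIAcquireDev(0x1010, 0x0001, HOST_PCI_INIT_FLAG_BUS_MASTER);
        if (hPVRPCI == NULL)
        {
                return PVRSRV_ERROR_PCI_CALL_FAILED;
        }

        eError = OSPCIIRQ(hPVRPCI, &ui32IRQ);
        if (eError != PVRSRV_OK)
        {
                goto fail;
        }

        /* Reserve BAR 0 and note where the register window lives */
        eError = OSPCIRequestAddrRange(hPVRPCI, 0);
        if (eError != PVRSRV_OK)
        {
                goto fail;
        }
        ui64RegBase = OSPCIAddrRangeStart(hPVRPCI, 0);
        ui64RegLen  = OSPCIAddrRangeLen(hPVRPCI, 0);

        /* ... map ui64RegBase/ui64RegLen and install a LISR on ui32IRQ ... */
        (void)ui64RegBase; /* quiet unused warnings in this standalone sketch */
        (void)ui64RegLen;
        return PVRSRV_OK;

fail:
        OSPCIReleaseDev(hPVRPCI);
        return eError;
}
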
diff --git a/drivers/gpu/drm/img/img-rogue/services/system/include/syscommon.h b/drivers/gpu/drm/img/img-rogue/services/system/include/syscommon.h
new file mode 100644 (file)
index 0000000..9349748
--- /dev/null
@@ -0,0 +1,146 @@
+/**************************************************************************/ /*!
+@File
+@Title          Common System APIs and structures
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides common system-specific declarations and
+                macros that are supported by all systems
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(SYSCOMMON_H)
+#define SYSCOMMON_H
+
+#include "img_types.h"
+#include "pvr_notifier.h"
+#include "pvrsrv_device.h"
+#include "pvrsrv_error.h"
+
+/*************************************************************************/ /*!
+@Description    Pointer to a Low-level Interrupt Service Routine (LISR).
+@Input  pvData  Private data provided to the LISR.
+@Return         True if interrupt handled, false otherwise.
+*/ /**************************************************************************/
+typedef IMG_BOOL (*PFN_LISR)(void *pvData);
+
+/**************************************************************************/ /*!
+@Function       SysDevInit
+@Description    System specific device initialisation function.
+@Input          pvOSDevice          pointer to the OS device reference
+@Input          ppsDevConfig        returned device configuration info
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig);
+
+/**************************************************************************/ /*!
+@Function       SysDevDeInit
+@Description    System specific device deinitialisation function.
+@Input          psDevConfig        device configuration info of the device to be
+                                   deinitialised
+@Return         None.
+*/ /***************************************************************************/
+void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/**************************************************************************/ /*!
+@Function       SysDebugInfo
+@Description    Dump system specific device debug information.
+@Input          psDevConfig         pointer to device configuration info
+@Input          pfnDumpDebugPrintf  the 'printf' function to be called to
+                                    display the debug info
+@Input          pvDumpDebugFile     optional file identifier to be passed to
+                                    the 'printf' function if required
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig,
+                               DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                               void *pvDumpDebugFile);
+
+/**************************************************************************/ /*!
+@Function       SysInstallDeviceLISR
+@Description    Installs the system Low-level Interrupt Service Routine (LISR)
+                which handles low-level processing of interrupts from the device
+                (GPU).
+                The LISR will be invoked when the device raises an interrupt. An
+                LISR may not be descheduled, so code which needs to do so should
+                be placed in an MISR.
+                The installed LISR will schedule any MISRs once it has completed
+                its interrupt processing, by calling OSScheduleMISR().
+@Input          hSysData      pointer to the system data of the device
+@Input          ui32IRQ       the IRQ on which the LISR is to be installed
+@Input          pszName       name of the module installing the LISR
+@Input          pfnLISR       pointer to the function to be installed as the
+                              LISR
+@Input          pvData        private data provided to the LISR
+@Output         phLISRData    handle to the installed LISR (to be used for a
+                              subsequent uninstall)
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData,
+                                                                 IMG_UINT32 ui32IRQ,
+                                                                 const IMG_CHAR *pszName,
+                                                                 PFN_LISR pfnLISR,
+                                                                 void *pvData,
+                                                                 IMG_HANDLE *phLISRData);
+
+/**************************************************************************/ /*!
+@Function       SysUninstallDeviceLISR
+@Description    Uninstalls the system Low-level Interrupt Service Routine (LISR)
+                which handles low-level processing of interrupts from the device
+                (GPU).
+@Input          hLISRData     handle of the LISR to be uninstalled
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData);
+
+/**************************************************************************/ /*!
+@Function       SysRGXErrorNotify
+@Description    Error reporting callback function, registered as the
+                pfnSysDevErrorNotify member of the PVRSRV_DEVICE_CONFIG
+                struct. System layer will be notified of device errors and
+                resets via this callback.
+                NB. implementers should ensure that the minimal amount of
+                work is done in this callback function, as it will be
+                executed in the main RGX MISR. (e.g. any blocking or lengthy
+                work should be performed by a worker queue/thread instead).
+@Input          hSysData      pointer to the system data of the device
+@Output         psErrorData   structure containing details of the reported error
+@Return         None.
+*/ /***************************************************************************/
+void SysRGXErrorNotify(IMG_HANDLE hSysData,
+                       PVRSRV_ROBUSTNESS_NOTIFY_DATA *psErrorData);
+
+#endif /* !defined(SYSCOMMON_H) */
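
SysInstallDeviceLISR()/SysUninstallDeviceLISR() are left to the system layer to implement; one plausible implementation simply forwards to the OS helpers declared in interrupt_support.h. The sketch below is a hypothetical illustration of that pattern, not the implementation used by this patch, and assumes interrupt_support.h is included and default interrupt triggering is acceptable.

/* Hypothetical system-layer forwarding to the OS LISR helpers. */
PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData,
                                  IMG_UINT32 ui32IRQ,
                                  const IMG_CHAR *pszName,
                                  PFN_LISR pfnLISR,
                                  void *pvData,
                                  IMG_HANDLE *phLISRData)
{
        PVR_UNREFERENCED_PARAMETER(hSysData);

        return OSInstallSystemLISR(phLISRData, ui32IRQ, pszName, pfnLISR,
                                   pvData, SYS_IRQ_FLAG_TRIGGER_DEFAULT);
}

PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData)
{
        return OSUninstallSystemLISR(hLISRData);
}
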
diff --git a/drivers/gpu/drm/img/img-rogue/services/system/include/sysvalidation.h b/drivers/gpu/drm/img/img-rogue/services/system/include/sysvalidation.h
new file mode 100644 (file)
index 0000000..5f6d5f9
--- /dev/null
@@ -0,0 +1,62 @@
+/*************************************************************************/ /*!
+@File
+@Title          Validation System APIs and structures
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides system-specific declarations and macros
+                needed for hardware validation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(SYSVALIDATION_H)
+#define SYSVALIDATION_H
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "img_types.h"
+#include "rgxdefs_km.h"
+#include "virt_validation_defs.h"
+
+void SysInitVirtInitialization(IMG_UINT64 aui64OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS],
+                         IMG_UINT64 aui64OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION) && defined(EMULATOR)
+void SysSetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState);
+void SysSetTrustedDeviceAceEnabled(void);
+#endif
+#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */
+
+#endif /* !defined(SYSVALIDATION_H) */
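
The declaration above is only compiled for GPU virtualisation validation builds. A minimal, hypothetical call site follows; in practice the min/max tables would be filled from the platform's memory layout rather than zeros.

/* Illustrative call, meaningful only when SUPPORT_GPUVIRT_VALIDATION is set. */
static void ExampleVirtRegionSetup(void)
{
        IMG_UINT64 aui64Min[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS] = {{0}};
        IMG_UINT64 aui64Max[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS] = {{0}};

        SysInitVirtInitialization(aui64Min, aui64Max);
}
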
diff --git a/drivers/gpu/drm/img/img-rogue/services/system/rogue/common/env/linux/dma_support.c b/drivers/gpu/drm/img/img-rogue/services/system/rogue/common/env/linux/dma_support.c
new file mode 100644 (file)
index 0000000..d846fb7
--- /dev/null
@@ -0,0 +1,527 @@
+/*************************************************************************/ /*!
+@File           dma_support.c
+@Title          System DMA support
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provides a contiguous memory allocator (i.e. DMA allocator);
+                APIs are used for allocation/ioremapping (DMA/PA <-> CPU/VA)
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/mm.h>
+#include <asm/page.h>
+#include <linux/device.h>
+#include <linux/highmem.h>
+#include <linux/vmalloc.h>
+#include <linux/dma-mapping.h>
+#include <asm-generic/getorder.h>
+
+#include "allocmem.h"
+#include "dma_support.h"
+#include "pvr_vmap.h"
+#include "kernel_compatibility.h"
+
+#define DMA_MAX_IOREMAP_ENTRIES 2
+static IMG_BOOL gbEnableDmaIoRemapping = IMG_FALSE;
+static DMA_ALLOC gsDmaIoRemapArray[DMA_MAX_IOREMAP_ENTRIES] = {{0}};
+
+extern void do_invalid_range(unsigned long start, unsigned long len);
+
+static void*
+SysDmaAcquireKernelAddress(struct page *psPage, IMG_UINT64 ui64Size, DMA_ALLOC *psDmaAlloc)
+{
+       IMG_BOOL bPageByPage = IMG_TRUE;
+       IMG_UINT32 uiIdx;
+       void *pvVirtAddr = NULL;
+       IMG_UINT32 ui32PgCount = (IMG_UINT32)(ui64Size >> OSGetPageShift());
+       PVRSRV_DEVICE_NODE *psDevNode = OSAllocZMemNoStats(sizeof(*psDevNode));
+       PVRSRV_DEVICE_CONFIG *psDevConfig = OSAllocZMemNoStats(sizeof(*psDevConfig));
+       struct page **pagearray = OSAllocZMemNoStats(ui32PgCount * sizeof(struct page *));
+       void *pvOSDevice = psDmaAlloc->pvOSDevice;
+#if defined(CONFIG_ARM64)
+       pgprot_t prot = pgprot_writecombine(PAGE_KERNEL);
+#else
+       pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
+#endif
+
+       /* Validate all required dynamic tmp buffer allocations */
+       if (psDevNode == NULL || psDevConfig == NULL || pagearray == NULL)
+       {
+               if (psDevNode)
+               {
+                       OSFreeMem(psDevNode);
+               }
+
+               if (psDevConfig)
+               {
+                       OSFreeMem(psDevConfig);
+               }
+
+               if (pagearray)
+               {
+                       OSFreeMem(pagearray);
+               }
+
+               goto e0;
+       }
+
+       /* Fake psDevNode->psDevConfig->pvOSDevice */
+       psDevConfig->pvOSDevice = pvOSDevice;
+       psDevNode->psDevConfig = psDevConfig;
+
+       /* Evict any page data contents from d-cache */
+       for (uiIdx = 0; uiIdx < ui32PgCount; uiIdx++)
+       {
+               void *pvVirtStart, *pvVirtEnd;
+               IMG_CPU_PHYADDR sCPUPhysStart, sCPUPhysEnd;
+
+               /* Prepare array required for vmap */
+               pagearray[uiIdx] = &psPage[uiIdx];
+
+               if (bPageByPage)
+               {
+#if defined(CONFIG_64BIT)
+                       bPageByPage = IMG_FALSE;
+
+                       pvVirtStart = kmap(&psPage[uiIdx]);
+                       pvVirtEnd = pvVirtStart + ui64Size;
+
+                       sCPUPhysStart.uiAddr = page_to_phys(&psPage[uiIdx]);
+                       sCPUPhysEnd.uiAddr = sCPUPhysStart.uiAddr + ui64Size;
+                       /* all pages have a kernel linear address, flush entire range */
+#else
+                       pvVirtStart = kmap(&psPage[uiIdx]);
+                       pvVirtEnd = pvVirtStart + PAGE_SIZE;
+
+                       sCPUPhysStart.uiAddr = page_to_phys(&psPage[uiIdx]);
+                       sCPUPhysEnd.uiAddr = sCPUPhysStart.uiAddr + PAGE_SIZE;
+                       /* pages might be from HIGHMEM, need to kmap/flush per page */
+#endif
+
+                       /* Fallback to range-based d-cache flush */
+                       OSCPUCacheInvalidateRangeKM(psDevNode,
+                                                                               pvVirtStart, pvVirtEnd,
+                                                                               sCPUPhysStart, sCPUPhysEnd);
+
+                       kunmap(&psPage[uiIdx]);
+               }
+       }
+
+       do_invalid_range(0x0, 0x200000);
+
+       /* Remap pages into VMALLOC space */
+       pvVirtAddr = pvr_vmap(pagearray, ui32PgCount, VM_READ | VM_WRITE, prot);
+       psDmaAlloc->PageProps = prot;
+
+       /* Clean-up tmp buffers */
+       OSFreeMem(psDevConfig);
+       OSFreeMem(psDevNode);
+       OSFreeMem(pagearray);
+
+e0:
+       return pvVirtAddr;
+}
+
+static void SysDmaReleaseKernelAddress(void *pvVirtAddr, IMG_UINT64 ui64Size, pgprot_t pgprot)
+{
+       pvr_vunmap(pvVirtAddr, ui64Size >> OSGetPageShift(), pgprot);
+}
+
+/*!
+******************************************************************************
+ @Function                     SysDmaAllocMem
+
+ @Description          Allocates physically contiguous memory
+
 @Return                        PVRSRV_ERROR    PVRSRV_OK on success.
                                                                        Otherwise, a PVRSRV_ error code
+ ******************************************************************************/
+PVRSRV_ERROR SysDmaAllocMem(DMA_ALLOC *psDmaAlloc)
+{
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       struct device *psDev;
+       struct page *psPage;
+       size_t uiSize;
+
+       if (psDmaAlloc == NULL ||
+               psDmaAlloc->hHandle ||
+               psDmaAlloc->pvVirtAddr ||
+               psDmaAlloc->ui64Size == 0 ||
+               psDmaAlloc->sBusAddr.uiAddr ||
+               psDmaAlloc->pvOSDevice == NULL)
+       {
+               PVR_LOG_IF_FALSE((IMG_FALSE), "Invalid parameter");
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       uiSize = PVR_ALIGN(psDmaAlloc->ui64Size, PAGE_SIZE);
+       psDev = (struct device *)psDmaAlloc->pvOSDevice;
+
+       psDmaAlloc->hHandle = dma_alloc_coherent(psDev, uiSize, (dma_addr_t *)&psDmaAlloc->sBusAddr.uiAddr, GFP_KERNEL);
+
+       if (psDmaAlloc->hHandle)
+       {
+               psDmaAlloc->pvVirtAddr = psDmaAlloc->hHandle;
+
+               PVR_DPF((PVR_DBG_MESSAGE,
+                               "Allocated DMA buffer V:0x%p P:0x%llx S:0x"IMG_SIZE_FMTSPECX,
+                               psDmaAlloc->pvVirtAddr,
+                               psDmaAlloc->sBusAddr.uiAddr,
+                               uiSize));
+       }
+       else if ((psPage = alloc_pages(GFP_KERNEL, get_order(uiSize))))
+       {
+               psDmaAlloc->sBusAddr.uiAddr = dma_map_page(psDev, psPage, 0, uiSize, DMA_BIDIRECTIONAL);
+               if (dma_mapping_error(psDev, psDmaAlloc->sBusAddr.uiAddr))
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "dma_map_page() failed, page 0x%p order %d",
+                                       psPage,
+                                       get_order(uiSize)));
+                       __free_pages(psPage, get_order(uiSize));
+                       goto e0;
+               }
+               psDmaAlloc->psPage = psPage;
+
+               psDmaAlloc->pvVirtAddr = SysDmaAcquireKernelAddress(psPage, uiSize, psDmaAlloc);
+               if (! psDmaAlloc->pvVirtAddr)
+               {
+                       PVR_DPF((PVR_DBG_ERROR,
+                                       "SysDmaAcquireKernelAddress() failed, page 0x%p order %d",
+                                       psPage,
+                                       get_order(uiSize)));
+                       dma_unmap_page(psDev, psDmaAlloc->sBusAddr.uiAddr, uiSize, DMA_BIDIRECTIONAL);
+                       __free_pages(psPage, get_order(uiSize));
+                       goto e0;
+               }
+
+               PVR_DPF((PVR_DBG_MESSAGE,
+                               "Allocated contiguous buffer V:0x%p P:0x%llx S:0x"IMG_SIZE_FMTSPECX,
+                               psDmaAlloc->pvVirtAddr,
+                               psDmaAlloc->sBusAddr.uiAddr,
+                               uiSize));
+       }
+       else
+       {
+               PVR_DPF((PVR_DBG_ERROR, "Unable to allocate contiguous buffer, size: 0x"IMG_SIZE_FMTSPECX, uiSize));
+               eError = PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES;
+       }
+
+e0:
+       PVR_LOG_RETURN_IF_FALSE((psDmaAlloc->pvVirtAddr), "DMA/CMA allocation failed", PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES);
+       return eError;
+}
+
+/*!
+******************************************************************************
+ @Function                     SysDmaFreeMem
+
+ @Description          Free physically contiguous memory
+
+ @Return                       void
+ ******************************************************************************/
+void SysDmaFreeMem(DMA_ALLOC *psDmaAlloc)
+{
+       size_t uiSize;
+       struct device *psDev;
+
+       if (psDmaAlloc == NULL ||
+               psDmaAlloc->ui64Size == 0 ||
+               psDmaAlloc->pvOSDevice == NULL ||
+               psDmaAlloc->pvVirtAddr == NULL ||
+               psDmaAlloc->sBusAddr.uiAddr == 0)
+       {
+               PVR_LOG_IF_FALSE((IMG_FALSE), "Invalid parameter");
+               return;
+       }
+
+       uiSize = PVR_ALIGN(psDmaAlloc->ui64Size, PAGE_SIZE);
+       psDev = (struct device *)psDmaAlloc->pvOSDevice;
+
+       if (psDmaAlloc->pvVirtAddr != psDmaAlloc->hHandle)
+       {
+               SysDmaReleaseKernelAddress(psDmaAlloc->pvVirtAddr, uiSize, psDmaAlloc->PageProps);
+       }
+
+       if (! psDmaAlloc->hHandle)
+       {
+               struct page *psPage;
+               dma_unmap_page(psDev, psDmaAlloc->sBusAddr.uiAddr, uiSize, DMA_BIDIRECTIONAL);
+               psPage = psDmaAlloc->psPage;
+               __free_pages(psPage, get_order(uiSize));
+               return;
+       }
+
+       dma_free_coherent(psDev, uiSize, psDmaAlloc->hHandle, (dma_addr_t)psDmaAlloc->sBusAddr.uiAddr);
+}
+
+/*!
+******************************************************************************
+ @Function                     SysDmaRegisterForIoRemapping
+
+ @Description          Registers DMA_ALLOC for manual I/O remapping
+
 @Return                        PVRSRV_ERROR    PVRSRV_OK on success.
                                                                        Otherwise, a PVRSRV_ error code
+ ******************************************************************************/
+PVRSRV_ERROR SysDmaRegisterForIoRemapping(DMA_ALLOC *psDmaAlloc)
+{
+       size_t uiSize;
+       IMG_UINT32 ui32Idx;
+       IMG_BOOL bTabEntryFound = IMG_TRUE;
+       PVRSRV_ERROR eError = PVRSRV_ERROR_TOO_FEW_BUFFERS;
+
+       if (psDmaAlloc == NULL ||
+               psDmaAlloc->ui64Size == 0 ||
+               psDmaAlloc->pvOSDevice == NULL ||
+               psDmaAlloc->pvVirtAddr == NULL ||
+               psDmaAlloc->sBusAddr.uiAddr == 0)
+       {
+               PVR_LOG_IF_FALSE((IMG_FALSE), "Invalid parameter");
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       uiSize = PVR_ALIGN(psDmaAlloc->ui64Size, PAGE_SIZE);
+
+       for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx)
+       {
+               /* Look for a free I/O remap table entry */
+               if (gsDmaIoRemapArray[ui32Idx].pvVirtAddr == NULL)
+               {
+                       PVR_ASSERT(gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr == 0);
+                       PVR_ASSERT(gsDmaIoRemapArray[ui32Idx].ui64Size == 0);
+                       break;
+               }
+       }
+
+       if (ui32Idx >= DMA_MAX_IOREMAP_ENTRIES)
+       {
+               bTabEntryFound = IMG_FALSE;
+       }
+
+       if (bTabEntryFound)
+       {
+               IMG_BOOL bSameVAddr, bSamePAddr, bSameSize;
+
+               bSamePAddr = gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr == psDmaAlloc->sBusAddr.uiAddr;
+               bSameVAddr = gsDmaIoRemapArray[ui32Idx].pvVirtAddr == psDmaAlloc->pvVirtAddr;
+               bSameSize = gsDmaIoRemapArray[ui32Idx].ui64Size == uiSize;
+
+               if (bSameVAddr)
+               {
+                       if (bSamePAddr && bSameSize)
+                       {
+                               eError = PVRSRV_OK;
+                       }
+                       else
+                       {
+                               eError = PVRSRV_ERROR_ALREADY_EXISTS;
+                       }
+               }
+               else
+               {
+                       PVR_ASSERT(bSamePAddr == IMG_FALSE);
+
+                       gsDmaIoRemapArray[ui32Idx].ui64Size = uiSize;
+                       gsDmaIoRemapArray[ui32Idx].sBusAddr = psDmaAlloc->sBusAddr;
+                       gsDmaIoRemapArray[ui32Idx].pvVirtAddr = psDmaAlloc->pvVirtAddr;
+
+                       PVR_DPF((PVR_DBG_MESSAGE,
+                                       "DMA: register I/O remap: "
+                                       "VA: 0x%p, PA: 0x%llx, Size: 0x"IMG_SIZE_FMTSPECX,
+                                       psDmaAlloc->pvVirtAddr,
+                                       psDmaAlloc->sBusAddr.uiAddr,
+                                       uiSize));
+
+                       gbEnableDmaIoRemapping = IMG_TRUE;
+                       eError = PVRSRV_OK;
+               }
+       }
+
+       return eError;
+}
+
+/*!
+******************************************************************************
+ @Function                     SysDmaDeregisterForIoRemapping
+
+ @Description          Deregisters DMA_ALLOC from manual I/O remapping
+
+ @Return                       void
+ ******************************************************************************/
+void SysDmaDeregisterForIoRemapping(DMA_ALLOC *psDmaAlloc)
+{
+       size_t uiSize;
+       IMG_UINT32 ui32Idx;
+
+       if (psDmaAlloc == NULL ||
+               psDmaAlloc->ui64Size == 0 ||
+               psDmaAlloc->pvOSDevice == NULL ||
+               psDmaAlloc->pvVirtAddr == NULL ||
+               psDmaAlloc->sBusAddr.uiAddr == 0)
+       {
+               PVR_LOG_IF_FALSE((IMG_FALSE), "Invalid parameter");
+               return;
+       }
+
+       uiSize = PVR_ALIGN(psDmaAlloc->ui64Size, PAGE_SIZE);
+
+       /* Remove specified entries from list of I/O remap entries */
+       for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx)
+       {
+               if (gsDmaIoRemapArray[ui32Idx].pvVirtAddr == psDmaAlloc->pvVirtAddr)
+               {
+                       gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr = 0;
+                       gsDmaIoRemapArray[ui32Idx].pvVirtAddr = NULL;
+                       gsDmaIoRemapArray[ui32Idx].ui64Size = 0;
+
+                       PVR_DPF((PVR_DBG_MESSAGE,
+                                       "DMA: deregister I/O remap: "
+                                       "VA: 0x%p, PA: 0x%llx, Size: 0x"IMG_SIZE_FMTSPECX,
+                                       psDmaAlloc->pvVirtAddr,
+                                       psDmaAlloc->sBusAddr.uiAddr,
+                                       uiSize));
+
+                       break;
+               }
+       }
+
+       /* Check whether any other I/O remap entries are still registered */
+       for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx)
+       {
+               if (gsDmaIoRemapArray[ui32Idx].pvVirtAddr != NULL)
+               {
+                       break;
+               }
+       }
+
+       if (ui32Idx == DMA_MAX_IOREMAP_ENTRIES)
+       {
+               /* No entries found so disable remapping */
+               gbEnableDmaIoRemapping = IMG_FALSE;
+       }
+}
+
+/*!
+******************************************************************************
+ @Function                     SysDmaDevPAddrToCpuVAddr
+
+ @Description          Maps a DMA_ALLOC physical address to CPU virtual address
+
+ @Return                       IMG_CPU_VIRTADDR on success. Otherwise, NULL.
+ ******************************************************************************/
+IMG_CPU_VIRTADDR SysDmaDevPAddrToCpuVAddr(IMG_UINT64 uiAddr, IMG_UINT64 ui64Size)
+{
+       IMG_CPU_VIRTADDR pvDMAVirtAddr = NULL;
+       DMA_ALLOC *psHeapDmaAlloc;
+       IMG_UINT32 ui32Idx;
+
+       if (gbEnableDmaIoRemapping == IMG_FALSE)
+       {
+               return pvDMAVirtAddr;
+       }
+
+       for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx)
+       {
+               psHeapDmaAlloc = &gsDmaIoRemapArray[ui32Idx];
+               if (psHeapDmaAlloc->sBusAddr.uiAddr && uiAddr >= psHeapDmaAlloc->sBusAddr.uiAddr)
+               {
+                       IMG_UINT64 uiSpan = psHeapDmaAlloc->ui64Size;
+                       IMG_UINT64 uiOffset = uiAddr - psHeapDmaAlloc->sBusAddr.uiAddr;
+
+                       if (uiOffset < uiSpan)
+                       {
+                               PVR_ASSERT((uiOffset+ui64Size-1) < uiSpan);
+                               pvDMAVirtAddr = psHeapDmaAlloc->pvVirtAddr + uiOffset;
+
+                               PVR_DPF((PVR_DBG_MESSAGE,
+                                       "DMA: remap: PA: 0x%llx => VA: 0x%p",
+                                       uiAddr, pvDMAVirtAddr));
+
+                               break;
+                       }
+               }
+       }
+
+       return pvDMAVirtAddr;
+}
+
+/*!
+******************************************************************************
+ @Function                     SysDmaCpuVAddrToDevPAddr
+
+ @Description          Maps a DMA_ALLOC CPU virtual address to physical address
+
+ @Return                       Non-zero value on success. Otherwise, 0.
+ ******************************************************************************/
+IMG_UINT64 SysDmaCpuVAddrToDevPAddr(IMG_CPU_VIRTADDR pvDMAVirtAddr)
+{
+       IMG_UINT64 uiAddr = 0;
+       DMA_ALLOC *psHeapDmaAlloc;
+       IMG_UINT32 ui32Idx;
+
+       if (gbEnableDmaIoRemapping == IMG_FALSE)
+       {
+               return uiAddr;
+       }
+
+       for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx)
+       {
+               psHeapDmaAlloc = &gsDmaIoRemapArray[ui32Idx];
+               if (psHeapDmaAlloc->pvVirtAddr && pvDMAVirtAddr >= psHeapDmaAlloc->pvVirtAddr)
+               {
+                       IMG_UINT64 uiSpan = psHeapDmaAlloc->ui64Size;
+                       IMG_UINT64 uiOffset = pvDMAVirtAddr - psHeapDmaAlloc->pvVirtAddr;
+
+                       if (uiOffset < uiSpan)
+                       {
+                               uiAddr = psHeapDmaAlloc->sBusAddr.uiAddr + uiOffset;
+
+                               PVR_DPF((PVR_DBG_MESSAGE,
+                                       "DMA: remap: VA: 0x%p => PA: 0x%llx",
+                                       pvDMAVirtAddr, uiAddr));
+
+                               break;
+                       }
+               }
+       }
+
+       return uiAddr;
+}
+
+/******************************************************************************
+ End of file (dma_support.c)
+******************************************************************************/
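
Taken together, the helpers above give a simple allocate / register / translate / free lifecycle for a DMA_ALLOC. The sketch below shows that flow; the device pointer and the 2MB size are placeholders, and the code is illustrative rather than part of the patch.

/* Illustrative DMA_ALLOC lifecycle using the helpers above. */
static PVRSRV_ERROR ExampleDmaLifecycle(void *pvOSDevice)
{
        DMA_ALLOC sDmaAlloc = {0};
        PVRSRV_ERROR eError;

        sDmaAlloc.pvOSDevice = pvOSDevice;      /* struct device * of the GPU */
        sDmaAlloc.ui64Size   = 2 * 1024 * 1024; /* placeholder size */

        eError = SysDmaAllocMem(&sDmaAlloc);
        if (eError != PVRSRV_OK)
        {
                return eError;
        }

        /* Make the buffer visible to SysDmaDevPAddrToCpuVAddr() /
         * SysDmaCpuVAddrToDevPAddr() translations */
        eError = SysDmaRegisterForIoRemapping(&sDmaAlloc);
        if (eError != PVRSRV_OK)
        {
                SysDmaFreeMem(&sDmaAlloc);
                return eError;
        }

        /* ... use sDmaAlloc.pvVirtAddr and sDmaAlloc.sBusAddr.uiAddr ... */

        SysDmaDeregisterForIoRemapping(&sDmaAlloc);
        SysDmaFreeMem(&sDmaAlloc);
        return PVRSRV_OK;
}
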
diff --git a/drivers/gpu/drm/img/img-rogue/services/system/rogue/common/vmm_type_stub.c b/drivers/gpu/drm/img/img-rogue/services/system/rogue/common/vmm_type_stub.c
new file mode 100644 (file)
index 0000000..747bf4a
--- /dev/null
@@ -0,0 +1,119 @@
+/*************************************************************************/ /*!
+@File                  vmm_type_stub.c
+@Title          Stub VM manager type
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Sample stub (no-operation) VM manager implementation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "rgxheapconfig.h"
+
+#include "vmm_impl.h"
+#include "vmm_pvz_server.h"
+
+static PVRSRV_ERROR
+StubVMMMapDevPhysHeap(IMG_UINT32 ui32FuncID,
+                                         IMG_UINT32 ui32DevID,
+                                         IMG_UINT64 ui64Size,
+                                         IMG_UINT64 ui64Addr)
+{
+       PVR_UNREFERENCED_PARAMETER(ui32FuncID);
+       PVR_UNREFERENCED_PARAMETER(ui32DevID);
+       PVR_UNREFERENCED_PARAMETER(ui64Size);
+       PVR_UNREFERENCED_PARAMETER(ui64Addr);
+       return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+static PVRSRV_ERROR
+StubVMMUnmapDevPhysHeap(IMG_UINT32 ui32FuncID,
+                                               IMG_UINT32 ui32DevID)
+{
+       PVR_UNREFERENCED_PARAMETER(ui32FuncID);
+       PVR_UNREFERENCED_PARAMETER(ui32DevID);
+       return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+static VMM_PVZ_CONNECTION gsStubVmmPvz =
+{
+       .sClientFuncTab = {
+               /* pfnMapDevPhysHeap */
+               &StubVMMMapDevPhysHeap,
+
+               /* pfnUnmapDevPhysHeap */
+               &StubVMMUnmapDevPhysHeap
+       },
+
+       .sServerFuncTab = {
+               /* pfnMapDevPhysHeap */
+               &PvzServerMapDevPhysHeap,
+
+               /* pfnUnmapDevPhysHeap */
+               &PvzServerUnmapDevPhysHeap
+       },
+
+       .sVmmFuncTab = {
+               /* pfnOnVmOnline */
+               &PvzServerOnVmOnline,
+
+               /* pfnOnVmOffline */
+               &PvzServerOnVmOffline,
+
+               /* pfnVMMConfigure */
+               &PvzServerVMMConfigure
+       }
+};
+
+PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection)
+{
+       PVR_LOG_RETURN_IF_FALSE((NULL != psPvzConnection), "VMMCreatePvzConnection", PVRSRV_ERROR_INVALID_PARAMS);
+       *psPvzConnection = &gsStubVmmPvz;
+       PVR_DPF((PVR_DBG_ERROR, "Using a stub VM manager type, no runtime VZ support"));
+       return PVRSRV_OK;
+}
+
+void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection)
+{
+       PVR_LOG_IF_FALSE((NULL != psPvzConnection), "VMMDestroyPvzConnection");
+}
+
+/******************************************************************************
+ End of file (vmm_type_stub.c)
+******************************************************************************/
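As a rough illustration of how this stub is consumed (a minimal sketch assuming only the symbols defined in the file above; the caller name and the example arguments are hypothetical, and the sClientFuncTab/pfnMapDevPhysHeap member names are taken from the initialiser comments): a system layer obtains the connection once, and every client-side para-virtualization request then fails cleanly with PVRSRV_ERROR_NOT_IMPLEMENTED.

static PVRSRV_ERROR ExampleStubPvzRequest(void)
{
	VMM_PVZ_CONNECTION *psPvz = NULL;
	PVRSRV_ERROR eError = VMMCreatePvzConnection(&psPvz);

	if (eError != PVRSRV_OK)
		return eError;

	/* With the stub backend this always returns PVRSRV_ERROR_NOT_IMPLEMENTED. */
	eError = psPvz->sClientFuncTab.pfnMapDevPhysHeap(0          /* func ID */,
	                                                 0          /* device ID */,
	                                                 0x100000   /* size, arbitrary */,
	                                                 0x40000000 /* phys addr, arbitrary */);

	VMMDestroyPvzConnection(psPvz);
	return eError;
}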
diff --git a/drivers/gpu/drm/img/img-rogue/services/system/rogue/include/dma_support.h b/drivers/gpu/drm/img/img-rogue/services/system/rogue/include/dma_support.h
new file mode 100644 (file)
index 0000000..c1d22bd
--- /dev/null
@@ -0,0 +1,117 @@
+/*************************************************************************/ /*!
+@File           dma_support.h
+@Title          Device contiguous memory allocator and I/O re-mapper
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides a contiguous memory allocator API; mainly
+                used for allocating / ioremapping (DMA/PA <-> CPU/VA)
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef DMA_SUPPORT_H
+#define DMA_SUPPORT_H
+
+#include "osfunc.h"
+#include "pvrsrv.h"
+
+typedef struct _DMA_ALLOC_
+{
+       IMG_UINT64       ui64Size;
+       IMG_CPU_VIRTADDR pvVirtAddr;
+       IMG_DEV_PHYADDR  sBusAddr;
+       IMG_HANDLE       hHandle;
+#if defined(__linux__)
+       struct page      *psPage;
+       pgprot_t         PageProps;
+#endif
+       void             *pvOSDevice;
+} DMA_ALLOC;
+
+/*!
+*******************************************************************************
+ @Function      SysDmaAllocMem
+ @Description   Allocates physically contiguous memory
+ @Return        PVRSRV_OK on success. Otherwise, a PVRSRV error code
+******************************************************************************/
+PVRSRV_ERROR SysDmaAllocMem(DMA_ALLOC *psDmaAlloc);
+
+/*!
+*******************************************************************************
+ @Function      SysDmaFreeMem
+ @Description   Frees physically contiguous memory
+ @Return        void
+******************************************************************************/
+void SysDmaFreeMem(DMA_ALLOC *psCmaAlloc);
+
+/*!
+*******************************************************************************
+ @Function      SysDmaRegisterForIoRemapping
+ @Description   Registers DMA_ALLOC for manual I/O remapping
+ @Return        PVRSRV_OK on success. Otherwise, a PVRSRV error code
+******************************************************************************/
+PVRSRV_ERROR SysDmaRegisterForIoRemapping(DMA_ALLOC *psPhysHeapDmaAlloc);
+
+/*!
+*******************************************************************************
+ @Function      SysDmaDeregisterForIoRemapping
+ @Description   Deregisters DMA_ALLOC from manual I/O remapping
+ @Return        void
+******************************************************************************/
+void SysDmaDeregisterForIoRemapping(DMA_ALLOC *psPhysHeapDmaAlloc);
+
+/*!
+*******************************************************************************
+ @Function      SysDmaDevPAddrToCpuVAddr
+ @Description   Maps a DMA_ALLOC physical address to CPU virtual address
+ @Return        IMG_CPU_VIRTADDR on success. Otherwise, NULL
+******************************************************************************/
+IMG_CPU_VIRTADDR
+SysDmaDevPAddrToCpuVAddr(IMG_UINT64 uiAddr, IMG_UINT64 ui64Size);
+
+/*!
+*******************************************************************************
+ @Function      SysDmaCpuVAddrToDevPAddr
+ @Description   Maps a DMA_ALLOC CPU virtual address to physical address
+ @Return        Non-zero value on success. Otherwise, 0
+******************************************************************************/
+IMG_UINT64 SysDmaCpuVAddrToDevPAddr(IMG_CPU_VIRTADDR pvDMAVirtAddr);
+
+#endif /* DMA_SUPPORT_H */
+
+/******************************************************************************
+ End of file (dma_support.h)
+******************************************************************************/
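To make the intended call flow of this header concrete, here is a minimal usage sketch (an assumption based only on the declarations above, not on the dma_support implementation; the buffer size and the pvOSDevice handle are placeholders): allocate a contiguous buffer, register it so the address-translation helpers can see it, translate, then tear everything down in reverse order.

static PVRSRV_ERROR ExampleDmaRoundTrip(void *pvOSDevice)
{
	DMA_ALLOC sAlloc = { 0 };
	IMG_CPU_VIRTADDR pvCpuVAddr;
	PVRSRV_ERROR eError;

	sAlloc.ui64Size   = 0x10000;    /* 64 KiB, arbitrary example size */
	sAlloc.pvOSDevice = pvOSDevice; /* OS device the allocation is made against */

	eError = SysDmaAllocMem(&sAlloc);
	if (eError != PVRSRV_OK)
		return eError;

	/* Make the buffer visible to the DevPAddr <-> CpuVAddr helpers. */
	eError = SysDmaRegisterForIoRemapping(&sAlloc);
	if (eError == PVRSRV_OK)
	{
		pvCpuVAddr = SysDmaDevPAddrToCpuVAddr(sAlloc.sBusAddr.uiAddr, sAlloc.ui64Size);
		(void)pvCpuVAddr;

		SysDmaDeregisterForIoRemapping(&sAlloc);
	}

	SysDmaFreeMem(&sAlloc);
	return eError;
}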
diff --git a/drivers/gpu/drm/img/img-rogue/services/system/rogue/sf_7110/Kbuild.mk b/drivers/gpu/drm/img/img-rogue/services/system/rogue/sf_7110/Kbuild.mk
new file mode 100755 (executable)
index 0000000..215a4e0
--- /dev/null
@@ -0,0 +1,49 @@
+########################################################################### ###
+#@File
+#@Copyright     Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+#@License       Dual MIT/GPLv2
+#
+# The contents of this file are subject to the MIT license as set out below.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# Alternatively, the contents of this file may be used under the terms of
+# the GNU General Public License Version 2 ("GPL") in which case the provisions
+# of GPL are applicable instead of those above.
+#
+# If you wish to allow use of your version of this file only under the terms of
+# GPL, and not to allow others to use your version of this file under the terms
+# of the MIT license, indicate your decision by deleting the provisions above
+# and replace them with the notice and other provisions required by GPL as set
+# out in the file called "GPL-COPYING" included in this distribution. If you do
+# not delete the provisions above, a recipient may use your version of this file
+# under the terms of either the MIT license or GPL.
+#
+# This License is also included in this distribution in the file called
+# "MIT-COPYING".
+#
+# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+### ###########################################################################
+PVRSRVKM_NAME = $(PVRSRV_MODNAME)
+
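+# The sf_7110 system layer pulls in the platform sysconfig, the common DMA
+# helpers and the para-virtualization (VZ/VMM) support; the VM-manager
+# backend is selected at build time through $(VMM_TYPE) (for example the
+# stub implementation, vmm_type_stub.c).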
+$(PVRSRVKM_NAME)-y += services/system/rogue/$(PVR_SYSTEM)/sysconfig.o \
+ services/system/rogue/common/env/linux/dma_support.o \
+ services/server/common/vmm_pvz_client.o \
+ services/server/common/vmm_pvz_server.o \
+ services/server/common/vz_vmm_pvz.o \
+ services/server/common/vz_vmm_vm.o \
+ services/system/$(PVR_ARCH)/common/vmm_type_$(VMM_TYPE).o
diff --git a/drivers/gpu/drm/img/img-rogue/services/system/rogue/sf_7110/sysconfig.c b/drivers/gpu/drm/img/img-rogue/services/system/rogue/sf_7110/sysconfig.c
new file mode 100644 (file)
index 0000000..91f7a4d
--- /dev/null
@@ -0,0 +1,644 @@
+/*************************************************************************/ /*!
+@File
+@Title          System Configuration
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the system layer for the StarFive JH7110 (sf_7110) platform
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(__linux__)
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <soc/starfive/jh7110_pmu.h>
+#endif
+
+#include "pvr_debug.h"
+#include "allocmem.h"
+#include "interrupt_support.h"
+
+#include "pvrsrv_device.h"
+#include "syscommon.h"
+#include "sysinfo.h"
+#include "sysconfig.h"
+#include "physheap.h"
+#include "interrupt_support.h"
+#include "vz_vmm_pvz.h"
+#if defined(SUPPORT_PDVFS)
+#include "rgxpdvfs.h"
+#endif
+#include <asm/pgtable.h>
+#include "pvr_vmap.h"
+#include "linux/highmem.h"
+
+#include "kernel_compatibility.h"
+#include <linux/pm_runtime.h>
+
+struct sf7110_cfg  sf_cfg_t = {0,};
+
+static RGX_TIMING_INFORMATION  gsRGXTimingInfo;
+static RGX_DATA                        gsRGXData;
+static PVRSRV_DEVICE_CONFIG    gsDevices[1];
+static PHYS_HEAP_FUNCTIONS     gsPhysHeapFuncs;
+static PHYS_HEAP_CONFIG                gsPhysHeapConfig[1];
+static struct page *g_zero_page = NULL;
+
+
+#if defined(SUPPORT_PDVFS)
+static const IMG_OPP asOPPTable[] =
+{
+       { 824,  240000000},
+       { 856,  280000000},
+       { 935,  380000000},
+       { 982,  440000000},
+       { 1061, 540000000},
+};
+
+#define LEVEL_COUNT (sizeof(asOPPTable) / sizeof(asOPPTable[0]))
+
+static void SetFrequency(IMG_UINT32 ui32Frequency) {}
+static void SetVoltage(IMG_UINT32 ui32Volt) {}
+#endif
+
+extern void sifive_l2_flush64_range(unsigned long start, unsigned long len);
+
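+/*
+ * Wrapper around the SiFive L2 flush: the start address is aligned down to
+ * the 64-byte cache-line boundary and the length is grown by the same
+ * amount, so the flush still covers the caller's original [start, start+len)
+ * range.
+ */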
+void do_sifive_l2_flush64_range(unsigned long start, unsigned long len)
+{
+       sifive_l2_flush64_range(ALIGN_DOWN(start, 64), len + start % 64);
+}
+
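+/*
+ * Invalidate path: instead of invalidating the requested range directly, a
+ * dedicated 2 MiB zero page is allocated on first use, cleared and flushed;
+ * the start/len arguments are currently unused.
+ */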
+void do_invalid_range(unsigned long start, unsigned long len)
+{
+       unsigned long sz = 2 * 1024 * 1024;
+       unsigned long *pv = NULL;
+
+       if(NULL == g_zero_page)
+       {
+               g_zero_page = alloc_pages(GFP_KERNEL, get_order(sz));
+               if (NULL == g_zero_page)
+               {
+			printk("failed to allocate zero page for cache invalidation\n");
+                       return;
+               }
+       }
+       pv = page_address(g_zero_page);
+       memset(pv, 0, sz);
+
+       do_sifive_l2_flush64_range(page_to_phys(g_zero_page), sz);
+}
+
+/*
+       CPU to Device physical address translation
+*/
+static
+void UMAPhysHeapCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+                                       IMG_UINT32 ui32NumOfAddr,
+                                       IMG_DEV_PHYADDR *psDevPAddr,
+                                       IMG_CPU_PHYADDR *psCpuPAddr)
+{
+       PVR_UNREFERENCED_PARAMETER(hPrivData);
+
+       /* Optimise common case */
+       psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr;
+       if (ui32NumOfAddr > 1)
+       {
+               IMG_UINT32 ui32Idx;
+               for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+               {
+                       psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr;
+               }
+       }
+}
+
+/*
+       Device to CPU physical address translation
+*/
+static
+void UMAPhysHeapDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+                                                                  IMG_UINT32 ui32NumOfAddr,
+                                                                  IMG_CPU_PHYADDR *psCpuPAddr,
+                                                                  IMG_DEV_PHYADDR *psDevPAddr)
+{
+       PVR_UNREFERENCED_PARAMETER(hPrivData);
+
+       /* Optimise common case */
+       psCpuPAddr[0].uiAddr = psDevPAddr[0].uiAddr;
+       if (ui32NumOfAddr > 1)
+       {
+               IMG_UINT32 ui32Idx;
+               for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+               {
+                       psCpuPAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr;
+               }
+       }
+}
+
+static void SysDevFeatureDepInit(PVRSRV_DEVICE_CONFIG *psDevConfig, IMG_UINT64 ui64Features)
+{
+#if defined(SUPPORT_AXI_ACE_TEST)
+       if ( ui64Features & RGX_FEATURE_AXI_ACELITE_BIT_MASK)
+       {
+               gsDevices[0].eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CPU_ONLY;
+       }
+       else
+#endif
+       {
+               psDevConfig->eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_EMULATED;
+       }
+}
+
+#if 1
+
+void SysDevHost_Cache_Maintenance(IMG_HANDLE hSysData,
+                                                                       PVRSRV_CACHE_OP eRequestType,
+                                                                       void *pvVirtStart,
+                                                                       void *pvVirtEnd,
+                                                                       IMG_CPU_PHYADDR sCPUPhysStart,
+                                                                       IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+       unsigned long len = 0;
+
+	/* validate the input physical address range */
+	if (sCPUPhysStart.uiAddr == 0 || sCPUPhysEnd.uiAddr == 0 || sCPUPhysEnd.uiAddr < sCPUPhysStart.uiAddr)
+       {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache op[%d] range [%llx---%llx]\n",
+                       __func__, (uint32_t)eRequestType,
+                       (uint64_t)sCPUPhysStart.uiAddr, (uint64_t)sCPUPhysEnd.uiAddr));
+               return;
+       }
+       len = (unsigned long)(sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr);
+       //printk("FF cop:%d, s:%llx, len:%lx\n", eRequestType, sCPUPhysStart.uiAddr, len);
+       //if(len < 64)
+       //      dump_stack();
+       switch (eRequestType)
+       {
+               case PVRSRV_CACHE_OP_INVALIDATE:
+                       //do_invalid_range(sCPUPhysStart.uiAddr, len);
+                       break;
+               case PVRSRV_CACHE_OP_CLEAN:
+               case PVRSRV_CACHE_OP_FLUSH:
+                       do_sifive_l2_flush64_range(sCPUPhysStart.uiAddr, len);
+                       break;
+               default:
+                       PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache operation type %d", __func__, (uint32_t)eRequestType));
+                       break;
+       }
+
+}
+#endif
+
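+/*
+ * Runtime-PM helpers: resume clears the PMU hardware-event turn-off mask,
+ * enables the AXI bus clock and calls u0_img_gpu_enable() to bring up the
+ * remaining clocks and release the resets; suspend performs the reverse
+ * sequence and masks the PMU events again.
+ */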
+static IMG_UINT32 sys_gpu_runtime_resume(IMG_HANDLE hd)
+{
+       starfive_pmu_hw_event_turn_off_mask(0);
+       clk_prepare_enable(sf_cfg_t.clk_axi);
+       u0_img_gpu_enable();
+
+       return 0;
+}
+
+static IMG_UINT32 sys_gpu_runtime_suspend(IMG_HANDLE hd)
+{
+       u0_img_gpu_disable();
+       clk_disable_unprepare(sf_cfg_t.clk_axi);
+       starfive_pmu_hw_event_turn_off_mask((uint32_t)-1);
+
+       return 0;
+}
+
+static int create_sf7110_cfg(struct device *dev)
+{
+       struct sf7110_cfg *psf = &sf_cfg_t;
+
+       psf->dev = dev;
+       mutex_init(&psf->set_power_state);
+       psf->gpu_reg_base = ioremap(STARFIVE_7110_GPU_PBASE, STARFIVE_7110_GPU_SIZE);
+       if(!psf->gpu_reg_base)
+               return -ENOMEM;
+       psf->gpu_reg_start = STARFIVE_7110_GPU_PBASE;
+       psf->gpu_reg_size = STARFIVE_7110_GPU_SIZE;
+
+       psf->clk_apb = devm_clk_get_optional(dev, "clk_apb");
+       if (IS_ERR(psf->clk_apb)) {
+               dev_err(dev, "failed to get gpu clk_apb\n");
+               goto err_gpu_unmap;
+       }
+
+       psf->clk_rtc = devm_clk_get_optional(dev, "clk_rtc");
+       if (IS_ERR(psf->clk_rtc)) {
+               dev_err(dev, "failed to get gpu clk_rtc\n");
+               goto err_gpu_unmap;
+       }
+
+       psf->clk_core = devm_clk_get_optional(dev, "clk_core");
+       if (IS_ERR(psf->clk_core)) {
+               dev_err(dev, "failed to get gpu clk_core\n");
+               goto err_gpu_unmap;
+       }
+
+       psf->clk_sys = devm_clk_get_optional(dev, "clk_sys");
+       if (IS_ERR(psf->clk_sys)) {
+               dev_err(dev, "failed to get gpu clk_sys\n");
+               goto err_gpu_unmap;
+       }
+
+       psf->clk_axi = devm_clk_get_optional(dev, "clk_axi");
+       if (IS_ERR(psf->clk_axi)) {
+               dev_err(dev, "failed to get gpu clk_axi\n");
+               goto err_gpu_unmap;
+       }
+
+       psf->clk_div = devm_clk_get_optional(dev, "clk_bv");
+       if (IS_ERR(psf->clk_div)) {
+               dev_err(dev, "failed to get gpu clk_div\n");
+               goto err_gpu_unmap;
+       }
+
+       psf->rst_apb = devm_reset_control_get_exclusive(dev, "rst_apb");
+       if (IS_ERR(psf->rst_apb)) {
+               dev_err(dev, "failed to get GPU rst_apb\n");
+               goto err_gpu_unmap;
+       }
+
+       psf->rst_doma = devm_reset_control_get_exclusive(dev, "rst_doma");
+       if (IS_ERR(psf->rst_doma)) {
+               dev_err(dev, "failed to get GPU rst_doma\n");
+               goto err_gpu_unmap;
+       }
+
+       psf->runtime_resume = sys_gpu_runtime_resume;
+       psf->runtime_suspend = sys_gpu_runtime_suspend;
+
+       return 0;
+err_gpu_unmap:
+       iounmap(psf->gpu_reg_base);
+       return -ENOMEM;
+}
+
+void u0_img_gpu_enable(void)
+{
+       clk_prepare_enable(sf_cfg_t.clk_apb);
+       clk_prepare_enable(sf_cfg_t.clk_rtc);
+       clk_set_rate(sf_cfg_t.clk_div, RGX_STARFIVE_7100_CORE_CLOCK_SPEED);
+       clk_prepare_enable(sf_cfg_t.clk_core);
+       clk_prepare_enable(sf_cfg_t.clk_sys);
+
+       reset_control_deassert(sf_cfg_t.rst_apb);
+       reset_control_deassert(sf_cfg_t.rst_doma);
+}
+
+
+void u0_img_gpu_disable(void)
+{
+       reset_control_assert(sf_cfg_t.rst_apb);
+       reset_control_assert(sf_cfg_t.rst_doma);
+
+       clk_disable_unprepare(sf_cfg_t.clk_apb);
+       clk_disable_unprepare(sf_cfg_t.clk_rtc);
+       clk_disable_unprepare(sf_cfg_t.clk_core);
+       clk_disable_unprepare(sf_cfg_t.clk_sys);
+}
+
+static int sys_gpu_enable(void)
+{
+       int ret;
+
+       ret = pm_runtime_get_sync(sf_cfg_t.dev);
+       if (ret < 0) {
+               dev_err(sf_cfg_t.dev, "gpu: failed to get pm runtime: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int sys_gpu_disable(void)
+{
+       pm_runtime_put_sync(sf_cfg_t.dev);
+       //pm_runtime_disable(sf_cfg_t.dev);
+       return 0;
+}
+
+static PVRSRV_ERROR sfSysDevPrePowerState(
+               IMG_HANDLE hSysData,
+               PVRSRV_SYS_POWER_STATE eNewPowerState,
+               PVRSRV_SYS_POWER_STATE eCurrentPowerState,
+               PVRSRV_POWER_FLAGS ePwrFlags)
+{
+       struct sf7110_cfg *psf = hSysData;
+
+       pr_debug("(%s()) state: current=%d, new=%d; flags: 0x%08x", __func__,
+            eCurrentPowerState, eNewPowerState, ePwrFlags);
+
+       mutex_lock(&psf->set_power_state);
+
+       if ((PVRSRV_SYS_POWER_STATE_OFF == eNewPowerState) &&
+               (PVRSRV_SYS_POWER_STATE_ON == eCurrentPowerState))
+               sys_gpu_disable();
+
+       mutex_unlock(&psf->set_power_state);
+       return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR sfSysDevPostPowerState(
+               IMG_HANDLE hSysData,
+               PVRSRV_SYS_POWER_STATE eNewPowerState,
+               PVRSRV_SYS_POWER_STATE eCurrentPowerState,
+               PVRSRV_POWER_FLAGS ePwrFlags)
+{
+       struct sf7110_cfg *psf = hSysData;
+       PVRSRV_ERROR ret;
+
+       pr_debug("(%s()) state: current=%d, new=%d; flags: 0x%08x", __func__,
+            eCurrentPowerState, eNewPowerState, ePwrFlags);
+
+       mutex_lock(&psf->set_power_state);
+
+       if ((PVRSRV_SYS_POWER_STATE_ON == eNewPowerState) &&
+               (PVRSRV_SYS_POWER_STATE_OFF == eCurrentPowerState)) {
+               if (sys_gpu_enable()) {
+                       ret = PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE;
+                       goto done;
+               }
+       }
+       ret = PVRSRV_OK;
+done:
+       mutex_unlock(&psf->set_power_state);
+
+       return ret;
+}
+
+
+PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig)
+{
+#if defined(__linux__)
+       int iIrq;
+       struct resource *psDevMemRes = NULL;
+       struct platform_device *psDev;
+
+       psDev = to_platform_device((struct device *)pvOSDevice);
+	printk("@@ dev ptr:%llx/%d/%d\n", (uint64_t)psDev,
+	       DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT,
+	       PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG);
+#endif
+
+       if (gsDevices[0].pvOSDevice)
+               return PVRSRV_ERROR_INVALID_DEVICE;
+
+#if defined(__linux__)
+       dma_set_mask(pvOSDevice, DMA_BIT_MASK(32));
+#endif
+
+       /*
+        * Setup information about physical memory heap(s) we have
+        */
+       gsPhysHeapFuncs.pfnCpuPAddrToDevPAddr = UMAPhysHeapCpuPAddrToDevPAddr;
+       gsPhysHeapFuncs.pfnDevPAddrToCpuPAddr = UMAPhysHeapDevPAddrToCpuPAddr;
+
+       gsPhysHeapConfig[0].pszPDumpMemspaceName = "SYSMEM";
+       gsPhysHeapConfig[0].eType = PHYS_HEAP_TYPE_UMA;
+       gsPhysHeapConfig[0].psMemFuncs = &gsPhysHeapFuncs;
+       gsPhysHeapConfig[0].hPrivData = NULL;
+       gsPhysHeapConfig[0].ui32UsageFlags = PHYS_HEAP_USAGE_GPU_LOCAL;
+       //ui32NextPhysHeapID += 1;
+
+       /*
+        * Setup RGX specific timing data
+        */
+       gsRGXTimingInfo.ui32CoreClockSpeed      = RGX_STARFIVE_7100_CORE_CLOCK_SPEED;
+       gsRGXTimingInfo.bEnableActivePM         = IMG_TRUE;
+       gsRGXTimingInfo.bEnableRDPowIsland      = IMG_TRUE;
+       gsRGXTimingInfo.ui32ActivePMLatencyms   = SYS_RGX_ACTIVE_POWER_LATENCY_MS;
+
+       /*
+        *Setup RGX specific data
+        */
+       gsRGXData.psRGXTimingInfo = &gsRGXTimingInfo;
+
+       /*
+        * Setup device
+        */
+       gsDevices[0].pvOSDevice         = pvOSDevice;
+       gsDevices[0].pszName            = "starfive_7110";
+       gsDevices[0].pszVersion         = NULL;
+
+       /* Device setup information */
+#if defined(__linux__)
+       psDevMemRes = platform_get_resource(psDev, IORESOURCE_MEM, 0);
+       if (psDevMemRes) {
+               gsDevices[0].sRegsCpuPBase.uiAddr = psDevMemRes->start;
+		gsDevices[0].ui32RegsSize = (unsigned int)resource_size(psDevMemRes);
+       } else
+#endif
+       {
+#if defined(__linux__)
+               PVR_LOG(("%s: platform_get_resource() failed, using mmio/sz 0x%x/0x%x",
+                               __func__,
+                               STARFIVE_7110_GPU_PBASE,
+                               STARFIVE_7110_GPU_SIZE));
+#endif
+               gsDevices[0].sRegsCpuPBase.uiAddr   = STARFIVE_7110_GPU_PBASE;
+               gsDevices[0].ui32RegsSize           = STARFIVE_7110_GPU_SIZE;
+       }
+
+       gsDevices[0].eDefaultHeap = PVRSRV_PHYS_HEAP_GPU_LOCAL;
+#if defined(__linux__)
+       iIrq = platform_get_irq(psDev, 0);
+       if (iIrq >= 0) {
+               gsDevices[0].ui32IRQ = (IMG_UINT32) iIrq;
+       } else
+#endif
+       {
+#if defined(__linux__)
+               PVR_LOG(("%s: platform_get_irq() failed, using irq %d",
+                               __func__,
+                               STARFIVE_7110_IRQ_GPU));
+#endif
+               gsDevices[0].ui32IRQ = STARFIVE_7110_IRQ_GPU;
+       }
+
+       /* Device's physical heaps */
+       gsDevices[0].pasPhysHeaps = gsPhysHeapConfig;
+       gsDevices[0].ui32PhysHeapCount = ARRAY_SIZE(gsPhysHeapConfig);
+
+       if (create_sf7110_cfg(&psDev->dev)) {
+               return PVRSRV_ERROR_BAD_MAPPING;
+       }
+       gsDevices[0].hSysData = &sf_cfg_t;
+
+       pm_runtime_enable(sf_cfg_t.dev);
+	/* System-layer power management callbacks */
+       gsDevices[0].pfnPrePowerState = sfSysDevPrePowerState;
+       gsDevices[0].pfnPostPowerState = sfSysDevPostPowerState;
+
+	/* No clock-frequency query callback is provided */
+       gsDevices[0].pfnClockFreqGet = NULL;
+
+       gsDevices[0].hDevData = &gsRGXData;
+
+       gsDevices[0].pfnSysDevFeatureDepInit = &SysDevFeatureDepInit;
+
+       gsDevices[0].bHasFBCDCVersion31 = IMG_FALSE;
+       gsDevices[0].bDevicePA0IsValid = IMG_FALSE;
+
+       /* device error notify callback function */
+       gsDevices[0].pfnSysDevErrorNotify = NULL;
+       gsDevices[0].pfnHostCacheMaintenance = SysDevHost_Cache_Maintenance;
+
+#if defined(SUPPORT_PDVFS)
+       gsDevices[0].sDVFS.sDVFSDeviceCfg.pasOPPTable = asOPPTable;
+       gsDevices[0].sDVFS.sDVFSDeviceCfg.ui32OPPTableSize = LEVEL_COUNT;
+       gsDevices[0].sDVFS.sDVFSDeviceCfg.pfnSetFrequency = SetFrequency;
+       gsDevices[0].sDVFS.sDVFSDeviceCfg.pfnSetVoltage = SetVoltage;
+#endif
+
+       *ppsDevConfig = &gsDevices[0];
+
+       return PVRSRV_OK;
+}
+
+void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+       psDevConfig->pvOSDevice = NULL;
+}
+
+typedef struct LISR_DATA_TAG
+{
+       IMG_UINT32      ui32IRQ;
+       PFN_SYS_LISR    pfnLISR;
+       void            *pvData;
+} LISR_DATA;
+
+static irqreturn_t img_interrupt(int irq, void *dev_id)
+{
+       LISR_DATA *psLISRData = (LISR_DATA *)dev_id;
+
+       PVR_UNREFERENCED_PARAMETER(irq);
+
+       if (psLISRData) {
+               if (psLISRData->pfnLISR(psLISRData->pvData))
+                       return IRQ_HANDLED;
+       } else {
+               PVR_DPF((PVR_DBG_ERROR, "%s: Missing interrupt data", __func__));
+       }
+
+       return IRQ_NONE;
+}
+
+PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData,
+                                       IMG_UINT32 ui32IRQ,
+                                       const IMG_CHAR *pszName,
+                                       PFN_LISR pfnLISR,
+                                       void *pvData,
+                                       IMG_HANDLE *phLISRData)
+{
+       unsigned long ui32Flags = SYS_IRQ_FLAG_TRIGGER_DEFAULT;
+       LISR_DATA *psLISRData;
+       unsigned long ulIRQFlags = 0;
+
+       PVR_UNREFERENCED_PARAMETER(hSysData);
+
+       if (pfnLISR == NULL || pvData == NULL)
+               return PVRSRV_ERROR_INVALID_PARAMS;
+
+       if (ui32Flags & ~SYS_IRQ_FLAG_MASK)
+               return PVRSRV_ERROR_INVALID_PARAMS;
+
+       switch (ui32Flags & SYS_IRQ_FLAG_TRIGGER_MASK)
+       {
+               case SYS_IRQ_FLAG_TRIGGER_DEFAULT:
+                       break;
+               case SYS_IRQ_FLAG_TRIGGER_LOW:
+                       ulIRQFlags |= IRQF_TRIGGER_LOW;
+                       break;
+               case SYS_IRQ_FLAG_TRIGGER_HIGH:
+                       ulIRQFlags |= IRQF_TRIGGER_HIGH;
+                       break;
+               default:
+                       return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+
+       if (ui32Flags & SYS_IRQ_FLAG_SHARED)
+               ulIRQFlags |= IRQF_SHARED;
+
+       psLISRData = OSAllocMem(sizeof(*psLISRData));
+       if (psLISRData == NULL)
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+
+       psLISRData->ui32IRQ = ui32IRQ;
+       psLISRData->pfnLISR = pfnLISR;
+       psLISRData->pvData = pvData;
+       if (request_irq(ui32IRQ, img_interrupt, ulIRQFlags, pszName, psLISRData)) {
+               OSFreeMem(psLISRData);
+               return PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER;
+       }
+       *phLISRData = (IMG_HANDLE)psLISRData;
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData)
+{
+       LISR_DATA *psLISRData = (LISR_DATA *)hLISRData;
+
+       if (psLISRData == NULL)
+               return PVRSRV_ERROR_INVALID_PARAMS;
+
+       free_irq(psLISRData->ui32IRQ, psLISRData);
+
+       OSFreeMem(psLISRData);
+
+       return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig,
+                               DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                               void *pvDumpDebugFile)
+{
+       PVR_UNREFERENCED_PARAMETER(psDevConfig);
+       PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf);
+       PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile);
+       return PVRSRV_OK;
+}
+
+struct sf7110_cfg *sys_get_privdata(void)
+{
+       return &sf_cfg_t;
+}
+
+/******************************************************************************
+ End of file (sysconfig.c)
+******************************************************************************/
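The pre/post power-state callbacks above only bracket full ON <-> OFF transitions; the clock, reset and PMU work is done by the runtime-PM hooks that create_sf7110_cfg() stores in struct sf7110_cfg. The dev_pm_ops wiring itself is not part of this file, so the glue below is only a sketch of how those hooks could be reached (the callback and variable names are assumptions):

/* Assumed includes: <linux/pm.h>, <linux/pm_runtime.h> and "sysconfig.h". */
static int img_gpu_pm_runtime_resume(struct device *dev)
{
	struct sf7110_cfg *psf = sys_get_privdata();

	(void)dev; /* the system-layer private data is a global in this port */
	return psf->runtime_resume ? (int)psf->runtime_resume(NULL) : 0;
}

static int img_gpu_pm_runtime_suspend(struct device *dev)
{
	struct sf7110_cfg *psf = sys_get_privdata();

	(void)dev;
	return psf->runtime_suspend ? (int)psf->runtime_suspend(NULL) : 0;
}

static const struct dev_pm_ops img_gpu_pm_ops = {
	SET_RUNTIME_PM_OPS(img_gpu_pm_runtime_suspend, img_gpu_pm_runtime_resume, NULL)
};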
diff --git a/drivers/gpu/drm/img/img-rogue/services/system/rogue/sf_7110/sysconfig.h b/drivers/gpu/drm/img/img-rogue/services/system/rogue/sf_7110/sysconfig.h
new file mode 100644 (file)
index 0000000..435f637
--- /dev/null
@@ -0,0 +1,100 @@
+/*************************************************************************/ /*!
+@Title          System Description Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides system-specific declarations and macros
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/clk.h>
+#include <linux/reset.h>
+
+#include "pvrsrv_device.h"
+#include "rgxdevice.h"
+
+#if !defined(__SYSCCONFIG_H__)
+#define __SYSCCONFIG_H__
+
+#define STARFIVE_7110_IRQ_GPU 82  //77 + 5
+#define STARFIVE_7110_GPU_SIZE 0x00100000
+#define STARFIVE_7110_GPU_PBASE 0x18000000
+
+typedef IMG_UINT32 (*SYS_DEV_CLK_GET)(IMG_HANDLE hData);
+
+struct sf7110_cfg {
+       void __iomem *gpu_reg_base;
+       resource_size_t gpu_reg_start;
+       resource_size_t gpu_reg_size;
+
+       struct clk *clk_apb;
+       struct clk *clk_rtc;
+       struct clk *clk_core;
+       struct clk *clk_sys;
+       struct clk *clk_axi;
+       struct clk *clk_div;
+       struct reset_control *rst_apb;
+       struct reset_control *rst_doma;
+
+	/* mutex protecting power-state transitions */
+       struct mutex set_power_state;
+
+	/* for GPU device frequency/voltage updates, to be filled in later */
+       //struct clk **top_clk;
+       struct device *dev;
+       SYS_DEV_CLK_GET runtime_resume;
+       SYS_DEV_CLK_GET runtime_suspend;
+};
+
+#define mk_crg_offset(x)  ((x) - (U0_SYS_CRG__SAIF_BD_APBS__BASE_ADDR))
+
+#if  defined(__FPGA_VERIFICATION__)
+#define RGX_STARFIVE_7100_CORE_CLOCK_SPEED (8 * 1000 * 1000) // the clock is 8 MHz on the FPGA platform
+#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (80000)
+#else
+#define RGX_STARFIVE_7100_CORE_CLOCK_SPEED (594.0 * 1000 * 1000) // maybe 400 MHz?
+#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (100)
+#endif
+
+extern void do_sifive_l2_flush64_range(unsigned long start, unsigned long len);
+
+void u0_img_gpu_enable(void);
+void u0_img_gpu_disable(void);
+struct sf7110_cfg *sys_get_privdata(void);
+/*****************************************************************************
+ * system specific data structures
+ *****************************************************************************/
+
+#endif /* __SYSCCONFIG_H__ */
diff --git a/drivers/gpu/drm/img/img-rogue/services/system/rogue/sf_7110/sysinfo.h b/drivers/gpu/drm/img/img-rogue/services/system/rogue/sf_7110/sysinfo.h
new file mode 100644 (file)
index 0000000..456c4d2
--- /dev/null
@@ -0,0 +1,76 @@
+/*************************************************************************/ /*!
+@File
+@Title          System Description Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides system-specific declarations and macros
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__SYSINFO_H__)
+#define __SYSINFO_H__
+
+/*!< System specific poll/timeout details */
+#if  defined(__FPGA_VERIFICATION__) //defined(VIRTUAL_PLATFORM)
+//#warning  "Building PowerVR Rogue GPU for FPGA verification"
+#define MAX_HW_TIME_US                                                         (20000000)
+#else
+#define MAX_HW_TIME_US                                                         (500000)
+#endif
+
+#define FATAL_ERROR_DETECTION_POLL_MS                          (10000)
+#if defined(VIRTUAL_PLATFORM)
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT                (10000)
+#else
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT                (1500)//(10000)
+#endif
+#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT       (3600000)
+#define WAIT_TRY_COUNT                                                         (10000)
+
+
+#if 1 // defined(VIRTUAL_PLATFORM)
+#define SYS_RGX_OF_COMPATIBLE "img-gpu"
+#endif
+
+#define IMG_GPU_DEBUG
+
+
+#if defined(__linux__)
+#define SYS_RGX_DEV_NAME "rgx_starfive"
+#endif
+
+#endif /* !defined(__SYSINFO_H__) */
diff --git a/drivers/gpu/drm/img/kernel_compatibility.h b/drivers/gpu/drm/img/kernel_compatibility.h
new file mode 100644 (file)
index 0000000..6a94c8c
--- /dev/null
@@ -0,0 +1,521 @@
+/*************************************************************************/ /*!
+@Title          Kernel versions compatibility macros
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Per-version macros to allow code to seamlessly use older kernel versions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __KERNEL_COMPATIBILITY_H__
+#define __KERNEL_COMPATIBILITY_H__
+
+#include <linux/version.h>
+
+/*
+ * Stop supporting an old kernel? Remove the top block.
+ * New incompatible kernel?       Append a new block at the bottom.
+ *
+ * Please write your version test as `VERSION < X.Y`, and use the earliest
+ * possible version :)
+ */
+
+/* Linux 3.6 introduced seq_vprintf(). Earlier versions don't have this
+ * so we work around the limitation with vsnprintf() + seq_puts().
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+#define seq_vprintf(seq_file, fmt, args) \
+do { \
+       char aszBuffer[512]; /* maximum message buffer size */ \
+       vsnprintf(aszBuffer, sizeof(aszBuffer), fmt, args); \
+       seq_puts(seq_file, aszBuffer); \
+} while (0)
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
+
+/* Linux 3.7 split VM_RESERVED into VM_DONTDUMP and VM_DONTEXPAND */
+#define VM_DONTDUMP VM_RESERVED
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) */
+
+/*
+ * Note: this fix had to be written backwards because get_unused_fd_flags
+ * was already defined but not exported on kernels < 3.7
+ *
+ * When removing support for kernels < 3.7, this block should be removed
+ * and all `get_unused_fd()` should be manually replaced with
+ * `get_unused_fd_flags(0)`
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+
+/* Linux 3.19 removed get_unused_fd() */
+/* get_unused_fd_flags was introduced in 3.7 */
+#define get_unused_fd() get_unused_fd_flags(0)
+
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+
+/*
+ * Headers shouldn't normally be included by this file but this is a special
+ * case as it's not obvious from the name that devfreq_add_device needs this
+ * include.
+ */
+#include <linux/string.h>
+
+#define devfreq_add_device(dev, profile, name, data) \
+       ({ \
+               struct devfreq *__devfreq; \
+               if (name && !strcmp(name, "simple_ondemand")) \
+                       __devfreq = devfreq_add_device(dev, profile, \
+                                                          &devfreq_simple_ondemand, data); \
+               else \
+                       __devfreq = ERR_PTR(-EINVAL); \
+               __devfreq; \
+       })
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0))
+
+#define DRIVER_RENDER 0
+#define DRM_RENDER_ALLOW 0
+
+/* Linux 3.12 introduced a new shrinker API */
+#define SHRINK_STOP (~0UL)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+
+#define dev_pm_opp_get_opp_count(dev) opp_get_opp_count(dev)
+#define dev_pm_opp_get_freq(opp) opp_get_freq(opp)
+#define dev_pm_opp_get_voltage(opp) opp_get_voltage(opp)
+#define dev_pm_opp_add(dev, freq, u_volt) opp_add(dev, freq, u_volt)
+#define dev_pm_opp_find_freq_ceil(dev, freq) opp_find_freq_ceil(dev, freq)
+
+#if defined(CONFIG_ARM)
+/* Linux 3.13 renamed ioremap_cached to ioremap_cache */
+#define ioremap_cache(cookie, size) ioremap_cached(cookie, size)
+#endif /* defined(CONFIG_ARM) */
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+
+/* Linux 3.14 introduced a new set of sized min and max defines */
+#ifndef U32_MAX
+#define U32_MAX ((u32)UINT_MAX)
+#endif
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
+
+/* Linux 3.17 changed the 3rd argument from a `struct page ***pages` to
+ * `struct page **pages` */
+#define map_vm_area(area, prot, pages) map_vm_area(area, prot, &pages)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))
+
+/*
+ * Linux 4.7 removed this function but its replacement was available since 3.19.
+ */
+#define drm_crtc_send_vblank_event(crtc, e) drm_send_vblank_event((crtc)->dev, drm_crtc_index(crtc), e)
+
+/* seq_has_overflowed() was introduced in 3.19 but the structure elements
+ * have been available since 2.x
+ */
+#include <linux/seq_file.h>
+static inline bool seq_has_overflowed(struct seq_file *m)
+{
+       return m->count == m->size;
+}
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0))
+
+#define debugfs_create_file_size(name, mode, parent, data, fops, file_size) \
+       ({ \
+               struct dentry *de; \
+               de = debugfs_create_file(name, mode, parent, data, fops); \
+               if (de) \
+                       de->d_inode->i_size = file_size; \
+               de; \
+       })
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0))
+#define drm_fb_helper_unregister_fbi(fb_helper) \
+       ({ \
+               if ((fb_helper) && (fb_helper)->fbdev) \
+                       unregister_framebuffer((fb_helper)->fbdev); \
+       })
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+
+/* Linux 4.4 renamed GFP_WAIT to GFP_RECLAIM */
+#define __GFP_RECLAIM __GFP_WAIT
+
+#if !defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))
+#define dev_pm_opp_of_add_table(dev) of_init_opp_table(dev)
+#define dev_pm_opp_of_remove_table(dev) of_free_opp_table(dev)
+#else
+#define sync_fence_create(data_name, sync_pt) sync_fence_create(data_name, &(sync_pt)->base)
+#endif
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) && \
+       (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)))
+
+/* Linux 4.5 added a new printf-style parameter for debug messages */
+
+#define drm_encoder_init(dev, encoder, funcs, encoder_type, name, ...) \
+       drm_encoder_init(dev, encoder, funcs, encoder_type)
+
+#define drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, format_modifiers, type, name, ...) \
+       ({ (void) format_modifiers; drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, type); })
+
+#define drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs, name, ...) \
+       drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs)
+
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0))
+
+#define drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, format_modifiers, type, name, ...) \
+       ({ (void) format_modifiers; drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, type, name, ##__VA_ARGS__); })
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0))
+
+/*
+ * Linux 4.6 removed the first two parameters: the "struct task_struct"
+ * pointer "current" is defined in asm/current.h, which makes it pointless
+ * to pass it on every function call.
+ */
+#define get_user_pages(start, nr_pages, gup_flags, pages, vmas) \
+       get_user_pages(current, current->mm, start, nr_pages, gup_flags & FOLL_WRITE, gup_flags & FOLL_FORCE, pages, vmas)
+
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0))
+
+/* Linux 4.9 replaced the write/force parameters with "gup_flags" */
+#define get_user_pages(start, nr_pages, gup_flags, pages, vmas) \
+       get_user_pages(start, nr_pages, gup_flags & FOLL_WRITE, gup_flags & FOLL_FORCE, pages, vmas)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) && \
+       (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)))
+
+/*
+ * Linux 4.6 removed the start and end arguments as it now always maps
+ * the entire DMA-BUF.
+ * Additionally, dma_buf_end_cpu_access() now returns an int error.
+ */
+#define dma_buf_begin_cpu_access(DMABUF, DIRECTION) dma_buf_begin_cpu_access(DMABUF, 0, DMABUF->size, DIRECTION)
+#define dma_buf_end_cpu_access(DMABUF, DIRECTION) ({ dma_buf_end_cpu_access(DMABUF, 0, DMABUF->size, DIRECTION); 0; })
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) && \
+                 (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0))
+
+/* Linux 4.7 removed the first argument as it was never used */
+#define drm_gem_object_lookup(filp, handle) drm_gem_object_lookup((filp)->minor->dev, filp, handle)
+
+/* Linux 4.7 replaced nla_put_u64 with nla_put_u64_64bit */
+#define nla_put_u64_64bit(skb, attrtype, value, padattr) nla_put_u64(skb, attrtype, value)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0))
+
+/* Linux 4.9 changed the second argument to a drm_file pointer */
+#define drm_vma_node_is_allowed(node, file_priv) drm_vma_node_is_allowed(node, (file_priv)->filp)
+#define drm_vma_node_allow(node, file_priv) drm_vma_node_allow(node, (file_priv)->filp)
+#define drm_vma_node_revoke(node, file_priv) drm_vma_node_revoke(node, (file_priv)->filp)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+#define refcount_read(r) atomic_read(r)
+#define drm_mm_insert_node(mm, node, size) drm_mm_insert_node(mm, node, size, 0, DRM_MM_SEARCH_DEFAULT)
+
+#define drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd) drm_helper_mode_fill_fb_struct(fb, mode_cmd)
+
+/*
+ * In Linux Kernels >= 4.12 for x86 another level of page tables has been
+ * added. The added level (p4d) sits between pgd and pud, so when it
+ * doesn`t exist, pud_offset function takes pgd as a parameter instead
+ * of p4d.
+ */
+#define p4d_t pgd_t
+#define p4d_offset(pgd, address) (pgd)
+#define p4d_none(p4d) (0)
+#define p4d_bad(p4d) (0)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+
+#define drm_mode_object_get(obj)          drm_mode_object_reference(obj)
+#define drm_mode_object_put(obj)          drm_mode_object_unreference(obj)
+#define drm_connector_get(obj)            drm_connector_reference(obj)
+#define drm_connector_put(obj)            drm_connector_unreference(obj)
+#define drm_framebuffer_get(obj)          drm_framebuffer_reference(obj)
+#define drm_framebuffer_put(obj)          drm_framebuffer_unreference(obj)
+#define drm_gem_object_get(obj)           drm_gem_object_reference(obj)
+#define drm_gem_object_put_locked(obj)    drm_gem_object_unreference(obj)
+#define __drm_gem_object_put(obj)         __drm_gem_object_unreference(obj)
+#define drm_property_blob_get(obj)        drm_property_reference_blob(obj)
+#define drm_property_blob_put(obj)        drm_property_unreference_blob(obj)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0))
+
+#define drm_dev_put(dev) drm_dev_unref(dev)
+
+#define drm_mode_object_find(dev, file_priv, id, type) drm_mode_object_find(dev, id, type)
+#define drm_encoder_find(dev, file_priv, id) drm_encoder_find(dev, id)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0))
+
+#define drm_atomic_helper_check_plane_state(plane_state, crtc_state, \
+                                                                                       min_scale, max_scale, \
+                                                                                       can_position, can_update_disabled) \
+       ({ \
+               const struct drm_rect __clip = { \
+                       .x2 = crtc_state->crtc->mode.hdisplay, \
+                       .y2 = crtc_state->crtc->mode.vdisplay, \
+               }; \
+               int __ret = drm_plane_helper_check_state(plane_state, \
+                                                                                                &__clip, \
+                                                                                                min_scale, max_scale, \
+                                                                                                can_position, \
+                                                                                                can_update_disabled); \
+               __ret; \
+       })
+
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0))
+
+#define drm_atomic_helper_check_plane_state(plane_state, crtc_state, \
+                                                                                       min_scale, max_scale, \
+                                                                                       can_position, can_update_disabled) \
+       ({ \
+               const struct drm_rect __clip = { \
+                       .x2 = crtc_state->crtc->mode.hdisplay, \
+                       .y2 = crtc_state->crtc->mode.vdisplay, \
+               }; \
+               int __ret = drm_atomic_helper_check_plane_state(plane_state, \
+                                                                                                               crtc_state, \
+                                                                                                               &__clip, \
+                                                                                                               min_scale, max_scale, \
+                                                                                                               can_position, \
+                                                                                                               can_update_disabled); \
+               __ret; \
+       })
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
+
+#define drm_connector_attach_encoder(connector, encoder) \
+       drm_mode_connector_attach_encoder(connector, encoder)
+
+#define drm_connector_update_edid_property(connector, edid) \
+       drm_mode_connector_update_edid_property(connector, edid)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0))
+
+/*
+ * Work around architectures, e.g. MIPS, that define copy_from_user and
+ * copy_to_user as macros that call access_ok, as this gets redefined below.
+ * As of kernel 4.12, these functions are no longer defined per architecture,
+ * so this workaround isn't needed.
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+#if defined(copy_from_user)
+ /*
+  * NOTE: This function should not be called directly as it exists simply to
+  * work around copy_from_user being defined as a macro that calls access_ok.
+  */
+static inline int
+__pvr_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+       return copy_from_user(to, from, n);
+}
+
+#undef copy_from_user
+#define copy_from_user(to, from, n) __pvr_copy_from_user(to, from, n)
+#endif
+
+#if defined(copy_to_user)
+ /*
+  * NOTE: This function should not be called directly as it exists simply to
+  * work around copy_to_user being defined as a macro that calls access_ok.
+  */
+static inline int
+__pvr_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+       return copy_to_user(to, from, n);
+}
+
+#undef copy_to_user
+#define copy_to_user(to, from, n) __pvr_copy_to_user(to, from, n)
+#endif
+#endif
+
+/*
+ * Linux 5.0 dropped the type argument.
+ *
+ * This is unused in at least Linux 3.4 and above for all architectures other
+ * than 'um' (User Mode Linux), which stopped using it in 4.2.
+ */
+#if defined(access_ok)
+ /*
+  * NOTE: This function should not be called directly as it exists simply to
+  * work around access_ok being defined as a macro.
+  */
+static inline int
+__pvr_access_ok_compat(int type, const void __user * addr, unsigned long size)
+{
+       return access_ok(type, addr, size);
+}
+
+#undef access_ok
+#define access_ok(addr, size) __pvr_access_ok_compat(0, addr, size)
+#else
+#define access_ok(addr, size) access_ok(0, addr, size)
+#endif
+
+#endif
+
+/*
+ * Before v5.8, "struct mm_struct" has a semaphore named "mmap_sem", which was
+ * renamed to "mmap_lock" in v5.8. New APIs for taking this lock were also
+ * introduced in v5.8.
+ */
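+/*
+ * Usage sketch (illustrative only): callers can use the v5.8+ names
+ * unconditionally, e.g.
+ *
+ *     mmap_read_lock(current->mm);
+ *     vma = find_vma(current->mm, addr);
+ *     mmap_read_unlock(current->mm);
+ *
+ * and, on older kernels, the compat macros below map these onto mmap_sem.
+ */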
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0))
+
+#define mmap_write_lock(mm)   down_write(&mm->mmap_sem)
+#define mmap_write_unlock(mm) up_write(&mm->mmap_sem)
+
+#define mmap_read_lock(mm)    down_read(&mm->mmap_sem)
+#define mmap_read_unlock(mm)  up_read(&mm->mmap_sem)
+
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+#define drm_gem_object_put(obj) drm_gem_object_unreference_unlocked(obj)
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0))
+#define drm_gem_object_put(obj) drm_gem_object_put_unlocked(obj)
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+
+#define drm_prime_pages_to_sg(dev, pages, nr_pages) \
+       drm_prime_pages_to_sg(pages, nr_pages)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0))
+
+struct dma_buf_map {
+       void *vaddr;
+};
+
+#define dma_buf_vmap(dmabuf, map) \
+       ({ \
+               (map)->vaddr = dma_buf_vmap(dmabuf); \
+               (map)->vaddr ? 0 : ((dmabuf) && (dmabuf)->ops->vmap) ? -ENOMEM : -EINVAL; \
+       })
+
+#define dma_buf_vunmap(dmabuf, map) \
+       ({ \
+               dma_buf_vunmap(dmabuf, (map)->vaddr); \
+               (map)->vaddr = NULL; \
+       })
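+
+/*
+ * Usage sketch (illustrative only); the wrappers above emulate the
+ * int-returning v5.11+ interface:
+ *
+ *     struct dma_buf_map map;
+ *     int err = dma_buf_vmap(dmabuf, &map);
+ *
+ *     if (!err) {
+ *             ... access map.vaddr ...
+ *             dma_buf_vunmap(dmabuf, &map);
+ *     }
+ */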
+
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 12, 0))
+
+#define drm_prime_sg_to_page_array(sgt, pages, npages) \
+       drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL, npages)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 12, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0))
+
+#define drm_gem_plane_helper_prepare_fb drm_gem_fb_prepare_fb
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0)) */
+
+/*
+ * Linux 5.11 renamed the privileged uaccess routines for arm64, and the
+ * Android v5.10 kernel merged the change as well. These routines are only
+ * used on arm64, so no CONFIG_ARM64 check is needed.
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) || \
+       ((LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) && !defined(ANDROID))
+#define uaccess_enable_privileged() uaccess_enable()
+#define uaccess_disable_privileged() uaccess_disable()
+#endif
+
+#endif /* __KERNEL_COMPATIBILITY_H__ */
diff --git a/drivers/gpu/drm/img/kernel_config_compatibility.h b/drivers/gpu/drm/img/kernel_config_compatibility.h
new file mode 100644 (file)
index 0000000..63effd6
--- /dev/null
@@ -0,0 +1,54 @@
+/*************************************************************************/ /*!
+@Title          Kernel config compatibility define options
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This file is exclusively for Linux kernel config options.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __KERNEL_CONFIG_COMPATIBILITY_H__
+#define __KERNEL_CONFIG_COMPATIBILITY_H__
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0))
+#ifdef SUPPORT_DRM_FBDEV_EMULATION
+#define CONFIG_DRM_FBDEV_EMULATION
+#endif
+#endif
+
+#endif /* __KERNEL_CONFIG_COMPATIBILITY_H__ */
diff --git a/drivers/gpu/drm/img/kernel_nospec.h b/drivers/gpu/drm/img/kernel_nospec.h
new file mode 100644 (file)
index 0000000..e27a3eb
--- /dev/null
@@ -0,0 +1,71 @@
+/*************************************************************************/ /*!
+@Title          Macro to limit CPU speculative execution in kernel code
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Per-version macros to allow code to seamlessly use older kernel versions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __KERNEL_NOSPEC_H__
+#define __KERNEL_NOSPEC_H__
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 2) ||                 \
+       (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) &&               \
+        LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 18)) ||            \
+       (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) &&               \
+        LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 81)) ||             \
+       (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) &&                \
+        LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 118)))
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/nospec.h>
+#else
+#define array_index_nospec(index, size) (index)
+#endif
+
+/*
+ * For Ubuntu kernels, the features available for a given Linux version code
+ * may not match those in upstream kernels. This is the case for the
+ * availability of the array_index_nospec macro.
+ */
+#if !defined(array_index_nospec)
+#define array_index_nospec(index, size) (index)
+#endif
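+
+/*
+ * Usage sketch (illustrative only):
+ *
+ *     idx = array_index_nospec(idx, ARRAY_SIZE(table));
+ *     val = table[idx];
+ *
+ * On kernels without the real macro, the fallbacks above return the index
+ * unchanged, i.e. no speculation barrier is applied.
+ */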
+
+#endif /* __KERNEL_NOSPEC_H__ */
diff --git a/drivers/gpu/drm/img/pvr_dma_resv.h b/drivers/gpu/drm/img/pvr_dma_resv.h
new file mode 100644 (file)
index 0000000..a51c9de
--- /dev/null
@@ -0,0 +1,80 @@
+/*************************************************************************/ /*!
+@Title          Kernel reservation object compatibility header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Per-version macros to allow code to seamlessly use older kernel versions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PVR_DMA_RESV_H__
+#define __PVR_DMA_RESV_H__
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
+#include <linux/dma-resv.h>
+#else
+#include <linux/reservation.h>
+
+/* Reservation object types */
+#define dma_resv                       reservation_object
+#define dma_resv_list                  reservation_object_list
+
+/* Reservation object functions */
+#define dma_resv_add_excl_fence                reservation_object_add_excl_fence
+#define dma_resv_add_shared_fence      reservation_object_add_shared_fence
+#define dma_resv_fini                  reservation_object_fini
+#define dma_resv_get_excl              reservation_object_get_excl
+#define dma_resv_get_list              reservation_object_get_list
+#define dma_resv_held                  reservation_object_held
+#define dma_resv_init                  reservation_object_init
+#define dma_resv_reserve_shared                reservation_object_reserve_shared
+#define dma_resv_test_signaled_rcu     reservation_object_test_signaled_rcu
+#define dma_resv_wait_timeout_rcu      reservation_object_wait_timeout_rcu
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0))
+
+#define dma_resv_shared_list   dma_resv_get_list
+#define dma_resv_excl_fence    dma_resv_get_excl
+#define dma_resv_wait_timeout  dma_resv_wait_timeout_rcu
+#define dma_resv_test_signaled dma_resv_test_signaled_rcu
+#define dma_resv_get_fences    dma_resv_get_fences_rcu
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)) */
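+
+/*
+ * Illustrative sketch only: with the aliases above, driver code can be
+ * written against the current dma_resv names and still build against the
+ * pre-v5.4 reservation_object API, e.g.:
+ */
+static inline void pvr_dma_resv_example(struct dma_resv *resv)
+{
+       dma_resv_init(resv);
+       dma_resv_fini(resv);
+}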
+
+#endif /* __PVR_DMA_RESV_H__ */
diff --git a/drivers/gpu/drm/img/pvr_linux_fence.h b/drivers/gpu/drm/img/pvr_linux_fence.h
new file mode 100644 (file)
index 0000000..b9c542a
--- /dev/null
@@ -0,0 +1,103 @@
+/*
+ * @File
+ * @Title       PowerVR Linux fence compatibility header
+ * @Codingstyle LinuxKernel
+ * @Copyright   Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+ * @License     Dual MIT/GPLv2
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License Version 2 ("GPL") in which case the provisions
+ * of GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms of
+ * GPL, and not to allow others to use your version of this file under the terms
+ * of the MIT license, indicate your decision by deleting the provisions above
+ * and replace them with the notice and other provisions required by GPL as set
+ * out in the file called "GPL-COPYING" included in this distribution. If you do
+ * not delete the provisions above, a recipient may use your version of this file
+ * under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT-COPYING".
+ *
+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#if !defined(__PVR_LINUX_FENCE_H__)
+#define __PVR_LINUX_FENCE_H__
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) && \
+       !defined(CHROMIUMOS_KERNEL_HAS_DMA_FENCE)
+#include <linux/fence.h>
+#else
+#include <linux/dma-fence.h>
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) && \
+       !defined(CHROMIUMOS_KERNEL_HAS_DMA_FENCE)
+/* Structures */
+#define dma_fence fence
+#define dma_fence_array fence_array
+#define dma_fence_cb fence_cb
+#define dma_fence_ops fence_ops
+
+/* Defines and Enums */
+#define DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT FENCE_FLAG_ENABLE_SIGNAL_BIT
+#define DMA_FENCE_FLAG_SIGNALED_BIT FENCE_FLAG_SIGNALED_BIT
+#define DMA_FENCE_FLAG_USER_BITS FENCE_FLAG_USER_BITS
+
+#define DMA_FENCE_ERR FENCE_ERR
+#define DMA_FENCE_TRACE FENCE_TRACE
+#define DMA_FENCE_WARN FENCE_WARN
+
+/* Functions */
+#define dma_fence_add_callback fence_add_callback
+#define dma_fence_context_alloc fence_context_alloc
+#define dma_fence_default_wait fence_default_wait
+#define dma_fence_is_signaled fence_is_signaled
+#define dma_fence_enable_sw_signaling fence_enable_sw_signaling
+#define dma_fence_free fence_free
+#define dma_fence_get fence_get
+#define dma_fence_get_rcu fence_get_rcu
+#define dma_fence_init fence_init
+#define dma_fence_is_array fence_is_array
+#define dma_fence_put fence_put
+#define dma_fence_signal fence_signal
+#define dma_fence_wait fence_wait
+#define to_dma_fence_array to_fence_array
+
+static inline signed long
+dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
+{
+       signed long lret;
+
+       lret = fence_wait_timeout(fence, intr, timeout);
+       if (lret || timeout)
+               return lret;
+
+       return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) ? 1 : 0;
+}
+
+#endif
+
+#endif /* !defined(__PVR_LINUX_FENCE_H__) */
diff --git a/drivers/gpu/drm/img/pvr_vmap.h b/drivers/gpu/drm/img/pvr_vmap.h
new file mode 100644 (file)
index 0000000..661d0fe
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * @File        pvr_vmap.h
+ * @Title       Utility functions for virtual memory mapping
+ * @Codingstyle LinuxKernel
+ * @Copyright   Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+ * @License     Dual MIT/GPLv2
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License Version 2 ("GPL") in which case the provisions
+ * of GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms of
+ * GPL, and not to allow others to use your version of this file under the terms
+ * of the MIT license, indicate your decision by deleting the provisions above
+ * and replace them with the notice and other provisions required by GPL as set
+ * out in the file called "GPL-COPYING" included in this distribution. If you do
+ * not delete the provisions above, a recipient may use your version of this file
+ * under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT-COPYING".
+ *
+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef PVR_VMAP_H
+#define PVR_VMAP_H
+
+#include <linux/version.h>
+#include <linux/vmalloc.h>
+
+
+#ifndef CACHE_TEST
+#define RISCV_STARFIVE_CACHE_COHERENT_FIX
+#endif
+#ifndef RISCV_STARFIVE_CACHE_COHERENT_FIX
+static inline void *pvr_vmap(struct page **pages,
+                            unsigned int count,
+                            __maybe_unused unsigned long flags,
+                            pgprot_t prot)
+{
+#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
+       return vmap(pages, count, flags, prot);
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0))
+       return vm_map_ram(pages, count, -1, prot);
+#else
+       if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
+               return vm_map_ram(pages, count, -1);
+       else
+               return vmap(pages, count, flags, prot);
+#endif /* !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS) */
+}
+
+static inline void pvr_vunmap(void *pages,
+                             __maybe_unused unsigned int count,
+                             __maybe_unused pgprot_t prot)
+{
+#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
+       vunmap(pages);
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0))
+       vm_unmap_ram(pages, count);
+#else
+       if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
+               vm_unmap_ram(pages, count);
+       else
+               vunmap(pages);
+#endif /* !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS) */
+}
+#else
+extern void *riscv_vmap(struct page **pages, unsigned int count,
+          unsigned long flags, pgprot_t prot);
+static inline void *pvr_vmap(struct page **pages,
+                            unsigned int count,
+                            __maybe_unused unsigned long flags,
+                            pgprot_t prot)
+{
+       return riscv_vmap(pages, count, flags, prot);
+}
+
+
+static inline void pvr_vunmap(void *pages,
+                             __maybe_unused unsigned int count,
+                             __maybe_unused pgprot_t prot)
+{
+       vunmap(pages);
+}
+#endif /* RISCV_STARFIVE_CACHE_COHERENT_FIX */
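+
+/*
+ * Usage sketch (illustrative only): the helpers are intended to be used as a
+ * matched pair with the same count and prot, e.g. for a cached kernel
+ * mapping of a page array:
+ */
+static inline void pvr_vmap_example(struct page **pages, unsigned int count)
+{
+       void *vaddr = pvr_vmap(pages, count, VM_MAP, PAGE_KERNEL);
+
+       if (vaddr) {
+               /* ... access the contiguous kernel mapping at vaddr ... */
+               pvr_vunmap(vaddr, count, PAGE_KERNEL);
+       }
+}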
+
+#endif /* PVR_VMAP_H */
diff --git a/drivers/gpu/drm/img/pvrversion.h b/drivers/gpu/drm/img/pvrversion.h
new file mode 100644 (file)
index 0000000..c62b3f7
--- /dev/null
@@ -0,0 +1,68 @@
+/*************************************************************************/ /*!
+@File           pvrversion.h
+@Title          PowerVR version numbers and strings.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Version numbers and strings for PowerVR components.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVRVERSION_H
+#define PVRVERSION_H
+
+#define PVRVERSION_MAJ               1U
+#define PVRVERSION_MIN               17U
+
+#define PVRVERSION_FAMILY           "rogueddk"
+#define PVRVERSION_BRANCHNAME       "1.17"
+#define PVRVERSION_BUILD             6210866
+#define PVRVERSION_BSCONTROL        "Rogue_DDK_Linux_WS"
+
+#define PVRVERSION_STRING           "Rogue_DDK_Linux_WS rogueddk 1.17@6210866"
+#define PVRVERSION_STRING_SHORT     "1.17@6210866"
+
+#define COPYRIGHT_TXT               "Copyright (c) Imagination Technologies Ltd. All Rights Reserved."
+
+#define PVRVERSION_BUILD_HI          621
+#define PVRVERSION_BUILD_LO          866
+#define PVRVERSION_STRING_NUMERIC   "1.17.621.866"
+
+#define PVRVERSION_PACK(MAJOR,MINOR) (((IMG_UINT32)((IMG_UINT32)(MAJOR) & 0xFFFFU) << 16U) | (((MINOR) & 0xFFFFU) << 0U))
+#define PVRVERSION_UNPACK_MAJ(VERSION) (((VERSION) >> 16U) & 0xFFFFU)
+#define PVRVERSION_UNPACK_MIN(VERSION) (((VERSION) >> 0U) & 0xFFFFU)
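+
+/* For example, PVRVERSION_PACK(1, 17) == 0x00010011, and
+ * PVRVERSION_UNPACK_MAJ()/PVRVERSION_UNPACK_MIN() recover 1 and 17.
+ */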
+
+#endif /* PVRVERSION_H */
diff --git a/drivers/gpu/drm/img/riscv_vmap.c b/drivers/gpu/drm/img/riscv_vmap.c
new file mode 100644 (file)
index 0000000..760ef96
--- /dev/null
@@ -0,0 +1,206 @@
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/pfn.h>
+#include <linux/kmemleak.h>
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <linux/llist.h>
+#include <linux/bitops.h>
+#include <linux/rbtree_augmented.h>
+#include <linux/overflow.h>
+#include <linux/hugetlb.h>
+
+#include <linux/uaccess.h>
+#include <asm/tlbflush.h>
+#include <asm/shmparam.h>
+
+#include "pgalloc-track.h"
+#include "riscv_vmap.h"
+
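+/*
+ * StarFive cache-coherency workaround: with 4 KiB pages (PAGE_SHIFT == 12)
+ * the offset below adds 0x400000 to each PFN, so the kernel mapping targets
+ * the physical alias of the pages at +0x4_0000_0000.
+ */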
+#define SYSPORT_MEM_PFN_OFFSET  (0x400000000 >> PAGE_SHIFT)
+#define __mk_pte(page, prot)     pfn_pte(page_to_pfn(page) + SYSPORT_MEM_PFN_OFFSET, prot)
+
+static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
+               unsigned long end, pgprot_t prot, struct page **pages, int *nr,
+               pgtbl_mod_mask *mask)
+{
+       pte_t *pte;
+
+       /*
+        * nr is a running index into the array which helps higher level
+        * callers keep track of where we're up to.
+        */
+
+       pte = pte_alloc_kernel_track(pmd, addr, mask);
+       if (!pte)
+               return -ENOMEM;
+       do {
+               struct page *page = pages[*nr];
+
+               if (WARN_ON(!pte_none(*pte)))
+                       return -EBUSY;
+               if (WARN_ON(!page))
+                       return -ENOMEM;
+               /* Special processing for StarFive RISC-V: add an offset of 0x400000 to the PFN. */
+               set_pte_at(&init_mm, addr, pte, __mk_pte(page, prot));
+               (*nr)++;
+       } while (pte++, addr += PAGE_SIZE, addr != end);
+       *mask |= PGTBL_PTE_MODIFIED;
+       return 0;
+}
+
+static int vmap_pmd_range(pud_t *pud, unsigned long addr,
+               unsigned long end, pgprot_t prot, struct page **pages, int *nr,
+               pgtbl_mod_mask *mask)
+{
+       pmd_t *pmd;
+       unsigned long next;
+
+       pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
+       if (!pmd)
+               return -ENOMEM;
+       do {
+               next = pmd_addr_end(addr, end);
+               if (vmap_pte_range(pmd, addr, next, prot, pages, nr, mask))
+                       return -ENOMEM;
+       } while (pmd++, addr = next, addr != end);
+       return 0;
+}
+
+static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
+               unsigned long end, pgprot_t prot, struct page **pages, int *nr,
+               pgtbl_mod_mask *mask)
+{
+       pud_t *pud;
+       unsigned long next;
+
+       pud = pud_alloc_track(&init_mm, p4d, addr, mask);
+       if (!pud)
+               return -ENOMEM;
+       do {
+               next = pud_addr_end(addr, end);
+               if (vmap_pmd_range(pud, addr, next, prot, pages, nr, mask))
+                       return -ENOMEM;
+       } while (pud++, addr = next, addr != end);
+       return 0;
+}
+
+static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
+               unsigned long end, pgprot_t prot, struct page **pages, int *nr,
+               pgtbl_mod_mask *mask)
+{
+       p4d_t *p4d;
+       unsigned long next;
+
+       p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
+       if (!p4d)
+               return -ENOMEM;
+       do {
+               next = p4d_addr_end(addr, end);
+               if (vmap_pud_range(p4d, addr, next, prot, pages, nr, mask))
+                       return -ENOMEM;
+       } while (p4d++, addr = next, addr != end);
+       return 0;
+}
+
+static int __map_kernel_range_noflush(unsigned long addr, unsigned long size,
+                            pgprot_t prot, struct page **pages)
+{
+       unsigned long start = addr;
+       unsigned long end = addr + size;
+       unsigned long next;
+       pgd_t *pgd;
+       int err = 0;
+       int nr = 0;
+       pgtbl_mod_mask mask = 0;
+
+       BUG_ON(addr >= end);
+       pgd = pgd_offset_k(addr);
+       do {
+               next = pgd_addr_end(addr, end);
+               if (pgd_bad(*pgd))
+                       mask |= PGTBL_PGD_MODIFIED;
+               err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
+               if (err)
+                       return err;
+       } while (pgd++, addr = next, addr != end);
+
+       if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
+               arch_sync_kernel_mappings(start, end);
+
+       return 0;
+}
+
+static int __map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
+               struct page **pages)
+{
+       int ret;
+
+       ret = __map_kernel_range_noflush(start, size, prot, pages);
+       flush_cache_vmap(start, start + size);
+       return ret;
+}
+
+void *riscv_vmap(struct page **pages, unsigned int count,
+          unsigned long flags, pgprot_t prot)
+{
+       struct vm_struct *area;
+       unsigned long size;             /* In bytes */
+
+       might_sleep();
+
+       if (count > totalram_pages())
+               return NULL;
+
+       size = (unsigned long)count << PAGE_SHIFT;
+       area = get_vm_area_caller(size, flags, __builtin_return_address(0));
+       if (!area)
+               return NULL;
+
+       if (__map_kernel_range((unsigned long)area->addr, size, pgprot_nx(prot),
+                       pages) < 0) {
+               vunmap(area->addr);
+               return NULL;
+       }
+
+       if (flags & VM_MAP_PUT_PAGES)
+               area->pages = pages;
+       return area->addr;
+}
+EXPORT_SYMBOL_GPL(riscv_vmap);
+
+/*---------------Test code-----*/
+#ifdef IMG_GPU_DEBUG
+extern unsigned long va2pa(unsigned long *vaddr_in);
+void test_riscv_vmap(void)
+{
+       struct page *pages[2];
+       void *tmp;
+       pgprot_t prot = PAGE_KERNEL;
+
+       prot = pgprot_noncached(prot);
+       printk("Page prot = %lx\n", pgprot_val(prot));
+
+       pages[0] = alloc_page(GFP_KERNEL);
+       pages[1] = alloc_page(GFP_KERNEL);
+
+       printk("Page struct: %lx %lx\n", (unsigned long)pages[0], (unsigned long)pages[1]);
+       printk("virt_addr_valid: %d %d\n", virt_addr_valid(pages[0]), virt_addr_valid(pages[1]));
+       printk("PPN: %lx %lx\n", page_to_pfn(pages[0]), page_to_pfn(pages[1]));
+       printk("Page address: %lx %lx\n", (unsigned long)page_address(pages[0]), (unsigned long)page_address(pages[1]));
+       tmp = riscv_vmap(pages, 2, VM_READ | VM_WRITE, prot);
+       va2pa(tmp);
+       memset(tmp, 0x78, 8 * 1024);
+
+       vunmap(tmp);
+       __free_pages(pages[0], 0);
+       __free_pages(pages[1], 0);
+       printk("Pages freed!\n");
+}
+#endif
diff --git a/drivers/gpu/drm/img/riscv_vmap.h b/drivers/gpu/drm/img/riscv_vmap.h
new file mode 100644 (file)
index 0000000..c916444
--- /dev/null
@@ -0,0 +1,7 @@
+#ifndef RISCV_VMAP_H
+#define RISCV_VMAP_H
+
+void *riscv_vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot);
+void test_riscv_vmap(void);
+
+#endif